hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1f5cfe45f3f1d6614357303fdc3f3cece991c2e | 4,023 | py | Python | alipay/aop/api/domain/AlipayUserUnicomOrderInfoSyncModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserUnicomOrderInfoSyncModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserUnicomOrderInfoSyncModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserUnicomOrderInfoSyncModel(object):
    """Request model for syncing a Unicom order's state to Alipay."""

    # Field names shared by the dict (de)serialization helpers below.
    _FIELDS = ('gmt_order_change', 'order_no', 'order_operate_type',
               'phone_no', 'product_name', 'sec_key', 'user_id')

    def __init__(self):
        # Every field starts unset (None) and is filled via its property.
        self._gmt_order_change = None
        self._order_no = None
        self._order_operate_type = None
        self._phone_no = None
        self._product_name = None
        self._sec_key = None
        self._user_id = None

    @property
    def gmt_order_change(self):
        return self._gmt_order_change

    @gmt_order_change.setter
    def gmt_order_change(self, value):
        self._gmt_order_change = value

    @property
    def order_no(self):
        return self._order_no

    @order_no.setter
    def order_no(self, value):
        self._order_no = value

    @property
    def order_operate_type(self):
        return self._order_operate_type

    @order_operate_type.setter
    def order_operate_type(self, value):
        self._order_operate_type = value

    @property
    def phone_no(self):
        return self._phone_no

    @phone_no.setter
    def phone_no(self, value):
        self._phone_no = value

    @property
    def product_name(self):
        return self._product_name

    @product_name.setter
    def product_name(self, value):
        self._product_name = value

    @property
    def sec_key(self):
        return self._sec_key

    @sec_key.setter
    def sec_key(self, value):
        self._sec_key = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize the set fields into a plain dict.

        Falsy fields are skipped, mirroring the generated-SDK behaviour;
        nested objects exposing to_alipay_dict() are serialized recursively.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; return None for an empty/None d."""
        if not d:
            return None
        o = AlipayUserUnicomOrderInfoSyncModel()
        for name in AlipayUserUnicomOrderInfoSyncModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| 30.709924 | 87 | 0.599304 |
e688d8843d900ffb0571e918f15f41bb704b1ab2 | 1,040 | py | Python | cexapi/test.py | codarrenvelvindron/cex.io-api-python | 5e54ca7d4d98e509e18d1ecda311da544649ea60 | [
"MIT"
] | 1 | 2021-11-09T18:47:44.000Z | 2021-11-09T18:47:44.000Z | cexapi/test.py | codarrenvelvindron/cex.io-api-python | 5e54ca7d4d98e509e18d1ecda311da544649ea60 | [
"MIT"
] | null | null | null | cexapi/test.py | codarrenvelvindron/cex.io-api-python | 5e54ca7d4d98e509e18d1ecda311da544649ea60 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import cexapi
# NOTE(review): credentials are blank placeholders -- fill in real CEX.IO
# account values before running this demo script.
username = ''
api_key = ''
api_secret = ''
demo = cexapi.API(username, api_key, api_secret)
# Market-data calls: ticker and order book for two currency pairs.
print("Ticker (GHS/BTC)")
print(demo.ticker()) ## or demo.ticker('GHS/BTC')
print("Ticker (BF1/BTC)")
print(demo.ticker('BF1/BTC'))
print("Order book (GHS/BTC)")
print(demo.order_book()) ## or demo.order_book('GHS/BTC')
print("Order book (BF1/BTC)")
print(demo.order_book('BF1/BTC'))
# Trade history starting from trade id 100.
print("Trade history since=100 (GHS/BTC)")
print(demo.trade_history(100)) ## or (100,'GHS/BTC')
print("Trade history since=100 (BF1/BTC)")
print(demo.trade_history(100,'BF1/BTC'))
# Account-level calls below require valid credentials.
print("Balance")
print(demo.balance())
print("Open orders (GHS/BTC)")
print(demo.current_orders()) ## or ('GHS/BTC')
print("Open orders (BF1/BTC)")
print(demo.current_orders('BF1/BTC'))
print("Cancel order (order_id=100)")
print(demo.cancel_order(100))
# NOTE(review): "Plaсe" below appears to contain a non-ASCII character;
# kept as-is since it is a runtime string.
print("Plaсe order buy 4GHS/0.1BTC)")
print(demo.place_order('buy',1,0.1)) ## or ('buy',1,0.1,'GHS/BTC')
print("Open orders sell 1BF1/1.5BTC")
print(demo.place_order('sell',1,1.5,'BF1/BTC'))
| 31.515152 | 66 | 0.689423 |
d9b2373a798bf91b400c1761071f8164e1c97506 | 2,381 | py | Python | single_linked_list.py | fairoz-ahmed/Python_Practice | e498f81fca02f0773f1c6e9f93e5f1cf1f94eb89 | [
"MIT"
] | null | null | null | single_linked_list.py | fairoz-ahmed/Python_Practice | e498f81fca02f0773f1c6e9f93e5f1cf1f94eb89 | [
"MIT"
] | null | null | null | single_linked_list.py | fairoz-ahmed/Python_Practice | e498f81fca02f0773f1c6e9f93e5f1cf1f94eb89 | [
"MIT"
] | null | null | null | class Node:
def __init__(self,value):
self.info=value
self.link=None
class SingleLinkedList:
    """Singly linked list with interactive-style insert/search/display helpers."""

    def __init__(self):
        # Head node; None while the list is empty.
        self.start = None

    def create_list(self):
        """Build the list from user input: a count followed by that many ints."""
        n = int(input("ENter the number of nodes : "))
        if n == 0:
            return
        for _ in range(n):
            data = int(input("Enter the elements : "))
            self.insert_end(data)

    def display_list(self):
        """Print all elements in order, or a notice when the list is empty."""
        if self.start is None:
            print('List is empty')
            return
        print("List is : ")
        node = self.start
        while node is not None:
            print(node.info, ' ', end=' ')
            node = node.link
        print()

    def count_node(self):
        """Print how many nodes the list currently holds."""
        total = 0
        node = self.start
        while node is not None:
            total += 1
            node = node.link
        print("Number of nodes in the list: ", total)

    def search(self, x):
        """Print the 1-based position of x and return True, or False if absent."""
        node, position = self.start, 1
        while node is not None:
            if node.info == x:
                print(x, " is at position ", position)
                return True
            node = node.link
            position += 1
        print(x, " not found in list")
        return False

    def insert_begin(self, data):
        """Prepend a new node holding data."""
        new_node = Node(data)
        new_node.link = self.start
        self.start = new_node

    def insert_end(self, data):
        """Append a new node holding data."""
        new_node = Node(data)
        if self.start is None:
            self.start = new_node
            return
        node = self.start
        while node.link is not None:
            node = node.link
        node.link = new_node

    def insert_after(self, data, x):
        """Insert data right after the first node holding x, if present."""
        node = self.start
        while node is not None and node.info != x:
            node = node.link
        if node is None:
            print(x, "not present in the list")
        else:
            new_node = Node(data)
            new_node.link = node.link
            node.link = new_node

    def insert_before(self, data, x):
        """Insert data right before the first node holding x, if present."""
        if self.start is None:
            print('List is empty')
            return
        if x == self.start.info:
            new_node = Node(data)
            new_node.link = self.start
            self.start = new_node
            return
        node = self.start
        while node.link is not None and node.link.info != x:
            node = node.link
        if node.link is None:
            print(x, "not present in the list")
        else:
            new_node = Node(data)
            new_node.link = node.link
            node.link = new_node

    def insert_position(self, data, k):
        """Insert data as the k-th node (1-based); warn when k is out of reach."""
        if k == 1:
            new_node = Node(data)
            new_node.link = self.start
            self.start = new_node
            return
        node, index = self.start, 1
        # Stop at the (k-1)-th node, or at the end if the list is shorter.
        while index < k - 1 and node is not None:
            node = node.link
            index += 1
        if node is None:
            print('You can insert only upto position ', index)
        else:
            new_node = Node(data)
            new_node.link = node.link
            node.link = new_node
l=SingleLinkedList()
# NOTE(review): this menu is a skeleton only -- the options are printed (the
# last three lines repeat "1.create a list", apparently placeholders) and a
# choice is read, but no dispatch to the list operations is implemented.
print("1.create a list")
print("2.display_list")
print("3.search element in a list")
print("4.Insert at begin")
print("5.Insert at end")
print("6.")
print("1.create a list")
print("1.create a list")
ch=int(input("Enter your choice : "))
| 18.037879 | 49 | 0.637127 |
12bb7eb5fdcd5b4cc0977118367b7c422e10b73e | 2,066 | py | Python | cyrax/conf.py | piranha/cyrax | fd1c0473f0c69631339b2a5476933b00f604c643 | [
"0BSD"
] | 10 | 2015-12-18T22:39:02.000Z | 2021-03-03T15:11:05.000Z | cyrax/conf.py | piranha/cyrax | fd1c0473f0c69631339b2a5476933b00f604c643 | [
"0BSD"
] | 9 | 2016-01-01T09:28:13.000Z | 2019-12-17T09:39:15.000Z | cyrax/conf.py | piranha/cyrax | fd1c0473f0c69631339b2a5476933b00f604c643 | [
"0BSD"
] | 9 | 2015-12-31T23:18:07.000Z | 2020-08-10T10:56:46.000Z | '''Parser of naive data format
Copyright 2009 Alexander Solovyov, under terms of Poetic License
Format:
key: value
key: [list, of, values]
key: {key: value, key: value}
key: date: yyyy-mm-dd HH:MM:SS
key: True # boolean value
key: False # boolean value
Any line without a ":" is simply skipped. Field with a key of ``date`` are
special case and are checked to contain date value.
'''
from datetime import datetime
def parse(data):
    """Parse naive config text into a dict; lines without ':' are skipped."""
    parsed = {}
    for raw_line in data.splitlines():
        if ':' in raw_line:
            key, value = parse_line(raw_line)
            parsed[key] = value
    return parsed


def parse_date(date):
    """Parse a 'yyyy-mm-dd HH:MM:SS' string into a datetime."""
    return datetime.strptime(date, '%Y-%m-%d %H:%M:%S')


def parse_line(line):
    """Split one 'key: value' line and coerce the value.

    Supports [lists], {dicts}, values prefixed with 'date:', a bare 'date'
    key, and boolean words; anything else stays a plain string.
    """
    key, value = strip(line.split(':', 1))
    starts, ends = value.startswith, value.endswith
    if starts('[') and ends(']'):
        value = strip(value[1:-1].split(','))
    elif starts('{') and ends('}'):
        value = dict(strip(pair.split(':')) for pair in value[1:-1].split(','))
    elif starts('date:'):
        value = parse_date(value[len('date:'):].strip())
    elif key.strip() == 'date':
        try:
            value = parse_date(value)
        except ValueError:
            pass  # leave unparseable dates as plain strings
    elif value.lower() in 'true yes on'.split():
        value = True
    elif value.lower() in 'false no off'.split():
        value = False
    return key, value


def strip(lst):
    """Return a new list with each string in lst whitespace-stripped."""
    return [item.strip() for item in lst]
class Settings(dict):
    """Dict with attribute access and optional key fallback to a parent Settings.

    Note: because __setattr__ stores into the dict, 'parent' itself lives as
    a dict item and is resolved back through __getattr__.
    """

    def __init__(self, parent=None, **kwargs):
        self.parent = parent
        super(Settings, self).__init__(**kwargs)

    def read(self, inp):
        """Merge keys parsed from the raw string inp; return self for chaining."""
        self.update(parse(inp))
        return self

    def __getitem__(self, name):
        try:
            return super(Settings, self).__getitem__(name)
        except KeyError:
            if not self.parent:
                raise
            return self.parent[name]

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as exc:
            raise AttributeError(str(exc))

    def __setattr__(self, name, value):
        self[name] = value
| 24.891566 | 74 | 0.577928 |
fda5f277728663d63bf565b640729515b4fa070e | 711 | py | Python | CryptoData.py | tysyak/CrypText | e958eb1be97850d7b6f7a168956222a97177aff3 | [
"MIT"
] | null | null | null | CryptoData.py | tysyak/CrypText | e958eb1be97850d7b6f7a168956222a97177aff3 | [
"MIT"
] | null | null | null | CryptoData.py | tysyak/CrypText | e958eb1be97850d7b6f7a168956222a97177aff3 | [
"MIT"
] | 1 | 2021-08-13T02:34:08.000Z | 2021-08-13T02:34:08.000Z | #!/usr/bin/env python3
import time
import pandas as pd
class CryptoData:
    """Loads market CSV data and normalizes date strings to unix timestamps."""

    def __init__(self) -> None:
        # Parsed rows; row 0 holds the column names once a CSV is loaded.
        self.csv_list = list()

    def csv_to_list(self, csv_path='tmp/bat-mxn-max.csv') -> list:
        """Read csv_path into self.csv_list (header row first) and return it."""
        frame = pd.read_csv(csv_path, delimiter=',')
        self.csv_list = [list(row) for row in frame.values]
        self.csv_list.insert(0, frame.columns.to_list())
        self._time_to_timestamp()
        return self.csv_list

    def _time_to_timestamp(self):
        """Replace 'YYYY-MM-DD' strings in column 0 with int unix timestamps."""
        # Skip the header row; rows are mutated in place.
        for row in self.csv_list[1:]:
            row[0] = int(time.mktime(time.strptime(row[0], '%Y-%m-%d')))
if __name__ == '__main__':
    # Manual smoke run: load the default CSV and dump the parsed rows.
    crypto = CryptoData()
    print(crypto.csv_to_list())
| 26.333333 | 66 | 0.613221 |
5ea232087d6584bc40ad6667274d0a703049d44d | 2,158 | py | Python | temboardagent/validators.py | pgiraud/temboard-agent | 6c087f41f06e1f2992057592f78e11c0c7649e4d | [
"PostgreSQL"
] | null | null | null | temboardagent/validators.py | pgiraud/temboard-agent | 6c087f41f06e1f2992057592f78e11c0c7649e4d | [
"PostgreSQL"
] | null | null | null | temboardagent/validators.py | pgiraud/temboard-agent | 6c087f41f06e1f2992057592f78e11c0c7649e4d | [
"PostgreSQL"
] | null | null | null | # User Input Validator
#
# These functions provide stateless validation of user input, mainly CLI
# arguments and environment variables.
#
# On invalid input, a ValueError is raised. Other exceptions are considered a
# bug.
#
# A validator is idempotent. It must accepts what it returns.
import json
import logging
import os.path
import re
from distutils.util import strtobool
from logging.handlers import SysLogHandler
from .log import HANDLERS as LOG_METHODS
# Anchored dotted-quad matcher: each octet is restricted to 0-255.
_address_re = re.compile(
    r'(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|\d)'
    r'(\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|\d'
    r')){3}$'
)


def address(raw):
    """Validate an IPv4 dotted-quad address string; return it unchanged."""
    if _address_re.match(raw) is None:
        raise ValueError('invalid address')
    return raw
def boolean(raw):
    """Coerce a CLI/env truth word to bool; pass real booleans through.

    Accepts the same vocabulary as distutils.util.strtobool
    (y/yes/t/true/on/1 and n/no/f/false/off/0, case-insensitively).
    distutils was deprecated by PEP 632 and removed in Python 3.12, so the
    check is inlined here instead of importing strtobool.

    Raises ValueError for any other value.
    """
    # Idempotence: booleans are accepted as-is.
    if raw in (True, False):
        return raw
    word = raw.lower()
    if word in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if word in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError("invalid truth value %r" % (raw,))
def dir_(raw):
    """Resolve raw to a real path and require it to be an existing directory."""
    resolved = os.path.realpath(raw)
    if not os.path.isdir(resolved):
        raise ValueError('Not a directory')
    return resolved
def file_(raw):
    """Resolve raw to a real path and require that the path exists."""
    resolved = os.path.realpath(raw)
    if not os.path.exists(resolved):
        # Message reports the resolved path, as in the original.
        raise ValueError('%s: File not found' % resolved)
    return resolved
# Entries must be purely alphanumeric identifiers.
_identifier_re = re.compile(r'^[a-zA-Z0-9]+$')


def jsonlist(raw):
    """Validate a JSON-encoded (or plain) list of alphanumeric identifiers.

    Returns the entries as a list of strings; raises ValueError when the
    input is not a list or any entry is not alphanumeric.
    """
    if hasattr(raw, 'lower'):
        # A string was passed: decode it as JSON first (idempotent on lists).
        raw = json.loads(raw)
    if not isinstance(raw, list):
        raise ValueError('not a list')
    entries = [str(item) for item in raw]
    for entry in entries:
        if _identifier_re.match(entry) is None:
            raise ValueError('%s is invalid' % entry)
    return entries
def port(raw):
    """Validate a TCP/UDP port number; return it as an int.

    Raises ValueError when raw is not an integer or is outside 0-65535.
    """
    value = int(raw)
    # 65535 is the highest valid 16-bit port; the previous bound (65635)
    # was a typo that wrongly accepted ports 65536-65635.
    if value < 0 or value > 65535:
        raise ValueError('Port out of range')
    return value
def loglevel(raw):
    """Validate a logging level name; return it upper-cased.

    The previous implementation looked the name up in logging._levelNames,
    a private mapping that exists only on Python 2 (Python 3 renamed it to
    _nameToLevel), so it crashed with AttributeError on Python 3. Check
    against the documented level names (plus the WARN/FATAL aliases)
    instead of relying on a private attribute.
    """
    raw = raw.upper()
    known = ('CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING',
             'INFO', 'DEBUG', 'NOTSET')
    if raw not in known:
        raise ValueError('unkown log level')
    return raw
def logmethod(raw):
    """Validate that raw names one of the supported logging handlers."""
    if raw in LOG_METHODS:
        return raw
    raise ValueError('unkown method')
def syslogfacility(raw):
    """Validate a syslog facility name (e.g. 'daemon', 'local0')."""
    if raw in SysLogHandler.facility_names:
        return raw
    raise ValueError('unkown syslog facility')
def writeabledir(raw):
    """Validate an existing directory that the current user can write to."""
    path = dir_(raw)
    if os.access(path, os.W_OK):
        return path
    raise ValueError('Not writable')
| 20.358491 | 77 | 0.639481 |
a8a3e297d61040ec2bcd7962243fa745d1c05e9d | 2,348 | py | Python | setup.py | VerdantFox/TowerDefense | 1463fcd5067a565d049303ba04564e44051dc7ae | [
"CC-BY-4.0"
] | 6 | 2019-01-30T00:06:43.000Z | 2021-11-29T13:15:42.000Z | setup.py | VerdantFox/TowerDefense | 1463fcd5067a565d049303ba04564e44051dc7ae | [
"CC-BY-4.0"
] | null | null | null | setup.py | VerdantFox/TowerDefense | 1463fcd5067a565d049303ba04564e44051dc7ae | [
"CC-BY-4.0"
] | 2 | 2020-12-15T07:55:34.000Z | 2021-07-28T12:09:16.000Z | import cx_Freeze
import os
# NOTE(review): machine-specific, hard-coded Tcl/Tk paths -- these only work
# on the original author's Windows install. Also, '\A' in 'Teddy\AppData' is
# not a recognized escape so it stays a literal backslash, but mixing '\\'
# and '\' in the same string is fragile.
os.environ['TCL_LIBRARY'] = "C:\\Users\\Teddy\AppData\\Local\\Programs\\Python\\Python36-32\\tcl\\tcl8.6"
os.environ['TK_LIBRARY'] = "C:\\Users\\Teddy\AppData\\Local\\Programs\\Python\\Python36-32\\tcl\\tk8.6"
# Build a windowed (no console) Windows executable from main.py.
executables = [cx_Freeze.Executable("main.py", base="Win32GUI")]
# to run, go to directory in cmd prompt
# python setup.py build
cx_Freeze.setup(
    name="Twisted Towers",
    version="1.0",
    author="Theodore_Williams",
    description="Tower Defense Game built in Python's Pygame",
    # "excludes" trims unused stdlib packages to shrink the frozen build.
    options={"build_exe": {
        "excludes": ["ctypes",
                     "email",
                     "html",
                     "http",
                     "json",
                     "lib2to3",
                     "logging",
                     "multiprocessing",
                     "OpenGl",
                     "pkg_resources",
                     "pydoc_data",
                     "tcl",
                     "test",
                     "tk,",  # NOTE(review): comma inside the string looks like a typo for "tk"
                     "tkinter",
                     "unittest",
                     "urlib",  # NOTE(review): possibly meant "urllib"
                     "xml",
                     "xmlrpc"
                     ],
        # Packages (game code and assets modules) bundled into the build.
        "packages": ["pygame",
                     "sys",
                     "effects",
                     "Enemies",
                     "Enemies.dragon",
                     "Enemies.orc",
                     "Enemies.spider",
                     "Enemies.turtle",
                     "Enemies.wolf",
                     "music",
                     "soundEffects",
                     "soundEffects.Deaths",
                     "soundEffects.TowerShots",
                     "towers"
                     ],
        # Loose files copied next to the executable.
        "include_files": ["README.md",
                          "definitions.py",
                          "enemies.py",
                          "gameBackDrop.png",
                          "gameParameters.py",
                          "gameText.py",
                          "generalClass.py",
                          "helpers.py",
                          "lists.py",
                          "main.py",
                          "setup.py",
                          "sounds.py",
                          "towerClass.py"
                          ],
        "optimize": 2}},
    executables=executables
)
| 33.542857 | 105 | 0.362436 |
9bfbf8b4f4e16754f14ef51dc02bd3709471302c | 1,011 | py | Python | d9/part1.py | Jamie-Chang/advent2021 | c3fda9098283b06265b03070085aeb0c6e27d408 | [
"Apache-2.0"
] | null | null | null | d9/part1.py | Jamie-Chang/advent2021 | c3fda9098283b06265b03070085aeb0c6e27d408 | [
"Apache-2.0"
] | null | null | null | d9/part1.py | Jamie-Chang/advent2021 | c3fda9098283b06265b03070085aeb0c6e27d408 | [
"Apache-2.0"
] | null | null | null | from typing import Iterator
def read() -> list[list[int]]:
    """Load the puzzle input as a grid of single-digit heights."""
    with open("d9/input.txt") as handle:
        return [[int(ch) for ch in line.strip()] for line in handle]
def _adjacent_coord(row: int, col: int) -> Iterator[tuple[int, int]]:
yield row - 1, col
yield row + 1, col
yield row, col - 1
yield row, col + 1
def _in_bound(rows: int, cols: int, row: int, col: int) -> bool:
return 0 <= row < rows and 0 <= col < cols
def adjacent(grid: list[list[int]], row: int, col: int) -> Iterator[int]:
    """Yield the values of in-bounds orthogonal neighbours of (row, col)."""
    n_rows, n_cols = len(grid), len(grid[0])
    for r, c in _adjacent_coord(row, col):
        if _in_bound(n_rows, n_cols, r, c):
            yield grid[r][c]
def get_low_points(grid: list[list[int]]) -> Iterator[int]:
    """Yield every cell strictly lower than all of its neighbours."""
    for row_idx, row in enumerate(grid):
        for col_idx, height in enumerate(row):
            if all(height < other for other in adjacent(grid, row_idx, col_idx)):
                yield height
if __name__ == "__main__":
grid = read()
print(sum(height + 1 for height in get_low_points(grid)))
| 26.605263 | 73 | 0.592483 |
ff1f3a20713ebba5a48fcbdad5e0c747183f9d3f | 3,782 | py | Python | unittests/test_cli.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 1 | 2019-09-18T14:38:55.000Z | 2019-09-18T14:38:55.000Z | unittests/test_cli.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 14 | 2019-05-24T18:55:23.000Z | 2022-02-25T16:56:28.000Z | unittests/test_cli.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 8 | 2019-04-08T14:48:35.000Z | 2022-02-04T16:59:59.000Z | # -*- coding: utf-8 -*-
"""Module unittests.test_cli.py
This module contains methods to test the cli module via pytest.
Attributes:
user_config_dir (str): path to directory containing sample user configs
user_config_success (str): file containing correct user config
user_config_failure (str): file containing error-generating config
json_output_file (str): path to output file
"""
import os
import click
import signal
from compliance_suite.cli import main, report
from unittests.constants import OUTPUT_DIR as od
from unittests.methods import *
from click.testing import CliRunner
from multiprocessing import Process
user_config_dir = "unittests/data/user_config/"
user_config_success = user_config_dir + "config_0.yaml"
user_config_failure = user_config_dir + "fail_0.yaml"
json_output_file = "unittest_output.json"
def test_main():
    """Smoke test: invoking the bare 'main' command must not blow up."""
    CliRunner().invoke(main)
    assert True
def test_report():
    """Run the 'report' command in its supported modes; each must exit 0."""
    remove_output_dirs()
    # Minimal invocation: config file only.
    outcome = CliRunner().invoke(report, ['-c', user_config_success])
    assert outcome.exit_code == 0

    remove_output_dirs()
    # With an explicit (pre-created) output directory, no tar, forced.
    os.mkdir(od)
    outcome = CliRunner().invoke(report, ['-c', user_config_success,
                                          '-o', od, '--no-tar', '-f'])
    assert outcome.exit_code == 0

    remove_output_dirs()
    # NOTE(review): the '--serve'/'--uptime' variant is currently disabled.
def test_exceptions():
    """Every malformed invocation must be caught and exit with status 1."""
    failing_arg_sets = [
        # empty cli: exception should be caught
        [],
        # incorrectly formatted user config
        ["-c", user_config_failure, "-o", od],
        # non-existing user config file
        ["-c", "file.yaml", "-o", od],
        # server uptime is a string, not a number
        ["-c", user_config_failure, "-o", od, "--uptime", 'String'],
        # output directory does not exist
        ["-c", user_config_success, "-o", "directory/doesnot/exist"],
        # cannot overwrite an existing output directory
        ["-c", user_config_success, "-o", "unittests/data/results"],
    ]

    remove_output_dirs()
    for args in failing_arg_sets:
        outcome = CliRunner().invoke(report, args)
        assert outcome.exit_code == 1
        remove_output_dirs()
# TODO: re-enable test once I've figured out how to get it working on travis ci
# def test_mock_server():
# """asserts mock server is launched and shutdown without error"""
#
# remove_output_dir()
# runner = CliRunner()
# result = runner.invoke(report, ["-c", user_config_success, "-o", od,
# "--serve", "--uptime", '1'])
# assert result.exit_code == 0
| 34.381818 | 79 | 0.653622 |
3916977184e09a533d8a4ade94b8f4d0f44528ae | 749 | py | Python | cohesity_management_sdk/models/type_hive_table_enum.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/models/type_hive_table_enum.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/models/type_hive_table_enum.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class TypeHiveTableEnum(object):
    """Implementation of the 'Type_HiveTable' enum.

    Specifies the type of a Hive table:
      'kManaged'  - a MANAGED Hive table.
      'kExternal' - an EXTERNAL Hive table.
      'kVirtual'  - a VIRTUAL Hive table.
      'kIndex'    - an INDEX Hive table.

    Attributes:
        KMANAGER: enum value 'kManaged'. NOTE(review): the attribute name
            (KMANAGER rather than KMANAGED) looks like a generator typo, but
            it is kept as-is since renaming would break existing callers.
        KEXTERNAL: enum value 'kExternal'.
        KVIRTUAL: enum value 'kVirtual'.
        KINDEX: enum value 'kIndex'.
    """
    KMANAGER = 'kManaged'
    KEXTERNAL = 'kExternal'
    KVIRTUAL = 'kVirtual'
    KINDEX = 'kIndex'
| 23.40625 | 56 | 0.667557 |
ef1a4586d477feef2b330af563460f4c4d036e98 | 18,698 | py | Python | qa/rpc-tests/wallet.py | bitcoinvenezuela/bven | a2fd442fc14dbd5a1a704dff83ee373b70dbe348 | [
"MIT"
] | 1 | 2019-04-19T02:09:07.000Z | 2019-04-19T02:09:07.000Z | qa/rpc-tests/wallet.py | MelonousHeadous/BVEN | 2fc8214d147bcd5cf5a2c7fe23d3efb75d07ff12 | [
"MIT"
] | null | null | null | qa/rpc-tests/wallet.py | MelonousHeadous/BVEN | 2fc8214d147bcd5cf5a2c7fe23d3efb75d07ff12 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)]
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3], redirect_stderr=True)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 500)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
assert_equal(self.nodes[1].getbalance(), 500)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 210 BVEN from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises_message(JSONRPCException, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 1000 BVEN in block rewards plus fees, but
# minus the 210 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 1000-210)
assert_equal(self.nodes[2].getbalance(), 210)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True, False, True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True, False, True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 1000)
assert_equal(self.nodes[2].getbalance("from1"), 1000-210)
# Send 100 BVEN normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.00001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('100'))
# Send 100 BVEN with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 100 BVEN
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('100')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 100 BVEN with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3], redirect_stderr=True))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
#tx should be added to balance because after restarting the nodes tx should be broadcastet
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
cbAddr = self.nodes[1].getnewaddress()
blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
self.sync_all()
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(cbTxId)
# check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label, s)
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
# disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
# '-salvagewallet',
]
chainlimit = 6
for m in maintenance:
print("check " + m)
stop_nodes(self.nodes)
# set lower ancestor limit for later
self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
# ==Check that wallet prefers to use coins that don't exceed mempool limits =====
# Get all non-zero utxos together
chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
self.nodes[0].generate(1)
node0_balance = self.nodes[0].getbalance()
# Split into two chains
rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')})
signedtx = self.nodes[0].signrawtransaction(rawtx)
singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
# Make a long chain of unconfirmed payments without hitting mempool limit
# Each tx we make leaves only one output of change on a chain 1 longer
# Since the amount to send is always much less than the outputs, we only ever need one output
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
for i in range(chainlimit*2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2)
assert_equal(len(txid_list), chainlimit*2)
# Without walletrejectlongchains, we will still generate a txid
# The tx will be stored in the wallet but not accepted to the mempool
extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
assert(extra_txid not in self.nodes[0].getrawmempool())
assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()])
self.nodes[0].abandontransaction(extra_txid)
total_txs = len(self.nodes[0].listtransactions("*",99999))
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
stop_node(self.nodes[0],0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)])
# wait for loadmempool
timeout = 10
while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2):
time.sleep(0.5)
timeout -= 0.5
assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2)
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_message(JSONRPCException, "mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
# Entry point: run the wallet functional test when invoked directly.
if __name__ == '__main__':
    WalletTest().main()
| 46.167901 | 181 | 0.641726 |
d253b2bb92708a274812649ac7be8b85a6d477dd | 393 | py | Python | web/rnp_find/asgi.py | mnahinkhan/rnpfind | 5aa956ddd528ab9ebd9588be845f78c449915b78 | [
"MIT"
] | 3 | 2021-06-08T03:55:03.000Z | 2021-06-15T07:33:08.000Z | web/rnp_find/asgi.py | mnahinkhan/RNPFind | 8b561e087f943421c847dcb708ee386ee6439fa5 | [
"MIT"
] | 1 | 2022-02-24T15:34:24.000Z | 2022-03-04T09:59:10.000Z | web/rnp_find/asgi.py | mnahinkhan/RNPFind | 8b561e087f943421c847dcb708ee386ee6439fa5 | [
"MIT"
] | 1 | 2021-07-22T04:13:34.000Z | 2021-07-22T04:13:34.000Z | """
ASGI config for rnp_find project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rnp_find.settings")
application = get_asgi_application()
| 23.117647 | 78 | 0.78626 |
bbdfefcde414e9eff78832c3aab5860074278b7f | 207 | py | Python | jessiql/integration/graphql/schema.py | kolypto/py-jessiql | 724a1eda84e912483bb2d96bb0f74ce6a12098a3 | [
"MIT"
] | null | null | null | jessiql/integration/graphql/schema.py | kolypto/py-jessiql | 724a1eda84e912483bb2d96bb0f74ce6a12098a3 | [
"MIT"
] | null | null | null | jessiql/integration/graphql/schema.py | kolypto/py-jessiql | 724a1eda84e912483bb2d96bb0f74ce6a12098a3 | [
"MIT"
] | null | null | null | import os.path
# Load GraphQL definitions from the file
pwd = os.path.dirname(__file__)
# Get this schema
with open(os.path.join(pwd, './schema.graphql'), 'rt') as f:
graphql_jessiql_schema = f.read()
| 23 | 60 | 0.714976 |
4eb8634b81af8399d5bb85d0709d2f34743546b9 | 792 | py | Python | cbab/settings/test.py | acdh-oeaw/cbab | 7cd25f057913dccf85f851e448b1dbc2c5f8d624 | [
"MIT"
] | 1 | 2021-09-20T12:51:47.000Z | 2021-09-20T12:51:47.000Z | cbab/settings/test.py | acdh-oeaw/cbab | 7cd25f057913dccf85f851e448b1dbc2c5f8d624 | [
"MIT"
] | null | null | null | cbab/settings/test.py | acdh-oeaw/cbab | 7cd25f057913dccf85f851e448b1dbc2c5f8d624 | [
"MIT"
] | null | null | null | from .base import *
SECRET_KEY = 'whatever'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS + [
'django_nose',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-coverage', # generate coverage
'--cover-package=bib,browsing,burials,cbab,django_spaghetti,places,vocabs,webpage',
'--cover-html', # generate a html cover report
'--nocapture', # needed to show print output in console
'--nologcapture', # needed to show print output in console
'--cover-erase', # without cover erase test coverage artifacts could remain
]
| 26.4 | 90 | 0.627525 |
81d562e83185193112101ce27bb4ea96d6ab484d | 34,130 | py | Python | keras/layers/rnn/bidirectional_test.py | code-review-doctor/keras | 96130040540e1405ffe746ddf2b2cceb9b8b8f65 | [
"Apache-2.0"
] | null | null | null | keras/layers/rnn/bidirectional_test.py | code-review-doctor/keras | 96130040540e1405ffe746ddf2b2cceb9b8b8f65 | [
"Apache-2.0"
] | null | null | null | keras/layers/rnn/bidirectional_test.py | code-review-doctor/keras | 96130040540e1405ffe746ddf2b2cceb9b8b8f65 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bidirectional wrapper."""
import copy
from absl.testing import parameterized
import keras
from keras.engine import base_layer_utils
from keras.layers import core
from keras.layers.rnn.cell_wrappers import ResidualWrapper
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import generic_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.training.tracking import util as trackable_util
class _RNNCellWithConstants(keras.layers.Layer):
  """Minimal RNN cell fixture whose step also consumes a constant tensor.

  Used to exercise the `constants=` plumbing of `keras.layers.RNN` and
  `Bidirectional`.
  """

  def __init__(self, units, constant_size, **kwargs):
    self.units = units
    self.state_size = units
    self.constant_size = constant_size
    super(_RNNCellWithConstants, self).__init__(**kwargs)

  def build(self, input_shape):
    # One kernel per signal source: input, previous state, constant.
    self.input_kernel = self.add_weight(
        shape=(input_shape[-1], self.units),
        initializer='uniform',
        name='kernel')
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units),
        initializer='uniform',
        name='recurrent_kernel')
    self.constant_kernel = self.add_weight(
        shape=(self.constant_size, self.units),
        initializer='uniform',
        name='constant_kernel')
    self.built = True

  def call(self, inputs, states, constants):
    # Single-state cell; single broadcast constant per sequence.
    prev_output = states[0]
    constant = constants[0]
    output = (keras.backend.dot(inputs, self.input_kernel)
              + keras.backend.dot(prev_output, self.recurrent_kernel)
              + keras.backend.dot(constant, self.constant_kernel))
    return output, [output]

  def get_config(self):
    config = dict(super(_RNNCellWithConstants, self).get_config())
    config.update({'units': self.units, 'constant_size': self.constant_size})
    return config
class _ResidualLSTMCell(keras.layers.LSTMCell):
  """LSTM cell with a residual (skip) connection from input to output."""

  def call(self, inputs, states, training=None):
    base_output, new_states = super(_ResidualLSTMCell, self).call(
        inputs, states)
    return inputs + base_output, new_states
class _AddOneCell(keras.layers.AbstractRNNCell):
  """Test cell: adds one to the (mean-reduced) input and to every state."""

  @property
  def state_size(self):
    return 1

  @property
  def output_size(self):
    return 1

  def call(self, inputs, state):
    # Collapse the feature axis so the output matches output_size == 1.
    mean_inputs = tf.reduce_mean(inputs, axis=1, keepdims=True)
    next_state = tf.nest.map_structure(lambda t: t + 1.0, state)
    return mean_inputs + 1.0, next_state
@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))
class BidirectionalTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(['sum', 'concat', 'ave', 'mul'])
def test_bidirectional(self, mode):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
# test with Sequential model
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode, input_shape=(timesteps, dim)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(x, y, epochs=1, batch_size=1)
# check whether the model variables are present in the
# trackable list of objects
checkpointed_object_ids = {
id(o) for o in trackable_util.list_objects(model)
}
for v in model.variables:
self.assertIn(id(v), checkpointed_object_ids)
# test compute output shape
ref_shape = model.layers[-1].output.shape
shape = model.layers[-1].compute_output_shape(
(None, timesteps, dim))
self.assertListEqual(shape.as_list(), ref_shape.as_list())
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
def test_bidirectional_invalid_init(self):
x = tf.constant(np.zeros((1, 1)).astype('float32'))
with self.assertRaisesRegex(
ValueError,
'Please initialize `Bidirectional` layer with a '
'`tf.keras.layers.Layer` instance.'):
keras.layers.Bidirectional(x)
def test_bidirectional_weight_loading(self):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), input_shape=(timesteps, dim)))
y_ref = model.predict(x)
weights = model.layers[-1].get_weights()
model.layers[-1].set_weights(weights)
y = model.predict(x)
self.assertAllClose(y, y_ref)
def test_bidirectional_stacked(self):
# test stacked bidirectional layers
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'sum'
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim, return_sequences=True),
merge_mode=mode,
input_shape=(timesteps, dim)))
model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = keras.layers.Input((timesteps, dim))
output = keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode)(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
def test_bidirectional_statefulness(self):
# Bidirectional and stateful
def run_test():
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'sum'
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
inputs = keras.layers.Input(batch_shape=(1, timesteps, dim))
bidi_rnn = keras.layers.Bidirectional(
rnn(output_dim, stateful=True), merge_mode=mode)
self.assertTrue(bidi_rnn.stateful)
output = bidi_rnn(inputs)
model = keras.models.Model(inputs, output)
y_1 = model.predict(x, batch_size=1)
model.reset_states()
y_2 = model.predict(x, batch_size=1)
self.assertAllClose(y_1, y_2)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
if tf.executing_eagerly():
run_test()
else:
tf_test_util.enable_output_all_intermediates(run_test)()
@parameterized.parameters(['sum', 'mul', 'ave', 'concat', None])
def test_Bidirectional_merged_value(self, merge_mode):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
x = [np.random.rand(samples, timesteps, dim)]
with self.cached_session():
if merge_mode == 'sum':
merge_func = lambda y, y_rev: y + y_rev
elif merge_mode == 'mul':
merge_func = lambda y, y_rev: y * y_rev
elif merge_mode == 'ave':
merge_func = lambda y, y_rev: (y + y_rev) / 2
elif merge_mode == 'concat':
merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1)
else:
merge_func = lambda y, y_rev: [y, y_rev]
# basic case
inputs = keras.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_sequences=True), merge_mode=merge_mode)
f_merged = keras.backend.function([inputs], _to_list(layer(inputs)))
f_forward = keras.backend.function([inputs],
[layer.forward_layer(inputs)])
f_backward = keras.backend.function(
[inputs],
[keras.backend.reverse(layer.backward_layer(inputs), 1)])
y_merged = f_merged(x)
y_expected = _to_list(merge_func(f_forward(x)[0], f_backward(x)[0]))
assert len(y_merged) == len(y_expected)
for x1, x2 in zip(y_merged, y_expected):
self.assertAllClose(x1, x2, atol=1e-5)
# test return_state
inputs = keras.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_state=True), merge_mode=merge_mode)
f_merged = keras.backend.function([inputs], layer(inputs))
f_forward = keras.backend.function([inputs],
layer.forward_layer(inputs))
f_backward = keras.backend.function([inputs],
layer.backward_layer(inputs))
n_states = len(layer.layer.states)
y_merged = f_merged(x)
y_forward = f_forward(x)
y_backward = f_backward(x)
y_expected = _to_list(merge_func(y_forward[0], y_backward[0]))
assert len(y_merged) == len(y_expected) + n_states * 2
for x1, x2 in zip(y_merged, y_expected):
self.assertAllClose(x1, x2, atol=1e-5)
y_merged = y_merged[-n_states * 2:]
y_forward = y_forward[-n_states:]
y_backward = y_backward[-n_states:]
for state_birnn, state_inner in zip(y_merged, y_forward + y_backward):
self.assertAllClose(state_birnn, state_inner, atol=1e-5)
@parameterized.parameters([True, False])
def test_Bidirectional_with_time_major_input(self, time_major):
batch_size, time, input_dim = 2, 3, 1
inputs = tf.zeros((batch_size, time, input_dim))
# length is [1 2]. Within the batch, the first element has 1 step, and the
# second element as 2 steps.
lengths = tf.range(1, 1 + batch_size)
mask = tf.sequence_mask(lengths, maxlen=time, dtype=tf.float32)
forward_cell = _AddOneCell(name='forward')
backward_cell = _AddOneCell(name='backward')
layer = keras.layers.Bidirectional(
layer=keras.layers.RNN(
forward_cell, time_major=time_major, return_sequences=True),
backward_layer=keras.layers.RNN(
backward_cell, time_major=time_major, return_sequences=True,
go_backwards=True))
# Switch to time-major.
if time_major:
inputs = tf.transpose(inputs, [1, 0, 2])
mask = tf.transpose(mask, [1, 0])
keras_outputs = layer(inputs, mask=mask)
if time_major:
keras_outputs = tf.transpose(keras_outputs, [1, 0, 2])
# expect the first element in batch has 1 step and second element in batch
# has 2 steps.
expected_result = np.array([[[1., 1.], [0., 0.], [0., 0.]],
[[1., 1.], [1., 1.], [0., 0.]]])
self.assertAllClose(expected_result, keras_outputs)
def test_Bidirectional_dropout(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'sum'
x = [np.random.rand(samples, timesteps, dim)]
with self.cached_session():
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(
rnn(units, dropout=0.2, recurrent_dropout=0.2), merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs, training=True))
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(
rnn(units, dropout=0.2, return_state=True), merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs))
model = keras.Model(inputs, outputs)
y1 = _to_list(model.predict(x))
y2 = _to_list(model.predict(x))
for x1, x2 in zip(y1, y2):
self.assertAllClose(x1, x2, atol=1e-5)
def test_Bidirectional_state_reuse(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
with self.cached_session():
input1 = keras.layers.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_state=True, return_sequences=True))
state = layer(input1)[1:]
# test passing invalid initial_state: passing a tensor
input2 = keras.layers.Input((timesteps, dim))
with self.assertRaises(ValueError):
keras.layers.Bidirectional(rnn(units))(input2, initial_state=state[0])
# test valid usage: passing a list
output = keras.layers.Bidirectional(rnn(units))(input2,
initial_state=state)
model = keras.models.Model([input1, input2], output)
assert len(model.layers) == 4
assert isinstance(model.layers[-1].input, list)
inputs = [np.random.rand(samples, timesteps, dim),
np.random.rand(samples, timesteps, dim)]
model.predict(inputs)
def test_Bidirectional_state_reuse_with_np_input(self):
# See https://github.com/tensorflow/tensorflow/issues/28761 for more detail.
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
with self.cached_session():
input1 = np.random.rand(samples, timesteps, dim).astype(np.float32)
layer = keras.layers.Bidirectional(
rnn(units, return_state=True, return_sequences=True))
state = layer(input1)[1:]
input2 = np.random.rand(samples, timesteps, dim).astype(np.float32)
keras.layers.Bidirectional(rnn(units))(input2, initial_state=state)
def test_Bidirectional_trainable(self):
# test layers that need learning_phase to be set
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
_ = layer(x)
assert len(layer.trainable_weights) == 6
layer.trainable = False
assert not layer.trainable_weights
layer.trainable = True
assert len(layer.trainable_weights) == 6
def test_Bidirectional_updates(self):
if tf.executing_eagerly():
self.skipTest('layer.updates is only available in graph mode.')
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
x_reachable_update = x * x
layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
_ = layer(x)
assert not layer.updates
# TODO(b/128684069): Remove when Wrapper sublayers are __call__'d.
with base_layer_utils.call_context().enter(layer, x, True, None):
layer.forward_layer.add_update(x_reachable_update)
layer.forward_layer.add_update(1)
layer.backward_layer.add_update(x_reachable_update)
layer.backward_layer.add_update(1)
assert len(layer.updates) == 4
def test_Bidirectional_losses(self):
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.Bidirectional(
keras.layers.SimpleRNN(
3,
kernel_regularizer='l1',
bias_regularizer='l1',
activity_regularizer='l1'))
_ = layer(x)
assert len(layer.losses) == 6
loss = x * x
layer.forward_layer.add_loss(loss)
layer.backward_layer.add_loss(loss)
assert len(layer.losses) == 8
def test_Bidirectional_with_constants(self):
with self.cached_session():
# Test basic case.
x = keras.Input((5, 5))
c = keras.Input((3,))
cell = _RNNCellWithConstants(32, 3)
custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
y = layer(x, constants=c)
model = keras.Model([x, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 64))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer(x, constants=c)
model = keras.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test flat list inputs
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer([x, c])
model = keras.Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
def test_Bidirectional_with_constants_layer_passing_initial_state(self):
with self.cached_session():
# Test basic case.
x = keras.Input((5, 5))
c = keras.Input((3,))
s_for = keras.Input((32,))
s_bac = keras.Input((32,))
cell = _RNNCellWithConstants(32, 3)
custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
y = layer(x, initial_state=[s_for, s_bac], constants=c)
model = keras.Model([x, s_for, s_bac, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)),
np.zeros((6, 32)),
np.zeros((6, 32)),
np.zeros((6, 3))],
np.zeros((6, 64))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_fw_np = np.random.random((6, 32))
s_bk_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer(x, initial_state=[s_for, s_bac], constants=c)
model = keras.Model([x, s_for, s_bac, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Verify that state is used
y_np_2_different_s = model.predict(
[x_np, s_fw_np + 10., s_bk_np + 10., c_np])
assert np.mean(y_np - y_np_2_different_s) != 0
# Test flat list inputs
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer([x, s_for, s_bac, c])
model = keras.Model([x, s_for, s_bac, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
  @parameterized.parameters([keras.layers.LSTM, keras.layers.GRU])
  def test_Bidirectional_output_shape(self, rnn):
    """Checks `compute_output_shape` for merged, stateful and unmerged modes."""
    input_shape = [None, 2, 1]
    # LSTM carries (h, c) per direction -> 4 state tensors; GRU only h -> 2.
    num_state = 4 if rnn == keras.layers.LSTM else 2
    wrapper = keras.layers.Bidirectional(rnn(3))
    output_shape = wrapper.compute_output_shape(input_shape)
    # Default merge_mode='concat' doubles the feature dimension (3 -> 6).
    self.assertEqual(output_shape.as_list(), [None, 6])
    wrapper = keras.layers.Bidirectional(rnn(3, return_state=True))
    output_shape = wrapper.compute_output_shape(input_shape)
    # 1 for output and the rest for forward and backward states
    self.assertLen(output_shape, 1 + num_state)
    self.assertEqual(output_shape[0].as_list(), [None, 6])
    for shape in output_shape[1:]:
      self.assertEqual(shape.as_list(), [None, 3])
    wrapper = keras.layers.Bidirectional(rnn(3, return_state=True),
                                         merge_mode=None)
    output_shape = wrapper.compute_output_shape(input_shape)
    # 1 for forward output and 1 for backward output, and the rest for states
    self.assertLen(output_shape, 2 + num_state)
    for shape in output_shape:
      self.assertEqual(shape.as_list(), [None, 3])
def test_Bidirectional_output_shape_return_types(self):
class TestLayer(keras.layers.SimpleRNN):
def call(self, inputs):
return tf.concat([inputs, inputs], axis=-1)
def compute_output_shape(self, input_shape):
output_shape = tf.TensorShape(input_shape).as_list()
output_shape[-1] = output_shape[-1] * 2
return tf.TensorShape(output_shape)
class TestListLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestListLayer, self).compute_output_shape(input_shape)
return shape.as_list()
class TestTupleLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestTupleLayer, self).compute_output_shape(input_shape)
return tuple(shape.as_list())
# Layers can specify output shape as list/tuple/TensorShape
test_layers = [TestLayer, TestListLayer, TestTupleLayer]
for layer in test_layers:
input_layer = keras.layers.Bidirectional(layer(1))
inputs = keras.backend.placeholder(shape=(None, 2, 4))
output = input_layer(inputs)
self.assertEqual(output.shape.as_list(), [None, 2, 16])
self.assertEqual(
input_layer.compute_output_shape([None, 2, 4]).as_list(),
[None, 2, 16])
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
def test_Bidirectional_last_output_with_masking(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'concat'
x = np.random.rand(samples, timesteps, dim)
# clear the first record's timestep 2. Last output should be same as state,
# not zeroed.
x[0, 2] = 0
with self.cached_session():
inputs = keras.Input((timesteps, dim))
masked_inputs = keras.layers.Masking()(inputs)
wrapped = keras.layers.Bidirectional(
rnn(units, return_state=True), merge_mode=merge_mode)
outputs = _to_list(wrapped(masked_inputs, training=True))
self.assertLen(outputs, 5)
self.assertEqual(outputs[0].shape.as_list(), [None, units * 2])
model = keras.Model(inputs, outputs)
y = _to_list(model.predict(x))
self.assertLen(y, 5)
self.assertAllClose(y[0], np.concatenate([y[1], y[3]], axis=1))
@parameterized.parameters([keras.layers.LSTM, keras.layers.GRU])
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input yet.')
def test_Bidirectional_sequence_output_with_masking(self, rnn):
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'concat'
x = np.random.rand(samples, timesteps, dim)
# clear the first record's timestep 2, and expect the output of timestep 2
# is also 0s.
x[0, 2] = 0
with self.cached_session():
inputs = keras.Input((timesteps, dim))
masked_inputs = keras.layers.Masking()(inputs)
wrapped = keras.layers.Bidirectional(
rnn(units, return_sequences=True),
merge_mode=merge_mode)
outputs = _to_list(wrapped(masked_inputs, training=True))
self.assertLen(outputs, 1)
self.assertEqual(outputs[0].shape.as_list(), [None, timesteps, units * 2])
model = keras.Model(inputs, outputs)
y = _to_list(model.predict(x))
self.assertLen(y, 1)
self.assertAllClose(y[0][0, 2], np.zeros(units * 2))
  @parameterized.parameters(['sum', 'concat'])
  def test_custom_backward_layer(self, mode):
    """An explicitly supplied backward_layer should train, checkpoint,
    compute output shapes, and survive JSON serialization."""
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    x = np.random.random((samples, timesteps, dim))
    # 'concat' stacks both directions; every other merge keeps output_dim.
    target_dim = 2 * output_dim if mode == 'concat' else output_dim
    y = np.random.random((samples, target_dim))
    forward_layer = rnn(output_dim)
    backward_layer = rnn(output_dim, go_backwards=True)
    # test with Sequential model
    model = keras.models.Sequential()
    model.add(
        keras.layers.Bidirectional(
            forward_layer,
            merge_mode=mode,
            backward_layer=backward_layer,
            input_shape=(timesteps, dim)))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(x, y, epochs=1, batch_size=1)
    # check whether the model variables are present in the
    # trackable list of objects
    checkpointed_object_ids = {
        id(o) for o in trackable_util.list_objects(model)
    }
    for v in model.variables:
      self.assertIn(id(v), checkpointed_object_ids)
    # test compute output shape
    ref_shape = model.layers[-1].output.shape
    shape = model.layers[-1].compute_output_shape((None, timesteps, dim))
    self.assertListEqual(shape.as_list(), ref_shape.as_list())
    # test config: round-trip through JSON must rebuild a summarizable model.
    model.get_config()
    model = keras.models.model_from_json(model.to_json())
    model.summary()
  def test_custom_backward_layer_error_check(self):
    """Bidirectional must reject backward layers whose go_backwards or
    stateful / return_* attributes conflict with the forward layer."""
    rnn = keras.layers.LSTM
    units = 2
    forward_layer = rnn(units)
    backward_layer = rnn(units)
    # Both layers run forward: an invalid pairing.
    with self.assertRaisesRegex(ValueError,
                                'should have different `go_backwards` value.'):
      keras.layers.Bidirectional(
          forward_layer, merge_mode='concat', backward_layer=backward_layer)
    # These attributes must have the same value in both directions.
    for attr in ('stateful', 'return_sequences', 'return_state'):
      kwargs = {attr: True}
      backward_layer = rnn(units, go_backwards=True, **kwargs)
      with self.assertRaisesRegex(
          ValueError, 'expected to have the same value for attribute "' + attr):
        keras.layers.Bidirectional(
            forward_layer, merge_mode='concat', backward_layer=backward_layer)
def test_custom_backward_layer_serialization(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units, go_backwards=True)
layer = keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
config = layer.get_config()
layer_from_config = keras.layers.Bidirectional.from_config(config)
new_config = layer_from_config.get_config()
self.assertDictEqual(config, new_config)
def test_rnn_layer_name(self):
rnn = keras.layers.LSTM
units = 2
layer = keras.layers.Bidirectional(rnn(units, name='rnn'))
config = layer.get_config()
self.assertEqual(config['layer']['config']['name'], 'rnn')
layer_from_config = keras.layers.Bidirectional.from_config(config)
self.assertEqual(layer_from_config.forward_layer.name, 'forward_rnn')
self.assertEqual(layer_from_config.backward_layer.name, 'backward_rnn')
  def test_custom_backward_rnn_layer_name(self):
    """With a custom backward layer, both auto-generated sub-layer names
    survive the config round trip and get direction prefixes on rebuild."""
    rnn = keras.layers.LSTM
    units = 2
    forward_layer = rnn(units)
    backward_layer = rnn(units, go_backwards=True)
    layer = keras.layers.Bidirectional(
        forward_layer, merge_mode='concat', backward_layer=backward_layer)
    config = layer.get_config()
    # Auto-naming gives the two LSTMs 'lstm' and 'lstm_1' respectively.
    self.assertEqual(config['layer']['config']['name'], 'lstm')
    self.assertEqual(config['backward_layer']['config']['name'], 'lstm_1')
    layer_from_config = keras.layers.Bidirectional.from_config(config)
    self.assertEqual(layer_from_config.forward_layer.name, 'forward_lstm')
    self.assertEqual(layer_from_config.backward_layer.name, 'backward_lstm_1')
  def test_rnn_with_customized_cell(self):
    """Bidirectional should accept an RNN built from a custom cell and train."""
    batch = 20
    dim = 5
    timesteps = 3
    units = 5
    merge_mode = 'sum'
    cell = _ResidualLSTMCell(units)
    forward_layer = keras.layers.RNN(cell)
    inputs = keras.Input((timesteps, dim))
    bidirectional_rnn = keras.layers.Bidirectional(
        forward_layer, merge_mode=merge_mode)
    outputs = _to_list(bidirectional_rnn(inputs))
    model = keras.Model(inputs, outputs)
    model.compile(optimizer='rmsprop', loss='mse')
    # One short epoch on random data just proves the graph trains end-to-end.
    model.fit(
        np.random.random((batch, timesteps, dim)),
        np.random.random((batch, units)),
        epochs=1,
        batch_size=10)
  def test_rnn_with_customized_cell_stacking(self):
    """Bidirectional should accept an RNN built from a *list* of custom
    cells (a stacked RNN) and train."""
    batch = 20
    dim = 5
    timesteps = 3
    units = 5
    merge_mode = 'sum'
    cell = [_ResidualLSTMCell(units), _ResidualLSTMCell(units)]
    forward_layer = keras.layers.RNN(cell)
    inputs = keras.Input((timesteps, dim))
    bidirectional_rnn = keras.layers.Bidirectional(
        forward_layer, merge_mode=merge_mode)
    outputs = _to_list(bidirectional_rnn(inputs))
    model = keras.Model(inputs, outputs)
    model.compile(optimizer='rmsprop', loss='mse')
    # One short epoch on random data just proves the graph trains end-to-end.
    model.fit(
        np.random.random((batch, timesteps, dim)),
        np.random.random((batch, units)),
        epochs=1,
        batch_size=10)
  @test_utils.run_v2_only
  def test_wrapped_rnn_cell(self):
    """Bidirectional over a ResidualWrapper-wrapped LSTM cell must train."""
    # See https://github.com/tensorflow/tensorflow/issues/26581.
    batch = 20
    dim = 5
    timesteps = 3
    units = 5
    merge_mode = 'sum'
    cell = keras.layers.LSTMCell(units)
    cell = ResidualWrapper(cell)
    rnn = keras.layers.RNN(cell)
    inputs = keras.Input((timesteps, dim))
    wrapped = keras.layers.Bidirectional(rnn, merge_mode=merge_mode)
    outputs = _to_list(wrapped(inputs))
    model = keras.Model(inputs, outputs)
    model.compile(optimizer='rmsprop', loss='mse')
    # One short epoch on random data just proves the graph trains end-to-end.
    model.fit(
        np.random.random((batch, timesteps, dim)),
        np.random.random((batch, units)),
        epochs=1,
        batch_size=10)
  @parameterized.parameters(['ave', 'concat', 'mul'])
  @tf.test.disable_with_predicate(
      pred=tf.test.is_built_with_rocm,
      skip_message='Skipping as ROCm RNN does not support ragged tensors yet.')
  def test_Bidirectional_ragged_input(self, merge_mode):
    """For ragged inputs, the merged output must equal merging the forward
    pass with the time-reversed backward pass, per merge mode."""
    np.random.seed(100)
    rnn = keras.layers.LSTM
    units = 3
    x = tf.ragged.constant(
        [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1]],
         [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]],
         [[1, 1, 1], [1, 1, 1], [1, 1, 1]]],
        ragged_rank=1)
    x = tf.cast(x, 'float32')
    # pylint: disable=g-long-lambda
    with self.cached_session():
      # Reference merge function mirroring each supported merge_mode.
      if merge_mode == 'ave':
        merge_func = lambda y, y_rev: (y + y_rev) / 2
      elif merge_mode == 'concat':
        merge_func = lambda y, y_rev: tf.concat(
            (y, y_rev), axis=-1)
      elif merge_mode == 'mul':
        merge_func = lambda y, y_rev: (y * y_rev)
        # pylint: enable=g-long-lambda
      inputs = keras.Input(
          shape=(None, 3), batch_size=4, dtype='float32', ragged=True)
      layer = keras.layers.Bidirectional(
          rnn(units, return_sequences=True), merge_mode=merge_mode)
      f_merged = keras.backend.function([inputs], layer(inputs))
      f_forward = keras.backend.function([inputs],
                                         layer.forward_layer(inputs))
      # TODO(kaftan): after KerasTensor refactor TF op layers should work
      # with many composite tensors, and this shouldn't need to be a lambda
      # layer.
      reverse_layer = core.Lambda(tf.reverse, arguments=dict(axis=[1]))
      f_backward = keras.backend.function(
          [inputs],
          reverse_layer(layer.backward_layer(inputs)))
      y_merged = f_merged(x)
      y_expected = merge_func(
          convert_ragged_tensor_value(f_forward(x)),
          convert_ragged_tensor_value(f_backward(x)))
      y_merged = convert_ragged_tensor_value(y_merged)
      # Compare flat values; the ragged row splits are identical by construction.
      self.assertAllClose(y_merged.flat_values, y_expected.flat_values)
  def test_Bidirectional_nested_state_reuse(self):
    """Feeding a previous call's states back as extra inputs must match
    passing them via the initial_state kwarg (eager mode only)."""
    if not tf.executing_eagerly():
      self.skipTest('Only test eager mode.')
    x = tf.random.normal([4, 8, 16])
    layer = keras.layers.Bidirectional(
        keras.layers.RNN([keras.layers.LSTMCell(5),
                          keras.layers.LSTMCell(5)],
                         return_sequences=True,
                         return_state=True))
    y = layer(x)
    # y[0] is the sequence output; y[1:] are the nested states of both
    # directions, reused here as initial state.
    self.assertAllClose(layer([x] + y[1:]), layer(x, initial_state=y[1:]))
  def test_full_input_spec(self):
    """A stateful Bidirectional called with explicit initial states must
    still behave correctly across predict()/reset_states() cycles."""
    # See https://github.com/tensorflow/tensorflow/issues/38403
    inputs = keras.layers.Input(batch_shape=(1, 1, 1))
    fw_state = keras.layers.Input(batch_shape=(1, 1))
    bw_state = keras.layers.Input(batch_shape=(1, 1))
    states = [fw_state, bw_state]
    bidirectional_rnn = keras.layers.Bidirectional(
        keras.layers.SimpleRNN(1, stateful=True))
    rnn_output = bidirectional_rnn(inputs, initial_state=states)
    model = keras.Model([inputs, fw_state, bw_state], rnn_output)
    # Statefulness makes the second predict differ from the first...
    output1 = model.predict(
        [np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))])
    output2 = model.predict(
        [np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))])
    # ...and reset_states() restores the original behavior.
    model.reset_states()
    output3 = model.predict(
        [np.ones((1, 1, 1)), np.ones((1, 1)), np.ones((1, 1))])
    self.assertAllClose(output1, output3)
    self.assertNotAllClose(output1, output2)
def _to_list(ls):
if isinstance(ls, list):
return ls
else:
return [ls]
def convert_ragged_tensor_value(inputs):
  """Convert a v1 RaggedTensorValue into a RaggedTensor.

  Any other input is returned untouched, so this is safe to apply to both
  graph-mode (v1 value) and eager/v2 results.
  """
  if isinstance(inputs, tf.compat.v1.ragged.RaggedTensorValue):
    flat_values = tf.convert_to_tensor(
        value=inputs.flat_values,
        name='flat_values')
    # validate=False: the splits already came from a valid RaggedTensorValue.
    return tf.RaggedTensor.from_nested_row_splits(
        flat_values, inputs.nested_row_splits, validate=False)
  return inputs


if __name__ == '__main__':
  tf.test.main()
| 36.347178 | 80 | 0.658717 |
727cacece07409e921b91b5d89597157fea60c3c | 6,592 | py | Python | pysnmp-with-texts/CTRON-ETWMIM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CTRON-ETWMIM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CTRON-ETWMIM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CTRON-ETWMIM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CTRON-ETWMIM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:30:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: pysmi-generated module (see header); regeneration will overwrite edits.
# Pull ASN.1 / SNMPv2 base symbols out of the MIB builder.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
ctPModuleETWMIM, = mibBuilder.importSymbols("CTRON-MIB-NAMES", "ctPModuleETWMIM")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, MibIdentifier, Bits, Unsigned32, ObjectIdentity, Integer32, iso, IpAddress, Gauge32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibIdentifier", "Bits", "Unsigned32", "ObjectIdentity", "Integer32", "iso", "IpAddress", "Gauge32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter32", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Scalar object definitions under OID prefix 1.3.6.1.4.1.52.4.1.1.4.1.*;
# each scalar is followed by its (loadTexts-guarded) status and description.
etwDbExist = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 1, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("exists", 1), ("no-exists", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etwDbExist.setStatus('mandatory')
if mibBuilder.loadTexts: etwDbExist.setDescription('Denotes whether a T1 daughter board is attached to the Etwmim card being managed')
etwDbEnabled = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etwDbEnabled.setStatus('mandatory')
if mibBuilder.loadTexts: etwDbEnabled.setDescription('This object exists if etwDbExist is 1. Setting this object to 1 will cause the WAN port to use the daughterboard T1 port. Setting this object to 2 will cause the WAN port to use the synchronous port')
etwDbFracToggle = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 1, 1, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("update-table", 1), ("display-new", 2), ("display-old", 3), ("restore-old", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etwDbFracToggle.setReference('IETF RFC 1232')
if mibBuilder.loadTexts: etwDbFracToggle.setStatus('mandatory')
if mibBuilder.loadTexts: etwDbFracToggle.setDescription('This object exists if etwDbExist is 1. Setting this object to 1 will cause the T1 fractional table to be updated with the new values, as entered. Setting this object to 2, will cause the T1 fractional table to be the table as it is being entered. Setting this object to 3, will cause the T1 fractional table to be the table that is currently in use, regardless of any changes being entered. Setting this object to 4, will cause any changes that have been made to the fractional table since the last update-table to be deleted.')
etwFWRev = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 1, 1, 4, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: etwFWRev.setStatus('mandatory')
if mibBuilder.loadTexts: etwFWRev.setDescription('Denotes the revision of firmware in the module. The format of the string is xx.xx.xx, where the first pair indicates version, and the second pair indicates revision.')
etwHWRev = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 1, 1, 4, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etwHWRev.setStatus('mandatory')
if mibBuilder.loadTexts: etwHWRev.setDescription('Denotes the revision of hardware.')
etwEpimEnabled = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 1, 1, 4, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etwEpimEnabled.setStatus('mandatory')
if mibBuilder.loadTexts: etwEpimEnabled.setDescription('This object gives the status of the EPIM port on the etwmim. A value of 1 indicates that the hardware has been set to use the Epim port for the ethernet connection. A value of 2 indicates that the hardware has been set to use the A channel for the ethernet connection')
etwEpimType = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 1, 1, 4, 1, 7), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etwEpimType.setStatus('mandatory')
if mibBuilder.loadTexts: etwEpimType.setDescription('Identifies the type of EPIM that is present in the EPIM slot. The value is allocated in the Ctron naming tree.')
etwEpimLink = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 1, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("link-established", 1), ("link-not-established", 2), ("link-unknown", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etwEpimLink.setStatus('mandatory')
if mibBuilder.loadTexts: etwEpimLink.setDescription('This object will indicate whether a link is established on the EPIM link. A value of 1 indicates that a link is established. A value of 2 indicates that a link is not established. A value of 3 indicates that the status of the link is unknown or not valid for the type of Epim installed.')
etwClearNvramOnBoot = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 1, 1, 4, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etwClearNvramOnBoot.setStatus('mandatory')
if mibBuilder.loadTexts: etwClearNvramOnBoot.setDescription('When set to a 1, the system will clear all persistant objects, except for the download objects, from nvram on the next system boot')
# Export all scalars so other compiled MIB modules can import them by name.
mibBuilder.exportSymbols("CTRON-ETWMIM-MIB", etwHWRev=etwHWRev, etwEpimType=etwEpimType, etwClearNvramOnBoot=etwClearNvramOnBoot, etwDbExist=etwDbExist, etwEpimLink=etwEpimLink, etwDbEnabled=etwDbEnabled, etwDbFracToggle=etwDbFracToggle, etwFWRev=etwFWRev, etwEpimEnabled=etwEpimEnabled)
| 149.818182 | 585 | 0.774272 |
11262d022fdfb857ea0f852b2a97a2eed8725368 | 971 | py | Python | BeyondNet.JsPlatform/js/algorithms/famous-algorithms/bellman-ford-algorithm/python/BellmanFord.py | beyondnetPeru/BeyondNet.Sample.Js | 41126537519bf423063633a204e03aa71ae89d4b | [
"Apache-2.0"
] | null | null | null | BeyondNet.JsPlatform/js/algorithms/famous-algorithms/bellman-ford-algorithm/python/BellmanFord.py | beyondnetPeru/BeyondNet.Sample.Js | 41126537519bf423063633a204e03aa71ae89d4b | [
"Apache-2.0"
] | null | null | null | BeyondNet.JsPlatform/js/algorithms/famous-algorithms/bellman-ford-algorithm/python/BellmanFord.py | beyondnetPeru/BeyondNet.Sample.Js | 41126537519bf423063633a204e03aa71ae89d4b | [
"Apache-2.0"
] | null | null | null | class Graph:
    def __init__(self, v):
        # v: vertex count; graph: edge list of [source, destination, weight].
        self.v = v
        self.graph = []
    def add_edge(self, s, d, w):
        # Append a directed edge s -> d with weight w (negative weights allowed).
        self.graph.append([s,d,w])
def bellman_ford(self, src):
dist = [float('inf')]* self.v
dist[src] = 0
# Relax v-1
for i in range(self.v - 1):
for u, v, c in self.graph:
if dist[u] != float('inf') and dist[u] + c < dist[v]:
dist[v] = dist[u] + c
# - ve cycles
for u, v, c in self.graph:
if dist[u] != float('inf') and dist[u] + c < dist[v]:
print('Graph contains -ve cycle')
print("vertex distance from the source :")
for i in range(self.v):
print(i, ' : ', dist[i])
# Demo: a 6-vertex directed graph with several negative-weight edges;
# print shortest distances from vertex 0.
g = Graph(6)
g.add_edge(0, 1, 8)
g.add_edge(0, 5, 5)
g.add_edge(0, 3, 3)
g.add_edge(1, 2, 6)
g.add_edge(2, 4, 4)
g.add_edge(3, 4, -1)
g.add_edge(5, 1, -4)
g.add_edge(5, 2, -1)
g.add_edge(5, 4, -3)
g.bellman_ford(0)
8be6a789311723596a303b493edb1344cc977ebb | 5,281 | py | Python | config.py | shoheietzel/proj10-meetme | 38efe0ba015678b87070b1ec5b742b49662d5304 | [
"Artistic-2.0"
] | null | null | null | config.py | shoheietzel/proj10-meetme | 38efe0ba015678b87070b1ec5b742b49662d5304 | [
"Artistic-2.0"
] | null | null | null | config.py | shoheietzel/proj10-meetme | 38efe0ba015678b87070b1ec5b742b49662d5304 | [
"Artistic-2.0"
] | null | null | null | """
Configure from
app.ini (if provided)
credentials.ini
command line (unless invoked with proxied=True)
in that order (i.e., in opposite order of precedence).
A configuration namespace module returned by this module is
suitable for configuring a Flask applicaton object.
configparser makes all configuration variables lower case;
Flask configuration object recognizes only upper case configuration
variables. To resolve this conflict, we convert all configuration
variables from .ini files to upper case.
Potential extensions:
- Really need to factor out the parts that are unique for a
project ... but that includes some documentation. Maybe
just the command line parsing part?
- Use environment variables? With what precedence relative
to configuration files? (NO, for now)
"""
import configparser
import argparse
import os
import logging
logging.basicConfig(format='%(levelname)s:%(message)s',
                    level=logging.INFO)
log = logging.getLogger(__name__)  # module-level logger shared by all helpers

# Directory containing this module; relative config paths are resolved
# against it as a fallback.
HERE = os.path.dirname(__file__)
def command_line_args():
    """Parse the process command line and return the resulting namespace.

    Recognized options: -D/--debug, -P/--port, -C/--config. The debug and
    port dests are upper-cased so they fold directly into a Flask config.
    """
    log.debug("-> Command line args")
    parser = argparse.ArgumentParser(description="Meeting Arranger")
    parser.add_argument(
        "-D", "--debug", dest="DEBUG", action="store_const", const=True,
        help="Turn on debugging and verbose logging")
    parser.add_argument(
        "-P", "--port", type=int, dest="PORT",
        help="Port for Flask built-in server (only)")
    parser.add_argument(
        "-C", "--config", type=str,
        help="Alternate configuration file")
    parsed = parser.parse_args()
    log.debug("<- Command line args: {}".format(parsed))
    return parsed
def fake_cli_args():
    """Return an empty argparse namespace for proxied (e.g. gunicorn) runs.

    Under a proxy the real command line belongs to the proxy process, so we
    parse an empty argument list to obtain a namespace with a structure
    compatible with command_line_args().
    """
    log.debug("-> Fake cli args")
    stub = argparse.ArgumentParser(description="This is a stub")
    namespace = stub.parse_args([])
    log.debug("<- Command line args: {}".format(namespace))
    return namespace
def config_file_args(config_file_paths, project=None):
    """Read the given .ini files in order and return one section of them.

    Each path is tried as given, then relative to this module's directory;
    only the first existing location of each path is read, and later files
    override earlier ones. Values come from section `project` when given,
    otherwise from the DEFAULT section.
    """
    log.debug("-> config file args")
    config = configparser.ConfigParser()
    for path in config_file_paths:
        # Try the literal path first, then relative to the module directory.
        for candidate in (path, path_join_robust_fallback(path) if False else os.path.join(HERE, path)):
            pass
        if os.path.exists(path):
            log.info("Configuring from {}".format(path))
            config.read(path)
        elif os.path.exists(os.path.join(HERE, path)):
            relative = os.path.join(HERE, path)
            log.info("Configuring from {}".format(relative))
            config.read(relative)
        else:
            log.info("No configuration file {}; skipping".format(path))
    section_name = project or "DEFAULT"
    log.debug("Using configuration section {}".format(section_name))
    section = config[section_name]
    log.debug("<- config file args: {}".format(section))
    return section
def imply_types(ns):
    """Convert string values in `ns` to implied types, in place.

    "true"/"false" (any casing) become booleans, and strings made entirely
    of decimal digits become ints; all other values are left untouched.
    """
    for key in ns:
        value = ns[key]
        if type(value) != str:
            continue
        lowered = value.lower()
        if lowered == "true":
            ns[key] = True
        elif lowered == "false":
            ns[key] = False
        elif value.isdecimal():
            ns[key] = int(value)
def configuration(proxied=False):
    """Build the merged configuration namespace for the application.

    Command-line values take precedence over values read from the
    configuration files (app.ini, credentials.ini, plus any file named
    via --config).

    When proxied=True the real command line belongs to a proxy such as
    gunicorn, so an empty stub namespace is used instead of parsing it.

    Returns an argparse.Namespace suitable for Flask's config.from_object.
    """
    log.debug("-> configuration")
    if proxied:
        cli = fake_cli_args()
    else:
        cli = command_line_args()
    cli_vars = vars(cli)  # Access the namespace as a dict
    log.debug("CLI variables: {}".format(cli_vars))
    config_file_paths = ["app.ini", "credentials.ini"]
    if cli_vars.get("config"):
        # BUGFIX: this previously called `config_file_path.append(...)`
        # (an undefined name), so supplying --config raised NameError.
        config_file_paths.append(cli_vars.get("config"))
    log.debug("Will read config files from '{}'".format(config_file_paths))
    # No --project option is currently defined, so this is None unless a
    # caller injects one; config_file_args then falls back to DEFAULT.
    config_for_project = cli_vars.get("project", None)
    ini = config_file_args(config_file_paths, config_for_project)
    log.debug("Config file args: {}".format(ini))
    # Fold into cli namespace with precedence for command line arguments
    for var_lower in ini:
        var_upper = var_lower.upper()
        log.debug("Variable '{}'".format(var_upper))
        if var_upper in cli_vars and cli_vars[var_upper]:
            log.debug("Overridden by cli val '{}'".format(cli_vars[var_upper]))
        else:
            log.debug("Storing in cli")
            cli_vars[var_upper] = ini[var_lower]
    # Flask only recognizes upper-case names; convert obvious bool/int strings.
    imply_types(cli_vars)
    return cli
| 36.673611 | 79 | 0.664647 |
bc82cda115eee8fee56f6228226ff7f50645cc91 | 48,928 | py | Python | updateHostsFile.py | colinmkeith/hosts | d5d8733d7d2638770c25db7cc88daf1985568af6 | [
"MIT"
] | null | null | null | updateHostsFile.py | colinmkeith/hosts | d5d8733d7d2638770c25db7cc88daf1985568af6 | [
"MIT"
] | null | null | null | updateHostsFile.py | colinmkeith/hosts | d5d8733d7d2638770c25db7cc88daf1985568af6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Script by Ben Limmer
# https://github.com/l1m5
#
# This Python script will combine all the host files you provide
# as sources into one, unique host file to keep you internet browsing happy.
import argparse
import fnmatch
import json
import locale
import os
import platform
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import time
from glob import glob
import lxml # noqa: F401
from bs4 import BeautifulSoup
# Detecting Python 3 for version-dependent implementations
PY3 = sys.version_info >= (3, 0)

if PY3:
    from urllib.request import urlopen
else:
    # Python 2 is explicitly unsupported; fail fast at import time.
    raise Exception('We do not support Python 2 anymore.')

# Syntactic sugar for "sudo" command in UNIX / Linux
# (OpenBSD ships doas instead of sudo).
if platform.system() == "OpenBSD":
    SUDO = ["/usr/bin/doas"]
else:
    SUDO = ["/usr/bin/env", "sudo"]

# Project Settings
# Absolute directory of this script; data/extension paths resolve against it.
BASEDIR_PATH = os.path.dirname(os.path.realpath(__file__))
def get_defaults():
    """
    Helper method for getting the default settings.

    Returns
    -------
    default_settings : dict
        A dictionary of the default settings when updating host information.
    """
    join = path_join_robust
    defaults = {
        # Counters / paths
        "numberofrules": 0,
        "datapath": join(BASEDIR_PATH, "data"),
        # Behavior toggles
        "freshen": True,
        "replace": False,
        "backup": False,
        "skipstatichosts": False,
        "keepdomaincomments": True,
        "extensionspath": join(BASEDIR_PATH, "extensions"),
        "extensions": [],
        "compress": False,
        "minimise": False,
        # Output configuration
        "outputsubfolder": "",
        "hostfilename": "hosts",
        "targetip": "0.0.0.0",
        # Source / readme bookkeeping
        "sourcedatafilename": "update.json",
        "sourcesdata": [],
        "readmefilename": "readme.md",
        "readmetemplate": join(BASEDIR_PATH, "readme_template.md"),
        "readmedata": {},
        "readmedatafilename": join(BASEDIR_PATH, "readmeData.json"),
        # Exclusion / white- and blacklist settings
        "exclusionpattern": r"([a-zA-Z\d-]+\.){0,}",
        "exclusionregexs": [],
        "exclusions": [],
        "commonexclusions": ["hulu.com"],
        "blacklistfile": join(BASEDIR_PATH, "blacklist"),
        "whitelistfile": join(BASEDIR_PATH, "whitelist"),
    }
    return defaults
# End Project Settings
def main():
    """Parse CLI options, build the unified hosts file, and optionally
    install it system-wide and flush the DNS cache."""
    parser = argparse.ArgumentParser(description="Creates a unified hosts "
                                     "file from hosts stored in "
                                     "data subfolders.")
    parser.add_argument("--auto", "-a", dest="auto", default=False,
                        action="store_true", help="Run without prompting.")
    parser.add_argument("--backup", "-b", dest="backup", default=False,
                        action="store_true", help="Backup the hosts "
                                                  "files before they "
                                                  "are overridden.")
    parser.add_argument("--extensions", "-e", dest="extensions", default=[],
                        nargs="*", help="Host extensions to include "
                                        "in the final hosts file.")
    parser.add_argument("--ip", "-i", dest="targetip", default="0.0.0.0",
                        help="Target IP address. Default is 0.0.0.0.")
    parser.add_argument("--keepdomaincomments", "-k",
                        dest="keepdomaincomments", action="store_false",
                        default=True,
                        help="Do not keep domain line comments.")
    parser.add_argument("--noupdate", "-n", dest="noupdate", default=False,
                        action="store_true", help="Don't update from "
                                                  "host data sources.")
    parser.add_argument("--skipstatichosts", "-s", dest="skipstatichosts",
                        default=False, action="store_true",
                        help="Skip static localhost entries "
                             "in the final hosts file.")
    parser.add_argument("--output", "-o", dest="outputsubfolder", default="",
                        help="Output subfolder for generated hosts file.")
    parser.add_argument("--replace", "-r", dest="replace", default=False,
                        action="store_true", help="Replace your active "
                                                  "hosts file with this "
                                                  "new hosts file.")
    parser.add_argument("--flush-dns-cache", "-f", dest="flushdnscache",
                        default=False, action="store_true",
                        help="Attempt to flush DNS cache "
                             "after replacing the hosts file.")
    parser.add_argument("--compress", "-c", dest="compress",
                        default=False, action="store_true",
                        help="Compress the hosts file "
                             "ignoring non-necessary lines "
                             "(empty lines and comments) and "
                             "putting multiple domains in "
                             "each line. Improve the "
                             "performances under Windows.")
    parser.add_argument("--minimise", "-m", dest="minimise",
                        default=False, action="store_true",
                        help="Minimise the hosts file "
                             "ignoring non-necessary lines "
                             "(empty lines and comments).")

    # --- Assemble the global settings dict: defaults overlaid by CLI options.
    global settings

    options = vars(parser.parse_args())

    options["outputpath"] = path_join_robust(BASEDIR_PATH, options["outputsubfolder"])
    options["freshen"] = not options["noupdate"]

    settings = get_defaults()
    settings.update(options)

    data_path = settings["datapath"]
    extensions_path = settings["extensionspath"]

    settings["sources"] = list_dir_no_hidden(data_path)
    settings["extensionsources"] = list_dir_no_hidden(extensions_path)

    # All our extensions folders...
    settings["extensions"] = [os.path.basename(item) for item in list_dir_no_hidden(extensions_path)]
    # ... intersected with the extensions passed-in as arguments, then sorted.
    settings["extensions"] = sorted(list(
        set(options["extensions"]).intersection(settings["extensions"])))

    auto = settings["auto"]
    exclusion_regexes = settings["exclusionregexs"]
    source_data_filename = settings["sourcedatafilename"]

    # --- Optionally refresh all upstream host sources.
    update_sources = prompt_for_update(freshen=settings["freshen"],
                                       update_auto=auto)
    if update_sources:
        update_all_sources(source_data_filename, settings["hostfilename"])

    # --- Optionally gather user-selected domain exclusions.
    gather_exclusions = prompt_for_exclusions(skip_prompt=auto)

    if gather_exclusions:
        common_exclusions = settings["commonexclusions"]
        exclusion_pattern = settings["exclusionpattern"]
        exclusion_regexes = display_exclusion_options(
            common_exclusions=common_exclusions,
            exclusion_pattern=exclusion_pattern,
            exclusion_regexes=exclusion_regexes)

    extensions = settings["extensions"]
    sources_data = update_sources_data(settings["sourcesdata"],
                                       datapath=data_path,
                                       extensions=extensions,
                                       extensionspath=extensions_path,
                                       sourcedatafilename=source_data_filename)

    # --- Merge all sources and produce the final file (optionally
    # compressed or minimised through an intermediate temp file).
    merge_file = create_initial_file()
    remove_old_hosts_file(settings["backup"])
    if settings["compress"]:
        final_file = open(path_join_robust(settings["outputpath"], "hosts"), "w+b")
        compressed_file = tempfile.NamedTemporaryFile()
        remove_dups_and_excl(merge_file, exclusion_regexes, compressed_file)
        compress_file(compressed_file, settings["targetip"], final_file)
    elif settings["minimise"]:
        final_file = open(path_join_robust(settings["outputpath"], "hosts"), "w+b")
        minimised_file = tempfile.NamedTemporaryFile()
        remove_dups_and_excl(merge_file, exclusion_regexes, minimised_file)
        minimise_file(minimised_file, settings["targetip"], final_file)
    else:
        final_file = remove_dups_and_excl(merge_file, exclusion_regexes)

    number_of_rules = settings["numberofrules"]
    output_subfolder = settings["outputsubfolder"]
    skip_static_hosts = settings["skipstatichosts"]

    write_opening_header(final_file, extensions=extensions,
                         numberofrules=number_of_rules,
                         outputsubfolder=output_subfolder,
                         skipstatichosts=skip_static_hosts)
    final_file.close()

    update_readme_data(settings["readmedatafilename"],
                       extensions=extensions,
                       numberofrules=number_of_rules,
                       outputsubfolder=output_subfolder,
                       sourcesdata=sources_data)

    print_success("Success! The hosts file has been saved in folder " +
                  output_subfolder + "\nIt contains " +
                  "{:,}".format(number_of_rules) +
                  " unique entries.")

    # --- Optionally install the generated file over the system hosts file.
    move_file = prompt_for_move(final_file, auto=auto,
                                replace=settings["replace"],
                                skipstatichosts=skip_static_hosts)

    # We only flush the DNS cache if we have
    # moved a new hosts file into place.
    if move_file:
        prompt_for_flush_dns_cache(flush_cache=settings["flushdnscache"],
                                   prompt_flush=not auto)
# Prompt the User
def prompt_for_update(freshen, update_auto):
    """
    Prompt the user to update all hosts files.

    If requested, the function will update all data sources after it
    checks that a hosts file does indeed exist.

    Parameters
    ----------
    freshen : bool
        Whether data sources should be updated. This function will return
        if it is requested that data sources not be updated.
    update_auto : bool
        Whether or not to automatically update all data sources.

    Returns
    -------
    update_sources : bool
        Whether or not we should update data sources for exclusion files.
    """
    # Create a hosts file if it doesn't exist.
    hosts_file = path_join_robust(BASEDIR_PATH, "hosts")

    if not os.path.isfile(hosts_file):
        try:
            open(hosts_file, "w+").close()
        except OSError:
            # The module already refuses to run on Python 2, and on
            # Python 3 IOError is an alias of OSError, so catching
            # OSError alone covers every creation failure.
            print_failure("ERROR: No 'hosts' file in the folder. Try creating one manually.")

    if not freshen:
        # Previously this was a bare `return` (implicit None); return an
        # explicit bool so every exit path matches the documented contract.
        return False

    prompt = "Do you want to update all data sources?"

    if update_auto or query_yes_no(prompt):
        return True
    elif not update_auto:
        print("OK, we'll stick with what we've got locally.")

    return False
def prompt_for_exclusions(skip_prompt):
    """
    Ask whether custom domains should be excluded from being blocked.

    Parameters
    ----------
    skip_prompt : bool
        When True the question is skipped entirely and only whitelist
        entries will be excluded.

    Returns
    -------
    gather_exclusions : bool
        True when the user wants to pick additional domains to exclude
        beyond those already in the whitelist.
    """
    if skip_prompt:
        return False

    prompt = ("Do you want to exclude any domains?\n"
              "For example, hulu.com video streaming must be able to access "
              "its tracking and ad servers in order to play video.")

    if query_yes_no(prompt):
        return True

    print("OK, we'll only exclude domains in the whitelist.")
    return False
def prompt_for_flush_dns_cache(flush_cache, prompt_flush):
    """
    Flush the DNS cache, optionally asking the user first.
    Parameters
    ----------
    flush_cache : bool
        Whether to flush the DNS cache without prompting.
    prompt_flush : bool
        If `flush_cache` is False, whether we should prompt for flushing the
        cache. Otherwise, the function returns immediately.
    """
    if flush_cache:
        # Unconditional flush requested: no question needed.
        flush_dns_cache()
        return
    if prompt_flush and query_yes_no("Attempt to flush the DNS cache?"):
        flush_dns_cache()
def prompt_for_move(final_file, **move_params):
    """
    Decide whether to move the newly created hosts file into its OS
    location, prompting the user when the flags leave it ambiguous.
    Parameters
    ----------
    final_file : file
        The file object that contains the newly created hosts data.
    move_params : kwargs
        Dictionary providing additional parameters for moving the hosts file
        into place. Currently, those fields are:
        1) auto
        2) replace
        3) skipstatichosts
    Returns
    -------
    move_file : bool
        Whether or not the final hosts file was moved.
    """
    skip_static = move_params["skipstatichosts"]
    replace_requested = move_params["replace"]
    auto_mode = move_params["auto"]
    if replace_requested and not skip_static:
        # Explicit replace (with static hosts retained) always moves.
        should_move = True
    elif auto_mode or skip_static:
        # Unattended runs and static-host-skipping runs never move.
        should_move = False
    else:
        should_move = query_yes_no(
            "Do you want to replace your existing hosts file with the newly generated file?")
    if should_move:
        move_hosts_file_into_place(final_file)
    return should_move
# End Prompt the User
# Exclusion logic
def display_exclusion_options(common_exclusions, exclusion_pattern, exclusion_regexes):
    """
    Walk the user through the exclusion options.
    Each commonly excluded domain is offered individually, then the user may
    enter arbitrary additional domains.
    Parameters
    ----------
    common_exclusions : list
        A list of common domains that are excluded from being blocked. One
        example is Hulu. This setting is set directly in the script and cannot
        be overwritten by the user.
    exclusion_pattern : str
        The exclusion pattern with which to create the domain regex.
    exclusion_regexes : list
        The list of regex patterns used to exclude domains.
    Returns
    -------
    aug_exclusion_regexes : list
        The original list of regex patterns potentially with additional
        patterns from domains that user chooses to exclude.
    """
    for candidate in common_exclusions:
        question = "Do you want to exclude the domain " + candidate + " ?"
        if query_yes_no(question):
            exclusion_regexes = exclude_domain(
                candidate, exclusion_pattern, exclusion_regexes)
    # Offer free-form exclusions on top of the common list.
    if query_yes_no("Do you want to exclude any other domains?"):
        exclusion_regexes = gather_custom_exclusions(
            exclusion_pattern, exclusion_regexes)
    return exclusion_regexes
def gather_custom_exclusions(exclusion_pattern, exclusion_regexes):
    """
    Gather custom exclusions from the user.
    Parameters
    ----------
    exclusion_pattern : str
        The exclusion pattern with which to create the domain regex.
    exclusion_regexes : list
        The list of regex patterns used to exclude domains.
    Returns
    -------
    aug_exclusion_regexes : list
        The original list of regex patterns potentially with additional
        patterns from domains that user chooses to exclude.
    """
    # We continue running this while-loop until the user
    # says that they have no more domains to exclude.
    while True:
        domain_prompt = "Enter the domain you want to exclude (e.g. facebook.com): "
        user_domain = input(domain_prompt)
        # Invalid entries are skipped; is_valid_domain_format already
        # printed an explanation for the user.
        if is_valid_domain_format(user_domain):
            exclusion_regexes = exclude_domain(user_domain, exclusion_pattern, exclusion_regexes)
        continue_prompt = "Do you have more domains you want to enter?"
        if not query_yes_no(continue_prompt):
            break
    return exclusion_regexes
def exclude_domain(domain, exclusion_pattern, exclusion_regexes):
    """
    Exclude a domain from being blocked.
    Compiles the exclusion regex for this domain and appends it to the
    already-existing list of exclusion regexes.
    Parameters
    ----------
    domain : str
        The filename or regex pattern to exclude.
    exclusion_pattern : str
        The exclusion pattern with which to create the domain regex.
    exclusion_regexes : list
        The list of regex patterns used to exclude domains.
    Returns
    -------
    aug_exclusion_regexes : list
        The original list of regex patterns with one additional pattern from
        the `domain` input.
    """
    # NOTE(review): `domain` is concatenated unescaped, so dots in it act as
    # regex wildcards — matches the historical behavior; confirm if tightening.
    exclusion_regexes.append(re.compile(exclusion_pattern + domain))
    return exclusion_regexes
def matches_exclusions(stripped_rule, exclusion_regexes):
    """
    Check whether a rule matches an exclusion rule we already provided.
    If this function returns True, that means this rule should be excluded
    from the final hosts file.
    Parameters
    ----------
    stripped_rule : str
        The rule that we are checking. Expected shape: "<ip> <hostname> ...".
    exclusion_regexes : list
        The list of regex patterns used to exclude domains.
    Returns
    -------
    matches_exclusion : bool
        Whether or not the rule string matches a provided exclusion.
    """
    # The hostname is the second whitespace-separated token of the rule.
    domain = stripped_rule.split()[1]
    return any(pattern.search(domain) for pattern in exclusion_regexes)
# End Exclusion Logic
# Update Logic
def update_sources_data(sources_data, **sources_params):
    """
    Update the sources data and information for each source.
    Parameters
    ----------
    sources_data : list
        The list of sources data that we are to update.
    sources_params : kwargs
        Dictionary providing additional parameters for updating the
        sources data. Currently, those fields are:
        1) datapath
        2) extensions
        3) extensionspath
        4) sourcedatafilename
    Returns
    -------
    update_sources_data : list
        The original source data list with new source data appended.
    """
    source_data_filename = sources_params["sourcedatafilename"]
    # Base sources: one JSON data file per source folder.
    for source in recursive_glob(sources_params["datapath"], source_data_filename):
        # "with" guarantees the handle is closed even if json.load raises;
        # the previous open()/close() pair leaked the handle on parse errors.
        with open(source, "r") as update_file:
            sources_data.append(json.load(update_file))
    # Extension sources: same JSON layout, under each extension's folder.
    for source in sources_params["extensions"]:
        source_dir = path_join_robust(
            sources_params["extensionspath"], source)
        for update_file_path in recursive_glob(source_dir, source_data_filename):
            with open(update_file_path, "r") as update_file:
                sources_data.append(json.load(update_file))
    return sources_data
def jsonarray(json_array_string):
    """
    Transformer: convert a JSON array string of hosts into one host per
    line, prefixing each line with "127.0.0.1 ".
    Parameters
    ----------
    json_array_string : str
        The json array string in the form
        '["example1.com", "example1.com", ...]'
    """
    domains = json.loads(json_array_string)
    prefix = "127.0.0.1 "
    # Joining with "\n<prefix>" yields a bare prefix for an empty array,
    # matching the historical output.
    return prefix + ("\n" + prefix).join(domains)
def update_all_sources(source_data_filename, host_filename):
    """
    Update all host files, regardless of folder depth.
    Parameters
    ----------
    source_data_filename : str
        The name of the filename where information regarding updating
        sources for a particular URL is stored. This filename is assumed
        to be the same for all sources.
    host_filename : str
        The name of the file in which the updated source information
        in stored for a particular URL. This filename is assumed to be
        the same for all sources.
    """
    # The transforms we support
    transform_methods = {
        'jsonarray': jsonarray
    }
    all_sources = recursive_glob("*", source_data_filename)
    for source in all_sources:
        # "with" guarantees the handle is closed even when json.load raises;
        # the previous open()/close() pair leaked the handle on parse errors.
        with open(source, "r") as update_file:
            update_data = json.load(update_file)
        update_url = update_data["url"]
        update_transforms = update_data.get("transforms") or []
        print("Updating source " + os.path.dirname(source) + " from " + update_url)
        try:
            updated_file = get_file_by_url(update_url)
            # spin the transforms as required
            for transform in update_transforms:
                updated_file = transform_methods[transform](updated_file)
            # get rid of carriage-return symbols
            updated_file = updated_file.replace("\r", "")
            # Write the fetched data next to its source descriptor; "with"
            # closes the output even if write_data fails mid-way.
            with open(path_join_robust(BASEDIR_PATH,
                                       os.path.dirname(source),
                                       host_filename), "wb") as hosts_file:
                write_data(hosts_file, updated_file)
        except Exception:
            # Best-effort per source: report and continue with the next one.
            # (A failed download returns None, which raises on .replace above.)
            print("Error in updating source: ", update_url)
# End Update Logic
# File Logic
def create_initial_file():
    """
    Initialize the file in which we merge all host files for later pruning.
    Returns
    -------
    merge_file : file
        A NamedTemporaryFile holding the concatenated hosts data from every
        configured source, every enabled extension, and the local blacklist.
    """
    merge_file = tempfile.NamedTemporaryFile()
    # spin the sources for the base file
    for source in recursive_glob(settings["datapath"],
                                 settings["hostfilename"]):
        # Wrap each source's data in start/end markers named after its folder.
        start = "# Start {}\n\n".format(os.path.basename(os.path.dirname(source)))
        end = "# End {}\n\n".format(os.path.basename(os.path.dirname(source)))
        with open(source, "r") as curFile:
            write_data(merge_file, start + curFile.read() + end)
    # spin the sources for extensions to the base file
    for source in settings["extensions"]:
        for filename in recursive_glob(path_join_robust(
                settings["extensionspath"], source), settings["hostfilename"]):
            with open(filename, "r") as curFile:
                write_data(merge_file, curFile.read())
    # Append the user's blacklist, copying the ".example" template if needed.
    maybe_copy_example_file(settings["blacklistfile"])
    if os.path.isfile(settings["blacklistfile"]):
        with open(settings["blacklistfile"], "r") as curFile:
            write_data(merge_file, curFile.read())
    return merge_file
def compress_file(input_file, target_ip, output_file):
    """
    Reduce the file dimension removing non-necessary lines (empty lines and
    comments) and putting multiple domains in each line.
    Reducing the number of lines of the file, the parsing under Microsoft
    Windows is much faster.
    Parameters
    ----------
    input_file : file
        The file object that contains the hostnames that we are reducing.
        Must be opened in binary mode (lines are UTF-8 decoded here).
    target_ip : str
        The target IP address.
    output_file : file
        The file object that will contain the reduced hostnames.
    """
    input_file.seek(0)  # reset file pointer
    write_data(output_file, '\n')
    target_ip_len = len(target_ip)
    lines = [target_ip]
    lines_index = 0
    for line in input_file.readlines():
        line = line.decode("UTF-8")
        # Only rule lines (those starting with the target IP) are kept;
        # comments and blanks are dropped entirely.
        if line.startswith(target_ip):
            # Counting spaces caps each output line at nine hostnames after
            # the IP; inline "#" comments are stripped via line.find('#').
            if lines[lines_index].count(' ') < 9:
                lines[lines_index] += ' ' \
                    + line[target_ip_len:line.find('#')].strip()
            else:
                # Current line is full: terminate it and start a new one
                # seeded with this rule.
                lines[lines_index] += '\n'
                lines.append(line[:line.find('#')].strip())
                lines_index += 1
    for line in lines:
        write_data(output_file, line)
    input_file.close()
def minimise_file(input_file, target_ip, output_file):
    """
    Shrink the hosts data by dropping non-essential lines (empty lines and
    comments), keeping one rule per line.
    Parameters
    ----------
    input_file : file
        The file object that contains the hostnames that we are reducing.
        Must be opened in binary mode (lines are UTF-8 decoded here).
    target_ip : str
        The target IP address.
    output_file : file
        The file object that will contain the reduced hostnames.
    """
    input_file.seek(0)  # rewind to the start of the merged data
    write_data(output_file, '\n')
    for raw_line in input_file.readlines():
        text = raw_line.decode("UTF-8")
        # Keep only rule lines; anything not starting with the target IP
        # (comments, blanks, markers) is discarded.
        if not text.startswith(target_ip):
            continue
        # Strip any inline "#" comment and trailing whitespace.
        write_data(output_file, text[:text.find('#')].strip() + '\n')
    input_file.close()
def remove_dups_and_excl(merge_file, exclusion_regexes, output_file=None):
    """
    Remove duplicates and remove hosts that we are excluding.
    We check for duplicate hostnames as well as remove any hostnames that
    have been explicitly excluded by the user.
    Parameters
    ----------
    merge_file : file
        The file object that contains the hostnames that we are pruning.
    exclusion_regexes : list
        The list of regex patterns used to exclude domains.
    output_file : file
        The file object in which the result is written. If None, the file
        'settings["outputpath"]' will be created.
    """
    number_of_rules = settings["numberofrules"]
    maybe_copy_example_file(settings["whitelistfile"])
    # Whitelisted domains are appended to the user exclusions.
    if os.path.isfile(settings["whitelistfile"]):
        with open(settings["whitelistfile"], "r") as ins:
            for line in ins:
                line = line.strip(" \t\n\r")
                if line and not line.startswith("#"):
                    settings["exclusions"].append(line)
    if not os.path.exists(settings["outputpath"]):
        os.makedirs(settings["outputpath"])
    if output_file is None:
        final_file = open(path_join_robust(settings["outputpath"], "hosts"), "w+b")
    else:
        final_file = output_file
    merge_file.seek(0)  # reset file pointer
    # Seed with the static names so duplicate static rules are dropped.
    hostnames = {"localhost", "localhost.localdomain", "local", "broadcasthost"}
    exclusions = settings["exclusions"]
    for line in merge_file.readlines():
        write_line = True
        # Explicit encoding
        line = line.decode("UTF-8")
        # Replace tabs with a space. FIX: the previous code called
        # line.replace("\t+", " "), but str.replace is literal, so it only
        # replaced the two-character sequence "tab, plus" and left real
        # tabs untouched.
        line = line.replace("\t", " ")
        # see gh-271: trim trailing whitespace, periods
        line = line.rstrip(' .')
        # Pass comments and blank lines through verbatim. FIX: this used to
        # test `line[0]` against r'^\s*$', which (a) matched any line merely
        # *starting* with whitespace, copying indented rules unprocessed, and
        # (b) raised IndexError on a line rstripped down to "".
        if line.startswith("#") or re.match(r'^\s*$', line):
            write_data(final_file, line)
            continue
        # IPv6 localhost rules are dropped (the static header provides them).
        if "::1" in line:
            continue
        stripped_rule = strip_rule(line)  # strip comments
        if not stripped_rule or matches_exclusions(stripped_rule,
                                                   exclusion_regexes):
            continue
        # Normalize rule
        hostname, normalized_rule = normalize_rule(
            stripped_rule, target_ip=settings["targetip"],
            keep_domain_comments=settings["keepdomaincomments"])
        # Skip any rule mentioning a whitelisted/excluded domain.
        for exclude in exclusions:
            if re.search(r'[\s\.]' + re.escape(exclude) + r'\s', line):
                write_line = False
                break
        if normalized_rule and (hostname not in hostnames) and write_line:
            write_data(final_file, normalized_rule)
            hostnames.add(hostname)
            number_of_rules += 1
    settings["numberofrules"] = number_of_rules
    merge_file.close()
    if output_file is None:
        return final_file
def normalize_rule(rule, target_ip, keep_domain_comments):
    """
    Standardize and format the rule string provided.
    Parameters
    ----------
    rule : str
        The rule whose spelling and spacing we are standardizing.
    target_ip : str
        The target IP address for the rule.
    keep_domain_comments : bool
        Whether or not to keep comments regarding these domains in
        the normalized rule.
    Returns
    -------
    normalized_rule : tuple
        A tuple of the hostname and the reformatted rule string, or
        (None, None) when the rule matches neither supported shape.
    """
    def assemble(target, suffix):
        # Build "<target_ip> <target>" plus any retained trailing text,
        # prefixing a "#" when the suffix is not already a comment.
        out = "%s %s" % (target_ip, target)
        if suffix and keep_domain_comments:
            if suffix.strip().startswith('#'):
                out += " %s" % suffix
            else:
                out += " #%s" % suffix
        return out + "\n"
    # First shape: an IP address followed by a domain name.
    match = re.search(r'^\s*(\d{1,3}\.){3}\d{1,3}\s+([\w\.-]+[a-zA-Z])(.*)', rule)
    if match:
        # Explicitly lowercase and trim the hostname.
        hostname = match.group(2).lower().strip()
        return hostname, assemble(hostname, match.group(3))
    # Second shape: an IP address followed by a host IP address.
    match = re.search(
        r'^\s*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*(.*)',
        rule)
    if match:
        # Explicitly trim the ip host.
        ip_host = match.group(2).strip()
        return ip_host, assemble(ip_host, match.group(3))
    # Neither shape matched: surface the odd rule and signal failure.
    print("==>%s<==" % rule)
    return None, None
def strip_rule(line):
    """
    Sanitize a rule string before writing it to the output hosts file.
    Collapses all runs of whitespace to single spaces; rules with fewer
    than two tokens are rejected.
    Parameters
    ----------
    line : str
        The rule provided for sanitation.
    Returns
    -------
    sanitized_line : str
        The sanitized rule, or "" when the line has fewer than two tokens.
    """
    tokens = line.split()
    # A usable rule needs at least an IP and a hostname.
    return " ".join(tokens) if len(tokens) > 1 else ""
def write_opening_header(final_file, **header_params):
    """
    Write the header information into the newly-created hosts file.
    The header is prepended: the file's current contents are saved, the
    header is written from the top, and the contents are appended back.
    Parameters
    ----------
    final_file : file
        The file object that points to the newly-created hosts file.
    header_params : kwargs
        Dictionary providing additional parameters for populating the header
        information. Currently, those fields are:
        1) extensions
        2) numberofrules
        3) outputsubfolder
        4) skipstatichosts
    """
    final_file.seek(0)  # Reset file pointer.
    file_contents = final_file.read()  # Save content.
    final_file.seek(0)  # Write at the top.
    write_data(final_file, "# This hosts file is a merged collection "
                           "of hosts from reputable sources,\n")
    write_data(final_file, "# with a dash of crowd sourcing via Github\n#\n")
    write_data(final_file, "# Date: " + time.strftime("%d %B %Y %H:%M:%S (%Z)", time.gmtime()) + "\n")
    if header_params["extensions"]:
        write_data(final_file, "# Extensions added to this file: " + ", ".join(
            header_params["extensions"]) + "\n")
    write_data(final_file, ("# Number of unique domains: {:,}\n#\n".format(header_params["numberofrules"])))
    write_data(final_file, "# Fetch the latest version of this file: "
                           "https://raw.githubusercontent.com/StevenBlack/hosts/master/" +
               path_join_robust(header_params["outputsubfolder"], "") + "hosts\n")
    write_data(final_file, "# Project home page: https://github.com/StevenBlack/hosts\n")
    write_data(final_file, "# Project releases: https://github.com/StevenBlack/hosts/releases\n#\n")
    write_data(final_file, "# ===============================================================\n")
    write_data(final_file, "\n")
    # Standard static host entries, unless the caller opted out.
    if not header_params["skipstatichosts"]:
        write_data(final_file, "127.0.0.1 localhost\n")
        write_data(final_file, "127.0.0.1 localhost.localdomain\n")
        write_data(final_file, "127.0.0.1 local\n")
        write_data(final_file, "255.255.255.255 broadcasthost\n")
        write_data(final_file, "::1 localhost\n")
        write_data(final_file, "::1 ip6-localhost\n")
        write_data(final_file, "::1 ip6-loopback\n")
        write_data(final_file, "fe80::1%lo0 localhost\n")
        write_data(final_file, "ff00::0 ip6-localnet\n")
        write_data(final_file, "ff00::0 ip6-mcastprefix\n")
        write_data(final_file, "ff02::1 ip6-allnodes\n")
        write_data(final_file, "ff02::2 ip6-allrouters\n")
        write_data(final_file, "ff02::3 ip6-allhosts\n")
        write_data(final_file, "0.0.0.0 0.0.0.0\n")
        # On Linux, also map the machine's own hostname.
        if platform.system() == "Linux":
            write_data(final_file, "127.0.1.1 " + socket.gethostname() + "\n")
            write_data(final_file, "127.0.0.53 " + socket.gethostname() + "\n")
        write_data(final_file, "\n")
    # Include the user's custom "myhosts" preamble, if present.
    preamble = path_join_robust(BASEDIR_PATH, "myhosts")
    maybe_copy_example_file(preamble)
    if os.path.isfile(preamble):
        with open(preamble, "r") as f:
            write_data(final_file, f.read())
    final_file.write(file_contents)
def update_readme_data(readme_file, **readme_updates):
    """
    Update the host and website information provided in the README JSON data.
    Parameters
    ----------
    readme_file : str
        The name of the README file to update.
    readme_updates : kwargs
        Dictionary providing additional JSON fields to update before
        saving the data. Currently, those fields are:
        1) extensions
        2) sourcesdata
        3) numberofrules
        4) outputsubfolder
    """
    # Generation data is keyed by the extension combination, "base" when none.
    extensions_key = "base"
    extensions = readme_updates["extensions"]
    if extensions:
        extensions_key = "-".join(extensions)
    output_folder = readme_updates["outputsubfolder"]
    generation_data = {"location": path_join_robust(output_folder, ""),
                       "entries": readme_updates["numberofrules"],
                       "sourcesdata": readme_updates["sourcesdata"]}
    # Read-modify-write of the JSON data under this run's key.
    with open(readme_file, "r") as f:
        readme_data = json.load(f)
        readme_data[extensions_key] = generation_data
    with open(readme_file, "w") as f:
        json.dump(readme_data, f)
def move_hosts_file_into_place(final_file):
    """
    Move the newly-created hosts file into its correct location on the OS.
    For UNIX systems, the hosts file is "etc/hosts." On Windows, it's
    "C:\Windows\System32\drivers\etc\hosts."
    For this move to work, you must have administrator privileges to do this.
    On UNIX systems, this means having "sudo" access, and on Windows, it
    means being able to run command prompt in administrator mode.
    Parameters
    ----------
    final_file : file object
        The newly-created hosts file to move.
    """  # noqa: W605
    filename = os.path.abspath(final_file.name)
    if os.name == "posix":
        print("Moving the file requires administrative privileges. You might need to enter your password.")
        # subprocess.call returns a non-zero exit code on failure.
        if subprocess.call(SUDO + ["cp", filename, "/etc/hosts"]):
            print_failure("Moving the file failed.")
    elif os.name == "nt":
        # Windows: no automatic move — tell the user where to put the file.
        print("Automatically moving the hosts file in place is not yet supported.")
        print("Please move the generated file to %SystemRoot%\system32\drivers\etc\hosts")  # noqa: W605
def flush_dns_cache():
    """
    Flush the DNS cache.
    Probes each platform-specific mechanism in turn, reporting success or
    failure for every tool that is found on the system.
    """
    print("Flushing the DNS cache to utilize new hosts file...")
    print("Flushing the DNS cache requires administrative privileges. You might need to enter your password.")
    dns_cache_found = False
    if platform.system() == "Darwin":
        # macOS: ask mDNSResponder to reload.
        if subprocess.call(SUDO + ["killall", "-HUP", "mDNSResponder"]):
            print_failure("Flushing the DNS cache failed.")
    elif os.name == "nt":
        print("Automatically flushing the DNS cache is not yet supported.")
        print("Please copy and paste the command 'ipconfig /flushdns' in "
              "administrator command prompt after running this script.")
    else:
        # Other POSIX (mostly Linux): try every known cache/service manager.
        nscd_prefixes = ["/etc", "/etc/rc.d"]
        nscd_msg = "Flushing the DNS cache by restarting nscd {result}"
        for nscd_prefix in nscd_prefixes:
            nscd_cache = nscd_prefix + "/init.d/nscd"
            if os.path.isfile(nscd_cache):
                dns_cache_found = True
                # subprocess.call returns a non-zero exit code on failure.
                if subprocess.call(SUDO + [nscd_cache, "restart"]):
                    print_failure(nscd_msg.format(result="failed"))
                else:
                    print_success(nscd_msg.format(result="succeeded"))
        centos_file = "/etc/init.d/network"
        centos_msg = "Flushing the DNS cache by restarting network {result}"
        # NOTE(review): this branch does not set dns_cache_found, so a
        # CentOS-only hit still prints "Unable to determine..." below.
        if os.path.isfile(centos_file):
            if subprocess.call(SUDO + [centos_file, "restart"]):
                print_failure(centos_msg.format(result="failed"))
            else:
                print_success(centos_msg.format(result="succeeded"))
        system_prefixes = ["/usr", ""]
        service_types = ["NetworkManager", "wicd", "dnsmasq", "networking"]
        for system_prefix in system_prefixes:
            systemctl = system_prefix + "/bin/systemctl"
            system_dir = system_prefix + "/lib/systemd/system"
            for service_type in service_types:
                service = service_type + ".service"
                service_file = path_join_robust(system_dir, service)
                service_msg = ("Flushing the DNS cache by restarting " + service + " {result}")
                if os.path.isfile(service_file):
                    dns_cache_found = True
                    if subprocess.call(SUDO + [systemctl, "restart", service]):
                        print_failure(service_msg.format(result="failed"))
                    else:
                        print_success(service_msg.format(result="succeeded"))
        dns_clean_file = "/etc/init.d/dns-clean"
        dns_clean_msg = "Flushing the DNS cache via dns-clean executable {result}"
        if os.path.isfile(dns_clean_file):
            dns_cache_found = True
            if subprocess.call(SUDO + [dns_clean_file, "start"]):
                print_failure(dns_clean_msg.format(result="failed"))
            else:
                print_success(dns_clean_msg.format(result="succeeded"))
    if not dns_cache_found:
        print_failure("Unable to determine DNS management tool.")
def remove_old_hosts_file(backup):
    """
    Remove the old hosts file.
    This is a hotfix because merging with an already existing hosts file leads
    to artifacts and duplicates.
    Parameters
    ----------
    backup : boolean, default False
        Whether or not to backup the existing hosts file.
    """
    old_file_path = path_join_robust(BASEDIR_PATH, "hosts")
    # Create if already removed, so remove won't raise an error.
    open(old_file_path, "a").close()
    if backup:
        backup_file_path = path_join_robust(BASEDIR_PATH, "hosts-{}".format(
            time.strftime("%Y-%m-%d-%H-%M-%S")))
        # Make a backup copy, marking the date in which the list was updated
        shutil.copy(old_file_path, backup_file_path)
    # Remove-then-recreate leaves an empty hosts file in place.
    os.remove(old_file_path)
    # Create new empty hosts file
    open(old_file_path, "a").close()
# End File Logic
def domain_to_idna(line):
    """
    Encode the domain present in a line into `idna`. This way we
    avoid most encoding issues.
    Parameters
    ----------
    line : str
        The line we have to encode/decode.
    Returns
    -------
    line : str
        The line in a converted format.
    Notes
    -----
    - This function encodes only the domain to `idna` format because in
      most cases the encoding issue is due to a domain which looks like
      `b'\xc9\xa2oogle.com'.decode('idna')`.
    - About the splitting:
      We split because we only want to encode the domain and not the full
      line, which may cause some issues. Keep in mind that we split, but we
      still concatenate once we encoded the domain.
    - The following split the prefix `0.0.0.0` or `127.0.0.1` of a line.
    - The following also split the trailing comment of a given line.
    """
    # Comment lines are passed through without touching the domain logic.
    if not line.startswith('#'):
        tabs = '\t'
        space = ' '
        tabs_position, space_position = (line.find(tabs), line.find(space))
        # Pick whichever separator appears first; '' means no separator found.
        if tabs_position > -1 and space_position > -1:
            if space_position < tabs_position:
                separator = space
            else:
                separator = tabs
        elif not tabs_position == -1:
            separator = tabs
        elif not space_position == -1:
            separator = space
        else:
            separator = ''
        if separator:
            splited_line = line.split(separator)
            # Skip empty fields produced by consecutive separators; `index`
            # lands on the first non-empty token after the IP prefix.
            index = 1
            while index < len(splited_line):
                if splited_line[index]:
                    break
                index += 1
            # If the token carries an inline comment, encode only the part
            # before the '#' and re-attach the comment text.
            if '#' in splited_line[index]:
                index_comment = splited_line[index].find('#')
                if index_comment > -1:
                    comment = splited_line[index][index_comment:]
                    splited_line[index] = splited_line[index] \
                        .split(comment)[0] \
                        .encode("IDNA").decode("UTF-8") + \
                        comment
            splited_line[index] = splited_line[index] \
                .encode("IDNA") \
                .decode("UTF-8")
            return separator.join(splited_line)
        # No separator: the whole line is treated as a domain.
        return line.encode("IDNA").decode("UTF-8")
    return line.encode("UTF-8").decode("UTF-8")
# Helper Functions
def maybe_copy_example_file(file_path):
    """
    Seed `file_path` from its ".example" template when it is missing.
    If the path exists, or neither the path nor the ".example" file exists,
    nothing happens.
    Parameters
    ----------
    file_path : str
        The full file path to check.
    """
    if os.path.isfile(file_path):
        # Never clobber an existing file.
        return
    example_path = file_path + ".example"
    if os.path.isfile(example_path):
        shutil.copyfile(example_path, file_path)
def get_file_by_url(url):
    """
    Get a file data located at a particular URL.
    Parameters
    ----------
    url : str
        The URL at which to access the data.
    Returns
    -------
    url_data : str or None
        The data retrieved at that URL from the file. Returns None if the
        attempted retrieval is unsuccessful.
    Note
    ----
    - BeautifulSoup is used in this case to avoid having to search in which
      format we have to encode or decode data before parsing it to UTF-8.
    """
    try:
        f = urlopen(url)
        # get_text() gives us the payload with the encoding normalized;
        # each line's domain is then IDNA-encoded by domain_to_idna.
        soup = BeautifulSoup(f.read(), 'lxml').get_text()
        return '\n'.join(list(map(domain_to_idna, soup.split('\n'))))
    except Exception:
        # Any failure (network, parse, encode) is reported; None is returned
        # implicitly, which callers must handle.
        print("Problem getting file: ", url)
def write_data(f, data):
    """
    Write a text string to a binary-mode file object as UTF-8.
    Parameters
    ----------
    f : file
        The file object at which to write the data (opened in binary mode).
    data : str
        The data to write to the file.
    """
    encoded = data.encode("UTF-8")
    f.write(encoded)
def list_dir_no_hidden(path):
    """
    List all files in a directory, except for hidden files.
    Parameters
    ----------
    path : str
        The path of the directory whose files we wish to list.
    """
    # glob's "*" deliberately skips dotfiles: it never matches a leading ".".
    pattern = os.path.join(path, "*")
    return glob(pattern)
def query_yes_no(question, default="yes"):
    """
    Ask a yes/no question via input() and get answer from the user.
    Inspired by the following implementation:
    http://code.activestate.com/recipes/577058
    Parameters
    ----------
    question : str
        The question presented to the user.
    default : str, default "yes"
        The presumed answer if the user just hits <Enter>. It must be "yes",
        "no", or None (means an answer is required of the user).
    Returns
    -------
    yes : Whether or not the user replied yes to the question.
    """
    # Accepted spellings, mapped to their canonical answers.
    valid = {"yes": "yes", "y": "yes", "ye": "yes",
             "no": "no", "n": "no"}
    # The prompt suffix advertises which answer <Enter> selects.
    prompt = {None: " [y/n] ",
              "yes": " [Y/n] ",
              "no": " [y/N] "}.get(default, None)
    if not prompt:
        raise ValueError("invalid default answer: '%s'" % default)
    reply = None
    # Keep asking until an acceptable reply (or the default) is obtained.
    while not reply:
        sys.stdout.write(colorize(question, Colors.PROMPT) + prompt)
        choice = input().lower()
        reply = None
        if default and not choice:
            reply = default
        elif choice in valid:
            reply = valid[choice]
        else:
            print_failure("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
    return reply == "yes"
def is_valid_domain_format(domain):
    """
    Check whether a provided domain is valid.
    Rejects empty strings and entries carrying a "www." or scheme prefix,
    printing an explanation in either case.
    Parameters
    ----------
    domain : str
        The domain against which to check.
    Returns
    -------
    valid_domain : bool
        Whether or not the domain provided is valid.
    """
    if domain == "":
        print("You didn't enter a domain. Try again.")
        return False
    # Reject "www.", "www2." etc. prefixes and http/https schemes.
    bad_prefix = re.compile(r"www\d{0,3}[.]|https?")
    if bad_prefix.match(domain):
        print("The domain " + domain + " is not valid. Do not include "
              "www.domain.com or http(s)://domain.com. Try again.")
        return False
    return True
def recursive_glob(stem, file_pattern):
    """
    Recursively match files in a directory according to a pattern.
    Parameters
    ----------
    stem : str
        The directory in which to recurse
    file_pattern : str
        The filename regex pattern to which to match.
    Returns
    -------
    matches_list : list
        A list of filenames in the directory that match the file pattern.
    """
    if sys.version_info >= (3, 5):
        # glob gained "**" recursion in Python 3.5.
        return glob(stem + "/**/" + file_pattern, recursive=True)
    # gh-316: this will avoid invalid unicode comparisons in Python 2.x
    if stem == str("*"):
        stem = "."
    matches = []
    # Manual walk for older interpreters without recursive glob.
    for root, _dirnames, filenames in os.walk(stem):
        matches.extend(path_join_robust(root, filename)
                       for filename in fnmatch.filter(filenames, file_pattern))
    return matches
def path_join_robust(path, *paths):
    """
    Wrapper around `os.path.join` with handling for locale issues.
    Parameters
    ----------
    path : str
        The first path to join.
    paths : varargs
        Subsequent path strings to join.
    Returns
    -------
    joined_path : str
        The joined path string of the two path inputs.
    Raises
    ------
    locale.Error : A locale issue was detected that prevents path joining.
    """
    try:
        # gh-316: joining unicode and str can be saddening in Python 2.x,
        # so coerce every component to str first.
        coerced = [str(segment) for segment in paths]
        return os.path.join(str(path), *coerced)
    except UnicodeDecodeError as e:
        raise locale.Error("Unable to construct path. This is likely a LOCALE issue:\n\n" + str(e))
# Colors
class Colors(object):
    # ANSI SGR escape sequences used to colorize terminal output.
    PROMPT = "\033[94m"  # bright blue: interactive questions
    SUCCESS = "\033[92m"  # bright green: success messages
    FAIL = "\033[91m"  # bright red: failure messages
    ENDC = "\033[0m"  # reset: restore the terminal's default style
def supports_color():
    """
    Check whether the running terminal or command prompt supports color.
    Inspired by StackOverflow link (and Django implementation) here:
    https://stackoverflow.com/questions/7445658
    Returns
    -------
    colors_supported : bool
        Whether the running terminal or command prompt supports color.
    """
    # Windows consoles only count when ANSICON is present; "Pocket PC" never.
    platform_ok = sys.platform != "Pocket PC" and (
        sys.platform != "win32" or "ANSICON" in os.environ)
    # Colors only make sense when stdout is an interactive terminal.
    is_a_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
    return platform_ok and is_a_tty
def colorize(text, color):
    """
    Wrap a string so that it displays in a particular color.
    This function adds a prefix and suffix to a text string so that it is
    displayed as a particular color, either in command prompt or the terminal.
    If the running terminal or command prompt does not support color, the
    original text is returned without being wrapped.
    Parameters
    ----------
    text : str
        The message to display.
    color : str
        The color string prefix to put before the text.
    Returns
    -------
    wrapped_str : str
        The wrapped string to display in color, if possible.
    """
    if supports_color():
        # Append the reset code so subsequent output keeps its normal style.
        return color + text + Colors.ENDC
    return text
def print_success(text):
    """
    Print a success message.
    Parameters
    ----------
    text : str
        The message to display.
    """
    # Rendered green when the terminal supports ANSI colors.
    print(colorize(text, Colors.SUCCESS))
def print_failure(text):
    """
    Print a failure message.
    Parameters
    ----------
    text : str
        The message to display.
    """
    # Rendered red when the terminal supports ANSI colors.
    print(colorize(text, Colors.FAIL))
# End Helper Functions
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 32.06291 | 110 | 0.612083 |
1620de2fd3e4bc1e9c0c3fe69d74ea7857e0b9b5 | 1,757 | py | Python | scripts/column_comparison.py | alniniclas/junit-to-jmh-experiment | 7351ce7aa172da6d1bb047b46667b027044593a4 | [
"Apache-2.0"
] | null | null | null | scripts/column_comparison.py | alniniclas/junit-to-jmh-experiment | 7351ce7aa172da6d1bb047b46667b027044593a4 | [
"Apache-2.0"
] | null | null | null | scripts/column_comparison.py | alniniclas/junit-to-jmh-experiment | 7351ce7aa172da6d1bb047b46667b027044593a4 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import csv
def read_input(input_file):
    """Read a unix-dialect CSV file and return its rows as a list of dicts."""
    with open(input_file, 'r') as handle:
        # Materialize inside the "with" so the reader finishes before close.
        return list(csv.DictReader(handle, dialect=csv.unix_dialect))
def get_output_key(row):
    """Build the grouping key for a result row: (class, test[, repetition])."""
    key = (row['class'], row['test'])
    if 'repetition' in row:
        # Repetition-aware inputs get a three-part key.
        key += (row['repetition'],)
    return key
def write_output(output_file, column_names, rows):
    """Write `rows` (dicts) to a unix-dialect CSV file with a header row."""
    with open(output_file, 'w') as handle:
        writer = csv.DictWriter(handle, column_names, dialect=csv.unix_dialect)
        writer.writeheader()
        for row in rows:
            writer.writerow(row)
def main():
    """Pivot a per-configuration results CSV into one column per config.

    Reads the input CSV (one row per class/test[/repetition]/config_name
    combination), groups rows by (class, test[, repetition]) and writes one
    output row per group, with the selected statistic of each configuration
    in its own column.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', type=str)
    parser.add_argument('output_file', type=str)
    # Which statistic column of the input file to pivot out.
    parser.add_argument('column', type=str, choices={
        'measurements', 'mean', 'variance', 'standard_deviation', 'standard_error', 'cv', 'cv_est'
    })
    args = parser.parse_args()
    output_dict = {}   # group key -> output row dict (shared with output_rows)
    output_rows = []   # output rows in first-seen order
    has_repetitions = False
    config_names = []  # configuration columns in first-seen order
    for row in read_input(args.input_file):
        key = get_output_key(row)
        if key not in output_dict:
            output_dict[key] = {
                'class': row['class'],
                'test': row['test']
            }
            if 'repetition' in row:
                has_repetitions = True
                output_dict[key]['repetition'] = row['repetition']
            output_rows.append(output_dict[key])
        if row['config_name'] not in config_names:
            config_names.append(row['config_name'])
        # The dict stored in output_dict is the same object appended to
        # output_rows, so this assignment updates the output row in place.
        output_dict[key][row['config_name']] = row[args.column]
    output_column_names = ['class', 'test'] + (['repetition'] if has_repetitions else []) + config_names
    write_output(args.output_file, output_column_names, output_rows)


if __name__ == '__main__':
    main()
| 28.803279 | 102 | 0.684121 |
099f9bbd1e33685aefa815286c0d121d7448f4f4 | 3,527 | py | Python | bindings/python/ensmallen/datasets/string/xanthomonadalesbacterium6970.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/xanthomonadalesbacterium6970.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/xanthomonadalesbacterium6970.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Xanthomonadales bacterium 69-70.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def XanthomonadalesBacterium6970(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return the Xanthomonadales bacterium 69-70 graph from STRING.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
    preprocess: bool = True
        Whether to preprocess the graph for optimal load time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes simply
        as a numeric range.
    verbose: int = 2
        Whether to show loading bars during retrieval and building.
    cache: bool = True
        Whether to download and preprocess files only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.  Available versions:
        "homology.v11.5", "physical.links.v11.5", "links.v11.5".
    additional_graph_kwargs: Dict
        Additional graph kwargs forwarded to the retriever.

    Returns
    -----------------------
    Instance of the Xanthomonadales bacterium 69-70 graph.

    References
    ---------------------
    Please cite Szklarczyk et al., "STRING v11: protein-protein association
    networks with increased coverage, ...", Nucleic Acids Research 47(D1),
    D607-D613, 2019, Oxford University Press, if you use this data.
    """
    retriever = AutomaticallyRetrievedGraph(
        graph_name="XanthomonadalesBacterium6970",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 33.590476 | 223 | 0.681032 |
9b8d252070f5b50477a052aa0ecd0f2bc056683e | 14,895 | py | Python | Program_Final/venv/Lib/site-packages/Bio/Blast/Record.py | isacasini/SNV_Xia_et_al_2020 | d729b68e37babf9d30941683af6723aad65319f7 | [
"MIT"
] | 2 | 2019-11-21T02:34:52.000Z | 2021-02-14T07:47:43.000Z | Bio/Blast/Record.py | EngineerKhan/biopython | 4e359e2aa9255aa8b420ad512d3c4cbe15c07a35 | [
"BSD-3-Clause"
] | null | null | null | Bio/Blast/Record.py | EngineerKhan/biopython | 4e359e2aa9255aa8b420ad512d3c4cbe15c07a35 | [
"BSD-3-Clause"
] | 1 | 2021-02-14T07:47:46.000Z | 2021-02-14T07:47:46.000Z | # Copyright 1999-2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Record classes to hold BLAST output.
Classes:
Blast Holds all the information from a blast search.
PSIBlast Holds all the information from a psi-blast search.
Header Holds information from the header.
Description Holds information about one hit description.
Alignment Holds information about one alignment hit.
HSP Holds information about one HSP.
MultipleAlignment Holds information about a multiple alignment.
DatabaseReport Holds information from the database report.
Parameters Holds information from the parameters.
"""
# XXX finish printable BLAST output
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
class Header(object):
    """Holds the header section of a BLAST report.

    Attributes:
     - application, version, date, reference -- identify the BLAST program.
     - query, query_letters -- name and length (int) of the query sequence.
     - database, database_sequences, database_letters -- database name and
       its size in sequences / letters (ints).
    """

    def __init__(self):
        """Start with empty strings and unknown (None) counts."""
        # Program identification.
        self.application = ''
        self.version = ''
        self.date = ''
        self.reference = ''
        # Query sequence.
        self.query = ''
        self.query_letters = None
        # Search database.
        self.database = ''
        self.database_sequences = None
        self.database_letters = None
class Description(object):
    """One hit from the one-line descriptions section.

    Attributes: title (str), score (int), bits (float), e (float E-value),
    num_alignments (int, alignments for the same subject).
    """

    def __init__(self):
        """All fields start unset."""
        self.title = ''
        self.score = None
        self.bits = None
        self.e = None
        self.num_alignments = None

    def __str__(self):
        summary = (self.title, self.score, self.e)
        return "%-66s %5s %s" % summary
class Alignment(object):
    """One subject sequence from the alignments section.

    Attributes: title, hit_id, hit_def (strings), length (int),
    hsps (list of HSP objects).
    """

    def __init__(self):
        """All fields start unset; hsps starts empty."""
        self.title = ''
        self.hit_id = ''
        self.hit_def = ''
        self.length = None
        self.hsps = []

    def __str__(self):
        parts = self.title.split('\n')
        parts.append("Length = %s\n" % self.length)
        return '\n '.join(parts)
class HSP(object):
    """Stores information about one hsp in an alignment hit.

    Members:
     - score           BLAST score of hit.  (float)
     - bits            Number of bits for that score.  (float)
     - expect          Expect value.  (float)
     - num_alignments  Number of alignments for same subject.  (int)
     - identities      Number of identities (int) if using the XML parser.
       Tuple of number of identities/total aligned (int, int)
       if using the (obsolete) plain text parser.
     - positives       Number of positives (int) if using the XML parser.
       Tuple of number of positives/total aligned (int, int)
       if using the (obsolete) plain text parser.
     - gaps            Number of gaps (int) if using the XML parser.
       Tuple of number of gaps/total aligned (int, int) if
       using the (obsolete) plain text parser.
     - align_length    Length of the alignment. (int)
     - strand          Tuple of (query, target) strand.
     - frame           Tuple of 1 or 2 frame shifts, depending on the flavor.
     - query           The query sequence.
     - query_start     The start residue for the query sequence.  (1-based)
     - query_end       The end residue for the query sequence.  (1-based)
     - match           The match sequence.
     - sbjct           The sbjct sequence.
     - sbjct_start     The start residue for the sbjct sequence.  (1-based)
     - sbjct_end       The end residue for the sbjct sequence.  (1-based)

    Not all flavors of BLAST return values for every attribute::

                  score   expect   identities   positives   strand   frame
        BLASTP      X        X         X            X
        BLASTN      X        X         X            X          X
        BLASTX      X        X         X            X                  X
        TBLASTN     X        X         X            X                  X
        TBLASTX     X        X         X            X                 X/X

    Note: for BLASTX, the query sequence is shown as a protein sequence,
    but the numbering is based on the nucleotides.  Thus, the numbering
    is 3x larger than the number of amino acid residues.  A similar effect
    can be seen for the sbjct sequence in TBLASTN, and for both sequences
    in TBLASTX.

    Also, for negative frames, the sequence numbering starts from
    query_start and counts down.
    """

    def __init__(self):
        """Initialize the class."""
        self.score = None
        self.bits = None
        self.expect = None
        self.num_alignments = None
        # (n, total) tuples from the plain-text parser; plain ints from XML.
        self.identities = (None, None)
        self.positives = (None, None)
        self.gaps = (None, None)
        self.align_length = None
        self.strand = (None, None)
        self.frame = ()
        # Aligned query/match/subject strings, with 1-based coordinates.
        self.query = ''
        self.query_start = None
        self.query_end = None
        self.match = ''
        self.sbjct = ''
        self.sbjct_start = None
        self.sbjct_end = None

    def __str__(self):
        # One-line summary followed by the (possibly truncated) alignment.
        lines = ["Score %i (%i bits), expectation %0.1e, alignment length %i"
                 % (self.score, self.bits, self.expect, self.align_length)]
        if self.align_length < 50:
            # Short alignments are printed in full.
            lines.append("Query:%s %s %s" % (str(self.query_start).rjust(8),
                                             str(self.query),
                                             str(self.query_end)))
            # NOTE(review): the match-line literal below may have lost leading
            # padding spaces (upstream Biopython pads the match line so it
            # lines up under the sequences) -- confirm against the original.
            lines.append(" %s"
                         % (str(self.match)))
            lines.append("Sbjct:%s %s %s" % (str(self.sbjct_start).rjust(8),
                                             str(self.sbjct),
                                             str(self.sbjct_end)))
        else:
            # Long alignments are truncated to the first 45 and last 3 columns.
            lines.append("Query:%s %s...%s %s"
                         % (str(self.query_start).rjust(8),
                            str(self.query)[:45],
                            str(self.query)[-3:],
                            str(self.query_end)))
            lines.append(" %s...%s"
                         % (str(self.match)[:45],
                            str(self.match)[-3:]))
            lines.append("Sbjct:%s %s...%s %s"
                         % (str(self.sbjct_start).rjust(8),
                            str(self.sbjct)[:45],
                            str(self.sbjct)[-3:],
                            str(self.sbjct_end)))
        return "\n".join(lines)
class MultipleAlignment(object):
    """Holds information about a multiple alignment.

    Members:
     - alignment: list of (name, start residue, sequence, end residue)
       tuples.  The start residue is 1-based.  It may be blank if that
       sequence is not aligned in this block of the multiple alignment.
    """

    def __init__(self):
        """Initialize the class."""
        self.alignment = []

    def to_generic(self, alphabet):
        """Retrieve generic alignment object for the given alignment.

        Instead of the tuples, this returns a MultipleSeqAlignment object
        from Bio.Align, through which you can manipulate and query
        the object.

        alphabet is the specified alphabet for the sequences in the code (for
        example IUPAC.IUPACProtein).

        NOTE(review): passing an alphabet to Seq/MultipleSeqAlignment is a
        legacy Biopython API -- confirm the installed Biopython still
        accepts it.

        Thanks to James Casbon for the code.
        """
        # TODO - Switch to new Bio.Align.MultipleSeqAlignment class?
        seq_parts = []
        seq_names = []
        parse_number = 0   # which alignment block we are in (1-based)
        n = 0              # row index within the current block
        # Each alignment block starts with the QUERY row.  Rows of the first
        # block create the sequences; rows of later blocks are concatenated
        # onto the sequence at the same row position.
        for name, start, seq, end in self.alignment:
            if name == 'QUERY':  # QUERY is the first in each alignment block
                parse_number += 1
                n = 0
            if parse_number == 1:  # create on first_parse, append on all others
                seq_parts.append(seq)
                seq_names.append(name)
            else:
                seq_parts[n] += seq
            n += 1
        generic = MultipleSeqAlignment([], alphabet)
        for (name, seq) in zip(seq_names, seq_parts):
            generic.append(SeqRecord(Seq(seq, alphabet), name))
        return generic
class Round(object):
    """One round of a PSI-BLAST search.

    Attributes: number (int round number), reused_seqs (Descriptions found
    again in the model), new_seqs (Descriptions not found or below the
    threshold), alignments (list of Alignment), multiple_alignment
    (a MultipleAlignment object).
    """

    def __init__(self):
        """Start with no hits and an unknown round number."""
        self.number = None
        self.reused_seqs = []
        self.new_seqs = []
        self.alignments = []
        self.multiple_alignment = None
class DatabaseReport(object):
    """The database report section at the end of a BLAST output.

    Attributes: database_name, posted_date, num_letters_in_database and
    num_sequences_in_database are parallel lists with one entry per
    searched database.  ka_params / ka_params_gap are (lambda, k, h)
    float tuples; gapped is a flag.
    """

    def __init__(self):
        """Start with empty per-database lists and unknown K/A parameters."""
        self.database_name = []
        self.posted_date = []
        self.num_letters_in_database = []
        self.num_sequences_in_database = []
        self.ka_params = (None, None, None)
        self.gapped = 0  # XXX this isn't set right!
        self.ka_params_gap = (None, None, None)
class Parameters(object):
    """Holds information about the parameters.

    Members:
    matrix              Name of the matrix.
    gap_penalties       Tuple of (open, extend) penalties.  (floats)
    sc_match            Match score for nucleotide-nucleotide comparison
    sc_mismatch         Mismatch penalty for nucleotide-nucleotide comparison
    num_hits            Number of hits to the database.  (int)
    num_sequences       Number of sequences.  (int)
    num_good_extends    Number of extensions.  (int)
    num_seqs_better_e   Number of sequences better than e-value.  (int)
    hsps_no_gap         Number of HSP's better, without gapping.  (int)
    hsps_prelim_gapped  Number of HSP's gapped in prelim test.  (int)
    hsps_prelim_gapped_attemped  Number of HSP's attempted in prelim.  (int)
    hsps_gapped         Total number of HSP's gapped.  (int)
    query_length        Length of the query.  (int)
    query_id            Identifier of the query sequence. (str)
    database_length     Number of letters in the database.  (int)
    effective_hsp_length         Effective HSP length.  (int)
    effective_query_length       Effective length of query.  (int)
    effective_database_length    Effective length of database.  (int)
    effective_search_space       Effective search space.  (int)
    effective_search_space_used  Effective search space used.  (int)
    frameshift          Frameshift window.  Tuple of (int, float)
    threshold           Threshold.  (int)
    window_size         Window size.  (int)
    dropoff_1st_pass    Tuple of (score, bits).  (int, float)
    gap_x_dropoff       Tuple of (score, bits).  (int, float)
    gap_x_dropoff_final Tuple of (score, bits).  (int, float)
    gap_trigger         Tuple of (score, bits).  (int, float)
    blast_cutoff        Tuple of (score, bits).  (int, float)
    """

    def __init__(self):
        """Initialize the class."""
        # Scoring scheme.
        self.matrix = ''
        self.gap_penalties = (None, None)
        self.sc_match = None
        self.sc_mismatch = None
        # Search statistics.
        self.num_hits = None
        self.num_sequences = None
        self.num_good_extends = None
        self.num_seqs_better_e = None
        self.hsps_no_gap = None
        self.hsps_prelim_gapped = None
        self.hsps_prelim_gapped_attemped = None
        self.hsps_gapped = None
        # Query / database sizes.
        self.query_id = None
        self.query_length = None
        self.database_length = None
        self.effective_hsp_length = None
        self.effective_query_length = None
        self.effective_database_length = None
        self.effective_search_space = None
        self.effective_search_space_used = None
        # Algorithm parameters (tuples are (score, bits) pairs).
        self.frameshift = (None, None)
        self.threshold = None
        self.window_size = None
        self.dropoff_1st_pass = (None, None)
        self.gap_x_dropoff = (None, None)
        self.gap_x_dropoff_final = (None, None)
        self.gap_trigger = (None, None)
        self.blast_cutoff = (None, None)
# TODO - Add a friendly __str__ method to BLAST results
class Blast(Header, DatabaseReport, Parameters):
    """Saves the results from a blast search.

    Attributes: descriptions (list of Description), alignments (list of
    Alignment), multiple_alignment (a MultipleAlignment object), plus
    everything inherited from Header, DatabaseReport and Parameters.
    """

    def __init__(self):
        """Initialise every base-class section, then the hit containers."""
        for section in (Header, DatabaseReport, Parameters):
            section.__init__(self)
        self.descriptions = []
        self.alignments = []
        self.multiple_alignment = None
class PSIBlast(Header, DatabaseReport, Parameters):
    """Saves the results from a blastpgp (PSI-BLAST) search.

    Attributes: rounds (list of Round objects), converged (whether the
    search converged), plus everything inherited from Header,
    DatabaseReport and Parameters.
    """

    def __init__(self):
        """Initialise every base-class section, then the per-round state."""
        for section in (Header, DatabaseReport, Parameters):
            section.__init__(self)
        self.rounds = []
        self.converged = 0
| 36.329268 | 80 | 0.58476 |
c2bf9c975cdc52a2fbeccc566a8536c67950ed2c | 10,564 | py | Python | proxy.py | hadagalberto/python-proxy | d56a31abeac42884ecae8d59216a5bc27e4697a0 | [
"Apache-2.0"
] | 3 | 2017-11-09T03:53:15.000Z | 2018-09-25T18:36:54.000Z | proxy.py | hadagalberto/python-proxy | d56a31abeac42884ecae8d59216a5bc27e4697a0 | [
"Apache-2.0"
] | null | null | null | proxy.py | hadagalberto/python-proxy | d56a31abeac42884ecae8d59216a5bc27e4697a0 | [
"Apache-2.0"
] | 4 | 2017-11-08T21:56:55.000Z | 2021-05-21T19:44:55.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pprint import pprint
import sys
import http.client
from socketserver import ThreadingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
from threading import Lock, Timer
from io import StringIO
from urllib.parse import urlsplit
import socket
import select
import gzip
import zlib
import re
import traceback
import json
from time import sleep
# Startup banner (Portuguese), with short pauses for effect.
print("Iniciando Proxy Python")
sleep(0.5)
print("Modificado por @HiddenBR")
sleep(0.5)
print("Carregando Configurações")
sleep(1.5)
# Load proxy settings once at import time.  `servers` is the CONNECT
# allow-list; config["porta"] / config["resposta"] are used further below.
with open("config.json") as data_file:
    config = json.load(data_file)
print("Configurações Carregadas")
servers = config['servers']
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
    """Threaded IPv4 HTTP server that logs handler exceptions to stderr."""

    address_family = socket.AF_INET

    def handle_error(self, request, client_address):
        """Print a framed traceback instead of the default error output."""
        separator = '-' * 40
        print(separator, file=sys.stderr)
        print('Exception happened during processing of request from', client_address, file=sys.stderr)
        traceback.print_exc()
        print(separator, file=sys.stderr)
class ThreadingHTTPServer6(ThreadingHTTPServer):
    """IPv6 variant of ThreadingHTTPServer."""

    address_family = socket.AF_INET6
class SimpleHTTPProxyHandler(BaseHTTPRequestHandler):
    """HTTP/HTTPS forward-proxy request handler.

    GET/POST/HEAD requests are forwarded to the origin server over a pooled
    keep-alive connection (``conn_table``).  CONNECT requests are tunnelled
    by relaying raw bytes between client and origin sockets.  Subclasses can
    intercept traffic by overriding ``request_handler``, ``response_handler``
    and ``save_handler``.
    """

    # Serialises save_handler calls across handler threads.
    global_lock = Lock()
    # (scheme, netloc) -> {'connection': ..., 'timer': ..., 'lock': ...}.
    # Class-level upstream connection pool, shared by all handler threads.
    conn_table = {}
    # Client-side select timeout and upstream keep-alive idle timeout (s).
    timeout = 300
    upstream_timeout = 300
    # When set, appended to the Via header in both directions.
    proxy_via = None

    def log_error(self, format, *args):
        """Suppress the noisy 'Request timed out' line; log everything else."""
        if format == "Request timed out: %r":
            return
        self.log_message(format, *args)

    def do_CONNECT(self):
        """Handle a CONNECT tunnel request (HTTPS on 443, SSH on 22)."""
        req = self
        reqbody = None
        # Strip the port to get the bare host for the allow-list check.
        # NOTE(review): for any other port, `hostip` is never bound and the
        # check below raises NameError -- confirm only 22/443 are expected.
        if ':22' in req.path:
            hostip = req.path.replace(':22', '')
        elif ':443' in req.path:
            hostip = req.path.replace(':443', '')
        req.path = "https://%s/" % req.path.replace(':443', '')
        replaced_reqbody = self.request_handler(req, reqbody)
        if replaced_reqbody is True:
            return  # hook consumed the request itself
        u = urlsplit(req.path)
        address = (u.hostname, u.port or 443)
        try:
            conn = socket.create_connection(address)
        except socket.error:
            return
        # "resposta" (Portuguese: response) is the status text from config.json.
        self.send_response(200, config["resposta"])
        self.send_header('Connection', 'close')
        self.end_headers()
        conns = [self.connection, conn]
        keep_connection = True
        while keep_connection:
            # Allow-list check; `servers` comes from config.json.
            # NOTE(review): .find() only exists on str -- this breaks if
            # config['servers'] is a list.  Also `not x != -1` is just
            # `x == -1`; confirm the intended polarity.
            if not servers.find(hostip) != -1:
                self.send_error(403, "Host nao permitido")
                self.close_connection  # NOTE(review): attribute read, not a call
                keep_connection = False
            rlist, wlist, xlist = select.select(conns, [], conns, self.timeout)
            if xlist:
                break
            # Relay whatever arrived on one socket to the other.
            # NOTE(review): keep_connection is never reset to False when a
            # peer closes (empty recv), so the loop can spin on a dead tunnel.
            for r in rlist:
                other = conns[1] if r is conns[0] else conns[0]
                data = r.recv(8192)
                if data:
                    other.sendall(data)
                    keep_connection = True
        conn.close()

    def do_HEAD(self):
        """Forward a HEAD request (shared implementation in do_SPAM)."""
        self.do_SPAM()

    def do_GET(self):
        """Forward a GET request."""
        self.do_SPAM()

    def do_POST(self):
        """Forward a POST request."""
        self.do_SPAM()

    def do_SPAM(self):
        """Common forwarding path for GET/POST/HEAD.

        Reads the request body, lets ``request_handler`` rewrite or consume
        it, forwards it upstream, lets ``response_handler`` rewrite the
        decoded response body, replays the (re-encoded) response to the
        client and finally hands the exchange to ``save_handler``.
        """
        req = self
        content_length = int(req.headers.get('Content-Length', 0))
        if content_length > 0:
            reqbody = self.rfile.read(content_length)
        else:
            reqbody = None
        replaced_reqbody = self.request_handler(req, reqbody)
        if replaced_reqbody is True:
            return  # hook already answered the client
        elif replaced_reqbody is not None:
            reqbody = replaced_reqbody
            if 'Content-Length' in req.headers:
                req.headers['Content-Length'] = str(len(reqbody))
        self.remove_hop_by_hop_headers(req.headers)
        # Keep the upstream connection alive only when pooling is enabled.
        if self.upstream_timeout:
            req.headers['Connection'] = 'Keep-Alive'
        else:
            req.headers['Connection'] = 'close'
        if self.proxy_via:
            self.modify_via_header(req.headers)
        try:
            res, resdata = self.request_to_upstream_server(req, reqbody)
        except socket.error:
            return
        # Decode the body so the response hook sees plain content.
        content_encoding = res.headers.get('Content-Encoding', 'identity')
        resbody = self.decode_content_body(resdata, content_encoding)
        replaced_resbody = self.response_handler(req, reqbody, res, resbody)
        if replaced_resbody is True:
            return
        elif replaced_resbody is not None:
            resdata = self.encode_content_body(replaced_resbody, content_encoding)
            if 'Content-Length' in res.headers:
                res.headers['Content-Length'] = str(len(resdata))
            resbody = replaced_resbody
        self.remove_hop_by_hop_headers(res.headers)
        if self.timeout:
            res.headers['Connection'] = 'Keep-Alive'
        else:
            res.headers['Connection'] = 'close'
        if self.proxy_via:
            self.modify_via_header(res.headers)
        self.send_response(res.status, res.reason)
        for k, v in list(res.headers.items()):
            if k == 'set-cookie':
                # Folded Set-Cookie values must be re-emitted one per header.
                for value in self.split_set_cookie_header(v):
                    self.send_header(k, value)
            else:
                self.send_header(k, v)
        self.end_headers()
        if self.command != 'HEAD':
            self.wfile.write(resdata)
        with self.global_lock:
            self.save_handler(req, reqbody, res, resbody)

    def request_to_upstream_server(self, req, reqbody):
        """Send *req* to the origin over a pooled connection.

        Returns ``(response, raw_body_bytes)``.  A stale keep-alive
        connection (empty BadStatusLine) is closed and retried.
        """
        u = urlsplit(req.path)
        origin = (u.scheme, u.netloc)
        req.headers['Host'] = u.netloc
        selector = "%s?%s" % (u.path, u.query) if u.query else u.path
        while True:
            with self.lock_origin(origin):
                conn = self.open_origin(origin)
                try:
                    conn.request(req.command, selector, reqbody, headers=dict(req.headers))
                except socket.error:
                    self.close_origin(origin)
                    raise
                try:
                    # NOTE(review): getresponse() takes no `buffering` kwarg
                    # on Python 3 (Python 2 extension) -- this raises
                    # TypeError on Py3; confirm the target runtime.
                    res = conn.getresponse(buffering=True)
                except http.client.BadStatusLine as e:
                    if e.line == "''":
                        # Server closed the pooled connection; reconnect.
                        self.close_origin(origin)
                        continue
                    else:
                        raise
                resdata = res.read()
                res.headers = res.msg
                if not self.upstream_timeout or 'close' in res.headers.get('Connection', ''):
                    self.close_origin(origin)
                else:
                    self.reset_timer(origin)
                return res, resdata

    def lock_origin(self, origin):
        """Return (creating on demand) the per-origin pool lock."""
        d = self.conn_table.setdefault(origin, {})
        if not 'lock' in d:
            d['lock'] = Lock()
        return d['lock']

    def open_origin(self, origin):
        """Return a pooled connection to *origin*, opening one if needed."""
        conn = self.conn_table[origin].get('connection')
        if not conn:
            scheme, netloc = origin
            if scheme == 'https':
                conn = http.client.HTTPSConnection(netloc)
            else:
                conn = http.client.HTTPConnection(netloc)
            self.reset_timer(origin)
            self.conn_table[origin]['connection'] = conn
        return conn

    def reset_timer(self, origin):
        """(Re)arm the idle timer that closes *origin*'s pooled connection."""
        timer = self.conn_table[origin].get('timer')
        if timer:
            timer.cancel()
        if self.upstream_timeout:
            timer = Timer(self.upstream_timeout, self.close_origin, args=[origin])
            timer.daemon = True
            timer.start()
        else:
            timer = None
        self.conn_table[origin]['timer'] = timer

    def close_origin(self, origin):
        """Cancel the idle timer and drop the pooled connection to *origin*."""
        timer = self.conn_table[origin]['timer']
        if timer:
            timer.cancel()
        conn = self.conn_table[origin]['connection']
        conn.close()
        del self.conn_table[origin]['connection']

    def remove_hop_by_hop_headers(self, headers):
        """Strip hop-by-hop headers (plus any named in Connection)."""
        hop_by_hop_headers = ['Connection', 'Keep-Alive', 'Proxy-Authenticate', 'Proxy-Authorization', 'TE', 'Trailers', 'Trailer', 'Transfer-Encoding', 'Upgrade']
        connection = headers.get('Connection')
        if connection:
            keys = re.split(r',\s*', connection)
            hop_by_hop_headers.extend(keys)
        for k in hop_by_hop_headers:
            if k in headers:
                del headers[k]

    def modify_via_header(self, headers):
        """Append this proxy's pseudonym to the Via header."""
        via_string = "%s %s" % (self.protocol_version, self.proxy_via)
        via_string = re.sub(r'^HTTP/', '', via_string)
        original = headers.get('Via')
        if original:
            headers['Via'] = original + ', ' + via_string
        else:
            headers['Via'] = via_string

    def decode_content_body(self, data, content_encoding):
        """Decompress *data* according to its Content-Encoding."""
        if content_encoding in ('gzip', 'x-gzip'):
            # NOTE(review): StringIO holds text; gzip needs a bytes buffer
            # (io.BytesIO) on Python 3 -- confirm the target runtime.
            io = StringIO(data)
            with gzip.GzipFile(fileobj=io) as f:
                body = f.read()
        elif content_encoding == 'deflate':
            body = zlib.decompress(data)
        elif content_encoding == 'identity':
            body = data
        else:
            raise Exception("Unknown Content-Encoding: %s" % content_encoding)
        return body

    def encode_content_body(self, body, content_encoding):
        """Compress *body* according to Content-Encoding (inverse of decode)."""
        if content_encoding in ('gzip', 'x-gzip'):
            io = StringIO()
            with gzip.GzipFile(fileobj=io, mode='wb') as f:
                f.write(body)
            data = io.getvalue()
        elif content_encoding == 'deflate':
            data = zlib.compress(body)
        elif content_encoding == 'identity':
            data = body
        else:
            raise Exception("Unknown Content-Encoding: %s" % content_encoding)
        return data

    def split_set_cookie_header(self, value):
        """Split a folded Set-Cookie header into individual cookie strings."""
        re_cookies = r'([^=]+=[^,;]+(?:;\s*Expires=[^,]+,[^,;]+|;[^,;]+)*)(?:,\s*)?'
        return re.findall(re_cookies, value, flags=re.IGNORECASE)

    def request_handler(self, req, reqbody):
        """Hook: inspect/replace the request body; return True to consume."""
        pass

    def response_handler(self, req, reqbody, res, resbody):
        """Hook: inspect/replace the decoded response body."""
        pass

    def save_handler(self, req, reqbody, res, resbody):
        """Hook: persist the exchange; always called under global_lock."""
        pass
def test(HandlerClass=SimpleHTTPProxyHandler, ServerClass=ThreadingHTTPServer, protocol="HTTP/1.1"):
    """Create the proxy server on the configured port and serve forever."""
    port = config['porta']
    HandlerClass.protocol_version = protocol
    httpd = ServerClass(('', port), HandlerClass)
    bound_address = httpd.socket.getsockname()
    print("Servidor Proxy inciado em " + str(bound_address[0]) + " na porta " + str(bound_address[1]))
    httpd.serve_forever()


if __name__ == '__main__':
    test()
| 31.254438 | 163 | 0.571469 |
103c33ca7c8ef0935020914b8455b3c5563a2059 | 2,132 | py | Python | preprocessing/audio_extractor.py | ElonGagarin/easy_meeting_hack | 27cbead34d73d410ad478a4f4ce7c27418aed116 | [
"MIT"
] | 1 | 2021-10-12T10:21:06.000Z | 2021-10-12T10:21:06.000Z | preprocessing/audio_extractor.py | Maximgitman/easy_meeting | eb60d740e136ce139fddb447cd4e2aacb636ad98 | [
"MIT"
] | null | null | null | preprocessing/audio_extractor.py | Maximgitman/easy_meeting | eb60d740e136ce139fddb447cd4e2aacb636ad98 | [
"MIT"
] | null | null | null | import os
import moviepy.editor as mp
__version__ = "0.0.5"
class AudioExtractor(object):
    """Extract or convert a media file's audio track to ``output.<ext>``.

    Video inputs (mp4/avi/mkv) have their audio track extracted; audio
    inputs (mp3/wav) are converted.  WAV output is written as 16 kHz mono
    PCM; MP3 output uses moviepy's defaults.  The result is always written
    to ``output.<ext>`` in the current working directory.

    Improvements over the previous version: the duplicated wav/mp3 writing
    logic is factored into ``_write_audio``, and clips are closed in a
    ``finally`` block so ffmpeg resources are released even if writing fails.
    """

    def __init__(self, path):
        """Remember *path* and split it into directory / name / extension."""
        self.path = path
        self.dir = os.path.split(path)[0]
        filename, file_ext = os.path.splitext(path)
        self.filename = filename
        self.file_ext = file_ext[1:]  # extension without the leading dot
        self.sr = 16000  # target sample rate for WAV output
        self.video_ext = ('mp4', 'avi', 'mkv')
        self.audio_ext = ('mp3', 'wav')

    def get_audio(self, output_ext):
        """Produce ``output.<output_ext>`` from the source file.

        Files whose extension is neither a known video nor a known audio
        format are silently ignored (kept for backward compatibility).
        """
        if self.file_ext in self.video_ext:
            self.extract_audio(output_ext)
        elif self.file_ext in self.audio_ext:
            self.convert_audio(output_ext)

    def extract_audio(self, output_ext):
        """Extract a video file's audio track to ``output.<output_ext>``."""
        clip = mp.VideoFileClip(f"{self.path}")
        try:
            self._write_audio(clip.audio, output_ext)
        finally:
            clip.close()

    def convert_audio(self, output_ext):
        """Convert an audio file to ``output.<output_ext>``."""
        clip = mp.AudioFileClip(f"{self.path}")
        try:
            self._write_audio(clip, output_ext)
        finally:
            clip.close()

    def _write_audio(self, audio_clip, output_ext):
        """Shared writer: 16 kHz mono PCM for wav, defaults for mp3."""
        if output_ext == 'wav':
            audio_clip.write_audiofile(f"output.{output_ext}",
                                       codec="pcm_s16le",
                                       fps=self.sr,
                                       ffmpeg_params=["-ac", "1"])
        elif output_ext == 'mp3':
            audio_clip.write_audiofile(f"output.{output_ext}")
def multiple_extraction(filename, formats=None, remove_original=False):
    """Extract *filename*'s audio into every extension listed in *formats*.

    Each format writes ``output.<ext>`` via AudioExtractor.  When
    *remove_original* is true, the source file is deleted afterwards.

    Fixes over the previous version: the mutable default ``formats=[]`` is
    replaced by the None sentinel, dead commented-out cleanup code was
    removed, the loop variable no longer shadows the ``format`` builtin,
    and the no-op ``'' + filename`` concatenation is gone.
    """
    if formats is None:
        formats = []
    extractor = AudioExtractor(filename)
    for output_format in formats:
        extractor.get_audio(output_ext=output_format)
    if remove_original:
        os.remove(filename)
if __name__ == "__main__":
    # Interactive entry point: prompt (in Russian, "enter the file path")
    # for a media path and produce a 16 kHz mono output.wav from it.
    path = input("Введите путь к файлу: ")
    extractor = AudioExtractor(path)
    extractor.get_audio(output_ext='wav')
| 32.8 | 69 | 0.550657 |
1f23e75a8d624bb5ed90f1e5680715d560422409 | 3,467 | py | Python | Stanford CS20 2018/examples/03_logreg_placeholder.py | neilliang90/DeepLearning-Tensorflow | 213be49273a43ee2636789235b33331cce49736e | [
"MIT"
] | null | null | null | Stanford CS20 2018/examples/03_logreg_placeholder.py | neilliang90/DeepLearning-Tensorflow | 213be49273a43ee2636789235b33331cce49736e | [
"MIT"
] | null | null | null | Stanford CS20 2018/examples/03_logreg_placeholder.py | neilliang90/DeepLearning-Tensorflow | 213be49273a43ee2636789235b33331cce49736e | [
"MIT"
] | null | null | null | """ Solution for simple logistic regression model for MNIST
with placeholder
MNIST dataset: yann.lecun.com/exdb/mnist/
Created by Chip Huyen (huyenn@cs.stanford.edu)
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 03
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time
import utils
tf.reset_default_graph()

# Hyper-parameters for the model.
learning_rate = 0.01
batch_size = 128
n_epochs = 30

# Step 1: Read in data
# using TF Learn's built in function to load MNIST data to the folder data/mnist
mnist = input_data.read_data_sets('data/mnist', one_hot=True)
X_batch, Y_batch = mnist.train.next_batch(batch_size)

# Step 2: create placeholders for features and labels
# each image in the MNIST data is of shape 28*28 = 784,
# so each image is represented with a 1x784 tensor.
# There are 10 classes per image, corresponding to digits 0 - 9;
# each label is a one-hot vector.
X = tf.placeholder(tf.float32, [batch_size, 784], name='image')
Y = tf.placeholder(tf.int32, [batch_size, 10], name='label')

# Step 3: create weights and bias
# w is initialized to random variables with mean of 0, stddev of 0.01;
# b is initialized to 0.  Shapes follow Y = tf.matmul(X, w) + b.
w = tf.get_variable(name='weights', shape=(784, 10), initializer=tf.random_normal_initializer())
b = tf.get_variable(name='bias', shape=(1, 10), initializer=tf.zeros_initializer())

# Step 4: build model -- raw logits, softmax is applied inside the loss.
logits = tf.matmul(X, w) + b

# Step 5: define loss function
# use cross entropy of softmax of logits as the loss function
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='loss')
loss = tf.reduce_mean(entropy)  # computes the mean over all the examples in the batch
# loss = tf.reduce_mean(-tf.reduce_sum(tf.nn.softmax(logits) * tf.log(Y), reduction_indices=[1]))

# Step 6: define training op
# using Adam with the configured learning rate to minimize loss.
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

# Step 7: calculate accuracy with test set
preds = tf.nn.softmax(logits)
correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))

writer = tf.summary.FileWriter('./graphs/logreg_placeholder', tf.get_default_graph())
with tf.Session() as sess:
    start_time = time.time()
    sess.run(tf.global_variables_initializer())
    n_batches = int(mnist.train.num_examples/batch_size)

    # Train the model n_epochs times.
    for i in range(n_epochs):
        total_loss = 0
        for j in range(n_batches):
            X_batch, Y_batch = mnist.train.next_batch(batch_size)
            _, loss_batch = sess.run([optimizer, loss], {X: X_batch, Y:Y_batch})
            total_loss += loss_batch
        print('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))
    print('Total time: {0} seconds'.format(time.time() - start_time))

    # Test the model (accuracy counts correct predictions per batch).
    n_batches = int(mnist.test.num_examples/batch_size)
    total_correct_preds = 0
    for i in range(n_batches):
        X_batch, Y_batch = mnist.test.next_batch(batch_size)
        accuracy_batch = sess.run(accuracy, {X: X_batch, Y:Y_batch})
        total_correct_preds += accuracy_batch
    print('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))
writer.close()
| 36.114583 | 97 | 0.752235 |
bbef6c1dc4b12aa77aacc1d360f25f9416fe2828 | 929 | py | Python | applications/views.py | djangulo/ta_platform | 0aa9659180a4133dc460799308b57ecdf8fa96d4 | [
"MIT"
] | null | null | null | applications/views.py | djangulo/ta_platform | 0aa9659180a4133dc460799308b57ecdf8fa96d4 | [
"MIT"
] | null | null | null | applications/views.py | djangulo/ta_platform | 0aa9659180a4133dc460799308b57ecdf8fa96d4 | [
"MIT"
] | 2 | 2019-11-30T07:05:46.000Z | 2020-10-13T11:59:01.000Z | from django.conf import settings
from django.views.generic import FormView
from django.shortcuts import render, get_object_or_404, redirect
from applications.forms import ApplicationForm
def create_application(request):
    """Render the application form and create an Application on valid POST.

    On POST the submitted data is bound and, when valid, saved.  The same
    template is always re-rendered afterwards (no redirect is issued), with
    the company name pulled from ``settings.BRAND_DICT``.
    """
    if request.method == 'POST':
        bound_form = ApplicationForm(request.POST)
        if bound_form.is_valid():
            new_application = bound_form.save(commit=False)
            new_application.save()
        form = bound_form
    else:
        form = ApplicationForm()
    template_context = {
        'form': form,
        'COMPANY_NAME': settings.BRAND_DICT['COMPANY_NAME'],
    }
    return render(request, 'applications/application_form.html', context=template_context)
def edit_application(request, id=None):
    """Display and process the edit form for an existing application.

    Looks the ``Application`` up by ``id`` (404 when missing), binds the
    submitted data on POST, saves valid changes, and re-renders the form
    template.
    """
    # NOTE(review): ``Application`` was referenced without any import at
    # module level (NameError on every call); assuming the conventional
    # ``applications.models`` location -- confirm against the app layout.
    from applications.models import Application
    application = get_object_or_404(Application, id=id)
    # Bug fix: ``request.Post`` -> ``request.POST`` (the attribute is
    # upper-case; the original raised AttributeError on every request).
    form = ApplicationForm(request.POST or None, instance=application)
    if form.is_valid():
        form.save()
    # Bug fix: template name typo 'application_form.htlm' -> '.html'.
    return render(request, 'applications/application_form.html', {'form': form})
| 32.034483 | 80 | 0.688913 |
e6d8ca093ecb5d54bb7143209223467e14e9c539 | 28 | py | Python | 2020-04-16 - Python Medellin - Creating a chat service with WebSockets/Examples/Chat/app/models/__init__.py | williamegomezo/Talks | 0b123c6c8dec9c1a2357c142b617109068caf060 | [
"MIT"
] | 2 | 2020-04-24T23:03:41.000Z | 2020-11-10T15:08:38.000Z | 2020-04-16 - Python Medellin - Creating a chat service with WebSockets/Examples/Chat/app/models/__init__.py | williamegomezo/Talks | 0b123c6c8dec9c1a2357c142b617109068caf060 | [
"MIT"
] | 5 | 2021-03-10T11:42:15.000Z | 2022-02-10T21:38:54.000Z | 2020-04-16 - Python Medellin - Creating a chat service with WebSockets/Examples/Chat/app/models/__init__.py | williamegomezo/Talks | 0b123c6c8dec9c1a2357c142b617109068caf060 | [
"MIT"
] | 4 | 2020-10-01T04:32:27.000Z | 2022-02-14T15:08:16.000Z | from .user import Base, User | 28 | 28 | 0.785714 |
6022b6ac47f82fd0b0b1c0849dc21065b0908e2c | 5,155 | py | Python | ansible/modules/network/junos/junos_banner.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | ansible/modules/network/junos/junos_banner.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | ansible/modules/network/junos/junos_banner.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_banner
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage multiline banners on Juniper JUNOS devices
description:
- This will configure both login and motd banners on network devices.
It allows playbooks to add or remote
banner text from the active running configuration.
options:
banner:
description:
- Specifies which banner that should be
configured on the remote device. Value C(login) indicates
system login message prior to authenticating, C(motd) is login
announcement after successful authentication.
required: true
choices: ['login', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
default: null
state:
description:
- Specifies whether or not the configuration is
present in the current devices active running configuration.
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
choices: [True, False]
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
"""
EXAMPLES = """
- name: configure the login banner
junos_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
junos_banner:
banner: motd
state: absent
- name: deactivate the motd banner
junos_banner:
banner: motd
state: present
active: False
- name: activate the motd banner
junos_banner:
banner: motd
state: present
active: True
- name: Configure banner from file
junos_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: string
sample: >
[edit system login]
+ message \"this is my login banner\";
"""
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.junos import junos_argument_spec, check_args
from ansible.module_utils.junos import load_config, map_params_to_obj, map_obj_to_ele
from ansible.module_utils.junos import commit_configuration, discard_changes, locked_config
try:
from lxml.etree import tostring
except ImportError:
from xml.etree.ElementTree import tostring
USE_PERSISTENT_CONNECTION = True
def validate_param_values(module, obj):
    """Run per-parameter validators over *obj*.

    For every key in *obj* a module-level function named ``validate_<key>``
    is looked up; when one exists and is callable it is invoked with the
    corresponding value from ``module.params`` and the module itself.
    Keys without a validator are silently skipped.
    """
    module_globals = globals()
    for param_name in obj:
        candidate = module_globals.get('validate_%s' % param_name)
        if not callable(candidate):
            continue
        candidate(module.params.get(param_name), module)
def main():
    """Main entry point for module execution.

    Builds the Ansible argument spec, maps the ``text`` parameter to the
    matching JUNOS XML leaf, pushes the generated configuration to the
    device under a configuration lock, and reports whether anything changed.
    """
    argument_spec = dict(
        banner=dict(required=True, choices=['login', 'motd']),
        text=dict(),
        state=dict(default='present', choices=['present', 'absent']),
        active=dict(default=True, type='bool')
    )
    # Merge in the shared junos connection/provider options.
    argument_spec.update(junos_argument_spec)
    # ``text`` is mandatory only when the banner is being created.
    required_if = [('state', 'present', ('text',))]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    top = 'system/login'
    # login banners live in <message>, motd banners in <announcement>.
    param_to_xpath_map = collections.OrderedDict()
    param_to_xpath_map.update([
        ('text', {'xpath': 'message' if module.params['banner'] == 'login' else 'announcement', 'leaf_only': True})
    ])
    validate_param_values(module, param_to_xpath_map)
    want = map_params_to_obj(module, param_to_xpath_map)
    ele = map_obj_to_ele(module, want, top)
    # Apply under an exclusive configuration lock; commit on real runs,
    # discard in check mode so the device is left untouched.
    with locked_config(module):
        diff = load_config(module, tostring(ele), warnings, action='merge')
        commit = not module.check_mode
        if diff:
            if commit:
                commit_configuration(module)
            else:
                discard_changes(module)
            result['changed'] = True
            if module._diff:
                result['diff'] = {'prepared': diff}
    module.exit_json(**result)
# Run the module when Ansible executes this file directly.
if __name__ == "__main__":
    main()
| 28.324176 | 115 | 0.671387 |
7a915085b0bb91f6566e3946f34ea5fb1f788d65 | 3,074 | py | Python | arkanoid/PPO/test.py | econnolly27/ReinforcementLearningNES | 45464fbf193b5c7249013f7ece06afec27a60d5a | [
"MIT",
"BSD-3-Clause"
] | 2 | 2022-01-29T10:38:05.000Z | 2022-01-29T10:38:12.000Z | arkanoid/PPO/test.py | econnolly27/ReinforcementLearningNES | 45464fbf193b5c7249013f7ece06afec27a60d5a | [
"MIT",
"BSD-3-Clause"
] | null | null | null | arkanoid/PPO/test.py | econnolly27/ReinforcementLearningNES | 45464fbf193b5c7249013f7ece06afec27a60d5a | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """
@author: Viet Nguyen <nhviet1009@gmail.com>
From: https://github.com/uvipen/Super-mario-bros-PPO-pytorch
Modified for Benchmarking Reinforcement Learning Algorithms in NES Games by Erin-Louise Connolly
"""
import os
import argparse
import torch
from src.env import create_train_env
from src.model import PPO
from src.env import MultipleEnvironments
from src.helpers import JoypadSpace, SIMPLE_MOVEMENT, COMPLEX_MOVEMENT, RIGHT_ONLY
import torch.nn.functional as F
import time
import csv
# Point X clients at display :1 (presumably a virtual display for
# env.render() on a headless box -- TODO confirm) and cap OpenMP at one
# thread before numeric libraries spin up their pools.
os.environ['DISPLAY'] = ':1'
os.environ['OMP_NUM_THREADS'] = '1'
def get_args():
    """Build and parse the command-line options for an evaluation run.

    Returns the parsed namespace with ``action_type``, ``saved_path``,
    ``output_path`` and ``timestr`` (defaulting to the current timestamp).
    """
    run_stamp = time.strftime("%Y%m%d-%H%M%S")
    arg_parser = argparse.ArgumentParser(
        """Implementation of model described in the paper: Proximal Policy Optimization Algorithms for Contra Nes""")
    string_options = {
        "--action_type": "complex",
        "--saved_path": "trained_models",
        "--output_path": "output",
        "--timestr": run_stamp,
    }
    for flag, default_value in string_options.items():
        arg_parser.add_argument(flag, type=str, default=default_value)
    return arg_parser.parse_args()
def test(opt):
    """Run the trained PPO agent on Arkanoid for one episode.

    Loads the saved policy from under ``opt.saved_path``, steps the
    environment greedily (argmax over the softmax policy) until ``done``,
    and appends the in-game score of every step to a timestamped CSV file.
    """
    # NOTE(review): mutates opt.saved_path in place; callers see the change.
    opt.saved_path = os.getcwd() + '/arkanoid/PPO/' + opt.saved_path
    savefile = opt.saved_path + '/arkanoid_ppo_test' + opt.timestr + '.csv'
    print(savefile)
    title = ['Score']
    # Create the CSV and write the header row.
    with open(savefile, 'w', newline='') as sfile:
        writer = csv.writer(sfile)
        writer.writerow(title)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)
    # Pick the action set matching the configuration used at training time.
    if opt.action_type == "right":
        actions = RIGHT_ONLY
    elif opt.action_type == "simple":
        actions = SIMPLE_MOVEMENT
    else:
        actions = COMPLEX_MOVEMENT
    env = create_train_env(1, 1, actions)
    # env = MultipleEnvironments(opt.world, opt.stage, opt.action_type, 1)
    model = PPO(env.observation_space.shape[0], len(actions))
    print(os.getcwd())
    if torch.cuda.is_available():
        #model.load_state_dict(torch.load("trained_models/abc"))
        model.load_state_dict(torch.load("{}/ppo_arkanoid".format(opt.saved_path)))
        model.cuda()
    else:
        # map_location keeps a GPU-trained checkpoint loadable on CPU.
        model.load_state_dict(torch.load("{}/ppo_arkanoid".format(opt.saved_path),
                                         map_location=lambda storage, loc: storage))
    model.eval()
    state = torch.from_numpy(env.reset())
    scores= []
    while True:
        if torch.cuda.is_available():
            state = state.cuda()
        logits, value = model(state)
        policy = F.softmax(logits, dim=1)
        # Greedy (deterministic) action selection for evaluation.
        action = torch.argmax(policy).item()
        state, reward, done, info = env.step(action)
        state = torch.from_numpy(state)
        scores.append(info['score'])
        data = [info['score']]
        # Append this step's score to the CSV (file reopened every step).
        with open(savefile, 'a', newline='') as sfile:
            writer = csv.writer(sfile)
            writer.writerows([data])
        env.render()
        if done:
            #scores.append(info['score'])
            print(max(scores))
            break
# Script entry point: parse CLI options and run one evaluation episode.
if __name__ == "__main__":
    opt = get_args()
    test(opt)
| 33.053763 | 117 | 0.649967 |
a374f50369bfe478c208f576a5f50b40f87f8b52 | 5,157 | py | Python | dbaas/maintenance/admin/database_create.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | null | null | null | dbaas/maintenance/admin/database_create.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | null | null | null | dbaas/maintenance/admin/database_create.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import patterns, url
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.html import format_html
from .database_maintenance_task import DatabaseMaintenanceTaskAdmin
from ..models import DatabaseCreate
from notification.tasks import TaskRegister
class DatabaseCreateAdmin(DatabaseMaintenanceTaskAdmin):
    """Django admin for DatabaseCreate maintenance tasks.

    Extends the shared maintenance-task admin with two extra admin views
    (retry and rollback of a failed create) and a computed column that
    renders the matching action buttons.
    """
    search_fields = ["name", "infra__name", "user", "task__id",
                     "task__task_id"]
    list_filter = ["plan", "team", "status", "project"]
    list_display = (
        "name", "infra", "team", "project", "environment", "plan_name", "user",
        "current_step", "friendly_status", "maintenance_action", "link_task",
        "started_at", "finished_at"
    )
    readonly_fields = (
        "database", "infra", "plan", "plan_name", "environment", "team",
        "project", "name", "description", "subscribe_to_email_events",
        "is_protected", "user", "link_task", "started_at", "finished_at",
        "current_step", "status", "maintenance_action"
    )

    def maintenance_action(self, maintenance_task):
        """Render Retry/Rollback buttons for a failed, retryable task.

        Returns the literal string 'N/A' when the task is not in an error
        state or retry is disabled.
        """
        if not maintenance_task.is_status_error:
            return 'N/A'
        if not maintenance_task.can_do_retry:
            return 'N/A'
        # URLs match the patterns registered in get_urls() below.
        url_retry = "/admin/maintenance/databasecreate/{}/retry/".format(
            maintenance_task.id
        )
        html_retry = ("<a title='Retry' class='btn btn-info' "
                      "href='{}'>Retry</a>").format(url_retry)
        url_rollback = "/admin/maintenance/databasecreate/{}/rollback/".format(
            maintenance_task.id
        )
        html_rollback = ("<a title='Rollback' class='btn btn-danger' "
                         "href='{}'>Rollback</a>").format(url_rollback)
        spaces = ' ' * 3
        html_content = '{}{}{}'.format(html_retry, spaces, html_rollback)
        return format_html(html_content)

    def get_urls(self):
        """Prepend the retry/rollback admin views to the default URL set."""
        # NOTE(review): patterns() is the legacy (pre-Django-1.10) URL API,
        # consistent with the rest of this codebase.
        base = super(DatabaseCreateAdmin, self).get_urls()
        admin = patterns(
            '',
            url(
                r'^/?(?P<create_id>\d+)/retry/$',
                self.admin_site.admin_view(self.retry_view),
                name="create_database_retry"
            ),
            url(
                r'^/?(?P<create_id>\d+)/rollback/$',
                self.admin_site.admin_view(self.rollback_view),
                name="create_database_rollback"
            ),
        )
        return admin + base

    def retry_view(self, request, create_id):
        """Re-register the failed create task with its original parameters.

        Redirects to the task-history changelist filtered by the current
        user so the new task is immediately visible.
        """
        retry_from = get_object_or_404(DatabaseCreate, pk=create_id)
        # ``redirect`` here is the error-redirect response produced by
        # check_status (it shadows the imported django ``redirect`` helper).
        success, redirect = self.check_status(request, create_id, 'retry')
        if not success:
            return redirect
        TaskRegister.database_create(
            name=retry_from.name,
            plan=retry_from.plan,
            environment=retry_from.environment,
            team=retry_from.team,
            project=retry_from.project,
            description=retry_from.description,
            backup_hour=retry_from.infra.backup_hour,
            maintenance_window=retry_from.infra.maintenance_window,
            maintenance_day=retry_from.infra.maintenance_day,
            subscribe_to_email_events=retry_from.subscribe_to_email_events,
            is_protected=retry_from.is_protected,
            user=request.user,
            retry_from=retry_from
        )
        url = reverse('admin:notification_taskhistory_changelist')
        filter = "user={}".format(request.user.username)
        return HttpResponseRedirect('{}?{}'.format(url, filter))

    def rollback_view(self, request, create_id):
        """Register a rollback task for the failed create, then redirect."""
        rollback_from = get_object_or_404(DatabaseCreate, pk=create_id)
        success, redirect = self.check_status(request, create_id, 'rollback')
        if not success:
            return redirect
        TaskRegister.database_create_rollback(
            rollback_from=rollback_from,
            user=request.user,
        )
        url = reverse('admin:notification_taskhistory_changelist')
        filter = "user={}".format(request.user.username)
        return HttpResponseRedirect('{}?{}'.format(url, filter))

    def check_status(self, request, create_id, operation):
        """Validate that *operation* is allowed for the given create task.

        Returns ``(success, redirect_response)``; when not successful an
        error message is queued and the caller should return the redirect
        (back to the task's change page).
        """
        create = DatabaseCreate.objects.get(id=create_id)
        success = True
        if success and not create.is_status_error:
            success = False
            messages.add_message(
                request, messages.ERROR,
                "You can not do {} because create status is '{}'".format(
                    operation, create.get_status_display()
                ),
            )
        if success and not create.can_do_retry:
            success = False
            messages.add_message(
                request, messages.ERROR,
                "Create {} is disabled".format(operation)
            )
        return success, HttpResponseRedirect(
            reverse(
                'admin:maintenance_databasecreate_change', args=(create_id,)
            )
        )
| 36.574468 | 79 | 0.61528 |
b2a2574a4de39313bbbe18c79b224df96393690c | 934 | py | Python | bridge/py_api/bohrium_api/messaging.py | bh107/bohrium | 5b83e7117285fefc7779ed0e9acb0f8e74c7e068 | [
"Apache-2.0"
] | 236 | 2015-03-31T15:39:30.000Z | 2022-03-24T01:43:14.000Z | bridge/py_api/bohrium_api/messaging.py | bh107/bohrium | 5b83e7117285fefc7779ed0e9acb0f8e74c7e068 | [
"Apache-2.0"
] | 324 | 2015-05-27T10:35:38.000Z | 2021-12-10T07:34:10.000Z | bridge/py_api/bohrium_api/messaging.py | bh107/bohrium | 5b83e7117285fefc7779ed0e9acb0f8e74c7e068 | [
"Apache-2.0"
] | 41 | 2015-05-26T12:38:42.000Z | 2022-01-10T15:16:37.000Z | """
Send and receive pre-defined messages through the Bohrium component stack
=========================================================================
"""
from ._bh_api import message as msg
def statistic_enable_and_reset():
    """Reset and enable the Bohrium statistic.

    Sends the ``statistic_enable_and_reset`` message through the component
    stack and returns the runtime's reply.
    """
    return msg("statistic_enable_and_reset")
def statistic():
    """Return a YAML string of Bohrium statistic (as replied by the stack)."""
    return msg("statistic")
def gpu_disable():
    """Disable the GPU backend in the current runtime stack."""
    return msg("GPU: disable")
def gpu_enable():
    """Enable the GPU backend in the current runtime stack."""
    return msg("GPU: enable")
def runtime_info():
    """Return a YAML string describing the current Bohrium runtime."""
    return msg("info")
def cuda_use_current_context():
    """Tell the CUDA backend to reuse the caller's current CUDA context.

    Useful for interop with PyCUDA, which owns the context in that case.
    """
    return msg("CUDA: use current context")
| 25.243243 | 91 | 0.649893 |
1df1f1946a947c980dacf570fd9c9c2aabd90dd5 | 809 | py | Python | gallery/urls.py | Robinkariuki/Gallery | 21a93bed7813490ba03967e3a9c784b923791735 | [
"MIT"
] | null | null | null | gallery/urls.py | Robinkariuki/Gallery | 21a93bed7813490ba03967e3a9c784b923791735 | [
"MIT"
] | 3 | 2020-02-12T03:13:09.000Z | 2021-06-10T22:03:28.000Z | gallery/urls.py | Robinkariuki/Gallery | 21a93bed7813490ba03967e3a9c784b923791735 | [
"MIT"
] | 1 | 2020-11-26T05:40:17.000Z | 2020-11-26T05:40:17.000Z | """gallery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
# URL routing: the Django admin, plus everything else delegated to the
# ``images`` app's URLconf via the catch-all empty pattern.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'',include('images.urls'))
]
| 33.708333 | 79 | 0.699629 |
be0e0f473c0c19038079dd6b3b9c3e63a17529ac | 1,176 | py | Python | setup.py | smilelight/lightSpider | 7430324753501c8f61e7152b6403845a74c12dee | [
"Apache-2.0"
] | 12 | 2020-01-07T01:20:54.000Z | 2021-04-25T06:47:53.000Z | setup.py | smilelight/lightSpider | 7430324753501c8f61e7152b6403845a74c12dee | [
"Apache-2.0"
] | 1 | 2020-01-09T13:54:06.000Z | 2020-01-11T09:36:59.000Z | setup.py | smilelight/lightSpider | 7430324753501c8f61e7152b6403845a74c12dee | [
"Apache-2.0"
] | 6 | 2020-01-07T04:06:55.000Z | 2021-04-21T02:40:02.000Z | from distutils.core import setup
import setuptools
# Read packaging metadata from the repository's support files.
with open('./version.txt', encoding='utf8') as f:
    # Bug fix: strip the trailing newline so the version string is not
    # something like "0.1.0\n".
    version = f.read().strip()
with open('./README.md', 'r', encoding='utf8') as f:
    long_description = f.read()
with open('./requirements.txt', 'r', encoding='utf8') as f:
    # Robustness: drop blank lines and comment lines so they are not
    # passed to install_requires as empty/bogus requirement specifiers.
    install_requires = [
        stripped for stripped in (line.strip() for line in f)
        if stripped and not stripped.startswith('#')
    ]

setup(
    name='lightSpider',
    version=version,
    description="lightsmile's personal spider for crawling data",
    author='lightsmile',
    author_email='iamlightsmile@gmail.com',
    url='https://github.com/smilelight/lightSpider',
    packages=setuptools.find_packages(),
    install_requires=install_requires,
    long_description=long_description,
    long_description_content_type='text/markdown',
    license='Apache-2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries'
    ],
)
| 32.666667 | 68 | 0.659014 |
84a97b19724e5cc2760099535bdc9215b6f6300b | 6,184 | py | Python | [Kaleido-subs]/Completed/Joshiraku [BD]/JoshirakuBD_12.py | LightArrowsEXE/Encoding-Projects | 4ea96a5b25a7710f615ada5ff25949c496492b53 | [
"MIT"
] | 57 | 2019-01-31T17:32:46.000Z | 2022-03-23T05:46:51.000Z | [Kaleido-subs]/Completed/Joshiraku [BD]/JoshirakuBD_12.py | LightArrowsEXE/Encoding-Projects | 4ea96a5b25a7710f615ada5ff25949c496492b53 | [
"MIT"
] | null | null | null | [Kaleido-subs]/Completed/Joshiraku [BD]/JoshirakuBD_12.py | LightArrowsEXE/Encoding-Projects | 4ea96a5b25a7710f615ada5ff25949c496492b53 | [
"MIT"
] | 12 | 2019-04-30T06:16:13.000Z | 2022-03-14T16:15:07.000Z | from typing import Optional, Sequence, Tuple, Union
import vapoursynth as vs
from lvsfunc.misc import source
from vardautomation import (JAPANESE, FileInfo, MplsReader, PresetAAC,
PresetBD, PresetChapXML, VPath)
from project_module import encoder as enc, flt
core = vs.core
core.num_threads = 4

# Sources
# FileInfo wraps each BDMV stream with its hand-picked trim range and a
# cache-less source filter; the episode also pulls the creditless OP/ED
# and an episode-03 stream (used below to patch an OP animation error).
JP_BD = FileInfo(r'BDMV/130522_JOSHIRAKU_VOL6/BDMV/STREAM/00000.m2ts', (34815, 69581),
                 idx=lambda x: source(x, cachedir=''),
                 preset=[PresetBD, PresetAAC, PresetChapXML])
JP_BD_NCOP = FileInfo(r'BDMV/120926_JOSHIRAKU_VOL1/BDMV/STREAM/00001.m2ts', (24, -24),
                      idx=lambda x: source(x, cachedir=''))
JP_BD_NCED = FileInfo(r'BDMV/120926_JOSHIRAKU_VOL1/BDMV/STREAM/00002.m2ts', (24, -24),
                      idx=lambda x: source(x, cachedir=''))
JP_BD_03 = FileInfo(r'BDMV/121024_JOSHIRAKU_VOL2/BDMV/STREAM/00000.m2ts', (24, 2182),
                    idx=lambda x: source(x, cachedir=''))
JP_BD.name_file_final = VPath(fr"premux/{JP_BD.name} (Premux).mkv")
JP_BD.a_src_cut = VPath(f"{JP_BD.name}_cut.aac")
JP_BD.do_qpfile = True

# Chapter handling
# Keep only playlist chapter entries 6-11, matching the six names below.
CHAPTERS = MplsReader(r"BDMV/130522_JOSHIRAKU_VOL6", lang=JAPANESE).get_playlist()[0].mpls_chapters[0].to_chapters()
CHAP_NAMES: Sequence[Optional[str]] = ['OP', 'Part A', 'Part B', 'Part C', 'ED', 'Preview']
CHAPTERS = CHAPTERS[6:12]

# OP/ED Variables
# First frame of the OP/ED in this episode, and the length mismatch (in
# frames) between the episode and the creditless sources.
opstart = 0
edstart = 31888
op_offset = 2
ed_offset = 1
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Main filterchain: patch OP error, mask credits, edgefix, descale/
    rescale, denoise, fix chroma, deband and grain the episode clip.

    All thresholds and frame ranges below are hand-tuned for this episode.
    """
    import havsfunc as haf
    import lvsfunc as lvf
    import rekt
    import vardefunc as vdf
    from adptvgrnMod import adptvgrnMod
    from awsmfunc import bbmod
    from ccd import ccd
    from vsutil import depth, get_y
    from xvs import WarpFixChromaBlend

    src = JP_BD.clip_cut
    src_NCOP, src_NCED = JP_BD_NCOP.clip_cut, JP_BD_NCED.clip_cut
    # Pad the creditless clips with repeats of their last frame so their
    # lengths line up with the episode's OP/ED.
    src_NCOP = src_NCOP + src_NCOP[-1] * 11
    src_NCED = src_NCED + src_NCED[-1]
    src_03 = JP_BD_03.clip_cut
    # b = core.std.BlankClip(src, length=1)

    # Fixing an animation error in the NCOP
    # Splice the corresponding region from the episode-03 source over the
    # broken frames of the creditless OP.
    sqmask_NCOP = lvf.mask.BoundingBox((419, 827), (1500, 68))
    masked_NCOP = core.std.MaskedMerge(src_NCOP, src_03, sqmask_NCOP.get_mask(src_NCOP))
    masked_NCOP = lvf.rfs(src_NCOP, masked_NCOP, [(opstart+2064, opstart+2107)])

    # OP/ED stack comps to check that it lines up
    # op_scomp = lvf.scomp(src[opstart:opstart+src_NCOP.num_frames-1]+b, masked_NCOP[:-op_offset]+b)  # noqa
    # ed_scomp = lvf.scomp(src[edstart:edstart+src_NCED.num_frames-1]+b, src_NCED[:-ed_offset]+b)  # noqa

    # Masking credits
    # Diff against the creditless sources to isolate credit text; falls back
    # to a blank mask when the OP/ED is absent (start set to False).
    op_mask = vdf.dcm(
        src, src[opstart:opstart+src_NCOP.num_frames-op_offset], masked_NCOP[:-op_offset],
        start_frame=opstart, thr=25, prefilter=True) if opstart is not False \
        else get_y(core.std.BlankClip(src))
    ed_mask = vdf.dcm(
        src, src[edstart:edstart+src_NCED.num_frames-ed_offset], src_NCED[:-ed_offset],
        start_frame=edstart, thr=25, prefilter=False) if edstart is not False \
        else get_y(core.std.BlankClip(src))
    credit_mask = core.std.Expr([op_mask, ed_mask], expr='x y +')
    credit_mask = depth(credit_mask, 16).std.Binarize()

    # Edgefixing
    rkt = rekt.rektlvls(
        src,
        [0, 1079], [17, 16],
        [0, 1, 2, 3] + [1917, 1918, 1919], [16, 4, -2, 2] + [-2, 5, 14]
    )
    ef = bbmod(rkt, left=4, right=3, y=False)
    ef = depth(ef, 32)

    # Descaling + Rescaling
    # Descale luma to the native 720p, rescale with nnedi3, and restore
    # native-1080p detail (and credits) through masks.
    src_y = get_y(ef)
    descaled = lvf.kernels.Bicubic().descale(src_y, 1280, 720)
    rescaled = vdf.scale.nnedi3_upscale(descaled)
    downscaled = lvf.kernels.Bicubic(-1/2, 1/4).scale(rescaled, 1920, 1080)
    l_mask = vdf.mask.FDOG().get_mask(src_y, lthr=0.065, hthr=0.065).std.Maximum().std.Minimum()
    l_mask = l_mask.std.Median().std.Convolution([1] * 9)
    rescaled_masked = core.std.MaskedMerge(src_y, downscaled, l_mask)
    scaled = depth(vdf.misc.merge_chroma(rescaled_masked, ef), 16)
    unwarp = flt.line_darkening(scaled, 0.145).warp.AWarpSharp2(depth=2)
    sharp = haf.LSFmod(unwarp, strength=65, Smode=3, Lmode=1, edgemode=1, edgemaskHQ=True)
    mask_sharp = core.std.MaskedMerge(scaled, sharp, depth(l_mask, 16))
    upscaled = lvf.kernels.Bicubic().scale(descaled, 1920, 1080)
    descale_mask = lvf.scale.descale_detail_mask(src_y, upscaled)
    scale_restore_mask = core.std.Expr([credit_mask, descale_mask], "x y +")
    credits_merged = core.std.MaskedMerge(mask_sharp, depth(ef, 16), scale_restore_mask)

    # Denoising
    denoise_y = core.knlm.KNLMeansCL(credits_merged, d=1, a=3, s=4, h=0.15, channels='Y')
    denoise_uv = ccd(denoise_y, threshold=6, matrix='709')
    stab = haf.GSMC(denoise_uv, radius=2, planes=[0])
    decs = vdf.noise.decsiz(stab, sigmaS=8, min_in=208 << 8, max_in=232 << 8)

    # Fixing chroma
    cshift = haf.FixChromaBleedingMod(decs, cx=-.25, cy=0, thr=100, strength=1, blur=True)
    cwarp = WarpFixChromaBlend(cshift, thresh=88, blur=3, depth=6)

    # Regular debanding + graining
    detail_mask = flt.detail_mask(cwarp, brz=(1800, 3500))
    deband = vdf.deband.dumb3kdb(cwarp, threshold=32, grain=16)
    deband_masked = core.std.MaskedMerge(deband, cwarp, detail_mask)
    grain: vs.VideoNode = adptvgrnMod(deband_masked, 0.2, luma_scaling=10, size=1.35, static=True, grain_chroma=False)
    return grain
if __name__ == '__main__':
    # Direct execution: filter the episode and hand it to the encoder.
    FILTERED = filterchain()
    enc.Encoder(JP_BD, FILTERED, CHAPTERS, CHAP_NAMES).run(clean_up=True)  # type: ignore
elif __name__ == '__vapoursynth__':
    # Loaded through vspipe: exactly one output node is allowed.
    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        raise ImportError(
            f"Input clip has multiple output nodes ({len(FILTERED)})! Please output just 1 clip"
        )
    else:
        enc.dither_down(FILTERED).set_output(0)
else:
    # Preview (e.g. opened in an editor): expose the source as node 0 and
    # every filtered clip as subsequent numbered nodes.
    JP_BD.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)
    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        for i, clip_filtered in enumerate(FILTERED, start=1):
            clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
    else:
        FILTERED.std.SetFrameProp('node', intval=1).set_output(1)
| 41.503356 | 118 | 0.681921 |
7de63304dbfcab2afa62ffdc0e7b383ab0f529aa | 311 | py | Python | spoticly/commands/base.py | onhernandes/spoticly | ed109f4ea118c57557af1e7a10120a120fd29fa4 | [
"MIT"
] | 6 | 2018-12-23T16:16:36.000Z | 2019-01-11T12:10:49.000Z | spoticly/commands/base.py | onhernandes/spoticly | ed109f4ea118c57557af1e7a10120a120fd29fa4 | [
"MIT"
] | null | null | null | spoticly/commands/base.py | onhernandes/spoticly | ed109f4ea118c57557af1e7a10120a120fd29fa4 | [
"MIT"
] | null | null | null | """The base command."""
class Base(object):
    """Common base for CLI commands.

    Captures the parsed *options* plus any extra positional and keyword
    arguments; concrete commands subclass this and implement ``run``.
    """

    def __init__(self, options, *args, **kwargs):
        """Store the parsed options and any extra call arguments."""
        self.options, self.args, self.kwargs = options, args, kwargs

    def run(self):
        """Execute the command; subclasses must override this."""
        raise NotImplementedError("You must implement the run() method yourself!")
| 22.214286 | 82 | 0.607717 |
fbe74492ae45aa540ea7eaa69aa2ee5f2e5f47ca | 4,410 | py | Python | models/models.py | XLEric/ganimation | 024fdc0adbeae582a139635a5141b2737969bc48 | [
"MIT"
] | null | null | null | models/models.py | XLEric/ganimation | 024fdc0adbeae582a139635a5141b2737969bc48 | [
"MIT"
] | null | null | null | models/models.py | XLEric/ganimation | 024fdc0adbeae582a139635a5141b2737969bc48 | [
"MIT"
] | null | null | null | import os
import torch
from torch.optim import lr_scheduler
class ModelsFactory:
    """Factory that instantiates models by their registered name."""

    def __init__(self):
        pass

    @staticmethod
    def get_by_name(model_name, *args, **kwargs):
        """Create and return the model registered under *model_name*.

        Extra arguments are forwarded to the model's constructor; a
        ValueError is raised for unrecognized names.
        """
        if model_name != 'ganimation':
            raise ValueError("Model %s not recognized." % model_name)
        from .ganimation import GANimation
        model = GANimation(*args, **kwargs)
        print("Model %s was created" % model.name)
        return model
class BaseModel(object):
    """Common base class for GANimation models.

    Stores shared bookkeeping from the option object (GPU ids, train flag,
    checkpoint directory) and provides save/load helpers for networks and
    optimizers.  Subclasses must implement the abstract interface
    (``set_input``, ``forward``, ``optimize_parameters``, ``save``, ...).
    """

    def __init__(self, opt):
        # ``opt`` must expose: gpu_ids, is_train, checkpoints_dir, name.
        self._name = 'BaseModel'
        self._opt = opt
        self._gpu_ids = opt.gpu_ids
        self._is_train = opt.is_train
        # CUDA tensor type when GPUs are configured, CPU tensor otherwise.
        self._Tensor = torch.cuda.FloatTensor if self._gpu_ids else torch.Tensor
        self._save_dir = os.path.join(opt.checkpoints_dir, opt.name)

    @property
    def name(self):
        return self._name

    @property
    def is_train(self):
        return self._is_train

    # --- abstract interface -------------------------------------------------
    # Bug fix: these were ``assert False`` stubs; ``assert`` is stripped
    # under ``python -O``, which would make them silently return None, so
    # they now raise NotImplementedError explicitly.

    def set_input(self, input):
        raise NotImplementedError("set_input not implemented")

    def set_train(self):
        raise NotImplementedError("set_train not implemented")

    def set_eval(self):
        raise NotImplementedError("set_eval not implemented")

    def forward(self, keep_data_for_visuals=False):
        raise NotImplementedError("forward not implemented")

    # used in test time, no backprop
    def test(self):
        raise NotImplementedError("test not implemented")

    def get_image_paths(self):
        return {}

    def optimize_parameters(self):
        raise NotImplementedError("optimize_parameters not implemented")

    def get_current_visuals(self):
        return {}

    def get_current_errors(self):
        return {}

    def get_current_scalars(self):
        return {}

    def save(self, label):
        raise NotImplementedError("save not implemented")

    def load(self):
        raise NotImplementedError("load not implemented")

    # --- persistence helpers ------------------------------------------------

    def _save_optimizer(self, optimizer, optimizer_label, epoch_label):
        """Serialize an optimizer's state dict under the model save dir."""
        save_filename = 'opt_epoch_%s_id_%s.pth' % (epoch_label, optimizer_label)
        save_path = os.path.join(self._save_dir, save_filename)
        torch.save(optimizer.state_dict(), save_path)

    def _load_optimizer(self, optimizer, optimizer_label, epoch_label):
        """Restore an optimizer's state dict saved by ``_save_optimizer``."""
        load_filename = 'opt_epoch_%s_id_%s.pth' % (epoch_label, optimizer_label)
        load_path = os.path.join(self._save_dir, load_filename)
        # Bug fix: the message had no %s placeholder, so evaluating
        # ``msg % load_path`` on a failed assert raised TypeError instead
        # of the intended AssertionError with a useful path.
        assert os.path.exists(load_path), \
            'Weights file not found. Have you trained a model!? We are not providing one: %s' % load_path
        optimizer.load_state_dict(torch.load(load_path))
        print('loaded optimizer: %s' % load_path)

    def _save_network(self, network, network_label, epoch_label):
        """Serialize a network's state dict under the model save dir."""
        save_filename = 'net_epoch_%s_id_%s.pth' % (epoch_label, network_label)
        save_path = os.path.join(self._save_dir, save_filename)
        torch.save(network.state_dict(), save_path)
        print('saved net: %s' % save_path)

    def _load_network(self, network, network_label, epoch_label):
        """Restore a network's state dict saved by ``_save_network``."""
        load_filename = 'net_epoch_%s_id_%s.pth' % (epoch_label, network_label)
        load_path = os.path.join(self._save_dir, load_filename)
        # Same %s-placeholder fix as in _load_optimizer.
        assert os.path.exists(load_path), \
            'Weights file not found. Have you trained a model!? We are not providing one: %s' % load_path
        network.load_state_dict(torch.load(load_path))
        print('loaded net: %s' % load_path)

    def update_learning_rate(self):
        # Optional hook; subclasses with schedulers override it.
        pass

    def print_network(self, network):
        """Print the network architecture and its total parameter count."""
        num_params = sum(param.numel() for param in network.parameters())
        print(network)
        print('Total number of parameters: %d' % num_params)

    def _get_scheduler(self, optimizer, opt):
        """Build the LR scheduler selected by ``opt.lr_policy``.

        Supported policies: 'lambda' (linear decay after ``opt.niter``),
        'step' and 'plateau'; raises NotImplementedError otherwise.
        """
        if opt.lr_policy == 'lambda':
            def lambda_rule(epoch):
                lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
                return lr_l
            scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
        elif opt.lr_policy == 'step':
            scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
        elif opt.lr_policy == 'plateau':
            scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
        else:
            # Bug fix: the original *returned* a NotImplementedError instance
            # (with printf-style args never interpolated) instead of raising.
            raise NotImplementedError(
                'learning rate policy [%s] is not implemented' % opt.lr_policy)
        return scheduler
| 33.157895 | 117 | 0.650567 |
79ea97dec7e116370e4585a8ee5f40b4bfb9c62c | 4,486 | py | Python | sktime/experiments/losses.py | fkiraly/sktime | 3cd0064f7750844178b60d67422b083b5004131a | [
"BSD-3-Clause"
] | 2 | 2021-01-19T05:10:50.000Z | 2021-01-22T18:39:56.000Z | sktime/experiments/losses.py | ClaudiaSanches/sktime | 63e7839e80ca6d5fe5fc4f33389ec3bcacd8aa59 | [
"BSD-3-Clause"
] | 1 | 2019-11-05T16:48:00.000Z | 2019-11-05T16:48:00.000Z | sktime/experiments/losses.py | ClaudiaSanches/sktime | 63e7839e80ca6d5fe5fc4f33389ec3bcacd8aa59 | [
"BSD-3-Clause"
] | 1 | 2019-11-05T16:34:32.000Z | 2019-11-05T16:34:32.000Z | import collections
import pandas as pd
class Losses(object):
"""
Calculates prediction losses on test datasets achieved by the trained estimators. When the class is instantiated it creates a dictionary that stores the losses.
Parameters
----------
metric: `mlaut.analyze_results.scores` object
score function that will be used for the estimation. Must be `mlaut.analyze_results.scores` object.
estimators : `array of mlaut estimators`
Array of estimators on which the results will be compared.
exact_match : bool
If `True` when predictions for all estimators in the estimators array is not available no evaluation is performed on the remaining estimators.
"""
    def __init__(self, metric):
        """Create an empty loss accumulator.

        Parameters
        ----------
        metric : object
            Score object; must provide ``calculate`` and
            ``calculate_per_dataset`` (both used by :meth:`evaluate`).
        """
        self._losses = collections.defaultdict(list)
        self._metric = metric
        # strategy name -> list of losses (one entry per evaluate() call)
        self._losses_per_estimator = collections.defaultdict(list)
        # dataset name -> list of [strategy, mean, std] rows
        self._losses_per_dataset_per_estimator = collections.defaultdict(list)
def evaluate(self, predictions, true_labels, dataset_name, strategy_name):
"""
Calculates the loss metrics on the test sets.
Parameters
----------
predictions : numpy array
Predictions of trained estimators in the form
true_labels : numpy array
true labels of test dataset.
dataset_name : str
Name of the dataset
dataset_name : str
Name of the strategy
"""
# evaluates error per estimator
loss = self._metric.calculate(true_labels, predictions)
if strategy_name in self._losses_per_estimator:
self._losses_per_estimator[strategy_name].append(loss)
else:
self._losses_per_estimator[strategy_name] = [loss]
# evaluate per dataset
avg_score, std_score = self._metric.calculate_per_dataset(y_true=true_labels,
y_pred=predictions)
self._losses_per_dataset_per_estimator[dataset_name].append([strategy_name, avg_score, std_score])
def get_losses(self):
"""
When the Losses class is instantiated a dictionary that holds all losses is created and appended every time the evaluate() method is run. This method returns this dictionary with the losses.
Returns
-------
tuple
errors_per_estimator (dictionary), errors_per_dataset_per_estimator (dictionary), errors_per_dataset_per_estimator_df (pandas DataFrame): Returns dictionaries with the errors achieved by each estimator and errors achieved by each estimator on each of the datasets. ``errors_per_dataset_per_estimator`` and ``errors_per_dataset_per_estimator_df`` return the same results but the first object is a dictionary and the second one a pandas DataFrame. ``errors_per_dataset_per_estimator`` and ``errors_per_dataset_per_estimator_df`` contain both the mean error and deviation.
"""
return self._losses_per_estimator, self._losses_to_dataframe(self._losses_per_dataset_per_estimator)
def _losses_to_dataframe(self, losses):
"""
Reformats the output of the dictionary returned by the :func:`mlaut.analyze_results.losses.Losses.get_losses` to a pandas DataFrame. This method can only be applied to reformat the output produced by :func:`sktime.experiments.Losses.evaluate_per_dataset`.
Parameters
----------
losses : dict
Dictionary returned by the :func:`sktime.experiments.losses.Losses.get_losses` generated by :func:`sktime.experiments.losses.Losses.evaluate_per_dataset`
Returns
-------
dataframe
Multiindex dataframe with the losses
"""
df = pd.DataFrame(losses)
# unpivot the data
df = df.melt(var_name='dts', value_name='values')
df['classifier'] = df.apply(lambda raw: raw.values[1][0], axis=1)
df['loss'] = df.apply(lambda raw: raw.values[1][1], axis=1)
df['std_error'] = df.apply(lambda raw: raw.values[1][2], axis=1)
df = df.drop('values', axis=1)
# create multilevel index dataframe
dts = df['dts'].unique()
estimators_list = df['classifier'].unique()
score = df['loss'].values
std = df['std_error'].values
df = df.drop('dts', axis=1)
df = df.drop('classifier', axis=1)
df.index = pd.MultiIndex.from_product([dts, estimators_list])
return df
| 44.86 | 582 | 0.671868 |
6d16542907872c5482c73dca780a6ef723e1d78a | 2,107 | py | Python | Filters/Core/Testing/Python/QuadricDecimation.py | forestGzh/VTK | bc98327275bd5cfa95c5825f80a2755a458b6da8 | [
"BSD-3-Clause"
] | 1,755 | 2015-01-03T06:55:00.000Z | 2022-03-29T05:23:26.000Z | Filters/Core/Testing/Python/QuadricDecimation.py | forestGzh/VTK | bc98327275bd5cfa95c5825f80a2755a458b6da8 | [
"BSD-3-Clause"
] | 29 | 2015-04-23T20:58:30.000Z | 2022-03-02T16:16:42.000Z | Filters/Core/Testing/Python/QuadricDecimation.py | forestGzh/VTK | bc98327275bd5cfa95c5825f80a2755a458b6da8 | [
"BSD-3-Clause"
] | 1,044 | 2015-01-05T22:48:27.000Z | 2022-03-31T02:38:26.000Z | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot

VTK_DATA_ROOT = vtkGetDataRoot()

# Pipeline: read a PLOT3D structured dataset, take its outer surface, and
# triangulate it so it can be decimated.
pl3d_reader = vtk.vtkMultiBlockPLOT3DReader()
pl3d_reader.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d_reader.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d_reader.SetScalarFunctionNumber(100)
pl3d_reader.SetVectorFunctionNumber(202)
pl3d_reader.Update()
first_block = pl3d_reader.GetOutput().GetBlock(0)

surface = vtk.vtkGeometryFilter()
surface.SetInputData(first_block)

triangulate = vtk.vtkTriangleFilter()
triangulate.SetInputConnection(surface.GetOutputPort())

# Mapper/actor for the undecimated surface (constructed but never added to
# the renderer below, matching the original scene).
surface_mapper = vtk.vtkPolyDataMapper()
surface_mapper.SetInputConnection(surface.GetOutputPort())
surface_mapper.SetScalarRange(first_block.GetScalarRange())
surface_actor = vtk.vtkActor()
surface_actor.SetMapper(surface_mapper)

# First decimation: 90% reduction with the attribute error metric enabled.
decimate_with_attrs = vtk.vtkQuadricDecimation()
decimate_with_attrs.SetInputConnection(triangulate.GetOutputPort())
decimate_with_attrs.SetTargetReduction(0.90)
decimate_with_attrs.AttributeErrorMetricOn()

mapper_with_attrs = vtk.vtkPolyDataMapper()
mapper_with_attrs.SetInputConnection(decimate_with_attrs.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper_with_attrs)

# Second decimation: same reduction, but ignoring point attributes. Its
# actor is offset so both results are visible side by side.
decimate_no_attrs = vtk.vtkQuadricDecimation()
decimate_no_attrs.SetInputConnection(triangulate.GetOutputPort())
decimate_no_attrs.SetTargetReduction(0.90)
decimate_no_attrs.AttributeErrorMetricOff()

mapper_no_attrs = vtk.vtkPolyDataMapper()
mapper_no_attrs.SetInputConnection(decimate_no_attrs.GetOutputPort())
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper_no_attrs)
actor2.AddPosition(0, 12, 0)

# Rendering infrastructure.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Camera tuned to frame both decimated meshes.
camera = vtk.vtkCamera()
camera.SetPosition(19.34, 6.128, -11.96)
camera.SetFocalPoint(8.25451, 6.0, 29.77)
camera.SetViewUp(0.9664, 0.00605, 0.256883)
camera.SetViewAngle(30)
camera.SetClippingRange(26, 64)
ren1.SetActiveCamera(camera)

# Compose the scene.
ren1.AddActor(actor)
ren1.AddActor(actor2)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)

iren.Initialize()

# Image-comparison threshold used by the regression-test harness.
threshold = 50
# --- end of script --
| 29.676056 | 66 | 0.792596 |
ccb0074610ea1596f00ac813ab920561a6fb95bc | 528 | py | Python | solutions/python/the-salesman.py | harimm/hackerrank-solutions | 359e37b160d976da7a30dbc9b80da89e3335ddc0 | [
"MIT"
] | null | null | null | solutions/python/the-salesman.py | harimm/hackerrank-solutions | 359e37b160d976da7a30dbc9b80da89e3335ddc0 | [
"MIT"
] | null | null | null | solutions/python/the-salesman.py | harimm/hackerrank-solutions | 359e37b160d976da7a30dbc9b80da89e3335ddc0 | [
"MIT"
] | null | null | null | # Solution for the problem "The Salesman"
# https://www.hackerrank.com/contests/world-codesprint-12/challenges/the-salesman
# Approach: the answer for each test case is the span of the house
# positions, i.e. the difference between the maximum and minimum values.


def span(values):
    """Return ``max(values) - min(values)``.

    ``values`` may be any iterable of numbers. It is materialised into a
    list first: the original code aggregated a bare ``map`` iterator twice,
    so ``max`` exhausted it and ``min`` raised ``ValueError`` on Python 3.
    """
    values = list(values)
    return max(values) - min(values)


def main():
    """Read the test cases from stdin and print one answer per case."""
    # Number of test cases
    t = int(input())

    for _ in range(t):
        int(input())  # Number of houses; not needed for the computation.
        positions = map(int, input().strip().split(' '))
        print(span(positions))


if __name__ == '__main__':
    main()
0c0665d26a47bcde032ffc10b9c40d1c98c4dfdc | 23,425 | py | Python | reviewboard/accounts/forms/pages.py | b1pb1p/reviewboard | b13aca3b88bc16d3c4258adce5df79cd1da577d3 | [
"MIT"
] | 921 | 2015-01-01T15:26:28.000Z | 2022-03-29T11:30:38.000Z | reviewboard/accounts/forms/pages.py | josnin/reviewboard | e1213abd5151e059548d5b75e514e68e76b89b48 | [
"MIT"
] | 5 | 2015-03-17T18:57:47.000Z | 2020-10-02T13:24:31.000Z | reviewboard/accounts/forms/pages.py | josnin/reviewboard | e1213abd5151e059548d5b75e514e68e76b89b48 | [
"MIT"
] | 285 | 2015-01-12T06:24:36.000Z | 2022-03-29T11:03:50.000Z | from __future__ import unicode_literals
import logging
from collections import OrderedDict
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.forms import widgets
from django.http import HttpResponseRedirect
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext_lazy as _
from djblets.avatars.forms import (
AvatarSettingsForm as DjbletsAvatarSettingsForm)
from djblets.configforms.forms import ConfigPageForm
from djblets.forms.fields import TimeZoneField
from djblets.privacy.consent.forms import ConsentConfigPageFormMixin
from djblets.siteconfig.models import SiteConfiguration
from oauth2_provider.models import AccessToken
from reviewboard.accounts.backends import get_enabled_auth_backends
from reviewboard.avatars import avatar_services
from reviewboard.oauth.features import oauth2_service_feature
from reviewboard.oauth.models import Application
from reviewboard.reviews.models import Group
from reviewboard.site.models import LocalSite
from reviewboard.site.urlresolvers import local_site_reverse
class AccountPageForm(ConfigPageForm):
    """Base class for a form on the My Account page.

    AccountPageForms belong to AccountPages, and will be displayed on the
    My Account page for a user.

    A simple form presents fields that can be filled out and posted.
    More advanced forms can supply their own template or even their own
    JavaScript models and views.
    """

    #: Features that must all be enabled for the form to be rendered.
    required_features = []

    def is_visible(self):
        """Return whether or not the form should be rendered.

        This base implementation renders the form only when every feature
        listed in :py:attr:`required_features` is enabled.

        Returns:
            bool:
            Whether or not the form should be rendered.
        """
        for feature in self.required_features:
            if not feature.is_enabled():
                return False

        return True
class AccountSettingsForm(AccountPageForm):
    """Form for the Settings page for an account.

    This exposes the user's display, behavior, and e-mail notification
    preferences, all of which are stored on the user's profile.
    """

    form_id = 'settings'
    form_title = _('Settings')

    timezone = TimeZoneField(
        label=_('Time zone'),
        required=True,
        help_text=_("The time zone you're in."))

    syntax_highlighting = forms.BooleanField(
        label=_('Enable syntax highlighting in the diff viewer'),
        required=False)

    open_an_issue = forms.BooleanField(
        label=_('Always open an issue when comment box opens'),
        required=False)

    default_use_rich_text = forms.BooleanField(
        label=_('Always use Markdown for text fields'),
        required=False)

    should_send_email = forms.BooleanField(
        label=_('Get e-mail notification for review requests and reviews'),
        required=False)

    should_send_own_updates = forms.BooleanField(
        label=_('Get e-mail notifications for my own activity'),
        required=False)

    enable_desktop_notifications = forms.BooleanField(
        label=_('Show desktop notifications'),
        required=False)

    def load(self):
        """Load data for the form.

        This populates the form's initial values from the user's profile and
        from the site-wide configuration.
        """
        profile = self.user.get_profile()
        siteconfig = SiteConfiguration.objects.get_current()

        diffviewer_syntax_highlighting = siteconfig.get(
            'diffviewer_syntax_highlighting')

        self.set_initial({
            'open_an_issue': profile.open_an_issue,
            # The user's highlighting preference only takes effect when
            # highlighting is also enabled site-wide.
            'syntax_highlighting': (profile.syntax_highlighting and
                                    diffviewer_syntax_highlighting),
            'timezone': profile.timezone,
            'default_use_rich_text': profile.should_use_rich_text,
            'should_send_email': profile.should_send_email,
            'should_send_own_updates': profile.should_send_own_updates,
            'enable_desktop_notifications':
                profile.should_enable_desktop_notifications,
        })

        if not diffviewer_syntax_highlighting:
            # Highlighting is off site-wide: show the checkbox, but don't
            # let the user toggle it.
            self.fields['syntax_highlighting'].widget.attrs.update({
                'disabled': True,
            })

    def save(self):
        """Save the form.

        This writes the submitted values back to the user's profile and
        notifies the user that the settings were saved.
        """
        profile = self.user.get_profile()
        siteconfig = SiteConfiguration.objects.get_current()

        # Only persist the highlighting preference when the feature is
        # enabled site-wide; the disabled checkbox's value is ignored,
        # preserving the user's stored preference.
        if siteconfig.get('diffviewer_syntax_highlighting'):
            profile.syntax_highlighting = \
                self.cleaned_data['syntax_highlighting']

        profile.open_an_issue = self.cleaned_data['open_an_issue']
        profile.default_use_rich_text = \
            self.cleaned_data['default_use_rich_text']
        profile.timezone = self.cleaned_data['timezone']
        profile.should_send_email = self.cleaned_data['should_send_email']
        profile.should_send_own_updates = \
            self.cleaned_data['should_send_own_updates']
        profile.settings['enable_desktop_notifications'] = \
            self.cleaned_data['enable_desktop_notifications']
        profile.save(update_fields=(
            'default_use_rich_text',
            'open_an_issue',
            'settings',
            'should_send_email',
            'should_send_own_updates',
            'syntax_highlighting',
            'timezone',
        ))

        messages.add_message(self.request, messages.INFO,
                             _('Your settings have been saved.'))

    class Meta:
        # Group the fields into two fieldsets on the rendered page.
        fieldsets = (
            (_('General Settings'), {
                'fields': ('form_target',
                           'timezone',
                           'syntax_highlighting',
                           'open_an_issue',
                           'default_use_rich_text'),
            }),
            (_('Notifications'), {
                'fields': ('should_send_email',
                           'should_send_own_updates',
                           'enable_desktop_notifications'),
            })
        )
class AvatarSettingsForm(DjbletsAvatarSettingsForm):
    """A form for configuring the avatar for a user.

    This form is only shown when avatars are enabled on the server and at
    least one avatar service is available.
    """

    avatar_service_registry = avatar_services

    def is_visible(self):
        """Return whether or not to show the avatar settings form.

        Returns:
            bool:
            Whether or not to show the avatar settings form.
        """
        if not super(AvatarSettingsForm, self).is_visible():
            return False

        registry = self.avatar_service_registry

        return (registry.avatars_enabled and
                len(registry.enabled_services) > 0)
class APITokensForm(AccountPageForm):
    """Form listing the API tokens owned by the user."""

    form_id = 'api_tokens'
    form_title = _('API Tokens')
    save_label = None

    js_view_class = 'RB.APITokensView'

    def get_js_view_data(self):
        """Return the data to pass to the JavaScript view."""
        # All of the user's API tokens, across the global site and every
        # Local Site.
        all_tokens = self.user.webapi_tokens.all()

        # Group the tokens by site, starting with the global site (keyed by
        # the empty string), followed by each Local Site in order.
        grouped_tokens = OrderedDict()
        grouped_tokens[''] = self._serialize_api_tokens(None, all_tokens)

        for local_site in self.page.config_view.ordered_user_local_sites:
            grouped_tokens[local_site.name] = \
                self._serialize_api_tokens(local_site, all_tokens)

        return {
            'apiTokens': grouped_tokens,
        }

    def _serialize_api_tokens(self, local_site, api_tokens):
        """Serialize the tokens belonging to one site.

        Only the tokens in ``api_tokens`` whose Local Site matches
        ``local_site`` (``None`` for the global site) are included.
        """
        if local_site is None:
            local_site_prefix = None
        else:
            local_site_prefix = local_site_reverse(
                'root',
                local_site_name=local_site.name)[1:]

        tokens = []

        for api_token in api_tokens:
            if api_token.local_site == local_site:
                tokens.append({
                    'id': api_token.pk,
                    'tokenValue': api_token.token,
                    'timeAdded': api_token.time_added,
                    'lastUpdated': api_token.last_updated,
                    'note': api_token.note,
                    'policy': api_token.policy,
                })

        return {
            'localSitePrefix': local_site_prefix,
            'tokens': tokens,
        }
class ChangePasswordForm(AccountPageForm):
    """Form for changing a user's password."""

    form_id = 'change_password'
    form_title = _('Change Password')
    save_label = _('Change Password')

    old_password = forms.CharField(
        label=_('Current password'),
        required=True,
        widget=widgets.PasswordInput())

    password1 = forms.CharField(
        label=_('New password'),
        required=True,
        widget=widgets.PasswordInput())

    password2 = forms.CharField(
        label=_('New password (confirm)'),
        required=True,
        widget=widgets.PasswordInput())

    def is_visible(self):
        """Return whether or not the "change password" form should be shown.

        The form is only shown when the primary authentication backend
        supports changing passwords.

        Returns:
            bool:
            Whether or not the form will be rendered.
        """
        return (super(ChangePasswordForm, self).is_visible() and
                get_enabled_auth_backends()[0].supports_change_password)

    def clean_old_password(self):
        """Validate the 'old_password' field.

        This checks to make sure the old password is correct when changing the
        password, by re-authenticating the user against the primary backend.

        Raises:
            django.forms.ValidationError:
                The password was incorrect, or the backend raised an
                unexpected error while authenticating.
        """
        backend = get_enabled_auth_backends()[0]

        password = self.cleaned_data['old_password']

        try:
            is_authenticated = backend.authenticate(
                request=None,
                username=self.user.username,
                password=password)
        except Exception as e:
            # A backend failure must not leak through as a server error;
            # surface it as a form validation error instead.
            logging.exception('Error when calling authenticate for auth '
                              'backend %r: %s',
                              backend, e)
            raise forms.ValidationError(_('Unexpected error when validating '
                                          'the password. Please contact the '
                                          'administrator.'))

        if not is_authenticated:
            raise forms.ValidationError(_('This password is incorrect'))

    def clean_password2(self):
        """Validate the 'password2' field.

        This makes sure that the two password fields match.

        Raises:
            django.forms.ValidationError:
                The two passwords did not match.
        """
        p1 = self.cleaned_data['password1']
        p2 = self.cleaned_data['password2']

        if p1 != p2:
            raise forms.ValidationError(_('Passwords do not match'))

        return p2

    def save(self):
        """Save the form.

        This updates the password through the primary authentication backend
        and, on success, sends the "password changed" notification e-mail.
        """
        # Imported locally, presumably to avoid a circular import at module
        # load time -- confirm before moving to the top of the file.
        from reviewboard.notifications.email.signal_handlers import \
            send_password_changed_mail

        backend = get_enabled_auth_backends()[0]

        try:
            backend.update_password(self.user, self.cleaned_data['password1'])

            self.user.save()

            messages.add_message(self.request, messages.INFO,
                                 _('Your password has been changed.'))
        except Exception as e:
            logging.error('Error when calling update_password for auth '
                          'backend %r: %s',
                          backend, e, exc_info=1)
            # NOTE(review): the failure message is reported with the INFO
            # level rather than ERROR -- confirm whether that's intentional.
            messages.add_message(self.request, messages.INFO,
                                 _('Unexpected error when changing your '
                                   'password. Please contact the '
                                   'administrator.'))
        else:
            # Only notify the user when the update actually succeeded.
            send_password_changed_mail(self.user)
class ProfileForm(AccountPageForm):
    """Form for the Profile page for an account."""

    form_id = 'profile'
    form_title = _('Profile')
    save_label = _('Save Profile')

    first_name = forms.CharField(
        label=_('First name'),
        required=False)

    last_name = forms.CharField(
        label=_('Last name'),
        required=False)

    email = forms.EmailField(
        label=_('E-mail address'),
        required=True)

    profile_private = forms.BooleanField(
        required=False,
        label=_('Keep profile information private'),
        help_text=_(
            'By default, profile information (full name, e-mail address, and '
            'timezone) is only hidden from users who are not logged in. With '
            'this setting enabled, it will also be hidden from '
            'non-administrators.'))

    def load(self):
        """Load data for the form.

        This populates the fields from the user and their profile, and
        removes the fields the primary authentication backend doesn't allow
        changing.
        """
        profile = self.user.get_profile()

        self.set_initial({
            'first_name': self.user.first_name,
            'last_name': self.user.last_name,
            'email': self.user.email,
            'profile_private': profile.is_private,
        })

        backend = get_enabled_auth_backends()[0]

        # Hide the fields the backend can't update (e.g. when names or
        # e-mail addresses are managed by an external directory).
        if not backend.supports_change_name:
            del self.fields['first_name']
            del self.fields['last_name']

        if not backend.supports_change_email:
            del self.fields['email']

    def save(self):
        """Save the form.

        This pushes name/e-mail changes through the primary authentication
        backend (when supported) and stores the privacy preference on the
        user's profile. Backend failures are logged but don't prevent the
        local user record from being saved.
        """
        backend = get_enabled_auth_backends()[0]

        if backend.supports_change_name:
            self.user.first_name = self.cleaned_data['first_name']
            self.user.last_name = self.cleaned_data['last_name']

            try:
                backend.update_name(self.user)
            except Exception as e:
                logging.error('Error when calling update_name for auth '
                              'backend %r: %s',
                              backend, e, exc_info=1)

        if backend.supports_change_email:
            new_email = self.cleaned_data['email']

            # Only push to the backend when the address actually changed.
            if new_email != self.user.email:
                self.user.email = new_email

                try:
                    backend.update_email(self.user)
                except Exception as e:
                    logging.error('Error when calling update_email for auth '
                                  'backend %r: %s',
                                  backend, e, exc_info=1)

        self.user.save()

        profile = self.user.get_profile()
        profile.is_private = self.cleaned_data['profile_private']
        profile.save(update_fields=('is_private',))

        messages.add_message(self.request, messages.INFO,
                             _('Your profile has been saved.'))
class GroupsForm(AccountPageForm):
    """Form for the group membership page.

    Unlike most forms, this doesn't deal with fields or saving to the
    database. Instead, it sets up the JavaScript View and provides
    serialized data representing the groups. The View handles group
    membership through the API.
    """

    form_id = 'groups'
    form_title = _('Groups')
    save_label = None

    js_view_class = 'RB.JoinedGroupsView'

    def get_js_view_data(self):
        """Return the data to pass to the JavaScript view."""
        # IDs of the review groups the user has already joined.
        joined_group_ids = self.user.review_groups.values_list('pk',
                                                               flat=True)

        # Serialize the groups accessible to the user, starting with the
        # global site (keyed by the empty string), then each Local Site.
        serialized_groups = OrderedDict()
        serialized_groups[''] = self._serialize_groups(None, joined_group_ids)

        for local_site in self.page.config_view.ordered_user_local_sites:
            serialized_groups[local_site.name] = \
                self._serialize_groups(local_site, joined_group_ids)

        return {
            'groups': serialized_groups,
        }

    def _serialize_groups(self, local_site, joined_group_ids):
        """Serialize the review groups accessible on one site."""
        if local_site is None:
            local_site_name = None
        else:
            local_site_name = local_site.name

        groups = Group.objects.accessible(user=self.user,
                                          local_site=local_site)

        results = []

        for group in groups.order_by('name'):
            results.append({
                'name': group.name,
                'reviewGroupID': group.pk,
                'displayName': group.display_name,
                'localSiteName': local_site_name,
                'joined': group.pk in joined_group_ids,
                'url': local_site_reverse('group',
                                          local_site_name=local_site_name,
                                          kwargs={'name': group.name}),
            })

        return results
class OAuthApplicationsForm(AccountPageForm):
    """The OAuth Application form.

    This provides a list of all current OAuth2 applications the user has
    access to.
    """

    form_id = 'oauth'
    form_title = _('OAuth Applications')
    js_view_class = 'RB.OAuthApplicationsView'
    required_features = [oauth2_service_feature]
    save_label = None

    def get_js_view_data(self):
        """Return the data for the associated Javascript view.

        Returns:
            dict:
            Data to be passed to the Javascript view.
        """
        # Map each of the user's Local Site names to a list of serialized
        # applications; the empty string keys the global site. Sites with
        # no applications are kept so they still appear in the view.
        apps = {
            site_name: []
            for site_name in (
                LocalSite.objects
                .filter(users=self.user)
                .values_list('name', flat=True)
            )
        }
        apps[''] = []

        # select_related avoids one query per application when reading
        # app.local_site in serialize_app.
        app_qs = (
            Application.objects
            .select_related('local_site')
            .filter(user=self.user)
        )

        for app in app_qs:
            app = self.serialize_app(app)
            apps[app['localSiteName'] or ''].append(app)

        return {
            'apps': apps,
            'editURL': reverse('edit-oauth-app'),
            'baseURL': reverse('oauth-apps-resource'),
        }

    @staticmethod
    def serialize_app(app):
        """Serialize an application for the JavaScript view.

        Args:
            app (reviewboard.oauth.models.Application):
                The application to serialize.

        Returns:
            dict:
            The serialized application.
        """
        if app.local_site is not None:
            local_site_name = app.local_site.name
        else:
            local_site_name = None

        enabled = app.enabled

        # The original user is only exposed when the application is both
        # disabled and flagged as disabled for security reasons.
        is_disabled_for_security = (not enabled and
                                    app.is_disabled_for_security)
        original_user = None

        if is_disabled_for_security:
            original_user = app.original_user.username

        return {
            'id': app.pk,
            'editURL': reverse('edit-oauth-app', kwargs={'app_id': app.pk}),
            'enabled': app.enabled,
            'isDisabledForSecurity': app.is_disabled_for_security,
            'localSiteName': local_site_name,
            'name': app.name,
            'originalUser': original_user,
        }
class OAuthTokensForm(AccountPageForm):
    """The OAuth Token form.

    This provides a list of all current OAuth2 tokens the user has created.
    """

    form_id = 'oauth_tokens'
    form_title = _('OAuth Tokens')
    js_view_class = 'RB.OAuthTokensView'
    required_features = [oauth2_service_feature]
    save_label = None

    def get_js_view_data(self):
        """Return the data for the JavaScript view.

        Returns:
            dict:
            A dict containing a single key:

            ``'tokens'`` (:py:class:`list`):
                A list of serialized information about each token.
        """
        # select_related avoids per-token queries when serialize_token
        # reads the application and its Local Site.
        token_qs = (
            AccessToken.objects
            .select_related('application', 'application__local_site')
            .filter(user=self.user)
        )

        return {
            'tokens': [
                self.serialize_token(token)
                for token in token_qs
            ],
        }

    @staticmethod
    def serialize_token(token):
        """Serialize a single token for the JavaScript view.

        Returns:
            dict:
            A dict with the following keys:

            ``'apiURL'`` (:py:class:`unicode`):
                The URL to access the token via the API.

            ``'application'`` (:py:class:`unicode`):
                The name of the application the token is associated with.
        """
        application = token.application

        return {
            'apiURL': local_site_reverse(
                'oauth-token-resource',
                local_site=application.local_site,
                kwargs={
                    'oauth_token_id': token.pk,
                },
            ),
            'application': application.name,
        }
class PrivacyForm(ConsentConfigPageFormMixin, AccountPageForm):
    """A form for displaying privacy information and gathering consent.

    This will display a user's privacy rights, link to any configured
    Privacy Policy document, and display a form for gathering consent for
    features that make use of the user's personally identifying information.
    """

    #: Optional URL to redirect to after the form is saved; rendered as a
    #: hidden field so it survives the form round trip.
    next_url = forms.CharField(required=False,
                               widget=forms.HiddenInput)

    form_title = _('My Privacy Rights')
    template_name = 'accounts/privacy_form.html'

    def __init__(self, *args, **kwargs):
        """Initialize the form.

        When consent gathering is disabled site-wide, the consent field
        (provided by the mixin) is removed and the Save button is hidden,
        leaving a purely informational page.

        Args:
            *args (tuple):
                Positional arguments to pass to the parent form.

            **kwargs (dict):
                Keyword arguments to pass to the parent form.
        """
        super(PrivacyForm, self).__init__(*args, **kwargs)

        siteconfig = SiteConfiguration.objects.get_current()

        if not siteconfig.get('privacy_enable_user_consent'):
            del self.fields[self.consent_field_name]
            self.save_label = None

    def load(self):
        """Load the form data.

        If a ``?next`` query argument is provided, it will be loaded into the
        initial value for the ``next_url`` so that it will persist through
        page submission.
        """
        super(PrivacyForm, self).load()

        next_url = self.request.GET.get('next')

        if next_url:
            self.set_initial({'next_url': unquote(next_url)})

    def is_visible(self):
        """Return whether or not the form should be rendered.

        This will check if there's any information to display in this form.
        It's only displayed if consent requirements are enabled or there's
        any privacy information configured in Admin Settings.

        Returns:
            bool:
            Whether or not the form should be rendered.
        """
        siteconfig = SiteConfiguration.objects.get_current()

        return (siteconfig.get('privacy_enable_user_consent') or
                bool(siteconfig.get('privacy_info_html')))

    def get_extra_context(self):
        """Return extra context for the template.

        Returns:
            dict:
            Context used for rendering the form's template.
        """
        siteconfig = SiteConfiguration.objects.get_current()

        return {
            'privacy_info_html': siteconfig.get('privacy_info_html'),
        }

    def clean_next_url(self):
        """Clean the next_url field.

        A blank or whitespace-only value is normalized to ``None``.

        Returns:
            unicode:
            The URL to redirect to, if any.
        """
        return self.cleaned_data.get('next_url', '').strip() or None

    def save(self):
        """Save the privacy form.

        This may redirect the user to the next URL if it is specified.

        Returns:
            django.http.HttpResponseRedirect:
            A redirect to the next URL if given and ``None`` otherwise.
        """
        next_url = self.cleaned_data.get('next_url')

        if next_url:
            # Record the consent decisions, then send the user back to
            # wherever they came from.
            self.save_consent(self.request.user)

            return HttpResponseRedirect(next_url)
        else:
            return super(PrivacyForm, self).save()
| 32.762238 | 79 | 0.596841 |
916b1dd78984b393da46ccb78736222bad429085 | 1,541 | py | Python | tests/backends/aiida_django/migrations/test_migrations_0048_computer_name_to_label.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 180 | 2019-07-12T07:45:26.000Z | 2022-03-22T13:16:57.000Z | tests/backends/aiida_django/migrations/test_migrations_0048_computer_name_to_label.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 2,325 | 2019-07-04T13:41:44.000Z | 2022-03-31T12:17:10.000Z | tests/backends/aiida_django/migrations/test_migrations_0048_computer_name_to_label.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2019-07-06T01:42:39.000Z | 2022-03-18T14:20:09.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=import-error,no-name-in-module,invalid-name
"""Test migration that renames the ``name`` column of the ``Computer`` entity to ``label``."""
from .test_migrations_common import TestMigrations
class TestMigration(TestMigrations):
"""Test migration that renames the ``name`` column of the ``Computer`` entity to ``label``."""
migrate_from = '0047_migrate_repository'
migrate_to = '0048_computer_name_to_label'
def setUpBeforeMigration(self):
DbComputer = self.apps.get_model('db', 'DbComputer')
computer = DbComputer(name='testing')
computer.save()
self.computer_pk = computer.pk
def test_migration(self):
"""Test that the migration was performed correctly."""
DbComputer = self.apps.get_model('db', 'DbComputer')
computer = DbComputer.objects.get(pk=self.computer_pk)
assert computer.label == 'testing'
| 45.323529 | 98 | 0.560026 |
553d15771435eba907f3dba5c81ff3991dc865ad | 5,322 | py | Python | scripts/generate_sync_api.py | arjun27/playwright-python | 048cc223899f382aee1806ddefa8932f24c633a9 | [
"Apache-2.0"
] | 1 | 2021-03-16T09:47:01.000Z | 2021-03-16T09:47:01.000Z | scripts/generate_sync_api.py | yetone/playwright-python | d1b6a1bd08609214f421792b4422f1bb722156e7 | [
"Apache-2.0"
] | null | null | null | scripts/generate_sync_api.py | yetone/playwright-python | d1b6a1bd08609214f421792b4422f1bb722156e7 | [
"Apache-2.0"
] | 1 | 2020-12-10T10:53:40.000Z | 2020-12-10T10:53:40.000Z | #!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import re
from types import FunctionType
from typing import Any, get_type_hints # type: ignore
from scripts.documentation_provider import DocumentationProvider
from scripts.generate_api import (
all_types,
api_globals,
arguments,
header,
process_type,
return_type,
return_value,
short_name,
signature,
)
documentation_provider = DocumentationProvider()
def generate(t: Any) -> None:
print("")
class_name = short_name(t)
base_class = t.__bases__[0].__name__
base_sync_class = (
"SyncBase"
if base_class == "ChannelOwner" or base_class == "object"
else base_class
)
print(f"class {class_name}({base_sync_class}):")
print("")
print(f" def __init__(self, obj: {class_name}Impl):")
print(" super().__init__(obj)")
for [name, type] in get_type_hints(t, api_globals).items():
print("")
print(" @property")
print(f" def {name}(self) -> {process_type(type)}:")
documentation_provider.print_entry(class_name, name, {"return": type})
[prefix, suffix] = return_value(type)
prefix = " return " + prefix + f"self._impl_obj.{name}"
print(f"{prefix}{suffix}")
for [name, value] in t.__dict__.items():
if name.startswith("_"):
continue
if not name.startswith("_") and str(value).startswith("<property"):
value = value.fget
print("")
print(" @property")
print(
f" def {name}({signature(value, len(name) + 9)}) -> {return_type(value)}:"
)
documentation_provider.print_entry(
class_name, name, get_type_hints(value, api_globals)
)
[prefix, suffix] = return_value(
get_type_hints(value, api_globals)["return"]
)
prefix = " return " + prefix + f"self._impl_obj.{name}"
print(f"{prefix}{arguments(value, len(prefix))}{suffix}")
for [name, value] in t.__dict__.items():
if (
not name.startswith("_")
and isinstance(value, FunctionType)
and "expect_" not in name
):
print("")
print(
f" def {name}({signature(value, len(name) + 9)}) -> {return_type(value)}:"
)
documentation_provider.print_entry(
class_name, name, get_type_hints(value, api_globals)
)
[prefix, suffix] = return_value(
get_type_hints(value, api_globals)["return"]
)
if inspect.iscoroutinefunction(value):
prefix = (
" return " + prefix + f"self._sync(self._impl_obj.{name}("
)
suffix = "))" + suffix
else:
prefix = " return " + prefix + f"self._impl_obj.{name}("
suffix = ")" + suffix
print(f"{prefix}{arguments(value, len(prefix))}{suffix}")
if "expect_" in name:
print("")
return_type_value = return_type(value)
return_type_value = re.sub(r"\"([^\"]+)Impl\"", r"\1", return_type_value)
event_name = re.sub(r"expect_(.*)", r"\1", name)
event_name = re.sub(r"_", "", event_name)
event_name = re.sub(r"consolemessage", "console", event_name)
print(
f" def {name}({signature(value, len(name) + 9)}) -> {return_type_value}:"
)
wait_for_method = "waitForEvent(event, predicate, timeout)"
if event_name == "request":
wait_for_method = "waitForRequest(url, predicate, timeout)"
elif event_name == "response":
wait_for_method = "waitForResponse(url, predicate, timeout)"
elif event_name == "loadstate":
wait_for_method = "waitForLoadState(state, timeout)"
elif event_name == "navigation":
wait_for_method = "waitForNavigation(url, waitUntil, timeout)"
elif event_name != "event":
print(f' event = "{event_name}"')
print(
f" return EventContextManager(self._loop, self._impl_obj.{wait_for_method})"
)
print("")
print(f"mapping.register({class_name}Impl, {class_name})")
def main() -> None:
print(header)
print("from playwright.sync_base import EventContextManager, SyncBase, mapping")
print("NoneType = type(None)")
for t in all_types:
generate(t)
documentation_provider.print_remainder()
if __name__ == "__main__": # pragma: no cover
main()
| 36.703448 | 99 | 0.578354 |
bb7bfbfc84b90baad3b059b50732725046f82ca9 | 16,455 | py | Python | libs/cidre/cidre/draw.py | YuzhongHuangCS/journal-citation-cartels | 7b82d9e081555f5b40eb6cc6f44f65ce1c3c1a0f | [
"BSD-2-Clause"
] | 2 | 2020-09-22T09:59:56.000Z | 2022-01-31T20:00:49.000Z | libs/cidre/cidre/draw.py | YuzhongHuangCS/journal-citation-cartels | 7b82d9e081555f5b40eb6cc6f44f65ce1c3c1a0f | [
"BSD-2-Clause"
] | 1 | 2021-07-20T20:01:46.000Z | 2021-07-20T21:21:03.000Z | libs/cidre/cidre/draw.py | YuzhongHuangCS/journal-citation-cartels | 7b82d9e081555f5b40eb6cc6f44f65ce1c3c1a0f | [
"BSD-2-Clause"
] | 3 | 2021-03-09T04:38:56.000Z | 2021-07-13T04:49:15.000Z | import pandas as pd
import numpy as np
from scipy import sparse
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import patches
import matplotlib.colors as colors
import textwrap
import re
class DrawCartel:
def __init__(self):
self.theta_1 = np.pi * 0.7
self.angle_margin = 3 * np.pi / 25
self.node2color = {}
self.angles = None
self.radius = 2
self.label_node_margin = 0.35
self.group_arc_node_margin = 0.1
self.edge_norm = lambda x: np.power(x, 1 / 2)
self.max_edge_width = 15
self.font_size = 15
self.node_size = 0.25
self.label_width = 15
self.max_label_width = 35
self.group_order = {"source": 0, "target": 1, "reciprocal": 2, "other": 3}
def draw(
self,
A,
node_ids,
donor_score,
recipient_score,
theta,
node_names,
cmap=None,
ax=None,
):
"""
Draw citation networks within a citation cartel
Parameters
----------
A : scipy.sparse matrix
The adjacency matrix for the network with all nodes
node_ids : np.array or list
The node ids of the nodes in the cartel
donor_score : np.array or list
The donor score of the nodes in the cartel.
We assume the donor_score[i] indicates the
donor score for node_ids[i]
recipient_score : np.array or list
The recipient score of the nodes in the cartel.
We assume the recipient_score[i] indicates the
recipient score for node_ids[i]
theta : float
The threshold for the donor and recipient score
node_name : list
Node names. Assume that node_name[i] indicates the
name of the node_ids[i]
cmap : matplotlib color map or list
Color map or List of strings indicating hex code
ax : axis
Return
------
ax : axis
"""
#
# Input formatting
#
node_ids = np.array(node_ids)
donor_score = np.array(donor_score)
recipient_score = np.array(recipient_score)
# Classify nodes into donor, recipient and reciprocal
node_types = self.classify_nodes(donor_score, recipient_score, theta)
# Change the angle for the reciprocal journals based on the number of it
num_reciprocal = np.sum(
np.array([node_types[i] == "reciprocal" for i in range(len(node_types))])
)
if num_reciprocal > 2:
self.theta_1 = np.pi * 0.3
else:
self.theta_1 = np.pi * 0.7
#
# Construct the adjacency matrix with 'Other' node
#
brow = A[:, node_ids].sum(axis=0)
bcol = A[node_ids, :].sum(axis=1)
As = A[:, node_ids][node_ids, :].toarray()
# Add 'Other' node to the adjacency matrix
As = np.block([[As, bcol], [brow, np.array([0])]])
As = np.array(As)
node_types += ["other"]
node_names += ["Other"]
#
# Calculate the positions and sizes
#
# Make node table
num_nodes = len(node_types)
node_table = pd.DataFrame(
{"id": np.arange(num_nodes), "group": node_types, "name": node_names}
)
# Calculate the angle of each node
node_table, self.angles = self.calc_node_angles(node_table)
# Calculate the position of journals based on the angles
node_table = self.calc_node_position(node_table)
# Text folding
node_table = self.fold_node_name(node_table)
# Compute the edge positions based on the nodes
edge_table = self.make_edge_table(node_table, As)
# make color map
self.make_color_map(node_table, cmap)
#
# Plot
#
self.plot_edges(node_table, edge_table, ax)
self.plot_node_label(node_table, ax)
self.plot_nodes(node_table, A, As, node_ids, ax)
self.plot_group_arc(node_table, ax)
self.trim(ax)
return ax
def classify_nodes(self, donor_score, recipient_score, threshold):
is_recipient = recipient_score >= threshold
is_donor = donor_score >= threshold
is_reciprocal = is_recipient & is_donor
is_recipient = is_recipient & (~is_reciprocal)
is_donor = is_donor & (~is_reciprocal)
node_type = is_recipient + 2 * is_donor + 3 * is_reciprocal
node_type = np.array(["", "target", "source", "reciprocal"])[node_type]
return node_type.tolist()
def calc_node_angles(self, node_table):
# Compute the coordinate of nodes
self.theta_2 = np.pi - self.theta_1 - 2 * self.angle_margin
node_table["within_group_id"] = -1
node_table["angle"] = -1
angles = {"margin_angle": self.angle_margin}
for group_name in ["source", "target", "reciprocal", "other"]:
dg = node_table[node_table.group == group_name]
if group_name == "source":
start_angle = -self.angle_margin - self.theta_1 - self.theta_2 / 2
end_angle = start_angle + self.theta_1
elif group_name == "target":
start_angle = self.angle_margin + self.theta_2 / 2
end_angle = start_angle + self.theta_1
elif group_name == "reciprocal":
start_angle = -self.theta_2 / 2
end_angle = start_angle + self.theta_2
elif group_name == "other":
start_angle = self.theta_2 / 2 + self.angle_margin * 2 + self.theta_1
end_angle = start_angle + self.theta_2
ids = np.arange(dg.shape[0])
node_table.loc[dg.index, "within_group_id"] = ids
n = dg.shape[0]
if (group_name == "reciprocal") and (n >= 2):
a = (
(ids) * ((end_angle - start_angle) - self.angle_margin) / (n - 1)
+ start_angle
+ 0.5 * self.angle_margin
)
else:
if n >= 2:
a = (
ids
* ((end_angle - start_angle) - 1.5 * self.angle_margin)
/ (n - 1)
+ start_angle
+ 0.75 * self.angle_margin
)
else:
a = (
(ids + 1)
* ((end_angle - start_angle) - 1.5 * self.angle_margin)
/ (n + 1)
+ start_angle
+ 0.75 * self.angle_margin
)
# node_table.loc[dg.index, "angle"] = (ids +1) * angle_group / (n-1) + start_angle
node_table.loc[dg.index, "angle"] = a
angles[group_name] = {"start": start_angle, "end": end_angle}
return node_table, angles
def calc_node_position(self, node_table):
nodes = node_table.copy()
nodes["x"] = self.radius * np.sin(nodes.angle)
nodes["y"] = self.radius * np.cos(nodes.angle)
return nodes
def make_edge_table(self, node_table, As):
# Compute the edge table
src, trg = np.where(As)
selfloop = src != trg
src, trg = src[selfloop], trg[selfloop]
w = As[(src, trg)]
edge_table = pd.DataFrame({"src": src, "trg": trg, "w": w})
edges = edge_table.copy()
edges = pd.merge(
edges,
node_table[["id", "x", "y"]],
left_on="src",
right_on="id",
how="left",
).rename(columns={"x": "src_x", "y": "src_y"})
edges = pd.merge(
edges,
node_table[["id", "x", "y"]],
left_on="trg",
right_on="id",
how="left",
).rename(columns={"x": "trg_x", "y": "trg_y"})
# Normalize the maximum to be one
wmax = np.maximum(
np.max(np.triu(As[:, :-1][:-1, :], 1)),
np.max(np.tril(As[:, :-1][:-1, :], 1)),
)
edges["w"] = edges["w"] / wmax
edges["w"] = self.edge_norm(edges["w"])
edges["w"] = edges["w"] / edges["w"].max()
n = As.shape[0] - 1
return edges
def make_color_map(self, node_table, cmap):
n = node_table.shape[0]
# sort nodes
_node_table = node_table.copy()
_node_table["group_order"] = _node_table.apply(
lambda x: self.group_order[x["group"]], axis=1
)
_node_table = _node_table.sort_values(by="group_order")
if cmap is None:
if _node_table.shape[0] <= 8:
cmap = sns.color_palette().as_hex()
elif _node_table.shape[0] <= 20:
cmap = sns.color_palette().as_hex()
cmap2 = sns.color_palette("husl", 12).as_hex()
cmap = cmap + cmap2
elif _node_table.shape[0] <= 20:
cmap = sns.color_palette().as_hex()
cmap2 = sns.color_palette("Paired").as_hex()
cmap = cmap + [c for i, c in enumerate(cmap2) if i % 2 == 1] + [
c for i, c in enumerate(cmap2) if i % 2 == 0
]
elif _node_table.shape[0] <= 40:
# cmap = sns.color_palette("Set1").as_hex()
cmap = sns.color_palette("tab20").as_hex()
cmap_list = []
for l in range(5):
cmap_list += [c for i, c in enumerate(cmap) if i % 2 == l]
cmap_1 = cmap_list
cmap = sns.color_palette("tab20b").as_hex()
cmap_list = []
for l in range(5):
cmap_list += [c for i, c in enumerate(cmap) if i % 2 == l]
cmap_2 = cmap_list
cmap = cmap_1 + cmap_2
else:
cmap = sns.color_palette("Spectral", n).as_hex()
self.node2color = {}
i = 0
for _, row in _node_table.iterrows():
if row["group"] == "other":
self.node2color[row["id"]] = "#c4c4c4"
else:
self.node2color[row["id"]] = cmap[i]
i += 1
return self.node2color
def plot_edges(self, node_table, edge_table, ax):
_edges = edge_table.sort_values(by="w").sort_values(
by=["src", "trg"], ascending=False
)
for i, edge in _edges.iterrows():
if edge.src == edge.trg:
continue
x_pos = edge.src_x
y_pos = edge.src_y
dx = edge.trg_x - edge.src_x
dy = edge.trg_y - edge.src_y
length = np.sqrt(dx * dx + dy * dy)
orient = np.array([edge.trg_x, edge.trg_y])
w = edge["w"]
style = """Simple,tail_width={w},head_width={w1}
""".format(
w=w * self.max_edge_width, w1=2 * w * self.max_edge_width,
)
src_id = int(edge.src)
trg_id = int(edge.trg)
color = self.node2color[int(edge.src)]
color = (
color + "66"
if node_table.iloc[src_id]["group"] == "other"
else color + "cc"
)
kw = dict(arrowstyle=style, color=color, linewidth=0,)
connectionstyle = "arc3,rad=.2"
a3 = patches.FancyArrowPatch(
(edge.src_x, edge.src_y),
(edge.trg_x, edge.trg_y),
# shrinkB=15,
connectionstyle=connectionstyle,
**kw
)
ax.add_patch(a3)
def plot_node_label(self, node_table, ax):
reciprocal_num = np.sum(node_table["group"].values == "reciprocal")
texts = []
for i, row in node_table.iterrows():
x = row["x"] + (self.label_node_margin) * np.sin(row["angle"])
y = row["y"] + (self.label_node_margin) * np.cos(row["angle"])
if (x > 0) and (y > 0):
ha = "left"
va = "bottom"
elif (x > 0) and (y < 0):
ha = "left"
va = "top"
elif (x < 0) and (y < 0):
ha = "right"
va = "top"
elif (x < 0) and (y > 0):
ha = "right"
va = "bottom"
va = "center"
if row["group"] == "reciprocal":
if reciprocal_num > 2:
pass
else:
ha = "center"
va = "bottom"
if row["group"] == "other":
ha = "center"
va = "top"
texts += [ax.text(x, y, row["name"], ha=ha, va=va, fontsize = self.font_size)]
def plot_group_arc(self, node_table, ax):
params = {"lw": 3, "fc": "white", "alpha": 0.3, "zorder": 0, "angle": 90}
a3 = patches.Arc(
(0, 0),
2 * self.radius * (self.group_arc_node_margin + 1),
2 * self.radius * (self.group_arc_node_margin + 1),
theta1=-180 * self.angles["reciprocal"]["end"] / np.pi,
theta2=-180 * self.angles["source"]["start"] / np.pi,
ec="black",
ls="-",
**params
)
ax.add_patch(a3)
a3 = patches.Arc(
(0, 0),
2 * self.radius * (self.group_arc_node_margin + 1.05),
2 * self.radius * (self.group_arc_node_margin + 1.05),
theta2=-180 * self.angles["reciprocal"]["start"] / np.pi,
theta1=-180 * self.angles["target"]["end"] / np.pi,
ec="black",
ls="--",
**params
)
ax.add_patch(a3)
def trim(self, ax):
R = self.radius * 1.2
ax.set_xlim(left=-R, right=R)
ax.set_ylim(bottom=-R, top=R)
ax.axis("off")
def plot_nodes(self, node_table, A, As, node_ids, ax):
# Calculate the angle of pie
indeg = np.array(A[:, node_ids].sum(axis=0)).reshape(-1)
share = As[:-1, :-1] @ np.diag(1.0 / np.maximum(1, indeg))
for i, row in node_table.iterrows():
if row["group"] == "other":
ax.pie(
[1],
startangle=90,
colors=[self.node2color[row["id"]]],
center=(row["x"], row["y"]),
radius=self.node_size,
wedgeprops={"edgecolor": self.node2color[i], "linewidth": 3},
)
else:
order = np.argsort(share[:, i])
node_color_list = [self.node2color[j] for j in order]
ax.pie(
[1 - np.sum(share[:, i])]
+ np.array(share[order, i]).reshape(-1).tolist(),
startangle=90,
colors=["#ffffffff"] + node_color_list,
center=(row["x"], row["y"]),
radius=self.node_size,
wedgeprops={"linewidth": 0},
)
c = patches.Circle(
(row["x"], row["y"]),
self.node_size,
fill=None,
edgecolor=self.node2color[row["id"]],
linewidth=3,
)
ax.add_patch(c)
def fold_node_name(self, node_table):
for g, dg in node_table.groupby("group"):
if dg.shape[0] < 8:
node_table.loc[dg.index, "name"] = dg["name"].apply(
lambda x: self.fold_text(
x, width=self.label_width, max_width=self.max_label_width
)
)
else:
node_table.loc[dg.index, "name"] = dg["name"].apply(
lambda x: self.fold_text(x, width=45, max_width=99999)
)
return node_table
def fold_text(self, txt, width, max_width):
if width == "auto":
w = 10
while True:
s = textwrap.wrap(txt, w)
if (len(s) <= 2) or (w >= max_width):
break
w += 1
txt = "\n".join(s)
elif width is not None:
txt = "\n".join(textwrap.wrap(txt, width))
return txt
| 34.936306 | 98 | 0.489274 |
2f472ab1931d145ebe23127be4e83a0bd901ebbc | 664 | py | Python | fv3gfs/util/_capture_stream.py | ai2cm/fv3gfs-util | 56fd8e93cefe6951396717a49390c4020a0bc20c | [
"BSD-3-Clause"
] | 1 | 2021-01-05T20:55:01.000Z | 2021-01-05T20:55:01.000Z | fv3gfs/util/_capture_stream.py | VulcanClimateModeling/fv3gfs-util | 1d7c302b836befe905d776b0a972f464bfd3a255 | [
"BSD-3-Clause"
] | 34 | 2020-11-10T18:06:18.000Z | 2021-07-20T22:46:31.000Z | fv3gfs/util/_capture_stream.py | ai2cm/fv3gfs-util | 56fd8e93cefe6951396717a49390c4020a0bc20c | [
"BSD-3-Clause"
] | 1 | 2021-08-10T21:36:44.000Z | 2021-08-10T21:36:44.000Z | import contextlib
import tempfile
import os
import io
@contextlib.contextmanager
def capture_stream(stream):
    """Capture everything written to *stream*'s file descriptor.

    Yields an ``io.BytesIO`` that is filled with the captured bytes once
    the ``with`` body finishes normally.  Works even for writes done by
    forked subprocesses or C extensions, because the redirection happens
    at the OS file-descriptor level (``dup2``), not the Python object.

    Fixes over the original: the duplicated descriptor is now closed
    (it used to leak one fd per call), and the original descriptor is
    restored even when the body raises.
    """
    out_stream = io.BytesIO()
    # Duplicate the original descriptor so it can be restored afterwards.
    orig_file_handle = os.dup(stream.fileno())
    try:
        with tempfile.NamedTemporaryFile() as out:
            # Route the stream's descriptor into the temporary file; any
            # write to stream.fileno() now lands in `out`.
            os.dup2(out.fileno(), stream.fileno())
            try:
                yield out_stream
            finally:
                # Restore the original descriptor even if the body raised.
                os.dup2(orig_file_handle, stream.fileno())
            # Copy the captured bytes into the in-memory buffer.
            out.seek(0)
            out_stream.write(out.read())
    finally:
        # Close the duplicate to avoid leaking a file descriptor.
        os.close(orig_file_handle)
97732b1a1326669a4d6afb0ed54364e582552f33 | 865 | py | Python | src/commands/fire/fire_city_restaurants_download.py | jherrerotardon/spies | ec855b3c1bd207c8ee2beb829e446fa575354c59 | [
"Apache-2.0"
] | null | null | null | src/commands/fire/fire_city_restaurants_download.py | jherrerotardon/spies | ec855b3c1bd207c8ee2beb829e446fa575354c59 | [
"Apache-2.0"
] | null | null | null | src/commands/fire/fire_city_restaurants_download.py | jherrerotardon/spies | ec855b3c1bd207c8ee2beb829e446fa575354c59 | [
"Apache-2.0"
] | null | null | null | from pyframework.exceptions.custom_exceptions import ArgumentException
from .base_fire import BaseFire, Event
from ...models.city import City
class FireCityRestaurantsDownload(BaseFire):
    """Command that fires a download event for one city's entities."""

    _name = 'fire:cityDownload'

    _description = 'Launch an event to download entities info from city.'

    _arguments = [
        ['-c', '--city', 'City ID to be fired.']
    ]

    # City row loaded during set_up(); used when firing the event.
    _city = {}

    def set_up(self):
        """Validate the --city argument and load the matching city."""
        city_id = self.get_argument('city')
        if not city_id:
            raise ArgumentException('City ID is required.')
        city = City().get_city(city_id)
        if not city:
            raise ArgumentException('No valid city ID.')
        self._city = city

    def handle(self) -> int:
        """Fire the place-download event for the configured city."""
        self._fire_event(Event.PLACE_DOWNLOAD_ACTION,
                         {'place_id': self._city['id']})
        return self.RETURN_SUCCESS
| 24.714286 | 73 | 0.63237 |
fa64eea4b2b6d34c5a01342722ac30c4f9cd587b | 3,946 | py | Python | drive.py | dhiegomaga/Behavioral-Cloning | d9577beb94a2e5ab683728d2bef3fc94947c7dd0 | [
"MIT"
] | null | null | null | drive.py | dhiegomaga/Behavioral-Cloning | d9577beb94a2e5ab683728d2bef3fc94947c7dd0 | [
"MIT"
] | null | null | null | drive.py | dhiegomaga/Behavioral-Cloning | d9577beb94a2e5ab683728d2bef3fc94947c7dd0 | [
"MIT"
] | null | null | null | import argparse
import base64
from datetime import datetime
import os
import shutil
# Prevent from using GPU
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO
from keras.models import load_model
import h5py
from keras import __version__ as keras_version
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
class SimplePIController:
    """Minimal proportional-integral controller (used for throttle)."""

    def __init__(self, Kp, Ki):
        # Proportional and integral gains.
        self.Kp = Kp
        self.Ki = Ki
        # Target value and accumulated error state.
        self.set_point = 0.
        self.error = 0.
        self.integral = 0.

    def set_desired(self, desired):
        """Set the target value the controller should track."""
        self.set_point = desired

    def update(self, measurement):
        """Return the control output for the latest measurement."""
        error = self.set_point - measurement
        self.error = error
        self.integral += error
        return self.Kp * error + self.Ki * self.integral
# Module-level PI controller shared by the telemetry handler.
controller = SimplePIController(0.1, 0.002)
# Target speed held by the controller (simulator units).
set_speed = 9
controller.set_desired(set_speed)
@sio.on('telemetry')
def telemetry(sid, data):
    # Per-frame handler: decode the center-camera image from the simulator,
    # predict a steering angle with the loaded model, regulate throttle via
    # the PI controller, and send the controls back.
    if data:
        # The current steering angle of the car
        # NOTE(review): the three telemetry values read below are immediately
        # overwritten by the model/controller outputs; only `speed` and
        # `image` are actually used.
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = data["speed"]
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)
        # Add a leading batch dimension for the model's single-image predict.
        steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))
        throttle = controller.update(float(speed))
        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        # `args` is the module-level namespace parsed under __main__.
        if args.image_folder != '':
            # Millisecond-resolution timestamp used as the frame filename.
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
    # New simulator connection: log the session id and send neutral
    # controls (zero steering, zero throttle) until telemetry arrives.
    print("connect ", sid)
    send_control(0, 0)
def send_control(steering_angle, throttle):
    """Emit a 'steer' command with the given controls to the simulator.

    The simulator protocol expects both values as strings.
    """
    payload = {
        'steering_angle': str(steering_angle),
        'throttle': str(throttle),
    }
    sio.emit("steer", data=payload, skip_sid=True)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument(
        'model',
        type=str,
        help='Path to model h5 file. Model should be on the same path.'
    )
    parser.add_argument(
        'image_folder',
        type=str,
        nargs='?',
        default='',
        help='Path to image folder. This is where the images from the run will be saved.'
    )
    args = parser.parse_args()

    # check that model Keras version is same as local Keras version
    f = h5py.File(args.model, mode='r')
    model_version = f.attrs.get('keras_version')
    # NOTE(review): this rebinds the module-level `keras_version` import to
    # its bytes form so it can be compared against the bytes attribute
    # stored in the h5 file.
    keras_version = str(keras_version).encode('utf8')

    if model_version != keras_version:
        print('You are using Keras version ', keras_version,
              ', but the model was built using ', model_version)

    # Load the trained model into the module-level `model` global used by
    # the telemetry handler.
    model = load_model(args.model)

    if args.image_folder != '':
        print("Creating image folder at {}".format(args.image_folder))
        # Start from an empty folder: create it, or wipe and recreate it.
        if not os.path.exists(args.image_folder):
            os.makedirs(args.image_folder)
        else:
            shutil.rmtree(args.image_folder)
            os.makedirs(args.image_folder)
        print("RECORDING THIS RUN ...")
    else:
        print("NOT RECORDING THIS RUN ...")

    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)

    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
e41ebb8845f0d7ada367a6a8b9f512be2dd89b22 | 31,389 | py | Python | gslib/tests/test_rewrite.py | maxshine/gsutil | c81d67f2286402accfcdf79f0199844949bebefc | [
"Apache-2.0"
] | 1,894 | 2015-04-17T18:29:53.000Z | 2022-03-28T22:41:06.000Z | gslib/tests/test_rewrite.py | maxshine/gsutil | c81d67f2286402accfcdf79f0199844949bebefc | [
"Apache-2.0"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | gslib/tests/test_rewrite.py | maxshine/gsutil | c81d67f2286402accfcdf79f0199844949bebefc | [
"Apache-2.0"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for rewrite command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import logging
import os
import re
import unittest
from boto.storage_uri import BucketStorageUri
from gslib.cs_api_map import ApiSelector
from gslib.discard_messages_queue import DiscardMessagesQueue
from gslib.gcs_json_api import GcsJsonApi
from gslib.project_id import PopulateProjectId
from gslib.tests.rewrite_helper import EnsureRewriteRestartCallbackHandler
from gslib.tests.rewrite_helper import EnsureRewriteResumeCallbackHandler
from gslib.tests.rewrite_helper import HaltingRewriteCallbackHandler
from gslib.tests.rewrite_helper import RewriteHaltException
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import GenerationFromURI as urigen
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import TEST_ENCRYPTION_KEY1
from gslib.tests.util import TEST_ENCRYPTION_KEY2
from gslib.tests.util import TEST_ENCRYPTION_KEY3
from gslib.tests.util import TEST_ENCRYPTION_KEY4
from gslib.tests.util import unittest
from gslib.tracker_file import DeleteTrackerFile
from gslib.tracker_file import GetRewriteTrackerFilePath
from gslib.utils.encryption_helper import CryptoKeyWrapperFromKey
from gslib.utils.unit_util import ONE_MIB
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
class TestRewrite(testcase.GsUtilIntegrationTestCase):
"""Integration tests for rewrite command."""
def test_rewrite_missing_flag(self):
"""Tests rewrite with no transformation flag."""
stderr = self.RunGsUtil(
['rewrite', '%s://some_url' % self.default_provider],
return_stderr=True,
expected_status=1)
self.assertIn('command requires at least one transformation flag', stderr)
def test_rewrite_generation_url(self):
"""Tests that rewrite fails on a URL that includes a generation."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'bar',
encryption_key=TEST_ENCRYPTION_KEY1)
generation = object_uri.generation
stderr = self.RunGsUtil(
['rewrite', '-k',
'%s#%s' % (suri(object_uri), generation)],
return_stderr=True,
expected_status=1)
self.assertIn('"rewrite" called on URL with generation', stderr)
def test_rewrite_missing_decryption_key(self):
"""Tests that rewrite fails when no decryption key matches."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(object_name='foo',
contents=b'bar',
encryption_key=TEST_ENCRYPTION_KEY1)
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY3)]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(
['rewrite', '-k', suri(object_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('No decryption key matches object %s' % suri(object_uri),
stderr)
def test_rewrite_stdin_args(self):
"""Tests rewrite with arguments supplied on stdin."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'bar',
encryption_key=TEST_ENCRYPTION_KEY1)
stdin_arg = suri(object_uri)
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['rewrite', '-k', '-I'], stdin=stdin_arg)
self.AssertObjectUsesCSEK(stdin_arg, TEST_ENCRYPTION_KEY2)
def test_rewrite_overwrite_acl(self):
"""Tests rewrite with the -O flag."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'bar',
encryption_key=TEST_ENCRYPTION_KEY1)
self.RunGsUtil(['acl', 'ch', '-u', 'AllUsers:R', suri(object_uri)])
stdout = self.RunGsUtil(['acl', 'get', suri(object_uri)],
return_stdout=True)
self.assertIn('allUsers', stdout)
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['rewrite', '-k', '-O', suri(object_uri)])
self.AssertObjectUsesCSEK(suri(object_uri), TEST_ENCRYPTION_KEY2)
stdout = self.RunGsUtil(['acl', 'get', suri(object_uri)],
return_stdout=True)
self.assertNotIn('allUsers', stdout)
def test_rewrite_bucket_recursive(self):
"""Tests rewrite command recursively on a bucket."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
self._test_rewrite_key_rotation_bucket(
bucket_uri,
['rewrite', '-k', '-r', suri(bucket_uri)])
def test_parallel_rewrite_bucket_flat_wildcard(self):
"""Tests parallel rewrite command with a flat wildcard on a bucket."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
self._test_rewrite_key_rotation_bucket(
bucket_uri,
['-m', 'rewrite', '-k', suri(bucket_uri, '**')])
  def _test_rewrite_key_rotation_bucket(self, bucket_uri, command_args):
    """Helper function for testing key rotation on a bucket.

    Creates four objects (three with different CSEKs, one unencrypted),
    rotates them all to TEST_ENCRYPTION_KEY1 via the supplied rewrite
    invocation, then removes encryption entirely with a second pass.

    Args:
      bucket_uri: bucket StorageUri to use for the test.
      command_args: list of args to gsutil command.
    """
    object_contents = b'bar'
    object_uri1 = self.CreateObject(bucket_uri=bucket_uri,
                                    object_name='foo/foo',
                                    contents=object_contents,
                                    encryption_key=TEST_ENCRYPTION_KEY1)
    object_uri2 = self.CreateObject(bucket_uri=bucket_uri,
                                    object_name='foo/bar',
                                    contents=object_contents,
                                    encryption_key=TEST_ENCRYPTION_KEY2)
    object_uri3 = self.CreateObject(bucket_uri=bucket_uri,
                                    object_name='foo/baz',
                                    contents=object_contents,
                                    encryption_key=TEST_ENCRYPTION_KEY3)
    object_uri4 = self.CreateObject(bucket_uri=bucket_uri,
                                    object_name='foo/qux',
                                    contents=object_contents)

    # Rotate all keys to TEST_ENCRYPTION_KEY1.
    # Keys are passed ascii-decoded here (boto config values as str).
    boto_config_for_test = [
        ('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1.decode('ascii')),
        ('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY2.decode('ascii')),
        ('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY3.decode('ascii'))
    ]
    with SetBotoConfigForTest(boto_config_for_test):
      stderr = self.RunGsUtil(command_args, return_stderr=True)
      # Object one already has the correct key.
      self.assertIn('Skipping %s' % suri(object_uri1), stderr)
      # Other objects should be rotated.
      self.assertIn('Rotating', stderr)
    for object_uri_str in (suri(object_uri1), suri(object_uri2),
                           suri(object_uri3), suri(object_uri4)):
      self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY1)

    # Remove all encryption.
    boto_config_for_test2 = [('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY1)
                            ]
    with SetBotoConfigForTest(boto_config_for_test2):
      stderr = self.RunGsUtil(command_args, return_stderr=True)
      self.assertIn('Decrypting', stderr)
    for object_uri_str in (suri(object_uri1), suri(object_uri2),
                           suri(object_uri3), suri(object_uri4)):
      self.AssertObjectUnencrypted(object_uri_str)
def test_rewrite_seek_ahead(self):
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'bar',
encryption_key=TEST_ENCRYPTION_KEY1)
# Remove encryption
boto_config_for_test = [('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY1),
('GSUtil', 'task_estimation_threshold', '1'),
('GSUtil', 'task_estimation_force', 'True')]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(
['-m', 'rewrite', '-k', suri(object_uri)], return_stderr=True)
self.assertIn(
'Estimated work for this command: objects: 1, total size: 3', stderr)
def test_rewrite_unintentional_key_rotation_fails(self):
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
encrypted_obj_uri = self.CreateObject(contents=b'bar',
encryption_key=TEST_ENCRYPTION_KEY1)
unencrypted_obj_uri = self.CreateObject(contents=b'bar')
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
# Executing rewrite without the -k flag should fail if your boto file has
# a different encryption_key than was last used to encrypt the object.
stderr = self.RunGsUtil(['rewrite', '-s', 'dra',
suri(encrypted_obj_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('EncryptionException', stderr)
# Should also fail for a previously unencrypted object.
stderr = self.RunGsUtil(
['rewrite', '-s', 'dra',
suri(unencrypted_obj_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('EncryptionException', stderr)
def test_rewrite_key_rotation_single_object(self):
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'bar',
encryption_key=TEST_ENCRYPTION_KEY1)
# Rotate key.
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(
['rewrite', '-k', suri(object_uri)], return_stderr=True)
self.assertIn('Rotating', stderr)
self.AssertObjectUsesCSEK(suri(object_uri), TEST_ENCRYPTION_KEY2)
# Remove encryption.
boto_config_for_test2 = [('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY2)
]
with SetBotoConfigForTest(boto_config_for_test2):
stderr = self.RunGsUtil(
['rewrite', '-k', suri(object_uri)], return_stderr=True)
self.assertIn('Decrypting', stderr)
self.AssertObjectUnencrypted(suri(object_uri))
  def test_rewrite_key_rotation_bucket_subdir(self):
    # Rotating keys under one subdirectory must leave objects in other
    # subdirectories untouched.
    if self.test_api == ApiSelector.XML:
      return unittest.skip('Rewrite API is only supported in JSON.')
    bucket_uri = self.CreateBucket()
    object_contents = b'bar'
    rotate_subdir = suri(bucket_uri, 'bar')
    object_uri1 = self.CreateObject(bucket_uri=bucket_uri,
                                    object_name='foo/bar',
                                    contents=object_contents,
                                    encryption_key=TEST_ENCRYPTION_KEY1)
    object_uri2 = self.CreateObject(bucket_uri=bucket_uri,
                                    object_name='bar/foo',
                                    contents=object_contents,
                                    encryption_key=TEST_ENCRYPTION_KEY2)
    object_uri3 = self.CreateObject(bucket_uri=bucket_uri,
                                    object_name='bar/baz',
                                    contents=object_contents,
                                    encryption_key=TEST_ENCRYPTION_KEY3)
    object_uri4 = self.CreateObject(bucket_uri=bucket_uri,
                                    object_name='bar/qux',
                                    contents=object_contents)

    # Rotate subdir keys to TEST_ENCRYPTION_KEY3.
    boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY3),
                            ('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY2),
                            ('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY1)]
    # Wait until all four objects are listable before rewriting.
    self.AssertNObjectsInBucket(bucket_uri, 4)
    with SetBotoConfigForTest(boto_config_for_test):
      stderr = self.RunGsUtil(['rewrite', '-r', '-k', rotate_subdir],
                              return_stderr=True)
      self.assertIn('Rotating', stderr)  # Object 2.
      self.assertIn('Skipping %s' % suri(object_uri3), stderr)
      self.assertIn('Encrypting', stderr)  # Object 4.

    # First subdir should be unaffected.
    self.AssertObjectUsesCSEK(suri(object_uri1), TEST_ENCRYPTION_KEY1)
    for object_uri_str in (suri(object_uri2), suri(object_uri3),
                           suri(object_uri4)):
      self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY3)

    # Remove encryption in subdir.
    boto_config_for_test2 = [('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY3)
                            ]
    with SetBotoConfigForTest(boto_config_for_test2):
      stderr = self.RunGsUtil(['rewrite', '-r', '-k', rotate_subdir],
                              return_stderr=True)
      self.assertIn('Decrypting', stderr)

    # First subdir should be unaffected.
    self.AssertObjectUsesCSEK(suri(object_uri1), TEST_ENCRYPTION_KEY1)
    for object_uri_str in (suri(object_uri2), suri(object_uri3),
                           suri(object_uri4)):
      self.AssertObjectUnencrypted(object_uri_str)
def test_rewrite_with_nonkey_transform_works_when_key_is_unchanged(self):
# Tests that when a valid transformation flag aside from "-k" is supplied,
# the "-k" flag is not supplied, and the encryption key previously used to
# encrypt the target object matches the encryption_key in the user's boto
# config file (via hash comparison), that the rewrite command properly
# passes the same tuple for decryption and encryption, in addition to
# performing the other desired transformations.
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'bar',
encryption_key=TEST_ENCRYPTION_KEY1)
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(['rewrite', '-s', 'nearline',
suri(object_uri)],
return_stderr=True)
self.assertIn('Rewriting', stderr)
def test_rewrite_key_rotation_with_storage_class_change(self):
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'bar',
encryption_key=TEST_ENCRYPTION_KEY1)
# Rotate key and change storage class to nearline.
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(
['rewrite', '-s', 'nearline', '-k',
suri(object_uri)],
return_stderr=True)
self.assertIn('Rotating', stderr)
self.AssertObjectUsesCSEK(suri(object_uri), TEST_ENCRYPTION_KEY2)
stdout = self.RunGsUtil(['stat', suri(object_uri)], return_stdout=True)
self.assertRegexpMatchesWithFlags(
stdout,
r'Storage class:\s+NEARLINE',
flags=re.IGNORECASE,
msg=('Storage class appears not to have been changed.'))
def test_rewrite_with_only_storage_class_change(self):
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'bar')
# Change storage class to nearline.
stderr = self.RunGsUtil(['rewrite', '-s', 'nearline',
suri(object_uri)],
return_stderr=True)
self.assertIn('Rewriting', stderr)
stdout = self.RunGsUtil(['stat', suri(object_uri)], return_stdout=True)
self.assertRegexpMatchesWithFlags(
stdout,
r'Storage class:\s+NEARLINE',
flags=re.IGNORECASE,
msg=('Storage class appears not to have been changed.'))
def test_rewrite_to_same_storage_class_is_skipped(self):
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'bar')
stderr = self.RunGsUtil(['rewrite', '-s', 'standard',
suri(object_uri)],
return_stderr=True)
self.assertIn('Skipping %s' % suri(object_uri), stderr)
def test_rewrite_with_same_key_and_storage_class_is_skipped(self):
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
object_uri = self.CreateObject(contents=b'foo',
encryption_key=TEST_ENCRYPTION_KEY1,
storage_class='standard')
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(
['rewrite', '-k', '-s', 'standard',
suri(object_uri)],
return_stderr=True)
self.assertIn('Skipping %s' % suri(object_uri), stderr)
def test_rewrite_with_no_value_for_minus_s(self):
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
stderr = self.RunGsUtil(['rewrite', '-s', 'gs://some-random-name'],
return_stderr=True,
expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn('expects at least one URL', stderr)
  def test_rewrite_resume(self):
    # Keys unchanged between attempts: the rewrite should resume from the
    # tracker file rather than restart.
    self._test_rewrite_resume_or_restart(TEST_ENCRYPTION_KEY1,
                                         TEST_ENCRYPTION_KEY2)
  def test_rewrite_resume_restart_source_encryption_changed(self):
    # Source object re-encrypted between attempts: the rewrite must restart.
    self._test_rewrite_resume_or_restart(TEST_ENCRYPTION_KEY1,
                                         TEST_ENCRYPTION_KEY2,
                                         new_dec_key=TEST_ENCRYPTION_KEY3)
  def test_rewrite_resume_restart_dest_encryption_changed(self):
    # Destination encryption key changed between attempts: must restart.
    self._test_rewrite_resume_or_restart(TEST_ENCRYPTION_KEY1,
                                         TEST_ENCRYPTION_KEY2,
                                         new_enc_key=TEST_ENCRYPTION_KEY3)
  def test_rewrite_resume_restart_both_encryption_changed(self):
    # Both source and destination keys changed: must restart.
    self._test_rewrite_resume_or_restart(TEST_ENCRYPTION_KEY1,
                                         TEST_ENCRYPTION_KEY2,
                                         new_dec_key=TEST_ENCRYPTION_KEY3,
                                         new_enc_key=TEST_ENCRYPTION_KEY4)
def authorize_project_to_use_testing_kms_key(
self, key_name=testcase.KmsTestingResources.CONSTANT_KEY_NAME):
# Make sure our keyRing and cryptoKey exist.
keyring_fqn = self.kms_api.CreateKeyRing(
PopulateProjectId(None),
testcase.KmsTestingResources.KEYRING_NAME,
location=testcase.KmsTestingResources.KEYRING_LOCATION)
key_fqn = self.kms_api.CreateCryptoKey(keyring_fqn, key_name)
# Make sure that the service account for our default project is authorized
# to use our test KMS key.
self.RunGsUtil(['kms', 'authorize', '-k', key_fqn])
return key_fqn
  def test_rewrite_to_kms_then_unencrypted(self):
    """Tests rewriting a plain object to CMEK and back to unencrypted."""
    if self.test_api == ApiSelector.XML:
      return unittest.skip('Rewrite API is only supported in JSON.')
    key_fqn = self.authorize_project_to_use_testing_kms_key()
    object_uri = self.CreateObject(contents=b'foo')
    # Encrypt the object with the testing KMS key.
    boto_config_for_test = [('GSUtil', 'encryption_key', key_fqn)]
    with SetBotoConfigForTest(boto_config_for_test):
      stderr = self.RunGsUtil(
          ['rewrite', '-k', suri(object_uri)], return_stderr=True)
      self.assertIn('Encrypting', stderr)
      self.AssertObjectUsesCMEK(suri(object_uri), key_fqn)
    # Rewrite back to unencrypted and make sure no KMS key was used.
    boto_config_for_test = [('GSUtil', 'encryption_key', None)]
    with SetBotoConfigForTest(boto_config_for_test):
      stderr = self.RunGsUtil(
          ['rewrite', '-k', suri(object_uri)], return_stderr=True)
      self.assertIn('Decrypting', stderr)
      self.AssertObjectUnencrypted(suri(object_uri))
  def test_rewrite_to_kms_then_csek(self):
    """Tests rewriting from CMEK (KMS) encryption to CSEK encryption."""
    if self.test_api == ApiSelector.XML:
      return unittest.skip('Rewrite API is only supported in JSON.')
    key_fqn = self.authorize_project_to_use_testing_kms_key()
    object_uri = self.CreateObject(contents=b'foo')
    # Encrypt the object with the testing KMS key.
    boto_config_for_test = [('GSUtil', 'encryption_key', key_fqn)]
    with SetBotoConfigForTest(boto_config_for_test):
      stderr = self.RunGsUtil(
          ['rewrite', '-k', suri(object_uri)], return_stderr=True)
      self.assertIn('Encrypting', stderr)
      self.AssertObjectUsesCMEK(suri(object_uri), key_fqn)
    # Rewrite from CMEK to CSEK encryption.
    boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
    with SetBotoConfigForTest(boto_config_for_test):
      stderr = self.RunGsUtil(
          ['rewrite', '-k', suri(object_uri)], return_stderr=True)
      self.assertIn('Rotating', stderr)
      self.AssertObjectUsesCSEK(suri(object_uri), TEST_ENCRYPTION_KEY1)
  def test_rewrite_to_csek_then_kms(self):
    """Tests rewriting from CSEK encryption to CMEK (KMS) encryption."""
    if self.test_api == ApiSelector.XML:
      return unittest.skip('Rewrite API is only supported in JSON.')
    key_fqn = self.authorize_project_to_use_testing_kms_key()
    object_uri = self.CreateObject(contents=b'foo')
    # Encrypt the object with a customer-supplied key.
    boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
    with SetBotoConfigForTest(boto_config_for_test):
      stderr = self.RunGsUtil(
          ['rewrite', '-k', suri(object_uri)], return_stderr=True)
      self.assertIn('Encrypting', stderr)
      self.AssertObjectUsesCSEK(suri(object_uri), TEST_ENCRYPTION_KEY1)
    # Rewrite from CSEK to CMEK encryption; the old CSEK is needed as the
    # decryption key.
    boto_config_for_test = [
        ('GSUtil', 'encryption_key', key_fqn),
        ('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY1),
    ]
    with SetBotoConfigForTest(boto_config_for_test):
      stderr = self.RunGsUtil(
          ['rewrite', '-k', suri(object_uri)], return_stderr=True)
      self.assertIn('Rotating', stderr)
      self.AssertObjectUsesCMEK(suri(object_uri), key_fqn)
  def test_rewrite_with_no_encryption_key_operates_on_unencrypted_objects(self):
    """Rewrite without an encryption_key must still rewrite every object."""
    if self.test_api == ApiSelector.XML:
      return unittest.skip('Rewrite API is only supported in JSON.')
    # Since the introduction of default KMS keys for GCS buckets, rewriting
    # with no explicitly specified CSEK/CMEK can still result in the rewritten
    # objects being encrypted. Before KMS support, this would always result in
    # decrypted objects. With this new possibility, we want to always rewrite
    # every specified object when no encryption_key was set in the boto config,
    # since we don't know if the operation will end up decrypting the object or
    # implicitly encrypting it with the bucket's default KMS key.
    key_fqn = self.authorize_project_to_use_testing_kms_key()
    # Create an unencrypted object.
    bucket_uri = self.CreateBucket()
    object_uri = self.CreateObject(bucket_uri=bucket_uri,
                                   object_name='foo',
                                   contents=b'foo')
    # Set the bucket's default KMS key.
    self.RunGsUtil(['kms', 'encryption', '-k', key_fqn, suri(bucket_uri)])
    # Rewriting with no encryption_key should rewrite the object, resulting in
    # the bucket's default KMS key being used to encrypt it.
    with SetBotoConfigForTest([('GSUtil', 'encryption_key', None)]):
      stderr = self.RunGsUtil(
          ['rewrite', '-k', suri(object_uri)], return_stderr=True)
      self.assertIn('Rewriting', stderr)
      self.AssertObjectUsesCMEK(suri(object_uri), key_fqn)
  def _test_rewrite_resume_or_restart(self,
                                      initial_dec_key,
                                      initial_enc_key,
                                      new_dec_key=None,
                                      new_enc_key=None):
    """Tests that the rewrite command restarts if the object's key changed.

    Args:
      initial_dec_key: Initial key the object is encrypted with, used as
          decryption key in the first rewrite call.
      initial_enc_key: Initial encryption key to rewrite the object with,
          used as encryption key in the first rewrite call.
      new_dec_key: Decryption key for the second rewrite call; if specified,
          object will be overwritten with a new encryption key in between
          the first and second rewrite calls, and this key will be used for
          the second rewrite call.
      new_enc_key: Encryption key for the second rewrite call; if specified,
          this key will be used for the second rewrite call, otherwise the
          initial key will be used.

    Returns:
      None
    """
    if self.test_api == ApiSelector.XML:
      return unittest.skip('Rewrite API is only supported in JSON.')
    bucket_uri = self.CreateBucket()
    # maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 response from the service: 1 success, 1 failure prior to
    # completion.
    object_uri = self.CreateObject(bucket_uri=bucket_uri,
                                   object_name='foo',
                                   contents=(b'12' * ONE_MIB) + b'bar',
                                   prefer_json_api=True,
                                   encryption_key=initial_dec_key)
    gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
                            DiscardMessagesQueue(), self.default_provider)
    with SetBotoConfigForTest([('GSUtil', 'decryption_key1', initial_dec_key)]):
      src_obj_metadata = gsutil_api.GetObjectMetadata(
          object_uri.bucket_name,
          object_uri.object_name,
          provider=self.default_provider,
          fields=['bucket', 'contentType', 'etag', 'name'])
    # Rewriting in place: the destination metadata is the source metadata.
    dst_obj_metadata = src_obj_metadata
    tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
                                                  src_obj_metadata.name,
                                                  dst_obj_metadata.bucket,
                                                  dst_obj_metadata.name,
                                                  self.test_api)
    decryption_tuple = CryptoKeyWrapperFromKey(initial_dec_key)
    decryption_tuple2 = CryptoKeyWrapperFromKey(new_dec_key or initial_dec_key)
    encryption_tuple = CryptoKeyWrapperFromKey(initial_enc_key)
    encryption_tuple2 = CryptoKeyWrapperFromKey(new_enc_key or initial_enc_key)
    try:
      try:
        # First attempt: the halting callback aborts the rewrite after
        # 2 MiB so that a tracker file is left behind.
        gsutil_api.CopyObject(src_obj_metadata,
                              dst_obj_metadata,
                              progress_callback=HaltingRewriteCallbackHandler(
                                  ONE_MIB * 2).call,
                              max_bytes_per_call=ONE_MIB,
                              decryption_tuple=decryption_tuple,
                              encryption_tuple=encryption_tuple)
        self.fail('Expected RewriteHaltException.')
      except RewriteHaltException:
        pass
      # Tracker file should be left over.
      self.assertTrue(os.path.exists(tracker_file_name))
      if new_dec_key:
        # Recreate the object with a different encryption key.
        self.CreateObject(bucket_uri=bucket_uri,
                          object_name='foo',
                          contents=(b'12' * ONE_MIB) + b'bar',
                          prefer_json_api=True,
                          encryption_key=new_dec_key,
                          gs_idempotent_generation=urigen(object_uri))
      with SetBotoConfigForTest([('GSUtil', 'decryption_key1', new_dec_key or
                                  initial_dec_key)]):
        original_md5 = gsutil_api.GetObjectMetadata(
            src_obj_metadata.bucket,
            src_obj_metadata.name,
            fields=['customerEncryption', 'md5Hash']).md5Hash
      if new_dec_key or new_enc_key:
        # Keys changed, rewrite should be restarted.
        progress_callback = EnsureRewriteRestartCallbackHandler(ONE_MIB).call
      else:
        # Keys are the same, rewrite should be resumed.
        progress_callback = EnsureRewriteResumeCallbackHandler(ONE_MIB * 2).call
      # Now resume. Callback ensures the appropriate resume/restart behavior.
      gsutil_api.CopyObject(src_obj_metadata,
                            dst_obj_metadata,
                            progress_callback=progress_callback,
                            max_bytes_per_call=ONE_MIB,
                            decryption_tuple=decryption_tuple2,
                            encryption_tuple=encryption_tuple2)
      # Copy completed; tracker file should be deleted.
      self.assertFalse(os.path.exists(tracker_file_name))
      final_enc_key = new_enc_key or initial_enc_key
      with SetBotoConfigForTest([('GSUtil', 'encryption_key', final_enc_key)]):
        self.assertEqual(
            original_md5,
            gsutil_api.GetObjectMetadata(
                dst_obj_metadata.bucket,
                dst_obj_metadata.name,
                fields=['customerEncryption', 'md5Hash']).md5Hash,
            'Error: Rewritten object\'s hash doesn\'t match source object.')
    finally:
      # Clean up if something went wrong.
      DeleteTrackerFile(tracker_file_name)
| 47.05997 | 80 | 0.655007 |
76c7f1498069dc61b65eadc995da762a3f6bdbea | 11,446 | py | Python | petabvis/utils.py | PEtab-dev/petab-interactive-viz | c24039e919bf08eb01bb93ae72701c270a99b913 | [
"BSD-3-Clause"
] | 6 | 2021-01-29T14:17:27.000Z | 2021-02-04T18:04:54.000Z | petabvis/utils.py | PEtab-dev/petabvis | c24039e919bf08eb01bb93ae72701c270a99b913 | [
"BSD-3-Clause"
] | 3 | 2021-03-04T09:32:12.000Z | 2021-03-23T16:42:57.000Z | petabvis/utils.py | PEtab-dev/petabvis | c24039e919bf08eb01bb93ae72701c270a99b913 | [
"BSD-3-Clause"
] | null | null | null | import warnings
import numpy as np
import pandas as pd
import petab.C as ptc
import scipy
from PySide6 import QtCore
from PySide6.QtWidgets import QComboBox
import matplotlib.pyplot as plt
import pyqtgraph as pg
def get_legend_name(plot_spec: pd.Series):
    """
    Return the legend entry for the given plot specification.

    The explicit legend entry takes precedence over the dataset id.

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The name of the legend entry ("" if neither column is present)
    """
    if ptc.LEGEND_ENTRY in plot_spec.index:
        return plot_spec[ptc.LEGEND_ENTRY]
    if ptc.DATASET_ID in plot_spec.index:
        return plot_spec[ptc.DATASET_ID]
    return ""


def get_x_var(plot_spec: pd.Series):
    """
    Return the name of the x variable of the plot specification.

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The name of the x variable (defaults to "time")
    """
    if ptc.X_VALUES in plot_spec.index:
        return plot_spec[ptc.X_VALUES]
    return "time"
def get_observable_id(line_data: pd.DataFrame):
    """
    Return the observable id of the line data.

    Warns when the observable id is not unique, because plot coloring
    is chosen per observable.

    Arguments:
        line_data: A subset of a measurement df

    Return:
        The first observable id
    """
    observable_id = line_data[ptc.OBSERVABLE_ID].unique()
    if len(observable_id) > 1:
        # The two adjacent string literals previously lacked a separating
        # space, producing "...for line(IDs: ..." in the warning.
        warnings.warn("Observable ID is not unique for line "
                      "(IDs: " + ', '.join(observable_id) +
                      " might affect coloring)")
    return observable_id[0]
def get_y_var(plot_spec: pd.Series):
    """
    Return the observable which should be plotted on the y-axis.

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The observable for the y-axis ("" if not specified)
    """
    if ptc.Y_VALUES in plot_spec.index:
        return plot_spec[ptc.Y_VALUES]
    return ""


def get_x_offset(plot_spec: pd.Series):
    """
    Return the offset applied to x values.

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The x offset (0 if not specified)
    """
    if ptc.X_OFFSET in plot_spec.index:
        return float(plot_spec[ptc.X_OFFSET])
    return 0


def get_x_scale(plot_spec: pd.Series):
    """
    Return the scale of the x axis (lin, log or ordinal).

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The x scale ("lin" if not specified)
    """
    if ptc.X_SCALE in plot_spec.index:
        return plot_spec[ptc.X_SCALE]
    return "lin"


def get_y_scale(plot_spec: pd.Series):
    """
    Return the scale of the y axis (lin, log or ordinal).

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The y scale ("lin" if not specified)
    """
    if ptc.Y_SCALE in plot_spec.index:
        return plot_spec[ptc.Y_SCALE]
    return "lin"


def get_y_offset(plot_spec: pd.Series):
    """
    Return the offset applied to y values.

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The y offset (0 if not specified)
    """
    if ptc.Y_OFFSET in plot_spec.index:
        return float(plot_spec[ptc.Y_OFFSET])
    return 0


def get_x_label(plot_spec: pd.Series):
    """
    Return the label of the x axis.

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The label of the x axis (defaults to the x variable name)
    """
    if ptc.X_LABEL in plot_spec.index:
        return plot_spec[ptc.X_LABEL]
    return get_x_var(plot_spec)


def get_y_label(plot_spec: pd.Series):
    """
    Return the label of the y axis.

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The label of the y axis (defaults to the measurement column name)
    """
    if ptc.Y_LABEL in plot_spec.index:
        return plot_spec[ptc.Y_LABEL]
    return ptc.MEASUREMENT


def get_dataset_id(plot_spec: pd.Series):
    """
    Return the dataset id.

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The dataset id ("" if not specified)
    """
    if ptc.DATASET_ID in plot_spec.index:
        return plot_spec[ptc.DATASET_ID]
    return ""


def get_plot_type_data(plot_spec: pd.Series):
    """
    Return the plot type of the data.

    Arguments:
        plot_spec: A single row of a visualization df

    Return:
        The plot type ("MeanAndSD" if not specified)
    """
    if ptc.PLOT_TYPE_DATA in plot_spec.index:
        return plot_spec[ptc.PLOT_TYPE_DATA]
    return "MeanAndSD"
def reduce_condition_df(line_data, condition_df):
    """
    Reduce the condition df to the rows whose condition ids occur
    in the given measurement subset.

    Arguments:
        line_data: A subset of a measurement df
        condition_df: The condition df

    Return:
        The reduced condition df
    """
    cond_ids, first_occurrence = np.unique(
        line_data[ptc.SIMULATION_CONDITION_ID],
        return_index=True)
    # keep the ordering which was given by user from top to bottom
    # (avoid ordering by names '1','10','11','2',...)'
    cond_ids = cond_ids[np.argsort(first_occurrence)]
    return condition_df[condition_df.index.isin(cond_ids)]
def get_plot_title(visualization_df_rows: pd.DataFrame):
    """
    Return the title of the plot.

    Prefers the plot name, falls back to the plot id.

    Arguments:
        visualization_df_rows: Rows of a visualization df

    Return:
        The plot title ("" when no name/id is available)
    """
    if visualization_df_rows is None:
        return ""
    first_row = visualization_df_rows.iloc[0]
    if ptc.PLOT_NAME in visualization_df_rows.columns:
        return first_row[ptc.PLOT_NAME]
    if ptc.PLOT_ID in visualization_df_rows.columns:
        return first_row[ptc.PLOT_ID]
    return ""
def mean_replicates(line_data: pd.DataFrame, x_var: str = ptc.TIME,
                    y_var: str = ptc.MEASUREMENT):
    """
    Calculate the mean of the replicates.

    Note: The line_data already has to be reduced to the relevant
    simulationConditionIds for concentration plots.

    Arguments:
        line_data: A subset of the measurement file
        x_var: Name of the x-variable
        y_var: Name of the y-variable (measurement or simulation)

    Return:
        The means grouped by x_var as a numpy array
    """
    # Time plots group by time; concentration plots group by condition.
    group_col = ptc.TIME if x_var == ptc.TIME else ptc.SIMULATION_CONDITION_ID
    grouped_means = line_data[[y_var, group_col]].groupby(group_col).mean()
    return grouped_means[y_var].to_numpy()


def sd_replicates(line_data: pd.DataFrame, x_var: str, is_simulation: bool):
    """
    Calculate the standard deviation of the replicates.

    Arguments:
        line_data: A subset of the measurement file
        x_var: Name of the x-variable
        is_simulation: Boolean to check if the y variable
            is measurement or simulation

    Return:
        The standard deviations grouped by x_var as a numpy array
    """
    y_col = ptc.SIMULATION if is_simulation else ptc.MEASUREMENT
    # Time plots group by time; concentration plots group by condition.
    group_col = ptc.TIME if x_var == ptc.TIME else ptc.SIMULATION_CONDITION_ID
    # std with ddof = 0 (degrees of freedom)
    # to match np.std that is used in petab
    grouped_sds = line_data[[group_col, y_col]].groupby(group_col).std(ddof=0)
    return grouped_sds[y_col].to_numpy()
def sem_replicates(line_data: pd.DataFrame, x_var: str, is_simulation: bool):
    """
    Calculate the standard error of the mean of the replicates.

    Arguments:
        line_data: A subset of the measurement file
        x_var: Name of the x-variable
        is_simulation: Boolean to check if the y variable
            is measurement or simulation

    Return:
        The standard errors grouped by x_var as a numpy array
    """
    grouping = ptc.TIME
    if x_var != ptc.TIME:
        # for concentration plots we group by
        # simulationConditionId
        grouping = ptc.SIMULATION_CONDITION_ID
    sd = sd_replicates(line_data, x_var, is_simulation)
    # Number of replicates per group. The previous implementation iterated
    # over the GroupBy object directly, which yields (key, frame) tuples,
    # so len(...) was always 2 regardless of the actual replicate count.
    # GroupBy.size() returns the per-group counts in the same sorted key
    # order that sd_replicates' groupby uses, keeping sd and counts aligned.
    n_replicates = line_data.groupby(grouping).size().to_numpy()
    sem = sd / np.sqrt(n_replicates)
    return sem
def split_replicates(line_data: pd.DataFrame):
    """
    Split the line_data df into one df per replicate id.

    If no replicateId column is present, the input df is returned as
    the only element of the list.

    Arguments:
        line_data: A subset of the measurement file

    Return:
        A list of replicate dfs
    """
    if ptc.REPLICATE_ID not in line_data.columns:
        return [line_data]
    return [line_data[line_data[ptc.REPLICATE_ID] == replicate_id]
            for replicate_id in np.unique(line_data[ptc.REPLICATE_ID])]
def add_plotnames_to_cbox(exp_data: pd.DataFrame,
                          visualization_df: pd.DataFrame, cbox: QComboBox):
    """
    Add the name of every plot in the visualization df to the cbox.

    Falls back to plot ids when no plot names are given, and to the
    observable ids of the measurement df when no visualization df
    is provided.

    Arguments:
        exp_data: PEtab measurement table
        visualization_df: PEtab visualization table (may be None)
        cbox: The list of plots (UI)
    """
    if visualization_df is not None:
        plot_ids = np.unique(visualization_df[ptc.PLOT_ID])
        if ptc.PLOT_NAME in visualization_df.columns:
            # for every identical plot_id, the plot_name has to be the same
            plot_names = list(visualization_df[ptc.PLOT_NAME].unique())
            if len(plot_ids) != len(plot_names):
                warnings.warn(
                    "The number of plot ids should be" +
                    " the same as the number of plot names")
            for name in plot_names:
                cbox.addItem(name)
        else:
            # Reuse the ids computed above (previously np.unique was
            # recomputed and the loop variable shadowed the builtin `id`).
            for plot_id in plot_ids:
                cbox.addItem(plot_id)
    else:
        # the default plots are grouped by observable ID
        observable_ids = list(exp_data[ptc.OBSERVABLE_ID].unique())
        for observable_id in observable_ids:
            cbox.addItem(observable_id)
def get_signals(source):
    """
    Print all Qt signals implemented by `source`
    (debug helper only).
    """
    # Accept either a class or an instance.
    cls = source if isinstance(source, type) else type(source)
    signal_type = type(QtCore.Signal())
    print("Signals:")
    for attr_name in dir(source):
        try:
            if isinstance(getattr(cls, attr_name), signal_type):
                print(attr_name)
        except Exception:
            # Some attributes raise on access; report and move on.
            print("skipped")
def r_squared(measurements, simulations):
    """
    Return the R squared value between the measurement
    and simulation values (0 when either input is empty).
    """
    if not measurements or not simulations:
        return 0
    regression = scipy.stats.linregress(measurements, simulations)
    return regression.rvalue ** 2
def generate_color_map(cm_name: str):
    """
    Create a pyqtgraph Colormap corresponding
    to the matplotlib name of a colormap.

    Arguments:
        cm_name: Name of a matplotlib colormap.
    """
    # NOTE(review): `.colors` only exists on listed colormaps (e.g. "tab10");
    # a continuous map such as "viridis" would raise AttributeError here —
    # confirm callers only pass listed colormap names.
    # Scale the 0..1 RGB triples to 0..255 as expected by pyqtgraph.
    colors = (np.array(plt.get_cmap(cm_name).colors)*255).tolist()
    # Evenly spaced stops spanning the full [0, 1] range.
    positions = np.linspace(0, 1, len(colors))
    pg_map = pg.ColorMap(positions, colors)
    return pg_map
| 26.43418 | 77 | 0.650446 |
eb4c086ef83f644f754023b484af770e47c89233 | 4,785 | py | Python | server/rest/competitor.py | jjojala/results | bfcf6820ff4b2dd05d8974bc98b0a59bc6c3585f | [
"Apache-2.0"
] | null | null | null | server/rest/competitor.py | jjojala/results | bfcf6820ff4b2dd05d8974bc98b0a59bc6c3585f | [
"Apache-2.0"
] | 7 | 2015-11-25T22:26:25.000Z | 2016-10-18T22:14:35.000Z | server/rest/competitor.py | jjojala/results | bfcf6820ff4b2dd05d8974bc98b0a59bc6c3585f | [
"Apache-2.0"
] | null | null | null | from flask import request
from flask_restful import Resource, reqparse
from .notification import CREATED, UPDATED, PATCHED, REMOVED
import rest.timeservice as timeservice
from util.patch import patch, PatchConflict
# In-memory storage of all competitors (no persistence across restarts).
competitors = [
]
# Keys of the kwargs dict produced by the resources' makeArgs() helpers.
_NOTIFICATION_ARG = "notifications"
_API_ARG = "api"
# Entity type name used in notifications and error messages.
_TYPE = "Competitor"
class Competitors(Resource):
    """REST resource for the competitor collection."""

    @staticmethod
    def makeArgs(notifications, api):
        """Build the kwargs flask-restful passes to __init__.

        Declared static so it behaves identically whether called on the
        class or on an instance (previously an instance call would have
        mis-bound `self` to `notifications`).
        """
        return {
            _NOTIFICATION_ARG: notifications,
            _API_ARG: api }

    def __init__(self, **kwargs):
        # Notification hub used to broadcast entity changes.
        self._notifications = kwargs[_NOTIFICATION_ARG]
        # Base URI of the API (used for Location headers elsewhere).
        self._api = kwargs[_API_ARG]

    @timeservice.time_service
    def get(self):
        """Return the full list of competitors."""
        return competitors, 200
class Competitor(Resource):
    """REST resource for a single competitor, addressed by its id.

    State lives in the module-level `competitors` list; every mutation is
    broadcast through the notification hub supplied via makeArgs().
    """
    def makeArgs(notifications, api):
        # kwargs forwarded by flask-restful (resource_class_kwargs).
        # NOTE(review): intended to be called on the class, not on an
        # instance (no self / @staticmethod) — confirm call sites.
        return {
            _NOTIFICATION_ARG: notifications,
            _API_ARG: api }
    def __init__(self, **kwargs):
        # Notification hub used to broadcast entity changes.
        self._notifications = kwargs[_NOTIFICATION_ARG]
        # Base URI used to build the Location header on create.
        self._api = kwargs[_API_ARG]
    @timeservice.time_service
    def get(self, id):
        # Fetch one competitor by id (linear scan of the shared list).
        for i in competitors:
            if (id == i["id"]):
                return i, 200
        return "{} with id {} not found".format(_TYPE, id), 404
    @timeservice.time_service
    def post(self, id):
        # Create a new competitor with the given id; 409 on duplicates.
        parser = reqparse.RequestParser()
        parser.add_argument("start")
        parser.add_argument("finish")
        parser.add_argument("tags", type=list, location='json')
        parser.add_argument("status")
        args = parser.parse_args()
        for i in competitors:
            if (id == i["id"]):
                return "{} with id {} already exists".format(
                    _TYPE, id), 409
        item = {
            "id": id,
            "start": args["start"],
            "finish": args["finish"],
            "tags": args["tags"],
            "status": args["status"]
        }
        competitors.append(item)
        self._notifications.submit(CREATED, _TYPE, item)
        # Location header points at the newly created resource.
        return item, 201, { 'Location': self._api + id }
    @timeservice.time_service
    def put(self, id):
        # Full replacement of all mutable fields of an existing competitor.
        parser = reqparse.RequestParser()
        parser.add_argument("start")
        parser.add_argument("finish")
        parser.add_argument("tags", type=list, location='json')
        parser.add_argument("status")
        args = parser.parse_args()
        for i in competitors:
            if (id == i["id"]):
                i["start"] = args["start"]
                i["finish"] = args["finish"]
                i["tags"] = args["tags"]
                i["status"] = args["status"]
                self._notifications.submit(UPDATED, _TYPE, i)
                return i, 200
        return "{} with id {} not found".format(_TYPE, id), 404
    @timeservice.time_service
    def delete(self, id):
        # Remove the competitor by rebuilding the module-level list.
        global competitors
        new = [i for i in competitors if i["id"] != id]
        if (len(new) < len(competitors)):
            competitors = new
            self._notifications.submit(REMOVED, _TYPE, id)
            return "{} is deleted.".format(id), 200
        return "{} with id {} not found".format(_TYPE, id), 404
    @timeservice.time_service
    def patch(self, id):
        # Apply a diff to the stored item; 409 when the patch conflicts.
        diff = request.json
        # TODO: explicitly lock the item
        for i in competitors:
            if (id == i["id"]):
                try:
                    # `patch` resolves to the imported util.patch function
                    # (the method name is a class attribute, which is not
                    # in scope inside the method body).
                    patched = patch(i, diff)
                    i["start"] = patched["start"]
                    i["finish"] = patched["finish"]
                    i["tags"] = patched["tags"]
                    i["status"] = patched["status"]
                    # NOTE(review): the diff (not the patched item) is
                    # broadcast — confirm subscribers expect the diff.
                    self._notifications.submit(PATCHED, _TYPE, diff)
                    return i, 200
                except PatchConflict as ex:
                    return "Patching {} with id {} failed: {}".format(
                        _TYPE, id, str(ex)), 409
        return "{} with id {} not found".format(_TYPE, id), 404
137f36702455b49c44c351bfa71bcad0b6d1a270 | 994 | py | Python | timeflux_ml/utils/sklearn_helpers.py | sylvchev/timeflux_ml | 31fefc41ff5f62fc770e87c2963e872768301de3 | [
"MIT"
] | 2 | 2019-12-11T10:53:19.000Z | 2020-03-21T17:35:21.000Z | timeflux_ml/utils/sklearn_helpers.py | sylvchev/timeflux_ml | 31fefc41ff5f62fc770e87c2963e872768301de3 | [
"MIT"
] | 1 | 2019-04-03T13:45:26.000Z | 2019-04-03T13:45:26.000Z | timeflux_ml/utils/sklearn_helpers.py | sylvchev/timeflux_ml | 31fefc41ff5f62fc770e87c2963e872768301de3 | [
"MIT"
] | 1 | 2019-04-03T09:17:36.000Z | 2019-04-03T09:17:36.000Z | from sklearn.pipeline import Pipeline
from timeflux_ml.utils.import_helpers import make_object
def make_pipeline(steps, params):
    """Build a sklearn Pipeline from step specifications and parameters.

    Args:
        steps (dict): Maps step name to the fully qualified name of the
            estimator to instantiate for that step.
        params (dict): string -> object. Parameters passed to the fit method of
            each step, where each parameter name is prefixed
            such that parameter `p` for step `s` has key `s__p`.

    Returns:
        pipeline: sklearn Pipeline object.

    Raises:
        ValueError: If `params` contains keys that do not match the
            pipeline's steps/parameters.
    """
    step_estimators = [(step_name, make_object(step_fullname))
                       for step_name, step_fullname in steps.items()]
    pipeline = Pipeline(steps=step_estimators)
    try:
        pipeline.set_params(**params)
    except ValueError as err:
        # Chain the original exception so the offending parameter name
        # stays visible (previously the cause was silently discarded).
        raise ValueError(
            "Could not set params of pipeline. Check the validity. ") from err
    return pipeline
| 36.814815 | 103 | 0.642857 |
2a917af395c9ea6d4560d6b6ec791ea3516362c0 | 2,516 | py | Python | context-n.py | adlez27/context-n | 37660632d8b0ec3837bae779cf1c0fa64248ae13 | [
"MIT"
] | 2 | 2019-08-05T09:32:57.000Z | 2020-07-16T21:58:52.000Z | context-n.py | adlez27/context-n | 37660632d8b0ec3837bae779cf1c0fa64248ae13 | [
"MIT"
] | 2 | 2019-08-25T00:03:04.000Z | 2021-07-06T06:27:30.000Z | context-n.py | adlez27/context-n | 37660632d8b0ec3837bae779cf1c0fa64248ae13 | [
"MIT"
] | null | null | null | import sys
import os.path
import shutil
import configparser
ust = configparser.RawConfigParser(allow_no_value=True)
ust.optionxform = lambda option: option
ust.read(sys.argv[1])
config = configparser.RawConfigParser(allow_no_value=True)
config.optionxform = lambda option: option
if (os.path.exists(ust['#SETTING']['VoiceDir'] + "\\context-n.ini")):
config.read(ust['#SETTING']['VoiceDir'] + "\\context-n.ini")
else:
shutil.copyfile('default.ini', ust['#SETTING']['VoiceDir'] + "\\context-n.ini")
config.read(ust['#SETTING']['VoiceDir'] + "\\context-n.ini")
# TODO: GUI opens to create new per-vb settings
if ('#NEXT' in ust):
first_note_dex = int(ust.sections()[3][-4:])
last_note_count = len(ust.sections())-5
partial = True
else:
first_note_dex = int(ust.sections()[2][-4:])
last_note_count = len(ust.sections())-4
partial = False
def edit(current, next):
    """Rewrite the placeholder nasal in note `current`.

    Replaces 'n' (romaji mode) or 'ん' (kana mode) in the lyric of the
    note with index `current`, picking the replacement from the mapping
    table according to the lyric of the following note `next`. In romaji
    mode, lyrics containing any configured exception are left untouched.
    """
    lyric = ust['#' + current]['Lyric']
    romaji_mode = config['settings'].getboolean('romaji')
    placeholder = 'n' if romaji_mode else 'ん'
    if placeholder not in lyric:
        return
    if romaji_mode and any(ex in lyric for ex in config['exceptions']):
        return
    following_lyric = ust['#' + next]['Lyric']
    # First mapping key found in the next note's lyric wins.
    for mapping_key in config['mapping']:
        if mapping_key in following_lyric:
            ust['#' + current]['Lyric'] = lyric.replace(
                placeholder, config['mapping'].get(mapping_key))
            break
# Walk the note sections in order, editing each note's lyric based on
# the lyric of the note that follows it.
note_count = 0
for note in ust.sections():
    if ((note != '#VERSION') and (note != '#SETTING') and (note != '#PREV') and (note != '#NEXT') and (note !='#TRACKEND')):
        current_note_dex = str(first_note_dex + note_count).zfill(4)
        next_note_dex = str(first_note_dex + note_count + 1).zfill(4)
        if(note_count == last_note_count):
            if(partial):
                # In a partial selection the note after the selection is
                # available as the [#NEXT] section.
                next_note_dex = 'NEXT'
                edit(current_note_dex, next_note_dex)
            # On a full track the final note has no successor, so it is
            # intentionally left unedited.
            break
        else:
            edit(current_note_dex, next_note_dex)
        note_count += 1
# Write the modified UST back for UTAU to pick up; UTAU expects no
# spaces around '='.
with open(sys.argv[1], 'w') as output:
    ust.write(output, space_around_delimiters=False)
793c87b3beb68a8c920d9048fc0f9192a5476779 | 1,273 | py | Python | src/__init__.py | DiSiqueira/DSImgur | 11bb392a0253adf278d3dbfb469c41eec14c5706 | [
"MIT"
] | 119 | 2016-01-28T11:02:58.000Z | 2020-11-30T15:05:09.000Z | src/__init__.py | DiSiqueira/Imgur | 11bb392a0253adf278d3dbfb469c41eec14c5706 | [
"MIT"
] | 3 | 2016-03-20T16:45:13.000Z | 2017-09-29T15:12:07.000Z | src/__init__.py | DiSiqueira/Imgur | 11bb392a0253adf278d3dbfb469c41eec14c5706 | [
"MIT"
] | 6 | 2016-04-03T02:59:26.000Z | 2019-04-17T03:19:09.000Z | #!/usr/bin/env python
"""
Diego Martins de Siqueira
MIT License
DSImgur - Easily download images, Albums, Galleries and entire Profiles from Imgur. The most powerful Imgur Downloader!! You can use as program or as module!
"""
import sys
import argparse
from DSImgur import DSImgur
def main(argv=sys.argv[0]):
parser = argparse.ArgumentParser(
description = "Easily download images, Albums, Galleries and entire Profiles from Imgur. The most powerful Imgur Downloader!! You can use as program or as module!")
parser.add_argument("--workers", type=int, default=5,
help="Number of parallel downloads. The default is 5.")
parser.add_argument("--output", type=str, default="downloads",
help="Output folder")
parser.add_argument("--http", action="store_true",
help="Force use HTTP (Insecure). Default is HTTPS")
parser.add_argument('urls', type=str, nargs='+',
help='URLs to be downloaded')
args = parser.parse_args()
protocol = 'https://'
if args.http:
protocol = 'http://'
try:
i = DSImgur(args.workers, args.output,protocol)
i.addUrl(args.urls)
i.download()
print 'All images have been downloaded.'
except KeyboardInterrupt:
print 'Interrupt received, stopping downloads'
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:]) | 28.931818 | 166 | 0.725844 |
261af8a0f5645c441c5fcb6f5cbbd8b76519ebd4 | 1,702 | py | Python | etc/n_CR/opt_2/pp2.py | jimsrc/seatos | e775dba1a2a96ff44b837cf8d85101ccfef302b1 | [
"MIT"
] | null | null | null | etc/n_CR/opt_2/pp2.py | jimsrc/seatos | e775dba1a2a96ff44b837cf8d85101ccfef302b1 | [
"MIT"
] | null | null | null | etc/n_CR/opt_2/pp2.py | jimsrc/seatos | e775dba1a2a96ff44b837cf8d85101ccfef302b1 | [
"MIT"
#!/usr/bin/env ipython
# Fit a Forbush-decrease profile to superposed-epoch sheath + magnetic-cloud
# (MC) data: build a common time axis, estimate the quiet-solar-wind rms
# level, and fit the cosmic-ray response with funcs.fit_forbush.
from pylab import *
from load_data import sh, mc, cr
import funcs as ff
#++++++++++++++++++++++++++++++++++++++++++++++++++++
# MC samples (t > 0) are rescaled onto [1, 4]; sheath samples keep t < 1.
mc.cc = mc.t > 0.0
mc.tout = 3.0 * mc.t[mc.cc] + 1.0
mc.rms = mc.avr[mc.cc]
cr.mc.crs = cr.mc.avr[mc.cc]

sh.cc = sh.t < 1.0
sh.tout = sh.t[sh.cc]
sh.rms = sh.avr[sh.cc]
cr.sh.crs = cr.sh.avr[sh.cc]

tpre = 0.0  # -1.0  # time before which data is used for the quiet-SW rms
rms_o = np.mean(sh.rms[sh.t < tpre])  # 0.06 #0.025 #np.mean(sh.rms[sh.t<-1.0]) #0.03

t = np.concatenate([sh.tout, mc.tout])
rms = np.concatenate([sh.rms, mc.rms])
crs = np.concatenate([cr.sh.crs, cr.mc.crs])
# NOTE: all right-hand sides are evaluated before the assignment, so the
# three masks use the *same* original t and the arrays stay aligned.
t, rms, crs = t[t >= 0.0], rms[t >= 0.0], crs[t >= 0.0]
dt = t[1:-1] - t[0:-2]

cte = 0.0
q = -440.0  # -170.0
# --- 'fc' is the piecewise version of 'rms'
cc = ((rms - rms_o) >= 0.0) & (t < 5.0)
fc = np.zeros(rms.size)
fc[cc] = (rms - rms_o)[cc]
#tau = 3.0
#ncr = nCR2([t, fc], tau, q)

#++++++++++++++++++++++++++++++++++++++++++++++++ figure
fig = figure(1, figsize=(6, 4))
ax0 = fig.add_subplot(111)
ax1 = ax0.twinx()

# --- right-hand axis plot
ax1.plot(t[1:-1], fc[1:-1], c='gray')

tau_o, q = 2.0, -400.0
fit = ff.fit_forbush([t, fc, crs], [tau_o, q])
fit.make_fit()
# print() call form works under both Python 2 and 3 (the original Python-2
# print statement is a SyntaxError on Python 3).
print(fit.par)
#ncr = nCR2([t, fc], tau, q)
"""
#--- plot izq
ax0.plot(t, ncr, label='$\\tau=%2.2g$'%tau)
ax0.plot(t, crs, '-o', c='k', ms=3)
ax0.axhline(y=0.0, c='g')
ax0.axvline(x=0, ls='--', c='gray', lw=3)
ax0.axvline(x=1, ls='--', c='gray', lw=3)
ax0.axvline(x=4, ls='--', c='gray', lw=3)
ax0.legend()
ax0.grid()
ax0.set_xlim(-2,+7)
ax0.set_ylim(-10, +5.)
fname_fig = './test_2.png'
savefig(fname_fig, dpi=135, bbox_inches='tight')
close()
"""
| 25.787879 | 84 | 0.525264 |
5cb040572bc6f348ca181000ce15b6f18e1d0332 | 1,190 | py | Python | configs/detection/fsce/voc/split2/fsce_r101_fpn_voc-split2_3shot-fine-tuning.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 376 | 2021-11-23T13:29:57.000Z | 2022-03-30T07:22:14.000Z | configs/detection/fsce/voc/split2/fsce_r101_fpn_voc-split2_3shot-fine-tuning.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 51 | 2021-11-23T14:45:08.000Z | 2022-03-30T03:37:15.000Z | configs/detection/fsce/voc/split2/fsce_r101_fpn_voc-split2_3shot-fine-tuning.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
# mmfewshot config: FSCE fine-tuning on PASCAL VOC split 2, 3-shot setting.
# FIX: the `_base_ = [` opener was fused with non-Python dataset-dump residue
# on its line, making this module a syntax error; restored here.
_base_ = [
    '../../../_base_/datasets/fine_tune_based/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py', '../../fsce_r101_fpn.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
# FewShotVOCDefaultDataset predefine ann_cfg for model reproducibility.
data = dict(
    train=dict(
        type='FewShotVOCDefaultDataset',
        ann_cfg=[dict(method='FSCE', setting='SPLIT2_3SHOT')],
        num_novel_shots=3,
        num_base_shots=3,
        classes='ALL_CLASSES_SPLIT2'),
    val=dict(classes='ALL_CLASSES_SPLIT2'),
    test=dict(classes='ALL_CLASSES_SPLIT2'))
evaluation = dict(
    interval=4500,
    class_splits=['BASE_CLASSES_SPLIT2', 'NOVEL_CLASSES_SPLIT2'])
checkpoint_config = dict(interval=4500)
optimizer = dict(lr=0.001)
lr_config = dict(warmup_iters=200, gamma=0.5, step=[5000, 7000])
runner = dict(max_iters=9000)
# base model needs to be initialized with following script:
# tools/detection/misc/initialize_bbox_head.py
# please refer to configs/detection/fsce/README.md for more details.
load_from = ('work_dirs/fsce_r101_fpn_voc-split2_base-training/'
             'base_model_random_init_bbox_head.pth')
| 41.034483 | 71 | 0.720168 |
849f40e3005f3288885373a5d1d68079ab91084f | 40,586 | py | Python | train.py | Robert-JunWang/pytorch-image-models | 7c67d6aca992f039eece0af5f7c29a43d48c00e4 | [
"Apache-2.0"
] | 5 | 2022-03-03T00:42:03.000Z | 2022-03-25T04:29:56.000Z | train.py | Robert-JunWang/pytorch-image-models | 7c67d6aca992f039eece0af5f7c29a43d48c00e4 | [
"Apache-2.0"
] | null | null | null | train.py | Robert-JunWang/pytorch-image-models | 7c67d6aca992f039eece0af5f7c29a43d48c00e4 | [
"Apache-2.0"
] | 1 | 2022-03-22T23:34:00.000Z | 2022-03-22T23:34:00.000Z | #!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint,\
convert_splitbn_model, model_parameters
from timm.utils import *
from timm.loss import *
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
# Optional-dependency probes: each flag records whether a capability is
# available in this environment so the rest of the script can branch on it.
try:
    # NVIDIA Apex provides its own AMP, DDP and SyncBN implementations.
    from apex import amp
    from apex.parallel import DistributedDataParallel as ApexDDP
    from apex.parallel import convert_syncbn_model
    has_apex = True
except ImportError:
    has_apex = False

# Native AMP exists when torch.cuda.amp.autocast is present (PyTorch >= 1.6);
# older torch versions raise AttributeError on the getattr.
has_native_amp = False
try:
    if getattr(torch.cuda.amp, 'autocast') is not None:
        has_native_amp = True
except AttributeError:
    pass

try:
    import wandb
    has_wandb = True
except ImportError:
    has_wandb = False

# Let cuDNN benchmark kernel choices; beneficial for fixed input shapes.
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
# FIX: several help strings had unbalanced parentheses, the --sched help
# claimed a "step" default while the actual default is 'cosine', and the
# --aa statement carried a stray trailing comma (a throwaway tuple).
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
                    help='YAML config file specifying default arguments')

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')

# Dataset parameters
parser.add_argument('data_dir', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
                    help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
                    help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
                    help='dataset validation split (default: validation)')
parser.add_argument('--dataset-download', action='store_true', default=False,
                    help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
                    help='path to class to idx mapping file (default: "")')

# Model parameters
parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
                    help='Name of model to train (default: "resnet50")')
parser.add_argument('--pretrained', action='store_true', default=False,
                    help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
                    help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
                    help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
                    help='number of label classes (Model default if None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
                    help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
                    help='Image patch size (default: None => model default)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
                    metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
                    metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
                    help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
                    help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N',
                    help='validation batch size override (default: None)')

# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
                    help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                    help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                    help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=2e-5,
                    help='weight decay (default: 2e-5)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                    help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
                    help='Gradient clipping mode. One of ("norm", "value", "agc")')

# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
                    help='learning rate (default: 0.05)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                    help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                    help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                    help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
                    help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT',
                    help='amount to decay each learning rate cycle (default: 0.5)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
                    help='learning rate cycle limit, cycles enabled if > 1')
parser.add_argument('--lr-k-decay', type=float, default=1.0,
                    help='learning rate k-decay for cosine/poly (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
                    help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR',
                    help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
                    help='number of epochs to train (default: 300)')
parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
                    help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=100, metavar='N',
                    help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
                    help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                    help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                    help='LR decay rate (default: 0.1)')

# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
                    help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
                    help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
                    help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
                    help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
                    help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                    help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default=None, metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". (default: None)')
parser.add_argument('--aug-repeats', type=int, default=0,
                    help='Number of augmentation repetitions (distributed training only) (default: 0)')
parser.add_argument('--aug-splits', type=int, default=0,
                    help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd-loss', action='store_true', default=False,
                    help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--bce-loss', action='store_true', default=False,
                    help='Enable BCE loss w/ Mixup/CutMix use.')
parser.add_argument('--bce-target-thresh', type=float, default=None,
                    help='Threshold for binarizing softened BCE targets (default: None, disabled)')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
                    help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='pixel',
                    help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
                    help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
                    help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.0,
                    help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix', type=float, default=0.0,
                    help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                    help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
                    help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                    help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
                    help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
                    help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
                    help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
                    help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                    help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
                    help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
                    help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
                    help='Drop block rate (default: None)')

# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-momentum', type=float, default=None,
                    help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
                    help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
                    help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='reduce',
                    help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
                    help='Enable separate BN layers per augmentation split.')

# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
                    help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
                    help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
                    help='decay factor for model weights moving average (default: 0.9998)')

# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 42)')
parser.add_argument('--worker-seeding', type=str, default='all',
                    help='worker seed mode (default: all)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
                    help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',
                    help='number of checkpoints to keep (default: 10)')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
                    help='how many training processes to use (default: 4)')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input bathes every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
                    help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
                    help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
                    help='Use Native Torch AMP mixed precision')
parser.add_argument('--no-ddp-bb', action='store_true', default=False,
                    help='Force broadcast buffers for native DDP to off.')
parser.add_argument('--channels-last', action='store_true', default=False,
                    help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
                    help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
                    help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
                    help='path to output folder (default: none, current dir)')
parser.add_argument('--experiment', default='', type=str, metavar='NAME',
                    help='name of train experiment, name of sub-folder for output')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
                    help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
                    help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
                    help='convert model torchscript for inference')
parser.add_argument('--fuser', default='', type=str,
                    help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--log-wandb', action='store_true', default=False,
                    help='log training and validation metrics to wandb')
def _parse_args():
    """Resolve CLI arguments, optionally seeded from a YAML config file.

    Returns:
        (args, args_text): the parsed argparse Namespace and a YAML dump of
        its contents (useful for saving alongside training outputs).
    """
    # First pass: extract only --config so its contents can become defaults.
    known, remaining = config_parser.parse_known_args()
    if known.config:
        with open(known.config, 'r') as cfg_file:
            # Values from the YAML file override the parser's built-in
            # defaults, but explicit CLI flags still win below.
            parser.set_defaults(**yaml.safe_load(cfg_file))
    # Second pass: parse everything else against the (possibly updated) defaults.
    args = parser.parse_args(remaining)
    # Serialize the final namespace so it can be written to the output dir later.
    return args, yaml.safe_dump(args.__dict__, default_flow_style=False)
def main():
    """Entry point: set up model, data, optimization and run the train loop.

    Phases: arg parsing -> (optional) wandb -> distributed init -> AMP
    resolution -> model creation -> optimizer/scheduler -> datasets/loaders
    -> loss functions -> checkpoint saver -> epoch loop.
    """
    setup_default_logging()
    args, args_text = _parse_args()

    if args.log_wandb:
        if has_wandb:
            wandb.init(project=args.experiment, config=args)
        else:
            _logger.warning("You've requested to log metrics to wandb but package not found. "
                            "Metrics not being logged to wandb, try `pip install wandb`")

    args.prefetcher = not args.no_prefetcher
    # Distributed mode is inferred from the launcher-provided WORLD_SIZE env var.
    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
    args.device = 'cuda:0'
    args.world_size = 1
    args.rank = 0  # global rank
    if args.distributed:
        args.device = 'cuda:%d' % args.local_rank
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()
        _logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
                     % (args.rank, args.world_size))
    else:
        _logger.info('Training with a single process on 1 GPUs.')
    assert args.rank >= 0

    # resolve AMP arguments based on PyTorch / Apex availability
    use_amp = None
    if args.amp:
        # `--amp` chooses native amp before apex (APEX ver not actively maintained)
        if has_native_amp:
            args.native_amp = True
        elif has_apex:
            args.apex_amp = True
    if args.apex_amp and has_apex:
        use_amp = 'apex'
    elif args.native_amp and has_native_amp:
        use_amp = 'native'
    elif args.apex_amp or args.native_amp:
        _logger.warning("Neither APEX or native Torch AMP is available, using float32. "
                        "Install NVIDA apex or upgrade to PyTorch 1.6")

    # seed is offset by rank so each process draws different augmentations
    random_seed(args.seed, args.rank)

    if args.fuser:
        set_jit_fuser(args.fuser)

    model = create_model(
        args.model,
        pretrained=args.pretrained,
        num_classes=args.num_classes,
        drop_rate=args.drop,
        drop_connect_rate=args.drop_connect,  # DEPRECATED, use drop_path
        drop_path_rate=args.drop_path,
        drop_block_rate=args.drop_block,
        global_pool=args.gp,
        bn_momentum=args.bn_momentum,
        bn_eps=args.bn_eps,
        scriptable=args.torchscript,
        checkpoint_path=args.initial_checkpoint)
    if args.num_classes is None:
        assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
        args.num_classes = model.num_classes  # FIXME handle model default vs config num_classes more elegantly

    if args.local_rank == 0:
        _logger.info(
            f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')

    data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)

    # setup augmentation batch splits for contrastive loss or split bn
    num_aug_splits = 0
    if args.aug_splits > 0:
        assert args.aug_splits > 1, 'A split of 1 makes no sense'
        num_aug_splits = args.aug_splits

    # enable split bn (separate bn stats per batch-portion)
    if args.split_bn:
        assert num_aug_splits > 1 or args.resplit
        model = convert_splitbn_model(model, max(num_aug_splits, 2))

    # move model to GPU, enable channels last layout if set
    model.cuda()
    if args.channels_last:
        model = model.to(memory_format=torch.channels_last)

    # setup synchronized BatchNorm for distributed training
    if args.distributed and args.sync_bn:
        assert not args.split_bn
        if has_apex and use_amp == 'apex':
            # Apex SyncBN preferred unless native amp is activated
            model = convert_syncbn_model(model)
        else:
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        if args.local_rank == 0:
            _logger.info(
                'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
                'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')

    if args.torchscript:
        assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
        assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
        model = torch.jit.script(model)

    optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))

    # setup automatic mixed-precision (AMP) loss scaling and op casting
    amp_autocast = suppress  # do nothing
    loss_scaler = None
    if use_amp == 'apex':
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
        loss_scaler = ApexScaler()
        if args.local_rank == 0:
            _logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
    elif use_amp == 'native':
        amp_autocast = torch.cuda.amp.autocast
        loss_scaler = NativeScaler()
        if args.local_rank == 0:
            _logger.info('Using native Torch AMP. Training in mixed precision.')
    else:
        if args.local_rank == 0:
            _logger.info('AMP not enabled. Training in float32.')

    # optionally resume from a checkpoint
    resume_epoch = None
    if args.resume:
        resume_epoch = resume_checkpoint(
            model, args.resume,
            optimizer=None if args.no_resume_opt else optimizer,
            loss_scaler=None if args.no_resume_opt else loss_scaler,
            log_info=args.local_rank == 0)

    # setup exponential moving average of model weights, SWA could be used here too
    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEmaV2(
            model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)
        if args.resume:
            load_checkpoint(model_ema.module, args.resume, use_ema=True)

    # setup distributed training
    if args.distributed:
        if has_apex and use_amp == 'apex':
            # Apex DDP preferred unless native amp is activated
            if args.local_rank == 0:
                _logger.info("Using NVIDIA APEX DistributedDataParallel.")
            model = ApexDDP(model, delay_allreduce=True)
        else:
            if args.local_rank == 0:
                _logger.info("Using native Torch DistributedDataParallel.")
            model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb)
        # NOTE: EMA model does not need to be wrapped by DDP

    # setup learning rate schedule and starting epoch
    lr_scheduler, num_epochs = create_scheduler(args, optimizer)
    start_epoch = 0
    if args.start_epoch is not None:
        # a specified start_epoch will always override the resume epoch
        start_epoch = args.start_epoch
    elif resume_epoch is not None:
        start_epoch = resume_epoch
    if lr_scheduler is not None and start_epoch > 0:
        lr_scheduler.step(start_epoch)

    if args.local_rank == 0:
        _logger.info('Scheduled epochs: {}'.format(num_epochs))

    # create the train and eval datasets
    dataset_train = create_dataset(
        args.dataset, root=args.data_dir, split=args.train_split, is_training=True,
        class_map=args.class_map,
        download=args.dataset_download,
        batch_size=args.batch_size,
        repeats=args.epoch_repeats)
    dataset_eval = create_dataset(
        args.dataset, root=args.data_dir, split=args.val_split, is_training=False,
        class_map=args.class_map,
        download=args.dataset_download,
        batch_size=args.batch_size)

    # setup mixup / cutmix
    collate_fn = None
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_args = dict(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.num_classes)
        if args.prefetcher:
            assert not num_aug_splits  # collate conflict (need to support deinterleaving in collate mixup)
            collate_fn = FastCollateMixup(**mixup_args)
        else:
            mixup_fn = Mixup(**mixup_args)

    # wrap dataset in AugMix helper
    if num_aug_splits > 1:
        dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)

    # create data loaders w/ augmentation pipeiine
    train_interpolation = args.train_interpolation
    if args.no_aug or not train_interpolation:
        train_interpolation = data_config['interpolation']
    loader_train = create_loader(
        dataset_train,
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        is_training=True,
        use_prefetcher=args.prefetcher,
        no_aug=args.no_aug,
        re_prob=args.reprob,
        re_mode=args.remode,
        re_count=args.recount,
        re_split=args.resplit,
        scale=args.scale,
        ratio=args.ratio,
        hflip=args.hflip,
        vflip=args.vflip,
        color_jitter=args.color_jitter,
        auto_augment=args.aa,
        num_aug_repeats=args.aug_repeats,
        num_aug_splits=num_aug_splits,
        interpolation=train_interpolation,
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        collate_fn=collate_fn,
        pin_memory=args.pin_mem,
        use_multi_epochs_loader=args.use_multi_epochs_loader,
        worker_seeding=args.worker_seeding,
    )

    loader_eval = create_loader(
        dataset_eval,
        input_size=data_config['input_size'],
        batch_size=args.validation_batch_size or args.batch_size,
        is_training=False,
        use_prefetcher=args.prefetcher,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        crop_pct=data_config['crop_pct'],
        pin_memory=args.pin_mem,
    )

    # setup loss function
    if args.jsd_loss:
        assert num_aug_splits > 1  # JSD only valid with aug splits set
        train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
    elif mixup_active:
        # smoothing is handled with mixup target transform which outputs sparse, soft targets
        if args.bce_loss:
            train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh)
        else:
            train_loss_fn = SoftTargetCrossEntropy()
    elif args.smoothing:
        if args.bce_loss:
            train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh)
        else:
            train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        train_loss_fn = nn.CrossEntropyLoss()
    train_loss_fn = train_loss_fn.cuda()
    validate_loss_fn = nn.CrossEntropyLoss().cuda()

    # setup checkpoint saver and eval metric tracking
    eval_metric = args.eval_metric
    best_metric = None
    best_epoch = None
    saver = None
    output_dir = None
    if args.rank == 0:
        if args.experiment:
            exp_name = args.experiment
        else:
            exp_name = '-'.join([
                datetime.now().strftime("%Y%m%d-%H%M%S"),
                safe_model_name(args.model),
                str(data_config['input_size'][-1])
            ])
        output_dir = get_outdir(args.output if args.output else './output/train', exp_name)
        decreasing = True if eval_metric == 'loss' else False
        saver = CheckpointSaver(
            model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
            checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)
        with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
            f.write(args_text)

    try:
        for epoch in range(start_epoch, num_epochs):
            if args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
                loader_train.sampler.set_epoch(epoch)

            train_metrics = train_one_epoch(
                epoch, model, loader_train, optimizer, train_loss_fn, args,
                lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
                amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)

            if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
                if args.local_rank == 0:
                    _logger.info("Distributing BatchNorm running means and vars")
                distribute_bn(model, args.world_size, args.dist_bn == 'reduce')

            eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)

            if model_ema is not None and not args.model_ema_force_cpu:
                if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
                    distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
                ema_eval_metrics = validate(
                    model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
                # EMA metrics replace raw-model metrics for scheduling/saving
                eval_metrics = ema_eval_metrics

            if lr_scheduler is not None:
                # step LR for next epoch
                lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])

            if output_dir is not None:
                update_summary(
                    epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
                    write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb)

            if saver is not None:
                # save proper checkpoint with eval metric
                save_metric = eval_metrics[eval_metric]
                best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)

    except KeyboardInterrupt:
        pass
    if best_metric is not None:
        _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def train_one_epoch(
        epoch, model, loader, optimizer, loss_fn, args,
        lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress,
        loss_scaler=None, model_ema=None, mixup_fn=None):
    """Run one training epoch and return averaged metrics.

    Handles mixup shutoff, AMP autocast/loss scaling, gradient clipping,
    model-EMA updates, distributed loss reduction for logging, periodic
    recovery checkpoints, and per-update LR scheduling.

    :param epoch: zero-based epoch index (also seeds num_updates).
    :param amp_autocast: context manager; ``suppress`` means AMP disabled.
    :param loss_scaler: AMP scaler callable; when set it owns backward/step.
    :return: ``OrderedDict([('loss', average_training_loss)])``
    """
    # Disable mixup once the configured epoch is reached, either in the
    # GPU prefetching loader or in the CPU-side mixup function.
    if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
        if args.prefetcher and loader.mixup_enabled:
            loader.mixup_enabled = False
        elif mixup_fn is not None:
            mixup_fn.mixup_enabled = False
    # Second-order optimizers need create_graph=True during backward.
    second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    losses_m = AverageMeter()
    model.train()
    end = time.time()
    last_idx = len(loader) - 1
    num_updates = epoch * len(loader)
    for batch_idx, (input, target) in enumerate(loader):
        last_batch = batch_idx == last_idx
        data_time_m.update(time.time() - end)
        if not args.prefetcher:
            # Prefetcher already yields CUDA tensors with mixup applied.
            input, target = input.cuda(), target.cuda()
            if mixup_fn is not None:
                input, target = mixup_fn(input, target)
        if args.channels_last:
            input = input.contiguous(memory_format=torch.channels_last)
        with amp_autocast():
            output = model(input)
            loss = loss_fn(output, target)
        if not args.distributed:
            # Distributed mode updates the meter with the all-reduced loss
            # inside the logging branch below instead.
            losses_m.update(loss.item(), input.size(0))
        optimizer.zero_grad()
        if loss_scaler is not None:
            # AMP path: the scaler performs backward, clipping and step.
            loss_scaler(
                loss, optimizer,
                clip_grad=args.clip_grad, clip_mode=args.clip_mode,
                parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
                create_graph=second_order)
        else:
            loss.backward(create_graph=second_order)
            if args.clip_grad is not None:
                dispatch_clip_grad(
                    model_parameters(model, exclude_head='agc' in args.clip_mode),
                    value=args.clip_grad, mode=args.clip_mode)
            optimizer.step()
        if model_ema is not None:
            model_ema.update(model)
        torch.cuda.synchronize()
        num_updates += 1
        batch_time_m.update(time.time() - end)
        if last_batch or batch_idx % args.log_interval == 0:
            # Mean LR across parameter groups, for logging only.
            lrl = [param_group['lr'] for param_group in optimizer.param_groups]
            lr = sum(lrl) / len(lrl)
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                losses_m.update(reduced_loss.item(), input.size(0))
            if args.local_rank == 0:
                _logger.info(
                    'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
                    'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) '
                    'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
                    '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
                    'LR: {lr:.3e} '
                    'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                        epoch,
                        batch_idx, len(loader),
                        100. * batch_idx / last_idx,
                        loss=losses_m,
                        batch_time=batch_time_m,
                        rate=input.size(0) * args.world_size / batch_time_m.val,
                        rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
                        lr=lr,
                        data_time=data_time_m))
                if args.save_images and output_dir:
                    torchvision.utils.save_image(
                        input,
                        os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
                        padding=0,
                        normalize=True)
        # Recovery checkpoints are written every recovery_interval batches.
        if saver is not None and args.recovery_interval and (
                last_batch or (batch_idx + 1) % args.recovery_interval == 0):
            saver.save_recovery(epoch, batch_idx=batch_idx)
        if lr_scheduler is not None:
            lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
        end = time.time()
        # end for
    if hasattr(optimizer, 'sync_lookahead'):
        optimizer.sync_lookahead()
    return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
    """Evaluate *model* over *loader* and return loss/top1/top5 averages.

    Runs under ``torch.no_grad()``; supports AMP autocast, channels-last
    layout, test-time-augmentation reduction (``args.tta``) and distributed
    metric all-reduce.  *log_suffix* tags log lines (e.g. ``' (EMA)'``).

    :return: ``OrderedDict`` with keys ``'loss'``, ``'top1'``, ``'top5'``.
    """
    batch_time_m = AverageMeter()
    losses_m = AverageMeter()
    top1_m = AverageMeter()
    top5_m = AverageMeter()
    model.eval()
    end = time.time()
    last_idx = len(loader) - 1
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(loader):
            last_batch = batch_idx == last_idx
            if not args.prefetcher:
                input = input.cuda()
                target = target.cuda()
            if args.channels_last:
                input = input.contiguous(memory_format=torch.channels_last)
            with amp_autocast():
                output = model(input)
                # Some models return (logits, aux); keep the main logits.
                if isinstance(output, (tuple, list)):
                    output = output[0]
                # augmentation reduction: average predictions over the
                # args.tta replicated crops of each sample.
                reduce_factor = args.tta
                if reduce_factor > 1:
                    output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
                    target = target[0:target.size(0):reduce_factor]
                loss = loss_fn(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                acc1 = reduce_tensor(acc1, args.world_size)
                acc5 = reduce_tensor(acc5, args.world_size)
            else:
                reduced_loss = loss.data
            torch.cuda.synchronize()
            losses_m.update(reduced_loss.item(), input.size(0))
            top1_m.update(acc1.item(), output.size(0))
            top5_m.update(acc5.item(), output.size(0))
            batch_time_m.update(time.time() - end)
            end = time.time()
            if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
                log_name = 'Test' + log_suffix
                _logger.info(
                    '{0}: [{1:>4d}/{2}] '
                    'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                    'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
                    'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
                    'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
                        log_name, batch_idx, last_idx, batch_time=batch_time_m,
                        loss=losses_m, top1=top1_m, top5=top5_m))
    metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
    return metrics
# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
| 48.605988 | 137 | 0.641773 |
ada7b61ec6075b6c5ba84a7c067b5e1c1c1fc0f9 | 1,755 | py | Python | data/dataset.py | Janspiry/distributed-pytorch-template | 8b612c8877d4ec3ffba28fcb3bc1ccfda44b6c10 | [
"MIT"
] | 3 | 2022-01-30T07:10:02.000Z | 2022-02-11T10:16:43.000Z | data/dataset.py | Janspiry/A-Seed-Project-For-Deep-Learning-by-Pytorch | 8b612c8877d4ec3ffba28fcb3bc1ccfda44b6c10 | [
"MIT"
] | null | null | null | data/dataset.py | Janspiry/A-Seed-Project-For-Deep-Learning-by-Pytorch | 8b612c8877d4ec3ffba28fcb3bc1ccfda44b6c10 | [
"MIT"
] | null | null | null | import torch.utils.data as data
from torchvision import transforms
from PIL import Image
import os
import os.path
from .auto_augment import AutoAugment, ImageNetAutoAugment
# File suffixes (case-sensitive, both cases listed) accepted by is_image_file().
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
    """Return True when *filename* ends with a recognised image extension."""
    # str.endswith accepts a tuple of candidate suffixes.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(dir):
    """Recursively collect image paths under *dir* whose names contain 'O' or 'F'.

    Directories and file names are visited in sorted order so the result
    is deterministic.  Raises AssertionError when *dir* is not a directory.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    images = []
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in sorted(fnames):
            if not is_image_file(fname):
                continue
            # Dataset convention: only files tagged 'O' or 'F' belong here.
            if 'O' not in fname and 'F' not in fname:
                continue
            images.append(os.path.join(root, fname))
    return images
def pil_loader(path):
    # Load an image from disk and force 3-channel RGB (drops alpha,
    # expands grayscale) so downstream transforms see a uniform mode.
    return Image.open(path).convert('RGB')
class Dataset(data.Dataset):
    """Image dataset returning dicts of transformed tensors and file names.

    Each item is ``{'input': tensor, 'path': file_name}`` where ``'path'``
    is the last path component of the source image.
    """

    def __init__(self, data_root, phase='train', image_size=(256, 256), loader=pil_loader):
        """
        :param data_root: directory scanned recursively for image files
        :param phase: 'train' enables ImageNet AutoAugment; anything else
            uses the plain resize + to-tensor pipeline
        :param image_size: (height, width) passed to transforms.Resize;
            default changed from the mutable list [256, 256] to an
            equivalent tuple (mutable defaults are shared across calls)
        :param loader: callable mapping a path to a PIL image
        """
        self.imgs = make_dataset(data_root)
        if phase == 'train':
            self.tfs = transforms.Compose([
                transforms.Resize((image_size[0], image_size[1])),
                ImageNetAutoAugment(),
                transforms.ToTensor()
            ])
        else:
            self.tfs = transforms.Compose([
                transforms.Resize((image_size[0], image_size[1])),
                transforms.ToTensor()
            ])
        self.loader = loader

    def __getitem__(self, index):
        """Load, transform and return the sample at *index*."""
        ret = {}
        path = self.imgs[index]
        img = self.tfs(self.loader(path))
        ret['input'] = img
        # Last path component; assumes '/' separators — TODO confirm on Windows.
        ret['path'] = path.rsplit("/")[-1]
        return ret

    def __len__(self):
        return len(self.imgs)
| 28.770492 | 91 | 0.579487 |
224741a87ab106a561bf76f57b580f85d912f2fc | 5,479 | py | Python | ve/unit/test_if_else.py | furiosa-ai/pyvsc | 612de9e6244c685a3df1972e4860abfe35b614e1 | [
"Apache-2.0"
] | 54 | 2020-03-28T17:54:00.000Z | 2022-03-27T08:53:13.000Z | ve/unit/test_if_else.py | furiosa-ai/pyvsc | 612de9e6244c685a3df1972e4860abfe35b614e1 | [
"Apache-2.0"
] | 124 | 2020-04-10T03:06:03.000Z | 2022-03-24T18:35:46.000Z | ve/unit/test_if_else.py | furiosa-ai/pyvsc | 612de9e6244c685a3df1972e4860abfe35b614e1 | [
"Apache-2.0"
] | 17 | 2020-04-09T21:47:58.000Z | 2022-02-23T19:37:37.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from vsc.types import rangelist
from vsc_test_case import VscTestCase
'''
Created on Jul 28, 2019
@author: ballance
'''
import unittest
from unittest.case import TestCase
import vsc
class TestIfElse(VscTestCase):
    """Exercises vsc's if_then / else_if / else_then constraint blocks."""

    def test_if_then(self):
        # Lone if_then: b is forced to 1 only when a randomizes to 1.
        @vsc.randobj
        class my_s(object):
            def __init__(self):
                super().__init__()
                self.a = vsc.rand_bit_t(8)
                self.b = vsc.rand_bit_t(8)
                self.c = vsc.rand_bit_t(8)
                self.d = vsc.rand_bit_t(8)

            @vsc.constraint
            def ab_c(self):
                with vsc.if_then(self.a == 1):
                    self.b == 1

        v = my_s()
        v.randomize()
        # Plain attribute writes after randomize; only checks they don't raise.
        v.a = 1
        v.a = 2

    def test_else_if(self):
        # Full if/else-if ladder mapping each value of a to a range for b.
        @vsc.randobj
        class my_s(object):
            def __init__(self):
                super().__init__()
                self.a = vsc.rand_bit_t(8)
                self.b = vsc.rand_bit_t(8)
                self.c = vsc.rand_bit_t(8)
                self.d = vsc.rand_bit_t(8)

            @vsc.constraint
            def ab_c(self):
                self.a in rangelist(1,5)
                with vsc.if_then(self.a == 1):
                    self.b in rangelist(0,10)
                with vsc.else_if(self.a == 2):
                    self.b in rangelist(11,20)
                with vsc.else_if(self.a == 3):
                    self.b in rangelist(21,30)
                with vsc.else_if(self.a == 4):
                    self.b in rangelist(31,40)
                with vsc.else_if(self.a == 5):
                    self.b in rangelist(41,50)

        v = my_s()
        for i in range(8):
            v.randomize()
            print("a=" + str(v.a) + " b=" + str(v.b))

    def test_else_if_2(self):
        # Ladder with a pinned to 5, terminated by a called else_then().
        @vsc.randobj
        class my_s(object):
            def __init__(self):
                super().__init__()
                self.a = vsc.rand_bit_t(8)
                self.b = vsc.rand_bit_t(8)
                self.c = vsc.rand_bit_t(8)
                self.d = vsc.rand_bit_t(8)

            @vsc.constraint
            def ab_c(self):
                self.a == 5
                with vsc.if_then(self.a == 1):
                    self.b == 1
                with vsc.else_if(self.a == 2):
                    self.b == 2
                with vsc.else_if(self.a == 3):
                    self.b == 4
                with vsc.else_if(self.a == 4):
                    self.b == 8
                with vsc.else_if(self.a == 5):
                    self.b == 16
                with vsc.else_then():
                    self.b == 0

        v = my_s()
        for i in range(8):
            v.randomize()
            print("a=" + str(v.a) + " b=" + str(v.b))

    def test_else_if_3(self):
        # Same ladder, but else_then is used as a bare context manager
        # (no call).  NOTE(review): test_else_if_2 calls vsc.else_then();
        # presumably both spellings are supported — confirm against vsc.
        @vsc.randobj
        class my_s(object):
            def __init__(self):
                super().__init__()
                self.a = vsc.rand_bit_t(8)
                self.b = vsc.rand_bit_t(8)
                self.c = vsc.rand_bit_t(8)
                self.d = vsc.rand_bit_t(8)

            @vsc.constraint
            def ab_c(self):
                self.a == 5
                with vsc.if_then(self.a == 1):
                    self.b == 1
                with vsc.else_if(self.a == 2):
                    self.b == 2
                with vsc.else_if(self.a == 3):
                    self.b == 4
                with vsc.else_if(self.a == 4):
                    self.b == 8
                with vsc.else_if(self.a == 5):
                    self.b == 16
                with vsc.else_then:
                    self.b == 0

        v = my_s()
        for i in range(8):
            v.randomize()
            print("a=" + str(v.a) + " b=" + str(v.b))

    def test_else_then(self):
        # if/else pair via the module-level vsc.randomize() entry point.
        @vsc.randobj
        class my_s(object):
            def __init__(self):
                self.a = vsc.rand_bit_t(8)
                self.b = vsc.rand_bit_t(8)
                self.c = vsc.rand_bit_t(8)
                self.d = vsc.rand_bit_t(8)

            @vsc.constraint
            def ab_c(self):
                self.a == 1
                with vsc.if_then(self.a == 1):
                    self.b == 1
                with vsc.else_then():
                    self.b == 2

        v = my_s()
        vsc.randomize(v)
| 29.299465 | 62 | 0.441321 |
13330889323200f12c00344088dd7bba33fbc99e | 6,966 | py | Python | src/flask_track_usage/storage/sql.py | gouthambs/flask-track-usage | 81c3cb3555498e0438c300000f4a5457352dc719 | [
"BSD-3-Clause"
] | null | null | null | src/flask_track_usage/storage/sql.py | gouthambs/flask-track-usage | 81c3cb3555498e0438c300000f4a5457352dc719 | [
"BSD-3-Clause"
] | null | null | null | src/flask_track_usage/storage/sql.py | gouthambs/flask-track-usage | 81c3cb3555498e0438c300000f4a5457352dc719 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2014 Gouthaman Balaraman
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3)The name of the author may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
SQL storage based on SQLAlchemy
"""
from . import Storage
import json
import datetime
class SQLStorage(Storage):
    """
    Uses SQLAlchemy to connect to various databases such as SQLite, Oracle,
    MySQL, Postgres, etc. Please SQLAlchemy wrapper for full support and
    functionalities.
    .. versionadded:: 1.0.0
       SQLStorage was added.
    """
    def set_up(self, conn_str=None, table_name="flask_usage", db=None):
        """
        Sets the SQLAlchemy database
        :Parameters:
           - `conn_str`: The SQLAlchemy connection string
           - `table_name`: Table name for storing the analytics. Defaults to \
            `flask_usage`.
           - `db`: Instead of providing the conn_str, one can optionally
             provide the Flask-SQLAlchemy's SQLALchemy object created as
             SQLAlchemy(app).
        """
        import sqlalchemy as sql
        # A Flask-SQLAlchemy object takes precedence over a raw conn string.
        if db is not None:
            self._eng = db.engine
        elif conn_str is not None:
            self._eng = sql.create_engine(conn_str)
        else:
            raise ValueError("Both conn_str and db cannot be None")
        self._con = None
        with self._eng.begin() as self._con:
            if db:
                meta = db.metadata
            else:
                meta = sql.MetaData()
            # Create the tracking table on first use; otherwise reflect the
            # existing definition from the database.
            # NOTE(review): dialect.has_table(connection, name) is the
            # pre-SQLAlchemy-1.4 spelling — confirm supported SA version.
            if not self._con.dialect.has_table(self._con, table_name):
                self.track_table = sql.Table(
                    table_name, meta,
                    sql.Column('id', sql.Integer, primary_key=True),
                    sql.Column('url', sql.String(128)),
                    sql.Column('ua_browser', sql.String(16)),
                    sql.Column('ua_language', sql.String(16)),
                    sql.Column('ua_platform', sql.String(16)),
                    sql.Column('ua_version', sql.String(16)),
                    sql.Column('blueprint', sql.String(16)),
                    sql.Column('view_args', sql.String(64)),
                    sql.Column('status', sql.Integer),
                    sql.Column('remote_addr', sql.String(24)),
                    sql.Column('xforwardedfor', sql.String(24)),
                    sql.Column('authorization', sql.Boolean),
                    sql.Column('ip_info', sql.String(128)),
                    sql.Column('path', sql.String(32)),
                    sql.Column('speed', sql.Float),
                    sql.Column('datetime', sql.DateTime)
                )
                meta.create_all(self._eng)
            else:
                meta.reflect(bind=self._eng)
                self.track_table = meta.tables[table_name]
    def store(self, data):
        """
        Executed on "function call".
        :Parameters:
           - `data`: Data to store.
        """
        user_agent = data["user_agent"]
        # data['date'] is a POSIX timestamp; converted via the local timezone.
        utcdatetime = datetime.datetime.fromtimestamp(data['date'])
        with self._eng.begin() as con:
            stmt = self.track_table.insert().values(
                url=data['url'],
                ua_browser=user_agent.browser,
                ua_language=user_agent.language,
                ua_platform=user_agent.platform,
                ua_version=user_agent.version,
                blueprint=data["blueprint"],
                view_args=json.dumps(data["view_args"], ensure_ascii=False),
                status=data["status"],
                remote_addr=data["remote_addr"],
                xforwardedfor=data["xforwardedfor"],
                authorization=data["authorization"],
                ip_info=data["ip_info"],
                path=data["path"],
                speed=data["speed"],
                datetime=utcdatetime
            )
            con.execute(stmt)
    def _get_usage(self, start_date=None, end_date=None, limit=500, page=1):
        '''
        This is what translates the raw data into the proper structure.
        '''
        raw_data = self._get_raw(start_date, end_date, limit, page)
        # Row index 0 is the primary key and is intentionally dropped.
        usage_data = [
            {
                'url': r[1],
                'user_agent': {
                    'browser': r[2],
                    'language': r[3],
                    'platform': r[4],
                    'version': r[5],
                },
                'blueprint': r[6],
                'view_args': r[7] if r[7] != '{}' else None,
                'status': int(r[8]),
                'remote_addr': r[9],
                'xforwardedfor': r[10],
                'authorization': r[11],
                'ip_info': r[12],
                'path': r[13],
                'speed': r[14],
                'date': r[15]
            } for r in raw_data]
        return usage_data
    def _get_raw(self, start_date=None, end_date=None, limit=500, page=1):
        '''
        This is the raw getter from database
        '''
        import sqlalchemy as sql
        page = max(1, page)   # min bound
        # Missing bounds default to "now" and the Unix epoch respectively.
        if end_date is None:
            end_date = datetime.datetime.utcnow()
        if start_date is None:
            start_date = datetime.datetime(1970, 1, 1)
        with self._eng.begin() as con:
            _table = self.track_table
            # Newest first, windowed by limit/page for pagination.
            stmt = sql.select([self.track_table])\
                .where(_table.c.datetime.between(start_date, end_date))\
                .limit(limit)\
                .offset(limit * (page - 1))\
                .order_by(sql.desc(self.track_table.c.datetime))
            res = con.execute(stmt)
            result = res.fetchall()
        return result
| 39.355932 | 78 | 0.562016 |
5a00fb91d00b28593bd9a71fb509a39e0f5753f3 | 2,346 | py | Python | shatter/QM_helper.py | jisazaTappsi/mastermind | bb096119b6e062611dec8e8062feb5f838d5a198 | [
"MIT"
] | 1 | 2016-06-10T05:55:05.000Z | 2016-06-10T05:55:05.000Z | shatter/QM_helper.py | jisazaTappsi/mastermind | bb096119b6e062611dec8e8062feb5f838d5a198 | [
"MIT"
] | 1 | 2016-02-05T06:59:46.000Z | 2016-02-05T06:59:46.000Z | shatter/QM_helper.py | jisazaTappsi/mastermind | bb096119b6e062611dec8e8062feb5f838d5a198 | [
"MIT"
] | 1 | 2019-03-12T07:49:01.000Z | 2019-03-12T07:49:01.000Z | #!/usr/bin/env python
"""Defines Quine McCluskey helper methods"""
from shatter import qm
from shatter.rules import Rules
from shatter.util import helpers as h
from shatter.code_generator import translate_to_python_expression
__author__ = 'juan pablo isaza'
def from_table_to_ones(table):
    """
    Gets the ones as a list of strings from a truth table like set, containing tuples.
    :param table: truth table
    :return: list containing bits.
    """
    ones = []
    for row in table:
        if Rules.is_explicit(row):
            # case 1: explicit row (inputs, output); only keep true outputs.
            # TODO change for non booleans.
            if row[1]:
                ones.append(''.join(h.from_bool_to_bit(b) for b in row[0]))
        else:
            # case 2: implicit True output; the row itself is the inputs.
            ones.append(''.join(h.from_bool_to_bit(b) for b in row))
    return ones
def execute_qm_algorithm(ones):
    """
    Quine McCluskey algorithm.
    outputs the minimal boolean expression. Assumes that all none ones have a False output.
    :param ones: input combinations for which output is true
    :return: set containing lists of boolean expressions encoded as strings.
    Where: '1' = boolean ,'0' = not(boolean), '-' = don't care, '^^' = boolean0 ^ boolean1
    Example: set('1-','-0') = bit0 or not bit1
    """
    # TODO: cannot solve ones = ['00'] or a not(or(b0,b1))
    # TODO: change to True, add XOR logic
    # XOR minimization is disabled until the translator supports '^^' terms.
    qm_obj = qm.QuineMcCluskey(use_xor=False)
    return qm_obj.simplify_los(ones)
def get_boolean_expression(table, inputs, the_output):
    """
    Get boolean expression. Can return empty string.
    solution provided by Quine-McCluskey algorithm.
    outputs a function that maps {0,1} -> {0,1}.
    :param inputs: Function explicit inputs or implicit added rules.
    :param table: truth table.
    :param the_output: function output
    :return: string with boolean expression.
    """
    ones = from_table_to_ones(table)
    if not ones:
        # No true minterms at all: the output is constant.
        return '{}'.format(the_output)
    qm_output = execute_qm_algorithm(ones)
    expression = translate_to_python_expression(inputs, qm_output)
    if expression == '':
        # QM reduced every literal away: output is constant regardless of input.
        return '{}'.format(the_output)
    return expression
| 34 | 106 | 0.671355 |
7c49058b086073206457d42f3aa42786669003f2 | 2,456 | py | Python | dist_matrix/tests/test_helper.py | sparks-baird/wasserstein-distance | a37b5c2a0f7322cd8edb1432148445cf5de468a2 | [
"MIT"
] | 5 | 2021-12-10T15:48:55.000Z | 2022-03-29T15:28:06.000Z | dist_matrix/tests/test_helper.py | sparks-baird/wasserstein-distance | a37b5c2a0f7322cd8edb1432148445cf5de468a2 | [
"MIT"
] | null | null | null | dist_matrix/tests/test_helper.py | sparks-baird/wasserstein-distance | a37b5c2a0f7322cd8edb1432148445cf5de468a2 | [
"MIT"
] | null | null | null | # """
# Test helper functions for distance matrix calculations.
# """
# import os
# import numpy as np
# from numba.cuda.testing import unittest, CUDATestCase
# # import numba.cuda.kernels.device.helper as hp
# import mat_discover.ElM2D.helper as hp
# from numpy.testing import assert_allclose, assert_equal
# bits = int(os.environ.get("MACHINE_BITS", "32"))
# if bits == 32:
# np_float = np.float32
# np_int = np.float32
# elif bits == 64:
# np_float = np.float64
# np_int = np.float64
# tol = 1e-5
# class TestHelperFunctions(CUDATestCase):
# def test_concatenate(self):
# vec = np.random.rand(5)
# vec2 = np.random.rand(10)
# check = np.concatenate((vec, vec2))
# out = np.zeros(len(vec) + len(vec2), dtype=np_float)
# hp.concatenate(vec, vec2, out)
# assert_allclose(out, check)
# vec[0] = 100.0
# assert_allclose(out, check)
# def test_diff(self):
# vec = np.random.rand(10)
# out = np.zeros(len(vec) - 1)
# hp.diff(vec, out)
# check = np.diff(vec)
# assert_equal(out, check)
# def test_bisect_right(self):
# a = np.random.rand(10)
# a.sort()
# v = np.random.rand(5)
# ids = np.zeros_like(v[:-1], dtype=np_int)
# hp.bisect_right(a, v[:-1], ids)
# check = np.searchsorted(a, v[:-1], side="right")
# assert_equal(ids, check)
# def test_sort_by_indices(self):
# v = np.random.rand(10)
# ids = np.arange(len(v))
# np.random.shuffle(ids)
# out = np.zeros_like(v)
# hp.sort_by_indices(v, ids, out)
# check = v[ids]
# assert_equal(out, check)
# v = np.array([0.0, 0.5528931, 1.1455898, 1.5933731])
# ids = np.array([1, 2, 3, 3, 3])
# out = np.zeros_like(ids, dtype=np_float)
# hp.sort_by_indices(v, ids, out)
# check = v[ids]
# assert_allclose(out, check)
# def test_cumsum(self):
# vec = np.random.rand(10)
# out = np.zeros_like(vec, dtype=np_float)
# hp.cumsum(vec, out)
# check = np.cumsum(vec)
# assert_allclose(out, check)
# def test_divide(self):
# v = np.random.rand(10)
# b = 4.62
# out = np.zeros_like(v, dtype=np_float)
# hp.divide(v, b, out)
# check = v / b
# assert_allclose(out, check)
# if __name__ == "__main__":
# unittest.main()
| 28.229885 | 62 | 0.561889 |
0f1e9292ac8c3b8226f822a526fbb927c884fc34 | 1,066 | py | Python | N-aryTreePreorderTraversal589.py | Bit64L/LeetCode-Python- | 64847cbb1adcaca4561b949e8acc52e8e031a6cb | [
"MIT"
] | null | null | null | N-aryTreePreorderTraversal589.py | Bit64L/LeetCode-Python- | 64847cbb1adcaca4561b949e8acc52e8e031a6cb | [
"MIT"
] | null | null | null | N-aryTreePreorderTraversal589.py | Bit64L/LeetCode-Python- | 64847cbb1adcaca4561b949e8acc52e8e031a6cb | [
"MIT"
] | null | null | null | """
# Definition for a Node.
"""
class TreeNode(object):
    """N-ary tree node: a value plus a list of child TreeNodes."""
    def __init__(self, val, children):
        self.val = val            # payload stored at this node
        self.children = children  # list of child nodes (may be empty)
class Solution(object):
    """Preorder traversal of an N-ary tree (LeetCode 589), two ways."""

    def preorder(self, root):
        """Iterative preorder traversal using an explicit stack.

        :type root: Node
        :rtype: List[int]

        Fix over the original: children are pushed in reverse order via
        ``reversed()`` instead of ``children.reverse()``, so the caller's
        tree is no longer mutated while producing the same output.
        """
        ans = []
        if root is None:
            return ans
        stack = [root]
        while stack:
            node = stack.pop()
            ans.append(node.val)
            # Push right-to-left so the leftmost child is popped first.
            for child in reversed(node.children):
                stack.append(child)
        return ans

    def preorder1(self, root):
        """Recursive preorder traversal.

        :type root: Node
        :rtype: List[int]
        """
        ans = []

        def helper(node, acc):
            # Visit the node, then each child left to right.
            if node is None:
                return
            acc.append(node.val)
            for child in node.children:
                helper(child, acc)

        helper(root, ans)
        return ans
# Smoke test: preorder of the tree 1 -> [2, 3] should print [1, 2, 3].
node2 = TreeNode(2, [])
node3 = TreeNode(3, [])
children = [node2, node3]
node1 = TreeNode(1, children)
solution = Solution()
print(solution.preorder(node1))
| 20.5 | 39 | 0.510319 |
f603c6a3ef256e8c039ec7e86699692f76b12df2 | 380 | py | Python | build/lib/YAPI_heureka_code/yapi.py | heureka-code/YAPI-heureka-code | ea6390593d873d3f5c7c906c902ed55cb707bf24 | [
"MIT"
] | null | null | null | build/lib/YAPI_heureka_code/yapi.py | heureka-code/YAPI-heureka-code | ea6390593d873d3f5c7c906c902ed55cb707bf24 | [
"MIT"
] | null | null | null | build/lib/YAPI_heureka_code/yapi.py | heureka-code/YAPI-heureka-code | ea6390593d873d3f5c7c906c902ed55cb707bf24 | [
"MIT"
] | null | null | null | from .Lexer import Lexer
from .token_gruppen import YAPIGroup, TokenGruppen
from .token_data import Token
class YAPI:
    """Facade that tokenizes statements with a Lexer over a token group."""

    def __init__(self, group: YAPIGroup):
        # Wrap the raw group so every Lexer receives a TokenGruppen instance.
        self.__group: TokenGruppen = TokenGruppen(group)

    def execute(self, statement) -> list[Token]:
        """Run the lexer over *statement* and return the token list."""
        return Lexer(self.__group, statement).tokens
| 25.333333 | 57 | 0.668421 |
b7983fe071c361a33cfdb142c714004ecd01ea17 | 5,923 | py | Python | Kerberos/raghuram/tgs.py | mohith7548/Cryptography-LAB | 05757072e38558134f2885d36915bfb0c5a26e9b | [
"MIT"
] | 2 | 2021-02-21T18:04:19.000Z | 2021-02-23T06:48:10.000Z | Kerberos/raghuram/tgs.py | mohith7548/Cryptography-LAB | 05757072e38558134f2885d36915bfb0c5a26e9b | [
"MIT"
] | null | null | null | Kerberos/raghuram/tgs.py | mohith7548/Cryptography-LAB | 05757072e38558134f2885d36915bfb0c5a26e9b | [
"MIT"
] | 2 | 2019-11-29T01:06:03.000Z | 2019-12-07T19:34:45.000Z | import socket
import threading
import time
import pyDes
import string
import random
#####################Already Shared Keys######################################
Key_V="ac3Hn57a" # pre-shared key: application server <-> TGS
Key_TGS="59ZFTjCt" # pre-shared key: AS <-> TGS
##################### Session Keys Declaration ###############################
# NOTE(review): these module-level "session" globals are placeholders; the
# per-request values are computed as locals inside serverThread.run().
Key_C_TGS="" # session key received from AS via client
Key_C_V="" # session key for client and application server
time_stamp4=""
life_time4="4600" # lifetime (seconds) stamped into issued service tickets
#================================================================================================================
# function for random key generator
def randomKeyGenerator():
    """Generate a random 8-character alphanumeric session key.

    Security fix: uses the ``secrets`` CSPRNG instead of ``random``, which
    is not suitable for cryptographic key material.  Output format (length
    and alphabet) is unchanged.
    """
    import secrets  # local import keeps the module's import block untouched
    key_length = 8
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(key_length))
#=================================================================================================================
#Encryption and decrytion for DES
def des_encrypt(key, plain_text):
    # Encrypt plain_text with single DES in CBC mode, PKCS5 padding.
    # NOTE(review): the IV is a hard-coded constant reused for every
    # message, which defeats CBC's purpose; DES itself is also considered
    # broken — acceptable only for this classroom assignment.
    IV="33333333"
    key_handler = pyDes.des(key, pyDes.CBC, IV, pad=None, padmode=pyDes.PAD_PKCS5)
    cipher_text = key_handler.encrypt(plain_text)
    #print("encrpted: {}".format(cipher_text))
    return cipher_text
def des_decrypt(key, cipher_text):
    # Decrypt cipher_text produced by des_encrypt (DES/CBC/PKCS5 with the
    # same fixed IV "33333333"); returns bytes.
    IV="33333333"
    key_handler = pyDes.des(key, pyDes.CBC, IV, pad=None, padmode=pyDes.PAD_PKCS5)
    plain_text = key_handler.decrypt(cipher_text)
    #print("decrpted: {}".format(plain_text))
    return plain_text
#================================================================================================================
#================================================================================================================
# Method to genereate tgs-ticket
def generateApplicationServerTicket(Key_C_V,client_id, server_id, time_stamp4, Address_C, life_time4):
    """Build the plaintext service ticket as '||'-joined fields.

    Field order: session key, client id, client address, server id,
    timestamp, lifetime.  The caller encrypts the result with Key_V
    (the key pre-shared between TGS and application server).
    """
    fields = (Key_C_V, client_id, Address_C, server_id, time_stamp4, life_time4)
    return "||".join(fields)
#=================================================================================================================
#=================================================================================================================
#Threading class and constructor
class serverThread(threading.Thread):
    """Handles one client connection: validates the TGS ticket and issues
    a service ticket plus a fresh client<->server session key."""
    def __init__(self, client):
        threading.Thread.__init__(self)
        self.client = client  # connected socket for this request
    def run(self):
        # Single request/response exchange, then the socket is closed.
        dataRecvd = self.client.recv(256)
        if len(dataRecvd ) == 0:
            msg = "error"
        else:
            # message received from client: server_id || E(Key_TGS, tgs_ticket)
            # || E(Key_C_TGS, authenticator)
            print("================ Request recieved from client====================")
            print("")
            print()
            print(dataRecvd)
            print()
            # Split the request into its three '||'-separated components.
            values = dataRecvd.split('||'.encode('utf-8'))
            server_id = values[0].decode('utf-8')
            ticket_tgs_encrypted = values[1]
            authenticator_client_encrypted=values[2]
            print("Components of resquesT recieved: ")
            print()
            print("server_id: {}".format(server_id))
            print()
            print("ticket_tgs_encrypted: {}".format(ticket_tgs_encrypted))
            print()
            print("authenticator_client_encrypted: {}".format(authenticator_client_encrypted))
            # Decrypt the TGS ticket with Key_TGS (pre-shared AS <-> TGS).
            tgs_ticket=des_decrypt(Key_TGS,ticket_tgs_encrypted).decode('utf-8')
            print()
            print("======================== Tgs Ticket =============================")
            print(tgs_ticket)
            print()
            # Ticket layout: Key_C_TGS||client_id||address||tgs_id||ts2||lifetime2
            values=tgs_ticket.split('||')
            Key_C_TGS=values[0]
            client_id=values[1]
            client_address=values[2]
            tgs_id=values[3]
            time_stamp2=values[4]
            life_time2=values[5]
            print("Key_C_TGS: {}".format(Key_C_TGS))
            print("client_id: {}".format(client_id))
            print("client_address: {}".format(client_address))
            print("tgs_id: {}".format(tgs_id))
            print("time_stamp2: {}".format(time_stamp2))
            print("life_time2: {}".format(life_time2))
            # Decrypt the client authenticator with the session key from the ticket.
            authenticator_client = des_decrypt(Key_C_TGS,authenticator_client_encrypted).decode('utf-8')
            values=authenticator_client.split('||')
            client_id=values[0]
            client_address=values[1]
            time_stamp3=values[2]
            time_stamp4=str(time.time())
            # TODO: verify the authenticator (ids/address/timestamp) here.
            # Network address of client is carried in the ticket for this
            # assignment; a real deployment would use the socket peer address.
            Key_C_V=randomKeyGenerator()
            server_ticket=generateApplicationServerTicket(Key_C_V,client_id, server_id, time_stamp4, client_address, life_time4)
            # Ticket is encrypted with Key_V so only the application server can read it.
            server_ticket_encrypted=des_encrypt(Key_V,server_ticket)
            print("server_ticket")
            print(server_ticket)
            # Reply: Key_C_V || server_id || ts4 || E(Key_V, server_ticket),
            # all wrapped under the client<->TGS session key Key_C_TGS.
            msg = Key_C_V+"||"+server_id+"||"+time_stamp4+"||"
            msg = msg.encode('utf-8')+server_ticket_encrypted
            msg_encypted=des_encrypt(Key_C_TGS,msg)
            print("=================================================================")
            print("ApplicationServer-Ticket + Session Key(Key_C_V) being sent to client")
            print()
            print(msg_encypted)
            self.client.send(msg_encypted)
        self.client.close()
#=================================================================================================================
#=================================================================================================================
#Creation of threads by Server
# Listen on localhost:3334 and hand each accepted connection to a worker thread.
server_socket = socket.socket()
server_socket.bind(('127.0.0.1', 3334))
server_socket.listen(5)
while True:
    print('TGS is Listening... ')
    client,addr = server_socket.accept()
    print('Connected to : {}'.format(addr))
    serverThread(client).start()
# NOTE(review): unreachable — the accept loop above never terminates.
server_socket.close()
#=================================================================================================================
| 31.673797 | 119 | 0.54854 |
cde9a962e92ae72a22ff52e1a8cb48a8c59bb5c2 | 2,019 | py | Python | kunquat/tracker/ui/controller/kqtivalidator.py | kagu/kunquat | 83a2e972121e6a114ecc5ef4392b501ce926bb06 | [
"CC0-1.0"
] | 13 | 2016-09-01T21:52:49.000Z | 2022-03-24T06:07:20.000Z | kunquat/tracker/ui/controller/kqtivalidator.py | kagu/kunquat | 83a2e972121e6a114ecc5ef4392b501ce926bb06 | [
"CC0-1.0"
] | 290 | 2015-03-14T10:59:25.000Z | 2022-03-20T08:32:17.000Z | kunquat/tracker/ui/controller/kqtivalidator.py | kagu/kunquat | 83a2e972121e6a114ecc5ef4392b501ce926bb06 | [
"CC0-1.0"
] | 7 | 2015-03-19T13:28:11.000Z | 2019-09-03T16:21:16.000Z | # -*- coding: utf-8 -*-
#
# Author: Tomi Jylhä-Ollila, Finland 2016-2018
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from kunquat.kunquat.kunquat import Kunquat, KunquatFormatError
import kunquat.tracker.cmdline as cmdline
from .dataconverters import VersionError, UnsupportedVersionError
class KqtiValidator():
    """Incrementally loads a .kqti instrument's contents into a Kunquat
    validator instance and reports conversion/validation errors."""

    def __init__(self, contents, data_converters):
        # Headless Kunquat instance used purely for validation.
        self._validator = Kunquat()
        self._validator.set_loader_thread_count(cmdline.get_default_thread_count())
        self._contents = contents              # mapping of au-relative keys to data
        self._data_converters = data_converters
        self._validation_error = None          # first error message, if any
        self._progress = 0                     # fraction of entries processed, [0, 1)

    def get_progress(self):
        # Progress of get_validation_steps(), for UI reporting.
        return self._progress

    def get_validation_steps(self):
        """Generator: yields once per entry so the caller can interleave UI
        updates; stops early and records the message on version errors."""
        # Validate as if the instrument were audio unit 0 of a module.
        target_prefix = 'au_00'
        step_count = len(self._contents.items())
        for i, (au_key, value) in enumerate(self._contents.items()):
            yield
            key = '/'.join((target_prefix, au_key))
            try:
                self._data_converters.convert_key_and_data(key, value)
            except UnsupportedVersionError as e:
                version_data = self._contents.get('m_editor_version.json')
                self._validation_error = e.get_message('audio unit', version_data)
                break
            except VersionError as e:
                self._validation_error = e.args[0]
                break
            self._validator.set_data(key, value)
            # NOTE: reaches at most (step_count - 1) / step_count, never 1.0.
            self._progress = i / step_count

    def is_valid(self):
        """Return True when conversion succeeded and Kunquat accepts the data."""
        if self._validation_error:
            return False
        try:
            self._validator.validate()
        except KunquatFormatError as e:
            self._validation_error = e['message']
            return False
        return True

    def get_validation_error(self):
        # None when no error has been recorded yet.
        return self._validation_error
| 30.134328 | 83 | 0.647845 |
b1e5262264f13e0c492bececfb8b0bd8f9e961c5 | 6,830 | py | Python | sutils/applications/cancel/test_cancel.py | t-mertz/slurm_utils | 6fc9709f62e2bca1387ea9c7a5975f0f0be5d0dd | [
"MIT"
] | null | null | null | sutils/applications/cancel/test_cancel.py | t-mertz/slurm_utils | 6fc9709f62e2bca1387ea9c7a5975f0f0be5d0dd | [
"MIT"
] | null | null | null | sutils/applications/cancel/test_cancel.py | t-mertz/slurm_utils | 6fc9709f62e2bca1387ea9c7a5975f0f0be5d0dd | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import patch, call, Mock
import getpass
import subprocess
from . import cancel
from ...test import test
def fix_mock_popen(func):
    """Decorator for test methods receiving (popen, get_jobs) mocks.

    Configures the patched ``subprocess.Popen`` so that
    ``popen(...).communicate()`` returns empty stdout/stderr, mimicking a
    successful ``scancel`` invocation, then calls the wrapped test.
    """
    # Local import keeps the module's top-level imports untouched.
    from functools import wraps

    # functools.wraps preserves the test method's name/docstring so unittest
    # reporting shows the real test instead of "wrapper".
    @wraps(func)
    def wrapper(self, popen, get_jobs):
        process_mock = Mock()
        # scancel produces no output on success.
        attrs = {'communicate.return_value': (b'', b'')}
        process_mock.configure_mock(**attrs)
        popen.return_value = process_mock
        func(self, popen, get_jobs)
    return wrapper
@patch("sys.stdout.write", Mock())
@patch("sutils.applications.cancel.core.get_jobs", return_value=(0, 1, 2))
@patch("subprocess.Popen")
class TestScancel(unittest.TestCase):
    """Tests for cancel.run(): which scancel commands are issued for the
    all/first/last selectors, with and without --force (force batches all
    job ids into a single scancel call)."""

    def _assert_scancel_calls(self, popen, job_groups):
        """Assert one scancel invocation per group, where each group is the
        list of job ids passed to a single scancel command."""
        expected = []
        for group in job_groups:
            expected.append(
                call(["scancel"] + [str(j) for j in group], stderr=-1, stdout=-1))
            expected.append(call().communicate())
        popen.assert_has_calls(expected)
        self.assertEqual(popen.call_count, len(job_groups))

    @fix_mock_popen
    def test_calls_get_jobs(self, popen, get_jobs):
        cancel.run({'all': True, 'last': None, 'first': None, 'force': False})
        from . import core
        core.get_jobs.assert_called_once_with()

    @fix_mock_popen
    def test_all(self, popen, get_jobs):
        cancel.run({'all': True, 'last': None, 'first': None, 'force': False})
        self._assert_scancel_calls(popen, [['0'], ['1'], ['2']])

    @fix_mock_popen
    def test_all_force(self, popen, get_jobs):
        cancel.run({'all': True, 'last': None, 'first': None, 'force': True})
        self._assert_scancel_calls(popen, [['0', '1', '2']])

    @fix_mock_popen
    def test_first1(self, popen, get_jobs):
        cancel.run({'all': None, 'last': None, 'first': 1, 'force': False})
        self._assert_scancel_calls(popen, [['0']])

    @fix_mock_popen
    def test_first2(self, popen, get_jobs):
        cancel.run({'all': None, 'last': None, 'first': 2, 'force': False})
        self._assert_scancel_calls(popen, [['0'], ['1']])

    @fix_mock_popen
    def test_last1(self, popen, get_jobs):
        cancel.run({'all': None, 'last': 1, 'first': None, 'force': False})
        self._assert_scancel_calls(popen, [['2']])

    @fix_mock_popen
    def test_last2(self, popen, get_jobs):
        # Last-n cancels newest jobs first.
        cancel.run({'all': None, 'last': 2, 'first': None, 'force': False})
        self._assert_scancel_calls(popen, [['2'], ['1']])

    @fix_mock_popen
    def test_last2_force(self, popen, get_jobs):
        cancel.run({'all': None, 'last': 2, 'first': None, 'force': True})
        self._assert_scancel_calls(popen, [['2', '1']])

    @fix_mock_popen
    def test_first2_force(self, popen, get_jobs):
        cancel.run({'all': None, 'last': None, 'first': 2, 'force': True})
        self._assert_scancel_calls(popen, [['0', '1']])

    @fix_mock_popen
    def test_last1_force(self, popen, get_jobs):
        cancel.run({'all': None, 'last': 1, 'first': None, 'force': True})
        self._assert_scancel_calls(popen, [['2']])

    @fix_mock_popen
    def test_first1_force(self, popen, get_jobs):
        cancel.run({'all': None, 'last': None, 'first': 1, 'force': True})
        self._assert_scancel_calls(popen, [['0']])

    @fix_mock_popen
    def test_first5_cancels_all_if_less_are_running(self, popen, get_jobs):
        cancel.run({'all': None, 'last': None, 'first': 5, 'force': False})
        self._assert_scancel_calls(popen, [['0'], ['1'], ['2']])

    @fix_mock_popen
    def test_last5_cancels_all_if_less_are_running(self, popen, get_jobs):
        cancel.run({'all': None, 'last': 5, 'first': None, 'force': False})
        self._assert_scancel_calls(popen, [['2'], ['1'], ['0']])
# self.assertEqual(len(commands), 4)
| 37.734807 | 96 | 0.561933 |
53327b6b7c0bb9c3affd01e877f2cb15f4968c26 | 5,189 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/audit/auditnslogpolicy_vpnvserver_binding.py | benfinke/ns_python | d651d7aa01d7dc63c1cd435c7b3314d7f5b26659 | [
"Apache-2.0"
] | 1 | 2015-04-05T21:21:26.000Z | 2015-04-05T21:21:26.000Z | nssrc/com/citrix/netscaler/nitro/resource/config/audit/auditnslogpolicy_vpnvserver_binding.py | benfinke/ns_python | d651d7aa01d7dc63c1cd435c7b3314d7f5b26659 | [
"Apache-2.0"
] | 1 | 2017-01-20T22:56:58.000Z | 2017-01-20T22:56:58.000Z | nssrc/com/citrix/netscaler/nitro/resource/config/audit/auditnslogpolicy_vpnvserver_binding.py | benfinke/ns_python | d651d7aa01d7dc63c1cd435c7b3314d7f5b26659 | [
"Apache-2.0"
] | 6 | 2015-04-21T13:14:08.000Z | 2020-12-03T07:27:52.000Z | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class auditnslogpolicy_vpnvserver_binding(base_resource) :
    """ Binding class showing the vpnvserver that can be bound to auditnslogpolicy.
    """
    # NOTE: auto-generated NITRO SDK binding (Python 2, hence the ur"" strings).
    def __init__(self) :
        # Fields mirrored from the NetScaler NITRO response.
        self._boundto = ""
        self._priority = 0
        self._activepolicy = 0
        self._name = ""
        # Record count filled in by count()/count_filtered() queries.
        self.___count = 0

    @property
    def name(self) :
        ur"""Name of the policy.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        ur"""Name of the policy.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def boundto(self) :
        ur"""The entity name to which policy is bound.
        """
        try :
            return self._boundto
        except Exception as e:
            raise e

    @boundto.setter
    def boundto(self, boundto) :
        ur"""The entity name to which policy is bound.
        """
        try :
            self._boundto = boundto
        except Exception as e:
            raise e

    @property
    def priority(self) :
        # Read-only: priority assigned to the bound policy (server-reported).
        try :
            return self._priority
        except Exception as e:
            raise e

    @property
    def activepolicy(self) :
        # Read-only flag reported by the appliance.
        try :
            return self._activepolicy
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(auditnslogpolicy_vpnvserver_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Errorcode 444 triggers a session reset before raising —
                # presumably an expired session; confirm against NITRO docs.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.auditnslogpolicy_vpnvserver_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            if self.name is not None :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, name) :
        ur""" Use this API to fetch auditnslogpolicy_vpnvserver_binding resources.
        """
        try :
            obj = auditnslogpolicy_vpnvserver_binding()
            obj.name = name
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, name, filter_) :
        ur""" Use this API to fetch filtered set of auditnslogpolicy_vpnvserver_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = auditnslogpolicy_vpnvserver_binding()
            obj.name = name
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, name) :
        ur""" Use this API to count auditnslogpolicy_vpnvserver_binding resources configued on NetScaler.
        """
        try :
            obj = auditnslogpolicy_vpnvserver_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            # The count is deserialized into the '___count' slot by the
            # payload formatter; accessed via __dict__ to dodge name mangling.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, name, filter_) :
        ur""" Use this API to count the filtered set of auditnslogpolicy_vpnvserver_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = auditnslogpolicy_vpnvserver_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
class auditnslogpolicy_vpnvserver_binding_response(base_response) :
    # Envelope object the NITRO payload formatter deserializes responses into.
    def __init__(self, length=1) :
        self.auditnslogpolicy_vpnvserver_binding = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one resource object per expected record.
        self.auditnslogpolicy_vpnvserver_binding = [auditnslogpolicy_vpnvserver_binding() for _ in range(length)]
| 28.355191 | 137 | 0.72461 |
bc3063da49d6a07aaabf4c6e400ea075cf719e61 | 1,279 | py | Python | src/Learning/visualizations.py | olekscode/NameGen | bb0683194df15c9709a1d09252c638a80999b40c | [
"MIT"
] | null | null | null | src/Learning/visualizations.py | olekscode/NameGen | bb0683194df15c9709a1d09252c638a80999b40c | [
"MIT"
] | null | null | null | src/Learning/visualizations.py | olekscode/NameGen | bb0683194df15c9709a1d09252c638a80999b40c | [
"MIT"
] | null | null | null | import constants
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()  # apply seaborn's default styling to all matplotlib plots

# Named xkcd colors shared by the plotting helpers below.
COLORS = {
    'green': sns.xkcd_rgb["faded green"],
    'red': sns.xkcd_rgb["pale red"],
    'blue': sns.xkcd_rgb["medium blue"],
    'yellow': sns.xkcd_rgb["ochre"]
}
def plot_confusion_dataframe(df, nrows=5, with_percents=False, total=None):
    """Render the first ``nrows`` rows of a confusion dataframe as heatmap(s).

    Parameters
    ----------
    df : pandas.DataFrame
        Confusion counts (integer cells).
    nrows : int
        Number of rows from the top of ``df`` to plot.
    with_percents : bool
        If True, draw a second panel showing ``df / total * 100``.
    total : number, optional
        Denominator for the percentage panel; required when
        ``with_percents`` is True.

    Returns
    -------
    matplotlib.figure.Figure

    Raises
    ------
    ValueError
        If ``with_percents`` is True but ``total`` is None.
    """
    df = df.head(nrows)
    plt.tight_layout()
    if with_percents:
        # An assert would be stripped under `python -O`; validate explicitly.
        if total is None:
            raise ValueError("total is required when with_percents=True")
        percents = df / total * 100
        fig, ax = plt.subplots(1, 2, figsize=(18, 5), sharey=True)
        __plot_heatmap(df, ax=ax[0], fmt="d")
        __plot_heatmap(percents, ax=ax[1], fmt="0.1f")
    else:
        fig, ax = plt.subplots()
        __plot_heatmap(df, ax=ax, fmt="d")
    return fig
def plot_history(history, color, title, ylabel, xlabel='Iteration'):
    """Plot a logged metric history against iteration count; return the figure."""
    fig, axis = plt.subplots()
    # Consecutive logged points are LOG_EVERY iterations apart.
    iterations = constants.LOG_EVERY * np.arange(len(history))
    axis.plot(iterations, history, color)
    axis.set_title(title)
    axis.set_ylabel(ylabel)
    axis.set_xlabel(xlabel)
    return fig
def __plot_heatmap(df, ax, fmt):
    """Draw an annotated, colorbar-free blue heatmap of ``df`` onto ``ax``."""
    sns.heatmap(df, ax=ax, annot=True, fmt=fmt, cmap="Blues", cbar=False)
    # Column labels on top, no y-axis title, larger tick labels,
    # y labels kept horizontal.
    ax.xaxis.tick_top()
    ax.set_ylabel('')
    ax.tick_params(axis='both', labelsize=16)
    ax.tick_params(axis='y', labelrotation=0)
f4318c108c7e51480c6e64b4defb19b630bdb87f | 10,177 | py | Python | Lib/distutils/tests/test_filelist.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | 1 | 2018-06-21T18:21:24.000Z | 2018-06-21T18:21:24.000Z | Lib/distutils/tests/test_filelist.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | Lib/distutils/tests/test_filelist.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | """Tests dla distutils.filelist."""
zaimportuj os
zaimportuj re
zaimportuj unittest
z distutils zaimportuj debug
z distutils.log zaimportuj WARN
z distutils.errors zaimportuj DistutilsTemplateError
z distutils.filelist zaimportuj glob_to_re, translate_pattern, FileList
z test.support zaimportuj captured_stdout, run_unittest
z distutils.tests zaimportuj support
# Template exercising every MANIFEST.in directive understood by
# distutils.filelist (include/exclude, global-*, recursive-*, graft, prune).
MANIFEST_IN = """\
include ok
include xo
exclude xo
include foo.tmp
include buildout.cfg
global-include *.x
global-include *.txt
global-exclude *.tmp
recursive-include f *.oo
recursive-exclude global *.x
graft dir
prune dir3
"""
def make_local_path(s):
    """Convert '/' separators in a string to os.sep."""
    zwróć s.replace('/', os.sep)
klasa FileListTestCase(support.LoggingSilencer,
                       unittest.TestCase):
    """Tests for distutils.filelist.FileList and its module-level helpers."""

    def assertNoWarnings(self):
        # No WARN-level log records were emitted; reset the log for the next check.
        self.assertEqual(self.get_logs(WARN), [])
        self.clear_logs()

    def assertWarnings(self):
        # At least one WARN-level record was emitted; reset the log afterwards.
        self.assertGreater(len(self.get_logs(WARN)), 0)
        self.clear_logs()

    def test_glob_to_re(self):
        sep = os.sep
        jeżeli os.sep == '\\':
            sep = re.escape(os.sep)

        dla glob, regex w (
            # simple cases
            ('foo*', r'foo[^%(sep)s]*\Z(?ms)'),
            ('foo?', r'foo[^%(sep)s]\Z(?ms)'),
            ('foo??', r'foo[^%(sep)s][^%(sep)s]\Z(?ms)'),
            # special cases
            (r'foo\\*', r'foo\\\\[^%(sep)s]*\Z(?ms)'),
            (r'foo\\\*', r'foo\\\\\\[^%(sep)s]*\Z(?ms)'),
            ('foo????', r'foo[^%(sep)s][^%(sep)s][^%(sep)s][^%(sep)s]\Z(?ms)'),
            (r'foo\\??', r'foo\\\\[^%(sep)s][^%(sep)s]\Z(?ms)')):
            regex = regex % {'sep': sep}
            self.assertEqual(glob_to_re(glob), regex)

    def test_process_template_line(self):
        # testing all MANIFEST.in template patterns
        file_list = FileList()
        l = make_local_path

        # simulated file list
        file_list.allfiles = ['foo.tmp', 'ok', 'xo', 'four.txt',
                              'buildout.cfg',
                              # filelist does not filter out VCS directories,
                              # it's sdist that does
                              l('.hg/last-message.txt'),
                              l('global/one.txt'),
                              l('global/two.txt'),
                              l('global/files.x'),
                              l('global/here.tmp'),
                              l('f/o/f.oo'),
                              l('dir/graft-one'),
                              l('dir/dir2/graft2'),
                              l('dir3/ok'),
                              l('dir3/sub/ok.txt'),
                              ]

        dla line w MANIFEST_IN.split('\n'):
            jeżeli line.strip() == '':
                kontynuuj
            file_list.process_template_line(line)

        wanted = ['ok',
                  'buildout.cfg',
                  'four.txt',
                  l('.hg/last-message.txt'),
                  l('global/one.txt'),
                  l('global/two.txt'),
                  l('f/o/f.oo'),
                  l('dir/graft-one'),
                  l('dir/dir2/graft2'),
                  ]

        self.assertEqual(file_list.files, wanted)

    def test_debug_print(self):
        file_list = FileList()
        # With debugging disabled nothing is printed.
        przy captured_stdout() jako stdout:
            file_list.debug_print('xxx')
        self.assertEqual(stdout.getvalue(), '')

        debug.DEBUG = Prawda
        spróbuj:
            przy captured_stdout() jako stdout:
                file_list.debug_print('xxx')
            self.assertEqual(stdout.getvalue(), 'xxx\n')
        w_końcu:
            # Always restore the global debug flag.
            debug.DEBUG = Nieprawda

    def test_set_allfiles(self):
        file_list = FileList()
        files = ['a', 'b', 'c']
        file_list.set_allfiles(files)
        self.assertEqual(file_list.allfiles, files)

    def test_remove_duplicates(self):
        file_list = FileList()
        file_list.files = ['a', 'b', 'a', 'g', 'c', 'g']
        # files must be sorted beforehand (sdist does it)
        file_list.sort()
        file_list.remove_duplicates()
        self.assertEqual(file_list.files, ['a', 'b', 'c', 'g'])

    def test_translate_pattern(self):
        # not a regex
        self.assertPrawda(hasattr(
            translate_pattern('a', anchor=Prawda, is_regex=Nieprawda),
            'search'))

        # is a regex
        regex = re.compile('a')
        self.assertEqual(
            translate_pattern(regex, anchor=Prawda, is_regex=Prawda),
            regex)

        # plain string flagged as regex
        self.assertPrawda(hasattr(
            translate_pattern('a', anchor=Prawda, is_regex=Prawda),
            'search'))

        # glob support
        self.assertPrawda(translate_pattern(
            '*.py', anchor=Prawda, is_regex=Nieprawda).search('filelist.py'))

    def test_exclude_pattern(self):
        # return False if no match
        file_list = FileList()
        self.assertNieprawda(file_list.exclude_pattern('*.py'))

        # return True if files match
        file_list = FileList()
        file_list.files = ['a.py', 'b.py']
        self.assertPrawda(file_list.exclude_pattern('*.py'))

        # test excludes
        file_list = FileList()
        file_list.files = ['a.py', 'a.txt']
        file_list.exclude_pattern('*.py')
        self.assertEqual(file_list.files, ['a.txt'])

    def test_include_pattern(self):
        # return False if no match
        file_list = FileList()
        file_list.set_allfiles([])
        self.assertNieprawda(file_list.include_pattern('*.py'))

        # return True if files match
        file_list = FileList()
        file_list.set_allfiles(['a.py', 'b.txt'])
        self.assertPrawda(file_list.include_pattern('*.py'))

        # test * matches all files
        file_list = FileList()
        self.assertIsNic(file_list.allfiles)
        file_list.set_allfiles(['a.py', 'b.txt'])
        file_list.include_pattern('*')
        self.assertEqual(file_list.allfiles, ['a.py', 'b.txt'])

    def test_process_template(self):
        l = make_local_path
        # invalid lines
        file_list = FileList()
        dla action w ('include', 'exclude', 'global-include',
                      'global-exclude', 'recursive-include',
                      'recursive-exclude', 'graft', 'prune', 'blarg'):
            # Each directive requires arguments; bare directives must raise.
            self.assertRaises(DistutilsTemplateError,
                              file_list.process_template_line, action)

        # include
        file_list = FileList()
        file_list.set_allfiles(['a.py', 'b.txt', l('d/c.py')])

        file_list.process_template_line('include *.py')
        self.assertEqual(file_list.files, ['a.py'])
        self.assertNoWarnings()

        file_list.process_template_line('include *.rb')
        self.assertEqual(file_list.files, ['a.py'])
        self.assertWarnings()

        # exclude
        file_list = FileList()
        file_list.files = ['a.py', 'b.txt', l('d/c.py')]

        file_list.process_template_line('exclude *.py')
        self.assertEqual(file_list.files, ['b.txt', l('d/c.py')])
        self.assertNoWarnings()

        file_list.process_template_line('exclude *.rb')
        self.assertEqual(file_list.files, ['b.txt', l('d/c.py')])
        self.assertWarnings()

        # global-include
        file_list = FileList()
        file_list.set_allfiles(['a.py', 'b.txt', l('d/c.py')])

        file_list.process_template_line('global-include *.py')
        self.assertEqual(file_list.files, ['a.py', l('d/c.py')])
        self.assertNoWarnings()

        file_list.process_template_line('global-include *.rb')
        self.assertEqual(file_list.files, ['a.py', l('d/c.py')])
        self.assertWarnings()

        # global-exclude
        file_list = FileList()
        file_list.files = ['a.py', 'b.txt', l('d/c.py')]

        file_list.process_template_line('global-exclude *.py')
        self.assertEqual(file_list.files, ['b.txt'])
        self.assertNoWarnings()

        file_list.process_template_line('global-exclude *.rb')
        self.assertEqual(file_list.files, ['b.txt'])
        self.assertWarnings()

        # recursive-include
        file_list = FileList()
        file_list.set_allfiles(['a.py', l('d/b.py'), l('d/c.txt'),
                                l('d/d/e.py')])

        file_list.process_template_line('recursive-include d *.py')
        self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')])
        self.assertNoWarnings()

        file_list.process_template_line('recursive-include e *.py')
        self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')])
        self.assertWarnings()

        # recursive-exclude
        file_list = FileList()
        file_list.files = ['a.py', l('d/b.py'), l('d/c.txt'), l('d/d/e.py')]

        file_list.process_template_line('recursive-exclude d *.py')
        self.assertEqual(file_list.files, ['a.py', l('d/c.txt')])
        self.assertNoWarnings()

        file_list.process_template_line('recursive-exclude e *.py')
        self.assertEqual(file_list.files, ['a.py', l('d/c.txt')])
        self.assertWarnings()

        # graft
        file_list = FileList()
        file_list.set_allfiles(['a.py', l('d/b.py'), l('d/d/e.py'),
                                l('f/f.py')])

        file_list.process_template_line('graft d')
        self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')])
        self.assertNoWarnings()

        file_list.process_template_line('graft e')
        self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')])
        self.assertWarnings()

        # prune
        file_list = FileList()
        file_list.files = ['a.py', l('d/b.py'), l('d/d/e.py'), l('f/f.py')]

        file_list.process_template_line('prune d')
        self.assertEqual(file_list.files, ['a.py', l('f/f.py')])
        self.assertNoWarnings()

        file_list.process_template_line('prune e')
        self.assertEqual(file_list.files, ['a.py', l('f/f.py')])
        self.assertWarnings()
def test_suite():
    # Entry point used by the distutils test runner.
    zwróć unittest.makeSuite(FileListTestCase)

jeżeli __name__ == "__main__":
    run_unittest(test_suite())
| 33.923333 | 79 | 0.554289 |
8113d52abefbf2b463fd71a5f1031403fcb968ce | 276 | py | Python | sample/models.py | ernestom/django-google-maps | d415971901fd114019d0483f98a22aa7c897e8bf | [
"BSD-2-Clause"
] | 205 | 2015-01-18T21:03:08.000Z | 2022-03-26T17:02:05.000Z | sample/models.py | ernestom/django-google-maps | d415971901fd114019d0483f98a22aa7c897e8bf | [
"BSD-2-Clause"
] | 55 | 2015-05-06T18:36:41.000Z | 2022-03-22T11:48:11.000Z | sample/models.py | ernestom/django-google-maps | d415971901fd114019d0483f98a22aa7c897e8bf | [
"BSD-2-Clause"
] | 92 | 2015-02-16T20:01:31.000Z | 2022-03-02T14:15:36.000Z | from django.db import models
from django_google_maps.fields import AddressField, GeoLocationField
class SampleModel(models.Model):
    """Demo model exercising django_google_maps: a free-text address plus a
    geolocation field (blank allowed)."""
    address = AddressField(max_length=100)
    geolocation = GeoLocationField(blank=True)

    def __str__(self):
        return self.address
| 23 | 68 | 0.768116 |
ad8b4d31df93a9f8f2f3978ddfb089cc492cf94e | 1,149 | py | Python | scripts/seperate_two_real.py | TechieBoy/deepfake-detection | 5bc7164710a32a64a65dd55e09aa58a030e8d0ee | [
"MIT"
] | null | null | null | scripts/seperate_two_real.py | TechieBoy/deepfake-detection | 5bc7164710a32a64a65dd55e09aa58a030e8d0ee | [
"MIT"
] | null | null | null | scripts/seperate_two_real.py | TechieBoy/deepfake-detection | 5bc7164710a32a64a65dd55e09aa58a030e8d0ee | [
"MIT"
] | null | null | null | import pandas as pd
from shutil import copy
import os
import torch
import csv
# Map the first two REAL videos from each dfdc_train_part_{0..49} folder to
# all fake videos derived from them, then dump the mapping as rf_mapping.csv.
df = pd.read_csv("~/deepfake/raw/combined_metadata.csv")
real_videos = df[df.label == "REAL"]
cf = {}  # real video name -> list of fake video names derived from it
fd = {}  # real video name -> source folder path
for i in range(50):
    folder_name = f"/home/teh_devs/deepfake/raw/dfdc_train_part_{i}"
    # First two REAL videos recorded for this folder.
    two_real = real_videos[real_videos.folder == folder_name].head(2)["index"].tolist()
    for r in two_real:
        # All fakes whose "original" column points back at this real video.
        cf[r] = df[df.original == r]["index"].tolist()
        fd[r] = folder_name

# newline="" prevents the csv module from emitting blank rows on Windows.
with open("rf_mapping.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(["real", "fake", "folder"])
    for real, fakes in cf.items():
        for fake in fakes:
            writer.writerow([real, fake, fd[real]])
| 28.02439 | 87 | 0.642298 |
880d4e8252c1a261f88fde4f17ee817369ea334b | 1,006 | py | Python | treasuryio/tweetbot.py | csvsoundsystem/pytreasuryio | 728caf815d16cd2f3548d8b67c84313de76f9be7 | [
"MIT"
] | 2 | 2017-10-09T23:29:02.000Z | 2019-08-12T18:20:27.000Z | treasuryio/tweetbot.py | csvsoundsystem/pytreasuryio | 728caf815d16cd2f3548d8b67c84313de76f9be7 | [
"MIT"
] | null | null | null | treasuryio/tweetbot.py | csvsoundsystem/pytreasuryio | 728caf815d16cd2f3548d8b67c84313de76f9be7 | [
"MIT"
] | 3 | 2019-08-12T18:20:33.000Z | 2021-01-11T21:26:47.000Z | import os
import yaml
from datetime import datetime
import tweepy
from query import query
def _connect_to_twitter(config = os.path.expanduser("~/.twitter.yml")):
    """Build an authenticated tweepy API client from a YAML credentials file."""
    credentials = yaml.safe_load(open(config))
    handler = tweepy.OAuthHandler(credentials['consumer_key'], credentials['consumer_secret'])
    handler.set_access_token(credentials['access_token'], credentials['access_token_secret'])
    return tweepy.API(handler)
def tweet(tweet_text_func):
'''
A decorator to make a function Tweet
Parameters
- `tweet_text_func` is a function that takes no parameters and returns a tweetable string
For example::
@tweet
def total_deposits_this_week():
# ...
@tweet
def not_an_interesting_tweet():
return 'This tweet is not data-driven.'
'''
def tweet_func():
api = _connect_to_twitter()
tweet = tweet_text_func()
print "Tweeting: %s" % tweet
api.update_status(tweet)
return tweet
return tweet_func
| 23.952381 | 93 | 0.656064 |
34dd363fce3a9b2a38da5f7e1afb4f26d0a4f796 | 8,922 | py | Python | mypython/Lib/site-packages/pandas/core/accessor.py | lilianatang/data-modelling-with-postgresql | 4b5d057d23c346cc36695dc0548f11908aeb5431 | [
"Apache-2.0"
] | null | null | null | mypython/Lib/site-packages/pandas/core/accessor.py | lilianatang/data-modelling-with-postgresql | 4b5d057d23c346cc36695dc0548f11908aeb5431 | [
"Apache-2.0"
] | null | null | null | mypython/Lib/site-packages/pandas/core/accessor.py | lilianatang/data-modelling-with-postgresql | 4b5d057d23c346cc36695dc0548f11908aeb5431 | [
"Apache-2.0"
] | 1 | 2021-04-26T22:41:56.000Z | 2021-04-26T22:41:56.000Z | """
accessor.py contains base classes for implementing accessor properties
that can be mixed into or pinned onto other pandas classes.
"""
from typing import FrozenSet, List, Set
import warnings
from pandas.util._decorators import doc
class DirNamesMixin:
    """Mixin that tailors ``dir(obj)``: hides internal attributes and surfaces
    registered accessors that are actually present on the instance."""

    _accessors: Set[str] = set()
    _hidden_attrs: FrozenSet[str] = frozenset()

    def _dir_deletions(self) -> Set[str]:
        """Names to suppress from ``__dir__`` for this object."""
        return self._accessors | self._hidden_attrs

    def _dir_additions(self) -> Set[str]:
        """Extra names to expose in ``__dir__`` for this object."""
        present = set()
        for accessor in self._accessors:
            if hasattr(self, accessor):
                present.add(accessor)
        return present

    def __dir__(self) -> List[str]:
        """
        Provide method name lookup and completion.

        Notes
        -----
        Only provide 'public' methods.
        """
        names = set(super().__dir__())
        names -= self._dir_deletions()
        names |= self._dir_additions()
        return sorted(names)
class PandasDelegate:
    """
    Abstract base class for delegating methods/properties.

    Subclasses override the ``_delegate_*`` hooks; by default every
    delegated access raises TypeError.
    """

    def _delegate_property_get(self, name, *args, **kwargs):
        raise TypeError(f"You cannot access the property {name}")

    def _delegate_property_set(self, name, value, *args, **kwargs):
        raise TypeError(f"The property {name} cannot be set")

    def _delegate_method(self, name, *args, **kwargs):
        raise TypeError(f"You cannot call method {name}")

    @classmethod
    def _add_delegate_accessors(
        cls, delegate, accessors, typ: str, overwrite: bool = False
    ):
        """
        Add accessors to cls from the delegate class.

        Parameters
        ----------
        cls
            Class to add the methods/properties to.
        delegate
            Class to get methods/properties and doc-strings.
        accessors : list of str
            List of accessors to add.
        typ : {'property', 'method'}
        overwrite : bool, default False
            Overwrite the method/property in the target class if it exists.
        """

        def _make_property(name):
            # Route attribute get/set through the _delegate_property_* hooks.
            def _getter(self):
                return self._delegate_property_get(name)

            def _setter(self, new_values):
                return self._delegate_property_set(name, new_values)

            _getter.__name__ = name
            _setter.__name__ = name

            return property(
                fget=_getter, fset=_setter, doc=getattr(delegate, name).__doc__
            )

        def _make_method(name):
            def f(self, *args, **kwargs):
                return self._delegate_method(name, *args, **kwargs)

            f.__name__ = name
            f.__doc__ = getattr(delegate, name).__doc__
            return f

        factory = _make_property if typ == "property" else _make_method
        for name in accessors:
            delegator = factory(name)
            # don't overwrite existing methods/properties unless requested
            if overwrite or not hasattr(cls, name):
                setattr(cls, name, delegator)
def delegate_names(delegate, accessors, typ: str, overwrite: bool = False):
    """
    Class-decorator form of ``PandasDelegate._add_delegate_accessors``.

    Parameters
    ----------
    delegate : object
        The class to get methods/properties & doc-strings.
    accessors : Sequence[str]
        List of accessor to add.
    typ : {'property', 'method'}
    overwrite : bool, default False
        Overwrite the method/property in the target class if it exists.

    Returns
    -------
    callable
        A class decorator.

    Examples
    --------
    @delegate_names(Categorical, ["categories", "ordered"], "property")
    class CategoricalAccessor(PandasDelegate):
        [...]
    """

    def _decorator(target_cls):
        target_cls._add_delegate_accessors(delegate, accessors, typ, overwrite=overwrite)
        return target_cls

    return _decorator
# Ported with modifications from xarray
# https://github.com/pydata/xarray/blob/master/xarray/core/extensions.py
# 1. We don't need to catch and re-raise AttributeErrors as RuntimeErrors
# 2. We use a UserWarning instead of a custom Warning
class CachedAccessor:
    """
    Property-like descriptor that lazily builds and caches an accessor.

    Parameters
    ----------
    name : str
        Namespace that will be accessed under, e.g. ``df.foo``.
    accessor : cls
        Class with the extension methods.

    Notes
    -----
    For accessor, The class's __init__ method assumes that one of
    ``Series``, ``DataFrame`` or ``Index`` as the
    single argument ``data``.
    """

    def __init__(self, name: str, accessor) -> None:
        self._name = name
        self._accessor = accessor

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself (e.g. Dataset.geo): hand back
            # the accessor class, not an instance.
            return self._accessor
        instance = self._accessor(obj)
        # Cache on the instance so later lookups bypass this descriptor;
        # object.__setattr__ sidesteps NDFrame's overridden __setattr__.
        object.__setattr__(obj, self._name, instance)
        return instance
@doc(klass="", others="")
def _register_accessor(name, cls):
    """
    Register a custom accessor on {klass} objects.

    Parameters
    ----------
    name : str
        Name under which the accessor should be registered. A warning is issued
        if this name conflicts with a preexisting attribute.

    Returns
    -------
    callable
        A class decorator.

    See Also
    --------
    register_dataframe_accessor : Register a custom accessor on DataFrame objects.
    register_series_accessor : Register a custom accessor on Series objects.
    register_index_accessor : Register a custom accessor on Index objects.

    Notes
    -----
    When accessed, your accessor will be initialized with the pandas object
    the user is interacting with. So the signature must be

    .. code-block:: python

        def __init__(self, pandas_object):  # noqa: E999
            ...

    For consistency with pandas methods, you should raise an ``AttributeError``
    if the data passed to your accessor has an incorrect dtype.

    >>> pd.Series(['a', 'b']).dt
    Traceback (most recent call last):
    ...
    AttributeError: Can only use .dt accessor with datetimelike values

    Examples
    --------
    In your library code::

        import pandas as pd

        @pd.api.extensions.register_dataframe_accessor("geo")
        class GeoAccessor:
            def __init__(self, pandas_obj):
                self._obj = pandas_obj

            @property
            def center(self):
                # return the geographic center point of this DataFrame
                lat = self._obj.latitude
                lon = self._obj.longitude
                return (float(lon.mean()), float(lat.mean()))

            def plot(self):
                # plot this array's data on a map, e.g., using Cartopy
                pass

    Back in an interactive IPython session:

        .. code-block:: ipython

            In [1]: ds = pd.DataFrame({{"longitude": np.linspace(0, 10),
               ...:                    "latitude": np.linspace(0, 20)}})
            In [2]: ds.geo.center
            Out[2]: (5.0, 10.0)
            In [3]: ds.geo.plot()  # plots data on a map
    """

    def decorator(accessor):
        if hasattr(cls, name):
            # Overriding an existing attribute is permitted but loudly warned.
            warnings.warn(
                f"registration of accessor {repr(accessor)} under name "
                f"{repr(name)} for type {repr(cls)} is overriding a preexisting "
                f"attribute with the same name.",
                UserWarning,
                stacklevel=2,
            )
        setattr(cls, name, CachedAccessor(name, accessor))
        # Track the name so DirNamesMixin surfaces it via __dir__.
        cls._accessors.add(name)
        return accessor

    return decorator
@doc(_register_accessor, klass="DataFrame")
def register_dataframe_accessor(name):
    # Docstring is injected by @doc from _register_accessor with
    # {klass} substituted as "DataFrame".
    from pandas import DataFrame
    return _register_accessor(name, DataFrame)
@doc(_register_accessor, klass="Series")
def register_series_accessor(name):
    # Docstring is injected by @doc from _register_accessor with
    # {klass} substituted as "Series".
    from pandas import Series
    return _register_accessor(name, Series)
@doc(_register_accessor, klass="Index")
def register_index_accessor(name):
    # Docstring is injected by @doc from _register_accessor with
    # {klass} substituted as "Index".
    from pandas import Index
    return _register_accessor(name, Index)
| 30.040404 | 85 | 0.594486 |
86a6d43260cc34811b2167f52ced115d882c7e82 | 1,308 | py | Python | ica/paraphraseator/callbacks/LoggerCallback.py | pedrorio/image_caption_augmentation | 683ed90cecd4bc12f65dc238f1ff2dedbbc1b666 | [
"MIT"
] | null | null | null | ica/paraphraseator/callbacks/LoggerCallback.py | pedrorio/image_caption_augmentation | 683ed90cecd4bc12f65dc238f1ff2dedbbc1b666 | [
"MIT"
] | null | null | null | ica/paraphraseator/callbacks/LoggerCallback.py | pedrorio/image_caption_augmentation | 683ed90cecd4bc12f65dc238f1ff2dedbbc1b666 | [
"MIT"
] | null | null | null | import os
import logging
from pytorch_lightning import Callback, seed_everything
# Fix all RNG seeds at import time so runs using this callback are reproducible.
seed_everything(42)
class LoggerCallback(Callback):
    """Lightning callback that logs validation/test metrics.

    Validation metrics go to the module logger; test metrics additionally go
    to ``<module.output_dir>/test_results.txt``.  Only the rank-zero process
    performs the detailed reporting.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def on_validation_end(self, trainer, module):
        self.logger.info("***** Validation results *****")
        if not self.is_logger(trainer):
            return
        metrics = trainer.callback_metrics
        for key in sorted(metrics):
            # Skip the aggregate bookkeeping entries.
            if key in ("log", "progress_bar"):
                continue
            self.logger.info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, module):
        self.logger.info("***** Test results *****")
        if not self.is_logger(trainer):
            return
        metrics = trainer.callback_metrics
        results_path = os.path.join(
            module.output_dir,
            "test_results.txt"
        )
        with open(results_path, "w") as writer:
            for key in sorted(metrics):
                if key in ("log", "progress_bar"):
                    continue
                self.logger.info("{} = {}\n".format(key, str(metrics[key])))
                writer.write("{} = {}\n".format(key, str(metrics[key])))

    def is_logger(self, trainer):
        # Only the rank-zero process should emit detailed logs/files.
        return trainer.global_rank <= 0
1504a3b5fc3bcfbf476c45b92bb608923c6f58dc | 1,838 | py | Python | utils.py | swapno-ahmed/CoursePicker | 60260a283faac5988ccb1bf70c31db013ec71602 | [
"MIT"
] | 3 | 2022-01-23T06:46:22.000Z | 2022-01-23T07:03:06.000Z | utils.py | swapno-ahmed/CoursePicker | 60260a283faac5988ccb1bf70c31db013ec71602 | [
"MIT"
] | null | null | null | utils.py | swapno-ahmed/CoursePicker | 60260a283faac5988ccb1bf70c31db013ec71602 | [
"MIT"
] | null | null | null | from collections import defaultdict
import pandas as pd
def make_ds(data):
    """Build course-dependency structures from a prerequisite table.

    Each row index of *data* is a course; its cell values are prerequisite
    course names ('' means none).  Returns ``(follow_up, prereq_count,
    start)``: courses unlocked by each course, prerequisite counts, and the
    courses with no prerequisites.
    """
    follow_up = defaultdict(list)
    prereq_count = defaultdict(int)
    start = []

    # Seed every course so isolated ones still appear in both maps.
    for course, _ in data.iterrows():
        follow_up[course] = []
        prereq_count[course] = 0

    for course, _ in data.iterrows():
        for prerequisite in data.loc[course]:
            if prerequisite == '':
                continue
            follow_up[prerequisite].append(course)
            prereq_count[course] += 1
        if prereq_count[course] == 0:
            start.append(course)

    return follow_up, prereq_count, start
def reset_completed(data):
    """Mark every course in *data* as not finished.

    Vectorized replacement for the original per-row ``iterrows`` loop: a
    single column assignment sets (or creates) the ``FINISHED`` flag for all
    rows at once.
    """
    # Guard the empty frame so no column is created when there are no rows,
    # matching the original loop's no-op behavior.
    if len(data.index):
        data['FINISHED'] = False
def dfs_util(graph, course, completed, pre_requisite_count, pre_requisite_completed, unlocked):
    """Walk the follow-ups of a finished *course*, collecting newly unlocked ones.

    A follow-up whose completed-prerequisite tally reaches its prerequisite
    count is appended to *unlocked*; already-finished follow-ups are descended
    into recursively.
    """
    for follow_up in graph[course]:
        if completed.loc[follow_up, 'Finished']:
            # Already taken: keep descending through its own follow-ups.
            dfs_util(graph, follow_up, completed, pre_requisite_count,
                     pre_requisite_completed, unlocked)
            continue
        pre_requisite_completed[follow_up] += 1
        if pre_requisite_completed[follow_up] == pre_requisite_count[follow_up]:
            unlocked.append(follow_up)
def dfs(graph, start, completed, pre_requisite_count):
    """Return the sorted list of courses currently available to take.

    Unfinished root courses are available immediately; finished ones are
    traversed with :func:`dfs_util` to find follow-ups whose prerequisites
    are all complete.
    """
    progress = defaultdict(int)
    unlocked = []
    for course in start:
        if completed.loc[course, 'Finished']:
            dfs_util(graph, course, completed, pre_requisite_count,
                     progress, unlocked)
        else:
            unlocked.append(course)
    unlocked.sort()
    return unlocked
def get_unlocked_course(data, completed):
    """Compute the currently unlocked courses for the given finished-state table."""
    graph, prereq_count, roots = make_ds(data)
    return dfs(graph, roots, completed, prereq_count)
| 28.71875 | 95 | 0.642002 |
b7b9ed2b92e6a2bea5fa9d4dfa86026a076a4dd6 | 475 | py | Python | env/Lib/site-packages/plotly/validators/isosurface/colorbar/_outlinewidth.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/isosurface/colorbar/_outlinewidth.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/isosurface/colorbar/_outlinewidth.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class OutlinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``isosurface.colorbar.outlinewidth`` property."""

    def __init__(
        self, plotly_name="outlinewidth", parent_name="isosurface.colorbar", **kwargs
    ):
        # Defaults may be overridden by the caller via kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs
        )
| 31.666667 | 85 | 0.650526 |
769724780eb110fef8772210efaf064a92b8ca70 | 8,041 | py | Python | classla/models/lemma/trainer.py | lkrsnik/classla-stanfordnlp | 1ab8771aadfbc648cec51b4c6716797f698e67ff | [
"Apache-2.0"
] | 1 | 2020-07-04T21:06:20.000Z | 2020-07-04T21:06:20.000Z | classla/models/lemma/trainer.py | lkrsnik/classla-stanfordnlp | 1ab8771aadfbc648cec51b4c6716797f698e67ff | [
"Apache-2.0"
] | null | null | null | classla/models/lemma/trainer.py | lkrsnik/classla-stanfordnlp | 1ab8771aadfbc648cec51b4c6716797f698e67ff | [
"Apache-2.0"
] | null | null | null | """
A trainer class to handle training and testing of models.
"""
import sys
import numpy as np
from collections import Counter
import torch
from torch import nn
import torch.nn.init as init
import classla.models.common.seq2seq_constant as constant
from classla.models.common.seq2seq_model import Seq2SeqModel
from classla.models.common import utils, loss
from classla.models.lemma import edit
from classla.models.lemma.vocab import MultiVocab
def unpack_batch(batch, use_cuda):
    """ Unpack a batch from the data loader.

    Returns ``(inputs, orig_idx)``: the first six batch elements (moved to
    GPU when *use_cuda* is set; ``None`` placeholders are passed through
    untouched) and the seventh element, the original pre-sort order.
    """
    if use_cuda:
        inputs = [b.cuda() if b is not None else None for b in batch[:6]]
    else:
        # No device transfer needed — the original rebuilt the list with a
        # redundant `b if b is not None else None` conditional; a plain copy
        # is equivalent.
        inputs = list(batch[:6])
    orig_idx = batch[6]
    return inputs, orig_idx
class Trainer(object):
    """ A trainer for training models.

    Wraps a Seq2SeqModel lemmatizer plus a frequency-lexicon ("dict")
    component; can be built fresh from `args`/`vocab` or restored from a
    checkpoint file.
    """
    def __init__(self, args=None, vocab=None, emb_matrix=None, model_file=None, use_cuda=False):
        # When model_file is given, args/vocab/dicts/model are all restored
        # from disk and the other constructor arguments are ignored.
        self.use_cuda = use_cuda
        if model_file is not None:
            # load everything from file
            self.load(model_file, use_cuda)
        else:
            # build model from scratch
            self.args = args
            self.model = None if args['dict_only'] else Seq2SeqModel(args, emb_matrix=emb_matrix, use_cuda=use_cuda)
            self.vocab = vocab
            # dict-based components
            self.fallback_dict = dict()
            self.composite_dict = dict()
        if not self.args['dict_only']:
            # Criterion: a mixed seq2seq + edit-classifier loss when 'edit'
            # is enabled, a plain sequence loss otherwise.
            if self.args.get('edit', False):
                self.crit = loss.MixLoss(self.vocab['char'].size, self.args['alpha'])
                print("[Running seq2seq lemmatizer with edit classifier]")
            else:
                self.crit = loss.SequenceLoss(self.vocab['char'].size)
            self.parameters = [p for p in self.model.parameters() if p.requires_grad]
            if use_cuda:
                self.model.cuda()
                self.crit.cuda()
            else:
                self.model.cpu()
                self.crit.cpu()
            self.optimizer = utils.get_optimizer(self.args['optim'], self.parameters, self.args['lr'])
    def update(self, batch, eval=False):
        """ Run one forward pass (plus backward/optimizer step unless eval)
        and return the scalar loss value.

        NOTE: the `eval` parameter shadows the builtin; kept for interface
        compatibility with existing callers.
        """
        inputs, orig_idx = unpack_batch(batch, self.use_cuda)
        src, src_mask, tgt_in, tgt_out, pos, edits = inputs
        if eval:
            self.model.eval()
        else:
            self.model.train()
            self.optimizer.zero_grad()
        log_probs, edit_logits = self.model(src, src_mask, tgt_in, pos)
        if self.args.get('edit', False):
            assert edit_logits is not None
            # `loss` here is a tensor; it shadows the imported `loss` module
            # within this method.
            loss = self.crit(log_probs.view(-1, self.vocab['char'].size), tgt_out.view(-1), \
                    edit_logits, edits)
        else:
            loss = self.crit(log_probs.view(-1, self.vocab['char'].size), tgt_out.view(-1))
        loss_val = loss.data.item()
        if eval:
            return loss_val
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
        self.optimizer.step()
        return loss_val
    def predict(self, batch, beam_size=1):
        """ Decode lemmas for one batch; returns (pred_tokens, edits) in the
        batch's original (pre-sort) order.  `edits` is None unless the edit
        classifier is enabled. """
        inputs, orig_idx = unpack_batch(batch, self.use_cuda)
        src, src_mask, tgt, tgt_mask, pos, edits = inputs
        self.model.eval()
        batch_size = src.size(0)
        preds, edit_logits = self.model.predict(src, src_mask, pos=pos, beam_size=beam_size)
        pred_seqs = [self.vocab['char'].unmap(ids) for ids in preds] # unmap to tokens
        pred_seqs = utils.prune_decoded_seqs(pred_seqs)
        pred_tokens = ["".join(seq) for seq in pred_seqs] # join chars to be tokens
        pred_tokens = utils.unsort(pred_tokens, orig_idx)
        if self.args.get('edit', False):
            assert edit_logits is not None
            # Hard edit decision per example, restored to original order.
            edits = np.argmax(edit_logits.data.cpu().numpy(), axis=1).reshape([batch_size]).tolist()
            edits = utils.unsort(edits, orig_idx)
        else:
            edits = None
        return pred_tokens, edits
    def postprocess(self, words, preds, edits=None):
        """ Postprocess, mainly for handing edits.

        Applies predicted edit operations to each word, then falls back to
        the original word for empty or UNK-containing predictions.
        """
        assert len(words) == len(preds), "Lemma predictions must have same length as words."
        edited = []
        if self.args.get('edit', False):
            assert edits is not None and len(words) == len(edits)
            for w, p, e in zip(words, preds, edits):
                lem = edit.edit_word(w, p, e)
                edited += [lem]
        else:
            edited = preds # do not edit
        # final sanity check
        assert len(edited) == len(words)
        final = []
        for lem, w in zip(edited, words):
            if len(lem) == 0 or constant.UNK in lem:
                final += [w] # invalid prediction, fall back to word
            else:
                final += [lem]
        return final
    def update_lr(self, new_lr):
        # Set a new learning rate on the wrapped optimizer.
        utils.change_lr(self.optimizer, new_lr)
    def train_dict(self, triples):
        """ Train a dict lemmatizer given training (word, pos, lemma) tuples. """
        # accumulate counter
        ctr = Counter()
        ctr.update([(p[0], p[1], p[2]) for p in triples])
        # find the most frequent mappings
        # (most_common ordering means the first lemma stored per (word, pos)
        # is the most frequent one)
        for p, _ in ctr.most_common():
            w, pos, l = p
            if (w,pos) not in self.composite_dict:
                self.composite_dict[(w,pos)] = l
            #if (w, pos) not in self.fallback_dict:
            #    self.fallback_dict[(w,pos)] = l
        return
    def predict_dict(self, pairs):
        """ Predict a list of lemmas using the dict model given (word, pos) pairs. """
        lemmas = []
        for p in pairs:
            w, pos = p
            if (w,pos) in self.composite_dict:
                lemmas += [self.composite_dict[(w,pos)]]
            #elif (w,pos) in self.fallback_dict:
            #    lemmas += [self.fallback_dict[(w,pos)]]
            else:
                # Unknown pair: echo the word unchanged.
                lemmas += [w]
        return lemmas
    def skip_seq2seq(self, pairs):
        """ Determine if we can skip the seq2seq module when ensembling with the frequency lexicon. """
        skip = []
        for p in pairs:
            w, pos = p
            if (w,pos) in self.composite_dict:
                skip.append(True)
            #elif w in self.fallback_dict:
            #    skip.append(True)
            else:
                skip.append(False)
        return skip
    def ensemble(self, pairs, other_preds):
        """ Ensemble the dict with statitical model predictions.

        Lexicon hits win; otherwise the seq2seq prediction is used.
        """
        lemmas = []
        assert len(pairs) == len(other_preds)
        for p, pred in zip(pairs, other_preds):
            w, pos = p
            if (w,pos) in self.composite_dict:
                lemmas += [self.composite_dict[(w,pos)]]
            #elif (w,pos) in self.fallback_dict:
            #    lemmas += [self.fallback_dict[(w,pos)]]
            else:
                lemmas += [pred]
        return lemmas
    def save(self, filename):
        """ Persist model weights, lexicon dicts, vocab and config together. """
        params = {
                'model': self.model.state_dict() if self.model is not None else None,
                'dicts': (self.fallback_dict, self.composite_dict),
                'vocab': self.vocab.state_dict(),
                'config': self.args
                }
        try:
            torch.save(params, filename)
            print("model saved to {}".format(filename))
        except BaseException:
            # NOTE(review): BaseException also swallows KeyboardInterrupt here —
            # confirm this "continue anyway" behavior is intended.
            print("[Warning: Saving failed... continuing anyway.]")
    def load(self, filename, use_cuda=False):
        """ Restore trainer state written by save(); exits the process on failure. """
        try:
            # map_location lambda keeps tensors on CPU regardless of where
            # they were saved.
            checkpoint = torch.load(filename, lambda storage, loc: storage)
        except BaseException:
            print("Cannot load model from {}".format(filename))
            sys.exit(1)
        self.args = checkpoint['config']
        self.fallback_dict, self.composite_dict = checkpoint['dicts']
        if not self.args['dict_only']:
            self.model = Seq2SeqModel(self.args, use_cuda=use_cuda)
            self.model.load_state_dict(checkpoint['model'])
        else:
            self.model = None
        self.vocab = MultiVocab.load_state_dict(checkpoint['vocab'])
| 38.845411 | 116 | 0.575053 |
ab143492ef3bc0b77544ce29a49d7b8d05cbbdf0 | 317 | py | Python | examples/hexbin_marginals.py | tomasojea/seaborn | 9b03f8138949402a351fa06e2598144812aae586 | [
"BSD-3-Clause"
] | 2 | 2020-07-24T04:45:51.000Z | 2020-09-04T11:10:27.000Z | examples/hexbin_marginals.py | tomasojea/seaborn | 9b03f8138949402a351fa06e2598144812aae586 | [
"BSD-3-Clause"
] | 1 | 2020-10-31T23:31:41.000Z | 2020-10-31T23:31:41.000Z | examples/hexbin_marginals.py | tomasojea/seaborn | 9b03f8138949402a351fa06e2598144812aae586 | [
"BSD-3-Clause"
] | 2 | 2020-11-02T18:25:54.000Z | 2021-07-23T16:15:34.000Z | """
Hexbin plot with marginal distributions
=======================================
_thumb: .45, .4
"""
import numpy as np
import seaborn as sns
sns.set(style="ticks")
rs = np.random.RandomState(11)
x = rs.gamma(2, size=1000)
y = -.5 * x + rs.normal(size=1000)
sns.jointplot(x=x, y=y, kind="hex", color="#4CB391")
| 19.8125 | 52 | 0.586751 |
e1fca749c8ee6ac5bcdba83ab1c3afb860c5490f | 6,884 | py | Python | userbot/modules/system_stats.py | MADEWGN/botgabut | d92094598fd7cdfd40f41e31fa44035212ed2cd8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/system_stats.py | MADEWGN/botgabut | d92094598fd7cdfd40f41e31fa44035212ed2cd8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/system_stats.py | MADEWGN/botgabut | d92094598fd7cdfd40f41e31fa44035212ed2cd8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for getting information about the server. """
from asyncio import create_subprocess_exec as asyncrunapp
from asyncio.subprocess import PIPE as asyncPIPE
from os import remove
from platform import python_version, uname
from shutil import which
from git import Repo
from telethon import version
from telethon.errors.rpcerrorlist import MediaEmptyError
from userbot import ALIVE_LOGO, ALIVE_NAME, CMD_HELP, bot
from userbot.events import register
# ================= CONSTANT =================
# Display name shown by .alive: env-provided ALIVE_NAME, else the host's node name.
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
repo = Repo()  # git repository rooted at the current working directory
# ============================================
@register(outgoing=True, pattern=r"^\.sysd$")
async def sysdetails(sysd):
    """For .sysd command, get system info using neofetch."""
    # Ignore messages starting with another bot's command prefix.
    if not sysd.text[0].isalpha() and sysd.text[0] not in ("/", "#", "@", "!"):
        try:
            # Run neofetch non-interactively, capturing both streams.
            fetch = await asyncrunapp(
                "neofetch",
                "--stdout",
                stdout=asyncPIPE,
                stderr=asyncPIPE,
            )
            stdout, stderr = await fetch.communicate()
            result = str(stdout.decode().strip()) + str(stderr.decode().strip())
            # Reply in monospace (backtick) formatting.
            await sysd.edit("`" + result + "`")
        except FileNotFoundError:
            # neofetch binary not installed on the host.
            await sysd.edit("`Install neofetch first !!`")
@register(outgoing=True, pattern=r"^\.botver$")
async def bot_ver(event):
    """For .botver command, get the bot version."""
    # Ignore messages starting with another bot's command prefix.
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        if which("git") is not None:
            # Human-readable ref description of the checked-out tree.
            ver = await asyncrunapp(
                "git",
                "describe",
                "--all",
                "--long",
                stdout=asyncPIPE,
                stderr=asyncPIPE,
            )
            stdout, stderr = await ver.communicate()
            verout = str(stdout.decode().strip()) + str(stderr.decode().strip())
            # Total commit count across all refs, used as a revision number.
            rev = await asyncrunapp(
                "git",
                "rev-list",
                "--all",
                "--count",
                stdout=asyncPIPE,
                stderr=asyncPIPE,
            )
            stdout, stderr = await rev.communicate()
            revout = str(stdout.decode().strip()) + str(stderr.decode().strip())
            await event.edit(
                "`Userbot Version: " f"{verout}" "` \n" "`Revision: " f"{revout}" "`"
            )
        else:
            # No git available: report the hard-coded fallback version string.
            await event.edit(
                "Shame that you don't have git, you're running - 'v1.beta.4' anyway!"
            )
@register(outgoing=True, pattern=r"^\.pip(?: |$)(.*)")
async def pipcheck(pip):
    """For .pip command, do a pip search.

    Runs ``pip3 search <module>`` and replies with the combined
    stdout/stderr; oversized results are sent as a text file instead.
    """
    # NOTE(review): PyPI disabled the backend behind `pip search`, so pip
    # itself may return an error here — confirm on a live install.
    # Ignore messages starting with another bot's command prefix.
    if pip.text[0].isalpha() or pip.text[0] in ("/", "#", "@", "!"):
        return
    pipmodule = pip.pattern_match.group(1)
    if not pipmodule:
        await pip.edit("`Use .help pip to see an example`")
        return
    await pip.edit("`Searching . . .`")
    pipc = await asyncrunapp(
        "pip3",
        "search",
        pipmodule,
        stdout=asyncPIPE,
        stderr=asyncPIPE,
    )
    stdout, stderr = await pipc.communicate()
    pipout = str(stdout.decode().strip()) + str(stderr.decode().strip())
    if not pipout:
        await pip.edit(
            "**Query: **\n`"
            f"pip3 search {pipmodule}"
            "`\n**Result: **\n`No Result Returned/False`"
        )
        return
    if len(pipout) > 4096:
        await pip.edit("`Output too large, sending as file`")
        # Context manager guarantees the handle is closed even if the write
        # fails (the original used a bare open()/close() pair, which leaked
        # the file object on error).
        with open("output.txt", "w+") as outfile:
            outfile.write(pipout)
        await pip.client.send_file(
            pip.chat_id,
            "output.txt",
            reply_to=pip.id,
        )
        remove("output.txt")
        return
    await pip.edit(
        "**Query: **\n`"
        f"pip3 search {pipmodule}"
        "`\n**Result: **\n`"
        f"{pipout}"
        "`"
    )
@register(outgoing=True, pattern=r"^\.(alive|on)$")
async def amireallyalive(alive):
    """For .alive command, check if the bot is running."""
    logo = ALIVE_LOGO
    # Status card rendered with Telegram markdown; DEFAULTUSER is the
    # module-level display name (changed by .aliveu / .resetalive).
    output = (
        f" **[BotGabut](https://UserLazy.github.io/BotGabut)** \n"
        f"\n__**My Userbot is Alive**__\n"
        f"━━━━━━━━━━━━━━━━━━━━\n"
        f"**♛ User** \n"
        f" ➥ {DEFAULTUSER}` \n"
        f"┏━━━━━━━━━━━━━━━━━━━\n"
        f"┣ • Telethon : v{version.__version__} \n"
        f"┣ • Python : v{python_version()} \n"
        f"┣ • Modules : {len(CMD_HELP)} Modules \n"
        f"┗━━━━━━━━━━━━━━━━━━━\n"
    )
    if ALIVE_LOGO:
        try:
            logo = ALIVE_LOGO
            await bot.send_file(alive.chat_id, logo, caption=output)
            await alive.delete()
        except MediaEmptyError:
            # Logo URL did not resolve to usable media: fall back to text-only.
            await alive.edit(
                output + "\n\n *`The provided logo is invalid."
                "\nMake sure the link is directed to the logo picture`"
            )
    else:
        await alive.edit(output)
@register(outgoing=True, pattern=r"^\.aliveu")
async def amireallyaliveuser(username):
    """For .aliveu command, change the username in the .alive command."""
    message = username.text
    output = ".aliveu [new user without brackets] nor can it be empty"
    # Proceed only for ".aliveu <something>" (a space must follow the command).
    if not (message == ".aliveu" or message[7:8] != " "):
        newuser = message[8:]
        global DEFAULTUSER
        DEFAULTUSER = newuser
        output = "Successfully changed user to " + newuser + "!"
    await username.edit("`" f"{output}" "`")
@register(outgoing=True, pattern=r"^\.resetalive$")
async def amireallyalivereset(ureset):
    """For .resetalive command, reset the username in the .alive command."""
    # Restore the startup default (same expression as the module constant).
    global DEFAULTUSER
    DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
    await ureset.edit("`" "Successfully reset user for alive!" "`")
# Register this module's commands in the global help registry.
CMD_HELP.update(
    {
        "sysd": ">`.sysd`" "\nUsage: Shows system information using neofetch.",
        "botver": ">`.botver`" "\nUsage: Shows the userbot version.",
        "pip": ">`.pip <module(s)>`" "\nUsage: Does a search of pip modules(s).",
        "alive": ">`.alive`"
        "\nUsage: Type .alive to see wether your bot is working or not."
        "\n\n>`.aliveu <text>`"
        "\nUsage: Changes the 'user' in alive to the text you want."
        "\n\n>`.resetalive`"
        "\nUsage: Resets the user to default.",
    }
)
| 35.122449 | 85 | 0.514672 |
edee8cf11802c613b871dd30a55c6ccca5720e88 | 136 | py | Python | testes e exercícios/exercicios/script_008.py | LightSnow17/exercicios-Python | 3ac016ce284860f45d71cfb396d33a73ec06c25d | [
"MIT"
] | null | null | null | testes e exercícios/exercicios/script_008.py | LightSnow17/exercicios-Python | 3ac016ce284860f45d71cfb396d33a73ec06c25d | [
"MIT"
] | null | null | null | testes e exercícios/exercicios/script_008.py | LightSnow17/exercicios-Python | 3ac016ce284860f45d71cfb396d33a73ec06c25d | [
"MIT"
] | null | null | null | c = float(input('Coloque a temperatura em ºC: '))
f = ((9*c)/5) + 32  # Celsius -> Fahrenheit: F = 9C/5 + 32
print('O valor informado em {}ºC corresponde a {}ºF'.format(c, f))
| 34 | 66 | 0.632353 |
b28b9a5c21913e022e95af2e4d9a68924c795668 | 1,787 | py | Python | code/scores.py | nishant-sachdeva/BRED_PRIOJECT | 06d9909478080259b2ef5fee291b73bd8bbabfe1 | [
"MIT"
] | null | null | null | code/scores.py | nishant-sachdeva/BRED_PRIOJECT | 06d9909478080259b2ef5fee291b73bd8bbabfe1 | [
"MIT"
] | null | null | null | code/scores.py | nishant-sachdeva/BRED_PRIOJECT | 06d9909478080259b2ef5fee291b73bd8bbabfe1 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
def get_score(file_name):
    """Summarize one SART csv under ../data/csv_files/.

    Returns ``(mean response time, percent correct)`` computed from the last
    column (response times) and second-to-last column (0/1 correctness) of
    the file, after dropping incomplete rows.
    """
    frame = pd.read_csv("../data/csv_files/" + file_name)
    frame = frame.dropna()
    response_times = frame.iloc[:, -1]
    correctness_counts = frame.iloc[:, -2].value_counts(sort=False)
    mean_response_time = response_times.mean()
    percent_correct = (correctness_counts[1] * 100) / (correctness_counts[1] + correctness_counts[0])
    return mean_response_time, percent_correct
# NOTE(review): module-level accumulator; appears unused in this file —
# confirm before removing.
personalityAverage = []
def addMedian(data, data_desc):
    """Append a 'median' row (per-column medians of *data*) to *data_desc*.

    *data_desc* is mutated by the row insertion; the returned frame has the
    new last row relabelled 'median'.
    """
    data_desc.loc[len(data_desc.index)] = [data[col].median() for col in data]
    relabelled = data_desc.rename(index={data_desc.index[-1]: 'median'})
    return relabelled
def get_data_descriptions(data):
    """Print summary statistics (describe plus a median row) for *data*."""
    summary = addMedian(data, data.describe())
    print(summary)
    return
def calculate_scores(personality_data, sart_csv_list):
    """Combine personality data with per-file SART scores.

    Prints summary statistics for both datasets, writes the merged frame to
    ../processed_data/BRED_Project_Values.csv, and returns it.
    """
    # get_score returns (mean response time, percent correct) in that order.
    # The original bound the pair to swapped names (`correctness,
    # response_time = ...`), which still filled the columns correctly only
    # by accident; the names are fixed here, values unchanged.
    sart_scores = []
    for file_name in sart_csv_list:
        response_time, correctness = get_score(file_name)
        sart_scores.append([response_time, correctness])
    sartDataFrame = pd.DataFrame(sart_scores, columns=['Response Time', 'Correctness'])
    get_data_descriptions(personality_data)
    get_data_descriptions(sartDataFrame)
    completeDataFrame = personality_data.copy()
    completeDataFrame = completeDataFrame.assign(ResponseTime=sartDataFrame['Response Time'])
    completeDataFrame = completeDataFrame.assign(Correctness=sartDataFrame['Correctness'])
    completeDataFrame.to_csv("../processed_data/BRED_Project_Values.csv")
    return completeDataFrame
| 33.716981 | 95 | 0.729715 |
88de0f9397896de78ff9c92ba6564b60928997d9 | 2,021 | py | Python | leetcode/binary_tree/230.py | 1lch2/PythonExercise | 9adbe5fc2bce71f4c09ccf83079c44699c27fce4 | [
"MIT"
] | 1 | 2020-08-19T09:26:20.000Z | 2020-08-19T09:26:20.000Z | leetcode/binary_tree/230.py | 1lch2/PythonExercise | 9adbe5fc2bce71f4c09ccf83079c44699c27fce4 | [
"MIT"
] | null | null | null | leetcode/binary_tree/230.py | 1lch2/PythonExercise | 9adbe5fc2bce71f4c09ccf83079c44699c27fce4 | [
"MIT"
] | null | null | null | # 给定一个二叉搜索树,编写一个函数 kthSmallest 来查找其中第 k 个最小的元素。
# 说明:
# 你可以假设 k 总是有效的,1 ≤ k ≤ 二叉搜索树元素个数。
# 示例 1:
# 输入: root = [3,1,4,null,2], k = 1
# 3
# / \
# 1 4
# \
# 2
# 输出: 1
# 示例 2:
# 输入: root = [5,3,6,2,4,null,null,1], k = 3
# 5
# / \
# 3 6
# / \
# 2 4
# /
# 1
# 输出: 3
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        # Node payload plus left/right child links (None means absent).
        self.val = x
        self.left = None
        self.right = None
# Simple idea of combining dfs and quicksort.
# High time and memory cost.
class Solution0:
    def kthSmallest(self, root: TreeNode, k: int) -> int:
        # Collect every value via preorder DFS, then sort and index.
        def dfs(root, res):
            res.append(root.val)
            if root.left != None:
                dfs(root.left, res)
            if root.right != None:
                dfs(root.right, res)
        res = []
        dfs(root, res)
        # Hand-rolled in-place quicksort (Hoare-style hole partition:
        # the pivot is held out in `base` while elements are shuttled
        # between the two ends, then dropped into the final hole at i).
        def quicksort(seq, low, high):
            i = low
            j = high
            if low < high:
                base = seq[low]
                while i < j:
                    while seq[j] > base and j > i:
                        j -= 1
                    if j > i:
                        seq[i] = seq[j]
                        i += 1
                    while seq[i] < base and i < j:
                        i += 1
                    if i < j:
                        seq[j] = seq[i]
                        j -= 1
                seq[i] = base
                quicksort(seq, low, i-1)
                quicksort(seq, i+1, high)
        quicksort(res, 0, len(res)-1)
        # k is 1-based per the problem statement.
        return res[k-1]
# Inorder traversal.
class Solution1:
    def kthSmallest(self, root: TreeNode, k: int) -> int:
        # Inorder traversal of a BST yields values in ascending order,
        # so the k-th smallest is simply the (k-1)-th visited value.
        ordered = []

        def walk(node):
            if node.left is not None:
                walk(node.left)
            ordered.append(node.val)
            if node.right is not None:
                walk(node.right)

        walk(root)
        return ordered[k - 1]
| 22.965909 | 57 | 0.402771 |
7a8a9572cae2af1407d3f055a629d3fabf58a72b | 5,907 | py | Python | tests/broker/test_request_review.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | tests/broker/test_request_review.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | tests/broker/test_request_review.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the request review command."""
import os
from shutil import rmtree
from subprocess import PIPE
from subprocess import Popen
import unittest
from brokertest import TestBrokerCommand
if __name__ == "__main__":
    # When run directly, pull in the test-suite dependency setup before the
    # tests execute.
    import utils
    utils.import_depends()
class TestRequestReview(TestBrokerCommand):
    """Integration tests for the request_review/show_review broker commands.

    Method names are numbered because the suite relies on execution order:
    publish a sandbox change, verify no review exists, then create and
    inspect a review against a fresh domain, and finally clean up.
    """
    @classmethod
    def setUpClass(cls):
        super(TestRequestReview, cls).setUpClass()
        # Run "make clean" on templates before anything else.
        testdir = os.path.join(cls.sandboxdir, "reviewtest1", "t")
        if os.path.exists(os.path.join(testdir, "Makefile")):
            p = Popen(('/usr/bin/make', 'clean'),
                      cwd=testdir, env=cls.gitenv(
                          env={'PATH': '/bin:/usr/bin'}),
                      stdout=PIPE, stderr=PIPE)
            (out, err) = p.communicate()
            # NOTE(review): assertEqual is called on the class rather than an
            # instance here — confirm this branch actually works when hit.
            cls.assertEqual(p.returncode, 0,
                            "Non-zero return code running "
                            "make clean in sandbox, "
                            "STDOUT:\n@@@'{}'\n@@@\nSTDERR:\n@@@'{}'@@@\n"
                            .format(out, err))
    def test_111_make_change(self):
        # Append a marker comment to a template and commit it locally.
        sandboxdir = os.path.join(self.sandboxdir, "reviewtest1")
        template = self.find_template("aquilon", "archetype", "base",
                                      sandbox="reviewtest1")
        with open(template) as f:
            contents = f.readlines()
        contents.append("#Added by unittest\n")
        with open(template, 'w') as f:
            f.writelines(contents)
        self.gitcommand(["commit", "-a", "-m", "added unittest comment"],
                        cwd=sandboxdir)
    def test_121_publish_reviewtest1_sandbox(self):
        # Publish the committed change to the broker.
        sandboxdir = os.path.join(self.sandboxdir, "reviewtest1")
        self.successtest(["publish", "--branch", "reviewtest1"],
                         env=self.gitenv(), cwd=sandboxdir)
        # FIXME: Check the branch on the broker directly?
    def test_131_publish_reviewtest1_sandbox_no_review_created(self):
        # Publishing alone must not create a review request.
        command = ["show_review",
                   "--source", "reviewtest1",
                   "--target", "prod"]
        self.notfoundtest(command)
    def test_141_verify_reviewtest1(self):
        # Re-fetch the sandbox from scratch and confirm the published
        # change round-trips.
        sandboxdir = os.path.join(self.sandboxdir, "reviewtest1")
        p = Popen(["/bin/rm", "-rf", sandboxdir], stdout=1, stderr=2)
        p.wait()
        self.successtest(["get", "--sandbox", "reviewtest1"])
        self.assertTrue(os.path.exists(sandboxdir))
        template = self.find_template("aquilon", "archetype", "base",
                                      sandbox="reviewtest1")
        self.assertTrue(os.path.exists(template),
                        "aq get did not retrive '%s'" % template)
        with open(template) as f:
            contents = f.readlines()
        self.assertEqual(contents[-1], "#Added by unittest\n")
    def test_151_show_review(self):
        # The publish against prod should have auto-created a review.
        review_head = self.head_commit("reviewtest1")
        command = ["show_review",
                   "--source", "reviewtest1",
                   "--target", "prod"]
        out = self.commandtest(command)
        self.output_equals(out, """
            Review request
              Target Domain: prod
              Source Sandbox: reviewtest1
                Published Commit: %s
              Code Review URL: TEST_GERRIT_PR_URL
              Testing Status: Untested
              Approval Status: No decision
            """ % review_head,
                           command)
    def test_161_add_reviewtest_domain(self):
        # A fresh domain to target with an explicit review request.
        command = ["add_domain",
                   "--domain", "reviewtestdomain",
                   "--start", "prod"] + self.valid_just_tcm
        self.successtest(command)
    def test_171_reviewtest1_sandbox_no_review_created(self):
        # No review exists for the new domain until one is requested.
        command = ["show_review",
                   "--source", "reviewtest1",
                   "--target", "reviewtestdomain"]
        self.notfoundtest(command)
    def test_181_request_review(self):
        command = ["request_review",
                   "--source", "reviewtest1",
                   "--target", "reviewtestdomain"]
        self.successtest(command)
    def test_191_show_review(self):
        # The explicitly requested review is now visible.
        review_head = self.head_commit("reviewtest1")
        command = ["show_review",
                   "--source", "reviewtest1",
                   "--target", "reviewtestdomain"]
        out = self.commandtest(command)
        self.output_equals(out, """
            Review request
              Target Domain: reviewtestdomain
              Source Sandbox: reviewtest1
                Published Commit: %s
              Code Review URL: TEST_GERRIT_PR_URL
              Testing Status: Untested
              Approval Status: No decision
            """ % review_head,
                           command)
    def test_999_cleanup(self):
        # Remove the sandbox both broker-side and on disk.
        self.statustest(["del_sandbox", "--sandbox", "reviewtest1"])
        sandboxdir = os.path.join(self.sandboxdir, "reviewtest1")
        rmtree(sandboxdir, ignore_errors=True)
if __name__ == '__main__':
    # Run this module's tests directly with verbose output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestRequestReview)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 38.109677 | 74 | 0.585746 |
d2ce9282417481e205eb2ba30d4d78268a61ee6c | 713 | py | Python | solutions/python3/1246.py | sm2774us/amazon_interview_prep_2021 | f580080e4a6b712b0b295bb429bf676eb15668de | [
"MIT"
] | 42 | 2020-08-02T07:03:49.000Z | 2022-03-26T07:50:15.000Z | solutions/python3/1246.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | null | null | null | solutions/python3/1246.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | 40 | 2020-02-08T02:50:24.000Z | 2022-03-26T15:38:10.000Z | class Solution:
    def minimumMoves(self, arr: List[int]) -> int:
        """Minimum number of moves to remove the whole array, where each move
        deletes one contiguous palindromic subarray (LeetCode 1246).

        Interval DP: dp[i][j] = minimum moves to clear arr[i..j].
        """
        n = len(arr)
        # Extra row/column so dp[i + 2][j] / dp[k + 1][j] reads are safe
        # (empty intervals cost 0).
        dp = [[0] * (n + 1) for _ in range(n + 1)]
        # Process intervals by increasing length l.
        for l in range(1, n + 1):
            i, j = 0, l - 1
            while j < n:
                if l == 1:
                    # A single element is itself a palindrome.
                    dp[i][j] = 1
                else:
                    # Baseline: remove arr[i] alone, then clear the rest.
                    dp[i][j] = 1 + dp[i + 1][j]
                    if arr[i] == arr[i + 1]:
                        # Remove the equal pair arr[i], arr[i+1] in one move.
                        dp[i][j] = min(1 + dp[i + 2][j], dp[i][j])
                    for k in range(i + 2, j + 1):
                        if arr[i] == arr[k]:
                            # arr[i] can ride along with the last move that
                            # clears arr[i+1..k-1], splitting the interval.
                            dp[i][j] = min(dp[i + 1][k - 1] + dp[k + 1][j], dp[i][j])
                i, j = i + 1, j + 1
        return dp[0][n - 1]
| 37.526316 | 85 | 0.295933 |
a69b92e0a07f2711471a48b4c3a6cdaa3557b1c6 | 9,813 | py | Python | scimm/bin/scimm.py | movingpictures83/SCIMM | ef28ca7a56dcb7576f7e566d4296bb7c28c0306e | [
"MIT"
] | null | null | null | scimm/bin/scimm.py | movingpictures83/SCIMM | ef28ca7a56dcb7576f7e566d4296bb7c28c0306e | [
"MIT"
] | null | null | null | scimm/bin/scimm.py | movingpictures83/SCIMM | ef28ca7a56dcb7576f7e566d4296bb7c28c0306e | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
from optparse import OptionParser, SUPPRESS_HELP
import os, glob, subprocess, sys, math, shutil
import imm_cluster, util
############################################################
# scimm.py
#
# Sequence Clustering with Interpolated Markov Models
#
# Author: David Kelley
############################################################
# NOTE(review): hard-coded developer path; bin_dir below is what is actually
# derived from the invoked script location — confirm scimm_bin is still used.
scimm_bin = "/Users/dk/research/umd/metagenomics_clustering/Scimm/bin"
bin_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
# Make sibling modules (imm_cluster, util) importable by child processes.
if 'PYTHONPATH' in os.environ:
    os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + ':' + bin_dir
else:
    os.environ['PYTHONPATH'] = bin_dir
############################################################
# main
############################################################
def main():
    """Parse command-line options, launch the initial LikelyBin/CompostBin
    partitioning runs in parallel (at most `-p` processes worth of threads
    at a time), keep the minimum-entropy partitioning, and finally run the
    full IMM clustering on it.
    """
    parser = OptionParser()
    # generic options
    parser.add_option('-s','-r', dest='readsf', help='Fasta file of sequences')
    parser.add_option('-k', dest='k', type='int', help='Number of clusters')
    parser.add_option('-p', dest='proc', type='int', default=2, help='Number of processes to run [Default=%default]')
    # help='Use a soft assignment of reads to clusters [Default=%default]'
    parser.add_option('--em',dest='soft_assign', action='store_true', default=False, help=SUPPRESS_HELP)
    # likelybin options
    parser.add_option('--ls', dest='lb_starts', type='int', default=1, help='Number of random LikelyBin starts [Default=%default]')
    parser.add_option('--ln', dest='lb_numreads', type='int', default=3000, help='Number of reads to sample for LikelyBin [Default=%default]')
    parser.add_option('--lt', dest='lb_threads', type='int', default=2, help='Number of LikelyBin threads per start, and CPUs for imm_cluster [Default=%default]')
    parser.add_option('--lo', dest='lb_order', type='int', default=3, help='Order of LikelyBin Markov model [Default=%default]')
    # compostbin options
    parser.add_option('--cs', dest='cb_starts', type='int', default=1, help='Number of random CompostBin starts [Default=%default]')
    parser.add_option('--cn', dest='cb_numreads', type='int', default=3000, help='Number of reads to sample for CompostBin [Default=%default]')
    parser.add_option('--ct', dest='cb_threads', type='int', default=1, help='Number of CPUs for imm_cluster [Default=%default]')
    parser.add_option('--co','--cm', dest='cb_mers', type='int', default=5, help='mers to count in CompostBin [Default=%default]')
    parser.add_option('--outfile', dest='outfile', default='sample.fa.binning', help='output file')
    (options, args) = parser.parse_args()
    options.readsf = os.path.abspath(options.readsf)
    total_starts = options.lb_starts + options.cb_starts
    if options.soft_assign:
        em = '--em'
    else:
        em = ''
    # run initial samples: i counts launched starts, j counts threads in
    # the current batch so we never exceed options.proc at once.
    i = 0
    while i < total_starts:
        p = []
        j = 0
        while j < options.proc and i < total_starts:
            # LikelyBin
            if i < options.lb_starts:
                # double check processes
                if j + options.lb_threads <= options.proc:
                    p.append(subprocess.Popen('%s/lb_init.py -r %s -n %d -k %d -o %d -p %d --outfile %s %s' % (bin_dir, options.readsf, options.lb_numreads, options.k, options.lb_order, options.lb_threads, options.outfile, em), shell=True))
                    i += 1
                elif j == 0:
                    # one start alone already needs more threads than allowed
                    print('Cannot use more lb threads than processes')
                    exit()
                j += options.lb_threads # even if not true, just move things along
            # CompostBin
            else:
                # double check processes
                if j + options.cb_threads <= options.proc:
                    p.append(subprocess.Popen('%s/cb_init.py -r %s -n %d -k %d -m %d -p %d %s' % (bin_dir, options.readsf, options.cb_numreads, options.k, options.cb_mers, options.cb_threads, em), shell=True))
                    i += 1
                elif j == 0:
                    print('Cannot use more cb threads than processes')
                    exit()
                # BUG FIX: advance by the CompostBin thread count here; the
                # original advanced by options.lb_threads (copy-paste error),
                # which mis-counted the running threads in this branch.
                j += options.cb_threads # even if not true, just move things along
        # wait for this batch of processes to finish
        for j in range(len(p)):
            os.waitpid(p[j].pid, 0)
    # choose best start (minimum entropy of read/cluster probabilities)
    minentropy_clusters(total_starts, options.readsf, options.k, options.soft_assign)
    # in case k changed
    new_k = determine_k(options.soft_assign, options.k)
    # run imm clustering completely
    p = subprocess.Popen('%s/imm_cluster.py -k %d -r %s -p %d -i --trained %s &> immc.log' % (bin_dir, new_k, options.readsf, options.proc, em), shell=True)
    os.waitpid(p.pid, 0)
############################################################
# temp_dir
#
# Create and change to a temporary directory to do initial
# runs within
############################################################
def temp_dir(tmpdir):
    """Enter ``tmpdir``, creating it when missing and emptying it otherwise.

    If the directory already exists we change into it and delete every
    entry matched by ``glob('*')``; if not, it is created first and then
    entered, leaving the process cwd inside ``tmpdir`` either way.
    """
    if not os.path.isdir(tmpdir):
        os.mkdir(tmpdir)
        os.chdir(tmpdir)
        return
    os.chdir(tmpdir)
    for leftover in glob.glob('*'):
        os.remove(leftover)
############################################################
# maxlike_clusters
#
# Copy the clustering with maximum likelihood to the main
# directory
############################################################
def maxlike_clusters(total_starts, readsf, k, soft_assign):
    """Find the initial partitioning (among total_starts runs) with maximum
    likelihood.

    NOTE(review): the file-copy step is commented out, so this currently
    only computes max_clust without copying anything back — kept for the
    per-directory workflow that is also commented out.  A start that
    produced no cluster files gets likelihood '' (failed marker).
    """
    like = [0]*total_starts
    for i in range(total_starts):
        #os.chdir('tmp.start%d' % i)
        if len(glob.glob('cluster-*.fa')) > 0:
            # determine likelihood
            like[i] = scimm_like(readsf, k, soft_assign)
        else:
            # something failed
            like[i] = ''
        #os.chdir('..')
    # find max likelihood initial partitioning
    # NOTE(review): '' > number only holds in Python 2; under Python 3 this
    # comparison would raise TypeError — confirm before porting.
    max_like = min(like) # '' is greater than numbers
    for i in range(len(like)):
        if like[i] != '' and like[i] >= max_like:
            max_like = like[i]
            max_clust = i
    # get files from max
    #for c in range(len(glob.glob('cluster-*.fa'))):
    #    shutil.copy('tmp.start%d/cluster-%d.fa' % (max_clust,c), 'cluster-%d.fa' % c)
    #    shutil.copy('tmp.start%d/icm-%dscores.tmp' % (max_clust,c), 'icm-%dscores.tmp' % c)
# shutil.copy('tmp.start%d/icm-%dscores.tmp' % (max_clust,c), 'icm-%dscores.tmp' % c)
############################################################
# scimm_like
#
# Calculate the likelihood of the given clustering and IMM
############################################################
def scimm_like(readsf, k, soft_assign):
    """Return the log likelihood of the clustering/IMMs in the current
    directory, starting from uniform cluster priors.
    """
    cluster_count = determine_k(soft_assign, k)
    uniform = [1.0 / cluster_count] * cluster_count
    priors = imm_cluster.update_priors(uniform, readsf, {}, {}, soft_assign)
    likelihood, _read_probs = imm_cluster.get_read_probs(priors, {}, {}, soft_assign)
    return likelihood
############################################################
# minentropy_clusters
#
# Copy the clustering with minimum entropy to the main
# directory.
############################################################
def minentropy_clusters(total_starts, readsf, k, soft_assign):
    """Find the initial partitioning (among total_starts runs) with minimum
    entropy of the read/cluster assignment probabilities.

    A start that produced no cluster files gets entropy '' (failed marker);
    util.min_i relies on '' comparing greater than numbers (Python 2 only).
    NOTE(review): the copy-back step is commented out along with the
    per-directory workflow, so only (min_entropy, min_clust) is computed.
    """
    entropy = [0]*total_starts
    for i in range(total_starts):
        #os.chdir('tmp.start%d' % i)
        if len(glob.glob('cluster-*.fa')) > 0:
            # determine likelihood
            entropy[i] = get_entropy(readsf, k, soft_assign)
        else:
            # something failed
            entropy[i] = ''
        #os.chdir('..')
    # find min entropy partitioning ('' is greater than numbers)
    (min_entropy, min_clust) = util.min_i(entropy)
    # get files from min
    #for c in range(len(glob.glob('tmp.start%d/cluster-*.fa' % min_clust))):
    #    shutil.copy('tmp.start%d/cluster-%d.fa' % (min_clust,c), 'cluster-%d.fa' % c)
    #    shutil.copy('tmp.start%d/icm-%d.scores.tmp' % (min_clust,c), 'icm-%d.scores.tmp' % c)
############################################################
# get_entropy
#
# Return the entropy of the clusters in the current
# directory.
############################################################
def get_entropy(readsf, k, soft_assign):
    """Return the Shannon entropy (natural log) of the read/cluster
    assignment probabilities for the clustering in the current directory.
    """
    cluster_count = determine_k(soft_assign, k)
    uniform = [1.0 / cluster_count] * cluster_count
    priors = imm_cluster.update_priors(uniform, readsf, {}, {}, soft_assign)
    _like, read_probs = imm_cluster.get_read_probs(priors, {}, {}, soft_assign)

    entropy = 0.0
    for read in read_probs:
        probs = read_probs[read]
        for c in range(len(probs)):
            # zero-probability clusters contribute nothing (0*log 0 -> 0)
            if probs[c] > 0:
                entropy -= probs[c] * math.log(probs[c])
    return entropy
############################################################
# determine_k
#
# In case, I'm letting k change within LikelyBin
############################################################
def determine_k(soft_assign, k):
    """Count how many of the k expected clusters are non-empty on disk.

    A cluster counts when its fasta file — or the '.headers' file left
    behind once a job finishes — exists in the current directory with at
    least one byte.  Soft assignment uses the '.build.fa' naming scheme.
    """
    template = 'cluster-%d.build.fa' if soft_assign else 'cluster-%d.fa'

    def nonempty(path):
        # present and not zero-length
        return os.path.isfile(path) and os.path.getsize(path) > 0

    found = 0
    for i in range(k):
        fasta = template % i
        if nonempty(fasta) or nonempty(fasta + '.headers'):
            found += 1
    return found
############################################################
# __main__
############################################################
# Script entry point: run the SCIMM pipeline when invoked directly.
if __name__ == '__main__':
    main()
| 39.728745 | 240 | 0.53959 |
085283fcb6b243437f1e0c66363c8a6d5b0e15a8 | 1,544 | py | Python | src/compas_rv2/singular/rhino/geometry/curve.py | selinabitting/compas-RV2 | 0884cc00d09c8f4a75eb2b97614105e4c8bfd818 | [
"MIT"
] | 4 | 2022-01-17T19:17:22.000Z | 2022-01-21T18:06:02.000Z | src/compas_rv2/singular/rhino/geometry/curve.py | selinabitting/compas-RV2 | 0884cc00d09c8f4a75eb2b97614105e4c8bfd818 | [
"MIT"
] | null | null | null | src/compas_rv2/singular/rhino/geometry/curve.py | selinabitting/compas-RV2 | 0884cc00d09c8f4a75eb2b97614105e4c8bfd818 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from compas_rhino.geometry import RhinoCurve
class RhinoCurve(RhinoCurve):
    """Extension of compas_rhino's RhinoCurve (deliberately shadowing the
    imported name) adding division, length, and tangent helpers built on
    the RhinoScript API.

    NOTE(review): ``self.guid`` and ``self.space`` come from the parent
    compas_rhino class — confirm against the installed compas version.
    """
    def __init__(self):
        super(RhinoCurve, self).__init__()
    def divide(self, number_of_segments, over_space=False):
        """Divide the curve and return the division points as xyz lists.

        With ``over_space`` the parameter space is sampled and evaluated;
        otherwise RhinoScript's DivideCurve is used.  Redraw is disabled
        around the Rhino calls to avoid flicker while evaluating.
        """
        points = []
        compas_rhino.rs.EnableRedraw(False)
        if over_space:
            space = self.space(number_of_segments + 1)
            if space:
                points = [list(compas_rhino.rs.EvaluateCurve(self.guid, param)) for param in space]
        else:
            points = compas_rhino.rs.DivideCurve(self.guid, number_of_segments, create_points=False, return_points=True)
            # convert Rhino point objects to plain lists in place
            points[:] = map(list, points)
        compas_rhino.rs.EnableRedraw(True)
        return points
    def length(self):
        """Return the length of the curve.
        Returns
        -------
        float
            The curve's length.
        """
        return compas_rhino.rs.CurveLength(self.guid)
    def tangents(self, points):
        """Return the curve tangent vector closest to each input point.

        Poly-curves are silently skipped (empty result); non-curve
        objects raise.
        """
        tangents = []
        if compas_rhino.rs.IsPolyCurve(self.guid):
            # TODO(review): poly-curves are not handled yet — returns []
            pass
        elif compas_rhino.rs.IsCurve(self.guid):
            for point in points:
                param = compas_rhino.rs.CurveClosestPoint(self.guid, point)
                vector = list(compas_rhino.rs.CurveTangent(self.guid, param))
                tangents.append(vector)
        else:
            raise Exception('Object is not a curve.')
        return tangents
| 31.510204 | 120 | 0.626295 |
f20622007638d3745e5c1bc31c12fb530a3df21c | 1,074 | py | Python | 5430 AC.py | jangThang/Baekjoon-problem | f34c7d9977ad95fbe2a59c0096bf8ff1e885c01f | [
"MIT"
] | null | null | null | 5430 AC.py | jangThang/Baekjoon-problem | f34c7d9977ad95fbe2a59c0096bf8ff1e885c01f | [
"MIT"
] | null | null | null | 5430 AC.py | jangThang/Baekjoon-problem | f34c7d9977ad95fbe2a59c0096bf8ff1e885c01f | [
"MIT"
] | null | null | null | from collections import deque
import sys
input = sys.stdin.readline
#입력
T = int(input())
for _ in range(T):
p = input().rstrip()
n = int(input())
if n == 0:
input()
numlist = deque([])
else:
numlist = deque(list(input().strip("[]\n").split(",")))
#연산시작
front = True #앞에서부터 제거
for op in p:
# 뒤집기
if op =='R':
#Toggle
if front:
front = False
else:
front = True
# 버리기
else:
#빈 리스트가 아님
if numlist:
#앞에서부터 꺼내기
if front:
numlist.popleft()
#뒤에서부터 꺼내기
else:
numlist.pop()
#빈 배열임
else:
print("error")
break
#error없이 모든 연산을 마침
else:
if front:
#앞쪽으로 모든 배열 꺼내기
lst = ",".join(list(numlist))
else:
#뒤쪽으로 모든 배열 꺼내기
lst = ",".join(list(numlist)[::-1])
print(f"[{lst}]")
| 21.48 | 63 | 0.393855 |
7836b8443000e73b4c3deb29caf043387ccad91b | 372 | py | Python | djangocms_forms/managers.py | gruy/djangocms-forms | 96fa3c249b8f443d0063f07b9273f1eaca3314fe | [
"BSD-3-Clause"
] | 136 | 2015-04-28T18:09:13.000Z | 2022-01-11T19:46:44.000Z | djangocms_forms/managers.py | gruy/djangocms-forms | 96fa3c249b8f443d0063f07b9273f1eaca3314fe | [
"BSD-3-Clause"
] | 80 | 2015-05-20T15:33:13.000Z | 2021-05-31T07:49:23.000Z | djangocms_forms/managers.py | gruy/djangocms-forms | 96fa3c249b8f443d0063f07b9273f1eaca3314fe | [
"BSD-3-Clause"
] | 91 | 2015-04-28T18:13:55.000Z | 2022-03-17T10:49:04.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models import Count
class ActiveFormManager(models.Manager):
def get_queryset(self):
qs = super(ActiveFormManager, self).get_queryset()
return qs.annotate(submission_count=Count('submissions')) \
.filter(submission_count__gt=0)
| 26.571429 | 67 | 0.72043 |
2a1cf4084c960345167d1cac1cfdcb5e665d956a | 457 | py | Python | Question/migrations/0004_auto_20200206_1117.py | AlirezAkbary/FeedbackSys | 250acd75d4b903cc647ed86aab7b1a22909a056e | [
"bzip2-1.0.6"
] | null | null | null | Question/migrations/0004_auto_20200206_1117.py | AlirezAkbary/FeedbackSys | 250acd75d4b903cc647ed86aab7b1a22909a056e | [
"bzip2-1.0.6"
] | null | null | null | Question/migrations/0004_auto_20200206_1117.py | AlirezAkbary/FeedbackSys | 250acd75d4b903cc647ed86aab7b1a22909a056e | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.0.7 on 2020-02-06 07:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: narrows Question.q_type to a one-character
    # choice field — 'M' = MultipleChoice (default), 'L' = LongAnswer.
    dependencies = [
        ('Question', '0003_auto_20200203_2212'),
    ]
    operations = [
        migrations.AlterField(
            model_name='question',
            name='q_type',
            field=models.CharField(choices=[('M', 'MultipleChoice'), ('L', 'LongAnswer')], default='M', max_length=1),
        ),
    ]
| 24.052632 | 118 | 0.599562 |
d7f08c0ae8d862d23814dcbcca0822b9c715cfc5 | 2,185 | py | Python | pc_spec/data.py | konrad-kocik/pc-spec | 027fe7e831d500c4b8b73994ebb421d8bf5c8617 | [
"MIT"
] | null | null | null | pc_spec/data.py | konrad-kocik/pc-spec | 027fe7e831d500c4b8b73994ebb421d8bf5c8617 | [
"MIT"
] | 14 | 2022-02-19T14:53:37.000Z | 2022-03-02T19:32:42.000Z | pc_spec/data.py | konrad-kocik/pc-spec | 027fe7e831d500c4b8b73994ebb421d8bf5c8617 | [
"MIT"
] | null | null | null | from json import dump, load, JSONDecodeError
from pathlib import Path
from typing import List, Dict, Tuple
from pc_spec.pc import PC, Components
from pc_spec.store import Store
def save_store(store: Store, target_dir: Path):
    """
    Persist the given store as a JSON file inside the given directory.
    Missing directories (including all parents) are created on the fly.
    :param store: collection of PCs to be saved
    :param target_dir: path to directory where JSON file will be created
    """
    __create_dir_if_necessary(target_dir)
    destination = Path(target_dir, __get_store_file_name())
    __save_to_json(__to_serializable_pcs(store.pcs), destination)
def load_store(source_dir: Path) -> Store:
    """
    Load a store from the JSON file kept in the given directory.
    When the directory or the JSON file is missing (or the file is empty
    or malformed), an empty store is returned instead.
    :param source_dir: path to directory which contains store JSON file
    :return: loaded store
    """
    store_file = Path(source_dir, __get_store_file_name())
    if not store_file.is_file():
        return Store()
    return Store(__get_pcs_from_json_file(store_file))
def __get_store_file_name() -> str:
    # Single source of truth for the store file name used by save/load.
    return 'store.json'
def __create_dir_if_necessary(dir_path: Path):
    """Create dir_path (with all missing parents) if it does not exist."""
    # exist_ok avoids the is_dir()/mkdir() TOCTOU race of the previous
    # check-then-create version and keeps the call idempotent; a non-dir
    # file at dir_path still raises FileExistsError, as before.
    dir_path.mkdir(parents=True, exist_ok=True)
def __to_serializable_pcs(pcs: List[PC]) -> List[Dict[str, Components]]:
    # Map each PC to a single-entry dict {name: components} so the list
    # round-trips through JSON (see __unpack_serialized_pc).
    return [{pc.name: pc.components} for pc in pcs]
def __save_to_json(serializable: List, file_path: Path):
    # Overwrites any existing file at file_path.
    with open(file_path, 'w') as json_file:
        dump(serializable, json_file)
def __get_pcs_from_json_file(file_path: Path) -> List[PC]:
    # An empty or malformed JSON file yields an empty PC list rather than
    # propagating the decode error.
    with open(file_path, 'r') as json_file:
        try:
            return[PC(*__unpack_serialized_pc(serialized_pc)) for serialized_pc in load(json_file)]
        except JSONDecodeError:
            return []
def __unpack_serialized_pc(serialized_pc: Dict[str, Components]) -> Tuple[str, Components]:
    # Each serialized PC is a single-entry dict {name: components};
    # unpack its sole key/value pair (see __to_serializable_pcs).
    name = list(serialized_pc.keys())[0]
    components = list(serialized_pc.values())[0]
    return name, components
| 34.140625 | 103 | 0.727231 |
6abf03f820d8d11b6a983105d547b539751c469a | 121 | py | Python | hello_world.py.py | Kbrane-08/ASTR-119-HW | 73c9de7a647345653db5ec37f31e8006800a1c5e | [
"MIT"
] | null | null | null | hello_world.py.py | Kbrane-08/ASTR-119-HW | 73c9de7a647345653db5ec37f31e8006800a1c5e | [
"MIT"
] | null | null | null | hello_world.py.py | Kbrane-08/ASTR-119-HW | 73c9de7a647345653db5ec37f31e8006800a1c5e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# This program writes the classic greeting to stdout:
# Hello World!
print("Hello World!") # print out Hello World!
| 15.125 | 49 | 0.652893 |
9c53b140fbf8fdf21e55658b28f6ea3c52381748 | 76,414 | py | Python | torchxrayvision/datasets.py | ahatamiz/torchxrayvision | e5863899a48fcb69bae216cf16325d00b7c17e95 | [
"Apache-2.0"
] | 1 | 2021-09-26T21:51:10.000Z | 2021-09-26T21:51:10.000Z | torchxrayvision/datasets.py | ahatamiz/torchxrayvision | e5863899a48fcb69bae216cf16325d00b7c17e95 | [
"Apache-2.0"
] | null | null | null | torchxrayvision/datasets.py | ahatamiz/torchxrayvision | e5863899a48fcb69bae216cf16325d00b7c17e95 | [
"Apache-2.0"
] | null | null | null | from PIL import Image
from os.path import join
from skimage.io import imread, imsave
import imageio
from torch import nn
from torch.nn.modules.linear import Linear
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm import tqdm
import numpy as np
import os,sys,os.path
import pandas as pd
import pickle
import pydicom
import skimage
import glob
import collections
import pprint
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms.functional as TF
import skimage.transform
import warnings
import tarfile
import zipfile
import random
# Canonical pathology label set/order used across the datasets in this
# module; relabel_dataset() can align any dataset's labels to it.
default_pathologies = [ 'Atelectasis',
                 'Consolidation',
                 'Infiltration',
                 'Pneumothorax',
                 'Edema',
                 'Emphysema',
                 'Fibrosis',
                 'Effusion',
                 'Pneumonia',
                 'Pleural_Thickening',
                 'Cardiomegaly',
                 'Nodule',
                 'Mass',
                 'Hernia',
                 'Lung Lesion',
                 'Fracture',
                 'Lung Opacity',
                 'Enlarged Cardiomediastinum'
                ]
# Directory containing this module; used to locate the bundled csv label files.
thispath = os.path.dirname(os.path.realpath(__file__))
# this is for caching small things for speed
_cache_dict = {}
def normalize(sample, maxval):
    """Scale pixel values from [0, maxval] to roughly [-1024, 1024] as float32.

    Raises Exception when any pixel exceeds the expected bound ``maxval``.
    """
    if sample.max() > maxval:
        raise Exception("max image value ({}) higher than expected bound ({}).".format(sample.max(), maxval))
    # Map [0, maxval] -> [0, 1] -> [-1, 1] -> [-1024, 1024].
    unit = sample.astype(np.float32) / maxval
    return (2 * unit - 1.) * 1024
def relabel_dataset(pathologies, dataset, silent=False):
    """
    Reorder, remove, or add (nans) to a dataset's labels.
    Use this to align with the output of a network.

    Mutates ``dataset`` in place: ``dataset.labels`` becomes a
    (num_samples, len(pathologies)) array and ``dataset.pathologies`` is
    replaced by the requested list.  Missing pathologies become nan columns.
    """
    dropped = set(dataset.pathologies) - set(pathologies)
    if dropped != set():
        if not silent:
            print("{} will be dropped".format(dropped))
    dataset.pathologies = list(dataset.pathologies)
    columns = []
    for pathology in pathologies:
        if pathology in dataset.pathologies:
            columns.append(dataset.labels[:, dataset.pathologies.index(pathology)])
        else:
            if not silent:
                print("{} doesn't exist. Adding nans instead.".format(pathology))
            nan_col = np.empty(dataset.labels.shape[0])
            nan_col.fill(np.nan)
            columns.append(nan_col)
    dataset.labels = np.asarray(columns).T
    dataset.pathologies = pathologies
class Dataset():
    """Base class for the chest x-ray datasets in this module.

    Subclasses are expected to populate ``pathologies`` (label names),
    ``labels`` (num_samples x num_pathologies array), and the
    ``imgpath``/``csvpath`` attributes validated by ``check_paths_exist``.
    """
    def __init__(self):
        pass

    def totals(self):
        """Return {pathology: {label_value: count}}, ignoring nan labels."""
        per_pathology = []
        for column in self.labels.T:
            observed = column[~np.isnan(column)]
            per_pathology.append(dict(collections.Counter(observed).most_common()))
        return dict(zip(self.pathologies, per_pathology))

    def __repr__(self):
        # Pretty-print the per-pathology counts, then return the
        # subclass-provided one-line summary.
        pprint.pprint(self.totals())
        return self.string()

    def check_paths_exist(self):
        if not os.path.isdir(self.imgpath):
            raise Exception("imgpath must be a directory")
        if not os.path.isfile(self.csvpath):
            raise Exception("csvpath must be a file")
class Merge_Dataset(Dataset):
    """Concatenation of several datasets that share the same pathology list.

    Keeps per-sample bookkeeping: ``which_dataset[i]`` is the index of the
    source dataset for global sample i, and ``offset[i]`` is where that
    dataset's samples start, so __getitem__ can translate a global index
    back to a local one.  With ``label_concat`` each dataset gets its own
    block of label columns (nan elsewhere).
    """
    def __init__(self, datasets, seed=0, label_concat=False):
        super(Merge_Dataset, self).__init__()
        np.random.seed(seed) # Reset the seed so all runs are the same.
        self.datasets = datasets
        self.length = 0
        self.pathologies = datasets[0].pathologies
        self.which_dataset = np.zeros(0)
        self.offset = np.zeros(0)
        currentoffset = 0
        for i, dataset in enumerate(datasets):
            # record source index and running offset for every sample
            self.which_dataset = np.concatenate([self.which_dataset, np.zeros(len(dataset))+i])
            self.length += len(dataset)
            self.offset = np.concatenate([self.offset, np.zeros(len(dataset))+currentoffset])
            currentoffset += len(dataset)
            # all merged datasets must use the identical pathology ordering
            if dataset.pathologies != self.pathologies:
                raise Exception("incorrect pathology alignment")
        if hasattr(datasets[0], 'labels'):
            self.labels = np.concatenate([d.labels for d in datasets])
        else:
            print("WARN: not adding .labels")
        self.which_dataset = self.which_dataset.astype(int)
        if label_concat:
            # one block of columns per source dataset, nan outside its block
            new_labels = np.zeros([self.labels.shape[0], self.labels.shape[1]*len(datasets)])*np.nan
            for i, shift in enumerate(self.which_dataset):
                size = self.labels.shape[1]
                new_labels[i,shift*size:shift*size+size] = self.labels[i]
            self.labels = new_labels
        try:
            self.csv = pd.concat([d.csv for d in datasets])
        except:
            print("Could not merge dataframes (.csv not available):", sys.exc_info()[0])
        self.csv = self.csv.reset_index()
    def string(self):
        s = self.__class__.__name__ + " num_samples={}\n".format(len(self))
        for d in self.datasets:
            s += "└ " + d.string().replace("\n","\n ") + "\n"
        return s
    def __len__(self):
        return self.length
    def __getitem__(self, idx):
        # translate the global index to (source dataset, local index)
        item = self.datasets[int(self.which_dataset[idx])][idx - int(self.offset[idx])]
        item["lab"] = self.labels[idx]
        item["source"] = self.which_dataset[idx]
        return item
class FilterDataset(Dataset):
    """View of ``dataset`` restricted to samples positive for the given
    pathology labels (a sample matching several labels appears once per
    matching label, in label order).
    """
    def __init__(self, dataset, labels=None):
        super(FilterDataset, self).__init__()
        self.dataset = dataset
        self.pathologies = dataset.pathologies

        selected = []
        for label in (labels or []):
            print("filtering for ", label)
            column = list(dataset.pathologies).index(label)
            selected += list(np.where(dataset.labels[:, column] == 1)[0])
        self.idxs = selected

        # slice labels and csv down to the selected rows
        self.labels = self.dataset.labels[self.idxs]
        self.csv = self.dataset.csv.iloc[self.idxs]

    def string(self):
        return self.__class__.__name__ + " num_samples={}\n".format(len(self)) + "└ of " + self.dataset.string().replace("\n","\n ")

    def __len__(self):
        return len(self.idxs)

    def __getitem__(self, idx):
        return self.dataset[self.idxs[idx]]
class SubsetDataset(Dataset):
    """View of ``dataset`` restricted to the sample indices in ``idxs``."""
    def __init__(self, dataset, idxs=None):
        super(SubsetDataset, self).__init__()
        self.dataset = dataset
        self.pathologies = dataset.pathologies
        self.idxs = idxs
        # slice the parent's labels and csv down to the selected rows
        self.labels = self.dataset.labels[self.idxs]
        self.csv = self.dataset.csv.iloc[self.idxs]
        self.csv = self.csv.reset_index(drop=True)
        # propagate per-sample provenance when the parent is a merge
        if hasattr(self.dataset, 'which_dataset'):
            self.which_dataset = self.dataset.which_dataset[self.idxs]
    def string(self):
        return self.__class__.__name__ + " num_samples={}\n".format(len(self)) + "└ of " + self.dataset.string().replace("\n","\n ")
    def __len__(self):
        return len(self.idxs)
    def __getitem__(self, idx):
        return self.dataset[self.idxs[idx]]
class NIH_Dataset(Dataset):
    """
    NIH ChestX-ray8 dataset
    Dataset release website:
    https://www.nih.gov/news-events/news-releases/nih-clinical-center-provides-one-largest-publicly-available-chest-x-ray-datasets-scientific-community
    Download full size images here:
    https://academictorrents.com/details/557481faacd824c83fbf57dcf7b6da9383b3235a
    Download resized (224x224) images here:
    https://academictorrents.com/details/e615d3aebce373f1dc8bd9d11064da55bdadede0
    """
    def __init__(self, imgpath,
                 csvpath=os.path.join(thispath, "Data_Entry_2017_v2020.csv.gz"),
                 bbox_list_path=os.path.join(thispath, "BBox_List_2017.csv.gz"),
                 views=["PA"],
                 transform=None,
                 data_aug=None,
                 nrows=None,
                 seed=0,
                 pure_labels=False,
                 unique_patients=True,
                 normalize=True,
                 pathology_masks=False):
        # imgpath: directory of NIH png/jpg images; csvpath: bundled label
        # csv; bbox_list_path: bounding boxes for the pathology masks.
        # NOTE: the `normalize` parameter shadows the module-level
        # normalize() function inside this scope.
        super(NIH_Dataset, self).__init__()
        np.random.seed(seed) # Reset the seed so all runs are the same.
        self.imgpath = imgpath
        self.csvpath = csvpath
        self.transform = transform
        self.data_aug = data_aug
        self.pathology_masks = pathology_masks
        self.pathologies = ["Atelectasis", "Consolidation", "Infiltration",
                            "Pneumothorax", "Edema", "Emphysema", "Fibrosis",
                            "Effusion", "Pneumonia", "Pleural_Thickening",
                            "Cardiomegaly", "Nodule", "Mass", "Hernia"]
        self.pathologies = sorted(self.pathologies)
        self.normalize = normalize
        # Load data
        self.check_paths_exist()
        self.csv = pd.read_csv(self.csvpath, nrows=nrows)
        self.MAXVAL = 255 # Range [0 255]
        if type(views) is not list:
            views = [views]
        self.views = views
        # Remove images with view position other than specified
        self.csv["view"] = self.csv['View Position']
        self.csv = self.csv[self.csv["view"].isin(self.views)]
        # Remove multi-finding images.
        if pure_labels:
            self.csv = self.csv[~self.csv["Finding Labels"].str.contains("\|")]
        if unique_patients:
            # keep a single image per patient
            self.csv = self.csv.groupby("Patient ID").first()
        self.csv = self.csv.reset_index()
        ####### pathology masks ########
        # load nih pathology masks
        self.pathology_maskscsv = pd.read_csv(bbox_list_path,
                                              names=["Image Index","Finding Label","x","y","w","h","_1","_2","_3"],
                                              skiprows=1)
        # change label name to match
        self.pathology_maskscsv["Finding Label"][self.pathology_maskscsv["Finding Label"] == "Infiltrate"] = "Infiltration"
        self.csv["has_masks"] = self.csv["Image Index"].isin(self.pathology_maskscsv["Image Index"])
        ####### pathology masks ########
        # Get our classes: one binary column per pathology, parsed from the
        # pipe-separated "Finding Labels" string.
        self.labels = []
        for pathology in self.pathologies:
            self.labels.append(self.csv["Finding Labels"].str.contains(pathology).values)
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        ########## add consistent csv values
        # offset_day_int
        #self.csv["offset_day_int"] =
        # patientid
        self.csv["patientid"] = self.csv["Patient ID"].astype(str)
    def string(self):
        return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        # Returns {"idx", "lab", "img"[, "pathology_masks"]}; the same
        # random seed is reused so transforms apply identically to image
        # and masks.
        sample = {}
        sample["idx"] = idx
        sample["lab"] = self.labels[idx]
        imgid = self.csv['Image Index'].iloc[idx]
        img_path = os.path.join(self.imgpath, imgid)
        #print(img_path)
        img = imread(img_path)
        if self.normalize:
            img = normalize(img, self.MAXVAL)
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            # keep only the first channel of multi-channel images
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        sample["img"] = img[None, :, :]
        transform_seed = np.random.randint(2147483647)
        if self.pathology_masks:
            sample["pathology_masks"] = self.get_mask_dict(imgid, sample["img"].shape[2])
        if self.transform is not None:
            random.seed(transform_seed)
            sample["img"] = self.transform(sample["img"])
            if self.pathology_masks:
                for i in sample["pathology_masks"].keys():
                    random.seed(transform_seed)
                    sample["pathology_masks"][i] = self.transform(sample["pathology_masks"][i])
        if self.data_aug is not None:
            random.seed(transform_seed)
            sample["img"] = self.data_aug(sample["img"])
            if self.pathology_masks:
                for i in sample["pathology_masks"].keys():
                    random.seed(transform_seed)
                    sample["pathology_masks"][i] = self.data_aug(sample["pathology_masks"][i])
        return sample
    def get_mask_dict(self, image_name, this_size):
        # Build {pathology_index: binary mask} for this image, rescaling
        # the 1024-pixel-based bounding boxes to this_size.
        base_size = 1024
        scale = this_size/base_size
        images_with_masks = self.pathology_maskscsv[self.pathology_maskscsv["Image Index"] == image_name]
        path_mask = {}
        for i in range(len(images_with_masks)):
            row = images_with_masks.iloc[i]
            # don't add masks for labels we don't have
            if row["Finding Label"] in self.pathologies:
                mask = np.zeros([this_size,this_size])
                xywh = np.asarray([row.x,row.y,row.w,row.h])
                xywh = xywh*scale
                xywh = xywh.astype(int)
                mask[xywh[1]:xywh[1]+xywh[3],xywh[0]:xywh[0]+xywh[2]] = 1
                # resize so image resizing works
                mask = mask[None, :, :]
                path_mask[self.pathologies.index(row["Finding Label"])] = mask
        return path_mask
class RSNA_Pneumonia_Dataset(Dataset):
    """
    RSNA Pneumonia Detection Challenge
    Augmenting the National Institutes of Health Chest Radiograph Dataset with Expert
    Annotations of Possible Pneumonia.
    Shih, George, Wu, Carol C., Halabi, Safwan S., Kohli, Marc D., Prevedello, Luciano M.,
    Cook, Tessa S., Sharma, Arjun, Amorosa, Judith K., Arteaga, Veronica, Galperin-Aizenberg,
    Maya, Gill, Ritu R., Godoy, Myrna C.B., Hobbs, Stephen, Jeudy, Jean, Laroia, Archana,
    Shah, Palmi N., Vummidi, Dharshan, Yaddanapudi, Kavitha, and Stein, Anouk.
    Radiology: Artificial Intelligence, 1 2019. doi: 10.1148/ryai.2019180041.
    More info: https://www.rsna.org/en/education/ai-resources-and-training/ai-image-challenge/RSNA-Pneumonia-Detection-Challenge-2018
    Challenge site:
    https://www.kaggle.com/c/rsna-pneumonia-detection-challenge
    JPG files stored here:
    https://academictorrents.com/details/95588a735c9ae4d123f3ca408e56570409bcf2a9
    """
    def __init__(self,
                 imgpath,
                 csvpath=os.path.join(thispath, "kaggle_stage_2_train_labels.csv.zip"),
                 dicomcsvpath=os.path.join(thispath, "kaggle_stage_2_train_images_dicom_headers.csv.gz"),
                 views=["PA"],
                 transform=None,
                 data_aug=None,
                 nrows=None,
                 seed=0,
                 pure_labels=False,
                 unique_patients=True,
                 normalize=True,
                 pathology_masks=False,
                 extension=".jpg"):
        # extension selects the image reader: ".dcm" uses pydicom,
        # anything else goes through skimage's imread.
        super(RSNA_Pneumonia_Dataset, self).__init__()
        np.random.seed(seed) # Reset the seed so all runs are the same.
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug
        self.pathology_masks = pathology_masks
        self.pathologies = ["Pneumonia", "Lung Opacity"]
        self.pathologies = sorted(self.pathologies)
        self.normalize=normalize
        self.extension = extension
        self.use_pydicom=( extension == ".dcm" )
        # Load data
        self.csvpath = csvpath
        self.raw_csv = pd.read_csv(self.csvpath, nrows=nrows)
        # the labels have multiple instances for each mask
        # so we just need one to get the target label
        self.csv = self.raw_csv.groupby("patientId").first()
        self.dicomcsvpath = dicomcsvpath
        self.dicomcsv = pd.read_csv(self.dicomcsvpath, nrows=nrows, index_col="PatientID")
        # attach dicom header metadata (e.g. ViewPosition) to each patient
        self.csv = self.csv.join(self.dicomcsv, on="patientId")
        self.MAXVAL = 255 # Range [0 255]
        if type(views) is not list:
            views = [views]
        self.views = views
        # Remove images with view position other than specified
        self.csv["view"] = self.csv['ViewPosition']
        self.csv = self.csv[self.csv["view"].isin(self.views)]
        self.csv = self.csv.reset_index()
        # Get our classes.
        self.labels = []
        self.labels.append(self.csv["Target"].values)
        self.labels.append(self.csv["Target"].values) #same labels for both
        # set if we have masks
        self.csv["has_masks"] = ~np.isnan(self.csv["x"])
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        ########## add consistent csv values
        # offset_day_int
        #TODO: merge with NIH metadata to get dates for images
        # patientid
        self.csv["patientid"] = self.csv["patientId"].astype(str)
    def string(self):
        return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        # Returns {"idx", "lab", "img"[, "pathology_masks"]}; the same
        # random seed is reused so transforms apply identically to image
        # and masks.
        sample = {}
        sample["idx"] = idx
        sample["lab"] = self.labels[idx]
        imgid = self.csv['patientId'].iloc[idx]
        img_path = os.path.join(self.imgpath, imgid + self.extension)
        #print(img_path)
        if self.use_pydicom:
            img=pydicom.filereader.dcmread(img_path).pixel_array
        else:
            img = imread(img_path)
        if self.normalize:
            img = normalize(img, self.MAXVAL)
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            # keep only the first channel of multi-channel images
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        sample["img"] = img[None, :, :]
        transform_seed = np.random.randint(2147483647)
        if self.pathology_masks:
            sample["pathology_masks"] = self.get_mask_dict(imgid, sample["img"].shape[2])
        if self.transform is not None:
            random.seed(transform_seed)
            sample["img"] = self.transform(sample["img"])
            if self.pathology_masks:
                for i in sample["pathology_masks"].keys():
                    random.seed(transform_seed)
                    sample["pathology_masks"][i] = self.transform(sample["pathology_masks"][i])
        if self.data_aug is not None:
            random.seed(transform_seed)
            sample["img"] = self.data_aug(sample["img"])
            if self.pathology_masks:
                for i in sample["pathology_masks"].keys():
                    random.seed(transform_seed)
                    sample["pathology_masks"][i] = self.data_aug(sample["pathology_masks"][i])
        return sample
    def get_mask_dict(self, image_name, this_size):
        # Build {pathology_index: binary mask} for this image; every box
        # applies to both pathologies, rescaled from the 1024-based grid.
        base_size = 1024
        scale = this_size/base_size
        images_with_masks = self.raw_csv[self.raw_csv["patientId"] == image_name]
        path_mask = {}
        # all masks are for both pathologies
        for patho in ["Pneumonia", "Lung Opacity"]:
            mask = np.zeros([this_size,this_size])
            # don't add masks for labels we don't have
            if patho in self.pathologies:
                for i in range(len(images_with_masks)):
                    row = images_with_masks.iloc[i]
                    xywh = np.asarray([row.x,row.y,row.width,row.height])
                    xywh = xywh*scale
                    xywh = xywh.astype(int)
                    mask[xywh[1]:xywh[1]+xywh[3],xywh[0]:xywh[0]+xywh[2]] = 1
            # resize so image resizing works
            mask = mask[None, :, :]
            path_mask[self.pathologies.index(patho)] = mask
        return path_mask
class NIH_Google_Dataset(Dataset):
    """
    Chest Radiograph Interpretation with Deep Learning Models: Assessment with
    Radiologist-adjudicated Reference Standards and Population-adjusted Evaluation
    Anna Majkowska, Sid Mittal, David F. Steiner, Joshua J. Reicher, Scott Mayer
    McKinney, Gavin E. Duggan, Krish Eswaran, Po-Hsuan Cameron Chen, Yun Liu,
    Sreenivasa Raju Kalidindi, Alexander Ding, Greg S. Corrado, Daniel Tse, and
    Shravya Shetty. Radiology 2020
    https://pubs.rsna.org/doi/10.1148/radiol.2019191293

    A pathology label is positive when the corresponding csv cell equals "YES".
    """
    def __init__(self, imgpath,
                 csvpath=os.path.join(thispath, "google2019_nih-chest-xray-labels.csv.gz"),
                 views=["PA"],
                 transform=None,
                 data_aug=None,
                 nrows=None,
                 seed=0,
                 pure_labels=False,
                 unique_patients=True,
                 normalize=True):
        # NOTE(review): mutable default `views=["PA"]` is shared across calls;
        # it is only read here, so this is harmless in practice.
        super(NIH_Google_Dataset, self).__init__()
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug
        self.pathologies = ["Fracture", "Pneumothorax", "Airspace opacity",
                            "Nodule or mass"]
        self.pathologies = sorted(self.pathologies)
        self.normalize = normalize
        # Load data
        self.csvpath = csvpath
        self.csv = pd.read_csv(self.csvpath, nrows=nrows)
        self.MAXVAL = 255  # Range [0 255]
        if type(views) is not list:
            views = [views]
        self.views = views
        # Remove images with view position other than specified
        self.csv["view"] = self.csv['View Position']
        self.csv = self.csv[self.csv["view"].isin(self.views)]
        if unique_patients:
            # Keep only one (the first) study per patient.
            self.csv = self.csv.groupby("Patient ID").first().reset_index()
        # Get our classes: one boolean column per pathology, positive on "YES".
        self.labels = []
        for pathology in self.pathologies:
            #if pathology in self.csv.columns:
            #self.csv.loc[pathology] = 0
            mask = self.csv[pathology] == "YES"
            self.labels.append(mask.values)
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        # rename pathologies to match the naming used by the other datasets
        self.pathologies = np.char.replace(self.pathologies, "Airspace opacity", "Lung Opacity")
    def string(self):
        """One-line human-readable summary of the dataset configuration."""
        return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Load, normalize, and augment the image at row ``idx``."""
        imgid = self.csv['Image Index'].iloc[idx]
        img_path = os.path.join(self.imgpath, imgid)
        #print(img_path)
        img = imread(img_path)
        if self.normalize:
            img = normalize(img, self.MAXVAL)
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        img = img[None, :, :]
        if self.transform is not None:
            img = self.transform(img)
        if self.data_aug is not None:
            img = self.data_aug(img)
        return {"img":img, "lab":self.labels[idx], "idx":idx}
class PC_Dataset(Dataset):
    """
    PadChest dataset
    Hospital San Juan de Alicante - University of Alicante
    PadChest: A large chest x-ray image dataset with multi-label annotated reports.
    Aurelia Bustos, Antonio Pertusa, Jose-Maria Salinas, and Maria de la Iglesia-Vayá.
    arXiv preprint, 2019. https://arxiv.org/abs/1901.07441
    Dataset website:
    http://bimcv.cipf.es/bimcv-projects/padchest/
    Download full size images here:
    https://academictorrents.com/details/dec12db21d57e158f78621f06dcbe78248d14850
    Download resized (224x224) images here (recropped):
    https://academictorrents.com/details/96ebb4f92b85929eadfb16761f310a6d04105797
    """
    def __init__(self, imgpath,
                 csvpath=os.path.join(thispath, "PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv.gz"),
                 views=["PA"],
                 transform=None,
                 data_aug=None,
                 flat_dir=True,
                 seed=0,
                 unique_patients=True):
        """Index the PadChest metadata csv and build the label matrix.

        A pathology is positive when the "Labels" report string contains the
        pathology name (lowercased) or any of its synonyms from ``mapping``.
        """
        super(PC_Dataset, self).__init__()
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.pathologies = ["Atelectasis", "Consolidation", "Infiltration",
                            "Pneumothorax", "Edema", "Emphysema", "Fibrosis",
                            "Effusion", "Pneumonia", "Pleural_Thickening",
                            "Cardiomegaly", "Nodule", "Mass", "Hernia","Fracture",
                            "Granuloma", "Flattened Diaphragm", "Bronchiectasis",
                            "Aortic Elongation", "Scoliosis",
                            "Hilar Enlargement", "Support Devices" , "Tuberculosis",
                            "Air Trapping", "Costophrenic Angle Blunting", "Aortic Atheromatosis",
                            "Hemidiaphragm Elevation"]
        self.pathologies = sorted(self.pathologies)
        # Report-text synonyms that also count as a positive for a pathology.
        mapping = dict()
        mapping["Infiltration"] = ["infiltrates",
                                   "interstitial pattern",
                                   "ground glass pattern",
                                   "reticular interstitial pattern",
                                   "reticulonodular interstitial pattern",
                                   "alveolar pattern",
                                   "consolidation",
                                   "air bronchogram"]
        mapping["Pleural_Thickening"] = ["pleural thickening"]
        mapping["Consolidation"] = ["air bronchogram"]
        mapping["Hilar Enlargement"] = ["adenopathy",
                                        "pulmonary artery enlargement"]
        mapping["Support Devices"] = ["device",
                                      "pacemaker"]
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug
        self.flat_dir = flat_dir
        self.csvpath = csvpath
        self.check_paths_exist()
        self.csv = pd.read_csv(self.csvpath, low_memory=False)
        self.MAXVAL = 65535  # Range [0 65535]
        # standardize view names
        self.csv.loc[self.csv["Projection"].isin(["AP_horizontal"]),"Projection"] = "AP Supine"
        # Keep only the specified views
        if type(views) is not list:
            views = [views]
        self.views = views
        self.csv["view"] = self.csv['Projection']
        self.csv = self.csv[self.csv["view"].isin(self.views)]
        # remove null stuff
        self.csv = self.csv[~self.csv["Labels"].isnull()]
        # remove missing files
        missing = ["216840111366964012819207061112010307142602253_04-014-084.png",
                   "216840111366964012989926673512011074122523403_00-163-058.png",
                   "216840111366964012959786098432011033083840143_00-176-115.png",
                   "216840111366964012558082906712009327122220177_00-102-064.png",
                   "216840111366964012339356563862009072111404053_00-043-192.png",
                   "216840111366964013076187734852011291090445391_00-196-188.png",
                   "216840111366964012373310883942009117084022290_00-064-025.png",
                   "216840111366964012283393834152009033102258826_00-059-087.png",
                   "216840111366964012373310883942009170084120009_00-097-074.png",
                   "216840111366964012819207061112010315104455352_04-024-184.png"]
        self.csv = self.csv[~self.csv["ImageID"].isin(missing)]
        if unique_patients:
            self.csv = self.csv.groupby("PatientID").first().reset_index()
        # Get our classes.
        self.labels = []
        for pathology in self.pathologies:
            mask = self.csv["Labels"].str.contains(pathology.lower())
            if pathology in mapping:
                for syn in mapping[pathology]:
                    #print("mapping", syn)
                    mask |= self.csv["Labels"].str.contains(syn.lower())
            self.labels.append(mask.values)
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        ########## add consistent csv values
        # offset_day_int: days since the Unix epoch.
        dt = pd.to_datetime(self.csv["StudyDate_DICOM"], format="%Y%m%d")
        # Fixed: `np.int` was removed in NumPy 1.24; "int64" produces the same
        # nanoseconds-since-epoch values before the ns -> day conversion.
        self.csv["offset_day_int"] = dt.astype("int64") // 10**9 // 86400
        # patientid
        self.csv["patientid"] = self.csv["PatientID"].astype(str)
    def string(self):
        """One-line human-readable summary of the dataset configuration."""
        return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Load, normalize (16-bit range), and augment the image at row ``idx``."""
        imgid = self.csv['ImageID'].iloc[idx]
        img_path = os.path.join(self.imgpath,imgid)
        img = imread(img_path)
        img = normalize(img, self.MAXVAL)
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        img = img[None, :, :]
        if self.transform is not None:
            img = self.transform(img)
        if self.data_aug is not None:
            img = self.data_aug(img)
        return {"img":img, "lab":self.labels[idx], "idx":idx}
class CheX_Dataset(Dataset):
    """
    CheXpert: A Large Chest Radiograph Dataset with Uncertainty Labels and Expert Comparison.
    Jeremy Irvin *, Pranav Rajpurkar *, Michael Ko, Yifan Yu, Silviana Ciurea-Ilcus, Chris Chute,
    Henrik Marklund, Behzad Haghgoo, Robyn Ball, Katie Shpanskaya, Jayne Seekins, David A. Mong,
    Safwan S. Halabi, Jesse K. Sandberg, Ricky Jones, David B. Larson, Curtis P. Langlotz,
    Bhavik N. Patel, Matthew P. Lungren, Andrew Y. Ng. https://arxiv.org/abs/1901.07031
    Dataset website here:
    https://stanfordmlgroup.github.io/competitions/chexpert/
    """
    def __init__(self, imgpath, csvpath, views=["PA"], transform=None, data_aug=None,
                 flat_dir=True, seed=0, unique_patients=True):
        """Index the CheXpert csv and build the label matrix.

        Uncertain labels (-1 in the csv) become NaN; rows marked "No Finding"
        get 0 for every pathology column present in the csv.
        """
        super(CheX_Dataset, self).__init__()
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.MAXVAL = 255
        self.pathologies = ["Enlarged Cardiomediastinum",
                            "Cardiomegaly",
                            "Lung Opacity",
                            "Lung Lesion",
                            "Edema",
                            "Consolidation",
                            "Pneumonia",
                            "Atelectasis",
                            "Pneumothorax",
                            "Pleural Effusion",
                            "Pleural Other",
                            "Fracture",
                            "Support Devices"]
        self.pathologies = sorted(self.pathologies)
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug
        self.csvpath = csvpath
        self.csv = pd.read_csv(self.csvpath)
        # To list
        if type(views) is not list:
            views = [views]
        self.views = views
        self.csv["view"] = self.csv["Frontal/Lateral"] # Assign view column
        self.csv.loc[(self.csv["view"] == "Frontal"), "view"] = self.csv["AP/PA"] # If Frontal change with the corresponding value in the AP/PA column otherwise remains Lateral
        self.csv["view"] = self.csv["view"].replace({'Lateral': "L"}) # Rename Lateral with L
        self.csv = self.csv[self.csv["view"].isin(self.views)] # Select the view
        if unique_patients:
            # Raw string fixes the invalid "\d" escape warning in the regex.
            self.csv["PatientID"] = self.csv["Path"].str.extract(pat = r'(patient\d+)')
            self.csv = self.csv.groupby("PatientID").first().reset_index()
        # Get our classes.
        healthy = self.csv["No Finding"] == 1
        self.labels = []
        for pathology in self.pathologies:
            if pathology in self.csv.columns:
                self.csv.loc[healthy, pathology] = 0
                mask = self.csv[pathology]
            self.labels.append(mask.values)
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        # make all the -1 values into nans to keep things simple
        self.labels[self.labels == -1] = np.nan
        # rename pathologies
        self.pathologies = list(np.char.replace(self.pathologies, "Pleural Effusion", "Effusion"))
        ########## add consistent csv values
        # offset_day_int
        # patientid: derived from the path, e.g. "train/patient00001/..." -> "00001"
        if 'train' in csvpath:
            patientid = self.csv.Path.str.split("train/", expand=True)[1]
        elif 'valid' in csvpath:
            patientid = self.csv.Path.str.split("valid/", expand=True)[1]
        else:
            # Fixed: `raise NotImplemented` raised a TypeError because
            # NotImplemented is not an exception class.
            raise NotImplementedError("csvpath must contain 'train' or 'valid'")
        patientid = patientid.str.split("/study", expand=True)[0]
        patientid = patientid.str.replace("patient","")
        self.csv["patientid"] = patientid
    def string(self):
        """One-line human-readable summary of the dataset configuration."""
        return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Load, normalize, and augment the image at row ``idx``."""
        imgid = self.csv['Path'].iloc[idx]
        imgid = imgid.replace("CheXpert-v1.0-small/","")
        img_path = os.path.join(self.imgpath, imgid)
        img = imread(img_path)
        img = normalize(img, self.MAXVAL)
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        img = img[None, :, :]
        if self.transform is not None:
            img = self.transform(img)
        if self.data_aug is not None:
            img = self.data_aug(img)
        return {"img":img, "lab":self.labels[idx], "idx":idx}
class MIMIC_Dataset(Dataset):
    """
    Johnson AE, Pollard TJ, Berkowitz S, Greenbaum NR, Lungren MP, Deng CY, Mark RG, Horng S.
    MIMIC-CXR: A large publicly available database of labeled chest radiographs.
    arXiv preprint arXiv:1901.07042. 2019 Jan 21.
    https://arxiv.org/abs/1901.07042
    Dataset website here:
    https://physionet.org/content/mimic-cxr-jpg/2.0.0/

    Joins the label csv with the metadata csv on (subject_id, study_id);
    uncertain labels (-1) become NaN, and "No Finding" rows get 0 for every
    pathology column present.
    """
    def __init__(self, imgpath, csvpath,metacsvpath, views=["PA"], transform=None, data_aug=None,
                 flat_dir=True, seed=0, unique_patients=True):
        super(MIMIC_Dataset, self).__init__()
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.MAXVAL = 255
        self.pathologies = ["Enlarged Cardiomediastinum",
                            "Cardiomegaly",
                            "Lung Opacity",
                            "Lung Lesion",
                            "Edema",
                            "Consolidation",
                            "Pneumonia",
                            "Atelectasis",
                            "Pneumothorax",
                            "Pleural Effusion",
                            "Pleural Other",
                            "Fracture",
                            "Support Devices"]
        self.pathologies = sorted(self.pathologies)
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug
        self.csvpath = csvpath
        self.csv = pd.read_csv(self.csvpath)
        self.metacsvpath = metacsvpath
        self.metacsv = pd.read_csv(self.metacsvpath)
        # Align labels and metadata on the (subject, study) key before joining.
        self.csv = self.csv.set_index(['subject_id', 'study_id'])
        self.metacsv = self.metacsv.set_index(['subject_id', 'study_id'])
        self.csv = self.csv.join(self.metacsv).reset_index()
        # Keep only the desired view
        self.views = views
        if self.views:
            if type(views) is not list:
                views = [views]
            self.views = views
            self.csv["view"] = self.csv["ViewPosition"]
            self.csv = self.csv[self.csv["view"].isin(self.views)]
        if unique_patients:
            self.csv = self.csv.groupby("subject_id").first().reset_index()
        # Get our classes.
        healthy = self.csv["No Finding"] == 1
        self.labels = []
        for pathology in self.pathologies:
            if pathology in self.csv.columns:
                self.csv.loc[healthy, pathology] = 0
                mask = self.csv[pathology]
            self.labels.append(mask.values)
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        # make all the -1 values into nans to keep things simple
        self.labels[self.labels == -1] = np.nan
        # rename pathologies
        self.pathologies = np.char.replace(self.pathologies, "Pleural Effusion", "Effusion")
        ########## add consistent csv values
        # offset_day_int
        self.csv["offset_day_int"] = self.csv["StudyDate"]
        # patientid
        self.csv["patientid"] = self.csv["subject_id"].astype(str)
    def string(self):
        """One-line human-readable summary of the dataset configuration."""
        return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Load, normalize, and augment the image at row ``idx``.

        Images live under the MIMIC-CXR-JPG layout:
        p<first 2 digits of subject>/p<subject>/s<study>/<dicom_id>.jpg
        """
        subjectid = str(self.csv.iloc[idx]["subject_id"])
        studyid = str(self.csv.iloc[idx]["study_id"])
        dicom_id = str(self.csv.iloc[idx]["dicom_id"])
        img_path = os.path.join(self.imgpath, "p" + subjectid[:2], "p" + subjectid, "s" + studyid, dicom_id + ".jpg")
        img = imread(img_path)
        img = normalize(img, self.MAXVAL)
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        img = img[None, :, :]
        if self.transform is not None:
            img = self.transform(img)
        if self.data_aug is not None:
            img = self.data_aug(img)
        return {"img":img, "lab":self.labels[idx], "idx":idx}
class Openi_Dataset(Dataset):
    """
    OpenI
    Dina Demner-Fushman, Marc D. Kohli, Marc B. Rosenman, Sonya E. Shooshan, Laritza
    Rodriguez, Sameer Antani, George R. Thoma, and Clement J. McDonald. Preparing a
    collection of radiology examinations for distribution and retrieval. Journal of the American
    Medical Informatics Association, 2016. doi: 10.1093/jamia/ocv080.
    Dataset website:
    https://openi.nlm.nih.gov/faq
    Download images:
    https://academictorrents.com/details/5a3a439df24931f410fac269b87b050203d9467d
    """
    def __init__(self, imgpath,
                 xmlpath=os.path.join(thispath, "NLMCXR_reports.tgz"),
                 dicomcsv_path=os.path.join(thispath, "nlmcxr_dicom_metadata.csv.gz"),
                 tsnepacsv_path=os.path.join(thispath, "nlmcxr_tsne_pa.csv.gz"),
                 filter_pa=True,
                 transform=None, data_aug=None,
                 nrows=None, seed=0,
                 pure_labels=False, unique_patients=True):
        """Parse the report XML archive and build the label matrix.

        A pathology is positive when the report's automatic MeSH labels
        contain the pathology name (lowercased) or a synonym from ``mapping``.
        """
        super(Openi_Dataset, self).__init__()
        # Fixed: bare `import xml` does not import the `xml.etree.ElementTree`
        # submodule used below; it only worked when another library had
        # already imported it. Import the submodule explicitly.
        import xml.etree.ElementTree
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug
        self.pathologies = ["Atelectasis", "Fibrosis",
                            "Pneumonia", "Effusion", "Lesion",
                            "Cardiomegaly", "Calcified Granuloma",
                            "Fracture", "Edema", "Granuloma", "Emphysema",
                            "Hernia", "Mass", "Nodule", "Opacity", "Infiltration",
                            "Pleural_Thickening", "Pneumothorax", ]
        self.pathologies = sorted(self.pathologies)
        # MeSH-term synonyms that also count as a positive.
        mapping = dict()
        mapping["Pleural_Thickening"] = ["pleural thickening"]
        mapping["Infiltration"] = ["Infiltrate"]
        mapping["Atelectasis"] = ["Atelectases"]
        # Load data
        self.xmlpath = xmlpath
        tarf = tarfile.open(xmlpath, 'r:gz')
        samples = []
        #for f in os.listdir(xmlpath):
        #    tree = xml.etree.ElementTree.parse(os.path.join(xmlpath, f))
        for filename in tarf.getnames():
            if (filename.endswith(".xml")):
                tree = xml.etree.ElementTree.parse(tarf.extractfile(filename))
                root = tree.getroot()
                uid = root.find("uId").attrib["id"]
                # Major (manual) and automatic MeSH labels, pipe-joined.
                labels_m = [node.text.lower() for node in root.findall(".//MeSH/major")]
                labels_m = "|".join(np.unique(labels_m))
                labels_a = [node.text.lower() for node in root.findall(".//MeSH/automatic")]
                labels_a = "|".join(np.unique(labels_a))
                image_nodes = root.findall(".//parentImage")
                # One sample per image attached to the report.
                for image in image_nodes:
                    sample = {}
                    sample["uid"] = uid
                    sample["imageid"] = image.attrib["id"]
                    sample["labels_major"] = labels_m
                    sample["labels_automatic"] = labels_a
                    samples.append(sample)
        self.csv = pd.DataFrame(samples)
        self.MAXVAL = 255  # Range [0 255]
        self.dicom_metadata = pd.read_csv(dicomcsv_path, index_col="imageid", low_memory=False)
        # merge in dicom metadata
        self.csv = self.csv.join(self.dicom_metadata, on="imageid")
        # filter only PA/AP view using the precomputed t-SNE view labels
        if filter_pa:
            tsne_pa = pd.read_csv(tsnepacsv_path, index_col="imageid")
            self.csv = self.csv.join(tsne_pa, on="imageid")
            self.csv = self.csv[self.csv["tsne-view"] == "PA"]
        # self.csv = self.csv[self.csv["View Position"] != "RL"]
        # self.csv = self.csv[self.csv["View Position"] != "LATERAL"]
        # self.csv = self.csv[self.csv["View Position"] != "LL"]
        if unique_patients:
            self.csv = self.csv.groupby("uid").first().reset_index()
        # Get our classes.
        self.labels = []
        for pathology in self.pathologies:
            mask = self.csv["labels_automatic"].str.contains(pathology.lower())
            if pathology in mapping:
                for syn in mapping[pathology]:
                    #print("mapping", syn)
                    mask |= self.csv["labels_automatic"].str.contains(syn.lower())
            self.labels.append(mask.values)
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        # rename pathologies to match the naming used by the other datasets
        self.pathologies = np.char.replace(self.pathologies, "Opacity", "Lung Opacity")
        self.pathologies = np.char.replace(self.pathologies, "Lesion", "Lung Lesion")
        ########## add consistent csv values
        # offset_day_int
        #self.csv["offset_day_int"] =
        # patientid
        self.csv["patientid"] = self.csv["uid"].astype(str)
    def string(self):
        """One-line human-readable summary of the dataset configuration."""
        return self.__class__.__name__ + " num_samples={}".format(len(self))
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Load, normalize, and augment the PNG image at row ``idx``."""
        imageid = self.csv.iloc[idx].imageid
        img_path = os.path.join(self.imgpath,imageid + ".png")
        #print(img_path)
        img = imread(img_path)
        img = normalize(img, self.MAXVAL)
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        img = img[None, :, :]
        if self.transform is not None:
            img = self.transform(img)
        if self.data_aug is not None:
            img = self.data_aug(img)
        return {"img":img, "lab":self.labels[idx], "idx":idx}
class COVID19_Dataset(Dataset):
    """
    COVID-19 Image Data Collection: Prospective Predictions Are the Future
    Joseph Paul Cohen and Paul Morrison and Lan Dao and Karsten Roth and Tim Q Duong and Marzyeh Ghassemi
    arXiv:2006.11988, 2020
    COVID-19 image data collection,
    Joseph Paul Cohen and Paul Morrison and Lan Dao
    arXiv:2003.11597, 2020
    Dataset: https://github.com/ieee8023/covid-chestxray-dataset
    Paper: https://arxiv.org/abs/2003.11597
    """
    def __init__(self,
                 imgpath=os.path.join(thispath, "covid-chestxray-dataset", "images"),
                 csvpath=os.path.join(thispath, "covid-chestxray-dataset", "metadata.csv"),
                 semantic_masks_v7labs_lungs_path=os.path.join(thispath, "data" , "semantic_masks_v7labs_lungs.zip"),
                 views=["PA", "AP"],
                 transform=None,
                 data_aug=None,
                 nrows=None,
                 seed=0,
                 pure_labels=False,
                 unique_patients=True,
                 semantic_masks=False):
        """Index the metadata csv; pathologies are derived from the "finding"
        column (split on "/"), so the label set depends on the csv contents.
        """
        super(COVID19_Dataset, self).__init__()
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug
        self.views = views
        self.semantic_masks = semantic_masks
        self.semantic_masks_v7labs_lungs_path = semantic_masks_v7labs_lungs_path
        # Load data
        self.csvpath = csvpath
        self.csv = pd.read_csv(self.csvpath, nrows=nrows)
        self.MAXVAL = 255  # Range [0 255]
        # Keep only the frontal views.
        #idx_pa = self.csv["view"].isin(["PA", "AP", "AP Supine"])
        idx_pa = self.csv["view"].isin(self.views)
        self.csv = self.csv[idx_pa]
        # filter out in-progress samples
        self.csv = self.csv[~(self.csv.finding == "todo")]
        self.csv = self.csv[~(self.csv.finding == "Unknown")]
        # The label set is whatever findings appear in the csv ("A/B" rows
        # contribute both A and B).
        self.pathologies = self.csv.finding.str.split("/", expand=True).values.ravel()
        self.pathologies = self.pathologies[~pd.isnull(self.pathologies)]
        self.pathologies = sorted(np.unique(self.pathologies))
        self.labels = []
        for pathology in self.pathologies:
            mask = self.csv["finding"].str.contains(pathology)
            self.labels.append(mask.values)
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        self.csv = self.csv.reset_index()
        if self.semantic_masks:
            # Cache the archive's file list so __getitem__ can test membership
            # without reopening the zip.
            temp = zipfile.ZipFile(self.semantic_masks_v7labs_lungs_path)
            self.semantic_masks_v7labs_lungs_namelist = temp.namelist()
        ########## add consistent csv values
        # offset_day_int
        self.csv["offset_day_int"] = self.csv["offset"]
    def string(self):
        """One-line human-readable summary of the dataset configuration."""
        return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Return one sample dict; the same seed is re-applied before every
        transform call so the image and its lung masks are augmented alike.
        """
        sample = {}
        sample["idx"] = idx
        sample["lab"] = self.labels[idx]
        imgid = self.csv['filename'].iloc[idx]
        img_path = os.path.join(self.imgpath, imgid)
        #print(img_path)
        img = imread(img_path)
        img = normalize(img, self.MAXVAL)
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        sample["img"] = img[None, :, :]
        transform_seed = np.random.randint(2147483647)
        if self.semantic_masks:
            sample["semantic_masks"] = self.get_semantic_mask_dict(imgid, sample["img"].shape)
        if self.transform is not None:
            random.seed(transform_seed)
            sample["img"] = self.transform(sample["img"])
            if self.semantic_masks:
                for i in sample["semantic_masks"].keys():
                    random.seed(transform_seed)
                    sample["semantic_masks"][i] = self.transform(sample["semantic_masks"][i])
        if self.data_aug is not None:
            random.seed(transform_seed)
            sample["img"] = self.data_aug(sample["img"])
            if self.semantic_masks:
                for i in sample["semantic_masks"].keys():
                    random.seed(transform_seed)
                    sample["semantic_masks"][i] = self.data_aug(sample["semantic_masks"][i])
        return sample
    def get_semantic_mask_dict(self, image_name, this_shape):
        """Return {"Lungs": mask} for this image if a lung mask exists in the
        v7labs zip, otherwise an empty dict. Mask pixels equal to 255 become 1.
        """
        archive_path = "semantic_masks_v7labs_lungs/" + image_name
        semantic_masks = {}
        if archive_path in self.semantic_masks_v7labs_lungs_namelist:
            with zipfile.ZipFile(self.semantic_masks_v7labs_lungs_path).open(archive_path) as file:
                mask = imageio.imread(file.read())
                # Fixed: `np.float` was removed in NumPy 1.24; the builtin
                # `float` is the equivalent dtype.
                mask = (mask == 255).astype(float)
                # reshape so image resizing works
                mask = mask[None, :, :]
                semantic_masks["Lungs"] = mask
        return semantic_masks
class NLMTB_Dataset(Dataset):
    """
    National Library of Medicine Tuberculosis Datasets
    https://lhncbc.nlm.nih.gov/publication/pub9931
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4256233/
    Jaeger S, Candemir S, Antani S, Wang YX, Lu PX, Thoma G. Two public chest X-ray
    datasets for computer-aided screening of pulmonary diseases. Quant Imaging Med
    Surg. 2014 Dec;4(6):475-7. doi: 10.3978/j.issn.2223-4292.2014.11.20.
    PMID: 25525580; PMCID: PMC4256233.
    Download Links:
    Montgomery County
    https://academictorrents.com/details/ac786f74878a5775c81d490b23842fd4736bfe33
    http://openi.nlm.nih.gov/imgs/collections/NLM-MontgomeryCXRSet.zip
    Shenzhen
    https://academictorrents.com/details/462728e890bd37c05e9439c885df7afc36209cc8
    http://openi.nlm.nih.gov/imgs/collections/ChinaSet_AllFiles.zip
    """
    def __init__(self,
                 imgpath,
                 transform=None,
                 data_aug=None,
                 seed=0,
                 views=["PA", "AP"]
                 ):
        """
        Args:
            img_path (str): Path to `MontgomerySet` or `ChinaSet_AllFiles` folder
        """
        super(NLMTB_Dataset, self).__init__()
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug
        # Index every png found under <imgpath>/CXR_png, in sorted order.
        png_names = [name for name in sorted(os.listdir(os.path.join(self.imgpath, "CXR_png")))
                     if name.endswith(".png")]
        self.csv = pd.DataFrame({"fname": png_names})
        # The single-class label is the last digit of the filename stem.
        self.csv["label"] = self.csv["fname"].apply(lambda name: int(name.split(".")[-2][-1]))
        self.labels = self.csv["label"].values.reshape(-1, 1)
        self.pathologies = ["Tuberculosis"]
        self.views = views
        self.MAXVAL = 255
    def string(self):
        """One-line human-readable summary of the dataset configuration."""
        return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Load, normalize, and augment the image at row ``idx``."""
        record = self.csv.iloc[idx]
        path = os.path.join(self.imgpath, "CXR_png", record["fname"])
        #print(path)
        image = imread(path)
        image = normalize(image, self.MAXVAL)
        # Images must end up as single-channel 2D arrays.
        if image.ndim > 2:
            image = image[:, :, 0]
        if image.ndim < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        image = image[None, :, :]
        if self.transform is not None:
            image = self.transform(image)
        if self.data_aug is not None:
            image = self.data_aug(image)
        return {"img": image, "lab": self.labels[idx], "idx": idx}
class SIIM_Pneumothorax_Dataset(Dataset):
    """
    https://academictorrents.com/details/6ef7c6d039e85152c4d0f31d83fa70edc4aba088
    https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation
    "The data is comprised of images in DICOM format and annotations in the form of image IDs and run-length-encoded (RLE) masks. Some of the images contain instances of pneumothorax (collapsed lung), which are indicated by encoded binary masks in the annotations. Some training images have multiple annotations.
    Images without pneumothorax have a mask value of -1."
    """
    def __init__(self,
                 imgpath,
                 csvpath,
                 transform=None,
                 data_aug=None,
                 seed=0,
                 unique_patients=True,
                 masks=False):
        # NOTE(review): `unique_patients` is accepted but never used here.
        super(SIIM_Pneumothorax_Dataset, self).__init__()
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug
        self.masks = masks
        # Load data
        self.csvpath = csvpath
        self.csv = pd.read_csv(self.csvpath)
        self.MAXVAL = 255  # Range [0 255]
        self.pathologies = ["Pneumothorax"]
        # Single binary label: positive when an RLE mask is present
        # (" EncodedPixels" != "-1"); note the leading space in the column name.
        self.labels = []
        self.labels.append(self.csv[" EncodedPixels"] != "-1")
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        self.csv = self.csv.reset_index()
        self.csv["has_masks"] = self.csv[" EncodedPixels"] != "-1"
        # Map every dcm filename to its full path once; os.walk over the whole
        # tree is slow, so the result is memoized in the module-level cache.
        #to figure out the paths
        #TODO: make faster
        if not ("siim_file_map" in _cache_dict):
            file_map = {}
            for root, directories, files in os.walk(self.imgpath, followlinks=False):
                for filename in files:
                    filePath = os.path.join(root,filename)
                    file_map[filename] = filePath
            _cache_dict["siim_file_map"] = file_map
        self.file_map = _cache_dict["siim_file_map"]
    def string(self):
        """One-line human-readable summary of the dataset configuration."""
        return self.__class__.__name__ + " num_samples={} data_aug={}".format(len(self), self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Return one sample dict; the same seed is re-applied before every
        transform call so image and masks get identical random augmentations.
        """
        sample = {}
        sample["idx"] = idx
        sample["lab"] = self.labels[idx]
        imgid = self.csv['ImageId'].iloc[idx]
        img_path = self.file_map[imgid + ".dcm"]
        #print(img_path)
        img = pydicom.filereader.dcmread(img_path).pixel_array
        #img = imread(img_path)
        img = normalize(img, self.MAXVAL)
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        sample["img"] = img[None, :, :]
        transform_seed = np.random.randint(2147483647)
        if self.masks:
            # shape[2] is the image width; masks assume square images.
            sample["pathology_masks"] = self.get_pathology_mask_dict(imgid, sample["img"].shape[2])
        if self.transform is not None:
            random.seed(transform_seed)
            sample["img"] = self.transform(sample["img"])
            if self.masks:
                for i in sample["pathology_masks"].keys():
                    random.seed(transform_seed)
                    sample["pathology_masks"][i] = self.transform(sample["pathology_masks"][i])
        if self.data_aug is not None:
            random.seed(transform_seed)
            sample["img"] = self.data_aug(sample["img"])
            if self.masks:
                for i in sample["pathology_masks"].keys():
                    random.seed(transform_seed)
                    sample["pathology_masks"][i] = self.data_aug(sample["pathology_masks"][i])
        return sample
    def get_pathology_mask_dict(self, image_name, this_size):
        """Decode the RLE annotations for ``image_name`` into a
        ``{pathology index: mask}`` dict, resized from 1024x1024 to
        ``this_size`` x ``this_size``.
        """
        base_size = 1024
        images_with_masks = self.csv[np.logical_and(self.csv["ImageId"] == image_name,
                                                    self.csv[" EncodedPixels"] != "-1")]
        path_mask = {}
        # from kaggle code
        def rle2mask(rle, width, height):
            # Run-length decoding: pairs of (start offset, run length).
            mask= np.zeros(width* height)
            array = np.asarray([int(x) for x in rle.split()])
            starts = array[0::2]
            lengths = array[1::2]
            current_position = 0
            for index, start in enumerate(starts):
                current_position += start
                mask[current_position:current_position+lengths[index]] = 1
                current_position += lengths[index]
            return mask.reshape(width, height)
        if len(images_with_masks) > 0:
            # using a for loop so it is consistent with the other code
            for patho in ["Pneumothorax"]:
                mask = np.zeros([this_size,this_size])
                # don't add masks for labels we don't have
                if patho in self.pathologies:
                    for i in range(len(images_with_masks)):
                        row = images_with_masks.iloc[i]
                        mask = rle2mask(row[" EncodedPixels"],base_size,base_size)
                        mask = mask.T
                        # order=0 keeps the mask binary through the resize...
                        mask = skimage.transform.resize(mask, (this_size, this_size), mode='constant', order=0)
                        mask = mask.round() #make 0,1
                # reshape so image resizing works
                mask = mask[None, :, :]
                path_mask[self.pathologies.index(patho)] = mask
        return path_mask
class VinBrain_Dataset(Dataset):
    """
    Nguyen et al., VinDr-CXR: An open dataset of chest X-rays with radiologist's annotations
    https://arxiv.org/abs/2012.15029
    https://www.kaggle.com/c/vinbigdata-chest-xray-abnormalities-detection
    """
    def __init__(self, imgpath,
                 csvpath,
                 views=None,
                 transform=None,
                 data_aug=None,
                 seed=0,
                 pathology_masks=False):
        super(VinBrain_Dataset, self).__init__()
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.imgpath = imgpath
        self.csvpath = csvpath
        self.transform = transform
        self.data_aug = data_aug
        self.pathology_masks = pathology_masks
        self.views = views
        self.pathologies = ['Aortic enlargement',
                            'Atelectasis',
                            'Calcification',
                            'Cardiomegaly',
                            'Consolidation',
                            'ILD',
                            'Infiltration',
                            'Lung Opacity',
                            'Nodule/Mass',
                            'Lesion',
                            'Effusion',
                            'Pleural_Thickening',
                            'Pneumothorax',
                            'Pulmonary Fibrosis']
        self.pathologies = sorted(np.unique(self.pathologies))
        # csv class names that also count as a positive for a pathology.
        self.mapping = dict()
        self.mapping["Pleural_Thickening"] = ["Pleural thickening"]
        self.mapping["Effusion"] = ["Pleural effusion"]
        # NOTE(review): this stores the module-level `normalize` function (always
        # truthy), not a boolean flag — presumably a `normalize=True` parameter
        # was intended; behavior is equivalent to normalize=True.
        self.normalize = normalize
        # Load data
        self.check_paths_exist()
        self.rawcsv = pd.read_csv(self.csvpath)
        # One row per image with all of its class names pipe-joined.
        self.csv = pd.DataFrame(self.rawcsv.groupby("image_id")["class_name"].apply(lambda x: "|".join(np.unique(x))))
        self.labels = []
        for pathology in self.pathologies:
            mask = self.csv["class_name"].str.lower().str.contains(pathology.lower())
            if pathology in self.mapping:
                for syn in self.mapping[pathology]:
                    mask |= self.csv["class_name"].str.lower().str.contains(syn.lower())
            self.labels.append(mask.values)
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)
        self.csv = self.csv.reset_index()
    def string(self):
        """One-line human-readable summary of the dataset configuration."""
        return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Return one sample dict built from the raw DICOM file.

        Applies the modality LUT and VOI windowing from the DICOM headers,
        inverts MONOCHROME1 images, then normalizes by the bit depth.  The
        same seed is re-applied before every transform call so the image and
        its masks receive identical random augmentations.
        """
        sample = {}
        sample["idx"] = idx
        sample["lab"] = self.labels[idx]
        imgid = self.csv['image_id'].iloc[idx]
        img_path = os.path.join(self.imgpath, imgid + ".dicom")
        #print(img_path)
        from pydicom.pixel_data_handlers.util import apply_modality_lut
        dicom_obj = pydicom.filereader.dcmread(img_path)
        #print(dicom_obj)
        img = apply_modality_lut(dicom_obj.pixel_array, dicom_obj)
        img = pydicom.pixel_data_handlers.apply_windowing(img, dicom_obj)
        # Photometric Interpretation to see if the image needs to be inverted
        mode = dicom_obj[0x28, 0x04].value
        bitdepth = dicom_obj[0x28, 0x101].value
        # hack! assume 8-bit content when the max value fits in a byte,
        # regardless of the declared Bits Stored.
        if img.max() < 256:
            bitdepth = 8
        if mode == "MONOCHROME1":
            # MONOCHROME1 stores inverted intensities; flip to MONOCHROME2 convention.
            img = -1*img + 2**float(bitdepth)
        elif mode == "MONOCHROME2":
            pass
        else:
            raise Exception("Unknown Photometric Interpretation mode")
        if self.normalize:
            img = normalize(img, 2**float(bitdepth))
        # Check that images are 2D arrays
        if len(img.shape) > 2:
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")
        # Add color channel
        sample["img"] = img[None, :, :]
        transform_seed = np.random.randint(2147483647)
        if self.pathology_masks:
            sample["pathology_masks"] = self.get_mask_dict(imgid, sample["img"].shape)
        if self.transform is not None:
            random.seed(transform_seed)
            sample["img"] = self.transform(sample["img"])
            if self.pathology_masks:
                for i in sample["pathology_masks"].keys():
                    random.seed(transform_seed)
                    sample["pathology_masks"][i] = self.transform(sample["pathology_masks"][i])
        if self.data_aug is not None:
            random.seed(transform_seed)
            sample["img"] = self.data_aug(sample["img"])
            if self.pathology_masks:
                for i in sample["pathology_masks"].keys():
                    random.seed(transform_seed)
                    sample["pathology_masks"][i] = self.data_aug(sample["pathology_masks"][i])
        return sample
    def get_mask_dict(self, image_name, this_size):
        """Rasterize this image's bounding boxes into per-pathology masks.

        ``this_size`` is the (channels, height, width) shape of the image;
        boxes come from the raw csv's x/y min/max columns at full resolution.
        """
        c, h, w = this_size
        path_mask = {}
        rows = self.rawcsv[self.rawcsv.image_id.str.contains(image_name)]
        for i, pathology in enumerate(self.pathologies):
            for group_name, df_group in rows.groupby("class_name"):
                if (group_name == pathology) or ((pathology in self.mapping) and (group_name in self.mapping[pathology])):
                    mask = np.zeros([h, w])
                    for idx, row in df_group.iterrows():
                        mask[int(row.y_min):int(row.y_max), int(row.x_min):int(row.x_max)] = 1
                    # Prepend a channel axis so masks resize like images.
                    path_mask[i] = mask[None, :, :]
        return path_mask
class StonyBrookCOVID_Dataset(Dataset):
    """
    This dataset loads the Stonybrook
    Radiographic Assessment of Lung Opacity Score Dataset
    https://doi.org/10.5281/zenodo.4633999
    Citation will be set soon.

    Each sample carries two regression targets, "Geographic Extent" and
    "Lung Opacity", each the mean of the two raters' scores in the CSV.
    """

    def __init__(self,
                 imgpath,  # path to CXR_images_scored
                 csvpath,  # path to ralo-dataset-metadata.csv
                 transform=None,
                 data_aug=None,
                 seed=0):
        super(StonyBrookCOVID_Dataset, self).__init__()
        np.random.seed(seed)  # Reset the seed so all runs are the same.
        self.imgpath = imgpath
        self.transform = transform
        self.data_aug = data_aug

        # Load data (first CSV line is a grouping header, so skip it).
        self.csvpath = csvpath
        self.csv = pd.read_csv(self.csvpath, skiprows=1)
        self.MAXVAL = 255  # Range [0 255]

        self.pathologies = ["Geographic Extent", "Lung Opacity"]

        # Average the two raters' scores for each target.
        self.csv["Geographic Extent"] = (self.csv["Total GEOGRAPHIC"] + self.csv["Total GEOGRAPHIC.1"]) / 2
        self.csv["Lung Opacity"] = (self.csv["Total OPACITY"] + self.csv["Total OPACITY.1"]) / 2

        self.labels = []
        self.labels.append(self.csv["Geographic Extent"])
        self.labels.append(self.csv["Lung Opacity"])
        self.labels = np.asarray(self.labels).T
        self.labels = self.labels.astype(np.float32)

        ########## add consistent csv values

        # offset_day_int: days since the Unix epoch for each exam date
        # (date part of "YYYYMMDD_HHMMSS").
        date_col = self.csv["Exam_DateTime"].str.split("_", expand=True)[0]
        dt = pd.to_datetime(date_col, format="%Y%m%d")
        # BUGFIX: the deprecated `np.int` alias was removed in NumPy >= 1.24;
        # use the explicit 64-bit type. ns -> seconds -> days.
        self.csv["offset_day_int"] = dt.astype(np.int64) // 10**9 // 86400

        # patientid
        self.csv["patientid"] = self.csv["Subject_ID"].astype(str)

    def string(self):
        """Return a one-line, human-readable summary of this dataset."""
        return self.__class__.__name__ + " num_samples={}".format(len(self))

    def __len__(self):
        """Dataset size: one sample per label row."""
        return len(self.labels)

    def __getitem__(self, idx):
        """Load image ``idx`` (stored as "<idx>.jpg"), normalize to
        ``self.MAXVAL`` and apply the configured transform/augmentation.

        Returns a dict with keys "idx", "lab" and "img" (1 x H x W).
        """
        sample = {}
        sample["idx"] = idx
        sample["lab"] = self.labels[idx]

        img_path = os.path.join(self.imgpath, str(idx) + ".jpg")
        #print(img_path)
        img = imread(img_path)
        img = normalize(img, self.MAXVAL)

        # Check that images are 2D arrays
        if len(img.shape) > 2:
            img = img[:, :, 0]
        if len(img.shape) < 2:
            print("error, dimension lower than 2 for image")

        # Add color channel
        sample["img"] = img[None, :, :]

        # Shared seed so transform and augmentation see the same randomness
        # for this sample.
        transform_seed = np.random.randint(2147483647)

        if self.transform is not None:
            random.seed(transform_seed)
            sample["img"] = self.transform(sample["img"])

        if self.data_aug is not None:
            random.seed(transform_seed)
            sample["img"] = self.data_aug(sample["img"])

        return sample
class ToPILImage(object):
    """Callable that drops the leading channel axis and converts the
    single-channel array to a 32-bit float ("F" mode) PIL image via
    torchvision's ToPILImage."""

    def __init__(self):
        self.to_pil = transforms.ToPILImage(mode="F")

    def __call__(self, x):
        # x is channel-first; convert its sole channel.
        return self.to_pil(x[0])
class XRayResizer(object):
    """Resize a (1, H, W) image to (1, size, size).

    engine="skimage" (default) or "cv2"; cv2 is optional and imported
    lazily only when requested.
    """

    def __init__(self, size, engine="skimage"):
        self.size = size
        self.engine = engine
        # Hint printed whenever cv2 is already loaded in the process.
        if 'cv2' in sys.modules:
            print("Setting XRayResizer engine to cv2 could increase performance.")

    def __call__(self, img):
        if self.engine == "skimage":
            # Silence skimage's resize warnings; keep the original range.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                resized = skimage.transform.resize(
                    img, (1, self.size, self.size),
                    mode='constant', preserve_range=True)
                return resized.astype(np.float32)
        elif self.engine == "cv2":
            import cv2  # pip install opencv-python
            resized = cv2.resize(img[0, :, :],
                                 (self.size, self.size),
                                 interpolation=cv2.INTER_AREA)
            return resized.reshape(1, self.size, self.size).astype(np.float32)
        else:
            raise Exception("Unknown engine, Must be skimage (default) or cv2.")
class XRayCenterCrop(object):
    """Crop the largest centered square out of a channel-first (C, H, W)
    image, matching the original integer-centering arithmetic."""

    def crop_center(self, img):
        _, height, width = img.shape
        side = min(height, width)
        top = height // 2 - side // 2
        left = width // 2 - side // 2
        return img[:, top:top + side, left:left + side]

    def __call__(self, img):
        return self.crop_center(img)
class CovariateDataset(Dataset):
    """
    Dataset which will correlate the dataset with a specific label.
    Viviano et al. Saliency is a Possible Red Herring When Diagnosing Poor Generalization
    https://arxiv.org/abs/1910.00199

    Combines two datasets (d1, d2) into one binary-labeled dataset whose
    label is artificially correlated with the dataset of origin ("site").
    In the train split, `ratio` of the site-0 samples are negative and
    `ratio` of the site-1 samples are positive; the valid/test splits use
    the inverted ratio, so a model exploiting the site shortcut fails to
    generalize.
    """
    def __init__(self,
                 d1, d1_target,
                 d2, d2_target,
                 ratio=0.5, mode="train",
                 seed=0, nsamples=None,
                 splits=[0.5, 0.25, 0.25],
                 verbose=False):
        # NOTE(review): `splits` is a mutable default argument; it is only
        # read here, but confirm no caller mutates it.
        super(CovariateDataset, self).__init__()
        self.splits = np.array(splits)
        self.d1 = d1
        self.d1_target = d1_target
        self.d2 = d2
        self.d2_target = d2_target

        assert mode in ['train', 'valid', 'test']
        assert np.sum(self.splits) == 1.0

        np.random.seed(seed)  # Reset the seed so all runs are the same.

        # Bookkeeping over the virtual concatenation [d1 | d2]:
        # all_imageids = per-dataset index, all_idx = global position,
        # all_site = 0 for d1 samples, 1 for d2 samples.
        all_imageids = np.concatenate([np.arange(len(self.d1)),
                                       np.arange(len(self.d2))]).astype(int)
        all_idx = np.arange(len(all_imageids)).astype(int)
        all_labels = np.concatenate([d1_target,
                                     d2_target]).astype(int)
        all_site = np.concatenate([np.zeros(len(self.d1)),
                                   np.ones(len(self.d2))]).astype(int)

        idx_sick = all_labels==1
        # Balance all four (site, label) cells to the size of the smallest.
        n_per_category = np.min([sum(idx_sick[all_site==0]),
                                 sum(idx_sick[all_site==1]),
                                 sum(~idx_sick[all_site==0]),
                                 sum(~idx_sick[all_site==1])])
        if verbose:
            print("n_per_category={}".format(n_per_category))

        # Randomly pick n_per_category global indices per (site, label) cell.
        all_0_neg = all_idx[np.where((all_site==0) & (all_labels==0))]
        all_0_neg = np.random.choice(all_0_neg, n_per_category, replace=False)
        all_0_pos = all_idx[np.where((all_site==0) & (all_labels==1))]
        all_0_pos = np.random.choice(all_0_pos, n_per_category, replace=False)
        all_1_neg = all_idx[np.where((all_site==1) & (all_labels==0))]
        all_1_neg = np.random.choice(all_1_neg, n_per_category, replace=False)
        all_1_pos = all_idx[np.where((all_site==1) & (all_labels==1))]
        all_1_pos = np.random.choice(all_1_pos, n_per_category, replace=False)

        # TRAIN: `ratio` controls the site/label correlation in this split.
        train_0_neg = np.random.choice(
            all_0_neg, int(n_per_category*ratio*splits[0]*2), replace=False)
        train_0_pos = np.random.choice(
            all_0_pos, int(n_per_category*(1-ratio)*splits[0]*2), replace=False)
        train_1_neg = np.random.choice(
            all_1_neg, int(n_per_category*(1-ratio)*splits[0]*2), replace=False)
        train_1_pos = np.random.choice(
            all_1_pos, int(n_per_category*ratio*splits[0]*2), replace=False)

        # REDUCE POST-TRAIN: remove already-assigned samples from the pools.
        all_0_neg = np.setdiff1d(all_0_neg, train_0_neg)
        all_0_pos = np.setdiff1d(all_0_pos, train_0_pos)
        all_1_neg = np.setdiff1d(all_1_neg, train_1_neg)
        all_1_pos = np.setdiff1d(all_1_pos, train_1_pos)

        if verbose:
            print("TRAIN: neg={}, pos={}".format(len(train_0_neg)+len(train_1_neg),
                                                 len(train_0_pos)+len(train_1_pos)))

        # VALID: note the ratio is inverted relative to TRAIN.
        valid_0_neg = np.random.choice(
            all_0_neg, int(n_per_category*(1-ratio)*splits[1]*2), replace=False)
        valid_0_pos = np.random.choice(
            all_0_pos, int(n_per_category*ratio*splits[1]*2), replace=False)
        valid_1_neg = np.random.choice(
            all_1_neg, int(n_per_category*ratio*splits[1]*2), replace=False)
        valid_1_pos = np.random.choice(
            all_1_pos, int(n_per_category*(1-ratio)*splits[1]*2), replace=False)

        # REDUCE POST-VALID
        all_0_neg = np.setdiff1d(all_0_neg, valid_0_neg)
        all_0_pos = np.setdiff1d(all_0_pos, valid_0_pos)
        all_1_neg = np.setdiff1d(all_1_neg, valid_1_neg)
        all_1_pos = np.setdiff1d(all_1_pos, valid_1_pos)

        if verbose:
            print("VALID: neg={}, pos={}".format(len(valid_0_neg)+len(valid_1_neg),
                                                 len(valid_0_pos)+len(valid_1_pos)))

        # TEST: everything left over after train/valid were removed.
        test_0_neg = all_0_neg
        test_0_pos = all_0_pos
        test_1_neg = all_1_neg
        test_1_pos = all_1_pos

        if verbose:
            print("TEST: neg={}, pos={}".format(len(test_0_neg)+len(test_1_neg),
                                                len(test_0_pos)+len(test_1_pos)))

        def _reduce_nsamples(nsamples, a, b, c, d):
            # Optionally cap the split to `nsamples`, keeping the four
            # cells balanced (floor/ceil split the remainder).
            if nsamples:
                a = a[:int(np.floor(nsamples/4))]
                b = b[:int(np.ceil(nsamples/4))]
                c = c[:int(np.ceil(nsamples/4))]
                d = d[:int(np.floor(nsamples/4))]
            return (a, b, c, d)

        if mode == "train":
            (a, b, c, d) = _reduce_nsamples(
                nsamples, train_0_neg, train_0_pos, train_1_neg, train_1_pos)
        elif mode == "valid":
            (a, b, c, d) = _reduce_nsamples(
                nsamples, valid_0_neg, valid_0_pos, valid_1_neg, valid_1_pos)
        elif mode == "test":
            (a, b, c, d) = _reduce_nsamples(
                nsamples, test_0_neg, test_0_pos, test_1_neg, test_1_pos)
        else:
            raise Exception("unknown mode")

        # Final selection for this split, mapped back to per-dataset ids.
        self.select_idx = np.concatenate([a, b, c, d])
        self.imageids = all_imageids[self.select_idx]
        self.pathologies = ["Custom"]
        self.labels = all_labels[self.select_idx].reshape(-1,1)
        self.site = all_site[self.select_idx]

    def __repr__(self):
        # NOTE(review): totals() is not defined in this class — presumably
        # inherited from the Dataset base class; confirm.
        pprint.pprint(self.totals())
        return self.__class__.__name__ + " num_samples={}".format(len(self))

    def __len__(self):
        return len(self.imageids)

    def __getitem__(self, idx):
        # Route the lookup to the dataset this sample came from.
        if self.site[idx] == 0:
            dataset = self.d1
        else:
            dataset = self.d2

        sample = dataset[self.imageids[idx]]

        # replace the labels with the specific label we focus on
        sample["lab-old"] = sample["lab"]
        sample["lab"] = self.labels[idx]

        sample["site"] = self.site[idx]

        return sample
| 37.791296 | 312 | 0.557738 |
2f7199f806ef3bacd40350f91b815bc349c5eff0 | 14,762 | py | Python | rapid7vmconsole/models/operating_system_cpe.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 61 | 2018-05-17T05:57:09.000Z | 2022-03-08T13:59:21.000Z | rapid7vmconsole/models/operating_system_cpe.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 33 | 2018-06-26T16:21:14.000Z | 2022-03-03T20:55:47.000Z | rapid7vmconsole/models/operating_system_cpe.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 43 | 2018-02-24T05:45:53.000Z | 2022-03-31T22:15:16.000Z | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OperatingSystemCpe(object):
    """Common Platform Enumeration (CPE) descriptor for an operating system.

    Auto-generated Swagger model, restyled by hand: the public interface
    (constructor keywords, properties with validation on `part`,
    to_dict/to_str/__repr__/__eq__/__ne__, swagger_types, attribute_map)
    is unchanged.

    Attributes:
      swagger_types (dict): attribute name -> attribute type.
      attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'edition': 'str',
        'language': 'str',
        'other': 'str',
        'part': 'str',
        'product': 'str',
        'sw_edition': 'str',
        'target_hw': 'str',
        'target_sw': 'str',
        'update': 'str',
        'v2_2': 'str',
        'v2_3': 'str',
        'vendor': 'str',
        'version': 'str'
    }

    attribute_map = {
        'edition': 'edition',
        'language': 'language',
        'other': 'other',
        'part': 'part',
        'product': 'product',
        'sw_edition': 'swEdition',
        'target_hw': 'targetHW',
        'target_sw': 'targetSW',
        'update': 'update',
        'v2_2': 'v2.2',
        'v2_3': 'v2.3',
        'vendor': 'vendor',
        'version': 'version'
    }

    def __init__(self, edition=None, language=None, other=None, part=None, product=None, sw_edition=None, target_hw=None, target_sw=None, update=None, v2_2=None, v2_3=None, vendor=None, version=None):  # noqa: E501
        """OperatingSystemCpe - a model defined in Swagger"""  # noqa: E501
        # Every backing field starts as None so __eq__ (a __dict__
        # comparison) behaves the same whether or not a value was supplied.
        for name in self.swagger_types:
            setattr(self, '_' + name, None)
        self.discriminator = None
        # `part` is required: assigning through the property validates it
        # and raises ValueError when it is None or outside the allowed set.
        self.part = part
        optional = {
            'edition': edition, 'language': language, 'other': other,
            'product': product, 'sw_edition': sw_edition,
            'target_hw': target_hw, 'target_sw': target_sw,
            'update': update, 'v2_2': v2_2, 'v2_3': v2_3,
            'vendor': vendor, 'version': version,
        }
        for name, value in optional.items():
            if value is not None:
                setattr(self, name, value)

    def _simple_property(name, doc):
        """Class-body helper: plain get/set property over `_<name>`."""
        def _get(self):
            return getattr(self, '_' + name)

        def _set(self, value):
            setattr(self, '_' + name, value)

        return property(_get, _set, doc=doc)

    edition = _simple_property('edition', "Edition-related terms applied by the vendor to the product.")
    language = _simple_property('language', "Language supported in the user interface of the product (RFC5646 language tag).")
    other = _simple_property('other', "Any other vendor- or product-specific descriptive or identifying information.")
    product = _simple_property('product', "The most common and recognizable title or name of the product.")
    sw_edition = _simple_property('sw_edition', "How the product is tailored to a particular market or class of end users.")
    target_hw = _simple_property('target_hw', "Instruction set architecture on which the product operates.")
    target_sw = _simple_property('target_sw', "Software computing environment within which the product operates.")
    update = _simple_property('update', "Vendor-specific update, service pack, or point release of the product.")
    v2_2 = _simple_property('v2_2', "The full CPE string in the CPE 2.2 format.")
    v2_3 = _simple_property('v2_3', "The full CPE string in the CPE 2.3 format.")
    vendor = _simple_property('vendor', "The person or organization that manufactured or created the product.")
    version = _simple_property('version', "Vendor-specific release version of the product.")

    del _simple_property

    @property
    def part(self):
        """A single letter code designating the platform part being identified."""
        return self._part

    @part.setter
    def part(self, part):
        # Required field: must be one of the CPE part codes
        # (o = OS, a = application, h = hardware).
        if part is None:
            raise ValueError("Invalid value for `part`, must not be `None`")  # noqa: E501
        allowed_values = ["o", "a", "h"]  # noqa: E501
        if part not in allowed_values:
            raise ValueError(
                "Invalid value for `part` ({0}), must be one of {1}"  # noqa: E501
                .format(part, allowed_values)
            )
        self._part = part

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Recursively serialize nested models inside lists and dicts.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr))
                  for attr in self.swagger_types}
        if issubclass(OperatingSystemCpe, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, OperatingSystemCpe) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 32.16122 | 258 | 0.604593 |
6cedabfe1e70d580ea447c58515c5b5c1265f293 | 30 | py | Python | btd6_memory_info/generated/System/Runtime/Remoting/Messaging/LogicalCallContext/logical_call_context.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/System/Runtime/Remoting/Messaging/LogicalCallContext/logical_call_context.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/System/Runtime/Remoting/Messaging/LogicalCallContext/logical_call_context.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | class LogicalCallContext: pass | 30 | 30 | 0.9 |
23b3b73ebf4d0935f37a073f255576ac3e641354 | 716 | py | Python | celery_janitor/backends/base.py | comandrei/celery-janitor | 5f1ad663958e24ccb3abceaa227a12da495b6ecb | [
"MIT"
] | null | null | null | celery_janitor/backends/base.py | comandrei/celery-janitor | 5f1ad663958e24ccb3abceaa227a12da495b6ecb | [
"MIT"
] | null | null | null | celery_janitor/backends/base.py | comandrei/celery-janitor | 5f1ad663958e24ccb3abceaa227a12da495b6ecb | [
"MIT"
] | null | null | null | class Queue(object):
def __init__(self, queue):
self._queue = queue
self.name = None
def delete(self):
raise NotImplementedError()
class BrokerBackend(object):
    """Base class for broker backends with a lazily-cached queue listing."""

    def __init__(self):
        # Cache for the queue listing; populated on first access.
        self._queues = None

    @property
    def queues(self):
        """All queues on the broker, fetched once and then cached."""
        if self._queues is None:
            self._queues = self._get_queues()
        return self._queues

    def _get_queues(self):
        """Fetch the queue listing from the broker — subclass responsibility."""
        raise NotImplementedError()

    def filter_queues(self, prefix=None):
        """Yield queues whose name starts with `prefix`.

        With a falsy prefix nothing matches (same as the original
        implementation, which never set its `skip` flag in that case).
        """
        return filter(
            lambda q: bool(prefix) and q.name.startswith(prefix),
            self.queues,
        )
| 21.69697 | 60 | 0.593575 |
a97f1ea166cde690cbf285cdd16974e29a30e3ae | 23,042 | py | Python | FC3_LightVLAD/frame_level_models.py | YichaoOU/DeepCats | 9b1909e3608ca358d6f82a7db7afb21a68cb5b76 | [
"MIT"
] | 1 | 2018-09-18T15:56:42.000Z | 2018-09-18T15:56:42.000Z | FC3_LightVLAD/frame_level_models.py | YichaoOU/DeepCats | 9b1909e3608ca358d6f82a7db7afb21a68cb5b76 | [
"MIT"
] | null | null | null | FC3_LightVLAD/frame_level_models.py | YichaoOU/DeepCats | 9b1909e3608ca358d6f82a7db7afb21a68cb5b76 | [
"MIT"
] | 1 | 2018-09-18T15:56:44.000Z | 2018-09-18T15:56:44.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a collection of models which operate on variable-length sequences.
"""
import math
import models
import video_level_models
import tensorflow as tf
import model_utils as utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
FLAGS = flags.FLAGS

# ---- NetVLAD variant selection ----
flags.DEFINE_bool("gating_remove_diag", False,
                  "Remove diag for self gating")
flags.DEFINE_bool("lightvlad", True,
                  "Light or full NetVLAD")
flags.DEFINE_bool("vlagd", False,
                  "vlagd of vlad")

# ---- Frame sampling and DBoF options ----
flags.DEFINE_integer("iterations", 30,
                     "Number of frames per batch for DBoF.")
flags.DEFINE_bool("dbof_add_batch_norm", True,
                  "Adds batch normalization to the DBoF model.")
flags.DEFINE_bool(
    "sample_random_frames", True,
    "If true samples random frames (for frame level models). If false, a random"
    "sequence of frames is sampled instead.")
flags.DEFINE_integer("dbof_cluster_size", 16384,
                     "Number of units in the DBoF cluster layer.")
flags.DEFINE_integer("dbof_hidden_size", 2048,
                     "Number of units in the DBoF hidden layer.")
flags.DEFINE_bool("dbof_relu", True, 'add ReLU to hidden layer')
flags.DEFINE_integer("dbof_var_features", 0,
                     "Variance features on top of Dbof cluster layer.")
flags.DEFINE_string("dbof_activation", "relu", 'dbof activation')
flags.DEFINE_bool("softdbof_maxpool", False, 'add max pool to soft dbof')

# ---- NetVLAD sizing ----
flags.DEFINE_integer("netvlad_cluster_size", 64,
                     "Number of units in the NetVLAD cluster layer.")
flags.DEFINE_bool("netvlad_relu", True, 'add ReLU to hidden layer')
flags.DEFINE_integer("netvlad_dimred", -1,
                     "NetVLAD output dimension reduction")
flags.DEFINE_integer("gatednetvlad_dimred", 1024,
                     "GatedNetVLAD output dimension reduction")
flags.DEFINE_integer("audio_cluster_size", 40,
                     "liyc added")
flags.DEFINE_bool("gating", True,
                  "Gating for NetVLAD")
flags.DEFINE_integer("hidden_size", 1024,
                     "size of hidden layer for BasicStatModel.")
flags.DEFINE_integer("hidden_size2", 1024,
                     "size of hidden layer for BasicStatModel.")
flags.DEFINE_integer("netvlad_hidden_size", 1024,
                     "Number of units in the NetVLAD hidden layer.")
flags.DEFINE_integer("netvlad_hidden_size2", 1024,
                     "Number of units in the NetVLAD hidden layer.")
flags.DEFINE_integer("netvlad_hidden_size3", 1024,
                     "Number of units in the NetVLAD hidden layer.")
flags.DEFINE_integer("netvlad_hidden_size_video", 1024,
                     "Number of units in the NetVLAD video hidden layer.")
flags.DEFINE_integer("netvlad_hidden_size_audio", 64,
                     "Number of units in the NetVLAD audio hidden layer.")
flags.DEFINE_bool("netvlad_add_batch_norm", True,
                  "Adds batch normalization to the DBoF model.")

# ---- NetFV (Fisher vector) options ----
flags.DEFINE_integer("fv_cluster_size", 64,
                     "Number of units in the NetVLAD cluster layer.")
flags.DEFINE_integer("fv_hidden_size", 2048,
                     "Number of units in the NetVLAD hidden layer.")
flags.DEFINE_bool("fv_relu", True,
                  "ReLU after the NetFV hidden layer.")
flags.DEFINE_bool("fv_couple_weights", True,
                  "Coupling cluster weights or not")
flags.DEFINE_float("fv_coupling_factor", 0.01,
                   "Coupling factor")

# ---- Pooling / classifier selection ----
flags.DEFINE_string("dbof_pooling_method", "max",
                    "The pooling method used in the DBoF cluster layer. "
                    "Choices are 'average' and 'max'.")
flags.DEFINE_string("video_level_classifier_model", "MoeModel",
                    "Some Frame-Level models can be decomposed into a "
                    "generalized pooling operation followed by a "
                    "classifier layer")

# ---- Recurrent model options (LSTM / GRU) ----
flags.DEFINE_integer("lstm_cells", 1024, "Number of LSTM cells.")
flags.DEFINE_integer("lstm_layers", 2, "Number of LSTM layers.")
flags.DEFINE_integer("lstm_cells_video", 1024, "Number of LSTM cells (video).")
flags.DEFINE_integer("lstm_cells_audio", 128, "Number of LSTM cells (audio).")
flags.DEFINE_integer("gru_cells", 1024, "Number of GRU cells.")
flags.DEFINE_integer("gru_cells_video", 1024, "Number of GRU cells (video).")
flags.DEFINE_integer("gru_cells_audio", 128, "Number of GRU cells (audio).")
flags.DEFINE_integer("gru_layers", 2, "Number of GRU layers.")
flags.DEFINE_bool("lstm_random_sequence", False,
                  "Random sequence input for lstm.")
flags.DEFINE_bool("gru_random_sequence", False,
                  "Random sequence input for gru.")
flags.DEFINE_bool("gru_backward", False, "BW reading for GRU")
flags.DEFINE_bool("lstm_backward", False, "BW reading for LSTM")
flags.DEFINE_bool("fc_dimred", True, "Adding FC dimred after pooling")
class LightVLAD():
    """Light NetVLAD pooling: soft-assigns frame features to clusters and
    pools a per-cluster weighted sum, skipping the residual subtraction
    of full NetVLAD.

    forward() takes [batch*max_frames, feature_size] and returns an
    L2-normalized [batch, cluster_size*feature_size] descriptor.
    """

    def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
        self.feature_size = feature_size
        self.max_frames = max_frames
        self.is_training = is_training
        self.add_batch_norm = add_batch_norm
        self.cluster_size = cluster_size

    def forward(self, reshaped_input):
        # Soft-assignment logits of every frame to every cluster.
        cluster_weights = tf.get_variable("cluster_weights",
              [self.feature_size, self.cluster_size],
              initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        activation = tf.matmul(reshaped_input, cluster_weights)
        if self.add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=self.is_training,
                scope="cluster_bn")
        else:
            # BUGFIX: this branch referenced an undefined local
            # `cluster_size` (NameError when add_batch_norm=False);
            # use the configured self.cluster_size.
            cluster_biases = tf.get_variable("cluster_biases",
                [self.cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
            tf.summary.histogram("cluster_biases", cluster_biases)
            activation += cluster_biases
        activation = tf.nn.softmax(activation)
        activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
        activation = tf.transpose(activation, perm=[0, 2, 1])

        reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
        # Per-cluster weighted sum of frame features.
        vlad = tf.matmul(activation, reshaped_input)
        vlad = tf.transpose(vlad, perm=[0, 2, 1])
        # Intra-normalize, flatten, then globally L2-normalize.
        vlad = tf.nn.l2_normalize(vlad, 1)
        vlad = tf.reshape(vlad, [-1, self.cluster_size * self.feature_size])
        vlad = tf.nn.l2_normalize(vlad, 1)
        return vlad
class NetVLAD():
    """Full NetVLAD pooling: soft-assigns frame features to clusters and
    aggregates residuals against learned cluster centers into an
    L2-normalized descriptor.

    forward() takes [batch*max_frames, feature_size] and returns
    [batch, cluster_size*feature_size].
    """

    def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
        self.feature_size = feature_size
        self.max_frames = max_frames
        self.is_training = is_training
        self.add_batch_norm = add_batch_norm
        self.cluster_size = cluster_size

    def forward(self, reshaped_input):
        # Soft-assignment logits of every frame to every cluster.
        cluster_weights = tf.get_variable("NetVLAD_cluster_weights",
              [self.feature_size, self.cluster_size],
              initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        tf.summary.histogram("NetVLAD_cluster_weights", cluster_weights)
        activation = tf.matmul(reshaped_input, cluster_weights)
        if self.add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=self.is_training,
                scope="cluster_bn")
        else:
            # BUGFIX: this branch referenced an undefined local
            # `cluster_size` (NameError when add_batch_norm=False);
            # use the configured self.cluster_size.
            cluster_biases = tf.get_variable("NetVLAD_cluster_biases",
                [self.cluster_size],
                initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
            tf.summary.histogram("NetVLAD_cluster_biases", cluster_biases)
            activation += cluster_biases
        activation = tf.nn.softmax(activation)
        tf.summary.histogram("cluster_output", activation)
        activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])

        # a = (total assignment mass per cluster) * (cluster centers):
        # the term subtracted below to turn sums into residuals.
        a_sum = tf.reduce_sum(activation, -2, keep_dims=True)
        cluster_weights2 = tf.get_variable("cluster_weights2",
              [1, self.feature_size, self.cluster_size],
              initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
        a = tf.multiply(a_sum, cluster_weights2)

        activation = tf.transpose(activation, perm=[0, 2, 1])
        reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
        vlad = tf.matmul(activation, reshaped_input)
        vlad = tf.transpose(vlad, perm=[0, 2, 1])
        vlad = tf.subtract(vlad, a)
        # Intra-normalize, flatten, then globally L2-normalize.
        vlad = tf.nn.l2_normalize(vlad, 1)
        vlad = tf.reshape(vlad, [-1, self.cluster_size * self.feature_size])
        vlad = tf.nn.l2_normalize(vlad, 1)
        return vlad
class NetVLADModelLF(models.BaseModel):
"""Creates a NetVLAD based model.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
def create_model(self,
model_input,
vocab_size,
num_frames,
iterations=None,
add_batch_norm=None,
sample_random_frames=None,
cluster_size=None,
hidden_size=None,
is_training=True,
**unused_params):
iterations = iterations or FLAGS.iterations
add_batch_norm = add_batch_norm or FLAGS.netvlad_add_batch_norm
random_frames = sample_random_frames or FLAGS.sample_random_frames
cluster_size = cluster_size or FLAGS.netvlad_cluster_size
hidden1_size = hidden_size or FLAGS.netvlad_hidden_size
hidden2_size = FLAGS.netvlad_hidden_size2
hidden3_size = FLAGS.netvlad_hidden_size3
relu = FLAGS.netvlad_relu
dimred = FLAGS.netvlad_dimred
gating = FLAGS.gating
remove_diag = FLAGS.gating_remove_diag
audio_size = FLAGS.audio_cluster_size
print "FLAGS.lightvlad",FLAGS.lightvlad
lightvlad = FLAGS.lightvlad
vlagd = FLAGS.vlagd
num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
print "num_frames:",num_frames
if random_frames:
model_input = utils.SampleRandomFrames(model_input, num_frames,
iterations)
else:
model_input = utils.SampleRandomSequence(model_input, num_frames,
iterations)
max_frames = model_input.get_shape().as_list()[1]
feature_size = model_input.get_shape().as_list()[2]
reshaped_input = tf.reshape(model_input, [-1, feature_size])
video_NetVLAD = LightVLAD(1024,max_frames,cluster_size, add_batch_norm, is_training)
audio_NetVLAD = LightVLAD(128,max_frames,audio_size, add_batch_norm, is_training)
# audio_NetVLAD2 = NetVLAD(128,max_frames,cluster_size/2, add_batch_norm)
if add_batch_norm:# and not lightvlad:
reshaped_input = slim.batch_norm(
reshaped_input,
center=True,
scale=True,
is_training=is_training,
scope="input_bn")
with tf.variable_scope("video_VLAD"):
vlad_video = video_NetVLAD.forward(reshaped_input[:,0:1024])
with tf.variable_scope("audio_VLAD"):
vlad_audio = audio_NetVLAD.forward(reshaped_input[:,1024:])
vlad = tf.concat([vlad_video, vlad_audio],1)
vlad_dim = vlad.get_shape().as_list()[1]
## begin FC layer
hidden1_weights = tf.get_variable("hidden1_weights",
[vlad_dim, hidden1_size],
initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(cluster_size)))
hidden2_weights = tf.get_variable("hidden2_weights",
[hidden1_size, hidden2_size],
initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(hidden1_size)))
hidden3_weights = tf.get_variable("hidden3_weights",
[hidden2_size, hidden3_size],
initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(hidden2_size)))
print "hidden2_size:",hidden2_size
print "hidden1_size:",hidden1_size
print "hidden3_size:",hidden3_size
print "vlad_dim:",vlad_dim
h_fc1 = tf.matmul(vlad, hidden1_weights)
h_fc1 = slim.batch_norm(
h_fc1,
center=True,
scale=True,
is_training=is_training,
scope="h_fc1_bn")
h_fc1 = tf.nn.relu6(h_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, 0.8)
h_fc2 = tf.matmul(h_fc1_drop, hidden2_weights)
h_fc2 = slim.batch_norm(
h_fc2,
center=True,
scale=True,
is_training=is_training,
scope="h_fc2_bn")
h_fc2 = tf.nn.relu6(h_fc2)
h_fc2_drop = tf.nn.dropout(h_fc2, 0.8)
h_fc3 = tf.matmul(h_fc2_drop, hidden3_weights)
h_fc3 = slim.batch_norm(
h_fc3,
center=True,
scale=True,
is_training=is_training,
scope="h_fc3_bn")
activation = tf.nn.relu6(h_fc3)
# end FC layer
if gating:
gating_weights = tf.get_variable("gating_weights_2",
[hidden3_size, hidden3_size],
initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(hidden3_size)))
gates = tf.matmul(activation, gating_weights)
if remove_diag:
#removes diagonals coefficients
diagonals = tf.matrix_diag_part(gating_weights)
gates = gates - tf.multiply(diagonals,activation)
if add_batch_norm:
gates = slim.batch_norm(
gates,
center=True,
scale=True,
is_training=is_training,
scope="gating_bn")
else:
gating_biases = tf.get_variable("gating_biases",
[cluster_size],
initializer = tf.random_normal(stddev=1 / math.sqrt(feature_size)))
gates += gating_biases
gates = tf.sigmoid(gates)
activation = tf.multiply(activation,gates)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=activation,
vocab_size=vocab_size,
is_training=is_training,
**unused_params)
class NetVLADModelLF2(models.BaseModel):
"""Creates a NetVLAD based model.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
def create_model(self,
model_input,
vocab_size,
num_frames,
iterations=None,
add_batch_norm=None,
sample_random_frames=None,
cluster_size=None,
hidden_size=None,
**unused_params):
iterations = iterations or FLAGS.iterations
add_batch_norm = add_batch_norm or FLAGS.netvlad_add_batch_norm
random_frames = sample_random_frames or FLAGS.sample_random_frames
cluster_size = cluster_size or FLAGS.netvlad_cluster_size
hidden1_size = hidden_size or FLAGS.netvlad_hidden_size
relu = FLAGS.netvlad_relu
dimred = FLAGS.netvlad_dimred
gating = FLAGS.gating
remove_diag = FLAGS.gating_remove_diag
print "FLAGS.lightvlad",FLAGS.lightvlad
lightvlad = FLAGS.lightvlad
vlagd = FLAGS.vlagd
num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
print "num_frames:",num_frames
if random_frames:
model_input = utils.SampleRandomFrames(model_input, num_frames,
iterations)
else:
model_input = utils.SampleRandomSequence(model_input, num_frames,
iterations)
max_frames = model_input.get_shape().as_list()[1]
feature_size = model_input.get_shape().as_list()[2]
reshaped_input = tf.reshape(model_input, [-1, feature_size])
if lightvlad:
video_NetVLAD = LightVLAD(1024,max_frames,cluster_size, add_batch_norm)
audio_NetVLAD = LightVLAD(128,max_frames,cluster_size/2, add_batch_norm)
if add_batch_norm:# and not lightvlad:
reshaped_input = slim.batch_norm(
reshaped_input,
center=True,
scale=True,
scope="input_bn")
with tf.variable_scope("video_VLAD"):
vlad_video = video_NetVLAD.forward(reshaped_input[:,0:1024])
with tf.variable_scope("audio_VLAD"):
vlad_audio = audio_NetVLAD.forward(reshaped_input[:,1024:])
vlad = tf.concat([vlad_video, vlad_audio],1)
vlad_dim = vlad.get_shape().as_list()[1]
hidden1_weights = tf.get_variable("hidden1_weights",
[vlad_dim, hidden1_size],
initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(cluster_size)))
activation = tf.matmul(vlad, hidden1_weights)
if add_batch_norm and relu:
activation = slim.batch_norm(
activation,
center=True,
scale=True,
scope="hidden1_bn")
else:
hidden1_biases = tf.get_variable("hidden1_biases",
[hidden1_size],
initializer = tf.random_normal_initializer(stddev=0.01))
tf.summary.histogram("hidden1_biases", hidden1_biases)
activation += hidden1_biases
if relu:
activation = tf.nn.relu6(activation)
if gating:
gating_weights = tf.get_variable("gating_weights_2",
[hidden1_size, hidden1_size],
initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(hidden1_size)))
gates = tf.matmul(activation, gating_weights)
if remove_diag:
#removes diagonals coefficients
diagonals = tf.matrix_diag_part(gating_weights)
gates = gates - tf.multiply(diagonals,activation)
if add_batch_norm:
gates = slim.batch_norm(
gates,
center=True,
scale=True,
scope="gating_bn")
else:
gating_biases = tf.get_variable("gating_biases",
[cluster_size],
initializer = tf.random_normal(stddev=1 / math.sqrt(feature_size)))
gates += gating_biases
gates = tf.sigmoid(gates)
activation = tf.multiply(activation,gates)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=activation,
vocab_size=vocab_size,
**unused_params)
class FrameLevelLogisticModel(models.BaseModel):

  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Creates a model which uses a logistic classifier over the average of the
    frame-level features.

    This class is intended to be an example for implementors of frame level
    models. If you want to train a model over averaged features it is more
    efficient to average them beforehand rather than on the fly.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
                  frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    feature_size = model_input.get_shape().as_list()[2]
    # Per-video frame counts, broadcast to one count per feature column so we
    # can divide the summed features elementwise.
    frame_counts = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    per_feature_counts = tf.reshape(
        tf.tile(frame_counts, [1, feature_size]), [-1, feature_size])
    # Mean over the (unpadded) frames of each video.
    mean_features = tf.reduce_sum(model_input, axis=[1]) / per_feature_counts
    # Independent sigmoid per class, lightly L2-regularized.
    predictions = slim.fully_connected(
        mean_features, vocab_size, activation_fn=tf.nn.sigmoid,
        weights_regularizer=slim.l2_regularizer(1e-8))
    return {"predictions": predictions}
class LstmModel(models.BaseModel):

  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Creates a model which uses a stack of LSTMs to represent the video.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
                  frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    lstm_size = FLAGS.lstm_cells
    number_of_layers = FLAGS.lstm_layers

    stacked_lstm = tf.contrib.rnn.MultiRNNCell(
        [
            tf.contrib.rnn.BasicLSTMCell(
                lstm_size, forget_bias=1.0)
            for _ in range(number_of_layers)
        ])

    # Only the final state feeds the classifier; the per-step outputs are
    # discarded.  (Removed a dead 'loss = 0.0' local from the original.)
    _, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,
                                 sequence_length=num_frames,
                                 dtype=tf.float32)

    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)

    # state[-1].h is the hidden state of the top LSTM layer.
    return aggregated_model().create_model(
        model_input=state[-1].h,
        vocab_size=vocab_size,
        **unused_params)
| 35.178626 | 98 | 0.650334 |
92b064865a4701456b1a41d0cb07f6ce5f9124fe | 490 | py | Python | Mundo 1/Aula10.Ex29.py | uirasiqueira/Exercicios_Python | 409b7be9cf278e3043149654de7b41be56a3d951 | [
"MIT"
] | null | null | null | Mundo 1/Aula10.Ex29.py | uirasiqueira/Exercicios_Python | 409b7be9cf278e3043149654de7b41be56a3d951 | [
"MIT"
] | null | null | null | Mundo 1/Aula10.Ex29.py | uirasiqueira/Exercicios_Python | 409b7be9cf278e3043149654de7b41be56a3d951 | [
"MIT"
] | null | null | null | ''' Escreva um programa que leia a velocidade de um carro.
Se ele ultrapassar 80Km/h, mostre uma mensagem dizendo que ele foi multado.
A multa vai custar R$ 7,00 por cada Km acima do limite'''
v = float(input('Qual a velocidade atual do seu carro? '))
if v > 80:
n = (v - 80)*7
print(f'MULTADO!!!! A velocidade maxima permitida e de 80Km. \nA sua multa sera no valor de {n:.2f} euros!')
print('Tenha um bom dia e dirija com segurança.\n =====LEMBRE-SE, se beber não dirija!!=====') | 54.444444 | 112 | 0.685714 |
a1c1951566833e0e755955e299855df11c02295f | 2,602 | py | Python | src/generate_feature9.py | drivendata/countable-care-3rd-place | d1bba2f09ba0196cc3f35d2a41ea93bfbc4086a2 | [
"MIT"
] | 2 | 2020-06-26T12:00:28.000Z | 2021-09-20T19:28:24.000Z | src/generate_feature9.py | drivendata/countable-care-3rd-place | d1bba2f09ba0196cc3f35d2a41ea93bfbc4086a2 | [
"MIT"
] | null | null | null | src/generate_feature9.py | drivendata/countable-care-3rd-place | d1bba2f09ba0196cc3f35d2a41ea93bfbc4086a2 | [
"MIT"
] | 2 | 2019-05-16T17:40:03.000Z | 2021-09-20T19:28:25.000Z | #!/usr/bin/env python
from scipy import sparse
from sklearn.datasets import dump_svmlight_file
from sklearn.preprocessing import LabelEncoder
import argparse
import logging
import numpy as np
import os
import pandas as pd
from kaggler.util import encode_categorical_features, normalize_numerical_feature
# Configure the root logger once at import time: timestamped messages,
# DEBUG level and above.
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                    level=logging.DEBUG)
def generate_feature(train_file, label_file, test_file, feature_dir,
                     feature_name):
    """Label-encode the input CSVs and dump one svmlight file per target column.

    Args:
        train_file: path to the training CSV (first column is the index).
        label_file: path to the CSV of (possibly multi-column) targets,
            aligned with the training rows.
        test_file: path to the test CSV (first column is the index).
        feature_dir: directory where the ``.sps`` feature files are written.
        feature_name: prefix used in the output file names.
    """
    # Load data files
    logging.info('Loading training and test data')
    trn = pd.read_csv(train_file, index_col=0)
    tst = pd.read_csv(test_file, index_col=0)
    label = pd.read_csv(label_file, index_col=0)

    n_trn = trn.shape[0]
    n_tst = tst.shape[0]

    lbl_enc = LabelEncoder()
    # Fixed: fit the encoder on the union of train and test values so both
    # splits share a single code per release value.  The original called
    # fit_transform separately on each split, which can assign different
    # integers to the same release in train vs. test.
    lbl_enc.fit(pd.concat([trn.release, tst.release]).values)
    trn['release'] = lbl_enc.transform(trn.release.values)
    tst['release'] = lbl_enc.transform(tst.release.values)

    logging.info('Combining training and test data')
    df = pd.concat([trn, tst], ignore_index=True)
    df.fillna(-1, inplace=True)

    cols = list(df.columns)
    # Columns whose names start with 'c' are treated as categorical.
    cat_cols = [x for x in cols if x[0] == 'c']

    # Integer-encode categorical variables (fit on the combined frame, so
    # train and test codes agree).
    logging.info('One-hot-encoding categorical columns')
    for col in cat_cols:
        df[col] = lbl_enc.fit_transform(df[col].values)

    logging.info('Saving features into {}'.format(feature_dir))
    for i in range(label.shape[1]):
        train_feature_file = os.path.join(feature_dir, '{}.trn{:02d}.sps'.format(feature_name, i))
        test_feature_file = os.path.join(feature_dir, '{}.tst{:02d}.sps'.format(feature_name, i))
        # Fixed: .ix is removed in modern pandas; .iloc is the positional
        # equivalent and exists in all supported versions.
        dump_svmlight_file(df.values[:n_trn], label.iloc[:, i], train_feature_file,
                           zero_based=False)
        # Test rows get dummy zero labels -- svmlight files require a label.
        dump_svmlight_file(df.values[n_trn:], np.zeros((n_tst,)), test_feature_file,
                           zero_based=False)
if __name__ == '__main__':
    # Command-line interface: every flag is mandatory.
    parser = argparse.ArgumentParser()
    for flag, dest in (('--train-file', 'train'),
                       ('--label-file', 'label'),
                       ('--test-file', 'test'),
                       ('--feature-dir', 'feature_dir'),
                       ('--feature-name', 'feature_name')):
        parser.add_argument(flag, required=True, dest=dest)
    opts = parser.parse_args()

    generate_feature(train_file=opts.train,
                     label_file=opts.label,
                     test_file=opts.test,
                     feature_dir=opts.feature_dir,
                     feature_name=opts.feature_name)
| 35.643836 | 98 | 0.671022 |
94d99a82ec553099faa9ccff561d31129c0203ed | 57,746 | py | Python | src/sage/schemes/elliptic_curves/isogeny_class.py | yzpopulation/sage | d2dc2f80b5a8e039701e292653e25366e3e5ec1e | [
"BSL-1.0"
] | null | null | null | src/sage/schemes/elliptic_curves/isogeny_class.py | yzpopulation/sage | d2dc2f80b5a8e039701e292653e25366e3e5ec1e | [
"BSL-1.0"
] | null | null | null | src/sage/schemes/elliptic_curves/isogeny_class.py | yzpopulation/sage | d2dc2f80b5a8e039701e292653e25366e3e5ec1e | [
"BSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
Isogeny class of elliptic curves over number fields
AUTHORS:
- David Roe (2012-03-29) -- initial version.
- John Cremona (2014-08) -- extend to number fields.
"""
##############################################################################
# Copyright (C) 2012-2014 David Roe <roed.math@gmail.com>
# John Cremona <john.cremona@gmail.com>
# William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
##############################################################################
from sage.structure.sage_object import SageObject
from sage.structure.richcmp import richcmp_method, richcmp
import sage.databases.cremona
from sage.rings.all import ZZ, QQ
from sage.misc.all import flatten, cached_method
from sage.schemes.elliptic_curves.ell_field import EllipticCurve_field
from sage.schemes.elliptic_curves.ell_number_field import EllipticCurve_number_field
@richcmp_method
class IsogenyClass_EC(SageObject):
    r"""
    Isogeny class of an elliptic curve.

    The class stores a tuple ``self.curves`` of representative curves (one
    per isomorphism class), a matrix ``self._mat`` of isogeny degrees, an
    array ``self._maps`` of the isogenies themselves, and (in the rational
    CM case) an array ``self._qfmat`` of quadratic forms.  These are filled
    in by ``self._compute()``, which subclasses implement.

    .. note::

        The current implementation chooses a curve from each isomorphism
        class in the isogeny class. Over `\QQ` this is a unique reduced
        minimal model in each isomorphism class. Over number fields the
        model chosen may change in future.
    """

    def __init__(self, E, label=None, empty=False):
        r"""
        Set up the class for the elliptic curve ``E``.

        Over `\QQ` we use curves since minimal models exist and there
        is a canonical choice of one.

        INPUT:

        - ``E`` -- an elliptic curve whose isogeny class is to be computed.

        - ``label`` -- string or ``None``, a Cremona or LMFDB label, used
          in printing.  Ignored if base field is not `\QQ`.

        - ``empty`` -- boolean (default ``False``); if ``True`` the actual
          computation of the class is skipped (used internally, e.g. when
          copying or reordering an existing class).

        EXAMPLES::

            sage: cls = EllipticCurve('1011b1').isogeny_class()
            sage: print("\n".join(repr(E) for E in cls.curves))
            Elliptic Curve defined by y^2 + x*y = x^3 - 8*x - 9 over Rational Field
            Elliptic Curve defined by y^2 + x*y = x^3 - 23*x + 30 over Rational Field
        """
        self.E = E
        self._label = label
        if not empty:
            self._compute()

    def __len__(self):
        """
        The number of curves in the class.

        EXAMPLES::

            sage: E = EllipticCurve('15a')
            sage: len(E.isogeny_class()) # indirect doctest
            8
        """
        return len(self.curves)

    def __iter__(self):
        """
        Iterator over curves in the class.

        EXAMPLES::

            sage: E = EllipticCurve('15a')
            sage: all(C.conductor() == 15 for C in E.isogeny_class()) # indirect doctest
            True
        """
        return iter(self.curves)

    def __getitem__(self, i):
        """
        Return the `i`th curve in the class.

        EXAMPLES::

            sage: E = EllipticCurve('990j1')
            sage: iso = E.isogeny_class(order="lmfdb") # orders lexicographically on a-invariants
            sage: iso[2] == E # indirect doctest
            True
        """
        return self.curves[i]

    def index(self, C):
        """
        Return the index of a curve in this class.

        INPUT:

        - ``C`` -- an elliptic curve in this isogeny class.

        OUTPUT:

        - ``i`` -- an integer so that the ``i`` th curve in the class
          is isomorphic to ``C``

        EXAMPLES::

            sage: E = EllipticCurve('990j1')
            sage: iso = E.isogeny_class(order="lmfdb") # orders lexicographically on a-invariants
            sage: iso.index(E.short_weierstrass_model())
            2
        """
        # This will need updating once we start talking about curves
        # over more general number fields
        if not isinstance(C, EllipticCurve_number_field):
            raise ValueError("x not in isogeny class")
        # Linear search up to isomorphism (not equality), so any model of a
        # curve in the class is found.
        for i, E in enumerate(self.curves):
            if C.is_isomorphic(E):
                return i
        raise ValueError("%s is not in isogeny class %s" % (C,self))

    def __richcmp__(self, other, op):
        """
        Compare ``self`` and ``other``.

        If they are different, compares the sorted underlying lists of
        curves.

        Note that two isogeny classes with different orderings will
        compare as the same.  If you want to include the ordering,
        just compare the list of curves.

        EXAMPLES::

            sage: E = EllipticCurve('990j1')
            sage: EE = EllipticCurve('990j4')
            sage: E.isogeny_class() == EE.isogeny_class() # indirect doctest
            True
        """
        if isinstance(other, IsogenyClass_EC):
            # Compare order-insensitively via sorted a-invariant tuples.
            return richcmp(sorted(e.a_invariants() for e in self.curves),
                           sorted(f.a_invariants() for f in other.curves), op)
        return NotImplemented

    def __hash__(self):
        """
        Hash is based on the a-invariants of the sorted list of
        minimal models.

        EXAMPLES::

            sage: E = EllipticCurve('990j1')
            sage: C = E.isogeny_class()
            sage: hash(C) == hash(tuple(sorted([curve.a_invariants() for curve in C.curves]))) # indirect doctest
            True
        """
        # Cache the hash on first use; consistent with __richcmp__ above.
        try:
            return self._hash
        except AttributeError:
            self._hash = hash(tuple(sorted(E.a_invariants() for E in self.curves)))
            return self._hash

    def _repr_(self):
        r"""
        The string representation of this isogeny class.

        .. note::

            Over `\QQ`, the string representation depends on whether an
            LMFDB or Cremona label for the curve is known when this
            isogeny class is constructed.  Over general number fields,
            instead of labels the representation uses that of the curve
            initially used to create the class.

        EXAMPLES:

        If the curve is constructed from an LMFDB label then that
        label is used::

            sage: E = EllipticCurve('462.f3')
            sage: E.isogeny_class() # indirect doctest
            Elliptic curve isogeny class 462.f

        If the curve is constructed from a Cremona label then that
        label is used::

            sage: E = EllipticCurve('990j1')
            sage: E.isogeny_class()
            Elliptic curve isogeny class 990j

        Otherwise, including curves whose base field is not `\QQ`, the
        representation is determined from the curve used to create the
        class::

            sage: E = EllipticCurve([1,2,3,4,5])
            sage: E.isogeny_class()
            Isogeny class of Elliptic Curve defined by y^2 + x*y + 3*y = x^3 + 2*x^2 + 4*x + 5 over Rational Field

            sage: K.<i> = QuadraticField(-1)
            sage: E = EllipticCurve(K, [0,0,0,0,1])
            sage: C = E.isogeny_class()
            sage: C
            Isogeny class of Elliptic Curve defined by y^2 = x^3 + 1 over Number Field in i with defining polynomial x^2 + 1 with i = 1*I
        """
        if self._label:
            return "Elliptic curve isogeny class %s"%(self._label)
        else:
            return "Isogeny class of %r"%(self.E)

    def __contains__(self, x):
        """
        INPUT:

        - ``x`` -- a Python object.

        OUTPUT:

        - boolean -- ``True`` iff ``x`` is an elliptic curve in this
          isogeny class.

        .. note::

            If the input is isomorphic but not identical to a curve in
            the class, then ``False`` will be returned.

        EXAMPLES::

            sage: cls = EllipticCurve('15a3').isogeny_class()
            sage: E = EllipticCurve('15a7'); E in cls
            True
            sage: E.short_weierstrass_model() in cls
            True
        """
        if not isinstance(x, EllipticCurve_field):
            return False
        # Despite the note above, membership is tested up to isomorphism.
        return any(x.is_isomorphic(y) for y in self.curves)

    @cached_method
    def matrix(self, fill=True):
        """
        Return the matrix whose entries give the minimal degrees of
        isogenies between curves in this class.

        INPUT:

        - ``fill`` -- boolean (default ``True``).  If ``False`` then the
          matrix will contain only zeros and prime entries; if ``True`` it
          will fill in the other degrees.

        EXAMPLES::

            sage: isocls = EllipticCurve('15a3').isogeny_class()
            sage: isocls.matrix()
            [ 1  2  2  2  4  4  8  8]
            [ 2  1  4  4  8  8 16 16]
            [ 2  4  1  4  8  8 16 16]
            [ 2  4  4  1  2  2  4  4]
            [ 4  8  8  2  1  4  8  8]
            [ 4  8  8  2  4  1  2  2]
            [ 8 16 16  4  8  2  1  4]
            [ 8 16 16  4  8  2  4  1]
            sage: isocls.matrix(fill=False)
            [0 2 2 2 0 0 0 0]
            [2 0 0 0 0 0 0 0]
            [2 0 0 0 0 0 0 0]
            [2 0 0 0 2 2 0 0]
            [0 0 0 2 0 0 0 0]
            [0 0 0 2 0 0 2 2]
            [0 0 0 0 0 2 0 0]
            [0 0 0 0 0 2 0 0]
        """
        if self._mat is None:
            self._compute_matrix()
        mat = self._mat
        # The cached matrix may be in either form; the (0,0) entry tells us
        # which (0 = unfilled, 1 = filled), and we convert if necessary.
        if fill and mat[0, 0] == 0:
            from sage.schemes.elliptic_curves.ell_curve_isogeny import fill_isogeny_matrix
            mat = fill_isogeny_matrix(mat)
        if not fill and mat[0, 0] == 1:
            from sage.schemes.elliptic_curves.ell_curve_isogeny import unfill_isogeny_matrix
            mat = unfill_isogeny_matrix(mat)
        return mat

    @cached_method
    def qf_matrix(self):
        """
        Return the array whose entries are quadratic forms
        representing the degrees of isogenies between curves in this
        class (CM case only).

        OUTPUT:

        a `2x2` array (list of lists) of list, each of the form [2] or
        [2,1,3] representing the coefficients of an integral quadratic
        form in 1 or 2 variables whose values are the possible isogeny
        degrees between the i'th and j'th curve in the class.

        EXAMPLES::

            sage: pol = PolynomialRing(QQ,'x')([1,0,3,0,1])
            sage: K.<c> = NumberField(pol)
            sage: j = 1480640+565760*c^2
            sage: E = EllipticCurve(j=j)
            sage: C = E.isogeny_class()
            sage: C.qf_matrix()
            [[[1], [2, 2, 3]], [[2, 2, 3], [1]]]
        """
        if self._qfmat is None:
            raise ValueError("qf_matrix only defined for isogeny classes with rational CM")
        else:
            return self._qfmat

    @cached_method
    def isogenies(self, fill=False):
        r"""
        Return a list of lists of isogenies and 0s, corresponding to
        the entries of :meth:`matrix`

        INPUT:

        - ``fill`` -- boolean (default ``False``).  Whether to only return
          prime degree isogenies.  Currently only implemented for
          ``fill=False``.

        OUTPUT:

        - a list of lists, where the ``j`` th entry of the ``i`` th list
          is either zero or a prime degree isogeny from the ``i`` th curve
          in this class to the ``j`` th curve.

        .. WARNING::

            The domains and codomains of the isogenies will have the same
            Weierstrass equation as the curves in this class, but they
            may not be identical python objects in the current
            implementation.

        EXAMPLES::

            sage: isocls = EllipticCurve('15a3').isogeny_class()
            sage: f = isocls.isogenies()[0][1]; f
            Isogeny of degree 2 from Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 5*x + 2 over Rational Field to Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 80*x + 242 over Rational Field
            sage: f.domain() == isocls.curves[0] and f.codomain() == isocls.curves[1]
            True
        """
        if fill:
            raise NotImplementedError
        isogenies = self._maps
        if isogenies is None:
            self._compute_isogenies()
            isogenies = self._maps
        return isogenies

    @cached_method
    def graph(self):
        r"""
        Return a graph whose vertices correspond to curves in this
        class, and whose edges correspond to prime degree isogenies.

        .. note::

            There are only finitely many possible isogeny graphs for
            curves over `\QQ` [Maz1978b].  This function tries to lay out
            the graph nicely by special casing each isogeny graph.
            This could also be done over other number fields, such as
            quadratic fields.

        .. note::

            The vertices are labeled 1 to n rather than 0 to n-1 to
            match LMFDB and Cremona labels for curves over `\QQ`.

        EXAMPLES::

            sage: isocls = EllipticCurve('15a3').isogeny_class()
            sage: G = isocls.graph()
            sage: sorted(G._pos.items())
            [(1, [-0.8660254, 0.5]), (2, [-0.8660254, 1.5]), (3, [-1.7320508, 0]), (4, [0, 0]), (5, [0, -1]), (6, [0.8660254, 0.5]), (7, [0.8660254, 1.5]), (8, [1.7320508, 0])]
        """
        from sage.graphs.graph import Graph

        if not self.E.base_field() is QQ:
            # Number-field case: no special-cased layout; CM classes get
            # quadratic-form edge labels instead of plain degrees.
            M = self.matrix(fill = False)
            n = len(self)
            G = Graph(M, format='weighted_adjacency_matrix')
            D = dict([(v,self.curves[v]) for v in G.vertices()])
            G.set_vertices(D)
            if self._qfmat: # i.e. self.E.has_rational_cm():
                for i in range(n):
                    for j in range(n):
                        if M[i,j]:
                            G.set_edge_label(i,j,str(self._qfmat[i][j]))
            G.relabel(list(range(1, n + 1)))
            return G

        M = self.matrix(fill = False)
        n = M.nrows() # = M.ncols()
        G = Graph(M, format='weighted_adjacency_matrix')
        N = self.matrix(fill = True)
        D = dict([(v,self.curves[v]) for v in G.vertices()])
        # The maximum degree classifies the shape of the isogeny
        # graph, though the number of vertices is often enough.
        # This only holds over Q, so this code will need to change
        # once other isogeny classes are implemented.
        if n == 1:
            # one vertex
            pass
        elif n == 2:
            # one edge, two vertices.  We align horizontally and put
            # the lower number on the left vertex.
            G.set_pos(pos={0:[-0.5,0],1:[0.5,0]})
        else:
            maxdegree = max(max(N))
            if n == 3:
                # o--o--o
                centervert = [i for i in range(3) if max(N.row(i)) < maxdegree][0]
                other = [i for i in range(3) if i != centervert]
                G.set_pos(pos={centervert:[0,0],other[0]:[-1,0],other[1]:[1,0]})
            elif maxdegree == 4:
                # o--o<8
                centervert = [i for i in range(4) if max(N.row(i)) < maxdegree][0]
                other = [i for i in range(4) if i != centervert]
                G.set_pos(pos={centervert:[0,0],other[0]:[0,1],other[1]:[-0.8660254,-0.5],other[2]:[0.8660254,-0.5]})
            elif maxdegree == 27:
                # o--o--o--o
                centers = [i for i in range(4) if list(N.row(i)).count(3) == 2]
                left = [j for j in range(4) if N[centers[0],j] == 3 and j not in centers][0]
                right = [j for j in range(4) if N[centers[1],j] == 3 and j not in centers][0]
                G.set_pos(pos={left:[-1.5,0],centers[0]:[-0.5,0],centers[1]:[0.5,0],right:[1.5,0]})
            elif n == 4:
                # square
                opp = [i for i in range(1,4) if not N[0,i].is_prime()][0]
                other = [i for i in range(1,4) if i != opp]
                G.set_pos(pos={0:[1,1],other[0]:[-1,1],opp:[-1,-1],other[1]:[1,-1]})
            elif maxdegree == 8:
                # 8>o--o<8
                centers = [i for i in range(6) if list(N.row(i)).count(2) == 3]
                left = [j for j in range(6) if N[centers[0],j] == 2 and j not in centers]
                right = [j for j in range(6) if N[centers[1],j] == 2 and j not in centers]
                G.set_pos(pos={centers[0]:[-0.5,0],left[0]:[-1,0.8660254],left[1]:[-1,-0.8660254],centers[1]:[0.5,0],right[0]:[1,0.8660254],right[1]:[1,-0.8660254]})
            elif maxdegree == 18:
                # two squares joined on an edge
                centers = [i for i in range(6) if list(N.row(i)).count(3) == 2]
                top = [j for j in range(6) if N[centers[0],j] == 3]
                bl = [j for j in range(6) if N[top[0],j] == 2][0]
                br = [j for j in range(6) if N[top[1],j] == 2][0]
                G.set_pos(pos={centers[0]:[0,0.5],centers[1]:[0,-0.5],top[0]:[-1,0.5],top[1]:[1,0.5],bl:[-1,-0.5],br:[1,-0.5]})
            elif maxdegree == 16:
                # tree from bottom, 3 regular except for the leaves.
                centers = [i for i in range(8) if list(N.row(i)).count(2) == 3]
                center = [i for i in centers if len([j for j in centers if N[i,j] == 2]) == 2][0]
                centers.remove(center)
                bottom = [j for j in range(8) if N[center,j] == 2 and j not in centers][0]
                left = [j for j in range(8) if N[centers[0],j] == 2 and j != center]
                right = [j for j in range(8) if N[centers[1],j] == 2 and j != center]
                G.set_pos(pos={center:[0,0],bottom:[0,-1],centers[0]:[-0.8660254,0.5],centers[1]:[0.8660254,0.5],left[0]:[-0.8660254,1.5],right[0]:[0.8660254,1.5],left[1]:[-1.7320508,0],right[1]:[1.7320508,0]})
            elif maxdegree == 12:
                # tent
                centers = [i for i in range(8) if list(N.row(i)).count(2) == 3]
                left = [j for j in range(8) if N[centers[0],j] == 2]
                right = []
                for i in range(3):
                    right.append([j for j in range(8) if N[centers[1],j] == 2 and N[left[i],j] == 3][0])
                G.set_pos(pos={centers[0]:[-0.75,0],centers[1]:[0.75,0],left[0]:[-0.75,1],right[0]:[0.75,1],left[1]:[-1.25,-0.75],right[1]:[0.25,-0.75],left[2]:[-0.25,-0.25],right[2]:[1.25,-0.25]})
        G.set_vertices(D)
        # Relabel 0..n-1 -> 1..n to match LMFDB/Cremona numbering.
        G.relabel(list(range(1, n + 1)))
        return G

    @cached_method
    def reorder(self, order):
        r"""
        Return a new isogeny class with the curves reordered.

        INPUT:

        - ``order`` -- None, a string or an iterable over all curves
          in this class.  See
          :meth:`sage.schemes.elliptic_curves.ell_rational_field.EllipticCurve_rational_field.isogeny_class`
          for more details.

        OUTPUT:

        - Another :class:`IsogenyClass_EC` with the curves reordered
          (and matrices and maps changed as appropriate)

        EXAMPLES::

            sage: isocls = EllipticCurve('15a1').isogeny_class()
            sage: print("\n".join(repr(C) for C in isocls.curves))
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 10*x - 10 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 5*x + 2 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 + 35*x - 28 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 135*x - 660 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 80*x + 242 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 110*x - 880 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 2160*x - 39540 over Rational Field
            sage: isocls2 = isocls.reorder('lmfdb')
            sage: print("\n".join(repr(C) for C in isocls2.curves))
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 2160*x - 39540 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 135*x - 660 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 110*x - 880 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 80*x + 242 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 10*x - 10 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 5*x + 2 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 over Rational Field
            Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 + 35*x - 28 over Rational Field
        """
        if order is None or isinstance(order, str) and order == self._algorithm:
            return self
        if isinstance(order, str):
            if order == "lmfdb":
                reordered_curves = sorted(self.curves, key = lambda E: E.a_invariants())
            else:
                reordered_curves = list(self.E.isogeny_class(algorithm=order))
        elif isinstance(order, (list, tuple, IsogenyClass_EC)):
            reordered_curves = list(order)
            if len(reordered_curves) != len(self.curves):
                raise ValueError("Incorrect length")
        else:
            raise TypeError("order parameter should be a string, list of curves or isogeny class")
        # Only track the permutation if the degree matrix was computed, since
        # it is only needed to conjugate the matrix / permute the maps.
        need_perm = self._mat is not None
        cpy = self.copy()
        curves = []
        perm = []
        for E in reordered_curves:
            try:
                j = self.curves.index(E)
            except ValueError:
                try:
                    # Allow non-minimal models of curves in the class.
                    j = self.curves.index(E.minimal_model())
                except ValueError:
                    raise ValueError("order does not yield a permutation of curves")
            curves.append(self.curves[j])
            if need_perm:
                # SymmetricGroup elements are 1-based, hence j+1.
                perm.append(j+1)
        cpy.curves = tuple(curves)
        if need_perm:
            from sage.groups.perm_gps.permgroup_named import SymmetricGroup
            perm = SymmetricGroup(len(self.curves))(perm)
            # Conjugate the degree matrix by the permutation matrix, and
            # permute both dimensions of the maps array accordingly.
            cpy._mat = perm.matrix() * self._mat * (~perm).matrix()
            if self._maps is not None:
                n = len(self._maps)
                cpy._maps = [self._maps[perm(i+1)-1] for i in range(n)]
                for i in range(n):
                    cpy._maps[i] = [cpy._maps[i][perm(jj + 1)-1]
                                    for jj in range(n)]
        else:
            cpy._mat = None
            cpy._maps = None
        return cpy
class IsogenyClass_EC_NumberField(IsogenyClass_EC):
    """
    Isogeny classes for elliptic curves over number fields.
    """
    def __init__(self, E, reducible_primes=None, algorithm='Billerey',
                 minimal_models=True):
        r"""
        INPUT:

        - ``E`` -- an elliptic curve over a number field.

        - ``reducible_primes`` (list of ints, or ``None`` (default)) -- if
          not ``None`` then this should be a list of primes; in computing
          the isogeny class, only composites isogenies of these
          degrees will be used.

        - ``algorithm`` (string, default 'Billerey') -- the algorithm
          to use to compute the reducible primes.  Ignored for CM
          curves or if ``reducible_primes`` is provided.  Values are
          'Billerey' (default), 'Larson', and 'heuristic'.

        - ``minimal_models`` (bool, default ``True``) -- if ``True``,
          all curves in the class will be minimal or semi-minimal
          models.  Over fields of larger degree it can be expensive to
          compute these so set to ``False``.

        EXAMPLES::

            sage: K.<i> = QuadraticField(-1)
            sage: E = EllipticCurve(K, [0,0,0,0,1])
            sage: C = E.isogeny_class(); C
            Isogeny class of Elliptic Curve defined by y^2 = x^3 + 1 over Number Field in i with defining polynomial x^2 + 1 with i = 1*I

        The curves in the class (sorted)::

            sage: [E1.ainvs() for E1 in C]
            [(0, 0, 0, 0, -27),
            (0, 0, 0, 0, 1),
            (i + 1, i, i + 1, -i + 3, 4*i),
            (i + 1, i, i + 1, -i + 33, -58*i)]

        The matrix of degrees of cyclic isogenies between curves::

            sage: C.matrix()
            [1 3 6 2]
            [3 1 2 6]
            [6 2 1 3]
            [2 6 3 1]

        TESTS::

            sage: TestSuite(C).run()
        """
        # NOTE: a dead assignment ``self._algorithm = "sage"`` used to sit
        # here; it was overwritten two lines later before any read, so it
        # has been removed.
        self._reducible_primes = reducible_primes
        self._algorithm = algorithm
        self._minimal_models = minimal_models
        # label=None: number-field classes have no Cremona-style label;
        # empty=False triggers an immediate ``_compute`` in the base class.
        IsogenyClass_EC.__init__(self, E, label=None, empty=False)
def copy(self):
"""
Return a copy (mostly used in reordering).
EXAMPLES::
sage: K.<i> = QuadraticField(-1)
sage: E = EllipticCurve(K, [0,0,0,0,1])
sage: C = E.isogeny_class()
sage: C2 = C.copy()
sage: C is C2
False
sage: C == C2
True
"""
ans = IsogenyClass_EC_NumberField(self.E, reducible_primes=self._reducible_primes, algorithm=self._algorithm, minimal_models=self._minimal_models)
# The following isn't needed internally, but it will keep
# things from breaking if this is used for something other
# than reordering.
ans.curves = self.curves
ans._mat = None
ans._maps = None
return ans
    def _compute(self, verbose=False):
        """
        Compute the list of curves, the matrix and prime-degree
        isogenies.

        EXAMPLES::

            sage: K.<i> = QuadraticField(-1)
            sage: E = EllipticCurve(K, [0,0,0,0,1])
            sage: C = E.isogeny_class()
            sage: C2 = C.copy()
            sage: C2._mat
            sage: C2._compute()
            sage: C2._mat
            [1 3 6 2]
            [3 1 2 6]
            [6 2 1 3]
            [2 6 3 1]

        TESTS:

        Check that :trac:`19030` is fixed (codomains of reverse isogenies were wrong)::

            sage: K.<i> = NumberField(x^2+1)
            sage: E = EllipticCurve([1, i + 1, 1, -72*i + 8, 95*i + 146])
            sage: C = E.isogeny_class()
            sage: curves = C.curves
            sage: isos = C.isogenies()
            sage: isos[0][3].codomain() == curves[3]
            True
        """
        from sage.schemes.elliptic_curves.ell_curve_isogeny import fill_isogeny_matrix
        from sage.matrix.all import MatrixSpace
        from sage.sets.set import Set
        self._maps = None

        # Work from a (semi-)minimal model when requested; this also makes
        # the is_isomorphic tests below cheaper.
        if self._minimal_models:
            E = self.E.global_minimal_model(semi_global=True)
        else:
            E = self.E

        degs = self._reducible_primes
        if degs is None:
            self._reducible_primes = possible_isogeny_degrees(E, algorithm=self._algorithm)
            degs = self._reducible_primes
        if verbose:
            import sys
            sys.stdout.write(" possible isogeny degrees: %s" % degs)
            sys.stdout.flush()
        isogenies = E.isogenies_prime_degree(degs, minimal_models=self._minimal_models)
        if verbose:
            sys.stdout.write(" -actual isogeny degrees: %s" % Set(phi.degree() for phi in isogenies))
            sys.stdout.flush()
        # Add all new codomains to the list and collect degrees:
        curves = [E]
        ncurves = 1
        degs = []
        # tuples (i,j,l,phi) where curve i is l-isogenous to curve j via phi
        tuples = []

        def add_tup(t):
            # Record an edge and its reverse; the reverse direction stores 0
            # in place of the isogeny (filled in only implicitly).
            for T in [t, [t[1], t[0], t[2], 0]]:
                if not T in tuples:
                    tuples.append(T)
                    if verbose:
                        sys.stdout.write(" -added tuple %s..." % T[:3])
                        sys.stdout.flush()

        # First layer: prime-degree isogenies out of the base curve E.
        for phi in isogenies:
            E2 = phi.codomain()
            d = ZZ(phi.degree())
            if not any(E2.is_isomorphic(E3) for E3 in curves):
                curves.append(E2)
                if verbose:
                    sys.stdout.write(" -added curve #%s (degree %s)..." % (ncurves,d))
                    sys.stdout.flush()
                add_tup([0,ncurves,d,phi])
                ncurves += 1
                if not d in degs:
                    degs.append(d)
        if verbose:
            sys.stdout.write("... relevant degrees: %s..." % degs)
            sys.stdout.write(" -now completing the isogeny class...")
            sys.stdout.flush()

        # Breadth-first closure: process each newly discovered curve in turn,
        # restricted to the degrees that actually occurred above.
        i = 1
        while i < ncurves:
            E1 = curves[i]
            if verbose:
                sys.stdout.write(" -processing curve #%s..." % i)
                sys.stdout.flush()

            isogenies = E1.isogenies_prime_degree(degs, minimal_models=self._minimal_models)

            for phi in isogenies:
                E2 = phi.codomain()
                d = phi.degree()
                js = [j for j,E3 in enumerate(curves) if E2.is_isomorphic(E3)]
                if js: # seen codomain already -- up to isomorphism
                    j = js[0]
                    # Post-compose with an isomorphism so the codomain is the
                    # stored representative (the fix for :trac:`19030`).
                    if phi.codomain()!=curves[j]:
                        phi = E2.isomorphism_to(curves[j]) * phi
                    assert phi.domain()==curves[i] and phi.codomain()==curves[j]
                    add_tup([i,j,d,phi])
                else:
                    curves.append(E2)
                    if verbose:
                        sys.stdout.write(" -added curve #%s..." % ncurves)
                        sys.stdout.flush()
                    add_tup([i,ncurves,d,phi])
                    ncurves += 1
            i += 1

        if verbose:
            print("... isogeny class has size %s" % ncurves)

        # key function for sorting: CM classes sort first by CM discriminant,
        # then (like the non-CM case) lexicographically by a-invariants.
        if E.has_rational_cm():
            key_function = lambda E: (-E.cm_discriminant(),
                                      flatten([list(ai) for ai in E.ainvs()]))
        else:
            key_function = lambda E: flatten([list(ai) for ai in E.ainvs()])

        self.curves = sorted(curves,key=key_function)
        # perm maps the discovery index of each curve to its sorted index.
        perm = dict([(ind, self.curves.index(Ei))
                     for ind, Ei in enumerate(curves)])
        if verbose:
            print("Sorting permutation = %s" % perm)

        mat = MatrixSpace(ZZ, ncurves)(0)
        self._maps = [[0] * ncurves for _ in range(ncurves)]
        for i,j,l,phi in tuples:
            if phi!=0:
                mat[perm[i],perm[j]] = l
                self._maps[perm[i]][perm[j]] = phi
        self._mat = fill_isogeny_matrix(mat)
        if verbose:
            print("Matrix = %s" % self._mat)

        if not E.has_rational_cm():
            self._qfmat = None
            return

        # In the CM case, we will have found some "horizontal"
        # isogenies of composite degree and would like to replace them
        # by isogenies of prime degree, mainly to make the isogeny
        # graph look better.  We also construct a matrix whose entries
        # are not degrees of cyclic isogenies, but rather quadratic
        # forms (in 1 or 2 variables) representing the isogeny
        # degrees.  For this we take a short cut: properly speaking,
        # when `\text{End}(E_1)=\text{End}(E_2)=O`, the set
        # `\text{Hom}(E_1,E_2)` is a rank `1` projective `O`-module,
        # hence has a well-defined ideal class associated to it, and
        # hence (using an identification between the ideal class group
        # and the group of classes of primitive quadratic forms of the
        # same discriminant) an equivalence class of quadratic forms.
        # But we currently only care about the numbers represented by
        # the form, i.e. which genus it is in rather than the exact
        # class.  So it suffices to find one form of the correct
        # discriminant which represents one isogeny degree from `E_1`
        # to `E_2` in order to obtain a form which represents all such
        # degrees.
        if verbose:
            print("Creating degree matrix (CM case)")

        allQs = {}  # keys: discriminants d
                    # values: lists of equivalence classes of
                    # primitive forms of discriminant d

        def find_quadratic_form(d,n):
            # Cache the reduced representatives per discriminant; return the
            # first class which represents n.
            if not d in allQs:
                from sage.quadratic_forms.binary_qf import BinaryQF_reduced_representatives
                allQs[d] = BinaryQF_reduced_representatives(d, primitive_only=True)
            # now test which of the Qs represents n
            for Q in allQs[d]:
                if Q.solve_integer(n):
                    return Q
            raise ValueError("No form of discriminant %d represents %s" %(d,n))

        mat = self._mat
        qfmat = [[0 for i in range(ncurves)] for j in range(ncurves)]
        for i, E1 in enumerate(self.curves):
            for j, E2 in enumerate(self.curves):
                if j<i:
                    # symmetric: copy the already-computed transpose entry
                    qfmat[i][j] = qfmat[j][i]
                    mat[i,j] = mat[j,i]
                elif i==j:
                    qfmat[i][j] = [1]
                    # mat[i,j] already 1
                else:
                    d = E1.cm_discriminant()
                    if d != E2.cm_discriminant():
                        # vertical isogeny: degree is already unique
                        qfmat[i][j] = [mat[i,j]]
                        # mat[i,j] already unique
                    else: # horizontal isogeny
                        q = find_quadratic_form(d,mat[i,j])
                        qfmat[i][j] = list(q)
                        mat[i,j] = q.small_prime_value()

        self._mat = mat
        self._qfmat = qfmat
        if verbose:
            print("new matrix = %s" % mat)
            print("matrix of forms = %s" % qfmat)
def _compute_matrix(self):
"""
Compute the matrix, assuming that the list of curves is computed.
EXAMPLES::
sage: isocls = EllipticCurve('1225h1').isogeny_class('database')
sage: isocls._mat
sage: isocls._compute_matrix(); isocls._mat
[ 0 37]
[37 0]
"""
self._mat = self.E.isogeny_class(order=self.curves)._mat
def _compute_isogenies(self):
"""
EXAMPLES::
sage: E = EllipticCurve('15a1')
sage: isocls = E.isogeny_class()
sage: maps = isocls.isogenies() # indirect doctest
sage: f = maps[0][1]
sage: f.domain() == isocls[0] and f.codomain() == isocls[1]
True
"""
recomputed = self.E.isogeny_class(order=self.curves)
self._mat = recomputed._mat
# The domains and codomains here will be equal, but not the same Python object.
self._maps = recomputed._maps
class IsogenyClass_EC_Rational(IsogenyClass_EC_NumberField):
    r"""
    Isogeny classes for elliptic curves over `\QQ`.
    """
    def __init__(self, E, algorithm="sage", label=None, empty=False):
        r"""
        INPUT:

        - ``E`` -- an elliptic curve over `\QQ`.

        - ``algorithm`` -- a string (default "sage").  One of the
          following:

          - "sage" -- Use sage's implementation to compute the curves,
            matrix and isogenies

          - "database" -- Use the Cremona database (only works if the
            curve is in the database)

        - ``label`` -- a string, the label of this isogeny class
          (e.g. '15a' or '37.b').  Used in printing.

        - ``empty`` -- don't compute the curves right now (used when reordering)

        EXAMPLES::

            sage: isocls = EllipticCurve('389a1').isogeny_class(); isocls
            Elliptic curve isogeny class 389a
            sage: E = EllipticCurve([0, 0, 0, 0, 1001]) # conductor 108216108
            sage: E.isogeny_class(order='database')
            Traceback (most recent call last):
            ...
            LookupError: Cremona database does not contain entry for Elliptic Curve defined by y^2 = x^3 + 1001 over Rational Field
            sage: TestSuite(isocls).run()
        """
        # ``_algorithm`` also lets ``reorder`` detect a no-op request.
        self._algorithm = algorithm
        IsogenyClass_EC.__init__(self, E, label=label, empty=empty)
def copy(self):
"""
Return a copy (mostly used in reordering).
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: C = E.isogeny_class()
sage: C2 = C.copy()
sage: C is C2
False
sage: C == C2
True
"""
ans = IsogenyClass_EC_Rational(self.E, self._algorithm, self._label, empty=True)
# The following isn't needed internally, but it will keep
# things from breaking if this is used for something other
# than reordering.
ans.curves = self.curves
ans._mat = None
ans._maps = None
return ans
def _compute(self):
"""
Compute the list of curves, and possibly the matrix and
prime-degree isogenies (depending on the algorithm selected).
EXAMPLES::
sage: isocls = EllipticCurve('48a1').isogeny_class('sage').copy()
sage: isocls._mat
sage: isocls._compute(); isocls._mat
[0 2 2 2 0 0]
[2 0 0 0 2 2]
[2 0 0 0 0 0]
[2 0 0 0 0 0]
[0 2 0 0 0 0]
[0 2 0 0 0 0]
"""
algorithm = self._algorithm
from sage.matrix.all import MatrixSpace
self._maps = None
if algorithm == "database":
try:
label = self.E.cremona_label(space=False)
except RuntimeError:
raise RuntimeError("unable to find %s in the database" % self.E)
db = sage.databases.cremona.CremonaDatabase()
curves = db.isogeny_class(label)
if not curves:
raise RuntimeError("unable to find %s in the database" % self.E)
# All curves will have the same conductor and isogeny class,
# and there are most 8 of them, so lexicographic sorting is okay.
self.curves = tuple(sorted(curves, key = lambda E: E.cremona_label()))
self._mat = None
elif algorithm == "sage":
curves = [self.E.minimal_model()]
ijl_triples = []
l_list = None
i = 0
while i<len(curves):
E = curves[i]
isogs = E.isogenies_prime_degree(l_list)
for phi in isogs:
Edash = phi.codomain()
l = phi.degree()
# look to see if Edash is new. Note that the
# curves returned by isogenies_prime_degree() are
# standard minimal models, so it suffices to check
# equality rather than isomorphism here.
try:
j = curves.index(Edash)
except ValueError:
j = len(curves)
curves.append(Edash)
ijl_triples.append((i,j,l,phi))
if l_list is None:
l_list = [d for d in set([ZZ(f.degree()) for f in isogs])]
i += 1
self.curves = tuple(curves)
ncurves = len(curves)
self._mat = MatrixSpace(ZZ,ncurves)(0)
self._maps = [[0]*ncurves for _ in range(ncurves)]
for i,j,l,phi in ijl_triples:
self._mat[i,j] = l
self._maps[i][j]=phi
else:
raise ValueError("unknown algorithm '%s'" % algorithm)
def isogeny_degrees_cm(E, verbose=False):
    r"""
    Return a list of primes `\ell` sufficient to generate the
    isogeny class of `E`, where `E` has CM.

    INPUT:

    - ``E`` -- An elliptic curve defined over a number field.

    OUTPUT:

    A finite list of primes `\ell` such that every curve isogenous to
    this curve can be obtained by a finite sequence of isogenies of
    degree one of the primes in the list.

    ALGORITHM:

    For curves with CM by the order `O` of discriminant `d`, the
    Galois representation is always non-surjective and the curve will
    admit `\ell`-isogenies for infinitely many primes `\ell`, but
    there are only finitely many codomains `E'`.  The primes can be
    divided according to the discriminant `d'` of the CM order `O'`
    associated to `E'`: either `O=O'`, or one contains the other with
    index `\ell`, since `\ell O\subset O'` and vice versa.

    Case (1): `O=O'`.  The degrees of all isogenies between `E` and
    `E'` are precisely the integers represented by one of the classes
    of binary quadratic forms `Q` of discriminant `d`.  Hence to
    obtain all possible isomorphism classes of codomain `E'`, we need
    only use one prime `\ell` represented by each such class `Q`.  It
    would in fact suffice to use primes represented by forms which
    generate the class group.  Here we simply omit the principal class
    and one from each pair of inverse classes, and include a prime
    represented by each of the remaining forms.

    Case (2): `[O':O]=\ell`: so `d=\ell^2 d'`.  We include all prime
    divisors of `d`.

    Case (3): `[O:O']=\ell`: we may assume that `\ell` does not divide
    `d` as we have already included these, so `\ell` either splits or
    is inert in `O`; the class numbers satisfy `h(O')=(\ell\pm1)h(O)`
    accordingly.  We include all primes `\ell` such that `\ell\pm1`
    divides the degree `[K:\QQ]`.

    For curves with only potential CM we proceed as in the CM case,
    using `2[K:\QQ]` instead of `[K:\QQ]`.

    EXAMPLES:

    For curves with CM by a quadratic order of class number greater
    than `1`, we use the structure of the class group to only give one
    prime in each ideal class::

        sage: pol = PolynomialRing(QQ,'x')([1,-3,5,-5,5,-3,1])
        sage: L.<a> = NumberField(pol)
        sage: j = hilbert_class_polynomial(-23).roots(L,multiplicities=False)[0]
        sage: E = EllipticCurve(j=j)
        sage: from sage.schemes.elliptic_curves.isogeny_class import isogeny_degrees_cm
        sage: isogeny_degrees_cm(E, verbose=True)
        CM case, discriminant = -23
        initial primes: {2}
        upward primes: {}
        downward ramified primes: {}
        downward split primes: {2, 3}
        downward inert primes: {5}
        primes generating the class group: [2]
        Complete set of primes: {2, 3, 5}
        [2, 3, 5]
    """
    if not E.has_cm():
        raise ValueError("possible_isogeny_degrees_cm(E) requires E to be an elliptic curve with CM")
    d = E.cm_discriminant()

    if verbose:
        print("CM case, discriminant = %s" % d)

    from sage.libs.pari.all import pari
    from sage.sets.all import Set
    from sage.arith.all import kronecker_symbol

    # n is the field degree, doubled when the CM is only potential.
    n = E.base_field().absolute_degree()
    if not E.has_rational_cm():
        n *= 2
    # NOTE(review): the comments below speak of l+-1 dividing n/2h, but the
    # candidates are drawn from the divisors of n itself -- a superset, which
    # is still sufficient.  Confirm intent before tightening.
    divs = n.divisors()

    data = pari(d).quadclassunit()
    # This has 4 components: the class number, class group
    # structure (ignored), class group generators (as quadratic
    # forms) and regulator (=1 since d<0, ignored).
    h = data[0].sage()

    # We must have 2*h dividing n, and will need the quotient so
    # see if the j-invariants of any proper sub-orders could lie
    # in the same field
    n_over_2h = n//(2*h)

    # Collect possible primes.  First put in 2, and also 3 for
    # discriminant -3 (special case because of units):
    L = Set([ZZ(2), ZZ(3)]) if d==-3 else Set([ZZ(2)])
    if verbose:
        print("initial primes: %s" % L)

    # Step 1: "vertical" primes l such that the isogenous curve
    # has CM by an order whose index is l or 1/l times the index
    # of the order O of discriminant d.  The latter case can only
    # happen when l^2 divides d.

    # Compute the ramified primes
    ram_l = d.odd_part().prime_factors()

    # if the CM is not rational we include all ramified primes,
    # which is simpler than using the class group later:
    if not E.has_rational_cm():
        L1 = Set(ram_l)
        L += L1
        if verbose:
            print("ramified primes: %s" % L1)
    else:
        # Find the "upward" primes (index divided by l):
        L1 = Set([l for l in ram_l if d.valuation(l)>1])
        L += L1
        if verbose:
            print("upward primes: %s" % L1)

        # Find the "downward" primes (index multiplied by l, class
        # number multiplied by l-kronecker_symbol(d,l)):

        # (a) ramified primes; the suborder has class number l*h, so l
        # must divide n/2h:
        L1 = Set([l for l in ram_l if l.divides(n_over_2h)])
        L += L1
        if verbose:
            print("downward ramified primes: %s" % L1)

    # (b) split primes; the suborder has class number (l-1)*h, so
    # l-1 must divide n/2h:
    L1 = Set([lm1+1 for lm1 in divs
              if (lm1+1).is_prime() and kronecker_symbol(d,lm1+1)==+1])
    L += L1
    if verbose:
        print("downward split primes: %s" % L1)

    # (c) inert primes; the suborder has class number (l+1)*h, so
    # l+1 must divide n/2h:
    L1 = Set([lp1-1 for lp1 in divs
              if (lp1-1).is_prime() and kronecker_symbol(d,lp1-1)==-1])
    L += L1
    if verbose:
        print("downward inert primes: %s" % L1)

    # Now find primes represented by each form of discriminant d.
    # In the rational CM case, we use all forms associated to
    # generators of the class group, otherwise only forms of order
    # 2:
    if E.has_rational_cm():
        from sage.quadratic_forms.binary_qf import BinaryQF
        Qs = [BinaryQF(list(q)) for q in data[2]]

        L1 = [Q.small_prime_value() for Q in Qs]
        if verbose:
            print("primes generating the class group: %s" % L1)
        L += Set(L1)

    # Return sorted list
    if verbose:
        print("Complete set of primes: %s" % L)
    return sorted(L)
def possible_isogeny_degrees(E, algorithm='Billerey', max_l=None,
                             num_l=None, exact=True, verbose=False):
    r"""
    Return a list of primes `\ell` sufficient to generate the
    isogeny class of `E`.

    INPUT:

    - ``E`` -- An elliptic curve defined over a number field.

    - ``algorithm`` (string, default 'Billerey') -- Algorithm to be
      used for non-CM curves: either 'Billerey', 'Larson', or
      'heuristic'.  Only relevant for non-CM curves and base fields
      other than `\QQ`.

    - ``max_l`` (int or ``None``) -- only relevant for non-CM curves
      and algorithms 'Billerey' and 'heuristic'.  Controls the maximum
      prime used in either algorithm.  If ``None``, use the default
      for that algorithm.

    - ``num_l`` (int or ``None``) -- only relevant for non-CM curves
      and algorithm 'Billerey'.  Controls the maximum number of primes
      used in the algorithm.  If ``None``, use the default for that
      algorithm.

    - ``exact`` (bool, default ``True``) -- if ``True``, perform an
      additional check that the primes returned are all reducible.  If
      ``False``, skip this step, in which case some of the primes
      returned may be irreducible.

    OUTPUT:

    A finite list of primes `\ell` such that every curve isogenous to
    this curve can be obtained by a finite sequence of isogenies of
    degree one of the primes in the list.

    ALGORITHM:

    For curves without CM, the set may be taken to be the finite set
    of primes at which the Galois representation is not surjective,
    since the existence of an `\ell`-isogeny is equivalent to the
    image of the mod-`\ell` Galois representation being contained in a
    Borel subgroup.  Two rigorous algorithms have been implemented to
    determine this set, due to Larson and Billerey respectively.  We
    also provide a non-rigorous 'heuristic' algorithm which only tests
    reducible primes up to a bound depending on the degree of the
    base field.

    For curves with CM see the documentation for :meth:`isogeny_degrees_cm()`.

    EXAMPLES:

    For curves without CM we determine the primes at which the mod `p`
    Galois representation is reducible, i.e. contained in a Borel
    subgroup::

        sage: from sage.schemes.elliptic_curves.isogeny_class import possible_isogeny_degrees
        sage: E = EllipticCurve('11a1')
        sage: possible_isogeny_degrees(E)
        [5]
        sage: possible_isogeny_degrees(E, algorithm='Larson')
        [5]
        sage: possible_isogeny_degrees(E, algorithm='Billerey')
        [5]
        sage: possible_isogeny_degrees(E, algorithm='heuristic')
        [5]

    We check that in this case `E` really does have rational
    `5`-isogenies::

        sage: [phi.degree() for phi in E.isogenies_prime_degree()]
        [5, 5]

    Over an extension field::

        sage: E3 = E.change_ring(CyclotomicField(3))
        sage: possible_isogeny_degrees(E3)
        [5]
        sage: [phi.degree() for phi in E3.isogenies_prime_degree()]
        [5, 5]

    A higher degree example (LMFDB curve 5.5.170701.1-4.1-b1)::

        sage: K.<a> = NumberField(x^5 - x^4 - 6*x^3 + 4*x + 1)
        sage: E = EllipticCurve(K, [a^3 - a^2 - 5*a + 1, a^4 - a^3 - 5*a^2 - a + 1, -a^4 + 2*a^3 + 5*a^2 - 5*a - 3, a^4 - a^3 - 5*a^2 - a, -3*a^4 + 4*a^3 + 17*a^2 - 6*a - 12])
        sage: possible_isogeny_degrees(E, algorithm='heuristic')
        [2]
        sage: possible_isogeny_degrees(E, algorithm='Billerey')
        [2]
        sage: possible_isogeny_degrees(E, algorithm='Larson')
        [2]

    LMFDB curve 4.4.8112.1-108.1-a5::

        sage: K.<a> = NumberField(x^4 - 5*x^2 + 3)
        sage: E = EllipticCurve(K, [a^2 - 2, -a^2 + 3, a^2 - 2, -50*a^2 + 35, 95*a^2 - 67])
        sage: possible_isogeny_degrees(E, exact=False, algorithm='Billerey')
        [2, 5]
        sage: possible_isogeny_degrees(E, exact=False, algorithm='Larson')
        [2, 5]
        sage: possible_isogeny_degrees(E, exact=False, algorithm='heuristic')
        [2, 5]
        sage: possible_isogeny_degrees(E)
        [2, 5]

    This function only returns the primes which are isogeny degrees::

        sage: Set(E.isogeny_class().matrix().list())
        {1, 2, 4, 5, 20, 10}

    For curves with CM by a quadratic order of class number greater
    than `1`, we use the structure of the class group to only give one
    prime in each ideal class::

        sage: pol = PolynomialRing(QQ,'x')([1,-3,5,-5,5,-3,1])
        sage: L.<a> = NumberField(pol)
        sage: j = hilbert_class_polynomial(-23).roots(L,multiplicities=False)[0]
        sage: E = EllipticCurve(j=j)
        sage: from sage.schemes.elliptic_curves.isogeny_class import possible_isogeny_degrees
        sage: possible_isogeny_degrees(E, verbose=True)
        CM case, discriminant = -23
        initial primes: {2}
        upward primes: {}
        downward ramified primes: {}
        downward split primes: {2, 3}
        downward inert primes: {5}
        primes generating the class group: [2]
        Complete set of primes: {2, 3, 5}
        [2, 3, 5]
    """
    if E.has_cm():
        return isogeny_degrees_cm(E, verbose)

    # Over QQ, non-CM isogeny primes are bounded (the larger sporadic
    # degrees all occur only for CM curves, handled above), so a naive
    # search up to 37 suffices.
    if E.base_field() == QQ:
        from sage.schemes.elliptic_curves.gal_reps_number_field import reducible_primes_naive
        return reducible_primes_naive(E, max_l=37, verbose=verbose)

    # Non-CM case

    # NB The following functions first compute a finite set
    # containing the reducible primes, then check that each is
    # reducible by computing l-isogenies.  This appears circular
    # but the computed l-isogenies for a fixed prime l are cached.
    if verbose:
        print("Non-CM case, using {} algorithm".format(algorithm))

    # First we obtain a finite set of primes containing the reducible
    # ones.  Each of these algorithms includes application of the
    # "Frobenius filter" eliminating any ell for which there exists a
    # prime P of good reduction such that the Frobenius polynomial at
    # P does not factor modulo ell.
    if algorithm == 'Larson':
        L = E.galois_representation().isogeny_bound()
    elif algorithm == 'Billerey':
        from sage.schemes.elliptic_curves.gal_reps_number_field import reducible_primes_Billerey
        L = reducible_primes_Billerey(E, num_l=num_l, max_l=max_l, verbose=verbose)
    elif algorithm == 'heuristic':
        from sage.schemes.elliptic_curves.gal_reps_number_field import reducible_primes_naive
        L = reducible_primes_naive(E, max_l=max_l, num_P=num_l, verbose=verbose)
    else:
        raise ValueError("algorithm for possible_isogeny_degrees must be one of 'Larson', 'Billerey', 'heuristic'")

    # The set L may contain irreducible primes.  We optionally test
    # each one to see if it is actually reducible, by computing ell-isogenies:
    if exact:
        L = [l for l in L if E.isogenies_prime_degree(l, minimal_models=False)]
    return L
| 39.390177 | 739 | 0.551778 |
227a86e6c8c7e15c54f8cdf5b808efabced34b3c | 1,283 | py | Python | arcade/gui/examples/grid_layout.py | Ibrahim2750mi/arcade | bf3229e64117931bffb8e50926a996a7a8fc9b8b | [
"MIT"
] | null | null | null | arcade/gui/examples/grid_layout.py | Ibrahim2750mi/arcade | bf3229e64117931bffb8e50926a996a7a8fc9b8b | [
"MIT"
] | 1 | 2022-03-21T06:24:29.000Z | 2022-03-21T06:24:29.000Z | arcade/gui/examples/grid_layout.py | Ibrahim2750mi/arcade | bf3229e64117931bffb8e50926a996a7a8fc9b8b | [
"MIT"
] | null | null | null | import arcade
from arcade.gui import UIManager
from arcade.gui.widgets import UIDummy
from arcade.gui.widgets.layout import UIGridLayout, UIAnchorLayout
class UIMockup(arcade.Window):
    """Demo window laying placeholder widgets out in a 3x3 UIGridLayout."""

    def __init__(self):
        super().__init__(800, 600, "UI Mockup", resizable=True)
        self.manager = UIManager()
        self.manager.enable()
        arcade.set_background_color(arcade.color.DARK_BLUE_GRAY)

        # (widget, column, row, extra kwargs); the last two entries span
        # multiple columns / rows respectively.
        cells = [
            (UIDummy(width=100, height=100), 0, 0, {}),
            (UIDummy(width=50, height=50), 0, 1, {}),
            (UIDummy(width=50, height=50), 1, 0, {}),
            (UIDummy(width=100, height=100), 1, 1, {}),
            (UIDummy(width=200, height=100), 0, 2, {"col_span": 2}),
            (UIDummy(width=100, height=200), 2, 0, {"row_span": 3}),
        ]

        grid = UIGridLayout(
            column_count=3,
            row_count=3,
        ).with_border().with_padding()
        for widget, column, row, extra in cells:
            grid.add(widget, column, row, **extra)

        # Center the grid on screen via an anchor layout.
        root = UIAnchorLayout()
        root.add(grid)
        self.manager.add(root)

    def on_draw(self):
        self.clear()
        self.manager.draw()
if __name__ == "__main__":
    # Create the demo window and enter arcade's event loop.
    window = UIMockup()
    arcade.run()
| 27.891304 | 66 | 0.624318 |
4807b4a5984ce8f5ada952d85d2628a6705f5d41 | 1,476 | py | Python | rl/utils/networks/dyna_network.py | unkper/PedestrainSimulationModule | 039ed0903a0861130566d8d1d862594064b8e0db | [
"MIT"
] | null | null | null | rl/utils/networks/dyna_network.py | unkper/PedestrainSimulationModule | 039ed0903a0861130566d8d1d862594064b8e0db | [
"MIT"
] | null | null | null | rl/utils/networks/dyna_network.py | unkper/PedestrainSimulationModule | 039ed0903a0861130566d8d1d862594064b8e0db | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class dyna_model_network(nn.Module):
    """Learned environment model for Dyna-style reinforcement learning.

    Given a batch of states ``s`` and one action value per sample ``a``,
    predicts the next state, the reward and a termination score.  Reward
    and done outputs are both 1-dimensional per sample.
    """

    def __init__(self, state_dim, action_dim, hidden_dim):
        """
        :param state_dim: dimensionality of the state vector
        :param action_dim: dimensionality of the action input
        :param hidden_dim: width of each hidden embedding
        """
        super().__init__()
        self.layer_s2h = nn.Linear(state_dim, hidden_dim)
        self.layer_a2h = nn.Linear(action_dim, hidden_dim)
        # The state and action embeddings are concatenated, hence the
        # hidden_dim * 2 input size of the three output heads.
        self.layer_h2s1 = nn.Linear(hidden_dim * 2, state_dim)
        self.layer_h2r = nn.Linear(hidden_dim * 2, 1)
        self.layer_h2i = nn.Linear(hidden_dim * 2, 1)

    def forward(self, s, a):
        """Return ``(next_state, reward, is_done)`` predictions."""
        # a arrives as shape (batch,); give it a feature dimension.
        # NOTE(review): this assumes action_dim == 1 -- confirm callers.
        a = torch.unsqueeze(a, dim=1)
        h_s = self.layer_s2h(s)
        h_a = self.layer_a2h(a)
        # torch.tanh replaces the deprecated F.tanh (identical values).
        h = torch.tanh(torch.cat([h_s, h_a], dim=1))
        reward = self.layer_h2r(h)
        is_done = self.layer_h2i(h)
        state_1 = self.layer_h2s1(h)
        return state_1, reward, is_done
class dyna_q_network(nn.Module):
    """Plain two-hidden-layer ReLU MLP used as the Q-network in Dyna."""

    def __init__(self, input_dim, out_dim, hidden_dim):
        super().__init__()
        self.layer_i2h1 = nn.Linear(input_dim, hidden_dim)
        self.layer_h12h2 = nn.Linear(hidden_dim, hidden_dim)
        self.layer_h22o = nn.Linear(hidden_dim, out_dim)

    def forward(self, x):
        """Map a batch of inputs to Q-values."""
        hidden = F.relu(self.layer_i2h1(x))
        hidden = F.relu(self.layer_h12h2(hidden))
        return self.layer_h22o(hidden)
| 32.086957 | 62 | 0.636179 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.