max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
circuit_element/draw_circuits.py | mcyrus10/vrfbImpedance | 1 | 6621851 | #!/Users/cyrus/miniconda3/bin/python3
from lcapy import Circuit

# (schematic file, rendered image) pairs to draw.  Kept in one table so new
# schematics need a single extra entry instead of two more statements.
# NOTE: the randles schematic intentionally renders to 'z_randles.png'.
SCHEMATICS = [
    ("z_a.sch", "z_a.png"),
    ("z_b.sch", "z_b.png"),
    ("randles_circuit.sch", "z_randles.png"),
    ("mhpe.sch", "mhpe.png"),
    ("symmetric_cell.sch", "symmetric_cell.png"),
]

for sch, png in SCHEMATICS:
    # Render each lcapy schematic to its PNG image.
    Circuit(sch).draw(png)
| #!/Users/cyrus/miniconda3/bin/python3
from lcapy import Circuit
# Render each lcapy schematic (.sch) to a PNG image.
cct = Circuit("z_a.sch")
cct.draw('z_a.png')
cct = Circuit("z_b.sch")
cct.draw('z_b.png')
# NOTE: the randles schematic renders to 'z_randles.png' (output name differs).
cct = Circuit('randles_circuit.sch')
cct.draw('z_randles.png')
cct = Circuit('mhpe.sch')
cct.draw('mhpe.png')
cct = Circuit('symmetric_cell.sch')
cct.draw('symmetric_cell.png')
| en | 0.565457 | #!/Users/cyrus/miniconda3/bin/python3 | 1.790662 | 2 |
calipso/plot/plot_backscattered.py | NASA-DEVELOP/vocal | 18 | 6621852 | <reponame>NASA-DEVELOP/vocal
#!/opt/local/bin/python2.7
#
# plot_uniform_alt_lidar.py
# <NAME>
# <NAME>
# <NAME>
# 8/11/2014
#
from ccplot.hdf import HDF
import ccplot.utils
import matplotlib as mpl
import numpy as np
from plot.avg_lidar_data import avg_horz_data
from plot.uniform_alt_2 import uniform_alt_2
from plot.regrid_lidar import regrid_lidar
# from gui.CALIPSO_Visualization_Tool import filename
# noinspection PyUnresolvedReferences
def render_backscattered(filename, x_range, y_range, fig, pfig):
    """Render averaged 532 nm total attenuated backscatter from a CALIPSO HDF file.

    filename -- path to the CALIPSO HDF product
    x_range  -- (x1, x2) profile-index window to plot
    y_range  -- (h1, h2) altitude window in km for the image extent
    fig      -- matplotlib Axes the image is drawn on
    pfig     -- matplotlib Figure used to attach the colorbar

    Returns the twinned time axis (ax).
    Raises IndexError when the requested window falls outside the file's
    usable time range.
    """
    x1 = x_range[0]
    x2 = x_range[1]
    h1 = y_range[0]
    h2 = y_range[1]
    # averaging_width = 15
    # Adjust the averaging width so it is roughly uniform per requested range,
    # clamped to [5, 15] profiles.
    averaging_width = int((x2-x1)/1000)
    if averaging_width < 5:
        averaging_width = 5
    if averaging_width > 15:
        averaging_width = 15
    colormap = 'dat/calipso-backscatter.cmap'
    print('xrange: ' + str(x_range) + ', yrange: ' + str(y_range))
    with HDF(filename) as product:
        time = product['Profile_UTC_Time'][x1:x2, 0]
        minimum = min(product['Profile_UTC_Time'][::])[0]
        maximum = max(product['Profile_UTC_Time'][::])[0]
        # length of time determines how far the file can be viewed
        if time[-1] >= maximum and len(time) < 950:
            raise IndexError
        if time[0] < minimum:
            raise IndexError
        alt = product['metadata']['Lidar_Data_Altitudes']
        dataset = product['Total_Attenuated_Backscatter_532'][x1:x2].T
        latitude = product['Latitude'][x1:x2, 0]
        # Subsample latitude to match the horizontally averaged data columns.
        latitude = latitude[::averaging_width]
        print(np.shape(time))
        time = np.array([ccplot.utils.calipso_time2dt(t) for t in time])
        # -9999 is the product's fill value; mask it out of the plot.
        dataset = np.ma.masked_equal(dataset, -9999)
        # The following method has been translated from MatLab code written by <NAME> 7/10/07
        # Translated by <NAME> 7/19/17
        avg_dataset = avg_horz_data(dataset, averaging_width)
        # Put altitudes above 8.2 km on same spacing as lower ones
        MAX_ALT = 20
        unif_alt = uniform_alt_2(MAX_ALT, alt)
        regrid_dataset = regrid_lidar(alt, avg_dataset, unif_alt)
        data = regrid_dataset
        # End method
        # Build the CALIPSO backscatter colormap (values stored as 0-255 RGB).
        cmap = ccplot.utils.cmap(colormap)
        cm = mpl.colors.ListedColormap(cmap['colors']/255.0)
        cm.set_under(cmap['under']/255.0)
        cm.set_over(cmap['over']/255.0)
        cm.set_bad(cmap['bad']/255.0)
        norm = mpl.colors.BoundaryNorm(cmap['bounds'], cm.N)
        im = fig.imshow(
            #data.T,
            data,
            extent=(latitude[0], latitude[-1], h1, h2),
            cmap=cm,
            aspect='auto',
            norm=norm,
            interpolation='nearest',
            )
        fig.set_ylabel('Altitude (km)')
        fig.set_xlabel('Latitude')
        fig.set_title("Averaged 532 nm Total Attenuated Backscatter")
        cbar_label = 'Total Attenuated Backscatter 532nm (km$^{-1}$ sr$^{-1}$)'
        cbar = pfig.colorbar(im)
        cbar.set_label(cbar_label)
        # Secondary x-axis showing UTC time across the same span.
        ax = fig.twiny()
        ax.set_xlabel('Time')
        ax.set_xlim(time[0], time[-1])
        ax.get_xaxis().set_major_formatter(mpl.dates.DateFormatter('%H:%M:%S'))
        # Keep the time axis drawn above the image axes.
        fig.set_zorder(0)
        ax.set_zorder(1)
        title = fig.set_title('Averaged 532 nm Total Attenuated Backscatter')
        title_xy = title.get_position()
        # Nudge the title up so the twinned axis labels do not overlap it.
        title.set_position([title_xy[0], title_xy[1]*1.07])
        return ax
| #!/opt/local/bin/python2.7
#
# plot_uniform_alt_lidar.py
# <NAME>
# <NAME>
# <NAME>
# 8/11/2014
#
from ccplot.hdf import HDF
import ccplot.utils
import matplotlib as mpl
import numpy as np
from plot.avg_lidar_data import avg_horz_data
from plot.uniform_alt_2 import uniform_alt_2
from plot.regrid_lidar import regrid_lidar
# from gui.CALIPSO_Visualization_Tool import filename
# noinspection PyUnresolvedReferences
def render_backscattered(filename, x_range, y_range, fig, pfig):
x1 = x_range[0]
x2 = x_range[1]
h1 = y_range[0]
h2 = y_range[1]
# averaging_width = 15
# Adjust the averaging with so its uniform per range
averaging_width = int((x2-x1)/1000)
if averaging_width < 5:
averaging_width = 5
if averaging_width > 15:
averaging_width = 15
colormap = 'dat/calipso-backscatter.cmap'
print('xrange: ' + str(x_range) + ', yrange: ' + str(y_range))
with HDF(filename) as product:
time = product['Profile_UTC_Time'][x1:x2, 0]
minimum = min(product['Profile_UTC_Time'][::])[0]
maximum = max(product['Profile_UTC_Time'][::])[0]
# length of time determines how far the file can be viewed
if time[-1] >= maximum and len(time) < 950:
raise IndexError
if time[0] < minimum:
raise IndexError
alt = product['metadata']['Lidar_Data_Altitudes']
dataset = product['Total_Attenuated_Backscatter_532'][x1:x2].T
latitude = product['Latitude'][x1:x2, 0]
latitude = latitude[::averaging_width]
print(np.shape(time))
time = np.array([ccplot.utils.calipso_time2dt(t) for t in time])
dataset = np.ma.masked_equal(dataset, -9999)
# The following method has been translated from MatLab code written by <NAME> 7/10/07
# Translated by <NAME> 7/19/17
avg_dataset = avg_horz_data(dataset, averaging_width)
# Put altitudes above 8.2 km on same spacing as lower ones
MAX_ALT = 20
unif_alt = uniform_alt_2(MAX_ALT, alt)
regrid_dataset = regrid_lidar(alt, avg_dataset, unif_alt)
data = regrid_dataset
# End method
cmap = ccplot.utils.cmap(colormap)
cm = mpl.colors.ListedColormap(cmap['colors']/255.0)
cm.set_under(cmap['under']/255.0)
cm.set_over(cmap['over']/255.0)
cm.set_bad(cmap['bad']/255.0)
norm = mpl.colors.BoundaryNorm(cmap['bounds'], cm.N)
im = fig.imshow(
#data.T,
data,
extent=(latitude[0], latitude[-1], h1, h2),
cmap=cm,
aspect='auto',
norm=norm,
interpolation='nearest',
)
fig.set_ylabel('Altitude (km)')
fig.set_xlabel('Latitude')
fig.set_title("Averaged 532 nm Total Attenuated Backscatter")
cbar_label = 'Total Attenuated Backscatter 532nm (km$^{-1}$ sr$^{-1}$)'
cbar = pfig.colorbar(im)
cbar.set_label(cbar_label)
ax = fig.twiny()
ax.set_xlabel('Time')
ax.set_xlim(time[0], time[-1])
ax.get_xaxis().set_major_formatter(mpl.dates.DateFormatter('%H:%M:%S'))
fig.set_zorder(0)
ax.set_zorder(1)
title = fig.set_title('Averaged 532 nm Total Attenuated Backscatter')
title_xy = title.get_position()
title.set_position([title_xy[0], title_xy[1]*1.07])
return ax | en | 0.802353 | #!/opt/local/bin/python2.7 # # plot_uniform_alt_lidar.py # <NAME> # <NAME> # <NAME> # 8/11/2014 # # from gui.CALIPSO_Visualization_Tool import filename # noinspection PyUnresolvedReferences # averaging_width = 15 # Adjust the averaging with so its uniform per range # length of time determines how far the file can be viewed # The following method has been translated from MatLab code written by <NAME> 7/10/07 # Translated by <NAME> 7/19/17 # Put altitudes above 8.2 km on same spacing as lower ones # End method #data.T, | 2.468785 | 2 |
code/38.py | Nightwish-cn/my_leetcode | 23 | 6621853 | class Solution(object):
def countAndSay(self, n):
"""
:type n: int
:rtype: str
"""
pans = ["1"]
for i in range(1, n):
lst, cur, len1 = 0, 0, len(pans)
cans = []
while cur < len1:
while cur < len1 and pans[lst] == pans[cur]:
cur += 1
cans.append(str(cur - lst))
cans.append(pans[lst])
lst = cur
pans = cans
return "".join(pans) | class Solution(object):
    def countAndSay(self, n):
        """Return the n-th term of the look-and-say sequence.

        :type n: int
        :rtype: str
        """
        pans = ["1"]  # current term, kept as a list of single-digit strings
        for i in range(1, n):
            # Run-length encode `pans` into `cans`: for each maximal run of
            # equal digits append the run length followed by the digit.
            lst, cur, len1 = 0, 0, len(pans)
            cans = []
            while cur < len1:
                while cur < len1 and pans[lst] == pans[cur]:
                    cur += 1
                cans.append(str(cur - lst))
                cans.append(pans[lst])
                lst = cur
            pans = cans
return "".join(pans) | en | 0.222187 | :type n: int :rtype: str | 3.244558 | 3 |
term4/AiSD/2.py | japanese-goblinn/labs | 0 | 6621854 | n = int(input())
# Print the maximum product of any two of the n numbers read from stdin
# (`n` itself is read on the preceding line).
nums = [int(i) for i in input().split(' ')]
if n == 2:
    # Only one pair exists; no need to sort.
    print(nums[0] * nums[1])
else:
    nums.sort()
    # The maximum pair product is either the product of the two smallest
    # values (large and positive when both are negative) or the product of
    # the two largest values.
    res1 = nums[0] * nums[1]
    res2 = nums[-1] * nums[-2]
    if res1 < res2:
        print(res2)
    else:
        print(res1)
| n = int(input())
nums = [int(i) for i in input().split(' ')]
if n == 2:
print(nums[0] * nums[1])
else:
nums.sort()
res1 = nums[0] * nums[1]
res2 = nums[-1] * nums[-2]
if res1 < res2:
print(res2)
else:
print(res1)
| none | 1 | 3.580042 | 4 | |
testing/tshark_testing.py | mnmnc/campephilus | 0 | 6621855 | <gh_stars>0
import sys; import os
sys.path.insert(0, os.path.abspath('..'))
from modules.tshark import tshark
import unittest
class TsharkTestCase(unittest.TestCase):
	"""Unit tests for the Tshark command-builder wrapper.

	Each field-category test rebuilds a fresh Tshark instance and checks
	how many fields of that category are registered after
	add_fields_by_category().
	"""
	shark = tshark.Tshark("tshark", "input\\", "output\\")
	def test_tshark_exec_path___(self):
		# Building a command with a filter should yield a non-trivial
		# execution command string.
		test_input = "split_00000_20120316133000.pcap"
		test_output = "test.csv"
		self.shark.add_filter("tcp")
		self.shark.create_command(test_input, test_output)
		self.assertTrue(len(self.shark.execution_command) > 10)
	def test_tshark_tcp_fields__(self):
		self.shark = tshark.Tshark("tshark", "input\\", "output\\")
		self.shark.add_fields_by_category("tcp")
		self.assertEqual( self.shark.fields.count("tcp") , 5)
	def test_tshark_ip_fields___(self):
		self.shark = tshark.Tshark("tshark", "input\\", "output\\")
		# NOTE(review): the line below duplicates the construction above --
		# presumably a copy/paste slip; harmless but worth cleaning up.
		self.shark = tshark.Tshark("tshark", "input\\", "output\\")
		self.shark.add_fields_by_category("ip")
		self.assertEqual( self.shark.fields.count("ip") , 3)
	def test_tshark_icmp_fields_(self):
		self.shark = tshark.Tshark("tshark", "input\\", "output\\")
		self.shark.add_fields_by_category("icmp")
		self.assertEqual( self.shark.fields.count("icmp") , 2)
	def test_tshark_dns_fields__(self):
		self.shark = tshark.Tshark("tshark", "input\\", "output\\")
		self.shark.add_fields_by_category("dns")
		self.assertEqual( self.shark.fields.count("dns") , 4)
	def test_tshark_frame_fields(self):
		self.shark = tshark.Tshark("tshark", "input\\", "output\\")
		self.shark.add_fields_by_category("frame")
		self.assertEqual( self.shark.fields.count("frame") , 3)
	def test_tshark_udp_fields__(self):
		self.shark = tshark.Tshark("tshark", "input\\", "output\\")
		self.shark.add_fields_by_category("udp")
		self.assertEqual( self.shark.fields.count("udp") , 2)
if __name__ == '__main__':
unittest.main(verbosity=2) | import sys; import os
sys.path.insert(0, os.path.abspath('..'))
from modules.tshark import tshark
import unittest
class TsharkTestCase(unittest.TestCase):
shark = tshark.Tshark("tshark", "input\\", "output\\")
def test_tshark_exec_path___(self):
test_input = "split_00000_20120316133000.pcap"
test_output = "test.csv"
self.shark.add_filter("tcp")
self.shark.create_command(test_input, test_output)
self.assertTrue(len(self.shark.execution_command) > 10)
def test_tshark_tcp_fields__(self):
self.shark = tshark.Tshark("tshark", "input\\", "output\\")
self.shark.add_fields_by_category("tcp")
self.assertEqual( self.shark.fields.count("tcp") , 5)
def test_tshark_ip_fields___(self):
self.shark = tshark.Tshark("tshark", "input\\", "output\\")
self.shark = tshark.Tshark("tshark", "input\\", "output\\")
self.shark.add_fields_by_category("ip")
self.assertEqual( self.shark.fields.count("ip") , 3)
def test_tshark_icmp_fields_(self):
self.shark = tshark.Tshark("tshark", "input\\", "output\\")
self.shark.add_fields_by_category("icmp")
self.assertEqual( self.shark.fields.count("icmp") , 2)
def test_tshark_dns_fields__(self):
self.shark = tshark.Tshark("tshark", "input\\", "output\\")
self.shark.add_fields_by_category("dns")
self.assertEqual( self.shark.fields.count("dns") , 4)
def test_tshark_frame_fields(self):
self.shark = tshark.Tshark("tshark", "input\\", "output\\")
self.shark.add_fields_by_category("frame")
self.assertEqual( self.shark.fields.count("frame") , 3)
def test_tshark_udp_fields__(self):
self.shark = tshark.Tshark("tshark", "input\\", "output\\")
self.shark.add_fields_by_category("udp")
self.assertEqual( self.shark.fields.count("udp") , 2)
if __name__ == '__main__':
unittest.main(verbosity=2) | none | 1 | 2.887526 | 3 | |
nato.py | draproctor/python | 0 | 6621856 | <filename>nato.py
# Code words of the NATO phonetic alphabet, in letter order A..Z.
_WORDS = (
    "Alpha", "Bravo", "Charlie", "Delta", "Echo", "Foxtrot", "Golf",
    "Hotel", "India", "Juliett", "Kilo", "Lima", "Mike", "November",
    "Oscar", "Papa", "Quebec", "Romeo", "Sierra", "Tango", "Uniform",
    "Victor", "Whiskey", "Xray", "Yankee", "Zulu",
)

# Assemble the same "A = Alpha\n...Z = Zulu\n" table the original literal held.
nato = "".join(
    "{} = {}\n".format(chr(ord("A") + index), word)
    for index, word in enumerate(_WORDS)
)

print(nato)
| <filename>nato.py
# NATO phonetic alphabet table (letter = code word), printed when run.
nato = """A = Alpha
B = Bravo
C = Charlie
D = Delta
E = Echo
F = Foxtrot
G = Golf
H = Hotel
I = India
J = Juliett
K = Kilo
L = Lima
M = Mike
N = November
O = Oscar
P = Papa
Q = Quebec
R = Romeo
S = Sierra
T = Tango
U = Uniform
V = Victor
W = Whiskey
X = Xray
Y = Yankee
Z = Zulu
"""
print(nato)
| en | 0.767304 | A = Alpha B = Bravo C = Charlie D = Delta E = Echo F = Foxtrot G = Golf H = Hotel I = India J = Juliett K = Kilo L = Lima M = Mike N = November O = Oscar P = Papa Q = Quebec R = Romeo S = Sierra T = Tango U = Uniform V = Victor W = Whiskey X = Xray Y = Yankee Z = Zulu | 2.158391 | 2 |
readers/wikipedia.py | cstuartroe/plainclothes | 0 | 6621857 | <filename>readers/wikipedia.py
from .reader import Reader
from urllib import parse as up
from bs4 import BeautifulSoup as bs
import re
articles = []
article_names = ['China','India','United States','Indonesia','Pakistan','Brazil','Nigeria','Bangladesh',
'Russia','Japan','Mexico','Philippines','Egypt','Ethiopia','Vietnam','Germany','Iran',
'Democratic Republic of the Congo','Turkey','Thailand','United Kingdom','France',
'Italy','Canada','South Korea','Australia','Spain','Netherlands','Saudi Arabia','Switzerland',
'European Union',
'Tokyo','Yokohama','Jakarta','Delhi','Manila','Seoul','Karachi','Shanghai','Mumbai',
'New York City','São Paulo','Beijing','Mexico City','Guangzhou','Foshan',
'Osaka','Dhaka','Moscow','Cairo','Bangkok','Los Angeles','Buenos Aires','Kolkata',
'Tehran','Istanbul','Lagos','Tianjin','Shenzhen','Rio de Janeiro','Kinshasa',
'Lima','Chengdu','Paris','Lahore','Bangalore','London','Ho Chi Minh City','Chennai',
'Nagoya','Bogotá','Hyderabad','Chicago','Johannesburg','Taipei',
'Facebook','Youtube','Google','Wikipedia','Apple Inc.','Microsoft','Amazon.com',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','Vajiralongkorn','<NAME>',
'<NAME>','Eminem','The Beatles','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','Rihanna','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>',
'Earth','Sun','Moon','Star',
'Mathematics','Science','History','Writing',
'World War I','World War II',]
for article_name in article_names:
articles.append({"name":article_name,"path":up.quote(article_name.replace(" ","_"))})
class WikipediaReader(Reader):
    """Reader that scrapes plain article text from English Wikipedia pages."""
    def __init__(self,_sources=articles):
        # NOTE(review): the default binds the module-level `articles` list, so
        # all default-constructed readers share (and could mutate) one list --
        # consider `_sources=None` with a copy inside; confirm with Reader's
        # contract before changing.
        self.name = "wikipedia"
        self.url_root = 'https://en.wikipedia.org/wiki/'
        self.sources = _sources
        super(WikipediaReader,self).__init__()
    def get_text(self,document,source):
        """Extract paragraph text from a fetched article's HTML.

        Joins all <p> elements inside the main parser-output div and strips
        bracketed numeric citation markers like "[12]".
        """
        soup = bs(document, 'lxml')
        body = soup.find('div',{'class':'mw-parser-output'})
        #body.find('div',{'id':'toc'}).decompose()
        paras = [para.text for para in body.find_all('p')]
        text = '\n'.join(paras)
        text = re.sub(r'\[[0-9]*\]','',text)
        return text
| <filename>readers/wikipedia.py
from .reader import Reader
from urllib import parse as up
from bs4 import BeautifulSoup as bs
import re
articles = []
article_names = ['China','India','United States','Indonesia','Pakistan','Brazil','Nigeria','Bangladesh',
'Russia','Japan','Mexico','Philippines','Egypt','Ethiopia','Vietnam','Germany','Iran',
'Democratic Republic of the Congo','Turkey','Thailand','United Kingdom','France',
'Italy','Canada','South Korea','Australia','Spain','Netherlands','Saudi Arabia','Switzerland',
'European Union',
'Tokyo','Yokohama','Jakarta','Delhi','Manila','Seoul','Karachi','Shanghai','Mumbai',
'New York City','São Paulo','Beijing','Mexico City','Guangzhou','Foshan',
'Osaka','Dhaka','Moscow','Cairo','Bangkok','Los Angeles','Buenos Aires','Kolkata',
'Tehran','Istanbul','Lagos','Tianjin','Shenzhen','Rio de Janeiro','Kinshasa',
'Lima','Chengdu','Paris','Lahore','Bangalore','London','Ho Chi Minh City','Chennai',
'Nagoya','Bogotá','Hyderabad','Chicago','Johannesburg','Taipei',
'Facebook','Youtube','Google','Wikipedia','Apple Inc.','Microsoft','Amazon.com',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','Vajiralongkorn','<NAME>',
'<NAME>','Eminem','The Beatles','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','Rihanna','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>',
'Earth','Sun','Moon','Star',
'Mathematics','Science','History','Writing',
'World War I','World War II',]
for article_name in article_names:
articles.append({"name":article_name,"path":up.quote(article_name.replace(" ","_"))})
class WikipediaReader(Reader):
def __init__(self,_sources=articles):
self.name = "wikipedia"
self.url_root = 'https://en.wikipedia.org/wiki/'
self.sources = _sources
super(WikipediaReader,self).__init__()
def get_text(self,document,source):
soup = bs(document, 'lxml')
body = soup.find('div',{'class':'mw-parser-output'})
#body.find('div',{'id':'toc'}).decompose()
paras = [para.text for para in body.find_all('p')]
text = '\n'.join(paras)
text = re.sub(r'\[[0-9]*\]','',text)
return text
| en | 0.125835 | #body.find('div',{'id':'toc'}).decompose() | 2.852741 | 3 |
simple-grpc-client/target/test-classes/OpenECOMP_ETE/testsuite/eteutils/eteutils/JSONUtils.py | orhantombul/AplicationManagerGrpcTest | 0 | 6621858 | import json
from deepdiff import DeepDiff
class JSONUtils:
    """JSONUtils is common resource for simple json helper keywords."""

    def json_equals(self, left, right):
        """Return True when *left* and *right* are semantically equal JSON.

        Each argument may be a JSON string or an already-parsed object;
        strings are parsed first.  List ordering is ignored by the deep diff.
        """
        # `basestring` exists only on Python 2 and raises NameError on
        # Python 3; `str` covers JSON text there (it is always unicode).
        left_json = json.loads(left) if isinstance(left, str) else left
        right_json = json.loads(right) if isinstance(right, str) else right
        return DeepDiff(left_json, right_json, ignore_order=True) == {}

    def make_list_into_dict(self, listOfDicts, key):
        """ Converts a list of dicts that contains a field that has a unique key into a dict of dicts """
        d = {}
        if isinstance(listOfDicts, list):
            for thisDict in listOfDicts:
                d[thisDict[key]] = thisDict
        return d

    def find_element_in_array(self, searchedArray, key, value):
        """ Takes in an array and a key value, it will return the items in the array that has a key and value that matches what you pass in """
        return [item for item in searchedArray
                if key in item and item[key] == value]
from deepdiff import DeepDiff
class JSONUtils:
"""JSONUtils is common resource for simple json helper keywords."""
def json_equals(self, left, right):
"""JSON Equals takes in two strings or json objects, converts them into json if needed and then compares them, returning if they are equal or not."""
if isinstance(left, basestring):
left_json = json.loads(left);
else:
left_json = left;
if isinstance(right, basestring):
right_json = json.loads(right);
else:
right_json = right;
ddiff = DeepDiff(left_json, right_json, ignore_order=True);
if ddiff == {}:
return True;
else:
return False;
def make_list_into_dict(self, listOfDicts, key):
""" Converts a list of dicts that contains a field that has a unique key into a dict of dicts """
d = {}
if isinstance(listOfDicts, list):
for thisDict in listOfDicts:
v = thisDict[key]
d[v] = thisDict
return d
def find_element_in_array(self, searchedArray, key, value):
""" Takes in an array and a key value, it will return the items in the array that has a key and value that matches what you pass in """
elements = [];
for item in searchedArray:
if key in item:
if item[key] == value:
elements.append(item);
return elements; | en | 0.910383 | JSONUtils is common resource for simple json helper keywords. JSON Equals takes in two strings or json objects, converts them into json if needed and then compares them, returning if they are equal or not. Converts a list of dicts that contains a field that has a unique key into a dict of dicts Takes in an array and a key value, it will return the items in the array that has a key and value that matches what you pass in | 3.538588 | 4 |
sdk/python/tests/integration/feature_repos/universal/data_sources/postgres.py | vinted/feast | 3 | 6621859 | from typing import Dict, List, Optional
import pandas as pd
from feast.data_source import DataSource
from feast.infra.offline_stores.contrib.postgres_offline_store.postgres import (
PostgreSQLOfflineStoreConfig,
PostgreSQLSource,
)
from feast.infra.utils.postgres.connection_utils import _get_conn, df_to_postgres_table
from feast.repo_config import FeastConfigBaseModel
from tests.integration.feature_repos.universal.data_source_creator import (
DataSourceCreator,
)
class PostgreSQLDataSourceCreator(DataSourceCreator):
    """Feast DataSourceCreator backed by a local PostgreSQL instance.

    Uploads test dataframes into postgres tables and exposes them as
    PostgreSQLSource offline sources; teardown() drops every table created.
    """
    # NOTE(review): class-level mutable list -- all creator instances share
    # one `tables` list; fine for a single test run, surprising otherwise.
    tables: List[str] = []
    def __init__(self, project_name: str, *args, **kwargs):
        super().__init__(project_name)
        self.project_name = project_name
        # Connection settings for the local test database.
        self.offline_store_config = PostgreSQLOfflineStoreConfig(
            type="postgres",
            host="localhost",
            port=5432,
            database="postgres",
            db_schema="public",
            user="postgres",
            password="<PASSWORD>",
        )
    def create_data_source(
        self,
        df: pd.DataFrame,
        destination_name: str,
        suffix: Optional[str] = None,
        timestamp_field="ts",
        created_timestamp_column="created_ts",
        field_mapping: Optional[Dict[str, str]] = None,
    ) -> DataSource:
        """Upload *df* into a project-prefixed table and wrap it as a source."""
        destination_name = self.get_prefixed_table_name(destination_name)
        df_to_postgres_table(self.offline_store_config, df, destination_name)
        # Remember the table so teardown() can drop it later.
        self.tables.append(destination_name)
        return PostgreSQLSource(
            name=destination_name,
            query=f"SELECT * FROM {destination_name}",
            timestamp_field=timestamp_field,
            created_timestamp_column=created_timestamp_column,
            field_mapping=field_mapping or {"ts_1": "ts"},
        )
    def create_offline_store_config(self) -> FeastConfigBaseModel:
        return self.offline_store_config
    def get_prefixed_table_name(self, suffix: str) -> str:
        return f"{self.project_name}_{suffix}"
    def create_saved_dataset_destination(self):
        # FIXME: ...
        return None
    def teardown(self):
        """Drop every table this creator uploaded during the test run."""
        with _get_conn(self.offline_store_config) as conn, conn.cursor() as cur:
            for table in self.tables:
                # Table names come from this class's own prefixing, not user
                # input, so plain string concatenation is acceptable here.
                cur.execute("DROP TABLE IF EXISTS " + table)
| from typing import Dict, List, Optional
import pandas as pd
from feast.data_source import DataSource
from feast.infra.offline_stores.contrib.postgres_offline_store.postgres import (
PostgreSQLOfflineStoreConfig,
PostgreSQLSource,
)
from feast.infra.utils.postgres.connection_utils import _get_conn, df_to_postgres_table
from feast.repo_config import FeastConfigBaseModel
from tests.integration.feature_repos.universal.data_source_creator import (
DataSourceCreator,
)
class PostgreSQLDataSourceCreator(DataSourceCreator):
tables: List[str] = []
def __init__(self, project_name: str, *args, **kwargs):
super().__init__(project_name)
self.project_name = project_name
self.offline_store_config = PostgreSQLOfflineStoreConfig(
type="postgres",
host="localhost",
port=5432,
database="postgres",
db_schema="public",
user="postgres",
password="<PASSWORD>",
)
def create_data_source(
self,
df: pd.DataFrame,
destination_name: str,
suffix: Optional[str] = None,
timestamp_field="ts",
created_timestamp_column="created_ts",
field_mapping: Dict[str, str] = None,
) -> DataSource:
destination_name = self.get_prefixed_table_name(destination_name)
df_to_postgres_table(self.offline_store_config, df, destination_name)
self.tables.append(destination_name)
return PostgreSQLSource(
name=destination_name,
query=f"SELECT * FROM {destination_name}",
timestamp_field=timestamp_field,
created_timestamp_column=created_timestamp_column,
field_mapping=field_mapping or {"ts_1": "ts"},
)
def create_offline_store_config(self) -> FeastConfigBaseModel:
return self.offline_store_config
def get_prefixed_table_name(self, suffix: str) -> str:
return f"{self.project_name}_{suffix}"
def create_saved_dataset_destination(self):
# FIXME: ...
return None
def teardown(self):
with _get_conn(self.offline_store_config) as conn, conn.cursor() as cur:
for table in self.tables:
cur.execute("DROP TABLE IF EXISTS " + table)
| en | 0.402242 | # FIXME: ... | 2.354353 | 2 |
oz/wizard.py | arruda/magic_it_up | 0 | 6621860 | # -*- coding: utf-8 -*-
"""
Do the image processing to find where the player is stepping.
"""
| # -*- coding: utf-8 -*-
"""
Do the image processing to find where the player is stepping.
"""
| en | 0.939211 | # -*- coding: utf-8 -*- Do the image processing to find where player is steping | 1.014898 | 1 |
rain/simul/waitk_agent.py | qq1418381215/caat | 14 | 6621861 | from simuleval.agents import Agent,TextAgent, SpeechAgent
from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS
from typing import List,Dict, Optional
import numpy as np
import math
import torch
from collections import deque
from torch import Tensor
import torch.nn as nn
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.models import FairseqEncoderDecoderModel
from fairseq.data import encoders, Dictionary
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from argparse import Namespace
import rain
from rain.data.transforms import audio_encoder, text_encoder
import logging
logger = logging.getLogger('waitk.agent')
class WordEndChecker(object):
    """Tracks which vocabulary entries begin a (sentencepiece) word.

    Lets the online decoder emit only whole words: subword tokens after the
    last word-begin marker are withheld until the word is complete.
    """

    def __init__(self, vocab: Dictionary):
        self.vocab = vocab
        # Precompute, per vocabulary index, whether that symbol opens a word.
        self.wordbegin = [
            self.is_beginning_of_word(vocab[idx]) for idx in range(len(vocab))
        ]

    def is_beginning_of_word(self, x: str) -> bool:
        """Special symbols and sentencepiece-marked pieces start a word."""
        if x in ("<unk>", "<s>", "</s>", "<pad>"):
            return True
        return x.startswith("\u2581")

    def string(self, tokens, is_finished=False, removed=0):
        """Detokenize *tokens*, holding back any trailing partial word.

        Returns (decoded_text, count_of_trailing_subwords_withheld).  When
        *is_finished*, the last *removed* tokens are dropped instead and
        *removed* is echoed back.
        """
        host_tokens = tokens.cpu()
        total = len(host_tokens)
        cutoff = total - removed
        if is_finished:
            # Sentence complete: decode everything up to the cutoff.
            text = self.vocab.string(host_tokens[:cutoff], bpe_symbol="sentencepiece")
            return text, removed
        # Locate the last word-begin position at or before the cutoff; only
        # tokens strictly before it form complete words.
        last_bow = 0
        for pos in reversed(range(min(cutoff + 1, total))):
            if self.wordbegin[host_tokens[pos]]:
                last_bow = pos
                break
        text = self.vocab.string(host_tokens[:last_bow], bpe_symbol="sentencepiece")
        return text, total - last_bow
class OnlineSearcher(nn.Module):
    """Incremental beam-search decoder for simultaneous (online) translation.

    Maintains streaming encoder state plus a committed target prefix.  Each
    call to search() consumes newly arrived source frames, extends the beam
    by a few tokens, detokenizes only the stable complete words, and rolls
    the decoder state back to the committed prefix so later source input can
    revise the forecasted tail.
    """
    def __init__(
        self, models:List[FairseqEncoderDecoderModel],
        vocab,
        eos=1,
        bos=1,
        eager=False,
        stop_early=False
    ):
        super().__init__()
        self.models= nn.ModuleList(models)
        self.bos=bos
        self.eos= eos
        # Number of already-decoded-but-uncommitted subwords carried over
        # from the previous search() call.
        self.reserve_step = 0
        self.vocab = vocab
        self.vocab_size= len(vocab)
        self.pad= vocab.pad()
        self.word_end= WordEndChecker(vocab)
        # eager: emit words without holding back the forecasted tail.
        self.eager= eager
        # stop_early: commit immediately once EOS appears inside the stable
        # (non-forecast) part of the best hypothesis.
        self.stop_early= stop_early
    @property
    def init_frames(self):
        # Frames the encoder needs before producing its first output.
        return self.models[0].encoder.init_frames
    @property
    def step_frames(self):
        # Frames consumed per subsequent encoder step.
        return self.models[0].encoder.step_frames
    def get_init_frames(self, wait_block=4):
        """Source frames to buffer before the first decode (wait-k style)."""
        return self.init_frames + self.step_frames*(wait_block-1)
    def get_step_frames(self, step_block=1):
        """Source frames consumed per subsequent read action."""
        return self.step_frames*step_block
    def reorder_states(self, encoder_outs,incremental_states, new_order):
        """Reorder per-model encoder outputs and decoder caches along the beam."""
        for model in self.models:
            encoder_outs[model] =model.encoder.reorder_encoder_out(encoder_outs[model], new_order)
            model.decoder.reorder_incremental_state_scripting(
                incremental_states[model],
                new_order
            )
    def search(
        self, src,src_lengths,
        prev_tokens, encoder_outs,
        incremental_states,beam=5, fwd_step= 1, forecast_step = 1,
        is_end=False
    ):
        """Advance the hypothesis given (optionally) new source frames.

        Returns (deque_of_new_words, committed_prefix_tokens, finished_flag).
        forecast_step tokens are decoded speculatively and rolled back so the
        emitted words stay stable; when is_end, decoding drains to EOS.
        """
        ninf= float('-inf')
        if src is not None:
            self.fwd_encoder(src, src_lengths, encoder_outs, incremental_states, is_end)
        # Decode carried-over subwords plus the new step plus the forecast.
        steps= self.reserve_step + fwd_step + forecast_step
        if is_end:
            # Source exhausted: drain, capped at 100 extra tokens.
            steps= 100
        # Broadcast the single committed prefix across all beam slots.
        new_order = prev_tokens.new(beam).fill_(0)
        self.reorder_states(encoder_outs, incremental_states, new_order)
        prev_tokens = prev_tokens.repeat(beam,1)
        init_len= prev_tokens.shape[1]
        finished= prev_tokens.new(beam).fill_(0).bool()
        scores= prev_tokens.new(beam,1).float().fill_(0)
        for nstep in range(steps):
            lprobs= self.fwd_decoder_step(prev_tokens, encoder_outs, incremental_states)
            # if not is_end:
            #     lprobs[:, self.eos] = ninf
            lprobs[:, self.pad] = ninf
            # Finished hypotheses may only repeat EOS with zero extra cost.
            lprobs[finished, :self.eos]= ninf
            lprobs[finished, self.eos]= 0
            lprobs[finished,self.eos+1:] = ninf
            #lprobs: beam*vocab
            expand_score= scores + lprobs
            if nstep ==0:
                # All beams start identical; expand only the first one.
                expand_score= expand_score[:1]
            tscore, tidx= expand_score.view(-1).topk(beam)
            next_tokens= tidx %self.vocab_size
            new_order= tidx //self.vocab_size
            scores[:]= tscore.unsqueeze(1)
            prev_tokens= prev_tokens.index_select(0, new_order)
            prev_tokens= torch.cat([prev_tokens, next_tokens.unsqueeze(1)], dim=1)
            self.reorder_states(encoder_outs, incremental_states, new_order)
            finished= finished | next_tokens.eq(self.eos)
            if finished.all():
                break
        # Early commit: best beam already produced EOS in its stable part.
        if not is_end and self.stop_early and prev_tokens[0][-1-forecast_step] == self.eos:
            prev_tokens= prev_tokens[0]
            out_tokens= prev_tokens[ init_len:]
            removed = out_tokens.eq(self.eos).sum().item()
            out_words, reserved = self.word_end.string(out_tokens, is_finished=True, removed= removed)
            return deque( out_words.split()), prev_tokens,True
        # Length-normalize scores (EOS tokens excluded) and pick the best beam.
        seqlen = prev_tokens.ne(self.eos).float().sum(1) - init_len+1
        scores= scores.squeeze(1)/seqlen
        score, idx= scores.max(0)
        new_order= idx.view(1)
        self.reorder_states(encoder_outs, incremental_states, new_order)
        prev_tokens= prev_tokens[idx]
        out_tokens= prev_tokens[ init_len:]
        removed = out_tokens.eq(self.eos).sum().item()
        # Hold back at least the forecast tokens (adjusted if the beam
        # stopped early) unless the sentence is finished.
        ignore_length= max(removed, forecast_step -(steps- len(out_tokens))) if not is_end else removed
        if self.eager:
            out_words, reserved = self.word_end.string(out_tokens, is_finished=True, removed= ignore_length)
        else:
            out_words, reserved= self.word_end.string(out_tokens, is_finished= is_end, removed= ignore_length)
        # Trim the uncommitted tail and roll the decoder cache back to match.
        rollback_to = len(prev_tokens) - reserved
        prev_tokens= prev_tokens[:rollback_to]
        self.rollback(incremental_states, rollback_to)
        self.reserve_step = reserved - forecast_step + (steps-len(out_tokens))
        assert is_end or self.reserve_step >=0
        return deque( out_words.split()), prev_tokens,False
    def fwd_decoder_step(self, tokens, encoder_outs, incremental_states,temperature=1.0):
        """One incremental decoder step; returns last-position log-probs."""
        log_probs = []
        for i, model in enumerate(self.models):
            encoder_out = encoder_outs[model]
            # decode each model
            logits,_ = model.decoder.forward(
                tokens,
                encoder_out=encoder_out,
                incremental_state=incremental_states[model],
                attn_mask=False
            )
            logits = logits[:,-1:,:]/temperature
            lprobs = utils.log_softmax(logits, dim=-1)
            lprobs = lprobs[:, -1, :]
            if len(self.models) == 1:
                # Single model: skip the ensemble averaging below.
                return lprobs
            log_probs.append(lprobs)
        # NOTE(review): `self.models_size` is never assigned in this class, so
        # this ensemble path would raise AttributeError with >1 model --
        # presumably len(self.models) was intended; confirm before ensembling.
        avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
            self.models_size
        )
        return avg_probs
    def rollback(self,incremental_states, step_to_keep):
        """Discard cached decoder state beyond the committed prefix length."""
        for model in self.models:
            model.decoder.rollback_steps(incremental_states[model], step_to_keep)
    def init_states(self):
        """Reset per-sentence decoding state; returns empty per-model dicts."""
        encoder_outs={m:{} for m in self.models}
        incremental_states={m:{} for m in self.models}
        self.reserve_step= 0
        return encoder_outs, incremental_states
    def fwd_encoder(
        self, src:Tensor, src_lengths:Tensor,
        encoder_outs: Optional[Dict[nn.Module,Dict[str, List[Tensor]]]] ,
        incremental_states:Optional[Dict[nn.Module, Dict[str, Dict[str, Optional[Tensor]]]]],
        finished = False
    ):
        """Encode newly arrived frames and append them to the running output."""
        for model in self.models:
            curr_out= model.encoder(
                src, src_lengths,
                incremental_state=incremental_states[model],
                finished=finished
            )
            if "encoder_out" in encoder_outs[model]:
                # Append along time (dim 0) and extend the padding mask.
                pre= encoder_outs[model]["encoder_out"][0]
                pre_mask= encoder_outs[model]["encoder_padding_mask"][0]
                encoder_outs[model]["encoder_out"][0] = torch.cat([pre,curr_out["encoder_out"][0]], dim=0)
                encoder_outs[model]["encoder_padding_mask"][0] = torch.cat([pre_mask,curr_out["encoder_padding_mask"][0]], dim=1)
            else:
                encoder_outs[model]=curr_out
        return encoder_outs, incremental_states
class NaiveWaitk(OnlineSearcher):
    """Wait-k searcher without forecast/rollback.

    Unlike :class:`OnlineSearcher`, generated subwords are never rolled back
    from the decoder cache; trailing subwords that do not yet complete a
    word are kept in ``self.reserved_subwords`` and prepended to the next
    step's output instead.
    """

    def __init__(
        self, models: List[FairseqEncoderDecoderModel],
        vocab,
        eos=1,
        bos=1,
        eager=False,
        stop_early=False
    ):
        # Pass eager/stop_early straight through instead of re-assigning
        # them after the fact (the parent constructor already accepts both).
        super().__init__(models, vocab, eos, bos, eager=eager, stop_early=stop_early)
        # Subword ids produced earlier but not yet emitted as a full word.
        self.reserved_subwords = None

    def init_states(self):
        """Reset per-sentence state, including any reserved subwords."""
        encoder_outs, incremental_states = super().init_states()
        self.reserved_subwords = None
        return encoder_outs, incremental_states

    def search(
        self, src, src_lengths,
        prev_tokens, encoder_outs,
        incremental_states, beam=5, fwd_step=1, forecast_step=1,
        is_end=False
    ):
        """Advance decoding by ``fwd_step`` beam-search steps.

        Returns ``(deque of newly completed words, accepted token tensor,
        eos_found)``.  ``forecast_step`` is accepted for interface
        compatibility but ignored: naive wait-k never forecasts.
        """
        forecast_step = 0
        ninf = float('-inf')
        if src is not None:
            self.fwd_encoder(src, src_lengths, encoder_outs, incremental_states, is_end)
        steps = fwd_step
        if is_end:
            # Source exhausted: decode generously until every beam hits EOS.
            steps = 40
        # Duplicate the cached single-hypothesis state into `beam` copies.
        new_order = prev_tokens.new(beam).fill_(0)
        self.reorder_states(encoder_outs, incremental_states, new_order)
        prev_tokens = prev_tokens.repeat(beam, 1)
        init_len = prev_tokens.shape[1]
        finished = prev_tokens.new(beam).fill_(0).bool()
        scores = prev_tokens.new(beam, 1).float().fill_(0)
        for nstep in range(steps):
            lprobs = self.fwd_decoder_step(prev_tokens, encoder_outs, incremental_states)
            # Never emit padding; finished beams may only repeat EOS with
            # probability 1, which freezes their cumulative score.
            lprobs[:, self.pad] = ninf
            lprobs[finished, :self.eos] = ninf
            lprobs[finished, self.eos] = 0
            lprobs[finished, self.eos + 1:] = ninf
            # lprobs: beam x vocab
            expand_score = scores + lprobs
            if nstep == 0:
                # All beams are identical on the first step; keep one row so
                # topk yields `beam` distinct continuations.
                expand_score = expand_score[:1]
            tscore, tidx = expand_score.view(-1).topk(beam)
            next_tokens = tidx % self.vocab_size
            new_order = tidx // self.vocab_size
            scores[:] = tscore.unsqueeze(1)
            prev_tokens = prev_tokens.index_select(0, new_order)
            prev_tokens = torch.cat([prev_tokens, next_tokens.unsqueeze(1)], dim=1)
            self.reorder_states(encoder_outs, incremental_states, new_order)
            finished = finished | next_tokens.eq(self.eos)
            if finished.all():
                break
        if not is_end and self.stop_early and prev_tokens[0][-1] == self.eos:
            # Best beam already ended: flush everything and report EOS.
            prev_tokens = prev_tokens[0]
            out_tokens = prev_tokens[init_len:]
            if self.reserved_subwords is not None and len(self.reserved_subwords) > 0:
                out_tokens = torch.cat((self.reserved_subwords, out_tokens), dim=0)
            # .item(): keep `removed` a plain int (consistent with
            # OnlineSearcher.search) instead of a 0-dim tensor.
            removed = out_tokens.eq(self.eos).sum().item()
            ignore_length = removed
            out_words, reserved = self.word_end.string(out_tokens, is_finished=True, removed=ignore_length)
            return deque(out_words.split()), prev_tokens, True
        # Pick the best beam by length-normalised score.
        seqlen = prev_tokens.ne(self.eos).float().sum(1) - init_len + 1
        scores = scores.squeeze(1) / seqlen
        score, idx = scores.max(0)
        new_order = idx.view(1)
        self.reorder_states(encoder_outs, incremental_states, new_order)
        prev_tokens = prev_tokens[idx]
        out_tokens = prev_tokens[init_len:]
        if self.reserved_subwords is not None and len(self.reserved_subwords) > 0:
            out_tokens = torch.cat((self.reserved_subwords, out_tokens), dim=0)
        removed = out_tokens.eq(self.eos).sum().item()
        ignore_length = removed
        if self.eager:
            out_words, reserved = self.word_end.string(out_tokens, is_finished=True, removed=ignore_length)
        else:
            out_words, reserved = self.word_end.string(out_tokens, is_finished=is_end, removed=ignore_length)
        # Hold back trailing subwords that do not yet end a word; they are
        # re-emitted in front of the next step's output.
        self.reserved_subwords = None
        if reserved > 0:
            self.reserved_subwords = prev_tokens[-reserved:]
        return deque(out_words.split()), prev_tokens, False
class WaitkAgent(Agent):
    """SimulEval agent implementing block-wise wait-k simultaneous decoding.

    Incoming source units (words for text, audio sample shards for speech)
    are buffered and turned into model features by ``_gen_frames``; whenever
    enough new input has arrived, ``infer`` asks the searcher
    (``OnlineSearcher`` or ``NaiveWaitk``) for more target words.  Completed
    words are queued in ``self.hypos`` and released one per ``predict``.
    """

    def __init__(self, args):
        # Fill in defaults for any wait-k options the caller left as None.
        self._set_default_args(args)
        super().__init__(args)
        # Make the `rain` user module (tasks/models) visible to fairseq.
        utils.import_user_module("rain")
        self.cpu = args.cpu
        # Wait-k schedule: blocks to read before the first write, blocks per
        # subsequent read, and subwords generated / forecast per write.
        self.wait_blocks = args.wait_blocks
        self.step_read_blocks = args.step_read_blocks
        self.step_generate = args.step_generate
        self.step_forecast = args.step_forecast
        self.beam = args.beam
        # Minimal namespace holding just the fields the "s2s" task consumes.
        task_cfg = Namespace(
            task="s2s",
            data=args.train_dir, task_type=args.task_type,
            source_lang=args.slang,
            target_lang=args.tlang,
            text_config=args.text_encoder, audio_cfg=args.audio_encoder,
            bpe_dropout=0,
        )
        self.task = tasks.setup_task(task_cfg)
        self.audio_transformer = self.task.audio_transform_test
        self.text_transformer = self.task.src_encoder
        self.src_dict = self.task.src_dict
        if args.model_path is None:
            raise ValueError("--model-path needed")
        models, saved_cfg = checkpoint_utils.load_model_ensemble(
            utils.split_paths(args.model_path),
            arg_overrides=None,
            task=self.task,
        )
        self.tgt_dict: Dictionary = self.task.target_dictionary
        self.eos = self.tgt_dict.eos()
        # fairseq convention: decoding starts from EOS unless --infer-bos
        # overrides it.
        self.bos = self.tgt_dict.eos() if args.infer_bos is None else args.infer_bos
        # if self.data_type == "speech":
        #     self.searcher= SpeechSearcher(models, self.tgt_dict, eos= self.eos, bos= self.bos)
        if args.naive_waitk:
            self.searcher = NaiveWaitk(
                models, self.tgt_dict, eos=self.eos, bos=self.bos, eager=args.eager,
                stop_early=args.stop_early
            )
        else:
            self.searcher = OnlineSearcher(
                models, self.tgt_dict, eos=self.eos, bos=self.bos, eager=args.eager,
                stop_early=args.stop_early
            )
        if not self.cpu:
            self.searcher.cuda()
        self.searcher.eval()
        # Per-sentence streaming state; (re)set in initialize_states().
        self.frames = None
        self.finished = True
        self.processed_frames = 0
        self.processed_units = 0
        self.hypos = deque()
        self.prev_tokens, self.encoder_outs, self.incremental_states = None, None, None

    def _set_default_args(self, args):
        """Fill wait-k options the caller left as None with their defaults."""
        args.wait_blocks = 4 if args.wait_blocks is None else args.wait_blocks
        args.step_read_blocks = 1 if args.step_read_blocks is None else args.step_read_blocks
        args.step_generate = 1 if args.step_generate is None else args.step_generate
        args.step_forecast = 0 if args.step_forecast is None else args.step_forecast

    @staticmethod
    def add_args(parser):
        """Register this agent's command line options on ``parser``."""
        parser.add_argument(
            '--stop-early', action='store_true', help='stop early mode for waitk'
        )
        parser.add_argument(
            "--cpu", action="store_true", help="use cpu instead of cuda"
        )
        parser.add_argument(
            "--eager", default=False, action="store_true", help="output words without word end check"
        )
        parser.add_argument(
            "--task-type", default="mt", metavar='ttype',
            help='task type :st,mt'
        )
        parser.add_argument(
            "--slang", default="en", metavar='SLANG',
            help='task type :st,mt'
        )
        parser.add_argument(
            "--tlang", default="de", metavar='TLANG',
            help='task type :st,mt'
        )
        parser.add_argument(
            "--infer-bos", default=None, type=int,
            help="bos for decoding"
        )
        parser.add_argument(
            "--model-path", default=None, type=str,
            help="path for models used (may be splited by `:`)"
        )
        parser.add_argument(
            "--wait-blocks", default=None, type=int,
            help="start translation after wait_blocks samples read, default None and we may set its default in our class"
        )
        parser.add_argument(
            "--beam", default=5, type=int,
            help="beam size"
        )
        parser.add_argument(
            "--step-read-blocks", default=None, type=int,
            help="do translation while read each blocks input"
        )
        parser.add_argument(
            "--step-generate", default=None, type=int,
            help="generate tokens for each step, NOT equal to output words, if current step output un-complete subwords, "
                 "reserve to output in next step"
        )
        parser.add_argument(
            "--step-forecast", default=None, type=int,
            help="forecast subword numbers for each step, only for search"
        )
        parser.add_argument(
            "--train-dir", default="exp_data/must_filtered2", type=str,
            help="train dir, for other resource such as dict"
        )
        parser.add_argument(
            "--text-encoder", default="text_cfg", type=str,
            help="text encoder"
        )
        parser.add_argument(
            "--audio-encoder", default="audio_cfg", type=str,
            help="audio-encoder"
        )
        parser.add_argument(
            "--naive-waitk", action="store_true", help="use naive waitk"
        )
        return parser

    def initialize_states(self, states):
        """Reset all per-sentence state before a new source sample."""
        # we recompute feature at each step, the waste seems to be acceptable
        logger.info(f"new sample,id={states.instance_id}")
        # Feature buffer: (frames, 80) fbank rows for speech, or replaced by
        # a 1-D token tensor for text in _gen_frames.
        self.input_fea = torch.Tensor(0, 80)
        self.finished = False
        self.processed_frames = 0
        self.processed_units = 0
        self.encoder_outs, self.incremental_states = self.searcher.init_states()
        self.prev_tokens = torch.LongTensor([self.bos])
        if not self.cpu:
            self.prev_tokens = self.prev_tokens.cuda()
        self.hypos = deque()

    def expected_init_frames(self):
        """Feature frames required before the first translation step."""
        return self.searcher.get_init_frames(self.wait_blocks)

    def expected_step_frames(self):
        """Feature frames required between subsequent translation steps."""
        return self.searcher.get_step_frames(self.step_read_blocks)

    def expected_init_units(self):
        """Raw source units to read before the first feature refresh.

        NOTE(review): falls through (returns None) for unknown data types;
        presumably unreachable -- confirm.
        """
        frames = self.searcher.get_init_frames(self.wait_blocks)
        if self.data_type == "text":
            # return frames
            # we must do sentencepiece and then get its length
            return 1
        elif self.data_type == "speech":
            # units per 10ms
            return frames

    def expected_step_units(self):
        """Raw source units to read between feature refreshes."""
        frames = self.searcher.get_step_frames(self.step_read_blocks)
        if self.data_type == "text":
            return 1
        elif self.data_type == "speech":
            # units per ms
            # return frames*10
            return frames

    def _gen_frames(self, states):
        """Convert newly read source units into model input features.

        Text: re-tokenize the whole prefix (cheap).  Speech: fbank-encode
        only the new samples, with some overlap so filterbank windows at the
        chunk boundary stay consistent.
        """
        source = states.source
        if self.data_type == "text":
            src = ' '.join(source)
            subwords = self.text_transformer.encode(src)
            tokens = self.src_dict.encode_line(subwords, add_if_not_exist=False, append_eos=False).long()
            # tokens = torch.cat((torch.LongTensor([self.tgt_dict.eos()]),tokens), dim=0)
            if states.finish_read():
                tokens = torch.cat((tokens, torch.LongTensor([self.src_dict.eos()])), dim=0)
            self.input_fea = tokens
            self.processed_units = len(source)
        elif self.data_type == "speech":
            # assumes 16 kHz input, i.e. 16 samples per ms -- TODO confirm
            rate_ms = 16
            # Drop a trailing shard shorter than 10 ms; wait for more audio.
            if len(source[-1]) < 160:
                source = source[:-1]
            new_frames = len(source) - self.input_fea.shape[0]
            if new_frames <= 0:
                return
            if self.input_fea.shape[0] == 0:
                # First chunk: left-pad 15 ms of silence for the fbank window.
                pre = torch.FloatTensor(1, 15 * rate_ms).fill_(0)
                new_src = sum(source[-new_frames:], [])
                new_src = torch.FloatTensor(new_src).unsqueeze(0)
                new_src = torch.cat([pre, new_src], dim=1)
            else:
                # Re-read two earlier shards minus 5 ms -- presumably to keep
                # boundary fbank windows continuous; verify against the
                # offline feature extraction.
                new_src = sum(source[-(new_frames + 2):], [])
                new_src = new_src[5 * rate_ms:]
                new_src = torch.FloatTensor(new_src).unsqueeze(0)
            # Scale int16 PCM amplitudes into the [-1, 1) float range.
            new_src = new_src * (2 ** -15)
            fbank = audio_encoder._get_fbank(new_src, sample_rate=16000, n_bins=80)
            fbank = self.audio_transformer(fbank)
            self.input_fea = torch.cat([self.input_fea, fbank], dim=0)
            self.processed_units = len(source)
        else:
            raise ValueError(f"unknown data type {self.data_type}")

    def policy(self, states):
        """Decide whether SimulEval should READ more input or WRITE a word."""
        if len(self.hypos) > 0:
            return WRITE_ACTION
        source = states.source
        if self.finished:
            # Translation already ended; drain remaining hypos, else keep
            # reading until the evaluator moves on.
            if len(self.hypos) > 0:
                return WRITE_ACTION
            else:
                return READ_ACTION
        # Enough new raw units arrived (or input ended): refresh features.
        if (len(source) >= self.expected_init_units() and self.processed_units == 0) or \
                (len(source) - self.processed_units >= self.expected_step_units() and self.processed_units > 0) or \
                states.finish_read():
            self._gen_frames(states)
            if states.finish_read():
                self.infer(states)
            # Translate once a full encoder block of new features exists.
            # (Safe after the finish_read infer above: that call advances
            # processed_frames, so this condition cannot re-trigger.)
            if (self.processed_frames == 0 and len(self.input_fea) >= self.expected_init_frames()) or \
                    (self.processed_frames > 0 and len(self.input_fea) - self.processed_frames >= self.expected_step_frames()):
                self.infer(states)
        if len(self.hypos) > 0:
            return WRITE_ACTION
        else:
            return READ_ACTION

    def infer(self, states):
        """Run one search step over unconsumed features; queue new words."""
        assert len(self.hypos) == 0
        new_frames = len(self.input_fea) - self.processed_frames
        # assert new_frames >0
        if new_frames > 0:
            fea = self.input_fea[-new_frames:]
            fea = fea.unsqueeze(0)
            fea_lengths = fea.new(1).fill_(fea.shape[1])
            if not self.cpu:
                fea = fea.cuda()
                fea_lengths = fea_lengths.cuda()
        else:
            # Can happen on finish_read() with no new audio; the searcher
            # then decodes purely from cached encoder state.
            print(f"infer with no new frames, finished= {states.finish_read()}")
            fea = None
            fea_lengths = None
        if self.processed_frames == 0:
            expected_step = 1
        else:
            # Scale the generation budget with how many step-blocks arrived.
            expected_step = max(new_frames // self.expected_step_frames(), 1)
        with torch.no_grad():
            out_words, tokens, eos_found = self.searcher.search(
                fea, fea_lengths,
                self.prev_tokens, self.encoder_outs,
                self.incremental_states, beam=self.beam,
                fwd_step=self.step_generate * expected_step,
                forecast_step=self.step_forecast,
                is_end=states.finish_read(),
            )
        self.prev_tokens = tokens
        # if states.finish_read():
        #     print(f"target:{self.searcher.vocab.string(self.prev_tokens)}")
        #     # run whole data offline
        #     model= self.searcher.models[0]
        #     def sub_encoder(fea, incremental_state= None):
        #         flen = fea.new(1).fill_(fea.shape[1]).cuda()
        #         enc_out = model.encoder(fea, flen, incremental_state = incremental_state)
        #         return enc_out["encoder_out"][0]
        #     fea = self.input_fea
        #     fea= fea.unsqueeze(0).cuda()
        #     import pdb;pdb.set_trace()
        #     fea_lengths= fea.new(1).fill_(fea.shape[1]).cuda()
        #     encoder_out= model.encoder(fea, fea_lengths, finished=True)
        #     prev_tokens= torch.LongTensor([self.bos]).cuda().unsqueeze(0)
        #     incremental_state={}
        #     for step in range(10):
        #         logits, _= model.decoder.forward(
        #             prev_tokens,
        #             encoder_out=encoder_out,
        #             incremental_state=incremental_state,
        #             attn_mask=False
        #         )
        #         lprobs= utils.log_softmax(logits, dim=-1)
        #         v, next_token= lprobs.max(-1)
        #         prev_tokens= torch.cat((prev_tokens, next_token),dim=1)
        #     import pdb;pdb.set_trace()
        #     print(f"new:{self.searcher.vocab.string(prev_tokens)}")
        self.processed_frames = len(self.input_fea)
        if states.finish_read():
            out_words.append(DEFAULT_EOS)
        if eos_found:
            out_words.append(DEFAULT_EOS)
            self.finished = True
        self.hypos.extend(out_words)

    def predict(self, states):
        """Emit the next queued target word; policy() guarantees non-empty."""
        assert (len(self.hypos) > 0)
        return self.hypos.popleft()
from simuleval.agents import Agent,TextAgent, SpeechAgent
from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS
from typing import List,Dict, Optional
import numpy as np
import math
import torch
from collections import deque
from torch import Tensor
import torch.nn as nn
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.models import FairseqEncoderDecoderModel
from fairseq.data import encoders, Dictionary
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from argparse import Namespace
import rain
from rain.data.transforms import audio_encoder, text_encoder
import logging
logger = logging.getLogger('waitk.agent')
class WordEndChecker(object):
    """Decides how many trailing subwords must be withheld so that only
    complete words are emitted during streaming decoding."""

    def __init__(self, vocab: Dictionary):
        self.vocab = vocab
        # wordbegin[i] is True iff vocab entry i starts a new word.
        self.wordbegin = [
            self.is_beginning_of_word(vocab[idx]) for idx in range(len(vocab))
        ]

    def is_beginning_of_word(self, x: str) -> bool:
        """A sentencepiece token begins a word iff it carries the U+2581
        marker; the special symbols count as word boundaries too."""
        specials = ("<unk>", "<s>", "</s>", "<pad>")
        return True if x in specials else x.startswith("\u2581")

    def string(self, tokens, is_finished=False, removed=0):
        """Detokenize ``tokens``, withholding an incomplete trailing word.

        Returns ``(text, n_reserved)`` where ``n_reserved`` counts the
        subwords held back.  When ``is_finished`` only the last ``removed``
        tokens are withheld; otherwise everything from the last
        word-beginning onward is reserved.
        """
        on_cpu = tokens.cpu()
        total = len(on_cpu)
        end_pos = total - removed
        if is_finished:
            text = self.vocab.string(on_cpu[:end_pos], bpe_symbol="sentencepiece",)
            return text, removed
        last_bow = 0
        for pos in range(min(end_pos + 1, total)):
            if self.wordbegin[on_cpu[pos]]:
                last_bow = pos
        text = self.vocab.string(on_cpu[:last_bow], bpe_symbol="sentencepiece",)
        return text, total - last_bow
class OnlineSearcher(nn.Module):
    """Incremental beam-search wrapper around an ensemble of
    encoder-decoder models for simultaneous (streaming) translation.

    Per-model encoder outputs and decoder caches persist between calls.
    Each ``search`` call may feed new source frames to the encoder, decode
    a few more subwords with beam search, emit only the completed words
    (via :class:`WordEndChecker`) and roll the decoder cache back to the
    last accepted subword.
    """

    def __init__(
        self, models: List[FairseqEncoderDecoderModel],
        vocab,
        eos=1,
        bos=1,
        eager=False,
        stop_early=False
    ):
        super().__init__()
        self.models = nn.ModuleList(models)
        self.bos = bos
        self.eos = eos
        # Subword budget carried over from the previous step (tokens that
        # were generated but then rolled back).
        self.reserve_step = 0
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.pad = vocab.pad()
        self.word_end = WordEndChecker(vocab)
        self.eager = eager
        self.stop_early = stop_early

    @property
    def init_frames(self):
        # Frames needed before the encoder can emit its first block.
        return self.models[0].encoder.init_frames

    @property
    def step_frames(self):
        # Frames consumed per subsequent encoder block.
        return self.models[0].encoder.step_frames

    def get_init_frames(self, wait_block=4):
        """Frames to read before the first translation (wait-k lagging)."""
        return self.init_frames + self.step_frames * (wait_block - 1)

    def get_step_frames(self, step_block=1):
        """Frames to read between consecutive translation steps."""
        return self.step_frames * step_block

    def reorder_states(self, encoder_outs, incremental_states, new_order):
        """Reorder cached encoder/decoder state to follow beam reordering."""
        for model in self.models:
            encoder_outs[model] = model.encoder.reorder_encoder_out(encoder_outs[model], new_order)
            model.decoder.reorder_incremental_state_scripting(
                incremental_states[model],
                new_order
            )

    def search(
        self, src, src_lengths,
        prev_tokens, encoder_outs,
        incremental_states, beam=5, fwd_step=1, forecast_step=1,
        is_end=False
    ):
        """Advance decoding after (optionally) feeding new source frames.

        Decodes ``reserve_step + fwd_step + forecast_step`` subwords with
        beam search, emits only completed words, then rolls the cache back
        to the last accepted subword.  Returns ``(deque of new words,
        accepted token tensor, eos_found)``.
        """
        ninf = float('-inf')
        if src is not None:
            self.fwd_encoder(src, src_lengths, encoder_outs, incremental_states, is_end)
        # Budget = leftover from last step + fresh tokens + forecast margin.
        steps = self.reserve_step + fwd_step + forecast_step
        if is_end:
            # Source exhausted: decode until all beams finish (cap at 100).
            steps = 100
        # Duplicate the cached single-hypothesis state into `beam` copies.
        new_order = prev_tokens.new(beam).fill_(0)
        self.reorder_states(encoder_outs, incremental_states, new_order)
        prev_tokens = prev_tokens.repeat(beam, 1)
        init_len = prev_tokens.shape[1]
        finished = prev_tokens.new(beam).fill_(0).bool()
        scores = prev_tokens.new(beam, 1).float().fill_(0)
        for nstep in range(steps):
            lprobs = self.fwd_decoder_step(prev_tokens, encoder_outs, incremental_states)
            # Never emit padding; finished beams may only repeat EOS with
            # probability 1, which freezes their cumulative score.
            lprobs[:, self.pad] = ninf
            lprobs[finished, :self.eos] = ninf
            lprobs[finished, self.eos] = 0
            lprobs[finished, self.eos + 1:] = ninf
            # lprobs: beam x vocab
            expand_score = scores + lprobs
            if nstep == 0:
                # All beams are identical on the first step; keep one row so
                # topk yields `beam` distinct continuations.
                expand_score = expand_score[:1]
            tscore, tidx = expand_score.view(-1).topk(beam)
            next_tokens = tidx % self.vocab_size
            new_order = tidx // self.vocab_size
            scores[:] = tscore.unsqueeze(1)
            prev_tokens = prev_tokens.index_select(0, new_order)
            prev_tokens = torch.cat([prev_tokens, next_tokens.unsqueeze(1)], dim=1)
            self.reorder_states(encoder_outs, incremental_states, new_order)
            finished = finished | next_tokens.eq(self.eos)
            if finished.all():
                break
        if not is_end and self.stop_early and prev_tokens[0][-1 - forecast_step] == self.eos:
            # Best beam reached EOS before the forecast margin: flush all.
            prev_tokens = prev_tokens[0]
            out_tokens = prev_tokens[init_len:]
            removed = out_tokens.eq(self.eos).sum().item()
            out_words, reserved = self.word_end.string(out_tokens, is_finished=True, removed=removed)
            return deque(out_words.split()), prev_tokens, True
        # Pick the best beam by length-normalised score.
        seqlen = prev_tokens.ne(self.eos).float().sum(1) - init_len + 1
        scores = scores.squeeze(1) / seqlen
        score, idx = scores.max(0)
        new_order = idx.view(1)
        self.reorder_states(encoder_outs, incremental_states, new_order)
        prev_tokens = prev_tokens[idx]
        out_tokens = prev_tokens[init_len:]
        removed = out_tokens.eq(self.eos).sum().item()
        # Withhold at least the forecast tokens not yet justified by real
        # input (unless the whole source has been read).
        ignore_length = max(removed, forecast_step - (steps - len(out_tokens))) if not is_end else removed
        if self.eager:
            out_words, reserved = self.word_end.string(out_tokens, is_finished=True, removed=ignore_length)
        else:
            out_words, reserved = self.word_end.string(out_tokens, is_finished=is_end, removed=ignore_length)
        # Drop the reserved tail from both the token history and the decoder
        # cache; credit it back to the next step's generation budget.
        rollback_to = len(prev_tokens) - reserved
        prev_tokens = prev_tokens[:rollback_to]
        self.rollback(incremental_states, rollback_to)
        self.reserve_step = reserved - forecast_step + (steps - len(out_tokens))
        assert is_end or self.reserve_step >= 0
        return deque(out_words.split()), prev_tokens, False

    def fwd_decoder_step(self, tokens, encoder_outs, incremental_states, temperature=1.0):
        """One incremental decoder step; returns (beam, vocab) log-probs,
        averaged over the ensemble when there is more than one model."""
        log_probs = []
        for i, model in enumerate(self.models):
            encoder_out = encoder_outs[model]
            # decode each model; only the newest position is scored
            logits, _ = model.decoder.forward(
                tokens,
                encoder_out=encoder_out,
                incremental_state=incremental_states[model],
                attn_mask=False
            )
            logits = logits[:, -1:, :] / temperature
            lprobs = utils.log_softmax(logits, dim=-1)
            lprobs = lprobs[:, -1, :]
            if len(self.models) == 1:
                return lprobs
            log_probs.append(lprobs)
        # BUG FIX: `self.models_size` was never defined on this class, so
        # real ensembles (>1 model) raised AttributeError here.  Use the
        # actual ensemble size for the probability average.
        avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
            len(self.models)
        )
        return avg_probs

    def rollback(self, incremental_states, step_to_keep):
        """Discard cached decoder state beyond ``step_to_keep`` everywhere."""
        for model in self.models:
            model.decoder.rollback_steps(incremental_states[model], step_to_keep)

    def init_states(self):
        """Create fresh per-model state dicts and reset the subword budget."""
        encoder_outs = {m: {} for m in self.models}
        incremental_states = {m: {} for m in self.models}
        self.reserve_step = 0
        return encoder_outs, incremental_states

    def fwd_encoder(
        self, src: Tensor, src_lengths: Tensor,
        encoder_outs: Optional[Dict[nn.Module, Dict[str, List[Tensor]]]],
        incremental_states: Optional[Dict[nn.Module, Dict[str, Dict[str, Optional[Tensor]]]]],
        finished=False
    ):
        """Encode a new source chunk and append it to each model's cached
        output (time dim for features, dim 1 for the padding mask)."""
        for model in self.models:
            curr_out = model.encoder(
                src, src_lengths,
                incremental_state=incremental_states[model],
                finished=finished
            )
            if "encoder_out" in encoder_outs[model]:
                pre = encoder_outs[model]["encoder_out"][0]
                pre_mask = encoder_outs[model]["encoder_padding_mask"][0]
                encoder_outs[model]["encoder_out"][0] = torch.cat([pre, curr_out["encoder_out"][0]], dim=0)
                encoder_outs[model]["encoder_padding_mask"][0] = torch.cat([pre_mask, curr_out["encoder_padding_mask"][0]], dim=1)
            else:
                encoder_outs[model] = curr_out
        return encoder_outs, incremental_states
class NaiveWaitk(OnlineSearcher):
    """Wait-k searcher without forecast/rollback.

    Unlike :class:`OnlineSearcher`, generated subwords are never rolled back
    from the decoder cache; trailing subwords that do not yet complete a
    word are kept in ``self.reserved_subwords`` and prepended to the next
    step's output instead.
    """

    def __init__(
        self, models: List[FairseqEncoderDecoderModel],
        vocab,
        eos=1,
        bos=1,
        eager=False,
        stop_early=False
    ):
        # Pass eager/stop_early straight through instead of re-assigning
        # them after the fact (the parent constructor already accepts both).
        super().__init__(models, vocab, eos, bos, eager=eager, stop_early=stop_early)
        # Subword ids produced earlier but not yet emitted as a full word.
        self.reserved_subwords = None

    def init_states(self):
        """Reset per-sentence state, including any reserved subwords."""
        encoder_outs, incremental_states = super().init_states()
        self.reserved_subwords = None
        return encoder_outs, incremental_states

    def search(
        self, src, src_lengths,
        prev_tokens, encoder_outs,
        incremental_states, beam=5, fwd_step=1, forecast_step=1,
        is_end=False
    ):
        """Advance decoding by ``fwd_step`` beam-search steps.

        Returns ``(deque of newly completed words, accepted token tensor,
        eos_found)``.  ``forecast_step`` is accepted for interface
        compatibility but ignored: naive wait-k never forecasts.
        """
        forecast_step = 0
        ninf = float('-inf')
        if src is not None:
            self.fwd_encoder(src, src_lengths, encoder_outs, incremental_states, is_end)
        steps = fwd_step
        if is_end:
            # Source exhausted: decode generously until every beam hits EOS.
            steps = 40
        # Duplicate the cached single-hypothesis state into `beam` copies.
        new_order = prev_tokens.new(beam).fill_(0)
        self.reorder_states(encoder_outs, incremental_states, new_order)
        prev_tokens = prev_tokens.repeat(beam, 1)
        init_len = prev_tokens.shape[1]
        finished = prev_tokens.new(beam).fill_(0).bool()
        scores = prev_tokens.new(beam, 1).float().fill_(0)
        for nstep in range(steps):
            lprobs = self.fwd_decoder_step(prev_tokens, encoder_outs, incremental_states)
            # Never emit padding; finished beams may only repeat EOS with
            # probability 1, which freezes their cumulative score.
            lprobs[:, self.pad] = ninf
            lprobs[finished, :self.eos] = ninf
            lprobs[finished, self.eos] = 0
            lprobs[finished, self.eos + 1:] = ninf
            # lprobs: beam x vocab
            expand_score = scores + lprobs
            if nstep == 0:
                # All beams are identical on the first step; keep one row so
                # topk yields `beam` distinct continuations.
                expand_score = expand_score[:1]
            tscore, tidx = expand_score.view(-1).topk(beam)
            next_tokens = tidx % self.vocab_size
            new_order = tidx // self.vocab_size
            scores[:] = tscore.unsqueeze(1)
            prev_tokens = prev_tokens.index_select(0, new_order)
            prev_tokens = torch.cat([prev_tokens, next_tokens.unsqueeze(1)], dim=1)
            self.reorder_states(encoder_outs, incremental_states, new_order)
            finished = finished | next_tokens.eq(self.eos)
            if finished.all():
                break
        if not is_end and self.stop_early and prev_tokens[0][-1] == self.eos:
            # Best beam already ended: flush everything and report EOS.
            prev_tokens = prev_tokens[0]
            out_tokens = prev_tokens[init_len:]
            if self.reserved_subwords is not None and len(self.reserved_subwords) > 0:
                out_tokens = torch.cat((self.reserved_subwords, out_tokens), dim=0)
            # .item(): keep `removed` a plain int (consistent with
            # OnlineSearcher.search) instead of a 0-dim tensor.
            removed = out_tokens.eq(self.eos).sum().item()
            ignore_length = removed
            out_words, reserved = self.word_end.string(out_tokens, is_finished=True, removed=ignore_length)
            return deque(out_words.split()), prev_tokens, True
        # Pick the best beam by length-normalised score.
        seqlen = prev_tokens.ne(self.eos).float().sum(1) - init_len + 1
        scores = scores.squeeze(1) / seqlen
        score, idx = scores.max(0)
        new_order = idx.view(1)
        self.reorder_states(encoder_outs, incremental_states, new_order)
        prev_tokens = prev_tokens[idx]
        out_tokens = prev_tokens[init_len:]
        if self.reserved_subwords is not None and len(self.reserved_subwords) > 0:
            out_tokens = torch.cat((self.reserved_subwords, out_tokens), dim=0)
        removed = out_tokens.eq(self.eos).sum().item()
        ignore_length = removed
        if self.eager:
            out_words, reserved = self.word_end.string(out_tokens, is_finished=True, removed=ignore_length)
        else:
            out_words, reserved = self.word_end.string(out_tokens, is_finished=is_end, removed=ignore_length)
        # Hold back trailing subwords that do not yet end a word; they are
        # re-emitted in front of the next step's output.
        self.reserved_subwords = None
        if reserved > 0:
            self.reserved_subwords = prev_tokens[-reserved:]
        return deque(out_words.split()), prev_tokens, False
class WaitkAgent(Agent):
def __init__(self, args):
self._set_default_args(args)
super().__init__(args)
utils.import_user_module("rain")
self.cpu = args.cpu
self.wait_blocks= args.wait_blocks
self.step_read_blocks = args.step_read_blocks
self.step_generate = args.step_generate
self.step_forecast = args.step_forecast
self.beam = args.beam
task_cfg=Namespace(
task="s2s",
data= args.train_dir, task_type=args.task_type,
source_lang=args.slang,
target_lang= args.tlang,
text_config=args.text_encoder, audio_cfg=args.audio_encoder,
bpe_dropout=0,
)
self.task= tasks.setup_task(task_cfg)
self.audio_transformer = self.task.audio_transform_test
self.text_transformer= self.task.src_encoder
self.src_dict= self.task.src_dict
if args.model_path is None:
raise ValueError("--model-path needed")
models, saved_cfg= checkpoint_utils.load_model_ensemble(
utils.split_paths(args.model_path),
arg_overrides=None,
task=self.task,
)
self.tgt_dict:Dictionary= self.task.target_dictionary
self.eos= self.tgt_dict.eos()
self.bos= self.tgt_dict.eos() if args.infer_bos is None else args.infer_bos
# if self.data_type == "speech":
# self.searcher= SpeechSearcher(models, self.tgt_dict, eos= self.eos, bos= self.bos)
if args.naive_waitk:
self.searcher = NaiveWaitk(
models, self.tgt_dict, eos= self.eos, bos= self.bos, eager= args.eager,
stop_early=args.stop_early
)
else:
self.searcher = OnlineSearcher(
models, self.tgt_dict, eos= self.eos, bos= self.bos, eager= args.eager,
stop_early=args.stop_early
)
if not self.cpu:
self.searcher.cuda()
self.searcher.eval()
self.frames= None
self.finished=True
self.processed_frames=0
self.processed_units=0
self.hypos=deque()
self.prev_tokens, self.encoder_outs, self.incremental_states= None,None,None
def _set_default_args(self, args):
args.wait_blocks= 4 if args.wait_blocks is None else args.wait_blocks
args.step_read_blocks=1 if args.step_read_blocks is None else args.step_read_blocks
args.step_generate=1 if args.step_generate is None else args.step_generate
args.step_forecast= 0 if args.step_forecast is None else args.step_forecast
@staticmethod
def add_args(parser):
parser.add_argument(
'--stop-early', action='store_true', help='stop early mode for waitk'
)
parser.add_argument(
"--cpu", action= "store_true", help= "use cpu instead of cuda"
)
parser.add_argument(
"--eager", default=False, action="store_true", help="output words without word end check"
)
parser.add_argument(
"--task-type", default="mt", metavar='ttype',
help='task type :st,mt'
)
parser.add_argument(
"--slang", default="en", metavar='SLANG',
help='task type :st,mt'
)
parser.add_argument(
"--tlang", default="de", metavar='TLANG',
help='task type :st,mt'
)
parser.add_argument(
"--infer-bos", default=None,type=int,
help= "bos for decoding"
)
parser.add_argument(
"--model-path", default=None,type=str,
help= "path for models used (may be splited by `:`)"
)
parser.add_argument(
"--wait-blocks", default=None, type=int,
help="start translation after wait_blocks samples read, default None and we may set its default in our class"
)
parser.add_argument(
"--beam", default=5, type=int,
help="beam size"
)
parser.add_argument(
"--step-read-blocks", default=None, type=int,
help="do translation while read each blocks input"
)
parser.add_argument(
"--step-generate", default=None, type=int,
help="generate tokens for each step, NOT equal to output words, if current step output un-complete subwords, "
"reserve to output in next step"
)
parser.add_argument(
"--step-forecast", default= None, type=int,
help="forecast subword numbers for each step, only for search"
)
parser.add_argument(
"--train-dir", default="exp_data/must_filtered2", type=str,
help="train dir, for other resource such as dict"
)
parser.add_argument(
"--text-encoder", default="text_cfg", type=str,
help= "text encoder"
)
parser.add_argument(
"--audio-encoder", default= "audio_cfg", type=str,
help= "audio-encoder"
)
parser.add_argument(
"--naive-waitk", action="store_true",help="use naive waitk"
)
return parser
def initialize_states(self, states):
# we recompute feature at each step, the waste seems to be acceptable
logger.info(f"new sample,id={states.instance_id}")
self.input_fea= torch.Tensor(0,80)
self.finished=False
self.processed_frames=0
self.processed_units=0
self.encoder_outs, self.incremental_states = self.searcher.init_states()
self.prev_tokens= torch.LongTensor([self.bos])
if not self.cpu:
self.prev_tokens= self.prev_tokens.cuda()
self.hypos=deque()
def expected_init_frames(self):
return self.searcher.get_init_frames(self.wait_blocks)
def expected_step_frames(self):
return self.searcher.get_step_frames(self.step_read_blocks)
def expected_init_units(self):
frames= self.searcher.get_init_frames(self.wait_blocks)
if self.data_type == "text":
#return frames
# we must do sentencepiece and then get its length
return 1
elif self.data_type == "speech":
# units per 10ms
return frames
def expected_step_units(self):
frames= self.searcher.get_step_frames(self.step_read_blocks)
if self.data_type == "text":
return 1
elif self.data_type == "speech":
# units per ms
#return frames*10
return frames
def _gen_frames(self, states):
source= states.source
if self.data_type == "text":
src = ' '.join(source)
subwords =self.text_transformer.encode(src)
tokens = self.src_dict.encode_line(subwords,add_if_not_exist=False, append_eos= False).long()
#tokens = torch.cat((torch.LongTensor([self.tgt_dict.eos()]),tokens), dim=0)
if states.finish_read():
tokens= torch.cat((tokens,torch.LongTensor([ self.src_dict.eos()])), dim=0)
self.input_fea= tokens
self.processed_units = len(source)
elif self.data_type == "speech":
rate_ms= 16
if len(source[-1]) <160:
source=source[:-1]
new_frames= len(source) - self.input_fea.shape[0]
if new_frames <= 0:
return
if self.input_fea.shape[0] ==0 :
pre= torch.FloatTensor(1, 15*rate_ms).fill_(0)
new_src= sum(source[-new_frames:],[])
new_src= torch.FloatTensor(new_src).unsqueeze(0)
new_src= torch.cat([pre, new_src], dim=1)
else:
new_src= sum(source[-(new_frames+2):],[])
new_src= new_src[5*rate_ms:]
new_src = torch.FloatTensor(new_src).unsqueeze(0)
new_src= new_src*( 2**-15)
fbank= audio_encoder._get_fbank(new_src, sample_rate= 16000, n_bins=80)
fbank = self.audio_transformer(fbank)
self.input_fea= torch.cat([self.input_fea, fbank], dim=0)
self.processed_units= len(source)
else:
raise ValueError(f"unknown data type {self.data_type}")
def policy(self, states):
if len(self.hypos) >0:
return WRITE_ACTION
source= states.source
if self.finished:
if len(self.hypos) >0:
return WRITE_ACTION
else:
return READ_ACTION
if (len(source) >=self.expected_init_units() and self.processed_units==0) or \
(len(source) -self.processed_units >= self.expected_step_units() and self.processed_units >0) or \
states.finish_read():
self._gen_frames(states)
if states.finish_read():
self.infer(states)
if (self.processed_frames ==0 and len(self.input_fea) >= self.expected_init_frames()) or \
(self.processed_frames >0 and len(self.input_fea)- self.processed_frames >= self.expected_step_frames()):
self.infer(states)
if len(self.hypos) >0:
return WRITE_ACTION
else:
return READ_ACTION
def infer(self,states):
assert len(self.hypos) ==0
new_frames= len(self.input_fea) - self.processed_frames
#assert new_frames >0
if new_frames >0:
fea = self.input_fea[-new_frames:]
fea= fea.unsqueeze(0)
fea_lengths= fea.new(1).fill_(fea.shape[1])
if not self.cpu:
fea= fea.cuda()
fea_lengths=fea_lengths.cuda()
else:
print(f"infer with no new frames, finished= {states.finish_read()}")
fea= None
fea_lengths=None
if self.processed_frames ==0:
expected_step=1
else:
expected_step = max(new_frames //self.expected_step_frames(),1)
with torch.no_grad():
out_words, tokens, eos_found= self.searcher.search(
fea, fea_lengths,
self.prev_tokens,self.encoder_outs,
self.incremental_states, beam=self.beam,
fwd_step= self.step_generate*expected_step,
forecast_step= self.step_forecast,
is_end = states.finish_read(),
)
self.prev_tokens= tokens
# if states.finish_read():
# print(f"target:{self.searcher.vocab.string(self.prev_tokens)}")
# # run whole data offline
# model= self.searcher.models[0]
# def sub_encoder(fea, incremental_state= None):
# flen = fea.new(1).fill_(fea.shape[1]).cuda()
# enc_out = model.encoder(fea, flen, incremental_state = incremental_state)
# return enc_out["encoder_out"][0]
# fea = self.input_fea
# fea= fea.unsqueeze(0).cuda()
# import pdb;pdb.set_trace()
# fea_lengths= fea.new(1).fill_(fea.shape[1]).cuda()
# encoder_out= model.encoder(fea, fea_lengths, finished=True)
# prev_tokens= torch.LongTensor([self.bos]).cuda().unsqueeze(0)
# incremental_state={}
# for step in range(10):
# logits, _= model.decoder.forward(
# prev_tokens,
# encoder_out=encoder_out,
# incremental_state=incremental_state,
# attn_mask=False
# )
# lprobs= utils.log_softmax(logits, dim=-1)
# v, next_token= lprobs.max(-1)
# prev_tokens= torch.cat((prev_tokens, next_token),dim=1)
# import pdb;pdb.set_trace()
# print(f"new:{self.searcher.vocab.string(prev_tokens)}")
self.processed_frames = len(self.input_fea)
if states.finish_read():
out_words.append(DEFAULT_EOS)
if eos_found:
out_words.append(DEFAULT_EOS)
self.finished=True
self.hypos.extend(out_words)
    def predict(self, states):
        """Pop and return the oldest buffered output word.

        The buffer (``self.hypos``) is filled elsewhere by the decoding
        step; callers must only invoke this while it is non-empty.

        NOTE(review): ``states`` is unused here -- presumably kept to
        satisfy the agent API; confirm against the caller.
        """
        assert(len(self.hypos) >0)
        return self.hypos.popleft()
| en | 0.327679 | # if not is_end: # lprobs[:, self.eos] = ninf #lprobs: beam*vocab # decode each model # if not is_end: # lprobs[:, self.eos] = ninf #lprobs: beam*vocab #out_words, reserved= self.word_end.string(out_tokens, is_end) # if self.data_type == "speech": # self.searcher= SpeechSearcher(models, self.tgt_dict, eos= self.eos, bos= self.bos) # we recompute feature at each step, the waste seems to be acceptable #return frames # we must do sentencepiece and then get its length # units per 10ms # units per ms #return frames*10 #tokens = torch.cat((torch.LongTensor([self.tgt_dict.eos()]),tokens), dim=0) #assert new_frames >0 # if states.finish_read(): # print(f"target:{self.searcher.vocab.string(self.prev_tokens)}") # # run whole data offline # model= self.searcher.models[0] # def sub_encoder(fea, incremental_state= None): # flen = fea.new(1).fill_(fea.shape[1]).cuda() # enc_out = model.encoder(fea, flen, incremental_state = incremental_state) # return enc_out["encoder_out"][0] # fea = self.input_fea # fea= fea.unsqueeze(0).cuda() # import pdb;pdb.set_trace() # fea_lengths= fea.new(1).fill_(fea.shape[1]).cuda() # encoder_out= model.encoder(fea, fea_lengths, finished=True) # prev_tokens= torch.LongTensor([self.bos]).cuda().unsqueeze(0) # incremental_state={} # for step in range(10): # logits, _= model.decoder.forward( # prev_tokens, # encoder_out=encoder_out, # incremental_state=incremental_state, # attn_mask=False # ) # lprobs= utils.log_softmax(logits, dim=-1) # v, next_token= lprobs.max(-1) # prev_tokens= torch.cat((prev_tokens, next_token),dim=1) # import pdb;pdb.set_trace() # print(f"new:{self.searcher.vocab.string(prev_tokens)}") | 2.252154 | 2 |
day15.2/main.py | lfscheidegger/adventofcode2018 | 1 | 6621862 | <filename>day15.2/main.py
#!/usr/bin/python
"""
"""
from collections import defaultdict
from collections import deque
import multiprocessing
import re
import sys
import time
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from find_path import find_paths_to_in_range
from number import Number
from tile import Tile
TILE_SIZE_PX = 32
ELVES_POWER = 19
def adjacency(position):
    """Yield the four orthogonal neighbours of *position* in reading order
    (up, left, right, down)."""
    px, py = position
    for dx, dy in ((0, -1), (-1, 0), (1, 0), (0, 1)):
        yield (px + dx, py + dy)
def actor_hit_point_sort_key(actor):
    """Sort key: an actor's remaining hit points (weakest first when ascending)."""
    return actor.hit_points
def actor_sort_key(actor):
    """Reading-order key for an actor: row (y) first, then column (x)."""
    x, y = actor.position
    return (y, x)
def position_sort_key(position):
    """Reading-order key for a bare (x, y) position: row first, then column."""
    x, y = position
    return (y, x)
class InRoundState:
    """Finite state machine that advances combat one micro-step at a time.

    ``advance`` dispatches on ``self._next_thing`` so the GL front end can
    redraw between steps (find actor -> find targets -> pathfind -> move ->
    attack -> end turn/round).  ``END_COMBAT`` prints the puzzle outcome and
    exits the process.
    """
    def __init__(self, state, actors):
        # Back-reference to the owning State (board geometry lives there).
        self.state = state
        self.actors = actors
        # Name of the next step that advance() will execute.
        self._next_thing = "START_ROUND"
        self._active_actor = None
        self._active_actor_targets = []
        self._active_actor_range = []
        # (visited, path) pair produced by find_paths_to_in_range.
        self._range_paths = None
        self._active_actor_selected_next_square = None
        self._victim = None
        # Completed full rounds; multiplied into the final outcome.
        self._rounds = 0
    def advance(self):
        """Execute exactly one step of the state machine."""
        # print self._next_thing
        if self._next_thing == "START_ROUND":
            self._advance_start_round()
        elif self._next_thing == "FIND_ACTIVE_ACTOR":
            self._advance_find_next_active_actor()
        elif self._next_thing == "FIND_TARGETS":
            self._advance_find_all_targets()
        elif self._next_thing == "FIND_ACTOR_RANGE":
            self._advance_find_active_actor_range()
        elif self._next_thing == "FIND_PATHS":
            self._advance_find_paths()
        elif self._next_thing == "PICK_NEXT_SQUARE":
            self._advance_pick_next_square()
        elif self._next_thing == "MOVE":
            self._advance_move()
        elif self._next_thing == "FIND_VICTIM":
            self._advance_find_victim()
        elif self._next_thing == "ATTACK":
            self._advance_attack()
        elif self._next_thing == "END_TURN":
            self._advance_end_turn()
        elif self._next_thing == "END_ROUND":
            self._advance_end_round()
        elif self._next_thing == "END_COMBAT":
            # Combat over: report the answer and terminate the program.
            total_hit_points = sum(map(lambda a: a.hit_points, self.actors))
            print "Rounds until end:", self._rounds
            print "Total hit points:", total_hit_points
            print "Outcome:", self._rounds * total_hit_points
            time.sleep(2)
            sys.exit()
    def _advance_start_round(self):
        """Fix this round's turn order: actors in reading order of position."""
        self._turn_order = sorted(self.actors, key=actor_sort_key)
        # print 'started round'
        self._next_thing = "FIND_ACTIVE_ACTOR"
    def _advance_find_next_active_actor(self):
        """Pick the next living actor in turn order, or end the round."""
        # filter out dead people
        self._turn_order = filter(lambda a: a in self.actors, self._turn_order)
        if self._active_actor is None:
            # First turn of the round.
            self._active_actor = self._turn_order[0]
            # print 'found active actor', self._active_actor.id
            self._next_thing = "FIND_TARGETS"
        else:
            next_idx = self._turn_order.index(self._active_actor) + 1
            if next_idx < len(self._turn_order):
                self._active_actor = self._turn_order[next_idx]
                # print 'found active actor', self._active_actor.id
                self._next_thing = "FIND_TARGETS"
            else:
                self._next_thing = "END_ROUND"
    def _advance_find_all_targets(self):
        """Collect all living enemies; combat ends if none remain."""
        target_type = 'E' if self._active_actor.type == 'G' else 'G'
        self._active_actor_targets = list(filter(lambda a: a.type == target_type, self.actors))
        if len(self._active_actor_targets) == 0:
            self._next_thing = "END_COMBAT"
        else:
            self._next_thing = "FIND_ACTOR_RANGE"
    def _advance_find_active_actor_range(self):
        """Collect every non-wall square adjacent to an enemy, in reading order."""
        active_actor_range = set()
        for target in self._active_actor_targets:
            for adj in adjacency(target.position):
                if self.state.board[adj[1]][adj[0]] != '#':
                    active_actor_range.add(adj)
        self._active_actor_range = sorted(active_actor_range, key=position_sort_key)
        # print 'found active range'
        if self._active_actor.position in self._active_actor_range:
            # Already standing next to an enemy: no movement needed.
            self._next_thing = "FIND_VICTIM"
        else:
            self._next_thing = "FIND_PATHS"
    def _advance_find_paths(self):
        """Compute (visited, predecessors) from the active actor's position
        via find_paths_to_in_range over the open board."""
        self._range_paths = find_paths_to_in_range(self._active_actor.position, self.state.board, self.actors)
        self._next_thing = "PICK_NEXT_SQUARE"
        # print 'determined paths'
    def _advance_pick_next_square(self):
        """Choose the first step towards the nearest reachable in-range
        square, breaking all ties in reading order."""
        def find_first_node_to_use(start, end):
            # Walk predecessor links back from *end*, collecting every node
            # adjacent to *start*; the reading-order-first one is the step.
            candidates = []
            front = path[end]
            while len(front) != 0:
                next_front = []
                for node in front:
                    if start in adjacency(node):
                        candidates.append(node)
                    elif node != start:
                        next_front += path[node]
                front = list(set(next_front))
            candidates = list(set(candidates))
            return sorted(candidates, key=position_sort_key)[0]
        visited, path = self._range_paths
        range_weights = map(lambda target: visited[target], filter(lambda target: target in visited, self._active_actor_range))
        if len(range_weights) == 0:
            # No in-range square is reachable from here: skip moving.
            self._next_thing = "END_TURN"
        else:
            lowest_range_weight = min(map(lambda target: visited[target], filter(lambda target: target in visited, self._active_actor_range)))
            lowest_range = sorted(filter(lambda range: range in visited and visited[range] == lowest_range_weight, self._active_actor_range), key=position_sort_key)[0]
            if visited[lowest_range] == 1:
                # The destination is directly adjacent: step straight onto it.
                assert lowest_range in adjacency(self._active_actor.position)
                self._active_actor_selected_next_square = lowest_range
            else:
                self._active_actor_selected_next_square = find_first_node_to_use(self._active_actor.position, lowest_range)
            self._next_thing = "MOVE"
            # print 'selected next square'
    def _advance_move(self):
        """Commit the chosen step and clear per-step scratch state."""
        self._active_actor.position = self._active_actor_selected_next_square
        self._active_actor_targets = []
        self._active_actor_range = []
        self._range_paths = None
        self._active_actor_selected_next_square = None
        self._next_thing = "FIND_VICTIM"
    def _advance_find_victim(self):
        """Select the adjacent enemy with the fewest hit points
        (reading order breaks ties)."""
        attack_candidates = []
        for adj in adjacency(self._active_actor.position):
            for target in filter(lambda a: a.type != self._active_actor.type, self.actors):
                if target.position == adj:
                    attack_candidates.append(target)
        if len(attack_candidates) != 0:
            # Two stable sorts: reading order first, then hit points, so the
            # weakest enemy wins with reading-order tie-breaking.
            attack_candidates = sorted(attack_candidates, key=actor_sort_key)
            attack_candidates = sorted(attack_candidates, key=actor_hit_point_sort_key)
            self._victim = attack_candidates[0]
            self._next_thing = "ATTACK"
        else:
            self._next_thing = "END_TURN"
    def _advance_attack(self):
        """Deal damage; part 2 aborts the whole run if any elf dies."""
        self._victim.hit_points -= self._active_actor.attack_power
        self._victim.hit_points = max(self._victim.hit_points, 0)
        if self._victim.type == 'E' and self._victim.hit_points == 0:
            # an elf is dead. No bueno.
            print 'An elf died. RIP.'
            sys.exit(0)
        # print 'attacked %s (%s - %s)' % (self._victim.id, self._victim.type, self._victim.hit_points)
        self._next_thing = "END_TURN"
    def _advance_end_turn(self):
        """Reset per-turn scratch state and drop any dead actors."""
        self._active_actor_targets = []
        self._active_actor_range = []
        self._range_paths = None
        self._next_thing = "FIND_ACTIVE_ACTOR"
        self._victim = None
        # print 'actor %s (%s) ended their turn' % (self._active_actor.id, self._active_actor.type)
        # filter out dead people
        self.actors = filter(lambda a: a.hit_points > 0, self.actors)
    def _advance_end_round(self):
        """Close out the round and count it towards the outcome."""
        self._active_actor = None
        self._active_actor_targets = []
        self._active_actor_range = []
        self._range_paths = None
        self._next_thing = "START_ROUND"
        self._rounds += 1
        # print 'ended round'
    def print_gl(self):
        """Overlay debug visuals for the current step: active actor,
        targets, in-range squares, BFS-style weights, chosen step, victim."""
        if self._active_actor is not None:
            self._draw_square(self._active_actor.position, 0, 0.5, 0)
        for target in self._active_actor_targets:
            self._draw_square(target.position, 0.5, 0, 0)
        for in_range in self._active_actor_range:
            self._draw_square(in_range, 0, 0, 0.5)
        if self._range_paths is not None:
            visited = self._range_paths[0]
            max_weight = max(visited.values())
            for (x, y) in visited:
                weight = visited[(x, y)]
                Number.draw(weight, (x, y), self.state.height)
                # Shade by normalised path weight.
                intensity = weight / float(max_weight)
                self._draw_square((x, y), intensity, 0, intensity, 0.5)
        if self._active_actor_selected_next_square is not None:
            x, y = self._active_actor_selected_next_square
            self._draw_square((x, y), 0, 0.8, 0)
        if self._victim is not None:
            x, y = self._victim.position
            self._draw_square((x, y), 0.5, 0, 0.5, 0.5)
    def _draw_square(self, position, r, g, b, a=1):
        """Fill one board cell with an RGBA colour (y flipped for GL)."""
        x = position[0]
        y = self.state.height - 1 - position[1]
        glColor4f(r, g, b, a)
        glBegin(GL_TRIANGLES)
        glVertex2f(x, y)
        glVertex2f(x+1, y)
        glVertex2f(x+1, y+1)
        glVertex2f(x, y)
        glVertex2f(x+1, y+1)
        glVertex2f(x, y+1)
        glEnd()
        # Reset the draw colour to white for subsequent textured draws.
        glColor3f(1, 1, 1)
class State:
    """
    Represents the state of a cave at a point in time."""
    def __init__(self, round, board, actors):
        self.round = round
        # Board rows are strings of '#' and '.'; actors are tracked
        # separately and composited in on output.
        self.board = board
        self.actors = actors
        self.height = len(self.board)
        self.width = len(self.board[0])
        # Per-round combat state machine (drives the actual simulation).
        self.in_round_state = InRoundState(state=self, actors=actors)
    @staticmethod
    def from_stdin():
        """
        Reads a State from standard input."""
        board = []
        actors = []
        y = 0
        actor_id = 0
        try:
            # Read until EOF (Python 2 raw_input raises EOFError).
            while True:
                line = raw_input()
                for x in range(len(line)):
                    char = line[x]
                    if char == 'G' or char == 'E':
                        actors.append(Actor(actor_id, char, (x, y)))
                        actor_id += 1
                # Strip actors out of the static board representation.
                line = line.replace('G', '.').replace('E', '.')
                board.append(line)
                y += 1
        except EOFError:
            return State(board=board, actors=actors, round=0)
    def advance(self):
        """
        Advances the state, by either selecting the next player, advancing that player's state, etc."""
        self.in_round_state.advance()
    def print_stdout(self):
        """
        Prints the board to standard out."""
        board = list(b for b in self.board)
        for actor in self.actors:
            # Splice the actor's letter into its row at its x position.
            actor_line = board[actor.position[1]]
            board[actor.position[1]] = actor_line[:actor.position[0]] + actor.type + actor_line[actor.position[0] + 1:]
        for row in board:
            print row
    def print_gl(self):
        """
        Displays the board in OpenGL."""
        self._draw_walls()
        self._draw_in_round_state()
        self._draw_actors()
    def _draw_walls(self):
        # Draw only wall tiles; open floor is left as background.
        for y in range(len(self.board)):
            row = self.board[y]
            for x in range(len(row)):
                tile = row[x]
                if tile == '#':
                    self._draw_tile((x, y), tile)
    def _draw_in_round_state(self):
        # Unbind the tile texture so the overlay draws as flat colour.
        glBindTexture(GL_TEXTURE_2D, 0)
        self.in_round_state.print_gl()
    def _draw_actors(self):
        for actor in self.actors:
            x, y = actor.position
            # Flip to GL coordinates (origin at bottom-left).
            y = self.height - y - 1
            self._draw_tile(actor.position, actor.type, actor.hit_points==0)
            # Health bar: a thin vertical strip fading red as HP drops
            # (200 is the starting hit-point total).
            normalized_hit_points = actor.hit_points / float(200)
            glColor3f(1 - normalized_hit_points, normalized_hit_points, 0)
            glBegin(GL_TRIANGLES)
            glVertex2f(x + 0.85, y + 0.05)
            glVertex2f(x + 0.9, y + 0.05)
            glVertex2f(x + 0.9, y + 0.05 + normalized_hit_points * 0.8)
            glVertex2f(x + 0.85, y + 0.05)
            glVertex2f(x + 0.9, y + 0.05 + normalized_hit_points * 0.8)
            glVertex2f(x + 0.85, y + 0.05 + normalized_hit_points * 0.8)
            glEnd()
            glColor3f(1, 1, 1)
    def _draw_tile(self, position, tile, dead=False):
        """Delegate a single tile/actor sprite draw to the Tile helper."""
        Tile(tile, position, self, dead).draw()
class Actor:
    """A single combatant (goblin 'G' or elf 'E') on the board."""

    def __init__(self, id, type, position):
        """Create a combatant at *position* with full health."""
        self.id = id
        self.type = type
        self.position = position
        # Everyone starts with the same hit-point total.
        self.hit_points = 200
        # Goblins always hit for 3; elves use the tuned global power.
        self.attack_power = ELVES_POWER if type != 'G' else 3
def display_func():
    """GLUT display callback: clear, draw the whole board, swap buffers."""
    global state
    glClear(GL_COLOR_BUFFER_BIT)
    state.print_gl()
    glutSwapBuffers()
def keyboard_func(key, x, y):
    """GLUT keyboard callback: 'q' quits, 'a' single-steps the simulation."""
    global state
    key = key.lower()
    if key == 'q':
        sys.exit(0)
    elif key == 'a':
        state.advance()
        glutPostRedisplay()
def reshape_func(width, height):
    """GLUT reshape callback: map the projection so one world unit == one
    board cell, then size the viewport to the new window."""
    global state
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(0, state.width, 0, state.height, -1, 1)
    glViewport(0, 0, width, height)
    glMatrixMode(GL_MODELVIEW)
# Timestamp (in ms) of the last simulation step taken by idle_func.
now = 0
def idle_func():
    """GLUT idle callback: advance the simulation on a timer."""
    global now
    global state
    new_now = 1000 * time.time()
    if new_now - now > 1:
        # more than 60 ms. Let's advance
        # NOTE(review): the threshold here is 1 ms although the comment
        # above says 60 ms -- confirm which was intended.
        state.advance()
        glutPostRedisplay()
        now = new_now
def init_gl():
    """Create the GLUT window, register callbacks and enter the main loop.

    Never returns: glutMainLoop() takes over the process.
    """
    global state
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    # Window scales with the board but never drops below 512 px.
    glutInitWindowSize(max(512, state.width * TILE_SIZE_PX), max(512, state.height * TILE_SIZE_PX))
    glutCreateWindow('Elves v. Goblins')
    glutDisplayFunc(display_func)
    glutKeyboardFunc(keyboard_func)
    glutReshapeFunc(reshape_func)
    glutIdleFunc(idle_func)
    glEnable(GL_TEXTURE_2D)
    # Blending is needed for the semi-transparent debug overlays.
    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    glutMainLoop()
def main():
    """Read the battle map from stdin, then start the GL front end."""
    global state
    state = State.from_stdin()
    init_gl()
if __name__ == "__main__":
    main()
| <filename>day15.2/main.py
#!/usr/bin/python
"""
"""
from collections import defaultdict
from collections import deque
import multiprocessing
import re
import sys
import time
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from find_path import find_paths_to_in_range
from number import Number
from tile import Tile
TILE_SIZE_PX = 32
ELVES_POWER = 19
def adjacency(position):
return ((x[0] + position[0], x[1] + position[1]) for x in [(0, -1), (-1, 0), (1, 0), (0, 1)])
def actor_hit_point_sort_key(actor):
return actor.hit_points
def actor_sort_key(actor):
return (actor.position[1], actor.position[0])
def position_sort_key(position):
return (position[1], position[0])
class InRoundState:
def __init__(self, state, actors):
self.state = state
self.actors = actors
self._next_thing = "START_ROUND"
self._active_actor = None
self._active_actor_targets = []
self._active_actor_range = []
self._range_paths = None
self._active_actor_selected_next_square = None
self._victim = None
self._rounds = 0
def advance(self):
# print self._next_thing
if self._next_thing == "START_ROUND":
self._advance_start_round()
elif self._next_thing == "FIND_ACTIVE_ACTOR":
self._advance_find_next_active_actor()
elif self._next_thing == "FIND_TARGETS":
self._advance_find_all_targets()
elif self._next_thing == "FIND_ACTOR_RANGE":
self._advance_find_active_actor_range()
elif self._next_thing == "FIND_PATHS":
self._advance_find_paths()
elif self._next_thing == "PICK_NEXT_SQUARE":
self._advance_pick_next_square()
elif self._next_thing == "MOVE":
self._advance_move()
elif self._next_thing == "FIND_VICTIM":
self._advance_find_victim()
elif self._next_thing == "ATTACK":
self._advance_attack()
elif self._next_thing == "END_TURN":
self._advance_end_turn()
elif self._next_thing == "END_ROUND":
self._advance_end_round()
elif self._next_thing == "END_COMBAT":
total_hit_points = sum(map(lambda a: a.hit_points, self.actors))
print "Rounds until end:", self._rounds
print "Total hit points:", total_hit_points
print "Outcome:", self._rounds * total_hit_points
time.sleep(2)
sys.exit()
def _advance_start_round(self):
self._turn_order = sorted(self.actors, key=actor_sort_key)
# print 'started round'
self._next_thing = "FIND_ACTIVE_ACTOR"
def _advance_find_next_active_actor(self):
# filter out dead people
self._turn_order = filter(lambda a: a in self.actors, self._turn_order)
if self._active_actor is None:
self._active_actor = self._turn_order[0]
# print 'found active actor', self._active_actor.id
self._next_thing = "FIND_TARGETS"
else:
next_idx = self._turn_order.index(self._active_actor) + 1
if next_idx < len(self._turn_order):
self._active_actor = self._turn_order[next_idx]
# print 'found active actor', self._active_actor.id
self._next_thing = "FIND_TARGETS"
else:
self._next_thing = "END_ROUND"
def _advance_find_all_targets(self):
target_type = 'E' if self._active_actor.type == 'G' else 'G'
self._active_actor_targets = list(filter(lambda a: a.type == target_type, self.actors))
if len(self._active_actor_targets) == 0:
self._next_thing = "END_COMBAT"
else:
self._next_thing = "FIND_ACTOR_RANGE"
def _advance_find_active_actor_range(self):
active_actor_range = set()
for target in self._active_actor_targets:
for adj in adjacency(target.position):
if self.state.board[adj[1]][adj[0]] != '#':
active_actor_range.add(adj)
self._active_actor_range = sorted(active_actor_range, key=position_sort_key)
# print 'found active range'
if self._active_actor.position in self._active_actor_range:
self._next_thing = "FIND_VICTIM"
else:
self._next_thing = "FIND_PATHS"
def _advance_find_paths(self):
self._range_paths = find_paths_to_in_range(self._active_actor.position, self.state.board, self.actors)
self._next_thing = "PICK_NEXT_SQUARE"
# print 'determined paths'
def _advance_pick_next_square(self):
def find_first_node_to_use(start, end):
candidates = []
front = path[end]
while len(front) != 0:
next_front = []
for node in front:
if start in adjacency(node):
candidates.append(node)
elif node != start:
next_front += path[node]
front = list(set(next_front))
candidates = list(set(candidates))
return sorted(candidates, key=position_sort_key)[0]
visited, path = self._range_paths
range_weights = map(lambda target: visited[target], filter(lambda target: target in visited, self._active_actor_range))
if len(range_weights) == 0:
self._next_thing = "END_TURN"
else:
lowest_range_weight = min(map(lambda target: visited[target], filter(lambda target: target in visited, self._active_actor_range)))
lowest_range = sorted(filter(lambda range: range in visited and visited[range] == lowest_range_weight, self._active_actor_range), key=position_sort_key)[0]
if visited[lowest_range] == 1:
assert lowest_range in adjacency(self._active_actor.position)
self._active_actor_selected_next_square = lowest_range
else:
self._active_actor_selected_next_square = find_first_node_to_use(self._active_actor.position, lowest_range)
self._next_thing = "MOVE"
# print 'selected next square'
def _advance_move(self):
self._active_actor.position = self._active_actor_selected_next_square
self._active_actor_targets = []
self._active_actor_range = []
self._range_paths = None
self._active_actor_selected_next_square = None
self._next_thing = "FIND_VICTIM"
def _advance_find_victim(self):
attack_candidates = []
for adj in adjacency(self._active_actor.position):
for target in filter(lambda a: a.type != self._active_actor.type, self.actors):
if target.position == adj:
attack_candidates.append(target)
if len(attack_candidates) != 0:
attack_candidates = sorted(attack_candidates, key=actor_sort_key)
attack_candidates = sorted(attack_candidates, key=actor_hit_point_sort_key)
self._victim = attack_candidates[0]
self._next_thing = "ATTACK"
else:
self._next_thing = "END_TURN"
def _advance_attack(self):
self._victim.hit_points -= self._active_actor.attack_power
self._victim.hit_points = max(self._victim.hit_points, 0)
if self._victim.type == 'E' and self._victim.hit_points == 0:
# an elf is dead. No bueno.
print 'An elf died. RIP.'
sys.exit(0)
# print 'attacked %s (%s - %s)' % (self._victim.id, self._victim.type, self._victim.hit_points)
self._next_thing = "END_TURN"
def _advance_end_turn(self):
self._active_actor_targets = []
self._active_actor_range = []
self._range_paths = None
self._next_thing = "FIND_ACTIVE_ACTOR"
self._victim = None
# print 'actor %s (%s) ended their turn' % (self._active_actor.id, self._active_actor.type)
# filter out dead people
self.actors = filter(lambda a: a.hit_points > 0, self.actors)
def _advance_end_round(self):
self._active_actor = None
self._active_actor_targets = []
self._active_actor_range = []
self._range_paths = None
self._next_thing = "START_ROUND"
self._rounds += 1
# print 'ended round'
def print_gl(self):
if self._active_actor is not None:
self._draw_square(self._active_actor.position, 0, 0.5, 0)
for target in self._active_actor_targets:
self._draw_square(target.position, 0.5, 0, 0)
for in_range in self._active_actor_range:
self._draw_square(in_range, 0, 0, 0.5)
if self._range_paths is not None:
visited = self._range_paths[0]
max_weight = max(visited.values())
for (x, y) in visited:
weight = visited[(x, y)]
Number.draw(weight, (x, y), self.state.height)
intensity = weight / float(max_weight)
self._draw_square((x, y), intensity, 0, intensity, 0.5)
if self._active_actor_selected_next_square is not None:
x, y = self._active_actor_selected_next_square
self._draw_square((x, y), 0, 0.8, 0)
if self._victim is not None:
x, y = self._victim.position
self._draw_square((x, y), 0.5, 0, 0.5, 0.5)
def _draw_square(self, position, r, g, b, a=1):
x = position[0]
y = self.state.height - 1 - position[1]
glColor4f(r, g, b, a)
glBegin(GL_TRIANGLES)
glVertex2f(x, y)
glVertex2f(x+1, y)
glVertex2f(x+1, y+1)
glVertex2f(x, y)
glVertex2f(x+1, y+1)
glVertex2f(x, y+1)
glEnd()
glColor3f(1, 1, 1)
class State:
"""
Represents the state of a cave at a point in time."""
def __init__(self, round, board, actors):
self.round = round
self.board = board
self.actors = actors
self.height = len(self.board)
self.width = len(self.board[0])
self.in_round_state = InRoundState(state=self, actors=actors)
@staticmethod
def from_stdin():
"""
Reads a State from standard input."""
board = []
actors = []
y = 0
actor_id = 0
try:
while True:
line = raw_input()
for x in range(len(line)):
char = line[x]
if char == 'G' or char == 'E':
actors.append(Actor(actor_id, char, (x, y)))
actor_id += 1
line = line.replace('G', '.').replace('E', '.')
board.append(line)
y += 1
except EOFError:
return State(board=board, actors=actors, round=0)
def advance(self):
"""
Advances the state, by either selecting the next player, advancing that player's state, etc."""
self.in_round_state.advance()
def print_stdout(self):
"""
Prints the board to standard out."""
board = list(b for b in self.board)
for actor in self.actors:
actor_line = board[actor.position[1]]
board[actor.position[1]] = actor_line[:actor.position[0]] + actor.type + actor_line[actor.position[0] + 1:]
for row in board:
print row
def print_gl(self):
"""
Displays the board in OpenGL."""
self._draw_walls()
self._draw_in_round_state()
self._draw_actors()
def _draw_walls(self):
for y in range(len(self.board)):
row = self.board[y]
for x in range(len(row)):
tile = row[x]
if tile == '#':
self._draw_tile((x, y), tile)
def _draw_in_round_state(self):
glBindTexture(GL_TEXTURE_2D, 0)
self.in_round_state.print_gl()
def _draw_actors(self):
for actor in self.actors:
x, y = actor.position
y = self.height - y - 1
self._draw_tile(actor.position, actor.type, actor.hit_points==0)
normalized_hit_points = actor.hit_points / float(200)
glColor3f(1 - normalized_hit_points, normalized_hit_points, 0)
glBegin(GL_TRIANGLES)
glVertex2f(x + 0.85, y + 0.05)
glVertex2f(x + 0.9, y + 0.05)
glVertex2f(x + 0.9, y + 0.05 + normalized_hit_points * 0.8)
glVertex2f(x + 0.85, y + 0.05)
glVertex2f(x + 0.9, y + 0.05 + normalized_hit_points * 0.8)
glVertex2f(x + 0.85, y + 0.05 + normalized_hit_points * 0.8)
glEnd()
glColor3f(1, 1, 1)
def _draw_tile(self, position, tile, dead=False):
Tile(tile, position, self, dead).draw()
class Actor:
def __init__(self, id, type, position):
self.id = id
self.type = type
self.position = position
self.hit_points = 200
self.attack_power = 3 if type == 'G' else ELVES_POWER
def display_func():
global state
glClear(GL_COLOR_BUFFER_BIT)
state.print_gl()
glutSwapBuffers()
def keyboard_func(key, x, y):
global state
key = key.lower()
if key == 'q':
sys.exit(0)
elif key == 'a':
state.advance()
glutPostRedisplay()
def reshape_func(width, height):
global state
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, state.width, 0, state.height, -1, 1)
glViewport(0, 0, width, height)
glMatrixMode(GL_MODELVIEW)
now = 0
def idle_func():
global now
global state
new_now = 1000 * time.time()
if new_now - now > 1:
# more than 60 ms. Let's advance
state.advance()
glutPostRedisplay()
now = new_now
def init_gl():
global state
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(max(512, state.width * TILE_SIZE_PX), max(512, state.height * TILE_SIZE_PX))
glutCreateWindow('Elves v. Goblins')
glutDisplayFunc(display_func)
glutKeyboardFunc(keyboard_func)
glutReshapeFunc(reshape_func)
glutIdleFunc(idle_func)
glEnable(GL_TEXTURE_2D)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glutMainLoop()
def main():
global state
state = State.from_stdin()
init_gl()
if __name__ == "__main__":
main()
| en | 0.794871 | #!/usr/bin/python # print self._next_thing # print 'started round' # filter out dead people # print 'found active actor', self._active_actor.id # print 'found active actor', self._active_actor.id # print 'found active range' # print 'determined paths' # print 'selected next square' # an elf is dead. No bueno. # print 'attacked %s (%s - %s)' % (self._victim.id, self._victim.type, self._victim.hit_points) # print 'actor %s (%s) ended their turn' % (self._active_actor.id, self._active_actor.type) # filter out dead people # print 'ended round' Represents the state of a cave at a point in time. Reads a State from standard input. Advances the state, by either selecting the next player, advancing that player's state, etc. Prints the board to standard out. Displays the board in OpenGL. # more than 60 ms. Let's advance | 2.777306 | 3 |
bibpy/error.py | MisanthropicBit/bibpy | 1 | 6621863 | # -*- coding: utf-8 -*-
"""bibpy errors."""
class LexerException(Exception):
    """Raised when the lexer encounters an error."""
class ParseException(Exception):
    """Raised when parsing fails."""
class RequiredFieldError(Exception):
    """Raised when an entry does not conform to a format's requirements."""

    def __init__(self, entry, required, optional):
        """Build an error message listing the entry's missing fields.

        *required* is a list of plain field names; *optional* is a list of
        two-element alternatives of which one must be present.
        """
        if any(len(pair) != 2 for pair in optional):
            raise ValueError("Fields with options should have only two "
                             "options")

        # Collect every missing item, alternatives rendered as "a/b",
        # then join everything with a single separator.
        missing = list(required)
        missing += ["/".join(pair) for pair in optional]

        message = "Entry '{0}' (type '{1}') is missing required field(s): "\
            .format(entry.bibkey, entry.bibtype) + ", ".join(missing)

        super().__init__(message)
        self._entry = entry
        self._required = required
        self._optional = optional

    @property
    def entry(self):
        """The offending entry."""
        return self._entry

    @property
    def required(self):
        """Missing required fields."""
        return self._required

    @property
    def optional(self):
        """Missing fields where one of several fields are required."""
        return self._optional
| # -*- coding: utf-8 -*-
"""bibpy errors."""
class LexerException(Exception):
"""Raised on a lexer error."""
pass
class ParseException(Exception):
"""Raised on errors in parsing."""
pass
class RequiredFieldError(Exception):
"""Raised when an entry does not conform to a format's requirements."""
def __init__(self, entry, required, optional):
"""Format a message for an entry's missing fields."""
if not all(len(opt) == 2 for opt in optional):
raise ValueError("Fields with options should have only two "
"options")
s = "Entry '{0}' (type '{1}') is missing required field(s): "\
.format(entry.bibkey, entry.bibtype)
if required:
s += "{0}".format(", ".join(required))
if optional:
if required:
s += ", "
s += "{0}".format(", ".join("/".join(e) for e in optional))
super().__init__(s)
self._entry = entry
self._required = required
self._optional = optional
@property
def entry(self):
"""The offending entry."""
return self._entry
@property
def required(self):
"""Missing required fields."""
return self._required
@property
def optional(self):
"""Missing fields where one of several fields are required."""
return self._optional
| en | 0.894367 | # -*- coding: utf-8 -*- bibpy errors. Raised on a lexer error. Raised on errors in parsing. Raised when an entry does not conform to a format's requirements. Format a message for an entry's missing fields. The offending entry. Missing required fields. Missing fields where one of several fields are required. | 2.965199 | 3 |
Python/Shop.py | Deego88/MPP_Assignment | 0 | 6621864 | <gh_stars>0
# Student:<NAME>
# Student Number: G00387896
# Import libraries
import os
from dataclasses import dataclass, field
from typing import List
import csv
#****** CREATE DATA CLASS******#
# Create a data class for Product
@dataclass
class Product:
    """A sellable item: a name and a unit price."""
    # Product name as read from the CSV stock/order files.
    name: str
    # Unit price in euro; defaults to 0.0 for shopping-list entries
    # where the price is not known yet.
    price: float = 0.0
# Create a data class for ProductStock
@dataclass
class ProductStock:
    """One shop inventory line: a Product and its units on hand."""
    product: Product
    # NOTE(review): annotated int, but callers pass float(row[2]) from the
    # CSV -- dataclasses do not enforce the annotation.
    quantity: int
# Create a data class for ProductQuantity (nested)
@dataclass
class ProductQuantity:
    """One shopping-list line: a Product and how many units are wanted."""
    product: Product
    quantity: int
# Create a data class for Shop (nested)
@dataclass
class Shop:
    """The shop: cash on hand plus its stocked product lines."""
    cash: float = 0.0
    # default_factory gives every Shop instance its own empty list.
    stock: List[ProductStock] = field(default_factory=list)
# Create a data class for Customer
@dataclass
class Customer:
    """A customer: name, remaining budget and their shopping list."""
    name: str = ""
    budget: float = 0.0
    # default_factory gives every Customer instance its own empty list.
    shopping_list: List[ProductQuantity] = field(default_factory=list)
#****** CREATE_SHOP ******#
def create_and_stock_shop(file_path='../Data/shop_stock.csv'):
    """Build a stocked Shop from a CSV file.

    The first row holds the shop's opening cash; every following row is
    ``name,price,quantity``.

    Args:
        file_path: Path to the stock CSV.  Defaults to the original
            hard-coded location, so existing callers are unaffected.

    Returns:
        A Shop with ``cash`` set and one ProductStock per data row.
    """
    shop = Shop()
    # newline='' is the csv-module-recommended way to open CSV files.
    with open(file_path, newline='') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        first_row = next(csv_reader)
        shop.cash = float(first_row[0])
        for row in csv_reader:
            product = Product(row[0], float(row[1]))
            shop.stock.append(ProductStock(product, float(row[2])))
    return shop
# ****** CREATE_CUSTOMER ****** https://github.com/Deego88/MPP_Assignment/blob/master/Data/shop_stock.csv
def create_customer(file_path):
    """Load a Customer and their shopping list from a CSV file.

    The first row is ``name,budget``; every following row is
    ``product_name,quantity``.

    Args:
        file_path: Path to the customer's order CSV.

    Returns:
        A Customer with ``shopping_list`` populated.
    """
    # newline='' per the csv docs; the unicode_escape encoding is kept
    # from the original to tolerate the existing data files.
    with open(file_path, newline='', encoding='unicode_escape') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        first_row = next(csv_reader)
        customer = Customer(first_row[0], float(first_row[1]))
        for row in csv_reader:
            product = Product(row[0])
            # Consistency fix: shopping_list is declared as
            # List[ProductQuantity]; the original appended ProductStock.
            customer.shopping_list.append(ProductQuantity(product, float(row[1])))
    return customer
#****** SHOW_PRODUCT ******#
def print_product(prod):
    """Print one product line.

    Stock entries (non-zero price) print the price without a trailing
    newline; shopping-list entries (price 0) print the name only.
    """
    if prod.price:
        print(f"Product: {prod.name}; \tPrice: €{prod.price:.2f}\t", end="")
    else:
        print(f"Product: {prod.name};")
#****** SHOW_CUSTOMER ******#
def print_customers_details(cust, sh):
    """Print the customer's shopping list priced against shop stock and
    return the total cost of what the shop can actually supply.

    For each list line: full price if stock suffices, the price of the
    remaining stock if it does not, and 0 if the product is unknown.
    Only prints -- no shop or customer state is modified here.
    """
    # Customer name and budget is printed
    print(f"\n**********************************************************************************")
    print(
        f"\nThe Customer name is: {cust.name}, the customer budget is: €{cust.budget:.2f}")
    print(f"\n**********************************************************************************")
    # initialise
    total_cost = 0
    # Print customer's name
    print(f"{cust.name} wants the following products: ")
    # Create a for loop to loop over shopping list
    for cust_item in cust.shopping_list:
        # Show customer details
        print(
            f" -{cust_item.product.name}, quantity {cust_item.quantity:.0f}. ", end="")
        # Initialise
        sub_total = 0
        match_exist = 0
        # Assign the (i-th) product from the customer schopping list as a shorthand
        cust_item_name = cust_item.product.name
        # loop over the stock list to find a match
        for sh_item in sh.stock:
            # Assign the (j-th) product from the shop stock list as a shorthand
            sh_item_name = sh_item.product.name
            # check if there is match
            if (cust_item_name == sh_item_name):
                match_exist += 1
                # IF sufficient amount exists do the following
                if (cust_item.quantity <= sh_item.quantity):
                    # Prints out cost of all items of the product
                    print(f"\tThe shop has stock and ", end="")
                    # calculate sub total of order (price * quantity)
                    sub_total_full = cust_item.quantity * sh_item.product.price
                    # Show Cost of all items of the product set to the sub_total variable
                    print(f"sub-total cost would be €{sub_total_full:.2f}.")
                    sub_total = sub_total_full
                else:
                    # check how many can be bought
                    # (q - (q - s) is just s: the customer gets all remaining stock)
                    partial_order_qty = cust_item.quantity - \
                        (cust_item.quantity -
                         sh_item.quantity)
                    # Cost of the (i-th) item from the customer's shopping list
                    sub_total_partial = partial_order_qty * \
                        sh_item.product.price
                    # Prints out cost of all items of the product
                    print(
                        f"\tSorry only {partial_order_qty:.0f} is available in stock for you, your sub-total cost is now €{sub_total_partial:.2f}.")
                    sub_total = sub_total_partial
        # Total_cost variable
        total_cost = total_cost + sub_total
        # IF product is not in the shop, no match exists
        if (match_exist == 0):
            # Show the cost
            # NOTE(review): the message below is missing a space before the
            # euro sign ("be€"); left unchanged to preserve output exactly.
            print(
                f"\tSorry but this product is not available. sub-total cost will be€{sub_total:.2f}.")
    # Cost of all items
    # NOTE(review): same missing space before the euro sign here.
    print(f"Total shopping cost will be€{total_cost:.2f}. \n")
    return total_cost
# ****** PROCESS_ORDER ******
# Process the customer's order against the shop's stock
def process_order(cust, sh, total_cost):
# IF the customer has not enough funds for the order
if (cust.budget < total_cost):
print(
f"Sorry, you do not have enough funds, you require €{(total_cost - cust.budget):.2f} extra. ", end="")
# else customer has enough funds
else:
# loop over the items in the customer shopping list
for cust_item in cust.shopping_list:
# Initialise (no match=0)
match_exist = 0
# Assign the (i-th) product from the customer schopping list as a shorthand
cust_item_name = cust_item.product.name
# loop over the stock list to find a match
for sh_item in sh.stock:
# assign the (j-th) product from the shop stock list as a shorthand
sh_item_name = sh_item.product.name
# check if there is a match
if (cust_item_name == sh_item_name):
match_exist = + 1
# IF sufficient amount exists do the following
if (cust_item.quantity <= sh_item.quantity):
# Update the shop stock
sh_item.quantity = sh_item.quantity - cust_item.quantity
print(
f"Shop product {cust_item.product.name} is now updated to: {sh_item.quantity:.0f}")
else: # customer wants more than in stock
# check how many can be bought
partial_order_qty = cust_item.quantity - \
(cust_item.quantity - sh_item.quantity)
# Buy all stock
# Perform the cost of the (i-th )item from shopping list
sub_total_partial = partial_order_qty * \
sh_item.product.price
# Update the shop stock
sh_item.quantity = sh_item.quantity - partial_order_qty
print(
f"Shop product {cust_item.product.name} is now updated to {sh_item.quantity:.0f}.")
# IF product is not in the shop, there is no match
if (match_exist == 0):
print(f"\tSorry the shop doesn't have this product.")
# update shop and customer
sh.cash = sh.cash + total_cost
cust.budget = cust.budget - total_cost
print(f"\nThe shop now has €{sh.cash:.2f} in cash. ")
# updated customer's budget
print(f"{cust.name} has €{cust.budget:.2f} remianing for shopping.")
print("")
return
# ****** LIVE_MODE ******
def interactive_mode(sh, budget):
# Print stock
print(f"\nThis is a list of products for sale in the shop:")
print_shop(sh)
# initialise
product_name = ""
quantity = 0
# initialise a forever loop forcing the user to exit only with an x
while product_name != "x":
print()
# Request input from the user, assign to the variable
product_name = input(
"\nPlease enter your product name (press x to exit): ")
print(f"Searching for: {product_name}")
# initialise (0 = no match)
match_exist = 0
# loop over shop stock list looking for a match from customer's list
for sh_item in sh.stock:
# initialise
sub_total = 0
# assign the (j-th) product from the shop stock list as a shorthand
sh_item_name = sh_item.product.name
# IF there is a match
if (product_name == sh_item_name):
match_exist += 1 # set match
quantity = int(input("Please enter your requested quantity: "))
# check products availability
if (quantity <= sh_item.quantity):
# check product price and calculate sub-total cost
sub_total = sh_item.product.price * quantity
# IF customer has enough funds
if (budget >= sub_total):
# update customer's funds
budget = budget - sub_total
print(
f"Congrats! you bought the product. Sub total cost was €{sub_total:.2f}. Your funds are now €{budget:.2f}.")
# update the shop stock and cash
sh_item.quantity = sh_item.quantity - quantity
sh.cash = sh.cash + sub_total
print(
f"Shop quantity of {sh_item_name} in now: {sh_item.quantity:.0f}. The shop has {sh.cash:.2f} cash.")
else: # customer cannot afford all
print(
f"Sorry you do nto have enough funds, you require €{(sub_total - budget):.2f} extra.", end="")
# customer wants more than in stock
else:
# check how many can be bought and buy all that is in stock
partial_order_qty = quantity - \
(quantity - sh_item.quantity)
# perform the sub-total cost for the item
sub_total_partial = partial_order_qty * \
sh_item.product.price
# Prints out cost of all items of the product
print(
f"Only {partial_order_qty:.0f} is available. Sub-total cost was €{sub_total_partial:.2f}. ")
# update customer's budget
budget = budget - sub_total_partial
print(
f"Customers budget is: €{budget:.2f} after buying the item.")
# update the shop stock adn cash
sh_item.quantity = sh_item.quantity - partial_order_qty
sh.cash = sh.cash + sub_total_partial
print(
f"This product is not avilable in the shop: {sh_item.quantity:.0f}). Cash in shop now: {sh.cash:.2f}.")
if (match_exist == 0): # product not available in stock
print("Product unavailable.")
#****** SHOP_DETAILS******#
def print_shop(sh): # takes 'shop' dataclass as a parameter
# Show shop detials
# print(sh) # for testing - ok
print(f"\nShop has {sh.cash:.2f} in cash")
print("==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ====")
for item in sh.stock:
print_product(item.product)
print(f"Available amount: {item.quantity:.0f}")
print("==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ====")
# ****** SHOP_MENU ******#
def display_menu():
print("\n******************************")
print("Welcome to the Shop Main Menu\n")
print("******************************\n")
print("[1] - Shop Details\n")
print("[2] - Customer A: good case\n")
print("[3] - Customer B: Broke funds case\n")
print("[4] - Customer C: exceeding order case\n")
print("[5] - Live Mode\n")
print("[9] - Exit\n")
print("******************************\n")
def shop_menu(shop):
# Main menu screen
display_menu()
while True: # this is a 'forever' loop, unless interupted (break)
# Request user input
choice = input("Please enter your choice: ")
if (choice == "1"):
print_shop(shop)
display_menu()
elif (choice == "2"):
# create customer A- good case csv
customer_A = create_customer(
"../Data/customer_good.csv") # read data from a file
# print customer details and shopping list
total_cost = print_customers_details(customer_A, shop)
# show customer's shopping list by calling relevant method
process_order(customer_A, shop, total_cost)
display_menu()
elif (choice == "3"):
# create customer B- broke case csv
customer_B = create_customer(
"../Data/customer_broke.csv") # read data from a file
# print customer details and shopping list
total_cost = print_customers_details(customer_B, shop)
# show customer's shopping list by calling relevant method
process_order(customer_B, shop, total_cost)
display_menu()
elif (choice == "4"):
# create customer C- exceeding case
customer_C = create_customer(
"../Data/customer_exceeding_order.csv") # read data from a file
# print customer details and shopping list
total_cost = print_customers_details(customer_C, shop)
# show customer's shopping list by calling relevant method
process_order(customer_C, shop, total_cost)
display_menu()
elif (choice == "5"):
# Live mode welcome message
print("-------------------------")
print("You are now in Live Mode")
print("-------------------------")
# get user's name
customer_name = input("Enter your name please: ")
print(
f"Welcome, {customer_name} to the live Mode shopping experience. ")
# get user's budget
budget = float(input("Please tell me your shopping budget: "))
# go to the interactive mode
interactive_mode(shop, budget)
display_menu()
elif (choice == "9"): # Exit condition
print("")
break
else:
display_menu()
#****** MAIN_FUNCTION ******#
def main():
# Clear screen
os.system("cls") # for Windows
os.system("cls") # for Linux
shop_one = create_and_stock_shop()
shop_menu(shop_one)
if __name__ == "__main__":
# execute only if run as a script
main()
| # Student:<NAME>
# Student Number: G00387896
# Import libraries
import os
from dataclasses import dataclass, field
from typing import List
import csv
#****** CREATE DATA CLASS******#
# Create a data class for Product
@dataclass
class Product:
name: str
price: float = 0.0
# Create a data class for ProductStock
@dataclass
class ProductStock:
product: Product
quantity: int
# Create a data class for ProductQuantity (nested)
@dataclass
class ProductQuantity:
product: Product
quantity: int
# Create a data class for Shop (nested)
@dataclass
class Shop:
cash: float = 0.0
stock: List[ProductStock] = field(default_factory=list)
# Create a data class for Customer
@dataclass
class Customer:
name: str = ""
budget: float = 0.0
shopping_list: List[ProductQuantity] = field(default_factory=list)
#****** CREATE_SHOP ******#
def create_and_stock_shop():
# initialise
shop = Shop()
# read in CSV and set variables
with open('../Data/shop_stock.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
first_row = next(csv_reader)
shop.cash = float(first_row[0])
# for loop to loop over CSV file and set p and ps variables, append items to the list
for row in csv_reader:
p = Product(row[0], float(row[1]))
ps = ProductStock(p, float(row[2]))
shop.stock.append(ps)
# print(ps) test
return shop
# ****** CREATE_CUSTOMER ****** https://github.com/Deego88/MPP_Assignment/blob/master/Data/shop_stock.csv
def create_customer(file_path):
# initialise
customer = Customer()
# read in CSV and set variables
with open(file_path, encoding='unicode_escape') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
first_row = next(csv_reader)
# Assigns name [0] and budget [1] from file
customer = Customer(first_row[0], float(first_row[1]))
# for loop to loop over CSV file and set p and ps variables, append items to the list
for row in csv_reader:
name = row[0]
quantity = float(row[1])
p = Product(name)
ps = ProductStock(p, quantity)
customer.shopping_list.append(ps)
return customer
#****** SHOW_PRODUCT ******#
def print_product(prod):
# if the price is defined show stock else show the customer shopping list
if prod.price == 0:
print(f"Product: {prod.name};")
else:
print(f"Product: {prod.name}; \tPrice: €{prod.price:.2f}\t", end="")
#****** SHOW_CUSTOMER ******#
def print_customers_details(cust, sh):
# Customer name and budget is printed
print(f"\n**********************************************************************************")
print(
f"\nThe Customer name is: {cust.name}, the customer budget is: €{cust.budget:.2f}")
print(f"\n**********************************************************************************")
# initialise
total_cost = 0
# Print customer's name
print(f"{cust.name} wants the following products: ")
# Create a for loop to loop over shopping list
for cust_item in cust.shopping_list:
# Show customer details
print(
f" -{cust_item.product.name}, quantity {cust_item.quantity:.0f}. ", end="")
# Initialise
sub_total = 0
match_exist = 0
# Assign the (i-th) product from the customer schopping list as a shorthand
cust_item_name = cust_item.product.name
# loop over the stock list to find a match
for sh_item in sh.stock:
# Assign the (j-th) product from the shop stock list as a shorthand
sh_item_name = sh_item.product.name
# check if there is match
if (cust_item_name == sh_item_name):
match_exist += 1
# IF sufficient amount exists do the following
if (cust_item.quantity <= sh_item.quantity):
# Prints out cost of all items of the product
print(f"\tThe shop has stock and ", end="")
# calculate sub total of order (price * quantity)
sub_total_full = cust_item.quantity * sh_item.product.price
# Show Cost of all items of the product set to the sub_total variable
print(f"sub-total cost would be €{sub_total_full:.2f}.")
sub_total = sub_total_full
else:
# check how many can be bought
partial_order_qty = cust_item.quantity - \
(cust_item.quantity -
sh_item.quantity)
# Cost of the (i-th) item from the customer's shopping list
sub_total_partial = partial_order_qty * \
sh_item.product.price
# Prints out cost of all items of the product
print(
f"\tSorry only {partial_order_qty:.0f} is available in stock for you, your sub-total cost is now €{sub_total_partial:.2f}.")
sub_total = sub_total_partial
# Total_cost variable
total_cost = total_cost + sub_total
# IF product is not in the shop, no match exists
if (match_exist == 0):
# Show the cost
print(
f"\tSorry but this product is not available. sub-total cost will be€{sub_total:.2f}.")
# Cost of all items
print(f"Total shopping cost will be€{total_cost:.2f}. \n")
return total_cost
# ****** SHOP_DETAILS******
# Create order function
def process_order(cust, sh, total_cost):
# IF the customer has not enough funds for the order
if (cust.budget < total_cost):
print(
f"Sorry, you do not have enough funds, you require €{(total_cost - cust.budget):.2f} extra. ", end="")
# else customer has enough funds
else:
# loop over the items in the customer shopping list
for cust_item in cust.shopping_list:
# Initialise (no match=0)
match_exist = 0
# Assign the (i-th) product from the customer schopping list as a shorthand
cust_item_name = cust_item.product.name
# loop over the stock list to find a match
for sh_item in sh.stock:
# assign the (j-th) product from the shop stock list as a shorthand
sh_item_name = sh_item.product.name
# check if there is a match
if (cust_item_name == sh_item_name):
match_exist = + 1
# IF sufficient amount exists do the following
if (cust_item.quantity <= sh_item.quantity):
# Update the shop stock
sh_item.quantity = sh_item.quantity - cust_item.quantity
print(
f"Shop product {cust_item.product.name} is now updated to: {sh_item.quantity:.0f}")
else: # customer wants more than in stock
# check how many can be bought
partial_order_qty = cust_item.quantity - \
(cust_item.quantity - sh_item.quantity)
# Buy all stock
# Perform the cost of the (i-th )item from shopping list
sub_total_partial = partial_order_qty * \
sh_item.product.price
# Update the shop stock
sh_item.quantity = sh_item.quantity - partial_order_qty
print(
f"Shop product {cust_item.product.name} is now updated to {sh_item.quantity:.0f}.")
# IF product is not in the shop, there is no match
if (match_exist == 0):
print(f"\tSorry the shop doesn't have this product.")
# update shop and customer
sh.cash = sh.cash + total_cost
cust.budget = cust.budget - total_cost
print(f"\nThe shop now has €{sh.cash:.2f} in cash. ")
# updated customer's budget
print(f"{cust.name} has €{cust.budget:.2f} remianing for shopping.")
print("")
return
# ****** LIVE_MODE ******
def interactive_mode(sh, budget):
# Print stock
print(f"\nThis is a list of products for sale in the shop:")
print_shop(sh)
# initialise
product_name = ""
quantity = 0
# initialise a forever loop forcing the user to exit only with an x
while product_name != "x":
print()
# Request input from the user, assign to the variable
product_name = input(
"\nPlease enter your product name (press x to exit): ")
print(f"Searching for: {product_name}")
# initialise (0 = no match)
match_exist = 0
# loop over shop stock list looking for a match from customer's list
for sh_item in sh.stock:
# initialise
sub_total = 0
# assign the (j-th) product from the shop stock list as a shorthand
sh_item_name = sh_item.product.name
# IF there is a match
if (product_name == sh_item_name):
match_exist += 1 # set match
quantity = int(input("Please enter your requested quantity: "))
# check products availability
if (quantity <= sh_item.quantity):
# check product price and calculate sub-total cost
sub_total = sh_item.product.price * quantity
# IF customer has enough funds
if (budget >= sub_total):
# update customer's funds
budget = budget - sub_total
print(
f"Congrats! you bought the product. Sub total cost was €{sub_total:.2f}. Your funds are now €{budget:.2f}.")
# update the shop stock and cash
sh_item.quantity = sh_item.quantity - quantity
sh.cash = sh.cash + sub_total
print(
f"Shop quantity of {sh_item_name} in now: {sh_item.quantity:.0f}. The shop has {sh.cash:.2f} cash.")
else: # customer cannot afford all
print(
f"Sorry you do nto have enough funds, you require €{(sub_total - budget):.2f} extra.", end="")
# customer wants more than in stock
else:
# check how many can be bought and buy all that is in stock
partial_order_qty = quantity - \
(quantity - sh_item.quantity)
# perform the sub-total cost for the item
sub_total_partial = partial_order_qty * \
sh_item.product.price
# Prints out cost of all items of the product
print(
f"Only {partial_order_qty:.0f} is available. Sub-total cost was €{sub_total_partial:.2f}. ")
# update customer's budget
budget = budget - sub_total_partial
print(
f"Customers budget is: €{budget:.2f} after buying the item.")
# update the shop stock adn cash
sh_item.quantity = sh_item.quantity - partial_order_qty
sh.cash = sh.cash + sub_total_partial
print(
f"This product is not avilable in the shop: {sh_item.quantity:.0f}). Cash in shop now: {sh.cash:.2f}.")
if (match_exist == 0): # product not available in stock
print("Product unavailable.")
#****** SHOP_DETAILS******#
def print_shop(sh): # takes 'shop' dataclass as a parameter
# Show shop detials
# print(sh) # for testing - ok
print(f"\nShop has {sh.cash:.2f} in cash")
print("==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ====")
for item in sh.stock:
print_product(item.product)
print(f"Available amount: {item.quantity:.0f}")
print("==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ====")
# ****** SHOP_MENU ******#
def display_menu():
print("\n******************************")
print("Welcome to the Shop Main Menu\n")
print("******************************\n")
print("[1] - Shop Details\n")
print("[2] - Customer A: good case\n")
print("[3] - Customer B: Broke funds case\n")
print("[4] - Customer C: exceeding order case\n")
print("[5] - Live Mode\n")
print("[9] - Exit\n")
print("******************************\n")
def shop_menu(shop):
# Main menu screen
display_menu()
while True: # this is a 'forever' loop, unless interupted (break)
# Request user input
choice = input("Please enter your choice: ")
if (choice == "1"):
print_shop(shop)
display_menu()
elif (choice == "2"):
# create customer A- good case csv
customer_A = create_customer(
"../Data/customer_good.csv") # read data from a file
# print customer details and shopping list
total_cost = print_customers_details(customer_A, shop)
# show customer's shopping list by calling relevant method
process_order(customer_A, shop, total_cost)
display_menu()
elif (choice == "3"):
# create customer B- broke case csv
customer_B = create_customer(
"../Data/customer_broke.csv") # read data from a file
# print customer details and shopping list
total_cost = print_customers_details(customer_B, shop)
# show customer's shopping list by calling relevant method
process_order(customer_B, shop, total_cost)
display_menu()
elif (choice == "4"):
# create customer C- exceeding case
customer_C = create_customer(
"../Data/customer_exceeding_order.csv") # read data from a file
# print customer details and shopping list
total_cost = print_customers_details(customer_C, shop)
# show customer's shopping list by calling relevant method
process_order(customer_C, shop, total_cost)
display_menu()
elif (choice == "5"):
# Live mode welcome message
print("-------------------------")
print("You are now in Live Mode")
print("-------------------------")
# get user's name
customer_name = input("Enter your name please: ")
print(
f"Welcome, {customer_name} to the live Mode shopping experience. ")
# get user's budget
budget = float(input("Please tell me your shopping budget: "))
# go to the interactive mode
interactive_mode(shop, budget)
display_menu()
elif (choice == "9"): # Exit condition
print("")
break
else:
display_menu()
#****** MAIN_FUNCTION ******#
def main():
# Clear screen
os.system("cls") # for Windows
os.system("cls") # for Linux
shop_one = create_and_stock_shop()
shop_menu(shop_one)
if __name__ == "__main__":
# execute only if run as a script
main() | en | 0.841204 | # Student:<NAME> # Student Number: G00387896 # Import libraries #****** CREATE DATA CLASS******# # Create a data class for Product # Create a data class for ProductStock # Create a data class for ProductQuantity (nested) # Create a data class for Shop (nested) # Create a data class for Customer #****** CREATE_SHOP ******# # initialise # read in CSV and set variables # for loop to loop over CSV file and set p and ps variables, append items to the list # print(ps) test # ****** CREATE_CUSTOMER ****** https://github.com/Deego88/MPP_Assignment/blob/master/Data/shop_stock.csv # initialise # read in CSV and set variables # Assigns name [0] and budget [1] from file # for loop to loop over CSV file and set p and ps variables, append items to the list #****** SHOW_PRODUCT ******# # if the price is defined show stock else show the customer shopping list #****** SHOW_CUSTOMER ******# # Customer name and budget is printed # initialise # Print customer's name # Create a for loop to loop over shopping list # Show customer details # Initialise # Assign the (i-th) product from the customer schopping list as a shorthand # loop over the stock list to find a match # Assign the (j-th) product from the shop stock list as a shorthand # check if there is match # IF sufficient amount exists do the following # Prints out cost of all items of the product # calculate sub total of order (price * quantity) # Show Cost of all items of the product set to the sub_total variable # check how many can be bought # Cost of the (i-th) item from the customer's shopping list # Prints out cost of all items of the product # Total_cost variable # IF product is not in the shop, no match exists # Show the cost # Cost of all items # ****** SHOP_DETAILS****** # Create order function # IF the customer has not enough funds for the order # else customer has enough funds # loop over the items in the customer shopping list # Initialise (no match=0) # Assign the (i-th) product from the 
customer schopping list as a shorthand # loop over the stock list to find a match # assign the (j-th) product from the shop stock list as a shorthand # check if there is a match # IF sufficient amount exists do the following # Update the shop stock # customer wants more than in stock # check how many can be bought # Buy all stock # Perform the cost of the (i-th )item from shopping list # Update the shop stock # IF product is not in the shop, there is no match # update shop and customer # updated customer's budget # ****** LIVE_MODE ****** # Print stock # initialise # initialise a forever loop forcing the user to exit only with an x # Request input from the user, assign to the variable # initialise (0 = no match) # loop over shop stock list looking for a match from customer's list # initialise # assign the (j-th) product from the shop stock list as a shorthand # IF there is a match # set match # check products availability # check product price and calculate sub-total cost # IF customer has enough funds # update customer's funds # update the shop stock and cash # customer cannot afford all # customer wants more than in stock # check how many can be bought and buy all that is in stock # perform the sub-total cost for the item # Prints out cost of all items of the product # update customer's budget # update the shop stock adn cash # product not available in stock #****** SHOP_DETAILS******# # takes 'shop' dataclass as a parameter # Show shop detials # print(sh) # for testing - ok # ****** SHOP_MENU ******# # Main menu screen # this is a 'forever' loop, unless interupted (break) # Request user input # create customer A- good case csv # read data from a file # print customer details and shopping list # show customer's shopping list by calling relevant method # create customer B- broke case csv # read data from a file # print customer details and shopping list # show customer's shopping list by calling relevant method # create customer C- exceeding case # read data from 
a file # print customer details and shopping list # show customer's shopping list by calling relevant method # Live mode welcome message # get user's name # get user's budget # go to the interactive mode # Exit condition #****** MAIN_FUNCTION ******# # Clear screen # for Windows # for Linux # execute only if run as a script | 3.936497 | 4 |
tests/test_dox_py.py | moi90/experitur | 3 | 6621865 | <filename>tests/test_dox_py.py
import inspect
import os
import pytest
from experitur.core.context import Context
from experitur.dox import DOXError, load_dox
@pytest.fixture(name="dox_py_fn")
def fixture_dox_py_fn(tmp_path):
    """Write a minimal experitur DOX python file into tmp_path.

    Returns the file's path as a string, matching what the tests expect.
    """
    dox_path = tmp_path / "dox.py"
    dox_source = inspect.cleandoc(
        """
        from experitur import Experiment
        @Experiment(
            parameters={
                "a1": [1],
                "a2": [2],
                "b": [1, 2],
                "a": ["{a_{b}}"],
            })
        def baseline(trial):
            return trial.parameters
        # This experiment shouldn't be executed, because this combination of callable and parameters was already executed.
        Experiment(
            "second_experiment",
            parent=baseline
        )
        """
    )
    # cleandoc strips the common leading indentation, so the written file
    # starts at column zero exactly as the original open()/write() version did.
    dox_path.write_text(dox_source)
    return str(dox_path)
def test_dox_py(dox_py_fn):
    """Load a .py DOX file and run it; exactly two trials must be stored."""
    # Working directory: the DOX file's path with its extension stripped.
    wdir = os.path.splitext(dox_py_fn)[0]
    os.makedirs(wdir, exist_ok=True)
    with Context(wdir, writable=True) as ctx:
        load_dox(dox_py_fn)
        # Execute experiments
        ctx.run()
    # The fixture's DOX defines two experiments ("baseline", "second_experiment");
    # two stored trials are expected after the run.
    assert len(ctx.store) == 2, "Trials: {}".format(", ".join(ctx.store.keys()))
@pytest.fixture(name="unknown_fn")
def fixture_unknown_fn(tmp_path):
    """Create an empty file with an unsupported extension; return its path (str)."""
    target = tmp_path / "unknown.txt"
    # Writing an empty string creates the file, same as open(..., "w") did.
    target.write_text("")
    return str(target)
def test_unknown_extension(unknown_fn):
    """Loading a DOX with an unsupported file extension must raise DOXError."""
    wdir = os.path.splitext(unknown_fn)[0]
    os.makedirs(wdir, exist_ok=True)
    with Context(wdir):
        with pytest.raises(DOXError):
            load_dox(unknown_fn)
@pytest.fixture(name="malformed_py_fn")
def fixture_malformed_py_fn(tmp_path):
    """Create a .py file whose contents are not valid Python; return its path (str)."""
    target = tmp_path / "malformed.py"
    target.write_text("This is not a python file!")
    return str(target)
def test_malformed_py(malformed_py_fn):
    """Loading a .py DOX that is not valid Python must raise DOXError."""
    wdir = os.path.splitext(malformed_py_fn)[0]
    os.makedirs(wdir, exist_ok=True)
    with Context(wdir):
        with pytest.raises(DOXError):
            load_dox(malformed_py_fn)
| <filename>tests/test_dox_py.py
import inspect
import os
import pytest
from experitur.core.context import Context
from experitur.dox import DOXError, load_dox
@pytest.fixture(name="dox_py_fn")
def fixture_dox_py_fn(tmp_path):
    """Write a minimal experitur DOX python file to tmp_path; return its path (str)."""
    fn = str(tmp_path / "dox.py")
    with open(fn, "w") as f:
        # cleandoc strips the common leading indentation so the written
        # file starts at column zero.
        f.write(
            inspect.cleandoc(
                """
                from experitur import Experiment
                @Experiment(
                    parameters={
                        "a1": [1],
                        "a2": [2],
                        "b": [1, 2],
                        "a": ["{a_{b}}"],
                    })
                def baseline(trial):
                    return trial.parameters
                # This experiment shouldn't be executed, because this combination of callable and parameters was already executed.
                Experiment(
                    "second_experiment",
                    parent=baseline
                )
                """
            )
        )
    return fn
def test_dox_py(dox_py_fn):
    """Load a .py DOX file and run it; exactly two trials must be stored."""
    # Working directory: the DOX file's path with its extension stripped.
    wdir = os.path.splitext(dox_py_fn)[0]
    os.makedirs(wdir, exist_ok=True)
    with Context(wdir, writable=True) as ctx:
        load_dox(dox_py_fn)
        # Execute experiments
        ctx.run()
    # The fixture's DOX defines two experiments ("baseline", "second_experiment").
    assert len(ctx.store) == 2, "Trials: {}".format(", ".join(ctx.store.keys()))
@pytest.fixture(name="unknown_fn")
def fixture_unknown_fn(tmp_path):
    """Create an empty file with an unsupported extension; return its path (str)."""
    fn = str(tmp_path / "unknown.txt")
    # Opening in "w" mode creates the (empty) file on disk.
    with open(fn, "w"):
        pass
    return fn
def test_unknown_extension(unknown_fn):
    """Loading a DOX with an unsupported file extension must raise DOXError."""
    wdir = os.path.splitext(unknown_fn)[0]
    os.makedirs(wdir, exist_ok=True)
    with Context(wdir):
        with pytest.raises(DOXError):
            load_dox(unknown_fn)
@pytest.fixture(name="malformed_py_fn")
def fixture_malformed_py_fn(tmp_path):
    """Create a .py file whose contents are not valid Python; return its path (str)."""
    fn = str(tmp_path / "malformed.py")
    with open(fn, "w") as f:
        f.write("This is not a python file!")
    return fn
def test_malformed_py(malformed_py_fn):
    """Loading a .py DOX that is not valid Python must raise DOXError."""
    wdir = os.path.splitext(malformed_py_fn)[0]
    os.makedirs(wdir, exist_ok=True)
    with Context(wdir):
        with pytest.raises(DOXError):
            load_dox(malformed_py_fn)
| en | 0.706676 | from experitur import Experiment @Experiment( parameters={ "a1": [1], "a2": [2], "b": [1, 2], "a": ["{a_{b}}"], }) def baseline(trial): return trial.parameters # This experiment shouldn't be executed, because this combination of callable and parameters was already executed. Experiment( "second_experiment", parent=baseline ) # Execute experiments | 2.262101 | 2 |
Lib/site-packages/django_core/forms/mixins/paging.py | fochoao/cpython | 0 | 6621866 | from __future__ import unicode_literals
from django import forms
class PagingFormMixin(forms.Form):
    """Form mixin that includes paging page number and page size."""
    # Current page number; optional, defaults to the first page.
    p = forms.IntegerField(label='Page', initial=1, required=False)
    # Number of items per page; optional, defaults to 25.
    ps = forms.IntegerField(label='Page Size', initial=25, required=False)
| from __future__ import unicode_literals
from django import forms
class PagingFormMixin(forms.Form):
    """Form mixin that includes paging page number and page size."""
    # Current page number; optional, defaults to the first page.
    p = forms.IntegerField(label='Page', initial=1, required=False)
    # Number of items per page; optional, defaults to 25.
    ps = forms.IntegerField(label='Page Size', initial=25, required=False)
| en | 0.927054 | Form mixin that includes paging page number and page size. | 1.992901 | 2 |
3rd-iteration/tiles.py | Saccharine-Coal/pygame-pygame_ai-game | 2 | 6621867 | <reponame>Saccharine-Coal/pygame-pygame_ai-game
from random import randint
import pygame as pg
import sprites
import settings
class Tile(sprites.GameObject):
    """A drawable map tile: a scaled image plus its bounding rectangle."""

    def __init__(self, xy: tuple, size: tuple, image: pg.Surface) -> None:
        # Scale the artwork to the tile size, then hand the bounding box
        # to the GameObject base class.
        self.image = pg.transform.scale(image, size)
        super().__init__(pg.Rect(xy, size))
class TileA(Tile):
    """Tile variant A, drawn at a random multiple of 90 degrees."""

    def __init__(self, xy: tuple, size: tuple) -> None:
        # One of 0/90/180/270 degrees so identical tiles look varied;
        # same randint(0, 3) call as before, so the random stream is unchanged.
        angle = randint(0, 3) * 90
        super().__init__(xy, size, pg.transform.rotate(settings.TILE_A_IMG, angle))
class TileB(Tile):
    """Tile variant B, drawn without rotation."""

    def __init__(self, xy: tuple, size: tuple) -> None:
        super().__init__(xy, size, settings.TILE_B_IMG)
class TileC(Tile):
    # Tile variant C, drawn without rotation.
    def __init__(self, xy: tuple, size: tuple) -> None:
        image = settings.TILE_C_IMG
        super().__init__(xy, size, image) | from random import randint
import pygame as pg
import sprites
import settings
class Tile(sprites.GameObject):
    # A drawable map tile: a scaled image plus its bounding rectangle.
    def __init__(self, xy: tuple, size: tuple, image: pg.Surface) -> None:
        # Scale the artwork to the tile size before storing it.
        self.image = pg.transform.scale(image, size)
        rect = pg.Rect(xy, size)
        super().__init__(rect)
class TileA(Tile):
    # Tile variant A, drawn at a random multiple of 90 degrees.
    def __init__(self, xy: tuple, size: tuple) -> None:
        image = settings.TILE_A_IMG
        # random tile orientation
        random_angle = randint(0, 3)*90
        rotated_image = pg.transform.rotate(image, random_angle)
        super().__init__(xy, size, rotated_image)
class TileB(Tile):
    # Tile variant B, drawn without rotation.
    def __init__(self, xy: tuple, size: tuple) -> None:
        image = settings.TILE_B_IMG
        super().__init__(xy, size, image)
class TileC(Tile):
    # Tile variant C, drawn without rotation.
    def __init__(self, xy: tuple, size: tuple) -> None:
        image = settings.TILE_C_IMG
        super().__init__(xy, size, image) | en | 0.481822 | # random tile orientation | 2.812017 | 3 |
hop/run.py | denismo/hopper | 0 | 6621868 | <reponame>denismo/hopper
import os
import click
import logging
from deploy import doDeploy
# Silence the 'pip' logger: only CRITICAL-level messages pass through.
logging.getLogger('pip').setLevel(logging.CRITICAL)
@click.group()
def cli():
    # Root command group; subcommands are attached in the __main__ guard below.
    pass
@click.command(help="Deploy Hop package")
def deploy():
doDeploy()
if __name__ == '__main__':
    # Register the subcommand and hand control to click's dispatcher.
    cli.add_command(deploy)
    cli()
| import os
import click
import logging
from deploy import doDeploy
# Silence the 'pip' logger: only CRITICAL-level messages pass through.
logging.getLogger('pip').setLevel(logging.CRITICAL)
@click.group()
def cli():
    # Root command group; subcommands are attached in the __main__ guard below.
    pass
@click.command(help="Deploy Hop package")
def deploy():
doDeploy()
if __name__ == '__main__':
    # Register the subcommand and hand control to click's dispatcher.
    cli.add_command(deploy)
    cli()
src/uvm/comps/uvm_random_stimulus.py | rodrigomelo9/uvm-python | 140 | 6621869 | <reponame>rodrigomelo9/uvm-python<gh_stars>100-1000
#//
#//------------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2010 Cadence Design Systems, Inc.
#// Copyright 2010 Synopsys, Inc.
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//------------------------------------------------------------------------------
#
#//------------------------------------------------------------------------------
#// CLASS: uvm_random_stimulus #(T)
#//
#// A general purpose unidirectional random stimulus class.
#//
#// The uvm_random_stimulus class generates streams of T transactions. These streams
#// may be generated by the randomize method of T, or the randomize method of
#// one of its subclasses. The stream may go indefinitely, until terminated
#// by a call to stop_stimulus_generation, or we may specify the maximum number
#// of transactions to be generated.
#//
#// By using inheritance, we can add directed initialization or tidy up after
#// random stimulus generation. Simply extend the class and define the run task,
#// calling super.run() when you want to begin the random stimulus phase of
#// simulation.
#//
#// While very useful in its own right, this component can also be used as a
#// template for defining other stimulus generators, or it can be extended to
#// add additional stimulus generation methods and to simplify test writing.
#//
#//------------------------------------------------------------------------------
#
#class uvm_random_stimulus #(type T=uvm_transaction) extends uvm_component;
#
# const static string type_name = "uvm_random_stimulus #(T)";
#
# typedef uvm_random_stimulus #(T) this_type;
# `uvm_component_param_utils(this_type)
#
# // Port: blocking_put_port
# //
# // The blocking_put_port is used to send the generated stimulus to the rest
# // of the testbench.
#
# uvm_blocking_put_port #(T) blocking_put_port;
#
#
# // Function: new
# //
# // Creates a new instance of a specialization of this class.
# // Also, displays the random state obtained from a get_randstate call.
# // In subsequent simulations, set_randstate can be called with the same
# // value to reproduce the same sequence of transactions.
#
# function new(string name, uvm_component parent);
#
# super.new(name, parent);
#
# blocking_put_port=new("blocking_put_port", this);
#
# uvm_report_info("uvm_stimulus", {"rand state is ", get_randstate()});
#
# endfunction
#
#
# local bit m_stop;
#
#
# // Function: generate_stimulus
# //
# // Generate up to max_count transactions of type T.
# // If t is not specified, a default instance of T is allocated and used.
# // If t is specified, that transaction is used when randomizing. It must
# // be a subclass of T.
# //
# // max_count is the maximum number of transactions to be
# // generated. A value of zero indicates no maximum - in
# // this case, generate_stimulus will go on indefinitely
# // unless stopped by some other process
# //
# // The transactions are cloned before they are sent out
# // over the blocking_put_port
#
# virtual task generate_stimulus(T t=null, int max_count=0);
#
# T temp;
#
# if (t == null)
# t = new;
#
# for (int i=0; (max_count == 0 || i < max_count) && !m_stop; i++) begin
#
# if (! t.randomize() )
# uvm_report_warning ("RANDFL", "Randomization failed in generate_stimulus");
#
# $cast(temp, t.clone());
# uvm_report_info("stimulus generation", temp.convert2string());
# blocking_put_port.put(temp);
# end
# endtask
#
#
# // Function: stop_stimulus_generation
# //
# // Stops the generation of stimulus.
# // If a subclass of this method has forked additional
# // processes, those processes will also need to be
# // stopped in an overridden version of this method
#
# virtual function void stop_stimulus_generation;
# m_stop = 1;
# endfunction
#
#
# virtual function string get_type_name();
# return type_name;
# endfunction
#
#endclass : uvm_random_stimulus
| #//
#//------------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2010 Cadence Design Systems, Inc.
#// Copyright 2010 Synopsys, Inc.
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//------------------------------------------------------------------------------
#
#//------------------------------------------------------------------------------
#// CLASS: uvm_random_stimulus #(T)
#//
#// A general purpose unidirectional random stimulus class.
#//
#// The uvm_random_stimulus class generates streams of T transactions. These streams
#// may be generated by the randomize method of T, or the randomize method of
#// one of its subclasses. The stream may go indefinitely, until terminated
#// by a call to stop_stimulus_generation, or we may specify the maximum number
#// of transactions to be generated.
#//
#// By using inheritance, we can add directed initialization or tidy up after
#// random stimulus generation. Simply extend the class and define the run task,
#// calling super.run() when you want to begin the random stimulus phase of
#// simulation.
#//
#// While very useful in its own right, this component can also be used as a
#// template for defining other stimulus generators, or it can be extended to
#// add additional stimulus generation methods and to simplify test writing.
#//
#//------------------------------------------------------------------------------
#
#class uvm_random_stimulus #(type T=uvm_transaction) extends uvm_component;
#
# const static string type_name = "uvm_random_stimulus #(T)";
#
# typedef uvm_random_stimulus #(T) this_type;
# `uvm_component_param_utils(this_type)
#
# // Port: blocking_put_port
# //
# // The blocking_put_port is used to send the generated stimulus to the rest
# // of the testbench.
#
# uvm_blocking_put_port #(T) blocking_put_port;
#
#
# // Function: new
# //
# // Creates a new instance of a specialization of this class.
# // Also, displays the random state obtained from a get_randstate call.
# // In subsequent simulations, set_randstate can be called with the same
# // value to reproduce the same sequence of transactions.
#
# function new(string name, uvm_component parent);
#
# super.new(name, parent);
#
# blocking_put_port=new("blocking_put_port", this);
#
# uvm_report_info("uvm_stimulus", {"rand state is ", get_randstate()});
#
# endfunction
#
#
# local bit m_stop;
#
#
# // Function: generate_stimulus
# //
# // Generate up to max_count transactions of type T.
# // If t is not specified, a default instance of T is allocated and used.
# // If t is specified, that transaction is used when randomizing. It must
# // be a subclass of T.
# //
# // max_count is the maximum number of transactions to be
# // generated. A value of zero indicates no maximum - in
# // this case, generate_stimulus will go on indefinitely
# // unless stopped by some other process
# //
# // The transactions are cloned before they are sent out
# // over the blocking_put_port
#
# virtual task generate_stimulus(T t=null, int max_count=0);
#
# T temp;
#
# if (t == null)
# t = new;
#
# for (int i=0; (max_count == 0 || i < max_count) && !m_stop; i++) begin
#
# if (! t.randomize() )
# uvm_report_warning ("RANDFL", "Randomization failed in generate_stimulus");
#
# $cast(temp, t.clone());
# uvm_report_info("stimulus generation", temp.convert2string());
# blocking_put_port.put(temp);
# end
# endtask
#
#
# // Function: stop_stimulus_generation
# //
# // Stops the generation of stimulus.
# // If a subclass of this method has forked additional
# // processes, those processes will also need to be
# // stopped in an overridden version of this method
#
# virtual function void stop_stimulus_generation;
# m_stop = 1;
# endfunction
#
#
# virtual function string get_type_name();
# return type_name;
# endfunction
#
#endclass : uvm_random_stimulus | en | 0.549143 | #// #//------------------------------------------------------------------------------ #// Copyright 2007-2011 Mentor Graphics Corporation #// Copyright 2007-2010 Cadence Design Systems, Inc. #// Copyright 2010 Synopsys, Inc. #// All Rights Reserved Worldwide #// #// Licensed under the Apache License, Version 2.0 (the #// "License"); you may not use this file except in #// compliance with the License. You may obtain a copy of #// the License at #// #// http://www.apache.org/licenses/LICENSE-2.0 #// #// Unless required by applicable law or agreed to in #// writing, software distributed under the License is #// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR #// CONDITIONS OF ANY KIND, either express or implied. See #// the License for the specific language governing #// permissions and limitations under the License. #//------------------------------------------------------------------------------ # #//------------------------------------------------------------------------------ #// CLASS: uvm_random_stimulus #(T) #// #// A general purpose unidirectional random stimulus class. #// #// The uvm_random_stimulus class generates streams of T transactions. These streams #// may be generated by the randomize method of T, or the randomize method of #// one of its subclasses. The stream may go indefinitely, until terminated #// by a call to stop_stimulus_generation, or we may specify the maximum number #// of transactions to be generated. #// #// By using inheritance, we can add directed initialization or tidy up after #// random stimulus generation. Simply extend the class and define the run task, #// calling super.run() when you want to begin the random stimulus phase of #// simulation. #// #// While very useful in its own right, this component can also be used as a #// template for defining other stimulus generators, or it can be extended to #// add additional stimulus generation methods and to simplify test writing. 
#// #//------------------------------------------------------------------------------ # #class uvm_random_stimulus #(type T=uvm_transaction) extends uvm_component; # # const static string type_name = "uvm_random_stimulus #(T)"; # # typedef uvm_random_stimulus #(T) this_type; # `uvm_component_param_utils(this_type) # # // Port: blocking_put_port # // # // The blocking_put_port is used to send the generated stimulus to the rest # // of the testbench. # # uvm_blocking_put_port #(T) blocking_put_port; # # # // Function: new # // # // Creates a new instance of a specialization of this class. # // Also, displays the random state obtained from a get_randstate call. # // In subsequent simulations, set_randstate can be called with the same # // value to reproduce the same sequence of transactions. # # function new(string name, uvm_component parent); # # super.new(name, parent); # # blocking_put_port=new("blocking_put_port", this); # # uvm_report_info("uvm_stimulus", {"rand state is ", get_randstate()}); # # endfunction # # # local bit m_stop; # # # // Function: generate_stimulus # // # // Generate up to max_count transactions of type T. # // If t is not specified, a default instance of T is allocated and used. # // If t is specified, that transaction is used when randomizing. It must # // be a subclass of T. # // # // max_count is the maximum number of transactions to be # // generated. A value of zero indicates no maximum - in # // this case, generate_stimulus will go on indefinitely # // unless stopped by some other process # // # // The transactions are cloned before they are sent out # // over the blocking_put_port # # virtual task generate_stimulus(T t=null, int max_count=0); # # T temp; # # if (t == null) # t = new; # # for (int i=0; (max_count == 0 || i < max_count) && !m_stop; i++) begin # # if (! 
t.randomize() ) # uvm_report_warning ("RANDFL", "Randomization failed in generate_stimulus"); # # $cast(temp, t.clone()); # uvm_report_info("stimulus generation", temp.convert2string()); # blocking_put_port.put(temp); # end # endtask # # # // Function: stop_stimulus_generation # // # // Stops the generation of stimulus. # // If a subclass of this method has forked additional # // processes, those processes will also need to be # // stopped in an overridden version of this method # # virtual function void stop_stimulus_generation; # m_stop = 1; # endfunction # # # virtual function string get_type_name(); # return type_name; # endfunction # #endclass : uvm_random_stimulus | 1.256822 | 1 |
mrrecon/data/reader.py | sickkids-mri/mrrecon | 0 | 6621870 | <filename>mrrecon/data/reader.py
import re
import numpy as np
import twixtools
def read_twix(filename, keep_syncdata_and_acqend=True):
    """Wraps `twixtools.read_twix` with some fixes to the reader.

    This wrapper can be removed once they fix these things.

    Args:
        filename (str): Path to the Siemens raw data (twix) file.
        keep_syncdata_and_acqend (bool): Forwarded to twixtools unchanged.

    Returns:
        list: The twixtools scan list, where each scan's 'Config' and 'Dicom'
            header sections have been parsed into dictionaries in place.
    """
    scans = twixtools.read_twix(filename,
                                keep_syncdata_and_acqend=keep_syncdata_and_acqend)  # noqa
    # twixtools' progress output does not end with a newline; emit one.
    print('')
    # Parse the remaining header sections for every scan entry.
    for entry in scans:
        if not isinstance(entry, dict):
            # Non-dict entry is the raidfile_hdr; nothing to parse.
            continue
        hdr = entry['hdr']
        hdr['Config'] = _make_dict_from_hdr(hdr['Config'])
        hdr['Dicom'] = _make_dict_from_hdr(hdr['Dicom'])
    return scans
def _make_dict_from_hdr(dict_string):
"""Generates a dictionary from a portion of the header.
Works for Config and Dicom.
"""
pattern = re.compile(
'<Param(Long|String|Double)\\."([^"]+)"> { ([^}]+) }')
out = {}
for dtype, name, data in pattern.findall(dict_string):
if dtype == "String":
out[name] = data[1:-1]
if dtype == "Long":
if " " in data:
out[name] = [int(x) for x in data.split()]
else:
out[name] = int(data)
if dtype == "Double":
out[name] = float(data.rstrip().split(" ")[-1])
return out
class DataLoader:
    """Handles raw data loading and processing.

    Depends on twixtools for reading the Siemens MRI raw data file. Since the
    output of twixtools is a bit 'raw', data and parameters are processed and
    placed in a convenient dictionary called `data`.

    The whole process consists of 5 main steps:

    1) Reading the Siemens raw data file with twixtools.
    2) Storing scan data in NumPy arrays (e.g. noise and k-space measurements).
    3) Picking out relevant scan parameters from the header (e.g. image
       resolution, TR, VENC).
    4) Reading the mini data header for per line data values (e.g. time
       stamps, custom user-defined data).
    5) Reformatting the data.

    The original data structure output from twixtools can also be accessed
    (attribute name is `scan_list`).

    Args:
        filename (str): Full name of raw data file.

    Attributes:
        data (dictionary): Contains loaded and processed items.
        scan_list (list): Output from twixtools.read_twix().
    """
    def __init__(self, filename):
        self.filename = filename
        # Populated incrementally by the steps executed in `run`.
        self.data = {}
    def run(self):
        """Runs the full loading pipeline and returns the `data` dictionary."""
        scan_list = self._load()
        image_scans = self._read_scan_data(scan_list)
        self._read_header(image_scans)
        self._read_minidataheader(image_scans)
        self._reformat()
        return self.data
    def _load(self):
        """Reads file and returns a list of scans."""
        scan_list = read_twix(self.filename, keep_syncdata_and_acqend=False)
        self.scan_list = scan_list
        return scan_list
    def _read_scan_data(self, scan_list):
        """Reads each scan/measurement and stores in NumPy arrays.

        Returns:
            list: The image scans (reused later for header/MDH parsing).
        """
        image_scans = []  # For collecting image scans for header reading
        self.data['kspace'] = []  # List of arrays
        self.data['calib'] = []  # List of arrays
        for scan in scan_list:
            if not isinstance(scan, dict):
                # Then it is the raidfile_hdr (not needed)
                continue
            array = self._fill_array(scan)
            # The first line's flags identify the type of the whole scan.
            first_line = scan['mdb'][0]
            if first_line.is_image_scan():
                self.data['kspace'].append(array)
                image_scans.append(scan)
            else:
                # Calibration scan
                self.data['calib'].append(array)
        return image_scans
    def _fill_array(self, scan):
        """Reads acquired data line by line and fills a 3D NumPy array.

        The shape of the array is (ncoils, nlines, nro).
        """
        # Get array shape
        nlines = len(scan['mdb'])
        ncoils, nro = scan['mdb'][0].data.shape  # Check first line
        array = np.empty((ncoils, nlines, nro),
                         dtype=np.complex64)
        for idx, line in enumerate(scan['mdb']):  # Looping over a list
            array[:, idx, :] = line.data
        return array
    def _read_header(self, image_scans):
        """Picks out relevant reconstruction parameters from the header."""
        # If there is more than one image scan, reads the header from the first
        hdr = image_scans[0]['hdr']
        # Only 'MeasYaps' was parsed and values stored dictionary
        # TODO: What happens when field/value does not exist?
        config = hdr['Config']
        dicom = hdr['Dicom']
        # Reconstructed matrix size
        self.data['nx'] = config['ImageColumns']
        self.data['ny'] = config['ImageLines']
        meas = hdr['Meas'].split('\n')  # Not yet making dict out of 'Meas'
        # NOTE(review): assumes the value for 'i3DFTLength' sits exactly two
        # lines below the matching line in 'Meas' -- verify on a sample file.
        for n, line in enumerate(meas):
            if 'i3DFTLength' in line:
                if int(meas[n + 2]) == 1:
                    self.data['nz'] = 1
                else:
                    self.data['nz'] = int(hdr['MeasYaps']['sKSpace']['lImagesPerSlab'])  # noqa
                break
        # In millimetres
        self.data['fovx'] = float(hdr['MeasYaps']['sSliceArray']['asSlice'][0]['dReadoutFOV'])  # noqa
        self.data['fovy'] = float(hdr['MeasYaps']['sSliceArray']['asSlice'][0]['dPhaseFOV'])  # noqa
        self.data['fovz'] = float(hdr['MeasYaps']['sSliceArray']['asSlice'][0]['dThickness'])  # noqa
        # Voxel size in millimetres
        self.data['dx'] = self.data['fovx'] / self.data['nx']
        self.data['dy'] = self.data['fovy'] / self.data['ny']
        self.data['dz'] = self.data['fovz'] / self.data['nz']
        # Converts to milliseconds
        self.data['tr'] = float(hdr['MeasYaps']['alTR'][0]) / 1000  # noqa
        self.data['te'] = float(hdr['MeasYaps']['alTE'][0]) / 1000  # noqa
        self.data['ti'] = float(hdr['MeasYaps']['alTI'][0]) / 1000  # noqa
        # In degrees
        self.data['flipangle'] = float(hdr['MeasYaps']['adFlipAngleDegree'][0])  # noqa
        # VENC in (cm/s)
        self.data['venc'] = float(hdr['MeasYaps']['sAngio']['sFlowArray']['asElm'][0]['nVelocity'])  # noqa
        self.data['veldir'] = int(hdr['MeasYaps']['sAngio']['sFlowArray']['asElm'][0]['nDir'])  # noqa
        self.data['weight'] = dicom['flUsedPatientWeight']  # kg
        # NOTE(review): the '.' in '\d+.\d+' matches any character; a literal
        # decimal point was presumably intended (r'\d+\.\d+').
        regex = r'flPatientHeight.*?(\d+.\d+).*?}'
        match = re.search(regex, hdr['Meas'], re.DOTALL)
        self.data['height'] = float(match.group(1))  # mm
        # Convert from nanoseconds to microseconds
        self.data['dwelltime'] = float(hdr['MeasYaps']['sRXSPEC']['alDwellTime'][0]) / 1000  # noqa
        # Field strength
        self.data['field_strength'] = dicom['flMagneticFieldStrength']
        # Grad performance params (rise time and max grad)
        # Using dictionaries to look up values
        grad_mode = hdr['MeasYaps']['sGRADSPEC']['ucMode']
        # Dictionary values depend on system field strength
        # TODO Should these values be moved to their own file, so they can be
        # conveniently adjusted in case they change?
        if self.data['field_strength'] < 2:
            self.data['rise_time'] = {  # Rise time in usec/(mT/m)
                1: 5.88,  # FAST
                2: 12.5,  # NORMAL
                0: 12.5,  # Also NORMAL
                4: 20.0  # WHISPER
            }.get(grad_mode)  # Returns None if there is no value for grad_mode
            self.data['grad_max'] = {  # Max grad strength in mT/m
                1: 28,  # FAST
                2: 22,  # NORMAL
                0: 22,  # Also NORMAL
                4: 22  # WHISPER
            }.get(grad_mode)
        else:
            self.data['rise_time'] = {  # Rise time in usec/(mT/m)
                8: 5.3,  # PERFORMANCE
                1: 5.55,  # FAST
                2: 10.0,  # NORMAL
                0: 10.0,  # Also NORMAL
                4: 20.0  # WHISPER
            }.get(grad_mode)
            self.data['grad_max'] = {  # Max grad strength in mT/m
                8: 37,  # PERFORMANCE
                1: 24,  # FAST
                2: 22,  # NORMAL
                0: 22,  # Also NORMAL
                4: 22  # WHISPER
            }.get(grad_mode)
        self.data['readout_os_factor'] = config['ReadoutOversamplingFactor']
        self.data['seq_filename'] = config['SequenceFileName']
        # For dicom writing
        self.data['vendor'] = dicom['Manufacturer']
        self.data['systemmodel'] = dicom['ManufacturersModelName']
        tmpstr = config['ExamMemoryUID']
        # ExamMemoryUID appears to be underscore-separated with the
        # acquisition date/time at fields 3 and 4 -- TODO confirm.
        self.data['acquisition_date'] = tmpstr.split('_')[3]
        self.data['acquisition_time'] = tmpstr.split('_')[4]
        self.data['StudyLOID'] = config['StudyLOID']
        self.data['SeriesLOID'] = config['SeriesLOID']
        self.data['PatientLOID'] = config['PatientLOID']
        self.data['protocol_name'] = hdr['MeasYaps']['tProtocolName']
        self.data['slice_normal'] = hdr['MeasYaps']['sSliceArray']['asSlice'][0]['sNormal']  # noqa
        self.data['patient_orientation'] = dicom['tPatientPosition']
        # Flow encoding navigators collection flag
        try:
            self.data['fe_nav_flag'] = hdr['MeasYaps']['sWipMemBlock']['alFree'][2]  # noqa
        except IndexError:
            self.data['fe_nav_flag'] = 0
        return
    def _read_minidataheader(self, image_scans):
        """Reads mini data headers (MDH)."""
        # If there is more than one image scan, reads the mdh from the first
        scan = image_scans[0]
        nlines = len(scan['mdb'])
        times = np.zeros((nlines), dtype=np.float64)
        user_float = np.zeros((nlines, 24), dtype=np.float64)
        for idx, line in enumerate(scan['mdb']):
            # 2.5 is presumably the time stamp tick duration in ms -- TODO
            # confirm units.
            times[idx] = line.mdh['ulTimeStamp'] * 2.5
            user_float[idx] = line.mdh['aushIceProgramPara']
        self.data['times'] = times
        self.data['user_float'] = np.copy(user_float.transpose())
        # NOTE: `line` below refers to the last mdb entry from the loop above.
        # Logical to physical rotation quaternion
        self.data['rot_quat'] = line.mdh[22][1]
        # Slice position
        self.data['slice_pos'] = line.mdh[22][0]
        return
    def _reformat(self):
        """Reformatting steps that may be sequence-specific."""
        self.data['kspace'] = self.data['kspace'][0]  # Take out of list
        self.data['calib'] = self.data['calib'][0]  # Take out of list
        self.data['noise'] = self.data.pop('calib')  # Rename
        nv = 2  # Number of velocity encodes
        # Reshape (ncoils, nlines, nro) -> (ncoils, nv, nlines/nv, nro) by
        # de-interleaving the velocity encodes.
        (ncoils, nlines, nro) = self.data['kspace'].shape
        tmp = np.empty((ncoils, nv, int(nlines/nv), nro), dtype=np.complex64)
        for v in range(nv):
            tmp[:, v, :, :] = self.data['kspace'][:, v::nv, :]
        self.data['kspace'] = tmp
        tmp = None
        # Recalculate times at higher precision
        time0 = self.data['times'][0]
        times = np.linspace(time0,
                            time0 + (nlines - 1) * (self.data['tr'] / nv),
                            num=nlines, dtype=np.float64)
        # TODO Temporarily keeping this here. times_recalculated should be
        # wrong if TR is reported incorrectly. Keeping this here to check TR
        # accuracy
        self.data['times_recalculated'] = times
        return
def print_dict_summary(data):
    """Prints a table summarizing each item in the input dictionary.

    NumPy arrays are shown with their dtype and shape; everything else is
    shown with its type and value.

    Args:
        data (dict): Contains raw data and scan parameters.
    """
    row = '{:<18} {:<36} {}'
    print(row.format('NAME', 'TYPE', 'SHAPE OR VALUE'))
    for name, value in data.items():
        if isinstance(value, np.ndarray):
            type_info = '{} {}'.format(type(value), value.dtype)
            print(row.format(name, type_info, value.shape))
        else:
            print(row.format(name, str(type(value)), value))
class Flow4DLoader(DataLoader):
    """Data loader for 3D centre-out radial 4D flow.

    Overrides `_read_scan_data` to separate the optional flow-encoding (FE)
    navigator lines from the k-space lines, and `_reformat` for the four
    velocity encodes acquired by this sequence.
    """
    def _read_scan_data(self, scan_list):
        """Reads noise and k-space scans, splitting off FE navigator lines.

        Returns:
            list: The image scans (reused later for header/MDH parsing).
        """
        if not len(scan_list) in [2, 3]:
            raise RuntimeError('Expected the length of scan_list to be either '
                               '2 or 3.')
        # Check datatypes
        assert isinstance(scan_list[0], np.void)  # raidfile_hdr
        assert isinstance(scan_list[1], dict)  # noise or k-space
        if len(scan_list) == 3:
            assert isinstance(scan_list[2], dict)  # k-space
        image_scans = []  # For collecting image scans for header reading
        for scan in scan_list:
            if not isinstance(scan, dict):
                # Then it is the raidfile_hdr (not needed)
                continue
            # Check first two lines to see if this is a k-space or noise scan.
            # If it's a noise scan, is_image_scan() should return False for
            # both lines. If it's k-space, the first line should return False
            # since it's the data collected during the flow encoding gradient,
            # and the second should return True.
            first_line = scan['mdb'][0]
            second_line = scan['mdb'][1]
            if second_line.is_image_scan():  # Then this is an image scan
                # The first line may or may not be is_image_scan(), depending
                # on whether or not flow encoding navigators were acquired.
                image_scans.append(scan)
                # Total number of lines
                nlines = int(len(scan['mdb']))
                if not first_line.is_image_scan():
                    # Then FE navs were acquired
                    # Data should alternate between FE nav and k-space
                    # Check first line for size of FE navigators array
                    ncoils, nro = first_line.data.shape
                    self.data['fe_nav'] = np.empty((ncoils, nlines // 2, nro),
                                                   dtype=np.complex64)
                    # Check second line for size of k-space array
                    ncoils, nro = second_line.data.shape
                    self.data['kspace'] = np.empty((ncoils, nlines // 2, nro),
                                                   dtype=np.complex64)
                else:
                    # FE navs were not acquired
                    ncoils, nro = first_line.data.shape
                    self.data['kspace'] = np.empty((ncoils, nlines, nro),
                                                   dtype=np.complex64)
                # Loads and stores each line
                f = 0  # FE navigator line counter
                k = 0  # k-space line counter
                for line in scan['mdb']:
                    if line.is_flag_set('RTFEEDBACK'):
                        self.data['fe_nav'][:, f, :] = line.data
                        f += 1
                    elif line.is_image_scan():
                        self.data['kspace'][:, k, :] = line.data
                        k += 1
                    else:
                        raise RuntimeError('Data line has unidentified flag.')
            else:
                # It is noise scan
                nlines = len(scan['mdb'])
                ncoils, nro = first_line.data.shape
                self.data['noise'] = np.empty((ncoils, nlines, nro),
                                              dtype=np.complex64)
                for idx, line in enumerate(scan['mdb']):
                    self.data['noise'][:, idx, :] = line.data
        return image_scans
    def _reformat(self):
        """Reformatting steps that may be sequence-specific."""
        nv = 4  # Number of velocity encodes
        # Reshape (ncoils, nlines, nro) -> (ncoils, nv, nlines/nv, nro) by
        # de-interleaving the velocity encodes.
        (ncoils, nlines, nro) = self.data['kspace'].shape
        tmp = np.empty((ncoils, nv, int(nlines/nv), nro), dtype=np.complex64)
        for v in range(nv):
            tmp[:, v, :, :] = self.data['kspace'][:, v::nv, :]
        self.data['kspace'] = tmp
        tmp = None
        # Recalculate times at higher precision
        fe_nav_acquired = 'fe_nav' in self.data.keys()
        if fe_nav_acquired:
            # Take the second time stamp, the first is FE navigator
            time0 = self.data['times'][1]
        else:
            time0 = self.data['times'][0]
        times = np.linspace(time0,
                            time0 + (nlines - 1) * (self.data['tr'] / nv),
                            num=nlines, dtype=np.float64)
        # TODO Temporarily keeping this here. times_recalculated should be
        # wrong if TR is reported incorrectly. Keeping this here to check TR
        # accuracy
        self.data['times_recalculated'] = times
        if fe_nav_acquired:
            # Discard time stamps of FE navs (only do this if times is not
            # holding the recalculated times) TODO
            self.data['times'] = self.data['times'][1::2]
            # Discard the user-defined measurements from FE navigators
            self.data['user_float'] = self.data['user_float'][:, 1::2]
        return
| <filename>mrrecon/data/reader.py
import re
import numpy as np
import twixtools
def read_twix(filename, keep_syncdata_and_acqend=True):
    """Wraps `twixtools.read_twix` with some fixes to the reader.

    This wrapper can be removed once they fix these things.

    Args:
        filename (str): Path to the Siemens raw data (twix) file.
        keep_syncdata_and_acqend (bool): Forwarded to twixtools unchanged.

    Returns:
        list: The twixtools scan list, where each scan's 'Config' and 'Dicom'
            header sections have been parsed into dictionaries in place.
    """
    scans = twixtools.read_twix(filename,
                                keep_syncdata_and_acqend=keep_syncdata_and_acqend)  # noqa
    # twixtools' progress output does not end with a newline; emit one.
    print('')
    # Parse the remaining header sections for every scan entry.
    for entry in scans:
        if not isinstance(entry, dict):
            # Non-dict entry is the raidfile_hdr; nothing to parse.
            continue
        hdr = entry['hdr']
        hdr['Config'] = _make_dict_from_hdr(hdr['Config'])
        hdr['Dicom'] = _make_dict_from_hdr(hdr['Dicom'])
    return scans
def _make_dict_from_hdr(dict_string):
"""Generates a dictionary from a portion of the header.
Works for Config and Dicom.
"""
pattern = re.compile(
'<Param(Long|String|Double)\\."([^"]+)"> { ([^}]+) }')
out = {}
for dtype, name, data in pattern.findall(dict_string):
if dtype == "String":
out[name] = data[1:-1]
if dtype == "Long":
if " " in data:
out[name] = [int(x) for x in data.split()]
else:
out[name] = int(data)
if dtype == "Double":
out[name] = float(data.rstrip().split(" ")[-1])
return out
class DataLoader:
    """Handles raw data loading and processing.

    Depends on twixtools for reading the Siemens MRI raw data file. Since the
    output of twixtools is a bit 'raw', data and parameters are processed and
    placed in a convenient dictionary called `data`.

    The whole process consists of 5 main steps:

    1) Reading the Siemens raw data file with twixtools.
    2) Storing scan data in NumPy arrays (e.g. noise and k-space measurements).
    3) Picking out relevant scan parameters from the header (e.g. image
       resolution, TR, VENC).
    4) Reading the mini data header for per line data values (e.g. time
       stamps, custom user-defined data).
    5) Reformatting the data.

    The original data structure output from twixtools can also be accessed
    (attribute name is `scan_list`).

    Args:
        filename (str): Full name of raw data file.

    Attributes:
        data (dictionary): Contains loaded and processed items.
        scan_list (list): Output from twixtools.read_twix().
    """
    def __init__(self, filename):
        self.filename = filename
        # Populated incrementally by the steps executed in `run`.
        self.data = {}
    def run(self):
        """Runs the full loading pipeline and returns the `data` dictionary."""
        scan_list = self._load()
        image_scans = self._read_scan_data(scan_list)
        self._read_header(image_scans)
        self._read_minidataheader(image_scans)
        self._reformat()
        return self.data
    def _load(self):
        """Reads file and returns a list of scans."""
        scan_list = read_twix(self.filename, keep_syncdata_and_acqend=False)
        self.scan_list = scan_list
        return scan_list
    def _read_scan_data(self, scan_list):
        """Reads each scan/measurement and stores in NumPy arrays.

        Returns:
            list: The image scans (reused later for header/MDH parsing).
        """
        image_scans = []  # For collecting image scans for header reading
        self.data['kspace'] = []  # List of arrays
        self.data['calib'] = []  # List of arrays
        for scan in scan_list:
            if not isinstance(scan, dict):
                # Then it is the raidfile_hdr (not needed)
                continue
            array = self._fill_array(scan)
            # The first line's flags identify the type of the whole scan.
            first_line = scan['mdb'][0]
            if first_line.is_image_scan():
                self.data['kspace'].append(array)
                image_scans.append(scan)
            else:
                # Calibration scan
                self.data['calib'].append(array)
        return image_scans
    def _fill_array(self, scan):
        """Reads acquired data line by line and fills a 3D NumPy array.

        The shape of the array is (ncoils, nlines, nro).
        """
        # Get array shape
        nlines = len(scan['mdb'])
        ncoils, nro = scan['mdb'][0].data.shape  # Check first line
        array = np.empty((ncoils, nlines, nro),
                         dtype=np.complex64)
        for idx, line in enumerate(scan['mdb']):  # Looping over a list
            array[:, idx, :] = line.data
        return array
    def _read_header(self, image_scans):
        """Picks out relevant reconstruction parameters from the header."""
        # If there is more than one image scan, reads the header from the first
        hdr = image_scans[0]['hdr']
        # Only 'MeasYaps' was parsed and values stored dictionary
        # TODO: What happens when field/value does not exist?
        config = hdr['Config']
        dicom = hdr['Dicom']
        # Reconstructed matrix size
        self.data['nx'] = config['ImageColumns']
        self.data['ny'] = config['ImageLines']
        meas = hdr['Meas'].split('\n')  # Not yet making dict out of 'Meas'
        # NOTE(review): assumes the value for 'i3DFTLength' sits exactly two
        # lines below the matching line in 'Meas' -- verify on a sample file.
        for n, line in enumerate(meas):
            if 'i3DFTLength' in line:
                if int(meas[n + 2]) == 1:
                    self.data['nz'] = 1
                else:
                    self.data['nz'] = int(hdr['MeasYaps']['sKSpace']['lImagesPerSlab'])  # noqa
                break
        # In millimetres
        self.data['fovx'] = float(hdr['MeasYaps']['sSliceArray']['asSlice'][0]['dReadoutFOV'])  # noqa
        self.data['fovy'] = float(hdr['MeasYaps']['sSliceArray']['asSlice'][0]['dPhaseFOV'])  # noqa
        self.data['fovz'] = float(hdr['MeasYaps']['sSliceArray']['asSlice'][0]['dThickness'])  # noqa
        # Voxel size in millimetres
        self.data['dx'] = self.data['fovx'] / self.data['nx']
        self.data['dy'] = self.data['fovy'] / self.data['ny']
        self.data['dz'] = self.data['fovz'] / self.data['nz']
        # Converts to milliseconds
        self.data['tr'] = float(hdr['MeasYaps']['alTR'][0]) / 1000  # noqa
        self.data['te'] = float(hdr['MeasYaps']['alTE'][0]) / 1000  # noqa
        self.data['ti'] = float(hdr['MeasYaps']['alTI'][0]) / 1000  # noqa
        # In degrees
        self.data['flipangle'] = float(hdr['MeasYaps']['adFlipAngleDegree'][0])  # noqa
        # VENC in (cm/s)
        self.data['venc'] = float(hdr['MeasYaps']['sAngio']['sFlowArray']['asElm'][0]['nVelocity'])  # noqa
        self.data['veldir'] = int(hdr['MeasYaps']['sAngio']['sFlowArray']['asElm'][0]['nDir'])  # noqa
        self.data['weight'] = dicom['flUsedPatientWeight']  # kg
        # NOTE(review): the '.' in '\d+.\d+' matches any character; a literal
        # decimal point was presumably intended (r'\d+\.\d+').
        regex = r'flPatientHeight.*?(\d+.\d+).*?}'
        match = re.search(regex, hdr['Meas'], re.DOTALL)
        self.data['height'] = float(match.group(1))  # mm
        # Convert from nanoseconds to microseconds
        self.data['dwelltime'] = float(hdr['MeasYaps']['sRXSPEC']['alDwellTime'][0]) / 1000  # noqa
        # Field strength
        self.data['field_strength'] = dicom['flMagneticFieldStrength']
        # Grad performance params (rise time and max grad)
        # Using dictionaries to look up values
        grad_mode = hdr['MeasYaps']['sGRADSPEC']['ucMode']
        # Dictionary values depend on system field strength
        # TODO Should these values be moved to their own file, so they can be
        # conveniently adjusted in case they change?
        if self.data['field_strength'] < 2:
            self.data['rise_time'] = {  # Rise time in usec/(mT/m)
                1: 5.88,  # FAST
                2: 12.5,  # NORMAL
                0: 12.5,  # Also NORMAL
                4: 20.0  # WHISPER
            }.get(grad_mode)  # Returns None if there is no value for grad_mode
            self.data['grad_max'] = {  # Max grad strength in mT/m
                1: 28,  # FAST
                2: 22,  # NORMAL
                0: 22,  # Also NORMAL
                4: 22  # WHISPER
            }.get(grad_mode)
        else:
            self.data['rise_time'] = {  # Rise time in usec/(mT/m)
                8: 5.3,  # PERFORMANCE
                1: 5.55,  # FAST
                2: 10.0,  # NORMAL
                0: 10.0,  # Also NORMAL
                4: 20.0  # WHISPER
            }.get(grad_mode)
            self.data['grad_max'] = {  # Max grad strength in mT/m
                8: 37,  # PERFORMANCE
                1: 24,  # FAST
                2: 22,  # NORMAL
                0: 22,  # Also NORMAL
                4: 22  # WHISPER
            }.get(grad_mode)
        self.data['readout_os_factor'] = config['ReadoutOversamplingFactor']
        self.data['seq_filename'] = config['SequenceFileName']
        # For dicom writing
        self.data['vendor'] = dicom['Manufacturer']
        self.data['systemmodel'] = dicom['ManufacturersModelName']
        tmpstr = config['ExamMemoryUID']
        # ExamMemoryUID appears to be underscore-separated with the
        # acquisition date/time at fields 3 and 4 -- TODO confirm.
        self.data['acquisition_date'] = tmpstr.split('_')[3]
        self.data['acquisition_time'] = tmpstr.split('_')[4]
        self.data['StudyLOID'] = config['StudyLOID']
        self.data['SeriesLOID'] = config['SeriesLOID']
        self.data['PatientLOID'] = config['PatientLOID']
        self.data['protocol_name'] = hdr['MeasYaps']['tProtocolName']
        self.data['slice_normal'] = hdr['MeasYaps']['sSliceArray']['asSlice'][0]['sNormal']  # noqa
        self.data['patient_orientation'] = dicom['tPatientPosition']
        # Flow encoding navigators collection flag
        try:
            self.data['fe_nav_flag'] = hdr['MeasYaps']['sWipMemBlock']['alFree'][2]  # noqa
        except IndexError:
            self.data['fe_nav_flag'] = 0
        return
    def _read_minidataheader(self, image_scans):
        """Reads mini data headers (MDH)."""
        # If there is more than one image scan, reads the mdh from the first
        scan = image_scans[0]
        nlines = len(scan['mdb'])
        times = np.zeros((nlines), dtype=np.float64)
        user_float = np.zeros((nlines, 24), dtype=np.float64)
        for idx, line in enumerate(scan['mdb']):
            # 2.5 is presumably the time stamp tick duration in ms -- TODO
            # confirm units.
            times[idx] = line.mdh['ulTimeStamp'] * 2.5
            user_float[idx] = line.mdh['aushIceProgramPara']
        self.data['times'] = times
        self.data['user_float'] = np.copy(user_float.transpose())
        # NOTE: `line` below refers to the last mdb entry from the loop above.
        # Logical to physical rotation quaternion
        self.data['rot_quat'] = line.mdh[22][1]
        # Slice position
        self.data['slice_pos'] = line.mdh[22][0]
        return
    def _reformat(self):
        """Reformatting steps that may be sequence-specific."""
        self.data['kspace'] = self.data['kspace'][0]  # Take out of list
        self.data['calib'] = self.data['calib'][0]  # Take out of list
        self.data['noise'] = self.data.pop('calib')  # Rename
        nv = 2  # Number of velocity encodes
        # Reshape (ncoils, nlines, nro) -> (ncoils, nv, nlines/nv, nro) by
        # de-interleaving the velocity encodes.
        (ncoils, nlines, nro) = self.data['kspace'].shape
        tmp = np.empty((ncoils, nv, int(nlines/nv), nro), dtype=np.complex64)
        for v in range(nv):
            tmp[:, v, :, :] = self.data['kspace'][:, v::nv, :]
        self.data['kspace'] = tmp
        tmp = None
        # Recalculate times at higher precision
        time0 = self.data['times'][0]
        times = np.linspace(time0,
                            time0 + (nlines - 1) * (self.data['tr'] / nv),
                            num=nlines, dtype=np.float64)
        # TODO Temporarily keeping this here. times_recalculated should be
        # wrong if TR is reported incorrectly. Keeping this here to check TR
        # accuracy
        self.data['times_recalculated'] = times
        return
def print_dict_summary(data):
"""Summarizes attributes of items in the input dictionary.
Args:
data (dict): Contains raw data and scan parameters.
"""
print(f'{"NAME":<18} {"TYPE":<36} {"SHAPE OR VALUE"}')
for k, v in data.items():
if isinstance(v, np.ndarray):
s = f'{type(v)} {v.dtype}'
print(f'{k:<18} {s:<36} {v.shape}')
else:
print(f'{k:<18} {str(type(v)):<36} {v}')
class Flow4DLoader(DataLoader):
"""Data loader for 3D centre-out radial 4D flow."""
def _read_scan_data(self, scan_list):
if not len(scan_list) in [2, 3]:
raise RuntimeError('Expected the length of scan_list to be either '
'2 or 3.')
# Check datatypes
assert isinstance(scan_list[0], np.void) # raidfile_hdr
assert isinstance(scan_list[1], dict) # noise or k-space
if len(scan_list) == 3:
assert isinstance(scan_list[2], dict) # k-space
image_scans = [] # For collecting image scans for header reading
for scan in scan_list:
if not isinstance(scan, dict):
# Then it is the raidfile_hdr (not needed)
continue
# Check first two lines to see if this is a k-space or noise scan.
# If it's a noise scan, is_image_scan() should return False for
# both lines. If it's k-space, the first line should return False
# since it's the data collected during the flow encoding gradient,
# and the second should return True.
first_line = scan['mdb'][0]
second_line = scan['mdb'][1]
if second_line.is_image_scan(): # Then this is an image scan
# The first line may or may not be is_image_scan(), depending
# on whether or not flow encoding navigators were acquired.
image_scans.append(scan)
# Total number of lines
nlines = int(len(scan['mdb']))
if not first_line.is_image_scan():
# Then FE navs were acquired
# Data should alternate between FE nav and k-space
# Check first line for size of FE navigators array
ncoils, nro = first_line.data.shape
self.data['fe_nav'] = np.empty((ncoils, nlines // 2, nro),
dtype=np.complex64)
# Check second line for size of k-space array
ncoils, nro = second_line.data.shape
self.data['kspace'] = np.empty((ncoils, nlines // 2, nro),
dtype=np.complex64)
else:
# FE navs were not acquired
ncoils, nro = first_line.data.shape
self.data['kspace'] = np.empty((ncoils, nlines, nro),
dtype=np.complex64)
# Loads and stores each line
f = 0
k = 0
for line in scan['mdb']:
if line.is_flag_set('RTFEEDBACK'):
self.data['fe_nav'][:, f, :] = line.data
f += 1
elif line.is_image_scan():
self.data['kspace'][:, k, :] = line.data
k += 1
else:
raise RuntimeError('Data line has unidentified flag.')
else:
# It is noise scan
nlines = len(scan['mdb'])
ncoils, nro = first_line.data.shape
self.data['noise'] = np.empty((ncoils, nlines, nro),
dtype=np.complex64)
for idx, line in enumerate(scan['mdb']):
self.data['noise'][:, idx, :] = line.data
return image_scans
def _reformat(self):
"""Reformatting steps that may be sequence-specific."""
nv = 4 # Number of velocity encodes
# Reshape
(ncoils, nlines, nro) = self.data['kspace'].shape
tmp = np.empty((ncoils, nv, int(nlines/nv), nro), dtype=np.complex64)
for v in range(nv):
tmp[:, v, :, :] = self.data['kspace'][:, v::nv, :]
self.data['kspace'] = tmp
tmp = None
# Recalculate times at higher precision
fe_nav_acquired = 'fe_nav' in self.data.keys()
if fe_nav_acquired:
# Take the second time stamp, the first is FE navigator
time0 = self.data['times'][1]
else:
time0 = self.data['times'][0]
times = np.linspace(time0,
time0 + (nlines - 1) * (self.data['tr'] / nv),
num=nlines, dtype=np.float64)
# TODO Temporarily keeping this here. times_recalculated should be
# wrong if TR is reported incorrectly. Keeping this here to check TR
# accuracy
self.data['times_recalculated'] = times
if fe_nav_acquired:
# Discard time stamps of FE navs (only do this if times is not
# holding the recalculated times) TODO
self.data['times'] = self.data['times'][1::2]
# Discard the user-defined measurements from FE navigators
self.data['user_float'] = self.data['user_float'][:, 1::2]
return
| en | 0.785113 | Wraps `twixtools.read_twix` with some fixes to the reader. This wrapper can be removed once they fix these things. # noqa # Fixes absence of newline # Parse other headers # Then it is the raidfile_hdr (not needed) Generates a dictionary from a portion of the header. Works for Config and Dicom. Handles raw data loading and processing. Depends on twixtools for reading the Siemens MRI raw data file. Since the output of twixtools is a bit 'raw', data and parameters are processed and placed in a convenient dictionary called `data`. The whole process consists of 5 main steps: 1) Reading the Siemens raw data file with twixtools. 2) Storing scan data in NumPy arrays (e.g. noise and k-space measurements). 3) Picking out relevant scan parameters from the header (e.g. image resolution, TR, VENC). 4) Reading the mini data header for per line data values (e.g. time stamps, custom user-defined data). 5) Reformatting the data. The original data structure output from twixtools can also be accessed (attribute name is `scan_list`). Args: filename (str): Full name of raw data file. Attributes: data (dictionary): Contains loaded and processed items. scan_list (list): Output from twixtools.read_twix(). Reads file and returns a list of scans. Reads each scan/measurement and stores in NumPy arrays. # For collecting image scans for header reading # List of arrays # List of arrays # Then it is the raidfile_hdr (not needed) # Calibration scan Reads acquired data line by line and fills a 3D NumPy array. The shape of the array is (ncoils, nlines, nro). # Get array shape # Check first line # Looping over a list Picks out relevant reconstruction parameters from the header. # If there is more than one image scan, reads the header from the first # Only 'MeasYaps' was parsed and values stored dictionary # TODO: What happens when field/value does not exist? 
# Not yet making dict out of 'Meas' # noqa # In millimetres # noqa # noqa # noqa # Converts to milliseconds # noqa # noqa # noqa # In degrees # noqa # VENC in (cm/s) # noqa # noqa # kg # mm # Convert from nanoseconds to microseconds # noqa # Field strength # Grad performance params (rise time and max grad) # Using dictionaries to look up values # Dictionary values depend on system field strength # TODO Should these values be moved to their own file, so they can be # conveniently adjusted in case they change? # Rise time in usec/(mT/m) # FAST # NORMAL # Also NORMAL # WHISPER # Returns None if there is no value for grad_mode # Max grad strength in mT/m # FAST # NORMAL # Also NORMAL # WHISPER # Rise time in usec/(mT/m) # PERFORMANCE # FAST # NORMAL # Also NORMAL # WHISPER # Max grad strength in mT/m # PERFORMANCE # FAST # NORMAL # Also NORMAL # WHISPER # For dicom writing # noqa # Flow encoding navigators collection flag # noqa Reads mini data headers (MDH). # If there is more than one image scan, reads the mdh from the first # Logical to physical rotation quaternion # Slice position Reformatting steps that may be sequence-specific. # Take out of list # Take out of list # Rename # Number of velocity encodes # Reshape # Recalculate times at higher precision # TODO Temporarily keeping this here. times_recalculated should be # wrong if TR is reported incorrectly. Keeping this here to check TR # accuracy Summarizes attributes of items in the input dictionary. Args: data (dict): Contains raw data and scan parameters. Data loader for 3D centre-out radial 4D flow. # Check datatypes # raidfile_hdr # noise or k-space # k-space # For collecting image scans for header reading # Then it is the raidfile_hdr (not needed) # Check first two lines to see if this is a k-space or noise scan. # If it's a noise scan, is_image_scan() should return False for # both lines. 
If it's k-space, the first line should return False # since it's the data collected during the flow encoding gradient, # and the second should return True. # Then this is an image scan # The first line may or may not be is_image_scan(), depending # on whether or not flow encoding navigators were acquired. # Total number of lines # Then FE navs were acquired # Data should alternate between FE nav and k-space # Check first line for size of FE navigators array # Check second line for size of k-space array # FE navs were not acquired # Loads and stores each line # It is noise scan Reformatting steps that may be sequence-specific. # Number of velocity encodes # Reshape # Recalculate times at higher precision # Take the second time stamp, the first is FE navigator # TODO Temporarily keeping this here. times_recalculated should be # wrong if TR is reported incorrectly. Keeping this here to check TR # accuracy # Discard time stamps of FE navs (only do this if times is not # holding the recalculated times) TODO # Discard the user-defined measurements from FE navigators | 2.648634 | 3 |
tests/unit/small_text/integrations/pytorch/models/test_kimcnn.py | chschroeder/small-text | 218 | 6621871 | <filename>tests/unit/small_text/integrations/pytorch/models/test_kimcnn.py<gh_stars>100-1000
import unittest
import pytest
from small_text.integrations.pytorch.exceptions import PytorchNotFoundError
try:
import torch
from small_text.integrations.pytorch.classifiers.kimcnn import KimCNN
except (ImportError, PytorchNotFoundError):
pass
@pytest.mark.pytorch
class KimCNNInitTest(unittest.TestCase):
DEFAULT_KERNEL_HEIGHTS = [3, 4, 5]
def test_init_parameters_default(self):
vocab_size = 1000
max_seq_length = 50
model = KimCNN(vocab_size, max_seq_length)
pool_sizes = [(47, 1), (46, 1), (45, 1)]
# Parameters
self.assertEqual(100, model.out_channels)
self.assertEqual(1, model.in_channels)
self.assertEqual(3, model.n_kernels)
self.assertEqual(pool_sizes, model.pool_sizes)
self.assertEqual(max_seq_length, model.max_seq_length)
self.assertEqual(2, model.num_classes)
# Modules
self.assertTrue(model.embedding.weight.requires_grad)
self.assertEqual(0, model.embedding.padding_idx)
self.assertEqual(vocab_size, model.embedding.num_embeddings)
self.assertEqual(300, model.embedding.embedding_dim)
self.assertEqual(len(pool_sizes), len(model.pools))
for i, pool in enumerate(model.pools):
self.assertEqual(pool_sizes[i], pool.kernel_size)
self.assertEqual(3, len(model.convs))
for i, conv in enumerate(model.convs):
self.assertEqual(1, conv.in_channels)
self.assertEqual(100, conv.out_channels)
self.assertEqual((self.DEFAULT_KERNEL_HEIGHTS[i], 300), conv.kernel_size)
self.assertEqual(0.5, model.dropout.p)
self.assertEqual(300, model.fc.in_features)
self.assertEqual(2, model.fc.out_features)
def test_init_parameters_specific(self):
vocab_size = 1000
max_seq_length = 50
num_classes = 3
out_channels = 200
embed_dim = 150
padding_idx = 1
kernel_heights = [4, 5]
fc_dropout = 0.1
embedding_matrix = None
freeze_embedding_layer = True
pool_sizes = [(46, 1), (45, 1)]
model = KimCNN(vocab_size, max_seq_length, num_classes=num_classes,
out_channels=out_channels, embed_dim=embed_dim, padding_idx=padding_idx,
kernel_heights=kernel_heights, dropout=fc_dropout,
embedding_matrix=embedding_matrix,
freeze_embedding_layer=freeze_embedding_layer)
# Parameters
self.assertEqual(out_channels, model.out_channels)
self.assertEqual(1, model.in_channels)
self.assertEqual(2, model.n_kernels)
self.assertEqual(pool_sizes, model.pool_sizes)
self.assertEqual(max_seq_length, model.max_seq_length)
self.assertEqual(num_classes, model.num_classes)
# Modules
self.assertFalse(model.embedding.weight.requires_grad)
self.assertEqual(padding_idx, model.embedding.padding_idx)
self.assertEqual(vocab_size, model.embedding.num_embeddings)
self.assertEqual(embed_dim, model.embedding.embedding_dim)
self.assertEqual(2, len(model.convs))
for i, conv in enumerate(model.convs):
self.assertEqual(1, conv.in_channels)
self.assertEqual(out_channels, conv.out_channels)
self.assertEqual((kernel_heights[i], embed_dim), conv.kernel_size)
self.assertEqual(len(pool_sizes), len(model.pools))
for i, pool in enumerate(model.pools):
self.assertEqual(pool_sizes[i], pool.kernel_size)
self.assertEqual(fc_dropout, model.dropout.p)
self.assertEqual(400, model.fc.in_features)
self.assertEqual(num_classes, model.fc.out_features)
def test_init_with_embedding(self):
vocab_size = 1000
max_seq_length = 50
fake_embedding = torch.rand(1000, 100, device='cpu')
pool_sizes = [(47, 1), (46, 1), (45, 1)]
model = KimCNN(vocab_size, max_seq_length, embedding_matrix=fake_embedding)
# Parameters
self.assertEqual(100, model.out_channels)
self.assertEqual(1, model.in_channels)
self.assertEqual(3, model.n_kernels)
self.assertEqual(pool_sizes, model.pool_sizes)
self.assertEqual(max_seq_length, model.max_seq_length)
self.assertEqual(2, model.num_classes)
# Modules
self.assertTrue(model.embedding.weight.requires_grad)
self.assertEqual(0, model.embedding.padding_idx)
self.assertEqual(fake_embedding.size(0), model.embedding.num_embeddings)
self.assertEqual(fake_embedding.size(1), model.embedding.embedding_dim)
self.assertEqual(3, len(model.convs))
for i, conv in enumerate(model.convs):
self.assertEqual(1, conv.in_channels)
self.assertEqual(100, conv.out_channels)
self.assertEqual((self.DEFAULT_KERNEL_HEIGHTS[i], 300), conv.kernel_size)
self.assertEqual(len(pool_sizes), len(model.pools))
for i, pool in enumerate(model.pools):
self.assertEqual(pool_sizes[i], pool.kernel_size)
self.assertEqual(0.5, model.dropout.p)
self.assertEqual(300, model.fc.in_features)
self.assertEqual(2, model.fc.out_features)
| <filename>tests/unit/small_text/integrations/pytorch/models/test_kimcnn.py<gh_stars>100-1000
import unittest
import pytest
from small_text.integrations.pytorch.exceptions import PytorchNotFoundError
try:
import torch
from small_text.integrations.pytorch.classifiers.kimcnn import KimCNN
except (ImportError, PytorchNotFoundError):
pass
@pytest.mark.pytorch
class KimCNNInitTest(unittest.TestCase):
DEFAULT_KERNEL_HEIGHTS = [3, 4, 5]
def test_init_parameters_default(self):
vocab_size = 1000
max_seq_length = 50
model = KimCNN(vocab_size, max_seq_length)
pool_sizes = [(47, 1), (46, 1), (45, 1)]
# Parameters
self.assertEqual(100, model.out_channels)
self.assertEqual(1, model.in_channels)
self.assertEqual(3, model.n_kernels)
self.assertEqual(pool_sizes, model.pool_sizes)
self.assertEqual(max_seq_length, model.max_seq_length)
self.assertEqual(2, model.num_classes)
# Modules
self.assertTrue(model.embedding.weight.requires_grad)
self.assertEqual(0, model.embedding.padding_idx)
self.assertEqual(vocab_size, model.embedding.num_embeddings)
self.assertEqual(300, model.embedding.embedding_dim)
self.assertEqual(len(pool_sizes), len(model.pools))
for i, pool in enumerate(model.pools):
self.assertEqual(pool_sizes[i], pool.kernel_size)
self.assertEqual(3, len(model.convs))
for i, conv in enumerate(model.convs):
self.assertEqual(1, conv.in_channels)
self.assertEqual(100, conv.out_channels)
self.assertEqual((self.DEFAULT_KERNEL_HEIGHTS[i], 300), conv.kernel_size)
self.assertEqual(0.5, model.dropout.p)
self.assertEqual(300, model.fc.in_features)
self.assertEqual(2, model.fc.out_features)
def test_init_parameters_specific(self):
vocab_size = 1000
max_seq_length = 50
num_classes = 3
out_channels = 200
embed_dim = 150
padding_idx = 1
kernel_heights = [4, 5]
fc_dropout = 0.1
embedding_matrix = None
freeze_embedding_layer = True
pool_sizes = [(46, 1), (45, 1)]
model = KimCNN(vocab_size, max_seq_length, num_classes=num_classes,
out_channels=out_channels, embed_dim=embed_dim, padding_idx=padding_idx,
kernel_heights=kernel_heights, dropout=fc_dropout,
embedding_matrix=embedding_matrix,
freeze_embedding_layer=freeze_embedding_layer)
# Parameters
self.assertEqual(out_channels, model.out_channels)
self.assertEqual(1, model.in_channels)
self.assertEqual(2, model.n_kernels)
self.assertEqual(pool_sizes, model.pool_sizes)
self.assertEqual(max_seq_length, model.max_seq_length)
self.assertEqual(num_classes, model.num_classes)
# Modules
self.assertFalse(model.embedding.weight.requires_grad)
self.assertEqual(padding_idx, model.embedding.padding_idx)
self.assertEqual(vocab_size, model.embedding.num_embeddings)
self.assertEqual(embed_dim, model.embedding.embedding_dim)
self.assertEqual(2, len(model.convs))
for i, conv in enumerate(model.convs):
self.assertEqual(1, conv.in_channels)
self.assertEqual(out_channels, conv.out_channels)
self.assertEqual((kernel_heights[i], embed_dim), conv.kernel_size)
self.assertEqual(len(pool_sizes), len(model.pools))
for i, pool in enumerate(model.pools):
self.assertEqual(pool_sizes[i], pool.kernel_size)
self.assertEqual(fc_dropout, model.dropout.p)
self.assertEqual(400, model.fc.in_features)
self.assertEqual(num_classes, model.fc.out_features)
def test_init_with_embedding(self):
vocab_size = 1000
max_seq_length = 50
fake_embedding = torch.rand(1000, 100, device='cpu')
pool_sizes = [(47, 1), (46, 1), (45, 1)]
model = KimCNN(vocab_size, max_seq_length, embedding_matrix=fake_embedding)
# Parameters
self.assertEqual(100, model.out_channels)
self.assertEqual(1, model.in_channels)
self.assertEqual(3, model.n_kernels)
self.assertEqual(pool_sizes, model.pool_sizes)
self.assertEqual(max_seq_length, model.max_seq_length)
self.assertEqual(2, model.num_classes)
# Modules
self.assertTrue(model.embedding.weight.requires_grad)
self.assertEqual(0, model.embedding.padding_idx)
self.assertEqual(fake_embedding.size(0), model.embedding.num_embeddings)
self.assertEqual(fake_embedding.size(1), model.embedding.embedding_dim)
self.assertEqual(3, len(model.convs))
for i, conv in enumerate(model.convs):
self.assertEqual(1, conv.in_channels)
self.assertEqual(100, conv.out_channels)
self.assertEqual((self.DEFAULT_KERNEL_HEIGHTS[i], 300), conv.kernel_size)
self.assertEqual(len(pool_sizes), len(model.pools))
for i, pool in enumerate(model.pools):
self.assertEqual(pool_sizes[i], pool.kernel_size)
self.assertEqual(0.5, model.dropout.p)
self.assertEqual(300, model.fc.in_features)
self.assertEqual(2, model.fc.out_features)
| en | 0.046613 | # Parameters # Modules # Parameters # Modules # Parameters # Modules | 2.25998 | 2 |
scripts/se/stockholm_parse.py | PierreMesure/openaddresses | 2,430 | 6621872 | <reponame>PierreMesure/openaddresses
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import requests
import os.path
import lxml.html
import codecs
# API key can be obtained here: http://openstreetgs.stockholm.se/Home/Key
# Choose "WGS84" as the coordinate system
API_KEY = ""
combined_filename = ''
combined_file = codecs.open(combined_filename, "a", encoding='utf-8')
combined_file.write("municipality, postalarea, postalcode, streetname, streetnum, lon, lat\n")
s = requests.Session()
# This gets the list of all street in Stockholm
streets_url = "http://openstreetws.stockholm.se/LvWS-2.2/Lv.asmx/GetStreetNames?apiKey=" + API_KEY + "&streetNamePattern=*&optionalMunicipality=&optionalPostalArea=&optionalPostalCode="
r = s.get(streets_url)
# We query addresses for every street name and save them as csv lines
for street_element in lxml.html.fromstring(r.content).findall('.//streetname'):
street_name = street_element.text
print("Processing " + street_name)
payload = {'apikey': API_KEY, 'municipalityPattern': '*', 'streetName': street_name, 'streetNumPattern': '*', 'postalCodePattern': '*', 'postalAreaPattern': '*', 'includeAddressConnectionsForTrafficTypes': '0'}
r = s.post("http://openstreetws.stockholm.se/LvWS-2.2/Lv.asmx/GetAddresses", data=payload, headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"})
for addr_element in lxml.html.fromstring(r.content).findall('.//address'):
wkt = addr_element.find('wkt').text
latlon = wkt[7:len(wkt)-1].split()
csv = addr_element.find('municipality').text + ',' + addr_element.find('postalarea').text + ',' + addr_element.find('postalcode').text + ',' + addr_element.find('streetname').text + ',' + addr_element.find('streetnum').text + ',' + latlon[0] + ',' + latlon[1] + '\n'
combined_file.write(csv)
time.sleep(2)
combined_file.close()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import requests
import os.path
import lxml.html
import codecs
# API key can be obtained here: http://openstreetgs.stockholm.se/Home/Key
# Choose "WGS84" as the coordinate system
API_KEY = ""
combined_filename = ''
combined_file = codecs.open(combined_filename, "a", encoding='utf-8')
combined_file.write("municipality, postalarea, postalcode, streetname, streetnum, lon, lat\n")
s = requests.Session()
# This gets the list of all street in Stockholm
streets_url = "http://openstreetws.stockholm.se/LvWS-2.2/Lv.asmx/GetStreetNames?apiKey=" + API_KEY + "&streetNamePattern=*&optionalMunicipality=&optionalPostalArea=&optionalPostalCode="
r = s.get(streets_url)
# We query addresses for every street name and save them as csv lines
for street_element in lxml.html.fromstring(r.content).findall('.//streetname'):
street_name = street_element.text
print("Processing " + street_name)
payload = {'apikey': API_KEY, 'municipalityPattern': '*', 'streetName': street_name, 'streetNumPattern': '*', 'postalCodePattern': '*', 'postalAreaPattern': '*', 'includeAddressConnectionsForTrafficTypes': '0'}
r = s.post("http://openstreetws.stockholm.se/LvWS-2.2/Lv.asmx/GetAddresses", data=payload, headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"})
for addr_element in lxml.html.fromstring(r.content).findall('.//address'):
wkt = addr_element.find('wkt').text
latlon = wkt[7:len(wkt)-1].split()
csv = addr_element.find('municipality').text + ',' + addr_element.find('postalarea').text + ',' + addr_element.find('postalcode').text + ',' + addr_element.find('streetname').text + ',' + addr_element.find('streetnum').text + ',' + latlon[0] + ',' + latlon[1] + '\n'
combined_file.write(csv)
time.sleep(2)
combined_file.close() | en | 0.814065 | #!/usr/bin/env python # -*- coding: utf-8 -*- # API key can be obtained here: http://openstreetgs.stockholm.se/Home/Key # Choose "WGS84" as the coordinate system # This gets the list of all street in Stockholm # We query addresses for every street name and save them as csv lines | 3.221178 | 3 |
new_credit/serializers.py | Alexandre1313/AppNewCredits | 0 | 6621873 | <filename>new_credit/serializers.py
from rest_framework import serializers
from .models import Usuario, Divida, Consulta
class UsuarioSerializer(serializers.ModelSerializer):
class Meta:
model = Usuario
fields = (
'id',
'data_criacao',
'data_modificacao',
'usuario',
'senha'
)
class DividaSerializer(serializers.ModelSerializer):
class Meta:
model = Divida
fields = (
'id',
'data_criacao',
'data_modificacao',
'usuario',
'data_vencimento',
'divida'
)
class ConsultaSerializer(serializers.ModelSerializer):
class Meta:
model = Consulta
fields = (
'id',
'data_criacao',
'data_modificacao'
)
| <filename>new_credit/serializers.py
from rest_framework import serializers
from .models import Usuario, Divida, Consulta
class UsuarioSerializer(serializers.ModelSerializer):
class Meta:
model = Usuario
fields = (
'id',
'data_criacao',
'data_modificacao',
'usuario',
'senha'
)
class DividaSerializer(serializers.ModelSerializer):
class Meta:
model = Divida
fields = (
'id',
'data_criacao',
'data_modificacao',
'usuario',
'data_vencimento',
'divida'
)
class ConsultaSerializer(serializers.ModelSerializer):
class Meta:
model = Consulta
fields = (
'id',
'data_criacao',
'data_modificacao'
)
| none | 1 | 2.153047 | 2 | |
itests/legacy/test_tasks.py | pabarros/asgard-api | 3 | 6621874 | <gh_stars>1-10
import json
import unittest
from http import HTTPStatus
from responses import RequestsMock
from asgard.models.account import AccountDB
from asgard.models.user import UserDB
from hollowman import api, cache
from hollowman.app import application
from hollowman.conf import DEFAULT_MESOS_ADDRESS
from itests.util import (
ACCOUNT_DEV_DICT,
USER_WITH_MULTIPLE_ACCOUNTS_AUTH_KEY,
BaseTestCase,
)
from tests.utils import get_raw_fixture, with_json_fixture
class TasksEndpointTest(BaseTestCase):
async def setUp(self):
await super(TasksEndpointTest, self).setUp()
self.user = UserDB()
self.user.current_account = AccountDB(**ACCOUNT_DEV_DICT)
self.auth_header = {
"Authorization": f"Token {USER_WITH_MULTIPLE_ACCOUNTS_AUTH_KEY}"
}
async def tearDown(self):
await super(TasksEndpointTest, self).tearDown()
    @unittest.skip("")
    def test_tasks_return_404_for_not_found_task(self):
        # Placeholder test: intentionally skipped (empty reason). self.fail()
        # guarantees it cannot silently pass if the skip is ever removed.
        self.fail()
    @unittest.skip("")
    def test_tasks_return_404_for_task_in_another_namespace(self):
        # Placeholder test: intentionally skipped (empty reason). self.fail()
        # guarantees it cannot silently pass if the skip is ever removed.
        self.fail()
@with_json_fixture(
"../fixtures/api/tasks/task_file_browse_namespace_dev.json"
)
@with_json_fixture("../fixtures/api/tasks/one_task_json_namespace_dev.json")
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_browse_files(
self,
task_browse_file_fixture,
one_task_json_fixture,
one_slave_json_fixture,
slave_state_fixture,
):
task_id = "infra_mysql.b331f6c9-fb9e-11e7-ab4b-faf0633ea91f"
sandbox_directory = "/tmp/mesos/slaves/2084863b-12d1-4319-b515-992eab91a53d-S1/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mysql.b331f6c9-fb9e-11e7-ab4b-faf0633ea91f/runs/e14d7537-c1d0-4846-a076-25d623d6a70f"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id=2084863b-12d1-4319-b515-992eab91a53d-S1",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/browse?path={sandbox_directory}",
body=json.dumps(task_browse_file_fixture),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files", headers=self.auth_header
)
resp_data = json.loads(resp.data)
self.assertEquals(200, resp.status_code)
self.assertEqual(4, len(resp_data))
self.assertEqual(
sorted(
[
"/stdout",
"/stdout.logrotate.conf",
"/stderr",
"/stderr.logrotate.conf",
]
),
sorted([u["path"] for u in resp_data]),
)
@with_json_fixture(
"../fixtures/api/tasks/task_file_browse_namespace_dev.json"
)
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks__browse_files_task_is_completed(
self,
task_browse_file_fixture,
one_task_json_fixture,
one_slave_json_fixture,
slave_state_fixture,
):
"""
Se tentamos pegar os arquivos de uma task que não está mais rodando, temos que procurá-la
no array `completed_tasks`.
"""
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/browse?path={sandbox_directory}",
body=json.dumps(task_browse_file_fixture),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files", headers=self.auth_header
)
resp_data = json.loads(resp.data)
self.assertEquals(200, resp.status_code)
self.assertEqual(4, len(resp_data))
def test_tasks_returnn_404_if_task_does_not_exist(self):
"""
Se o mesos retornar que a task não existe, já retornamos 404 direto.
"""
task_id = "task_do_not_exist.2580925d-0129-11e8-9a03-6e85ded2ca1e"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps({"tasks": []}),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files", headers=self.auth_header
)
self.assertEquals(404, resp.status_code)
@with_json_fixture("../fixtures/api/tasks/task_file_read_response.json")
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_read_file_offset(
self,
task_file_read_fixture,
one_task_json_fixture,
one_slave_json_fixture,
slave_state_fixture,
):
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/read?path={sandbox_directory}/stderr&offset=0&length=42",
body=json.dumps(task_file_read_fixture),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files/read?path=/stderr&offset=0&length=42",
headers=self.auth_header,
)
resp_data = json.loads(resp.data)
self.assertEquals(200, resp.status_code)
self.assertEquals(0, resp_data["offset"])
self.assertFalse(resp_data["truncated"])
self.assertEqual(
"*** Starting uWSGI 2.0.14 (64bit) on [Wed Jan 31 19:58:13 2018] ***",
resp_data["data"],
)
@with_json_fixture("../fixtures/api/tasks/task_file_read_response.json")
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_read_file_offset_truncated_log_file(
self,
task_file_read_fixture,
one_task_json_fixture,
one_slave_json_fixture,
slave_state_fixture,
):
"""
Confirma que, quando o offset + length > ${HOLLOWMAN_TASK_FILEREAD_MAX_OFFSET}
Por padrão esse offset máximo é de 50MB (52428800)
"""
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/read?path={sandbox_directory}/stderr&offset=52428000&length=1024",
body=json.dumps(task_file_read_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/read?path={sandbox_directory}/stderr&offset=-1",
body=json.dumps({"data": "", "offset": 540}),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files/read?path=/stderr&offset=52428000&length=1024",
headers=self.auth_header,
)
resp_data = json.loads(resp.data)
self.assertEquals(200, resp.status_code)
self.assertEquals(540, resp_data["offset"])
self.assertTrue(resp_data["truncated"])
self.assertEqual(
"*** Starting uWSGI 2.0.14 (64bit) on [Wed Jan 31 19:58:13 2018] ***",
resp_data["data"],
)
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_download_file(
self, one_task_json_fixture, one_slave_json_fixture, slave_state_fixture
):
"""
Certifica que a chamada ao endpoint de download popula o cache com os dados
Teremos nos cache dados como: url, task_id, path do arquivo a ser baixado, etc.
"""
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
slave_ip = one_slave_json_fixture["slaves"][0]["hostname"]
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
download_id = "7094c8e5b131416a87ccdc3e7d3131a6"
with unittest.mock.patch.object(
cache, "set"
) as cache_set_mock, unittest.mock.patch.object(
api.tasks,
"uuid4",
return_value=unittest.mock.Mock(hex=download_id),
) as uuid4_mock:
resp = client.get(
f"/tasks/{task_id}/files/download?path=/stdout&offset=0&length=42",
headers=self.auth_header,
)
self.assertEquals(HTTPStatus.OK, resp.status_code)
resp_data = json.loads(resp.data)
self.assertEqual(
"tasks/downloads/7094c8e5b131416a87ccdc3e7d3131a6",
resp_data["download_url"],
)
cache_set_mock.assert_called_with(
f"downloads/{download_id}",
{
"file_url": f"http://{slave_ip}:5051/files/download?path={sandbox_directory}/stdout",
"task_id": task_id,
"file_path": "/stdout",
},
timeout=30,
)
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_return_404_on_file_not_found(
self, one_task_json_fixture, one_slave_json_fixture, slave_state_fixture
):
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/read?path={sandbox_directory}/not_found_file&offset=0&length=42",
body="",
status=404,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files/read?path=/not_found_file&offset=0&length=42",
headers=self.auth_header,
)
self.assertEquals(404, resp.status_code)
def test_download_by_id_expired(self):
"""
Se o ID não existir mais no cache, retornamos 404
"""
download_id = "7094c8e5b131416a87ccdc3e7d3131a6"
with application.test_client() as client:
resp = client.get(f"/tasks/downloads/{download_id}")
self.assertEquals(404, resp.status_code)
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_download_by_id_task_running(
self, one_task_json_fixture, one_slave_json_fixture, slave_state_fixture
):
"""
Retornamos o arquivo desejado
"""
task_file_download_fixture = get_raw_fixture(
"../fixtures/api/tasks/task_file_download_response.txt"
)
slave_ip = one_slave_json_fixture["slaves"][0]["hostname"]
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/download?path={sandbox_directory}/stdout",
body=task_file_download_fixture,
status=200,
match_querystring=True,
)
download_id = "7094c8e5b131416a87ccdc3e7d3131a6"
cache_data = {
"file_url": f"http://{slave_ip}:5051/files/download?path={sandbox_directory}/stdout",
"task_id": task_id,
"file_path": "/stdout",
}
with unittest.mock.patch.object(
cache, "get", return_value=cache_data
), unittest.mock.patch.object(
api.tasks,
"uuid4",
return_value=unittest.mock.Mock(hex=download_id),
) as uuid4_mock:
resp = client.get(
f"/tasks/downloads/{download_id}",
headers=self.auth_header,
)
self.assertEquals(200, resp.status_code)
self.assertEqual(
bytes(task_file_download_fixture, "utf-8"), resp.data
)
self.assertEqual(
resp.headers.get("Content-Disposition"),
f"attachment; filename={task_id}_stdout.log",
)
| import json
import unittest
from http import HTTPStatus
from responses import RequestsMock
from asgard.models.account import AccountDB
from asgard.models.user import UserDB
from hollowman import api, cache
from hollowman.app import application
from hollowman.conf import DEFAULT_MESOS_ADDRESS
from itests.util import (
ACCOUNT_DEV_DICT,
USER_WITH_MULTIPLE_ACCOUNTS_AUTH_KEY,
BaseTestCase,
)
from tests.utils import get_raw_fixture, with_json_fixture
class TasksEndpointTest(BaseTestCase):
async def setUp(self):
await super(TasksEndpointTest, self).setUp()
self.user = UserDB()
self.user.current_account = AccountDB(**ACCOUNT_DEV_DICT)
self.auth_header = {
"Authorization": f"Token {USER_WITH_MULTIPLE_ACCOUNTS_AUTH_KEY}"
}
async def tearDown(self):
await super(TasksEndpointTest, self).tearDown()
@unittest.skip("")
def test_tasks_return_404_for_not_found_task(self):
self.fail()
@unittest.skip("")
def test_tasks_return_404_for_task_in_another_namespace(self):
self.fail()
@with_json_fixture(
"../fixtures/api/tasks/task_file_browse_namespace_dev.json"
)
@with_json_fixture("../fixtures/api/tasks/one_task_json_namespace_dev.json")
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_browse_files(
self,
task_browse_file_fixture,
one_task_json_fixture,
one_slave_json_fixture,
slave_state_fixture,
):
task_id = "infra_mysql.b331f6c9-fb9e-11e7-ab4b-faf0633ea91f"
sandbox_directory = "/tmp/mesos/slaves/2084863b-12d1-4319-b515-992eab91a53d-S1/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mysql.b331f6c9-fb9e-11e7-ab4b-faf0633ea91f/runs/e14d7537-c1d0-4846-a076-25d623d6a70f"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id=2084863b-12d1-4319-b515-992eab91a53d-S1",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/browse?path={sandbox_directory}",
body=json.dumps(task_browse_file_fixture),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files", headers=self.auth_header
)
resp_data = json.loads(resp.data)
self.assertEquals(200, resp.status_code)
self.assertEqual(4, len(resp_data))
self.assertEqual(
sorted(
[
"/stdout",
"/stdout.logrotate.conf",
"/stderr",
"/stderr.logrotate.conf",
]
),
sorted([u["path"] for u in resp_data]),
)
@with_json_fixture(
"../fixtures/api/tasks/task_file_browse_namespace_dev.json"
)
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks__browse_files_task_is_completed(
self,
task_browse_file_fixture,
one_task_json_fixture,
one_slave_json_fixture,
slave_state_fixture,
):
"""
Se tentamos pegar os arquivos de uma task que não está mais rodando, temos que procurá-la
no array `completed_tasks`.
"""
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/browse?path={sandbox_directory}",
body=json.dumps(task_browse_file_fixture),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files", headers=self.auth_header
)
resp_data = json.loads(resp.data)
self.assertEquals(200, resp.status_code)
self.assertEqual(4, len(resp_data))
def test_tasks_returnn_404_if_task_does_not_exist(self):
"""
Se o mesos retornar que a task não existe, já retornamos 404 direto.
"""
task_id = "task_do_not_exist.2580925d-0129-11e8-9a03-6e85ded2ca1e"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps({"tasks": []}),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files", headers=self.auth_header
)
self.assertEquals(404, resp.status_code)
@with_json_fixture("../fixtures/api/tasks/task_file_read_response.json")
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_read_file_offset(
self,
task_file_read_fixture,
one_task_json_fixture,
one_slave_json_fixture,
slave_state_fixture,
):
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/read?path={sandbox_directory}/stderr&offset=0&length=42",
body=json.dumps(task_file_read_fixture),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files/read?path=/stderr&offset=0&length=42",
headers=self.auth_header,
)
resp_data = json.loads(resp.data)
self.assertEquals(200, resp.status_code)
self.assertEquals(0, resp_data["offset"])
self.assertFalse(resp_data["truncated"])
self.assertEqual(
"*** Starting uWSGI 2.0.14 (64bit) on [Wed Jan 31 19:58:13 2018] ***",
resp_data["data"],
)
@with_json_fixture("../fixtures/api/tasks/task_file_read_response.json")
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_read_file_offset_truncated_log_file(
self,
task_file_read_fixture,
one_task_json_fixture,
one_slave_json_fixture,
slave_state_fixture,
):
"""
Confirma que, quando o offset + length > ${HOLLOWMAN_TASK_FILEREAD_MAX_OFFSET}
Por padrão esse offset máximo é de 50MB (52428800)
"""
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/read?path={sandbox_directory}/stderr&offset=52428000&length=1024",
body=json.dumps(task_file_read_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/read?path={sandbox_directory}/stderr&offset=-1",
body=json.dumps({"data": "", "offset": 540}),
status=200,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files/read?path=/stderr&offset=52428000&length=1024",
headers=self.auth_header,
)
resp_data = json.loads(resp.data)
self.assertEquals(200, resp.status_code)
self.assertEquals(540, resp_data["offset"])
self.assertTrue(resp_data["truncated"])
self.assertEqual(
"*** Starting uWSGI 2.0.14 (64bit) on [Wed Jan 31 19:58:13 2018] ***",
resp_data["data"],
)
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_download_file(
self, one_task_json_fixture, one_slave_json_fixture, slave_state_fixture
):
"""
Certifica que a chamada ao endpoint de download popula o cache com os dados
Teremos nos cache dados como: url, task_id, path do arquivo a ser baixado, etc.
"""
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
slave_ip = one_slave_json_fixture["slaves"][0]["hostname"]
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
download_id = "7094c8e5b131416a87ccdc3e7d3131a6"
with unittest.mock.patch.object(
cache, "set"
) as cache_set_mock, unittest.mock.patch.object(
api.tasks,
"uuid4",
return_value=unittest.mock.Mock(hex=download_id),
) as uuid4_mock:
resp = client.get(
f"/tasks/{task_id}/files/download?path=/stdout&offset=0&length=42",
headers=self.auth_header,
)
self.assertEquals(HTTPStatus.OK, resp.status_code)
resp_data = json.loads(resp.data)
self.assertEqual(
"tasks/downloads/7094c8e5b131416a87ccdc3e7d3131a6",
resp_data["download_url"],
)
cache_set_mock.assert_called_with(
f"downloads/{download_id}",
{
"file_url": f"http://{slave_ip}:5051/files/download?path={sandbox_directory}/stdout",
"task_id": task_id,
"file_path": "/stdout",
},
timeout=30,
)
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_tasks_return_404_on_file_not_found(
self, one_task_json_fixture, one_slave_json_fixture, slave_state_fixture
):
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/tasks?task_id={self.user.current_account.namespace}_{task_id}",
body=json.dumps(one_task_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url=f"{DEFAULT_MESOS_ADDRESS}/slaves?slave_id={slave_id}",
body=json.dumps(one_slave_json_fixture),
status=200,
match_querystring=True,
)
rsps.add(
method="GET",
url="http://127.0.0.1:5051/state",
body=json.dumps(slave_state_fixture),
status=200,
)
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/read?path={sandbox_directory}/not_found_file&offset=0&length=42",
body="",
status=404,
match_querystring=True,
)
resp = client.get(
f"/tasks/{task_id}/files/read?path=/not_found_file&offset=0&length=42",
headers=self.auth_header,
)
self.assertEquals(404, resp.status_code)
def test_download_by_id_expired(self):
"""
Se o ID não existir mais no cache, retornamos 404
"""
download_id = "7094c8e5b131416a87ccdc3e7d3131a6"
with application.test_client() as client:
resp = client.get(f"/tasks/downloads/{download_id}")
self.assertEquals(404, resp.status_code)
@with_json_fixture(
"../fixtures/api/tasks/task_info_namespace_dev_task_id_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e.json"
)
@with_json_fixture(
"../fixtures/api/tasks/one_slave_json_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
@with_json_fixture(
"../fixtures/api/tasks/slave_state_id_2084863b-12d1-4319-b515-992eab91a53d-S1.json"
)
def test_download_by_id_task_running(
self, one_task_json_fixture, one_slave_json_fixture, slave_state_fixture
):
"""
Retornamos o arquivo desejado
"""
task_file_download_fixture = get_raw_fixture(
"../fixtures/api/tasks/task_file_download_response.txt"
)
slave_ip = one_slave_json_fixture["slaves"][0]["hostname"]
slave_id = "31fcae61-51a9-4ad1-8054-538503eb53a9-S5"
task_id = "infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e"
sandbox_directory = "/tmp/mesos/slaves/31fcae61-51a9-4ad1-8054-538503eb53a9-S5/frameworks/27b52920-3899-4b90-a1d6-bf83a87f3612-0000/executors/dev_infra_mongodb_mongodb1.2580925d-0129-11e8-9a03-6e85ded2ca1e/runs/1ec0d0bf-0f11-49ba-8a03-2cf954ad1cfe"
with application.test_client() as client:
with RequestsMock() as rsps:
rsps.add(
method="GET",
url=f"http://127.0.0.1:5051/files/download?path={sandbox_directory}/stdout",
body=task_file_download_fixture,
status=200,
match_querystring=True,
)
download_id = "7094c8e5b131416a87ccdc3e7d3131a6"
cache_data = {
"file_url": f"http://{slave_ip}:5051/files/download?path={sandbox_directory}/stdout",
"task_id": task_id,
"file_path": "/stdout",
}
with unittest.mock.patch.object(
cache, "get", return_value=cache_data
), unittest.mock.patch.object(
api.tasks,
"uuid4",
return_value=unittest.mock.Mock(hex=download_id),
) as uuid4_mock:
resp = client.get(
f"/tasks/downloads/{download_id}",
headers=self.auth_header,
)
self.assertEquals(200, resp.status_code)
self.assertEqual(
bytes(task_file_download_fixture, "utf-8"), resp.data
)
self.assertEqual(
resp.headers.get("Content-Disposition"),
f"attachment; filename={task_id}_stdout.log",
) | pt | 0.962168 | Se tentamos pegar os arquivos de uma task que não está mais rodando, temos que procurá-la no array `completed_tasks`. Se o mesos retornar que a task não existe, já retornamos 404 direto. Confirma que, quando o offset + length > ${HOLLOWMAN_TASK_FILEREAD_MAX_OFFSET} Por padrão esse offset máximo é de 50MB (52428800) Certifica que a chamada ao endpoint de download popula o cache com os dados Teremos nos cache dados como: url, task_id, path do arquivo a ser baixado, etc. Se o ID não existir mais no cache, retornamos 404 Retornamos o arquivo desejado | 2.02423 | 2 |
setup.py | opencollective/CVTron | 94 | 6621875 | <gh_stars>10-100
from os import path  # NOTE(review): now unused after dead-code removal below; kept per file conventions
from setuptools import find_packages, setup

# Fix: the original computed `here = path.abspath(path.dirname(__file__))`
# but never referenced it anywhere; the dead assignment has been removed.

# Runtime dependencies installed alongside the package.
install_requires = [
    'tensorflow',
    'tensorlayer',
    'tqdm',
    'requests'
]

# Distribution metadata for the `cvtron` computer-vision package.
setup(
    name='cvtron',
    version='0.0.2',
    description='Out-of-the-Box Computer Vision Library',
    url='https://github.com/cv-group',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='Computer Vision',
    packages=find_packages(exclude=['docs', 'tests*']),
    # NOTE(review): `test_suite='nose.collector'` relies on the long-unmaintained
    # nose package — confirm whether tests are still run this way.
    test_suite='nose.collector',
    install_requires=install_requires,
    extras_require={
    }
)
| from os import path
from setuptools import find_packages, setup

# Absolute path of the directory containing this setup script.
# NOTE(review): `here` is never referenced below — appears to be dead code.
here = path.abspath(path.dirname(__file__))

# Runtime dependencies installed alongside the package.
install_requires = [
    'tensorflow',
    'tensorlayer',
    'tqdm',
    'requests'
]

# Distribution metadata for the `cvtron` computer-vision package.
setup(
    name='cvtron',
    version='0.0.2',
    description='Out-of-the-Box Computer Vision Library',
    url='https://github.com/cv-group',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='Computer Vision',
    packages=find_packages(exclude=['docs', 'tests*']),
    # NOTE(review): nose is unmaintained — confirm this test runner is still used.
    test_suite='nose.collector',
    install_requires=install_requires,
    extras_require={
    }
)
setup.py | edcote/edaplayground-cli | 1 | 6621876 | <gh_stars>1-10
from setuptools import setup

# Distribution metadata for the edaplayground command-line interface.
_METADATA = dict(
    name='edaplayground',
    version='1.0',
    description='Command line interface to edaplayground.com',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['edaplayground'],
    install_requires=['selenium', 'urllib3'],
)

setup(**_METADATA)
| from setuptools import setup
# Distribution metadata for the edaplayground command-line interface.
setup(
    name='edaplayground',
    version='1.0',
    description='Command line interface to edaplayground.com',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['edaplayground'],
    install_requires=['selenium', 'urllib3'],
)
notebooks/uci_datasets_download.py | mohamad-amin/falkon | 130 | 6621877 | import argparse
import os
import tempfile
import h5py
import numpy as np
import pandas as pd
import requests
def download_protein(out_dir):
    """Fetch the UCI CASP protein dataset and save it as ``protein.hdf5`` in *out_dir*."""
    source_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv"
    with tempfile.TemporaryDirectory() as workdir:
        csv_path = os.path.join(workdir, 'protein.csv')
        response = requests.get(source_url, allow_redirects=True)
        with open(csv_path, 'wb') as out:
            out.write(response.content)
        frame = pd.read_csv(csv_path, index_col=None)
    print(frame.head())
    frame = frame.astype(float)
    # RMSD is the regression target; F1..F9 are the features.
    targets = frame["RMSD"].values.reshape(-1, 1)
    features = frame[[f"F{i}" for i in range(1, 10)]].values
    assert features.shape == (45730, 9)
    assert targets.shape == (45730, 1)
    dest = os.path.join(out_dir, 'protein.hdf5')
    with h5py.File(dest, 'w') as hf:
        # Store both arrays as gzip-compressed float64 datasets.
        for name, arr in (("X", features), ("Y", targets)):
            hf.create_dataset(name, data=arr, dtype=np.float64, compression='gzip')
def download_boston(out_dir):
    """Fetch the UCI Boston housing dataset and save it as ``boston.hdf5`` in *out_dir*."""
    source_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data"
    with tempfile.TemporaryDirectory() as workdir:
        data_path = os.path.join(workdir, 'boston.tsv')
        response = requests.get(source_url, allow_redirects=True)
        with open(data_path, 'wb') as out:
            out.write(response.content)
        frame = pd.read_csv(data_path, index_col=None, delim_whitespace=True, header=None)
    print(frame.head())
    frame = frame.astype(float)
    # NOTE(review): column 0 is used as the target here, not the conventional
    # last column (MEDV) — confirm this is intentional.
    targets = frame[0].values.reshape(-1, 1)
    features = frame.drop(0, axis=1).values
    assert features.shape == (506, 13)
    assert targets.shape == (506, 1)
    dest = os.path.join(out_dir, 'boston.hdf5')
    with h5py.File(dest, 'w') as hf:
        # Store both arrays as gzip-compressed float64 datasets.
        for name, arr in (("X", features), ("Y", targets)):
            hf.create_dataset(name, data=arr, dtype=np.float64, compression='gzip')
def download_energy(out_dir):
    """Fetch the UCI energy-efficiency dataset and save it as ``energy.hdf5`` in *out_dir*.

    The heating load ("Y1") is kept as the target; the cooling load ("Y2") is dropped.
    """
    energy_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00242/ENB2012_data.xlsx"
    with tempfile.TemporaryDirectory() as tmp_dir:
        energy_file = os.path.join(tmp_dir, 'energy.xlsx')
        r = requests.get(energy_url, allow_redirects=True)
        with open(energy_file, 'wb') as fh:
            fh.write(r.content)
        # NOTE(review): `convert_float` was deprecated in pandas 1.3 and removed
        # in 2.0 — this call needs updating for newer pandas versions.
        df = pd.read_excel(energy_file, engine='openpyxl', convert_float=False)
        # Drop the two empty spill-over columns produced by the xlsx layout,
        # then any all-NaN rows.
        df = df.drop(["Unnamed: 10", "Unnamed: 11"], axis=1)
        df = df.dropna(axis=0, how='all')
        # Fix: the original called `df.head()` without using the result (a no-op
        # statement); print it for parity with the other download functions.
        print(df.head())
    df = df.astype(float)
    Y = df["Y1"].values.reshape(-1, 1)  # heating load
    X = df.drop(["Y1", "Y2"], axis=1).values
    assert X.shape == (768, 8)
    assert Y.shape == (768, 1)
    energy_hdf_file = os.path.join(out_dir, 'energy.hdf5')
    with h5py.File(energy_hdf_file, 'w') as hf:
        # Store both arrays as gzip-compressed float64 datasets.
        hf.create_dataset("X", data=X, dtype=np.float64, compression='gzip')
        hf.create_dataset("Y", data=Y, dtype=np.float64, compression='gzip')
def download_kin40k(out_dir):
    """
    Data is impossible to find from reputable sources. Delve repository does not have 40k points (only 8192).
    Github repository with full data: https://github.com/trungngv/fgp
    """
    # (remote URL, local file name) for each of the four pieces, downloaded in order.
    sources = [
        ("https://github.com/trungngv/fgp/raw/master/data/kin40k/kin40k_test_labels.asc", "kin40k_test_labels.asc"),
        ("https://github.com/trungngv/fgp/raw/master/data/kin40k/kin40k_train_labels.asc", "kin40k_train_labels.asc"),
        ("https://github.com/trungngv/fgp/raw/master/data/kin40k/kin40k_test_data.asc", "kin40k_test_data.asc"),
        ("https://github.com/trungngv/fgp/raw/master/data/kin40k/kin40k_train_data.asc", "kin40k_train_data.asc"),
    ]
    with tempfile.TemporaryDirectory() as workdir:
        local_paths = {}
        for url, fname in sources:
            dest = os.path.join(workdir, fname)
            response = requests.get(url, allow_redirects=True)
            with open(dest, 'wb') as out:
                out.write(response.content)
            local_paths[fname] = dest

        def _load(fname):
            # Each piece is a fixed-width ASCII matrix with no header.
            return pd.read_fwf(local_paths[fname], header=None, index_col=None).astype(float).values

        test_y = _load("kin40k_test_labels.asc").reshape(-1, 1)
        train_y = _load("kin40k_train_labels.asc").reshape(-1, 1)
        test_x = _load("kin40k_test_data.asc")
        train_x = _load("kin40k_train_data.asc")
    assert test_y.shape == (30_000, 1)
    assert train_y.shape == (10_000, 1)
    assert test_x.shape == (30_000, 8)
    assert train_x.shape == (10_000, 8)
    kin40k_hdf_file = os.path.join(out_dir, 'kin40k.hdf5')
    with h5py.File(kin40k_hdf_file, 'w') as hf:
        # Store the pre-defined train/test split as gzip-compressed float64 datasets.
        hf.create_dataset("Y_test", data=test_y, dtype=np.float64, compression='gzip')
        hf.create_dataset("Y_train", data=train_y, dtype=np.float64, compression='gzip')
        hf.create_dataset("X_test", data=test_x, dtype=np.float64, compression='gzip')
        hf.create_dataset("X_train", data=train_x, dtype=np.float64, compression='gzip')
if __name__ == "__main__":
    p = argparse.ArgumentParser(description="Dataset downloader")
    p.add_argument('-d', '--out-dir', type=str, required=True,
                   help="Output directory for the downloaded and processed datasets.")
    args = p.parse_args()
    download_fns = [download_energy, download_protein, download_boston, download_kin40k]
    # Fix: print readable function names instead of raw function-object reprs,
    # which include memory addresses and are unreadable in logs.
    names = ", ".join(fn.__name__ for fn in download_fns)
    print(f"Will download datasets: {names} to directory {args.out_dir}...")
    for fn in download_fns:
        fn(args.out_dir)
| import argparse
import os
import tempfile
import h5py
import numpy as np
import pandas as pd
import requests
def download_protein(out_dir):
    """Fetch the UCI CASP protein dataset and write it to ``protein.hdf5``.

    Features F1..F9 become ``X``; the RMSD column becomes the (N, 1) target
    ``Y``.  Both are stored gzip-compressed as float64 in *out_dir*.
    """
    source_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv"
    with tempfile.TemporaryDirectory() as work_dir:
        csv_path = os.path.join(work_dir, 'protein.csv')
        response = requests.get(source_url, allow_redirects=True)
        with open(csv_path, 'wb') as out:
            out.write(response.content)
        frame = pd.read_csv(csv_path, index_col=None)
        print(frame.head())
        frame = frame.astype(float)
        targets = frame["RMSD"].values.reshape(-1, 1)
        feature_columns = ["F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9"]
        features = frame[feature_columns].values
        # Sanity-check the known dataset dimensions before persisting.
        assert features.shape == (45730, 9)
        assert targets.shape == (45730, 1)
        out_path = os.path.join(out_dir, 'protein.hdf5')
        with h5py.File(out_path, 'w') as hf:
            hf.create_dataset("X", data=features, dtype=np.float64, compression='gzip')
            hf.create_dataset("Y", data=targets, dtype=np.float64, compression='gzip')
def download_boston(out_dir):
    """Fetch the UCI Boston housing dataset and write it to ``boston.hdf5``.

    NOTE(review): column 0 of ``housing.data`` is CRIM in the canonical
    column ordering, while the usual regression target MEDV is the *last*
    column — confirm that using column 0 as Y is intended.
    """
    source_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data"
    with tempfile.TemporaryDirectory() as work_dir:
        raw_path = os.path.join(work_dir, 'boston.tsv')
        response = requests.get(source_url, allow_redirects=True)
        with open(raw_path, 'wb') as out:
            out.write(response.content)
        # Whitespace-delimited file with no header row.
        frame = pd.read_csv(raw_path, index_col=None, delim_whitespace=True, header=None)
        print(frame.head())
        frame = frame.astype(float)
        targets = frame[0].values.reshape(-1, 1)
        features = frame.drop(0, axis=1).values
        # Sanity-check the known dataset dimensions before persisting.
        assert features.shape == (506, 13)
        assert targets.shape == (506, 1)
        out_path = os.path.join(out_dir, 'boston.hdf5')
        with h5py.File(out_path, 'w') as hf:
            hf.create_dataset("X", data=features, dtype=np.float64, compression='gzip')
            hf.create_dataset("Y", data=targets, dtype=np.float64, compression='gzip')
def download_energy(out_dir):
    """Fetch the UCI Energy Efficiency dataset and write it to ``energy.hdf5``.

    Features X1..X8 become ``X``; the heating load (Y1) is the target and the
    cooling load (Y2) is discarded.
    """
    energy_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00242/ENB2012_data.xlsx"
    with tempfile.TemporaryDirectory() as tmp_dir:
        energy_file = os.path.join(tmp_dir, 'energy.xlsx')
        r = requests.get(energy_url, allow_redirects=True)
        with open(energy_file, 'wb') as fh:
            fh.write(r.content)
        # NOTE(review): convert_float was deprecated in pandas 1.3 and removed
        # in 2.0; this call requires an older pandas — TODO confirm pin.
        df = pd.read_excel(energy_file, engine='openpyxl', convert_float=False)
        # The sheet carries two trailing empty columns plus all-NaN padding rows.
        df = df.drop(["Unnamed: 10", "Unnamed: 11"], axis=1)
        df = df.dropna(axis=0, how='all')
        # Fix: the bare `df.head()` discarded its result (a no-op); print it
        # like the other downloaders do.
        print(df.head())
        df = df.astype(float)
        Y = df["Y1"].values.reshape(-1, 1)  # heating load
        X = df.drop(["Y1", "Y2"], axis=1).values
        # Sanity-check the known dataset dimensions before persisting.
        assert X.shape == (768, 8)
        assert Y.shape == (768, 1)
        energy_hdf_file = os.path.join(out_dir, 'energy.hdf5')
        with h5py.File(energy_hdf_file, 'w') as hf:
            hf.create_dataset("X", data=X, dtype=np.float64, compression='gzip')
            hf.create_dataset("Y", data=Y, dtype=np.float64, compression='gzip')
def download_kin40k(out_dir):
    """
    Data is impossible to find from reputable sources. Delve repository does not have 40k points (only 8192).
    Github repository with full data: https://github.com/trungngv/fgp
    """
    url_test_y = "https://github.com/trungngv/fgp/raw/master/data/kin40k/kin40k_test_labels.asc"
    url_train_y = "https://github.com/trungngv/fgp/raw/master/data/kin40k/kin40k_train_labels.asc"
    url_test_x = "https://github.com/trungngv/fgp/raw/master/data/kin40k/kin40k_test_data.asc"
    url_train_x = "https://github.com/trungngv/fgp/raw/master/data/kin40k/kin40k_train_data.asc"
    with tempfile.TemporaryDirectory() as tmp_dir:
        f_test_y = os.path.join(tmp_dir, "kin40k_test_labels.asc")
        f_train_y = os.path.join(tmp_dir, "kin40k_train_labels.asc")
        f_test_x = os.path.join(tmp_dir, "kin40k_test_data.asc")
        f_train_x = os.path.join(tmp_dir, "kin40k_train_data.asc")
        # Download all four fixed-width ASCII files into the temp dir.
        for (url, file) in [(url_test_y, f_test_y), (url_train_y, f_train_y),
                            (url_test_x, f_test_x), (url_train_x, f_train_x)]:
            r = requests.get(url, allow_redirects=True)
            with open(file, 'wb') as fh:
                fh.write(r.content)
        # Parse with read_fwf (whitespace-padded columns); labels are
        # reshaped into (N, 1) column vectors.
        test_y = pd.read_fwf(f_test_y, header=None, index_col=None) \
            .astype(float).values.reshape(-1, 1)
        train_y = pd.read_fwf(f_train_y, header=None, index_col=None) \
            .astype(float).values.reshape(-1, 1)
        test_x = pd.read_fwf(f_test_x, header=None, index_col=None) \
            .astype(float).values
        train_x = pd.read_fwf(f_train_x, header=None, index_col=None) \
            .astype(float).values
        # NOTE: in this repository's split the *test* partition is the larger
        # one (30k test vs 10k train).
        assert test_y.shape == (30_000, 1)
        assert train_y.shape == (10_000, 1)
        assert test_x.shape == (30_000, 8)
        assert train_x.shape == (10_000, 8)
        kin40k_hdf_file = os.path.join(out_dir, 'kin40k.hdf5')
        with h5py.File(kin40k_hdf_file, 'w') as hf:
            hf.create_dataset("Y_test", data=test_y, dtype=np.float64, compression='gzip')
            hf.create_dataset("Y_train", data=train_y, dtype=np.float64, compression='gzip')
            hf.create_dataset("X_test", data=test_x, dtype=np.float64, compression='gzip')
            hf.create_dataset("X_train", data=train_x, dtype=np.float64, compression='gzip')
if __name__ == "__main__":
    p = argparse.ArgumentParser(description="Dataset downloader")
    p.add_argument('-d', '--out-dir', type=str, required=True,
                   help="Output directory for the downloaded and processed datasets.")
    args = p.parse_args()
    # One downloader function per dataset; each writes one .hdf5 file into out_dir.
    download_fns = [download_energy, download_protein, download_boston, download_kin40k]
    # Fix: print readable function names instead of function-object reprs.
    names = ", ".join(fn.__name__ for fn in download_fns)
    print(f"Will download datasets: {names} to directory {args.out_dir}...")
    for fn in download_fns:
        fn(args.out_dir)
| en | 0.869578 | # heating load Data is impossible to find from reputable sources. Delve repository does not have 40k points (only 8192). Github repository with full data: https://github.com/trungngv/fgp | 2.795775 | 3 |
from uuid import UUID
from dataclasses import dataclass
from fbsrankings.common import Query
@dataclass(frozen=True)
class GameCountBySeasonResult:
    """Immutable query result: the number of games recorded for one season."""

    # Season the count refers to.
    season_id: UUID
    # Total number of games in that season.
    count: int
@dataclass(frozen=True)
class GameCountBySeasonQuery(Query[GameCountBySeasonResult]):
    """Immutable query asking how many games a given season contains."""

    # Season to count games for.
    season_id: UUID
from uuid import UUID
from dataclasses import dataclass
from fbsrankings.common import Query
@dataclass(frozen=True)
class GameCountBySeasonResult:
    """Immutable query result: the number of games recorded for one season."""

    # Season the count refers to.
    season_id: UUID
    # Total number of games in that season.
    count: int
@dataclass(frozen=True)
class GameCountBySeasonQuery(Query[GameCountBySeasonResult]):
    """Immutable query asking how many games a given season contains."""

    # Season to count games for.
    season_id: UUID
| none | 1 | 2.572942 | 3 | |
import pandas as pd
import re
def getTableDict(s_list, table, join):
    """Emit pandas statements that load every table referenced by the query.

    For a query without joins a single ``df = pd.read_csv(...)`` line is
    produced.  With joins, each table is loaded under its own name and its
    columns are prefixed with ``<table>_`` so joined frames stay unambiguous.

    :param s_list: list of generated statement strings, appended to in place
    :param table: name of the primary (FROM-clause) table
    :param join: raw join clause strings, e.g. ``"join b on a.x = b.y"``
    :return: ``s_list`` with the load (and rename) statements appended
    """
    tables = [table]
    for j in join:
        # The joined table name is the first word after the "join " keyword.
        tables.append(j.split('join ')[1].split(' ')[0])
    for t in tables:
        if len(join) == 0:
            s_list.append("df = pd.read_csv('{0}.csv')".format(t))
        else:
            s_list.append("{0} = pd.read_csv('{0}.csv')".format(t))
    for t in tables:
        if len(join) > 0:
            # Prefix every column with the table name so columns stay
            # unambiguous after the frames are merged.
            # (Removed an unused local dict and a stray extra format argument.)
            s_list.append("columns_map = {}")
            s_list.append("for c in {}.columns:".format(t))
            s_list.append("\tcolumns_map[c]='{}_'+c".format(t))
            s_list.append("{}.rename(columns=columns_map, inplace=True)".format(t))
    return s_list
def parse_join(s_list, table, joins):
    """Append a pandas ``merge`` statement for every join clause.

    The ON condition keys are normalised so ``left_on`` refers to the base
    table and ``right_on`` to the joined table; dots become underscores to
    match the prefixed column names produced by getTableDict.
    """
    for clause in joins:
        other = clause.split('join ')[1].split(' ')[0]
        right_key, left_key = clause.split(' on ')[1].split(' = ')
        # Swap when the first key actually belongs to the joined table.
        if left_key.split('.')[0] == other:
            left_key, right_key = right_key, left_key
        if clause.startswith('left'):
            how = 'left'
        elif clause.startswith('right'):
            how = 'right'
        else:
            how = 'inner'
        s_list.append("df = {}.merge({},left_on='{}',right_on='{}',how='{}')".format(
            table, other, left_key.replace('.', '_'), right_key.replace('.', '_'), how))
    return s_list
def handle_value_type(value):
    """Coerce a raw SQL literal string to a Python value.

    Quoted literals become plain strings; otherwise int, float and unicode
    numeric conversion are attempted in turn.  Anything that cannot be
    converted is returned unchanged.
    """
    quotes = ('"', "'")
    if value[0] in quotes and value[-1] in quotes:
        return value[1:-1]
    for convert in (int, float):
        try:
            return convert(value)
        except ValueError:
            pass
    try:
        import unicodedata
        return unicodedata.numeric(value)
    except (TypeError, ValueError):
        pass
    return value
def parse_between(item, isNot, isHaving):
    """Translate ``col between a and b`` into a boolean-mask expression.

    Returns ``(column_name, mask_string)``; the mask combines the two bound
    checks with ``&`` and prefixes ``~`` on each when negated.
    """
    key, bounds = item.split(' between ')
    key = key.strip()
    if isHaving:
        # HAVING refers to post-groupby columns whose dots were flattened.
        key = key.replace('.', '_')
    low, high = [handle_value_type(part.strip()) for part in bounds.split(' and ')]
    negate = '~' if isNot else ''
    return key, "{3}(df['{0}'] >= {1})&{3}(df['{0}'] <= {2})".format(key, low, high, negate)
def parse_in(item, isNot, isHaving):
    """Translate ``col [not] in (...)`` into an ``isin`` mask expression.

    An inline ``not in`` flips the caller-supplied negation flag.
    Returns ``(column_name, mask_string)``.
    """
    if ' not ' in item:
        key, raw_values = item.split(' not in ')
        isNot = not isNot
    else:
        key, raw_values = item.split(' in ')
    key = key.strip()
    if isHaving:
        # HAVING refers to post-groupby columns whose dots were flattened.
        key = key.replace('.', '_')
    # Strip the surrounding parentheses, then convert each literal.
    values = [handle_value_type(v.strip()) for v in raw_values[1:-1].split(',')]
    negate = '~' if isNot else ''
    return key, "{}(df['{}'].isin({}))".format(negate, key, str(values))
def parse_condition(item, isHaving):
    """Translate one WHERE/HAVING condition into a pandas mask string.

    Handles ``between``, ``in``/``not in`` and the plain comparison
    operators.  Returns ``(column_name, mask_expression)``.
    """
    # Undo the placeholder protection applied by parse_where.
    item = item.replace('*between*', 'between').replace('*and*', 'and')
    if item.startswith('not '):
        isNot = True
        item = item[len('not '):]
    else:
        isNot = False
    n = '~' if isNot else ''
    if item.find(' between ') != -1:
        key, result = parse_between(item, isNot, isHaving)
    elif item.find(' in ') != -1:
        key, result = parse_in(item, isNot, isHaving)
    else:
        item = item.replace(' = ', ' == ')
        # Fix: two-character operators must come first.  Regex alternation is
        # left-to-right, so with '<' listed before '<=' the bare '<' inside
        # 'a <= 5' matched and the condition split at the wrong place,
        # producing "(df['a'] < = 5)".
        comparison_operators = ['==', '!=', '<=', '>=', '<', '>']
        compare = re.findall('|'.join(comparison_operators), item)[0]
        key, value = [i.strip() for i in item.split(compare)]
        if isHaving:
            key = key.replace('.', '_')
        result = "{}(df['{}'] {} {})".format(n, key, compare, value)
    return key, result
def parse_where(s_list, where, isHaving=False):
    """Translate a WHERE (or HAVING, with ``isHaving=True``) clause into one
    boolean-mask filter statement appended to ``s_list``.

    :return: ``(keys, s_list)`` -- the column names referenced by the
        conditions, and the statement list.
    """
    if where == '':
        return [], s_list
    # Protect 'between ... and ...': its inner 'and' must not be treated as a
    # boolean connective, so temporarily rewrite the keywords as *between* /
    # *and* (parse_condition undoes this).
    while where.find(' between ') != -1:
        between_index = where.find(' between ') + 1
        and_index = between_index + where[between_index:].find(' and ') + 1
        where = where[:between_index] + '*between*' + where[between_index + len('between'):and_index] + '*and*' + where[and_index + len('and'):]
    # Individual conditions plus the connectives that joined them.
    where_list = re.split(' and | or ', where)
    logic_list = re.findall(' and | or ', where)
    logic_map = {'and': '&', 'or': '|'}
    keys, result = [], ''
    if len(where_list) >= 1:
        key, result = parse_condition(where_list[0], isHaving)
        keys.append(key)
    for i, l in enumerate(logic_list):
        key, r = parse_condition(where_list[i + 1], isHaving)
        # Fix: previously only the first condition's column was recorded, so
        # later HAVING aggregates were dropped; collect every referenced key.
        keys.append(key)
        result = result + logic_map[l.strip()] + r
    s_list.append('df = df[' + result + ']')
    return keys, s_list
def parse_group(s_list, group, projection, order, having):
    """Append groupby/aggregation statements for the GROUP BY clause.

    Aggregate expressions like ``avg(col)`` are collected from the SELECT
    list, the HAVING clause and the ORDER BY specs, then emitted as a single
    ``df.agg(...)`` call.  With no aggregates, a plain ``count()`` keeps only
    the grouping columns.
    """
    if len(group) == 0:
        return s_list
    # Flatten dotted column names to match the table-prefixed columns.
    group = [g.replace('.', '_') for g in group]
    s_list.append("df = df.groupby({}, sort=False)".format(str(group)))
    agg_dict = {}
    attributes = projection[:]
    # parse_where returns the columns referenced by the HAVING conditions
    # (the appended filter statement on the throwaway list is discarded).
    having_attrs, _ = parse_where([],having)
    for h in having_attrs:
        if h.find('(') != -1:
            attributes.append(h)
    for o in order:
        if o.find('(') != -1:
            attributes.append(o.split(' ')[0])
    for a in set(attributes):
        a = a.replace('.', '_')
        # Only aggregate expressions (containing '(') are turned into agg
        # entries; pandas spells SQL's avg as 'mean'.
        if a not in s_list and a.find('(') != -1:
            func = a.split('(')[0]
            if func == 'avg':
                func = 'mean'
            attr = a.split('(')[1][:-1]
            agg_dict.setdefault(attr, []).append((a, func))
    if len(agg_dict) == 0:
        # No aggregates: count() materialises the groups, then keep only the
        # grouping columns.
        s_list.append("df = df.count().reset_index()[{}]".format(group))
    else:
        s_list.append("df = df.agg({})".format(str(agg_dict)))
        # agg with named tuples produces a MultiIndex; drop the outer level.
        s_list.append("df.columns = df.columns.droplevel(0)")
    return s_list
def parse_order(s_list, order):
    """Append a ``sort_values`` statement implementing the ORDER BY clause.

    Each spec is ``"column [asc|desc]"``; the direction defaults to
    ascending, and anything other than ``asc`` counts as descending.
    """
    if not order:
        return s_list
    columns, ascendings = [], []
    for spec in order:
        parts = spec.split(' ')
        columns.append(parts[0].replace('.', '_'))
        ascendings.append(len(parts) == 1 or parts[1].lower() == 'asc')
    s_list.append("df = df.sort_values({},ascending={})".format(str(columns), str(ascendings)))
    return s_list
def parse_limit_offset(s_list, offset, limit):
    """Append an ``iloc`` slice implementing OFFSET/LIMIT.

    Empty strings mean the clause was absent.  Without a LIMIT no slicing
    statement is emitted (a bare OFFSET is ignored, as before).
    """
    limit = int(limit) if limit != '' else 0
    offset = int(offset) if offset != '' else 0
    if limit == 0:
        return s_list
    s_list.append("df = df.iloc[{}:{}]".format(offset, offset + limit))
    return s_list
def parse_projection(s_list, attributes, group):
    """Append the final column-selection (SELECT list) statements.

    ``SELECT *`` with no grouping emits nothing; otherwise the index is
    reset and either the projected columns or, failing that, the GROUP BY
    columns are kept.
    """
    if len(attributes) == 0 and len(group) == 0:
        # SELECT * without grouping: keep every column untouched.
        return s_list
    s_list.append("df = df.reset_index()")
    if attributes:
        selected = [a.replace('.', '_') for a in attributes]
    else:
        # No explicit projection: fall back to the grouping columns.
        selected = [g.replace('.', '_') for g in group]
    s_list.append("df = df[{}]".format(selected))
    return s_list
def getResult(sql_dict):
    """Assemble the full pandas statement list for a parsed SQL query.

    The pipeline mirrors SQL evaluation order: load/join -> WHERE ->
    GROUP BY -> HAVING -> ORDER BY -> OFFSET/LIMIT -> projection.
    """
    # 'select *' means no explicit projection.
    if len(sql_dict['projection']) == 1 and sql_dict['projection'][0] == '*':
        sql_dict['projection'] = []
    s_list = []
    s_list = getTableDict(s_list, sql_dict['table'], sql_dict['join'])
    s_list = parse_join(s_list, sql_dict['table'], sql_dict['join'])
    _, s_list = parse_where(s_list, sql_dict['where'])
    s_list = parse_group(s_list, sql_dict['group'], sql_dict['projection'], sql_dict['order'],
                         sql_dict['having'])
    # Second parse_where pass handles HAVING against the aggregated frame.
    _, s_list= parse_where(s_list, sql_dict['having'], True)
    s_list = parse_order(s_list, sql_dict['order'])
    s_list = parse_limit_offset(s_list, sql_dict['offset'], sql_dict['limit'])
    # NOTE(review): debug print left in place intentionally (byte-identical code).
    print(s_list)
    s_list = parse_projection(s_list, sql_dict['projection'], sql_dict['group'])
    return s_list
def translate(sql_dict):
    """Public entry point: convert a parsed SQL dict into pandas statements."""
    return getResult(sql_dict)
import pandas as pd
import re
def getTableDict(s_list, table, join):
# table_dict = {}
tables = [table]
for j in join:
tables.append(j.split('join ')[1].split(' ')[0])
for t in tables:
if len(join) == 0:
s_list.append("df = pd.read_csv('{0}.csv')".format(t))
else:
s_list.append("{0} = pd.read_csv('{0}.csv')".format(t))
for t in tables:
columns = {}
if len(join) > 0:
s_list.append("columns_map = {}")
s_list.append("for c in {}.columns:".format(t))
s_list.append("\tcolumns_map[c]='{}_'+c".format(t) )
s_list.append("{}.rename(columns=columns_map, inplace=True)".format(t, str(columns)))
return s_list
def parse_join(s_list, table, joins):
for join in joins:
join_table = join.split('join ')[1].split(' ')[0]
right,left = join.split(' on ')[1].split(' = ')
if left.split('.')[0] == join_table:
left, right = right, left
join_type = 'inner'
if join.startswith('left'):
join_type = 'left'
elif join.startswith('right'):
join_type = 'right'
s_list.append("df = {}.merge({},left_on='{}',right_on='{}',how='{}')".format(table,join_table,left.replace('.','_'),right.replace('.','_'), join_type))
return s_list
def handle_value_type(value):
if value[0] in ['"',"'"] and value[-1] in ['"',"'"]:
return value[1:-1]
else:
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
try:
import unicodedata
return unicodedata.numeric(value)
except (TypeError, ValueError):
pass
return value
def parse_between(item, isNot, isHaving):
key, value = item.split(' between ')
key = key.strip()
if isHaving:
key = key.replace('.','_')
v1,v2 = [handle_value_type(i.strip()) for i in value.split(' and ')]
n = '~' if isNot else ''
return key, "{3}(df['{0}'] >= {1})&{3}(df['{0}'] <= {2})".format(key, v1, v2, n)
def parse_in(item, isNot, isHaving):
if item.find(' not ') != -1:
key, value = item.split(' not in ')
isNot = not isNot
else:
key, value = item.split(' in ')
key = key.strip()
if isHaving:
key = key.replace('.','_')
value = [handle_value_type(i.strip()) for i in value[1:-1].split(',')]
n = '~' if isNot else ''
return key, "{}(df['{}'].isin({}))".format(n, key, str(value))
def parse_condition(item, isHaving):
    """Translate one WHERE/HAVING condition into a pandas mask string.

    Handles ``between``, ``in``/``not in`` and the plain comparison
    operators.  Returns ``(column_name, mask_expression)``.
    """
    # Undo the placeholder protection applied by parse_where.
    item = item.replace('*between*', 'between').replace('*and*', 'and')
    if item.startswith('not '):
        isNot = True
        item = item[len('not '):]
    else:
        isNot = False
    n = '~' if isNot else ''
    if item.find(' between ') != -1:
        key, result = parse_between(item, isNot, isHaving)
    elif item.find(' in ') != -1:
        key, result = parse_in(item, isNot, isHaving)
    else:
        item = item.replace(' = ', ' == ')
        # Fix: two-character operators must come first.  Regex alternation is
        # left-to-right, so with '<' listed before '<=' the bare '<' inside
        # 'a <= 5' matched and the condition split at the wrong place,
        # producing "(df['a'] < = 5)".
        comparison_operators = ['==', '!=', '<=', '>=', '<', '>']
        compare = re.findall('|'.join(comparison_operators), item)[0]
        key, value = [i.strip() for i in item.split(compare)]
        if isHaving:
            key = key.replace('.', '_')
        result = "{}(df['{}'] {} {})".format(n, key, compare, value)
    return key, result
def parse_where(s_list, where, isHaving=False):
if where == '':
return [],s_list
# repalce between and with *between*
while where.find(' between ') != -1:
between_index = where.find(' between ') + 1
and_index = between_index + where[between_index:].find(' and ') + 1
where = where[:between_index]+'*between*'+where[between_index+len('between'):and_index]+ '*and*'+where[and_index+len('and'):]
where_list = re.split(' and | or ', where)
logic_list = re.findall(' and | or ', where)
logic_map = {'and':'&','or':'|'}
keys, result = [], ''
if len(where_list) >=1:
key, result = parse_condition(where_list[0],isHaving)
keys.append(key)
for i, l in enumerate(logic_list):
key, r = parse_condition(where_list[i+1],isHaving)
result = result + logic_map[l.strip()] + r
s_list.append('df = df['+result+']')
return keys,s_list
def parse_group(s_list, group, projection, order, having):
if len(group) == 0:
return s_list
group = [g.replace('.', '_') for g in group]
s_list.append("df = df.groupby({}, sort=False)".format(str(group)))
# df = df.groupby(group, sort=False)
agg_dict = {}
attributes = projection[:]
having_attrs, _ = parse_where([],having)
for h in having_attrs:
if h.find('(') != -1:
# attributes.append(h.split(':')[1].split(' ')[0])
attributes.append(h)
for o in order:
if o.find('(') != -1:
attributes.append(o.split(' ')[0])
for a in set(attributes):
a = a.replace('.', '_')
if a not in s_list and a.find('(') != -1:
func = a.split('(')[0]
if func == 'avg':
func = 'mean'
attr = a.split('(')[1][:-1]
agg_dict.setdefault(attr, []).append((a, func))
if len(agg_dict) == 0:
# df = df.count().reset_index()[group]
s_list.append("df = df.count().reset_index()[{}]".format(group))
else:
s_list.append("df = df.agg({})".format(str(agg_dict)))
s_list.append("df.columns = df.columns.droplevel(0)")
# df = df.agg(agg_dict)
# df.columns = df.columns.droplevel(0)
return s_list
def parse_order(s_list, order):
if len(order) == 0:
return s_list
columns, ascendings = [], []
for o in order:
columns.append(o.split(' ')[0].replace('.', '_'))
if len(o.split(' ')) == 1:
ascendings.append(True)
else:
if o.split(' ')[1].lower() == 'asc':
ascendings.append(True)
else:
ascendings.append(False)
s_list.append("df = df.sort_values({},ascending={})".format(str(columns), str(ascendings)))
# df = df.sort_values(columns, ascending=ascendings)
return s_list
def parse_limit_offset(s_list, offset, limit):
if limit == '':
limit = 0
else:
limit = int(limit)
if offset == '':
offset = 0
else:
offset = int(offset)
# if df.size < offset: # review.drop(review.index)
# s = "df = df.drop(df.index)"
# df = df.drop(df.index)
# else:
if limit == 0:
return s_list
# df = df.iloc[offset:offset + limit]
s = "df = df.iloc[{}:{}]".format(offset, offset + limit)
s_list.append(s)
return s_list
def parse_projection(s_list, attributes, group):
if len(attributes) == 0 and len(group) == 0:
pass
else:
# df = df.reset_index()
s_list.append("df = df.reset_index()")
if len(attributes) == 0:
group = [g.replace('.', '_') for g in group]
# df = df[group]
s_list.append("df = df[{}]".format(group))
else:
attributes = [a.replace('.', '_') for a in attributes]
# df = df[attributes]
s_list.append("df = df[{}]".format(attributes))
return s_list
def getResult(sql_dict):
if len(sql_dict['projection']) == 1 and sql_dict['projection'][0] == '*':
sql_dict['projection'] = []
s_list = []
s_list = getTableDict(s_list, sql_dict['table'], sql_dict['join'])
s_list = parse_join(s_list, sql_dict['table'], sql_dict['join'])
_, s_list = parse_where(s_list, sql_dict['where'])
s_list = parse_group(s_list, sql_dict['group'], sql_dict['projection'], sql_dict['order'],
sql_dict['having'])
_, s_list= parse_where(s_list, sql_dict['having'], True)
s_list = parse_order(s_list, sql_dict['order'])
s_list = parse_limit_offset(s_list, sql_dict['offset'], sql_dict['limit'])
print(s_list)
s_list = parse_projection(s_list, sql_dict['projection'], sql_dict['group'])
return s_list
def translate(sql_dict):
s_list = getResult(sql_dict)
return s_list
| en | 0.285187 | # table_dict = {} # repalce between and with *between* # df = df.groupby(group, sort=False) # attributes.append(h.split(':')[1].split(' ')[0]) # df = df.count().reset_index()[group] # df = df.agg(agg_dict) # df.columns = df.columns.droplevel(0) # df = df.sort_values(columns, ascending=ascendings) # if df.size < offset: # review.drop(review.index) # s = "df = df.drop(df.index)" # df = df.drop(df.index) # else: # df = df.iloc[offset:offset + limit] # df = df.reset_index() # df = df[group] # df = df[attributes] | 3.131392 | 3 |
test.py | sillygod/django-as-pure-api-server | 1 | 6621880 | <gh_stars>1-10
# from pygelf import GelfTcpHandler, GelfUdpHandler, GelfTlsHandler, GelfHttpHandler
# import logging
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger()
# logger.addHandler(GelfTcpHandler(host='127.0.0.1', port=9401))
# logger.addHandler(GelfTcpHandler(host='localhost', port=12201))
# logger.info('hello gelf wow')
import json
import datetime
# Sample payload used to demonstrate json.dumps with a custom `default` hook.
obj = {
    "a": 1,
    "b": 2,
}
def smarter_repr(obj):
    """json.dumps fallback: datetimes become ISO-8601 strings, anything else repr()."""
    return obj.isoformat() if isinstance(obj, datetime.datetime) else repr(obj)
# separators must be (item_separator, key_separator); the original 2-char
# string ",:" only worked by accidental sequence-unpacking — use the
# documented tuple form.
string = json.dumps(obj, separators=(",", ":"), default=smarter_repr)
print(string)
# json part of source code ..
# you see it will use the default function if get unexpected type ..
# def _iterencode(o, _current_indent_level):
# if isinstance(o, str):
# yield _encoder(o)
# elif o is None:
# yield 'null'
# elif o is True:
# yield 'true'
# elif o is False:
# yield 'false'
# elif isinstance(o, int):
# # see comment for int/float in _make_iterencode
# yield _intstr(o)
# elif isinstance(o, float):
# # see comment for int/float in _make_iterencode
# yield _floatstr(o)
# elif isinstance(o, (list, tuple)):
# yield from _iterencode_list(o, _current_indent_level)
# elif isinstance(o, dict):
# yield from _iterencode_dict(o, _current_indent_level)
# else:
# if markers is not None:
# markerid = id(o)
# if markerid in markers:
# raise ValueError("Circular reference detected")
# markers[markerid] = o
# o = _default(o)
# yield from _iterencode(o, _current_indent_level)
# if markers is not None:
# del markers[markerid]
# return _iterencode
| # from pygelf import GelfTcpHandler, GelfUdpHandler, GelfTlsHandler, GelfHttpHandler
# import logging
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger()
# logger.addHandler(GelfTcpHandler(host='127.0.0.1', port=9401))
# logger.addHandler(GelfTcpHandler(host='localhost', port=12201))
# logger.info('hello gelf wow')
import json
import datetime
# Sample payload used to demonstrate json.dumps with a custom `default` hook.
obj = {
    "a": 1,
    "b": 2,
}
def smarter_repr(obj):
    """json.dumps fallback: datetimes become ISO-8601 strings, anything else repr()."""
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    return repr(obj)
# separators must be (item_separator, key_separator); the original 2-char
# string ",:" only worked by accidental sequence-unpacking — use the
# documented tuple form.
string = json.dumps(obj, separators=(",", ":"), default=smarter_repr)
print(string)
# json part of source code ..
# you see it will use the default function if get unexpected type ..
# def _iterencode(o, _current_indent_level):
# if isinstance(o, str):
# yield _encoder(o)
# elif o is None:
# yield 'null'
# elif o is True:
# yield 'true'
# elif o is False:
# yield 'false'
# elif isinstance(o, int):
# # see comment for int/float in _make_iterencode
# yield _intstr(o)
# elif isinstance(o, float):
# # see comment for int/float in _make_iterencode
# yield _floatstr(o)
# elif isinstance(o, (list, tuple)):
# yield from _iterencode_list(o, _current_indent_level)
# elif isinstance(o, dict):
# yield from _iterencode_dict(o, _current_indent_level)
# else:
# if markers is not None:
# markerid = id(o)
# if markerid in markers:
# raise ValueError("Circular reference detected")
# markers[markerid] = o
# o = _default(o)
# yield from _iterencode(o, _current_indent_level)
# if markers is not None:
# del markers[markerid]
# return _iterencode | en | 0.35704 | # from pygelf import GelfTcpHandler, GelfUdpHandler, GelfTlsHandler, GelfHttpHandler # import logging # logging.basicConfig(level=logging.INFO) # logger = logging.getLogger() # logger.addHandler(GelfTcpHandler(host='127.0.0.1', port=9401)) # logger.addHandler(GelfTcpHandler(host='localhost', port=12201)) # logger.info('hello gelf wow') # json part of source code .. # you see it will use the default function if get unexpected type .. # def _iterencode(o, _current_indent_level): # if isinstance(o, str): # yield _encoder(o) # elif o is None: # yield 'null' # elif o is True: # yield 'true' # elif o is False: # yield 'false' # elif isinstance(o, int): # # see comment for int/float in _make_iterencode # yield _intstr(o) # elif isinstance(o, float): # # see comment for int/float in _make_iterencode # yield _floatstr(o) # elif isinstance(o, (list, tuple)): # yield from _iterencode_list(o, _current_indent_level) # elif isinstance(o, dict): # yield from _iterencode_dict(o, _current_indent_level) # else: # if markers is not None: # markerid = id(o) # if markerid in markers: # raise ValueError("Circular reference detected") # markers[markerid] = o # o = _default(o) # yield from _iterencode(o, _current_indent_level) # if markers is not None: # del markers[markerid] # return _iterencode | 2.599282 | 3 |
setup.py | dugalh/simple_ortho | 3 | 6621881 | <filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages
# To install local development version use:
# pip install -e .
setup(
    name='simple-ortho',
    version='0.1.0',
    description='Orthorectification with known camera model and DEM',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/dugalh/simple_ortho/blob/develop/setup.py',
    license='Apache-2.0',
    packages=find_packages(),
    python_requires='>=3.8',
    install_requires=[
        'rasterio>=1.2',
        # 'opencv>=4.5',  # pip does not see the conda installed opencv, so commented out for now
        'pandas>=1.2',
        # Fix: a missing comma here previously concatenated the next two
        # adjacent string literals into the single bogus requirement
        # 'pyyaml>=5.4shapely>=1.7', silently dropping both dependencies.
        'pyyaml>=5.4',
        'shapely>=1.7',
    ],
    entry_points={'console_scripts': ['simple-ortho=simple_ortho.command_line:main_entry']},
    scripts=['scripts/ortho_im.py', 'scripts/batch_recompress.bat']
)
| <filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages
# To install local development version use:
# pip install -e .
setup(
    name='simple-ortho',
    version='0.1.0',
    description='Orthorectification with known camera model and DEM',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/dugalh/simple_ortho/blob/develop/setup.py',
    license='Apache-2.0',
    packages=find_packages(),
    python_requires='>=3.8',
    install_requires=[
        'rasterio>=1.2',
        # 'opencv>=4.5',  # pip does not see the conda installed opencv, so commented out for now
        'pandas>=1.2',
        # Fix: a missing comma here previously concatenated the next two
        # adjacent string literals into the single bogus requirement
        # 'pyyaml>=5.4shapely>=1.7', silently dropping both dependencies.
        'pyyaml>=5.4',
        'shapely>=1.7',
    ],
    entry_points={'console_scripts': ['simple-ortho=simple_ortho.command_line:main_entry']},
    scripts=['scripts/ortho_im.py', 'scripts/batch_recompress.bat']
)
| en | 0.801126 | # To install local development version use: # pip install -e . # 'opencv>=4.5', # pip does not see the conda installed opencv, so commented out for now | 1.503371 | 2 |
Backend/endpoints/misc/elevatorInfo.py | LukasSchmid97/elevatorbot | 0 | 6621882 | <reponame>LukasSchmid97/elevatorbot<gh_stars>0
from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
from Backend.crud import elevator_servers
from Backend.dependencies import get_db_session
from Shared.networkingSchemas import ElevatorGuildModel, ElevatorGuildsModel, EmptyResponseModel
# Sub-router for Elevator's "which discord servers am I in" bookkeeping.
router = APIRouter(
    prefix="/elevator/discord_servers",
    tags=["elevator"],
)
@router.get("/get/all", response_model=ElevatorGuildsModel)  # has test
async def get_discord_servers(db: AsyncSession = Depends(get_db_session)):
    """Get all discord servers Elevator is currently in"""
    rows = await elevator_servers.get(db)
    guild_models = [ElevatorGuildModel.from_orm(row) for row in rows]
    return ElevatorGuildsModel(guilds=guild_models)
@router.post("/add/{guild_id}", response_model=EmptyResponseModel) # has test
async def add_discord_server(guild_id: int, db: AsyncSession = Depends(get_db_session)):
    """Add a discord server to the ones Elevator is currently in"""
    # Persist the guild id; the endpoint intentionally returns an empty body.
    await elevator_servers.insert(db, guild_id)
    return EmptyResponseModel()
@router.delete("/delete/{guild_id}", response_model=EmptyResponseModel) # has test
async def delete_discord_server(guild_id: int, db: AsyncSession = Depends(get_db_session)):
    """Delete a discord server from the ones Elevator is currently in"""
    # Remove the guild id; the endpoint intentionally returns an empty body.
    await elevator_servers.delete(db, guild_id)
    return EmptyResponseModel()
| from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
from Backend.crud import elevator_servers
from Backend.dependencies import get_db_session
from Shared.networkingSchemas import ElevatorGuildModel, ElevatorGuildsModel, EmptyResponseModel
# Sub-router for Elevator's "which discord servers am I in" bookkeeping.
router = APIRouter(
    prefix="/elevator/discord_servers",
    tags=["elevator"],
)
@router.get("/get/all", response_model=ElevatorGuildsModel)  # has test
async def get_discord_servers(db: AsyncSession = Depends(get_db_session)):
    """Get all discord servers Elevator is currently in"""
    rows = await elevator_servers.get(db)
    guild_models = [ElevatorGuildModel.from_orm(row) for row in rows]
    return ElevatorGuildsModel(guilds=guild_models)
@router.post("/add/{guild_id}", response_model=EmptyResponseModel) # has test
async def add_discord_server(guild_id: int, db: AsyncSession = Depends(get_db_session)):
    """Add a discord server to the ones Elevator is currently in"""
    # Persist the guild id; the endpoint intentionally returns an empty body.
    await elevator_servers.insert(db, guild_id)
    return EmptyResponseModel()
@router.delete("/delete/{guild_id}", response_model=EmptyResponseModel) # has test
async def delete_discord_server(guild_id: int, db: AsyncSession = Depends(get_db_session)):
    """Delete a discord server from the ones Elevator is currently in"""
    # Remove the guild id; the endpoint intentionally returns an empty body.
    await elevator_servers.delete(db, guild_id)
    return EmptyResponseModel()
Exercícios/Ex.74.py | mattheuslima/Projetos-Curso_Python | 0 | 6621883 | <gh_stars>0
'''Python Exercise 074: Create a program that generates five random numbers and puts them in a tuple.
Then show the list of generated numbers and also indicate the smallest and largest value in the tuple.'''
from random import randint

# Decorative header banner (output text kept in Portuguese, as shipped).
print('-='*10)
print('{:=^20}'.format('Desafio 74'))
print('-='*10)
# Five independent draws in [1, 20]; duplicates are possible.
lista=(randint(1,20),randint(1,20),randint(1,20),randint(1,20),randint(1,20))
print(f'\nA lista de valores gerados é: {lista}.\nO maior valor gerado é {max(lista)}.\nO menor valor gerado é {min(lista)}')
'''Python Exercise 074: Create a program that generates five random numbers and puts them in a tuple.
Then show the list of generated numbers and also indicate the smallest and largest value in the tuple.'''
from random import randint

# Decorative header banner (output text kept in Portuguese, as shipped).
print('-='*10)
print('{:=^20}'.format('Desafio 74'))
print('-='*10)
# Five independent draws in [1, 20]; duplicates are possible.
lista=(randint(1,20),randint(1,20),randint(1,20),randint(1,20),randint(1,20))
print(f'\nA lista de valores gerados é: {lista}.\nO maior valor gerado é {max(lista)}.\nO menor valor gerado é {min(lista)}')
import pandas as pd
from itertools import groupby
def arrange_grouped_line_indices(line_connections, debug=False):
    """Collapse per-pair line connections into runs of line indices.

    ``line_connections`` is a sequence of ``(idx_a, idx_b, label)`` tuples
    (label e.g. 'NOT_CONNECTED').  Consecutive tuples with the same label are
    grouped, their indices merged and deduplicated, and boundary indices of
    NOT_CONNECTED runs that overlap neighbouring connected runs are trimmed.

    Returns a list of ``[sorted_indices, label]`` pairs.
    """
    # Group consecutive connections that share the same label (tuple slot 2).
    lines = [list(i) for j, i in groupby(line_connections, lambda a: a[2])]
    if debug:
        print('arrange_grouped_line_indices: %s \n---------\n' % (str(lines)))
    arranged_lines = []
    for line_items in lines:
        # Merge both endpoint indices of every pair in the run, dedup + sort.
        indices = []
        for line_item in line_items:
            indices.append(line_item[0])
            indices.append(line_item[1])
        indices = sorted(list(set(indices)))
        arranged_lines.append([indices, line_items[0][2]])
    if debug:
        print('arrange_grouped_line_indices,arranged_lines : %s \n---------\n' % (str(arranged_lines)))
    final_arranged_lines = []
    if len(arranged_lines) == 1:
        final_arranged_lines.append([arranged_lines[0][0], arranged_lines[0][1]])
    else:
        # With multiple runs, a NOT_CONNECTED run shares its boundary indices
        # with the adjacent connected runs; drop those shared endpoints.
        for index, line_item in enumerate(arranged_lines):
            if index == 0 and line_item[1] == 'NOT_CONNECTED':
                del line_item[0][-1]
            if index > 0 and index < (len(arranged_lines) - 1) and line_item[1] == 'NOT_CONNECTED':
                del line_item[0][0]
                del line_item[0][-1]
            if index == (len(arranged_lines) - 1) and line_item[1] == 'NOT_CONNECTED':
                del line_item[0][0]
            final_arranged_lines.append([line_item[0], line_item[1]])
    if debug:
        print('final_arrange_grouped_line_indices,arranged_lines : %s \n---------\n' % (str(final_arranged_lines)))
    return final_arranged_lines
def are_lines_fonts_similar(df, configs, idx1, idx2, debug=False):
if (abs(df.iloc[idx1]['font_size'] - df.iloc[idx2]['font_size']) < 2.0) \
and (df.iloc[idx1]['font_family'] == df.iloc[idx2]['font_family']):
return True
return False
def are_hlines_aligned(df, configs, idx1, idx2, debug=False):
line1_left = df.iloc[idx1]['text_left']
line2_left = df.iloc[idx2]['text_left']
line1_right = df.iloc[idx1]['text_left'] + df.iloc[idx1]['text_width']
line2_right = df.iloc[idx2]['text_left'] + df.iloc[idx2]['text_width']
if (abs(line2_left - line1_left) < configs['LEFT_OR_RIGHT_ALIGNMENT_MARGIN']) and \
(abs(line2_right - line1_right) < configs['LEFT_OR_RIGHT_ALIGNMENT_MARGIN']):
return True
return False
def get_lines_upper_lower(df, idx1, idx2):
if df.iloc[idx2]['text_top'] > df.iloc[idx1]['text_top']:
return idx1, idx2
return idx2, idx1
def are_vlines(df, configs, idx1, idx2, debug=False):
first_idx, sec_idx = get_lines_upper_lower(df, idx1, idx2)
space = df.iloc[sec_idx]['text_top'] - (df.iloc[first_idx]['text_top'] + df.iloc[first_idx]['text_height'])
if space > configs['VERTICAL_SPACE_TOO_CLOSE']:
return True
return False
def are_vlines_close_enough(df, configs, idx1, idx2, debug=False):
space = ((df.iloc[idx1]['text_top'] + df.iloc[idx1]['text_height'])) - df.iloc[idx2]['text_top']
if debug:
print('are_vlines_too_close:: idx1: %d, idx2: %d, space: %d' % (idx1, idx2, space))
if space < configs['AVERAGE_VERTICAL_SPACE']:
return True
return False
def are_vlines_too_close(df, configs, idx1, idx2, debug=False):
first_idx, sec_idx = get_lines_upper_lower(df, idx1, idx2)
space = df.iloc[sec_idx]['text_top'] - (df.iloc[first_idx]['text_top'] + df.iloc[first_idx]['text_height'])
if debug:
print('are_vlines_too_close:: idx1: %d, idx2: %d, space: %d' % (idx1, idx2, space))
if space <= configs['VERTICAL_SPACE_TOO_CLOSE']:
return True
return False
def are_vlines_get_overlap(df, configs, idx1, idx2, debug=False):
first_idx, sec_idx = get_lines_upper_lower(df, idx1, idx2)
if (df.iloc[first_idx]['text_left'] + df.iloc[first_idx]['text_width']) < df.iloc[sec_idx]['text_left']:
return ('VLINES_ZERO_OVERLAP', 0.0)
def are_vlines_left_aligned(df, configs, idx1, idx2, debug=False):
return
# if abs(df.iloc[idx1]['text_left'] - df.iloc[idx2]['text_left']) < 0.3 *
def are_hlines_too_close(df, configs, idx1, idx2, debug=False):
space = abs((df.iloc[idx1]['text_left'] + df.iloc[idx1]['text_width']) - df.iloc[idx2]['text_left'])
if debug:
print('are_hlines_too_close:: idx1: %d, idx2: %d, space: %d' % (idx1, idx2, space))
if space <= configs['HORI_SPACE_TOO_CLOSE']:
return True
return False
def are_hlines_superscript(df, configs, idx1, idx2, debug=False):
if (df.iloc[idx1]['text_top'] > df.iloc[idx2]['text_top']):
if (df.iloc[idx1]['text_top'] - df.iloc[idx2]['text_top']) <= configs['SUPERSCRIPT_HEIGHT_DIFFERENCE']:
return True, idx1, idx2
if (df.iloc[idx2]['text_top'] > df.iloc[idx1]['text_top']):
if (df.iloc[idx2]['text_top'] - df.iloc[idx1]['text_top']) <= configs['SUPERSCRIPT_HEIGHT_DIFFERENCE']:
return True, idx2, idx1
return False, idx1, idx2
def are_hlines_close_enough(df, configs, idx1, idx2, debug=False):
if (abs(df.iloc[idx1]['text_width'] - df.iloc[idx2]['text_width']) / (max(df.iloc[idx1]['text_width'], df.iloc[idx2]['text_width'])) \
> configs['HORI_BLOCK_WDTH_DIFF_PERC'] ):
return True
return False
def are_hlines(df, configs, idx1, idx2, debug=False):
space = abs(df.iloc[idx1]['text_top'] - df.iloc[idx2]['text_top'])
if debug:
print('are_hlines:: idx1: %d, idx2: %d, space: %d' % (idx1, idx2, space))
return space <= configs['SUPERSCRIPT_HEIGHT_DIFFERENCE'] | import pandas as pd
from itertools import groupby
def arrange_grouped_line_indices(line_connections, debug=False):
lines = [list(i) for j, i in groupby(line_connections, lambda a: a[2])]
if debug:
print('arrange_grouped_line_indices: %s \n---------\n' % (str(lines)))
arranged_lines = []
for line_items in lines:
indices = []
for line_item in line_items:
indices.append(line_item[0])
indices.append(line_item[1])
indices = sorted(list(set(indices)))
arranged_lines.append([indices, line_items[0][2]])
if debug:
print('arrange_grouped_line_indices,arranged_lines : %s \n---------\n' % (str(arranged_lines)))
final_arranged_lines = []
if len(arranged_lines) == 1:
final_arranged_lines.append([arranged_lines[0][0], arranged_lines[0][1]])
else:
for index, line_item in enumerate(arranged_lines):
if index == 0 and line_item[1] == 'NOT_CONNECTED':
del line_item[0][-1]
if index > 0 and index < (len(arranged_lines) - 1) and line_item[1] == 'NOT_CONNECTED':
del line_item[0][0]
del line_item[0][-1]
if index == (len(arranged_lines) - 1) and line_item[1] == 'NOT_CONNECTED':
del line_item[0][0]
final_arranged_lines.append([line_item[0], line_item[1]])
if debug:
print('final_arrange_grouped_line_indices,arranged_lines : %s \n---------\n' % (str(final_arranged_lines)))
return final_arranged_lines
def are_lines_fonts_similar(df, configs, idx1, idx2, debug=False):
if (abs(df.iloc[idx1]['font_size'] - df.iloc[idx2]['font_size']) < 2.0) \
and (df.iloc[idx1]['font_family'] == df.iloc[idx2]['font_family']):
return True
return False
def are_hlines_aligned(df, configs, idx1, idx2, debug=False):
line1_left = df.iloc[idx1]['text_left']
line2_left = df.iloc[idx2]['text_left']
line1_right = df.iloc[idx1]['text_left'] + df.iloc[idx1]['text_width']
line2_right = df.iloc[idx2]['text_left'] + df.iloc[idx2]['text_width']
if (abs(line2_left - line1_left) < configs['LEFT_OR_RIGHT_ALIGNMENT_MARGIN']) and \
(abs(line2_right - line1_right) < configs['LEFT_OR_RIGHT_ALIGNMENT_MARGIN']):
return True
return False
def get_lines_upper_lower(df, idx1, idx2):
if df.iloc[idx2]['text_top'] > df.iloc[idx1]['text_top']:
return idx1, idx2
return idx2, idx1
def are_vlines(df, configs, idx1, idx2, debug=False):
first_idx, sec_idx = get_lines_upper_lower(df, idx1, idx2)
space = df.iloc[sec_idx]['text_top'] - (df.iloc[first_idx]['text_top'] + df.iloc[first_idx]['text_height'])
if space > configs['VERTICAL_SPACE_TOO_CLOSE']:
return True
return False
def are_vlines_close_enough(df, configs, idx1, idx2, debug=False):
space = ((df.iloc[idx1]['text_top'] + df.iloc[idx1]['text_height'])) - df.iloc[idx2]['text_top']
if debug:
print('are_vlines_too_close:: idx1: %d, idx2: %d, space: %d' % (idx1, idx2, space))
if space < configs['AVERAGE_VERTICAL_SPACE']:
return True
return False
def are_vlines_too_close(df, configs, idx1, idx2, debug=False):
first_idx, sec_idx = get_lines_upper_lower(df, idx1, idx2)
space = df.iloc[sec_idx]['text_top'] - (df.iloc[first_idx]['text_top'] + df.iloc[first_idx]['text_height'])
if debug:
print('are_vlines_too_close:: idx1: %d, idx2: %d, space: %d' % (idx1, idx2, space))
if space <= configs['VERTICAL_SPACE_TOO_CLOSE']:
return True
return False
def are_vlines_get_overlap(df, configs, idx1, idx2, debug=False):
first_idx, sec_idx = get_lines_upper_lower(df, idx1, idx2)
if (df.iloc[first_idx]['text_left'] + df.iloc[first_idx]['text_width']) < df.iloc[sec_idx]['text_left']:
return ('VLINES_ZERO_OVERLAP', 0.0)
def are_vlines_left_aligned(df, configs, idx1, idx2, debug=False):
return
# if abs(df.iloc[idx1]['text_left'] - df.iloc[idx2]['text_left']) < 0.3 *
def are_hlines_too_close(df, configs, idx1, idx2, debug=False):
space = abs((df.iloc[idx1]['text_left'] + df.iloc[idx1]['text_width']) - df.iloc[idx2]['text_left'])
if debug:
print('are_hlines_too_close:: idx1: %d, idx2: %d, space: %d' % (idx1, idx2, space))
if space <= configs['HORI_SPACE_TOO_CLOSE']:
return True
return False
def are_hlines_superscript(df, configs, idx1, idx2, debug=False):
if (df.iloc[idx1]['text_top'] > df.iloc[idx2]['text_top']):
if (df.iloc[idx1]['text_top'] - df.iloc[idx2]['text_top']) <= configs['SUPERSCRIPT_HEIGHT_DIFFERENCE']:
return True, idx1, idx2
if (df.iloc[idx2]['text_top'] > df.iloc[idx1]['text_top']):
if (df.iloc[idx2]['text_top'] - df.iloc[idx1]['text_top']) <= configs['SUPERSCRIPT_HEIGHT_DIFFERENCE']:
return True, idx2, idx1
return False, idx1, idx2
def are_hlines_close_enough(df, configs, idx1, idx2, debug=False):
if (abs(df.iloc[idx1]['text_width'] - df.iloc[idx2]['text_width']) / (max(df.iloc[idx1]['text_width'], df.iloc[idx2]['text_width'])) \
> configs['HORI_BLOCK_WDTH_DIFF_PERC'] ):
return True
return False
def are_hlines(df, configs, idx1, idx2, debug=False):
space = abs(df.iloc[idx1]['text_top'] - df.iloc[idx2]['text_top'])
if debug:
print('are_hlines:: idx1: %d, idx2: %d, space: %d' % (idx1, idx2, space))
return space <= configs['SUPERSCRIPT_HEIGHT_DIFFERENCE'] | en | 0.174909 | # if abs(df.iloc[idx1]['text_left'] - df.iloc[idx2]['text_left']) < 0.3 * | 3.177891 | 3 |
manoria_project/apps/manoria/managers.py | jtauber/team566 | 1 | 6621885 | <filename>manoria_project/apps/manoria/managers.py
from django.db import models
class KindManager(models.Manager):
def get_by_natural_key(self, slug):
return self.get(slug=slug)
| <filename>manoria_project/apps/manoria/managers.py
from django.db import models
class KindManager(models.Manager):
def get_by_natural_key(self, slug):
return self.get(slug=slug)
| none | 1 | 1.809064 | 2 | |
pyscf/fci/test/test_direct_nosym.py | nmardirossian/pyscf | 1 | 6621886 | #!/usr/bin/env python
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import fci
from pyscf.fci import fci_slow
nelec = (3,4)
norb = 8
h1e = numpy.random.random((norb,norb))
h2e = numpy.random.random((norb,norb,norb,norb))
h2e = h2e + h2e.transpose(2,3,0,1)
na = fci.cistring.num_strings(norb, nelec[0])
nb = fci.cistring.num_strings(norb, nelec[1])
ci0 = numpy.random.random((na,nb))
class KnowValues(unittest.TestCase):
def test_contract(self):
ci1ref = fci_slow.contract_1e(h1e, ci0, norb, nelec)
ci1 = fci.direct_nosym.contract_1e(h1e, ci0, norb, nelec)
self.assertTrue(numpy.allclose(ci1ref, ci1))
ci1ref = fci_slow.contract_2e(h2e, ci0, norb, nelec)
ci1 = fci.direct_nosym.contract_2e(h2e, ci0, norb, nelec)
self.assertTrue(numpy.allclose(ci1ref, ci1))
if __name__ == "__main__":
print("Full Tests for spin1")
unittest.main()
| #!/usr/bin/env python
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import fci
from pyscf.fci import fci_slow
nelec = (3,4)
norb = 8
h1e = numpy.random.random((norb,norb))
h2e = numpy.random.random((norb,norb,norb,norb))
h2e = h2e + h2e.transpose(2,3,0,1)
na = fci.cistring.num_strings(norb, nelec[0])
nb = fci.cistring.num_strings(norb, nelec[1])
ci0 = numpy.random.random((na,nb))
class KnowValues(unittest.TestCase):
def test_contract(self):
ci1ref = fci_slow.contract_1e(h1e, ci0, norb, nelec)
ci1 = fci.direct_nosym.contract_1e(h1e, ci0, norb, nelec)
self.assertTrue(numpy.allclose(ci1ref, ci1))
ci1ref = fci_slow.contract_2e(h2e, ci0, norb, nelec)
ci1 = fci.direct_nosym.contract_2e(h2e, ci0, norb, nelec)
self.assertTrue(numpy.allclose(ci1ref, ci1))
if __name__ == "__main__":
print("Full Tests for spin1")
unittest.main()
| ru | 0.26433 | #!/usr/bin/env python | 2.47025 | 2 |
formulae/power.py | dcl10/leccy-mod | 0 | 6621887 | from typing import Union
def power_dc(
current: Union[int, float], voltage: Union[int, float]
) -> Union[int, float]:
"""Calculate the electrical power of a DC circuit.
Args:
current (Union[int, float]): The current of the circuit, in Amperes (A).
voltage (Union[int, float]): The voltage of the circuit, in volts (V).
Returns:
Union[int, float]: The electrical power of the circuit, in Watts (W).
"""
return current * voltage
def consumption(kilowatts: Union[int, float], hours: int) -> Union[int, float]:
"""Calculate energy consumption, in kilowatt hours (kWh).
Args:
kilowatts (Union[int, float]): The power used, in kilowatts (kW).
hours (int): The number of hours the power is sustained, in hours (h)/
Returns:
Union[int, float]: _description_
"""
return kilowatts * hours
| from typing import Union
def power_dc(
current: Union[int, float], voltage: Union[int, float]
) -> Union[int, float]:
"""Calculate the electrical power of a DC circuit.
Args:
current (Union[int, float]): The current of the circuit, in Amperes (A).
voltage (Union[int, float]): The voltage of the circuit, in volts (V).
Returns:
Union[int, float]: The electrical power of the circuit, in Watts (W).
"""
return current * voltage
def consumption(kilowatts: Union[int, float], hours: int) -> Union[int, float]:
"""Calculate energy consumption, in kilowatt hours (kWh).
Args:
kilowatts (Union[int, float]): The power used, in kilowatts (kW).
hours (int): The number of hours the power is sustained, in hours (h)/
Returns:
Union[int, float]: _description_
"""
return kilowatts * hours
| en | 0.691182 | Calculate the electrical power of a DC circuit. Args: current (Union[int, float]): The current of the circuit, in Amperes (A). voltage (Union[int, float]): The voltage of the circuit, in volts (V). Returns: Union[int, float]: The electrical power of the circuit, in Watts (W). Calculate energy consumption, in kilowatt hours (kWh). Args: kilowatts (Union[int, float]): The power used, in kilowatts (kW). hours (int): The number of hours the power is sustained, in hours (h)/ Returns: Union[int, float]: _description_ | 4.297646 | 4 |
Coding/Find-balanced-substrings.py | KerinPithawala/Interview-Questions | 0 | 6621888 | <reponame>KerinPithawala/Interview-Questions<filename>Coding/Find-balanced-substrings.py<gh_stars>0
from collections import Counter
l=list(map(int,input().split(',')))
n=len(l)
out=[]
for i in range(n):
s=[]
for j in range(i+1,n):
if l[j]>l[i]:
s.append(l[j])
if (len(s)==0):
out.append(-1)
else:
c=dict(Counter(s))
l1=[]
v=max(c.values())
for i in c.keys():
if (c[i]==v):
l1.append(i)
out.append(min(l1))
print(*out,sep=",") | from collections import Counter
l=list(map(int,input().split(',')))
n=len(l)
out=[]
for i in range(n):
s=[]
for j in range(i+1,n):
if l[j]>l[i]:
s.append(l[j])
if (len(s)==0):
out.append(-1)
else:
c=dict(Counter(s))
l1=[]
v=max(c.values())
for i in c.keys():
if (c[i]==v):
l1.append(i)
out.append(min(l1))
print(*out,sep=",") | none | 1 | 3.104217 | 3 | |
webapp/element43/apps/api/migrations/0004_auto_20160119_0824.py | Ososope/eve_online | 0 | 6621889 | <filename>webapp/element43/apps/api/migrations/0004_auto_20160119_0824.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_system_check_fixes'),
]
operations = [
migrations.AlterModelOptions(
name='charskill',
options={'verbose_name': 'Character Skill', 'verbose_name_plural': 'Character Skills'},
),
migrations.AlterModelOptions(
name='corpdivision',
options={'verbose_name': 'Corporation Division', 'verbose_name_plural': 'Corporation Divisions'},
),
migrations.RemoveField(
model_name='corp',
name='id',
),
migrations.AlterField(
model_name='corp',
name='corp_id',
field=models.BigIntegerField(help_text=b'Corporation ID', serialize=False, primary_key=True),
),
migrations.AlterUniqueTogether(
name='apikey',
unique_together=set([('user', 'keyid')]),
),
migrations.AlterUniqueTogether(
name='apitimer',
unique_together=set([('character', 'corporation', 'apisheet')]),
),
migrations.AlterUniqueTogether(
name='charskill',
unique_together=set([('character', 'skill')]),
),
migrations.AlterUniqueTogether(
name='journalentry',
unique_together=set([('ref_id', 'character')]),
),
migrations.AlterUniqueTogether(
name='markettransaction',
unique_together=set([('journal_transaction_id', 'character', 'corporation')]),
),
]
| <filename>webapp/element43/apps/api/migrations/0004_auto_20160119_0824.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_system_check_fixes'),
]
operations = [
migrations.AlterModelOptions(
name='charskill',
options={'verbose_name': 'Character Skill', 'verbose_name_plural': 'Character Skills'},
),
migrations.AlterModelOptions(
name='corpdivision',
options={'verbose_name': 'Corporation Division', 'verbose_name_plural': 'Corporation Divisions'},
),
migrations.RemoveField(
model_name='corp',
name='id',
),
migrations.AlterField(
model_name='corp',
name='corp_id',
field=models.BigIntegerField(help_text=b'Corporation ID', serialize=False, primary_key=True),
),
migrations.AlterUniqueTogether(
name='apikey',
unique_together=set([('user', 'keyid')]),
),
migrations.AlterUniqueTogether(
name='apitimer',
unique_together=set([('character', 'corporation', 'apisheet')]),
),
migrations.AlterUniqueTogether(
name='charskill',
unique_together=set([('character', 'skill')]),
),
migrations.AlterUniqueTogether(
name='journalentry',
unique_together=set([('ref_id', 'character')]),
),
migrations.AlterUniqueTogether(
name='markettransaction',
unique_together=set([('journal_transaction_id', 'character', 'corporation')]),
),
]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.632391 | 2 |
scripts/image_downloader.py | RodolfoFerro/FacialRecognition | 1 | 6621890 | # ===============================================================
# Author: <NAME>
# Email: <EMAIL>
# Twitter: @FerroRodolfo
#
# Script: Image downloader using ImageSoup.
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script was originally created by <NAME>. Any
# explicit usage of this script or its contents is granted
# according to the license provided and its conditions.
# ===============================================================
from imagesoup import ImageSoup
from tqdm import tqdm
import os
# Set number of images and terms to look for:
n_images = 100
terms = "Tom Holland face"
def img_downloader(n_images, terms):
# Define paths:
db_path = "../db/original/"
# Clean folders:
os.system("rm ../db/original/*")
os.system("rm ../db/train/*")
# Create soup and search
print("Looking for {} images...".format(n_images))
soup = ImageSoup()
images = soup.search('"{}"'.format(terms), n_images=n_images)
# Save trainin percentage (train_per) of images in training folder:
n_images = len(images)
print("Downloading images of '{}'...".format(terms))
for i in tqdm(range(n_images)):
try:
images[i].to_file(db_path + "img_{:0>4}.jpg".format(i + 1))
except Exception:
pass
print("Now you must go to '../db/original' to delete useless images.")
if __name__ == "__main__":
img_downloader(n_images, terms)
| # ===============================================================
# Author: <NAME>
# Email: <EMAIL>
# Twitter: @FerroRodolfo
#
# Script: Image downloader using ImageSoup.
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script was originally created by <NAME>. Any
# explicit usage of this script or its contents is granted
# according to the license provided and its conditions.
# ===============================================================
from imagesoup import ImageSoup
from tqdm import tqdm
import os
# Set number of images and terms to look for:
n_images = 100
terms = "Tom Holland face"
def img_downloader(n_images, terms):
# Define paths:
db_path = "../db/original/"
# Clean folders:
os.system("rm ../db/original/*")
os.system("rm ../db/train/*")
# Create soup and search
print("Looking for {} images...".format(n_images))
soup = ImageSoup()
images = soup.search('"{}"'.format(terms), n_images=n_images)
# Save trainin percentage (train_per) of images in training folder:
n_images = len(images)
print("Downloading images of '{}'...".format(terms))
for i in tqdm(range(n_images)):
try:
images[i].to_file(db_path + "img_{:0>4}.jpg".format(i + 1))
except Exception:
pass
print("Now you must go to '../db/original' to delete useless images.")
if __name__ == "__main__":
img_downloader(n_images, terms)
| en | 0.834195 | # =============================================================== # Author: <NAME> # Email: <EMAIL> # Twitter: @FerroRodolfo # # Script: Image downloader using ImageSoup. # # ABOUT COPYING OR USING PARTIAL INFORMATION: # This script was originally created by <NAME>. Any # explicit usage of this script or its contents is granted # according to the license provided and its conditions. # =============================================================== # Set number of images and terms to look for: # Define paths: # Clean folders: # Create soup and search # Save trainin percentage (train_per) of images in training folder: | 2.966809 | 3 |
test/workbench_singleton.py | Ayub-Khan/workbench | 61 | 6621891 | ''' Spin up Workbench Server (this is a singleton module)'''
import multiprocessing
import workbench.server.workbench_server as workbench_server
print '\nStarting up the Workbench server...'
process = multiprocessing.Process(target=workbench_server.run)
process.start()
def shutdown():
# Terminate the workbench server process
print '\nShutting down the Workbench server...'
try:
process.terminate()
except OSError, error:
print 'Not able to shut down server, probably means another one is running...'
print 'Error %s' % error
| ''' Spin up Workbench Server (this is a singleton module)'''
import multiprocessing
import workbench.server.workbench_server as workbench_server
print '\nStarting up the Workbench server...'
process = multiprocessing.Process(target=workbench_server.run)
process.start()
def shutdown():
# Terminate the workbench server process
print '\nShutting down the Workbench server...'
try:
process.terminate()
except OSError, error:
print 'Not able to shut down server, probably means another one is running...'
print 'Error %s' % error
| en | 0.60792 | Spin up Workbench Server (this is a singleton module) # Terminate the workbench server process | 2.721485 | 3 |
homeassistant/components/keba/lock.py | orcema/core | 0 | 6621892 | """Support for KEBA charging station switch."""
from __future__ import annotations
from typing import Any
from homeassistant.components.lock import LockEntity
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DOMAIN, KebaHandler
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the KEBA charging station platform."""
if discovery_info is None:
return
keba: KebaHandler = hass.data[DOMAIN]
locks = [KebaLock(keba, "Authentication", "authentication")]
async_add_entities(locks)
class KebaLock(LockEntity):
"""The entity class for KEBA charging stations switch."""
_attr_should_poll = False
def __init__(self, keba: KebaHandler, name: str, entity_type: str) -> None:
"""Initialize the KEBA switch."""
self._keba = keba
self._attr_is_locked = True
self._attr_name = f"{keba.device_name} {name}"
self._attr_unique_id = f"{keba.device_id}_{entity_type}"
async def async_lock(self, **kwargs: Any) -> None:
"""Lock wallbox."""
await self._keba.async_stop()
async def async_unlock(self, **kwargs: Any) -> None:
"""Unlock wallbox."""
await self._keba.async_start()
async def async_update(self) -> None:
"""Attempt to retrieve on off state from the switch."""
self._attr_is_locked = self._keba.get_value("Authreq") == 1
def update_callback(self) -> None:
"""Schedule a state update."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self) -> None:
"""Add update callback after being added to hass."""
self._keba.add_update_listener(self.update_callback)
| """Support for KEBA charging station switch."""
from __future__ import annotations
from typing import Any
from homeassistant.components.lock import LockEntity
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DOMAIN, KebaHandler
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the KEBA charging station platform."""
if discovery_info is None:
return
keba: KebaHandler = hass.data[DOMAIN]
locks = [KebaLock(keba, "Authentication", "authentication")]
async_add_entities(locks)
class KebaLock(LockEntity):
"""The entity class for KEBA charging stations switch."""
_attr_should_poll = False
def __init__(self, keba: KebaHandler, name: str, entity_type: str) -> None:
"""Initialize the KEBA switch."""
self._keba = keba
self._attr_is_locked = True
self._attr_name = f"{keba.device_name} {name}"
self._attr_unique_id = f"{keba.device_id}_{entity_type}"
async def async_lock(self, **kwargs: Any) -> None:
"""Lock wallbox."""
await self._keba.async_stop()
async def async_unlock(self, **kwargs: Any) -> None:
"""Unlock wallbox."""
await self._keba.async_start()
async def async_update(self) -> None:
"""Attempt to retrieve on off state from the switch."""
self._attr_is_locked = self._keba.get_value("Authreq") == 1
def update_callback(self) -> None:
"""Schedule a state update."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self) -> None:
"""Add update callback after being added to hass."""
self._keba.add_update_listener(self.update_callback)
| en | 0.881802 | Support for KEBA charging station switch. Set up the KEBA charging station platform. The entity class for KEBA charging stations switch. Initialize the KEBA switch. Lock wallbox. Unlock wallbox. Attempt to retrieve on off state from the switch. Schedule a state update. Add update callback after being added to hass. | 2.207859 | 2 |
app/view.py | geonub/kakaobot_ | 0 | 6621893 | <reponame>geonub/kakaobot_<filename>app/view.py<gh_stars>0
from app import app
from flask import request, jsonify
import traceback
from .manager import APIHandler
def processFail():
message = APIAdmin.process("fail").getMessage()
viewLog("fail")
return jsonify(message)
@app.route("/keyboard", methods=["GET"])
def yellow_keyboard():
message, code = APIHandler.process("home")
return jsonify(message), code
@app.route("/message", methods=["POST"])
def yellow_message():
try:
message, code = APIHandler.process("message", request.json)
return jsonify(message), code
except:
traceback.print_exc()
return processFail(), 400
@app.route("/friend", methods=["POST"])
def yellow_friend_add():
message, code = APIHandler.process("add", request.json)
return jsonify(message), code
@app.route("/friend/<key>", methods=["DELETE"])
def yellow_friend_block(key):
message, code = APIHandler.process("block", key)
return jsonify(message), code
@app.route("/chat_room/<key>", methods=["DELETE"])
def yellow_exit(key):
message, code = APIHandler.process("exit", key)
return jsonify(message), code
| from app import app
from flask import request, jsonify
import traceback
from .manager import APIHandler
def processFail():
message = APIAdmin.process("fail").getMessage()
viewLog("fail")
return jsonify(message)
@app.route("/keyboard", methods=["GET"])
def yellow_keyboard():
message, code = APIHandler.process("home")
return jsonify(message), code
@app.route("/message", methods=["POST"])
def yellow_message():
try:
message, code = APIHandler.process("message", request.json)
return jsonify(message), code
except:
traceback.print_exc()
return processFail(), 400
@app.route("/friend", methods=["POST"])
def yellow_friend_add():
message, code = APIHandler.process("add", request.json)
return jsonify(message), code
@app.route("/friend/<key>", methods=["DELETE"])
def yellow_friend_block(key):
message, code = APIHandler.process("block", key)
return jsonify(message), code
@app.route("/chat_room/<key>", methods=["DELETE"])
def yellow_exit(key):
message, code = APIHandler.process("exit", key)
return jsonify(message), code | none | 1 | 2.303936 | 2 | |
GUI/components/confidence_sliders.py | Kumaken/fyp-vehicle-counting-system | 0 | 6621894 |
from GUI.strings.confidence_sliders import NMS_THRESHOLD_LABEL, YOLO_CONFIDENCE_THRESHOLD_LABEL
from GUI.components.sliders import Sliders
from PyQt5.QtWidgets import ( QVBoxLayout)
class ConfidenceSliders:
def __init__(self, parent=None):
self.parent = parent
self.layout = QVBoxLayout()
self.yolo_conf_slider = None
self.nms_slider = None
def getLayout(self):
return self.layout
def update(self):
self.yolo_conf_slider.setSliderValue(self.parent.getYoloConfidenceThreshold())
self.nms_slider.setSliderValue(self.parent.getNMSThreshold())
def setup(self):
self.yolo_conf_slider = Sliders(YOLO_CONFIDENCE_THRESHOLD_LABEL, 20, 0.01, self.parent).setSliderRange(0, 100, 1).setConnect(self.parent.setYoloConfidenceThreshold) # IMPORTANT: PASS SELF AS PARENT!
self.layout.addWidget(self.yolo_conf_slider.getComponent())
self.nms_slider = Sliders(NMS_THRESHOLD_LABEL, 40, 0.01, self.parent).setSliderRange(0, 100, 1).setConnect(self.parent.setNMSThreshold)
self.layout.addWidget(self.nms_slider.getComponent())
return self |
from GUI.strings.confidence_sliders import NMS_THRESHOLD_LABEL, YOLO_CONFIDENCE_THRESHOLD_LABEL
from GUI.components.sliders import Sliders
from PyQt5.QtWidgets import ( QVBoxLayout)
class ConfidenceSliders:
def __init__(self, parent=None):
self.parent = parent
self.layout = QVBoxLayout()
self.yolo_conf_slider = None
self.nms_slider = None
def getLayout(self):
return self.layout
def update(self):
self.yolo_conf_slider.setSliderValue(self.parent.getYoloConfidenceThreshold())
self.nms_slider.setSliderValue(self.parent.getNMSThreshold())
def setup(self):
self.yolo_conf_slider = Sliders(YOLO_CONFIDENCE_THRESHOLD_LABEL, 20, 0.01, self.parent).setSliderRange(0, 100, 1).setConnect(self.parent.setYoloConfidenceThreshold) # IMPORTANT: PASS SELF AS PARENT!
self.layout.addWidget(self.yolo_conf_slider.getComponent())
self.nms_slider = Sliders(NMS_THRESHOLD_LABEL, 40, 0.01, self.parent).setSliderRange(0, 100, 1).setConnect(self.parent.setNMSThreshold)
self.layout.addWidget(self.nms_slider.getComponent())
return self | en | 0.432706 | # IMPORTANT: PASS SELF AS PARENT! | 2.363008 | 2 |
broccoli/layer/base.py | naritotakizawa/broccoli | 5 | 6621895 | """ゲームキャンバスにおける、レイヤーを扱うモジュールです。
ゲームキャンバス内のデータは、レイヤーという層に格納されます。
背景は背景レイヤーに、キャラクターや物体はオブジェクトレイヤー、アイテムはアイテムレイヤーという具合です。
それらのレイヤーを作成するためのクラスを提供しています。
"""
import random
from broccoli.conf import settings
class BaseLayer:
    """Common base class for every canvas layer.

    A layer is a two-dimensional grid (a list of rows) whose cells hold
    materials (tiles, objects, items).  Subclasses define how the grid is
    created and what counts as an "empty" cell.
    """

    def __init__(self):
        # The 2-D grid holding the layer's contents and the game canvas the
        # layer is drawn on.  Both are attached after construction.
        self.layer = None
        self.canvas = None

    def put_material(self, material, x, y):
        """Store *material* in the cell at ``(x, y)``."""
        self[y][x] = material

    def all(self, include_none=True):
        """Yield ``(x, y, content)`` for every cell of the layer.

        When *include_none* is true, empty cells (the ``None`` values and
        empty lists used by object/item layers) are yielded as well;
        otherwise only cells with an existing material are yielded.
        """
        for y, row in enumerate(self):
            for x, content in enumerate(row):
                if content or include_none:
                    yield x, y, content

    def get(self, **kwargs):
        """Return the first material whose attributes match *kwargs*.

        Returns ``None`` when no material matches.
        """
        for _, _, material in self.all():
            mismatch = any(
                getattr(material, key, None) != value
                for key, value in kwargs.items()
            )
            if not mismatch:
                return material

    def filter(self, **kwargs):
        """Yield every material whose attributes match *kwargs*."""
        for _, _, material in self.all():
            mismatch = any(
                getattr(material, key, None) != value
                for key, value in kwargs.items()
            )
            if not mismatch:
                yield material

    def create_material(self, material_cls, x=None, y=None, **kwargs):
        """Instantiate a material, place it on the layer and draw it.

        *material_cls* may be a class object, but an instance can also be
        passed; in that case the material's ``__init__`` is not called,
        which makes it easy to reuse a material created elsewhere.
        """
        canvas = self.canvas
        system = canvas.system
        # No explicit position given: pick a random free cell instead.
        if x is None or y is None:
            x, y = self.get_random_empty_space(material_cls)
        kwargs.update(
            system=system,
            canvas=canvas,
            layer=self,
            x=x,
            y=y,
        )
        material = material_cls(**kwargs)
        image_id = canvas.create_image(
            x * settings.CELL_WIDTH,
            y * settings.CELL_HEIGHT,
            image=material.image, anchor='nw'
        )
        material.id = image_id
        self.put_material(material, x, y)
        return material

    def delete_material(self, material):
        """Remove *material* from the layer (subclass responsibility)."""
        raise NotImplementedError

    def get_empty_space(self, material=None):
        """Yield every free ``(x, y)`` cell.

        What "free" means differs per material kind, so each layer
        subclass overrides this.
        """
        raise NotImplementedError

    def get_random_empty_space(self, material=None):
        """Return one randomly chosen free cell."""
        candidates = list(self.get_empty_space(material))
        return random.choice(candidates)

    def __getitem__(self, item):
        """Delegate indexing to the underlying grid.

        Implemented so callers can write ``item_layer[y][x]`` instead of
        ``item_layer.layer[y][x]``.
        """
        return self.layer[item]
class BaseTileLayer(BaseLayer):
    """Base class for the background (tile) layer."""

    def __init__(self, x_length, y_length):
        super().__init__()
        self.x_length = x_length    # grid width, in cells
        self.y_length = y_length    # grid height, in cells
        self.first_tile_id = None   # canvas id of the first tile ever drawn

    def create(self):
        """Build the empty grid and draw the layer."""
        self.layer = [[None for _ in range(self.x_length)] for _ in range(self.y_length)]
        self.create_layer()

    def get_empty_space(self, material=None):
        """Yield every cell whose tile reports is_public() as True.

        Handy for placing special tiles (such as a goal tile) at random.
        Beware, though: it may also return the coordinates of an already
        special tile (a goal tile etc.), so use the tile layer's
        get_empty_space / get_random_empty_space with care.
        The material argument exists only to match the other layers'
        signatures and is unused here.
        """
        for x, y, tile in self.all():
            if tile.is_public():
                yield x, y

    def create_material(self, material_cls, x=None, y=None, **kwargs):
        material = super().create_material(material_cls, x=x, y=y, **kwargs)
        self.canvas.lower(material.id)  # backgrounds go to the bottom
        # Remember the id of the very first tile.  Objects are stacked ever
        # higher, tiles ever lower, and items are drawn just above the first
        # tile, so the final stacking order is: objects, items, tiles.
        if self.first_tile_id is None:
            self.first_tile_id = material.id
        return material

    def delete_material(self, material):
        """Remove a tile from the canvas only.

        Tiles are expected to always exist, so the grid cell is never set
        to None.  Use this when replacing an old tile with a new one: it
        merely removes the tile from the canvas (hides it); afterwards call
        create_material to put the new tile in place.
        """
        self.canvas.delete(material.id)
class BaseObjectLayer(BaseLayer):
    """Base class for the object (character/prop) layer."""

    def __init__(self):
        super().__init__()
        self.tile_layer = None  # backing tile layer; decides where objects may go

    def create(self):
        """Build the empty grid (same size as the tile layer) and draw it."""
        self.layer = [[None for _ in range(self.tile_layer.x_length)] for _ in range(self.tile_layer.y_length)]
        self.create_layer()

    def get_empty_space(self, material=None):
        """Yield every cell that can accept this object.

        A cell qualifies when its tile accepts the object and no other
        object occupies the cell yet.
        """
        for x, y, tile in self.tile_layer.all():
            if tile.is_public(obj=material) and self[y][x] is None:
                yield x, y

    def clear(self):
        """Reset every cell to None and remove the displayed objects."""
        for x, y, obj in self.all(include_none=False):
            self.delete_material(obj)

    def create_material(self, material_cls, x=None, y=None, **kwargs):
        material = super().create_material(material_cls, x=x, y=y, **kwargs)
        self.canvas.lift(material.id)  # objects are drawn topmost
        return material

    def delete_material(self, material):
        """Remove an object from both the grid and the canvas."""
        self[material.y][material.x] = None
        self.canvas.delete(material.id)
class BaseItemLayer(BaseLayer):
    """Base class for the item layer."""

    def __init__(self):
        super().__init__()
        self.tile_layer = None  # backing tile layer; decides where items may go

    def put_material(self, material, x, y):
        """Place an item at (x, y).

        Unlike the other layers a cell may hold several items, i.e. each
        cell is managed as a list, so placement appends to that list.
        """
        self[y][x].append(material)

    def create(self):
        """Build the empty grid (one list per cell) and draw the layer."""
        self.layer = [[[] for _ in range(self.tile_layer.x_length)] for _ in range(self.tile_layer.y_length)]
        self.create_layer()

    def get_empty_space(self, material=None):
        """Yield every cell where this item could be placed.

        A cell is considered available when its tile's is_public() (with no
        arguments) returns True.
        """
        for x, y, tile in self.tile_layer.all():
            if tile.is_public():
                yield x, y

    def clear(self):
        """Empty every cell and remove the displayed items."""
        for x, y, items in self.all(include_none=False):
            for item in items:
                self.delete_material(item)

    def create_material(self, material_cls, x=None, y=None, **kwargs):
        material = super().create_material(material_cls, x=x, y=y, **kwargs)
        self.canvas.lift(material.id, self.tile_layer.first_tile_id)  # just above the topmost tile
        return material

    def delete_material(self, material):
        """Remove an item from both its cell's list and the canvas."""
        self[material.y][material.x].remove(material)
        self.canvas.delete(material.id)

    def get(self, **kwargs):
        """Return the first item whose attributes match all kwargs."""
        for _, _, items in self.all():
            for item in items:
                for key, value in kwargs.items():
                    attr = getattr(item, key, None)
                    if attr != value:
                        break
                else:
                    return item

    def filter(self, **kwargs):
        """Yield every item whose attributes match all kwargs."""
        for _, _, items in self.all():
            for item in items:
                for key, value in kwargs.items():
                    attr = getattr(item, key, None)
                    if attr != value:
                        break
                else:
                    yield item
| """ゲームキャンバスにおける、レイヤーを扱うモジュールです。
ゲームキャンバス内のデータは、レイヤーという層に格納されます。
背景は背景レイヤーに、キャラクターや物体はオブジェクトレイヤー、アイテムはアイテムレイヤーという具合です。
それらのレイヤーを作成するためのクラスを提供しています。
"""
import random
from broccoli.conf import settings
class BaseLayer:
    """Base class for every layer on the game canvas.

    A layer is a 2-D grid (``self.layer``) holding materials; subclasses
    decide what a material is and what "empty" means.
    """

    def __init__(self):
        self.layer = None   # 2-D grid; built later by subclasses in create()
        self.canvas = None  # game canvas this layer is drawn on

    def put_material(self, material, x, y):
        """Register a material on the layer at (x, y)."""
        self[y][x] = material

    def all(self, include_none=True):
        """Yield every (x, y, content) cell of the layer.

        When include_none is True, empty cells (the None / empty-list
        placeholders of the object and item layers) are yielded as well;
        when False only cells actually holding a material are returned.
        """
        for y, row in enumerate(self):
            for x, col in enumerate(row):
                if include_none or col:
                    yield x, y, col

    def get(self, **kwargs):
        """Return the first material whose attributes match all kwargs."""
        for _, _, material in self.all():
            for key, value in kwargs.items():
                attr = getattr(material, key, None)
                if attr != value:
                    break
            else:
                return material

    def filter(self, **kwargs):
        """Yield every material whose attributes match all kwargs."""
        for _, _, material in self.all():
            for key, value in kwargs.items():
                attr = getattr(material, key, None)
                if attr != value:
                    break
            else:
                yield material

    def create_material(self, material_cls, x=None, y=None, **kwargs):
        """Create a material, place it on the layer and draw it on the canvas.

        material_cls may be a class or an already-built instance; for an
        instance __init__ is not called again, so a material created
        elsewhere can simply be reused.
        """
        canvas = self.canvas
        system = canvas.system
        # If no coordinates were given, pick a random empty cell.
        if x is None or y is None:
            x, y = self.get_random_empty_space(material_cls)
        kwargs.update({
            'system': system,
            'canvas': canvas,
            'layer': self,
            'x': x,
            'y': y,
        })
        material = material_cls(**kwargs)
        # Renamed from `id` so the builtin id() is no longer shadowed.
        image_id = self.canvas.create_image(
            x*settings.CELL_WIDTH,
            y*settings.CELL_HEIGHT,
            image=material.image, anchor='nw'
        )
        material.id = image_id
        self.put_material(material, x, y)
        return material

    def delete_material(self, material):
        """Remove a material; what removal means is subclass-specific."""
        raise NotImplementedError

    def get_empty_space(self, material=None):
        """Yield every empty cell.

        What counts as "empty" differs per material kind, so each subclass
        overrides this.
        """
        raise NotImplementedError

    def get_random_empty_space(self, material=None):
        """Return one random empty cell."""
        empty_spaces = list(self.get_empty_space(material))
        return random.choice(empty_spaces)

    def __getitem__(self, item):
        """Delegate indexing to self.layer so layer[y][x] works directly."""
        return self.layer[item]
class BaseTileLayer(BaseLayer):
    """Base class for the background (tile) layer."""

    def __init__(self, x_length, y_length):
        super().__init__()
        self.x_length = x_length    # grid width, in cells
        self.y_length = y_length    # grid height, in cells
        self.first_tile_id = None   # canvas id of the first tile ever drawn

    def create(self):
        """Build the empty grid and draw the layer."""
        self.layer = [[None for _ in range(self.x_length)] for _ in range(self.y_length)]
        self.create_layer()

    def get_empty_space(self, material=None):
        """Yield every cell whose tile reports is_public() as True.

        Handy for placing special tiles (such as a goal tile) at random.
        Beware, though: it may also return the coordinates of an already
        special tile (a goal tile etc.), so use the tile layer's
        get_empty_space / get_random_empty_space with care.
        The material argument exists only to match the other layers'
        signatures and is unused here.
        """
        for x, y, tile in self.all():
            if tile.is_public():
                yield x, y

    def create_material(self, material_cls, x=None, y=None, **kwargs):
        material = super().create_material(material_cls, x=x, y=y, **kwargs)
        self.canvas.lower(material.id)  # backgrounds go to the bottom
        # Remember the id of the very first tile.  Objects are stacked ever
        # higher, tiles ever lower, and items are drawn just above the first
        # tile, so the final stacking order is: objects, items, tiles.
        if self.first_tile_id is None:
            self.first_tile_id = material.id
        return material

    def delete_material(self, material):
        """Remove a tile from the canvas only.

        Tiles are expected to always exist, so the grid cell is never set
        to None.  Use this when replacing an old tile with a new one: it
        merely removes the tile from the canvas (hides it); afterwards call
        create_material to put the new tile in place.
        """
        self.canvas.delete(material.id)
class BaseObjectLayer(BaseLayer):
    """Base class for the object (character/prop) layer."""

    def __init__(self):
        super().__init__()
        self.tile_layer = None  # backing tile layer; decides where objects may go

    def create(self):
        """Build the empty grid (same size as the tile layer) and draw it."""
        self.layer = [[None for _ in range(self.tile_layer.x_length)] for _ in range(self.tile_layer.y_length)]
        self.create_layer()

    def get_empty_space(self, material=None):
        """Yield every cell that can accept this object.

        A cell qualifies when its tile accepts the object and no other
        object occupies the cell yet.
        """
        for x, y, tile in self.tile_layer.all():
            if tile.is_public(obj=material) and self[y][x] is None:
                yield x, y

    def clear(self):
        """Reset every cell to None and remove the displayed objects."""
        for x, y, obj in self.all(include_none=False):
            self.delete_material(obj)

    def create_material(self, material_cls, x=None, y=None, **kwargs):
        material = super().create_material(material_cls, x=x, y=y, **kwargs)
        self.canvas.lift(material.id)  # objects are drawn topmost
        return material

    def delete_material(self, material):
        """Remove an object from both the grid and the canvas."""
        self[material.y][material.x] = None
        self.canvas.delete(material.id)
class BaseItemLayer(BaseLayer):
    """Base class for the item layer."""

    def __init__(self):
        super().__init__()
        self.tile_layer = None  # backing tile layer; decides where items may go

    def put_material(self, material, x, y):
        """Place an item at (x, y).

        Unlike the other layers a cell may hold several items, i.e. each
        cell is managed as a list, so placement appends to that list.
        """
        self[y][x].append(material)

    def create(self):
        """Build the empty grid (one list per cell) and draw the layer."""
        self.layer = [[[] for _ in range(self.tile_layer.x_length)] for _ in range(self.tile_layer.y_length)]
        self.create_layer()

    def get_empty_space(self, material=None):
        """Yield every cell where this item could be placed.

        A cell is considered available when its tile's is_public() (with no
        arguments) returns True.
        """
        for x, y, tile in self.tile_layer.all():
            if tile.is_public():
                yield x, y

    def clear(self):
        """Empty every cell and remove the displayed items."""
        for x, y, items in self.all(include_none=False):
            for item in items:
                self.delete_material(item)

    def create_material(self, material_cls, x=None, y=None, **kwargs):
        material = super().create_material(material_cls, x=x, y=y, **kwargs)
        self.canvas.lift(material.id, self.tile_layer.first_tile_id)  # just above the topmost tile
        return material

    def delete_material(self, material):
        """Remove an item from both its cell's list and the canvas."""
        self[material.y][material.x].remove(material)
        self.canvas.delete(material.id)

    def get(self, **kwargs):
        """Return the first item whose attributes match all kwargs."""
        for _, _, items in self.all():
            for item in items:
                for key, value in kwargs.items():
                    attr = getattr(item, key, None)
                    if attr != value:
                        break
                else:
                    return item

    def filter(self, **kwargs):
        """Yield every item whose attributes match all kwargs."""
        for _, _, items in self.all():
            for item in items:
                for key, value in kwargs.items():
                    attr = getattr(item, key, None)
                    if attr != value:
                        break
                else:
                    yield item
| ja | 1.000037 | ゲームキャンバスにおける、レイヤーを扱うモジュールです。 ゲームキャンバス内のデータは、レイヤーという層に格納されます。 背景は背景レイヤーに、キャラクターや物体はオブジェクトレイヤー、アイテムはアイテムレイヤーという具合です。 それらのレイヤーを作成するためのクラスを提供しています。 全てのレイヤの基底クラス。 レイヤに、マテリアルを登録する。 レイヤ内のものを全て返す。 include_noneがTrueの場合、オブジェクトレイヤやアイテムレイヤで返されるNoneや空リストも含めて返します。 Falseの場合はそれらを省き、存在しているマテリアルだけ返します。 レイヤ内のマテリアルを検索する。 レイヤ内のマテリアルを検索する。 マテリアルの生成と初期設定、レイヤへの配置、キャンバスへの描画を行う。 material_clsはクラスオブジェクトを渡せますが、インスタンスも渡せます。 インスタンスを渡した場合は、そのマテリアルの__init__が呼ばれません。 既にほかの場所で作成したマテリアルを流用したい場合は、インスタンスを渡すだけで済みます。 # x,y座標の指定がなければ座標を探す マテリアルを削除する。 空いているスペースを全てyieldで返す。 マテリアルの種類によって空いているの定義が異なるため、それぞれでオーバーライドしています。 空いているスペースをランダムで1つ返す。 self.layerにデリゲート。 item_layer.layer[y][x]ではなく、 item_layer[y][x]と書くために実装しています。 背景レイヤの基底クラス。 レイヤーの作成、描画を行う。 空いているスペースを全てyieldで返す。 is_publicがTrueのタイルであれば空いているとみなします。 ランダムにゴールタイルなどを設定したい場合には便利です。 しかし逆に、既に存在する特殊なタイル(ゴールタイル等)の座標を返してしまう恐れもあるため、 tile_layerのget_empty_space及びget_random_empty_spaceの利用は注意してください。 material引数は他レイヤのメソッドの引数と合わせる必要があるために定義していますが、使いません。 # 背景は一番下に配置する # 一番はじめのタイルはIDを保存しておきます。 # オブジェクトはどんどん上に描画され、タイルはどんどん下に描画され、アイテムは最初のタイルの上に描画されます。 # 結果として、オブジェクト アイテム タイル という順番での重なりで描画されます。 タイルを削除する。 タイルは存在しているのが当然なため、レイヤ内にNoneを入れる等はできません。 このメソッドは、新しいタイルを設定する際に古いタイルを消したい、というケースに使ってください。 このメソッドはキャンバス上から消す(表示だけ消す)ことしか行いません。 その後にcreate_materialで、新しいタイルを設定してください。 オブジェクトレイヤの基底クラス。 レイヤーの作成、描画を行う。 空いているスペースを全てyieldで返す。 そのオブジェクトを受け入れるタイルであり、 まだオブジェクトがない座標ならばOK。 layer内を全てNoneにし、表示中のオブジェクトを削除します。 # オブジェクトは一番上に配置する マテリアルを削除する アイテムレイヤの基底クラス。 アイテムを配置する。 アイテムは1座標に複数格納できます。つまり、リストで管理しています。 そのため、アイテムの配置はappendメソッドを使います。 レイヤーの作成、描画を行う。 空いているスペースを全てyieldで返す。 そのアイテムにとって、配置可能な座標を返します。 tileのis_public(引数なし)がTrueであれば配置可能と考えます。 layer内を全てNoneにし、表示中のオブジェクトを削除します。 # 一番上にある背景の上 マテリアルを削除する レイヤ内のアイテムを検索する。 レイヤ内のアイテムを検索する。 | 2.929535 | 3 |
projects_api/apps.py | BerkeYazici/project-list-api | 0 | 6621896 | <reponame>BerkeYazici/project-list-api
from django.apps import AppConfig
class ProjectsApiConfig(AppConfig):
    """Django application configuration for the projects_api app."""
    # Dotted module path under which Django registers this application.
    name = 'projects_api'
| from django.apps import AppConfig
class ProjectsApiConfig(AppConfig):
name = 'projects_api' | none | 1 | 1.229379 | 1 | |
user_activities/migrations/0005_auto_20200123_1507.py | chopdgd/django-user-activities | 1 | 6621897 | # Generated by Django 2.1.2 on 2020-01-23 21:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make the `text` fields of Comment and Review
    optional in forms (blank=True)."""

    dependencies = [
        ('user_activities', '0004_auto_20191121_1151'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='text',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='review',
            name='text',
            field=models.TextField(blank=True),
        ),
    ]
| # Generated by Django 2.1.2 on 2020-01-23 21:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make the `text` fields of Comment and Review
    optional in forms (blank=True)."""

    dependencies = [
        ('user_activities', '0004_auto_20191121_1151'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='text',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='review',
            name='text',
            field=models.TextField(blank=True),
        ),
    ]
| en | 0.785256 | # Generated by Django 2.1.2 on 2020-01-23 21:07 | 1.336731 | 1 |
deploy/utils/field_generation.py | xingao267/healthcare | 0 | 6621898 | <reponame>xingao267/healthcare
"""field_generation provides utilities to manage generated_fields."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deploy.utils import utils
# The tag name of generated_fields.
GENERATED_FIELDS_NAME = 'generated_fields'
def update_generated_fields(input_yaml_path, new_config):
  """Return the input yaml's contents with generated_fields replaced.

  The generated_fields block is copied over from new_config when present
  there, and removed from the result otherwise.
  """
  contents = utils.read_yaml_file(input_yaml_path)
  if GENERATED_FIELDS_NAME in new_config:
    contents[GENERATED_FIELDS_NAME] = new_config[GENERATED_FIELDS_NAME]
  else:
    contents.pop(GENERATED_FIELDS_NAME, {})
  return contents
def rewrite_generated_fields_back(project_yaml, new_config):
  """Write the config file back to project_yaml with new generated_fields."""
  # Merge the generated_fields block from new_config into the existing file
  # contents, then overwrite the file in place.
  cfg_content = update_generated_fields(project_yaml, new_config)
  utils.write_yaml_file(cfg_content, project_yaml)
| """field_generation provides utilities to manage generated_fields."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deploy.utils import utils
# The tag name of generated_fields.
GENERATED_FIELDS_NAME = 'generated_fields'
def update_generated_fields(input_yaml_path, new_config):
  """Return the input yaml's contents with generated_fields replaced.

  The generated_fields block is copied over from new_config when present
  there, and removed from the result otherwise.
  """
  contents = utils.read_yaml_file(input_yaml_path)
  if GENERATED_FIELDS_NAME in new_config:
    contents[GENERATED_FIELDS_NAME] = new_config[GENERATED_FIELDS_NAME]
  else:
    contents.pop(GENERATED_FIELDS_NAME, {})
  return contents
def rewrite_generated_fields_back(project_yaml, new_config):
"""Write config file to output_yaml_path with new generated_fields."""
cfg_content = update_generated_fields(project_yaml, new_config)
utils.write_yaml_file(cfg_content, project_yaml) | en | 0.824578 | field_generation provides utilities to manage generated_fields. # The tag name of generated_fields. Get and update the generated_fields block of the input yaml. Write config file to output_yaml_path with new generated_fields. | 2.001385 | 2 |
dataStructure/queue/deque.py | jinbooooom/coding-for-interview | 8 | 6621899 | # -*- coding:utf-8 -*-
class Deque:
    """Double-ended queue backed by a plain Python list.

    The front of the deque is the end of the list; the rear is index 0.
    """

    def __init__(self):
        self.items = []

    def addFront(self, item):
        """Insert an element at the front."""
        self.items.append(item)

    def addRear(self, item):
        """Insert an element at the rear."""
        self.items.insert(0, item)

    def removeFront(self):
        """Remove and return the front element."""
        return self.items.pop()

    def removeRear(self):
        """Remove and return the rear element."""
        return self.items.pop(0)

    def size(self):
        """Return the number of stored elements."""
        return len(self.items)

    def isEmpty(self):
        """Return True when the deque holds no elements."""
        return len(self.items) == 0

    def clear(self):
        """Remove every element in place."""
        self.items.clear()
if __name__ == "__main__":
    # Quick manual demonstration of the Deque operations.
    d = Deque()
    d.addRear(5)
    d.addRear(6)
    d.addRear(7)
    d.addFront(8)
    d.addFront(9)
    d.addFront(10)
    print(d.items)   # [7, 6, 5, 8, 9, 10]
    d.removeFront()  # drops 10 (front = end of the list)
    print(d.items)
    d.removeRear()   # drops 7 (rear = index 0)
    print(d.items)
    d.clear()
    print(d.items)   # []
    print(d.isEmpty())  # True
| # -*- coding:utf-8 -*-
class Deque:
    """Double-ended queue backed by a plain Python list.

    The front of the deque is the end of the list; the rear is index 0.
    """

    def __init__(self):
        self.items = []

    def addFront(self, item):
        """Insert an element at the front."""
        self.items.append(item)

    def addRear(self, item):
        """Insert an element at the rear."""
        self.items.insert(0, item)

    def removeFront(self):
        """Remove and return the front element."""
        return self.items.pop()

    def removeRear(self):
        """Remove and return the rear element."""
        return self.items.pop(0)

    def size(self):
        """Return the number of stored elements."""
        return len(self.items)

    def isEmpty(self):
        """Return True when the deque holds no elements."""
        return len(self.items) == 0

    def clear(self):
        """Remove every element in place."""
        self.items.clear()
if __name__ == "__main__":
    # Quick manual demonstration of the Deque operations.
    d = Deque()
    d.addRear(5)
    d.addRear(6)
    d.addRear(7)
    d.addFront(8)
    d.addFront(9)
    d.addFront(10)
    print(d.items)   # [7, 6, 5, 8, 9, 10]
    d.removeFront()  # drops 10 (front = end of the list)
    print(d.items)
    d.removeRear()   # drops 7 (rear = index 0)
    print(d.items)
    d.clear()
    print(d.items)   # []
    print(d.isEmpty())  # True
| zh | 0.939705 | # -*- coding:utf-8 -*- # 首部添加元素 # 尾部添加元素 # 首部删除元素 # 尾部删除元素 | 3.776242 | 4 |
weakpoint/config.py | onesuper/weakpoint | 15 | 6621900 | <reponame>onesuper/weakpoint
import yaml
from weakpoint.exceptions import ConfigException
from weakpoint.fs import File
class Config(dict):
    """Configuration parsed from a YAML string.

    The parsed mapping is merged into this dict; any parse or structure
    problem is reported as a ConfigException.
    """

    def __init__(self, string):
        super(Config, self).__init__()
        try:
            # safe_load: never instantiate arbitrary Python objects from
            # (potentially untrusted) configuration input.
            self.update(yaml.safe_load(string))
        except yaml.YAMLError:
            raise ConfigException('YAML Error')
        except Exception:
            # e.g. the document is valid YAML but not a mapping.  Exception
            # (not a bare except) so KeyboardInterrupt/SystemExit propagate.
            raise ConfigException('Invalid config format.')
| import yaml
from weakpoint.exceptions import ConfigException
from weakpoint.fs import File
class Config(dict):
def __init__(self, string):
super(Config, self).__init__()
try:
self.update(yaml.load(string))
except yaml.YAMLError:
raise ConfigException('YAML Error')
except:
raise ConfigException('Invalid config format.') | none | 1 | 2.658964 | 3 | |
cardmarket_api/clients/product_client.py | SukiCZ/cardmarket_api | 0 | 6621901 | from .base import BaseClient
class ProductClient(BaseClient):
    """
    Manage product aggregates.
    """

    def products_get(self, **kwargs):
        """
        List products.  No parameters.
        """
        return self._request('GET', 'products', **kwargs)

    def products_post(
        self,
        id: str = None,
        name: str = None,
        length: int = None,
        width: int = None,
        height: int = None,
        weight: int = None,
        description: str = None,
        is_active: bool = True,
        release_date: str = None,
        product_image: str = None,
        category_id: str = None,
        values: dict = None,
        data: dict = None,
        **kwargs
    ):
        """
        Create a product.

        :id: An auto generated uuid used to identify the object.
        :name: pattern: ^[a-zA-Z0-9 ]+$, max_length=25, required
        :length: The length of the product in millimetres (mm), required
        :width: The width of the product in millimetres (mm), required
        :height: The height of the product in millimetres (mm), required
        :weight: The weight of the object in grams (g), required
        :description: max_length: 255
        :is_active: default true
        :release_date: example: 1970-01-31
        :product_image:
        :category_id: uuid, required
        :values: ProductAttributeValue objects referenced by UUID in a dictionary
        """
        payload = data or {}
        # Map each keyword argument onto its API field name.  Keys already
        # present in *data* take precedence (setdefault), matching the
        # behaviour of passing an explicit payload.
        field_values = {
            'id': id,
            'name': name,
            'length': length,
            'width': width,
            'height': height,
            'weight': weight,
            'description': description,
            'isActive': is_active,
            'releaseDate': release_date,
            'productImage': product_image,
            'categoryId': category_id,
            'values': values,
        }
        for field, value in field_values.items():
            if value is not None:
                payload.setdefault(field, value)
        return self._request('POST', 'products', data=payload, **kwargs)

    # TODO product detail

    def categories_get(self, **kwargs):
        """
        List product categories.  No parameters.
        """
        return self._request('GET', 'categories', **kwargs)
| from .base import BaseClient
class ProductClient(BaseClient):
    """
    Manage product aggregates.
    """

    def products_get(self, **kwargs):
        """
        List products.  No parameters.
        """
        return self._request('GET', 'products', **kwargs)

    def products_post(
        self,
        id: str = None,
        name: str = None,
        length: int = None,
        width: int = None,
        height: int = None,
        weight: int = None,
        description: str = None,
        is_active: bool = True,
        release_date: str = None,
        product_image: str = None,
        category_id: str = None,
        values: dict = None,
        data: dict = None,
        **kwargs
    ):
        """
        Create a product.

        :id: An auto generated uuid used to identify the object.
        :name: pattern: ^[a-zA-Z0-9 ]+$, max_length=25, required
        :length: The length of the product in millimetres (mm), required
        :width: The width of the product in millimetres (mm), required
        :height: The height of the product in millimetres (mm), required
        :weight: The weight of the object in grams (g), required
        :description: max_length: 255
        :is_active: default true
        :release_date: example: 1970-01-31
        :product_image:
        :category_id: uuid, required
        :values: ProductAttributeValue objects referenced by UUID in a dictionary
        """
        payload = data or {}
        # Map each keyword argument onto its API field name.  Keys already
        # present in *data* take precedence (setdefault), matching the
        # behaviour of passing an explicit payload.
        field_values = {
            'id': id,
            'name': name,
            'length': length,
            'width': width,
            'height': height,
            'weight': weight,
            'description': description,
            'isActive': is_active,
            'releaseDate': release_date,
            'productImage': product_image,
            'categoryId': category_id,
            'values': values,
        }
        for field, value in field_values.items():
            if value is not None:
                payload.setdefault(field, value)
        return self._request('POST', 'products', data=payload, **kwargs)

    # TODO product detail

    def categories_get(self, **kwargs):
        """
        List product categories.  No parameters.
        """
        return self._request('GET', 'categories', **kwargs)
| en | 0.635002 | Manage product aggregates. No parameters :id: An auto generated uuid used to identify the object. :name: pattern: ^[a-zA-Z0-9 ]+$, max_length=25, required :length: The length of the product in millimetres (mm), required :width: The width of the product in millimetres (mm), required :height: The height of the product in millimetres (mm), required :weight: The weight of the object in grams (g), required :description: max_length: 255 :is_active: default true :release_date: example: 1970-01-31 :product_image: :category_id: uuid, required :values: ProductAttributeValue objects referenced by UUID in a dictionary # TODO product detail No parameters | 2.861051 | 3 |
jj/servers/__init__.py | TeoDV/jj | 4 | 6621902 | from ._server import Server
__all__ = ("Server",)
| from ._server import Server
__all__ = ("Server",)
| none | 1 | 1.105828 | 1 | |
src/main.py | lendradxx/telebot | 1 | 6621903 | <filename>src/main.py<gh_stars>1-10
from telebot import TeleBot, types
from utils import core
from components import handler
if __name__ == "__main__":
    # Build the bot from the token stored in the configuration file.
    bot = TeleBot(core.getConfig("BOT", "TOKEN"))

    # Print Info Bot
    PREFIX = core.getConfig("BOT", "PREFIX")
    core.printHeaderLine("=")
    print(f"BOT NAME: @{bot.get_me().username}")
    print(f"PREFIX: {PREFIX}")
    core.printHeaderLine("=")

    # Handling message
    @bot.message_handler(content_types="text")
    def textHandler(message: types.Message):
        # Ignore text that is not prefixed as a command, and messages that
        # core.lateMsg considers stale (presumably by timestamp -- see utils.core).
        if not message.text.startswith(PREFIX): return
        if core.lateMsg(message.date): return
        handler.textHandler(bot=bot, msg=message)

    print("[LOG]: Listening")
    core.printHeaderLine("=")
    bot.infinity_polling(skip_pending=True)
| <filename>src/main.py<gh_stars>1-10
from telebot import TeleBot, types
from utils import core
from components import handler
if __name__ == "__main__":
    # Build the bot from the token stored in the configuration file.
    bot = TeleBot(core.getConfig("BOT", "TOKEN"))

    # Print Info Bot
    PREFIX = core.getConfig("BOT", "PREFIX")
    core.printHeaderLine("=")
    print(f"BOT NAME: @{bot.get_me().username}")
    print(f"PREFIX: {PREFIX}")
    core.printHeaderLine("=")

    # Handling message
    @bot.message_handler(content_types="text")
    def textHandler(message: types.Message):
        # Ignore text that is not prefixed as a command, and messages that
        # core.lateMsg considers stale (presumably by timestamp -- see utils.core).
        if not message.text.startswith(PREFIX): return
        if core.lateMsg(message.date): return
        handler.textHandler(bot=bot, msg=message)

    print("[LOG]: Listening")
    core.printHeaderLine("=")
    bot.infinity_polling(skip_pending=True)
| en | 0.064471 | # Print Info Bot # Handling message | 2.419917 | 2 |
src/wa_kat/templates/static/js/Lib/site-packages/rules_view.py | WebArchivCZ/WA-KAT | 3 | 6621904 | <filename>src/wa_kat/templates/static/js/Lib/site-packages/rules_view.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: brython (http://brython.info) (like python3)
#
"""
This module is used to set / get values from/to Rules section on the HTML page.
"""
#
# Imports =====================================================================
from browser import document
from descriptors import RadioDescriptor
from descriptors import StandardElDescriptor
# Functions & classes =========================================================
class RulesView(object):
    """
    View object for manipulation with rules on the HTML page.

    Each class attribute is a descriptor bound to a concrete form element,
    so reading/writing the attribute reads/writes the page.
    """
    budget = RadioDescriptor("budget_radio", val_type=int)
    gentle_fetch = RadioDescriptor("gentle_fetch_radio", val_type=str)
    local_traps = RadioDescriptor("local_traps_radio")
    calendars = RadioDescriptor("calendars_radio")
    youtube = RadioDescriptor("youtube_radio")
    global_reject = RadioDescriptor("global_reject_radio")
    javascript = RadioDescriptor("javascript_radio")
    frequency = StandardElDescriptor(document["freq"])

    def __init__(self):
        # Names of all descriptor-backed properties that
        # get_dict()/set_dict() handle.
        self._property_names = [
            "budget",
            "gentle_fetch",
            "local_traps",
            "calendars",
            "youtube",
            "global_reject",
            "javascript",
            "frequency",
        ]

    def get_dict(self):
        """
        Convert all rules to dict and return them.
        """
        out = {
            property_name: getattr(self, property_name)
            for property_name in self._property_names
        }
        # Normalise the frequency field to an int before handing it out.
        if "frequency" in out:
            out["frequency"] = int(out["frequency"])
        return out

    def set_dict(self, incomming):
        """
        Set all rules from the `incomming` dictionary.

        Only truthy values for known property names are applied.
        """
        for key, val in incomming.items():
            if val and key in self._property_names:
                setattr(self, key, val)
RulesViewController = RulesView()
| <filename>src/wa_kat/templates/static/js/Lib/site-packages/rules_view.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: brython (http://brython.info) (like python3)
#
"""
This module is used to set / get values from/to Rules section on the HTML page.
"""
#
# Imports =====================================================================
from browser import document
from descriptors import RadioDescriptor
from descriptors import StandardElDescriptor
# Functions & classes =========================================================
class RulesView(object):
    """
    View object for manipulation with rules on the HTML page.

    Each class attribute is a descriptor bound to a concrete form element,
    so reading/writing the attribute reads/writes the page.
    """
    budget = RadioDescriptor("budget_radio", val_type=int)
    gentle_fetch = RadioDescriptor("gentle_fetch_radio", val_type=str)
    local_traps = RadioDescriptor("local_traps_radio")
    calendars = RadioDescriptor("calendars_radio")
    youtube = RadioDescriptor("youtube_radio")
    global_reject = RadioDescriptor("global_reject_radio")
    javascript = RadioDescriptor("javascript_radio")
    frequency = StandardElDescriptor(document["freq"])

    def __init__(self):
        # Names of all descriptor-backed properties that
        # get_dict()/set_dict() handle.
        self._property_names = [
            "budget",
            "gentle_fetch",
            "local_traps",
            "calendars",
            "youtube",
            "global_reject",
            "javascript",
            "frequency",
        ]

    def get_dict(self):
        """
        Convert all rules to dict and return them.
        """
        out = {
            property_name: getattr(self, property_name)
            for property_name in self._property_names
        }
        # Normalise the frequency field to an int before handing it out.
        if "frequency" in out:
            out["frequency"] = int(out["frequency"])
        return out

    def set_dict(self, incomming):
        """
        Set all rules from the `incomming` dictionary.

        Only truthy values for known property names are applied.
        """
        for key, val in incomming.items():
            if val and key in self._property_names:
                setattr(self, key, val)
RulesViewController = RulesView()
| en | 0.531171 | #! /usr/bin/env python # -*- coding: utf-8 -*- # # Interpreter version: brython (http://brython.info) (like python3) # This module is used to set / get values from/to Rules section on the HTML page. # # Imports ===================================================================== # Functions & classes ========================================================= View object for manipulation with rules on the HTML page. Convert all rules to dict and return them. Set all rules from the `incomming` dictionary. | 2.670735 | 3 |
qwebirc/engines/staticengine.py | zuccon/qwebirc | 155 | 6621905 | <reponame>zuccon/qwebirc
from twisted.web import resource, server, static, error
import qwebirc.util as util
import pprint
from adminengine import AdminEngineAction
try:
from twisted.web.server import GzipEncoderFactory
GZIP_ENCODER = GzipEncoderFactory()
except ImportError:
GZIP_ENCODER = None
# TODO, cache gzip stuff
cache = {}
def clear_cache():
    """Drop the gzip cache by rebinding the module-level dict."""
    global cache
    cache = {}
class StaticEngine(static.File):
    """Static file resource that counts hits and forbids directory listings."""
    isLeaf = False
    # Shared hit counter, exposed via the admin engine below.
    hit = util.HitCounter()

    def __init__(self, *args, **kwargs):
        static.File.__init__(self, *args, **kwargs)

    def render(self, request):
        # Count every request before delegating to twisted's static
        # file rendering.
        self.hit(request)
        # temporarily disabled -- seems to eat big pages
        # if GZIP_ENCODER:
        #  request._encoder = GZIP_ENCODER.encoderForRequest(request) # HACK
        return static.File.render(self, request)

    @property
    def adminEngine(self):
        # Data shown on the admin engine page for this resource.
        return {
            #"GZip cache": [
            #("Contents: %s" % pprint.pformat(list(cache.keys())),)# AdminEngineAction("clear", d))
            #],
            "Hits": [
                (self.hit,),
            ]
        }

    def directoryListing(self):
        """Never list directories; respond with 403 Forbidden instead."""
        return error.ForbiddenResource()
| from twisted.web import resource, server, static, error
import qwebirc.util as util
import pprint
from adminengine import AdminEngineAction
try:
from twisted.web.server import GzipEncoderFactory
GZIP_ENCODER = GzipEncoderFactory()
except ImportError:
GZIP_ENCODER = None
# TODO, cache gzip stuff
cache = {}
def clear_cache():
    """Drop the gzip cache by rebinding the module-level dict."""
    global cache
    cache = {}
class StaticEngine(static.File):
isLeaf = False
hit = util.HitCounter()
def __init__(self, *args, **kwargs):
static.File.__init__(self, *args, **kwargs)
def render(self, request):
self.hit(request)
# temporarily disabled -- seems to eat big pages
# if GZIP_ENCODER:
# request._encoder = GZIP_ENCODER.encoderForRequest(request) # HACK
return static.File.render(self, request)
@property
def adminEngine(self):
return {
#"GZip cache": [
#("Contents: %s" % pprint.pformat(list(cache.keys())),)# AdminEngineAction("clear", d))
#],
"Hits": [
(self.hit,),
]
}
def directoryListing(self):
return error.ForbiddenResource() | en | 0.325746 | # TODO, cache gzip stuff # temporarily disabled -- seems to eat big pages # if GZIP_ENCODER: # request._encoder = GZIP_ENCODER.encoderForRequest(request) # HACK #"GZip cache": [ #("Contents: %s" % pprint.pformat(list(cache.keys())),)# AdminEngineAction("clear", d)) #], | 2.137383 | 2 |
apps/compile_graph/compile_graph.py | nickspeal/bikesy-server | 4 | 6621906 | #!/usr/bin/env python
from graphserver.compiler.compile_graph import main
main() | #!/usr/bin/env python
from graphserver.compiler.compile_graph import main
main() | ru | 0.26433 | #!/usr/bin/env python | 1.109334 | 1 |
convert_model_deploy.py | lshupingxl/crnn.pytorch | 1 | 6621907 | # -*- coding: utf-8 -*-
# @Time : 18-9-21 上午11:42
# @Author : zhoujun
import torch
from models.crnn import CRNN
def save(net, input, save_path):
    """Trace *net* with the example *input* and write the resulting
    TorchScript module to *save_path*."""
    net.eval()  # inference mode so tracing captures eval-time behaviour
    torch.jit.trace(net, input).save(save_path)
def load(model_path):
    """Load a TorchScript module previously written by save()."""
    return torch.jit.load(model_path)
if __name__ == '__main__':
    # Convert a trained CRNN checkpoint (state dict) into a deployable
    # TorchScript file.
    input = torch.Tensor(10, 3, 32, 320)  # dummy batch used for tracing
    model_path = './model.pth'
    net = CRNN(32, 3, 10, 256)
    net.load_state_dict(torch.load(model_path))
    save(net, input, './model.pt')
| # -*- coding: utf-8 -*-
# @Time : 18-9-21 上午11:42
# @Author : zhoujun
import torch
from models.crnn import CRNN
def save(net, input, save_path):
    """Trace *net* with the example *input* and write the resulting
    TorchScript module to *save_path*."""
    net.eval()  # inference mode so tracing captures eval-time behaviour
    torch.jit.trace(net, input).save(save_path)
def load(model_path):
    """Deserialize a TorchScript module previously written by ``save``."""
    module = torch.jit.load(model_path)
    return module
if __name__ == '__main__':
    # Example input in (N, C, H, W) layout; tracing fixes whatever example
    # shape is provided here, so W=320 pins the traced width.
    input = torch.Tensor(10, 3, 32, 320)
    model_path = './model.pth'
    # CRNN(32, 3, 10, 256) -- presumably (imgH, nc, nclass, nh); confirm
    # against models/crnn.py.
    net = CRNN(32, 3, 10, 256)
    net.load_state_dict(torch.load(model_path))
    # Convert the eager checkpoint into a deployable TorchScript file.
    save(net, input, './model.pt')
| zh | 0.318681 | # -*- coding: utf-8 -*- # @Time : 18-9-21 上午11:42 # @Author : zhoujun | 2.290905 | 2 |
libdmem-gdb.py | sdimitro/libdmem | 0 | 6621908 | <reponame>sdimitro/libdmem
#
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gdb
def symbol_address(symbol):
    """Resolve *symbol* to its address in the debuggee, or None.

    Tries the global symbol table first and falls back to the
    current-block lookup for static/local symbols.
    """
    sym = gdb.lookup_global_symbol(symbol)
    if sym is None:  # idiom fix: compare to None with `is`, not `==`
        # lookup_symbol returns (symbol, is_field_of_this); take the symbol.
        sym = gdb.lookup_symbol(symbol)[0]
    if sym is not None:
        return sym.value().address
    return None
def alloc_list_iter(func):
    """Apply *func* to every entry of libdmem's allocation list.

    The list is circular and anchored at the global
    ``dmem_alloc_list_head``; iteration stops when the walk wraps back
    to the head (or hits NULL on an empty/corrupt list).
    """
    head = symbol_address("dmem_alloc_list_head")
    p = head['dae_next']
    while int(p) != 0x0 and p != head:
        func(p)
        p = p['dae_next']
class WalkAllocatedBuffers(gdb.Command):
    """gdb command ``walk_alloc_bufs``: print every live buffer address.

    Each list node is a dmem_alloc_entry_t header; the user buffer starts
    immediately after it, so the printed address is
    entry + sizeof(dmem_alloc_entry_t).
    """

    def __init__(self) -> None:
        super().__init__("walk_alloc_bufs", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        sizeof_dae = gdb.lookup_type('dmem_alloc_entry_t').sizeof
        # int(p) + sizeof_dae is already a plain int; the original wrapped
        # it in a redundant second int() call.
        alloc_list_iter(lambda p: print(hex(int(p) + sizeof_dae)))


# Instantiate to register the command with gdb.
WalkAllocatedBuffers()
class WalkAllocatedEntries(gdb.Command):
    """gdb command ``walk_alloc_entries``: print each allocation-entry pointer."""

    def __init__(self) -> None:
        super().__init__("walk_alloc_entries", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # Unlike walk_alloc_bufs this prints the entry headers themselves,
        # so the (previously computed but unused) entry size is not needed.
        alloc_list_iter(print)


# Instantiate to register the command with gdb.
WalkAllocatedEntries()
class ShowAllocStacks(gdb.Command):
    """gdb command ``show_alloc_stacks``: dump each allocation's backtrace.

    Every live entry records the allocating thread and up to 11 return
    addresses in its ``dae_tx`` trace; print them in symbolic form.
    """

    def __init__(self) -> None:
        super().__init__("show_alloc_stacks", gdb.COMMAND_DATA)

    @staticmethod
    def print_stack(entry):
        trace = entry['dae_tx']
        print(f"{entry} allocated from {hex(trace['dt_thread'])} at:")
        # dt_stack holds at most 11 frames; a NULL slot terminates early.
        for f in range(11):
            frame = trace['dt_stack'][f]
            if int(frame) == 0x0:
                break
            # gdb renders a code pointer as "0xADDR <func+off>"; keep only
            # the symbolic part between the angle brackets.  NOTE(review):
            # assumes the frame always symbolizes -- an unsymbolized
            # address would make split('<')[1] raise IndexError.
            pretty_frame = str(frame).split('<')[1][:-1]
            print(f"\t{pretty_frame}")
        print()

    def invoke(self, arg, from_tty):
        alloc_list_iter(self.print_stack)


# Instantiate to register the command with gdb.
ShowAllocStacks()
| #
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gdb
def symbol_address(symbol):
    """Resolve *symbol* to its address in the debuggee, or None.

    Tries the global symbol table first and falls back to the
    current-block lookup for static/local symbols.
    """
    sym = gdb.lookup_global_symbol(symbol)
    if sym is None:  # idiom fix: compare to None with `is`, not `==`
        # lookup_symbol returns (symbol, is_field_of_this); take the symbol.
        sym = gdb.lookup_symbol(symbol)[0]
    if sym is not None:
        return sym.value().address
    return None
def alloc_list_iter(func):
    """Apply *func* to every entry of libdmem's allocation list.

    The list is circular and anchored at the global
    ``dmem_alloc_list_head``; iteration stops when the walk wraps back
    to the head (or hits NULL on an empty/corrupt list).
    """
    head = symbol_address("dmem_alloc_list_head")
    p = head['dae_next']
    while int(p) != 0x0 and p != head:
        func(p)
        p = p['dae_next']
class WalkAllocatedBuffers(gdb.Command):
    """gdb command ``walk_alloc_bufs``: print every live buffer address.

    Each list node is a dmem_alloc_entry_t header; the user buffer starts
    immediately after it, so the printed address is
    entry + sizeof(dmem_alloc_entry_t).
    """

    def __init__(self) -> None:
        super().__init__("walk_alloc_bufs", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        sizeof_dae = gdb.lookup_type('dmem_alloc_entry_t').sizeof
        # int(p) + sizeof_dae is already a plain int; the original wrapped
        # it in a redundant second int() call.
        alloc_list_iter(lambda p: print(hex(int(p) + sizeof_dae)))


# Instantiate to register the command with gdb.
WalkAllocatedBuffers()
class WalkAllocatedEntries(gdb.Command):
    """gdb command ``walk_alloc_entries``: print each allocation-entry pointer."""

    def __init__(self) -> None:
        super().__init__("walk_alloc_entries", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # Unlike walk_alloc_bufs this prints the entry headers themselves,
        # so the (previously computed but unused) entry size is not needed.
        alloc_list_iter(print)


# Instantiate to register the command with gdb.
WalkAllocatedEntries()
class ShowAllocStacks(gdb.Command):
    """gdb command ``show_alloc_stacks``: dump each allocation's backtrace.

    Every live entry records the allocating thread and up to 11 return
    addresses in its ``dae_tx`` trace; print them in symbolic form.
    """

    def __init__(self) -> None:
        super().__init__("show_alloc_stacks", gdb.COMMAND_DATA)

    @staticmethod
    def print_stack(entry):
        trace = entry['dae_tx']
        print(f"{entry} allocated from {hex(trace['dt_thread'])} at:")
        # dt_stack holds at most 11 frames; a NULL slot terminates early.
        for f in range(11):
            frame = trace['dt_stack'][f]
            if int(frame) == 0x0:
                break
            # gdb renders a code pointer as "0xADDR <func+off>"; keep only
            # the symbolic part between the angle brackets.  NOTE(review):
            # assumes the frame always symbolizes -- an unsymbolized
            # address would make split('<')[1] raise IndexError.
            pretty_frame = str(frame).split('<')[1][:-1]
            print(f"\t{pretty_frame}")
        print()

    def invoke(self, arg, from_tty):
        alloc_list_iter(self.print_stack)


# Instantiate to register the command with gdb.
ShowAllocStacks()
class File:
    """Minimal append-only CSV row writer."""

    def __init__(self, name="results.csv"):
        # Path of the file rows are appended to.
        self.name = name

    def writeA(self, val, op="a"):
        """Write *val* as one comma-separated, newline-terminated row.

        *op* is the mode passed to open() (append by default).
        """
        row = ','.join(str(item) for item in val)
        with open(self.name, op) as out:
            out.write(row + '\n')
class File:
    """Minimal append-only CSV row writer."""

    def __init__(self, name="results.csv"):
        # Path of the file rows are appended to.
        self.name = name

    def writeA(self, val, op="a"):
        """Write *val* as one comma-separated, newline-terminated row.

        *op* is the mode passed to open() (append by default).
        """
        row = ','.join(str(item) for item in val)
        with open(self.name, op) as out:
            out.write(row + '\n')
numpyPractice2.py | NikhilKanamarla/ML-Practice | 0 | 6621910 | import sys
import numpy as np
from PIL._util import *
from scipy.misc import imread, imsave, imresize
import matplotlib.pyplot as plt
from scipy import misc
from PIL import Image, ImageDraw
if __name__ == '__main__':
    a = np.array([[1, 2, 3], [6, 4, 5]])
    # start (inclusive), stop (exclusive), step size
    b = np.arange(start=0, stop=10, step=2)
    print(b)
    # all rows and certain columns
    slice = a[:, np.arange(start=0, stop=3, step=2)]
    print(slice)
    # change the shape
    slice = slice.reshape((4, 1))
    print(slice)
    x = np.array([1, 2, 3, 4, 5, 6])
    # condition, return if true, return if false
    slice2 = np.where(x > 2, x, 0)
    print(slice2)
    # PIL
    im = Image.open("/home/nikhil/Downloads/ForwardObserverDocuments/cowc-m/ProcessedData/retinanet_results/6.png")
    print(im.format, im.size, im.mode)
    im.show()
    # NOTE(review): a zero-area box (left==right, top==bottom) crops an
    # empty region -- presumably placeholder coordinates; verify.
    box = (10, 10, 10, 10)
    region = im.crop(box)
    region = region.transpose(Image.ROTATE_180)
    im.paste(region, box)
    # BUG FIX: Image.resize returns a *new* image; the original call
    # discarded the result, so test.png was saved at full size.
    im = im.resize((100, 100))
    im.save("test.png")
    im2 = ImageDraw.Draw(im)
    # Numpy Scipy multi dimensional image processing
    f = misc.face()
    misc.imsave('face.png', f)  # uses the Image module (PIL)
    plt.imshow(f)
    plt.savefig("tester.png")
| import sys
import numpy as np
from PIL._util import *
from scipy.misc import imread, imsave, imresize
import matplotlib.pyplot as plt
from scipy import misc
from PIL import Image, ImageDraw
if __name__ == '__main__':
    a = np.array([[1, 2, 3], [6, 4, 5]])
    # start (inclusive), stop (exclusive), step size
    b = np.arange(start=0, stop=10, step=2)
    print(b)
    # all rows and certain columns
    slice = a[:, np.arange(start=0, stop=3, step=2)]
    print(slice)
    # change the shape
    slice = slice.reshape((4, 1))
    print(slice)
    x = np.array([1, 2, 3, 4, 5, 6])
    # condition, return if true, return if false
    slice2 = np.where(x > 2, x, 0)
    print(slice2)
    # PIL
    im = Image.open("/home/nikhil/Downloads/ForwardObserverDocuments/cowc-m/ProcessedData/retinanet_results/6.png")
    print(im.format, im.size, im.mode)
    im.show()
    # NOTE(review): a zero-area box (left==right, top==bottom) crops an
    # empty region -- presumably placeholder coordinates; verify.
    box = (10, 10, 10, 10)
    region = im.crop(box)
    region = region.transpose(Image.ROTATE_180)
    im.paste(region, box)
    # BUG FIX: Image.resize returns a *new* image; the original call
    # discarded the result, so test.png was saved at full size.
    im = im.resize((100, 100))
    im.save("test.png")
    im2 = ImageDraw.Draw(im)
    # Numpy Scipy multi dimensional image processing
    f = misc.face()
    misc.imsave('face.png', f)  # uses the Image module (PIL)
    plt.imshow(f)
    plt.savefig("tester.png")
| en | 0.427697 | #start (inclusive), stop (exclusive), step size #all rows and certain columns #change the shape #condition, return if true, return if false #PIL #Numpy Scipy multi dimensional image processing # uses the Image module (PIL) | 3.010262 | 3 |
rookie/mysite/migrations/0029_auto_20210615_1501.py | chen1932390299/drf-backend-platform | 1 | 6621911 | <gh_stars>1-10
# Generated by Django 3.2.2 on 2021-06-15 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (2021-06-15): reorder/relabel the
    # scheduletrigger model and add a non-null project_name to testcase.

    dependencies = [
        ('mysite', '0028_scheduletrigger_schedule_args'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='scheduletrigger',
            options={'ordering': ['id'], 'verbose_name': '定时任务表'},
        ),
        migrations.AddField(
            model_name='testcase',
            name='project_name',
            # One-off default backfills existing rows; preserve_default=False
            # means the model keeps no default afterwards.
            field=models.CharField(default='供应链', max_length=50),
            preserve_default=False,
        ),
    ]
| # Generated by Django 3.2.2 on 2021-06-15 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (2021-06-15): reorder/relabel the
    # scheduletrigger model and add a non-null project_name to testcase.

    dependencies = [
        ('mysite', '0028_scheduletrigger_schedule_args'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='scheduletrigger',
            options={'ordering': ['id'], 'verbose_name': '定时任务表'},
        ),
        migrations.AddField(
            model_name='testcase',
            name='project_name',
            # One-off default backfills existing rows; preserve_default=False
            # means the model keeps no default afterwards.
            field=models.CharField(default='供应链', max_length=50),
            preserve_default=False,
        ),
    ]
def parseLogFile(filename):
    """Parse a CSV-style log file into per-name log data.

    Expected line format:
        timestamp,isHeader(1 or 0),name,loglevel,val1,val2,val3,...

    Returns a dict mapping each logger name to a dict with keys
    'header' (list of value names), 'timestamps' (list of floats) and
    'data' (value name -> list of floats).  Non-'I' (info) lines are
    printed instead of collected; comment lines starting with '#' and
    short/malformed lines are skipped.
    """
    nameData = {}
    with open(filename) as f:
        # For every line in the file, determine if it is a header and
        # handle appropriately.
        for line in f:
            split = line.split(',')
            # Skip comment lines and lines that are too small (like empty
            # lines).  Robustness fix: check emptiness BEFORE indexing
            # split[0][0] -- the original raised IndexError on a line whose
            # first field was empty (e.g. ",a,b,c").
            if not split[0] or split[0][0] == '#' or len(split) < 4:
                continue
            # Non-info log levels are echoed rather than graphed.
            if split[3] != 'I':
                print(line)
                continue
            # Remove trailing newline from the last value.
            split[-1] = split[-1].rstrip('\n')
            name = split[2]
            if split[1] == '1':  # Header line: (re)initialise this name.
                nameData[name] = {}
                nameData[name]['header'] = split[4:]
                nameData[name]['data'] = {}
                for valName in split[4:]:
                    nameData[name]['data'][valName] = []
                nameData[name]['timestamps'] = []
            else:  # Data line: append timestamp and each value.
                nameData[name]['timestamps'].append(float(split[0]))
                for valName, val in zip(nameData[name]['header'], split[4:]):
                    nameData[name]['data'][valName].append(float(val))
    return nameData
return nameData | def parseLogFile(filename):
"""Parses a log file and returns a dictionary of name to log data
which itself is a dictionary containing timestamps, data and the header
"""
# Timestamp,isHeader(1 or 0),name,logginglevel,val1,val2,val3,...
nameData = {}
with open(filename) as f:
# For every line in the file, determine if it is a header and handle appropriately
for line in f:
split = line.split(',')
# Ignore commented lines or lines that are too small like empty lines
if split[0][0] == '#' or len(split) < 4:
continue
# If Log Level isn't I then print out the line instead of graphing it
if split[3] != 'I':
print(line)
continue
# Remove trailing newline
split[-1] = split[-1].rstrip('\n')
name = split[2]
if split[1] == '1': # Handle header line
nameData[name] = {}
nameData[name]['header'] = split[4:]
nameData[name]['data'] = {}
for valName in split[4:]:
nameData[name]['data'][valName] = []
nameData[name]['timestamps'] = []
else: # Handle data line
nameData[name]['timestamps'].append(float(split[0]))
for valName, val in zip(nameData[name]['header'], split[4:]):
nameData[name]['data'][valName].append(float(val))
return nameData | en | 0.804434 | Parses a log file and returns a dictionary of name to log data which itself is a dictionary containing timestamps, data and the header # Timestamp,isHeader(1 or 0),name,logginglevel,val1,val2,val3,... # For every line in the file, determine if it is a header and handle appropriately # Ignore commented lines or lines that are too small like empty lines # If Log Level isn't I then print out the line instead of graphing it # Remove trailing newline # Handle header line # Handle data line | 3.324508 | 3 |
algorithms/string_matching/implicit_match.py | rn5l/rsc18 | 8 | 6621913 | <reponame>rn5l/rsc18
'''
Created on 17.04.2018
@author: malte
'''
import implicit
from nltk import stem as stem, tokenize as tokenise
from fuzzywuzzy import fuzz
import numpy as np
import pandas as pd
from scipy import sparse
class ImplicitStringMatch:
    """Playlist-continuation recommender matching playlists by *name*.

    Playlist names are normalised (tokenised + Porter-stemmed), mapped to
    name ids, and an implicit-feedback ALS model is trained over the
    (name_id x track) interaction matrix.  At prediction time the query
    name is matched exactly, or fuzzily via fuzzywuzzy, and tracks from
    the matched name (plus ALS-similar names) are scored.
    NOTE(review): the ``fuzzy`` and ``normalize`` flags are stored but
    never read below -- presumably dead configuration; verify.
    """

    def __init__(self, factors=32, neighbors=20, fuzzy=True, use_count=False, normalize=False, sim_weight=True, add_artists=False, item_key='track_id', artist_key='artist_id', session_key='playlist_id'):
        # Column names of the interaction frame.
        self.item_key = item_key
        self.artist_key = artist_key
        self.session_key = session_key
        # ALS latent dimensionality / number of similar names to expand to.
        self.factors = factors
        self.use_count = use_count
        self.add_artists = add_artists
        self.fuzzy = fuzzy
        self.neighbors = neighbors
        self.sim_weight = sim_weight
        self.normalize = normalize
        self.stemmer = stem.PorterStemmer()

    def train(self, train, test=None):
        """Fit the model from ``train`` dict of DataFrames (actions/playlists)."""
        self.actions = train['actions']
        self.playlists = train['playlists']
        #datat = test['actions']
        if self.add_artists:
            # Optionally treat each artist's track set as a pseudo-playlist
            # named after the artist, appended after the real playlists.
            new_actions = pd.DataFrame()
            new_actions['count']= self.actions.groupby(['artist_id','track_id']).size()
            new_actions = new_actions.reset_index()
            max_pl = self.playlists.playlist_id.max()
            new_actions['playlist_id'] = new_actions.artist_id.transform( lambda x: max_pl + x )
            self.actions = pd.concat( [ self.actions, new_actions ], sort=False )
            new_lists = pd.DataFrame()
            new_lists['artist_id'] = new_actions.groupby( ['playlist_id'] ).artist_id.min()
            new_lists = new_lists.reset_index()
            new_lists = new_lists.merge( train['artists'][ ['artist_id', 'artist_name'] ], on='artist_id', how='inner' )
            new_lists['name'] = new_lists['artist_name']
            del new_lists['artist_name']
            self.playlists = pd.concat( [ self.playlists, new_lists ], sort=False )
        #normalize playlist names
        self.playlists['name'] = self.playlists['name'].apply(lambda x: self.normalise(str(x), True, True))
        self.playlists['name_id'] = self.playlists['name'].astype( 'category' ).cat.codes
        self.playlists['count'] = self.playlists.groupby('name_id')['name_id'].transform('count')
        # Bidirectional name <-> name_id lookup series.
        self.nameidmap = pd.Series( index=self.playlists['name'], data=self.playlists['name_id'].values )
        self.nameidmap.drop_duplicates(inplace=True)
        self.nameidmap2 = pd.Series( index=self.playlists['name_id'], data=self.playlists['name'].values )
        self.nameidmap2.drop_duplicates(inplace=True)
        self.actions = self.actions.merge( self.playlists[['playlist_id', 'name_id']], on='playlist_id', how='inner' )
        # Global track popularity, used as a score tie-breaker later.
        pop = pd.DataFrame()
        pop['popularity'] = train['actions'].groupby( 'track_id' ).size()
        pop.reset_index(inplace=True)
        pop['confidence'] = pop['popularity'] / len( train['actions'] )
        pop.sort_values( ['confidence','track_id'], ascending=[False,True], inplace=True )
        self.pop = pop[['track_id','confidence']]
        self.pop.index = self.pop['track_id']
        #MF PART
        itemids = self.actions[self.item_key].unique()
        self.n_items = len(itemids)
        self.itemidmap = pd.Series(data=np.arange(self.n_items), index=itemids)
        self.itemidmap2 = pd.Series(index=np.arange(self.n_items), data=itemids)
        self.actions = pd.merge(self.actions, pd.DataFrame({self.item_key:self.itemidmap.index, 'ItemIdx':self.itemidmap[self.itemidmap.index].values}), on=self.item_key, how='inner')
        datac = pd.DataFrame()
        datac['count'] = self.actions.groupby( ['name_id','ItemIdx'] ).size()
        datac.reset_index( inplace=True )
        data = datac
        if self.use_count:
            datam = data['count']
        else:
            datam = np.ones( len(data) )
        #row_ind = data.ItemIdx
        #col_ind = data.name_id
        col_ind = data.ItemIdx
        row_ind = data.name_id
        # Sparse name_id x item interaction matrix for ALS.
        self.mat = sparse.csr_matrix((datam, (row_ind, col_ind)))
        self.model = implicit.als.AlternatingLeastSquares( factors=self.factors, iterations=10, regularization=0.07, use_gpu=False )
        #self.model = implicitu.bpr.BaysianPersonalizedRanking( factors=self.factors, iterations=self.epochs )
        self.model.fit(self.mat)
        self.tmp = self.mat.T
        #self.tmp = sparse.csr_matrix( ( len(col_ind.unique()), len(row_ind.unique()) ) )

    def predict(self, name=None, tracks=None, playlist_id=None, artists=None, num_hidden=None):
        """Return up to 500 (track_id, confidence) rows for playlist *name*.

        Tracks already in *tracks* (the seed) are filtered out.  A missing
        or NaN name yields an empty result frame.
        """
        tracks = [] if tracks is None else tracks
        res = pd.DataFrame()
        if name is None or type(name) is float:
            # NaN playlist names arrive as float; return an empty frame.
            res_dict = {}
            res_dict['track_id'] = []
            res_dict['confidence'] = []
            return pd.DataFrame.from_dict(res_dict)
        name = self.normalise(str(name), True, True)
        if not name in self.nameidmap:
            # No exact match: fall back to the closest known name, but only
            # if it is reasonably similar (fuzz ratio > 60).
            self.playlists['match'] = self.playlists['name'].apply( lambda n: fuzz.ratio(n,name) )
            self.playlists.sort_values( ['match','count','num_followers'], ascending=False, inplace=True )
            if self.playlists['match'].values[0] > 60:
                # playlists = playlists.head(10)
                # playlists['num'] = playlists.groupby('name')['name'].transform('count')
                # playlists.sort_values( 'num', ascending=False, inplace=True )
                new_name = self.playlists['name'].values[0]
                #print( name + ' => ' + new_name )
                #print( playlists )
                name = new_name
        #print( 'imatch' )
        #print( ' name: ' + name )
        if name in self.nameidmap:
            name_id = self.nameidmap[name]
            # Score = occurrence count under this name + global popularity.
            actions_for_name = self.actions[ self.actions.name_id == name_id ]
            res['confidence'] = actions_for_name.groupby( 'track_id' ).size()
            res.reset_index(inplace=True)
            res['confidence'] += self.pop.confidence[ res.track_id.values ].values
            res.sort_values( ['confidence','track_id'], ascending=[False,True], inplace=True )
            if self.neighbors > 0:
                # Expand with tracks from ALS-similar playlist names.
                similar = self.model.similar_items(name_id, N=self.neighbors)
                similar = pd.DataFrame({'name_id':[x[0] for x in similar], 'conf':[x[1] for x in similar]})
                actions_all = self.actions[ np.in1d( self.actions.name_id, similar.name_id.values ) ]
                actions_all = actions_all.merge( similar, on='name_id', how='inner' )
                res_syn = pd.DataFrame()
                if self.sim_weight:
                    res_syn['tmp'] = actions_all.groupby( ['track_id'] ).conf.sum()
                else:
                    res_syn['tmp'] = actions_all.groupby( ['track_id'] ).size()
                res_syn.reset_index(inplace=True)
                res_syn['tmp'] += self.pop.confidence[ res_syn.track_id.values ].values
                if len(res) > 0:
                    # Merge neighbour scores into the exact-name scores.
                    res = res.merge( res_syn, how="left", on='track_id' )
                    res['confidence'] += res['tmp'].fillna(0)
                    del res['tmp']
                res_syn['confidence'] = res_syn['tmp']
                del res_syn['tmp']
                # Append neighbour-only tracks not already present.
                mask = ~np.in1d( res_syn.track_id, res['track_id'] )
                if mask.sum() > 0:
                    res = pd.concat( [ res, res_syn[mask] ] )
        else:
            res['track_id'] = []
            res['confidence'] = []
        # Drop seed tracks and return the 500 highest-confidence rows.
        res = res[~np.in1d( res.track_id, tracks )]
        res.sort_values( ['confidence','track_id'], ascending=[False,True], inplace=True )
        return res.head(500)

    def normalise(self, s, tokenize=True, stemm=True):
        """Lower-case, optionally tokenise and Porter-stem *s*."""
        if tokenize:
            words = tokenise.wordpunct_tokenize(s.lower().strip())
        else:
            words = s.lower().strip().split( ' ' )
        if stemm:
            return ' '.join([self.stemmer.stem(w) for w in words])
        else:
            return ' '.join([w for w in words])
| '''
Created on 17.04.2018
@author: malte
'''
import implicit
from nltk import stem as stem, tokenize as tokenise
from fuzzywuzzy import fuzz
import numpy as np
import pandas as pd
from scipy import sparse
class ImplicitStringMatch:
    """Playlist-continuation recommender matching playlists by *name*.

    Playlist names are normalised (tokenised + Porter-stemmed), mapped to
    name ids, and an implicit-feedback ALS model is trained over the
    (name_id x track) interaction matrix.  At prediction time the query
    name is matched exactly, or fuzzily via fuzzywuzzy, and tracks from
    the matched name (plus ALS-similar names) are scored.
    NOTE(review): the ``fuzzy`` and ``normalize`` flags are stored but
    never read below -- presumably dead configuration; verify.
    """

    def __init__(self, factors=32, neighbors=20, fuzzy=True, use_count=False, normalize=False, sim_weight=True, add_artists=False, item_key='track_id', artist_key='artist_id', session_key='playlist_id'):
        # Column names of the interaction frame.
        self.item_key = item_key
        self.artist_key = artist_key
        self.session_key = session_key
        # ALS latent dimensionality / number of similar names to expand to.
        self.factors = factors
        self.use_count = use_count
        self.add_artists = add_artists
        self.fuzzy = fuzzy
        self.neighbors = neighbors
        self.sim_weight = sim_weight
        self.normalize = normalize
        self.stemmer = stem.PorterStemmer()

    def train(self, train, test=None):
        """Fit the model from ``train`` dict of DataFrames (actions/playlists)."""
        self.actions = train['actions']
        self.playlists = train['playlists']
        #datat = test['actions']
        if self.add_artists:
            # Optionally treat each artist's track set as a pseudo-playlist
            # named after the artist, appended after the real playlists.
            new_actions = pd.DataFrame()
            new_actions['count']= self.actions.groupby(['artist_id','track_id']).size()
            new_actions = new_actions.reset_index()
            max_pl = self.playlists.playlist_id.max()
            new_actions['playlist_id'] = new_actions.artist_id.transform( lambda x: max_pl + x )
            self.actions = pd.concat( [ self.actions, new_actions ], sort=False )
            new_lists = pd.DataFrame()
            new_lists['artist_id'] = new_actions.groupby( ['playlist_id'] ).artist_id.min()
            new_lists = new_lists.reset_index()
            new_lists = new_lists.merge( train['artists'][ ['artist_id', 'artist_name'] ], on='artist_id', how='inner' )
            new_lists['name'] = new_lists['artist_name']
            del new_lists['artist_name']
            self.playlists = pd.concat( [ self.playlists, new_lists ], sort=False )
        #normalize playlist names
        self.playlists['name'] = self.playlists['name'].apply(lambda x: self.normalise(str(x), True, True))
        self.playlists['name_id'] = self.playlists['name'].astype( 'category' ).cat.codes
        self.playlists['count'] = self.playlists.groupby('name_id')['name_id'].transform('count')
        # Bidirectional name <-> name_id lookup series.
        self.nameidmap = pd.Series( index=self.playlists['name'], data=self.playlists['name_id'].values )
        self.nameidmap.drop_duplicates(inplace=True)
        self.nameidmap2 = pd.Series( index=self.playlists['name_id'], data=self.playlists['name'].values )
        self.nameidmap2.drop_duplicates(inplace=True)
        self.actions = self.actions.merge( self.playlists[['playlist_id', 'name_id']], on='playlist_id', how='inner' )
        # Global track popularity, used as a score tie-breaker later.
        pop = pd.DataFrame()
        pop['popularity'] = train['actions'].groupby( 'track_id' ).size()
        pop.reset_index(inplace=True)
        pop['confidence'] = pop['popularity'] / len( train['actions'] )
        pop.sort_values( ['confidence','track_id'], ascending=[False,True], inplace=True )
        self.pop = pop[['track_id','confidence']]
        self.pop.index = self.pop['track_id']
        #MF PART
        itemids = self.actions[self.item_key].unique()
        self.n_items = len(itemids)
        self.itemidmap = pd.Series(data=np.arange(self.n_items), index=itemids)
        self.itemidmap2 = pd.Series(index=np.arange(self.n_items), data=itemids)
        self.actions = pd.merge(self.actions, pd.DataFrame({self.item_key:self.itemidmap.index, 'ItemIdx':self.itemidmap[self.itemidmap.index].values}), on=self.item_key, how='inner')
        datac = pd.DataFrame()
        datac['count'] = self.actions.groupby( ['name_id','ItemIdx'] ).size()
        datac.reset_index( inplace=True )
        data = datac
        if self.use_count:
            datam = data['count']
        else:
            datam = np.ones( len(data) )
        #row_ind = data.ItemIdx
        #col_ind = data.name_id
        col_ind = data.ItemIdx
        row_ind = data.name_id
        # Sparse name_id x item interaction matrix for ALS.
        self.mat = sparse.csr_matrix((datam, (row_ind, col_ind)))
        self.model = implicit.als.AlternatingLeastSquares( factors=self.factors, iterations=10, regularization=0.07, use_gpu=False )
        #self.model = implicitu.bpr.BaysianPersonalizedRanking( factors=self.factors, iterations=self.epochs )
        self.model.fit(self.mat)
        self.tmp = self.mat.T
        #self.tmp = sparse.csr_matrix( ( len(col_ind.unique()), len(row_ind.unique()) ) )

    def predict(self, name=None, tracks=None, playlist_id=None, artists=None, num_hidden=None):
        """Return up to 500 (track_id, confidence) rows for playlist *name*.

        Tracks already in *tracks* (the seed) are filtered out.  A missing
        or NaN name yields an empty result frame.
        """
        tracks = [] if tracks is None else tracks
        res = pd.DataFrame()
        if name is None or type(name) is float:
            # NaN playlist names arrive as float; return an empty frame.
            res_dict = {}
            res_dict['track_id'] = []
            res_dict['confidence'] = []
            return pd.DataFrame.from_dict(res_dict)
        name = self.normalise(str(name), True, True)
        if not name in self.nameidmap:
            # No exact match: fall back to the closest known name, but only
            # if it is reasonably similar (fuzz ratio > 60).
            self.playlists['match'] = self.playlists['name'].apply( lambda n: fuzz.ratio(n,name) )
            self.playlists.sort_values( ['match','count','num_followers'], ascending=False, inplace=True )
            if self.playlists['match'].values[0] > 60:
                # playlists = playlists.head(10)
                # playlists['num'] = playlists.groupby('name')['name'].transform('count')
                # playlists.sort_values( 'num', ascending=False, inplace=True )
                new_name = self.playlists['name'].values[0]
                #print( name + ' => ' + new_name )
                #print( playlists )
                name = new_name
        #print( 'imatch' )
        #print( ' name: ' + name )
        if name in self.nameidmap:
            name_id = self.nameidmap[name]
            # Score = occurrence count under this name + global popularity.
            actions_for_name = self.actions[ self.actions.name_id == name_id ]
            res['confidence'] = actions_for_name.groupby( 'track_id' ).size()
            res.reset_index(inplace=True)
            res['confidence'] += self.pop.confidence[ res.track_id.values ].values
            res.sort_values( ['confidence','track_id'], ascending=[False,True], inplace=True )
            if self.neighbors > 0:
                # Expand with tracks from ALS-similar playlist names.
                similar = self.model.similar_items(name_id, N=self.neighbors)
                similar = pd.DataFrame({'name_id':[x[0] for x in similar], 'conf':[x[1] for x in similar]})
                actions_all = self.actions[ np.in1d( self.actions.name_id, similar.name_id.values ) ]
                actions_all = actions_all.merge( similar, on='name_id', how='inner' )
                res_syn = pd.DataFrame()
                if self.sim_weight:
                    res_syn['tmp'] = actions_all.groupby( ['track_id'] ).conf.sum()
                else:
                    res_syn['tmp'] = actions_all.groupby( ['track_id'] ).size()
                res_syn.reset_index(inplace=True)
                res_syn['tmp'] += self.pop.confidence[ res_syn.track_id.values ].values
                if len(res) > 0:
                    # Merge neighbour scores into the exact-name scores.
                    res = res.merge( res_syn, how="left", on='track_id' )
                    res['confidence'] += res['tmp'].fillna(0)
                    del res['tmp']
                res_syn['confidence'] = res_syn['tmp']
                del res_syn['tmp']
                # Append neighbour-only tracks not already present.
                mask = ~np.in1d( res_syn.track_id, res['track_id'] )
                if mask.sum() > 0:
                    res = pd.concat( [ res, res_syn[mask] ] )
        else:
            res['track_id'] = []
            res['confidence'] = []
        # Drop seed tracks and return the 500 highest-confidence rows.
        res = res[~np.in1d( res.track_id, tracks )]
        res.sort_values( ['confidence','track_id'], ascending=[False,True], inplace=True )
        return res.head(500)

    def normalise(self, s, tokenize=True, stemm=True):
        """Lower-case, optionally tokenise and Porter-stem *s*."""
        if tokenize:
            words = tokenise.wordpunct_tokenize(s.lower().strip())
        else:
            words = s.lower().strip().split( ' ' )
        if stemm:
            return ' '.join([self.stemmer.stem(w) for w in words])
        else:
            return ' '.join([w for w in words])
data/umls/make_folds.py | issca/inferbeddings | 33 | 6621914 | <filename>data/umls/make_folds.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import logging
import numpy as np
from sklearn.cross_validation import KFold, train_test_split
def read_triples(path):
    """Read whitespace-separated (subject, predicate, object) triples.

    Each line of *path* must contain exactly three whitespace-separated
    fields; surrounding whitespace is stripped from each field.
    """
    triples = []
    with open(path, 'rt') as f:
        for line in f:
            s, p, o = line.split()
            triples.append((s.strip(), p.strip(), o.strip()))
    return triples
def main(argv):
    """Split the triple file into 10 cross-validation folds on disk.

    For each fold, 1/10 of the triples become the test set, an equally
    sized deterministic sample becomes validation, and the remainder is
    training.  Output goes to folds/<k>/umls_{train,valid,test}.tsv.
    """
    def formatter(prog):
        # Wide help layout for the long option descriptions.
        return argparse.HelpFormatter(prog, max_help_position=100, width=200)
    argparser = argparse.ArgumentParser('K-Folder for Knowledge Graphs', formatter_class=formatter)
    argparser.add_argument('triples', action='store', type=str, default=None)
    args = argparser.parse_args(argv)
    triples_path = args.triples
    triples = read_triples(triples_path)
    nb_triples = len(triples)
    # NOTE(review): sklearn.cross_validation is the pre-0.18 API (removed
    # in 0.20); modern sklearn uses KFold(n_splits=...).split(X) from
    # sklearn.model_selection -- confirm the pinned sklearn version.
    kf = KFold(n=nb_triples, n_folds=10, random_state=0, shuffle=True)
    triples_np = np.array(triples)
    for fold_no, (train_idx, test_idx) in enumerate(kf):
        train_valid_triples = triples_np[train_idx]
        test_triples = triples_np[test_idx]
        # Carve a validation set the same size as the test set out of the
        # remaining triples (deterministic via random_state=0).
        train_triples, valid_triples, _, _ = train_test_split(train_valid_triples,
                                                              np.ones(train_valid_triples.shape[0]),
                                                              test_size=len(test_triples), random_state=0)
        train_lines = ['{}\t{}\t{}'.format(s, p, o) for [s, p, o] in train_triples]
        valid_lines = ['{}\t{}\t{}'.format(s, p, o) for [s, p, o] in valid_triples]
        test_lines = ['{}\t{}\t{}'.format(s, p, o) for [s, p, o] in test_triples]
        if not os.path.exists('folds/{}'.format(str(fold_no))):
            os.mkdir('folds/{}'.format(str(fold_no)))
        with open('folds/{}/umls_train.tsv'.format(str(fold_no)), 'w') as f:
            f.writelines(['{}\n'.format(line) for line in train_lines])
        with open('folds/{}/umls_valid.tsv'.format(str(fold_no)), 'w') as f:
            f.writelines(['{}\n'.format(line) for line in valid_lines])
        with open('folds/{}/umls_test.tsv'.format(str(fold_no)), 'w') as f:
            f.writelines(['{}\n'.format(line) for line in test_lines])
if __name__ == '__main__':
    # Script entry point: log at INFO and forward CLI args (minus argv[0]).
    logging.basicConfig(level=logging.INFO)
    main(sys.argv[1:])
| <filename>data/umls/make_folds.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import logging
import numpy as np
from sklearn.cross_validation import KFold, train_test_split
def read_triples(path):
    """Read whitespace-separated (subject, predicate, object) triples.

    Each line of *path* must contain exactly three whitespace-separated
    fields; surrounding whitespace is stripped from each field.
    """
    triples = []
    with open(path, 'rt') as f:
        for line in f:
            s, p, o = line.split()
            triples.append((s.strip(), p.strip(), o.strip()))
    return triples
def main(argv):
    """Split the triple file into 10 cross-validation folds on disk.

    For each fold, 1/10 of the triples become the test set, an equally
    sized deterministic sample becomes validation, and the remainder is
    training.  Output goes to folds/<k>/umls_{train,valid,test}.tsv.
    """
    def formatter(prog):
        # Wide help layout for the long option descriptions.
        return argparse.HelpFormatter(prog, max_help_position=100, width=200)
    argparser = argparse.ArgumentParser('K-Folder for Knowledge Graphs', formatter_class=formatter)
    argparser.add_argument('triples', action='store', type=str, default=None)
    args = argparser.parse_args(argv)
    triples_path = args.triples
    triples = read_triples(triples_path)
    nb_triples = len(triples)
    # NOTE(review): sklearn.cross_validation is the pre-0.18 API (removed
    # in 0.20); modern sklearn uses KFold(n_splits=...).split(X) from
    # sklearn.model_selection -- confirm the pinned sklearn version.
    kf = KFold(n=nb_triples, n_folds=10, random_state=0, shuffle=True)
    triples_np = np.array(triples)
    for fold_no, (train_idx, test_idx) in enumerate(kf):
        train_valid_triples = triples_np[train_idx]
        test_triples = triples_np[test_idx]
        # Carve a validation set the same size as the test set out of the
        # remaining triples (deterministic via random_state=0).
        train_triples, valid_triples, _, _ = train_test_split(train_valid_triples,
                                                              np.ones(train_valid_triples.shape[0]),
                                                              test_size=len(test_triples), random_state=0)
        train_lines = ['{}\t{}\t{}'.format(s, p, o) for [s, p, o] in train_triples]
        valid_lines = ['{}\t{}\t{}'.format(s, p, o) for [s, p, o] in valid_triples]
        test_lines = ['{}\t{}\t{}'.format(s, p, o) for [s, p, o] in test_triples]
        if not os.path.exists('folds/{}'.format(str(fold_no))):
            os.mkdir('folds/{}'.format(str(fold_no)))
        with open('folds/{}/umls_train.tsv'.format(str(fold_no)), 'w') as f:
            f.writelines(['{}\n'.format(line) for line in train_lines])
        with open('folds/{}/umls_valid.tsv'.format(str(fold_no)), 'w') as f:
            f.writelines(['{}\n'.format(line) for line in valid_lines])
        with open('folds/{}/umls_test.tsv'.format(str(fold_no)), 'w') as f:
            f.writelines(['{}\n'.format(line) for line in test_lines])
if __name__ == '__main__':
    # Script entry point: log at INFO and forward CLI args (minus argv[0]).
    logging.basicConfig(level=logging.INFO)
    main(sys.argv[1:])
| en | 0.308914 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- | 2.363548 | 2 |
handshake.py | bocoup/wspy | 8 | 6621915 | <reponame>bocoup/wspy<gh_stars>1-10
import os
import re
import socket
import time
from base64 import b64encode
from hashlib import sha1
from urlparse import urlparse
from errors import HandshakeError
from python_digest import build_authorization_request
# GUID from RFC 6455 section 1.3, concatenated with the client key when
# computing Sec-WebSocket-Accept.
WS_GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
# Only WebSocket protocol version 13 (RFC 6455) is supported.
WS_VERSION = '13'
MAX_REDIRECTS = 10
# Limits applied while reading handshake headers (seconds / bytes).
HDR_TIMEOUT = 5
MAX_HDR_LEN = 1024
class Handshake(object):
    """Shared machinery for reading/writing WebSocket HTTP handshakes.

    Python 2 code: header data is accumulated as byte strings and only
    decoded to unicode once the full header block has been received.
    """

    def __init__(self, wsock):
        self.wsock = wsock
        self.sock = wsock.sock

    def fail(self, msg):
        # Abort the handshake: close the socket and surface the reason.
        self.sock.close()
        raise HandshakeError(msg)

    def receive_request(self):
        """Read a client handshake; return (location, headers)."""
        raw, headers = self.receive_headers()
        # Request must be HTTP (at least 1.1) GET request, find the location
        # (without trailing slash)
        match = re.search(r'^GET (.*?)/* HTTP/1.1\r\n', raw)
        if match is None:
            self.fail('not a valid HTTP 1.1 GET request')
        location = match.group(1)
        return location, headers

    def receive_response(self):
        """Read a server handshake; return (status_code, headers)."""
        raw, headers = self.receive_headers()
        # Response must be HTTP (at least 1.1) with status 101
        match = re.search(r'^HTTP/1\.1 (\d{3})', raw)
        if match is None:
            self.fail('not a valid HTTP 1.1 response')
        status = int(match.group(1))
        return status, headers

    def receive_headers(self):
        """Read the raw HTTP header block byte-by-byte.

        Enforces MAX_HDR_LEN and an overall deadline (HDR_TIMEOUT when the
        socket is blocking, otherwise the socket's own timeout); the
        remaining time budget is re-armed before every recv().  Returns
        (decoded_header_text, {header: value}) with repeated headers
        joined by ", ".
        """
        # Receive entire HTTP header
        hdr = ''
        sock_timeout = self.sock.gettimeout()
        try:
            force_timeout = sock_timeout is None
            timeout = HDR_TIMEOUT if force_timeout else sock_timeout
            self.sock.settimeout(timeout)
            start_time = time.time()
            while hdr[-4:] != '\r\n\r\n':
                if len(hdr) == MAX_HDR_LEN:
                    raise HandshakeError('request exceeds maximum header '
                                         'length of %d' % MAX_HDR_LEN)
                hdr += self.sock.recv(1)
                time_diff = time.time() - start_time
                if time_diff > timeout:
                    raise socket.timeout
                self.sock.settimeout(timeout - time_diff)
        except socket.timeout:
            self.sock.close()
            raise HandshakeError('timeout while receiving handshake headers')
        # Restore the caller's original timeout setting.
        self.sock.settimeout(sock_timeout)
        hdr = hdr.decode('utf-8', 'ignore')
        headers = {}
        for key, value in re.findall(r'(.*?): ?(.*?)\r\n', hdr):
            if key in headers:
                headers[key] += ', ' + value
            else:
                headers[key] = value
        return hdr, headers

    def send_headers(self, headers):
        # Send request
        for hdr in list(headers):
            if isinstance(hdr, tuple):
                hdr = '%s: %s' % hdr
            self.sock.sendall(hdr + '\r\n')
        self.sock.sendall('\r\n')

    def perform(self):
        # Implemented by ClientHandshake / ServerHandshake subclasses.
        raise NotImplementedError
class ServerHandshake(Handshake):
    """
    Executes a handshake as the server end point of the socket. If the HTTP
    request headers sent by the client are invalid, a HandshakeError is raised.
    """

    def perform(self, ssock):
        # Receive and validate client handshake
        location, headers = self.receive_request()
        self.wsock.location = location
        self.wsock.request_headers = headers
        # Send server handshake in response
        self.send_headers(self.response_headers(ssock))

    def response_headers(self, ssock):
        """
        Validate the stored client request headers against the server
        socket's configuration (trusted origins, protocols, extensions,
        served locations) and yield the HTTP 101 response: first the
        status line, then (name, value) header pairs.  Any violation
        aborts via ``self.fail()`` -> HandshakeError.
        """
        headers = self.wsock.request_headers
        # Check if headers that MUST be present are actually present
        for name in ('Host', 'Upgrade', 'Connection', 'Sec-WebSocket-Key',
                     'Sec-WebSocket-Version'):
            if name not in headers:
                self.fail('missing "%s" header' % name)
        # Check WebSocket version used by client
        version = headers['Sec-WebSocket-Version']
        if version != WS_VERSION:
            self.fail('WebSocket version %s requested (only %s is supported)'
                      % (version, WS_VERSION))
        # Verify required header keywords
        if 'websocket' not in headers['Upgrade'].lower():
            self.fail('"Upgrade" header must contain "websocket"')
        if 'upgrade' not in headers['Connection'].lower():
            self.fail('"Connection" header must contain "Upgrade"')
        # Origin must be present if browser client, and must match the list of
        # trusted origins
        if 'Origin' not in headers and 'User-Agent' in headers:
            self.fail('browser client must specify "Origin" header')
        origin = headers.get('Origin', 'null')
        if origin == 'null':
            if ssock.trusted_origins:
                self.fail('no "Origin" header specified, assuming untrusted')
        elif ssock.trusted_origins and origin not in ssock.trusted_origins:
            self.fail('untrusted origin "%s"' % origin)
        # Only a supported protocol can be returned (first match wins)
        client_proto = split_stripped(headers['Sec-WebSocket-Protocol']) \
                       if 'Sec-WebSocket-Protocol' in headers else []
        self.wsock.protocol = None
        for p in client_proto:
            if p in ssock.protocols:
                self.wsock.protocol = p
                break
        # Only supported extensions are returned
        if 'Sec-WebSocket-Extensions' in headers:
            self.wsock.extension_instances = []
            for hdr in split_stripped(headers['Sec-WebSocket-Extensions']):
                name, params = parse_param_hdr(hdr)
                for ext in ssock.extensions:
                    if ext.is_supported(name, self.wsock.extension_instances):
                        accept_params = ext.negotiate_safe(name, params)
                        if accept_params is not None:
                            instance = ext.Instance(ext, name, accept_params)
                            self.wsock.extension_instances.append(instance)
        # Check if requested resource location is served by this server
        if ssock.locations:
            if self.wsock.location not in ssock.locations:
                raise HandshakeError('location "%s" is not supported by this '
                                     'server' % self.wsock.location)
        # Encode acceptation key using the WebSocket GUID
        key = headers['Sec-WebSocket-Key'].strip()
        accept = b64encode(sha1(key + WS_GUID).digest())
        # Location scheme differs for SSL-enabled connections
        scheme = 'wss' if self.wsock.secure else 'ws'
        if 'Host' in headers:
            host = headers['Host']
        else:
            # NOTE(review): getpeername() returns the *client* address;
            # getsockname() looks intended here to reconstruct the
            # server's own host/port -- confirm before relying on this
            # fallback path.
            host, port = self.sock.getpeername()
            default_port = 443 if self.wsock.secure else 80
            if port != default_port:
                host += ':%d' % port
        location = '%s://%s%s' % (scheme, host, self.wsock.location)
        # Construct HTTP response header
        yield 'HTTP/1.1 101 Switching Protocols'
        yield 'Upgrade', 'websocket'
        yield 'Connection', 'Upgrade'
        yield 'Sec-WebSocket-Origin', origin
        yield 'Sec-WebSocket-Location', location
        yield 'Sec-WebSocket-Accept', accept
        if self.wsock.protocol:
            yield 'Sec-WebSocket-Protocol', self.wsock.protocol
        if self.wsock.extension_instances:
            values = [format_param_hdr(i.name, i.params)
                      for i in self.wsock.extension_instances]
            yield 'Sec-WebSocket-Extensions', ', '.join(values)
class ClientHandshake(Handshake):
    """
    Executes a handshake as the client end point of the socket. May raise a
    HandshakeError if the server response is invalid.

    Handles the 101 success path, HTTP Basic/Digest authentication (401)
    and HTTP redirects (3xx, bounded by MAX_REDIRECTS).
    """

    def __init__(self, wsock):
        Handshake.__init__(self, wsock)
        self.redirects = 0  # number of redirects followed so far

    def perform(self):
        """Send the client handshake and dispatch on the server's response."""
        self.send_headers(self.request_headers())
        self.handle_response(*self.receive_response())

    def handle_response(self, status, headers):
        """Dispatch on the HTTP status code of the server response."""
        if status == 101:
            self.handle_handshake(headers)
        elif status == 401:
            self.handle_auth(headers)
        elif status in (301, 302, 303, 307, 308):
            self.handle_redirect(headers)
        else:
            self.fail('invalid HTTP response status %d' % status)

    def handle_handshake(self, headers):
        """Validate a 101 response: accept key, extensions and protocol."""
        # Check if headers that MUST be present are actually present
        for name in ('Upgrade', 'Connection', 'Sec-WebSocket-Accept'):
            if name not in headers:
                self.fail('missing "%s" header' % name)
        if 'websocket' not in headers['Upgrade'].lower():
            self.fail('"Upgrade" header must contain "websocket"')
        if 'upgrade' not in headers['Connection'].lower():
            self.fail('"Connection" header must contain "Upgrade"')
        # Verify accept header against the key we sent (RFC 6455 sect. 4.1)
        accept = headers['Sec-WebSocket-Accept'].strip()
        required_accept = b64encode(sha1(self.key + WS_GUID).digest())
        if accept != required_accept:
            self.fail('invalid websocket accept header "%s"' % accept)
        # Compare extensions, add hooks only for those returned by server
        if 'Sec-WebSocket-Extensions' in headers:
            # FIXME: there is no distinction between server/client extension
            # instances, while the extension instance may assume it belongs to
            # a server, leading to undefined behavior
            self.wsock.extension_instances = []
            for hdr in split_stripped(headers['Sec-WebSocket-Extensions']):
                name, accept_params = parse_param_hdr(hdr)
                for ext in self.wsock.extensions:
                    if name in ext.names:
                        instance = ext.Instance(ext, name, accept_params)
                        self.wsock.extension_instances.append(instance)
                        break
                else:
                    raise HandshakeError('server handshake contains '
                                         'unsupported extension "%s"' % name)
        # Assert that returned protocol (if any) is supported
        if 'Sec-WebSocket-Protocol' in headers:
            protocol = headers['Sec-WebSocket-Protocol']
            if protocol != 'null' and protocol not in self.wsock.protocols:
                self.fail('unsupported protocol "%s"' % protocol)
            self.wsock.protocol = protocol

    def handle_auth(self, headers):
        """Answer a 401 by resending the handshake with auth credentials."""
        # HTTP authentication is required in the request
        hdr = headers['WWW-Authenticate']
        authres = dict(re.findall(r'(\w+)[:=] ?"?(\w+)"?', hdr))
        mode = hdr.lstrip().split(' ', 1)[0]
        if not self.wsock.auth:
            self.fail('missing username and password for HTTP authentication')
        if mode == 'Basic':
            auth_hdr = self.http_auth_basic_headers(**authres)
        elif mode == 'Digest':
            auth_hdr = self.http_auth_digest_headers(**authres)
        else:
            self.fail('unsupported HTTP authentication mode "%s"' % mode)
        # Send new, authenticated handshake
        self.send_headers(list(self.request_headers()) + list(auth_hdr))
        self.handle_response(*self.receive_response())

    def handle_redirect(self, headers):
        """Follow a 3xx redirect, reconnecting if the net location changed."""
        self.redirects += 1
        if self.redirects > MAX_REDIRECTS:
            self.fail('reached maximum number of redirects (%d)'
                      % MAX_REDIRECTS)
        # Handle HTTP redirect
        url = urlparse(headers['Location'].strip())
        # BUG FIX: ParseResult is immutable, so the original
        # `url.port = ...` raised AttributeError; `self.secure` does not
        # exist (the flag lives on the websocket object); and
        # `url.netloc` includes the ":port" suffix, which is not a valid
        # host for connect().  Compute the effective (host, port) pair
        # explicitly instead.
        port = url.port if url.port else (443 if self.wsock.secure else 80)
        addr = (url.hostname, port)
        # Reconnect socket to new host if net location changed
        if addr != self.sock.getpeername():
            self.sock.close()
            self.sock.connect(addr)
        # Update websocket object and send new handshake
        self.wsock.location = url.path
        self.perform()

    def request_headers(self):
        """Yield the HTTP request line and handshake headers (generator)."""
        if len(self.wsock.location) == 0:
            self.fail('request location is empty')
        # Generate a 16-byte random base64-encoded key for this connection
        self.key = b64encode(os.urandom(16))
        # Send client handshake
        yield 'GET %s HTTP/1.1' % self.wsock.location
        yield 'Host', '%s:%d' % self.sock.getpeername()
        yield 'Upgrade', 'websocket'
        yield 'Connection', 'keep-alive, Upgrade'
        yield 'Sec-WebSocket-Key', self.key
        yield 'Sec-WebSocket-Version', WS_VERSION
        if self.wsock.origin:
            yield 'Origin', self.wsock.origin
        # These are for eagerly caching webservers
        yield 'Pragma', 'no-cache'
        yield 'Cache-Control', 'no-cache'
        # Request protocols and extensions, these are later checked with the
        # actual supported values from the server's response
        if self.wsock.protocols:
            yield 'Sec-WebSocket-Protocol', ', '.join(self.wsock.protocols)
        if self.wsock.extensions:
            values = [format_param_hdr(e.name, e.request)
                      for e in self.wsock.extensions]
            yield 'Sec-WebSocket-Extensions', ', '.join(values)

    def http_auth_basic_headers(self, **kwargs):
        """Yield an HTTP Basic Authorization header built from wsock.auth."""
        u, p = self.wsock.auth
        u = u.encode('utf-8')
        p = p.encode('utf-8')
        yield 'Authorization', 'Basic ' + b64encode(u + ':' + p)

    def http_auth_digest_headers(self, **kwargs):
        """Yield an HTTP Digest Authorization header built from wsock.auth."""
        username, password = self.wsock.auth
        yield 'Authorization', build_authorization_request(
            username=username.encode('utf-8'),
            method='GET',
            uri=self.wsock.location,
            nonce_count=0,
            realm=kwargs['realm'],
            nonce=kwargs['nonce'],
            opaque=kwargs['opaque'],
            # BUG FIX: the original passed a corrupted '<PASSWORD>' codec
            # name to encode(); use utf-8, matching the username handling.
            password=password.encode('utf-8'))
def split_stripped(value, delim=',', maxsplits=-1):
    """Split `value` on `delim` and strip whitespace from every piece.

    Returns an empty list when `value` is empty/falsy; otherwise a lazy
    map of stripped substrings.
    """
    if not value:
        return []
    return map(str.strip, str(value).split(delim, maxsplits))
def parse_param_hdr(hdr):
    """Parse an extension header like "name; k1=v1; k2" into (name, params).

    Integer-looking values are converted to int; bare keys map to True.
    """
    if ';' in hdr:
        name, paramstr = split_stripped(hdr, ';', 1)
    else:
        name, paramstr = hdr, ''
    params = {}
    for param in split_stripped(paramstr):
        if '=' not in param:
            # A key without a value acts as a boolean flag.
            params[param] = True
            continue
        key, value = split_stripped(param, '=', 1)
        params[key] = int(value) if value.isdigit() else value
    return name, params
def format_param_hdr(value, params):
    """Serialize (value, params) into "value; k1=v1; k2" header form.

    Params whose value is True are emitted as bare keys; False/None
    params are omitted; anything else is emitted as key=str(value).
    """
    if not params:
        return value
    def fmt_param(item):
        # PORTABILITY FIX: the original used Python 2 tuple parameter
        # unpacking `def fmt_param((k, v))` and joined a `filter` object
        # into a list -- both break on Python 3.  This form behaves
        # identically on Python 2 and also runs on Python 3.
        k, v = item
        if v is True:
            return k
        if v is not False and v is not None:
            return k + '=' + str(v)
        return None
    strparams = [s for s in map(fmt_param, params.items()) if s]
    return '; '.join([value] + strparams)
import os
import re
import socket
import time
from base64 import b64encode
from hashlib import sha1
from urlparse import urlparse
from errors import HandshakeError
from python_digest import build_authorization_request
WS_GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
WS_VERSION = '13'
MAX_REDIRECTS = 10
HDR_TIMEOUT = 5
MAX_HDR_LEN = 1024
class Handshake(object):
    """
    Base class for the WebSocket opening handshake (RFC 6455, section 4).

    Wraps a websocket object (``wsock``) and its underlying socket, and
    provides helpers to receive/parse an HTTP header block and to send
    one.  Subclasses implement ``perform()`` for the client and server
    sides respectively.

    NOTE(review): this module is written for Python 2 (``str``/``bytes``
    are mixed and ``str.decode`` is used); it is not Python 3 compatible
    as-is.
    """

    def __init__(self, wsock):
        # Keep both the websocket wrapper and the raw socket handy.
        self.wsock = wsock
        self.sock = wsock.sock

    def fail(self, msg):
        """Close the socket and abort the handshake with a HandshakeError."""
        self.sock.close()
        raise HandshakeError(msg)

    def receive_request(self):
        """Receive a client handshake; return ``(location, headers)``."""
        raw, headers = self.receive_headers()
        # Request must be HTTP (at least 1.1) GET request, find the location
        # (without trailing slash)
        match = re.search(r'^GET (.*?)/* HTTP/1.1\r\n', raw)
        if match is None:
            self.fail('not a valid HTTP 1.1 GET request')
        location = match.group(1)
        return location, headers

    def receive_response(self):
        """Receive a server handshake; return ``(status_code, headers)``."""
        raw, headers = self.receive_headers()
        # Response must be HTTP (at least 1.1) with status 101
        match = re.search(r'^HTTP/1\.1 (\d{3})', raw)
        if match is None:
            self.fail('not a valid HTTP 1.1 response')
        status = int(match.group(1))
        return status, headers

    def receive_headers(self):
        """
        Read the raw HTTP header block one byte at a time (up to the
        terminating blank line), enforcing MAX_HDR_LEN and an overall
        HDR_TIMEOUT, then parse it into a ``{name: value}`` dict
        (repeated fields are comma-joined).  Returns
        ``(raw_header_text, headers_dict)``.
        """
        # Receive entire HTTP header
        hdr = ''
        sock_timeout = self.sock.gettimeout()
        try:
            # Impose HDR_TIMEOUT only when the socket is blocking;
            # otherwise honour the caller's existing timeout value.
            force_timeout = sock_timeout is None
            timeout = HDR_TIMEOUT if force_timeout else sock_timeout
            self.sock.settimeout(timeout)
            start_time = time.time()
            while hdr[-4:] != '\r\n\r\n':
                if len(hdr) == MAX_HDR_LEN:
                    raise HandshakeError('request exceeds maximum header '
                                         'length of %d' % MAX_HDR_LEN)
                hdr += self.sock.recv(1)
                # Shrink the remaining timeout so the *total* wait across
                # all one-byte reads stays bounded by `timeout`.
                time_diff = time.time() - start_time
                if time_diff > timeout:
                    raise socket.timeout
                self.sock.settimeout(timeout - time_diff)
        except socket.timeout:
            self.sock.close()
            raise HandshakeError('timeout while receiving handshake headers')
        # Restore the caller's original timeout setting.
        self.sock.settimeout(sock_timeout)
        hdr = hdr.decode('utf-8', 'ignore')
        headers = {}
        for key, value in re.findall(r'(.*?): ?(.*?)\r\n', hdr):
            if key in headers:
                # Repeated header fields are concatenated with commas.
                headers[key] += ', ' + value
            else:
                headers[key] = value
        return hdr, headers

    def send_headers(self, headers):
        """Send a header block; items may be raw lines or (name, value) tuples."""
        # Send request
        for hdr in list(headers):
            if isinstance(hdr, tuple):
                hdr = '%s: %s' % hdr
            self.sock.sendall(hdr + '\r\n')
        # The terminating blank line ends the HTTP header block.
        self.sock.sendall('\r\n')

    def perform(self):
        # Implemented by ServerHandshake / ClientHandshake.
        raise NotImplementedError
class ServerHandshake(Handshake):
    """
    Executes a handshake as the server end point of the socket. If the HTTP
    request headers sent by the client are invalid, a HandshakeError is raised.
    """

    def perform(self, ssock):
        # Receive and validate client handshake
        location, headers = self.receive_request()
        self.wsock.location = location
        self.wsock.request_headers = headers
        # Send server handshake in response
        self.send_headers(self.response_headers(ssock))

    def response_headers(self, ssock):
        """
        Validate the stored client request headers against the server
        socket's configuration (trusted origins, protocols, extensions,
        served locations) and yield the HTTP 101 response: first the
        status line, then (name, value) header pairs.  Any violation
        aborts via ``self.fail()`` -> HandshakeError.
        """
        headers = self.wsock.request_headers
        # Check if headers that MUST be present are actually present
        for name in ('Host', 'Upgrade', 'Connection', 'Sec-WebSocket-Key',
                     'Sec-WebSocket-Version'):
            if name not in headers:
                self.fail('missing "%s" header' % name)
        # Check WebSocket version used by client
        version = headers['Sec-WebSocket-Version']
        if version != WS_VERSION:
            self.fail('WebSocket version %s requested (only %s is supported)'
                      % (version, WS_VERSION))
        # Verify required header keywords
        if 'websocket' not in headers['Upgrade'].lower():
            self.fail('"Upgrade" header must contain "websocket"')
        if 'upgrade' not in headers['Connection'].lower():
            self.fail('"Connection" header must contain "Upgrade"')
        # Origin must be present if browser client, and must match the list of
        # trusted origins
        if 'Origin' not in headers and 'User-Agent' in headers:
            self.fail('browser client must specify "Origin" header')
        origin = headers.get('Origin', 'null')
        if origin == 'null':
            if ssock.trusted_origins:
                self.fail('no "Origin" header specified, assuming untrusted')
        elif ssock.trusted_origins and origin not in ssock.trusted_origins:
            self.fail('untrusted origin "%s"' % origin)
        # Only a supported protocol can be returned (first match wins)
        client_proto = split_stripped(headers['Sec-WebSocket-Protocol']) \
                       if 'Sec-WebSocket-Protocol' in headers else []
        self.wsock.protocol = None
        for p in client_proto:
            if p in ssock.protocols:
                self.wsock.protocol = p
                break
        # Only supported extensions are returned
        if 'Sec-WebSocket-Extensions' in headers:
            self.wsock.extension_instances = []
            for hdr in split_stripped(headers['Sec-WebSocket-Extensions']):
                name, params = parse_param_hdr(hdr)
                for ext in ssock.extensions:
                    if ext.is_supported(name, self.wsock.extension_instances):
                        accept_params = ext.negotiate_safe(name, params)
                        if accept_params is not None:
                            instance = ext.Instance(ext, name, accept_params)
                            self.wsock.extension_instances.append(instance)
        # Check if requested resource location is served by this server
        if ssock.locations:
            if self.wsock.location not in ssock.locations:
                raise HandshakeError('location "%s" is not supported by this '
                                     'server' % self.wsock.location)
        # Encode acceptation key using the WebSocket GUID
        key = headers['Sec-WebSocket-Key'].strip()
        accept = b64encode(sha1(key + WS_GUID).digest())
        # Location scheme differs for SSL-enabled connections
        scheme = 'wss' if self.wsock.secure else 'ws'
        if 'Host' in headers:
            host = headers['Host']
        else:
            # NOTE(review): getpeername() returns the *client* address;
            # getsockname() looks intended here to reconstruct the
            # server's own host/port -- confirm before relying on this
            # fallback path.
            host, port = self.sock.getpeername()
            default_port = 443 if self.wsock.secure else 80
            if port != default_port:
                host += ':%d' % port
        location = '%s://%s%s' % (scheme, host, self.wsock.location)
        # Construct HTTP response header
        yield 'HTTP/1.1 101 Switching Protocols'
        yield 'Upgrade', 'websocket'
        yield 'Connection', 'Upgrade'
        yield 'Sec-WebSocket-Origin', origin
        yield 'Sec-WebSocket-Location', location
        yield 'Sec-WebSocket-Accept', accept
        if self.wsock.protocol:
            yield 'Sec-WebSocket-Protocol', self.wsock.protocol
        if self.wsock.extension_instances:
            values = [format_param_hdr(i.name, i.params)
                      for i in self.wsock.extension_instances]
            yield 'Sec-WebSocket-Extensions', ', '.join(values)
class ClientHandshake(Handshake):
    """
    Executes a handshake as the client end point of the socket. May raise a
    HandshakeError if the server response is invalid.

    Handles the 101 success path, HTTP Basic/Digest authentication (401)
    and HTTP redirects (3xx, bounded by MAX_REDIRECTS).
    """

    def __init__(self, wsock):
        Handshake.__init__(self, wsock)
        self.redirects = 0  # number of redirects followed so far

    def perform(self):
        """Send the client handshake and dispatch on the server's response."""
        self.send_headers(self.request_headers())
        self.handle_response(*self.receive_response())

    def handle_response(self, status, headers):
        """Dispatch on the HTTP status code of the server response."""
        if status == 101:
            self.handle_handshake(headers)
        elif status == 401:
            self.handle_auth(headers)
        elif status in (301, 302, 303, 307, 308):
            self.handle_redirect(headers)
        else:
            self.fail('invalid HTTP response status %d' % status)

    def handle_handshake(self, headers):
        """Validate a 101 response: accept key, extensions and protocol."""
        # Check if headers that MUST be present are actually present
        for name in ('Upgrade', 'Connection', 'Sec-WebSocket-Accept'):
            if name not in headers:
                self.fail('missing "%s" header' % name)
        if 'websocket' not in headers['Upgrade'].lower():
            self.fail('"Upgrade" header must contain "websocket"')
        if 'upgrade' not in headers['Connection'].lower():
            self.fail('"Connection" header must contain "Upgrade"')
        # Verify accept header against the key we sent (RFC 6455 sect. 4.1)
        accept = headers['Sec-WebSocket-Accept'].strip()
        required_accept = b64encode(sha1(self.key + WS_GUID).digest())
        if accept != required_accept:
            self.fail('invalid websocket accept header "%s"' % accept)
        # Compare extensions, add hooks only for those returned by server
        if 'Sec-WebSocket-Extensions' in headers:
            # FIXME: there is no distinction between server/client extension
            # instances, while the extension instance may assume it belongs to
            # a server, leading to undefined behavior
            self.wsock.extension_instances = []
            for hdr in split_stripped(headers['Sec-WebSocket-Extensions']):
                name, accept_params = parse_param_hdr(hdr)
                for ext in self.wsock.extensions:
                    if name in ext.names:
                        instance = ext.Instance(ext, name, accept_params)
                        self.wsock.extension_instances.append(instance)
                        break
                else:
                    raise HandshakeError('server handshake contains '
                                         'unsupported extension "%s"' % name)
        # Assert that returned protocol (if any) is supported
        if 'Sec-WebSocket-Protocol' in headers:
            protocol = headers['Sec-WebSocket-Protocol']
            if protocol != 'null' and protocol not in self.wsock.protocols:
                self.fail('unsupported protocol "%s"' % protocol)
            self.wsock.protocol = protocol

    def handle_auth(self, headers):
        """Answer a 401 by resending the handshake with auth credentials."""
        # HTTP authentication is required in the request
        hdr = headers['WWW-Authenticate']
        authres = dict(re.findall(r'(\w+)[:=] ?"?(\w+)"?', hdr))
        mode = hdr.lstrip().split(' ', 1)[0]
        if not self.wsock.auth:
            self.fail('missing username and password for HTTP authentication')
        if mode == 'Basic':
            auth_hdr = self.http_auth_basic_headers(**authres)
        elif mode == 'Digest':
            auth_hdr = self.http_auth_digest_headers(**authres)
        else:
            self.fail('unsupported HTTP authentication mode "%s"' % mode)
        # Send new, authenticated handshake
        self.send_headers(list(self.request_headers()) + list(auth_hdr))
        self.handle_response(*self.receive_response())

    def handle_redirect(self, headers):
        """Follow a 3xx redirect, reconnecting if the net location changed."""
        self.redirects += 1
        if self.redirects > MAX_REDIRECTS:
            self.fail('reached maximum number of redirects (%d)'
                      % MAX_REDIRECTS)
        # Handle HTTP redirect
        url = urlparse(headers['Location'].strip())
        # BUG FIX: ParseResult is immutable, so the original
        # `url.port = ...` raised AttributeError; `self.secure` does not
        # exist (the flag lives on the websocket object); and
        # `url.netloc` includes the ":port" suffix, which is not a valid
        # host for connect().  Compute the effective (host, port) pair
        # explicitly instead.
        port = url.port if url.port else (443 if self.wsock.secure else 80)
        addr = (url.hostname, port)
        # Reconnect socket to new host if net location changed
        if addr != self.sock.getpeername():
            self.sock.close()
            self.sock.connect(addr)
        # Update websocket object and send new handshake
        self.wsock.location = url.path
        self.perform()

    def request_headers(self):
        """Yield the HTTP request line and handshake headers (generator)."""
        if len(self.wsock.location) == 0:
            self.fail('request location is empty')
        # Generate a 16-byte random base64-encoded key for this connection
        self.key = b64encode(os.urandom(16))
        # Send client handshake
        yield 'GET %s HTTP/1.1' % self.wsock.location
        yield 'Host', '%s:%d' % self.sock.getpeername()
        yield 'Upgrade', 'websocket'
        yield 'Connection', 'keep-alive, Upgrade'
        yield 'Sec-WebSocket-Key', self.key
        yield 'Sec-WebSocket-Version', WS_VERSION
        if self.wsock.origin:
            yield 'Origin', self.wsock.origin
        # These are for eagerly caching webservers
        yield 'Pragma', 'no-cache'
        yield 'Cache-Control', 'no-cache'
        # Request protocols and extensions, these are later checked with the
        # actual supported values from the server's response
        if self.wsock.protocols:
            yield 'Sec-WebSocket-Protocol', ', '.join(self.wsock.protocols)
        if self.wsock.extensions:
            values = [format_param_hdr(e.name, e.request)
                      for e in self.wsock.extensions]
            yield 'Sec-WebSocket-Extensions', ', '.join(values)

    def http_auth_basic_headers(self, **kwargs):
        """Yield an HTTP Basic Authorization header built from wsock.auth."""
        u, p = self.wsock.auth
        u = u.encode('utf-8')
        p = p.encode('utf-8')
        yield 'Authorization', 'Basic ' + b64encode(u + ':' + p)

    def http_auth_digest_headers(self, **kwargs):
        """Yield an HTTP Digest Authorization header built from wsock.auth."""
        username, password = self.wsock.auth
        yield 'Authorization', build_authorization_request(
            username=username.encode('utf-8'),
            method='GET',
            uri=self.wsock.location,
            nonce_count=0,
            realm=kwargs['realm'],
            nonce=kwargs['nonce'],
            opaque=kwargs['opaque'],
            # BUG FIX: the original passed a corrupted '<PASSWORD>' codec
            # name to encode(); use utf-8, matching the username handling.
            password=password.encode('utf-8'))
def split_stripped(value, delim=',', maxsplits=-1):
    """Split `value` on `delim` and strip whitespace from every piece.

    Returns an empty list when `value` is empty/falsy; otherwise a lazy
    map of stripped substrings.
    """
    if not value:
        return []
    return map(str.strip, str(value).split(delim, maxsplits))
def parse_param_hdr(hdr):
    """Parse an extension header like "name; k1=v1; k2" into (name, params).

    Integer-looking values are converted to int; bare keys map to True.
    """
    if ';' in hdr:
        name, paramstr = split_stripped(hdr, ';', 1)
    else:
        name, paramstr = hdr, ''
    params = {}
    for param in split_stripped(paramstr):
        if '=' not in param:
            # A key without a value acts as a boolean flag.
            params[param] = True
            continue
        key, value = split_stripped(param, '=', 1)
        params[key] = int(value) if value.isdigit() else value
    return name, params
def format_param_hdr(value, params):
    """Serialize (value, params) into "value; k1=v1; k2" header form.

    Params whose value is True are emitted as bare keys; False/None
    params are omitted; anything else is emitted as key=str(value).
    """
    if not params:
        return value
    def fmt_param(item):
        # PORTABILITY FIX: the original used Python 2 tuple parameter
        # unpacking `def fmt_param((k, v))` and joined a `filter` object
        # into a list -- both break on Python 3.  This form behaves
        # identically on Python 2 and also runs on Python 3.
        k, v = item
        if v is True:
            return k
        if v is not False and v is not None:
            return k + '=' + str(v)
        return None
    strparams = [s for s in map(fmt_param, params.items()) if s]
    return '; '.join([value] + strparams)
photoplace/lib/PhotoPlace/UserInterface/commandUI.py | jriguera/photoplace | 10 | 6621916 | <filename>photoplace/lib/PhotoPlace/UserInterface/commandUI.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# commandUI.py
#
# Copyright 2010-2015 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A command line implementation for a user interface.
"""
__program__ = "photoplace"
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.6.1"
__date__ = "Dec 2014"
__license__ = "Apache 2.0"
__copyright__ ="(c) <NAME>"
import os
import sys
from PhotoPlace.definitions import *
from PhotoPlace.observerHandler import *
from PhotoPlace.stateHandler import *
from PhotoPlace.userFacade import *
from PhotoPlace.Plugins.Interface import *
from Interface import InterfaceUI
class PhotoPlaceCOM(InterfaceUI):
    """
    Command-line (non-GUI) user interface for PhotoPlace.

    Drives the user facade through the standard pipeline: load templates,
    load photos, optionally read a GPX file and geolocate, then run the
    final processing step.  Implemented as a singleton.
    """
    _instance = None

    # Singleton
    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(PhotoPlaceCOM, cls).__new__(cls)
        return cls._instance

    def __init__(self, resourcedir=None):
        InterfaceUI.__init__(self, resourcedir)

    def init(self, userfacade):
        """Attach the facade and initialise a fresh program state."""
        self.userfacade = userfacade
        self.plugins = dict()
        self.plugins_error = []
        self.num_photos_process = 0
        # Make a new state; on failure retry with a forced reset.
        try:
            self.userfacade.init()
        except Error as e:
            print(e)
            self.userfacade.init(True)

    def loadPlugins(self):
        """Load and activate every configured add-on, recording failures."""
        errors = self.userfacade.load_plugins()
        # .items() works on both Python 2 and 3 (original used iteritems()).
        for p, e in errors.items():
            print(e)
        self.plugins_error = []
        for p in self.userfacade.addons:
            if p not in errors:
                try:
                    error = self.userfacade.activate_plugin(p, None)
                except Error as e:
                    self.plugins_error.append(p)
                    print(e)
                else:
                    # `is not None`: activate_plugin returns an error object
                    # on failure (idiom fix over `!= None`).
                    if error is not None:
                        self.plugins_error.append(p)
                        print(error)
            else:
                self.plugins_error.append(p)

    def unloadPlugins(self):
        # Nothing to do for the command-line interface.
        pass

    def activate_plugins(self):
        """Initialise every loaded plugin that does not require a GUI."""
        for plg, plgobj in self.userfacade.list_plugins().items():
            if plg in self.plugins or plg in self.plugins_error:
                continue
            if not plgobj.capabilities['UI']:
                # Active all plugins
                try:
                    self.userfacade.init_plugin(plg, '*', None)
                except Error as e:
                    print(e)
                self.plugins[plg] = (plgobj)

    def deactivate_plugins(self):
        """Shut down all active plugins and clear the registry."""
        # Iterate over a snapshot: entries are deleted while looping
        # (safe on Python 2 via keys()-as-list, but list() makes it safe
        # on Python 3 as well).
        for plg in list(self.plugins.keys()):
            plgobj = self.plugins[plg]
            try:
                self.userfacade.end_plugin(plg)
            except Error as e:
                print(e)
            del self.plugins[plg]
        self.plugins = dict()

    def start(self, load_files=True):
        """Run the whole pipeline, then always deactivate the plugins."""
        self.activate_plugins()
        if self.action_loadtemplates():
            if self.action_loadphotos():
                if self.userfacade.state['gpxinputfile']:
                    if self.action_readgpx():
                        self.action_geolocate()
                try:
                    self.userfacade.goprocess(True)
                except Error as e:
                    print(e)
        self.deactivate_plugins()

    def action_loadtemplates(self):
        """Load the KML templates; return True on success."""
        try:
            loadtemplates = self.userfacade.DoTemplates()
            if loadtemplates:
                loadtemplates.run()
                return True
            else:
                return False
        except Error as e:
            print(e)
            return False

    def action_loadphotos(self, directory=None):
        """Load the photos (from `directory` or state); return True on success."""
        try:
            loadphotos = self.userfacade.LoadPhotos(directory)
            if loadphotos:
                loadphotos.run()
                return True
            else:
                return False
        except Error as e:
            print(e)
            return False

    def action_readgpx(self, filename=None):
        """Parse the GPX track file; return True on success."""
        try:
            readgpx = self.userfacade.ReadGPX(filename)
            if readgpx:
                readgpx.run()
                return True
            else:
                return False
        except Error as e:
            print(e)
            return False

    def action_geolocate(self):
        """Geotag the loaded photos against the GPX track; True on success."""
        try:
            geolocate = self.userfacade.Geolocate()
            if geolocate:
                geolocate.run()
            else:
                return False
        except Error as e:
            print(e)
            return False
        return True
# EOF
| <filename>photoplace/lib/PhotoPlace/UserInterface/commandUI.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# commandUI.py
#
# Copyright 2010-2015 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A command line implementation for a user interface.
"""
__program__ = "photoplace"
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.6.1"
__date__ = "Dec 2014"
__license__ = "Apache 2.0"
__copyright__ ="(c) <NAME>"
import os
import sys
from PhotoPlace.definitions import *
from PhotoPlace.observerHandler import *
from PhotoPlace.stateHandler import *
from PhotoPlace.userFacade import *
from PhotoPlace.Plugins.Interface import *
from Interface import InterfaceUI
class PhotoPlaceCOM(InterfaceUI):
    """
    Command-line (non-GUI) user interface for PhotoPlace.

    Drives the user facade through the standard pipeline: load templates,
    load photos, optionally read a GPX file and geolocate, then run the
    final processing step.  Implemented as a singleton.
    """
    _instance = None

    # Singleton
    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(PhotoPlaceCOM, cls).__new__(cls)
        return cls._instance

    def __init__(self, resourcedir=None):
        InterfaceUI.__init__(self, resourcedir)

    def init(self, userfacade):
        """Attach the facade and initialise a fresh program state."""
        self.userfacade = userfacade
        self.plugins = dict()
        self.plugins_error = []
        self.num_photos_process = 0
        # Make a new state; on failure retry with a forced reset.
        try:
            self.userfacade.init()
        except Error as e:
            print(e)
            self.userfacade.init(True)

    def loadPlugins(self):
        """Load and activate every configured add-on, recording failures."""
        errors = self.userfacade.load_plugins()
        # .items() works on both Python 2 and 3 (original used iteritems()).
        for p, e in errors.items():
            print(e)
        self.plugins_error = []
        for p in self.userfacade.addons:
            if p not in errors:
                try:
                    error = self.userfacade.activate_plugin(p, None)
                except Error as e:
                    self.plugins_error.append(p)
                    print(e)
                else:
                    # `is not None`: activate_plugin returns an error object
                    # on failure (idiom fix over `!= None`).
                    if error is not None:
                        self.plugins_error.append(p)
                        print(error)
            else:
                self.plugins_error.append(p)

    def unloadPlugins(self):
        # Nothing to do for the command-line interface.
        pass

    def activate_plugins(self):
        """Initialise every loaded plugin that does not require a GUI."""
        for plg, plgobj in self.userfacade.list_plugins().items():
            if plg in self.plugins or plg in self.plugins_error:
                continue
            if not plgobj.capabilities['UI']:
                # Active all plugins
                try:
                    self.userfacade.init_plugin(plg, '*', None)
                except Error as e:
                    print(e)
                self.plugins[plg] = (plgobj)

    def deactivate_plugins(self):
        """Shut down all active plugins and clear the registry."""
        # Iterate over a snapshot: entries are deleted while looping
        # (safe on Python 2 via keys()-as-list, but list() makes it safe
        # on Python 3 as well).
        for plg in list(self.plugins.keys()):
            plgobj = self.plugins[plg]
            try:
                self.userfacade.end_plugin(plg)
            except Error as e:
                print(e)
            del self.plugins[plg]
        self.plugins = dict()

    def start(self, load_files=True):
        """Run the whole pipeline, then always deactivate the plugins."""
        self.activate_plugins()
        if self.action_loadtemplates():
            if self.action_loadphotos():
                if self.userfacade.state['gpxinputfile']:
                    if self.action_readgpx():
                        self.action_geolocate()
                try:
                    self.userfacade.goprocess(True)
                except Error as e:
                    print(e)
        self.deactivate_plugins()

    def action_loadtemplates(self):
        """Load the KML templates; return True on success."""
        try:
            loadtemplates = self.userfacade.DoTemplates()
            if loadtemplates:
                loadtemplates.run()
                return True
            else:
                return False
        except Error as e:
            print(e)
            return False

    def action_loadphotos(self, directory=None):
        """Load the photos (from `directory` or state); return True on success."""
        try:
            loadphotos = self.userfacade.LoadPhotos(directory)
            if loadphotos:
                loadphotos.run()
                return True
            else:
                return False
        except Error as e:
            print(e)
            return False

    def action_readgpx(self, filename=None):
        """Parse the GPX track file; return True on success."""
        try:
            readgpx = self.userfacade.ReadGPX(filename)
            if readgpx:
                readgpx.run()
                return True
            else:
                return False
        except Error as e:
            print(e)
            return False

    def action_geolocate(self):
        """Geotag the loaded photos against the GPX track; True on success."""
        try:
            geolocate = self.userfacade.Geolocate()
            if geolocate:
                geolocate.run()
            else:
                return False
        except Error as e:
            print(e)
            return False
        return True
# EOF
| en | 0.798396 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # commandUI.py # # Copyright 2010-2015 <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A command line implementation for a user interface. GTK GUI for PhotoPlace # Singleton # Make a new state # Active all plugins # EOF | 2.25243 | 2 |
crawling/crwalingbasic.py | metacogpe/python | 0 | 6621917 | <gh_stars>0
import sys
import io

# Re-wrap stdout/stderr as UTF-8 so Korean text prints correctly on
# consoles whose default encoding is not UTF-8 (e.g. Windows cp949).
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')

import urllib.request

# Fetch the Naver mobile stock page for Samsung Electronics (005930)
# and decode the response body as UTF-8.
r1 = urllib.request.Request('https://m.stock.naver.com/item/main.nhn#/stocks/005930/total')
r2 = urllib.request.urlopen(r1)
r3 = r2.read()
r4 = r3.decode('utf-8')
# NOTE(review): the result of find() is discarded -- presumably left
# over from interactive testing; it has no effect here.
r4.find('삼성')
print(r4)
#res = req.urlopen(url).read().decode('euc-kr')
import sys
import io

# Re-wrap stdout/stderr as UTF-8 so Korean text prints correctly on
# consoles whose default encoding is not UTF-8 (e.g. Windows cp949).
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')

import urllib.request

# Fetch the Naver mobile stock page for Samsung Electronics (005930)
# and decode the response body as UTF-8.
r1 = urllib.request.Request('https://m.stock.naver.com/item/main.nhn#/stocks/005930/total')
r2 = urllib.request.urlopen(r1)
r3 = r2.read()
r4 = r3.decode('utf-8')
# NOTE(review): the result of find() is discarded -- presumably left
# over from interactive testing; it has no effect here.
r4.find('삼성')
print(r4)
#res = req.urlopen(url).read().decode('euc-kr')
simple.py | DrAlbertCruz/PL-FC-Naive | 0 | 6621918 | <filename>simple.py
# Simple example to determine entailment in propositional logic with forward chaining
# Knowledge base is a list of propositional atomic sentences (identified by a string)
KB = ["looks","swims","quacks"]
# Rules are the other part of the KB that contain implications. Antecedents must be conjunctive only, and the
# consequent must be a single atomic sentence (for the algorithm in this lab).
# Example:
# (["looks","swims","quacks"],"duck")
# looks AND swims AND quacks => duck
# Indicates that if it looks like a duck, if it swims like a duck, and it quacks like a duck it must be a duck.
rules = [(["looks","swims","quacks"],"duck"),(["barks"],"dog"),(["hoots","flies"],"owl")]
count = 1
# Keep track of the number of times we have iterated over the whole rule set
# []: This is a list of items ...
# (): This is a tuple of items ... there is a difference
# Keep looping, attempting to fire each rule, stopping when we loop over the whole ruleset and no new knowledge
# is commited
changes = True
while changes:
changes = False
# Set the flag that there have been no changes to false
print( "Starting iteration " + str(count) )
# For each rule in the set of rules ...
for p in rules:
antecedent, consequent = p
print( "Consider a rule where: " )
print( antecedent )
print( "implies: " )
print( consequent )
# Determine if all chars in antecedent are also in KB
anteInKB = True # Flag for the antecedent in the KB
for q in antecedent:
# q will be a list of strings
if q not in KB:
# KB is a string
anteInKB = False # Flag as false, all clauses must be implied
# If it passes the above, then antecedent should be entailed
if anteInKB and consequent not in KB:
KB.append( consequent )
changes = True
print( "Antecedent is in KB, consequent is implied, KB is now: " )
print(KB)
elif anteInKB and consequent in KB:
print( "Consequent is implied, but was already in KB")
else:
print( "Consequent is not implied" )
count = count + 1
print( "No more changes. KB is: " )
print(KB)
| <filename>simple.py
# Simple example to determine entailment in propositional logic with forward chaining
# Knowledge base is a list of propositional atomic sentences (identified by a string)
KB = ["looks","swims","quacks"]
# Rules are the other part of the KB that contain implications. Antecedents must be conjunctive only, and the
# consequent must be a single atomic sentence (for the algorithm in this lab).
# Example:
# (["looks","swims","quacks"],"duck")
# looks AND swims AND quacks => duck
# Indicates that if it looks like a duck, if it swims like a duck, and it quacks like a duck it must be a duck.
rules = [(["looks","swims","quacks"],"duck"),(["barks"],"dog"),(["hoots","flies"],"owl")]
count = 1
# Keep track of the number of times we have iterated over the whole rule set
# []: This is a list of items ...
# (): This is a tuple of items ... there is a difference
# Keep looping, attempting to fire each rule, stopping when we loop over the whole ruleset and no new knowledge
# is commited
changes = True
while changes:
changes = False
# Set the flag that there have been no changes to false
print( "Starting iteration " + str(count) )
# For each rule in the set of rules ...
for p in rules:
antecedent, consequent = p
print( "Consider a rule where: " )
print( antecedent )
print( "implies: " )
print( consequent )
# Determine if all chars in antecedent are also in KB
anteInKB = True # Flag for the antecedent in the KB
for q in antecedent:
# q will be a list of strings
if q not in KB:
# KB is a string
anteInKB = False # Flag as false, all clauses must be implied
# If it passes the above, then antecedent should be entailed
if anteInKB and consequent not in KB:
KB.append( consequent )
changes = True
print( "Antecedent is in KB, consequent is implied, KB is now: " )
print(KB)
elif anteInKB and consequent in KB:
print( "Consequent is implied, but was already in KB")
else:
print( "Consequent is not implied" )
count = count + 1
print( "No more changes. KB is: " )
print(KB)
| en | 0.907389 | # Simple example to determine entailment in propositional logic with forward chaining # Knowledge base is a list of propositional atomic sentences (identified by a string) # Rules are the other part of the KB that contain implications. Antecedents must be conjunctive only, and the # consequent must be a single atomic sentence (for the algorithm in this lab). # Example: # (["looks","swims","quacks"],"duck") # looks AND swims AND quacks => duck # Indicates that if it looks like a duck, if it swims like a duck, and it quacks like a duck it must be a duck. # Keep track of the number of times we have iterated over the whole rule set # []: This is a list of items ... # (): This is a tuple of items ... there is a difference # Keep looping, attempting to fire each rule, stopping when we loop over the whole ruleset and no new knowledge # is commited # Set the flag that there have been no changes to false # For each rule in the set of rules ... # Determine if all chars in antecedent are also in KB # Flag for the antecedent in the KB # q will be a list of strings # KB is a string # Flag as false, all clauses must be implied # If it passes the above, then antecedent should be entailed | 3.807431 | 4 |
Janus/python-base-unit_05/files/close.file.test.py | voodoopeople42/Vproject | 0 | 6621919 | # -*- coding: utf-8 -*-
# close.file.test.py
my_file = open("README.md", "r")
print(f"Имя файла: {my_file.name}")
# Python автоматически закрывает файл
# если файловый объект к которому он привязан присваивается другому файлу.
# Однако, хорошей практикой будет вручную закрывать файл командой close().
my_file = open("LICENSE", "r")
print(f"Имя файла: {my_file.name}")
print(f"Файл {my_file.name} закрыт: {my_file.closed}")
my_file.close()
print(f"А теперь файл {my_file.name} закрыт: {my_file.closed}")
| # -*- coding: utf-8 -*-
# close.file.test.py
my_file = open("README.md", "r")
print(f"Имя файла: {my_file.name}")
# Python автоматически закрывает файл
# если файловый объект к которому он привязан присваивается другому файлу.
# Однако, хорошей практикой будет вручную закрывать файл командой close().
my_file = open("LICENSE", "r")
print(f"Имя файла: {my_file.name}")
print(f"Файл {my_file.name} закрыт: {my_file.closed}")
my_file.close()
print(f"А теперь файл {my_file.name} закрыт: {my_file.closed}")
| ru | 0.995734 | # -*- coding: utf-8 -*- # close.file.test.py # Python автоматически закрывает файл # если файловый объект к которому он привязан присваивается другому файлу. # Однако, хорошей практикой будет вручную закрывать файл командой close(). | 3.197766 | 3 |
Python/Numbers/fibonacci_test.py | whzd/LearningProjects | 0 | 6621920 | <filename>Python/Numbers/fibonacci_test.py
import unittest
from fibonacci import fibonacci_sequence
class TestFibonacci(unittest.TestCase):
    """Unit tests for fibonacci.fibonacci_sequence."""

    def test_fibonacci_sequence(self):
        # Known terms of the sequence: F(2)=1, F(15)=610, F(31)=1346269.
        for term, expected in ((2, 1), (15, 610), (31, 1346269)):
            self.assertEqual(fibonacci_sequence(term), expected)

    def test_values(self):
        # A negative term number is outside the valid range: ValueError.
        with self.assertRaises(ValueError):
            fibonacci_sequence(-1)
def test_type(self):
# Test if type error is raised to invalid n type
self.assertRaises(TypeError, fibonacci_sequence, "a") | <filename>Python/Numbers/fibonacci_test.py
import unittest
from fibonacci import fibonacci_sequence
class TestFibonacci(unittest.TestCase):
def test_fibonacci_sequence(self):
# Test calculate the term number 2 of tha Fibonacci Sequence
self.assertEqual(fibonacci_sequence(2), 1)
# Test calculate the term number 15 of tha Fibonacci Sequence
self.assertEqual(fibonacci_sequence(15), 610)
# Test calculate the term number 31 of tha Fibonacci Sequence
self.assertEqual(fibonacci_sequence(31), 1346269)
def test_values(self):
# Test if value error is raised to n values off the limits
self.assertRaises(ValueError, fibonacci_sequence, -1)
def test_type(self):
# Test if type error is raised to invalid n type
self.assertRaises(TypeError, fibonacci_sequence, "a") | en | 0.645997 | # Test calculate the term number 2 of tha Fibonacci Sequence # Test calculate the term number 15 of tha Fibonacci Sequence # Test calculate the term number 31 of tha Fibonacci Sequence # Test if value error is raised to n values off the limits # Test if type error is raised to invalid n type | 4.053219 | 4 |
kissim/io/dataframe.py | AJK-dev/kissim | 15 | 6621921 | """
kissim.io.dataframe
Defines a DataFrame-based pocket class.
"""
from opencadd.structure.pocket import Pocket
from . import KlifsToKissimData
class PocketDataFrame(Pocket):
@classmethod
def from_structure_klifs_id(cls, structure_klifs_id, klifs_session=None):
"""
Get DataFrame-based pocket object from a KLIFS structure ID.
Parameters
----------
structure_id : int
KLIFS structure ID.
klifs_session : None or opencadd.databases.klifs.session.Session
Local or remote KLIFS session. If None, generate new remote session.
Returns
-------
kissim.io.PocketDataFrame or None
DataFrame-based pocket object.
"""
data = KlifsToKissimData.from_structure_klifs_id(structure_klifs_id, klifs_session)
if data:
pocket = cls.from_text(
data.text, data.extension, data.residue_ids, data.residue_ixs, structure_klifs_id
)
return pocket
else:
return None
| """
kissim.io.dataframe
Defines a DataFrame-based pocket class.
"""
from opencadd.structure.pocket import Pocket
from . import KlifsToKissimData
class PocketDataFrame(Pocket):
@classmethod
def from_structure_klifs_id(cls, structure_klifs_id, klifs_session=None):
"""
Get DataFrame-based pocket object from a KLIFS structure ID.
Parameters
----------
structure_id : int
KLIFS structure ID.
klifs_session : None or opencadd.databases.klifs.session.Session
Local or remote KLIFS session. If None, generate new remote session.
Returns
-------
kissim.io.PocketDataFrame or None
DataFrame-based pocket object.
"""
data = KlifsToKissimData.from_structure_klifs_id(structure_klifs_id, klifs_session)
if data:
pocket = cls.from_text(
data.text, data.extension, data.residue_ids, data.residue_ixs, structure_klifs_id
)
return pocket
else:
return None
| en | 0.465088 | kissim.io.dataframe Defines a DataFrame-based pocket class. Get DataFrame-based pocket object from a KLIFS structure ID. Parameters ---------- structure_id : int KLIFS structure ID. klifs_session : None or opencadd.databases.klifs.session.Session Local or remote KLIFS session. If None, generate new remote session. Returns ------- kissim.io.PocketDataFrame or None DataFrame-based pocket object. | 2.599887 | 3 |
lattices/constraints.py | dit/lattices | 1 | 6621922 | """
Various conditions one might employ in defining a lattice.
"""
from itertools import combinations
from operator import le
import networkx as nx
__all__ = [
'is_antichain',
'is_cover',
'is_partition',
'is_connected',
]
def is_antichain(set_of_sets, le=le):
"""
Determine whether `set_of_sets` represents an antichain; that is,
whether all pairs of sets within `set_of_sets` are incomperable
according to `le`.
Parameters
----------
set_of_sets : a (frozen)set of (frozen)sets
The potential antichain.
le : func
A function which determines whether one set is "less than" another.
Defaults to operator.le (the built-in <=).
Returns
-------
antichain : bool
Whether set_of_sets represents an antichain or not.
"""
for i, j in combinations(set_of_sets, 2):
if le(i, j) or le(j, i):
return False
return True
def is_cover(set_of_sets, alphabet):
"""
Determine whether `set_of_sets` is a cover of `alphabet`; that is,
is every element of `alphabet` represented somewhere in `set_of_sets`?
Parameters
----------
set_of_sets : a (frozen)set of (frozen)sets
The potential covering.
alphabet : set
The full alphabet.
Returns
-------
cover : bool
Whether set_of_sets is a cover or not.
"""
return set().union(*set_of_sets) == set(alphabet)
def is_partition(set_of_sets, alphabet):
"""
Determine whether `set_of_sets` partitions `alphabet`; that is,
is every element of `alphabet` represented exactly once in `set_of_sets`?
Parameters
----------
set_of_sets : a (frozen)set of (frozen)sets
The potential partition.
alphabet : set
The full alphabet.
Returns
-------
partition : bool
Whether set_of_sets is a partition or not.
"""
pairwise_disjoint = not any(i & j for i, j in combinations(set_of_sets, 2))
return pairwise_disjoint and is_cover(set_of_sets, alphabet)
def is_connected(set_of_sets):
"""
Determine whether `set_of_sets` forms a connected set.
Parameters
----------
set_of_sets : a (frozen)set of (frozen)sets
The potentially connected set.
Returns
-------
connected : bool
Whether set_of_sets is connected or not.
"""
graph = nx.Graph()
for set_ in set_of_sets:
graph.add_edges_from(combinations(set_, 2))
return len(list(nx.connected_components(graph))) <= 1
| """
Various conditions one might employ in defining a lattice.
"""
from itertools import combinations
from operator import le
import networkx as nx
__all__ = [
'is_antichain',
'is_cover',
'is_partition',
'is_connected',
]
def is_antichain(set_of_sets, le=le):
"""
Determine whether `set_of_sets` represents an antichain; that is,
whether all pairs of sets within `set_of_sets` are incomperable
according to `le`.
Parameters
----------
set_of_sets : a (frozen)set of (frozen)sets
The potential antichain.
le : func
A function which determines whether one set is "less than" another.
Defaults to operator.le (the built-in <=).
Returns
-------
antichain : bool
Whether set_of_sets represents an antichain or not.
"""
for i, j in combinations(set_of_sets, 2):
if le(i, j) or le(j, i):
return False
return True
def is_cover(set_of_sets, alphabet):
"""
Determine whether `set_of_sets` is a cover of `alphabet`; that is,
is every element of `alphabet` represented somewhere in `set_of_sets`?
Parameters
----------
set_of_sets : a (frozen)set of (frozen)sets
The potential covering.
alphabet : set
The full alphabet.
Returns
-------
cover : bool
Whether set_of_sets is a cover or not.
"""
return set().union(*set_of_sets) == set(alphabet)
def is_partition(set_of_sets, alphabet):
"""
Determine whether `set_of_sets` partitions `alphabet`; that is,
is every element of `alphabet` represented exactly once in `set_of_sets`?
Parameters
----------
set_of_sets : a (frozen)set of (frozen)sets
The potential partition.
alphabet : set
The full alphabet.
Returns
-------
partition : bool
Whether set_of_sets is a partition or not.
"""
pairwise_disjoint = not any(i & j for i, j in combinations(set_of_sets, 2))
return pairwise_disjoint and is_cover(set_of_sets, alphabet)
def is_connected(set_of_sets):
"""
Determine whether `set_of_sets` forms a connected set.
Parameters
----------
set_of_sets : a (frozen)set of (frozen)sets
The potentially connected set.
Returns
-------
connected : bool
Whether set_of_sets is connected or not.
"""
graph = nx.Graph()
for set_ in set_of_sets:
graph.add_edges_from(combinations(set_, 2))
return len(list(nx.connected_components(graph))) <= 1
| en | 0.74017 | Various conditions one might employ in defining a lattice. Determine whether `set_of_sets` represents an antichain; that is, whether all pairs of sets within `set_of_sets` are incomperable according to `le`. Parameters ---------- set_of_sets : a (frozen)set of (frozen)sets The potential antichain. le : func A function which determines whether one set is "less than" another. Defaults to operator.le (the built-in <=). Returns ------- antichain : bool Whether set_of_sets represents an antichain or not. Determine whether `set_of_sets` is a cover of `alphabet`; that is, is every element of `alphabet` represented somewhere in `set_of_sets`? Parameters ---------- set_of_sets : a (frozen)set of (frozen)sets The potential covering. alphabet : set The full alphabet. Returns ------- cover : bool Whether set_of_sets is a cover or not. Determine whether `set_of_sets` partitions `alphabet`; that is, is every element of `alphabet` represented exactly once in `set_of_sets`? Parameters ---------- set_of_sets : a (frozen)set of (frozen)sets The potential partition. alphabet : set The full alphabet. Returns ------- partition : bool Whether set_of_sets is a partition or not. Determine whether `set_of_sets` forms a connected set. Parameters ---------- set_of_sets : a (frozen)set of (frozen)sets The potentially connected set. Returns ------- connected : bool Whether set_of_sets is connected or not. | 3.661336 | 4 |
chsystem/utility/db_utils.py | OB-UNISA/ch-system | 0 | 6621923 | <gh_stars>0
import time
from replit import db
import utils
def get_all_key_values():
db_kv = {}
for key in db.keys():
db_kv[key] = db[key]
return db_kv
def print_db(db_kv):
for key, value in db_kv.items():
print(f'{key}: {value}')
def get_all_bosses():
return {boss: timer for (boss, timer) in db.items() if boss in utils.BOSSES}
def write_logs_file(file_name='tmp.txt'):
with open(file_name, 'w') as logs:
logs.write(db['logs'])
def delete_logs():
with open('log.txt', 'w') as logs:
logs.write('--DELETED--\n')
db['logs'] = ''
utils.logger('DL: deleted logs')
db['last_delete'] = str(round(time.time()))
| import time
from replit import db
import utils
def get_all_key_values():
db_kv = {}
for key in db.keys():
db_kv[key] = db[key]
return db_kv
def print_db(db_kv):
for key, value in db_kv.items():
print(f'{key}: {value}')
def get_all_bosses():
return {boss: timer for (boss, timer) in db.items() if boss in utils.BOSSES}
def write_logs_file(file_name='tmp.txt'):
with open(file_name, 'w') as logs:
logs.write(db['logs'])
def delete_logs():
with open('log.txt', 'w') as logs:
logs.write('--DELETED--\n')
db['logs'] = ''
utils.logger('DL: deleted logs')
db['last_delete'] = str(round(time.time())) | none | 1 | 2.746288 | 3 | |
flimsy/query.py | spwilson2/flimsy | 0 | 6621924 | import terminal
import log
# TODO Refactor print logic out of this so the objects
# created are separate from print logic.
class QueryRunner(object):
def __init__(self, test_schedule):
self.schedule = test_schedule
def tags(self):
tags = set()
for suite in self.schedule:
tags = tags | set(suite.tags)
return tags
def suites(self):
return [suite for suite in self.schedule]
def suites_with_tag(self, tag):
return filter(lambda suite: tag in suite.tags, self.suites())
def list_tests(self):
log.test_log.message(terminal.separator())
log.test_log.message('Listing all Test Cases.', bold=True)
log.test_log.message(terminal.separator())
for suite in self.schedule:
for test in suite:
log.test_log.message(test.uid)
def list_suites(self):
log.test_log.message(terminal.separator())
log.test_log.message('Listing all Test Suites.', bold=True)
log.test_log.message(terminal.separator())
for suite in self.suites():
log.test_log.message(suite.uid)
def list_tags(self):
#TODO In Gem5 override this with tag types (isa,variant,length)
log.test_log.message(terminal.separator())
log.test_log.message('Listing all Test Tags.', bold=True)
log.test_log.message(terminal.separator())
for tag in self.tags():
log.test_log.message(tag) | import terminal
import log
# TODO Refactor print logic out of this so the objects
# created are separate from print logic.
class QueryRunner(object):
def __init__(self, test_schedule):
self.schedule = test_schedule
def tags(self):
tags = set()
for suite in self.schedule:
tags = tags | set(suite.tags)
return tags
def suites(self):
return [suite for suite in self.schedule]
def suites_with_tag(self, tag):
return filter(lambda suite: tag in suite.tags, self.suites())
def list_tests(self):
log.test_log.message(terminal.separator())
log.test_log.message('Listing all Test Cases.', bold=True)
log.test_log.message(terminal.separator())
for suite in self.schedule:
for test in suite:
log.test_log.message(test.uid)
def list_suites(self):
log.test_log.message(terminal.separator())
log.test_log.message('Listing all Test Suites.', bold=True)
log.test_log.message(terminal.separator())
for suite in self.suites():
log.test_log.message(suite.uid)
def list_tags(self):
#TODO In Gem5 override this with tag types (isa,variant,length)
log.test_log.message(terminal.separator())
log.test_log.message('Listing all Test Tags.', bold=True)
log.test_log.message(terminal.separator())
for tag in self.tags():
log.test_log.message(tag) | en | 0.839075 | # TODO Refactor print logic out of this so the objects # created are separate from print logic. #TODO In Gem5 override this with tag types (isa,variant,length) | 2.461001 | 2 |
brainframe_qt/ui/dialogs/license_dialog/widgets/brainframe_license/aotu_login_form/aotu_login_form.py | aotuai/brainframe-qt | 17 | 6621925 | from PyQt5.QtCore import QObject, pyqtSignal
from .aotu_login_form_ui import AotuLoginFormUI
class AotuLoginForm(AotuLoginFormUI):
oath_login_requested = pyqtSignal()
def __init__(self, *, parent: QObject):
super().__init__(parent=parent)
self._init_signals()
def _init_signals(self) -> None:
self.oauth_button.clicked.connect(self.oath_login_requested)
| from PyQt5.QtCore import QObject, pyqtSignal
from .aotu_login_form_ui import AotuLoginFormUI
class AotuLoginForm(AotuLoginFormUI):
oath_login_requested = pyqtSignal()
def __init__(self, *, parent: QObject):
super().__init__(parent=parent)
self._init_signals()
def _init_signals(self) -> None:
self.oauth_button.clicked.connect(self.oath_login_requested)
| none | 1 | 2.465677 | 2 | |
setup.py | onhernandes/capybara | 2 | 6621926 | from distutils.core import setup
setup(
name="Capybara",
version="1.0.0",
author="<NAME>",
author_email="<EMAIL>",
scripts=["main.py"],
license="LICENSE",
description="Twitter bot for posting photos",
long_description=open("README.md").read(),
install_requires=[
"PyYAML == 3.13",
"mongoengine == 0.15.3",
"twitter == 1.18.0"
],
)
| from distutils.core import setup
setup(
name="Capybara",
version="1.0.0",
author="<NAME>",
author_email="<EMAIL>",
scripts=["main.py"],
license="LICENSE",
description="Twitter bot for posting photos",
long_description=open("README.md").read(),
install_requires=[
"PyYAML == 3.13",
"mongoengine == 0.15.3",
"twitter == 1.18.0"
],
)
| none | 1 | 1.130361 | 1 | |
python_scripts/unicorn02.py | mirontoli/tolle-rasp | 2 | 6621927 | import unicornhat as uh
import time
uh.set_layout(uh.PHAT)
uh.brightness(0.5)
def paint(r, g, b):
for x in range(8):
for y in range(4):
uh.set_pixel(x,y,r,g, b)
uh.show()
while True:
paint(0,255,0)
time.sleep(10)
paint(255,255,0)
time.sleep(10)
paint(255,0,0)
time.sleep(10)
| import unicornhat as uh
import time
uh.set_layout(uh.PHAT)
uh.brightness(0.5)
def paint(r, g, b):
for x in range(8):
for y in range(4):
uh.set_pixel(x,y,r,g, b)
uh.show()
while True:
paint(0,255,0)
time.sleep(10)
paint(255,255,0)
time.sleep(10)
paint(255,0,0)
time.sleep(10)
| none | 1 | 2.817482 | 3 | |
tests/integration/roots/test-settings/kaybee_plugins/settings_handlers.py | pauleveritt/kaybee | 2 | 6621928 | <reponame>pauleveritt/kaybee<filename>tests/integration/roots/test-settings/kaybee_plugins/settings_handlers.py<gh_stars>1-10
from sphinx.environment import BuildEnvironment
from kaybee.app import kb
@kb.dumper('demosettings')
def dump_hello(kb_app: kb, sphinx_env: BuildEnvironment):
    """Return the 'demosettings' payload derived from the kaybee settings."""
    use_debug = sphinx_env.app.config['kaybee_settings'].debugdumper.use_debug
    return {'demosettings': {'using_demo': use_debug}}
| from sphinx.environment import BuildEnvironment
from kaybee.app import kb
@kb.dumper('demosettings')
def dump_hello(kb_app: kb, sphinx_env: BuildEnvironment):
settings = sphinx_env.app.config['kaybee_settings']
use_debug = settings.debugdumper.use_debug
return dict(
demosettings=dict(using_demo=use_debug)
) | none | 1 | 1.922214 | 2 | |
binarysearch/Balanced-Brackets-Sequel.py | UserBlackBox/competitive-programming | 0 | 6621929 | # https://binarysearch.com/problems/Balanced-Brackets-Sequel
class Solution:
def solve(self, s):
brackets = ""
for i in range(len(s)):
if s[i] in ['{','(','[']:
brackets += s[i]
if s[i] in ['}',')',']']:
try:
if s[i] == '}' and brackets[-1] == '{':
brackets = brackets[:-1]
elif s[i] == ')' and brackets[-1] == '(':
brackets = brackets[:-1]
elif s[i] == ']' and brackets[-1] == '[':
brackets = brackets[:-1]
else:
return False
except IndexError:
return False
return brackets==""
| # https://binarysearch.com/problems/Balanced-Brackets-Sequel
class Solution:
def solve(self, s):
brackets = ""
for i in range(len(s)):
if s[i] in ['{','(','[']:
brackets += s[i]
if s[i] in ['}',')',']']:
try:
if s[i] == '}' and brackets[-1] == '{':
brackets = brackets[:-1]
elif s[i] == ')' and brackets[-1] == '(':
brackets = brackets[:-1]
elif s[i] == ']' and brackets[-1] == '[':
brackets = brackets[:-1]
else:
return False
except IndexError:
return False
return brackets==""
| en | 0.708078 | # https://binarysearch.com/problems/Balanced-Brackets-Sequel | 3.80779 | 4 |
Preprocessing.py | sdhayalk/Invasive_Species_Monitoring | 6 | 6621930 | <filename>Preprocessing.py
import os
import cv2
def resize_all_images(directory, d1, d2):
for current_dir in os.walk(directory):
for current_file in current_dir[2]:
current_path_with_file = directory + "/" + current_file
img = cv2.imread(current_path_with_file)
resized_img = cv2.resize(img, (d1, d2))
cv2.imwrite(current_path_with_file, resized_img)
def rotate_image(directory_in, directory_out, degree):
for current_dir in os.walk(directory_in):
for current_file in current_dir[2]:
current_path_with_file = directory_in + "/" + current_file
img = cv2.imread(current_path_with_file)
num_rows, num_cols = img.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((num_cols / 2, num_rows / 2), degree, 1)
img = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows))
cv2.imwrite(directory_out + "/" + current_file, img)
def flip_image_vertically(directory_in, directory_out):
for current_dir in os.walk(directory_in):
for current_file in current_dir[2]:
current_path_with_file = directory_in + "/" + current_file
img = cv2.imread(current_path_with_file)
img = cv2.flip(img, 1)
cv2.imwrite(directory_out + "/" + current_file, img)
# resize_all_images('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 224, 224)
# resize_all_images('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 224, 224)
if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_90'):
os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_90')
rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_90', 90)
# if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_90'):
# os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_90')
# rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_90', 90)
if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_180'):
os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_180')
rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_180', 180)
# if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_180'):
# os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_180')
# rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_180', 180)
if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_270'):
os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_270')
rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_270', 270)
# if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_270'):
# os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_270')
# rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_270', 270)
if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_flip'):
os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_flip')
flip_image_vertically('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_flip')
# if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_flip'):
# os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_flip')
# flip_image_vertically('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_flip') | <filename>Preprocessing.py
import os
import cv2
def resize_all_images(directory, d1, d2):
for current_dir in os.walk(directory):
for current_file in current_dir[2]:
current_path_with_file = directory + "/" + current_file
img = cv2.imread(current_path_with_file)
resized_img = cv2.resize(img, (d1, d2))
cv2.imwrite(current_path_with_file, resized_img)
def rotate_image(directory_in, directory_out, degree):
for current_dir in os.walk(directory_in):
for current_file in current_dir[2]:
current_path_with_file = directory_in + "/" + current_file
img = cv2.imread(current_path_with_file)
num_rows, num_cols = img.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((num_cols / 2, num_rows / 2), degree, 1)
img = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows))
cv2.imwrite(directory_out + "/" + current_file, img)
def flip_image_vertically(directory_in, directory_out):
for current_dir in os.walk(directory_in):
for current_file in current_dir[2]:
current_path_with_file = directory_in + "/" + current_file
img = cv2.imread(current_path_with_file)
img = cv2.flip(img, 1)
cv2.imwrite(directory_out + "/" + current_file, img)
# resize_all_images('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 224, 224)
# resize_all_images('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 224, 224)
if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_90'):
os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_90')
rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_90', 90)
# if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_90'):
# os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_90')
# rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_90', 90)
if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_180'):
os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_180')
rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_180', 180)
# if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_180'):
# os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_180')
# rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_180', 180)
if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_270'):
os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_270')
rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_270', 270)
# if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_270'):
# os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_270')
# rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_270', 270)
if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_flip'):
os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_flip')
flip_image_vertically('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train_flip')
# if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_flip'):
# os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_flip')
# flip_image_vertically('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_flip') | en | 0.473009 | # resize_all_images('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/train', 224, 224) # resize_all_images('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 224, 224) # if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_90'): # os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_90') # rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_90', 90) # if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_180'): # os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_180') # rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_180', 180) # if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_270'): # os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_270') # rotate_image('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_270', 270) # if not os.path.exists('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_flip'): # os.makedirs('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_flip') # flip_image_vertically('G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test', 'G:/Sahil/MS in US/ASU/CRS Lab/InvasiveSpecies/test_flip') | 2.935896 | 3 |
data_processing/pyscripts/process_dataset.py | yimengmin/wiki-cs-dataset | 30 | 6621931 | """
Subroutines to turn the graph dataset into vectorised form and output JSON
metadata, specifying data splits and vectorising article text features.
"""
import numpy as np
import json
import random
import sys
import pickle
import os
import word_frequencies
def label_set(nodes):
    """
    Collect every distinct label that is used by at least one node.
    """
    labels = set()
    for node in nodes.values():
        labels.add(node.label)
    return labels
def add_binary_word_vectors(nodes, words):
    """Append a bag-of-words indicator vector to every node's feature vector.

    For each node, one boolean per entry of `words`: True iff that word
    occurs among the node's tokens.
    """
    for node in nodes.values():
        present = set(node.tokens)  # O(1) membership tests
        indicator = np.array([w in present for w in words])
        node.vector = np.concatenate((node.vector, indicator))
def add_glove_word_vectors(nodes, glove_dict, words_whitelist=None):
    """Append the mean GloVe embedding of each node's tokens to its vector.

    Tokens missing from glove_dict (or excluded by words_whitelist) contribute
    nothing. Nodes whose tokens match no embedding end up with a zero vector
    and their titles are reported on stdout.
    """
    dim = len(next(iter(glove_dict.values())))
    zeros = []
    for node in nodes.values():
        total = np.zeros(dim)  # renamed from 'sum', which shadowed the builtin
        for t in node.tokens:
            if (t in glove_dict and
                    (words_whitelist is None or t in words_whitelist)):
                total += glove_dict[t]
        if np.linalg.norm(total) == 0.0:
            zeros.append(node.title)
        # BUG FIX: guard against ZeroDivisionError when a node has no tokens;
        # such a node simply gets a zero embedding appended.
        node.vector = np.concatenate((node.vector, total / max(len(node.tokens), 1)))
    print(len(zeros), 'nodes with no words in glove dict:', zeros)
def load_glove_dict(filename, relevant_words=None):
    """Parse a GloVe text embedding file into ``{word: np.ndarray}``.

    Each line is ``word w1 w2 ... wn``. If ``relevant_words`` is given, only
    words contained in it are kept, bounding memory for large GloVe files.
    """
    result = {}
    # Renamed the file handle (was 'input', which shadowed the builtin).
    with open(filename, 'r', encoding='utf8') as glove_file:
        for line in glove_file:
            parts = line.split(' ')
            word = parts[0]
            if relevant_words is None or word in relevant_words:
                # float() tolerates the trailing newline on the last field.
                result[word] = np.array([float(x) for x in parts[1:]])
    return result
def raw_data_dict(node):
    """Serialise a node into a plain dict of its raw article metadata."""
    fields = ('id', 'title', 'label', 'outlinks', 'tokens')
    return {name: getattr(node, name) for name in fields}
def output_data(nodes, vectors_outfile, raw_data_outfile, train_ratio=0.05,
                test_ratio=0.5, stopping_ratio = 0.3, n_train_splits = 20,
                seed=42):
    """Write the vectorised dataset plus raw metadata as two JSON files.

    Produces one shared test split and ``n_train_splits`` independent
    train/stopping/validation splits, each expressed as a boolean mask over
    a fixed node ordering. Splits are stratified per label and deterministic
    for a given seed.

    nodes: dict id -> node object (needs .id, .label, .vector, .outlinks
        plus whatever raw_data_dict reads).
    vectors_outfile: path for numeric data (features/labels/links/masks).
    raw_data_outfile: path for human-readable metadata.
    train_ratio / test_ratio / stopping_ratio: per-label fractions; whatever
        remains of the non-test ("visible") nodes becomes validation.
    """
    # Private RNG instance so the split is reproducible and does not disturb
    # the global random state.
    rnd = random.Random(seed)
    labels = list(label_set(nodes))
    # Group node ids per label so each split can be stratified.
    node_ids_for_labels = {lab: [] for lab in labels}
    all_ids_list = []
    for node in nodes.values():
        node_ids_for_labels[node.label].append(node.id)
        all_ids_list.append(node.id)
    # One shared test set; train/stopping/val differ per split index.
    test_ids = set()
    train_sets = [set() for _ in range(n_train_splits)]
    stopping_sets = [set() for _ in range(n_train_splits)]
    val_sets = [set() for _ in range(n_train_splits)]
    for lab in labels:
        ids = node_ids_for_labels[lab]
        rnd.shuffle(ids)
        n_train = int(train_ratio*len(ids))
        n_test = int(test_ratio*len(ids))
        n_stopping = int(stopping_ratio*len(ids))
        # First n_test shuffled ids of this label go to the shared test set.
        test_ids.update(ids[:n_test])
        visible_ids = ids[n_test:]
        # Reshuffle the visible ids for every split so the splits differ.
        for i in range(n_train_splits):
            rnd.shuffle(visible_ids)
            train_sets[i].update(visible_ids[:n_train])
            stopping_sets[i].update(visible_ids[n_train : (n_train+n_stopping)])
            val_sets[i].update(visible_ids[n_train+n_stopping:])
    # Dense re-indexing: maps an original node id to its position in
    # all_ids_list; link targets below are expressed in these new indices.
    remap_node_ids = {old_id: new_id for new_id, old_id in enumerate(all_ids_list)}
    test_mask = [(id in test_ids) for id in all_ids_list]
    train_masks = [
        [id in train_sets[i] for id in all_ids_list]
        for i in range(n_train_splits)
    ]
    stopping_masks = [
        [id in stopping_sets[i] for id in all_ids_list]
        for i in range(n_train_splits)
    ]
    val_masks = [
        [id in val_sets[i] for id in all_ids_list]
        for i in range(n_train_splits)
    ]
    node_features = [nodes[id].vector.tolist() for id in all_ids_list]
    label_ids = {lab: i for i,lab in enumerate(labels)}
    labels_vec = [label_ids[nodes[id].label] for id in all_ids_list]
    links = [
        [remap_node_ids[nb] for nb in nodes[id].outlinks]
        for id in all_ids_list
    ]
    vector_data = {
        'features': node_features,
        'labels': labels_vec,
        'links': links,
        'train_masks': train_masks,
        'stopping_masks': stopping_masks,
        'val_masks': val_masks,
        'test_mask': test_mask
    }
    raw_metadata = {
        'labels': {i: lab for i,lab in enumerate(labels)},
        'nodes': [raw_data_dict(nodes[id]) for id in all_ids_list]
    }
    # NOTE(review): these open() handles are never closed explicitly; CPython
    # closes them at GC time, but a `with` block would be safer.
    json.dump(vector_data, open(vectors_outfile, 'w'))
    json.dump(raw_metadata, open(raw_data_outfile, 'w'))
def process_with_glove_vectors(data_dir, glove_file):
    """Load the pickled dataset, attach GloVe features, write the JSON outputs.

    data_dir: directory containing 'fulldata.pickle'; outputs (vectors.json,
        readable.json) are written back into the same directory.
    glove_file: path to a GloVe text embedding file.
    """
    # BUG FIX: open the pickle inside a context manager so the file handle is
    # always closed (the original leaked it).
    with open(os.path.join(data_dir, 'fulldata.pickle'), 'rb') as f:
        data = pickle.load(f)
    # Select set of words that appear at all in the dataset so only the
    # relevant rows of the (large) GloVe file are kept in memory.
    freqs = word_frequencies.dataset_word_frequencies(data)
    words = freqs.keys()
    glove = load_glove_dict(glove_file, relevant_words=words)
    add_glove_word_vectors(data, glove)
    output_data(data,
                os.path.join(data_dir, 'vectors.json'),
                os.path.join(data_dir, 'readable.json'))
if __name__ == '__main__':
    # CLI entry point. Usage: process_dataset.py <data_dir> <glove_file>
    process_with_glove_vectors(sys.argv[1], sys.argv[2])
| """
Subroutines to turn the graph dataset into vectorised form and output JSON
metadata, specifying data splits and vectorising article text features.
"""
import numpy as np
import json
import random
import sys
import pickle
import os
import word_frequencies
def label_set(nodes):
"""
Get the set of labels applied to at least one node.
"""
return {n.label for n in nodes.values()}
def add_binary_word_vectors(nodes, words):
for id,node in nodes.items():
token_set = set(node.tokens)
node.vector = np.concatenate(
(node.vector,
np.array([bool(word in token_set) for word in words]))
)
def add_glove_word_vectors(nodes, glove_dict, words_whitelist=None):
zeros = []
for id,node in nodes.items():
sum = np.zeros(len(next(iter(glove_dict.values()))))
for t in node.tokens:
if (t in glove_dict and
(words_whitelist is None or t in words_whitelist)):
sum += glove_dict[t]
if np.linalg.norm(sum) == 0.0:
zeros += [node.title];
node.vector = np.concatenate((node.vector, sum/len(node.tokens)))
print(len(zeros), 'nodes with no words in glove dict:', zeros)
def load_glove_dict(filename, relevant_words=None):
result = {}
with open(filename, 'r', encoding='utf8') as input:
for line in input:
l = line.split(' ')
word = l[0]
if relevant_words is None or word in relevant_words:
weights = np.array([float(x) for x in l[1:]])
result[word] = weights
return result
def raw_data_dict(node):
return {
'id': node.id,
'title': node.title,
'label': node.label,
'outlinks': node.outlinks,
'tokens': node.tokens
}
def output_data(nodes, vectors_outfile, raw_data_outfile, train_ratio=0.05,
test_ratio=0.5, stopping_ratio = 0.3, n_train_splits = 20,
seed=42):
rnd = random.Random(seed)
labels = list(label_set(nodes))
node_ids_for_labels = {lab: [] for lab in labels}
all_ids_list = []
for node in nodes.values():
node_ids_for_labels[node.label].append(node.id)
all_ids_list.append(node.id)
test_ids = set()
train_sets = [set() for _ in range(n_train_splits)]
stopping_sets = [set() for _ in range(n_train_splits)]
val_sets = [set() for _ in range(n_train_splits)]
for lab in labels:
ids = node_ids_for_labels[lab]
rnd.shuffle(ids)
n_train = int(train_ratio*len(ids))
n_test = int(test_ratio*len(ids))
n_stopping = int(stopping_ratio*len(ids))
test_ids.update(ids[:n_test])
visible_ids = ids[n_test:]
for i in range(n_train_splits):
rnd.shuffle(visible_ids)
train_sets[i].update(visible_ids[:n_train])
stopping_sets[i].update(visible_ids[n_train : (n_train+n_stopping)])
val_sets[i].update(visible_ids[n_train+n_stopping:])
remap_node_ids = {old_id: new_id for new_id, old_id in enumerate(all_ids_list)}
test_mask = [(id in test_ids) for id in all_ids_list]
train_masks = [
[id in train_sets[i] for id in all_ids_list]
for i in range(n_train_splits)
]
stopping_masks = [
[id in stopping_sets[i] for id in all_ids_list]
for i in range(n_train_splits)
]
val_masks = [
[id in val_sets[i] for id in all_ids_list]
for i in range(n_train_splits)
]
node_features = [nodes[id].vector.tolist() for id in all_ids_list]
label_ids = {lab: i for i,lab in enumerate(labels)}
labels_vec = [label_ids[nodes[id].label] for id in all_ids_list]
links = [
[remap_node_ids[nb] for nb in nodes[id].outlinks]
for id in all_ids_list
]
vector_data = {
'features': node_features,
'labels': labels_vec,
'links': links,
'train_masks': train_masks,
'stopping_masks': stopping_masks,
'val_masks': val_masks,
'test_mask': test_mask
}
raw_metadata = {
'labels': {i: lab for i,lab in enumerate(labels)},
'nodes': [raw_data_dict(nodes[id]) for id in all_ids_list]
}
json.dump(vector_data, open(vectors_outfile, 'w'))
json.dump(raw_metadata, open(raw_data_outfile, 'w'))
def process_with_glove_vectors(data_dir, glove_file):
data = pickle.load(open(os.path.join(data_dir, 'fulldata.pickle'), 'rb'))
# Select set of words that appear at all in dataset
freqs = word_frequencies.dataset_word_frequencies(data)
words = freqs.keys()
glove = load_glove_dict(glove_file, relevant_words=words)
add_glove_word_vectors(data, glove)
output_data(data,
os.path.join(data_dir, 'vectors.json'),
os.path.join(data_dir, 'readable.json'))
if __name__ == '__main__':
process_with_glove_vectors(sys.argv[1], sys.argv[2])
| en | 0.834403 | Subroutines to turn the graph dataset into vectorised form and output JSON metadata, specifying data splits and vectorising article text features. Get the set of labels applied to at least one node. # Select set of words that appear at all in dataset | 2.826252 | 3 |
fifa/player.py | ZhihaoXu/fifaweb | 0 | 6621932 | <reponame>ZhihaoXu/fifaweb
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for, Response, request, jsonify,json
)
from werkzeug.exceptions import abort
from werkzeug.security import check_password_hash, generate_password_hash
# from photo.auth import login_required
from fifa.db import get_db
bp = Blueprint('player', __name__)
# @bp.route('/player/index', methods=('GET', 'POST'))
@bp.route('/<int:id>/score')
def player_score(id):
    """Return the six headline rating scores for one player as a JSON array."""
    cursor = get_db().cursor()
    cursor.execute(
        "SELECT pace_score,shooting_score,passing_score,dribbling_score,defending_score,physical_score"
        " FROM rating"
        " WHERE ID = %s",id
    )
    row = cursor.fetchone()
    # Emit values in SELECT order (the dict cursor preserves column order).
    return jsonify([row[column] for column in row])
@bp.route('/<int:id>/passing')
def passing_func(id):
    """Return the six passing sub-attributes for one player as a JSON array."""
    cursor = get_db().cursor()
    cursor.execute(
        "SELECT vision, crossing, FK_accuracy, short_passing, long_passing, curve"
        " FROM passing"
        " WHERE ID = %s", id
    )
    row = cursor.fetchone()
    # Emit values in SELECT order (the dict cursor preserves column order).
    return jsonify([row[column] for column in row])
@bp.route('/<int:id>/shooting')
def shooting_func(id):
    """Return the six shooting sub-attributes for one player as a JSON array."""
    cursor = get_db().cursor()
    cursor.execute(
        "SELECT positioning, finishing, shot_power, long_shots, volleys, penalties"
        " FROM shooting"
        " WHERE ID = %s", id
    )
    row = cursor.fetchone()
    # Emit values in SELECT order (the dict cursor preserves column order).
    return jsonify([row[column] for column in row])
@bp.route('/<int:id>/defending')
def defending_func(id):
    """Return the five defending sub-attributes for one player as a JSON array."""
    cursor = get_db().cursor()
    cursor.execute(
        "SELECT interceptions, headingaccuracy, marking, standing_tackle, sliding_tackle"
        " FROM defending"
        " WHERE ID = %s", id
    )
    row = cursor.fetchone()
    # Emit values in SELECT order (the dict cursor preserves column order).
    return jsonify([row[column] for column in row])
@bp.route('/<int:id>/dribbling2')
def dribbling2_func(id):
    """Return the six dribbling sub-attributes for one player as a JSON array."""
    cursor = get_db().cursor()
    cursor.execute(
        "SELECT agility, balance, reactions, ball_control, dribbling,composure"
        " FROM dribbling2"
        " WHERE ID = %s", id
    )
    row = cursor.fetchone()
    # Emit values in SELECT order (the dict cursor preserves column order).
    return jsonify([row[column] for column in row])
@bp.route('/<int:id>/physical')
def physical_func(id):
    """Return the four physical sub-attributes for one player as a JSON array."""
    cursor = get_db().cursor()
    cursor.execute(
        "SELECT jumping, stamina, strength, aggression"
        " FROM physical"
        " WHERE ID = %s", id
    )
    row = cursor.fetchone()
    # Emit values in SELECT order (the dict cursor preserves column order).
    return jsonify([row[column] for column in row])
@bp.route('/<int:id>/gk')
def gk_func(id):
    """Return the four goalkeeper sub-attributes for one player as a JSON array."""
    cursor = get_db().cursor()
    cursor.execute(
        "SELECT GK_handling, GK_kicking, GK_positioning, GK_reflexes"
        " FROM GK"
        " WHERE ID = %s", id
    )
    row = cursor.fetchone()
    # Emit values in SELECT order (the dict cursor preserves column order).
    return jsonify([row[column] for column in row])
@bp.route('/<int:id>/player', methods=('GET', 'POST'))
def index(id):
    """Render the player detail page.

    Joins the player row with its nation (flag/nationality) and club
    (name/logo) and hands the combined record to the template.
    Removed the large blocks of dead commented-out code the original carried.
    """
    g.current = "player"
    db = get_db()
    cursor = db.cursor()
    cursor.execute(
        "SELECT p.id id, p.age age,p.club_id club_id, p.photo photo, p.name name, p.position position, n.flag flag, n.nationality nationality, p.value value, p.wage wage, p.overall overall, p.potential potential, c.club_name club_name, c.club_logo logo"
        " FROM player p, nation n, team c "
        " WHERE p.nation_id = n.nation_id AND p.club_id=c.club_id AND p.id = %s", id
    )
    player_detail = cursor.fetchone()
    # Normalise market value to one decimal place for display.
    player_detail["value"] = '%.1f' % (player_detail["value"])
    return render_template('player.html', id = id, player_detail = player_detail)
# def get_player(id, position, check_author=True):
# db = get_db()
# cursor = db.cursor()
# # position = "".join(position.split()) ## remove space
# sql = ("SELECT * FROM %s WHERE id = '%d'" % (position, id,))
# cursor.execute(sql)
# players = cursor.fetchone()
# cursor.execute("SELECT phone FROM %s_phone WHERE id = '%d'" % (position, id,))
# phone = cursor.fetchone()
# if phone == None:
# players['phone'] = None
# else:
# players['phone'] = phone['phone']
# if players is None:
# abort(404, "Post id {0} doesn't exist.".format(id))
# if check_author and players['id'] != g.user['id']:
# abort(403)
# return players
# @bp.route('/<int:id>/<string:position>/player/update', methods=('GET', 'POST'))
# @login_required
# def update(id, position):
# g.current = "player"
# players = get_player(id, position)
# if players['position'] == 'aftereffect':
# players['position'] = 'After Effect'
# if players['position'] == 'devicemanager':
# players['position'] = 'Device Manager'
# if players['position'] == 'projectmanager':
# players['position'] = 'Project Manager'
# if players['position'] == 'photographer':
# players['position'] = 'Photographer'
# if request.method == 'POST':
# username = request.form['username']
# birthday = request.form['birthday']
# phone = request.form['phone']
# password = request.form['password']
# password2 = request.form['password2']
# home = request.form['address']
# username = str(username)
# birthday = str(birthday)
# phone = str(phone)
# password = str(password)
# password2 = str(<PASSWORD>)
# error = None
# if not username:
# error = 'Username is required.'
# if password != <PASSWORD>:
# error = 'Password is not consistent'
# if not (len(phone) == 11 or len(phone) == 8) or not phone.isdigit():
# error = 'Incorrect phone'
# if error is not None:
# flash(error)
# return render_template('player/player_update.html', players=players, error = error)
# else:
# db = get_db()
# cursor = db.cursor()
# cursor.execute("DELETE FROM %s_phone WHERE id = '%d'" % (position, id))
# cursor.execute(
# "UPDATE %s SET username = '%s', birthday = '%s', password = <PASSWORD>', home = '%s'"
# " WHERE id = '%d'" % \
# (position, username, birthday, generate_password_hash(password), home, id)
# )
# cursor.execute("INSERT INTO %s_phone(id, phone) VALUES ('%d', '%s')" % (position, id, phone))
# db.commit()
# return redirect(url_for('player.index', id=id, position=position))
# return render_template('player/player_update.html', players=players, error = error)
# return render_template('player/player_update.html', players=players)
# @bp.route('/<int:id>/player/delete', methods=('POST',))
# @login_required
# def delete(id):
# position = 'photographer' # just in case
# g.current = "player"
# get_player(id, position)
# db = get_db()
# cursor = db.cursor()
# cursor.execute("DELETE FROM post WHERE id = '%d'" % (id,))
# db.commit()
# return redirect(url_for('dashboard.index')) | from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for, Response, request, jsonify,json
)
from werkzeug.exceptions import abort
from werkzeug.security import check_password_hash, generate_password_hash
# from photo.auth import login_required
from fifa.db import get_db
bp = Blueprint('player', __name__)
# @bp.route('/player/index', methods=('GET', 'POST'))
@bp.route('/<int:id>/score')
def player_score(id):
db = get_db()
cursor = db.cursor()
cursor.execute(
"SELECT pace_score,shooting_score,passing_score,dribbling_score,defending_score,physical_score"
" FROM rating"
" WHERE ID = %s",id
)
player = cursor.fetchone()
player_score = []
for key in player:
player_score.append(player[key])
return jsonify(player_score)
@bp.route('/<int:id>/passing')
def passing_func(id):
db = get_db()
cursor = db.cursor()
cursor.execute(
"SELECT vision, crossing, FK_accuracy, short_passing, long_passing, curve"
" FROM passing"
" WHERE ID = %s", id
)
passing = cursor.fetchone()
passing_list = []
for key in passing:
passing_list.append(passing[key])
return jsonify(passing_list)
@bp.route('/<int:id>/shooting')
def shooting_func(id):
db = get_db()
cursor = db.cursor()
cursor.execute(
"SELECT positioning, finishing, shot_power, long_shots, volleys, penalties"
" FROM shooting"
" WHERE ID = %s", id
)
shooting = cursor.fetchone()
shooting_list = []
for key in shooting:
shooting_list.append(shooting[key])
return jsonify(shooting_list)
@bp.route('/<int:id>/defending')
def defending_func(id):
db = get_db()
cursor = db.cursor()
cursor.execute(
"SELECT interceptions, headingaccuracy, marking, standing_tackle, sliding_tackle"
" FROM defending"
" WHERE ID = %s", id
)
defending = cursor.fetchone()
defending_list = []
for key in defending:
defending_list.append(defending[key])
return jsonify(defending_list)
@bp.route('/<int:id>/dribbling2')
def dribbling2_func(id):
db = get_db()
cursor = db.cursor()
cursor.execute(
"SELECT agility, balance, reactions, ball_control, dribbling,composure"
" FROM dribbling2"
" WHERE ID = %s", id
)
dribbling2 = cursor.fetchone()
dribbling2_list = []
for key in dribbling2:
dribbling2_list.append(dribbling2[key])
return jsonify(dribbling2_list)
@bp.route('/<int:id>/physical')
def physical_func(id):
db = get_db()
cursor = db.cursor()
cursor.execute(
"SELECT jumping, stamina, strength, aggression"
" FROM physical"
" WHERE ID = %s", id
)
physical = cursor.fetchone()
physical_list = []
for key in physical:
physical_list.append(physical[key])
return jsonify(physical_list)
@bp.route('/<int:id>/gk')
def gk_func(id):
db = get_db()
cursor = db.cursor()
cursor.execute(
"SELECT GK_handling, GK_kicking, GK_positioning, GK_reflexes"
" FROM GK"
" WHERE ID = %s", id
)
gk = cursor.fetchone()
gk_list = []
for key in gk:
gk_list.append(gk[key])
return jsonify(gk_list)
@bp.route('/<int:id>/player', methods=('GET', 'POST'))
def index(id):
g.current = "player"
db = get_db()
cursor = db.cursor()
cursor.execute(
"SELECT p.id id, p.age age,p.club_id club_id, p.photo photo, p.name name, p.position position, n.flag flag, n.nationality nationality, p.value value, p.wage wage, p.overall overall, p.potential potential, c.club_name club_name, c.club_logo logo"
" FROM player p, nation n, team c "
" WHERE p.nation_id = n.nation_id AND p.club_id=c.club_id AND p.id = %s", id
)
player_detail = cursor.fetchone()
player_detail["value"] = '%.1f' % (player_detail["value"])
# cursor.execute(
# "SELECT pace_score,shooting_score,passing_score,dribbling_score,defending_score,physical_score,GK_score"
# " FROM rating"
# " WHERE ID = 231747"
# )
# player = cursor.fetchone()
# cursor.execute("SELECT phone FROM %s_phone WHERE id = '%d'" % (position, id,))
# phone = cursor.fetchone()
# if phone == None:
# cursor.execute("SELECT pos.id, pos.position position, pos.username username, pos.level level, "
# "pos.birthday birthday, pos.home home"
# " FROM %s pos"
# " WHERE pos.id = '%d'" % (position, id,))
# players = cursor.fetchone()
# players['phone'] = None
# else:
# cursor.execute("SELECT pos.id, pos.position position, pos.username username, pos.level level, "
# "pos.birthday birthday, pos.home home, MAX(phone.phone) phone"
# " FROM %s pos, %s_phone phone"
# " WHERE pos.id = '%d' AND"
# " pos.id = phone.id" % (position, position, id,))
# players = cursor.fetchone()
return render_template('player.html', id = id, player_detail = player_detail)
# def get_player(id, position, check_author=True):
# db = get_db()
# cursor = db.cursor()
# # position = "".join(position.split()) ## remove space
# sql = ("SELECT * FROM %s WHERE id = '%d'" % (position, id,))
# cursor.execute(sql)
# players = cursor.fetchone()
# cursor.execute("SELECT phone FROM %s_phone WHERE id = '%d'" % (position, id,))
# phone = cursor.fetchone()
# if phone == None:
# players['phone'] = None
# else:
# players['phone'] = phone['phone']
# if players is None:
# abort(404, "Post id {0} doesn't exist.".format(id))
# if check_author and players['id'] != g.user['id']:
# abort(403)
# return players
# @bp.route('/<int:id>/<string:position>/player/update', methods=('GET', 'POST'))
# @login_required
# def update(id, position):
# g.current = "player"
# players = get_player(id, position)
# if players['position'] == 'aftereffect':
# players['position'] = 'After Effect'
# if players['position'] == 'devicemanager':
# players['position'] = 'Device Manager'
# if players['position'] == 'projectmanager':
# players['position'] = 'Project Manager'
# if players['position'] == 'photographer':
# players['position'] = 'Photographer'
# if request.method == 'POST':
# username = request.form['username']
# birthday = request.form['birthday']
# phone = request.form['phone']
# password = request.form['password']
# password2 = request.form['password2']
# home = request.form['address']
# username = str(username)
# birthday = str(birthday)
# phone = str(phone)
# password = str(password)
# password2 = str(<PASSWORD>)
# error = None
# if not username:
# error = 'Username is required.'
# if password != <PASSWORD>:
# error = 'Password is not consistent'
# if not (len(phone) == 11 or len(phone) == 8) or not phone.isdigit():
# error = 'Incorrect phone'
# if error is not None:
# flash(error)
# return render_template('player/player_update.html', players=players, error = error)
# else:
# db = get_db()
# cursor = db.cursor()
# cursor.execute("DELETE FROM %s_phone WHERE id = '%d'" % (position, id))
# cursor.execute(
# "UPDATE %s SET username = '%s', birthday = '%s', password = <PASSWORD>', home = '%s'"
# " WHERE id = '%d'" % \
# (position, username, birthday, generate_password_hash(password), home, id)
# )
# cursor.execute("INSERT INTO %s_phone(id, phone) VALUES ('%d', '%s')" % (position, id, phone))
# db.commit()
# return redirect(url_for('player.index', id=id, position=position))
# return render_template('player/player_update.html', players=players, error = error)
# return render_template('player/player_update.html', players=players)
# @bp.route('/<int:id>/player/delete', methods=('POST',))
# @login_required
# def delete(id):
# position = 'photographer' # just in case
# g.current = "player"
# get_player(id, position)
# db = get_db()
# cursor = db.cursor()
# cursor.execute("DELETE FROM post WHERE id = '%d'" % (id,))
# db.commit()
# return redirect(url_for('dashboard.index')) | en | 0.58413 | # from photo.auth import login_required # @bp.route('/player/index', methods=('GET', 'POST')) # cursor.execute( # "SELECT pace_score,shooting_score,passing_score,dribbling_score,defending_score,physical_score,GK_score" # " FROM rating" # " WHERE ID = 231747" # ) # player = cursor.fetchone() # cursor.execute("SELECT phone FROM %s_phone WHERE id = '%d'" % (position, id,)) # phone = cursor.fetchone() # if phone == None: # cursor.execute("SELECT pos.id, pos.position position, pos.username username, pos.level level, " # "pos.birthday birthday, pos.home home" # " FROM %s pos" # " WHERE pos.id = '%d'" % (position, id,)) # players = cursor.fetchone() # players['phone'] = None # else: # cursor.execute("SELECT pos.id, pos.position position, pos.username username, pos.level level, " # "pos.birthday birthday, pos.home home, MAX(phone.phone) phone" # " FROM %s pos, %s_phone phone" # " WHERE pos.id = '%d' AND" # " pos.id = phone.id" % (position, position, id,)) # players = cursor.fetchone() # def get_player(id, position, check_author=True): # db = get_db() # cursor = db.cursor() # # position = "".join(position.split()) ## remove space # sql = ("SELECT * FROM %s WHERE id = '%d'" % (position, id,)) # cursor.execute(sql) # players = cursor.fetchone() # cursor.execute("SELECT phone FROM %s_phone WHERE id = '%d'" % (position, id,)) # phone = cursor.fetchone() # if phone == None: # players['phone'] = None # else: # players['phone'] = phone['phone'] # if players is None: # abort(404, "Post id {0} doesn't exist.".format(id)) # if check_author and players['id'] != g.user['id']: # abort(403) # return players # @bp.route('/<int:id>/<string:position>/player/update', methods=('GET', 'POST')) # @login_required # def update(id, position): # g.current = "player" # players = get_player(id, position) # if players['position'] == 'aftereffect': # players['position'] = 'After Effect' # if players['position'] == 'devicemanager': # 
players['position'] = 'Device Manager' # if players['position'] == 'projectmanager': # players['position'] = 'Project Manager' # if players['position'] == 'photographer': # players['position'] = 'Photographer' # if request.method == 'POST': # username = request.form['username'] # birthday = request.form['birthday'] # phone = request.form['phone'] # password = request.form['password'] # password2 = request.form['password2'] # home = request.form['address'] # username = str(username) # birthday = str(birthday) # phone = str(phone) # password = str(password) # password2 = str(<PASSWORD>) # error = None # if not username: # error = 'Username is required.' # if password != <PASSWORD>: # error = 'Password is not consistent' # if not (len(phone) == 11 or len(phone) == 8) or not phone.isdigit(): # error = 'Incorrect phone' # if error is not None: # flash(error) # return render_template('player/player_update.html', players=players, error = error) # else: # db = get_db() # cursor = db.cursor() # cursor.execute("DELETE FROM %s_phone WHERE id = '%d'" % (position, id)) # cursor.execute( # "UPDATE %s SET username = '%s', birthday = '%s', password = <PASSWORD>', home = '%s'" # " WHERE id = '%d'" % \ # (position, username, birthday, generate_password_hash(password), home, id) # ) # cursor.execute("INSERT INTO %s_phone(id, phone) VALUES ('%d', '%s')" % (position, id, phone)) # db.commit() # return redirect(url_for('player.index', id=id, position=position)) # return render_template('player/player_update.html', players=players, error = error) # return render_template('player/player_update.html', players=players) # @bp.route('/<int:id>/player/delete', methods=('POST',)) # @login_required # def delete(id): # position = 'photographer' # just in case # g.current = "player" # get_player(id, position) # db = get_db() # cursor = db.cursor() # cursor.execute("DELETE FROM post WHERE id = '%d'" % (id,)) # db.commit() # return redirect(url_for('dashboard.index')) | 2.429289 | 2 |
info/app/Location.py | jaddoueik1/masonite-project | 0 | 6621933 | """Location Model."""
from config.database import Model
class Location(Model):
    """ORM model for a geographic location record (Masonite ORM).

    Mass assignment via create()/update() is restricted to the columns
    listed in ``__fillable__``.
    """
    # NOTE(review): column semantics inferred from names only — the schemas of
    # 'codes' and 'country_code' are not visible here; confirm against the
    # migration before documenting them further.
    __fillable__=['country','longitude','latitude','codes','country_code','continent_code']
from config.database import Model
class Location(Model):
"""Location Model."""
__fillable__=['country','longitude','latitude','codes','country_code','continent_code'] | en | 0.704334 | Location Model. Location Model. | 2.560551 | 3 |
misc/cardGame.py | caro-oviedo/Python- | 0 | 6621934 | NUM_CARDS = 52
def no_high(list_name):
    """
    list_name is a list of strings representing cards.
    Return True if there are no high cards in list_name, False otherwise.
    """
    return not any(card in list_name for card in ("jack", "queen", "king", "ace"))
deck = []
# Read all NUM_CARDS card names from standard input, one per line.
for _ in range(NUM_CARDS):
    deck.append(input())

score_a = 0
score_b = 0
player = "A"
for i in range(NUM_CARDS):
    card = deck[i]
    points = 0
    remaining = NUM_CARDS - i - 1
    # A high card scores only if the next 1-4 cards contain no high card.
    # BUG FIX: the original used '==' (comparison) instead of '=' (assignment)
    # below, so points never changed and nobody could ever score.
    if card == "jack" and remaining >= 1 and no_high(deck[i+1 : i+2]):
        points = 1
    if card == "queen" and remaining >= 2 and no_high(deck[i+1 : i+3]):
        points = 2
    if card == "king" and remaining >= 3 and no_high(deck[i+1 : i+4]):
        points = 3
    if card == "ace" and remaining >= 4 and no_high(deck[i+1 : i+5]):
        points = 4
    if points > 0:
        print(f"Player {player} scores {points} point(s).")
        if player == "A":
            score_a = score_a + points
            player = "B"
        else:
            score_b = score_b + points
            # BUG FIX: the original set player = "B" here too, so the turn
            # never passed back to player A after B scored.
            player = "A"
print(f"Player A: {score_a} point(s).")
print(f"Player B: {score_b} point(s).")
| NUM_CARDS = 52
def no_high(list_name):
"""
list_name is a list of strings representing cards.
Return TRUE if there are no high cards in list_name, False otherwise.
"""
if "jack" in list_name:
return False
if "queen" in list_name:
return False
if "king" in list_name:
return False
if "ace" in list_name:
return False
return True
deck = [ ]
for i in range(NUM_CARDS):
deck.append(input())
score_a = 0
score_b= 0
player = "A"
for i in range(NUM_CARDS):
card = (deck[i])
points = 0
remaining = NUM_CARDS - i - 1
if card == "jack" and remaining >= 1 and no_high(deck[i+1 : i+2]):
points == 1
if card == "queen" and remaining >= 2 and no_high(deck[i+1 : i+3]):
points == 2
if card == "king" and remaining >= 3 and no_high(deck[i+1 : i+4]):
points == 3
if card == "ace" and remaining >= 4 and no_high(deck[i+1 : i+5]):
points == 4
if points > 0:
print(f"Player {player} scores {points} point(s).")
if player == "A":
score_a = score_a + points
player = "B"
else:
score_b = score_b + points
player = "B"
print(f"Player A: {score_a} point(s).")
print(f"Player B: {score_b} point(s).")
| en | 0.859207 | list_name is a list of strings representing cards. Return TRUE if there are no high cards in list_name, False otherwise. | 3.862568 | 4 |
interviewbit/Programming/Binary Search/Count element occurence/solution.py | pablotrinidad/competitive-programming | 0 | 6621935 | <gh_stars>0
"""InterviewBit.
Programming > Binary Search > Cont Element Occurence.
"""
class Solution:
    """Count occurrences of an element in a sorted sequence."""

    # @param A : tuple of integers (sorted ascending)
    # @param B : integer
    # @return an integer
    def findCount(self, A, B):
        """Return the number of occurrences of B in the sorted sequence A.

        Two binary searches locate the first and one-past-last positions of
        B, giving O(log n) time. The original recursion re-scanned both
        halves around every match, degrading to O(n) time while also copying
        slices on every call.
        """
        from bisect import bisect_left, bisect_right
        return bisect_right(A, B) - bisect_left(A, B)
import random  # NOQA

# Demo driver: build a sorted list with a random number of copies of each
# value 1..9, pick a random target, and print how often it occurs.
A = []
print("Content of A:")  # BUG FIX: output typo, was "Contnt of A:"
for i in range(1, 10):
    n = random.randint(1, 100)
    A += [i] * n
    print("\t{} elements added with value {}".format(n, i))
B = random.randint(1, 9)
print("B:", B)

solution = Solution()
print(solution.findCount(A, B))
| """InterviewBit.
Programming > Binary Search > Cont Element Occurence.
"""
class Solution:
"""Solution."""
# @param A : tuple of integers
# @param B : integer
# @return an integer
def findCount(self, A, B):
"""Return the number of occurrences of B in A."""
start = 0
end = len(A) - 1
count = 0
while start <= end:
mid = (end + start) // 2
if A[mid] == B:
count += 1
count += self.findCount(A[mid + 1: end + 1], B)
count += self.findCount(A[start:mid], B)
break
if A[mid] < B:
start = mid + 1
else:
end = mid - 1
return count
import random # NOQA
A = []
print("Contnt of A:")
for i in range(1, 10):
n = random.randint(1, 100)
A += [i] * n
print("\t{} elements added with value {}".format(n, i))
B = random.randint(1, 9)
print("B:", B)
solution = Solution()
print(solution.findCount(A, B)) | en | 0.446482 | InterviewBit. Programming > Binary Search > Cont Element Occurence. Solution. # @param A : tuple of integers # @param B : integer # @return an integer Return the number of occurrences of B in A. # NOQA | 3.543952 | 4 |
pureblog/site/apps/views.py | Lucky4/pureblog | 0 | 6621936 | import datetime
from django.shortcuts import render, get_object_or_404, HttpResponseRedirect
from django.views.generic.list import ListView, MultipleObjectMixin
from django.views.generic.detail import DetailView, SingleObjectMixin
from .models import Article, Category, Tag
class IndexView(ListView, MultipleObjectMixin):
    """Paginated front page listing published articles."""

    template_name = 'apps/index.html'
    context_object_name = 'article_list'
    paginate_by = 2

    def get_queryset(self):
        """Return only articles with the 'published' ('p') status."""
        return Article.objects.filter(status='p')
class ArticleDetailView(DetailView, SingleObjectMixin):
    # Detail page for a single Article; the view counter is bumped per
    # request, throttled by a session timestamp.
    template_name = 'apps/detail.html'
    context_object_name = 'article'
    queryset = Article.objects.all()
    pk_url_kwarg = 'article_id'
    def get(self, request, *args, **kwargs):
        """Serve the article and increment its view counter, using the
        session's 'last_visit' timestamp to decide whether to count."""
        # Timestamp of the visitor's previous hit, stored as str(datetime).
        last_visit = request.session.get('last_visit')
        reset_last_visit_time = False
        if last_visit:
            # "[:-7]" strips the microsecond suffix before parsing.
            last_visit_time = datetime.datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")
            # NOTE(review): .seconds ignores the days component of the
            # timedelta, and "> 0" is true after any full second has
            # elapsed — presumably .total_seconds() with a larger
            # threshold was intended; confirm.
            if (datetime.datetime.utcnow() - last_visit_time).seconds > 0:
                obj = super(ArticleDetailView, self).get_object()
                obj.views = obj.views + 1
                obj.save()
                reset_last_visit_time = True
        else:
            # First visit in this session: start tracking.
            reset_last_visit_time = True
        if reset_last_visit_time:
            request.session['last_visit'] = str(datetime.datetime.utcnow())
        return super(ArticleDetailView, self).get(request, *args, **kwargs)
    def get_object(self, queryset=None):
        """Return the Article being viewed (pass-through override)."""
        object = super(ArticleDetailView, self).get_object()
        return object
    def get_context_data(self, **kwargs):
        """Add the article's tags to the template context."""
        context = super(ArticleDetailView, self).get_context_data(**kwargs)
        object = super(ArticleDetailView, self).get_object()
        context['tags'] = object.tags.all()
        return context
class ArchiveView(ListView, MultipleObjectMixin):
    """Full-width archive page listing every published article."""

    template_name = 'apps/full-width.html'
    context_object_name = 'article_list'
    paginate_by = 10

    def get_queryset(self):
        """Return all articles marked as published ('p')."""
        return Article.objects.filter(status='p')
class CategoryView(ListView, MultipleObjectMixin):
    """Full-width listing of published articles in one category."""

    template_name = 'apps/full-width.html'
    context_object_name = 'article_list'
    paginate_by = 10

    def get_queryset(self):
        """Published articles in the category named in the URL kwargs."""
        category = Category.objects.get(name=self.kwargs['category'])
        return category.article_set.filter(status='p')
class TagView(ListView):
    """Full-width listing of published articles carrying one tag."""

    template_name = 'apps/full-width.html'
    context_object_name = 'article_list'
    paginate_by = 10

    def get_queryset(self):
        """Published articles tagged with the tag named in the URL kwargs."""
        tag = Tag.objects.get(name=self.kwargs['tag'])
        return tag.article_set.filter(status='p')
class DateView(ListView):
    """Full-width monthly archive driven by year/month URL kwargs."""

    template_name = 'apps/full-width.html'
    context_object_name = 'article_list'
    paginate_by = 10

    def get_queryset(self):
        """Return articles created in the month given by the URL.

        BUGFIX: the end of the range was hard-coded to day 31, which
        raises ValueError for any month with fewer than 31 days (e.g.
        February); use calendar.monthrange to get the month's real
        last day.
        """
        import calendar
        year = int(self.kwargs['year'])
        month = int(self.kwargs['month'])
        start_date = datetime.date(year, month, 1)
        last_day = calendar.monthrange(year, month)[1]
        end_date = datetime.date(year, month, last_day)
        return Article.objects.filter(create_time__range=(start_date, end_date))
| import datetime
from django.shortcuts import render, get_object_or_404, HttpResponseRedirect
from django.views.generic.list import ListView, MultipleObjectMixin
from django.views.generic.detail import DetailView, SingleObjectMixin
from .models import Article, Category, Tag
class IndexView(ListView, MultipleObjectMixin):
template_name = 'apps/index.html'
context_object_name = 'article_list'
paginate_by = 2
def get_queryset(self):
article_list = Article.objects.filter(status='p')
return article_list
class ArticleDetailView(DetailView, SingleObjectMixin):
template_name = 'apps/detail.html'
context_object_name = 'article'
queryset = Article.objects.all()
pk_url_kwarg = 'article_id'
def get(self, request, *args, **kwargs):
last_visit = request.session.get('last_visit')
reset_last_visit_time = False
if last_visit:
last_visit_time = datetime.datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")
if (datetime.datetime.utcnow() - last_visit_time).seconds > 0:
obj = super(ArticleDetailView, self).get_object()
obj.views = obj.views + 1
obj.save()
reset_last_visit_time = True
else:
reset_last_visit_time = True
if reset_last_visit_time:
request.session['last_visit'] = str(datetime.datetime.utcnow())
return super(ArticleDetailView, self).get(request, *args, **kwargs)
def get_object(self, queryset=None):
object = super(ArticleDetailView, self).get_object()
return object
def get_context_data(self, **kwargs):
context = super(ArticleDetailView, self).get_context_data(**kwargs)
object = super(ArticleDetailView, self).get_object()
context['tags'] = object.tags.all()
return context
class ArchiveView(ListView, MultipleObjectMixin):
template_name = 'apps/full-width.html'
context_object_name = 'article_list'
paginate_by = 10
def get_queryset(self):
article_list = Article.objects.filter(status='p')
return article_list
class CategoryView(ListView, MultipleObjectMixin):
template_name = 'apps/full-width.html'
context_object_name = 'article_list'
paginate_by = 10
def get_queryset(self):
cat_query = Category.objects.get(name=self.kwargs['category'])
article_list = cat_query.article_set.filter(status='p')
return article_list
class TagView(ListView):
template_name = 'apps/full-width.html'
context_object_name = 'article_list'
paginate_by = 10
def get_queryset(self):
tag_query = Tag.objects.get(name=self.kwargs['tag'])
article_list = tag_query.article_set.filter(status='p')
return article_list
class DateView(ListView):
template_name = 'apps/full-width.html'
context_object_name = 'article_list'
paginate_by = 10
def get_queryset(self):
year = self.kwargs['year']
month = self.kwargs['month']
start_date = datetime.date(int(year), int(month), 1)
end_date = datetime.date(int(year), int(month), 31)
article_list = Article.objects.filter(create_time__range=(start_date, end_date))
return article_list
| none | 1 | 2.157533 | 2 | |
genessa/networks/ratelaws.py | sbernasek/genessa | 2 | 6621937 | import numpy as np
from tabulate import tabulate
class RateLaws:
    """
    Class provides tabulated summary of reaction kinetics.

    Attributes:

        node_key (dict) - maps state dimension (key) to unique node id (value)

        reactions (list) - list of reaction objects

        table (list of lists) - rate law table, row for each reaction
    """

    def __init__(self,
                 node_key,
                 reactions):
        """
        Instantiate rate law table.

        Args:

            node_key (dict) - maps state dimension (key) to node id (value)

            reactions (list) - reaction instances
        """
        self.node_key = node_key
        self.reactions = reactions
        self.build_table()

    def __repr__(self):
        """
        Pretty-print rate law table.
        """
        self.print_table()
        return ''

    def print_table(self):
        """
        Pretty-print rate law table.
        """
        print(tabulate(self.table,
                       headers=["Rxn",
                                "Reactants",
                                "Products",
                                "Propensity",
                                "Parameter"],
                       numalign='center',
                       stralign='center'))

    def build_table(self):
        """
        Build rate law table (one row per reaction).
        """
        self.table = []
        for rxn in self.reactions:
            self.add_rxn_to_table(rxn)

    def add_rxn_to_table(self, rxn):
        """
        Add reaction rate law to table.

        Args:

            rxn (reaction instance)
        """
        # assemble reactants
        reactants = self.assemble_reactants(rxn)

        # assemble products
        products = self.assemble_products(rxn)

        # assemble rate expression
        rate_law = self.assemble_rate_expression(rxn)

        # assemble rate constant
        rate_constant = self.assemble_rate_constant(rxn)

        # assemble sensitivities to environmental conditions
        # NOTE(review): sensitivities are computed but not included in the
        # 5-column table row; kept for parity with the original behavior.
        sensitivities = self.assemble_sensitivities(rxn)

        # set reaction name
        if rxn.name is None:
            name = 'Not Named'
        else:
            name = rxn.name

        # add reaction to table
        self.table.append([name, reactants, products, rate_law, rate_constant])

        # add any repressors for Hill and Coupling reactions
        if rxn.type in ('Hill', 'Coupling'):
            for repressor in rxn.repressors:
                repressor_name = rxn.name + ' repression'
                rate_law = self.get_enzymatic_rate_law(repressor)
                # BUGFIX: repressor rows previously carried 6 entries
                # against a 5-column header, misaligning the table.
                self.table.append([repressor_name, '', '', rate_law, 'NA'])

    def assemble_reactants(self, rxn):
        """
        Assemble comma-separated list of reactant node ids.

        Args:

            rxn (reaction instance)

        Returns:

            reactants (str) - species indices
        """
        reactants = []
        for reactant, coeff in enumerate(rxn.stoichiometry):
            if coeff < 0:
                reactants.append(str(self.node_key[int(reactant)]))
        return ", ".join(reactants)

    def assemble_products(self, rxn):
        """
        Assemble comma-separated list of product node ids.

        Args:

            rxn (reaction instance)

        Returns:

            products (str) - species indices
        """
        products = []
        for product, coeff in enumerate(rxn.stoichiometry):
            if coeff > 0:
                products.append(str(self.node_key[int(product)]))
        return ", ".join(products)

    def assemble_sensitivities(self, rxn):
        """
        Assemble description of sensitivities to environmental conditions.

        Args:

            rxn (reaction instance)

        Returns:

            sensitivities (str) - e.g. 'ATP^2, RPs'
        """
        sensitivities = ''
        if rxn.temperature_sensitive is True:
            pass
        if rxn.atp_sensitive is not False:
            if rxn.atp_sensitive is True or rxn.atp_sensitive == 1:
                sensitivities += 'ATP'
            else:
                sensitivities += 'ATP^{:1.0f}'.format(rxn.atp_sensitive)
        if rxn.ribosome_sensitive is not False:
            if rxn.ribosome_sensitive is True or rxn.ribosome_sensitive == 1:
                sensitivities += ', RPs'
            else:
                sensitivities += ', RPs^{:1.0f}'.format(rxn.ribosome_sensitive)
        return sensitivities

    def assemble_rate_expression(self, rxn):
        """
        Assemble rate expression by dispatching on the reaction type.

        Args:

            rxn (reaction instance)

        Returns:

            rate_expression (str)
        """
        if rxn.type in ('MassAction', 'LinearFeedback'):
            rate_expression = self.get_mass_action_rate_law(rxn)
        elif rxn.type == 'Hill':
            rate_expression = self.get_enzymatic_rate_law(rxn)
        elif rxn.type == 'SumReaction':
            rate_expression = self.get_sum_rxn_rate_law(rxn)
        elif rxn.type == 'Coupling':
            rate_expression = self.get_coupling_rate_law(rxn)
        else:
            rate_expression = 'Unknown Rxn Type'
        return rate_expression

    @staticmethod
    def assemble_rate_constant(rxn):
        """
        Returns rate constant expression for a reaction instance.

        Args:

            rxn (all types)

        Returns:

            rate_constant (str) - rate constant expression
        """
        rate_constant = '{:2.5f}'.format(rxn.k[0])
        if rxn.type == 'Hill':
            # Hill reactions may add input-scaled terms to the constant.
            for i, coeff in enumerate(rxn.rate_modifier):
                if coeff != 0:
                    rate_constant += ' + {:0.1f}[IN_{:d}]'.format(coeff, i)
        return rate_constant

    def get_mass_action_rate_law(self, rxn):
        """
        Returns rate expression for MassAction instance.

        Args:

            rxn (MassAction)

        Returns:

            rate_law (str) - rate expression
        """
        fmt_term = lambda sp: ('[' + str(self.node_key[int(sp)]) + ']')

        # add reactants (each repeated by its propensity coefficient)
        p = [fmt_term(sp) * int(co) for sp, co in enumerate(rxn.propensity)]

        # add input contribution
        input_contribution = ''
        for i, dependence in enumerate(rxn.input_dependence):
            if dependence != 0:
                input_contribution += int(dependence) * '[IN_{:d}]'.format(i)

        # assemble rate law
        rate_law = input_contribution + "".join(str(term) for term in p)
        return rate_law

    def get_enzymatic_rate_law(self, rxn):
        """
        Returns rate expression for Hill or Repressor instance.

        Args:

            rxn (Hill or Repressor)

        Returns:

            rate_law (str) - rate expression
        """
        fmt = lambda sp: '[' + str(self.node_key[int(sp)]) + ']'

        # add substrate contributions (weight prefix omitted for 0/1)
        substrate_contribution = ''
        p = [fmt(sp) if co != 0 else '' for sp, co in enumerate(rxn.propensity)]
        w = [str(int(c)) if c != 0 and c != 1 else '' for c in rxn.propensity]

        # combine weights and inputs
        for i, j in zip(w, p):
            substrate_contribution += (i + j)

        # assemble substrates
        activity = ''
        for i, dependence in enumerate(rxn.input_dependence):
            if dependence != 0:
                if rxn.input_dependence.size == 1:
                    input_name = '[IN]'
                else:
                    input_name = '[IN_{:d}]'.format(i)
                coefficient = ''
                if dependence != 1:
                    coefficient = str(dependence)
                # BUGFIX: both branches of the former `if i == 0` check were
                # identical, so the conditional was dead code.
                activity += (coefficient + input_name)

        if rxn.num_active_species > 0 and len(activity) > 0:
            activity += '+' + substrate_contribution
        elif rxn.num_active_species > 0 and len(activity) == 0:
            activity += substrate_contribution

        # get species/michaelis constant quotient
        if rxn.type == 'Hill':
            quotient = '{:s}/{:s}'.format(str(rxn.k_m), activity)
        else:
            quotient = '{:s}/{:s}'.format(activity, str(rxn.k_m))

        # assemble rate law (exponent only shown when n != 1)
        if rxn.n != 1:
            rate_law = '1 / (1 + ({:s})^{:0.1f})'.format(quotient, rxn.n)
        else:
            # BUGFIX: dropped a stray unused format argument (rxn.n).
            rate_law = '1 / (1 + {:s})'.format(quotient)
        return rate_law

    @staticmethod
    def get_sum_rxn_rate_law(rxn):
        """
        Returns rate expression for SumReaction instance.

        Args:

            rxn (SumReaction)

        Returns:

            rate_law (str) - rate expression
        """
        positive = np.where(rxn.propensity == 1)[0][0]
        negative = np.where(rxn.propensity == -1)[0][0]
        rate_law = '[{:d}] - [{:d}]'.format(positive, negative)
        return rate_law

    @staticmethod
    def get_coupling_rate_law(rxn):
        """
        Returns rate expression for Coupling instance.

        Args:

            rxn (Coupling)

        Returns:

            rate_law (str) - rate expression
        """
        if rxn.propensity.max() == 0:
            rate_law = ''
        else:
            base = np.where(rxn.propensity > 0)[0][0]
            neighbors = np.where(rxn.propensity < 0)[0]
            weight = (rxn.a * rxn.w) / (1 + rxn.w * len(neighbors))
            if len(neighbors) > 1:
                coeff = '{:d}'.format(len(neighbors))
            else:
                coeff = ''
            rate_law = '{:0.3f} x ({:s}[{:d}]'.format(weight, coeff, base)
            for n in neighbors:
                rate_law += ' - [{:d}]'.format(n)
            rate_law += ')'
        return rate_law
| import numpy as np
from tabulate import tabulate
class RateLaws:
"""
Class provides tabulated summary of reaction kinetics.
Attributes:
node_key (dict) - maps state dimension (key) to unique node id (value)
reactions (list) - list of reaction objects
table (list of lists) - rate law table, row for each reaction
"""
def __init__(self,
node_key,
reactions):
"""
Instantiate raw law table.
Args:
node_key (dict) - maps state dimension (key) to node id (value)
reactions (list) - reaction instances
"""
self.node_key = node_key
self.reactions = reactions
self.build_table()
def __repr__(self):
"""
Pretty-print rate law table.
"""
self.print_table()
return ''
def print_table(self):
"""
Pretty-print rate law table.
"""
print(tabulate(self.table,
headers=["Rxn",
"Reactants",
"Products",
"Propensity",
"Parameter"],
numalign='center',
stralign='center'))
def build_table(self):
"""
Build rate law table.
"""
self.table = []
for rxn in self.reactions:
self.add_rxn_to_table(rxn)
def add_rxn_to_table(self, rxn):
"""
Add reaction rate law to table.
Args:
rxn (reaction instance)
"""
# assemble reactants
reactants = self.assemble_reactants(rxn)
# assemble products
products = self.assemble_products(rxn)
# assemble rate expression
rate_law = self.assemble_rate_expression(rxn)
# assemble rate constant
rate_constant = self.assemble_rate_constant(rxn)
# assemble sensitivities to environmental conditions
sensitivities = self.assemble_sensitivities(rxn)
# set reaction name
if rxn.name is None:
name = 'Not Named'
else:
name = rxn.name
# add reaction to table
self.table.append([name, reactants, products, rate_law, rate_constant])
# add any repressors for Hill and Coupling reactions
if rxn.type in ('Hill', 'Coupling'):
for repressor in rxn.repressors:
repressor_name = rxn.name + ' repression'
rate_law = self.get_enzymatic_rate_law(repressor)
self.table.append([repressor_name, '', '', rate_law, '', 'NA'])
def assemble_reactants(self, rxn):
"""
Assemble list of reactants.
Args:
rxn (reaction instance)
Returns:
reactants (str) - species indices
"""
reactants = []
for reactant, coeff in enumerate(rxn.stoichiometry):
if coeff < 0:
reactants.append(str(self.node_key[int(reactant)]))
return ", ".join(reactants)
def assemble_products(self, rxn):
"""
Assemble list of products.
Args:
rxn (reaction instance)
Returns:
products (str) - species indices
"""
products = []
for product, coeff in enumerate(rxn.stoichiometry):
if coeff > 0:
products.append(str(self.node_key[int(product)]))
return ", ".join(products)
def assemble_sensitivities(self, rxn):
"""
Assemble list of sensitivities to environmental conditions.
Args:
rxn (reaction instance)
Returns:
sensitivities (str) - species indices
"""
sensitivities = ''
if rxn.temperature_sensitive is True:
pass
if rxn.atp_sensitive is not False:
if rxn.atp_sensitive is True or rxn.atp_sensitive==1:
sensitivities += 'ATP'
else:
sensitivities += 'ATP^{:1.0f}'.format(rxn.atp_sensitive)
if rxn.ribosome_sensitive is not False:
if rxn.ribosome_sensitive is True or rxn.ribosome_sensitive==1:
sensitivities += ', RPs'
else:
sensitivities += ', RPs^{:1.0f}'.format(rxn.ribosome_sensitive)
return sensitivities
def assemble_rate_expression(self, rxn):
"""
Assemble rate expression.
Args:
rxn (reaction instance)
Returns:
rate_expression (str)
"""
# get reaction type
if rxn.type in ('MassAction', 'LinearFeedback'):
rate_expression = self.get_mass_action_rate_law(rxn)
elif rxn.type == 'Hill':
rate_expression = self.get_enzymatic_rate_law(rxn)
elif rxn.type == 'SumReaction':
rate_expression = self.get_sum_rxn_rate_law(rxn)
elif rxn.type == 'Coupling':
rate_expression = self.get_coupling_rate_law(rxn)
else:
rate_expression = 'Unknown Rxn Type'
return rate_expression
@staticmethod
def assemble_rate_constant(rxn):
"""
Returns rate constant expression for a reaction instance.
Args:
rxn (all types)
Returns:
rate_constant (str) - rate constant expression
"""
rate_constant = '{:2.5f}'.format(rxn.k[0])
if rxn.type == 'Hill':
for i, coeff in enumerate(rxn.rate_modifier):
if coeff != 0:
rate_constant += ' + {:0.1f}[IN_{:d}]'.format(coeff, i)
return rate_constant
def get_mass_action_rate_law(self, rxn):
"""
Returns rate expression for MassAction instance.
Args:
rxn (MassAction)
Returns:
rate_law (str) - rate expression
"""
fmt_term = lambda sp: ('['+str(self.node_key[int(sp)])+']')
# add reactants
p = [fmt_term(sp)*int(co) for sp, co in enumerate(rxn.propensity)]
# add input contribution
input_contribution = ''
for i, dependence in enumerate(rxn.input_dependence):
if dependence != 0:
input_contribution += int(dependence) * '[IN_{:d}]'.format(i)
# assemble rate law
rate_law = input_contribution + "".join(str(term) for term in p)
return rate_law
def get_enzymatic_rate_law(self, rxn):
"""
Returns rate expression for Hill or Repressor instance.
Args:
rxn (Hill or Repressor)
Returns:
rate_law (str) - rate expression
"""
fmt = lambda sp: '['+str(self.node_key[int(sp)])+']'
# add substrate contributions
substrate_contribution = ''
p = [fmt(sp) if co!=0 else '' for sp, co in enumerate(rxn.propensity)]
w = [str(int(c)) if c!=0 and c!=1 else '' for c in rxn.propensity]
# combine weights and inputs
for i, j in zip(w, p):
substrate_contribution += (i+j)
# assemble substrates
activity = ''
for i, dependence in enumerate(rxn.input_dependence):
if dependence != 0:
if rxn.input_dependence.size == 1:
input_name = '[IN]'
else:
input_name = '[IN_{:d}]'.format(i)
coefficient = ''
if dependence != 1:
coefficient = str(dependence)
if i == 0:
activity += (coefficient + input_name)
else:
activity += (coefficient + input_name)
if rxn.num_active_species > 0 and len(activity) > 0:
activity += '+' + substrate_contribution
elif rxn.num_active_species > 0 and len(activity) == 0:
activity += substrate_contribution
# get species/michaelis constant quotient
if rxn.type == 'Hill':
quotient = '{:s}/{:s}'.format(str(rxn.k_m), activity)
else:
quotient = '{:s}/{:s}'.format(activity, str(rxn.k_m))
# assemble rate law
if rxn.n != 1:
rate_law = '1 / (1 + ({:s})^{:0.1f})'.format(quotient, rxn.n)
else:
rate_law = '1 / (1 + {:s})'.format(quotient, rxn.n)
return rate_law
@staticmethod
def get_sum_rxn_rate_law(rxn):
"""
Returns rate expression for SumReaction instance.
Args:
rxn (SumReaction)
Returns:
rate_law (str) - rate expression
"""
positive = np.where(rxn.propensity == 1)[0][0]
negative = np.where(rxn.propensity == -1)[0][0]
rate_law = '[{:d}] - [{:d}]'.format(positive, negative)
return rate_law
@staticmethod
def get_coupling_rate_law(rxn):
"""
Returns rate expression for Coupling instance.
Args:
rxn (Coupling)
Returns:
rate_law (str) - rate expression
"""
if rxn.propensity.max() == 0:
rate_law = ''
else:
base = np.where(rxn.propensity>0)[0][0]
neighbors = np.where(rxn.propensity<0)[0]
weight = (rxn.a * rxn.w) / (1+rxn.w*len(neighbors))
if len(neighbors) > 1:
coeff = '{:d}'.format(len(neighbors))
else:
coeff = ''
rate_law = '{:0.3f} x ({:s}[{:d}]'.format(weight, coeff, base)
for n in neighbors:
rate_law += ' - [{:d}]'.format(n)
rate_law += ')'
return rate_law
| en | 0.684647 | Class provides tabulated summary of reaction kinetics. Attributes: node_key (dict) - maps state dimension (key) to unique node id (value) reactions (list) - list of reaction objects table (list of lists) - rate law table, row for each reaction Instantiate raw law table. Args: node_key (dict) - maps state dimension (key) to node id (value) reactions (list) - reaction instances Pretty-print rate law table. Pretty-print rate law table. Build rate law table. Add reaction rate law to table. Args: rxn (reaction instance) # assemble reactants # assemble products # assemble rate expression # assemble rate constant # assemble sensitivities to environmental conditions # set reaction name # add reaction to table # add any repressors for Hill and Coupling reactions Assemble list of reactants. Args: rxn (reaction instance) Returns: reactants (str) - species indices Assemble list of products. Args: rxn (reaction instance) Returns: products (str) - species indices Assemble list of sensitivities to environmental conditions. Args: rxn (reaction instance) Returns: sensitivities (str) - species indices Assemble rate expression. Args: rxn (reaction instance) Returns: rate_expression (str) # get reaction type Returns rate constant expression for a reaction instance. Args: rxn (all types) Returns: rate_constant (str) - rate constant expression Returns rate expression for MassAction instance. Args: rxn (MassAction) Returns: rate_law (str) - rate expression # add reactants # add input contribution # assemble rate law Returns rate expression for Hill or Repressor instance. Args: rxn (Hill or Repressor) Returns: rate_law (str) - rate expression # add substrate contributions # combine weights and inputs # assemble substrates # get species/michaelis constant quotient # assemble rate law Returns rate expression for SumReaction instance. Args: rxn (SumReaction) Returns: rate_law (str) - rate expression Returns rate expression for Coupling instance. 
Args: rxn (Coupling) Returns: rate_law (str) - rate expression | 3.197106 | 3 |
aws_secrets/cli/cli.py | lucasvieirasilva/aws-ssm-secrets-cli | 4 | 6621938 | <gh_stars>1-10
import click
import yaml
from aws_secrets import __name__ as module_name
from aws_secrets.cli.decrypt import decrypt
from aws_secrets.cli.deploy import deploy
from aws_secrets.cli.encrypt import encrypt
from aws_secrets.cli.set_parameter import set_parameter
from aws_secrets.cli.set_secret import set_secret
from aws_secrets.cli.version import version
from aws_secrets.cli.view_parameter import view_parameter
from aws_secrets.cli.view_secret import view_secret
from aws_secrets.helpers.catch_exceptions import catch_exceptions
from aws_secrets.helpers.logging import setup_logging
from aws_secrets.representers.literal import Literal, literal_presenter
from aws_secrets.tags.cmd import CmdTag
from aws_secrets.tags.file import FileTag
from aws_secrets.tags.output_stack import OutputStackTag
# Register the package's custom YAML tags with the safe loader/dumper so
# secrets config files can use them (presumably: !cf_output resolves a
# CloudFormation stack output, !cmd a shell command, !file a file's
# contents — see the tag classes for the actual semantics).  A Literal
# representer is also installed for block-scalar string dumping.
yaml.SafeLoader.add_constructor('!cf_output', OutputStackTag.from_yaml)
yaml.SafeDumper.add_multi_representer(
    OutputStackTag, OutputStackTag.to_yaml)
yaml.SafeLoader.add_constructor('!cmd', CmdTag.from_yaml)
yaml.SafeDumper.add_multi_representer(CmdTag, CmdTag.to_yaml)
yaml.SafeLoader.add_constructor('!file', FileTag.from_yaml)
yaml.SafeDumper.add_multi_representer(FileTag, FileTag.to_yaml)
yaml.SafeDumper.add_representer(Literal, literal_presenter)
@click.group(help='AWS Secrets CLI')
@click.option(
    '--loglevel',
    help='Log level.',
    required=False,
    default='WARNING',
    show_default=True,
    type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], case_sensitive=False)
)
@click.pass_context
@catch_exceptions
def cli(ctx, loglevel: str):
    """
    Root CLI Group `aws-secrets --help`

    Configures package-wide logging and stores the chosen log level on
    the click context so sub-commands can read it.

    Args:
        ctx (`click.Context`): click context object
        loglevel (`str`): log level
    """
    # make sure ctx.obj exists before writing to it
    ctx.ensure_object(dict)
    ctx.obj['loglevel'] = loglevel
    setup_logging(module_name, loglevel)
# Attach every sub-command to the root `cli` group.
cli.add_command(set_parameter)
cli.add_command(set_secret)
cli.add_command(view_parameter)
cli.add_command(view_secret)
cli.add_command(deploy)
cli.add_command(decrypt)
cli.add_command(encrypt)
cli.add_command(version)
| import click
import yaml
from aws_secrets import __name__ as module_name
from aws_secrets.cli.decrypt import decrypt
from aws_secrets.cli.deploy import deploy
from aws_secrets.cli.encrypt import encrypt
from aws_secrets.cli.set_parameter import set_parameter
from aws_secrets.cli.set_secret import set_secret
from aws_secrets.cli.version import version
from aws_secrets.cli.view_parameter import view_parameter
from aws_secrets.cli.view_secret import view_secret
from aws_secrets.helpers.catch_exceptions import catch_exceptions
from aws_secrets.helpers.logging import setup_logging
from aws_secrets.representers.literal import Literal, literal_presenter
from aws_secrets.tags.cmd import CmdTag
from aws_secrets.tags.file import FileTag
from aws_secrets.tags.output_stack import OutputStackTag
yaml.SafeLoader.add_constructor('!cf_output', OutputStackTag.from_yaml)
yaml.SafeDumper.add_multi_representer(
OutputStackTag, OutputStackTag.to_yaml)
yaml.SafeLoader.add_constructor('!cmd', CmdTag.from_yaml)
yaml.SafeDumper.add_multi_representer(CmdTag, CmdTag.to_yaml)
yaml.SafeLoader.add_constructor('!file', FileTag.from_yaml)
yaml.SafeDumper.add_multi_representer(FileTag, FileTag.to_yaml)
yaml.SafeDumper.add_representer(Literal, literal_presenter)
@click.group(help='AWS Secrets CLI')
@click.option(
'--loglevel',
help='Log level.',
required=False,
default='WARNING',
show_default=True,
type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], case_sensitive=False)
)
@click.pass_context
@catch_exceptions
def cli(ctx, loglevel: str):
"""
Root CLI Group `aws-secrets --help`
Args:
ctx (`click.Context`): click context object
loglevel (`str`): log level
"""
ctx.ensure_object(dict)
ctx.obj['loglevel'] = loglevel
setup_logging(module_name, loglevel)
cli.add_command(set_parameter)
cli.add_command(set_secret)
cli.add_command(view_parameter)
cli.add_command(view_secret)
cli.add_command(deploy)
cli.add_command(decrypt)
cli.add_command(encrypt)
cli.add_command(version) | en | 0.191034 | Root CLI Group `aws-secrets --help` Args: ctx (`click.Context`): click context object loglevel (`str`): log level | 1.869416 | 2 |
ref/spatial_codec.py | cSDes1gn/spatial-codec | 3 | 6621939 | <gh_stars>1-10
"""
Spatial Codec™
==============
Contributors: <NAME>
Updated: 2020-07
Repository: https://github.com/cSDes1gn/spatial-codec
README available in repository root
Version: 2.0
Dependancies
------------
>>> import argparse
>>> import random
>>> import time
>>> import numpy as np
>>> import plotly.graph_objects as go
>>> from bitarray import bitarray
Sample Runs
-----------
>>> python spatial_codec.py 4 1 ffffffffffffffff
>>> python spatial_codec.py 8 64
Copyright © 2020 <NAME>
"""
#include status of imports via progress bar
import argparse
import random
import time
import numpy as np
import plotly.graph_objects as go
from bitarray import bitarray
class bcolors:
    """ANSI terminal escape sequences for colored/styled console output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'        # bold text
    UNDERLINE = '\033[4m'   # underlined text
class Frame:
    """Class `Frame` represents a 1D bitarray as a 3D matrix of specified size.

    `Frame` is a wrapper for an encoded 3D spatially encoded bitarray. It
    includes a `read` method for returning the spatial map `_SM` and a
    `compact` method for reducing the `x`,`y`,`z` attributes into a format
    that is readable to the plotly renderer.

    Attributes:
     * _SM (`np.matrix`): `_SM` is a 3D matrix of integers.
     * x (`list`): list of x components for 'on' bits within the frame.
     * y (`list`): list of y components for 'on' bits within the frame.
     * z (`list`): list of z components for 'on' bits within the frame.
    """

    def __init__(self, dim):
        """Initializes a dim^3 dimensional 3D matrix of zeroes.

        Args:
         * dim (`int`): edge length of the cubic spatial map.
        """
        self._SM = np.zeros((dim, dim, dim), dtype=int)
        # components are initialized to None placeholders so that coordinates
        # can be written at the bit index dictated by Hilbert's space filling
        # curve ordering
        size = pow(dim, 3)
        self.x = [None] * size
        self.y = [None] * size
        self.z = [None] * size

    def read(self):
        """Returns stored `_SM` matrix within this instance of `Frame`.

        Returns:
         * the `_SM` matrix.
        """
        return self._SM

    def write(self, x, y, z, bit=1):
        """Writes bit to spatial map matrix `_SM` within this instance of `Frame`.

        Args:
         * x (`int`): x component of spatial mapping tuple.
         * y (`int`): y component of spatial mapping tuple.
         * z (`int`): z component of spatial mapping tuple.
         * bit (`int`): bit value (defaults to 1).
        """
        self._SM[x][y][z] = bit

    def compact(self):
        """Removes leftover `None` placeholders so the component lists can be
        used by the plotly renderer.

        Perf fix: the original removed one `None` per pass with
        `list.remove` inside a `while True` loop, which is O(n^2); a
        single filtering pass per list is O(n) with the same result.
        """
        self.x = [c for c in self.x if c is not None]
        self.y = [c for c in self.y if c is not None]
        self.z = [c for c in self.z if c is not None]
class SpatialCodec:
"""Class `SpatialCodec` defines the codec for spatial encoding and decoding based on Hilbert's space filling curve.
`SpatialCodec` has two primary definitions `encode` & `decode` for converting `bitarray` objects into `Frame` objects and vice-versa.
This class also has two secondary functions `remap` & `render`. `remap` is a defintion specifically designed for the LEAP™ project. It
is a protected definition which can only be called by `TransmissionControlUnit` objects which allows the Transmission Control Software
to change the Hilbert space filling curve mapping to project the encoded frame to different access points (directions). The receiving
unit will always decode the frame according to a standardized method. `render` renders the encoded spatial map into a 3D matrix for the
purposes of validation testing and demonstration.
Attributes:
* dim (`int`): `dim` attribute defines the dimension of the the 3D matrix spatial map.
* orient (`int`): `orient` defines the orientation of the hilbert curve mapping. The default orientation is 0 followed by 1>2>3>0 in CW rotation
* fig (`dict`): `fig` is a dictionary containing the figure attribute initialization for a graphic object rendering spatial mapping in plotly
* HC (`np.matrix`): Holds `bitarray` index numbers in a 3D matrix defined by the hilberts space filling curve and shape is described by `dim`.
* J (`np.matrix`): Defines an 2D anti-diagonal identity matrix for rotating each layer (xy plane) of a `Frame`.
* bit_index (`int`): `bit_index` is a temporary attribute used to hold the running bit index count for `hilberts_curve`.
"""
def __init__(self, dim):
"""Initializes empty `HC` 3D matrix with resolution defined by `dim` and orientation by `orient`. Intializes graphic object `fig` for
rendering spatial mapping. Generates a spatial map using `HC` via Hilbert's space filling curve. Defines anti-diagonal identity matrix
`J` for rotational transformations by `Frame` layer.
Args:
* dim (`int`): `dim` is the dimension of the 3D matrix. Hilbert's space filling algorithm restricts this dimension to powers of 2.
Raises:
* `ValueError`: Raised if the parameter `dim` is not a power of 2.
"""
self.dim = dim
self.orient = 0 # default to 0
self.bit_index = 0
self.fig = go.Figure(
layout = go.Layout(title="3D Spatial Mapping of Randomly Generated 1D Bitarray using Hilbert's Space Filling Curve.")
)
# entry check to hilberts_curve to ensure dim parameter is a power of 2
if np.log2(self.dim) % 1 != 0:
raise ValueError
# Generate a 3D matrix of size dim that maps 1D bitarray indices to Hilberts space filling curve
print("\nGenerating Hilbert Curve...")
self.HC = np.zeros((self.dim,self.dim,self.dim), dtype=int)
self.hilbert_curve(dim,0,0,0,1,0,0,0,1,0,0,0,1)
print(bcolors.OKGREEN + "Hilbert curve matrix (HC) attribute successfully initialized." + bcolors.ENDC)
# dereference bit_index counter for HC
del self.bit_index
# construct anti-diagonal identity matrix J
self.J = np.eye(self.dim)
for i in range(int(self.dim/2)):
self.J[:,[0+i,self.dim-1-i]] = self.J[:,[self.dim-1-i,0+i]]
def hilbert_curve(self, dim, x, y, z, dx, dy, dz, dx2, dy2, dz2, dx3, dy3, dz3):
    """Recursively generates a set of coordinates for a hilbert space filling curve with 3D resolution `dim`
    Algorithm based on solution by user:kylefinn @ https://stackoverflow.com/questions/14519267/algorithm-for-generating-a-3d-hilbert-space-filling-curve-in-python

    (x, y, z) is the origin of the current sub-cube; (dx, dy, dz), (dx2, dy2, dz2)
    and (dx3, dy3, dz3) are the three direction vectors spanning it. Each level of
    recursion halves `dim` and visits the 8 octants in Hilbert order.
    """
    if dim == 1:
        # Base case: a single cell. Record the running curve position in HC;
        # note the axis order used for storage is [z][x][y].
        self.HC[int(z)][int(x)][int(y)] = self.bit_index
        self.bit_index += 1
    else:
        dim /= 2
        # For each negative direction component, shift the origin so the
        # sub-cube still lies inside the parent cube's bounds.
        if(dx < 0):
            x -= dim*dx
        if(dy < 0):
            y -= dim*dy
        if(dz < 0):
            z -= dim*dz
        if(dx2 < 0):
            x -= dim*dx2
        if(dy2 < 0):
            y -= dim*dy2
        if(dz2 < 0):
            z -= dim*dz2
        if(dx3 < 0):
            x -= dim*dx3
        if(dy3 < 0):
            y -= dim*dy3
        if(dz3 < 0):
            z -= dim*dz3
        # Recurse into the 8 octants with permuted/negated direction vectors so
        # consecutive curve indices stay spatially adjacent. The exact order and
        # signs of these calls define the curve — do not reorder them.
        self.hilbert_curve(dim, x, y, z, dx2, dy2, dz2, dx3, dy3, dz3, dx, dy, dz)
        self.hilbert_curve(dim, x+dim*dx, y+dim*dy, z+dim*dz, dx3, dy3, dz3, dx, dy, dz, dx2, dy2, dz2)
        self.hilbert_curve(dim, x+dim*dx+dim*dx2, y+dim*dy+dim*dy2, z+dim*dz+dim*dz2, dx3, dy3, dz3, dx, dy, dz, dx2, dy2, dz2)
        self.hilbert_curve(dim, x+dim*dx2, y+dim*dy2, z+dim*dz2, -dx, -dy, -dz, -dx2, -dy2, -dz2, dx3, dy3, dz3)
        self.hilbert_curve(dim, x+dim*dx2+dim*dx3, y+dim*dy2+dim*dy3, z+dim*dz2+dim*dz3, -dx, -dy, -dz, -dx2, -dy2, -dz2, dx3, dy3, dz3)
        self.hilbert_curve(dim, x+dim*dx+dim*dx2+dim*dx3, y+dim*dy+dim*dy2+dim*dy3, z+dim*dz+dim*dz2+dim*dz3, -dx3, -dy3, -dz3, dx, dy, dz, -dx2, -dy2, -dz2)
        self.hilbert_curve(dim, x+dim*dx+dim*dx3, y+dim*dy+dim*dy3, z+dim*dz+dim*dz3, -dx3, -dy3, -dz3, dx, dy, dz, -dx2, -dy2, -dz2)
        self.hilbert_curve(dim, x+dim*dx3, y+dim*dy3, z+dim*dz3, dx2, dy2, dz2, -dx3, -dy3, -dz3, -dx, -dy, -dz)
def encode(self, ba):
    """Encodes a 1D bitarray into a `Frame` object consisting of a 3D matrix containing indices corresponding to its spatial mapping.
    Args:
        * ba (`bitarray`): `ba` is the input `bitarray` object for encoding.
    Returns:
        * frame (`Frame`): `frame` object built from input bitarray `ba`
    """
    frame = Frame(self.dim)
    # walk every cell of the cube; HC gives the curve index of each cell,
    # which is also the bit position in the input bitarray
    for layer in range(self.dim):
        for row in range(self.dim):
            for col in range(self.dim):
                idx = self.HC[layer][row][col]
                if ba[idx] != 1:
                    continue
                # bit is on: set the cell and record its coordinates at the
                # curve position so rendering preserves Hilbert order
                frame.write(layer, row, col)
                frame.x[idx] = row
                frame.y[idx] = col
                frame.z[idx] = layer
    print(frame.read())
    # condense the coordinate component lists for the renderer
    frame.compact()
    return frame
def decode(self, frame):
    """Decodes a `Frame` object into a 1D bitarray.
    Args:
        * frame (`Frame`): `frame` object built from a bitarray `ba`
    Returns:
        * ba (`bitarray`): `ba` is the decoded 1D `bitarray` from `Frame` object.
    """
    # bitarray initialized to all 0's with length dim^3 so single bits can be
    # flipped on as they are discovered in the spatial map
    ba = bitarray(pow(self.dim,3))
    ba.setall(False)
    SM = frame.read()
    # adjust bitarray true values based on the spatial bitmap
    # (removed an unused `bit_index = 0` dead local that was never read)
    for i in range(self.dim):
        # adding 1 to each HC element allows element multiplication of SM to HC
        # to yield non-zero bit indices defining positions for decoded bits
        # (the extra [:] slices are no-ops; SM[i][:][:] is just layer SM[i])
        SML = np.multiply(SM[i][:][:],self.HC[i][:][:]+1)
        for j in range(self.dim):
            for k in range(self.dim):
                if SML[j][k] != 0:
                    # subtracting 1 reverts the offset index to the true bit index
                    ba[SML[j][k]-1] = 1
    print(ba)
    return ba
def remap(self, dest):
    """Protected definition. Modifies `SpatialCodec` Hilbert curve by translating about the Z axis.
    Args:
        * dest (`int`): `dest` defines a target direction to remap to. Defined by directions [0>1>2>3] in clockwise direction
    Raises:
        * `ValueError`: Raised if destination is not defined by integers in the range [0,3]
    """
    # assume default direction is 0; nothing to do if already facing dest
    if self.orient == dest:
        return
    elif dest not in [0,1,2,3]:
        raise ValueError
    # the following if statements categorize 3 matrix transformations: mirror, CW and CCW rotations. Depending on the destination index the
    # algorithm will select the operation that will yield the destination via a single transformation.
    if np.abs(self.orient+dest)%2 == 0:
        # same parity => the orientations are 180 degrees apart => mirror
        print("Mirror translation:")
        # to perform a mirror transformation matrix multiply anti-diagonal identity J to HC twice for each layer of HC: J*HC[x]*J
        for i in range(self.dim):
            self.HC[i][:][:] = np.matmul(self.J, np.matmul(self.HC[i][:][:],self.J))
    else:
        # determine the single rotation direction (CW vs CCW) that reaches dest
        # in one step, accounting for the 3 -> 0 wrap-around
        if ((dest - self.orient) <= 0 and (dest - self.orient) >= -1) or ((dest - self.orient) > 1):
            # perform a CCW rotation transformation by matrix multiply the anti-diagonal identity J to the transpose of each layer of HC: J*HC[x]^T
            print("CCW translation:")
            for i in range(self.dim):
                self.HC[i][:][:] = np.matmul(self.J,self.HC[i][:][:].T)
        else:
            # perform a CW rotation transformation by matrix multiply the transpose of each layer of HC to the anti-diagonal identity J: HC[x]^T*J
            print("CW translation:")
            for i in range(self.dim):
                self.HC[i][:][:] = np.matmul(self.HC[i][:][:].T,self.J)
    # update codec orientation attribute
    self.orient = dest
    print(self.HC)
def render(self, ba_list):
    """Renders a list of `bitarray` objects to a 3D scatter rendered using `plotly`
    Args:
        * ba_list (:list:`bitarray`): `ba_list` is a list (size args.frames) of randomly generated bits (size args.dim^3)
    """
    # initialized for storing figure labels with decoded hex values
    decoded_hex = list()
    print("Rendering Spatial Bitmaps:")
    for steps in range(len(ba_list)):
        # encode bitarray into list of Spatial bits
        frame = self.encode(ba_list[steps])
        print("Encoded frame: " + str(steps))
        # Add the new trace to the scatter.
        # NOTE(review): tx/ty/tz are aliases of frame.x/y/z, not copies; the
        # clear() calls below therefore mutate the frame after add_trace. This
        # presumably relies on plotly copying the data at trace construction —
        # confirm against the plotly version in use.
        tx = frame.x
        ty = frame.y
        tz = frame.z
        self.fig.add_trace(go.Scatter3d(visible=True, x=tx,y=ty,z=tz))
        # decode Frame object back into bitarray (round-trip sanity check)
        ba = self.decode(frame)
        # append decoded bitarray to decoded hex list for figure labelling
        decoded_hex.append(ba.tobytes().hex())
        print("Decoded frame: " + str(steps))
        # clear arrays for next frame
        tx.clear()
        ty.clear()
        tz.clear()
    # `steps` is reused here as the slider-step list (shadows the loop
    # variable above); each step toggles exactly one trace visible and is
    # labelled with that frame's decoded hex value
    steps = []
    for i in range(len(self.fig.data)):
        step = dict(
            method="restyle",
            args=["visible", [False] * len(self.fig.data)],
            label=decoded_hex[i],
        )
        step["args"][1][i] = True # Toggle i'th trace to "visible"
        steps.append(step)
    sliders = [dict(
        active=0,
        currentvalue={"prefix": "Frame: "},
        pad={"t": 50},
        steps=steps
    )]
    self.fig.update_layout(
        sliders=sliders,
    )
    self.fig.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generates a sequence of 3D spatially encoded frames from sequence of 1D bitarrays.')
    parser.add_argument('dim', metavar='dim', type=int, help='matrix dimension (must be a power of 2)')
    parser.add_argument('frames', metavar='frames',type=int, help='number of frames to generate.')
    parser.add_argument('bitarray', nargs='?', default=None, metavar='bitarray',type=str, help='custom hex definition. If arg specified script ignores previous frame argument.')
    args = parser.parse_args()
    # size is the voxel (3D pixel) resolution per frame: dim^3 encodable bits
    size = pow(args.dim,3)
    ba_list = list()
    # check for specified bitarray argument otherwise generate random bitarrays for each new frame
    if args.bitarray:
        # one hex digit encodes 4 bits, so a full frame needs size // 4 digits
        if len(args.bitarray) != size // 4:
            raise ValueError("Mis-match of bitarray length and matrix dimension arguments.")
        # bin() emits a literal '0b' prefix; slice it off explicitly rather
        # than lstrip('0b'), which strips by character set
        b = bitarray(bin(int(args.bitarray, base=16))[2:])
        # re-append the MSB zero bits dropped by the int()/bin() round trip;
        # pad to `size` (not a hard-coded 64) so dimensions other than 4 work
        if len(b) != size:
            bn = bitarray()
            for i in range(size - len(b)):
                bn.append(False)
            bn.extend(b)
            ba_list.append(bn)
        else:
            ba_list.append(b)
    else:
        # generate 'args.frames' number random bitarray with a length 'size'
        for j in range(args.frames):
            ba = bitarray()
            for i in range(size):
                ba.append(bool(random.getrandbits(1)))
            ba_list.append(ba)
    try:
        sc = SpatialCodec(args.dim)
    except ValueError:
        print("Argument dim must be a power of 2. Exiting.")
        # non-zero status so callers/scripts can detect the failure
        exit(1)
    print(sc.HC)
    # Step 1: Translate HC
    # sc.remap(3)
    # Step 2: Encode
    # Step 3 Decode always at default
    # NOTE: TCU has the ability to rotate and track its rotation with the hilbert curve. IRIS only knows one configuration of HC and simply decodes according to that
    sc.render(ba_list)
| """
Spatial Codec™
==============
Contributors: <NAME>
Updated: 2020-07
Repoitory: https://github.com/cSDes1gn/spatial-codec
README availble in repository root
Version: 2.0
Dependancies
------------
>>> import argparse
>>> import random
>>> import time
>>> import numpy as np
>>> import plotly.graph_objects as go
>>> from bitarray import bitarray
Sample Runs
-----------
>>> python spatial_codec.py 4 1 ffffffffffffffff
>>> python spatial_codec.py 8 64
Copyright © 2020 <NAME>
"""
#include status of imports via progress bar
import argparse
import random
import time
import numpy as np
import plotly.graph_objects as go
from bitarray import bitarray
class bcolors:
    """ANSI terminal escape sequences used to colour console status output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all attributes back to the terminal default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class Frame:
    """Class `Frame` represents a 1D bitarray as a 3D matrix of specified size.

    `Frame` is a wrapper for a 3D spatially encoded bitarray. It includes a
    `read` method for returning the spatial map `_SM` and a `compact` method
    for reducing the `x`,`y`,`z` attributes into a format that is readable to
    the plotly renderer.

    Attributes:
        * _SM (`np.ndarray`): `_SM` is a dim x dim x dim matrix of integers.
        * x (`list`): `x` is a list of x components for 'on' bits within the frame.
        * y (`list`): `y` is a list of y components for 'on' bits within the frame.
        * z (`list`): `z` is a list of z components for 'on' bits within the frame.
    """
    def __init__(self, dim):
        """Initializes a dim^3 dimensional 3D matrix of zeroes.

        The component lists are pre-sized with `None` placeholders so that
        coordinates can be written at arbitrary curve indices (Hilbert order)
        and later condensed by `compact`.
        """
        self._SM = np.zeros((dim,dim,dim), dtype=int)
        self.x = [None for _ in range(pow(dim,3))]
        self.y = [None for _ in range(pow(dim,3))]
        self.z = [None for _ in range(pow(dim,3))]
    def read(self):
        """Returns stored `_SM` matrix within this instance of `Frame`.
        Returns:
            * Returns the `_SM` matrix.
        """
        return self._SM
    def write(self, x, y, z, bit=1):
        """Writes bit to spatial map matrix `_SM` within this instance of `Frame`.
        Args:
            * x (`int`): x component of spatial mapping tuple.
            * y (`int`): y component of spatial mapping tuple.
            * z (`int`): z component of spatial mapping tuple.
            * bit (`int`): bit value
        """
        self._SM[x][y][z] = bit
    def compact(self):
        """Formats the component lists by removing the leftover `None`
        placeholders so they can be used by the plotly renderer.

        Rebuilds each list with a single O(n) comprehension instead of the
        previous O(n^2) repeated `list.remove(None)` loop; this also removes
        *all* placeholders even if the three lists hold unequal counts.
        """
        self.x = [c for c in self.x if c is not None]
        self.y = [c for c in self.y if c is not None]
        self.z = [c for c in self.z if c is not None]
class SpatialCodec:
    """Class `SpatialCodec` defines the codec for spatial encoding and decoding based on Hilbert's space filling curve.
    `SpatialCodec` has two primary definitions `encode` & `decode` for converting `bitarray` objects into `Frame` objects and vice-versa.
    This class also has two secondary functions `remap` & `render`. `remap` is a definition specifically designed for the LEAP™ project. It
    is a protected definition which can only be called by `TransmissionControlUnit` objects which allows the Transmission Control Software
    to change the Hilbert space filling curve mapping to project the encoded frame to different access points (directions). The receiving
    unit will always decode the frame according to a standardized method. `render` renders the encoded spatial map into a 3D matrix for the
    purposes of validation testing and demonstration.
    Attributes:
        * dim (`int`): `dim` attribute defines the dimension of the 3D matrix spatial map.
        * orient (`int`): `orient` defines the orientation of the hilbert curve mapping. The default orientation is 0 followed by 1>2>3>0 in CW rotation
        * fig (`go.Figure`): `fig` is the plotly graphic object used for rendering the spatial mapping
        * HC (`np.ndarray`): Holds `bitarray` index numbers in a 3D matrix defined by the hilberts space filling curve and shape is described by `dim`.
        * J (`np.ndarray`): Defines a 2D anti-diagonal identity matrix for rotating each layer (xy plane) of a `Frame`.
        * bit_index (`int`): `bit_index` is a temporary attribute used to hold the running bit index count for `hilbert_curve`.
    """
    def __init__(self, dim):
        """Initializes empty `HC` 3D matrix with resolution defined by `dim` and orientation by `orient`. Initializes graphic object `fig` for
        rendering spatial mapping. Generates a spatial map using `HC` via Hilbert's space filling curve. Defines anti-diagonal identity matrix
        `J` for rotational transformations by `Frame` layer.
        Args:
            * dim (`int`): `dim` is the dimension of the 3D matrix. Hilbert's space filling algorithm restricts this dimension to powers of 2.
        Raises:
            * `ValueError`: Raised if the parameter `dim` is not a power of 2.
        """
        self.dim = dim
        self.orient = 0 # default to 0
        # running curve-position counter consumed by hilbert_curve(); deleted below
        self.bit_index = 0
        self.fig = go.Figure(
            layout = go.Layout(title="3D Spatial Mapping of Randomly Generated 1D Bitarray using Hilbert's Space Filling Curve.")
        )
        # entry check to hilberts_curve to ensure dim parameter is a power of 2
        # (log2 of a power of two has no fractional part)
        if np.log2(self.dim) % 1 != 0:
            raise ValueError
        # Generate a 3D matrix of size dim that maps 1D bitarray indices to Hilberts space filling curve
        print("\nGenerating Hilbert Curve...")
        self.HC = np.zeros((self.dim,self.dim,self.dim), dtype=int)
        # initial call spans the whole cube; direction vectors are the unit
        # vectors along x, y and z
        self.hilbert_curve(dim,0,0,0,1,0,0,0,1,0,0,0,1)
        print(bcolors.OKGREEN + "Hilbert curve matrix (HC) attribute successfully initialized." + bcolors.ENDC)
        # dereference bit_index counter for HC
        del self.bit_index
        # construct anti-diagonal identity matrix J by swapping identity
        # columns outside-in; remap() uses J for mirror/rotation transforms
        self.J = np.eye(self.dim)
        for i in range(int(self.dim/2)):
            self.J[:,[0+i,self.dim-1-i]] = self.J[:,[self.dim-1-i,0+i]]
    def hilbert_curve(self, dim, x, y, z, dx, dy, dz, dx2, dy2, dz2, dx3, dy3, dz3):
        """Recursively generates a set of coordinates for a hilbert space filling curve with 3D resolution `dim`
        Algorithm based on solution by user:kylefinn @ https://stackoverflow.com/questions/14519267/algorithm-for-generating-a-3d-hilbert-space-filling-curve-in-python

        (x, y, z) is the origin of the current sub-cube; the three (dx*, dy*, dz*)
        triples are the direction vectors spanning it. Each recursion level halves
        `dim` and visits the 8 octants in Hilbert order.
        """
        if dim == 1:
            # Base case: a single cell. Record the running curve position in
            # HC; note the axis order used for storage is [z][x][y].
            self.HC[int(z)][int(x)][int(y)] = self.bit_index
            self.bit_index += 1
        else:
            dim /= 2
            # For each negative direction component, shift the origin so the
            # sub-cube still lies inside the parent cube's bounds.
            if(dx < 0):
                x -= dim*dx
            if(dy < 0):
                y -= dim*dy
            if(dz < 0):
                z -= dim*dz
            if(dx2 < 0):
                x -= dim*dx2
            if(dy2 < 0):
                y -= dim*dy2
            if(dz2 < 0):
                z -= dim*dz2
            if(dx3 < 0):
                x -= dim*dx3
            if(dy3 < 0):
                y -= dim*dy3
            if(dz3 < 0):
                z -= dim*dz3
            # Recurse into the 8 octants with permuted/negated direction
            # vectors so consecutive indices stay spatially adjacent. The
            # exact call order and signs define the curve — do not reorder.
            self.hilbert_curve(dim, x, y, z, dx2, dy2, dz2, dx3, dy3, dz3, dx, dy, dz)
            self.hilbert_curve(dim, x+dim*dx, y+dim*dy, z+dim*dz, dx3, dy3, dz3, dx, dy, dz, dx2, dy2, dz2)
            self.hilbert_curve(dim, x+dim*dx+dim*dx2, y+dim*dy+dim*dy2, z+dim*dz+dim*dz2, dx3, dy3, dz3, dx, dy, dz, dx2, dy2, dz2)
            self.hilbert_curve(dim, x+dim*dx2, y+dim*dy2, z+dim*dz2, -dx, -dy, -dz, -dx2, -dy2, -dz2, dx3, dy3, dz3)
            self.hilbert_curve(dim, x+dim*dx2+dim*dx3, y+dim*dy2+dim*dy3, z+dim*dz2+dim*dz3, -dx, -dy, -dz, -dx2, -dy2, -dz2, dx3, dy3, dz3)
            self.hilbert_curve(dim, x+dim*dx+dim*dx2+dim*dx3, y+dim*dy+dim*dy2+dim*dy3, z+dim*dz+dim*dz2+dim*dz3, -dx3, -dy3, -dz3, dx, dy, dz, -dx2, -dy2, -dz2)
            self.hilbert_curve(dim, x+dim*dx+dim*dx3, y+dim*dy+dim*dy3, z+dim*dz+dim*dz3, -dx3, -dy3, -dz3, dx, dy, dz, -dx2, -dy2, -dz2)
            self.hilbert_curve(dim, x+dim*dx3, y+dim*dy3, z+dim*dz3, dx2, dy2, dz2, -dx3, -dy3, -dz3, -dx, -dy, -dz)
    def encode(self, ba):
        """Encodes a 1D bitarray into a `Frame` object consisting of a 3D matrix containing indices corresponding to its spatial mapping.
        Args:
            * ba (`bitarray`): `ba` is the input `bitarray` object for encoding.
        Returns:
            * frame (`Frame`): `frame` object built from input bitarray `ba`
        """
        frame = Frame(self.dim)
        # construct spatial map matrix: HC[i][j][k] is the curve index of the
        # cell, which doubles as the bit position in the input bitarray
        for i in range(self.dim):
            for j in range(self.dim):
                for k in range(self.dim):
                    if ba[self.HC[i][j][k]] == 1:
                        frame.write(i,j,k)
                        # update frame components for rendering; coordinates
                        # are stored at the curve index to preserve order
                        frame.x[self.HC[i][j][k]] = j
                        frame.y[self.HC[i][j][k]] = k
                        frame.z[self.HC[i][j][k]] = i
                    else:
                        pass
        print(frame.read())
        # component condensing setup for rendering
        frame.compact()
        return frame
    def decode(self, frame):
        """Decodes a `Frame` object into a 1D bitarray.
        Args:
            * frame (`Frame`): `frame` object built from a bitarray `ba`
        Returns:
            * ba (`bitarray`): `ba` is the decoded 1D `bitarray` from `Frame` object.
        """
        # bitarray defined with 0's with a length equal to the masterlist (has dim encoded by masterlist length) for 1 bit replacement
        ba = bitarray(pow(self.dim,3))
        ba.setall(False)
        SM = frame.read()
        # adjust bitarray true values based on spatial_bitmap
        # NOTE(review): bit_index is never read below — dead local.
        bit_index = 0
        for i in range(self.dim):
            # adding 1 to each HC element allows element multiplication of SM to HC to yield non-zero bit indices defining positions for decoded bits
            SML = np.multiply(SM[i][:][:],self.HC[i][:][:]+1)
            for j in range(self.dim):
                for k in range(self.dim):
                    if SML[j][k] != 0:
                        # subtracting 1 from each element reverts the indices to the true index number
                        ba[SML[j][k]-1] = 1
        print(ba)
        return ba
    def remap(self, dest):
        """Protected definition. Modifies `SpatialCodec` Hilbert curve by translating about the Z axis.
        Args:
            * dest (`int`): `dest` defines a target direction to remap to. Defined by directions [0>1>2>3] in clockwise direction
        Raises:
            * `ValueError`: Raised if destination is not defined by integers in the range [0,3]
        """
        # assume default direction is 0; nothing to do if already facing dest
        if self.orient == dest:
            return
        elif dest not in [0,1,2,3]:
            raise ValueError
        # the following if statements categorize 3 matrix transformations: mirror, CW and CCW rotations. Depending on the destination index the
        # algorithm will select the operation that will yield the destination via a single transformation.
        if np.abs(self.orient+dest)%2 == 0:
            # same parity => orientations are 180 degrees apart => mirror
            print("Mirror translation:")
            # to perform a mirror transformation matrix multiply anti-diagonal identity J to HC twice for each layer of HC: J*HC[x]*J
            for i in range(self.dim):
                self.HC[i][:][:] = np.matmul(self.J, np.matmul(self.HC[i][:][:],self.J))
        else:
            # determine the single rotation direction (CW vs CCW) that reaches
            # dest in one step, accounting for the 3 -> 0 wrap-around
            if ((dest - self.orient) <= 0 and (dest - self.orient) >= -1) or ((dest - self.orient) > 1):
                # perform a CCW rotation transformation by matrix multiply the anti-diagonal identity J to the transpose of each layer of HC: J*HC[x]^T
                print("CCW translation:")
                for i in range(self.dim):
                    self.HC[i][:][:] = np.matmul(self.J,self.HC[i][:][:].T)
            else:
                # perform a CW rotation transformation by matrix multiply the transpose of each layer of HC to the anti-diagonal identity J: HC[x]^T*J
                print("CW translation:")
                for i in range(self.dim):
                    self.HC[i][:][:] = np.matmul(self.HC[i][:][:].T,self.J)
        # update codec orientation attribute
        self.orient = dest
        print(self.HC)
    def render(self, ba_list):
        """Renders a list of `bitarray` objects to a 3D scatter rendered using `plotly`
        Args:
            * ba_list (:list:`bitarray`): `ba_list` is a list (size args.frames) of randomly generated bits (size args.dim^3)
        """
        # initialized for storing figure labels with decoded hex values
        decoded_hex = list()
        print("Rendering Spatial Bitmaps:")
        for steps in range(len(ba_list)):
            # encode bitarray into list of Spatial bits
            frame = self.encode(ba_list[steps])
            print("Encoded frame: " + str(steps))
            # Add the new trace to the scatter.
            # NOTE(review): tx/ty/tz alias frame.x/y/z (no copy); the clear()
            # calls below mutate the frame after add_trace. This presumably
            # relies on plotly copying the data at trace construction — confirm.
            tx = frame.x
            ty = frame.y
            tz = frame.z
            self.fig.add_trace(go.Scatter3d(visible=True, x=tx,y=ty,z=tz))
            # decode Frame object back into bitarray (round-trip sanity check)
            ba = self.decode(frame)
            # append decoded bitarray to decoded hex list for figure labelling
            decoded_hex.append(ba.tobytes().hex())
            print("Decoded frame: " + str(steps))
            # clear arrays for next frame
            tx.clear()
            ty.clear()
            tz.clear()
        # `steps` is reused here as the slider-step list (shadows the loop
        # variable above); each step toggles exactly one trace visible and is
        # labelled with that frame's decoded hex value
        steps = []
        for i in range(len(self.fig.data)):
            step = dict(
                method="restyle",
                args=["visible", [False] * len(self.fig.data)],
                label=decoded_hex[i],
            )
            step["args"][1][i] = True # Toggle i'th trace to "visible"
            steps.append(step)
        sliders = [dict(
            active=0,
            currentvalue={"prefix": "Frame: "},
            pad={"t": 50},
            steps=steps
        )]
        self.fig.update_layout(
            sliders=sliders,
        )
        self.fig.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generates a sequence of 3D spatially encoded frames from sequence of 1D bitarrays.')
    parser.add_argument('dim', metavar='dim', type=int, help='matrix dimension (must be a power of 2)')
    parser.add_argument('frames', metavar='frames',type=int, help='number of frames to generate.')
    parser.add_argument('bitarray', nargs='?', default=None, metavar='bitarray',type=str, help='custom hex definition. If arg specified script ignores previous frame argument.')
    args = parser.parse_args()
    # size is the voxel (3D pixel) resolution per frame: dim^3 encodable bits
    size = pow(args.dim,3)
    ba_list = list()
    # check for specified bitarray argument otherwise generate random bitarrays for each new frame
    if args.bitarray:
        # one hex digit encodes 4 bits, so a full frame needs size // 4 digits
        if len(args.bitarray) != size // 4:
            raise ValueError("Mis-match of bitarray length and matrix dimension arguments.")
        # bin() emits a literal '0b' prefix; slice it off explicitly rather
        # than lstrip('0b'), which strips by character set
        b = bitarray(bin(int(args.bitarray, base=16))[2:])
        # re-append the MSB zero bits dropped by the int()/bin() round trip;
        # pad to `size` (not a hard-coded 64) so dimensions other than 4 work
        if len(b) != size:
            bn = bitarray()
            for i in range(size - len(b)):
                bn.append(False)
            bn.extend(b)
            ba_list.append(bn)
        else:
            ba_list.append(b)
    else:
        # generate 'args.frames' number random bitarray with a length 'size'
        for j in range(args.frames):
            ba = bitarray()
            for i in range(size):
                ba.append(bool(random.getrandbits(1)))
            ba_list.append(ba)
    try:
        sc = SpatialCodec(args.dim)
    except ValueError:
        print("Argument dim must be a power of 2. Exiting.")
        # non-zero status so callers/scripts can detect the failure
        exit(1)
    print(sc.HC)
    # Step 1: Translate HC
    # sc.remap(3)
    # Step 2: Encode
    # Step 3 Decode always at default
    # NOTE: TCU has the ability to rotate and track its rotation with the hilbert curve. IRIS only knows one configuration of HC and simply decodes according to that
sc.render(ba_list) | en | 0.718274 | Spatial Codec™ ============== Contributors: <NAME> Updated: 2020-07 Repoitory: https://github.com/cSDes1gn/spatial-codec README availble in repository root Version: 2.0 Dependancies ------------ >>> import argparse >>> import random >>> import time >>> import numpy as np >>> import plotly.graph_objects as go >>> from bitarray import bitarray Sample Runs ----------- >>> python spatial_codec.py 4 1 ffffffffffffffff >>> python spatial_codec.py 8 64 Copyright © 2020 <NAME> #include status of imports via progress bar Class `Frame` represents a 1D bitarray as a 3D matrix of specified size. `Frame` is a wrapper for an encoded 3D spatially encoded bitarray. It includes a `read` method for returning the spatial map `_SM` and a `compact` method for reducing the `x`,`y`,`z` attributes into a format that is readable to the plotly renderer. Attributes: * _SM (`np.matrix`): `_SM` is a 3D matrix of integers. * x (`list`): `x` is a list of x components for 'on' bits within the frame. * y (`list`): `y` is a list of y components for 'on' bits within the frame. * z (`list`): `z` is a list of z components for 'on' bits within the frame. Initializes a dim^3 dimensional 3D matrix of zeroes. # components are initialized to an empty list so that the components can be entered in the order which they appear according to # Hilberts space filling curve Returns stored `_SM` matrix within this instance of `Frame`. Returns: * Returns the `_SM` matrix. Writes bit to spatial map matrix `_SM` within this instance of `Frame`. Args: * x (`int`): x component of spatial mapping tuple. * y (`int`): y component of spatial mapping tuple. * z (`int`): z component of spatial mapping tuple. * bit (`int`): bit value Formats the component lists by removing the leftover `None` type objects so component lists can be used by plotly renderer. 
# each list corresponds to a component of a coordinate set so the first time None is not found for one component will make it # true for all other components Class `SpatialCodec` defines the codec for spatial encoding and decoding based on Hilbert's space filling curve. `SpatialCodec` has two primary definitions `encode` & `decode` for converting `bitarray` objects into `Frame` objects and vice-versa. This class also has two secondary functions `remap` & `render`. `remap` is a defintion specifically designed for the LEAP™ project. It is a protected definition which can only be called by `TransmissionControlUnit` objects which allows the Transmission Control Software to change the Hilbert space filling curve mapping to project the encoded frame to different access points (directions). The receiving unit will always decode the frame according to a standardized method. `render` renders the encoded spatial map into a 3D matrix for the purposes of validation testing and demonstration. Attributes: * dim (`int`): `dim` attribute defines the dimension of the the 3D matrix spatial map. * orient (`int`): `orient` defines the orientation of the hilbert curve mapping. The default orientation is 0 followed by 1>2>3>0 in CW rotation * fig (`dict`): `fig` is a dictionary containing the figure attribute initialization for a graphic object rendering spatial mapping in plotly * HC (`np.matrix`): Holds `bitarray` index numbers in a 3D matrix defined by the hilberts space filling curve and shape is described by `dim`. * J (`np.matrix`): Defines an 2D anti-diagonal identity matrix for rotating each layer (xy plane) of a `Frame`. * bit_index (`int`): `bit_index` is a temporary attribute used to hold the running bit index count for `hilberts_curve`. Initializes empty `HC` 3D matrix with resolution defined by `dim` and orientation by `orient`. Intializes graphic object `fig` for rendering spatial mapping. Generates a spatial map using `HC` via Hilbert's space filling curve. 
Defines anti-diagonal identity matrix `J` for rotational transformations by `Frame` layer. Args: * dim (`int`): `dim` is the dimension of the 3D matrix. Hilbert's space filling algorithm restricts this dimension to powers of 2. Raises: * `ValueError`: Raised if the parameter `dim` is not a power of 2. # default to 0 # entry check to hilberts_curve to ensure dim parameter is a power of 2 # Generate a 3D matrix of size dim that maps 1D bitarray indices to Hilberts space filling curve # dereference bit_index counter for HC # construct anti-diagonal identity matrix J Recursively generates a set of coordinates for a hilbert space filling curve with 3D resolution `dim` Algorithm based on solution by user:kylefinn @ https://stackoverflow.com/questions/14519267/algorithm-for-generating-a-3d-hilbert-space-filling-curve-in-python # Recursively fill matrix indices using temp SpatialCodec attribute bit_index Encodes a 1D bitarray into a `Frame` object consisting of a 3D matrix containing indices corresponding to its spatial mapping. Args: * ba (`bitarray`): `ba` is the input `bitarray` object for encoding. Returns: * frame (`Frame`): `frame` object built from input bitarray `ba` # construct spatial map matrix # update frame components for rendering # component condensing setup for rendering Decodes a `Frame` object into a 1D bitarray. Args: * frame (`Frame`): `frame` object built from a bitarray `ba` Returns: * ba (`bitarray`): `ba` is the decoded 1D `bitarray` from `Frame` object. # bitarray defined with 0's with a length equal to the masterlist (has dim encoded by masterlist length) for 1 bit replacement # adjust bitarray true values based on spatial_bitmap # adding 1 to each HC element allows element multiplication of SM to HC to yield non-zero bit indices defining positions for decoded bits # subtracting 1 from each element reverts the indices to the true index number Protected definition. Modifies `SpatialCodec` Hilbert curve by translating about the Z axis. 
Args: * dest (`int`): `dest` defines a target direction to remap to. Defined by directions [0>1>2>3] in clockwise direction Raises: * `ValueError`: Raised if destination is not defined by integers in the range [0,3] # assume default direction is 0 # the following if statements categorize 3 matrix transformations: mirror, CW and CCW rotations. Depending on the destination index the # algorithm will select the operation that will yield the destination via a single transformation. # to perform a mirror transformation matrix multiply anti-diagonal identity J to HC twice for each layer of HC: J*HC[x]*J # determine the # perform a CCW rotation transformation by matrix multiply the anti-diagonal identity J to the transpose of each layer of HC: J*HC[x]^T # perform a CW rotation transformation by matrix multiply the transpose of each layer of HC to the anti-diagonal identity J: HC[x]^T*J # update codec orientation attribute Renders a list of `bitarray` objects to a 3D scatter rendered using `plotly` Args: * ba_list (:list:`bitarray`): `ba_list` is a list (size args.frames) of randomly generated bits (size args.dim^3) # initialized for storing figure labels with decoded hex values # encode bitarray into list of Spatial bits # Add the new trace to the scatter # decode Frame object back into bitarray # append decoded bitarray to decoded hex list for figure labelling # clear arrays for next frame # Toggle i'th trace to "visible" # size parameter is important for describing the voxel (3D pixel) resolution per frame # ex/. for a 4x4x4 matrix the resolution is 64. 
In other words, there are 64 bits of information that can be encoded per frame # check for specified bitarray argument otherwise generate random bitarrays for each new frame # ensure bitarray length matches matrix dimension argument # re append MSB cutoff of 0 bits by python bin() definition # generate 'args.frames' number random bitarray with a length 'size' # Step 1: Translate HC # sc.remap(3) # Step 2: Encode # Step 3 Decode always at default # NOTE: TCU has the ability to rotate and track its rotation with the hilbert curve. IRIS only knows one configuration of HC and simply decodes according to that | 2.124584 | 2 |
Black-Box/code.py | bcspragu/Machine-Learning-Projects | 0 | 6621940 | <gh_stars>0
import itertools
import operator
import numpy as np
from sklearn import cross_validation
from sklearn import svm
from sklearn import tree
from sklearn import neighbors
from sklearn import ensemble
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2

# Training matrix: column 0 holds the class labels, the rest are features.
train = np.load('train.npy')
# Remove the labels
test = np.load('test_distribute.npy')[:,1:]
data = train[:,1:]
target = train[:,0]
# Take the 400 best features (univariate chi-squared selection, fitted on
# the training data only and applied to both train and test)
trimmer = SelectKBest(chi2, k=400).fit(data, target)
trimmed_data = trimmer.transform(data)
trimmed_test = trimmer.transform(test)
clf = ensemble.RandomForestClassifier(n_estimators=100)
pred = clf.fit(trimmed_data, target).predict(trimmed_test)
# Write predictions as 1-indexed (ID, Category) rows; the context manager
# guarantees the file is closed even if a write raises.
with open('predictions.csv', 'w') as f:
    f.write("ID,Category\n")
    for i, res in enumerate(pred):
        f.write("%d,%d\n" % (i+1,res))
# I use the following code to find good hyperparameter values
#scores = cross_validation.cross_val_score(
#clf, trimmed_data, target, cv=5)
#print("Accuracy: %f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
| import itertools
import operator
import numpy as np
from sklearn import cross_validation
from sklearn import svm
from sklearn import tree
from sklearn import neighbors
from sklearn import ensemble
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# Training matrix: column 0 holds the class labels, the rest are features.
train = np.load('train.npy')
# Remove the labels
test = np.load('test_distribute.npy')[:,1:]
data = train[:,1:]
target = train[:,0]
# Take the 400 best features (univariate chi-squared selection, fitted on
# the training data only and applied to both train and test)
trimmer = SelectKBest(chi2, k=400).fit(data, target)
trimmed_data = trimmer.transform(data)
trimmed_test = trimmer.transform(test)
clf = ensemble.RandomForestClassifier(n_estimators=100)
pred = clf.fit(trimmed_data, target).predict(trimmed_test)
# Predictions are written 1-indexed as (ID, Category) rows.
f = open('predictions.csv', 'w')
f.write("ID,Category\n")
for i, res in enumerate(pred):
    f.write("%d,%d\n" % (i+1,res))
f.close()
# I use the following code to find good hyperparameter values
#scores = cross_validation.cross_val_score(
#clf, trimmed_data, target, cv=5)
#print("Accuracy: %f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) | en | 0.465054 | # Remove the labels # Take the 400 best features # I use the following code to find good hyperparameter values #scores = cross_validation.cross_val_score( #clf, trimmed_data, target, cv=5) #print("Accuracy: %f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) | 2.906422 | 3 |
customerapi/apps.py | yorek/azure-sql-db-django | 2 | 6621941 | from django.apps import AppConfig
class customerapiConfig(AppConfig):
    """Django application configuration for the ``customerapi`` app."""
    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+ setting).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'customerapi'
| from django.apps import AppConfig
class customerapiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'customerapi'
| none | 1 | 1.37278 | 1 | |
tests/conftest.py | davesque/py-lll | 1 | 6621942 | import contextlib
from pathlib import (
Path,
)
import pytest
from lll.parser import (
parse_s_exp,
)
# Fixture layout: tests/fixtures holds .lisp files the parser must accept;
# tests/fixtures/unparseable holds ones it must reject.
FIXTURES_PATH = Path(__file__).parent / 'fixtures'
UNPARSEABLE_FIXTURES_PATH = FIXTURES_PATH / 'unparseable'
# Sorted so parametrized test IDs are stable across runs and machines.
FIXTURES = list(sorted(FIXTURES_PATH.glob('*.lisp')))
UNPARSEABLE_FIXTURES = list(sorted(UNPARSEABLE_FIXTURES_PATH.glob('*.lisp')))
def get_fixture_path_id(path: Path) -> str:
    """Human-readable pytest ID for a fixture file: its absolute path."""
    resolved = path.resolve()
    return str(resolved)
@contextlib.contextmanager
def _open_fixture_file(filename, *args):
    """Yield an open handle for *filename* inside the fixtures directory.

    Extra positional *args* are forwarded to :func:`open` (e.g. a mode).
    """
    target = FIXTURES_PATH / filename
    with open(target, *args) as handle:
        yield handle
def _get_fixture_contents(filename):
    """Read and return the full text of the named fixture file."""
    with _open_fixture_file(filename, 'r') as handle:
        contents = handle.read()
    return contents
@pytest.fixture(
    params=FIXTURES,
    ids=get_fixture_path_id,
)
def parseable_lll_file(request):
    """Parametrized fixture: yields the path of each parseable .lisp fixture."""
    return request.param
@pytest.fixture(
    params=UNPARSEABLE_FIXTURES,
    ids=get_fixture_path_id,
)
def unparseable_lll_file(request):
    """Parametrized fixture: yields the path of each fixture the parser must reject."""
    return request.param
@pytest.fixture
def open_fixture_file():
    """Fixture exposing the module-level fixture-file opener as a callable."""
    return _open_fixture_file
@pytest.fixture
def get_fixture_contents():
    """Fixture exposing a callable that reads a fixture file's text."""
    return _get_fixture_contents
@pytest.fixture
def get_parsed_fixture():
    """Fixture: a callable that reads a fixture file and parses it into an
    s-expression tree via :func:`parse_s_exp`."""
    def _parse(filename):
        source = _get_fixture_contents(filename)
        return parse_s_exp(source)
    return _parse
| import contextlib
from pathlib import (
Path,
)
import pytest
from lll.parser import (
parse_s_exp,
)
FIXTURES_PATH = Path(__file__).parent / 'fixtures'
UNPARSEABLE_FIXTURES_PATH = FIXTURES_PATH / 'unparseable'
FIXTURES = list(sorted(FIXTURES_PATH.glob('*.lisp')))
UNPARSEABLE_FIXTURES = list(sorted(UNPARSEABLE_FIXTURES_PATH.glob('*.lisp')))
def get_fixture_path_id(path: Path) -> str:
return str(path.resolve())
@contextlib.contextmanager
def _open_fixture_file(filename, *args):
fixture_path = FIXTURES_PATH / filename
with open(fixture_path, *args) as f:
yield f
def _get_fixture_contents(filename):
with _open_fixture_file(filename, 'r') as f:
return f.read()
@pytest.fixture(
params=FIXTURES,
ids=get_fixture_path_id,
)
def parseable_lll_file(request):
return request.param
@pytest.fixture(
params=UNPARSEABLE_FIXTURES,
ids=get_fixture_path_id,
)
def unparseable_lll_file(request):
return request.param
@pytest.fixture
def open_fixture_file():
return _open_fixture_file
@pytest.fixture
def get_fixture_contents():
return _get_fixture_contents
@pytest.fixture
def get_parsed_fixture():
def _get_parsed_fixture(filename):
return parse_s_exp(_get_fixture_contents(filename))
return _get_parsed_fixture
| none | 1 | 2.258557 | 2 | |
tarball_deploy/__init__.py | psliwka/tarball-deploy | 1 | 6621943 | <filename>tarball_deploy/__init__.py
import pkg_resources
# Resolve the installed distribution's version at import time so the package
# exposes a standard ``__version__`` attribute.
# NOTE(review): pkg_resources is deprecated in modern setuptools;
# importlib.metadata.version() is the replacement -- confirm the minimum
# supported Python version before switching.
__version__ = pkg_resources.get_distribution(__name__).version
| <filename>tarball_deploy/__init__.py
import pkg_resources
__version__ = pkg_resources.get_distribution(__name__).version
| none | 1 | 1.325419 | 1 | |
util/client/gui/logInGui.py | bcm27/Chat | 0 | 6621944 | import util.client.clientUtils as cUtils
from tkinter import *
from util.client.gui import gui
import socket
from sys import argv
from util.sharedUtils import SERVER_PORT
import time
class LoginGUI:
    """Tkinter login/registration window for the chat client.

    Builds the window, connects a socket to the chat server, and blocks in
    the Tk main loop until the window is closed or a successful login hands
    control to the main GUI (via ``server_login_cb``).
    """
    def __init__(self, client_socket):
        # NOTE(review): the client_socket parameter is immediately shadowed by
        # a freshly created socket, so the caller's socket is never used --
        # confirm whether the parameter should be dropped or honored.
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a new socket
        # Server host comes from argv[1] if supplied, otherwise localhost.
        client_socket.connect((argv[1] if len(argv) > 1 else "localhost", SERVER_PORT)) # Connect to the socket
        window = Tk()
        window.resizable(0, 0)
        window.title("Welcome to Python Chat!")
        canvasBottom = Canvas(window, width=400, height=50)
        canvasTop = Canvas(window, width=400, height=50)
        label_one = Label(window, text='User:')
        label_two = Label(window, text='Password:')
        user_name_text_field = Entry(window, textvariable=StringVar())
        password_text_field = Entry(window, show='*', textvariable=StringVar())
        def display_toast(message):
            # Show a transient message label that removes itself.
            label_message = Label(window, text=message)
            label_message.pack()
            def clear_label():
                label_message.destroy()
            label_message.after(2000, clear_label) # Clear label after 2 seconds
        def login_or_register(should_login):
            # Validate the two entry fields, then delegate to the server callback.
            user = user_name_text_field.get()
            password = password_text_field.get()
            if not user or not password:
                display_toast('Please fill in all required fields!')
            else:
                server_login_cb(client_socket, should_login, user, password, window, display_toast)
        def login_on_click():
            login_or_register(True)
        def register_on_click():
            login_or_register(False)
        login_button = Button(window, text='Log In', command=login_on_click)
        register_button = Button(window, text='Register', command=register_on_click)
        label_login = Label(window, text='Login Now or Register', fg="orange red", font=("Georgia", 16, "bold"))
        label_welcome = Label(window, text='Welcome to Python Chat!', fg="deep sky blue", font=("Georgia", 18, "bold "))
        # Layout: decorative top banner, welcome text, form fields, buttons,
        # decorative bottom banner.
        canvasTop.pack()
        label_welcome.pack()
        label_login.pack()
        blueBox = canvasTop.create_rectangle(0, 0, 400, 30, fill="deep sky blue")
        label_one.pack()
        user_name_text_field.pack()
        label_two.pack()
        password_text_field.pack()
        login_button.pack()
        register_button.pack()
        blueBox = canvasBottom.create_rectangle(0, 50, 400, 30, fill="deep sky blue", )
        canvasBottom.pack()
        # Blocks until the window is destroyed.
        window.mainloop()
def server_login_cb(client_socket, should_login, p_user, p_pass, login_ui_window, display_toast):
    """Handle a login or registration attempt against the chat server.

    Args:
        client_socket: connected socket to the chat server.
        should_login: True to log in with existing credentials, False to register.
        p_user: username entered by the user.
        p_pass: password entered by the user.
        login_ui_window: the Tk login window; hidden on success.
        display_toast: callback showing a transient message in the window.
    """
    if should_login:
        log_attempt = cUtils.send_login_command(client_socket, p_user, p_pass)
        print("Login Status: %s" % log_attempt)
        if log_attempt == "<ACCEPTED>":
            login_ui_window.withdraw()
            gui.start_gui(client_socket) # Start the main GUI and pass it the created socket
        # BUG FIX: this branch was a separate `if`, so a *successful* login fell
        # through to the `else` below and also flashed "Invalid username or
        # password."; chaining with elif makes exactly one branch run.
        elif log_attempt == "<FAILED LOGIN ATTEMPTS>":
            display_toast("Too many invalid login attempts...closing")
            time.sleep(2)
            display_toast("Closing...")
            time.sleep(3)
            client_socket.close()
            # BUG FIX: the module only does `from sys import argv`, so the old
            # `sys.exit(-1)` raised NameError; SystemExit needs no import.
            raise SystemExit(-1)
        else:
            display_toast("Invalid username or password.")
    else:
        if cUtils.send_register_command(client_socket, p_user, p_pass):
            login_ui_window.withdraw()
            gui.start_gui(client_socket) # Start the main GUI and pass it the created socket
        else:
            display_toast("That username is already in use.")
| import util.client.clientUtils as cUtils
from tkinter import *
from util.client.gui import gui
import socket
from sys import argv
from util.sharedUtils import SERVER_PORT
import time
class LoginGUI:
def __init__(self, client_socket):
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a new socket
client_socket.connect((argv[1] if len(argv) > 1 else "localhost", SERVER_PORT)) # Connect to the socket
window = Tk()
window.resizable(0, 0)
window.title("Welcome to Python Chat!")
canvasBottom = Canvas(window, width=400, height=50)
canvasTop = Canvas(window, width=400, height=50)
label_one = Label(window, text='User:')
label_two = Label(window, text='Password:')
user_name_text_field = Entry(window, textvariable=StringVar())
password_text_field = Entry(window, show='*', textvariable=StringVar())
def display_toast(message):
label_message = Label(window, text=message)
label_message.pack()
def clear_label():
label_message.destroy()
label_message.after(2000, clear_label) # Clear label after 2 seconds
def login_or_register(should_login):
user = user_name_text_field.get()
password = password_text_field.get()
if not user or not password:
display_toast('Please fill in all required fields!')
else:
server_login_cb(client_socket, should_login, user, password, window, display_toast)
def login_on_click():
login_or_register(True)
def register_on_click():
login_or_register(False)
login_button = Button(window, text='Log In', command=login_on_click)
register_button = Button(window, text='Register', command=register_on_click)
label_login = Label(window, text='Login Now or Register', fg="orange red", font=("Georgia", 16, "bold"))
label_welcome = Label(window, text='Welcome to Python Chat!', fg="deep sky blue", font=("Georgia", 18, "bold "))
canvasTop.pack()
label_welcome.pack()
label_login.pack()
blueBox = canvasTop.create_rectangle(0, 0, 400, 30, fill="deep sky blue")
label_one.pack()
user_name_text_field.pack()
label_two.pack()
password_text_field.pack()
login_button.pack()
register_button.pack()
blueBox = canvasBottom.create_rectangle(0, 50, 400, 30, fill="deep sky blue", )
canvasBottom.pack()
window.mainloop()
def server_login_cb(client_socket, should_login, p_user, p_pass, login_ui_window, display_toast):
if should_login:
log_attempt = cUtils.send_login_command(client_socket, p_user, p_pass)
print("Login Status: %s" % log_attempt)
if log_attempt == "<ACCEPTED>":
login_ui_window.withdraw()
gui.start_gui(client_socket) # Start the main GUI and pass it the created socket
if log_attempt == "<FAILED LOGIN ATTEMPTS>":
display_toast("Too many invalid login attempts...closing")
time.sleep(2)
display_toast("Closing...")
time.sleep(3)
client_socket.close()
sys.exit(-1)
else:
display_toast("Invalid username or password.")
else:
if cUtils.send_register_command(client_socket, p_user, p_pass):
login_ui_window.withdraw()
gui.start_gui(client_socket) # Start the main GUI and pass it the created socket
else:
display_toast("That username is already in use.")
| en | 0.873302 | # Create a new socket # Connect to the socket # Clear label after 2 seconds # Start the main GUI and pass it the created socket # Start the main GUI and pass it the created socket | 3.198212 | 3 |
contrib/py/jxl_library_patches/jxl_imageio.py | toricls/brunsli | 700 | 6621945 | # Copyright (c) Google LLC 2019
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
"""Extends the 'imageio' to support basic reading of JPEG-XL recompressed JPEG.
This registers a file format that will call an external converter which will
(securely) convert JPEG-XL to a temporary JPEG file behind the scenes.
"""
from . import jxl_utils
import imageio
import imageio.core
# Extending imageio with plugins:
# https://imageio.readthedocs.io/en/stable/plugins.html
class _JpegXLFormat(imageio.core.Format):
  """Jpeg-XL image format. Currently only supports recompressed JPEG."""
  def _can_read(self, request):
    # Readable when the requested mode is one we support ('?' means "any")
    # and the file begins with the JPEG-XL recompressed-JPEG magic bytes.
    return (request.mode[1] in (self.modes + '?') and
            request.firstbytes.startswith(
                jxl_utils.JPEGXL_RECOMPRESSED_JPEG_HEADER))
  def _can_write(self, request):
    # Writing JPEG-XL is not supported by this plugin.
    return False
  class Reader(imageio.core.Format.Reader):
    """Image format Reader implementation."""
    def _open(self):
      """Opens the reader."""
      self._filename = self.request.get_local_filename()
    def _close(self):
      """Closes the reader."""
    def _get_length(self):
      """Returns the number of images."""
      return 1
    def _get_data(self, index):
      """Returns the data and metadata for the image with given index."""
      if not 0 <= index < 1:
        raise IndexError('Image index out of range: %s' % (index,))
      metadata = {}
      # Securely convert the JPEG-XL file to a temporary JPEG via the external
      # converter, then decode that temporary JPEG with imageio.imread.
      data = jxl_utils.call_with_jxl_filename_arg1_replaced_by_temp_jpeg_file(
          imageio.imread, self._filename)
      return data, metadata
    def _get_meta_data(self, index):
      return {} # This format does not yet support returning metadata.
  class Writer(imageio.core.Format.Writer):
    """Image format Writer implementation (writing is unsupported)."""
    def _open(self):
      raise NotImplementedError('Not implemented.')
    def _close(self):
      """Closes the writer."""
    def _append_data(self, image_data, metadata):
      del image_data, metadata # Unused.
      raise NotImplementedError('Not implemented.')
    def set_meta_data(self, metadata):
      del metadata # Unused.
      raise NotImplementedError('Not implemented.')
def register_jxl_support(imageio_module):
  """Registers JPEG-XL with imageio."""
  # The parent classes above were built against our own `import imageio`; the
  # caller's module object must be that very same module, otherwise the format
  # would be registered on a different imageio instance than the one our
  # classes extend. A closure-based design could avoid this, but the failure
  # modes it guards against are obscure, so an identity check suffices.
  assert id(imageio_module) == id(imageio), (
      'User-passed `imageio` module differs from `import imageio` result.')
  jxl_format = _JpegXLFormat(
      'JPEG-XL',
      'JPEG-XL (partial support; reads recompressed JPEG only.)',
      '.jxl',
      'iI',
  )
  imageio_module.formats.add_format(jxl_format)
| # Copyright (c) Google LLC 2019
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
"""Extends the 'imageio' to support basic reading of JPEG-XL recompressed JPEG.
This registers a file format that will call an external converter which will
(securely) convert JPEG-XL to a temporary JPEG file behind the scenes.
"""
from . import jxl_utils
import imageio
import imageio.core
# Extending imageio with plugins:
# https://imageio.readthedocs.io/en/stable/plugins.html
class _JpegXLFormat(imageio.core.Format):
"""Jpeg-XL image format. Currently only supports recompressed JPEG."""
def _can_read(self, request):
return (request.mode[1] in (self.modes + '?') and
request.firstbytes.startswith(
jxl_utils.JPEGXL_RECOMPRESSED_JPEG_HEADER))
def _can_write(self, request):
return False
class Reader(imageio.core.Format.Reader):
"""Image format Reader implementation."""
def _open(self):
"""Opens the reader."""
self._filename = self.request.get_local_filename()
def _close(self):
"""Closes the reader."""
def _get_length(self):
"""Returns the number of images."""
return 1
def _get_data(self, index):
"""Returns the data and metadata for the image with given index."""
if not 0 <= index < 1:
raise IndexError('Image index out of range: %s' % (index,))
metadata = {}
data = jxl_utils.call_with_jxl_filename_arg1_replaced_by_temp_jpeg_file(
imageio.imread, self._filename)
return data, metadata
def _get_meta_data(self, index):
return {} # This format does not yet support returning metadata.
class Writer(imageio.core.Format.Writer):
"""Image format Writer implementation."""
def _open(self):
raise NotImplementedError('Not implemented.')
def _close(self):
"""Closes the writer."""
def _append_data(self, image_data, metadata):
del image_data, metadata # Unused.
raise NotImplementedError('Not implemented.')
def set_meta_data(self, metadata):
del metadata # Unused.
raise NotImplementedError('Not implemented.')
def register_jxl_support(imageio_module):
"""Registers JPEG-XL with imageio."""
# Above, we are using "import imageio.core" to get a parent class.
# We should make sure that the `imageio` module implicitly referred like this
# matches the imageio module which we are extending with Jpeg-XL support.
# In principle, we could avoid this by importing from the module passed in,
# and defining a closure-class, but the situations when this fails are so
# obscure that this should not be an issue for easing the transition into
# Jpeg-XL.
assert id(imageio_module) == id(imageio), (
'User-passed `imageio` module differs from `import imageio` result.')
imageio_module.formats.add_format(
_JpegXLFormat(
'JPEG-XL',
'JPEG-XL (partial support; reads recompressed JPEG only.)',
'.jxl',
'iI',
))
| en | 0.839193 | # Copyright (c) Google LLC 2019 # # Use of this source code is governed by an MIT-style # license that can be found in the LICENSE file or at # https://opensource.org/licenses/MIT. Extends the 'imageio' to support basic reading of JPEG-XL recompressed JPEG. This registers a file format that will call an external converter which will (securely) convert JPEG-XL to a temporary JPEG file behind the scenes. # Extending imageio with plugins: # https://imageio.readthedocs.io/en/stable/plugins.html Jpeg-XL image format. Currently only supports recompressed JPEG. Image format Reader implementation. Opens the reader. Closes the reader. Returns the number of images. Returns the data and metadata for the image with given index. # This format does not yet support returning metadata. Image format Writer implementation. Closes the writer. # Unused. # Unused. Registers JPEG-XL with imageio. # Above, we are using "import imageio.core" to get a parent class. # We should make sure that the `imageio` module implicitly referred like this # matches the imageio module which we are extending with Jpeg-XL support. # In principle, we could avoid this by importing from the module passed in, # and defining a closure-class, but the situations when this fails are so # obscure that this should not be an issue for easing the transition into # Jpeg-XL. | 2.490035 | 2 |
timeweb/timewebapp/migrations/0047_rename_ad_timewebmodel_assignment_date.py | snapsnap123/TimeWeb | 1 | 6621946 | <gh_stars>1-10
# Generated by Django 3.2.4 on 2021-06-26 20:53
from django.db import migrations
class Migration(migrations.Migration):
    """Renames TimewebModel's terse ``ad`` field to ``assignment_date``."""
    dependencies = [
        ('timewebapp', '0046_rename_dif_assign_timewebmodel_blue_line_start'),
    ]
    operations = [
        # Pure column rename; no data transformation is performed.
        migrations.RenameField(
            model_name='timewebmodel',
            old_name='ad',
            new_name='assignment_date',
        ),
    ]
| # Generated by Django 3.2.4 on 2021-06-26 20:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('timewebapp', '0046_rename_dif_assign_timewebmodel_blue_line_start'),
]
operations = [
migrations.RenameField(
model_name='timewebmodel',
old_name='ad',
new_name='assignment_date',
),
] | en | 0.8692 | # Generated by Django 3.2.4 on 2021-06-26 20:53 | 1.616665 | 2 |
aspc/college/migrations/0001_initial.py | DDKZ/mainsite | 8 | 6621947 | <reponame>DDKZ/mainsite
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the college app: Building, Floor, Map, RoomLocation
    and Term models (auto-generated; edit with care)."""
    dependencies = [
    ]
    operations = [
        # Campus building with optional geo-coordinates and a category.
        migrations.CreateModel(
            name='Building',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('latitude', models.FloatField(null=True, blank=True)),
                ('longitude', models.FloatField(null=True, blank=True)),
                ('name', models.CharField(max_length=32)),
                ('shortname', models.SlugField(max_length=32)),
                ('type', models.IntegerField(db_index=True, choices=[(0, b'Dormitory'), (1, b'Academic'), (2, b'Dining Hall')])),
            ],
            options={
                'ordering': ('name',),
            },
            bases=(models.Model,),
        ),
        # Numbered floor belonging to a Building.
        migrations.CreateModel(
            name='Floor',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('number', models.PositiveSmallIntegerField()),
                ('building', models.ForeignKey(to='college.Building')),
            ],
            options={
                'ordering': ('number',),
            },
            bases=(models.Model,),
        ),
        # One map image per Floor, with n/e/s/w bounding coordinates.
        migrations.CreateModel(
            name='Map',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('image', models.FileField(upload_to=b'maps/')),
                ('n', models.FloatField()),
                ('e', models.FloatField()),
                ('s', models.FloatField()),
                ('w', models.FloatField()),
                ('floor', models.OneToOneField(to='college.Floor')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A room on a Floor, identified by the building's numbering scheme.
        migrations.CreateModel(
            name='RoomLocation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('latitude', models.FloatField(null=True, blank=True)),
                ('longitude', models.FloatField(null=True, blank=True)),
                ('number', models.CharField(help_text=b'room number in building numbering scheme', max_length=8)),
                ('floor', models.ForeignKey(to='college.Floor')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Academic term bounded by start/end dates, newest first.
        migrations.CreateModel(
            name='Term',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('start', models.DateField()),
                ('end', models.DateField()),
            ],
            options={
                'ordering': ['-end'],
            },
            bases=(models.Model,),
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Building',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('latitude', models.FloatField(null=True, blank=True)),
('longitude', models.FloatField(null=True, blank=True)),
('name', models.CharField(max_length=32)),
('shortname', models.SlugField(max_length=32)),
('type', models.IntegerField(db_index=True, choices=[(0, b'Dormitory'), (1, b'Academic'), (2, b'Dining Hall')])),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Floor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('number', models.PositiveSmallIntegerField()),
('building', models.ForeignKey(to='college.Building')),
],
options={
'ordering': ('number',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Map',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', models.FileField(upload_to=b'maps/')),
('n', models.FloatField()),
('e', models.FloatField()),
('s', models.FloatField()),
('w', models.FloatField()),
('floor', models.OneToOneField(to='college.Floor')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RoomLocation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('latitude', models.FloatField(null=True, blank=True)),
('longitude', models.FloatField(null=True, blank=True)),
('number', models.CharField(help_text=b'room number in building numbering scheme', max_length=8)),
('floor', models.ForeignKey(to='college.Floor')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Term',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start', models.DateField()),
('end', models.DateField()),
],
options={
'ordering': ['-end'],
},
bases=(models.Model,),
),
] | en | 0.769321 | # -*- coding: utf-8 -*- | 1.75077 | 2 |
examples/docs_snippets/docs_snippets/concepts/solids_pipelines/pipelines.py | rpatil524/dagster | 4,606 | 6621948 | # pylint: disable=unused-argument
from dagster import DependencyDefinition, GraphDefinition, job, op
# Minimal no-op op, invoked by my_tags_job at the bottom of this file.
@op
def my_op():
    pass
# The paired start_/end_ marker comments below appear to delimit regions that
# the docs tooling extracts verbatim -- do not rename or remove them.
# start_pipeline_example_marker
@op
def return_one(context):
    return 1
@op
def add_one(context, number: int):
    return number + 1
@job
def one_plus_one():
    add_one(return_one())
# end_pipeline_example_marker
# start_multiple_usage_pipeline
@job
def multiple_usage():
    add_one(add_one(return_one()))
# end_multiple_usage_pipeline
# start_alias_pipeline
@job
def alias():
    add_one.alias("second_addition")(add_one(return_one()))
# end_alias_pipeline
# start_tag_pipeline
@job
def tagged_add_one():
    add_one.tag({"my_tag": "my_value"})(add_one(return_one()))
# end_tag_pipeline
# Same one_plus_one job, built with the constructor API instead of decorators.
# start_pipeline_definition_marker
one_plus_one_from_constructor = GraphDefinition(
    name="one_plus_one",
    node_defs=[return_one, add_one],
    dependencies={"add_one": {"number": DependencyDefinition("return_one")}},
).to_job()
# end_pipeline_definition_marker
# start_tags_pipeline
@job(tags={"my_tag": "my_value"})
def my_tags_job():
    my_op()
# end_tags_pipeline
| # pylint: disable=unused-argument
from dagster import DependencyDefinition, GraphDefinition, job, op
@op
def my_op():
pass
# start_pipeline_example_marker
@op
def return_one(context):
return 1
@op
def add_one(context, number: int):
return number + 1
@job
def one_plus_one():
add_one(return_one())
# end_pipeline_example_marker
# start_multiple_usage_pipeline
@job
def multiple_usage():
add_one(add_one(return_one()))
# end_multiple_usage_pipeline
# start_alias_pipeline
@job
def alias():
add_one.alias("second_addition")(add_one(return_one()))
# end_alias_pipeline
# start_tag_pipeline
@job
def tagged_add_one():
add_one.tag({"my_tag": "my_value"})(add_one(return_one()))
# end_tag_pipeline
# start_pipeline_definition_marker
one_plus_one_from_constructor = GraphDefinition(
name="one_plus_one",
node_defs=[return_one, add_one],
dependencies={"add_one": {"number": DependencyDefinition("return_one")}},
).to_job()
# end_pipeline_definition_marker
# start_tags_pipeline
@job(tags={"my_tag": "my_value"})
def my_tags_job():
my_op()
# end_tags_pipeline
| en | 0.626763 | # pylint: disable=unused-argument # start_pipeline_example_marker # end_pipeline_example_marker # start_multiple_usage_pipeline # end_multiple_usage_pipeline # start_alias_pipeline # end_alias_pipeline # start_tag_pipeline # end_tag_pipeline # start_pipeline_definition_marker # end_pipeline_definition_marker # start_tags_pipeline # end_tags_pipeline | 2.094122 | 2 |
tools/store_convert.py | Totto16/auto-xdcc | 4 | 6621949 | <filename>tools/store_convert.py
#!/usr/bin/env python3
import argparse
import itertools
import json
import sys
import urllib.parse as urlparse
from copy import deepcopy
def list_partition(pred, iterable):
    """Use a predicate to partition entries into false entries and true entries"""
    # e.g. list_partition(is_odd, range(10)) -> (0 2 4 6 8, 1 3 5 7 9)
    first, second = itertools.tee(iterable)
    falses = itertools.filterfalse(pred, first)
    trues = filter(pred, second)
    return falses, trues
def migrate_2_7(old_conf):
    """Upgrade any pre-2.7 store to schema 2.7 by filling in defaults.

    Keys already present in *old_conf* take precedence over the defaults.
    """
    defaults = {
        "storeVer": "2.7",
        "content-length": 0,
        "trusted": ["CR-HOLLAND|NEW", "CR-ARCHIVE|1080p", "KareRaisu", "Ginpachi-Sensei", "Gintoki", "Ginpa3", "Ginpa2", "Nippon|zongzing", "Nippon|minglong"],
        "current": "CR-HOLLAND|NEW",
        "clear": "on",
        "last": 0,
        "shows": {}
    }
    merged = dict(defaults)
    merged.update(old_conf)
    return merged
def migrate_3_0(old_conf):
    """Convert a 2.7-era store to schema 3.0.

    Moves packlist state under a 'packlist' mapping, introduces a refresh
    timer, and splits show entries whose fourth field is 'a' into a separate
    'archived' table; the status field is dropped from every entry.
    """
    conf = {
        'storeVer': '3.0',
        'packlist': {
            'url': "http://arutha.info:1337/txt",
            # Older stores kept these as strings; normalize to ints.
            'contentLength': int(old_conf['content-length']),
            'lastPack': int(old_conf['last'])
        },
        'timers': {
            'refresh': {
                'interval': 900
            }
        },
        'maxConcurrentDownloads': 3,
        'trusted': old_conf['trusted'],
        'current': old_conf['current'],
        'clear': old_conf['clear']
    }
    # An entry whose fourth field equals 'a' is archived; keep only the first
    # three fields in either table.
    entries = old_conf['shows'].items()
    conf['archived'] = {title: data[:3] for title, data in entries if data[3] == 'a'}
    conf['shows'] = {title: data[:3] for title, data in entries if data[3] != 'a'}
    return conf
def migrate_3_2(old_conf):
    """Convert a 3.0 store to schema 3.2.

    The single 'packlist' becomes a named entry in a 'packlists' table, keyed
    by the packlist host's second-level domain label.
    """
    conf = {
        'storeVer': '3.2',
        'packlists': {},
        'shows': old_conf['shows'],
        'archived': old_conf['archived'],
        'clear': old_conf['clear']
    }
    packlist = old_conf['packlist']
    components = urlparse.urlparse(packlist['url'])
    # ROBUSTNESS FIX: hostname.split('.')[-2] raised IndexError for dotless
    # hosts (e.g. "localhost") and TypeError when the URL carried no host at
    # all (hostname is None); fall back gracefully in both cases.
    host = components.hostname or ''
    labels = host.split('.')
    name = labels[-2] if len(labels) >= 2 else (labels[0] or 'default')
    conf['packlists'][name] = {
        'url': packlist['url'],
        'type': 'episodic',
        'contentLength': packlist['contentLength'],
        'lastPack': packlist['lastPack'],
        'maxConcurrentDownloads': old_conf['maxConcurrentDownloads'],
        'trusted': old_conf['trusted'],
        'current': old_conf['current'],
        'refreshInterval': old_conf['timers']['refresh']['interval']
    }
    return conf
def migrate_3_3(old_conf):
    """Convert a 3.2 store to schema 3.3 by stamping every packlist with the
    metadata types it supports (plain text only, for now)."""
    conf = {
        'storeVer': '3.3',
        'packlists': old_conf['packlists'],
        'shows': old_conf['shows'],
        'archived': old_conf['archived'],
        'clear': old_conf['clear']
    }
    # The packlist dicts are shared with old_conf, so this annotates them
    # in place (matching the original behavior).
    for packlist in conf['packlists'].values():
        packlist['metaType'] = ['text']
    return conf
# Ordered migration chain: (target version, migration function). Each function
# upgrades a store from the previous schema to its target version;
# run_migrations applies every entry newer than the store's current version.
versions = [
    ('2.7', migrate_2_7),
    ('3.0', migrate_3_0),
    ('3.2', migrate_3_2),
    ('3.3', migrate_3_3)
]
def run_migrations(old_conf, from_ver):
    """Apply, in order, every migration step newer than *from_ver*.

    Returns the migrated configuration; *old_conf* itself is left unmodified.
    """
    # Make new deep copy to modify
    new_conf = deepcopy(old_conf)
    for ver, fn in versions:
        if ver > from_ver:
            # BUG FIX: each step was previously applied to the ORIGINAL conf
            # (fn(old_conf)), discarding every earlier step's output; steps
            # must chain, each consuming the previous step's result.
            new_conf = fn(new_conf)
    # NOTE(review): version ordering relies on string comparison, which is only
    # correct while major/minor versions stay single-digit -- confirm before a
    # hypothetical "10.0" release.
    return new_conf
def argument_parser():
    """Build the command-line interface for the store converter."""
    parser = argparse.ArgumentParser(description="Auto-XDCC store converter tool.")
    parser.add_argument(
        'filename',
        nargs='?',
        default='-',
        help="Filename of the store to convert. Defaults to standard input.")
    parser.add_argument(
        '-nb', '--nobackup',
        action='store_true',
        help="Don't make backup of old store.")
    parser.add_argument(
        '-o', '--output',
        default='-',
        help="Output filename. Defaults to standard output")
    return parser
def main():
    """CLI entry point: read a store, back it up, migrate, and write it out."""
    parser = argument_parser()
    args = parser.parse_args()
    # '-' selects stdin/stdout, mirroring common Unix convention.
    input_file = sys.stdin if args.filename == '-' else open(args.filename)
    with input_file:
        content = json.load(input_file)
    # Stores predating version tracking carry no 'storeVer'; treat as "0.1"
    # so every migration step runs.
    store_ver = content['storeVer'] if 'storeVer' in content else "0.1"
    if not args.nobackup:
        # Back up the ORIGINAL store, tagged with its pre-migration version.
        backup_filename = 'xdcc_store.json' if args.filename == '-' else args.filename
        with open('{}.v{}.bak'.format(backup_filename, store_ver.replace('.', '_')), 'w') as bak:
            json.dump(content, bak)
    new_content = run_migrations(content, store_ver)
    output_file = sys.stdout if args.output == '-' else open(args.output, 'w')
    # NOTE(review): these with-blocks close sys.stdin/sys.stdout when '-' is
    # used -- harmless since the process exits right after, but confirm if
    # main() is ever called as a library function.
    with output_file:
        json.dump(new_content, output_file, indent=2)
if __name__ == '__main__':
    main()
| <filename>tools/store_convert.py
#!/usr/bin/env python3
import argparse
import itertools
import json
import sys
import urllib.parse as urlparse
from copy import deepcopy
def list_partition(pred, iterable):
"""Use a predicate to partition entries into false entries and true entries"""
# list_partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = itertools.tee(iterable)
return itertools.filterfalse(pred, t1), filter(pred, t2)
def migrate_2_7(old_conf):
conf = {
"storeVer": "2.7",
"content-length": 0,
"trusted": ["CR-HOLLAND|NEW", "CR-ARCHIVE|1080p", "KareRaisu", "Ginpachi-Sensei", "Gintoki", "Ginpa3", "Ginpa2", "Nippon|zongzing", "Nippon|minglong"],
"current": "CR-HOLLAND|NEW",
"clear": "on",
"last": 0,
"shows": {}
}
conf.update(old_conf)
return conf
def migrate_3_0(old_conf):
conf = {
'storeVer': '3.0',
'packlist': {
'url':"http://arutha.info:1337/txt",
'contentLength': int(old_conf['content-length']),
'lastPack': int(old_conf['last'])
},
'timers': {
'refresh': {
'interval': 900
}
},
'maxConcurrentDownloads': 3,
'trusted': old_conf['trusted'],
'current': old_conf['current'],
'clear': old_conf['clear']
}
shows, archived = list_partition(lambda x: x[1][3] == 'a', old_conf['shows'].items())
conf['archived'] = dict([(name, x[:3]) for name, x in archived])
conf['shows'] = dict([(name, x[:3]) for name, x in shows])
return conf
def migrate_3_2(old_conf):
conf = {
'storeVer': '3.2',
'packlists': {},
'shows': old_conf['shows'],
'archived': old_conf['archived'],
'clear': old_conf['clear']
}
packlist = old_conf['packlist']
components = urlparse.urlparse(packlist['url'])
name = components.hostname.split('.')[-2]
conf['packlists'][name] = {
'url': packlist['url'],
'type': 'episodic',
'contentLength': packlist['contentLength'],
'lastPack': packlist['lastPack'],
'maxConcurrentDownloads': old_conf['maxConcurrentDownloads'],
'trusted': old_conf['trusted'],
'current': old_conf['current'],
'refreshInterval': old_conf['timers']['refresh']['interval']
}
return conf
def migrate_3_3(old_conf):
conf = {
'storeVer': '3.3',
'packlists': old_conf['packlists'],
'shows': old_conf['shows'],
'archived': old_conf['archived'],
'clear': old_conf['clear']
}
for key in conf['packlists']:
conf['packlists'][key]['metaType'] = ['text']
return conf
versions = [
('2.7', migrate_2_7),
('3.0', migrate_3_0),
('3.2', migrate_3_2),
('3.3', migrate_3_3)
]
def run_migrations(old_conf, from_ver):
# Make new deep copy to modify
new_conf = deepcopy(old_conf)
for ver, fn in versions:
if ver > from_ver:
new_conf = fn(old_conf)
return new_conf
def argument_parser():
parser = argparse.ArgumentParser(description="Auto-XDCC store converter tool.")
parser.add_argument('filename', help="Filename of the store to convert. Defaults to standard input.", nargs='?', default='-')
parser.add_argument('-nb', '--nobackup', help="Don't make backup of old store.", action='store_true')
parser.add_argument('-o', '--output', help="Output filename. Defaults to standard output", default='-')
return parser
def main():
    """Entry point: load a store, back it up, migrate it, write the result."""
    args = argument_parser().parse_args()

    # NOTE(review): when reading stdin / writing stdout the `with` blocks
    # close those streams, same as the original flow — confirm that is
    # intentional before reusing this pattern elsewhere.
    source = sys.stdin if args.filename == '-' else open(args.filename)
    with source:
        content = json.load(source)

    # Stores predating the version field are treated as "0.1".
    store_ver = content.get('storeVer', "0.1")

    if not args.nobackup:
        base = 'xdcc_store.json' if args.filename == '-' else args.filename
        backup_name = '{}.v{}.bak'.format(base, store_ver.replace('.', '_'))
        with open(backup_name, 'w') as bak:
            json.dump(content, bak)

    migrated = run_migrations(content, store_ver)

    sink = sys.stdout if args.output == '-' else open(args.output, 'w')
    with sink:
        json.dump(migrated, sink, indent=2)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| en | 0.437931 | #!/usr/bin/env python3 Use a predicate to partition entries into false entries and true entries # list_partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 # Make new deep copy to modify | 2.56048 | 3 |
pyvisdk/do/host_nas_volume_spec.py | Infinidat/pyvisdk | 0 | 6621950 | <reponame>Infinidat/pyvisdk<filename>pyvisdk/do/host_nas_volume_spec.py<gh_stars>0
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostNasVolumeSpec(vim, *args, **kwargs):
    '''Specification for creating NAS volume.When mounting a NAS volume on multiple
    hosts, the same remoteHost and remotePath values should be used on every host,
    otherwise it will be treated as different datastores. For example, if one host
    references the remotePath of a NAS volume as "/mnt/mount1" and another
    references it as "/mnt/mount1/", it will not be recognized as the same
    datastore.'''

    obj = vim.client.factory.create('{urn:vim25}HostNasVolumeSpec')

    # do some validation checking...
    # NOTE(review): this file is auto-generated — apply the same fix in the
    # generator template, not just here.
    if (len(args) + len(kwargs)) < 4:
        # Bug fix: the message previously claimed "at least 5" arguments and
        # reported only len(args); the contract is the 4 required properties
        # below, satisfiable by positional *or* keyword arguments.
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'accessMode', 'localPath', 'remoteHost', 'remotePath' ]
    optional = [ 'password', 'type', 'userName', 'dynamicProperty',
       'dynamicType' ]

    # Positional args fill the required properties first, then the optional
    # ones, in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword args may set any known property; unknown names are rejected.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostNasVolumeSpec(vim, *args, **kwargs):
'''Specification for creating NAS volume.When mounting a NAS volume on multiple
hosts, the same remoteHost and remotePath values should be used on every host,
otherwise it will be treated as different datastores. For example, if one host
references the remotePath of a NAS volume as "/mnt/mount1" and another
references it as "/mnt/mount1/", it will not be recognized as the same
datastore.'''
obj = vim.client.factory.create('{urn:vim25}HostNasVolumeSpec')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
raise IndexError('Expected at least 5 arguments got: %d' % len(args))
required = [ 'accessMode', 'localPath', 'remoteHost', 'remotePath' ]
optional = [ 'password', 'type', 'userName', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj | en | 0.701206 | ######################################## # Automatically generated, do not edit. ######################################## Specification for creating NAS volume.When mounting a NAS volume on multiple hosts, the same remoteHost and remotePath values should be used on every host, otherwise it will be treated as different datastores. For example, if one host references the remotePath of a NAS volume as "/mnt/mount1" and another references it as "/mnt/mount1/", it will not be recognized as the same datastore. # do some validation checking... | 2.369328 | 2 |