code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 12:05:46 2018
@author: MichaelEK
"""
import pytest
import numpy as np
from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list
### Parameters
# Reference request parameters: an Environment Canterbury (ECan) Hilltop web
# service endpoint plus one site / collection / measurement and a fixed date
# range, shared by every test below via @pytest.mark.parametrize.
test_data1 = dict(
    base_url = 'http://data.ecan.govt.nz/',
    hts = 'WQAll.hts',
    site = 'SQ31045',
    collection = 'LWRPLakes',
    measurement = 'Total Phosphorus',
    from_date = '1983-11-22 10:50',
    to_date = '2018-04-13 14:05',
    dtl_method = 'trend'
)
# test_data2 = dict(
# base_url = 'https://data.hbrc.govt.nz/Envirodata',
# hts = 'ContinuousArchive.hts',
# site = 'Well.16772 Ngatarawa Rd',
# collection = 'Stage',
# measurement = 'Elevation Above Sea Level[Recorder Water Level]',
# from_date = '2018-10-13',
# to_date = '2018-11-01'
# )
### Tests
@pytest.mark.parametrize('data', [test_data1])
def test_site_list(data):
    """The full site list for the server should contain well over 1000 sites."""
    all_sites = site_list(data['base_url'], data['hts'], True)
    assert len(all_sites) > 1000
@pytest.mark.parametrize('data', [test_data1])
def test_measurement_list(data):
    """A single site should expose more than six measurement types."""
    measurements = measurement_list(data['base_url'], data['hts'], data['site'])
    assert len(measurements) > 6
@pytest.mark.parametrize('data', [test_data1])
def test_site_list_with_collection(data):
    """Restricting the site list to one collection should still yield > 40 sites."""
    collection_sites = site_list(data['base_url'], data['hts'], collection=data['collection'])
    assert len(collection_sites) > 40
@pytest.mark.parametrize('data', [test_data1])
def test_wq_sample_parameter_list(data):
    """A water-quality site should expose more than ten sample parameters."""
    parameters = wq_sample_parameter_list(data['base_url'], data['hts'], data['site'])
    assert len(parameters) > 10
@pytest.mark.parametrize('data', [test_data1])
def test_collection_list(data):
    """The collection list should be large and carry the expected columns."""
    collections = collection_list(data['base_url'], data['hts'])
    assert len(collections) > 180
    expected_columns = ['CollectionName', 'SiteName', 'Measurement', 'Filename']
    assert list(collections.columns) == expected_columns
@pytest.mark.parametrize('data', [test_data1])
def test_get_data1(data):
    """A plain time-series request should return more than 80 rows."""
    ts = get_data(data['base_url'], data['hts'], data['site'], data['measurement'],
                  from_date=data['from_date'], to_date=data['to_date'])
    assert len(ts) > 80
@pytest.mark.parametrize('data', [test_data1])
def test_get_data2(data):
    """With parameters=True the call also returns the extra sample parameters."""
    ts, extra = get_data(data['base_url'], data['hts'], data['site'], data['measurement'],
                         from_date=data['from_date'], to_date=data['to_date'], parameters=True)
    assert len(ts) > 80
    assert len(extra) > 300
@pytest.mark.parametrize('data', [test_data1])
def test_get_data3(data):
    """Requesting the special 'WQ Sample' measurement should return > 800 rows."""
    wq_sample = get_data(data['base_url'], data['hts'], data['site'], 'WQ Sample',
                         from_date=data['from_date'], to_date=data['to_date'])
    assert len(wq_sample) > 800
@pytest.mark.parametrize('data', [test_data1])
def test_get_data4(data):
    """With a detection-limit method applied the Value column should stay numeric."""
    ts, extra = get_data(data['base_url'], data['hts'], data['site'], data['measurement'],
                         from_date=data['from_date'], to_date=data['to_date'],
                         parameters=True, dtl_method=data['dtl_method'])
    assert len(ts) > 80
    assert len(extra) > 300
    assert ts.Value.dtype == np.number
| [
"hilltoppy.web_service.collection_list",
"pytest.mark.parametrize",
"hilltoppy.web_service.wq_sample_parameter_list",
"hilltoppy.web_service.site_list",
"hilltoppy.web_service.measurement_list",
"hilltoppy.web_service.get_data"
] | [((868, 913), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[test_data1]'], {}), "('data', [test_data1])\n", (891, 913), False, 'import pytest\n'), ((1031, 1076), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[test_data1]'], {}), "('data', [test_data1])\n", (1054, 1076), False, 'import pytest\n'), ((1221, 1266), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[test_data1]'], {}), "('data', [test_data1])\n", (1244, 1266), False, 'import pytest\n'), ((1423, 1468), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[test_data1]'], {}), "('data', [test_data1])\n", (1446, 1468), False, 'import pytest\n'), ((1630, 1675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[test_data1]'], {}), "('data', [test_data1])\n", (1653, 1675), False, 'import pytest\n'), ((1891, 1936), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[test_data1]'], {}), "('data', [test_data1])\n", (1914, 1936), False, 'import pytest\n'), ((2138, 2183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[test_data1]'], {}), "('data', [test_data1])\n", (2161, 2183), False, 'import pytest\n'), ((2434, 2479), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[test_data1]'], {}), "('data', [test_data1])\n", (2457, 2479), False, 'import pytest\n'), ((2674, 2719), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', '[test_data1]'], {}), "('data', [test_data1])\n", (2697, 2719), False, 'import pytest\n'), ((952, 998), 'hilltoppy.web_service.site_list', 'site_list', (["data['base_url']", "data['hts']", '(True)'], {}), "(data['base_url'], data['hts'], True)\n", (961, 998), False, 'from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list\n'), ((1126, 1187), 'hilltoppy.web_service.measurement_list', 'measurement_list', (["data['base_url']", "data['hts']", 
"data['site']"], {}), "(data['base_url'], data['hts'], data['site'])\n", (1142, 1187), False, 'from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list\n'), ((1321, 1392), 'hilltoppy.web_service.site_list', 'site_list', (["data['base_url']", "data['hts']"], {'collection': "data['collection']"}), "(data['base_url'], data['hts'], collection=data['collection'])\n", (1330, 1392), False, 'from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list\n'), ((1526, 1595), 'hilltoppy.web_service.wq_sample_parameter_list', 'wq_sample_parameter_list', (["data['base_url']", "data['hts']", "data['site']"], {}), "(data['base_url'], data['hts'], data['site'])\n", (1550, 1595), False, 'from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list\n'), ((1717, 1763), 'hilltoppy.web_service.collection_list', 'collection_list', (["data['base_url']", "data['hts']"], {}), "(data['base_url'], data['hts'])\n", (1732, 1763), False, 'from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list\n'), ((1977, 2109), 'hilltoppy.web_service.get_data', 'get_data', (["data['base_url']", "data['hts']", "data['site']", "data['measurement']"], {'from_date': "data['from_date']", 'to_date': "data['to_date']"}), "(data['base_url'], data['hts'], data['site'], data['measurement'],\n from_date=data['from_date'], to_date=data['to_date'])\n", (1985, 2109), False, 'from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list\n'), ((2232, 2381), 'hilltoppy.web_service.get_data', 'get_data', (["data['base_url']", "data['hts']", "data['site']", "data['measurement']"], {'from_date': "data['from_date']", 'to_date': "data['to_date']", 'parameters': '(True)'}), "(data['base_url'], data['hts'], data['site'], data['measurement'],\n 
from_date=data['from_date'], to_date=data['to_date'], parameters=True)\n", (2240, 2381), False, 'from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list\n'), ((2520, 2644), 'hilltoppy.web_service.get_data', 'get_data', (["data['base_url']", "data['hts']", "data['site']", '"""WQ Sample"""'], {'from_date': "data['from_date']", 'to_date': "data['to_date']"}), "(data['base_url'], data['hts'], data['site'], 'WQ Sample',\n from_date=data['from_date'], to_date=data['to_date'])\n", (2528, 2644), False, 'from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list\n'), ((2768, 2952), 'hilltoppy.web_service.get_data', 'get_data', (["data['base_url']", "data['hts']", "data['site']", "data['measurement']"], {'from_date': "data['from_date']", 'to_date': "data['to_date']", 'parameters': '(True)', 'dtl_method': "data['dtl_method']"}), "(data['base_url'], data['hts'], data['site'], data['measurement'],\n from_date=data['from_date'], to_date=data['to_date'], parameters=True,\n dtl_method=data['dtl_method'])\n", (2776, 2952), False, 'from hilltoppy.web_service import measurement_list, site_list, collection_list, get_data, wq_sample_parameter_list\n')] |
##Author : <NAME>
##Date : 9/24/2017
import csv
import sys
import operator
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
from docutils.writers.odf_odt import ToString
from _elementtree import SubElement
from sqlalchemy.sql.expression import true
from pygments.lexers.csound import newline
#from math import floor
#from pandas.io.tests.parser import quoting
# Mutable script-level state used by the CSV-processing loop below.
stock_lst = []  # stock codes seen so far, parallel to stock_master
flag = 0  # 0 until the first CSV row has been processed
counter = 0  # number of CSV rows read
code_t = ""  # NOTE(review): appears unused in the visible code
min_time_gap_t=999999  # NOTE(review): appears unused in the visible code
# avg_volume_t = 0
# avg_trade_t = 0
# max_trade_t = 0
# min_trade_t = 0
# weighted_avg_price_t = 0
class Stock:
    """Running aggregate statistics for a single stock symbol.

    One instance is created from the first observed trade of a code; every
    later trade for the same code is folded in with updateStock().
    """
    # Class-level defaults; overwritten per instance in __init__/updateStock.
    code = ""
    min_time_gap = 9999999999999999999999999  # sentinel: no gap observed yet
    prev_ts = 0
    curr_ts = 0
    avg_volume = 0
    avg_price = 0
    max_trade = 0
    min_trade = 0
    sum_price = 0
    sum_volume = 0
    stock_code_count = 0
    price_range = 0

    def __init__(self, stock_code, timestamp, volume, trading_price):
        """Initialise every aggregate from the first observed trade."""
        self.code = stock_code
        self.prev_ts = self.curr_ts  # first trade: previous timestamp is the class default 0
        self.curr_ts = timestamp
        self.avg_volume = volume
        self.sum_volume = volume
        self.max_trade = trading_price
        self.min_trade = trading_price
        self.sum_price = trading_price
        self.stock_code_count = 1
        self.avg_price = int(self.sum_price / self.stock_code_count)
        self.avg_volume = volume
        self.price_range = self.min_trade - self.max_trade  # always 0 on the first trade

    def displayStock(self):
        """Print a one-line summary of the current aggregates."""
        print("Name : ", self.code, ", Minimum Time Gap: ", self.min_time_gap, ", Average Volume : ", self.avg_volume, ", Max Price :", self.max_trade, " , Minimum Trade : ", self.min_trade, ", Price Range : ", self.price_range, " , Average Price : ", self.avg_price)

    def updateStock(self, stock_code, timestamp, volume, trading_price):
        """Fold one more trade into the running aggregates for this stock."""
        print('****** Inside Update Stock Method ********')
        stock_code_count_old = int(self.stock_code_count)
        self.stock_code_count += 1
        volume_old = self.sum_volume
        print('Checkpoint - US-01')
        print('Updating Stock : ', self.code)
        ##Update Average Price
        print('Old Average Price: ', self.sum_price/stock_code_count_old, ' , Old Average Volume : ', volume_old/stock_code_count_old)
        ##Calculate the cumulative share price and cumulative share volume
        self.sum_price += trading_price
        self.sum_volume += volume
        ##Update Average Price
        self.avg_price = int(self.sum_price / self.stock_code_count)
        print('New Average Price : ', self.avg_price)
        ##Update Volume
        self.avg_volume = int(self.sum_volume / self.stock_code_count)
        print('New Average Volume is : ', self.avg_volume)
        # BUG FIX: the original had a second "if min_trade < price" branch here
        # that assigned to a misspelled attribute (self.mix_trade). It was a
        # broken duplicate of the max-trade update below and has been removed.
        ##Update current and new values of time stamp
        self.prev_ts = self.curr_ts
        self.curr_ts = timestamp
        temp_time_stamp_difference = self.curr_ts - self.prev_ts
        print('Code : ', self.code, ' has Current TimeStamp as :', self.curr_ts, ' Previous Timestamp as :', self.prev_ts, ' temporary time difference : ', temp_time_stamp_difference)
        if temp_time_stamp_difference < self.min_time_gap:
            print('Checkpoint - US - 03')
            self.min_time_gap = temp_time_stamp_difference
        # Update minimum trading price
        if self.min_trade > trading_price:
            self.min_trade = trading_price
        ##Update Maximum Trading Price
        if self.max_trade < trading_price:
            self.max_trade = trading_price
        # Update price range
        self.price_range = self.max_trade - self.min_trade
# Read the input CSV (timestamp, code, volume, price per row), keeping one
# Stock aggregate per code, then write the summary CSV sorted by code.
with open("E:\\06 Summer Internship\\01 Quantlab\\QL-02-Wagholikar\\input.csv") as csvfile:
    readCSV = csv.reader(csvfile, delimiter = ',')
    stock_master = []  # one Stock instance per distinct code, parallel to stock_lst
    for row in readCSV:
        # row layout: [timestamp, code, volume, price]
        stock_t = Stock(row[1], int(row[0]), int(row[2]), int(row[3]))
        counter = counter + 1
        print('Counter is : ', counter)
        if flag== 0:
            # Very first row: seed both lists.
            stock_lst.append(row[1])
            stock_master.append(stock_t)
            flag = 1
            print(stock_master[0].displayStock())
        else:
            print("Next item traversal starts...")
            try:
                # Known code: fold the trade into its existing aggregate.
                stock_index = stock_lst.index(row[1])
                print('Stock index of ', row[1], ' is :', stock_index)
                stock_master[stock_index].updateStock(row[1], int(row[0]), int(row[2]), int(row[3]))
                print(stock_master[stock_index].displayStock())
            except:
                # list.index raised ValueError -> first time we see this code.
                print('New Stock node is being added..')
                stock_lst.append(row[1])
                print('The given stock : ', row[1] ,' is being updated')
                stock_master.append(stock_t)
                stock_master[-1].displayStock()
print(stock_lst)
# for i in range(0,len(stock_master),1):
#     stock_master[i].weighted_avg_price = floor(stock_master[i].weighted_avg_price)
# Sort aggregates alphabetically by stock code before writing the output.
stock_master.sort(key= lambda x: x.code, reverse=False)
for item in stock_master:
    print(item.displayStock())
with open("E:\\06 Summer Internship\\01 Quantlab\\QL-02-Wagholikar\\output.csv", 'w', newline = '') as csvfile:
    stockwriter = csv.writer(csvfile, quoting = csv.QUOTE_NONE)
    for j in range(0, len(stock_master),1):
        stockwriter.writerow([stock_master[j].code,stock_master[j].min_time_gap,stock_master[j].avg_volume,stock_master[j].price_range,stock_master[j].avg_price])
# Build an XML tree of the aggregates (one <symbol> element per stock, with
# the stats as attributes) and write it to output.xml via stdout redirection.
root = Element('symbols')
# tree = ElementTree(root)
# name = Element('symbol')
tag1 = 'symbol'
subtag1 = "name"
subtag2 = "MinTimeGap"
subtag3 = "AverageVolume"
subtag4 = "PriceRange"
subtag5 = "AveragePrice"
for k in range(0, len(stock_master), 1):
    subvalue1 = stock_master[k].code
    subvalue2 = str(stock_master[k].min_time_gap)
    subvalue3 = str(stock_master[k].avg_volume)
    subvalue4 = str(stock_master[k].price_range)
    subvalue5 = str(stock_master[k].avg_price)
    my_dict={subtag1:subvalue1, subtag2:subvalue2, subtag3:subvalue3, subtag4:subvalue4, subtag5:subvalue5}
    #my_dict.sor
    #sorted(my_dict, 1 , reversed = true)
    #sorted_my_dict= sorted(my_dict.items(), key = operator.itemgetter(0))
    print(my_dict)
    value= 'value'+str(k)  # NOTE(review): immediately overwritten below; appears redundant
    value=SubElement(root, tag1, my_dict)
# NOTE(review): original indentation was lost; this print is assumed to sit
# after the loop (debug dump of the finished tree) -- confirm against history.
print(etree.tostring(root))
# Temporarily redirect stdout so print() writes the serialized XML to the file.
orig_stdout = sys.stdout
f = open("E:\\06 Summer Internship\\01 Quantlab\\QL-02-Wagholikar\\output.xml", 'w', newline = '')
sys.stdout = f
print(etree.tostring(root))
sys.stdout = orig_stdout
f.close()
#tree= Element('masternode')
#tree = ElementTree(root)
#tree.append(root)
#tree.write(open("E:\\06 Summer Internship\\01 Quantlab\\QL-02-Wagholikar\\xml_output_test001.csv", 'w'))
| [
"xml.etree.ElementTree.tostring",
"csv.writer",
"xml.etree.ElementTree.Element",
"_elementtree.SubElement",
"csv.reader"
] | [((4031, 4065), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (4041, 4065), False, 'import csv\n'), ((5622, 5665), 'csv.writer', 'csv.writer', (['csvfile'], {'quoting': 'csv.QUOTE_NONE'}), '(csvfile, quoting=csv.QUOTE_NONE)\n', (5632, 5665), False, 'import csv\n'), ((5895, 5913), 'xml.etree.ElementTree.Element', 'Element', (['"""symbols"""'], {}), "('symbols')\n", (5902, 5913), False, 'from xml.etree.ElementTree import Element\n'), ((6755, 6786), '_elementtree.SubElement', 'SubElement', (['root', 'tag1', 'my_dict'], {}), '(root, tag1, my_dict)\n', (6765, 6786), False, 'from _elementtree import SubElement\n'), ((6816, 6836), 'xml.etree.ElementTree.tostring', 'etree.tostring', (['root'], {}), '(root)\n', (6830, 6836), True, 'import xml.etree.ElementTree as etree\n'), ((6999, 7019), 'xml.etree.ElementTree.tostring', 'etree.tostring', (['root'], {}), '(root)\n', (7013, 7019), True, 'import xml.etree.ElementTree as etree\n')] |
import json
import glob
import os
import argparse
import time
# Default CLI paths; all overridable via the command-line flags declared below.
_DEFAULT_DATASET_DIR_PATH = "D:\\Documents\\output_vvs_2020\\labeled"
_DEFAULT_METADATA_PATH = "C:\\Users\\Steven\\github\\kvasir-capsule\\metadata.json"
# Work dir defaults to the parent of this script's directory.
_DEFAULT_WORK_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
# Classes categorised as "Anatomy"; any other class is written as "Luminal".
_ANATOMY_CLASSES = [
    "Pylorus",
    "Ileocecal valve",
    "Ampulla of Vater"
]
# NOTE(review): _LUMINAL_CLASSES is never referenced in the visible code; the
# category test only checks membership in _ANATOMY_CLASSES.
_LUMINAL_CLASSES = [
    "Normal clean mucosa",
    "Reduced mucosal view",
    "Blood - fresh",
    "Blood - hematin",
    "Erythema",
    "Foreign body",
    "Angiectasia",
    "Erosion",
    "Ulcer",
    "Polyp",
    "Lymphangiectasia"
]
argument_parser = argparse.ArgumentParser(description="")
argument_parser.add_argument("-d", "--dataset-dir", type=str, default=_DEFAULT_DATASET_DIR_PATH)
argument_parser.add_argument("-m", "--metadata-file", type=str, default=_DEFAULT_METADATA_PATH)
argument_parser.add_argument("-o", "--output-file", type=str, default=os.path.join(_DEFAULT_WORK_DIR, "metadata.csv"))
def match_sequence(path, start, end):
    """Return True when the frame number embedded in *path* lies in [start, end].

    The frame number is the second '_'-separated token of the basename, with
    its file extension stripped (e.g. "video1_00123.jpg" -> 123).
    """
    frame_token = os.path.basename(path).split("_")[1]
    frame_number = int(os.path.splitext(frame_token)[0])
    return start <= frame_number <= end
def match_frame_id(path, match):
    """Return True when the last '_'-separated token of *path*'s basename,
    with its extension stripped, equals *match*."""
    last_token = os.path.basename(path).split("_")[-1]
    return os.path.splitext(last_token)[0] == match
def clean_metadata_json(dataset_dir_path, metadata_file_path, output_file_path):
    """Flatten the per-video JSON metadata into one semicolon-separated CSV.

    For every video: write one row per frame of each "seen" segment (no
    bounding boxes), then one row per annotated finding frame including its
    box coordinates. Frames are located by matching filenames under
    dataset_dir_path/<video>/<frame>.
    """
    all_video_frames = list(glob.glob(os.path.join(dataset_dir_path, "*", "*")))
    timer = time.time()
    with open(metadata_file_path) as f:
        metadata = json.load(f)
    with open(output_file_path, mode="w") as f:
        # CSV header: up to four (x, y) box corners per row.
        f.write("filename;video_id;frame_number;finding_category;finding_class;x1;y1;x2;y2;x3;y3;x4;y4\n")
        for video_id, video_data in metadata.items():
            print("Reading video with ID %s..." % video_id)
            # Frames whose basename prefix (before the first '_') is this video id.
            video_specific_frames = list(filter(
                lambda x: os.path.basename(x).split("_")[0] == video_id, all_video_frames
            ))
            for segment_id, segment_data in video_data["segments"].items():
                for seen_segment in segment_data["seen"]:
                    start_frame = seen_segment[0]
                    end_frame = seen_segment[1]
                    # Prefer the pillcam subtype; fall back to the generic segment type.
                    segment_class = segment_data["metadata"]["pillcam_subtype"]
                    if segment_class is None:
                        segment_class = segment_data["metadata"]["segment_type"]
                    segment_category = "Anatomy" if segment_class in _ANATOMY_CLASSES else "Luminal"
                    segment_frames = list(filter(
                        lambda x: match_sequence(x, start_frame, end_frame), video_specific_frames
                    ))
                    # Sort by the embedded frame number so rows come out in order.
                    segment_frames = sorted(
                        segment_frames,
                        key=lambda x: int(os.path.splitext(os.path.basename(x).split("_")[1])[0])
                    )
                    for frame in segment_frames:
                        frame_id = os.path.splitext(os.path.basename(frame))[0].split("_")[-1]
                        # Segment rows carry no bounding boxes -> empty box columns.
                        f.write("%s;%s;%s;%s;%s;;;;;;;\n" % (os.path.basename(frame), video_id, frame_id, segment_category, segment_class))
            for finding_id, finding_data in video_data["findings"].items():
                finding_class = finding_data["metadata"]["pillcam_subtype"]
                finding_category = "Anatomy" if finding_class in _ANATOMY_CLASSES else "Luminal"
                for frame_id, frame_data in finding_data["frames"].items():
                    frame = filter(lambda x: match_frame_id(x, frame_id), video_specific_frames)
                    frame = list(frame)
                    if len(frame) <= 0:
                        print("Missing frames for %s!" % video_id)
                        continue
                    f.write("%s;%s;%s;%s;%s" % (os.path.basename(frame[0]), video_id, frame_id, finding_category, finding_class))
                    # Append each bounding-box corner as ";x;y" before the newline.
                    for box in frame_data["shape"]:
                        f.write(";%s;%s" % (int(box["x"]), int(box["y"])))
                    f.write("\n")
    print("Finished after %s seconds!" % int(time.time() - timer))
if __name__ == "__main__":
    # Parse the CLI flags declared above and run the conversion.
    args = argument_parser.parse_args()
    clean_metadata_json(
        dataset_dir_path=args.dataset_dir,
        metadata_file_path=args.metadata_file,
        output_file_path=args.output_file
    )
"argparse.ArgumentParser",
"os.path.join",
"json.load",
"os.path.basename",
"os.path.abspath",
"time.time"
] | [((647, 686), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (670, 686), False, 'import argparse\n'), ((1501, 1512), 'time.time', 'time.time', ([], {}), '()\n', (1510, 1512), False, 'import time\n'), ((267, 292), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (282, 292), False, 'import os\n'), ((951, 998), 'os.path.join', 'os.path.join', (['_DEFAULT_WORK_DIR', '"""metadata.csv"""'], {}), "(_DEFAULT_WORK_DIR, 'metadata.csv')\n", (963, 998), False, 'import os\n'), ((1573, 1585), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1582, 1585), False, 'import json\n'), ((1446, 1486), 'os.path.join', 'os.path.join', (['dataset_dir_path', '"""*"""', '"""*"""'], {}), "(dataset_dir_path, '*', '*')\n", (1458, 1486), False, 'import os\n'), ((4273, 4284), 'time.time', 'time.time', ([], {}), '()\n', (4282, 4284), False, 'import time\n'), ((1079, 1101), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1095, 1101), False, 'import os\n'), ((1274, 1296), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1290, 1296), False, 'import os\n'), ((3982, 4008), 'os.path.basename', 'os.path.basename', (['frame[0]'], {}), '(frame[0])\n', (3998, 4008), False, 'import os\n'), ((3199, 3222), 'os.path.basename', 'os.path.basename', (['frame'], {}), '(frame)\n', (3215, 3222), False, 'import os\n'), ((1935, 1954), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (1951, 1954), False, 'import os\n'), ((3095, 3118), 'os.path.basename', 'os.path.basename', (['frame'], {}), '(frame)\n', (3111, 3118), False, 'import os\n'), ((2912, 2931), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (2928, 2931), False, 'import os\n')] |
from django_datatables_view.base_datatable_view import BaseDatatableView
from django.db.models import Q
from django.contrib.postgres.aggregates.general import ArrayAgg
from website.models import Genome
class GenomeTableAjax(BaseDatatableView):
    """Server-side DataTables endpoint for the Genome table.

    Annotates each genome with aggregated genome/organism tags and supports
    both the global DataTables search box and per-column (yadcf) filters.
    """
    # The model we're going to show
    model = Genome
    # set max limit of records returned, this is used to protect our site if someone tries to attack our site
    # and make it return huge amount of data
    max_display_length = 2000

    def render_column(self, row: Genome, column: str):
        """Render one cell: tags as <span> chips, booleans as 'True'/'False',
        list columns space-joined; everything else falls back to the base class."""
        if column == 'genome_tags':
            html = [F'<span data-tag="{tag}">{tag}</span>' for tag in row.genome_tags if tag]
            return ' '.join(html)
        if column == 'organism_tags':
            html = [F'<span data-tag="{tag}">{tag}</span>' for tag in row.organism_tags if tag]
            return ' '.join(html)
        if column == 'representative':
            return 'True' if row.is_representative else 'False'
        if column == 'literature_references':
            return " ".join(row.literature_references)
        elif column.startswith("env_"):
            # env_* columns are list-valued attributes; join them for display.
            return " ".join(row.__getattribute__(column))
        else:
            return super(GenomeTableAjax, self).render_column(row, column)

    def get_initial_queryset(self):
        """Base queryset: all genomes with tag aggregates and related organism
        (plus its taxid) prefetched to avoid per-row queries."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        qs = self.model.objects.annotate(
            genome_tags=ArrayAgg('tags__tag', distinct=True),
            organism_tags=ArrayAgg('organism__tags__tag', distinct=True)
        ) \
            .all() \
            .prefetch_related('organism') \
            .prefetch_related('organism__taxid')
        return qs

    def filter_queryset(self, qs):
        """ If search['value'] is provided then filter all searchable columns using filter_method (istartswith
        by default).

        Automatic filtering only works for Datatables 1.10+. For older versions override this method
        """
        columns = self._columns
        if not self.pre_camel_case_notation:
            # get global search value
            search = self._querydict.get('search[value]', None)
            q = Q()
            filter_method = self.get_filter_method()
            for col_no, col in enumerate(self.columns_data):
                # apply global search to all searchable columns
                if search and col['searchable']:
                    # cannot search binary fields or tags
                    if not columns[col_no] in ['representative', 'contaminated', 'organism.restricted']:
                        q |= Q(**{F"{columns[col_no].replace('.', '__')}__{filter_method}": search})
                # column specific filter
                if col['search.value']:
                    colname = col['name']
                    ## CUSTOM FILTERS
                    if colname == 'representative':
                        # Boolean filter: a genome is representative iff the FK is set.
                        if col['search.value'] == "True":
                            qs = qs.filter(representative__isnull=False)
                        else:
                            qs = qs.filter(representative__isnull=True)
                    elif colname == "genome_tags":
                        qs = qs.filter(tags__tag__in=col['search.value'].split("|"))
                    elif colname == "organism_tags":
                        qs = qs.filter(organism__tags__tag__in=col['search.value'].split("|"))
                    elif colname.endswith("_date"):
                        # yadcf date-range syntax: "from-yadcf_delim-to" with either end open.
                        if col['search.value'].startswith("-yadcf_delim"):
                            range = ["0001-01-01", col['search.value'][-10:]]
                        elif col['search.value'].endswith("yadcf_delim-"):
                            range = [col['search.value'][:10], "9000-12-30"]
                        else:
                            range = [col['search.value'][:10], col['search.value'][-10:]]
                        qs = qs.filter(**{'{0}__{1}'.format(columns[col_no].replace('.', '__'), 'range'): range})
                    else:
                        # DEFAULT BEHAVIOUR
                        qs = qs.filter(**{
                            '{0}__{1}'.format(columns[col_no].replace('.', '__'), filter_method): col['search.value']})
            qs = qs.filter(q)
        return qs
| [
"django.db.models.Q",
"django.contrib.postgres.aggregates.general.ArrayAgg"
] | [((2237, 2240), 'django.db.models.Q', 'Q', ([], {}), '()\n', (2238, 2240), False, 'from django.db.models import Q\n'), ((1496, 1532), 'django.contrib.postgres.aggregates.general.ArrayAgg', 'ArrayAgg', (['"""tags__tag"""'], {'distinct': '(True)'}), "('tags__tag', distinct=True)\n", (1504, 1532), False, 'from django.contrib.postgres.aggregates.general import ArrayAgg\n'), ((1560, 1606), 'django.contrib.postgres.aggregates.general.ArrayAgg', 'ArrayAgg', (['"""organism__tags__tag"""'], {'distinct': '(True)'}), "('organism__tags__tag', distinct=True)\n", (1568, 1606), False, 'from django.contrib.postgres.aggregates.general import ArrayAgg\n')] |
from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup
from .antiflood import BANNED_USERS
from ..database import querymanager
from pyrogram.errors import FloodWait, exceptions
import logging
import time
import re
from base64 import b64encode as b64enc
from base64 import b64decode as b64dec
import dateparser
from collections import defaultdict
from ..post_manager import send_post
# Per-user conversation state: [channel_id, action, started_at], filled lazily.
DOING = defaultdict(lambda: ([None, None, None]))
# Per-user post options picked in the inline keyboard ("pic"/"text"/"schedule").
choices = defaultdict(lambda: defaultdict(list))
# NOTE(review): written keyed by user id in make_post but read keyed by the
# stored channel id in schedule_message/parse_date -- verify the intended key.
IDS = defaultdict(lambda: defaultdict(int))
def query_regex(data):
    """Build a pyrogram filter that matches callback-query data against the
    regex given in *data*."""
    def _matches(flt, query):
        return re.match(data, query.data)
    return Filters.create(_matches, data=data)
def flt_schedule(flt, message):
    """Custom filter: True only while the sender is in the SCHEDULE step of
    the post-creation conversation (tracked in DOING)."""
    user = message.from_user
    if not user:
        return False
    state = DOING[user.id]
    if not state[0]:
        return False
    return state[1] == "SCHEDULE"
Filters.UserScheduling = Filters.create(flt_schedule)
@Client.on_callback_query(query_regex("\-\d+\_.+\_\w+"))
def make_post(_, query):
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
channel_id, channel_name, sub = query.data.split("_")
DOING[query.from_user.id] = [int(channel_id), None]
pro = 'Sì' if sub == 'pro' else 'No'
pro = 'Sì'
channel_name = b64dec(channel_name.encode("utf-8")).decode()
data = (('📸 Foto: ❌', 'pic_true'), ('✍ Didascalia: ❌', 'text_true',), ('⏰ Programma: ❌', 'schedule_true' if pro == 'Sì' else 'schedule_false'), ('✅ Procedi', 'post_complete'), ('⬅️ Indietro', 'back_start'))
buttons = []
for text, callback in data:
if not callback.startswith("schedule") and callback not in ("back_start", "post_complete"):
callback += "_"
callback += "pro" if pro == 'Sì' else 'free'
buttons.append([InlineKeyboardButton(text, callback_data=callback)])
buttons = InlineKeyboardMarkup(buttons)
try:
query.edit_message_text(f"**AmazonOffers Manager - Crea Post**\n\nQui puoi rivedere e programmare un post nel canale\n\n📣 Canale: {channel_name}\n🆔 ID: `{channel_id}`\n⭐️ Pro: {pro}\n\n🗺 **Legenda** 🗺\n\nFoto: Se impostato, allega la foto del prodotto al post\n\nDidascalia: Se impostato, allega una breve descrizione del prodotto al post\n\nProgramma: Programma l'invio del post, solo per utenti PRO\n\n__Il prodotto oggetto del post sarà casuale, scelto tra le offerte giornaliere disponibili__", reply_markup=buttons)
IDS[query.from_user.id] = channel_id
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
@Client.on_callback_query(query_regex("post_complete"))
def on_post_complete(client, query):
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
buttons = InlineKeyboardMarkup([[InlineKeyboardButton("✅ Conferma", callback_data='confirm_choices'), InlineKeyboardButton('⬅️ Annulla', callback_data='back_start')]])
for button in query.message.reply_markup.inline_keyboard:
button = button[0]
if button.callback_data.startswith("schedule"):
if button.callback_data == "schedule_reset":
choices[query.from_user.id]["schedule"] = "✅"
else:
choices[query.from_user.id]["schedule"] = "❌"
elif button.callback_data.startswith("pic"):
if button.callback_data.startswith("pic_true"):
choices[query.from_user.id]["pic"] = "❌"
else:
choices[query.from_user.id]["pic"] = "✅"
elif button.callback_data.startswith("text"):
if button.callback_data.startswith("text_true"):
choices[query.from_user.id]["text"] = "❌"
else:
choices[query.from_user.id]["text"] = "✅"
try:
query.edit_message_text(f"**AmazonOffers Manager - Conferma Post**\n\nRivedi le informazioni sul post e premi conferma, altrimenti premi annulla per tornare al menù principale\n\n📸 Foto: {choices[query.from_user.id]['pic']}\n✍ Didascalia: {choices[query.from_user.id]['text']}\n⏰ Programma: {choices[query.from_user.id]['schedule']}", reply_markup=buttons)
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
@Client.on_callback_query(query_regex("confirm_choices"))
def schedule_message(client, query):
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
DOING[query.from_user.id].pop()
buttons = InlineKeyboardMarkup([[InlineKeyboardButton("❌ Annulla", callback_data='back_start')]])
if choices[query.from_user.id]["schedule"] == "✅":
try:
query.edit_message_text("**AmazonOffers Manager - Programma Post**\n\nInvia ora la data di invio del post, puoi scrivere:\n\n__Tra 1 ora\nDomani\nTra 1 settimana\n27/6/2020 12:00__", reply_markup=buttons)
DOING[query.from_user.id].append("SCHEDULE")
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
else:
try:
query.edit_message_text("✅ Fatto! Il post sarà inviato a breve nel canale selezionato")
send_post(client, choices[query.from_user.id], DOING[query.from_user.id][0], False, IDS[DOING[query.from_user.id][0]])
del DOING[message.from_user.id]
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
DOING[query.from_user.id].append(int(time.time()))
@Client.on_message(Filters.text & Filters.UserScheduling & Filters.private & ~BANNED_USERS)
def parse_date(client, message):
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
try:
for key, (channel, action, date) in DOING.copy().items():
if time.time() - date >= 120:
del DOING[key]
except (ValueError, TypeError):
pass
date = dateparser.parse(message.text, languages=['it'], region='IT')
if not date and DOING[message.from_user.id]:
try:
client.send_message(message.from_user.id, "❌ Errore: Non hai fornito una data valida o la tua sessione é scaduta, riprova!")
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
else:
d_obj = date
date = date.strftime("%d/%m/%Y %H:%M:%S %p")
try:
client.send_message(message.chat.id, f"✅ Post Programmato!\n\n🕙 Data & Ora: {date}")
send_post(client, choices[message.from_user.id], DOING[message.from_user.id][0], int(d_obj.timestamp()), IDS[DOING[message.from_user.id][0]])
del DOING[message.from_user.id]
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
@Client.on_callback_query(query_regex("schedule_false"))
def not_pro_user(_, query):
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
try:
query.answer("Non sei un utente pro!")
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
@Client.on_callback_query(query_regex("schedule_reset"))
def set_schedule_false(_, query):
pro = "Sì"
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
callback = "schedule_true"
data = InlineKeyboardButton('⏰ Programma: ❌', callback_data=callback)
buttons = []
for button in query.message.reply_markup.inline_keyboard:
if button[0].callback_data == "schedule_reset":
buttons.append([data])
else:
buttons.append(button)
buttons = InlineKeyboardMarkup(buttons)
try:
query.edit_message_reply_markup(buttons)
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
@Client.on_callback_query(query_regex("pic_true\_\w+"))
def set_pic_true(_, query):
pro = "Sì" if query.data.split("_")[-1] == "pro" else "No"
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
callback = "pic_false" + "_"
callback += "pro" if pro == 'Sì' else 'free'
data = InlineKeyboardButton('📸 Foto: ✅', callback_data=callback)
buttons = []
for button in query.message.reply_markup.inline_keyboard:
if button[0].callback_data.startswith("pic_true"):
buttons.append([data])
else:
buttons.append(button)
buttons = InlineKeyboardMarkup(buttons)
try:
query.edit_message_reply_markup(buttons)
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
@Client.on_callback_query(query_regex("pic_false\_\w+"))
def set_pic_false(_, query):
pro = "Sì" if query.data.split("_")[-1] == "pro" else "No"
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
callback = "pic_true" + "_"
callback += "pro" if pro == 'Sì' else 'free'
data = InlineKeyboardButton('📸 Foto: ❌', callback_data=callback)
buttons = []
for button in query.message.reply_markup.inline_keyboard:
if button[0].callback_data.startswith("pic_false"):
buttons.append([data])
else:
buttons.append(button)
buttons = InlineKeyboardMarkup(buttons)
try:
query.edit_message_reply_markup(buttons)
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
@Client.on_callback_query(query_regex("text_true\_\w+"))
def set_text_true(_, query):
pro = "Sì" if query.data.split("_")[-1] == "pro" else "No"
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
callback = "text_false" + "_"
callback += "pro" if pro == 'Sì' else 'free'
data = InlineKeyboardButton('✍ Didascalia: ✅', callback_data=callback)
buttons = []
for button in query.message.reply_markup.inline_keyboard:
if button[0].callback_data.startswith("text_true"):
buttons.append([data])
else:
buttons.append(button)
buttons = InlineKeyboardMarkup(buttons)
try:
query.edit_message_reply_markup(buttons)
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
@Client.on_callback_query(query_regex("text_false\_\w+"))
def set_text_false(_, query):
pro = "Sì" if query.data.split("_")[-1] == "pro" else "No"
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
callback = "text_true" + "_"
callback += "pro" if pro == 'Sì' else 'free'
data = InlineKeyboardButton('✍ Didascalia: ❌', callback_data=callback)
buttons = []
for button in query.message.reply_markup.inline_keyboard:
if button[0].callback_data.startswith("text_false"):
buttons.append([data])
else:
buttons.append(button)
buttons = InlineKeyboardMarkup(buttons)
try:
query.edit_message_reply_markup(buttons)
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
@Client.on_callback_query(query_regex("schedule_true"))
def set_schedule_true(_, query):
pro = "Sì"
message = query
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
callback = "schedule_reset"
data = InlineKeyboardButton('⏰ Programma: ✅', callback_data=callback)
buttons = []
for button in query.message.reply_markup.inline_keyboard:
if button[0].callback_data == "schedule_true":
buttons.append([data])
else:
buttons.append(button)
buttons = InlineKeyboardMarkup(buttons)
try:
query.edit_message_reply_markup(buttons)
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
except exceptions.bad_request_400.MessageNotModified as exc:
logging.error(f"Error in chat with {name} [{query.from_user.id}] -> {exc}")
@Client.on_message(Filters.private & ~BANNED_USERS & Filters.command("post"))
def on_channels(client, message):
channels = querymanager.retrieve_channels(message.from_user.id)
if message.from_user.first_name:
name = message.from_user.first_name
elif message.from_user.username:
name = message.from_user.username
else:
name = "Anonimo"
if not channels:
try:
client.send_message(message.chat.id, "❌ Errore, non c'è nessun canale registrato a tuo nome!\nRicorda che se hai appena registrato un canale, potrebbero occorrere un paio di minuti prima che esso venga mostrato qui")
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
else:
response = "**AmazonOffers Manager - Seleziona Canale**\n\nUtilizzando i bottoni qui sotto, scegli in quale canale desideri inviare i post"
buttons = []
for channel_id, channel_name, sub, amzn_code in channels:
if len(channel_name) > 15:
channel_name = channel_name[0:20] + "..."
IDS[channel_id] = amzn_code
data = f"{channel_id}_{b64enc(channel_name.encode()).decode()}_{sub}"
if len(data) > 64:
data = f"{channel_id}_{b64enc(channel_name[0:10].encode()).decode()}_{sub}"
buttons.append([InlineKeyboardButton(text=channel_name, callback_data=data)])
try:
client.send_message(message.chat.id, response, reply_markup=InlineKeyboardMarkup(buttons))
except FloodWait as fw:
logging.error(
f"Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...")
time.sleep(fw.x)
| [
"pyrogram.InlineKeyboardMarkup",
"pyrogram.Filters.create",
"re.match",
"pyrogram.Filters.command",
"dateparser.parse",
"pyrogram.Client.on_message",
"time.sleep",
"collections.defaultdict",
"time.time",
"logging.error",
"pyrogram.InlineKeyboardButton"
] | [((419, 459), 'collections.defaultdict', 'defaultdict', (['(lambda : [None, None, None])'], {}), '(lambda : [None, None, None])\n', (430, 459), False, 'from collections import defaultdict\n'), ((924, 952), 'pyrogram.Filters.create', 'Filters.create', (['flt_schedule'], {}), '(flt_schedule)\n', (938, 952), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((6867, 6961), 'pyrogram.Client.on_message', 'Client.on_message', (['(Filters.text & Filters.UserScheduling & Filters.private & ~BANNED_USERS)'], {}), '(Filters.text & Filters.UserScheduling & Filters.private &\n ~BANNED_USERS)\n', (6884, 6961), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((2024, 2053), 'pyrogram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['buttons'], {}), '(buttons)\n', (2044, 2053), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((7394, 7455), 'dateparser.parse', 'dateparser.parse', (['message.text'], {'languages': "['it']", 'region': '"""IT"""'}), "(message.text, languages=['it'], region='IT')\n", (7410, 7455), False, 'import dateparser\n'), ((9358, 9420), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""⏰ Programma: ❌"""'], {'callback_data': 'callback'}), "('⏰ Programma: ❌', callback_data=callback)\n", (9378, 9420), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((9654, 9683), 'pyrogram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['buttons'], {}), '(buttons)\n', (9674, 9683), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((10536, 10593), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""📸 Foto: ✅"""'], {'callback_data': 'callback'}), "('📸 Foto: ✅', callback_data=callback)\n", (10556, 10593), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((10830, 10859), 
'pyrogram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['buttons'], {}), '(buttons)\n', (10850, 10859), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((11713, 11770), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""📸 Foto: ❌"""'], {'callback_data': 'callback'}), "('📸 Foto: ❌', callback_data=callback)\n", (11733, 11770), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((12008, 12037), 'pyrogram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['buttons'], {}), '(buttons)\n', (12028, 12037), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((12892, 12955), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""✍ Didascalia: ✅"""'], {'callback_data': 'callback'}), "('✍ Didascalia: ✅', callback_data=callback)\n", (12912, 12955), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((13193, 13222), 'pyrogram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['buttons'], {}), '(buttons)\n', (13213, 13222), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((14079, 14142), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""✍ Didascalia: ❌"""'], {'callback_data': 'callback'}), "('✍ Didascalia: ❌', callback_data=callback)\n", (14099, 14142), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((14381, 14410), 'pyrogram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['buttons'], {}), '(buttons)\n', (14401, 14410), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((15169, 15231), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""⏰ Programma: ✅"""'], {'callback_data': 'callback'}), "('⏰ Programma: ✅', callback_data=callback)\n", (15189, 15231), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, 
InlineKeyboardMarkup\n'), ((15464, 15493), 'pyrogram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['buttons'], {}), '(buttons)\n', (15484, 15493), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((491, 508), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (502, 508), False, 'from collections import defaultdict\n'), ((536, 552), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (547, 552), False, 'from collections import defaultdict\n'), ((15944, 15967), 'pyrogram.Filters.command', 'Filters.command', (['"""post"""'], {}), "('post')\n", (15959, 15967), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((633, 659), 're.match', 're.match', (['data', 'query.data'], {}), '(data, query.data)\n', (641, 659), False, 'import re\n'), ((2710, 2785), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (2723, 2785), False, 'import logging\n'), ((2822, 2945), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (2835, 2945), False, 'import logging\n'), ((2957, 2973), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (2967, 2973), False, 'import time\n'), ((4741, 4816), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (4754, 4816), False, 'import logging\n'), ((4853, 4976), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! 
Sleeping for {fw.x} seconds...'\n )\n", (4866, 4976), False, 'import logging\n'), ((4988, 5004), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (4998, 5004), False, 'import time\n'), ((6850, 6861), 'time.time', 'time.time', ([], {}), '()\n', (6859, 6861), False, 'import time\n'), ((8842, 8965), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (8855, 8965), False, 'import logging\n'), ((8977, 8993), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (8987, 8993), False, 'import time\n'), ((9778, 9901), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (9791, 9901), False, 'import logging\n'), ((9913, 9929), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (9923, 9929), False, 'import time\n'), ((10003, 10078), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (10016, 10078), False, 'import logging\n'), ((10954, 11077), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! 
Sleeping for {fw.x} seconds...'\n )\n", (10967, 11077), False, 'import logging\n'), ((11089, 11105), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (11099, 11105), False, 'import time\n'), ((11179, 11254), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (11192, 11254), False, 'import logging\n'), ((12132, 12255), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (12145, 12255), False, 'import logging\n'), ((12267, 12283), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (12277, 12283), False, 'import time\n'), ((12357, 12432), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (12370, 12432), False, 'import logging\n'), ((13317, 13440), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (13330, 13440), False, 'import logging\n'), ((13452, 13468), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (13462, 13468), False, 'import time\n'), ((13542, 13617), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (13555, 13617), False, 'import logging\n'), ((14505, 14628), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! 
Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (14518, 14628), False, 'import logging\n'), ((14640, 14656), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (14650, 14656), False, 'import time\n'), ((14730, 14805), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (14743, 14805), False, 'import logging\n'), ((15588, 15711), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (15601, 15711), False, 'import logging\n'), ((15723, 15739), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (15733, 15739), False, 'import time\n'), ((15813, 15888), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (15826, 15888), False, 'import logging\n'), ((1957, 2007), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['text'], {'callback_data': 'callback'}), '(text, callback_data=callback)\n', (1977, 2007), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((3321, 3388), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""✅ Conferma"""'], {'callback_data': '"""confirm_choices"""'}), "('✅ Conferma', callback_data='confirm_choices')\n", (3341, 3388), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((3392, 3454), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""⬅️ Annulla"""'], {'callback_data': '"""back_start"""'}), "('⬅️ Annulla', callback_data='back_start')\n", (3412, 3454), False, 'from 
pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((5390, 5451), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""❌ Annulla"""'], {'callback_data': '"""back_start"""'}), "('❌ Annulla', callback_data='back_start')\n", (5410, 5451), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((5841, 5964), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (5854, 5964), False, 'import logging\n'), ((5980, 5996), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (5990, 5996), False, 'import time\n'), ((6078, 6153), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (6091, 6153), False, 'import logging\n'), ((6496, 6619), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (6509, 6619), False, 'import logging\n'), ((6635, 6651), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (6645, 6651), False, 'import time\n'), ((6733, 6808), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{query.from_user.id}] -> {exc}"""'], {}), "(f'Error in chat with {name} [{query.from_user.id}] -> {exc}')\n", (6746, 6808), False, 'import logging\n'), ((7699, 7822), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! 
Sleeping for {fw.x} seconds...'\n )\n", (7712, 7822), False, 'import logging\n'), ((7838, 7854), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (7848, 7854), False, 'import time\n'), ((8291, 8414), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (8304, 8414), False, 'import logging\n'), ((8430, 8446), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (8440, 8446), False, 'import time\n'), ((16573, 16696), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds...'\n )\n", (16586, 16696), False, 'import logging\n'), ((16716, 16732), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (16726, 16732), False, 'import time\n'), ((17570, 17693), 'logging.error', 'logging.error', (['f"""Error in chat with {name} [{message.from_user.id}] -> FloodWait! Sleeping for {fw.x} seconds..."""'], {}), "(\n f'Error in chat with {name} [{message.from_user.id}] -> FloodWait! 
Sleeping for {fw.x} seconds...'\n )\n", (17583, 17693), False, 'import logging\n'), ((17713, 17729), 'time.sleep', 'time.sleep', (['fw.x'], {}), '(fw.x)\n', (17723, 17729), False, 'import time\n'), ((7276, 7287), 'time.time', 'time.time', ([], {}), '()\n', (7285, 7287), False, 'import time\n'), ((17348, 17407), 'pyrogram.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': 'channel_name', 'callback_data': 'data'}), '(text=channel_name, callback_data=data)\n', (17368, 17407), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((17495, 17524), 'pyrogram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['buttons'], {}), '(buttons)\n', (17515, 17524), False, 'from pyrogram import Client, Filters, InlineKeyboardButton, InlineKeyboardMarkup\n')] |
# SPDX-FileCopyrightText: 2019-2021 REFITT Team
# SPDX-License-Identifier: Apache-2.0
"""Database file_type model integration tests."""
# external libs
import pytest
from sqlalchemy.exc import IntegrityError
# internal libs
from refitt.database.model import FileType, NotFound
from tests.integration.test_database.test_model.conftest import TestData
from tests.integration.test_database.test_model import json_roundtrip
class TestFileType:
"""Tests for `FileType` database model."""
def test_init(self, testdata: TestData) -> None:
"""Create file_type instance and validate accessors."""
for data in testdata['file_type']:
file_type = FileType(**data)
for key, value in data.items():
assert getattr(file_type, key) == value
def test_dict(self, testdata: TestData) -> None:
"""Test round-trip of dict translations."""
for data in testdata['file_type']:
file_type = FileType.from_dict(data)
assert data == file_type.to_dict()
def test_tuple(self, testdata: TestData) -> None:
"""Test tuple-conversion."""
for data in testdata['file_type']:
file_type = FileType.from_dict(data)
assert tuple(data.values()) == file_type.to_tuple()
def test_embedded_no_join(self, testdata: TestData) -> None:
"""Tests embedded method to check JSON-serialization."""
for data in testdata['file_type']:
assert data == json_roundtrip(FileType(**data).to_json(join=False))
def test_embedded(self) -> None:
"""Test embedded method to check JSON-serialization and auto-join."""
assert FileType.from_name('fits.gz').to_json(join=True) == {
'id': 1,
'name': 'fits.gz',
'description': 'Gzip compressed FITS file.'
}
def test_from_id(self, testdata: TestData) -> None:
"""Test loading file_type from `id`."""
# NOTE: `id` not set until after insert
for i, record in enumerate(testdata['file_type']):
assert FileType.from_id(i + 1).name == record['name']
def test_id_missing(self) -> None:
"""Test exception on missing file_type `id`."""
with pytest.raises(NotFound):
FileType.from_id(-1)
def test_id_already_exists(self) -> None:
"""Test exception on file_type `id` already exists."""
with pytest.raises(IntegrityError):
FileType.add({'id': 1, 'name': 'jpeg',
'description': 'A bad format for scientific images.'})
def test_from_name(self, testdata: TestData) -> None:
"""Test loading file_type from `name`."""
for record in testdata['file_type']:
assert FileType.from_name(record['name']).name == record['name']
def test_name_missing(self) -> None:
"""Test exception on missing file_type `name`."""
with pytest.raises(NotFound):
FileType.from_name('png')
def test_name_already_exists(self) -> None:
"""Test exception on file_type `name` already exists."""
with pytest.raises(IntegrityError):
FileType.add({'name': 'fits.gz',
'description': 'Gzip compressed FITS file.'})
| [
"refitt.database.model.FileType.add",
"refitt.database.model.FileType.from_dict",
"pytest.raises",
"refitt.database.model.FileType",
"refitt.database.model.FileType.from_id",
"refitt.database.model.FileType.from_name"
] | [((678, 694), 'refitt.database.model.FileType', 'FileType', ([], {}), '(**data)\n', (686, 694), False, 'from refitt.database.model import FileType, NotFound\n'), ((968, 992), 'refitt.database.model.FileType.from_dict', 'FileType.from_dict', (['data'], {}), '(data)\n', (986, 992), False, 'from refitt.database.model import FileType, NotFound\n'), ((1199, 1223), 'refitt.database.model.FileType.from_dict', 'FileType.from_dict', (['data'], {}), '(data)\n', (1217, 1223), False, 'from refitt.database.model import FileType, NotFound\n'), ((2232, 2255), 'pytest.raises', 'pytest.raises', (['NotFound'], {}), '(NotFound)\n', (2245, 2255), False, 'import pytest\n'), ((2269, 2289), 'refitt.database.model.FileType.from_id', 'FileType.from_id', (['(-1)'], {}), '(-1)\n', (2285, 2289), False, 'from refitt.database.model import FileType, NotFound\n'), ((2413, 2442), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (2426, 2442), False, 'import pytest\n'), ((2456, 2553), 'refitt.database.model.FileType.add', 'FileType.add', (["{'id': 1, 'name': 'jpeg', 'description': 'A bad format for scientific images.'}"], {}), "({'id': 1, 'name': 'jpeg', 'description':\n 'A bad format for scientific images.'})\n", (2468, 2553), False, 'from refitt.database.model import FileType, NotFound\n'), ((2920, 2943), 'pytest.raises', 'pytest.raises', (['NotFound'], {}), '(NotFound)\n', (2933, 2943), False, 'import pytest\n'), ((2957, 2982), 'refitt.database.model.FileType.from_name', 'FileType.from_name', (['"""png"""'], {}), "('png')\n", (2975, 2982), False, 'from refitt.database.model import FileType, NotFound\n'), ((3110, 3139), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (3123, 3139), False, 'import pytest\n'), ((3153, 3231), 'refitt.database.model.FileType.add', 'FileType.add', (["{'name': 'fits.gz', 'description': 'Gzip compressed FITS file.'}"], {}), "({'name': 'fits.gz', 'description': 'Gzip compressed FITS file.'})\n", (3165, 
3231), False, 'from refitt.database.model import FileType, NotFound\n'), ((1673, 1702), 'refitt.database.model.FileType.from_name', 'FileType.from_name', (['"""fits.gz"""'], {}), "('fits.gz')\n", (1691, 1702), False, 'from refitt.database.model import FileType, NotFound\n'), ((2076, 2099), 'refitt.database.model.FileType.from_id', 'FileType.from_id', (['(i + 1)'], {}), '(i + 1)\n', (2092, 2099), False, 'from refitt.database.model import FileType, NotFound\n'), ((2749, 2783), 'refitt.database.model.FileType.from_name', 'FileType.from_name', (["record['name']"], {}), "(record['name'])\n", (2767, 2783), False, 'from refitt.database.model import FileType, NotFound\n'), ((1504, 1520), 'refitt.database.model.FileType', 'FileType', ([], {}), '(**data)\n', (1512, 1520), False, 'from refitt.database.model import FileType, NotFound\n')] |
import os
import os.path as op
import shutil
import sys
from datetime import datetime, timezone
from tempfile import TemporaryDirectory

import boto3
import sh
from termcolor import cprint

from .cloudformation import get_cf_resources
from .config import Config
def _docker(*args):
    """Echo and run ``docker <args>``, streaming output to this process.

    The command line is echoed first (``$`` in red, the command in green).
    The container process runs in the background (``_bg=True``) so that a
    Ctrl-C in this process can be caught here: on ``KeyboardInterrupt`` the
    SIGINT (signal 2) is forwarded to the docker process before exiting
    with status 2.
    """
    cprint("$", "red", end=" ")
    cprint("docker " + " ".join(args), "green")
    # ``p`` must exist before the try block so the except clause can tell
    # whether the process was ever started.
    p = None
    try:
        p = sh.docker(*args, _out=sys.stdout, _err=sys.stderr, _bg=True)
        p.wait()
    except KeyboardInterrupt:
        if p is not None:
            # Forward SIGINT to the docker client so the container stops too.
            p.signal(2)
        sys.exit(2)
def _target_zip_basename(layername: str, ver: str) -> str:
ts = round(datetime.utcnow().timestamp())
return f"{layername}_{ts}_{ver}.zip"
def _build_cmd(zipbasename, cfg: Config):
    """Compose the single ``sh -c`` command string run inside the build container.

    The command is a ``&&``-chained pipeline that installs the requirements
    into a ``python/`` directory (the layout AWS Lambda layers expect),
    optionally shrinks and byte-compiles the installed packages according to
    ``cfg.shrink``, and zips everything to ``/volumepoint/dist/<zipbasename>``.

    Args:
        zipbasename: File name of the zip to create under ``/volumepoint/dist``.
        cfg: Project configuration; only the ``shrink`` subtree is read here.

    Returns:
        The full shell command as one string.
    """
    chains = []
    # ==========================================
    # COPY files on /volumepoint/other_resources
    # ==========================================
    # ``|| echo`` keeps the chain's exit status zero when the directory is absent.
    chains.extend(["(test -d /other_resources && cp -RT /other_resources . || echo)"])
    # ===========
    # PIP INSTALL
    # ===========
    chains.extend(
        [
            "mkdir -p python",
            "pip3 install --no-cache-dir -r /volumepoint/requirements.txt -t python",
            "cd python",
        ]
    )
    # ====================================
    # REMOVE UNNECESSARY SNIPPETS OR FILES
    # ====================================
    if cfg.shrink.plotly.remove_jupyterlab_plotly:
        chains.append("(test -d jupyterlab_plotly && rm -rf jupyterlab_plotly || echo)")
    if cfg.shrink.plotly.remove_data_docs:
        # Blank out plotly's huge inline "data_docs" strings via in-place sed.
        chains.append(
            r'''(test -d plotly && (find plotly/validators -name '*.py' | xargs sed --in-place -z -E -e 's/kwargs\.pop\([[:space:]]*"data_docs",[[:space:]]*""".*""",?[[:space:]]*\)/kwargs.pop("data_docs", "")/') || echo)'''  # noqa
        )
    if cfg.shrink.remove_dist_info:
        chains.append("(find . -name '*.dist-info' | xargs rm -rf)")
    chains.append("(find . -name '__pycache__' | xargs rm -rf)")
    # =====
    # PATCH
    # =====
    if cfg.shrink.compile and cfg.shrink.compile_optimize_level >= 2:
        # Compiling with optimize level >= 2 strips docstrings, so ``__doc__``
        # attributes become None and some code (e.g. numpy's dispatcher) would
        # raise; replace those references with an empty string beforehand.
        chains.append(
            r"""(test -d numpy && (find numpy -name '*.py' | xargs sed --in-place -e 's/dispatcher\.__doc__/""/g') || echo)"""  # noqa
        )
    # =======
    # COMPILE
    # =======
    if cfg.shrink.compile:
        # ``legacy=True`` writes .pyc next to the sources so the .py files can
        # then be deleted while imports keep working.
        chains.extend(
            [
                f"""python -c 'import compileall; compileall.compile_dir(".", maxlevels=20, optimize={cfg.shrink.compile_optimize_level}, force=True, legacy=True, quiet=2)'""",  # noqa
                "(find . -name '*.py' | xargs rm -rf)",
            ]
        )
    # ===
    # ZIP
    # ===
    chains.extend(
        [
            "cd ..",
            "mkdir -p /volumepoint/dist",
            f"zip -r9 --quiet /volumepoint/dist/{zipbasename} .",
        ]
    )
    return " && ".join(chains)
def _make_package(zipbasename: str, ver: str, cfg: Config):
"""Run `pip install' in the docker container and zip artifacts."""
cprint(f"Start to make {op.join('dist', zipbasename)}", "green")
if op.exists(op.join("dist", zipbasename)):
print(f"SKIP: {op.join('dist', zipbasename)} already exists")
return
with TemporaryDirectory() as tmpdir:
curdir = op.abspath(os.curdir)
# In order to follow symbolic links, copy files on ./other_resources to tmpdir
os.makedirs(op.join(curdir, "other_resources"), exist_ok=True)
other_resources_copy_dir = op.join(tmpdir, "other_resources_copy_dir")
shutil.copytree(
op.join(curdir, "other_resources"), other_resources_copy_dir, symlinks=False
)
_docker(
"run",
"--rm",
"-v",
f"{curdir}:/volumepoint",
"-v",
f"{other_resources_copy_dir}:/other_resources",
f"lambci/lambda:build-python{ver}",
"sh",
"-c",
_build_cmd(zipbasename, cfg),
)
print(f"DONE: {op.join('dist', zipbasename)} created")
def _full_layername(layername: str, ver: str):
suffix = "-py" + ver.replace(".", "")
return layername + suffix
def _upload_package(zipbasename: str, ver: str, full_layername: str, description: str):
cf_resources = get_cf_resources()
s3 = boto3.resource("s3")
cprint(f"Start to upload {op.join('dist', zipbasename)}", "green")
bucketname = cf_resources["DeploymentBucketName"]
s3.Bucket(bucketname).upload_file(op.join("dist", zipbasename), zipbasename)
print(f"Put the package file: s3://{bucketname}/{zipbasename}")
lambdafunc = boto3.client("lambda")
res = lambdafunc.publish_layer_version(
LayerName=full_layername,
Description=description,
Content={
"S3Bucket": bucketname,
"S3Key": zipbasename,
},
CompatibleRuntimes=[f"python{ver}"],
)
print(f"Publish the custom layer: {res['LayerVersionArn']}")
print(f"DONE: {op.join('dist', zipbasename)} created")
def deploy_package(cfg: Config, upload_also=True):
"""Run make_package & upload_package according in accordance with config."""
for ver in cfg.pyversions:
# ex. NAME_MD5SUM_py3x.zip
zipbasename = _target_zip_basename(cfg.layername, ver)
# ex. NAME-py3x
fullname = _full_layername(cfg.layername, ver)
_make_package(zipbasename, ver, cfg)
if upload_also:
_upload_package(zipbasename, ver, fullname, cfg.description)
| [
"tempfile.TemporaryDirectory",
"boto3.client",
"datetime.datetime.utcnow",
"os.path.join",
"boto3.resource",
"sys.exit",
"os.path.abspath",
"termcolor.cprint",
"sh.docker"
] | [((278, 305), 'termcolor.cprint', 'cprint', (['"""$"""', '"""red"""'], {'end': '""" """'}), "('$', 'red', end=' ')\n", (284, 305), False, 'from termcolor import cprint\n'), ((4478, 4498), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (4492, 4498), False, 'import boto3\n'), ((4792, 4814), 'boto3.client', 'boto3.client', (['"""lambda"""'], {}), "('lambda')\n", (4804, 4814), False, 'import boto3\n'), ((388, 448), 'sh.docker', 'sh.docker', (['*args'], {'_out': 'sys.stdout', '_err': 'sys.stderr', '_bg': '(True)'}), '(*args, _out=sys.stdout, _err=sys.stderr, _bg=True)\n', (397, 448), False, 'import sh\n'), ((3278, 3306), 'os.path.join', 'op.join', (['"""dist"""', 'zipbasename'], {}), "('dist', zipbasename)\n", (3285, 3306), True, 'import os.path as op\n'), ((3403, 3423), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (3421, 3423), False, 'from tempfile import TemporaryDirectory\n'), ((3452, 3473), 'os.path.abspath', 'op.abspath', (['os.curdir'], {}), '(os.curdir)\n', (3462, 3473), True, 'import os.path as op\n'), ((3667, 3710), 'os.path.join', 'op.join', (['tmpdir', '"""other_resources_copy_dir"""'], {}), "(tmpdir, 'other_resources_copy_dir')\n", (3674, 3710), True, 'import os.path as op\n'), ((4663, 4691), 'os.path.join', 'op.join', (['"""dist"""', 'zipbasename'], {}), "('dist', zipbasename)\n", (4670, 4691), True, 'import os.path as op\n'), ((554, 565), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (562, 565), False, 'import sys\n'), ((3581, 3615), 'os.path.join', 'op.join', (['curdir', '"""other_resources"""'], {}), "(curdir, 'other_resources')\n", (3588, 3615), True, 'import os.path as op\n'), ((3748, 3782), 'os.path.join', 'op.join', (['curdir', '"""other_resources"""'], {}), "(curdir, 'other_resources')\n", (3755, 3782), True, 'import os.path as op\n'), ((642, 659), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (657, 659), False, 'from datetime import datetime\n'), ((3220, 3248), 'os.path.join', 
'op.join', (['"""dist"""', 'zipbasename'], {}), "('dist', zipbasename)\n", (3227, 3248), True, 'import os.path as op\n'), ((4180, 4208), 'os.path.join', 'op.join', (['"""dist"""', 'zipbasename'], {}), "('dist', zipbasename)\n", (4187, 4208), True, 'import os.path as op\n'), ((4529, 4557), 'os.path.join', 'op.join', (['"""dist"""', 'zipbasename'], {}), "('dist', zipbasename)\n", (4536, 4557), True, 'import os.path as op\n'), ((5161, 5189), 'os.path.join', 'op.join', (['"""dist"""', 'zipbasename'], {}), "('dist', zipbasename)\n", (5168, 5189), True, 'import os.path as op\n'), ((3332, 3360), 'os.path.join', 'op.join', (['"""dist"""', 'zipbasename'], {}), "('dist', zipbasename)\n", (3339, 3360), True, 'import os.path as op\n')] |
#!/usr/bin/python3
import os
import subprocess
import sys
md5sums = {}
dirname = sys.argv[1]
for fn in os.listdir(dirname):
md5sum = subprocess.check_output(['md5sum', os.path.join(dirname, fn)]).decode('ascii').partition(" ")[0]
if md5sum in md5sums:
# This blob already exists. Symlink it
os.unlink(os.path.join(dirname, fn))
os.symlink(md5sums[md5sum], os.path.join(dirname, fn))
else:
md5sums[md5sum] = fn
| [
"os.listdir",
"os.path.join"
] | [((106, 125), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (116, 125), False, 'import os\n'), ((329, 354), 'os.path.join', 'os.path.join', (['dirname', 'fn'], {}), '(dirname, fn)\n', (341, 354), False, 'import os\n'), ((392, 417), 'os.path.join', 'os.path.join', (['dirname', 'fn'], {}), '(dirname, fn)\n', (404, 417), False, 'import os\n'), ((175, 200), 'os.path.join', 'os.path.join', (['dirname', 'fn'], {}), '(dirname, fn)\n', (187, 200), False, 'import os\n')] |
'''
clyther.rttt
--------------------
Run Time Type Tree (rttt)
'''
from clast import cast
from clyther.pybuiltins import builtin_map
from inspect import isroutine, isclass, isfunction
from meta.asttools.visitors import visit_children, Mutator
from meta.asttools.visitors.print_visitor import print_ast
from opencl import contextual_memory
from opencl.type_formats import type_format, cdefn
import _ctypes
import abc
import ast
import ctypes
import opencl as cl
import re
class cltype(object):
__metaclass__ = abc.ABCMeta
pass
cltype.register(contextual_memory)
class cList(cltype):
def __init__(self, ctype):
self.iter_type = ctype
class RuntimeConstant(object):
'''
define a constant value that is defined in the OpenCL runtime.
:param name: the name of the constant in OpenCL.
:param rtt: the ctype of the constant.
'''
def __init__(self, name, rtt):
self.name = name
self.rtt = rtt
def ctype_string(self):
return self.name
class RuntimeType(cltype):
def __init__(self, name):
self.name = name
def __call__(self, name):
return RuntimeConstant(name, self)
def ctype_string(self):
return self.name
class gentype(object):
'''
a generic numeric type in OpenCL
'''
def __init__(self, *types):
self.types = types
class ugentype(object):
'''
an unsigned generic numeric type in OpenCL
'''
def __init__(self, *types):
self.types = types
class sgentype(object):
'''
a signed generic numeric type in OpenCL
'''
def __init__(self, *types):
self.types = types
class RuntimeFunction(cltype):
'''
A function that is defined in the openCL runtime.
:param name: the name of the function as per the oencl specification.
:param return_type: Either a ctype or a function that returns a ctype
:param argtypes: Either a ctype or a function that returns a ctype
Keyword only parameters:
:param doc: Either a ctype or a function that returns a ctype
:param builtin: a python builtin function that is equivalent to this function
:param emulate: A function that emulates the behavior of this function in python.
This argument is not required if `builtin` is given.
If `return_type` is a function it must have the same signature as the runtime function.
'''
def __init__(self, name, return_type, *argtypes, **kwargs):
self.name = name
self._return_type = return_type
self.argtypes = argtypes
self.kwargs = kwargs
self.__doc__ = kwargs.get('doc', None)
self.builtin = kwargs.get('builtin', None)
self.emulate = kwargs.get('emulate', None)
if self.builtin is not None:
builtin_map[self.builtin] = self
def return_type(self, argtypes):
if isfunction(self._return_type):
return self._return_type(*argtypes)
else:
if len(argtypes) != len(self.argtypes):
raise TypeError('openCL builtin function %r expected %i argument(s) (got %i)' % (self.name, len(self.argtypes), len(argtypes)))
return self._return_type
def ctype_string(self):
return None
def __call__(self, *args):
if self.builtin is not None:
return self.builtin(*args)
elif self.emulate is not None:
return self.builtin(*args)
else:
raise NotImplementedError("python can not emulate this function yet.")
int_ctypes = {ctypes.c_int, ctypes.c_int32, ctypes.c_int8, ctypes.c_int16, ctypes.c_int64, ctypes.c_long , ctypes.c_longlong,
ctypes.c_size_t, ctypes.c_ssize_t,
ctypes.c_ubyte, ctypes.c_uint16, ctypes.c_uint64, ctypes.c_ulong, ctypes.c_ushort,
ctypes.c_uint, ctypes.c_uint32, ctypes.c_uint8, ctypes.c_ulonglong,
int}
unsigned_ctypes = {ctypes.c_ubyte, ctypes.c_uint16, ctypes.c_uint64, ctypes.c_ulong, ctypes.c_ushort,
ctypes.c_size_t, ctypes.c_ssize_t,
ctypes.c_uint, ctypes.c_uint32, ctypes.c_uint8, ctypes.c_ulonglong}
float_types = {ctypes.c_float, ctypes.c_double, ctypes.c_longdouble, float}
type_groups = {'unsigned': unsigned_ctypes, 'int':int_ctypes, 'float':float_types}
type_group_weight = ['unsigned', 'int', 'float']
def groupof(ctype):
for gname, group in type_groups.items():
if ctype in group:
return gname
return None
def same_group(left, right):
return groupof(left) == groupof(right)
def greatest_common_type(*args):
if len(args) == 1:
args = args[0]
if len(args) == 1:
return args[0]
else:
return reduce(_greatest_common_type, args)
vector_len = re.compile('^\((\d)\)([f|i|I|d|l|L])$')
def is_vetor_type(ctype):
return vector_len.match(type_format(ctype)) is not None
def derefrence(ctype):
if isinstance(ctype, cltype):
return ctype.derefrence()
elif is_vetor_type(ctype):
return ctype._type_
elif isclass(ctype) and issubclass(ctype, _ctypes._Pointer):
return ctype._type_
else:
raise NotImplementedError(slice)
def typeof(ctx, obj):
if isinstance(obj, cl.MemoryObject):
return cl.global_memory(obj.ctype, ndim=len(obj.shape), shape=obj.shape, context=ctx)
elif isinstance(obj, cl.local_memory):
return obj
elif isfunction(obj):
return obj
elif isinstance(obj, int):
return ctypes.c_int
elif isinstance(obj, float):
return ctypes.c_float
elif isinstance(obj, ctypes.Structure):
return cl.constant_memory(type(obj), 0, (), context=ctx)
# raise NotImplementedError("ctypes.Structure as parameter")
else:
try:
view = memoryview(obj)
return cl.global_memory(view.format, ndim=len(view.shape), shape=view.shape, context=ctx)
except TypeError:
pass
return type(obj)
def _greatest_common_type(left, right):
if not isclass(left):
left = type(left)
if not isclass(right):
right = type(right)
if left == int:
left = ctypes.c_int32
elif left == float:
left = ctypes.c_float
if right == int:
right = ctypes.c_int32
elif right == float:
right = ctypes.c_float
if left == right:
return left
if issubclass(left, _ctypes.Array):
if not isinstance(right, _ctypes.Array):
return left
else:
raise TypeError("type conversion for vector logic is not implemented yet")
elif issubclass(right, _ctypes.Array):
if not isinstance(left, _ctypes.Array):
return right
else:
raise TypeError("type conversion for vector logic is not implemented yet")
elif same_group(left, right):
return max(left, right, key=lambda ctype:ctypes.sizeof(ctype))
else:
size = max(ctypes.sizeof(left), ctypes.sizeof(right))
group = max(groupof(left), groupof(right), key=lambda group:type_group_weight.index(group))
test = lambda ctype: issubclass(ctype, _ctypes._SimpleCData) and ctypes.sizeof(ctype) >= size
ctype = min([ctype for ctype in type_groups[group] if test(ctype)], key=lambda ctype:ctypes.sizeof(ctype))
return ctype
class rtt(object):
def __repr__(self):
return '%s()' % self.__class__.__name__
class const_type(rtt):
def __init__(self, ctype):
self._ctype = ctype
def resolve(self, locls, globls):
return self._ctype
class type_tree(rtt):
def __init__(self, ctype_list):
self._ctype_list = ctype_list
class parameter_type(rtt):
def __init__(self, param_id):
self.param_id = param_id
class return_type(rtt):
pass
class local_type(rtt):
def __init__(self, param_id):
self.param_id = param_id
def resolve(self, locls, globls):
return eval(self.param_id, locls, globls)
from opencl import cl_types
type_map = {
cl_types.cl_char : 'char',
cl_types.cl_char16 : 'char16',
cl_types.cl_char2 : 'char2',
cl_types.cl_char4 : 'char4',
cl_types.cl_char8 : 'char8',
cl_types.cl_double : 'double',
cl_types.cl_double16 : 'double16',
cl_types.cl_double2 : 'double2',
cl_types.cl_double4 : 'double4',
cl_types.cl_double8 : 'double8',
cl_types.cl_float : 'float',
cl_types.cl_float16 : 'float16',
cl_types.cl_float2 : 'float2',
cl_types.cl_float4 : 'float4',
cl_types.cl_float8 : 'float8',
cl_types.cl_half : 'half',
cl_types.cl_int : 'int',
cl_types.cl_int16 : 'int16',
cl_types.cl_int2 : 'int2',
cl_types.cl_int4 : 'int4',
cl_types.cl_int8 : 'int8',
cl_types.cl_long : 'long',
cl_types.cl_long16 : 'long16',
cl_types.cl_long2 : 'long2',
cl_types.cl_long4 : 'long4',
cl_types.cl_long8 : 'long8',
cl_types.cl_short : 'short',
cl_types.cl_short16 : 'short16',
cl_types.cl_short2 : 'short2',
cl_types.cl_short4 : 'short4',
cl_types.cl_short8 : 'short8',
cl_types.cl_uchar : 'uchar',
cl_types.cl_uchar16 : 'uchar16',
cl_types.cl_uchar2 : 'uchar2',
cl_types.cl_uchar4 : 'uchar4',
cl_types.cl_uchar8 : 'uchar8',
cl_types.cl_uint : 'uint',
cl_types.cl_uint16 : 'uint16',
cl_types.cl_uint2 : 'uint2',
cl_types.cl_uint4 : 'uint4',
cl_types.cl_uint8 : 'uint8',
cl_types.cl_ulong : 'ulong',
cl_types.cl_ulong16 : 'ulong16',
cl_types.cl_ulong2 : 'ulong2',
cl_types.cl_ulong4 : 'ulong4',
cl_types.cl_ulong8 : 'ulong8',
cl_types.cl_ushort : 'ushort',
cl_types.cl_ushort16 : 'ushort16',
cl_types.cl_ushort2 : 'ushort2',
cl_types.cl_ushort4 : 'ushort4',
cl_types.cl_ushort8 : 'ushort8',
}
def str_type(ctype, defined_types):
if ctype in defined_types:
return defined_types[ctype]
elif ctype in type_map:
return type_map[ctype]
elif isroutine(ctype):
return None
elif isinstance(ctype, cl.contextual_memory):
base_str = str_type(ctype.ctype, defined_types)
return '%s %s*' % (ctype.qualifier, base_str)
elif isinstance(ctype, cltype):
return ctype.ctype_string()
elif isinstance(ctype, str):
return ctype
else:
format = type_format(ctype)
return cdefn(format)
class TypeReplacer(Mutator):
'''
Replace ctype with opencl type string.
'''
def __init__(self, defined_types):
self.defined_types = defined_types
self.new_types = {}
def visitCVarDec(self, node):
if not isinstance(node.ctype, cast.CTypeName):
node.ctype = cast.CTypeName(str_type(node.ctype, self.defined_types))
self.visitDefault(node)
def visitCFunctionForwardDec(self, node):
if not isinstance(node.return_type, cast.CTypeName):
node.return_type = cast.CTypeName(str_type(node.return_type, self.defined_types))
self.visitDefault(node)
def visitCFunctionDef(self, node):
if not isinstance(node.return_type, cast.CTypeName):
node.return_type = cast.CTypeName(str_type(node.return_type, self.defined_types))
self.visitDefault(node)
def mutateDefault(self, node):
if isinstance(node, ast.expr):
if isinstance(node.ctype, RuntimeConstant):
return cast.CName(node.ctype.name, ast.Load(), node.ctype.rtt)
return Mutator.mutateDefault(self, node)
def visitDefault(self, node):
if isinstance(node, ast.expr):
if not isinstance(node.ctype, cast.CTypeName):
try:
type_repr = str_type(node.ctype, self.defined_types)
except KeyError:
if isinstance(node.ctype, cl.contextual_memory):
ctype = node.ctype.ctype
else:
ctype = node.ctype
base_name = 'cly_%s' % (ctype.__name__)
type_repr = base_name
i = 0
while type_repr in self.defined_types.viewvalues():
i += 1
type_repr = '%s_%03i' % (base_name, i)
self.defined_types[ctype] = type_repr
self.new_types[type_repr] = ctype
if isinstance(node.ctype, cl.contextual_memory):
type_repr = str_type(node.ctype, self.defined_types)
node.ctype = cast.CTypeName(type_repr)
visit_children(self, node)
def create_cstruct(struct_id, ctype, defined_types):
decs = []
for name, field in ctype._fields_:
typename = cast.CTypeName(str_type(field, defined_types))
decs.append(cast.CVarDec(name, typename))
return cast.CStruct(struct_id, decs)
def replace_types(node):
defined_types = {None:'void', str:'char*'}
if isinstance(node, ast.Module):
for statement in node.body:
if isinstance(statement, cast.CStruct):
defined_types[statement.ctype] = statement.id
type_replacer = TypeReplacer(defined_types)
type_replacer.mutate(node)
type_replacer.visit(node)
for name, ctype in type_replacer.new_types.items():
c_struct = create_cstruct(name, ctype, type_replacer.defined_types)
node.body.insert(0, c_struct)
| [
"ast.Load",
"clast.cast.CStruct",
"re.compile",
"opencl.type_formats.type_format",
"clast.cast.CTypeName",
"inspect.isroutine",
"meta.asttools.visitors.visit_children",
"opencl.type_formats.cdefn",
"clast.cast.CVarDec",
"inspect.isclass",
"inspect.isfunction",
"meta.asttools.visitors.Mutator.m... | [((4953, 4995), 're.compile', 're.compile', (['"""^\\\\((\\\\d)\\\\)([f|i|I|d|l|L])$"""'], {}), "('^\\\\((\\\\d)\\\\)([f|i|I|d|l|L])$')\n", (4963, 4995), False, 'import re\n'), ((13302, 13331), 'clast.cast.CStruct', 'cast.CStruct', (['struct_id', 'decs'], {}), '(struct_id, decs)\n', (13314, 13331), False, 'from clast import cast\n'), ((2999, 3028), 'inspect.isfunction', 'isfunction', (['self._return_type'], {}), '(self._return_type)\n', (3009, 3028), False, 'from inspect import isroutine, isclass, isfunction\n'), ((6239, 6252), 'inspect.isclass', 'isclass', (['left'], {}), '(left)\n', (6246, 6252), False, 'from inspect import isroutine, isclass, isfunction\n'), ((6291, 6305), 'inspect.isclass', 'isclass', (['right'], {}), '(right)\n', (6298, 6305), False, 'from inspect import isroutine, isclass, isfunction\n'), ((11775, 11808), 'meta.asttools.visitors.Mutator.mutateDefault', 'Mutator.mutateDefault', (['self', 'node'], {}), '(self, node)\n', (11796, 11808), False, 'from meta.asttools.visitors import visit_children, Mutator\n'), ((13014, 13040), 'meta.asttools.visitors.visit_children', 'visit_children', (['self', 'node'], {}), '(self, node)\n', (13028, 13040), False, 'from meta.asttools.visitors import visit_children, Mutator\n'), ((5048, 5066), 'opencl.type_formats.type_format', 'type_format', (['ctype'], {}), '(ctype)\n', (5059, 5066), False, 'from opencl.type_formats import type_format, cdefn\n'), ((5609, 5624), 'inspect.isfunction', 'isfunction', (['obj'], {}), '(obj)\n', (5619, 5624), False, 'from inspect import isroutine, isclass, isfunction\n'), ((10219, 10235), 'inspect.isroutine', 'isroutine', (['ctype'], {}), '(ctype)\n', (10228, 10235), False, 'from inspect import isroutine, isclass, isfunction\n'), ((13256, 13284), 'clast.cast.CVarDec', 'cast.CVarDec', (['name', 'typename'], {}), '(name, typename)\n', (13268, 13284), False, 'from clast import cast\n'), ((5245, 5259), 'inspect.isclass', 'isclass', (['ctype'], {}), 
'(ctype)\n', (5252, 5259), False, 'from inspect import isroutine, isclass, isfunction\n'), ((12963, 12988), 'clast.cast.CTypeName', 'cast.CTypeName', (['type_repr'], {}), '(type_repr)\n', (12977, 12988), False, 'from clast import cast\n'), ((7187, 7206), 'ctypes.sizeof', 'ctypes.sizeof', (['left'], {}), '(left)\n', (7200, 7206), False, 'import ctypes\n'), ((7208, 7228), 'ctypes.sizeof', 'ctypes.sizeof', (['right'], {}), '(right)\n', (7221, 7228), False, 'import ctypes\n'), ((11732, 11742), 'ast.Load', 'ast.Load', ([], {}), '()\n', (11740, 11742), False, 'import ast\n'), ((7136, 7156), 'ctypes.sizeof', 'ctypes.sizeof', (['ctype'], {}), '(ctype)\n', (7149, 7156), False, 'import ctypes\n'), ((7412, 7432), 'ctypes.sizeof', 'ctypes.sizeof', (['ctype'], {}), '(ctype)\n', (7425, 7432), False, 'import ctypes\n'), ((7535, 7555), 'ctypes.sizeof', 'ctypes.sizeof', (['ctype'], {}), '(ctype)\n', (7548, 7555), False, 'import ctypes\n'), ((10570, 10588), 'opencl.type_formats.type_format', 'type_format', (['ctype'], {}), '(ctype)\n', (10581, 10588), False, 'from opencl.type_formats import type_format, cdefn\n'), ((10604, 10617), 'opencl.type_formats.cdefn', 'cdefn', (['format'], {}), '(format)\n', (10609, 10617), False, 'from opencl.type_formats import type_format, cdefn\n')] |
# -*- coding: utf-8 -*-
"""Check versions"""
import sys
import json
import aiida_raspa
def test_version_agreement():
"""Check if versions in setup.json and in plugin are consistent"""
version1 = aiida_raspa.__version__
with open("setup.json") as fhandle:
version2 = json.load(fhandle)['version']
if version1 != version2:
print("ERROR: Versions in aiida_raspa/__init__.py and setup.json are inconsistent: {} vs {}".format(
version1, version2))
sys.exit(3)
| [
"json.load",
"sys.exit"
] | [((499, 510), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (507, 510), False, 'import sys\n'), ((289, 307), 'json.load', 'json.load', (['fhandle'], {}), '(fhandle)\n', (298, 307), False, 'import json\n')] |
from collections import defaultdict
from copy import copy, deepcopy
from tqdm import tqdm
from ..eventuality import Eventuality
from ..relation import Relation
def conceptualize_eventualities(aser_conceptualizer, eventualities):
""" Conceptualize eventualities by an ASER conceptualizer
:param aser_conceptualizer: an ASER conceptualizer
:type aser_conceptualizer: aser.conceptualize.aser_conceptualizer.BaseASERConceptualizer
:param eventualities: a list of eventualities
:type eventualities: List[aser.event.Eventuality]
:return: a dictionary from cid to concept, a list of concept-instance pairs, a dictionary from cid to weights
:rtype: Dict[str, aser.concept.ASERConcept], List[aser.concept.ASERConcept, aser.eventuality.Eventuality, float], Dict[str, float]
"""
cid2concept = dict()
concept_instance_pairs = []
cid2score = dict()
for eventuality in tqdm(eventualities):
results = aser_conceptualizer.conceptualize(eventuality)
for concept, score in results:
if concept.cid not in cid2concept:
cid2concept[concept.cid] = deepcopy(concept)
concept = cid2concept[concept.cid]
if (eventuality.eid, eventuality.pattern, score) not in concept.instances:
concept.instances.append(((eventuality.eid, eventuality.pattern, score)))
if concept.cid not in cid2score:
cid2score[concept.cid] = 0.0
cid2score[concept.cid] += score * eventuality.frequency
concept_instance_pairs.append((concept, eventuality, score))
return cid2concept, concept_instance_pairs, cid2score
def build_concept_relations(concept_conn, relations):
""" Build relations between conceptualized eventualities from the given relations between eventualities
:param concept_conn: ASER concept KG connection
:type concept_conn: aser.database.kg_connection.ASERConceptConnection
:param relations: relations between eventualities
:type relations: List[aser.relation.Relations]
:return: a dictionary from rid to relations between conceptualized eventualities
:rtype: Dict[str, aser.relation.Relation]
"""
rid2relation = dict()
hid2related_events = defaultdict(list)
for relation in tqdm(relations):
hid2related_events[relation.hid].append((relation.tid, relation))
for h_cid in tqdm(concept_conn.cids):
instances = concept_conn.get_eventualities_given_concept(h_cid)
for h_eid, pattern, instance_score in instances:
# eid -> event -> related eids -> related events, relations -> related concepts, relations
related_events = hid2related_events[h_eid]
for t_eid, relation in related_events:
concept_score_pairs = concept_conn.get_concepts_given_eventuality(t_eid)
for t_concept, score in concept_score_pairs:
t_cid = t_concept.cid
if h_cid == t_cid:
continue
rid = Relation.generate_rid(h_cid, t_cid)
if rid not in rid2relation:
rid2relation[rid] = Relation(h_cid, t_cid)
rid2relation[rid].update({k: v * instance_score * score for k, v in relation.relations.items()})
return rid2relation
| [
"tqdm.tqdm",
"collections.defaultdict",
"copy.deepcopy"
] | [((907, 926), 'tqdm.tqdm', 'tqdm', (['eventualities'], {}), '(eventualities)\n', (911, 926), False, 'from tqdm import tqdm\n'), ((2252, 2269), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2263, 2269), False, 'from collections import defaultdict\n'), ((2290, 2305), 'tqdm.tqdm', 'tqdm', (['relations'], {}), '(relations)\n', (2294, 2305), False, 'from tqdm import tqdm\n'), ((2399, 2422), 'tqdm.tqdm', 'tqdm', (['concept_conn.cids'], {}), '(concept_conn.cids)\n', (2403, 2422), False, 'from tqdm import tqdm\n'), ((1122, 1139), 'copy.deepcopy', 'deepcopy', (['concept'], {}), '(concept)\n', (1130, 1139), False, 'from copy import copy, deepcopy\n')] |
from abc import ABCMeta, abstractmethod
from django_tooling.exceptions import ValidationError
class FailedValidation():
def __init__(self, code, details, msg):
self.code = code
self.details = details
self.msg = msg
if msg and details:
self.msg = msg.format(**details)
class Validation(metaclass=ABCMeta):
"""
Base class for all validations.
The registered key is the app name plus the snake_case version of the class name.
NameTooLong in secretobject will be available as secretobject_name_too_long
"""
def __init__(self, fieldName=None):
self.__fieldName = fieldName
self.__failedValidations = list()
@abstractmethod
def _validate(self):
pass
def validate(self, raiseError=True):
self._validate()
if self.__failedValidations and raiseError:
raise ValidationError([failedValidation.msg for failedValidation in self.__failedValidations], self.__fieldName)
def _addFailure(self, code, details=None, msg=None):
self.__failedValidations.append(FailedValidation(code, details, msg))
def getFailedValidations(self):
return self.__failedValidations
| [
"django_tooling.exceptions.ValidationError"
] | [((892, 1003), 'django_tooling.exceptions.ValidationError', 'ValidationError', (['[failedValidation.msg for failedValidation in self.__failedValidations]', 'self.__fieldName'], {}), '([failedValidation.msg for failedValidation in self.\n __failedValidations], self.__fieldName)\n', (907, 1003), False, 'from django_tooling.exceptions import ValidationError\n')] |
# Generated by Django 2.2.6 on 2019-10-29 20:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registry', '0005_auto_20191029_2312'),
]
operations = [
migrations.AlterField(
model_name='primary',
name='recommendations',
field=models.ManyToManyField(blank=True, to='registry.Sport_type', verbose_name='Рекомендации'),
),
]
| [
"django.db.models.ManyToManyField"
] | [((347, 441), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'to': '"""registry.Sport_type"""', 'verbose_name': '"""Рекомендации"""'}), "(blank=True, to='registry.Sport_type', verbose_name=\n 'Рекомендации')\n", (369, 441), False, 'from django.db import migrations, models\n')] |
from fastapi.testclient import TestClient
import pytest
from main import app
client = TestClient(app)
def get_response_for_test(path):
response = client.get(path)
return response.json(), response.status_code
def post_response_for_test(path, input_json):
response = client.post(path, json=input_json)
return response.json(), response.status_code
def test_read_main():
response = client.get("/")
assert response.status_code == 200
@pytest.mark.parametrize(('category', 'status', 'response_json'), [
("symbols", 200, {"Properties":[{"Parameter":"frequency","Property":"ω"},
{"Parameter":"offset","Property":"σ"},{"Parameter":"phi","Property":"φ"}]}),
("groups", 200, {"Properties":[{"Parameter":"frequency","Property":"System parameters"},
{"Parameter":"offset","Property":"System parameters"},
{"Parameter":"r","Property":"System parameters"},
{"Parameter":"phi","Property":"Initial impact"},
{"Parameter":"v","Property":"Initial impact"},
{"Parameter":"max_periods","Property":"Control parameters"},
{"Parameter":"num_iterations","Property":"Control parameters"},
{"Parameter":"num_points","Property":"Control parameters"}]}),
])
def test_read_parameter_info(category, status, response_json):
json, actual_status = get_response_for_test(f"/api/parameter-info/{category}")
assert actual_status == status
assert "Properties" in json
body = json["Properties"]
for element in response_json["Properties"]:
assert element in body, f"Element {element} not found in response "
for element in body:
assert element in response_json["Properties"], f"Unexpected element {element} found in response "
@pytest.mark.parametrize(('category', 'status', 'response_json'), [("garbage", 404, {"detail": "Parameter info category not found"})
])
def test_read_parameter_info_bad_path(category, status, response_json):
json, actual_status = get_response_for_test(f"/api/parameter-info/{category}")
assert actual_status == status
assert json == response_json
def test_get_impact_iteration():
input_json = {"frequency": 2.0,
"offset": 0.0,
"r": 0.8,
"max_periods": 100,
"phi": 0.0,
"v": 0.0,
"num_iterations": 2}
json, actual_status = post_response_for_test(f"/api/iteration/data", input_json)
assert actual_status == 200, f"{json}"
assert json
# assert len(json) == 2 | [
"fastapi.testclient.TestClient",
"pytest.mark.parametrize"
] | [((88, 103), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (98, 103), False, 'from fastapi.testclient import TestClient\n'), ((466, 1239), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('category', 'status', 'response_json')", "[('symbols', 200, {'Properties': [{'Parameter': 'frequency', 'Property':\n 'ω'}, {'Parameter': 'offset', 'Property': 'σ'}, {'Parameter': 'phi',\n 'Property': 'φ'}]}), ('groups', 200, {'Properties': [{'Parameter':\n 'frequency', 'Property': 'System parameters'}, {'Parameter': 'offset',\n 'Property': 'System parameters'}, {'Parameter': 'r', 'Property':\n 'System parameters'}, {'Parameter': 'phi', 'Property': 'Initial impact'\n }, {'Parameter': 'v', 'Property': 'Initial impact'}, {'Parameter':\n 'max_periods', 'Property': 'Control parameters'}, {'Parameter':\n 'num_iterations', 'Property': 'Control parameters'}, {'Parameter':\n 'num_points', 'Property': 'Control parameters'}]})]"], {}), "(('category', 'status', 'response_json'), [(\n 'symbols', 200, {'Properties': [{'Parameter': 'frequency', 'Property':\n 'ω'}, {'Parameter': 'offset', 'Property': 'σ'}, {'Parameter': 'phi',\n 'Property': 'φ'}]}), ('groups', 200, {'Properties': [{'Parameter':\n 'frequency', 'Property': 'System parameters'}, {'Parameter': 'offset',\n 'Property': 'System parameters'}, {'Parameter': 'r', 'Property':\n 'System parameters'}, {'Parameter': 'phi', 'Property': 'Initial impact'\n }, {'Parameter': 'v', 'Property': 'Initial impact'}, {'Parameter':\n 'max_periods', 'Property': 'Control parameters'}, {'Parameter':\n 'num_iterations', 'Property': 'Control parameters'}, {'Parameter':\n 'num_points', 'Property': 'Control parameters'}]})])\n", (489, 1239), False, 'import pytest\n'), ((1723, 1861), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('category', 'status', 'response_json')", "[('garbage', 404, {'detail': 'Parameter info category not found'})]"], {}), "(('category', 'status', 'response_json'), [(\n 'garbage', 404, 
{'detail': 'Parameter info category not found'})])\n", (1746, 1861), False, 'import pytest\n')] |
import argparse
import sys
import optax
import torch
import numpy as np
import time
import jax
import jax.numpy as jnp
import matplotlib as mp
import haiku as hk
import dill as pickle
# Matplotlib is optional: select the Qt backend and enable LaTeX text
# rendering when the package is available; if it is missing, fall back
# silently and simply skip plotting.  Only ImportError is caught, so a
# misconfigured backend or rc setting will still propagate.
try:
    mp.use("Qt5Agg")
    mp.rc('text', usetex=True)
    # NOTE(review): matplotlib >= 3.3 expects 'text.latex.preamble' to be a
    # plain string rather than a list -- confirm against the pinned version.
    mp.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches
    import matplotlib.cm as cm
except ImportError:
    pass
import deep_lagrangian_networks.jax_HNN_model as hnn
import deep_lagrangian_networks.jax_DeLaN_model as delan
import deep_lagrangian_networks.jax_Black_Box_model as black_box
from deep_lagrangian_networks.utils import load_dataset, init_env, activations
from deep_lagrangian_networks.jax_integrator import symplectic_euler, explicit_euler, runge_kutta_4
def running_mean(x, n):
    """Length-preserving moving average of *x* over a window of ``n`` samples.

    The input is left-padded with ``n`` copies of its first element so the
    first outputs are well defined and ``len(result) == len(x)``.
    """
    pad = x[0] * np.ones(n)
    totals = np.cumsum(np.concatenate([pad, x]))
    window_sums = totals[n:] - totals[:-n]
    return window_sums / n
if __name__ == "__main__":
n_plot = 5
dataset = "uniform"
model_id = ["structured", "black_box", "structured", "black_box", "black_box"]
module_key = ["DeLaN", "DeLaN", "HNN", "HNN", "Network"]
colors = {
"DeLaN structured": cm.get_cmap(cm.Set1)(0),
"DeLaN black_box": cm.get_cmap(cm.Set1)(1),
"HNN structured": cm.get_cmap(cm.Set1)(2),
"HNN black_box": cm.get_cmap(cm.Set1)(3),
"Network black_box": cm.get_cmap(cm.Set1)(4),
}
results = {}
for i in range(n_plot):
with open(f"data/results/{module_key[i]}_{model_id[i]}_{dataset}.pickle", "rb") as file:
results[module_key[i] + " " + model_id[i]] = pickle.load(file)
if dataset == "char":
train_data, test_data, divider, dt = load_dataset(
filename="data/character_data.pickle",
test_label=["e", "q", "v"])
elif dataset == "uniform":
train_data, test_data, divider, dt = load_dataset(
filename="data/uniform_data.pickle",
test_label=["Test 0", "Test 1", "Test 2"])
else:
raise ValueError
vpt_th = 1.e-2
for i in range(n_plot):
key = f"{module_key[i]} {model_id[i]}"
n_seeds = results[key]['forward_model']['q_error'].shape[0]
xd_error = np.mean(results[key]['forward_model']['xd_error']), 2. * np.std(results[key]['forward_model']['xd_error'])
n_test = 2
vpt = np.zeros((0, n_test))
for i in range(n_seeds):
vpt_i = []
for j in range(n_test):
traj = np.concatenate([
results[key]['forward_model']['q_error'][i, divider[j]:divider[j+1]],
results[key]['forward_model']['q_error'][i, -1:] * 0.0 + 1.])
vpt_i = vpt_i + [np.argwhere(traj >= vpt_th)[0, 0]]
vpt = np.concatenate([vpt, np.array([vpt_i])])
vpt = np.mean(vpt), np.std(vpt)
unit = r"\text{s}"
string = f"${xd_error[0]:.1e}{'}'} \pm {xd_error[1]:.1e}{'}'}$ & ${vpt[0]*dt:.2f}{unit} \pm {vpt[1]*dt:.2f}{unit}$ \\\\".replace("e-", r"\mathrm{e}{-").replace("e+", r"\mathrm{e}{+")
print(f"{key:20} - " + string)
test_labels, test_qp, test_qv, test_qa, test_p, test_pd, test_tau, test_m, test_c, test_g = test_data
tau_g, tau_c, tau_m, tau = jnp.array(test_g), jnp.array(test_c), jnp.array(test_m), jnp.array(test_tau)
q, qd, qdd = jnp.array(test_qp), jnp.array(test_qv), jnp.array(test_qa)
p, pd = jnp.array(test_p), jnp.array(test_pd)
dHdt = jax.vmap(jnp.dot, [0, 0])(qd, tau)
H = jnp.concatenate([dt * jnp.cumsum(dHdt[divider[i]: divider[i+1]]) for i in range(3)])
def smoothing(x):
return np.concatenate([running_mean(x[divider[i]:divider[i + 1]], 10) for i in range(3)])
print("\n################################################")
print("Plotting Performance:")
# Alpha of the graphs:
plot_alpha = 0.8
y_offset = -0.15
n_test = 2
# Plot the performance:
q_low = np.clip(1.5 * np.min(np.array(q), axis=0), -np.inf, -0.01)
q_max = np.clip(1.5 * np.max(np.array(q), axis=0), 0.01, np.inf)
if dataset == "char":
q_max = np.array([0.25, 3.])
q_low = np.array([-1.25, 1.])
qd_low = np.clip(1.5 * np.min(qd, axis=0), -np.inf, -0.01)
qd_max = np.clip(1.5 * np.max(qd, axis=0), 0.01, np.inf)
p_low = np.clip(1.2 * np.min(p, axis=0), -np.inf, -0.01)
p_max = np.clip(1.2 * np.max(p, axis=0), 0.01, np.inf)
H_lim = [-0.01, +0.01] if dataset == "uniform" else [-2.75, +2.75]
err_min, err_max = 1.e-5, 1.e3
plt.rc('text', usetex=True)
color_i = ["r", "b", "g", "k"]
ticks = np.array(divider)
ticks = (ticks[:-1] + ticks[1:]) / 2
fig = plt.figure(figsize=(24.0 / 1.54, 8.0 / 1.54), dpi=100)
fig.subplots_adjust(left=0.06, bottom=0.12, right=0.98, top=0.95, wspace=0.24, hspace=0.2)
fig.canvas.set_window_title('')
legend = [
mp.patches.Patch(color=colors["DeLaN structured"], label="DeLaN - Structured Lagrangian"),
mp.patches.Patch(color=colors["DeLaN black_box"], label="DeLaN - Black-Box Lagrangian"),
mp.patches.Patch(color=colors["HNN structured"], label="HNN - Structured Hamiltonian"),
mp.patches.Patch(color=colors["HNN black_box"], label="HNN - Black-Box Hamiltonian"),
mp.patches.Patch(color=colors["Network black_box"], label="Feed-Forward Network"),
mp.patches.Patch(color="k", label="Ground Truth")]
ax0 = fig.add_subplot(3, 4, 1)
ax0.set_title(r"Generalized Position $\mathbf{q}$")
ax0.text(s=r"\textbf{Joint 0}", x=-0.25, y=.5, fontsize=12, fontweight="bold", rotation=90,
horizontalalignment="center", verticalalignment="center", transform=ax0.transAxes)
ax0.set_ylabel(r"$\mathbf{q}_0$ [Rad]")
ax0.get_yaxis().set_label_coords(-0.2, 0.5)
ax0.set_ylim(q_low[0], q_max[0])
ax0.set_xticks(ticks)
ax0.set_xticklabels(test_labels)
[ax0.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax0.set_xlim(divider[0], divider[n_test])
ax0.yaxis.set_label_coords(y_offset, 0.5)
ax1 = fig.add_subplot(3, 4, 5)
ax1.text(s=r"\textbf{Joint 1}", x=-.25, y=0.5, fontsize=12, fontweight="bold", rotation=90,
horizontalalignment="center", verticalalignment="center", transform=ax1.transAxes)
ax1.set_ylabel(r"$\mathbf{q}_1$ [Rad]")
ax1.get_yaxis().set_label_coords(-0.2, 0.5)
ax1.set_ylim(q_low[1], q_max[1])
ax1.set_xticks(ticks)
ax1.set_xticklabels(test_labels)
[ax1.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax1.set_xlim(divider[0], divider[n_test])
ax1.yaxis.set_label_coords(y_offset, 0.5)
ax2 = fig.add_subplot(3, 4, 9)
ax2.text(s=r"\textbf{Error}", x=-.25, y=0.5, fontsize=12, fontweight="bold", rotation=90,
horizontalalignment="center", verticalalignment="center", transform=ax2.transAxes)
ax2.text(s=r"\textbf{(a)}", x=.5, y=-0.35, fontsize=12, fontweight="bold", horizontalalignment="center",
verticalalignment="center", transform=ax2.transAxes)
ax2.get_yaxis().set_label_coords(-0.2, 0.5)
ax2.set_xticks(ticks)
ax2.set_xticklabels(test_labels)
[ax2.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax2.set_xlim(divider[0], divider[n_test])
ax2.set_ylim(err_min, err_max)
ax2.set_yscale('log')
ax2.set_ylabel(r"Position Error")
ax2.yaxis.set_label_coords(y_offset, 0.5)
ax2.axhline(vpt_th, color="k", linestyle="--")
# Plot Ground Truth Torque:
ax0.plot(q[:, 0], color="k")
ax1.plot(q[:, 1], color="k")
# Plot DeLaN Torque:
for key in results.keys():
color = colors[key]
q_pred = results[key]["forward_model"]["q_pred"]
q_error = results[key]["forward_model"]["q_error"]
q_pred_min, q_pred_mean, q_pred_max = np.min(q_pred, axis=0), np.median(q_pred, axis=0), np.max(q_pred, axis=0)
q_error_min, q_error_mean, q_error_max = np.min(q_error, axis=0), np.median(q_error, axis=0), np.max(q_error, axis=0)
q_error_min = smoothing(q_error_min)
q_error_mean = smoothing(q_error_mean)
q_error_max = smoothing(q_error_max)
x = np.arange(q_pred_max.shape[0])
ax0.plot(q_pred_mean[:, 0], color=color, alpha=plot_alpha)
ax0.fill_between(x, q_pred_min[:, 0], q_pred_max[:, 0], color=color, alpha=plot_alpha/8.)
ax1.plot(q_pred_mean[:, 1], color=color, alpha=plot_alpha)
ax1.fill_between(x, q_pred_min[:, 1], q_pred_max[:, 1], color=color, alpha=plot_alpha/8.)
ax2.plot(q_error_mean, color=color, alpha=plot_alpha)
ax2.fill_between(x, q_error_min, q_error_max, color=color, alpha=plot_alpha/8.)
# Plot Mass Torque
ax0 = fig.add_subplot(3, 4, 2)
ax0.set_title(r"Generalized Velocity $\dot{\mathbf{q}}$")
ax0.set_ylabel(r"$\dot{\mathbf{q}}_0$ [Rad/s]")
ax0.set_ylim(qd_low[0], qd_max[0])
ax0.set_xticks(ticks)
ax0.set_xticklabels(test_labels)
[ax0.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax0.set_xlim(divider[0], divider[n_test])
ax0.yaxis.set_label_coords(y_offset, 0.5)
ax1 = fig.add_subplot(3, 4, 6)
ax1.set_ylabel(r"$\dot{\mathbf{q}}_{1}$ [Rad/s]")
ax1.set_ylim(qd_low[1], qd_max[1])
ax1.set_xticks(ticks)
ax1.set_xticklabels(test_labels)
[ax1.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax1.set_xlim(divider[0], divider[n_test])
ax1.yaxis.set_label_coords(y_offset, 0.5)
ax2 = fig.add_subplot(3, 4, 10)
ax2.text(s=r"\textbf{(b)}", x=.5, y=-0.35, fontsize=12, fontweight="bold", horizontalalignment="center",
verticalalignment="center", transform=ax2.transAxes)
ax2.get_yaxis().set_label_coords(-0.2, 0.5)
ax2.set_xticks(ticks)
ax2.set_xticklabels(test_labels)
[ax2.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax2.set_xlim(divider[0], divider[n_test])
ax2.set_ylim(err_min, err_max)
ax2.set_yscale('log')
ax2.set_ylabel(r"Velocity Error")
ax2.yaxis.set_label_coords(y_offset, 0.5)
# Plot Ground Truth Inertial Torque:
ax0.plot(qd[:, 0], color="k")
ax1.plot(qd[:, 1], color="k")
# Plot DeLaN Inertial Torque:
for key in results.keys():
color = colors[key]
qd_pred = results[key]["forward_model"]["qd_pred"]
qd_error = results[key]["forward_model"]["qd_error"]
qd_pred_min, qd_pred_mean, qd_pred_max = np.min(qd_pred, axis=0), np.median(qd_pred, axis=0), np.max(qd_pred, axis=0)
qd_error_min, qd_error_mean, qd_error_max = np.min(qd_error, axis=0), np.median(qd_error, axis=0), np.max(qd_error, axis=0)
x = np.arange(qd_pred_max.shape[0])
qd_error_min = smoothing(qd_error_min)
qd_error_mean = smoothing(qd_error_mean)
qd_error_max = smoothing(qd_error_max)
ax0.plot(qd_pred_mean[:, 0], color=color, alpha=plot_alpha)
ax0.fill_between(x, qd_pred_min[:, 0], qd_pred_max[:, 0], color=color, alpha=plot_alpha/8.)
ax1.plot(qd_pred_mean[:, 1], color=color, alpha=plot_alpha)
ax1.fill_between(x, qd_pred_min[:, 1], qd_pred_max[:, 1], color=color, alpha=plot_alpha/8.)
ax2.plot(qd_error_mean, color=color, alpha=plot_alpha)
ax2.fill_between(x, qd_error_min, qd_error_max, color=color, alpha=plot_alpha/8.)
# Plot Coriolis Torque
ax0 = fig.add_subplot(3, 4, 3)
ax0.set_title(r"Generalized Momentum $\mathbf{p}$")
ax0.set_ylabel(r"$\mathbf{p}_0$")
ax0.set_ylim(p_low[0], p_max[0])
ax0.set_xticks(ticks)
ax0.set_xticklabels(test_labels)
[ax0.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax0.set_xlim(divider[0], divider[n_test])
ax0.yaxis.set_label_coords(y_offset, 0.5)
ax1 = fig.add_subplot(3, 4, 7)
ax1.set_ylabel(r"$\mathbf{p}_1$")
ax1.set_ylim(p_low[1], p_max[1])
ax1.set_xticks(ticks)
ax1.set_xticklabels(test_labels)
[ax1.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax1.set_xlim(divider[0], divider[n_test])
ax1.yaxis.set_label_coords(y_offset, 0.5)
ax2 = fig.add_subplot(3, 4, 11)
ax2.text(s=r"\textbf{(c)}", x=.5, y=-0.35, fontsize=12, fontweight="bold", horizontalalignment="center",
verticalalignment="center", transform=ax2.transAxes)
ax2.get_yaxis().set_label_coords(-0.2, 0.5)
ax2.set_xticks(ticks)
ax2.set_xticklabels(test_labels)
[ax2.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax2.set_xlim(divider[0], divider[n_test])
ax2.set_ylim(err_min, err_max)
ax2.set_yscale('log')
ax2.set_ylabel(r"Impulse Error")
ax2.yaxis.set_label_coords(y_offset, 0.5)
# Plot Ground Truth Coriolis & Centrifugal Torque:
ax0.plot(p[:, 0], color="k")
ax1.plot(p[:, 1], color="k")
for key in results.keys():
color = colors[key]
p_pred = results[key]["forward_model"]["p_pred"]
p_error = results[key]["forward_model"]["p_error"]
p_pred_min, p_pred_mean, p_pred_max = np.min(p_pred, axis=0), np.median(p_pred, axis=0), np.max(p_pred, axis=0)
p_error_min, p_error_mean, p_error_max = np.min(p_error, axis=0), np.median(p_error, axis=0), np.max(p_error, axis=0)
x = np.arange(p_pred_max.shape[0])
p_error_min = smoothing(p_error_min)
p_error_mean = smoothing(p_error_mean)
p_error_max = smoothing(p_error_max)
ax0.plot(p_pred_mean[:, 0], color=color, alpha=plot_alpha)
ax0.fill_between(x, p_pred_min[:, 0], p_pred_max[:, 0], color=color, alpha=plot_alpha/8.)
ax1.plot(p_pred_mean[:, 1], color=color, alpha=plot_alpha)
ax1.fill_between(x, p_pred_min[:, 1], p_pred_max[:, 1], color=color, alpha=plot_alpha/8.)
ax2.plot(p_error_mean, color=color, alpha=plot_alpha)
ax2.fill_between(x, p_error_min, p_error_max, color=color, alpha=plot_alpha/8.)
# Plot Gravity
ax0 = fig.add_subplot(3, 4, 4)
ax0.set_title(r"Normalized Energy $\mathcal{H}$")
ax0.set_ylabel("$\mathcal{H}$")
ax0.yaxis.set_label_coords(y_offset, 0.5)
ax0.set_ylim(H_lim[0], H_lim[1])
ax0.set_xticks(ticks)
ax0.set_xticklabels(test_labels)
[ax0.axvline(divider[i], linestyle='--', linewidth=1.0, alpha=1., color="k") for i in range(len(divider))]
ax0.set_xlim(divider[0], divider[n_test])
ax0.plot(H[:], color="k")
for key in results.keys():
if key == "Network black_box":
continue
color = colors[key]
H_pred = results[key]["forward_model"]["H_pred"]
H_pred_min, H_pred_mean, H_pred_max = np.min(H_pred, axis=0), np.median(H_pred, axis=0), np.max(H_pred, axis=0)
x = np.arange(H_pred_max.shape[0])
ax0.plot(H_pred_mean[:], color=color, alpha=plot_alpha)
ax0.fill_between(x, H_pred_min[:], H_pred_max[:], color=color, alpha=plot_alpha/8.)
ax2 = fig.add_subplot(3, 4, 12)
ax2.text(s=r"\textbf{(d)}", x=.5, y=-0.35, fontsize=12, fontweight="bold", horizontalalignment="center",
verticalalignment="center", transform=ax2.transAxes)
ax2.set_frame_on(False)
ax2.set_xticks([])
ax2.set_yticks([])
ax2.legend(handles=legend, bbox_to_anchor=(-0.0375, 2.1), loc='upper left', ncol=1, framealpha=0., labelspacing=1.0)
# fig.savefig(f"figures/forward_model_{module_key}_{model_id}_Performance.pdf", format="pdf")
# fig.savefig(f"figures/forward_model_{module_key}_{model_id}_Performance.png", format="png")
print("\n################################################\n\n\n")
plt.show()
| [
"deep_lagrangian_networks.utils.load_dataset",
"numpy.array",
"matplotlib.rc",
"numpy.arange",
"dill.load",
"numpy.mean",
"numpy.max",
"numpy.concatenate",
"numpy.min",
"matplotlib.cm.get_cmap",
"numpy.ones",
"matplotlib.use",
"jax.numpy.cumsum",
"matplotlib.patches.Patch",
"numpy.std",
... | [((194, 210), 'matplotlib.use', 'mp.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (200, 210), True, 'import matplotlib as mp\n'), ((215, 241), 'matplotlib.rc', 'mp.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (220, 241), True, 'import matplotlib as mp\n'), ((4554, 4581), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (4560, 4581), True, 'import matplotlib.pyplot as plt\n'), ((4630, 4647), 'numpy.array', 'np.array', (['divider'], {}), '(divider)\n', (4638, 4647), True, 'import numpy as np\n'), ((4700, 4754), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24.0 / 1.54, 8.0 / 1.54)', 'dpi': '(100)'}), '(figsize=(24.0 / 1.54, 8.0 / 1.54), dpi=100)\n', (4710, 4754), True, 'import matplotlib.pyplot as plt\n'), ((15912, 15922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15920, 15922), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1797), 'deep_lagrangian_networks.utils.load_dataset', 'load_dataset', ([], {'filename': '"""data/character_data.pickle"""', 'test_label': "['e', 'q', 'v']"}), "(filename='data/character_data.pickle', test_label=['e', 'q', 'v'])\n", (1730, 1797), False, 'from deep_lagrangian_networks.utils import load_dataset, init_env, activations\n'), ((2377, 2398), 'numpy.zeros', 'np.zeros', (['(0, n_test)'], {}), '((0, n_test))\n', (2385, 2398), True, 'import numpy as np\n'), ((3277, 3294), 'jax.numpy.array', 'jnp.array', (['test_g'], {}), '(test_g)\n', (3286, 3294), True, 'import jax.numpy as jnp\n'), ((3296, 3313), 'jax.numpy.array', 'jnp.array', (['test_c'], {}), '(test_c)\n', (3305, 3313), True, 'import jax.numpy as jnp\n'), ((3315, 3332), 'jax.numpy.array', 'jnp.array', (['test_m'], {}), '(test_m)\n', (3324, 3332), True, 'import jax.numpy as jnp\n'), ((3334, 3353), 'jax.numpy.array', 'jnp.array', (['test_tau'], {}), '(test_tau)\n', (3343, 3353), True, 'import jax.numpy as jnp\n'), ((3371, 3389), 'jax.numpy.array', 'jnp.array', 
(['test_qp'], {}), '(test_qp)\n', (3380, 3389), True, 'import jax.numpy as jnp\n'), ((3391, 3409), 'jax.numpy.array', 'jnp.array', (['test_qv'], {}), '(test_qv)\n', (3400, 3409), True, 'import jax.numpy as jnp\n'), ((3411, 3429), 'jax.numpy.array', 'jnp.array', (['test_qa'], {}), '(test_qa)\n', (3420, 3429), True, 'import jax.numpy as jnp\n'), ((3442, 3459), 'jax.numpy.array', 'jnp.array', (['test_p'], {}), '(test_p)\n', (3451, 3459), True, 'import jax.numpy as jnp\n'), ((3461, 3479), 'jax.numpy.array', 'jnp.array', (['test_pd'], {}), '(test_pd)\n', (3470, 3479), True, 'import jax.numpy as jnp\n'), ((3491, 3516), 'jax.vmap', 'jax.vmap', (['jnp.dot', '[0, 0]'], {}), '(jnp.dot, [0, 0])\n', (3499, 3516), False, 'import jax\n'), ((4137, 4158), 'numpy.array', 'np.array', (['[0.25, 3.0]'], {}), '([0.25, 3.0])\n', (4145, 4158), True, 'import numpy as np\n'), ((4174, 4196), 'numpy.array', 'np.array', (['[-1.25, 1.0]'], {}), '([-1.25, 1.0])\n', (4182, 4196), True, 'import numpy as np\n'), ((4910, 5004), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['DeLaN structured']", 'label': '"""DeLaN - Structured Lagrangian"""'}), "(color=colors['DeLaN structured'], label=\n 'DeLaN - Structured Lagrangian')\n", (4926, 5004), True, 'import matplotlib as mp\n'), ((5009, 5101), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['DeLaN black_box']", 'label': '"""DeLaN - Black-Box Lagrangian"""'}), "(color=colors['DeLaN black_box'], label=\n 'DeLaN - Black-Box Lagrangian')\n", (5025, 5101), True, 'import matplotlib as mp\n'), ((5106, 5197), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['HNN structured']", 'label': '"""HNN - Structured Hamiltonian"""'}), "(color=colors['HNN structured'], label=\n 'HNN - Structured Hamiltonian')\n", (5122, 5197), True, 'import matplotlib as mp\n'), ((5202, 5291), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['HNN black_box']", 'label': '"""HNN - Black-Box 
Hamiltonian"""'}), "(color=colors['HNN black_box'], label=\n 'HNN - Black-Box Hamiltonian')\n", (5218, 5291), True, 'import matplotlib as mp\n'), ((5296, 5382), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': "colors['Network black_box']", 'label': '"""Feed-Forward Network"""'}), "(color=colors['Network black_box'], label=\n 'Feed-Forward Network')\n", (5312, 5382), True, 'import matplotlib as mp\n'), ((5387, 5436), 'matplotlib.patches.Patch', 'mp.patches.Patch', ([], {'color': '"""k"""', 'label': '"""Ground Truth"""'}), "(color='k', label='Ground Truth')\n", (5403, 5436), True, 'import matplotlib as mp\n'), ((8307, 8337), 'numpy.arange', 'np.arange', (['q_pred_max.shape[0]'], {}), '(q_pred_max.shape[0])\n', (8316, 8337), True, 'import numpy as np\n'), ((10915, 10946), 'numpy.arange', 'np.arange', (['qd_pred_max.shape[0]'], {}), '(qd_pred_max.shape[0])\n', (10924, 10946), True, 'import numpy as np\n'), ((13602, 13632), 'numpy.arange', 'np.arange', (['p_pred_max.shape[0]'], {}), '(p_pred_max.shape[0])\n', (13611, 13632), True, 'import numpy as np\n'), ((15045, 15075), 'numpy.arange', 'np.arange', (['H_pred_max.shape[0]'], {}), '(H_pred_max.shape[0])\n', (15054, 15075), True, 'import numpy as np\n'), ((1190, 1210), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1201, 1210), True, 'import matplotlib.cm as cm\n'), ((1242, 1262), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1253, 1262), True, 'import matplotlib.cm as cm\n'), ((1293, 1313), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1304, 1313), True, 'import matplotlib.cm as cm\n'), ((1343, 1363), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1354, 1363), True, 'import matplotlib.cm as cm\n'), ((1397, 1417), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cm.Set1'], {}), '(cm.Set1)\n', (1408, 1417), True, 'import matplotlib.cm as cm\n'), ((1628, 1645), 'dill.load', 'pickle.load', 
(['file'], {}), '(file)\n', (1639, 1645), True, 'import dill as pickle\n'), ((1900, 1996), 'deep_lagrangian_networks.utils.load_dataset', 'load_dataset', ([], {'filename': '"""data/uniform_data.pickle"""', 'test_label': "['Test 0', 'Test 1', 'Test 2']"}), "(filename='data/uniform_data.pickle', test_label=['Test 0',\n 'Test 1', 'Test 2'])\n", (1912, 1996), False, 'from deep_lagrangian_networks.utils import load_dataset, init_env, activations\n'), ((2236, 2286), 'numpy.mean', 'np.mean', (["results[key]['forward_model']['xd_error']"], {}), "(results[key]['forward_model']['xd_error'])\n", (2243, 2286), True, 'import numpy as np\n'), ((2845, 2857), 'numpy.mean', 'np.mean', (['vpt'], {}), '(vpt)\n', (2852, 2857), True, 'import numpy as np\n'), ((2859, 2870), 'numpy.std', 'np.std', (['vpt'], {}), '(vpt)\n', (2865, 2870), True, 'import numpy as np\n'), ((4224, 4242), 'numpy.min', 'np.min', (['qd'], {'axis': '(0)'}), '(qd, axis=0)\n', (4230, 4242), True, 'import numpy as np\n'), ((4287, 4305), 'numpy.max', 'np.max', (['qd'], {'axis': '(0)'}), '(qd, axis=0)\n', (4293, 4305), True, 'import numpy as np\n'), ((4348, 4365), 'numpy.min', 'np.min', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (4354, 4365), True, 'import numpy as np\n'), ((4409, 4426), 'numpy.max', 'np.max', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (4415, 4426), True, 'import numpy as np\n'), ((7956, 7978), 'numpy.min', 'np.min', (['q_pred'], {'axis': '(0)'}), '(q_pred, axis=0)\n', (7962, 7978), True, 'import numpy as np\n'), ((7980, 8005), 'numpy.median', 'np.median', (['q_pred'], {'axis': '(0)'}), '(q_pred, axis=0)\n', (7989, 8005), True, 'import numpy as np\n'), ((8007, 8029), 'numpy.max', 'np.max', (['q_pred'], {'axis': '(0)'}), '(q_pred, axis=0)\n', (8013, 8029), True, 'import numpy as np\n'), ((8079, 8102), 'numpy.min', 'np.min', (['q_error'], {'axis': '(0)'}), '(q_error, axis=0)\n', (8085, 8102), True, 'import numpy as np\n'), ((8104, 8130), 'numpy.median', 'np.median', (['q_error'], {'axis': '(0)'}), 
'(q_error, axis=0)\n', (8113, 8130), True, 'import numpy as np\n'), ((8132, 8155), 'numpy.max', 'np.max', (['q_error'], {'axis': '(0)'}), '(q_error, axis=0)\n', (8138, 8155), True, 'import numpy as np\n'), ((10694, 10717), 'numpy.min', 'np.min', (['qd_pred'], {'axis': '(0)'}), '(qd_pred, axis=0)\n', (10700, 10717), True, 'import numpy as np\n'), ((10719, 10745), 'numpy.median', 'np.median', (['qd_pred'], {'axis': '(0)'}), '(qd_pred, axis=0)\n', (10728, 10745), True, 'import numpy as np\n'), ((10747, 10770), 'numpy.max', 'np.max', (['qd_pred'], {'axis': '(0)'}), '(qd_pred, axis=0)\n', (10753, 10770), True, 'import numpy as np\n'), ((10823, 10847), 'numpy.min', 'np.min', (['qd_error'], {'axis': '(0)'}), '(qd_error, axis=0)\n', (10829, 10847), True, 'import numpy as np\n'), ((10849, 10876), 'numpy.median', 'np.median', (['qd_error'], {'axis': '(0)'}), '(qd_error, axis=0)\n', (10858, 10876), True, 'import numpy as np\n'), ((10878, 10902), 'numpy.max', 'np.max', (['qd_error'], {'axis': '(0)'}), '(qd_error, axis=0)\n', (10884, 10902), True, 'import numpy as np\n'), ((13390, 13412), 'numpy.min', 'np.min', (['p_pred'], {'axis': '(0)'}), '(p_pred, axis=0)\n', (13396, 13412), True, 'import numpy as np\n'), ((13414, 13439), 'numpy.median', 'np.median', (['p_pred'], {'axis': '(0)'}), '(p_pred, axis=0)\n', (13423, 13439), True, 'import numpy as np\n'), ((13441, 13463), 'numpy.max', 'np.max', (['p_pred'], {'axis': '(0)'}), '(p_pred, axis=0)\n', (13447, 13463), True, 'import numpy as np\n'), ((13513, 13536), 'numpy.min', 'np.min', (['p_error'], {'axis': '(0)'}), '(p_error, axis=0)\n', (13519, 13536), True, 'import numpy as np\n'), ((13538, 13564), 'numpy.median', 'np.median', (['p_error'], {'axis': '(0)'}), '(p_error, axis=0)\n', (13547, 13564), True, 'import numpy as np\n'), ((13566, 13589), 'numpy.max', 'np.max', (['p_error'], {'axis': '(0)'}), '(p_error, axis=0)\n', (13572, 13589), True, 'import numpy as np\n'), ((14959, 14981), 'numpy.min', 'np.min', (['H_pred'], {'axis': 
'(0)'}), '(H_pred, axis=0)\n', (14965, 14981), True, 'import numpy as np\n'), ((14983, 15008), 'numpy.median', 'np.median', (['H_pred'], {'axis': '(0)'}), '(H_pred, axis=0)\n', (14992, 15008), True, 'import numpy as np\n'), ((15010, 15032), 'numpy.max', 'np.max', (['H_pred'], {'axis': '(0)'}), '(H_pred, axis=0)\n', (15016, 15032), True, 'import numpy as np\n'), ((2293, 2342), 'numpy.std', 'np.std', (["results[key]['forward_model']['xd_error']"], {}), "(results[key]['forward_model']['xd_error'])\n", (2299, 2342), True, 'import numpy as np\n'), ((2514, 2673), 'numpy.concatenate', 'np.concatenate', (["[results[key]['forward_model']['q_error'][i, divider[j]:divider[j + 1]], \n results[key]['forward_model']['q_error'][i, -1:] * 0.0 + 1.0]"], {}), "([results[key]['forward_model']['q_error'][i, divider[j]:\n divider[j + 1]], results[key]['forward_model']['q_error'][i, -1:] * 0.0 +\n 1.0])\n", (2528, 2673), True, 'import numpy as np\n'), ((3556, 3599), 'jax.numpy.cumsum', 'jnp.cumsum', (['dHdt[divider[i]:divider[i + 1]]'], {}), '(dHdt[divider[i]:divider[i + 1]])\n', (3566, 3599), True, 'import jax.numpy as jnp\n'), ((3987, 3998), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (3995, 3998), True, 'import numpy as np\n'), ((4058, 4069), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (4066, 4069), True, 'import numpy as np\n'), ((874, 887), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (881, 887), True, 'import numpy as np\n'), ((2810, 2827), 'numpy.array', 'np.array', (['[vpt_i]'], {}), '([vpt_i])\n', (2818, 2827), True, 'import numpy as np\n'), ((2736, 2763), 'numpy.argwhere', 'np.argwhere', (['(traj >= vpt_th)'], {}), '(traj >= vpt_th)\n', (2747, 2763), True, 'import numpy as np\n')] |
#!flask/bi.python
# Development entry point: import the Flask application object and start the
# built-in server with the interactive debugger enabled.
# NOTE(review): debug=True must never be enabled in a production deployment.
from app import app
app.run(debug = True)
| [
"app.app.run"
] | [((39, 58), 'app.app.run', 'app.run', ([], {'debug': '(True)'}), '(debug=True)\n', (46, 58), False, 'from app import app\n')] |
#!/usr/bin/env python
import numpy as np
import scipy.sparse
from sklearn import svm
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer
from sklearn.model_selection import cross_val_score
def zero_pivot_columns(matrix, pivots):
    """Return a CSR copy of *matrix* with every pivot column zeroed.

    The input matrix is left untouched; the edit happens on a LIL copy,
    the sparse format that supports efficient column assignment.
    """
    editable = scipy.sparse.lil_matrix(matrix, copy=True)
    for col in pivots:
        editable[:, col] = 0.0
    return editable.tocsr()
def zero_nonpivot_columns(array, pivots):
    """Build a sparse CSR matrix keeping only the pivot columns of *array*.

    Pivot columns are copied from the input unchanged; every other column
    of the result is zero.
    """
    source = np.matrix(array, copy=False)
    kept = np.matrix(np.zeros(source.shape))
    for col in pivots:
        kept[:, col] += source[:, col]
    return scipy.sparse.csr_matrix(kept)
def remove_columns(matrix, indices):
    """Drop the given column *indices* from a dense matrix, returning CSR."""
    pruned = np.delete(matrix, indices, 1)
    return scipy.sparse.csr_matrix(pruned)
def evaluate_and_print_scores(X_train, y_train, X_test, y_test, score_label, C, sample_weight=None, penalty='l2', loss='squared_hinge', dual=True):
    """Train a linear SVM, predict on the test set, and print p/r/f1/accuracy.

    Bug fix: the caller-supplied ``sample_weight``, ``penalty``, ``loss`` and
    ``dual`` arguments are now forwarded to ``get_preds`` instead of being
    silently replaced by hard-coded defaults.
    """
    preds = get_preds(X_train, y_train, X_test, C, sample_weight=sample_weight,
                      penalty=penalty, loss=loss, dual=dual)
    r = recall_score(y_test, preds, pos_label=score_label)
    p = precision_score(y_test, preds, pos_label=score_label)
    f1 = f1_score(y_test, preds, pos_label=score_label)
    acc = accuracy_score(y_test, preds)
    # np.where over the label arrays yields the indices of the target class.
    print("Gold has %d instances of target class" % (len(np.where(y_test == score_label)[0])))
    print("System predicted %d instances of target class" % (len(np.where(preds == score_label)[0])))
    print("Accuracy is %f, p/r/f1 score is %f %f %f\n" % (acc, p, r, f1))
def get_preds(X_train, y_train, X_test, C=1.0, sample_weight=None, penalty='l2', loss='squared_hinge', dual=True):
    """Fit a linear SVM on the training split and return hard label
    predictions for ``X_test``."""
    classifier = svm.LinearSVC(C=C, penalty=penalty, loss=loss, dual=dual)
    classifier.fit(X_train, y_train, sample_weight=sample_weight)
    return classifier.predict(X_test)
def get_decisions(X_train, y_train, X_test, C=1.0, sample_weight=None, penalty='l2', loss='squared_hinge', dual=True):
    """Fit a linear SVM on the training split and return its raw
    decision-function values (signed margins) for ``X_test``."""
    model = svm.LinearSVC(C=C, penalty=penalty, loss=loss, dual=dual)
    model.fit(X_train, y_train, sample_weight=sample_weight)
    return model.decision_function(X_test)
def get_f1(X_train, y_train, X_test, y_test, score_label, C=1.0, sample_weight=None, penalty='l2', loss='squared_hinge', dual=True):
    """Train a linear SVM and return the f1 score for *score_label* on the test set.

    Bug fix: forward the caller's ``sample_weight``/``penalty``/``loss``/``dual``
    to ``get_preds`` rather than overriding them with defaults.
    """
    preds = get_preds(X_train, y_train, X_test, C=C, sample_weight=sample_weight,
                      penalty=penalty, loss=loss, dual=dual)
    return f1_score(y_test, preds, pos_label=score_label)
def read_pivots(pivot_file):
    """Read one pivot feature index per line; return them as a {index: 1} dict.

    The dict-of-ones layout (rather than a set) is kept for backward
    compatibility with existing callers.
    """
    pivots = {}
    # ``with`` guarantees the file is closed even if a line fails to parse.
    # int() tolerates the trailing newline, so no explicit strip is required
    # (the old ``line.rstrip()`` call discarded its result and was dead code).
    with open(pivot_file, 'r') as f:
        for line in f:
            pivots[int(line)] = 1
    return pivots
def align_test_X_train(X_train, X_test):
    """Make ``X_test``'s feature dimension match ``X_train``'s.

    A narrower test matrix is padded on the right with zero columns; a wider
    one is truncated. A matrix whose width already matches is returned
    unchanged.
    """
    train_feats = X_train.shape[1]
    test_rows, test_feats = X_test.shape
    if test_feats < train_feats:
        # Pad: append all-zero columns so the widths agree.
        padding = np.zeros((test_rows, train_feats - test_feats))
        dense = np.append(X_test.toarray(), padding, axis=1)
        return scipy.sparse.csr_matrix(dense)
    if test_feats > train_feats:
        # Truncate the surplus trailing columns.
        return X_test[:, :train_feats]
    return X_test
def find_best_c(X_train, y_train, C_list=(0.01, 0.1, 1.0, 10.0), penalty='l2', dual=True, scorer=f1_score, **scorer_args):
    """Grid-search the SVM regularization constant C via cross-validation.

    Returns ``(best_c, best_score)`` where the score is the mean
    cross-validated value of *scorer* (wrapped with ``make_scorer`` so
    extra ``scorer_args`` such as ``pos_label`` can be supplied).

    Fix: the default ``C_list`` is now a tuple — a mutable default list is
    a classic Python pitfall (and a tuple iterates identically).
    """
    cv_scorer = make_scorer(scorer, **scorer_args)
    best_score = 0
    best_c = 0
    for C in C_list:
        estimator = svm.LinearSVC(C=C, penalty=penalty, dual=dual)
        score = np.average(cross_val_score(estimator, X_train, y_train, scoring=cv_scorer, n_jobs=1))
        if score > best_score:
            best_score = score
            best_c = C
    return best_c, best_score
def read_feature_groups(groups_file, offset=0):
    """Read a ``"domain : i,j,k"`` mapping file into ``{domain: [indices]}``.

    The feature groups file unfortunately has to be adjusted here: the files
    written by cleartk are 1-indexed, but the reader that ingests them
    "helpfully" shifts the indices, so *offset* is added to every index read.

    Fix: the result dict was named ``map`` (shadowing the builtin) and the
    comprehension variable ``f`` shadowed the open file handle; both renamed.
    """
    groups = {}
    with open(groups_file, 'r') as f:
        for line in f:
            domain, indices = line.split(' : ')
            groups[domain] = [int(ind) + offset for ind in indices.split(',')]
    return groups
def read_feature_lookup(lookup_file, offset=0):
    """Read a ``"name : index"`` lookup file into an index-ordered name list.

    Element 0 of the returned list is always the synthetic 'Bias' feature
    (the first feature in the data is a bias term fixed at 1); the remaining
    names follow in ascending index order, each file index shifted by
    *offset* (the cleartk files are 1-indexed, see read_feature_groups).

    Fix: the original locals ``map`` and ``list`` shadowed builtins; renamed.
    """
    by_index = {}
    with open(lookup_file, 'r', encoding='utf-8') as f:
        for line in f:
            name, ind = line.rstrip().split(' : ')
            by_index[int(ind) + offset] = name
    names = ['Bias']
    for i in sorted(by_index.keys()):
        names.append(by_index[i])
    return names
| [
"sklearn.metrics.f1_score",
"numpy.where",
"numpy.delete",
"sklearn.svm.LinearSVC",
"sklearn.metrics.make_scorer",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.zeros",
"numpy.matrix",
"sklearn.metrics.accuracy_score"
] | [((481, 509), 'numpy.matrix', 'np.matrix', (['array'], {'copy': '(False)'}), '(array, copy=False)\n', (490, 509), True, 'import numpy as np\n'), ((1070, 1120), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'preds'], {'pos_label': 'score_label'}), '(y_test, preds, pos_label=score_label)\n', (1082, 1120), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((1129, 1182), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'preds'], {'pos_label': 'score_label'}), '(y_test, preds, pos_label=score_label)\n', (1144, 1182), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((1192, 1238), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'preds'], {'pos_label': 'score_label'}), '(y_test, preds, pos_label=score_label)\n', (1200, 1238), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((1249, 1278), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (1263, 1278), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((1676, 1733), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': 'C', 'penalty': 'penalty', 'loss': 'loss', 'dual': 'dual'}), '(C=C, penalty=penalty, loss=loss, dual=dual)\n', (1689, 1733), False, 'from sklearn import svm\n'), ((1972, 2029), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': 'C', 'penalty': 'penalty', 'loss': 'loss', 'dual': 'dual'}), '(C=C, penalty=penalty, loss=loss, dual=dual)\n', (1985, 2029), False, 'from sklearn import svm\n'), ((2411, 2457), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'preds'], {'pos_label': 'score_label'}), '(y_test, preds, pos_label=score_label)\n', (2419, 2457), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((3426, 3460), 
'sklearn.metrics.make_scorer', 'make_scorer', (['scorer'], {}), '(scorer, **scorer_args)\n', (3437, 3460), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, make_scorer\n'), ((540, 562), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (548, 562), True, 'import numpy as np\n'), ((764, 793), 'numpy.delete', 'np.delete', (['matrix', 'indices', '(1)'], {}), '(matrix, indices, 1)\n', (773, 793), True, 'import numpy as np\n'), ((3099, 3157), 'numpy.zeros', 'np.zeros', (['(num_test_instances, num_feats - num_test_feats)'], {}), '((num_test_instances, num_feats - num_test_feats))\n', (3107, 3157), True, 'import numpy as np\n'), ((3559, 3605), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': 'C', 'penalty': 'penalty', 'dual': 'dual'}), '(C=C, penalty=penalty, dual=dual)\n', (3572, 3605), False, 'from sklearn import svm\n'), ((1336, 1367), 'numpy.where', 'np.where', (['(y_test == score_label)'], {}), '(y_test == score_label)\n', (1344, 1367), True, 'import numpy as np\n'), ((1439, 1469), 'numpy.where', 'np.where', (['(preds == score_label)'], {}), '(preds == score_label)\n', (1447, 1469), True, 'import numpy as np\n')] |
"""Training a SEAL-CI model."""
import torch
from utils import tab_printer
from seal import SEALCITrainer
from param_parser import parameter_parser
def main():
    """Parse CLI arguments, then fit and score a SEAL-CI model."""
    parsed_args = parameter_parser()
    tab_printer(parsed_args)
    seal_trainer = SEALCITrainer(parsed_args)
    seal_trainer.fit()
    seal_trainer.score()
if __name__ == "__main__":
    main()
| [
"utils.tab_printer",
"param_parser.parameter_parser",
"seal.SEALCITrainer"
] | [((281, 299), 'param_parser.parameter_parser', 'parameter_parser', ([], {}), '()\n', (297, 299), False, 'from param_parser import parameter_parser\n'), ((304, 321), 'utils.tab_printer', 'tab_printer', (['args'], {}), '(args)\n', (315, 321), False, 'from utils import tab_printer\n'), ((336, 355), 'seal.SEALCITrainer', 'SEALCITrainer', (['args'], {}), '(args)\n', (349, 355), False, 'from seal import SEALCITrainer\n')] |
"""
Calm DSL Sample Runbook for parallel task
"""
from calm.dsl.runbooks import runbook, parallel, branch
from calm.dsl.runbooks import RunbookTask as Task
@runbook
def ParallelTask():
    """Runbook example: three 60-second Delay tasks grouped in parallel branches."""
    # Each `branch(p)` context attaches its tasks to the enclosing `parallel()`
    # block, so the three Delay tasks belong to one parallel group (Calm DSL).
    with parallel() as p:
        with branch(p):
            Task.Delay(60, name="Delay1")
        with branch(p):
            Task.Delay(60, name="Delay2")
        with branch(p):
            Task.Delay(60, name="Delay3")
| [
"calm.dsl.runbooks.branch",
"calm.dsl.runbooks.RunbookTask.Delay",
"calm.dsl.runbooks.parallel"
] | [((228, 238), 'calm.dsl.runbooks.parallel', 'parallel', ([], {}), '()\n', (236, 238), False, 'from calm.dsl.runbooks import runbook, parallel, branch\n'), ((258, 267), 'calm.dsl.runbooks.branch', 'branch', (['p'], {}), '(p)\n', (264, 267), False, 'from calm.dsl.runbooks import runbook, parallel, branch\n'), ((281, 310), 'calm.dsl.runbooks.RunbookTask.Delay', 'Task.Delay', (['(60)'], {'name': '"""Delay1"""'}), "(60, name='Delay1')\n", (291, 310), True, 'from calm.dsl.runbooks import RunbookTask as Task\n'), ((324, 333), 'calm.dsl.runbooks.branch', 'branch', (['p'], {}), '(p)\n', (330, 333), False, 'from calm.dsl.runbooks import runbook, parallel, branch\n'), ((347, 376), 'calm.dsl.runbooks.RunbookTask.Delay', 'Task.Delay', (['(60)'], {'name': '"""Delay2"""'}), "(60, name='Delay2')\n", (357, 376), True, 'from calm.dsl.runbooks import RunbookTask as Task\n'), ((390, 399), 'calm.dsl.runbooks.branch', 'branch', (['p'], {}), '(p)\n', (396, 399), False, 'from calm.dsl.runbooks import runbook, parallel, branch\n'), ((413, 442), 'calm.dsl.runbooks.RunbookTask.Delay', 'Task.Delay', (['(60)'], {'name': '"""Delay3"""'}), "(60, name='Delay3')\n", (423, 442), True, 'from calm.dsl.runbooks import RunbookTask as Task\n')] |
from .scripts import ClearScreen
from PyInquirer import prompt
def SelectTemplateGraft(statBlock):
    """Prompt the user for an optional template graft and record it on *statBlock*.

    Shows a summary of every supported graft, asks the user to pick one (or
    none), then sets:

    - ``statBlock.template``: the graft name, or ``None`` if no graft chosen.
    - ``statBlock.levelConstraint``: human-readable level restriction text.
    - ``statBlock.types``: list of creature traits granted by the graft.
    """
    ClearScreen()
    print("You can optionally use a template graft to start the creation process. Template grafts grant specific bonuses and unique abilities. They also " +
          "largely determine the creature types the monster will have. Listed minimum levels account for the graft applied to a level -1 " +
          "creature except for when the grafts themselves require a minimum level, e.g., graveknight. See " +
          "https://2e.aonprd.com/MonsterTemplates.aspx for additional information on the abilities that each of these grant.\n\n" +
          "Ghost: incorporeal undead; has the Spirit and Undead traits; minimum level 1\n\n" +
          "Ghoul: usually intelligent, flesh-eating undead formed from humanoids; often form societies; has the Undead trait; minimum level 0\n\n" +
          "Ghast: stronger version of Ghouls; minimum level 1\n\n" +
          "Graveknight: once humanoid undead warriors kept alive by cursed armor; has the Undead trait; minimum level 6\n\n" +
          "Lich: powerful spellcasters that have intentionally become undead; has the Undead trait; recommended minimum level 13\n\n" +
          "Ravener: skeletal undead dragons that feed on the souls of the living; has the Undead trait; recommended minimum level 15\n\n" +
          "True Vampire: once humanoid undead that feed on the blood of living creatures; has the Undead trait; minimum level 6\n\n" +
          "Vampire Spawn: undead turned by a True Vampire's bite; has the Undead trait; minimum level 0\n\n" +
          "Vrykolakas: vampiric, plague-bearing undead risen from neglected corpses; has the Undead trait; minimum level 0\n\n" +
          "Werecreature: shapeshifting, humanoid animal hybrids; has the Beast and Humanoid traits; minimum level 0\n\n" +
          "Worm That Walks: eldritch spellcasters formed from an amalgamation of grave worms; has the Aberration and Swarm traits; recommended minimum level 5\n\n")

    question = [
        {
            'type': 'list',
            'name': 'template',
            'message': 'Choose one of the following:',
            'choices': ['Create a monster without a template graft',
                        'Ghost', 'Ghoul', 'Ghast', 'Graveknight', 'Lich', 'Ravener', 'True Vampire',
                        'Vampire Spawn', 'Vrykolakas', 'Werecreature', 'Worm That Walks']
        }
    ]

    # Map each menu choice to ('template name', 'level constraint', [traits]).
    # Plain tuples replace the original per-entry lambdas; a choice missing
    # from the table ("Create a monster without a template graft") falls back
    # to (None, '', []).
    templates = {
        'Ghost': ('Ghost', 'minimum level 1', ['Spirit', 'Undead']),
        'Ghoul': ('Ghoul', 'minimum level 0', ['Undead']),
        'Ghast': ('Ghast', 'minimum level 1', ['Undead']),
        'Graveknight': ('Graveknight', 'minimum level 6', ['Undead']),
        'Lich': ('Lich', 'Paizo recommended minimum level 13', ['Undead']),
        'Ravener': ('Ravener', 'Paizo recommended minimum level 15', ['Undead']),
        'True Vampire': ('True Vampire', 'minimum level 6', ['Undead']),
        'Vampire Spawn': ('Vampire Spawn', 'minimum level 0', ['Undead']),
        'Vrykolakas': ('Vrykolakas', 'minimum level 0', ['Undead']),
        'Werecreature': ('Werecreature', 'minimum level 0', ['Beast', 'Humanoid']),
        'Worm That Walks': ('Worm That Walks', 'Paizo recommended minimum level 5', ['Aberration', 'Swarm'])
    }
    choice = prompt(question)['template']
    statBlock.template, statBlock.levelConstraint, statBlock.types = templates.get(choice, (None, '', []))
    return
"PyInquirer.prompt"
] | [((3664, 3680), 'PyInquirer.prompt', 'prompt', (['question'], {}), '(question)\n', (3670, 3680), False, 'from PyInquirer import prompt\n')] |
"""
Convert scenery image to tactile image.
# Algorithm
1. Read scenery image.
2. Grayscale.
3. Histogram equalize.
4. Compute fine-grained saliency map.
5. Scale to [0, 255]
6. Compute binary threshold map.
7. Invert binary threshold map.
8. Write tactile image.
References at the corresponding source code below.
"""
import argparse
import cv2
ap = argparse.ArgumentParser()
ap.add_argument('-s', '--show', required=False, action='store_true', help='Show output')
ap.add_argument('-f', '--fine', required=False, action='store_true', help='Fine-grained salient only mode')
ap.add_argument('-i', '--image', required=True, help='Input: image file path')
ap.add_argument('-o', '--output', required=False, help='Output: tactile image file path')
args = ap.parse_args()
# Read image, grayscale, equalize
# Source: https://docs.opencv.org/3.4.3/d4/d1b/tutorial_histogram_equalization.html
image = cv2.imread(args.image)
image_grayscaled = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image_equalized = cv2.equalizeHist(image_grayscaled)
"""
Fine-grained saliency detection from
<NAME> and <NAME>.
Human detection using a mobile platform and novel features derived from a visual saliency mechanism.
In Image and Vision Computing, Vol. 28 Issue 3, pages 391–402. Elsevier, 2010.
Source: https://docs.opencv.org/3.4.3/da/dd0/classcv_1_1saliency_1_1StaticSaliencyFineGrained.html
"""
fine_saliency = cv2.saliency.StaticSaliencyFineGrained_create()
_, fine_saliency_map = fine_saliency.computeSaliency(image_equalized)
# Scale the values to [0, 255]
fine_saliency_map = (fine_saliency_map * 255).astype('uint8')
# Compute binary threshold map
threshold_map = cv2.threshold(
fine_saliency_map.astype('uint8'), 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# Invert the binary threshold map so it is Swell Paper Tactile Printer friendly
inverse_threshold_map = cv2.bitwise_not(threshold_map)
if args.fine:
selected_map = fine_saliency_map
else:
selected_map = inverse_threshold_map
# Save output
cv2.imwrite(args.output, selected_map)
if args.show:
# Show output
cv2.imshow('Inverse Threshold Map', selected_map)
# Press any key to exit
cv2.waitKey(0) | [
"cv2.imwrite",
"cv2.saliency.StaticSaliencyFineGrained_create",
"argparse.ArgumentParser",
"cv2.imshow",
"cv2.equalizeHist",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.bitwise_not",
"cv2.imread"
] | [((354, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (377, 379), False, 'import argparse\n'), ((896, 918), 'cv2.imread', 'cv2.imread', (['args.image'], {}), '(args.image)\n', (906, 918), False, 'import cv2\n'), ((938, 977), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (950, 977), False, 'import cv2\n'), ((996, 1030), 'cv2.equalizeHist', 'cv2.equalizeHist', (['image_grayscaled'], {}), '(image_grayscaled)\n', (1012, 1030), False, 'import cv2\n'), ((1391, 1438), 'cv2.saliency.StaticSaliencyFineGrained_create', 'cv2.saliency.StaticSaliencyFineGrained_create', ([], {}), '()\n', (1436, 1438), False, 'import cv2\n'), ((1858, 1888), 'cv2.bitwise_not', 'cv2.bitwise_not', (['threshold_map'], {}), '(threshold_map)\n', (1873, 1888), False, 'import cv2\n'), ((2003, 2041), 'cv2.imwrite', 'cv2.imwrite', (['args.output', 'selected_map'], {}), '(args.output, selected_map)\n', (2014, 2041), False, 'import cv2\n'), ((2079, 2128), 'cv2.imshow', 'cv2.imshow', (['"""Inverse Threshold Map"""', 'selected_map'], {}), "('Inverse Threshold Map', selected_map)\n", (2089, 2128), False, 'import cv2\n'), ((2162, 2176), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2173, 2176), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
from abc import ABC, abstractmethod
from typing import Union, Tuple, List, Optional, Type
import oef.query_pb2 as query_pb2
from oef.schema import ATTRIBUTE_TYPES, AttributeSchema, DataModel, ProtobufSerializable, Description, Location
# Pair types accepted by the Range constraint (both endpoints share one type).
RANGE_TYPES = Union[Tuple[str, str], Tuple[int, int], Tuple[float, float], Tuple[Location, Location]]
# Attribute types that support ordering comparisons (used by OrderingRelation).
ORDERED_TYPES = Union[int, str, float]
# Homogeneous value lists accepted by the set-membership constraints (In / NotIn).
SET_TYPES = Union[List[float], List[str], List[bool], List[int], List[Location]]
# Forward placeholder; presumably rebound to the Query class defined later in
# this module (definition not visible here) -- TODO confirm.
Query = None
class ConstraintExpr(ProtobufSerializable, ABC):
    """
    This class is used to represent a constraint expression.
    Abstract base of And / Or / Not / Constraint; also hosts the static
    helpers that (de)serialize any expression via the Protobuf oneof field.
    """
    @abstractmethod
    def check(self, description: Description) -> bool:
        """
        Check if a description satisfies the constraint expression.
        :param description: the description to check.
        :return: ``True`` if the description satisfy the constraint expression, ``False`` otherwise.
        """
    @abstractmethod
    def is_valid(self, data_model: DataModel) -> bool:
        """
        Check whether a constraint expression is valid wrt a data model. Specifically, check the following conditions:
        - If all the attributes referenced by the constraints are correctly associated with the Data Model attributes.
        :param data_model: the data model used to check the validity of the constraint expression.
        :return: ``True`` if the constraint expression is valid wrt the data model, ``False`` otherwise.
        """
    def _check_validity(self) -> None:
        """Check whether a Constraint Expression satisfies some basic requirements.
        E.g. an :class:`~oef.query.And` expression must have at least 2 subexpressions.
        :return ``None``
        :raises ValueError: if the object does not satisfy some requirements."""
        return
    @staticmethod
    def _to_pb(expression):
        # Wrap a concrete expression into the generic ConstraintExpr message,
        # filling the oneof field that matches the expression's Python type.
        constraint_expr_pb = query_pb2.Query.ConstraintExpr()
        expression_pb = expression.to_pb()
        if isinstance(expression, And):
            constraint_expr_pb.and_.CopyFrom(expression_pb)
        elif isinstance(expression, Or):
            constraint_expr_pb.or_.CopyFrom(expression_pb)
        elif isinstance(expression, Not):
            constraint_expr_pb.not_.CopyFrom(expression_pb)
        elif isinstance(expression, Constraint):
            constraint_expr_pb.constraint.CopyFrom(expression_pb)
        # NOTE: an expression of any other type falls through and yields an
        # empty ConstraintExpr message.
        return constraint_expr_pb
    @staticmethod
    def _from_pb(expression_pb):
        # Dispatch on the populated oneof field to the matching Python class.
        # Returns None implicitly when the oneof is unset or unrecognized.
        expression = expression_pb.WhichOneof("expression")
        if expression == "and_":
            return And.from_pb(expression_pb.and_)
        elif expression == "or_":
            return Or.from_pb(expression_pb.or_)
        elif expression == "not_":
            return Not.from_pb(expression_pb.not_)
        elif expression == "constraint":
            return Constraint.from_pb(expression_pb.constraint)
class And(ConstraintExpr):
    """
    Conjunction of constraint expressions: satisfied exactly when every
    sub-expression is satisfied.

    Examples:
        All the books whose title is between 'I' and 'J' (alphanumeric order) but not equal to 'It'

        >>> c = And([Constraint("title", Range(("I", "J"))), Constraint("title", NotEq("It"))])
        >>> c.check(Description({"title": "I, Robot"}))
        True
        >>> c.check(Description({"title": "It"}))
        False
        >>> c.check(Description({"title": "1984"}))
        False
    """

    def __init__(self, constraints: List[ConstraintExpr]) -> None:
        """
        Initialize an :class:`~oef.query.And` constraint.

        :param constraints: the sub-expressions, all of which must hold.
        """
        self.constraints = constraints
        self._check_validity()

    def to_pb(self):
        """
        Serialize this :class:`~oef.query.And` into its Protobuf counterpart.

        :return: the ``And`` Protobuf object wrapping every sub-expression.
        """
        and_pb = query_pb2.Query.ConstraintExpr.And()
        and_pb.expr.extend(ConstraintExpr._to_pb(subexpr) for subexpr in self.constraints)
        return and_pb

    @classmethod
    def from_pb(cls, constraint_pb: query_pb2.Query.ConstraintExpr.And):
        """
        Deserialize an ``And`` Protobuf object.

        :param constraint_pb: the Protobuf object that represents the conjunction.
        :return: the equivalent :class:`~oef.query.And` instance.
        """
        return cls([ConstraintExpr._from_pb(subexpr_pb) for subexpr_pb in constraint_pb.expr])

    def check(self, description: Description) -> bool:
        """
        Check whether *description* satisfies every sub-expression.

        :param description: the description to check.
        :return: ``True`` iff all sub-expressions are satisfied.
        """
        for subexpr in self.constraints:
            if not subexpr.check(description):
                return False
        return True

    def is_valid(self, data_model: DataModel) -> bool:
        for subexpr in self.constraints:
            if not subexpr.is_valid(data_model):
                return False
        return True

    def _check_validity(self):
        # A conjunction of fewer than two expressions is meaningless.
        if len(self.constraints) < 2:
            raise ValueError("Invalid input value for type '{}': number of "
                             "subexpression must be at least 2.".format(type(self).__name__))
        for subexpr in self.constraints:
            subexpr._check_validity()

    def __eq__(self, other):
        return type(other) == And and self.constraints == other.constraints
class Or(ConstraintExpr):
    """
    Disjunction of constraint expressions: satisfied when at least one
    sub-expression is satisfied.

    Examples:
        All the books that have been published either before the year 1960 or after the year 1970

        >>> c = Or([Constraint("year", Lt(1960)), Constraint("year", Gt(1970))])
        >>> c.check(Description({"year": 1950}))
        True
        >>> c.check(Description({"year": 1975}))
        True
        >>> c.check(Description({"year": 1960}))
        False
        >>> c.check(Description({"year": 1970}))
        False
    """

    def __init__(self, constraints: List[ConstraintExpr]) -> None:
        """
        Initialize an :class:`~oef.query.Or` constraint.

        :param constraints: the sub-expressions, at least one of which must hold.
        """
        self.constraints = constraints
        self._check_validity()

    def to_pb(self):
        """
        Serialize this :class:`~oef.query.Or` into its Protobuf counterpart.

        :return: the ``Or`` Protobuf object wrapping every sub-expression.
        """
        or_pb = query_pb2.Query.ConstraintExpr.Or()
        or_pb.expr.extend(ConstraintExpr._to_pb(subexpr) for subexpr in self.constraints)
        return or_pb

    @classmethod
    def from_pb(cls, constraint_pb: query_pb2.Query.ConstraintExpr.Or):
        """
        Deserialize an ``Or`` Protobuf object.

        :param constraint_pb: the Protobuf object that represents the disjunction.
        :return: the equivalent :class:`~oef.query.Or` instance.
        """
        return cls([ConstraintExpr._from_pb(subexpr_pb) for subexpr_pb in constraint_pb.expr])

    def check(self, description: Description) -> bool:
        """
        Check whether *description* satisfies at least one sub-expression.

        :param description: the description to check.
        :return: ``True`` iff some sub-expression is satisfied.
        """
        for subexpr in self.constraints:
            if subexpr.check(description):
                return True
        return False

    def is_valid(self, data_model: DataModel) -> bool:
        for subexpr in self.constraints:
            if not subexpr.is_valid(data_model):
                return False
        return True

    def _check_validity(self):
        # A disjunction of fewer than two expressions is meaningless.
        if len(self.constraints) < 2:
            raise ValueError("Invalid input value for type '{}': number of "
                             "subexpression must be at least 2.".format(type(self).__name__))
        for subexpr in self.constraints:
            subexpr._check_validity()

    def __eq__(self, other):
        return type(other) == Or and self.constraints == other.constraints
class Not(ConstraintExpr):
    """
    Negation of a constraint expression: satisfied exactly when the wrapped
    expression is not satisfied.

    Examples:
        All the books whose genre is science fiction, but the year is not between 1990 and 2000

        >>> c = And([Constraint("genre", Eq("science-fiction")), Not(Constraint("year", Range((1990, 2000))))])
        >>> c.check(Description({"genre": "science-fiction", "year": 1995}))
        False
        >>> c.check(Description({"genre": "science-fiction", "year": 2001}))
        True
    """

    def __init__(self, constraint: ConstraintExpr) -> None:
        """
        Initialize a :class:`~oef.query.Not` constraint.

        :param constraint: the expression to negate.
        """
        self.constraint = constraint

    def check(self, description: Description) -> bool:
        """
        Check whether *description* fails the wrapped expression.

        :param description: the description to check.
        :return: ``True`` iff the wrapped expression is not satisfied.
        """
        return not self.constraint.check(description)

    def to_pb(self):
        """
        Serialize this :class:`~oef.query.Not` into its Protobuf counterpart.

        :return: the ``Not`` Protobuf object wrapping the negated expression.
        """
        not_pb = query_pb2.Query.ConstraintExpr.Not()
        not_pb.expr.CopyFrom(ConstraintExpr._to_pb(self.constraint))
        return not_pb

    @classmethod
    def from_pb(cls, constraint_pb: query_pb2.Query.ConstraintExpr.Not):
        """
        Deserialize a ``Not`` Protobuf object.

        :param constraint_pb: the Protobuf object that represents the negation.
        :return: the equivalent :class:`~oef.query.Not` instance.
        """
        return cls(ConstraintExpr._from_pb(constraint_pb.expr))

    def is_valid(self, data_model: DataModel) -> bool:
        return self.constraint.is_valid(data_model)

    def __eq__(self, other):
        return type(other) == Not and self.constraint == other.constraint
class ConstraintType(ProtobufSerializable, ABC):
    """
    This class is used to represent a constraint type.
    Abstract base of Relation, Range and Set.
    """
    @abstractmethod
    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Check if an attribute value satisfies the constraint.
        The implementation depends on the constraint type.
        :param value: the value to check.
        :return: ``True`` if the value satisfy the constraint, ``False`` otherwise.
        """
    def is_valid(self, attribute: AttributeSchema) -> bool:
        """
        Check if the constraint type is valid wrt a given attribute.
        :param attribute: the data model used to check the validity of the constraint type.
        :return: ``True`` if the constraint type is valid wrt the attribute, ``False`` otherwise.
        """
        # A None type means the constraint is type-agnostic (e.g. an empty Set),
        # so any attribute type is acceptable.
        return self._get_type() is None or self._get_type() == attribute.type
    @abstractmethod
    def _get_type(self) -> Optional[Type[ATTRIBUTE_TYPES]]:
        """
        Get the type of attributes values that can be compared with this constraint
        :return: the type of this constraint type, or ``None`` if it can't be determined.
        """
class Relation(ConstraintType, ABC):
    """
    A constraint type that allows you to impose specific values
    for the attributes.

    The specific operator of the relation is defined in the
    subclasses that extend this class.
    """

    def __init__(self, value: ATTRIBUTE_TYPES) -> None:
        """
        Initialize a Relation object.

        :param value: the right value of the relation.
        """
        self.value = value

    @abstractmethod
    def _operator(self) -> query_pb2.Query.Relation:
        """Return the Protobuf operator code of the relation.

        FIX: declared as a plain abstract method (the original also stacked
        ``@property`` on it), because every subclass implements it as a method
        and ``to_pb`` invokes it as ``self._operator()`` -- a conforming
        property implementation would have broken serialization.
        """

    @classmethod
    def from_pb(cls, relation: query_pb2.Query.Relation):
        """
        From the Relation Protobuf object to the associated
        instance of a subclass of Relation.

        :param relation: the Protobuf object that represents the relation constraint.
        :return: an instance of one of the subclasses of Relation.
        """
        # Map each Protobuf operator code onto the concrete Relation subclass.
        relations_from_pb = {
            query_pb2.Query.Relation.GTEQ: GtEq,
            query_pb2.Query.Relation.GT: Gt,
            query_pb2.Query.Relation.LTEQ: LtEq,
            query_pb2.Query.Relation.LT: Lt,
            query_pb2.Query.Relation.NOTEQ: NotEq,
            query_pb2.Query.Relation.EQ: Eq
        }
        relation_class = relations_from_pb[relation.op]
        # Unpack the oneof value field into the matching Python type.
        value_case = relation.val.WhichOneof("value")
        if value_case == "s":
            return relation_class(relation.val.s)
        elif value_case == "b":
            return relation_class(relation.val.b)
        elif value_case == "i":
            return relation_class(relation.val.i)
        elif value_case == "d":
            return relation_class(relation.val.d)
        elif value_case == "l":
            return relation_class(Location.from_pb(relation.val.l))

    def to_pb(self) -> query_pb2.Query.Relation:
        """
        From an instance of Relation to its associated Protobuf object.

        :return: the Protobuf object that contains the relation.
        """
        relation = query_pb2.Query.Relation()
        relation.op = self._operator()
        query_value = query_pb2.Query.Value()
        # NOTE: bool must be tested before int -- in Python bool is a subclass
        # of int, and the original isinstance order is preserved here.
        if isinstance(self.value, bool):
            query_value.b = self.value
        elif isinstance(self.value, int):
            query_value.i = self.value
        elif isinstance(self.value, float):
            query_value.d = self.value
        elif isinstance(self.value, str):
            query_value.s = self.value
        elif isinstance(self.value, Location):
            query_value.l.CopyFrom(self.value.to_pb())
        relation.val.CopyFrom(query_value)
        return relation

    def _get_type(self) -> Type[ATTRIBUTE_TYPES]:
        # The compatible attribute type is the type of the stored right value.
        return type(self.value)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        else:
            return self.value == other.value
class OrderingRelation(Relation, ABC):
    """A specialization of the :class:`~oef.query.Relation` class to represent ordering relation (e.g. greater-than)."""
    def __init__(self, value: ORDERED_TYPES):
        # Restrict the relation value to types supporting <, <=, >, >= (int, str, float).
        super().__init__(value)
    def _get_type(self) -> Type[ORDERED_TYPES]:
        # The compatible attribute type is exactly the type of the stored value.
        return type(self.value)
class Eq(Relation):
    """
    Equality relation: satisfied when the attribute value equals the
    constraint value.

    Examples:
        All the books whose author is <NAME>

        >>> c = Constraint("author", Eq("<NAME>"))
        >>> c.check(Description({"author": "<NAME>"}))
        True
        >>> c.check(Description({"author": "<NAME>"}))
        False
    """

    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Test *value* for equality against the constraint value.

        :param value: the value to check.
        :return: ``True`` when the values are equal, ``False`` otherwise.
        """
        return value == self.value

    def _operator(self):
        return query_pb2.Query.Relation.EQ
class NotEq(Relation):
    """
    Inequality relation: satisfied when the attribute value differs from the
    constraint value.

    Examples:
        All the books that are not of the genre Horror

        >>> c = Constraint("genre", NotEq("horror"))
        >>> c.check(Description({"genre": "non-fiction"}))
        True
        >>> c.check(Description({"author": "horror"}))
        False
    """

    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Test *value* for inequality against the constraint value.

        :param value: the value to check.
        :return: ``True`` when the values differ, ``False`` otherwise.
        """
        return value != self.value

    def _operator(self):
        return query_pb2.Query.Relation.NOTEQ
class Lt(OrderingRelation):
    """
    Strict less-than relation: satisfied when the attribute value is strictly
    smaller than the constraint value.

    Examples:
        All the books published before 1990

        >>> c = Constraint("year", Lt(1990))
        >>> c.check(Description({"year": 1985}))
        True
        >>> c.check(Description({"year": 2000}))
        False
    """

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Test whether *value* is strictly smaller than the constraint value.

        :param value: the value to check.
        :return: ``True`` when ``value < self.value``, ``False`` otherwise.
        """
        return value < self.value

    def _operator(self):
        return query_pb2.Query.Relation.LT
class LtEq(OrderingRelation):
    """
    Less-than-or-equal relation: satisfied when the attribute value does not
    exceed the constraint value.

    Examples:
        All the books published before 1990, 1990 included

        >>> c = Constraint("year", LtEq(1990))
        >>> c.check(Description({"year": 1990}))
        True
        >>> c.check(Description({"year": 1991}))
        False
    """

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Test whether *value* is smaller than or equal to the constraint value.

        :param value: the value to check.
        :return: ``True`` when ``value <= self.value``, ``False`` otherwise.
        """
        return value <= self.value

    def _operator(self):
        return query_pb2.Query.Relation.LTEQ
class Gt(OrderingRelation):
    """
    Strict greater-than relation: satisfied when the attribute value is
    strictly larger than the constraint value.

    Examples:
        All the books with rating greater than 4.0

        >>> c = Constraint("average_rating", Gt(4.0))
        >>> c.check(Description({"average_rating": 4.5}))
        True
        >>> c.check(Description({"average_rating": 3.0}))
        False
    """

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Test whether *value* is strictly larger than the constraint value.

        :param value: the value to check.
        :return: ``True`` when ``value > self.value``, ``False`` otherwise.
        """
        return value > self.value

    def _operator(self):
        return query_pb2.Query.Relation.GT
class GtEq(OrderingRelation):
    """
    Greater-than-or-equal relation: satisfied when the attribute value is at
    least the constraint value.

    Examples:
        All the books published after 2000, included

        >>> c = Constraint("year", GtEq(2000))
        >>> c.check(Description({"year": 2000}))
        True
        >>> c.check(Description({"year": 1990}))
        False
    """

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Test whether *value* is larger than or equal to the constraint value.

        :param value: the value to check.
        :return: ``True`` when ``value >= self.value``, ``False`` otherwise.
        """
        return value >= self.value

    def _operator(self):
        return query_pb2.Query.Relation.GTEQ
class Range(ConstraintType):
    """
    A constraint type that allows you to restrict the values of the attribute in a given range.
    Examples:
        All the books published after 2000, included
        >>> c = Constraint("year", Range((2000, 2005)))
        >>> c.check(Description({"year": 2000}))
        True
        >>> c.check(Description({"year": 2005}))
        True
        >>> c.check(Description({"year": 1990}))
        False
        >>> c.check(Description({"year": 2010}))
        False
    """
    def __init__(self, values: RANGE_TYPES) -> None:
        """
        Initialize a range constraint type.
        :param values: a pair of ``int``, a pair of ``str``, a pair of ``float` or
                     | a pair of :class:`~oef.schema.Location`.
        """
        self.values = values
    def to_pb(self) -> query_pb2.Query:
        """
        From an instance of Range to its associated Protobuf object.
        :return: the Protobuf object that contains the range.
        """
        range_ = query_pb2.Query.Range()
        # Dispatch on the type of the first endpoint to pick the matching
        # Protobuf pair message; both endpoints are assumed to share one type.
        if type(self.values[0]) == str:
            values = query_pb2.Query.StringPair()
            values.first = self.values[0]
            values.second = self.values[1]
            range_.s.CopyFrom(values)
        elif type(self.values[0]) == int:
            values = query_pb2.Query.IntPair()
            values.first = self.values[0]
            values.second = self.values[1]
            range_.i.CopyFrom(values)
        elif type(self.values[0]) == float:
            values = query_pb2.Query.DoublePair()
            values.first = self.values[0]
            values.second = self.values[1]
            range_.d.CopyFrom(values)
        elif type(self.values[0]) == Location:
            values = query_pb2.Query.LocationPair()
            values.first.CopyFrom(self.values[0].to_pb())
            values.second.CopyFrom(self.values[1].to_pb())
            range_.l.CopyFrom(values)
        return range_
    @classmethod
    def from_pb(cls, range_pb: query_pb2.Query.Range):
        """
        From the Range Protobuf object to the associated instance of ``Range``.
        :param range_pb: the Protobuf object that represents the range.
        :return: an instance of ``Range`` equivalent to the Protobuf object provided as input.
        """
        # Inspect the populated oneof field to rebuild the endpoint pair.
        range_case = range_pb.WhichOneof("pair")
        if range_case == "s":
            return cls((range_pb.s.first, range_pb.s.second))
        elif range_case == "i":
            return cls((range_pb.i.first, range_pb.i.second))
        elif range_case == "d":
            return cls((range_pb.d.first, range_pb.d.second))
        elif range_case == "l":
            return cls((Location.from_pb(range_pb.l.first), Location.from_pb(range_pb.l.second)))
    def check(self, value: RANGE_TYPES) -> bool:
        """
        Check if a value is in the range specified by the constraint.
        :param value: the value to check.
        :return: ``True`` if the value satisfy the constraint, ``False`` otherwise.
        """
        # NOTE(review): for Location pairs this relies on Location supporting
        # the <= operator -- confirm in oef.schema.
        left, right = self.values
        return left <= value <= right
    def _get_type(self) -> Type[Union[int, str, float, Location]]:
        # The compatible attribute type is the type of the first endpoint.
        return type(self.values[0])
    def __eq__(self, other):
        if type(other) != Range:
            return False
        else:
            return self.values == other.values
class Set(ConstraintType, ABC):
    """
    A constraint type that allows you to restrict the values of the attribute in a specific set.

    The specific operator of the relation is defined in the subclasses that extend this class.
    """

    def __init__(self, values: SET_TYPES) -> None:
        """
        Initialize a :class:`~oef.query.Set` constraint.

        :param values: a list of values for the set relation.
        """
        self.values = values

    @abstractmethod
    def _operator(self) -> query_pb2.Query.Set:
        """Return the Protobuf operator code of the set relation.

        FIX: declared as a plain abstract method (the original also stacked
        ``@property`` on it), because the subclasses implement it as a method
        and ``to_pb`` invokes it as ``self._operator()``.
        """

    def to_pb(self):
        """
        From an instance of one of the subclasses of :class:`~oef.query.Set` to its associated Protobuf object.

        :return: the Protobuf object that contains the set constraint.
        """
        set_ = query_pb2.Query.Set()
        set_.op = self._operator()
        # Dispatch on the element type to pick the matching Protobuf values
        # message; an empty set serializes as an (empty) string set.
        value_type = type(self.values[0]) if len(self.values) > 0 else str
        if value_type == str:
            values = query_pb2.Query.Set.Values.Strings()
            values.vals.extend(self.values)
            set_.vals.s.CopyFrom(values)
        elif value_type == bool:
            values = query_pb2.Query.Set.Values.Bools()
            values.vals.extend(self.values)
            set_.vals.b.CopyFrom(values)
        elif value_type == int:
            values = query_pb2.Query.Set.Values.Ints()
            values.vals.extend(self.values)
            set_.vals.i.CopyFrom(values)
        elif value_type == float:
            values = query_pb2.Query.Set.Values.Doubles()
            values.vals.extend(self.values)
            set_.vals.d.CopyFrom(values)
        elif value_type == Location:
            values = query_pb2.Query.Set.Values.Locations()
            values.vals.extend([value.to_pb() for value in self.values])
            set_.vals.l.CopyFrom(values)
        return set_

    @classmethod
    def from_pb(cls, set_pb: query_pb2.Query.Set):
        """
        From the Set Protobuf object to the associated instance of a subclass of :class:`~oef.query.Set`.

        :param set_pb: the Protobuf object that represents the set constraint.
        :return: the object of one of the subclasses of :class:`~oef.query.Set`.
        """
        # Map the operator code to the concrete subclass, then rebuild the
        # values from the populated oneof field.
        op_from_pb = {
            query_pb2.Query.Set.IN: In,
            query_pb2.Query.Set.NOTIN: NotIn
        }
        set_class = op_from_pb[set_pb.op]
        value_case = set_pb.vals.WhichOneof("values")
        if value_case == "s":
            return set_class(set_pb.vals.s.vals)
        elif value_case == "b":
            return set_class(set_pb.vals.b.vals)
        elif value_case == "i":
            return set_class(set_pb.vals.i.vals)
        elif value_case == "d":
            return set_class(set_pb.vals.d.vals)
        elif value_case == "l":
            locations = [Location.from_pb(loc) for loc in set_pb.vals.l.vals]
            return set_class(locations)

    def _get_type(self) -> Optional[Type[ATTRIBUTE_TYPES]]:
        # None (type-agnostic) when the set is empty; otherwise the type of
        # the first element.
        return type(next(iter(self.values))) if len(self.values) > 0 else None

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.values == other.values
class In(Set):
    """
    The 'in set' constraint type: the value of the attribute over which the
    constraint is defined must be a member of the supplied set of values.
    Examples:
        All the books whose genre is one of the following: `Horror`, `Science fiction`, `Non-fiction`
        >>> c = Constraint("genre", In(["horror", "science fiction", "non-fiction"]))
        >>> c.check(Description({"genre": "horror"}))
        True
        >>> c.check(Description({"genre": "thriller"}))
        False
    """
    def __init__(self, values: SET_TYPES):
        super().__init__(values)
    def _operator(self):
        # Protobuf enum tag for the inclusion operator.
        return query_pb2.Query.Set.IN
    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Decide whether *value* belongs to the constraint's value set.
        :param value: the value to test.
        :return: ``True`` on membership, ``False`` otherwise.
        """
        return value in self.values
class NotIn(Set):
    """
    The 'not in set' constraint type: the value of the attribute over which the
    constraint is defined must NOT be a member of the supplied set of values.
    Examples:
        All the books that have not been published neither in 1990, nor in 1995, nor in 2000
        >>> c = Constraint("year", NotIn([1990, 1995, 2000]))
        >>> c.check(Description({"year": 1991}))
        True
        >>> c.check(Description({"year": 2000}))
        False
    """
    def __init__(self, values: SET_TYPES):
        super().__init__(values)
    def _operator(self):
        # Protobuf enum tag for the exclusion operator.
        return query_pb2.Query.Set.NOTIN
    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Decide whether *value* is absent from the constraint's value set.
        :param value: the value to test.
        :return: ``True`` when the value is not in the set, ``False`` otherwise.
        """
        return value not in self.values
class Distance(ConstraintType):
    """
    The 'distance' constraint type: a location satisfies the constraint when it
    lies within a given radius (interpreted in km) around a fixed center.
    Examples:
        Define a location of interest, e.g. the Tour Eiffel
        >>> tour_eiffel = Location(48.8581064, 2.29447)
        Find all the locations close to the Tour Eiffel within 1 km
        >>> close_to_tour_eiffel = Distance(tour_eiffel, 1.0)
        Le Jules Verne, a famous restaurant close to the Tour Eiffel, satisfies the constraint.
        >>> le_jules_verne_restaurant = Location(48.8579675, 2.2951849)
        >>> close_to_tour_eiffel.check(le_jules_verne_restaurant)
        True
        The Colosseum does not satisfy the constraint (farther than 1 km from the Tour Eiffel).
        >>> colosseum = Location(41.8902102, 12.4922309)
        >>> close_to_tour_eiffel.check(colosseum)
        False
    """
    def __init__(self, center: Location, distance: float) -> None:
        """
        Create a ``Distance`` constraint.
        :param center: the reference location from which the distance is measured.
        :param distance: the maximum allowed distance from the center, in km.
        """
        self.center = center
        self.distance = distance
    def check(self, value: Location) -> bool:
        """Return ``True`` when *value* is within ``self.distance`` of the center."""
        return self.center.distance(value) <= self.distance
    def to_pb(self) -> query_pb2.Query.Distance:
        """
        Serialize this :class:`~oef.query.Distance` into its Protobuf counterpart.
        :return: the ``query_pb2.Query.Distance`` message for this constraint.
        """
        pb = query_pb2.Query.Distance()
        pb.center.CopyFrom(self.center.to_pb())
        pb.distance = self.distance
        return pb
    @classmethod
    def from_pb(cls, distance_pb: query_pb2.Query.Distance):
        """
        Deserialize a ``Distance`` Protobuf message into a :class:`~oef.query.Distance`.
        :param distance_pb: the Protobuf object that represents the ``~oef.query.Distance`` constraint.
        :return: an instance of ``~oef.query.Distance``.
        """
        return cls(Location.from_pb(distance_pb.center), distance_pb.distance)
    def _get_type(self) -> Optional[Type[ATTRIBUTE_TYPES]]:
        return Location
    def __eq__(self, other):
        """Equality holds only for another ``Distance`` with the same center and radius."""
        return type(other) == Distance \
            and self.center == other.center \
            and self.distance == other.distance
class Constraint(ConstraintExpr):
    """
    A class that represent a constraint over an attribute.
    """
    def __init__(self,
                 attribute_name: str,
                 constraint: ConstraintType) -> None:
        """
        Initialize a constraint over a named attribute.
        :param attribute_name: the name of the attribute the constraint applies to.
        :param constraint: the concrete constraint type (Relation, Range, Set or Distance).
        """
        self.attribute_name = attribute_name
        self.constraint = constraint
    def to_pb(self):
        """
        Return the associated Protobuf object.
        :return: a Protobuf object equivalent to the caller object.
        """
        constraint = query_pb2.Query.ConstraintExpr.Constraint()
        constraint.attribute_name = self.attribute_name
        # Route the payload to the protobuf oneof field that matches the
        # concrete constraint class.
        if isinstance(self.constraint, Relation):
            constraint.relation.CopyFrom(self.constraint.to_pb())
        elif isinstance(self.constraint, Range):
            constraint.range_.CopyFrom(self.constraint.to_pb())
        elif isinstance(self.constraint, Set):
            constraint.set_.CopyFrom(self.constraint.to_pb())
        elif isinstance(self.constraint, Distance):
            constraint.distance.CopyFrom(self.constraint.to_pb())
        else:
            raise ValueError("The constraint type is not valid: {}".format(self.constraint))
        return constraint
    @classmethod
    def from_pb(cls, constraint_pb: query_pb2.Query.ConstraintExpr.Constraint):
        """
        From the ``Constraint`` Protobuf object to the associated instance of ``Constraint``.
        :param constraint_pb: the Protobuf object that represents the ``Constraint`` object.
        :return: an instance of ``Constraint`` equivalent to the Protobuf object provided in input.
        """
        # Inverse of to_pb: inspect which oneof field is populated.
        constraint_case = constraint_pb.WhichOneof("constraint")
        # NOTE(review): constraint_type stays None when no known case matches — verify
        # whether a None constraint is acceptable downstream.
        constraint_type = None
        if constraint_case == "relation":
            constraint_type = Relation.from_pb(constraint_pb.relation)
        elif constraint_case == "set_":
            constraint_type = Set.from_pb(constraint_pb.set_)
        elif constraint_case == "range_":
            constraint_type = Range.from_pb(constraint_pb.range_)
        elif constraint_case == "distance":
            constraint_type = Distance.from_pb(constraint_pb.distance)
        return cls(constraint_pb.attribute_name, constraint_type)
    def check(self, description: Description) -> bool:
        """
        Check if a description satisfies the constraint. The implementation depends on the type of the constraint.
        :param description: the description to check.
        :return: ``True`` if the description satisfies the constraint, ``False`` otherwise.
        Examples:
            >>> attr_author = AttributeSchema("author" , str, True, "The author of the book.")
            >>> attr_year = AttributeSchema("year", int, True, "The year of publication of the book.")
            >>> c1 = Constraint("author", Eq("<NAME>"))
            >>> c2 = Constraint("year", Gt(1990))
            >>> book_1 = Description({"author": "<NAME>", "year": 1991})
            >>> book_2 = Description({"author": "<NAME>", "year": 1948})
            The ``"author"`` attribute instantiation satisfies the constraint, so the result is ``True``.
            >>> c1.check(book_1)
            True
            Here, the ``"author"`` does not satisfy the constraints. Hence, the result is ``False``.
            >>> c1.check(book_2)
            False
            In this case, there is a missing field specified by the query, that is ``"year"``
            So the result is ``False``, even in the case it is not required by the schema:
            >>> c2.check(Description({"author": "<NAME>"}))
            False
            If the type of some attribute of the description is not correct, the result is ``False``.
            In this case, the field ``"year"`` has a string instead of an integer:
            >>> c2.check(Description({"author": "<NAME>", "year": "1991"}))
            False
            >>> Constraint("position", Distance(Location(0.0, 0.0), 1.0)).check(Description({"position": "1.0,1.0"}))
            False
        """
        # if the name of the attribute is not present, return false.
        name = self.attribute_name
        if name not in description.values:
            return False
        # if the type of the value is different from the type of the attribute schema, return false.
        value = description.values[name]
        if type(value) != self.constraint._get_type():
            return False
        # dispatch the check to the right implementation for the concrete constraint type.
        return self.constraint.check(value)
    def is_valid(self, data_model: DataModel) -> bool:
        """Return ``True`` when the constrained attribute exists in *data_model* and is compatible."""
        # if the attribute name of the constraint is not present in the data model, the constraint is not valid.
        if self.attribute_name not in data_model.attributes_by_name:
            return False
        attribute = data_model.attributes_by_name[self.attribute_name]
        return self.constraint.is_valid(attribute)
    def __eq__(self, other):
        # Equal only to another Constraint with the same attribute name and constraint type.
        if type(other) != Constraint:
            return False
        else:
            return self.attribute_name == other.attribute_name and self.constraint == other.constraint
class Query(ProtobufSerializable):
    """
    Representation of a search that is to be performed. Currently a search is represented as a
    set of key value pairs that must be contained in the description of the service/ agent.
    Examples:
        Return all the books written by <NAME> published after 1990, and available as an e-book:
        >>> attr_author = AttributeSchema("author" , str, True, "The author of the book.")
        >>> attr_year = AttributeSchema("year", int, True, "The year of publication of the book.")
        >>> attr_ebook = AttributeSchema("ebook_available", bool, False, "If the book can be sold as an e-book.")
        >>> q = Query([
        ...     Constraint("author", Eq("<NAME>")),
        ...     Constraint("year", Gt(1990)),
        ...     Constraint("ebook_available", Eq(True))
        ... ])
        With a query, you can check that a `~oef.schema.Description` object satisfies the constraints.
        >>> q.check(Description({"author": "<NAME>", "year": 1991, "ebook_available": True}))
        True
        >>> q.check(Description({"author": "<NAME>", "year": 1948, "ebook_available": False}))
        False
    """
    def __init__(self,
                 constraints: List[ConstraintExpr],
                 model: Optional[DataModel] = None) -> None:
        """
        Initialize a query.
        :param constraints: a list of ``Constraint``.
        :param model: the data model where the query is defined.
        :raises ValueError: if the constraint list is empty or not valid for *model*.
        """
        self.constraints = constraints
        self.model = model
        # Fail fast on malformed queries instead of at serialization/search time.
        self._check_validity()
    def to_pb(self) -> query_pb2.Query.Model:
        """
        Return the associated Protobuf object.
        :return: a Protobuf object equivalent to the caller object.
        """
        query = query_pb2.Query.Model()
        # Each constraint expression serializes itself via the shared helper.
        constraint_expr_pbs = [ConstraintExpr._to_pb(constraint) for constraint in self.constraints]
        query.constraints.extend(constraint_expr_pbs)
        # The data model is optional on the wire.
        if self.model is not None:
            query.model.CopyFrom(self.model.to_pb())
        return query
    @classmethod
    def from_pb(cls, query: query_pb2.Query.Model):
        """
        From the ``Query`` Protobuf object to the associated instance of :class:`~oef.query.Query`.
        :param query: the Protobuf object that represents the :class:`~oef.query.Query` object.
        :return: an instance of :class:`~oef.query.Query` equivalent to the Protobuf object provided in input.
        """
        constraints = [ConstraintExpr._from_pb(c) for c in query.constraints]
        return cls(constraints, DataModel.from_pb(query.model) if query.HasField("model") else None)
    def check(self, description: Description) -> bool:
        """
        Check if a description satisfies the constraints of the query.
        The constraints are interpreted as conjunction.
        :param description: the description to check.
        :return: ``True`` if the description satisfies all the constraints, ``False`` otherwise.
        """
        return all(c.check(description) for c in self.constraints)
    def is_valid(self, data_model: DataModel) -> bool:
        """
        Given a data model, check whether the query is valid for that data model.
        :return: ``True`` if the query is compliant with the data model, ``False`` otherwise.
        """
        # No data model means no schema to violate.
        if data_model is None:
            return True
        return all(c.is_valid(data_model) for c in self.constraints)
    def _check_validity(self):
        """Check whether the :class:`~oef.query.Query` object is valid.
        :return ``None``
        :raises ValueError: if the query does not satisfy some sanity requirements."""
        if len(self.constraints) < 1:
            raise ValueError("Invalid input value for type '{}': empty list of constraints. The number of "
                             "constraints must be at least 1.".format(type(self).__name__))
        if not self.is_valid(self.model):
            raise ValueError("Invalid input value for type '{}': the query is not valid "
                             "for the given data model.".format(type(self).__name__))
    def __eq__(self, other):
        # Equal only to another Query with the same constraints and model.
        if type(other) != Query:
            return False
        return self.constraints == other.constraints and self.model == other.model
| [
"oef.query_pb2.Query.Relation",
"oef.query_pb2.Query.DoublePair",
"oef.query_pb2.Query.Set.Values.Ints",
"oef.query_pb2.Query.Set",
"oef.query_pb2.Query.Value",
"oef.query_pb2.Query.Set.Values.Doubles",
"oef.query_pb2.Query.ConstraintExpr.Or",
"oef.schema.DataModel.from_pb",
"oef.query_pb2.Query.Mod... | [((2671, 2703), 'oef.query_pb2.Query.ConstraintExpr', 'query_pb2.Query.ConstraintExpr', ([], {}), '()\n', (2701, 2703), True, 'import oef.query_pb2 as query_pb2\n'), ((4906, 4942), 'oef.query_pb2.Query.ConstraintExpr.And', 'query_pb2.Query.ConstraintExpr.And', ([], {}), '()\n', (4940, 4942), True, 'import oef.query_pb2 as query_pb2\n'), ((7831, 7866), 'oef.query_pb2.Query.ConstraintExpr.Or', 'query_pb2.Query.ConstraintExpr.Or', ([], {}), '()\n', (7864, 7866), True, 'import oef.query_pb2 as query_pb2\n'), ((10874, 10910), 'oef.query_pb2.Query.ConstraintExpr.Not', 'query_pb2.Query.ConstraintExpr.Not', ([], {}), '()\n', (10908, 10910), True, 'import oef.query_pb2 as query_pb2\n'), ((14999, 15025), 'oef.query_pb2.Query.Relation', 'query_pb2.Query.Relation', ([], {}), '()\n', (15023, 15025), True, 'import oef.query_pb2 as query_pb2\n'), ((15087, 15110), 'oef.query_pb2.Query.Value', 'query_pb2.Query.Value', ([], {}), '()\n', (15108, 15110), True, 'import oef.query_pb2 as query_pb2\n'), ((22381, 22404), 'oef.query_pb2.Query.Range', 'query_pb2.Query.Range', ([], {}), '()\n', (22402, 22404), True, 'import oef.query_pb2 as query_pb2\n'), ((25561, 25582), 'oef.query_pb2.Query.Set', 'query_pb2.Query.Set', ([], {}), '()\n', (25580, 25582), True, 'import oef.query_pb2 as query_pb2\n'), ((31667, 31693), 'oef.query_pb2.Query.Distance', 'query_pb2.Query.Distance', ([], {}), '()\n', (31691, 31693), True, 'import oef.query_pb2 as query_pb2\n'), ((32211, 32247), 'oef.schema.Location.from_pb', 'Location.from_pb', (['distance_pb.center'], {}), '(distance_pb.center)\n', (32227, 32247), False, 'from oef.schema import ATTRIBUTE_TYPES, AttributeSchema, DataModel, ProtobufSerializable, Description, Location\n'), ((33072, 33115), 'oef.query_pb2.Query.ConstraintExpr.Constraint', 'query_pb2.Query.ConstraintExpr.Constraint', ([], {}), '()\n', (33113, 33115), True, 'import oef.query_pb2 as query_pb2\n'), ((39530, 39553), 'oef.query_pb2.Query.Model', 
'query_pb2.Query.Model', ([], {}), '()\n', (39551, 39553), True, 'import oef.query_pb2 as query_pb2\n'), ((22466, 22494), 'oef.query_pb2.Query.StringPair', 'query_pb2.Query.StringPair', ([], {}), '()\n', (22492, 22494), True, 'import oef.query_pb2 as query_pb2\n'), ((25746, 25782), 'oef.query_pb2.Query.Set.Values.Strings', 'query_pb2.Query.Set.Values.Strings', ([], {}), '()\n', (25780, 25782), True, 'import oef.query_pb2 as query_pb2\n'), ((22681, 22706), 'oef.query_pb2.Query.IntPair', 'query_pb2.Query.IntPair', ([], {}), '()\n', (22704, 22706), True, 'import oef.query_pb2 as query_pb2\n'), ((25922, 25956), 'oef.query_pb2.Query.Set.Values.Bools', 'query_pb2.Query.Set.Values.Bools', ([], {}), '()\n', (25954, 25956), True, 'import oef.query_pb2 as query_pb2\n'), ((40331, 40361), 'oef.schema.DataModel.from_pb', 'DataModel.from_pb', (['query.model'], {}), '(query.model)\n', (40348, 40361), False, 'from oef.schema import ATTRIBUTE_TYPES, AttributeSchema, DataModel, ProtobufSerializable, Description, Location\n'), ((22895, 22923), 'oef.query_pb2.Query.DoublePair', 'query_pb2.Query.DoublePair', ([], {}), '()\n', (22921, 22923), True, 'import oef.query_pb2 as query_pb2\n'), ((26095, 26128), 'oef.query_pb2.Query.Set.Values.Ints', 'query_pb2.Query.Set.Values.Ints', ([], {}), '()\n', (26126, 26128), True, 'import oef.query_pb2 as query_pb2\n'), ((23115, 23145), 'oef.query_pb2.Query.LocationPair', 'query_pb2.Query.LocationPair', ([], {}), '()\n', (23143, 23145), True, 'import oef.query_pb2 as query_pb2\n'), ((26269, 26305), 'oef.query_pb2.Query.Set.Values.Doubles', 'query_pb2.Query.Set.Values.Doubles', ([], {}), '()\n', (26303, 26305), True, 'import oef.query_pb2 as query_pb2\n'), ((26449, 26487), 'oef.query_pb2.Query.Set.Values.Locations', 'query_pb2.Query.Set.Values.Locations', ([], {}), '()\n', (26485, 26487), True, 'import oef.query_pb2 as query_pb2\n'), ((14734, 14766), 'oef.schema.Location.from_pb', 'Location.from_pb', (['relation.val.l'], {}), '(relation.val.l)\n', 
(14750, 14766), False, 'from oef.schema import ATTRIBUTE_TYPES, AttributeSchema, DataModel, ProtobufSerializable, Description, Location\n'), ((24054, 24088), 'oef.schema.Location.from_pb', 'Location.from_pb', (['range_pb.l.first'], {}), '(range_pb.l.first)\n', (24070, 24088), False, 'from oef.schema import ATTRIBUTE_TYPES, AttributeSchema, DataModel, ProtobufSerializable, Description, Location\n'), ((24090, 24125), 'oef.schema.Location.from_pb', 'Location.from_pb', (['range_pb.l.second'], {}), '(range_pb.l.second)\n', (24106, 24125), False, 'from oef.schema import ATTRIBUTE_TYPES, AttributeSchema, DataModel, ProtobufSerializable, Description, Location\n'), ((27576, 27597), 'oef.schema.Location.from_pb', 'Location.from_pb', (['loc'], {}), '(loc)\n', (27592, 27597), False, 'from oef.schema import ATTRIBUTE_TYPES, AttributeSchema, DataModel, ProtobufSerializable, Description, Location\n')] |
import numpy as np
from algorithm.base import Algorithm
class Greedy(Algorithm):
    """Greedy 0/1-knapsack heuristic: take items in descending profit/weight order."""

    def __init__(self, knapsack):
        """
        :param knapsack: dict with keys ``'capacity'`` (one-element sequence),
            ``'weights'`` and ``'profits'`` (parallel sequences of equal length).
        """
        assert isinstance(knapsack, dict)
        self.capacity = knapsack['capacity'][0]
        self.weights = knapsack['weights']
        self.profits = knapsack['profits']
        self.n = len(knapsack['weights'])

    @property
    def name(self):
        """Human-readable algorithm name."""
        return 'Greedy'

    def solve(self):
        """
        Greedily fill the knapsack.

        :return: a 0/1 list of length ``n`` where 1 marks a selected item.
        """
        # Rank items by profit-per-unit-weight, best first.  enumerate/zip replaces
        # the original np.arange index array and positional tuple indexing.
        ratios = [(i, p / w) for i, (w, p) in enumerate(zip(self.weights, self.profits))]
        ratios.sort(key=lambda item: item[1], reverse=True)
        optim_set = np.zeros(self.n, dtype=np.int64)
        cur_weight = 0
        for index, _ in ratios:
            # Take every item that still fits; items that don't fit are simply skipped
            # (the original `else: continue` was redundant).
            if cur_weight + self.weights[index] <= self.capacity:
                optim_set[index] = 1
                cur_weight += self.weights[index]
        return optim_set.tolist()
| [
"numpy.zeros",
"numpy.arange"
] | [((736, 768), 'numpy.zeros', 'np.zeros', (['self.n'], {'dtype': 'np.int64'}), '(self.n, dtype=np.int64)\n', (744, 768), True, 'import numpy as np\n'), ((479, 496), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (488, 496), True, 'import numpy as np\n')] |
"""
File: tools.py
"""
import random
import time
def getRandomList(n):
    """Return the integers 0..n-1 arranged in a uniformly random order."""
    sequence = list(range(n))
    random.shuffle(sequence)
    return sequence
def compare(titleList, functionList, sizeList,
            dataSet=lambda x: x, counter=None, compareType="time"):
    """Runs a comparison test between the functions in functionList.

    :param titleList: column titles for the printed table (first entry labels the size column).
    :param functionList: functions invoked as ``f(data)`` or, when *counter* is given, ``f(data, counter)``.
    :param sizeList: problem sizes to test, one table row each.
    :param dataSet: factory that builds the input data for a given size.
    :param counter: optional dict of named counters, reset to 0 before each run.
    :param compareType: ``"time"`` to report wall-clock seconds, or a key of *counter* to report that count.
    """
    print()
    # Print a header indicating what value is being compared
    print(compareType.title().center(25 + (12 * (len(titleList) - 1)) + 1, "-") + "\n")
    # Print the header for the table of runtimes
    headerString = "{:>25s}" + "{:>12s}" * (len(titleList) - 1) + "\n"
    print(headerString.format(*titleList))
    # Testing set
    for size in sizeList:
        # Print the lefthand label of the table
        print(" Size: {:>5d} ".format(size), end="", flush=True)
        # Test each function
        for function in functionList:
            # Create the data set (fresh per function, so no run mutates another's input)
            data = dataSet(size)
            # When did we start the test
            startTime = time.time()
            # Detect a counter variable
            if counter:
                # Reset the counter
                for key in counter.keys():
                    counter[key] = 0
                function(data, counter)
            else:
                function(data)
            # When did we end the test
            endTime = time.time()
            # Display in nice formatting the compare type
            if compareType == "time":
                value = endTime - startTime
                print("{:>12.4f}".format(value), end="", flush=True)
            elif compareType in counter.keys():
                value = counter[compareType]
                print("{:>12d}".format(value), end="", flush=True)
            else:
                # Unknown metric: report and abort the whole comparison.
                print("ERROR: Unknown compare type " + compareType)
                return
        print()
    print()
def show(n, function, dataSet=lambda x: x):
"""Shows the data returned by function."""
print()
data = dataSet(n)
print(function(data))
print()
| [
"time.time",
"random.shuffle"
] | [((180, 201), 'random.shuffle', 'random.shuffle', (['items'], {}), '(items)\n', (194, 201), False, 'import random\n'), ((1135, 1146), 'time.time', 'time.time', ([], {}), '()\n', (1144, 1146), False, 'import time\n'), ((1502, 1513), 'time.time', 'time.time', ([], {}), '()\n', (1511, 1513), False, 'import time\n')] |
#!/usr/bin/env python
#
# This library is for Grove - Button(https://www.seeedstudio.com/s/Grove-Button-p-766.html)
#
# This is the library for Grove Base Hat which used to connect grove sensors for raspberry pi.
#
'''
## License
The MIT License (MIT)
Grove Base Hat for the Raspberry Pi, used to connect grove sensors.
Copyright (C) 2018 Seeed Technology Co.,Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
from grove.button import Button
from grove.factory import Factory
class GroveButton(object):
    """Grove button wrapper that turns raw GPIO level events into press/release callbacks."""
    def __init__(self, pin):
        # High = pressed
        self.__btn = Factory.getButton("GPIO-HIGH", pin)
        # Timestamp of the most recent event, used to compute the delta passed to callbacks.
        self.__last_time = time.time()
        self.__on_press = None
        self.__on_release = None
        # Register our handler for events emitted by the underlying button driver.
        self.__btn.on_event(self, GroveButton.__handle_event)
    @property
    def on_press(self):
        """Callback invoked as ``on_press(dt)`` when the button goes down; None if unset."""
        return self.__on_press
    @on_press.setter
    def on_press(self, callback):
        # Silently ignore non-callable values instead of raising.
        if not callable(callback):
            return
        self.__on_press = callback
    @property
    def on_release(self):
        """Callback invoked as ``on_release(dt)`` when the button goes up; None if unset."""
        return self.__on_release
    @on_release.setter
    def on_release(self, callback):
        # Silently ignore non-callable values instead of raising.
        if not callable(callback):
            return
        self.__on_release = callback
    def __handle_event(self, evt):
        # dt = seconds since the previous event; for a release this is the time the
        # button was held down (see the demo in main()).
        dt, self.__last_time = evt["time"] - self.__last_time, evt["time"]
        # print("event index:{} event:{} pressed:{}".format(evt["index"], evt["code"], evt["pressed"]))
        if evt["code"] == Button.EV_LEVEL_CHANGED:
            if evt["pressed"]:
                if callable(self.__on_press):
                    self.__on_press(dt)
            else:
                if callable(self.__on_release):
                    self.__on_release(dt)
# Module-level alias; presumably consumed by generic Grove tooling that looks
# for a `Grove` name — verify before renaming.
Grove = GroveButton
def main():
    """Demo entry point: wire press/release printers to a button on a CLI-selected pin."""
    from grove.helper import SlotHelper
    # Resolve the GPIO pin number from the command-line slot argument.
    sh = SlotHelper(SlotHelper.GPIO)
    pin = sh.argv2pin()
    button = GroveButton(pin)
    def on_press(t):
        print('Button is pressed')
    def on_release(t):
        # t is the time elapsed since the matching press event.
        print("Button is released, pressed for {0} seconds".format(round(t,6)))
    button.on_press = on_press
    button.on_release = on_release
    # Idle loop: all work happens in the button driver's event callbacks.
    while True:
        time.sleep(1)
# Script entry point: run the interactive button demo.
if __name__ == '__main__':
    main()
| [
"grove.factory.Factory.getButton",
"time.sleep",
"time.time",
"grove.helper.SlotHelper"
] | [((2797, 2824), 'grove.helper.SlotHelper', 'SlotHelper', (['SlotHelper.GPIO'], {}), '(SlotHelper.GPIO)\n', (2807, 2824), False, 'from grove.helper import SlotHelper\n'), ((1581, 1616), 'grove.factory.Factory.getButton', 'Factory.getButton', (['"""GPIO-HIGH"""', 'pin'], {}), "('GPIO-HIGH', pin)\n", (1598, 1616), False, 'from grove.factory import Factory\n'), ((1644, 1655), 'time.time', 'time.time', ([], {}), '()\n', (1653, 1655), False, 'import time\n'), ((3132, 3145), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3142, 3145), False, 'import time\n')] |
"""Load the Tatoeba dataset."""
import sys
import os
import csv
import subprocess
import time
from multiprocessing import Pool, Lock, cpu_count
from tqdm import tqdm
from scipy.io import wavfile
from python.params import MIN_EXAMPLE_LENGTH, MAX_EXAMPLE_LENGTH
from python.dataset.config import CACHE_DIR, CORPUS_DIR
from python.util.storage import delete_file_if_exists
from python.dataset import download
from python.dataset.txt_files import generate_txt
# Path to the Taboeba dataset.
__URL = 'https://downloads.tatoeba.org/audio/tatoeba_audio_eng.zip'
__MD5 = 'd76252fd704734fc3d8bf5b44e029809'
__NAME = 'tatoeba'
__FOLDER_NAME = 'tatoeba_audio_eng'
__SOURCE_PATH = os.path.join(CACHE_DIR, __FOLDER_NAME)
__TARGET_PATH = os.path.realpath(os.path.join(CORPUS_DIR, __FOLDER_NAME))
def tatoeba_loader(keep_archive):
    """Download, extract and build the output strings that can be written to the desired TXT files.
    Args:
        keep_archive (bool): Keep or delete the downloaded archive afterwards.
    Returns:
        str: String containing the output string that can be written to TXT files.
    """
    # Download and extract the dataset if necessary (MD5-verified).
    download.maybe_download(__URL, md5=__MD5, cache_archive=keep_archive)
    if not os.path.isdir(__SOURCE_PATH):
        raise ValueError('"{}" is not a directory.'.format(__SOURCE_PATH))
    # Download user ratings CSV file, used later to filter out low-rated samples.
    csv_path = os.path.join(__SOURCE_PATH, 'users_sentences.csv')
    download.download_with_progress('http://downloads.tatoeba.org/exports/users_sentences.csv',
                                    csv_path)
    assert os.path.exists(csv_path)
    # Tatoeba provides no train/dev/test split; everything becomes 'train'.
    target = 'train'
    # Generate the WAV and a string for the `<target>.txt` file.
    output = __tatoeba_loader(target)
    # Generate the `<target>.txt` file.
    txt_path = generate_txt(__NAME, target, output)
    # Cleanup extracted folder.
    download.cleanup_cache(__FOLDER_NAME)
    return txt_path
def __tatoeba_loader(target):
    """Build the output string that can be written to the desired TXT file.
    Args:
        target (str): Only 'train' is supported for the Tatoeba dataset.
    Returns:
        str: List containing the output string that can be written to TXT file.
    """
    if not os.path.isdir(__SOURCE_PATH):
        raise ValueError('"{}" is not a directory.'.format(__SOURCE_PATH))
    if target != 'train':
        raise ValueError('Invalid target. Tatoeba only has a train dataset.')
    validated_samples = set()  # Set of all sample IDs that have been validated.
    # Parse dataset meta data information to filter out low ranked samples.
    with open(os.path.join(__SOURCE_PATH, 'users_sentences.csv'), 'r') as csv_handle:
        csv_reader = csv.reader(csv_handle, delimiter='\t')
        csv_lines = list(csv_reader)
        # print('csv_header: username\tsentence_id\trating\tdate_added\tdate_modified')
        for username, _id, rating, _, _ in csv_lines:
            rating = int(rating)
            # Keep only samples rated 1 or better by the community.
            if rating >= 1:
                path = os.path.join(__SOURCE_PATH, 'audio', username, _id)
                validated_samples.add(path)
    samples = []  # List of dictionaries of all files and labels and in the dataset.
    # Parse dataset meta data information to filter out low ranked samples.
    with open(os.path.join(__SOURCE_PATH, 'sentences_with_audio.csv'), 'r') as csv_handle:
        csv_reader = csv.reader(csv_handle, delimiter='\t')
        csv_lines = list(csv_reader)
        csv_lines = csv_lines[1:]  # Remove CSV header.
        # print('csv_header: sentence_id\tusername\ttext')
        for _id, username, text in tqdm(csv_lines,
                                        desc='Loading Tatoeba CSV', total=len(csv_lines),
                                        file=sys.stdout, unit='entries', dynamic_ncols=True):
            path = os.path.join(__SOURCE_PATH, 'audio', username, _id)
            # Only keep transcripts whose audio passed the rating filter above.
            if path in validated_samples:
                samples.append({'path': path, 'text': text})
    # Create target folder structure.
    for sample in samples:
        dir_path = os.path.join(__TARGET_PATH, os.path.relpath(sample['path'], __SOURCE_PATH))
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    lock = Lock()
    buffer = []
    missing_mp3_counter = 0
    # Convert all MP3s in parallel; results are collected in the parent process.
    with Pool(processes=cpu_count()) as pool:
        for result in tqdm(pool.imap_unordered(__tatoeba_loader_helper, samples, chunksize=1),
                           desc='Converting Tatoeba MP3 to WAV', total=len(samples),
                           file=sys.stdout, unit='files', dynamic_ncols=True):
            # NOTE(review): this loop runs in a single (parent) thread, so the lock
            # around buffer/counter updates looks unnecessary — confirm before removing.
            lock.acquire()
            if result is None:
                missing_mp3_counter += 1
            else:
                buffer.append(result)
            lock.release()
    print('WARN: {} MP3 files listed in the CSV could not be found.'
          .format(missing_mp3_counter))
    return buffer
def __tatoeba_loader_helper(sample):
    """Convert one sample's MP3 to a 16 kHz mono WAV and build its transcript line.

    Args:
        sample (dict): ``{'path': <base path without extension>, 'text': <transcript>}``.
    Returns:
        str or None: ``'<relative wav path> <text>\\n'`` on success, ``None`` if the MP3
        is missing/too small or the resulting WAV length is out of bounds.
    """
    path = sample['path']
    text = sample['text']
    mp3_path = '{}.mp3'.format(path)
    wav_path = '{}.wav'.format(path)
    wav_path = os.path.join(__TARGET_PATH, os.path.relpath(wav_path, __SOURCE_PATH))
    # Check if audio file MP3 exists.
    if not os.path.isfile(mp3_path):
        # print('WARN: Audio file missing: {}'.format(mp3_path))
        return None
    # Check if file isn't empty (4048 bytes appears to be a heuristic minimum size).
    try:
        if os.path.getsize(mp3_path) <= 4048:
            return None
    except OSError:
        return None
    delete_file_if_exists(wav_path)
    # Convert MP3 file into WAV file, reduce volume to 0.95, downsample to 16kHz mono sound.
    ret = subprocess.call(['sox', '-v', '0.95', mp3_path, '-r', '16k', wav_path, 'remix', '1'])
    # Success is judged by the output file existing, not by sox's exit code.
    if not os.path.isfile(wav_path):
        raise RuntimeError('Failed to create WAV file with error code={}: {}'.format(ret, wav_path))
    # Validate that the example length is within boundaries.
    # Retried up to 5 times — presumably the file may not be fully flushed yet; verify.
    for i in range(5):
        try:
            (sr, y) = wavfile.read(wav_path)
            length_sec = len(y) / sr
            if not MIN_EXAMPLE_LENGTH <= length_sec <= MAX_EXAMPLE_LENGTH:
                return None
            break
        except ValueError:
            print('WARN: Could not load ({}/5) wavfile: {}'.format(i, wav_path))
            if i == 4:
                raise
            time.sleep(1)
    # TODO: Copy used files to corpus dir
    wav_path = os.path.relpath(wav_path, CORPUS_DIR)
    return '{} {}\n'.format(wav_path, text.strip())
# Test download script.
# Manual smoke test: downloads the full corpus — network and disk heavy.
if __name__ == '__main__':
    print('Tatoeba txt_paths: ', tatoeba_loader(True))
    print('\nDone.')
| [
"os.path.exists",
"os.path.getsize",
"python.dataset.txt_files.generate_txt",
"os.makedirs",
"multiprocessing.Lock",
"python.dataset.download.download_with_progress",
"python.util.storage.delete_file_if_exists",
"os.path.join",
"multiprocessing.cpu_count",
"time.sleep",
"os.path.isfile",
"os.p... | [((673, 711), 'os.path.join', 'os.path.join', (['CACHE_DIR', '__FOLDER_NAME'], {}), '(CACHE_DIR, __FOLDER_NAME)\n', (685, 711), False, 'import os\n'), ((745, 784), 'os.path.join', 'os.path.join', (['CORPUS_DIR', '__FOLDER_NAME'], {}), '(CORPUS_DIR, __FOLDER_NAME)\n', (757, 784), False, 'import os\n'), ((1175, 1244), 'python.dataset.download.maybe_download', 'download.maybe_download', (['__URL'], {'md5': '__MD5', 'cache_archive': 'keep_archive'}), '(__URL, md5=__MD5, cache_archive=keep_archive)\n', (1198, 1244), False, 'from python.dataset import download\n'), ((1415, 1465), 'os.path.join', 'os.path.join', (['__SOURCE_PATH', '"""users_sentences.csv"""'], {}), "(__SOURCE_PATH, 'users_sentences.csv')\n", (1427, 1465), False, 'import os\n'), ((1470, 1576), 'python.dataset.download.download_with_progress', 'download.download_with_progress', (['"""http://downloads.tatoeba.org/exports/users_sentences.csv"""', 'csv_path'], {}), "(\n 'http://downloads.tatoeba.org/exports/users_sentences.csv', csv_path)\n", (1501, 1576), False, 'from python.dataset import download\n'), ((1619, 1643), 'os.path.exists', 'os.path.exists', (['csv_path'], {}), '(csv_path)\n', (1633, 1643), False, 'import os\n'), ((1824, 1860), 'python.dataset.txt_files.generate_txt', 'generate_txt', (['__NAME', 'target', 'output'], {}), '(__NAME, target, output)\n', (1836, 1860), False, 'from python.dataset.txt_files import generate_txt\n'), ((1898, 1935), 'python.dataset.download.cleanup_cache', 'download.cleanup_cache', (['__FOLDER_NAME'], {}), '(__FOLDER_NAME)\n', (1920, 1935), False, 'from python.dataset import download\n'), ((4265, 4271), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (4269, 4271), False, 'from multiprocessing import Pool, Lock, cpu_count\n'), ((5501, 5532), 'python.util.storage.delete_file_if_exists', 'delete_file_if_exists', (['wav_path'], {}), '(wav_path)\n', (5522, 5532), False, 'from python.util.storage import delete_file_if_exists\n'), ((5637, 5726), 'subprocess.call', 
'subprocess.call', (["['sox', '-v', '0.95', mp3_path, '-r', '16k', wav_path, 'remix', '1']"], {}), "(['sox', '-v', '0.95', mp3_path, '-r', '16k', wav_path,\n 'remix', '1'])\n", (5652, 5726), False, 'import subprocess\n'), ((6399, 6436), 'os.path.relpath', 'os.path.relpath', (['wav_path', 'CORPUS_DIR'], {}), '(wav_path, CORPUS_DIR)\n', (6414, 6436), False, 'import os\n'), ((1256, 1284), 'os.path.isdir', 'os.path.isdir', (['__SOURCE_PATH'], {}), '(__SOURCE_PATH)\n', (1269, 1284), False, 'import os\n'), ((2262, 2290), 'os.path.isdir', 'os.path.isdir', (['__SOURCE_PATH'], {}), '(__SOURCE_PATH)\n', (2275, 2290), False, 'import os\n'), ((2740, 2778), 'csv.reader', 'csv.reader', (['csv_handle'], {'delimiter': '"""\t"""'}), "(csv_handle, delimiter='\\t')\n", (2750, 2778), False, 'import csv\n'), ((3416, 3454), 'csv.reader', 'csv.reader', (['csv_handle'], {'delimiter': '"""\t"""'}), "(csv_handle, delimiter='\\t')\n", (3426, 3454), False, 'import csv\n'), ((5140, 5180), 'os.path.relpath', 'os.path.relpath', (['wav_path', '__SOURCE_PATH'], {}), '(wav_path, __SOURCE_PATH)\n', (5155, 5180), False, 'import os\n'), ((5232, 5256), 'os.path.isfile', 'os.path.isfile', (['mp3_path'], {}), '(mp3_path)\n', (5246, 5256), False, 'import os\n'), ((5734, 5758), 'os.path.isfile', 'os.path.isfile', (['wav_path'], {}), '(wav_path)\n', (5748, 5758), False, 'import os\n'), ((2647, 2697), 'os.path.join', 'os.path.join', (['__SOURCE_PATH', '"""users_sentences.csv"""'], {}), "(__SOURCE_PATH, 'users_sentences.csv')\n", (2659, 2697), False, 'import os\n'), ((3318, 3373), 'os.path.join', 'os.path.join', (['__SOURCE_PATH', '"""sentences_with_audio.csv"""'], {}), "(__SOURCE_PATH, 'sentences_with_audio.csv')\n", (3330, 3373), False, 'import os\n'), ((3862, 3913), 'os.path.join', 'os.path.join', (['__SOURCE_PATH', '"""audio"""', 'username', '_id'], {}), "(__SOURCE_PATH, 'audio', username, _id)\n", (3874, 3913), False, 'import os\n'), ((4130, 4176), 'os.path.relpath', 'os.path.relpath', 
(["sample['path']", '__SOURCE_PATH'], {}), "(sample['path'], __SOURCE_PATH)\n", (4145, 4176), False, 'import os\n'), ((4193, 4217), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (4207, 4217), False, 'import os\n'), ((4231, 4252), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (4242, 4252), False, 'import os\n'), ((5397, 5422), 'os.path.getsize', 'os.path.getsize', (['mp3_path'], {}), '(mp3_path)\n', (5412, 5422), False, 'import os\n'), ((5981, 6003), 'scipy.io.wavfile.read', 'wavfile.read', (['wav_path'], {}), '(wav_path)\n', (5993, 6003), False, 'from scipy.io import wavfile\n'), ((3043, 3094), 'os.path.join', 'os.path.join', (['__SOURCE_PATH', '"""audio"""', 'username', '_id'], {}), "(__SOURCE_PATH, 'audio', username, _id)\n", (3055, 3094), False, 'import os\n'), ((4340, 4351), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (4349, 4351), False, 'from multiprocessing import Pool, Lock, cpu_count\n'), ((6327, 6340), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6337, 6340), False, 'import time\n')] |
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from .mask_point import MaskPoint
from ..base_object import BaseObject
class MaskPosition(BaseObject):
    """
    Position on a photo where a mask is placed

    :param point: Part of the face, relative to which the mask is placed
    :type point: :class:`MaskPoint`

    :param x_shift: Shift by X-axis measured in widths of the mask scaled to the face size, from left to right. (For example, -1.0 will place the mask just to the left of the default mask position)
    :type x_shift: :class:`float`

    :param y_shift: Shift by Y-axis measured in heights of the mask scaled to the face size, from top to bottom. (For example, 1.0 will place the mask just below the default mask position)
    :type y_shift: :class:`float`

    :param scale: Mask scaling coefficient. (For example, 2.0 means a doubled size)
    :type scale: :class:`float`
    """

    # Type discriminator: serialized/parsed as the "@type" key of the JSON payload.
    ID: str = Field("maskPosition", alias="@type")
    point: MaskPoint
    x_shift: float
    y_shift: float
    scale: float

    @staticmethod
    def read(q: dict) -> MaskPosition:
        # construct() bypasses pydantic validation; q is assumed to be
        # well-formed API data — TODO confirm the upstream parser guarantees this.
        return MaskPosition.construct(**q)
| [
"pydantic.Field"
] | [((1372, 1408), 'pydantic.Field', 'Field', (['"""maskPosition"""'], {'alias': '"""@type"""'}), "('maskPosition', alias='@type')\n", (1377, 1408), False, 'from pydantic import Field\n')] |
from flask import Flask, request, json
from TextSummarizer import Preprocess
app = Flask(__name__)
# NOTE(review): debug mode is enabled unconditionally; the Werkzeug debugger
# allows arbitrary code execution — disable for any non-development deployment.
app.debug = True
@app.route('/summarize', methods=['POST'])
def summrize():
    """Summarize the text in the JSON POST body.

    Expects ``{"text": "..."}`` and responds with a JSON object carrying the
    run log plus the lead / refresh / gold summaries.
    """
    # Validate the request up front: without this, a POST with no JSON body
    # (or without a "text" key) raised an unhandled TypeError/KeyError and
    # surfaced as an HTTP 500 instead of a client error.
    payload = request.get_json(silent=True)
    if not payload or 'text' not in payload:
        return json.dumps({'status': 'ERROR', 'message': "missing 'text' field"}), 400
    text = payload['text']
    serve = Preprocess()
    log, slead, srefresh, sgold = serve.run_textmode(text)
    return json.dumps({'status': 'OK', 'log': log, "lead" : slead, "refresh" : srefresh, "sgold" : sgold})
if __name__ == '__main__':
    # 0.0.0.0 listens on all interfaces; use_reloader=False keeps Flask from
    # spawning a second watcher process.
    app.run(host="0.0.0.0", use_reloader=False)
"TextSummarizer.Preprocess",
"flask.json.dumps",
"flask.Flask"
] | [((85, 100), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (90, 100), False, 'from flask import Flask, request, json\n'), ((222, 234), 'TextSummarizer.Preprocess', 'Preprocess', ([], {}), '()\n', (232, 234), False, 'from TextSummarizer import Preprocess\n'), ((305, 401), 'flask.json.dumps', 'json.dumps', (["{'status': 'OK', 'log': log, 'lead': slead, 'refresh': srefresh, 'sgold': sgold\n }"], {}), "({'status': 'OK', 'log': log, 'lead': slead, 'refresh': srefresh,\n 'sgold': sgold})\n", (315, 401), False, 'from flask import Flask, request, json\n')] |
'''
This is a ready to use package consisting of several supervised machine learning
algorithms with a predefined parameter-grid (which then you can update its values)
to fine tune these models on your data. You can calculate some features from your
peptides data set using our feature extraction tool to feed these machines.
Authors: <NAME>; <NAME>;
'''
import pandas as pd
import numpy as np
import sys, getopt, os
from sklearn.feature_selection import SelectKBest, chi2, RFE, SelectFromModel, f_classif
from sklearn.svm import SVC
from xgboost.sklearn import XGBClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_validate
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error
from imblearn.metrics import specificity_score
from sklearn.metrics import matthews_corrcoef
from sklearn.utils import parallel_backend
from imblearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
import tools
import hparams
def print_performance_results(results, model, model_name):
    """Print the grid search's best parameters and the cross-validation scores.

    :param results: dict returned by ``sklearn.model_selection.cross_validate``;
        keys ``test_<metric>`` map to arrays of per-fold scores.
    :param model: fitted Pipeline whose step ``model_name`` is a GridSearchCV.
    :param model_name: name of the GridSearchCV step inside the pipeline.
    """
    print("* * * * * \n\nBest parameters: \n\n {}\n\n* * * * * ".format(model.named_steps[model_name].best_params_))
    print("\n - - Train performance results - -\n")
    # One (display label, cross_validate key) pair per metric replaces
    # fourteen near-identical print statements; order matches the original.
    metrics = [
        ('f1_score', 'test_f1_score'),
        ('roc_auc_score', 'test_roc_auc_score'),
        ('accuracy', 'test_accuracy'),
        ('precision', 'test_precision'),
        ('recall', 'test_recall'),
        ('specificity', 'test_specificity'),
        ('mcc', 'test_mcc'),
    ]
    for label, key in metrics:
        print('{}, mean: {:0.3}'.format(label, results[key].mean()))
        print('{}, std: {:0.3}'.format(label, results[key].std()))
    print(" - - - - - - - - - - - - - - - - - - - -")
def write_performance_results_to_file(results, model, model_name, output_file_path="scores.csv"):
    """Write per-metric CV mean/std to a CSV and the best parameters to a file.

    :param results: dict returned by ``cross_validate`` with ``test_<metric>``
        arrays of per-fold scores.
    :param model: fitted Pipeline whose step ``model_name`` is a GridSearchCV.
    :param model_name: name of the GridSearchCV step inside the pipeline.
    :param output_file_path: target CSV path; the best-parameters file reuses
        it with ``.csv`` replaced by ``-BestParams.txt``.
    """
    import json  # hoisted from inside the `with` block below

    # One (output label, cross_validate key) pair per metric replaces
    # fourteen near-identical write calls; order matches the original.
    metrics = [
        ('f1_score', 'test_f1_score'),
        ('roc_auc_score', 'test_roc_auc_score'),
        ('accuracy', 'test_accuracy'),
        ('precision', 'test_precision'),
        ('recall', 'test_recall'),
        ('specificity', 'test_specificity'),
        ('mcc', 'test_mcc'),
    ]
    with open(output_file_path, "w") as ff:
        ff.write("\nmethod,value\n")
        for label, key in metrics:
            ff.write('{}_mean,{:0.3}\n'.format(label, results[key].mean()))
            ff.write('{}_std,{:0.3}\n'.format(label, results[key].std()))
    with open(output_file_path.replace(".csv", "-BestParams.txt"), "w") as ff:
        ff.write(json.dumps(model.named_steps[model_name].best_params_, indent=3))
def print_performance_results_for_test_data(model, X_test, y_test, columns_to_drop=None):
    """Predict on the held-out split and print one line per scoring metric.

    :param model: fitted estimator/pipeline exposing ``predict``.
    :param X_test: held-out features.
    :param y_test: held-out labels.
    :param columns_to_drop: unused; kept for backward compatibility with the
        existing call sites.  Default changed from the mutable ``[]`` (a shared
        list across calls) to ``None``.
    """
    predictions = model.predict(X_test)
    print("\n - - Test performance results - -\n")
    # (label, metric callable) pairs replace seven near-identical prints;
    # matthews_corrcoef's sample_weight=None was its default and is dropped.
    scorers = [
        ("accuracy_score", accuracy_score),
        ("precision_score", precision_score),
        ("recall_score", recall_score),
        ("f1_score", f1_score),
        ("roc_auc_score", roc_auc_score),
        ("specificity_score", specificity_score),
        ("matthews_corrcoef", matthews_corrcoef),
    ]
    for label, scorer in scorers:
        print("{}: {:0.3}".format(label, scorer(y_test, predictions)))
def write_performance_results_to_file_for_test_data(model, X_test, y_test, columns_to_drop=None, output_file_path="TestDataScores.csv"):
    """Predict on the held-out split and write one CSV row per scoring metric.

    :param model: fitted estimator/pipeline exposing ``predict``.
    :param X_test: held-out features.
    :param y_test: held-out labels.
    :param columns_to_drop: unused; kept for backward compatibility with the
        existing call sites.  Default changed from the mutable ``[]`` (a shared
        list across calls) to ``None``.
    :param output_file_path: target CSV path.
    """
    predictions = model.predict(X_test)
    # (label, metric callable) pairs replace seven near-identical writes;
    # matthews_corrcoef's sample_weight=None was its default and is dropped.
    scorers = [
        ("accuracy_score", accuracy_score),
        ("precision_score", precision_score),
        ("recall_score", recall_score),
        ("f1_score", f1_score),
        ("roc_auc_score", roc_auc_score),
        ("specificity_score", specificity_score),
        ("matthews_corrcoef", matthews_corrcoef),
    ]
    with open(output_file_path, "w") as ff:
        ff.write("method,value\n")
        for label, scorer in scorers:
            ff.write("{},{:0.3}\n".format(label, scorer(y_test, predictions)))
#######################################################################
## Creating models
#######################################################################
# Map each report column name to a GridSearchCV / cross_validate scorer.
scoring = {
    name: make_scorer(metric)
    for name, metric in (
        ('accuracy', accuracy_score),
        ('precision', precision_score),
        ('recall', recall_score),
        ('f1_score', f1_score),
        ('roc_auc_score', roc_auc_score),
        ('mcc', matthews_corrcoef),
        ('specificity', specificity_score),
    )
}
###############
### GaussianNB
###############
def run_GNB(X_train,
            X_test,
            y_train,
            y_test,
            input_file_path,
            output_root_dir,
            colsdel,
            n_splits = 10,
            n_repeats = 10,
            n_jobs = 8,
            postfix = "",
            save_model = 0,
            use_stan_scaler = 0):
    """Tune, cross-validate and evaluate a Gaussian Naive Bayes classifier.

    Builds a Pipeline of (optional ``StandardScaler``) -> ``GridSearchCV`` over
    ``hparams.GNB_parameter_space`` (refit on ``roc_auc_score``), scores it with
    repeated stratified k-fold CV on the training split, refits it on the whole
    training split, optionally saves it, and writes train/test score CSVs under
    ``output_root_dir``.

    :param input_file_path: unused here; kept so all run_* helpers share a signature.
    :param colsdel: dropped-column names forwarded to the reporting helpers.
    :param save_model: when > 0, persist the fitted pipeline as a joblib file.
    :param use_stan_scaler: when > 0, prepend feature standardization.
    """
    print("\n Working on the GaussianNB model . . .")
    estimators = []
    if use_stan_scaler > 0:
        estimators.append(('std', StandardScaler()))
    # The grid search is itself a pipeline step, so CV folds see the scaler
    # fitted only on their own training portion (no leakage).
    estimators.append(('GNB', GridSearchCV(GaussianNB(),
                        hparams.GNB_parameter_space,
                        cv = RepeatedStratifiedKFold(n_splits=n_splits , n_repeats= n_repeats),
                        n_jobs= n_jobs, refit= 'roc_auc_score', scoring = scoring)))
    model = Pipeline(estimators)
    # Outer CV estimates generalization of the whole tune-and-fit procedure.
    with parallel_backend('threading', n_jobs = n_jobs):
        results = cross_validate(model, X_train, y_train, n_jobs = n_jobs, cv = RepeatedStratifiedKFold(n_splits=n_splits , n_repeats= n_repeats), scoring = scoring, return_estimator=True)
    with parallel_backend('threading', n_jobs = n_jobs):
        model.fit(X_train,y_train)
    if save_model > 0:
        tools.save_model_joblib(model, "", "models/GNB-{}.joblib".format(postfix))
        # You can load models with load_model_joblib method
    model_name = "GNB"
    print_performance_results(results, model, model_name)
    write_performance_results_to_file(results, model, model_name, "{}GNB-{}-trainData.csv".format(output_root_dir, postfix))
    print_performance_results_for_test_data(model, X_test, y_test, colsdel,)
    write_performance_results_to_file_for_test_data(model, X_test, y_test, colsdel, "{}GNB-{}-testData.csv".format(output_root_dir, postfix))
###############
### KNeighborsClassifier
###############
def run_KNN(X_train,
            X_test,
            y_train,
            y_test,
            input_file_path,
            output_root_dir,
            colsdel,
            n_splits = 10,
            n_repeats = 10,
            n_jobs = 8,
            postfix = "",
            save_model = 0,
            use_stan_scaler = 0):
    """Tune, cross-validate and evaluate a k-nearest-neighbours classifier.

    Builds a Pipeline of (optional ``StandardScaler``) -> ``GridSearchCV`` over
    ``hparams.KNN_parameter_space`` (refit on ``roc_auc_score``), estimates its
    scores with repeated stratified k-fold CV, refits on the full training
    split, optionally saves the pipeline, and writes train/test score CSVs
    under ``output_root_dir``.

    :param input_file_path: unused here; kept so all run_* helpers share a signature.
    :param colsdel: dropped-column names forwarded to the reporting helpers.
    :param save_model: when > 0, persist the fitted pipeline as a joblib file.
    :param use_stan_scaler: when > 0, prepend feature standardization.
    """
    print("\n Working on the KNeighborsClassifier model . . .")
    model_name = "KNN"
    steps = []
    if use_stan_scaler > 0:
        steps.append(('std', StandardScaler()))
    search = GridSearchCV(
        KNeighborsClassifier(),
        hparams.KNN_parameter_space,
        cv=RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats),
        n_jobs=n_jobs,
        refit='roc_auc_score',
        scoring=scoring,
    )
    steps.append((model_name, search))
    pipeline = Pipeline(steps)
    with parallel_backend('threading', n_jobs=n_jobs):
        cv_results = cross_validate(
            pipeline,
            X_train,
            y_train,
            n_jobs=n_jobs,
            cv=RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats),
            scoring=scoring,
            return_estimator=True,
        )
    with parallel_backend('threading', n_jobs=n_jobs):
        pipeline.fit(X_train, y_train)
    if save_model > 0:
        # Reload later with tools.load_model_joblib.
        tools.save_model_joblib(pipeline, "", "models/KNN-{}.joblib".format(postfix))
    train_csv = "{}KNN-{}-trainData.csv".format(output_root_dir, postfix)
    test_csv = "{}KNN-{}-testData.csv".format(output_root_dir, postfix)
    print_performance_results(cv_results, pipeline, model_name)
    write_performance_results_to_file(cv_results, pipeline, model_name, train_csv)
    print_performance_results_for_test_data(pipeline, X_test, y_test, colsdel)
    write_performance_results_to_file_for_test_data(pipeline, X_test, y_test, colsdel, test_csv)
###################
#### SVM
###################
def run_SVM(X_train,
            X_test,
            y_train,
            y_test,
            input_file_path,
            output_root_dir,
            colsdel,
            n_splits = 10,
            n_repeats = 10,
            n_jobs = 8,
            postfix = "",
            save_model = 0,
            use_stan_scaler = 0):
    """Tune, cross-validate and evaluate a support-vector-machine classifier.

    Builds a Pipeline of (optional ``StandardScaler``) -> ``GridSearchCV`` over
    ``hparams.SVM_parameter_space`` (refit on ``roc_auc_score``), estimates its
    scores with repeated stratified k-fold CV, refits on the full training
    split, optionally saves the pipeline, and writes train/test score CSVs
    under ``output_root_dir``.

    :param input_file_path: unused here; kept so all run_* helpers share a signature.
    :param colsdel: dropped-column names forwarded to the reporting helpers.
    :param save_model: when > 0, persist the fitted pipeline as a joblib file.
    :param use_stan_scaler: when > 0, prepend feature standardization.
    """
    print("\n Working on the SVM model . . .")
    estimators = []
    if use_stan_scaler > 0:
        estimators.append(('std', StandardScaler()))
    # probability=True is needed for predict_proba-based scorers.
    estimators.append(('SVM', GridSearchCV(SVC(probability=True),
                        hparams.SVM_parameter_space,
                        cv = RepeatedStratifiedKFold(n_splits=n_splits , n_repeats=n_repeats),
                        n_jobs= n_jobs, refit= 'roc_auc_score', scoring = scoring )))
    model = Pipeline(estimators)
    # NOTE(review): unlike run_GNB/run_KNN/run_RF, this cross_validate call
    # omits return_estimator=True — confirm whether that is intentional.
    with parallel_backend('threading', n_jobs = n_jobs):
        results = cross_validate(model, X_train, y_train, n_jobs = n_jobs, cv = RepeatedStratifiedKFold(n_splits=n_splits , n_repeats=n_repeats), scoring = scoring)
    with parallel_backend('threading', n_jobs = n_jobs):
        model.fit(X_train,y_train)
    if save_model > 0:
        tools.save_model_joblib(model, "", "models/SVM-{}.joblib".format(postfix))
        # You can load models with load_model_joblib method
    model_name = "SVM"
    print_performance_results(results, model, model_name)
    write_performance_results_to_file(results, model, model_name, "{}SVM-{}-trainData.csv".format(output_root_dir, postfix))
    print_performance_results_for_test_data(model,X_test, y_test, colsdel,)
    write_performance_results_to_file_for_test_data(model,X_test, y_test, colsdel, "{}SVM-{}-testData.csv".format(output_root_dir, postfix))
##################
### RF
##################
def run_RF(X_train,
           X_test,
           y_train,
           y_test,
           input_file_path,
           output_root_dir,
           colsdel,
           n_splits = 10,
           n_repeats = 10,
           n_jobs = 8,
           postfix = "",
           save_model = 0,
           use_stan_scaler = 0):
    """Tune, cross-validate and evaluate a random-forest classifier.

    Builds a Pipeline of (optional ``StandardScaler``) -> ``GridSearchCV`` over
    ``hparams.RF_parameter_space`` (refit on ``roc_auc_score``), estimates its
    scores with repeated stratified k-fold CV, refits on the full training
    split, optionally saves the pipeline, and writes train/test score CSVs
    under ``output_root_dir``.

    :param input_file_path: unused here; kept so all run_* helpers share a signature.
    :param colsdel: dropped-column names forwarded to the reporting helpers.
    :param save_model: when > 0, persist the fitted pipeline as a joblib file.
    :param use_stan_scaler: when > 0, prepend feature standardization.
    """
    print("\n Working on the RF model . . .")
    estimators = []
    if use_stan_scaler > 0:
        estimators.append(('std', StandardScaler()))
    estimators.append(('RandomForest', GridSearchCV(RandomForestClassifier(),
                        hparams.RF_parameter_space,
                        cv = RepeatedStratifiedKFold(n_splits=n_splits , n_repeats=n_repeats),
                        n_jobs= n_jobs, refit= 'roc_auc_score', scoring = scoring )))
    model = Pipeline(estimators)
    # Outer CV estimates generalization of the whole tune-and-fit procedure.
    with parallel_backend('threading', n_jobs = n_jobs):
        results = cross_validate(model, X_train, y_train, n_jobs = n_jobs, cv = RepeatedStratifiedKFold(n_splits=n_splits , n_repeats=n_repeats), scoring = scoring, return_estimator=True)
    with parallel_backend('threading', n_jobs = n_jobs):
        model.fit(X_train,y_train)
    if save_model > 0:
        tools.save_model_joblib(model, "", "models/RF-{}.joblib".format(postfix))
        # You can load models with load_model_joblib method
    model_name = "RandomForest"
    print_performance_results(results, model, model_name)
    write_performance_results_to_file(results, model, model_name, "{}RF-{}-trainData.csv".format(output_root_dir, postfix))
    print_performance_results_for_test_data(model, X_test, y_test, colsdel,)
    write_performance_results_to_file_for_test_data(model, X_test, y_test, colsdel, "{}RF-{}-testData.csv".format(output_root_dir, postfix))
###################
#### XGBoost
###################
def run_XGB(X_train,
            X_test,
            y_train,
            y_test,
            input_file_path,
            output_root_dir,
            colsdel,
            n_splits = 10,
            n_repeats = 10,
            n_jobs = 8,
            postfix = "",
            save_model = 0,
            use_stan_scaler = 0):
    """Tune, cross-validate and evaluate an XGBoost classifier.

    Builds a Pipeline of (optional ``StandardScaler``) -> ``GridSearchCV`` over
    ``hparams.XGB_parameter_space`` (refit on ``roc_auc_score``), estimates its
    scores with repeated stratified k-fold CV, refits on the full training
    split, optionally saves the pipeline, and writes train/test score CSVs
    under ``output_root_dir``.

    :param input_file_path: unused here; kept so all run_* helpers share a signature.
    :param colsdel: dropped-column names forwarded to the reporting helpers.
    :param save_model: when > 0, persist the fitted pipeline as a joblib file.
    :param use_stan_scaler: when > 0, prepend feature standardization.
    """
    print("\n Working on the XGBoost model . . .")
    estimators = []
    if use_stan_scaler > 0:
        estimators.append(('std', StandardScaler()))
    # use_label_encoder=False silences/avoids XGBoost's deprecated label encoder.
    estimators.append(('XGBClassifier', GridSearchCV(XGBClassifier(use_label_encoder=False),
                        hparams.XGB_parameter_space,
                        cv = RepeatedStratifiedKFold(n_splits=n_splits , n_repeats=n_repeats),
                        n_jobs=n_jobs, refit= 'roc_auc_score', scoring = scoring)))
    model = Pipeline(estimators)
    # NOTE(review): unlike run_GNB/run_KNN/run_RF, this cross_validate call
    # omits return_estimator=True — confirm whether that is intentional.
    with parallel_backend('threading', n_jobs = n_jobs):
        results = cross_validate(model, X_train, y_train, n_jobs = n_jobs, cv = RepeatedStratifiedKFold(n_splits=n_splits , n_repeats=n_repeats), scoring = scoring)
    with parallel_backend('threading', n_jobs = n_jobs):
        model.fit(X_train,y_train)
    if save_model > 0:
        tools.save_model_joblib(model, "", "models/XGBoost-{}.joblib".format(postfix))
        # You can load models with load_model_joblib method
    model_name = "XGBClassifier"
    print_performance_results(results, model, model_name)
    write_performance_results_to_file(results, model, model_name, "{}XGBoost-{}-trainData.csv".format(output_root_dir, postfix))
    print_performance_results_for_test_data(model, X_test, y_test, colsdel,)
    write_performance_results_to_file_for_test_data(model, X_test, y_test, colsdel, "{}XGBoost-{}-testData.csv".format(output_root_dir, postfix))
#######################################################################
#######################################################################
#######################################################################
if __name__ == "__main__":
    # Command-line entry point: parse options, load and split the data, then
    # run each requested model.
    usage ='''
    USAGE:

    python main.py -i input_file_path
                   -l label_col_name
                   -o output_root_dir
                   [-c col_name_to_delete]
                   [-k cv_splits]
                   [-r n_cv_repeats]
                   [-j n_jobs]
                   [-p output_name_postfix]
                   [-m model_names]
                   [-s 0 or 1]

    -h (--help)                      Shows these instructions :)
    -i (--input)                     The input file path
    -l (--labelcol)                  The labels' column name
    -o (--out)       (default="")    The output root directory
    -c (--colsdel)   (default="")    Column names you want to be removed from the
                                     training data. e.g. sample_name1,sample_name2
    -k (--kfold)     (default=10)    The value of k for k-fold cross validation.
                                     i.e. The number of folds
    -r (--repeats)   (default=10)    The number of iterations for the cross validation stage
    -j (--jobs)      (default=8)     Number of threads for running models
    -p (--postfix)   (default="")    The postfix for output file names
    -m (--models)    (defaults=rf,svm,xgb) Models that you want to build and test.
                                     divide names with ','. e.g. rf,svm,xgb,knn,gnb
                                     Available machines:
                                     RF  = Random Forest
                                     SVM = Support Vector Machine
                                     XGB = eXtreme Gradient Boosting
                                     KNN = K-Nearest Neighbors
                                     GNB = Gaussian Naive Bayes
    -s (--save)      (default=0)     Set it to 1 if you want to save your model as a
                                     joblib file.
    -t (--testsplit) (default=0.25)  The test split size
    -x (--stanscale) (default=0)     Set it to 1 if you want to standardize features
    '''
    input_file_path = ""
    labelcol = ""
    output_root_dir = ""
    n_splits = 10
    n_repeats = 10
    n_jobs = 8
    postfix = ""
    models = ["rf", "svm", "xgb"]
    colsdel = []
    save_model = 0
    test_split_size = 0.25
    use_stan_scaler = 0
    try:
        # BUGFIX: -x takes a value, so it needs a trailing ':' in the short
        # option string.  The original "...s:t:x" silently dropped -x's
        # argument, making "-x 1" abort on int('').
        opts, args = getopt.getopt(sys.argv[1:], "hi:k:r:j:p:o:m:l:c:s:t:x:", ["input=", "kfold=", "repeats=", "jobs=", "postfix=", "out=", "models=", "labelcol=", "colsdel=", "save=", "testsplit=", "stanscale="])
    except getopt.GetoptError:
        print(usage)
        sys.exit()
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(usage)
            sys.exit()
        elif opt in ("-i", "--input"):
            try:
                if len(arg) > 0 and os.path.isfile(arg):
                    input_file_path = arg.replace("\\", "/").replace("\"", "").replace("'", "")
                else:
                    raise ValueError("Input file path not found: \n >>> {}\n".format(arg))
            except Exception as e:
                print("ERROR: {} \n {}".format(e, usage))
                sys.exit()
        elif opt in ("-l", "--labelcol"):
            try:
                if len(arg) > 0:
                    labelcol = arg
                else:
                    raise ValueError("labels' column name is empty\n")
            except Exception as e:
                print("ERROR: {} \n {}".format(e, usage))
                sys.exit()
        elif opt in ("-o", "--out"):
            try:
                if len(arg) > 0:
                    output_root_dir = arg.replace("\\", "/").replace("\"", "").replace("'", "")
                    # Normalize so later "{}file.csv".format(output_root_dir) joins work.
                    if output_root_dir[-1] != "/":
                        output_root_dir += "/"
                else:
                    raise ValueError("Please enter the output root directory")
            except Exception as e:
                print("ERROR: {} \n {}".format(e, usage))
                sys.exit()
        elif opt in ("-c", "--colsdel"):
            try:
                colsdel.extend(arg.split(","))
            except Exception as e:
                print(usage)
                sys.exit()
        elif opt in ("-k", "--kfold"):
            try:
                n_splits = int(arg)
            except Exception as e:
                print("ERROR: input parameters for 'splits' should be INTEGER \n {}".format(usage))
                sys.exit()
        elif opt in ("-r", "--repeats"):
            try:
                n_repeats = int(arg)
            except Exception as e:
                print("ERROR: input parameters for 'repeats' should be INTEGER \n {}".format(usage))
                sys.exit()
        elif opt in ("-j", "--jobs"):
            try:
                n_jobs = int(arg)
            except Exception as e:
                print("ERROR: input parameters for 'jobs' should be INTEGER \n {}".format(usage))
                sys.exit()
        elif opt in ("-p", "--postfix"):
            postfix = arg
        elif opt in ("-m", "--models"):
            if len(arg) > 0:
                models = arg.lower().split(",")
        elif opt in ("-s", "--save"):
            try:
                save_model = 1 if int(arg) > 0 else 0
            except Exception as e:
                print("ERROR: input parameters for 'save' should be INTEGER \n {}".format(usage))
                sys.exit()
        elif opt in ("-t", "--testsplit"):
            try:
                if float(arg) >= 0 and float(arg) <= 1:
                    test_split_size = float(arg)
                else:
                    raise ValueError("")
            except Exception as e:
                print("ERROR: input parameters for 'testsplit' should be a number between 0 and 1 \n {}".format(usage))
                sys.exit()
        elif opt in ("-x", "--stanscale"):
            try:
                use_stan_scaler = 1 if int(arg) > 0 else 0
            except Exception as e:
                print("ERROR: input parameters for 'stanscale' should be INTEGER \n {}".format(usage))
                sys.exit()
    # Check for mandatory inputs
    if len(input_file_path) == 0 or len(labelcol) == 0 or len(output_root_dir) == 0:
        print(usage)
        sys.exit()
    # The label column must not be part of the feature matrix.
    colsdel.append(labelcol)
    input_data = pd.read_csv(input_file_path)
    X = input_data.drop(colsdel, axis=1)
    y = input_data[labelcol]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_split_size, random_state=42)
    # BUGFIX: the original printed y_train.shape/X_train.shape here, labelling
    # the training matrix as "Test".
    print("Train shape: {} | Test shape: {}".format(X_train.shape, X_test.shape))
    # Dispatch table replaces five parallel if-blocks; unknown names are
    # reported instead of being silently ignored.
    runners = {"xgb": run_XGB, "rf": run_RF, "svm": run_SVM, "knn": run_KNN, "gnb": run_GNB}
    for model in models:
        runner = runners.get(model)
        if runner is None:
            print("WARNING: unknown model name '{}' - skipped".format(model))
            continue
        runner(X_train, X_test, y_train, y_test,
               input_file_path, output_root_dir, colsdel,
               n_splits, n_repeats, n_jobs, postfix,
               save_model, use_stan_scaler)
| [
"pandas.read_csv",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_auc_score",
"sys.exit",
"json.dumps",
"getopt.getopt",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"os.p... | [((6233, 6260), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (6244, 6260), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((6287, 6315), 'sklearn.metrics.make_scorer', 'make_scorer', (['precision_score'], {}), '(precision_score)\n', (6298, 6315), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((6339, 6364), 'sklearn.metrics.make_scorer', 'make_scorer', (['recall_score'], {}), '(recall_score)\n', (6350, 6364), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((6390, 6411), 'sklearn.metrics.make_scorer', 'make_scorer', (['f1_score'], {}), '(f1_score)\n', (6401, 6411), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((6442, 6468), 'sklearn.metrics.make_scorer', 'make_scorer', (['roc_auc_score'], {}), '(roc_auc_score)\n', (6453, 6468), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((6489, 6519), 'sklearn.metrics.make_scorer', 'make_scorer', (['matthews_corrcoef'], {}), '(matthews_corrcoef)\n', (6500, 6519), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((6548, 6578), 'sklearn.metrics.make_scorer', 'make_scorer', (['specificity_score'], {}), '(specificity_score)\n', (6559, 6578), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((7504, 7524), 'imblearn.pipeline.Pipeline', 'Pipeline', (['estimators'], {}), '(estimators)\n', (7512, 7524), 
False, 'from imblearn.pipeline import Pipeline\n'), ((9428, 9448), 'imblearn.pipeline.Pipeline', 'Pipeline', (['estimators'], {}), '(estimators)\n', (9436, 9448), False, 'from imblearn.pipeline import Pipeline\n'), ((11328, 11348), 'imblearn.pipeline.Pipeline', 'Pipeline', (['estimators'], {}), '(estimators)\n', (11336, 11348), False, 'from imblearn.pipeline import Pipeline\n'), ((13231, 13251), 'imblearn.pipeline.Pipeline', 'Pipeline', (['estimators'], {}), '(estimators)\n', (13239, 13251), False, 'from imblearn.pipeline import Pipeline\n'), ((15231, 15251), 'imblearn.pipeline.Pipeline', 'Pipeline', (['estimators'], {}), '(estimators)\n', (15239, 15251), False, 'from imblearn.pipeline import Pipeline\n'), ((23141, 23169), 'pandas.read_csv', 'pd.read_csv', (['input_file_path'], {}), '(input_file_path)\n', (23152, 23169), True, 'import pandas as pd\n'), ((23285, 23351), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_split_size', 'random_state': '(42)'}), '(X, y, test_size=test_split_size, random_state=42)\n', (23301, 23351), False, 'from sklearn.model_selection import train_test_split\n'), ((7535, 7579), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (7551, 7579), False, 'from sklearn.utils import parallel_backend\n'), ((7786, 7830), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (7802, 7830), False, 'from sklearn.utils import parallel_backend\n'), ((9459, 9503), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (9475, 9503), False, 'from sklearn.utils import parallel_backend\n'), ((9710, 9754), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (9726, 9754), False, 'from sklearn.utils 
import parallel_backend\n'), ((11359, 11403), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (11375, 11403), False, 'from sklearn.utils import parallel_backend\n'), ((11586, 11630), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (11602, 11630), False, 'from sklearn.utils import parallel_backend\n'), ((13264, 13308), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (13280, 13308), False, 'from sklearn.utils import parallel_backend\n'), ((13514, 13558), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (13530, 13558), False, 'from sklearn.utils import parallel_backend\n'), ((15262, 15306), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (15278, 15306), False, 'from sklearn.utils import parallel_backend\n'), ((15489, 15533), 'sklearn.utils.parallel_backend', 'parallel_backend', (['"""threading"""'], {'n_jobs': 'n_jobs'}), "('threading', n_jobs=n_jobs)\n", (15505, 15533), False, 'from sklearn.utils import parallel_backend\n'), ((18665, 18864), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""hi:k:r:j:p:o:m:l:c:s:t:x"""', "['input=', 'kfold=', 'repeats=', 'jobs=', 'postfix=', 'out=', 'models=',\n 'labelcol=', 'colsdel=', 'save=', 'testsplit=', 'stanscale=']"], {}), "(sys.argv[1:], 'hi:k:r:j:p:o:m:l:c:s:t:x', ['input=', 'kfold=',\n 'repeats=', 'jobs=', 'postfix=', 'out=', 'models=', 'labelcol=',\n 'colsdel=', 'save=', 'testsplit=', 'stanscale='])\n", (18678, 18864), False, 'import sys, getopt, os\n'), ((23076, 23086), 'sys.exit', 'sys.exit', ([], {}), '()\n', (23084, 23086), False, 'import sys, getopt, os\n'), ((4261, 4325), 'json.dumps', 
'json.dumps', (['model.named_steps[model_name].best_params_'], {'indent': '(3)'}), '(model.named_steps[model_name].best_params_, indent=3)\n', (4271, 4325), False, 'import json\n'), ((4556, 4591), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4570, 4591), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((4638, 4674), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4653, 4674), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((4718, 4751), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4730, 4751), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((4791, 4820), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4799, 4820), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((4865, 4899), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4878, 4899), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((4948, 4986), 'imblearn.metrics.specificity_score', 'specificity_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4965, 4986), False, 'from imblearn.metrics import specificity_score\n'), ((5035, 5093), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['y_test', 'predictions'], {'sample_weight': 'None'}), '(y_test, predictions, sample_weight=None)\n', (5052, 5093), 
False, 'from sklearn.metrics import matthews_corrcoef\n'), ((18920, 18930), 'sys.exit', 'sys.exit', ([], {}), '()\n', (18928, 18930), False, 'import sys, getopt, os\n'), ((19034, 19044), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19042, 19044), False, 'import sys, getopt, os\n'), ((5425, 5460), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (5439, 5460), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((5519, 5555), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (5534, 5555), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((5611, 5644), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (5623, 5644), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((5696, 5725), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (5704, 5725), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((5782, 5816), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (5795, 5816), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_squared_error\n'), ((5877, 5915), 'imblearn.metrics.specificity_score', 'specificity_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (5894, 5915), False, 'from imblearn.metrics import specificity_score\n'), ((5976, 6034), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['y_test', 
'predictions'], {'sample_weight': 'None'}), '(y_test, predictions, sample_weight=None)\n', (5993, 6034), False, 'from sklearn.metrics import matthews_corrcoef\n'), ((7117, 7133), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7131, 7133), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7180, 7192), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (7190, 7192), False, 'from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB\n'), ((7665, 7728), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (7688, 7728), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((9032, 9048), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9046, 9048), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9095, 9117), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (9115, 9117), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((9589, 9652), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (9612, 9652), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((10932, 10948), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (10946, 10948), False, 'from sklearn.preprocessing import StandardScaler\n'), ((10995, 11016), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (10998, 11016), False, 'from sklearn.svm import SVC\n'), ((11489, 11552), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (11512, 11552), False, 'from 
sklearn.model_selection import RepeatedStratifiedKFold\n'), ((12800, 12816), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12814, 12816), False, 'from sklearn.preprocessing import StandardScaler\n'), ((12872, 12896), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (12894, 12896), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((13394, 13457), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (13417, 13457), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((14750, 14766), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (14764, 14766), False, 'from sklearn.preprocessing import StandardScaler\n'), ((14823, 14861), 'xgboost.sklearn.XGBClassifier', 'XGBClassifier', ([], {'use_label_encoder': '(False)'}), '(use_label_encoder=False)\n', (14836, 14861), False, 'from xgboost.sklearn import XGBClassifier\n'), ((15392, 15455), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (15415, 15455), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((7318, 7381), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (7341, 7381), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((9243, 9306), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (9266, 9306), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((11142, 11205), 
'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (11165, 11205), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((13037, 13100), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (13060, 13100), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((15027, 15090), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats'}), '(n_splits=n_splits, n_repeats=n_repeats)\n', (15050, 15090), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((19138, 19157), 'os.path.isfile', 'os.path.isfile', (['arg'], {}), '(arg)\n', (19152, 19157), False, 'import sys, getopt, os\n'), ((19487, 19497), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19495, 19497), False, 'import sys, getopt, os\n'), ((19834, 19844), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19842, 19844), False, 'import sys, getopt, os\n'), ((20345, 20355), 'sys.exit', 'sys.exit', ([], {}), '()\n', (20353, 20355), False, 'import sys, getopt, os\n'), ((20545, 20555), 'sys.exit', 'sys.exit', ([], {}), '()\n', (20553, 20555), False, 'import sys, getopt, os\n'), ((20803, 20813), 'sys.exit', 'sys.exit', ([], {}), '()\n', (20811, 20813), False, 'import sys, getopt, os\n'), ((21065, 21075), 'sys.exit', 'sys.exit', ([], {}), '()\n', (21073, 21075), False, 'import sys, getopt, os\n'), ((21318, 21328), 'sys.exit', 'sys.exit', ([], {}), '()\n', (21326, 21328), False, 'import sys, getopt, os\n'), ((21503, 21513), 'sys.exit', 'sys.exit', ([], {}), '()\n', (21511, 21513), False, 'import sys, getopt, os\n'), ((21780, 21790), 'sys.exit', 'sys.exit', ([], {}), '()\n', (21788, 21790), False, 'import sys, getopt, os\n'), ((22127, 22137), 
'sys.exit', 'sys.exit', ([], {}), '()\n', (22135, 22137), False, 'import sys, getopt, os\n'), ((22543, 22553), 'sys.exit', 'sys.exit', ([], {}), '()\n', (22551, 22553), False, 'import sys, getopt, os\n'), ((22910, 22920), 'sys.exit', 'sys.exit', ([], {}), '()\n', (22918, 22920), False, 'import sys, getopt, os\n')] |
from shlex import split
import json
class RawCommand:
    """Parse a salt-style command line into a salt "lowstate" dictionary.

    The command string is tokenised with shlex.split(); parse() then
    interprets client selection, batch/timeout flags and targeting flags.
    """
    def __init__(self, command, client="local", posix=True, inline=False):
        # TODO: check shlex.quote, raw string, etc..
        tokens = split(command, posix=posix)
        # A non-inline command starts with the binary name (e.g. "salt"),
        # which carries no information for parsing, so drop it.
        self.command = tokens if inline else tokens[1:]
        self.options = {"expr_form": "glob"}
        self.client = client
    def parse(self):
        """Return [lowstate] on success, or an error message string."""
        args = self.command
        # An explicit --client=<name> token overrides the constructor value.
        if args[0].startswith("--client"):
            self.client = args[0].split("=")[1]
            args.pop(0)
        low = {"client": self.client}
        if self.client.startswith("local"):
            if len(args) < 2:
                return "Command or target not specified"
            # Batch option (only parsed for the local_batch client).
            low["batch"] = None
            if self.client == "local_batch":
                batch_index = None
                for index, arg in enumerate(args):
                    if arg in ["-b", "--batch", "--batch-size"]:
                        low["batch"] = args[index + 1]
                        batch_index = index
                # Bug fix: `if batch_index:` treated a flag at position 0
                # as "not found" and left the flag and its value in args.
                if batch_index is not None:
                    args.pop(batch_index)
                    args.pop(batch_index)
            # Timeout option
            timeout_index = None
            for index, arg in enumerate(args):
                if arg in ["-t", "--timeout"]:
                    low["timeout"] = int(args[index + 1])
                    timeout_index = index
            # Bug fix: same position-0 problem as the batch flag above.
            if timeout_index is not None:
                args.pop(timeout_index)
                args.pop(timeout_index)
            # take care of targeting.
            target_dict = {
                "pcre": ["-E", "--pcre"],
                "list": ["-L", "--list"],
                "grain": ["-G", "--grain"],
                "grain_pcre": ["--grain-pcre"],
                "pillar": ["-I", "--pillar"],
                "pillar_pcre": ["--pillar-pcre"],
                "range": ["-R", "--range"],
                "compound": ["-C", "--compound"],
                "nodegroup": ["-N", "--nodegroup"],
            }
            for key, value in target_dict.items():
                if args[0] in value:
                    self.options["expr_form"] = key
                    args.pop(0)
            low["tgt_type"] = self.options["expr_form"]
            low["tgt"] = args.pop(0)
            low["fun"] = args.pop(0)
            low["arg"] = args
        elif self.client.startswith("runner") or self.client.startswith("wheel"):
            low["fun"] = args.pop(0)
            for arg in args:
                if "=" in arg:
                    key, value = arg.split("=", 1)
                    try:
                        # Decode JSON values (numbers, lists, ...) when possible.
                        low[key] = json.loads(value)
                    except json.JSONDecodeError:
                        low[key] = value
                else:
                    low.setdefault("arg", []).append(arg)
        else:
            # This should never happen
            return "Client not implemented: {0}".format(self.client)
        return [low]
| [
"shlex.split",
"json.loads"
] | [((230, 257), 'shlex.split', 'split', (['command'], {'posix': 'posix'}), '(command, posix=posix)\n', (235, 257), False, 'from shlex import split\n'), ((299, 326), 'shlex.split', 'split', (['command'], {'posix': 'posix'}), '(command, posix=posix)\n', (304, 326), False, 'from shlex import split\n'), ((2702, 2719), 'json.loads', 'json.loads', (['value'], {}), '(value)\n', (2712, 2719), False, 'import json\n')] |
# script to test the parallelized gradient / divergence from pymirc
import numpy as np
import pymirc.image_operations as pi
# Fix the RNG seed so the adjointness check is reproducible.
np.random.seed(1)

# Random 4D volume, zero-padded by one element on every side.
vol_shape = (6,200,190,180)
img = np.pad(np.random.rand(*vol_shape), 1)

# Forward operation: gradient of the image (one component per axis).
img_grad = np.zeros((img.ndim,) + img.shape, dtype = img.dtype)
pi.grad(img, img_grad)

# Random element of the gradient (range) space and its divergence.
rand_grad = np.random.rand(*img_grad.shape)
rand_div = pi.div(rand_grad)

# <grad x, y> should equal <x, -div y>, so the printed ratio should be 1.
print(-(img*rand_div).sum() / (img_grad*rand_grad).sum())
| [
"numpy.random.rand",
"pymirc.image_operations.grad",
"pymirc.image_operations.div",
"numpy.zeros",
"numpy.random.seed"
] | [((154, 171), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (168, 171), True, 'import numpy as np\n'), ((348, 392), 'numpy.zeros', 'np.zeros', (['((x.ndim,) + x.shape)'], {'dtype': 'x.dtype'}), '((x.ndim,) + x.shape, dtype=x.dtype)\n', (356, 392), True, 'import numpy as np\n'), ((421, 439), 'pymirc.image_operations.grad', 'pi.grad', (['x', 'grad_x'], {}), '(x, grad_x)\n', (428, 439), True, 'import pymirc.image_operations as pi\n'), ((485, 514), 'numpy.random.rand', 'np.random.rand', (['*grad_x.shape'], {}), '(*grad_x.shape)\n', (499, 514), True, 'import numpy as np\n'), ((547, 556), 'pymirc.image_operations.div', 'pi.div', (['y'], {}), '(y)\n', (553, 556), True, 'import pymirc.image_operations as pi\n'), ((277, 299), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (291, 299), True, 'import numpy as np\n')] |
from happytransformer import HappyNextSentence
def test_sp_true():
    """A sentence pair that naturally follows should score above 0.5."""
    model = HappyNextSentence()
    score = model.predict_next_sentence(
        "Hi nice to meet you. How old are you?",
        "I am 21 years old."
    )
    assert score > 0.5
def test_sp_false():
    """An unrelated sentence pair should score below 0.5."""
    model = HappyNextSentence()
    score = model.predict_next_sentence(
        "How old are you?",
        "The Eiffel Tower is in Paris."
    )
    assert score < 0.5
def test_sp_save():
    """Saving and reloading the model must not change its predictions."""
    happy = HappyNextSentence()
    happy.save("model/")
    result_before = happy.predict_next_sentence(
        "How old are you?",
        "The Eiffel Tower is in Paris."
    )
    reloaded = HappyNextSentence(load_path="model/")
    result_after = reloaded.predict_next_sentence(
        "How old are you?",
        "The Eiffel Tower is in Paris."
    )
    assert result_before == result_after
| [
"happytransformer.HappyNextSentence"
] | [((84, 103), 'happytransformer.HappyNextSentence', 'HappyNextSentence', ([], {}), '()\n', (101, 103), False, 'from happytransformer import HappyNextSentence\n'), ((295, 314), 'happytransformer.HappyNextSentence', 'HappyNextSentence', ([], {}), '()\n', (312, 314), False, 'from happytransformer import HappyNextSentence\n'), ((491, 510), 'happytransformer.HappyNextSentence', 'HappyNextSentence', ([], {}), '()\n', (508, 510), False, 'from happytransformer import HappyNextSentence\n'), ((672, 709), 'happytransformer.HappyNextSentence', 'HappyNextSentence', ([], {'load_path': '"""model/"""'}), "(load_path='model/')\n", (689, 709), False, 'from happytransformer import HappyNextSentence\n')] |
from setuptools import setup
# Read the long description from the README so PyPI can display it.
# Explicit encoding avoids UnicodeDecodeError on platforms whose default
# codec is not UTF-8 (e.g. cp1252 on Windows).
with open("Readme.md", 'r', encoding="utf-8") as f:
    long_description = f.read()

setup(
    name='pysqlizer',
    version='1.0',
    description='A module that can be used to convert a CSV file into a SQL file',
    author='<NAME>',
    author_email='<EMAIL>',
    url="https://github.com/slafi",
    license="MIT",
    long_description=long_description,
    packages=['pysqlizer'],
)
) | [
"setuptools.setup"
] | [((97, 374), 'setuptools.setup', 'setup', ([], {'name': '"""pysqlizer"""', 'version': '"""1.0"""', 'description': '"""A module that can be used to convert a CSV file into a SQL file"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/slafi"""', 'license': '"""MIT"""', 'long_description': 'long_description', 'packages': "['pysqlizer']"}), "(name='pysqlizer', version='1.0', description=\n 'A module that can be used to convert a CSV file into a SQL file',\n author='<NAME>', author_email='<EMAIL>', url='https://github.com/slafi',\n license='MIT', long_description=long_description, packages=['pysqlizer'])\n", (102, 374), False, 'from setuptools import setup\n')] |
import numpy as np
class Grid:
    """Rectangular grid world evaluated by synchronous value iteration.

    NOTE(review): the value grid is indexed [row (y), column (x)] and
    "heigth" is the original (misspelled) identifier, kept for callers.
    The 5x5 setup below (teleport squares with rewards 10 and 5) resembles
    the classic grid-world example -- confirm against the intended source.
    """
    def __init__(self, width, heigth, discount = 0.9):
        # Grid dimensions and the agent's current position.
        self.width = width
        self.heigth = heigth
        self.x_pos = 0
        self.y_pos = 0
        # State values, indexed values[y, x].
        self.values = np.zeros((heigth, width))
        self.discount = discount
        # Parallel lists of teleport source cells and their destinations.
        self.vertex_sources = []
        self.vertex_dests = []
        self.vertex_values = []
    def init_rewards(self, rewards):
        # Per-cell reward array; must match the grid shape exactly.
        assert rewards.shape[0] == self.heigth and rewards.shape[1]==self.width, "reward initialized is not valid"
        self.rewards = rewards
    def add_vertex(self, source, dest):
        # Register a teleport edge; update() compares sources against
        # [y, x] pairs, so both endpoints are expected in [y, x] order.
        assert len(source) == 2 and len(dest) == 2, "source or dest is not valid"
        self.vertex_sources.append(source)
        self.vertex_dests.append(dest)
    def update(self):
        # One synchronous Bellman backup for the equiprobable random policy.
        next_values = np.zeros((self.heigth, self.width))
        for x in range(self.width):
            for y in range(self.heigth):
                if [y, x] in self.vertex_sources:
                    # Teleport cell: deterministic jump to its destination,
                    # collecting this cell's reward.
                    for vertex_source, vertex_dest in zip(self.vertex_sources, self.vertex_dests):
                        if [y, x] == vertex_source:
                            next_values[y, x] += self.rewards[y,x] + self.discount*self.values[vertex_dest[0], vertex_dest[1]]
                            break
                else:
                    # Four moves (up, right, down, left), each with prob 0.25.
                    for cur_movement, cur_prob in zip([[-1, 0], [0, 1], [1, 0], [0, -1]], [0.25, 0.25, 0.25, 0.25]):
                        next_place = [y+cur_movement[0], x+cur_movement[1]]
                        if 0<=next_place[0]<self.heigth and 0<=next_place[1]<self.width:
                            next_values[y, x] += cur_prob*(self.rewards[y,x] + self.discount*self.values[next_place[0], next_place[1]])
                        else:
                            # Off-grid move: reward -1 and the agent stays put.
                            next_values[y, x] += cur_prob*(-1+self.discount*self.values[y, x])
        print('-'*20)
        print (next_values)
        self.values = next_values
    def policy(self):
        # NOTE(review): this method looks unfinished -- movement_y is drawn
        # but never applied, and the position is checked as [x_pos, y_pos]
        # while update() stores teleport sources as [y, x]. Confirm the
        # intended semantics before relying on it.
        movement_x = -1
        movement_y = -1
        if np.random.rand()> 0.5:
            movement_x = 1
        if np.random.rand()>0.5:
            movement_y = 1
        if [self.x_pos, self.y_pos] in self.vertex_sources:
            for vertex_source, vertex_dest in zip(self.vertex_sources, self.vertex_dests):
                if vertex_source == [self.x_pos, self.y_pos]:
                    self.x_pos = vertex_dest[0]
                    self.y_pos = vertex_dest[1]
        else:
            if 0<=self.x_pos+movement_x<self.heigth:
                self.x_pos+=movement_x
# Build a 5x5 grid world with two teleport squares and run 50 sweeps
# of value iteration, printing the value grid after each sweep.
world = Grid(5, 5, discount = 0.9)

rewards = np.zeros((5, 5))
# Teleport [0,1] -> [4,1] paying 10, and [0,3] -> [2,3] paying 5.
world.add_vertex([0, 1], [4, 1])
rewards[0, 1] = 10
world.add_vertex([0, 3], [2, 3])
rewards[0, 3] = 5
world.init_rewards(rewards)

print(world.values)
for sweep in range(50):
    print('iter: {}'.format(sweep))
    world.update()
| [
"numpy.zeros",
"numpy.random.rand"
] | [((2602, 2618), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (2610, 2618), True, 'import numpy as np\n'), ((211, 236), 'numpy.zeros', 'np.zeros', (['(heigth, width)'], {}), '((heigth, width))\n', (219, 236), True, 'import numpy as np\n'), ((808, 843), 'numpy.zeros', 'np.zeros', (['(self.heigth, self.width)'], {}), '((self.heigth, self.width))\n', (816, 843), True, 'import numpy as np\n'), ((2014, 2030), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2028, 2030), True, 'import numpy as np\n'), ((2075, 2091), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2089, 2091), True, 'import numpy as np\n')] |
# Copyright (C) <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.****
import sys
from nemo.constants import NEMO_ENV_VARNAME_ENABLE_COLORING
from nemo.utils.env_var_parsing import get_envbool
__all__ = ["check_color_support", "to_unicode"]
def check_color_support():
    """Return True when colored output should be enabled, else False.

    Coloring is skipped on Windows platforms and must be explicitly
    opted into via the NEMO_ENV_VARNAME_ENABLE_COLORING env variable.
    """
    # Colors can be forced with an env variable
    if not sys.platform.lower().startswith("win") and get_envbool(NEMO_ENV_VARNAME_ENABLE_COLORING, False):
        return True
    # Explicit False: the original implicitly returned None here, which is
    # falsy but surprising for a predicate.
    return False
def to_unicode(value):
    """
    Converts a string argument to a unicode string.
    If the argument is already a unicode string or None, it is returned
    unchanged. Otherwise it must be a byte string and is decoded as utf8;
    undecodable byte strings are returned as their repr().
    """
    # str and None pass through untouched.
    if isinstance(value, (str, type(None))):
        return value
    # Anything that is neither str, None, nor bytes is a caller error.
    if not isinstance(value, bytes):
        raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
    try:
        return value.decode("utf-8")
    except UnicodeDecodeError:
        return repr(value)
| [
"nemo.utils.env_var_parsing.get_envbool",
"sys.platform.lower"
] | [((900, 952), 'nemo.utils.env_var_parsing.get_envbool', 'get_envbool', (['NEMO_ENV_VARNAME_ENABLE_COLORING', '(False)'], {}), '(NEMO_ENV_VARNAME_ENABLE_COLORING, False)\n', (911, 952), False, 'from nemo.utils.env_var_parsing import get_envbool\n'), ((857, 877), 'sys.platform.lower', 'sys.platform.lower', ([], {}), '()\n', (875, 877), False, 'import sys\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""Let viewers pay currency to boost currency payouts
for everyone in chat for x seconds"""
import json
import os, os.path
import operator
import time
import codecs
#---------------------------------------
# [Required] Script information
#---------------------------------------
ScriptName = "Festival"
Website = "https://www.twitch.tv/newtc"
Creator = "Newt"
Version = "1.0.0.0"
Description = "Allows users to select stories for me to tell"
#---------------------------------------
# Versions
#---------------------------------------
"""
1.0.0.0 - Initial release
"""
#---------------------------------------
# Variables
#---------------------------------------
settingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
story_file = os.path.join(os.path.dirname(__file__), "stories.json")
pending_file = os.path.join(os.path.dirname(__file__), "pending.json")
#---------------------------------------
# Classes
#---------------------------------------
class Settings:
    """
    Tries to load settings from file if given
    The 'default' variable names need to match UI_Config"""
    def __init__(self, settingsFile=None):
        if settingsFile is not None and os.path.isfile(settingsFile):
            with codecs.open(settingsFile, encoding='utf-8-sig', mode='r') as f:
                self.__dict__ = json.load(f, encoding='utf-8-sig')
        else: #set variables if no settings file
            self.Enabled = True
            self.OnlyLive = False
            self.Command = "!stories"
            # Bug fix: add_story() reads MySet.SubmissionReward, but the old
            # default only defined "SubmissionRewards" (plural), causing an
            # AttributeError whenever no settings file existed. Define the
            # name that is actually read, and keep the old one for any
            # settings files / UI configs that still use it.
            self.SubmissionReward = "100"
            self.SubmissionRewards = "100"
    def ReloadSettings(self, data):
        """Reload settings on save through UI"""
        self.__dict__ = json.loads(data, encoding='utf-8-sig')
        return
    def SaveSettings(self, settingsFile):
        """Save settings to files (json and js)"""
        with codecs.open(settingsFile, encoding='utf-8-sig', mode='w+') as f:
            json.dump(self.__dict__, f, encoding='utf-8-sig')
        with codecs.open(settingsFile.replace("json", "js"), encoding='utf-8-sig', mode='w+') as f:
            f.write("var settings = {0};".format(json.dumps(self.__dict__, encoding='utf-8-sig')))
        return
def ReloadSettings(jsonData):
    """Chatbot hook: re-read the saved settings after a UI save."""
    global MySet
    MySet.ReloadSettings(jsonData)
    return
class Story:
    """A single story suggestion: its link/info, point value and submitter."""

    # Class-level defaults (overridden per instance).
    StoryInfo = ""
    StoryValue = 0
    StoryContributor = ""

    def __init__(self, info, contributor):
        self.StoryInfo = info
        self.StoryContributor = contributor

    def get_story_info(self):
        """Return the story link / description."""
        return self.StoryInfo

    def set_story_info(self, info):
        """Replace the story link / description."""
        self.StoryInfo = info

    def get_value(self):
        """Return the accumulated point value of this story."""
        return self.StoryValue

    def set_value(self, value):
        """Set the accumulated point value of this story."""
        self.StoryValue = value

    def get_contributor(self):
        """Return the name of the user who submitted this story."""
        return self.StoryContributor

    def set_contributor(self, contributor):
        """Set the name of the user who submitted this story."""
        self.StoryContributor = contributor
class NewtClass:
    # Plain data holder with no methods. Presumably consumed by the
    # chatbot's permission / level checks -- TODO confirm where it is read.
    NewtClass = True
    NewtStreamerLevel = 5
#---------------------------------------
# [Required] functions
#---------------------------------------
def Init():
    """Required Init function: set up globals and data files on script load."""
    # Globals
    global MySet
    global m_Active
    global selected_stories
    global story_timer
    global last_removed_story
    m_Active = False
    selected_stories = []
    # First auto-roll fires 5400 s (90 min) after load; Tick() later
    # re-arms the timer with +3600 s after each roll.
    story_timer = time.time() + 5400
    last_removed_story = {}
    # story_timer += time.time() + 600
    # Load in saved settings
    MySet = Settings(settingsFile)
    # Create empty JSON data files on first run so later loads don't fail.
    if not os.path.exists(story_file):
        Parent.SendStreamMessage("No story file found. Creating a new one.")
        data = {}
        with codecs.open(story_file, encoding='utf-8-sig', mode='w+') as f:
            json.dump(data, f, encoding='utf-8-sig', indent=2)
    if not os.path.exists(pending_file):
        Parent.SendStreamMessage("No pending file found. Creating a new one.")
        data = {}
        with codecs.open(pending_file, encoding='utf-8-sig', mode='w+') as f:
            json.dump(data, f, encoding='utf-8-sig', indent=2)
    # convert_to_new_format()
    # End of Init
    return
def Execute(data):
    """Required Execute function: dispatch the !stories chat commands.

    Handles display/selected/roll/pending/links subcommands, per-story
    info/select/add/remove/restore/approve commands, and a "!<storyname>"
    shortcut that echoes the stored story entry.
    """
    global selected_stories
    global story_timer
    if data.IsChatMessage():
        if data.GetParam(0).lower() == MySet.Command.lower() or data.GetParam(0).lower() == "!story":
            # parse the input to something usable by the script:
            # everything after "<command> <subcommand>" forms the title,
            # which is normalised to lowercase_with_underscores for keys.
            data_input = data.Message
            data_input = data_input.split()
            data_input = data_input[2:]
            title = ' '.join(data_input)
            data_input = '_'.join(data_input).lower()
            # two word commands
            if data.GetParamCount() == 2:
                if data.GetParam(1).lower() == "display":
                    respond(data, display_story_list())
                if data.GetParam(1).lower() == "selected":
                    respond(data, parse_selected_stories())
                if data.GetParam(1).lower() == "roll":
                    # Only the streamer may force a roll.
                    if Parent.HasPermission(data.User,"user_specific", "newtc"):
                        if len(selected_stories) > 0:
                            roll_story()
                            story_timer = time.time() + 3600
                        else:
                            roll_unselected_story()
                if data.GetParam(1).lower() == "pending":
                    respond(data, display_pending_list())
                if data.GetParam(1).lower() == "links":
                    respond(data, display_pending_links())
            # single word commands
            if data.GetParamCount() == 1:
                respond(data, display_story_list())
            # variable length commands
            if data.GetParamCount() > 1:
                if data.GetParam(1).lower() == "info":
                    respond(data, "Info for " + title + ": " + story_info(data_input))
                if data.GetParam(1).lower() == "select":
                    story_added = select_story(data_input, selected_stories, data.UserName)
                    if (story_added == True):
                        respond(data, "Added " + title + " to the next story spin.")
                    elif (story_added == False):
                        respond(data, "That story is already in the next story spin.")
                if data.GetParam(1).lower() == "add":
                    # get the final value and save is as the link
                    length = data.GetParamCount()
                    info = data.GetParam(length - 1)
                    # build the name
                    name = []
                    for param in range(2, length-1):
                        name.append(data.GetParam(param))
                    data_input = '_'.join(name)
                    # save the contributor
                    contributor = data.UserName.lower()
                    if data_input:
                        add_story(data_input, info, contributor)
                # Bug fix: the original compared against ("remove" or
                # "subtract"), which evaluates to just "remove", so the
                # "subtract" alias never matched.
                if data.GetParam(1).lower() in ("remove", "subtract"):
                    remove_story(data_input)
                if data.GetParam(1).lower() == "restore":
                    re_add(data_input)
                if data.GetParam(1).lower() == "approve":
                    approve_story(data_input)
        # "!<storyname>" shortcut: echo the stored entry for that story.
        if data.GetParam(0).lower()[0] == '!':
            if data.GetParam(0).lower()[1:] in load_story_list():
                respond(data, load_story_list()[data.GetParam(0).lower()[1:]])
    return
def Tick():
    """Required Tick function: auto-rolls a selected story on a timer."""
    global story_timer
    # Roll a new story once the timer expires; the timer is then re-armed
    # to 3600 s (one hour) from now. (Init seeds it at +5400 s.)
    if time.time() > story_timer:
        if len(selected_stories) > 0:
            roll_story()
        # else:
        #     roll_unselected_story()
        story_timer = time.time() + 3600
    return
def respond(data, output):
    """Send *output* back over the same channel the message came from.

    Discord vs. stream and whisper vs. public are both preserved.
    """
    if data.IsFromDiscord():
        if data.IsWhisper():
            Parent.SendDiscordDM(data.User, output)
        else:
            Parent.SendDiscordMessage(output)
    elif data.IsWhisper():
        Parent.SendStreamWhisper(data.UserName, output)
    else:
        # Stream chat needs a plain string.
        Parent.SendStreamMessage(str(output))
def load_story_list():
    """Read and return the approved-story dictionary from stories.json."""
    with codecs.open(story_file, encoding='utf-8-sig', mode='r') as handle:
        return json.load(handle, encoding='utf-8-sig')
def load_pending_list():
    """Read and return the pending-story dictionary from pending.json."""
    with codecs.open(pending_file, encoding='utf-8-sig', mode='r') as handle:
        return json.load(handle, encoding='utf-8-sig')
# display all available stories
def display_story_list():
    """Return a comma-separated, capitalised list of all approved stories."""
    names = []
    for key in load_story_list().keys():
        # Uppercase the first character of every underscore-separated word.
        words = [w.replace(w[0], w[0].upper(), 1) for w in key.split("_")]
        names.append(" ".join(words))
    return ", ".join(names)
# display all available stories
def display_pending_list():
    """Return a comma-separated, capitalised list of all pending stories."""
    names = []
    for key in load_pending_list().keys():
        # Uppercase the first character of every underscore-separated word.
        words = [w.replace(w[0], w[0].upper(), 1) for w in key.split("_")]
        names.append(" ".join(words))
    return ", ".join(names)
def display_pending_links():
    """Return every pending story with its link, ' , '-separated."""
    records = load_pending_list()
    return "".join(key + ": " + records[key]["info"] + ' , ' for key in records.keys())
def parse_selected_stories():
    """Return a readable, comma-separated list of the selected stories."""
    global selected_stories
    if not selected_stories:
        return "There are no stories selected! Please select one."
    pretty = []
    for story in selected_stories:
        # Uppercase the first character of every underscore-separated word.
        words = [w.replace(w[0], w[0].upper(), 1) for w in story.split("_")]
        pretty.append(" ".join(words))
    return ", ".join(pretty)
# returns the story info
def story_info(story):
    """Return the stored link/info for *story*, or a hint when unknown."""
    stories = load_story_list()
    key = story.lower()
    if key in stories:
        return stories[key]["info"]
    return "The story " + story + " is not in the story selection yet. Send me a link and I can add it."
# parses the story's name into an easily readable string
def story_name(story):
    """Return *story* with words capitalised if it is known, else ''."""
    if story.lower() not in load_story_list():
        return ""
    # Uppercase the first character of every underscore-separated word.
    words = [w.replace(w[0], w[0].upper(), 1) for w in story.split("_")]
    return " ".join(words)
# select a story
def select_story(story, selected_stories, user):
    """Add *story* to the next spin.

    Returns True when newly selected, False when it was already selected,
    and (implicitly) None when the story is not in the approved list.
    """
    global story_timer
    data = load_story_list()
    if story.lower() in data.keys():
        if story.lower() not in selected_stories:
            selected_stories.append(story.lower())
            # Push the auto-roll back to 30 minutes from now.
            story_timer = time.time() + 1800
            if data[story.lower()]["contributor"] != user.lower():
                # add more points each time anyone other than the user selects it
                data[story.lower()]["value"] += 50
                with codecs.open(story_file, encoding='utf-8-sig', mode='w+') as f:
                    json.dump(data, f, encoding='utf-8-sig', indent=2)
            return True
        else:
            return False
# select a story from chosen stories
def roll_story():
    """Pick a random selected story, announce it, pay out, and remove it."""
    global selected_stories
    # NOTE(review): assumes Parent.GetRandom's upper bound is exclusive;
    # if it is inclusive this can raise IndexError -- confirm against the
    # chatbot API docs.
    choice = selected_stories[Parent.GetRandom(0, len(selected_stories))]
    retval = "The story that was selected was: " + story_name(choice) + ". You can follow along at " + story_info(choice)
    Parent.SendStreamMessage(retval)
    # reset selected stories
    selected_stories = []
    # payout if the user is in chat
    data = load_story_list()
    if (data[choice.lower()]["contributor"] in Parent.GetViewerList()) and (data[choice]["value"] > 0):
        user = data[choice.lower()]["contributor"]
        value = data[choice.lower()]["value"]
        Parent.AddPoints(user.lower(), user.lower(), value)
    # remove the story we rolled from the list
    remove_story(choice.lower())
    return choice
def roll_unselected_story():
    """Pick a random story from the full approved list, announce it,
    pay the contributor if present, and remove the story."""
    data = load_story_list()
    # Bug fix / portability: materialise the keys so they can be indexed.
    # dict.keys() is not indexable on Python 3; on Python 2 (the chatbot's
    # IronPython runtime) list() is a harmless copy.
    stories = list(data.keys())
    # NOTE(review): assumes Parent.GetRandom's upper bound is exclusive --
    # confirm against the chatbot API docs.
    choice = stories[Parent.GetRandom(0, len(stories))]
    retval = "Rolling from the main story list. The story that was selected was: " + story_name(choice) + ". You can follow along at " + story_info(
        choice)
    Parent.SendStreamMessage(retval)
    # Pay the contributor only if they are currently in chat.
    if (data[choice.lower()]["contributor"] in Parent.GetViewerList()) and (data[choice]["value"] > 0):
        user = data[choice.lower()]["contributor"]
        value = data[choice.lower()]["value"]
        Parent.AddPoints(user.lower(), user.lower(), value)
    remove_story(choice.lower())
    return choice
# add a story
def add_story(story, info, contributor):
    """Add *story* to the pending list and reward the contributor.

    Returns True when the story was stored, False when it already exists.
    """
    retval = False
    # Bug fix: the original condition was
    #   story in load_pending_list() or load_story_list()
    # i.e. "(story in pending) or bool(approved dict)", which rejected
    # every submission as a duplicate once ANY approved story existed.
    if story in load_pending_list() or story in load_story_list():
        Parent.SendStreamMessage("That story already exists.")
    # else if the counter does not exist
    else:
        # add the counter to the counters.json
        counter_list = load_pending_list()
        storyname = story.lower()
        counter_list[storyname] = {}
        counter_list[storyname]["info"] = info
        counter_list[storyname]["contributor"] = contributor
        counter_list[storyname]["value"] = 0
        # give logs to the user who added
        Parent.AddPoints(contributor, contributor, int(MySet.SubmissionReward))
        # save the story
        with codecs.open(pending_file, encoding='utf-8-sig', mode='w+') as f:
            json.dump(counter_list, f, encoding='utf-8-sig', indent=2)
        # NOTE(review): story_name() only knows approved stories, so it
        # returns '' for a freshly pended story -- confirm intended message.
        Parent.SendStreamMessage('Story "' + story_name(story) +
                                 '" successfully created. It has been stored in pending.')
        retval = True
    return retval
def approve_story(story):
    """
    Moves a story from the pending file to the story file.
    :param story: The story to approve.
    :return:
    """
    pending_data = load_pending_list()
    story_to_approve = pending_data[story.lower()]
    # Bug fix: the original did `del pending_file[story.lower()]`, which
    # indexes the module-level path STRING instead of the loaded dict and
    # raises TypeError at runtime.
    del pending_data[story.lower()]
    with codecs.open(pending_file, encoding='utf-8-sig', mode='w+') as f:
        json.dump(pending_data, f, encoding='utf-8-sig', indent=2)
    story_data = load_story_list()
    story_data[story.lower()] = story_to_approve
    # save the story
    with codecs.open(story_file, encoding='utf-8-sig', mode='w+') as f:
        json.dump(story_data, f, encoding='utf-8-sig', indent=2)
    Parent.SendStreamMessage('Story "' + story_name(story) + '" successfully created.')
# remove a story from the list
def remove_story(story):
    """Delete *story* from the story file, caching it for re_add()."""
    global last_removed_story
    key = story.lower()
    stories = load_story_list()
    # Cache the removed entry so it can be restored later.
    last_removed_story[key] = stories[key]
    del stories[key]
    # Persist the shrunk story list.
    with codecs.open(story_file, encoding='utf-8-sig', mode='w+') as f:
        json.dump(stories, f, encoding='utf-8-sig', indent=2)
def re_add(story):
    """Restore the most recently removed copy of *story*, if cached."""
    global last_removed_story
    key = story.lower()
    if key not in last_removed_story:
        # Nothing cached for this story; silently do nothing.
        return
    cached = last_removed_story[key]
    stories = load_story_list()
    stories[key] = {}
    stories[key]["info"] = cached["info"]
    stories[key]["contributor"] = cached["contributor"]
    stories[key]["value"] = cached["value"]
    # Persist the restored story list.
    with codecs.open(story_file, encoding='utf-8-sig', mode='w+') as f:
        json.dump(stories, f, encoding='utf-8-sig', indent=2)
    Parent.SendStreamMessage('Story "' + story_name(story) + '" successfully restored.')
#def convert_to_new_format():
# data = load_story_list()
# for each in data.keys():
# build the new values
# new_data = {}
# if type(data[each]) != dict:
# new_data["info"] = data[each]
# new_data["contributor"] = ""
# new_data["value"] = 0
# if len(new_data.keys()) > 0:
# data[each] = new_data
# with codecs.open(story_file, encoding='utf-8-sig', mode='w+') as f:
# json.dump(data, f, encoding='utf-8-sig') | [
"os.path.exists",
"json.loads",
"json.dumps",
"os.path.isfile",
"os.path.dirname",
"json.load",
"codecs.open",
"time.time",
"json.dump"
] | [((766, 791), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (781, 791), False, 'import os, os.path\n'), ((836, 861), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (851, 861), False, 'import os, os.path\n'), ((907, 932), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (922, 932), False, 'import os, os.path\n'), ((1741, 1779), 'json.loads', 'json.loads', (['data'], {'encoding': '"""utf-8-sig"""'}), "(data, encoding='utf-8-sig')\n", (1751, 1779), False, 'import json\n'), ((3454, 3465), 'time.time', 'time.time', ([], {}), '()\n', (3463, 3465), False, 'import time\n'), ((3616, 3642), 'os.path.exists', 'os.path.exists', (['story_file'], {}), '(story_file)\n', (3630, 3642), False, 'import os, os.path\n'), ((3890, 3918), 'os.path.exists', 'os.path.exists', (['pending_file'], {}), '(pending_file)\n', (3904, 3918), False, 'import os, os.path\n'), ((7795, 7806), 'time.time', 'time.time', ([], {}), '()\n', (7804, 7806), False, 'import time\n'), ((8636, 8691), 'codecs.open', 'codecs.open', (['story_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""r"""'}), "(story_file, encoding='utf-8-sig', mode='r')\n", (8647, 8691), False, 'import codecs\n'), ((8713, 8747), 'json.load', 'json.load', (['f'], {'encoding': '"""utf-8-sig"""'}), "(f, encoding='utf-8-sig')\n", (8722, 8747), False, 'import json\n'), ((8863, 8920), 'codecs.open', 'codecs.open', (['pending_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""r"""'}), "(pending_file, encoding='utf-8-sig', mode='r')\n", (8874, 8920), False, 'import codecs\n'), ((8942, 8976), 'json.load', 'json.load', (['f'], {'encoding': '"""utf-8-sig"""'}), "(f, encoding='utf-8-sig')\n", (8951, 8976), False, 'import json\n'), ((15135, 15193), 'codecs.open', 'codecs.open', (['pending_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""w+"""'}), "(pending_file, encoding='utf-8-sig', mode='w+')\n", (15146, 15193), False, 'import codecs\n'), ((15208, 15266), 
'json.dump', 'json.dump', (['pending_data', 'f'], {'encoding': '"""utf-8-sig"""', 'indent': '(2)'}), "(pending_data, f, encoding='utf-8-sig', indent=2)\n", (15217, 15266), False, 'import json\n'), ((15383, 15439), 'codecs.open', 'codecs.open', (['story_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""w+"""'}), "(story_file, encoding='utf-8-sig', mode='w+')\n", (15394, 15439), False, 'import codecs\n'), ((15454, 15510), 'json.dump', 'json.dump', (['story_data', 'f'], {'encoding': '"""utf-8-sig"""', 'indent': '(2)'}), "(story_data, f, encoding='utf-8-sig', indent=2)\n", (15463, 15510), False, 'import json\n'), ((15916, 15972), 'codecs.open', 'codecs.open', (['story_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""w+"""'}), "(story_file, encoding='utf-8-sig', mode='w+')\n", (15927, 15972), False, 'import codecs\n'), ((15987, 16037), 'json.dump', 'json.dump', (['data', 'f'], {'encoding': '"""utf-8-sig"""', 'indent': '(2)'}), "(data, f, encoding='utf-8-sig', indent=2)\n", (15996, 16037), False, 'import json\n'), ((1256, 1284), 'os.path.isfile', 'os.path.isfile', (['settingsFile'], {}), '(settingsFile)\n', (1270, 1284), False, 'import os, os.path\n'), ((1903, 1961), 'codecs.open', 'codecs.open', (['settingsFile'], {'encoding': '"""utf-8-sig"""', 'mode': '"""w+"""'}), "(settingsFile, encoding='utf-8-sig', mode='w+')\n", (1914, 1961), False, 'import codecs\n'), ((1980, 2029), 'json.dump', 'json.dump', (['self.__dict__', 'f'], {'encoding': '"""utf-8-sig"""'}), "(self.__dict__, f, encoding='utf-8-sig')\n", (1989, 2029), False, 'import json\n'), ((3752, 3808), 'codecs.open', 'codecs.open', (['story_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""w+"""'}), "(story_file, encoding='utf-8-sig', mode='w+')\n", (3763, 3808), False, 'import codecs\n'), ((3827, 3877), 'json.dump', 'json.dump', (['data', 'f'], {'encoding': '"""utf-8-sig"""', 'indent': '(2)'}), "(data, f, encoding='utf-8-sig', indent=2)\n", (3836, 3877), False, 'import json\n'), ((4030, 4088), 'codecs.open', 
'codecs.open', (['pending_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""w+"""'}), "(pending_file, encoding='utf-8-sig', mode='w+')\n", (4041, 4088), False, 'import codecs\n'), ((4107, 4157), 'json.dump', 'json.dump', (['data', 'f'], {'encoding': '"""utf-8-sig"""', 'indent': '(2)'}), "(data, f, encoding='utf-8-sig', indent=2)\n", (4116, 4157), False, 'import json\n'), ((7962, 7973), 'time.time', 'time.time', ([], {}), '()\n', (7971, 7973), False, 'import time\n'), ((14508, 14566), 'codecs.open', 'codecs.open', (['pending_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""w+"""'}), "(pending_file, encoding='utf-8-sig', mode='w+')\n", (14519, 14566), False, 'import codecs\n'), ((14585, 14643), 'json.dump', 'json.dump', (['counter_list', 'f'], {'encoding': '"""utf-8-sig"""', 'indent': '(2)'}), "(counter_list, f, encoding='utf-8-sig', indent=2)\n", (14594, 14643), False, 'import json\n'), ((16589, 16645), 'codecs.open', 'codecs.open', (['story_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""w+"""'}), "(story_file, encoding='utf-8-sig', mode='w+')\n", (16600, 16645), False, 'import codecs\n'), ((16664, 16722), 'json.dump', 'json.dump', (['counter_list', 'f'], {'encoding': '"""utf-8-sig"""', 'indent': '(2)'}), "(counter_list, f, encoding='utf-8-sig', indent=2)\n", (16673, 16722), False, 'import json\n'), ((1303, 1360), 'codecs.open', 'codecs.open', (['settingsFile'], {'encoding': '"""utf-8-sig"""', 'mode': '"""r"""'}), "(settingsFile, encoding='utf-8-sig', mode='r')\n", (1314, 1360), False, 'import codecs\n'), ((1399, 1433), 'json.load', 'json.load', (['f'], {'encoding': '"""utf-8-sig"""'}), "(f, encoding='utf-8-sig')\n", (1408, 1433), False, 'import json\n'), ((11863, 11874), 'time.time', 'time.time', ([], {}), '()\n', (11872, 11874), False, 'import time\n'), ((12099, 12155), 'codecs.open', 'codecs.open', (['story_file'], {'encoding': '"""utf-8-sig"""', 'mode': '"""w+"""'}), "(story_file, encoding='utf-8-sig', mode='w+')\n", (12110, 12155), False, 'import 
codecs\n'), ((12178, 12228), 'json.dump', 'json.dump', (['data', 'f'], {'encoding': '"""utf-8-sig"""', 'indent': '(2)'}), "(data, f, encoding='utf-8-sig', indent=2)\n", (12187, 12228), False, 'import json\n'), ((2179, 2226), 'json.dumps', 'json.dumps', (['self.__dict__'], {'encoding': '"""utf-8-sig"""'}), "(self.__dict__, encoding='utf-8-sig')\n", (2189, 2226), False, 'import json\n'), ((5407, 5418), 'time.time', 'time.time', ([], {}), '()\n', (5416, 5418), False, 'import time\n')] |
import numpy as np
import tensorflow as tf
#----------------------------------------------------------------------------
# Encoder network.
# Extract the feature of content and style image
# Use VGG19 network to extract features.
# VGG19 layer names in forward order, truncated after relu4_1
# (the deepest activation this encoder needs).
ENCODER_LAYERS = (
    'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
    'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
    'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
    'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
    'conv4_1', 'relu4_1'
)
# Activations captured and returned by Encoder.encode().
FEATURE_LAYERS = (
    'relu1_1', 'relu2_1', 'relu3_1', 'relu4_1'
)
class Encoder(object):
    """VGG19 feature extractor (TensorFlow 1.x graph mode).

    Kernel/bias pairs for every conv layer in ENCODER_LAYERS are loaded
    from an ``.npz`` file and frozen (``trainable=False``).
    """

    def __init__(self, weights_path):
        """Create frozen TF variables from the npz file at *weights_path*."""
        # load weights (kernel and bias) from npz file
        weights = np.load(weights_path)
        idx = 0
        self.weight_vars = []
        # create the TensorFlow variables
        with tf.variable_scope('encoder'):
            for layer in ENCODER_LAYERS:
                kind = layer[:4]
                if kind == 'conv':
                    # Transpose to TF's (h, w, in, out) kernel layout.
                    # (Assumes the npz stores (out, in, h, w) -- TODO confirm.)
                    kernel = weights['arr_%d' % idx].transpose([2, 3, 1, 0])
                    bias = weights['arr_%d' % (idx + 1)]
                    kernel = kernel.astype(np.float32)
                    bias = bias.astype(np.float32)
                    idx += 2
                    with tf.variable_scope(layer):
                        W = tf.Variable(kernel, trainable=False, name='kernel')
                        b = tf.Variable(bias, trainable=False, name='bias')
                        self.weight_vars.append((W, b))

    def encode(self, image, img_type='style'):
        """Run *image* through the network; return ``(latent_code, layers)``.

        ``layers`` maps each FEATURE_LAYERS name to its activation tensor.
        For ``'style'`` the latent code is the spatial mean of the deepest
        feature map; for ``'content'`` it is an 8x8 average pooling of it,
        transposed to NCHW.

        Raises:
            ValueError: if *img_type* is neither 'style' nor 'content'.
        """
        # create the computational graph
        idx = 0
        layers = {}
        current = image
        for layer in ENCODER_LAYERS:
            kind = layer[:4]
            if kind == 'conv':
                kernel, bias = self.weight_vars[idx]
                idx += 1
                current = conv2d(current, kernel, bias)
            elif kind == 'relu':
                current = tf.nn.relu(current)
            elif kind == 'pool':
                current = pool2d(current)
            if layer in FEATURE_LAYERS:
                layers[layer] = current
        assert len(layers) == len(FEATURE_LAYERS)
        enc = layers[FEATURE_LAYERS[-1]]
        if img_type == 'style':
            latent_code = tf.reduce_mean(enc, axis=[1, 2])
        elif img_type == 'content':
            latent_code = tf.nn.avg_pool(enc, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
            latent_code = tf.transpose(latent_code, [0, 3, 1, 2])
        else:
            # BUGFIX: this was a bare `raise` with no active exception, which
            # produced an unhelpful RuntimeError; raise an informative error.
            raise ValueError("img_type must be 'style' or 'content', got %r" % (img_type,))
        # NOTE: a fresh Session is opened per call -- expensive, but kept
        # for behavioural compatibility.
        init = (tf.global_variables_initializer(), tf.local_variables_initializer())
        with tf.Session() as sess:
            sess.run(init)
            latent_code = latent_code.eval()
        return latent_code, layers

    def preprocess(self, image, mode='BGR'):
        """Subtract the per-channel mean (channel order given by *mode*)."""
        if mode == 'BGR':
            return image - np.array([103.939, 116.779, 123.68])
        else:
            return image - np.array([123.68, 116.779, 103.939])

    def deprocess(self, image, mode='BGR'):
        """Add back the per-channel mean removed by :meth:`preprocess`."""
        if mode == 'BGR':
            return image + np.array([103.939, 116.779, 123.68])
        else:
            return image + np.array([123.68, 116.779, 103.939])
def conv2d(x, kernel, bias):
    """Reflect-pad *x* by one pixel on each spatial side, convolve it with
    *kernel* (stride 1, VALID), and add *bias*."""
    padded = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
    convolved = tf.nn.conv2d(padded, kernel, strides=[1, 1, 1, 1], padding='VALID')
    return tf.nn.bias_add(convolved, bias)
def pool2d(x):
    """Downsample *x* with 2x2 max-pooling, stride 2, 'SAME' padding."""
    pooled = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    return pooled
| [
"tensorflow.nn.conv2d",
"tensorflow.local_variables_initializer",
"tensorflow.nn.max_pool",
"tensorflow.pad",
"tensorflow.variable_scope",
"tensorflow.transpose",
"tensorflow.nn.relu",
"tensorflow.nn.avg_pool",
"tensorflow.Session",
"tensorflow.Variable",
"tensorflow.global_variables_initializer... | [((3289, 3348), 'tensorflow.pad', 'tf.pad', (['x', '[[0, 0], [1, 1], [1, 1], [0, 0]]'], {'mode': '"""REFLECT"""'}), "(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')\n", (3295, 3348), True, 'import tensorflow as tf\n'), ((3384, 3453), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x_padded', 'kernel'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(x_padded, kernel, strides=[1, 1, 1, 1], padding='VALID')\n", (3396, 3453), True, 'import tensorflow as tf\n'), ((3464, 3489), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['out', 'bias'], {}), '(out, bias)\n', (3478, 3489), True, 'import tensorflow as tf\n'), ((3534, 3609), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (3548, 3609), True, 'import tensorflow as tf\n'), ((707, 728), 'numpy.load', 'np.load', (['weights_path'], {}), '(weights_path)\n', (714, 728), True, 'import numpy as np\n'), ((832, 860), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {}), "('encoder')\n", (849, 860), True, 'import tensorflow as tf\n'), ((2282, 2314), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['enc'], {'axis': '[1, 2]'}), '(enc, axis=[1, 2])\n', (2296, 2314), True, 'import tensorflow as tf\n'), ((2563, 2596), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2594, 2596), True, 'import tensorflow as tf\n'), ((2598, 2630), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2628, 2630), True, 'import tensorflow as tf\n'), ((2645, 2657), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2655, 2657), True, 'import tensorflow as tf\n'), ((2376, 2453), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['enc'], {'ksize': '[1, 8, 8, 1]', 'strides': '[1, 8, 8, 1]', 'padding': '"""SAME"""'}), "(enc, ksize=[1, 8, 8, 1], 
strides=[1, 8, 8, 1], padding='SAME')\n", (2390, 2453), True, 'import tensorflow as tf\n'), ((2474, 2513), 'tensorflow.transpose', 'tf.transpose', (['latent_code', '[0, 3, 1, 2]'], {}), '(latent_code, [0, 3, 1, 2])\n', (2486, 2513), True, 'import tensorflow as tf\n'), ((2874, 2910), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (2882, 2910), True, 'import numpy as np\n'), ((2952, 2988), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (2960, 2988), True, 'import numpy as np\n'), ((3087, 3123), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (3095, 3123), True, 'import numpy as np\n'), ((3165, 3201), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (3173, 3201), True, 'import numpy as np\n'), ((1952, 1971), 'tensorflow.nn.relu', 'tf.nn.relu', (['current'], {}), '(current)\n', (1962, 1971), True, 'import tensorflow as tf\n'), ((1271, 1295), 'tensorflow.variable_scope', 'tf.variable_scope', (['layer'], {}), '(layer)\n', (1288, 1295), True, 'import tensorflow as tf\n'), ((1325, 1376), 'tensorflow.Variable', 'tf.Variable', (['kernel'], {'trainable': '(False)', 'name': '"""kernel"""'}), "(kernel, trainable=False, name='kernel')\n", (1336, 1376), True, 'import tensorflow as tf\n'), ((1405, 1452), 'tensorflow.Variable', 'tf.Variable', (['bias'], {'trainable': '(False)', 'name': '"""bias"""'}), "(bias, trainable=False, name='bias')\n", (1416, 1452), True, 'import tensorflow as tf\n')] |
"""WebSDL URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, \
HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, \
TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView
import dataloaderinterface.views as views
# Route table for the data loader interface.
# NOTE: order matters for the routes sharing the ``sites/`` prefix --
# the greedy ``(?P<sampling_feature_code>.*)`` patterns must come after
# the more specific register/update/delete entries, as they do here.
urlpatterns = [
    # Static pages.
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^sites/$', SitesListView.as_view(), name='sites_list'),
    url(r'^terms/$', TermsOfUseView.as_view(), name='terms_of_use'),
    url(r'^dmca/$', DMCAView.as_view(), name='dmca'),
    url(r'^privacy/$', PrivacyView.as_view(), name='privacy'),
    url(r'^cookies/$', CookiePolicyView.as_view(), name='cookie_policy'),
    url(r'^status/$', StatusListView.as_view(), name='status'),
    url(r'^browse/$', BrowseSitesListView.as_view(), name='browse_sites'),
    # Site CRUD; the ``.*`` capture group is the sampling feature code.
    url(r'^sites/register/$', SiteRegistrationView.as_view(), name='site_registration'),
    url(r'^sites/update/(?P<sampling_feature_code>.*?)/sensors/$', SensorListUpdateView.as_view(), name='sensors'),
    url(r'^sites/update/(?P<sampling_feature_code>.*?)/leafpacks/$', LeafPackListUpdateView.as_view(), name='leafpacks'),
    url(r'^sites/update/(?P<sampling_feature_code>.*)/$', SiteUpdateView.as_view(), name='site_update'),
    url(r'^sites/delete/(?P<sampling_feature_code>.*)/$', SiteDeleteView.as_view(), name='site_delete'),
    url(r'^sites/(?P<sampling_feature_code>.*)/leafpack/', include(('leafpack.urls', 'leafpack'), namespace='leafpack')),
    url(r'^sites/(?P<sampling_feature_code>.*)/$', SiteDetailView.as_view(), name='site_detail'),
    url(r'^dataloader/ajax/', views.ajax_router, name='ajax'),
]
| [
"dataloaderinterface.views.LeafPackListUpdateView.as_view",
"django.conf.urls.url",
"dataloaderinterface.views.SiteDetailView.as_view",
"dataloaderinterface.views.PrivacyView.as_view",
"dataloaderinterface.views.SitesListView.as_view",
"dataloaderinterface.views.StatusListView.as_view",
"dataloaderinter... | [((2301, 2357), 'django.conf.urls.url', 'url', (['"""^dataloader/ajax/"""', 'views.ajax_router'], {'name': '"""ajax"""'}), "('^dataloader/ajax/', views.ajax_router, name='ajax')\n", (2304, 2357), False, 'from django.conf.urls import url, include\n'), ((1041, 1059), 'dataloaderinterface.views.HomeView.as_view', 'HomeView.as_view', ([], {}), '()\n', (1057, 1059), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1096, 1119), 'dataloaderinterface.views.SitesListView.as_view', 'SitesListView.as_view', ([], {}), '()\n', (1117, 1119), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1162, 1186), 'dataloaderinterface.views.TermsOfUseView.as_view', 'TermsOfUseView.as_view', ([], {}), '()\n', (1184, 1186), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1230, 1248), 'dataloaderinterface.views.DMCAView.as_view', 'DMCAView.as_view', ([], {}), '()\n', (1246, 1248), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1287, 1308), 'dataloaderinterface.views.PrivacyView.as_view', 'PrivacyView.as_view', ([], {}), '()\n', (1306, 
1308), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1350, 1376), 'dataloaderinterface.views.CookiePolicyView.as_view', 'CookiePolicyView.as_view', ([], {}), '()\n', (1374, 1376), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1423, 1447), 'dataloaderinterface.views.StatusListView.as_view', 'StatusListView.as_view', ([], {}), '()\n', (1445, 1447), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1487, 1516), 'dataloaderinterface.views.BrowseSitesListView.as_view', 'BrowseSitesListView.as_view', ([], {}), '()\n', (1514, 1516), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1570, 1600), 'dataloaderinterface.views.SiteRegistrationView.as_view', 'SiteRegistrationView.as_view', ([], {}), '()\n', (1598, 1600), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1696, 1726), 
'dataloaderinterface.views.SensorListUpdateView.as_view', 'SensorListUpdateView.as_view', ([], {}), '()\n', (1724, 1726), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1814, 1846), 'dataloaderinterface.views.LeafPackListUpdateView.as_view', 'LeafPackListUpdateView.as_view', ([], {}), '()\n', (1844, 1846), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((1925, 1949), 'dataloaderinterface.views.SiteUpdateView.as_view', 'SiteUpdateView.as_view', ([], {}), '()\n', (1947, 1949), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((2030, 2054), 'dataloaderinterface.views.SiteDeleteView.as_view', 'SiteDeleteView.as_view', ([], {}), '()\n', (2052, 2054), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n'), ((2136, 2196), 'django.conf.urls.include', 'include', (["('leafpack.urls', 'leafpack')"], {'namespace': '"""leafpack"""'}), "(('leafpack.urls', 'leafpack'), namespace='leafpack')\n", (2143, 2196), False, 'from django.conf.urls import url, include\n'), ((2250, 2274), 'dataloaderinterface.views.SiteDetailView.as_view', 'SiteDetailView.as_view', 
([], {}), '()\n', (2272, 2274), False, 'from dataloaderinterface.views import SitesListView, SiteDetailView, SiteRegistrationView, SensorListUpdateView, HomeView, BrowseSitesListView, SiteUpdateView, SiteDeleteView, StatusListView, LeafPackListUpdateView, TermsOfUseView, DMCAView, PrivacyView, CookiePolicyView\n')] |
# =======================================================================================================================================
# VNU-HCM, University of Science
# Department Computer Science, Faculty of Information Technology
# Authors: <NAME> (<NAME>)
# © 2020
"""
We have a loud talking parrot. The "hour" parameter is the current hour time in the range 0..23. We are in trouble if the parrot is talking and the hour is before 7 or after 20. Return True if we are in trouble.
parrot_trouble(True, 6) → True
parrot_trouble(True, 7) → False
parrot_trouble(False, 6) → False
"""
import unittest
def parrot_trouble(talking, hour):
    """Return True when we are in trouble: the parrot is talking and the
    hour is before 7 or after 20.

    Args:
        talking: whether the parrot is currently talking.
        hour: current hour, in the range 0..23.
    """
    # Idiom fix: the original indexed a (False, cond) tuple with the
    # boolean; a plain boolean expression states the rule directly.
    return talking and (hour < 7 or hour > 20)
class MyTest(unittest.TestCase):
    """Exercises parrot_trouble on both talking states and the boundary
    hours 7 and 20 (inclusive quiet range)."""

    def test_case_00(self):
        # Talking before 7am: trouble.
        self.assertTrue(parrot_trouble(True, 6))

    def test_case_01(self):
        # 7am is the first quiet-allowed hour.
        self.assertFalse(parrot_trouble(True, 7))

    def test_case_03(self):
        # Not talking: never trouble, regardless of hour.
        self.assertFalse(parrot_trouble(False, 6))

    def test_case_04(self):
        self.assertFalse(parrot_trouble(False, 21))

    def test_case_05(self):
        self.assertFalse(parrot_trouble(False, 20))

    def test_case_06(self):
        # Talking after 8pm: trouble.
        self.assertTrue(parrot_trouble(True, 23))

    def test_case_07(self):
        self.assertFalse(parrot_trouble(False, 23))

    def test_case_08(self):
        # 8pm is the last allowed hour.
        self.assertFalse(parrot_trouble(True, 20))

    def test_case_09(self):
        self.assertFalse(parrot_trouble(False, 12))
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main"
] | [((1549, 1564), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1562, 1564), False, 'import unittest\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'newGui.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets ,QtPrintSupport
from pyqtgraph import PlotWidget ,PlotItem
import os
import pathlib
import pyqtgraph as pg
import pandas as pd
import numpy as np
import sys
import random
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class Ui_MainWindow(QtGui.QMainWindow):
signals = []
timer = []
data = []
n = []
nn = []
data_line = []
r = [1200,1200,1200]
z = [1,1,1]
spectrogram = []
checkBox = []
counter = -1
    def setupUi(self, MainWindow):
        """Build the whole UI: three signal channels (plot + spectrogram
        label + checkbox), the toolbar buttons, the menu bar with its
        actions, and wire every action/button to its handler.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1010, 878)
        mW = QtGui.QIcon("Mw.png")
        MainWindow.setWindowIcon(mW)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # --- three channels: plot widget + spectrogram label + checkbox ---
        # Only geometry and object names differ between the channels.
        for i in range(0,3):
            self.signals.append( PlotWidget(self.centralwidget))
            self.spectrogram.append( QtWidgets.QLabel(self.centralwidget))
            self.checkBox.append(QtWidgets.QCheckBox(self.centralwidget))
            if i == 0:
                self.signals[i].setGeometry(QtCore.QRect(20, 90, 461, 192))
                self.signals[i].setObjectName("signal_1")
                self.spectrogram[i].setGeometry(QtCore.QRect(490, 90, 471, 192))
                self.spectrogram[i].setObjectName("spectro_1")
                self.checkBox[i].setGeometry(QtCore.QRect(20, 50, 68, 20))
                self.checkBox[i].setObjectName("check_1")
            elif i == 1:
                self.signals[i].setGeometry(QtCore.QRect(20, 340, 461, 192))
                self.signals[i].setObjectName("signal_2")
                self.spectrogram[i].setGeometry(QtCore.QRect(490, 340, 471, 192))
                self.spectrogram[i].setObjectName("spectro_2")
                self.checkBox[i].setGeometry(QtCore.QRect(20, 300, 68, 20))
                self.checkBox[i].setObjectName("check_2")
            else:
                self.signals[i].setGeometry(QtCore.QRect(20, 600, 461, 192))
                self.signals[i].setObjectName("signal_3")
                self.spectrogram[i].setGeometry(QtCore.QRect(490, 600, 471, 192))
                self.spectrogram[i].setObjectName("spectro_3")
                self.checkBox[i].setGeometry(QtCore.QRect(20, 560, 68, 20))
                self.checkBox[i].setObjectName("check_3")
            # Common per-channel styling.
            self.signals[i].setStyleSheet("background-color:rgb(0, 0, 0);")
            self.signals[i].setRubberBandSelectionMode(QtCore.Qt.IntersectsItemBoundingRect)
            self.signals[i].plotItem.showGrid(x=True, y=True )
            self.signals[i].plotItem.setMenuEnabled(False)
            self.checkBox[i].setStyleSheet("font: 10pt \"MS Shell Dlg 2\";")
            self.spectrogram[i].setScaledContents(True)
        # --- toolbar push buttons (icon-only, laid out along the top) ---
        self.open = QtWidgets.QPushButton(self.centralwidget)
        self.open.setGeometry(QtCore.QRect(0, 1, 35, 35))
        self.open.setText("")
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap("img/open.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.open.setIcon(icon3)
        self.open.setObjectName("open")
        self.save = QtWidgets.QPushButton(self.centralwidget)
        self.save.setGeometry(QtCore.QRect(30, 1, 35, 35))
        self.save.setText("")
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap("img/save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.save.setIcon(icon2)
        self.save.setObjectName("save")
        self.Zoom_in = QtWidgets.QPushButton(self.centralwidget)
        self.Zoom_in.setGeometry(QtCore.QRect(60, 1, 35, 35))
        self.Zoom_in.setText("")
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("img/zoom-in.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.Zoom_in.setIcon(icon)
        self.Zoom_in.setObjectName("Zoom_in")
        self.zoom_out = QtWidgets.QPushButton(self.centralwidget)
        self.zoom_out.setGeometry(QtCore.QRect(90, 1, 35, 35))
        self.zoom_out.setText("")
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap("img/zoom-out.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.zoom_out.setIcon(icon1)
        self.zoom_out.setObjectName("zoom_out")
        self.left = QtWidgets.QPushButton(self.centralwidget)
        self.left.setGeometry(QtCore.QRect(120, 1, 35, 35))
        self.left.setText("")
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap("img/previous.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.left.setIcon(icon7)
        self.left.setObjectName("left")
        self.play = QtWidgets.QPushButton(self.centralwidget)
        self.play.setGeometry(QtCore.QRect(150, 1, 35, 35))
        self.play.setText("")
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap("img/play.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.play.setIcon(icon5)
        self.play.setObjectName("play")
        self.right = QtWidgets.QPushButton(self.centralwidget)
        self.right.setGeometry(QtCore.QRect(180, 1, 35, 35))
        self.right.setText("")
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap("img/next.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.right.setIcon(icon6)
        self.right.setObjectName("right")
        self.pause = QtWidgets.QPushButton(self.centralwidget)
        self.pause.setGeometry(QtCore.QRect(210, 1, 35, 35))
        self.pause.setText("")
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap("img/pause.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pause.setIcon(icon4)
        self.pause.setObjectName("pause")
        self.spec = QtWidgets.QPushButton(self.centralwidget)
        self.spec.setGeometry(QtCore.QRect(240, 1, 35, 35))
        self.spec.setText("")
        icon20 = QtGui.QIcon()
        icon20.addPixmap(QtGui.QPixmap("img/spec3.jpeg"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.spec.setIcon(icon20)
        self.spec.setObjectName("spec")
        # Fix the stacking order of the widgets.
        self.Zoom_in.raise_()
        self.signals[0].raise_()
        self.checkBox[1].raise_()
        self.spectrogram[1].raise_()
        self.spectrogram[2].raise_()
        self.checkBox[2].raise_()
        self.spectrogram[0].raise_()
        self.signals[1].raise_()
        self.signals[2].raise_()
        self.checkBox[0].raise_()
        self.zoom_out.raise_()
        self.save.raise_()
        self.open.raise_()
        self.pause.raise_()
        self.play.raise_()
        self.right.raise_()
        self.left.raise_()
        MainWindow.setCentralWidget(self.centralwidget)
        # --- menu bar and its menus ---
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1010, 21))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuEdit = QtWidgets.QMenu(self.menubar)
        self.menuEdit.setObjectName("menuEdit")
        self.menuSignal_tools = QtWidgets.QMenu(self.menubar)
        self.menuSignal_tools.setObjectName("menuSignal_tools")
        self.menuPlay_navigate = QtWidgets.QMenu(self.menubar)
        self.menuPlay_navigate.setObjectName("menuPlay_navigate")
        MainWindow.setMenuBar(self.menubar)
        # --- menu actions (each mirrors one of the toolbar buttons) ---
        self.actionOpen = QtWidgets.QAction(MainWindow)
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap("search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionOpen.setIcon(icon9)
        self.actionOpen.setObjectName("actionOpen")
        self.actionzoom_in = QtWidgets.QAction(MainWindow)
        icon10 = QtGui.QIcon()
        icon10.addPixmap(QtGui.QPixmap("zoom-in_1.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionzoom_in.setIcon(icon10)
        self.actionzoom_in.setObjectName("actionzoom_in")
        self.actionzoom_out = QtWidgets.QAction(MainWindow)
        icon11 = QtGui.QIcon()
        icon11.addPixmap(QtGui.QPixmap("zoom-out.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionzoom_out.setIcon(icon11)
        self.actionzoom_out.setObjectName("actionzoom_out")
        self.actionSpectrogram = QtWidgets.QAction(MainWindow)
        icon12 = QtGui.QIcon()
        icon12.addPixmap(QtGui.QPixmap("sound.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSpectrogram.setIcon(icon12)
        self.actionSpectrogram.setObjectName("actionSpectrogram")
        self.actionPlay = QtWidgets.QAction(MainWindow)
        icon13 = QtGui.QIcon()
        icon13.addPixmap(QtGui.QPixmap("play-button.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPlay.setIcon(icon13)
        self.actionPlay.setObjectName("actionPlay")
        self.actionPause = QtWidgets.QAction(MainWindow)
        icon14 = QtGui.QIcon()
        icon14.addPixmap(QtGui.QPixmap("pause-button.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPause.setIcon(icon14)
        self.actionPause.setObjectName("actionPause")
        self.actionBackward = QtWidgets.QAction(MainWindow)
        icon16 = QtGui.QIcon()
        icon16.addPixmap(QtGui.QPixmap("backward.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionBackward.setIcon(icon16)
        self.actionBackward.setObjectName("actionBackward")
        self.actionForward = QtWidgets.QAction(MainWindow)
        icon17 = QtGui.QIcon()
        icon17.addPixmap(QtGui.QPixmap("forward.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionForward.setIcon(icon17)
        self.actionForward.setObjectName("actionForward")
        self.actionSave_as_pdf = QtWidgets.QAction(MainWindow)
        icon18 = QtGui.QIcon()
        icon18.addPixmap(QtGui.QPixmap("pdf-file.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSave_as_pdf.setIcon(icon18)
        self.actionSave_as_pdf.setObjectName("actionSave_as_pdf")
        # Populate the menus and attach them to the menu bar.
        self.menuFile.addAction(self.actionOpen)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionSave_as_pdf)
        self.menuEdit.addAction(self.actionzoom_in)
        self.menuEdit.addAction(self.actionzoom_out)
        self.menuSignal_tools.addAction(self.actionSpectrogram)
        self.menuPlay_navigate.addAction(self.actionPlay)
        self.menuPlay_navigate.addAction(self.actionPause)
        self.menuPlay_navigate.addSeparator()
        self.menuPlay_navigate.addAction(self.actionBackward)
        self.menuPlay_navigate.addAction(self.actionForward)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menuPlay_navigate.menuAction())
        self.menubar.addAction(self.menuSignal_tools.menuAction())
        # All channels start hidden; opensignal() shows them as files load.
        self.signals[0].hide()
        self.checkBox[0].hide()
        self.spectrogram[0].hide()
        self.signals[1].hide()
        self.checkBox[1].hide()
        self.spectrogram[1].hide()
        self.signals[2].hide()
        self.checkBox[2].hide()
        self.spectrogram[2].hide()
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # --- wire menu actions and toolbar buttons to their handlers ---
        self.actionOpen.triggered.connect(lambda:self.opensignal())
        self.actionzoom_in.triggered.connect(lambda:self.zoomin())
        self.actionzoom_out.triggered.connect(lambda:self.zoomout())
        self.actionSave_as_pdf.triggered.connect(lambda:self.savepdf())
        self.actionBackward.triggered.connect(lambda:self.scrlleft())
        self.actionForward.triggered.connect(lambda:self.scrlright())
        self.actionSpectrogram.triggered.connect(lambda:self.spectro())
        self.actionPlay.triggered.connect(lambda:self.playy())
        self.actionPause.triggered.connect(lambda:self.pausee())
        self.Zoom_in.clicked.connect(lambda:self.zoomin())
        self.zoom_out.clicked.connect(lambda:self.zoomout())
        self.left.clicked.connect(lambda:self.scrlleft())
        self.right.clicked.connect(lambda:self.scrlright())
        self.pause.clicked.connect(lambda:self.pausee())
        self.play.clicked.connect(lambda:self.playy())
        self.open.clicked.connect(lambda:self.opensignal())
        self.save.clicked.connect(lambda:self.savepdf())
        self.spec.clicked.connect(lambda:self.spectro())
def readsignal(self):
self.fname=QtGui.QFileDialog.getOpenFileName(self,' txt or CSV or xls',os.getenv('home'),"xls(*.xls) ;; text(*.txt) ;; csv(*.csv)")
path=self.fname[0]
self.data.append(np.genfromtxt(path))
    def opensignal(self):
        """Load a new signal file and start streaming it into one of the
        three channels (assigned round-robin via ``self.counter % 3``)."""
        self.readsignal()
        self.counter+=1
        self.n.append(0)
        self.nn.append(0)
        # Create the plot line for this file on its channel's widget.
        self.data_line.append(self.signals[self.counter % 3].plot(self.data[self.counter], name="mode2"))
        self.pen = pg.mkPen(color=(255, 0, 0))
        # Set timer
        self.timer.append(pg.QtCore.QTimer())
        # Timer signal binding update_data function
        # NOTE(review): the three branches differ only in which
        # update_dataN slot they bind -- update_data1/2/3 have identical
        # bodies.
        x = self.counter
        if x%3 == 0:
            self.timer[x].timeout.connect(lambda: self.update_data1(x))
            self.timer[x].start(50)
        if x%3 == 1:
            self.timer[x].timeout.connect(lambda: self.update_data2(x))
            self.timer[x].start(50)
        if x%3 == 2:
            self.timer[x].timeout.connect(lambda: self.update_data3(x))
            self.timer[x].start(50)
        # The timer interval is 50ms, which can be understood as refreshing data once in 50ms
        #self.timer1.start(50)
        # Reveal this file's channel.
        self.signals[x%3].show()
        self.checkBox[x%3].show()
        self.checkBox[x%3].setChecked(True)
# Data shift left
    def update_data1(self,index):
        # Streaming plot update, fired every 50 ms by the channel timer.
        # Each tick draws 10 more samples; once 1000 samples are visible the
        # x-window scrolls left by advancing self.nn[index].
        if self.n[index] < len(self.data[index]) :
            if self.n[index] < 1000 :
                # Fill phase: grow the trace, window anchored at x=0.
                self.n[index] += 10
                self.data_line[index].setData(self.data[index][0 : self.n[index]])
                self.signals[index%3].plotItem.setXRange(0, self.r[index] , padding=0)
            else :
                # Scroll phase: keep a window of width self.r[index].
                self.nn[index] += 10
                self.n[index] += 10
                self.data_line[index].setData(self.data[index][0 : self.n[index]])
                self.signals[index%3].plotItem.setXRange(self.nn[index],self.r[index] +self.nn[index] , padding=0)
                self.z[index] = 1
        else :
            # Finished: show the whole trace, scaled by the zoom factor z.
            self.data_line[index].setData(self.data[index][0 : self.n[index]])
            self.signals[index%3].plotItem.setXRange(0 , len(self.data[index]) * self.z[index] , padding=0)
def update_data2(self,index):
if self.n[index] < len(self.data[index]) :
if self.n[index] < 1000 :
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0, self.r[index] , padding=0)
else :
self.nn[index] += 10
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(self.nn[index],self.r[index] +self.nn[index] , padding=0)
self.z[index] = 1
else :
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0 , len(self.data[index]) * self.z[index] , padding=0)
def update_data3(self,index):
if self.n[index] < len(self.data[index]) :
if self.n[index] < 1000 :
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0, self.r[index] , padding=0)
else :
self.nn[index] += 10
self.n[index] += 10
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(self.nn[index],self.r[index] +self.nn[index] , padding=0)
self.z[index] = 1
else :
self.data_line[index].setData(self.data[index][0 : self.n[index]])
self.signals[index%3].plotItem.setXRange(0 , len(self.data[index]) * self.z[index] , padding=0)
    def spectro(self):
        """Render a spectrogram image for every checked signal.

        Each spectrogram is drawn with matplotlib, saved to 'spectro<i>.png'
        and displayed in the matching QLabel (self.spectrogram[i]).
        """
        # index = first signal of the most recent group of three plot slots.
        index = (len(self.data) - 1) - ((len(self.data)-1)%3)
        for i in range (0,3):
            if (self.checkBox[i].isChecked()==True):
                self.spectrogram[i].show()
                if i==0:
                    plt.specgram(self.data[index], Fs= 250 )
                elif i == 1:
                    # Use this slot's signal if it exists in the current
                    # group; otherwise fall back to the previous group.
                    if (len(self.data ) - 1 - index >= 1):
                        plt.specgram(self.data[index + 1], Fs= 250 )
                    else:
                        plt.specgram(self.data[index - 2], Fs= 250 )
                else:
                    if (len(self.data) - 1 - index == 2):
                        plt.specgram(self.data[index + 2], Fs= 250 )
                    else:
                        plt.specgram(self.data[index - 1], Fs= 250 )
                # Persist the figure and show it in the label widget.
                plt.savefig('spectro'+str(i)+'.png', dpi=300, bbox_inches='tight')
                self.spectrogram[i].setPixmap(QtGui.QPixmap('spectro'+str(i)+'.png'))
                plt.close(None)
def pausee(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
if self.timer[i].isActive():
self.timer[i].stop()
def playy(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
if self.timer[i].isActive()==False:
self.timer[i].start()
def zoomin(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
self.signals[i].plotItem.getViewBox().scaleBy(x=0.5,y=1)
self.r[i]=self.r[i]*0.5
self.z[i] = self.z[i] * 0.5
def zoomout(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
self.signals[i].plotItem.getViewBox().scaleBy(x=2,y=1)
self.r[i]=self.r[i]*2
self.z[i] = self.z[i] * 2
def scrlleft(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
self.signals[i].plotItem.getViewBox().translateBy(x=-100,y=0)
def scrlright(self):
for i in range (0,3):
if (self.checkBox[i].isChecked()==True):
self.signals[i].plotItem.getViewBox().translateBy(x=100,y=0)
#
    def savepdf(self):
        """Export the checked signals and their spectrograms to a PDF file.

        Builds a 3x2 matplotlib grid (left column: waveform, right column:
        spectrogram) for the most recent group of up to three signals, then
        asks the user for a destination file and saves the figure there.
        """
        fig=plt.figure(figsize=(1000, 1000))
        # index = first signal of the most recent group of three plot slots.
        index = (len(self.data) - 1) - ((len(self.data)-1)%3)
        spectrogramData = []
        for i in range (0,3):
            if (self.checkBox[i].isChecked()==True):
                if i == 0:
                    plt.subplot(3,2,1)
                    spectrogramData = list(self.data[index][0:])
                    plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                    plt.subplot(3,2,2)
                elif i == 1:
                    # Use this slot's signal if it exists in the current
                    # group; otherwise fall back to the previous group.
                    if (len(self.data ) - 1 - index >= 1):
                        plt.subplot(3,2,3)
                        spectrogramData = list(self.data[index+1][0:])
                        plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                        plt.subplot(3,2,4)
                    else:
                        plt.subplot(3,2,3)
                        spectrogramData = list(self.data[index-2][0:])
                        plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                        plt.subplot(3,2,4)
                else:
                    if (len(self.data) - 1 - index == 2):
                        plt.subplot(3,2,5)
                        spectrogramData = list(self.data[index+2][0:])
                        plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                        plt.subplot(3,2,6)
                    else:
                        plt.subplot(3,2,5)
                        spectrogramData = list(self.data[index-1][0:])
                        plt.plot(spectrogramData,linewidth=0.5,scalex=True)
                        plt.subplot(3,2,6)
                # Spectrogram goes into the right-hand subplot selected above.
                plt.specgram(spectrogramData, Fs= 250)
        plt.subplots_adjust(bottom=0.1,right=0.9,top=1.0)
        plt.show()
        # Ask for the destination path; append .pdf if no suffix was given.
        fn,_=QtWidgets.QFileDialog.getSaveFileName(self,"Export PDF",None,"PDF files(.pdf);;AllFiles()")
        if fn:
            if QtCore.QFileInfo(fn).suffix()=="":
                fn+=".pdf"
            fig.savefig(fn)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.checkBox[1].setText(_translate("MainWindow", "signal-2"))
self.checkBox[1].setShortcut(_translate("MainWindow", "2"))
self.checkBox[2].setText(_translate("MainWindow", "signal-3"))
self.checkBox[2].setShortcut(_translate("MainWindow", "3"))
self.checkBox[0].setText(_translate("MainWindow", "signal-1"))
self.checkBox[0].setShortcut(_translate("MainWindow", "1"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
self.menuSignal_tools.setTitle(_translate("MainWindow", "Signal tools"))
self.menuPlay_navigate.setTitle(_translate("MainWindow", "Play and navigate "))
self.actionOpen.setText(_translate("MainWindow", "Open"))
self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+o"))
self.actionzoom_in.setText(_translate("MainWindow", "Zoom-in"))
self.actionzoom_in.setShortcut(_translate("MainWindow", "Up"))
self.actionzoom_out.setText(_translate("MainWindow", "Zoom-out"))
self.actionzoom_out.setShortcut(_translate("MainWindow", "Down"))
self.actionSpectrogram.setText(_translate("MainWindow", "Spectrogram"))
self.actionSpectrogram.setShortcut(_translate("MainWindow", "S"))
self.actionPlay.setText(_translate("MainWindow", "Play"))
self.actionPlay.setShortcut(_translate("MainWindow", "Space"))
self.actionPause.setText(_translate("MainWindow", "Pause"))
self.actionPause.setShortcut(_translate("MainWindow", "Shift+Space"))
self.actionBackward.setText(_translate("MainWindow", "Backward"))
self.actionBackward.setShortcut(_translate("MainWindow", "Left"))
self.actionForward.setText(_translate("MainWindow", "Forward"))
self.actionForward.setShortcut(_translate("MainWindow", "Right"))
self.actionSave_as_pdf.setText(_translate("MainWindow", "Save as pdf"))
self.actionSave_as_pdf.setShortcut(_translate("MainWindow", "Ctrl+S"))
if __name__ == "__main__":
    import sys

    def _main():
        """Create the Qt application, build the UI and run the event loop."""
        application = QtWidgets.QApplication(sys.argv)
        window = QtWidgets.QMainWindow()
        ui = Ui_MainWindow()
        ui.setupUi(window)
        window.show()
        sys.exit(application.exec_())

    _main()
| [
"PyQt5.QtGui.QIcon",
"matplotlib.pyplot.specgram",
"PyQt5.QtWidgets.QApplication",
"pyqtgraph.mkPen",
"pyqtgraph.QtCore.QTimer",
"numpy.genfromtxt",
"PyQt5.QtCore.QFileInfo",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.Qt... | [((553, 577), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (567, 577), False, 'import matplotlib\n'), ((23354, 23386), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (23376, 23386), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((23404, 23427), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (23425, 23427), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1055, 1076), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""Mw.png"""'], {}), "('Mw.png')\n", (1066, 1076), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1143, 1172), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (1160, 1172), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3264, 3305), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3285, 3305), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3410, 3423), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (3421, 3423), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3610, 3651), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3631, 3651), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3757, 3770), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (3768, 3770), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3977, 4018), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3998, 4018), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4129, 4142), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (4140, 4142), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), 
((4343, 4384), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4364, 4384), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4498, 4511), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (4509, 4511), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4714, 4755), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4735, 4755), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4862, 4875), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (4873, 4875), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5066, 5107), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5087, 5107), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5214, 5227), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (5225, 5227), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5424, 5465), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5445, 5465), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5574, 5587), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (5585, 5587), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5779, 5820), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5800, 5820), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5929, 5942), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (5940, 5942), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((6142, 6183), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6163, 6183), False, 'from PyQt5 
import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((6291, 6304), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (6302, 6304), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7105, 7135), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (7123, 7135), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7269, 7298), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (7284, 7298), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7371, 7400), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (7386, 7400), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7481, 7510), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (7496, 7510), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7608, 7637), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (7623, 7637), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7775, 7804), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (7792, 7804), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7821, 7834), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (7832, 7834), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8054, 8083), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (8071, 8083), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8101, 8114), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8112, 8114), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8340, 8369), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (8357, 8369), False, 'from PyQt5 import QtCore, 
QtGui, QtWidgets, QtPrintSupport\n'), ((8387, 8400), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8398, 8400), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8631, 8660), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (8648, 8660), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8678, 8691), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8689, 8691), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8921, 8950), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (8938, 8950), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8968, 8981), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (8979, 8981), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9197, 9226), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (9214, 9226), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9244, 9257), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (9255, 9257), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9489, 9518), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (9506, 9518), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9536, 9549), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (9547, 9549), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9776, 9805), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (9793, 9805), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9823, 9836), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (9834, 9836), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((10063, 10092), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), 
'(MainWindow)\n', (10080, 10092), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((10110, 10123), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (10121, 10123), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((11528, 11577), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (11565, 11577), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((13223, 13250), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '(255, 0, 0)'}), '(color=(255, 0, 0))\n', (13231, 13250), True, 'import pyqtgraph as pg\n'), ((19065, 19097), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1000, 1000)'}), '(figsize=(1000, 1000))\n', (19075, 19097), True, 'import matplotlib.pyplot as plt\n'), ((20768, 20819), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.1)', 'right': '(0.9)', 'top': '(1.0)'}), '(bottom=0.1, right=0.9, top=1.0)\n', (20787, 20819), True, 'import matplotlib.pyplot as plt\n'), ((20826, 20836), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20834, 20836), True, 'import matplotlib.pyplot as plt\n'), ((20850, 20948), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""Export PDF"""', 'None', '"""PDF files(.pdf);;AllFiles()"""'], {}), "(self, 'Export PDF', None,\n 'PDF files(.pdf);;AllFiles()')\n", (20887, 20948), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3336, 3362), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(1)', '(35)', '(35)'], {}), '(0, 1, 35, 35)\n', (3348, 3362), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3448, 3477), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/open.png"""'], {}), "('img/open.png')\n", (3461, 3477), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3682, 3709), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', 
'(1)', '(35)', '(35)'], {}), '(30, 1, 35, 35)\n', (3694, 3709), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((3795, 3824), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/save.png"""'], {}), "('img/save.png')\n", (3808, 3824), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4052, 4079), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(1)', '(35)', '(35)'], {}), '(60, 1, 35, 35)\n', (4064, 4079), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4166, 4198), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/zoom-in.png"""'], {}), "('img/zoom-in.png')\n", (4179, 4198), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4419, 4446), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(1)', '(35)', '(35)'], {}), '(90, 1, 35, 35)\n', (4431, 4446), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4536, 4569), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/zoom-out.png"""'], {}), "('img/zoom-out.png')\n", (4549, 4569), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4786, 4814), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(120)', '(1)', '(35)', '(35)'], {}), '(120, 1, 35, 35)\n', (4798, 4814), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((4900, 4933), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/previous.png"""'], {}), "('img/previous.png')\n", (4913, 4933), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5138, 5166), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(1)', '(35)', '(35)'], {}), '(150, 1, 35, 35)\n', (5150, 5166), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5252, 5281), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/play.png"""'], {}), "('img/play.png')\n", (5265, 5281), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5497, 5525), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(180)', '(1)', 
'(35)', '(35)'], {}), '(180, 1, 35, 35)\n', (5509, 5525), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5612, 5641), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/next.png"""'], {}), "('img/next.png')\n", (5625, 5641), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5852, 5880), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(210)', '(1)', '(35)', '(35)'], {}), '(210, 1, 35, 35)\n', (5864, 5880), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((5967, 5997), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/pause.png"""'], {}), "('img/pause.png')\n", (5980, 5997), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((6214, 6242), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(240)', '(1)', '(35)', '(35)'], {}), '(240, 1, 35, 35)\n', (6226, 6242), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((6330, 6361), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""img/spec3.jpeg"""'], {}), "('img/spec3.jpeg')\n", (6343, 6361), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7169, 7197), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(1010)', '(21)'], {}), '(0, 0, 1010, 21)\n', (7181, 7197), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((7859, 7886), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""search.png"""'], {}), "('search.png')\n", (7872, 7886), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8140, 8170), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""zoom-in_1.png"""'], {}), "('zoom-in_1.png')\n", (8153, 8170), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8426, 8455), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""zoom-out.png"""'], {}), "('zoom-out.png')\n", (8439, 8455), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((8717, 8743), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""sound.png"""'], {}), "('sound.png')\n", 
(8730, 8743), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9007, 9039), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""play-button.png"""'], {}), "('play-button.png')\n", (9020, 9039), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9283, 9316), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""pause-button.png"""'], {}), "('pause-button.png')\n", (9296, 9316), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9575, 9604), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""backward.png"""'], {}), "('backward.png')\n", (9588, 9604), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((9862, 9890), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""forward.png"""'], {}), "('forward.png')\n", (9875, 9890), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((10149, 10178), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""pdf-file.png"""'], {}), "('pdf-file.png')\n", (10162, 10178), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((12832, 12849), 'os.getenv', 'os.getenv', (['"""home"""'], {}), "('home')\n", (12841, 12849), False, 'import os\n'), ((12945, 12964), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {}), '(path)\n', (12958, 12964), True, 'import numpy as np\n'), ((13298, 13316), 'pyqtgraph.QtCore.QTimer', 'pg.QtCore.QTimer', ([], {}), '()\n', (13314, 13316), True, 'import pyqtgraph as pg\n'), ((1303, 1333), 'pyqtgraph.PlotWidget', 'PlotWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1313, 1333), False, 'from pyqtgraph import PlotWidget, PlotItem\n'), ((1381, 1417), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1397, 1417), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1453, 1492), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1472, 1492), False, 'from PyQt5 import QtCore, 
QtGui, QtWidgets, QtPrintSupport\n'), ((17690, 17705), 'matplotlib.pyplot.close', 'plt.close', (['None'], {}), '(None)\n', (17699, 17705), True, 'import matplotlib.pyplot as plt\n'), ((20713, 20750), 'matplotlib.pyplot.specgram', 'plt.specgram', (['spectrogramData'], {'Fs': '(250)'}), '(spectrogramData, Fs=250)\n', (20725, 20750), True, 'import matplotlib.pyplot as plt\n'), ((1561, 1591), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(90)', '(461)', '(192)'], {}), '(20, 90, 461, 192)\n', (1573, 1591), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1700, 1731), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(490)', '(90)', '(471)', '(192)'], {}), '(490, 90, 471, 192)\n', (1712, 1731), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((1842, 1870), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(50)', '(68)', '(20)'], {}), '(20, 50, 68, 20)\n', (1854, 1870), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((16962, 17000), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index]'], {'Fs': '(250)'}), '(self.data[index], Fs=250)\n', (16974, 17000), True, 'import matplotlib.pyplot as plt\n'), ((19324, 19344), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (19335, 19344), True, 'import matplotlib.pyplot as plt\n'), ((19429, 19482), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], {'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, linewidth=0.5, scalex=True)\n', (19437, 19482), True, 'import matplotlib.pyplot as plt\n'), ((19501, 19521), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(2)'], {}), '(3, 2, 2)\n', (19512, 19521), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2031), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(340)', '(461)', '(192)'], {}), '(20, 340, 461, 192)\n', (2012, 2031), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2140, 2172), 'PyQt5.QtCore.QRect', 
'QtCore.QRect', (['(490)', '(340)', '(471)', '(192)'], {}), '(490, 340, 471, 192)\n', (2152, 2172), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2283, 2312), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(300)', '(68)', '(20)'], {}), '(20, 300, 68, 20)\n', (2295, 2312), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2435, 2466), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(600)', '(461)', '(192)'], {}), '(20, 600, 461, 192)\n', (2447, 2466), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2575, 2607), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(490)', '(600)', '(471)', '(192)'], {}), '(490, 600, 471, 192)\n', (2587, 2607), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((2718, 2747), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(560)', '(68)', '(20)'], {}), '(20, 560, 68, 20)\n', (2730, 2747), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((20972, 20992), 'PyQt5.QtCore.QFileInfo', 'QtCore.QFileInfo', (['fn'], {}), '(fn)\n', (20988, 20992), False, 'from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport\n'), ((17119, 17161), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index + 1]'], {'Fs': '(250)'}), '(self.data[index + 1], Fs=250)\n', (17131, 17161), True, 'import matplotlib.pyplot as plt\n'), ((17214, 17256), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index - 2]'], {'Fs': '(250)'}), '(self.data[index - 2], Fs=250)\n', (17226, 17256), True, 'import matplotlib.pyplot as plt\n'), ((17364, 17406), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index + 2]'], {'Fs': '(250)'}), '(self.data[index + 2], Fs=250)\n', (17376, 17406), True, 'import matplotlib.pyplot as plt\n'), ((17460, 17502), 'matplotlib.pyplot.specgram', 'plt.specgram', (['self.data[index - 1]'], {'Fs': '(250)'}), '(self.data[index - 1], Fs=250)\n', (17472, 17502), True, 'import matplotlib.pyplot as plt\n'), ((19657, 
19677), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (19668, 19677), True, 'import matplotlib.pyplot as plt\n'), ((19771, 19824), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], {'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, linewidth=0.5, scalex=True)\n', (19779, 19824), True, 'import matplotlib.pyplot as plt\n'), ((19847, 19867), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(4)'], {}), '(3, 2, 4)\n', (19858, 19867), True, 'import matplotlib.pyplot as plt\n'), ((19916, 19936), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (19927, 19936), True, 'import matplotlib.pyplot as plt\n'), ((20030, 20083), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], {'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, linewidth=0.5, scalex=True)\n', (20038, 20083), True, 'import matplotlib.pyplot as plt\n'), ((20106, 20126), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(4)'], {}), '(3, 2, 4)\n', (20117, 20126), True, 'import matplotlib.pyplot as plt\n'), ((20229, 20249), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (20240, 20249), True, 'import matplotlib.pyplot as plt\n'), ((20343, 20396), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], {'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, linewidth=0.5, scalex=True)\n', (20351, 20396), True, 'import matplotlib.pyplot as plt\n'), ((20419, 20439), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(6)'], {}), '(3, 2, 6)\n', (20430, 20439), True, 'import matplotlib.pyplot as plt\n'), ((20488, 20508), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (20499, 20508), True, 'import matplotlib.pyplot as plt\n'), ((20602, 20655), 'matplotlib.pyplot.plot', 'plt.plot', (['spectrogramData'], {'linewidth': '(0.5)', 'scalex': '(True)'}), '(spectrogramData, 
linewidth=0.5, scalex=True)\n', (20610, 20655), True, 'import matplotlib.pyplot as plt\n'), ((20678, 20698), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(6)'], {}), '(3, 2, 6)\n', (20689, 20698), True, 'import matplotlib.pyplot as plt\n')] |
"""Main Parser Class"""
import json
from .segments.utilities.operations import convert_bitmap_to_active_bits
from .segments.iso import ISO
from .segments.header import Header
from .segments.mti import MTI
from .segments.primary_bitmap import PrimaryBitmap
from .segments.data_elements import DataElements
class Parser:
    """Parse a raw ISO 8583 message into its fixed-position segments."""

    def __init__(self, message):
        """Slice *message* into its segments and decode the primary bitmap.

        :param message: raw ISO 8583 message string
        """
        # Parser variables
        self.message = message
        self.active_data_elements = []
        self.json_data = None
        # Parser segments: each segment class declares its own slice bounds.
        raw = self.message
        self.iso = raw[ISO.start_position:ISO.end_position]
        self.header = raw[Header.start_position:Header.end_position]
        self.mti = raw[MTI.start_position:MTI.end_position]
        self.primary_bitmap = raw[PrimaryBitmap.start_position:PrimaryBitmap.end_position]
        self.data_elements = raw[DataElements.start_position:]
        # Decode the bitmap immediately so the parsed state is ready.
        self.iso_8583()

    def iso_8583(self):
        """Decode the primary bitmap into the list of active data elements."""
        self.active_data_elements = convert_bitmap_to_active_bits(self.primary_bitmap)

    def get_json(self, save=False):
        """Return the parsed message as a JSON string.

        :param save: when True, also write the JSON to ``data.json``
        :return: JSON string with the decoded segments
        """
        parsed = {
            "literal": self.iso,
            "header": Header.get_message(self.header),
            "mti": MTI.get_type(self.mti),
            "primary_bitmap": self.primary_bitmap,
            "data_elements": DataElements.get_all_data_elements(
                self.active_data_elements, self.data_elements),
        }
        if save:
            with open('data.json', 'w', encoding='utf-8') as out:
                json.dump(parsed, out, ensure_ascii=False, indent=4)
        return json.dumps(parsed)
| [
"json.dumps",
"json.dump"
] | [((1740, 1762), 'json.dumps', 'json.dumps', (['to_convert'], {}), '(to_convert)\n', (1750, 1762), False, 'import json\n'), ((1669, 1723), 'json.dump', 'json.dump', (['to_convert', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(to_convert, f, ensure_ascii=False, indent=4)\n', (1678, 1723), False, 'import json\n')] |
from dataclasses import dataclass
from logging import getLogger
from typing import List, Callable, Awaitable, Optional, Dict
from pydantic.main import BaseModel
@dataclass
class ActionHandler:
    """Named asynchronous callback registered as a UI action."""
    # Name the action is exposed under.
    name: str
    # Human-readable description shown next to the action.
    description: str
    # Zero-argument coroutine function invoked when the action fires.
    handler: Callable[[], Awaitable]

    async def __call__(self):
        """Run the wrapped handler and return its result."""
        result = await self.handler()
        return result
class ActionInfo(BaseModel):
    """Serializable description of a registered UI action."""
    # Name the action is exposed under.
    name: str
    # Human-readable description of what the action does.
    description: str
class Action:
    """
    Expose user-defined actions as 'buttons' in UI

    Expose async function as button to ui.

    It will not be automatically journaled: it's up to you
    add ``@binp.journal`` annotation or not.

    :Example:

    .. code-block:: python

        from binp import BINP
        from asyncio import sleep

        binp = BINP()

        @binp.action
        async def invoke():
            '''
            Do something
            '''
            await sleep(3) # emulate some work
            print("done")

    By default, action will be exposed with name equal to fully-qualified
    function name and description from doc-string (if exists).
    Exposed name could by optionally defined manually.

    .. code-block:: python

        from binp import BINP
        from asyncio import sleep

        binp = BINP()

        @binp.action(name='Do Something', description='Emulate some heavy work')
        async def invoke():
            await sleep(3)
            print("done")

    :Conflicts:

    Actions are indexed by name. If multiple actions defined with the same
    name - the latest one will be used (a warning is logged).
    """

    def __init__(self):
        # Registered handlers, indexed by their exposed name.
        self.__actions: Dict[str, ActionHandler] = {}

    def __call__(self, func: Optional[Callable[[], Awaitable]] = None, *, name: Optional[str] = None,
                 description: Optional[str] = None):
        """
        Decorator that expose function as an action in UI (ex: button)

        :param func: coroutine function (when the decorator is used bare)
        :param name: exposed name; defaults to the function's qualified name
        :param description: exposed description; defaults to the doc-string
        """

        def trace_operation(fn: Callable[[], Awaitable]):
            # Resolve name/description per decorated function instead of
            # mutating the enclosing scope with ``nonlocal``: the previous
            # version cached the first function's resolved name, so reusing
            # the returned decorator registered every later function under
            # that first name.
            action_name = name if name is not None else fn.__qualname__
            if description is not None:
                action_description = description
            else:
                doc = fn.__doc__ or ''
                action_description = "\n".join(line.strip() for line in doc.splitlines()).strip()
            if action_name in self.__actions:
                old = self.__actions[action_name]
                getLogger(self.__class__.__qualname__).warning(
                    "redefining UI action %r: %s => %s", action_name,
                    old.handler.__qualname__, fn.__qualname__)
            self.__actions[action_name] = ActionHandler(name=action_name,
                                                         description=action_description,
                                                         handler=fn)
            return fn

        if func is None:
            return trace_operation
        return trace_operation(func)

    async def invoke(self, name: str) -> bool:
        """
        Invoke action by name or ignore. If handler will raise an error, the error will NOT be suppressed.

        :param name: action name
        :return: true if action invoked
        """
        handler = self.__actions.get(name)
        if handler is None:
            getLogger(self.__class__.__qualname__).warning("attempt to invoke unknown action %r", name)
            return False
        await handler()
        return True

    @property
    def actions(self) -> List[ActionInfo]:
        """
        Copy of list of defined actions prepared for serialization.
        """
        return [ActionInfo(name=x.name, description=x.description) for x in self.__actions.values()]
| [
"logging.getLogger"
] | [((3036, 3074), 'logging.getLogger', 'getLogger', (['self.__class__.__qualname__'], {}), '(self.__class__.__qualname__)\n', (3045, 3074), False, 'from logging import getLogger\n'), ((2286, 2324), 'logging.getLogger', 'getLogger', (['self.__class__.__qualname__'], {}), '(self.__class__.__qualname__)\n', (2295, 2324), False, 'from logging import getLogger\n')] |
from collections import Counter
def parse_policy(policy):
    """Split a policy string like '1-3 a' into the tuple (low, high, character)."""
    bounds, char = policy.split()
    low, high = bounds.split('-')
    return int(low), int(high), char
def pwcheck(pw, policy):
    """Part-1 rule: the policy character must occur between lo and hi times in pw."""
    bounds, char = policy.split()
    low_str, high_str = bounds.split('-')
    low, high = int(low_str), int(high_str)
    occurrences = Counter(pw)
    # A character that never appears can never satisfy the policy.
    if char not in occurrences:
        return False
    return low <= occurrences[char] <= high
def pwcheck2(pw, policy):
    """Part-2 rule: exactly one of the two 1-indexed positions holds the character."""
    bounds, char = policy.split()
    # Convert the 1-indexed policy positions to 0-indexed string offsets.
    first, second = (int(part) - 1 for part in bounds.split('-'))
    return (pw[first] == char) + (pw[second] == char) == 1
if __name__ == '__main__':
    import fileinput

    def _count_valid(lines, checker):
        """Count input lines whose password satisfies `checker` for its policy.

        Each line has the form 'POLICY: PASSWORD'; the password is stripped of
        surrounding whitespace (including the trailing newline) before checking.
        """
        total = 0
        for line in lines:
            policy, pw = line.split(':')
            if checker(pw.strip(), policy):
                total += 1
        return total

    # Read the input once; both parts score the same list of lines.
    # (The original duplicated this counting loop verbatim for each part.)
    lines = list(fileinput.input())
    print('Part 1:', _count_valid(lines, pwcheck))
    print('Part 2:', _count_valid(lines, pwcheck2))
| [
"collections.Counter",
"fileinput.input"
] | [((212, 223), 'collections.Counter', 'Counter', (['pw'], {}), '(pw)\n', (219, 223), False, 'from collections import Counter\n'), ((581, 598), 'fileinput.input', 'fileinput.input', ([], {}), '()\n', (596, 598), False, 'import fileinput\n')] |
import h5py
import matplotlib.pyplot as plt
import numpy as np
import scipy.io
import scipy.stats
import complex_pca
def plot_pca_variance_curve(x: np.ndarray, title: str = 'PCA -- Variance Explained Curve') -> None:
    """Plot the cumulative fraction of variance explained vs. number of principal components."""
    n_features = x.shape[1]
    decomposition = complex_pca.ComplexPCA(n_components=n_features)
    decomposition.fit(x)
    # Normalize so the curve ends at 1 even if the ratios do not sum to exactly 1.
    ratios = decomposition.explained_variance_ratio_
    cumulative_fraction = np.cumsum(ratios) / np.sum(ratios)
    plt.figure()
    plt.plot(range(1, n_features + 1), cumulative_fraction)
    plt.xlabel('Number of Principal Components')
    plt.ylabel('Proportion of Variance Captured')
    plt.title(title)
    plt.grid(True)
# noinspection DuplicatedCode
def main() -> None:
    """Load one OFDM channel-estimation dataset and produce diagnostic plots.

    Reads received/transmitted symbols from an HDF5 file plus MATLAB
    'constant features' (tone indices and reference LTF sequences), computes
    per-subcarrier channel-gain estimates (rx / tx), and renders whichever
    plot groups are enabled by the plot_* flags below.
    """
    # data_path = r'D:\EE 364D\dataset\synthetic_data\channel_specific\train_indoor\subsampled\10_percent\train_indoor_channel_e_flat_3.h5'
    data_path = r'D:\EE 364D\dataset\synthetic_data\channel_specific\test_indoor_20dB\test_indoor_20dB_channel_e_flat.h5'
    constant_features_path = '../data_preprocessing/constant_features.mat'
    data = h5py.File(data_path, 'r')
    constant_features = scipy.io.loadmat(constant_features_path, squeeze_me=True)
    constant_features = constant_features['constant']
    # Number of data points to use.
    n = 1
    # Data and pilot extraction.
    # MATLAB indices are 1-based; subtract 1 for Python/NumPy indexing.
    data_indices = constant_features['iMDataTone_HePpdu'][()].astype(np.int32) - 1
    pilot_indices = constant_features['iMPilotTone_HePpdu'][()].astype(np.int32) - 1
    data_size = 256
    # rx_*/tx_* arrays: presumably shape (n, num_tones) complex — TODO confirm against HDF5 schema.
    rx_pilot = np.array(data['rx_pilot'][0:n, :])
    tx_pilot = np.array(data['tx_pilot'][0:n, :])
    # Channel gain estimate at pilot tones: H = rx / tx per subcarrier.
    pilot_gain = rx_pilot / tx_pilot
    rx_data = np.array(data['rx_data'][0:n, :])
    tx_data = np.array(data['tx_data'][0:n, :])
    data_gain = rx_data / tx_data
    # L-LTF extraction.
    l_ltf_size = 64
    rx_l_ltf_1 = np.array(data['rx_l_ltf_1'][0:n, :])
    rx_l_ltf_2 = np.array(data['rx_l_ltf_2'][0:n, :])
    tx_l_ltf = constant_features['txLltfFftOut'][()]
    # Drop null subcarriers (tones where the reference sequence is zero).
    rx_l_ltf_1_trimmed = rx_l_ltf_1[:, tx_l_ltf != 0]
    rx_l_ltf_2_trimmed = rx_l_ltf_2[:, tx_l_ltf != 0]
    tx_l_ltf_trimmed = tx_l_ltf[tx_l_ltf != 0]
    l_ltf_1_trimmed_gain = rx_l_ltf_1_trimmed / tx_l_ltf_trimmed
    l_ltf_2_trimmed_gain = rx_l_ltf_2_trimmed / tx_l_ltf_trimmed
    # HE-LTF extraction.
    he_ltf_data_indices = constant_features['iMDataTone_Heltf'][()].astype(np.int32) - 1
    he_ltf_pilot_indices = constant_features['iMPilotTone_Heltf'][()].astype(np.int32) - 1
    he_ltf_size = 256
    rx_he_ltf_data = np.array(data['rx_he_ltf_data'][0:n, :])
    rx_he_ltf_pilot = np.array(data['rx_he_ltf_pilot'][0:n, :])
    # Re-interleave data and pilot tones back into one full-band HE-LTF vector.
    rx_he_ltf = np.zeros((rx_he_ltf_data.shape[0], he_ltf_size), dtype=complex)
    rx_he_ltf[:, he_ltf_data_indices] = rx_he_ltf_data
    rx_he_ltf[:, he_ltf_pilot_indices] = rx_he_ltf_pilot
    tx_he_ltf = constant_features['txHeltfFftOut'][()]
    rx_he_ltf_trimmed = rx_he_ltf[:, tx_he_ltf != 0]
    tx_he_ltf_trimmed = tx_he_ltf[tx_he_ltf != 0]
    he_ltf_trimmed_gain = rx_he_ltf_trimmed / tx_he_ltf_trimmed
    # Frequency domain.
    # Normalized frequency axis in [0, 1] for each tone grid.
    f = np.linspace(0, 1, data_size)
    f_data = f[data_indices]
    f_pilot = f[pilot_indices]
    f_rx_he_ltf = np.linspace(0, 1, he_ltf_size)
    f_rx_he_ltf_trimmed = f_rx_he_ltf[tx_he_ltf != 0]
    f_l_ltf = np.linspace(0, 1, l_ltf_size)
    f_l_ltf_trimmed = f_l_ltf[tx_l_ltf != 0]
    # Channel instance to use.
    i = 0
    # Make plots.
    # Toggle the individual plot groups below.
    plot_constellation = False
    plot_magnitude = True
    plot_phase = True
    plot_pca = False
    plot_mean_magnitude = False
    plot_correction_phase = False
    if plot_constellation:
        # Transmitted symbols (reference constellation).
        plt.figure()
        plt.scatter(np.real(tx_he_ltf_trimmed), np.imag(tx_he_ltf_trimmed))
        plt.scatter(np.real(tx_l_ltf_trimmed), np.imag(tx_l_ltf_trimmed))
        plt.scatter(np.real(tx_pilot[i, :]), np.imag(tx_pilot[i, :]))
        plt.xlabel('In-phase Component')
        plt.ylabel('Quadrature Component')
        plt.title('Transmitted Symbol Constellation')
        plt.legend(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot'])
        plt.grid()
        # Channel gain estimates in the complex plane.
        plt.figure()
        plt.scatter(np.real(he_ltf_trimmed_gain[i, :]), np.imag(he_ltf_trimmed_gain[i, :]))
        plt.scatter(np.real(l_ltf_1_trimmed_gain[i, :]), np.imag(l_ltf_1_trimmed_gain[i, :]))
        plt.scatter(np.real(l_ltf_2_trimmed_gain[i, :]), np.imag(l_ltf_2_trimmed_gain[i, :]))
        plt.scatter(np.real(pilot_gain[i, :]), np.imag(pilot_gain[i, :]))
        plt.xlabel('In-phase Component')
        plt.ylabel('Quadrature Component')
        plt.title('Channel Gain Estimate Constellation')
        plt.legend(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot'])
        plt.grid()
    if plot_magnitude:
        # Gain magnitude in dB (20*log10|H|) for every estimate source.
        plt.figure()
        plt.scatter(f_rx_he_ltf_trimmed, 20 * np.log10(np.abs(he_ltf_trimmed_gain[i, :])))
        plt.scatter(f_l_ltf_trimmed, 20 * np.log10(np.abs(l_ltf_1_trimmed_gain[i, :])))
        plt.scatter(f_l_ltf_trimmed, 20 * np.log10(np.abs(l_ltf_2_trimmed_gain[i, :])))
        plt.scatter(f_pilot, 20 * np.log10(np.abs(pilot_gain[i, :])))
        plt.scatter(f_data, 20 * np.log10(np.abs(data_gain[i, :])), marker='x')
        plt.xlabel(r'$f$ (normalized)')
        plt.ylabel(r'$|H|^2$ (dB)')
        plt.title('Channel Gain Estimate')
        plt.legend(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data'])
        plt.grid()
    if plot_phase:
        plt.figure()
        # When True, unwrap phase jumps of more than pi before plotting.
        unwrap = False
        if unwrap:
            plt.scatter(f_rx_he_ltf_trimmed, np.unwrap(np.angle(he_ltf_trimmed_gain[i, :])) / np.pi)
            plt.scatter(f_l_ltf_trimmed, np.unwrap(np.angle(l_ltf_1_trimmed_gain[i, :])) / np.pi)
            plt.scatter(f_l_ltf_trimmed, np.unwrap(np.angle(l_ltf_2_trimmed_gain[i, :])) / np.pi)
            plt.scatter(f_pilot, np.unwrap(np.angle(pilot_gain[i, :])) / np.pi)
            plt.scatter(f_data, np.unwrap(np.angle(data_gain[i, :])) / np.pi, marker='x')
        else:
            plt.scatter(f_rx_he_ltf_trimmed, np.angle(he_ltf_trimmed_gain[i, :]) / np.pi)
            plt.scatter(f_l_ltf_trimmed, np.angle(l_ltf_1_trimmed_gain[i, :]) / np.pi)
            plt.scatter(f_l_ltf_trimmed, np.angle(l_ltf_2_trimmed_gain[i, :]) / np.pi)
            plt.scatter(f_pilot, np.angle(pilot_gain[i, :]) / np.pi)
            plt.scatter(f_data, np.angle(data_gain[i, :]) / np.pi, marker='x')
        plt.xlabel(r'$f$ (normalized)')
        plt.ylabel(r'$\angle H$ ($\times \pi^{-1}$)')
        plt.title('Channel Phase')
        plt.legend(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data'])
        plt.grid()
    if plot_pca:
        # Variance-explained curves for each estimate source, raw and trimmed.
        plot_pca_variance_curve(he_ltf_trimmed_gain, 'HE-LTF Trimmed Gain')
        plot_pca_variance_curve(rx_he_ltf, 'HE-LTF Raw')
        plot_pca_variance_curve(l_ltf_1_trimmed_gain, 'L-LTF-1 Trimmed Gain')
        plot_pca_variance_curve(rx_l_ltf_1, 'L-LTF-1 Raw')
        plot_pca_variance_curve(l_ltf_2_trimmed_gain, 'L-LTF-2 Trimmed Gain')
        plot_pca_variance_curve(rx_l_ltf_2, 'L-LTF-2 Raw')
        plot_pca_variance_curve(rx_pilot, 'Pilot Raw')
        plot_pca_variance_curve(pilot_gain, 'Pilot Gain')
        plot_pca_variance_curve(np.hstack([
            he_ltf_trimmed_gain,
            l_ltf_1_trimmed_gain,
            l_ltf_2_trimmed_gain,
            pilot_gain
        ]), 'HE-LTF, L-LTF-1, L-LTF-2, and Pilot Trimmed Gain')
    if plot_mean_magnitude:
        # Mean gain magnitude across instances with a +/- one-sigma band.
        plt.figure()
        x = f_rx_he_ltf_trimmed
        y = np.mean(np.abs(he_ltf_trimmed_gain), axis=0)
        s = np.std(np.abs(he_ltf_trimmed_gain), axis=0)
        plt.plot(x, 20 * np.log10(y))
        plt.fill_between(x, 20 * np.log10(y - s), 20 * np.log10(y + s), alpha=0.5)
        plt.xlabel(r'$f$ (normalized)')
        plt.ylabel(r'$|H|^2$ (dB)')
        plt.title('Mean Channel Gain')
        plt.legend([r'$\mu$', r'$\pm\sigma$'])
        plt.grid()
    if plot_correction_phase:
        # Piecewise-linear phase fit: split phase at subcarrier-index gaps,
        # unwrap each contiguous run, then line-fit each run.
        index = np.arange(0, he_ltf_size)[tx_he_ltf != 0]
        phase = np.angle(he_ltf_trimmed_gain[0, :])
        consecutive_phase = np.split(phase, np.where(np.diff(index) != 1)[0] + 1)
        consecutive_index = np.split(index, np.where(np.diff(index) != 1)[0] + 1)
        consecutive_phase = [np.unwrap(x) for x in consecutive_phase]
        consecutive_fits = [scipy.stats.linregress(x, y) for x, y in zip(consecutive_index, consecutive_phase)]
        combined_phase = []
        for x, y in zip(consecutive_index, consecutive_phase):
            # Shift each run onto the first run's fitted line.
            y_hat = x * consecutive_fits[0].slope + consecutive_fits[0].intercept
            # We can add this offset WLoG because phase is 2π periodic.
            offset = 2 * np.pi * np.round((y_hat - y) / (2 * np.pi))
            combined_phase.append(y + offset)
        combined_phase = np.hstack(combined_phase)
        plt.figure()
        for x, y in zip(consecutive_index, consecutive_phase):
            plt.scatter(x, y / np.pi)
        for fit in consecutive_fits:
            x = np.linspace(0, he_ltf_size, 1000)
            y = fit.slope * x + fit.intercept
            plt.plot(x, y / np.pi)
        plt.xlabel('Subcarrier Index')
        plt.ylabel(r'$\angle H$ ($\times \pi^{-1}$)')
        plt.title('HE-LTF Channel Phase Estimates')
        plt.legend([f'Interval {i + 1}' for i in range(len(consecutive_index))])
        plt.grid()
        plt.figure()
        plt.scatter(index, combined_phase / np.pi)
        plt.xlabel('Subcarrier Index')
        plt.ylabel(r'$\angle H$ ($\times \pi^{-1}$)')
        plt.title('HE-LTF Channel Phase Combined Estimate')
        plt.grid()
    plt.show()
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.grid",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"complex_pca.ComplexPCA",
"numpy.unwrap",
"numpy.array",
"numpy.imag",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.real",
"numpy.linspace",
"matplotlib.pypl... | [((230, 277), 'complex_pca.ComplexPCA', 'complex_pca.ComplexPCA', ([], {'n_components': 'x.shape[1]'}), '(n_components=x.shape[1])\n', (252, 277), False, 'import complex_pca\n'), ((298, 310), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (308, 310), True, 'import matplotlib.pyplot as plt\n'), ((436, 480), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Principal Components"""'], {}), "('Number of Principal Components')\n", (446, 480), True, 'import matplotlib.pyplot as plt\n'), ((485, 530), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proportion of Variance Captured"""'], {}), "('Proportion of Variance Captured')\n", (495, 530), True, 'import matplotlib.pyplot as plt\n'), ((535, 551), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (544, 551), True, 'import matplotlib.pyplot as plt\n'), ((556, 570), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (564, 570), True, 'import matplotlib.pyplot as plt\n'), ((972, 997), 'h5py.File', 'h5py.File', (['data_path', '"""r"""'], {}), "(data_path, 'r')\n", (981, 997), False, 'import h5py\n'), ((1419, 1453), 'numpy.array', 'np.array', (["data['rx_pilot'][0:n, :]"], {}), "(data['rx_pilot'][0:n, :])\n", (1427, 1453), True, 'import numpy as np\n'), ((1469, 1503), 'numpy.array', 'np.array', (["data['tx_pilot'][0:n, :]"], {}), "(data['tx_pilot'][0:n, :])\n", (1477, 1503), True, 'import numpy as np\n'), ((1556, 1589), 'numpy.array', 'np.array', (["data['rx_data'][0:n, :]"], {}), "(data['rx_data'][0:n, :])\n", (1564, 1589), True, 'import numpy as np\n'), ((1604, 1637), 'numpy.array', 'np.array', (["data['tx_data'][0:n, :]"], {}), "(data['tx_data'][0:n, :])\n", (1612, 1637), True, 'import numpy as np\n'), ((1735, 1771), 'numpy.array', 'np.array', (["data['rx_l_ltf_1'][0:n, :]"], {}), "(data['rx_l_ltf_1'][0:n, :])\n", (1743, 1771), True, 'import numpy as np\n'), ((1789, 1825), 'numpy.array', 'np.array', (["data['rx_l_ltf_2'][0:n, :]"], {}), 
"(data['rx_l_ltf_2'][0:n, :])\n", (1797, 1825), True, 'import numpy as np\n'), ((2417, 2457), 'numpy.array', 'np.array', (["data['rx_he_ltf_data'][0:n, :]"], {}), "(data['rx_he_ltf_data'][0:n, :])\n", (2425, 2457), True, 'import numpy as np\n'), ((2480, 2521), 'numpy.array', 'np.array', (["data['rx_he_ltf_pilot'][0:n, :]"], {}), "(data['rx_he_ltf_pilot'][0:n, :])\n", (2488, 2521), True, 'import numpy as np\n'), ((2538, 2601), 'numpy.zeros', 'np.zeros', (['(rx_he_ltf_data.shape[0], he_ltf_size)'], {'dtype': 'complex'}), '((rx_he_ltf_data.shape[0], he_ltf_size), dtype=complex)\n', (2546, 2601), True, 'import numpy as np\n'), ((2972, 3000), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'data_size'], {}), '(0, 1, data_size)\n', (2983, 3000), True, 'import numpy as np\n'), ((3080, 3110), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'he_ltf_size'], {}), '(0, 1, he_ltf_size)\n', (3091, 3110), True, 'import numpy as np\n'), ((3180, 3209), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'l_ltf_size'], {}), '(0, 1, l_ltf_size)\n', (3191, 3209), True, 'import numpy as np\n'), ((9387, 9397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9395, 9397), True, 'import matplotlib.pyplot as plt\n'), ((3518, 3530), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3528, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3759, 3791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""In-phase Component"""'], {}), "('In-phase Component')\n", (3769, 3791), True, 'import matplotlib.pyplot as plt\n'), ((3800, 3834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Quadrature Component"""'], {}), "('Quadrature Component')\n", (3810, 3834), True, 'import matplotlib.pyplot as plt\n'), ((3843, 3888), 'matplotlib.pyplot.title', 'plt.title', (['"""Transmitted Symbol Constellation"""'], {}), "('Transmitted Symbol Constellation')\n", (3852, 3888), True, 'import matplotlib.pyplot as plt\n'), ((3897, 3950), 'matplotlib.pyplot.legend', 'plt.legend', (["['HE-LTF', 
'L-LTF-1', 'L-LTF-2', 'Pilot']"], {}), "(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot'])\n", (3907, 3950), True, 'import matplotlib.pyplot as plt\n'), ((3959, 3969), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3967, 3969), True, 'import matplotlib.pyplot as plt\n'), ((3979, 3991), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3989, 3991), True, 'import matplotlib.pyplot as plt\n'), ((4354, 4386), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""In-phase Component"""'], {}), "('In-phase Component')\n", (4364, 4386), True, 'import matplotlib.pyplot as plt\n'), ((4395, 4429), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Quadrature Component"""'], {}), "('Quadrature Component')\n", (4405, 4429), True, 'import matplotlib.pyplot as plt\n'), ((4438, 4486), 'matplotlib.pyplot.title', 'plt.title', (['"""Channel Gain Estimate Constellation"""'], {}), "('Channel Gain Estimate Constellation')\n", (4447, 4486), True, 'import matplotlib.pyplot as plt\n'), ((4495, 4548), 'matplotlib.pyplot.legend', 'plt.legend', (["['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot']"], {}), "(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot'])\n", (4505, 4548), True, 'import matplotlib.pyplot as plt\n'), ((4557, 4567), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4565, 4567), True, 'import matplotlib.pyplot as plt\n'), ((4600, 4612), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4610, 4612), True, 'import matplotlib.pyplot as plt\n'), ((5038, 5068), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$f$ (normalized)"""'], {}), "('$f$ (normalized)')\n", (5048, 5068), True, 'import matplotlib.pyplot as plt\n'), ((5078, 5104), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$|H|^2$ (dB)"""'], {}), "('$|H|^2$ (dB)')\n", (5088, 5104), True, 'import matplotlib.pyplot as plt\n'), ((5114, 5148), 'matplotlib.pyplot.title', 'plt.title', (['"""Channel Gain Estimate"""'], {}), "('Channel Gain Estimate')\n", (5123, 5148), True, 'import matplotlib.pyplot as plt\n'), ((5157, 5218), 
'matplotlib.pyplot.legend', 'plt.legend', (["['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data']"], {}), "(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data'])\n", (5167, 5218), True, 'import matplotlib.pyplot as plt\n'), ((5227, 5237), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5235, 5237), True, 'import matplotlib.pyplot as plt\n'), ((5266, 5278), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5276, 5278), True, 'import matplotlib.pyplot as plt\n'), ((6222, 6252), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$f$ (normalized)"""'], {}), "('$f$ (normalized)')\n", (6232, 6252), True, 'import matplotlib.pyplot as plt\n'), ((6262, 6309), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)"""'], {}), "('$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)')\n", (6272, 6309), True, 'import matplotlib.pyplot as plt\n'), ((6316, 6342), 'matplotlib.pyplot.title', 'plt.title', (['"""Channel Phase"""'], {}), "('Channel Phase')\n", (6325, 6342), True, 'import matplotlib.pyplot as plt\n'), ((6351, 6412), 'matplotlib.pyplot.legend', 'plt.legend', (["['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data']"], {}), "(['HE-LTF', 'L-LTF-1', 'L-LTF-2', 'Pilot', 'Data'])\n", (6361, 6412), True, 'import matplotlib.pyplot as plt\n'), ((6421, 6431), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6429, 6431), True, 'import matplotlib.pyplot as plt\n'), ((7239, 7251), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7249, 7251), True, 'import matplotlib.pyplot as plt\n'), ((7526, 7556), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$f$ (normalized)"""'], {}), "('$f$ (normalized)')\n", (7536, 7556), True, 'import matplotlib.pyplot as plt\n'), ((7566, 7592), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$|H|^2$ (dB)"""'], {}), "('$|H|^2$ (dB)')\n", (7576, 7592), True, 'import matplotlib.pyplot as plt\n'), ((7602, 7632), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Channel Gain"""'], {}), "('Mean Channel Gain')\n", 
(7611, 7632), True, 'import matplotlib.pyplot as plt\n'), ((7641, 7680), 'matplotlib.pyplot.legend', 'plt.legend', (["['$\\\\mu$', '$\\\\pm\\\\sigma$']"], {}), "(['$\\\\mu$', '$\\\\pm\\\\sigma$'])\n", (7651, 7680), True, 'import matplotlib.pyplot as plt\n'), ((7688, 7698), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7696, 7698), True, 'import matplotlib.pyplot as plt\n'), ((7804, 7839), 'numpy.angle', 'np.angle', (['he_ltf_trimmed_gain[0, :]'], {}), '(he_ltf_trimmed_gain[0, :])\n', (7812, 7839), True, 'import numpy as np\n'), ((8573, 8598), 'numpy.hstack', 'np.hstack', (['combined_phase'], {}), '(combined_phase)\n', (8582, 8598), True, 'import numpy as np\n'), ((8608, 8620), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8618, 8620), True, 'import matplotlib.pyplot as plt\n'), ((8900, 8930), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Subcarrier Index"""'], {}), "('Subcarrier Index')\n", (8910, 8930), True, 'import matplotlib.pyplot as plt\n'), ((8939, 8986), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)"""'], {}), "('$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)')\n", (8949, 8986), True, 'import matplotlib.pyplot as plt\n'), ((8993, 9036), 'matplotlib.pyplot.title', 'plt.title', (['"""HE-LTF Channel Phase Estimates"""'], {}), "('HE-LTF Channel Phase Estimates')\n", (9002, 9036), True, 'import matplotlib.pyplot as plt\n'), ((9126, 9136), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9134, 9136), True, 'import matplotlib.pyplot as plt\n'), ((9146, 9158), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9156, 9158), True, 'import matplotlib.pyplot as plt\n'), ((9167, 9209), 'matplotlib.pyplot.scatter', 'plt.scatter', (['index', '(combined_phase / np.pi)'], {}), '(index, combined_phase / np.pi)\n', (9178, 9209), True, 'import matplotlib.pyplot as plt\n'), ((9218, 9248), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Subcarrier Index"""'], {}), "('Subcarrier Index')\n", (9228, 
9248), True, 'import matplotlib.pyplot as plt\n'), ((9257, 9304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)"""'], {}), "('$\\\\angle H$ ($\\\\times \\\\pi^{-1}$)')\n", (9267, 9304), True, 'import matplotlib.pyplot as plt\n'), ((9311, 9362), 'matplotlib.pyplot.title', 'plt.title', (['"""HE-LTF Channel Phase Combined Estimate"""'], {}), "('HE-LTF Channel Phase Combined Estimate')\n", (9320, 9362), True, 'import matplotlib.pyplot as plt\n'), ((9371, 9381), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9379, 9381), True, 'import matplotlib.pyplot as plt\n'), ((350, 390), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (359, 390), True, 'import numpy as np\n'), ((393, 430), 'numpy.sum', 'np.sum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (399, 430), True, 'import numpy as np\n'), ((3551, 3577), 'numpy.real', 'np.real', (['tx_he_ltf_trimmed'], {}), '(tx_he_ltf_trimmed)\n', (3558, 3577), True, 'import numpy as np\n'), ((3579, 3605), 'numpy.imag', 'np.imag', (['tx_he_ltf_trimmed'], {}), '(tx_he_ltf_trimmed)\n', (3586, 3605), True, 'import numpy as np\n'), ((3627, 3652), 'numpy.real', 'np.real', (['tx_l_ltf_trimmed'], {}), '(tx_l_ltf_trimmed)\n', (3634, 3652), True, 'import numpy as np\n'), ((3654, 3679), 'numpy.imag', 'np.imag', (['tx_l_ltf_trimmed'], {}), '(tx_l_ltf_trimmed)\n', (3661, 3679), True, 'import numpy as np\n'), ((3701, 3724), 'numpy.real', 'np.real', (['tx_pilot[i, :]'], {}), '(tx_pilot[i, :])\n', (3708, 3724), True, 'import numpy as np\n'), ((3726, 3749), 'numpy.imag', 'np.imag', (['tx_pilot[i, :]'], {}), '(tx_pilot[i, :])\n', (3733, 3749), True, 'import numpy as np\n'), ((4012, 4046), 'numpy.real', 'np.real', (['he_ltf_trimmed_gain[i, :]'], {}), '(he_ltf_trimmed_gain[i, :])\n', (4019, 4046), True, 'import numpy as np\n'), ((4048, 4082), 'numpy.imag', 'np.imag', (['he_ltf_trimmed_gain[i, :]'], {}), 
'(he_ltf_trimmed_gain[i, :])\n', (4055, 4082), True, 'import numpy as np\n'), ((4104, 4139), 'numpy.real', 'np.real', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (4111, 4139), True, 'import numpy as np\n'), ((4141, 4176), 'numpy.imag', 'np.imag', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (4148, 4176), True, 'import numpy as np\n'), ((4198, 4233), 'numpy.real', 'np.real', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (4205, 4233), True, 'import numpy as np\n'), ((4235, 4270), 'numpy.imag', 'np.imag', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (4242, 4270), True, 'import numpy as np\n'), ((4292, 4317), 'numpy.real', 'np.real', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (4299, 4317), True, 'import numpy as np\n'), ((4319, 4344), 'numpy.imag', 'np.imag', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (4326, 4344), True, 'import numpy as np\n'), ((7002, 7094), 'numpy.hstack', 'np.hstack', (['[he_ltf_trimmed_gain, l_ltf_1_trimmed_gain, l_ltf_2_trimmed_gain, pilot_gain]'], {}), '([he_ltf_trimmed_gain, l_ltf_1_trimmed_gain, l_ltf_2_trimmed_gain,\n pilot_gain])\n', (7011, 7094), True, 'import numpy as np\n'), ((7304, 7331), 'numpy.abs', 'np.abs', (['he_ltf_trimmed_gain'], {}), '(he_ltf_trimmed_gain)\n', (7310, 7331), True, 'import numpy as np\n'), ((7360, 7387), 'numpy.abs', 'np.abs', (['he_ltf_trimmed_gain'], {}), '(he_ltf_trimmed_gain)\n', (7366, 7387), True, 'import numpy as np\n'), ((7746, 7771), 'numpy.arange', 'np.arange', (['(0)', 'he_ltf_size'], {}), '(0, he_ltf_size)\n', (7755, 7771), True, 'import numpy as np\n'), ((8033, 8045), 'numpy.unwrap', 'np.unwrap', (['x'], {}), '(x)\n', (8042, 8045), True, 'import numpy as np\n'), ((8696, 8721), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', '(y / np.pi)'], {}), '(x, y / np.pi)\n', (8707, 8721), True, 'import matplotlib.pyplot as plt\n'), ((8776, 8809), 'numpy.linspace', 'np.linspace', 
(['(0)', 'he_ltf_size', '(1000)'], {}), '(0, he_ltf_size, 1000)\n', (8787, 8809), True, 'import numpy as np\n'), ((8868, 8890), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(y / np.pi)'], {}), '(x, y / np.pi)\n', (8876, 8890), True, 'import matplotlib.pyplot as plt\n'), ((7422, 7433), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (7430, 7433), True, 'import numpy as np\n'), ((7468, 7483), 'numpy.log10', 'np.log10', (['(y - s)'], {}), '(y - s)\n', (7476, 7483), True, 'import numpy as np\n'), ((7490, 7505), 'numpy.log10', 'np.log10', (['(y + s)'], {}), '(y + s)\n', (7498, 7505), True, 'import numpy as np\n'), ((8465, 8500), 'numpy.round', 'np.round', (['((y_hat - y) / (2 * np.pi))'], {}), '((y_hat - y) / (2 * np.pi))\n', (8473, 8500), True, 'import numpy as np\n'), ((4668, 4701), 'numpy.abs', 'np.abs', (['he_ltf_trimmed_gain[i, :]'], {}), '(he_ltf_trimmed_gain[i, :])\n', (4674, 4701), True, 'import numpy as np\n'), ((4755, 4789), 'numpy.abs', 'np.abs', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (4761, 4789), True, 'import numpy as np\n'), ((4843, 4877), 'numpy.abs', 'np.abs', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (4849, 4877), True, 'import numpy as np\n'), ((4923, 4947), 'numpy.abs', 'np.abs', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (4929, 4947), True, 'import numpy as np\n'), ((4992, 5015), 'numpy.abs', 'np.abs', (['data_gain[i, :]'], {}), '(data_gain[i, :])\n', (4998, 5015), True, 'import numpy as np\n'), ((5847, 5882), 'numpy.angle', 'np.angle', (['he_ltf_trimmed_gain[i, :]'], {}), '(he_ltf_trimmed_gain[i, :])\n', (5855, 5882), True, 'import numpy as np\n'), ((5933, 5969), 'numpy.angle', 'np.angle', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (5941, 5969), True, 'import numpy as np\n'), ((6020, 6056), 'numpy.angle', 'np.angle', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (6028, 6056), True, 'import numpy as np\n'), ((6099, 6125), 
'numpy.angle', 'np.angle', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (6107, 6125), True, 'import numpy as np\n'), ((6167, 6192), 'numpy.angle', 'np.angle', (['data_gain[i, :]'], {}), '(data_gain[i, :])\n', (6175, 6192), True, 'import numpy as np\n'), ((5376, 5411), 'numpy.angle', 'np.angle', (['he_ltf_trimmed_gain[i, :]'], {}), '(he_ltf_trimmed_gain[i, :])\n', (5384, 5411), True, 'import numpy as np\n'), ((5473, 5509), 'numpy.angle', 'np.angle', (['l_ltf_1_trimmed_gain[i, :]'], {}), '(l_ltf_1_trimmed_gain[i, :])\n', (5481, 5509), True, 'import numpy as np\n'), ((5571, 5607), 'numpy.angle', 'np.angle', (['l_ltf_2_trimmed_gain[i, :]'], {}), '(l_ltf_2_trimmed_gain[i, :])\n', (5579, 5607), True, 'import numpy as np\n'), ((5661, 5687), 'numpy.angle', 'np.angle', (['pilot_gain[i, :]'], {}), '(pilot_gain[i, :])\n', (5669, 5687), True, 'import numpy as np\n'), ((5740, 5765), 'numpy.angle', 'np.angle', (['data_gain[i, :]'], {}), '(data_gain[i, :])\n', (5748, 5765), True, 'import numpy as np\n'), ((7893, 7907), 'numpy.diff', 'np.diff', (['index'], {}), '(index)\n', (7900, 7907), True, 'import numpy as np\n'), ((7975, 7989), 'numpy.diff', 'np.diff', (['index'], {}), '(index)\n', (7982, 7989), True, 'import numpy as np\n')] |
import sys
import re
import string
from collections import Counter
import pickle
def normalize_answer(s):
    """Lowercase, strip punctuation and articles (a/an/the), and collapse whitespace."""

    def strip_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def strip_punctuation(text):
        return text.translate(str.maketrans('', '', string.punctuation))

    def collapse_whitespace(text):
        return ' '.join(text.split())

    return collapse_whitespace(strip_articles(strip_punctuation(s.lower())))
def f1_score(prediction, ground_truth):
    """Token-level F1 between the normalized prediction and ground truth.

    Returns 0 when the normalized token multisets are disjoint.
    """
    pred_tokens = normalize_answer(prediction).split()
    truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection: each shared token counted min(pred, truth) times.
    overlap = Counter(pred_tokens) & Counter(truth_tokens)
    shared = sum(overlap.values())
    if shared == 0:
        return 0
    precision = 1.0 * shared / len(pred_tokens)
    recall = 1.0 * shared / len(truth_tokens)
    return (2 * precision * recall) / (precision + recall)
def exact_match_score(prediction, ground_truth):
    """Return True iff prediction and ground truth are identical after normalization."""
    normalized_prediction = normalize_answer(prediction)
    normalized_truth = normalize_answer(ground_truth)
    return normalized_prediction == normalized_truth
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Best score of ``metric_fn(prediction, gt)`` over all ground truths.

    Raises ValueError if ``ground_truths`` is empty (same as the builtin max).
    """
    return max(metric_fn(prediction, ground_truth) for ground_truth in ground_truths)
import unicodedata
def normalize(text):
    """Resolve different type of unicode encodings (NFD canonical decomposition)."""
    decomposed = unicodedata.normalize('NFD', text)
    return decomposed
def para_has_answer(answer, para, tokenizer):
    """Return True if any candidate answer occurs as a contiguous token span in para.

    Both paragraph and answers are NFD-normalized and compared uncased.
    """
    # TODO: Fix for multilingual tokenization
    assert isinstance(answer, list)
    tokenized = tokenizer.tokenize(unicodedata.normalize('NFD', para))
    para_words = tokenized.words(uncased=True)
    assert len(para_words) == len(tokenized)
    for candidate in answer:
        candidate_words = tokenizer.tokenize(unicodedata.normalize('NFD', candidate)).words(uncased=True)
        window = len(candidate_words)
        # Slide a window of the candidate's length over the paragraph tokens.
        for start in range(len(para_words) - window + 1):
            if para_words[start:start + window] == candidate_words:
                return True
    return False
| [
"collections.Counter",
"re.sub",
"unicodedata.normalize"
] | [((1530, 1564), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 'text'], {}), "('NFD', text)\n", (1551, 1564), False, 'import unicodedata\n'), ((156, 193), 're.sub', 're.sub', (['"""\\\\b(a|an|the)\\\\b"""', '""" """', 'text'], {}), "('\\\\b(a|an|the)\\\\b', ' ', text)\n", (162, 193), False, 'import re\n'), ((694, 720), 'collections.Counter', 'Counter', (['prediction_tokens'], {}), '(prediction_tokens)\n', (701, 720), False, 'from collections import Counter\n'), ((723, 751), 'collections.Counter', 'Counter', (['ground_truth_tokens'], {}), '(ground_truth_tokens)\n', (730, 751), False, 'from collections import Counter\n')] |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel
from wagtail.wagtailcore.models import Orderable
from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
class LinkFields(models.Model):
    """
    Represents a link to an external page, a document or a fellow page
    """
    # At most one of the targets below is expected to be filled in; the
    # ``link`` property resolves them in a fixed priority order.
    link_external = models.URLField(
        "External link",
        blank=True,
        null=True,
        help_text='Set an external link if you want the link to point somewhere outside the CMS.'
    )
    link_page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        on_delete=models.SET_NULL,
        blank=True,
        related_name='+',
        help_text='Choose an existing page if you want the link to point somewhere inside the CMS.'
    )
    link_document = models.ForeignKey(
        'wagtaildocs.Document',
        null=True,
        on_delete=models.SET_NULL,
        blank=True,
        related_name='+',
        help_text='Choose an existing document if you want the link to open a document.'
    )
    link_email = models.EmailField(
        blank=True,
        null=True,
        help_text='Set the recipient email address if you want the link to send an email.'
    )
    link_phone = models.CharField(
        max_length=20,
        blank=True,
        null=True,
        help_text='Set the number if you want the link to dial a phone number.'
    )
    @property
    def link(self):
        # Resolution priority: page > external URL > document > email > phone;
        # falls back to a harmless "#" anchor when nothing is configured.
        if self.link_page:
            return self.link_page.url
        elif self.link_external:
            return self.link_external
        elif self.link_document:
            return self.link_document.url
        elif self.link_email:
            return 'mailto:%s' % self.link_email
        elif self.link_phone:
            return 'tel:%s' % self.link_phone.strip()
        else:
            return "#"
    # Wagtail admin: group every link variant under a single "Link" panel.
    panels = [
        MultiFieldPanel([
            PageChooserPanel('link_page'),
            FieldPanel('link_external'),
            DocumentChooserPanel('link_document'),
            FieldPanel('link_email'),
            FieldPanel('link_phone'),
        ],
        "Link"
        ),
    ]
    class Meta:
        # Abstract mixin: only subclassed (e.g. by MenuElement), no table.
        abstract = True
@python_2_unicode_compatible
class MenuElement(LinkFields):
    """A single menu entry that can point at any LinkFields target."""
    # Overrides the linked page/document title when set.
    explicit_name = models.CharField(
        max_length=64,
        blank=True,
        null=True,
        help_text='If you want a different name than the page title.'
    )
    short_name = models.CharField(
        max_length=32,
        blank=True,
        null=True,
        help_text='If you need a custom name for responsive devices.'
    )
    css_class = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        verbose_name="CSS Class",
        help_text="Optional styling for the menu item"
    )
    icon_class = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        verbose_name="Icon Class",
        help_text="In case you need an icon element <i> for the menu item"
    )
    @property
    def title(self):
        # Display title: explicit name wins, then the linked page/document title.
        if self.explicit_name:
            return self.explicit_name
        elif self.link_page:
            return self.link_page.title
        elif self.link_document:
            return self.link_document.title
        else:
            return None
    @property
    def url(self):
        # Delegates to LinkFields.link resolution.
        return self.link
    def __str__(self):
        if self.explicit_name:
            title = self.explicit_name
        elif self.link_page:
            title = self.link_page.title
        else:
            title = ''
        return "%s ( %s )" % (title, self.short_name)
    class Meta:
        verbose_name = "Menu item"
    # Extend the inherited link panels with the naming/styling fields.
    panels = LinkFields.panels + [
        FieldPanel('explicit_name'),
        FieldPanel('short_name'),
        FieldPanel('css_class'),
        FieldPanel('icon_class'),
    ]
class NavigationMenuMenuElement(Orderable, MenuElement):
    """Orderable through-model attaching MenuElements to a NavigationMenu."""
    # ParentalKey lets modelcluster edit menu items inline with their menu.
    parent = ParentalKey(to='core.NavigationMenu', related_name='menu_items')
class NavigationMenuManager(models.Manager):
    # Natural-key support so menus can be referenced by name in fixtures.
    def get_by_natural_key(self, name):
        return self.get(menu_name=name)
@register_snippet
@python_2_unicode_compatible
class NavigationMenu(ClusterableModel):
    """A named navigation menu, editable in the Wagtail admin as a snippet."""
    objects = NavigationMenuManager()
    # Acts as the natural key (see NavigationMenuManager.get_by_natural_key).
    menu_name = models.CharField(max_length=255, null=False, blank=False)
    @property
    def items(self):
        # All MenuElement children attached via the ``menu_items`` ParentalKey.
        return self.menu_items.all()
    def __str__(self):
        return self.menu_name
    class Meta:
        verbose_name = "Navigation menu"
# Panels are attached after the class definition so that InlinePanel can see
# the fully-declared ``menu_items`` relation.
NavigationMenu.panels = [
    FieldPanel('menu_name', classname='full title'),
    InlinePanel('menu_items', label="Menu Items", help_text='Set the menu items for the current menu.')
]
| [
"django.db.models.EmailField",
"wagtail.wagtailadmin.edit_handlers.PageChooserPanel",
"django.db.models.ForeignKey",
"wagtail.wagtailadmin.edit_handlers.FieldPanel",
"wagtail.wagtailadmin.edit_handlers.InlinePanel",
"wagtail.wagtaildocs.edit_handlers.DocumentChooserPanel",
"django.db.models.URLField",
... | [((609, 765), 'django.db.models.URLField', 'models.URLField', (['"""External link"""'], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Set an external link if you want the link to point somewhere outside the CMS."""'}), "('External link', blank=True, null=True, help_text=\n 'Set an external link if you want the link to point somewhere outside the CMS.'\n )\n", (624, 765), False, 'from django.db import models\n'), ((810, 1022), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""wagtailcore.Page"""'], {'null': '(True)', 'on_delete': 'models.SET_NULL', 'blank': '(True)', 'related_name': '"""+"""', 'help_text': '"""Choose an existing page if you want the link to point somewhere inside the CMS."""'}), "('wagtailcore.Page', null=True, on_delete=models.SET_NULL,\n blank=True, related_name='+', help_text=\n 'Choose an existing page if you want the link to point somewhere inside the CMS.'\n )\n", (827, 1022), False, 'from django.db import models\n'), ((1083, 1284), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""wagtaildocs.Document"""'], {'null': '(True)', 'on_delete': 'models.SET_NULL', 'blank': '(True)', 'related_name': '"""+"""', 'help_text': '"""Choose an existing document if you want the link to open a document."""'}), "('wagtaildocs.Document', null=True, on_delete=models.\n SET_NULL, blank=True, related_name='+', help_text=\n 'Choose an existing document if you want the link to open a document.')\n", (1100, 1284), False, 'from django.db import models\n'), ((1346, 1475), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""Set the recipient email address if you want the link to send an email."""'}), "(blank=True, null=True, help_text=\n 'Set the recipient email address if you want the link to send an email.')\n", (1363, 1475), False, 'from django.db import models\n'), ((1518, 1650), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 
'null': '(True)', 'help_text': '"""Set the number if you want the link to dial a phone number."""'}), "(max_length=20, blank=True, null=True, help_text=\n 'Set the number if you want the link to dial a phone number.')\n", (1534, 1650), False, 'from django.db import models\n'), ((2553, 2675), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'blank': '(True)', 'null': '(True)', 'help_text': '"""If you want a different name than the page title."""'}), "(max_length=64, blank=True, null=True, help_text=\n 'If you want a different name than the page title.')\n", (2569, 2675), False, 'from django.db import models\n'), ((2726, 2848), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'blank': '(True)', 'null': '(True)', 'help_text': '"""If you need a custom name for responsive devices."""'}), "(max_length=32, blank=True, null=True, help_text=\n 'If you need a custom name for responsive devices.')\n", (2742, 2848), False, 'from django.db import models\n'), ((2898, 3032), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)', 'verbose_name': '"""CSS Class"""', 'help_text': '"""Optional styling for the menu item"""'}), "(max_length=255, blank=True, null=True, verbose_name=\n 'CSS Class', help_text='Optional styling for the menu item')\n", (2914, 3032), False, 'from django.db import models\n'), ((3091, 3251), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Icon Class"""', 'help_text': '"""In case you need an icon element <i> for the menu item"""'}), "(max_length=255, blank=True, null=True, verbose_name=\n 'Icon Class', help_text=\n 'In case you need an icon element <i> for the menu item')\n", (3107, 3251), False, 'from django.db import models\n'), ((4195, 4259), 'modelcluster.fields.ParentalKey', 'ParentalKey', ([], {'to': '"""core.NavigationMenu"""', 'related_name': 
'"""menu_items"""'}), "(to='core.NavigationMenu', related_name='menu_items')\n", (4206, 4259), False, 'from modelcluster.fields import ParentalKey\n'), ((4532, 4589), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(False)', 'blank': '(False)'}), '(max_length=255, null=False, blank=False)\n', (4548, 4589), False, 'from django.db import models\n'), ((4807, 4854), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""menu_name"""'], {'classname': '"""full title"""'}), "('menu_name', classname='full title')\n", (4817, 4854), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n'), ((4860, 4964), 'wagtail.wagtailadmin.edit_handlers.InlinePanel', 'InlinePanel', (['"""menu_items"""'], {'label': '"""Menu Items"""', 'help_text': '"""Set the menu items for the current menu."""'}), "('menu_items', label='Menu Items', help_text=\n 'Set the menu items for the current menu.')\n", (4871, 4964), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n'), ((3987, 4014), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""explicit_name"""'], {}), "('explicit_name')\n", (3997, 4014), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n'), ((4024, 4048), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""short_name"""'], {}), "('short_name')\n", (4034, 4048), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n'), ((4058, 4081), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""css_class"""'], {}), "('css_class')\n", (4068, 4081), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n'), ((4091, 4115), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', 
(['"""icon_class"""'], {}), "('icon_class')\n", (4101, 4115), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n'), ((2184, 2213), 'wagtail.wagtailadmin.edit_handlers.PageChooserPanel', 'PageChooserPanel', (['"""link_page"""'], {}), "('link_page')\n", (2200, 2213), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n'), ((2227, 2254), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""link_external"""'], {}), "('link_external')\n", (2237, 2254), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n'), ((2268, 2305), 'wagtail.wagtaildocs.edit_handlers.DocumentChooserPanel', 'DocumentChooserPanel', (['"""link_document"""'], {}), "('link_document')\n", (2288, 2305), False, 'from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel\n'), ((2319, 2343), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""link_email"""'], {}), "('link_email')\n", (2329, 2343), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n'), ((2357, 2381), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""link_phone"""'], {}), "('link_phone')\n", (2367, 2381), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel\n')] |
## Author: <NAME>
import json
import io
import os
import re
from hl7apy.parser import parse_message
from hl7apy.exceptions import UnsupportedVersion
#receives the name of the file and reads the messages in the file
def readMessageFile(filename):
    """Read the whole HL7 message file and return its contents as a string.

    :param filename: path of the text file holding the HL7 messages
    :returns: the raw file contents
    """
    # Bug fix: the original left the file handle open; a context manager
    # guarantees it is closed even if read() raises.
    with open(filename, 'r') as infile:
        message = infile.read()
    print("Step 1: File read successfully")
    return message
#This method splits the 3 messages based on the blank line that is between messages
def splitMessages(strmsg):
    """Split the raw file contents into individual HL7 messages.

    Messages in the input file are separated by exactly one blank line
    (two consecutive newlines).
    """
    parts = strmsg.split('\n\n')
    print("Step 2: Messages splitted successfully")
    return parts
def hl7StrToDictionary(hl7string, use_long_name=True):
    """ Takes a string parameter and converts it to a Dictionary
    :param hl7string: HL7 string that is passed to the method
    :returns: A dictionary representation of the HL7 message
    :raises UnsupportedVersion: when the message declares an HL7 version
        that hl7apy does not support
    """
    # HL7 segments are carriage-return separated; the text file uses newlines.
    hl7string = hl7string.replace("\n", "\r")
    try:
        m = parse_message(hl7string)
    except UnsupportedVersion:
        print(" Error! : The specified version in the file is unsurpoted.")
        print(" Kindly change the version number in the text file to 2.5")
        # Bug fix: the original fell through here and crashed with a
        # NameError on the undefined ``m``.  Re-raise so the caller sees
        # the real problem after the guidance has been printed.
        raise
    #We create a dictionary to ensure it is json serializable
    return hl7MessageToDictionary(m, use_long_name=use_long_name)
def hl7MessageToDictionary(m, use_long_name=True):
    """Recursively convert an HL7 message element into plain Python data.

    Leaf elements become their ER7 string; composite elements become a dict
    keyed by the lower-cased child name (the long name when ``use_long_name``
    is set and available).  Repeated children are collected into a list
    under their shared key.
    """
    if not m.children:
        # Leaf node: just its ER7 string representation.
        return m.to_er7()
    result = {}
    for child in m.children:
        key = child.name.lower()
        if use_long_name and child.long_name:
            key = child.long_name.lower()
        value = hl7MessageToDictionary(child, use_long_name=use_long_name)
        if key not in result:
            result[key] = value
        else:
            existing = result[key]
            if isinstance(existing, list):
                existing.append(value)
            else:
                result[key] = [existing, value]
    return result
def writeJsonFile(dictionary):
    """Append *dictionary* to ``ml7tojson.json`` as pretty-printed JSON.

    All messages are collected in one file, so the file is opened in append
    mode; any stale file is deleted once by the driver code before the loop.
    """
    # Python 2/3 compatibility: pick the unicode text type.
    try:
        text_type = unicode
    except NameError:
        text_type = str
    with io.open('ml7tojson.json', 'a', encoding='utf8') as outfile:
        serialized = json.dumps(dictionary,
                                indent=4, sort_keys=False,
                                separators=(',', ':'), ensure_ascii=False)
        outfile.write(text_type(serialized))
#read all messages from the input file as one string
strmsg = readMessageFile("HL7_Final.hl7")
#split the messages on the blank line between them
msgList = splitMessages(strmsg)
#remove a previously written json file, if one exists, so we start fresh
#(writeJsonFile appends, so a stale file would accumulate old output)
if os.path.isfile('ml7tojson.json'):
    os.remove('ml7tojson.json')
    print("Step 3: Previous json file deleted Successfully")
#handle one message at a time
for message in msgList:
    # Convert it to a dictionary
    d = hl7StrToDictionary(message)
    #append its JSON representation to the output file
    writeJsonFile(d)
print ("A jason file with the message has been created")
| [
"re.split",
"json.dumps",
"io.open",
"os.path.isfile",
"hl7apy.parser.parse_message",
"os.remove"
] | [((2847, 2879), 'os.path.isfile', 'os.path.isfile', (['"""ml7tojson.json"""'], {}), "('ml7tojson.json')\n", (2861, 2879), False, 'import os\n'), ((501, 527), 're.split', 're.split', (['"""\\\\n\\\\n"""', 'strmsg'], {}), "('\\\\n\\\\n', strmsg)\n", (509, 527), False, 'import re\n'), ((2885, 2912), 'os.remove', 'os.remove', (['"""ml7tojson.json"""'], {}), "('ml7tojson.json')\n", (2894, 2912), False, 'import os\n'), ((954, 978), 'hl7apy.parser.parse_message', 'parse_message', (['hl7string'], {}), '(hl7string)\n', (967, 978), False, 'from hl7apy.parser import parse_message\n'), ((2332, 2379), 'io.open', 'io.open', (['"""ml7tojson.json"""', '"""a"""'], {'encoding': '"""utf8"""'}), "('ml7tojson.json', 'a', encoding='utf8')\n", (2339, 2379), False, 'import io\n'), ((2411, 2507), 'json.dumps', 'json.dumps', (['dictionary'], {'indent': '(4)', 'sort_keys': '(False)', 'separators': "(',', ':')", 'ensure_ascii': '(False)'}), "(dictionary, indent=4, sort_keys=False, separators=(',', ':'),\n ensure_ascii=False)\n", (2421, 2507), False, 'import json\n')] |
# -*- coding:utf-8 -*-
import urllib
import urllib.request
import json
import django.utils.http
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from operator import itemgetter # 排序
import time
from django.core.paginator import Paginator
servers = ['127.0.0.1:8100', '127.0.0.1:8100', '127.0.0.1:8100']  # IPs of the Everything servers to query
results = []
servernum = len(servers)  # server count; next version will count online databases instead
resultsnumbers = 0
for server in servers:
    # Everything HTTP/JSON API: search term "1", newest-modified first, up to 1000 hits.
    url = 'http://{}/?s={}&j=1&sort=date_modified&ascending=0&date_modified_column=1&path_column=1&c=1000'.format(
        server, '1')
    username = '59'  # log in to the Everything server -- start
    # NOTE(review): placeholder credential left in source; load from config/env instead.
    password = '<PASSWORD>'
    p = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    p.add_password(None, url, username, password)
    handler = urllib.request.HTTPBasicAuthHandler(p)
    opener = urllib.request.build_opener(handler)
    urllib.request.install_opener(opener)  # log in to the Everything server -- end
    response = urllib.request.urlopen(url)
    data = json.loads(response.read().decode("utf-8"))
    resultsnumbers = resultsnumbers + int(data['totalResults'])
    for results_dic in data['results']:
        date_modified = results_dic['date_modified']  # Windows FILETIME string
        # FILETIME (100ns ticks since 1601-01-01) -> Unix epoch seconds:
        # drop the sub-second digits, then shift by the 1601->1970 offset.
        date_modified = int(date_modified[:-7]) - 11644473600  # FileTime to UnixTime
        date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(date_modified))
        results_dic['date_modified'] = date
        results_dic['ip'] = server[:-5]  # add the server IP (port ":8100" stripped) to the result dict
        results_dic['ip_all'] = server
    results.extend(data['results'])
p = Paginator(results, 5)  # show 5 entries per page
page1 = p.page(1)  # first page
print(page1.object_list) | [
"urllib.request.HTTPBasicAuthHandler",
"urllib.request.HTTPPasswordMgrWithDefaultRealm",
"urllib.request.install_opener",
"urllib.request.build_opener",
"time.localtime",
"urllib.request.urlopen",
"django.core.paginator.Paginator"
] | [((689, 737), 'urllib.request.HTTPPasswordMgrWithDefaultRealm', 'urllib.request.HTTPPasswordMgrWithDefaultRealm', ([], {}), '()\n', (735, 737), False, 'import urllib\n'), ((802, 840), 'urllib.request.HTTPBasicAuthHandler', 'urllib.request.HTTPBasicAuthHandler', (['p'], {}), '(p)\n', (837, 840), False, 'import urllib\n'), ((854, 890), 'urllib.request.build_opener', 'urllib.request.build_opener', (['handler'], {}), '(handler)\n', (881, 890), False, 'import urllib\n'), ((895, 932), 'urllib.request.install_opener', 'urllib.request.install_opener', (['opener'], {}), '(opener)\n', (924, 932), False, 'import urllib\n'), ((970, 997), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (992, 997), False, 'import urllib\n'), ((1576, 1597), 'django.core.paginator.Paginator', 'Paginator', (['results', '(5)'], {}), '(results, 5)\n', (1585, 1597), False, 'from django.core.paginator import Paginator\n'), ((1358, 1387), 'time.localtime', 'time.localtime', (['date_modified'], {}), '(date_modified)\n', (1372, 1387), False, 'import time\n')] |
from dnsget import dnsget
from analyse_dependency import a_d
# NOTE(review): both files are opened without a context manager; fine for a
# short script, but ``with open(...)`` would be safer.
f=open("targetd2")
line=f.readline().strip('\n')
# "targetd2r" is opened for writing but never written to in this script —
# presumably dnsget/a_d produce their own output files; confirm whether
# ``o`` is needed at all.
o=open("targetd2r",'w')
target=[]
name="top50"
# Collect one target (domain) per non-empty line of the input file.
while (line):
    target.append(line)
    line=f.readline().strip('\n')
# Resolve the target list, then analyse dependencies under the given label.
r=dnsget(target,name)
a_d(name)
f.close()
o.close()
| [
"analyse_dependency.a_d",
"dnsget.dnsget"
] | [((225, 245), 'dnsget.dnsget', 'dnsget', (['target', 'name'], {}), '(target, name)\n', (231, 245), False, 'from dnsget import dnsget\n'), ((245, 254), 'analyse_dependency.a_d', 'a_d', (['name'], {}), '(name)\n', (248, 254), False, 'from analyse_dependency import a_d\n')] |
import os
from random import sample
import numpy as np
from numpy import cos
from scipy.linalg import lstsq
from compmech.constants import CMHOME
from compmech.logger import *
def load_c0(name, funcnum, m0, n0):
    """Load pre-computed imperfection coefficients ``c0`` from disk.

    Looks for ``c0_<name>_f<funcnum>_m<m0>_n<n0>.txt`` inside the
    ``conecyl/imperfections/c0`` folder of the compmech installation and
    raises ``ValueError`` when no such file exists.
    """
    fname = 'c0_{0}_f{1}_m{2:03d}_n{3:03d}.txt'.format(name, funcnum, m0, n0)
    path = os.path.join(CMHOME, 'conecyl', 'imperfections', 'c0', fname)
    if not os.path.isfile(path):
        raise ValueError('Coefficient file not found!')
    return np.loadtxt(path)
def calc_c0(path, m0=40, n0=40, funcnum=2, sample_size=None,
            maxmem=8, save=True, offset_w0=None):
    r"""Find the coefficients `c_0` that best fit the `w_0` function.

    The measured data will be fit using one of the following functions,
    selected using the ``funcnum`` parameter:

    ``funcnum=1``

    .. math::
        w_0 = \sum_{i=1}^{m_0}{ \sum_{j=0}^{n_0}{
                c_{ij}^a sin{b_x} sin{b_\theta}
                +c_{ij}^b sin{b_x} cos{b_\theta}}}

    ``funcnum=2`` (default)

    .. math::
        w_0 = \sum_{i=0}^{m_0}{ \sum_{j=0}^{n_0}{
                c_{ij}^a cos{b_x} sin{b_\theta}
                +c_{ij}^b cos{b_x} cos{b_\theta}}}

    ``funcnum=3``

    .. math::
        w_0 = \sum_{i=0}^{m_0}{ \sum_{j=0}^{n_0}{
                c_{ij}^a sin{b_x} sin{b_\theta}
                +c_{ij}^b sin{b_x} cos{b_\theta}
                +c_{ij}^c cos{b_x} sin{b_\theta}
                +c_{ij}^d cos{b_x} cos{b_\theta}}}

    where:

    .. math::
        b_x = i \pi \frac x L_{points}

        b_\theta = j \theta

    where `L_{points}` represents the difference between the maximum and
    the height values in the imperfection file divided by the cosine
    of the semi-vertex angle:

    .. math::
        L_{points} = \frac{H_{max} - H_{min}}{cos(\alpha)}
                   = \frac{H_{points}}{cos(\alpha)}

    In this form `{}^x/_{L_{points}}` will vary from `0.` (at the top)
    to `1.` (at the bottom).

    .. note:: Note that if the measured sample does not
              cover all the height, **it will be stretched**.

    The approximation can be written in matrix form as:

    .. math::
        w_0 = [g] \{c\}

    where `[g]` carries the base functions and `{c}` the respective
    amplitudes. The solution consists on finding the best `\{c\}` that
    minimizes the least-square error between the measured imperfection
    pattern and the `w_0` function.

    Parameters
    ----------
    path : str or numpy.ndarray
        The path of the file containing the data. Can be a full path using
        ``r"C:\Temp\inputfile.txt"``, for example.
        The input file must have 3 columns: `\theta`, `height`, `imp`;
        expressed in Cartesian coordinates.
        This input can also be a ``numpy.ndarray`` object, with
        `\theta`, `height`, `imp` in each corresponding column.
    m0 : int
        Number of terms along the meridian (`x`).
    n0 : int
        Number of terms along the circumference (`\theta`).
    funcnum : int, optional
        As explained above, selects the base functions used for
        the approximation.
    sample_size : int or None, optional
        Specifies how many points of the imperfection file should be used. If
        ``None`` all points will be used in the computations.
    maxmem : int, optional
        Maximum RAM memory in GB allowed to compute the base functions.
        The ``scipy.interpolate.lstsq`` will go beyond this limit.
    save : bool, optional
        If ``True`` saves the calculated coefficients in the
        ``compmech/conecyl/imperfections/c0`` folder.
    offset_w0 : float or None, optional
        Constant offset added to the measured imperfection values.

    Returns
    -------
    out : numpy.ndarray
        A 1-D array with the best-fit coefficients.

    """
    if isinstance(path, np.ndarray):
        input_pts = path
        path = 'unnamed.txt'
    else:
        input_pts = np.loadtxt(path)
    if input_pts.shape[1] != 3:
        raise ValueError('Input does not have the format: "theta, x, imp"')
    # Import here (after cheap validation) so invalid input fails fast even
    # when the compiled ``mgi`` extension is not yet built.
    import mgi
    log('Finding w0 coefficients for {0},\n\tusing funcnum {1}'.format(
        str(os.path.basename(path)), funcnum))
    if sample_size:
        num = input_pts.shape[0]
        if sample_size < num:
            input_pts = input_pts[sample(range(num), int(sample_size))]
    if funcnum==1:
        size = 2
    elif funcnum==2:
        size = 2
    elif funcnum==3:
        size = 4
    else:
        raise ValueError('Valid values for "funcnum" are 1, 2 or 3')
    # Number of points that fit in ``maxmem`` GB of 64-bit base functions.
    maxnum = maxmem*1024*1024*1024*8/(64*size*m0*n0)
    num = input_pts.shape[0]
    if num >= maxnum:
        input_pts = input_pts[sample(range(num), int(maxnum))]
        # Bug fix: ``.format`` used to be applied to the second string of a
        # concatenation, so the {0}/{1} placeholders were never substituted.
        warn('Reducing sample size from {0} to {1} '
             'due to the "maxmem" specified'.format(num, maxnum), level=1)
    thetas = input_pts[:, 0].copy()
    xs = input_pts[:, 1]
    w0pts = input_pts[:, 2]
    if offset_w0:
        w0pts += offset_w0
    # normalizing x
    xs = (xs - xs.min())/(xs.max() - xs.min())
    # inverting x to cope with the coordsys of the semi-analytical model
    xs = 1 - xs
    a = mgi.fa(m0, n0, xs, thetas, funcnum=funcnum)
    log('Base functions calculated', level=1)
    try:
        c0, residues, rank, s = lstsq(a, w0pts)
    except MemoryError:
        # Bug fix: the original swallowed the error and then crashed with a
        # NameError on ``c0``; re-raise after logging the hint.
        error('Reduce the "maxmem" parameter!')
        raise
    log('Finished scipy.linalg.lstsq', level=1)
    if save:
        name = '.'.join(os.path.basename(path).split('.')[0:-1])
        outpath = os.path.join(CMHOME, 'conecyl', 'imperfections', 'c0',
                   'c0_{0}_f{1}_m{2:03d}_n{3:03d}.txt'.format(
                    name, funcnum, m0, n0))
        np.savetxt(outpath, c0)
    return c0, residues
def fw0(m0, n0, c0, xs_norm, ts, funcnum=2):
    r"""Calculates the imperfection field `w_0` for a given input.

    Parameters
    ----------
    m0 : int
        The number of terms along the meridian.
    n0 : int
        The number of terms along the circumference.
    c0 : numpy.ndarray
        The coefficients of the imperfection pattern.
    xs_norm : numpy.ndarray
        The meridian coordinate (`x`) normalized to be between ``0.`` and
        ``1.``.
    ts : numpy.ndarray
        The angles in radians representing the circumferential coordinate
        (`\theta`).
    funcnum : int, optional
        The function used for the approximation (see the ``calc_c0`` function)

    Notes
    -----
    The inputs ``xs_norm`` and ``ts`` must be of the same size.

    If ``funcnum==1 or funcnum==2`` then ``size=2``, if ``funcnum==3`` then
    ``size=4`` and the inputs must satisfy ``c0.shape[0] == size*m0*n0``.

    """
    if xs_norm.shape != ts.shape:
        raise ValueError('xs_norm and ts must have the same shape')
    if funcnum==1:
        size = 2
    elif funcnum==2:
        size = 2
    elif funcnum==3:
        size = 4
    else:
        # Bug fix: an invalid funcnum previously fell through and crashed
        # later with a NameError on the undefined ``size``; fail fast with
        # the same message used by ``calc_c0``.
        raise ValueError('Valid values for "funcnum" are 1, 2 or 3')
    if c0.shape[0] != size*m0*n0:
        raise ValueError('Invalid c0 for the given m0 and n0!')
    import mgi
    w0s = mgi.fw0(m0, n0, c0, xs_norm.ravel(), ts.ravel(), funcnum)
    return w0s.reshape(xs_norm.shape)
| [
"scipy.linalg.lstsq",
"os.path.isfile",
"os.path.basename",
"numpy.savetxt",
"numpy.loadtxt",
"mgi.fa"
] | [((380, 400), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (394, 400), False, 'import os\n'), ((5034, 5077), 'mgi.fa', 'mgi.fa', (['m0', 'n0', 'xs', 'thetas'], {'funcnum': 'funcnum'}), '(m0, n0, xs, thetas, funcnum=funcnum)\n', (5040, 5077), False, 'import mgi\n'), ((417, 433), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (427, 433), True, 'import numpy as np\n'), ((3839, 3855), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (3849, 3855), True, 'import numpy as np\n'), ((5165, 5180), 'scipy.linalg.lstsq', 'lstsq', (['a', 'w0pts'], {}), '(a, w0pts)\n', (5170, 5180), False, 'from scipy.linalg import lstsq\n'), ((5561, 5584), 'numpy.savetxt', 'np.savetxt', (['outpath', 'c0'], {}), '(outpath, c0)\n', (5571, 5584), True, 'import numpy as np\n'), ((4050, 4072), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4066, 4072), False, 'import os\n'), ((5339, 5361), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (5355, 5361), False, 'import os\n')] |
import numpy as np
from cost_functions import trajectory_cost_fn
import time
class Controller():
    """Abstract policy: subclasses decide which action to take in a state."""
    def __init__(self):
        pass
    # Get the appropriate action(s) for this state(s)
    def get_action(self, state):
        pass
class RandomController(Controller):
    """Policy that ignores the state and samples a uniformly random action."""

    def __init__(self, env):
        # Keep a handle on the environment so we can sample its action space.
        self.env = env

    def get_action(self, state):
        # The state is irrelevant: draw an action uniformly at random from
        # the environment's action space.
        return self.env.action_space.sample()
class MPCcontroller(Controller):
    """ Controller built using the MPC method outlined in https://arxiv.org/abs/1708.02596 """
    def __init__(self,
                 env,
                 dyn_model,
                 horizon=5,
                 cost_fn=None,
                 num_simulated_paths=10,
                 ):
        # env: provides action_space sampling and observation_space shape
        # dyn_model: learned dynamics with a batched ``predict(states, actions)``
        # horizon: number of simulated steps per candidate action sequence
        # cost_fn: per-step cost used by trajectory_cost_fn
        # num_simulated_paths: number of random action sequences evaluated
        self.env = env
        self.dyn_model = dyn_model
        self.horizon = horizon
        self.cost_fn = cost_fn
        self.num_simulated_paths = num_simulated_paths
    def get_action(self, state):
        """ YOUR CODE HERE
         Note: be careful to batch your simulations through the model for speed """
        # Rollout buffers: (paths, horizon, obs_dim) states before/after each step.
        observations = np.empty(
            (self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0]))
        next_observations = np.empty(
            (self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0]))
        # One random action sequence of length ``horizon`` per simulated path.
        actions = [
            [self.env.action_space.sample()
             for _ in range(self.horizon)]
            for _ in range(self.num_simulated_paths)
        ]
        actions = np.array(actions)
        # Start every path from the same current state and roll the learned
        # dynamics forward, one batched prediction per time step.
        last_state = np.array([state for _ in range(self.num_simulated_paths)])
        for idx in range(self.horizon):
            action_batch = actions[:, idx]
            next_state = self.dyn_model.predict(last_state, action_batch)
            observations[:, idx, :] = last_state
            next_observations[:, idx, :] = next_state
            last_state = next_state
        # Score each simulated trajectory and execute only the first action
        # of the cheapest one (standard MPC receding horizon).
        costs = np.array([trajectory_cost_fn(
            self.cost_fn, observations[i], actions[i],
            next_observations[i])
            for i in range(self.num_simulated_paths)
        ])
        min_cost_path_id = np.argmin(costs)
        return actions[min_cost_path_id][0]
| [
"numpy.argmin",
"numpy.array",
"numpy.empty",
"cost_functions.trajectory_cost_fn"
] | [((1235, 1327), 'numpy.empty', 'np.empty', (['(self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0])'], {}), '((self.num_simulated_paths, self.horizon, self.env.\n observation_space.shape[0]))\n', (1243, 1327), True, 'import numpy as np\n'), ((1364, 1456), 'numpy.empty', 'np.empty', (['(self.num_simulated_paths, self.horizon, self.env.observation_space.shape[0])'], {}), '((self.num_simulated_paths, self.horizon, self.env.\n observation_space.shape[0]))\n', (1372, 1456), True, 'import numpy as np\n'), ((1654, 1671), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (1662, 1671), True, 'import numpy as np\n'), ((2277, 2293), 'numpy.argmin', 'np.argmin', (['costs'], {}), '(costs)\n', (2286, 2293), True, 'import numpy as np\n'), ((2076, 2163), 'cost_functions.trajectory_cost_fn', 'trajectory_cost_fn', (['self.cost_fn', 'observations[i]', 'actions[i]', 'next_observations[i]'], {}), '(self.cost_fn, observations[i], actions[i],\n next_observations[i])\n', (2094, 2163), False, 'from cost_functions import trajectory_cost_fn\n')] |
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
# from modeling.dynamic_filters.multiheadatt import TransformerBlock
from modeling.dynamic_filters.build import DynamicFilter,ACRM_query,ACRM_video
from utils import loss as L
from utils.rnns import feed_forward_rnn
import utils.pooling as POOLING
class Localization_ACRM(nn.Module):
    def __init__(self, cfg):
        """Build the ACRM localisation network from the experiment config.

        Pipeline: a query encoder (ACRM_query), a video encoder (ACRM_video),
        an optional localisation LSTM, and three MLP heads scoring each frame
        as start / end / inside ("inter") of the target moment.
        """
        super(Localization_ACRM, self).__init__()
        self.cfg = cfg
        self.batch_size = cfg.BATCH_SIZE_TRAIN
        # Query-side encoder producing the dynamic filter vectors.
        self.model_df = ACRM_query(cfg)
        # if cfg.ACRM_VIDEO.TAIL_MODEL == "LSTM":
        #     self.model_video_GRU = nn.LSTM(input_size = cfg.DYNAMIC_FILTER.LSTM_VIDEO.INPUT_SIZE,
        #                         num_layers   = cfg.DYNAMIC_FILTER.LSTM_VIDEO.NUM_LAYERS,
        #                         hidden_size  = cfg.DYNAMIC_FILTER.LSTM_VIDEO.HIDDEN_SIZE,
        #                         bias         = cfg.DYNAMIC_FILTER.LSTM_VIDEO.BIAS,
        #                         dropout      = cfg.DYNAMIC_FILTER.LSTM_VIDEO.DROPOUT,
        #                         bidirectional= cfg.DYNAMIC_FILTER.LSTM_VIDEO.BIDIRECTIONAL,
        #                         batch_first  = cfg.DYNAMIC_FILTER.LSTM_VIDEO.BATCH_FIRST)
        # Video-side encoder.
        self.model_video_GRU = ACRM_video(cfg)
        # self.reduction = nn.Linear(cfg.REDUCTION.INPUT_SIZE, cfg.REDUCTION.OUTPUT_SIZE)
        self.multimodal_fc1 = nn.Linear(512*2, 1)
        self.multimodal_fc2 = nn.Linear(512, 1)
        self.is_use_rnn_loc = cfg.ACRM_CLASSIFICATION.USED
        self.rnn_localization = nn.LSTM(input_size = cfg.ACRM_CLASSIFICATION.INPUT_SIZE,
                        hidden_size  = cfg.ACRM_CLASSIFICATION.INPUT_SIZE_RNN,
                        num_layers   = cfg.LOCALIZATION.ACRM_NUM_LAYERS,
                        bias         = cfg.LOCALIZATION.BIAS,
                        dropout      = cfg.LOCALIZATION.DROPOUT,
                        bidirectional= cfg.LOCALIZATION.BIDIRECTIONAL,
                        batch_first  = cfg.LOCALIZATION.BATCH_FIRST)
        # NOTE(review): cfg.ACRM_CLASSIFICATION.INPUT_SIZE is recomputed below
        # *after* the LSTM above was constructed with the old value — confirm
        # this ordering is intentional.
        if cfg.ACRM_CLASSIFICATION.FUSION == 'CAT':
            cfg.ACRM_CLASSIFICATION.INPUT_SIZE = cfg.DYNAMIC_FILTER.LSTM_VIDEO.HIDDEN_SIZE * (1 + int(cfg.DYNAMIC_FILTER.LSTM_VIDEO.BIDIRECTIONAL)) \
                                + cfg.DYNAMIC_FILTER.LSTM.HIDDEN_SIZE * (1 + int(cfg.DYNAMIC_FILTER.LSTM.BIDIRECTIONAL))
        else:
            # Non-concatenating fusions require matching feature widths.
            assert cfg.DYNAMIC_FILTER.LSTM_VIDEO.HIDDEN_SIZE * (1 + int(cfg.DYNAMIC_FILTER.LSTM_VIDEO.BIDIRECTIONAL)) == \
                                cfg.DYNAMIC_FILTER.LSTM.HIDDEN_SIZE * (1 + int(cfg.DYNAMIC_FILTER.LSTM.BIDIRECTIONAL))
            cfg.ACRM_CLASSIFICATION.INPUT_SIZE = cfg.DYNAMIC_FILTER.LSTM_VIDEO.HIDDEN_SIZE * (1 + int(cfg.DYNAMIC_FILTER.LSTM_VIDEO.BIDIRECTIONAL))
        if cfg.ACRM_CLASSIFICATION.USED == True:
            cfg.ACRM_CLASSIFICATION.INPUT_SIZE = cfg.ACRM_CLASSIFICATION.INPUT_SIZE_RNN * (1 + int(cfg.LOCALIZATION.BIDIRECTIONAL))
        self.pooling = POOLING.MeanPoolingLayer()
        # Three parallel two-layer MLP heads over the fused features.
        self.starting = nn.Sequential(
                nn.Linear(cfg.ACRM_CLASSIFICATION.INPUT_SIZE, cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE),
                nn.Tanh(),
                # nn.Dropout(cfg.LOCALIZATION.ACRM_DROPOUT),
                nn.Linear(cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE, cfg.ACRM_CLASSIFICATION.OUTPUT_SIZE))
        self.ending = nn.Sequential(
                nn.Linear(cfg.ACRM_CLASSIFICATION.INPUT_SIZE, cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE),
                nn.Tanh(),
                # nn.Dropout(cfg.LOCALIZATION.ACRM_DROPOUT),
                nn.Linear(cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE, cfg.ACRM_CLASSIFICATION.OUTPUT_SIZE))
        self.intering = nn.Sequential(
                nn.Linear(cfg.ACRM_CLASSIFICATION.INPUT_SIZE, cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE),
                nn.Tanh(),
                # nn.Dropout(cfg.LOCALIZATION.ACRM_DROPOUT),
                nn.Linear(cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE, cfg.ACRM_CLASSIFICATION.OUTPUT_SIZE))
        # self.dropout_layer = nn.Dropout(cfg.DYNAMIC_FILTER.LSTM_VIDEO.DROPOUT)
        self.dropout_layer = nn.Dropout(cfg.DYNAMIC_FILTER.LSTM_VIDEO.DROPOUT)
        # self.starting = nn.Linear(cfg.CLASSIFICATION.INPUT_SIZE, cfg.CLASSIFICATION.OUTPUT_SIZE)
        # self.ending = nn.Linear(cfg.CLASSIFICATION.INPUT_SIZE, cfg.CLASSIFICATION.OUTPUT_SIZE)
def attention(self, videoFeat, filter, lengths):
pred_local = torch.bmm(videoFeat, filter.unsqueeze(2)).squeeze()
return pred_local
def get_mask_from_sequence_lengths(self, sequence_lengths: torch.Tensor, max_length: int):
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long()
def feature_l2_normalize(self, data_tensor):
mu = torch.norm(data_tensor,dim=-1, keepdim=True)
data_tensor = data_tensor/mu
return data_tensor
def feature_gauss_normalize(self, data_tensor):
mu = torch.mean(data_tensor,dim=-1,keepdim=True)
std_value = torch.std(data_tensor,dim=-1,keepdim=True)
return (data_tensor - mu)/std_value
def fusion_layer(self , filter_start, output_video, mode):
if mode == 'CAT':
output = torch.cat([filter_start.unsqueeze(dim=1).repeat(1,output_video.shape[1],1),output_video],dim=-1)
elif mode == 'COS':
output = filter_start.unsqueeze(dim=1).repeat(1,output_video.shape[1],1) * output_video
elif mode =='SUB':
output = (filter_start.unsqueeze(dim=1).repeat(1,output_video.shape[1],1) - output_video)
elif mode == 'CROSS_COS':
output = filter_start * output_video
elif mode == 'CROSS_SUB':
output = torch.abs(filter_start - output_video)
return output
def masked_softmax(self, vector: torch.Tensor, mask: torch.Tensor, dim: int = -1, memory_efficient: bool = False, mask_fill_value: float = -1e32):
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill((1 - mask).byte(), mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result + 1e-13
    def mask_softmax(self, feat, mask):
        # Convenience wrapper: masked softmax over the last dim using the
        # renormalizing (non memory-efficient) code path of masked_softmax.
        return self.masked_softmax(feat, mask, memory_efficient=False)
def kl_div(self, p, gt, length):
individual_loss = []
for i in range(length.size(0)):
vlength = int(length[i])
ret = gt[i][:vlength] * torch.log(p[i][:vlength]/gt[i][:vlength])
individual_loss.append(-torch.sum(ret))
individual_loss = torch.stack(individual_loss)
return torch.mean(individual_loss), individual_loss
def max_boundary(self, p, gt, length):
individual_loss = []
for i in range(length.size(0)):
# vlength = int(length[i])
index_bd = gt[i]
ret = torch.log(p[i][index_bd])
individual_loss.append(-torch.sum(ret))
individual_loss = torch.stack(individual_loss)
return torch.mean(individual_loss), individual_loss
def max_inter(self, p, gt_s, gt_e, length):
individual_loss = []
for i in range(length.size(0)):
# vlength = int(length[i])
index_bs = gt_s[i]
index_be = gt_e[i]
ret = torch.log(p[i][index_bs:(index_be+1)])/(max(index_be-index_bs,1))
individual_loss.append(-torch.sum(ret))
individual_loss = torch.stack(individual_loss)
return torch.mean(individual_loss), individual_loss
    def forward(self, videoFeat, videoFeat_lengths, tokens, tokens_lengths, start, end, localiz, frame_start, frame_end):
        """One pass: fuse the query with the video and compute boundary losses.

        Returns (total_loss, individual_loss, pred_start, pred_end, attention,
        atten_loss). Note: `start`, `end` and `localiz` are accepted but unused
        in this body.
        """
        # 0/1 padding mask over frames, shape (batch, max_frames).
        mask = self.get_mask_from_sequence_lengths(videoFeat_lengths, int(videoFeat.shape[1]))
        output_video = self.model_video_GRU(videoFeat,videoFeat_lengths,mask)
        # filter_start: query representation produced by the dynamic filter.
        filter_start, lengths = self.model_df(tokens, tokens_lengths,output_video)
        # output_video = self.feature_gauss_normalize(output_video)
        # filter_start = self.feature_gauss_normalize(filter_start)
        # attention_weights = attention_weights.detach().cpu().numpy()
        # np.save('/home/thy/disk/proposal_free/experiments/visualization/attention.npy',attention_weights)
        output = self.fusion_layer(filter_start,output_video,self.cfg.ACRM_CLASSIFICATION.FUSION)
        # output = torch.cat([filter_start.unsqueeze(dim=1).repeat(1,output_video.shape[1],1),output_video],dim=-1)
        # output = filter_start.unsqueeze(dim=1).repeat(1,output_video.shape[1],1) * output_video
        if self.is_use_rnn_loc == True:
            output, _ = feed_forward_rnn(self.rnn_localization,
                                    output,
                                    lengths=videoFeat_lengths)
        output = self.dropout_layer(output)
        # Per-frame start/end/interior distributions, masked-softmaxed over frames.
        pred_start = self.starting(output.view(-1, output.size(2))).view(-1,output.size(1),1).squeeze()
        pred_start = self.mask_softmax(pred_start, mask)
        pred_end = self.ending(output.view(-1, output.size(2))).view(-1,output.size(1),1).squeeze()
        pred_end = self.mask_softmax(pred_end, mask)
        pred_inter = self.intering(output.view(-1, output.size(2))).view(-1,output.size(1),1).squeeze()
        pred_inter = self.mask_softmax(pred_inter, mask)
        start_loss, individual_start_loss = self.max_boundary(pred_start, frame_start, videoFeat_lengths)
        end_loss, individual_end_loss = self.max_boundary(pred_end, frame_end, videoFeat_lengths)
        inter_loss, individual_inter_loss = self.max_inter(pred_inter,frame_start,frame_end,videoFeat_lengths)
        individual_loss = individual_start_loss + individual_end_loss + individual_inter_loss
        # Attention loss is disabled; keep a zero tensor to preserve the return contract.
        atten_loss = torch.tensor(0).cuda()
        # atten_loss = torch.sum(-( (1-localiz) * torch.log((1-attention) + 1E-12)), dim=1)
        # atten_loss = torch.mean(atten_loss)
        # NOTE(review): 'attention' is just channel 0 of the video encoding,
        # presumably returned for visualization/compatibility — confirm.
        attention = output_video[:,:,0]
        # NOTE(review): the else-branch below is dead code (condition is literally True).
        if True:
            # total_loss = start_loss + end_loss + atten_loss
            total_loss = start_loss + end_loss + 1*inter_loss
        else:
            total_loss = start_loss + end_loss
        return total_loss, individual_loss, pred_start, pred_end, attention, atten_loss
| [
"torch.nn.Dropout",
"torch.log",
"torch.nn.Tanh",
"modeling.dynamic_filters.build.ACRM_query",
"torch.abs",
"torch.nn.LSTM",
"torch.mean",
"utils.rnns.feed_forward_rnn",
"torch.stack",
"utils.pooling.MeanPoolingLayer",
"torch.tensor",
"torch.norm",
"torch.sum",
"torch.nn.Linear",
"modeli... | [((549, 564), 'modeling.dynamic_filters.build.ACRM_query', 'ACRM_query', (['cfg'], {}), '(cfg)\n', (559, 564), False, 'from modeling.dynamic_filters.build import DynamicFilter, ACRM_query, ACRM_video\n'), ((1371, 1386), 'modeling.dynamic_filters.build.ACRM_video', 'ACRM_video', (['cfg'], {}), '(cfg)\n', (1381, 1386), False, 'from modeling.dynamic_filters.build import DynamicFilter, ACRM_query, ACRM_video\n'), ((1509, 1530), 'torch.nn.Linear', 'nn.Linear', (['(512 * 2)', '(1)'], {}), '(512 * 2, 1)\n', (1518, 1530), False, 'from torch import nn\n'), ((1559, 1576), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1)'], {}), '(512, 1)\n', (1568, 1576), False, 'from torch import nn\n'), ((1669, 1990), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'cfg.ACRM_CLASSIFICATION.INPUT_SIZE', 'hidden_size': 'cfg.ACRM_CLASSIFICATION.INPUT_SIZE_RNN', 'num_layers': 'cfg.LOCALIZATION.ACRM_NUM_LAYERS', 'bias': 'cfg.LOCALIZATION.BIAS', 'dropout': 'cfg.LOCALIZATION.DROPOUT', 'bidirectional': 'cfg.LOCALIZATION.BIDIRECTIONAL', 'batch_first': 'cfg.LOCALIZATION.BATCH_FIRST'}), '(input_size=cfg.ACRM_CLASSIFICATION.INPUT_SIZE, hidden_size=cfg.\n ACRM_CLASSIFICATION.INPUT_SIZE_RNN, num_layers=cfg.LOCALIZATION.\n ACRM_NUM_LAYERS, bias=cfg.LOCALIZATION.BIAS, dropout=cfg.LOCALIZATION.\n DROPOUT, bidirectional=cfg.LOCALIZATION.BIDIRECTIONAL, batch_first=cfg.\n LOCALIZATION.BATCH_FIRST)\n', (1676, 1990), False, 'from torch import nn\n'), ((3165, 3191), 'utils.pooling.MeanPoolingLayer', 'POOLING.MeanPoolingLayer', ([], {}), '()\n', (3189, 3191), True, 'import utils.pooling as POOLING\n'), ((4257, 4306), 'torch.nn.Dropout', 'nn.Dropout', (['cfg.DYNAMIC_FILTER.LSTM_VIDEO.DROPOUT'], {}), '(cfg.DYNAMIC_FILTER.LSTM_VIDEO.DROPOUT)\n', (4267, 4306), False, 'from torch import nn\n'), ((5007, 5052), 'torch.norm', 'torch.norm', (['data_tensor'], {'dim': '(-1)', 'keepdim': '(True)'}), '(data_tensor, dim=-1, keepdim=True)\n', (5017, 5052), False, 'import torch\n'), ((5182, 5227), 'torch.mean', 
'torch.mean', (['data_tensor'], {'dim': '(-1)', 'keepdim': '(True)'}), '(data_tensor, dim=-1, keepdim=True)\n', (5192, 5227), False, 'import torch\n'), ((5246, 5290), 'torch.std', 'torch.std', (['data_tensor'], {'dim': '(-1)', 'keepdim': '(True)'}), '(data_tensor, dim=-1, keepdim=True)\n', (5255, 5290), False, 'import torch\n'), ((7339, 7367), 'torch.stack', 'torch.stack', (['individual_loss'], {}), '(individual_loss)\n', (7350, 7367), False, 'import torch\n'), ((7738, 7766), 'torch.stack', 'torch.stack', (['individual_loss'], {}), '(individual_loss)\n', (7749, 7766), False, 'import torch\n'), ((8208, 8236), 'torch.stack', 'torch.stack', (['individual_loss'], {}), '(individual_loss)\n', (8219, 8236), False, 'import torch\n'), ((3243, 3330), 'torch.nn.Linear', 'nn.Linear', (['cfg.ACRM_CLASSIFICATION.INPUT_SIZE', 'cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE'], {}), '(cfg.ACRM_CLASSIFICATION.INPUT_SIZE, cfg.ACRM_CLASSIFICATION.\n HIDDEN_SIZE)\n', (3252, 3330), False, 'from torch import nn\n'), ((3339, 3348), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3346, 3348), False, 'from torch import nn\n'), ((3419, 3507), 'torch.nn.Linear', 'nn.Linear', (['cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE', 'cfg.ACRM_CLASSIFICATION.OUTPUT_SIZE'], {}), '(cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE, cfg.ACRM_CLASSIFICATION.\n OUTPUT_SIZE)\n', (3428, 3507), False, 'from torch import nn\n'), ((3553, 3640), 'torch.nn.Linear', 'nn.Linear', (['cfg.ACRM_CLASSIFICATION.INPUT_SIZE', 'cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE'], {}), '(cfg.ACRM_CLASSIFICATION.INPUT_SIZE, cfg.ACRM_CLASSIFICATION.\n HIDDEN_SIZE)\n', (3562, 3640), False, 'from torch import nn\n'), ((3649, 3658), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3656, 3658), False, 'from torch import nn\n'), ((3729, 3817), 'torch.nn.Linear', 'nn.Linear', (['cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE', 'cfg.ACRM_CLASSIFICATION.OUTPUT_SIZE'], {}), '(cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE, cfg.ACRM_CLASSIFICATION.\n OUTPUT_SIZE)\n', (3738, 3817), False, 'from torch import 
nn\n'), ((3877, 3964), 'torch.nn.Linear', 'nn.Linear', (['cfg.ACRM_CLASSIFICATION.INPUT_SIZE', 'cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE'], {}), '(cfg.ACRM_CLASSIFICATION.INPUT_SIZE, cfg.ACRM_CLASSIFICATION.\n HIDDEN_SIZE)\n', (3886, 3964), False, 'from torch import nn\n'), ((3973, 3982), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3980, 3982), False, 'from torch import nn\n'), ((4053, 4141), 'torch.nn.Linear', 'nn.Linear', (['cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE', 'cfg.ACRM_CLASSIFICATION.OUTPUT_SIZE'], {}), '(cfg.ACRM_CLASSIFICATION.HIDDEN_SIZE, cfg.ACRM_CLASSIFICATION.\n OUTPUT_SIZE)\n', (4062, 4141), False, 'from torch import nn\n'), ((6198, 6242), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['vector'], {'dim': 'dim'}), '(vector, dim=dim)\n', (6225, 6242), False, 'import torch\n'), ((7383, 7410), 'torch.mean', 'torch.mean', (['individual_loss'], {}), '(individual_loss)\n', (7393, 7410), False, 'import torch\n'), ((7634, 7659), 'torch.log', 'torch.log', (['p[i][index_bd]'], {}), '(p[i][index_bd])\n', (7643, 7659), False, 'import torch\n'), ((7782, 7809), 'torch.mean', 'torch.mean', (['individual_loss'], {}), '(individual_loss)\n', (7792, 7809), False, 'import torch\n'), ((8252, 8279), 'torch.mean', 'torch.mean', (['individual_loss'], {}), '(individual_loss)\n', (8262, 8279), False, 'import torch\n'), ((9371, 9445), 'utils.rnns.feed_forward_rnn', 'feed_forward_rnn', (['self.rnn_localization', 'output'], {'lengths': 'videoFeat_lengths'}), '(self.rnn_localization, output, lengths=videoFeat_lengths)\n', (9387, 9445), False, 'from utils.rnns import feed_forward_rnn\n'), ((6545, 6596), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['(vector * mask)'], {'dim': 'dim'}), '(vector * mask, dim=dim)\n', (6572, 6596), False, 'import torch\n'), ((6844, 6895), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['masked_vector'], {'dim': 'dim'}), '(masked_vector, dim=dim)\n', (6871, 6895), False, 'import torch\n'), ((7219, 7262), 
'torch.log', 'torch.log', (['(p[i][:vlength] / gt[i][:vlength])'], {}), '(p[i][:vlength] / gt[i][:vlength])\n', (7228, 7262), False, 'import torch\n'), ((8064, 8102), 'torch.log', 'torch.log', (['p[i][index_bs:index_be + 1]'], {}), '(p[i][index_bs:index_be + 1])\n', (8073, 8102), False, 'import torch\n'), ((10463, 10478), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (10475, 10478), False, 'import torch\n'), ((7297, 7311), 'torch.sum', 'torch.sum', (['ret'], {}), '(ret)\n', (7306, 7311), False, 'import torch\n'), ((7696, 7710), 'torch.sum', 'torch.sum', (['ret'], {}), '(ret)\n', (7705, 7710), False, 'import torch\n'), ((8166, 8180), 'torch.sum', 'torch.sum', (['ret'], {}), '(ret)\n', (8175, 8180), False, 'import torch\n'), ((5939, 5977), 'torch.abs', 'torch.abs', (['(filter_start - output_video)'], {}), '(filter_start - output_video)\n', (5948, 5977), False, 'import torch\n')] |
import os
import sys
import json
accasim = os.path.abspath(os.path.join('../../accasim'))
sys.path.insert(0, accasim)
import unittest
from accasim.base.resource_manager_class import Resources
class ResourcesTests(unittest.TestCase):
    """Smoke test: Resources can be constructed from a system definition file."""

    def load_sys_config(self):
        """Read the groups/resources sections from the test config file."""
        config_path = 'data/system_def.config'
        with open(config_path) as fp:
            config = json.load(fp)
        return config['groups'], config['resources']

    def test_init_resources(self):
        """Constructing Resources from the definition must not raise."""
        groups, resources = self.load_sys_config()
        resources = Resources(groups, resources)
class ResourcesTotalCheckTests(unittest.TestCase):
    """Check total system capacity computed from the RICC definition."""

    def load_sys_config(self):
        """Read the groups/resources sections from the RICC config file."""
        config_path = 'data/RICC.config'
        with open(config_path) as fp:
            config = json.load(fp)
        return config['groups'], config['resources']

    def test_init_resources(self):
        """Total core and memory capacity must match the known RICC values."""
        groups, resources = self.load_sys_config()
        resources = Resources(groups, resources)
        total_resources = resources.system_capacity('total')
        self.assertTrue(total_resources['core'] == 8384, 'Incorrect core def.')
        self.assertTrue(total_resources['mem'] == 12576000000, 'Incorrect core def.')
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| [
"sys.path.insert",
"os.path.join",
"json.load",
"accasim.base.resource_manager_class.Resources",
"unittest.main"
] | [((96, 123), 'sys.path.insert', 'sys.path.insert', (['(0)', 'accasim'], {}), '(0, accasim)\n', (111, 123), False, 'import sys\n'), ((64, 93), 'os.path.join', 'os.path.join', (['"""../../accasim"""'], {}), "('../../accasim')\n", (76, 93), False, 'import os\n'), ((1369, 1384), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1382, 1384), False, 'import unittest\n'), ((613, 641), 'accasim.base.resource_manager_class.Resources', 'Resources', (['groups', 'resources'], {}), '(groups, resources)\n', (622, 641), False, 'from accasim.base.resource_manager_class import Resources\n'), ((1057, 1085), 'accasim.base.resource_manager_class.Resources', 'Resources', (['groups', 'resources'], {}), '(groups, resources)\n', (1066, 1085), False, 'from accasim.base.resource_manager_class import Resources\n'), ((405, 417), 'json.load', 'json.load', (['f'], {}), '(f)\n', (414, 417), False, 'import json\n'), ((849, 861), 'json.load', 'json.load', (['f'], {}), '(f)\n', (858, 861), False, 'import json\n')] |
#!/usr/bin/env python3
import traceback
from progressbar import ProgressBar, ETA, FormatLabel, Bar
import csv
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from praw import Reddit
from redditnfl.reddittools.reddittoken import ensure_scopes
import sys
import pytz
EST = pytz.timezone('America/New_York')
UTC = pytz.utc
def userage(u):
    """Return the account age in whole days, computed entirely in UTC.

    The original mixed the *local* date (datetime.now()) with the UTC creation
    date (datetime.utcfromtimestamp), which is off by one day near local
    midnight for non-UTC machines.
    """
    today_utc = datetime.utcnow().date()
    created_utc = datetime.utcfromtimestamp(u.created_utc).date()
    return (today_utc - created_utc).days
def get_comments(u):
    """Fetch up to 1000 newest comments for *u*, memoized on the user object."""
    if hasattr(u, '__vet_comments'):
        return u.__vet_comments
    fetched = list(u.comments.new(limit=1000))
    u.__vet_comments = fetched
    return fetched
def topcomments_count(n):
    """Build a stat function listing a user's top-n subreddits by comment count.

    The returned callable formats each entry as "subreddit (share%)".
    """
    def f(u):
        counts = {}
        total = 0
        for comment in get_comments(u):
            name = comment.subreddit.display_name
            counts[name] = counts.get(name, 0) + 1
            total += 1
        ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)[0:n]
        return ", ".join("{s} ({p:.0f}%)".format(p=100.0 * count / total, s=sub)
                         for sub, count in ranked)
    return f
def subreddit_karma(sub):
    """Build a stat function summing a user's comment karma within *sub*."""
    def f(u):
        return sum(c.score for c in get_comments(u)
                   if c.subreddit.display_name == sub)
    return f
def modof(u):
    """Scrape old.reddit for up to five subreddits the user moderates.

    Returns a comma-joined string, or "" when the 'MODERATOR OF' box is absent
    (anonymous profiles, suspended users, layout changes).
    """
    url = "https://old.reddit.com/user/%s/overview" % u.name
    headers = {
            'user-agent': 'applicantstats/0.1;r/nfl',
            }
    page = requests.get(url=url, headers=headers)
    soup = BeautifulSoup(page.content, 'html5lib')
    try:
        links = soup.find(text='MODERATOR OF').parent.parent.next_sibling.find_all('a')
        names = [a.string.replace('r/', '') for a in links]
        return ", ".join(names[0:5])
    except AttributeError:
        return ""
def hourly_breakdown(u):
    """Comma-joined counts of the user's comments per Eastern-time hour (0-23)."""
    hours = [0] * 24
    for comment in get_comments(u):
        eastern = UTC.localize(datetime.utcfromtimestamp(comment.created_utc)).astimezone(EST)
        hours[eastern.hour] += 1
    return ",".join(str(count) for count in hours)
# Maps output CSV column name -> stat function; each function takes a praw
# Redditor and returns the value for that column.
stats = {
        'Account age': userage,
        'username': lambda u: u.name,
        'Link karma': lambda u: u.link_karma,
        'Comment karma': lambda u: u.comment_karma,
        'Top-3 SR by comment count': topcomments_count(3),
        'NFL comment karma': subreddit_karma('nfl'),
        'Mod of (top 5)': modof,
        'Hourly Breakdown (EST)': hourly_breakdown,
        }
def main():
    """Read usernames from sys.argv[1], write one stats row per user to sys.argv[2]."""
    r = Reddit('vet')
    ensure_scopes(r, 'read,history')
    infile = sys.argv[1]
    outfile = sys.argv[2]
    with open(outfile, 'w') as outf:
        out = csv.DictWriter(outf, ['inputname'] + list(stats.keys()))
        out.writeheader()
        users = []
        # Accept "u/name", "/name" etc. by stripping the prefix characters.
        with open(infile, 'r') as fp:
            for line in fp:
                un = line.strip().replace('u/', '').replace('/', '')
                users.append(un)
        print("\nVetting %d users\n" % len(users))
        with ProgressBar(max_value=len(users), widgets=[ETA(), ' ', Bar(marker='=', left='[', right=']'), ' ', FormatLabel('%(value)d/%(max_value)d')]) as p:
            for i, un in enumerate(users):
                user = r.redditor(un)
                userstats = {'inputname': user}
                try:
                    # Touching created_utc forces praw to fetch the account;
                    # raises for deleted/suspended users.
                    user.created_utc
                    for k, f in stats.items():
                        try:
                            userstats[k] = f(user)
                        except Exception as e:
                            # One failing stat must not abort the row.
                            traceback.print_exc()
                            userstats[k] = ""
                except Exception as e:
                    # Unfetchable user: emit a row with only the input name.
                    traceback.print_exc()
                out.writerow(userstats)
                p.update(i)

if __name__ == '__main__':
    main()
| [
"datetime.datetime.utcfromtimestamp",
"pytz.timezone",
"progressbar.Bar",
"requests.get",
"bs4.BeautifulSoup",
"datetime.datetime.now",
"traceback.print_exc",
"praw.Reddit",
"progressbar.ETA",
"redditnfl.reddittools.reddittoken.ensure_scopes",
"progressbar.FormatLabel"
] | [((301, 334), 'pytz.timezone', 'pytz.timezone', (['"""America/New_York"""'], {}), "('America/New_York')\n", (314, 334), False, 'import pytz\n'), ((1528, 1566), 'requests.get', 'requests.get', ([], {'url': 'url', 'headers': 'headers'}), '(url=url, headers=headers)\n', (1540, 1566), False, 'import requests\n'), ((1578, 1621), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html5lib"""'], {}), "(response.content, 'html5lib')\n", (1591, 1621), False, 'from bs4 import BeautifulSoup\n'), ((2554, 2567), 'praw.Reddit', 'Reddit', (['"""vet"""'], {}), "('vet')\n", (2560, 2567), False, 'from praw import Reddit\n'), ((2572, 2604), 'redditnfl.reddittools.reddittoken.ensure_scopes', 'ensure_scopes', (['r', '"""read,history"""'], {}), "(r, 'read,history')\n", (2585, 2604), False, 'from redditnfl.reddittools.reddittoken import ensure_scopes\n'), ((1964, 2010), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['comment.created_utc'], {}), '(comment.created_utc)\n', (1989, 2010), False, 'from datetime import datetime\n'), ((379, 393), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (391, 393), False, 'from datetime import datetime\n'), ((403, 443), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['u.created_utc'], {}), '(u.created_utc)\n', (428, 443), False, 'from datetime import datetime\n'), ((3105, 3110), 'progressbar.ETA', 'ETA', ([], {}), '()\n', (3108, 3110), False, 'from progressbar import ProgressBar, ETA, FormatLabel, Bar\n'), ((3117, 3153), 'progressbar.Bar', 'Bar', ([], {'marker': '"""="""', 'left': '"""["""', 'right': '"""]"""'}), "(marker='=', left='[', right=']')\n", (3120, 3153), False, 'from progressbar import ProgressBar, ETA, FormatLabel, Bar\n'), ((3160, 3198), 'progressbar.FormatLabel', 'FormatLabel', (['"""%(value)d/%(max_value)d"""'], {}), "('%(value)d/%(max_value)d')\n", (3171, 3198), False, 'from progressbar import ProgressBar, ETA, FormatLabel, Bar\n'), ((3723, 3744), 
'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3742, 3744), False, 'import traceback\n'), ((3596, 3617), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3615, 3617), False, 'import traceback\n')] |
import random
import string
import json
import os
KEYS_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project", "keys_file.json")
def get_random_alphanumeric_string(length):
    """Return a random string of ASCII letters and digits of the given length."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def write_data(keys_data):
    """Persist *keys_data* to KEYS_FILE as pretty-printed JSON."""
    with open(KEYS_FILE, "w") as handle:
        json.dump(keys_data, handle, indent=2)
def create_keys(num, length):
    """Generate *num* unique keys of *length* chars and return them as a list.

    Uniqueness is enforced against both the keys already stored on disk
    (valid and used) and the keys generated earlier in this same call — the
    original version did not check within-batch duplicates and used O(n) list
    membership tests; a set gives O(1) lookups and in-batch uniqueness.
    """
    # Collect all previously issued keys (valid + used) into a set.
    try:
        with open(KEYS_FILE, "r") as f:
            keys_data = json.load(f)
            existing_keys = set(keys_data["valid_keys"])
            for val in keys_data["used_keys"]:
                existing_keys.add(val["key"])
    except IOError:
        existing_keys = set()

    out_keys_list = []
    for _ in range(num):
        new_key = get_random_alphanumeric_string(length)
        while new_key in existing_keys:
            new_key = get_random_alphanumeric_string(length)
        # Record the fresh key so later iterations cannot reuse it.
        existing_keys.add(new_key)
        out_keys_list.append(new_key)
    return out_keys_list
def add_keys(num):
    """Generate *num* 13-char keys, print each one, and append them to valid_keys."""
    try:
        with open(KEYS_FILE, "r") as fp:
            keys_data = json.load(fp)
    except IOError:
        # No key store yet: start from an empty one.
        keys_data = {"valid_keys": []}

    fresh_keys = create_keys(num, 13)
    for key in fresh_keys:
        print(key)
    keys_data["valid_keys"].extend(fresh_keys)
    write_data(keys_data)
def main():
    """Interactive CLI: prompt for a key count and add that many keys."""
    print("Used to generate user keys for Corec AutoScheduler")
    num = input("How many keys would you like to add?\n: ")
    print()
    # Re-prompt until the user supplies a non-negative integer string.
    while not num.isdigit():
        print("Please enter a valid number!")
        num = input("How many keys would you like to add?\n: ")
    add_keys(int(num))

if __name__ == "__main__":
    main()
| [
"os.path.realpath",
"json.load",
"random.choice",
"json.dump"
] | [((92, 118), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (108, 118), False, 'import os\n'), ((472, 505), 'json.dump', 'json.dump', (['keys_data', 'f'], {'indent': '(2)'}), '(keys_data, f, indent=2)\n', (481, 505), False, 'import json\n'), ((283, 316), 'random.choice', 'random.choice', (['letters_and_digits'], {}), '(letters_and_digits)\n', (296, 316), False, 'import random\n'), ((750, 762), 'json.load', 'json.load', (['f'], {}), '(f)\n', (759, 762), False, 'import json\n'), ((1419, 1431), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1428, 1431), False, 'import json\n')] |
#!/usr/bin/env python
"""Speak a randomly assembled time of day via the macOS `say` command."""
import random
import subprocess

HOURS = range(1, 13)
# Python 3 fix: range() is no longer a list, so concatenating it to a list
# raised TypeError — wrap it in list().
MINUTES = ["o'clock"] + ["o %s" % x for x in range(1, 10)] + list(range(10, 60))
AM_PM = ['a.m.', 'p.m.']

TIME = 'it is now {} {} {}'.format(random.choice(HOURS),
                                   random.choice(MINUTES),
                                   random.choice(AM_PM))

subprocess.call(['say', '-v', 'Alex', TIME])
| [
"random.choice",
"subprocess.call"
] | [((345, 389), 'subprocess.call', 'subprocess.call', (["['say', '-v', 'Alex', TIME]"], {}), "(['say', '-v', 'Alex', TIME])\n", (360, 389), False, 'import subprocess\n'), ((212, 232), 'random.choice', 'random.choice', (['HOURS'], {}), '(HOURS)\n', (225, 232), False, 'import random\n'), ((266, 288), 'random.choice', 'random.choice', (['MINUTES'], {}), '(MINUTES)\n', (279, 288), False, 'import random\n'), ((322, 342), 'random.choice', 'random.choice', (['AM_PM'], {}), '(AM_PM)\n', (335, 342), False, 'import random\n')] |
# -*- coding: utf-8 -*-
"""Helper to deal with querystring parameters according to jsonapi specification"""
import json
from flask import current_app
from flapison.exceptions import (
BadRequest,
InvalidFilters,
InvalidSort,
InvalidField,
InvalidInclude,
)
from flapison.schema import get_model_field, get_relationships, get_schema_from_type
class QueryStringManager(object):
    """Querystring parser according to jsonapi reference"""

    # Query-string prefixes this parser knows how to handle.
    MANAGED_KEYS = ("filter", "page", "fields", "sort", "include", "q")

    def __init__(self, querystring, schema):
        """Initialization instance

        :param dict querystring: query string dict from request.args
        """
        if not isinstance(querystring, dict):
            raise ValueError(
                "QueryStringManager require a dict-like object querystring parameter"
            )

        self.qs = querystring
        self.schema = schema

    def _get_key_values(self, name):
        """Return a dict containing key / values items for a given key, used for items like filters, page, etc.

        :param str name: name of the querystring parameter
        :return dict: a dict of key / values items
        """
        results = {}

        for key, value in self.qs.items():
            try:
                if not key.startswith(name):
                    continue

                # Extract the bracketed part: "fields[user]" -> "user".
                key_start = key.index("[") + 1
                key_end = key.index("]")
                item_key = key[key_start:key_end]

                # Comma-separated values become lists.
                if "," in value:
                    item_value = value.split(",")
                else:
                    item_value = value
                results.update({item_key: item_value})
            except Exception:
                raise BadRequest("Parse error", source={"parameter": key})

        return results

    def _simple_filters(self, dict_):
        """Build {'name', 'op', 'val'} filter dicts from filter[...]=... pairs."""
        ret = []
        # Since the value of the filter can be a list, we have to choose the right
        # operator dynamically
        for key, value in dict_.items():
            if isinstance(value, list):
                op = "in_"
            else:
                op = "eq"
            ret.append({"name": key, "op": op, "val": value})
        return ret

    @property
    def querystring(self):
        """Return original querystring but containing only managed keys

        :return dict: dict of managed querystring parameter
        """
        return {
            key: value
            for (key, value) in self.qs.items()
            if key.startswith(self.MANAGED_KEYS) or self._get_key_values("filter[")
        }

    @property
    def filters(self):
        """Return filters from query string.

        :return list: filter information
        """
        results = []
        filters = self.qs.get("filter")
        if filters is not None:
            try:
                results.extend(json.loads(filters))
            except (ValueError, TypeError):
                raise InvalidFilters("Parse error")
        if self._get_key_values("filter["):
            results.extend(self._simple_filters(self._get_key_values("filter[")))
        return results

    @property
    def pagination(self):
        """Return all page parameters as a dict.

        :return dict: a dict of pagination information

        To allow multiples strategies, all parameters starting with `page` will be included. e.g::

            {
                "number": '25',
                "size": '150',
            }

        Example with number strategy::

            >>> query_string = {'page[number]': '25', 'page[size]': '10'}
            >>> parsed_query.pagination
            {'number': '25', 'size': '10'}
        """
        # check values type
        result = self._get_key_values("page")
        for key, value in result.items():
            if key not in ("number", "size"):
                raise BadRequest(
                    "{} is not a valid parameter of pagination".format(key),
                    source={"parameter": "page"},
                )
            try:
                int(value)
            except ValueError:
                raise BadRequest(
                    "Parse error", source={"parameter": "page[{}]".format(key)}
                )

        # page[size]=0 disables pagination; reject it if the app forbids that.
        if (
            current_app.config.get("ALLOW_DISABLE_PAGINATION", True) is False
            and int(result.get("size", 1)) == 0
        ):
            raise BadRequest(
                "You are not allowed to disable pagination",
                source={"parameter": "page[size]"},
            )

        if current_app.config.get("MAX_PAGE_SIZE") is not None and "size" in result:
            if int(result["size"]) > current_app.config["MAX_PAGE_SIZE"]:
                raise BadRequest(
                    "Maximum page size is {}".format(
                        current_app.config["MAX_PAGE_SIZE"]
                    ),
                    source={"parameter": "page[size]"},
                )

        return result

    @property
    def fields(self):
        """Return fields wanted by client.

        :return dict: a dict of sparse fieldsets information

        Return value will be a dict containing all fields by resource, for example::

            {
                "user": ['name', 'email'],
            }
        """
        result = self._get_key_values("fields")
        for key, value in result.items():
            if not isinstance(value, list):
                result[key] = [value]

        # Validate every requested field against the resource's schema.
        for key, value in result.items():
            schema = get_schema_from_type(key)
            for obj in value:
                if obj not in schema._declared_fields:
                    raise InvalidField(
                        "{} has no attribute {}".format(schema.__name__, obj)
                    )

        return result

    @property
    def sorting(self):
        """Return fields to sort by including sort name for SQLAlchemy and row
        sort parameter for other ORMs

        :return list: a list of sorting information

        Example of return value::

            [
                {'field': 'created_at', 'order': 'desc'},
            ]
        """
        if self.qs.get("sort"):
            sorting_results = []
            for sort_field in self.qs["sort"].split(","):
                # A leading "-" means descending order.
                field = sort_field.replace("-", "")
                if field not in self.schema._declared_fields:
                    raise InvalidSort(
                        "{} has no attribute {}".format(self.schema.__name__, field)
                    )
                if field in get_relationships(self.schema):
                    raise InvalidSort(
                        "You can't sort on {} because it is a relationship field".format(
                            field
                        )
                    )
                field = get_model_field(self.schema, field)
                order = "desc" if sort_field.startswith("-") else "asc"
                sorting_results.append({"field": field, "order": order})
            return sorting_results

        return []

    @property
    def include(self):
        """Return fields to include

        :return list: a list of include information
        """
        include_param = self.qs.get("include")
        # Split into individual relationship paths up front. The previous code
        # iterated the raw string character-by-character, so the
        # MAX_INCLUDE_DEPTH check below could never fire.
        include_paths = include_param.split(",") if include_param else []

        if current_app.config.get("MAX_INCLUDE_DEPTH") is not None:
            for include_path in include_paths:
                if (
                    len(include_path.split("."))
                    > current_app.config["MAX_INCLUDE_DEPTH"]
                ):
                    raise InvalidInclude(
                        "You can't use include through more than {} relationships".format(
                            current_app.config["MAX_INCLUDE_DEPTH"]
                        )
                    )

        return include_paths
| [
"json.loads",
"flapison.exceptions.InvalidFilters",
"flapison.schema.get_model_field",
"flapison.exceptions.BadRequest",
"flapison.schema.get_schema_from_type",
"flask.current_app.config.get",
"flapison.schema.get_relationships"
] | [((4414, 4509), 'flapison.exceptions.BadRequest', 'BadRequest', (['"""You are not allowed to disable pagination"""'], {'source': "{'parameter': 'page[size]'}"}), "('You are not allowed to disable pagination', source={'parameter':\n 'page[size]'})\n", (4424, 4509), False, 'from flapison.exceptions import BadRequest, InvalidFilters, InvalidSort, InvalidField, InvalidInclude\n'), ((5530, 5555), 'flapison.schema.get_schema_from_type', 'get_schema_from_type', (['key'], {}), '(key)\n', (5550, 5555), False, 'from flapison.schema import get_model_field, get_relationships, get_schema_from_type\n'), ((7262, 7305), 'flask.current_app.config.get', 'current_app.config.get', (['"""MAX_INCLUDE_DEPTH"""'], {}), "('MAX_INCLUDE_DEPTH')\n", (7284, 7305), False, 'from flask import current_app\n'), ((4271, 4327), 'flask.current_app.config.get', 'current_app.config.get', (['"""ALLOW_DISABLE_PAGINATION"""', '(True)'], {}), "('ALLOW_DISABLE_PAGINATION', True)\n", (4293, 4327), False, 'from flask import current_app\n'), ((4565, 4604), 'flask.current_app.config.get', 'current_app.config.get', (['"""MAX_PAGE_SIZE"""'], {}), "('MAX_PAGE_SIZE')\n", (4587, 4604), False, 'from flask import current_app\n'), ((6825, 6860), 'flapison.schema.get_model_field', 'get_model_field', (['self.schema', 'field'], {}), '(self.schema, field)\n', (6840, 6860), False, 'from flapison.schema import get_model_field, get_relationships, get_schema_from_type\n'), ((1754, 1806), 'flapison.exceptions.BadRequest', 'BadRequest', (['"""Parse error"""'], {'source': "{'parameter': key}"}), "('Parse error', source={'parameter': key})\n", (1764, 1806), False, 'from flapison.exceptions import BadRequest, InvalidFilters, InvalidSort, InvalidField, InvalidInclude\n'), ((2883, 2902), 'json.loads', 'json.loads', (['filters'], {}), '(filters)\n', (2893, 2902), False, 'import json\n'), ((2970, 2999), 'flapison.exceptions.InvalidFilters', 'InvalidFilters', (['"""Parse error"""'], {}), "('Parse error')\n", (2984, 2999), False, 
'from flapison.exceptions import BadRequest, InvalidFilters, InvalidSort, InvalidField, InvalidInclude\n'), ((6558, 6588), 'flapison.schema.get_relationships', 'get_relationships', (['self.schema'], {}), '(self.schema)\n', (6575, 6588), False, 'from flapison.schema import get_model_field, get_relationships, get_schema_from_type\n')] |
# Copyright (C) 2021-2022 INPE.
# DSAT-Cli is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
from dsat import cache
from dsat.config import Config
from dsat.utils import computeImageSize, createImage, pickServer
import logging as log
import multiprocessing as mp
from PIL import Image
import requests
from tqdm import tqdm
# Process-shared lock serializing cache reads in requestImage.
# NOTE(review): assumes workers inherit it via fork-start — verify behavior
# on platforms that default to the 'spawn' start method.
lock = mp.Lock()
def requestImage(url):
    """Fetch a single tile image from *url*, with cache and error fallbacks.

    Returns a PIL RGBA image. On any HTTP failure (non-OK status or a
    requests exception) a *copy* of the configured "missing tile"
    placeholder is returned, so callers never have to handle errors.
    """
    log.info('Request ' + url[11:])  # NOTE(review): 11 presumably trims the scheme prefix for shorter logs — verify
    # First, look in the cache (read is guarded by the shared lock)...
    with lock:
        if Config.useCache and cache.exists(url):
            log.info('--> Loaded from cache')
            return cache.load(url) # Done!
    try:
        # else, try the network...
        response = requests.get(url, stream=True)
        log.info('* Response code:' + str(response.status_code))
        if not response.ok:
            log.info('** Tile not found **')
            # Copy so callers can paste onto it without mutating the shared placeholder.
            return Config.missingTileImage.copy()
        image = Image.open(response.raw).convert('RGBA')
        if Config.useCache:
            log.info('<-- Saving to cache')
            # NOTE(review): this save is not under `lock`, unlike the read above — confirm intended.
            cache.save(url, image)
        return image
    except requests.exceptions.RequestException:
        log.info('** Tile not found **')
        return Config.missingTileImage.copy()
def getReference(ref, level, x, y):
    """Fetch the reference overlay tile *ref* at the given level/x/y."""
    server = pickServer()
    url = f"{Config.protocol}://{server}.{Config.baseURL}/references/{ref}/{level}/{x}/{y}.png"
    return requestImage(url)
def addReference(name, level, x, y, tile):
    """Composite the reference overlay *name* onto *tile*, in place."""
    overlay = getReference(name, level, x, y)
    tile.paste(overlay, (0, 0), mask=overlay)
def getTile(date, product, time, level, x, y):
    """Download one map tile, compositing reference overlays if configured."""
    server = pickServer()
    url = f"{Config.protocol}://{server}.{Config.baseURL}/{date}/{product}/{time}/{level}/{x}/{y}.png"
    tile = requestImage(url)
    if Config.addReferences:
        for overlay_name in Config.references:
            addReference(overlay_name, level, x, y, tile)
    return tile
def getTiles(date, product, time, level, tilesx, tilesy):
    """Download a grid of tiles.

    Returns a dict mapping grid indices ``(i, j)`` — positions within
    *tilesx*/*tilesy* — to the downloaded tile images.
    """
    tiles = {}
    # enumerate() replaces the original zip(xs, range(len(xs))) idiom.
    for i, x in enumerate(tilesx):
        for j, y in enumerate(tilesy):
            tiles[(i, j)] = getTile(date, product, time, level, x, y)
    return tiles
def getAllTiles(date, product, time, level):
    """Download every tile of the full map at *level*."""
    locations = Config.tilesLocation[level]
    return getTiles(date, product, time, level, locations, locations)
def getImage(date, product, time, level, tilesx, tilesy, queue=None):
    """Assemble one stitched image from its tiles; optionally report via *queue*."""
    tile_grid = getTiles(date, product, time, level, tilesx, tilesy)
    ncols, nlines = computeImageSize(len(tilesx), len(tilesy))
    stitched = createImage(tile_grid, nlines, ncols)
    if queue:
        # Tag the result with the worker's process name so the parent can re-order.
        queue.put((mp.current_process().name, stitched))
    return stitched
# Note: multi-process version (uses multiprocessing despite the old comment)
def getImages(dates, product, level, tilesx, tilesy, nProcess=Config.nProcesses):
    """Render one stitched image per date, fanning work out to processes.

    At most *nProcess* workers run at a time; results come back through a
    queue tagged with the worker's process name (a date string), and are
    sorted at the end so the returned list matches the order of *dates*.
    """
    queue, processes, images = mp.Queue(), [], []
    for date in tqdm(dates, desc='Processing images', ascii=True, unit='image'):
        log.info('Processing date ' + date.strftime('%Y/%m/%d %H:%M'))
        # The process name encodes the date so results can be re-ordered below.
        p = mp.Process(target=getImage, name=date.strftime('%Y%m%d%H%M'),
            args=(date.strftime('%Y%m%d'), product, date.strftime('%H%M'), level, tilesx, tilesy, queue))
        processes.append(p)
        p.start()
        if len(processes) == nProcess: # Consume, wait and go ahead...
            log.info('Waiting...')
            _consume(queue, processes, images)
            queue, processes = mp.Queue(), []
    # The last (possibly partial) batch...
    _consume(queue, processes, images)
    # Note: images -> list([pid, image])
    # Order images by process name = date order
    images.sort(key=lambda tup: tup[0])
    return [img[1] for img in images]
def _consume(queue, processes, images):
for p in processes:
img = queue.get() # block
images.append(img)
for p in processes:
p.join() # "Come together, yeah! \o"
# Note: Single-thread version
'''
def getImages(dates, product, level, tilesx, tilesy):
images = []
for date in tqdm(dates, desc='Processing images', ascii=True, unit='image'):
images.append(getImage(date.strftime('%Y%m%d'), product,
date.strftime('%H%M'), level, tilesx, tilesy))
return images
'''
| [
"dsat.cache.exists",
"PIL.Image.open",
"dsat.utils.pickServer",
"tqdm.tqdm",
"dsat.cache.load",
"dsat.config.Config.missingTileImage.copy",
"requests.get",
"dsat.cache.save",
"multiprocessing.Lock",
"dsat.utils.createImage",
"multiprocessing.Queue",
"logging.info",
"multiprocessing.current_p... | [((460, 469), 'multiprocessing.Lock', 'mp.Lock', ([], {}), '()\n', (467, 469), True, 'import multiprocessing as mp\n'), ((498, 529), 'logging.info', 'log.info', (["('Request ' + url[11:])"], {}), "('Request ' + url[11:])\n", (506, 529), True, 'import logging as log\n'), ((2615, 2648), 'dsat.utils.createImage', 'createImage', (['tiles', 'nlines', 'ncols'], {}), '(tiles, nlines, ncols)\n', (2626, 2648), False, 'from dsat.utils import computeImageSize, createImage, pickServer\n'), ((2912, 2975), 'tqdm.tqdm', 'tqdm', (['dates'], {'desc': '"""Processing images"""', 'ascii': '(True)', 'unit': '"""image"""'}), "(dates, desc='Processing images', ascii=True, unit='image')\n", (2916, 2975), False, 'from tqdm import tqdm\n'), ((770, 800), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (782, 800), False, 'import requests\n'), ((1421, 1433), 'dsat.utils.pickServer', 'pickServer', ([], {}), '()\n', (1431, 1433), False, 'from dsat.utils import computeImageSize, createImage, pickServer\n'), ((1755, 1767), 'dsat.utils.pickServer', 'pickServer', ([], {}), '()\n', (1765, 1767), False, 'from dsat.utils import computeImageSize, createImage, pickServer\n'), ((2877, 2887), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (2885, 2887), True, 'import multiprocessing as mp\n'), ((603, 620), 'dsat.cache.exists', 'cache.exists', (['url'], {}), '(url)\n', (615, 620), False, 'from dsat import cache\n'), ((634, 667), 'logging.info', 'log.info', (['"""--> Loaded from cache"""'], {}), "('--> Loaded from cache')\n", (642, 667), True, 'import logging as log\n'), ((687, 702), 'dsat.cache.load', 'cache.load', (['url'], {}), '(url)\n', (697, 702), False, 'from dsat import cache\n'), ((906, 938), 'logging.info', 'log.info', (['"""** Tile not found **"""'], {}), "('** Tile not found **')\n", (914, 938), True, 'import logging as log\n'), ((958, 988), 'dsat.config.Config.missingTileImage.copy', 'Config.missingTileImage.copy', 
([], {}), '()\n', (986, 988), False, 'from dsat.config import Config\n'), ((1086, 1117), 'logging.info', 'log.info', (['"""<-- Saving to cache"""'], {}), "('<-- Saving to cache')\n", (1094, 1117), True, 'import logging as log\n'), ((1130, 1152), 'dsat.cache.save', 'cache.save', (['url', 'image'], {}), '(url, image)\n', (1140, 1152), False, 'from dsat import cache\n'), ((1231, 1263), 'logging.info', 'log.info', (['"""** Tile not found **"""'], {}), "('** Tile not found **')\n", (1239, 1263), True, 'import logging as log\n'), ((1279, 1309), 'dsat.config.Config.missingTileImage.copy', 'Config.missingTileImage.copy', ([], {}), '()\n', (1307, 1309), False, 'from dsat.config import Config\n'), ((3357, 3379), 'logging.info', 'log.info', (['"""Waiting..."""'], {}), "('Waiting...')\n", (3365, 3379), True, 'import logging as log\n'), ((1005, 1029), 'PIL.Image.open', 'Image.open', (['response.raw'], {}), '(response.raw)\n', (1015, 1029), False, 'from PIL import Image\n'), ((3458, 3468), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (3466, 3468), True, 'import multiprocessing as mp\n'), ((2682, 2702), 'multiprocessing.current_process', 'mp.current_process', ([], {}), '()\n', (2700, 2702), True, 'import multiprocessing as mp\n')] |
import logging
from io import BytesIO
from zipfile import ZipFile
from opennem.utils.handlers import _handle_zip, chain_streams
from opennem.utils.http import http
from opennem.utils.mime import mime_from_content, mime_from_url
logger = logging.getLogger("opennem.downloader")
def url_downloader(url: str) -> bytes:
    """Download *url* and return its raw content as bytes.

    ZIP responses are unpacked transparently: a single-entry archive
    returns that entry's bytes; a multi-entry archive (including nested
    ``.zip`` members) is concatenated into one byte stream.

    Raises:
        Exception: if the server does not return an OK response.
    """
    logger.debug("Downloading: {}".format(url))

    r = http.get(url)

    if not r.ok:
        raise Exception("Bad link returned {}: {}".format(r.status_code, url))

    content = BytesIO(r.content)

    file_mime = mime_from_content(content)
    if not file_mime:
        # Content sniffing failed; fall back to the URL's extension.
        file_mime = mime_from_url(url)

    # @TODO handle all this in utils/archive.py
    # and make it all generic to handle other
    # mime types
    if file_mime == "application/zip":
        with ZipFile(content) as zf:
            if len(zf.namelist()) == 1:
                return zf.open(zf.namelist()[0]).read()

            # Mixed streams: nested archives are unwrapped, plain members
            # are passed through. (The unused stream counter was removed.)
            streams = []
            for filename in zf.namelist():
                if filename.endswith(".zip"):
                    streams.append(_handle_zip(zf.open(filename), "r"))
                else:
                    streams.append(zf.open(filename))

            return chain_streams(streams).read()

    return content.getvalue()
| [
"logging.getLogger",
"opennem.utils.mime.mime_from_content",
"zipfile.ZipFile",
"io.BytesIO",
"opennem.utils.handlers.chain_streams",
"opennem.utils.mime.mime_from_url",
"opennem.utils.http.http.get"
] | [((239, 278), 'logging.getLogger', 'logging.getLogger', (['"""opennem.downloader"""'], {}), "('opennem.downloader')\n", (256, 278), False, 'import logging\n'), ((465, 478), 'opennem.utils.http.http.get', 'http.get', (['url'], {}), '(url)\n', (473, 478), False, 'from opennem.utils.http import http\n'), ((591, 609), 'io.BytesIO', 'BytesIO', (['r.content'], {}), '(r.content)\n', (598, 609), False, 'from io import BytesIO\n'), ((627, 653), 'opennem.utils.mime.mime_from_content', 'mime_from_content', (['content'], {}), '(content)\n', (644, 653), False, 'from opennem.utils.mime import mime_from_content, mime_from_url\n'), ((697, 715), 'opennem.utils.mime.mime_from_url', 'mime_from_url', (['url'], {}), '(url)\n', (710, 715), False, 'from opennem.utils.mime import mime_from_content, mime_from_url\n'), ((880, 896), 'zipfile.ZipFile', 'ZipFile', (['content'], {}), '(content)\n', (887, 896), False, 'from zipfile import ZipFile\n'), ((1333, 1349), 'opennem.utils.handlers.chain_streams', 'chain_streams', (['c'], {}), '(c)\n', (1346, 1349), False, 'from opennem.utils.handlers import _handle_zip, chain_streams\n')] |
import re
from functools import lru_cache
from pathlib import Path
import typing as T
from yaml import dump as yaml_dump
from faker import Faker
from faker.config import AVAILABLE_LOCALES
from tools.faker_docs_utils.format_samples import (
yaml_samples_for_docstring,
snowfakery_output_for,
)
from .summarize_fakers import summarize_all_fakers
from .language_codes import language_codes
from snowfakery.fakedata.fake_data_generator import FakeData
# Leading indentation at the start of each line (stripped from docstrings).
_RE_COMBINE_WHITESPACE = re.compile(r"(?<=^) +", re.MULTILINE)
# Faker ':sample:' annotation lines (removed from docstrings).
_RE_STRIP_SAMPLES = re.compile(r"^\s*:sample:.*$", re.MULTILINE)
# Comment lines starting with '#', which Markdown would render as titles.
_COMMENT_LINES_THAT_LOOK_LIKE_TITLES = re.compile(r"^#", re.MULTILINE)
# Pseudo-locales that do not correspond to a real country.
non_countries = ("fr_QC", "ar_AA")
# Keep only country-specific locales (those containing an underscore).
AVAILABLE_LOCALES = [
    locale
    for locale in AVAILABLE_LOCALES
    if locale not in non_countries and "_" in locale
]
def cleanup_docstring(my_str):
    """Normalize a Faker docstring: drop samples and indentation, relabel fields."""
    text = _RE_COMBINE_WHITESPACE.sub("", my_str)
    text = _RE_STRIP_SAMPLES.sub("", text).strip()
    # Demote leading '#' so comment lines don't render as Markdown titles.
    text = _COMMENT_LINES_THAT_LOOK_LIKE_TITLES.sub(" #", text)
    relabelings = (
        (":example", "\nExample:"),
        (":param", "\nParam:"),
        (":return", "\nReturn:"),
    )
    for old, new in relabelings:
        text = text.replace(old, new)
    return text
@lru_cache(maxsize=1000)
def country_for_locale(locale: str):
    """Return Faker's country name for *locale* (e.g. "en_US").

    Cached because constructing a Faker is comparatively expensive and the
    same locales are looked up repeatedly while building the docs.
    """
    f = Faker(locale)
    return f.current_country()
def locales_as_markdown_links(current_locale: str, locale_list: T.List[str]):
    """Generate Markdown links for every locale other than *current_locale*.

    Locales whose country cannot be resolved are silently skipped.
    """

    def format_link(locale: str):
        try:
            country = country_for_locale(locale)
        except (ValueError, AttributeError):
            # No resolvable country for this locale; skip it.
            return None
        language = language_codes[locale.split("_")[0]]
        label = f"{language} as spoken in {country}: ({locale})"
        return f" - [{label}](fakedata/{locale}.md)\n"

    candidates = (format_link(loc) for loc in locale_list if loc != current_locale)
    return " ".join(link for link in candidates if link)
# Default page-header template, read once at import time.
standard_header = (Path(__file__).parent / "fakedata_header_short.md").read_text()
def generate_markdown_for_fakers(outfile, locale: str, header: str = standard_header):
    """Generate the Markdown page for a locale and write it to *outfile*.

    The page starts from *header* — a ``str.format`` template receiving
    ``locale``, ``current_country`` and ``language`` — followed by the
    commonly-used fakers and then the rarely-used ones.
    """
    faker = Faker(locale)
    language = language_codes[locale.split("_")[0]]
    fd = FakeData([], locale)
    all_fakers = summarize_all_fakers(fd)

    def output(*args, **kwargs):
        # Shorthand: everything printed goes to the output file.
        print(*args, **kwargs, file=outfile)

    head_md = header.format(
        locale=locale, current_country=faker.current_country(), language=language
    )
    output(
        head_md,
    )
    output("[TOC]\n")
    output("## Commonly Used\n")
    output_fakers_in_categories(output, [f for f in all_fakers if f.common], "", locale)
    output("## Rarely Used\n")
    output_fakers_in_categories(
        output, [f for f in all_fakers if not f.common], "", locale
    )
def output_fakers_in_categories(output, fakers, common: str, locale):
    """Sort fakers into named categories and then output them.

    NOTE(review): ``common`` is unused in this body and callers pass "" —
    confirm whether it can be dropped or was meant to label the section.
    """
    categorized = categorize(fakers)
    for category_name, fakers in categorized.items():
        output(f"### {category_name.title()} Fakers\n")
        for faker in fakers:
            output_faker(faker.name, faker, output, locale)
def categorize(fakers):
    """Group fakers by their ``category`` attribute, sorted by category name."""
    grouped = {}
    for entry in fakers:
        grouped.setdefault(entry.category, []).append(entry)
    return dict(sorted(grouped.items()))
def gather_samples(name, data, locale):
    """Collect (recipe, output) sample pairs for one faker.

    If *data* already carries a hand-written sample, render it (prefixing a
    ``snowfakery_locale`` variable for non-default locales); otherwise
    generate samples from the faker's docstring. Falsy samples are dropped.
    """
    if data.sample:  # I already have a sample, no need to generate one
        if locale and locale != "en_US":
            locale_header = [{"var": "snowfakery_locale", "value": locale}]
            sample = locale_header + data.sample
        else:
            sample = data.sample
        example = yaml_dump(sample, sort_keys=False)
        samples = [snowfakery_output_for(data.name, example, example)]
    else:  # need to generate a sample from scratch
        samples = yaml_samples_for_docstring(name, data.fullname, data.doc, locale)
    return list(filter(None, samples))
def output_faker(name: str, data, output: callable, locale: str):
    """Render the Markdown section for a single faker via *output*.

    Fakers with no usable samples are skipped entirely. (The original
    ``data: str`` annotation was wrong — *data* is a faker summary object
    with ``doc``, ``aliases``, ``source``, ``url`` and ``fullname``.)
    """
    samples = gather_samples(name, data, locale)
    # if there isn't at least one sample, don't publish
    if not samples:
        return

    output(f"#### fake: {name}\n")
    cleaned_docstring = cleanup_docstring(data.doc)
    if cleaned_docstring:
        output(cleaned_docstring)
    output()
    output("Aliases: ", ", ".join(data.aliases))
    output()
    link = f"[{data.source}]({data.url}) : {data.fullname}"
    output("Source:", link)
    # samples is guaranteed non-empty here (early return above), so the
    # original unreachable `else` branch has been removed.
    output()
    for sample in samples:
        yaml, out = sample
        output("Recipe:\n")
        output(indent(yaml))
        output("Outputs:\n")
        output(indent(out))
def indent(yaml: str):
    """Indent every non-blank line of *yaml* by four spaces (Markdown code block)."""

    def shifted(line):
        # Blank/whitespace-only lines are left untouched.
        return "    " + line if line.strip() else line

    return "\n".join(shifted(line) for line in yaml.split("\n"))
def generate_markdown_for_all_locales(path: Path, locales=None):
    """Write one Markdown file per locale into *path* (None = all locales)."""
    if not locales:
        locales = AVAILABLE_LOCALES
    for locale in locales:
        target = Path(path, f"{locale}.md")
        with target.open("w") as f:
            print(f.name)  # progress indicator on stdout
            generate_markdown_for_fakers(f, locale)
def generate_locales_index(path: Path, locales_list: T.List[str]):
    """Write a Markdown index of locale links to *path*.

    A falsy *locales_list* means "all available locales". If no locale
    produces a link, nothing is written (beyond creating/truncating the file).
    """
    locales_list = locales_list or AVAILABLE_LOCALES
    with Path(path).open("w") as outfile:

        def output(*args, **kwargs):
            # Shorthand: everything printed goes to the index file.
            print(*args, **kwargs, file=outfile)

        locales = locales_as_markdown_links(None, locales_list)
        if locales:
            output("## Fake Data Locales\n")
            output(
                "Learn more about Snowfakery localization in the [Fake Data Tutorial](fakedata.md#localization)\n"
            )
            output(locales)
| [
"yaml.dump",
"re.compile",
"pathlib.Path",
"snowfakery.fakedata.fake_data_generator.FakeData",
"tools.faker_docs_utils.format_samples.snowfakery_output_for",
"faker.Faker",
"tools.faker_docs_utils.format_samples.yaml_samples_for_docstring",
"functools.lru_cache"
] | [((484, 520), 're.compile', 're.compile', (['"""(?<=^) +"""', 're.MULTILINE'], {}), "('(?<=^) +', re.MULTILINE)\n", (494, 520), False, 'import re\n'), ((542, 586), 're.compile', 're.compile', (['"""^\\\\s*:sample:.*$"""', 're.MULTILINE'], {}), "('^\\\\s*:sample:.*$', re.MULTILINE)\n", (552, 586), False, 'import re\n'), ((626, 656), 're.compile', 're.compile', (['"""^#"""', 're.MULTILINE'], {}), "('^#', re.MULTILINE)\n", (636, 656), False, 'import re\n'), ((1283, 1306), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1000)'}), '(maxsize=1000)\n', (1292, 1306), False, 'from functools import lru_cache\n'), ((1352, 1365), 'faker.Faker', 'Faker', (['locale'], {}), '(locale)\n', (1357, 1365), False, 'from faker import Faker\n'), ((2314, 2327), 'faker.Faker', 'Faker', (['locale'], {}), '(locale)\n', (2319, 2327), False, 'from faker import Faker\n'), ((2389, 2409), 'snowfakery.fakedata.fake_data_generator.FakeData', 'FakeData', (['[]', 'locale'], {}), '([], locale)\n', (2397, 2409), False, 'from snowfakery.fakedata.fake_data_generator import FakeData\n'), ((4034, 4068), 'yaml.dump', 'yaml_dump', (['sample'], {'sort_keys': '(False)'}), '(sample, sort_keys=False)\n', (4043, 4068), True, 'from yaml import dump as yaml_dump\n'), ((4210, 4275), 'tools.faker_docs_utils.format_samples.yaml_samples_for_docstring', 'yaml_samples_for_docstring', (['name', 'data.fullname', 'data.doc', 'locale'], {}), '(name, data.fullname, data.doc, locale)\n', (4236, 4275), False, 'from tools.faker_docs_utils.format_samples import yaml_samples_for_docstring, snowfakery_output_for\n'), ((4088, 4138), 'tools.faker_docs_utils.format_samples.snowfakery_output_for', 'snowfakery_output_for', (['data.name', 'example', 'example'], {}), '(data.name, example, example)\n', (4109, 4138), False, 'from tools.faker_docs_utils.format_samples import yaml_samples_for_docstring, snowfakery_output_for\n'), ((2103, 2117), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2107, 2117), False, 'from 
pathlib import Path\n'), ((5946, 5956), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (5950, 5956), False, 'from pathlib import Path\n'), ((5615, 5641), 'pathlib.Path', 'Path', (['path', 'f"""{locale}.md"""'], {}), "(path, f'{locale}.md')\n", (5619, 5641), False, 'from pathlib import Path\n')] |
from distutils.core import setup
# Minimal distutils metadata for the single-module 'entrypoints' distribution.
setup(
    name='entrypoints',
    version='0.2.2',
    description='Discover and load entry points from installed packages.',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/takluyver/entrypoints',
    py_modules=['entrypoints'],
)
| [
"distutils.core.setup"
] | [((34, 279), 'distutils.core.setup', 'setup', ([], {'name': '"""entrypoints"""', 'version': '"""0.2.2"""', 'description': '"""Discover and load entry points from installed packages."""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/takluyver/entrypoints"""', 'py_modules': "['entrypoints']"}), "(name='entrypoints', version='0.2.2', description=\n 'Discover and load entry points from installed packages.', author=\n '<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/takluyver/entrypoints', py_modules=['entrypoints'])\n", (39, 279), False, 'from distutils.core import setup\n')] |
# -*- coding: utf-8 -*-
"""The main module for running the SEAMM installer.
"""
import argparse
import logging
import sys
import seamm_installer
logger = logging.getLogger(__name__)
def run():
    """Run the installer.

    How the installer runs is controlled by command-line arguments.
    We need the installer object to set up the parser; however, it needs
    some of the information from the command line, so we parse those
    arguments first, then set up the rest.
    """
    # Parse the commandline
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--log-level",
        default="WARNING",
        type=str.upper,
        choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help=("The level of informational output, defaults to " "'%(default)s'"),
    )
    parser.add_argument(
        "--update-cache", action="store_true", help="Update the package database."
    )
    parser.add_argument(
        "--environment",
        default="",
        type=str.lower,
        help="The conda environment to install to, defaults to the current environment",
    )

    # Parse just the global options first (unless help was requested),
    # because the installer object itself needs the environment setting.
    if "-h" not in sys.argv and "--help" not in sys.argv:
        options, _ = parser.parse_known_args()
        kwargs = vars(options)

        # Set up the logging
        level = kwargs.pop("log_level")
        logging.basicConfig(level=level)

        environment = kwargs.pop("environment")

        # Create the installer
        installer = seamm_installer.SEAMMInstaller(environment=environment)
    else:
        # Create the installer
        installer = seamm_installer.SEAMMInstaller()

    subparsers = parser.add_subparsers()

    # 'check' additionally takes a -y/--yes flag, so it is set up by hand.
    check = subparsers.add_parser("check")
    check.set_defaults(method=installer.check)
    check.add_argument(
        "-y", "--yes", action="store_true", help="Answer 'yes' to all prompts"
    )
    _add_modules_argument(check)

    # The remaining subcommands share exactly the same argument layout.
    for name, method in (
        ("install", installer.install),
        ("show", installer.show),
        ("update", installer.update),
        ("uninstall", installer.uninstall),
    ):
        subparser = subparsers.add_parser(name)
        subparser.set_defaults(method=method)
        _add_modules_argument(subparser)

    # Parse the options
    options = parser.parse_args()
    kwargs = vars(options)

    # Remove the logging and environment options since they have been handled
    level = kwargs.pop("log_level")
    environment = kwargs.pop("environment")

    # get the modules
    modules = kwargs.pop("modules", ["all"])

    # And remove the method
    method = kwargs.pop("method", installer.show)

    # Check the installer itself: install/update always answer yes;
    # 'check' honours its -y flag; everything else answers no.
    if method == installer.install or method == installer.update:
        answer = True
    elif method == installer.check:
        answer = kwargs["yes"]
    else:
        answer = False
    installer.check_installer(yes=answer)

    # Run the requested subcommand
    method(*modules, **kwargs)


def _add_modules_argument(subparser):
    """Attach the shared 'modules' positional argument to *subparser*.

    Every subcommand accepts the same optional module list, defaulting to
    ['all']; factoring it out removes five identical copies of this setup.
    """
    subparser.add_argument(
        "modules",
        nargs="*",
        default=["all"],
        help=(
            "The modules to install. 'core', 'plug-ins', 'all', 'development', or a "
            "list of modules separated by spaces. Default is %(default)s."
        ),
    )
| [
"logging.getLogger",
"logging.basicConfig",
"argparse.ArgumentParser",
"seamm_installer.SEAMMInstaller"
] | [((157, 184), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (174, 184), False, 'import logging\n'), ((525, 550), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (548, 550), False, 'import argparse\n'), ((1384, 1416), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level'}), '(level=level)\n', (1403, 1416), False, 'import logging\n'), ((1518, 1573), 'seamm_installer.SEAMMInstaller', 'seamm_installer.SEAMMInstaller', ([], {'environment': 'environment'}), '(environment=environment)\n', (1548, 1573), False, 'import seamm_installer\n'), ((1635, 1667), 'seamm_installer.SEAMMInstaller', 'seamm_installer.SEAMMInstaller', ([], {}), '()\n', (1665, 1667), False, 'import seamm_installer\n')] |
#!/usr/bin/env python
# encoding: utf-8
import sys
import os
sys.path.append('./../')
from petsc4py import PETSc
if __name__=="__main__":
    from psystem import *
    # Mesh refinement level: refn=1 gives Nx=16 DoFs per unit, so the
    # goal value of 7 yields 1024 DoFs.
    refn = 7 # goal 7 (1024 DoFs)
    outdir = './_output'
    # Only MPI rank 0 creates the output directory, to avoid a race
    # between ranks on shared filesystems.
    rank = PETSc.COMM_WORLD.rank
    if rank==0:
        if not os.path.exists(outdir): os.mkdir(outdir)
    #
    p_system(refn=refn, #refn=1 -> Nx=16 DoFs per unit
             path=outdir,
             #solver_type='sharpclaw',
             final_time=400,
             nDOut=400,
             restart_from_frame=None,
             wave_type=1,
             A=1.0,
             sigma2=10.0,
             coeff_type='smooth',
             Lx=400)
| [
"os.path.exists",
"sys.path.append",
"os.mkdir"
] | [((61, 85), 'sys.path.append', 'sys.path.append', (['"""./../"""'], {}), "('./../')\n", (76, 85), False, 'import sys\n'), ((289, 311), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (303, 311), False, 'import os\n'), ((313, 329), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (321, 329), False, 'import os\n')] |
import os
import time
from concurrent import futures
from contextlib import contextmanager
from unittest import mock
import pytest
import diffuse
from diffuse.diffuser.base import pool
def target(msg):
    """Trivial worker target used throughout the diffuser tests."""
    greeting = f"hello {msg}"
    return greeting
def target_exception(msg):
    """Worker target that unconditionally fails with ValueError('Test')."""
    raise ValueError("Test")
def target_long_running(msg):
    """Worker target that sleeps 100 ms before greeting — simulates slow work."""
    delay_seconds = 1 / 10
    time.sleep(delay_seconds)
    return f"hello {msg}"
class BaseDiffuserTest:
    """Shared test suite run against every diffuser type.

    Subclasses provide the ``diffuser_type``, ``future_running`` and
    ``future_cancelled`` fixtures, which encode the per-type differences in
    expected Future state. NOTE(review): tests return the diffuser —
    presumably for subclass reuse; confirm.
    """
    def test__target_not_callable(self, diffuser_type):
        # A non-callable target must be rejected at creation time.
        with pytest.raises(TypeError, match="target must be a callable."):
            diffuse.Diffuser.create(target=None, diffuser_type=diffuser_type)
    def test__diffuse(self, mocker, diffuser_type, future_running):
        # Happy path: one task completes and spawns exactly one worker.
        with diffuse.Diffuser.create(
            target=target, diffuser_type=diffuser_type
        ) as diffuser:
            spy_async_worker = mocker.spy(diffuser, "_WORKER_CLASS")
            future = diffuser.diffuse("world")
            futures.wait([future])
            assert isinstance(future, futures.Future)
            assert not future.cancelled()
            assert future.done()
            assert future.result() == "hello world"
            assert diffuser._task_queue.qsize() == 0
            spy_async_worker.assert_called_once_with(
                diffuser._task_queue, False, **diffuser._worker_init_kwargs()
            )
            assert diffuser._worker_pool.size == 1
            return diffuser
    def test__diffuse__task_exception(
        self, mocker, diffuser_type, future_running
    ):
        # A target that raises surfaces its exception through the future.
        with diffuse.Diffuser.create(
            target=target_exception, diffuser_type=diffuser_type
        ) as diffuser:
            spy_async_worker = mocker.spy(diffuser, "_WORKER_CLASS")
            future = diffuser.diffuse("world")
            futures.wait([future])
            assert isinstance(future, futures.Future)
            assert not future.cancelled()
            assert future.done()
            with pytest.raises(ValueError, match="Test"):
                assert future.result()
            assert diffuser._task_queue.qsize() == 0
            spy_async_worker.assert_called_once_with(
                diffuser._task_queue, False, **diffuser._worker_init_kwargs()
            )
            assert diffuser._worker_pool.size == 1
            return diffuser
    def test__diffuse__task_consumed_by_worker(
        self, mocker, diffuser_type, future_running
    ):
        """
        Verifies that no new worker is initialized when there are no tasks in
        the queue, i.e. the task was consumed by another worker as soon as it
        was added to the queue.
        """
        with diffuse.Diffuser.create(
            target=target, diffuser_type=diffuser_type
        ) as diffuser:
            # Force qsize() == 0 so the diffuser believes the task was taken.
            mocker.patch.object(diffuser._task_queue, "qsize", return_value=0)
            future = diffuser.diffuse("world")
            assert isinstance(future, futures.Future)
            assert future.running() == future_running
            assert not future.done()
            assert diffuser._worker_pool.size == 0
            return diffuser
    def test__diffuse__max_pool_size(
        self, mocker, diffuser_type, future_running
    ):
        # A full pool (max_workers=1, one worker present) must not grow.
        with diffuse.Diffuser.create(
            target=target, diffuser_type=diffuser_type, max_workers=1
        ) as diffuser:
            mock_worker = mocker.MagicMock()
            diffuser._worker_pool.add(mock_worker)
            future = diffuser.diffuse("world")
            assert isinstance(future, futures.Future)
            assert future.running() == future_running
            assert not future.done()
            assert diffuser._worker_pool.size == 1
            return diffuser
    def test__diffuse__close(self, mocker, diffuser_type):
        # Default close() waits for in-flight work to finish.
        diffuser = diffuse.Diffuser.create(
            target=target, diffuser_type=diffuser_type
        )
        spy_pool_shutdown = mocker.spy(diffuser._worker_pool, "shutdown")
        future = diffuser.diffuse("world")
        diffuser.close()
        assert diffuser.closed
        assert not future.cancelled()
        assert future.done()
        spy_pool_shutdown.assert_called_once_with(wait=True)
        return diffuser
    def test__diffuse__close__no_wait(self, mocker, diffuser_type):
        # close(wait=False) returns before the slow task completes.
        diffuser = diffuse.Diffuser.create(
            target=target_long_running, diffuser_type=diffuser_type
        )
        spy_pool_shutdown = mocker.spy(diffuser._worker_pool, "shutdown")
        future = diffuser.diffuse("world")
        diffuser.close(wait=False)
        assert diffuser.closed
        assert not future.cancelled()
        assert not future.done()
        spy_pool_shutdown.assert_called_once_with(wait=False)
        return diffuser
    def test__diffuse__close__cancel_pending(
        self, mocker, diffuser_type, future_cancelled
    ):
        # close(cancel_pending=True) drains the queue, cancelling queued tasks.
        diffuser = diffuse.Diffuser.create(
            target=target, diffuser_type=diffuser_type
        )
        spy_pool_shutdown = mocker.spy(diffuser._worker_pool, "shutdown")
        # Stub the worker class so the task stays queued (never consumed).
        mock_worker = mocker.patch.object(diffuser, "_WORKER_CLASS")
        future = diffuser.diffuse("world")
        diffuser.close(cancel_pending=True)
        assert diffuser.closed
        assert future.cancelled() == future_cancelled
        assert diffuser._task_queue.qsize() == 0
        spy_pool_shutdown.assert_called_once_with(wait=True)
        return diffuser
    def test__diffuse__closed_diffuser(self, mocker, diffuser_type):
        # Diffusing after close() must raise and leave state untouched.
        diffuser = diffuse.Diffuser.create(
            target=target, diffuser_type=diffuser_type
        )
        diffuser.close()
        with pytest.raises(
            RuntimeError, match="Cannot diffuse on closed Diffuser."
        ):
            diffuser.diffuse("world")
        assert diffuser._task_queue.qsize() == 0
        assert diffuser._worker_pool.size == 0
        return diffuser
| [
"diffuse.Diffuser.create",
"concurrent.futures.wait",
"pytest.raises",
"time.sleep"
] | [((326, 344), 'time.sleep', 'time.sleep', (['(1 / 10)'], {}), '(1 / 10)\n', (336, 344), False, 'import time\n'), ((3729, 3796), 'diffuse.Diffuser.create', 'diffuse.Diffuser.create', ([], {'target': 'target', 'diffuser_type': 'diffuser_type'}), '(target=target, diffuser_type=diffuser_type)\n', (3752, 3796), False, 'import diffuse\n'), ((4235, 4320), 'diffuse.Diffuser.create', 'diffuse.Diffuser.create', ([], {'target': 'target_long_running', 'diffuser_type': 'diffuser_type'}), '(target=target_long_running, diffuser_type=diffuser_type\n )\n', (4258, 4320), False, 'import diffuse\n'), ((4808, 4875), 'diffuse.Diffuser.create', 'diffuse.Diffuser.create', ([], {'target': 'target', 'diffuser_type': 'diffuser_type'}), '(target=target, diffuser_type=diffuser_type)\n', (4831, 4875), False, 'import diffuse\n'), ((5439, 5506), 'diffuse.Diffuser.create', 'diffuse.Diffuser.create', ([], {'target': 'target', 'diffuser_type': 'diffuser_type'}), '(target=target, diffuser_type=diffuser_type)\n', (5462, 5506), False, 'import diffuse\n'), ((466, 526), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""target must be a callable."""'}), "(TypeError, match='target must be a callable.')\n", (479, 526), False, 'import pytest\n'), ((540, 605), 'diffuse.Diffuser.create', 'diffuse.Diffuser.create', ([], {'target': 'None', 'diffuser_type': 'diffuser_type'}), '(target=None, diffuser_type=diffuser_type)\n', (563, 605), False, 'import diffuse\n'), ((688, 755), 'diffuse.Diffuser.create', 'diffuse.Diffuser.create', ([], {'target': 'target', 'diffuser_type': 'diffuser_type'}), '(target=target, diffuser_type=diffuser_type)\n', (711, 755), False, 'import diffuse\n'), ((920, 942), 'concurrent.futures.wait', 'futures.wait', (['[future]'], {}), '([future])\n', (932, 942), False, 'from concurrent import futures\n'), ((1514, 1591), 'diffuse.Diffuser.create', 'diffuse.Diffuser.create', ([], {'target': 'target_exception', 'diffuser_type': 'diffuser_type'}), '(target=target_exception, 
diffuser_type=diffuser_type)\n', (1537, 1591), False, 'import diffuse\n'), ((1756, 1778), 'concurrent.futures.wait', 'futures.wait', (['[future]'], {}), '([future])\n', (1768, 1778), False, 'from concurrent import futures\n'), ((2603, 2670), 'diffuse.Diffuser.create', 'diffuse.Diffuser.create', ([], {'target': 'target', 'diffuser_type': 'diffuser_type'}), '(target=target, diffuser_type=diffuser_type)\n', (2626, 2670), False, 'import diffuse\n'), ((3166, 3252), 'diffuse.Diffuser.create', 'diffuse.Diffuser.create', ([], {'target': 'target', 'diffuser_type': 'diffuser_type', 'max_workers': '(1)'}), '(target=target, diffuser_type=diffuser_type,\n max_workers=1)\n', (3189, 3252), False, 'import diffuse\n'), ((5569, 5640), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Cannot diffuse on closed Diffuser."""'}), "(RuntimeError, match='Cannot diffuse on closed Diffuser.')\n", (5582, 5640), False, 'import pytest\n'), ((1927, 1966), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Test"""'}), "(ValueError, match='Test')\n", (1940, 1966), False, 'import pytest\n')] |
#
# Copyright (C) 2018 ETH Zurich, University of Bologna
# and GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME>, ETH (<EMAIL>)
import regmap_c_header
import regmap_table
import regmap_rst
import regmap_json
import collections
class Constant(regmap_c_header.Constant):
    """A named constant belonging to a register map.

    ``parent`` (a Regmap) is used to build a colon-separated full name.
    """

    def __init__(self, name, type, value=None, parent=None):
        self.name = name
        self.type = type
        self.value = value
        self.parent = parent

    def get_full_name(self):
        """Return the constant name, prefixed with its group path if any."""
        if self.parent is None:
            return self.name
        group = self.parent.get_group_path()
        return self.name if group is None else group + ':' + self.name
class Regfield(regmap_c_header.Regfield, regmap_table.Regfield, regmap_rst.Regfield, regmap_json.Regfield):
    """A single bit-field of a register.

    Stores the field position (``bit``) and ``width`` in bits, the
    ``access`` mode, descriptive text (``desc``/``full_name``) and the
    field/register reset values.
    """

    def __init__(self, name, width, bit, access, desc, full_name=None, reset=None, reg_reset=None):
        self.name = name
        self.full_name = full_name
        self.width = width
        self.bit = bit
        self.access = access
        self.desc = desc
        self.reset = reset
        self.reg_reset = reg_reset
class Register(regmap_c_header.Register, regmap_table.Register, regmap_rst.Register, regmap_json.Register):
    """A register in a register map, holding an ordered set of bit-fields."""

    def __init__(self, name, offset, width, desc, parent=None, reset=None, help=None):
        self.name = name
        self.parent = parent
        self.offset = offset
        self.width = width
        self.desc = desc
        self.reset = reset
        self.help = help
        # Fields keyed by name; OrderedDict preserves declaration order for
        # the generated output backends.
        self.fields = collections.OrderedDict([])

    def add_regfield(self, regfield):
        """Add a bit-field to this register, keyed by its name."""
        self.fields[regfield.name] = regfield

    def get_full_name(self):
        """Return the register name, prefixed with its group path if any."""
        if self.parent is None:
            return self.name
        group = self.parent.get_group_path()
        if group is not None:
            return group + ':' + self.name
        else:
            return self.name

    def get_offset(self):
        """Return this register's offset.

        BUG FIX (dead code removed): the original returned ``self.offset``
        immediately, leaving a parent-offset accumulation below the return
        unreachable.  The unreachable code has been deleted; runtime
        behavior is unchanged.  If hierarchical offsets (parent offset +
        local offset) are actually wanted, reinstate the parent lookup with
        a ``parent is None`` guard -- confirm intended semantics with
        callers.
        """
        return self.offset
class Regmap(regmap_c_header.Regmap, regmap_table.Regmap, regmap_rst.Regmap, regmap_json.Regmap):
    """A register map: an ordered set of registers, constants and nested maps."""

    def __init__(self, name, parent=None, offset=None):
        self.name = name
        self.parent = parent
        self.offset = offset
        # Ordered containers keep declaration order for generated output.
        self.registers = collections.OrderedDict([])
        self.constants = collections.OrderedDict([])
        self.regmaps = collections.OrderedDict([])

    def add_register(self, register):
        """Store a child register under its name and return it."""
        self.registers[register.name] = register
        return register

    def add_regmap(self, regmap):
        """Store a nested register map under its name and return it."""
        self.regmaps[regmap.name] = regmap
        return regmap

    def add_constant(self, constant):
        """Store a constant under its name and return it."""
        self.constants[constant.name] = constant
        return constant

    def get_register(self, name):
        """Look up a register by name (None when absent)."""
        return self.registers.get(name)

    def get_group_path(self):
        """Return the colon-separated path of this map within its parents."""
        if self.parent is None:
            return self.name
        group = self.parent.get_group_path()
        return self.name if group is None else group + ':' + self.name
| [
"collections.OrderedDict"
] | [((2080, 2107), 'collections.OrderedDict', 'collections.OrderedDict', (['[]'], {}), '([])\n', (2103, 2107), False, 'import collections\n'), ((2918, 2945), 'collections.OrderedDict', 'collections.OrderedDict', (['[]'], {}), '([])\n', (2941, 2945), False, 'import collections\n'), ((2971, 2998), 'collections.OrderedDict', 'collections.OrderedDict', (['[]'], {}), '([])\n', (2994, 2998), False, 'import collections\n'), ((3022, 3049), 'collections.OrderedDict', 'collections.OrderedDict', (['[]'], {}), '([])\n', (3045, 3049), False, 'import collections\n')] |
import h5py
from os import listdir, getcwd
from os.path import isfile, join
def hdf_to_csv():
    """Convert every HDF5 competition file into a CSV file under csv_data/.

    Each channel under the file's 'DYNAMIC DATA' group becomes one CSV
    column; the column values come from the channel's 'MEASURED' dataset.
    All channels are assumed to have the same number of samples -- TODO
    confirm against the data files.
    """
    path = getcwd() + '/2020_Challenge_IOT_Analytics/competitionfiles'
    files = [f for f in listdir(path) if isfile(join(path, f))]
    for file in files:
        # Context manager guarantees the HDF5 handle is closed even when an
        # exception occurs mid-conversion (the original leaked it then).
        with h5py.File(path + '/' + file, 'r') as rf:
            chanIDs = rf['DYNAMIC DATA']
            keys = list(chanIDs.keys())
            csv_file_name = getcwd() + '/csv_data/' + \
                file.split('/')[-1].split('.')[0] + '.csv'
            with open(csv_file_name, 'w') as wf:
                channel_vals = {key: chanIDs[key]['MEASURED'][()] for key in keys}
                # Header row: one column per channel, in group order.
                wf.write(','.join(keys) + '\n')
                n_rows = len(channel_vals[keys[0]])
                for i in range(n_rows):
                    # join() replaces the original build-then-strip-comma
                    # string concatenation (quadratic per row).
                    wf.write(','.join(str(channel_vals[key][i]) for key in keys) + '\n')
def hdf_to_json():
    """Convert every HDF5 competition file into a JSON-lines file under json_data/.

    Each output line is one JSON object mapping channel name to its i-th
    'MEASURED' sample.  The textual format (manual str() of values) is kept
    identical to the original output.
    """
    path = getcwd() + '/2020_Challenge_IOT_Analytics/competitionfiles'
    files = [f for f in listdir(path) if isfile(join(path, f))]
    for file in files:
        # Context manager guarantees the HDF5 handle is closed even when an
        # exception occurs mid-conversion (the original leaked it then).
        with h5py.File(path + '/' + file, 'r') as rf:
            chanIDs = rf['DYNAMIC DATA']
            keys = list(chanIDs.keys())
            json_file_name = getcwd() + '/json_data/' + \
                file.split('/')[-1].split('.')[0] + '.json'
            with open(json_file_name, 'w') as wf:
                channel_vals = {key: chanIDs[key]['MEASURED'][()] for key in keys}
                for i in range(len(channel_vals[keys[0]])):
                    # join() replaces the original append-then-strip-', '
                    # construction; output bytes are identical.
                    pairs = ', '.join(
                        '"' + key + '": ' + str(channel_vals[key][i])
                        for key in keys
                    )
                    wf.write('{' + pairs + '}\n')
if __name__ == '__main__':
    # Only the JSON conversion runs by default; call hdf_to_csv() for CSV.
    hdf_to_json()
| [
"h5py.File",
"os.listdir",
"os.path.join",
"os.getcwd"
] | [((107, 115), 'os.getcwd', 'getcwd', ([], {}), '()\n', (113, 115), False, 'from os import listdir, getcwd\n'), ((267, 300), 'h5py.File', 'h5py.File', (["(path + '/' + file)", '"""r"""'], {}), "(path + '/' + file, 'r')\n", (276, 300), False, 'import h5py\n'), ((1093, 1101), 'os.getcwd', 'getcwd', ([], {}), '()\n', (1099, 1101), False, 'from os import listdir, getcwd\n'), ((1253, 1286), 'h5py.File', 'h5py.File', (["(path + '/' + file)", '"""r"""'], {}), "(path + '/' + file, 'r')\n", (1262, 1286), False, 'import h5py\n'), ((191, 204), 'os.listdir', 'listdir', (['path'], {}), '(path)\n', (198, 204), False, 'from os import listdir, getcwd\n'), ((1177, 1190), 'os.listdir', 'listdir', (['path'], {}), '(path)\n', (1184, 1190), False, 'from os import listdir, getcwd\n'), ((215, 228), 'os.path.join', 'join', (['path', 'f'], {}), '(path, f)\n', (219, 228), False, 'from os.path import isfile, join\n'), ((1201, 1214), 'os.path.join', 'join', (['path', 'f'], {}), '(path, f)\n', (1205, 1214), False, 'from os.path import isfile, join\n'), ((363, 371), 'os.getcwd', 'getcwd', ([], {}), '()\n', (369, 371), False, 'from os import listdir, getcwd\n'), ((1349, 1357), 'os.getcwd', 'getcwd', ([], {}), '()\n', (1355, 1357), False, 'from os import listdir, getcwd\n')] |
import logging
import magic
import os.path
import requests
from tqdm import tqdm
def read_in_chunks(file_path, blocksize=1024, chunks=-1):
    """Yield the contents of ``file_path`` in ``blocksize``-byte chunks.

    A tqdm progress bar (0-100 %) tracks how much of the file has been
    read.  ``chunks`` caps the number of chunks yielded; any negative
    value means "read the whole file".  Note the bar only advances while
    the generator is consumed.
    """
    with open(file_path, 'rb') as file_object:
        size = os.path.getsize(file_path)
        pbar = tqdm(total=100)
        current = 0
        while chunks:
            data = file_object.read(blocksize)
            if not data:
                break
            yield data
            chunks -= 1
            # BUG FIX: advance the bar by exactly the amount added to
            # ``current``.  The original incremented ``current`` first and
            # then passed min(100 - current, step) to pbar.update(), which
            # under-reported progress (a file of exactly two chunks only
            # ever displayed 50 %).
            step = round(blocksize / size * 100, 1)
            increment = min(step, 100 - current)
            current += increment
            pbar.update(increment)
        # Top the bar up to 100 % (covers rounding drift and early break).
        pbar.update(100 - current)
        pbar.close()
def upload_to_signed_url(file_path, signed_url, querier, orm_class):
    """Upload ``file_path`` to a pre-signed URL, then persist an upload record.

    ``signed_url`` is a dict expected to carry 'signed_url', 'data_hash'
    and optionally 'title' keys -- TODO confirm against the producer of
    these dicts.  ``querier``/``orm_class`` form the persistence layer.

    Returns ``orm_class(res)``; ``res`` is None when the HTTP upload failed.
    """
    res_upload = requests.put(
        signed_url.get('signed_url'),
        # Streamed upload: read_in_chunks is a generator, so the file is
        # never loaded into memory at once.
        data=read_in_chunks(file_path),
        headers={'Content-Type': 'application/octet-stream'}
    )
    res = None
    if res_upload.status_code == 200:
        data_hash = signed_url.get('data_hash')
        res = querier.basic_put(
            orm_class,
            uid=data_hash,
            payload=signed_url
        )
        # orm_class(res) wraps the raw response; its truthiness is taken to
        # signal success -- presumably falsy on a failed write, verify.
        if orm_class(res):
            logging.info("Successfully uploaded: %s",
                         signed_url.get('title', ''))
        else:
            logging.info("Error uploading: %s",
                         signed_url.get('title', ''))
    else:
        # NOTE(review): this branch runs when the PUT itself failed, but the
        # message talks about signed-url generation -- possibly misleading.
        logging.info("Error generating signed url for: %s",
                     signed_url.get('title', ''))
    return orm_class(res)
def upload_to_signed_url_list(file_paths, signed_urls, querier, orm_class):
    """Upload several files to their matching pre-signed URLs.

    ``file_paths`` and ``signed_urls`` must be parallel lists (same order);
    each url dict's 'title' must equal the corresponding file's basename.

    Returns the list of 'data_hash' values for the uploaded files.
    Raises on any failed upload ('Bad request') or failed database write.
    """
    assert len(file_paths) == len(signed_urls),\
        'Error getting the correct number of signed urls'
    data_uid_list = []
    # libmagic-based MIME detection from file contents.
    mime = magic.Magic(mime=True)
    for i in range(len(file_paths)):
        file_path = file_paths[i]
        file_name = os.path.basename(file_path)
        mime_type = mime.from_file(file_path)
        url = signed_urls[i]
        # Sanity check that the two lists are aligned.
        assert url.get('title', '') == file_name, 'Ordering issue'
        res_upload = requests.put(
            url.get('signed_url'),
            # Streamed upload via the read_in_chunks generator.
            data=read_in_chunks(file_path),
            headers={'Content-Type': mime_type}
        )
        if res_upload.status_code == 200:
            data_hash = url.get('data_hash')
            res = querier.basic_put(
                orm_class,
                uid=data_hash,
                payload=url
            )
            if res:
                logging.info("Successfully uploaded: %s",
                             url.get('title', ''))
                data_uid_list.append(url.get('data_hash'))
            else:
                logging.info("Error uploading: %s",
                             url.get('title', ''))
                raise Exception('Could not save information into database')
        else:
            raise Exception('Bad request')
    return data_uid_list
| [
"magic.Magic",
"tqdm.tqdm"
] | [((1787, 1809), 'magic.Magic', 'magic.Magic', ([], {'mime': '(True)'}), '(mime=True)\n', (1798, 1809), False, 'import magic\n'), ((291, 306), 'tqdm.tqdm', 'tqdm', ([], {'total': '(100)'}), '(total=100)\n', (295, 306), False, 'from tqdm import tqdm\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 09:15:33 2020
@author: dhulls
"""
# Imports
import numpy as np
np.random.seed(100)  # fix NumPy RNG for reproducibility
from tensorflow import random
random.set_seed(100)  # fix TensorFlow RNG for reproducibility
import os
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Working directory for local project imports (Kij) and data paths.
os.chdir('/Users/som/Dropbox/Complex_systems_RNN/DL_tutorial')
from Kij import Kij
from scipy.stats import beta
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
print(tf.__version__)
import tensorflow_docs as tfdocs
import tensorflow_docs.plots
import tensorflow_docs.modeling
# from matplotlib import rc

# Import dataset
# dataset_path = '/Users/som/Dropbox/Complex_systems_RNN/Data/DL_test_data_Eq_Hazus_125.csv'
dataset_path = '/Users/som/Dropbox/Complex_systems_RNN/Data/New data for improved disf match/Eq_10IM.csv'
# column_names = ['Time','P1','P2','IM','Rec']
# dataset = pd.read_csv(dataset_path, names=column_names,
#                       na_values = "?", comment='\t',
#                       sep=" ", skipinitialspace=True)
dataset = pd.read_csv(dataset_path)
# Drop columns not used as features in this run.
dataset.pop("IM")
dataset.pop("Time")
# dataset["IM"] = np.log(dataset["IM"])
# dataset.pop("P1")
# dataset.pop("P2")
# Beta-distribution parameters kept for the commented-out transform variant.
a1 = 1.0
b1 = 1.0
loc1 = 0
sca1 = 1
def transform(Rec):
    """Map a recovery fraction onto the fourth root of its residual.

    Computes (1 - Rec) ** (1/4); accepts scalars, NumPy arrays or pandas
    Series.
    """
    residual = 1 - Rec
    return np.power(residual, 0.25)
def invtransform(x):
    """Inverse of ``transform``: recover the recovery fraction 1 - x**4.

    Accepts scalars, NumPy arrays or pandas Series.
    """
    quartic = np.power(x, 4)
    return 1 - quartic
# Apply the fourth-root transform to the response and predictor columns.
dataset['Rec'] = transform(dataset['Rec'])
dataset['P1'] = transform(dataset['P1'])
dataset['P2'] = transform(dataset['P2'])
dataset.tail()

# Split the data into train and test
# frac=1.0 means every row lands in the (shuffled) training set ...
train_dataset = dataset.sample(frac=1.0,random_state=100)
# ... so the "test" set below is empty.
test_dataset = dataset.drop(train_dataset.index)

# Inspect the data
# sns.pairplot(train_dataset[["Rec", "P1", "P2", "Time"]], diag_kind="kde")
# sns.pairplot(train_dataset[["Rec", "IM"]], diag_kind="kde")
# Per-feature statistics used later by norm(); the label column is excluded.
train_stats = train_dataset.describe()
train_stats.pop("Rec")
train_stats = train_stats.transpose()
train_stats

# Split features from labels
train_labels = train_dataset.pop('Rec')
test_labels = test_dataset.pop('Rec')
# Normalize the data
def norm(x):
    """Z-score features with the training-set mean/std (from train_stats)."""
    mean, std = train_stats['mean'], train_stats['std']
    return (x - mean) / std
# Standardize features with training statistics (same stats for both sets).
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
# Build the model ,kernel_regularizer='l2'
def build_model():
    """Build and compile a small fully-connected regression network.

    Two hidden layers of 10 units feed a single linear output; trained
    with RMSprop(lr=0.001) on mean-squared error, reporting MAE and MSE.

    NOTE(review): softmax hidden activations are unusual for regression --
    confirm this is intended (tanh/relu are more common).
    """
    model = keras.Sequential([
        layers.Dense(10, activation='softmax', input_shape=[len(train_dataset.keys())],bias_initializer='zeros'),
        layers.Dense(10, activation='softmax',bias_initializer='zeros'),
        layers.Dense(1,bias_initializer='zeros')
    ])
    # optimizer = tf.keras.optimizers.RMSprop(0.001)
    optimizer = tf.keras.optimizers.RMSprop(0.001)
    model.compile(loss='mse',
                  optimizer=optimizer,
                  metrics=['mae', 'mse'])
    return model
model = build_model()

# Inspect the model
model.summary()
# example_batch = normed_train_data[:10]
# example_result = model.predict(example_batch)
# example_result

# Train the model
EPOCHS = 3000
# No validation split; shuffling disabled for reproducibility with the
# fixed seeds set at the top of the script.
history = model.fit(
    normed_train_data, train_labels,
    epochs=EPOCHS, validation_split = 0.0, verbose=0,
    callbacks=[tfdocs.modeling.EpochDots()],shuffle = False)
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()

## Verify model for multiple recovery curves
dataset_path1 = '/Users/som/Dropbox/Complex_systems_RNN/Data/New data for sequence/DL_verify_EQ_0_7.csv'
data1 = pd.read_csv(dataset_path1)
# data1["IM"] = np.log(data1["IM"])
Time1 = data1.pop("Time")
# data1.pop("P1")
# data1.pop("P2")
# Apply the same transform and normalization as the training pipeline.
data1['Rec'] = transform(data1['Rec'])
data1['P1'] = transform(data1['P1'])
data1['P2'] = transform(data1['P2'])
data1_labels = data1.pop('Rec')
normed_data1 = norm(data1)
data1_pred = model.predict(normed_data1).flatten()
"tensorflow.random.set_seed",
"pandas.read_csv",
"numpy.power",
"tensorflow_docs.modeling.EpochDots",
"os.chdir",
"tensorflow.keras.layers.Dense",
"numpy.random.seed",
"pandas.DataFrame",
"tensorflow.keras.optimizers.RMSprop"
] | [((138, 157), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (152, 157), True, 'import numpy as np\n'), ((188, 208), 'tensorflow.random.set_seed', 'random.set_seed', (['(100)'], {}), '(100)\n', (203, 208), False, 'from tensorflow import random\n'), ((309, 371), 'os.chdir', 'os.chdir', (['"""/Users/som/Dropbox/Complex_systems_RNN/DL_tutorial"""'], {}), "('/Users/som/Dropbox/Complex_systems_RNN/DL_tutorial')\n", (317, 371), False, 'import os\n'), ((1143, 1168), 'pandas.read_csv', 'pd.read_csv', (['dataset_path'], {}), '(dataset_path)\n', (1154, 1168), True, 'import pandas as pd\n'), ((3909, 3938), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (3921, 3938), True, 'import pandas as pd\n'), ((4142, 4168), 'pandas.read_csv', 'pd.read_csv', (['dataset_path1'], {}), '(dataset_path1)\n', (4153, 4168), True, 'import pandas as pd\n'), ((1623, 1647), 'numpy.power', 'np.power', (['(1 - Rec)', '(1 / 4)'], {}), '(1 - Rec, 1 / 4)\n', (1631, 1647), True, 'import numpy as np\n'), ((3335, 3369), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', (['(0.001)'], {}), '(0.001)\n', (3362, 3369), True, 'import tensorflow as tf\n'), ((1961, 1975), 'numpy.power', 'np.power', (['x', '(4)'], {}), '(x, 4)\n', (1969, 1975), True, 'import numpy as np\n'), ((3114, 3178), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""softmax"""', 'bias_initializer': '"""zeros"""'}), "(10, activation='softmax', bias_initializer='zeros')\n", (3126, 3178), False, 'from tensorflow.keras import layers\n'), ((3191, 3232), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'bias_initializer': '"""zeros"""'}), "(1, bias_initializer='zeros')\n", (3203, 3232), False, 'from tensorflow.keras import layers\n'), ((3855, 3882), 'tensorflow_docs.modeling.EpochDots', 'tfdocs.modeling.EpochDots', ([], {}), '()\n', (3880, 3882), True, 'import tensorflow_docs as tfdocs\n')] |
import torch
from torch import nn
class Cache(nn.Module):
    """Fixed-size key/value cache held in registered (non-trainable) buffers.

    ``idx_sparse`` stores integer keys and ``value_sparse`` stores float
    values; both are (n_entries, entry_size) tensors addressed by row index.
    """

    def __init__(self, n_entries, entry_size):
        super().__init__()
        self.n_entries = n_entries
        self.entry_size = entry_size
        shape = (n_entries, entry_size)
        # Buffers follow the module across devices (.to/.cuda) but are not
        # trainable parameters.
        self.register_buffer(
            name='idx_sparse',
            tensor=torch.zeros(shape, dtype=torch.long)
        )
        self.register_buffer(
            name='value_sparse',
            tensor=torch.zeros(shape)
        )

    def forward(self):
        return

    def read(self, idx):
        """Return the (keys, values) rows stored at ``idx``."""
        return self.idx_sparse[idx], self.value_sparse[idx]

    def write(self, idx, idx_sparse, value_sparse):
        """Overwrite the rows at ``idx`` with the given keys and values."""
        self.idx_sparse[idx] = idx_sparse
        self.value_sparse[idx] = value_sparse
        return
"torch.zeros"
] | [((313, 367), 'torch.zeros', 'torch.zeros', (['(n_entries, entry_size)'], {'dtype': 'torch.long'}), '((n_entries, entry_size), dtype=torch.long)\n', (324, 367), False, 'import torch\n'), ((461, 497), 'torch.zeros', 'torch.zeros', (['(n_entries, entry_size)'], {}), '((n_entries, entry_size))\n', (472, 497), False, 'import torch\n')] |
# stdlib
import os
import sys
import logging
# addlib
import boto3
log = logging.getLogger(__name__)
class RecordAccumulator(object):
    """Buffers records until a fixed capacity is reached, then drains."""

    def __init__(self, limit=20):
        self.limit = limit
        self.container = []

    def empty(self):
        """Drain the buffer: return all buffered records and reset."""
        drained, self.container = self.container, []
        return drained

    def full(self):
        """True once the buffer holds at least ``limit`` records."""
        return len(self.container) >= self.limit

    def append(self, record):
        """Buffer a single record."""
        self.container.append(record)
class KinesisProducer(object):
    """Batching producer for AWS Kinesis Streams ('kinesis') or Firehose
    ('firehose').

    Records accumulate locally and are flushed with the matching boto3
    batch API once the accumulator is full (20 records by default).
    """

    def __init__(self, api_name, region_name, stream_name, partition_key='DEFAULT'):
        # api_name must be 'kinesis' or 'firehose'; any other value only
        # fails later, inside send(), when a flush is attempted.
        self.api_name = api_name
        self.client = boto3.client(api_name, region_name=region_name)
        self.stream_name = stream_name
        self.partition_key = partition_key
        self.accumulator = RecordAccumulator()

    def send(self, topic, data):
        """Buffer ``data``; flush the batch to AWS when the buffer fills.

        Returns the boto3 response on a flush, otherwise True.  ``topic``
        is accepted for interface compatibility (see StdoutProducer) but
        is unused here.
        """
        temp_record={"Data": data.encode('utf-8')}
        if self.api_name=='kinesis':
            # Kinesis (unlike Firehose) requires an explicit partition key.
            temp_record['PartitionKey']=self.partition_key
        self.accumulator.append(temp_record)
        if self.accumulator.full():
            if self.api_name=='firehose':
                return self.client.put_record_batch(
                    Records=self.accumulator.empty(),
                    DeliveryStreamName=self.stream_name)
            elif self.api_name=='kinesis':
                return self.client.put_records(
                    Records=self.accumulator.empty(),
                    StreamName=self.stream_name)
            else:
                raise Exception(f"Invalid api choice for KinesisProducer={self.api_name}")
        else:
            return True
class StdoutProducer(object):
    """Debug producer that echoes records to stdout instead of sending them."""

    def send(self, topic, data):
        """Print the record and report success."""
        print("{}: {}\n".format(topic, data))
        return True
"logging.getLogger",
"boto3.client"
] | [((83, 110), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (100, 110), False, 'import logging\n'), ((686, 733), 'boto3.client', 'boto3.client', (['api_name'], {'region_name': 'region_name'}), '(api_name, region_name=region_name)\n', (698, 733), False, 'import boto3\n')] |
# https://atcoder.jp/contests/math-and-algorithm/tasks/math_and_algorithm_v
from collections import Counter

n = int(input())
values = list(map(int, input().split()))
counts = Counter(values)

total = 0
for value, cnt in counts.items():
    complement = 100000 - value
    if value == complement:
        # Pairs inside the same bucket: cnt * (cnt - 1) ordered pairs.
        total += cnt * (cnt - 1)
    else:
        total += cnt * counts[complement]
# Every unordered pair was counted twice above.
print(total // 2)
"collections.Counter"
] | [((169, 180), 'collections.Counter', 'Counter', (['aa'], {}), '(aa)\n', (176, 180), False, 'from collections import Counter\n')] |
from abc import ABC, abstractmethod
from typing import Callable, cast, Set, List, Dict, Optional
import numpy as np
from autofit import ModelInstance, Analysis, DirectoryPaths
from autofit.graphical.expectation_propagation import AbstractFactorOptimiser
from autofit.graphical.expectation_propagation import EPMeanField
from autofit.graphical.expectation_propagation import EPOptimiser
from autofit.graphical.factor_graphs.factor import Factor
from autofit.graphical.factor_graphs.graph import FactorGraph
from autofit.graphical.messages import NormalMessage
from autofit.mapper.prior.prior import Prior
from autofit.mapper.prior_model.collection import CollectionPriorModel
from autofit.mapper.prior_model.prior_model import PriorModel, AbstractPriorModel
class AbstractModelFactor(Analysis, ABC):
    # Base class combining one or more likelihood model factors into a
    # factor graph for expectation-propagation optimisation.

    @property
    @abstractmethod
    def model_factors(self) -> List["ModelFactor"]:
        """
        A list of factors that comprise a PriorModel and corresponding fitness function
        """

    def freeze(self):
        # Freeze every underlying prior model so its structure cannot
        # change during optimisation.
        for model_factor in self.model_factors:
            model_factor.freeze()

    @property
    def priors(self) -> Set[Prior]:
        """
        A set of all priors encompassed by the contained likelihood models
        """
        return {
            prior
            for model
            in self.model_factors
            for prior
            in model.prior_model.priors
        }

    @property
    def prior_factors(self) -> List[Factor]:
        """
        A list of factors that act as priors on latent variables. One factor exists
        for each unique prior.
        """
        # The prior itself is used as the factor's callable (cast keeps the
        # type-checker happy) and as its variable.
        return [
            Factor(
                cast(
                    Callable,
                    prior
                ),
                x=prior
            )
            for prior
            in self.priors
        ]

    @property
    def message_dict(self) -> Dict[Prior, NormalMessage]:
        """
        Dictionary mapping priors to messages.

        TODO: should support more than just GaussianPriors/NormalMessages
        """
        return {
            prior: NormalMessage.from_prior(
                prior
            )
            for prior
            in self.priors
        }

    @property
    def graph(self) -> FactorGraph:
        """
        The complete graph made by combining all factors and priors
        """
        # np.prod multiplies the Factor objects together via their __mul__,
        # producing a combined FactorGraph.
        return cast(
            FactorGraph,
            np.prod(
                [
                    model
                    for model
                    in self.model_factors
                ] + self.prior_factors
            )
        )

    def mean_field_approximation(self) -> EPMeanField:
        """
        Returns a EPMeanField of the factor graph
        """
        return EPMeanField.from_approx_dists(
            self.graph,
            self.message_dict
        )

    def _make_ep_optimiser(
            self,
            optimiser: AbstractFactorOptimiser
    ) -> EPOptimiser:
        # Factors that carry their own optimiser use it; all others fall
        # back to the default passed in here.
        return EPOptimiser(
            self.graph,
            default_optimiser=optimiser,
            factor_optimisers={
                factor: factor.optimiser
                for factor in self.model_factors
                if factor.optimiser is not None
            }
        )

    def optimise(
            self,
            optimiser:
            AbstractFactorOptimiser
    ) -> CollectionPriorModel:
        """
        Use an EP Optimiser to optimise the graph associated with this collection
        of factors and create a Collection to represent the results.

        Parameters
        ----------
        optimiser
            An optimiser that acts on graphs

        Returns
        -------
        A collection of prior models
        """
        self.freeze()
        opt = self._make_ep_optimiser(
            optimiser
        )
        updated_model = opt.run(
            self.mean_field_approximation()
        )

        collection = CollectionPriorModel([
            factor.prior_model
            for factor
            in self.model_factors
        ])
        # Convert each optimised message back into a prior and rebuild the
        # collection around those updated priors.
        arguments = {
            prior: updated_model.mean_field[
                prior
            ].as_prior()
            for prior
            in collection.priors
        }

        return collection.gaussian_prior_model_for_arguments(
            arguments
        )

    def visualize(
            self,
            paths: DirectoryPaths,
            instance: ModelInstance,
            during_analysis: bool
    ):
        """
        Visualise the instances provided using each factor.

        Instances in the ModelInstance must have the same order as the factors.

        Parameters
        ----------
        paths
            Object describing where data should be saved to
        instance
            A collection of instances, each corresponding to a factor
        during_analysis
            Is this visualisation during analysis?
        """
        for model_factor, instance in zip(
                self.model_factors,
                instance
        ):
            model_factor.visualize(
                paths,
                instance,
                during_analysis
            )

    def log_likelihood_function(
            self,
            instance: ModelInstance
    ) -> float:
        """
        Compute the combined likelihood of each factor from a collection of instances
        with the same ordering as the factors.

        Parameters
        ----------
        instance
            A collection of instances, one corresponding to each factor

        Returns
        -------
        The combined likelihood of all factors
        """
        # NOTE(review): this multiplies the absolute values of the factors'
        # log likelihoods and returns the negation -- unusual for combining
        # log likelihoods (normally they are summed).  Confirm intended.
        likelihood = abs(
            self.model_factors[0].analysis.log_likelihood_function(
                instance[0]
            )
        )
        for model_factor, instance_ in zip(
                self.model_factors[1:],
                instance[1:]
        ):
            likelihood *= abs(
                model_factor.analysis.log_likelihood_function(
                    instance_
                )
            )
        return -likelihood

    @property
    def global_prior_model(self) -> CollectionPriorModel:
        """
        A collection of prior models, with one model for each factor.
        """
        return CollectionPriorModel([
            model_factor.prior_model
            for model_factor
            in self.model_factors
        ])
class ModelFactor(Factor, AbstractModelFactor):
    def __init__(
            self,
            prior_model: AbstractPriorModel,
            analysis: Analysis,
            optimiser: Optional[AbstractFactorOptimiser] = None
    ):
        """
        A factor in the graph that actually computes the likelihood of a model
        given values for each variable that model contains

        Parameters
        ----------
        prior_model
            A model with some dimensionality
        analysis
            A class that implements a function which evaluates how well an
            instance of the model fits some data
        optimiser
            A custom optimiser that will be used to fit this factor specifically
            instead of the default optimiser
        """
        self.prior_model = prior_model
        self.analysis = analysis
        self.optimiser = optimiser

        # One graph variable per prior, keyed by the prior's name.
        prior_variable_dict = {
            prior.name: prior
            for prior
            in prior_model.priors
        }

        def _factor(
                **kwargs: np.ndarray
        ) -> float:
            """
            Returns an instance of the prior model and evaluates it, forming
            a factor.

            Parameters
            ----------
            kwargs
                Arguments with names that are unique for each prior.

            Returns
            -------
            Calculated likelihood
            """
            arguments = dict()
            for name, array in kwargs.items():
                # Variable names appear to encode the prior id after an
                # underscore -- TODO confirm the naming scheme.
                prior_id = int(name.split("_")[1])
                prior = prior_model.prior_with_id(
                    prior_id
                )
                arguments[prior] = array
            instance = prior_model.instance_for_arguments(
                arguments
            )
            return analysis.log_likelihood_function(
                instance
            )

        super().__init__(
            _factor,
            **prior_variable_dict
        )

    def freeze(self):
        # Lock the prior model's structure before optimisation.
        self.prior_model.freeze()

    @property
    def model_factors(self) -> List["ModelFactor"]:
        # A single ModelFactor is a one-element "collection" of factors.
        return [self]

    def optimise(self, optimiser) -> PriorModel:
        """
        Optimise this factor on its own returning a PriorModel
        representing the final state of the messages.

        Parameters
        ----------
        optimiser

        Returns
        -------
        A PriorModel representing the optimised factor
        """
        # AbstractModelFactor.optimise returns a collection; this factor
        # is its only element.
        return super().optimise(
            optimiser
        )[0]
class FactorGraphModel(AbstractModelFactor):
    """Aggregate of ModelFactor instances treated as one factor graph.

    Factors that share priors produce shared variables in the combined
    graph, coupling the corresponding models during optimisation.
    """

    def __init__(self, *model_factors: ModelFactor):
        self._model_factors = model_factors

    @property
    def model_factors(self):
        """The factors this graph model was built from."""
        return self._model_factors
| [
"numpy.prod",
"autofit.mapper.prior_model.collection.CollectionPriorModel",
"autofit.graphical.expectation_propagation.EPMeanField.from_approx_dists",
"autofit.graphical.expectation_propagation.EPOptimiser",
"autofit.graphical.messages.NormalMessage.from_prior",
"typing.cast"
] | [((2759, 2819), 'autofit.graphical.expectation_propagation.EPMeanField.from_approx_dists', 'EPMeanField.from_approx_dists', (['self.graph', 'self.message_dict'], {}), '(self.graph, self.message_dict)\n', (2788, 2819), False, 'from autofit.graphical.expectation_propagation import EPMeanField\n'), ((2985, 3158), 'autofit.graphical.expectation_propagation.EPOptimiser', 'EPOptimiser', (['self.graph'], {'default_optimiser': 'optimiser', 'factor_optimisers': '{factor: factor.optimiser for factor in self.model_factors if factor.\n optimiser is not None}'}), '(self.graph, default_optimiser=optimiser, factor_optimisers={\n factor: factor.optimiser for factor in self.model_factors if factor.\n optimiser is not None})\n', (2996, 3158), False, 'from autofit.graphical.expectation_propagation import EPOptimiser\n'), ((3934, 4009), 'autofit.mapper.prior_model.collection.CollectionPriorModel', 'CollectionPriorModel', (['[factor.prior_model for factor in self.model_factors]'], {}), '([factor.prior_model for factor in self.model_factors])\n', (3954, 4009), False, 'from autofit.mapper.prior_model.collection import CollectionPriorModel\n'), ((6271, 6363), 'autofit.mapper.prior_model.collection.CollectionPriorModel', 'CollectionPriorModel', (['[model_factor.prior_model for model_factor in self.model_factors]'], {}), '([model_factor.prior_model for model_factor in self.\n model_factors])\n', (6291, 6363), False, 'from autofit.mapper.prior_model.collection import CollectionPriorModel\n'), ((2104, 2135), 'autofit.graphical.messages.NormalMessage.from_prior', 'NormalMessage.from_prior', (['prior'], {}), '(prior)\n', (2128, 2135), False, 'from autofit.graphical.messages import NormalMessage\n'), ((2426, 2495), 'numpy.prod', 'np.prod', (['([model for model in self.model_factors] + self.prior_factors)'], {}), '([model for model in self.model_factors] + self.prior_factors)\n', (2433, 2495), True, 'import numpy as np\n'), ((1671, 1692), 'typing.cast', 'cast', (['Callable', 'prior'], {}), 
'(Callable, prior)\n', (1675, 1692), False, 'from typing import Callable, cast, Set, List, Dict, Optional\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-23 15:42
from __future__ import unicode_literals
import django.contrib.postgres.fields.hstore
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds address_type, specialisation and web fields to ZeroDoctor and
    # converts PaymentRecipient.data to a Postgres HStoreField.

    dependencies = [
        ('correctiv_eurosfueraerzte', '0021_paymentrecipient_aggs'),
    ]

    operations = [
        migrations.AddField(
            model_name='zerodoctor',
            name='address_type',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AddField(
            model_name='zerodoctor',
            name='specialisation',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name='zerodoctor',
            name='web',
            field=models.URLField(blank=True, max_length=1024),
        ),
        migrations.AlterField(
            model_name='paymentrecipient',
            name='data',
            field=django.contrib.postgres.fields.hstore.HStoreField(blank=True, default=dict),
        ),
    ]
| [
"django.db.models.URLField",
"django.db.models.CharField"
] | [((475, 518), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)'}), '(blank=True, max_length=50)\n', (491, 518), False, 'from django.db import migrations, models\n'), ((650, 694), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)'}), '(blank=True, max_length=255)\n', (666, 694), False, 'from django.db import migrations, models\n'), ((815, 859), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'max_length': '(1024)'}), '(blank=True, max_length=1024)\n', (830, 859), False, 'from django.db import migrations, models\n')] |
import json
from justfunc.env import setup_env
from justfunc.evaluator import evaluate
from justfunc.reader import read
class JustFunc:
    """Tiny interpreter front-end: evaluates source strings in a shared env."""

    def __init__(self):
        self.env = setup_env()

    def run(self, src):
        """Parse and evaluate ``src`` against this interpreter's environment."""
        return evaluate(read(src), self.env)

    def run_repl(self, prompt=">>> "):
        """Read-eval-print loop; exits on an empty input line."""
        while line := input(prompt):
            try:
                print(json.dumps(self.run(line)))
            except RuntimeError as e:
                print(e.args[0])

    def main(self, _args):
        self.run_repl()
| [
"json.dumps",
"justfunc.reader.read",
"justfunc.env.setup_env"
] | [((182, 193), 'justfunc.env.setup_env', 'setup_env', ([], {}), '()\n', (191, 193), False, 'from justfunc.env import setup_env\n'), ((243, 252), 'justfunc.reader.read', 'read', (['src'], {}), '(src)\n', (247, 252), False, 'from justfunc.reader import read\n'), ((420, 438), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (430, 438), False, 'import json\n')] |
import numpy as np
import torch
from fairseq.data.indexed_dataset import __best_fitting_dtype, MMapIndexedDatasetBuilder, IndexedDatasetBuilder
from fairseq.tokenizer import tokenize_line
# TODO move this file into data folder
def make_builder(out_file, impl, vocab_size=None, dtype=None):
    """Create a fairseq dataset builder for ``out_file``.

    ``impl`` == 'mmap' selects the memory-mapped builder (with ``dtype``
    inferred from ``vocab_size`` when not given); any other value falls
    back to the plain indexed builder.
    """
    if impl != 'mmap':
        return IndexedDatasetBuilder(out_file)
    if dtype is None:
        dtype = __best_fitting_dtype(vocab_size)
    return MMapIndexedDatasetBuilder(out_file, dtype=dtype)
def binarize_file(input_file, out_file_pref, impl, dtype=np.int64, tokenize=tokenize_line):
    """Binarize a text file of whitespace-separated integer ids into an
    indexed dataset (``<out_file_pref>.bin`` / ``.idx``).

    Parameters
    ----------
    input_file : path of the text file to binarize.
    out_file_pref : prefix for the .bin/.idx output pair.
    impl : dataset implementation, passed to make_builder ('mmap' or other).
    dtype : numpy dtype for the mmap builder.
    tokenize : callable splitting a line into tokens (default fairseq's
        tokenize_line).

    Raises
    ------
    Exception
        If the input contains an empty/blank line.
    """
    out_file = out_file_pref + '.bin'
    index_file = out_file_pref + '.idx'
    ds = make_builder(out_file, impl=impl, dtype=dtype)
    with open(input_file, 'r') as f:
        for line in f:
            if not line.strip():
                raise Exception('empty line')
            # BUG FIX: honor the ``tokenize`` argument -- the original
            # always called tokenize_line, silently ignoring the parameter.
            tokens = tokenize(line)
            ds.add_item(torch.tensor(list(map(int, tokens))))
    ds.finalize(index_file)
    return
| [
"fairseq.data.indexed_dataset.MMapIndexedDatasetBuilder",
"fairseq.tokenizer.tokenize_line",
"fairseq.data.indexed_dataset.__best_fitting_dtype",
"torch.tensor",
"fairseq.data.indexed_dataset.IndexedDatasetBuilder"
] | [((409, 457), 'fairseq.data.indexed_dataset.MMapIndexedDatasetBuilder', 'MMapIndexedDatasetBuilder', (['out_file'], {'dtype': 'dtype'}), '(out_file, dtype=dtype)\n', (434, 457), False, 'from fairseq.data.indexed_dataset import __best_fitting_dtype, MMapIndexedDatasetBuilder, IndexedDatasetBuilder\n'), ((483, 514), 'fairseq.data.indexed_dataset.IndexedDatasetBuilder', 'IndexedDatasetBuilder', (['out_file'], {}), '(out_file)\n', (504, 514), False, 'from fairseq.data.indexed_dataset import __best_fitting_dtype, MMapIndexedDatasetBuilder, IndexedDatasetBuilder\n'), ((361, 393), 'fairseq.data.indexed_dataset.__best_fitting_dtype', '__best_fitting_dtype', (['vocab_size'], {}), '(vocab_size)\n', (381, 393), False, 'from fairseq.data.indexed_dataset import __best_fitting_dtype, MMapIndexedDatasetBuilder, IndexedDatasetBuilder\n'), ((855, 874), 'fairseq.tokenizer.tokenize_line', 'tokenize_line', (['line'], {}), '(line)\n', (868, 874), False, 'from fairseq.tokenizer import tokenize_line\n'), ((942, 960), 'torch.tensor', 'torch.tensor', (['line'], {}), '(line)\n', (954, 960), False, 'import torch\n')] |
"""
Protocol wrapper that will detect hung connections.
In particular, since PB expects the server to talk first and HTTP
expects the client to talk first, when a PB client talks to an HTTP
server, neither side will talk, leading to a hung connection. This
wrapper will disconnect in that case, and inform the caller.
"""
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet.interfaces import IProtocol
from twisted.internet.interfaces import IProtocolFactory
from twisted.python.components import proxyForInterface
def _noop():
pass
class HangCheckProtocol(
    proxyForInterface(IProtocol, '_wrapped_protocol'), object,
):
    """
    Wrap a protocol so the underlying connection is dropped when the
    remote end stays silent for too long.
    """

    transport = None
    _hungConnectionTimer = None
    # Hung connections wait a relatively long time, since a busy master
    # may take a while to get back to us.
    _HUNG_CONNECTION_TIMEOUT = 120

    def __init__(self, wrapped_protocol, hung_callback=_noop, reactor=None):
        """
        :param IProtocol wrapped_protocol: The protocol to wrap.
        :param hung_callback: Called when the connection has hung.
        :type hung_callback: callable taking no arguments.
        :param IReactorTime reactor: The reactor used to schedule the
            hang check.
        """
        if reactor is None:
            from twisted.internet import reactor
        self._wrapped_protocol = wrapped_protocol
        self._reactor = reactor
        self._hung_callback = hung_callback

    def makeConnection(self, transport):
        # Only received data matters for hang detection, so the
        # transport itself is left unwrapped.
        self.transport = transport
        super(HangCheckProtocol, self).makeConnection(transport)
        self._startHungConnectionTimer()

    def dataReceived(self, data):
        self._stopHungConnectionTimer()
        super(HangCheckProtocol, self).dataReceived(data)

    def connectionLost(self, reason):
        self._stopHungConnectionTimer()
        super(HangCheckProtocol, self).connectionLost(reason)

    def _startHungConnectionTimer(self):
        """Arm the timer that flags this connection as hung."""
        def _timed_out():
            self._hung_callback()
            self._hungConnectionTimer = None
            self.transport.loseConnection()

        self._hungConnectionTimer = self._reactor.callLater(
            self._HUNG_CONNECTION_TIMEOUT, _timed_out)

    def _stopHungConnectionTimer(self):
        """Cancel the hang-check timer after data arrived or a close."""
        if self._hungConnectionTimer:
            self._hungConnectionTimer.cancel()
        self._hungConnectionTimer = None
class HangCheckFactory(
    proxyForInterface(IProtocolFactory, '_wrapped_factory'), object,
):
    """
    Wrap a protocol factory so every protocol it builds disconnects
    when the remote end stays silent for too long.
    """

    def __init__(self, wrapped_factory, hung_callback):
        """
        :param IProtocolFactory wrapped_factory: The factory to wrap.
        :param hung_callback: Called when the connection has hung.
        :type hung_callback: callable taking no arguments.
        """
        self._wrapped_factory = wrapped_factory
        self._hung_callback = hung_callback

    def buildProtocol(self, addr):
        inner = self._wrapped_factory.buildProtocol(addr)
        return HangCheckProtocol(inner, hung_callback=self._hung_callback)

    # This is used as a ClientFactory, which has no formal interface,
    # so the extra client-side callbacks are forwarded by hand.
    def startedConnecting(self, connector):
        self._wrapped_factory.startedConnecting(connector)

    def clientConnectionFailed(self, connector, reason):
        self._wrapped_factory.clientConnectionFailed(connector, reason)

    def clientConnectionLost(self, connector, reason):
        self._wrapped_factory.clientConnectionLost(connector, reason)
| [
"twisted.python.components.proxyForInterface"
] | [((620, 669), 'twisted.python.components.proxyForInterface', 'proxyForInterface', (['IProtocol', '"""_wrapped_protocol"""'], {}), "(IProtocol, '_wrapped_protocol')\n", (637, 669), False, 'from twisted.python.components import proxyForInterface\n'), ((2938, 2993), 'twisted.python.components.proxyForInterface', 'proxyForInterface', (['IProtocolFactory', '"""_wrapped_factory"""'], {}), "(IProtocolFactory, '_wrapped_factory')\n", (2955, 2993), False, 'from twisted.python.components import proxyForInterface\n')] |
import ctypes
import os
# Load the compiled relaxation kernel that sits next to this module.
so = os.path.join(os.path.dirname(__file__), 'maths.so')
lib = ctypes.CDLL(so)
# Declare the C signature of relax(): seven untyped pointers (the
# callers below pass array buffers via a ``.ctypes`` attribute —
# presumably numpy arrays; confirm at the call sites), followed by the
# link count and the iteration count.
lib.relax.argtypes = [
    ctypes.c_void_p,
    ctypes.c_void_p,
    ctypes.c_void_p,
    ctypes.c_void_p,
    ctypes.c_void_p,
    ctypes.c_void_p,
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_int,
]
def relax(pos, links, mrel1, mrel2, lengths, push, pull, iters):
    """Run the native relaxation kernel over all links for ``iters`` passes.

    All array arguments are handed to C via their ``.ctypes`` buffer
    interface; the link count is taken from ``links.shape[0]``.
    """
    link_count = links.shape[0]
    lib.relax(pos.ctypes, links.ctypes, mrel1.ctypes, mrel2.ctypes,
              lengths.ctypes, push.ctypes, pull.ctypes, link_count, iters)
| [
"os.path.dirname",
"ctypes.CDLL"
] | [((88, 103), 'ctypes.CDLL', 'ctypes.CDLL', (['so'], {}), '(so)\n', (99, 103), False, 'import ctypes\n'), ((43, 68), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (58, 68), False, 'import os\n')] |
from newspaperclassifier.evaluation.merging import merge_labels
import click
@click.command()
@click.option('--aws-dir', type=str, help="Directory where AWS labels are stored")
@click.option('--gc-dir', type=str, help="Directory where GC labels are stored")
@click.option('--destination', type=str, help="Destination file")
def main(aws_dir, gc_dir, destination):
    """Merge stored labels"""
    # Merge the two label sets and write the result straight to disk.
    merge_labels(aws_dir, gc_dir).to_csv(destination, index=False)
| [
"click.option",
"newspaperclassifier.evaluation.merging.merge_labels",
"click.command"
] | [((80, 95), 'click.command', 'click.command', ([], {}), '()\n', (93, 95), False, 'import click\n'), ((97, 183), 'click.option', 'click.option', (['"""--aws-dir"""'], {'type': 'str', 'help': '"""Directory where AWS labels are stored"""'}), "('--aws-dir', type=str, help=\n 'Directory where AWS labels are stored')\n", (109, 183), False, 'import click\n'), ((180, 259), 'click.option', 'click.option', (['"""--gc-dir"""'], {'type': 'str', 'help': '"""Directory where GC labels are stored"""'}), "('--gc-dir', type=str, help='Directory where GC labels are stored')\n", (192, 259), False, 'import click\n'), ((261, 325), 'click.option', 'click.option', (['"""--destination"""'], {'type': 'str', 'help': '"""Destination file"""'}), "('--destination', type=str, help='Destination file')\n", (273, 325), False, 'import click\n'), ((409, 438), 'newspaperclassifier.evaluation.merging.merge_labels', 'merge_labels', (['aws_dir', 'gc_dir'], {}), '(aws_dir, gc_dir)\n', (421, 438), False, 'from newspaperclassifier.evaluation.merging import merge_labels\n')] |
from shapely.geometry import shape
import fiona
import networkx as nx
import matplotlib.pyplot as plt
import math
import random
import traffic
import pickle
from datetime import datetime
from request import Request
import numpy as np
try:
from itertools import izip as zip
except ImportError:
pass
def main():
    """
    Demo entry point: load the NYC graph, run A* pathfinding on a
    sample request, and plot the result.
    """
    G, trips = load_data(reset=False, graph=False, trip=False, abbr=False)
    # Fixed demo request; swap in random_trip(G) for a random one.
    demo_request = Request((40.74345679662331, -73.72770035929027),
                           (40.77214782804362, -73.76426798716528),
                           0, 0, datetime(2015, 1, 1))
    draw_graph(G, bounds=(demo_request.start, demo_request.stop))
    process_trips(G, trips=[demo_request], heuristic=diste)
    plt.axis('equal')
    plt.show()
# === Load Data ===
def load_data(reset=False, graph=False, trip=False, abbr=False):
    """
    Return the NYC street graph and the list of 2015 trips, regenerating
    and re-pickling whichever pieces are requested.

    *** To refresh everything, reset=True ***

    Parameters: (reset, graph, trip, abbr)
        reset - bool, rebuild everything
        graph - bool, rebuild the graph pickle
        trip  - bool, rebuild the trips pickle
        abbr  - bool, rebuild the abbreviation pickle
    """
    if reset:
        graph = trip = abbr = True
    if graph:
        volume_by_street = traffic.process_traffic("NYC/Traffic_Data/traffic_volume.csv")
        pickle_graph(abbr, volume_by_street)
    with open('graph.pkl', 'rb') as graph_file:
        road_graph = pickle.load(graph_file)
    if trip:
        pickle_trips(road_graph)
    with open('trips.pkl', 'rb') as trips_file:
        saved_trips = pickle.load(trips_file)
    return road_graph, saved_trips
def pickle_graph(abbr, traffic_dict):
    """
    Generate the NYC street graph and save it in graph.pkl.

    Parameters: (abbr, traffic_dict)
        abbr - bool, force a rebuild of the cached abbreviation table
        traffic_dict - dict of hourly traffic volume per street
    """
    # Load (or rebuild) the street-abbreviation table. After this block
    # ``abbr`` holds the {short: long} dict, no longer the bool flag.
    try:
        if abbr:
            raise ResetPickle
        with open('abbr.pkl', 'rb') as abbr_file:
            abbr = pickle.load(abbr_file)
    # Narrowed from a bare ``except:`` so real bugs (and Ctrl-C) are no
    # longer silently swallowed; any cache problem triggers a rebuild.
    except (ResetPickle, OSError, EOFError, pickle.UnpicklingError, AttributeError):
        print("Loading abbreviations...")
        abbr = {}
        with open("abbr.txt") as rFile:
            for line in rFile:
                line = line.rstrip("\n")
                abbr[line.split(" ")[0].upper()] = line.split(" ")[1].upper()
        with open('abbr.pkl', 'wb') as out:
            pickle.dump(abbr, out)
        print("Done.")
    # Counters tracking how many street names had a known speed limit.
    recognized = 0
    unrecognized = 0
    # Build a speed-limit lookup for every road (and its name variants).
    print("Building speeds dictionary...")
    speeds = {}
    for feature in fiona.open("NYC/VZV_Speed Limits/geo_export_6459c10e-7bfb-4e64-ae29-f0747dc3824c.shp"):
        street = feature["properties"]["street"]
        for v in street_variations(street, abbr):
            speeds[v] = feature["properties"]["postvz_sl"]
    print("Done.")
    # Create a Graph with intersections as nodes and roads as edges.
    print("Creating graph...")
    time = random.randint(0, 23)  # Random hour of day used to sample traffic volume.
    G = nx.Graph()
    for feature in fiona.open("NYC/Map/geo_export_24fdfadb-893d-40a0-a751-a76cdefc9bc6.shp"):
        for seg_start, seg_end in zip(list(shape(feature["geometry"]).coords),
                                      list(shape(feature["geometry"]).coords)[1:]):
            street = feature["properties"]["st_label"]
            if street in speeds:
                recognized += 1
            else:
                unrecognized += 1
            divider = speeds.get(street, 0)
            if divider == 0:
                divider = 25  # Fallback speed limit when none is posted.
            # Shapefile coords are (lon, lat); flip to (lat, lon).
            seg_start = seg_start[1], seg_start[0]
            seg_end = seg_end[1], seg_end[0]
            if street in traffic_dict:
                volume_total = traffic_dict[street]
                volume_count = volume_total[time]
                w = reweight(seg_start, seg_end, divider, int(volume_count))
            else:
                w = weight(seg_start, seg_end, divider)
            # Edge carries the weight, the real-life distance, and the
            # speed limit converted from mph to m/s.
            G.add_edge(seg_start, seg_end, weight=w, distance=feature["properties"]["shape_leng"],
                       speed=divider / 3600 * 1609)
    print(
        f"Streets recognized: {recognized}. Unrecognized: {unrecognized}. Percent recognized: {recognized / (unrecognized + recognized) * 100}%.")
    with open('graph.pkl', 'wb') as out:
        pickle.dump(G, out)
    print("Done.")
def pickle_trips(G):
    """
    Parse the 2015 taxi CSV, snap pickup/dropoff coordinates to the
    nearest graph nodes, and store the resulting Request list in
    trips.pkl.

    Parameters: (G)
        G - networkx.graph()
    """
    print("Loading trips...")
    trips = []
    loaded = 0  # Number of trips loaded so far.
    with open("NYC/2015_taxi_data.csv") as source:
        header = source.readline().rstrip("\n").split(",")
        for raw in source:
            fields = raw.rstrip("\n").split(",")
            record = {}
            for col, name in enumerate(header):
                record[name] = fields[col]
            pickup = (float(record["pickup_latitude"]), float(record["pickup_longitude"]))
            dropoff = (float(record["dropoff_latitude"]), float(record["dropoff_longitude"]))
            origin, destination = find_closest_node(G, pickup), find_closest_node(G, dropoff)
            trips.append(Request(origin, destination, 0, int(record["passenger_count"]),
                                 datetime.strptime(record["tpep_pickup_datetime"], "%Y-%m-%d %H:%M:%S")))
            loaded += 1
            if loaded == 100:  # Cap the number of trips to keep runtime manageable.
                print("Loaded " + str(loaded) + " trips.")
                break
    with open('trips.pkl', 'wb') as out:
        pickle.dump(trips, out)
    print("Done.")
def find_closest_node(G, starting):
    """
    Return the graph node with the smallest L1 (Manhattan) offset from
    ``starting``, or None when the graph has no nodes.

    Parameters: (G, starting)
        G - networkx.graph()
        starting - (lat, lon)
    """
    best_node = None
    best_offset = float("inf")
    for node in G.nodes():
        offset = abs(starting[0] - node[0]) + abs(starting[1] - node[1])
        if offset < best_offset:
            best_node, best_offset = node, offset
    return best_node
def street_variations(s, abbr):
    """
    Return ``s`` plus every variant produced by expanding common
    street-term abbreviations.

    Parameters: (s, abbr)
        s - string
        abbr - dict mapping abbreviation -> expansion
    """
    variations = [s]
    for short_form, long_form in abbr.items():
        # Iterate over a snapshot so newly appended variants are not
        # re-expanded with the same abbreviation in this pass.
        for existing in variations.copy():
            if short_form in existing:
                variations.append(existing.replace(short_form, long_form))
    return variations
class ResetPickle(Exception):
    """Internal signal: skip the cached pickle and force a rebuild."""
    pass
# === Plotting ===
def draw_graph(g, bounds=((-180, -90), (180, 90))):
    """
    Plot every edge whose first endpoint falls inside ``bounds``.

    Parameters: (g, bounds)
        g - networkx.graph()
        bounds - (node, node), opposite corners of the bounding box
        node - (lat, lon)
    """
    n1, n2 = bounds
    # Hoist the bounding-box corners out of the loop: the old code
    # recomputed the same four min/max values for every edge.
    lat_lo, lat_hi = min(n1[0], n2[0]), max(n1[0], n2[0])
    lon_lo, lon_hi = min(n1[1], n2[1]), max(n1[1], n2[1])
    for edge in g.edges():
        if lat_lo < edge[0][0] < lat_hi and lon_lo < edge[0][1] < lon_hi:
            plt.plot((edge[0][1], edge[1][1]), (edge[0][0], edge[1][0]), 'c.-')
def draw_path(path, color="b"):
    """
    Plot a path: dashed magenta segments between consecutive nodes,
    plus dot markers (in ``color``) at every node except the last.

    Parameters: (path, color)
        path - [nodes]
        color - str
        node - (lat, lon)
    """
    xs = []
    ys = []
    for here, there in zip(path, path[1:]):
        plt.plot((here[1], there[1]), (here[0], there[0]), "m--")
        xs.append(here[1])
        ys.append(here[0])
    plt.plot(xs, ys, color + '.')
# === Trips ===
def process_trips(G, trips, heuristic):
    """
    Run A* for every trip, print a summary, and plot each found path.

    Parameters: (G, trips, heuristic)
        G - networkx.graph()
        trips - [trips]
        heuristic - Callable
        trip - (node, node)
        node - (lat, lon)
    """
    for trip in trips:
        n1 = trip.start
        n2 = trip.stop
        print(f"\nGoing from {n1} to {n2}")
        print("Calculating traffic...")
        try:
            path = nx.astar_path(G, n1, n2, heuristic)
            print(f"Cost of trip: {nx.astar_path_length(G, n1, n2, heuristic)}")
            print(f"Nodes in trip: {len(path)}")
            print_trip_info(n1, n2, path, G)
            draw_path(path)
        # Narrowed from a bare ``except:``: only swallow pathfinding
        # failures (no path / unknown node) instead of hiding every bug.
        except nx.NetworkXException:
            print("Couldn't find a path")
def random_trip(G):
    """
    Return a randomly generated trip (between two random graph nodes)
    as a Request.

    Parameters: (G)
        G - networkx.graph()
    """
    node_total = len(G.nodes())
    # randint is inclusive on both ends: the old upper bound of
    # ``node_total`` could pick an index one past the last node, so the
    # endpoint stayed an int instead of being replaced by a node.
    i1 = random.randint(0, node_total - 1)
    i2 = random.randint(0, node_total - 1)
    n1 = n2 = None
    for index, node in enumerate(G.nodes()):
        if index == i1:
            n1 = node
        if index == i2:
            n2 = node
    return Request(n1, n2, 0, 0, datetime(2015, 1, 1))
def print_trip_info(n1, n2, path, G, pr=False):
    """
    Summarise a trip: per-speed edge counts, total distance, and travel
    time; optionally print the summary.

    Note: edges with the exact same length are only counted once, as
    this was found to be the most accurate so far.

    Parameters: (n1, n2, path, G, pr)
        n1 - (lat, lon)
        n2 - (lat, lon)
        path - list of nodes in order
        G - networkx.graph()
        pr - bool - whether to print the info
        node - (lat, lon)
    """
    speed_counts = {}
    unique_lengths = []
    travel_time = 0
    for here, there in zip(path, path[1:]):
        edge = G[here][there]
        edge_speed = round(edge["speed"], 2)
        if edge["distance"] not in unique_lengths:
            unique_lengths.append(edge["distance"])
        speed_counts[edge_speed] = speed_counts.get(edge_speed, 0) + 1
        travel_time += edge["distance"] * 0.3048 / edge_speed
    if pr:
        print(f"Speeds (m/s): {speed_counts}")
        print(f"Distance (meters?): {round(sum(unique_lengths) * 0.3048, 2)}")
        print(f"Euclidean distance (meters): {distance_to_meters(n1, n2)}")
        print(f"Time (minutes): {round(travel_time / 60, 2)}")
    return speed_counts, round(sum(unique_lengths) * 0.3048, 2), round(travel_time / 60, 2)
# === Heuristics ===
def weight(s, e, speed):
    """
    Edge weight: straight-line coordinate-space distance divided by the
    speed limit.

    Parameters: (s, e, speed)
        s - (lat, lon)
        e - (lat, lon)
        speed - int
    """
    dlat = s[0] - e[0]
    dlon = s[1] - e[1]
    return (dlat ** 2 + dlon ** 2) ** 0.5 / speed
def reweight(s, e, speed, volume):
    """
    Traffic-aware edge weight.
    ** Includes historical traffic data for more accurate weighting **

    Parameters: (s, e, speed, volume)
        s - (lat, lon)
        e - (lat, lon)
        speed - int
        volume - int
    """
    # Vehicles per meter of road, then scaled by the speed limit.
    density = volume / distance_to_meters(s, e)
    congestion = density / speed
    dlat = s[0] - e[0]
    dlon = s[1] - e[1]
    return (dlat ** 2 + dlon ** 2) ** 0.5 / congestion
def diste(p1, p2):
    """
    Euclidean distance divided by the default NYC speed. Admissible.

    Parameters: (p1, p2)
        p1 - (lat, lon)
        p2 - (lat, lon)
    """
    dlat = abs(p1[0] - p2[0])
    dlon = abs(p1[1] - p2[1])
    return (dlat ** 2 + dlon ** 2) ** 0.5 / 65
def distm(p1, p2):
    """
    Manhattan distance divided by the default NYC speed. NOT admissible.

    Fixes an operator-precedence bug: the old expression
    ``abs(dlat) + abs(dlon) / 65`` only divided the longitude term by
    65, while the docstring promises the whole distance is scaled.

    Parameters: (p1, p2)
        p1 - (lat, lon)
        p2 - (lat, lon)
    """
    return (abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])) / 65
# === Helpers ===
def distance_to_meters(n1, n2):
    """
    Great-circle (haversine) distance in meters between two points,
    rounded to 2 decimal places.

    Fixes the haversine formula: the old code omitted the cos(lat1)
    factor (it used only cos(lat2)), skewing east-west distances. Also
    replaces scalar numpy ufunc calls with plain ``math`` functions.

    Parameters: (n1, n2)
        n1 - (lat, lon)
        n2 - (lat, lon)
    """
    radius = 6371000  # Mean Earth radius in meters.
    lat1, lon1 = float(n1[0]), float(n1[1])
    lat2, lon2 = float(n2[0]), float(n2[1])
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlambda = math.radians(lon2 - lon1)
    # Haversine: a = sin^2(dphi/2) + cos(phi1)*cos(phi2)*sin^2(dlambda/2)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlambda / 2) ** 2
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return round(radius * c, 2)
# === Main ===
# Run the pathfinding demo only when executed directly as a script.
if __name__ == "__main__":
    main()
| [
"numpy.sqrt",
"shapely.geometry.shape",
"networkx.astar_path",
"numpy.divide",
"datetime.datetime",
"numpy.multiply",
"matplotlib.pyplot.plot",
"numpy.subtract",
"fiona.open",
"matplotlib.pyplot.axis",
"random.randint",
"traffic.process_traffic",
"pickle.load",
"numpy.cos",
"matplotlib.p... | [((804, 821), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (812, 821), True, 'import matplotlib.pyplot as plt\n'), ((826, 836), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (834, 836), True, 'import matplotlib.pyplot as plt\n'), ((2669, 2760), 'fiona.open', 'fiona.open', (['"""NYC/VZV_Speed Limits/geo_export_6459c10e-7bfb-4e64-ae29-f0747dc3824c.shp"""'], {}), "(\n 'NYC/VZV_Speed Limits/geo_export_6459c10e-7bfb-4e64-ae29-f0747dc3824c.shp')\n", (2679, 2760), False, 'import fiona\n'), ((3045, 3066), 'random.randint', 'random.randint', (['(0)', '(23)'], {}), '(0, 23)\n', (3059, 3066), False, 'import random\n'), ((3075, 3085), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3083, 3085), True, 'import networkx as nx\n'), ((3105, 3178), 'fiona.open', 'fiona.open', (['"""NYC/Map/geo_export_24fdfadb-893d-40a0-a751-a76cdefc9bc6.shp"""'], {}), "('NYC/Map/geo_export_24fdfadb-893d-40a0-a751-a76cdefc9bc6.shp')\n", (3115, 3178), False, 'import fiona\n'), ((7499, 7528), 'matplotlib.pyplot.plot', 'plt.plot', (['px', 'py', "(color + '.')"], {}), "(px, py, color + '.')\n", (7507, 7528), True, 'import matplotlib.pyplot as plt\n'), ((8484, 8505), 'random.randint', 'random.randint', (['(0)', 'tn'], {}), '(0, tn)\n', (8498, 8505), False, 'import random\n'), ((8515, 8536), 'random.randint', 'random.randint', (['(0)', 'tn'], {}), '(0, tn)\n', (8529, 8536), False, 'import random\n'), ((685, 705), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)'], {}), '(2015, 1, 1)\n', (693, 705), False, 'from datetime import datetime\n'), ((1339, 1401), 'traffic.process_traffic', 'traffic.process_traffic', (['"""NYC/Traffic_Data/traffic_volume.csv"""'], {}), "('NYC/Traffic_Data/traffic_volume.csv')\n", (1362, 1401), False, 'import traffic\n'), ((1503, 1526), 'pickle.load', 'pickle.load', (['graph_file'], {}), '(graph_file)\n', (1514, 1526), False, 'import pickle\n'), ((1629, 1652), 'pickle.load', 'pickle.load', (['trips_file'], {}), 
'(trips_file)\n', (1640, 1652), False, 'import pickle\n'), ((4462, 4481), 'pickle.dump', 'pickle.dump', (['G', 'out'], {}), '(G, out)\n', (4473, 4481), False, 'import pickle\n'), ((5679, 5702), 'pickle.dump', 'pickle.dump', (['trips', 'out'], {}), '(trips, out)\n', (5690, 5702), False, 'import pickle\n'), ((7359, 7434), 'matplotlib.pyplot.plot', 'plt.plot', (['(path[p][1], path[p + 1][1])', '(path[p][0], path[p + 1][0])', '"""m--"""'], {}), "((path[p][1], path[p + 1][1]), (path[p][0], path[p + 1][0]), 'm--')\n", (7367, 7434), True, 'import matplotlib.pyplot as plt\n'), ((8710, 8730), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)'], {}), '(2015, 1, 1)\n', (8718, 8730), False, 'from datetime import datetime\n'), ((11572, 11596), 'numpy.multiply', 'np.multiply', (['x1', 'math.pi'], {}), '(x1, math.pi)\n', (11583, 11596), True, 'import numpy as np\n'), ((11622, 11646), 'numpy.multiply', 'np.multiply', (['x2', 'math.pi'], {}), '(x2, math.pi)\n', (11633, 11646), True, 'import numpy as np\n'), ((12044, 12066), 'numpy.multiply', 'np.multiply', (['radius', 'c'], {}), '(radius, c)\n', (12055, 12066), True, 'import numpy as np\n'), ((2051, 2073), 'pickle.load', 'pickle.load', (['abbr_file'], {}), '(abbr_file)\n', (2062, 2073), False, 'import pickle\n'), ((7041, 7108), 'matplotlib.pyplot.plot', 'plt.plot', (['(edge[0][1], edge[1][1])', '(edge[0][0], edge[1][0])', '"""c.-"""'], {}), "((edge[0][1], edge[1][1]), (edge[0][0], edge[1][0]), 'c.-')\n", (7049, 7108), True, 'import matplotlib.pyplot as plt\n'), ((8014, 8049), 'networkx.astar_path', 'nx.astar_path', (['G', 'n1', 'n2', 'heuristic'], {}), '(G, n1, n2, heuristic)\n', (8027, 8049), True, 'import networkx as nx\n'), ((11682, 11701), 'numpy.subtract', 'np.subtract', (['x2', 'x1'], {}), '(x2, x1)\n', (11693, 11701), True, 'import numpy as np\n'), ((11746, 11765), 'numpy.subtract', 'np.subtract', (['y2', 'y1'], {}), '(y2, y1)\n', (11757, 11765), True, 'import numpy as np\n'), ((2392, 2414), 'pickle.dump', 
'pickle.dump', (['abbr', 'out'], {}), '(abbr, out)\n', (2403, 2414), False, 'import pickle\n'), ((11814, 11830), 'numpy.divide', 'np.divide', (['d1', '(2)'], {}), '(d1, 2)\n', (11823, 11830), True, 'import numpy as np\n'), ((11838, 11854), 'numpy.divide', 'np.divide', (['d1', '(2)'], {}), '(d1, 2)\n', (11847, 11854), True, 'import numpy as np\n'), ((11880, 11890), 'numpy.cos', 'np.cos', (['o2'], {}), '(o2)\n', (11886, 11890), True, 'import numpy as np\n'), ((11925, 11941), 'numpy.divide', 'np.divide', (['d2', '(2)'], {}), '(d2, 2)\n', (11934, 11941), True, 'import numpy as np\n'), ((11987, 11997), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (11994, 11997), True, 'import numpy as np\n'), ((3223, 3249), 'shapely.geometry.shape', 'shape', (["feature['geometry']"], {}), "(feature['geometry'])\n", (3228, 3249), False, 'from shapely.geometry import shape\n'), ((5386, 5454), 'datetime.datetime.strptime', 'datetime.strptime', (["temp['tpep_pickup_datetime']", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(temp['tpep_pickup_datetime'], '%Y-%m-%d %H:%M:%S')\n", (5403, 5454), False, 'from datetime import datetime\n'), ((11900, 11916), 'numpy.divide', 'np.divide', (['d2', '(2)'], {}), '(d2, 2)\n', (11909, 11916), True, 'import numpy as np\n'), ((12006, 12023), 'numpy.subtract', 'np.subtract', (['(1)', 'a'], {}), '(1, a)\n', (12017, 12023), True, 'import numpy as np\n'), ((3302, 3328), 'shapely.geometry.shape', 'shape', (["feature['geometry']"], {}), "(feature['geometry'])\n", (3307, 3328), False, 'from shapely.geometry import shape\n'), ((8086, 8128), 'networkx.astar_path_length', 'nx.astar_path_length', (['G', 'n1', 'n2', 'heuristic'], {}), '(G, n1, n2, heuristic)\n', (8106, 8128), True, 'import networkx as nx\n')] |
import time
from TSP_toStudents import *
def main():
    """
    Run the GA benchmark grid: for each (population, mutation-rate)
    configuration, sweep every menu choice with several repetitions.

    The original body repeated the same nested loop verbatim ten times;
    the sweep is now written once and driven by a configuration table,
    producing byte-identical console output.
    """
    max_menu = 6
    max_iterations = 4
    n_iterations = 300

    def _run_config(shown_pop, actual_pop, mutation_rate):
        """One full sweep over every menu choice / repetition for one config."""
        for menu_choice in range(1, max_menu + 1):
            for repetition in range(1, max_iterations + 1):
                BasicTSP.menuChoice = menu_choice
                print('================================== {0}.{1} ================================='.format(menu_choice, repetition))
                print('{0} iterations, population {1}, mutation rate {2}'.format(n_iterations, shown_pop, mutation_rate))
                start_time = time.time()
                ga = BasicTSP(sys.argv[1], actual_pop, mutation_rate, n_iterations)
                ga.search()
                print("Process took %s seconds" % int(time.time() - start_time))
                print('######################################################################################################')

    # (population shown in the banner, population actually used, mutation rate).
    # NOTE(review): the last three batches printed "population 50" but ran
    # BasicTSP with 100 in the original code; that behavior is preserved
    # here — confirm which value was intended before unifying them.
    configs = [
        (100, 100, 0.3), (100, 100, 0.5), (100, 100, 0.7),
        (150, 150, 0.1), (150, 150, 0.3), (150, 150, 0.5), (150, 150, 0.7),
        (50, 100, 0.3), (50, 100, 0.5), (50, 100, 0.7),
    ]
    for shown, actual, rate in configs:
        _run_config(shown, actual, rate)


if __name__ == '__main__':
    main()
"time.time"
] | [((508, 519), 'time.time', 'time.time', ([], {}), '()\n', (517, 519), False, 'import time\n'), ((1183, 1194), 'time.time', 'time.time', ([], {}), '()\n', (1192, 1194), False, 'import time\n'), ((1858, 1869), 'time.time', 'time.time', ([], {}), '()\n', (1867, 1869), False, 'import time\n'), ((2533, 2544), 'time.time', 'time.time', ([], {}), '()\n', (2542, 2544), False, 'import time\n'), ((3208, 3219), 'time.time', 'time.time', ([], {}), '()\n', (3217, 3219), False, 'import time\n'), ((3883, 3894), 'time.time', 'time.time', ([], {}), '()\n', (3892, 3894), False, 'import time\n'), ((4558, 4569), 'time.time', 'time.time', ([], {}), '()\n', (4567, 4569), False, 'import time\n'), ((5232, 5243), 'time.time', 'time.time', ([], {}), '()\n', (5241, 5243), False, 'import time\n'), ((5906, 5917), 'time.time', 'time.time', ([], {}), '()\n', (5915, 5917), False, 'import time\n'), ((6580, 6591), 'time.time', 'time.time', ([], {}), '()\n', (6589, 6591), False, 'import time\n'), ((648, 659), 'time.time', 'time.time', ([], {}), '()\n', (657, 659), False, 'import time\n'), ((1323, 1334), 'time.time', 'time.time', ([], {}), '()\n', (1332, 1334), False, 'import time\n'), ((1998, 2009), 'time.time', 'time.time', ([], {}), '()\n', (2007, 2009), False, 'import time\n'), ((2673, 2684), 'time.time', 'time.time', ([], {}), '()\n', (2682, 2684), False, 'import time\n'), ((3348, 3359), 'time.time', 'time.time', ([], {}), '()\n', (3357, 3359), False, 'import time\n'), ((4023, 4034), 'time.time', 'time.time', ([], {}), '()\n', (4032, 4034), False, 'import time\n'), ((4698, 4709), 'time.time', 'time.time', ([], {}), '()\n', (4707, 4709), False, 'import time\n'), ((5372, 5383), 'time.time', 'time.time', ([], {}), '()\n', (5381, 5383), False, 'import time\n'), ((6046, 6057), 'time.time', 'time.time', ([], {}), '()\n', (6055, 6057), False, 'import time\n'), ((6720, 6731), 'time.time', 'time.time', ([], {}), '()\n', (6729, 6731), False, 'import time\n')] |
#
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema
from bdd_test_util import bdd_log
def getNetwork(context):
    """Return the Network ID, fetching and caching it on first use.

    The old version returned the cached id on repeat calls but the raw
    HTTP response on the first call; both paths now return the id so
    callers always get the same type. (All in-file callers ignore the
    return value, so this is safe for them.)
    """
    if hasattr(context, 'network_id'):
        return context.network_id
    headers = getTokenedHeaders(context)
    url = "{0}://{1}/api/com.ibm.zBlockchain/networks".format(getSchema(context.tls), context.remote_ip)
    response = httpGet(url, headers=headers)
    # Cache the first network id for subsequent calls.
    context.network_id = response.json()[0]
    return context.network_id
def stopNode(context, peer):
    """Stops the peer node on a specific network."""
    headers = getTokenedHeaders(context)
    getNetwork(context)  # Ensures context.network_id is populated.
    url = ("{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/stop"
           .format(getSchema(context.tls), context.remote_ip, context.network_id, peer))
    httpPost(url, {}, headers=headers)
def restartNode(context, peer):
    """Restart the peer node on a specific network."""
    headers = getTokenedHeaders(context)
    getNetwork(context)  # Ensures context.network_id is populated.
    url = ("{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/restart"
           .format(getSchema(context.tls), context.remote_ip, context.network_id, peer))
    httpPost(url, {}, headers=headers)
def getNodeStatus(context, peer):
    """Fetch and return the status of a peer node."""
    headers = getTokenedHeaders(context)
    getNetwork(context)  # Ensures context.network_id is populated.
    url = ("{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/status"
           .format(getSchema(context.tls), context.remote_ip, context.network_id, peer))
    return httpGet(url, headers=headers)
def getNodeLogs(context, peer):
    """Fetch and return the logs of a peer node.

    The original signature omitted ``peer`` even though the URL
    references it, so every call raised NameError; the parameter now
    matches getNodeStatus/getChaincodeLogs.
    """
    headers = getTokenedHeaders(context)
    getNetwork(context)  # Ensures context.network_id is populated.
    url = "{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/logs".format(getSchema(context.tls), context.remote_ip, context.network_id, peer)
    response = httpGet(url, headers=headers)
    return response
def getChaincodeLogs(context, peer):
    """Fetch the logs of the currently deployed chaincode on ``peer``.

    Returns the HTTP response, or an explanatory string when no
    chaincode spec is present on the context.
    """
    headers = getTokenedHeaders(context)
    getNetwork(context)
    # Guard clause: without a deployed chaincode there is nothing to fetch.
    if not hasattr(context, 'chaincodeSpec'):
        return "No chaincode has been deployed"
    chaincode_name = context.chaincodeSpec.get('chaincodeID', {}).get('name', '')
    url = "{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/chaincodes/{4}/logs".format(
        getSchema(context.tls), context.remote_ip, context.network_id,
        peer, chaincode_name)
    return httpGet(url, headers=headers)
| [
"bdd_request_util.getTokenedHeaders",
"bdd_request_util.httpPost",
"bdd_request_util.getSchema",
"bdd_request_util.httpGet"
] | [((868, 894), 'bdd_request_util.getTokenedHeaders', 'getTokenedHeaders', (['context'], {}), '(context)\n', (885, 894), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((1015, 1044), 'bdd_request_util.httpGet', 'httpGet', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (1022, 1044), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((1207, 1233), 'bdd_request_util.getTokenedHeaders', 'getTokenedHeaders', (['context'], {}), '(context)\n', (1224, 1233), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((1438, 1474), 'bdd_request_util.httpPost', 'httpPost', (['url', 'body'], {'headers': 'headers'}), '(url, body, headers=headers)\n', (1446, 1474), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((1578, 1604), 'bdd_request_util.getTokenedHeaders', 'getTokenedHeaders', (['context'], {}), '(context)\n', (1595, 1604), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((1811, 1847), 'bdd_request_util.httpPost', 'httpPost', (['url', 'body'], {'headers': 'headers'}), '(url, body, headers=headers)\n', (1819, 1847), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((1930, 1956), 'bdd_request_util.getTokenedHeaders', 'getTokenedHeaders', (['context'], {}), '(context)\n', (1947, 1956), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((2148, 2177), 'bdd_request_util.httpGet', 'httpGet', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (2155, 2177), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((2270, 2296), 'bdd_request_util.getTokenedHeaders', 'getTokenedHeaders', (['context'], {}), '(context)\n', (2287, 2296), False, 'from bdd_request_util 
import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((2486, 2515), 'bdd_request_util.httpGet', 'httpGet', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (2493, 2515), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((2624, 2650), 'bdd_request_util.getTokenedHeaders', 'getTokenedHeaders', (['context'], {}), '(context)\n', (2641, 2650), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((957, 979), 'bdd_request_util.getSchema', 'getSchema', (['context.tls'], {}), '(context.tls)\n', (966, 979), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((1340, 1362), 'bdd_request_util.getSchema', 'getSchema', (['context.tls'], {}), '(context.tls)\n', (1349, 1362), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((1713, 1735), 'bdd_request_util.getSchema', 'getSchema', (['context.tls'], {}), '(context.tls)\n', (1722, 1735), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((2064, 2086), 'bdd_request_util.getSchema', 'getSchema', (['context.tls'], {}), '(context.tls)\n', (2073, 2086), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((2402, 2424), 'bdd_request_util.getSchema', 'getSchema', (['context.tls'], {}), '(context.tls)\n', (2411, 2424), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((3279, 3308), 'bdd_request_util.httpGet', 'httpGet', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3286, 3308), False, 'from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema\n'), ((3129, 3151), 'bdd_request_util.getSchema', 'getSchema', (['context.tls'], {}), '(context.tls)\n', (3138, 3151), False, 'from bdd_request_util import httpGet, httpPost, 
getTokenedHeaders, getToken, getSchema\n')] |
#!/usr/bin/env python
"""
BSD 3-Clause License
Copyright (c) 2017, SafeBreach Labs
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Storage provider implementation for AltFS, built on top of Windows WMI system.
system.
References:
- https://www.blackhat.com/docs/us-15/materials/us-15-Graeber-Abusing-Windows
-Management-Instrumentation-WMI-To-Build-A-Persistent%20Asynchronous-And-
Fileless-Backdoor-wp.pdf
- https://gallery.technet.microsoft.com/WMI-PowerShell-cmdlets-ac049637
- https://docs.microsoft.com/en-us/windows/desktop/wmisdk/
creating-a-base-class
- https://stackoverflow.com/questions/252417/
how-can-i-use-a-dll-file-from-python
- https://docs.microsoft.com/en-us/windows/desktop/api/wbemcli/
nn-wbemcli-iwbemservices
Author: <NAME> <<EMAIL>>
Date: 2019-01-01
"""
import ctypes
import logging
import os
from common import WMI_CLIENT_DLL_PATH
from exceptions_ import BucketValueMissingException
from providers.common.calculations import calculate_bits_sum, \
calculate_next_available_index
from StorageProvider import StorageProvider
import wmi
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class WMIStorageProvider(StorageProvider):
    """
    Concrete storage provider implementation for AltFS.

    Built on top of the Windows WMI system (WBEM): "buckets" are WMI
    classes and "values" are properties on those classes. Class/property
    creation and deletion go through a native helper DLL loaded via
    ctypes; reads go through the ``wmi`` Python client.
    """
    # Separator between the machine-derived part and the numeric index in
    # a generated property (value) name.
    PROPERTY_NAME_DELIMITER = "_"
    # Despite the name, this is used as a *prefix* for generated class
    # names (and to filter out previously generated classes).
    TARGET_CLASS_NAME_SUFFIX = "Wow64_"
    def __init__(self, machine_identification_string, **kwargs):
        """Constructor for WMIStorageProvider.

        Args:
            machine_identification_string: seed string used to derive a
                deterministic, machine-specific bucket (class) name.
            **kwargs: must contain ``namespace`` — the WMI namespace the
                bucket class lives in.
        """
        super(WMIStorageProvider, self).__init__()
        self._machine_id_string = machine_identification_string
        self._wmi_client = wmi.WMI()
        # Native helper DLL (relative to this file) used for class /
        # property creation and deletion.
        self._wmi_client_dll = ctypes.cdll.LoadLibrary(
            os.path.join(os.path.dirname(__file__), WMI_CLIENT_DLL_PATH))
        self._namespace = kwargs["namespace"]
        self._class_name = self._generate_bucket_name()
        # calculate number of available buckets, used for modulus division
        # when calculating the bucket index
        self._buckets_names = [self._class_name]
        self._buckets_count = len(self._buckets_names)
        self._create_bucket()
        logger.debug("namespace: %s" % self._namespace)
        logger.debug("root class name: %s" % self._class_name)
    def _generate_bucket_name(self):
        """Derive a deterministic bucket (class) name for this machine.

        Picks an existing, non-generated WMI class based on a checksum of
        the machine ID string, then returns a new name made of the
        "Wow64_" prefix plus the last "_"-separated token of that class.
        """
        # All classes that were NOT generated by this provider.
        classes = list([klass for klass in self._wmi_client.subclasses_of()
                        if not klass.startswith(
                            WMIStorageProvider.TARGET_CLASS_NAME_SUFFIX)])
        classes_count = len(classes)
        logger.debug("found %s legitimate classes" % classes_count)
        machine_id_checksum = calculate_bits_sum(
            self._machine_id_string)
        # Index into the *filtered* list: take the checksum position in
        # the full class list and subtract the number of generated
        # ("Wow64_") classes that precede it, so the index lines up.
        target_class_id = machine_id_checksum % classes_count - len(
            [
                klass for klass in list(
                    self._wmi_client.subclasses_of())[
                    :machine_id_checksum % classes_count] if klass.startswith(
                    WMIStorageProvider.TARGET_CLASS_NAME_SUFFIX)])
        logger.debug("target class for name generation: %s" %
                     (classes[target_class_id]))
        return WMIStorageProvider.TARGET_CLASS_NAME_SUFFIX + \
            classes[target_class_id].split("_")[-1]
    def _create_bucket(self):
        """Create the bucket class via the helper DLL if it doesn't exist."""
        is_bucket_exist = self._class_name in self._wmi_client.subclasses_of()
        if is_bucket_exist:
            return
        p_ns = ctypes.c_wchar_p(self._namespace)
        p_cn = ctypes.c_wchar_p(self._class_name)
        logger.debug("creating class: %s\\%s" %
                     (self._namespace, self._class_name))
        self._wmi_client_dll.CreateClass(p_ns, p_cn)
    def write_block(self, bucket_id, value_id, data=""):
        """Described in parent class"""
        logger.debug("writing block at (%s:%s)" % (bucket_id, value_id))
        try:
            # Reuse the existing property name when the value already exists.
            value_name = self._get_value_name(
                bucket_id, value_id)
            logger.debug("value with id already exists at (%s:%s)" %
                         (bucket_id, value_id))
        except BucketValueMissingException:
            logger.debug(
                "value with id does not exist in specified bucket." +
                " generating a new value name for bucket id %s" % bucket_id)
            value_name = self._generate_value_name()
            logger.debug("generated a new value name in bucket id %s: %s" % (
                bucket_id, value_name))
        target_value_id = WMIStorageProvider.value_name_to_value_id(value_name)
        p_ns = ctypes.c_wchar_p(self._namespace)
        p_cn = ctypes.c_wchar_p(self._class_name)
        p_vn = ctypes.c_wchar_p(value_name)
        p_data = ctypes.c_wchar_p(data)
        logger.debug(
            "creating a new property at (%s:%s): %s\\%s.%s" %
            (bucket_id,
             target_value_id,
             self._namespace,
             self._class_name,
             value_name))
        # CreateProperty both creates a new property and overwrites an
        # existing one with the given data.
        self._wmi_client_dll.CreateProperty(p_ns, p_cn, p_vn, p_data)
        return target_value_id
    def get_block(self, bucket_id, value_id):
        """Described in parent class"""
        logger.debug("getting block at (%s:%s)" % (bucket_id, value_id))
        # Read the property value through the WMI client (read path does
        # not need the helper DLL).
        data = self._wmi_client.get(self._class_name).wmi_property(
            self._get_value_name(bucket_id, value_id)).value
        return data
    def delete_block(self, bucket_id, value_id):
        """Described in parent class"""
        value_name = self._get_value_name(
            bucket_id, value_id)
        p_ns = ctypes.c_wchar_p(self._namespace)
        p_cn = ctypes.c_wchar_p(self._class_name)
        p_vn = ctypes.c_wchar_p(value_name)
        logger.debug(
            "deleting a property at (%s:%s): %s\\%s.%s" %
            (bucket_id,
             WMIStorageProvider.value_name_to_value_id(value_name),
             self._namespace,
             self._class_name,
             value_name))
        self._wmi_client_dll.DeleteProperty(p_ns, p_cn, p_vn)
    def get_value_ids_in_bucket(self, bucket_id):
        """Described in parent class"""
        return self._enumerate_applicable_values_dict().keys()
    def generate_new_value_id_in_bucket(self, bucket_id):
        """Described in parent class"""
        return WMIStorageProvider.value_name_to_value_id(
            self._generate_value_name())
    @staticmethod
    def value_name_to_value_id(value_name):
        """Returns the value ID of the given value_name.

        The ID is the (numeric) last "_"-separated token of the name.
        """
        return int(value_name.split(
            WMIStorageProvider.PROPERTY_NAME_DELIMITER)[-1])
    def _get_value_name(self, bucket_id, value_id):
        """Resolve ``value_id`` to its property name.

        Raises:
            BucketValueMissingException: when no property with that ID
                exists in the bucket.
        """
        logger.debug("looking for value name at (%s:%s)" %
                     (bucket_id, value_id))
        if value_id is not None:
            values_dict = self._enumerate_applicable_values_dict()
            logger.debug("existing values: %s" % values_dict)
            if value_id in values_dict:
                logger.debug("value name exists at (%s:%s): %s" %
                             (bucket_id, value_id, values_dict[value_id]))
                return values_dict[value_id]
        logger.debug("no value name at (%s:%s)" % (bucket_id, value_id))
        raise BucketValueMissingException(
            "No applicable value found in bucket")
    def _enumerate_applicable_values_dict(self):
        """Return a mapping of value ID -> property (value) name."""
        values_names = self._enumerate_applicable_values()
        return dict(zip([WMIStorageProvider.value_name_to_value_id(name)
                         for name in values_names], values_names))
    def _enumerate_applicable_values(self):
        # NOTE(review): returns *all* properties of the class, not only
        # those matching _is_value_name_applicable — verify upstream.
        return self._wmi_client.get(self._class_name).properties.keys()
    def _get_bucket_name(self, bucket_id):
        # Only one bucket is tracked (see __init__), so this is a simple
        # list lookup.
        return self._buckets_names[bucket_id]
    def _generate_value_name_machine_part(self):
        # Machine-specific prefix: middle token of the class name plus
        # the delimiter, e.g. "<token>_".
        return self._class_name.split(
            "_")[1] + WMIStorageProvider.PROPERTY_NAME_DELIMITER
    def _generate_value_name(self):
        # Next free name: machine part + zero-padded 4-digit index.
        return self._generate_value_name_machine_part() + \
            ("%04d" % calculate_next_available_index(
                self._enumerate_applicable_values_dict().keys()))
    def _is_value_name_applicable(self, value_name):
        """True when ``value_name`` matches the generated-name pattern."""
        return value_name.startswith(
            self._generate_value_name_machine_part()) and all(
            [char.isdigit() for char in value_name[-4:]])
| [
"logging.getLogger",
"exceptions_.BucketValueMissingException",
"providers.common.calculations.calculate_bits_sum",
"wmi.WMI",
"os.path.dirname",
"ctypes.c_wchar_p"
] | [((2519, 2546), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2536, 2546), False, 'import logging\n'), ((3053, 3062), 'wmi.WMI', 'wmi.WMI', ([], {}), '()\n', (3060, 3062), False, 'import wmi\n'), ((4040, 4083), 'providers.common.calculations.calculate_bits_sum', 'calculate_bits_sum', (['self._machine_id_string'], {}), '(self._machine_id_string)\n', (4058, 4083), False, 'from providers.common.calculations import calculate_bits_sum, calculate_next_available_index\n'), ((4820, 4853), 'ctypes.c_wchar_p', 'ctypes.c_wchar_p', (['self._namespace'], {}), '(self._namespace)\n', (4836, 4853), False, 'import ctypes\n'), ((4869, 4903), 'ctypes.c_wchar_p', 'ctypes.c_wchar_p', (['self._class_name'], {}), '(self._class_name)\n', (4885, 4903), False, 'import ctypes\n'), ((5931, 5964), 'ctypes.c_wchar_p', 'ctypes.c_wchar_p', (['self._namespace'], {}), '(self._namespace)\n', (5947, 5964), False, 'import ctypes\n'), ((5980, 6014), 'ctypes.c_wchar_p', 'ctypes.c_wchar_p', (['self._class_name'], {}), '(self._class_name)\n', (5996, 6014), False, 'import ctypes\n'), ((6030, 6058), 'ctypes.c_wchar_p', 'ctypes.c_wchar_p', (['value_name'], {}), '(value_name)\n', (6046, 6058), False, 'import ctypes\n'), ((6076, 6098), 'ctypes.c_wchar_p', 'ctypes.c_wchar_p', (['data'], {}), '(data)\n', (6092, 6098), False, 'import ctypes\n'), ((6915, 6948), 'ctypes.c_wchar_p', 'ctypes.c_wchar_p', (['self._namespace'], {}), '(self._namespace)\n', (6931, 6948), False, 'import ctypes\n'), ((6964, 6998), 'ctypes.c_wchar_p', 'ctypes.c_wchar_p', (['self._class_name'], {}), '(self._class_name)\n', (6980, 6998), False, 'import ctypes\n'), ((7014, 7042), 'ctypes.c_wchar_p', 'ctypes.c_wchar_p', (['value_name'], {}), '(value_name)\n', (7030, 7042), False, 'import ctypes\n'), ((8567, 8633), 'exceptions_.BucketValueMissingException', 'BucketValueMissingException', (['"""No applicable value found in bucket"""'], {}), "('No applicable value found in bucket')\n", (8594, 8633), False, 'from 
exceptions_ import BucketValueMissingException\n'), ((3144, 3169), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3159, 3169), False, 'import os\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import math
import numpy as np
import unittest
from op_test import OpTest
def calc_psroi_pool(x, rois, rois_num_per_img, output_channels, spatial_scale,
                    pooled_height, pooled_width):
    """
    PSROIPool (position-sensitive ROI pooling) reference implementation
    in NumPy.

    x: 4-D array of shape (N, C, H, W); C must equal
        output_channels * pooled_height * pooled_width.
    rois: 2-D boxes as [[x1, y1, x2, y2], ...] (no batch-index column).
    rois_num_per_img: 1-D as [nums_of_batch_0, nums_of_batch_1, ...];
        boxes in ``rois`` are grouped per image, in order.
    spatial_scale: maps ROI coordinates onto the feature map.

    Returns a 4-D array of shape
    (len(rois), output_channels, pooled_height, pooled_width) holding
    the average of each position-sensitive bin (0 for empty bins).
    """
    output_shape = (len(rois), output_channels, pooled_height, pooled_width)
    out_data = np.zeros(output_shape)
    # Walk the flat ROI list while tracking which image each ROI belongs to.
    batch_id = 0
    rois_num_id = 0
    rois_num_left = rois_num_per_img[rois_num_id]
    for i in range(len(rois)):
        roi = rois[i]
        roi_batch_id = batch_id
        rois_num_left -= 1
        if rois_num_left == 0:
            rois_num_id += 1
            if rois_num_id < len(rois_num_per_img):
                rois_num_left = rois_num_per_img[rois_num_id]
            batch_id += 1
        # Map ROI corners onto the feature map; the end edge is expanded
        # by one pixel before scaling.
        roi_start_w = round(roi[0]) * spatial_scale
        roi_start_h = round(roi[1]) * spatial_scale
        roi_end_w = (round(roi[2]) + 1.) * spatial_scale
        roi_end_h = (round(roi[3]) + 1.) * spatial_scale
        # Clamp to a minimal size so bin sizes stay positive.
        roi_height = max(roi_end_h - roi_start_h, 0.1)
        roi_width = max(roi_end_w - roi_start_w, 0.1)
        bin_size_h = roi_height / float(pooled_height)
        bin_size_w = roi_width / float(pooled_width)
        x_i = x[roi_batch_id]
        for c in range(output_channels):
            for ph in range(pooled_height):
                for pw in range(pooled_width):
                    hstart = int(
                        math.floor(float(ph) * bin_size_h + roi_start_h))
                    wstart = int(
                        math.floor(float(pw) * bin_size_w + roi_start_w))
                    hend = int(
                        math.ceil(float(ph + 1) * bin_size_h + roi_start_h))
                    wend = int(
                        math.ceil(float(pw + 1) * bin_size_w + roi_start_w))
                    # Clip the bin to the feature-map bounds.
                    hstart = min(max(hstart, 0), x.shape[2])
                    hend = min(max(hend, 0), x.shape[2])
                    wstart = min(max(wstart, 0), x.shape[3])
                    wend = min(max(wend, 0), x.shape[3])
                    # Position-sensitive: each (c, ph, pw) output bin reads
                    # its own dedicated input channel.
                    c_in = (c * pooled_height + ph) * pooled_width + pw
                    is_empty = (hend <= hstart) or (wend <= wstart)
                    bin_area = (hend - hstart) * (wend - wstart)
                    # Vectorized bin average — replaces the original
                    # O(bin_area) Python double loop with one slice sum.
                    out_data[i, c, ph, pw] = 0. if is_empty else (
                        float(x_i[c_in, hstart:hend, wstart:wend].sum()) /
                        float(bin_area))
    return out_data
class TestPSROIPoolOp(OpTest):
    """Static-graph operator test for ``psroi_pool``, checked against the
    NumPy reference implementation ``calc_psroi_pool``."""

    def set_data(self):
        """Build op inputs, attributes and the expected output."""
        paddle.enable_static()
        self.init_test_case()
        self.make_rois()
        self.outs = calc_psroi_pool(self.x, self.boxes, self.boxes_num,
                                    self.output_channels, self.spatial_scale,
                                    self.pooled_height,
                                    self.pooled_width).astype('float64')
        self.inputs = {
            'X': self.x,
            # The op consumes ROIs without the batch-index column, as LoD
            # data grouped per image.
            'ROIs': (self.rois_with_batch_id[:, 1:5], self.rois_lod)
        }
        self.attrs = {
            'output_channels': self.output_channels,
            'spatial_scale': self.spatial_scale,
            'pooled_height': self.pooled_height,
            'pooled_width': self.pooled_width
        }
        self.outputs = {'Out': self.outs}

    def init_test_case(self):
        """Set up input dimensions and pooling attributes."""
        self.batch_size = 3
        # channels must equal output_channels * pooled_height * pooled_width.
        self.channels = 3 * 2 * 2
        self.height = 6
        self.width = 4
        self.x_dim = [self.batch_size, self.channels, self.height, self.width]
        self.spatial_scale = 1.0 / 4.0
        self.output_channels = 3
        self.pooled_height = 2
        self.pooled_width = 2
        self.x = np.random.random(self.x_dim).astype('float64')

    def make_rois(self):
        """Generate ``bno + 1`` random, in-bounds ROIs for image ``bno``.

        Fix: ``np.random.random_integers`` (deprecated since NumPy 1.11,
        removed in NumPy 1.25) is replaced with ``np.random.randint``.
        ``randint``'s upper bound is exclusive, hence the ``+ 1`` to keep
        the sampled ranges identical.
        """
        rois = []
        self.rois_lod = [[]]
        for bno in range(self.batch_size):
            self.rois_lod[0].append(bno + 1)
            for i in range(bno + 1):
                x1 = np.random.randint(
                    0,
                    int(self.width // self.spatial_scale -
                        self.pooled_width) + 1)
                y1 = np.random.randint(
                    0,
                    int(self.height // self.spatial_scale -
                        self.pooled_height) + 1)
                x2 = np.random.randint(
                    x1 + self.pooled_width,
                    int(self.width // self.spatial_scale) + 1)
                y2 = np.random.randint(
                    y1 + self.pooled_height,
                    int(self.height // self.spatial_scale) + 1)
                roi = [bno, x1, y1, x2, y2]
                rois.append(roi)
        self.rois_num = len(rois)
        self.rois_with_batch_id = np.array(rois).astype('float64')
        self.boxes = self.rois_with_batch_id[:, 1:]
        self.boxes_num = np.array(
            [bno + 1 for bno in range(self.batch_size)]).astype('int32')

    def setUp(self):
        self.op_type = 'psroi_pool'
        self.set_data()

    def test_check_output(self):
        # Forward pass must match the NumPy reference.
        self.check_output()

    def test_check_grad(self):
        # Gradient w.r.t. the input feature map.
        self.check_grad(['X'], 'Out')
class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase):
    """Checks the functional API ``paddle.vision.ops.psroi_pool`` in
    dynamic (imperative) mode against the NumPy reference
    ``calc_psroi_pool``."""

    def setUp(self):
        # 490 channels = 10 output channels * 7 * 7 pooled bins.
        self.x = np.random.random([2, 490, 28, 28]).astype(np.float32)
        self.boxes = np.array(
            [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]).astype(np.float32)
        # 1 box for image 0, 2 boxes for image 1.
        self.boxes_num = np.array([1, 2]).astype(np.int32)

    def test_output_size(self):
        def test_output_size_is_int():
            # output_size given as a single int.
            output_size = 7
            out = paddle.vision.ops.psroi_pool(
                paddle.to_tensor(self.x),
                paddle.to_tensor(self.boxes),
                paddle.to_tensor(self.boxes_num), output_size).numpy()
            expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
                                        1.0, 7, 7)
            self.assertTrue(np.allclose(out, expect_out))

        def test_output_size_is_tuple():
            # output_size given as a (height, width) tuple.
            output_size = (7, 7)
            out = paddle.vision.ops.psroi_pool(
                paddle.to_tensor(self.x),
                paddle.to_tensor(self.boxes),
                paddle.to_tensor(self.boxes_num), output_size).numpy()
            expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
                                        1.0, 7, 7)
            self.assertTrue(np.allclose(out, expect_out))

        def test_dytype_is_float64():
            # float64 inputs must be supported too.
            output_size = (7, 7)
            out = paddle.vision.ops.psroi_pool(
                paddle.to_tensor(self.x, 'float64'),
                paddle.to_tensor(self.boxes, 'float64'),
                paddle.to_tensor(self.boxes_num, 'int32'), output_size).numpy()
            expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
                                        1.0, 7, 7)
            self.assertTrue(np.allclose(out, expect_out))

        # Run every variant on CPU and, when available, on GPU.
        places = ['cpu']
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append('gpu')
        for place in places:
            paddle.set_device(place)
            test_output_size_is_int()
            test_output_size_is_tuple()
            test_dytype_is_float64()
class TestPSROIPoolDynamicClassAPI(unittest.TestCase):
    """Checks the layer API ``paddle.vision.ops.PSRoIPool`` in dynamic
    (imperative) mode against the NumPy reference ``calc_psroi_pool``."""

    def setUp(self):
        # 128 channels = 2 output channels * 8 * 8 pooled bins.
        self.x = np.random.random([2, 128, 32, 32]).astype(np.float32)
        self.boxes = np.array([[3, 5, 6, 13], [7, 4, 22, 18], [4, 5, 7, 10],
                               [5, 3, 25, 21]]).astype(np.float32)
        # 2 boxes per image.
        self.boxes_num = np.array([2, 2]).astype(np.int32)

    def test_output_size(self):
        def test_output_size_is_int():
            # PSRoIPool(output_size=8, spatial_scale=1.1).
            psroi_module = paddle.vision.ops.PSRoIPool(8, 1.1)
            out = psroi_module(
                paddle.to_tensor(self.x),
                paddle.to_tensor(self.boxes),
                paddle.to_tensor(self.boxes_num)).numpy()
            expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2,
                                        1.1, 8, 8)
            self.assertTrue(np.allclose(out, expect_out))

        def test_output_size_is_tuple():
            psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1)
            out = psroi_pool_module(
                paddle.to_tensor(self.x),
                paddle.to_tensor(self.boxes),
                paddle.to_tensor(self.boxes_num)).numpy()
            expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2,
                                        1.1, 8, 8)
            self.assertTrue(np.allclose(out, expect_out))

        def test_dytype_is_float64():
            # float64 inputs must be supported too.
            psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1)
            out = psroi_pool_module(
                paddle.to_tensor(self.x, 'float64'),
                paddle.to_tensor(self.boxes, 'float64'),
                paddle.to_tensor(self.boxes_num, 'int32')).numpy()
            expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2,
                                        1.1, 8, 8)
            self.assertTrue(np.allclose(out, expect_out))

        # Switch to dynamic mode before executing any variant.
        paddle.disable_static()
        places = ['cpu']
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append('gpu')
        for place in places:
            paddle.set_device(place)
            test_output_size_is_int()
            test_output_size_is_tuple()
            test_dytype_is_float64()
class TestPSROIPoolBoxesNumError(unittest.TestCase):
    """``psroi_pool`` must reject ``boxes_num`` tensors that are
    inconsistent with the boxes tensor or the input batch."""

    def setUp(self):
        # Error cases are exercised in dynamic (imperative) mode.
        paddle.disable_static()
        self.x = paddle.uniform([2, 490, 28, 28], dtype='float32')
        self.boxes = paddle.to_tensor(
            [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')

    def test_errors(self):
        # Sum of boxes_num (1 + 5 = 6) disagrees with the box count (3).
        with self.assertRaises(ValueError):
            paddle.vision.ops.psroi_pool(
                self.x, self.boxes,
                paddle.to_tensor([1, 5], 'int32'), output_size=7)
        # boxes_num has 3 entries but the input batch size is 2.
        with self.assertRaises(ValueError):
            paddle.vision.ops.psroi_pool(
                self.x, self.boxes,
                paddle.to_tensor([1, 1, 1], 'int32'), output_size=7)
class TestPSROIPoolChannelError(unittest.TestCase):
    """``psroi_pool`` must reject inputs whose channel count is not
    divisible by ``output_size ** 2``."""

    def setUp(self):
        paddle.disable_static()
        self.x = paddle.uniform([2, 490, 28, 28], dtype='float32')
        self.boxes = paddle.to_tensor(
            [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')
        self.output_size = 4

    def test_errors(self):
        # 490 channels are not divisible by 4 * 4 pooled bins.
        with self.assertRaises(ValueError):
            paddle.vision.ops.psroi_pool(
                self.x, self.boxes,
                paddle.to_tensor([2, 1], 'int32'), self.output_size)
class TestPSROIPoolStaticAPI(unittest.TestCase):
    """Checks ``paddle.vision.ops.psroi_pool`` in static-graph mode,
    feeding boxes as a LoD tensor, against the NumPy reference."""

    def setUp(self):
        paddle.enable_static()
        # Graph placeholders plus matching NumPy data to feed at run time.
        self.x_placeholder = paddle.static.data(
            name='x', shape=[2, 490, 28, 28])
        self.x = np.random.random([2, 490, 28, 28]).astype(np.float32)
        self.boxes_placeholder = paddle.static.data(
            name='boxes', shape=[3, 4], lod_level=1)
        self.boxes = np.array(
            [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]).astype(np.float32)
        # 1 box for image 0, 2 boxes for image 1.
        self.boxes_num = np.array([1, 2]).astype(np.int32)

    def test_function_in_static(self):
        output_size = 7
        out = paddle.vision.ops.psroi_pool(self.x_placeholder,
                                           self.boxes_placeholder,
                                           self.boxes_num, output_size)
        expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
                                    1.0, 7, 7)
        # Execute on CPU and, when available, on GPU.
        places = [paddle.CPUPlace()]
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append(paddle.CUDAPlace(0))
        for place in places:
            exe = paddle.static.Executor(place)
            # LoD [[1, 2]] mirrors boxes_num: boxes grouped per image.
            boxes_lod_data = paddle.fluid.create_lod_tensor(self.boxes,
                                                           [[1, 2]], place)
            out_res = exe.run(paddle.static.default_main_program(),
                              feed={'x': self.x,
                                    'boxes': boxes_lod_data},
                              fetch_list=[out.name])
            self.assertTrue(np.allclose(out_res, expect_out))
if __name__ == '__main__':
    # Run all test cases when executed as a script.
    unittest.main()
| [
"numpy.array",
"paddle.disable_static",
"unittest.main",
"paddle.CPUPlace",
"numpy.random.random",
"paddle.vision.ops.psroi_pool",
"paddle.vision.ops.PSRoIPool",
"paddle.fluid.create_lod_tensor",
"paddle.enable_static",
"paddle.to_tensor",
"paddle.fluid.core.is_compiled_with_cuda",
"paddle.set... | [((1156, 1178), 'numpy.zeros', 'np.zeros', (['output_shape'], {}), '(output_shape)\n', (1164, 1178), True, 'import numpy as np\n'), ((13311, 13326), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13324, 13326), False, 'import unittest\n'), ((3454, 3476), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (3474, 3476), False, 'import paddle\n'), ((7744, 7785), 'paddle.fluid.core.is_compiled_with_cuda', 'paddle.fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (7783, 7785), False, 'import paddle\n'), ((9859, 9882), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (9880, 9882), False, 'import paddle\n'), ((9919, 9960), 'paddle.fluid.core.is_compiled_with_cuda', 'paddle.fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (9958, 9960), False, 'import paddle\n'), ((10260, 10283), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (10281, 10283), False, 'import paddle\n'), ((10301, 10350), 'paddle.uniform', 'paddle.uniform', (['[2, 490, 28, 28]'], {'dtype': '"""float32"""'}), "([2, 490, 28, 28], dtype='float32')\n", (10315, 10350), False, 'import paddle\n'), ((10372, 10448), 'paddle.to_tensor', 'paddle.to_tensor', (['[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]', '"""float32"""'], {}), "([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')\n", (10388, 10448), False, 'import paddle\n'), ((11131, 11154), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (11152, 11154), False, 'import paddle\n'), ((11172, 11221), 'paddle.uniform', 'paddle.uniform', (['[2, 490, 28, 28]'], {'dtype': '"""float32"""'}), "([2, 490, 28, 28], dtype='float32')\n", (11186, 11221), False, 'import paddle\n'), ((11243, 11319), 'paddle.to_tensor', 'paddle.to_tensor', (['[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]', '"""float32"""'], {}), "([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')\n", (11259, 11319), False, 'import paddle\n'), ((11764, 11786), 'paddle.enable_static', 
'paddle.enable_static', ([], {}), '()\n', (11784, 11786), False, 'import paddle\n'), ((11816, 11868), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""x"""', 'shape': '[2, 490, 28, 28]'}), "(name='x', shape=[2, 490, 28, 28])\n", (11834, 11868), False, 'import paddle\n'), ((11986, 12045), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""boxes"""', 'shape': '[3, 4]', 'lod_level': '(1)'}), "(name='boxes', shape=[3, 4], lod_level=1)\n", (12004, 12045), False, 'import paddle\n'), ((12307, 12412), 'paddle.vision.ops.psroi_pool', 'paddle.vision.ops.psroi_pool', (['self.x_placeholder', 'self.boxes_placeholder', 'self.boxes_num', 'output_size'], {}), '(self.x_placeholder, self.boxes_placeholder,\n self.boxes_num, output_size)\n', (12335, 12412), False, 'import paddle\n'), ((12668, 12709), 'paddle.fluid.core.is_compiled_with_cuda', 'paddle.fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (12707, 12709), False, 'import paddle\n'), ((7861, 7885), 'paddle.set_device', 'paddle.set_device', (['place'], {}), '(place)\n', (7878, 7885), False, 'import paddle\n'), ((8452, 8487), 'paddle.vision.ops.PSRoIPool', 'paddle.vision.ops.PSRoIPool', (['(8)', '(1.1)'], {}), '(8, 1.1)\n', (8479, 8487), False, 'import paddle\n'), ((8930, 8965), 'paddle.vision.ops.PSRoIPool', 'paddle.vision.ops.PSRoIPool', (['(8)', '(1.1)'], {}), '(8, 1.1)\n', (8957, 8965), False, 'import paddle\n'), ((9410, 9445), 'paddle.vision.ops.PSRoIPool', 'paddle.vision.ops.PSRoIPool', (['(8)', '(1.1)'], {}), '(8, 1.1)\n', (9437, 9445), False, 'import paddle\n'), ((10036, 10060), 'paddle.set_device', 'paddle.set_device', (['place'], {}), '(place)\n', (10053, 10060), False, 'import paddle\n'), ((10555, 10588), 'paddle.to_tensor', 'paddle.to_tensor', (['[1, 5]', '"""int32"""'], {}), "([1, 5], 'int32')\n", (10571, 10588), False, 'import paddle\n'), ((10607, 10681), 'paddle.vision.ops.psroi_pool', 'paddle.vision.ops.psroi_pool', (['self.x', 'self.boxes', 'boxes_num'], {'output_size': '(7)'}), 
'(self.x, self.boxes, boxes_num, output_size=7)\n', (10635, 10681), False, 'import paddle\n'), ((10833, 10869), 'paddle.to_tensor', 'paddle.to_tensor', (['[1, 1, 1]', '"""int32"""'], {}), "([1, 1, 1], 'int32')\n", (10849, 10869), False, 'import paddle\n'), ((10888, 10962), 'paddle.vision.ops.psroi_pool', 'paddle.vision.ops.psroi_pool', (['self.x', 'self.boxes', 'boxes_num'], {'output_size': '(7)'}), '(self.x, self.boxes, boxes_num, output_size=7)\n', (10916, 10962), False, 'import paddle\n'), ((11448, 11481), 'paddle.to_tensor', 'paddle.to_tensor', (['[2, 1]', '"""int32"""'], {}), "([2, 1], 'int32')\n", (11464, 11481), False, 'import paddle\n'), ((11500, 11577), 'paddle.vision.ops.psroi_pool', 'paddle.vision.ops.psroi_pool', (['self.x', 'self.boxes', 'boxes_num', 'self.output_size'], {}), '(self.x, self.boxes, boxes_num, self.output_size)\n', (11528, 11577), False, 'import paddle\n'), ((12638, 12655), 'paddle.CPUPlace', 'paddle.CPUPlace', ([], {}), '()\n', (12653, 12655), False, 'import paddle\n'), ((12805, 12834), 'paddle.static.Executor', 'paddle.static.Executor', (['place'], {}), '(place)\n', (12827, 12834), False, 'import paddle\n'), ((12864, 12923), 'paddle.fluid.create_lod_tensor', 'paddle.fluid.create_lod_tensor', (['self.boxes', '[[1, 2]]', 'place'], {}), '(self.boxes, [[1, 2]], place)\n', (12894, 12923), False, 'import paddle\n'), ((4583, 4611), 'numpy.random.random', 'np.random.random', (['self.x_dim'], {}), '(self.x_dim)\n', (4599, 4611), True, 'import numpy as np\n'), ((4849, 4936), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(self.width // self.spatial_scale - self.pooled_width)'], {}), '(0, self.width // self.spatial_scale - self.\n pooled_width)\n', (4874, 4936), True, 'import numpy as np\n'), ((4974, 5063), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(self.height // self.spatial_scale - self.pooled_height)'], {}), '(0, self.height // self.spatial_scale - self.\n pooled_height)\n', (4999, 5063), 
True, 'import numpy as np\n'), ((5102, 5190), 'numpy.random.random_integers', 'np.random.random_integers', (['(x1 + self.pooled_width)', '(self.width // self.spatial_scale)'], {}), '(x1 + self.pooled_width, self.width // self.\n spatial_scale)\n', (5127, 5190), True, 'import numpy as np\n'), ((5254, 5344), 'numpy.random.random_integers', 'np.random.random_integers', (['(y1 + self.pooled_height)', '(self.height // self.spatial_scale)'], {}), '(y1 + self.pooled_height, self.height // self.\n spatial_scale)\n', (5279, 5344), True, 'import numpy as np\n'), ((5506, 5520), 'numpy.array', 'np.array', (['rois'], {}), '(rois)\n', (5514, 5520), True, 'import numpy as np\n'), ((6011, 6045), 'numpy.random.random', 'np.random.random', (['[2, 490, 28, 28]'], {}), '([2, 490, 28, 28])\n', (6027, 6045), True, 'import numpy as np\n'), ((6086, 6143), 'numpy.array', 'np.array', (['[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]'], {}), '([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]])\n', (6094, 6143), True, 'import numpy as np\n'), ((6201, 6217), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (6209, 6217), True, 'import numpy as np\n'), ((6703, 6731), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (6714, 6731), True, 'import numpy as np\n'), ((7176, 7204), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (7187, 7204), True, 'import numpy as np\n'), ((7677, 7705), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (7688, 7705), True, 'import numpy as np\n'), ((8096, 8130), 'numpy.random.random', 'np.random.random', (['[2, 128, 32, 32]'], {}), '([2, 128, 32, 32])\n', (8112, 8130), True, 'import numpy as np\n'), ((8171, 8243), 'numpy.array', 'np.array', (['[[3, 5, 6, 13], [7, 4, 22, 18], [4, 5, 7, 10], [5, 3, 25, 21]]'], {}), '([[3, 5, 6, 13], [7, 4, 22, 18], [4, 5, 7, 10], [5, 3, 25, 21]])\n', (8179, 8243), True, 'import numpy as np\n'), ((8319, 8335), 'numpy.array', 
'np.array', (['[2, 2]'], {}), '([2, 2])\n', (8327, 8335), True, 'import numpy as np\n'), ((8826, 8854), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (8837, 8854), True, 'import numpy as np\n'), ((9309, 9337), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (9320, 9337), True, 'import numpy as np\n'), ((9820, 9848), 'numpy.allclose', 'np.allclose', (['out', 'expect_out'], {}), '(out, expect_out)\n', (9831, 9848), True, 'import numpy as np\n'), ((11899, 11933), 'numpy.random.random', 'np.random.random', (['[2, 490, 28, 28]'], {}), '([2, 490, 28, 28])\n', (11915, 11933), True, 'import numpy as np\n'), ((12080, 12137), 'numpy.array', 'np.array', (['[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]'], {}), '([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]])\n', (12088, 12137), True, 'import numpy as np\n'), ((12195, 12211), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (12203, 12211), True, 'import numpy as np\n'), ((12737, 12756), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['(0)'], {}), '(0)\n', (12753, 12756), False, 'import paddle\n'), ((13014, 13050), 'paddle.static.default_main_program', 'paddle.static.default_main_program', ([], {}), '()\n', (13048, 13050), False, 'import paddle\n'), ((13244, 13276), 'numpy.allclose', 'np.allclose', (['out_res', 'expect_out'], {}), '(out_res, expect_out)\n', (13255, 13276), True, 'import numpy as np\n'), ((6399, 6423), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x'], {}), '(self.x)\n', (6415, 6423), False, 'import paddle\n'), ((6441, 6469), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes'], {}), '(self.boxes)\n', (6457, 6469), False, 'import paddle\n'), ((6487, 6519), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num'], {}), '(self.boxes_num)\n', (6503, 6519), False, 'import paddle\n'), ((6872, 6896), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x'], {}), '(self.x)\n', (6888, 6896), False, 'import paddle\n'), ((6914, 6942), 
'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes'], {}), '(self.boxes)\n', (6930, 6942), False, 'import paddle\n'), ((6960, 6992), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num'], {}), '(self.boxes_num)\n', (6976, 6992), False, 'import paddle\n'), ((7342, 7377), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x', '"""float64"""'], {}), "(self.x, 'float64')\n", (7358, 7377), False, 'import paddle\n'), ((7395, 7434), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes', '"""float64"""'], {}), "(self.boxes, 'float64')\n", (7411, 7434), False, 'import paddle\n'), ((7452, 7493), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num', '"""int32"""'], {}), "(self.boxes_num, 'int32')\n", (7468, 7493), False, 'import paddle\n'), ((8536, 8560), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x'], {}), '(self.x)\n', (8552, 8560), False, 'import paddle\n'), ((8578, 8606), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes'], {}), '(self.boxes)\n', (8594, 8606), False, 'import paddle\n'), ((8624, 8656), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num'], {}), '(self.boxes_num)\n', (8640, 8656), False, 'import paddle\n'), ((9019, 9043), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x'], {}), '(self.x)\n', (9035, 9043), False, 'import paddle\n'), ((9061, 9089), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes'], {}), '(self.boxes)\n', (9077, 9089), False, 'import paddle\n'), ((9107, 9139), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num'], {}), '(self.boxes_num)\n', (9123, 9139), False, 'import paddle\n'), ((9499, 9534), 'paddle.to_tensor', 'paddle.to_tensor', (['self.x', '"""float64"""'], {}), "(self.x, 'float64')\n", (9515, 9534), False, 'import paddle\n'), ((9552, 9591), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes', '"""float64"""'], {}), "(self.boxes, 'float64')\n", (9568, 9591), False, 'import paddle\n'), ((9609, 9650), 'paddle.to_tensor', 'paddle.to_tensor', (['self.boxes_num', '"""int32"""'], {}), 
"(self.boxes_num, 'int32')\n", (9625, 9650), False, 'import paddle\n')] |
from django.urls import path, re_path
from Net640.apps.images import views
app_name = 'images'
urlpatterns = [
path(r'my_images/', views.user_images_view, name="my_images"),
# TODO move to rest api
path(r'action/', views.user_image_action, name="user_image_action"),
]
| [
"django.urls.path"
] | [((116, 176), 'django.urls.path', 'path', (['"""my_images/"""', 'views.user_images_view'], {'name': '"""my_images"""'}), "('my_images/', views.user_images_view, name='my_images')\n", (120, 176), False, 'from django.urls import path, re_path\n'), ((211, 277), 'django.urls.path', 'path', (['"""action/"""', 'views.user_image_action'], {'name': '"""user_image_action"""'}), "('action/', views.user_image_action, name='user_image_action')\n", (215, 277), False, 'from django.urls import path, re_path\n')] |
import csv
# django-request analytics package, NOT requests URL library!
from request.models import Request
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from . import views
class GraphsTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(GraphsTestCase, cls).setUpClass()
cls.factory = RequestFactory()
Request(path='/admin/', ip='127.0.0.1').save()
Request(path='/admin/', ip='127.0.0.1').save()
Request(path='/admin/login/', ip='127.0.0.1').save()
user = User.objects.create_user(username='foo', password='<PASSWORD>')
user.is_staff = True
user.save()
cls.staff_user = user
def _verify_equal(self, resp, expected_data):
reader_list = csv.reader(resp.content.splitlines())
reader_list.next() # Skip header row
for row in reader_list:
assert row in expected_data
# The total number of lines in our CSV should be one more than the
# number of lines in our expected_data (because of the header row).
# As we've verified that all the lines in our CSV are present in our
# expected data, we've now verified that they are identical (modulo the
# header).
self.assertEqual(reader_list.line_num, len(expected_data) + 1)
def test_csv_page_views(self):
url = reverse('csv:page_views')
# Page view metrics are limited to staff, so make sure to use
# RequestFactory to add a staff user to the request.
req = self.factory.get(url)
req.user = self.staff_user
resp = views.CSVPageViews.as_view()(req)
expected_data = [['/admin/', '2'], ['/admin/login/', '1']]
self._verify_equal(resp, expected_data)
def test_csv_page_views_by_path(self):
# Try one of the paths in our responses.
url = reverse('csv:page_views_by_path', kwargs={'path': 'admin/login'})
req = self.factory.get(url)
req.user = self.staff_user
resp = views.CSVPageViewsByPath.as_view()(req, path='admin/login')
expected_data = [['/admin/login/', '1']]
self._verify_equal(resp, expected_data)
# Try the other.
url = reverse('csv:page_views_by_path', kwargs={'path': 'admin'})
req = self.factory.get(url)
req.user = self.staff_user
resp = views.CSVPageViewsByPath.as_view()(req, path='admin')
expected_data = [['/admin/', '2']]
self._verify_equal(resp, expected_data)
# Try a path we haven't hit.
url = reverse('csv:page_views_by_path', kwargs={'path': 'fake/url'})
req = self.factory.get(url)
req.user = self.staff_user
resp = views.CSVPageViewsByPath.as_view()(req, path='fake/url')
expected_data = [['/fake/url/', '0']]
self._verify_equal(resp, expected_data)
| [
"request.models.Request",
"django.contrib.auth.models.User.objects.create_user",
"django.test.client.RequestFactory",
"django.core.urlresolvers.reverse"
] | [((444, 460), 'django.test.client.RequestFactory', 'RequestFactory', ([], {}), '()\n', (458, 460), False, 'from django.test.client import RequestFactory\n'), ((649, 712), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""foo"""', 'password': '"""<PASSWORD>"""'}), "(username='foo', password='<PASSWORD>')\n", (673, 712), False, 'from django.contrib.auth.models import User\n'), ((1471, 1496), 'django.core.urlresolvers.reverse', 'reverse', (['"""csv:page_views"""'], {}), "('csv:page_views')\n", (1478, 1496), False, 'from django.core.urlresolvers import reverse\n'), ((1975, 2040), 'django.core.urlresolvers.reverse', 'reverse', (['"""csv:page_views_by_path"""'], {'kwargs': "{'path': 'admin/login'}"}), "('csv:page_views_by_path', kwargs={'path': 'admin/login'})\n", (1982, 2040), False, 'from django.core.urlresolvers import reverse\n'), ((2327, 2386), 'django.core.urlresolvers.reverse', 'reverse', (['"""csv:page_views_by_path"""'], {'kwargs': "{'path': 'admin'}"}), "('csv:page_views_by_path', kwargs={'path': 'admin'})\n", (2334, 2386), False, 'from django.core.urlresolvers import reverse\n'), ((2673, 2735), 'django.core.urlresolvers.reverse', 'reverse', (['"""csv:page_views_by_path"""'], {'kwargs': "{'path': 'fake/url'}"}), "('csv:page_views_by_path', kwargs={'path': 'fake/url'})\n", (2680, 2735), False, 'from django.core.urlresolvers import reverse\n'), ((470, 509), 'request.models.Request', 'Request', ([], {'path': '"""/admin/"""', 'ip': '"""127.0.0.1"""'}), "(path='/admin/', ip='127.0.0.1')\n", (477, 509), False, 'from request.models import Request\n'), ((525, 564), 'request.models.Request', 'Request', ([], {'path': '"""/admin/"""', 'ip': '"""127.0.0.1"""'}), "(path='/admin/', ip='127.0.0.1')\n", (532, 564), False, 'from request.models import Request\n'), ((580, 625), 'request.models.Request', 'Request', ([], {'path': '"""/admin/login/"""', 'ip': '"""127.0.0.1"""'}), "(path='/admin/login/', ip='127.0.0.1')\n", 
(587, 625), False, 'from request.models import Request\n')] |
import sys, inspect
class Tutorial:
flow = "TUTORIAL"
type = "embed"
title = "__TUTORIAL HELP__"
colour = "green"
def __init__(self, player=None):
self.fields = (('Official Discord Server:',
'''
https://discord.com/invite/discord-developers
**-**`tutorial`: :mortar_board: A simple tutorial for how the bot works.
''', False))
self.footer_text = "\u00A9 2018 | Bot"
self.footer_icon = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
self.thumbnail = ""
self.image = ""
pointer = None
class P1:
flow = "p1"
type = "menu"
title = "Tutorial HELP CONT.__"
colour = "blue"
def __init__(self, player=None):
self.fields = (("https://discord.com/invite/discord-developers",
"""
https://discord.com/invite/discord-developers """))
self.footer_text = "\u00A9 2018 | Bot"
self.footer_icon = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
self.thumbnail = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
self.image = ""
pointer = None
class P2:
flow = "p2"
type = " main menu"
title = "__Tutorial HELP__"
colour = "blue"
def __init__(self, player=None):
self.fields = (("Official Discord Server:",
"""
https://discord.com/invite/discord-developers
"""))
self.footer_text = "\u00A9 2018 | Bot"
self.footer_icon = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
self.thumbnail = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
self.image = "https://1000logos.net/wp-content/uploads/2020/10/Discord-emblem.jpg"
pointer = P1
# Automatically reads and assigns flows.
flows = {}
for a, obj in inspect.getmembers(sys.modules[__name__]):
try:
flows[obj.flow] = obj
except AttributeError:
pass
| [
"inspect.getmembers"
] | [((2063, 2104), 'inspect.getmembers', 'inspect.getmembers', (['sys.modules[__name__]'], {}), '(sys.modules[__name__])\n', (2081, 2104), False, 'import sys, inspect\n')] |
import connexion
import six
from deregnet_rest.models.score import Score # noqa: E501
from deregnet_rest.models.score_info import ScoreInfo # noqa: E501
from deregnet_rest import util
from deregnet_rest.controllers_impl.scores import ScoreController
def delete_score(score_id): # noqa: E501
"""Delete a previously uploaded node score
Delete a previously uploaded node score # noqa: E501
:param score_id: ID of the node score to be deleted
:type score_id: str
:rtype: None
"""
return ScoreController.delete_score(score_id)
def get_score(score_id): # noqa: E501
"""Retrieve information on a previously uploaded score
# noqa: E501
:param score_id: ID of node score to return
:type score_id: str
:rtype: ScoreInfo
"""
return ScoreController.get_score(score_id)
def get_scores(skip=0, limit=1000): # noqa: E501
"""List available previously uploaded node scores
Returns a list with all available node scores # noqa: E501
:param skip: number of records to skip for pagination
:type skip: int
:param limit: maximum number of records to return
:type limit: int
:rtype: List[ScoreInfo]
"""
return ScoreController.get_scores(skip, limit)
def post_score(body): # noqa: E501
"""Upload a node score for use with DeRegNet algorithms
# noqa: E501
:param body: Node scores to be uploaded for later use with a DeRegNet algorithm
:type body: dict | bytes
:rtype: ScoreInfo
"""
if connexion.request.is_json:
body = Score.from_dict(connexion.request.get_json()) # noqa: E501
return ScoreController.post_score(body)
| [
"deregnet_rest.controllers_impl.scores.ScoreController.get_score",
"deregnet_rest.controllers_impl.scores.ScoreController.delete_score",
"deregnet_rest.controllers_impl.scores.ScoreController.get_scores",
"deregnet_rest.controllers_impl.scores.ScoreController.post_score",
"connexion.request.get_json"
] | [((521, 559), 'deregnet_rest.controllers_impl.scores.ScoreController.delete_score', 'ScoreController.delete_score', (['score_id'], {}), '(score_id)\n', (549, 559), False, 'from deregnet_rest.controllers_impl.scores import ScoreController\n'), ((794, 829), 'deregnet_rest.controllers_impl.scores.ScoreController.get_score', 'ScoreController.get_score', (['score_id'], {}), '(score_id)\n', (819, 829), False, 'from deregnet_rest.controllers_impl.scores import ScoreController\n'), ((1202, 1241), 'deregnet_rest.controllers_impl.scores.ScoreController.get_scores', 'ScoreController.get_scores', (['skip', 'limit'], {}), '(skip, limit)\n', (1228, 1241), False, 'from deregnet_rest.controllers_impl.scores import ScoreController\n'), ((1624, 1656), 'deregnet_rest.controllers_impl.scores.ScoreController.post_score', 'ScoreController.post_score', (['body'], {}), '(body)\n', (1650, 1656), False, 'from deregnet_rest.controllers_impl.scores import ScoreController\n'), ((1569, 1597), 'connexion.request.get_json', 'connexion.request.get_json', ([], {}), '()\n', (1595, 1597), False, 'import connexion\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2006 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Create binary symbol map out of linker map file
"""
import sys
import struct
import re
MAXSTRING = 63
symtabfmt = "<Q%ds" % (MAXSTRING + 1)
funcline = re.compile(r'([0-9a-f]+)\s+[lg]\s+.\s+\.text\s+([0-9a-f]+)\s+(.*)$')
bssline = re.compile(r'([0-9a-f]+)\s+[lg]\s+[a-zA-Z]\s+\.bss\s+([0-9a-f]+)\s+(.*)$')
dataline = re.compile(r'([0-9a-f]+)\s+[lg]\s+[a-zA-Z]\s+\.data\s+([0-9a-f]+)\s+(.*)$')
fileexp = re.compile(r'([^\s]+):\s+file format')
startfile = re.compile(r'\.(text|bss|data)\s+(0x[0-9a-f]+)\s+0x[0-9a-f]+\s+(.*)$')
def read_obdump(inp):
"Parse input"
funcs = {}
data = {}
bss = {}
fname = ''
for line in inp:
line = line.strip()
res = funcline.match(line)
if (res):
funcs.setdefault(fname, []).append((int(res.group(1), 16), res.group(3)))
continue
res = bssline.match(line)
if (res):
start = int(res.group(1), 16)
end = int(res.group(2), 16)
if (end):
bss.setdefault(fname, []).append((start, res.group(3)))
res = dataline.match(line)
if (res):
start = int(res.group(1), 16)
end = int(res.group(2), 16)
if (end):
data.setdefault(fname, []).append((start, res.group(3)))
res = fileexp.match(line)
if (res):
fname = res.group(1)
continue
return {'text' : funcs, 'bss' : bss, 'data' : data}
def generate(kmapf, obmapf, out):
"Generate output file"
obdump = read_obdump(obmapf)
def key_sorter(x):
return x[0]
for line in kmapf:
line = line.strip()
res = startfile.match(line)
if ((res) and (res.group(3) in obdump[res.group(1)])):
offset = int(res.group(2), 16)
fname = res.group(3)
symbols = obdump[res.group(1)][fname]
symbols.sort(key = key_sorter)
for addr, symbol in symbols:
value = fname + ':' + symbol
value_bytes = value.encode('ascii')
data = struct.pack(symtabfmt, addr + offset, value_bytes[:MAXSTRING])
out.write(data)
out.write(struct.pack(symtabfmt, 0, b''))
def main():
if (len(sys.argv) != 4):
print("Usage: %s <kernel.map> <nm dump> <output.bin>" % sys.argv[0])
return 1
kmapf = open(sys.argv[1], 'r')
obmapf = open(sys.argv[2], 'r')
out = open(sys.argv[3], 'wb')
generate(kmapf, obmapf, out)
kmapf.close()
obmapf.close()
out.close()
if __name__ == '__main__':
sys.exit(main())
| [
"struct.pack",
"re.compile"
] | [((1611, 1684), 're.compile', 're.compile', (['"""([0-9a-f]+)\\\\s+[lg]\\\\s+.\\\\s+\\\\.text\\\\s+([0-9a-f]+)\\\\s+(.*)$"""'], {}), "('([0-9a-f]+)\\\\s+[lg]\\\\s+.\\\\s+\\\\.text\\\\s+([0-9a-f]+)\\\\s+(.*)$')\n", (1621, 1684), False, 'import re\n'), ((1690, 1769), 're.compile', 're.compile', (['"""([0-9a-f]+)\\\\s+[lg]\\\\s+[a-zA-Z]\\\\s+\\\\.bss\\\\s+([0-9a-f]+)\\\\s+(.*)$"""'], {}), "('([0-9a-f]+)\\\\s+[lg]\\\\s+[a-zA-Z]\\\\s+\\\\.bss\\\\s+([0-9a-f]+)\\\\s+(.*)$')\n", (1700, 1769), False, 'import re\n'), ((1776, 1861), 're.compile', 're.compile', (['"""([0-9a-f]+)\\\\s+[lg]\\\\s+[a-zA-Z]\\\\s+\\\\.data\\\\s+([0-9a-f]+)\\\\s+(.*)$"""'], {}), "('([0-9a-f]+)\\\\s+[lg]\\\\s+[a-zA-Z]\\\\s+\\\\.data\\\\s+([0-9a-f]+)\\\\s+(.*)$'\n )\n", (1786, 1861), False, 'import re\n'), ((1862, 1901), 're.compile', 're.compile', (['"""([^\\\\s]+):\\\\s+file format"""'], {}), "('([^\\\\s]+):\\\\s+file format')\n", (1872, 1901), False, 'import re\n'), ((1913, 1986), 're.compile', 're.compile', (['"""\\\\.(text|bss|data)\\\\s+(0x[0-9a-f]+)\\\\s+0x[0-9a-f]+\\\\s+(.*)$"""'], {}), "('\\\\.(text|bss|data)\\\\s+(0x[0-9a-f]+)\\\\s+0x[0-9a-f]+\\\\s+(.*)$')\n", (1923, 1986), False, 'import re\n'), ((3345, 3375), 'struct.pack', 'struct.pack', (['symtabfmt', '(0)', "b''"], {}), "(symtabfmt, 0, b'')\n", (3356, 3375), False, 'import struct\n'), ((3247, 3309), 'struct.pack', 'struct.pack', (['symtabfmt', '(addr + offset)', 'value_bytes[:MAXSTRING]'], {}), '(symtabfmt, addr + offset, value_bytes[:MAXSTRING])\n', (3258, 3309), False, 'import struct\n')] |
#!/usr/bin/env python3
import random
import time
import sys
import pygame
from pygame.locals import *
import pygame.surfarray as surfarray # for performance
import numpy as np
import colors # color definition
SCREEN_SIZE = (1600, 900) # change it to your screen size
#color definitions
# (r, g, b)
RED = (255, 0, 0)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLUE = (0, 0, 128)
BG_COLOR = BLACK
CELL_COLOR = GREEN
def main():
"""\
Press 'a' to decrease max possible fps.
Press 'd' to increase max possible fps.
Press 's' for no max fps limit.
Press 'z' to decrease length of cell side.
Press 'x' to increase length of cell side.
Press 'p' to pause the game.
"""
side = 50 # length of cell side
width = int(SCREEN_SIZE[0] / side) # number of cells per row
height = int(SCREEN_SIZE[1] / side) # number of cellls per column
pygame.init()
pygame.mouse.set_visible(False)
SURF = pygame.display.set_mode(SCREEN_SIZE,FULLSCREEN,32);
fontObj = pygame.font.Font('freesansbold.ttf',32);
FPSCLOCK = pygame.time.Clock()
fps = 5 # max fps
maps, slices = generate_random_map(width, height, side);
pre_frame_time = time.time() # time of previous frame
paused = False
while True:
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_q:
pygame.quit()
sys.exit()
if event.key == K_a and fps > 1:
fps -= 1
if event.key == K_d and fps < 60:
fps += 1
if event.key == K_p:
paused = not paused
if event.key == K_f:
maps[random.randint(1, width), random.randint(1, height)] = True
if event.key == K_k:
maps[random.randint(1, width), :] = True
if event.key == K_l:
maps[:, random.randint(1, height)] = True
if event.key == K_m:
maps[:, :] = False
if event.key == K_z and side > 5:
side -= 5
if side == 15:
side = 10
width = int(SCREEN_SIZE[0] / side);
height = int(SCREEN_SIZE[1] / side);
maps, slices = generate_random_map(width, height, side)
if event.key == K_x and side < 100:
side += 5
if side == 15:
side = 20
width = int(SCREEN_SIZE[0] / side);
height = int(SCREEN_SIZE[1] / side);
maps, slices = generate_random_map(width, height, side)
if event.key == K_s:
fps = 0
if event.key == K_r:
maps, slices = generate_random_map(width, height, side)
if event.type == QUIT:
pygame.quit()
sys.exit()
SURF.fill(BG_COLOR)
show_map(SURF, maps, side, slices)
if not paused:
maps = update(maps)
current_frame_time = time.time()
textSURF = fontObj.render('real fps: ' + str(1//(current_frame_time-pre_frame_time)), True, colors.random_color());
pre_frame_time = current_frame_time
textRect = textSURF.get_rect();
textRect.topright = (SCREEN_SIZE[0],200);
SURF.blit(textSURF,textRect);
textSURF = fontObj.render('length of side: ' + str(side), True, colors.random_color());
textRect = textSURF.get_rect();
textRect.topright = (SCREEN_SIZE[0],100);
SURF.blit(textSURF,textRect);
pygame.display.update();
FPSCLOCK.tick(fps)
def generate_random_map(width, height, side):
"""\
Generate a larger sized map than given width, height.
Define slices for quickly drawing with small length of side.
Return generated map and slices.
"""
slices = None
if side < 10:
K = side
Y, X = SCREEN_SIZE
slices = []
for y in range(0, K):
for x in range(0, K):
s = slice(y, Y, K), slice(x, X, K)
slices.append(s)
maps = np.zeros((width+2, height+2), dtype=np.bool)
for col in range(width):
n_cell = random.randint(0, height)
col_map = n_cell * [np.bool(1)]
col_map.extend([np.bool(0)] * (height-n_cell))
assert len(col_map) == height
random.shuffle(col_map)
maps[col+1,1:-1] = col_map
return (maps, slices)
def show_map(SURF, _map, side, slices=None):
"""\
Draw the map to surface SURF. If side is to small, pass in slices returned
by generate_random_map.
"""
_map = _map[1:-1, 1:-1]
if slices is not None:
bit_map = np.zeros(SCREEN_SIZE, dtype=np.bool)
for s in slices:
bit_map[s] = _map
bit_map = bit_map * SURF.map_rgb(colors.random_color())
surfarray.blit_array(SURF, bit_map)
else:
cell_surf = pygame.Surface((side,side))
for w in range(_map.shape[0]):
for h in range(_map.shape[1]):
if _map[w][h]:
cell_surf.fill(colors.random_color())
SURF.blit(cell_surf, (w * side, h * side))
def update(oldmap):
"""\
Update the status fo every cell according to arround live cells.
"""
nbrs_count = sum(np.roll(np.roll(oldmap, i, 0), j, 1)
for i in (-1, 0, 1) for j in (-1, 0, 1)
if (i != 0 or j != 0))
_newmap = (nbrs_count == 3) | (oldmap & (nbrs_count == 2))
newmap = np.zeros(oldmap.shape, dtype=np.bool)
newmap[1:-1, 1:-1] = _newmap[1:-1, 1:-1]
return newmap
if __name__ == '__main__':
main()
| [
"colors.random_color",
"sys.exit",
"pygame.init",
"random.shuffle",
"pygame.event.get",
"pygame.Surface",
"pygame.display.set_mode",
"pygame.display.update",
"pygame.quit",
"numpy.bool",
"numpy.roll",
"pygame.mouse.set_visible",
"numpy.zeros",
"pygame.surfarray.blit_array",
"pygame.time.... | [((1014, 1027), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1025, 1027), False, 'import pygame\n'), ((1032, 1063), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (1056, 1063), False, 'import pygame\n'), ((1075, 1127), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SCREEN_SIZE', 'FULLSCREEN', '(32)'], {}), '(SCREEN_SIZE, FULLSCREEN, 32)\n', (1098, 1127), False, 'import pygame\n'), ((1141, 1181), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(32)'], {}), "('freesansbold.ttf', 32)\n", (1157, 1181), False, 'import pygame\n'), ((1198, 1217), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1215, 1217), False, 'import pygame\n'), ((1359, 1370), 'time.time', 'time.time', ([], {}), '()\n', (1368, 1370), False, 'import time\n'), ((4427, 4475), 'numpy.zeros', 'np.zeros', (['(width + 2, height + 2)'], {'dtype': 'np.bool'}), '((width + 2, height + 2), dtype=np.bool)\n', (4435, 4475), True, 'import numpy as np\n'), ((5824, 5861), 'numpy.zeros', 'np.zeros', (['oldmap.shape'], {'dtype': 'np.bool'}), '(oldmap.shape, dtype=np.bool)\n', (5832, 5861), True, 'import numpy as np\n'), ((1467, 1485), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1483, 1485), False, 'import pygame\n'), ((3363, 3374), 'time.time', 'time.time', ([], {}), '()\n', (3372, 3374), False, 'import time\n'), ((3906, 3929), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3927, 3929), False, 'import pygame\n'), ((4518, 4543), 'random.randint', 'random.randint', (['(0)', 'height'], {}), '(0, height)\n', (4532, 4543), False, 'import random\n'), ((4686, 4709), 'random.shuffle', 'random.shuffle', (['col_map'], {}), '(col_map)\n', (4700, 4709), False, 'import random\n'), ((5003, 5039), 'numpy.zeros', 'np.zeros', (['SCREEN_SIZE'], {'dtype': 'np.bool'}), '(SCREEN_SIZE, dtype=np.bool)\n', (5011, 5039), True, 'import numpy as np\n'), ((5168, 5203), 'pygame.surfarray.blit_array', 
'surfarray.blit_array', (['SURF', 'bit_map'], {}), '(SURF, bit_map)\n', (5188, 5203), True, 'import pygame.surfarray as surfarray\n'), ((5234, 5262), 'pygame.Surface', 'pygame.Surface', (['(side, side)'], {}), '((side, side))\n', (5248, 5262), False, 'import pygame\n'), ((3475, 3496), 'colors.random_color', 'colors.random_color', ([], {}), '()\n', (3494, 3496), False, 'import colors\n'), ((3745, 3766), 'colors.random_color', 'colors.random_color', ([], {}), '()\n', (3764, 3766), False, 'import colors\n'), ((3164, 3177), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3175, 3177), False, 'import pygame\n'), ((3194, 3204), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3202, 3204), False, 'import sys\n'), ((4572, 4582), 'numpy.bool', 'np.bool', (['(1)'], {}), '(1)\n', (4579, 4582), True, 'import numpy as np\n'), ((5137, 5158), 'colors.random_color', 'colors.random_color', ([], {}), '()\n', (5156, 5158), False, 'import colors\n'), ((5624, 5645), 'numpy.roll', 'np.roll', (['oldmap', 'i', '(0)'], {}), '(oldmap, i, 0)\n', (5631, 5645), True, 'import numpy as np\n'), ((1582, 1595), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1593, 1595), False, 'import pygame\n'), ((1616, 1626), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1624, 1626), False, 'import sys\n'), ((4608, 4618), 'numpy.bool', 'np.bool', (['(0)'], {}), '(0)\n', (4615, 4618), True, 'import numpy as np\n'), ((5410, 5431), 'colors.random_color', 'colors.random_color', ([], {}), '()\n', (5429, 5431), False, 'import colors\n'), ((1923, 1947), 'random.randint', 'random.randint', (['(1)', 'width'], {}), '(1, width)\n', (1937, 1947), False, 'import random\n'), ((1949, 1974), 'random.randint', 'random.randint', (['(1)', 'height'], {}), '(1, height)\n', (1963, 1974), False, 'import random\n'), ((2045, 2069), 'random.randint', 'random.randint', (['(1)', 'width'], {}), '(1, width)\n', (2059, 2069), False, 'import random\n'), ((2146, 2171), 'random.randint', 'random.randint', (['(1)', 'height'], {}), '(1, height)\n', (2160, 
2171), False, 'import random\n')] |
import unittest #unitest module
from password import Credential #imports Credential class for testing
from password import User #imports User class for testing
class TestUser(unittest.TestCase):
'''
Test class that helps define test cases for the credentials class behaviours
Args:
Testcase class taht helps create test cases for User
'''
# test to check if user object is instatiated properly
def setUp(self):
'''
Set up method to run before each test case.
'''
self.new_profile = User('Vernice','<NAME>')
def test__init(self):
'''
Test case to test if User object is instantiated correctly
'''
self.assertEqual(self.new_profile.userName, 'Vernice')
self.assertEqual(self.new_profile.password, '<NAME>')
#end of class user test
#start of class credential test
class TestCredential(unittest.TestCase):
'''
Test class that helps define test cases for the credentials class behaviours
Args:
Testcase class that helps create test cases for Credential
'''
#test to check if credential object is instantiated properly
def setUp(self):
'''
Set up method to run before each test case.
'''
self.new_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98') #sample login details for a new pintrest account
def test__init(self):
'''
Test case to test if credential object is instantiated correctly
'''
self.assertEqual(self.new_account.accountName, 'Pintrest')
self.assertEqual(self.new_account.accountUsername,'kimperria')
self.assertEqual(self.new_account.accountPassword,'<PASSWORD>')
# save account
def test_save_account(self):
'''
Test case to check account object is saved into the contact list
'''
self.new_account.save_account()
self.assertEqual(len(Credential.credentials_list),1)
def tearDown(self):
'''
cleans up each credential list after instance
'''
Credential.credentials_list = []
# save multiple accounts
def test_save_multiple_accounts(self):
'''
Test case to check if users can save multiple accounts
'''
self.new_account.save_account()
test_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98')
test_account.save_account()
self.assertEqual(len(Credential.credentials_list),2)
#delete account
def test_delete_account(self):
'''
Test case to check if user can delete an account
'''
self.new_account.save_account()
test_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98')
test_account.save_account()
self.new_account.delete_account() #deletes account object
self.assertEqual(len(Credential.credentials_list),1)
# search account by username
def test_find_account_by_username(self):
'''
Test case to check if we can find an account by username and display information
'''
self.new_account.save_account()
test_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98')
test_account.save_account()
found_account = Credential.find_by_accountUsername('kimperria')
self.assertEqual(found_account.accountUsername,test_account.accountUsername)
# check if account exist
def test_account_exist(self):
'''
Test case to check if a user account already exist returns a boolean
'''
self.new_account.save_account()
test_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98')
test_account.save_account()
account_exists = Credential.account_exist('kimperria')
self.assertTrue(account_exists)
# display available accounts
def test_display_all_accounts(self):
'''
Method that returns a list of all saved accounts
'''
self.assertEqual(Credential.display_accounts(),Credential.credentials_list)
#class condition
if __name__ == '__main__':
unittest.main() | [
"password.Credential.display_accounts",
"password.Credential.find_by_accountUsername",
"password.Credential",
"unittest.main",
"password.Credential.account_exist",
"password.User"
] | [((4125, 4140), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4138, 4140), False, 'import unittest\n'), ((547, 572), 'password.User', 'User', (['"""Vernice"""', '"""<NAME>"""'], {}), "('Vernice', '<NAME>')\n", (551, 572), False, 'from password import User\n'), ((1287, 1336), 'password.Credential', 'Credential', (['"""Pintrest"""', '"""kimperria"""', '"""Aura-Dev98"""'], {}), "('Pintrest', 'kimperria', 'Aura-Dev98')\n", (1297, 1336), False, 'from password import Credential\n'), ((2342, 2391), 'password.Credential', 'Credential', (['"""Pintrest"""', '"""kimperria"""', '"""Aura-Dev98"""'], {}), "('Pintrest', 'kimperria', 'Aura-Dev98')\n", (2352, 2391), False, 'from password import Credential\n'), ((2693, 2742), 'password.Credential', 'Credential', (['"""Pintrest"""', '"""kimperria"""', '"""Aura-Dev98"""'], {}), "('Pintrest', 'kimperria', 'Aura-Dev98')\n", (2703, 2742), False, 'from password import Credential\n'), ((3166, 3215), 'password.Credential', 'Credential', (['"""Pintrest"""', '"""kimperria"""', '"""Aura-Dev98"""'], {}), "('Pintrest', 'kimperria', 'Aura-Dev98')\n", (3176, 3215), False, 'from password import Credential\n'), ((3277, 3324), 'password.Credential.find_by_accountUsername', 'Credential.find_by_accountUsername', (['"""kimperria"""'], {}), "('kimperria')\n", (3311, 3324), False, 'from password import Credential\n'), ((3641, 3690), 'password.Credential', 'Credential', (['"""Pintrest"""', '"""kimperria"""', '"""Aura-Dev98"""'], {}), "('Pintrest', 'kimperria', 'Aura-Dev98')\n", (3651, 3690), False, 'from password import Credential\n'), ((3753, 3790), 'password.Credential.account_exist', 'Credential.account_exist', (['"""kimperria"""'], {}), "('kimperria')\n", (3777, 3790), False, 'from password import Credential\n'), ((4016, 4045), 'password.Credential.display_accounts', 'Credential.display_accounts', ([], {}), '()\n', (4043, 4045), False, 'from password import Credential\n')] |
import enum
import sys
import os.path
# Token-level views of the corpus, and the chunk (text-division) types.
TokenType = enum.Enum("TokenType", "form lemma parse morph_lemma all")
ChunkType = enum.Enum("ChunkType", "book chapter verse paragraph pericope")
# Data file (relative to this module) holding the token span of each chunk.
chunk_data_filename = {
    ChunkType.book: "books.txt",
    ChunkType.chapter: "chapters.txt",
    ChunkType.verse: "verses.txt",
    ChunkType.paragraph: "paragraphs.txt",
    ChunkType.pericope: "pericopes.txt"
}
# chunk_ids: ChunkType -> ordered list of chunk ids (filled by load_chunk_data)
chunk_ids = {}
# chunk_data: (ChunkType, chunk_id) -> (token_start, token_end);
# offsets are 1-based and inclusive (see get_tokens' start-1:end slice).
chunk_data = {}
def load_chunk_data():
    """Populate ``chunk_ids`` and ``chunk_data`` from the chunk data files.

    Each line of a data file is ``<chunk_id> <token_start> <token_end>``;
    the token offsets are stored as ints keyed by ``(chunk_type, chunk_id)``.
    Exits the process with status 1 on a malformed line.
    """
    for chunk_type, filename in chunk_data_filename.items():
        chunk_ids[chunk_type] = []
        # Progress goes to stderr so it does not pollute piped output.
        print(f'loading {filename}', file=sys.stderr)
        with open(os.path.join(os.path.dirname(__file__), filename), encoding="UTF-8") as f:
            for line in f:
                try:
                    chunk_id, token_start, token_end = line.strip().split(maxsplit=2)
                    chunk_data[(chunk_type, chunk_id)] = (
                        int(token_start), int(token_end)
                    )
                    chunk_ids[chunk_type].append(chunk_id)
                except ValueError:
                    # A malformed line is unrecoverable: report it and abort
                    # with a non-zero status (the original bare except plus
                    # sys.exit() exited 0, hiding the failure from callers).
                    print(line, file=sys.stderr)
                    sys.exit(1)
def load_wlc():
    """Yield one token record per line of ``tokens.txt``.

    Each record is the list of tab-separated fields of a line (at most
    five splits), with the trailing newline removed.
    """
    tokens_path = os.path.join(os.path.dirname(__file__), "tokens.txt")
    with open(tokens_path, 'r', encoding="UTF-8") as token_file:
        for raw_line in token_file:
            yield raw_line.replace('\n', '').split('\t', maxsplit=4)
# token_data: TokenType -> list of per-token values, filled by load_tokens().
token_data = {}
def load_tokens():
    """Populate ``token_data`` with one list per ``TokenType`` from the corpus.

    The original nested the corpus scan inside the loop over token types,
    which re-read the file once per type, appended each token five times,
    and indexed lists that were not yet initialised.  It also accessed the
    other enum members through the loop variable (``token_type.form``),
    which relies on member-of-member access removed in Python 3.12.
    Here all lists are created first and the corpus is scanned exactly once.
    """
    for token_type in TokenType:
        token_data[token_type] = []
    for token_id, ref, form, lemma, parse in load_wlc():
        token_data[TokenType.form].append(form)
        token_data[TokenType.lemma].append(lemma)
        token_data[TokenType.morph_lemma].append((parse, lemma))
        token_data[TokenType.parse].append(parse)
        token_data[TokenType.all].append((form, lemma, parse))
# Load the corpus and chunk indices eagerly at import time so the lookup
# helpers below are usable immediately.
load_tokens()
load_chunk_data()
def get_tokens(token_type, chunk_type=None, chunk_id=None):
    """Return the token values of ``token_type``, optionally for one chunk.

    With both ``chunk_type`` and ``chunk_id`` given, only the slice of the
    corpus covering that chunk is returned; with neither, the whole corpus
    is returned.  Supplying just one of the two raises ``ValueError``.
    """
    if chunk_type is None and chunk_id is None:
        return token_data[token_type]
    if chunk_type and chunk_id:
        start, end = chunk_data[(chunk_type, chunk_id)]
        # Token offsets in the chunk data files are 1-based and inclusive.
        return token_data[token_type][start - 1:end]
    raise ValueError(
        "either both or neigher of chunk_type and chunk_id"
        "must be provided"
    )
def get_tokens_by_chunk(token_type, chunk_type):
    """Map every chunk id of ``chunk_type`` to its tokens of ``token_type``."""
    tokens_by_chunk = {}
    for chunk_id in chunk_ids[chunk_type]:
        tokens_by_chunk[chunk_id] = get_tokens(token_type, chunk_type, chunk_id)
    return tokens_by_chunk
def get_chunk_ids(chunk_type):
    """Return the ordered list of chunk ids loaded for ``chunk_type``."""
    return chunk_ids[chunk_type]
def pprint_text(items):
    """Join token strings into readable Hebrew text.

    Sof pasuq (׃) and maqaf (־) are re-attached to the surrounding words
    and morpheme-boundary slashes are stripped.
    """
    text = ' '.join(items)
    # Order matters: the spaced forms must be collapsed before the bare
    # slash is removed.
    for old, new in ((' ׃', '׃'), (' ־ ', '־'), (' /', ''), ('/', '')):
        text = text.replace(old, new)
    return text
def load_pericope_verse_map():
    """Return a dict mapping pericope id -> (start verse id, end verse id)."""
    mapping = {}
    map_path = os.path.join(os.path.dirname(__file__), 'pericope_verse_map.txt')
    with open(map_path, 'r', encoding="UTF-8") as map_file:
        for raw_line in map_file:
            pericope_id, start_ref, end_ref = raw_line.strip().split(" ", maxsplit=2)
            mapping[pericope_id] = (start_ref, end_ref)
    return mapping
if __name__ == "__main__":
    # Smoke test: gloss every lemma of Gen 1:1 and write the verse's surface
    # text to test.txt.  Requires the heb_lex_tools package.
    from heb_lex_tools import HEBLEX
    glosser = HEBLEX()
    for token in get_tokens(TokenType.lemma, ChunkType.verse, "Gen.1.1"):
        print(f"{token}: '{glosser.strongs_to_gloss(token)}'")
    with open('test.txt', 'w', encoding="UTF-8") as f:
        print(pprint_text(get_tokens_by_chunk(TokenType.form, ChunkType.verse)["Gen.1.1"]), file=f)
| [
"heb_lex_tools.HEBLEX",
"enum.Enum",
"sys.exit"
] | [((51, 109), 'enum.Enum', 'enum.Enum', (['"""TokenType"""', '"""form lemma parse morph_lemma all"""'], {}), "('TokenType', 'form lemma parse morph_lemma all')\n", (60, 109), False, 'import enum\n'), ((122, 185), 'enum.Enum', 'enum.Enum', (['"""ChunkType"""', '"""book chapter verse paragraph pericope"""'], {}), "('ChunkType', 'book chapter verse paragraph pericope')\n", (131, 185), False, 'import enum\n'), ((2987, 2995), 'heb_lex_tools.HEBLEX', 'HEBLEX', ([], {}), '()\n', (2993, 2995), False, 'from heb_lex_tools import HEBLEX\n'), ((1111, 1121), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1119, 1121), False, 'import sys\n')] |
# Script to pull outcome data for most recent HLA data from Refugio
import pandas as pd
import os
# NOTE(review): all paths below are hard-coded to the author's machine;
# parameterize before running anywhere else.
os.chdir("/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/")
# from getHLApatients import cleanCohort, cleanOutcome, cleanCountry, getAltIDs
import helpers
import_path = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/expt_summary_data/HLA/2019-07-10_CVISB HLA 1_2 Genotype calls.xlsx"
# [Import HLA dataset] ----------------------------------------------------------------------------------
# skiprows=140: presumably the genotype calls begin at workbook row 141 —
# TODO confirm against the spreadsheet if it is regenerated.
df = pd.read_excel(import_path, skiprows=140)
# Bare expressions below are interactive sanity checks; they have no effect
# when the file is run as a script.
df.columns
df.head()
# [Clean IDs, add generic properties] -------------------------------------------------------------------
# helpers.interpretID normalizes the raw "Sample " values into private
# patient IDs (project helper; note the trailing space in the column name).
df['privatePatientID'] = df["Sample "].apply(helpers.interpretID)
# [Add in public IDs] ----------------------------------------------------------------------------------------------------
id_dict = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/output_data/patients/patients_2019-06-19_PRIVATE_dict.json"
ids = pd.read_json(id_dict)
ids.reset_index(inplace = True)
ids.rename(columns = {'index': 'privatePatientID'}, inplace = True)
# Merge on ID
# indicator=True adds a "_merge" column, used below to spot unmatched IDs.
df_merged = pd.merge(df, ids, how="left", indicator=True, on="privatePatientID")
df_merged._merge.value_counts()
df_merged.cohort.value_counts(dropna=False)
df_merged.outcome.value_counts(dropna=False)
df_merged[df_merged._merge == "left_only"].sort_values("privatePatientID")['privatePatientID']
df_merged.columns
# Drop the merge bookkeeping and private/raw columns before writing the CSV.
df_merged.drop(["_merge", "issue", "Outcome ", "Status ", "sID", "gender", "age", "gID", "age", "alternateIdentifier", "elisa", "countryName"], axis=1).to_csv("/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/expt_summary_data/HLA/2019-07-10_CVISB HLA 1_2 Genotype calls_withOutcomes.csv", index=False)
| [
"os.chdir",
"pandas.merge",
"pandas.read_json",
"pandas.read_excel"
] | [((98, 186), 'os.chdir', 'os.chdir', (['"""/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/"""'], {}), "(\n '/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/')\n", (106, 186), False, 'import os\n'), ((555, 595), 'pandas.read_excel', 'pd.read_excel', (['import_path'], {'skiprows': '(140)'}), '(import_path, skiprows=140)\n', (568, 595), True, 'import pandas as pd\n'), ((1065, 1086), 'pandas.read_json', 'pd.read_json', (['id_dict'], {}), '(id_dict)\n', (1077, 1086), True, 'import pandas as pd\n'), ((1214, 1282), 'pandas.merge', 'pd.merge', (['df', 'ids'], {'how': '"""left"""', 'indicator': '(True)', 'on': '"""privatePatientID"""'}), "(df, ids, how='left', indicator=True, on='privatePatientID')\n", (1222, 1282), True, 'import pandas as pd\n')] |
################################################################################
# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import yaml
class Config:
    """Application configuration backed by a YAML file.

    The parsed YAML document is exposed through item access
    (``config["key"]``).  A table of named color tuples is provided on
    ``self.colors`` for use inside the app and by callers; the tuples are
    BGR-ordered (e.g. ``red == (0, 0, 255)``).
    """

    def __init__(self, config_file_path):
        # Parse the YAML configuration file into a plain mapping.
        with open(config_file_path, "r") as config_file:
            self._config = yaml.load(config_file, Loader=yaml.FullLoader)

        # Named BGR color tuples (insertion order preserved).
        self.colors = dict(
            green=(0, 128, 0),
            white=(255, 255, 255),
            olive=(0, 128, 128),
            black=(0, 0, 0),
            navy=(128, 0, 0),
            red=(0, 0, 255),
            pink=(128, 128, 255),
            maroon=(0, 0, 128),
            grey=(128, 128, 128),
            purple=(128, 0, 128),
            yellow=(0, 255, 255),
            lime=(0, 255, 0),
            fuchsia=(255, 0, 255),
            aqua=(255, 255, 0),
            blue=(255, 0, 0),
            teal=(128, 128, 0),
            silver=(192, 192, 192),
        )

    def __getitem__(self, name):
        """Look up *name* in the parsed configuration."""
        return self._config[name]
| [
"yaml.load"
] | [((1478, 1519), 'yaml.load', 'yaml.load', (['stream'], {'Loader': 'yaml.FullLoader'}), '(stream, Loader=yaml.FullLoader)\n', (1487, 1519), False, 'import yaml\n')] |
#!/usr/bin/python
###############################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not #
# use this file except in compliance with the License. A copy of the License #
# is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0/ #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express #
# or implied. See the License for the specific language governing permis- #
# sions and limitations under the License. #
###############################################################################
import boto3
from botocore.exceptions import ClientError
from botocore.config import Config
def connect_to_s3(boto_config):
    """Return a boto3 S3 client built with the given botocore ``Config``."""
    return boto3.client('s3', config=boto_config)
def create_logging_bucket(event, context):
    """Create the S3 access-logging bucket named in ``event['BucketName']``.

    The bucket is created in ``event['AWS_REGION']``, granted write and
    read-ACP permission for the S3 log-delivery group, and given AES256
    default encryption.  Returns a message dict on success or when the
    bucket already exists; exits the process on any other failure.
    """
    boto_config = Config(retries={'mode': 'standard'})
    s3 = connect_to_s3(boto_config)

    try:
        create_args = {
            'Bucket': event['BucketName'],
            'GrantWrite': 'uri=http://acs.amazonaws.com/groups/s3/LogDelivery',
            'GrantReadACP': 'uri=http://acs.amazonaws.com/groups/s3/LogDelivery'
        }
        # us-east-1 is the default location and must not be sent as an
        # explicit LocationConstraint.
        if event['AWS_REGION'] != 'us-east-1':
            create_args['CreateBucketConfiguration'] = {
                'LocationConstraint': event['AWS_REGION']
            }
        s3.create_bucket(**create_args)

        s3.put_bucket_encryption(
            Bucket=event['BucketName'],
            ServerSideEncryptionConfiguration={
                'Rules': [
                    {
                        'ApplyServerSideEncryptionByDefault': {
                            'SSEAlgorithm': 'AES256'
                        }
                    }
                ]
            }
        )
        return {
            "output": {
                "Message": f'Bucket {event["BucketName"]} created'
            }
        }
    except ClientError as error:
        error_code = error.response['Error']['Code']
        if error_code != 'BucketAlreadyExists' and \
           error_code != 'BucketAlreadyOwnedByYou':
            exit(str(error))
        return {
            "output": {
                "Message": f'Bucket {event["BucketName"]} already exists'
            }
        }
    except Exception as e:
        print(e)
        exit(str(e))
| [
"botocore.config.Config",
"boto3.client"
] | [((1282, 1320), 'boto3.client', 'boto3.client', (['"""s3"""'], {'config': 'boto_config'}), "('s3', config=boto_config)\n", (1294, 1320), False, 'import boto3\n'), ((1383, 1419), 'botocore.config.Config', 'Config', ([], {'retries': "{'mode': 'standard'}"}), "(retries={'mode': 'standard'})\n", (1389, 1419), False, 'from botocore.config import Config\n')] |
# -*- coding: utf-8 -*-
from collections import Sequence
from datetime import datetime
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin # NOQA
from xively.models import (
Datapoint,
Datastream,
Feed,
Key,
Location,
Permission,
Resource,
Trigger,
Unit,
Waypoint,
)
class ManagerBase(object):
    """Abstract base class for all of our manager classes.

    Provides URL construction relative to a base URL — either assigned
    directly or derived from a parent resource's manager — plus small
    helpers for parsing and formatting datetimes for the Xively API.
    """

    @property
    def base_url(self):
        # An explicitly assigned base URL always wins.
        explicit = getattr(self, '_base_url', None)
        if explicit is not None:
            return explicit
        # Otherwise derive it from the parent resource's manager, if any.
        parent = getattr(self, 'parent', None)
        if parent is None:
            return
        manager = getattr(parent, '_manager', None)
        if manager is None:
            return
        return manager.url(parent.id) + '/' + self.resource

    @base_url.setter  # NOQA
    def base_url(self, base_url):
        self._base_url = base_url

    def url(self, id_or_url=None):
        """Return a url relative to the base url."""
        if not id_or_url:
            return self.base_url
        return urljoin(self.base_url + '/', str(id_or_url))

    def _parse_datetime(self, value):
        """Parse and return a datetime string from the Xively API."""
        return datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%fZ")

    def _prepare_params(self, params):
        """Prepare parameters to be passed in query strings to the Xively API.

        Returns a copy of *params* with any datetime values rendered as
        ISO 8601 strings with a trailing 'Z'.
        """
        prepared = dict(params)
        for name, value in prepared.items():
            if isinstance(value, datetime):
                prepared[name] = value.isoformat() + 'Z'
        return prepared
class FeedsManager(ManagerBase):
    """Create, update and return Feed objects.

    .. note:: This manager should live on a :class:`.XivelyAPIClient` instance
        and not instantiated directly.

    :param client: Low level :class:`.Client` instance

    Usage::

        >>> import xively
        >>> api = xively.XivelyAPIClient("API_KEY")
        >>> api.feeds.create(title="Xively Office environment")
        <xively.Feed(7021)>
        >>> api.feeds.get(7021)
        <xively.Feed(7021)>
        >>> api.feeds.update(7021, private=True)
        >>> api.feeds.delete(7021)

    """
    resource = 'feeds'
    # List of fields that can be returned from the API but not directly set.
    _readonly_fields = (
        'id',
        'feed',
        'status',
        'creator',
        'created',
        'updated',
        'version',
        'auto_feed_url',
        'product_id',
        'device_serial',
    )

    def __init__(self, client):
        self.client = client
        self.base_url = client.base_url + self.resource

    def create(self, title, description=None, website=None, email=None,
               tags=None, location=None, private=None, datastreams=None):
        """Creates a new Feed.

        :param title: A descriptive name for the feed
        :param description: A longer text description of the feed
        :param website: The URL of a website which is relevant to this feed
            e.g. home page
        :param email: A public contact email address for the creator of this
            feed
        :param tags: Tagged metadata about the environment (characters ' " and
            commas will be stripped out)
        :param location: :class:`.Location` object for this feed
        :param private: Whether the environment is private or not. Can be
            either True or False

        """
        data = {
            'version': Feed.VERSION,
            'title': title,
            'description': description,
            'website': website,
            'email': email,
            'tags': tags,
            'location': location,
            'private': private,
            'datastreams': datastreams,
        }
        feed = self._coerce_feed(data)
        response = self.client.post(self.url(), data=feed)
        response.raise_for_status()
        # The new feed's canonical URL is returned in the Location header;
        # the numeric id is its final path segment.
        location = response.headers['location']
        feed.feed = location
        feed.id = _id_from_url(location)
        return feed

    def update(self, id_or_url, **kwargs):
        """Updates an existing feed by its id or url.

        :param id_or_url: The id of a :class:`.Feed` or its URL
        :param kwargs: The fields to be updated

        """
        url = self.url(id_or_url)
        response = self.client.put(url, data=kwargs)
        response.raise_for_status()

    def list(self, page=None, per_page=None, content=None, q=None, tag=None,
             user=None, units=None, status=None, order=None, show_user=None,
             lat=None, lon=None, distance=None, distance_units=None):
        """Returns a paged list of feeds.

        Only feeds that are viewable by the authenticated account will be
        returned. The following parameters can be applied to limit or refine
        the returned feeds:

        :param page: Integer indicating which page of results you are
            requesting. Starts from 1.
        :param per_page: Integer defining how many results to return per page
            (1 to 1000)
        :param content: String parameter ('full' or 'summary') describing
            whether we want full or summary results. Full results means all
            datastream values are returned, summary just returns the
            environment meta data for each feed
        :param q: Full text search parameter. Should return any feeds matching
            this string
        :param tag: Returns feeds containing datastreams tagged with the search
            query
        :param user: Returns feeds created by the user specified
        :param units: Returns feeds containing datastreams with units specified
            by the search query
        :param status: Possible values ('live', 'frozen', or 'all'). Whether to
            search for only live feeds, only frozen feeds, or all feeds.
            Defaults to all
        :param order: Order of returned feeds. Possible values ('created_at',
            'retrieved_at', or 'relevance')
        :param show_user: Include user login and user level for each feed.
            Possible values: true, false (default)

        The following additional parameters are available which allow location
        based searching of feeds:

        :param lat: Used to find feeds located around this latitude
        :param lon: Used to find feeds located around this longitude
        :param distance: search radius
        :param distance_units: miles or kms (default)

        """
        url = self.url()
        # Only forward parameters the caller actually supplied.
        params = {k: v for k, v in (
            ('page', page),
            ('per_page', per_page),
            ('content', content),
            ('q', q),
            ('tag', tag),
            ('user', user),
            ('units', units),
            ('status', status),
            ('order', order),
            ('show_user', show_user),
            ('lat', lat),
            ('lon', lon),
            ('distance', distance),
            ('distance_units', distance_units),
        ) if v is not None}
        response = self.client.get(url, params=params)
        response.raise_for_status()
        json = response.json()
        feeds = [self._coerce_feed(feed_data) for feed_data in json['results']]
        return feeds

    def get(self, id_or_url, datastreams=None, show_user=None, start=None,
            end=None, duration=None, find_previous=None, limit=None,
            interval_type=None, interval=None):
        """Fetches and returns a feed by id or url.

        By default the most recent datastreams are returned. It is also
        possible to filter the datastreams returned with the feed by using the
        "datastreams" parameter and a list of datastream IDs.

        :param datastreams: Filter the returned datastreams
        :type datastreams: list of datastream IDs
        :param show_user: Include user login for each feed. (default: False)
        :type show_user: bool
        :param start: Defines the starting point of the query
        :param end: Defines the end point of the data returned
        :param duration: Specifies the duration of the query
        :param find_previous:
            Will also return the previous value to the date range being
            requested.
        :param limit:
            Limits the number of results to the number specified. Defaults to
            100 and has a maximum of 1000.
        :param interval_type:
            If set to "discrete" the data will be returned in fixed time
            interval format according to the inverval value supplied. If this
            is not set, the raw datapoints will be returned.
        :param interval:
            Determines what interval of data is requested and is defined in
            seconds between the datapoints. If a value is passed in which does
            not match one of these values, it is rounded up to the next value.
            See :meth:`~.DatapointsManager.history` for details.

        """
        url = self.url(id_or_url)
        # A plain string is also a Sequence; joining it would comma-separate
        # every character ("0,1" -> "0,,,1"), so only join genuine
        # collections of ids and pass pre-joined strings through untouched.
        if isinstance(datastreams, Sequence) and not isinstance(datastreams, str):
            datastreams = ','.join(datastreams)
        params = {k: v for k, v in (
            ('datastreams', datastreams),
            ('show_user', show_user),
            ('start', start),
            ('end', end),
            ('duration', duration),
            ('find_previous', find_previous),
            ('limit', limit),
            ('interval_type', interval_type),
            ('interval', interval),
        ) if v is not None}
        params = self._prepare_params(params)
        response = self.client.get(url, params=params)
        response.raise_for_status()
        data = response.json()
        feed = self._coerce_feed(data)
        return feed

    def delete(self, id_or_url):
        """Delete a feed by id or url.

        .. WARNING:: This is final and cannot be undone.

        :param id_or_url: The feed ID or its URL

        """
        url = self.url(id_or_url)
        response = self.client.delete(url)
        response.raise_for_status()

    def _coerce_feed(self, feed_data):
        """Returns a Feed object from a mapping object (dict)."""
        datastreams_data = feed_data.pop('datastreams', None)
        location_data = feed_data.pop('location', None)
        # Strip out the readonly fields and manually set later.
        readonly = {f: feed_data.pop(f)
                    for f in self._readonly_fields
                    if f in feed_data}
        feed = Feed(**feed_data)
        feed._manager = self
        feed.id = readonly.pop('id', None)
        feed.feed = readonly.pop('feed', None) or self.url(feed.id)
        # Explicitely set the readonly fields we stripped out earlier.
        for name, value in readonly.items():
            setattr(feed, name, value)
        if datastreams_data:
            feed._datastreams_manager = DatastreamsManager(feed)
            feed.datastreams = self._coerce_datastreams(
                datastreams_data, feed._datastreams_manager)
        if location_data:
            location = self._coerce_location(location_data)
        else:
            location = Location()
        feed._data['location'] = location
        return feed

    def _coerce_datastreams(self, datastreams_data, datastreams_manager):
        """Returns Datastream objects from the data given."""
        datastreams = []
        for data in datastreams_data:
            datastream = datastreams_manager._coerce_datastream(data)
            datastreams.append(datastream)
        return datastreams

    def _coerce_location(self, instance):
        """Returns a Location object, converted from instance if required."""
        if isinstance(instance, Location):
            location = instance
        else:
            location_data = dict(**instance)
            waypoints_data = location_data.pop('waypoints', None)
            if waypoints_data is not None:
                waypoints = self._coerce_waypoints(waypoints_data)
                location_data['waypoints'] = waypoints
            location = Location(**location_data)
        return location

    def _coerce_waypoints(self, waypoints_data):
        """Returns a list of Waypoint objects from the given waypoint data."""
        waypoints = []
        for data in waypoints_data:
            at = self._parse_datetime(data['at'])
            data = {k: v for k, v in data.items() if k != 'at'}
            waypoint = Waypoint(at=at, **data)
            waypoints.append(waypoint)
        return waypoints
class DatastreamsManager(Sequence, ManagerBase):
    """Create, update and return Datastream objects.

    Instances of this class hang off of :class:`.Feed` objects to manage
    datastreams of that feed.

    A list of datastreams can be retrieved along with the feed which can be
    accessed via this instance as a sequence.

    :param feed: A :class:`.Feed` instance.

    Usage::

        >>> import xively
        >>> api = xively.XivelyAPIClient("API_KEY")
        >>> feed = api.feeds.get(7021)
        >>> list(feed.datastreams)  # doctest: +IGNORE_UNICODE
        [<xively.Datastream('3')>, <xively.Datastream('4')>]
        >>> feed.datastreams.create("1")
        <xively.Datastream('1')>

    """
    resource = 'datastreams'
    # List of fields that can be returned from the API but not directly set.
    _readonly_fields = (
        'at',
        'current_value',
    )

    def __init__(self, feed):
        self.parent = feed
        feed_manager = getattr(feed, '_manager', None)
        self.client = getattr(feed_manager, 'client', None)

    def __contains__(self, value):
        # Fixed: the original read `self.datastreams['datastreams']`, an
        # attribute this class never defines, so membership tests always
        # raised AttributeError.  Use the same backing list as
        # __getitem__/__len__.
        return value in self._datastreams

    def __getitem__(self, item):
        return self._datastreams[item]

    def __len__(self):
        return len(self._datastreams)

    @property
    def _datastreams(self):
        # The parent feed's data dict holds the authoritative list.
        return self.parent._data.setdefault('datastreams', [])

    def create(self, id, current_value=None, tags=None, unit=None,
               min_value=None, max_value=None, at=None):
        """Creates a new datastream on a feed.

        :param id: The ID of the datastream
        :param current_value: The current value of the datastream
        :param tags: Tagged metadata about the datastream
        :param unit: The :class:`.Unit` for this datastream
        :param min_value: The minimum value since the last reset
        :param max_value: The maximum value since the last reset
        :param at: The timestamp of the current value
        :returns: A :class:`.Datastream` object

        """
        datastream_data = dict(
            id=id,
            current_value=current_value,
            tags=tags,
            unit=unit,
            min_value=min_value,
            max_value=max_value,
            at=at)
        datastream = self._coerce_datastream(datastream_data)
        # The API expects datastreams wrapped in a versioned envelope.
        data = {
            'version': self.parent.version,
            'datastreams': [datastream],
        }
        response = self.client.post(self.url(), data=data)
        response.raise_for_status()
        return datastream

    def update(self, datastream_id, **kwargs):
        """Updates a feeds datastream by id.

        :param datastream_id: The ID of the datastream to update
        :param kwargs: The datastream fields to be updated

        """
        url = self.url(datastream_id)
        response = self.client.put(url, data=kwargs)
        response.raise_for_status()

    def list(self, datastreams=None, show_user=None):
        """Returns a list of datastreams for the parent feed object.

        :param datastreams: Filter the returned datastreams
        :type datastreams: list of datastream IDs
        :param show_user: Include user login for each feed (default: False)
        :type show_user: bool

        """
        # '..' addresses the parent feed resource, whose representation
        # carries the datastreams.
        url = self.url('..')
        params = {k: v for k, v in (
            ('datastreams', datastreams),
            ('show_user', show_user),
        ) if v is not None}
        response = self.client.get(url, params=params)
        response.raise_for_status()
        json = response.json()
        for datastream_data in json.get('datastreams', []):
            datastream = self._coerce_datastream(datastream_data)
            yield datastream

    def get(self, id_or_url, start=None, end=None, duration=None,
            find_previous=None, limit=None, interval_type=None, interval=None):
        """Fetches and returns a feed's datastream by its id.

        If start, end or duration are given, also returns Datapoints for that
        period.

        :param id_or_url: The ID of the datastream to retrieve or its URL
        :param start: Defines the starting point of the query
        :param end: Defines the end point of the data returned
        :param duration: Specifies the duration of the query
        :param find_previous:
            Will also return the previous value to the date range being
            requested.
        :param limit:
            Limits the number of results to the number specified. Defaults to
            100 and has a maximum of 1000.
        :param interval_type:
            If set to "discrete" the data will be returned in fixed time
            interval format according to the inverval value supplied. If this
            is not set, the raw datapoints will be returned.
        :param interval:
            Determines what interval of data is requested and is defined in
            seconds between the datapoints. If a value is passed in which does
            not match one of these values, it is rounded up to the next value.
            See :meth:`~.DatapointsManager.history` for details.

        """
        url = self.url(id_or_url)
        params = {k: v for k, v in (
            ('start', start),
            ('end', end),
            ('duration', duration),
            ('find_previous', find_previous),
            ('limit', limit),
            ('interval_type', interval_type),
            ('interval', interval),
        ) if v is not None}
        params = self._prepare_params(params)
        response = self.client.get(url, params=params)
        response.raise_for_status()
        data = response.json()
        datastream = self._coerce_datastream(data)
        return datastream

    def delete(self, id_or_url):
        """Delete a datastream by id or url.

        .. WARNING:: This is final and cannot be undone.

        :param id_or_url: The datastream ID or its URL

        """
        url = self.url(id_or_url)
        response = self.client.delete(url)
        response.raise_for_status()

    def _coerce_datapoints(self, datapoints_manager, datapoints_data):
        """Returns Datapoints objects from a list of mapping objects (dict)."""
        datapoints = []
        for data in datapoints_data:
            data['at'] = self._parse_datetime(data['at'])
            datapoint = datapoints_manager._coerce_datapoint(data)
            datapoints.append(datapoint)
        return datapoints

    def _coerce_unit(self, instance):
        """Returns a Unit object, converted from instance if required."""
        if isinstance(instance, Unit):
            unit = instance
        else:
            instance_data = dict(**instance)
            unit = Unit(**instance_data)
        return unit

    def _coerce_datastream(self, d):
        """Returns a Datastream object from a mapping object (dict)."""
        if isinstance(d, dict):
            datapoints_data = d.pop('datapoints', None)
            unit_data = d.pop('unit', None)
            # Remove version, part of Feed not Datastream
            d.pop('version', None)
            # Strip out the readonly fields and manually set later.
            readonly = {f: d.pop(f) for f in self._readonly_fields if f in d}
            datastream = Datastream(**d)
            # Explicitely set the readonly fields we stripped out earlier.
            for name, value in readonly.items():
                setattr(datastream, name, value)
            if datapoints_data:
                datapoints = self._coerce_datapoints(
                    datastream.datapoints, datapoints_data)
                datastream.datapoints = datapoints
            if unit_data:
                unit = self._coerce_unit(unit_data)
                datastream.unit = unit
        elif isinstance(d, Datastream):
            datastream = d
        # NOTE(review): a value that is neither a dict nor a Datastream
        # leaves `datastream` unbound and raises NameError below — verify
        # whether callers can ever pass anything else.
        datastream._manager = self
        return datastream
class DatapointsManager(Sequence, ManagerBase):
    """Manage datapoints of a datastream.

    A list of :class:`.Datapoint` objects can be retrieved along with the
    :class:`.Datastream` (or :class:`.Feed`) which can be accessed via this
    instance as a sequence.

    :param datastream: A :class:`.Datastream` instance.

    """
    resource = 'datapoints'

    def __init__(self, datastream):
        self.parent = datastream
        datastream_manager = getattr(datastream, '_manager', None)
        self.client = getattr(datastream_manager, 'client', None)

    def __contains__(self, value):
        # Fixed: the original read `self.datapoints['datapoints']`, an
        # attribute this class never defines, so membership tests always
        # raised AttributeError.  Use the same backing list as
        # __getitem__/__len__.
        return value in self._datapoints

    def __getitem__(self, item):
        return self._datapoints[item]

    def __len__(self):
        return len(self._datapoints)

    @property
    def _datapoints(self):
        # setdefault (matching DatastreamsManager._datastreams) so that a
        # datastream with no datapoints yet behaves as an empty sequence
        # instead of raising KeyError.
        return self.parent._data.setdefault('datapoints', [])

    def create(self, value, at=None):
        """Create a single new datapoint for this datastream.

        :param at: The timestamp of the datapoint (default: datetime.now())
        :param value: The value at this time

        To create multiple datapoints at the same time do the following
        instead:

        .. note:: You can use ISO8601 formatted strings instead of datetime
            objects when dealing with the API.

        >>> import xively
        >>> api = xively.XivelyAPIClient("API_KEY")
        >>> feed = api.feeds.get(7021)
        >>> datastream = feed.datastreams[0]
        >>> # First create the datapoints.
        >>> datastream.datapoints = [
        ...     xively.Datapoint(at="2010-05-20T11:01:43Z", value=294),
        ...     xively.Datapoint(at="2010-05-20T11:01:44Z", value=295),
        ...     xively.Datapoint(at="2010-05-20T11:01:45Z", value=296),
        ...     xively.Datapoint(at="2010-05-20T11:01:46Z", value=297),
        ... ]
        >>> # Then send them to the server.
        >>> datastream.update(fields='datapoints')

        """
        # NOTE(review): datetime.now() is naive local time — the API appears
        # to expect UTC ('Z' suffix elsewhere); confirm callers pass aware
        # or UTC timestamps.
        at = at or datetime.now()
        datapoint = Datapoint(at, value)
        payload = {'datapoints': [datapoint]}
        response = self.client.post(self.url(), data=payload)
        response.raise_for_status()
        return datapoint

    def update(self, at, value):
        """Update the value of a datapiont at a given timestamp.

        :param at: The timestamp of a value to change
        :param value: The value to change

        .. note:: A datapoint at the given time must already exist.

        """
        url = "{}/{}Z".format(self.url(), at.isoformat())
        payload = {'value': value}
        response = self.client.put(url, data=payload)
        response.raise_for_status()

    def get(self, at):
        """Fetch and return a :class:`.Datapoint` at the given timestamp.

        :param at: The timestamp to return a datapoint for

        """
        url = "{}/{}Z".format(self.url(), at.isoformat())
        response = self.client.get(url)
        response.raise_for_status()
        data = response.json()
        data['at'] = self._parse_datetime(data['at'])
        return self._coerce_datapoint(data)

    def history(self, start=None, end=None, duration=None, find_previous=None,
                limit=None, interval_type=None, interval=None):
        """Fetch and return a list of datapoints in a given timerange.

        :param start: Defines the starting point of the query
        :param end: Defines the end point of the data returned
        :param duration: Specifies the duration of the query
        :param find_previous:
            Will also return the previous value to the date range being
            requested.
        :param limit:
            Limits the number of results to the number specified. Defaults to
            100 and has a maximum of 1000.
        :param interval_type:
            If set to "discrete" the data will be returned in fixed time
            interval format according to the inverval value supplied. If this
            is not set, the raw datapoints will be returned.
        :param interval:
            Determines what interval of data is requested and is defined in
            seconds between the datapoints. If a value is passed in which does
            not match one of these values, it is rounded up to the next value.

        .. note::
            ``find_previous`` is useful for any graphing because if you
            want to draw a graph of the date range you specified you would end
            up with a small gap until the first value.

        .. note::
            In order to paginate through the data use the last timestamp
            returned as the start of the next query.

        .. note::
            The maximum number of datapoints able to be returned from the API
            in one query is 1000. If you need more than 1000 datapoints for
            a specific period you should use the start and end times to split
            them up into smaller chunks.

        The valid time units are::

            * seconds
            * minute(s)
            * hour(s)
            * day(s)
            * week(s)
            * month(s)
            * year(s)

        The acceptable intervals are currently:

        ===== ============================== ==========================
        Value Description                    Maximum range in one query
        ===== ============================== ==========================
        0     Every datapoint stored         6 hours
        30    One datapoint every 30 seconds 12 hours
        60    One datapoint every minute     24 hours
        300   One datapoint every 5 minutes  5 days
        900   One datapoint every 15 minutes 14 days
        1800  One datapoint per 30 minutes   31 days
        3600  One datapoint per hour         31 days
        10800 One datapoint per three hours  90 days
        21600 One datapoint per six hours    180 days
        43200 One datapoint per twelve hours 1 year
        86400 One datapoint per day          1 year
        ===== ============================== ==========================

        """
        # History lives on the parent datastream resource, not /datapoints.
        url = self.url('..').rstrip('/')
        params = {k: v for k, v in (
            ('start', start),
            ('end', end),
            ('duration', duration),
            ('find_previous', find_previous),
            ('limit', limit),
            ('interval_type', interval_type),
            ('interval', interval),
        ) if v is not None}
        params = self._prepare_params(params)
        response = self.client.get(url, params=params)
        response.raise_for_status()
        data = response.json()
        for datapoint_data in data.get('datapoints', []):
            datapoint_data['at'] = self._parse_datetime(datapoint_data['at'])
            yield self._coerce_datapoint(datapoint_data)

    def delete(self, at=None, start=None, end=None, duration=None):
        """Delete a datapoint or a range of datapoints.

        :param at: A timestamp of a single datapoint to delete
        :param start: Defines the starting point of the query
        :param end: Defines the end point of the datapoints deleted
        :param duration: Specifies the duration of the query

        By providing a start and end timestamp as query parameters, you may
        remove all datapoints that lie between those dates. If you send your
        request with only a start timestamp, all datapoints after the value
        will be removed. Providing an end timestamp will remove all datapoints
        prior to the supplied value.

        Additionally, this method supports a duration parameter (e.g.
        ``duration="3hours"``). Providing a `start` and a `duration` will
        delete all datapoints between the `start` and (`start` + `duration`).
        Providing `end` will delete all datapoints between (`end` - `duration`)
        and `end`. The formatting of the `duration` parameter is the same as is
        used in the :meth:`.history` method.

        .. warning: This is final and cannot be undone.

        """
        url = self.url()
        params = {k: v for k, v in (
            ('start', start),
            ('end', end),
            ('duration', duration),
        ) if v is not None}
        # NOTE(review): when `at` is given together with range parameters,
        # the range params are forwarded without _prepare_params — confirm
        # whether mixing `at` with start/end/duration is meant to be valid.
        if at:
            url = "{}/{}Z".format(url, at.isoformat())
        elif params:
            params = self._prepare_params(params)
        response = self.client.delete(url, params=params)
        response.raise_for_status()

    def _coerce_datapoint(self, d):
        if isinstance(d, Datapoint):
            datapoint = self._clone_datapoint(d)
        elif isinstance(d, dict):
            datapoint = Datapoint(**d)
        datapoint._manager = self
        return datapoint

    def _clone_datapoint(self, d):
        return Datapoint(**d._data)
class TriggersManager(ManagerBase):
"""Manage :class:`.Trigger`.
This manager should live on a :class:`.XivelyAPIClient` instance and not
instantiated directly.
:param client: Low level :class:`.Client` instance
Usage::
>>> import xively
>>> api = xively.XivelyAPIClient("API_KEY")
>>> api.triggers.create(
... environment_id=8470, stream_id="0",
... url="http://www.postbin.org/1ijyltn",
... trigger_type='lt', threshold_value="15.0")
<xively.Trigger(3)>
"""
resource = 'triggers'
_readonly_fields = (
'id',
'notified_at',
'user',
)
def __init__(self, client):
self.client = client
self.base_url = client.base_url + self.resource
def create(self, environment_id, stream_id, url, trigger_type,
threshold_value=None):
"""Create a new :class:`.Trigger`.
:param environment_id: An ID of a :class:`.Feed`
:param stream_id: An ID of a :class:`.Datastream`
:param url: The URL to POST events to
:param trigger_type: The type of trigger (from below)
:param threshold_value: The threshold at which the trigger fires
:returns: A new :class:`.Trigger` object.
Possible values for ``trigger_type`` are:
======= ================================
gt greater than
gte greater than or equal to
lt less than
lte less than or equal to
eq equal to
change any change
frozen no updates for 15 minutes
live updated again after being frozen
======= ================================
"""
data = {
'environment_id': environment_id,
'stream_id': stream_id,
'url': url,
'trigger_type': trigger_type,
'threshold_value': threshold_value,
}
trigger = self._coerce_trigger(data)
response = self.client.post(self.url(), data=trigger)
response.raise_for_status()
trigger._manager = self
location = response.headers['location']
trigger._data['id'] = int(location.rsplit('/', 1)[1])
return trigger
def get(self, id_or_url):
"""Fetch and return an existing trigger.
:param id_or_url: The ID of the trigger or its URL
"""
url = self.url(id_or_url)
response = self.client.get(url)
response.raise_for_status()
data = response.json()
data.pop('id')
notified_at = data.pop('notified_at', None)
user = data.pop('user', None)
trigger = self._coerce_trigger(data)
trigger._data['id'] = id_or_url
if notified_at:
trigger._data['notified_at'] = self._parse_datetime(notified_at)
if user:
trigger._data['user'] = user
trigger._manager = self
return trigger
def update(self, id_or_url, **kwargs):
"""Update an existing trigger.
:param id_or_url: The ID of the :class:`.Trigger` to update or its URL
:param kwargs: The fields to be updated
"""
url = self.url(id_or_url)
response = self.client.put(url, data=kwargs)
response.raise_for_status()
def list(self, feed_id=None):
"""Return a list of triggers.
:param feed_id: Filter the returned triggers to only include those on
datastreams of the specified feed.
"""
url = self.url()
params = {k: v for k, v in (
('feed_id', feed_id),
) if v is not None}
response = self.client.get(url, params=params)
response.raise_for_status()
json = response.json()
for data in json:
trigger = self._coerce_trigger(data)
trigger._manager = self
yield trigger
def delete(self, id_or_url):
"""Delete a trigger by id or url.
.. WARNING:: This is final and cannot be undone.
:param id_or_url: The datastream ID or its URL
"""
url = self.url(id_or_url)
response = self.client.delete(url)
response.raise_for_status()
def _coerce_trigger(self, d):
# Strip out the readonly fields and manually set later.
readonly = {f: d.pop(f) for f in self._readonly_fields if f in d}
trigger = Trigger(**d)
# Explicitely set the readonly fields we stripped out earlier.
for name, value in readonly.items():
setattr(trigger, name, value)
return trigger
class KeysManager(ManagerBase):
"""Manage keys their permissions and restrict by resource.
This manager should live on a :class:`.XivelyAPIClient` instance and not
instantiated directly.
:param client: Low level :class:`.Client` instance
Usage::
>>> import xively
>>> api = xively.XivelyAPIClient("API_KEY")
>>> api.keys.create(
... label="sharing key",
... private_access=True,
... permissions=[
... xively.Permission(
... access_methods=["put"],
... source_ip="172.16.17.32",
... resources=[
... xively.Resource(feed_id=504),
... ]),
... xively.Permission(access_methods=["get"])
... ])
<xively.Key('sharing key')>
"""
resource = 'keys'
def __init__(self, client):
self.client = client
self.base_url = client.base_url + self.resource
def create(self, label, permissions, expires_at=None, private_access=None):
"""Create a new API key.
:param label: A label by which the key can be referenced
:param permissions: Collection of Permission objects controlling the
access level
:param expires_at: Expiry date for the key after which it won't work
:param private_access: Flag that indicates whether this key can access
private resources belonging to the user
"""
data = dict(label=label, permissions=permissions,
expires_at=expires_at, private_access=private_access)
key = self._coerce_key(data)
response = self.client.post(self.url(), data={'key': key})
response.raise_for_status()
location = response.headers['Location']
key.api_key = _id_from_url(location)
return key
def list(self, feed_id=None):
"""List all API keys for this account or for the given feed.
:param feed_id: Returns api keys limited to that feed and its
datastreams.
"""
url = self.url()
params = {}
if feed_id is not None:
params['feed_id'] = feed_id
response = self.client.get(url, params=params)
response.raise_for_status()
json = response.json()
for data in json['keys']:
key = self._coerce_key(data)
yield key
def get(self, key_id):
"""Fetch and return an API key by its id.
:param key_id: The ID of the key to get.
.. note: Unless a master API key is used, the only key that can be read
is this key. A master key is a non-resource restricted,
private key, which has permissions to perform all HTTP
methods.
"""
url = self.url(key_id)
response = self.client.get(url)
response.raise_for_status()
data = response.json()
key = self._coerce_key(data['key'])
return key
def delete(self, key_id):
"""Delete the specified key.
:param key_id: The key ID
.. note: You must use a master key to delete an API Key. A master key
is a non-resource restricted, private key, which has
permissions to perform all HTTP methods.
"""
url = self.url(key_id)
response = self.client.delete(url)
response.raise_for_status()
def _coerce_key(self, data):
api_key = data.get('api_key')
permissions_data = data.get('permissions', [])
data = {k: v for (k, v) in data.items() if k != 'api_key'}
permissions = []
for permission_data in permissions_data:
permission = self._coerce_permission(permission_data)
permissions.append(permission)
data['permissions'] = permissions
key = Key(**data)
key._data['api_key'] = api_key
key._manager = self
return key
def _coerce_permission(self, data):
if isinstance(data, Permission):
return data
resources_data = data.get('resources')
data = {k: v for (k, v) in data.items() if k != 'resources'}
permission = Permission(**data)
if resources_data:
resources = []
for resource_data in resources_data:
resource = self._coerce_resource(resource_data)
resources.append(resource)
permission._data['resources'] = resources
return permission
def _coerce_resource(self, data):
resource = Resource(**data)
return resource
def _id_from_url(url):
"""Return the last part or a url
>>> _id_from_url('http://api.xively.com/v2/feeds/1234')
'1234'
"""
id = url.rsplit('/', 1)[1]
return id
| [
"xively.models.Feed",
"xively.models.Waypoint",
"xively.models.Resource",
"datetime.datetime.strptime",
"xively.models.Datapoint",
"xively.models.Key",
"xively.models.Trigger",
"xively.models.Unit",
"xively.models.Datastream",
"datetime.datetime.now",
"xively.models.Location",
"xively.models.P... | [((1288, 1337), 'datetime.datetime.strptime', 'datetime.strptime', (['value', '"""%Y-%m-%dT%H:%M:%S.%fZ"""'], {}), "(value, '%Y-%m-%dT%H:%M:%S.%fZ')\n", (1305, 1337), False, 'from datetime import datetime\n'), ((10483, 10500), 'xively.models.Feed', 'Feed', ([], {}), '(**feed_data)\n', (10487, 10500), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((22438, 22458), 'xively.models.Datapoint', 'Datapoint', (['at', 'value'], {}), '(at, value)\n', (22447, 22458), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((29123, 29143), 'xively.models.Datapoint', 'Datapoint', ([], {}), '(**d._data)\n', (29132, 29143), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((33568, 33580), 'xively.models.Trigger', 'Trigger', ([], {}), '(**d)\n', (33575, 33580), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((37714, 37725), 'xively.models.Key', 'Key', ([], {}), '(**data)\n', (37717, 37725), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((38055, 38073), 'xively.models.Permission', 'Permission', ([], {}), '(**data)\n', (38065, 38073), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((38422, 38438), 'xively.models.Resource', 'Resource', ([], {}), '(**data)\n', (38430, 38438), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((11131, 11141), 'xively.models.Location', 'Location', ([], {}), '()\n', (11139, 11141), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, 
Trigger, Unit, Waypoint\n'), ((12053, 12078), 'xively.models.Location', 'Location', ([], {}), '(**location_data)\n', (12061, 12078), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((12428, 12451), 'xively.models.Waypoint', 'Waypoint', ([], {'at': 'at'}), '(at=at, **data)\n', (12436, 12451), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((19227, 19248), 'xively.models.Unit', 'Unit', ([], {}), '(**instance_data)\n', (19231, 19248), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((19775, 19790), 'xively.models.Datastream', 'Datastream', ([], {}), '(**d)\n', (19785, 19790), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n'), ((22403, 22417), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22415, 22417), False, 'from datetime import datetime\n'), ((28998, 29012), 'xively.models.Datapoint', 'Datapoint', ([], {}), '(**d)\n', (29007, 29012), False, 'from xively.models import Datapoint, Datastream, Feed, Key, Location, Permission, Resource, Trigger, Unit, Waypoint\n')] |
from django.shortcuts import render
from django.views.generic import CreateView
from django.urls import reverse_lazy
from dal import autocomplete
# from .models import Country, Person
# from .forms import PersonForm
from .models import Country
from .forms import CountryForm
# Create your views here.
# class PersonCreateView(CreateView):
# model = Person
# form_class = PersonForm
# template_name = 'person_form.html'
# view_name = 'create-person'
# success_url = reverse_lazy(view_name)
# Create your views here.
class CountryCreateView(CreateView):
model = Country
form_class = CountryForm
template_name = 'person_form.html'
view_name = 'create-country'
success_url = reverse_lazy(view_name)
class CountryAutocompleteView(autocomplete.Select2QuerySetView):
def get_queryset(self):
# model = Country
# paginate_by = 50
# ordering = ['name']
# if self.request.user.is_authenticated:
# return Country.objects.none()
qs = Country.objects.all()
# country = self.forwarded.get('country', None)
if self.q:
qs = qs.filter(name__icontains=self.q)
# qs = qs.filter(name__icontains=self.q)
return qs
def get_create_option(self, context, q):
"""Form the correct create_option to append to results."""
create_option = []
display_create_option = False
if self.create_field and q:
page_obj = context.get('page_obj', None)
if page_obj is None or page_obj.number == 1:
display_create_option = True
# Don't offer to create a new option if a
# case-insensitive) identical one already exists
existing_options = (self.get_result_label(result).lower()
for result in context['object_list'])
if q.lower() in existing_options:
display_create_option = False
print("RANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN")
if display_create_option and self.has_add_permission(self.request):
create_option = [{
'id': q,
'text': ('"%(new_value)s"') % {'new_value': q},
'create_id': True,
}]
print("create_optionNNNNNNNN :", create_option)
return create_option
def has_add_permission(self, request):
print("ORRRRRRRRRRRRRRRRRRRRRRRR")
return True | [
"django.urls.reverse_lazy"
] | [((714, 737), 'django.urls.reverse_lazy', 'reverse_lazy', (['view_name'], {}), '(view_name)\n', (726, 737), False, 'from django.urls import reverse_lazy\n')] |
#! /usr/bin/python3
from library_traverser import traverse_module, MemberVisitor, MemberInfoExtractor
import re
import inspect
import pymongo
import flask
import pkgutil
import importlib
sub_modules = [m for m in pkgutil.iter_modules(flask.__path__) if m[2]]
for m in sub_modules:
importlib.import_module("flask.%s" % m[1], m)
# From tensorflow source
do_not_descend_map = {
}
prefix_black_list = {
".".join([prefix, name])
for prefix in do_not_descend_map
for name in do_not_descend_map[prefix]
}
class FlaskMemberInfoExtractor(MemberInfoExtractor):
_args_doc_regex = re.compile(
r"((\n:param (\w+): ([\S ]+(\n\ {16}[\S ]+)*))+)")
_arg_item_doc_regex = re.compile(
r":param (\w+): ([\S ]+(\n\ {16}[\S ]+)*)")
# _returns_doc_regex = re.compile(r"(Returns:\n)((\ {2}[\S\ ]+\n)+)")
# _raises_doc_regex = re.compile(r"(Raises:\n)((\ {2}[\S\ ]+\n)+)")
def extract_args_doc(self, doc):
arg_doc_match = next(self._args_doc_regex.finditer(doc or ""), None)
if not arg_doc_match:
return {}
arg_doc = arg_doc_match.group(1)
return {
match.group(1): match.group(2)
for match in self._arg_item_doc_regex.finditer(arg_doc)
}
def extract_returns_doc(self, doc):
return None
# match = next(self._returns_doc_regex.finditer(doc or ""), None)
# return match.group(2) if match else None
def extract_raise_doc(self, doc):
return None
# match = next(self._raises_doc_regex.finditer(doc or ""), None)
# return match.group(2) if match else None
def is_deprecated(self, name, member):
doc = inspect.getdoc(member)
return False if not doc else "DEPRECATED" in doc
mongn_client = pymongo.MongoClient(host="172.17.0.2")
db = mongn_client.get_database("DeepLearningAPIEvoluation")
collection = db.get_collection("Flask_APIs_%s" % flask.__version__)
collection.drop()
def insert_db(data):
collection.insert(data,check_keys=False)
extractor = FlaskMemberInfoExtractor()
visitor = MemberVisitor(insert_db, inspect, extractor)
traverse_module(("flask", flask), visitor, "flask", prefix_black_list)
mongn_client.close()
| [
"inspect.getdoc",
"importlib.import_module",
"re.compile",
"library_traverser.MemberVisitor",
"library_traverser.traverse_module",
"pymongo.MongoClient",
"pkgutil.iter_modules"
] | [((1774, 1812), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {'host': '"""172.17.0.2"""'}), "(host='172.17.0.2')\n", (1793, 1812), False, 'import pymongo\n'), ((2078, 2122), 'library_traverser.MemberVisitor', 'MemberVisitor', (['insert_db', 'inspect', 'extractor'], {}), '(insert_db, inspect, extractor)\n', (2091, 2122), False, 'from library_traverser import traverse_module, MemberVisitor, MemberInfoExtractor\n'), ((2124, 2194), 'library_traverser.traverse_module', 'traverse_module', (["('flask', flask)", 'visitor', '"""flask"""', 'prefix_black_list'], {}), "(('flask', flask), visitor, 'flask', prefix_black_list)\n", (2139, 2194), False, 'from library_traverser import traverse_module, MemberVisitor, MemberInfoExtractor\n'), ((289, 334), 'importlib.import_module', 'importlib.import_module', (["('flask.%s' % m[1])", 'm'], {}), "('flask.%s' % m[1], m)\n", (312, 334), False, 'import importlib\n'), ((597, 663), 're.compile', 're.compile', (['"""((\\\\n:param (\\\\w+): ([\\\\S ]+(\\\\n\\\\ {16}[\\\\S ]+)*))+)"""'], {}), "('((\\\\n:param (\\\\w+): ([\\\\S ]+(\\\\n\\\\ {16}[\\\\S ]+)*))+)')\n", (607, 663), False, 'import re\n'), ((694, 752), 're.compile', 're.compile', (['""":param (\\\\w+): ([\\\\S ]+(\\\\n\\\\ {16}[\\\\S ]+)*)"""'], {}), "(':param (\\\\w+): ([\\\\S ]+(\\\\n\\\\ {16}[\\\\S ]+)*)')\n", (704, 752), False, 'import re\n'), ((216, 252), 'pkgutil.iter_modules', 'pkgutil.iter_modules', (['flask.__path__'], {}), '(flask.__path__)\n', (236, 252), False, 'import pkgutil\n'), ((1678, 1700), 'inspect.getdoc', 'inspect.getdoc', (['member'], {}), '(member)\n', (1692, 1700), False, 'import inspect\n')] |
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import dragon as dg
from dragon.vm.torch.tensor import *
from dragon.vm.torch.c_api import device as _Device
def UnifyDevices(tensors, key='Inputs'):
types, indices = [t.device.type for t in tensors], [0]
if len(set(types)) != 1:
raise ValueError('{} from different device type: [{}].'
.format(key, ', '.join(types)))
if types[0] == 'cuda':
indices = [t.device.index for t in tensors]
if len(set(indices)) != 1:
raise ValueError('{} from different cuda device: [{}].'
.format(key, ', '.join([str(d) for d in indices])))
return _Device(types[0], indices[0])
def MakeDevice(inputs=(), outputs=()):
# Case #1: [], [] -> CPU
# Case #2: [...], [] -> Refer Inputs
# Case #3: [], [...] -> Refer Outputs
# Case #4: [...], [...] -> Refer Outputs
if len(outputs) > 0: return UnifyDevices(outputs, 'Outputs')
if len(inputs) > 0: return UnifyDevices(inputs, 'Inputs')
return _Device()
def WrapScalar(scalar, dtype, device):
# We use (DType + Value) to hash different scalars
# Setting a Tensor with same DType and shape will not deconstruct it
if 'float' in dtype: scalar = float(scalar)
if 'int' in dtype: scalar = int(scalar)
name = '/share/scalar/{}/{}'.format(dtype, str(scalar))
if not dg.workspace.HasTensor(name):
dg.workspace.FeedTensor(name, np.array(scalar, dtype=dtype))
t = Tensor(name=name, dtype=dtype, device=device, own_storage=False)
t.requires_grad = False
return t | [
"dragon.workspace.HasTensor",
"numpy.array",
"dragon.vm.torch.c_api.device"
] | [((1121, 1150), 'dragon.vm.torch.c_api.device', '_Device', (['types[0]', 'indices[0]'], {}), '(types[0], indices[0])\n', (1128, 1150), True, 'from dragon.vm.torch.c_api import device as _Device\n'), ((1487, 1496), 'dragon.vm.torch.c_api.device', '_Device', ([], {}), '()\n', (1494, 1496), True, 'from dragon.vm.torch.c_api import device as _Device\n'), ((1829, 1857), 'dragon.workspace.HasTensor', 'dg.workspace.HasTensor', (['name'], {}), '(name)\n', (1851, 1857), True, 'import dragon as dg\n'), ((1897, 1926), 'numpy.array', 'np.array', (['scalar'], {'dtype': 'dtype'}), '(scalar, dtype=dtype)\n', (1905, 1926), True, 'import numpy as np\n')] |
from Scripts.Resources import Bot, timer
from Scripts.Classes import Markup, button, User
from Scripts.Additional import all_groups, yes_no_markup, get_info
from Scripts.MainMenu import send_student_menu
from DataBase.DataBaseWork import get_query
bot = Bot()
def confirm_user(update, user, params):
answer, student_id = params
student = User(student_id)
if answer == 'no':
student.action = 'ban'
bot.delete_message(user.id, update.message_id, 'Отказано')
else:
if answer == 'headman':
get_query('UPDATE groups SET headman_id = ? WHERE id = ?;', values=(student_id, student.group_id))
all_groups[student.group_id] = *all_groups[student.group_id][:-1], int(student_id)
student.action = None
send_student_menu(student)
bot.delete_message(user.id, update.message_id, 'Принят')
student.update()
def add_group_id(update, user, params):
bot.answer_callback_query(update.query_id, 'Запрос на добавление в группу послан', True)
bot.edit_message(user, update.message_id, 'Ожидай подтверждения ⏳')
_, group_id, send_id, need_new_admin = params
user.group_id = int(group_id)
user.action = 'wait'
user.update()
markup = yes_no_markup('reg', (user.id, ))
if need_new_admin == 'True':
markup.row([button('Сделать старостой', 'ans=reg=headman=%d' % user.id)])
text = '<pre>Подтвердите добавление </pre>\n%s %s\nВ группу <b>%s</b>' % (get_info(bot, user.id), user.name,
all_groups[user.group_id][1])
bot.send_message(User(send_id), text, markup, clear_markup=False)
def first_message(user):
bot.send_message(user, 'Пришли свою фамилию')
user.action = 'registration'
user.update()
def choose_group(user):
markup = Markup()
for group_id, name, teacher_id, headman_id in all_groups.values():
markup.add([button(name, 'g_id=%d=%d=%s' % (group_id, headman_id or teacher_id, headman_id is None))])
bot.send_message(user, 'Выбери свою группу', markup)
def registration(update, user):
if not user.name:
if not update.text:
bot.send_message(user, 'Мне нужна твоя фамилия...')
else:
user.name = str(update.text)[:15].capitalize()
if user.update() is Exception:
bot.send_message(user, 'Эта фамилия уже занята. Отправь мне свою')
user.name = None
else:
choose_group(user)
else:
choose_group(user)
| [
"Scripts.Additional.yes_no_markup",
"Scripts.Additional.all_groups.values",
"Scripts.Classes.Markup",
"DataBase.DataBaseWork.get_query",
"Scripts.Additional.get_info",
"Scripts.Classes.button",
"Scripts.Resources.Bot",
"Scripts.Classes.User",
"Scripts.MainMenu.send_student_menu"
] | [((255, 260), 'Scripts.Resources.Bot', 'Bot', ([], {}), '()\n', (258, 260), False, 'from Scripts.Resources import Bot, timer\n'), ((349, 365), 'Scripts.Classes.User', 'User', (['student_id'], {}), '(student_id)\n', (353, 365), False, 'from Scripts.Classes import Markup, button, User\n'), ((1239, 1271), 'Scripts.Additional.yes_no_markup', 'yes_no_markup', (['"""reg"""', '(user.id,)'], {}), "('reg', (user.id,))\n", (1252, 1271), False, 'from Scripts.Additional import all_groups, yes_no_markup, get_info\n'), ((1846, 1854), 'Scripts.Classes.Markup', 'Markup', ([], {}), '()\n', (1852, 1854), False, 'from Scripts.Classes import Markup, button, User\n'), ((1906, 1925), 'Scripts.Additional.all_groups.values', 'all_groups.values', ([], {}), '()\n', (1923, 1925), False, 'from Scripts.Additional import all_groups, yes_no_markup, get_info\n'), ((776, 802), 'Scripts.MainMenu.send_student_menu', 'send_student_menu', (['student'], {}), '(student)\n', (793, 802), False, 'from Scripts.MainMenu import send_student_menu\n'), ((1630, 1643), 'Scripts.Classes.User', 'User', (['send_id'], {}), '(send_id)\n', (1634, 1643), False, 'from Scripts.Classes import Markup, button, User\n'), ((543, 646), 'DataBase.DataBaseWork.get_query', 'get_query', (['"""UPDATE groups SET headman_id = ? WHERE id = ?;"""'], {'values': '(student_id, student.group_id)'}), "('UPDATE groups SET headman_id = ? 
WHERE id = ?;', values=(\n student_id, student.group_id))\n", (552, 646), False, 'from DataBase.DataBaseWork import get_query\n'), ((1494, 1516), 'Scripts.Additional.get_info', 'get_info', (['bot', 'user.id'], {}), '(bot, user.id)\n', (1502, 1516), False, 'from Scripts.Additional import all_groups, yes_no_markup, get_info\n'), ((1326, 1385), 'Scripts.Classes.button', 'button', (['"""Сделать старостой"""', "('ans=reg=headman=%d' % user.id)"], {}), "('Сделать старостой', 'ans=reg=headman=%d' % user.id)\n", (1332, 1385), False, 'from Scripts.Classes import Markup, button, User\n'), ((1947, 2040), 'Scripts.Classes.button', 'button', (['name', "('g_id=%d=%d=%s' % (group_id, headman_id or teacher_id, headman_id is None))"], {}), "(name, 'g_id=%d=%d=%s' % (group_id, headman_id or teacher_id, \n headman_id is None))\n", (1953, 2040), False, 'from Scripts.Classes import Markup, button, User\n')] |
from google.cloud import storage
# Explicitly use service account credentials by specifying the private key
# file.
storage_client = storage.Client.from_service_account_json(
'credentials.json')
# Make an authenticated API request
buckets = list(storage_client.list_buckets())
print(buckets) | [
"google.cloud.storage.Client.from_service_account_json"
] | [((134, 194), 'google.cloud.storage.Client.from_service_account_json', 'storage.Client.from_service_account_json', (['"""credentials.json"""'], {}), "('credentials.json')\n", (174, 194), False, 'from google.cloud import storage\n')] |