| code (string, 22-1.05M chars) | apis (list, 1-3.31k items) | extract_api (string, 75-3.25M chars) |
|---|---|---|
#!/usr/bin/python
#https://github.com/mitsuhiko/flask/tree/master/examples/flaskr
from flask import Flask, render_template, session, redirect, url_for, \
request, flash
from utils.logger import Logger
from query.query_parser import QueryParser
from db.mongodb import MongoDB
from pager import Pager
import pymongo, ConfigParser
from datetime import datetime
#from werkzeug.security import check_password_hash, generate_password_hash
app = Flask(__name__)
logger = Logger().getLogger("web.events")
config = ConfigParser.ConfigParser()
parser = QueryParser()
db = MongoDB('esm')
page_size = 25
quest_dict = {}
app.secret_key = '^\<KEY>'
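# Look up a user in MongoDB and return a {username: password} dict for the matching records.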
def get_users(user_name):
cursor = db.get_user(user_name)
users = {}
for u in cursor:
users[u['username']] = u['password']
return users
@app.route('/')
def index():
if not 'user_id' in session:
return redirect('/login')
else:
return redirect('/event')
@app.route('/login', methods=['POST', 'GET'])
def login():
error = None
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
users = get_users(username)
if username not in users:
error = 'Invalid username'
elif password != users[username]:
logger.debug('pwd is ' + password)
error = 'Invalid password'
else:
session['user_id'] = username
flash('You were logged in')
# return redirect(url_for('event'))
return redirect('/event')
# the code below this is executed if the request method
# was GET or the credentials were invalid
return render_template('login.tpl', error=error)
@app.route('/logout')
def logout():
"""Logs the user out."""
flash('You were logged out')
session.pop('user_id', None)
return redirect(url_for('login'))
@app.route('/help')
def help_controller():
return render_template('help.tpl')
@app.route('/event', methods=['GET'])
def events_default_controller(page_num=1):
if not 'user_id' in session:
return redirect('/login')
else:
if 'event_query' in session:
session['event_query'] = None
events = get_events(page_size, 1, {})
pager = Pager(page_size, count_events({}), 1)
return render_template('events.tpl', events = events, pager = pager)
def count_events(query):
return db.count("events", query)
def get_events(size, page, query):
event_cursor = db.find('events',size, page, 'create_time',pymongo.DESCENDING, query)
events = []
i = size * (page - 1) + 1
for e in event_cursor:
event = {}
event['index'] = i
event['time'] = e.get('create_time').strftime("%Y-%m-%dT%H:%M:%SZ")
event['domain'] = e.get('domain')
event['ip'] = e.get('ip')
event['size'] = e.get('size')
events.append(event)
i += 1
return events
@app.route('/event/<int:page>', methods=['GET'])
def events_page_controller(page=1):
'''Controller to initialize the quest Form'''
if not 'user_id' in session:
return redirect('/login')
else:
if 'event_query' in session:
query_string = session['event_query']
if query_string is not None and len(query_string) > 0:
query = parser.parse(query_string)
logger.debug('query_string=' + query_string)
else:
session['event_query'] = None
query = {}
else:
query = {}
events = get_events(page_size, page, query)
pager = Pager(page_size, count_events(query), page)
return render_template('events.tpl', events = events, pager = pager)
@app.route('/event', methods=['POST'])
def events_form_controller():
if not 'user_id' in session:
return redirect('/login')
else:
query_string = request.form['event_query'].strip()
if query_string is not None and len(query_string) > 0:
session['event_query'] = query_string
query = parser.parse(query_string)
logger.debug('query_string=' + query_string)
else:
query = {}
events = get_events(page_size, 1, query)
pager = Pager(page_size, count_events(query), 1)
return render_template('events.tpl', events = events, pager = pager)
@app.route('/stat', methods=['GET'])
def stat_form_controller():
'''Controller to initialize the statistics Form'''
if not 'user_id' in session:
return redirect('/login')
else:
#default statistics are the current day's statistics
current_date = datetime.now().strftime("%Y-%m-%d")
stat_quest = 'time in ' + current_date
db_query = parser.parse(stat_quest)
logger.debug(db_query)
stats = get_stats(db_query)
logger.debug(stats)
return render_template('stat.tpl', stats = stats, time = current_date)
@app.route('/monthly', methods=['GET'])
def stat_monthly_controller():
'''Controller to initialize the statistics Form'''
if not 'user_id' in session:
return redirect('/login')
else:
#default statistics are the current month's statistics
current_month = datetime.now().strftime("%Y-%m")
stat_quest = 'time in ' + current_month
db_query = parser.parse(stat_quest)
logger.debug(db_query)
stats = get_stats(db_query)
# logger.debug(stats)
# stats = []
return render_template('stat.tpl', stats = stats, time = current_month)
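# Build a list of {ip, size} entries from db.countSize('events', ...) for the stat templates.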
def get_stats(query):
stats_cursor = db.countSize('events',query)
stats = []
for s in stats_cursor:
stat = {}
stat['ip'] = s.get('ip')
stat['size'] = s.get('size')
stats.append(stat)
return stats
@app.route('/stat', methods=['POST'])
def stat_controller():
'''Controller to initialize the statistics Form'''
if not 'user_id' in session:
return redirect('/login')
else:
stat_quest = request.form['stat_query'].strip()
#parse the user-submitted statistics query
db_query = parser.parse(stat_quest)
logger.debug(db_query)
stats = get_stats(db_query)
return render_template('stat.tpl', stats = stats)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
|
[
"db.mongodb.MongoDB",
"flask.flash",
"flask.session.pop",
"flask.redirect",
"flask.Flask",
"query.query_parser.QueryParser",
"flask.url_for",
"utils.logger.Logger",
"flask.render_template",
"ConfigParser.ConfigParser",
"datetime.datetime.now"
] |
[((445, 460), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (450, 460), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((512, 539), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (537, 539), False, 'import pymongo, ConfigParser\n'), ((550, 563), 'query.query_parser.QueryParser', 'QueryParser', ([], {}), '()\n', (561, 563), False, 'from query.query_parser import QueryParser\n'), ((569, 583), 'db.mongodb.MongoDB', 'MongoDB', (['"""esm"""'], {}), "('esm')\n", (576, 583), False, 'from db.mongodb import MongoDB\n'), ((1691, 1732), 'flask.render_template', 'render_template', (['"""login.tpl"""'], {'error': 'error'}), "('login.tpl', error=error)\n", (1706, 1732), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((1803, 1831), 'flask.flash', 'flash', (['"""You were logged out"""'], {}), "('You were logged out')\n", (1808, 1831), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((1836, 1864), 'flask.session.pop', 'session.pop', (['"""user_id"""', 'None'], {}), "('user_id', None)\n", (1847, 1864), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((1958, 1985), 'flask.render_template', 'render_template', (['"""help.tpl"""'], {}), "('help.tpl')\n", (1973, 1985), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((470, 478), 'utils.logger.Logger', 'Logger', ([], {}), '()\n', (476, 478), False, 'from utils.logger import Logger\n'), ((887, 905), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (895, 905), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((931, 949), 'flask.redirect', 'redirect', (['"""/event"""'], {}), "('/event')\n", (939, 949), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((1885, 1901), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (1892, 1901), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((2116, 2134), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (2124, 2134), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((2344, 2401), 'flask.render_template', 'render_template', (['"""events.tpl"""'], {'events': 'events', 'pager': 'pager'}), "('events.tpl', events=events, pager=pager)\n", (2359, 2401), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((3163, 3181), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (3171, 3181), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((3722, 3779), 'flask.render_template', 'render_template', (['"""events.tpl"""'], {'events': 'events', 'pager': 'pager'}), "('events.tpl', events=events, pager=pager)\n", (3737, 3779), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((3906, 3924), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (3914, 3924), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((4378, 4435), 'flask.render_template', 'render_template', (['"""events.tpl"""'], {'events': 'events', 'pager': 'pager'}), "('events.tpl', events=events, pager=pager)\n", (4393, 4435), 
False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((4614, 4632), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (4622, 4632), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((4956, 5015), 'flask.render_template', 'render_template', (['"""stat.tpl"""'], {'stats': 'stats', 'time': 'current_date'}), "('stat.tpl', stats=stats, time=current_date)\n", (4971, 5015), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((5195, 5213), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (5203, 5213), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((5557, 5617), 'flask.render_template', 'render_template', (['"""stat.tpl"""'], {'stats': 'stats', 'time': 'current_month'}), "('stat.tpl', stats=stats, time=current_month)\n", (5572, 5617), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((6037, 6055), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (6045, 6055), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((6301, 6341), 'flask.render_template', 'render_template', (['"""stat.tpl"""'], {'stats': 'stats'}), "('stat.tpl', stats=stats)\n", (6316, 6341), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((1461, 1488), 'flask.flash', 'flash', (['"""You were logged in"""'], {}), "('You were logged in')\n", (1466, 1488), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((1555, 1573), 'flask.redirect', 'redirect', (['"""/event"""'], {}), "('/event')\n", (1563, 1573), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash\n'), ((4719, 4733), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4731, 4733), False, 'from datetime import datetime\n'), ((5301, 5315), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5313, 5315), False, 'from datetime import datetime\n')]
|
from rasa.nlu.convert import convert_training_data
convert_training_data(data_file="./input.json", out_file="./nlu.md", output_format="md", language="")
|
[
"rasa.nlu.convert.convert_training_data"
] |
[((51, 156), 'rasa.nlu.convert.convert_training_data', 'convert_training_data', ([], {'data_file': '"""./input.json"""', 'out_file': '"""./nlu.md"""', 'output_format': '"""md"""', 'language': '""""""'}), "(data_file='./input.json', out_file='./nlu.md',\n output_format='md', language='')\n", (72, 156), False, 'from rasa.nlu.convert import convert_training_data\n')]
|
import re
import string
years = [str(x) for x in range(1990, 2200)]
number_re = re.compile(r'^\-?[0-9]*\.?[0-9]*$')
regex_trailing_As = re.compile(r'(?:\s*A\s*)*$')
regex_punctuation = ''
for c in string.punctuation:
regex_punctuation += f'\\{c}'
# Match a run of word characters, or any single punctuation character.
split_using_punctuation_re = re.compile(r'\w+|[' + regex_punctuation + ']')
def split_using_punctuation(s):
return list(map(str.strip,
split_using_punctuation_re.findall(s)))
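# Heuristic check that the text is a (possibly negative, possibly fractional) number.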
def number(text):
if len(text) == 1 and text[0] == '-':
return False
return number_re.search(text) is not None
def remove_bad_endings(text):
s = text
bad_endings = ['\n', '%']
while len(s) > 0 and s[-1] in bad_endings:
s = s[:-1]
# if len(s) < len(text):
# print(f' {text} => {s}')
return s
def filter_bad_tokens(text):
if text is None or len(text) == 0 or len(text.strip()) == 0:
return False
return True
def remove_single_nonletters(text):
text = text.strip()
if (len(text) == 1) and \
(text[0] == '$'):
return None
else:
return text
def amount(text):
negative = False
# If a column heading contains a comma,
# ex. "Dec 31,", then this is a problem
# since each word is considered separately.
# Remove commas only when they appear
# within the word and not after it.
# This way ex. 33,256,192 is valid.
if text[-1:] == ',':
return None
if len(text) == 1 and text[0] == '.':
return None
if '(' in text:
negative = True
text = text.strip() \
.replace('(', '') \
.replace(',', '') \
.replace(')', '') \
.replace('%', '')
if len(text) == 0:
return None
if not number(text):
return None
# Check if text is a single '.' after the strip
# has changed its length
if len(text) == 1 and text[0] == '.':
return None
value = float(text)
if negative:
return -value
else:
return value
def row_headings(text):
if len(text) == 1 and text[0] in '$%)()—':
return None
num_digits_in_text = len(list(filter(str.isdigit, text)))
num_chars_in_text = len(text)
if num_chars_in_text > 0 and \
(num_chars_in_text -
num_digits_in_text)/num_chars_in_text > 0.5:
return text
else:
return None
def year(text):
if text in years:
return int(text)
else:
return None
def remove_all_trailing_As(row_headings):
return row_headings.transform(lambda x: regex_trailing_As.sub('', x))
def remove_nonascii_chars(str):
MAX_ASCII_VALUE = 127
def replace_nonascii(c):
if ord(c) > MAX_ASCII_VALUE:
return ' '
else:
return c
result = ''.join(map(replace_nonascii, list(str)))
return result
|
[
"re.compile"
] |
[((82, 118), 're.compile', 're.compile', (['"""^\\\\-?[0-9]*\\\\.?[0-9]*$"""'], {}), "('^\\\\-?[0-9]*\\\\.?[0-9]*$')\n", (92, 118), False, 'import re\n'), ((138, 167), 're.compile', 're.compile', (['"""(?:\\\\s*A\\\\s*)*$"""'], {}), "('(?:\\\\s*A\\\\s*)*$')\n", (148, 167), False, 'import re\n'), ((283, 327), 're.compile', 're.compile', (["('\\\\w+|' + f'{regex_punctuation}')"], {}), "('\\\\w+|' + f'{regex_punctuation}')\n", (293, 327), False, 'import re\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
import sys
# read the image
image = cv2.imread(sys.argv[1])
# convert to grayscale
grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# perform edge detection
edges = cv2.Canny(grayscale, 30, 100)
# detect lines in the image using hough lines technique
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 60, np.array([]), 50, 5)
# iterate over the output lines and draw them
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(image, (x1, y1), (x2, y2), color=(20, 220, 20), thickness=3)
# show the image
plt.imshow(image)
plt.show()
|
[
"cv2.line",
"cv2.Canny",
"matplotlib.pyplot.show",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"cv2.imread",
"numpy.array"
] |
[((105, 128), 'cv2.imread', 'cv2.imread', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (115, 128), False, 'import cv2\n'), ((168, 207), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (180, 207), False, 'import cv2\n'), ((245, 274), 'cv2.Canny', 'cv2.Canny', (['grayscale', '(30)', '(100)'], {}), '(grayscale, 30, 100)\n', (254, 274), False, 'import cv2\n'), ((605, 622), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (615, 622), True, 'import matplotlib.pyplot as plt\n'), ((624, 634), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (632, 634), True, 'import matplotlib.pyplot as plt\n'), ((384, 396), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (392, 396), True, 'import numpy as np\n'), ((514, 583), 'cv2.line', 'cv2.line', (['image', '(x1, y1)', '(x2, y2)'], {'color': '(20, 220, 20)', 'thickness': '(3)'}), '(image, (x1, y1), (x2, y2), color=(20, 220, 20), thickness=3)\n', (522, 583), False, 'import cv2\n')]
|
import torch
a = torch.cuda.is_available()
print (a)
|
[
"torch.cuda.is_available"
] |
[((17, 42), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (40, 42), False, 'import torch\n')]
|
import tensorflow
from tensorflow.keras.datasets import cifar10
from tensorflow import keras
import numpy as np
num_classes = 10
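# Wraps the CIFAR-10 test split: scales pixels to [0, 1], subtracts the training-set mean, and one-hot encodes the labels.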
class EvalDataset(object):
def __init__(self, batch_size=100):
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
# Convert class vectors to binary class matrices.
y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
self.test_images = x_test
self.test_labels = y_test
def __len__(self):
return len(self.test_images)
def __getitem__(self, idx):
return self.test_images[idx], self.test_labels[idx]
from neural_compressor.experimental import Benchmark, common
evaluator = Benchmark('benchmark.yaml')
evaluator.model = common.Model('./baseline_model')
evaluator.b_dataloader = common.DataLoader(EvalDataset())
evaluator('performance')
|
[
"tensorflow.keras.utils.to_categorical",
"neural_compressor.experimental.Benchmark",
"tensorflow.keras.datasets.cifar10.load_data",
"neural_compressor.experimental.common.Model",
"numpy.mean"
] |
[((1031, 1058), 'neural_compressor.experimental.Benchmark', 'Benchmark', (['"""benchmark.yaml"""'], {}), "('benchmark.yaml')\n", (1040, 1058), False, 'from neural_compressor.experimental import Benchmark, common\n'), ((1077, 1109), 'neural_compressor.experimental.common.Model', 'common.Model', (['"""./baseline_model"""'], {}), "('./baseline_model')\n", (1089, 1109), False, 'from neural_compressor.experimental import Benchmark, common\n'), ((246, 265), 'tensorflow.keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (263, 265), False, 'from tensorflow.keras.datasets import cifar10\n'), ((433, 457), 'numpy.mean', 'np.mean', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (440, 457), True, 'import numpy as np\n'), ((598, 657), 'tensorflow.keras.utils.to_categorical', 'tensorflow.keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (635, 657), False, 'import tensorflow\n'), ((675, 733), 'tensorflow.keras.utils.to_categorical', 'tensorflow.keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (712, 733), False, 'import tensorflow\n')]
|
# Copyright 2019-2020 Lawrence Livermore National Security, LLC and other
# Archspec Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Global objects with the content of the microarchitecture
JSON file and its schema
"""
import json
import os.path
try:
from collections.abc import MutableMapping # novm
except ImportError:
from collections import MutableMapping
class LazyDictionary(MutableMapping):
"""Lazy dictionary that gets constructed on first access to any object key
Args:
factory (callable): factory function to construct the dictionary
"""
def __init__(self, factory, *args, **kwargs):
self.factory = factory
self.args = args
self.kwargs = kwargs
self._data = None
@property
def data(self):
"""Returns the lazily constructed dictionary"""
if self._data is None:
self._data = self.factory(*self.args, **self.kwargs)
return self._data
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def _load_json_file(json_file):
json_dir = os.path.join(os.path.dirname(__file__), "..", "json", "cpu")
json_dir = os.path.abspath(json_dir)
def _factory():
filename = os.path.join(json_dir, json_file)
with open(filename, "r") as file:
return json.load(file)
return _factory
#: In memory representation of the data in microarchitectures.json,
#: loaded on first access
TARGETS_JSON = LazyDictionary(_load_json_file("microarchitectures.json"))
#: JSON schema for microarchitectures.json, loaded on first access
SCHEMA = LazyDictionary(_load_json_file("microarchitectures_schema.json"))
|
[
"json.load"
] |
[((1616, 1631), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1625, 1631), False, 'import json\n')]
|
#OUTDATED
#The current version of this file uses full genes and not constructed genes
#Finds nearest gene in domains that DHS intergenic site has membership in
#domains_with_full_genes_#.csv generated from gene_analsysis.py
#DHS_with_domains.csv generated in domain_analsysis.py
#Exports DHS_#_nearest_full_gene_distance.csv
import pandas as pd
import math
import time
printed = False
files = ['1', '2', '4', '8']
last_time = 0
for file in files:
domains_with_genes = {}
DHSs_with_domains = {}
DHS_and_distance = []
DHSs = []
#path = "data/DHS_" + file + "_cell_with_domain.csv"
#domain data doesn't have uniform columns
printed = False
path = "data/mm10_data/domains/domains_with_genes_" + file + ".csv"
with open(path) as domain_genes:
#split file into lines
genes = []
lis = [line.split() for line in domain_genes]
for bulk in lis:
domain = None
#Scope of the genes from the domain data
#domain data and gene data is formatted: chr#:start-end
#These features are being loaded from that format
for domain_data in bulk:
gene_list = []
domain_data = domain_data.split(',')
#First element is domain id
domain = domain_data[0]
#All remaining elements are genes or empty space
domain_removed = domain_data[4:]
for element in domain_removed:
#filter out empty space
if len(element) > 0:
genes.append(element)
#Extract gene features and add to gene list
for gene in genes:
pre_chr = gene.split(':')
chr = pre_chr[0]
pre_loc = pre_chr[1]
pre_start = pre_loc.split('-')
start = pre_start[0]
end = pre_start[1]
#Append gene data to list of genes
gene_list.append([chr, start, end, gene])
#Create domain dictionary entry with genes in its scope
domains_with_genes[domain] = gene_list
printed = False
path = "data/mm10_data/domains/domains_with_DHSs_" + file + ".csv"
with open(path) as DHS_domains:
#split file into lines
lis = [line.split() for line in DHS_domains]
for bulk in lis:
dhs_list = []
DHS_id = None
#Scope of the domain data from the dhs data
for DHS_data in bulk:
DHS = []
data = DHS_data.split(',')
domain_id = data[0]
DHS_data = data[1:]
for element in DHS_data:
if len(element) > 0:
DHSs.append(element)
for dhs in DHSs:
pre_chr = dhs.split(':')
chr = pre_chr[0]
pre_loc = pre_chr[1]
pre_start = pre_loc.split('-')
start = pre_start[0]
end = pre_start[1]
dhs_list.append([chr, start, end, dhs])
DHSs_with_domains[domain_id] = dhs_list
printed = False
#Find the genes that the DHS site may interact with in the domain
for domain in DHSs_with_domains.keys():
genes_in_domain = []
distances = []
on_left = []
on_right = []
DHS_matrix = []
gene_matrix = domains_with_genes[domain]
for gene in gene_matrix:
genes_in_domain.append(gene)
for dhs in DHSs_with_domains.values():
DHS_matrix.append(dhs)
for dhs_row in DHS_matrix:
for dhs in dhs_row:
start = int(dhs[1])
end = int(dhs[2])
id = dhs[3]
#Find the center of the DHS site
dhs_center = math.floor(( int(start) + int(end) ) / 2)
for gene in genes_in_domain:
if int(gene[2]) < dhs_center:
on_left.append(gene)
elif dhs_center < int(gene[1]):
on_right.append(gene)
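#Distance is measured from the DHS start to genes ending on its left, and from the DHS end to genes starting on its right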
for gene in on_left:
dis = abs(start - int(gene[2]))
distances.append(dis)
for gene in on_right:
dis = abs(int(gene[1]) - end)
distances.append(dis)
shortest_distance = min(distances)
DHS_and_distance.append([id, shortest_distance])
#Export data
Distance_df = pd.DataFrame(DHS_and_distance)
path = "data/mm10_data/nearest_gene_in_domain/DHS_" + file + "_nearest_gene_in_domain_distance.csv"
Distance_df.to_csv(path, index=False, header=False)
time_to_complete = time.process_time() - last_time
print("Time taken to process nearest genes for " + file + " cell: " + str(time_to_complete))
last_time = time.process_time()
|
[
"pandas.DataFrame",
"time.process_time"
] |
[((4567, 4597), 'pandas.DataFrame', 'pd.DataFrame', (['DHS_and_distance'], {}), '(DHS_and_distance)\n', (4579, 4597), True, 'import pandas as pd\n'), ((4781, 4800), 'time.process_time', 'time.process_time', ([], {}), '()\n', (4798, 4800), False, 'import time\n')]
|
## Read the subset labels.csv files
## and produce one "filename,token,token,..." line per image for Word2Vec.
import os
import re
import sys
import warnings
import numpy as np
from PIL import Image
from config import Config
if __name__ == '__main__':
SUBSET_LIST = ['deviantart_verified', 'wikiart_verified']
FRACTION = 1.0 ## To prepare smaller dataset, make 1.0 to take all
if len(sys.argv) != 2:
print("Usage: python {} <output_dir>".format(sys.argv[0]))
exit()
OUTPUT_DIR = sys.argv[1]
os.makedirs(OUTPUT_DIR, exist_ok=True)
OUTPUT_FILE = os.path.join(OUTPUT_DIR, 'all_labels.csv')
if os.path.isfile(OUTPUT_FILE):
print(OUTPUT_FILE, 'exists. Exiting.')
exit()
warnings.filterwarnings('error')
config = Config()
image_file_counter = 0
all_lines = []
for subset in SUBSET_LIST:
labels_file = os.path.join(subset, 'labels.csv')
if not os.path.isfile(labels_file):
print(labels_file, 'not found. Passing')
continue
with open(labels_file, 'r') as f:
lines = f.readlines()
## Fraction the lines
lines = sorted(np.random.choice(lines, int(len(lines) * FRACTION), replace=False))
for line in lines:
row = line.strip().split(',')
image_filename = row[0]
labels = row[1:]
image_relative_file = os.path.join('data', subset, 'images', image_filename) ## Relative to project base
image_file = os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir, image_relative_file))
## Check image file existence
if not os.path.isfile(image_file):
continue
## Apply label conditions
if len(labels) < config.MIN_SENTENCE_LENGTH or len(labels) > config.MAX_SENTENCE_LENGTH:
continue
## Check image integrity
try:
img = Image.open(image_file)
if img.mode != 'RGB':
continue
except (Exception, Warning) as e:
print("Bad image", image_file, e)
continue
## Apply image shape conditions
w, h = img.size
if w < config.MIN_IMAGE_WIDTH or w > config.MAX_IMAGE_WIDTH:
continue
if h < config.MIN_IMAGE_HEIGHT or h > config.MAX_IMAGE_HEIGHT:
continue
if max(w, h) / (min(w, h) + 1e-7) > config.MAX_ASPECT_RATIO:
continue
## Normalize tokens
label_sentence = str(' '.join(labels)) ## First column is image filename
label_sentence = re.sub(r"[^A-Za-z0-9']+", " ", label_sentence).lower() ## Only alphanumeric characters
label_sentence = re.sub(r"\b[a-zA-Z]\b", " ", label_sentence) ## Replace single letters
label_sentence = re.sub(" ", " ", label_sentence) ## Go one space
label_sentence = re.sub(" ", " ", label_sentence) ## Go one space
label_sentence = label_sentence.replace(' ', ',')
all_lines.append(image_relative_file + ',' + label_sentence + '\n')
image_file_counter += 1
with open(OUTPUT_FILE, 'w') as f:
f.writelines(all_lines)
print(OUTPUT_FILE, 'was written with {} file'.format(image_file_counter))
|
[
"os.makedirs",
"config.Config",
"warnings.filterwarnings",
"PIL.Image.open",
"os.path.isfile",
"os.path.join",
"re.sub"
] |
[((529, 567), 'os.makedirs', 'os.makedirs', (['OUTPUT_DIR'], {'exist_ok': '(True)'}), '(OUTPUT_DIR, exist_ok=True)\n', (540, 567), False, 'import os\n'), ((586, 628), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', '"""all_labels.csv"""'], {}), "(OUTPUT_DIR, 'all_labels.csv')\n", (598, 628), False, 'import os\n'), ((636, 663), 'os.path.isfile', 'os.path.isfile', (['OUTPUT_FILE'], {}), '(OUTPUT_FILE)\n', (650, 663), False, 'import os\n'), ((732, 764), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (755, 764), False, 'import warnings\n'), ((779, 787), 'config.Config', 'Config', ([], {}), '()\n', (785, 787), False, 'from config import Config\n'), ((888, 922), 'os.path.join', 'os.path.join', (['subset', '"""labels.csv"""'], {}), "(subset, 'labels.csv')\n", (900, 922), False, 'import os\n'), ((939, 966), 'os.path.isfile', 'os.path.isfile', (['labels_file'], {}), '(labels_file)\n', (953, 966), False, 'import os\n'), ((1410, 1464), 'os.path.join', 'os.path.join', (['"""data"""', 'subset', '"""images"""', 'image_filename'], {}), "('data', subset, 'images', image_filename)\n", (1422, 1464), False, 'import os\n'), ((2856, 2901), 're.sub', 're.sub', (['"""\\\\b[a-zA-Z]\\\\b"""', '""" """', 'label_sentence'], {}), "('\\\\b[a-zA-Z]\\\\b', ' ', label_sentence)\n", (2862, 2901), False, 'import re\n'), ((2971, 3005), 're.sub', 're.sub', (['""" """', '""" """', 'label_sentence'], {}), "(' ', ' ', label_sentence)\n", (2977, 3005), False, 'import re\n'), ((3076, 3109), 're.sub', 're.sub', (['""" """', '""" """', 'label_sentence'], {}), "(' ', ' ', label_sentence)\n", (3082, 3109), False, 'import re\n'), ((1535, 1610), 'os.path.join', 'os.path.join', (['__file__', 'os.path.pardir', 'os.path.pardir', 'image_relative_file'], {}), '(__file__, os.path.pardir, os.path.pardir, image_relative_file)\n', (1547, 1610), False, 'import os\n'), ((1674, 1700), 'os.path.isfile', 'os.path.isfile', (['image_file'], {}), '(image_file)\n', (1688, 1700), False, 'import os\n'), ((1969, 1991), 'PIL.Image.open', 'Image.open', (['image_file'], {}), '(image_file)\n', (1979, 1991), False, 'from PIL import Image\n'), ((2735, 2780), 're.sub', 're.sub', (['"""[^A-Za-z0-9\']+"""', '""" """', 'label_sentence'], {}), '("[^A-Za-z0-9\']+", \' \', label_sentence)\n', (2741, 2780), False, 'import re\n')]
|
"""
Miscellaneous utility functions and common data.
Attributes:
common_formulas: A set of common formulas. The keys to the data are strings
from :obj:`pymatgen.core.composition.Composition.reduced_formula`.
connected_geometries: A list of geometries that are considered
"connectable" polyhedra. E.g. Their face-sharing, edge-sharing, etc
properties are of interest.
geometry_to_polyhedra: A mapping from geometry type (e.g. octahedral) to the
plural polyhedra name (e.g. octahedra).
dimensionality_to_shape: A mapping from dimensionality to the component
shape.
"""
import re
from collections import defaultdict
from typing import Any, Dict, List, Union
from monty.json import MontyDecoder
from monty.serialization import loadfn
from pkg_resources import resource_filename
from pymatgen.core.periodic_table import Element, Species, get_el_sp
from pymatgen.util.string import latexify_spacegroup
common_formulas: Dict[str, str] = loadfn(
resource_filename("robocrys.condense", "formula_db.json.gz")
)
connected_geometries: List[str] = [
"tetrahedral",
"octahedral",
"trigonal pyramidal",
"square pyramidal",
"trigonal bipyramidal",
"pentagonal pyramidal",
"hexagonal pyramidal",
"pentagonal bipyramidal",
"hexagonal bipyramidal",
"cuboctahedral",
]
geometry_to_polyhedra: Dict[str, str] = {
"octahedral": "octahedra",
"tetrahedral": "tetrahedra",
"trigonal pyramidal": "trigonal pyramid",
"square pyramidal": "square pyramid",
"trigonal bipyramidal": "trigonal bipyramid",
"pentagonal pyramidal": "pentagonal pyramid",
"hexagonal pyramidal": "hexagonal pyramid",
"pentagonal bipyramidal": "pentagonal bipyramid",
"hexagonal bipyramidal": "hexagonal bipyramid",
"cuboctahedral": "cuboctahedra",
}
polyhedra_plurals: Dict[str, str] = {
"octahedra": "octahedra",
"tetrahedra": "tetrahedra",
"trigonal pyramid": "trigonal pyramids",
"square pyramid": "square pyramids",
"trigonal bipyramid": "trigonal bipyramids",
"pentagonal pyramid": "pentagonal pyramids",
"hexagonal pyramid": "hexagonal pyramids",
"pentagonal bipyramid": "pentagonal bipyramids",
"hexagonal bipyramid": "hexagonal bipyramids",
"cuboctahedra": "cuboctahedra",
}
dimensionality_to_shape: Dict[int, str] = {
3: "framework",
2: "sheet",
1: "ribbon",
0: "cluster",
}
def get_el(obj: Union[Element, Species, str, int]) -> str:
"""Utility method to get an element str from a symbol, Element, or Specie.
Args:
obj: An arbitrary object. Supported objects are Element/Species objects,
integers (representing atomic numbers), or strings (element
symbols or species strings).
Returns:
The element as a string.
"""
if isinstance(obj, str):
obj = get_el_sp(obj)
if isinstance(obj, Element):
return obj.name
elif isinstance(obj, Species):
return obj.element.name
elif isinstance(obj, int):
return Element.from_Z(obj).name
else:
raise ValueError(f"Unsupported element type: {type(obj)}.")
def get_formatted_el(
element: str,
sym_label: str,
use_oxi_state: bool = True,
use_sym_label: bool = True,
fmt: str = "raw",
):
"""Formats an element string.
Performs a variety of functions, including:
- Changing "Sn+0" to "Sn".
- Inserting the symmetry label between the element and oxidation state, if
required.
- Removing the oxidation state if required.
- Latexifying the element and oxidation state.
- Unicodeifying the element and oxidation state.
- Converting the element and oxidation state to html.
Args:
element: The element string (possibly including the oxidation state.
E.g. "Sn" or "Sn2+".
sym_label: The symmetry label. E.g. "(1)"
use_oxi_state: Whether to include the oxidation state, if present.
use_sym_label: Whether to use the symmetry label.
fmt: How to format the element strings. Options are:
- "raw" (default): Don't apply special formatting (e.g. "SnO2").
- "unicode": Format super/subscripts using unicode characters
(e.g. SnO₂).
- "latex": Use LaTeX markup for formatting (e.g. "SnO$_2$").
- "html": Use html markup for formatting (e.g. "SnO<sub>2</sub>").
Returns:
The formatted element string.
"""
specie = get_el_sp(element)
if isinstance(specie, Species):
oxi_state = specie.oxi_state
sign = "+" if oxi_state > 0 else "-"
if oxi_state == 0:
oxi_state = None
elif oxi_state % 1 == 0:
oxi_state = f"{int(abs(oxi_state)):d}{sign}"
else:
oxi_state = f"{abs(oxi_state):+.2f}{sign}"
else:
oxi_state = None
formatted_element = specie.name
if use_sym_label:
formatted_element += sym_label
if use_oxi_state and oxi_state:
if fmt == "latex":
oxi_state = f"^{{{oxi_state}}}"
elif fmt == "unicode":
oxi_state = superscript_number(oxi_state)
elif fmt == "html":
oxi_state = f"<sup>{oxi_state}</sup>"
formatted_element += oxi_state
return formatted_element
def superscript_number(string):
"""Converts a string containing numbers to superscript.
Will only convert the numbers 0-9, and the + and - characters.
Args:
string: A string containing the numbers 0-9 or +/- characters.
Returns:
The superscript string.
"""
if "." in string:
# no unicode period exists
return string
subscript_unicode_map = {
0: "⁰",
1: "¹",
2: "²",
3: "³",
4: "⁴",
5: "⁵",
6: "⁶",
7: "⁷",
8: "⁸",
9: "⁹",
"-": "⁻",
"+": "⁺",
}
for original_subscript, subscript_unicode in subscript_unicode_map.items():
string = string.replace(str(original_subscript), subscript_unicode)
return string
def unicodeify_spacegroup(spacegroup_symbol: str) -> str:
"""Formats a spacegroup using unicode symbols.
E.g. Fd-3m -> Fd̅3m
Args:
spacegroup_symbol: A spacegroup symbol.
Returns:
The unicode formatted spacegroup symbol.
"""
subscript_unicode_map = {
0: "₀",
1: "₁",
2: "₂",
3: "₃",
4: "₄",
5: "₅",
6: "₆",
7: "₇",
8: "₈",
9: "₉",
}
symbol = latexify_spacegroup(spacegroup_symbol)
for number, unicode_number in subscript_unicode_map.items():
symbol = symbol.replace("$_{" + str(number) + "}$", unicode_number)
overline = "\u0305" # u"\u0304" (macron) is also an option
symbol = symbol.replace("$\\overline{", overline)
symbol = symbol.replace("$", "")
symbol = symbol.replace("{", "")
symbol = symbol.replace("}", "")
return symbol
def htmlify_spacegroup(spacegroup_symbol: str) -> str:
"""Formats a spacegroup using unicode symbols.
E.g. P-42_1m -> P̅42<sub>1</sub>m
Args:
spacegroup_symbol: A spacegroup symbol.
Returns:
The html formatted spacegroup symbol.
"""
overline = "\u0305" # u"\u0304" (macron) is also an option
symbol = re.sub(r"_(\d+)", r"<sub>\1</sub>", spacegroup_symbol)
symbol = re.sub(r"-(\d)", fr"{overline}\1", symbol)
return symbol
def defaultdict_to_dict(dictionary: defaultdict) -> Dict:
"""Recursively convert nested :obj:`defaultdict` to :obj:`dict`.
Args:
dictionary: A defaultdict.
Returns:
The defaultdict as a :obj:`dict`.
"""
if isinstance(dictionary, defaultdict):
dictionary = {k: defaultdict_to_dict(v) for k, v in dictionary.items()}
return dictionary
def load_condensed_structure_json(filename: str) -> Dict[str, Any]:
"""Load condensed structure data from a file.
Args:
filename: The filename.
Returns:
The condensed structure data.
"""
# JSON does not support using integers as dictionary keys, therefore
# manually convert dictionary keys from str to int if possible.
def json_keys_to_int(x):
if isinstance(x, dict):
return {int(k) if k.isdigit() else k: v for k, v in x.items()}
return loadfn(filename, cls=MontyDecoder, object_hook=json_keys_to_int)
|
[
"pymatgen.util.string.latexify_spacegroup",
"pymatgen.core.periodic_table.get_el_sp",
"monty.serialization.loadfn",
"pkg_resources.resource_filename",
"pymatgen.core.periodic_table.Element.from_Z",
"re.sub"
] |
[((1001, 1061), 'pkg_resources.resource_filename', 'resource_filename', (['"""robocrys.condense"""', '"""formula_db.json.gz"""'], {}), "('robocrys.condense', 'formula_db.json.gz')\n", (1018, 1061), False, 'from pkg_resources import resource_filename\n'), ((4507, 4525), 'pymatgen.core.periodic_table.get_el_sp', 'get_el_sp', (['element'], {}), '(element)\n', (4516, 4525), False, 'from pymatgen.core.periodic_table import Element, Species, get_el_sp\n'), ((6600, 6638), 'pymatgen.util.string.latexify_spacegroup', 'latexify_spacegroup', (['spacegroup_symbol'], {}), '(spacegroup_symbol)\n', (6619, 6638), False, 'from pymatgen.util.string import latexify_spacegroup\n'), ((7382, 7436), 're.sub', 're.sub', (['"""_(\\\\d+)"""', '"""<sub>\\\\1</sub>"""', 'spacegroup_symbol'], {}), "('_(\\\\d+)', '<sub>\\\\1</sub>', spacegroup_symbol)\n", (7388, 7436), False, 'import re\n'), ((7450, 7492), 're.sub', 're.sub', (['"""-(\\\\d)"""', 'f"""{overline}\\\\1"""', 'symbol'], {}), "('-(\\\\d)', f'{overline}\\\\1', symbol)\n", (7456, 7492), False, 'import re\n'), ((8409, 8473), 'monty.serialization.loadfn', 'loadfn', (['filename'], {'cls': 'MontyDecoder', 'object_hook': 'json_keys_to_int'}), '(filename, cls=MontyDecoder, object_hook=json_keys_to_int)\n', (8415, 8473), False, 'from monty.serialization import loadfn\n'), ((2876, 2890), 'pymatgen.core.periodic_table.get_el_sp', 'get_el_sp', (['obj'], {}), '(obj)\n', (2885, 2890), False, 'from pymatgen.core.periodic_table import Element, Species, get_el_sp\n'), ((3062, 3081), 'pymatgen.core.periodic_table.Element.from_Z', 'Element.from_Z', (['obj'], {}), '(obj)\n', (3076, 3081), False, 'from pymatgen.core.periodic_table import Element, Species, get_el_sp\n')]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import importlib
import json
from bkbase.dataflow.core.exec.pipeline import Pipeline
from bkbase.dataflow.metrics.util.exceptions import MetricsVerifyException
from bkbase.dataflow.one_model.conf import deeplearning_conf
from bkbase.dataflow.one_model.exception.tensorflow_exception import (
TensorFlowReportException,
)
from bkbase.dataflow.one_model.metric.monitor_handler import MonitorHandler
from bkbase.dataflow.one_model.topo.deeplearning_source_node import (
ModelIcebergResultSetSourceNode,
)
from bkbase.dataflow.one_model.utils.deeplearning_constant import NodeType, ProcessType
from bkbase.dataflow.one_model.utils.deeplearning_logger import (
logger as deeplearning_logger,
)
class DeepLearningPipeline(Pipeline):
def __init__(self, topology):
super().__init__(topology)
self.monitors = {}
if deeplearning_conf.ENABLE_MONITOR:
self.enable_monitor()
self.dataframe_dict = {}
self.model_dict = {}
def source(self):
deeplearning_logger.info("Pipeline source start")
self.__create_source(self.topology.source_nodes)
def sink(self):
deeplearning_logger.info("Pipeline sink start")
self.__create_sink(self.topology.sink_nodes, self.topology.source_nodes)
def transform(self):
deeplearning_logger.info("Pipeline transform start")
return self.__create_transform(self.topology.transform_nodes)
def submit(self):
try:
self.source()
deeplearning_logger.info("source_data_dict: {}".format(self.dataframe_dict))
deeplearning_logger.info("model_dict:{}".format(self.model_dict))
transform_result = self.transform()
deeplearning_logger.info("transform_dict: {}".format(transform_result))
self.sink()
self.send_monitor()
deeplearning_logger.info("pipeline submit finished")
except Exception as e:
self.send_monitor()
raise e
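# Resolve each source node's data, keeping models and dataframes in separate dicts keyed by node id.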
def __create_source(self, source_nodes):
for node in source_nodes:
source = source_nodes[node].create_source()
node_type = source_nodes[node].type
if node_type == NodeType.MODEL.value:
self.model_dict[node] = source
else:
self.dataframe_dict[node] = source
def __create_sink(self, sink_nodes, source_nodes):
deeplearning_logger.info(sink_nodes)
for node in sink_nodes:
node_type = sink_nodes[node].type
if node_type == NodeType.MODEL.value:
if node in self.model_dict:
sink_nodes[node].create_sink(self.model_dict[node])
else:
# Combine the source dataset with the target dataset as the output
target_dataset = self.dataframe_dict[node]
sink_nodes[node].create_sink(target_dataset)
def __create_transform(self, transform_nodes):
for node_id in transform_nodes:
process_type = transform_nodes[node_id].process_type
transform_module = importlib.import_module(transform_nodes[node_id].user_main_module)
if process_type == ProcessType.UNTRAINED_RUN.value:
result_dict = transform_module.transform(
self.dataframe_dict, self.model_dict, transform_nodes[node_id].user_args
)
elif process_type == ProcessType.TRAINED_RUN.value:
result_dict = transform_module.predict(self.dataframe_dict, self.model_dict)
elif process_type == ProcessType.TRAIN.value:
result_dict = transform_module.train(self.dataframe_dict, self.model_dict)
else:
raise Exception("unsuppoted operation:{}".format(process_type))
return result_dict
def send_monitor(self):
try:
if deeplearning_conf.ENABLE_MONITOR:
deeplearning_logger.info("Pipeline send monitor")
input_count_info = self.get_input_count_info()
for monitor in self.monitors:
self.monitors[monitor].report(input_count_info)
else:
deeplearning_logger.info("Monitor disabled, won't send report data")
except (ValueError, TypeError, AssertionError, MetricsVerifyException, TensorFlowReportException) as e:
deeplearning_logger.exception(e)
deeplearning_logger.error(e)
def get_input_count_info(self):
input_info = {}
source_nodes = self.topology.source_nodes
try:
for source_id in source_nodes:
source_obj = source_nodes[source_id]
if isinstance(source_obj, ModelIcebergResultSetSourceNode):
# Reporting is only needed when the source is a result table (rt)
start = source_obj.input["time_range_list"][0]["start_time"]
end = source_obj.input["time_range_list"][0]["end_time"]
count = MonitorHandler.get_rt_count(
start, end, source_obj.input["storage_conf"]["storekit_hdfs_conf"]
)
input_info[source_id] = {"count": count, "start": start, "end": end}
deeplearning_logger.info("input info:" + json.dumps(input_info))
return input_info
except Exception as e:
deeplearning_logger.error(e)
raise TensorFlowReportException("Get input information error:%s" % e)
def enable_monitor(self):
for sink_node in self.topology.sink_nodes:
sink_obj = self.topology.sink_nodes[sink_node]
# if isinstance(sink_obj, ModelIcebergResultSetSinkNode):
if sink_obj.fields:
# Non-empty fields means the sink is a table
self.monitors[sink_node] = MonitorHandler(self.topology, sink_node)
|
[
"bkbase.dataflow.one_model.metric.monitor_handler.MonitorHandler.get_rt_count",
"bkbase.dataflow.one_model.utils.deeplearning_logger.logger.info",
"importlib.import_module",
"bkbase.dataflow.one_model.exception.tensorflow_exception.TensorFlowReportException",
"bkbase.dataflow.one_model.utils.deeplearning_logger.logger.error",
"bkbase.dataflow.one_model.utils.deeplearning_logger.logger.exception",
"json.dumps",
"bkbase.dataflow.one_model.metric.monitor_handler.MonitorHandler"
] |
[((2385, 2434), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.info', 'deeplearning_logger.info', (['"""Pipeline source start"""'], {}), "('Pipeline source start')\n", (2409, 2434), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((2521, 2568), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.info', 'deeplearning_logger.info', (['"""Pipeline sink start"""'], {}), "('Pipeline sink start')\n", (2545, 2568), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((2684, 2736), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.info', 'deeplearning_logger.info', (['"""Pipeline transform start"""'], {}), "('Pipeline transform start')\n", (2708, 2736), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((3786, 3822), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.info', 'deeplearning_logger.info', (['sink_nodes'], {}), '(sink_nodes)\n', (3810, 3822), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((3236, 3288), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.info', 'deeplearning_logger.info', (['"""pipeline submit finished"""'], {}), "('pipeline submit finished')\n", (3260, 3288), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((4439, 4505), 'importlib.import_module', 'importlib.import_module', (['transform_nodes[node_id].user_main_module'], {}), '(transform_nodes[node_id].user_main_module)\n', (4462, 4505), False, 'import importlib\n'), ((5281, 5330), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.info', 'deeplearning_logger.info', (['"""Pipeline send monitor"""'], {}), "('Pipeline send monitor')\n", (5305, 5330), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((5542, 5610), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.info', 'deeplearning_logger.info', (['"""Monitor disabled, won\'t send report data"""'], {}), '("Monitor disabled, won\'t send report data")\n', (5566, 5610), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((5735, 5767), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.exception', 'deeplearning_logger.exception', (['e'], {}), '(e)\n', (5764, 5767), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((5780, 5808), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.error', 'deeplearning_logger.error', (['e'], {}), '(e)\n', (5805, 5808), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((6706, 6734), 'bkbase.dataflow.one_model.utils.deeplearning_logger.logger.error', 'deeplearning_logger.error', (['e'], {}), '(e)\n', (6731, 6734), True, 'from bkbase.dataflow.one_model.utils.deeplearning_logger import logger as deeplearning_logger\n'), ((6753, 6816), 'bkbase.dataflow.one_model.exception.tensorflow_exception.TensorFlowReportException', 'TensorFlowReportException', (["('Get input information error:%s' % e)"], {}), "('Get input information error:%s' % e)\n", (6778, 6816), False, 'from bkbase.dataflow.one_model.exception.tensorflow_exception import TensorFlowReportException\n'), ((7136, 7176), 'bkbase.dataflow.one_model.metric.monitor_handler.MonitorHandler', 'MonitorHandler', 
(['self.topology', 'sink_node'], {}), '(self.topology, sink_node)\n', (7150, 7176), False, 'from bkbase.dataflow.one_model.metric.monitor_handler import MonitorHandler\n'), ((6325, 6425), 'bkbase.dataflow.one_model.metric.monitor_handler.MonitorHandler.get_rt_count', 'MonitorHandler.get_rt_count', (['start', 'end', "source_obj.input['storage_conf']['storekit_hdfs_conf']"], {}), "(start, end, source_obj.input['storage_conf'][\n 'storekit_hdfs_conf'])\n", (6352, 6425), False, 'from bkbase.dataflow.one_model.metric.monitor_handler import MonitorHandler\n'), ((6609, 6631), 'json.dumps', 'json.dumps', (['input_info'], {}), '(input_info)\n', (6619, 6631), False, 'import json\n')]
|
from django.http import HttpResponsePermanentRedirect, HttpResponseNotFound, HttpResponseBadRequest
from django.core.files.storage import default_storage
from easy_thumbnails.files import get_thumbnailer
from easy_thumbnails.exceptions import InvalidImageFormatError
import re
SIZE_RE = re.compile(r'^(\d+),(\d+)$')
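# Redirect to a thumbnail of the stored file, sized per the "size=W,H" (and optional "crop") query parameters; falls back to the original file URL when no size is given.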
def resize(request, path):
thumbnail_opts = {}
if 'size' in request.GET:
if SIZE_RE.match(request.GET['size']):
thumbnail_opts['size'] = map(int, request.GET['size'].split(','))
if 'crop' in request.GET:
thumbnail_opts['crop'] = request.GET['crop']
else:
return HttpResponseBadRequest(u'Size must be expressed in the format "size=[integer],[integer]"')
else:
return HttpResponsePermanentRedirect(default_storage.url(path))
try:
thumbnailer = get_thumbnailer(default_storage, path)
thumbnail = thumbnailer.get_thumbnail(thumbnail_opts)
return HttpResponsePermanentRedirect(thumbnail.url)
except IOError:
return HttpResponseNotFound(u'File not found.')
except InvalidImageFormatError:
return HttpResponseBadRequest(u'File is not an image.')
|
[
"django.core.files.storage.default_storage.url",
"django.http.HttpResponseBadRequest",
"easy_thumbnails.files.get_thumbnailer",
"django.http.HttpResponseNotFound",
"django.http.HttpResponsePermanentRedirect",
"re.compile"
] |
[((289, 318), 're.compile', 're.compile', (['"""^(\\\\d+),(\\\\d+)$"""'], {}), "('^(\\\\d+),(\\\\d+)$')\n", (299, 318), False, 'import re\n'), ((862, 900), 'easy_thumbnails.files.get_thumbnailer', 'get_thumbnailer', (['default_storage', 'path'], {}), '(default_storage, path)\n', (877, 900), False, 'from easy_thumbnails.files import get_thumbnailer\n'), ((978, 1022), 'django.http.HttpResponsePermanentRedirect', 'HttpResponsePermanentRedirect', (['thumbnail.url'], {}), '(thumbnail.url)\n', (1007, 1022), False, 'from django.http import HttpResponsePermanentRedirect, HttpResponseNotFound, HttpResponseBadRequest\n'), ((658, 753), 'django.http.HttpResponseBadRequest', 'HttpResponseBadRequest', (['u"""Size must be expressed in the format "size=[integer],[integer]\\""""'], {}), '(\n u\'Size must be expressed in the format "size=[integer],[integer]"\')\n', (680, 753), False, 'from django.http import HttpResponsePermanentRedirect, HttpResponseNotFound, HttpResponseBadRequest\n'), ((804, 829), 'django.core.files.storage.default_storage.url', 'default_storage.url', (['path'], {}), '(path)\n', (823, 829), False, 'from django.core.files.storage import default_storage\n'), ((1058, 1098), 'django.http.HttpResponseNotFound', 'HttpResponseNotFound', (['u"""File not found."""'], {}), "(u'File not found.')\n", (1078, 1098), False, 'from django.http import HttpResponsePermanentRedirect, HttpResponseNotFound, HttpResponseBadRequest\n'), ((1150, 1198), 'django.http.HttpResponseBadRequest', 'HttpResponseBadRequest', (['u"""File is not an image."""'], {}), "(u'File is not an image.')\n", (1172, 1198), False, 'from django.http import HttpResponsePermanentRedirect, HttpResponseNotFound, HttpResponseBadRequest\n')]
|
import ast, gast
import inspect
import numpy as np
import sys
import typing
from chainer_compiler.elichika.typing import types
from chainer_compiler.elichika.typing.type_inference import InferenceEngine
from chainer_compiler.elichika.typing.utils import node_description, is_expr
from chainer_compiler.elichika.parser import utils
class IDAssignor(gast.NodeVisitor):
def __init__(self):
self.counter = 0
self.node2id = {}
def visit(self, node):
self.node2id[node] = self.counter
self.counter += 1
return super().visit(node)
def run(self, node, subroutine_node):
self.visit(node)
for ns in subroutine_node.values():
for n in ns:
self.visit(n)
return self.node2id
def generate_node2id(tree, subroutine_node):
a = IDAssignor()
node2id = a.run(tree, subroutine_node)
return node2id
def generate_id2node(node2id):
id2node = {}
for n, i in node2id.items():
id2node[i] = n
return id2node
def generate_node2type(tree, args, is_debug=False, module=None, type_hints={}):
reset_state()
tc = InferenceEngine(is_debug=is_debug, module=module)
func_body = tree.body[0] # XXX: only checks first function
try:
node2type = tc.infer_function_value_args(func_body, args, type_hints=type_hints)
return node2type, tc.subroutine_node
except Exception as e:
tc.dump_tyenv()
raise e
def generate_id2type(node2type, node2id):
id2type = {}
for n, t in node2type.items():
if n not in node2id.keys(): continue # user-defined modules in nn.Sequential
id2type[node2id[n]] = t
return id2type
def generate_assertion(type_table_name, id2type, id2node, ofile=None):
for i, t in sorted(id2type.items()):
node = id2node[i]
if not is_expr(node):
if not isinstance(node, gast.FunctionDef):
continue
output = " # === function {} ===".format(node.name)
else:
comment = "\t# " + node_description(node)
output = " self.assertEqual(str({}[{}]), \"{}\"){}".format( \
type_table_name, i, t, comment)
if ofile is None:
print(output)
else:
ofile.write(output + '\n')
# For testing
def generate_id2type_from_forward(model, args, is_debug=False):
code = utils.clip_head(inspect.getsource(model.forward))
tree = gast.ast_to_gast(ast.parse(code))
module = sys.modules[model.forward.__module__]
node2type, subroutine_node = generate_node2type(
tree, (model,) + args, is_debug=is_debug, module=module,
type_hints=typing.get_type_hints(model.forward))
node2id = generate_node2id(tree, subroutine_node)
id2type = generate_id2type(node2type, node2id)
return id2type
# For debug
def generate_type_inference_results(model, forward_args, is_debug=True):
code = utils.clip_head(inspect.getsource(model.forward))
node = gast.ast_to_gast(ast.parse(code))
# node = Canonicalizer().visit(node)
module = sys.modules[model.forward.__module__]
node2type, subroutine_node = generate_node2type(
node, (model,) + forward_args, is_debug=is_debug, module=module,
type_hints=typing.get_type_hints(model.forward))
node2id = generate_node2id(node, subroutine_node)
id2type = generate_id2type(node2type, node2id)
id2node = generate_id2node(node2id)
return id2type, id2node
def reset_state():
np.random.seed(42)
types.var_counter = 0
if __name__ == '__main__':
import argparse
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from tests.elichika_typing.EspNet_test import *
from tests.elichika_typing.Models_test import *
parser = argparse.ArgumentParser()
parser.add_argument("-e", help="Execute the script", action="store_true")
parser.add_argument("-o",
help="Specify file name to output the assertions", type=str)
args = parser.parse_args()
class Test():
def forward(self):
x = np.zeros((1, 1)).astype('float32')
y = F.pad_sequence([x], length=5)
return y
# model, forward_args = gen_MLP_model()
model, forward_args = gen_GoogLeNet_model()
# model, forward_args = gen_AttDot_model()
# model, forward_args = gen_AttLoc_model()
# model, forward_args = gen_BLSTM_model()
# model, forward_args = gen_VGG2L_model()
# model, forward_args = gen_StatelessLSTM_model()
# model, forward_args = gen_Decoder_model()
# model, forward_args = gen_E2E_model()
# model, forward_args = Test(), ()
if args.e:
model.forward(*forward_args)
else:
id2type, id2node = generate_type_inference_results(model, forward_args)
if args.o:
ofile = open(args.o, 'w')
generate_assertion("id2type", id2type, id2node, ofile)
|
[
"chainer_compiler.elichika.typing.type_inference.InferenceEngine",
"numpy.random.seed",
"argparse.ArgumentParser",
"chainer_compiler.elichika.typing.utils.is_expr",
"typing.get_type_hints",
"numpy.zeros",
"inspect.getsource",
"chainer.functions.pad_sequence",
"ast.parse",
"chainer_compiler.elichika.typing.utils.node_description"
] |
[((1137, 1186), 'chainer_compiler.elichika.typing.type_inference.InferenceEngine', 'InferenceEngine', ([], {'is_debug': 'is_debug', 'module': 'module'}), '(is_debug=is_debug, module=module)\n', (1152, 1186), False, 'from chainer_compiler.elichika.typing.type_inference import InferenceEngine\n'), ((3532, 3550), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3546, 3550), True, 'import numpy as np\n'), ((3852, 3877), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3875, 3877), False, 'import argparse\n'), ((2429, 2461), 'inspect.getsource', 'inspect.getsource', (['model.forward'], {}), '(model.forward)\n', (2446, 2461), False, 'import inspect\n'), ((2491, 2506), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (2500, 2506), False, 'import ast, gast\n'), ((2980, 3012), 'inspect.getsource', 'inspect.getsource', (['model.forward'], {}), '(model.forward)\n', (2997, 3012), False, 'import inspect\n'), ((3042, 3057), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (3051, 3057), False, 'import ast, gast\n'), ((1850, 1863), 'chainer_compiler.elichika.typing.utils.is_expr', 'is_expr', (['node'], {}), '(node)\n', (1857, 1863), False, 'from chainer_compiler.elichika.typing.utils import node_description, is_expr\n'), ((2704, 2740), 'typing.get_type_hints', 'typing.get_type_hints', (['model.forward'], {}), '(model.forward)\n', (2725, 2740), False, 'import typing\n'), ((3296, 3332), 'typing.get_type_hints', 'typing.get_type_hints', (['model.forward'], {}), '(model.forward)\n', (3317, 3332), False, 'import typing\n'), ((4204, 4233), 'chainer.functions.pad_sequence', 'F.pad_sequence', (['[x]'], {'length': '(5)'}), '([x], length=5)\n', (4218, 4233), True, 'import chainer.functions as F\n'), ((2061, 2083), 'chainer_compiler.elichika.typing.utils.node_description', 'node_description', (['node'], {}), '(node)\n', (2077, 2083), False, 'from chainer_compiler.elichika.typing.utils import node_description, is_expr\n'), ((4153, 4169), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (4161, 4169), True, 'import numpy as np\n')]
|
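A minimal, self-contained sketch of the node-numbering idea behind IDAssignor, written against the standard-library ast module so it runs without gast or chainer_compiler installed; the class name and the sample source snippet are placeholders, not part of the original file.

import ast

class SimpleIDAssignor(ast.NodeVisitor):
    """Assign a sequential integer ID to every AST node, in visit order."""
    def __init__(self):
        self.counter = 0
        self.node2id = {}

    def visit(self, node):
        # Number the node, then let generic_visit recurse into its children.
        self.node2id[node] = self.counter
        self.counter += 1
        return super().visit(node)

source = "def add(x, y):\n    return x + y\n"  # placeholder snippet
tree = ast.parse(source)
assignor = SimpleIDAssignor()
assignor.visit(tree)
for node, node_id in sorted(assignor.node2id.items(), key=lambda kv: kv[1]):
    print(node_id, type(node).__name__)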
#!/bin/python3
#https://docs.python.org/3/howto/sockets.html
import sys
import socket
from datetime import datetime
if len(sys.argv) == 2:
    target = socket.gethostbyname(sys.argv[1]) # translate hostname to ipv4
else:
print("invalid amoount of args")
print("syntax: pyhon3 scanner.py <ip>")
exit()
print("banner")
print(target)
print("time:"+str(datetime.now()))
HOST = target
try:
for port in range(50,85):
        # set the default timeout first so every new socket gets a 1 second timeout
        socket.setdefaulttimeout(1)
        # create an INET, STREAMing socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # probe the current port on the target host
        result = s.connect_ex((HOST, port))
if result == 0:
print("port {} is open".format(port))
s.close()
except KeyboardInterrupt:
print("exiting")
sys.exit()
except socket.gaierror:
print("exiting")
sys.exit()
except socket.error:
print("exiting")
sys.exit()
|
[
"socket.socket",
"socket.gethostbyname",
"socket.setdefaulttimeout",
"datetime.datetime.now",
"sys.exit"
] |
[((154, 187), 'socket.gethostbyname', 'socket.gethostbyname', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (174, 187), False, 'import socket\n'), ((498, 547), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (511, 547), False, 'import socket\n'), ((630, 657), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['(1)'], {}), '(1)\n', (654, 657), False, 'import socket\n'), ((845, 855), 'sys.exit', 'sys.exit', ([], {}), '()\n', (853, 855), False, 'import sys\n'), ((904, 914), 'sys.exit', 'sys.exit', ([], {}), '()\n', (912, 914), False, 'import sys\n'), ((962, 972), 'sys.exit', 'sys.exit', ([], {}), '()\n', (970, 972), False, 'import sys\n'), ((354, 368), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (366, 368), False, 'from datetime import datetime\n')]
|
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
##==================================
## Selectors
##==================================
out = dbc.Col([
dbc.Row([
# Class selector (what differentiates the data)
dbc.Col([
html.Div([
html.P('Classes: ')
], className = 'dropdown-label', style = dict(marginRight = 30)
),
html.Div([
dcc.Dropdown(
id='pca-class-select',
options = [],
placeholder = 'Select...',
clearable = True,
)
], className = 'dropdown-axis'
),
], className = 'dropdown-group'
),
], justify = 'center'),
],
id = 'pca-axis-dropdowns',
width = dict(size = 12, offset = 0),
style = dict(
marginTop = 30,
marginLeft = 0,
marginRight = 0,
),
align = 'center',
)
def get():
return out
|
[
"dash_html_components.P",
"dash_core_components.Dropdown"
] |
[((362, 384), 'dash_html_components.P', 'html.P', (['"""Classes: """'], {}), "('Classes: ')\n", (368, 384), True, 'import dash_html_components as html\n'), ((532, 624), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""pca-class-select"""', 'options': '[]', 'placeholder': '"""Select..."""', 'clearable': '(True)'}), "(id='pca-class-select', options=[], placeholder='Select...',\n clearable=True)\n", (544, 624), True, 'import dash_core_components as dcc\n')]
|
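A short sketch of how the layout module above might be mounted in a Dash app, with a callback filling the pca-class-select options at runtime. The module name pca_selectors, the Interval trigger, and the class names are assumptions for illustration, not part of the original code.

import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

from pca_selectors import get  # hypothetical module name for the file above

app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = html.Div([
    get(),
    dcc.Interval(id='refresh', interval=60 * 1000),  # assumed data-refresh trigger
])

@app.callback(Output('pca-class-select', 'options'),
              [Input('refresh', 'n_intervals')])
def fill_class_options(_):
    # In the real app these would come from the loaded dataset; placeholders here.
    classes = ['class A', 'class B']
    return [{'label': c, 'value': c} for c in classes]

if __name__ == '__main__':
    app.run_server(debug=True)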
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: <NAME> (<EMAIL>)
from typing import Tuple, List, Dict
from collections import defaultdict
import uuid
from fim.graph.abc_property_graph import ABCPropertyGraph, PropertyGraphQueryException
from fim.graph.resources.abc_cbm import ABCCBMPropertyGraph
from fim.graph.resources.abc_bqm import ABCBQMPropertyGraph
from fim.graph.networkx_property_graph import NetworkXGraphImporter
from fim.graph.resources.networkx_abqm import NetworkXAggregateBQM
from fim.slivers.capacities_labels import Capacities
from fim.slivers.delegations import DelegationFormat
from fim.slivers.network_node import CompositeNodeSliver, NodeType
from fim.slivers.attached_components import ComponentSliver, ComponentType
from fim.slivers.interface_info import InterfaceType
from fim.slivers.network_service import ServiceType
class AggregatedBQMPlugin:
"""
Implement a plugin for simple aggregation of CBM into BQM, transforming site
topologies into CompositeNodes and linking them with (Composite)Links
This is based on fim.pluggable BrokerPluggable
"""
# set to true to test creating ABQM without talking to database or requiring
# an actor reference
DEBUG_FLAG = False
def __init__(self, actor, logger=None):
if not self.DEBUG_FLAG:
assert actor is not None
self.actor = actor
self.logger = logger
@staticmethod
def _remove_none_entries(d):
return {k: v for (k, v) in d.items() if v}
def __occupied_node_capacity(self, *, node_id: str) -> Tuple[Capacities,
Dict[ComponentType, Dict[str, Capacities]]]:
"""
Figure out the total capacity occupied in the network node and return a tuple of
capacities occupied in this node and a dict of component capacities that are occupied
organized by component type and model.
"""
assert node_id is not None
# get existing reservations for this node
existing_reservations = self.actor.get_plugin().get_database().get_reservations(graph_node_id=node_id)
# node capacities
occupied_capacities = Capacities()
occupied_component_capacities = defaultdict(dict)
# Remove allocated capacities to the reservations
if existing_reservations is not None:
for reservation in existing_reservations:
# For Active or Ticketed or Ticketing reservations; compute the counts from available
allocated_sliver = None
if reservation.is_ticketing() and reservation.get_approved_resources() is not None:
allocated_sliver = reservation.get_approved_resources().get_sliver()
if (reservation.is_active() or reservation.is_ticketed()) and \
reservation.get_resources() is not None:
allocated_sliver = reservation.get_resources().get_sliver()
if allocated_sliver is not None:
occupied_capacities = occupied_capacities + allocated_sliver.get_capacities()
if allocated_sliver.attached_components_info is not None:
for allocated_component in allocated_sliver.attached_components_info.devices.values():
rt = allocated_component.resource_type
rm = allocated_component.resource_model
if occupied_component_capacities[rt].get(rm) is None:
occupied_component_capacities[rt][rm] = Capacities()
occupied_component_capacities[rt][rm] = occupied_component_capacities[rt][rm] + \
allocated_component.capacity_allocations
return occupied_capacities, occupied_component_capacities
def plug_produce_bqm(self, *, cbm: ABCCBMPropertyGraph, **kwargs) -> ABCBQMPropertyGraph:
"""
Take a CBM, sort nodes by site, aggregate servers, components and interfaces to
create a site-based advertisement. Use a NetworkX-based implementation.
:param cbm:
:param kwargs:
:return:
"""
if kwargs.get('query_level', None) is None or kwargs['query_level'] != 1:
return cbm.clone_graph(new_graph_id=str(uuid.uuid4()))
# do a one-pass aggregation of servers, their components and interfaces
# this includes facilities
nnodes = cbm.get_all_nodes_by_class(label=ABCPropertyGraph.CLASS_NetworkNode)
slivers_by_site = defaultdict(list)
for n in nnodes:
# build deep slivers for each advertised server, aggregate by site
node_sliver = cbm.build_deep_node_sliver(node_id=n)
slivers_by_site[node_sliver.site].append(node_sliver)
# create a new blank Aggregated BQM NetworkX graph
abqm = NetworkXAggregateBQM(graph_id=str(uuid.uuid4()),
importer=NetworkXGraphImporter(logger=self.logger),
logger=self.logger)
site_to_composite_node_id = dict()
site_to_ns_node_id = dict()
facilities_by_site = defaultdict(list)
for s, ls in slivers_by_site.items():
# add up capacities and delegated capacities, skip labels for now
# count up components and figure out links between site
site_sliver = CompositeNodeSliver()
# count what is taken
site_sliver.capacity_allocations = Capacities()
# count what is available
site_sliver.capacities = Capacities()
site_sliver.resource_name = s
site_sliver.resource_type = NodeType.Server
site_sliver.node_id = str(uuid.uuid4())
# available components organized by [type][model]
site_comps_by_type = defaultdict(dict)
# occupied component capacities organized by [type][model] into lists (by server)
site_allocated_comps_caps_by_type = defaultdict(dict)
loc = None
for sliver in ls:
if sliver.get_type() != NodeType.Server:
# skipping NAS, Facility and dataplane switches
if sliver.get_type() == NodeType.Facility:
# keep track of facilities for each site
facilities_by_site[s].append(sliver)
continue
if self.DEBUG_FLAG:
# for debugging and running in a test environment
allocated_comp_caps = dict()
else:
# query database for everything taken on this node
allocated_caps, allocated_comp_caps = self.__occupied_node_capacity(node_id=sliver.node_id)
site_sliver.capacity_allocations = site_sliver.capacity_allocations + allocated_caps
# get the location if available
if loc is None:
loc = sliver.get_location()
# calculate available node capacities based on delegations
if sliver.get_capacity_delegations() is not None:
# CBM only has one delegation if it has one
_, delegation = sliver.get_capacity_delegations().get_sole_delegation()
# FIXME: skip pool definitions and references for now
if delegation.get_format() == DelegationFormat.SinglePool:
site_sliver.capacities = site_sliver.capacities + \
delegation.get_details()
# merge allocated component capacities
for kt, v in allocated_comp_caps.items():
for km, vcap in v.items():
if site_allocated_comps_caps_by_type[kt].get(km) is None:
site_allocated_comps_caps_by_type[kt][km] = Capacities()
site_allocated_comps_caps_by_type[kt][km] = site_allocated_comps_caps_by_type[kt][km] + \
vcap
# collect available components in lists by type and model for the site (for later aggregation)
if sliver.attached_components_info is None:
continue
for comp in sliver.attached_components_info.list_devices():
rt = comp.resource_type
rm = comp.resource_model
if site_comps_by_type[rt].get(rm) is None:
site_comps_by_type[rt][rm] = list()
site_comps_by_type[rt][rm].append(comp)
# set location to whatever is available
site_sliver.set_location(loc)
site_sliver.set_site(s)
# create a Composite node for every site
site_to_composite_node_id[s] = site_sliver.node_id
site_props = abqm.node_sliver_to_graph_properties_dict(site_sliver)
abqm.add_node(node_id=site_sliver.node_id, label=ABCPropertyGraph.CLASS_CompositeNode,
props=site_props)
# add a network service
ns_id = str(uuid.uuid4())
site_to_ns_node_id[s] = ns_id
ns_props = {ABCPropertyGraph.PROP_NAME: s + '_ns',
ABCPropertyGraph.PROP_TYPE: str(ServiceType.MPLS)}
abqm.add_node(node_id=ns_id, label=ABCPropertyGraph.CLASS_NetworkService, props=ns_props)
abqm.add_link(node_a=site_sliver.node_id, rel=ABCPropertyGraph.REL_HAS, node_b=ns_id)
# create a component sliver for every component type/model pairing
# and add a node for it linking back to site node
for ctype, cdict in site_comps_by_type.items():
for cmodel, comp_list in cdict.items():
comp_sliver = ComponentSliver()
# count what is available
comp_sliver.capacities = Capacities()
# count what is taken (ignore those type/model pairings that were unused)
comp_sliver.capacity_allocations = site_allocated_comps_caps_by_type[ctype].get(cmodel) or \
Capacities()
comp_sliver.set_type(ctype)
comp_sliver.set_model(cmodel)
comp_sliver.set_name(str(ctype) + '-' + cmodel)
for comp in comp_list:
comp_sliver.capacities = comp_sliver.capacities + comp.capacities
comp_node_id = str(uuid.uuid4())
comp_props = abqm.component_sliver_to_graph_properties_dict(comp_sliver)
abqm.add_node(node_id=comp_node_id, label=ABCPropertyGraph.CLASS_Component,
props=comp_props)
abqm.add_link(node_a=site_sliver.node_id, rel=ABCPropertyGraph.REL_HAS,
node_b=comp_node_id)
# get all intersite links - add them to the aggregated BQM graph
intersite_links = cbm.get_intersite_links()
for l in intersite_links:
source_switch = l[0]
sink_switch = l[2]
link = l[1]
source_site = l[3]
sink_site = l[4]
source_cp = l[5]
sink_cp = l[6]
_, cbm_source_cp_props = cbm.get_node_properties(node_id=source_cp)
_, cbm_sink_cp_props = cbm.get_node_properties(node_id=sink_cp)
_, cbm_link_props = cbm.get_node_properties(node_id=link)
# add connection point, link, connection point between two NetworkServices
assert(site_to_ns_node_id.get(source_site) is not None and
site_to_ns_node_id.get(sink_site) is not None)
source_cp_id = str(uuid.uuid4())
sink_cp_id = str(uuid.uuid4())
source_cp_props = {ABCPropertyGraph.PROP_NAME: "_".join([source_site, sink_site]),
ABCPropertyGraph.PROP_TYPE: str(InterfaceType.TrunkPort),
ABCPropertyGraph.PROP_CLASS: ABCPropertyGraph.CLASS_ConnectionPoint,
ABCPropertyGraph.PROP_LABELS: cbm_source_cp_props.get(ABCPropertyGraph.PROP_LABELS),
ABCPropertyGraph.PROP_CAPACITIES: cbm_source_cp_props.get(ABCPropertyGraph.PROP_CAPACITIES)
}
source_cp_props = {k: v for (k, v) in source_cp_props.items() if v}
abqm.add_node(node_id=source_cp_id, label=ABCPropertyGraph.CLASS_ConnectionPoint,
props=source_cp_props)
# FIXME: CP names may not be unique if we are dealing with a multigraph
sink_cp_props = {ABCPropertyGraph.PROP_NAME: "_".join([sink_site, source_site]),
ABCPropertyGraph.PROP_TYPE: str(InterfaceType.TrunkPort),
ABCPropertyGraph.PROP_CLASS: ABCPropertyGraph.CLASS_ConnectionPoint,
ABCPropertyGraph.PROP_LABELS: cbm_sink_cp_props.get(ABCPropertyGraph.PROP_LABELS),
ABCPropertyGraph.PROP_CAPACITIES: cbm_sink_cp_props.get(ABCPropertyGraph.PROP_CAPACITIES)
}
sink_cp_props = {k: v for (k, v) in sink_cp_props.items() if v}
abqm.add_node(node_id=sink_cp_id, label=ABCPropertyGraph.CLASS_ConnectionPoint,
props=sink_cp_props)
# selectively replicate link node and its properties from CBM
new_link_props = {ABCPropertyGraph.PROP_NAME: cbm_link_props[ABCPropertyGraph.PROP_NAME],
ABCPropertyGraph.PROP_TYPE: cbm_link_props[ABCPropertyGraph.PROP_TYPE],
ABCPropertyGraph.PROP_CLASS: cbm_link_props[ABCPropertyGraph.PROP_CLASS],
ABCPropertyGraph.PROP_LAYER: cbm_link_props[ABCPropertyGraph.PROP_LAYER]
}
abqm.add_node(node_id=link, label=ABCPropertyGraph.CLASS_Link, props=new_link_props)
# connect them together
abqm.add_link(node_a=site_to_ns_node_id[source_site], rel=ABCPropertyGraph.REL_CONNECTS,
node_b=source_cp_id)
abqm.add_link(node_a=source_cp_id, rel=ABCPropertyGraph.REL_CONNECTS,
node_b=link)
abqm.add_link(node_a=link, rel=ABCPropertyGraph.REL_CONNECTS,
node_b=sink_cp_id)
abqm.add_link(node_a=sink_cp_id, rel=ABCPropertyGraph.REL_CONNECTS,
node_b=site_to_ns_node_id[sink_site])
# link facilities to their sites
for s, lf in facilities_by_site.items():
# multiple facilities per site possible
for fac_sliver in lf:
fac_nbs = cbm.get_first_and_second_neighbor(node_id=fac_sliver.node_id,
rel1=ABCPropertyGraph.REL_HAS,
node1_label=ABCPropertyGraph.CLASS_NetworkService,
rel2=ABCPropertyGraph.REL_CONNECTS,
node2_label=ABCPropertyGraph.CLASS_ConnectionPoint)
try:
fac_ns_node_id = fac_nbs[0][0]
fac_cp_node_id = fac_nbs[0][1]
except KeyError:
if self.logger:
self.logger.warning(f'Unable to trace facility ConnectionPoint for '
f'facility {fac_sliver.resource_name}, continuing')
else:
print(f'Unable to trace facility ConnectionPoint for '
f'facility {fac_sliver.resource_name}, continuing')
continue
_, fac_props = cbm.get_node_properties(node_id=fac_sliver.node_id)
_, fac_ns_props = cbm.get_node_properties(node_id=fac_ns_node_id)
_, fac_cp_props = cbm.get_node_properties(node_id=fac_cp_node_id)
# filter down only the needed properties then recreate the structure of facility in ABQM
new_fac_props = {ABCPropertyGraph.PROP_NAME: fac_props[ABCPropertyGraph.PROP_NAME],
ABCPropertyGraph.PROP_TYPE: fac_props[ABCPropertyGraph.PROP_TYPE]
}
abqm.add_node(node_id=fac_sliver.node_id, label=ABCPropertyGraph.CLASS_NetworkNode,
props=new_fac_props)
new_ns_props = {ABCPropertyGraph.PROP_NAME: fac_ns_props[ABCPropertyGraph.PROP_NAME],
ABCPropertyGraph.PROP_TYPE: fac_ns_props[ABCPropertyGraph.PROP_TYPE]
}
abqm.add_node(node_id=fac_ns_node_id, label=ABCPropertyGraph.CLASS_NetworkService,
props=new_ns_props)
new_cp_props = {ABCPropertyGraph.PROP_NAME: fac_cp_props[ABCPropertyGraph.PROP_NAME],
ABCPropertyGraph.PROP_TYPE: fac_cp_props[ABCPropertyGraph.PROP_TYPE],
ABCPropertyGraph.PROP_LABELS: fac_cp_props.get(ABCPropertyGraph.PROP_LABELS),
ABCPropertyGraph.PROP_CAPACITIES: fac_cp_props.get(ABCPropertyGraph.PROP_CAPACITIES)
}
new_cp_props = {k: v for (k, v) in new_cp_props.items() if v}
abqm.add_node(node_id=fac_cp_node_id, label=ABCPropertyGraph.CLASS_ConnectionPoint,
props=new_cp_props)
abqm.add_link(node_a=fac_sliver.node_id, rel=ABCPropertyGraph.REL_HAS, node_b=fac_ns_node_id)
abqm.add_link(node_a=fac_ns_node_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=fac_cp_node_id)
# trace the link to a switch port/ConnectionPoint and replicate them for simplicity
fac_cp_nbs = cbm.get_first_and_second_neighbor(node_id=fac_cp_node_id,
rel1=ABCPropertyGraph.REL_CONNECTS,
node1_label=ABCPropertyGraph.CLASS_Link,
rel2=ABCPropertyGraph.REL_CONNECTS,
node2_label=ABCPropertyGraph.CLASS_ConnectionPoint)
if len(fac_cp_nbs) == 0 or len(fac_cp_nbs) > 1:
if self.logger:
self.logger.warning(f'Unable to trace switch port from Facility port '
f'for facility {fac_sliver.resource_name} {fac_cp_nbs}')
else:
print(f'Unable to trace switch port from Facility port '
f'for facility {fac_sliver.resource_name} {fac_cp_nbs}')
continue
fac_link_id = fac_cp_nbs[0][0]
fac_sp_id = fac_cp_nbs[0][1]
_, fac_link_props = cbm.get_node_properties(node_id=fac_link_id)
# selectively replicate link properties
new_link_props = {ABCPropertyGraph.PROP_NAME: fac_link_props[ABCPropertyGraph.PROP_NAME],
ABCPropertyGraph.PROP_TYPE: fac_link_props[ABCPropertyGraph.PROP_TYPE],
ABCPropertyGraph.PROP_LAYER: fac_link_props[ABCPropertyGraph.PROP_LAYER]
}
abqm.add_node(node_id=fac_link_id, label=ABCPropertyGraph.CLASS_Link,
props=new_link_props)
try:
abqm.get_node_properties(node_id=fac_sp_id)
except PropertyGraphQueryException:
# if the node doesn't exist we need to create it (it could have been created in the first pass)
_, fac_sp_props = cbm.get_node_properties(node_id=fac_sp_id)
new_sp_props = {ABCPropertyGraph.PROP_NAME: fac_sp_props[ABCPropertyGraph.PROP_NAME],
ABCPropertyGraph.PROP_TYPE: fac_sp_props[ABCPropertyGraph.PROP_TYPE],
ABCPropertyGraph.PROP_CAPACITIES: fac_sp_props.get(
ABCPropertyGraph.PROP_CAPACITIES),
ABCPropertyGraph.PROP_LABELS: fac_sp_props.get(ABCPropertyGraph.PROP_LABELS)
}
new_sp_props = {k: v for (k, v) in new_sp_props.items() if v}
abqm.add_node(node_id=fac_sp_id, label=ABCPropertyGraph.CLASS_ConnectionPoint,
props=new_sp_props)
# link these together
abqm.add_link(node_a=fac_cp_node_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=fac_link_id)
abqm.add_link(node_a=fac_link_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=fac_sp_id)
abqm.add_link(node_a=fac_sp_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=site_to_ns_node_id[s])
return abqm
|
[
"fim.slivers.attached_components.ComponentSliver",
"uuid.uuid4",
"fim.slivers.network_node.CompositeNodeSliver",
"collections.defaultdict",
"fim.graph.networkx_property_graph.NetworkXGraphImporter",
"fim.slivers.capacities_labels.Capacities"
] |
[((3293, 3305), 'fim.slivers.capacities_labels.Capacities', 'Capacities', ([], {}), '()\n', (3303, 3305), False, 'from fim.slivers.capacities_labels import Capacities\n'), ((3346, 3363), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3357, 3363), False, 'from collections import defaultdict\n'), ((5720, 5737), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5731, 5737), False, 'from collections import defaultdict\n'), ((6349, 6366), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6360, 6366), False, 'from collections import defaultdict\n'), ((6586, 6607), 'fim.slivers.network_node.CompositeNodeSliver', 'CompositeNodeSliver', ([], {}), '()\n', (6605, 6607), False, 'from fim.slivers.network_node import CompositeNodeSliver, NodeType\n'), ((6689, 6701), 'fim.slivers.capacities_labels.Capacities', 'Capacities', ([], {}), '()\n', (6699, 6701), False, 'from fim.slivers.capacities_labels import Capacities\n'), ((6777, 6789), 'fim.slivers.capacities_labels.Capacities', 'Capacities', ([], {}), '()\n', (6787, 6789), False, 'from fim.slivers.capacities_labels import Capacities\n'), ((7035, 7052), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (7046, 7052), False, 'from collections import defaultdict\n'), ((7195, 7212), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (7206, 7212), False, 'from collections import defaultdict\n'), ((6141, 6182), 'fim.graph.networkx_property_graph.NetworkXGraphImporter', 'NetworkXGraphImporter', ([], {'logger': 'self.logger'}), '(logger=self.logger)\n', (6162, 6182), False, 'from fim.graph.networkx_property_graph import NetworkXGraphImporter\n'), ((6926, 6938), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6936, 6938), False, 'import uuid\n'), ((10379, 10391), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10389, 10391), False, 'import uuid\n'), ((13047, 13059), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13057, 13059), False, 'import uuid\n'), ((13090, 13102), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13100, 13102), False, 'import uuid\n'), ((6081, 6093), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6091, 6093), False, 'import uuid\n'), ((11065, 11082), 'fim.slivers.attached_components.ComponentSliver', 'ComponentSliver', ([], {}), '()\n', (11080, 11082), False, 'from fim.slivers.attached_components import ComponentSliver, ComponentType\n'), ((11174, 11186), 'fim.slivers.capacities_labels.Capacities', 'Capacities', ([], {}), '()\n', (11184, 11186), False, 'from fim.slivers.capacities_labels import Capacities\n'), ((5477, 5489), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5487, 5489), False, 'import uuid\n'), ((11449, 11461), 'fim.slivers.capacities_labels.Capacities', 'Capacities', ([], {}), '()\n', (11459, 11461), False, 'from fim.slivers.capacities_labels import Capacities\n'), ((11800, 11812), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11810, 11812), False, 'import uuid\n'), ((9099, 9111), 'fim.slivers.capacities_labels.Capacities', 'Capacities', ([], {}), '()\n', (9109, 9111), False, 'from fim.slivers.capacities_labels import Capacities\n'), ((4706, 4718), 'fim.slivers.capacities_labels.Capacities', 'Capacities', ([], {}), '()\n', (4716, 4718), False, 'from fim.slivers.capacities_labels import Capacities\n')]
|
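The core of plug_produce_bqm is a two-level aggregation: node capacities are summed per site, and component capacities are summed per (type, model) within each site. Below is a stand-alone sketch of that pattern; the Cap dataclass is only a stand-in for fim's Capacities, and the sites, models, and numbers are made up.

from collections import defaultdict
from dataclasses import dataclass

@dataclass
class Cap:
    """Tiny stand-in for fim.slivers.capacities_labels.Capacities."""
    core: int = 0
    ram: int = 0

    def __add__(self, other):
        return Cap(self.core + other.core, self.ram + other.ram)

# One entry per advertised server:
# (site, server capacity, attached components as (type, model, capacity))
servers = [
    ('RENC', Cap(core=32, ram=128), [('GPU', 'RTX6000', Cap(core=0, ram=24))]),
    ('RENC', Cap(core=32, ram=128), [('GPU', 'RTX6000', Cap(core=0, ram=24))]),
    ('UKY',  Cap(core=64, ram=256), []),
]

site_caps = defaultdict(Cap)                         # total node capacity per site
site_comps = defaultdict(lambda: defaultdict(Cap))   # per site: {(type, model): capacity}

for site, cap, comps in servers:
    site_caps[site] = site_caps[site] + cap
    for ctype, cmodel, ccap in comps:
        site_comps[site][(ctype, cmodel)] = site_comps[site][(ctype, cmodel)] + ccap

for site, cap in site_caps.items():
    print(site, cap, dict(site_comps[site]))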
#!/usr/bin/env python
#
# Copyright (c) 2015 10X Genomics, Inc. All rights reserved.
#
import cPickle
from collections import defaultdict
from itertools import izip
import json
import numpy as np
import cellranger.constants as cr_constants
import cellranger.library_constants as lib_constants
from cellranger.molecule_counter import MoleculeCounter
import cellranger.rna.library as rna_library
import cellranger.utils as cr_utils
import tenkit.safe_json as tk_safe_json
import tenkit.stats as tk_stats
__MRO__ = """
stage SUBSAMPLE_READS(
in h5 molecule_info,
in csv filtered_barcodes,
out json summary,
src py "stages/counter/subsample_reads",
) split using (
in int chunk_start,
in int chunk_len,
in map[] subsample_info,
out pickle metrics,
)
"""
def get_cell_associated_barcodes(genomes, filtered_barcodes_csv):
""" Get cell-associated barcodes by genome.
Args:
genomes (list of str): Genome names.
filtered_barcodes_csv (str): Path to CSV file.
Returns:
dict of (str, set): Map genome to list of cell-assoc barcodes. Empty-string key is for all genomes."""
cell_bcs = {}
for genome in genomes:
# Get all cell-assoc barcodes (ignoring genome) for the "" (blank) genome string
cell_bcs[genome] = cr_utils.get_cell_associated_barcode_set(filtered_barcodes_csv,
genome)
# All cell-associated barcodes
cell_bcs[''] = reduce(lambda x,y: x | y, cell_bcs.itervalues(), set())
return cell_bcs
def split(args):
# Get required info from the mol info
mc = MoleculeCounter.open(args.molecule_info, 'r')
genomes = sorted(set(f.tags.get('genome', '') for f in mc.feature_reference.feature_defs))
cell_bcs_by_genome = get_cell_associated_barcodes(genomes, args.filtered_barcodes)
# Get cell counts per gem group
n_cells_per_gg = defaultdict(int)
for bc in cell_bcs_by_genome['']:
_, gem_group = cr_utils.split_barcode_seq(bc)
n_cells_per_gg[gem_group] += 1
# Assign gem group cell counts to their constituent libraries
# TODO FIXME: Need to allow for per-library cell counts
# because some feature types might only have a subset of the GEX cell-assoc barcodes.
n_cells_per_lib = np.zeros(len(mc.library_info), dtype=int)
for lib_idx, lib in enumerate(mc.library_info):
n_cells_per_lib[lib_idx] = n_cells_per_gg[lib['gem_group']]
if n_cells_per_lib.sum() == 0:
return {'chunks': []}
library_info = mc.library_info
raw_count_per_lib = np.array(mc.get_raw_read_pairs_per_library())
raw_rppc_per_lib = raw_count_per_lib.astype(float) / n_cells_per_lib
usable_count_per_lib = np.array(mc.get_usable_read_pairs_per_library())
subsamplings = list() # track subsample info definitions
library_types = sorted(set(lib['library_type'] for lib in library_info))
for library_type in library_types:
# All libraries w/ this type
lib_indexes = np.array([i for i,lib in enumerate(library_info) if lib['library_type'] == library_type])
# For plotting, we want a series of target depths that exist for all
# libraries w/ the same library type. When there's a single library
# per type (the common case), this is trivial - split it into deciles.
# But if there are multiple libraries with different depths, (e.g.,
# because gem-group-aggregation was used to increase cell numbers),
# we need to find depths that are achievable for all libraries.
# For now, let the lowest-depth library for a given type dictate this.
min_raw_rppc = np.min(raw_rppc_per_lib[lib_indexes])
# Use deciles of the raw read pairs per cell.
deciles = np.arange(0.1, 1.1, 0.1)
plot_targets = map(round, min_raw_rppc * deciles)
# TODO: separate this work (internal + non)
raw_targets = cr_constants.SUBSAMPLE_READS_PER_CELL + \
plot_targets
# TODO: separate this work (internal + non)
usable_targets = cr_constants.SUBSAMPLE_READS_PER_CELL + \
plot_targets
for targets, depth_type in \
((raw_targets, cr_constants.RAW_SUBSAMPLE_TYPE), \
((usable_targets, cr_constants.MAPPED_SUBSAMPLE_TYPE)),):
targets = sorted(list(set(map(int, targets))))
for target_rppc in targets:
if depth_type == cr_constants.RAW_SUBSAMPLE_TYPE:
# Infer the usable depth required to achieve this raw depth
usable_read_fracs = usable_count_per_lib.astype(float) / raw_count_per_lib
target_usable_counts = target_rppc * n_cells_per_lib * usable_read_fracs
else:
target_usable_counts = target_rppc * n_cells_per_lib
# Zero out libraries of the other types
rates = np.zeros(len(library_info), dtype=float)
rates[lib_indexes] = target_usable_counts[lib_indexes].astype(float) \
/ usable_count_per_lib[lib_indexes]
# Clamp rates that are close to 1 to 1
rates[np.absolute(rates - 1) < 1e-3] = 1
# Zero out the libraries for which we have fewer reads than the target
rates[rates > 1] = 0.0
enough_data = np.any((rates > 0) & (rates <= 1))
if not enough_data:
rates = np.zeros(len(rates))
subsamplings.append({
'library_type': library_type,
'subsample_type': depth_type,
'target_read_pairs_per_cell': int(target_rppc),
'library_subsample_rates': list(map(float, rates)),
})
# Each chunk needs to store a piece of the mol info h5
tgt_chunk_len = cr_constants.NUM_MOLECULE_INFO_ENTRIES_PER_CHUNK
# Split the molecule info h5 into equi-RAM chunks
chunks = []
for chunk_start, chunk_len in mc.get_chunks(tgt_chunk_len, preserve_boundaries=True):
chunks.append({
'chunk_start': chunk_start,
'chunk_len': chunk_len,
'subsample_info': subsamplings,
            # The estimate_mem_gb only counts the memory usage of the MoleculeCounter object, which
            # under-estimates the actual memory usage.
            # Based on memory profiling with test case fuzzer_114, actual memory usage is ~4x more
# than estimate_mem_gb (without cap), here set scale = 6.
'__mem_gb': MoleculeCounter.estimate_mem_gb(chunk_len, scale=6),
})
join = {
'__mem_gb': 6,
}
mc.close()
# TODO: is this really necessary w/ martian 3
if len(chunks) == 0:
chunks.append({
'chunk_start': str(0),
'chunk_len': str(0),
'subsample_info': [],
})
return {'chunks': chunks, 'join': join}
def main(args, outs):
np.random.seed(0)
mc = MoleculeCounter.open(args.molecule_info, 'r')
# Get cell-associated barcodes
genomes = sorted(set(f.tags.get('genome', '') for f in mc.feature_reference.feature_defs))
cell_bcs_by_genome = get_cell_associated_barcodes(genomes, args.filtered_barcodes)
# Load chunk of relevant data from the mol_info
chunk = slice(int(args.chunk_start), int(args.chunk_start) + int(args.chunk_len))
mol_library_idx = mc.get_column_lazy('library_idx')[chunk]
mol_read_pairs = mc.get_column_lazy('count')[chunk]
mol_gem_group = mc.get_column_lazy('gem_group')[chunk]
mol_barcode_idx = mc.get_column_lazy('barcode_idx')[chunk]
mol_feature_idx = mc.get_column_lazy('feature_idx')[chunk]
barcodes = mc.get_ref_column('barcodes')
# Give each cell-associated barcode an integer index
cell_bcs = sorted(list(cell_bcs_by_genome['']))
cell_bc_to_int = {bc: i for i, bc in enumerate(cell_bcs)}
# Give each genome an integer index
genome_to_int = {g: i for i, g in enumerate(genomes)}
feature_int_to_genome_int = np.fromiter((genome_to_int[f.tags.get('genome', '')] for f in mc.feature_reference.feature_defs),
dtype=int)
mol_genome_idx = feature_int_to_genome_int[mol_feature_idx]
# determine which (library type, genome) pairs have any associated reads
lib_types = sorted(set(lib['library_type'] for lib in mc.library_info))
lib_type_to_int = {l: i for i, l in enumerate(lib_types)}
lib_idx_to_lib_type_idx = np.fromiter((lib_type_to_int[lib['library_type']] for lib in mc.library_info),
dtype=np.int)
lib_type_genome_any_reads = np.zeros((len(lib_types), len(genomes)), dtype=np.bool)
lib_genome_idx_pairs = set(izip(mol_library_idx[mol_read_pairs > 0],
mol_genome_idx[mol_read_pairs > 0]))
for (lib_idx, genome_idx) in lib_genome_idx_pairs:
lib_type_idx = lib_idx_to_lib_type_idx[lib_idx]
lib_type_genome_any_reads[lib_type_idx, genome_idx] = True
# Run each subsampling task on this chunk of data
n_tasks = len(args.subsample_info)
n_genomes = len(genomes)
n_cells = len(cell_bcs)
umis_per_bc = np.zeros((n_tasks, n_genomes, n_cells))
features_det_per_bc = np.zeros((n_tasks, n_genomes, n_cells))
read_pairs_per_task = np.zeros((n_tasks, n_genomes))
umis_per_task = np.zeros((n_tasks, n_genomes))
for task_idx, task in enumerate(args.subsample_info):
# Per-library subsampling rates
rates_per_library = np.array(task['library_subsample_rates'], dtype=float)
if np.count_nonzero(rates_per_library) == 0:
continue
mol_rate = rates_per_library[mol_library_idx]
# Subsampled read pairs per molecule
new_read_pairs = np.random.binomial(mol_read_pairs, mol_rate)
# Compute tallies for each barcode
group_keys = (mol_gem_group, mol_barcode_idx)
group_values = (mol_feature_idx, mol_genome_idx, new_read_pairs)
for (gg, bc_idx), (feature_idx, genome_idx, read_pairs) in \
cr_utils.numpy_groupby(group_values, group_keys):
barcode = cr_utils.format_barcode_seq(barcodes[bc_idx], gg)
cell_idx = cell_bc_to_int.get(barcode)
for this_genome_idx in xrange(len(genomes)):
umis = np.flatnonzero((read_pairs > 0) & (genome_idx == this_genome_idx))
this_genome_read_pairs = np.sum(read_pairs[genome_idx == this_genome_idx])
# Tally UMIs and median features detected
if barcode in cell_bcs_by_genome[genomes[this_genome_idx]]:
# This is a cell-associated barcode for this genome
umis_per_bc[task_idx, this_genome_idx, cell_idx] = len(umis)
features_det_per_bc[task_idx, this_genome_idx, cell_idx] = np.count_nonzero(np.bincount(feature_idx[umis]))
# Tally numbers for duplicate fraction
read_pairs_per_task[task_idx, this_genome_idx] += np.sum(this_genome_read_pairs)
umis_per_task[task_idx, this_genome_idx] += len(umis)
with open(outs.metrics, 'w') as f:
data = {
'umis_per_bc': umis_per_bc,
'features_det_per_bc': features_det_per_bc,
'read_pairs': read_pairs_per_task,
'umis': umis_per_task,
'lib_type_genome_any_reads': lib_type_genome_any_reads,
}
cPickle.dump(data, f, protocol = cPickle.HIGHEST_PROTOCOL)
def make_metric_name(name, library_type, genome, ss_type, ss_depth):
lt_prefix = rna_library.get_library_type_metric_prefix(library_type)
return '%s%s_%s_%s_%s' % (lt_prefix, genome, ss_type, ss_depth, name)
def compute_dup_frac(read_pairs, umis):
return tk_stats.robust_divide(read_pairs - umis, read_pairs) if read_pairs > 0 else 0.0
def join(args, outs, chunk_defs, chunk_outs):
# Merge tallies
data = None
for chunk in chunk_outs:
with open(chunk.metrics) as f:
chunk_data = cPickle.load(f)
if data is None:
data = chunk_data
else:
for k,v in data.iteritems():
data[k] += chunk_data[k]
# Compute metrics for each subsampling rate
summary = {}
with MoleculeCounter.open(args.molecule_info, 'r') as mc:
genomes = sorted(set(f.tags.get('genome', '') for f in mc.feature_reference.feature_defs))
lib_types = sorted(set(lib['library_type'] for lib in mc.library_info))
lib_type_map = dict((lt, idx) for (idx, lt) in enumerate(lib_types))
cell_bcs_by_genome = get_cell_associated_barcodes(genomes, args.filtered_barcodes)
# Give each cell-associated barcode an integer index
cell_bcs = sorted(list(cell_bcs_by_genome['']))
cell_bc_to_int = {bc: i for i, bc in enumerate(cell_bcs)}
subsample_info = chunk_defs[0].subsample_info if len(chunk_defs) > 0 else []
for i, task in enumerate(subsample_info):
lib_type = task['library_type']
lib_type_idx = lib_type_map[lib_type]
ss_type = task['subsample_type']
ss_depth = task['target_read_pairs_per_cell']
if rna_library.has_genomes(lib_type):
genome_ints = list(range(data['umis_per_bc'].shape[1]))
else:
genome_ints = [0]
# Per-genome metrics
for g in genome_ints:
if not data['lib_type_genome_any_reads'][lib_type_idx, g]:
continue
genome = genomes[g]
# Only compute on cell-associated barcodes for this genome.
# This only matters when there are multiple genomes present.
cell_inds = np.array(sorted(cell_bc_to_int[bc] for bc in cell_bcs_by_genome[genome]))
median_umis_per_cell = np.median(data['umis_per_bc'][i,g,cell_inds])
summary[make_metric_name('subsampled_filtered_bcs_median_counts',
lib_type, genome, ss_type, ss_depth)] = median_umis_per_cell
median_features_per_cell = np.median(data['features_det_per_bc'][i,g,cell_inds])
summary[make_metric_name('subsampled_filtered_bcs_median_unique_genes_detected',
lib_type, genome, ss_type, ss_depth)] = median_features_per_cell
dup_frac = compute_dup_frac(data['read_pairs'][i,g], data['umis'][i,g])
summary[make_metric_name('subsampled_duplication_frac',
lib_type, genome, ss_type, ss_depth)] = dup_frac
# Whole-dataset duplication frac
all_read_pairs = np.sum(data['read_pairs'][i,:])
all_umis = np.sum(data['umis'][i,:])
dup_frac = compute_dup_frac(all_read_pairs, all_umis)
summary[make_metric_name('subsampled_duplication_frac',
lib_type, lib_constants.MULTI_REFS_PREFIX, ss_type, ss_depth)] = dup_frac
with open(outs.summary, 'w') as f:
json.dump(tk_safe_json.json_sanitize(summary), f, indent=4, sort_keys=True)
|
[
"numpy.absolute",
"numpy.random.seed",
"numpy.sum",
"cPickle.load",
"collections.defaultdict",
"cellranger.molecule_counter.MoleculeCounter.estimate_mem_gb",
"numpy.arange",
"numpy.fromiter",
"cellranger.molecule_counter.MoleculeCounter.open",
"cellranger.rna.library.get_library_type_metric_prefix",
"cellranger.utils.numpy_groupby",
"numpy.bincount",
"tenkit.stats.robust_divide",
"cellranger.utils.format_barcode_seq",
"numpy.random.binomial",
"numpy.median",
"cellranger.utils.get_cell_associated_barcode_set",
"numpy.min",
"tenkit.safe_json.json_sanitize",
"numpy.count_nonzero",
"cellranger.utils.split_barcode_seq",
"numpy.flatnonzero",
"numpy.zeros",
"numpy.any",
"cPickle.dump",
"cellranger.rna.library.has_genomes",
"numpy.array",
"itertools.izip"
] |
[((1651, 1696), 'cellranger.molecule_counter.MoleculeCounter.open', 'MoleculeCounter.open', (['args.molecule_info', '"""r"""'], {}), "(args.molecule_info, 'r')\n", (1671, 1696), False, 'from cellranger.molecule_counter import MoleculeCounter\n'), ((1938, 1954), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1949, 1954), False, 'from collections import defaultdict\n'), ((7077, 7094), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7091, 7094), True, 'import numpy as np\n'), ((7105, 7150), 'cellranger.molecule_counter.MoleculeCounter.open', 'MoleculeCounter.open', (['args.molecule_info', '"""r"""'], {}), "(args.molecule_info, 'r')\n", (7125, 7150), False, 'from cellranger.molecule_counter import MoleculeCounter\n'), ((8627, 8724), 'numpy.fromiter', 'np.fromiter', (["(lib_type_to_int[lib['library_type']] for lib in mc.library_info)"], {'dtype': 'np.int'}), "((lib_type_to_int[lib['library_type']] for lib in mc.\n library_info), dtype=np.int)\n", (8638, 8724), True, 'import numpy as np\n'), ((9346, 9385), 'numpy.zeros', 'np.zeros', (['(n_tasks, n_genomes, n_cells)'], {}), '((n_tasks, n_genomes, n_cells))\n', (9354, 9385), True, 'import numpy as np\n'), ((9412, 9451), 'numpy.zeros', 'np.zeros', (['(n_tasks, n_genomes, n_cells)'], {}), '((n_tasks, n_genomes, n_cells))\n', (9420, 9451), True, 'import numpy as np\n'), ((9478, 9508), 'numpy.zeros', 'np.zeros', (['(n_tasks, n_genomes)'], {}), '((n_tasks, n_genomes))\n', (9486, 9508), True, 'import numpy as np\n'), ((9529, 9559), 'numpy.zeros', 'np.zeros', (['(n_tasks, n_genomes)'], {}), '((n_tasks, n_genomes))\n', (9537, 9559), True, 'import numpy as np\n'), ((11760, 11816), 'cellranger.rna.library.get_library_type_metric_prefix', 'rna_library.get_library_type_metric_prefix', (['library_type'], {}), '(library_type)\n', (11802, 11816), True, 'import cellranger.rna.library as rna_library\n'), ((1312, 1383), 'cellranger.utils.get_cell_associated_barcode_set', 'cr_utils.get_cell_associated_barcode_set', (['filtered_barcodes_csv', 'genome'], {}), '(filtered_barcodes_csv, genome)\n', (1352, 1383), True, 'import cellranger.utils as cr_utils\n'), ((2016, 2046), 'cellranger.utils.split_barcode_seq', 'cr_utils.split_barcode_seq', (['bc'], {}), '(bc)\n', (2042, 2046), True, 'import cellranger.utils as cr_utils\n'), ((3710, 3747), 'numpy.min', 'np.min', (['raw_rppc_per_lib[lib_indexes]'], {}), '(raw_rppc_per_lib[lib_indexes])\n', (3716, 3747), True, 'import numpy as np\n'), ((3821, 3845), 'numpy.arange', 'np.arange', (['(0.1)', '(1.1)', '(0.1)'], {}), '(0.1, 1.1, 0.1)\n', (3830, 3845), True, 'import numpy as np\n'), ((8882, 8959), 'itertools.izip', 'izip', (['mol_library_idx[mol_read_pairs > 0]', 'mol_genome_idx[mol_read_pairs > 0]'], {}), '(mol_library_idx[mol_read_pairs > 0], mol_genome_idx[mol_read_pairs > 0])\n', (8886, 8959), False, 'from itertools import izip\n'), ((9687, 9741), 'numpy.array', 'np.array', (["task['library_subsample_rates']"], {'dtype': 'float'}), "(task['library_subsample_rates'], dtype=float)\n", (9695, 9741), True, 'import numpy as np\n'), ((9943, 9987), 'numpy.random.binomial', 'np.random.binomial', (['mol_read_pairs', 'mol_rate'], {}), '(mol_read_pairs, mol_rate)\n', (9961, 9987), True, 'import numpy as np\n'), ((10240, 10288), 'cellranger.utils.numpy_groupby', 'cr_utils.numpy_groupby', (['group_values', 'group_keys'], {}), '(group_values, group_keys)\n', (10262, 10288), True, 'import cellranger.utils as cr_utils\n'), ((11614, 11670), 'cPickle.dump', 'cPickle.dump', (['data', 'f'], {'protocol': 
'cPickle.HIGHEST_PROTOCOL'}), '(data, f, protocol=cPickle.HIGHEST_PROTOCOL)\n', (11626, 11670), False, 'import cPickle\n'), ((11943, 11996), 'tenkit.stats.robust_divide', 'tk_stats.robust_divide', (['(read_pairs - umis)', 'read_pairs'], {}), '(read_pairs - umis, read_pairs)\n', (11965, 11996), True, 'import tenkit.stats as tk_stats\n'), ((12443, 12488), 'cellranger.molecule_counter.MoleculeCounter.open', 'MoleculeCounter.open', (['args.molecule_info', '"""r"""'], {}), "(args.molecule_info, 'r')\n", (12463, 12488), False, 'from cellranger.molecule_counter import MoleculeCounter\n'), ((13333, 13366), 'cellranger.rna.library.has_genomes', 'rna_library.has_genomes', (['lib_type'], {}), '(lib_type)\n', (13356, 13366), True, 'import cellranger.rna.library as rna_library\n'), ((14766, 14798), 'numpy.sum', 'np.sum', (["data['read_pairs'][i, :]"], {}), "(data['read_pairs'][i, :])\n", (14772, 14798), True, 'import numpy as np\n'), ((14817, 14843), 'numpy.sum', 'np.sum', (["data['umis'][i, :]"], {}), "(data['umis'][i, :])\n", (14823, 14843), True, 'import numpy as np\n'), ((9754, 9789), 'numpy.count_nonzero', 'np.count_nonzero', (['rates_per_library'], {}), '(rates_per_library)\n', (9770, 9789), True, 'import numpy as np\n'), ((10313, 10362), 'cellranger.utils.format_barcode_seq', 'cr_utils.format_barcode_seq', (['barcodes[bc_idx]', 'gg'], {}), '(barcodes[bc_idx], gg)\n', (10340, 10362), True, 'import cellranger.utils as cr_utils\n'), ((12200, 12215), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (12212, 12215), False, 'import cPickle\n'), ((13948, 13995), 'numpy.median', 'np.median', (["data['umis_per_bc'][i, g, cell_inds]"], {}), "(data['umis_per_bc'][i, g, cell_inds])\n", (13957, 13995), True, 'import numpy as np\n'), ((14210, 14265), 'numpy.median', 'np.median', (["data['features_det_per_bc'][i, g, cell_inds]"], {}), "(data['features_det_per_bc'][i, g, cell_inds])\n", (14219, 14265), True, 'import numpy as np\n'), ((15135, 15170), 'tenkit.safe_json.json_sanitize', 'tk_safe_json.json_sanitize', (['summary'], {}), '(summary)\n', (15161, 15170), True, 'import tenkit.safe_json as tk_safe_json\n'), ((5467, 5501), 'numpy.any', 'np.any', (['((rates > 0) & (rates <= 1))'], {}), '((rates > 0) & (rates <= 1))\n', (5473, 5501), True, 'import numpy as np\n'), ((6669, 6720), 'cellranger.molecule_counter.MoleculeCounter.estimate_mem_gb', 'MoleculeCounter.estimate_mem_gb', (['chunk_len'], {'scale': '(6)'}), '(chunk_len, scale=6)\n', (6700, 6720), False, 'from cellranger.molecule_counter import MoleculeCounter\n'), ((10496, 10562), 'numpy.flatnonzero', 'np.flatnonzero', (['((read_pairs > 0) & (genome_idx == this_genome_idx))'], {}), '((read_pairs > 0) & (genome_idx == this_genome_idx))\n', (10510, 10562), True, 'import numpy as np\n'), ((10604, 10653), 'numpy.sum', 'np.sum', (['read_pairs[genome_idx == this_genome_idx]'], {}), '(read_pairs[genome_idx == this_genome_idx])\n', (10610, 10653), True, 'import numpy as np\n'), ((11192, 11222), 'numpy.sum', 'np.sum', (['this_genome_read_pairs'], {}), '(this_genome_read_pairs)\n', (11198, 11222), True, 'import numpy as np\n'), ((5274, 5296), 'numpy.absolute', 'np.absolute', (['(rates - 1)'], {}), '(rates - 1)\n', (5285, 5296), True, 'import numpy as np\n'), ((11038, 11068), 'numpy.bincount', 'np.bincount', (['feature_idx[umis]'], {}), '(feature_idx[umis])\n', (11049, 11068), True, 'import numpy as np\n')]
|
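A small numpy sketch of the per-molecule binomial subsampling performed in main(): each molecule keeps Binomial(read_pairs, rate) read pairs, a molecule still counts as a UMI if at least one read pair survives, and the duplication fraction mirrors compute_dup_frac. The counts and rates below are made-up values, and this sketch is Python 3 rather than the Python 2 of the stage code.

import numpy as np

np.random.seed(0)

# Per-molecule read-pair counts and the library index of each molecule (made-up data)
mol_read_pairs = np.array([4, 1, 7, 2, 10, 3])
mol_library_idx = np.array([0, 0, 1, 1, 0, 1])

# Per-library subsampling rates, e.g. chosen to hit a target usable depth per cell
rates_per_library = np.array([0.5, 0.25])
mol_rate = rates_per_library[mol_library_idx]

# Binomially subsample read pairs per molecule
new_read_pairs = np.random.binomial(mol_read_pairs, mol_rate)

# A molecule is still observed (counts as a UMI) if any read pair survived
umis = np.flatnonzero(new_read_pairs > 0)
total_pairs = int(new_read_pairs.sum())
dup_frac = (total_pairs - len(umis)) / total_pairs if total_pairs > 0 else 0.0

print("subsampled read pairs:", new_read_pairs)
print("UMIs retained:", len(umis), "duplication fraction: %.3f" % dup_frac)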
import contextlib
import logging
import requests
from os.path import join, isfile
from django.apps import apps
from django.conf import settings
from django.db.models import Q
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.translation import get_language
import bs4
from apps.summary.renderers.approaches_2015 import Approaches2015FullSummaryRenderer
from apps.summary.renderers.technologies_2015 import \
Technology2015FullSummaryRenderer
from apps.summary.renderers.technologies_2018 import \
Technology2018FullSummaryRenderer
from wkhtmltopdf.views import PDFTemplateView, PDFTemplateResponse
from apps.questionnaire.models import Questionnaire
from apps.questionnaire.utils import get_query_status_filter, \
get_questionnaire_data_in_single_language
logger = logging.getLogger(__name__)
class CachedPDFTemplateResponse(PDFTemplateResponse):
"""
Creating the pdf includes two resource-heavy processes:
- extracting the json to markup (frontend)
- call to wkhtmltopdf (backend)
Therefore, the content is created only once per filename (which should
distinguish between new questionnaire edits). This only works with
reasonably precise file names!
"""
@property
def file_path(self):
return join(settings.SUMMARY_PDF_PATH, self.filename)
def get_rendered_content(self):
return super().rendered_content
def content_with_file_cache(self):
if isfile(self.file_path):
# Catch any exception, worst case is that the pdf is created from
# scratch again
with contextlib.suppress(Exception) as e:
return open(self.file_path, 'rb').read()
content = self.get_rendered_content()
with contextlib.suppress(Exception) as e:
open(self.file_path, 'wb').write(content)
return content
@property
def rendered_content(self):
if settings.DEBUG:
return self.get_rendered_content()
else:
return self.content_with_file_cache()
class RawTemplateResponse(TemplateResponse):
"""
Create HTML with the default template response, cast the markup to a table.
"""
def rows_to_tr(self):
"""
Prepend a row with 12 elements, forcing 'proper' width of following rows
"""
for row in self.soup.select('.row'):
table = self.soup.new_tag('table')
table.attrs['width'] = '100%'
row.wrap(table)
grid_12_columns = '<tr>'
for i in range(0, 12):
grid_12_columns += '<td width="8.3%"></td>'
grid_12_columns += '</tr>'
row.insert_before(bs4.BeautifulSoup(grid_12_columns, 'lxml'))
if 'range' not in row.attrs['class']:
row.unwrap()
def columns_to_td(self):
"""
Use columns-width as colspan.
"""
for column in self.soup.select('.columns'):
column.name = 'td'
for i, class_name in enumerate(column.attrs['class']):
if class_name.startswith('small'):
# Use number of grid rows for colspan
column.attrs['colspan'] = class_name[6:]
del column.attrs['class'][i]
def highlight_list_to_bold(self):
"""
CSS highlights can not be seen in word, make them bold.
"""
for highlight in self.soup.select('.highlights_list > .true'):
highlight.wrap(self.soup.new_tag('strong'))
def header_image_to_foreground(self):
"""
Copy the background-image to the front, so it is copied automatically.
"""
header = self.soup.select('div.header-img')
if header:
style_tag = header[0].attrs['style']
url = style_tag[style_tag.index('(') + 1:-1]
image = self.soup.new_tag('img', src=url, **{'class': 'header-img'})
header[0].attrs['style'] = ''
header[0].insert(0, image)
def range_to_table(self):
"""
Cast the 'ranges' to a more basic format: wrap the parent container with a table, and
cast the divs to tds.
"""
for range_min in self.soup.select('.range_min'):
range_container = range_min.parent.parent
range_table = self.soup.new_tag('table')
range_container.insert(0, range_table)
for i, div in enumerate(range_container.select('div')):
div.name = 'td'
extracted = div.extract()
range_table.insert(i, extracted)
for selected in self.soup.select('.range_true'):
selected.insert(0, bs4.NavigableString('x'))
def normalize_rotated_range(self):
"""
Normalize 'rotated' ranges, indicated by the class 'vertical-title'
"""
for container in self.soup.select('.vertical-title'):
# Extract the labels from the header.
for header_labels in container.select('.rotate'):
labels = []
header_labels.wrap(self.soup.new_tag('table'))
for div in header_labels.select('div'):
labels.append(div.text)
# Fill in the checked value as text, remove all ranges.
for sibling in container.find_next_siblings('div'):
squares = sibling.select('.range_square')
if squares:
# Get the position of the selected element
for i, square in enumerate(squares[0].parent.select('div')):
if 'range_true' in square.get('class', []):
# Print the text-label
squares[0].parent.parent.insert(0, bs4.NavigableString(labels[i]))
# Remove the squares.
squares[0].parent.decompose()
# Remove the header row.
container.decompose()
# Remove the additional lines with 'hr' tags.
for inline_comment in self.soup.select('.inline-comment'):
for hr in inline_comment.select('hr'):
hr.parent.decompose()
def approach_flow_chart_header(self):
"""
Move chart to bottom of the text.
"""
flow_chart_container = self.soup.select('.approach-flow-chart')
if flow_chart_container:
image = flow_chart_container[0].select('.img_in_text')
if image:
flow_chart_container[0].insert(-1, image[0].extract())
def html_to_table(self, html: str) -> str:
"""
Cast the 'fluid' markup to a table so the word-document looks
as expected by the researchers.
"""
self.soup = bs4.BeautifulSoup(html, 'lxml')
css = self.soup.find('link')
# Cache busting is only done with respect to 'summary.css', so in case
# changes on the summary_raw.css are made, also change a blank in
# 'summary.css'.
css.attrs['href'] = css.attrs['href'].replace(
'summary.css', 'summary_raw.css'
)
self.header_image_to_foreground()
self.approach_flow_chart_header()
self.highlight_list_to_bold()
self.columns_to_td()
self.range_to_table()
self.normalize_rotated_range()
self.rows_to_tr()
return str(self.soup)
@property
def rendered_content(self):
return self.html_to_table(super().rendered_content)
class SummaryPDFCreateView(PDFTemplateView):
"""
Put the questionnaire data to the context and return the rendered pdf.
"""
response_class = CachedPDFTemplateResponse
doc_response_class = RawTemplateResponse
summary_type = 'full' # Only one summary type is available right now
base_template_path = 'summary/'
http_method_names = ['get']
render_classes = {
'technologies_2018': {'full': Technology2018FullSummaryRenderer},
'technologies_2015': {'full': Technology2015FullSummaryRenderer},
'approaches_2015': {'full': Approaches2015FullSummaryRenderer}
}
footer_template = '{}layout/footer.html'.format(base_template_path)
# see: http://wkhtmltopdf.org/usage/wkhtmltopdf.txt
cmd_options = {
'dpi': '96',
'margin-top': '1cm',
'margin-bottom': '1cm',
}
default_quality = 'screen'
@property
def is_doc_file(self):
return self.request.GET.get('as', '') == 'doc'
@property
def css_class(self):
return f'is-{self.questionnaire.configuration.code}'
def get(self, request, *args, **kwargs):
self.questionnaire = self.get_object(questionnaire_id=self.kwargs['id'])
self.quality = self.request.GET.get('quality', self.default_quality)
        # filename is set within render_to_response, which is too late as it's
# used for caching.
self.filename = self.get_filename()
if self.is_doc_file:
self.response_class = self.doc_response_class
self.track_request()
return super().get(request, *args, **kwargs)
def get_template_names(self):
template = self.request.GET.get('template', 'base')
return '{}/layout/{}.html'.format(self.base_template_path, template)
def get_filename(self) -> str:
"""
The filename is specific enough to be used as 'pseudo cache-key' in the
CachedPDFTemplateResponse.
"""
return 'wocat-{identifier}-{edition}-{language}-{summary_type}-' \
'{quality}-{update}.pdf'.format(
identifier=self.questionnaire.id,
edition=self.questionnaire.configuration.id,
language=get_language(),
summary_type=self.summary_type,
quality=self.quality,
update=self.questionnaire.updated.strftime('%Y-%m-%d-%H-%M')
)
def get_object(self, questionnaire_id: int) -> Questionnaire:
"""
Get questionnaire and check status / permissions.
"""
status_filter = get_query_status_filter(self.request)
status_filter &= Q(id=questionnaire_id)
obj = Questionnaire.with_status.not_deleted().filter(
Q(id=questionnaire_id), status_filter
).distinct()
if not obj.exists() or obj.count() != 1:
raise Http404
return obj.first()
def get_summary_data(self, **data):
"""
Get summary data from renderer according to configuration.
"""
identifier = f'{self.questionnaire.configuration.code}_' \
f'{self.questionnaire.configuration.edition}'
try:
renderer = self.render_classes[identifier][self.summary_type]
except KeyError:
raise Http404
return renderer(
config=self.questionnaire.configuration_object,
questionnaire=self.questionnaire,
quality=self.quality,
base_url=self.request.build_absolute_uri('/'), **data
).render()
def get_prepared_data(self, questionnaire: Questionnaire) -> dict:
"""
Load the prepared JSON for given object in the current language.
"""
data = get_questionnaire_data_in_single_language(
questionnaire_data=questionnaire.data,
locale=get_language(),
original_locale=questionnaire.original_locale
)
return self.get_summary_data(**data)
def get_footer_context(self) -> dict:
"""
Provide variables used in the footer template.
"""
name = self.questionnaire.get_name()
if len(name) > 70:
name = '{}...'.format(name[:67])
return {
'footer_name': name,
'footer_config': self.questionnaire.configuration.code.title()
}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['css_file_hash'] = apps.get_app_config('summary').css_file_hash
context['css_class'] = self.css_class
context['sections'] = self.get_prepared_data(self.questionnaire)
context.update(self.get_footer_context())
# For languages with no spaces between words (e.g. Lao, Khmer), add CSS
# line break rule if either the questionnaire or its original version is
# in one of these languages.
if self.questionnaire.original_locale in settings.WORD_WRAP_LANGUAGES \
or self.request.LANGUAGE_CODE in settings.WORD_WRAP_LANGUAGES:
context['break_words'] = True
return context
def track_request(self):
"""
Submit a summary-download event to matomo.
"""
if settings.PIWIK_SITE_ID:
# Downloads are not properly registered for relative urls, so build the complete url.
url = f'{self.request.scheme}://{self.request.META["HTTP_HOST"]}{self.request.path}'
payload = dict(
idsite=settings.PIWIK_SITE_ID,
token_auth=settings.PIWIK_AUTH_TOKEN,
rec=1,
apiv=1,
url=url,
download=url
)
if self.request.user.is_authenticated():
payload['_id'] = self.request.user.id
if settings.DEBUG:
# Also see https://developer.matomo.org/api-reference/tracking-api >
# Debugging the Tracker
payload['debug'] = 1
try:
requests.get(f'{settings.PIWIK_URL}matomo.php', params=payload)
except Exception: # Don't raise any kind of exception for failed tracking.
logger.error(
                    'Cannot track summary download for url %s (%s)',
f'{settings.PIWIK_URL}matomo.php', payload
)
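        # Illustrative only (an assumption about the wire format, derived from the
        # payload built above): the tracking call ends up as a plain GET such as
        #   <PIWIK_URL>matomo.php?idsite=<site>&token_auth=<token>&rec=1&apiv=1&url=<page-url>&download=<page-url>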
|
[
"bs4.NavigableString",
"django.db.models.Q",
"contextlib.suppress",
"apps.questionnaire.models.Questionnaire.with_status.not_deleted",
"os.path.isfile",
"django.apps.apps.get_app_config",
"requests.get",
"apps.questionnaire.utils.get_query_status_filter",
"bs4.BeautifulSoup",
"django.utils.translation.get_language",
"os.path.join",
"logging.getLogger"
] |
[((836, 863), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (853, 863), False, 'import logging\n'), ((1315, 1361), 'os.path.join', 'join', (['settings.SUMMARY_PDF_PATH', 'self.filename'], {}), '(settings.SUMMARY_PDF_PATH, self.filename)\n', (1319, 1361), False, 'from os.path import join, isfile\n'), ((1490, 1512), 'os.path.isfile', 'isfile', (['self.file_path'], {}), '(self.file_path)\n', (1496, 1512), False, 'from os.path import join, isfile\n'), ((6836, 6867), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (6853, 6867), False, 'import bs4\n'), ((10129, 10166), 'apps.questionnaire.utils.get_query_status_filter', 'get_query_status_filter', (['self.request'], {}), '(self.request)\n', (10152, 10166), False, 'from apps.questionnaire.utils import get_query_status_filter, get_questionnaire_data_in_single_language\n'), ((10192, 10214), 'django.db.models.Q', 'Q', ([], {'id': 'questionnaire_id'}), '(id=questionnaire_id)\n', (10193, 10214), False, 'from django.db.models import Q\n'), ((1791, 1821), 'contextlib.suppress', 'contextlib.suppress', (['Exception'], {}), '(Exception)\n', (1810, 1821), False, 'import contextlib\n'), ((12035, 12065), 'django.apps.apps.get_app_config', 'apps.get_app_config', (['"""summary"""'], {}), "('summary')\n", (12054, 12065), False, 'from django.apps import apps\n'), ((1637, 1667), 'contextlib.suppress', 'contextlib.suppress', (['Exception'], {}), '(Exception)\n', (1656, 1667), False, 'import contextlib\n'), ((2729, 2771), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['grid_12_columns', '"""lxml"""'], {}), "(grid_12_columns, 'lxml')\n", (2746, 2771), False, 'import bs4\n'), ((4730, 4754), 'bs4.NavigableString', 'bs4.NavigableString', (['"""x"""'], {}), "('x')\n", (4749, 4754), False, 'import bs4\n'), ((9779, 9793), 'django.utils.translation.get_language', 'get_language', ([], {}), '()\n', (9791, 9793), False, 'from django.utils.translation import get_language\n'), ((11401, 11415), 'django.utils.translation.get_language', 'get_language', ([], {}), '()\n', (11413, 11415), False, 'from django.utils.translation import get_language\n'), ((13586, 13649), 'requests.get', 'requests.get', (['f"""{settings.PIWIK_URL}matomo.php"""'], {'params': 'payload'}), "(f'{settings.PIWIK_URL}matomo.php', params=payload)\n", (13598, 13649), False, 'import requests\n'), ((10289, 10311), 'django.db.models.Q', 'Q', ([], {'id': 'questionnaire_id'}), '(id=questionnaire_id)\n', (10290, 10311), False, 'from django.db.models import Q\n'), ((10229, 10268), 'apps.questionnaire.models.Questionnaire.with_status.not_deleted', 'Questionnaire.with_status.not_deleted', ([], {}), '()\n', (10266, 10268), False, 'from apps.questionnaire.models import Questionnaire\n'), ((5843, 5873), 'bs4.NavigableString', 'bs4.NavigableString', (['labels[i]'], {}), '(labels[i])\n', (5862, 5873), False, 'import bs4\n')]
|
# Copyright (c) 2003, 2004 <NAME>
# contributions by <NAME>
# contributions by <NAME>
import sys
import types
import os
import hashlib
import dparser_swigc
class user_pyobjectsPtr:
def __init__(self, this):
self.this = this
def __setattr__(self, name, value):
if name == "t":
self.this.__setattr__(name, value)
return
self.__dict__[name] = value
def __getattr__(self, name):
if name == "t":
return self.this.__getattr__(name)
raise AttributeError(name)
def __repr__(self):
return "<C user_pyobjects instance>"
class d_loc_tPtr:
def __init__(self, this, d_parser):
self.this = this
self.d_parser = d_parser
def __setattr__(self, name, value):
if name == "s":
dparser_swigc.my_d_loc_t_s_set(self.this, self.d_parser, value)
elif name in ["pathname", "previous_col", "col", "line", "ws"]:
self.this.__setattr__(name, value)
else:
self.__dict__[name] = value
def __getattr__(self, name):
if name == "s":
return dparser_swigc.my_d_loc_t_s_get(self.this, self.d_parser)
elif name in ["pathname", "previous_col", "col", "line", "ws"]:
return self.this.__getattr__(name)
raise AttributeError(name)
def __repr__(self):
return "<C d_loc_t instance>"
class d_loc_t(d_loc_tPtr):
def __init__(self, this, d_parser, buf):
d_loc_tPtr.__init__(self, this, d_parser)
self.buf = buf
class D_ParseNodePtr:
def __init__(self, this):
self.this = this
def __setattr__(self, name, value):
if name == "end_skip":
dparser_swigc.my_D_ParseNode_end_skip_set(self.this, self.d_parser,
value)
elif name == "end":
dparser_swigc.my_D_ParseNode_end_set(self.this, self.d_parser,
value)
elif name in ["start_loc", "globals", "user"]:
self.this.__setattr__(name, value)
else:
self.__dict__[name] = value
def __getattr__(self, name):
if name == "symbol":
return dparser_swigc.my_D_ParseNode_symbol_get(
self.this, self.d_parser).decode('string_escape')
elif name == "end":
return dparser_swigc.my_D_ParseNode_end_get(
self.this, self.d_parser)
elif name == "end_skip":
return dparser_swigc.my_D_ParseNode_end_skip_get(
self.this, self.d_parser)
elif name == "globals":
return self.this.__getattr__(name)
elif name == "number_of_children":
return dparser_swigc.d_get_number_of_children(self.this)
elif name == "user":
return user_pyobjectsPtr(self.this.__getattr__(name))
elif name == "start_loc":
val = self.__dict__.get(name)
if not val:
val = self.__dict__[name] = d_loc_t(
self.this.start_loc, self.d_parser, self.buf)
return val
elif name == "c":
children = self.__dict__.get(name, None)
if not children:
dparser_swigc.d_get_number_of_children(self.this)
children = []
for i in xrange(dparser_swigc.d_get_number_off_children(
self.this)):
children.append(
D_ParseNode(dparser_swigc.d_get_child(self.this, i),
self.d_parser, self.buf)
)
self.__dict__[name] = children
return children
raise AttributeError(name)
def __repr__(self):
return "<C D_ParseNode instance>"
class D_ParseNode(D_ParseNodePtr):
def __init__(self, this, d_parser, buf):
D_ParseNodePtr.__init__(self, this)
self.d_parser = d_parser
self.buf = buf
dparser_swigc.add_parse_tree_viewer(self.d_parser)
def __del__(self):
dparser_swigc.remove_parse_tree_viewer(self.d_parser)
class Reject:
pass
class SyntaxErr(Exception):
pass
class AmbiguityException(Exception):
pass
def my_syntax_error_func(loc):
ee = '...'
be = '...'
width = 25
mn = loc.s - width
if mn < 0:
mn = 0
be = ''
mx = loc.s + 25
if mx > len(loc.buf):
mx = len(loc.buf)
ee = ''
begin = loc.buf[mn:loc.s].decode('utf-8')
end = loc.buf[loc.s:mx].decode('utf-8')
s = ('\n\nsyntax error, line:' + str(loc.line) + '\n\n' + be +
begin + '[syntax error]' + end + ee + '\n')
raise SyntaxErr(s)
def my_ambiguity_func(nodes):
raise AmbiguityException("\nunresolved ambiguity. Symbols:\n" +
'\n'.join([node.symbol for node in nodes]))
class Tables:
def __init__(self):
self.sig = hashlib.md5(u'1.31'.encode('utf-8'))
self.tables = None
def __del__(self):
if self.tables:
dparser_swigc.unload_parser_tables(self.tables)
def update(self, data):
self.sig.update(data.encode('utf-8'))
def sig_changed(self, filename):
filename = filename + '.md5'
if os.path.exists(filename):
with open(filename, 'rb') as fh:
return fh.read() != self.sig.digest()
return True
def load_tables(self, grammar_str, filename, make_grammar_file):
if make_grammar_file:
with open(filename, 'wb') as fh:
fh.write(grammar_str)
if self.sig_changed(filename):
dparser_swigc.make_tables(grammar_str, filename.encode())
with open(filename + '.md5', 'wb') as fh:
fh.write(self.sig.digest())
if self.tables:
dparser_swigc.unload_parser_tables(self.tables)
self.tables = dparser_swigc.load_parser_tables(
(filename + ".d_parser.dat").encode('utf-8'))
def getTables(self):
return self.tables
class ParsingException(Exception):
pass
class NoActionsFound(Exception):
pass
class Parser:
def __init__(self, modules=None, parser_folder=None,
file_prefix="d_parser_mach_gen", make_grammar_file=False):
self.tables = Tables()
self.actions = []
if not modules:
try:
raise RuntimeError
except RuntimeError:
traceback = sys.exc_info()[2]
dicts = [traceback.tb_frame.f_back.f_globals]
else:
if isinstance(modules, list):
dicts = [module.__dict__ for module in modules]
elif isinstance(modules, dict):
dicts = [modules]
else:
dicts = [modules.__dict__]
functions = []
for dictionary in dicts:
f = [val for name, val in dictionary.items()
if (isinstance(val, types.FunctionType)) and
name[0:2] == 'd_']
f = sorted(f, key=lambda x: (x.__code__.co_filename,
x.__code__.co_firstlineno))
functions.extend(f)
if len(functions) == 0:
raise "\nno actions found. Action names must start with 'd_'"
if parser_folder is None:
parser_folder = os.path.dirname(sys.argv[0])
if len(parser_folder) == 0:
parser_folder = os.getcwd()
parser_folder = parser_folder.replace('\\', '/')
self.filename = os.path.join(parser_folder, file_prefix + ".g")
grammar_str = []
self.takes_strings = 0
self.takes_globals = 0
for f in functions:
if f.__doc__:
grammar_str.append(f.__doc__)
self.tables.update(f.__doc__)
else:
raise "\naction missing doc string:\n\t" + f.__name__
grammar_str.append(" ${action};\n")
if f.__code__.co_argcount == 0:
raise ("\naction " + f.__name__ +
" must take at least one argument\n")
speculative = 0
arg_types = [0]
for i in range(1, f.__code__.co_argcount):
var = f.__code__.co_varnames[i]
if var == 'spec':
arg_types.append(1)
speculative = 1
elif var == 'g':
arg_types.append(2)
self.takes_globals = 1
elif var == 's':
arg_types.append(3)
self.takes_strings = 1
elif var == 'nodes':
arg_types.append(4)
elif var == 'this':
arg_types.append(5)
elif var == 'spec_only':
arg_types.append(6)
speculative = -1
elif var == 'parser':
arg_types.append(7)
else:
raise ("\nunknown argument name:\n\t" + var +
"\nin function:\n\t" + f.__name__)
self.actions.append((f, arg_types, speculative))
grammar_str = ''.join(grammar_str).encode()
self.tables.load_tables(grammar_str, self.filename, make_grammar_file)
def parse(self, buf, buf_offset=0,
initial_skip_space_fn=None,
syntax_error_fn=my_syntax_error_func,
ambiguity_fn=my_ambiguity_func,
make_token=None,
dont_fixup_internal_productions=False,
fixup_EBNF_productions=False,
dont_merge_epsilon_trees=False,
commit_actions_interval=100,
error_recovery=False,
print_debug_info=False,
partial_parses=False,
dont_compare_stacks=False,
dont_use_greediness_for_disambiguation=False,
dont_use_height_for_disambiguation=False,
start_symbol=''):
# workaround python3/2
t = str
try:
t = basestring
except NameError:
pass
if not isinstance(buf, t):
raise ParsingException(
"Message to parse is not a string: %r" % buf)
# dparser works with bytes
buf = buf.encode('utf-8')
parser = dparser_swigc.make_parser(
self.tables.getTables(), self, Reject, make_token, d_loc_t,
D_ParseNode,
self.actions, initial_skip_space_fn, syntax_error_fn, ambiguity_fn,
dont_fixup_internal_productions, fixup_EBNF_productions,
dont_merge_epsilon_trees, commit_actions_interval, error_recovery,
print_debug_info, partial_parses, dont_compare_stacks,
dont_use_greediness_for_disambiguation,
dont_use_height_for_disambiguation,
start_symbol.encode('utf-8'), self.takes_strings, self.takes_globals
)
result = dparser_swigc.run_parser(parser, buf, buf_offset)
return ParsedStructure(result)
class ParsedStructure:
def __init__(self, result):
self.string_left = ""
self.structure = None
self.top_node = None
if result:
if len(result) == 3:
self.string_left = result[2]
node = result[1]
# D_ParseNode(node.this, node.d_parser, node.buf)
self.top_node = node
self.structure = result[0]
def getStructure(self):
return self.structure
def getStringLeft(self):
return self.string_left
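# Usage sketch (not part of the original module; the grammar, action name and
# input below are illustrative): actions are module-level functions whose names
# start with 'd_' and whose docstrings hold the grammar production, and
# Parser() collects them from the caller's globals:
#
#     def d_greeting(t):
#         'greeting : "hello" "world"'
#         return t
#
#     result = Parser().parse("hello world")
#     print(result.getStructure())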
|
[
"dparser_swigc.d_get_child",
"dparser_swigc.my_D_ParseNode_end_skip_get",
"dparser_swigc.my_D_ParseNode_symbol_get",
"sys.exc_info",
"os.path.join",
"dparser_swigc.d_get_number_of_children",
"dparser_swigc.my_D_ParseNode_end_set",
"os.path.dirname",
"os.path.exists",
"dparser_swigc.my_d_loc_t_s_get",
"dparser_swigc.unload_parser_tables",
"dparser_swigc.add_parse_tree_viewer",
"dparser_swigc.remove_parse_tree_viewer",
"dparser_swigc.run_parser",
"dparser_swigc.d_get_number_off_children",
"dparser_swigc.my_D_ParseNode_end_skip_set",
"os.getcwd",
"dparser_swigc.my_D_ParseNode_end_get",
"dparser_swigc.my_d_loc_t_s_set"
] |
[((4036, 4086), 'dparser_swigc.add_parse_tree_viewer', 'dparser_swigc.add_parse_tree_viewer', (['self.d_parser'], {}), '(self.d_parser)\n', (4071, 4086), False, 'import dparser_swigc\n'), ((4119, 4172), 'dparser_swigc.remove_parse_tree_viewer', 'dparser_swigc.remove_parse_tree_viewer', (['self.d_parser'], {}), '(self.d_parser)\n', (4157, 4172), False, 'import dparser_swigc\n'), ((5319, 5343), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (5333, 5343), False, 'import os\n'), ((7634, 7681), 'os.path.join', 'os.path.join', (['parser_folder', "(file_prefix + '.g')"], {}), "(parser_folder, file_prefix + '.g')\n", (7646, 7681), False, 'import os\n'), ((11080, 11129), 'dparser_swigc.run_parser', 'dparser_swigc.run_parser', (['parser', 'buf', 'buf_offset'], {}), '(parser, buf, buf_offset)\n', (11104, 11129), False, 'import dparser_swigc\n'), ((811, 874), 'dparser_swigc.my_d_loc_t_s_set', 'dparser_swigc.my_d_loc_t_s_set', (['self.this', 'self.d_parser', 'value'], {}), '(self.this, self.d_parser, value)\n', (841, 874), False, 'import dparser_swigc\n'), ((1125, 1181), 'dparser_swigc.my_d_loc_t_s_get', 'dparser_swigc.my_d_loc_t_s_get', (['self.this', 'self.d_parser'], {}), '(self.this, self.d_parser)\n', (1155, 1181), False, 'import dparser_swigc\n'), ((1709, 1783), 'dparser_swigc.my_D_ParseNode_end_skip_set', 'dparser_swigc.my_D_ParseNode_end_skip_set', (['self.this', 'self.d_parser', 'value'], {}), '(self.this, self.d_parser, value)\n', (1750, 1783), False, 'import dparser_swigc\n'), ((5110, 5157), 'dparser_swigc.unload_parser_tables', 'dparser_swigc.unload_parser_tables', (['self.tables'], {}), '(self.tables)\n', (5144, 5157), False, 'import dparser_swigc\n'), ((5892, 5939), 'dparser_swigc.unload_parser_tables', 'dparser_swigc.unload_parser_tables', (['self.tables'], {}), '(self.tables)\n', (5926, 5939), False, 'import dparser_swigc\n'), ((7431, 7459), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (7446, 7459), False, 'import os\n'), ((1878, 1947), 'dparser_swigc.my_D_ParseNode_end_set', 'dparser_swigc.my_D_ParseNode_end_set', (['self.this', 'self.d_parser', 'value'], {}), '(self.this, self.d_parser, value)\n', (1914, 1947), False, 'import dparser_swigc\n'), ((2393, 2455), 'dparser_swigc.my_D_ParseNode_end_get', 'dparser_swigc.my_D_ParseNode_end_get', (['self.this', 'self.d_parser'], {}), '(self.this, self.d_parser)\n', (2429, 2455), False, 'import dparser_swigc\n'), ((7532, 7543), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7541, 7543), False, 'import os\n'), ((2235, 2300), 'dparser_swigc.my_D_ParseNode_symbol_get', 'dparser_swigc.my_D_ParseNode_symbol_get', (['self.this', 'self.d_parser'], {}), '(self.this, self.d_parser)\n', (2274, 2300), False, 'import dparser_swigc\n'), ((2529, 2596), 'dparser_swigc.my_D_ParseNode_end_skip_get', 'dparser_swigc.my_D_ParseNode_end_skip_get', (['self.this', 'self.d_parser'], {}), '(self.this, self.d_parser)\n', (2570, 2596), False, 'import dparser_swigc\n'), ((6544, 6558), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6556, 6558), False, 'import sys\n'), ((2759, 2808), 'dparser_swigc.d_get_number_of_children', 'dparser_swigc.d_get_number_of_children', (['self.this'], {}), '(self.this)\n', (2797, 2808), False, 'import dparser_swigc\n'), ((3274, 3323), 'dparser_swigc.d_get_number_of_children', 'dparser_swigc.d_get_number_of_children', (['self.this'], {}), '(self.this)\n', (3312, 3323), False, 'import dparser_swigc\n'), ((3386, 3436), 'dparser_swigc.d_get_number_off_children', 
'dparser_swigc.d_get_number_off_children', (['self.this'], {}), '(self.this)\n', (3425, 3436), False, 'import dparser_swigc\n'), ((3545, 3584), 'dparser_swigc.d_get_child', 'dparser_swigc.d_get_child', (['self.this', 'i'], {}), '(self.this, i)\n', (3570, 3584), False, 'import dparser_swigc\n')]
|
from __future__ import unicode_literals
from django.contrib import admin
from djangoapps.features.models import Feature
class FeatureAdmin(admin.ModelAdmin):
list_display = ('id', 'active', 'feature_en', 'feature_es', 'description_en', 'description_es', 'created_at')
search_fields = ('feature_es',)
ordering = ('-created_at',)
admin.site.register(Feature, FeatureAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((350, 392), 'django.contrib.admin.site.register', 'admin.site.register', (['Feature', 'FeatureAdmin'], {}), '(Feature, FeatureAdmin)\n', (369, 392), False, 'from django.contrib import admin\n')]
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2018-09-18 13:25:04
# @Last Modified by: <NAME>
# @Last Modified time: 2018-09-18 13:35:04
"""
CUSTOM ESTIMATOR AS DECORATORS for Scikit-Learn Pipelines
"""
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin
import pandas as pd
class SKTransform(BaseEstimator, TransformerMixin):
"""Sklearn Custom Transformer Decorator"""
def __init__(self, f):
self.transform_func = f
def __call__(self, X):
return self.transform_func(X)
def __iter__(self):
return (i for i in [self.transform_func.__name__, self])
def __getitem__(self, i):
return [self.transform_func.__name__, self][i]
def fit(self, X, y=None):
return self
def transform(self, X):
if isinstance(X, pd.DataFrame):
return self.transform_func(X.values)
return self.transform_func(X)
class SKClassify(BaseEstimator, ClassifierMixin):
"""Sklearn Custom Classifier Decorator"""
def __init__(self, f):
self.predict_func = f
def __call__(self, X):
return self.predict_func(X)
def __iter__(self):
return (i for i in [self.predict_func.__name__, self])
def __getitem__(self, i):
return [self.predict_func.__name__, self][i]
def fit(self, X, y=None):
return self
def fit_predict(self, X, y=None):
return self.predict(X)
def predict(self, X, y=None):
if isinstance(X, pd.DataFrame):
return self.predict_func(X.values)
return self.predict_func(X)
if __name__ == '__main__':
from sklearn.pipeline import Pipeline
import numpy as np
@SKTransform
def power2(x):
return x**2
@SKClassify
def lessThan50(x):
return x < 50
ppl = Pipeline([
power2,
lessThan50,
])
print('Prediction:\n', ppl.predict(np.array([3, 6, 8, 10])))
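    # Worked example for reference: power2 maps [3, 6, 8, 10] to [9, 36, 64, 100],
    # and lessThan50 then yields [ True  True False False], which is what the
    # print statement above shows.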
|
[
"sklearn.pipeline.Pipeline",
"numpy.array"
] |
[((1813, 1843), 'sklearn.pipeline.Pipeline', 'Pipeline', (['[power2, lessThan50]'], {}), '([power2, lessThan50])\n', (1821, 1843), False, 'from sklearn.pipeline import Pipeline\n'), ((1906, 1929), 'numpy.array', 'np.array', (['[3, 6, 8, 10]'], {}), '([3, 6, 8, 10])\n', (1914, 1929), True, 'import numpy as np\n')]
|
import os
import urllib
import datetime
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
#Models
# file and directory
# Classes and Functions
class MainPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
nickname = user.nickname()
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
if url_linktext == "Login" :
template_values = {
'user': user,
'url': url,
'url_linktext': url_linktext,
}
template = JINJA_ENVIRONMENT.get_template('login.html')
self.response.write(template.render(template_values))
else:
template_values = {
'path': path,
'user': user,
'url': url,
'url_linktext': url_linktext,
}
template = JINJA_ENVIRONMENT.get_template('main.html')
self.response.write(template.render(template_values))
app = webapp2.WSGIApplication([
('/', MainPage),
], debug=True)
|
[
"google.appengine.api.users.get_current_user",
"os.path.dirname",
"webapp2.WSGIApplication",
"google.appengine.api.users.create_login_url",
"google.appengine.api.users.create_logout_url"
] |
[((1346, 1400), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/', MainPage)]"], {'debug': '(True)'}), "([('/', MainPage)], debug=True)\n", (1369, 1400), False, 'import webapp2\n'), ((442, 466), 'google.appengine.api.users.get_current_user', 'users.get_current_user', ([], {}), '()\n', (464, 466), False, 'from google.appengine.api import users\n'), ((222, 247), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (237, 247), False, 'import os\n'), ((541, 582), 'google.appengine.api.users.create_logout_url', 'users.create_logout_url', (['self.request.uri'], {}), '(self.request.uri)\n', (564, 582), False, 'from google.appengine.api import users\n'), ((651, 691), 'google.appengine.api.users.create_login_url', 'users.create_login_url', (['self.request.uri'], {}), '(self.request.uri)\n', (673, 691), False, 'from google.appengine.api import users\n')]
|
from jina import Executor, Document, DocumentArray, requests
from transformers import (
AutoTokenizer,
AutoModelForQuestionAnswering,
pipeline,
)
class Generator(Executor):
answer_model_name = "deepset/roberta-base-squad2"
answer_model = AutoModelForQuestionAnswering.from_pretrained(answer_model_name)
answer_tokenizer = AutoTokenizer.from_pretrained(answer_model_name)
nlp = pipeline(
"question-answering", model=answer_model, tokenizer=answer_tokenizer
)
@requests
def generate(self, docs: DocumentArray, **kwargs) -> DocumentArray:
for doc in docs.traverse_flat(('r',)):
context = " ".join([match.text for match in doc.matches])
qa_input = {"question": doc.text, "context": context}
result = self.nlp(qa_input)
result = DocumentArray(Document(result))
return result
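# Rough usage sketch (an assumption, not part of the original executor): the
# class is meant to run inside a Jina Flow, where incoming Documents carry the
# retrieved passages in doc.matches, e.g.
#
#     from jina import Flow
#     flow = Flow().add(uses=Generator)
#     with flow:
#         flow.post('/search', inputs=DocumentArray([Document(text='What is Jina?')]))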
|
[
"transformers.AutoTokenizer.from_pretrained",
"jina.Document",
"transformers.AutoModelForQuestionAnswering.from_pretrained",
"transformers.pipeline"
] |
[((260, 324), 'transformers.AutoModelForQuestionAnswering.from_pretrained', 'AutoModelForQuestionAnswering.from_pretrained', (['answer_model_name'], {}), '(answer_model_name)\n', (305, 324), False, 'from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline\n'), ((348, 396), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['answer_model_name'], {}), '(answer_model_name)\n', (377, 396), False, 'from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline\n'), ((407, 485), 'transformers.pipeline', 'pipeline', (['"""question-answering"""'], {'model': 'answer_model', 'tokenizer': 'answer_tokenizer'}), "('question-answering', model=answer_model, tokenizer=answer_tokenizer)\n", (415, 485), False, 'from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline\n'), ((845, 861), 'jina.Document', 'Document', (['result'], {}), '(result)\n', (853, 861), False, 'from jina import Executor, Document, DocumentArray, requests\n')]
|
import logging
from .s3_base_url import S3BaseUrl
from ..base import BaseDirectoryUrl, BaseFileUrl
from typing import IO, List, Optional
import threading
from time import sleep
from s3_concat import S3Concat
from smart_open.s3 import open as s3_open
logger = logging.getLogger(__name__)
class S3FileUrl(S3BaseUrl, BaseFileUrl):
def __str__(self) -> str:
return self.url
def filename(self) -> str:
return self.url[self.url.rfind("/")+1:]
def directory_in_this_directory(self, directory_name: str) -> BaseDirectoryUrl:
directory = self.containing_directory()
return self._directory(f"{directory.url}{directory_name}/")
def file_in_this_directory(self, filename: str) -> 'S3FileUrl':
return self._file(self.containing_directory().url + filename)
def upload_fileobj(self, fileobj: IO[bytes], mode: str = 'wb') -> int:
# https://github.com/boto/boto3/blob/b2affa81c9b55ebcb9cb3af6e928f4f5acf22cb9/docs/source/guide/s3-uploading-files.rst
if mode != 'wb':
# use the single-threaded method that handles all modes
return super().upload_fileobj(fileobj, mode=mode)
class _S3ProgressTracker:
def __init__(self) -> None:
self.length = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount: int) -> None:
with self._lock:
self.length += bytes_amount
callback = _S3ProgressTracker()
self.s3_client.upload_fileobj(Fileobj=fileobj,
Bucket=self.bucket,
Key=self.key,
Callback=callback)
return callback.length
def open(self, mode: str = "rb") -> IO[bytes]:
try:
return s3_open(bucket_id=self.bucket,
key_id=self.key,
mode=mode,
session=self._boto3_session)
except ValueError as e:
# Example: ValueError: 'b0KD9AkG7XA/_manifest' does not
# exist in the bucket 'vince-scratch', or is forbidden
# for access
#
# smart-open version: 1.8.x
if 'does not exist in the bucket' in str(e):
raise FileNotFoundError(f"{self} not found")
else:
raise e
except OSError as e:
# Example: OSError: unable to access bucket:
# 'vince-scratch' key: 'Mv0o7H_YejI/_manifest' version:
# None error: An error occurred (NoSuchKey) when calling
# the GetObject operation: The specified key does not
# exist.
#
# smart-open version: 2.0
if 'NoSuchKey' in str(e):
raise FileNotFoundError(f"{self} not found")
else:
raise e
def download_fileobj(self, fileobj: IO[bytes]) -> None:
self.s3_client.download_fileobj(Fileobj=fileobj, Bucket=self.bucket, Key=self.key)
def store_string(self, contents: str) -> None:
self.s3_resource.Object(self.bucket, self.key).put(Body=contents)
def rename_to(self, new: 'BaseFileUrl') -> 'S3FileUrl':
if not isinstance(new, S3FileUrl):
raise TypeError(f'Can only rename to same type, not {new}')
# https://stackoverflow.com/questions/32501995/boto3-s3-renaming-an-object-using-copy-object
copy_source = {'Bucket': self.bucket, 'Key': self.key}
self.s3_resource.Object(new.bucket, new.key).copy_from(CopySource=copy_source)
self.s3_resource.Object(self.bucket, self.key).delete()
logger.info("Renamed {old_url} to {new_url}".format(old_url=self.url, new_url=new.url))
return new
def delete(self) -> None:
self.s3_resource.Object(self.bucket, self.key).delete()
def wait_to_exist(self) -> None:
while True:
try:
with self.open():
return
except FileNotFoundError:
logger.info(f"Waiting for {self.url} to appear...")
fifty_milliseconds = 0.05
sleep(fifty_milliseconds)
def size(self) -> int:
response = self.s3_client.head_object(Bucket=self.bucket, Key=self.key)
return response['ContentLength']
def concatenate_from(self, other_locs: List['BaseFileUrl']) -> Optional[int]:
if not all([isinstance(loc, S3FileUrl) and loc.bucket == self.bucket
for loc in other_locs]):
logger.warning("Concatenating data locally - this may be slow for large data sets")
return super().concatenate_from(other_locs)
job = S3Concat(self.bucket,
self.key,
session=self._boto3_session,
# We want one file the end--S3Concat's other
# job in life is concatting small log files
# into larger ones, where a minimum file size
# is a hint for when to stop combining files
# together.
min_file_size=None)
for loc in other_locs:
assert isinstance(loc, S3FileUrl) # keep mypy happy
# Add files, can call multiple times to add files from other directories
job.add_file(loc.key)
out = job.concat()
assert len(out) == 1 # with above arg, this should provide only a single output file
return None
|
[
"time.sleep",
"threading.Lock",
"smart_open.s3.open",
"s3_concat.S3Concat",
"logging.getLogger"
] |
[((261, 288), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (278, 288), False, 'import logging\n'), ((4743, 4828), 's3_concat.S3Concat', 'S3Concat', (['self.bucket', 'self.key'], {'session': 'self._boto3_session', 'min_file_size': 'None'}), '(self.bucket, self.key, session=self._boto3_session, min_file_size=None\n )\n', (4751, 4828), False, 'from s3_concat import S3Concat\n'), ((1838, 1930), 'smart_open.s3.open', 's3_open', ([], {'bucket_id': 'self.bucket', 'key_id': 'self.key', 'mode': 'mode', 'session': 'self._boto3_session'}), '(bucket_id=self.bucket, key_id=self.key, mode=mode, session=self.\n _boto3_session)\n', (1845, 1930), True, 'from smart_open.s3 import open as s3_open\n'), ((1300, 1316), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1314, 1316), False, 'import threading\n'), ((4197, 4222), 'time.sleep', 'sleep', (['fifty_milliseconds'], {}), '(fifty_milliseconds)\n', (4202, 4222), False, 'from time import sleep\n')]
|
# -*- coding: utf-8 -*-
from datetime import datetime
from peewee import Model, ForeignKeyField, CharField, DateTimeField
class VisitorModel(Model):
class Meta:
table_name = 'visitor'
indexes = (
(('name', 'ip_addr'), True),
)
name = CharField()
ip_addr = CharField()
location = CharField(null=True)
class VisitModel(Model):
class Meta:
table_name = 'visit'
created_on = DateTimeField(default=datetime.now)
message = CharField(null=False)
visitor = ForeignKeyField(VisitorModel)
@classmethod
def extended(cls, *fields):
return cls.select(VisitModel, VisitorModel, *fields).join(VisitorModel)
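# Usage sketch (illustrative only): extended() joins visits with their visitors,
# so a single query can filter across both tables, e.g.
#
#     recent = (VisitModel.extended()
#               .where(VisitorModel.name == 'alice')
#               .order_by(VisitModel.created_on.desc()))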
|
[
"peewee.CharField",
"peewee.DateTimeField",
"peewee.ForeignKeyField"
] |
[((282, 293), 'peewee.CharField', 'CharField', ([], {}), '()\n', (291, 293), False, 'from peewee import Model, ForeignKeyField, CharField, DateTimeField\n'), ((308, 319), 'peewee.CharField', 'CharField', ([], {}), '()\n', (317, 319), False, 'from peewee import Model, ForeignKeyField, CharField, DateTimeField\n'), ((335, 355), 'peewee.CharField', 'CharField', ([], {'null': '(True)'}), '(null=True)\n', (344, 355), False, 'from peewee import Model, ForeignKeyField, CharField, DateTimeField\n'), ((446, 481), 'peewee.DateTimeField', 'DateTimeField', ([], {'default': 'datetime.now'}), '(default=datetime.now)\n', (459, 481), False, 'from peewee import Model, ForeignKeyField, CharField, DateTimeField\n'), ((496, 517), 'peewee.CharField', 'CharField', ([], {'null': '(False)'}), '(null=False)\n', (505, 517), False, 'from peewee import Model, ForeignKeyField, CharField, DateTimeField\n'), ((532, 561), 'peewee.ForeignKeyField', 'ForeignKeyField', (['VisitorModel'], {}), '(VisitorModel)\n', (547, 561), False, 'from peewee import Model, ForeignKeyField, CharField, DateTimeField\n')]
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.layout_tests.breakpad.dump_reader_multipart import DumpReaderLinux
from webkitpy.layout_tests.port import base
from webkitpy.layout_tests.port import win
_log = logging.getLogger(__name__)
class LinuxPort(base.Port):
port_name = 'linux'
SUPPORTED_VERSIONS = ('precise', 'trusty')
FALLBACK_PATHS = {}
FALLBACK_PATHS['trusty'] = ['linux'] + win.WinPort.latest_platform_fallback_path()
FALLBACK_PATHS['precise'] = ['linux-precise'] + FALLBACK_PATHS['trusty']
DEFAULT_BUILD_DIRECTORIES = ('out',)
BUILD_REQUIREMENTS_URL = 'https://chromium.googlesource.com/chromium/src/+/master/docs/linux_build_instructions.md'
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name.endswith('linux'):
assert host.platform.is_linux()
version = host.platform.os_version
return port_name + '-' + version
return port_name
def __init__(self, host, port_name, **kwargs):
super(LinuxPort, self).__init__(host, port_name, **kwargs)
self._version = port_name[port_name.index('linux-') + len('linux-'):]
self._architecture = "x86_64"
assert self._version in self.SUPPORTED_VERSIONS
if not self.get_option('disable_breakpad'):
self._dump_reader = DumpReaderLinux(host, self._build_path())
self._original_home = None
def additional_driver_flag(self):
flags = super(LinuxPort, self).additional_driver_flag()
if not self.get_option('disable_breakpad'):
flags += ['--enable-crash-reporter', '--crash-dumps-dir=%s' % self._dump_reader.crash_dumps_directory()]
return flags
def check_build(self, needs_http, printer):
result = super(LinuxPort, self).check_build(needs_http, printer)
if result:
_log.error('For complete Linux build requirements, please see:')
_log.error('')
_log.error(' https://chromium.googlesource.com/chromium/src/+/master/docs/linux_build_instructions.md')
return result
def look_for_new_crash_logs(self, crashed_processes, start_time):
if self.get_option('disable_breakpad'):
return None
return self._dump_reader.look_for_new_crash_logs(crashed_processes, start_time)
def clobber_old_port_specific_results(self):
if not self.get_option('disable_breakpad'):
self._dump_reader.clobber_old_results()
def operating_system(self):
return 'linux'
def path_to_apache(self):
# The Apache binary path can vary depending on OS and distribution
# See http://wiki.apache.org/httpd/DistrosDefaultLayout
for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
if self._filesystem.exists(path):
return path
_log.error("Could not find apache. Not installed or unknown path.")
return None
def setup_test_run(self):
super(LinuxPort, self).setup_test_run()
self._setup_dummy_home_dir()
def clean_up_test_run(self):
super(LinuxPort, self).clean_up_test_run()
self._clean_up_dummy_home_dir()
#
# PROTECTED METHODS
#
def _setup_dummy_home_dir(self):
"""Creates a dummy home directory for running the test.
This is a workaround for crbug.com/595504; see crbug.com/612730.
If crbug.com/612730 is resolved in another way, then this may be
unnecessary.
"""
self._original_home = self.host.environ.get('HOME')
dummy_home = str(self._filesystem.mkdtemp())
self.host.environ['HOME'] = dummy_home
self._copy_files_to_dummy_home_dir(dummy_home)
def _copy_files_to_dummy_home_dir(self, dummy_home):
# Note: This may be unnecessary.
fs = self._filesystem
for filename in ['.Xauthority']:
original_path = fs.join(self._original_home, filename)
if not fs.exists(original_path):
continue
fs.copyfile(original_path, fs.join(dummy_home, filename))
def _clean_up_dummy_home_dir(self):
"""Cleans up the dummy dir and resets the HOME environment variable."""
dummy_home = self.host.environ['HOME']
assert dummy_home != self._original_home
self._filesystem.rmtree(dummy_home)
self.host.environ['HOME'] = self._original_home
def _check_apache_install(self):
result = self._check_file_exists(self.path_to_apache(), "apache2")
result = self._check_file_exists(self.path_to_apache_config_file(), "apache2 config file") and result
if not result:
_log.error(' Please install using: "sudo apt-get install apache2 libapache2-mod-php5"')
_log.error('')
return result
def _wdiff_missing_message(self):
return 'wdiff is not installed; please install using "sudo apt-get install wdiff"'
def _path_to_driver(self, target=None):
binary_name = self.driver_name()
return self._build_path_with_target(target, binary_name)
|
[
"webkitpy.layout_tests.port.win.WinPort.latest_platform_fallback_path",
"logging.getLogger"
] |
[((1723, 1750), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1740, 1750), False, 'import logging\n'), ((1921, 1964), 'webkitpy.layout_tests.port.win.WinPort.latest_platform_fallback_path', 'win.WinPort.latest_platform_fallback_path', ([], {}), '()\n', (1962, 1964), False, 'from webkitpy.layout_tests.port import win\n')]
|
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
from hq.output import convert_results_to_output_text
from hq.soup_util import make_soup
from hq.hquery.hquery_processor import HqueryProcessor
from ..common_test_util import expected_result
from test.hquery.hquery_test_util import query_html_doc
def test_absolute_location_path_should_find_multiple_grandchildren():
actual = query_html_doc('<div>one</div><p>not a div</p><div>two</div>', '/html/body/div')
assert actual == expected_result("""
<div>
one
</div>
<div>
two
</div>""")
def test_path_to_root_tag_succeeds_despite_other_root_level_objects():
html = """
<!DOCTYPE html>
<!-- outside -->
<html>
<!-- inside -->
</html>"""
raw_result = HqueryProcessor('/*').query(make_soup(html))
actual = convert_results_to_output_text(raw_result)
assert actual == expected_result("""
<html>
<!-- inside -->
</html>""")
def test_relative_location_path_as_predicate():
html_body = """
<div>
<span>one</span>
</div>
<div>
<p>two</p>
</div>
<div>
<span>three</span>
</div>"""
actual = query_html_doc(html_body, '/html/body/div[span]')
assert actual == expected_result("""
<div>
<span>
one
</span>
</div>
<div>
<span>
three
</span>
</div>""")
def test_abbreviated_context_node_works_in_path():
html_body = """
<div>
<p>one</p>
</div>
<p>two</p>
<div>
<p>three</p>
</div>"""
actual = query_html_doc(html_body, '/html/body/div/./p')
assert actual == expected_result("""
<p>
one
</p>
<p>
three
</p>""")
def test_abbreviated_context_node_works_in_predicate():
html_body = """
<div>
<p>one</p>
</div>
<p>two</p>
<div>
three
</div>
<div>
<p>four</p>
</div>
"""
actual = query_html_doc(html_body, '/html/body/node()[./p]')
assert actual == expected_result("""
<div>
<p>
one
</p>
</div>
<div>
<p>
four
</p>
</div>""")
def test_abbreviated_parent_node_works_in_path():
html_body = """
<p>
<span>one</span>
</p>
<p>
<br/>
<span>two</span>
</p>"""
actual = query_html_doc(html_body, '//p/br/../span')
assert actual == expected_result("""
<span>
two
</span>""")
def test_abbreviated_parent_node_works_in_predicate():
html_body = """
<p>
<br/>
<span>one</span>
</p>
<p>
<span>two</span>
</p>
<p>
<br/>
<span>three</span>
</p>"""
actual = query_html_doc(html_body, '//span[../br]')
assert actual == expected_result("""
<span>
one
</span>
<span>
three
</span>""")
def test_double_slash_works_within_path():
html_body = """
<section>
<p>moe</p>
<div>
<div>
<p>larry</p>
</div>
<p>curly</p>
</div>
</section>
<p><NAME></p>
<section>
<p>shemp</p>
</section>"""
assert query_html_doc(html_body, '//section//p') == expected_result("""
<p>
moe
</p>
<p>
larry
</p>
<p>
curly
</p>
<p>
shemp
</p>""")
def test_predicate_can_be_applied_to_variable_containing_node_set():
html_body = """
<p>not selected</p>
<p id="foo">selected</p>"""
assert query_html_doc(html_body, 'let $x := //p return $x[@id="foo"]') == expected_result("""
<p id="foo">
selected
</p>""")
def test_no_space_between_text_runs_crossing_element_boundaries_in_element_string_value_if_there_was_none_in_doc():
html_body = """<p>"<span>so-called</span>" Klingon </p>"""
assert query_html_doc(html_body, 'string(//p)') == '"so-called" Klingon'
assert query_html_doc('<p>one <span>two</span></p>', 'string(//p)') == 'one two'
|
[
"hq.soup_util.make_soup",
"os.path.abspath",
"hq.output.convert_results_to_output_text",
"hq.hquery.hquery_processor.HqueryProcessor",
"test.hquery.hquery_test_util.query_html_doc"
] |
[((41, 65), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (56, 65), False, 'import os\n'), ((400, 485), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['"""<div>one</div><p>not a div</p><div>two</div>"""', '"""/html/body/div"""'], {}), "('<div>one</div><p>not a div</p><div>two</div>', '/html/body/div'\n )\n", (414, 485), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((840, 882), 'hq.output.convert_results_to_output_text', 'convert_results_to_output_text', (['raw_result'], {}), '(raw_result)\n', (870, 882), False, 'from hq.output import convert_results_to_output_text\n'), ((1192, 1241), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['html_body', '"""/html/body/div[span]"""'], {}), "(html_body, '/html/body/div[span]')\n", (1206, 1241), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((1587, 1634), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['html_body', '"""/html/body/div/./p"""'], {}), "(html_body, '/html/body/div/./p')\n", (1601, 1634), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((1964, 2015), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['html_body', '"""/html/body/node()[./p]"""'], {}), "(html_body, '/html/body/node()[./p]')\n", (1978, 2015), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((2348, 2391), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['html_body', '"""//p/br/../span"""'], {}), "(html_body, '//p/br/../span')\n", (2362, 2391), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((2718, 2760), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['html_body', '"""//span[../br]"""'], {}), "(html_body, '//span[../br]')\n", (2732, 2760), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((810, 825), 'hq.soup_util.make_soup', 'make_soup', (['html'], {}), '(html)\n', (819, 825), False, 'from hq.soup_util import make_soup\n'), ((3187, 3228), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['html_body', '"""//section//p"""'], {}), "(html_body, '//section//p')\n", (3201, 3228), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((3524, 3587), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['html_body', '"""let $x := //p return $x[@id="foo"]"""'], {}), '(html_body, \'let $x := //p return $x[@id="foo"]\')\n', (3538, 3587), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((3848, 3888), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['html_body', '"""string(//p)"""'], {}), "(html_body, 'string(//p)')\n", (3862, 3888), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((3925, 3985), 'test.hquery.hquery_test_util.query_html_doc', 'query_html_doc', (['"""<p>one <span>two</span></p>"""', '"""string(//p)"""'], {}), "('<p>one <span>two</span></p>', 'string(//p)')\n", (3939, 3985), False, 'from test.hquery.hquery_test_util import query_html_doc\n'), ((782, 803), 'hq.hquery.hquery_processor.HqueryProcessor', 'HqueryProcessor', (['"""/*"""'], {}), "('/*')\n", (797, 803), False, 'from hq.hquery.hquery_processor import HqueryProcessor\n')]
|
from setuptools import find_packages, setup
with open('README.rst') as f:
readme = f.read()
with open('LICENSE.txt') as f:
license = f.read()
setup(
name = 'unhashlib',
version = '0.1.0',
description = 'A string class enhancement',
long_description = readme,
#license = license,
packages=find_packages(exclude=('tests', 'docs')),
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/reale/unhashlib',
keywords = [ ],
install_requires = ['hashlib', 'requests'],
test_suite = 'nose.collector',
tests_require = ['nose'],
)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
[
"setuptools.find_packages"
] |
[((323, 363), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests', 'docs')"}), "(exclude=('tests', 'docs'))\n", (336, 363), False, 'from setuptools import find_packages, setup\n')]
|
"""
Helper functions for working with object buckets
"""
import logging
import os
import shlex
from uuid import uuid4
import boto3
from botocore.handlers import disable_signing
from ocs_ci.framework import config
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import TimeoutExpiredError, UnexpectedBehaviour
from ocs_ci.utility import templating
from ocs_ci.utility.ssl_certs import get_root_ca_cert
from ocs_ci.utility.utils import TimeoutSampler, run_cmd
from ocs_ci.helpers.helpers import create_resource
logger = logging.getLogger(__name__)
def craft_s3_command(cmd, mcg_obj=None, api=False, signed_request_creds=None):
"""
Crafts the AWS CLI S3 command including the
login credentials and command to be ran
Args:
mcg_obj: An MCG class instance
cmd: The AWSCLI command to run
api: True if the call is for s3api, false if s3
signed_request_creds: a dictionary containing AWS S3 creds for a signed request
Returns:
str: The crafted command, ready to be executed on the pod
"""
api = "api" if api else ""
if mcg_obj:
if mcg_obj.region:
region = f"AWS_DEFAULT_REGION={mcg_obj.region} "
else:
region = ""
base_command = (
f'sh -c "AWS_CA_BUNDLE={constants.SERVICE_CA_CRT_AWSCLI_PATH} '
f"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
f"{region}"
f"aws s3{api} "
f"--endpoint={mcg_obj.s3_internal_endpoint} "
)
string_wrapper = '"'
elif signed_request_creds:
if signed_request_creds.get("region"):
region = f'AWS_DEFAULT_REGION={signed_request_creds.get("region")} '
else:
region = ""
base_command = (
f'sh -c "AWS_ACCESS_KEY_ID={signed_request_creds.get("access_key_id")} '
f'AWS_SECRET_ACCESS_KEY={signed_request_creds.get("access_key")} '
f"{region}"
f"aws s3{api} "
f'--endpoint={signed_request_creds.get("endpoint")} '
)
string_wrapper = '"'
else:
base_command = f"aws s3{api} --no-sign-request "
string_wrapper = ""
return f"{base_command}{cmd}{string_wrapper}"
def verify_s3_object_integrity(original_object_path, result_object_path, awscli_pod):
"""
Verifies checksum between original object and result object on an awscli pod
Args:
original_object_path (str): The Object that is uploaded to the s3 bucket
result_object_path (str): The Object that is downloaded from the s3 bucket
awscli_pod (pod): A pod running the AWSCLI tools
Returns:
bool: True if checksum matches, False otherwise
"""
md5sum = shlex.split(
awscli_pod.exec_cmd_on_pod(
command=f"md5sum {original_object_path} {result_object_path}"
)
)
if md5sum[0] == md5sum[2]:
logger.info(
f"Passed: MD5 comparison for {original_object_path} and {result_object_path}"
)
return True
else:
logger.error(
f"Failed: MD5 comparison of {original_object_path} and {result_object_path} - "
f"{md5sum[0]} ≠ {md5sum[2]}"
)
return False
def retrieve_test_objects_to_pod(podobj, target_dir):
"""
Downloads all the test objects to a given directory in a given pod.
Args:
podobj (OCS): The pod object to download the objects to
target_dir: The fully qualified path of the download target folder
Returns:
list: A list of the downloaded objects' names
"""
sync_object_directory(podobj, f"s3://{constants.TEST_FILES_BUCKET}", target_dir)
downloaded_objects = podobj.exec_cmd_on_pod(f"ls -A1 {target_dir}").split(" ")
logger.info(f"Downloaded objects: {downloaded_objects}")
return downloaded_objects
def retrieve_anon_s3_resource():
"""
Returns an anonymous boto3 S3 resource by creating one and disabling signing
Disabling signing isn't documented anywhere, and this solution is based on
a comment by an AWS developer:
https://github.com/boto/boto3/issues/134#issuecomment-116766812
Returns:
boto3.resource(): An anonymous S3 resource
"""
anon_s3_resource = boto3.resource("s3")
anon_s3_resource.meta.client.meta.events.register(
"choose-signer.s3.*", disable_signing
)
return anon_s3_resource
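# Usage sketch (assumption): the anonymous resource can list or fetch objects
# from a public bucket without credentials, e.g.
#
#     s3 = retrieve_anon_s3_resource()
#     for obj in s3.Bucket(constants.TEST_FILES_BUCKET).objects.all():
#         logger.info(obj.key)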
def sync_object_directory(podobj, src, target, s3_obj=None, signed_request_creds=None):
"""
Syncs objects between a target and source directories
Args:
podobj (OCS): The pod on which to execute the commands and download the objects to
src (str): Fully qualified object source path
target (str): Fully qualified object target path
s3_obj (MCG, optional): The MCG object to use in case the target or source
are in an MCG
        signed_request_creds (dictionary, optional): the access_key, secret_key,
            endpoint and region to use when sending signed AWS S3 requests
"""
logger.info(f"Syncing all objects and directories from {src} to {target}")
retrieve_cmd = f"sync {src} {target}"
if s3_obj:
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_internal_endpoint]
elif signed_request_creds:
secrets = [
signed_request_creds.get("access_key_id"),
signed_request_creds.get("access_key"),
signed_request_creds.get("endpoint"),
]
else:
secrets = None
podobj.exec_cmd_on_pod(
command=craft_s3_command(
retrieve_cmd, s3_obj, signed_request_creds=signed_request_creds
),
out_yaml_format=False,
secrets=secrets,
), "Failed to sync objects"
# Todo: check that all objects were synced successfully
def rm_object_recursive(podobj, target, mcg_obj, option=""):
"""
Remove bucket objects with --recursive option
Args:
podobj (OCS): The pod on which to execute the commands and download
the objects to
target (str): Fully qualified bucket target path
mcg_obj (MCG, optional): The MCG object to use in case the target or
source are in an MCG
option (str): Extra s3 remove command option
"""
rm_command = f"rm s3://{target} --recursive {option}"
podobj.exec_cmd_on_pod(
command=craft_s3_command(rm_command, mcg_obj),
out_yaml_format=False,
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
def get_rgw_restart_counts():
"""
Gets the restart count of the RGW pods
Returns:
list: restart counts of RGW pods
"""
# Internal import in order to avoid circular import
from ocs_ci.ocs.resources.pod import get_rgw_pods
rgw_pods = get_rgw_pods()
return [rgw_pod.restart_count for rgw_pod in rgw_pods]
def write_individual_s3_objects(
mcg_obj, awscli_pod, bucket_factory, downloaded_files, target_dir, bucket_name=None
):
"""
Writes objects one by one to an s3 bucket
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
awscli_pod (pod): A pod running the AWSCLI tools
bucket_factory: Calling this fixture creates a new bucket(s)
downloaded_files (list): List of downloaded object keys
target_dir (str): The fully qualified path of the download target folder
bucket_name (str): Name of the bucket
(default: none)
"""
bucketname = bucket_name or bucket_factory(1)[0].name
logger.info("Writing objects to bucket")
for obj_name in downloaded_files:
full_object_path = f"s3://{bucketname}/{obj_name}"
copycommand = f"cp {target_dir}{obj_name} {full_object_path}"
assert "Completed" in awscli_pod.exec_cmd_on_pod(
command=craft_s3_command(copycommand, mcg_obj),
out_yaml_format=False,
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
def upload_parts(
mcg_obj, awscli_pod, bucketname, object_key, body_path, upload_id, uploaded_parts
):
"""
Uploads individual parts to a bucket
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
awscli_pod (pod): A pod running the AWSCLI tools
bucketname (str): Name of the bucket to upload parts on
object_key (list): Unique object Identifier
body_path (str): Path of the directory on the aws pod which contains the parts to be uploaded
upload_id (str): Multipart Upload-ID
uploaded_parts (list): list containing the name of the parts to be uploaded
Returns:
list: List containing the ETag of the parts
"""
parts = []
secrets = [mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_internal_endpoint]
for count, part in enumerate(uploaded_parts, 1):
upload_cmd = (
f"upload-part --bucket {bucketname} --key {object_key}"
f" --part-number {count} --body {body_path}/{part}"
f" --upload-id {upload_id}"
)
# upload_cmd will return ETag, upload_id etc which is then split to get just the ETag
part = (
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command(upload_cmd, mcg_obj, api=True),
out_yaml_format=False,
secrets=secrets,
)
.split('"')[-3]
.split("\\")[0]
)
parts.append({"PartNumber": count, "ETag": f'"{part}"'})
return parts
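# Note (illustrative, not part of the original helper): the returned list is
# shaped for S3's complete-multipart-upload call, which expects a JSON document
# like {"Parts": [{"ETag": "\"<etag>\"", "PartNumber": 1}, ...]}; e.g.
#
#     import json
#     multipart_json = json.dumps({"Parts": parts})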
def oc_create_aws_backingstore(cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with aws underlying storage using oc create command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
bs_data["spec"] = {
"type": "aws-s3",
"awsS3": {
"targetBucket": uls_name,
"region": region,
"secret": {"name": cld_mgr.aws_client.secret.name},
},
}
create_resource(**bs_data)
def cli_create_aws_backingstore(mcg_obj, cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with aws underlying storage using noobaa cli command
Args:
mcg_obj (MCG): Used for execution for the NooBaa CLI command
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create aws-s3 {backingstore_name} "
f"--access-key {cld_mgr.aws_client.access_key} "
f"--secret-key {cld_mgr.aws_client.secret_key} "
f"--target-bucket {uls_name} --region {region}"
)
def oc_create_google_backingstore(cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with GCP underlying storage using oc create command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["spec"] = {
"type": constants.BACKINGSTORE_TYPE_GOOGLE,
"googleCloudStorage": {
"targetBucket": uls_name,
"secret": {"name": cld_mgr.gcp_client.secret.name},
},
}
create_resource(**bs_data)
def cli_create_google_backingstore(
mcg_obj, cld_mgr, backingstore_name, uls_name, region
):
"""
Create a new backingstore with GCP underlying storage using a NooBaa CLI command
Args:
mcg_obj (MCG): Used for execution for the NooBaa CLI command
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create google-cloud-storage {backingstore_name} "
f"--private-key-json-file {constants.GOOGLE_CREDS_JSON_PATH} "
f"--target-bucket {uls_name}"
)
def oc_create_azure_backingstore(cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with Azure underlying storage using oc create command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["spec"] = {
"type": constants.BACKINGSTORE_TYPE_AZURE,
"azureBlob": {
"targetBlobContainer": uls_name,
"secret": {"name": cld_mgr.azure_client.secret.name},
},
}
create_resource(**bs_data)
def cli_create_azure_backingstore(
mcg_obj, cld_mgr, backingstore_name, uls_name, region
):
"""
Create a new backingstore with aws underlying storage using noobaa cli command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create azure-blob {backingstore_name} "
f"--account-key {cld_mgr.azure_client.credential} "
f"--account-name {cld_mgr.azure_client.account_name} "
f"--target-blob-container {uls_name}"
)
def oc_create_ibmcos_backingstore(cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with IBM COS underlying storage using oc create command
Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
bs_data["spec"] = {
"type": "ibm-cos",
"ibmCos": {
"targetBucket": uls_name,
"signatureVersion": "v2",
"endpoint": constants.IBM_COS_GEO_ENDPOINT_TEMPLATE.format(
cld_mgr.ibmcos_client.region.lower()
),
"secret": {"name": cld_mgr.ibmcos_client.secret.name},
},
}
create_resource(**bs_data)
def cli_create_ibmcos_backingstore(
mcg_obj, cld_mgr, backingstore_name, uls_name, region
):
"""
Create a new backingstore with IBM COS underlying storage using a NooBaa CLI command
Args:
mcg_obj (MCG): Used for execution for the NooBaa CLI command
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): which region to create backingstore (should be the same as uls)
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create ibm-cos {backingstore_name} "
f"--access-key {cld_mgr.ibmcos_client.access_key} "
f"--secret-key {cld_mgr.ibmcos_client.secret_key} "
f"""--endpoint {
constants.IBM_COS_GEO_ENDPOINT_TEMPLATE.format(
cld_mgr.ibmcos_client.region.lower()
)
} """
f"--target-bucket {uls_name}"
)
def oc_create_s3comp_backingstore(cld_mgr, backingstore_name, uls_name, region):
pass
def cli_create_s3comp_backingstore(cld_mgr, backingstore_name, uls_name, region):
pass
def oc_create_pv_backingstore(backingstore_name, vol_num, size, storage_class):
"""
Create a new backingstore with pv underlying storage using oc create command
Args:
backingstore_name (str): backingstore name
vol_num (int): number of pv volumes
size (int): each volume size in GB
storage_class (str): which storage class to use
"""
bs_data = templating.load_yaml(constants.PV_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
bs_data["spec"]["pvPool"]["resources"]["requests"]["storage"] = str(size) + "Gi"
bs_data["spec"]["pvPool"]["numVolumes"] = vol_num
bs_data["spec"]["pvPool"]["storageClass"] = storage_class
create_resource(**bs_data)
wait_for_pv_backingstore(backingstore_name, config.ENV_DATA["cluster_namespace"])
def cli_create_pv_backingstore(
mcg_obj, backingstore_name, vol_num, size, storage_class
):
"""
Create a new backingstore with pv underlying storage using noobaa cli command
Args:
mcg_obj (MCG): Used for execution for the NooBaa CLI command
backingstore_name (str): backingstore name
vol_num (int): number of pv volumes
size (int): each volume size in GB
storage_class (str): which storage class to use
"""
mcg_obj.exec_mcg_cmd(
f"backingstore create pv-pool {backingstore_name} --num-volumes "
f"{vol_num} --pv-size-gb {size} --storage-class {storage_class}"
)
wait_for_pv_backingstore(backingstore_name, config.ENV_DATA["cluster_namespace"])
def wait_for_pv_backingstore(backingstore_name, namespace=None):
"""
wait for existing pv backing store to reach OPTIMAL state
Args:
backingstore_name (str): backingstore name
namespace (str): backing store's namespace
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
sample = TimeoutSampler(
timeout=240,
sleep=15,
func=check_pv_backingstore_status,
backingstore_name=backingstore_name,
namespace=namespace,
)
if not sample.wait_for_func_status(result=True):
logger.error(f"Backing Store {backingstore_name} never reached OPTIMAL state")
raise TimeoutExpiredError
else:
logger.info(f"Backing Store {backingstore_name} created successfully")
def check_pv_backingstore_status(
backingstore_name, namespace=None, desired_status=constants.HEALTHY_PV_BS
):
"""
check if existing pv backing store is in OPTIMAL state
Args:
backingstore_name (str): backingstore name
namespace (str): backing store's namespace
desired_status (str): desired state for the backing store, if None is given then desired
is the Healthy status
Returns:
bool: True if backing store is in the desired state
"""
kubeconfig = os.getenv("KUBECONFIG")
kubeconfig = f"--kubeconfig {kubeconfig}" if kubeconfig else ""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
cmd = (
f"oc get backingstore -n {namespace} {kubeconfig} {backingstore_name} "
"-o=jsonpath=`{.status.mode.modeCode}`"
)
res = run_cmd(cmd=cmd)
return res in desired_status
def create_multipart_upload(s3_obj, bucketname, object_key):
"""
Initiates Multipart Upload
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket on which the multipart upload is to be initiated
object_key (str): Unique object Identifier
Returns:
str : Multipart Upload-ID
"""
mpu = s3_obj.s3_client.create_multipart_upload(Bucket=bucketname, Key=object_key)
upload_id = mpu["UploadId"]
return upload_id
def list_multipart_upload(s3_obj, bucketname):
"""
Lists the multipart upload details on a bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : Dictionary containing the multipart upload details
"""
return s3_obj.s3_client.list_multipart_uploads(Bucket=bucketname)
def list_uploaded_parts(s3_obj, bucketname, object_key, upload_id):
"""
Lists uploaded parts and their ETags
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
upload_id (str): Multipart Upload-ID
Returns:
dict : Dictionary containing the multipart upload details
"""
return s3_obj.s3_client.list_parts(
Bucket=bucketname, Key=object_key, UploadId=upload_id
)
def complete_multipart_upload(s3_obj, bucketname, object_key, upload_id, parts):
"""
Completes the Multipart Upload
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
upload_id (str): Multipart Upload-ID
parts (list): List containing the uploaded parts which includes ETag and part number
Returns:
dict : Dictionary containing the completed multipart upload details
"""
result = s3_obj.s3_client.complete_multipart_upload(
Bucket=bucketname,
Key=object_key,
UploadId=upload_id,
MultipartUpload={"Parts": parts},
)
return result
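# Illustrative sketch (not part of the helpers above; bucket/object names and the
# `chunks` iterable are hypothetical): a typical end-to-end multipart flow built
# from these helpers plus the underlying boto3 `upload_part` call.
#
#     upload_id = create_multipart_upload(s3_obj, "my-bucket", "big-object")
#     parts = []
#     for part_number, chunk in enumerate(chunks, start=1):
#         resp = s3_obj.s3_client.upload_part(
#             Bucket="my-bucket", Key="big-object", PartNumber=part_number,
#             UploadId=upload_id, Body=chunk,
#         )
#         parts.append({"PartNumber": part_number, "ETag": resp["ETag"]})
#     complete_multipart_upload(s3_obj, "my-bucket", "big-object", upload_id, parts)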
def abort_all_multipart_upload(s3_obj, bucketname, object_key):
"""
Abort all Multipart Uploads for this Bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
Returns:
list : List of aborted upload ids
"""
multipart_list = s3_obj.s3_client.list_multipart_uploads(Bucket=bucketname)
logger.info(f"Aborting {len(multipart_list.get('Uploads', []))} uploads")
if "Uploads" in multipart_list:
return [
s3_obj.s3_client.abort_multipart_upload(
Bucket=bucketname, Key=object_key, UploadId=upload["UploadId"]
)
for upload in multipart_list["Uploads"]
]
else:
return None
def abort_multipart(s3_obj, bucketname, object_key, upload_id):
"""
Aborts a Multipart Upload for this Bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
upload_id (str): Multipart Upload-ID
Returns:
str : aborted upload id
"""
return s3_obj.s3_client.abort_multipart_upload(
Bucket=bucketname, Key=object_key, UploadId=upload_id
)
def put_bucket_policy(s3_obj, bucketname, policy):
"""
Adds bucket policy to a bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
policy (str): Bucket policy in Json format
Returns:
dict : Bucket policy response
"""
return s3_obj.s3_client.put_bucket_policy(Bucket=bucketname, Policy=policy)
def get_bucket_policy(s3_obj, bucketname):
"""
Gets bucket policy from a bucket
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : Get Bucket policy response
"""
return s3_obj.s3_client.get_bucket_policy(Bucket=bucketname)
def delete_bucket_policy(s3_obj, bucketname):
"""
Deletes bucket policy
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : Delete Bucket policy response
"""
return s3_obj.s3_client.delete_bucket_policy(Bucket=bucketname)
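# Illustrative sketch (bucket name and statement fields are hypothetical; the exact
# statements accepted may vary by endpoint): building a minimal policy document and
# applying it with the helpers above.
#
#     import json
#     policy = {
#         "Version": "2012-10-17",
#         "Statement": [{
#             "Effect": "Allow",
#             "Principal": {"AWS": ["*"]},
#             "Action": ["s3:GetObject"],
#             "Resource": [f"arn:aws:s3:::{bucket_name}/*"],
#         }],
#     }
#     put_bucket_policy(s3_obj, bucket_name, json.dumps(policy))
#     get_bucket_policy(s3_obj, bucket_name)
#     delete_bucket_policy(s3_obj, bucket_name)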
def s3_put_object(s3_obj, bucketname, object_key, data, content_type=""):
"""
Simple Boto3 client based Put object
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
data (str): string content to write to a new S3 object
content_type (str): Type of object data. eg: html, txt etc,
Returns:
dict : Put object response
"""
return s3_obj.s3_client.put_object(
Bucket=bucketname, Key=object_key, Body=data, ContentType=content_type
)
def s3_get_object(s3_obj, bucketname, object_key, versionid=""):
"""
Simple Boto3 client based Get object
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
versionid (str): Unique version number of an object
Returns:
dict : Get object response
"""
return s3_obj.s3_client.get_object(
Bucket=bucketname, Key=object_key, VersionId=versionid
)
def s3_delete_object(s3_obj, bucketname, object_key, versionid=None):
"""
Simple Boto3 client based Delete object
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier
versionid (str): Unique version number of an object
Returns:
dict : Delete object response
"""
if versionid:
return s3_obj.s3_client.delete_object(
Bucket=bucketname, Key=object_key, VersionId=versionid
)
else:
return s3_obj.s3_client.delete_object(Bucket=bucketname, Key=object_key)
def s3_put_bucket_website(s3_obj, bucketname, website_config):
"""
Boto3 client based Put bucket website function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
website_config (dict): Website configuration info
Returns:
dict : PutBucketWebsite response
"""
return s3_obj.s3_client.put_bucket_website(
Bucket=bucketname, WebsiteConfiguration=website_config
)
def s3_get_bucket_website(s3_obj, bucketname):
"""
Boto3 client based Get bucket website function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : GetBucketWebsite response
"""
return s3_obj.s3_client.get_bucket_website(Bucket=bucketname)
def s3_delete_bucket_website(s3_obj, bucketname):
"""
Boto3 client based Delete bucket website function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
Returns:
dict : DeleteBucketWebsite response
"""
return s3_obj.s3_client.delete_bucket_website(Bucket=bucketname)
def s3_put_bucket_versioning(s3_obj, bucketname, status="Enabled", s3_client=None):
"""
Boto3 client based Put Bucket Versioning function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
status (str): 'Enabled' or 'Suspended'. Default 'Enabled'
s3_client : Any s3 client resource
Returns:
dict : PutBucketVersioning response
"""
if s3_client:
return s3_client.put_bucket_versioning(
Bucket=bucketname, VersioningConfiguration={"Status": status}
)
else:
return s3_obj.s3_client.put_bucket_versioning(
Bucket=bucketname, VersioningConfiguration={"Status": status}
)
def s3_get_bucket_versioning(s3_obj, bucketname, s3_client=None):
"""
Boto3 client based Get Bucket Versioning function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
s3_client: Any s3 client resource
Returns:
dict : GetBucketVersioning response
"""
if s3_client:
return s3_client.get_bucket_versioning(Bucket=bucketname)
else:
return s3_obj.s3_client.get_bucket_versioning(Bucket=bucketname)
def s3_list_object_versions(s3_obj, bucketname, prefix=""):
"""
Boto3 client based list object versions function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
prefix (str): Object key prefix
Returns:
dict : List object version response
"""
return s3_obj.s3_client.list_object_versions(Bucket=bucketname, Prefix=prefix)
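# Illustrative sketch (names are hypothetical): a simple versioning round-trip with
# the helpers above - enable versioning, write two versions, then read and delete a
# specific version by its VersionId.
#
#     s3_put_bucket_versioning(s3_obj, bucket_name, status="Enabled")
#     s3_put_object(s3_obj, bucket_name, "obj", data="v1")
#     s3_put_object(s3_obj, bucket_name, "obj", data="v2")
#     versions = s3_list_object_versions(s3_obj, bucket_name, prefix="obj")
#     version_ids = [v["VersionId"] for v in versions.get("Versions", [])]
#     s3_get_object(s3_obj, bucket_name, "obj", versionid=version_ids[-1])
#     s3_delete_object(s3_obj, bucket_name, "obj", versionid=version_ids[-1])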
def s3_io_create_delete(mcg_obj, awscli_pod, bucket_factory):
"""
Running IOs on s3 bucket
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
awscli_pod (pod): A pod running the AWSCLI tools
bucket_factory: Calling this fixture creates a new bucket(s)
"""
target_dir = "/aws/" + uuid4().hex + "_original/"
downloaded_files = retrieve_test_objects_to_pod(awscli_pod, target_dir)
bucketname = bucket_factory(1)[0].name
uploaded_objects_paths = get_full_path_object(downloaded_files, bucketname)
write_individual_s3_objects(
mcg_obj,
awscli_pod,
bucket_factory,
downloaded_files,
target_dir,
bucket_name=bucketname,
)
del_objects(uploaded_objects_paths, awscli_pod, mcg_obj)
awscli_pod.exec_cmd_on_pod(command=f"rm -rf {target_dir}")
def del_objects(uploaded_objects_paths, awscli_pod, mcg_obj):
"""
Deleting objects from bucket
Args:
uploaded_objects_paths (list): List of object paths
awscli_pod (pod): A pod running the AWSCLI tools
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
"""
for uploaded_filename in uploaded_objects_paths:
logger.info(f"Deleting object {uploaded_filename}")
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command(mcg_obj, "rm " + uploaded_filename),
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
def get_full_path_object(downloaded_files, bucket_name):
"""
Get the full S3 paths of objects in the bucket
Args:
downloaded_files (list): List of downloaded files
bucket_name (str): Name of the bucket
Returns:
uploaded_objects_paths (list) : List of full paths of objects
"""
uploaded_objects_paths = []
for uploaded_filename in downloaded_files:
uploaded_objects_paths.append(f"s3://{bucket_name}/{uploaded_filename}")
return uploaded_objects_paths
def obc_io_create_delete(mcg_obj, awscli_pod, bucket_factory):
"""
Running IOs on OBC interface
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
awscli_pod (pod): A pod running the AWSCLI tools
bucket_factory: Calling this fixture creates a new bucket(s)
"""
dir = "/aws/" + uuid4().hex + "_original/"
downloaded_files = retrieve_test_objects_to_pod(awscli_pod, dir)
bucket_name = bucket_factory(amount=1, interface="OC")[0].name
mcg_bucket_path = f"s3://{bucket_name}/"
uploaded_objects_paths = get_full_path_object(downloaded_files, bucket_name)
sync_object_directory(awscli_pod, dir, mcg_bucket_path, mcg_obj)
del_objects(uploaded_objects_paths, awscli_pod, mcg_obj)
awscli_pod.exec_cmd_on_pod(command=f"rm -rf {dir}")
def retrieve_verification_mode():
if config.ENV_DATA["platform"].lower() == "ibm_cloud":
verify = True
elif config.DEPLOYMENT.get("use_custom_ingress_ssl_cert"):
verify = get_root_ca_cert()
else:
verify = constants.DEFAULT_INGRESS_CRT_LOCAL_PATH
logger.debug(f"verification: '{verify}'")
return verify
def namespace_bucket_update(mcg_obj, bucket_name, read_resource, write_resource):
"""
Edits MCG namespace bucket resources
Args:
mcg_obj (obj): An MCG object containing the MCG S3 connection credentials
bucket_name (str): Name of the bucket
read_resource (list): Resource names to provide read access
write_resource (str): Resource name to provide write access
"""
mcg_obj.send_rpc_query(
"bucket_api",
"update_bucket",
{
"name": bucket_name,
"namespace": {
"read_resources": read_resource,
"write_resource": write_resource,
},
},
)
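# Illustrative sketch (resource names are hypothetical): pointing a namespace bucket
# at one write resource and several read resources with the helper above.
#
#     namespace_bucket_update(
#         mcg_obj, "ns-bucket",
#         read_resource=["aws-ns-resource", "azure-ns-resource"],
#         write_resource="aws-ns-resource",
#     )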
def write_random_objects_in_pod(io_pod, file_dir, amount, pattern="ObjKey"):
"""
Uses /dev/urandom to create and write random files in a given
directory in a pod
Args:
io_pod (ocs_ci.ocs.ocp.OCP): The pod object in which the files should be
generated and written
file_dir (str): A string describing the path in which
to write the files to
amount (int): The amount of files to generate
pattern (str): The file name pattern to use
Returns:
list: A list with the names of all written objects
"""
obj_lst = []
for i in range(amount):
object_key = pattern + "-{}".format(i)
obj_lst.append(object_key)
io_pod.exec_cmd_on_pod(
f"dd if=/dev/urandom of={file_dir}/{object_key} bs=1M count=1 status=none"
)
return obj_lst
def setup_base_objects(awscli_pod, original_dir, result_dir, amount=2):
"""
Prepares two directories and populates one of them with objects
Args:
awscli_pod (Pod): A pod running the AWS CLI tools
original_dir (str): original directory name
result_dir (str): result directory name
amount (int): Number of test objects to create
"""
awscli_pod.exec_cmd_on_pod(command=f"mkdir {original_dir} {result_dir}")
write_random_objects_in_pod(awscli_pod, original_dir, amount)
def check_cached_objects_by_name(mcg_obj, bucket_name, expected_objects_names=None):
"""
Check if the names of cached objects in a cache bucket are as expected, using an RPC call
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials
bucket_name (str): Name of the cache bucket
expected_objects_names (list): Expected objects to be cached
Returns:
bool: True if all the objects exist in the cache as expected, False otherwise
"""
res = mcg_obj.send_rpc_query(
"object_api",
"list_objects",
{
"bucket": bucket_name,
},
).json()
list_objects_res = [name["key"] for name in res.get("reply").get("objects")]
if not expected_objects_names:
expected_objects_names = []
if set(expected_objects_names) == set(list_objects_res):
logger.info("Files cached as expected")
return True
logger.warning(
"Objects did not cache properly, \n"
f"Expected: [{expected_objects_names}]\n"
f"Cached: [{list_objects_res}]"
)
return False
def wait_for_cache(mcg_obj, bucket_name, expected_objects_names=None):
"""
wait for existing cache bucket to cache all required objects
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials
bucket_name (str): Name of the cache bucket
expected_objects_names (list): Expected objects to be cached
"""
sample = TimeoutSampler(
timeout=60,
sleep=10,
func=check_cached_objects_by_name,
mcg_obj=mcg_obj,
bucket_name=bucket_name,
expected_objects_names=expected_objects_names,
)
if not sample.wait_for_func_status(result=True):
logger.error("Objects were not able to cache properly")
raise UnexpectedBehaviour
def compare_directory(awscli_pod, original_dir, result_dir, amount=2):
"""
Compares object checksums on original and result directories
Args:
awscli_pod (pod): A pod running the AWS CLI tools
original_dir (str): original directory name
result_dir (str): result directory name
amount (int): Number of test objects to compare
"""
for i in range(amount):
file_name = f"ObjKey-{i}"
assert verify_s3_object_integrity(
original_object_path=f"{original_dir}/{file_name}",
result_object_path=f"{result_dir}/{file_name}",
awscli_pod=awscli_pod,
), "Checksum comparison between original and result object failed"
def s3_copy_object(s3_obj, bucketname, source, object_key):
"""
Boto3 client based copy object
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
source (str): Source object key. e.g. '<bucket>/<key>'
object_key (str): Unique object Identifier for copied object
Returns:
dict : Copy object response
"""
return s3_obj.s3_client.copy_object(
Bucket=bucketname, CopySource=source, Key=object_key
)
def s3_upload_part_copy(
s3_obj, bucketname, copy_source, object_key, part_number, upload_id
):
"""
Boto3 client based upload_part_copy operation
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
copy_source (str): Name of the source bucket and key name. {bucket}/{key}
part_number (int): Part number
upload_id (str): Upload Id
object_key (str): Unique object Identifier for copied object
Returns:
dict : upload_part_copy response
"""
return s3_obj.s3_client.upload_part_copy(
Bucket=bucketname,
CopySource=copy_source,
Key=object_key,
PartNumber=part_number,
UploadId=upload_id,
)
def s3_get_object_acl(s3_obj, bucketname, object_key):
"""
Boto3 client based get_object_acl operation
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier for copied object
Returns:
dict : get object acl response
"""
return s3_obj.s3_client.get_object_acl(Bucket=bucketname, Key=object_key)
def s3_head_object(s3_obj, bucketname, object_key, if_match=None):
"""
Boto3 client based head_object operation to retrieve only metadata
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_key (str): Unique object Identifier for copied object
if_match (str): Return the object only if its entity tag (ETag)
is the same as the one specified,
Returns:
dict : head object response
"""
if if_match:
return s3_obj.s3_client.head_object(
Bucket=bucketname, Key=object_key, IfMatch=if_match
)
else:
return s3_obj.s3_client.head_object(Bucket=bucketname, Key=object_key)
def s3_list_objects_v1(
s3_obj, bucketname, prefix="", delimiter="", max_keys=1000, marker=""
):
"""
Boto3 client based list object version1
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
prefix (str): Limits the response to keys that begin with the specified prefix.
delimiter (str): Character used to group keys.
max_keys (int): Maximum number of keys returned in the response. Default 1,000 keys.
marker (str): key to start with when listing objects in a bucket.
Returns:
dict : list object v1 response
"""
return s3_obj.s3_client.list_objects(
Bucket=bucketname,
Prefix=prefix,
Delimiter=delimiter,
MaxKeys=max_keys,
Marker=marker,
)
def s3_list_objects_v2(
s3_obj,
bucketname,
prefix="",
delimiter="",
max_keys=1000,
con_token="",
fetch_owner=False,
):
"""
Boto3 client based list object version2
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
prefix (str): Limits the response to keys that begin with the specified prefix.
delimiter (str): Character used to group keys.
max_keys (int): Maximum number of keys returned in the response. Default 1,000 keys.
con_token (str): Token used to continue the list
fetch_owner (bool): Unique object Identifier
Returns:
dict : list object v2 response
"""
return s3_obj.s3_client.list_objects_v2(
Bucket=bucketname,
Prefix=prefix,
Delimiter=delimiter,
MaxKeys=max_keys,
ContinuationToken=con_token,
FetchOwner=fetch_owner,
)
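# Illustrative sketch: paging through a large bucket with the v2 helper above. The
# IsTruncated/NextContinuationToken fields come from the standard ListObjectsV2
# response; depending on the endpoint, an empty ContinuationToken may need to be
# omitted on the first call.
#
#     keys, token = [], ""
#     while True:
#         resp = s3_list_objects_v2(s3_obj, bucket_name, max_keys=1000, con_token=token)
#         keys += [item["Key"] for item in resp.get("Contents", [])]
#         if not resp.get("IsTruncated"):
#             break
#         token = resp["NextContinuationToken"]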
def s3_delete_objects(s3_obj, bucketname, object_keys):
"""
Boto3 client based delete objects
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
object_keys (list): The objects to delete. Format: {'Key': 'object_key', 'VersionId': ''}
Returns:
dict : delete objects response
"""
return s3_obj.s3_client.delete_objects(
Bucket=bucketname, Delete={"Objects": object_keys}
)
def bucket_read_api(mcg_obj, bucket_name):
"""
Fetches the bucket metadata like size, tiers etc
Args:
mcg_obj (obj): MCG object
bucket_name (str): Name of the bucket
Returns:
dict : Bucket policy response
"""
resp = mcg_obj.send_rpc_query(
"bucket_api", "read_bucket", params={"name": bucket_name}
)
bucket_read_resp = resp.json().get("reply")
return bucket_read_resp
def get_bucket_available_size(mcg_obj, bucket_name):
"""
Function to get the bucket available size
Args:
mcg_obj (obj): MCG object
bucket_name (str): Name of the bucket
Returns:
int : Available size in the bucket
"""
resp = bucket_read_api(mcg_obj, bucket_name)
bucket_size = resp["storage"]["values"]["free"]
return bucket_size
def compare_bucket_object_list(mcg_obj, first_bucket_name, second_bucket_name):
"""
Compares the object lists of two given buckets
Args:
mcg_obj (MCG): An initialized MCG object
first_bucket_name (str): The name of the first bucket to compare
second_bucket_name (str): The name of the second bucket to compare
Returns:
bool: True if both buckets contain the same object names in all objects,
False otherwise
"""
def _comparison_logic():
first_bucket_object_set = {
obj.key for obj in mcg_obj.s3_list_all_objects_in_bucket(first_bucket_name)
}
second_bucket_object_set = {
obj.key for obj in mcg_obj.s3_list_all_objects_in_bucket(second_bucket_name)
}
if first_bucket_object_set == second_bucket_object_set:
logger.info("Objects in both buckets are identical")
return True
else:
logger.warning(
f"""Buckets {first_bucket_name} and {second_bucket_name} do not contain the same objects.
{first_bucket_name} objects:
{first_bucket_object_set}
{second_bucket_name} objects:
{second_bucket_object_set}
"""
)
return False
try:
for comparison_result in TimeoutSampler(600, 30, _comparison_logic):
if comparison_result:
return True
except TimeoutExpiredError:
logger.error(
"The compared buckets did not contain the same set of objects after ten minutes"
)
return False
def write_random_test_objects_to_bucket(
io_pod,
bucket_to_write,
file_dir,
amount=1,
mcg_obj=None,
s3_creds=None,
):
"""
Write files generated by /dev/urandom to a bucket
Args:
io_pod (ocs_ci.ocs.ocp.OCP): The pod which should handle all needed IO operations
bucket_to_write (str): The bucket name to write the random files to
file_dir (str): The path to the folder where all random files will be
generated and copied from
amount (int, optional): The amount of random objects to write. Defaults to 1.
mcg_obj (MCG, optional): An MCG class instance
s3_creds (dict, optional): A dictionary containing S3-compatible credentials
for writing objects directly to buckets outside of the MCG. Defaults to None.
Returns:
list: A list containing the names of the random files that were written
"""
full_object_path = f"s3://{bucket_to_write}"
obj_lst = write_random_objects_in_pod(io_pod, file_dir, amount)
sync_object_directory(
io_pod,
file_dir,
full_object_path,
s3_obj=mcg_obj,
signed_request_creds=s3_creds,
)
return obj_lst
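# Illustrative sketch (fixture objects and the directory are hypothetical): generate
# random objects inside the awscli pod, push them to an MCG bucket with the helper
# above, then verify they are listed in the bucket.
#
#     written = write_random_test_objects_to_bucket(
#         awscli_pod, bucket.name, file_dir="/aws/objs", amount=10, mcg_obj=mcg_obj
#     )
#     assert set(written) <= {
#         obj.key for obj in mcg_obj.s3_list_all_objects_in_bucket(bucket.name)
#     }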
|
[
"uuid.uuid4",
"ocs_ci.utility.ssl_certs.get_root_ca_cert",
"ocs_ci.utility.utils.run_cmd",
"ocs_ci.framework.config.DEPLOYMENT.get",
"ocs_ci.utility.templating.load_yaml",
"ocs_ci.helpers.helpers.create_resource",
"ocs_ci.ocs.resources.pod.get_rgw_pods",
"boto3.resource",
"os.getenv",
"logging.getLogger",
"ocs_ci.utility.utils.TimeoutSampler"
] |
[((538, 565), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (555, 565), False, 'import logging\n'), ((4334, 4354), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (4348, 4354), False, 'import boto3\n'), ((7020, 7034), 'ocs_ci.ocs.resources.pod.get_rgw_pods', 'get_rgw_pods', ([], {}), '()\n', (7032, 7034), False, 'from ocs_ci.ocs.resources.pod import get_rgw_pods\n'), ((10321, 10374), 'ocs_ci.utility.templating.load_yaml', 'templating.load_yaml', (['constants.MCG_BACKINGSTORE_YAML'], {}), '(constants.MCG_BACKINGSTORE_YAML)\n', (10341, 10374), False, 'from ocs_ci.utility import templating\n'), ((10725, 10751), 'ocs_ci.helpers.helpers.create_resource', 'create_resource', ([], {}), '(**bs_data)\n', (10740, 10751), False, 'from ocs_ci.helpers.helpers import create_resource\n'), ((12002, 12055), 'ocs_ci.utility.templating.load_yaml', 'templating.load_yaml', (['constants.MCG_BACKINGSTORE_YAML'], {}), '(constants.MCG_BACKINGSTORE_YAML)\n', (12022, 12055), False, 'from ocs_ci.utility import templating\n'), ((12339, 12365), 'ocs_ci.helpers.helpers.create_resource', 'create_resource', ([], {}), '(**bs_data)\n', (12354, 12365), False, 'from ocs_ci.helpers.helpers import create_resource\n'), ((13581, 13634), 'ocs_ci.utility.templating.load_yaml', 'templating.load_yaml', (['constants.MCG_BACKINGSTORE_YAML'], {}), '(constants.MCG_BACKINGSTORE_YAML)\n', (13601, 13634), False, 'from ocs_ci.utility import templating\n'), ((13917, 13943), 'ocs_ci.helpers.helpers.create_resource', 'create_resource', ([], {}), '(**bs_data)\n', (13932, 13943), False, 'from ocs_ci.helpers.helpers import create_resource\n'), ((15140, 15193), 'ocs_ci.utility.templating.load_yaml', 'templating.load_yaml', (['constants.MCG_BACKINGSTORE_YAML'], {}), '(constants.MCG_BACKINGSTORE_YAML)\n', (15160, 15193), False, 'from ocs_ci.utility import templating\n'), ((15697, 15723), 'ocs_ci.helpers.helpers.create_resource', 'create_resource', ([], {}), '(**bs_data)\n', (15712, 15723), False, 'from ocs_ci.helpers.helpers import create_resource\n'), ((17196, 17248), 'ocs_ci.utility.templating.load_yaml', 'templating.load_yaml', (['constants.PV_BACKINGSTORE_YAML'], {}), '(constants.PV_BACKINGSTORE_YAML)\n', (17216, 17248), False, 'from ocs_ci.utility import templating\n'), ((17582, 17608), 'ocs_ci.helpers.helpers.create_resource', 'create_resource', ([], {}), '(**bs_data)\n', (17597, 17608), False, 'from ocs_ci.helpers.helpers import create_resource\n'), ((18701, 18835), 'ocs_ci.utility.utils.TimeoutSampler', 'TimeoutSampler', ([], {'timeout': '(240)', 'sleep': '(15)', 'func': 'check_pv_backingstore_status', 'backingstore_name': 'backingstore_name', 'namespace': 'namespace'}), '(timeout=240, sleep=15, func=check_pv_backingstore_status,\n backingstore_name=backingstore_name, namespace=namespace)\n', (18715, 18835), False, 'from ocs_ci.utility.utils import TimeoutSampler, run_cmd\n'), ((19666, 19689), 'os.getenv', 'os.getenv', (['"""KUBECONFIG"""'], {}), "('KUBECONFIG')\n", (19675, 19689), False, 'import os\n'), ((19981, 19997), 'ocs_ci.utility.utils.run_cmd', 'run_cmd', ([], {'cmd': 'cmd'}), '(cmd=cmd)\n', (19988, 19997), False, 'from ocs_ci.utility.utils import TimeoutSampler, run_cmd\n'), ((35705, 35874), 'ocs_ci.utility.utils.TimeoutSampler', 'TimeoutSampler', ([], {'timeout': '(60)', 'sleep': '(10)', 'func': 'check_cached_objects_by_name', 'mcg_obj': 'mcg_obj', 'bucket_name': 'bucket_name', 'expected_objects_names': 'expected_objects_names'}), '(timeout=60, sleep=10, 
func=check_cached_objects_by_name,\n mcg_obj=mcg_obj, bucket_name=bucket_name, expected_objects_names=\n expected_objects_names)\n', (35719, 35874), False, 'from ocs_ci.utility.utils import TimeoutSampler, run_cmd\n'), ((31912, 31964), 'ocs_ci.framework.config.DEPLOYMENT.get', 'config.DEPLOYMENT.get', (['"""use_custom_ingress_ssl_cert"""'], {}), "('use_custom_ingress_ssl_cert')\n", (31933, 31964), False, 'from ocs_ci.framework import config\n'), ((43581, 43623), 'ocs_ci.utility.utils.TimeoutSampler', 'TimeoutSampler', (['(600)', '(30)', '_comparison_logic'], {}), '(600, 30, _comparison_logic)\n', (43595, 43623), False, 'from ocs_ci.utility.utils import TimeoutSampler, run_cmd\n'), ((31983, 32001), 'ocs_ci.utility.ssl_certs.get_root_ca_cert', 'get_root_ca_cert', ([], {}), '()\n', (31999, 32001), False, 'from ocs_ci.utility.ssl_certs import get_root_ca_cert\n'), ((29204, 29211), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (29209, 29211), False, 'from uuid import uuid4\n'), ((31311, 31318), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (31316, 31318), False, 'from uuid import uuid4\n')]
|
#!/usr/bin/env python
#
from __future__ import print_function
import argparse
import struct
import sys
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def main():
ESP32_TRACE_BLOCK_HDR_SZ = 8
ESP32_TRACE_BLOCK_TASK_IDX = 0
ESP32_TRACE_BLOCK_TS_IDX = 1
ESP32_TRACE_BLOCK_DATA_IDX = 2
parser = argparse.ArgumentParser(description='ESP32 App Trace Parse Tool')
parser.add_argument('file', help='Path to app trace file', type=str)
parser.add_argument('--print-tasks', '-p', help='Print tasks', action='store_true')
parser.add_argument('--print-details', '-d', help='Print detailed stats', action='store_true')
parser.add_argument('--no-errors', '-n', help='Do not print errors', action='store_true')
parser.add_argument('--block-len', '-b', help='Block length', type=int, default=1024)
args = parser.parse_args()
print("====================================================================")
try:
ftrc = open(args.file, 'rb')
except IOError as e:
print("Failed to open trace file (%s)!" % e)
sys.exit(2)
passed = True
off = 0
data_stats = {}
last_ts = None
tot_discont = 0
while True:
# ftrc.seek(off)
task = None
ts = 0
trc_buf = ftrc.read(args.block_len)
if len(trc_buf) == 0:
# print('EOF')
break
trc_data = struct.unpack('<LL%sB' % (len(trc_buf) - ESP32_TRACE_BLOCK_HDR_SZ), trc_buf)
if len(trc_data):
# print("%x %x, len %d" % (trc_data[0], trc_data[1], len(trc_data) - 2))
# print(trc_data[2:])
# sys.exit(0)
task = trc_data[ESP32_TRACE_BLOCK_TASK_IDX]
ts = trc_data[ESP32_TRACE_BLOCK_TS_IDX]
# print(ts)
if last_ts and last_ts >= ts:
# print("Global TS discontinuity %x -> %x, task %x, stamp %x at %x" % (last_ts, ts, task,
# data_stats[task]['stamp'], off))
if args.print_details:
print("Global TS discontinuity %x -> %x, task %x at %x" % (last_ts, ts, task, off))
# tot_discont += 1
# passed = False
last_ts = ts
if task not in data_stats:
print("%x: NEW TASK" % task)
data_stats[task] = {'stamp': trc_data[ESP32_TRACE_BLOCK_DATA_IDX], 'last_ts': ts, 'count': 1, 'discont_offs': [], 'inv_stamps_offs': []}
else:
if data_stats[task]['last_ts'] == ts:
print("Task TS discontinuity %x -> %x, task %x, stamp %x at %x" % (last_ts, ts, task, data_stats[task]['stamp'], off))
data_stats[task]['discont_offs'].append(off)
tot_discont += 1
passed = False
data_stats[task]['last_ts'] = ts
data_stats[task]['count'] += 1
if len(trc_data) > ESP32_TRACE_BLOCK_DATA_IDX:
# print("DATA = %x %x %x %x" % (trc_data[-4], trc_data[-3], trc_data[-2], trc_data[-1]))
if args.print_tasks:
print("Task[%d] %x, ts %08x, stamp %x" % (off // args.block_len, task, ts, trc_data[ESP32_TRACE_BLOCK_DATA_IDX]))
else:
print("%x: NO DATA" % task)
else:
print("Failed to unpack data!")
sys.exit(2)
# check data
for i in range(ESP32_TRACE_BLOCK_DATA_IDX, len(trc_data)):
if trc_data[i] != data_stats[task]['stamp']:
if not args.no_errors:
print("Invalid stamp %x->%x at %x, task %x" % (data_stats[task]['stamp'], trc_data[i], off + ESP32_TRACE_BLOCK_HDR_SZ + i, task))
passed = False
data_stats[task]['stamp'] = trc_data[i]
data_stats[task]['inv_stamps_offs'].append(off)
# break
if len(trc_buf) < args.block_len:
print('Last block (not full)')
break
if data_stats[task]['stamp'] is not None:
data_stats[task]['stamp'] = (data_stats[task]['stamp'] + 1) & 0xFF
# print("stamp=%x" % data_stats[task][ESP32_TRACE_STAMP_IDX])
off += args.block_len
ftrc.close()
print("====================================================================")
print("Trace size %d bytes, discont %d\n" % (off, tot_discont))
for t in data_stats:
print("Task %x. Total count %d. Inv stamps %d. TS Discontinuities %d." % (t, data_stats[t]['count'],
len(data_stats[t]['inv_stamps_offs']), len(data_stats[t]['discont_offs'])))
if args.print_details:
print('Invalid stamps offs: [{}]'.format(', '.join(hex(x) for x in data_stats[t]['inv_stamps_offs'])))
print('TS Discontinuities offs: [{}]'.format(', '.join(hex(x) for x in data_stats[t]['discont_offs'])))
print("\n")
if passed:
print("Data - OK")
else:
print("Data - FAILED!")
if __name__ == '__main__':
main()
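# Example invocation (script and trace file names are hypothetical; --block-len must
# match the block size the trace was captured with):
#
#     python apptrace_check.py trace.bin --block-len 1024 --print-tasks --print-details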
|
[
"argparse.ArgumentParser",
"sys.exit"
] |
[((499, 564), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ESP32 App Trace Parse Tool"""'}), "(description='ESP32 App Trace Parse Tool')\n", (522, 564), False, 'import argparse\n'), ((1272, 1283), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1280, 1283), False, 'import sys\n'), ((3609, 3620), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (3617, 3620), False, 'import sys\n')]
|
from django.db import models
CONTENT_TYPE = (
('html', 'html'),
('text', 'text'),
)
class Newsletter(models.Model):
title = models.CharField(max_length=500)
content_type = models.CharField(choices=CONTENT_TYPE, max_length=30)
content = models.TextField()
subscribers = models.ManyToManyField('Subcriber', blank=True)
created = models.DateTimeField(auto_now_add=True)
class Subcriber(models.Model):
name = models.CharField(max_length=300)
email = models.EmailField(max_length=500)
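# Illustrative sketch (e.g. from a Django shell; values are hypothetical): create a
# subscriber and attach it to a newsletter through the M2M field declared above.
#
#     sub = Subcriber.objects.create(name="Jane", email="jane@example.com")
#     letter = Newsletter.objects.create(
#         title="Weekly digest", content_type="html", content="<p>Hello</p>"
#     )
#     letter.subscribers.add(sub)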
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.EmailField",
"django.db.models.DateTimeField"
] |
[((131, 163), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (147, 163), False, 'from django.db import models\n'), ((181, 234), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'CONTENT_TYPE', 'max_length': '(30)'}), '(choices=CONTENT_TYPE, max_length=30)\n', (197, 234), False, 'from django.db import models\n'), ((246, 264), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (262, 264), False, 'from django.db import models\n'), ((281, 328), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Subcriber"""'], {'blank': '(True)'}), "('Subcriber', blank=True)\n", (303, 328), False, 'from django.db import models\n'), ((340, 379), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (360, 379), False, 'from django.db import models\n'), ((422, 454), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (438, 454), False, 'from django.db import models\n'), ((465, 498), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (482, 498), False, 'from django.db import models\n')]
|
"""Global fixtures for Porsche Connect integration."""
from typing import Any
from unittest.mock import patch
import pytest
from pyporscheconnectapi.exceptions import WrongCredentials
# from unittest.mock import Mock
pytest_plugins = "pytest_homeassistant_custom_component"
# This fixture is used to prevent HomeAssistant from attempting to create and dismiss persistent
# notifications. These calls would fail without this fixture since the persistent_notification
# integration is never loaded during a test.
@pytest.fixture(name="skip_notifications", autouse=True)
def skip_notifications_fixture():
"""Skip notification calls."""
with patch("homeassistant.components.persistent_notification.async_create"), patch(
"homeassistant.components.persistent_notification.async_dismiss"
):
yield
@pytest.fixture(name="auto_enable_custom_integrations", autouse=True)
def auto_enable_custom_integrations(
hass: Any, enable_custom_integrations: Any # noqa: F811
) -> None:
"""Enable custom integrations defined in the test dir."""
# This fixture, when used, will result in calls to async_get_data to return None. To have the call
# return a value, we would add the `return_value=<VALUE_TO_RETURN>` parameter to the patch call.
@pytest.fixture(name="bypass_connection_connect")
def bypass_connection_connect_fixture():
"""Skip calls to get data from API."""
with patch("pyporscheconnectapi.connection.Connection._login"), patch(
"pyporscheconnectapi.connection.Connection.getAllTokens"
):
yield
# In this fixture, we are forcing calls to async_get_data to raise an Exception. This is useful
# for exception handling.
@pytest.fixture(name="error_connection_connect")
def error_connection_connect_fixture():
"""Simulate error when retrieving data from API."""
with patch(
"pyporscheconnectapi.connection.Connection._login",
side_effect=WrongCredentials,
), patch("pyporscheconnectapi.connection.Connection.getAllTokens"):
yield
# This fixture, when used, will result in calls to async_get_data to return None. To have the call
# return a value, we would add the `return_value=<VALUE_TO_RETURN>` parameter to the patch call.
@pytest.fixture(name="bypass_get_data")
def bypass_get_data_fixture():
"""Skip calls to get data from API."""
with patch(
"custom_components.porscheconnect.PorscheConnectApiClient.async_get_data"
):
yield
# In this fixture, we are forcing calls to async_get_data to raise an Exception. This is useful
# for exception handling.
@pytest.fixture(name="error_on_get_data")
def error_get_data_fixture():
"""Simulate error when retrieving data from API."""
with patch(
"custom_components.porscheconnect.PorscheConnectApiClient.async_get_data",
side_effect=Exception,
):
yield
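# Illustrative sketch (hypothetical test, not part of this module): a test opts into
# one of the fixtures above simply by naming it as an argument; pytest then applies
# the corresponding patches for the duration of that test.
#
#     async def test_setup_with_bad_credentials(hass, error_connection_connect):
#         ...  # driving the config flow here would surface WrongCredentials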
|
[
"unittest.mock.patch",
"pytest.fixture"
] |
[((517, 572), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""skip_notifications"""', 'autouse': '(True)'}), "(name='skip_notifications', autouse=True)\n", (531, 572), False, 'import pytest\n'), ((827, 895), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""auto_enable_custom_integrations"""', 'autouse': '(True)'}), "(name='auto_enable_custom_integrations', autouse=True)\n", (841, 895), False, 'import pytest\n'), ((1266, 1314), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""bypass_connection_connect"""'}), "(name='bypass_connection_connect')\n", (1280, 1314), False, 'import pytest\n'), ((1685, 1732), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""error_connection_connect"""'}), "(name='error_connection_connect')\n", (1699, 1732), False, 'import pytest\n'), ((2228, 2266), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""bypass_get_data"""'}), "(name='bypass_get_data')\n", (2242, 2266), False, 'import pytest\n'), ((2585, 2625), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""error_on_get_data"""'}), "(name='error_on_get_data')\n", (2599, 2625), False, 'import pytest\n'), ((651, 721), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.persistent_notification.async_create"""'], {}), "('homeassistant.components.persistent_notification.async_create')\n", (656, 721), False, 'from unittest.mock import patch\n'), ((723, 794), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.persistent_notification.async_dismiss"""'], {}), "('homeassistant.components.persistent_notification.async_dismiss')\n", (728, 794), False, 'from unittest.mock import patch\n'), ((1408, 1465), 'unittest.mock.patch', 'patch', (['"""pyporscheconnectapi.connection.Connection._login"""'], {}), "('pyporscheconnectapi.connection.Connection._login')\n", (1413, 1465), False, 'from unittest.mock import patch\n'), ((1467, 1530), 'unittest.mock.patch', 'patch', (['"""pyporscheconnectapi.connection.Connection.getAllTokens"""'], {}), "('pyporscheconnectapi.connection.Connection.getAllTokens')\n", (1472, 1530), False, 'from unittest.mock import patch\n'), ((1838, 1930), 'unittest.mock.patch', 'patch', (['"""pyporscheconnectapi.connection.Connection._login"""'], {'side_effect': 'WrongCredentials'}), "('pyporscheconnectapi.connection.Connection._login', side_effect=\n WrongCredentials)\n", (1843, 1930), False, 'from unittest.mock import patch\n'), ((1950, 2013), 'unittest.mock.patch', 'patch', (['"""pyporscheconnectapi.connection.Connection.getAllTokens"""'], {}), "('pyporscheconnectapi.connection.Connection.getAllTokens')\n", (1955, 2013), False, 'from unittest.mock import patch\n'), ((2350, 2435), 'unittest.mock.patch', 'patch', (['"""custom_components.porscheconnect.PorscheConnectApiClient.async_get_data"""'], {}), "('custom_components.porscheconnect.PorscheConnectApiClient.async_get_data'\n )\n", (2355, 2435), False, 'from unittest.mock import patch\n'), ((2721, 2829), 'unittest.mock.patch', 'patch', (['"""custom_components.porscheconnect.PorscheConnectApiClient.async_get_data"""'], {'side_effect': 'Exception'}), "('custom_components.porscheconnect.PorscheConnectApiClient.async_get_data'\n , side_effect=Exception)\n", (2726, 2829), False, 'from unittest.mock import patch\n')]
|
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
import pdb
from . import kf_2d, kf_3d, double_measurement_kf, imm
from . import linear_assignment
from . import iou_matching
from .track import Track
from . import JPDA_matching
from . import tracking_utils
import math
from nn_matching import NearestNeighborDistanceMetric
import cv2
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
Maximum number of missed misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
Maximum number of missed misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : EKF.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
def __init__(self, max_age=5, n_init=3,
JPDA=False, m_best_sol=1, assn_thresh=0.0,
matching_strategy=None,
kf_appearance_feature=None,
gate_full_state=False, lstm = None, cuda = False, appearance_model = None,
calib = None, kf_vel_params=(1./20, 1./160, 1, 1, 2), dummy_node_cost_iou=0.4, dummy_node_cost_app=0.2, nn_budget = None, use_imm=False, kf_walk_params=(1./20, 1./160, 1, 1, 2),
markov=(0.9, 0.7), uncertainty_limit=1.8, optical_flow=False, gate_limit=400):
self.max_age = max_age
self.n_init = n_init
self.metric = NearestNeighborDistanceMetric("euclidean", nn_budget)
if not use_imm:
self.kf = kf_2d.KalmanFilter2D(*kf_vel_params, gate_limit)
self.use_imm = False
else:
self.kf = imm.IMMFilter2D(kf_vel_params, kf_walk_params, markov=markov)
self.use_imm = True
self.tracks = []
self._next_id = 1
self.JPDA = JPDA
self.m_best_sol = m_best_sol
self.assn_thresh = assn_thresh
self.matching_strategy = matching_strategy
self.kf_appearance_feature = kf_appearance_feature
self.gate_only_position = not gate_full_state
self.lstm = lstm
self.cuda = cuda
self.dummy_node_cost_app = dummy_node_cost_app
self.dummy_node_cost_iou = dummy_node_cost_iou
self.appearance_model = appearance_model
self.prev_frame = None
self.uncertainty_limit = uncertainty_limit
self.optical_flow = optical_flow
# @profile
def gated_metric(self, tracks, dets, track_indices, detection_indices, compare_2d = False):
targets = np.array([tracks[i].track_id for i in track_indices])
if not compare_2d and self.metric.check_samples(targets):
compare_2d = True
if compare_2d:
features = np.array([dets[i].appearance_feature for i in detection_indices])
else:
features = np.array([dets[i].feature for i in detection_indices])
#cost_matrix = self.metric.distance(features, targets, compare_2d)
cost_matrix_appearance = self.metric.distance_torch(features, targets, compare_2d)
cost_matrix_iou = iou_matching.iou_cost(tracks, dets, track_indices, detection_indices)
gate_mask = linear_assignment.gate_cost_matrix(
self.kf, tracks, dets, track_indices,
detection_indices, only_position=self.gate_only_position)
cost_matrix = np.dstack((cost_matrix_appearance, cost_matrix_iou))
return cost_matrix, gate_mask
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
for track in self.tracks:
track.predict(self.kf)
# @profile
def update(self, cur_frame, detections, compare_2d = False):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
self.cur_frame = cv2.cvtColor((255*cur_frame).permute(1,2,0).cpu().numpy(), cv2.COLOR_BGR2GRAY)
matches, unmatched_tracks, unmatched_detections = \
self._match(detections, compare_2d)
# update filter for each assigned track
# Only do this for non-JPDA because in JPDA the kf states are updated
# during the matching process
if not self.JPDA:
# Map matched tracks to detections
track_detection_map = {t:d for (t,d) in matches}
# Map unmatched tracks to -1 for no detection
for t in unmatched_tracks:
track_detection_map[t] = -1
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(self.kf, detections,
detection_idx=detection_idx, JPDA=self.JPDA,
cur_frame = self.cur_frame, appearance_model = self.appearance_model,
lstm = self.lstm)
# update track state for unmatched tracks
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
# create new tracks
self.prune_tracks()
flow = None
if unmatched_detections:
if self.optical_flow and self.prev_frame is not None:
flow = cv2.calcOpticalFlowFarneback(self.prev_frame, self.cur_frame, None, 0.5, 3, 15, 3, 5, 1.2, 0)
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx], flow)
# Update distance metric.
active_targets = [t.track_id for t in self.tracks]
features, features_2d, targets, targets_2d = [], [], [], []
for track in self.tracks:
features += track.features
features_2d += track.features_2d
targets += [track.track_id for _ in track.features]
targets_2d += [track.track_id for _ in track.features_2d]
track.features = []
track.features_2d = []
self.metric.partial_fit(
np.asarray(features), np.asarray(features_2d), np.asarray(targets), np.asarray(targets_2d), active_targets)
self.prev_frame = self.cur_frame
# @profile
def _match(self, detections, compare_2d):
# Associate all tracks using combined cost matrices.
if self.JPDA:
# Run JPDA on all tracks
marginalizations = \
linear_assignment.JPDA(self.gated_metric, self.dummy_node_cost_app, self.dummy_node_cost_iou, self.tracks, \
detections, m=self.m_best_sol, compare_2d = compare_2d)
# for track in self.tracks: #TODO: REMOVE
# print(track.track_id)
# print(marginalizations)
jpda_matcher = JPDA_matching.Matcher(
detections, marginalizations, range(len(self.tracks)),
self.matching_strategy, assignment_threshold=self.assn_thresh)
matches_a, unmatched_tracks_a, unmatched_detections = jpda_matcher.match()
# Map matched tracks to detections
track_detection_map = {t:d for (t,d) in matches_a}
# Map unmatched tracks to -1 for no detection
for t in unmatched_tracks_a:
track_detection_map[t] = -1
# update Kalman state
if marginalizations.shape[0] > 0:
for i in range(len(self.tracks)):
self.tracks[i].update(self.kf, detections,
marginalization=marginalizations[i,:], detection_idx=track_detection_map[i],
JPDA=self.JPDA, cur_frame = self.cur_frame, appearance_model = self.appearance_model, lstm = self.lstm)
else:
confirmed_tracks = [i for i, t in enumerate(self.tracks) if t.is_confirmed()]
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
self.gated_metric, self.dummy_node_cost_iou, self.max_age,
self.tracks, detections, confirmed_tracks, compare_2d = compare_2d)
return matches_a, unmatched_tracks_a, unmatched_detections
def _initiate_track(self, detection, flow=None):
if self.use_imm:
mean, covariance, model_probabilities = self.kf.initiate(detection.to_xywh(), flow)
else:
mean, covariance = self.kf.initiate(detection.to_xywh(), flow)
model_probabilities = None
self.tracks.append(Track(
mean, covariance, model_probabilities, self._next_id, self.n_init, self.max_age,
kf_appearance_feature = self.kf_appearance_feature,
feature=detection.feature, appearance_feature = detection.appearance_feature,
cuda = self.cuda, lstm = self.lstm, last_det = detection))
self._next_id += 1
def prune_tracks(self):
h, w = self.cur_frame.shape
for track in self.tracks:
# Check if track is leaving
if self.use_imm:
predicted_mean, predicted_cov = self.kf.combine_states(track.mean, track.covariance, track.model_probabilities) #TODO: This doesn't predict. Mean should def predict
else:
predicted_mean = self.kf.predict_mean(track.mean)
predicted_cov = track.covariance
predicted_pos = predicted_mean[:2]
predicted_vel = predicted_mean[4:6]
predicted_pos[0] -= w/2
predicted_pos[1] -= h/2
cos_theta = np.dot(predicted_pos, predicted_vel)/(np.linalg.norm(predicted_pos)*
np.linalg.norm(predicted_vel) + 1e-6)
predicted_pos[0] += w/2
predicted_pos[1] += h/2
# Thresholds for deciding whether track is outside image
BORDER_VALUE = 0
if (cos_theta > 0 and
(predicted_pos[0] - track.mean[2]/2<= BORDER_VALUE or
predicted_pos[0] + track.mean[2]/2 >= w - BORDER_VALUE)):
if track.is_exiting() and not track.matched:
track.delete_track()
else:
track.mark_exiting()
# Check if track is too uncertain
# cov_axis,_ = np.linalg.eigh(predicted_cov)
# if np.abs(np.sqrt(cov_axis[-1]))*6 > self.uncertainty_limit*np.linalg.norm(predicted_mean[2:4]):
# track.delete_track()
self.tracks = [t for t in self.tracks if not t.is_deleted()]
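# Illustrative sketch (hypothetical driver, not part of this module): the expected
# per-frame loop. `frame` is assumed to be a CxHxW torch tensor in [0, 1] and
# `detections` a list of deep_sort Detection objects exposing to_xywh(), .feature
# and .appearance_feature, matching what update()/_initiate_track() consume.
#
#     tracker = Tracker(max_age=5, n_init=3, JPDA=True, m_best_sol=5)
#     for frame, detections in frame_stream:
#         tracker.predict()
#         tracker.update(frame, detections)
#         confirmed = [t for t in tracker.tracks if t.is_confirmed()]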
|
[
"numpy.dstack",
"nn_matching.NearestNeighborDistanceMetric",
"numpy.asarray",
"numpy.array",
"numpy.linalg.norm",
"cv2.calcOpticalFlowFarneback",
"numpy.dot"
] |
[((2036, 2089), 'nn_matching.NearestNeighborDistanceMetric', 'NearestNeighborDistanceMetric', (['"""euclidean"""', 'nn_budget'], {}), "('euclidean', nn_budget)\n", (2065, 2089), False, 'from nn_matching import NearestNeighborDistanceMetric\n'), ((3126, 3179), 'numpy.array', 'np.array', (['[tracks[i].track_id for i in track_indices]'], {}), '([tracks[i].track_id for i in track_indices])\n', (3134, 3179), True, 'import numpy as np\n'), ((3941, 3993), 'numpy.dstack', 'np.dstack', (['(cost_matrix_appearance, cost_matrix_iou)'], {}), '((cost_matrix_appearance, cost_matrix_iou))\n', (3950, 3993), True, 'import numpy as np\n'), ((3322, 3387), 'numpy.array', 'np.array', (['[dets[i].appearance_feature for i in detection_indices]'], {}), '([dets[i].appearance_feature for i in detection_indices])\n', (3330, 3387), True, 'import numpy as np\n'), ((3425, 3479), 'numpy.array', 'np.array', (['[dets[i].feature for i in detection_indices]'], {}), '([dets[i].feature for i in detection_indices])\n', (3433, 3479), True, 'import numpy as np\n'), ((6679, 6699), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (6689, 6699), True, 'import numpy as np\n'), ((6701, 6724), 'numpy.asarray', 'np.asarray', (['features_2d'], {}), '(features_2d)\n', (6711, 6724), True, 'import numpy as np\n'), ((6726, 6745), 'numpy.asarray', 'np.asarray', (['targets'], {}), '(targets)\n', (6736, 6745), True, 'import numpy as np\n'), ((6747, 6769), 'numpy.asarray', 'np.asarray', (['targets_2d'], {}), '(targets_2d)\n', (6757, 6769), True, 'import numpy as np\n'), ((5940, 6037), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['self.prev_frame', 'self.cur_frame', 'None', '(0.5)', '(3)', '(15)', '(3)', '(5)', '(1.2)', '(0)'], {}), '(self.prev_frame, self.cur_frame, None, 0.5, 3,\n 15, 3, 5, 1.2, 0)\n', (5968, 6037), False, 'import cv2\n'), ((10200, 10236), 'numpy.dot', 'np.dot', (['predicted_pos', 'predicted_vel'], {}), '(predicted_pos, predicted_vel)\n', (10206, 10236), True, 'import numpy as np\n'), ((10238, 10267), 'numpy.linalg.norm', 'np.linalg.norm', (['predicted_pos'], {}), '(predicted_pos)\n', (10252, 10267), True, 'import numpy as np\n'), ((10321, 10350), 'numpy.linalg.norm', 'np.linalg.norm', (['predicted_vel'], {}), '(predicted_vel)\n', (10335, 10350), True, 'import numpy as np\n')]
|
from os.path import join as pjoin
from glob import glob
import os
from tinydb import TinyDB
def pk(doc, pk_extra):
pk_doc = {"path": tuple(doc["path"]), "gold": doc["gold"]}
if "opts" in doc:
pk_doc.update(doc["opts"])
if pk_extra is not None:
pk_doc.update(pk_extra(doc))
return freeze(pk_doc)
def freeze(tree):
if isinstance(tree, dict):
return tuple(((k, freeze(v)) for k, v in sorted(tree.items())))
elif isinstance(tree, list):
return tuple((freeze(v) for v in tree))
else:
return tree
def all_docs(dbs):
for db in dbs:
for doc in db.all():
yield doc
def all_recent(dbs, pk_extra):
recents = {}
for doc in all_docs(dbs):
if "time" not in doc:
continue
key = pk(doc, pk_extra)
if key not in recents or doc["time"] > recents[key]["time"]:
recents[key] = doc
return recents.values()
def expand_db_paths(db_paths):
prev_db = None
def close_prev():
if prev_db is not None:
prev_db.close()
def open_db(path):
nonlocal prev_db
close_prev()
db = TinyDB(path)
prev_db = db
return db.table("results")
for db_path in db_paths:
if os.path.isdir(db_path):
for sub_path in glob(pjoin(db_path, "**", "*.db"), recursive=True):
yield open_db(sub_path)
else:
yield open_db(db_path)
close_prev()
def all_docs_from_dbs(db_paths, pk_extra):
return all_recent(expand_db_paths(db_paths), pk_extra)
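# Illustrative sketch (paths and the pk_extra callable are hypothetical): collect the
# most recent result per primary key across a directory of .db files plus one extra db.
#
#     docs = all_docs_from_dbs(
#         ["results/", "extra.db"],
#         pk_extra=lambda doc: {"seed": doc.get("seed")},
#     )
#     for doc in docs:
#         print(doc["path"], doc["gold"], doc["time"])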
|
[
"os.path.isdir",
"tinydb.TinyDB",
"os.path.join"
] |
[((1163, 1175), 'tinydb.TinyDB', 'TinyDB', (['path'], {}), '(path)\n', (1169, 1175), False, 'from tinydb import TinyDB\n'), ((1273, 1295), 'os.path.isdir', 'os.path.isdir', (['db_path'], {}), '(db_path)\n', (1286, 1295), False, 'import os\n'), ((1330, 1358), 'os.path.join', 'pjoin', (['db_path', '"""**"""', '"""*.db"""'], {}), "(db_path, '**', '*.db')\n", (1335, 1358), True, 'from os.path import join as pjoin\n')]
|
from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='s3-tar',
packages=['s3_tar'],
version='0.1.13',
description='Tar (and compress) files in s3',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email="<EMAIL>",
url="https://github.com/xtream1101/s3-tar",
license='MIT',
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
],
entry_points={
'console_scripts': [
's3-tar=s3_tar.cli:cli',
],
},
install_requires=[
'boto3',
],
)
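# Example (hypothetical shell session): after installing the package, the
# console_scripts entry point above exposes the CLI as `s3-tar`.
#
#     pip install .
#     s3-tar --help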
|
[
"setuptools.setup"
] |
[((98, 685), 'setuptools.setup', 'setup', ([], {'name': '"""s3-tar"""', 'packages': "['s3_tar']", 'version': '"""0.1.13"""', 'description': '"""Tar (and compress) files in s3"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/xtream1101/s3-tar"""', 'license': '"""MIT"""', 'classifiers': "['Programming Language :: Python :: 3',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities']", 'entry_points': "{'console_scripts': ['s3-tar=s3_tar.cli:cli']}", 'install_requires': "['boto3']"}), "(name='s3-tar', packages=['s3_tar'], version='0.1.13', description=\n 'Tar (and compress) files in s3', long_description=long_description,\n long_description_content_type='text/markdown', author='<NAME>',\n author_email='<EMAIL>', url='https://github.com/xtream1101/s3-tar',\n license='MIT', classifiers=['Programming Language :: Python :: 3',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities'], entry_points={'console_scripts': [\n 's3-tar=s3_tar.cli:cli']}, install_requires=['boto3'])\n", (103, 685), False, 'from setuptools import setup\n')]
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowJobResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'begin_time': 'str',
'code': 'str',
'end_time': 'str',
'entities': 'JobEntities',
'error_code': 'str',
'fail_reason': 'str',
'job_id': 'str',
'job_type': 'str',
'message': 'str',
'status': 'str'
}
attribute_map = {
'begin_time': 'begin_time',
'code': 'code',
'end_time': 'end_time',
'entities': 'entities',
'error_code': 'error_code',
'fail_reason': 'fail_reason',
'job_id': 'job_id',
'job_type': 'job_type',
'message': 'message',
'status': 'status'
}
def __init__(self, begin_time=None, code=None, end_time=None, entities=None, error_code=None, fail_reason=None, job_id=None, job_type=None, message=None, status=None):
"""ShowJobResponse - a model defined in huaweicloud sdk"""
super(ShowJobResponse, self).__init__()
self._begin_time = None
self._code = None
self._end_time = None
self._entities = None
self._error_code = None
self._fail_reason = None
self._job_id = None
self._job_type = None
self._message = None
self._status = None
self.discriminator = None
if begin_time is not None:
self.begin_time = begin_time
if code is not None:
self.code = code
if end_time is not None:
self.end_time = end_time
if entities is not None:
self.entities = entities
if error_code is not None:
self.error_code = error_code
if fail_reason is not None:
self.fail_reason = fail_reason
if job_id is not None:
self.job_id = job_id
if job_type is not None:
self.job_type = job_type
if message is not None:
self.message = message
if status is not None:
self.status = status
@property
def begin_time(self):
"""Gets the begin_time of this ShowJobResponse.
        Start time.
:return: The begin_time of this ShowJobResponse.
:rtype: str
"""
return self._begin_time
@begin_time.setter
def begin_time(self, begin_time):
"""Sets the begin_time of this ShowJobResponse.
        Start time.
:param begin_time: The begin_time of this ShowJobResponse.
:type: str
"""
self._begin_time = begin_time
@property
def code(self):
"""Gets the code of this ShowJobResponse.
        Error code returned when the job query API request fails.
:return: The code of this ShowJobResponse.
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ShowJobResponse.
        Error code returned when the job query API request fails.
:param code: The code of this ShowJobResponse.
:type: str
"""
self._code = code
@property
def end_time(self):
"""Gets the end_time of this ShowJobResponse.
        End time.
:return: The end_time of this ShowJobResponse.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ShowJobResponse.
        End time.
:param end_time: The end_time of this ShowJobResponse.
:type: str
"""
self._end_time = end_time
@property
def entities(self):
"""Gets the entities of this ShowJobResponse.
:return: The entities of this ShowJobResponse.
:rtype: JobEntities
"""
return self._entities
@entities.setter
def entities(self, entities):
"""Sets the entities of this ShowJobResponse.
:param entities: The entities of this ShowJobResponse.
:type: JobEntities
"""
self._entities = entities
@property
def error_code(self):
"""Gets the error_code of this ShowJobResponse.
        Error code when the job fails. The value is null after the job succeeds.
:return: The error_code of this ShowJobResponse.
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this ShowJobResponse.
        Error code when the job fails. The value is null after the job succeeds.
:param error_code: The error_code of this ShowJobResponse.
:type: str
"""
self._error_code = error_code
@property
def fail_reason(self):
"""Gets the fail_reason of this ShowJobResponse.
        Reason for the job failure. The value is null after the job succeeds.
:return: The fail_reason of this ShowJobResponse.
:rtype: str
"""
return self._fail_reason
@fail_reason.setter
def fail_reason(self, fail_reason):
"""Sets the fail_reason of this ShowJobResponse.
        Reason for the job failure. The value is null after the job succeeds.
:param fail_reason: The fail_reason of this ShowJobResponse.
:type: str
"""
self._fail_reason = fail_reason
@property
def job_id(self):
"""Gets the job_id of this ShowJobResponse.
        Task ID of the asynchronous request.
:return: The job_id of this ShowJobResponse.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this ShowJobResponse.
        Task ID of the asynchronous request.
:param job_id: The job_id of this ShowJobResponse.
:type: str
"""
self._job_id = job_id
@property
def job_type(self):
"""Gets the job_type of this ShowJobResponse.
        Task type of the asynchronous request.
:return: The job_type of this ShowJobResponse.
:rtype: str
"""
return self._job_type
@job_type.setter
def job_type(self, job_type):
"""Sets the job_type of this ShowJobResponse.
        Task type of the asynchronous request.
:param job_type: The job_type of this ShowJobResponse.
:type: str
"""
self._job_type = job_type
@property
def message(self):
"""Gets the message of this ShowJobResponse.
        Error message returned when the job query API request fails.
:return: The message of this ShowJobResponse.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this ShowJobResponse.
        Error message returned when the job query API request fails.
:param message: The message of this ShowJobResponse.
:type: str
"""
self._message = message
@property
def status(self):
"""Gets the status of this ShowJobResponse.
        Job status. - SUCCESS: succeeded. - RUNNING: running. - FAIL: failed. - INIT: initializing.
:return: The status of this ShowJobResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ShowJobResponse.
        Job status. - SUCCESS: succeeded. - RUNNING: running. - FAIL: failed. - INIT: initializing.
:param status: The status of this ShowJobResponse.
:type: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowJobResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
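# Minimal usage sketch (not part of the SDK source); the field values below are
# invented purely for illustration.
if __name__ == "__main__":
    resp = ShowJobResponse(job_id="example-job-id", job_type="createServer", status="SUCCESS")
    print(resp.to_dict())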
|
[
"huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization",
"six.iteritems",
"sys.setdefaultencoding"
] |
[((7692, 7725), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (7705, 7725), False, 'import six\n'), ((8710, 8741), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (8732, 8741), False, 'import sys\n'), ((8768, 8800), 'huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization', 'sanitize_for_serialization', (['self'], {}), '(self)\n', (8794, 8800), False, 'from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n')]
|
import numcodecs
import pytest
import zarr
from zarr.util import InfoReporter
@pytest.mark.parametrize('array_size', [10, 15000])
def test_info(array_size):
# setup
g = zarr.group(store=dict(), chunk_store=dict(),
synchronizer=zarr.ThreadSynchronizer())
g.create_group('foo')
z = g.zeros('bar', shape=array_size, filters=[numcodecs.Adler32()])
# test group info
items = g.info_items()
keys = sorted([k for k, _ in items])
expected_keys = sorted([
'Type', 'Read-only', 'Synchronizer type', 'Store type', 'Chunk store type',
'No. members', 'No. arrays', 'No. groups', 'Arrays', 'Groups', 'Name'
])
assert expected_keys == keys
# can also get a string representation of info via the info attribute
assert isinstance(g.info, InfoReporter)
assert "Type" in repr(g.info)
# test array info
items = z.info_items()
keys = sorted([k for k, _ in items])
expected_keys = sorted([
'Type', 'Data type', 'Shape', 'Chunk shape', 'Order', 'Read-only', 'Filter [0]',
'Compressor', 'Synchronizer type', 'Store type', 'Chunk store type', 'No. bytes',
'No. bytes stored', 'Storage ratio', 'Chunks initialized', 'Name'
])
assert expected_keys == keys
# can also get a string representation of info via the info attribute
assert isinstance(z.info, InfoReporter)
assert "Type" in repr(z.info)
|
[
"numcodecs.Adler32",
"pytest.mark.parametrize",
"zarr.ThreadSynchronizer"
] |
[((82, 132), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""array_size"""', '[10, 15000]'], {}), "('array_size', [10, 15000])\n", (105, 132), False, 'import pytest\n'), ((258, 283), 'zarr.ThreadSynchronizer', 'zarr.ThreadSynchronizer', ([], {}), '()\n', (281, 283), False, 'import zarr\n'), ((361, 380), 'numcodecs.Adler32', 'numcodecs.Adler32', ([], {}), '()\n', (378, 380), False, 'import numcodecs\n')]
|
#!/usr/bin/env python
"""
Script that goes through all the users in a DB
and replaces their names and emails with random fake ones, clearing their passwords.
"""
import sqlalchemy
import zeeguu
from faker import Faker
fake = Faker()
from zeeguu.model import User
session = zeeguu.db.session
for user in User.query.all():
for _ in range(0,13):
try:
user.name = fake.name()
user.email = fake.email()
user.password = ""
session.add(user)
session.commit()
print (f"added {user.name}")
break
except sqlalchemy.exc.IntegrityError as e:
print (f"retrying...")
continue
|
[
"zeeguu.model.User.query.all",
"faker.Faker"
] |
[((192, 199), 'faker.Faker', 'Faker', ([], {}), '()\n', (197, 199), False, 'from faker import Faker\n'), ((272, 288), 'zeeguu.model.User.query.all', 'User.query.all', ([], {}), '()\n', (286, 288), False, 'from zeeguu.model import User\n')]
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("CHANGELOG.rst") as history_file:
history = history_file.read()
# requirements = ['Click>=7.0', ]
requirements = list(map(str.strip, open("requirements.txt").readlines()))
setup_requirements = []
test_requirements = []
setup(
author="<NAME>",
author_email="<EMAIL>",
python_requires=">=3.6",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="Virtual Finance API provides access to data from financial "
"sites as if it was a REST-API.",
entry_points={"console_scripts": ["vfapi=virtual_finance_api.cli:main"]},
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme + "\n\n" + history,
long_description_content_type="text/markdown",
include_package_data=True,
keywords="virtual_finance_api",
name="virtual_finance_api",
packages=find_packages(include=["virtual_finance_api", "virtual_finance_api.*"]),
setup_requires=requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/hootnot/virtual-finance-API",
version="0.6.0",
zip_safe=False,
)
|
[
"setuptools.find_packages"
] |
[((1381, 1452), 'setuptools.find_packages', 'find_packages', ([], {'include': "['virtual_finance_api', 'virtual_finance_api.*']"}), "(include=['virtual_finance_api', 'virtual_finance_api.*'])\n", (1394, 1452), False, 'from setuptools import setup, find_packages\n')]
|
# mbedRPC.py - mbed RPC interface for Python
#
# Copyright (c) 2010 ARM Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Example:
#>from mbedRPC import*
#>mbed = SerialRPC("COM5",9600);
#>myled = DigitalOut(LED1);
#>myled.write(1)
#>
import serial
import urllib2
import time
class pin():
def __init__(self, id):
self.name = id
LED1 = pin("LED1")
LED2 = pin("LED2")
LED3 = pin("LED3")
LED4 = pin("LED4")
p5 = pin("p5")
p6 = pin("p6")
p7 = pin("p7")
p8 = pin("p8")
p9 = pin("p9")
p10 = pin("p10")
p11 = pin("p11")
p12 = pin("p12")
p13 = pin("p13")
p14 = pin("p14")
p15 = pin("p15")
p16 = pin("p16")
p17 = pin("p17")
p18 = pin("p18")
p19 = pin("p19")
p20 = pin("p20")
p21 = pin("p21")
p22 = pin("p22")
p23 = pin("p23")
p24 = pin("p24")
p25 = pin("p25")
p26 = pin("p26")
p27 = pin("p27")
p28 = pin("p28")
p29 = pin("p29")
p30 = pin("p30")
# mbed super class
class mbed:
def __init__(self):
print("This will work as a demo but no transport mechanism has been selected")
def rpc(self, name, method, args):
print("Superclass method not overridden")
# Transport mechanisms, derived from mbed
class SerialRPC(mbed):
def __init__(self, port, baud):
self.ser = serial.Serial(port)
self.ser.setBaudrate(baud)
def rpc(self, name, method, args):
self.ser.write("/" + name + "/" + method + " " + " ".join(args) + "\n")
return self.ser.readline().strip()
class HTTPRPC(mbed):
def __init__(self, ip):
self.host = "http://" + ip
def rpc(self, name, method, args):
response = urllib2.urlopen(self.host + "/rpc/" + name + "/" + method + "," + ",".join(args))
return response.read().strip()
# mbed Interfaces
class DigitalOut():
def __init__(self, this_mbed, mpin):
self.mbed = this_mbed
if isinstance(mpin, str):
self.name = mpin
elif isinstance(mpin, pin):
self.name = self.mbed.rpc("DigitalOut", "new", [mpin.name])
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def write(self, value):
r = self.mbed.rpc(self.name, "write", [str(value)])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return int(r)
class AnalogIn():
def __init__(self, this_mbed, mpin):
self.mbed = this_mbed
if isinstance(mpin, str):
self.name = mpin
elif isinstance(mpin, pin):
self.name = self.mbed.rpc("AnalogIn", "new", [mpin.name])
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return float(r)
def read_u16(self):
r = self.mbed.rpc(self.name, "read_u16", [])
return int(r)
class AnalogOut():
def __init__(self, this_mbed, mpin):
self.mbed = this_mbed
if isinstance(mpin, str):
self.name = mpin
elif isinstance(mpin, pin):
self.name = self.mbed.rpc("AnalogOut", "new", [mpin.name])
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def write(self, value):
r = self.mbed.rpc(self.name, "write", [str(value)])
def write_u16(self, value):
r = self.mbed.rpc(self.name, "write_u16", [str(value)])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return float(r)
class DigitalIn():
def __init__(self, this_mbed, mpin):
self.mbed = this_mbed
if isinstance(mpin, str):
self.name = mpin
elif isinstance(mpin, pin):
self.name = self.mbed.rpc("DigitalIn", "new", [mpin.name])
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return int(r)
class PwmOut():
def __init__(self, this_mbed, mpin):
self.mbed = this_mbed
if isinstance(mpin, str):
self.name = mpin
elif isinstance(mpin, pin):
self.name = self.mbed.rpc("PwmOut", "new", [mpin.name])
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def write(self, value):
r = self.mbed.rpc(self.name, "write", [str(value)])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return float(r)
def period(self, value):
r = self.mbed.rpc(self.name, "period", [str(value)])
def period_ms(self, value):
r = self.mbed.rpc(self.name, "period_ms", [str(value)])
def period_us(self, value):
r = self.mbed.rpc(self.name, "period_us", [str(value)])
    def pulsewidth(self, value):
        r = self.mbed.rpc(self.name, "pulsewidth", [str(value)])
    def pulsewidth_ms(self, value):
        r = self.mbed.rpc(self.name, "pulsewidth_ms", [str(value)])
    def pulsewidth_us(self, value):
        r = self.mbed.rpc(self.name, "pulsewidth_us", [str(value)])
class Serial():
def __init__(self, this_mbed, tx, rx=""):
self.mbed = this_mbed
if isinstance(tx, str):
            self.name = tx
        elif isinstance(tx, pin):
self.name = self.mbed.rpc("Serial", "new", [tx.name, rx.name])
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def putc(self, value):
r = self.mbed.rpc(self.name, "putc", [str(value)])
def puts(self, value):
r = self.mbed.rpc(self.name, "puts", ["\"" + str(value) + "\""])
def getc(self):
r = self.mbed.rpc(self.name, "getc", [])
return int(r)
class RPCFunction():
def __init__(self, this_mbed, name):
self.mbed = this_mbed
if isinstance(name, str):
self.name = name
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return int(r)
def run(self, input):
r = self.mbed.rpc(self.name, "run", [input])
return r
class RPCVariable():
def __init__(self, this_mbed, name):
self.mbed = this_mbed
if isinstance(name, str):
self.name = name
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def write(self, value):
self.mbed.rpc(self.name, "write", [str(value)])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return r
def wait(s):
time.sleep(s)
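# Minimal usage sketch (not part of the original library): the serial port name
# below is hypothetical; substitute whatever port your mbed actually enumerates on.
if __name__ == "__main__":
    board = SerialRPC("/dev/ttyACM0", 9600)
    led = DigitalOut(board, LED1)
    led.write(1)
    wait(0.5)
    led.write(0)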
|
[
"serial.Serial",
"time.sleep"
] |
[((7735, 7748), 'time.sleep', 'time.sleep', (['s'], {}), '(s)\n', (7745, 7748), False, 'import time\n'), ((2335, 2354), 'serial.Serial', 'serial.Serial', (['port'], {}), '(port)\n', (2348, 2354), False, 'import serial\n')]
|
"""PhoSim Instance Catalog"""
from __future__ import absolute_import, division, print_function
import numpy as np
from lsst.sims.catUtils.exampleCatalogDefinitions import (PhoSimCatalogZPoint,
PhoSimCatalogPoint,
PhoSimCatalogSersic2D,
PhoSimCatalogSN)
from .twinklesVariabilityMixins import VariabilityTwinkles
from lsst.sims.catUtils.mixins import VariabilityAGN, PhotometryGalaxies
from lsst.sims.catUtils.exampleCatalogDefinitions.phoSimCatalogExamples import PhoSimSpecMap as psmp
from lsst.sims.catalogs.definitions import CompoundInstanceCatalog
from lsst.sims.catalogs.db import CompoundCatalogDBObject
#__all__ = ['TwinklesCatalogZPoint', 'TwinklesPhoSimCatalogSN']
__all__ = ["TwinklesCatalogPoint", "TwinklesCatalogSersic2D",
"TwinklesCatalogZPoint", "TwinklesCatalogSN", "TwinklesCompoundInstanceCatalog"]
twinkles_sn_sed_dir = 'spectra_files'
twinkles_spec_map = psmp
twinkles_spec_map.subdir_map['(^specFile_)'] = twinkles_sn_sed_dir
class TwinklesCatalogPoint(PhoSimCatalogPoint):
specFileMap = twinkles_spec_map
class TwinklesCatalogSersic2D(PhoSimCatalogSersic2D):
specFileMap = twinkles_spec_map
class TwinklesCatalogZPoint(PhoSimCatalogZPoint, VariabilityTwinkles, VariabilityAGN):
"""
PhoSim Instance Catalog Class for strongly lensed (and therefore time-delayed)
AGN
"""
specFileMap = twinkles_spec_map
catalog_type = 'twinkles_catalog_ZPOINT'
class TwinklesCatalogSN(PhoSimCatalogSN):
"""
Modification of the PhoSimCatalogSN mixin to provide shorter sedFileNames
by leaving out the parts of the directory name
"""
def get_shorterFileNames(self):
fnames = self.column_by_name('sedFilepath')
sep = 'spectra_files/specFile_'
split_names = []
for fname in fnames:
if 'None' not in fname:
fname = sep + fname.split(sep)[-1]
else:
fname = 'None'
split_names.append(fname)
return np.array(split_names)
# column_outputs = PhoSimCatalogSN.column_outputs
# column_outputs[PhoSimCatalogSN.column_outputs.index('sedFilepath')] = \
# 'shorterFileNames'
column_outputs = ['prefix', 'uniqueId', 'raPhoSim', 'decPhoSim',
'phoSimMagNorm', 'shorterFileNames', 'redshift',
'gamma1', 'gamma2', 'kappa', 'raOffset', 'decOffset',
'spatialmodel', 'internalExtinctionModel',
'galacticExtinctionModel', 'galacticAv', 'galacticRv']
cannot_be_null = ['x0', 't0', 'z', 'shorterFileNames']
class TwinklesCompoundInstanceCatalog(CompoundInstanceCatalog):
use_spec_map = twinkles_spec_map
def write_catalog(self, filename, chunk_size=None, write_header=True, write_mode='w'):
"""
Write the stored list of InstanceCatalogs to a single ASCII output catalog.
@param [in] filename is the name of the file to be written
@param [in] chunk_size is an optional parameter telling the CompoundInstanceCatalog
to query the database in manageable chunks (in case returning the whole catalog
takes too much memory)
@param [in] write_header a boolean specifying whether or not to add a header
to the output catalog (Note: only one header will be written; there will not be
a header for each InstanceCatalog in the CompoundInstanceCatalog; default True)
@param [in] write_mode is 'w' if you want to overwrite the output file or
'a' if you want to append to an existing output file (default: 'w')
"""
instantiated_ic_list = [None]*len(self._ic_list)
# first, loop over all of the InstanceCatalog and CatalogDBObject classes, pre-processing
# them (i.e. verifying that they have access to all of the columns they need)
for ix, (icClass, dboClass) in enumerate(zip(self._ic_list, self._dbo_list)):
dbo = dboClass()
ic = icClass(dbo, obs_metadata=self._obs_metadata)
# assign all non-private member variables of the CompoundInstanceCatalog
# to the instantiated InstanceCatalogs
for kk in self.__dict__:
if kk[0] != '_' and not hasattr(self.__dict__[kk], '__call__'):
setattr(ic, kk, self.__dict__[kk])
for kk in self.__class__.__dict__:
if kk[0] != '_' and not hasattr(self.__class__.__dict__[kk], '__call__'):
setattr(ic, kk, self.__class__.__dict__[kk])
ic._write_pre_process()
instantiated_ic_list[ix] = ic
for row in self._dbObjectGroupList:
if len(row) == 1:
ic = instantiated_ic_list[row[0]]
ic._query_and_write(filename, chunk_size=chunk_size,
write_header=write_header, write_mode=write_mode,
obs_metadata=self._obs_metadata,
constraint=self._constraint)
write_mode = 'a'
write_header = False
default_compound_dbo = None
if self._compoundDBclass is not None:
if not hasattr(self._compoundDBclass, '__getitem__'):
default_compound_dbo = CompoundCatalogDBObject
else:
for dbo in self._compoundDBclass:
if dbo._table_restriction is None:
default_compound_dbo = dbo
break
if default_compound_dbo is None:
                default_compound_dbo = CompoundCatalogDBObject
for row in self._dbObjectGroupList:
if len(row) > 1:
dbObjClassList = [self._dbo_list[ix] for ix in row]
catList = [instantiated_ic_list[ix] for ix in row]
for cat in catList:
cat._pre_screen = True
if self._compoundDBclass is None:
compound_dbo = CompoundCatalogDBObject(dbObjClassList)
elif not hasattr(self._compoundDBclass, '__getitem__'):
# if self._compoundDBclass is not a list
try:
compound_dbo = self._compoundDBclass(dbObjClassList)
except:
compound_dbo = default_compound_dbo(dbObjClassList)
else:
compound_dbo = None
for candidate in self._compoundDBclass:
use_it = True
if False in [candidate._table_restriction is not None and
dbo.tableid in candidate._table_restriction
for dbo in dbObjClassList]:
use_it = False
if use_it:
compound_dbo = candidate(dbObjClassList)
break
if compound_dbo is None:
compound_dbo = default_compound_dbo(dbObjClassList)
compound_dbo.mjd = self._obs_metadata.mjd.TAI
compound_dbo.specFileMap = self.use_spec_map
self._write_compound(catList, compound_dbo, filename,
chunk_size=chunk_size, write_header=write_header,
write_mode=write_mode)
write_mode = 'a'
write_header = False
|
[
"lsst.sims.catalogs.db.CompoundCatalogDBObject",
"numpy.array"
] |
[((2155, 2176), 'numpy.array', 'np.array', (['split_names'], {}), '(split_names)\n', (2163, 2176), True, 'import numpy as np\n'), ((6160, 6199), 'lsst.sims.catalogs.db.CompoundCatalogDBObject', 'CompoundCatalogDBObject', (['dbObjClassList'], {}), '(dbObjClassList)\n', (6183, 6199), False, 'from lsst.sims.catalogs.db import CompoundCatalogDBObject\n')]
|
"""
:maintainer: <NAME> <<EMAIL>>
:maturity: new
:depends: None
:platform: Linux
.. versionadded:: 3004
"""
import logging
import re
import salt.exceptions
log = logging.getLogger(__name__)
def __virtual__():
"""rebootmgrctl command is required."""
if __utils__["path.which"]("rebootmgrctl") is not None:
return True
else:
return (False, "Module rebootmgt requires the command rebootmgrctl")
def _cmd(cmd, retcode=False):
"""Utility function to run commands."""
result = __salt__["cmd.run_all"](cmd)
if retcode:
return result["retcode"]
if result["retcode"]:
raise salt.exceptions.CommandExecutionError(result["stderr"])
return result["stdout"]
def version():
"""Return the version of rebootmgrd
CLI Example:
.. code-block:: bash
salt microos rebootmgr version
"""
cmd = ["rebootmgrctl", "--version"]
return _cmd(cmd).split()[-1]
def is_active():
"""Check if the rebootmgrd is running and active or not.
CLI Example:
.. code-block:: bash
salt microos rebootmgr is_active
"""
cmd = ["rebootmgrctl", "is_active", "--quiet"]
return _cmd(cmd, retcode=True) == 0
def reboot(order=None):
"""Tells rebootmgr to schedule a reboot.
With the [now] option, a forced reboot is done, no lock from etcd
is requested and a set maintenance window is ignored. With the
[fast] option, a lock from etcd is requested if needed, but a
defined maintenance window is ignored.
order
If specified, can be "now" or "fast"
CLI Example:
.. code-block:: bash
salt microos rebootmgr reboot
        salt microos rebootmgr reboot order=now
"""
if order and order not in ("now", "fast"):
raise salt.exceptions.CommandExecutionError(
"Order parameter, if specified, must be 'now' or 'fast'"
)
cmd = ["rebootmgrctl", "reboot"]
if order:
cmd.append(order)
return _cmd(cmd)
def cancel():
"""Cancels an already running reboot.
CLI Example:
.. code-block:: bash
salt microos rebootmgr cancel
"""
cmd = ["rebootmgrctl", "cancel"]
return _cmd(cmd)
def status():
"""Returns the current status of rebootmgrd.
Valid returned values are:
0 - No reboot requested
1 - Reboot requested
2 - Reboot requested, waiting for maintenance window
3 - Reboot requested, waiting for etcd lock.
CLI Example:
.. code-block:: bash
salt microos rebootmgr status
"""
cmd = ["rebootmgrctl", "status", "--quiet"]
return _cmd(cmd, retcode=True)
def set_strategy(strategy=None):
"""A new strategy to reboot the machine is set and written into
/etc/rebootmgr.conf.
strategy
If specified, must be one of those options:
best-effort - This is the default strategy. If etcd is
running, etcd-lock is used. If no etcd is running, but a
maintenance window is specified, the strategy will be
maint-window. If no maintenance window is specified, the
machine is immediately rebooted (instantly).
etcd-lock - A lock at etcd for the specified lock-group will
be acquired before reboot. If a maintenance window is
specified, the lock is only acquired during this window.
maint-window - Reboot does happen only during a specified
maintenance window. If no window is specified, the
instantly strategy is followed.
instantly - Other services will be informed that a reboot will
happen. Reboot will be done without getting any locks or
waiting for a maintenance window.
        off - Reboot requests are temporarily
ignored. /etc/rebootmgr.conf is not modified.
CLI Example:
.. code-block:: bash
        salt microos rebootmgr set_strategy strategy=off
"""
if strategy and strategy not in (
"best-effort",
"etcd-lock",
"maint-window",
"instantly",
"off",
):
raise salt.exceptions.CommandExecutionError("Strategy parameter not valid")
cmd = ["rebootmgrctl", "set-strategy"]
if strategy:
cmd.append(strategy)
return _cmd(cmd)
def get_strategy():
"""The currently used reboot strategy of rebootmgrd will be printed.
CLI Example:
.. code-block:: bash
salt microos rebootmgr get_strategy
"""
cmd = ["rebootmgrctl", "get-strategy"]
return _cmd(cmd).split(":")[-1].strip()
def set_window(time, duration):
"""Set's the maintenance window.
time
The format of time is the same as described in
systemd.time(7).
duration
The format of duration is "[XXh][YYm]".
CLI Example:
.. code-block:: bash
salt microos rebootmgr set_window time="Thu,Fri 2020-*-1,5 11:12:13" duration=1h
"""
cmd = ["rebootmgrctl", "set-window", time, duration]
return _cmd(cmd)
def get_window():
"""The currently set maintenance window will be printed.
CLI Example:
.. code-block:: bash
salt microos rebootmgr get_window
"""
cmd = ["rebootmgrctl", "get-window"]
window = _cmd(cmd)
return dict(
zip(
("time", "duration"),
re.search(
r"Maintenance window is set to (.*), lasting (.*).", window
).groups(),
)
)
def set_group(group):
"""Set the group, to which this machine belongs to get a reboot lock
from etcd.
group
Group name
CLI Example:
.. code-block:: bash
salt microos rebootmgr set_group group=group_1
"""
cmd = ["rebootmgrctl", "set-group", group]
return _cmd(cmd)
def get_group():
"""The currently set lock group for etcd.
CLI Example:
.. code-block:: bash
salt microos rebootmgr get_group
"""
cmd = ["rebootmgrctl", "get-group"]
group = _cmd(cmd)
return re.search(r"Etcd lock group is set to (.*)", group).groups()[0]
def set_max(max_locks, group=None):
"""Set the maximal number of hosts in a group, which are allowed to
reboot at the same time.
    max_locks
Maximal number of hosts in a group
group
Group name
CLI Example:
.. code-block:: bash
salt microos rebootmgr set_max 4
"""
cmd = ["rebootmgrctl", "set-max"]
if group:
cmd.extend(["--group", group])
cmd.append(max_locks)
return _cmd(cmd)
def lock(machine_id=None, group=None):
"""Lock a machine. If no group is specified, the local default group
will be used. If no machine-id is specified, the local machine
will be locked.
machine_id
The machine-id is a network wide, unique ID. Per default the
ID from /etc/machine-id is used.
group
Group name
CLI Example:
.. code-block:: bash
salt microos rebootmgr lock group=group1
"""
cmd = ["rebootmgrctl", "lock"]
if group:
cmd.extend(["--group", group])
if machine_id:
cmd.append(machine_id)
return _cmd(cmd)
def unlock(machine_id=None, group=None):
"""Unlock a machine. If no group is specified, the local default group
will be used. If no machine-id is specified, the local machine
    will be unlocked.
machine_id
The machine-id is a network wide, unique ID. Per default the
ID from /etc/machine-id is used.
group
Group name
CLI Example:
.. code-block:: bash
salt microos rebootmgr unlock group=group1
"""
cmd = ["rebootmgrctl", "unlock"]
if group:
cmd.extend(["--group", group])
if machine_id:
cmd.append(machine_id)
return _cmd(cmd)
|
[
"re.search",
"logging.getLogger"
] |
[((185, 212), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (202, 212), False, 'import logging\n'), ((6044, 6094), 're.search', 're.search', (['"""Etcd lock group is set to (.*)"""', 'group'], {}), "('Etcd lock group is set to (.*)', group)\n", (6053, 6094), False, 'import re\n'), ((5360, 5429), 're.search', 're.search', (['"""Maintenance window is set to (.*), lasting (.*)."""', 'window'], {}), "('Maintenance window is set to (.*), lasting (.*).', window)\n", (5369, 5429), False, 'import re\n')]
|
# coding=utf-8
"""
The NfsCollector collects nfs utilization metrics using /proc/net/rpc/nfs.
#### Dependencies
* /proc/net/rpc/nfs
"""
import diamond.collector
import os
class NfsCollector(diamond.collector.Collector):
PROC = '/proc/net/rpc/nfs'
def get_default_config_help(self):
config_help = super(NfsCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NfsCollector, self).get_default_config()
config.update({
'enabled': False,
'path': 'nfs'
})
return config
def collect(self):
"""
Collect stats
"""
if os.access(self.PROC, os.R_OK):
results = {}
# Open file
file = open(self.PROC)
for line in file:
line = line.split()
if line[0] == 'net':
results['net.packets'] = line[1]
results['net.udpcnt'] = line[2]
results['net.tcpcnt'] = line[3]
results['net.tcpconn'] = line[4]
elif line[0] == 'rpc':
results['rpc.calls'] = line[1]
results['rpc.retrans'] = line[2]
results['rpc.authrefrsh'] = line[3]
elif line[0] == 'proc2':
line.pop(1) # remove column-cnt field
results['v2.null'] = line[1]
results['v2.getattr'] = line[2]
results['v2.setattr'] = line[3]
results['v2.root'] = line[4]
results['v2.lookup'] = line[5]
results['v2.readlink'] = line[6]
results['v2.read'] = line[7]
results['v2.wrcache'] = line[8]
results['v2.write'] = line[9]
results['v2.create'] = line[10]
results['v2.remove'] = line[11]
results['v2.rename'] = line[12]
results['v2.link'] = line[13]
results['v2.symlink'] = line[14]
results['v2.mkdir'] = line[15]
results['v2.rmdir'] = line[16]
results['v2.readdir'] = line[17]
results['v2.fsstat'] = line[18]
elif line[0] == 'proc3':
line.pop(1) # remove column-cnt field
results['v3.null'] = line[1]
results['v3.getattr'] = line[2]
results['v3.setattr'] = line[3]
results['v3.lookup'] = line[4]
results['v3.access'] = line[5]
results['v3.readlink'] = line[6]
results['v3.read'] = line[7]
results['v3.write'] = line[8]
results['v3.create'] = line[9]
results['v3.mkdir'] = line[10]
results['v3.symlink'] = line[11]
results['v3.mknod'] = line[12]
results['v3.remove'] = line[13]
results['v3.rmdir'] = line[14]
results['v3.rename'] = line[15]
results['v3.link'] = line[16]
results['v3.readdir'] = line[17]
results['v3.readdirplus'] = line[18]
results['v3.fsstat'] = line[19]
results['v3.fsinfo'] = line[20]
results['v3.pathconf'] = line[21]
results['v3.commit'] = line[22]
elif line[0] == 'proc4':
line.pop(1) # remove column-cnt field
results['v4.null'] = line[1]
results['v4.read'] = line[2]
results['v4.write'] = line[3]
results['v4.commit'] = line[4]
results['v4.open'] = line[5]
results['v4.open_conf'] = line[6]
results['v4.open_noat'] = line[7]
results['v4.open_dgrd'] = line[8]
results['v4.close'] = line[9]
results['v4.setattr'] = line[10]
results['v4.fsinfo'] = line[11]
results['v4.renew'] = line[12]
results['v4.setclntid'] = line[13]
results['v4.confirm'] = line[14]
results['v4.lock'] = line[15]
results['v4.lockt'] = line[16]
results['v4.locku'] = line[17]
results['v4.access'] = line[18]
results['v4.getattr'] = line[19]
results['v4.lookup'] = line[20]
results['v4.lookup_root'] = line[21]
results['v4.remove'] = line[22]
results['v4.rename'] = line[23]
results['v4.link'] = line[24]
results['v4.symlink'] = line[25]
results['v4.create'] = line[26]
results['v4.pathconf'] = line[27]
results['v4.statfs'] = line[28]
results['v4.readlink'] = line[29]
results['v4.readdir'] = line[30]
try:
results['v4.server_caps'] = line[31]
except IndexError:
pass
try:
results['v4.delegreturn'] = line[32]
except IndexError:
pass
try:
results['v4.getacl'] = line[33]
except IndexError:
pass
try:
results['v4.setacl'] = line[34]
except IndexError:
pass
try:
results['v4.fs_locations'] = line[35]
except IndexError:
pass
try:
results['v4.rel_lkowner'] = line[36]
except IndexError:
pass
try:
results['v4.exchange_id'] = line[37]
except IndexError:
pass
try:
results['v4.create_ses'] = line[38]
except IndexError:
pass
try:
results['v4.destroy_ses'] = line[39]
except IndexError:
pass
try:
results['v4.sequence'] = line[40]
except IndexError:
pass
try:
results['v4.get_lease_t'] = line[41]
except IndexError:
pass
try:
results['v4.reclaim_comp'] = line[42]
except IndexError:
pass
try:
results['v4.layoutget'] = line[43]
except IndexError:
pass
try:
results['v4.layoutcommit'] = line[44]
except IndexError:
pass
try:
results['v4.layoutreturn'] = line[45]
except IndexError:
pass
try:
results['v4.getdevlist'] = line[46]
except IndexError:
pass
try:
results['v4.getdevinfo'] = line[47]
except IndexError:
pass
try:
results['v4.ds_write'] = line[48]
except IndexError:
pass
try:
results['v4.ds_commit'] = line[49]
except IndexError:
pass
try:
results['v4.getdevlist'] = line[50]
except IndexError:
pass
# Close File
file.close()
for stat in results.keys():
metric_name = stat
metric_value = long(float(results[stat]))
metric_value = self.derivative(metric_name, metric_value)
self.publish(metric_name, metric_value)
return True
return False
|
[
"os.access"
] |
[((810, 839), 'os.access', 'os.access', (['self.PROC', 'os.R_OK'], {}), '(self.PROC, os.R_OK)\n', (819, 839), False, 'import os\n')]
|
import datetime
from pytest_cases import THIS_MODULE, parametrize_with_cases
from statue.cli import statue_cli
from tests.util import evaluation_mock
def case_empty_history():
additional_flags = []
evaluations = []
output = "No previous evaluations.\n"
return additional_flags, evaluations, output
def case_one_successful_evaluation():
total_commands = 4
timestamp1 = datetime.datetime(
year=2020, month=4, day=15, hour=12, minute=7, second=42
)
additional_flags = []
evaluations = [
evaluation_mock(
timestamp=timestamp1,
successful_commands=total_commands,
total_commands=total_commands,
total_execution_duration=0.234,
)
]
output = "1) 04/15/2020, 12:07:42 - Success (4/4 successful, 0.23 seconds)\n"
return additional_flags, evaluations, output
def case_one_failed_evaluation():
additional_flags = []
timestamp1 = datetime.datetime(
year=2020, month=5, day=12, hour=14, minute=8, second=23
)
evaluations = [
evaluation_mock(
timestamp=timestamp1,
successful_commands=3,
total_commands=4,
total_execution_duration=0.591,
)
]
output = "1) 05/12/2020, 14:08:23 - Failure (3/4 successful, 0.59 seconds)\n"
return additional_flags, evaluations, output
def case_two_successful_evaluations():
total_commands1, total_commands2 = 4, 7
additional_flags = []
timestamp1, timestamp2 = (
datetime.datetime(year=2020, month=4, day=15, hour=12, minute=7, second=42),
datetime.datetime(year=2020, month=4, day=14, hour=18, minute=59, second=11),
)
evaluations = [
evaluation_mock(
timestamp=timestamp1,
successful_commands=total_commands1,
total_commands=total_commands1,
total_execution_duration=0.234,
),
evaluation_mock(
timestamp=timestamp2,
successful_commands=total_commands2,
total_commands=total_commands2,
total_execution_duration=0.189,
),
]
output = (
"1) 04/15/2020, 12:07:42 - Success (4/4 successful, 0.23 seconds)\n"
"2) 04/14/2020, 18:59:11 - Success (7/7 successful, 0.19 seconds)\n"
)
return additional_flags, evaluations, output
def case_one_failed_and_one_successful():
total_commands = 4
additional_flags = []
timestamp1, timestamp2 = (
datetime.datetime(year=2020, month=4, day=15, hour=12, minute=7, second=42),
datetime.datetime(year=2020, month=4, day=14, hour=18, minute=59, second=11),
)
evaluations = [
evaluation_mock(
timestamp=timestamp1,
successful_commands=total_commands,
total_commands=total_commands,
total_execution_duration=0.234,
),
evaluation_mock(
timestamp=timestamp2,
successful_commands=3,
total_commands=7,
total_execution_duration=0.189,
),
]
output = (
"1) 04/15/2020, 12:07:42 - Success (4/4 successful, 0.23 seconds)\n"
"2) 04/14/2020, 18:59:11 - Failure (3/7 successful, 0.19 seconds)\n"
)
return additional_flags, evaluations, output
def case_three_evaluations():
total_commands1, total_commands2 = 4, 10
additional_flags = []
timestamp1, timestamp2, timestamp3 = (
datetime.datetime(year=2020, month=4, day=15, hour=12, minute=7, second=42),
datetime.datetime(year=2020, month=4, day=14, hour=18, minute=59, second=11),
datetime.datetime(year=2020, month=4, day=14, hour=11, minute=31, second=22),
)
evaluations = [
evaluation_mock(
timestamp=timestamp1,
successful_commands=total_commands1,
total_commands=total_commands1,
total_execution_duration=0.234,
),
evaluation_mock(
timestamp=timestamp2,
successful_commands=3,
total_commands=7,
total_execution_duration=0.189,
),
evaluation_mock(
timestamp=timestamp3,
successful_commands=total_commands2,
total_commands=total_commands2,
total_execution_duration=0.03,
),
]
output = (
"1) 04/15/2020, 12:07:42 - Success (4/4 successful, 0.23 seconds)\n"
"2) 04/14/2020, 18:59:11 - Failure (3/7 successful, 0.19 seconds)\n"
"3) 04/14/2020, 11:31:22 - Success (10/10 successful, 0.03 seconds)\n"
)
return additional_flags, evaluations, output
def case_head_flag():
total_commands1, total_commands2 = 4, 10
additional_flags = ["--head=2"]
timestamp1, timestamp2, timestamp3 = (
datetime.datetime(year=2020, month=4, day=15, hour=12, minute=7, second=42),
datetime.datetime(year=2020, month=4, day=14, hour=18, minute=59, second=11),
datetime.datetime(year=2020, month=4, day=14, hour=11, minute=31, second=22),
)
evaluations = [
evaluation_mock(
timestamp=timestamp1,
successful_commands=total_commands1,
total_commands=total_commands1,
total_execution_duration=0.234,
),
evaluation_mock(
timestamp=timestamp2,
successful_commands=3,
total_commands=7,
total_execution_duration=0.189,
),
evaluation_mock(
timestamp=timestamp3,
successful_commands=total_commands2,
total_commands=total_commands2,
total_execution_duration=0.03,
),
]
output = (
"1) 04/15/2020, 12:07:42 - Success (4/4 successful, 0.23 seconds)\n"
"2) 04/14/2020, 18:59:11 - Failure (3/7 successful, 0.19 seconds)\n"
)
return additional_flags, evaluations, output
@parametrize_with_cases(
argnames=["additional_flags", "evaluations", "output"],
cases=THIS_MODULE,
)
def test_history_list(
additional_flags,
evaluations,
output,
cli_runner,
mock_build_configuration_from_file,
):
configuration = mock_build_configuration_from_file.return_value
configuration.cache.all_evaluations = evaluations
result = cli_runner.invoke(statue_cli, ["history", "list", *additional_flags])
assert (
result.exit_code == 0
), f"Execution failed with the following error: '{result.exception}'"
assert result.output == output
|
[
"pytest_cases.parametrize_with_cases",
"tests.util.evaluation_mock",
"datetime.datetime"
] |
[((5888, 5989), 'pytest_cases.parametrize_with_cases', 'parametrize_with_cases', ([], {'argnames': "['additional_flags', 'evaluations', 'output']", 'cases': 'THIS_MODULE'}), "(argnames=['additional_flags', 'evaluations',\n 'output'], cases=THIS_MODULE)\n", (5910, 5989), False, 'from pytest_cases import THIS_MODULE, parametrize_with_cases\n'), ((398, 473), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(15)', 'hour': '(12)', 'minute': '(7)', 'second': '(42)'}), '(year=2020, month=4, day=15, hour=12, minute=7, second=42)\n', (415, 473), False, 'import datetime\n'), ((955, 1030), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(5)', 'day': '(12)', 'hour': '(14)', 'minute': '(8)', 'second': '(23)'}), '(year=2020, month=5, day=12, hour=14, minute=8, second=23)\n', (972, 1030), False, 'import datetime\n'), ((543, 683), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp1', 'successful_commands': 'total_commands', 'total_commands': 'total_commands', 'total_execution_duration': '(0.234)'}), '(timestamp=timestamp1, successful_commands=total_commands,\n total_commands=total_commands, total_execution_duration=0.234)\n', (558, 683), False, 'from tests.util import evaluation_mock\n'), ((1073, 1188), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp1', 'successful_commands': '(3)', 'total_commands': '(4)', 'total_execution_duration': '(0.591)'}), '(timestamp=timestamp1, successful_commands=3, total_commands\n =4, total_execution_duration=0.591)\n', (1088, 1188), False, 'from tests.util import evaluation_mock\n'), ((1531, 1606), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(15)', 'hour': '(12)', 'minute': '(7)', 'second': '(42)'}), '(year=2020, month=4, day=15, hour=12, minute=7, second=42)\n', (1548, 1606), False, 'import datetime\n'), ((1616, 1692), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(14)', 'hour': '(18)', 'minute': '(59)', 'second': '(11)'}), '(year=2020, month=4, day=14, hour=18, minute=59, second=11)\n', (1633, 1692), False, 'import datetime\n'), ((1728, 1870), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp1', 'successful_commands': 'total_commands1', 'total_commands': 'total_commands1', 'total_execution_duration': '(0.234)'}), '(timestamp=timestamp1, successful_commands=total_commands1,\n total_commands=total_commands1, total_execution_duration=0.234)\n', (1743, 1870), False, 'from tests.util import evaluation_mock\n'), ((1935, 2077), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp2', 'successful_commands': 'total_commands2', 'total_commands': 'total_commands2', 'total_execution_duration': '(0.189)'}), '(timestamp=timestamp2, successful_commands=total_commands2,\n total_commands=total_commands2, total_execution_duration=0.189)\n', (1950, 2077), False, 'from tests.util import evaluation_mock\n'), ((2497, 2572), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(15)', 'hour': '(12)', 'minute': '(7)', 'second': '(42)'}), '(year=2020, month=4, day=15, hour=12, minute=7, second=42)\n', (2514, 2572), False, 'import datetime\n'), ((2582, 2658), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(14)', 'hour': '(18)', 'minute': '(59)', 'second': '(11)'}), '(year=2020, month=4, day=14, hour=18, minute=59, second=11)\n', (2599, 2658), False, 'import datetime\n'), ((2694, 2834), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp1', 'successful_commands': 'total_commands', 'total_commands': 'total_commands', 'total_execution_duration': '(0.234)'}), '(timestamp=timestamp1, successful_commands=total_commands,\n total_commands=total_commands, total_execution_duration=0.234)\n', (2709, 2834), False, 'from tests.util import evaluation_mock\n'), ((2899, 3014), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp2', 'successful_commands': '(3)', 'total_commands': '(7)', 'total_execution_duration': '(0.189)'}), '(timestamp=timestamp2, successful_commands=3, total_commands\n =7, total_execution_duration=0.189)\n', (2914, 3014), False, 'from tests.util import evaluation_mock\n'), ((3455, 3530), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(15)', 'hour': '(12)', 'minute': '(7)', 'second': '(42)'}), '(year=2020, month=4, day=15, hour=12, minute=7, second=42)\n', (3472, 3530), False, 'import datetime\n'), ((3540, 3616), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(14)', 'hour': '(18)', 'minute': '(59)', 'second': '(11)'}), '(year=2020, month=4, day=14, hour=18, minute=59, second=11)\n', (3557, 3616), False, 'import datetime\n'), ((3626, 3702), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(14)', 'hour': '(11)', 'minute': '(31)', 'second': '(22)'}), '(year=2020, month=4, day=14, hour=11, minute=31, second=22)\n', (3643, 3702), False, 'import datetime\n'), ((3738, 3880), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp1', 'successful_commands': 'total_commands1', 'total_commands': 'total_commands1', 'total_execution_duration': '(0.234)'}), '(timestamp=timestamp1, successful_commands=total_commands1,\n total_commands=total_commands1, total_execution_duration=0.234)\n', (3753, 3880), False, 'from tests.util import evaluation_mock\n'), ((3945, 4060), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp2', 'successful_commands': '(3)', 'total_commands': '(7)', 'total_execution_duration': '(0.189)'}), '(timestamp=timestamp2, successful_commands=3, total_commands\n =7, total_execution_duration=0.189)\n', (3960, 4060), False, 'from tests.util import evaluation_mock\n'), ((4124, 4265), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp3', 'successful_commands': 'total_commands2', 'total_commands': 'total_commands2', 'total_execution_duration': '(0.03)'}), '(timestamp=timestamp3, successful_commands=total_commands2,\n total_commands=total_commands2, total_execution_duration=0.03)\n', (4139, 4265), False, 'from tests.util import evaluation_mock\n'), ((4788, 4863), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(15)', 'hour': '(12)', 'minute': '(7)', 'second': '(42)'}), '(year=2020, month=4, day=15, hour=12, minute=7, second=42)\n', (4805, 4863), False, 'import datetime\n'), ((4873, 4949), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(14)', 'hour': '(18)', 'minute': '(59)', 'second': '(11)'}), '(year=2020, month=4, day=14, hour=18, minute=59, second=11)\n', (4890, 4949), False, 'import datetime\n'), ((4959, 5035), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(14)', 'hour': '(11)', 'minute': '(31)', 'second': '(22)'}), '(year=2020, month=4, day=14, hour=11, minute=31, second=22)\n', (4976, 5035), False, 'import datetime\n'), ((5071, 5213), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp1', 'successful_commands': 'total_commands1', 'total_commands': 'total_commands1', 'total_execution_duration': '(0.234)'}), '(timestamp=timestamp1, successful_commands=total_commands1,\n total_commands=total_commands1, total_execution_duration=0.234)\n', (5086, 5213), False, 'from tests.util import evaluation_mock\n'), ((5278, 5393), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp2', 'successful_commands': '(3)', 'total_commands': '(7)', 'total_execution_duration': '(0.189)'}), '(timestamp=timestamp2, successful_commands=3, total_commands\n =7, total_execution_duration=0.189)\n', (5293, 5393), False, 'from tests.util import evaluation_mock\n'), ((5457, 5598), 'tests.util.evaluation_mock', 'evaluation_mock', ([], {'timestamp': 'timestamp3', 'successful_commands': 'total_commands2', 'total_commands': 'total_commands2', 'total_execution_duration': '(0.03)'}), '(timestamp=timestamp3, successful_commands=total_commands2,\n total_commands=total_commands2, total_execution_duration=0.03)\n', (5472, 5598), False, 'from tests.util import evaluation_mock\n')]
|
from __future__ import annotations
from typing import NamedTuple
class Fold(NamedTuple):
dim: str
val: int
class Paper:
def __init__(self, marks: list[list[bool]]) -> None:
self.marks = marks
self.height, self.width = len(marks), len(marks[0])
def mark(self, i: int, j: int) -> None:
self.marks[i][j] = True
def fold(self, fold: Fold) -> None:
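        # Mirror marks across the fold line onto the kept half (right onto left for
        # "x", bottom onto top otherwise), then crop the grid at the fold.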
if fold.dim == "x":
bf, af = fold.val - 1, fold.val + 1
            while bf >= 0 and af < self.width:
for i in range(self.height):
self.marks[i][bf] |= self.marks[i][af]
bf -= 1
af += 1
for i in range(self.height):
self.marks[i] = self.marks[i][: fold.val]
else:
bf, af = fold.val - 1, fold.val + 1
while bf >= 0 and af < self.height:
for j in range(self.width):
self.marks[bf][j] |= self.marks[af][j]
bf -= 1
af += 1
self.marks = self.marks[: fold.val]
self.height, self.width = len(self.marks), len(self.marks[0])
def __str__(self) -> str:
return "\n".join("".join("#" if m else "." for m in row) for row in self.marks)
def parse(filename: str) -> tuple[Paper, list[Fold]]:
with open(filename) as f:
coords, raw_folds = f.read().split("\n\n")
xs, ys = [], []
for line in coords.splitlines():
x, _, y = line.partition(",")
xs.append(int(x))
ys.append(int(y))
paper = Paper([[False] * (max(xs) + 1) for _ in range(max(ys) + 1)])
for j, i in zip(xs, ys):
paper.mark(i, j)
folds = []
for line in raw_folds.splitlines():
raw_dim, _, raw_val = line.partition("=")
dim, val = raw_dim[-1], int(raw_val)
folds.append(Fold(dim, val))
return paper, folds
def part1(filename: str) -> int:
paper, folds = parse(filename)
paper.fold(folds[0])
return sum(m for row in paper.marks for m in row)
def part2(filename: str) -> int:
paper, folds = parse(filename)
for fold in folds:
paper.fold(fold)
print(paper)
return -1
if __name__ == "__main__":
from _common import main
raise SystemExit(main(part1, part2))
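# Expected input layout for parse() (a made-up miniature example in the Advent of
# Code day 13 format this script assumes): "x,y" coordinate lines, a blank line,
# then "fold along ..." instructions, e.g.
#
#   6,10
#   0,14
#   9,0
#
#   fold along y=7
#   fold along x=5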
|
[
"_common.main"
] |
[((2282, 2300), '_common.main', 'main', (['part1', 'part2'], {}), '(part1, part2)\n', (2286, 2300), False, 'from _common import main\n')]
|
# MODULE: TypeRig / Core / Collection (Functions)
# -----------------------------------------------------------
# (C) <NAME>, 2017-2021 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#------------------------------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies ------------------------
from __future__ import absolute_import, print_function, division
from itertools import islice
# - Init --------------------------------
__version__ = '0.26.7'
# - Functions ---------------------------
# -- Dictionary -------------------------
def mergeDicts(d1, d2, merge = lambda x, y : y):
''' Merges two dictionaries [d1, d2], combining values on duplicate keys as defined by the optional [merge] function.
--------
Example: merge(d1, d2, lambda x,y: x+y) -> {'a': 2, 'c': 6, 'b': 4} '''
mergeDict = dict(d1)
for key, value in d2.items():
if key in mergeDict:
mergeDict[key] = merge(mergeDict[key], value)
else:
mergeDict[key] = value
return mergeDict
# -- Lists ------------------------------
def flatten(listItems):
	''' Unpacks all items from [listItems] containing other lists, sets, etc. '''
from itertools import chain
return list(chain(*listItems))
def group_recurring(listItems):
''' Combines recurring items in [listItems] and returns a list containing sets of grouped items '''
temp = [set(item) for item in listItems if item]
for indexA, valueA in enumerate(temp) :
for indexB, valueB in enumerate(temp[indexA + 1 :], indexA + 1):
if valueA & valueB:
temp[indexA] = valueA.union(temp.pop(indexB))
return group_recurring(temp)
return [tuple(item) for item in temp]
def group_consecutive(listItems, step = 1):
	''' Build a list of lists containing consecutive numbers from [listItems] (number list) within [step] '''
tempList = []
groupList = [tempList]
expectedValue = None
for value in listItems:
if (value == expectedValue) or (expectedValue is None):
tempList.append(value)
else:
tempList = [value]
groupList.append(tempList)
expectedValue = value + step
return groupList
def group_conditional(iterable, condition):
'''Takes a sorted iterable and groups items according to condition and operator given
Args:
iterable list() or tuple(): Any iterable object
condition lambda x: A function to check condition
Returns:
generator object
Example:
l = [123, 124, 128, 160, 167, 213, 215, 230, 245, 255, 257, 400, 401, 402, 430]
		g = group_conditional(l, lambda x: x <= 15)
		dict(enumerate(g))
		>>> {0: [123, 124, 128], 1: [160, 167], 2: [213, 215, 230, 245, 255, 257], ...}
'''
prev = None
group = []
for item in iterable:
if prev is None or condition(abs(item - prev)):
group.append(item)
else:
yield group
group = [item]
prev = item
if group:
yield group
def sliding_window(sequence, window_size=2):
'''Returns a sliding window (of window_size) over data from the iterable.
Example: s -> (s0, s1, ... s[n-1]), (s1, s2, ... , sn), ... '''
iterator = iter(sequence)
result = tuple(islice(iterator, window_size))
if len(result) == window_size:
yield result
for element in iterator:
result = result[1:] + (element,)
yield result
if __name__ == '__main__':
d1 = {i:i+3 for i in range(10)}
d2 = {i:i*3 for i in range(10)}
print(mergeDicts(d1, d2, merge = lambda x, y : '%s+%s'%(x,y)))
a = ((3, 4), (4, 5), (67, 12), (899, 234, 2345, 2, 3), (4, 5, 7))
b = [123, 124, 128, 160, 167, 213, 215, 230, 245, 255, 257, 400, 401, 402, 430]
print(flatten(a))
print(group_recurring(a))
print(group_consecutive(flatten(a), step = 1))
print(list(sliding_window(flatten(a), window_size=2)))
print(list(group_conditional(b, lambda x: x <= 15)))
|
[
"itertools.chain",
"itertools.islice"
] |
[((1288, 1305), 'itertools.chain', 'chain', (['*listItems'], {}), '(*listItems)\n', (1293, 1305), False, 'from itertools import chain\n'), ((3126, 3155), 'itertools.islice', 'islice', (['iterator', 'window_size'], {}), '(iterator, window_size)\n', (3132, 3155), False, 'from itertools import islice\n')]
|
from typing import List, Dict, Any, Optional
import logging
from collections import Counter
from reval.dataset_utils import train_val_split
from reval.probing_task_example import ProbingTaskExample
logger = logging.getLogger(__name__)
def generate_task_examples(
data: List[Dict[str, Any]],
argument: str,
position: str,
    pos2idx: Dict[str, int],
keep_tags: List[str],
split: str,
) -> List[ProbingTaskExample]:
probing_examples = []
for example in data:
entity_start, entity_end = example[argument]
pos = example["pos"]
if position == "left":
# no POS to the left of the entity
if entity_start == 0:
continue
pos_tag = pos[entity_start - 1]
else:
if entity_end == (len(pos) - 1):
continue
# TODO: make sure this holds for all datasets (end index inclusive)
pos_tag = pos[entity_end + 1]
if keep_tags and pos_tag not in keep_tags:
continue
probing_examples.append(
ProbingTaskExample(
tokens=example["tokens"],
label=pos2idx[pos_tag],
split=split,
head=example["head"],
tail=example["tail"],
ner=example["ner"],
pos=example["pos"],
dep=example["dep"],
dep_head=example["dep_head"],
id=example["id"],
)
)
return probing_examples
def generate(
train_data: List[Dict[str, Any]],
test_data: List[Dict[str, Any]],
argument: str,
position: str,
validation_size: float = 0.1,
validation_data: Optional[List[Dict[str, Any]]] = None,
keep_tags: Optional[List[str]] = None,
) -> List[ProbingTaskExample]:
logger.info(
"Generating dataset for probing task: "
+ f"PosTag{argument.capitalize()}{position.capitalize()}"
)
if argument not in ["head", "tail"]:
raise ValueError(f"'{argument}' is not a valid argument.")
if position not in ["left", "right"]:
raise ValueError(f"'{position}' is not a valid position.")
if validation_data is None:
train_data, validation_data = train_val_split(train_data, validation_size)
logger.info(f"Argument: {argument}")
logger.info(f"Position: {position}")
logger.info(f"Num train examples: {len(train_data)}")
logger.info(f"Num validation examples: {len(validation_data)}")
logger.info(f"Num test examples: {len(test_data)}")
all_pos_tags = Counter()
for data in [train_data, validation_data, test_data]:
for example in data:
all_pos_tags.update(example["pos"])
logger.info(f"Label distribution: {all_pos_tags}")
pos2idx = {pos_tag: i for i, pos_tag in enumerate(list(all_pos_tags))}
task_examples = []
train_task_examples = generate_task_examples(
train_data, argument, position, pos2idx, keep_tags, split="tr"
)
task_examples.extend(train_task_examples)
idx2pos = {v: k for k, v in pos2idx.items()}
class_distribution = Counter(
[idx2pos[example.label] for example in train_task_examples]
)
logger.info(f"CT: {class_distribution}")
validation_task_examples = generate_task_examples(
validation_data, argument, position, pos2idx, keep_tags, split="va"
)
task_examples.extend(validation_task_examples)
test_task_examples = generate_task_examples(
test_data, argument, position, pos2idx, keep_tags, split="te"
)
task_examples.extend(test_task_examples)
logger.info(f"Num train task examples: {len(train_task_examples)}")
logger.info(f"Num validation task examples: {len(validation_task_examples)}")
logger.info(f"Num test task examples: {len(test_task_examples)}")
return task_examples
|
[
"collections.Counter",
"reval.dataset_utils.train_val_split",
"reval.probing_task_example.ProbingTaskExample",
"logging.getLogger"
] |
[((209, 236), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (226, 236), False, 'import logging\n'), ((2583, 2592), 'collections.Counter', 'Counter', ([], {}), '()\n', (2590, 2592), False, 'from collections import Counter\n'), ((3133, 3201), 'collections.Counter', 'Counter', (['[idx2pos[example.label] for example in train_task_examples]'], {}), '([idx2pos[example.label] for example in train_task_examples])\n', (3140, 3201), False, 'from collections import Counter\n'), ((2253, 2297), 'reval.dataset_utils.train_val_split', 'train_val_split', (['train_data', 'validation_size'], {}), '(train_data, validation_size)\n', (2268, 2297), False, 'from reval.dataset_utils import train_val_split\n'), ((1082, 1328), 'reval.probing_task_example.ProbingTaskExample', 'ProbingTaskExample', ([], {'tokens': "example['tokens']", 'label': 'pos2idx[pos_tag]', 'split': 'split', 'head': "example['head']", 'tail': "example['tail']", 'ner': "example['ner']", 'pos': "example['pos']", 'dep': "example['dep']", 'dep_head': "example['dep_head']", 'id': "example['id']"}), "(tokens=example['tokens'], label=pos2idx[pos_tag], split=\n split, head=example['head'], tail=example['tail'], ner=example['ner'],\n pos=example['pos'], dep=example['dep'], dep_head=example['dep_head'],\n id=example['id'])\n", (1100, 1328), False, 'from reval.probing_task_example import ProbingTaskExample\n')]
|
#!/usr/bin/env python
# imports
import json # weather data
import time # time data
import urllib # fixing urls
from includes import epd2in13b # e ink library
import Image # Image manipulation
import ImageFont # Text Writing
import ImageDraw # Image drawing
import os # for relative path
try:
import requests # needed for getting data
except ImportError:
exit("This script requires the requests module\nInstall with: sudo pip install requests")
# static vars
COLORED = 1
UNCOLORED = 0
CITY = "Minneapolis"
COUNTRYCODE = "US"
# Weather code taken from inky-phat example code
# Python 2 vs 3 breaking changes
def encode(qs):
val = ""
try:
val = urllib.urlencode(qs).replace("+", "%20")
except AttributeError:
val = urllib.parse.urlencode(qs).replace("+", "%20")
return val
# Query the Yahoo weather API to get current weather data
def get_weather(address):
base = "https://query.yahooapis.com/v1/public/yql?"
query = "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text=\"{}\")".format(address)
qs = {"q": query, "format": "json", "env": "store://datatables.org/alltableswithkeys"}
uri = base + encode(qs)
res = requests.get(uri)
if res.status_code == 200:
json_data = json.loads(res.text)
return json_data
return {}
##############################################################################
##############################################################################
# Start of main program
##############################################################################
##############################################################################
# startup display
disp=epd2in13b.EPD()
disp.init()
# image frames to be drawn
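# each byte packs 8 pixels, so a frame needs width*height/8 bytes; 0xFF sets every bit for a cleared frame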
frame_black = [0xFF] * (disp.width * disp.height // 8)
frame_yellow = [0xFF] * (disp.width * disp.height // 8)
# fonts to be used
fontBold = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 35)
font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSans.ttf', 35)
fontBoldBIG = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 40)
fontBIG = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSans.ttf', 40)
# Get the weather data for the given location
location_string = "{city}, {countrycode}".format(city=CITY, countrycode=COUNTRYCODE)
weather = get_weather(location_string)
# This maps the weather codes from the Yahoo weather API
# to the appropriate weather icons
icon_map = {
"snow": [5, 6, 7, 13, 14, 15, 16, 17, 18, 41, 42, 43, 46],
"rain": [8, 9, 10, 11, 12, 35, 40],
"cloud": [19, 20, 21, 22, 25, 26, 27, 28, 44],
"pcloud": [30, 44],
"pcloudnight": [29],
"night": [31,33],
"sun": [32, 34, 36],
"storm": [0, 1, 2, 3, 4, 37, 38, 39, 45, 47],
"wind": [23, 24]
}
# Placeholder variables
highT = 0
lowT = 0
weather_icon = None
# Pull out the appropriate values from the weather data
if "channel" in weather["query"]["results"]:
results = weather["query"]["results"]["channel"]
highT = int(results["item"]["forecast"][0]["high"])
lowT = int(results["item"]["forecast"][0]["low"])
code = int(results["item"]["forecast"][0]["code"])
for icon in icon_map:
if code in icon_map[icon]:
weather_icon = icon
break
else:
print("Warning, no weather information found!")
# get date info
weekday=time.strftime('%a')
month=time.strftime('%b')
date=time.strftime('%d')
## draw images first
# black part
icon_file=''
if weather_icon == "snow":
icon_file='images/black/snow.bmp'
elif weather_icon == "rain":
icon_file='images/black/rain.bmp'
elif weather_icon == "storm":
icon_file='images/black/storm.bmp'
elif weather_icon == "wind":
icon_file='images/black/windy.bmp'
elif weather_icon=="cloud" or weather_icon=="pcloud" or weather_icon=="pcloudnight":
icon_file='images/black/cloudy.bmp'
# make sure we should be drawing the black image and do it
if icon_file != "":
dir = os.path.dirname(__file__)
file_path=os.path.join(dir,icon_file)
frame_black= disp.get_frame_buffer(Image.open(file_path))
# yellow part
icon_file_yellow=''
if weather_icon == "pcloud":
icon_file_yellow='images/yellow/pcloudysun.bmp'
elif weather_icon == "pcloudnight":
icon_file_yellow='images/yellow/pcloudymoon.bmp'
elif weather_icon == "storm":
icon_file_yellow='images/yellow/storm.bmp'
elif weather_icon == "sun":
icon_file_yellow='images/yellow/sun.bmp'
elif weather_icon=="night":
icon_file_yellow='images/yellow/moon.bmp'
# make sure we should be drawing the yellow image and do it
if icon_file_yellow != "":
dir = os.path.dirname(__file__)
file_path=os.path.join(dir,icon_file_yellow)
frame_yellow = disp.get_frame_buffer(Image.open(file_path))
## draw Text
disp.set_rotate(1)
# date
disp.draw_string_at(frame_black, 15, 5, weekday, fontBold, COLORED)
disp.draw_string_at(frame_black, 85, 5, month, font, COLORED)
disp.draw_string_at(frame_black, 160, 5, date, fontBold, COLORED)
# weather
disp.draw_string_at(frame_yellow, 90, 55, str(highT), fontBoldBIG, COLORED)
disp.draw_filled_rectangle(frame_black, 140, 55, 144, 100, COLORED)
disp.draw_string_at(frame_black, 150, 55, str(lowT), fontBIG, COLORED)
# display it
disp.display_frame(frame_black,frame_yellow)
|
[
"json.loads",
"urllib.parse.urlencode",
"includes.epd2in13b.EPD",
"os.path.dirname",
"Image.open",
"time.strftime",
"ImageFont.truetype",
"requests.get",
"urllib.urlencode",
"os.path.join"
] |
[((1817, 1832), 'includes.epd2in13b.EPD', 'epd2in13b.EPD', ([], {}), '()\n', (1830, 1832), False, 'from includes import epd2in13b\n'), ((2013, 2090), 'ImageFont.truetype', 'ImageFont.truetype', (['"""/usr/share/fonts/truetype/freefont/FreeSansBold.ttf"""', '(35)'], {}), "('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 35)\n", (2031, 2090), False, 'import ImageFont\n'), ((2098, 2171), 'ImageFont.truetype', 'ImageFont.truetype', (['"""/usr/share/fonts/truetype/freefont/FreeSans.ttf"""', '(35)'], {}), "('/usr/share/fonts/truetype/freefont/FreeSans.ttf', 35)\n", (2116, 2171), False, 'import ImageFont\n'), ((2186, 2263), 'ImageFont.truetype', 'ImageFont.truetype', (['"""/usr/share/fonts/truetype/freefont/FreeSansBold.ttf"""', '(40)'], {}), "('/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 40)\n", (2204, 2263), False, 'import ImageFont\n'), ((2274, 2347), 'ImageFont.truetype', 'ImageFont.truetype', (['"""/usr/share/fonts/truetype/freefont/FreeSans.ttf"""', '(40)'], {}), "('/usr/share/fonts/truetype/freefont/FreeSans.ttf', 40)\n", (2292, 2347), False, 'import ImageFont\n'), ((3526, 3545), 'time.strftime', 'time.strftime', (['"""%a"""'], {}), "('%a')\n", (3539, 3545), False, 'import time\n'), ((3552, 3571), 'time.strftime', 'time.strftime', (['"""%b"""'], {}), "('%b')\n", (3565, 3571), False, 'import time\n'), ((3577, 3596), 'time.strftime', 'time.strftime', (['"""%d"""'], {}), "('%d')\n", (3590, 3596), False, 'import time\n'), ((1321, 1338), 'requests.get', 'requests.get', (['uri'], {}), '(uri)\n', (1333, 1338), False, 'import requests\n'), ((4130, 4155), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4145, 4155), False, 'import os\n'), ((4170, 4198), 'os.path.join', 'os.path.join', (['dir', 'icon_file'], {}), '(dir, icon_file)\n', (4182, 4198), False, 'import os\n'), ((4792, 4817), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4807, 4817), False, 'import os\n'), ((4832, 4867), 'os.path.join', 'os.path.join', (['dir', 'icon_file_yellow'], {}), '(dir, icon_file_yellow)\n', (4844, 4867), False, 'import os\n'), ((1390, 1410), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (1400, 1410), False, 'import json\n'), ((4237, 4258), 'Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (4247, 4258), False, 'import Image\n'), ((4908, 4929), 'Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (4918, 4929), False, 'import Image\n'), ((777, 797), 'urllib.urlencode', 'urllib.urlencode', (['qs'], {}), '(qs)\n', (793, 797), False, 'import urllib\n'), ((859, 885), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['qs'], {}), '(qs)\n', (881, 885), False, 'import urllib\n')]
|
# coding: utf-8
from __future__ import print_function, division, absolute_import
import pytest
from cutadapt.seqio import Sequence
from cutadapt.adapters import (Adapter, Match, ColorspaceAdapter, FRONT, BACK,
parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser)
def test_issue_52():
adapter = Adapter(
sequence='GAACTCCAGTCACNNNNN',
where=BACK,
max_error_rate=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True)
read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
am = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,
remove_before=False, adapter=adapter, read=read)
assert am.wildcards() == 'GGC'
"""
The result above should actually be 'CGGC' since the correct
alignment is this one:
adapter GAACTCCAGTCACNNNNN
mismatches X X
read CCCCAGAACTACAGTC-CCGGC
Since we do not keep the alignment, guessing 'GGC' is the best we
can currently do.
"""
def test_issue_80():
# This issue turned out to not be an actual issue with the alignment
# algorithm. The following alignment is found because it has more matches
# than the 'obvious' one:
#
# TCGTATGCCGTCTTC
# =========X==XX=
# TCGTATGCCCTC--C
#
# This is correct, albeit a little surprising, since an alignment without
# indels would have only two errors.
adapter = Adapter(
sequence="TCGTATGCCGTCTTC",
where=BACK,
max_error_rate=0.2,
min_overlap=3,
read_wildcards=False,
adapter_wildcards=False)
read = Sequence(name="seq2", sequence="TCGTATGCCCTCC")
result = adapter.match_to(read)
assert result.errors == 3, result
assert result.astart == 0, result
assert result.astop == 15, result
def test_str():
a = Adapter('ACGT', where=BACK, max_error_rate=0.1)
str(a)
str(a.match_to(Sequence(name='seq', sequence='TTACGT')))
ca = ColorspaceAdapter('0123', where=BACK, max_error_rate=0.1)
str(ca)
def test_color():
with pytest.raises(ValueError):
ColorspaceAdapter('0123', where=FRONT, max_error_rate=0.1)
def test_parse_braces():
assert parse_braces('') == ''
assert parse_braces('A') == 'A'
assert parse_braces('A{0}') == ''
assert parse_braces('A{1}') == 'A'
assert parse_braces('A{2}') == 'AA'
assert parse_braces('A{2}C') == 'AAC'
assert parse_braces('ACGTN{3}TGACCC') == 'ACGTNNNTGACCC'
assert parse_braces('ACGTN{10}TGACCC') == 'ACGTNNNNNNNNNNTGACCC'
assert parse_braces('ACGTN{3}TGA{4}CCC') == 'ACGTNNNTGAAAACCC'
assert parse_braces('ACGTN{0}TGA{4}CCC') == 'ACGTTGAAAACCC'
def test_parse_braces_fail():
for expression in ['{', '}', '{}', '{5', '{1}', 'A{-7}', 'A{', 'A{1', 'N{7', 'AN{7', 'A{4{}',
'A{4}{3}', 'A{b}', 'A{6X}', 'A{X6}']:
with pytest.raises(ValueError):
parse_braces(expression)
def test_linked_adapter():
linked_adapter = LinkedAdapter('AAAA', 'TTTT', min_overlap=4)
assert linked_adapter.front_adapter.min_overlap == 4
assert linked_adapter.back_adapter.min_overlap == 4
sequence = Sequence(name='seq', sequence='AAAACCCCCTTTT')
trimmed = linked_adapter.match_to(sequence).trimmed()
assert trimmed.name == 'seq'
assert trimmed.sequence == 'CCCCC'
def test_info_record():
adapter = Adapter(
sequence='GAACTCCAGTCACNNNNN',
where=BACK,
max_error_rate=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True,
name="Foo")
read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
am = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2, remove_before=False,
adapter=adapter, read=read)
assert am.get_info_record() == (
"abc",
2,
5,
21,
'CCCCA',
'GAACTACAGTCCCGGC',
'',
'Foo',
'',
'',
''
)
def test_random_match_probabilities():
a = Adapter('A', where=BACK, max_error_rate=0.1).create_statistics()
assert a.back.random_match_probabilities(0.5) == [1, 0.25]
assert a.back.random_match_probabilities(0.2) == [1, 0.4]
for s in ('ACTG', 'XMWH'):
a = Adapter(s, where=BACK, max_error_rate=0.1).create_statistics()
assert a.back.random_match_probabilities(0.5) == [1, 0.25, 0.25**2, 0.25**3, 0.25**4]
assert a.back.random_match_probabilities(0.2) == [1, 0.4, 0.4*0.1, 0.4*0.1*0.4, 0.4*0.1*0.4*0.1]
a = Adapter('GTCA', where=FRONT, max_error_rate=0.1).create_statistics()
assert a.front.random_match_probabilities(0.5) == [1, 0.25, 0.25**2, 0.25**3, 0.25**4]
assert a.front.random_match_probabilities(0.2) == [1, 0.4, 0.4*0.1, 0.4*0.1*0.4, 0.4*0.1*0.4*0.1]
def test_add_adapter_statistics():
stats = Adapter('A', name='name', where=BACK, max_error_rate=0.1).create_statistics()
end_stats = stats.back
end_stats.adjacent_bases['A'] = 7
end_stats.adjacent_bases['C'] = 19
end_stats.adjacent_bases['G'] = 23
end_stats.adjacent_bases['T'] = 42
end_stats.adjacent_bases[''] = 45
end_stats.errors[10][0] = 100
end_stats.errors[10][1] = 11
end_stats.errors[10][2] = 3
end_stats.errors[20][0] = 600
end_stats.errors[20][1] = 66
end_stats.errors[20][2] = 6
stats2 = Adapter('A', name='name', where=BACK, max_error_rate=0.1).create_statistics()
end_stats2 = stats2.back
end_stats2.adjacent_bases['A'] = 43
end_stats2.adjacent_bases['C'] = 31
end_stats2.adjacent_bases['G'] = 27
end_stats2.adjacent_bases['T'] = 8
end_stats2.adjacent_bases[''] = 5
end_stats2.errors[10][0] = 234
end_stats2.errors[10][1] = 14
end_stats2.errors[10][3] = 5
end_stats2.errors[15][0] = 90
end_stats2.errors[15][1] = 17
end_stats2.errors[15][2] = 2
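    # In-place addition should merge the adjacent_bases counts and the nested per-length errors dicts element-wise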
stats += stats2
r = stats.back
assert r.adjacent_bases == {'A': 50, 'C': 50, 'G': 50, 'T': 50, '': 50}
assert r.errors == {
10: {0: 334, 1: 25, 2: 3, 3: 5},
15: {0: 90, 1: 17, 2: 2},
20: {0: 600, 1: 66, 2: 6},
}
def test_issue_265():
"""Crash when accessing the matches property of non-anchored linked adapters"""
s = Sequence('name', 'AAAATTTT')
la = LinkedAdapter('GGG', 'TTT', front_restriction=None, back_restriction=None)
assert la.match_to(s).matches == 3
def test_parse_not_linked():
p = AdapterParser._parse_not_linked
assert p('A', 'front') == (None, 'A', None)
assert p('A', 'back') == (None, 'A', None)
assert p('A', 'anywhere') == (None, 'A', None)
assert p('^A', 'front') == ('anchored', 'A', None)
assert p('XXXA', 'front') == ('noninternal', 'A', None)
assert p('A$', 'back') == (None, 'A', 'anchored')
assert p('AXXXX', 'back') == (None, 'A', 'noninternal')
|
[
"cutadapt.adapters.Adapter",
"cutadapt.adapters.ColorspaceAdapter",
"cutadapt.adapters.parse_braces",
"cutadapt.adapters.Match",
"cutadapt.seqio.Sequence",
"pytest.raises",
"cutadapt.adapters.LinkedAdapter"
] |
[((309, 445), 'cutadapt.adapters.Adapter', 'Adapter', ([], {'sequence': '"""GAACTCCAGTCACNNNNN"""', 'where': 'BACK', 'max_error_rate': '(0.12)', 'min_overlap': '(5)', 'read_wildcards': '(False)', 'adapter_wildcards': '(True)'}), "(sequence='GAACTCCAGTCACNNNNN', where=BACK, max_error_rate=0.12,\n min_overlap=5, read_wildcards=False, adapter_wildcards=True)\n", (316, 445), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((463, 517), 'cutadapt.seqio.Sequence', 'Sequence', ([], {'name': '"""abc"""', 'sequence': '"""CCCCAGAACTACAGTCCCGGC"""'}), "(name='abc', sequence='CCCCAGAACTACAGTCCCGGC')\n", (471, 517), False, 'from cutadapt.seqio import Sequence\n'), ((524, 644), 'cutadapt.adapters.Match', 'Match', ([], {'astart': '(0)', 'astop': '(17)', 'rstart': '(5)', 'rstop': '(21)', 'matches': '(15)', 'errors': '(2)', 'remove_before': '(False)', 'adapter': 'adapter', 'read': 'read'}), '(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,\n remove_before=False, adapter=adapter, read=read)\n', (529, 644), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((1343, 1476), 'cutadapt.adapters.Adapter', 'Adapter', ([], {'sequence': '"""TCGTATGCCGTCTTC"""', 'where': 'BACK', 'max_error_rate': '(0.2)', 'min_overlap': '(3)', 'read_wildcards': '(False)', 'adapter_wildcards': '(False)'}), "(sequence='TCGTATGCCGTCTTC', where=BACK, max_error_rate=0.2,\n min_overlap=3, read_wildcards=False, adapter_wildcards=False)\n", (1350, 1476), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((1494, 1541), 'cutadapt.seqio.Sequence', 'Sequence', ([], {'name': '"""seq2"""', 'sequence': '"""TCGTATGCCCTCC"""'}), "(name='seq2', sequence='TCGTATGCCCTCC')\n", (1502, 1541), False, 'from cutadapt.seqio import Sequence\n'), ((1703, 1750), 'cutadapt.adapters.Adapter', 'Adapter', (['"""ACGT"""'], {'where': 'BACK', 'max_error_rate': '(0.1)'}), "('ACGT', where=BACK, max_error_rate=0.1)\n", (1710, 1750), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((1823, 1880), 'cutadapt.adapters.ColorspaceAdapter', 'ColorspaceAdapter', (['"""0123"""'], {'where': 'BACK', 'max_error_rate': '(0.1)'}), "('0123', where=BACK, max_error_rate=0.1)\n", (1840, 1880), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2768, 2812), 'cutadapt.adapters.LinkedAdapter', 'LinkedAdapter', (['"""AAAA"""', '"""TTTT"""'], {'min_overlap': '(4)'}), "('AAAA', 'TTTT', min_overlap=4)\n", (2781, 2812), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2933, 2979), 'cutadapt.seqio.Sequence', 'Sequence', ([], {'name': '"""seq"""', 'sequence': '"""AAAACCCCCTTTT"""'}), "(name='seq', sequence='AAAACCCCCTTTT')\n", (2941, 2979), False, 'from cutadapt.seqio import Sequence\n'), ((3138, 3286), 'cutadapt.adapters.Adapter', 'Adapter', ([], {'sequence': '"""GAACTCCAGTCACNNNNN"""', 'where': 'BACK', 'max_error_rate': '(0.12)', 'min_overlap': '(5)', 'read_wildcards': '(False)', 'adapter_wildcards': '(True)', 'name': '"""Foo"""'}), "(sequence='GAACTCCAGTCACNNNNN', 
where=BACK, max_error_rate=0.12,\n min_overlap=5, read_wildcards=False, adapter_wildcards=True, name='Foo')\n", (3145, 3286), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((3306, 3360), 'cutadapt.seqio.Sequence', 'Sequence', ([], {'name': '"""abc"""', 'sequence': '"""CCCCAGAACTACAGTCCCGGC"""'}), "(name='abc', sequence='CCCCAGAACTACAGTCCCGGC')\n", (3314, 3360), False, 'from cutadapt.seqio import Sequence\n'), ((3367, 3487), 'cutadapt.adapters.Match', 'Match', ([], {'astart': '(0)', 'astop': '(17)', 'rstart': '(5)', 'rstop': '(21)', 'matches': '(15)', 'errors': '(2)', 'remove_before': '(False)', 'adapter': 'adapter', 'read': 'read'}), '(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,\n remove_before=False, adapter=adapter, read=read)\n', (3372, 3487), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((5716, 5744), 'cutadapt.seqio.Sequence', 'Sequence', (['"""name"""', '"""AAAATTTT"""'], {}), "('name', 'AAAATTTT')\n", (5724, 5744), False, 'from cutadapt.seqio import Sequence\n'), ((5751, 5825), 'cutadapt.adapters.LinkedAdapter', 'LinkedAdapter', (['"""GGG"""', '"""TTT"""'], {'front_restriction': 'None', 'back_restriction': 'None'}), "('GGG', 'TTT', front_restriction=None, back_restriction=None)\n", (5764, 5825), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((1916, 1941), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1929, 1941), False, 'import pytest\n'), ((1945, 2003), 'cutadapt.adapters.ColorspaceAdapter', 'ColorspaceAdapter', (['"""0123"""'], {'where': 'FRONT', 'max_error_rate': '(0.1)'}), "('0123', where=FRONT, max_error_rate=0.1)\n", (1962, 2003), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2039, 2055), 'cutadapt.adapters.parse_braces', 'parse_braces', (['""""""'], {}), "('')\n", (2051, 2055), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2070, 2087), 'cutadapt.adapters.parse_braces', 'parse_braces', (['"""A"""'], {}), "('A')\n", (2082, 2087), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2103, 2123), 'cutadapt.adapters.parse_braces', 'parse_braces', (['"""A{0}"""'], {}), "('A{0}')\n", (2115, 2123), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2138, 2158), 'cutadapt.adapters.parse_braces', 'parse_braces', (['"""A{1}"""'], {}), "('A{1}')\n", (2150, 2158), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2174, 2194), 'cutadapt.adapters.parse_braces', 'parse_braces', (['"""A{2}"""'], {}), "('A{2}')\n", (2186, 2194), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2211, 2232), 'cutadapt.adapters.parse_braces', 'parse_braces', (['"""A{2}C"""'], {}), "('A{2}C')\n", (2223, 2232), False, 'from 
cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2250, 2280), 'cutadapt.adapters.parse_braces', 'parse_braces', (['"""ACGTN{3}TGACCC"""'], {}), "('ACGTN{3}TGACCC')\n", (2262, 2280), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2308, 2339), 'cutadapt.adapters.parse_braces', 'parse_braces', (['"""ACGTN{10}TGACCC"""'], {}), "('ACGTN{10}TGACCC')\n", (2320, 2339), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2374, 2407), 'cutadapt.adapters.parse_braces', 'parse_braces', (['"""ACGTN{3}TGA{4}CCC"""'], {}), "('ACGTN{3}TGA{4}CCC')\n", (2386, 2407), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((2438, 2471), 'cutadapt.adapters.parse_braces', 'parse_braces', (['"""ACGTN{0}TGA{4}CCC"""'], {}), "('ACGTN{0}TGA{4}CCC')\n", (2450, 2471), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((1775, 1814), 'cutadapt.seqio.Sequence', 'Sequence', ([], {'name': '"""seq"""', 'sequence': '"""TTACGT"""'}), "(name='seq', sequence='TTACGT')\n", (1783, 1814), False, 'from cutadapt.seqio import Sequence\n'), ((2666, 2691), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2679, 2691), False, 'import pytest\n'), ((2696, 2720), 'cutadapt.adapters.parse_braces', 'parse_braces', (['expression'], {}), '(expression)\n', (2708, 2720), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((3661, 3705), 'cutadapt.adapters.Adapter', 'Adapter', (['"""A"""'], {'where': 'BACK', 'max_error_rate': '(0.1)'}), "('A', where=BACK, max_error_rate=0.1)\n", (3668, 3705), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((4136, 4184), 'cutadapt.adapters.Adapter', 'Adapter', (['"""GTCA"""'], {'where': 'FRONT', 'max_error_rate': '(0.1)'}), "('GTCA', where=FRONT, max_error_rate=0.1)\n", (4143, 4184), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((4438, 4495), 'cutadapt.adapters.Adapter', 'Adapter', (['"""A"""'], {'name': '"""name"""', 'where': 'BACK', 'max_error_rate': '(0.1)'}), "('A', name='name', where=BACK, max_error_rate=0.1)\n", (4445, 4495), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((4910, 4967), 'cutadapt.adapters.Adapter', 'Adapter', (['"""A"""'], {'name': '"""name"""', 'where': 'BACK', 'max_error_rate': '(0.1)'}), "('A', name='name', where=BACK, max_error_rate=0.1)\n", (4917, 4967), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n'), ((3880, 3922), 'cutadapt.adapters.Adapter', 'Adapter', (['s'], {'where': 'BACK', 'max_error_rate': '(0.1)'}), '(s, where=BACK, max_error_rate=0.1)\n', (3887, 3922), False, 'from cutadapt.adapters import Adapter, Match, ColorspaceAdapter, FRONT, BACK, 
parse_braces, LinkedAdapter, AdapterStatistics, AdapterParser\n')]
|
#!/usr/bin/python
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import os
import subprocess
import sys
import time
import traceback
from xml.dom import minidom
def check_ready_to_run():
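    # Returns True only when no other gpu_metrics_collector process is running, so a single collector runs per machine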
if sys.platform == 'win32':
pgrep_output = subprocess.check_output(
'wmic process where "CommandLine like \'%nni_gpu_tool.gpu_metrics_collector%\' and name like \'%python%\'" get processId')
pidList = pgrep_output.decode("utf-8").strip().split()
pidList.pop(0) # remove the key word 'ProcessId'
pidList = list(map(int, pidList))
pidList.remove(os.getpid())
return not pidList
else:
pgrep_output = subprocess.check_output('pgrep -fx \'python3 -m nni_gpu_tool.gpu_metrics_collector\'', shell=True)
pidList = []
for pid in pgrep_output.splitlines():
pidList.append(int(pid))
pidList.remove(os.getpid())
return not pidList
def main(argv):
metrics_output_dir = os.environ['METRIC_OUTPUT_DIR']
    if not check_ready_to_run():
# GPU metrics collector is already running. Exit
exit(2)
cmd = 'nvidia-smi -q -x'.split()
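    # Poll nvidia-smi's XML output in an endless loop, appending one JSON line per sample to the metrics file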
    while True:
try:
smi_output = subprocess.check_output(cmd)
except Exception:
traceback.print_exc()
gen_empty_gpu_metric(metrics_output_dir)
break
parse_nvidia_smi_result(smi_output, metrics_output_dir)
# TODO: change to sleep time configurable via arguments
time.sleep(5)
def parse_nvidia_smi_result(smi, outputDir):
try:
old_umask = os.umask(0)
xmldoc = minidom.parseString(smi)
gpuList = xmldoc.getElementsByTagName('gpu')
with open(os.path.join(outputDir, "gpu_metrics"), 'a') as outputFile:
outPut = {}
outPut["Timestamp"] = time.asctime(time.localtime())
outPut["gpuCount"] = len(gpuList)
outPut["gpuInfos"] = []
for gpuIndex, gpu in enumerate(gpuList):
gpuInfo = {}
gpuInfo['index'] = gpuIndex
gpuInfo['gpuUtil'] = gpu.getElementsByTagName('utilization')[0]\
.getElementsByTagName('gpu_util')[0]\
.childNodes[0].data.replace("%", "").strip()
gpuInfo['gpuMemUtil'] = gpu.getElementsByTagName('utilization')[0]\
.getElementsByTagName('memory_util')[0]\
.childNodes[0].data.replace("%", "").strip()
processes = gpu.getElementsByTagName('processes')
runningProNumber = len(processes[0].getElementsByTagName('process_info'))
gpuInfo['activeProcessNum'] = runningProNumber
outPut["gpuInfos"].append(gpuInfo)
print(outPut)
outputFile.write("{}\n".format(json.dumps(outPut, sort_keys=True)))
            outputFile.flush()
    except Exception:
        # e_info = sys.exc_info()
        print('xmldoc parsing error')
    finally:
        os.umask(old_umask)
def gen_empty_gpu_metric(outputDir):
try:
old_umask = os.umask(0)
with open(os.path.join(outputDir, "gpu_metrics"), 'a') as outputFile:
outPut = {}
outPut["Timestamp"] = time.asctime(time.localtime())
outPut["gpuCount"] = 0
outPut["gpuInfos"] = []
print(outPut)
outputFile.write("{}\n".format(json.dumps(outPut, sort_keys=True)))
outputFile.flush()
except Exception:
traceback.print_exc()
finally:
os.umask(old_umask)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"traceback.print_exc",
"os.getpid",
"xml.dom.minidom.parseString",
"subprocess.check_output",
"json.dumps",
"time.sleep",
"os.umask",
"os.path.join",
"time.localtime"
] |
[((1332, 1488), 'subprocess.check_output', 'subprocess.check_output', (['"""wmic process where "CommandLine like \'%nni_gpu_tool.gpu_metrics_collector%\' and name like \'%python%\'" get processId"""'], {}), '(\n \'wmic process where "CommandLine like \\\'%nni_gpu_tool.gpu_metrics_collector%\\\' and name like \\\'%python%\\\'" get processId\'\n )\n', (1355, 1488), False, 'import subprocess\n'), ((1750, 1851), 'subprocess.check_output', 'subprocess.check_output', (['"""pgrep -fx \'python3 -m nni_gpu_tool.gpu_metrics_collector\'"""'], {'shell': '(True)'}), '(\n "pgrep -fx \'python3 -m nni_gpu_tool.gpu_metrics_collector\'", shell=True)\n', (1773, 1851), False, 'import subprocess\n'), ((2589, 2602), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2599, 2602), False, 'import time\n'), ((2678, 2689), 'os.umask', 'os.umask', (['(0)'], {}), '(0)\n', (2686, 2689), False, 'import os\n'), ((2707, 2731), 'xml.dom.minidom.parseString', 'minidom.parseString', (['smi'], {}), '(smi)\n', (2726, 2731), False, 'from xml.dom import minidom\n'), ((4087, 4106), 'os.umask', 'os.umask', (['old_umask'], {}), '(old_umask)\n', (4095, 4106), False, 'import os\n'), ((4174, 4185), 'os.umask', 'os.umask', (['(0)'], {}), '(0)\n', (4182, 4185), False, 'import os\n'), ((4634, 4653), 'os.umask', 'os.umask', (['old_umask'], {}), '(old_umask)\n', (4642, 4653), False, 'import os\n'), ((1677, 1688), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1686, 1688), False, 'import os\n'), ((1976, 1987), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1985, 1987), False, 'import os\n'), ((2293, 2321), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (2316, 2321), False, 'import subprocess\n'), ((4591, 4612), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4610, 4612), False, 'import traceback\n'), ((2360, 2381), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2379, 2381), False, 'import traceback\n'), ((2803, 2841), 'os.path.join', 'os.path.join', (['outputDir', '"""gpu_metrics"""'], {}), "(outputDir, 'gpu_metrics')\n", (2815, 2841), False, 'import os\n'), ((2934, 2950), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2948, 2950), False, 'import time\n'), ((4204, 4242), 'os.path.join', 'os.path.join', (['outputDir', '"""gpu_metrics"""'], {}), "(outputDir, 'gpu_metrics')\n", (4216, 4242), False, 'import os\n'), ((4335, 4351), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4349, 4351), False, 'import time\n'), ((3914, 3948), 'json.dumps', 'json.dumps', (['outPut'], {'sort_keys': '(True)'}), '(outPut, sort_keys=True)\n', (3924, 3948), False, 'import json\n'), ((4493, 4527), 'json.dumps', 'json.dumps', (['outPut'], {'sort_keys': '(True)'}), '(outPut, sort_keys=True)\n', (4503, 4527), False, 'import json\n')]
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from desktop.lib.django_util import render
from libsentry.sentry_site import get_hive_sentry_provider, get_sentry_server_admin_groups
def hive(request):
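  # Render the Hive security app; user and Sentry settings are serialised to JSON for the frontend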
return render("hive.mako", request, {
'initial': json.dumps({
'user': request.user.username,
'sentry_provider': get_hive_sentry_provider(),
'is_sentry_admin': request.user.groups.filter(name__in=get_sentry_server_admin_groups()).exists()
}),
'has_impersonation_perm': _has_impersonation_perm(request.user),
})
def hive2(request):
return _sentry(request, component='hive')
def solr(request):
return _sentry(request, component='solr')
def _sentry(request, component):
return render("sentry.mako", request, {
'initial': json.dumps({
'component': component,
'user': request.user.username,
'sentry_provider': get_hive_sentry_provider(),
'is_sentry_admin': request.user.groups.filter(name__in=get_sentry_server_admin_groups()).exists()
}),
'has_impersonation_perm': _has_impersonation_perm(request.user) and component == 'hive',
'component': component
})
def hdfs(request):
return render("hdfs.mako", request, {
'initial': json.dumps({'user': request.user.username}),
'has_impersonation_perm': _has_impersonation_perm(request.user)
})
def _has_impersonation_perm(user):
return user.is_superuser or user.has_hue_permission(action="impersonate", app="security")
|
[
"libsentry.sentry_site.get_sentry_server_admin_groups",
"json.dumps",
"libsentry.sentry_site.get_hive_sentry_provider"
] |
[((2020, 2063), 'json.dumps', 'json.dumps', (["{'user': request.user.username}"], {}), "({'user': request.user.username})\n", (2030, 2063), False, 'import json\n'), ((1102, 1128), 'libsentry.sentry_site.get_hive_sentry_provider', 'get_hive_sentry_provider', ([], {}), '()\n', (1126, 1128), False, 'from libsentry.sentry_site import get_hive_sentry_provider, get_sentry_server_admin_groups\n'), ((1666, 1692), 'libsentry.sentry_site.get_hive_sentry_provider', 'get_hive_sentry_provider', ([], {}), '()\n', (1690, 1692), False, 'from libsentry.sentry_site import get_hive_sentry_provider, get_sentry_server_admin_groups\n'), ((1195, 1227), 'libsentry.sentry_site.get_sentry_server_admin_groups', 'get_sentry_server_admin_groups', ([], {}), '()\n', (1225, 1227), False, 'from libsentry.sentry_site import get_hive_sentry_provider, get_sentry_server_admin_groups\n'), ((1759, 1791), 'libsentry.sentry_site.get_sentry_server_admin_groups', 'get_sentry_server_admin_groups', ([], {}), '()\n', (1789, 1791), False, 'from libsentry.sentry_site import get_hive_sentry_provider, get_sentry_server_admin_groups\n')]
|
# https://leetcode.com/problems/merge-two-sorted-lists/
import sys
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def mergeTwoLists(self, l1, l2):
head = last = None
def append(node):
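            # Attach node to the tail of the merged list, tracking head/tail via nonlocal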
nonlocal head, last
if not head:
head = last = node
else:
last.next = node
last = node
while l1 or l2:
if l1 and l2:
if l1.val < l2.val:
append(l1)
l1 = l1.next
else:
append(l2)
l2 = l2.next
elif l1:
append(l1)
l1 = l1.next
else:
append(l2)
l2 = l2.next
return head
def read_list():
values = list(map(int, sys.stdin.readline().strip().split()))
head = last = None
for val in values:
if not last:
head = last = ListNode(val)
else:
last.next = ListNode(val)
last = last.next
return head
def print_list(l):
ptr = l
values = []
while ptr:
values.append(ptr.val)
ptr = ptr.next
print(' '.join(map(str, values)))
if __name__ == '__main__':
sol = Solution()
num_problems = int(sys.stdin.readline())
for _ in range(num_problems):
l1, l2 = read_list(), read_list()
res = sol.mergeTwoLists(l1, l2)
print_list(res)
|
[
"sys.stdin.readline"
] |
[((1393, 1413), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1411, 1413), False, 'import sys\n'), ((921, 941), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (939, 941), False, 'import sys\n')]
|
"""
Defines a rule set using one of the standard Iguanas representations. This rule
set can then be reformatted into the other standard Iguanas representations
using the class methods.
"""
from iguanas.rules._convert_rule_dicts_to_rule_strings import _ConvertRuleDictsToRuleStrings
from iguanas.rules._convert_rule_strings_to_rule_dicts import _ConvertRuleStringsToRuleDicts
from iguanas.rules._convert_rule_dicts_to_rule_lambdas import _ConvertRuleDictsToRuleLambdas
from iguanas.rules._convert_rule_lambdas_to_rule_strings import _ConvertRuleLambdasToRuleStrings
from iguanas.rules._get_rule_attributes import _GetRuleFeatures
from iguanas.rule_application import RuleApplier
from iguanas.utils.typing import KoalasDataFrameType, PandasDataFrameType
from typing import List, Dict, Callable, Union
class Rules(RuleApplier):
"""
Defines a set of rules using the following representations: string,
dictionary, lambda expression.
One of the above formats must be provided to define the rule set. The
rules can then be reformatted into one of the other representations.
Parameters
----------
rule_dicts : Dict[str, dict]
        Set of rules defined using the standard Iguanas dictionary format
(values) and their names (keys). Defaults to `None`.
rule_strings : Dict[str, str]
Set of rules defined using the standard Iguanas string format
(values) and their names (keys). Defaults to `None`.
rule_lambdas : Dict[str, Callable[[dict], str]]
Set of rules defined using the standard Iguanas lambda expression
format (values) and their names (keys). Must be given in
conjunction with either `lambda_kwargs` or `lambda_args`. Defaults
to `None`.
lambda_kwargs : Dict[str, dict]
For each rule (keys), a dictionary containing the features used in
the rule (keys) and the current values (values). Only populates
when `.as_lambda()` is used with the keyword argument
`with_kwargs=True`. Defaults to `None`.
lambda_args : Dict[str, list]
For each rule (keys), a list containing the current values used in
the rule. Only populates when `.as_lambda()` is used with the
keyword argument `with_kwargs=False`. Defaults to `None`.
Attributes
----------
rule_dicts : Dict[str, dict]
Set of rules defined using the standard Iguanas dictionary format
(values) and their names (keys).
rule_strings : Dict[str, str]
Set of rules defined using the standard Iguanas string format (values)
and their names (keys).
rule_lambdas : Dict[str, Callable[[dict], str]]
Set of rules defined using the standard Iguanas lambda expression
format (values) and their names (keys).
lambda_kwargs : Dict[str, dict]
For each rule (keys), a dictionary containing the features used in the
rule (keys) and the current values (values).
lambda_args : Dict[str, list]
For each rule (keys), a list containing the current values used in the
rule.
rule_features : Dict[str, set]
For each rule (keys), a set containing the features used in the rule
        (only populates when the `.get_rule_features()` method is used).
"""
def __init__(self, rule_dicts=None, rule_strings=None,
rule_lambdas=None, lambda_kwargs=None, lambda_args=None):
if rule_dicts is None and rule_strings is None and rule_lambdas is None:
raise ValueError(
'`rule_dicts`, `rule_strings` or `rule_lambdas` must be given')
if rule_lambdas is not None and lambda_kwargs is None and \
lambda_args is None:
raise ValueError(
'`lambda_kwargs` or `lambda_args` must be given when `rule_lambdas` is provided')
self.rule_dicts = {} if rule_dicts is None else rule_dicts
self.rule_strings = {} if rule_strings is None else rule_strings
self.rule_lambdas = {} if rule_lambdas is None else rule_lambdas
self.lambda_kwargs = {} if lambda_kwargs is None else lambda_kwargs
self.lambda_args = {} if lambda_args is None else lambda_args
self.rule_features = {}
RuleApplier.__init__(
self, rule_strings=self.rule_strings,
)
def __repr__(self):
rules_lens = [
len(self.rule_dicts),
len(self.rule_strings),
len(self.rule_lambdas)
]
return f'Rules object containing {max(rules_lens)} rules'
def as_rule_dicts(self) -> Dict[str, dict]:
"""
Converts rules into the standard Iguanas dictionary format.
Returns
-------
Dict[str, dict]
Rules in the standard Iguanas dictionary format.
"""
if self.rule_strings != {}:
self._rule_strings_to_rule_dicts()
elif self.rule_lambdas != {}:
self._rule_lambdas_to_rule_strings()
self._rule_strings_to_rule_dicts()
return self.rule_dicts
def as_rule_strings(self, as_numpy: bool) -> Dict[str, str]:
"""
Converts rules into the standard Iguanas string format.
Parameters
----------
as_numpy : bool
            If `True`, the conditions in the string format will use Numpy
            rather than Pandas. These rules are generally evaluated more
            quickly on larger datasets stored as Pandas DataFrames.
Returns
-------
Dict[str, str]
Rules in the standard Iguanas string format.
"""
if self.rule_strings != {}:
self._rule_strings_to_rule_dicts()
elif self.rule_lambdas != {}:
self._rule_lambdas_to_rule_strings()
self._rule_strings_to_rule_dicts()
self._rule_dicts_to_rule_strings(as_numpy=as_numpy)
return self.rule_strings
def as_rule_lambdas(self, as_numpy: bool,
with_kwargs: bool) -> Dict[str, Callable[[dict], str]]:
"""
Converts rules into the standard Iguanas lambda expression format.
Parameters
----------
as_numpy : bool
            If `True`, the conditions in the string format will use Numpy
            rather than Pandas. These rules are generally evaluated more
            quickly on larger datasets stored as Pandas DataFrames.
with_kwargs : bool
If `True`, the string in the lambda expression is created such that
the inputs are keyword arguments. If `False`, the inputs are
positional arguments.
Returns
-------
Dict[str, Callable[[dict], str]]
Rules in the standard Iguanas lambda expression format.
"""
if self.rule_lambdas != {}:
self._rule_lambdas_to_rule_strings()
self._rule_strings_to_rule_dicts()
elif self.rule_strings != {}:
self._rule_strings_to_rule_dicts()
self._rule_dicts_to_rule_lambdas(
as_numpy=as_numpy, with_kwargs=with_kwargs)
return self.rule_lambdas
def transform(self,
X: Union[PandasDataFrameType, KoalasDataFrameType]) -> Union[PandasDataFrameType, KoalasDataFrameType]:
"""
Applies the set of rules to a dataset, `X`.
Parameters
----------
X : Union[PandasDataFrameType, KoalasDataFrameType]
The feature set on which the rules should be applied.
Returns
-------
Union[PandasDataFrameType, KoalasDataFrameType]
The binary columns of the rules.
"""
if self.rule_strings == {}:
self.rule_strings = self.as_rule_strings(as_numpy=False)
X_rules = RuleApplier.transform(self, X=X)
return X_rules
def filter_rules(self, include=None, exclude=None) -> None:
"""
Filters the rules by their names.
Parameters
----------
include : List[str], optional
The list of rule names to keep. Defaults to `None`.
exclude : List[str], optional
The list of rule names to drop. Defaults to `None`.
Raises
------
Exception
`include` and `exclude` cannot contain similar values.
"""
if include is not None and exclude is not None:
intersected = set.intersection(set(include), set(exclude))
if len(intersected) > 0:
raise Exception(
'`include` and `exclude` contain similar values')
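        # Take the rule names from whichever representation is currently populated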
for d in [self.rule_strings, self.rule_dicts, self.rule_lambdas]:
if d != {}:
rule_names = list(d.keys())
break
for rule_name in rule_names:
if (include is not None and rule_name not in include) or \
(exclude is not None and rule_name in exclude):
self.rule_strings.pop(rule_name, None)
self.rule_dicts.pop(rule_name, None)
self.rule_lambdas.pop(rule_name, None)
self.lambda_kwargs.pop(rule_name, None)
self.lambda_args.pop(rule_name, None)
self.rule_features.pop(rule_name, None)
def get_rule_features(self) -> Dict[str, set]:
"""
Returns the set of unique features present in each rule.
Returns
-------
Dict[str, set]
Set of unique features (values) in each rule (keys).
"""
if self.rule_dicts == {}:
_ = self.as_rule_dicts()
        grf = _GetRuleFeatures(rule_dicts=self.rule_dicts)
        self.rule_features = grf.get()
        return self.rule_features
def _rule_dicts_to_rule_strings(self, as_numpy: bool) -> None:
"""
Converts the rules (each being represented in the standard Iguanas
dictionary format) into the standard Iguanas string format.
"""
if self.rule_dicts == {}:
raise ValueError('`rule_dicts` must be given')
converter = _ConvertRuleDictsToRuleStrings(
rule_dicts=self.rule_dicts)
self.rule_strings = converter.convert(as_numpy=as_numpy)
def _rule_strings_to_rule_dicts(self) -> None:
"""
Converts the rules (each being represented in the standard Iguanas string
format) into the standard Iguanas dictionary format.
"""
if self.rule_strings == {}:
raise ValueError('`rule_strings` must be given')
converter = _ConvertRuleStringsToRuleDicts(
rule_strings=self.rule_strings)
self.rule_dicts = converter.convert()
def _rule_dicts_to_rule_lambdas(self, as_numpy: bool,
with_kwargs: bool) -> None:
"""
Converts the rules (each being represented in the standard Iguanas
dictionary format) into the standard Iguanas lambda expression format.
This is useful for optimising rules.
"""
if self.rule_dicts == {}:
raise ValueError('`rule_dicts` must be given')
converter = _ConvertRuleDictsToRuleLambdas(rule_dicts=self.rule_dicts)
self.rule_lambdas = converter.convert(
as_numpy=as_numpy, with_kwargs=with_kwargs)
self.lambda_kwargs = converter.lambda_kwargs
self.lambda_args = converter.lambda_args
self.rule_features = converter.rule_features
def _rule_lambdas_to_rule_strings(self) -> None:
"""
Converts the rules (each being represented in the standard Iguanas lambda
expression format) into the standard Iguanas string format.
"""
if self.rule_lambdas == {}:
raise ValueError('`rule_lambdas` must be given')
if self.lambda_kwargs == {} and self.lambda_args == {}:
raise ValueError('`lambda_kwargs` or `lambda_args` must be given')
converter = _ConvertRuleLambdasToRuleStrings(
rule_lambdas=self.rule_lambdas, lambda_kwargs=self.lambda_kwargs,
lambda_args=self.lambda_args)
self.rule_strings = converter.convert()
|
[
"iguanas.rule_application.RuleApplier.__init__",
"iguanas.rules._convert_rule_dicts_to_rule_strings._ConvertRuleDictsToRuleStrings",
"iguanas.rules._get_rule_attributes._GetRuleFeatures",
"iguanas.rules._convert_rule_strings_to_rule_dicts._ConvertRuleStringsToRuleDicts",
"iguanas.rules._convert_rule_lambdas_to_rule_strings._ConvertRuleLambdasToRuleStrings",
"iguanas.rules._convert_rule_dicts_to_rule_lambdas._ConvertRuleDictsToRuleLambdas",
"iguanas.rule_application.RuleApplier.transform"
] |
[((4245, 4303), 'iguanas.rule_application.RuleApplier.__init__', 'RuleApplier.__init__', (['self'], {'rule_strings': 'self.rule_strings'}), '(self, rule_strings=self.rule_strings)\n', (4265, 4303), False, 'from iguanas.rule_application import RuleApplier\n'), ((7803, 7835), 'iguanas.rule_application.RuleApplier.transform', 'RuleApplier.transform', (['self'], {'X': 'X'}), '(self, X=X)\n', (7824, 7835), False, 'from iguanas.rule_application import RuleApplier\n'), ((9634, 9678), 'iguanas.rules._get_rule_attributes._GetRuleFeatures', '_GetRuleFeatures', ([], {'rule_dicts': 'self.rule_dicts'}), '(rule_dicts=self.rule_dicts)\n', (9650, 9678), False, 'from iguanas.rules._get_rule_attributes import _GetRuleFeatures\n'), ((10107, 10165), 'iguanas.rules._convert_rule_dicts_to_rule_strings._ConvertRuleDictsToRuleStrings', '_ConvertRuleDictsToRuleStrings', ([], {'rule_dicts': 'self.rule_dicts'}), '(rule_dicts=self.rule_dicts)\n', (10137, 10165), False, 'from iguanas.rules._convert_rule_dicts_to_rule_strings import _ConvertRuleDictsToRuleStrings\n'), ((10581, 10643), 'iguanas.rules._convert_rule_strings_to_rule_dicts._ConvertRuleStringsToRuleDicts', '_ConvertRuleStringsToRuleDicts', ([], {'rule_strings': 'self.rule_strings'}), '(rule_strings=self.rule_strings)\n', (10611, 10643), False, 'from iguanas.rules._convert_rule_strings_to_rule_dicts import _ConvertRuleStringsToRuleDicts\n'), ((11162, 11220), 'iguanas.rules._convert_rule_dicts_to_rule_lambdas._ConvertRuleDictsToRuleLambdas', '_ConvertRuleDictsToRuleLambdas', ([], {'rule_dicts': 'self.rule_dicts'}), '(rule_dicts=self.rule_dicts)\n', (11192, 11220), False, 'from iguanas.rules._convert_rule_dicts_to_rule_lambdas import _ConvertRuleDictsToRuleLambdas\n'), ((11967, 12099), 'iguanas.rules._convert_rule_lambdas_to_rule_strings._ConvertRuleLambdasToRuleStrings', '_ConvertRuleLambdasToRuleStrings', ([], {'rule_lambdas': 'self.rule_lambdas', 'lambda_kwargs': 'self.lambda_kwargs', 'lambda_args': 'self.lambda_args'}), '(rule_lambdas=self.rule_lambdas,\n lambda_kwargs=self.lambda_kwargs, lambda_args=self.lambda_args)\n', (11999, 12099), False, 'from iguanas.rules._convert_rule_lambdas_to_rule_strings import _ConvertRuleLambdasToRuleStrings\n')]
|
from __future__ import print_function
import os
import glob
def NameFile(filepath):
if os.path.exists(filepath):
newname = FindLatestFilename(filepath)
else:
newname = filepath
print('Saving file as...', newname)
return newname
def FindLatestFilename(filepath):
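    # Find the highest-numbered existing file matching <name>*.<ext> and return the next filename in the sequence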
if len(filepath.rsplit('/', 1)) == 2:
foldername = filepath.rsplit('/', 1)[0] # Split out folder path
filename = filepath.rsplit('/', 1)[-1] # retrieve filename
filelist = glob.glob((foldername + '/' + filename.rsplit('.', 1)[0] + '*.' + filename.rsplit('.')[-1])) # find list of files within folder
nums = []
lastfile = None
for filen in filelist:
if filen.rsplit('.', 1)[-2].rsplit('-', 1)[-1].isdigit(): # if the filename ends in -digit...
nums.append(int(filen.rsplit('.', 1)[-2].rsplit('-', 1)[-1])) # append the number to the list
lastnum = max(nums) # find maximum number
lastfile = filen.rsplit('.', 1)[-2].rsplit('-', 1)[-2] + '-' + str(lastnum) + '.' + filen.rsplit('.', 1)[-1]
elif lastfile is not None:
lastfile = lastfile
else:
lastfile = filen.rsplit('.', 1)[-2] + '.' + filen.rsplit('.', 1)[-1]
newfilename = NumberFileNames(lastfile)
else:
foldername = ''
filename = filepath
newfilename = NumberFileNames(filename)
return newfilename
def NumberFileNames(filepath):
if len(filepath.rsplit('/', 1)) == 2:
# If the input filepath has a folder path included
foldername = filepath.rsplit('/', 1)[0]
filename = filepath.rsplit('/', 1)[-1]
else:
foldername = ''
filename = filepath
if filename.rsplit('-',1)[-1].rsplit('.', 1)[0].isdigit():
filename = filename.rsplit('-', 1)[0] + '-' + str(1+int(filename.rsplit('-', 1)[-1].rsplit('.', 1)[0])) + '.' + filename.rsplit('.', 1)[-1]
else:
filename = filename.rsplit('.', 1)[-2] + '-1' + '.' + filename.rsplit('.', 1)[-1]
filepath = os.path.join(foldername, filename)
return filepath
|
[
"os.path.exists",
"os.path.join"
] |
[((89, 113), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (103, 113), False, 'import os\n'), ((1836, 1870), 'os.path.join', 'os.path.join', (['foldername', 'filename'], {}), '(foldername, filename)\n', (1848, 1870), False, 'import os\n')]
|
from sqlalchemy.orm import relationship, backref
from sqlalchemy import (
Column,
String,
ForeignKey,
Float,
Integer,
DateTime,
Boolean
)
from libs.database import Base
from apps.account.models import Account, AccountPosition
from apps.model.models import Model, ModelPosition
from apps.portfolio.models import Portfolio
from apps.trade.models import Trade, TradeRequest
class RolesUsers(Base):
'''M-to-M relation between roles and users'''
__tablename__ = 'roles_users'
id = Column(Integer(), primary_key=True)
user_id = Column('user_id', Integer(), ForeignKey('users.id'))
role_id = Column('role_id', Integer(), ForeignKey('roles.id'))
class Role(Base):
__tablename__ = 'roles'
id = Column(Integer(), primary_key=True)
name = Column(String(80), unique=True)
description = Column(String(255))
class User(Base):
'''User table'''
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
email = Column(String(255), unique=True)
username = Column(String(255))
password = Column(String(255))
access = Column(String(255))
last_login_at = Column(DateTime())
current_login_at = Column(DateTime())
last_login_ip = Column(String(100))
current_login_ip = Column(String(100))
login_count = Column(Integer)
active = Column(Boolean())
confirmed_at = Column(DateTime())
quovo_user_id = Column(String)
tradeshop_id = Column(String)
first_name = Column(String)
last_name = Column(String)
company = Column(String)
phone_number = Column(String)
business = relationship('Business', back_populates='user', uselist=False)
roles = relationship(
'Role', secondary='roles_users',
backref=backref('users', lazy='dynamic')
)
def as_dict(self):
return {
'id': self.id,
'email': self.email,
'username': self.username,
'company': self.company,
'first_name': self.first_name,
'last_name': self.last_name,
'phone_number': self.phone_number,
}
class Business(Base):
'''User business'''
__tablename__ = 'businesses'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
models = relationship("Model", back_populates="business",
cascade="all, delete, delete-orphan")
portfolios = relationship(
"Portfolio", back_populates="business", cascade="all, delete, delete-orphan")
trades = relationship("Trade", back_populates="business",
cascade="all, delete, delete-orphan")
accounts = relationship(
"Account", back_populates="business", cascade="all, delete, delete-orphan")
user = relationship('User', back_populates='business', single_parent=True,
cascade="all, delete, delete-orphan")
def as_dict(self):
return ({'id': self.id, 'user_id': self.user_id})
class TradePortfolio(Base):
__tablename__ = 'trade_portfolios'
id = Column(Integer, primary_key=True)
trade_id = Column(Integer, ForeignKey('trades.id'))
portfolio_id = Column(Integer, ForeignKey('portfolios.id'), nullable=False)
trade = relationship("Trade", back_populates="portfolios")
portfolio = relationship("Portfolio", back_populates="trades")
def as_dict(self):
result = {'id': self.id, 'trade_id': self.trade_id, 'portfolio_id': self.portfolio_id}
return result
class Price(Base):
__tablename__ = 'prices'
id = Column(Integer, primary_key=True)
account_position_id = Column(Integer, ForeignKey('account_positions.id'))
model_position_id = Column(Integer, ForeignKey('model_positions.id'))
trade_id = Column(Integer, ForeignKey('trades.id'))
symbol = Column(String, nullable=False)
price = Column(Float)
trade = relationship("Trade", back_populates="prices")
model_position = relationship(
"ModelPosition", back_populates="trade_prices")
account_position = relationship(
"AccountPosition", back_populates="trade_prices")
def as_dict(self):
return {'id': self.id, 'trade_id': self.trade_id, 'symbol': self.symbol, 'price': str(self.price)}
|
[
"sqlalchemy.DateTime",
"sqlalchemy.ForeignKey",
"sqlalchemy.orm.relationship",
"sqlalchemy.Boolean",
"sqlalchemy.Column",
"sqlalchemy.String",
"sqlalchemy.orm.backref",
"sqlalchemy.Integer"
] |
[((1030, 1063), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1036, 1063), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1403, 1418), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (1409, 1418), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1511, 1525), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1517, 1525), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1546, 1560), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1552, 1560), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1579, 1593), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1585, 1593), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1611, 1625), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1617, 1625), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1641, 1655), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1647, 1655), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1676, 1690), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1682, 1690), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1707, 1769), 'sqlalchemy.orm.relationship', 'relationship', (['"""Business"""'], {'back_populates': '"""user"""', 'uselist': '(False)'}), "('Business', back_populates='user', uselist=False)\n", (1719, 1769), False, 'from sqlalchemy.orm import relationship\n'), ((2325, 2358), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (2331, 2358), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((2444, 2535), 'sqlalchemy.orm.relationship', 'relationship', (['"""Model"""'], {'back_populates': '"""business"""', 'cascade': '"""all, delete, delete-orphan"""'}), "('Model', back_populates='business', cascade=\n 'all, delete, delete-orphan')\n", (2456, 2535), False, 'from sqlalchemy.orm import relationship\n'), ((2576, 2671), 'sqlalchemy.orm.relationship', 'relationship', (['"""Portfolio"""'], {'back_populates': '"""business"""', 'cascade': '"""all, delete, delete-orphan"""'}), "('Portfolio', back_populates='business', cascade=\n 'all, delete, delete-orphan')\n", (2588, 2671), False, 'from sqlalchemy.orm import relationship\n'), ((2691, 2782), 'sqlalchemy.orm.relationship', 'relationship', (['"""Trade"""'], {'back_populates': '"""business"""', 'cascade': '"""all, delete, delete-orphan"""'}), "('Trade', back_populates='business', cascade=\n 'all, delete, delete-orphan')\n", (2703, 2782), False, 'from sqlalchemy.orm import relationship\n'), ((2821, 2914), 'sqlalchemy.orm.relationship', 'relationship', (['"""Account"""'], {'back_populates': '"""business"""', 'cascade': '"""all, delete, delete-orphan"""'}), "('Account', back_populates='business', cascade=\n 'all, delete, delete-orphan')\n", (2833, 2914), False, 'from sqlalchemy.orm import relationship\n'), ((2932, 3042), 'sqlalchemy.orm.relationship', 'relationship', (['"""User"""'], {'back_populates': '"""business"""', 'single_parent': '(True)', 'cascade': '"""all, delete, delete-orphan"""'}), "('User', back_populates='business', single_parent=True, cascade\n ='all, delete, delete-orphan')\n", (2944, 3042), False, 'from sqlalchemy.orm import relationship\n'), ((3231, 3264), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3237, 3264), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((3416, 3466), 'sqlalchemy.orm.relationship', 'relationship', (['"""Trade"""'], {'back_populates': '"""portfolios"""'}), "('Trade', back_populates='portfolios')\n", (3428, 3466), False, 'from sqlalchemy.orm import relationship\n'), ((3484, 3534), 'sqlalchemy.orm.relationship', 'relationship', (['"""Portfolio"""'], {'back_populates': '"""trades"""'}), "('Portfolio', back_populates='trades')\n", (3496, 3534), False, 'from sqlalchemy.orm import relationship\n'), ((3744, 3777), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3750, 3777), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((4003, 4033), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (4009, 4033), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((4047, 4060), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (4053, 4060), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((4074, 4120), 'sqlalchemy.orm.relationship', 'relationship', (['"""Trade"""'], {'back_populates': '"""prices"""'}), "('Trade', back_populates='prices')\n", (4086, 4120), False, 'from sqlalchemy.orm import relationship\n'), ((4143, 4203), 'sqlalchemy.orm.relationship', 'relationship', (['"""ModelPosition"""'], {'back_populates': '"""trade_prices"""'}), "('ModelPosition', back_populates='trade_prices')\n", (4155, 4203), False, 'from sqlalchemy.orm import relationship\n'), ((4238, 4300), 'sqlalchemy.orm.relationship', 'relationship', (['"""AccountPosition"""'], {'back_populates': '"""trade_prices"""'}), "('AccountPosition', back_populates='trade_prices')\n", (4250, 4300), False, 'from sqlalchemy.orm import relationship\n'), ((592, 601), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (599, 601), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((654, 663), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (661, 663), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((665, 687), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (675, 687), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((722, 731), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (729, 731), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((733, 755), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""roles.id"""'], {}), "('roles.id')\n", (743, 755), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((830, 839), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (837, 839), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((878, 888), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (884, 888), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((929, 940), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (935, 940), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1084, 1095), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (1090, 1095), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1133, 1144), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (1139, 1144), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1169, 1180), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (1175, 1180), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1203, 1214), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (1209, 1214), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1244, 1254), 'sqlalchemy.DateTime', 'DateTime', ([], {}), '()\n', (1252, 1254), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1287, 1297), 'sqlalchemy.DateTime', 'DateTime', ([], {}), '()\n', (1295, 1297), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1327, 1338), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (1333, 1338), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1371, 1382), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (1377, 1382), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1440, 1449), 'sqlalchemy.Boolean', 'Boolean', ([], {}), '()\n', (1447, 1449), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1478, 1488), 'sqlalchemy.DateTime', 'DateTime', ([], {}), '()\n', (1486, 1488), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((2390, 2412), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (2400, 2412), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((3297, 3320), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""trades.id"""'], {}), "('trades.id')\n", (3307, 3320), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((3358, 3385), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""portfolios.id"""'], {}), "('portfolios.id')\n", (3368, 3385), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((3821, 3855), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""account_positions.id"""'], {}), "('account_positions.id')\n", (3831, 3855), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((3898, 3930), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""model_positions.id"""'], {}), "('model_positions.id')\n", (3908, 3930), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((3964, 3987), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""trades.id"""'], {}), "('trades.id')\n", (3974, 3987), False, 'from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Boolean\n'), ((1856, 1888), 'sqlalchemy.orm.backref', 'backref', (['"""users"""'], {'lazy': '"""dynamic"""'}), "('users', lazy='dynamic')\n", (1863, 1888), False, 'from sqlalchemy.orm import relationship, backref\n')]
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_run2_common_cff import run2_common
from Configuration.Eras.Modifier_run2_25ns_specific_cff import run2_25ns_specific
from Configuration.Eras.Modifier_stage2L1Trigger_cff import stage2L1Trigger
from Configuration.Eras.Modifier_ctpps_2016_cff import ctpps_2016
from Configuration.Eras.Modifier_run2_jme_2016_cff import run2_jme_2016
Run2_2016 = cms.ModifierChain(run2_common, run2_25ns_specific, stage2L1Trigger, ctpps_2016, run2_jme_2016)
|
[
"FWCore.ParameterSet.Config.ModifierChain"
] |
[((419, 517), 'FWCore.ParameterSet.Config.ModifierChain', 'cms.ModifierChain', (['run2_common', 'run2_25ns_specific', 'stage2L1Trigger', 'ctpps_2016', 'run2_jme_2016'], {}), '(run2_common, run2_25ns_specific, stage2L1Trigger,\n ctpps_2016, run2_jme_2016)\n', (436, 517), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
from threading import Thread, Event
from .stream import StreamIO
from ..buffers import SortingRingBuffer
from ..compat import queue
class SegmentedStreamWorker(Thread):
"""The general worker thread.
This thread is responsible for queueing up segments in the
writer thread.
"""
def __init__(self, reader):
self.closed = False
self.reader = reader
self.writer = reader.writer
self.stream = reader.stream
self.session = reader.stream.session
self.logger = reader.logger
self._last = 0
self._total_threads = self.session.get_option("hls-download-threads")
self._wait = None
Thread.__init__(self)
self.daemon = True
def close(self):
"""Shuts down the thread."""
if not self.closed:
self.logger.debug("Closing worker thread")
self.closed = True
if self._wait:
self._wait.set()
def wait(self, time):
"""Pauses the thread for a specified time.
Returns False if interrupted by another thread and True if the
time runs out normally.
"""
self._wait = Event()
return not self._wait.wait(time)
def iter_segments(self):
"""The iterator that generates segments for the worker thread.
Should be overridden by the inheriting class.
"""
return
yield
def _distribute_segment(self, segment):
# Simple segment delivery to each thread,
# a None segment would mean shutdown.
if segment is None:
for thread in self.writer:
thread.put(segment)
return
self.writer[self._last].put(segment)
if self._last == (self._total_threads - 1):
self._last = 0
else:
self._last += 1
def run(self):
for segment in self.iter_segments():
self._distribute_segment(segment)
# End of stream, tells the writer to exit
self._distribute_segment(None)
self.close()
class SegmentedStreamWriter(Thread):
"""The writer thread.
This thread is responsible for fetching segments, processing them
and finally writing the data to the buffer.
"""
def __init__(self, reader, size=10):
self.closed = False
self.queue = queue.Queue(size)
self.reader = reader
self.stream = reader.stream
self.session = reader.stream.session
self.logger = reader.logger
Thread.__init__(self)
self.daemon = True
def close(self):
"""Shuts down the thread."""
if not self.closed:
self.logger.debug("Closing writer thread")
self.closed = True
self.reader.buffer.close()
def put(self, segment):
"""Add a segment to the queue."""
while not self.closed:
try:
self.queue.put(segment, block=True, timeout=1)
break
except queue.Full:
continue
def write(self, segment):
"""Write the segment to the buffer.
Should be overridden by the inheriting class.
"""
pass
def run(self):
while not self.closed:
try:
segment = self.queue.get(block=True, timeout=0.5)
except queue.Empty:
continue
if segment is not None:
self.write(segment)
else:
break
self.close()
class SegmentedStreamReader(StreamIO):
__worker__ = SegmentedStreamWorker
__writer__ = SegmentedStreamWriter
def __init__(self, stream, timeout=60):
StreamIO.__init__(self)
self.session = stream.session
self.stream = stream
self.timeout = timeout
self.writer = []
def open(self):
buffer_size = self.session.get_option("ringbuffer-size")
downloader_threads = self.session.get_option("hls-download-threads")
self.buffer = SortingRingBuffer(buffer_size)
for i in range(downloader_threads):
self.writer.append(self.__writer__(self))
self.worker = self.__worker__(self)
for thread in self.writer:
thread.start()
self.worker.start()
def close(self):
self.worker.close()
for thread in self.writer:
thread.close()
for thread in [self.worker] + self.writer:
if thread.is_alive():
thread.join()
self.buffer.close()
def _any_alive(self):
# return is_alive() for any writer thread that's still running.
alive = False
for thread in self.writer:
if not alive:
alive = thread.is_alive()
return alive
def read(self, size):
if not self.buffer:
return b""
return self.buffer.read(size, block=self._any_alive(),
timeout=self.timeout)
|
[
"threading.Thread.__init__",
"threading.Event"
] |
[((678, 699), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (693, 699), False, 'from threading import Thread, Event\n'), ((1164, 1171), 'threading.Event', 'Event', ([], {}), '()\n', (1169, 1171), False, 'from threading import Thread, Event\n'), ((2518, 2539), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (2533, 2539), False, 'from threading import Thread, Event\n')]
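# A sketch (not from the original module) of the subclassing contract the
# docstrings above describe: iter_segments() on the worker and write() on the
# writer are the methods an inheriting stream implementation overrides. The
# class and field names here are illustrative only and assume the base classes
# above are in scope.
class MyStreamWorker(SegmentedStreamWorker):
    def iter_segments(self):
        # Yield whatever objects the matching writer understands.
        for n in range(10):
            yield {"sequence": n, "uri": "segment-{0}.ts".format(n)}

class MyStreamWriter(SegmentedStreamWriter):
    def write(self, segment):
        # Fetch segment["uri"] and feed the bytes into self.reader.buffer here.
        pass

class MyStreamReader(SegmentedStreamReader):
    __worker__ = MyStreamWorker
    __writer__ = MyStreamWriter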
|
#!/usr/bin/env python3
import itertools
from docs import εὕρηκα
def render_graph(𝛹):
color_palette = ["#72e5ef", "#fb2076", "#69ef7b", "#f365e7",
"#54a32f", "#bf9fff", "#c0e15c", "#753fc2",
"#e78607", "#8a0458", "#1c5e39", "#e46981",
"#509f87", "#db3c18", "#18519b", "#f2d174"]
def hex_format(𝑎):
return f"{𝑎:02x}"
def render_node(𝑎):
color = color_palette[𝛹.index(𝑎)]
print(f'"{hex_format(𝑎)}" [style="filled" fillcolor="{color}" color="{color}" fontcolor="{color}"]')
def render_edge(𝑎, 𝑏):
color = color_palette[𝛹.index(𝑎)]
print(f'"{hex_format(𝑎)}" -> "{hex_format(𝑏)}" [arrowsize=0.4 color="{color}"];')
print('strict digraph {')
print('layout=circo pad=0.2 mindist=1.8')
for 𝑥 in 𝛹:
render_node(𝑥)
for 𝑎, 𝑏, 𝑐 in itertools.product(𝛹, repeat=3):
𝑥 = 𝑎 ^ 𝑏 ^ 𝑐
render_edge(𝑎, 𝑥)
render_edge(𝑏, 𝑥)
render_edge(𝑐, 𝑥)
print('}')
if __name__ == "__main__":
𝛹 = εὕρηκα.get_magic_numbers()
render_graph(𝛹)
|
[
"docs.εὕρηκα.get_magic_numbers",
"itertools.product"
] |
[((878, 908), 'itertools.product', 'itertools.product', (['Ψ'], {'repeat': '(3)'}), '(Ψ, repeat=3)\n', (895, 908), False, 'import itertools\n'), ((1056, 1082), 'docs.εὕρηκα.get_magic_numbers', 'εὕρηκα.get_magic_numbers', ([], {}), '()\n', (1080, 1082), False, 'from docs import εὕρηκα\n')]
|
import shutil
import pytest # noqa
from pytest_factoryboy import register
from atv.tests.conftest import * # noqa
from services.tests.conftest import * # noqa
from users.tests.conftest import * # noqa
from .factories import AttachmentFactory, DocumentFactory
@pytest.fixture(autouse=True)
def custom_media_dir_for_files(settings, request):
settings.MEDIA_ROOT = "test_media"
# Teardown to remove the uploaded test files from the system
def remove_uploaded_files():
shutil.rmtree("test_media", ignore_errors=True)
request.addfinalizer(remove_uploaded_files)
register(DocumentFactory)
register(AttachmentFactory)
|
[
"shutil.rmtree",
"pytest.fixture",
"pytest_factoryboy.register"
] |
[((269, 297), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (283, 297), False, 'import pytest\n'), ((594, 619), 'pytest_factoryboy.register', 'register', (['DocumentFactory'], {}), '(DocumentFactory)\n', (602, 619), False, 'from pytest_factoryboy import register\n'), ((620, 647), 'pytest_factoryboy.register', 'register', (['AttachmentFactory'], {}), '(AttachmentFactory)\n', (628, 647), False, 'from pytest_factoryboy import register\n'), ((495, 542), 'shutil.rmtree', 'shutil.rmtree', (['"""test_media"""'], {'ignore_errors': '(True)'}), "('test_media', ignore_errors=True)\n", (508, 542), False, 'import shutil\n')]
|
import math
from torch.optim.lr_scheduler import _LRScheduler
class CosineAnnealingWithRestartsLR(_LRScheduler):
"""Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr and
:math:`T_{cur}` is the number of epochs since the last restart in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{max}}\pi))
When last_epoch=-1, sets initial lr as lr.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_. This implements
the cosine annealing part of SGDR, the restarts and number of iterations multiplier.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
T_mult (float): Multiply T_max by this number after each restart. Default: 1.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1, T_mult=1):
self.T_max = T_max
self.T_mult = T_mult
self.restart_every = T_max
self.eta_min = eta_min
self.restarts = 0
self.restarted_at = 0
super().__init__(optimizer, last_epoch)
def restart(self):
self.restart_every *= self.T_mult
self.restarted_at = self.last_epoch
def cosine(self, base_lr):
return self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.step_n / self.restart_every)) / 2
@property
def step_n(self):
return self.last_epoch - self.restarted_at
def get_lr(self):
if self.step_n >= self.restart_every:
self.restart()
return [self.cosine(base_lr) for base_lr in self.base_lrs]
def get_weight_decay(self):
return [self.cosine(base_weight_decay) if base_weight_decay else None
for base_weight_decay in self.base_weight_decays]
|
[
"math.cos"
] |
[((1648, 1700), 'math.cos', 'math.cos', (['(math.pi * self.step_n / self.restart_every)'], {}), '(math.pi * self.step_n / self.restart_every)\n', (1656, 1700), False, 'import math\n')]
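# A hedged usage sketch (not part of the original sample): driving the scheduler
# above with a throwaway model and SGD optimizer. All numbers are arbitrary.
import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = CosineAnnealingWithRestartsLR(optimizer, T_max=10, T_mult=2, eta_min=0.001)

for epoch in range(30):
    # ...forward/backward pass would go here...
    optimizer.step()
    scheduler.step()   # cosine-anneals the lr; restarts after 10 epochs, then after 20 more
    print(epoch, [group["lr"] for group in optimizer.param_groups])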
|
from layered_vision.utils.dist import get_required_and_extras
class TestGetRequiredAndExtras:
def test_should_group_single_requirement(self):
assert get_required_and_extras(
[('req1==1.2.3', ['group1'])]
) == (
[],
{'group1': ['req1==1.2.3'], 'all': ['req1==1.2.3']}
)
def test_should_fallback_to_default(self):
assert get_required_and_extras(
[('req1==1.2.3', [None])]
) == (
['req1==1.2.3'],
{'all': ['req1==1.2.3']}
)
def test_should_group_multiple_requirement(self):
assert get_required_and_extras(
[('req1==1.2.3', ['group1']), ('req2==1.2.3', ['group2']), ('req3==1.2.3', [None])]
) == (
['req3==1.2.3'],
{
'group1': ['req1==1.2.3'],
'group2': ['req2==1.2.3'],
'all': ['req1==1.2.3', 'req2==1.2.3', 'req3==1.2.3']
}
)
|
[
"layered_vision.utils.dist.get_required_and_extras"
] |
[((163, 217), 'layered_vision.utils.dist.get_required_and_extras', 'get_required_and_extras', (["[('req1==1.2.3', ['group1'])]"], {}), "([('req1==1.2.3', ['group1'])])\n", (186, 217), False, 'from layered_vision.utils.dist import get_required_and_extras\n'), ((398, 448), 'layered_vision.utils.dist.get_required_and_extras', 'get_required_and_extras', (["[('req1==1.2.3', [None])]"], {}), "([('req1==1.2.3', [None])])\n", (421, 448), False, 'from layered_vision.utils.dist import get_required_and_extras\n'), ((622, 735), 'layered_vision.utils.dist.get_required_and_extras', 'get_required_and_extras', (["[('req1==1.2.3', ['group1']), ('req2==1.2.3', ['group2']), ('req3==1.2.3',\n [None])]"], {}), "([('req1==1.2.3', ['group1']), ('req2==1.2.3', [\n 'group2']), ('req3==1.2.3', [None])])\n", (645, 735), False, 'from layered_vision.utils.dist import get_required_and_extras\n')]
|
# more or less the same simulation, but split up into chunks that fit into memory
# for large states (CA, IA, KS, OK, TX)
# chunks of size 2000 (2000 locations are simulated per chunk and written to a temporary file)
location_chunk = 2000
import argparse
import datetime
import glob
import math
import numpy as np
import os
import rasterio
import statsmodels.api as sm
import pandas as pd
import statsmodels.api as sm
import time
import xarray as xr
import sys
sys.path.append('../')
from functools import reduce
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
from utils import windpower_simulation_era5_large
from dask.diagnostics import ProgressBar
ProgressBar().register()
from paths_usa import *
# get state and GWA version
parser = argparse.ArgumentParser(description='Insert state and optionally GWA')
parser.add_argument('-state')
parser.add_argument('-GWA')
args = parser.parse_args()
state = args.state
if(args.GWA == None):
GWA = "3"
else:
GWA = args.GWA
if GWA == "2":
results_path = results_path + '/results_GWA2'
if not os.path.exists(results_path):
os.mkdir(results_path)
startGWA = '1987'
endGWA = '2016'
else:
startGWA = '2008'
endGWA = '2017'
# define start date for simulation
startyearmonth = '2000-12'
outfile = results_path + '/windpower_??_ERA5_GWA.nc'
if results_path + '/windpower_' + state + '_ERA5_GWA.nc' not in glob.glob(outfile):
wind = xr.open_mfdataset(era_path + "/eff_ws/era5_wind_USA_*.nc", chunks = {'time': 38})
alpha = xr.open_mfdataset(era_path + "/eff_ws/era5_alpha_USA_*.nc", chunks = {'time': 38})
# with GWA
turbine_data_era_gwa = pd.read_csv(usa_path + '/turbine_data_era_gwa' + GWA + '.csv', parse_dates=['commissioning'])
if GWA == "3":
if state == 'PR':
GWA = xr.open_rasterio(usa_path+'/GWA/GWA3_PR100m.tif')
else:
GWA = xr.open_rasterio(usa_path+'/GWA/GWA3_USA100m.tif')
else:
if state == 'AK':
GWA = xr.open_rasterio(usa_path+'/GWA/GWA_AK100m.tif')
elif state == 'HI':
GWA = xr.open_rasterio(usa_path+'/GWA/GWA_HI100m.tif')
elif state == 'PR':
GWA = xr.open_rasterio(usa_path+'/GWA/GWA_PR100m.tif')
else:
GWA = xr.open_rasterio(usa_path+'/GWA/GWA_USA100m.tif')
ind = turbine_data_era_gwa.state == state
print('calculating ERA5 ' + state + ' GWA')
t1 = time.time()
# number of locations in state
dat_len = sum(ind)
numit = round(dat_len/location_chunk+0.5) # number of necessary iterations
i1 = 0
i2 = i1 + location_chunk
for it in range(numit):
outfile_temp = results_path + "/wp_"+state+"_ERA5_GWA_temp" + str(it+1) +".nc"
if i2 > dat_len:
i2 = dat_len
if outfile_temp not in glob.glob(results_path + "/wp_"+state+"_ERA5_GWA_temp*.nc"):
wps = windpower_simulation_era5_large(wind.wh100,
alpha.alpha,
turbine_data_era_gwa.height[ind].values[i1:i2],
turbine_data_era_gwa.capacity[ind].values[i1:i2],
turbine_data_era_gwa.sp[ind].values[i1:i2],
turbine_data_era_gwa.lon[ind].values[i1:i2],
turbine_data_era_gwa.lat[ind].values[i1:i2],
pd.to_datetime(turbine_data_era_gwa.commissioning[ind].values[i1:i2]).year.values,
startyearmonth,
GWA,
startGWA,
endGWA)
# adapt numbers of locations in dataset
wps = wps.assign_coords(location = np.arange(i1,i2))
# save temporary file
print('saving to '+results_path + "/wp_"+state+"_ERA5_GWA_temp" + str(it+1) +".nc")
wps.to_dataset(name='wp').to_netcdf(results_path + "/wp_"+state+"_ERA5_GWA_temp" + str(it+1) +".nc")
print('saved to '+results_path + "/wp_"+state+"_ERA5_GWA_temp" + str(it+1) +".nc")
wps.close()
del(wps)
i1 = i2
i2 = i2 + location_chunk
print(round(i1/dat_len,3)*100,'% done in ',state)
# merge and delete temporary files
wps = xr.open_mfdataset(results_path + "/wp_"+state+"_ERA5_GWA_temp*.nc", chunks = {'time': 100})
    print('saving to '+results_path + "/windpower_"+state+"_ERA5_GWA.nc")
wps.drop(['x','y']).to_netcdf(results_path + "/windpower_"+state+"_ERA5_GWA.nc")
print('saved to '+results_path + "/windpower_"+state+"_ERA5_GWA.nc")
t2 = time.time()
# remove temporary files
for file in glob.glob(results_path + "/wp_"+state+"_ERA5_GWA_temp*.nc"):
os.remove(file)
print(t2-t1)
|
[
"sys.path.append",
"os.mkdir",
"os.remove",
"argparse.ArgumentParser",
"pandas.read_csv",
"xarray.open_rasterio",
"os.path.exists",
"time.time",
"dask.diagnostics.ProgressBar",
"numpy.arange",
"pandas.to_datetime",
"glob.glob",
"xarray.open_mfdataset"
] |
[((457, 479), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (472, 479), False, 'import sys\n'), ((766, 836), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Insert state and optionally GWA"""'}), "(description='Insert state and optionally GWA')\n", (789, 836), False, 'import argparse\n'), ((1410, 1428), 'glob.glob', 'glob.glob', (['outfile'], {}), '(outfile)\n', (1419, 1428), False, 'import glob\n'), ((1442, 1521), 'xarray.open_mfdataset', 'xr.open_mfdataset', (["(era_path + '/eff_ws/era5_wind_USA_*.nc')"], {'chunks': "{'time': 38}"}), "(era_path + '/eff_ws/era5_wind_USA_*.nc', chunks={'time': 38})\n", (1459, 1521), True, 'import xarray as xr\n'), ((1536, 1621), 'xarray.open_mfdataset', 'xr.open_mfdataset', (["(era_path + '/eff_ws/era5_alpha_USA_*.nc')"], {'chunks': "{'time': 38}"}), "(era_path + '/eff_ws/era5_alpha_USA_*.nc', chunks={'time': 38}\n )\n", (1553, 1621), True, 'import xarray as xr\n'), ((1661, 1759), 'pandas.read_csv', 'pd.read_csv', (["(usa_path + '/turbine_data_era_gwa' + GWA + '.csv')"], {'parse_dates': "['commissioning']"}), "(usa_path + '/turbine_data_era_gwa' + GWA + '.csv', parse_dates=\n ['commissioning'])\n", (1672, 1759), True, 'import pandas as pd\n'), ((2431, 2442), 'time.time', 'time.time', ([], {}), '()\n', (2440, 2442), False, 'import time\n'), ((4574, 4671), 'xarray.open_mfdataset', 'xr.open_mfdataset', (["(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')"], {'chunks': "{'time': 100}"}), "(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc',\n chunks={'time': 100})\n", (4591, 4671), True, 'import xarray as xr\n'), ((4926, 4937), 'time.time', 'time.time', ([], {}), '()\n', (4935, 4937), False, 'import time\n'), ((4988, 5051), 'glob.glob', 'glob.glob', (["(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')"], {}), "(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')\n", (4997, 5051), False, 'import glob\n'), ((678, 691), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (689, 691), False, 'from dask.diagnostics import ProgressBar\n'), ((1079, 1107), 'os.path.exists', 'os.path.exists', (['results_path'], {}), '(results_path)\n', (1093, 1107), False, 'import os\n'), ((1117, 1139), 'os.mkdir', 'os.mkdir', (['results_path'], {}), '(results_path)\n', (1125, 1139), False, 'import os\n'), ((5057, 5072), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (5066, 5072), False, 'import os\n'), ((1818, 1869), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA3_PR100m.tif')"], {}), "(usa_path + '/GWA/GWA3_PR100m.tif')\n", (1834, 1869), True, 'import xarray as xr\n'), ((1900, 1952), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA3_USA100m.tif')"], {}), "(usa_path + '/GWA/GWA3_USA100m.tif')\n", (1916, 1952), True, 'import xarray as xr\n'), ((2005, 2055), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA_AK100m.tif')"], {}), "(usa_path + '/GWA/GWA_AK100m.tif')\n", (2021, 2055), True, 'import xarray as xr\n'), ((2816, 2879), 'glob.glob', 'glob.glob', (["(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')"], {}), "(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')\n", (2825, 2879), False, 'import glob\n'), ((2100, 2150), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA_HI100m.tif')"], {}), "(usa_path + '/GWA/GWA_HI100m.tif')\n", (2116, 2150), True, 'import xarray as xr\n'), ((2195, 2245), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA_PR100m.tif')"], {}), "(usa_path + '/GWA/GWA_PR100m.tif')\n", (2211, 2245), True, 'import xarray as xr\n'), ((2276, 2327), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA_USA100m.tif')"], {}), "(usa_path + '/GWA/GWA_USA100m.tif')\n", (2292, 2327), True, 'import xarray as xr\n'), ((3955, 3972), 'numpy.arange', 'np.arange', (['i1', 'i2'], {}), '(i1, i2)\n', (3964, 3972), True, 'import numpy as np\n'), ((3534, 3603), 'pandas.to_datetime', 'pd.to_datetime', (['turbine_data_era_gwa.commissioning[ind].values[i1:i2]'], {}), '(turbine_data_era_gwa.commissioning[ind].values[i1:i2])\n', (3548, 3603), True, 'import pandas as pd\n')]
|
"""
Receive data over a TCP connection
"""
# TCPclient.py
import socket
class TcpClient:
def __init__(self):
        self.target_host = "192.168.3.11" # server-side address
        self.target_port = 3389 # must match the port the server listens on
def start(self):
while True:
client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client.connect((self.target_host,self.target_port))
data = input(">")
if not data:
break
client.send(data.encode())
response = client.recv(1024)
print(response)
client.close()
if __name__ == '__main__':
obj = TcpClient()
obj.start()
#
#
# import socket
# import threading
# client_list = []
# def read_server(client_socket):
# while True:
# content = client_socket.recv(2048).decode('UTF-8')
# if content is not None:
# print("content:",content)
#
# client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# ## 绑定USR-K7 开启的IP地址和端口号
# client_socket.connect(('192.168.0.7',23))
# threading.Thread(target=read_server,args=(client_socket,)).start()
# while True:
# line = input('')
# if line is None or line =='exit':
# break
# client_socket.send(line.encode("UTF-8"))
|
[
"socket.socket"
] |
[((256, 305), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (269, 305), False, 'import socket\n')]
|
from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('news', views.news, name='flash_info'),
path('login', views.login_view, name='login'),
path('logout', views.logout_view, name='logout'),
path('articles/<int:id>', views.article),
# path('qui', views.qui, name='qui'),
# path('pourquoi', views.pourquoi, name='pourquoi'),
]
|
[
"django.urls.path"
] |
[((114, 147), 'django.urls.path', 'path', (['""""""', 'views.home'], {'name': '"""home"""'}), "('', views.home, name='home')\n", (118, 147), False, 'from django.urls import path, include\n'), ((153, 196), 'django.urls.path', 'path', (['"""news"""', 'views.news'], {'name': '"""flash_info"""'}), "('news', views.news, name='flash_info')\n", (157, 196), False, 'from django.urls import path, include\n'), ((202, 247), 'django.urls.path', 'path', (['"""login"""', 'views.login_view'], {'name': '"""login"""'}), "('login', views.login_view, name='login')\n", (206, 247), False, 'from django.urls import path, include\n'), ((253, 301), 'django.urls.path', 'path', (['"""logout"""', 'views.logout_view'], {'name': '"""logout"""'}), "('logout', views.logout_view, name='logout')\n", (257, 301), False, 'from django.urls import path, include\n'), ((307, 347), 'django.urls.path', 'path', (['"""articles/<int:id>"""', 'views.article'], {}), "('articles/<int:id>', views.article)\n", (311, 347), False, 'from django.urls import path, include\n')]
|
"""OVK learning, unit tests.
The :mod:`sklearn.tests.test_semisup` tests the semisup module.
"""
import operalib as ovk
import numpy as np
def test_semisup_linop():
"""Test ovk.semisup.SemisupLinop."""
np.random.seed()
n = 100
p = 5
lbda2 = .1
# supervised indices
B = np.random.randint(2, size=(n)).astype(np.bool)
# Graph Laplacian
n_unsup = np.sum(~B)
L = np.random.randn(n_unsup, n_unsup)
L = np.dot(L, L.T)
U, J = ovk.ridge._SemisupLinop(lbda2, B, L, p).gen()
y = np.random.randn(n, p)
# lbda2 * np.dot(L, y[~B, :]).ravel()
# print(np.concatenate((y[B, :].ravel(),
# lbda2 * np.dot(L, y[~B, :]).ravel())))
res = np.empty((n, p))
res[B, :] = y[B, :]
res[~B] = 0
assert np.allclose(J * y.ravel(), res.ravel())
res = np.empty((n, p))
res[B, :] = y[B, :]
res[~B] = lbda2 * np.dot(L, y[~B, :])
assert np.allclose(U * y.ravel(), res.ravel())
|
[
"numpy.random.seed",
"numpy.sum",
"numpy.random.randn",
"numpy.empty",
"numpy.random.randint",
"numpy.dot",
"operalib.ridge._SemisupLinop"
] |
[((210, 226), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (224, 226), True, 'import numpy as np\n'), ((383, 393), 'numpy.sum', 'np.sum', (['(~B)'], {}), '(~B)\n', (389, 393), True, 'import numpy as np\n'), ((402, 435), 'numpy.random.randn', 'np.random.randn', (['n_unsup', 'n_unsup'], {}), '(n_unsup, n_unsup)\n', (417, 435), True, 'import numpy as np\n'), ((444, 458), 'numpy.dot', 'np.dot', (['L', 'L.T'], {}), '(L, L.T)\n', (450, 458), True, 'import numpy as np\n'), ((526, 547), 'numpy.random.randn', 'np.random.randn', (['n', 'p'], {}), '(n, p)\n', (541, 547), True, 'import numpy as np\n'), ((713, 729), 'numpy.empty', 'np.empty', (['(n, p)'], {}), '((n, p))\n', (721, 729), True, 'import numpy as np\n'), ((832, 848), 'numpy.empty', 'np.empty', (['(n, p)'], {}), '((n, p))\n', (840, 848), True, 'import numpy as np\n'), ((895, 914), 'numpy.dot', 'np.dot', (['L', 'y[~B, :]'], {}), '(L, y[~B, :])\n', (901, 914), True, 'import numpy as np\n'), ((299, 327), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'n'}), '(2, size=n)\n', (316, 327), True, 'import numpy as np\n'), ((471, 510), 'operalib.ridge._SemisupLinop', 'ovk.ridge._SemisupLinop', (['lbda2', 'B', 'L', 'p'], {}), '(lbda2, B, L, p)\n', (494, 510), True, 'import operalib as ovk\n')]
|
import canmatrix.formats
from canmatrix.canmatrix import CanId
def list_pgn(db):
"""
:param db:
:return: pgn and id
"""
id = [x.Id for x in db.frames]
r = [CanId(t).tuples() for t in id]
return [t[1] for t in r], id
def ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y):
for idx, pgnx in zip(id_x, pgn_x):
for idy, pgny in zip(id_y, pgn_y):
if pgnx == pgny:
yield (idx, idy)
def join_frame_by_signal_startbit(files):
targetDb = next(iter(canmatrix.formats.loadp(files.pop(0)).values()))
pgn_x, id_x = list_pgn(db=targetDb)
for f in files:
sourceDb = next(iter(canmatrix.formats.loadp(f).values()))
pgn_y, id_y = list_pgn(db=sourceDb)
same_pgn = ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y)
for idx, idy in same_pgn:
# print("{0:#x} {1:#x}".format(idx, idy))
targetFr = targetDb.frameById(idx)
sourceFr = sourceDb.frameById(idy)
to_add = []
for sig_t in targetFr.signals:
for sig_s in sourceFr.signals:
# print(sig.name)
if sig_t.startbit == sig_s.startbit:
# print("\t{0} {1}".format(sig_t.name, sig_s.name))
to_add.append(sig_s)
for s in to_add:
targetFr.addSignal(s)
return targetDb
def renameFrameWithID(sourceDb):
for frameSc in sourceDb.frames:
_, pgn, sa = CanId(frameSc.Id).tuples()
exten = "__{pgn:#04X}_{sa:#02X}_{sa:03d}d".format(pgn=pgn, sa=sa)
new_name = frameSc.name + exten
# print(new_name)
frameSc.name = new_name
def renameFrameWithSAEacronyme(sourceDb, targetDb):
pgn_x, id_x = list_pgn(db=targetDb)
pgn_y, id_y = list_pgn(db=sourceDb)
same_pgn = ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y)
for idx, idy in same_pgn:
targetFr = targetDb.frameById(idx)
sourceFr = sourceDb.frameById(idy)
new_name = sourceFr.name + "__" + targetFr.name
targetFr.name = new_name
def join_frame_for_manufacturer(db, files):
#targetDb = next(iter(im.importany(files.pop(0)).values()))
pgn_x, id_x = list_pgn(db=db)
for f in files:
sourceDb = next(iter(canmatrix.formats.loadp(f).values()))
pgn_y, id_y = list_pgn(db=sourceDb)
same_pgn = ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y)
for idx, idy in same_pgn:
# print("{0:#x} {1:#x}".format(idx, idy))
targetFr = db.frameById(idx)
sourceFr = sourceDb.frameById(idy)
_, pgn, sa = CanId(targetFr.Id).tuples()
if(sa < 128):
print('less', targetFr.name)
to_add = []
for sig_s in sourceFr.signals:
new_name = "{name}_{pgn:#04x}_{sa:03}".format(
name=sig_s.name, pgn=pgn, sa=sa)
sig_s.name = new_name
to_add.append(sig_s)
for s in to_add:
targetFr.addSignal(s)
|
[
"canmatrix.canmatrix.CanId"
] |
[((183, 191), 'canmatrix.canmatrix.CanId', 'CanId', (['t'], {}), '(t)\n', (188, 191), False, 'from canmatrix.canmatrix import CanId\n'), ((1497, 1514), 'canmatrix.canmatrix.CanId', 'CanId', (['frameSc.Id'], {}), '(frameSc.Id)\n', (1502, 1514), False, 'from canmatrix.canmatrix import CanId\n'), ((2647, 2665), 'canmatrix.canmatrix.CanId', 'CanId', (['targetFr.Id'], {}), '(targetFr.Id)\n', (2652, 2665), False, 'from canmatrix.canmatrix import CanId\n')]
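# A small illustration (not part of the original module) of ids_sharing_same_pgn
# defined above, using made-up IDs and PGNs; only the pair sharing PGN 0xFEF1 is
# yielded.
id_x, pgn_x = [0x18FEF100, 0x18FEE000], [0xFEF1, 0xFEE0]
id_y, pgn_y = [0x98FEF117, 0x0CFE6CEE], [0xFEF1, 0xFE6C]
print(list(ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y)))  # one (idx, idy) pair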
|
# /usr/bin/env python3
import numpy as np
def operadores():
a=np.random.randint(3,10,size=10)
b=np.random.randint(4,78,size=10)
print(np.add(a,b))
print(np.subtract(b,a))
    print(np.negative(a))  # np.negative is unary; passing b as a second argument would use it as the out array and overwrite it
print(np.multiply(a,b))
print(np.divide(a,b))
print(np.floor_divide(b,a))
print(np.power(b,a))
print(np.mod(b,a))
    # abs function
    # abs(): built into Python
    # np.abs() or np.absolute(): built into numpy
if __name__=="__main__":
operadores()
|
[
"numpy.divide",
"numpy.multiply",
"numpy.subtract",
"numpy.floor_divide",
"numpy.power",
"numpy.negative",
"numpy.mod",
"numpy.random.randint",
"numpy.add"
] |
[((69, 102), 'numpy.random.randint', 'np.random.randint', (['(3)', '(10)'], {'size': '(10)'}), '(3, 10, size=10)\n', (86, 102), True, 'import numpy as np\n'), ((108, 141), 'numpy.random.randint', 'np.random.randint', (['(4)', '(78)'], {'size': '(10)'}), '(4, 78, size=10)\n', (125, 141), True, 'import numpy as np\n'), ((151, 163), 'numpy.add', 'np.add', (['a', 'b'], {}), '(a, b)\n', (157, 163), True, 'import numpy as np\n'), ((175, 192), 'numpy.subtract', 'np.subtract', (['b', 'a'], {}), '(b, a)\n', (186, 192), True, 'import numpy as np\n'), ((204, 221), 'numpy.negative', 'np.negative', (['a', 'b'], {}), '(a, b)\n', (215, 221), True, 'import numpy as np\n'), ((233, 250), 'numpy.multiply', 'np.multiply', (['a', 'b'], {}), '(a, b)\n', (244, 250), True, 'import numpy as np\n'), ((262, 277), 'numpy.divide', 'np.divide', (['a', 'b'], {}), '(a, b)\n', (271, 277), True, 'import numpy as np\n'), ((289, 310), 'numpy.floor_divide', 'np.floor_divide', (['b', 'a'], {}), '(b, a)\n', (304, 310), True, 'import numpy as np\n'), ((322, 336), 'numpy.power', 'np.power', (['b', 'a'], {}), '(b, a)\n', (330, 336), True, 'import numpy as np\n'), ((348, 360), 'numpy.mod', 'np.mod', (['b', 'a'], {}), '(b, a)\n', (354, 360), True, 'import numpy as np\n')]
|
from django.urls import path
from . import views
urlpatterns = [
path('rt_value', views.get_rt_value),
path('generate_json',views.generate_json),
path('latest_rt_value', views.latest_rt),
path('before_15_rt', views.before_15_rt),
path('doubling_growth_value', views.doubling_growth_data),
path('latest_doubling_value', views.latest_doubling_value),
path('get_percentages', views.get_percentages),
path('get_comparison_doubling', views.get_comparison_doubling),
path('get_comparison_growth', views.get_comparison_growth),
]
|
[
"django.urls.path"
] |
[((71, 107), 'django.urls.path', 'path', (['"""rt_value"""', 'views.get_rt_value'], {}), "('rt_value', views.get_rt_value)\n", (75, 107), False, 'from django.urls import path\n'), ((113, 155), 'django.urls.path', 'path', (['"""generate_json"""', 'views.generate_json'], {}), "('generate_json', views.generate_json)\n", (117, 155), False, 'from django.urls import path\n'), ((160, 200), 'django.urls.path', 'path', (['"""latest_rt_value"""', 'views.latest_rt'], {}), "('latest_rt_value', views.latest_rt)\n", (164, 200), False, 'from django.urls import path\n'), ((206, 246), 'django.urls.path', 'path', (['"""before_15_rt"""', 'views.before_15_rt'], {}), "('before_15_rt', views.before_15_rt)\n", (210, 246), False, 'from django.urls import path\n'), ((252, 309), 'django.urls.path', 'path', (['"""doubling_growth_value"""', 'views.doubling_growth_data'], {}), "('doubling_growth_value', views.doubling_growth_data)\n", (256, 309), False, 'from django.urls import path\n'), ((315, 373), 'django.urls.path', 'path', (['"""latest_doubling_value"""', 'views.latest_doubling_value'], {}), "('latest_doubling_value', views.latest_doubling_value)\n", (319, 373), False, 'from django.urls import path\n'), ((379, 425), 'django.urls.path', 'path', (['"""get_percentages"""', 'views.get_percentages'], {}), "('get_percentages', views.get_percentages)\n", (383, 425), False, 'from django.urls import path\n'), ((431, 493), 'django.urls.path', 'path', (['"""get_comparison_doubling"""', 'views.get_comparison_doubling'], {}), "('get_comparison_doubling', views.get_comparison_doubling)\n", (435, 493), False, 'from django.urls import path\n'), ((499, 557), 'django.urls.path', 'path', (['"""get_comparison_growth"""', 'views.get_comparison_growth'], {}), "('get_comparison_growth', views.get_comparison_growth)\n", (503, 557), False, 'from django.urls import path\n')]
|
# coding:utf8
import json
import os
import re
import requests
STOCK_CODE_PATH = 'stock_codes.conf'
def update_stock_codes():
"""获取所有股票 ID 到 all_stock_code 目录下"""
all_stock_codes_url = 'http://www.shdjt.com/js/lib/astock.js'
grep_stock_codes = re.compile('~(\d+)`')
response = requests.get(all_stock_codes_url)
all_stock_codes = grep_stock_codes.findall(response.text)
with open(stock_code_path(), 'w') as f:
f.write(json.dumps(dict(stock=all_stock_codes)))
def get_stock_codes(realtime=False):
"""获取所有股票 ID 到 all_stock_code 目录下"""
if realtime:
all_stock_codes_url = 'http://www.shdjt.com/js/lib/astock.js'
grep_stock_codes = re.compile('~(\d+)`')
response = requests.get(all_stock_codes_url)
stock_codes = grep_stock_codes.findall(response.text)
with open(stock_code_path(), 'w') as f:
f.write(json.dumps(dict(stock=stock_codes)))
return stock_codes
else:
with open(stock_code_path()) as f:
return json.load(f)['stock']
def stock_code_path():
return os.path.join(os.path.dirname(__file__), STOCK_CODE_PATH)
|
[
"os.path.dirname",
"json.load",
"requests.get",
"re.compile"
] |
[((259, 281), 're.compile', 're.compile', (['"""~(\\\\d+)`"""'], {}), "('~(\\\\d+)`')\n", (269, 281), False, 'import re\n'), ((296, 329), 'requests.get', 'requests.get', (['all_stock_codes_url'], {}), '(all_stock_codes_url)\n', (308, 329), False, 'import requests\n'), ((692, 714), 're.compile', 're.compile', (['"""~(\\\\d+)`"""'], {}), "('~(\\\\d+)`')\n", (702, 714), False, 'import re\n'), ((733, 766), 'requests.get', 'requests.get', (['all_stock_codes_url'], {}), '(all_stock_codes_url)\n', (745, 766), False, 'import requests\n'), ((1122, 1147), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1137, 1147), False, 'import os\n'), ((1051, 1063), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1060, 1063), False, 'import json\n')]
|
# coding: utf-8
import struct
from common.constDefine import *
from logger.log import logger
def send(cmd, proto, session, table=None):
result = proto.SerializeToString()
fmt = ">iib{0}s".format(len(result))
data = struct.pack(fmt, len(result)+9, cmd, COMMUNICATION_TYPE, result)
try:
session.transport.write(data)
except Exception as e:
logger.error(e)
|
[
"logger.log.logger.error"
] |
[((377, 392), 'logger.log.logger.error', 'logger.error', (['e'], {}), '(e)\n', (389, 392), False, 'from logger.log import logger\n')]
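# An illustrative sketch (not part of the original module) of the wire format
# send() above produces: big-endian 4-byte total length (payload + 9 header bytes),
# 4-byte command, 1-byte communication type, then the serialized payload. The
# command value 42 and type value 1 are stand-ins for the real constants.
import struct

payload = b"hello"
frame = struct.pack(">iib{0}s".format(len(payload)), len(payload) + 9, 42, 1, payload)
print(frame.hex())  # 0000000e0000002a0168656c6c6f -> declared length 14 = 5 + 9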
|
import os
from pathlib import Path
class Config:
_10x_eula_cookie_key = "sw-eula-full"
@property
def ten_x_eula_cookie_key(self) -> str:
return type(self)._10x_eula_cookie_key
def __init__(self):
self.path_home = os.path.join(Path.home(), ".scing")
self.path_10x_eula_cfg = os.path.join(self.path_home, "10x-eula.cfg")
os.makedirs(self.path_home, exist_ok=True)
def write_10x_eula_cookie(self, value: str):
with open(self.path_10x_eula_cfg, "wt") as fout:
print(value, file=fout)
def read_10x_eula_cookie(self) -> str:
with open(self.path_10x_eula_cfg, "rt") as fout:
value = fout.read().strip()
cookies = dict()
cookies[self._10x_eula_cookie_key] = value
return cookies
def has_10x_eula_cookie(self) -> bool:
return os.path.exists(self.path_10x_eula_cfg)
|
[
"pathlib.Path.home",
"os.path.join",
"os.makedirs",
"os.path.exists"
] |
[((319, 363), 'os.path.join', 'os.path.join', (['self.path_home', '"""10x-eula.cfg"""'], {}), "(self.path_home, '10x-eula.cfg')\n", (331, 363), False, 'import os\n'), ((372, 414), 'os.makedirs', 'os.makedirs', (['self.path_home'], {'exist_ok': '(True)'}), '(self.path_home, exist_ok=True)\n', (383, 414), False, 'import os\n'), ((869, 907), 'os.path.exists', 'os.path.exists', (['self.path_10x_eula_cfg'], {}), '(self.path_10x_eula_cfg)\n', (883, 907), False, 'import os\n'), ((263, 274), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (272, 274), False, 'from pathlib import Path\n')]
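# A minimal usage sketch (not part of the original module) for the Config class
# above. Note that instantiating it creates ~/.scing on the local machine; the
# cookie value used here is purely illustrative.
cfg = Config()
if not cfg.has_10x_eula_cookie():
    cfg.write_10x_eula_cookie("accepted")
print(cfg.read_10x_eula_cookie())  # e.g. {'sw-eula-full': 'accepted'}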
|
import CONFIG
import torch
import torch.nn as nn
class LSTMModel(nn.Module):
def __init__(
self,
vocab_size,
embed_dims,
hidden_dims,
num_layers,
dropout,
bidirectional,
num_pos_class,
num_tag_class
):
super(LSTMModel, self).__init__()
self.embedding = nn.Embedding(vocab_size, embed_dims)
self.rnn = nn.GRU(
embed_dims,
hidden_dims,
num_layers=num_layers,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional
)
self.dropout = nn.Dropout(dropout)
self.hidden_out = hidden_dims*2 if bidirectional else hidden_dims
self.pos_out = nn.Linear(self.hidden_out, num_pos_class)
self.tag_out = nn.Linear(self.hidden_out, num_tag_class)
def forward(self, x):
x = self.dropout(self.embedding(x))
x, _ = self.rnn(x)
x = self.dropout(x)
pos_out = self.pos_out(x)
tag_out = self.tag_out(x)
return pos_out, tag_out
|
[
"torch.nn.Dropout",
"torch.nn.GRU",
"torch.nn.Embedding",
"torch.nn.Linear"
] |
[((352, 388), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embed_dims'], {}), '(vocab_size, embed_dims)\n', (364, 388), True, 'import torch.nn as nn\n'), ((408, 530), 'torch.nn.GRU', 'nn.GRU', (['embed_dims', 'hidden_dims'], {'num_layers': 'num_layers', 'batch_first': '(True)', 'dropout': 'dropout', 'bidirectional': 'bidirectional'}), '(embed_dims, hidden_dims, num_layers=num_layers, batch_first=True,\n dropout=dropout, bidirectional=bidirectional)\n', (414, 530), True, 'import torch.nn as nn\n'), ((632, 651), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (642, 651), True, 'import torch.nn as nn\n'), ((749, 790), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_out', 'num_pos_class'], {}), '(self.hidden_out, num_pos_class)\n', (758, 790), True, 'import torch.nn as nn\n'), ((814, 855), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_out', 'num_tag_class'], {}), '(self.hidden_out, num_tag_class)\n', (823, 855), True, 'import torch.nn as nn\n')]
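# A shape-check sketch (not part of the original module) for the tagger defined
# above; every size below is arbitrary and LSTMModel is assumed to be in scope.
import torch

model = LSTMModel(vocab_size=1000, embed_dims=64, hidden_dims=128, num_layers=2,
                  dropout=0.3, bidirectional=True, num_pos_class=17, num_tag_class=9)
tokens = torch.randint(0, 1000, (4, 32))      # batch of 4 sequences of length 32
pos_logits, tag_logits = model(tokens)
print(pos_logits.shape, tag_logits.shape)    # (4, 32, 17) and (4, 32, 9)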
|
#!/usr/bin/python3
import io
import utils
from block import Block, BlockHeader
with open('../../../usb-decrypted/bitcoin/blocks/blk00001.dat', 'rb') as block_reader:
block_reader.seek(0, io.SEEK_END)
fSize = block_reader.tell() - 80
block_reader.seek(0, io.SEEK_SET)
block = Block(block_reader)
block.transactions[1].stdout()
print(block.transactions[1].get_bytes().hex())
raw_bytes = block.transactions[1].get_bytes()
# The following cryptic code implements this step-by-step guide on
# transaction verification:
# https://bitcoin.stackexchange.com/questions/72657/signature-verification-in-python-using-compressed-public-key
part_one = 4 + 1 + 32 + 4
# 4: Version
# 1: Input Count
# 32: Prev. hash
# 4: Output Index
target_bytes = raw_bytes[:part_one]
print(f"target_bytes.hex(): {target_bytes.hex()}")
target_bytes += bytes.fromhex('1976a914b0d7b19af5aee9a07ef6a147a0ec6a4e1fdf9e7188ac')
print(f"target_bytes.hex(): {target_bytes.hex()}")
target_bytes += raw_bytes[part_one + 1 + raw_bytes[part_one]:]
print(f"target_bytes.hex(): {target_bytes.hex()}")
target_bytes += bytes.fromhex('01000000')
import hashlib
import binascii
hashval = binascii.hexlify(hashlib.sha256(target_bytes).digest())
txn_sha256 = bytes.decode(hashval)
print("txn_sha256 = %s" % (txn_sha256))
import ecdsa
sig_b = bytes.fromhex(block.transactions[1].inputs[0].signature)
txn_sha256_b = bytes.fromhex(txn_sha256)
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(block.transactions[1].inputs[0].pubkey),curve=ecdsa.SECP256k1)
if vk.verify(sig_b, txn_sha256_b, hashlib.sha256) == True: # True
print("Signature is Valid")
else:
print("Signature is not Valid")
#0100000001959af1e21ddcb1fe07a2c8de6c28a1645a3d586c8aa4541b697791f27548ffa3000000001976a914b0d7b19af5aee9a07ef6a147a0ec6a4e1fdf9e7188acffffffff02809dc29c000000001976a9148fb7de32f84e119f6d4d75ecb9ad53dfae51b3a488acc0e27b0e000000001976a9140635412d152fe80abbdf71e73ba569abe28dd92288ac00000000
#0100000001959af1e21ddcb1fe07a2c8de6c28a1645a3d586c8aa4541b697791f27548ffa3000000001976a914b0d7b19af5aee9a07ef6a147a0ec6a4e1fdf9e7188acffffffff02809dc29c000000001976a9148fb7de32f84e119f6d4d75ecb9ad53dfae51b3a488acc0e27b0e000000001976a9140635412d152fe80abbdf71e73ba569abe28dd92288ac0000000001000000
|
[
"hashlib.sha256",
"block.Block"
] |
[((287, 306), 'block.Block', 'Block', (['block_reader'], {}), '(block_reader)\n', (292, 306), False, 'from block import Block, BlockHeader\n'), ((1174, 1202), 'hashlib.sha256', 'hashlib.sha256', (['target_bytes'], {}), '(target_bytes)\n', (1188, 1202), False, 'import hashlib\n')]
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Settings for UCCE: User-Centred Community Engagement
A project for Oxfam & Save the Children run with Eclipse Experience
"""
T = current.T
settings.base.system_name = T("User-Centred Community Engagement")
settings.base.system_name_short = T("UCCE")
# PrePopulate data
settings.base.prepopulate.append("UCCE")
settings.base.prepopulate_demo.append("UCCE/Demo")
# Theme (folder to use for views/layout.html)
settings.base.theme = "UCCE"
# Custom Logo
settings.ui.menu_logo = "/%s/static/themes/UCCE/img/ee_logo.png" % current.request.application
# Authentication settings
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
# Required for access to default realm permissions
settings.auth.registration_link_user_to = ["staff"]
settings.auth.registration_link_user_to_default = ["staff"]
# Allow master key login
settings.auth.masterkey = True
# Filter mobile forms by master key
settings.mobile.masterkey_filter = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
settings.security.policy = 6 # Controller, Function, Table ACLs and Entity Realm
# L10n settings
# Languages used in the deployment (used for Language Toolbar, GIS Locations, etc)
# http://www.loc.gov/standards/iso639-2/php/code_list.php
settings.L10n.languages = OrderedDict([
("en-gb", "English"),
("es", "Spanish"),
("so", "Somali"),
])
# Default language for Language Toolbar (& GIS Locations in future)
settings.L10n.default_language = "en-gb"
l10n_options = {"es": "Spanish",
"so": "Somali",
}
# Pass to controllers.py (not a real deployment_setting)
settings.L10n.survey_languages = l10n_options
settings.dc.likert_options = {1: ["Very appropriate",
"Somewhat appropriate",
"Neither appropriate nor inappropriate",
"Somewhat inappropriate",
"Very inappropriate",
],
2: ["Extremely confident",
"Very confident",
"Moderately confident",
"Slightly confident",
"Not confident at all",
],
3: ["Always",
"Often",
"Occasionally",
"Rarely",
"Never",
],
4: ["Extremely safe",
"Very safe",
"Moderately safe",
"Slightly safe",
"Not safe at all",
],
5: ["Very satisfied",
"Somewhat satisfied",
"Neither satisfied nor dissatisfied",
"Somewhat dissatisfied",
"Very dissatisfied",
],
6: ["smiley-1",
"smiley-2",
"smiley-3",
"smiley-4",
"smiley-6",
],
7: ["smiley-3",
"smiley-4",
"smiley-5",
],
}
# -------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
#("sync", Storage(
# name_nice = T("Synchronization"),
# #description = "Synchronization",
# restricted = True,
# access = "|1|", # Only Administrators can see this module in the default menu & access the controller
# module_type = None # This item is handled separately for the menu
#)),
#("translate", Storage(
# name_nice = T("Translation Functionality"),
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 1
)),
# Needed for default realm permissions
# Currently-needed for Profile link:
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = 10,
)),
("dc", Storage(
name_nice = T("Assessments"),
#description = "Data collection tool",
restricted = True,
module_type = 5
)),
("project", Storage(
name_nice = T("Projects"),
restricted = True,
module_type = 5
)),
#("stats", Storage(
# name_nice = T("Statistics"),
# #description = "Manages statistics",
# restricted = True,
# module_type = None,
#)),
])
settings.ui.filter_clear = False
settings.search.filter_manager = False
settings.ui.custom_icons = {
"_base": "ucce",
#"attachment": "ucce-", # Will fallback
"bar-chart": "ucce-reports",
"comment-alt": "ucce-textbox",
"copy": "ucce-duplicate",
"delete": "ucce-delete",
"edit": "ucce-edit",
"export": "ucce-export",
"eye": "ucce-survey-preview",
"file-text-alt": "ucce-guides",
"folder-alt": "ucce-projects",
"hashtag": "ucce-number-question",
"info-circle": "ucce-info",
"instructions": "ucce-instructions",
"list": "ucce-mcq",
"minus": "ucce-minus",
"picture": "ucce-heatmap",
"plus": "ucce-plus",
"reports": "ucce-reports",
"section-break": "ucce-section-break",
"survey-copy": "ucce-survey-duplicate",
"survey-delete": "ucce-survey-delete",
"survey-edit": "ucce-survey-edit",
"survey-export": "ucce-survey-export",
"tasks": "ucce-likert-scale",
"upload": "ucce-survey-import",
}
# Use Status in Assessment Targets
settings.dc.target_status = True
# Fill-out Assessments on EdenMobile-only
#settings.dc.response_mobile = False
settings.dc.response_web = False
# -------------------------------------------------------------------------
def ucce_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
if table._tablename in ("dc_target",
"dc_template",
):
s3db = current.s3db
ltable = s3db.project_project_target
ptable = s3db.project_project
if table._tablename == "dc_target":
query = (ltable.target_id == row.id) & \
(ltable.project_id == ptable.id)
else:
ttable = s3db.dc_target
query = (ttable.template_id == row.id) & \
(ltable.target_id == ttable.id) & \
(ltable.project_id == ptable.id)
project = current.db(query).select(ptable.realm_entity,
limitby = (0, 1)
).first()
try:
return project.realm_entity
except AttributeError:
pass
# Use default rules
return 0
settings.auth.realm_entity = ucce_realm_entity
# -------------------------------------------------------------------------
def ucce_rheader(r):
"""
Custom rheaders
"""
if r.representation != "html":
# RHeaders only used in interactive views
return None
# Need to use this format as otherwise req_match?viewing=org_office.x
# doesn't have an rheader
from core import s3_rheader_resource, s3_rheader_tabs
tablename, record = s3_rheader_resource(r)
if record is None:
# List or Create form: rheader makes no sense here
return None
from gluon import A, DIV, TABLE, TR, TH, URL
if tablename == "dc_template":
#tabs = [(T("Basic Details"), None),
# (T("Participants"), "participant"),
# ]
#rheader_tabs = s3_rheader_tabs(r, tabs)
db = current.db
s3db = current.s3db
ttable = s3db.dc_target
target = db(ttable.template_id == record.id).select(ttable.id,
ttable.status,
limitby = (0, 1)
).first()
try:
target_id = target.id
target_status = target.status
except AttributeError:
target_id = None
target_status = None
if not target_status:
# No Target linked...something odd happening
button = ""
elif target_status == 2:
# Active
button = A(T("Deactivate"),
_href=URL(c="dc", f="target",
args=[target_id, "deactivate.popup"],
),
_class="action-btn s3_modal",
_title=T("Deactivate Survey"),
)
else:
# Draft / Deactivated
button = A(T("Activate"),
_href=URL(c="dc", f="target",
args=[target_id, "activate.popup"],
),
_class="action-btn s3_modal",
_title=T("Activate Survey"),
)
ptable = s3db.project_project
ltable = s3db.project_project_target
query = (ltable.target_id == target_id) & \
(ltable.project_id == ptable.id)
project = db(query).select(ptable.name,
limitby = (0, 1)
).first()
try:
project_name = project.name
except AttributeError:
project_name = ""
#table = r.table
rheader = DIV(TABLE(TR(# @ToDo: make this editable
TH("%s: " % T("Survey name")),
record.name,
TH("%s: " % T("Project")),
project_name,
button,
)),
#rheader_tabs,
)
return rheader
# -------------------------------------------------------------------------
def ucce_masterkey_context(masterkey):
"""
Provide context information for a masterkey (populates session
in mobile app when linking to this masterkey)
@param masterkey: the auth_masterkey Row
@returns: a JSON-serializable dict with the context data
"""
db = current.db
s3db = current.s3db
context = {}
# Look up the project linked to the master key
ptable = s3db.project_project
ltable = s3db.project_project_masterkey
query = (ltable.id == masterkey.id) & \
(ltable.deleted == False) & \
(ptable.id == ltable.project_id) & \
(ptable.deleted == False)
project = db(query).select(ptable.id,
ptable.name,
limitby = (0, 1),
).first()
if project:
# Provide the project title
context["projectTitle"] = project.name
# Provide a list of available translations in this project
languages = []
# Use translated language names
from core import IS_ISO639_2_LANGUAGE_CODE, s3_str
represent = IS_ISO639_2_LANGUAGE_CODE.represent_local
# Look up the languages
ttable = s3db.project_project_target
l10ntable = s3db.dc_target_l10n
query = (ttable.project_id == project.id) & \
(ttable.deleted == False) & \
(ttable.target_id == l10ntable.target_id) & \
(l10ntable.language != None)
# Build the language list
rows = db(query).select(l10ntable.language)
seen = set()
for row in rows:
code = row.language
if code not in seen:
languages.append((code, s3_str(represent(code))))
seen.add(code)
context["surveyLanguages"] = languages
return context
settings.auth.masterkey_context = ucce_masterkey_context
# -------------------------------------------------------------------------
def customise_dc_question_resource(r, tablename):
from gluon import IS_IN_SET
from core import S3Represent, S3SQLCustomForm
crud_form = S3SQLCustomForm((T("Type"), "field_type"),
(T("Question"), "name"),
(T("Make question mandatory"), "require_not_empty"),
(T("Choices"), "options"),
(T("Add graphic"), "file"),
)
type_opts = {1: T("Text box"),
2: T("Number question"),
6: T("Multiple choice question"),
12: T("Likert-scale"),
13: T("Heatmap"),
}
s3db = current.s3db
table = s3db.dc_question
table.field_type.represent = S3Represent(options=type_opts)
table.field_type.requires = IS_IN_SET(type_opts)
table.require_not_empty.comment = None
s3db.configure("dc_question",
crud_form = crud_form,
list_fields = [(T("Type"), "field_type"),
(T("Question"), "name"),
(T("Mandatory"), "require_not_empty"),
]
)
settings.customise_dc_question_resource = customise_dc_question_resource
# -----------------------------------------------------------------------------
def customise_dc_question_controller(**attr):
# Custom Methods
from templates.UCCE.controllers import dc_QuestionCreate
from templates.UCCE.controllers import dc_QuestionImageDelete
from templates.UCCE.controllers import dc_QuestionImageUpload
from templates.UCCE.controllers import dc_QuestionSave
set_method = current.s3db.set_method
set_method("dc_question",
method = "create_json",
action = dc_QuestionCreate())
set_method("dc_question",
method = "image_delete",
action = dc_QuestionImageDelete())
set_method("dc_question",
method = "image_upload",
action = dc_QuestionImageUpload())
set_method("dc_question",
method = "update_json",
action = dc_QuestionSave())
return attr
settings.customise_dc_question_controller = customise_dc_question_controller
# -------------------------------------------------------------------------
def dc_target_postprocess(form):
"""
Create a Template with the same name as the Target
Copy the masterkey to the s3_table
Link the Target to this new Template
"""
form_vars_get = form.vars.get
template_id = form_vars_get("template_id")
if template_id:
# We already have a template, e.g. prepop
return
db = current.db
s3db = current.s3db
target_id = form_vars_get("id")
name = form_vars_get("name")
# Create Template
template = {"name": name}
tetable = s3db.dc_template
template_id = tetable.insert(**template)
template["id"] = template_id
onaccept = s3db.get_config("dc_template", "create_onaccept")
onaccept(Storage(vars = template))
ltable = s3db.dc_target_l10n
l10n = db(ltable.target_id == target_id).select(ltable.language,
limitby = (0, 1)
).first()
if l10n:
# Create Template_l10n
template = {"template_id": template_id,
"language": l10n.language,
}
ltable = s3db.dc_template_l10n
ltable.insert(**template)
# Link Target to Template
tatable = s3db.dc_target
db(tatable.id == target_id).update(template_id = template_id)
# Disable mobile_form in Dynamic Table
new_vars = {"mobile_form": False}
# Link Dynamic Table to Masterkey
ltable = s3db.project_project_target
pmtable = s3db.project_project_masterkey
query = (ltable.target_id == target_id) & \
(ltable.project_id == pmtable.project_id)
link = db(query).select(pmtable.masterkey_id,
limitby = (0, 1)
).first()
if link:
new_vars["masterkey_id"] = link.masterkey_id
# Update Dynamic Table
query = (tatable.id == target_id) & \
(tetable.id == tatable.template_id)
template = db(query).select(tetable.table_id,
limitby = (0, 1)
).first()
db(s3db.s3_table.id == template.table_id).update(**new_vars)
# -------------------------------------------------------------------------
def dc_target_ondelete(form):
"""
Delete the associated Template
"""
db = current.db
s3db = current.s3db
target_id = form.id
table = s3db.dc_target
record = db(table.id == target_id).select(table.deleted_fk,
limitby = (0, 1),
).first()
if record:
import json
deleted_fks = json.loads(record.deleted_fk)
template_id = deleted_fks.get("template_id")
resource = s3db.resource("dc_template",
filter=(s3db.dc_template.id == template_id))
resource.delete()
# -------------------------------------------------------------------------
def customise_dc_target_resource(r, tablename):
from gluon import IS_EMPTY_OR, URL
from core import IS_ISO639_2_LANGUAGE_CODE, S3SQLCustomForm, S3TextFilter
from templates.UCCE.controllers import dc_target_list_layout
from templates.UCCE.controllers import text_filter_formstyle
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Survey"),
title_display = T("Survey Details"),
#title_list = T("Surveys"),
title_list = "",
title_update = T("Edit Survey"),
#title_upload = T("Import Surveys"),
label_list_button = T("List Surveys"),
label_delete_button = T("Delete Survey"),
msg_record_created = T("Survey added"),
msg_record_modified = T("Survey updated"),
msg_record_deleted = T("Survey deleted"),
msg_list_empty = T("No Surveys currently registered"))
s3db = current.s3db
# Lift mandatory link to template so that we can create the template onaccept
#s3db.dc_target.template_id.requires
s3db.dc_target_l10n.language.requires = IS_EMPTY_OR(IS_ISO639_2_LANGUAGE_CODE(select = l10n_options,
sort = True,
translate = False,
zero = "",
))
# Custom Component
s3db.add_components("dc_target",
dc_target_l10n = {"joinby": "target_id",
"multiple": False,
},
)
s3db.configure("dc_target",
create_next = URL(c="dc", f="template", vars={"target_id": "[id]"}),
crud_form = S3SQLCustomForm((T("Survey name"), "name"),
(T("Translation"), "target_l10n.language"),
postprocess = dc_target_postprocess,
),
listadd = False,
list_fields = ["name",
#"status",
"project_target.project_id",
],
list_layout = dc_target_list_layout,
ondelete = dc_target_ondelete,
filter_widgets = [S3TextFilter(["name",
"project.name",
],
formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search project or survey"),
),
],
)
settings.customise_dc_target_resource = customise_dc_target_resource
# -----------------------------------------------------------------------------
def customise_dc_target_controller(**attr):
# Custom Methods
from templates.UCCE.controllers import dc_TargetActivate
from templates.UCCE.controllers import dc_TargetDeactivate
from templates.UCCE.controllers import dc_TargetDelete
from templates.UCCE.controllers import dc_TargetEdit
from templates.UCCE.controllers import dc_TargetName
from templates.UCCE.controllers import dc_TargetL10n
from templates.UCCE.controllers import dc_TargetReport
from templates.UCCE.controllers import dc_TargetReportFilters
set_method = current.s3db.set_method
set_method("dc_target",
method = "activate",
action = dc_TargetActivate())
set_method("dc_target",
method = "deactivate",
action = dc_TargetDeactivate())
set_method("dc_target",
method = "delete_confirm",
action = dc_TargetDelete())
set_method("dc_target",
method = "edit_confirm",
action = dc_TargetEdit())
set_method("dc_target",
method = "name",
action = dc_TargetName())
set_method("dc_target",
method = "l10n",
action = dc_TargetL10n())
set_method("dc_target",
method = "report_custom",
action = dc_TargetReport())
set_method("dc_target",
method = "report_filters",
action = dc_TargetReportFilters())
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.method == "datalist":
# Filter out Draft Surveys
from core import FS
r.resource.add_filter(FS("status") != 1)
return result
s3.prep = prep
s3.dl_no_header = True
attr["dl_rowsize"] = 4
attr["rheader"] = None
return attr
settings.customise_dc_target_controller = customise_dc_target_controller
# -------------------------------------------------------------------------
def dc_template_update_onaccept(form):
"""
Ensure that the Survey using this Template has the same name as the Template
@ToDo: Language? (Depends on UI)
"""
s3db = current.s3db
form_vars_get = form.vars.get
template_id = form_vars_get("id")
name = form_vars_get("name")
current.db(s3db.dc_target.template_id == template_id).update(name = name)
# -------------------------------------------------------------------------
def customise_dc_template_resource(r, tablename):
current.response.s3.crud_strings[tablename].title_display = T("Editor")
s3db = current.s3db
# Custom Component
#s3db.add_components("dc_template",
# dc_template_l10n = {"joinby": "template_id",
# "multiple": False,
# },
# )
s3db.configure("dc_template",
update_onaccept = dc_template_update_onaccept
)
settings.customise_dc_template_resource = customise_dc_template_resource
# -----------------------------------------------------------------------------
def customise_dc_template_controller(**attr):
s3db = current.s3db
target_id = current.request.get_vars.get("target_id")
if target_id:
# Find the Template for this Target
ttable = s3db.dc_target
target = current.db(ttable.id == target_id).select(ttable.template_id,
limitby = (0, 1),
).first()
if target:
from gluon import redirect, URL
redirect(URL(c="dc", f="template", args=[target.template_id, "editor"]))
# Custom Methods
from templates.UCCE.controllers import dc_TemplateEditor
from templates.UCCE.controllers import dc_TemplateExportL10n
from templates.UCCE.controllers import dc_TemplateImportL10n
from templates.UCCE.controllers import dc_TemplateSave
set_method = s3db.set_method
set_method("dc_template",
method = "editor",
action = dc_TemplateEditor())
set_method("dc_template",
method = "export_l10n",
action = dc_TemplateExportL10n())
set_method("dc_template",
method = "upload_l10n",
action = dc_TemplateImportL10n())
set_method("dc_template",
method = "update_json",
action = dc_TemplateSave())
attr["rheader"] = ucce_rheader
return attr
settings.customise_dc_template_controller = customise_dc_template_controller
# -------------------------------------------------------------------------
def default_table_onaccept(form, tablename):
"""
Set the response_id
NB We don't actually use this in UCCE since 1 Target == 1 Template
& we can't actually use this method for cases where this isn't the case either!
"""
record_id = form.vars.get("id")
if not record_id:
current.log.error("Submitting to dynamic table...cannot find record_id")
return
db = current.db
s3db = current.s3db
# Read the Table
dtable = s3db.s3_table
table = db(dtable.name == tablename).select(dtable.id,
limitby = (0, 1)
).first()
try:
table_id = table.id
except AttributeError:
current.log.error("Submitting to dynamic table...cannot find table_id")
return
# Find the Template
tetable = s3db.dc_template
template = db(tetable.table_id == table_id).select(tetable.id,
limitby = (0, 1)
).first()
try:
template_id = template.id
except AttributeError:
current.log.error("Submitting to dynamic table...cannot find template_id")
return
# Find the Target
ttable = s3db.dc_target
target = db(ttable.template_id == template_id).select(ttable.id,
limitby = (0, 1)
).first()
try:
target_id = target.id
except AttributeError:
current.log.error("Submitting to dynamic table...cannot find target_id")
return
# Create a Response
response_id = s3db.dc_response.insert(target_id = target_id,
template_id = template_id,
# @ToDo?
#language = language,
)
# Update the row in the Dynamic Table with this response_id
dtable = s3db.table(tablename)
db(dtable.id == record_id).update(response_id = response_id)
# -------------------------------------------------------------------------
def customise_default_table_controller(**attr):
s3 = current.response.s3
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
if not standard_prep(r):
return False
tablename = r.tablename
r.resource.configure(onaccept = lambda form: default_table_onaccept(form, tablename))
return True
s3.prep = custom_prep
return attr
settings.customise_default_table_controller = customise_default_table_controller
# -------------------------------------------------------------------------
def customise_doc_document_resource(r, tablename):
from gluon import URL
from core import S3SQLCustomForm, S3TextFilter
from templates.UCCE.controllers import doc_document_list_layout
from templates.UCCE.controllers import text_filter_formstyle
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("New guide"),
title_display = T("Guide Details"),
#title_list = T("Guides"),
title_list = "",
title_update = T("Edit Guide"),
#title_upload = T("Import Guides"),
label_list_button = T("List Guides"),
label_delete_button = T("Delete Guide"),
msg_record_created = T("Guide added"),
msg_record_modified = T("Guide updated"),
msg_record_deleted = T("Guide deleted"),
msg_list_empty = T("No Guides currently registered")
)
s3db = current.s3db
f = s3db.doc_document.comments
f.comment = None
s3db.configure("doc_document",
create_next = URL(args="datalist"),
crud_form = S3SQLCustomForm("name",
"file",
"comments",
),
list_fields = ["name",
"file",
"comments",
],
list_layout = doc_document_list_layout,
filter_widgets = [S3TextFilter(["name",
],
formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search guides"),
),
],
)
settings.customise_doc_document_resource = customise_doc_document_resource
# -----------------------------------------------------------------------------
def customise_doc_document_controller(**attr):
s3 = current.response.s3
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.method == "datalist":
if "showadd_btn" in output:
from gluon import A, SPAN, URL
from core import ICON
output["showadd_btn"] = A(ICON("plus"),
SPAN(T("New guide")),
_class = "add-btn no-link s3_modal",
_href = URL(c="doc", f="document",
args = "create.popup",
vars = {"refresh": "datalist"},
),
)
return output
s3.postp = postp
s3.dl_no_header = True
attr["dl_rowsize"] = 2
return attr
settings.customise_doc_document_controller = customise_doc_document_controller
# -------------------------------------------------------------------------
def project_project_onaccept(form):
"""
Create & link the Master Key
"""
from random import randint
db = current.db
s3db = current.s3db
table = s3db.auth_masterkey
not_unique = True
while not_unique:
masterkey = "%s-%s-%s" % (str(randint(0,999)).zfill(3),
str(randint(0,999)).zfill(3),
str(randint(0,999)).zfill(3),
)
exists = db(table.name == masterkey).select(table.id,
limitby = (0, 1)
)
if not exists:
not_unique = False
utable = db.auth_user
user = db(utable.email == "<EMAIL>").select(utable.id,
limitby = (0, 1)
).first()
masterkey_id = table.insert(name = masterkey,
user_id = user.id,
)
s3db.project_project_masterkey.insert(masterkey_id = masterkey_id,
project_id = form.vars.get("id"),
)
# -------------------------------------------------------------------------
def project_project_ondelete(form):
"""
Delete the associated Targets & Templates
"""
import json
db = current.db
s3db = current.s3db
project_id = form.id
target_ids = []
template_ids = []
ltable = s3db.project_project_target
rows = db(ltable.deleted == True).select(ltable.deleted_fk)
for row in rows:
deleted_fks = json.loads(row.deleted_fk)
if deleted_fks.get("project_id") == project_id:
target_id = deleted_fks.get("target_id")
target_ids.append(target_id)
if not target_ids:
return
table = s3db.dc_target
targets = db(table.id.belongs(target_ids)).select(table.template_id)
for target in targets:
template_ids.append(target.template_id)
resource = s3db.resource("dc_template",
filter=(s3db.dc_template.id.belongs(template_ids)))
resource.delete()
# ondelete CASCADE will clear these:
#resource = s3db.resource("dc_target",
# filter=(s3db.dc_target.id.belongs(target_ids)))
#resource.delete()
# -------------------------------------------------------------------------
def project_project_target_create_onaccept(form):
"""
Copy the masterkey to the s3_table
- used during prepop
"""
form_vars_get = form.vars.get
project_id = form_vars_get("project_id")
target_id = form_vars_get("target_id")
db = current.db
s3db = current.s3db
pmtable = s3db.project_project_masterkey
link = db(pmtable.project_id == project_id).select(pmtable.masterkey_id,
limitby = (0, 1)
).first()
tatable = s3db.dc_target
tetable = s3db.dc_template
query = (tatable.id == target_id) & \
(tetable.id == tatable.template_id)
template = db(query).select(tetable.table_id,
limitby = (0, 1)
).first()
if template:
db(s3db.s3_table.id == template.table_id).update(masterkey_id = link.masterkey_id)
# -------------------------------------------------------------------------
def customise_project_project_resource(r, tablename):
from gluon import IS_EMPTY_OR, URL
from core import IS_ISO639_2_LANGUAGE_CODE, S3SQLCustomForm, S3TextFilter
from templates.UCCE.controllers import project_project_list_layout
from templates.UCCE.controllers import text_filter_formstyle
s3db = current.s3db
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("New project"),
#title_display = T("Project Details"),
# Only used in /target/create?
title_display = T("Editor"),
#title_list = T("Projects"),
title_list = "",
title_update = T("Edit project name"),
#title_upload = T("Import Projects"),
label_list_button = T("List Projects"),
label_delete_button = T("Delete Projects"),
msg_record_created = T("Project added"),
msg_record_modified = T("Project updated"),
msg_record_deleted = T("Project deleted"),
msg_list_empty = T("No Projects currently registered")
)
user = current.auth.user
organisation_id = user and user.organisation_id
if organisation_id:
f = s3db.project_project.organisation_id
f.default = organisation_id
f.readable = f.writable = False
s3db.configure("project_project",
create_next = URL(args="datalist"),
# No need to chain as default one not relevant for this usecase:
create_onaccept = project_project_onaccept,
crud_form = S3SQLCustomForm((T("Organization"), "organisation_id"),
(T("New project name"), "name"),
(T("Default Translation"), "l10n.language"),
),
# Ignored here as set in Prep in default controller
#list_fields = ["name",
# "project_target.target_id",
# ],
list_layout = project_project_list_layout,
ondelete = project_project_ondelete,
filter_widgets = [S3TextFilter(["name",
"target.name",
],
formstyle = text_filter_formstyle,
label = "",
_placeholder = T("Search project or survey"),
),
],
)
s3db.configure("project_project_target",
create_onaccept = project_project_target_create_onaccept,
)
settings.customise_project_project_resource = customise_project_project_resource
# -----------------------------------------------------------------------------
def customise_project_project_controller(**attr):
# Custom Method
from templates.UCCE.controllers import dc_ProjectDelete
s3db = current.s3db
s3db.set_method("project_project",
method = "delete_confirm",
action = dc_ProjectDelete())
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.id and not r.component and r.representation == "xls":
# Custom XLS Exporter to include all Responses.
r.custom_action = s3db.dc_TargetXLS
elif r.method == "datalist":
# Over-ride list_fields set in default prep
s3db.configure("project_project",
list_fields = ["name",
"project_target.target_id",
"masterkey.name",
],
)
# Inject JS to handle Switches & deletion of Inner cards
if s3.debug:
s3.scripts.append("/%s/static/themes/UCCE/js/projects.js" % r.application)
else:
s3.scripts.append("/%s/static/themes/UCCE/js/projects.min.js" % r.application)
return result
s3.prep = prep
# Custom postp
standard_postp = s3.postp
def postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.method == "datalist":
if "showadd_btn" in output:
from gluon import A, SPAN, URL
from core import ICON
output["showadd_btn"] = A(ICON("plus"),
SPAN(T("New project")),
_class = "add-btn no-link s3_modal",
_href = URL(c="project", f="project",
args = "create.popup",
vars = {"refresh": "datalist"},
),
)
return output
s3.postp = postp
s3.dl_no_header = True
attr["rheader"] = None
return attr
settings.customise_project_project_controller = customise_project_project_controller
# END =========================================================================
|
[
"gluon.IS_IN_SET",
"templates.UCCE.controllers.dc_TargetReport",
"templates.UCCE.controllers.dc_TargetL10n",
"templates.UCCE.controllers.dc_QuestionImageUpload",
"core.S3SQLCustomForm",
"templates.UCCE.controllers.dc_TemplateImportL10n",
"core.IS_ISO639_2_LANGUAGE_CODE",
"templates.UCCE.controllers.dc_TargetReportFilters",
"gluon.current.db",
"templates.UCCE.controllers.dc_TemplateExportL10n",
"json.loads",
"random.randint",
"templates.UCCE.controllers.dc_TemplateEditor",
"core.s3_rheader_resource",
"templates.UCCE.controllers.dc_TargetDelete",
"core.S3Represent",
"templates.UCCE.controllers.dc_QuestionCreate",
"core.FS",
"templates.UCCE.controllers.dc_TargetDeactivate",
"templates.UCCE.controllers.dc_TargetEdit",
"templates.UCCE.controllers.dc_ProjectDelete",
"templates.UCCE.controllers.dc_TargetActivate",
"templates.UCCE.controllers.dc_QuestionSave",
"gluon.current.request.get_vars.get",
"templates.UCCE.controllers.dc_TemplateSave",
"templates.UCCE.controllers.dc_TargetName",
"gluon.storage.Storage",
"core.ICON",
"templates.UCCE.controllers.dc_QuestionImageDelete",
"collections.OrderedDict",
"gluon.current.log.error",
"gluon.URL"
] |
[((2513, 2585), 'collections.OrderedDict', 'OrderedDict', (["[('en-gb', 'English'), ('es', 'Spanish'), ('so', 'Somali')]"], {}), "([('en-gb', 'English'), ('es', 'Spanish'), ('so', 'Somali')])\n", (2524, 2585), False, 'from collections import OrderedDict\n'), ((12230, 12252), 'core.s3_rheader_resource', 's3_rheader_resource', (['r'], {}), '(r)\n', (12249, 12252), False, 'from core import s3_rheader_resource, s3_rheader_tabs\n'), ((18348, 18378), 'core.S3Represent', 'S3Represent', ([], {'options': 'type_opts'}), '(options=type_opts)\n', (18359, 18378), False, 'from core import S3Represent, S3SQLCustomForm\n'), ((18415, 18435), 'gluon.IS_IN_SET', 'IS_IN_SET', (['type_opts'], {}), '(type_opts)\n', (18424, 18435), False, 'from gluon import IS_IN_SET\n'), ((30574, 30615), 'gluon.current.request.get_vars.get', 'current.request.get_vars.get', (['"""target_id"""'], {}), "('target_id')\n", (30602, 30615), False, 'from gluon import current\n'), ((20898, 20920), 'gluon.storage.Storage', 'Storage', ([], {'vars': 'template'}), '(vars=template)\n', (20905, 20920), False, 'from gluon.storage import Storage\n'), ((23043, 23072), 'json.loads', 'json.loads', (['record.deleted_fk'], {}), '(record.deleted_fk)\n', (23053, 23072), False, 'import json\n'), ((24570, 24657), 'core.IS_ISO639_2_LANGUAGE_CODE', 'IS_ISO639_2_LANGUAGE_CODE', ([], {'select': 'l10n_options', 'sort': '(True)', 'translate': '(False)', 'zero': '""""""'}), "(select=l10n_options, sort=True, translate=False,\n zero='')\n", (24595, 24657), False, 'from core import IS_ISO639_2_LANGUAGE_CODE, s3_str\n'), ((32537, 32609), 'gluon.current.log.error', 'current.log.error', (['"""Submitting to dynamic table...cannot find record_id"""'], {}), "('Submitting to dynamic table...cannot find record_id')\n", (32554, 32609), False, 'from gluon import current\n'), ((40843, 40869), 'json.loads', 'json.loads', (['row.deleted_fk'], {}), '(row.deleted_fk)\n', (40853, 40869), False, 'import json\n'), ((19496, 19515), 'templates.UCCE.controllers.dc_QuestionCreate', 'dc_QuestionCreate', ([], {}), '()\n', (19513, 19515), False, 'from templates.UCCE.controllers import dc_QuestionCreate\n'), ((19623, 19647), 'templates.UCCE.controllers.dc_QuestionImageDelete', 'dc_QuestionImageDelete', ([], {}), '()\n', (19645, 19647), False, 'from templates.UCCE.controllers import dc_QuestionImageDelete\n'), ((19755, 19779), 'templates.UCCE.controllers.dc_QuestionImageUpload', 'dc_QuestionImageUpload', ([], {}), '()\n', (19777, 19779), False, 'from templates.UCCE.controllers import dc_QuestionImageUpload\n'), ((19886, 19903), 'templates.UCCE.controllers.dc_QuestionSave', 'dc_QuestionSave', ([], {}), '()\n', (19901, 19903), False, 'from templates.UCCE.controllers import dc_QuestionSave\n'), ((25365, 25418), 'gluon.URL', 'URL', ([], {'c': '"""dc"""', 'f': '"""template"""', 'vars': "{'target_id': '[id]'}"}), "(c='dc', f='template', vars={'target_id': '[id]'})\n", (25368, 25418), False, 'from gluon import A, SPAN, URL\n'), ((27556, 27575), 'templates.UCCE.controllers.dc_TargetActivate', 'dc_TargetActivate', ([], {}), '()\n', (27573, 27575), False, 'from templates.UCCE.controllers import dc_TargetActivate\n'), ((27679, 27700), 'templates.UCCE.controllers.dc_TargetDeactivate', 'dc_TargetDeactivate', ([], {}), '()\n', (27698, 27700), False, 'from templates.UCCE.controllers import dc_TargetDeactivate\n'), ((27808, 27825), 'templates.UCCE.controllers.dc_TargetDelete', 'dc_TargetDelete', ([], {}), '()\n', (27823, 27825), False, 'from templates.UCCE.controllers import dc_TargetDelete\n'), ((27931, 
27946), 'templates.UCCE.controllers.dc_TargetEdit', 'dc_TargetEdit', ([], {}), '()\n', (27944, 27946), False, 'from templates.UCCE.controllers import dc_TargetEdit\n'), ((28044, 28059), 'templates.UCCE.controllers.dc_TargetName', 'dc_TargetName', ([], {}), '()\n', (28057, 28059), False, 'from templates.UCCE.controllers import dc_TargetName\n'), ((28157, 28172), 'templates.UCCE.controllers.dc_TargetL10n', 'dc_TargetL10n', ([], {}), '()\n', (28170, 28172), False, 'from templates.UCCE.controllers import dc_TargetL10n\n'), ((28279, 28296), 'templates.UCCE.controllers.dc_TargetReport', 'dc_TargetReport', ([], {}), '()\n', (28294, 28296), False, 'from templates.UCCE.controllers import dc_TargetReport\n'), ((28404, 28428), 'templates.UCCE.controllers.dc_TargetReportFilters', 'dc_TargetReportFilters', ([], {}), '()\n', (28426, 28428), False, 'from templates.UCCE.controllers import dc_TargetReportFilters\n'), ((29561, 29614), 'gluon.current.db', 'current.db', (['(s3db.dc_target.template_id == template_id)'], {}), '(s3db.dc_target.template_id == template_id)\n', (29571, 29614), False, 'from gluon import current\n'), ((31549, 31568), 'templates.UCCE.controllers.dc_TemplateEditor', 'dc_TemplateEditor', ([], {}), '()\n', (31566, 31568), False, 'from templates.UCCE.controllers import dc_TemplateEditor\n'), ((31676, 31699), 'templates.UCCE.controllers.dc_TemplateExportL10n', 'dc_TemplateExportL10n', ([], {}), '()\n', (31697, 31699), False, 'from templates.UCCE.controllers import dc_TemplateExportL10n\n'), ((31807, 31830), 'templates.UCCE.controllers.dc_TemplateImportL10n', 'dc_TemplateImportL10n', ([], {}), '()\n', (31828, 31830), False, 'from templates.UCCE.controllers import dc_TemplateImportL10n\n'), ((31938, 31955), 'templates.UCCE.controllers.dc_TemplateSave', 'dc_TemplateSave', ([], {}), '()\n', (31953, 31955), False, 'from templates.UCCE.controllers import dc_TemplateSave\n'), ((33021, 33092), 'gluon.current.log.error', 'current.log.error', (['"""Submitting to dynamic table...cannot find table_id"""'], {}), "('Submitting to dynamic table...cannot find table_id')\n", (33038, 33092), False, 'from gluon import current\n'), ((33486, 33560), 'gluon.current.log.error', 'current.log.error', (['"""Submitting to dynamic table...cannot find template_id"""'], {}), "('Submitting to dynamic table...cannot find template_id')\n", (33503, 33560), False, 'from gluon import current\n'), ((33953, 34025), 'gluon.current.log.error', 'current.log.error', (['"""Submitting to dynamic table...cannot find target_id"""'], {}), "('Submitting to dynamic table...cannot find target_id')\n", (33970, 34025), False, 'from gluon import current\n'), ((36421, 36441), 'gluon.URL', 'URL', ([], {'args': '"""datalist"""'}), "(args='datalist')\n", (36424, 36441), False, 'from gluon import A, SPAN, URL\n'), ((36478, 36521), 'core.S3SQLCustomForm', 'S3SQLCustomForm', (['"""name"""', '"""file"""', '"""comments"""'], {}), "('name', 'file', 'comments')\n", (36493, 36521), False, 'from core import IS_ISO639_2_LANGUAGE_CODE, S3SQLCustomForm, S3TextFilter\n'), ((44309, 44329), 'gluon.URL', 'URL', ([], {'args': '"""datalist"""'}), "(args='datalist')\n", (44312, 44329), False, 'from gluon import A, SPAN, URL\n'), ((46366, 46384), 'templates.UCCE.controllers.dc_ProjectDelete', 'dc_ProjectDelete', ([], {}), '()\n', (46382, 46384), False, 'from templates.UCCE.controllers import dc_ProjectDelete\n'), ((31055, 31117), 'gluon.URL', 'URL', ([], {'c': '"""dc"""', 'f': '"""template"""', 'args': "[target.template_id, 'editor']"}), "(c='dc', f='template', 
args=[target.template_id, 'editor'])\n", (31058, 31117), False, 'from gluon import A, SPAN, URL\n'), ((28860, 28872), 'core.FS', 'FS', (['"""status"""'], {}), "('status')\n", (28862, 28872), False, 'from core import FS\n'), ((38102, 38114), 'core.ICON', 'ICON', (['"""plus"""'], {}), "('plus')\n", (38106, 38114), False, 'from core import ICON\n'), ((48077, 48089), 'core.ICON', 'ICON', (['"""plus"""'], {}), "('plus')\n", (48081, 48089), False, 'from core import ICON\n'), ((11368, 11385), 'gluon.current.db', 'current.db', (['query'], {}), '(query)\n', (11378, 11385), False, 'from gluon import current\n'), ((13521, 13582), 'gluon.URL', 'URL', ([], {'c': '"""dc"""', 'f': '"""target"""', 'args': "[target_id, 'deactivate.popup']"}), "(c='dc', f='target', args=[target_id, 'deactivate.popup'])\n", (13524, 13582), False, 'from gluon import A, SPAN, URL\n'), ((13935, 13994), 'gluon.URL', 'URL', ([], {'c': '"""dc"""', 'f': '"""target"""', 'args': "[target_id, 'activate.popup']"}), "(c='dc', f='target', args=[target_id, 'activate.popup'])\n", (13938, 13994), False, 'from gluon import A, SPAN, URL\n'), ((30743, 30777), 'gluon.current.db', 'current.db', (['(ttable.id == target_id)'], {}), '(ttable.id == target_id)\n', (30753, 30777), False, 'from gluon import current\n'), ((38321, 38398), 'gluon.URL', 'URL', ([], {'c': '"""doc"""', 'f': '"""document"""', 'args': '"""create.popup"""', 'vars': "{'refresh': 'datalist'}"}), "(c='doc', f='document', args='create.popup', vars={'refresh': 'datalist'})\n", (38324, 38398), False, 'from gluon import A, SPAN, URL\n'), ((48298, 48383), 'gluon.URL', 'URL', ([], {'c': '"""project"""', 'f': '"""project"""', 'args': '"""create.popup"""', 'vars': "{'refresh': 'datalist'}"}), "(c='project', f='project', args='create.popup', vars={'refresh': 'datalist'}\n )\n", (48301, 48383), False, 'from gluon import A, SPAN, URL\n'), ((39254, 39269), 'random.randint', 'randint', (['(0)', '(999)'], {}), '(0, 999)\n', (39261, 39269), False, 'from random import randint\n'), ((39322, 39337), 'random.randint', 'randint', (['(0)', '(999)'], {}), '(0, 999)\n', (39329, 39337), False, 'from random import randint\n'), ((39390, 39405), 'random.randint', 'randint', (['(0)', '(999)'], {}), '(0, 999)\n', (39397, 39405), False, 'from random import randint\n')]
|
"""This module creates a new dataframe with a movie id and its corresponding
mean sentiment score. Mean sentiment score is computed by taking the average of
the sentiment scores for all the movie's comments
"""
from os import listdir
import os.path as op
import pandas as pd
import numpy as np
from .analyze_comments_tblob import analyze_comments
import movie_analysis as mv
data_path = op.join(mv.__path__[0], 'data/movie_comments')
def get_sentiment_score():
"""This function makes a new df with the movie_id and sentiment score
as columns
"""
final_df = pd.DataFrame(columns=['movie_id', 'sentiment_score'])
filenames2 = find_csv_filenames(data_path)
for name in filenames2:
new_name = data_path+"/"+name
df = pd.read_csv(new_name, encoding='latin1')
sentiment_df = pd.DataFrame(data=df)
sentiment_df.columns = ["comment"]
if not sentiment_df.empty:
sentiment_df = analyze_comments(sentiment_df)
if name.endswith('.csv'):
name = name[:-4]
sentiment_score = np.asarray(sentiment_df.iloc[:, 1], dtype=np.float).mean()
final_df = add_row(name, sentiment_score, final_df)
# final_df.to_csv("sentiment_scores.csv", encoding='latin1')
return final_df
def find_csv_filenames(path_to_dir, suffix=".csv"):
"""This function returns a list of all the filenames that end with '.csv'
"""
filenames = listdir(path_to_dir)
return [filename for filename in filenames if filename.endswith(suffix)]
def add_row(movie_id, mean_score, final_df):
"""This function adds a row to the data frame with the movie_id and corresponding
sentiment_score values
"""
df2 = pd.DataFrame([[movie_id, mean_score]], columns=['movie_id', 'sentiment_score'])
final_df = final_df.append(df2, ignore_index=True)
return final_df
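# Usage sketch (illustrative, not part of the original module): running this
# module directly would build and print the per-movie sentiment table.
if __name__ == "__main__":
    scores = get_sentiment_score()
    print(scores.head())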
|
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.asarray",
"os.path.join",
"os.listdir"
] |
[((389, 435), 'os.path.join', 'op.join', (['mv.__path__[0]', '"""data/movie_comments"""'], {}), "(mv.__path__[0], 'data/movie_comments')\n", (396, 435), True, 'import os.path as op\n'), ((577, 630), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['movie_id', 'sentiment_score']"}), "(columns=['movie_id', 'sentiment_score'])\n", (589, 630), True, 'import pandas as pd\n'), ((1444, 1464), 'os.listdir', 'listdir', (['path_to_dir'], {}), '(path_to_dir)\n', (1451, 1464), False, 'from os import listdir\n'), ((1720, 1799), 'pandas.DataFrame', 'pd.DataFrame', (['[[movie_id, mean_score]]'], {'columns': "['movie_id', 'sentiment_score']"}), "([[movie_id, mean_score]], columns=['movie_id', 'sentiment_score'])\n", (1732, 1799), True, 'import pandas as pd\n'), ((757, 797), 'pandas.read_csv', 'pd.read_csv', (['new_name'], {'encoding': '"""latin1"""'}), "(new_name, encoding='latin1')\n", (768, 797), True, 'import pandas as pd\n'), ((821, 842), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'df'}), '(data=df)\n', (833, 842), True, 'import pandas as pd\n'), ((1080, 1131), 'numpy.asarray', 'np.asarray', (['sentiment_df.iloc[:, 1]'], {'dtype': 'np.float'}), '(sentiment_df.iloc[:, 1], dtype=np.float)\n', (1090, 1131), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 19 10:51:10 2018
@author: shlomi
"""
import platform
from pathlib import Path
path = Path().cwd()
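# Host-specific path selection: the same analysis code runs on several machines,
# so the work/data/figure directories below are chosen from platform.system()
# and platform.node().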
if platform.system() == 'Linux':
if platform.node() == 'ziskin-XPS-8700':
work_path = Path('/home/ziskin/Work_Files/')
work_yuval = work_path / 'PW_yuval'
work_chaim = work_path / 'Chaim_Stratosphere_Data'
work_david = work_path / 'David_migration'
cwd = Path().cwd()
geo_path = Path('/home/ziskin/geo_ariel_home/')
adams_path = Path('/home/ziskin/adams_home/')
data11_path = Path('/home/ziskin/data11/')
savefig_path = Path('/home/ziskin/Pictures')
elif platform.node() == 'shlomipc':
work_path = Path('/mnt/DATA/Work_Files/')
work_yuval = work_path / 'PW_yuval'
work_chaim = work_path / 'Chaim_Stratosphere_Data'
work_david = work_path / 'David_migration'
cwd = Path().cwd()
geo_path = Path('/home/shlomi/geo_ariel_home/')
adams_path = Path('/home/shlomi/adams_home/')
data11_path = Path('/home/shlomi/data11/')
savefig_path = Path('/home/shlomi/Pictures')
elif platform.node() == 'geophysics1.yosh.ac.il':
work_path = Path('/home/ziskin/Work_Files/')
work_yuval = work_path / 'PW_yuval'
work_chaim = work_path / 'Chaim_Stratosphere_Data'
work_david = work_path / 'David_migration'
cwd = Path().cwd()
geo_path = Path('/home/ziskin/')
savefig_path = Path().cwd()
# geo_path = Path('/home/ziskin/geo_ariel_home/')
# adams_path = Path('/home/ziskin/adams_home/')
# data11_path = Path('/home/ziskin/data11/')
else:
work_path = Path('/home/ziskin/Work_Files/')
work_yuval = work_path / 'PW_yuval'
work_chaim = work_path / 'Chaim_Stratosphere_Data'
work_david = work_path / 'David_migration'
cwd = Path().cwd()
elif platform.system() == 'Darwin':
if platform.node() == 'Venus':
work_path = Path('/Users/shlomi/Documents/')
work_yuval = work_path / 'PW_yuval'
work_chaim = work_path / 'Chaim_Stratosphere_Data'
work_david = work_path / 'David_migration'
cwd = Path().cwd()
geo_path = Path('/Users/shlomi/geo_ariel_home/')
adams_path = Path('/Users/shlomi/adams_home/')
data11_path = Path('/Users/shlomi/data11/')
savefig_path = Path('/Users/shlomi/Pictures')
|
[
"platform.system",
"pathlib.Path",
"platform.node"
] |
[((172, 189), 'platform.system', 'platform.system', ([], {}), '()\n', (187, 189), False, 'import platform\n'), ((156, 162), 'pathlib.Path', 'Path', ([], {}), '()\n', (160, 162), False, 'from pathlib import Path\n'), ((209, 224), 'platform.node', 'platform.node', ([], {}), '()\n', (222, 224), False, 'import platform\n'), ((267, 299), 'pathlib.Path', 'Path', (['"""/home/ziskin/Work_Files/"""'], {}), "('/home/ziskin/Work_Files/')\n", (271, 299), False, 'from pathlib import Path\n'), ((500, 536), 'pathlib.Path', 'Path', (['"""/home/ziskin/geo_ariel_home/"""'], {}), "('/home/ziskin/geo_ariel_home/')\n", (504, 536), False, 'from pathlib import Path\n'), ((558, 590), 'pathlib.Path', 'Path', (['"""/home/ziskin/adams_home/"""'], {}), "('/home/ziskin/adams_home/')\n", (562, 590), False, 'from pathlib import Path\n'), ((613, 641), 'pathlib.Path', 'Path', (['"""/home/ziskin/data11/"""'], {}), "('/home/ziskin/data11/')\n", (617, 641), False, 'from pathlib import Path\n'), ((665, 694), 'pathlib.Path', 'Path', (['"""/home/ziskin/Pictures"""'], {}), "('/home/ziskin/Pictures')\n", (669, 694), False, 'from pathlib import Path\n'), ((1961, 1978), 'platform.system', 'platform.system', ([], {}), '()\n', (1976, 1978), False, 'import platform\n'), ((704, 719), 'platform.node', 'platform.node', ([], {}), '()\n', (717, 719), False, 'import platform\n'), ((755, 784), 'pathlib.Path', 'Path', (['"""/mnt/DATA/Work_Files/"""'], {}), "('/mnt/DATA/Work_Files/')\n", (759, 784), False, 'from pathlib import Path\n'), ((985, 1021), 'pathlib.Path', 'Path', (['"""/home/shlomi/geo_ariel_home/"""'], {}), "('/home/shlomi/geo_ariel_home/')\n", (989, 1021), False, 'from pathlib import Path\n'), ((1043, 1075), 'pathlib.Path', 'Path', (['"""/home/shlomi/adams_home/"""'], {}), "('/home/shlomi/adams_home/')\n", (1047, 1075), False, 'from pathlib import Path\n'), ((1098, 1126), 'pathlib.Path', 'Path', (['"""/home/shlomi/data11/"""'], {}), "('/home/shlomi/data11/')\n", (1102, 1126), False, 'from pathlib import Path\n'), ((1150, 1179), 'pathlib.Path', 'Path', (['"""/home/shlomi/Pictures"""'], {}), "('/home/shlomi/Pictures')\n", (1154, 1179), False, 'from pathlib import Path\n'), ((1999, 2014), 'platform.node', 'platform.node', ([], {}), '()\n', (2012, 2014), False, 'import platform\n'), ((2047, 2079), 'pathlib.Path', 'Path', (['"""/Users/shlomi/Documents/"""'], {}), "('/Users/shlomi/Documents/')\n", (2051, 2079), False, 'from pathlib import Path\n'), ((2280, 2317), 'pathlib.Path', 'Path', (['"""/Users/shlomi/geo_ariel_home/"""'], {}), "('/Users/shlomi/geo_ariel_home/')\n", (2284, 2317), False, 'from pathlib import Path\n'), ((2339, 2372), 'pathlib.Path', 'Path', (['"""/Users/shlomi/adams_home/"""'], {}), "('/Users/shlomi/adams_home/')\n", (2343, 2372), False, 'from pathlib import Path\n'), ((2395, 2424), 'pathlib.Path', 'Path', (['"""/Users/shlomi/data11/"""'], {}), "('/Users/shlomi/data11/')\n", (2399, 2424), False, 'from pathlib import Path\n'), ((2448, 2478), 'pathlib.Path', 'Path', (['"""/Users/shlomi/Pictures"""'], {}), "('/Users/shlomi/Pictures')\n", (2452, 2478), False, 'from pathlib import Path\n'), ((468, 474), 'pathlib.Path', 'Path', ([], {}), '()\n', (472, 474), False, 'from pathlib import Path\n'), ((1189, 1204), 'platform.node', 'platform.node', ([], {}), '()\n', (1202, 1204), False, 'import platform\n'), ((1254, 1286), 'pathlib.Path', 'Path', (['"""/home/ziskin/Work_Files/"""'], {}), "('/home/ziskin/Work_Files/')\n", (1258, 1286), False, 'from pathlib import Path\n'), ((1487, 1508), 'pathlib.Path', 'Path', 
(['"""/home/ziskin/"""'], {}), "('/home/ziskin/')\n", (1491, 1508), False, 'from pathlib import Path\n'), ((1742, 1774), 'pathlib.Path', 'Path', (['"""/home/ziskin/Work_Files/"""'], {}), "('/home/ziskin/Work_Files/')\n", (1746, 1774), False, 'from pathlib import Path\n'), ((953, 959), 'pathlib.Path', 'Path', ([], {}), '()\n', (957, 959), False, 'from pathlib import Path\n'), ((2248, 2254), 'pathlib.Path', 'Path', ([], {}), '()\n', (2252, 2254), False, 'from pathlib import Path\n'), ((1455, 1461), 'pathlib.Path', 'Path', ([], {}), '()\n', (1459, 1461), False, 'from pathlib import Path\n'), ((1532, 1538), 'pathlib.Path', 'Path', ([], {}), '()\n', (1536, 1538), False, 'from pathlib import Path\n'), ((1943, 1949), 'pathlib.Path', 'Path', ([], {}), '()\n', (1947, 1949), False, 'from pathlib import Path\n')]
|
from maggma.api.query_operator import PaginationQuery, SparseFieldsQuery
from maggma.api.resource import ReadOnlyResource
from emmet.core.phonon import PhononBSDOSDoc
def phonon_bsdos_resource(phonon_bs_store):
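    # Read-only API resource over the phonon band-structure/DOS store:
    # pagination plus sparse-field selection (task_id/last_updated by default),
    # with the default search endpoint and document validation switched off.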
resource = ReadOnlyResource(
phonon_bs_store,
PhononBSDOSDoc,
query_operators=[
PaginationQuery(),
SparseFieldsQuery(
PhononBSDOSDoc, default_fields=["task_id", "last_updated"]
),
],
tags=["Phonon"],
enable_default_search=False,
disable_validation=True,
)
return resource
|
[
"maggma.api.query_operator.SparseFieldsQuery",
"maggma.api.query_operator.PaginationQuery"
] |
[((334, 351), 'maggma.api.query_operator.PaginationQuery', 'PaginationQuery', ([], {}), '()\n', (349, 351), False, 'from maggma.api.query_operator import PaginationQuery, SparseFieldsQuery\n'), ((365, 442), 'maggma.api.query_operator.SparseFieldsQuery', 'SparseFieldsQuery', (['PhononBSDOSDoc'], {'default_fields': "['task_id', 'last_updated']"}), "(PhononBSDOSDoc, default_fields=['task_id', 'last_updated'])\n", (382, 442), False, 'from maggma.api.query_operator import PaginationQuery, SparseFieldsQuery\n')]
|
from datetime import datetime
import numpy as np
import warnings
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__created__ = datetime(2008, 8, 15)
__modified__ = datetime(2015, 7, 25)
__version__ = "1.5"
__status__ = "Development"
'''
Various vertical coordinates
Presently, only ocean s-coordinates are supported. Future plans will be to
include all of the vertical coordinate systems defined by the CF conventions.
vgrid.py function copied from https://github.com/kshedstrom/pyroms (Frederic Castruccio)
'''
def calculateVgrid(self):
print(("--->Setting up vertical coordinates using self.vtransform: %s self.vstretching: %s"%(self.vtransform,self.vstretching)))
if self.vtransform == 1:
vgrid = s_coordinate(self.h, self.theta_b, self.theta_s, self.tcline, self.nlevels, self.vtransform, self.vstretching, zeta=None)
elif self.vtransform == 2 and self.vstretching == 2:
vgrid = s_coordinate_2(self.h, self.theta_b, self.theta_s, self.tcline, self.nlevels, self.vtransform, self.vstretching, zeta=None)
elif self.vtransform == 2 and self.vstretching == 4:
vgrid = s_coordinate_4(self.h, self.theta_b, self.theta_s, self.tcline, self.nlevels, self.vtransform, self.vstretching, zeta=None)
else:
        raise Warning('Unknown vertical transformation Vtrans')
self.z_r = vgrid.z_r[0,:]
self.z_w = vgrid.z_w[0,:]
self.Cs_rho = vgrid.Cs_r
self.Cs_w = vgrid.Cs_w
self.s_rho = vgrid.s_rho
self.s_w = vgrid.s_w
class s_coordinate(object):
"""
Song and Haidvogel (1994) vertical coordinate transformation (Vtransform=1) and
stretching functions (Vstretching=1).
return an object that can be indexed to return depths
s = s_coordinate(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, tcline, N, vtransform, vstretching, zeta=None):
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.tcline = tcline
self.N = int(N)
self.Np = self.N+1
self.vtransform = vtransform
self.vstretching = vstretching
self.hc = min(self.hmin, self.tcline)
self.Vtrans = 1
if self.vtransform==1:
if (self.tcline > self.hmin):
                warnings.warn('Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \n Tcline = %d and hmin = %d. \n You need to make sure that Tcline <= hmin when using transformation 1.' %(self.tcline,self.hmin))
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
lev = np.arange(1,self.N+1,1)
ds = 1.0 / self.N
self.s_rho = -self.c1 + (lev - self.p5) * ds
def _get_s_w(self):
lev = np.arange(0,self.Np,1)
ds = 1.0 / (self.Np-1)
self.s_w = -self.c1 + lev * ds
def _get_Cs_r(self):
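        # Song & Haidvogel (1994) stretching:
        #   C(s) = (1 - theta_b) * sinh(theta_s*s)/sinh(theta_s)
        #          + theta_b * (tanh(theta_s*(s + 1/2)) / (2*tanh(theta_s/2)) - 1/2)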
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_rho) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_rho + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_r = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_w) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_w + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_w = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_w = self.s_w
class s_coordinate_2(s_coordinate):
"""
<NAME> (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=2).
return an object that can be indexed to return depths
s = s_coordinate_2(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, tcline, N, vtransform, vstretching, zeta=None):
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.tcline = tcline
self.N = int(N)
self.Np = self.N+1
self.vtransform = vtransform
self.vstretching = vstretching
self.hc = self.tcline
self.Vtrans = 2
self.Aweight = 1.0
self.Bweight = 1.0
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_2, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_2, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_rho + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_rho + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_rho + self.c1)**self.Bweight))
self.Cs_r = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_r = Csur
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_w + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_w + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_w + self.c1)**self.Bweight))
self.Cs_w = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_w = Csur
else:
self.Cs_w = self.s_w
class s_coordinate_4(s_coordinate):
"""
<NAME> (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=4).
return an object that can be indexed to return depths
s = s_coordinate_4(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, tcline, N, vtransform, vstretching, zeta=None):
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.tcline = tcline
self.N = int(N)
self.Np = self.N+1
self.vtransform = vtransform
self.vstretching = vstretching
self.hc = self.tcline
self.Vtrans = 4
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_4, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_4, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_rho**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
(self.c1 - np.exp(-self.theta_b))
self.Cs_r = Cbot
else:
self.Cs_r = Csur
def _get_Cs_w(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_w**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
( self.c1 - np.exp(-self.theta_b) )
self.Cs_w = Cbot
else:
self.Cs_w = Csur
class z_r(object):
"""
return an object that can be indexed to return depths of rho point
z_r = z_r(h, hc, N, s_rho, Cs_r, zeta, Vtrans)
"""
def __init__(self, h, hc, N, s_rho, Cs_r, zeta, Vtrans):
self.h = h
self.hc = hc
self.N = N
self.s_rho = s_rho
self.Cs_r = Cs_r
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_r = np.empty((ti, self.N) + self.h.shape, 'd')
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.N):
z0 = self.hc * self.s_rho[k] + (self.h - self.hc) * self.Cs_r[k]
z_r[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4:
for n in range(ti):
for k in range(self.N):
z0 = (self.hc * self.s_rho[k] + self.h * self.Cs_r[k]) / \
(self.hc + self.h)
z_r[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_r[res_index])
class z_w(object):
"""
return an object that can be indexed to return depths of w point
z_w = z_w(h, hc, Np, s_w, Cs_w, zeta, Vtrans)
"""
def __init__(self, h, hc, Np, s_w, Cs_w, zeta, Vtrans):
self.h = h
self.hc = hc
self.Np = Np
self.s_w = s_w
self.Cs_w = Cs_w
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_w = np.empty((ti, self.Np) + self.h.shape, 'd')
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.Np):
z0 = self.hc * self.s_w[k] + (self.h - self.hc) * self.Cs_w[k]
z_w[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4:
for n in range(ti):
for k in range(self.Np):
z0 = (self.hc * self.s_w[k] + self.h * self.Cs_w[k]) / \
(self.hc + self.h)
z_w[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_w[res_index])
def get_z_levels(self):
"""
Set the depth levels z_r directly from the bathymetry (z_r = -h).
"""
self.z_r = -self.h
if len(self.z_r) == 0:
print(("No depth matrix found in file %s" % self.filename))
|
[
"numpy.tanh",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"datetime.datetime",
"numpy.arange",
"numpy.exp",
"numpy.squeeze",
"warnings.warn",
"numpy.cosh",
"numpy.sinh"
] |
[((131, 152), 'datetime.datetime', 'datetime', (['(2008)', '(8)', '(15)'], {}), '(2008, 8, 15)\n', (139, 152), False, 'from datetime import datetime\n'), ((168, 189), 'datetime.datetime', 'datetime', (['(2015)', '(7)', '(25)'], {}), '(2015, 7, 25)\n', (176, 189), False, 'from datetime import datetime\n'), ((1891, 1904), 'numpy.asarray', 'np.asarray', (['h'], {}), '(h)\n', (1901, 1904), True, 'import numpy as np\n'), ((3091, 3118), 'numpy.arange', 'np.arange', (['(1)', '(self.N + 1)', '(1)'], {}), '(1, self.N + 1, 1)\n', (3100, 3118), True, 'import numpy as np\n'), ((3233, 3257), 'numpy.arange', 'np.arange', (['(0)', 'self.Np', '(1)'], {}), '(0, self.Np, 1)\n', (3242, 3257), True, 'import numpy as np\n'), ((4563, 4576), 'numpy.asarray', 'np.asarray', (['h'], {}), '(h)\n', (4573, 4576), True, 'import numpy as np\n'), ((7425, 7438), 'numpy.asarray', 'np.asarray', (['h'], {}), '(h)\n', (7435, 7438), True, 'import numpy as np\n'), ((10228, 10270), 'numpy.empty', 'np.empty', (['((ti, self.N) + self.h.shape)', '"""d"""'], {}), "((ti, self.N) + self.h.shape, 'd')\n", (10236, 10270), True, 'import numpy as np\n'), ((10863, 10889), 'numpy.squeeze', 'np.squeeze', (['z_r[res_index]'], {}), '(z_r[res_index])\n', (10873, 10889), True, 'import numpy as np\n'), ((11836, 11879), 'numpy.empty', 'np.empty', (['((ti, self.Np) + self.h.shape)', '"""d"""'], {}), "((ti, self.Np) + self.h.shape, 'd')\n", (11844, 11879), True, 'import numpy as np\n'), ((12470, 12496), 'numpy.squeeze', 'np.squeeze', (['z_w[res_index]'], {}), '(z_w[res_index])\n', (12480, 12496), True, 'import numpy as np\n'), ((2689, 2706), 'numpy.zeros', 'np.zeros', (['h.shape'], {}), '(h.shape)\n', (2697, 2706), True, 'import numpy as np\n'), ((5051, 5068), 'numpy.zeros', 'np.zeros', (['h.shape'], {}), '(h.shape)\n', (5059, 5068), True, 'import numpy as np\n'), ((7858, 7875), 'numpy.zeros', 'np.zeros', (['h.shape'], {}), '(h.shape)\n', (7866, 7875), True, 'import numpy as np\n'), ((2312, 2578), 'warnings.warn', 'warnings.warn', (['("""Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \n Tcline = %d and hmin = %d. \n You need to make sure that Tcline <= hmin when using transformation 1."""\n % (self.Tcline, self.hmin))'], {}), '(\n """Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \n Tcline = %d and hmin = %d. 
\n You need to make sure that Tcline <= hmin when using transformation 1."""\n % (self.Tcline, self.hmin))\n', (2325, 2578), False, 'import warnings\n'), ((3405, 3439), 'numpy.sinh', 'np.sinh', (['(self.theta_s * self.s_rho)'], {}), '(self.theta_s * self.s_rho)\n', (3412, 3439), True, 'import numpy as np\n'), ((3442, 3463), 'numpy.sinh', 'np.sinh', (['self.theta_s'], {}), '(self.theta_s)\n', (3449, 3463), True, 'import numpy as np\n'), ((3822, 3854), 'numpy.sinh', 'np.sinh', (['(self.theta_s * self.s_w)'], {}), '(self.theta_s * self.s_w)\n', (3829, 3854), True, 'import numpy as np\n'), ((3857, 3878), 'numpy.sinh', 'np.sinh', (['self.theta_s'], {}), '(self.theta_s)\n', (3864, 3878), True, 'import numpy as np\n'), ((3485, 3531), 'numpy.tanh', 'np.tanh', (['(self.theta_s * (self.s_rho + self.p5))'], {}), '(self.theta_s * (self.s_rho + self.p5))\n', (3492, 3531), True, 'import numpy as np\n'), ((3900, 3944), 'numpy.tanh', 'np.tanh', (['(self.theta_s * (self.s_w + self.p5))'], {}), '(self.theta_s * (self.s_w + self.p5))\n', (3907, 3944), True, 'import numpy as np\n'), ((5640, 5674), 'numpy.cosh', 'np.cosh', (['(self.theta_s * self.s_rho)'], {}), '(self.theta_s * self.s_rho)\n', (5647, 5674), True, 'import numpy as np\n'), ((5702, 5723), 'numpy.cosh', 'np.cosh', (['self.theta_s'], {}), '(self.theta_s)\n', (5709, 5723), True, 'import numpy as np\n'), ((6381, 6413), 'numpy.cosh', 'np.cosh', (['(self.theta_s * self.s_w)'], {}), '(self.theta_s * self.s_w)\n', (6388, 6413), True, 'import numpy as np\n'), ((6441, 6462), 'numpy.cosh', 'np.cosh', (['self.theta_s'], {}), '(self.theta_s)\n', (6448, 6462), True, 'import numpy as np\n'), ((8454, 8488), 'numpy.cosh', 'np.cosh', (['(self.theta_s * self.s_rho)'], {}), '(self.theta_s * self.s_rho)\n', (8461, 8488), True, 'import numpy as np\n'), ((8516, 8537), 'numpy.cosh', 'np.cosh', (['self.theta_s'], {}), '(self.theta_s)\n', (8523, 8537), True, 'import numpy as np\n'), ((8648, 8675), 'numpy.exp', 'np.exp', (['(self.theta_b * Csur)'], {}), '(self.theta_b * Csur)\n', (8654, 8675), True, 'import numpy as np\n'), ((8722, 8743), 'numpy.exp', 'np.exp', (['(-self.theta_b)'], {}), '(-self.theta_b)\n', (8728, 8743), True, 'import numpy as np\n'), ((8913, 8945), 'numpy.cosh', 'np.cosh', (['(self.theta_s * self.s_w)'], {}), '(self.theta_s * self.s_w)\n', (8920, 8945), True, 'import numpy as np\n'), ((8973, 8994), 'numpy.cosh', 'np.cosh', (['self.theta_s'], {}), '(self.theta_s)\n', (8980, 8994), True, 'import numpy as np\n'), ((9103, 9130), 'numpy.exp', 'np.exp', (['(self.theta_b * Csur)'], {}), '(self.theta_b * Csur)\n', (9109, 9130), True, 'import numpy as np\n'), ((9178, 9199), 'numpy.exp', 'np.exp', (['(-self.theta_b)'], {}), '(-self.theta_b)\n', (9184, 9199), True, 'import numpy as np\n'), ((3569, 3600), 'numpy.tanh', 'np.tanh', (['(self.p5 * self.theta_s)'], {}), '(self.p5 * self.theta_s)\n', (3576, 3600), True, 'import numpy as np\n'), ((3982, 4013), 'numpy.tanh', 'np.tanh', (['(self.p5 * self.theta_s)'], {}), '(self.p5 * self.theta_s)\n', (3989, 4013), True, 'import numpy as np\n'), ((5794, 5840), 'numpy.sinh', 'np.sinh', (['(self.theta_b * (self.s_rho + self.c1))'], {}), '(self.theta_b * (self.s_rho + self.c1))\n', (5801, 5840), True, 'import numpy as np\n'), ((5868, 5889), 'numpy.sinh', 'np.sinh', (['self.theta_b'], {}), '(self.theta_b)\n', (5875, 5889), True, 'import numpy as np\n'), ((6533, 6577), 'numpy.sinh', 'np.sinh', (['(self.theta_b * (self.s_w + self.c1))'], {}), '(self.theta_b * (self.s_w + self.c1))\n', (6540, 6577), True, 'import numpy as np\n'), 
((6605, 6626), 'numpy.sinh', 'np.sinh', (['self.theta_b'], {}), '(self.theta_b)\n', (6612, 6626), True, 'import numpy as np\n')]
|
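Usage note for the s-coordinate classes above: an s_coordinate_* object is built once from the bathymetry and stretching parameters, and its z_r / z_w members are then indexed for depths. A minimal sketch, assuming the classes above are in scope; the bathymetry and stretching values below are made up for illustration, not taken from any real ROMS grid.

import numpy as np

# Hypothetical 10 x 12 bathymetry between 10 m and 500 m (illustration only).
h = np.linspace(10.0, 500.0, 120).reshape(10, 12)

# Vtransform=2 / Vstretching=4 stretching with made-up parameters.
scoord = s_coordinate_4(h, theta_b=0.4, theta_s=5.0, tcline=20.0, N=30,
                       vtransform=2, vstretching=4)

# z_r / z_w are indexable; with a 2-D zeta the time axis is squeezed away,
# leaving (level, eta, xi) arrays of depths (negative below the surface).
depths_rho = scoord.z_r[:]   # shape (30, 10, 12), rho points
depths_w = scoord.z_w[:]     # shape (31, 10, 12), w points
print(depths_rho.shape, depths_w.shape)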
import sys
import pytest
import numpy as np
from numpy.testing import assert_array_equal, IS_PYPY
class TestDLPack:
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_dunder_dlpack_refcount(self):
x = np.arange(5)
y = x.__dlpack__()
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
def test_dunder_dlpack_stream(self):
x = np.arange(5)
x.__dlpack__(stream=None)
with pytest.raises(RuntimeError):
x.__dlpack__(stream=1)
def test_strides_not_multiple_of_itemsize(self):
dt = np.dtype([('int', np.int32), ('char', np.int8)])
y = np.zeros((5,), dtype=dt)
z = y['int']
with pytest.raises(RuntimeError):
np._from_dlpack(z)
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_from_dlpack_refcount(self):
x = np.arange(5)
y = np._from_dlpack(x)
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
@pytest.mark.parametrize("dtype", [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
np.complex64, np.complex128
])
def test_dtype_passthrough(self, dtype):
x = np.arange(5, dtype=dtype)
y = np._from_dlpack(x)
assert y.dtype == x.dtype
assert_array_equal(x, y)
def test_invalid_dtype(self):
x = np.asarray(np.datetime64('2021-05-27'))
with pytest.raises(TypeError):
np._from_dlpack(x)
def test_invalid_byte_swapping(self):
dt = np.dtype('=i8').newbyteorder()
x = np.arange(5, dtype=dt)
with pytest.raises(TypeError):
np._from_dlpack(x)
def test_non_contiguous(self):
x = np.arange(25).reshape((5, 5))
y1 = x[0]
assert_array_equal(y1, np._from_dlpack(y1))
y2 = x[:, 0]
assert_array_equal(y2, np._from_dlpack(y2))
y3 = x[1, :]
assert_array_equal(y3, np._from_dlpack(y3))
y4 = x[1]
assert_array_equal(y4, np._from_dlpack(y4))
y5 = np.diagonal(x).copy()
assert_array_equal(y5, np._from_dlpack(y5))
@pytest.mark.parametrize("ndim", range(33))
def test_higher_dims(self, ndim):
shape = (1,) * ndim
x = np.zeros(shape, dtype=np.float64)
assert shape == np._from_dlpack(x).shape
def test_dlpack_device(self):
x = np.arange(5)
assert x.__dlpack_device__() == (1, 0)
y = np._from_dlpack(x)
assert y.__dlpack_device__() == (1, 0)
z = y[::2]
assert z.__dlpack_device__() == (1, 0)
def dlpack_deleter_exception(self):
x = np.arange(5)
_ = x.__dlpack__()
raise RuntimeError
def test_dlpack_destructor_exception(self):
with pytest.raises(RuntimeError):
self.dlpack_deleter_exception()
def test_readonly(self):
x = np.arange(5)
x.flags.writeable = False
with pytest.raises(TypeError):
x.__dlpack__()
def test_ndim0(self):
x = np.array(1.0)
y = np._from_dlpack(x)
assert_array_equal(x, y)
def test_size1dims_arrays(self):
x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
buffer=np.ones(1000, dtype=np.uint8), order='F')
y = np._from_dlpack(x)
assert_array_equal(x, y)
|
[
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"numpy.dtype",
"numpy.zeros",
"numpy.ones",
"sys.getrefcount",
"pytest.raises",
"pytest.mark.skipif",
"numpy.arange",
"numpy._from_dlpack",
"numpy.array",
"pytest.mark.parametrize",
"numpy.diagonal"
] |
[((124, 187), 'pytest.mark.skipif', 'pytest.mark.skipif', (['IS_PYPY'], {'reason': '"""PyPy can\'t get refcounts."""'}), '(IS_PYPY, reason="PyPy can\'t get refcounts.")\n', (142, 187), False, 'import pytest\n'), ((808, 871), 'pytest.mark.skipif', 'pytest.mark.skipif', (['IS_PYPY'], {'reason': '"""PyPy can\'t get refcounts."""'}), '(IS_PYPY, reason="PyPy can\'t get refcounts.")\n', (826, 871), False, 'import pytest\n'), ((1067, 1258), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.\n uint64, np.float16, np.float32, np.float64, np.complex64, np.complex128]'], {}), "('dtype', [np.int8, np.int16, np.int32, np.int64, np\n .uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.\n float64, np.complex64, np.complex128])\n", (1090, 1258), False, 'import pytest\n'), ((243, 255), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (252, 255), True, 'import numpy as np\n'), ((429, 441), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (438, 441), True, 'import numpy as np\n'), ((621, 669), 'numpy.dtype', 'np.dtype', (["[('int', np.int32), ('char', np.int8)]"], {}), "([('int', np.int32), ('char', np.int8)])\n", (629, 669), True, 'import numpy as np\n'), ((682, 706), 'numpy.zeros', 'np.zeros', (['(5,)'], {'dtype': 'dt'}), '((5,), dtype=dt)\n', (690, 706), True, 'import numpy as np\n'), ((925, 937), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (934, 937), True, 'import numpy as np\n'), ((950, 968), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (965, 968), True, 'import numpy as np\n'), ((1344, 1369), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'dtype'}), '(5, dtype=dtype)\n', (1353, 1369), True, 'import numpy as np\n'), ((1382, 1400), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (1397, 1400), True, 'import numpy as np\n'), ((1444, 1468), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (1462, 1468), False, 'from numpy.testing import assert_array_equal, IS_PYPY\n'), ((1726, 1748), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'dt'}), '(5, dtype=dt)\n', (1735, 1748), True, 'import numpy as np\n'), ((2403, 2436), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float64'}), '(shape, dtype=np.float64)\n', (2411, 2436), True, 'import numpy as np\n'), ((2534, 2546), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2543, 2546), True, 'import numpy as np\n'), ((2606, 2624), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (2621, 2624), True, 'import numpy as np\n'), ((2791, 2803), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2800, 2803), True, 'import numpy as np\n'), ((3039, 3051), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (3048, 3051), True, 'import numpy as np\n'), ((3191, 3204), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (3199, 3204), True, 'import numpy as np\n'), ((3217, 3235), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (3232, 3235), True, 'import numpy as np\n'), ((3244, 3268), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (3262, 3268), False, 'from numpy.testing import assert_array_equal, IS_PYPY\n'), ((3464, 3482), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (3479, 3482), True, 'import numpy as np\n'), ((3491, 3515), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (3509, 3515), False, 'from numpy.testing import 
assert_array_equal, IS_PYPY\n'), ((298, 316), 'sys.getrefcount', 'sys.getrefcount', (['x'], {}), '(x)\n', (313, 316), False, 'import sys\n'), ((351, 369), 'sys.getrefcount', 'sys.getrefcount', (['x'], {}), '(x)\n', (366, 369), False, 'import sys\n'), ((490, 517), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (503, 517), False, 'import pytest\n'), ((742, 769), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (755, 769), False, 'import pytest\n'), ((783, 801), 'numpy._from_dlpack', 'np._from_dlpack', (['z'], {}), '(z)\n', (798, 801), True, 'import numpy as np\n'), ((984, 1002), 'sys.getrefcount', 'sys.getrefcount', (['x'], {}), '(x)\n', (999, 1002), False, 'import sys\n'), ((1037, 1055), 'sys.getrefcount', 'sys.getrefcount', (['x'], {}), '(x)\n', (1052, 1055), False, 'import sys\n'), ((1527, 1554), 'numpy.datetime64', 'np.datetime64', (['"""2021-05-27"""'], {}), "('2021-05-27')\n", (1540, 1554), True, 'import numpy as np\n'), ((1570, 1594), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1583, 1594), False, 'import pytest\n'), ((1608, 1626), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (1623, 1626), True, 'import numpy as np\n'), ((1763, 1787), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1776, 1787), False, 'import pytest\n'), ((1801, 1819), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (1816, 1819), True, 'import numpy as np\n'), ((1948, 1967), 'numpy._from_dlpack', 'np._from_dlpack', (['y1'], {}), '(y1)\n', (1963, 1967), True, 'import numpy as np\n'), ((2022, 2041), 'numpy._from_dlpack', 'np._from_dlpack', (['y2'], {}), '(y2)\n', (2037, 2041), True, 'import numpy as np\n'), ((2096, 2115), 'numpy._from_dlpack', 'np._from_dlpack', (['y3'], {}), '(y3)\n', (2111, 2115), True, 'import numpy as np\n'), ((2167, 2186), 'numpy._from_dlpack', 'np._from_dlpack', (['y4'], {}), '(y4)\n', (2182, 2186), True, 'import numpy as np\n'), ((2255, 2274), 'numpy._from_dlpack', 'np._from_dlpack', (['y5'], {}), '(y5)\n', (2270, 2274), True, 'import numpy as np\n'), ((2924, 2951), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2937, 2951), False, 'import pytest\n'), ((3099, 3123), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3112, 3123), False, 'import pytest\n'), ((1683, 1698), 'numpy.dtype', 'np.dtype', (['"""=i8"""'], {}), "('=i8')\n", (1691, 1698), True, 'import numpy as np\n'), ((1868, 1881), 'numpy.arange', 'np.arange', (['(25)'], {}), '(25)\n', (1877, 1881), True, 'import numpy as np\n'), ((2202, 2216), 'numpy.diagonal', 'np.diagonal', (['x'], {}), '(x)\n', (2213, 2216), True, 'import numpy as np\n'), ((2462, 2480), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (2477, 2480), True, 'import numpy as np\n'), ((3410, 3439), 'numpy.ones', 'np.ones', (['(1000)'], {'dtype': 'np.uint8'}), '(1000, dtype=np.uint8)\n', (3417, 3439), True, 'import numpy as np\n')]
|
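Usage note for the DLPack tests above: they drive NumPy's DLPack support through the private np._from_dlpack helper, while newer NumPy releases expose the same import publicly as np.from_dlpack. A small sketch of the round trip the tests rely on; the public name and the getattr fallback are assumptions about the installed NumPy version.

import numpy as np

x = np.arange(10, dtype=np.float32)

# __dlpack_device__ reports (device_type, device_id); 1 means CPU.
print(x.__dlpack_device__())    # (1, 0)

# Export to a DLPack capsule and re-import it as an ndarray.
from_dlpack = getattr(np, "from_dlpack", None) or np._from_dlpack
y = from_dlpack(x)

# The import is zero-copy: the new array shares the exporter's memory.
print(np.shares_memory(x, y))     # True
print(np.array_equal(x, y))       # True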
# Copyright 2012 <NAME>
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
#
# IGMP v1/v2
#
# 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Ver * | Type | MRT/Unused ** | Checksum |
# +-------+-------+---------------+-------------------------------+
# | Group Address |
# +-------------------------------+-------------------------------+
#
# * In v2, there is no Version field, and Type is the whole 8 bits
# ** Max Response Time in v2 only
#
#======================================================================
#TODO: Support for IGMP v3
import struct
from packet_utils import *
from packet_base import packet_base
from pox.lib.addresses import *
MEMBERSHIP_QUERY = 0x11
MEMBERSHIP_REPORT = 0x12
MEMBERSHIP_REPORT_V2 = 0x16
LEAVE_GROUP_V2 = 0x17
# IGMP multicast address
IGMP_ADDRESS = IPAddr("172.16.31.10")
# IGMP IP protocol
IGMP_PROTOCOL = 2
class igmp (packet_base):
"""
IGMP Message
"""
MIN_LEN = 8
IGMP_ADDRESS = IGMP_ADDRESS
IGMP_PROTOCOL = IGMP_PROTOCOL
MEMBERSHIP_QUERY = MEMBERSHIP_QUERY
MEMBERSHIP_REPORT = MEMBERSHIP_REPORT
MEMBERSHIP_REPORT_V2 = MEMBERSHIP_REPORT_V2
LEAVE_GROUP_V2 = LEAVE_GROUP_V2
def __init__(self, raw=None, prev=None, **kw):
packet_base.__init__(self)
self.prev = prev
self.ver_and_type = 0
self.max_response_time = 0
self.csum = 0
self.address = None
self.extra = b''
if raw is not None:
self.parse(raw)
self._init(kw)
def hdr (self, payload):
s = struct.pack("!BBHi", self.ver_and_type, self.max_response_time,
0, self.address.toSigned(networkOrder=False))
s += self.extra
self.csum = checksum(s)
s = struct.pack("!BBHi", self.ver_and_type, self.max_response_time,
self.csum, self.address.toSigned(networkOrder=False))
s += self.extra
return s
def parse (self, raw):
assert isinstance(raw, bytes)
self.raw = raw
dlen = len(raw)
if dlen < self.MIN_LEN:
self.msg('packet data too short to parse')
return None
self.ver_and_type, self.max_response_time, self.csum, ip = \
struct.unpack("!BBHi", raw[:self.MIN_LEN])
self.extra = raw[self.MIN_LEN:]
self.address = IPAddr(ip, networkOrder = False)
s = struct.pack("!BBHi", self.ver_and_type, self.max_response_time,
0, self.address.toSigned(networkOrder=False))
s += self.extra
csum = checksum(s)
if csum != self.csum:
self.err("IGMP hecksums don't match")
else:
self.parsed = True
def __str__ (self):
s = "[IGMP "
s += "vt:%02x %s" % (self.ver_and_type, self.address)
return s + "]"
|
[
"packet_base.packet_base.__init__",
"struct.unpack"
] |
[((2286, 2312), 'packet_base.packet_base.__init__', 'packet_base.__init__', (['self'], {}), '(self)\n', (2306, 2312), False, 'from packet_base import packet_base\n'), ((3184, 3226), 'struct.unpack', 'struct.unpack', (['"""!BBHi"""', 'raw[:self.MIN_LEN]'], {}), "('!BBHi', raw[:self.MIN_LEN])\n", (3197, 3226), False, 'import struct\n')]
|
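Usage note for the igmp class above: POX's packet_base machinery normally drives hdr() via pack(), but hdr() can also be called directly since its payload argument is unused here. Below is a sketch of a serialize/parse round trip; the multicast group address is an arbitrary example, and keyword initialization relies on packet_base._init(kw) storing the keywords as attributes.

# Build a v2 membership report for an example group and serialize it.
report = igmp(ver_and_type=MEMBERSHIP_REPORT_V2,
              max_response_time=0,
              address=IPAddr("239.1.1.1"))
raw = report.hdr(b'')     # fills in the checksum computed in hdr() above

# Parsing the bytes back recovers the same fields and validates the checksum.
parsed = igmp(raw=raw)
assert parsed.parsed
assert parsed.ver_and_type == MEMBERSHIP_REPORT_V2
assert parsed.address == IPAddr("239.1.1.1")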
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python objects for modeling Consumer Price Index (CPI) data structures.
"""
import collections
from datetime import date
from pandas import json_normalize
# CPI tools
from .errors import CPIObjectDoesNotExist
from .defaults import DEFAULTS_SERIES_ATTRS
# Logging
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class MappingList(list):
"""
A custom list that allows for lookups by attribute.
"""
def __init__(self):
self._id_dict = {}
self._name_dict = {}
def get_by_id(self, value):
try:
return self._id_dict[value]
except KeyError:
raise CPIObjectDoesNotExist("Object with id {} could not be found".format(value))
def get_by_name(self, value):
try:
return self._name_dict[value]
except KeyError:
raise CPIObjectDoesNotExist("Object with name {} could not be found".format(value))
def append(self, item):
"""
Override of the default append method that allows dictionary-style lookups
"""
# Add to dictionary lookup
self._id_dict[item.id] = item
self._name_dict[item.name] = item
# Append to list
super(MappingList, self).append(item)
class SeriesList(list):
"""
A custom list of indexes in a series.
"""
SURVEYS = {
'All urban consumers': 'CU',
'Urban wage earners and clerical workers': 'CW'
}
SEASONALITIES = {
True: 'S',
False: 'U'
}
def __init__(self, periodicities, areas, items):
self.periodicities = periodicities
self.areas = areas
self.items = items
self._dict = {}
def to_dataframe(self):
"""
Returns the list as a pandas DataFrame.
"""
dict_list = [obj.__dict__() for obj in self]
return json_normalize(dict_list, sep="_")
def append(self, item):
"""
Override of the default append method that allows validation and dictionary-style lookups
"""
# Valid item type
if not isinstance(item, Series):
raise TypeError("Only Series objects can be added to this list.")
# Add to dictionary lookup
self._dict[item.id] = item
# Append to list
super(SeriesList, self).append(item)
def get_by_id(self, value):
"""
Returns the CPI series object with the provided identifier code.
"""
logger.debug("Retrieving series with id {}".format(value))
try:
return self._dict[value]
except KeyError:
raise CPIObjectDoesNotExist("Object with id {} could not be found".format(value))
def get(
self,
survey=DEFAULTS_SERIES_ATTRS['survey'],
seasonally_adjusted=DEFAULTS_SERIES_ATTRS['seasonally_adjusted'],
periodicity=DEFAULTS_SERIES_ATTRS['periodicity'],
area=DEFAULTS_SERIES_ATTRS['area'],
items=DEFAULTS_SERIES_ATTRS['items']
):
"""
Returns a single CPI Series object based on the input.
The default series is returned if no configuration is made via the keyword arguments.
"""
# Get all the codes for the humanized inputs.
try:
survey_code = self.SURVEYS[survey]
except KeyError:
raise CPIObjectDoesNotExist("Survey with the name {} does not exist".format(survey))
try:
seasonality_code = self.SEASONALITIES[seasonally_adjusted]
except KeyError:
raise CPIObjectDoesNotExist("Seasonality {} does not exist".format(seasonally_adjusted))
# Generate the series id
series_id = "{}{}{}{}{}".format(
survey_code,
seasonality_code,
self.periodicities.get_by_name(periodicity).code,
self.areas.get_by_name(area).code,
self.items.get_by_name(items).code
)
# Pull the series
return self.get_by_id(series_id)
class BaseObject(object):
"""
An abstract base class for all the models.
"""
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self.__str__())
def __eq__(self, other):
return self.id == other.id
def __str__(self):
return self.name
class Area(BaseObject):
"""
A geographical area where prices are gathered monthly.
"""
def __init__(self, code, name):
self.id = code
self.code = code
self.name = name
def __dict__(self):
return {
"id": self.id,
"code": self.code,
"name": self.name
}
class Item(BaseObject):
"""
A consumer item that has its price tracked.
"""
def __init__(self, code, name):
self.id = code
self.code = code
self.name = name
def __dict__(self):
return {
"id": self.id,
"code": self.code,
"name": self.name
}
class Period(BaseObject):
"""
A time period tracked by the CPI.
"""
def __init__(self, code, abbreviation, name):
self.id = code
self.code = code
self.abbreviation = abbreviation
self.name = name
def __dict__(self):
return {
"id": self.id,
"code": self.code,
"abbreviation": self.abbreviation,
"name": self.name,
"month": self.month,
"type": self.type
}
@property
def month(self):
"""
Returns the month integer for the period.
"""
if self.id in ["M13", "S01", "S03"]:
return 1
elif self.id == "S02":
return 7
else:
return int(self.id.replace("M", ""))
@property
def type(self):
"""
Returns a string classifying the period.
"""
if self.id in ["M13", "S03"]:
return "annual"
elif self.id in ["S01", "S02"]:
return "semiannual"
else:
return "monthly"
class Periodicity(BaseObject):
"""
A time interval tracked by the CPI.
"""
def __init__(self, code, name):
self.id = code
self.code = code
self.name = name
def __dict__(self):
return {
"id": self.id,
"code": self.code,
"name": self.name
}
class Series(BaseObject):
"""
A set of CPI data observed over an extended period of time over consistent time intervals ranging from
a specific consumer item in a specific geographical area whose price is gathered monthly to a category
of worker in a specific industry whose employment rate is being recorded monthly, etc.
Yes, that's the official government definition. I'm not kidding.
"""
def __init__(
self,
id,
title,
survey,
seasonally_adjusted,
periodicity,
area,
items
):
self.id = id
self.title = title
self.survey = survey
self.seasonally_adjusted = seasonally_adjusted
self.periodicity = periodicity
self.area = area
self.items = items
self._indexes = {
'annual': collections.OrderedDict(),
'monthly': collections.OrderedDict(),
'semiannual': collections.OrderedDict(),
}
def __str__(self):
return "{}: {}".format(self.id, self.title)
def __dict__(self):
return {
"id": self.id,
"title": self.title,
"survey": self.survey,
"seasonally_adjusted": self.seasonally_adjusted,
"periodicity": self.periodicity.__dict__(),
"area": self.area.__dict__(),
"items": self.items.__dict__()
}
def to_dataframe(self):
"""
Returns this series and all its indexes as a pandas DataFrame.
"""
dict_list = [obj.__dict__() for obj in self.indexes]
return json_normalize(dict_list, sep="_")
@property
def indexes(self):
flat = []
for obj in self._indexes.values():
flat.extend(obj.values())
return flat
@property
def latest_month(self):
if not self._indexes['monthly']:
return None
return max([i.date for i in self._indexes['monthly'].values()])
@property
def latest_year(self):
if not self._indexes['annual']:
return None
return max([i.year for i in self._indexes['annual'].values()])
def get_index_by_date(self, date, period_type='annual'):
try:
return self._indexes[period_type][date]
except KeyError:
raise CPIObjectDoesNotExist("Index of {} type for {} does not exist".format(period_type, date))
class Index(BaseObject):
"""
A Consumer Price Index value generated by the Bureau of Labor Statistics.
"""
def __init__(self, series, year, period, value):
self.series = series
self.year = year
self.period = period
self.value = value
def __str__(self):
return "{} ({}): {}".format(self.date, self.period, self.value)
def __eq__(self, other):
return (
self.value == other.value and
self.series == other.series and
self.year == other.year and
self.period == other.period
)
def __dict__(self):
return {
"series": self.series.__dict__(),
"year": self.year,
"date": str(self.date),
"period": self.period.__dict__(),
"value": self.value
}
@property
def date(self):
"""
Returns a Python date object based on the index's year and period.
"""
return date(self.year, self.period.month, 1)
|
[
"pandas.json_normalize",
"datetime.date",
"logging.NullHandler",
"collections.OrderedDict",
"logging.getLogger"
] |
[((340, 367), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (357, 367), False, 'import logging\n'), ((386, 407), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (405, 407), False, 'import logging\n'), ((1930, 1964), 'pandas.json_normalize', 'json_normalize', (['dict_list'], {'sep': '"""_"""'}), "(dict_list, sep='_')\n", (1944, 1964), False, 'from pandas import json_normalize\n'), ((8084, 8118), 'pandas.json_normalize', 'json_normalize', (['dict_list'], {'sep': '"""_"""'}), "(dict_list, sep='_')\n", (8098, 8118), False, 'from pandas import json_normalize\n'), ((9908, 9945), 'datetime.date', 'date', (['self.year', 'self.period.month', '(1)'], {}), '(self.year, self.period.month, 1)\n', (9912, 9945), False, 'from datetime import date\n'), ((7319, 7344), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (7342, 7344), False, 'import collections\n'), ((7369, 7394), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (7392, 7394), False, 'import collections\n'), ((7422, 7447), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (7445, 7447), False, 'import collections\n')]
|
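Usage note for the CPI models above: a Series is assembled from Area / Item / Periodicity metadata and holds Index observations keyed by date. A small sketch with made-up codes and values; the real library populates _indexes while parsing BLS files, so writing to it directly here is purely illustrative.

from datetime import date

periodicity = Periodicity("R", "Monthly")
area = Area("0000", "U.S. city average")
item = Item("SA0", "All items")

series = Series(
    id="CUUR0000SA0",
    title="All items in U.S. city average, all urban consumers",
    survey="All urban consumers",
    seasonally_adjusted=False,
    periodicity=periodicity,
    area=area,
    items=item,
)

# Attach one annual observation (M13 is treated as the annual-average period).
period = Period("M13", "AN AV", "Annual")
obs = Index(series, 2020, period, 258.8)
series._indexes['annual'][obs.date] = obs

print(series.latest_year)                          # 2020
print(series.get_index_by_date(date(2020, 1, 1)))  # 2020-01-01 (Annual): 258.8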
import logging
from monai.apps.deepgrow.interaction import Interaction
from monai.apps.deepgrow.transforms import (
AddGuidanceSignald,
AddInitialSeedPointd,
AddRandomGuidanced,
FindAllValidSlicesd,
FindDiscrepancyRegionsd,
SpatialCropForegroundd,
)
from monai.inferers import SimpleInferer
from monai.losses import DiceLoss
from monai.transforms import (
Activationsd,
AddChanneld,
AsChannelFirstd,
AsDiscreted,
Compose,
LoadImaged,
NormalizeIntensityd,
Orientationd,
Resized,
Spacingd,
ToNumpyd,
ToTensord,
)
from monailabel.utils.train.basic_train import BasicTrainTask
from .handler import DeepgrowStatsHandler
from .transforms import Random2DSlice
logger = logging.getLogger(__name__)
class TrainDeepgrow(BasicTrainTask):
def __init__(
self,
output_dir,
train_datalist,
val_datalist,
network,
dimension,
roi_size,
model_size,
max_train_interactions,
max_val_interactions,
**kwargs,
):
super().__init__(output_dir, train_datalist, val_datalist, network, **kwargs)
self.dimension = dimension
self.roi_size = roi_size
self.model_size = model_size
self.max_train_interactions = max_train_interactions
self.max_val_interactions = max_val_interactions
def get_click_transforms(self):
return Compose(
[
Activationsd(keys="pred", sigmoid=True),
ToNumpyd(keys=("image", "label", "pred", "probability", "guidance")),
FindDiscrepancyRegionsd(label="label", pred="pred", discrepancy="discrepancy"),
AddRandomGuidanced(guidance="guidance", discrepancy="discrepancy", probability="probability"),
AddGuidanceSignald(image="image", guidance="guidance", batched=True),
ToTensord(keys=("image", "label")),
]
)
def loss_function(self):
return DiceLoss(sigmoid=True, squared_pred=True)
def train_pre_transforms(self):
# Dataset preparation
t = [
LoadImaged(keys=("image", "label")),
AsChannelFirstd(keys=("image", "label")),
Spacingd(keys=("image", "label"), pixdim=(1.0, 1.0, 1.0), mode=("bilinear", "nearest")),
Orientationd(keys=("image", "label"), axcodes="RAS"),
]
# Pick random slice (run more epochs to cover max slices for 2D training)
if self.dimension == 2:
t.append(Random2DSlice(image="image", label="label"))
# Training
t.extend(
[
AddChanneld(keys=("image", "label")),
SpatialCropForegroundd(keys=("image", "label"), source_key="label", spatial_size=self.roi_size),
Resized(keys=("image", "label"), spatial_size=self.model_size, mode=("area", "nearest")),
NormalizeIntensityd(keys="image"),
]
)
if self.dimension == 3:
t.append(FindAllValidSlicesd(label="label", sids="sids"))
t.extend(
[
AddInitialSeedPointd(label="label", guidance="guidance", sids="sids"),
AddGuidanceSignald(image="image", guidance="guidance"),
ToTensord(keys=("image", "label")),
]
)
return Compose(t)
def train_post_transforms(self):
return Compose(
[
Activationsd(keys="pred", sigmoid=True),
AsDiscreted(keys="pred", threshold_values=True, logit_thresh=0.5),
]
)
def train_handlers(self):
handlers = super().train_handlers()
handlers.append(DeepgrowStatsHandler(log_dir=self.output_dir, tag_name="val_dice", image_interval=1))
return handlers
def val_pre_transforms(self):
return self.train_pre_transforms()
def val_inferer(self):
return SimpleInferer()
def train_iteration_update(self):
return Interaction(
transforms=self.get_click_transforms(),
max_interactions=self.max_train_interactions,
key_probability="probability",
train=True,
)
def val_iteration_update(self):
return Interaction(
transforms=self.get_click_transforms(),
max_interactions=self.max_val_interactions,
key_probability="probability",
train=False,
)
|
[
"monai.transforms.AddChanneld",
"monai.transforms.ToNumpyd",
"monai.apps.deepgrow.transforms.AddGuidanceSignald",
"monai.transforms.LoadImaged",
"monai.transforms.AsChannelFirstd",
"monai.transforms.Orientationd",
"monai.inferers.SimpleInferer",
"monai.transforms.NormalizeIntensityd",
"monai.apps.deepgrow.transforms.AddInitialSeedPointd",
"monai.losses.DiceLoss",
"monai.transforms.Activationsd",
"monai.apps.deepgrow.transforms.AddRandomGuidanced",
"monai.apps.deepgrow.transforms.FindAllValidSlicesd",
"monai.transforms.Resized",
"monai.transforms.AsDiscreted",
"monai.transforms.ToTensord",
"monai.transforms.Spacingd",
"monai.transforms.Compose",
"monai.apps.deepgrow.transforms.SpatialCropForegroundd",
"monai.apps.deepgrow.transforms.FindDiscrepancyRegionsd",
"logging.getLogger"
] |
[((738, 765), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (755, 765), False, 'import logging\n'), ((2006, 2047), 'monai.losses.DiceLoss', 'DiceLoss', ([], {'sigmoid': '(True)', 'squared_pred': '(True)'}), '(sigmoid=True, squared_pred=True)\n', (2014, 2047), False, 'from monai.losses import DiceLoss\n'), ((3375, 3385), 'monai.transforms.Compose', 'Compose', (['t'], {}), '(t)\n', (3382, 3385), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((3956, 3971), 'monai.inferers.SimpleInferer', 'SimpleInferer', ([], {}), '()\n', (3969, 3971), False, 'from monai.inferers import SimpleInferer\n'), ((2141, 2176), 'monai.transforms.LoadImaged', 'LoadImaged', ([], {'keys': "('image', 'label')"}), "(keys=('image', 'label'))\n", (2151, 2176), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((2190, 2230), 'monai.transforms.AsChannelFirstd', 'AsChannelFirstd', ([], {'keys': "('image', 'label')"}), "(keys=('image', 'label'))\n", (2205, 2230), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((2244, 2335), 'monai.transforms.Spacingd', 'Spacingd', ([], {'keys': "('image', 'label')", 'pixdim': '(1.0, 1.0, 1.0)', 'mode': "('bilinear', 'nearest')"}), "(keys=('image', 'label'), pixdim=(1.0, 1.0, 1.0), mode=('bilinear',\n 'nearest'))\n", (2252, 2335), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((2345, 2397), 'monai.transforms.Orientationd', 'Orientationd', ([], {'keys': "('image', 'label')", 'axcodes': '"""RAS"""'}), "(keys=('image', 'label'), axcodes='RAS')\n", (2357, 2397), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((1465, 1504), 'monai.transforms.Activationsd', 'Activationsd', ([], {'keys': '"""pred"""', 'sigmoid': '(True)'}), "(keys='pred', sigmoid=True)\n", (1477, 1504), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((1522, 1590), 'monai.transforms.ToNumpyd', 'ToNumpyd', ([], {'keys': "('image', 'label', 'pred', 'probability', 'guidance')"}), "(keys=('image', 'label', 'pred', 'probability', 'guidance'))\n", (1530, 1590), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((1608, 1686), 'monai.apps.deepgrow.transforms.FindDiscrepancyRegionsd', 'FindDiscrepancyRegionsd', ([], {'label': '"""label"""', 'pred': '"""pred"""', 'discrepancy': '"""discrepancy"""'}), "(label='label', pred='pred', discrepancy='discrepancy')\n", (1631, 1686), False, 'from monai.apps.deepgrow.transforms import AddGuidanceSignald, AddInitialSeedPointd, AddRandomGuidanced, FindAllValidSlicesd, FindDiscrepancyRegionsd, SpatialCropForegroundd\n'), ((1704, 1801), 
'monai.apps.deepgrow.transforms.AddRandomGuidanced', 'AddRandomGuidanced', ([], {'guidance': '"""guidance"""', 'discrepancy': '"""discrepancy"""', 'probability': '"""probability"""'}), "(guidance='guidance', discrepancy='discrepancy',\n probability='probability')\n", (1722, 1801), False, 'from monai.apps.deepgrow.transforms import AddGuidanceSignald, AddInitialSeedPointd, AddRandomGuidanced, FindAllValidSlicesd, FindDiscrepancyRegionsd, SpatialCropForegroundd\n'), ((1815, 1883), 'monai.apps.deepgrow.transforms.AddGuidanceSignald', 'AddGuidanceSignald', ([], {'image': '"""image"""', 'guidance': '"""guidance"""', 'batched': '(True)'}), "(image='image', guidance='guidance', batched=True)\n", (1833, 1883), False, 'from monai.apps.deepgrow.transforms import AddGuidanceSignald, AddInitialSeedPointd, AddRandomGuidanced, FindAllValidSlicesd, FindDiscrepancyRegionsd, SpatialCropForegroundd\n'), ((1901, 1935), 'monai.transforms.ToTensord', 'ToTensord', ([], {'keys': "('image', 'label')"}), "(keys=('image', 'label'))\n", (1910, 1935), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((2658, 2694), 'monai.transforms.AddChanneld', 'AddChanneld', ([], {'keys': "('image', 'label')"}), "(keys=('image', 'label'))\n", (2669, 2694), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((2712, 2811), 'monai.apps.deepgrow.transforms.SpatialCropForegroundd', 'SpatialCropForegroundd', ([], {'keys': "('image', 'label')", 'source_key': '"""label"""', 'spatial_size': 'self.roi_size'}), "(keys=('image', 'label'), source_key='label',\n spatial_size=self.roi_size)\n", (2734, 2811), False, 'from monai.apps.deepgrow.transforms import AddGuidanceSignald, AddInitialSeedPointd, AddRandomGuidanced, FindAllValidSlicesd, FindDiscrepancyRegionsd, SpatialCropForegroundd\n'), ((2825, 2917), 'monai.transforms.Resized', 'Resized', ([], {'keys': "('image', 'label')", 'spatial_size': 'self.model_size', 'mode': "('area', 'nearest')"}), "(keys=('image', 'label'), spatial_size=self.model_size, mode=('area',\n 'nearest'))\n", (2832, 2917), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((2931, 2964), 'monai.transforms.NormalizeIntensityd', 'NormalizeIntensityd', ([], {'keys': '"""image"""'}), "(keys='image')\n", (2950, 2964), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((3043, 3090), 'monai.apps.deepgrow.transforms.FindAllValidSlicesd', 'FindAllValidSlicesd', ([], {'label': '"""label"""', 'sids': '"""sids"""'}), "(label='label', sids='sids')\n", (3062, 3090), False, 'from monai.apps.deepgrow.transforms import AddGuidanceSignald, AddInitialSeedPointd, AddRandomGuidanced, FindAllValidSlicesd, FindDiscrepancyRegionsd, SpatialCropForegroundd\n'), ((3140, 3209), 'monai.apps.deepgrow.transforms.AddInitialSeedPointd', 'AddInitialSeedPointd', ([], {'label': '"""label"""', 'guidance': '"""guidance"""', 'sids': '"""sids"""'}), "(label='label', guidance='guidance', sids='sids')\n", (3160, 3209), False, 'from monai.apps.deepgrow.transforms import AddGuidanceSignald, 
AddInitialSeedPointd, AddRandomGuidanced, FindAllValidSlicesd, FindDiscrepancyRegionsd, SpatialCropForegroundd\n'), ((3227, 3281), 'monai.apps.deepgrow.transforms.AddGuidanceSignald', 'AddGuidanceSignald', ([], {'image': '"""image"""', 'guidance': '"""guidance"""'}), "(image='image', guidance='guidance')\n", (3245, 3281), False, 'from monai.apps.deepgrow.transforms import AddGuidanceSignald, AddInitialSeedPointd, AddRandomGuidanced, FindAllValidSlicesd, FindDiscrepancyRegionsd, SpatialCropForegroundd\n'), ((3299, 3333), 'monai.transforms.ToTensord', 'ToTensord', ([], {'keys': "('image', 'label')"}), "(keys=('image', 'label'))\n", (3308, 3333), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((3478, 3517), 'monai.transforms.Activationsd', 'Activationsd', ([], {'keys': '"""pred"""', 'sigmoid': '(True)'}), "(keys='pred', sigmoid=True)\n", (3490, 3517), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n'), ((3535, 3600), 'monai.transforms.AsDiscreted', 'AsDiscreted', ([], {'keys': '"""pred"""', 'threshold_values': '(True)', 'logit_thresh': '(0.5)'}), "(keys='pred', threshold_values=True, logit_thresh=0.5)\n", (3546, 3600), False, 'from monai.transforms import Activationsd, AddChanneld, AsChannelFirstd, AsDiscreted, Compose, LoadImaged, NormalizeIntensityd, Orientationd, Resized, Spacingd, ToNumpyd, ToTensord\n')]
|
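Usage note for TrainDeepgrow above: the task wires the deepgrow click transforms into BasicTrainTask. A sketch of instantiating it for a 3-D model; the datalists, paths and network are placeholders, BasicTrainTask's remaining options are assumed to be usable at their defaults, and the BasicUNet argument names follow recent MONAI (older releases used dimensions instead of spatial_dims).

from monai.networks.nets import BasicUNet

# Placeholder datalists: each entry points at an image/label pair on disk.
train_datalist = [{"image": "img001.nii.gz", "label": "lab001.nii.gz"}]
val_datalist = [{"image": "img002.nii.gz", "label": "lab002.nii.gz"}]

task = TrainDeepgrow(
    output_dir="/tmp/deepgrow_3d",
    train_datalist=train_datalist,
    val_datalist=val_datalist,
    # 3 input channels: the image plus the positive/negative guidance signals.
    network=BasicUNet(spatial_dims=3, in_channels=3, out_channels=1),
    dimension=3,
    roi_size=(128, 128, 128),
    model_size=(128, 128, 128),
    max_train_interactions=15,
    max_val_interactions=5,
)

# The transform pipelines can be inspected without starting a run.
print(task.train_pre_transforms())
print(task.get_click_transforms())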
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 4 07:10:55 2020
@author: sj
"""
import numpy as np
# tile (0,0) is the top-left (north-west) corner; the US lies to the west, Asia to the east
# only validated for northern/eastern (Asia) coordinates
# x indexes latitude (180 degree range), y indexes longitude (360 degree range)
# x = 114288 # latitude, filepath
# y = 214078 # longitude, filename
# z = zoom level
# x,y,z = 114288,214078,18
def g2latlng(x,y,z=18,vbs=0):
x,y = x+0.5,y+0.5 # to center
n = np.power(2,z)
lng = y / n * 360.0 - 180.0
lat_rad = np.arctan(np.sinh(np.pi * (1 - 2 * x / n)))
lat = lat_rad * 180.0 / np.pi
if vbs:
print(x,lat,y,lng)
return lat,lng
def to60(lat,lng,vbs=0):
lat0 = np.floor(lat)
tmp = (lat - lat0) * 60
lat1 = np.floor(tmp)
lat2 = (tmp - lat1) * 60
lat = int(lat0),int(lat1),lat2
lng0 = np.floor(lng)
tmp = (lng - lng0) * 60
lng1 = np.floor(tmp)
lng2 = (tmp - lng1) * 60
lng = int(lng0),int(lng1),lng2
if vbs:
print(lat,lng)
return lat,lng
if __name__ == '__main__':
if True:
x,y,z = 114453,213769,18
lat,lng = g2latlng(x,y,z,vbs=1)
to60(lat,lng,vbs=1)
|
[
"numpy.power",
"numpy.floor",
"numpy.sinh"
] |
[((444, 458), 'numpy.power', 'np.power', (['(2)', 'z'], {}), '(2, z)\n', (452, 458), True, 'import numpy as np\n'), ((686, 699), 'numpy.floor', 'np.floor', (['lat'], {}), '(lat)\n', (694, 699), True, 'import numpy as np\n'), ((741, 754), 'numpy.floor', 'np.floor', (['tmp'], {}), '(tmp)\n', (749, 754), True, 'import numpy as np\n'), ((843, 856), 'numpy.floor', 'np.floor', (['lng'], {}), '(lng)\n', (851, 856), True, 'import numpy as np\n'), ((898, 911), 'numpy.floor', 'np.floor', (['tmp'], {}), '(tmp)\n', (906, 911), True, 'import numpy as np\n'), ((516, 548), 'numpy.sinh', 'np.sinh', (['(np.pi * (1 - 2 * x / n))'], {}), '(np.pi * (1 - 2 * x / n))\n', (523, 548), True, 'import numpy as np\n')]
|
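Usage note for the tile helpers above: g2latlng maps slippy-map tile indices to the latitude/longitude of the tile centre. The inverse below is a complementary sketch (latlng2g is not part of the original script); it uses the standard Web-Mercator relations, so flooring the result recovers the original tile indices.

import numpy as np

def latlng2g(lat, lng, z=18):
    # inverse of g2latlng: decimal degrees -> tile indices (x = row, y = column)
    n = np.power(2, z)
    lat_rad = np.radians(lat)
    x = (1.0 - np.arcsinh(np.tan(lat_rad)) / np.pi) / 2.0 * n
    y = (lng + 180.0) / 360.0 * n
    return int(np.floor(x)), int(np.floor(y))

# Round trip with the example tile used in the script above.
lat, lng = g2latlng(114453, 213769, 18)
print(latlng2g(lat, lng))   # (114453, 213769)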
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from unittest import mock
from airflow.providers.google.cloud.transfers.gdrive_to_gcs import GoogleDriveToGCSOperator
FOLDER_ID = os.environ.get("GCP_GDRIVE_FOLDER_ID", "abcd1234")
DRIVE_ID = os.environ.get("GCP_GDRIVE_DRIVE_ID", "abcd1234")
FILE_NAME = os.environ.get("GCP_GDRIVE_TO_GCS_FILE_NAME", "gdrive_to_gcs_file.txt")
BUCKET = os.environ.get("GCP_GDRIVE_TO_GCS_BUCKET", "gdrive-to-gcs-bucket")
OBJECT = "prefix/test.txt"
GCP_CONN_ID = "google_cloud_default"
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
class TestGoogleDriveToGCSOperator:
@mock.patch("airflow.providers.google.cloud.transfers.gdrive_to_gcs.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.gdrive_to_gcs.GoogleDriveHook")
def test_execute(self, mock_gdrive_hook, mock_gcs_hook):
context = {}
op = GoogleDriveToGCSOperator(
task_id="test_task",
folder_id=FOLDER_ID,
file_name=FILE_NAME,
drive_id=DRIVE_ID,
bucket_name=BUCKET,
object_name=OBJECT,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
meta = {"id": "123xyz"}
mock_gdrive_hook.return_value.get_file_id.return_value = meta
op.execute(context)
mock_gdrive_hook.return_value.get_file_id.assert_called_once_with(
folder_id=FOLDER_ID, file_name=FILE_NAME, drive_id=DRIVE_ID
)
mock_gdrive_hook.return_value.download_file.assert_called_once_with(
file_id=meta["id"], file_handle=mock.ANY
)
mock_gcs_hook.return_value.provide_file_and_upload.assert_called_once_with(
bucket_name=BUCKET, object_name=OBJECT
)
assert op.dry_run() is None
|
[
"os.environ.get",
"airflow.providers.google.cloud.transfers.gdrive_to_gcs.GoogleDriveToGCSOperator",
"unittest.mock.patch"
] |
[((928, 978), 'os.environ.get', 'os.environ.get', (['"""GCP_GDRIVE_FOLDER_ID"""', '"""abcd1234"""'], {}), "('GCP_GDRIVE_FOLDER_ID', 'abcd1234')\n", (942, 978), False, 'import os\n'), ((990, 1039), 'os.environ.get', 'os.environ.get', (['"""GCP_GDRIVE_DRIVE_ID"""', '"""abcd1234"""'], {}), "('GCP_GDRIVE_DRIVE_ID', 'abcd1234')\n", (1004, 1039), False, 'import os\n'), ((1052, 1123), 'os.environ.get', 'os.environ.get', (['"""GCP_GDRIVE_TO_GCS_FILE_NAME"""', '"""gdrive_to_gcs_file.txt"""'], {}), "('GCP_GDRIVE_TO_GCS_FILE_NAME', 'gdrive_to_gcs_file.txt')\n", (1066, 1123), False, 'import os\n'), ((1133, 1199), 'os.environ.get', 'os.environ.get', (['"""GCP_GDRIVE_TO_GCS_BUCKET"""', '"""gdrive-to-gcs-bucket"""'], {}), "('GCP_GDRIVE_TO_GCS_BUCKET', 'gdrive-to-gcs-bucket')\n", (1147, 1199), False, 'import os\n'), ((1369, 1445), 'unittest.mock.patch', 'mock.patch', (['"""airflow.providers.google.cloud.transfers.gdrive_to_gcs.GCSHook"""'], {}), "('airflow.providers.google.cloud.transfers.gdrive_to_gcs.GCSHook')\n", (1379, 1445), False, 'from unittest import mock\n'), ((1451, 1540), 'unittest.mock.patch', 'mock.patch', (['"""airflow.providers.google.cloud.transfers.gdrive_to_gcs.GoogleDriveHook"""'], {}), "(\n 'airflow.providers.google.cloud.transfers.gdrive_to_gcs.GoogleDriveHook')\n", (1461, 1540), False, 'from unittest import mock\n'), ((1631, 1852), 'airflow.providers.google.cloud.transfers.gdrive_to_gcs.GoogleDriveToGCSOperator', 'GoogleDriveToGCSOperator', ([], {'task_id': '"""test_task"""', 'folder_id': 'FOLDER_ID', 'file_name': 'FILE_NAME', 'drive_id': 'DRIVE_ID', 'bucket_name': 'BUCKET', 'object_name': 'OBJECT', 'gcp_conn_id': 'GCP_CONN_ID', 'impersonation_chain': 'IMPERSONATION_CHAIN'}), "(task_id='test_task', folder_id=FOLDER_ID,\n file_name=FILE_NAME, drive_id=DRIVE_ID, bucket_name=BUCKET, object_name\n =OBJECT, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)\n", (1655, 1852), False, 'from airflow.providers.google.cloud.transfers.gdrive_to_gcs import GoogleDriveToGCSOperator\n')]
|
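Usage note for the operator exercised above: outside of tests it is declared inside a DAG. A minimal sketch with placeholder IDs taken from the test defaults; scheduling details are arbitrary, and gcp_conn_id is left at its google_cloud_default fallback.

from datetime import datetime
from airflow import DAG
from airflow.providers.google.cloud.transfers.gdrive_to_gcs import GoogleDriveToGCSOperator

with DAG(
    dag_id="example_gdrive_to_gcs",
    start_date=datetime(2021, 1, 1),
    schedule_interval=None,
    catchup=False,
) as dag:
    copy_single_file = GoogleDriveToGCSOperator(
        task_id="copy_single_file",
        folder_id="abcd1234",                # Drive folder holding the file
        file_name="gdrive_to_gcs_file.txt",
        drive_id="abcd1234",                 # shared drive ID, if any
        bucket_name="gdrive-to-gcs-bucket",
        object_name="prefix/test.txt",
    )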
from PIL import Image
import numpy as np
import sys, os
from progress_bar import ProgressBar
def get_bit(pos, img):
# skip the first 4096 pixel positions (avoids touching the thumbnail);
# bit positions fill the R plane first, then G, then B
size = img.shape[0]*img.shape[1] - 4096
rgb = pos//size
if rgb > 2:
raise IndexError("Position is too large")
pos = pos % size + 4096
x,y = pos // img.shape[1], pos % img.shape[1]
return img[x][y][rgb] & 1
with Image.open(sys.argv[1]) as img:
with open(sys.argv[2], "w+") as out:
arrimg = np.array(img)
pos = 0
cur_char = ''
size_str = ""
# header: each character is stored as 8 bits (LSB first); characters are
# read until the '|' delimiter, which terminates the payload length
while cur_char != "|":
ord_chr = 0
for i in range(8):
bit = get_bit(pos, arrimg)
pos += 1
ord_chr = ord_chr | bit << i
cur_char = chr(ord_chr)
size_str += cur_char
size = int(size_str[:-1])
pb = ProgressBar(size)
pb.begin()
for i in range(size):
ord_chr = 0
for i in range(8):
bit = get_bit(pos, arrimg)
pos += 1
ord_chr = ord_chr | bit << i
out.write(chr(ord_chr))
pb.add_progress()
|
[
"progress_bar.ProgressBar",
"numpy.array",
"PIL.Image.open"
] |
[((395, 418), 'PIL.Image.open', 'Image.open', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (405, 418), False, 'from PIL import Image\n'), ((485, 498), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (493, 498), True, 'import numpy as np\n'), ((874, 891), 'progress_bar.ProgressBar', 'ProgressBar', (['size'], {}), '(size)\n', (885, 891), False, 'from progress_bar import ProgressBar\n')]
|
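Usage note for the extractor above: it reads a "<length>|" header and then that many characters, one bit per colour-channel LSB, filling the R plane first, then G, then B, and skipping the first 4096 pixel positions. The matching embedder below is a sketch, not part of the original (set_bit and embed are hypothetical helpers); it assumes an 8-bit RGB image, an ASCII message, and a lossless output format.

import numpy as np
from PIL import Image

def set_bit(pos, img, bit):
    # mirror of get_bit: write `bit` into the LSB addressed by `pos`
    size = img.shape[0] * img.shape[1] - 4096
    rgb = pos // size
    if rgb > 2:
        raise IndexError("Position is too large")
    pos = pos % size + 4096
    x, y = pos // img.shape[1], pos % img.shape[1]
    img[x][y][rgb] = (img[x][y][rgb] & 0xFE) | bit

def embed(in_path, out_path, message):
    payload = "%d|%s" % (len(message), message)   # header matches the extractor
    with Image.open(in_path) as img:
        arr = np.array(img)
    pos = 0
    for ch in payload:                            # ASCII characters only
        for i in range(8):                        # LSB first, like the extractor
            set_bit(pos, arr, (ord(ch) >> i) & 1)
            pos += 1
    Image.fromarray(arr).save(out_path)            # use a lossless format, e.g. PNG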
from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat
class Subject(Parser):
name = ctx
class ThroughSubjects(Parser):
subject = Delegate(Subject, ctx)
class Tag(Parser):
name = ctx
class ThroughTags(Parser):
tag = Delegate(Tag, ctx)
class Organization(Parser):
name = ctx
class Publisher(Parser):
agent = Delegate(Organization, ctx)
class Person(Parser):
given_name = ParseName(ctx).first
family_name = ParseName(ctx).last
class Creator(Parser):
agent = Delegate(Person, ctx)
cited_as = ctx
order_cited = ctx('index')
class WorkIdentifier(Parser):
uri = IRI(ctx)
class Article(Parser):
title = ctx.title
description = Try(ctx.description)
language = ctx.language
date_published = ParseDate(ctx.date)
date_updated = ParseDate(ctx.date)
identifiers = Map(
Delegate(WorkIdentifier),
ctx.doi,
ctx.pdf_url,
ctx.fulltext_html_url,
RunPython(lambda x: 'https://www.ncbi.nlm.nih.gov/pubmed/{}'.format(x) if x else None, Try(ctx.identifiers.pubmed)),
RunPython(lambda x: 'https://www.ncbi.nlm.nih.gov/pmc/articles/{}'.format(x) if x else None, Try(ctx.identifiers.pmc)),
)
subjects = Map(Delegate(ThroughSubjects), Subjects(ctx.subjects))
tags = Map(Delegate(ThroughTags), Try(ctx.keywords), Try(ctx.subjects))
related_agents = Concat(
Map(Delegate(Creator), ctx.author),
Map(Delegate(Publisher), ctx.publisher),
)
class Extra:
volume = Try(ctx.volume)
journal_title = Try(ctx.journal_title)
journal_abbrev = Try(ctx.journal_abbrev)
description_html = Try(ctx['description-html'])
issn = Try(ctx.issn)
class Preprint(Article):
class Extra:
modified = ParseDate(ctx.date)
subjects = ctx.subjects
identifiers = Try(ctx.identifiers)
emails = Try(ctx.author_email)
description_html = Try(ctx['description-html'])
class PeerJTransformer(ChainTransformer):
VERSION = 1
def get_root_parser(self, unwrapped, emitted_type=None, **kwargs):
if emitted_type == 'preprint':
return Preprint
return Article
|
[
"share.transform.chain.Subjects",
"share.transform.chain.ParseDate",
"share.transform.chain.ctx",
"share.transform.chain.IRI",
"share.transform.chain.Try",
"share.transform.chain.Delegate",
"share.transform.chain.ParseName"
] |
[((227, 249), 'share.transform.chain.Delegate', 'Delegate', (['Subject', 'ctx'], {}), '(Subject, ctx)\n', (235, 249), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((325, 343), 'share.transform.chain.Delegate', 'Delegate', (['Tag', 'ctx'], {}), '(Tag, ctx)\n', (333, 343), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((428, 455), 'share.transform.chain.Delegate', 'Delegate', (['Organization', 'ctx'], {}), '(Organization, ctx)\n', (436, 455), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((593, 614), 'share.transform.chain.Delegate', 'Delegate', (['Person', 'ctx'], {}), '(Person, ctx)\n', (601, 614), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((652, 664), 'share.transform.chain.ctx', 'ctx', (['"""index"""'], {}), "('index')\n", (655, 664), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((707, 715), 'share.transform.chain.IRI', 'IRI', (['ctx'], {}), '(ctx)\n', (710, 715), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((781, 801), 'share.transform.chain.Try', 'Try', (['ctx.description'], {}), '(ctx.description)\n', (784, 801), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((851, 870), 'share.transform.chain.ParseDate', 'ParseDate', (['ctx.date'], {}), '(ctx.date)\n', (860, 870), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((890, 909), 'share.transform.chain.ParseDate', 'ParseDate', (['ctx.date'], {}), '(ctx.date)\n', (899, 909), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((497, 511), 'share.transform.chain.ParseName', 'ParseName', (['ctx'], {}), '(ctx)\n', (506, 511), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((536, 550), 'share.transform.chain.ParseName', 'ParseName', (['ctx'], {}), '(ctx)\n', (545, 550), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((942, 966), 'share.transform.chain.Delegate', 'Delegate', (['WorkIdentifier'], {}), '(WorkIdentifier)\n', (950, 966), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1316, 1341), 'share.transform.chain.Delegate', 'Delegate', (['ThroughSubjects'], {}), '(ThroughSubjects)\n', (1324, 1341), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1343, 1365), 'share.transform.chain.Subjects', 'Subjects', (['ctx.subjects'], {}), '(ctx.subjects)\n', (1351, 1365), False, 'from 
share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1382, 1403), 'share.transform.chain.Delegate', 'Delegate', (['ThroughTags'], {}), '(ThroughTags)\n', (1390, 1403), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1405, 1422), 'share.transform.chain.Try', 'Try', (['ctx.keywords'], {}), '(ctx.keywords)\n', (1408, 1422), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1424, 1441), 'share.transform.chain.Try', 'Try', (['ctx.subjects'], {}), '(ctx.subjects)\n', (1427, 1441), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1607, 1622), 'share.transform.chain.Try', 'Try', (['ctx.volume'], {}), '(ctx.volume)\n', (1610, 1622), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1647, 1669), 'share.transform.chain.Try', 'Try', (['ctx.journal_title'], {}), '(ctx.journal_title)\n', (1650, 1669), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1695, 1718), 'share.transform.chain.Try', 'Try', (['ctx.journal_abbrev'], {}), '(ctx.journal_abbrev)\n', (1698, 1718), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1746, 1774), 'share.transform.chain.Try', 'Try', (["ctx['description-html']"], {}), "(ctx['description-html'])\n", (1749, 1774), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1790, 1803), 'share.transform.chain.Try', 'Try', (['ctx.issn'], {}), '(ctx.issn)\n', (1793, 1803), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1868, 1887), 'share.transform.chain.ParseDate', 'ParseDate', (['ctx.date'], {}), '(ctx.date)\n', (1877, 1887), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1942, 1962), 'share.transform.chain.Try', 'Try', (['ctx.identifiers'], {}), '(ctx.identifiers)\n', (1945, 1962), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1980, 2001), 'share.transform.chain.Try', 'Try', (['ctx.author_email'], {}), '(ctx.author_email)\n', (1983, 2001), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((2029, 2057), 'share.transform.chain.Try', 'Try', (["ctx['description-html']"], {}), "(ctx['description-html'])\n", (2032, 2057), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1132, 1159), 'share.transform.chain.Try', 'Try', (['ctx.identifiers.pubmed'], {}), '(ctx.identifiers.pubmed)\n', (1135, 1159), False, 'from share.transform.chain import ChainTransformer, 
Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1263, 1287), 'share.transform.chain.Try', 'Try', (['ctx.identifiers.pmc'], {}), '(ctx.identifiers.pmc)\n', (1266, 1287), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1485, 1502), 'share.transform.chain.Delegate', 'Delegate', (['Creator'], {}), '(Creator)\n', (1493, 1502), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n'), ((1529, 1548), 'share.transform.chain.Delegate', 'Delegate', (['Publisher'], {}), '(Publisher)\n', (1537, 1548), False, 'from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat\n')]
|
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
from netmiko import ConnectHandler
from mydevices import nxos1, nxos2
from pprint import pprint
import textfsm
import time
import re
from colorama import Fore, Back, Style
env = Environment(undefined=StrictUndefined)
env.loader = FileSystemLoader("./templates/")
template_file = "question2.j2"
interface = "1"
nxos1_vars = {
"device_name": "nxos1",
"local_as": 22,
"interface": interface,
"ip_address": "10.1.100.1",
"netmask": "24"
}
nxos2_vars = {
"device_name": "nxos2",
"local_as": 22,
"interface": interface,
"ip_address": "10.1.100.2",
"netmask": "24"
}
nxos1_vars["peer_ip"] = nxos2_vars["ip_address"]
nxos2_vars["peer_ip"] = nxos1_vars["ip_address"]
# Add Jinja2 vars to be included in the Netmiko device dictionary
nxos1["j2_vars"] = nxos1_vars
nxos2["j2_vars"] = nxos2_vars
template = env.get_template(template_file)
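# The template file itself is not included in this snippet. As a hedged
# illustration only, a question2.j2 covering the variables rendered in config()
# below (device_name, local_as, interface, ip_address, netmask, peer_ip) could
# look roughly like this NX-OS fragment -- the real template may differ:
#
#   hostname {{ device_name }}
#   interface Ethernet1/{{ interface }}
#     no switchport
#     ip address {{ ip_address }}/{{ netmask }}
#     no shutdown
#   router bgp {{ local_as }}
#     neighbor {{ peer_ip }} remote-as {{ local_as }}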
def config():
for device in [nxos1,nxos2]:
        ### Pop 'j2_vars' out of the device dict into 'device_var',
        ### leaving 'device' with just the netmiko parameters
device_var = device.pop('j2_vars')
cfg = template.render(**device_var)
Node = {
"host": device['host'],
"username": device['username'],
"password": device['password'],
"device_type": device['device_type']
}
net_connect = ConnectHandler(**Node)
print(f"Updating {device['host']} ".center(80, "#"))
output = net_connect.send_config_set(cfg)
print('Completed' + '\n')
def verify():
for device in [nxos1,nxos2]:
Node = {
"host": device['host'],
"username": device['username'],
"password": device['password'],
"device_type": device['device_type']
}
net_connect = ConnectHandler(**Node)
raw_text_data = net_connect.send_command('show ip bgp sum')
net_connect.disconnect()
textfsm_file = "templates/question3.template"
textfsm_template = open(textfsm_file)
# with open("show_ip_bgp_sum.txt") as f:
# raw_text_data = f.read()
# The argument 'template' is a file handle and 'raw_text_data' is a string.
re_table = textfsm.TextFSM(textfsm_template)
bgp_status = re_table.ParseText(raw_text_data)[0][0]
bgp_state = re_table.ParseText(raw_text_data)[0][1]
textfsm_template.close()
### Regular expressions to match the bgp variables above
regex_status = re.compile(r'[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}')
regex_state = re.compile(r'\d+')
if regex_status.match(bgp_status) and regex_state.match(bgp_state):
            ''' These two conditions check:
                - whether there is an uptime counter (HH:MM:SS), and
                - whether the BGP state field is a prefix count (a number), and NOT a still-establishing BGP state
            '''
print(f"BGP has been established on: {device['host']}")
else:
print(f"The current BGP State of {device['host']} is: {bgp_state}. Please review")
def run():
config()
time.sleep(15)
verify()
if __name__ == "__main__":
run()
|
[
"re.compile",
"jinja2.environment.Environment",
"time.sleep",
"jinja2.FileSystemLoader",
"netmiko.ConnectHandler",
"textfsm.TextFSM"
] |
[((275, 313), 'jinja2.environment.Environment', 'Environment', ([], {'undefined': 'StrictUndefined'}), '(undefined=StrictUndefined)\n', (286, 313), False, 'from jinja2.environment import Environment\n'), ((327, 359), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['"""./templates/"""'], {}), "('./templates/')\n", (343, 359), False, 'from jinja2 import FileSystemLoader, StrictUndefined\n'), ((3133, 3147), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (3143, 3147), False, 'import time\n'), ((1433, 1455), 'netmiko.ConnectHandler', 'ConnectHandler', ([], {}), '(**Node)\n', (1447, 1455), False, 'from netmiko import ConnectHandler\n'), ((1855, 1877), 'netmiko.ConnectHandler', 'ConnectHandler', ([], {}), '(**Node)\n', (1869, 1877), False, 'from netmiko import ConnectHandler\n'), ((2271, 2304), 'textfsm.TextFSM', 'textfsm.TextFSM', (['textfsm_template'], {}), '(textfsm_template)\n', (2286, 2304), False, 'import textfsm\n'), ((2548, 2594), 're.compile', 're.compile', (['"""[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}"""'], {}), "('[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}')\n", (2558, 2594), False, 'import re\n'), ((2618, 2636), 're.compile', 're.compile', (['"""\\\\d+"""'], {}), "('\\\\d+')\n", (2628, 2636), False, 'import re\n')]
|
from typing import Text
from aiogram import types
from aiogram.dispatcher.filters.builtin import Text
from aiogram.dispatcher import FSMContext
from keyboards.default.admin_panel import admin_panel_kb
from middlewares.states.admin_panel_states import check_user
from utils.db_api import sql
from loader import dp
'''
data = {"username": "",
"telegram_id": "", # int
"phone": "",
"data_add": ""}
'''
@dp.message_handler(text="/cancel", state="*")
async def back(message: types.Message, state: FSMContext):
    await message.answer("Action cancelled!", reply_markup=admin_panel_kb)
await state.finish()
@dp.message_handler(state=check_user.step1)
async def first_step(message: types.Message, state: FSMContext):
data = {"telegram_id": int(message.text)}
res = await sql.check(data)
if res == "not in base":
        await message.answer("The user is not in the database")
await state.finish()
elif res == "is in base":
        await message.answer("The user is in the database")
await state.finish()
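# Note: utils.db_api.sql is not part of this snippet, so the sketch below is a
# purely hypothetical illustration of the contract used above (it must return
# the strings "is in base" / "not in base"); the table name, column name and
# asyncpg-style pool are assumptions, hence it is left commented out.
#
# async def check(data):
#     row = await pool.fetchrow(
#         "SELECT 1 FROM users WHERE telegram_id = $1", data["telegram_id"])
#     return "is in base" if row else "not in base"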
|
[
"utils.db_api.sql.check",
"loader.dp.message_handler"
] |
[((428, 473), 'loader.dp.message_handler', 'dp.message_handler', ([], {'text': '"""/cancel"""', 'state': '"""*"""'}), "(text='/cancel', state='*')\n", (446, 473), False, 'from loader import dp\n'), ((636, 678), 'loader.dp.message_handler', 'dp.message_handler', ([], {'state': 'check_user.step1'}), '(state=check_user.step1)\n', (654, 678), False, 'from loader import dp\n'), ((806, 821), 'utils.db_api.sql.check', 'sql.check', (['data'], {}), '(data)\n', (815, 821), False, 'from utils.db_api import sql\n')]
|
import requests
from threading import Thread
from six.moves.queue import Queue
def flatten_kwargs(index, **kwargs):
kwargs = dict(kwargs)
for arg in kwargs:
if isinstance(kwargs[arg], list):
kwargs[arg] = kwargs[arg][index]
return kwargs
class WebRunner:
resp_queue = None
def __init__(self):
self.resp_queue = Queue()
def request(self, index, url, **kwargs):
kwargs = flatten_kwargs(index, **kwargs)
try:
method = kwargs.pop('method', 'GET')
self.resp_queue.put(
(index, requests.request(method=method, url=url, **kwargs))
)
except Exception as e:
self.resp_queue.put((index, None))
print('Failed to download %s because %s.' % (url, e))
def runner(self, **kwargs):
while True:
index, url = self.work_queue.get()
if 'http://' not in url and 'https://' not in url:
url = 'http://' + url
self.request(index, url, **kwargs)
self.work_queue.task_done()
def run(self, urls, concurrency=4, **kwargs):
self.work_queue = Queue()
        for index, url in enumerate(urls):
            self.work_queue.put((index, url))
for i in range(concurrency):
t = Thread(target=self.runner, kwargs=kwargs)
t.daemon = True
t.start()
self.work_queue.join()
responses = list(self.resp_queue.queue)
responses = sorted(responses, key=lambda x: x[0])
return [r[1] for r in responses]
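# Minimal usage sketch (not part of the original module). The URLs are
# placeholders; run() returns the responses in the same order as the input
# list, with None for any request that failed. Extra keyword arguments such as
# timeout are forwarded to requests.request().
if __name__ == '__main__':
    runner = WebRunner()
    responses = runner.run(['example.com', 'https://example.org'], concurrency=2, timeout=10)
    for resp in responses:
        print(resp.status_code if resp is not None else 'request failed')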
|
[
"threading.Thread",
"requests.request",
"six.moves.queue.Queue"
] |
[((365, 372), 'six.moves.queue.Queue', 'Queue', ([], {}), '()\n', (370, 372), False, 'from six.moves.queue import Queue\n'), ((1165, 1172), 'six.moves.queue.Queue', 'Queue', ([], {}), '()\n', (1170, 1172), False, 'from six.moves.queue import Queue\n'), ((1299, 1340), 'threading.Thread', 'Thread', ([], {'target': 'self.runner', 'kwargs': 'kwargs'}), '(target=self.runner, kwargs=kwargs)\n', (1305, 1340), False, 'from threading import Thread\n'), ((588, 638), 'requests.request', 'requests.request', ([], {'method': 'method', 'url': 'url'}), '(method=method, url=url, **kwargs)\n', (604, 638), False, 'import requests\n')]
|
import ast
print(ast)
source = """
def foo():
print('bar')
pass
"""
n = ast.parse(source)
print(n)
print(n.body)
print(n.body[0].name)
assert n.body[0].name == 'foo'
foo = n.body[0]
assert foo.lineno == 2
print(foo.body)
assert len(foo.body) == 2
print(foo.body[0])
print(foo.body[0].value.func.id)
assert foo.body[0].value.func.id == 'print'
assert foo.body[0].lineno == 3
assert foo.body[1].lineno == 4
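# Illustrative addition (not in the original script): ast.dump() renders the
# whole parsed tree as a string, which is handy for inspecting node fields.
print(ast.dump(n))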
|
[
"ast.parse"
] |
[((82, 99), 'ast.parse', 'ast.parse', (['source'], {}), '(source)\n', (91, 99), False, 'import ast\n')]
|
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import AggregationPrimitive
class CustomMean(AggregationPrimitive):
name = "custom_mean"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
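    # Illustrative sketch only (not part of the original snippet): a custom
    # aggregation primitive normally also supplies the callable that performs
    # the aggregation, which featuretools picks up via get_function(). The
    # pandas-style mean below is an assumed implementation.
    def get_function(self):
        def custom_mean(values):
            return values.mean()
        return custom_mean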
|
[
"woodwork.column_schema.ColumnSchema"
] |
[((256, 295), 'woodwork.column_schema.ColumnSchema', 'ColumnSchema', ([], {'semantic_tags': "{'numeric'}"}), "(semantic_tags={'numeric'})\n", (268, 295), False, 'from woodwork.column_schema import ColumnSchema\n'), ((197, 236), 'woodwork.column_schema.ColumnSchema', 'ColumnSchema', ([], {'semantic_tags': "{'numeric'}"}), "(semantic_tags={'numeric'})\n", (209, 236), False, 'from woodwork.column_schema import ColumnSchema\n')]
|
import statistics
target_root_path = r"G:\GE\skin_12_data"
print("Region", end='')
for i in range(5):
print("\t\tAverage\tMedian\t%", end='')
print()
cell_types = ['all', 'CD68', 'T-Helper', 'T-Killer', 'T-Reg']
region_list = [11, 3, 8, 9, 1, 12, 5, 4, 2, 10, 7]
for region_id in region_list:
target_file_path = target_root_path + rf"\region_{region_id}\nuclei.csv"
cell_file = open(target_file_path, 'r')
cell_file.readline()
cell_lines = cell_file.readlines()
distances = {'all': []}
for line in cell_lines:
content = line.split(',')
type = content[4]
distance = content[5]
        if type in cell_types:
            if type not in distances:
                distances[type] = []
            # record this row's distance both for its cell type and for the overall pool
            distances[type].append(float(distance))
            distances['all'].append(float(distance))
# print("Region ", region_id)
# for key in distances:
# print("\t", key)
# print("\t\tAverage Distance: ", statistics.mean(distances[key]))
# print("\t\tMedian Distance: ", statistics.median(distances[key]))
print(region_id, end='')
for key in cell_types:
print("\t", key, end='')
if key in distances:
dis_mean = statistics.mean(distances[key])
dis_median = statistics.median(distances[key])
percentage = len(distances[key]) / len(distances['all'])
else:
dis_mean = 0
dis_median = 0
percentage = 0
print(f"\t{dis_mean}", end='')
print(f"\t{dis_median}", end='')
print(f"\t{percentage}", end='')
print()
#
# distances = {'all': []}
# for region_id in [1, 3, 6, 8, 9, 11]:
# target_file_path = target_root_path + rf"\region_{region_id}\nuclei.csv"
#
# cell_file = open(target_file_path, 'r')
# cell_file.readline()
# cell_lines = cell_file.readlines()
#
# for line in cell_lines:
# content = line.split(',')
# type = content[4]
# distance = content[5]
# if type not in distances:
# distances[type] = []
# else:
# distances[type].append(float(distance))
# distances['all'].append(float(distance))
#
# print("Sun", end='')
# for key in ['all', 'CD68', 'T-Reg', 'T-Helper']:
# print("\t", key, end='')
# print(f"\t{statistics.mean(distances[key])}", end='')
# print(f"\t{statistics.median(distances[key])}", end='')
# print(f"\t{len(distances[key]) / len(distances['all'])}", end='')
# print()
#
# distances = {'all': []}
# for region_id in [2, 4, 5, 7, 10, 12]:
# target_file_path = target_root_path + rf"\region_{region_id}\nuclei.csv"
#
# cell_file = open(target_file_path, 'r')
# cell_file.readline()
# cell_lines = cell_file.readlines()
#
# for line in cell_lines:
# content = line.split(',')
# type = content[4]
# distance = content[5]
# if type not in distances:
# distances[type] = []
# else:
# distances[type].append(float(distance))
# distances['all'].append(float(distance))
#
# print("Non-sun", end='')
# for key in ['all', 'CD68', 'T-Reg', 'T-Helper']:
# print("\t", key, end='')
# print(f"\t{statistics.mean(distances[key])}", end='')
# print(f"\t{statistics.median(distances[key])}", end='')
# print(f"\t{len(distances[key]) / len(distances['all'])}", end='')
# print()
#
# distances = {'all': []}
# for region_id in region_list:
# target_file_path = target_root_path + rf"\region_{region_id}\nuclei.csv"
#
# cell_file = open(target_file_path, 'r')
# cell_file.readline()
# cell_lines = cell_file.readlines()
#
# for line in cell_lines:
# content = line.split(',')
# type = content[4]
# distance = content[5]
# if type in cell_types:
# if type not in distances:
# distances[type] = []
# else:
# distances[type].append(float(distance))
# distances['all'].append(float(distance))
#
# print("All ", end='')
# for key in cell_types:
# print("\t", key, end='')
# print(f"\t{statistics.mean(distances[key])}", end='')
# print(f"\t{statistics.median(distances[key])}", end='')
# print(f"\t{len(distances[key]) / len(distances['all'])}", end='')
# print()
|
[
"statistics.median",
"statistics.mean"
] |
[((1254, 1285), 'statistics.mean', 'statistics.mean', (['distances[key]'], {}), '(distances[key])\n', (1269, 1285), False, 'import statistics\n'), ((1311, 1344), 'statistics.median', 'statistics.median', (['distances[key]'], {}), '(distances[key])\n', (1328, 1344), False, 'import statistics\n')]
|
# Iterative Conway's game of life in Python / CUDA C
# this version is meant to illustrate the use of shared kernel memory in CUDA.
# written by <NAME> for "Hands on GPU Programming with Python and CUDA"
import pycuda.autoinit
import pycuda.driver as drv
from pycuda import gpuarray
from pycuda.compiler import SourceModule
import numpy as np
import matplotlib.pyplot as plt
from time import time
shared_ker = SourceModule("""
#define _iters 1000000
#define _X ( threadIdx.x + blockIdx.x * blockDim.x )
#define _Y ( threadIdx.y + blockIdx.y * blockDim.y )
#define _WIDTH ( blockDim.x * gridDim.x )
#define _HEIGHT ( blockDim.y * gridDim.y )
#define _XM(x) ( (x + _WIDTH) % _WIDTH )
#define _YM(y) ( (y + _HEIGHT) % _HEIGHT )
#define _INDEX(x,y) ( _XM(x) + _YM(y) * _WIDTH )
// return the number of living neighbors for a given cell
__device__ int nbrs(int x, int y, int * in)
{
return ( in[ _INDEX(x -1, y+1) ] + in[ _INDEX(x-1, y) ] + in[ _INDEX(x-1, y-1) ] \
+ in[ _INDEX(x, y+1)] + in[_INDEX(x, y - 1)] \
+ in[ _INDEX(x+1, y+1) ] + in[ _INDEX(x+1, y) ] + in[ _INDEX(x+1, y-1) ] );
}
// p_lattice will now be the pointer to the global lattice, lattice to the shared
__global__ void conway_ker_shared(int * p_lattice, int iters)
{
// x, y are the appropriate values for the cell covered by this thread
int x = _X, y = _Y;
__shared__ int lattice[32*32];
lattice[_INDEX(x,y)] = p_lattice[_INDEX(x,y)];
    __syncthreads(); // each thread copies its own value into the shared lattice; we need to wait for all of them to finish
for (int i = 0; i < iters; i++)
{
// count the number of neighbors around the current cell
int n = nbrs(x, y, lattice);
int cell_value;
// if the current cell is alive, then determine if it lives or dies for the next generation.
if ( lattice[_INDEX(x,y)] == 1)
switch(n)
{
// if the cell is alive: it remains alive only if it has 2 or 3 neighbors.
case 2:
case 3: cell_value = 1;
break;
default: cell_value = 0;
}
else if( lattice[_INDEX(x,y)] == 0 )
switch(n)
{
// a dead cell comes to life only if it has 3 neighbors that are alive.
case 3: cell_value = 1;
break;
default: cell_value = 0;
}
__syncthreads();
lattice[_INDEX(x,y)] = cell_value;
__syncthreads();
}
__syncthreads();
p_lattice[_INDEX(x,y)] = lattice[_INDEX(x,y)];
__syncthreads();
}
""")
conway_ker_shared = shared_ker.get_function("conway_ker_shared")
if __name__ == '__main__':
# set lattice size
N = 32
lattice = np.int32( np.random.choice([1,0], N*N, p=[0.25, 0.75]).reshape(N, N) )
lattice_gpu = gpuarray.to_gpu(lattice)
time1 = time()
conway_ker_shared(lattice_gpu, np.int32(1e6), grid=(1,1,1), block=(32,32,1))
print("calc needed", (time() - time1)*1000, "ms")
fig = plt.figure(1)
plt.imshow(lattice_gpu.get())
plt.show()
|
[
"numpy.random.choice",
"pycuda.compiler.SourceModule",
"matplotlib.pyplot.show",
"time.time",
"matplotlib.pyplot.figure",
"numpy.int32",
"pycuda.gpuarray.to_gpu"
] |
[((417, 2837), 'pycuda.compiler.SourceModule', 'SourceModule', (['""" \n#define _iters 1000000 \n\n#define _X ( threadIdx.x + blockIdx.x * blockDim.x )\n#define _Y ( threadIdx.y + blockIdx.y * blockDim.y )\n\n#define _WIDTH ( blockDim.x * gridDim.x )\n#define _HEIGHT ( blockDim.y * gridDim.y )\n\n#define _XM(x) ( (x + _WIDTH) % _WIDTH )\n#define _YM(y) ( (y + _HEIGHT) % _HEIGHT )\n\n#define _INDEX(x,y) ( _XM(x) + _YM(y) * _WIDTH )\n\n// return the number of living neighbors for a given cell \n__device__ int nbrs(int x, int y, int * in)\n{\n return ( in[ _INDEX(x -1, y+1) ] + in[ _INDEX(x-1, y) ] + in[ _INDEX(x-1, y-1) ] + in[ _INDEX(x, y+1)] + in[_INDEX(x, y - 1)] + in[ _INDEX(x+1, y+1) ] + in[ _INDEX(x+1, y) ] + in[ _INDEX(x+1, y-1) ] );\n}\n\n// p_lattice will now be the pointer to the global lattice, lattice to the shared\n__global__ void conway_ker_shared(int * p_lattice, int iters)\n{\n // x, y are the appropriate values for the cell covered by this thread\n int x = _X, y = _Y;\n __shared__ int lattice[32*32];\n \n \n lattice[_INDEX(x,y)] = p_lattice[_INDEX(x,y)];\n __syncthreads(); // each thread copies its own value into the shared lattice, we need to wait for them to finisch\n\n for (int i = 0; i < iters; i++)\n {\n \n // count the number of neighbors around the current cell\n int n = nbrs(x, y, lattice);\n \n int cell_value;\n \n \n // if the current cell is alive, then determine if it lives or dies for the next generation.\n if ( lattice[_INDEX(x,y)] == 1)\n switch(n)\n {\n // if the cell is alive: it remains alive only if it has 2 or 3 neighbors.\n case 2:\n case 3: cell_value = 1;\n break;\n default: cell_value = 0; \n }\n else if( lattice[_INDEX(x,y)] == 0 )\n switch(n)\n {\n // a dead cell comes to life only if it has 3 neighbors that are alive.\n case 3: cell_value = 1;\n break;\n default: cell_value = 0; \n }\n \n __syncthreads();\n lattice[_INDEX(x,y)] = cell_value;\n __syncthreads();\n \n }\n \n __syncthreads();\n p_lattice[_INDEX(x,y)] = lattice[_INDEX(x,y)];\n __syncthreads();\n \n}\n"""'], {}), '(\n """ \n#define _iters 1000000 \n\n#define _X ( threadIdx.x + blockIdx.x * blockDim.x )\n#define _Y ( threadIdx.y + blockIdx.y * blockDim.y )\n\n#define _WIDTH ( blockDim.x * gridDim.x )\n#define _HEIGHT ( blockDim.y * gridDim.y )\n\n#define _XM(x) ( (x + _WIDTH) % _WIDTH )\n#define _YM(y) ( (y + _HEIGHT) % _HEIGHT )\n\n#define _INDEX(x,y) ( _XM(x) + _YM(y) * _WIDTH )\n\n// return the number of living neighbors for a given cell \n__device__ int nbrs(int x, int y, int * in)\n{\n return ( in[ _INDEX(x -1, y+1) ] + in[ _INDEX(x-1, y) ] + in[ _INDEX(x-1, y-1) ] + in[ _INDEX(x, y+1)] + in[_INDEX(x, y - 1)] + in[ _INDEX(x+1, y+1) ] + in[ _INDEX(x+1, y) ] + in[ _INDEX(x+1, y-1) ] );\n}\n\n// p_lattice will now be the pointer to the global lattice, lattice to the shared\n__global__ void conway_ker_shared(int * p_lattice, int iters)\n{\n // x, y are the appropriate values for the cell covered by this thread\n int x = _X, y = _Y;\n __shared__ int lattice[32*32];\n \n \n lattice[_INDEX(x,y)] = p_lattice[_INDEX(x,y)];\n __syncthreads(); // each thread copies its own value into the shared lattice, we need to wait for them to finisch\n\n for (int i = 0; i < iters; i++)\n {\n \n // count the number of neighbors around the current cell\n int n = nbrs(x, y, lattice);\n \n int cell_value;\n \n \n // if the current cell is alive, then determine if it lives or dies for the next generation.\n if ( lattice[_INDEX(x,y)] == 1)\n switch(n)\n {\n // if the cell is alive: it remains alive only if it 
has 2 or 3 neighbors.\n case 2:\n case 3: cell_value = 1;\n break;\n default: cell_value = 0; \n }\n else if( lattice[_INDEX(x,y)] == 0 )\n switch(n)\n {\n // a dead cell comes to life only if it has 3 neighbors that are alive.\n case 3: cell_value = 1;\n break;\n default: cell_value = 0; \n }\n \n __syncthreads();\n lattice[_INDEX(x,y)] = cell_value;\n __syncthreads();\n \n }\n \n __syncthreads();\n p_lattice[_INDEX(x,y)] = lattice[_INDEX(x,y)];\n __syncthreads();\n \n}\n"""\n )\n', (429, 2837), False, 'from pycuda.compiler import SourceModule\n'), ((3074, 3098), 'pycuda.gpuarray.to_gpu', 'gpuarray.to_gpu', (['lattice'], {}), '(lattice)\n', (3089, 3098), False, 'from pycuda import gpuarray\n'), ((3120, 3126), 'time.time', 'time', ([], {}), '()\n', (3124, 3126), False, 'from time import time\n'), ((3281, 3294), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3291, 3294), True, 'import matplotlib.pyplot as plt\n'), ((3333, 3343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3341, 3343), True, 'import matplotlib.pyplot as plt\n'), ((3162, 3181), 'numpy.int32', 'np.int32', (['(1000000.0)'], {}), '(1000000.0)\n', (3170, 3181), True, 'import numpy as np\n'), ((2995, 3042), 'numpy.random.choice', 'np.random.choice', (['[1, 0]', '(N * N)'], {'p': '[0.25, 0.75]'}), '([1, 0], N * N, p=[0.25, 0.75])\n', (3011, 3042), True, 'import numpy as np\n'), ((3234, 3240), 'time.time', 'time', ([], {}), '()\n', (3238, 3240), False, 'from time import time\n')]
|
#!/usr/bin/python3
"""
File containing the class BaseModel
"""
from datetime import datetime
import models
import uuid
class BaseModel:
"""a class that defines all common attributes/methods for other classes"""
def __init__(self, *args, **kwargs):
"""Function for initializing the base model"""
if kwargs is None or len(kwargs) < 1:
self.id = str(uuid.uuid4())
self.created_at = datetime.now()
self.updated_at = datetime.now()
models.storage.new(self)
else:
for key, value in kwargs.items():
if key != "__class__":
if key == "created_at" or key == "updated_at":
value = datetime.strptime(
value, '%Y-%m-%dT%H:%M:%S.%f')
setattr(self, key, value)
def __str__(self,):
"""String representation of the BaseModel class"""
x = "[{}] ({}) {}".format(self.__class__.__name__,
self.id, self.__dict__)
return x
def save(self):
"""updates the class with the current time"""
self.updated_at = datetime.now()
models.storage.new(self)
models.storage.save()
def to_dict(self):
"""returns a dictionary containing all key-value pairs"""
dic_n = self.__dict__.copy()
dic_n['created_at'] = self.created_at.isoformat()
dic_n['updated_at'] = self.updated_at.isoformat()
dic_n['__class__'] = self.__class__.__name__
return dic_n
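# Minimal usage sketch (illustrative; it relies on the project's models.storage
# engine being configured, so it is left commented out):
#
#   base = BaseModel()
#   base.save()
#   print(base)            # [BaseModel] (<id>) {...}
#   print(base.to_dict())  # dict with isoformat() timestamps and __class__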
|
[
"uuid.uuid4",
"datetime.datetime.strptime",
"models.storage.save",
"datetime.datetime.now",
"models.storage.new"
] |
[((1168, 1182), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1180, 1182), False, 'from datetime import datetime\n'), ((1191, 1215), 'models.storage.new', 'models.storage.new', (['self'], {}), '(self)\n', (1209, 1215), False, 'import models\n'), ((1224, 1245), 'models.storage.save', 'models.storage.save', ([], {}), '()\n', (1243, 1245), False, 'import models\n'), ((430, 444), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (442, 444), False, 'from datetime import datetime\n'), ((475, 489), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (487, 489), False, 'from datetime import datetime\n'), ((502, 526), 'models.storage.new', 'models.storage.new', (['self'], {}), '(self)\n', (520, 526), False, 'import models\n'), ((386, 398), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (396, 398), False, 'import uuid\n'), ((725, 773), 'datetime.datetime.strptime', 'datetime.strptime', (['value', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(value, '%Y-%m-%dT%H:%M:%S.%f')\n", (742, 773), False, 'from datetime import datetime\n')]
|
import random
import pyodbc
import pickle
import cv2
import time
from RaspberryPi.CollectingTrainingData.Commands import Commands
from imgaug import augmenters as iaa
# server = 'amaanrobotics.database.windows.net'
# database = 'AmaanRoboticsCloudDB'
# username = ''
# password = ''
# driver= '{ODBC Driver 13 for SQL Server}'
# cnxn = pyodbc.connect('DRIVER='+driver+';PORT=1433;SERVER='+server+';PORT=1443;DATABASE='+database+';UID='+username+';PWD='+ password)
def parseCommand(cmd):
if cmd == Commands.NO_CMD.value:
return "NO_CMD"
elif cmd == Commands.LEFT.value:
return "LEFT"
elif cmd == Commands.RIGHT.value:
return 'RIGHT'
elif cmd == Commands.FORWARD.value:
return 'FORWARD'
elif cmd == Commands.BACK.value:
return 'BACK'
elif cmd == Commands.STOP_ALL_MOTORS.value:
return 'STOP ALL MOTORS'
elif cmd == Commands.BACK_MOTOR_STOP.value:
return 'BACK_MOTOR_STOP'
elif cmd == Commands.RESET_STEER.value:
return 'RESET_STEER'
else:
return None
def applyAdaptiveThreshold(imgs):
threshImgs = []
for img in imgs:
thresh = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
thresh = cv2.medianBlur(thresh, 5) #5 - kernel size
thresh = cv2.adaptiveThreshold(thresh, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
threshImgs.append(thresh)
return threshImgs
def applyOtsuThreshold(imgs):
threshImgs = []
for img in imgs:
thresh = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        thresh = cv2.GaussianBlur(thresh, (3,3), 0) # apply Gaussian blur
ret3, thresh = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
threshImgs.append(thresh)
return threshImgs
def getTrainingData(cursor):
'''
:param cursor: pyodbc cursor cnxn
:return: the imgList and cmdList
'''
# master_list = [] #contains all framelets
imgList = []
cmdList = []
noCmdCount = 0
cursor.execute("Select X.* From (SELECT *,Row_Number() over (Order by FileName) as RN from poc.TrainingData) As X where X.Evaluation='Good' and X.Id < 20")
row = cursor.fetchone() #fetch one row
while row is not None:
currentFramelet_list= pickle.loads(row.ImageByteArray) #a video clip of framelets
for framelet in currentFramelet_list:
# master_list.append(framelet)
if(framelet.cmd != Commands.NO_CMD.value):
                imgList.append(framelet.frame[120:240]) # keep only the lower half of the image (rows 120-239)
                cmdList.append(framelet.cmd[0:4]) #TODO: THIS IS TEMPORARY, IT SHOULD ACTUALLY RETURN THE WHOLE CMD
else:
noCmdCount += 1
row = cursor.fetchone()
print("noCmdCount = {}".format(noCmdCount))
return imgList #TODO: returns list not numpy array, Do we need numpy array instead?
server = ''
database = ''
username = ''
password = ''
driver= '{ODBC Driver 13 for SQL Server}'
cnxn = pyodbc.connect('DRIVER='+driver+';SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)
cursor = cnxn.cursor()
query1 ="Select X.* From (SELECT *,Row_Number() over (Order by FileName) as RN from poc.TrainingData) As X where X.RN BETWEEN 7 and 11 "
query2 = "Select X.* From (SELECT *,Row_Number() over (Order by FileName) as RN from poc.TrainingData) As X where X.Evaluation='Good'"
query3 = "SELECT * FROM [AmaanRoboticsDB].[].[TrainingData] where Id between 5 and 14"
seq = iaa.Sequential([
# iaa.Sometimes(0.2, iaa.PerspectiveTransform(scale=(0.005, 0.05))), #20% of time do this
# iaa.Sometimes(1, iaa.GaussianBlur(sigma=(0,9))), #30% of time do this
    iaa.Sometimes(0.20, iaa.OneOf([ # do one of these 2, and do that 20% of the time
iaa.ContrastNormalization((0.75, 1.5)), #increase/decrease contrast
iaa.Multiply((0.5, 1.5)) #brightness
]))
])
cursor.execute("SELECT TOP 10 * FROM trainData.SteeringAngleData order by 1 desc")
row = cursor.fetchone() # fetch the first row
while row is not None:
print(row.Id)
p = pickle.loads(row.ImageByteArray)
for framelet in p:
img = cv2.resize(framelet.frame[112:228],(288,116))
img = seq.augment_image(img)
cv2.imshow('i', img)
# print(parseCommand(framelet.cmd))
# time.sleep()
if cv2.waitKey(1) & 0xFF == ord("q"):
break
time.sleep(0.1)
# time.sleep
row = cursor.fetchone()
#show shuffled
# cursor.execute("Select X.* From (SELECT *,Row_Number() over (Order by FileName) as RN from poc.TrainingData) As X where X.Evaluation='Good' and Id < 50")
# imgs = getTrainingData(cursor)
#
#
# for img in imgs:
# print(img.shape)
# cv2.imshow('i', img)
# if cv2.waitKey(1) & 0xFF == ord("q"):
# break
# time.sleep(0.01)
|
[
"pickle.loads",
"cv2.GaussianBlur",
"cv2.medianBlur",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.threshold",
"cv2.adaptiveThreshold",
"time.sleep",
"imgaug.augmenters.ContrastNormalization",
"imgaug.augmenters.Multiply",
"pyodbc.connect",
"cv2.imshow",
"cv2.resize"
] |
[((3039, 3167), 'pyodbc.connect', 'pyodbc.connect', (["('DRIVER=' + driver + ';SERVER=' + server + ';DATABASE=' + database +\n ';UID=' + username + ';PWD=' + password)"], {}), "('DRIVER=' + driver + ';SERVER=' + server + ';DATABASE=' +\n database + ';UID=' + username + ';PWD=' + password)\n", (3053, 3167), False, 'import pyodbc\n'), ((4167, 4199), 'pickle.loads', 'pickle.loads', (['row.ImageByteArray'], {}), '(row.ImageByteArray)\n', (4179, 4199), False, 'import pickle\n'), ((1153, 1190), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (1165, 1190), False, 'import cv2\n'), ((1208, 1233), 'cv2.medianBlur', 'cv2.medianBlur', (['thresh', '(5)'], {}), '(thresh, 5)\n', (1222, 1233), False, 'import cv2\n'), ((1268, 1361), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['thresh', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY', '(11)', '(2)'], {}), '(thresh, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY, 11, 2)\n', (1289, 1361), False, 'import cv2\n'), ((1502, 1539), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (1514, 1539), False, 'import cv2\n'), ((1557, 1592), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['thresh', '(3, 3)', '(0)'], {}), '(thresh, (3, 3), 0)\n', (1573, 1592), False, 'import cv2\n'), ((1636, 1702), 'cv2.threshold', 'cv2.threshold', (['thresh', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(thresh, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (1649, 1702), False, 'import cv2\n'), ((2238, 2270), 'pickle.loads', 'pickle.loads', (['row.ImageByteArray'], {}), '(row.ImageByteArray)\n', (2250, 2270), False, 'import pickle\n'), ((4237, 4284), 'cv2.resize', 'cv2.resize', (['framelet.frame[112:228]', '(288, 116)'], {}), '(framelet.frame[112:228], (288, 116))\n', (4247, 4284), False, 'import cv2\n'), ((4328, 4348), 'cv2.imshow', 'cv2.imshow', (['"""i"""', 'img'], {}), "('i', img)\n", (4338, 4348), False, 'import cv2\n'), ((4488, 4503), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4498, 4503), False, 'import time\n'), ((4427, 4441), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4438, 4441), False, 'import cv2\n'), ((3844, 3882), 'imgaug.augmenters.ContrastNormalization', 'iaa.ContrastNormalization', (['(0.75, 1.5)'], {}), '((0.75, 1.5))\n', (3869, 3882), True, 'from imgaug import augmenters as iaa\n'), ((3928, 3952), 'imgaug.augmenters.Multiply', 'iaa.Multiply', (['(0.5, 1.5)'], {}), '((0.5, 1.5))\n', (3940, 3952), True, 'from imgaug import augmenters as iaa\n')]
|
# -*- coding: utf-8 -*-
import datetime
from decimal import Decimal, ROUND_HALF_UP
import logging
import time
import xlrd
from datapro.framework.util import to_utc
logger = logging.getLogger(__name__)
class Validator(object):
def __init__(self, level='warn'):
self._level = level
self._message_template = None
self.properties = None
self.valid = None
self.reset()
def _check_null(self, key, value, blank_is_null, nulls_ok, level, message_template):
if value is None:
if nulls_ok:
self.properties[key] = value
else:
self._fail(key, level, 'Nulls are not allowed', message_template)
return False
elif isinstance(value, str) and value == '':
if blank_is_null:
if nulls_ok:
self.properties[key] = None
else:
self._fail(key, level, 'Blank strings are not allowed', message_template)
return False
return True
def _fail(self, key, level, message, message_template):
self.valid = False
if not level:
level = self._level
if not message_template:
message_template = self._message_template
# print(key)
# print(message_template)
# # print(message_template.format(**{'key': key, 'message': message})
# exit()
# getattr(logger, level)(message_template.format(**{'key': key, 'message': message}))
getattr(logger, level)(message_template.format(key=key, message=message))
    def reset(self, message_template=None):
        if message_template:
            self._message_template = message_template
self.properties = {}
self.valid = True
def date(self, key, value, format, nulls_ok=False, level=None, message_template=None):
if self._check_null(key, value, True, nulls_ok, level, message_template):
if isinstance(format, str):
try:
self.properties[key] = datetime.date(*(time.strptime(value, format)[:3]))
except:
self._fail(key, level, '"{0}" could not be converted to a date'.format(value), message_template)
else:
try:
self.properties[key] = datetime.date(*(xlrd.xldate_as_tuple(value, format)[:3]))
except:
self._fail(key, level, '"{0}" could not be converted to a date'.format(value), message_template)
def datetime(self, key, value, format, tz=None, nulls_ok=False, level=None, message_template=None):
if self._check_null(key, value, True, nulls_ok, level, message_template):
if isinstance(format, str):
try:
dt = datetime.datetime(*(time.strptime(value, format)[:6]))
except:
self._fail(key, level, '"{0}" could not be converted to a date and time'.format(value), message_template)
return
else:
try:
dt = datetime.datetime(*(xlrd.xldate_as_tuple(value, format)[:6]))
except:
self._fail(key, level, '"{0}" could not be converted to a date and time'.format(value), message_template)
return
if tz is not None:
dt = to_utc(dt, tz)
self.properties[key] = dt
def decimal(self, key, value, precision, nulls_ok=False, level=None, message_template=None):
if self._check_null(key, value, True, nulls_ok, level, message_template):
qp = '1.{places}'.format(places='0'*precision)
try:
self.properties[key] = Decimal(value).quantize(Decimal(qp), rounding=ROUND_HALF_UP)
except:
self._fail(key, level, '"{0}" could not be converted to a number'.format(value), message_template)
def int(self, key, value, nulls_ok=False, level=None, message_template=None):
if self._check_null(key, value, True, nulls_ok, level, message_template):
try:
self.properties[key] = int(value)
except:
self._fail(key, level, '"{0}" could not be converted to an integer'.format(value), message_template)
def string(self, key, value, blanks_ok=False, convert_nulls_to_blank=False, max_length=0, nulls_ok=False, level=None, message_template=None):
if value is None:
if convert_nulls_to_blank:
if blanks_ok:
self.properties[key] = ''
else:
self._fail(key, level, 'Interpreted null as blank, but blank strings are not allowed', message_template)
elif not nulls_ok:
self._fail(key, level, 'Nulls are not allowed', message_template)
elif value == '' and not blanks_ok:
self._fail(key, level, 'Blank strings are not allowed', message_template)
else:
if 0 < max_length < len(value):
self._fail(
key,
level,
'"{0}" is {1} characters long, but the maximum allowed length is {2}'.format(
value,
len(value),
max_length
),
message_template
)
else:
self.properties[key] = value
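# Minimal usage sketch (not part of the original module); the field names and
# the message template are illustrative only. Failed checks set .valid to
# False and are logged at the configured level; passing values land in
# .properties already converted.
if __name__ == '__main__':
    v = Validator()
    v.reset('{key}: {message}')
    v.int('age', '42')                     # stored as int 42
    v.string('name', '', blanks_ok=False)  # logs a warning and flags the record
    print(v.valid)        # False because of the blank name
    print(v.properties)   # {'age': 42}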
|
[
"decimal.Decimal",
"datapro.framework.util.to_utc",
"xlrd.xldate_as_tuple",
"time.strptime",
"logging.getLogger"
] |
[((176, 203), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (193, 203), False, 'import logging\n'), ((3401, 3415), 'datapro.framework.util.to_utc', 'to_utc', (['dt', 'tz'], {}), '(dt, tz)\n', (3407, 3415), False, 'from datapro.framework.util import to_utc\n'), ((3773, 3784), 'decimal.Decimal', 'Decimal', (['qp'], {}), '(qp)\n', (3780, 3784), False, 'from decimal import Decimal, ROUND_HALF_UP\n'), ((3749, 3763), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (3756, 3763), False, 'from decimal import Decimal, ROUND_HALF_UP\n'), ((2083, 2111), 'time.strptime', 'time.strptime', (['value', 'format'], {}), '(value, format)\n', (2096, 2111), False, 'import time\n'), ((2357, 2392), 'xlrd.xldate_as_tuple', 'xlrd.xldate_as_tuple', (['value', 'format'], {}), '(value, format)\n', (2377, 2392), False, 'import xlrd\n'), ((2833, 2861), 'time.strptime', 'time.strptime', (['value', 'format'], {}), '(value, format)\n', (2846, 2861), False, 'import time\n'), ((3129, 3164), 'xlrd.xldate_as_tuple', 'xlrd.xldate_as_tuple', (['value', 'format'], {}), '(value, format)\n', (3149, 3164), False, 'import xlrd\n')]
|
#!/usr/bin/env python
'''======================================================
Created by: <NAME> and <NAME>
Last updated: March 2015
File name: DF_Plots.py
Organization: RISC Lab, Utah State University
======================================================'''
import roslib; roslib.load_manifest('risc_msgs')
import rospy
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import pylab as p
import matplotlib.pyplot as plt
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import * # states,controls,trajectory
from sensor_msgs.msg import Joy
#========================#
# Globals #
#========================#
rate = 20 # Hz
states = Cortex()
traj = Trajectories()
ctrl = Controls()
start_time = 0
euler_max = 45*np.pi/180
Button_pushed = False
plot_button = 3
#===================================#
# Plotting Variables #
#===================================#
states_of_interest = 16
storage_mat = np.asmatrix(np.zeros((1,states_of_interest)))
index = [0]
name = ['Initial']
#==================#
# Get States #
#==================#
def GetStates(I):
global states
states = I
#=========================#
# Get Joystick Data #
#=========================#
def GetJoy(I):
global Button_pushed
Button_pushed = I.buttons[plot_button]
#======================#
# Get Trajectory #
#======================#
def GetTrajectory(I):
global traj
traj = I
#====================#
# Get Controls #
#====================#
def GetControls(I):
global ctrl
ctrl = I
def Plots():
global storage_mat, index, name
if len(index) > 2:
for i in range(len(index)-1):
# assign data vectors
f = index[i+1]
if i+2 == len(index):
b = -1
else:
b = index[i+2]
x_act = storage_mat[f:b,0]
y_act = storage_mat[f:b,1]
z_act = storage_mat[f:b,2]
x_des = storage_mat[f:b,3]
y_des = storage_mat[f:b,4]
z_des = storage_mat[f:b,5]
phi_des = storage_mat[f:b,6]
theta_des = storage_mat[f:b,7]
psi_des = storage_mat[f:b,8]
phi_act = storage_mat[f:b,9]
theta_act = storage_mat[f:b,10]
psi_act = storage_mat[f:b,11]
xdot_err = storage_mat[f:b,12]
ydot_err = storage_mat[f:b,13]
zdot_err = storage_mat[f:b,14]
t = storage_mat[f:b,15]
# 3d plot
plot3d(name[i+1],x_act,y_act,z_act,x_des,y_des,z_des)
# Roll
plot2d(name[i+1] + ' Roll',phi_act,phi_des,t,'Time (s)','Angle (Deg)')
# Pitch
plot2d(name[i+1] + ' Pitch',theta_act,theta_des,t,'Time (s)','Angle (Deg)')
# Errors
plot3err(name[i+1] + ' Position Errors',x_des-x_act,y_des-y_act,z_des-z_act,t,'Time (s)', 'Error (m)', 'x', 'y', 'z')
plot3err(name[i+1] + ' Velocity Errors',xdot_err,ydot_err,zdot_err,t,'Time (s)', 'Error (m/s)', 'xdot', 'ydot', 'zdot')
plt.show(block=False)
else:
rospy.loginfo("insufficient data")
#==========================#
# Plotting Functions #
#==========================#
def plot3d(Traj_name,x_act,y_act,z_act,x_des,y_des,z_des):
x_act=list(np.array(x_act).reshape(-1))
y_act=list(np.array(y_act).reshape(-1))
z_act=list(np.array(z_act).reshape(-1))
x_des=list(np.array(x_des).reshape(-1))
y_des=list(np.array(y_des).reshape(-1))
z_des=list(np.array(z_des).reshape(-1))
fig = plt.figure(Traj_name)
ax = fig.gca(projection='3d')
ax.plot(x_act, y_act, z_act,'k-', label='Actual')
ax.plot(x_des, y_des, z_des,'r-', label='Desired')
ax.legend()
ax.set_title(Traj_name + ' Trajectory', fontsize=16)
ax.set_xlabel(r'X (m)', fontsize=14)
ax.set_ylabel(r'Y (m)', fontsize=14)
ax.set_zlabel(r'Z (m)', fontsize=14)
ax.set_xlim([-2, 2])
ax.set_ylim([-2, 2])
ax.set_zlim([0, 2])
def plot3err(plot_name,err1,err2,err3,time,xaxis_label, yaxis_label,label1, label2, label3):
Err1 = list(np.array(err1).reshape(-1))
Err2 = list(np.array(err2).reshape(-1))
Err3 = list(np.array(err3).reshape(-1))
time = list(np.array(time).reshape(-1))
fig = plt.figure(plot_name)
plt.plot(time, Err1,'b-', label=label1)
plt.plot(time, Err2,'k-', label=label2)
plt.plot(time, Err3,'r-', label=label3)
plt.legend()
plt.title(plot_name, fontsize=16)
plt.xlabel(xaxis_label, fontsize=14)
plt.ylabel(yaxis_label, fontsize=14)
plt.xlim((time[0],time[-1]))
y_min = min([min(Err1),min(Err2),min(Err3)])
y_min = y_min - .2*abs(y_min)
y_max = max([max(Err1),max(Err2),max(Err3)])
y_max = y_max + .2*abs(y_max)
plt.ylim((y_min,y_max))
def plot2d(plot_name,actual_data,commanded_data,time,xaxis_label, yaxis_label):
actual_data = list(np.array(actual_data).reshape(-1))
commanded_data = list(np.array(commanded_data).reshape(-1))
time = list(np.array(time).reshape(-1))
fig = plt.figure(plot_name)
plt.plot(time, actual_data, 'b-', label='actual')
plt.plot(time, commanded_data,'r:', label='Commanded')
plt.legend()
plt.title(plot_name, fontsize=16)
plt.xlabel(xaxis_label, fontsize=14)
plt.ylabel(yaxis_label, fontsize=14)
plt.xlim((time[0],time[-1]))
y_min = min([min(actual_data),min(commanded_data)])
y_min = y_min - .2*abs(y_min)
y_max = max([max(actual_data),max(commanded_data)])
y_max = y_max + .2*abs(y_max)
plt.ylim((y_min,y_max))
#==================#
# Datalogger #
#==================#
def Datalogger():
global start_time,states,ctrl, traj, euler_max
#======================================================#
# If all states of interest are present log data #
#======================================================#
if len(traj.Obj) > 0 and len(states.Obj) > 0 and len(ctrl.Obj) > 0:
global storage_mat
rospy.loginfo("logging data...")
x_act = states.Obj[0].x
y_act = states.Obj[0].y
z_act = states.Obj[0].z
x_des = traj.Obj[0].x
y_des = traj.Obj[0].y
z_des = traj.Obj[0].z
phi_traj = ctrl.Obj[0].phi*euler_max*180/np.pi
theta_traj = ctrl.Obj[0].theta*euler_max*180/np.pi
psi_traj = traj.Obj[0].psi*180/np.pi
phi_cort = states.Obj[0].phi
theta_cort = states.Obj[0].theta
psi_cort = states.Obj[0].psi
u_cort_err = traj.Obj[0].xdot - states.Obj[0].u
v_cort_err = traj.Obj[0].ydot - states.Obj[0].v
w_cort_err = traj.Obj[0].zdot - states.Obj[0].w
t = float(rospy.get_time() - start_time)
        # columns: actual x/y/z, desired x/y/z, commanded Euler angles, Cortex Euler angles, velocity errors, time
        new_stack = np.asmatrix(np.array([x_act, y_act, z_act, x_des, y_des, z_des,\
                  phi_traj, theta_traj, psi_traj, phi_cort, theta_cort, psi_cort,\
                  u_cort_err, v_cort_err, w_cort_err,t]))
storage_mat = np.append(storage_mat,new_stack,0)
#==========================================================================#
# If there is a new trajectory store the index and trajectory name #
#==========================================================================#
global storage_mat, states_of_interest,name,index
if len(traj.Obj) > 0 and name[-1] != traj.Obj[0].name:
name.append(traj.Obj[0].name)
index.append(storage_mat.shape[0] -1)
start_time = rospy.get_time()
#===================#
# Main #
#===================#
if __name__=='__main__':
rospy.init_node('DF_Plotter')
start_time = rospy.get_time()
euler_max = float(rospy.get_param("euler_angle_max", ".78537")) #in radians
plot_button = int(rospy.get_param("plot_button", "3"))
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
r = rospy.Rate(rate)
while not rospy.is_shutdown():
sub_states = rospy.Subscriber('/cortex_raw' , Cortex, GetStates)
sub_traj = rospy.Subscriber('/trajectory',Trajectories, GetTrajectory)
sub_cntrl = rospy.Subscriber('/controls' , Controls, GetControls)
sub_joy = rospy.Subscriber('/joy' , Joy, GetJoy)
Datalogger()
if Button_pushed:
Plots()
answer = raw_input('Erase plots and reset datalogger?')
if answer == 'y' or answer == 'yes' or answer == 'I guess' or answer == 'sure':
rospy.loginfo("Resetting datalogger and erasing plots...")
plt.clf()
start_time = rospy.get_time()
storage_mat = np.asmatrix(np.zeros((1,states_of_interest)))
plt.close('all')
else:
plt.clf()
plt.close('all')
rospy.signal_shutdown(0)
r.sleep()
|
[
"matplotlib.pyplot.title",
"rospy.Subscriber",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"roslib.load_manifest",
"matplotlib.pyplot.close",
"rospy.Rate",
"rospy.signal_shutdown",
"numpy.append",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_time",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"rospy.loginfo",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"numpy.zeros",
"rospy.get_param",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((301, 334), 'roslib.load_manifest', 'roslib.load_manifest', (['"""risc_msgs"""'], {}), "('risc_msgs')\n", (321, 334), False, 'import roslib\n'), ((1180, 1213), 'numpy.zeros', 'np.zeros', (['(1, states_of_interest)'], {}), '((1, states_of_interest))\n', (1188, 1213), True, 'import numpy as np\n'), ((3923, 3944), 'matplotlib.pyplot.figure', 'plt.figure', (['Traj_name'], {}), '(Traj_name)\n', (3933, 3944), True, 'import matplotlib.pyplot as plt\n'), ((4651, 4672), 'matplotlib.pyplot.figure', 'plt.figure', (['plot_name'], {}), '(plot_name)\n', (4661, 4672), True, 'import matplotlib.pyplot as plt\n'), ((4677, 4717), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'Err1', '"""b-"""'], {'label': 'label1'}), "(time, Err1, 'b-', label=label1)\n", (4685, 4717), True, 'import matplotlib.pyplot as plt\n'), ((4721, 4761), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'Err2', '"""k-"""'], {'label': 'label2'}), "(time, Err2, 'k-', label=label2)\n", (4729, 4761), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4805), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'Err3', '"""r-"""'], {'label': 'label3'}), "(time, Err3, 'r-', label=label3)\n", (4773, 4805), True, 'import matplotlib.pyplot as plt\n'), ((4809, 4821), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4819, 4821), True, 'import matplotlib.pyplot as plt\n'), ((4826, 4859), 'matplotlib.pyplot.title', 'plt.title', (['plot_name'], {'fontsize': '(16)'}), '(plot_name, fontsize=16)\n', (4835, 4859), True, 'import matplotlib.pyplot as plt\n'), ((4864, 4900), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xaxis_label'], {'fontsize': '(14)'}), '(xaxis_label, fontsize=14)\n', (4874, 4900), True, 'import matplotlib.pyplot as plt\n'), ((4905, 4941), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yaxis_label'], {'fontsize': '(14)'}), '(yaxis_label, fontsize=14)\n', (4915, 4941), True, 'import matplotlib.pyplot as plt\n'), ((4946, 4975), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(time[0], time[-1])'], {}), '((time[0], time[-1]))\n', (4954, 4975), True, 'import matplotlib.pyplot as plt\n'), ((5146, 5170), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(y_min, y_max)'], {}), '((y_min, y_max))\n', (5154, 5170), True, 'import matplotlib.pyplot as plt\n'), ((5441, 5462), 'matplotlib.pyplot.figure', 'plt.figure', (['plot_name'], {}), '(plot_name)\n', (5451, 5462), True, 'import matplotlib.pyplot as plt\n'), ((5467, 5516), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'actual_data', '"""b-"""'], {'label': '"""actual"""'}), "(time, actual_data, 'b-', label='actual')\n", (5475, 5516), True, 'import matplotlib.pyplot as plt\n'), ((5521, 5576), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'commanded_data', '"""r:"""'], {'label': '"""Commanded"""'}), "(time, commanded_data, 'r:', label='Commanded')\n", (5529, 5576), True, 'import matplotlib.pyplot as plt\n'), ((5580, 5592), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5590, 5592), True, 'import matplotlib.pyplot as plt\n'), ((5597, 5630), 'matplotlib.pyplot.title', 'plt.title', (['plot_name'], {'fontsize': '(16)'}), '(plot_name, fontsize=16)\n', (5606, 5630), True, 'import matplotlib.pyplot as plt\n'), ((5635, 5671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xaxis_label'], {'fontsize': '(14)'}), '(xaxis_label, fontsize=14)\n', (5645, 5671), True, 'import matplotlib.pyplot as plt\n'), ((5676, 5712), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yaxis_label'], {'fontsize': '(14)'}), '(yaxis_label, fontsize=14)\n', (5686, 5712), True, 'import matplotlib.pyplot as plt\n'), ((5717, 5746), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['(time[0], time[-1])'], {}), '((time[0], time[-1]))\n', (5725, 5746), True, 'import matplotlib.pyplot as plt\n'), ((5931, 5955), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(y_min, y_max)'], {}), '((y_min, y_max))\n', (5939, 5955), True, 'import matplotlib.pyplot as plt\n'), ((8031, 8060), 'rospy.init_node', 'rospy.init_node', (['"""DF_Plotter"""'], {}), "('DF_Plotter')\n", (8046, 8060), False, 'import rospy\n'), ((8078, 8094), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (8092, 8094), False, 'import rospy\n'), ((8379, 8395), 'rospy.Rate', 'rospy.Rate', (['rate'], {}), '(rate)\n', (8389, 8395), False, 'import rospy\n'), ((3441, 3475), 'rospy.loginfo', 'rospy.loginfo', (['"""insufficient data"""'], {}), "('insufficient data')\n", (3454, 3475), False, 'import rospy\n'), ((6395, 6427), 'rospy.loginfo', 'rospy.loginfo', (['"""logging data..."""'], {}), "('logging data...')\n", (6408, 6427), False, 'import rospy\n'), ((7407, 7443), 'numpy.append', 'np.append', (['storage_mat', 'new_stack', '(0)'], {}), '(storage_mat, new_stack, 0)\n', (7416, 7443), True, 'import numpy as np\n'), ((7905, 7921), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (7919, 7921), False, 'import rospy\n'), ((8120, 8164), 'rospy.get_param', 'rospy.get_param', (['"""euler_angle_max"""', '""".78537"""'], {}), "('euler_angle_max', '.78537')\n", (8135, 8164), False, 'import rospy\n'), ((8201, 8236), 'rospy.get_param', 'rospy.get_param', (['"""plot_button"""', '"""3"""'], {}), "('plot_button', '3')\n", (8216, 8236), False, 'import rospy\n'), ((8410, 8429), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (8427, 8429), False, 'import rospy\n'), ((8453, 8503), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/cortex_raw"""', 'Cortex', 'GetStates'], {}), "('/cortex_raw', Cortex, GetStates)\n", (8469, 8503), False, 'import rospy\n'), ((8527, 8587), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/trajectory"""', 'Trajectories', 'GetTrajectory'], {}), "('/trajectory', Trajectories, GetTrajectory)\n", (8543, 8587), False, 'import rospy\n'), ((8609, 8661), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/controls"""', 'Controls', 'GetControls'], {}), "('/controls', Controls, GetControls)\n", (8625, 8661), False, 'import rospy\n'), ((8685, 8722), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/joy"""', 'Joy', 'GetJoy'], {}), "('/joy', Joy, GetJoy)\n", (8701, 8722), False, 'import rospy\n'), ((3401, 3422), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3409, 3422), True, 'import matplotlib.pyplot as plt\n'), ((7184, 7347), 'numpy.array', 'np.array', (['[x_act, y_act, z_act, z_des, y_des, z_des, phi_traj, theta_traj, psi_traj,\n phi_cort, theta_cort, psi_cort, u_cort_err, v_cort_err, w_cort_err, t]'], {}), '([x_act, y_act, z_act, z_des, y_des, z_des, phi_traj, theta_traj,\n psi_traj, phi_cort, theta_cort, psi_cort, u_cort_err, v_cort_err,\n w_cort_err, t])\n', (7192, 7347), True, 'import numpy as np\n'), ((3663, 3678), 'numpy.array', 'np.array', (['x_act'], {}), '(x_act)\n', (3671, 3678), True, 'import numpy as np\n'), ((3707, 3722), 'numpy.array', 'np.array', (['y_act'], {}), '(y_act)\n', (3715, 3722), True, 'import numpy as np\n'), ((3751, 3766), 'numpy.array', 'np.array', (['z_act'], {}), '(z_act)\n', (3759, 3766), True, 'import numpy as np\n'), ((3795, 3810), 'numpy.array', 'np.array', (['x_des'], {}), '(x_des)\n', (3803, 3810), True, 'import numpy as np\n'), ((3839, 3854), 'numpy.array', 'np.array', (['y_des'], {}), '(y_des)\n', (3847, 
3854), True, 'import numpy as np\n'), ((3883, 3898), 'numpy.array', 'np.array', (['z_des'], {}), '(z_des)\n', (3891, 3898), True, 'import numpy as np\n'), ((4471, 4485), 'numpy.array', 'np.array', (['err1'], {}), '(err1)\n', (4479, 4485), True, 'import numpy as np\n'), ((4518, 4532), 'numpy.array', 'np.array', (['err2'], {}), '(err2)\n', (4526, 4532), True, 'import numpy as np\n'), ((4565, 4579), 'numpy.array', 'np.array', (['err3'], {}), '(err3)\n', (4573, 4579), True, 'import numpy as np\n'), ((4612, 4626), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (4620, 4626), True, 'import numpy as np\n'), ((5277, 5298), 'numpy.array', 'np.array', (['actual_data'], {}), '(actual_data)\n', (5285, 5298), True, 'import numpy as np\n'), ((5338, 5362), 'numpy.array', 'np.array', (['commanded_data'], {}), '(commanded_data)\n', (5346, 5362), True, 'import numpy as np\n'), ((5402, 5416), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (5410, 5416), True, 'import numpy as np\n'), ((7121, 7137), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (7135, 7137), False, 'import rospy\n'), ((8973, 9031), 'rospy.loginfo', 'rospy.loginfo', (['"""Resetting datalogger and erasing plots..."""'], {}), "('Resetting datalogger and erasing plots...')\n", (8986, 9031), False, 'import rospy\n'), ((9049, 9058), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9056, 9058), True, 'import matplotlib.pyplot as plt\n'), ((9089, 9105), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (9103, 9105), False, 'import rospy\n'), ((9200, 9216), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9209, 9216), True, 'import matplotlib.pyplot as plt\n'), ((9253, 9262), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9260, 9262), True, 'import matplotlib.pyplot as plt\n'), ((9280, 9296), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9289, 9296), True, 'import matplotlib.pyplot as plt\n'), ((9314, 9338), 'rospy.signal_shutdown', 'rospy.signal_shutdown', (['(0)'], {}), '(0)\n', (9335, 9338), False, 'import rospy\n'), ((9149, 9182), 'numpy.zeros', 'np.zeros', (['(1, states_of_interest)'], {}), '((1, states_of_interest))\n', (9157, 9182), True, 'import numpy as np\n')]
|
import numpy as np
import anndata as ad
# -------------------------------------------------------------------------------
# Some test data
# -------------------------------------------------------------------------------
X_list = [ # data matrix of shape n_obs x n_vars
[1, 2, 3], [4, 5, 6], [7, 8, 9]]
obs_dict = { # annotation of observations / rows
'row_names': ['name1', 'name2', 'name3'], # row annotation
'oanno1': ['cat1', 'cat2', 'cat2'], # categorical annotation
'oanno2': ['o1', 'o2', 'o3'], # string annotation
'oanno3': [2.1, 2.2, 2.3]} # float annotation
var_dict = { # annotation of variables / columns
'vanno1': [3.1, 3.2, 3.3]}
uns_dict = { # unstructured annotation
'oanno1_colors': ['#000000', '#FFFFFF'],
'uns2': ['some annotation']}
# -------------------------------------------------------------------------------
# The test functions
# -------------------------------------------------------------------------------
def test_views():
X = np.array(X_list)
adata = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict, dtype='int32')
assert adata[:, 0].isview
assert adata[:, 0].X.tolist() == [1, 4, 7]
adata[:2, 0].X = [0, 0]
assert adata[:, 0].X.tolist() == [0, 0, 7]
adata_subset = adata[:2, [0, 1]]
assert adata_subset.isview
# now transition to actual object
adata_subset.obs['foo'] = range(2)
assert not adata_subset.isview
assert adata_subset.obs['foo'].tolist() == list(range(2))
def test_slice_copy():
adata = ad.AnnData(np.empty((100, 100)))
adata.obsm['o'] = np.empty((100, 50))
adata = adata[:50]
adata.obsm['o'] = np.ones((50, 20))
|
[
"anndata.AnnData",
"numpy.empty",
"numpy.array",
"numpy.ones"
] |
[((1049, 1065), 'numpy.array', 'np.array', (['X_list'], {}), '(X_list)\n', (1057, 1065), True, 'import numpy as np\n'), ((1078, 1148), 'anndata.AnnData', 'ad.AnnData', (['X'], {'obs': 'obs_dict', 'var': 'var_dict', 'uns': 'uns_dict', 'dtype': '"""int32"""'}), "(X, obs=obs_dict, var=var_dict, uns=uns_dict, dtype='int32')\n", (1088, 1148), True, 'import anndata as ad\n'), ((1641, 1660), 'numpy.empty', 'np.empty', (['(100, 50)'], {}), '((100, 50))\n', (1649, 1660), True, 'import numpy as np\n'), ((1707, 1724), 'numpy.ones', 'np.ones', (['(50, 20)'], {}), '((50, 20))\n', (1714, 1724), True, 'import numpy as np\n'), ((1597, 1617), 'numpy.empty', 'np.empty', (['(100, 100)'], {}), '((100, 100))\n', (1605, 1617), True, 'import numpy as np\n')]
|
from django.db import models
class Author(models.Model):
name = models.CharField('Name', null=True, blank=True, max_length=255)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def __unicode__(self):
return self.name
class SortableBook(models.Model):
title = models.CharField('Title', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
author = models.ForeignKey(Author, null=True, on_delete=models.CASCADE)
class Meta(object):
ordering = ['my_order']
def __str__(self):
return self.title
def __unicode__(self):
return self.title
class Chapter(models.Model):
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True, on_delete=models.CASCADE)
my_order = models.PositiveIntegerField(blank=False, null=False)
class Meta(object):
ordering = ['my_order']
def __str__(self):
return 'Chapter: {0}'.format(self.title)
def __unicode__(self):
return 'Chapter: {0}'.format(self.title)
class Notes(models.Model):
note = models.CharField('Note', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True, on_delete=models.CASCADE)
def __str__(self):
return 'Note: {0}'.format(self.note)
def __unicode__(self):
return 'Note: {0}'.format(self.note)
|
[
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.ForeignKey"
] |
[((70, 133), 'django.db.models.CharField', 'models.CharField', (['"""Name"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), "('Name', null=True, blank=True, max_length=255)\n", (86, 133), False, 'from django.db import models\n'), ((329, 393), 'django.db.models.CharField', 'models.CharField', (['"""Title"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), "('Title', null=True, blank=True, max_length=255)\n", (345, 393), False, 'from django.db import models\n'), ((409, 472), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)', 'blank': '(False)', 'null': '(False)'}), '(default=0, blank=False, null=False)\n', (436, 472), False, 'from django.db import models\n'), ((486, 548), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Author'], {'null': '(True)', 'on_delete': 'models.CASCADE'}), '(Author, null=True, on_delete=models.CASCADE)\n', (503, 548), False, 'from django.db import models\n'), ((753, 817), 'django.db.models.CharField', 'models.CharField', (['"""Title"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), "('Title', null=True, blank=True, max_length=255)\n", (769, 817), False, 'from django.db import models\n'), ((829, 897), 'django.db.models.ForeignKey', 'models.ForeignKey', (['SortableBook'], {'null': '(True)', 'on_delete': 'models.CASCADE'}), '(SortableBook, null=True, on_delete=models.CASCADE)\n', (846, 897), False, 'from django.db import models\n'), ((913, 965), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(False)', 'null': '(False)'}), '(blank=False, null=False)\n', (940, 965), False, 'from django.db import models\n'), ((1213, 1276), 'django.db.models.CharField', 'models.CharField', (['"""Note"""'], {'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), "('Note', null=True, blank=True, max_length=255)\n", (1229, 1276), False, 'from django.db import models\n'), ((1288, 1356), 'django.db.models.ForeignKey', 'models.ForeignKey', (['SortableBook'], {'null': '(True)', 'on_delete': 'models.CASCADE'}), '(SortableBook, null=True, on_delete=models.CASCADE)\n', (1305, 1356), False, 'from django.db import models\n')]
|
from scivision_plankton_models import resnet50
import PIL
import torch
import torchvision
X = PIL.Image.open('Pia1.2017-10-12.0711+N00207451_hc.tif')
X = torchvision.transforms.ToTensor()(X)
X = torchvision.transforms.Resize((256,256))(X)
X = torch.unsqueeze(X, 0)
model = resnet50()
y = model.predict(X)
_, preds = torch.max(y, 1)
print(preds)
|
[
"PIL.Image.open",
"scivision_plankton_models.resnet50",
"torch.max",
"torch.unsqueeze",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
] |
[((95, 150), 'PIL.Image.open', 'PIL.Image.open', (['"""Pia1.2017-10-12.0711+N00207451_hc.tif"""'], {}), "('Pia1.2017-10-12.0711+N00207451_hc.tif')\n", (109, 150), False, 'import PIL\n'), ((245, 266), 'torch.unsqueeze', 'torch.unsqueeze', (['X', '(0)'], {}), '(X, 0)\n', (260, 266), False, 'import torch\n'), ((276, 286), 'scivision_plankton_models.resnet50', 'resnet50', ([], {}), '()\n', (284, 286), False, 'from scivision_plankton_models import resnet50\n'), ((319, 334), 'torch.max', 'torch.max', (['y', '(1)'], {}), '(y, 1)\n', (328, 334), False, 'import torch\n'), ((156, 189), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (187, 189), False, 'import torchvision\n'), ((197, 238), 'torchvision.transforms.Resize', 'torchvision.transforms.Resize', (['(256, 256)'], {}), '((256, 256))\n', (226, 238), False, 'import torchvision\n')]
|