code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import gzip
import pickle
import validators
def gzip_to_plaintext(path, destination):
    """
    Saves a file unzipped and unpickled, for manual inspection
    :param path: input file path (gzipped pickle)
    :param destination: where the plain text file will be saved
    :return: None
    """
    with gzip.open(path, "rb") as infile:
        doc = pickle.load(infile)
    # NOTE: the binary write below requires ``doc`` to be bytes-like; pickles
    # of other types would raise TypeError here (unchanged from the original).
    with open(destination, "wb+") as outfile:
        # FIX: removed the redundant ``outfile.close()`` — the ``with``
        # statement already closes the file on exit.
        outfile.write(doc)
def load_gzip_pickle(path):
    """
    Loads a gzipped pickle
    :param path: file path
    :return: The python object
    """
    with gzip.open(path, "rb") as compressed:
        obj = pickle.load(compressed)
    return obj
def save_gzip_pickle(path, object):
    """
    Saves python object as a gzipped pickle.

    FIX: the original docstring claimed ":return: The python object", but the
    function returns None and the ``object`` parameter was undocumented.

    :param path: file path to write
    :param object: the python object to serialize (the name shadows the
        builtin ``object``; kept for backward compatibility with keyword callers)
    :return: None
    """
    with gzip.open(path, "wb+") as file:
        pickle.dump(object, file)
def load_gzip_html(path):
    """Read a gzipped file and return its contents decoded as UTF-8 text."""
    with gzip.open(path, "rb") as fh:
        raw = fh.read()
    return raw.decode("utf-8")
def save_gzip_html(path, html):
    """Write the string ``html`` to ``path`` UTF-8 encoded and gzip-compressed."""
    payload = html.encode("utf-8")
    with gzip.open(path, "wb+") as fh:
        fh.write(payload)
def append_line(path, line):
    """Append ``line`` followed by a newline to a gzipped text file.

    BUG FIX: the original opened with mode ``"w+"``, which (a) truncated the
    file on every call instead of appending, and (b) opened the gzip stream in
    binary mode, so writing a ``str`` raised TypeError on Python 3.  Mode
    ``"at"`` appends text; the gzip module transparently reads the resulting
    multi-member archive back as one stream.

    :param path: gzip file path (created if missing)
    :param line: text line to append (without trailing newline)
    :return: None
    """
    with gzip.open(path, "at", encoding="utf-8") as outfile:
        outfile.write(line + "\n")
def is_url(string):
    """Return True when ``string`` is a valid URL, otherwise False."""
    # bool() collapses validators' truthy success result and its falsy
    # ValidationFailure object into a plain boolean for callers.
    return bool(validators.url(string))
def convert_si_to_number(number):
    """Convert a human-readable count like '1.5K', '2M', '3B' or '1,234' to int.

    Commas are stripped first; suffixes are checked in K, M, B order, matching
    anywhere in the string.
    """
    cleaned = number.replace(",", "")
    for suffix, factor in (("K", 1000), ("M", 1000000), ("B", 1000000000)):
        if suffix in cleaned:
            return int(float(cleaned.replace(suffix, "")) * factor)
    return int(cleaned)
| [
"validators.url",
"pickle.dump",
"pickle.load",
"gzip.open"
] | [((1271, 1293), 'validators.url', 'validators.url', (['string'], {}), '(string)\n', (1285, 1293), False, 'import validators\n'), ((291, 312), 'gzip.open', 'gzip.open', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (300, 312), False, 'import gzip\n'), ((338, 357), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (349, 357), False, 'import pickle\n'), ((607, 628), 'gzip.open', 'gzip.open', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (616, 628), False, 'import gzip\n'), ((655, 674), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (666, 674), False, 'import pickle\n'), ((840, 862), 'gzip.open', 'gzip.open', (['path', '"""wb+"""'], {}), "(path, 'wb+')\n", (849, 862), False, 'import gzip\n'), ((880, 905), 'pickle.dump', 'pickle.dump', (['object', 'file'], {}), '(object, file)\n', (891, 905), False, 'import pickle\n'), ((943, 964), 'gzip.open', 'gzip.open', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (952, 964), False, 'import gzip\n'), ((1060, 1082), 'gzip.open', 'gzip.open', (['path', '"""wb+"""'], {}), "(path, 'wb+')\n", (1069, 1082), False, 'import gzip\n'), ((1173, 1194), 'gzip.open', 'gzip.open', (['path', '"""w+"""'], {}), "(path, 'w+')\n", (1182, 1194), False, 'import gzip\n')] |
# coding: utf-8
from collections import Counter
# Competitive-programming style solution: read n (total card count, unused —
# the string length implies it) and k (number of cards to pick), then a string
# where each character is a card type.
n, k = [int(i) for i in input().split()]
card = input()
c = Counter(card)
ans = 0
# Greedily take all copies of the most frequent remaining card type; picking
# m cards of a single type scores m*m points.
while k > 0:
    tmp = c.pop(c.most_common(1)[0][0])
    if k > tmp:
        ans += tmp*tmp
        k -= tmp
    else:
        # Budget smaller than this group: take the last k cards and stop.
        ans += k*k
        k = 0
print(ans)
| [
"collections.Counter"
] | [((108, 121), 'collections.Counter', 'Counter', (['card'], {}), '(card)\n', (115, 121), False, 'from collections import Counter\n')] |
import os
import warnings
import itertools
import pandas
import time
class SlurmJobArray():
    """ Selects a single condition from an array of parameters using the SLURM_ARRAY_TASK_ID environment variable.

    The parameters need to be supplied as a dictionary. If the task is not in a slurm environment,
    the test parameters will supersede the parameters, and the job_id is taken as ``test_id``. Example:

        parameters={"epsilon":[100],
                    "aligned":[True,False],
                    "actinLen":[20,40,60,80,100,120,140,160,180,200,220,240,260,280,300],
                    "repetition":range(5),
                    "temperature":[300],
                    "system2D":[False],
                    "simulation_platform":["OpenCL"]}
        test_parameters={"simulation_platform":"CPU"}
        sjob=SlurmJobArray("ActinSimv6", parameters, test_parameters)

    :var test_run: Boolean: This simulation is a test
    :var job_id: SLURM_ARRAY_TASK_ID
    :var all_parameters: Parameters used to initialize the job
    :var parameter: Parameters for this particular job
    :var name: The name (and relative path) of the output
    """

    def __init__(self, name, parameters, test_parameters=None, test_id=0):
        # FIX: the default for ``test_parameters`` was a shared mutable ``{}``
        # (mutable-default-argument pitfall); ``None`` keeps behavior identical.
        self.all_parameters = parameters
        self.test_parameters = {} if test_parameters is None else test_parameters
        # Capture every SLURM_* environment variable for later reporting.
        self.slurm_variables = {}
        for key in os.environ:
            if len(key.split("_")) > 1 and key.split("_")[0] == 'SLURM':
                self.slurm_variables.update({key: os.environ[key]})
        # Determine the job id: a real SLURM array task, or a local test run.
        self.test_run = False
        try:
            self.job_id = int(self.slurm_variables["SLURM_ARRAY_TASK_ID"])
        except KeyError:
            self.test_run = True
            warnings.warn("Test Run: SLURM_ARRAY_TASK_ID not in environment variables")
            self.job_id = test_id
        keys = parameters.keys()
        self.all_conditions = list(itertools.product(*[parameters[k] for k in keys]))
        self.parameter = dict(zip(keys, self.all_conditions[self.job_id]))
        # The name only includes enough information to differentiate the simulations.
        self.name = f"{name}_{self.job_id:03d}_" + '_'.join(
            [f"{a[0]}_{self[a]}" for a in self.parameter if len(self.all_parameters[a]) > 1])

    def __getitem__(self, name):
        # In a test run the explicit test parameters supersede the condition.
        if self.test_run:
            try:
                return self.test_parameters[name]
            except KeyError:
                return self.parameter[name]
        else:
            return self.parameter[name]

    def __getattr__(self, name: str):
        """ The keys of the parameters can be called as attributes
        """
        # ``__getattr__`` only runs after normal attribute lookup fails, so the
        # original ``name in self.__dict__`` branch was dead code.  Accessing
        # ``self.parameter`` directly here could recurse before ``parameter``
        # is assigned in __init__; use object.__getattribute__ to stay safe.
        try:
            parameter = object.__getattribute__(self, 'parameter')
        except AttributeError:
            raise AttributeError(name)
        if name in parameter:
            return self[name]
        return object.__getattribute__(self, name)

    def __repr__(self):
        return str(self.parameter)

    def keys(self):
        # BUG FIX: this referenced the nonexistent attribute ``self.parameters``
        # (AttributeError at call time); the condition mapping is ``self.parameter``.
        return str(self.parameter.keys())

    def print_parameters(self):
        """Print the number of conditions and the values selected for this job."""
        print(f"Number of conditions: {len(self.all_conditions)}")
        print("Running Conditions")
        for k in self.parameter.keys():
            print(f"{k} :", f"{self[k]}")
        print()

    def print_slurm_variables(self):
        """Print every SLURM_* environment variable captured at init time."""
        print("Slurm Variables")
        for key in self.slurm_variables:
            print(key, ":", self.slurm_variables[key])
        print()

    def write_csv(self, out=""):
        """Write the job parameters plus SLURM variables as a CSV.

        :param out: destination path; defaults to ``<name>.param``.
        """
        s = pandas.concat([pandas.Series(self.parameter), pandas.Series(self.slurm_variables)])
        s['test_run'] = self.test_run
        s['date'] = time.strftime("%Y_%m_%d")
        s['name'] = self.name
        s['job_id'] = self.job_id
        if out == '':
            s.to_csv(self.name + '.param')
        else:
            s.to_csv(out)
| [
"warnings.warn",
"itertools.product",
"time.strftime",
"pandas.Series"
] | [((3691, 3716), 'time.strftime', 'time.strftime', (['"""%Y_%m_%d"""'], {}), "('%Y_%m_%d')\n", (3704, 3716), False, 'import time\n'), ((1997, 2046), 'itertools.product', 'itertools.product', (['*[parameters[k] for k in keys]'], {}), '(*[parameters[k] for k in keys])\n', (2014, 2046), False, 'import itertools\n'), ((1818, 1893), 'warnings.warn', 'warnings.warn', (['"""Test Run: SLURM_ARRAY_TASK_ID not in environment variables"""'], {}), "('Test Run: SLURM_ARRAY_TASK_ID not in environment variables')\n", (1831, 1893), False, 'import warnings\n'), ((3564, 3593), 'pandas.Series', 'pandas.Series', (['self.parameter'], {}), '(self.parameter)\n', (3577, 3593), False, 'import pandas\n'), ((3595, 3630), 'pandas.Series', 'pandas.Series', (['self.slurm_variables'], {}), '(self.slurm_variables)\n', (3608, 3630), False, 'import pandas\n')] |
# Tout ce code est un snippet volé sur plusieurs threads Stackoverflow
# Il permet de créer un décorateur qui vérifie si un utilisateur est bien identifié,
# et le redirige vers la page de login sinon. Django ne fait pas cette dernière partie seul...
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from steam.steamid import SteamID
from sourcebans.models import SbAdmins
default_message = "You must login first"
def user_passes_test(test_func, message=default_message):
    """
    Decorator for views that checks that the user passes the given test,
    setting a message in case of no success. The test should be a callable
    that takes the user object and returns True if the user passes.
    """
    def decorator(view_func):
        # FIX: ``wraps`` is imported at the top of this file (with a py2.4
        # fallback) but was never applied, so decorated views lost their
        # __name__/__doc__ — apply it to preserve the wrapped view's metadata.
        @wraps(view_func)
        def _wrapped_view(request, *args, **kwargs):
            if not test_func(request.user):
                messages.error(request, message)
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
def login_required_message(function=None, message=default_message):
    """
    Decorator for views that checks that the user is logged in, redirecting
    to the log-in page if necessary.
    """
    decorator = user_passes_test(
        lambda user: user.is_authenticated,
        message=message,
    )
    return decorator(function) if function else decorator
def login_required_messsage_and_redirect(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None, message=default_message):
    """
    Combine Django's ``login_required`` redirect with a user-facing message.
    Usable both as a bare decorator and as a decorator factory with arguments.
    (The misspelled public name is kept for backward compatibility.)
    """
    if function:
        return login_required_message(
            login_required(function, redirect_field_name, login_url),
            message
        )
    # BUG FIX: the deferred branch referenced ``login_required_message_and_redirect``
    # (a name that does not exist — the def above has three s's), raising
    # NameError whenever the decorator was used with arguments.
    return lambda deferred_function: login_required_messsage_and_redirect(
        deferred_function, redirect_field_name, login_url, message)
def admin_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    Decorator for views that checks that the user is an admin: either present
    in the sourcebans SbAdmins table (looked up by Steam2 id) or a superuser.
    """
    def is_admin(u):
        if u.is_authenticated:
            steamid = SteamID(u.steamid).as_steam2
            # BUG FIX: ``objects.get`` raises ``DoesNotExist`` when no row
            # matches, which previously crashed the request (500) instead of
            # denying access as the ``if Results`` test clearly intended.
            try:
                results = SbAdmins.objects.get(authid=steamid)
            except SbAdmins.DoesNotExist:
                results = None
            if results or u.is_superuser:
                return True
        return False
    actual_decorator = user_passes_test(is_admin)
    if function:
        return actual_decorator(function)
    return actual_decorator
"django.contrib.messages.error",
"steam.steamid.SteamID",
"sourcebans.models.SbAdmins.objects.get",
"django.contrib.auth.decorators.login_required"
] | [((1733, 1789), 'django.contrib.auth.decorators.login_required', 'login_required', (['function', 'redirect_field_name', 'login_url'], {}), '(function, redirect_field_name, login_url)\n', (1747, 1789), False, 'from django.contrib.auth.decorators import login_required\n'), ((2266, 2302), 'sourcebans.models.SbAdmins.objects.get', 'SbAdmins.objects.get', ([], {'authid': 'steamid'}), '(authid=steamid)\n', (2286, 2302), False, 'from sourcebans.models import SbAdmins\n'), ((1041, 1073), 'django.contrib.messages.error', 'messages.error', (['request', 'message'], {}), '(request, message)\n', (1055, 1073), False, 'from django.contrib import messages\n'), ((2221, 2239), 'steam.steamid.SteamID', 'SteamID', (['u.steamid'], {}), '(u.steamid)\n', (2228, 2239), False, 'from steam.steamid import SteamID\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import TypeDecorator
db = SQLAlchemy()
class AnyArray(TypeDecorator):
    """TEXT-backed column type that (de)serializes Postgres array literals."""
    impl = db.TEXT

    def process_bind_param(self, value, dialect):
        """Serialize a Python sequence to a Postgres array literal string."""
        if value is None:
            return value
        if not value:
            return '{}'  # empty Postgres array
        return '{{"{}"}}'.format('","'.join(value))

    def process_result_value(self, value, dialect):
        """Parse a Postgres array literal back into a list of strings."""
        if value is None:
            return value
        if isinstance(value, list):
            return value
        if not value:
            return []
        # BUG FIX: only decode when the driver hands us bytes — calling
        # ``.decode`` on a ``str`` raises AttributeError under Python 3
        # (the original "value should be Unicode already" TODO).
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        # TODO: Enhance field decoding (eg. core_user.created)
        return value.strip('{}"').split(',')
# TODO: make the model read-only
class PGStats(db.Model):
    """ORM mapping of PostgreSQL's ``pg_stats`` planner-statistics view."""
    __tablename__ = 'pg_stats'
    # pg_stats has no real primary key; (schema, table, column) together
    # identify one statistics row, so they are declared as a composite PK.
    schema = db.Column('schemaname', db.TEXT(), primary_key=True)
    table = db.Column('tablename', db.TEXT(), primary_key=True)
    column = db.Column('attname', db.TEXT(), primary_key=True)
    inherited = db.Column('inherited', db.BOOLEAN())
    null_frac = db.Column('null_frac', db.REAL())
    avg_width = db.Column('avg_width', db.INTEGER())
    n_distinct = db.Column('n_distinct', db.REAL())
    # Array-valued statistics come back as text and are decoded by AnyArray.
    most_common_vals = db.Column('most_common_vals', AnyArray())
    most_common_freqs = db.Column('most_common_freqs', AnyArray())
    histogram_bounds = db.Column('histogram_bounds', AnyArray())
    correlation = db.Column('correlation', db.REAL())
    most_common_elems = db.Column('most_common_elems', AnyArray())
    most_common_elem_freqs = db.Column('most_common_elem_freqs', postgresql.ARRAY(db.REAL))
    elem_count_histogram = db.Column('elem_count_histogram', postgresql.ARRAY(db.REAL))

    def to_dict(self):
        """Return the row as an ordered ``{column_name: value}`` mapping."""
        result = OrderedDict()
        for key in self.__mapper__.columns.keys():
            result[key] = getattr(self, key)
        return result
| [
"collections.OrderedDict",
"flask.ext.sqlalchemy.SQLAlchemy",
"sqlalchemy.dialects.postgresql.ARRAY"
] | [((303, 315), 'flask.ext.sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (313, 315), False, 'from flask.ext.sqlalchemy import SQLAlchemy\n'), ((1847, 1872), 'sqlalchemy.dialects.postgresql.ARRAY', 'postgresql.ARRAY', (['db.REAL'], {}), '(db.REAL)\n', (1863, 1872), False, 'from sqlalchemy.dialects import postgresql\n'), ((1935, 1960), 'sqlalchemy.dialects.postgresql.ARRAY', 'postgresql.ARRAY', (['db.REAL'], {}), '(db.REAL)\n', (1951, 1960), False, 'from sqlalchemy.dialects import postgresql\n'), ((2003, 2016), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2014, 2016), False, 'from collections import OrderedDict\n')] |
import pltk as pl
from pltk import load_data
from pltk import get_file_contents # Load single file
from pltk import get_folder_contents # Load all files in folder
from pltk import tokenize # create desired tokens
from pltk import vectorize # convert list of string to list of vectors
from pltk import unvectorize # convert list of vectors to list of strings
from pltk import get_string # Un-vectorize integer
from pltk import get_vector # Vectorize string
from pltk import tokenize_file
from pltk import tokenize_folder # tokenize a list of files
from pltk import vectorize_file
from pltk import vectorize_folder
from pltk import unvectorize_file
from pltk import unvectorize_folder
# from pltk import list_to_vectors # convert list of string to list of vectors
# from pltk import vectors_to_list # convert list of vectors to list of strings
from pltk import write_list_to_file # writes a list to a file
from pltk import change_list_dimensions
from pltk import split_lists
from pltk import join_lists
# Demo / smoke-test script exercising the pltk public API end to end:
# load -> tokenize -> vectorize -> unvectorize -> list reshaping.
# LOAD DATA
print('Load Data: ')
test_load_file = load_data('good_strcpy.c')
test_load_file_from_directory = load_data('/home/rod/PycharmProjects/ProgrammingLanguageToolkit/input/good_strcpy.c')
test_load_folder = load_data('multifile')
test_load_my_folder = load_data('/home/rod/PycharmProjects/ProgrammingLanguageToolkit/multifile')
print('Load a file: ', test_load_file)
print('Load a file from a directory: ', test_load_file_from_directory)
print('Load a folder: ', test_load_folder)
print('Load a user defined folder: ', test_load_my_folder)
# TOKENIZE — each granularity of the single file, then of the whole folder.
char_tokens = tokenize(test_load_file, "chars")
word_tokens = tokenize(test_load_file, "words")
line_tokens = tokenize(test_load_file, "lines")
function_tokens = tokenize(test_load_file, "functions")
file_tokens = tokenize(test_load_file, "files")
char_folder_tokens = tokenize(test_load_folder, "chars")
word_folder_tokens = tokenize(test_load_folder, "words")
line_folder_tokens = tokenize(test_load_folder, "lines")
function_folder_tokens = tokenize(test_load_folder, "functions")
file_folder_tokens = tokenize(test_load_folder, "files")
print('\nTokenize Files: ')
print('Char Tokens: ' , char_tokens)
print('Word Tokens: ' , word_tokens)
print('Line Tokens: ' , line_tokens)
print('Function Tokens: ' , function_tokens)
print('File Tokens: ' , file_tokens)
print('\nTokenize Folders: ')
print('Char Folder Tokens: ' , char_folder_tokens)
print('Word Folder Tokens: ' , word_folder_tokens)
print('Line Folder Tokens: ' , line_folder_tokens)
print('Function Folder Tokens: ' , function_folder_tokens)
print('File Folder Tokens: ' , file_folder_tokens)
# VECTORIZE (CURRENT METHODS AND MY OWN)
# Baseline comparison: sklearn's CountVectorizer driven by pltk's loader.
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer() # create the transform
vectorizer.fit(test_load_file[0]) # tokenize and build vocab
print('\nSklearn\'s Count Vectorizer used with pltk\'s load_data method')
print('Tokens: ', vectorizer.vocabulary_) # summarize
vector = vectorizer.transform(test_load_file[0]) # encode document
print('Vector Shape (Lists,Tokens): ', vector.shape) # summarize encoded vector
print('Vectorized Array: ', vector.toarray())
print('Count Vectorizer Features: ', vectorizer.get_feature_names())
# VECTORIZATION — pltk's explicit file/folder variants.
char_tokens_vectorized = vectorize_file(char_tokens)
word_tokens_vectorized = vectorize_file(word_tokens)
file_tokens_vectorized = vectorize_file(file_tokens)
char_folder_tokens_vectorized = vectorize_folder(char_folder_tokens)
word_folder_tokens_vectorized = vectorize_folder(word_folder_tokens)
file_folder_tokens_vectorized = vectorize_folder(file_folder_tokens)
print('\nVectorize a File into Tokens - chars, words, lines, functions, files: ')
print('Vectorize Char Tokens: ', char_tokens_vectorized)
print('Vectorize Word Tokens: ', word_tokens_vectorized)
print('Vectorize File Tokens: ', file_tokens_vectorized)
print('\nVectorize a Folder into Tokens - chars, words, lines, functions, files: ')
print('Vectorize Char Tokens: ', char_folder_tokens_vectorized)
print('Vectorize Word Tokens: ', word_folder_tokens_vectorized)
print('Vectorize File Tokens: ', file_folder_tokens_vectorized)
# Same inputs through the generic ``vectorize`` dispatcher.
char_any_tokens_vectorized = vectorize(char_tokens)
word_any_tokens_vectorized = vectorize(word_tokens)
file_any_tokens_vectorized = vectorize(file_tokens)
char_anyf_tokens_vectorized = vectorize(char_folder_tokens)
word_anyf_tokens_vectorized = vectorize(word_folder_tokens)
file_anyf_tokens_vectorized = vectorize(file_folder_tokens)
print('\nPLTK Vectorize a File into Tokens - chars, words, lines, functions, files: ')
print('PLTK Vectorize Char Tokens: ', char_any_tokens_vectorized)
print('PLTK Vectorize Word Tokens: ', word_any_tokens_vectorized)
print('PLTK Vectorize File Tokens: ', file_any_tokens_vectorized)
print('\nPLTK Vectorize a Folder into Tokens - chars, words, lines, functions, files: ')
print('PLTK Vectorize Char Tokens: ', char_anyf_tokens_vectorized)
print('PLTK Vectorize Word Tokens: ', word_anyf_tokens_vectorized)
print('PLTK Vectorize File Tokens: ', file_anyf_tokens_vectorized)
# UNVECTORIZATION — round-trip the vectors back to tokens.
char_file_tokens_unvectorized = unvectorize(char_tokens_vectorized)
word_file_tokens_unvectorized = unvectorize(word_tokens_vectorized)
file_file_tokens_unvectorized = unvectorize(file_tokens_vectorized)
char_folder_tokens_unvectorized = unvectorize(char_folder_tokens_vectorized)
word_folder_tokens_unvectorized = unvectorize(word_folder_tokens_vectorized)
file_folder_tokens_unvectorized = unvectorize(file_folder_tokens_vectorized)
print('\nUn-Vectorizing a file: ')
print('Un-vectorized Char Tokens: ', char_file_tokens_unvectorized)
print('Un-vectorized Word Tokens: ', word_file_tokens_unvectorized)
print('Un-vectorized File Tokens: ', file_file_tokens_unvectorized)
print('Un-vectorized Char Tokens: ', char_folder_tokens_unvectorized)
print('Un-vectorized Word Tokens: ', word_folder_tokens_unvectorized)
print('Un-vectorized File Tokens: ', file_folder_tokens_unvectorized)
# List reshaping helpers: join (flatten) and split (nest), then convert back.
new_list_dimension = join_lists(word_file_tokens_unvectorized)
new_list_dimension2d = split_lists(word_file_tokens_unvectorized)
print('DIMENSION 1d file into 2d array: ', new_list_dimension)
print('DIMENSION 1d file into 2d array: ', new_list_dimension2d)
test1d = change_list_dimensions(new_list_dimension)
test2d = change_list_dimensions(new_list_dimension2d)
print('Test convert 1d: ', test1d)
print('Test convert 2d: ', test2d)
# Write to file
#write_list_to_file('test.c', test_unvectorize_list, folder=None) | [
"pltk.tokenize",
"pltk.vectorize",
"pltk.split_lists",
"pltk.unvectorize",
"sklearn.feature_extraction.text.CountVectorizer",
"pltk.change_list_dimensions",
"pltk.join_lists",
"pltk.load_data",
"pltk.vectorize_file",
"pltk.vectorize_folder"
] | [((1067, 1093), 'pltk.load_data', 'load_data', (['"""good_strcpy.c"""'], {}), "('good_strcpy.c')\n", (1076, 1093), False, 'from pltk import load_data\n'), ((1126, 1216), 'pltk.load_data', 'load_data', (['"""/home/rod/PycharmProjects/ProgrammingLanguageToolkit/input/good_strcpy.c"""'], {}), "(\n '/home/rod/PycharmProjects/ProgrammingLanguageToolkit/input/good_strcpy.c')\n", (1135, 1216), False, 'from pltk import load_data\n'), ((1231, 1253), 'pltk.load_data', 'load_data', (['"""multifile"""'], {}), "('multifile')\n", (1240, 1253), False, 'from pltk import load_data\n'), ((1276, 1351), 'pltk.load_data', 'load_data', (['"""/home/rod/PycharmProjects/ProgrammingLanguageToolkit/multifile"""'], {}), "('/home/rod/PycharmProjects/ProgrammingLanguageToolkit/multifile')\n", (1285, 1351), False, 'from pltk import load_data\n'), ((1591, 1624), 'pltk.tokenize', 'tokenize', (['test_load_file', '"""chars"""'], {}), "(test_load_file, 'chars')\n", (1599, 1624), False, 'from pltk import tokenize\n'), ((1639, 1672), 'pltk.tokenize', 'tokenize', (['test_load_file', '"""words"""'], {}), "(test_load_file, 'words')\n", (1647, 1672), False, 'from pltk import tokenize\n'), ((1687, 1720), 'pltk.tokenize', 'tokenize', (['test_load_file', '"""lines"""'], {}), "(test_load_file, 'lines')\n", (1695, 1720), False, 'from pltk import tokenize\n'), ((1739, 1776), 'pltk.tokenize', 'tokenize', (['test_load_file', '"""functions"""'], {}), "(test_load_file, 'functions')\n", (1747, 1776), False, 'from pltk import tokenize\n'), ((1791, 1824), 'pltk.tokenize', 'tokenize', (['test_load_file', '"""files"""'], {}), "(test_load_file, 'files')\n", (1799, 1824), False, 'from pltk import tokenize\n'), ((1846, 1881), 'pltk.tokenize', 'tokenize', (['test_load_folder', '"""chars"""'], {}), "(test_load_folder, 'chars')\n", (1854, 1881), False, 'from pltk import tokenize\n'), ((1903, 1938), 'pltk.tokenize', 'tokenize', (['test_load_folder', '"""words"""'], {}), "(test_load_folder, 'words')\n", (1911, 1938), False, 
'from pltk import tokenize\n'), ((1960, 1995), 'pltk.tokenize', 'tokenize', (['test_load_folder', '"""lines"""'], {}), "(test_load_folder, 'lines')\n", (1968, 1995), False, 'from pltk import tokenize\n'), ((2021, 2060), 'pltk.tokenize', 'tokenize', (['test_load_folder', '"""functions"""'], {}), "(test_load_folder, 'functions')\n", (2029, 2060), False, 'from pltk import tokenize\n'), ((2082, 2117), 'pltk.tokenize', 'tokenize', (['test_load_folder', '"""files"""'], {}), "(test_load_folder, 'files')\n", (2090, 2117), False, 'from pltk import tokenize\n'), ((2749, 2766), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (2764, 2766), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((3288, 3315), 'pltk.vectorize_file', 'vectorize_file', (['char_tokens'], {}), '(char_tokens)\n', (3302, 3315), False, 'from pltk import vectorize_file\n'), ((3341, 3368), 'pltk.vectorize_file', 'vectorize_file', (['word_tokens'], {}), '(word_tokens)\n', (3355, 3368), False, 'from pltk import vectorize_file\n'), ((3394, 3421), 'pltk.vectorize_file', 'vectorize_file', (['file_tokens'], {}), '(file_tokens)\n', (3408, 3421), False, 'from pltk import vectorize_file\n'), ((3454, 3490), 'pltk.vectorize_folder', 'vectorize_folder', (['char_folder_tokens'], {}), '(char_folder_tokens)\n', (3470, 3490), False, 'from pltk import vectorize_folder\n'), ((3523, 3559), 'pltk.vectorize_folder', 'vectorize_folder', (['word_folder_tokens'], {}), '(word_folder_tokens)\n', (3539, 3559), False, 'from pltk import vectorize_folder\n'), ((3592, 3628), 'pltk.vectorize_folder', 'vectorize_folder', (['file_folder_tokens'], {}), '(file_folder_tokens)\n', (3608, 3628), False, 'from pltk import vectorize_folder\n'), ((4189, 4211), 'pltk.vectorize', 'vectorize', (['char_tokens'], {}), '(char_tokens)\n', (4198, 4211), False, 'from pltk import vectorize\n'), ((4241, 4263), 'pltk.vectorize', 'vectorize', (['word_tokens'], {}), '(word_tokens)\n', (4250, 4263), 
False, 'from pltk import vectorize\n'), ((4293, 4315), 'pltk.vectorize', 'vectorize', (['file_tokens'], {}), '(file_tokens)\n', (4302, 4315), False, 'from pltk import vectorize\n'), ((4346, 4375), 'pltk.vectorize', 'vectorize', (['char_folder_tokens'], {}), '(char_folder_tokens)\n', (4355, 4375), False, 'from pltk import vectorize\n'), ((4406, 4435), 'pltk.vectorize', 'vectorize', (['word_folder_tokens'], {}), '(word_folder_tokens)\n', (4415, 4435), False, 'from pltk import vectorize\n'), ((4466, 4495), 'pltk.vectorize', 'vectorize', (['file_folder_tokens'], {}), '(file_folder_tokens)\n', (4475, 4495), False, 'from pltk import vectorize\n'), ((5123, 5158), 'pltk.unvectorize', 'unvectorize', (['char_tokens_vectorized'], {}), '(char_tokens_vectorized)\n', (5134, 5158), False, 'from pltk import unvectorize\n'), ((5191, 5226), 'pltk.unvectorize', 'unvectorize', (['word_tokens_vectorized'], {}), '(word_tokens_vectorized)\n', (5202, 5226), False, 'from pltk import unvectorize\n'), ((5259, 5294), 'pltk.unvectorize', 'unvectorize', (['file_tokens_vectorized'], {}), '(file_tokens_vectorized)\n', (5270, 5294), False, 'from pltk import unvectorize\n'), ((5329, 5371), 'pltk.unvectorize', 'unvectorize', (['char_folder_tokens_vectorized'], {}), '(char_folder_tokens_vectorized)\n', (5340, 5371), False, 'from pltk import unvectorize\n'), ((5406, 5448), 'pltk.unvectorize', 'unvectorize', (['word_folder_tokens_vectorized'], {}), '(word_folder_tokens_vectorized)\n', (5417, 5448), False, 'from pltk import unvectorize\n'), ((5483, 5525), 'pltk.unvectorize', 'unvectorize', (['file_folder_tokens_vectorized'], {}), '(file_folder_tokens_vectorized)\n', (5494, 5525), False, 'from pltk import unvectorize\n'), ((5998, 6039), 'pltk.join_lists', 'join_lists', (['word_file_tokens_unvectorized'], {}), '(word_file_tokens_unvectorized)\n', (6008, 6039), False, 'from pltk import join_lists\n'), ((6063, 6105), 'pltk.split_lists', 'split_lists', (['word_file_tokens_unvectorized'], {}), 
'(word_file_tokens_unvectorized)\n', (6074, 6105), False, 'from pltk import split_lists\n'), ((6244, 6286), 'pltk.change_list_dimensions', 'change_list_dimensions', (['new_list_dimension'], {}), '(new_list_dimension)\n', (6266, 6286), False, 'from pltk import change_list_dimensions\n'), ((6296, 6340), 'pltk.change_list_dimensions', 'change_list_dimensions', (['new_list_dimension2d'], {}), '(new_list_dimension2d)\n', (6318, 6340), False, 'from pltk import change_list_dimensions\n')] |
from django.contrib import admin
from fridge.models import (FridgeEnvirontment, MeasureUnit, Stock,
StockCategory, Supplier)
# Register your models here.
@admin.register(Stock)
class StockAdmin(admin.ModelAdmin):
    # Changelist columns; name is searchable; paginate at 15 rows per page.
    list_display = ('name','category','quantity','measure_unit','supplier')
    search_fields = ['name',]
    list_per_page = 15
@admin.register(Supplier)
class SupplierAdmin(admin.ModelAdmin):
    # Show contact details alongside whether the supplier is a company.
    list_display = ('name','is_company','phone','address','email')
# Plain registrations without a customized ModelAdmin.
admin.site.register(StockCategory)
admin.site.register(MeasureUnit)
admin.site.register(FridgeEnvirontment)
| [
"django.contrib.admin.register",
"django.contrib.admin.site.register"
] | [((185, 206), 'django.contrib.admin.register', 'admin.register', (['Stock'], {}), '(Stock)\n', (199, 206), False, 'from django.contrib import admin\n'), ((375, 399), 'django.contrib.admin.register', 'admin.register', (['Supplier'], {}), '(Supplier)\n', (389, 399), False, 'from django.contrib import admin\n'), ((511, 545), 'django.contrib.admin.site.register', 'admin.site.register', (['StockCategory'], {}), '(StockCategory)\n', (530, 545), False, 'from django.contrib import admin\n'), ((546, 578), 'django.contrib.admin.site.register', 'admin.site.register', (['MeasureUnit'], {}), '(MeasureUnit)\n', (565, 578), False, 'from django.contrib import admin\n'), ((579, 618), 'django.contrib.admin.site.register', 'admin.site.register', (['FridgeEnvirontment'], {}), '(FridgeEnvirontment)\n', (598, 618), False, 'from django.contrib import admin\n')] |
import pickle
import pytest
import numpy as np
from astropy import units as u
from astropy import modeling
from specutils.utils import QuantityModel
from ..utils.wcs_utils import refraction_index, vac_to_air, air_to_vac
wavelengths = [300, 500, 1000] * u.nm
# Reference values compared against (refraction_index(wavelengths, m) - 1) * 1e4
# in test_refraction_index below — one row per supported conversion method.
data_index_refraction = {
    'Griesen2006': np.array([3.07393068, 2.9434858 , 2.8925797 ]),
    'Edlen1953': np.array([2.91557413, 2.78963801, 2.74148172]),
    'Edlen1966': np.array([2.91554272, 2.7895973 , 2.74156098]),
    'PeckReeder1972': np.array([2.91554211, 2.78960005, 2.74152561]),
    'Morton2000': np.array([2.91568573, 2.78973402, 2.74169531]),
    'Ciddor1996': np.array([2.91568633, 2.78973811, 2.74166131])
}
def test_quantity_model():
    """A wrapped default Chebyshev1D evaluates to zero after unit conversion."""
    base = modeling.models.Chebyshev1D(3)
    wrapped = QuantityModel(base, u.AA, u.km)
    assert wrapped(10 * u.nm).to(u.m) == 0 * u.m
def test_pickle_quantity_model(tmp_path):
    """
    Check that a QuantityModel can roundtrip through pickling, as it
    would if fit in a multiprocessing pool.
    """
    c = modeling.models.Chebyshev1D(3)
    uc = QuantityModel(c, u.AA, u.km)
    pkl_file = tmp_path / "qmodel.pkl"
    with open(pkl_file, "wb") as f:
        pickle.dump(uc, f)
    with open(pkl_file, "rb") as f:
        new_model = pickle.load(f)
    assert new_model.input_units == uc.input_units
    assert new_model.return_units == uc.return_units
    # FIX: exact-type comparison uses identity (``is``), the idiomatic form —
    # ``type(a) == type(b)`` compared type objects with ``==`` instead.
    assert type(new_model.unitless_model) is type(uc.unitless_model)
    assert np.all(new_model.unitless_model.parameters == uc.unitless_model.parameters)
@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_refraction_index(method):
    """Each method reproduces the tabulated (n - 1) * 1e4 reference values."""
    scaled_excess = (refraction_index(wavelengths, method) - 1) * 1e4
    assert np.isclose(scaled_excess, data_index_refraction[method], atol=1e-7).all()
@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_air_to_vac(method):
    # Inversion scheme: air wavelength times n should match the vacuum value.
    tmp = refraction_index(wavelengths, method)
    assert np.isclose(wavelengths.value * tmp,
                      air_to_vac(wavelengths, method=method, scheme='inversion').value,
                      rtol=1e-6).all()
    # Round trip vac -> air -> vac via the iterative scheme should recover the
    # original wavelengths to a tight absolute tolerance.
    assert np.isclose(wavelengths.value,
                      air_to_vac(vac_to_air(wavelengths, method=method),
                                 method=method, scheme='iteration').value,
                      atol=1e-12).all()
| [
"astropy.modeling.models.Chebyshev1D",
"pickle.dump",
"specutils.utils.QuantityModel",
"numpy.isclose",
"pickle.load",
"numpy.array",
"numpy.all"
] | [((305, 349), 'numpy.array', 'np.array', (['[3.07393068, 2.9434858, 2.8925797]'], {}), '([3.07393068, 2.9434858, 2.8925797])\n', (313, 349), True, 'import numpy as np\n'), ((369, 415), 'numpy.array', 'np.array', (['[2.91557413, 2.78963801, 2.74148172]'], {}), '([2.91557413, 2.78963801, 2.74148172])\n', (377, 415), True, 'import numpy as np\n'), ((433, 478), 'numpy.array', 'np.array', (['[2.91554272, 2.7895973, 2.74156098]'], {}), '([2.91554272, 2.7895973, 2.74156098])\n', (441, 478), True, 'import numpy as np\n'), ((502, 548), 'numpy.array', 'np.array', (['[2.91554211, 2.78960005, 2.74152561]'], {}), '([2.91554211, 2.78960005, 2.74152561])\n', (510, 548), True, 'import numpy as np\n'), ((567, 613), 'numpy.array', 'np.array', (['[2.91568573, 2.78973402, 2.74169531]'], {}), '([2.91568573, 2.78973402, 2.74169531])\n', (575, 613), True, 'import numpy as np\n'), ((632, 678), 'numpy.array', 'np.array', (['[2.91568633, 2.78973811, 2.74166131]'], {}), '([2.91568633, 2.78973811, 2.74166131])\n', (640, 678), True, 'import numpy as np\n'), ((717, 747), 'astropy.modeling.models.Chebyshev1D', 'modeling.models.Chebyshev1D', (['(3)'], {}), '(3)\n', (744, 747), False, 'from astropy import modeling\n'), ((757, 785), 'specutils.utils.QuantityModel', 'QuantityModel', (['c', 'u.AA', 'u.km'], {}), '(c, u.AA, u.km)\n', (770, 785), False, 'from specutils.utils import QuantityModel\n'), ((1008, 1038), 'astropy.modeling.models.Chebyshev1D', 'modeling.models.Chebyshev1D', (['(3)'], {}), '(3)\n', (1035, 1038), False, 'from astropy import modeling\n'), ((1048, 1076), 'specutils.utils.QuantityModel', 'QuantityModel', (['c', 'u.AA', 'u.km'], {}), '(c, u.AA, u.km)\n', (1061, 1076), False, 'from specutils.utils import QuantityModel\n'), ((1438, 1513), 'numpy.all', 'np.all', (['(new_model.unitless_model.parameters == uc.unitless_model.parameters)'], {}), '(new_model.unitless_model.parameters == uc.unitless_model.parameters)\n', (1444, 1513), True, 'import numpy as np\n'), ((1162, 1180), 
'pickle.dump', 'pickle.dump', (['uc', 'f'], {}), '(uc, f)\n', (1173, 1180), False, 'import pickle\n'), ((1238, 1252), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1249, 1252), False, 'import pickle\n'), ((1686, 1744), 'numpy.isclose', 'np.isclose', (['tmp', 'data_index_refraction[method]'], {'atol': '(1e-07)'}), '(tmp, data_index_refraction[method], atol=1e-07)\n', (1696, 1744), True, 'import numpy as np\n')] |
import logging
import os
from datetime import timedelta
# layers
import sys
sys.path.append('/opt')
import cv2
from common.config import LOG_LEVEL, FRAME_RESIZE_WIDTH, FRAME_RESIZE_HEIGHT, STORE_FRAMES, \
DDB_FRAME_TABLE, UTC_TIME_FMT
from common.utils import upload_to_s3, put_item_ddb
logger = logging.getLogger('FrameExtractor')
logger.setLevel(LOG_LEVEL)
S3_KEY_DATE_FMT = "%Y/%m/%d/%H/%M:%S:%f"
def extract_frames(stream_id, segment_s3_key, video_chunk, video_start_datetime, s3_bucket, frame_s3_prefix,
                   sample_fps=1):
    """Decode *video_chunk*, sample frames at roughly *sample_fps*, upload
    them as JPEGs to S3, and persist per-frame metadata to DynamoDB.

    :param stream_id: id of the stream this chunk belongs to
    :param segment_s3_key: S3 key of the source segment (accepted for callers;
        not used inside this function)
    :param video_chunk: local path to the video file to decode
    :param video_start_datetime: absolute datetime of the chunk's first frame
    :param s3_bucket: bucket receiving the extracted frames
    :param frame_s3_prefix: key prefix under which frames are written
    :param sample_fps: desired extraction rate in frames per second
    :return: list of metadata dicts, one per extracted frame
    :raises ValueError: when the STORE_FRAMES config value is invalid
    """
    if STORE_FRAMES not in ["all", "original", "resized"]:
        raise ValueError(f'Invalid STORE_FRAMES option: {STORE_FRAMES} (Valid: all, original, resized)')
    store_original_frames = STORE_FRAMES in ["all", "original"]
    store_resized_frames = STORE_FRAMES in ["all", "resized"]
    logger.info(f'Store original sized frame? {store_original_frames}, Store resized frames? {store_resized_frames}')
    cap = cv2.VideoCapture(video_chunk)
    extracted_frames_metadata = []
    try:
        video_metadata = extract_video_metadata(cap)
        # Keep every hop-th frame so the output rate approximates sample_fps.
        hop = round(video_metadata['fps'] / sample_fps)
        if hop == 0:
            hop = 1  # if sample_fps is invalid extract every frame
        logger.info(f'Extracting every {hop} frame.')
        frame_count = 0
        extracted_frames = 0
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                break
            if frame_count % hop == 0:
                # timestamp relative to start of video
                frame_timestamp_millis = cap.get(cv2.CAP_PROP_POS_MSEC)
                # absolute timestamp of the frame
                frame_datetime = video_start_datetime + timedelta(milliseconds=frame_timestamp_millis)
                segment_id = f'{stream_id}:{video_start_datetime.strftime(UTC_TIME_FMT)}'
                frame_metadata = {'Stream_ID': stream_id,
                                  'DateTime': frame_datetime.strftime(UTC_TIME_FMT),
                                  'Segment': segment_id,
                                  'Segment_Millis': int(frame_timestamp_millis),
                                  'Segment_Frame_Num': frame_count,
                                  'S3_Bucket': s3_bucket}
                if store_original_frames:
                    jpg = cv2.imencode(".jpg", frame)[1]
                    # use absolute timestamps for s3 key. might be easier to reason about.
                    frame_key = os.path.join(frame_s3_prefix, 'original',
                                             f'{frame_datetime.strftime(S3_KEY_DATE_FMT)}.jpg')
                    # TODO: Should we also store the frame metadata in the s3 object?
                    s3_object_metadata = {'ContentType': 'image/jpeg'}
                    upload_to_s3(s3_bucket, frame_key, bytearray(jpg), **s3_object_metadata)
                    frame_metadata['S3_Key'] = frame_key
                    frame_metadata['Frame_Width'] = int(video_metadata['original_frame_width'])
                    frame_metadata['Frame_Height'] = int(video_metadata['original_frame_height'])
                if store_resized_frames:
                    resized_frame = cv2.resize(frame, (FRAME_RESIZE_WIDTH, FRAME_RESIZE_HEIGHT))
                    resized_jpg = cv2.imencode(".jpg", resized_frame)[1]
                    # use absolute timestamps for s3 key. might be easier to reason about.
                    resized_frame_key = os.path.join(frame_s3_prefix, 'resized',
                                                     f'{frame_datetime.strftime(S3_KEY_DATE_FMT)}.jpg')
                    s3_object_metadata = {'ContentType': 'image/jpeg'}
                    upload_to_s3(s3_bucket, resized_frame_key, bytearray(resized_jpg), **s3_object_metadata)
                    if 'S3_Key' in frame_metadata:
                        # Original frame is the primary key; keep the resized
                        # copy under its own attribute.
                        frame_metadata['Resized_S3_Key'] = resized_frame_key
                    else:
                        # BUG FIX: this branch previously assigned the undefined
                        # name `frame_key`, raising NameError whenever
                        # STORE_FRAMES == "resized".
                        frame_metadata['S3_Key'] = resized_frame_key
                        frame_metadata['Frame_Width'] = FRAME_RESIZE_WIDTH
                        frame_metadata['Frame_Height'] = FRAME_RESIZE_HEIGHT
                # persist frame metadata in database
                put_item_ddb(DDB_FRAME_TABLE, frame_metadata)
                extracted_frames_metadata.append(frame_metadata)
                extracted_frames += 1
            frame_count += 1
        logger.info(f'Extracted {extracted_frames} out of {frame_count} frames from {video_chunk}')
        return extracted_frames_metadata
    finally:
        cv2.destroyAllWindows()
        cap.release()
def extract_video_metadata(cap):
    """Read basic stream properties from an open cv2.VideoCapture.

    Returns a dict with the frame dimensions, codec fourcc, total frame
    count, container format/mode and frames-per-second, and logs it.
    """
    metadata = dict(
        original_frame_width=cap.get(cv2.CAP_PROP_FRAME_WIDTH),
        original_frame_height=cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
        fourcc=cap.get(cv2.CAP_PROP_FOURCC),
        frame_count=int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
        format=cap.get(cv2.CAP_PROP_FORMAT),
        mode=cap.get(cv2.CAP_PROP_MODE),
        fps=cap.get(cv2.CAP_PROP_FPS),
    )
    logger.info(f'video metadata: {metadata}')
    return metadata
| [
"logging.getLogger",
"cv2.resize",
"cv2.imencode",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"datetime.timedelta",
"common.utils.put_item_ddb",
"sys.path.append"
] | [((77, 100), 'sys.path.append', 'sys.path.append', (['"""/opt"""'], {}), "('/opt')\n", (92, 100), False, 'import sys\n'), ((303, 338), 'logging.getLogger', 'logging.getLogger', (['"""FrameExtractor"""'], {}), "('FrameExtractor')\n", (320, 338), False, 'import logging\n'), ((973, 1002), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_chunk'], {}), '(video_chunk)\n', (989, 1002), False, 'import cv2\n'), ((4776, 4799), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4797, 4799), False, 'import cv2\n'), ((4384, 4429), 'common.utils.put_item_ddb', 'put_item_ddb', (['DDB_FRAME_TABLE', 'frame_metadata'], {}), '(DDB_FRAME_TABLE, frame_metadata)\n', (4396, 4429), False, 'from common.utils import upload_to_s3, put_item_ddb\n'), ((1740, 1786), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': 'frame_timestamp_millis'}), '(milliseconds=frame_timestamp_millis)\n', (1749, 1786), False, 'from datetime import timedelta\n'), ((3302, 3362), 'cv2.resize', 'cv2.resize', (['frame', '(FRAME_RESIZE_WIDTH, FRAME_RESIZE_HEIGHT)'], {}), '(frame, (FRAME_RESIZE_WIDTH, FRAME_RESIZE_HEIGHT))\n', (3312, 3362), False, 'import cv2\n'), ((2388, 2415), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame'], {}), "('.jpg', frame)\n", (2400, 2415), False, 'import cv2\n'), ((3401, 3436), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'resized_frame'], {}), "('.jpg', resized_frame)\n", (3413, 3436), False, 'import cv2\n')] |
"""Collection of mixins."""
import json
from typing import Any, Dict, List, Union, Generator, Optional, Callable
import asyncpraw
import asyncprawcore
from asyncpraw.models import ListingGenerator
from client.models import RedditHelper
class Storage:
    """Mixin that persists a dict as JSON in a file on disk."""

    def __init__(self, filename: str = "data.json") -> None:
        """Remember the path of the backing JSON file."""
        self.filename = filename

    def get(
        self,
        default: Optional[Dict[str, Any]] = None,
        callback: Optional[Callable] = None,
    ) -> dict:
        """Load and return the stored JSON object.

        When the file is missing or not valid JSON, *default* (or ``{}``)
        is written to disk and returned instead. *callback*, if given, is
        invoked after the read completes.
        """
        result = default or {}
        try:
            with open(self.filename, "r") as handle:
                result = json.load(handle)
        except (json.decoder.JSONDecodeError, FileNotFoundError):
            # Seed the file so the next read succeeds.
            self.set(result)
        if callback:
            callback()
        return result

    def set(self, data: Dict[str, Any], callback: Optional[Callable] = None) -> None:
        """Serialize *data* as JSON to the backing file, then run *callback*."""
        with open(self.filename, "w") as handle:
            json.dump(data, handle)
        if callback:
            callback()
class Reddit:
    """Base mixin for reddit functionality.

    Wraps an asyncpraw client plus JSON-file persistence of channel
    subscriptions and banned subreddits (via the Storage mixin).
    """

    def __init__(
        self,
        client_id: str,
        client_secret: str,
        filename: str = "data.json",
        callback: Optional[Callable] = None,
    ) -> None:
        """Initialize the mixin."""
        # Subscriptions/bans survive restarts through the JSON-backed Storage.
        self.storage = Storage(filename=filename)
        self.subreddits = self.storage.get(
            default={"subscribed": [], "banned": []}, callback=callback
        )
        self.request = asyncpraw.Reddit(
            client_id=client_id,
            client_secret=client_secret,
            user_agent=f"DISCORD_BOT:{client_id}:1.0",
        )

    async def subreddit_exists(self, subreddit: str) -> bool:
        """Check if a subreddit exists."""
        subreddit_exists = False
        try:
            # Exhaust the async generator; an exact-name search that raises
            # NotFound/Redirect means the subreddit does not exist.
            _ = [
                sub
                async for sub in self.request.subreddits.search_by_name(
                    query=subreddit, exact=True
                )
            ]
            subreddit_exists = True
        except (asyncprawcore.NotFound, asyncprawcore.exceptions.Redirect):
            pass
        return subreddit_exists

    async def fetch(
        self,
        subreddit_or_redditor: str,
        search_type: Optional[str] = "subreddit",
        search_term: Optional[str] = None,
        fetch: Optional[bool] = True,
        sort: Optional[str] = None,
        limit: Optional[int] = 1,
        *args,
        **kwargs,
    ) -> Union[ListingGenerator, List]:
        """Fetch posts from a subreddit or a redditor."""
        # Fall back to treating the query as a redditor name when no
        # subreddit by that name exists.
        if not await self.subreddit_exists(subreddit=subreddit_or_redditor):
            search_type = "redditor"
        # Without a search term, default to newest submissions.
        if not search_term:
            sort = "new"
        results = []
        try:
            helper = RedditHelper(reddit=self.request, method=search_type)
            results = await helper.filter(
                query=subreddit_or_redditor,
                search_term=search_term,
                fetch=fetch,
                sort=sort,
                limit=limit,
                *args,
                **kwargs,
            )
        except asyncprawcore.exceptions.Redirect:
            # Treat a redirect (e.g. nonexistent target) as "no results".
            pass
        return results

    def manage_subscription(
        self,
        channel_id: int,
        subreddit: str,
        subscribe: bool = True,
        callback: Optional[Callable] = None,
    ) -> None:
        """Store the channel id and subreddit to subscribe to. Subscribes by default."""
        subscription = {"channel_id": channel_id, "subreddit": subreddit}
        if subscribe:
            # NOTE(review): no duplicate check — subscribing twice stores two
            # identical entries (each unsubscribe removes one).
            self.subreddits.setdefault("subscribed", []).append(subscription)
        else:
            try:
                self.subreddits.get("subscribed", []).remove(subscription)
            except ValueError:
                # Unsubscribing when not subscribed is a no-op.
                pass
        self.storage.set(self.subreddits, callback=callback)

    def manage_moderation(
        self,
        subreddit: str,
        ban: bool = True,
        callback: Optional[Callable] = None,
    ) -> None:
        """Manages bans. Bans by default."""
        if ban:
            # Unsubscribe every channel currently following the banned
            # subreddit (comprehension used purely for its side effects).
            [
                self.manage_subscription(**sub, subscribe=False)
                for sub in self.subreddits.get("subscribed", [])
                if sub.get("subreddit") == subreddit
            ]
            self.subreddits.setdefault("banned", []).append(subreddit)
        else:
            try:
                self.subreddits.get("banned", []).remove(subreddit)
            except ValueError:
                # Unbanning something that was never banned is a no-op.
                pass
        self.storage.set(self.subreddits, callback=callback)

    def subreddit_is_banned(self, subreddit: str) -> bool:
        """Checks if the given subreddit is banned."""
        return subreddit in self.subreddits.get("banned", [])

    def subreddit_is_subscribed(self, channel_id: str, subreddit: str) -> bool:
        """Checks if the given subreddit is subscribed."""
        return any(
            channel_id == sub.get("channel_id") and subreddit == sub.get("subreddit")
            for sub in self.subreddits.get("subscribed", [])
        )

    def get_subscriptions(self) -> Generator:
        """Returns a generator with subscribed subreddits."""
        # Each item is a dict_values view of (channel_id, subreddit).
        return (sub.values() for sub in self.subreddits.get("subscribed", []))
| [
"json.load",
"client.models.RedditHelper",
"json.dump",
"asyncpraw.Reddit"
] | [((1768, 1881), 'asyncpraw.Reddit', 'asyncpraw.Reddit', ([], {'client_id': 'client_id', 'client_secret': 'client_secret', 'user_agent': 'f"""DISCORD_BOT:{client_id}:1.0"""'}), "(client_id=client_id, client_secret=client_secret,\n user_agent=f'DISCORD_BOT:{client_id}:1.0')\n", (1784, 1881), False, 'import asyncpraw\n'), ((1222, 1243), 'json.dump', 'json.dump', (['data', 'file'], {}), '(data, file)\n', (1231, 1243), False, 'import json\n'), ((3057, 3110), 'client.models.RedditHelper', 'RedditHelper', ([], {'reddit': 'self.request', 'method': 'search_type'}), '(reddit=self.request, method=search_type)\n', (3069, 3110), False, 'from client.models import RedditHelper\n'), ((830, 845), 'json.load', 'json.load', (['file'], {}), '(file)\n', (839, 845), False, 'import json\n')] |
from __future__ import absolute_import
import re
import os
import json
import xml.etree.ElementTree as ET
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils import is_py2_old
from svtplay_dl.error import ServiceError
from svtplay_dl.log import log
from svtplay_dl.fetcher.rtmp import RTMP
from svtplay_dl.fetcher.hls import hlsparse
# This is _very_ similar to mtvservices..
class Mtvnn(Service, OpenGraphThumbMixin):
    # Domains this scraper implementation handles.
    supported_domains = ['nickelodeon.se', "nickelodeon.nl", "nickelodeon.no", "www.comedycentral.se", "nickelodeon.dk"]

    def get(self):
        """Yield the page's video streams (RTMP renditions and HLS
        variants), or a ServiceError when the video cannot be resolved."""
        data = self.get_urldata()
        # The page embeds an MRSS feed URL that carries the video metadata.
        match = re.search(r'data-mrss=[\'"](http://gakusei-cluster.mtvnn.com/v2/mrss.xml[^\'"]+)[\'"]', data)
        if not match:
            yield ServiceError("Can't find id for the video")
            return
        data = self.http.request("get", match.group(1)).content
        xml = ET.XML(data)
        mediagen = xml.find("channel").find("item").find("{http://search.yahoo.com/mrss/}group")
        title = xml.find("channel").find("item").find("title").text
        if self.options.output_auto:
            # Derive the output filename from the feed's item title,
            # preserving any directory the user supplied.
            directory = os.path.dirname(self.options.output)
            if len(directory):
                self.options.output = os.path.join(directory, title)
            else:
                self.options.output = title

        if self.exclude():
            yield ServiceError("Excluding video")
            return

        # RTMP playback needs the redirect-resolved SWF player URL (-W flag).
        swfurl = mediagen.find("{http://search.yahoo.com/mrss/}player").attrib["url"]
        self.options.other = "-W %s" % self.http.check_redirect(swfurl)

        contenturl = mediagen.find("{http://search.yahoo.com/mrss/}content").attrib["url"]
        filename = os.path.basename(contenturl)
        # Separate JSON API gives the HLS master playlist URL for this file.
        data = self.http.request("get", "http://videos.mtvnn.com/api/v2/%s.js?video_format=hls" % filename).text
        dataj = json.loads(data)

        content = self.http.request("get", contenturl).content
        xml = ET.XML(content)
        ss = xml.find("video").find("item")
        if is_py2_old:
            # Old Python 2 ElementTree lacks Element.iter(); fall back to
            # the deprecated getiterator().
            sa = list(ss.getiterator("rendition"))
        else:
            sa = list(ss.iter("rendition"))

        for i in sa:
            yield RTMP(self.options, i.find("src").text, i.attrib["bitrate"])

        streams = hlsparse(self.options, self.http.request("get", dataj["src"]), dataj["src"])
        if streams:
            for n in list(streams.keys()):
                yield streams[n]

    def find_all_episodes(self, options):
        """Return a list of episode URLs (capped at options.all_last) for
        the page's franchise, or None when the listing cannot be parsed."""
        match = re.search(r"data-franchise='([^']+)'", self.get_urldata())
        if match is None:
            log.error("Couldn't program id")
            return
        programid = match.group(1)
        match = re.findall(r"<li class='([a-z]+ )?playlist-item( [a-z]+)*?'( data-[-a-z]+='[^']+')* data-item-id='([^']+)'", self.get_urldata())
        if not match:
            log.error("Couldn't retrieve episode list")
            return
        episodNr = []
        for i in match:
            # Group 4 of the findall pattern is the data-item-id value.
            episodNr.append(i[3])

        episodes = []
        n = 0
        for i in sorted(episodNr):
            if n == options.all_last:
                break
            episodes.append("http://www.nickelodeon.se/serier/%s-something/videos/%s-something" % (programid, i))
            n += 1
        return episodes
| [
"json.loads",
"xml.etree.ElementTree.XML",
"svtplay_dl.log.log.error",
"svtplay_dl.error.ServiceError",
"os.path.join",
"os.path.dirname",
"os.path.basename",
"re.search"
] | [((643, 751), 're.search', 're.search', (['"""data-mrss=[\\\\\'"](http://gakusei-cluster.mtvnn.com/v2/mrss.xml[^\\\\\'"]+)[\\\\\'"]"""', 'data'], {}), '(\n \'data-mrss=[\\\\\\\'"](http://gakusei-cluster.mtvnn.com/v2/mrss.xml[^\\\\\\\'"]+)[\\\\\\\'"]\'\n , data)\n', (652, 751), False, 'import re\n'), ((919, 931), 'xml.etree.ElementTree.XML', 'ET.XML', (['data'], {}), '(data)\n', (925, 931), True, 'import xml.etree.ElementTree as ET\n'), ((1724, 1752), 'os.path.basename', 'os.path.basename', (['contenturl'], {}), '(contenturl)\n', (1740, 1752), False, 'import os\n'), ((1882, 1898), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1892, 1898), False, 'import json\n'), ((1976, 1991), 'xml.etree.ElementTree.XML', 'ET.XML', (['content'], {}), '(content)\n', (1982, 1991), True, 'import xml.etree.ElementTree as ET\n'), ((1158, 1194), 'os.path.dirname', 'os.path.dirname', (['self.options.output'], {}), '(self.options.output)\n', (1173, 1194), False, 'import os\n'), ((2615, 2647), 'svtplay_dl.log.log.error', 'log.error', (['"""Couldn\'t program id"""'], {}), '("Couldn\'t program id")\n', (2624, 2647), False, 'from svtplay_dl.log import log\n'), ((2881, 2924), 'svtplay_dl.log.log.error', 'log.error', (['"""Couldn\'t retrieve episode list"""'], {}), '("Couldn\'t retrieve episode list")\n', (2890, 2924), False, 'from svtplay_dl.log import log\n'), ((777, 820), 'svtplay_dl.error.ServiceError', 'ServiceError', (['"""Can\'t find id for the video"""'], {}), '("Can\'t find id for the video")\n', (789, 820), False, 'from svtplay_dl.error import ServiceError\n'), ((1264, 1294), 'os.path.join', 'os.path.join', (['directory', 'title'], {}), '(directory, title)\n', (1276, 1294), False, 'import os\n'), ((1403, 1434), 'svtplay_dl.error.ServiceError', 'ServiceError', (['"""Excluding video"""'], {}), "('Excluding video')\n", (1415, 1434), False, 'from svtplay_dl.error import ServiceError\n')] |
##############################################################################
# Copyright 2019 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from pyds8k.resources.ds8k.v1.common.types import DS8K_PPRC
from ...data import get_response_data_by_type
from .base import TestDS8KWithConnect
from pyds8k.resources.ds8k.v1.volumes import Volume
from pyds8k.resources.ds8k.v1.pprc import PPRC
from pyds8k.resources.ds8k.v1.systems import System
class TestPPRC(TestDS8KWithConnect):
    def test_related_resource_field(self):
        """Each PPRC related-resource field exposes the related id directly,
        in the representation, and as a lazily-built typed sub-resource."""
        info = get_response_data_by_type(
            DS8K_PPRC
        )['data'][DS8K_PPRC][0]
        pprc = PPRC(self.client, info=info)
        expectations = (
            ('sourcevolume', Volume),
            ('targetvolume', Volume),
            ('targetsystem', System),
        )
        for field_name, resource_cls in expectations:
            expected_id = info[field_name][resource_cls.id_field]
            self.assertEqual(getattr(pprc, field_name), expected_id)
            self.assertEqual(pprc.representation[field_name], expected_id)
            related = getattr(pprc, '_' + field_name)
            self.assertIsInstance(related, resource_cls)
            self.assertEqual(related.id, expected_id)
| [
"pyds8k.resources.ds8k.v1.pprc.PPRC"
] | [((1433, 1466), 'pyds8k.resources.ds8k.v1.pprc.PPRC', 'PPRC', (['self.client'], {'info': 'pprc_info'}), '(self.client, info=pprc_info)\n', (1437, 1466), False, 'from pyds8k.resources.ds8k.v1.pprc import PPRC\n')] |
import os
os.environ['QT_MAC_WANTS_LAYER'] = '1'
import sys
import random
from PySide2.QtCore import Qt
from PySide2.QtWidgets import (QApplication, QWidget,
QPushButton, QLabel, QGridLayout)
import chess
import aiagents
class Board(QWidget):
    """Chess board widget: an 8x8 grid of buttons plus history controls.

    player1/player2 are AI callables ``(board, legal_moves) -> Move`` for
    white/black respectively, or ``None`` for a human-controlled side.
    """

    def __init__(self, player1=None, player2=None, board=None):
        """Build the grid UI and immediately let the AI move if it is first.

        BUG FIX: the original signature used ``board=chess.Board()`` as a
        mutable default argument, so every Board() created without an
        explicit board shared a single game state.
        """
        QWidget.__init__(self)
        self.lastClicked = None  # (row, col) of a selected tile, or None
        self.board = chess.Board() if board is None else board
        self.player1 = player1
        self.player2 = player2
        self.fowardmoves = []  # redo stack for the </> history buttons
        self.layout = QGridLayout()
        self.setWindowTitle("WHITE")
        self.buttons = [[None for _ in range(8)] for _ in range(8)]
        for r in range(8):
            for c in range(8):
                self.buttons[r][c] = QPushButton("HELLO")
                self.updateTile(r, c, False)
                self.buttons[r][c].clicked.connect(self.clickedTile(r, c))
                self.layout.addWidget(self.buttons[r][c], r, c)
        goagainbtn = QPushButton("AI Go Again")
        goagainbtn.clicked.connect(self.takeAITurn)
        self.layout.addWidget(goagainbtn, 8, 0)
        backbtn = QPushButton("<")
        backbtn.clicked.connect(self.gobackonemove)
        self.layout.addWidget(backbtn, 8, 4)
        fwdbtn = QPushButton(">")
        fwdbtn.clicked.connect(self.gofwdonemove)
        self.layout.addWidget(fwdbtn, 8, 5)
        self.setLayout(self.layout)
        self.takeAITurn()

    def refreshboard(self):
        """Redraw every tile from the current board state."""
        for r in range(8):
            for c in range(8):
                self.updateTile(r, c, False)

    def gobackonemove(self):
        """Undo the last move, pushing it onto the redo stack."""
        self.fowardmoves.append(self.board.pop())
        self.refreshboard()

    def gofwdonemove(self):
        """Redo the oldest undone move, if any."""
        if len(self.fowardmoves) > 0:
            self.board.push(self.fowardmoves[0])
            del self.fowardmoves[0]
            self.refreshboard()

    def takeAITurn(self):
        """Ask the AI whose turn it is (if any) for a move and play it."""
        player = self.player1 if self.board.turn else self.player2
        if player is None:
            return
        move = player(self.board, self.board.generate_legal_moves())
        self.makeMove(move)

    def rowsToIndex(self, row, col):
        """Map a (row, col) grid position to a python-chess square index
        (grid row 0 is the 8th rank)."""
        return (7 - row) * 8 + col

    def indexToRows(self, index):
        """Inverse of rowsToIndex: square index -> (row, col)."""
        return (7 - (index // 8), index % 8)

    def updateTile(self, row, col, isClicked):
        """Refresh one tile's label; append "CLICKED" while it is selected."""
        text = str(self.board.piece_at(self.rowsToIndex(row, col)))
        if text == "None":
            text = ""
        if isClicked:
            text += "CLICKED"
        self.buttons[row][col].setText(text)

    def makeMove(self, move):
        """Play *move* if legal (auto-promoting pawns to queens), update
        the window title for turn/game-over state, then let the AI reply."""
        if not(self.board.is_legal(move)):
            # default promote queen (piece type 5 == chess.QUEEN)
            if not(move is None):
                move.promotion = 5
        if self.board.is_legal(move):
            self.board.push(move)
            self.fowardmoves = []  # a new move invalidates the redo stack
            self.refreshboard()
            if self.board.is_game_over():
                if self.board.is_checkmate():
                    # board.turn is the side to move, i.e. the loser.
                    self.setWindowTitle("BLACK WINS!!!" if self.board.turn else "WHITE WINS!!!")
                else:
                    self.setWindowTitle("TIE")
            else:
                self.setWindowTitle("WHITE" if self.board.turn else "BLACK")
                self.takeAITurn()
        r, c = self.indexToRows(move.from_square)
        self.updateTile(r, c, False)
        r, c = self.indexToRows(move.to_square)
        self.updateTile(r, c, False)

    def makePersonMove(self, fromRC, toRC):
        """Translate two clicked tiles into a move; ignored on AI turns."""
        player = self.player1 if self.board.turn else self.player2
        if not(player is None):
            return
        m1 = self.rowsToIndex(fromRC[0], fromRC[1])
        m2 = self.rowsToIndex(toRC[0], toRC[1])
        move = chess.Move(m1, m2)
        self.makeMove(move)

    def clickedTile(self, row, col):
        """Return a click handler bound to (row, col): the first click
        selects a tile, the second attempts a move (same tile deselects)."""
        def f():
            if self.lastClicked is None:
                self.lastClicked = (row, col)
                self.updateTile(row, col, True)
                return
            r, c = self.lastClicked
            if not(row == r and col == c):
                self.makePersonMove((r, c), (row, col))
            self.updateTile(r, c, False)
            self.updateTile(row, col, False)
            self.lastClicked = None
        return f
if __name__ == "__main__":
    app = QApplication(sys.argv)
    # Human plays white (player1=None); a PVS search agent plays black.
    # The commented lines below are alternative AI matchups for testing.
    #widget = Board(aiagents.createPVS(chess.WHITE, 2, 0.0))
    widget = Board(None, aiagents.createPVS(chess.BLACK, 2, 3, 20.0, 0.0))
    #widget = Board(aiagents.createMiniMaxAIAgent(chess.WHITE, 2, 0.1))
    #widget = Board(None, aiagents.createMiniMaxAIAgent(chess.BLACK, 2, 0.1))
    # widget = Board(aiagents.createMiniMaxAIAgent(chess.WHITE, 2, 0.25), aiagents.createMiniMaxAIAgent(chess.BLACK, 2, 0.25))
    widget.resize(800, 600)
    widget.show()
    sys.exit(app.exec_())
| [
"PySide2.QtWidgets.QGridLayout",
"PySide2.QtWidgets.QPushButton",
"chess.Board",
"PySide2.QtWidgets.QApplication",
"aiagents.createPVS",
"PySide2.QtWidgets.QWidget.__init__",
"chess.Move"
] | [((4204, 4226), 'PySide2.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (4216, 4226), False, 'from PySide2.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QGridLayout\n'), ((305, 318), 'chess.Board', 'chess.Board', ([], {}), '()\n', (316, 318), False, 'import chess\n'), ((329, 351), 'PySide2.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self'], {}), '(self)\n', (345, 351), False, 'from PySide2.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QGridLayout\n'), ((528, 541), 'PySide2.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (539, 541), False, 'from PySide2.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QGridLayout\n'), ((971, 997), 'PySide2.QtWidgets.QPushButton', 'QPushButton', (['"""AI Go Again"""'], {}), "('AI Go Again')\n", (982, 997), False, 'from PySide2.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QGridLayout\n'), ((1117, 1133), 'PySide2.QtWidgets.QPushButton', 'QPushButton', (['"""<"""'], {}), "('<')\n", (1128, 1133), False, 'from PySide2.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QGridLayout\n'), ((1249, 1265), 'PySide2.QtWidgets.QPushButton', 'QPushButton', (['""">"""'], {}), "('>')\n", (1260, 1265), False, 'from PySide2.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QGridLayout\n'), ((3621, 3639), 'chess.Move', 'chess.Move', (['m1', 'm2'], {}), '(m1, m2)\n', (3631, 3639), False, 'import chess\n'), ((4314, 4362), 'aiagents.createPVS', 'aiagents.createPVS', (['chess.BLACK', '(2)', '(3)', '(20.0)', '(0.0)'], {}), '(chess.BLACK, 2, 3, 20.0, 0.0)\n', (4332, 4362), False, 'import aiagents\n'), ((743, 763), 'PySide2.QtWidgets.QPushButton', 'QPushButton', (['"""HELLO"""'], {}), "('HELLO')\n", (754, 763), False, 'from PySide2.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QGridLayout\n')] |
#!/usr/bin/env python
import ncs as ncs
import _ncs
from ncs.dp import Action, Daemon
from ncs.maapi import Maapi
from ncs.log import Log
import socket
import sys
import signal
from lxml import etree
import time
class MyLog(object):
    """Tiny stdout logger handed to ncs.log.Log as its backend."""

    def _emit(self, level, arg):
        # Single formatting point shared by both log levels.
        print("%s: %s" % (level, arg))

    def info(self, arg):
        """Write an informational message to stdout."""
        self._emit("info", arg)

    def error(self, arg):
        """Write an error message to stdout."""
        self._emit("error", arg)
class WaitForPending(Action):
    """NCS action that blocks until no NETCONF NED builder module is left
    in the "selected pending" state, polling once per second."""

    def recv_all_and_close(self, c_sock, c_id):
        # Drain the stream socket until EOF, then close it and return the
        # accumulated UTF-8 text.
        data = ''
        while True:
            buf = c_sock.recv(4096)
            if buf:
                data += buf.decode('utf-8')
            else:
                c_sock.close()
                return data

    def read_config(self, trans, path):
        # Request pretty-printed XML including operational data, with all
        # hidden groups revealed.
        dev_flags= (_ncs.maapi.CONFIG_XML_PRETTY+
                    _ncs.maapi.CONFIG_WITH_OPER+
                    _ncs.maapi.CONFIG_UNHIDE_ALL)
        # save_config returns a stream id; the bytes are fetched over a
        # separate socket connected to the local NCS port.
        c_id = trans.save_config(dev_flags, path)
        c_sock = socket.socket()
        _ncs.stream_connect(c_sock, c_id, 0, '127.0.0.1', _ncs.PORT)
        data = self.recv_all_and_close(c_sock, c_id);
        return data

    @Action.action
    def cb_action(self, uinfo, name, kp, input, output):
        # Re-read the module status on every iteration using a fresh
        # read transaction; return once "selected pending" disappears.
        while True:
            with ncs.maapi.single_read_trans('admin', 'admin') as t:
                save_data = self.read_config(t, "/netconf-ned-builder/project{router 1.0}/module/status")
                xml_str = str(save_data)
                if xml_str.find("selected pending") != -1:
                    time.sleep(1);
                else:
                    return;
def load_schemas():
    """Force NSO schema loading by opening and closing a Maapi session."""
    session = Maapi()
    with session:
        pass
if __name__ == "__main__":
load_schemas()
logger = Log(MyLog(), add_timestamp=True)
d = Daemon(name='myactiond', log=logger)
a = []
a.append(WaitForPending(daemon=d, actionpoint='wait-for-pending', log=logger))
logger.info('--- Daemon myaction STARTED ---')
d.start()
signal.pause()
d.finish()
logger.info('--- Daemon myaction FINISHED ---')
| [
"socket.socket",
"ncs.dp.Daemon",
"signal.pause",
"ncs.maapi.Maapi",
"time.sleep",
"ncs.maapi.single_read_trans",
"_ncs.stream_connect"
] | [((1676, 1712), 'ncs.dp.Daemon', 'Daemon', ([], {'name': '"""myactiond"""', 'log': 'logger'}), "(name='myactiond', log=logger)\n", (1682, 1712), False, 'from ncs.dp import Action, Daemon\n'), ((1876, 1890), 'signal.pause', 'signal.pause', ([], {}), '()\n', (1888, 1890), False, 'import signal\n'), ((904, 919), 'socket.socket', 'socket.socket', ([], {}), '()\n', (917, 919), False, 'import socket\n'), ((928, 988), '_ncs.stream_connect', '_ncs.stream_connect', (['c_sock', 'c_id', '(0)', '"""127.0.0.1"""', '_ncs.PORT'], {}), "(c_sock, c_id, 0, '127.0.0.1', _ncs.PORT)\n", (947, 988), False, 'import _ncs\n'), ((1552, 1559), 'ncs.maapi.Maapi', 'Maapi', ([], {}), '()\n', (1557, 1559), False, 'from ncs.maapi import Maapi\n'), ((1178, 1223), 'ncs.maapi.single_read_trans', 'ncs.maapi.single_read_trans', (['"""admin"""', '"""admin"""'], {}), "('admin', 'admin')\n", (1205, 1223), True, 'import ncs as ncs\n'), ((1456, 1469), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1466, 1469), False, 'import time\n')] |
import kivy
kivy.require('1.7.2')
from kivy.app import App
from kivy.clock import Clock
from kivy.metrics import Metrics
from kivy.properties import NumericProperty
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.scatter import Scatter
from kivy.uix.treeview import TreeView, TreeViewLabel
from kivy.uix.widget import Widget
from kivy.config import Config
from pynetworktables import *
# Fixed-size 600x400 window; Escape must not close the dashboard.
Config.set('graphics', 'width', '600')
Config.set('graphics', 'height', '400')
Config.set('graphics', 'resizable', '0')
Config.set('kivy', 'exit_on_escape', '0')

# Connect to the robot's NetworkTables server as a client and grab the
# tables this dashboard reads from and writes to.
NetworkTable.SetIPAddress("192.168.1.104")
NetworkTable.SetClientMode()
NetworkTable.Initialize()

RoboTable = NetworkTable.GetTable("Robot")
DriveTable = RoboTable.GetSubTable("Drivetrain")
CatcherTable = RoboTable.GetSubTable("Catcher")
# NOTE(review): fetched with GetTable (not GetSubTable) — confirm whether
# "Sensors" is intended to be a top-level table rather than a sub-table.
SensorsTable = RoboTable.GetTable("Sensors")

# Module-level handle to the live Dashboard, set by DashboardApp.build().
Dash = None
class Dashboard(FloatLayout):
    """Root dashboard widget; reads and writes robot state via NetworkTables."""

    # Battery readout property bound into the kv layout.
    batteryValue = NumericProperty(50)
    # Class-level declarations kept for backward compatibility; fresh
    # per-instance lists are assigned in __init__ (see BUG FIX below).
    get_items = list()
    watch = list()
    set_items = list()

    def __init__(self):
        super(Dashboard, self).__init__()
        # BUG FIX: these lists were previously only class attributes, so
        # every Dashboard instance mutated the same shared lists.
        self.get_items = []
        self.watch = []
        self.set_items = []

    def add_get_item(self, _id, value, default):
        """Register widget *_id* to display robot value *value*."""
        self.watch.append([_id, self.__update_value])
        #self.ids[_id].value = self.nt(value, default)
        print("Added Get Item '{0}' reading value '{1}' with default value {2}".format(_id, value, default))

    def add_set_item(self, _id, value):
        """Register widget *_id* as a provider of robot value *value*."""
        self.set_items.append([_id, value])
        print("Added Set Item '{0}' providing value '{1}'".format(_id, value))

    def do_action(self):
        print("Test")

    def change_drivemode(self, mode):
        """Publish the selected drive mode (as a float) to the Drivetrain table."""
        DriveTable.PutNumber("drivemode", mode * 1.0)

    def _clamp_scale(self, widget_id, speed):
        """Clamp *speed* to [0, 10], echoing the clamped value back to the
        widget's text only when clamping occurred; return the value."""
        value = float(speed)
        if value > 10.0:
            value = 10.0
            self.ids[widget_id].text = str(value)
        if value < 0.0:
            value = 0.0
            self.ids[widget_id].text = str(value)
        return value

    def change_spindle_scale(self, speed):
        """Publish the catcher spindle scale (0..1) from a 0..10 UI value."""
        a = self._clamp_scale('spindle_scaler', speed)
        CatcherTable.PutNumber("spindleScale", a / 10)

    def change_move_scale(self, speed):
        """Publish the drivetrain move scale (0..1) from a 0..10 UI value."""
        a = self._clamp_scale('move_scaler', speed)
        DriveTable.PutNumber("move_scale", a / 10)

    def __update_value(self, _id, value):
        # Callback used by add_get_item registrations.
        self.ids[_id].value = value
class DashboardApp(App):
    """Application entry point; builds the dashboard root widget and
    publishes it through the module-level ``Dash`` handle."""

    def build(self):
        global Dash
        root = Dashboard()
        Dash = root
        return root
def dashChanger(_id, value):
    """Set the `value` property of the widget with kv id *_id* on the live dashboard."""
    Dash.ids[_id].value = value
def dashTextChanger(_id, value):
    """Set the text of widget *_id* on the live dashboard, stringifying *value*."""
    Dash.ids[_id].text = str(value)
def modeChanger(mode):
    """Press the drive-mode toggle button matching *mode* (1-4).

    Raises KeyError for any other mode value.
    """
    {
        1: Dash.ids['toggle_drivemode_arcade'],
        2: Dash.ids['toggle_drivemode_tank'],
        3: Dash.ids['toggle_drivemode_mecanum'],
        4: Dash.ids['toggle_drivemode_mecatank']
    }[mode]._do_press()
class RobotTableListener(ITableListener):
    """Mirrors the robot battery level into the dashboard gauge."""

    def ValueChanged(self, table, key, value, isNew):
        # Only the battery level from the Robot table is surfaced in the UI.
        if key == "battery_level":
            dashChanger('battery_level_bar', table.GetValue(key))

roboListener = RobotTableListener()
RoboTable.AddTableListener(roboListener)
class DriveTableListener(ITableListener):
    """Keeps the drive-mode widgets in sync with the Drivetrain table."""

    def ValueChanged(self, table, key, value, isNew):
        if key == "drivemode_string":
            dashTextChanger('text_drivemode', table.GetValue(key))
        elif key == "drivemode":
            modeChanger(table.GetValue(key))
        elif key == "move_scale":
            # The table stores 0..1 (see change_move_scale); the UI shows 0..10.
            dashTextChanger('move_scaler', table.GetValue(key)*10)

driveListener = DriveTableListener()
DriveTable.AddTableListener(driveListener)
class CatcherTableListener(ITableListener):
    """Keeps the spindle-scale widget in sync with the Catcher table."""

    def ValueChanged(self, table, key, value, isNew):
        if key == "spindleScale":
            # The table stores 0..1 (see change_spindle_scale); UI shows 0..10.
            dashTextChanger('spindle_scaler', table.GetValue(key)*10)

catchListener = CatcherTableListener()
CatcherTable.AddTableListener(catchListener)
class SensorTableListener(ITableListener):
    """Mirrors the ultrasonic distance reading into the dashboard."""

    def ValueChanged(self, table, key, value, isNew):
        if key == "distance":
            dashTextChanger("ultrasonic_out", table.GetValue(key))

sensorListener = SensorTableListener()
SensorsTable.AddTableListener(sensorListener)
if __name__ == '__main__':
    # Blocks until the dashboard window is closed.
    DashboardApp().run()
"kivy.require",
"kivy.config.Config.set",
"kivy.properties.NumericProperty"
] | [((13, 34), 'kivy.require', 'kivy.require', (['"""1.7.2"""'], {}), "('1.7.2')\n", (25, 34), False, 'import kivy\n'), ((726, 764), 'kivy.config.Config.set', 'Config.set', (['"""graphics"""', '"""width"""', '"""600"""'], {}), "('graphics', 'width', '600')\n", (736, 764), False, 'from kivy.config import Config\n'), ((765, 804), 'kivy.config.Config.set', 'Config.set', (['"""graphics"""', '"""height"""', '"""400"""'], {}), "('graphics', 'height', '400')\n", (775, 804), False, 'from kivy.config import Config\n'), ((805, 845), 'kivy.config.Config.set', 'Config.set', (['"""graphics"""', '"""resizable"""', '"""0"""'], {}), "('graphics', 'resizable', '0')\n", (815, 845), False, 'from kivy.config import Config\n'), ((846, 887), 'kivy.config.Config.set', 'Config.set', (['"""kivy"""', '"""exit_on_escape"""', '"""0"""'], {}), "('kivy', 'exit_on_escape', '0')\n", (856, 887), False, 'from kivy.config import Config\n'), ((1240, 1259), 'kivy.properties.NumericProperty', 'NumericProperty', (['(50)'], {}), '(50)\n', (1255, 1259), False, 'from kivy.properties import NumericProperty\n')] |
import pytest
from pymlconf import Root
def test_delattribute():
    """Deleting a config attribute removes it; deleting a missing one raises."""
    root = Root('''
    app:
      name: MyApp
    ''')
    assert hasattr(root.app, 'name')
    del root.app.name
    assert not hasattr(root.app, 'name')

    # A second delete of a nonexistent attribute must raise AttributeError.
    with pytest.raises(AttributeError):
        del root.app.invalidattribute
| [
"pymlconf.Root",
"pytest.raises"
] | [((79, 131), 'pymlconf.Root', 'Root', (['"""\n app:\n name: MyApp\n """'], {}), '("""\n app:\n name: MyApp\n """)\n', (83, 131), False, 'from pymlconf import Root\n'), ((243, 272), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (256, 272), False, 'import pytest\n')] |
# Generated by Django 2.1.4 on 2019-03-03 19:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("filamentcolors", "0004_filamenttype_manufacturer"),
]
operations = [
migrations.AddField(
model_name="swatch",
name="filament_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="filamentcolors.FilamentType",
),
),
migrations.AddField(
model_name="swatch",
name="manufacturer",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="filamentcolors.Manufacturer",
),
),
]
| [
"django.db.models.ForeignKey"
] | [((388, 512), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""filamentcolors.FilamentType"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='filamentcolors.FilamentType')\n", (405, 512), False, 'from django.db import migrations, models\n'), ((712, 836), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""filamentcolors.Manufacturer"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='filamentcolors.Manufacturer')\n", (729, 836), False, 'from django.db import migrations, models\n')] |
"""Test the Poem model."""
import pytest
from run.models import Meter, Poet, Poem
def test_Poem_repr():
"""Test the __repr__ method."""
assert repr(
Poem(
title='title',
keyword='keyword',
raw_text='raw text',
poet_id=1,
meter_id=1,
)
) == "<Poem 'title'>"
def test_Poem_ValueError(app):
"""Check that insert_samples() raises ValueError if meter/poet absent."""
with pytest.raises(ValueError) as excinfo:
Poem.insert_samples()
assert "This poet does not exist." in str(excinfo.value)
Poet.insert_samples()
with pytest.raises(ValueError) as excinfo:
Poem.insert_samples()
assert "This meter pattern does not exist." in str(excinfo.value)
def test_Poem_insert_samples(app):
"""Test the insert_samples static method of Poem."""
assert Poem.query.first() is None
Meter.insert_samples()
Poet.insert_samples()
Poem.insert_samples()
assert Poem.query.first() is not None
poems = Poem.query.all()
for poem in poems:
assert isinstance(poem.title, str)
assert isinstance(poem.keyword, str)
assert isinstance(poem.raw_text, str)
# assert isinstance(poem.HTML, str)
assert isinstance(poem.author, Poet)
assert isinstance(poem.meter, Meter)
Poem.insert_samples()
# Check the operation is idempotent
assert poems == Poem.query.all()
| [
"run.models.Poem.query.first",
"run.models.Poem",
"run.models.Poem.query.all",
"run.models.Poet.insert_samples",
"pytest.raises",
"run.models.Poem.insert_samples",
"run.models.Meter.insert_samples"
] | [((601, 622), 'run.models.Poet.insert_samples', 'Poet.insert_samples', ([], {}), '()\n', (620, 622), False, 'from run.models import Meter, Poet, Poem\n'), ((907, 929), 'run.models.Meter.insert_samples', 'Meter.insert_samples', ([], {}), '()\n', (927, 929), False, 'from run.models import Meter, Poet, Poem\n'), ((934, 955), 'run.models.Poet.insert_samples', 'Poet.insert_samples', ([], {}), '()\n', (953, 955), False, 'from run.models import Meter, Poet, Poem\n'), ((960, 981), 'run.models.Poem.insert_samples', 'Poem.insert_samples', ([], {}), '()\n', (979, 981), False, 'from run.models import Meter, Poet, Poem\n'), ((1037, 1053), 'run.models.Poem.query.all', 'Poem.query.all', ([], {}), '()\n', (1051, 1053), False, 'from run.models import Meter, Poet, Poem\n'), ((1350, 1371), 'run.models.Poem.insert_samples', 'Poem.insert_samples', ([], {}), '()\n', (1369, 1371), False, 'from run.models import Meter, Poet, Poem\n'), ((468, 493), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (481, 493), False, 'import pytest\n'), ((514, 535), 'run.models.Poem.insert_samples', 'Poem.insert_samples', ([], {}), '()\n', (533, 535), False, 'from run.models import Meter, Poet, Poem\n'), ((632, 657), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (645, 657), False, 'import pytest\n'), ((678, 699), 'run.models.Poem.insert_samples', 'Poem.insert_samples', ([], {}), '()\n', (697, 699), False, 'from run.models import Meter, Poet, Poem\n'), ((875, 893), 'run.models.Poem.query.first', 'Poem.query.first', ([], {}), '()\n', (891, 893), False, 'from run.models import Meter, Poet, Poem\n'), ((993, 1011), 'run.models.Poem.query.first', 'Poem.query.first', ([], {}), '()\n', (1009, 1011), False, 'from run.models import Meter, Poet, Poem\n'), ((1432, 1448), 'run.models.Poem.query.all', 'Poem.query.all', ([], {}), '()\n', (1446, 1448), False, 'from run.models import Meter, Poet, Poem\n'), ((168, 254), 'run.models.Poem', 'Poem', ([], {'title': 
'"""title"""', 'keyword': '"""keyword"""', 'raw_text': '"""raw text"""', 'poet_id': '(1)', 'meter_id': '(1)'}), "(title='title', keyword='keyword', raw_text='raw text', poet_id=1,\n meter_id=1)\n", (172, 254), False, 'from run.models import Meter, Poet, Poem\n')] |
import logging
from datetime import datetime
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core import cache, exceptions
from django.core.urlresolvers import reverse
from geo.models import Country, Currency
from pytz import timezone
from rest_framework import serializers, pagination
#
# Serializers
#
class BaseSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
exclude = ('last_modified_user', 'security_hash')
class CountrySerializer(BaseSerializer):
class Meta:
model = Country
class CountryLookupSerializer(BaseSerializer):
class Meta:
model = Country
fields = ('name', 'id')
class CurrencySerializer(BaseSerializer):
class Meta:
model = Currency
#
# Views
#
class AbstractListView(ListAPIView):
permission_classes = (AllowAny, )
authentication_classes = (TokenAuthentication, SessionAuthentication, BasicAuthentication)
#permission_classes = (permissions.IsAuthenticated, IsOwner)
renderer_classes = (UnicodeJSONRenderer, JSONPRenderer, BrowsableAPIRenderer, YAMLRenderer, XMLRenderer)
paginate_by = 10
paginate_by_param = 'page_size'
max_paginate_by = 100
class CountryListView(AbstractListView):
search_fields = ['name', 'fullname', ]
serializer_class = CountrySerializer
pagination_serializer_class = CustomPaginationSerializer
filter_backends = (filters.DjangoFilterBackend, filters.SearchFilter)
filter_fields = ('iso_code', 'iso3_code', 'num_code', 'name')
def get_queryset(self):
return Country.objects.all().order_by('name')
| [
"geo.models.Country.objects.all"
] | [((1580, 1601), 'geo.models.Country.objects.all', 'Country.objects.all', ([], {}), '()\n', (1599, 1601), False, 'from geo.models import Country, Currency\n')] |
#数値微分の例
import numpy as np
from common_function import function_1, numerical_diff
import matplotlib.pylab as plt
def tangent_line(f, x):
d = numerical_diff(f, x)
print(d)
y = f(x) - d*x
return lambda t: d*t + y
x = np.arange(0.0, 20.0, 0.1) #0から20まで0.1刻みのx配列
y = function_1(x)
plt.xlabel("x")
plt.ylabel("f(x)")
tf = tangent_line(function_1, 5)
y2 = tf(x)
plt.plot(x, y)
plt.plot(x, y2)
plt.show()
| [
"matplotlib.pylab.xlabel",
"common_function.function_1",
"matplotlib.pylab.show",
"common_function.numerical_diff",
"matplotlib.pylab.plot",
"numpy.arange",
"matplotlib.pylab.ylabel"
] | [((234, 259), 'numpy.arange', 'np.arange', (['(0.0)', '(20.0)', '(0.1)'], {}), '(0.0, 20.0, 0.1)\n', (243, 259), True, 'import numpy as np\n'), ((282, 295), 'common_function.function_1', 'function_1', (['x'], {}), '(x)\n', (292, 295), False, 'from common_function import function_1, numerical_diff\n'), ((297, 312), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (307, 312), True, 'import matplotlib.pylab as plt\n'), ((313, 331), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""f(x)"""'], {}), "('f(x)')\n", (323, 331), True, 'import matplotlib.pylab as plt\n'), ((378, 392), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (386, 392), True, 'import matplotlib.pylab as plt\n'), ((393, 408), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y2'], {}), '(x, y2)\n', (401, 408), True, 'import matplotlib.pylab as plt\n'), ((409, 419), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (417, 419), True, 'import matplotlib.pylab as plt\n'), ((146, 166), 'common_function.numerical_diff', 'numerical_diff', (['f', 'x'], {}), '(f, x)\n', (160, 166), False, 'from common_function import function_1, numerical_diff\n')] |
import platform
import sys
from datetime import datetime, timedelta
from functools import partial
from types import ModuleType
import pytest
import pytz
from apscheduler.util import (
datetime_ceil, get_callable_name, obj_to_ref, ref_to_obj, maybe_ref, check_callable_args)
class DummyClass(object):
def meth(self):
pass
@staticmethod
def staticmeth():
pass
@classmethod
def classmeth(cls):
pass
def __call__(self):
pass
class InnerDummyClass(object):
@classmethod
def innerclassmeth(cls):
pass
class InheritedDummyClass(DummyClass):
@classmethod
def classmeth(cls):
pass
@pytest.mark.parametrize('input,expected', [
(datetime(2009, 4, 7, 2, 10, 16, 4000), datetime(2009, 4, 7, 2, 10, 17)),
(datetime(2009, 4, 7, 2, 10, 16), datetime(2009, 4, 7, 2, 10, 16))
], ids=['milliseconds', 'exact'])
def test_datetime_ceil(input, expected):
assert datetime_ceil(input) == expected
class TestGetCallableName(object):
@pytest.mark.parametrize('input,expected', [
(open, 'open'),
(DummyClass.staticmeth, 'DummyClass.staticmeth' if
hasattr(DummyClass, '__qualname__') else 'staticmeth'),
(DummyClass.classmeth, 'DummyClass.classmeth'),
(DummyClass.meth, 'DummyClass.meth'),
(DummyClass().meth, 'DummyClass.meth'),
(DummyClass, 'DummyClass'),
(DummyClass(), 'DummyClass')
], ids=['function', 'static method', 'class method', 'unbounded method', 'bounded method',
'class', 'instance'])
def test_inputs(self, input, expected):
assert get_callable_name(input) == expected
def test_bad_input(self):
pytest.raises(TypeError, get_callable_name, object())
class TestObjToRef(object):
@pytest.mark.parametrize('obj, error', [
(partial(DummyClass.meth), 'Cannot create a reference to a partial()'),
(lambda: None, 'Cannot create a reference to a lambda')
], ids=['partial', 'lambda'])
def test_errors(self, obj, error):
exc = pytest.raises(ValueError, obj_to_ref, obj)
assert str(exc.value) == error
def test_nested_function_error(self):
def nested():
pass
exc = pytest.raises(ValueError, obj_to_ref, nested)
assert str(exc.value) == 'Cannot create a reference to a nested function'
@pytest.mark.parametrize('input,expected', [
(DummyClass.meth, 'test_util:DummyClass.meth'),
(DummyClass.classmeth, 'test_util:DummyClass.classmeth'),
(DummyClass.InnerDummyClass.innerclassmeth,
'test_util:DummyClass.InnerDummyClass.innerclassmeth'),
(DummyClass.staticmeth, 'test_util:DummyClass.staticmeth'),
(InheritedDummyClass.classmeth, 'test_util:InheritedDummyClass.classmeth'),
(timedelta, 'datetime:timedelta'),
], ids=['unbound method', 'class method', 'inner class method', 'static method',
'inherited class method', 'timedelta'])
def test_valid_refs(self, input, expected):
assert obj_to_ref(input) == expected
class TestRefToObj(object):
def test_valid_ref(self):
from logging.handlers import RotatingFileHandler
assert ref_to_obj('logging.handlers:RotatingFileHandler') is RotatingFileHandler
def test_complex_path(self):
pkg1 = ModuleType('pkg1')
pkg1.pkg2 = 'blah'
pkg2 = ModuleType('pkg1.pkg2')
pkg2.varname = 'test'
sys.modules['pkg1'] = pkg1
sys.modules['pkg1.pkg2'] = pkg2
assert ref_to_obj('pkg1.pkg2:varname') == 'test'
@pytest.mark.parametrize('input,error', [
(object(), TypeError),
('module', ValueError),
('module:blah', LookupError)
], ids=['raw object', 'module', 'module attribute'])
def test_lookup_error(self, input, error):
pytest.raises(error, ref_to_obj, input)
@pytest.mark.parametrize('input,expected', [
('datetime:timedelta', timedelta),
(timedelta, timedelta)
], ids=['textref', 'direct'])
def test_maybe_ref(input, expected):
assert maybe_ref(input) == expected
class TestCheckCallableArgs(object):
def test_invalid_callable_args(self):
"""
Tests that attempting to create a job with an invalid number of arguments raises an
exception.
"""
exc = pytest.raises(ValueError, check_callable_args, lambda x: None, [1, 2], {})
assert str(exc.value) == (
'The list of positional arguments is longer than the target callable can handle '
'(allowed: 1, given in args: 2)')
def test_invalid_callable_kwargs(self):
"""
Tests that attempting to schedule a job with unmatched keyword arguments raises an
exception.
"""
exc = pytest.raises(ValueError, check_callable_args, lambda x: None, [], {'x': 0, 'y': 1})
assert str(exc.value) == ('The target callable does not accept the following keyword '
'arguments: y')
def test_missing_callable_args(self):
"""Tests that attempting to schedule a job with missing arguments raises an exception."""
exc = pytest.raises(ValueError, check_callable_args, lambda x, y, z: None, [1], {'y': 0})
assert str(exc.value) == 'The following arguments have not been supplied: z'
def test_default_args(self):
"""Tests that default values for arguments are properly taken into account."""
exc = pytest.raises(ValueError, check_callable_args, lambda x, y, z=1: None, [1], {})
assert str(exc.value) == 'The following arguments have not been supplied: y'
def test_conflicting_callable_args(self):
"""
Tests that attempting to schedule a job where the combination of args and kwargs are in
conflict raises an exception.
"""
exc = pytest.raises(ValueError, check_callable_args, lambda x, y: None, [1, 2], {'y': 1})
assert str(exc.value) == 'The following arguments are supplied in both args and kwargs: y'
def test_signature_positional_only(self):
"""Tests that a function where signature() fails is accepted."""
check_callable_args(object().__setattr__, ('blah', 1), {})
@pytest.mark.skipif(platform.python_implementation() == 'PyPy',
reason='PyPy does not expose signatures of builtins')
def test_positional_only_args(self):
"""
Tests that an attempt to use keyword arguments for positional-only arguments raises an
exception.
"""
exc = pytest.raises(ValueError, check_callable_args, object.__setattr__, ['blah'],
{'value': 1})
assert str(exc.value) == ('The following arguments cannot be given as keyword arguments: '
'value')
def test_unfulfilled_kwargs(self):
"""
Tests that attempting to schedule a job where not all keyword-only arguments are fulfilled
raises an exception.
"""
func = eval("lambda x, *, y, z=1: None")
exc = pytest.raises(ValueError, check_callable_args, func, [1], {})
assert str(exc.value) == ('The following keyword-only arguments have not been supplied in '
'kwargs: y')
| [
"apscheduler.util.maybe_ref",
"datetime.datetime",
"platform.python_implementation",
"apscheduler.util.get_callable_name",
"apscheduler.util.datetime_ceil",
"types.ModuleType",
"apscheduler.util.ref_to_obj",
"apscheduler.util.obj_to_ref",
"pytest.mark.parametrize",
"pytest.raises",
"functools.pa... | [((3911, 4045), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input,expected"""', "[('datetime:timedelta', timedelta), (timedelta, timedelta)]"], {'ids': "['textref', 'direct']"}), "('input,expected', [('datetime:timedelta', timedelta\n ), (timedelta, timedelta)], ids=['textref', 'direct'])\n", (3934, 4045), False, 'import pytest\n'), ((2399, 2975), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input,expected"""', "[(DummyClass.meth, 'test_util:DummyClass.meth'), (DummyClass.classmeth,\n 'test_util:DummyClass.classmeth'), (DummyClass.InnerDummyClass.\n innerclassmeth, 'test_util:DummyClass.InnerDummyClass.innerclassmeth'),\n (DummyClass.staticmeth, 'test_util:DummyClass.staticmeth'), (\n InheritedDummyClass.classmeth,\n 'test_util:InheritedDummyClass.classmeth'), (timedelta,\n 'datetime:timedelta')]"], {'ids': "['unbound method', 'class method', 'inner class method', 'static method',\n 'inherited class method', 'timedelta']"}), "('input,expected', [(DummyClass.meth,\n 'test_util:DummyClass.meth'), (DummyClass.classmeth,\n 'test_util:DummyClass.classmeth'), (DummyClass.InnerDummyClass.\n innerclassmeth, 'test_util:DummyClass.InnerDummyClass.innerclassmeth'),\n (DummyClass.staticmeth, 'test_util:DummyClass.staticmeth'), (\n InheritedDummyClass.classmeth,\n 'test_util:InheritedDummyClass.classmeth'), (timedelta,\n 'datetime:timedelta')], ids=['unbound method', 'class method',\n 'inner class method', 'static method', 'inherited class method',\n 'timedelta'])\n", (2422, 2975), False, 'import pytest\n'), ((972, 992), 'apscheduler.util.datetime_ceil', 'datetime_ceil', (['input'], {}), '(input)\n', (985, 992), False, 'from apscheduler.util import datetime_ceil, get_callable_name, obj_to_ref, ref_to_obj, maybe_ref, check_callable_args\n'), ((2086, 2128), 'pytest.raises', 'pytest.raises', (['ValueError', 'obj_to_ref', 'obj'], {}), '(ValueError, obj_to_ref, obj)\n', (2099, 2128), False, 'import pytest\n'), ((2265, 2310), 
'pytest.raises', 'pytest.raises', (['ValueError', 'obj_to_ref', 'nested'], {}), '(ValueError, obj_to_ref, nested)\n', (2278, 2310), False, 'import pytest\n'), ((3362, 3380), 'types.ModuleType', 'ModuleType', (['"""pkg1"""'], {}), "('pkg1')\n", (3372, 3380), False, 'from types import ModuleType\n'), ((3423, 3446), 'types.ModuleType', 'ModuleType', (['"""pkg1.pkg2"""'], {}), "('pkg1.pkg2')\n", (3433, 3446), False, 'from types import ModuleType\n'), ((3868, 3907), 'pytest.raises', 'pytest.raises', (['error', 'ref_to_obj', 'input'], {}), '(error, ref_to_obj, input)\n', (3881, 3907), False, 'import pytest\n'), ((4099, 4115), 'apscheduler.util.maybe_ref', 'maybe_ref', (['input'], {}), '(input)\n', (4108, 4115), False, 'from apscheduler.util import datetime_ceil, get_callable_name, obj_to_ref, ref_to_obj, maybe_ref, check_callable_args\n'), ((4359, 4433), 'pytest.raises', 'pytest.raises', (['ValueError', 'check_callable_args', '(lambda x: None)', '[1, 2]', '{}'], {}), '(ValueError, check_callable_args, lambda x: None, [1, 2], {})\n', (4372, 4433), False, 'import pytest\n'), ((4803, 4891), 'pytest.raises', 'pytest.raises', (['ValueError', 'check_callable_args', '(lambda x: None)', '[]', "{'x': 0, 'y': 1}"], {}), "(ValueError, check_callable_args, lambda x: None, [], {'x': 0,\n 'y': 1})\n", (4816, 4891), False, 'import pytest\n'), ((5188, 5276), 'pytest.raises', 'pytest.raises', (['ValueError', 'check_callable_args', '(lambda x, y, z: None)', '[1]', "{'y': 0}"], {}), "(ValueError, check_callable_args, lambda x, y, z: None, [1], {\n 'y': 0})\n", (5201, 5276), False, 'import pytest\n'), ((5492, 5571), 'pytest.raises', 'pytest.raises', (['ValueError', 'check_callable_args', '(lambda x, y, z=1: None)', '[1]', '{}'], {}), '(ValueError, check_callable_args, lambda x, y, z=1: None, [1], {})\n', (5505, 5571), False, 'import pytest\n'), ((5877, 5965), 'pytest.raises', 'pytest.raises', (['ValueError', 'check_callable_args', '(lambda x, y: None)', '[1, 2]', "{'y': 1}"], {}), 
"(ValueError, check_callable_args, lambda x, y: None, [1, 2], {\n 'y': 1})\n", (5890, 5965), False, 'import pytest\n'), ((6588, 6682), 'pytest.raises', 'pytest.raises', (['ValueError', 'check_callable_args', 'object.__setattr__', "['blah']", "{'value': 1}"], {}), "(ValueError, check_callable_args, object.__setattr__, ['blah'],\n {'value': 1})\n", (6601, 6682), False, 'import pytest\n'), ((7105, 7166), 'pytest.raises', 'pytest.raises', (['ValueError', 'check_callable_args', 'func', '[1]', '{}'], {}), '(ValueError, check_callable_args, func, [1], {})\n', (7118, 7166), False, 'import pytest\n'), ((742, 779), 'datetime.datetime', 'datetime', (['(2009)', '(4)', '(7)', '(2)', '(10)', '(16)', '(4000)'], {}), '(2009, 4, 7, 2, 10, 16, 4000)\n', (750, 779), False, 'from datetime import datetime, timedelta\n'), ((781, 812), 'datetime.datetime', 'datetime', (['(2009)', '(4)', '(7)', '(2)', '(10)', '(17)'], {}), '(2009, 4, 7, 2, 10, 17)\n', (789, 812), False, 'from datetime import datetime, timedelta\n'), ((820, 851), 'datetime.datetime', 'datetime', (['(2009)', '(4)', '(7)', '(2)', '(10)', '(16)'], {}), '(2009, 4, 7, 2, 10, 16)\n', (828, 851), False, 'from datetime import datetime, timedelta\n'), ((853, 884), 'datetime.datetime', 'datetime', (['(2009)', '(4)', '(7)', '(2)', '(10)', '(16)'], {}), '(2009, 4, 7, 2, 10, 16)\n', (861, 884), False, 'from datetime import datetime, timedelta\n'), ((1650, 1674), 'apscheduler.util.get_callable_name', 'get_callable_name', (['input'], {}), '(input)\n', (1667, 1674), False, 'from apscheduler.util import datetime_ceil, get_callable_name, obj_to_ref, ref_to_obj, maybe_ref, check_callable_args\n'), ((3077, 3094), 'apscheduler.util.obj_to_ref', 'obj_to_ref', (['input'], {}), '(input)\n', (3087, 3094), False, 'from apscheduler.util import datetime_ceil, get_callable_name, obj_to_ref, ref_to_obj, maybe_ref, check_callable_args\n'), ((3239, 3289), 'apscheduler.util.ref_to_obj', 'ref_to_obj', (['"""logging.handlers:RotatingFileHandler"""'], {}), 
"('logging.handlers:RotatingFileHandler')\n", (3249, 3289), False, 'from apscheduler.util import datetime_ceil, get_callable_name, obj_to_ref, ref_to_obj, maybe_ref, check_callable_args\n'), ((3567, 3598), 'apscheduler.util.ref_to_obj', 'ref_to_obj', (['"""pkg1.pkg2:varname"""'], {}), "('pkg1.pkg2:varname')\n", (3577, 3598), False, 'from apscheduler.util import datetime_ceil, get_callable_name, obj_to_ref, ref_to_obj, maybe_ref, check_callable_args\n'), ((6272, 6304), 'platform.python_implementation', 'platform.python_implementation', ([], {}), '()\n', (6302, 6304), False, 'import platform\n'), ((1864, 1888), 'functools.partial', 'partial', (['DummyClass.meth'], {}), '(DummyClass.meth)\n', (1871, 1888), False, 'from functools import partial\n')] |
import re
import os
class Twitter():
def __init__(self):
pass
def removeHastag(self, tweet):
"""
Remove hastag in a tweet. (Tweet içerisindeki hastag'i kaldırır.)
Example:
Input:
\ttext [string] => "Merhaba #Hello"
Output:
\ttext [string] => "Merhaba"
"""
return re.sub(r"#\S+","", tweet)
def removeMention(self, tweet):
"""
Remove mention in a tweet. (Tweet içerisindeki mention'ı kaldırır.)
Example:
Input:
\ttext [string] => "Merhaba @example"
Output:
\ttext [string] => "Merhaba"
"""
return re.sub(r"@\S+","", tweet).strip()
def removeRT(self, tweet):
"""
Remove retweet in a tweet. (Tweet içerisindeki retweet'i kaldırır.)
Example:
Input:
\ttext [string] => "rt Bugün hava çok güzel"
Output:
\ttext [string] => "Bugün hava çok güzel"
"""
return re.sub(r"\brt\b","", tweet)
def getTweet(self, q = "", from_ = "", maxResult = 500, since = "", until = "", showQuery = True):
"""
Get retweet using snscrape library. (snscrape kütüphanesini kullanarak tweet toplar.)
Params:
* q: Query word. (Sorgu kelimesi)
* from_: Twitter user name. Ex. jack (Twitter kullanıcı adı. Örn. jack)
* maxResult: Count of tweets. (Tweet sayısı)
* since: Start date. Usage yyyy-mm-dd. (Başlangıç tarihi. Kullanımı yyyy-mm-dd.)
* until: Finish date. Usage yyyy-mm-dd. (Bitiş tarihi. Kullanımı yyyy-mm-dd.)
* showQuery: snscrape query (snscrape sorgusu)
Example:
Input:
\ttext (query)
Output:
\tfile (json)
"""
if until != "": until = " until:" + until
if from_ != "": from_ = " from:" + from_
if since != "": since = "--since " + since
if showQuery:
# Using OS library to call CLI commands in Python
print(f'snscrape --jsonl --max-results {maxResult} {since} twitter-search "{q}{until}{from_}" > text-query-tweets.json')
os.system(f'snscrape --jsonl --max-results {maxResult} {since} twitter-search "{q}{until}{from_}" > text-query-tweets.json')
| [
"re.sub",
"os.system"
] | [((399, 425), 're.sub', 're.sub', (['"""#\\\\S+"""', '""""""', 'tweet'], {}), "('#\\\\S+', '', tweet)\n", (405, 425), False, 'import re\n'), ((1131, 1160), 're.sub', 're.sub', (['"""\\\\brt\\\\b"""', '""""""', 'tweet'], {}), "('\\\\brt\\\\b', '', tweet)\n", (1137, 1160), False, 'import re\n'), ((2459, 2593), 'os.system', 'os.system', (['f"""snscrape --jsonl --max-results {maxResult} {since} twitter-search "{q}{until}{from_}" > text-query-tweets.json"""'], {}), '(\n f\'snscrape --jsonl --max-results {maxResult} {since} twitter-search "{q}{until}{from_}" > text-query-tweets.json\'\n )\n', (2468, 2593), False, 'import os\n'), ((756, 782), 're.sub', 're.sub', (['"""@\\\\S+"""', '""""""', 'tweet'], {}), "('@\\\\S+', '', tweet)\n", (762, 782), False, 'import re\n')] |
from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd("mc"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 5)
await event.edit("mein")
animation_chars = [
"madarchod",
"Hu jo ",
"Isme",
"Aya",
"**Mein madarchod hu jo isme aya**"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i %5 ])
| [
"userbot.utils.admin_cmd",
"asyncio.sleep"
] | [((91, 106), 'userbot.utils.admin_cmd', 'admin_cmd', (['"""mc"""'], {}), "('mc')\n", (100, 106), False, 'from userbot.utils import admin_cmd\n'), ((477, 510), 'asyncio.sleep', 'asyncio.sleep', (['animation_interval'], {}), '(animation_interval)\n', (490, 510), False, 'import asyncio\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>, <NAME>
# *****************************************************************************/
"""
Brief:
telemetry_util.py - Utility routines for Telemetry data processing
Classes:
Enter GetHeaders("telemetry_util.py") to display Class listings.
"""
from __future__ import absolute_import, division, print_function, unicode_literals # , nested_scopes, generators, generator_stop, with_statement, annotations
import os
from array import array
#### import Utility functions
from src.software.parse.output_log import OutputLog
TelemetryLogBlockSize = 512 # defined by NVMe version 1.3 specification
MAX_BLOCK_SIZE = 4096
BYTES_PER_DWORD = 4
def GetTruncatedDataBuffer(imageObject, blockSize, maxBlockSize=131072):
"""
From the input image object, pull and return a byte buffer truncated to maxBlockSize bytes
Input: imageObject - Input binary data object
blockSize - Size in bytes of the input data object
maxBlockSize - Maximum data buffer size to return
"""
if (blockSize > maxBlockSize):
OutputLog.DebugPrint(2, format("Buffer truncated to %d bytes" % (maxBlockSize)))
returnBlockSize = maxBlockSize
else:
returnBlockSize = blockSize
if hasattr(imageObject, 'read'):
data = array('B', imageObject.read(returnBlockSize))
if len(data) < returnBlockSize:
OutputLog.DebugPrint(2, format("read EOF - returning zero data"))
return returnBlockSize, array('B', [0] * returnBlockSize)
else:
return returnBlockSize, data
else:
return returnBlockSize, imageObject
def cleanDir(dirName):
if (os.path.exists(dirName) is False):
folder = os.path.join(os.getcwd(), dirName)
else:
folder = dirName
if (os.path.exists(folder)):
for fileName in os.listdir(folder):
try:
deleteFile = os.path.join(folder, fileName)
if (os.path.isfile(deleteFile)):
os.remove(deleteFile)
except:
OutputLog.DebugPrint(2, format("Unable to clear file \"%s\"" % (deleteFile)))
else:
os.mkdir(folder)
return
def openWriteFile(fileName, text=False):
### open output file ###
if (text is True):
mode = "wt"
else:
mode = "wb"
try:
outputFile = open(fileName, mode)
except IOError:
OutputLog.Error(format("Unable to open file \"%s\" for output\n" % (fileName)))
outputFile = None
return outputFile
def openReadFile(fileName):
### open input file ###
try:
inputFile = open(fileName, "rb")
except IOError:
OutputLog.Error(format("Unable to open file \"%s\" for reading\n" % (fileName)))
inputFile = None
return inputFile
| [
"os.path.exists",
"os.listdir",
"array.array",
"os.path.join",
"os.getcwd",
"os.path.isfile",
"os.mkdir",
"os.remove"
] | [((1970, 1992), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1984, 1992), False, 'import os\n'), ((1836, 1859), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (1850, 1859), False, 'import os\n'), ((2020, 2038), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (2030, 2038), False, 'import os\n'), ((2348, 2364), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (2356, 2364), False, 'import os\n'), ((1902, 1913), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1911, 1913), False, 'import os\n'), ((1652, 1685), 'array.array', 'array', (['"""B"""', '([0] * returnBlockSize)'], {}), "('B', [0] * returnBlockSize)\n", (1657, 1685), False, 'from array import array\n'), ((2088, 2118), 'os.path.join', 'os.path.join', (['folder', 'fileName'], {}), '(folder, fileName)\n', (2100, 2118), False, 'import os\n'), ((2140, 2166), 'os.path.isfile', 'os.path.isfile', (['deleteFile'], {}), '(deleteFile)\n', (2154, 2166), False, 'import os\n'), ((2190, 2211), 'os.remove', 'os.remove', (['deleteFile'], {}), '(deleteFile)\n', (2199, 2211), False, 'import os\n')] |
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from backend.views import app_urls
from server import settings
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'server.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
(r'^paypal/', include('paypal.standard.ipn.urls')),
)
urlpatterns += app_urls
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"django.conf.urls.include",
"django.conf.urls.static.static",
"django.contrib.admin.autodiscover"
] | [((193, 213), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (211, 213), False, 'from django.contrib import admin\n'), ((524, 585), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (530, 585), False, 'from django.conf.urls.static import static\n'), ((375, 399), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (382, 399), False, 'from django.conf.urls import patterns, include, url\n'), ((420, 455), 'django.conf.urls.include', 'include', (['"""paypal.standard.ipn.urls"""'], {}), "('paypal.standard.ipn.urls')\n", (427, 455), False, 'from django.conf.urls import patterns, include, url\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from perlin import generate_perlin
def gaussian_2d_fast(size, amp, mu_x, mu_y, sigma):
    """Evaluate an amplitude-scaled 2D Gaussian on the unit torus.

    The domain is the unit square [0, 1) x [0, 1) with periodic boundary
    conditions: distances to the center (mu_x, mu_y) wrap around, so the
    bump is continuous across the edges.

    Parameters
    ----------
    size : tuple of int
        (n_x, n_y) grid resolution along x and y.
    amp : float
        Peak amplitude of the Gaussian.
    mu_x, mu_y : float
        Center coordinates in [0, 1).
    sigma : float
        Standard deviation of the (isotropic) Gaussian.

    Returns
    -------
    numpy.ndarray
        Array of shape (size[1], size[0]) with the Gaussian values.
    """
    grid_x, grid_y = np.meshgrid(np.arange(0, 1, 1 / size[0]),
                                 np.arange(0, 1, 1 / size[1]))

    # Wrapped (toroidal) distance along each axis: the shorter of the
    # direct separation and the separation going around the boundary.
    raw_dx = np.abs(grid_x - mu_x)
    raw_dy = np.abs(grid_y - mu_y)
    wrapped_dx = np.minimum(raw_dx, 1 - raw_dx)
    wrapped_dy = np.minimum(raw_dy, 1 - raw_dy)

    return amp * np.exp(-(wrapped_dx ** 2 + wrapped_dy ** 2) / (2 * sigma ** 2))
def excitability_matrix(sigma_e, sigma_i, perlin_scale, grid_offset,
                        p_e=0.05, p_i=0.05, we=0.22, g=4,
                        n_row_e=120, n_row_i=60, mu_gwn=0, multiple_connections=True,
                        expected_connectivity=True, is_plot=True):
    """Build the excitability (EI) landscape of a 2D E/I network on a torus.

    Each excitatory/inhibitory neuron projects with a wrapped Gaussian
    connection-probability profile; excitatory profiles are shifted along a
    Perlin-noise direction field. Per-source profiles are combined into an
    expected (or sampled) connectivity map, which is then converted into a
    membrane-potential landscape in mV.

    Args:
        sigma_e (float): width of the excitatory connection profile
        sigma_i (float): width of the inhibitory connection profile
        perlin_scale (int): spatial scale passed to generate_perlin
        grid_offset (float): shift of E profiles along the local Perlin
            direction, in units of the grid spacing
        p_e (float): total excitatory connection probability per source
        p_i (float): total inhibitory connection probability per source
        we (float): excitatory synapse strength in mV (before calibration)
        g (float): inhibition/excitation strength ratio
        n_row_e (int): side length of the excitatory grid
        n_row_i (int): side length of the inhibitory grid
        mu_gwn (float): external drive; divided by gL and scaled by 1e3,
            so presumably in Ampere -- TODO confirm units against caller
        multiple_connections (bool): if False, clip per-pair probabilities
            at 1 and rescale synaptic weights to conserve total input
        expected_connectivity (bool): if True use expected connection
            counts, otherwise draw one random sample
        is_plot (bool): if True, show diagnostic matplotlib figures

    Returns:
        tuple: (landscape, X, Y, U, V) where landscape is the
        (n_row_e, n_row_e) potential map in mV and X, Y, U, V are the
        quiver-plot coordinates and direction components.
    """
    n_pop_e = n_row_e**2
    n_pop_i = n_row_i**2
    gL = 25 * 1e-9  # leak conductance, Siemens
    # Peak amplitude so that each Gaussian profile integrates to p_e / p_i
    p_max_e = p_e / (2 * np.pi * sigma_e**2)
    p_max_i = p_i / (2 * np.pi * sigma_i**2)
    # Two landscapes: e and i. The contribution of each source neuron is
    # stored in its own (n_row_e, n_row_e) slice
    e_landscape = np.zeros((n_row_e**2, n_row_e, n_row_e))
    i_landscape = np.zeros((n_row_i**2, n_row_e, n_row_e))
    perlin = generate_perlin(n_row_e, perlin_scale, seed_value=0)
    x = np.arange(0, 1, 1/n_row_e)
    y = np.arange(0, 1, 1/n_row_e)
    X, Y = np.meshgrid(x, y)
    U = np.cos(perlin)
    V = np.sin(perlin)
    # Excitatory: Gaussian centred on each E neuron, shifted along the
    # local Perlin direction
    mu_xs = np.arange(0, 1, 1/n_row_e)
    mu_ys = np.arange(0, 1, 1/n_row_e)
    counter = 0
    for i, mu_x in enumerate(mu_xs):
        for j, mu_y in enumerate(mu_ys):
            x_offset = grid_offset / n_row_e * np.cos(perlin[i, j])
            y_offset = grid_offset / n_row_e * np.sin(perlin[i, j])
            mh = gaussian_2d_fast((n_row_e, n_row_e), p_max_e, mu_x+x_offset, mu_y+y_offset, sigma_e)
            if not multiple_connections:
                # clip probabilities at 1
                e_landscape[counter] = np.minimum(mh, np.ones(mh.shape))
            else:
                e_landscape[counter] = mh
            counter += 1
    # Inhibitory: unshifted Gaussians on the coarser I grid
    mu_xs = np.arange(1/n_row_e, 1+1/n_row_e, 1/n_row_i)
    mu_ys = np.arange(1/n_row_e, 1+1/n_row_e, 1/n_row_i)
    counter = 0
    for mu_x in mu_xs:
        for mu_y in mu_ys:
            mh = gaussian_2d_fast((n_row_e, n_row_e), p_max_i, mu_x, mu_y, sigma_i)
            if not multiple_connections:
                # clip probabilities at 1
                i_landscape[counter] = np.minimum(mh, np.ones(mh.shape))
            else:
                i_landscape[counter] = mh
            counter += 1
    # In total there should be about n_pop_e * n_pop_e * p_e e-connections
    # and n_pop_i * n_pop_e * p_i i-connections
    num_e_connections = np.sum(e_landscape)
    num_i_connections = np.sum(i_landscape)
    if multiple_connections:
        e_calibration = 1
        i_calibration = 1
    else:
        # Compensate in the synaptic weights for connections lost to clipping
        e_calibration = n_pop_e * n_pop_e * p_e / num_e_connections
        i_calibration = n_pop_i * n_pop_e * p_i / num_i_connections
    print('e_calibration is ', e_calibration)
    print('i_calibration is ', i_calibration)
    if expected_connectivity:
        # calculate expected number of connections
        e_landscape = n_row_e**2*np.mean(e_landscape, axis=0)
        i_landscape = n_row_i**2*np.mean(i_landscape, axis=0)
    else:  # we sample one realization of the connectivity
        sample_e_landscape = np.zeros((n_row_e, n_row_e))
        for i in range(n_row_e):
            for j in range(n_row_e):
                neuron = e_landscape[:, i, j]
                random_numbers = np.random.random(n_row_e**2)
                num_connected = len(np.where(random_numbers < neuron)[0])
                sample_e_landscape[i, j] = num_connected
        sample_i_landscape = np.zeros((n_row_e, n_row_e))
        for i in range(n_row_e):
            for j in range(n_row_e):
                neuron = i_landscape[:, i, j]
                random_numbers = np.random.random(n_row_i**2)
                num_connected = len(np.where(random_numbers < neuron)[0])
                sample_i_landscape[i, j] = num_connected
        e_landscape = sample_e_landscape
        i_landscape = sample_i_landscape
    # Now we fill a landscape with physical units (mV)
    rest_pot = -70  # mV
    thres_pot = -55  # mV
    ext_pot = mu_gwn / gL * 1e3  # mV
    no_activity_pot = rest_pot + ext_pot  # -56 mV when mu_gwn = 350 pA
    landscape = no_activity_pot * np.ones((n_row_e, n_row_e))
    # Synapse strengths
    we = we * e_calibration  # mV
    wi = -g * we * i_calibration / e_calibration  # mV
    landscape += we * e_landscape
    landscape += wi * i_landscape
    # Scale X and Y quiver according to values in the landscape;
    # first normalize the landscape to [0, 1]
    norm_landscape = np.copy(landscape)
    norm_landscape -= np.amin(norm_landscape)
    norm_landscape /= np.amax(norm_landscape)
    U = 0.5*np.multiply(U, norm_landscape)
    V = 0.5*np.multiply(V, norm_landscape)
    if is_plot:
        # Plot
        plt.figure(figsize=(8, 8))
        if expected_connectivity:
            mode = 'Expected '
        else:
            mode = 'Sampled '
        plt.title(mode+'EI landscape')
        plt.imshow(landscape, origin='lower', extent=[0, 1, 0, 1])
        norm = mpl.colors.Normalize(vmin=round(np.amin(landscape)), vmax=round(np.amax(landscape)))
        plt.colorbar(mpl.cm.ScalarMappable(norm=norm), label='mV')
        plt.quiver(X, Y, U, V, units='xy', scale=50)
        plt.suptitle(r'$\sigma_e=$'+str(sigma_e)+r', $\sigma_i=$'+str(sigma_i)+', perlin scale='+str(perlin_scale)+', g='+str(g),
                     fontsize=15)
        plt.show()
        # Plot binary landscape (below/above threshold)
        # Bug fix: previously hard-coded 14400 and (120, 120), which broke
        # for any n_row_e != 120
        above_thres = np.where(np.reshape(landscape, n_row_e**2) > thres_pot)
        binary_landscape = np.zeros(n_row_e**2)
        binary_landscape[above_thres] = 1
        binary_landscape = np.reshape(binary_landscape, (n_row_e, n_row_e))
        plt.figure(figsize=(8, 8))
        plt.title(mode+'EI landscape (binary)')
        plt.imshow(binary_landscape, origin='lower', extent=[0, 1, 0, 1])
        plt.quiver(X, Y, U, V, units='xy', scale=50)
        plt.suptitle(r'$\sigma_e=$'+str(sigma_e)+r', $\sigma_i=$'+str(sigma_i)+', perlin scale='+str(perlin_scale)+', g='+str(g),
                     fontsize=15)
        plt.show()
    return landscape, X, Y, U, V
| [
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.multiply",
"numpy.reshape",
"numpy.random.random",
"numpy.where",
"numpy.exp",
"matplotlib.cm.ScalarMappable",
"perlin.generate_perlin",
"numpy.meshgrid",
"numpy.abs",
"matplotlib.pyplot.quiver",
"numpy.amin",
... | [((172, 200), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / size[0])'], {}), '(0, 1, 1 / size[0])\n', (181, 200), True, 'import numpy as np\n'), ((207, 235), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / size[1])'], {}), '(0, 1, 1 / size[1])\n', (216, 235), True, 'import numpy as np\n'), ((247, 264), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (258, 264), True, 'import numpy as np\n'), ((1046, 1088), 'numpy.zeros', 'np.zeros', (['(n_row_e ** 2, n_row_e, n_row_e)'], {}), '((n_row_e ** 2, n_row_e, n_row_e))\n', (1054, 1088), True, 'import numpy as np\n'), ((1105, 1147), 'numpy.zeros', 'np.zeros', (['(n_row_i ** 2, n_row_e, n_row_e)'], {}), '((n_row_i ** 2, n_row_e, n_row_e))\n', (1113, 1147), True, 'import numpy as np\n'), ((1159, 1211), 'perlin.generate_perlin', 'generate_perlin', (['n_row_e', 'perlin_scale'], {'seed_value': '(0)'}), '(n_row_e, perlin_scale, seed_value=0)\n', (1174, 1211), False, 'from perlin import generate_perlin\n'), ((1220, 1248), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / n_row_e)'], {}), '(0, 1, 1 / n_row_e)\n', (1229, 1248), True, 'import numpy as np\n'), ((1253, 1281), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / n_row_e)'], {}), '(0, 1, 1 / n_row_e)\n', (1262, 1281), True, 'import numpy as np\n'), ((1289, 1306), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1300, 1306), True, 'import numpy as np\n'), ((1314, 1328), 'numpy.cos', 'np.cos', (['perlin'], {}), '(perlin)\n', (1320, 1328), True, 'import numpy as np\n'), ((1337, 1351), 'numpy.sin', 'np.sin', (['perlin'], {}), '(perlin)\n', (1343, 1351), True, 'import numpy as np\n'), ((1382, 1410), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / n_row_e)'], {}), '(0, 1, 1 / n_row_e)\n', (1391, 1410), True, 'import numpy as np\n'), ((1419, 1447), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / n_row_e)'], {}), '(0, 1, 1 / n_row_e)\n', (1428, 1447), True, 'import numpy as np\n'), ((2047, 2099), 'numpy.arange', 'np.arange', (['(1 
/ n_row_e)', '(1 + 1 / n_row_e)', '(1 / n_row_i)'], {}), '(1 / n_row_e, 1 + 1 / n_row_e, 1 / n_row_i)\n', (2056, 2099), True, 'import numpy as np\n'), ((2102, 2154), 'numpy.arange', 'np.arange', (['(1 / n_row_e)', '(1 + 1 / n_row_e)', '(1 / n_row_i)'], {}), '(1 / n_row_e, 1 + 1 / n_row_e, 1 / n_row_i)\n', (2111, 2154), True, 'import numpy as np\n'), ((2713, 2732), 'numpy.sum', 'np.sum', (['e_landscape'], {}), '(e_landscape)\n', (2719, 2732), True, 'import numpy as np\n'), ((2757, 2776), 'numpy.sum', 'np.sum', (['i_landscape'], {}), '(i_landscape)\n', (2763, 2776), True, 'import numpy as np\n'), ((4719, 4737), 'numpy.copy', 'np.copy', (['landscape'], {}), '(landscape)\n', (4726, 4737), True, 'import numpy as np\n'), ((4760, 4783), 'numpy.amin', 'np.amin', (['norm_landscape'], {}), '(norm_landscape)\n', (4767, 4783), True, 'import numpy as np\n'), ((4806, 4829), 'numpy.amax', 'np.amax', (['norm_landscape'], {}), '(norm_landscape)\n', (4813, 4829), True, 'import numpy as np\n'), ((286, 303), 'numpy.abs', 'np.abs', (['(xs - mu_x)'], {}), '(xs - mu_x)\n', (292, 303), True, 'import numpy as np\n'), ((343, 360), 'numpy.abs', 'np.abs', (['(ys - mu_y)'], {}), '(ys - mu_y)\n', (349, 360), True, 'import numpy as np\n'), ((399, 448), 'numpy.exp', 'np.exp', (['(-(dxs ** 2 + dys ** 2) / (2 * sigma ** 2))'], {}), '(-(dxs ** 2 + dys ** 2) / (2 * sigma ** 2))\n', (405, 448), True, 'import numpy as np\n'), ((3364, 3392), 'numpy.zeros', 'np.zeros', (['(n_row_e, n_row_e)'], {}), '((n_row_e, n_row_e))\n', (3372, 3392), True, 'import numpy as np\n'), ((3730, 3758), 'numpy.zeros', 'np.zeros', (['(n_row_e, n_row_e)'], {}), '((n_row_e, n_row_e))\n', (3738, 3758), True, 'import numpy as np\n'), ((4399, 4426), 'numpy.ones', 'np.ones', (['(n_row_e, n_row_e)'], {}), '((n_row_e, n_row_e))\n', (4406, 4426), True, 'import numpy as np\n'), ((4843, 4873), 'numpy.multiply', 'np.multiply', (['U', 'norm_landscape'], {}), '(U, norm_landscape)\n', (4854, 4873), True, 'import numpy as np\n'), ((4886, 
4916), 'numpy.multiply', 'np.multiply', (['V', 'norm_landscape'], {}), '(V, norm_landscape)\n', (4897, 4916), True, 'import numpy as np\n'), ((4957, 4983), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (4967, 4983), True, 'import matplotlib.pyplot as plt\n'), ((5100, 5132), 'matplotlib.pyplot.title', 'plt.title', (["(mode + 'EI landscape')"], {}), "(mode + 'EI landscape')\n", (5109, 5132), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5197), 'matplotlib.pyplot.imshow', 'plt.imshow', (['landscape'], {'origin': '"""lower"""', 'extent': '[0, 1, 0, 1]'}), "(landscape, origin='lower', extent=[0, 1, 0, 1])\n", (5149, 5197), True, 'import matplotlib.pyplot as plt\n'), ((5370, 5414), 'matplotlib.pyplot.quiver', 'plt.quiver', (['X', 'Y', 'U', 'V'], {'units': '"""xy"""', 'scale': '(50)'}), "(X, Y, U, V, units='xy', scale=50)\n", (5380, 5414), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5588), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5586, 5588), True, 'import matplotlib.pyplot as plt\n'), ((5744, 5759), 'numpy.zeros', 'np.zeros', (['(14400)'], {}), '(14400)\n', (5752, 5759), True, 'import numpy as np\n'), ((5829, 5869), 'numpy.reshape', 'np.reshape', (['binary_landscape', '(120, 120)'], {}), '(binary_landscape, (120, 120))\n', (5839, 5869), True, 'import numpy as np\n'), ((5877, 5903), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (5887, 5903), True, 'import matplotlib.pyplot as plt\n'), ((5911, 5952), 'matplotlib.pyplot.title', 'plt.title', (["(mode + 'EI landscape (binary)')"], {}), "(mode + 'EI landscape (binary)')\n", (5920, 5952), True, 'import matplotlib.pyplot as plt\n'), ((5959, 6024), 'matplotlib.pyplot.imshow', 'plt.imshow', (['binary_landscape'], {'origin': '"""lower"""', 'extent': '[0, 1, 0, 1]'}), "(binary_landscape, origin='lower', extent=[0, 1, 0, 1])\n", (5969, 6024), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6074), 
'matplotlib.pyplot.quiver', 'plt.quiver', (['X', 'Y', 'U', 'V'], {'units': '"""xy"""', 'scale': '(50)'}), "(X, Y, U, V, units='xy', scale=50)\n", (6040, 6074), True, 'import matplotlib.pyplot as plt\n'), ((6238, 6248), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6246, 6248), True, 'import matplotlib.pyplot as plt\n'), ((305, 322), 'numpy.abs', 'np.abs', (['(xs - mu_x)'], {}), '(xs - mu_x)\n', (311, 322), True, 'import numpy as np\n'), ((362, 379), 'numpy.abs', 'np.abs', (['(ys - mu_y)'], {}), '(ys - mu_y)\n', (368, 379), True, 'import numpy as np\n'), ((3219, 3247), 'numpy.mean', 'np.mean', (['e_landscape'], {'axis': '(0)'}), '(e_landscape, axis=0)\n', (3226, 3247), True, 'import numpy as np\n'), ((3281, 3309), 'numpy.mean', 'np.mean', (['i_landscape'], {'axis': '(0)'}), '(i_landscape, axis=0)\n', (3288, 3309), True, 'import numpy as np\n'), ((5316, 5348), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm'}), '(norm=norm)\n', (5337, 5348), True, 'import matplotlib as mpl\n'), ((1587, 1607), 'numpy.cos', 'np.cos', (['perlin[i, j]'], {}), '(perlin[i, j])\n', (1593, 1607), True, 'import numpy as np\n'), ((1654, 1674), 'numpy.sin', 'np.sin', (['perlin[i, j]'], {}), '(perlin[i, j])\n', (1660, 1674), True, 'import numpy as np\n'), ((3542, 3572), 'numpy.random.random', 'np.random.random', (['(n_row_e ** 2)'], {}), '(n_row_e ** 2)\n', (3558, 3572), True, 'import numpy as np\n'), ((3908, 3938), 'numpy.random.random', 'np.random.random', (['(n_row_i ** 2)'], {}), '(n_row_i ** 2)\n', (3924, 3938), True, 'import numpy as np\n'), ((5677, 5705), 'numpy.reshape', 'np.reshape', (['landscape', '(14400)'], {}), '(landscape, 14400)\n', (5687, 5705), True, 'import numpy as np\n'), ((1913, 1930), 'numpy.ones', 'np.ones', (['mh.shape'], {}), '(mh.shape)\n', (1920, 1930), True, 'import numpy as np\n'), ((2432, 2449), 'numpy.ones', 'np.ones', (['mh.shape'], {}), '(mh.shape)\n', (2439, 2449), True, 'import numpy as np\n'), ((5242, 5260), 
'numpy.amin', 'np.amin', (['landscape'], {}), '(landscape)\n', (5249, 5260), True, 'import numpy as np\n'), ((5274, 5292), 'numpy.amax', 'np.amax', (['landscape'], {}), '(landscape)\n', (5281, 5292), True, 'import numpy as np\n'), ((3607, 3640), 'numpy.where', 'np.where', (['(random_numbers < neuron)'], {}), '(random_numbers < neuron)\n', (3615, 3640), True, 'import numpy as np\n'), ((3973, 4006), 'numpy.where', 'np.where', (['(random_numbers < neuron)'], {}), '(random_numbers < neuron)\n', (3981, 4006), True, 'import numpy as np\n')] |
"""
Module: libfmp.c8.c8s2_salience
Author: <NAME>, <NAME>
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the FMP Notebooks (https://www.audiolabs-erlangen.de/FMP)
"""
import numpy as np
import librosa
from scipy import ndimage
from numba import jit
import libfmp.b
import libfmp.c8
@jit(nopython=True)
def principal_argument(v):
    """Map a value (or array of values) to its principal value in [-0.5, 0.5).
    | Notebook: C6/C6S1_NoveltyPhase.ipynb, see also
    | Notebook: C8/C8S2_InstantFreqEstimation.ipynb
    Args:
        v (float or np.ndarray): Value (or vector of values)
    Returns:
        w (float or np.ndarray): Principal value of v
    """
    # Shift by half a cycle, wrap into [0, 1), shift back
    return np.mod(v + 0.5, 1) - 0.5
@jit(nopython=True)
def compute_if(X, Fs, N, H):
    """Instantaneous frequency (IF) estimation
    | Notebook: C8/C8S2_InstantFreqEstimation.ipynb, see also
    | Notebook: C6/C6S1_NoveltyPhase.ipynb
    Args:
        X (np.ndarray): STFT
        Fs (scalar): Sampling rate
        N (int): Window size in samples
        H (int): Hop size in samples
    Returns:
        F_coef_IF (np.ndarray): Matrix of IF values
    """
    num_bins = X.shape[0]
    bin_index = np.arange(0, num_bins).reshape(-1, 1)
    # Phases of consecutive frames, normalized to cycles
    phase_prev = np.angle(X[:, 0:-1]) / (2 * np.pi)
    phase_next = np.angle(X[:, 1:]) / (2 * np.pi)
    # Bin offset (FMP, Eq. (8.45))
    kappa = (N / H) * principal_argument(phase_next - phase_prev - bin_index * H / N)
    # Instantaneous frequencies (FMP, Eq. (8.44))
    F_coef_IF = (bin_index + kappa) * Fs / N
    # Duplicate the first column so the output has as many frames as X
    first_frame = np.copy(F_coef_IF[:, 0]).reshape(-1, 1)
    return np.hstack((first_frame, F_coef_IF))
@jit(nopython=True)
def f_coef(k, Fs, N):
    """STFT center frequency
    Notebook: C8/C8S2_SalienceRepresentation.ipynb
    Args:
        k (int): Coefficient number
        Fs (scalar): Sampling rate in Hz
        N (int): Window length in samples
    Returns:
        freq (float): STFT center frequency
    """
    # The k-th coefficient sits at fraction k/N of the sampling rate
    center_freq = k * Fs / N
    return center_freq
@jit(nopython=True)
def frequency_to_bin_index(F, R=10.0, F_ref=55.0):
    """| Binning function with variable frequency resolution
    | Note: Indexing starts with 0 (opposed to [FMP, Eq. (8.49)])
    Notebook: C8/C8S2_SalienceRepresentation.ipynb
    Args:
        F (float): Frequency in Hz
        R (float): Frequency resolution in cents (Default value = 10.0)
        F_ref (float): Reference frequency in Hz (Default value = 55.0)
    Returns:
        bin_index (int): Index for bin (starting with index 0)
    """
    # Distance above the reference frequency, measured in bins of R cents
    bins_above_ref = (1200 / R) * np.log2(F / F_ref)
    # Adding 0.5 before flooring rounds to the nearest bin
    return np.floor(bins_above_ref + 0.5).astype(np.int64)
@jit(nopython=True)
def p_bin(b, freq, R=10.0, F_ref=55.0):
    """Computes binning mask [FMP, Eq. (8.50)]
    Notebook: C8/C8S2_SalienceRepresentation.ipynb
    Args:
        b (int): Bin index
        freq (float): Center frequency
        R (float): Frequency resolution in cents (Default value = 10.0)
        F_ref (float): Reference frequency in Hz (Default value = 55.0)
    Returns:
        mask (float): Binning mask
    """
    # A coefficient belongs to bin b iff its frequency maps to that bin index
    in_bin = frequency_to_bin_index(freq, R, F_ref) == b
    return in_bin.reshape(-1, 1)
@jit(nopython=True)
def compute_y_lf_bin(Y, Fs, N, R=10.0, F_min=55.0, F_max=1760.0):
    """Log-frequency Spectrogram with variable frequency resolution using binning
    Notebook: C8/C8S2_SalienceRepresentation.ipynb
    Args:
        Y (np.ndarray): Magnitude spectrogram
        Fs (scalar): Sampling rate in Hz
        N (int): Window length in samples
        R (float): Frequency resolution in cents (Default value = 10.0)
        F_min (float): Lower frequency bound (reference frequency) (Default value = 55.0)
        F_max (float): Upper frequency bound (is included) (Default value = 1760.0)
    Returns:
        Y_LF_bin (np.ndarray): Binned log-frequency spectrogram
        F_coef_hertz (np.ndarray): Frequency axis in Hz
        F_coef_cents (np.ndarray): Frequency axis in cents
    """
    # Number of bins needed to cover [F_min, F_max], cf. [FMP, Eq. (8.51)]
    num_bins = frequency_to_bin_index(np.array([F_max]), R, F_min)[0] + 1
    F_coef_hertz = 2 ** (np.arange(0, num_bins) * R / 1200) * F_min
    F_coef_cents = np.arange(0, num_bins*R, R)
    # Keep only STFT coefficients whose center frequency lies in [F_min, F_max]
    freq = f_coef(np.arange(0, Y.shape[0]), Fs, N)
    in_range_idx = np.where(np.logical_and(freq >= F_min, freq <= F_max))[0]
    freq_in_range = freq[in_range_idx]
    Y_in_range = Y[in_range_idx, :]
    # Accumulate the magnitudes that fall into each bin
    Y_LF_bin = np.zeros((num_bins, Y.shape[1]))
    for b in range(num_bins):
        coef_mask = p_bin(b, freq_in_range, R, F_min)
        Y_LF_bin[b, :] = (Y_in_range*coef_mask).sum(axis=0)
    return Y_LF_bin, F_coef_hertz, F_coef_cents
@jit(nopython=True)
def p_bin_if(b, F_coef_IF, R=10.0, F_ref=55.0):
    """Computes binning mask for instantaneous frequency binning [FMP, Eq. (8.52)]
    Notebook: C8/C8S2_SalienceRepresentation.ipynb
    Args:
        b (int): Bin index
        F_coef_IF (float): Instantaneous frequencies
        R (float): Frequency resolution in cents (Default value = 10.0)
        F_ref (float): Reference frequency in Hz (Default value = 55.0)
    Returns:
        mask (np.ndarray): Binning mask
    """
    # True wherever an instantaneous frequency maps to bin index b
    return frequency_to_bin_index(F_coef_IF, R, F_ref) == b
@jit(nopython=True)
def compute_y_lf_if_bin(X, Fs, N, H, R=10, F_min=55.0, F_max=1760.0, gamma=0.0):
    """Binned log-frequency spectrogram with variable frequency resolution based on instantaneous frequency
    Notebook: C8/C8S2_SalienceRepresentation.ipynb
    Args:
        X (np.ndarray): Complex spectrogram
        Fs (scalar): Sampling rate in Hz
        N (int): Window length in samples
        H (int): Hopsize in samples
        R (float): Frequency resolution in cents (Default value = 10)
        F_min (float): Lower frequency bound (reference frequency) (Default value = 55.0)
        F_max (float): Upper frequency bound (Default value = 1760.0)
        gamma (float): Logarithmic compression factor (Default value = 0.0)
    Returns:
        Y_LF_IF_bin (np.ndarray): Binned log-frequency spectrogram using instantaneous frequency
        F_coef_hertz (np.ndarray): Frequency axis in Hz
        F_coef_cents (np.ndarray): Frequency axis in cents
    """
    # Compute instantaneous frequencies
    F_coef_IF = libfmp.c8.compute_if(X, Fs, N, H)
    # Zero out IF values outside [F_min, F_max); zeroed entries do not map to
    # any nonnegative bin index and are therefore ignored by the binning below
    freq_lim_mask = np.logical_and(F_coef_IF >= F_min, F_coef_IF < F_max)
    F_coef_IF = F_coef_IF * freq_lim_mask
    # Initialize output array and compute frequency axis
    B = frequency_to_bin_index(np.array([F_max]), R, F_min)[0] + 1
    F_coef_hertz = 2 ** (np.arange(0, B) * R / 1200) * F_min
    F_coef_cents = np.arange(0, B*R, R)
    Y_LF_IF_bin = np.zeros((B, X.shape[1]))
    # Magnitude binning: power spectrogram, or log-compressed magnitude if gamma > 0
    if gamma == 0:
        Y = np.abs(X) ** 2
    else:
        Y = np.log(1 + np.float32(gamma)*np.abs(X))
    for b in range(B):
        coef_mask = p_bin_if(b, F_coef_IF, R, F_min)
        Y_LF_IF_bin[b, :] = (Y * coef_mask).sum(axis=0)
    return Y_LF_IF_bin, F_coef_hertz, F_coef_cents
@jit(nopython=True)
def harmonic_summation(Y, num_harm=10, alpha=1.0):
    """Harmonic summation for spectrogram [FMP, Eq. (8.54)]
    Notebook: C8/C8S2_SalienceRepresentation.ipynb
    Args:
        Y (np.ndarray): Magnitude spectrogram
        num_harm (int): Number of harmonics (Default value = 10)
        alpha (float): Weighting parameter (Default value = 1.0)
    Returns:
        Y_HS (np.ndarray): Spectrogram after harmonic summation
    """
    Y_HS = np.zeros(Y.shape)
    # Zero padding so the largest harmonic index k*num_harm stays in bounds
    Y_zero_pad = np.vstack((Y, np.zeros((Y.shape[0]*num_harm, Y.shape[1]))))
    K = Y.shape[0]
    # Harmonic numbers and weights alpha**(h-1) do not depend on k,
    # so compute them once outside the loop (previously recomputed per k)
    harmonics = np.arange(1, num_harm+1)
    weights = alpha ** (harmonics - 1).reshape(-1, 1)
    for k in range(K):
        # The h-th harmonic of bin k sits at bin h*k
        harm_idx = harmonics * k
        Y_HS[k, :] = (Y_zero_pad[harm_idx, :] * weights).sum(axis=0)
    return Y_HS
@jit(nopython=True)
def harmonic_summation_lf(Y_LF_bin, R, num_harm=10, alpha=1.0):
    """Harmonic summation for log-frequency spectrogram [FMP, Eq. (8.55)]
    Notebook: C8/C8S2_SalienceRepresentation.ipynb
    Args:
        Y_LF_bin (np.ndarray): Log-frequency spectrogram
        R (float): Frequency resolution in cents
        num_harm (int): Number of harmonics (Default value = 10)
        alpha (float): Weighting parameter (Default value = 1.0)
    Returns:
        Y_LF_bin_HS (np.ndarray): Log-frequency spectrogram after harmonic summation
    """
    Y_LF_bin_HS = np.zeros(Y_LF_bin.shape)
    # On a log-frequency axis the h-th harmonic lies log2(h)*1200/R bins above
    # the fundamental; pad with zeros so the largest shift stays in bounds
    pad_len = int(np.floor(np.log2(num_harm) * 1200 / R))
    Y_LF_bin_zero_pad = np.vstack((Y_LF_bin, np.zeros((pad_len, Y_LF_bin.shape[1]))))
    B = Y_LF_bin.shape[0]
    for b in range(B):
        harmonics = np.arange(1, num_harm+1)
        # Bin indices of harmonics 1..num_harm relative to bin b
        harm_idx = b + np.floor(np.log2(harmonics) * 1200 / R).astype(np.int64)
        # Geometric weighting alpha**(h-1) of the h-th harmonic
        weights = alpha ** (np.arange(1, num_harm+1) - 1).reshape(-1, 1)
        Y_LF_bin_HS[b, :] = (Y_LF_bin_zero_pad[harm_idx, :] * weights).sum(axis=0)
    return Y_LF_bin_HS
def compute_salience_rep(x, Fs, N, H, R, F_min=55.0, F_max=1760.0, num_harm=10, freq_smooth_len=11, alpha=1.0,
                         gamma=0.0):
    """Salience representation [FMP, Eq. (8.56)]
    Notebook: C8/C8S2_SalienceRepresentation.ipynb
    Args:
        x (np.ndarray): Audio signal
        Fs (scalar): Sampling frequency
        N (int): Window length in samples
        H (int): Hopsize in samples
        R (float): Frequency resolution in cents
        F_min (float): Lower frequency bound (reference frequency) (Default value = 55.0)
        F_max (float): Upper frequency bound (Default value = 1760.0)
        num_harm (int): Number of harmonics (Default value = 10)
        freq_smooth_len (int): Filter length for vertical smoothing (Default value = 11)
        alpha (float): Weighting parameter (Default value = 1.0)
        gamma (float): Logarithmic compression factor (Default value = 0.0)
    Returns:
        Z (np.ndarray): Salience representation
        F_coef_hertz (np.ndarray): Frequency axis in Hz
        F_coef_cents (np.ndarray): Frequency axis in cents
    """
    X = librosa.stft(x, n_fft=N, hop_length=H, win_length=N, pad_mode='constant')
    Y_LF_IF_bin, F_coef_hertz, F_coef_cents = compute_y_lf_if_bin(X, Fs, N, H, R, F_min, F_max, gamma=gamma)
    # Vertical (frequency-axis) smoothing with a Hann window.
    # Fix: call ndimage.convolve1d directly; the ndimage.filters namespace is
    # deprecated and removed in recent SciPy releases.
    Y_LF_IF_bin = ndimage.convolve1d(Y_LF_IF_bin, np.hanning(freq_smooth_len), axis=0, mode='constant')
    Z = harmonic_summation_lf(Y_LF_IF_bin, R=R, num_harm=num_harm, alpha=alpha)
    return Z, F_coef_hertz, F_coef_cents
| [
"numpy.hanning",
"numpy.abs",
"numpy.copy",
"numpy.logical_and",
"numpy.angle",
"numpy.array",
"numpy.zeros",
"numba.jit",
"librosa.stft",
"numpy.log2",
"numpy.mod",
"numpy.float32",
"numpy.arange"
] | [((325, 343), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (328, 343), False, 'from numba import jit\n'), ((709, 727), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (712, 727), False, 'from numba import jit\n'), ((1672, 1690), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1675, 1690), False, 'from numba import jit\n'), ((2016, 2034), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2019, 2034), False, 'from numba import jit\n'), ((2645, 2663), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2648, 2663), False, 'from numba import jit\n'), ((3186, 3204), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3189, 3204), False, 'from numba import jit\n'), ((4608, 4626), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4611, 4626), False, 'from numba import jit\n'), ((5186, 5204), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5189, 5204), False, 'from numba import jit\n'), ((6955, 6973), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (6958, 6973), False, 'from numba import jit\n'), ((7767, 7785), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (7770, 7785), False, 'from numba import jit\n'), ((4163, 4185), 'numpy.arange', 'np.arange', (['(0)', '(B * R)', 'R'], {}), '(0, B * R, R)\n', (4172, 4185), True, 'import numpy as np\n'), ((4199, 4224), 'numpy.zeros', 'np.zeros', (['(B, Y.shape[1])'], {}), '((B, Y.shape[1]))\n', (4207, 4224), True, 'import numpy as np\n'), ((6270, 6323), 'numpy.logical_and', 'np.logical_and', (['(F_coef_IF >= F_min)', '(F_coef_IF < F_max)'], {}), '(F_coef_IF >= F_min, F_coef_IF < F_max)\n', (6284, 6323), True, 'import numpy as np\n'), ((6570, 6592), 'numpy.arange', 'np.arange', (['(0)', '(B * R)', 'R'], {}), '(0, B * R, R)\n', (6579, 6592), True, 'import numpy as np\n'), ((6609, 6634), 'numpy.zeros', 
'np.zeros', (['(B, X.shape[1])'], {}), '((B, X.shape[1]))\n', (6617, 6634), True, 'import numpy as np\n'), ((7421, 7438), 'numpy.zeros', 'np.zeros', (['Y.shape'], {}), '(Y.shape)\n', (7429, 7438), True, 'import numpy as np\n'), ((8348, 8372), 'numpy.zeros', 'np.zeros', (['Y_LF_bin.shape'], {}), '(Y_LF_bin.shape)\n', (8356, 8372), True, 'import numpy as np\n'), ((9984, 10057), 'librosa.stft', 'librosa.stft', (['x'], {'n_fft': 'N', 'hop_length': 'H', 'win_length': 'N', 'pad_mode': '"""constant"""'}), "(x, n_fft=N, hop_length=H, win_length=N, pad_mode='constant')\n", (9996, 10057), False, 'import librosa\n'), ((668, 686), 'numpy.mod', 'np.mod', (['(v + 0.5)', '(1)'], {}), '(v + 0.5, 1)\n', (674, 686), True, 'import numpy as np\n'), ((1147, 1167), 'numpy.angle', 'np.angle', (['X[:, 0:-1]'], {}), '(X[:, 0:-1])\n', (1155, 1167), True, 'import numpy as np\n'), ((1194, 1212), 'numpy.angle', 'np.angle', (['X[:, 1:]'], {}), '(X[:, 1:])\n', (1202, 1212), True, 'import numpy as np\n'), ((4263, 4278), 'numpy.arange', 'np.arange', (['(0)', 'K'], {}), '(0, K)\n', (4272, 4278), True, 'import numpy as np\n'), ((8586, 8612), 'numpy.arange', 'np.arange', (['(1)', '(num_harm + 1)'], {}), '(1, num_harm + 1)\n', (8595, 8612), True, 'import numpy as np\n'), ((10241, 10268), 'numpy.hanning', 'np.hanning', (['freq_smooth_len'], {}), '(freq_smooth_len)\n', (10251, 10268), True, 'import numpy as np\n'), ((1261, 1276), 'numpy.arange', 'np.arange', (['(0)', 'K'], {}), '(0, K)\n', (1270, 1276), True, 'import numpy as np\n'), ((4315, 4359), 'numpy.logical_and', 'np.logical_and', (['(freq >= F_min)', '(freq <= F_max)'], {}), '(freq >= F_min, freq <= F_max)\n', (4329, 4359), True, 'import numpy as np\n'), ((6691, 6700), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (6697, 6700), True, 'import numpy as np\n'), ((7470, 7515), 'numpy.zeros', 'np.zeros', (['(Y.shape[0] * num_harm, Y.shape[1])'], {}), '((Y.shape[0] * num_harm, Y.shape[1]))\n', (7478, 7515), True, 'import numpy as np\n'), ((7577, 7603), 
'numpy.arange', 'np.arange', (['(1)', '(num_harm + 1)'], {}), '(1, num_harm + 1)\n', (7586, 7603), True, 'import numpy as np\n'), ((8476, 8514), 'numpy.zeros', 'np.zeros', (['(pad_len, Y_LF_bin.shape[1])'], {}), '((pad_len, Y_LF_bin.shape[1]))\n', (8484, 8514), True, 'import numpy as np\n'), ((4047, 4064), 'numpy.array', 'np.array', (['[F_max]'], {}), '([F_max])\n', (4055, 4064), True, 'import numpy as np\n'), ((6454, 6471), 'numpy.array', 'np.array', (['[F_max]'], {}), '([F_max])\n', (6462, 6471), True, 'import numpy as np\n'), ((1594, 1618), 'numpy.copy', 'np.copy', (['F_coef_IF[:, 0]'], {}), '(F_coef_IF[:, 0])\n', (1601, 1618), True, 'import numpy as np\n'), ((4108, 4123), 'numpy.arange', 'np.arange', (['(0)', 'B'], {}), '(0, B)\n', (4117, 4123), True, 'import numpy as np\n'), ((6515, 6530), 'numpy.arange', 'np.arange', (['(0)', 'B'], {}), '(0, B)\n', (6524, 6530), True, 'import numpy as np\n'), ((6739, 6756), 'numpy.float32', 'np.float32', (['gamma'], {}), '(gamma)\n', (6749, 6756), True, 'import numpy as np\n'), ((6757, 6766), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (6763, 6766), True, 'import numpy as np\n'), ((8400, 8417), 'numpy.log2', 'np.log2', (['num_harm'], {}), '(num_harm)\n', (8407, 8417), True, 'import numpy as np\n'), ((2578, 2596), 'numpy.log2', 'np.log2', (['(F / F_ref)'], {}), '(F / F_ref)\n', (2585, 2596), True, 'import numpy as np\n'), ((7634, 7660), 'numpy.arange', 'np.arange', (['(1)', '(num_harm + 1)'], {}), '(1, num_harm + 1)\n', (7643, 7660), True, 'import numpy as np\n'), ((8719, 8745), 'numpy.arange', 'np.arange', (['(1)', '(num_harm + 1)'], {}), '(1, num_harm + 1)\n', (8728, 8745), True, 'import numpy as np\n'), ((8643, 8661), 'numpy.log2', 'np.log2', (['harmonics'], {}), '(harmonics)\n', (8650, 8661), True, 'import numpy as np\n')] |
#a2.t2 #This program is to create a function to assess humidity
#taking advantage of python statistics library
import statistics
def get_humidity_value(humidity_value):
    """Classify a collection of humidity readings by their median.

    Args:
        humidity_value: iterable of numeric humidity readings (percent)

    Returns:
        str: "It is Dry" if the median is below 30, "High Humidity" if it
        is above 60, otherwise "It's OK".
    """
    # Fix: compute the median once instead of twice -- the original called
    # statistics.median in both branches, which is redundant and fails on
    # one-shot iterators.
    median_humidity = statistics.median(humidity_value)
    if median_humidity < 30:
        return "It is Dry"
    elif median_humidity > 60:
        return "High Humidity"
    else:
        return "It's OK"
| [
"statistics.median"
] | [((176, 209), 'statistics.median', 'statistics.median', (['humidity_value'], {}), '(humidity_value)\n', (193, 209), False, 'import statistics\n'), ((252, 285), 'statistics.median', 'statistics.median', (['humidity_value'], {}), '(humidity_value)\n', (269, 285), False, 'import statistics\n')] |
import argparse
import ast
class PrintStatementParser(ast.NodeVisitor):
    """AST visitor that records every call to the built-in ``print``."""

    def __init__(self):
        # Call nodes whose function is the bare name ``print``
        self.print_nodes = []

    def visit_Call(self, node):
        # Only bare-name calls count; attribute calls like obj.print() do not.
        if isinstance(node.func, ast.Name) and node.func.id == 'print':
            self.print_nodes.append(node)
def main(argv=None):
    """Scan the given Python files and report every call to ``print``.

    Args:
        argv: command-line argument list (defaults to sys.argv via argparse)

    Returns:
        int: 1 if any file failed to parse, otherwise 0. Reports about
        detected ``print`` calls are advisory and do not fail the run.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*')
    args = parser.parse_args(argv)

    detected_print_nodes = False
    for filename in args.filenames:
        with open(filename) as f:
            file_content = f.read()
        try:
            ast_object = ast.parse(file_content, filename=filename)
        except SyntaxError:
            print("Could not parse AST of {0}".format(filename))
            return 1
        file_lines = file_content.split("\n")
        visitor = PrintStatementParser()
        visitor.visit(ast_object)
        for node in visitor.print_nodes:
            # Bug fix: the template hard-coded "(unknown)" even though the
            # filename was passed as a format argument and never used.
            print("{filename}:{lineno} called `print`: {source}".format(
                filename=filename,
                lineno=node.lineno,
                source=file_lines[node.lineno - 1],
            ))
        if visitor.print_nodes:
            detected_print_nodes = True

    if detected_print_nodes:
        print("")
        print("There were calls to `print` detected.")
        print("Have you considered using the standard library logger instead?")
    return 0
| [
"ast.parse",
"argparse.ArgumentParser"
] | [((359, 384), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (382, 384), False, 'import argparse\n'), ((648, 690), 'ast.parse', 'ast.parse', (['file_content'], {'filename': 'filename'}), '(file_content, filename=filename)\n', (657, 690), False, 'import ast\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Any, Optional, Tuple
import torch
import torch.distributed as dist
from colossalai.communication import all_gather, all_reduce, reduce_scatter
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
class linear_3d(torch.autograd.Function):
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                input_: Tensor,
                weight: Tensor,
                bias: Optional[Tensor],
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        """Forward pass of the 3D tensor-parallel linear layer.

        Gathers the partitioned input along ``input_dim`` across
        ``input_parallel_mode``, performs the local matmul with ``weight``,
        reduce-scatters the result along ``output_dim`` across
        ``output_parallel_mode``, and adds ``bias`` if given. The parallel
        modes and dims are stashed on ``ctx`` for the backward pass.
        Note: ``custom_fwd(cast_inputs=torch.float16)`` casts floating-point
        inputs to fp16 when autocast is active.
        """
        # The contraction dimension of input_ must match the weight's rows
        assert input_.shape[-1] == weight.shape[0], \
            'Invalid shapes: input = {}, weight = {}.'.format(input_.shape, weight.shape)
        ctx.use_bias = bias is not None
        # Gather the input shards along input_dim so each rank holds the full
        # activation needed for the local matmul
        input_ = all_gather(input_, input_dim, input_parallel_mode)
        input_ = torch.cat(input_, dim=input_dim)
        # weight = all_gather(weight, weight_dim, weight_parallel_mode)
        ctx.save_for_backward(input_, weight)
        output = torch.matmul(input_, weight)
        # Sum partial products across ranks and scatter the result along output_dim
        output = reduce_scatter(output, output_dim, output_parallel_mode)
        if bias is not None:
            # ranks_in_group = gpc.get_ranks_in_group(output_parallel_mode)
            # src_rank = ranks_in_group[gpc.get_local_rank(input_parallel_mode)]
            # dist.broadcast(bias,
            #                src=src_rank,
            #                group=gpc.get_group(output_parallel_mode))
            # bias = all_gather(bias, -1, weight_parallel_mode)
            output += bias
        # ctx.src_rank = src_rank
        # ctx.save_for_backward(input_, weight)
        # output = torch.matmul(input_, weight)
        # dist.all_reduce(output, group=gpc.get_group(output_parallel_mode))
        # output += bias
        # Stash the communication setup for backward
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        ctx.input_dim = input_dim
        ctx.weight_dim = weight_dim
        ctx.output_dim = output_dim
        return output
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
input_, weight = ctx.saved_tensors
with torch.no_grad():
# input_grad = torch.matmul(output_grad, weight.transpose(0, 1))
# dist.all_reduce(input_grad,
# group=gpc.get_group(ctx.input_parallel_mode))
# weight_grad = torch.matmul(
# input_.reshape(-1, input_.shape[-1]).transpose(0, 1),
# output_grad.reshape(-1, output_grad.shape[-1]))
# dist.all_reduce(weight_grad,
# group=gpc.get_group(ctx.weight_parallel_mode))
# bias_grad = torch.sum(output_grad,
# dim=tuple(
# range(len(output_grad.shape))[:-1]))
# bias_grad = reduce_scatter(bias_grad, -1,
# ctx.weight_parallel_mode)
# dist.reduce(bias_grad,
# dst=ctx.src_rank,
# group=gpc.get_group(ctx.output_parallel_mode))
# if gpc.get_local_rank(
# ctx.output_parallel_mode) != gpc.get_local_rank(
# ctx.input_parallel_mode):
# bias_grad = None
# input_ = all_gather(input_, ctx.input_dim, ctx.input_parallel_mode)
# weight = all_gather(weight, ctx.weight_dim,
# ctx.weight_parallel_mode)
output_grad = all_gather(output_grad, ctx.output_dim,
ctx.output_parallel_mode)
output_grad = torch.cat(output_grad, dim=ctx.output_dim)
input_grad = torch.matmul(output_grad, weight.transpose(0, 1))
input_grad, input_op = reduce_scatter(input_grad, ctx.input_dim,
ctx.input_parallel_mode,
async_op=True)
weight_grad = torch.matmul(
input_.reshape(-1, input_.shape[-1]).transpose(0, 1),
output_grad.reshape(-1, output_grad.shape[-1]))
# weight_grad = torch.matmul(
# input_.reshape(-1, input_.shape[-1]).transpose(0, 1),
# output_grad.reshape(-1, output_grad.shape[-1]))
# weight_grad = reduce_scatter(weight_grad, ctx.weight_dim,
# ctx.weight_parallel_mode)
if ctx.use_bias:
bias_grad = torch.sum(output_grad,
dim=tuple(
range(len(output_grad.shape))[:-1]))
# bias_grad =all_reduce(bias_grad, ctx.output_parallel_mode)
# dist.all_reduce(bias_grad,
# group=gpc.get_group(ctx.weight_parallel_mode))
weight_grad = torch.cat([weight_grad, torch.unsqueeze(bias_grad, dim=0)])
weight_grad, weight_op = all_reduce(weight_grad, ctx.weight_parallel_mode, async_op=True)
input_op.wait()
weight_op.wait()
if ctx.use_bias:
bias_grad = weight_grad[-1]
weight_grad = weight_grad[:-1]
return input_grad, weight_grad, bias_grad, None, None, None, None, None, None
class layer_norm_3d(torch.autograd.Function):
    """3D-parallel layer normalization over the last (hidden) dimension.

    The hidden dimension is sharded across ``output_parallel_mode``, so the
    per-rank partial sums for mean and variance are all-reduced over that
    group before normalizing; each rank then normalizes its own shard with
    the global statistics.
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, weight: Tensor, bias: Tensor,
                normalized_shape: int, eps: float,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        """Normalize ``input_`` and apply the affine transform.

        :param normalized_shape: full (unsharded) size of the hidden dim,
            used to turn the all-reduced sums into averages
        :param eps: numerical-stability term added to the variance
        :return: ``weight * (input_ - mean) / sigma + bias``
        """
        # Global mean: all-reduce partial hidden-dim sums across shards.
        mean = all_reduce(torch.sum(input_, dim=-1, keepdim=True),
                          output_parallel_mode) / normalized_shape
        mu = input_ - mean
        # Global (biased) variance via the same reduction pattern.
        var = all_reduce(torch.sum(mu**2, dim=-1, keepdim=True),
                         output_parallel_mode) / normalized_shape
        sigma = torch.sqrt(var + eps)

        # mu and sigma are reused in backward; weight is needed for dz.
        ctx.save_for_backward(mu, sigma, weight)

        z = mu / sigma
        output = weight * z + bias

        ctx.normalized_shape = normalized_shape
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode

        return output

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Return grads for (input_, weight, bias) plus Nones for the
        non-tensor forward arguments."""
        mu, sigma, weight = ctx.saved_tensors
        with torch.no_grad():
            bias_grad, weight_grad = output_grad, output_grad * mu / sigma
            # Stack both parameter grads so a single fused all-reduce covers
            # them; sum out every non-hidden dimension first.
            grads = torch.stack([bias_grad, weight_grad]).contiguous()
            grads = torch.sum(grads, dim=tuple(range(len(grads.shape))[1:-1]))
            grads = all_reduce(grads, ctx.weight_parallel_mode)
            grads = all_reduce(grads, ctx.input_parallel_mode)
            bias_grad, weight_grad = grads[0], grads[1]

            # Standard layer-norm input gradient; the hidden-dim sums are
            # all-reduced so each shard sees the global reductions.
            dz = output_grad * weight
            dvar = dz * mu * (-0.5) * sigma**(-3)
            dvar = all_reduce(torch.sum(dvar, dim=-1, keepdim=True), ctx.output_parallel_mode)
            dmean = dz * (-1 / sigma) + dvar * -2 * mu / ctx.normalized_shape
            dmean = all_reduce(torch.sum(dmean, dim=-1, keepdim=True), ctx.output_parallel_mode)
            input_grad = dz / sigma + dvar * 2 * mu / ctx.normalized_shape + dmean / ctx.normalized_shape

        return input_grad, weight_grad, bias_grad, None, None, None, None, None
class Matmul_AB_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = AB` on the 3D process grid.

    Both operands are sharded; forward gathers each over its own group,
    multiplies locally, and reduce-scatters the product. Backward reuses the
    sibling functions (``A_grad = dC @ B^T``, ``B_grad = A^T @ dC``) so the
    communication pattern stays consistent.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        """Return this rank's shard of ``A @ B``.

        Shard shapes (q = grid side length):
        # A: [m/q^2, n, k/q]
        # B: [k/q, h/q^2]
        # C: [m/q^2, n, h/q]
        """
        ctx.save_for_backward(A, B)

        assert A.shape[-1] == B.shape[0], \
            'Invalid shapes: A={}, B={}.'.format(A.shape, B.shape)
        # Gather the full operands along their partition dims.
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)

        C = torch.matmul(A_temp, B_temp)
        # Keep only this rank's shard of the product.
        out = reduce_scatter(C, output_dim, output_parallel_mode)

        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim

        return out

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Return grads for (A, B) plus Nones for the non-tensor args."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            # dA = dC @ B^T, communicated with the roles of the groups
            # permuted accordingly.
            A_grad = Matmul_ABT_3D.apply(output_grad, B, ctx.depth,
                                          ctx.C_group_parallel_mode,
                                          ctx.B_group_parallel_mode,
                                          ctx.A_group_parallel_mode, ctx.C_dim,
                                          ctx.B_dim, ctx.A_dim)
            # dB = A^T @ dC.
            B_grad = Matmul_ATB_3D.apply(A, output_grad, ctx.depth,
                                          ctx.A_group_parallel_mode,
                                          ctx.C_group_parallel_mode,
                                          ctx.B_group_parallel_mode, ctx.A_dim,
                                          ctx.C_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ABT_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = AB^T` on the 3D process grid.

    Same gather-multiply-scatter pattern as ``Matmul_AB_3D`` but with the
    second operand transposed in the local matmul.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        """Return this rank's shard of ``A @ B^T``.

        Shard shapes (q = grid side length):
        # A: [m/q^2, n, h/q]
        # B: [k/q, h/q^2]
        # C: [m/q^2, n, k/q]
        """
        ctx.save_for_backward(A, B)

        # Gather the full operands along their partition dims.
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)

        C = torch.matmul(A_temp, B_temp.transpose(0, 1))
        # Keep only this rank's shard of the product.
        out = reduce_scatter(C, output_dim, output_parallel_mode)

        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim

        return out

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Return grads for (A, B) plus Nones for the non-tensor args."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            # dA = dC @ B.
            A_grad = Matmul_AB_3D.apply(output_grad, B, ctx.depth,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode,
                                         ctx.A_group_parallel_mode, ctx.C_dim,
                                         ctx.B_dim, ctx.A_dim)
            # dB = dC^T @ A.
            B_grad = Matmul_ATB_3D.apply(output_grad, A, ctx.depth,
                                          ctx.C_group_parallel_mode,
                                          ctx.A_group_parallel_mode,
                                          ctx.B_group_parallel_mode, ctx.C_dim,
                                          ctx.A_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ATB_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = A^TB` on the 3D process grid.

    Gathers both operands, flattens their leading dims to 2D, multiplies
    ``A^T @ B`` locally, then reduce-scatters the product.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = 0,
                output_dim: int = -1) -> Tensor:
        """Return this rank's shard of ``A^T @ B``.

        Shard shapes (q = grid side length):
        # A: [m/q^2, n, k/q]
        # B: [m/q^2, n, h/q]
        # C: [k/q, h/q^2]
        """
        ctx.save_for_backward(A, B)

        # Gather the full operands and collapse the leading dims so the
        # local matmul is a plain 2D contraction over all samples.
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        A_temp = A_temp.reshape(-1, A.shape[-1])
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)
        B_temp = B_temp.reshape(-1, B.shape[-1])

        C = torch.matmul(A_temp.transpose(0, 1), B_temp)
        # Keep only this rank's shard of the product.
        out = reduce_scatter(C, output_dim, output_parallel_mode)

        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim

        return out

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Return grads for (A, B) plus Nones for the non-tensor args."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            # dA = B @ dC^T.
            A_grad = Matmul_ABT_3D.apply(B, output_grad, ctx.depth,
                                          ctx.B_group_parallel_mode,
                                          ctx.C_group_parallel_mode,
                                          ctx.A_group_parallel_mode, ctx.B_dim,
                                          ctx.C_dim, ctx.A_dim)
            # dB = A @ dC.
            B_grad = Matmul_AB_3D.apply(A, output_grad, ctx.depth,
                                         ctx.A_group_parallel_mode,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode, ctx.A_dim,
                                         ctx.C_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Add_3D(torch.autograd.Function):
    """Matrix add bias: :math:`C = A + b` on the 3D process grid.

    The bias is held by one rank per input group; forward broadcasts it
    within the input group and all-gathers the shards over the weight group
    before adding. Backward reverses that: reduce-scatter then reduce back
    to the owning rank, returning None on non-owning ranks.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        """Return ``input_ + bias`` with the bias reconstructed from shards.

        Shard shapes (q = grid side length):
        # input: [m/q^2, n, h/q]
        # bias: [h/q^2]
        """
        # The broadcast source inside the input group is the rank whose
        # local index in the output group matches.
        ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # Clone so the broadcast does not clobber the parameter in place.
        bias_temp = bias.clone()
        dist.broadcast(bias_temp,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        # [h/q]
        bias_temp = all_gather(bias_temp, -1, weight_parallel_mode)

        out = input_ + bias_temp

        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Return grads for (input_, bias) plus Nones for the non-tensor
        args; only the bias-owning rank receives a non-None bias grad."""
        # output_grad: [m/q^2, n, h/q]
        with torch.no_grad():
            # Sum out every non-hidden dimension, then undo the forward
            # all-gather with a reduce-scatter.  # [h/q]
            grad = torch.sum(output_grad,
                             dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode)
            # Undo the forward broadcast: reduce back to the owner rank.
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                        ctx.C_group_parallel_mode):
                bias_grad = None
        return output_grad, bias_grad, None, None, None, None
class Mul_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = A * b` (elementwise scale) on
    the 3D process grid.

    Same bias distribution scheme as ``Add_3D`` (broadcast within the input
    group, all-gather over the weight group), but the bias multiplies the
    input instead of being added.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        """Return ``input_ * bias`` with the bias reconstructed from shards.

        Shard shapes (q = grid side length):
        # input: [m/q^2, n, h/q]
        # bias: [h/q^2]
        """
        # Broadcast source: the input-group rank matching our output-group
        # local index.
        ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # [h/q^2] — clone so the broadcast does not clobber the parameter.
        bias_temp = bias.clone()
        dist.broadcast(bias_temp,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        # [h/q]
        bias_temp = all_gather(bias_temp, -1, weight_parallel_mode)

        # Both factors are needed in backward (product rule).
        ctx.save_for_backward(input_, bias_temp)

        out = torch.mul(input_, bias_temp)

        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Return grads for (input_, bias) plus Nones for the non-tensor
        args; only the bias-owning rank receives a non-None bias grad."""
        # output_grad: [m/q^2, n, h/q]
        with torch.no_grad():
            input_, bias = ctx.saved_tensors
            # d(input) = dC * bias  # [m/q^2, n, h/q]
            input_grad = torch.mul(output_grad, bias)
            # d(bias) = sum over non-hidden dims of dC * input  # [h/q]
            grad = torch.mul(output_grad, input_)
            grad = torch.sum(grad,
                             dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode)
            # Undo the forward broadcast: reduce back to the owner rank.
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                        ctx.C_group_parallel_mode):
                bias_grad = None
        return input_grad, bias_grad, None, None, None, None
class Sum_3D(torch.autograd.Function):
    """Compute the sum of input tensors along ``dim``, all-reduced across
    ``parallel_mode`` so every rank gets the global sum.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                input_: Tensor,
                dim: int,
                depth: int,
                parallel_mode: ParallelMode,
                keepdim: bool = False) -> Tensor:
        """Return the global sum of ``input_`` over ``dim``.

        # input: [m/q^2, n, h/q]
        """
        out = torch.sum(input_, dim=dim, keepdim=keepdim)
        # Combine the per-rank partial sums into the global sum (in place).
        dist.all_reduce(out, group=gpc.get_group(parallel_mode))

        # Original shape is needed in backward to re-broadcast the grad.
        ctx.input_shape = input_.shape
        ctx.depth = depth
        ctx.group = parallel_mode
        ctx.dim = dim
        return out

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Broadcast the (all-reduced) output grad back to the input shape;
        Nones for the non-tensor forward args."""
        with torch.no_grad():
            output_grad = output_grad.contiguous()
            dist.all_reduce(output_grad, group=gpc.get_group(ctx.group))
            # Re-insert the summed dim if it was dropped (keepdim=False).
            if len(output_grad.shape) < len(ctx.input_shape):
                output_grad = torch.unsqueeze(output_grad, ctx.dim)
            # Tile the grad along the summed dim to match the input shape.
            dims = [1 for _ in range(len(output_grad.shape))]
            dims[ctx.dim] = ctx.input_shape[ctx.dim]
            input_grad = output_grad.repeat(tuple(dims))
        return input_grad, None, None, None, None, None
class Reduce_3D(torch.autograd.Function):
    """All-reduce a tensor across one axis of the 3D process grid.

    The gradient of a sum-reduction distributes unchanged, so backward
    simply passes ``output_grad`` through.
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, depth: int,
                parallel_mode: ParallelMode) -> Tensor:
        # Sum in place across the chosen process group, then hand back a
        # fresh copy so callers never alias the mutated buffer.
        comm_group = gpc.get_group(parallel_mode)
        dist.all_reduce(input_, group=comm_group)
        result = input_.clone()
        return result

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # One grad per forward tensor arg; Nones for depth / parallel_mode.
        grad_input = output_grad
        return grad_input, None, None
# class Slice_3D(torch.autograd.Function):
# """Slice input tensor
# """
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx: Any, input_: Tensor, dim: int, depth: int,
# parallel_mode: ParallelMode) -> Tensor:
# rank = gpc.get_local_rank(parallel_mode)
# out = torch.chunk(input_, depth, dim=dim)[rank].contiguous()
# ctx.depth = depth
# ctx.parallel_mode = parallel_mode
# ctx.dim = dim
# ctx.input_shape = input_.shape
# return out
# @staticmethod
# @custom_bwd
# def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
# with torch.no_grad():
# input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
# input_grad.reshape(ctx.input_shape)
# return input_grad, None, None, None
| [
"torch.cuda.amp.custom_fwd",
"torch.mul",
"colossalai.core.global_context.get_local_rank",
"colossalai.communication.all_gather",
"torch.unsqueeze",
"colossalai.communication.reduce_scatter",
"torch.stack",
"torch.sqrt",
"colossalai.core.global_context.get_group",
"torch.matmul",
"torch.sum",
... | [((462, 499), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (472, 499), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((5920, 5957), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (5930, 5957), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((9720, 9757), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (9730, 9757), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((11983, 12020), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (11993, 12020), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((14145, 14182), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (14155, 14182), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((16390, 16427), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (16400, 16427), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((18313, 18350), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (18323, 18350), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((20498, 20535), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (20508, 20535), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((21761, 21798), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (21771, 21798), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((1110, 1160), 'colossalai.communication.all_gather', 'all_gather', (['input_', 'input_dim', 
'input_parallel_mode'], {}), '(input_, input_dim, input_parallel_mode)\n', (1120, 1160), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((1178, 1210), 'torch.cat', 'torch.cat', (['input_'], {'dim': 'input_dim'}), '(input_, dim=input_dim)\n', (1187, 1210), False, 'import torch\n'), ((1347, 1375), 'torch.matmul', 'torch.matmul', (['input_', 'weight'], {}), '(input_, weight)\n', (1359, 1375), False, 'import torch\n'), ((1393, 1449), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['output', 'output_dim', 'output_parallel_mode'], {}), '(output, output_dim, output_parallel_mode)\n', (1407, 1449), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((7051, 7072), 'torch.sqrt', 'torch.sqrt', (['(var + eps)'], {}), '(var + eps)\n', (7061, 7072), False, 'import torch\n'), ((10393, 10438), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (10403, 10438), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((10456, 10503), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (10466, 10503), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((10517, 10545), 'torch.matmul', 'torch.matmul', (['A_temp', 'B_temp'], {}), '(A_temp, B_temp)\n', (10529, 10545), False, 'import torch\n'), ((10560, 10611), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (10574, 10611), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((12544, 12589), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (12554, 12589), 
False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((12607, 12654), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (12617, 12654), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((12727, 12778), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (12741, 12778), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((14706, 14751), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (14716, 14751), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((14818, 14865), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (14828, 14865), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((14987, 15038), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (15001, 15038), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((16744, 16787), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (16766, 16787), True, 'from colossalai.core import global_context as gpc\n'), ((17069, 17116), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (17079, 17116), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((18667, 18710), 
'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (18689, 18710), True, 'from colossalai.core import global_context as gpc\n'), ((19010, 19057), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (19020, 19057), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((19147, 19175), 'torch.mul', 'torch.mul', (['input_', 'bias_temp'], {}), '(input_, bias_temp)\n', (19156, 19175), False, 'import torch\n'), ((20790, 20833), 'torch.sum', 'torch.sum', (['input_'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(input_, dim=dim, keepdim=keepdim)\n', (20799, 20833), False, 'import torch\n'), ((2572, 2587), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2585, 2587), False, 'import torch\n'), ((4006, 4071), 'colossalai.communication.all_gather', 'all_gather', (['output_grad', 'ctx.output_dim', 'ctx.output_parallel_mode'], {}), '(output_grad, ctx.output_dim, ctx.output_parallel_mode)\n', (4016, 4071), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((4135, 4177), 'torch.cat', 'torch.cat', (['output_grad'], {'dim': 'ctx.output_dim'}), '(output_grad, dim=ctx.output_dim)\n', (4144, 4177), False, 'import torch\n'), ((4302, 4388), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['input_grad', 'ctx.input_dim', 'ctx.input_parallel_mode'], {'async_op': '(True)'}), '(input_grad, ctx.input_dim, ctx.input_parallel_mode, async_op\n =True)\n', (4316, 4388), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((5519, 5583), 'colossalai.communication.all_reduce', 'all_reduce', (['weight_grad', 'ctx.weight_parallel_mode'], {'async_op': '(True)'}), '(weight_grad, ctx.weight_parallel_mode, async_op=True)\n', (5529, 5583), False, 'from colossalai.communication import all_gather, 
all_reduce, reduce_scatter\n'), ((8102, 8117), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8115, 8117), False, 'import torch\n'), ((8364, 8407), 'colossalai.communication.all_reduce', 'all_reduce', (['grads', 'ctx.weight_parallel_mode'], {}), '(grads, ctx.weight_parallel_mode)\n', (8374, 8407), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((8428, 8470), 'colossalai.communication.all_reduce', 'all_reduce', (['grads', 'ctx.input_parallel_mode'], {}), '(grads, ctx.input_parallel_mode)\n', (8438, 8470), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((11073, 11088), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11086, 11088), False, 'import torch\n'), ((13240, 13255), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13253, 13255), False, 'import torch\n'), ((15500, 15515), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15513, 15515), False, 'import torch\n'), ((16822, 16862), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (16840, 16862), True, 'from colossalai.core import global_context as gpc\n'), ((17558, 17573), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17571, 17573), False, 'import torch\n'), ((17737, 17788), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (17751, 17788), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((18745, 18785), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (18763, 18785), True, 'from colossalai.core import global_context as gpc\n'), ((19583, 19598), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19596, 19598), False, 'import torch\n'), ((19700, 19728), 'torch.mul', 'torch.mul', (['output_grad', 
'bias'], {}), '(output_grad, bias)\n', (19709, 19728), False, 'import torch\n'), ((19768, 19798), 'torch.mul', 'torch.mul', (['output_grad', 'input_'], {}), '(output_grad, input_)\n', (19777, 19798), False, 'import torch\n'), ((19934, 19985), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (19948, 19985), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((21159, 21174), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21172, 21174), False, 'import torch\n'), ((6768, 6807), 'torch.sum', 'torch.sum', (['input_'], {'dim': '(-1)', 'keepdim': '(True)'}), '(input_, dim=-1, keepdim=True)\n', (6777, 6807), False, 'import torch\n'), ((6928, 6968), 'torch.sum', 'torch.sum', (['(mu ** 2)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(mu ** 2, dim=-1, keepdim=True)\n', (6937, 6968), False, 'import torch\n'), ((9166, 9203), 'torch.sum', 'torch.sum', (['dvar'], {'dim': '(-1)', 'keepdim': '(True)'}), '(dvar, dim=-1, keepdim=True)\n', (9175, 9203), False, 'import torch\n'), ((9340, 9378), 'torch.sum', 'torch.sum', (['dmean'], {'dim': '(-1)', 'keepdim': '(True)'}), '(dmean, dim=-1, keepdim=True)\n', (9349, 9378), False, 'import torch\n'), ((16997, 17031), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (17010, 17031), True, 'from colossalai.core import global_context as gpc\n'), ((17953, 17998), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (17971, 17998), True, 'from colossalai.core import global_context as gpc\n'), ((18023, 18068), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (18041, 18068), True, 'from colossalai.core import global_context as gpc\n'), ((18938, 18972), 
'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (18951, 18972), True, 'from colossalai.core import global_context as gpc\n'), ((20150, 20195), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (20168, 20195), True, 'from colossalai.core import global_context as gpc\n'), ((20220, 20265), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (20238, 20265), True, 'from colossalai.core import global_context as gpc\n'), ((20869, 20897), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (20882, 20897), True, 'from colossalai.core import global_context as gpc\n'), ((21392, 21429), 'torch.unsqueeze', 'torch.unsqueeze', (['output_grad', 'ctx.dim'], {}), '(output_grad, ctx.dim)\n', (21407, 21429), False, 'import torch\n'), ((21947, 21975), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (21960, 21975), True, 'from colossalai.core import global_context as gpc\n'), ((8214, 8251), 'torch.stack', 'torch.stack', (['[bias_grad, weight_grad]'], {}), '([bias_grad, weight_grad])\n', (8225, 8251), False, 'import torch\n'), ((17896, 17936), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (17909, 17936), True, 'from colossalai.core import global_context as gpc\n'), ((20093, 20133), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (20106, 20133), True, 'from colossalai.core import global_context as gpc\n'), ((21274, 21298), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.group'], {}), '(ctx.group)\n', (21287, 21298), True, 'from colossalai.core 
import global_context as gpc\n'), ((5445, 5478), 'torch.unsqueeze', 'torch.unsqueeze', (['bias_grad'], {'dim': '(0)'}), '(bias_grad, dim=0)\n', (5460, 5478), False, 'import torch\n')] |
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.utils.timezone import now
from ordered_model.models import OrderedModel
# Resolve the active user model once at import time (supports projects that
# override AUTH_USER_MODEL).
User = get_user_model()
class Habit(OrderedModel):
    """A recurring habit belonging to a user, with a per-user ordering."""

    name = models.CharField(max_length=255)
    # Integer day indexes the habit is scheduled on (rendered in __str__).
    days = ArrayField(base_field=models.IntegerField())
    # NOTE(review): presumably distinguishes morning vs. evening habits —
    # confirm against the consuming views.
    morning = models.BooleanField()
    # Free-form reminder time string; empty string means "no reminder".
    reminder_time = models.CharField(max_length=10, blank=True, default='')
    # Inactive habits are kept but no longer scheduled (soft disable).
    active = models.BooleanField(default=True)
    # Whether streak tracking is enabled for this habit.
    streaks = models.BooleanField(default=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    # OrderedModel: maintain a separate ordering sequence per user.
    order_with_respect_to = 'user'

    class Meta(OrderedModel.Meta):
        pass

    def __str__(self):
        return f'{self.name} on {",".join(map(str, self.days))}'
class Task(models.Model):
    """A single dated occurrence of a habit, linked to a Todoist item."""

    name = models.CharField(max_length=255)
    completed = models.BooleanField(default=False)
    # Defaults to the creation time's date (timezone-aware `now`).
    date = models.DateField(default=now)
    # Display position within a day (see Meta.ordering below).
    order = models.IntegerField()
    # ID of the corresponding Todoist item — presumably set by a sync
    # process; confirm against the integration code.
    todoist_id = models.BigIntegerField()
    # Nullable: a task may exist without an associated habit.
    habit = models.ForeignKey(Habit, on_delete=models.CASCADE, null=True)

    class Meta:
        # Newest date first, then by explicit position within the day.
        ordering = ['-date', 'order']

    def __str__(self):
        return f'{self.name} on {self.date}'
| [
"django.contrib.auth.get_user_model",
"django.db.models.DateField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.db.models.BigIntegerField",
"django.db.models.CharField"
] | [((222, 238), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (236, 238), False, 'from django.contrib.auth import get_user_model\n'), ((279, 311), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (295, 311), False, 'from django.db import models\n'), ((382, 403), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (401, 403), False, 'from django.db import models\n'), ((424, 479), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)', 'default': '""""""'}), "(max_length=10, blank=True, default='')\n", (440, 479), False, 'from django.db import models\n'), ((493, 526), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (512, 526), False, 'from django.db import models\n'), ((541, 574), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (560, 574), False, 'from django.db import models\n'), ((586, 635), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (603, 635), False, 'from django.db import models\n'), ((848, 880), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (864, 880), False, 'from django.db import models\n'), ((897, 931), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (916, 931), False, 'from django.db import models\n'), ((943, 972), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'now'}), '(default=now)\n', (959, 972), False, 'from django.db import models\n'), ((985, 1006), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1004, 1006), False, 'from django.db import models\n'), ((1024, 1048), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', 
(1046, 1048), False, 'from django.db import models\n'), ((1061, 1122), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Habit'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(Habit, on_delete=models.CASCADE, null=True)\n', (1078, 1122), False, 'from django.db import models\n'), ((345, 366), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (364, 366), False, 'from django.db import models\n')] |
from pandas import Series
from igraph import *
from numba import jit
import numpy as np
import os

# Gather all the per-subject time-series files.
files = os.listdir('timeseries/')

# Concatenate (stack) the time series of every file column-wise.
ts_matrix = None
for f in files:
    new_ts = np.loadtxt('timeseries/' + f).T
    if ts_matrix is None:
        ts_matrix = new_ts
    else:
        ts_matrix = np.hstack((ts_matrix, new_ts))

# Compute the correlation matrix between all time series.
corr_mat = np.corrcoef(ts_matrix.T)


# a flatten function optimized by numba
@jit
def fast_flatten(X):
    """Flatten a 2-D array into a 1-D array (numba-compiled)."""
    k = 0
    length = X.shape[0] * X.shape[1]
    # BUG FIX: `empty` was unqualified (numpy is imported as `np`),
    # which raised NameError at call time.
    X_flat = np.empty(length)
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            X_flat[k] = X[i, j]
            k += 1
    return X_flat


# helper function that returns the min of the number of
# unique values depending on the threshold
def min_thresh_val(X, threshold):
    """Return the smallest value among the top `threshold` fraction of X."""
    X_flat = fast_flatten(X)
    index = int(len(X_flat) * threshold)
    # BUG FIX: `unique`/`sort` were unqualified (NameError); use np.*
    return np.unique(np.sort(X_flat))[::-1][:index].min()


# Computes the threshold matrix without killing the python kernel
@jit
def thresh_mat(X, threshold):
    """Zero out, in place, every entry of X below the top-`threshold` cutoff."""
    min_val = min_thresh_val(X, threshold)
    print("Done with min_thresh_val")
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            if X[i, j] < min_val:
                X[i, j] = 0


# BUG FIX: `X` was never defined (the `np.load` lines were commented out),
# so everything below raised NameError. Threshold the correlation matrix
# computed above.
X = corr_mat
thresh_mat(X, .01)
print("Finished Threshold Matrix")

# from: http://stackoverflow.com/questions/29655111/igraph-graph-from-numpy-or-pandas-adjacency-matrix
# get the row, col indices of the non-zero elements in your adjacency matrix
conn_indices = np.where(X)
# get the weights corresponding to these indices
weights = X[conn_indices]
# a sequence of (i, j) tuples, each corresponding to an edge from i -> j
edges = zip(*conn_indices)
# initialize the graph from the edge sequence
G = Graph(edges=edges, directed=False)
# assign node names and weights to be attributes of the vertices and edges
# respectively
G.vs['label'] = np.arange(X.shape[0])
G.es['weight'] = weights
# get the vertex clustering corresponding to the best modularity
cm = G.community_multilevel()
# save the cluster membership of each node in a csv file
Series(cm.membership).to_csv('mem.csv', index=False)
| [
"pandas.Series",
"os.listdir",
"numpy.hstack",
"numpy.where",
"numpy.corrcoef",
"numpy.loadtxt",
"numpy.arange"
] | [((145, 170), 'os.listdir', 'os.listdir', (['"""timeseries/"""'], {}), "('timeseries/')\n", (155, 170), False, 'import os\n'), ((504, 528), 'numpy.corrcoef', 'np.corrcoef', (['ts_matrix.T'], {}), '(ts_matrix.T)\n', (515, 528), True, 'import numpy as np\n'), ((1929, 1940), 'numpy.where', 'np.where', (['X'], {}), '(X)\n', (1937, 1940), True, 'import numpy as np\n'), ((2311, 2332), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (2320, 2332), True, 'import numpy as np\n'), ((422, 452), 'numpy.hstack', 'np.hstack', (['(ts_matrix, new_ts)'], {}), '((ts_matrix, new_ts))\n', (431, 452), True, 'import numpy as np\n'), ((2512, 2533), 'pandas.Series', 'Series', (['cm.membership'], {}), '(cm.membership)\n', (2518, 2533), False, 'from pandas import Series\n'), ((296, 325), 'numpy.loadtxt', 'np.loadtxt', (["('timeseries/' + f)"], {}), "('timeseries/' + f)\n", (306, 325), True, 'import numpy as np\n'), ((370, 399), 'numpy.loadtxt', 'np.loadtxt', (["('timeseries/' + f)"], {}), "('timeseries/' + f)\n", (380, 399), True, 'import numpy as np\n')] |
import tensorflow as tf
from tensorflow import keras
def build_mlp(
    mlp_input,
    output_size,
    n_layers,
    size,
    output_activation = None):
    """Stack `n_layers` ReLU Dense layers of width `size` on top of
    `mlp_input`, then apply a final Dense layer of width `output_size`
    with `output_activation`. Returns the output tensor of that layer.
    """
    hidden = mlp_input
    layer_idx = 0
    while layer_idx < n_layers:
        dense = keras.layers.Dense(units=size, activation='relu')
        hidden = dense(hidden)
        layer_idx += 1
    head = keras.layers.Dense(units=output_size, activation=output_activation)
    return head(hidden)
"tensorflow.keras.layers.Dense"
] | [((286, 353), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': 'output_size', 'activation': 'output_activation'}), '(units=output_size, activation=output_activation)\n', (304, 353), False, 'from tensorflow import keras\n'), ((218, 267), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': 'size', 'activation': '"""relu"""'}), "(units=size, activation='relu')\n", (236, 267), False, 'from tensorflow import keras\n')] |
import numpy as np
# Part 1
data = np.loadtxt('data.csv')
def get_number_of_times_count_increased(data):
    """Count how many consecutive pairs in `data` are strictly increasing."""
    return sum(1 for prev, curr in zip(data, data[1:]) if curr > prev)
# `data` is already loaded above; the redundant second np.loadtxt call
# (reading the same file twice) has been removed.
increased_counter = get_number_of_times_count_increased(data)
print(f'{increased_counter} of times is the number larger than before')
# Part II: compare sums over a sliding window of 3 measurements.
window_size = 3
# Leftover per-iteration debug prints removed from the loop; the window
# sums are built directly with a comprehension.
window_sums = [np.sum(data[i: i + window_size])
               for i in range(len(data) - window_size + 1)]
increased_counter_window = get_number_of_times_count_increased(window_sums)
print(f'{increased_counter_window} of times is the number larger than before')
| [
"numpy.sum",
"numpy.loadtxt"
] | [((37, 59), 'numpy.loadtxt', 'np.loadtxt', (['"""data.csv"""'], {}), "('data.csv')\n", (47, 59), True, 'import numpy as np\n'), ((268, 290), 'numpy.loadtxt', 'np.loadtxt', (['"""data.csv"""'], {}), "('data.csv')\n", (278, 290), True, 'import numpy as np\n'), ((568, 599), 'numpy.sum', 'np.sum', (['data[i:i + window_size]'], {}), '(data[i:i + window_size])\n', (574, 599), True, 'import numpy as np\n')] |
from flask import render_template, request, redirect, url_for
from flask_login import login_user, logout_user
from application import app
from application.auth.models import User
from application.auth.forms import LoginForm
@app.route("/auth/login", methods = ["GET", "POST"])
def auth_login():
if request.method == "GET":
return render_template("auth/loginform.html", form = LoginForm())
form = LoginForm(request.form)
# mahdolliset validoinnit
user = User.query.filter_by(username=form.username.data, password=form.password.data).first()
if not user:
return render_template("auth/loginform.html", form = form,
error = "No such username or password")
login_user(user)
return redirect(url_for("index"))
@app.route("/auth/logout")
def auth_logout():
logout_user()
return redirect(url_for("index")) | [
"flask.render_template",
"flask_login.login_user",
"flask_login.logout_user",
"application.auth.forms.LoginForm",
"flask.url_for",
"application.app.route",
"application.auth.models.User.query.filter_by"
] | [((229, 278), 'application.app.route', 'app.route', (['"""/auth/login"""'], {'methods': "['GET', 'POST']"}), "('/auth/login', methods=['GET', 'POST'])\n", (238, 278), False, 'from application import app\n'), ((792, 817), 'application.app.route', 'app.route', (['"""/auth/logout"""'], {}), "('/auth/logout')\n", (801, 817), False, 'from application import app\n'), ((417, 440), 'application.auth.forms.LoginForm', 'LoginForm', (['request.form'], {}), '(request.form)\n', (426, 440), False, 'from application.auth.forms import LoginForm\n'), ((731, 747), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (741, 747), False, 'from flask_login import login_user, logout_user\n'), ((841, 854), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (852, 854), False, 'from flask_login import login_user, logout_user\n'), ((602, 694), 'flask.render_template', 'render_template', (['"""auth/loginform.html"""'], {'form': 'form', 'error': '"""No such username or password"""'}), "('auth/loginform.html', form=form, error=\n 'No such username or password')\n", (617, 694), False, 'from flask import render_template, request, redirect, url_for\n'), ((768, 784), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (775, 784), False, 'from flask import render_template, request, redirect, url_for\n'), ((875, 891), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (882, 891), False, 'from flask import render_template, request, redirect, url_for\n'), ((483, 561), 'application.auth.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'form.username.data', 'password': 'form.password.data'}), '(username=form.username.data, password=form.password.data)\n', (503, 561), False, 'from application.auth.models import User\n'), ((392, 403), 'application.auth.forms.LoginForm', 'LoginForm', ([], {}), '()\n', (401, 403), False, 'from application.auth.forms import LoginForm\n')] |
# Copyright (c) 2019 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import logging
import cv2
import numpy as np
import time
from datetime import datetime
from random import uniform
"""Visual trigger for PCB anomaly detection.
"""
class Udf:
    """PCB anomaly detection trigger object.

    Watches a video stream for a PCB entering the camera's field of view
    (via MOG2 background subtraction) and decides per frame whether it
    should be forwarded downstream as a key frame or dropped.
    """
    def __init__(self, n_right_px, n_left_px,
                 n_total_px, training_mode, scale_ratio):
        """Udf constructor

        :param n_right_px: minimum number of pixels
            to the right of PCB mask (default 1000)
        :type n_right_px: int
        :param n_left_px: minimum number of pixels
            to the left of the PCB mask (default 1000)
        :type n_left_px: int
        :param n_total_px: minimum number of pixels
            in the PCB mask (default 300000)
        :type n_total_px: int
        :param training_mode: flag to save image ROI's
            for training (default false)
        :type training_mode: bool
        :param scale_ratio: factor by which incoming frames are shrunk
            (per dimension) before analysis; the pixel thresholds below
            are rescaled accordingly
        :type scale_ratio: int
        """
        self.log = logging.getLogger('PCB_FILTER')
        self.log.debug("In ctor")
        self.ratio = scale_ratio
        # Initialize background subtractor
        self.fgbg = cv2.createBackgroundSubtractorMOG2()
        # Total white pixel # on MOG applied
        # frame after morphological operations
        # (divided by ratio^2 because analysis runs on a frame downscaled
        # by `ratio` in each dimension)
        self.n_total_px = n_total_px/(self.ratio*self.ratio)
        # Total white pixel # on left edge of MOG
        # applied frame after morphological operations
        self.n_left_px = n_left_px/(self.ratio*self.ratio)
        # Total white pixel # on right edge of MOG
        # applied frame after morphological operations
        self.n_right_px = n_right_px/(self.ratio*self.ratio)
        # Flag to lock trigger from forwarding frames to classifier
        self.filter_lock = False
        self.training_mode = training_mode
        # NOTE(review): `profiling` and `threads` are never referenced again
        # in this class -- they appear to be dead state.
        self.profiling = False
        self.count = 0
        self.lock_frame_count = 0
        self.threads = 0
    def _check_frame(self, frame):
        """Determines if the given frame is the key frame of interest for
        further processing or not

        :param frame: frame blob
        :type frame: numpy.ndarray
        :return: True if the given frame is a key frame, else False
        :rtype: bool
        """
        # Apply background subtractor on frame
        fgmask = self.fgbg.apply(frame)
        rows, columns = fgmask.shape
        if self.filter_lock is False:
            # Applying morphological operations
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
            ret, thresh = cv2.threshold(fgmask, 0, 255,
                                        cv2.THRESH_BINARY+cv2.THRESH_OTSU)
            thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
            # Crop left and right edges of frame
            left = thresh[:, 0:10]
            right = thresh[:, (columns - 10):(columns)]
            # Count the # of white pixels in thresh
            n_total = np.sum(thresh == 255)
            n_left = np.sum(left == 255)
            n_right = np.sum(right == 255)
            # If the PCB is in view of camera & is not
            # touching the left, right edge of frame
            # (note: `&` is the bitwise operator; the operands here are
            # booleans, so it behaves like `and` without short-circuiting)
            if (n_total > self.n_total_px) & \
               (n_left < self.n_left_px) & \
               (n_right < self.n_right_px):
                # Find the PCB contour
                # NOTE(review): the 2-value unpack assumes an OpenCV version
                # whose findContours returns (contours, hierarchy) -- confirm
                # against the deployed cv2 version.
                contours, hier = cv2.findContours(thresh.copy(),
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_NONE)
                if len(contours) != 0:
                    # Contour with largest area would be bounding the PCB
                    c = max(contours, key=cv2.contourArea)
                    # Obtain the bounding rectangle
                    # for the contour and calculate the center
                    x, y, w, h = cv2.boundingRect(c)
                    cX = int(x + (w / 2))
                    # If the rectangle bounding the
                    # PCB doesn't touch the left or right edge
                    # of frame and the center x lies within
                    # +/- (100/ratio) px of the frame's horizontal middle
                    if (x != 0) & ((x + w) != columns) & \
                            ((columns/2 - (100/self.ratio)) <= cX and
                             cX <= (columns/2 + (100/self.ratio))):
                        return True
                    else:
                        return False
        return False
    def process(self, frame, metadata):
        """Processes every frame it receives based on the filter logic used

        :param frame: frame blob
        :type frame: numpy.ndarray
        :param metadata: frame's metadata
        :type metadata: str
        :return: (should the frame be dropped, has the frame been updated,
                  new metadata for the frame if any)
        :rtype: (bool, numpy.ndarray, str)
        """
        frame_height, frame_width = frame.shape[:-1]
        # Downscale before analysis; the pixel thresholds were pre-divided
        # by ratio^2 in the constructor to match.
        resized_frame = cv2.resize(frame, (int(frame_width/self.ratio),
                                   int(frame_height/self.ratio)))
        if self.training_mode is True:
            # Training mode: dump every full-resolution frame to disk and
            # drop it from the pipeline.
            self.count = self.count + 1
            cv2.imwrite("/EII/test_videos/"+str(self.count)+".png", frame)
            return True, None, None
        else:
            if self.filter_lock is False:
                if self._check_frame(resized_frame):
                    self.filter_lock = True
                    # Re-initialize frame count during trigger lock to 0
                    self.lock_frame_count = 0
                    return False, None, metadata
                else:
                    return True, None, None
            else:
                # Continue applying background subtractor to
                # keep track of PCB positions
                self._check_frame(resized_frame)
                # Increment frame count during trigger lock phase
                self.lock_frame_count = self.lock_frame_count + 1
                if self.lock_frame_count == 7:
                    # Clear trigger lock after timeout
                    # period (measured in frame count here)
                    self.filter_lock = False
                return True, None, None
| [
"logging.getLogger",
"cv2.createBackgroundSubtractorMOG2",
"cv2.threshold",
"cv2.morphologyEx",
"numpy.sum",
"cv2.getStructuringElement",
"cv2.boundingRect"
] | [((2080, 2111), 'logging.getLogger', 'logging.getLogger', (['"""PCB_FILTER"""'], {}), "('PCB_FILTER')\n", (2097, 2111), False, 'import logging\n'), ((2242, 2278), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (2276, 2278), False, 'import cv2\n'), ((3565, 3616), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(20, 20)'], {}), '(cv2.MORPH_RECT, (20, 20))\n', (3590, 3616), False, 'import cv2\n'), ((3643, 3709), 'cv2.threshold', 'cv2.threshold', (['fgmask', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(fgmask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3656, 3709), False, 'import cv2\n'), ((3769, 3818), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(thresh, cv2.MORPH_CLOSE, kernel)\n', (3785, 3818), False, 'import cv2\n'), ((4034, 4055), 'numpy.sum', 'np.sum', (['(thresh == 255)'], {}), '(thresh == 255)\n', (4040, 4055), True, 'import numpy as np\n'), ((4077, 4096), 'numpy.sum', 'np.sum', (['(left == 255)'], {}), '(left == 255)\n', (4083, 4096), True, 'import numpy as np\n'), ((4119, 4139), 'numpy.sum', 'np.sum', (['(right == 255)'], {}), '(right == 255)\n', (4125, 4139), True, 'import numpy as np\n'), ((4957, 4976), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (4973, 4976), False, 'import cv2\n')] |
import requests
import base64
from flask import Flask, request
from flask_restful import Resource, Api
from web import common
app = Flask(__name__)
api = Api(app)
class PyOpenOcrBase64(Resource):
    """REST resource: OCR an image supplied as a base64-encoded request body."""
    def post(self):
        # The request body is base64 text; decode back to raw image bytes.
        decoded = base64.decodebytes(request.data)
        # Delegate OCR to the shared helper; query args carry OCR options
        # (their exact semantics live in web.common, not visible here).
        return common.data_to_text(decoded, request.args)
class PyOpenOcrURL(Resource):
    """REST resource: OCR an image fetched from a URL given in the request body."""
    def post(self):
        url = request.data.decode("utf-8")
        # SECURITY(review): this fetches an arbitrary client-supplied URL
        # (SSRF risk) and `verify=False` disables TLS certificate
        # validation -- both should be revisited before exposing publicly.
        data = requests.get(url, allow_redirects=True, verify=False)
        return common.data_to_text(data.content, request.args)
api.add_resource(PyOpenOcrBase64, '/ocr-base64')
api.add_resource(PyOpenOcrURL, '/ocr-url')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=8443, ssl_context='adhoc') | [
"flask_restful.Api",
"flask.Flask",
"flask.request.data.decode",
"requests.get",
"web.common.data_to_text",
"base64.decodebytes"
] | [((136, 151), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (141, 151), False, 'from flask import Flask, request\n'), ((158, 166), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (161, 166), False, 'from flask_restful import Resource, Api\n'), ((240, 272), 'base64.decodebytes', 'base64.decodebytes', (['request.data'], {}), '(request.data)\n', (258, 272), False, 'import base64\n'), ((288, 330), 'web.common.data_to_text', 'common.data_to_text', (['decoded', 'request.args'], {}), '(decoded, request.args)\n', (307, 330), False, 'from web import common\n'), ((398, 426), 'flask.request.data.decode', 'request.data.decode', (['"""utf-8"""'], {}), "('utf-8')\n", (417, 426), False, 'from flask import Flask, request\n'), ((442, 495), 'requests.get', 'requests.get', (['url'], {'allow_redirects': '(True)', 'verify': '(False)'}), '(url, allow_redirects=True, verify=False)\n', (454, 495), False, 'import requests\n'), ((511, 558), 'web.common.data_to_text', 'common.data_to_text', (['data.content', 'request.args'], {}), '(data.content, request.args)\n', (530, 558), False, 'from web import common\n')] |
import numpy as np
from scipy.sparse import diags
from sklearn.metrics import pairwise_distances
from fclsp.reshaping_utils import vec_hollow_sym
def get_lap_coef(V, w, var_type, shape):
    """
    Computes the Laplacian coefficient vector by dispatching to the
    implementation for the given variable type.

    Parameters
    ----------
    V: array-like
    w: array-like
    var_type: str
        Type of the variable. Must be one of ['hollow_sym', 'rect', 'multi'].
    shape: tuple of ints
        Shape of the variable.

    Output
    ------
    lap_coef: array-like
        The Laplacian coefficient vector.

    Raises
    ------
    ValueError
        If ``var_type`` is not one of the supported types.
    """
    # `assert` is stripped when Python runs with -O, so validate the
    # argument with an explicit raise instead.
    if var_type not in ('hollow_sym', 'rect', 'multi'):
        raise ValueError(
            "var_type must be one of ['hollow_sym', 'rect', 'multi'], "
            "got {!r}".format(var_type))

    if var_type == 'hollow_sym':
        return get_lap_coef_hollow_sym(V=V, w=w)
    elif var_type == 'rect':
        return get_lap_coef_rect(V=V, w=w, shape=shape)
    else:  # 'multi'
        return get_lap_coef_multi(V=V, w=w, shape=shape)
def get_lap_coef_hollow_sym(V, w):
    """
    Returns the Laplacian coefficient for an adjacency matrix.
    Let A(x) in R^{d x d} be an adjacency matrix parametrized by its edges x in R^{d choose 2}. Also let V in R^{d times K} and w in R^K for K <= d.
    The laplacian coefficient M(V, w) in R^{d choose 2} is the vector such that
    M(V, w)^T x = Tr(V^T Laplacian(A(x)) V diag(w))
    Parameters
    ----------
    V: array-like, (n_nodes, K)
        The input matrix.
    w: array-like, (K, )
        The input vector.
    Output
    -------
    M(V, w): array-like, (n_nodes choose 2, )
        The Laplacian coefficient vector.
    """
    assert V.shape[1] == len(w)
    # Pairwise Euclidean distances between the rows of V scaled by sqrt(w).
    # NOTE(review): assumes w is elementwise nonnegative (np.sqrt) -- confirm.
    coef = pairwise_distances(V @ diags(np.sqrt(w)), metric='euclidean',
                              n_jobs=None)  # TODO: give option
    # Square the distances and vectorize the hollow-symmetric matrix
    # (vec_hollow_sym is project code; see fclsp.reshaping_utils).
    coef = vec_hollow_sym(coef) ** 2
    return coef
def get_lap_coef_rect(V, w, shape):
    """
    Returns the Laplacian coefficient for a rectangular matrix.

    Parameters
    ----------
    V: array-like, (n_nodes, K)
        The input matrix.
    w: array-like, (K, )
        The input vector.
    shape: tuple of two ints
        Size of the rectangular matrix.

    Output
    -------
    M(V, w): array-like, (sum(shape), )
        The Laplacian coefficient vector.
    """
    # Not supported yet; only the 'hollow_sym' variant is implemented.
    raise NotImplementedError
def get_lap_coef_multi(V, w, shape):
    """
    Returns the Laplacian coefficient for a multi-array.

    Parameters
    ----------
    V: array-like, (n_nodes, K)
        The input matrix.
    w: array-like, (K, )
        The input vector.
    shape: tuple of two ints
        Size of the rectangular matrix.

    Output
    -------
    M(V, w): array-like
        The Laplacian coefficient vector.
    """
    # Not supported yet; only the 'hollow_sym' variant is implemented.
    raise NotImplementedError
| [
"fclsp.reshaping_utils.vec_hollow_sym",
"numpy.sqrt"
] | [((1688, 1708), 'fclsp.reshaping_utils.vec_hollow_sym', 'vec_hollow_sym', (['coef'], {}), '(coef)\n', (1702, 1708), False, 'from fclsp.reshaping_utils import vec_hollow_sym\n'), ((1579, 1589), 'numpy.sqrt', 'np.sqrt', (['w'], {}), '(w)\n', (1586, 1589), True, 'import numpy as np\n')] |
#
#
import sys, bisect
from file_ops import (
read_sb_solution_wordlist,
read_sb_wordlist,
read_found_wordlist,
write_sb_wordlist,
)
from utils import add_words, remove_words, get_solve_date
def main(days_ago=1):
    """Sync the Spelling-Bee word list against a day's solutions.

    Adds the day's solution words to the stored word list, then removes
    words that were found that day but are absent from the solutions,
    printing a summary of every change.

    :param days_ago: which day's solve to process (1 = yesterday).
    """
    # First, determine the solve date in iso format
    iso_solve_date = get_solve_date(days_ago)
    # Then, insert words from the solution file to the sb_wordlist
    sb_wordlist, sb_wordlist_len = read_sb_wordlist()
    sb_solutions = read_sb_solution_wordlist(iso_solve_date)
    sb_wordlist, added_words, num_of_added_words = add_words(sb_wordlist, sb_solutions)
    print("\nNum of added words:\t\t\t", num_of_added_words)
    print("\nAdded words")
    print("===========")
    print(added_words)
    # Save the sb_wordlist containing the added words
    print("\nNumber of words before additions:\t", sb_wordlist_len)
    sb_wordlist_len_after_adding = write_sb_wordlist(sb_wordlist)
    print("\nNumber of words after additions:\t", sb_wordlist_len_after_adding)
    print(
        "\nNumber of words added:\t",
        sb_wordlist_len_after_adding - sb_wordlist_len,
        "\n",
    )
    # Second, remove words from the sb_wordlist that were found but not included
    # in the sb_solutions
    try:
        found_words = read_found_wordlist(iso_solve_date)
    except FileNotFoundError:
        # No found-words file for that date: nothing to remove, stop here.
        return
    words_to_remove = []
    for word in found_words:
        if word not in sb_solutions:
            words_to_remove.append(word)
    sb_wordlist, removed_words, num_of_removed_words = remove_words(
        sb_wordlist, words_to_remove
    )
    print("\nNumber of removed words:\t\t", num_of_removed_words, "\n")
    print("Removed Words")
    print("=============")
    print(removed_words)
    sb_wordlist_len_after_removal = write_sb_wordlist(sb_wordlist)
    print("\nNumber of words after removal:\t\t", sb_wordlist_len_after_removal)
    # Net change relative to the list size before any additions.
    net_change_wordlist = sb_wordlist_len_after_removal - sb_wordlist_len
    if net_change_wordlist < 0:
        change_text = "removed"
    else:
        change_text = "added"
    txt = (
        "\nNet number of words " + change_text + ":\t\t" + str(abs(net_change_wordlist))
    )
    print(txt)
if __name__ == "__main__":
if len(sys.argv) > 1:
try:
main(sys.argv[1])
except:
print("\nError: incorrect argument for days_ago.\n")
else:
main(1)
# print("argv:", sys.argv[1])
| [
"file_ops.read_sb_solution_wordlist",
"file_ops.write_sb_wordlist",
"utils.add_words",
"file_ops.read_sb_wordlist",
"utils.remove_words",
"utils.get_solve_date",
"file_ops.read_found_wordlist"
] | [((304, 328), 'utils.get_solve_date', 'get_solve_date', (['days_ago'], {}), '(days_ago)\n', (318, 328), False, 'from utils import add_words, remove_words, get_solve_date\n'), ((432, 450), 'file_ops.read_sb_wordlist', 'read_sb_wordlist', ([], {}), '()\n', (448, 450), False, 'from file_ops import read_sb_solution_wordlist, read_sb_wordlist, read_found_wordlist, write_sb_wordlist\n'), ((471, 512), 'file_ops.read_sb_solution_wordlist', 'read_sb_solution_wordlist', (['iso_solve_date'], {}), '(iso_solve_date)\n', (496, 512), False, 'from file_ops import read_sb_solution_wordlist, read_sb_wordlist, read_found_wordlist, write_sb_wordlist\n'), ((565, 601), 'utils.add_words', 'add_words', (['sb_wordlist', 'sb_solutions'], {}), '(sb_wordlist, sb_solutions)\n', (574, 601), False, 'from utils import add_words, remove_words, get_solve_date\n'), ((896, 926), 'file_ops.write_sb_wordlist', 'write_sb_wordlist', (['sb_wordlist'], {}), '(sb_wordlist)\n', (913, 926), False, 'from file_ops import read_sb_solution_wordlist, read_sb_wordlist, read_found_wordlist, write_sb_wordlist\n'), ((1542, 1584), 'utils.remove_words', 'remove_words', (['sb_wordlist', 'words_to_remove'], {}), '(sb_wordlist, words_to_remove)\n', (1554, 1584), False, 'from utils import add_words, remove_words, get_solve_date\n'), ((1787, 1817), 'file_ops.write_sb_wordlist', 'write_sb_wordlist', (['sb_wordlist'], {}), '(sb_wordlist)\n', (1804, 1817), False, 'from file_ops import read_sb_solution_wordlist, read_sb_wordlist, read_found_wordlist, write_sb_wordlist\n'), ((1272, 1307), 'file_ops.read_found_wordlist', 'read_found_wordlist', (['iso_solve_date'], {}), '(iso_solve_date)\n', (1291, 1307), False, 'from file_ops import read_sb_solution_wordlist, read_sb_wordlist, read_found_wordlist, write_sb_wordlist\n')] |
# See ciDifference.ipynb for derivation, implementation notes, and test
def cidifference(datagen, umin, umax, wmin, wmax, alpha=0.05,
                 rmin=0, rmax=1, raiseonerr=False):
    """Confidence interval for the difference estimated by `estimatediff`.

    For each side of the interval this solves the dual of a constrained
    optimization (constraint radius set by an F(1, num-1) critical value at
    level `alpha`) with cvxopt's nonlinear convex solver `solvers.cp`.
    See ciDifference.ipynb for the derivation.

    :param datagen: zero-argument callable yielding (c, u, w, r) tuples,
        where c is a count, u and w are importance weights, r a reward.
    :param umin, umax, wmin, wmax: bounds on the two weights.
    :param alpha: significance level of the interval (default 0.05).
    :param rmin, rmax: bounds on the reward.
    :param raiseonerr: if True, assert the solver reports 'optimal'.
    :return: ((lowerbound, upperbound), (lowerdetails, upperdetails));
        details is None for trivially attained bounds, otherwise a dict
        with the dual variables and a `qfunc` re-weighting callable.
    """
    import numpy as np
    from cvxopt import solvers, matrix
    from math import log, exp
    from scipy.stats import f
    from .estimatediff import estimatediff
    # NOTE(review): these precondition checks vanish under `python -O`.
    assert umin >= 0
    assert umin < 1
    assert umax > 1
    assert wmin >= 0
    assert wmin < 1
    assert wmax > 1
    assert rmax >= rmin
    _, mle = estimatediff(datagen, umin, umax, wmin, wmax, rmin, rmax, raiseonerr=raiseonerr)
    num = mle['num']
    # Half the F(1, num-1) critical value: the likelihood-ratio radius.
    Delta = 0.5 * f.isf(q=alpha, dfn=1, dfd=num-1)
    phi = Delta - mle['primal']
    # Normalize the objective by the reward range (at least 1).
    rscale = max(1.0, rmax - rmin)
    # Dual objective; p = (beta, gamma, tau), sign selects lower/upper bound.
    def dualobjective(p, sign):
        beta, gamma, tau = p
        logcost = -phi
        n = 0
        for c, u, w, r in datagen():
            if c > 0:
                n += c
                denom = beta + gamma * u + tau * w + sign * (u - w) * r
                logcost += c * log(denom)
        logcost /= n
        cost = exp(logcost)
        return (- beta - gamma - tau + n * cost) / rscale
    # Gradient of the dual objective w.r.t. (beta, gamma, tau).
    def jacdualobjective(p, sign):
        beta, gamma, tau = p
        logcost = -phi
        jaclogcost = np.zeros(3)
        n = 0
        for c, u, w, r in datagen():
            if c > 0:
                n += c
                denom = beta + gamma * u + tau * w + sign * (u - w) * r
                logcost += c * log(denom)
                gradlogcost = c / denom
                jaclogcost[0] += gradlogcost
                jaclogcost[1] += u * gradlogcost
                jaclogcost[2] += w * gradlogcost
        logcost /= n
        cost = exp(logcost)
        return (-np.ones(3) + exp(logcost) * jaclogcost) / rscale
    # Hessian of the dual objective (symmetric 3x3).
    def hessdualobjective(p, sign):
        beta, gamma, tau = p
        logcost = -phi
        jaclogcost = np.zeros(3)
        hesslogcost = np.zeros((3,3))
        n = 0
        for c, u, w, r in datagen():
            if c > 0:
                n += c
                denom = beta + gamma * u + tau * w + sign * (u - w) * r
                logcost += c * log(denom)
                gradlogcost = c / denom
                jaclogcost[0] += gradlogcost
                jaclogcost[1] += u * gradlogcost
                jaclogcost[2] += w * gradlogcost
                gradgradlogcost = -c / denom**2
                hesslogcost[0, 0] += gradgradlogcost
                hesslogcost[0, 1] += gradgradlogcost * u
                hesslogcost[0, 2] += gradgradlogcost * w
                hesslogcost[1, 1] += gradgradlogcost * u**2
                hesslogcost[1, 2] += gradgradlogcost * u * w
                hesslogcost[2, 2] += gradgradlogcost * w**2
        logcost /= n
        cost = exp(logcost)
        # Fill the symmetric lower triangle.
        hesslogcost[1, 0] = hesslogcost[0, 1]
        hesslogcost[2, 0] = hesslogcost[0, 2]
        hesslogcost[2, 1] = hesslogcost[1, 2]
        return (cost * (hesslogcost + np.outer(jaclogcost, jaclogcost) / n)) / rscale
    # solve
    # Linear inequality constraints evaluated at the corners of the
    # (u, w, r) box.
    consE = np.array([
        [ 1, u, w ]
        for u in (umin, umax)
        for w in (wmin, wmax)
        for r in (rmin, rmax)
    ], dtype='float64')
    retvals = []
    # A bound is "easy" when the point estimate already (nearly) attains
    # the extreme +/-(rmax - rmin); no optimization is needed then.
    easybounds = [ (mle['deltavmin'] <= (rmin - rmax) + 1e-4, rmin - rmax),
                   (mle['deltavmax'] >= (rmax - rmin) - 1e-4, rmax - rmin) ]
    for what in range(2):
        if easybounds[what][0]:
            retvals.append((easybounds[what][1], None))
            continue
        # what=0 -> lower bound (sign=+1), what=1 -> upper bound (sign=-1).
        sign = 1 - 2 * what
        x0 = np.array([num, -rmin, rmax] if sign > 0 else [num, rmax, -rmin],
                      dtype='float64')
        d = np.array([ -sign * (u - w) * r + 1e-4
                       for u in (umin, umax)
                       for w in (wmin, wmax)
                       for r in (rmin, rmax)
                     ], dtype='float64')
        # from .gradcheck import gradcheck, hesscheck
        # gradcheck(f=lambda p: dualobjective(p, sign),
        #           jac=lambda p: jacdualobjective(p, sign),
        #           x=x0,
        #           what='dualobjective')
        # hesscheck(jac=lambda p: jacdualobjective(p, sign),
        #           hess=lambda p: hessdualobjective(p, sign),
        #           x=x0,
        #           what='jacdualobjective')
        # cvxopt cp() oracle: F() -> (#constraints, x0); F(x) -> (f, Df);
        # F(x, z) -> (f, Df, z[0] * Hessian). Objective is negated because
        # cp() minimizes.
        def F(x=None, z=None):
            if x is None: return 0, matrix(x0)
            f = -dualobjective(x, sign)
            jf = -jacdualobjective(x, sign)
            Df = matrix(jf).T
            if z is None: return f, Df
            hf = -z[0] * hessdualobjective(x, sign)
            H = matrix(hf, hf.shape)
            return f, Df, H
        soln = solvers.cp(F,
                          G=-matrix(consE, consE.shape),
                          h=-matrix(d),
                          options={'show_progress': False})
        if raiseonerr:
            from pprint import pformat
            assert soln['status'] == 'optimal', pformat({
                'soln': soln,
                'phi': phi,
                'mle': mle,
            })
        betastar, gammastar, taustar = soln['x']
        fstar = -rscale * soln['primal objective']
        kappastar = (fstar + betastar + gammastar + taustar) / num
        # Re-weighting function induced by the dual optimum.
        qfunc = lambda c, u, w, r, kappa=kappastar, beta=betastar, gamma=gammastar, tau=taustar: c*kappa / (beta + gamma * u + tau * w + (u - w) * r)
        vbound = sign * fstar
        retvals.append( ( vbound,
                          {
                            'kappastar': kappastar,
                            'betastar': betastar,
                            'gammastar': gammastar,
                            'taustar': taustar,
                            'qfunc': qfunc,
                            'phi': phi,
                            'mle': mle,
                          }
                      ) )
    return (retvals[0][0], retvals[1][0]), (retvals[0][1], retvals[1][1])
| [
"scipy.stats.f.isf",
"numpy.ones",
"pprint.pformat",
"math.log",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"cvxopt.matrix",
"math.exp"
] | [((3035, 3144), 'numpy.array', 'np.array', (['[[1, u, w] for u in (umin, umax) for w in (wmin, wmax) for r in (rmin, rmax)]'], {'dtype': '"""float64"""'}), "([[1, u, w] for u in (umin, umax) for w in (wmin, wmax) for r in (\n rmin, rmax)], dtype='float64')\n", (3043, 3144), True, 'import numpy as np\n'), ((633, 667), 'scipy.stats.f.isf', 'f.isf', ([], {'q': 'alpha', 'dfn': '(1)', 'dfd': '(num - 1)'}), '(q=alpha, dfn=1, dfd=num - 1)\n', (638, 667), False, 'from scipy.stats import f\n'), ((1076, 1088), 'math.exp', 'exp', (['logcost'], {}), '(logcost)\n', (1079, 1088), False, 'from math import log, exp\n'), ((1258, 1269), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1266, 1269), True, 'import numpy as np\n'), ((1701, 1713), 'math.exp', 'exp', (['logcost'], {}), '(logcost)\n', (1704, 1713), False, 'from math import log, exp\n'), ((1892, 1903), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1900, 1903), True, 'import numpy as np\n'), ((1926, 1942), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1934, 1942), True, 'import numpy as np\n'), ((2770, 2782), 'math.exp', 'exp', (['logcost'], {}), '(logcost)\n', (2773, 2782), False, 'from math import log, exp\n'), ((3529, 3615), 'numpy.array', 'np.array', (['([num, -rmin, rmax] if sign > 0 else [num, rmax, -rmin])'], {'dtype': '"""float64"""'}), "([num, -rmin, rmax] if sign > 0 else [num, rmax, -rmin], dtype=\n 'float64')\n", (3537, 3615), True, 'import numpy as np\n'), ((3646, 3776), 'numpy.array', 'np.array', (['[(-sign * (u - w) * r + 0.0001) for u in (umin, umax) for w in (wmin, wmax) for\n r in (rmin, rmax)]'], {'dtype': '"""float64"""'}), "([(-sign * (u - w) * r + 0.0001) for u in (umin, umax) for w in (\n wmin, wmax) for r in (rmin, rmax)], dtype='float64')\n", (3654, 3776), True, 'import numpy as np\n'), ((4604, 4624), 'cvxopt.matrix', 'matrix', (['hf', 'hf.shape'], {}), '(hf, hf.shape)\n', (4610, 4624), False, 'from cvxopt import solvers, matrix\n'), ((4951, 4998), 'pprint.pformat', 
'pformat', (["{'soln': soln, 'phi': phi, 'mle': mle}"], {}), "({'soln': soln, 'phi': phi, 'mle': mle})\n", (4958, 4998), False, 'from pprint import pformat\n'), ((4484, 4494), 'cvxopt.matrix', 'matrix', (['jf'], {}), '(jf)\n', (4490, 4494), False, 'from cvxopt import solvers, matrix\n'), ((1028, 1038), 'math.log', 'log', (['denom'], {}), '(denom)\n', (1031, 1038), False, 'from math import log, exp\n'), ((1470, 1480), 'math.log', 'log', (['denom'], {}), '(denom)\n', (1473, 1480), False, 'from math import log, exp\n'), ((1732, 1742), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1739, 1742), True, 'import numpy as np\n'), ((1745, 1757), 'math.exp', 'exp', (['logcost'], {}), '(logcost)\n', (1748, 1757), False, 'from math import log, exp\n'), ((2142, 2152), 'math.log', 'log', (['denom'], {}), '(denom)\n', (2145, 2152), False, 'from math import log, exp\n'), ((4372, 4382), 'cvxopt.matrix', 'matrix', (['x0'], {}), '(x0)\n', (4378, 4382), False, 'from cvxopt import solvers, matrix\n'), ((4712, 4738), 'cvxopt.matrix', 'matrix', (['consE', 'consE.shape'], {}), '(consE, consE.shape)\n', (4718, 4738), False, 'from cvxopt import solvers, matrix\n'), ((4769, 4778), 'cvxopt.matrix', 'matrix', (['d'], {}), '(d)\n', (4775, 4778), False, 'from cvxopt import solvers, matrix\n'), ((2961, 2993), 'numpy.outer', 'np.outer', (['jaclogcost', 'jaclogcost'], {}), '(jaclogcost, jaclogcost)\n', (2969, 2993), True, 'import numpy as np\n')] |
from nimblenet.activation_functions import sigmoid_function
from nimblenet.cost_functions import *
from nimblenet.learning_algorithms import *
from nimblenet.neuralnet import NeuralNet
from nimblenet.preprocessing import construct_preprocessor, standarize
from nimblenet.data_structures import Instance
from nimblenet.tools import *
import task
import os
import json
import numpy as np
import random
import time
import rehearsal
import settings
import method
"""
Parameters:
"""
save = settings.save
inputNodes = settings.inputNodes
hiddenNodes = settings.hiddenNodes
outputNodes = settings.outputNodes
numInterventions = settings.numInterventions
numPatterns = settings.numPatterns
populationSize = settings.populationSize
repeats = settings.repeats
auto = settings.auto
learningConstant = settings.learningConstant
momentumConstant = settings.momentumConstant
errorCriterion = settings.errorCriterion
cost_function = settings.cost_function
batch_size = settings.batch_size
learningAlgorithm = settings.learningAlgorithm
maxIterations = settings.maxIterations
printRate = settings.printRate
outputFile = settings.outputFile
#
# Main routine
#
# Run the experiment `repeats` times: train a network on a base task, then
# sequentially learn `numInterventions` withheld items with the configured
# rehearsal method.  Goodness is accumulated after initial training
# (slot 0) and after every intervention (slots 1..numInterventions), and
# averaged over all repeats at the end.
totalError = [0 for _ in range(numInterventions + 1)]

for repeat in range(repeats):
    print("\n{} Repeats completed.\n".format(repeat))
    mytask = task.Task(
        inputNodes=inputNodes,
        hiddenNodes=hiddenNodes,
        outputNodes=outputNodes,
        populationSize=numPatterns,
        auto=auto,
        learningConstant=learningConstant,
        momentumConstant=momentumConstant
    )

    # Withhold `numInterventions` items; they are learned later one by one.
    interventions = [mytask.popTask() for _ in range(0, numInterventions)]

    inputs = mytask.task['inputPatterns']
    teacher = mytask.task['teacher']
    dataset = [Instance(inputs[k], teacher[k]) for k in range(len(inputs))]
    training_data = dataset
    test_data = dataset

    layers = [(hiddenNodes, sigmoid_function) for _ in range(settings.numLayers)]
    layers.append((outputNodes, sigmoid_function))
    print("Layers: {}".format(layers))

    mysettings = {
        "n_inputs": inputNodes,          # Number of network input signals
        "layers": layers,
        "initial_bias_value": 0.01,
        "weights_low": -0.3,             # Lower bound on the initial weight value
        "weights_high": 0.3,             # Upper bound on the initial weight value
    }

    # initialize the neural network
    network = NeuralNet(mysettings)
    network.check_gradient(training_data, cost_function)

    # Train the network using backpropagation
    learningAlgorithm(
        network,                           # the network to train
        training_data,                     # specify the training set
        test_data,                         # specify the test set
        cost_function,                     # specify the cost function to calculate error
        ERROR_LIMIT=errorCriterion,        # define an acceptable error limit
        max_iterations=maxIterations,      # continues until the error limit is reached if this argument is skipped
        batch_size=batch_size,             # 1 := no batch learning, 0 := entire trainingset as a batch, anything else := batch size
        print_rate=printRate,              # print error status every `print_rate` epoch.
        learning_rate=learningConstant,    # learning rate
        momentum_factor=momentumConstant,  # momentum
        input_layer_dropout=0.0,           # dropout fraction of the input layer
        hidden_layer_dropout=0.0,          # dropout fraction in all hidden layers
        save_trained_network=False         # Whether to write the trained weights to disk
    )

    # Goodness after initial training, before any intervention.
    goodness = getGoodness(network=network, testset=test_data)
    totalError[0] += goodness

    # Items learned so far, used as rehearsal material for each intervention.
    alreadyLearned = mytask.task
    learnt = [
        Instance(alreadyLearned['inputPatterns'][k], alreadyLearned['teacher'][k])
        for k in range(len(alreadyLearned['inputPatterns']))
    ]

    for j in range(0, len(interventions)):
        print("\nRunning Intervention", j + 1)
        intervention = Instance(
            interventions[j]['inputPatterns'][0],
            interventions[j]['teacher'][0]
        )

        meth = settings.mymethod
        # BUG FIX: the original code started a *second* `if` chain at
        # `method.recency`, so a catastrophicForgetting run always fell
        # into that chain's `else` branch and exited with "Method not
        # valid".  A single if/elif chain dispatches each method once.
        if meth == method.catastrophicForgetting:
            rehearsal.catastrophicForgetting(
                network=network,
                intervention=intervention
            )
        elif meth == method.recency:
            rehearsal.recency(
                network=network,
                intervention=intervention,
                learnt=learnt,
                random=False
            )
        elif meth == method.random:
            rehearsal.recency(
                network=network,
                intervention=intervention,
                learnt=learnt,
                random=True
            )
        elif meth == method.pseudo:
            rehearsal.pseudo(
                network=network,
                intervention=intervention,
                numPseudoItems=settings.numPseudoItems
            )
        elif meth == method.sweep:
            rehearsal.sweep(
                network=network,
                intervention=intervention,
                learnt=learnt
            )
        elif meth == method.pseudoSweep:
            rehearsal.pseudoSweep(
                network=network,
                intervention=intervention,
                numPseudoItems=settings.numPseudoItems
            )
        else:
            print("Method not valid")
            exit(0)

        # Add intervening item to learnt items
        learnt.append(intervention)
        print("Goodness", getGoodness(network=network, testset=test_data))
        totalError[j + 1] += getGoodness(network=network, testset=test_data)

# Average over all repeats; optionally persist one value per line.
averageError = [e / repeats for e in totalError]
if save:
    for e in averageError:
        outputFile.write(str(e) + "\n")
| [
"nimblenet.neuralnet.NeuralNet",
"task.Task",
"rehearsal.recency",
"rehearsal.sweep",
"rehearsal.catastrophicForgetting",
"rehearsal.pseudoSweep",
"nimblenet.data_structures.Instance",
"rehearsal.pseudo"
] | [((1303, 1504), 'task.Task', 'task.Task', ([], {'inputNodes': 'inputNodes', 'hiddenNodes': 'hiddenNodes', 'outputNodes': 'outputNodes', 'populationSize': 'numPatterns', 'auto': 'auto', 'learningConstant': 'learningConstant', 'momentumConstant': 'momentumConstant'}), '(inputNodes=inputNodes, hiddenNodes=hiddenNodes, outputNodes=\n outputNodes, populationSize=numPatterns, auto=auto, learningConstant=\n learningConstant, momentumConstant=momentumConstant)\n', (1312, 1504), False, 'import task\n'), ((2479, 2500), 'nimblenet.neuralnet.NeuralNet', 'NeuralNet', (['mysettings'], {}), '(mysettings)\n', (2488, 2500), False, 'from nimblenet.neuralnet import NeuralNet\n'), ((4073, 4147), 'nimblenet.data_structures.Instance', 'Instance', (["alreadyLearned['inputPatterns'][i]", "alreadyLearned['teacher'][i]"], {}), "(alreadyLearned['inputPatterns'][i], alreadyLearned['teacher'][i])\n", (4081, 4147), False, 'from nimblenet.data_structures import Instance\n'), ((4327, 4405), 'nimblenet.data_structures.Instance', 'Instance', (["interventions[j]['inputPatterns'][0]", "interventions[j]['teacher'][0]"], {}), "(interventions[j]['inputPatterns'][0], interventions[j]['teacher'][0])\n", (4335, 4405), False, 'from nimblenet.data_structures import Instance\n'), ((1809, 1840), 'nimblenet.data_structures.Instance', 'Instance', (['inputs[i]', 'teacher[i]'], {}), '(inputs[i], teacher[i])\n', (1817, 1840), False, 'from nimblenet.data_structures import Instance\n'), ((4552, 4628), 'rehearsal.catastrophicForgetting', 'rehearsal.catastrophicForgetting', ([], {'network': 'network', 'intervention': 'intervention'}), '(network=network, intervention=intervention)\n', (4584, 4628), False, 'import rehearsal\n'), ((4726, 4820), 'rehearsal.recency', 'rehearsal.recency', ([], {'network': 'network', 'intervention': 'intervention', 'learnt': 'learnt', 'random': '(False)'}), '(network=network, intervention=intervention, learnt=learnt,\n random=False)\n', (4743, 4820), False, 'import rehearsal\n'), ((4943, 
5036), 'rehearsal.recency', 'rehearsal.recency', ([], {'network': 'network', 'intervention': 'intervention', 'learnt': 'learnt', 'random': '(True)'}), '(network=network, intervention=intervention, learnt=learnt,\n random=True)\n', (4960, 5036), False, 'import rehearsal\n'), ((5159, 5264), 'rehearsal.pseudo', 'rehearsal.pseudo', ([], {'network': 'network', 'intervention': 'intervention', 'numPseudoItems': 'settings.numPseudoItems'}), '(network=network, intervention=intervention, numPseudoItems\n =settings.numPseudoItems)\n', (5175, 5264), False, 'import rehearsal\n'), ((5369, 5443), 'rehearsal.sweep', 'rehearsal.sweep', ([], {'network': 'network', 'intervention': 'intervention', 'learnt': 'learnt'}), '(network=network, intervention=intervention, learnt=learnt)\n', (5384, 5443), False, 'import rehearsal\n'), ((5559, 5668), 'rehearsal.pseudoSweep', 'rehearsal.pseudoSweep', ([], {'network': 'network', 'intervention': 'intervention', 'numPseudoItems': 'settings.numPseudoItems'}), '(network=network, intervention=intervention,\n numPseudoItems=settings.numPseudoItems)\n', (5580, 5668), False, 'import rehearsal\n')] |
from django.urls import reverse, resolve
from rest_framework.test import APITestCase
from product.views import (
ProduceAPI,
AddProductAPI,
ProduceDetailsAPI,
ProduceEditDelete
)
class TestProduceUrlsCase(APITestCase):
    """Check that every product URL name resolves to its expected view class."""

    def _resolved_view(self, url_name, args=None):
        # Reverse the route name to a concrete path, then resolve that path
        # back to the view class registered for it.
        return resolve(reverse(url_name, args=args)).func.view_class

    def test_get_produce_resolves(self):
        self.assertEquals(self._resolved_view('products'), ProduceAPI)

    def test_add_produce_resolves(self):
        self.assertEquals(self._resolved_view('add-product'), AddProductAPI)

    def test_produce_details_resolves(self):
        self.assertEquals(self._resolved_view('detail-product', args=['1']), ProduceDetailsAPI)

    def test_produce_edit_delete_resolves(self):
        self.assertEquals(self._resolved_view('product-edit-delete', args=['1']), ProduceEditDelete)
| [
"django.urls.resolve",
"django.urls.reverse"
] | [((294, 313), 'django.urls.reverse', 'reverse', (['"""products"""'], {}), "('products')\n", (301, 313), False, 'from django.urls import reverse, resolve\n'), ((438, 460), 'django.urls.reverse', 'reverse', (['"""add-product"""'], {}), "('add-product')\n", (445, 460), False, 'from django.urls import reverse, resolve\n'), ((592, 629), 'django.urls.reverse', 'reverse', (['"""detail-product"""'], {'args': "['1']"}), "('detail-product', args=['1'])\n", (599, 629), False, 'from django.urls import reverse, resolve\n'), ((769, 811), 'django.urls.reverse', 'reverse', (['"""product-edit-delete"""'], {'args': "['1']"}), "('product-edit-delete', args=['1'])\n", (776, 811), False, 'from django.urls import reverse, resolve\n'), ((340, 352), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (347, 352), False, 'from django.urls import reverse, resolve\n'), ((487, 499), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (494, 499), False, 'from django.urls import reverse, resolve\n'), ((656, 668), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (663, 668), False, 'from django.urls import reverse, resolve\n'), ((838, 850), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (845, 850), False, 'from django.urls import reverse, resolve\n')] |
from urllib.request import urlopen
from bs4 import BeautifulSoup
from youtube_dl import YoutubeDL
import pyexcel
# Part 1: scrape the iTunes top-songs chart into a spreadsheet.
url = "https://www.apple.com/itunes/charts/songs/"
# BUG FIX: the HTTP response was never closed; the context manager releases
# the socket deterministically instead of waiting for garbage collection.
with urlopen(url) as response:
    html_content = response.read().decode('utf-8')

soup = BeautifulSoup(html_content, "html.parser")
section = soup.find("section", "section chart-grid").div.ul
li_list = section.find_all("li")

table = []
for li in li_list:
    song = li.h3.a.string      # track title
    artist = li.h4.a.string    # performing artist
    table.append({"Songs": song, "Artists": artist})

pyexcel.save_as(records=table, dest_file_name="itunes_top_songs.xlsx")

# Part 2: download each charted song from YouTube by "<title><artist>" search.
options = {
    'default_search': 'ytsearch',     # treat plain strings as YouTube searches
    'max_downloads': len(table)
}

dl = YoutubeDL(options)
for song in table:
    dl.download([song["Songs"] + song["Artists"]])
| [
"bs4.BeautifulSoup",
"youtube_dl.YoutubeDL",
"urllib.request.urlopen",
"pyexcel.save_as"
] | [((232, 274), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_content', '"""html.parser"""'], {}), "(html_content, 'html.parser')\n", (245, 274), False, 'from bs4 import BeautifulSoup\n'), ((505, 575), 'pyexcel.save_as', 'pyexcel.save_as', ([], {'records': 'table', 'dest_file_name': '"""itunes_top_songs.xlsx"""'}), "(records=table, dest_file_name='itunes_top_songs.xlsx')\n", (520, 575), False, 'import pyexcel\n'), ((670, 688), 'youtube_dl.YoutubeDL', 'YoutubeDL', (['options'], {}), '(options)\n', (679, 688), False, 'from youtube_dl import YoutubeDL\n'), ((188, 200), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (195, 200), False, 'from urllib.request import urlopen\n')] |
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
filename = "experiment_data/012522-16_29_48-data.csv"
data = np.genfromtxt(filename, delimiter=',', skip_header=2)
timestamps = data[:, 0]
timestamps -= timestamps[0]
cf1_actual_position = data[:, 18:21]
human_1_position = data[:,25:28]
def init_animation():
cf1_line.set_data([],[])
human1_line.set_data([],[])
human2_line.set_data([],[])
return cf1_line, human1_line, human2_line
def update_animation(frame):
cf1_line.set_data(cf1_actual_position[0:frame, 0],
cf1_actual_position[0:frame, 1])
cf1_line.set_3d_properties(cf1_actual_position[0:frame, 2])
return cf1_line, human1_line, human2_line
# Attaching 3D axis to the figure
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.set_xlim3d([-2.0, 2.0])
ax.set_xlabel('X')
ax.set_ylim3d([-2.0, 2.0])
ax.set_ylabel('Y')
ax.set_zlim3d([-2.0, 2.0])
ax.set_zlabel('Z')
cf1_line = ax.plot([],[],[], label="CF1 Position")[0]
human1_line = ax.plot([],[],[])[0]
human2_line = ax.plot([],[],[])[0]
line_ani = animation.FuncAnimation(fig,
update_animation,
init_func=init_animation,
frames=len(timestamps),
interval=50,
blit=False)
plt.show()
| [
"mpl_toolkits.mplot3d.axes3d.Axes3D",
"matplotlib.pyplot.figure",
"numpy.genfromtxt",
"matplotlib.pyplot.show"
] | [((197, 250), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': '""","""', 'skip_header': '(2)'}), "(filename, delimiter=',', skip_header=2)\n", (210, 250), True, 'import numpy as np\n'), ((832, 844), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (842, 844), True, 'import matplotlib.pyplot as plt\n'), ((850, 864), 'mpl_toolkits.mplot3d.axes3d.Axes3D', 'p3.Axes3D', (['fig'], {}), '(fig)\n', (859, 864), True, 'import mpl_toolkits.mplot3d.axes3d as p3\n'), ((1439, 1449), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1447, 1449), True, 'import matplotlib.pyplot as plt\n')] |
import pygame
import random
from constants import *
class Snake:
    """A player/AI snake: an ordered list of body segments that chases a
    target point, with rendering, growth, boost and collision logic.

    Relies on Vector/Circle and the tuning constants imported from
    `constants` (BASE_SPEED, PING_PONG, BOOST_*, ...).
    """

    def __init__(self, start: Vector = START, colour=BLUE, target: Vector = START):
        # body[0] is the head; segments follow it via lerp() in move().
        self.body = [start.copy()]
        self.speed_factor = 1.0
        self.colour = colour
        self.direction = Vector()
        self.target = target
        # state is one of 'main', 'boost', 'dead'.
        self.state = 'main'

    @property
    def speed(self):
        # Per-tick movement distance; doubled-ish while boosting.
        return BASE_SPEED * self.speed_factor

    @property
    def radius(self):
        # Segment radius grows with body length.
        return BASE_SIZE + len(self.body) // SIZE_INC

    @property
    def position(self):
        """Head position (first body segment)."""
        return self.body[0]

    def gradient(self, i):
        """Colour for body segment *i*: the base colour shifted by a
        ping-pong ramp along the body, brightened while boosting.
        Each channel is clamped to [0, 255]."""
        boost = BOOST_OFFSET if self.speed_factor == BOOST_FACTOR else 0
        # Fold i into a triangle wave of period 2*PING_PONG.
        if i // PING_PONG % 2 == 1:
            i = PING_PONG - i % PING_PONG
        else:
            i %= PING_PONG
        r = min(max(self.colour[0] + i + boost, 0), 255)
        g = min(max(self.colour[1] + i + boost, 0), 255)
        b = min(max(self.colour[2] + i + boost, 0), 255)
        return r, g, b

    def __str__(self):
        return f'Snake(position={self.position}, length={len(self.body)}, state={self.state})'

    __repr__ = __str__

    def render(self, screen):
        """Draw the body (tail first, so the head ends up on top) and two
        eyes perpendicular to the direction of travel."""
        if self.state == 'dead':
            return
        for i, vector in list(enumerate(self.body))[::-1]:
            pygame.draw.circle(screen, self.gradient(i), (round(vector.x), round(vector.y)), self.radius)
        # Facing direction: from the neck (or the target when length 1) to the head.
        direct = self.position - (self.body[1] if len(self.body) > 1 else self.target)
        for b in (True, False):
            # One eye on each side of the travel axis.
            offset = direct.perpendicular(b) * (self.radius // 2)
            pygame.draw.circle(screen, WHITE, (self.position + offset).tuple(), EYE_SIZE + len(self.body) // EYE_INC)
            pygame.draw.circle(screen, BLACK, (self.position + offset).tuple(), PUPIL_SIZE + len(self.body)
                               // PUPIL_INC)

    def move(self):
        """Advance the head toward the target and make every segment chase
        the one in front of it; boosting periodically sheds a segment."""
        if self.state == 'boost':
            if pygame.time.get_ticks() % BOOST_DCR == 0:
                self.body.pop()
            self.speed_factor = BOOST_FACTOR
        else:
            self.speed_factor = 1
        # NOTE(review): lerp appears to mutate the vector in place and
        # return (distance moved, direction) — confirm against Vector.lerp.
        moved, self.direction = self.body[0].lerp(self.target, self.speed)
        for i in range(1, len(self.body)):
            _, self.direction = self.body[i].lerp(self.body[i-1], moved, MAX_DISTANCE)
        self.state = 'main'

    def grow(self):
        """Append new tail segments; the number added scales with length."""
        if self.state == 'dead':
            return
        for _ in range(len(self.body) // GROWTH_INC + 1):
            self.body.append(self.body[len(self.body) - 1] - self.direction.normalized() * MIN_DISTANCE)

    def boost(self):
        """Enter boost state for the next move(), if long enough to afford it."""
        if self.state == 'dead':
            return
        if len(self.body) > BOOST_MIN:
            self.state = 'boost'

    def collide_snake(self, other, head=False):
        """True if this snake's head overlaps any of *other*'s segments.
        With head=False the other snake's head is excluded."""
        for piece in other.body[0 if head else 1:]:
            min_length = self.radius + other.radius
            # Compare squared distances to avoid a sqrt.
            difference = (self.position - piece).mag_squared()
            if min_length ** 2 >= difference:
                return True
        return False

    def collide_circle(self, other: Circle):
        """True if this snake's head overlaps the circle (e.g. food)."""
        min_length = self.radius + other.radius
        difference = (self.position - other.position).mag_squared()
        return min_length ** 2 >= difference

    def die(self, foods):
        """Turn every FOOD_DEATH-th body segment into a food circle
        (slightly jittered) and mark the snake dead."""
        for i, piece in list(enumerate(self.body))[::-FOOD_DEATH]:
            foods.append(Circle(round(piece.x) + random.randint(0, self.radius),
                                round(piece.y) + random.randint(0, self.radius),
                                DEAD_FOOD_RADIUS, colour=self.gradient(i)))
        self.state = 'dead'
| [
"random.randint",
"pygame.time.get_ticks"
] | [((1912, 1935), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (1933, 1935), False, 'import pygame\n'), ((3358, 3388), 'random.randint', 'random.randint', (['(0)', 'self.radius'], {}), '(0, self.radius)\n', (3372, 3388), False, 'import random\n'), ((3439, 3469), 'random.randint', 'random.randint', (['(0)', 'self.radius'], {}), '(0, self.radius)\n', (3453, 3469), False, 'import random\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of Simple Wallpaper Randomizer
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import sys
import os
import comun
from comun import get_desktop_environment
from simplewallpaperrandomizer import get_not_displayed_files
from simplewallpaperrandomizer import add_file_to_displayed_files
import shutil
def change_gesettings(filename):
    """Install *filename* as the desktop wallpaper and record it as shown.

    The file is first copied to the fixed wallpaper location; if that
    location exists, the current desktop environment's own settings
    command is run against it and the file is marked as displayed.
    """
    shutil.copyfile(filename, comun.SELECTED_WALLPAPER)

    # Shell prefix so gsettings/xfconf talk to the user's session bus.
    env_exports = ('export DISPLAY=:0;'
                   'export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/bus;'
                   'export GSETTINGS_BACKEND=dconf')

    # Per-desktop command templates ("%s" is the wallpaper path).
    gnome_cmd = 'gsettings set org.gnome.desktop.background picture-uri "file://%s"'
    wallpaper_commands = {
        'gnome': gnome_cmd,
        'unity': gnome_cmd,
        'budgie-desktop': gnome_cmd,
        'mate': 'gsettings set org.mate.background picture-filename "%s"',
        'cinnamon': 'gsettings set org.cinnamon.desktop.background picture-uri "file://%s"',
        'xfce4': ('xfconf-query -c xfce4-desktop -p '
                  '/backdrop/screen0/monitorDisplayPort-1/workspace0/last-image '
                  '--set "%s"'),
    }

    if os.path.exists(comun.SELECTED_WALLPAPER):
        template = wallpaper_commands.get(get_desktop_environment())
        if template is not None:
            params = env_exports % os.getuid()
            gset = template % comun.SELECTED_WALLPAPER
            os.system('{0};{1}'.format(params, gset))
            add_file_to_displayed_files(filename)
if __name__ == '__main__':
    # Pick a random wallpaper among those not yet displayed.
    filename = random.choice(get_not_displayed_files())
    if len(sys.argv) > 1:
        # Any command-line argument: actually apply the wallpaper.
        change_gesettings(filename)
    else:
        # No argument: only record the choice, copy the file into place,
        # and print the destination path for the caller to use.
        add_file_to_displayed_files(filename)
        shutil.copyfile(filename, comun.SELECTED_WALLPAPER)
        print(comun.SELECTED_WALLPAPER)
| [
"os.path.exists",
"comun.get_desktop_environment",
"simplewallpaperrandomizer.add_file_to_displayed_files",
"os.getuid",
"simplewallpaperrandomizer.get_not_displayed_files",
"shutil.copyfile",
"os.system"
] | [((1469, 1520), 'shutil.copyfile', 'shutil.copyfile', (['filename', 'comun.SELECTED_WALLPAPER'], {}), '(filename, comun.SELECTED_WALLPAPER)\n', (1484, 1520), False, 'import shutil\n'), ((2036, 2076), 'os.path.exists', 'os.path.exists', (['comun.SELECTED_WALLPAPER'], {}), '(comun.SELECTED_WALLPAPER)\n', (2050, 2076), False, 'import os\n'), ((2146, 2171), 'comun.get_desktop_environment', 'get_desktop_environment', ([], {}), '()\n', (2169, 2171), False, 'from comun import get_desktop_environment\n'), ((2953, 2978), 'simplewallpaperrandomizer.get_not_displayed_files', 'get_not_displayed_files', ([], {}), '()\n', (2976, 2978), False, 'from simplewallpaperrandomizer import get_not_displayed_files\n'), ((3060, 3097), 'simplewallpaperrandomizer.add_file_to_displayed_files', 'add_file_to_displayed_files', (['filename'], {}), '(filename)\n', (3087, 3097), False, 'from simplewallpaperrandomizer import add_file_to_displayed_files\n'), ((3106, 3157), 'shutil.copyfile', 'shutil.copyfile', (['filename', 'comun.SELECTED_WALLPAPER'], {}), '(filename, comun.SELECTED_WALLPAPER)\n', (3121, 3157), False, 'import shutil\n'), ((2104, 2115), 'os.getuid', 'os.getuid', ([], {}), '()\n', (2113, 2115), False, 'import os\n'), ((2826, 2844), 'os.system', 'os.system', (['command'], {}), '(command)\n', (2835, 2844), False, 'import os\n'), ((2857, 2894), 'simplewallpaperrandomizer.add_file_to_displayed_files', 'add_file_to_displayed_files', (['filename'], {}), '(filename)\n', (2884, 2894), False, 'from simplewallpaperrandomizer import add_file_to_displayed_files\n')] |
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
from .log import Log
class HttpServer(BaseHTTPRequestHandler):
    """Minimal HTTP GET server that forwards each request to a handler.

    A handler class is injected via start(); for every GET request an
    instance of it is constructed with this request object and given the
    client address and the raw request path as payload.
    """

    # Injected by start(); instantiated once per GET request.
    request_handler_class = None

    @staticmethod
    def start(url, port, request_handler_class):
        """Create (but do not run) an HTTPServer bound to (url, port)."""
        HttpServer.request_handler_class = request_handler_class
        server = HTTPServer((url, port), HttpServer)
        return server

    def do_GET(self):
        try:
            self.send_response(200)
            self.send_header('content-type', 'text/html')
            self.end_headers()
            payload = self.path
            address = self.client_address
            request_handler = HttpServer.request_handler_class(self)
            request_handler.handle(address, payload)
        except Exception as inst:
            # BUG FIX: wfile is a binary stream, so writing a str here
            # raised a TypeError and masked the original error.  send()
            # now encodes str payloads before writing.
            self.send("error({})".format(inst))
            Log.error("error({})".format(inst))

    def send(self, message):
        """Write *message* to the response body; str is encoded as UTF-8."""
        if isinstance(message, str):
            message = message.encode('utf-8')
        self.wfile.write(message)
"http.server.HTTPServer"
] | [((330, 365), 'http.server.HTTPServer', 'HTTPServer', (['(url, port)', 'HttpServer'], {}), '((url, port), HttpServer)\n', (340, 365), False, 'from http.server import HTTPServer\n')] |
# Generated by Django 3.1.1 on 2020-09-28 19:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes OrderLine.buyer_product
    # optional (blank/null) with DO_NOTHING when the referenced
    # BuyerProduct is deleted.  Do not edit by hand once applied.

    dependencies = [
        ('sales', '0015_auto_20200920_1049'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orderline',
            name='buyer_product',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='sales.buyerproduct'),
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((377, 495), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""sales.buyerproduct"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.DO_NOTHING, to='sales.buyerproduct')\n", (394, 495), False, 'from django.db import migrations, models\n')] |
# ////////////////////////////////////////////////////////////////////////////
# // This file is part of NIID-Net. For more information
# // see <https://github.com/zju3dv/NIID-Net>.
# // If you use this code, please cite the corresponding publications as
# // listed on the above website.
# //
# // Copyright (c) ZJU-SenseTime Joint Lab of 3D Vision. All Rights Reserved.
# //
# // Permission to use, copy, modify and distribute this software and its
# // documentation for educational, research and non-profit purposes only.
# //
# // The above copyright notice and this permission notice shall be included in all
# // copies or substantial portions of the Software.
# //
# // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# // SOFTWARE.
# ////////////////////////////////////////////////////////////////////////////
import random
import numpy as np
import torch
def set_(with_random=True, determine=False, SEED=999):
    """Configure global RNG seeding and cuDNN behaviour for an experiment.

    When ``with_random`` is False, all framework RNGs (torch CPU/CUDA,
    NumPy and Python's ``random``) are seeded with ``SEED`` so runs are
    repeatable.  ``determine`` toggles cuDNN's deterministic mode.
    """
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    if not with_random:
        # Seed every RNG the training code may draw from.
        for seed_rng in (torch.manual_seed,
                         torch.cuda.manual_seed,
                         torch.cuda.manual_seed_all,
                         np.random.seed,
                         random.seed):
            seed_rng(SEED)
        # torch.cuda.set_device(opt.gpu_devices[0])
        # torch.multiprocessing.set_sharing_strategy('file_system')
    torch.backends.cudnn.deterministic = determine
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"random.seed",
"numpy.random.seed",
"torch.cuda.manual_seed"
] | [((1479, 1502), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (1496, 1502), False, 'import torch\n'), ((1511, 1539), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (1533, 1539), False, 'import torch\n'), ((1548, 1580), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['SEED'], {}), '(SEED)\n', (1574, 1580), False, 'import torch\n'), ((1589, 1609), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1603, 1609), True, 'import numpy as np\n'), ((1618, 1635), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (1629, 1635), False, 'import random\n')] |
from city import City
from zoo import Zoo
vienna = City("vienna")
assert vienna.zoo
assert isinstance(vienna.zoo, Zoo)
assert vienna.zoo.size == 130
assert vienna.zoo._owner_name == "<NAME>"
print(
f"City: {vienna.name}\n"
f"Zoo owner: {vienna.zoo._owner_name}\n"
f"Zoo's size: {vienna.zoo.size}\n"
f"Zoo's animals: {', '.join([animal.name for animal in vienna.zoo.animals])}"
)
| [
"city.City"
] | [((53, 67), 'city.City', 'City', (['"""vienna"""'], {}), "('vienna')\n", (57, 67), False, 'from city import City\n')] |
"""
test specfile helpers
"""
import unittest
from pathlib import Path
from packaging_utils.specfile import helpers
class TestSpecfileHelpers(unittest.TestCase):
    """Exercise the specfile helper functions against a bundled spec file."""

    def _spec_path(self):
        # The diffoscope spec fixture lives next to this test module.
        return str(Path(__file__).parent / 'diffoscope.spec')

    def test_source_filenames_diffoscope(self):
        expected = ['diffoscope-177.tar.bz2', 'diffoscope-177.tar.bz2.asc']
        self.assertListEqual(helpers.get_source_filename(self._spec_path()), expected)

    def test_source_urls_diffoscope(self):
        expected = ['https://diffoscope.org/archive/diffoscope-177.tar.bz2',
                    'https://diffoscope.org/archive/diffoscope-177.tar.bz2.asc']
        self.assertEqual(helpers.get_source_urls(self._spec_path()), expected)
| [
"pathlib.Path"
] | [((234, 248), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (238, 248), False, 'from pathlib import Path\n'), ((501, 515), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (505, 515), False, 'from pathlib import Path\n')] |
from twisted.internet import defer
import config
import log
from color import colorize
from err import *
class Scanner(object):
    """Runs a list of checks against a target and renders their results.

    Each entry of ``checks`` is a check *class*; run() instantiates it
    with the target and launches it asynchronously (twisted Deferreds).
    Results are either streamed as checks finish (``runningResults``) or
    printed in one batch once every check has completed.
    """

    def __init__(self, target, checks, title=None, verbose=False, runningResults=False):
        self.target = target
        self.checks = checks       # check classes, instantiated in run()
        self.title = title
        self.scans = []            # instantiated checks, filled by run()
        self.verbose = verbose
        self.runningResults = runningResults

    def __repr__(self):
        return "<Scanner({0.target}, {0.title}, v={0.verbose})>".format(self)

    def checkFinished(self, check):
        """Callback fired when a single check completes."""
        if self.runningResults:
            self.showResult(check.result)

    def showResult(self, result):
        """Print one check's formatted result plus any sub-results."""
        # TODO: RESULT_SUB master results should be shown with warningsOnly on!
        output = self.formatResult(result)
        if output:
            print(output)
        for subresult in result.check.subresults:
            output = self.formatResult(subresult, sub=True)
            if output:
                print(output)

    def showResults(self):
        """Print the batch report (used when runningResults is off)."""
        if not self.runningResults:
            if config.warningsOnly:
                # Suppress the whole section when no check or subcheck failed.
                hasWarning = False
                for scan in self.scans:
                    if not scan.result.status:
                        hasWarning = True
                    for subresult in scan.subresults:
                        if not subresult.status:
                            hasWarning = True
                if not hasWarning:
                    return

            print("")
            print(colorize("@W{0}@x".format(self.title)))
            print(config.bar)

            for scan in self.scans:
                self.showResult(scan.result)

            print("")

    def formatResult(self, result, sub=False):
        """Return a colourised one-line summary for *result*.

        Returns None (warningsOnly pass) or '' (hidden) to tell the
        caller to print nothing.
        """
        if result.extra:
            extra = colorize('@B--@x ') + result.extra
        else:
            extra = ''

        if not result.status:
            last = colorize('@B[@R!!!@B]@x')
        elif config.warningsOnly:
            return
        elif result.status == CHECK_NOT_APPLICABLE:
            # BUG FIX: this literal previously bypassed colorize(), so the
            # raw @-codes leaked into the output for N/A results.
            last = colorize('@B[@D - @B]@x')
        elif result.status == CHECK_RESULT_HIDDEN:
            last = ''
        elif result.status == CHECK_RESULT_SUB:
            last = colorize('@B[---]@x')
        elif result.status == CHECK_RESULT_UNCERTAIN:
            last = colorize('@B[@Y ? @B]@x')
        else:
            last = colorize('@B[ @G- @B]@x')

        if sub:
            output = colorize("   @y-@x {0:49} {1}{2}".format(result.text, last, extra))
        else:
            output = colorize(" @Y*@x {0:51} {1}{2}".format(result.text, last, extra))

        return output

    def run(self):
        """Instantiate and launch every check; return a DeferredList that
        fires (after printing the batch report) once all checks finish."""
        if self.runningResults:
            print("")
            print(colorize("@W{0}@x".format(self.title)))
            print(config.bar)
        for check in self.checks:
            c = check(self.target)
            # "c=c" binds the *current* check as a default argument; a plain
            # closure over c would see only the last loop value.
            def checkFinishedTrigger(value, c=c):
                self.checkFinished(c)
            d = (c.run()
                 .addBoth(checkFinishedTrigger))
            c.deferred = d
            self.scans.append(c)

        dl = (defer.DeferredList([c.deferred for c in self.scans])
              .addCallback(lambda x: self.showResults())
              .addErrback(log.err))  # Uncaught error
        return dl
class DomainScanner(Scanner):
    """Marker subclass for domain-level scans; adds no behaviour."""
    pass

class HostScanner(Scanner):
    """Marker subclass for single-host scans; adds no behaviour."""
    pass

class LocalScanner(Scanner):
    """Marker subclass for local-machine scans; adds no behaviour."""
    pass
| [
"color.colorize",
"twisted.internet.defer.DeferredList"
] | [((1887, 1912), 'color.colorize', 'colorize', (['"""@B[@R!!!@B]@x"""'], {}), "('@B[@R!!!@B]@x')\n", (1895, 1912), False, 'from color import colorize\n'), ((1766, 1785), 'color.colorize', 'colorize', (['"""@B--@x """'], {}), "('@B--@x ')\n", (1774, 1785), False, 'from color import colorize\n'), ((3208, 3260), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['[c.deferred for c in self.scans]'], {}), '([c.deferred for c in self.scans])\n', (3226, 3260), False, 'from twisted.internet import defer\n'), ((2193, 2214), 'color.colorize', 'colorize', (['"""@B[---]@x"""'], {}), "('@B[---]@x')\n", (2201, 2214), False, 'from color import colorize\n'), ((2288, 2313), 'color.colorize', 'colorize', (['"""@B[@Y ? @B]@x"""'], {}), "('@B[@Y ? @B]@x')\n", (2296, 2313), False, 'from color import colorize\n'), ((2347, 2372), 'color.colorize', 'colorize', (['"""@B[ @G- @B]@x"""'], {}), "('@B[ @G- @B]@x')\n", (2355, 2372), False, 'from color import colorize\n')] |
from typing import Dict, Generator, Optional, Tuple, Union
import numpy as np
from joblib import ( # type: ignore
delayed,
Parallel,
)
from numpy import linalg
from sklearn.metrics import accuracy_score
from sklearn.base import BaseEstimator
from libifbtsvm.functions import (
fuzzy_membership,
train_model,
)
from libifbtsvm.models.ifbtsvm import (
ClassificationModel,
FuzzyMembership,
Hyperparameters,
Hyperplane,
)
TrainingSet = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
DAGSubSet = Union[TrainingSet, Generator[TrainingSet, None, None]]
class iFBTSVM(BaseEstimator):
    def __init__(self, parameters: Hyperparameters, n_jobs=1):
        """
        :param parameters: model hyperparameters (kernel, penalties, ...)
        :param n_jobs: parallelism level for joblib during training
        """
        super().__init__()
        self.parameters = parameters
        # Populated during fitting — presumably one sub-model per class
        # pair of the DAG; confirm against the fit implementation.
        self._classifiers: Dict = {}
        self.n_jobs = n_jobs
        self.kernel = parameters.kernel
    @classmethod
    def _compute_score(cls, score, c):
        """
        Update per-sample hit counts for candidate support vectors.

        :param score: None on the first call; otherwise a 2xN array whose
                      row 0 holds sample identifiers and row 1 their counts
        :param c: identifiers of the samples selected in this round
        :return: the updated 2xN score array
        """
        if score is None:
            # First round: every candidate starts with a count of 1.
            score = np.asarray(c)
            sc = np.ones(len(c))
            score = np.array((score, sc))

        else:
            # Increment the counts of previously-seen candidates...
            res, indices_score, indices_c = np.intersect1d(score[0], np.asarray(c), return_indices=True)
            score[1][indices_score] += 1

            # ...and append unseen candidates with a fresh count of 1.
            # NOTE(review): diff.any() is False when the only new
            # identifier is 0 — len(diff) would be safer; confirm that
            # identifiers are always non-zero.
            diff = np.setdiff1d(np.asarray(c), score[0])
            if diff.any():
                _zdiff = np.ones(len(diff))
                new_score = np.array((diff, _zdiff))
                score_0 = np.append(score[0], [new_score[0]])
                score_1 = np.append(score[1], [new_score[1]])
                score = np.asarray([score_0, score_1])

        return score
@staticmethod
def _decrement(candidates, score, alphas, fuzzy, data) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
:return:
"""
sco0 = np.delete(score[0], candidates)
sco1 = np.delete(score[1], candidates)
score = np.asarray([sco0, sco1])
alphas = np.delete(alphas, candidates)
fuzzy = np.delete(fuzzy, candidates)
data = np.delete(data, candidates, axis=0)
return score, alphas, fuzzy, data
@staticmethod
def _filter_gradients(weights: np.ndarray, gradients: np.ndarray, data:
np.ndarray, label: np.ndarray) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
"""
Filters data points based on the projected gradients.
Kept data will include only values for which the projected gradients that will expand the support
vectors, meaning that are outside boundaries of current support vectors of the classifier.
:param gradients: The gradients with which to perform the computation
:param data: Data to filter
:return: Filtered data
"""
_data = np.append(data, np.ones((len(data), 1)), axis=1)
_new_grads = np.matmul(-_data, weights) - 1
_del = np.argwhere(np.logical_or(_new_grads <= min(gradients), _new_grads >= max(gradients)))
index = np.reshape(_del, newshape=_del.shape[0],)
if not len(index):
return data, label
_data = np.delete(data, index, axis=0)
_label = np.delete(label, index)
return _data, _label
@classmethod
def _fit_dag_step(cls, subset: TrainingSet, parameters: Hyperparameters) -> ClassificationModel:
"""
Trains a classifier based on a sub-set of data, as a step in the DAG classifier algorithm.
:param subset: Sub-set of data containing the training data for this DAG step
:param parameters: The classifier hyperparameters
:returns: A classification model for this subset
"""
# Features (x_p) of the current "positive" class
x_p = subset[0]
y_p = subset[1]
# Features (x_n) of the current "negative" class
x_n = subset[2]
y_n = subset[3]
# Calculate fuzzy membership for points
membership: FuzzyMembership = fuzzy_membership(params=parameters, class_p=x_p, class_n=x_n)
# Build H matrix which is [X_p/n, e] where "e" is an extra column of ones ("1") appended at the end of the
# matrix
# i.e.
#
# if X_p = | 1 2 3 | and e = | 1 | then H_p = | 1 2 3 1 |
# | 4 5 6 | | 1 | | 4 5 6 1 |
# | 7 8 9 | | 1 | | 7 8 9 1 |
#
H_p = np.append(x_p, np.ones((x_p.shape[0], 1)), axis=1)
H_n = np.append(x_n, np.ones((x_n.shape[0], 1)), axis=1)
_C1 = parameters.C1 * membership.sn
_C3 = parameters.C3 * membership.sp
_C2 = parameters.C2
_C4 = parameters.C4
# Train the model using the algorithm described by (de Mello et al. 2019)
# Python
hyperplane_p: Hyperplane = train_model(parameters=parameters, H=H_n, G=H_p, C=_C4, CCx=_C3)
hyperplane_n: Hyperplane = train_model(parameters=parameters, H=H_p, G=H_n, C=_C2, CCx=_C1)
hyperplane_n.weights = -hyperplane_n.weights
return ClassificationModel(class_p=y_p[0],
class_n=y_n[0],
fuzzy=membership,
weights_p=hyperplane_p,
weights_n=hyperplane_n,
data_p=x_p,
data_n=x_n)
@classmethod
def _increment_dag_step(cls, subset: TrainingSet, parameters: Hyperparameters,
classifier: ClassificationModel) -> ClassificationModel:
"""
Increment already trained DAG models
:param subset: Sub-set of data containing the update data for this DAG step
:param parameters: The classifier hyperparameters
:param classifier: The classifier to update
:return: The updated classifier
"""
# Features (x_p) of the current "positive" class
x_p = subset[0]
y_p = subset[1]
# Features (x_n) of the current "negative" class
x_n = subset[2]
y_n = subset[3]
_batch_xp, _batch_yp = cls._filter_gradients(weights=classifier.p.weights,
gradients=classifier.p.projected_gradients,
data=x_p, label=y_p)
if _batch_xp is None:
return classifier
_batch_xn, _batch_yn = None, None
if x_n.any() and y_n.any():
_batch_xn, _batch_yn = cls._filter_gradients(weights=classifier.p.weights,
gradients=classifier.p.projected_gradients,
data=x_n, label=y_n)
_data_xp = classifier.data_p
if _batch_xp is not None and _batch_xp.any():
_data_xp = np.concatenate((_data_xp, _batch_xp)) if _batch_xp is not None else classifier.data_p
_data_xn = classifier.data_n
if _batch_xn is not None and _batch_xn.any():
_data_xn = np.concatenate((_data_xn, _batch_xn)) if _batch_xn is not None else classifier.data_n
# Calculate fuzzy membership for points
membership: FuzzyMembership = fuzzy_membership(params=parameters, class_p=_data_xp, class_n=_data_xn)
# Build H matrix which is [X_p/n, e] where "e" is an extra column of ones ("1") appended at the end of the
# matrix
# i.e.
#
# if X_p = | 1 2 3 | and e = | 1 | then H_p = | 1 2 3 1 |
# | 4 5 6 | | 1 | | 4 5 6 1 |
# | 7 8 9 | | 1 | | 7 8 9 1 |
#
H_p = np.append(_data_xp, np.ones((_data_xp.shape[0], 1)), axis=1)
H_n = np.append(_data_xn, np.ones((_data_xn.shape[0], 1)), axis=1)
_C1 = parameters.C1 * membership.sn
_C3 = parameters.C3 * membership.sp
_C2 = parameters.C2
_C4 = parameters.C4
# Recompute the training with the update data
hyperplane_p: Hyperplane = train_model(parameters=parameters, H=H_n, G=H_p, C=_C4, CCx=_C3)
hyperplane_n: Hyperplane = train_model(parameters=parameters, H=H_p, G=H_n, C=_C2, CCx=_C1)
hyperplane_n.weights = -hyperplane_n.weights
classifier.p = hyperplane_p
classifier.n = hyperplane_n
classifier.fuzzy_membership = membership
classifier.data_p = _data_xp
classifier.data_n = _data_xn
c_pos = np.argwhere(classifier.p.alpha <= parameters.phi)
c_pos = np.reshape(c_pos, newshape=(c_pos.shape[0],))
c_neg = np.argwhere(classifier.n.alpha <= parameters.phi)
c_neg = np.reshape(c_neg, newshape=(c_neg.shape[0],))
classifier.score_p = cls._compute_score(classifier.score_p, c_pos)
classifier.score_n = cls._compute_score(classifier.score_n, c_neg)
_candidates_p = np.argwhere(classifier.score_p[1] >= parameters.forget_score)
_candidates_p = np.reshape(_candidates_p, newshape=(_candidates_p.shape[0], ))
_candidates_n = np.argwhere(classifier.score_n[1] >= parameters.forget_score)
_candidates_n = np.reshape(_candidates_n, newshape=(_candidates_n.shape[0], ))
if _candidates_p.any():
score, alpha, fuzzy, data = cls._decrement(candidates=_candidates_p,
score=classifier.score_p,
alphas=classifier.p.alpha,
fuzzy=classifier.fuzzy_membership.sp,
data=_data_xp)
classifier.p.alpha = alpha
classifier.fuzzy_membership.sp = fuzzy
classifier.data_p = data
classifier.score_p = score
if _candidates_n.any():
score, alpha, fuzzy, data = cls._decrement(candidates=_candidates_n,
score=classifier.score_n,
alphas=classifier.n.alpha,
fuzzy=classifier.fuzzy_membership.sn,
data=_data_xn)
classifier.n.alpha = alpha
classifier.fuzzy_membership.sn = fuzzy
classifier.data_n = data
classifier.score_n = score
return classifier
@classmethod
def _generate_sub_sets(cls, X: np.ndarray, y: np.ndarray) -> DAGSubSet:
"""
Generates sub-data sets based on the DAG classification principle.
Example, for 4 classes, the function will return the following:
[0]: Values and labels of Class 1 and 2
[1]: Values and labels of Class 1 and 3
[2]: Values and labels of Class 1 and 4
[3]: Values and labels of Class 2 and 3
[4]: Values and labels of Class 2 and 4
[5]: Values and labels of Class 3 and 4
:param X: The full training set
:param y: The full training labels set
:return: Generator of tuple containing values and labels for positive and negative class
based on the current iteration in the classification DAG.
- [0] Values for current X positive
- [1] Labels for current X positive
- [2] Values for current X negative
- [3] Labels for current X negative
"""
classes = np.unique(y)
if len(classes) == 1:
return X[classes[0]], y[classes[0]], np.ndarray(), np.ndarray()
for _p in range(classes.size):
for _n in range(_p + 1, classes.size):
_index_p = np.where(y == classes[_p])[0]
_index_n = np.where(y == classes[_n])[0]
yield X[_index_p], y[_index_p], X[_index_n], y[_index_n]
def decision_function(self, X):
"""
Evalutes the decision function over X.
:param X: Array of features to evaluate the decision on.
:return: Array of decision evaluation.
"""
pass
def fit(self, X: np.ndarray, y: np.ndarray, sample_weight=None):
"""
Trains a iFBTSVM model
:param X: The training samples
:param y: The class labels for each training sample
:param sample_weight: (Not supported)
"""
X = self.kernel.fit_transform(X=X, y=y) if self.kernel else X # type: ignore
# Train the DAG models in parallel
trained_hyperplanes = Parallel(n_jobs=self.n_jobs, prefer='processes')(
delayed(self._fit_dag_step)(subset, self.parameters) for subset in self._generate_sub_sets(X, y)
)
# Create the DAG Model here
for hypp in trained_hyperplanes:
_clsf = self._classifiers.get(hypp.class_p, {})
_clsf[hypp.class_n] = hypp
self._classifiers[hypp.class_p] = _clsf
def update(self, X: np.ndarray, y: np.ndarray, batch_size: int = None):
"""
Update an already trained classifier
:param X: The training data with which to update the models.
:param y: The training labels with which to update the models.
:param batch_size: The batch size for updating models
"""
if not batch_size:
batch_size = len(y)
i = 0
while i < len(X):
batch_x = X[i: i + batch_size]
batch_y = y[i: i + batch_size]
batch_x = self.kernel.transform(X=batch_x) if self.kernel else batch_x # type: ignore
# Update the DAG models in parallel
updated_hyperplanes = Parallel(n_jobs=self.n_jobs, prefer='processes')(
delayed(self._increment_dag_step)
(
subset,
self.parameters,
self._classifiers[subset[1][0]][subset[3][0]] # Get classifier for ClassP/ClassN of this subset
)
for subset in self._generate_sub_sets(batch_x, batch_y)
)
# Create the DAG Model here
for hypp in updated_hyperplanes:
_clsf = self._classifiers.get(hypp.class_p, {})
_clsf[hypp.class_n] = hypp
self._classifiers[hypp.class_p] = _clsf
i += batch_size
def predict(self, X):
"""
Performs classification X.
:param X: Array of features to classify
:return: Array of classification result
"""
X = self.kernel.transform(X=X) if self.kernel else X
lh_keys = list(set(self._classifiers.keys()))
rh_keys = set()
for value in self._classifiers.values():
for key, _ in value.items():
rh_keys.add(key)
rh_keys = list(rh_keys)
classes = []
for row in X:
_dag_index_p = 0
_dag_index_n = 0
f_pos = 0
f_neg = 0
class_p = None
class_n = None
while True:
try:
class_p = lh_keys[_dag_index_p]
class_n = rh_keys[_dag_index_n]
model: ClassificationModel = self._classifiers[class_p][class_n]
f_pos = np.divide(np.matmul(row, model.p.weights[:-1]) + model.p.weights[-1],
linalg.norm(model.p.weights[:-1]))
f_neg = np.divide(np.matmul(row, model.n.weights[:-1]) + model.n.weights[-1],
linalg.norm(model.n.weights[:-1]))
if abs(f_pos) < abs(f_neg):
_dag_index_p = _dag_index_n + 1
_dag_index_n += 1
else:
_dag_index_n += 1
except (StopIteration, IndexError):
if abs(f_pos) < abs(f_neg):
classes.append(class_n)
else:
classes.append(class_p)
break
return classes
def score(self, X, y, sample_weight=None):
"""
Returns the accuracy of a classification.
:param X: Array of features to classify
:param y: 1-D Array of truth values for the features
:param sample_weight: Not supported
:return: Accuracy score of the classification
"""
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
| [
"numpy.reshape",
"numpy.unique",
"numpy.ones",
"libifbtsvm.functions.train_model",
"numpy.delete",
"numpy.where",
"numpy.asarray",
"joblib.Parallel",
"numpy.array",
"numpy.append",
"numpy.argwhere",
"numpy.matmul",
"numpy.ndarray",
"numpy.concatenate",
"numpy.linalg.norm",
"joblib.dela... | [((1870, 1901), 'numpy.delete', 'np.delete', (['score[0]', 'candidates'], {}), '(score[0], candidates)\n', (1879, 1901), True, 'import numpy as np\n'), ((1917, 1948), 'numpy.delete', 'np.delete', (['score[1]', 'candidates'], {}), '(score[1], candidates)\n', (1926, 1948), True, 'import numpy as np\n'), ((1966, 1990), 'numpy.asarray', 'np.asarray', (['[sco0, sco1]'], {}), '([sco0, sco1])\n', (1976, 1990), True, 'import numpy as np\n'), ((2009, 2038), 'numpy.delete', 'np.delete', (['alphas', 'candidates'], {}), '(alphas, candidates)\n', (2018, 2038), True, 'import numpy as np\n'), ((2055, 2083), 'numpy.delete', 'np.delete', (['fuzzy', 'candidates'], {}), '(fuzzy, candidates)\n', (2064, 2083), True, 'import numpy as np\n'), ((2099, 2134), 'numpy.delete', 'np.delete', (['data', 'candidates'], {'axis': '(0)'}), '(data, candidates, axis=0)\n', (2108, 2134), True, 'import numpy as np\n'), ((3059, 3099), 'numpy.reshape', 'np.reshape', (['_del'], {'newshape': '_del.shape[0]'}), '(_del, newshape=_del.shape[0])\n', (3069, 3099), True, 'import numpy as np\n'), ((3177, 3207), 'numpy.delete', 'np.delete', (['data', 'index'], {'axis': '(0)'}), '(data, index, axis=0)\n', (3186, 3207), True, 'import numpy as np\n'), ((3225, 3248), 'numpy.delete', 'np.delete', (['label', 'index'], {}), '(label, index)\n', (3234, 3248), True, 'import numpy as np\n'), ((4021, 4082), 'libifbtsvm.functions.fuzzy_membership', 'fuzzy_membership', ([], {'params': 'parameters', 'class_p': 'x_p', 'class_n': 'x_n'}), '(params=parameters, class_p=x_p, class_n=x_n)\n', (4037, 4082), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((4890, 4954), 'libifbtsvm.functions.train_model', 'train_model', ([], {'parameters': 'parameters', 'H': 'H_n', 'G': 'H_p', 'C': '_C4', 'CCx': '_C3'}), '(parameters=parameters, H=H_n, G=H_p, C=_C4, CCx=_C3)\n', (4901, 4954), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((4990, 5054), 
'libifbtsvm.functions.train_model', 'train_model', ([], {'parameters': 'parameters', 'H': 'H_p', 'G': 'H_n', 'C': '_C2', 'CCx': '_C1'}), '(parameters=parameters, H=H_p, G=H_n, C=_C2, CCx=_C1)\n', (5001, 5054), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((5124, 5269), 'libifbtsvm.models.ifbtsvm.ClassificationModel', 'ClassificationModel', ([], {'class_p': 'y_p[0]', 'class_n': 'y_n[0]', 'fuzzy': 'membership', 'weights_p': 'hyperplane_p', 'weights_n': 'hyperplane_n', 'data_p': 'x_p', 'data_n': 'x_n'}), '(class_p=y_p[0], class_n=y_n[0], fuzzy=membership,\n weights_p=hyperplane_p, weights_n=hyperplane_n, data_p=x_p, data_n=x_n)\n', (5143, 5269), False, 'from libifbtsvm.models.ifbtsvm import ClassificationModel, FuzzyMembership, Hyperparameters, Hyperplane\n'), ((7327, 7398), 'libifbtsvm.functions.fuzzy_membership', 'fuzzy_membership', ([], {'params': 'parameters', 'class_p': '_data_xp', 'class_n': '_data_xn'}), '(params=parameters, class_p=_data_xp, class_n=_data_xn)\n', (7343, 7398), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((8181, 8245), 'libifbtsvm.functions.train_model', 'train_model', ([], {'parameters': 'parameters', 'H': 'H_n', 'G': 'H_p', 'C': '_C4', 'CCx': '_C3'}), '(parameters=parameters, H=H_n, G=H_p, C=_C4, CCx=_C3)\n', (8192, 8245), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((8281, 8345), 'libifbtsvm.functions.train_model', 'train_model', ([], {'parameters': 'parameters', 'H': 'H_p', 'G': 'H_n', 'C': '_C2', 'CCx': '_C1'}), '(parameters=parameters, H=H_p, G=H_n, C=_C2, CCx=_C1)\n', (8292, 8345), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((8613, 8662), 'numpy.argwhere', 'np.argwhere', (['(classifier.p.alpha <= parameters.phi)'], {}), '(classifier.p.alpha <= parameters.phi)\n', (8624, 8662), True, 'import numpy as np\n'), ((8679, 8724), 'numpy.reshape', 'np.reshape', (['c_pos'], {'newshape': '(c_pos.shape[0],)'}), '(c_pos, 
newshape=(c_pos.shape[0],))\n', (8689, 8724), True, 'import numpy as np\n'), ((8741, 8790), 'numpy.argwhere', 'np.argwhere', (['(classifier.n.alpha <= parameters.phi)'], {}), '(classifier.n.alpha <= parameters.phi)\n', (8752, 8790), True, 'import numpy as np\n'), ((8807, 8852), 'numpy.reshape', 'np.reshape', (['c_neg'], {'newshape': '(c_neg.shape[0],)'}), '(c_neg, newshape=(c_neg.shape[0],))\n', (8817, 8852), True, 'import numpy as np\n'), ((9029, 9090), 'numpy.argwhere', 'np.argwhere', (['(classifier.score_p[1] >= parameters.forget_score)'], {}), '(classifier.score_p[1] >= parameters.forget_score)\n', (9040, 9090), True, 'import numpy as np\n'), ((9115, 9176), 'numpy.reshape', 'np.reshape', (['_candidates_p'], {'newshape': '(_candidates_p.shape[0],)'}), '(_candidates_p, newshape=(_candidates_p.shape[0],))\n', (9125, 9176), True, 'import numpy as np\n'), ((9202, 9263), 'numpy.argwhere', 'np.argwhere', (['(classifier.score_n[1] >= parameters.forget_score)'], {}), '(classifier.score_n[1] >= parameters.forget_score)\n', (9213, 9263), True, 'import numpy as np\n'), ((9288, 9349), 'numpy.reshape', 'np.reshape', (['_candidates_n'], {'newshape': '(_candidates_n.shape[0],)'}), '(_candidates_n, newshape=(_candidates_n.shape[0],))\n', (9298, 9349), True, 'import numpy as np\n'), ((11603, 11615), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (11612, 11615), True, 'import numpy as np\n'), ((1043, 1056), 'numpy.asarray', 'np.asarray', (['c'], {}), '(c)\n', (1053, 1056), True, 'import numpy as np\n'), ((1110, 1131), 'numpy.array', 'np.array', (['(score, sc)'], {}), '((score, sc))\n', (1118, 1131), True, 'import numpy as np\n'), ((2908, 2934), 'numpy.matmul', 'np.matmul', (['(-_data)', 'weights'], {}), '(-_data, weights)\n', (2917, 2934), True, 'import numpy as np\n'), ((4508, 4534), 'numpy.ones', 'np.ones', (['(x_p.shape[0], 1)'], {}), '((x_p.shape[0], 1))\n', (4515, 4534), True, 'import numpy as np\n'), ((4573, 4599), 'numpy.ones', 'np.ones', (['(x_n.shape[0], 1)'], {}), 
'((x_n.shape[0], 1))\n', (4580, 4599), True, 'import numpy as np\n'), ((7829, 7860), 'numpy.ones', 'np.ones', (['(_data_xp.shape[0], 1)'], {}), '((_data_xp.shape[0], 1))\n', (7836, 7860), True, 'import numpy as np\n'), ((7904, 7935), 'numpy.ones', 'np.ones', (['(_data_xn.shape[0], 1)'], {}), '((_data_xn.shape[0], 1))\n', (7911, 7935), True, 'import numpy as np\n'), ((12669, 12717), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'prefer': '"""processes"""'}), "(n_jobs=self.n_jobs, prefer='processes')\n", (12677, 12717), False, 'from joblib import delayed, Parallel\n'), ((1216, 1229), 'numpy.asarray', 'np.asarray', (['c'], {}), '(c)\n', (1226, 1229), True, 'import numpy as np\n'), ((1325, 1338), 'numpy.asarray', 'np.asarray', (['c'], {}), '(c)\n', (1335, 1338), True, 'import numpy as np\n'), ((1450, 1474), 'numpy.array', 'np.array', (['(diff, _zdiff)'], {}), '((diff, _zdiff))\n', (1458, 1474), True, 'import numpy as np\n'), ((1502, 1537), 'numpy.append', 'np.append', (['score[0]', '[new_score[0]]'], {}), '(score[0], [new_score[0]])\n', (1511, 1537), True, 'import numpy as np\n'), ((1564, 1599), 'numpy.append', 'np.append', (['score[1]', '[new_score[1]]'], {}), '(score[1], [new_score[1]])\n', (1573, 1599), True, 'import numpy as np\n'), ((1624, 1654), 'numpy.asarray', 'np.asarray', (['[score_0, score_1]'], {}), '([score_0, score_1])\n', (1634, 1654), True, 'import numpy as np\n'), ((6953, 6990), 'numpy.concatenate', 'np.concatenate', (['(_data_xp, _batch_xp)'], {}), '((_data_xp, _batch_xp))\n', (6967, 6990), True, 'import numpy as np\n'), ((7154, 7191), 'numpy.concatenate', 'np.concatenate', (['(_data_xn, _batch_xn)'], {}), '((_data_xn, _batch_xn))\n', (7168, 7191), True, 'import numpy as np\n'), ((11695, 11707), 'numpy.ndarray', 'np.ndarray', ([], {}), '()\n', (11705, 11707), True, 'import numpy as np\n'), ((11709, 11721), 'numpy.ndarray', 'np.ndarray', ([], {}), '()\n', (11719, 11721), True, 'import numpy as np\n'), ((13786, 13834), 'joblib.Parallel', 
'Parallel', ([], {'n_jobs': 'self.n_jobs', 'prefer': '"""processes"""'}), "(n_jobs=self.n_jobs, prefer='processes')\n", (13794, 13834), False, 'from joblib import delayed, Parallel\n'), ((11841, 11867), 'numpy.where', 'np.where', (['(y == classes[_p])'], {}), '(y == classes[_p])\n', (11849, 11867), True, 'import numpy as np\n'), ((11898, 11924), 'numpy.where', 'np.where', (['(y == classes[_n])'], {}), '(y == classes[_n])\n', (11906, 11924), True, 'import numpy as np\n'), ((12731, 12758), 'joblib.delayed', 'delayed', (['self._fit_dag_step'], {}), '(self._fit_dag_step)\n', (12738, 12758), False, 'from joblib import delayed, Parallel\n'), ((13852, 13885), 'joblib.delayed', 'delayed', (['self._increment_dag_step'], {}), '(self._increment_dag_step)\n', (13859, 13885), False, 'from joblib import delayed, Parallel\n'), ((15524, 15557), 'numpy.linalg.norm', 'linalg.norm', (['model.p.weights[:-1]'], {}), '(model.p.weights[:-1])\n', (15535, 15557), False, 'from numpy import linalg\n'), ((15695, 15728), 'numpy.linalg.norm', 'linalg.norm', (['model.n.weights[:-1]'], {}), '(model.n.weights[:-1])\n', (15706, 15728), False, 'from numpy import linalg\n'), ((15426, 15462), 'numpy.matmul', 'np.matmul', (['row', 'model.p.weights[:-1]'], {}), '(row, model.p.weights[:-1])\n', (15435, 15462), True, 'import numpy as np\n'), ((15597, 15633), 'numpy.matmul', 'np.matmul', (['row', 'model.n.weights[:-1]'], {}), '(row, model.n.weights[:-1])\n', (15606, 15633), True, 'import numpy as np\n')] |
import threading
import random
import time
import datetime
import pickle, os
class session:
    """In-memory per-client session: a key/value store plus a last-activity timestamp."""

    def __init__(self, sesid, timeout=259200):
        """Create a session identified by *sesid*; *timeout* is its lifetime in seconds (default: 3 days)."""
        self.id = sesid
        self.timeout = timeout
        self.data = {}
        self.idle = datetime.datetime.now()

    def _touch(self):
        # Record the current moment as the last time this session was used.
        self.idle = datetime.datetime.now()

    def setData(self, param, value):
        """Store *value* under *param* and refresh the idle timestamp."""
        self.data[param] = value
        self._touch()

    def getData(self, param):
        """Return the value stored under *param*, or None when absent; refreshes idle only on a hit."""
        if param not in self.data:
            return None
        self._touch()
        return self.data[param]

    def delData(self, param):
        """Remove *param* from the session (KeyError when missing) and refresh idle."""
        del self.data[param]
        self._touch()

    def delAllData(self):
        """Drop every stored value and refresh idle."""
        self.data = {}
        self._touch()
class SessionHandler(threading.Thread):
    """Background thread that owns all active sessions and expires idle ones.

    The session table is persisted to *path_save* (pickle) on close() and
    reloaded on construction, so sessions survive a server restart.
    """

    def __init__(self, path_save):
        """
        :param path_save: Path of the pickle file used to persist the session table
        """
        threading.Thread.__init__(self)
        self.path_save = path_save
        self.isClose = threading.Event()
        if os.path.isfile(self.path_save):
            # `infile` instead of `input`: avoid shadowing the builtin
            with open(self.path_save, 'rb') as infile:
                self.sessionList = pickle.load(infile)
        else:
            self.sessionList = {}

    def generate_id_cookies(self):
        """Generate a 26-character alphanumeric session id.

        SECURITY FIX: uses the cryptographic ``secrets`` RNG instead of
        ``random`` -- session ids are security tokens and must not be
        predictable by an attacker who has observed earlier ids.
        """
        import secrets  # local import: keeps the module's import block unchanged

        sesid = ''
        for x in range(26):
            c = secrets.randbelow(31) + 1   # 1..31, same range as the former random.randint(1, 31)
            c2 = secrets.randbelow(2)        # coin flip: letter case / digit offset
            if(c < 27):
                # 1..26 maps to a letter; c2 picks upper (|64 -> 'A'..'Z') or lower (|96 -> 'a'..'z') case
                if c2:
                    c = c | 64
                else:
                    c = c | 96
                sesid += chr(c)
            else:
                # 27..31 maps to one digit: c % 27 is 0..4, shifted by 5 when c2 is set
                c = c % 27
                if c2:
                    c += 5
                sesid += str(c)
        return sesid

    def set_key(self, requestHeandler):
        """Create a fresh, unused session id and set it as the PySessID cookie."""
        session_id = self.generate_id_cookies()
        while session_id in self.sessionList:
            # Extremely unlikely collision: draw again until the id is unused
            session_id = self.generate_id_cookies()
        requestHeandler.set_respone_header('Set-Cookie', 'PySessID='+session_id+';path=/')
        return session_id

    def get_cookies(self, requestHeandler):
        """Parse the request's Cookie header into a {name: value} dict."""
        cookies = {}
        if 'Cookie' in requestHeandler.headers:
            for cookies_header in requestHeandler.headers['Cookie'].split('; '):
                # split on the first '=' only: cookie values may themselves contain '='
                key, val = cookies_header.split('=', 1)
                cookies[key] = val
        return cookies

    def create(self, requestHeandler):
        """Return the session bound to this request, creating a new one when needed."""
        cookies = self.get_cookies(requestHeandler)
        if 'PySessID' in cookies:
            session_id = cookies['PySessID']
            if not session_id in self.sessionList:
                # Stale/unknown cookie: issue a brand-new id and session
                session_id = self.set_key(requestHeandler)
                self.sessionList[session_id] = session(session_id)
                return self.sessionList[session_id]
            return self.sessionList[session_id]
        session_id = self.set_key(requestHeandler)
        self.sessionList[session_id] = session(session_id)
        return self.sessionList[session_id]

    def run(self):
        """Every 60 seconds, drop sessions idle past their timeout, until close() is called."""
        while not self.isClose.wait(60):
            now = datetime.datetime.now()
            # Sorted by expiry time so the scan can stop at the first live session
            s_l = sorted(self.sessionList.values(), key=lambda g: g.idle + datetime.timedelta(seconds=g.timeout))
            for s in s_l:
                if now > s.idle + datetime.timedelta(seconds=s.timeout):
                    del self.sessionList[s.id]
                else:
                    break

    def close(self):
        """Stop the cleanup loop and persist the session table to disk."""
        self.isClose.set()
        with open(self.path_save, 'wb') as output:  # Overwrites any existing file.
            pickle.dump(self.sessionList, output, pickle.HIGHEST_PROTOCOL)
| [
"threading.Thread.__init__",
"pickle.dump",
"pickle.load",
"threading.Event",
"datetime.datetime.now",
"os.path.isfile",
"datetime.timedelta",
"random.randint"
] | [((216, 239), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (237, 239), False, 'import datetime\n'), ((353, 376), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (374, 376), False, 'import datetime\n'), ((621, 644), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (642, 644), False, 'import datetime\n'), ((714, 737), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (735, 737), False, 'import datetime\n'), ((821, 852), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (846, 852), False, 'import threading\n'), ((911, 928), 'threading.Event', 'threading.Event', ([], {}), '()\n', (926, 928), False, 'import threading\n'), ((940, 970), 'os.path.isfile', 'os.path.isfile', (['self.path_save'], {}), '(self.path_save)\n', (954, 970), False, 'import pickle, os\n'), ((462, 485), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (483, 485), False, 'import datetime\n'), ((1226, 1247), 'random.randint', 'random.randint', (['(1)', '(31)'], {}), '(1, 31)\n', (1240, 1247), False, 'import random\n'), ((1265, 1285), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1279, 1285), False, 'import random\n'), ((2836, 2859), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2857, 2859), False, 'import datetime\n'), ((3307, 3369), 'pickle.dump', 'pickle.dump', (['self.sessionList', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(self.sessionList, output, pickle.HIGHEST_PROTOCOL)\n', (3318, 3369), False, 'import pickle, os\n'), ((1061, 1079), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (1072, 1079), False, 'import pickle, os\n'), ((3029, 3066), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 's.timeout'}), '(seconds=s.timeout)\n', (3047, 3066), False, 'import datetime\n'), ((2932, 2969), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'g.timeout'}), '(seconds=g.timeout)\n', 
(2950, 2969), False, 'import datetime\n')] |
# Simple Python script to compute feature importance using univariate statistical analysis, recursive feature elimination, and elastic net
# by <NAME> and <NAME>, 2018
import numpy as np
import sys
# feature selection methods
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import GenericUnivariateSelect
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import RFE
# this is a common feature selection method from bio-informatics that exploits ElasticNet
from sklearn.linear_model import ElasticNetCV
# other functions from sklearn
from sklearn.svm import SVC
# these are a few useful functions
from pandas import read_csv # incredibly useful!
from datetime import datetime
# local functions
import genericFunctions
def main():
    """Compare classic feature-selection methods against an ensemble-ML ranking.

    For each method (univariate k-best, RFE with a linear SVC, ElasticNetCV):
    fit the selector on the TCGA dataset, write the ranked features to a CSV,
    and log how many of its top-100 features overlap with the precomputed
    ensemble-feature-selection (EFS) top-100.
    """
    # hard-coded constants
    methodologies = dict()
    methodologies["univariate"] = SelectKBest(k=100)
    # WARNING: RFE can take A LOT of time to complete. Be patient (or comment the following line)
    methodologies["recursive-feature-elimination-svc"] = RFE(SVC(kernel='linear'), n_features_to_select=100, verbose=1)
    methodologies["elastic-net"] = ElasticNetCV()

    featuresEFSFile = "../results/feature-importance-efs.csv"

    print("Loading dataset...")
    X, y, biomarkerNames = genericFunctions.loadTCGADataset()

    for methodName in methodologies:
        start_time = datetime.now()
        print("\nComputing most relevant features using methodology \"" + methodName + "\"...")
        featureSelectionMethod = methodologies[methodName]
        featureSelectionMethod.fit(X, y)
        delta_time = datetime.now() - start_time

        # create list of (score, featureName) tuples, depending on what the fitted method exposes
        sortedFeatures = None
        if methodName.find("select-from-model") != -1 or methodName.find("recursive-feature-elimination") != -1:
            # selectors exposing a boolean support mask: selected features all score 1.0
            featureIndices = featureSelectionMethod.get_support(indices=True)
            sortedFeatures = [(1.0, biomarkerNames[i]) for i in featureIndices]
        elif methodName.find("elastic-net") != -1:
            # linear model: its coefficients act as feature scores
            coefficients = featureSelectionMethod.coef_
            sortedFeatures = list(zip(list(coefficients), biomarkerNames))
        else:
            # univariate selectors expose per-feature scores directly
            sortedFeatures = list(zip(list(featureSelectionMethod.scores_), biomarkerNames))

        # remove all 'nan' values and sort on first element (best score first)
        sortedFeatures = [x for x in sortedFeatures if not np.isnan(x[0])]
        sortedFeatures = sorted(sortedFeatures, key=lambda x: x[0], reverse=True)

        # save everything to file
        outputFile = "feature-importance-" + methodName + ".csv"
        with open(outputFile, "w") as fp:
            for score, feature in sortedFeatures:
                print(feature + ": " + str(score))
                fp.write(feature + "," + str(score) + "\n")

        # also, try a comparison with the features obtained through ML
        featuresML = []
        with open(featuresEFSFile, "r") as fp:
            lines = fp.readlines()
            lines.pop(0)  # drop the CSV header line
            # ROBUSTNESS FIX: take at most the first 100 features; the original
            # indexed range(0, 100) and raised IndexError on a shorter file
            featuresML = [line.rstrip().split(',')[0] for line in lines[:100]]

        logFile = "feature-importance-" + methodName + ".log"
        with open(logFile, "w") as fp:
            commonFeatures = 0
            for f in sortedFeatures[:100]:
                if f[1] in featuresML:
                    commonFeatures += 1
                    string = "Feature \"" + f[1] + "\" is common to both ML and univariate feature selection."
                    print(string)
                    fp.write(string + "\n")

            string = "\nA total of " + str(commonFeatures) + " features are common to method \"" + methodName + "\" and ensemble ML feature selection."
            print(string)
            fp.write(string + "\n")

            string = "Total time taken by method \"" + methodName + "\": " + str(delta_time)
            print(string)
            fp.write(string + "\n")

    return
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__" :
	sys.exit( main() )
| [
"sklearn.linear_model.ElasticNetCV",
"sklearn.feature_selection.SelectKBest",
"datetime.datetime.now",
"genericFunctions.loadTCGADataset",
"numpy.isnan",
"sklearn.svm.SVC"
] | [((876, 894), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'k': '(100)'}), '(k=100)\n', (887, 894), False, 'from sklearn.feature_selection import SelectKBest\n'), ((1141, 1155), 'sklearn.linear_model.ElasticNetCV', 'ElasticNetCV', ([], {}), '()\n', (1153, 1155), False, 'from sklearn.linear_model import ElasticNetCV\n'), ((1271, 1305), 'genericFunctions.loadTCGADataset', 'genericFunctions.loadTCGADataset', ([], {}), '()\n', (1303, 1305), False, 'import genericFunctions\n'), ((1049, 1069), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (1052, 1069), False, 'from sklearn.svm import SVC\n'), ((1361, 1375), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1373, 1375), False, 'from datetime import datetime\n'), ((1576, 1590), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1588, 1590), False, 'from datetime import datetime\n'), ((2278, 2292), 'numpy.isnan', 'np.isnan', (['x[0]'], {}), '(x[0])\n', (2286, 2292), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2019 Creative Commons
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# from __future__ imports must occur at the beginning of the file. DO NOT CHANGE!
from __future__ import annotations
import unittest
from errors import TemplateFileException
from linkedin.message import Template
Template_Business = """Hi {{name}},
I'm looking to expand my network with fellow business owners and professionals. I would love to learn about what you do and see
if there's any way we can support each other.
Cheers!"""
Template_Sales = """Hi {{name}},
I'm looking to connect with like-minded professionals specifically who are on the revenue generating side of things.
Let's connect!"""
Template_Real_Estate = """Hey {{name}},
Came across your profile and saw your work in real estate. I'm reaching out to connect with other like-minded people. Would be
happy to make your acquaintance.
Have a good day!"""
Template_Creative_Industry = """Hi {{name}},
LinkedIn showed me your profile multiple times now, so I checked what you do. I really like your work and as we are both in the
creative industy - I thought I'll reach out. It's always great to be connected with like-minded individuals, isn't it?
{{my_name}}"""
Template_Hr = """Hey {{name}},
I hope your week is off to a great start, I noticed we both work in the HR/Employee Experience field together.
I would love to connect with you."""
Template_Include_Industry = """Hi {{name}},
I hope you're doing great! I'm on a personal mission to grow my connections on LinkedIn, especially in the field of {{industry}}.
So even though we're practically strangers, I'd love to connect with you.
Have a great day!"""
Template_Ben_Franklin = """Hi {{name}},
The Ben Franklin effect - when we do a person a favor, we tend to like them more as a result. Anything I can do for you?
Best, {{my_name}}"""
Template_Virtual_Coffee = """Hi {{name}},
I hope you're doing well. I'm {{my_name}}, {{my_position}} of {{my_company_name}}. We're looking for {{position}} and it would be
great to connect over a 'virtual' coffee/chat and see what we can do together?"""
Template_Common_Connection_Request = [
"""Hey {{name}},
I notice we share a mutual connection or two & would love to add you to my network of professionals.
If you're open to that let's connect!""",
"""Hi {{name}},
I see we have some mutual connections. I always like networking with new people, and thought this would be an easy way for us to
introduce ourselves.""",
"""Hi {{name}},
Life is both long and short. We have quite a few mutual connections. I would like to invite you to join my network on LinkedIn
platform. Hopefully, our paths will cross professionally down the line. Until then, wishing you and yours an incredible {{year}}.
{{my_name}}""",
"""Hi {{name}},
I was looking at your profile and noticed we had a few shared connections. I thought it would be nice to reach out to connect with
you and share out networks.
Thank you and hope all is well!""",
"""Hey {{first_name}},
I saw you're based in {{location}} and work on {{keyword}}, I'd love to connect.
Thanks, {{my_name}}"""
]
class TestTemplateApi(unittest.TestCase):
    """Unit tests for linkedin.message.Template lookup and parsing."""

    def test_static_method_get_template_by_name(
            self: TestTemplateApi) -> None:
        template = Template(
            None, var_template=None, grammar_check=False,
            use_template='template_ben_franklin')
        # Every known template name must resolve to its module-level constant.
        expected = {
            'template_ben_franklin': Template_Ben_Franklin,
            'template_business': Template_Business,
            'template_sales': Template_Sales,
            'template_real_estate': Template_Real_Estate,
            'template_creative_industry': Template_Creative_Industry,
            'template_hr': Template_Hr,
            'template_include_industry': Template_Include_Industry,
            'template_virtual_coffee': Template_Virtual_Coffee,
        }
        for name, constant in expected.items():
            self.assertEqual(template.get_template_by_name(name), constant)
        # The connection-request template is a list of five variants.
        variants = template.get_template_by_name(
            'template_common_connection_request')
        for got, want in zip(variants, Template_Common_Connection_Request):
            self.assertEqual(got, want)
        # Unknown names must raise.
        with self.assertRaises(TemplateFileException):
            template.get_template_by_name('any_unknown_template')

    def test_method_parse_with_template_business(
            self: TestTemplateApi) -> None:
        template = Template(None, grammar_check=False,
                            var_template=None,
                            use_template='template_business')
        template.set_data({'name': 'Ayush'})
        # parse() must substitute {{name}} into the business template.
        expected = Template_Business.replace('{{name}}', 'Ayush')
        self.assertEqual(template.parse(), expected)
| [
"linkedin.message.Template"
] | [((4314, 4411), 'linkedin.message.Template', 'Template', (['None'], {'var_template': 'None', 'grammar_check': '(False)', 'use_template': '"""template_ben_franklin"""'}), "(None, var_template=None, grammar_check=False, use_template=\n 'template_ben_franklin')\n", (4322, 4411), False, 'from linkedin.message import Template\n'), ((5698, 5790), 'linkedin.message.Template', 'Template', (['None'], {'var_template': 'None', 'use_template': '"""template_business"""', 'grammar_check': '(False)'}), "(None, var_template=None, use_template='template_business',\n grammar_check=False)\n", (5706, 5790), False, 'from linkedin.message import Template\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-01 09:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the MysqlAduit and OptionLog tables.

    MysqlAduit stores the current state of a granted MySQL privilege;
    OptionLog records each grant/revoke operation performed.
    """
    # NOTE(review): 'MysqlAduit' looks like a typo for 'MysqlAudit', but
    # renaming the model now would require a follow-up migration — confirm.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='MysqlAduit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=32)),
                ('mysqluser', models.CharField(max_length=64)),
                ('mysqlhost', models.CharField(default='', max_length=16)),
                ('login_host', models.CharField(default='localhost', max_length=16)),
                # auto_now: updated on every save (last-modified timestamp)
                ('pridate', models.DateTimeField(auto_now=True)),
                ('pri_database', models.CharField(default='', max_length=32)),
                ('pri_table', models.CharField(default='', max_length=32)),
                ('privilege', models.CharField(default='', max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='OptionLog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=32)),
                ('option', models.CharField(max_length=10)),
                ('mysqlhost', models.CharField(max_length=16)),
                ('mysqluser', models.CharField(max_length=64)),
                # auto_now_add: set once at creation (log-entry timestamp)
                ('pridate', models.DateTimeField(auto_now_add=True)),
                ('pri_database', models.CharField(default='', max_length=32)),
                ('pri_table', models.CharField(default='', max_length=32)),
                ('privilege', models.CharField(max_length=48)),
            ],
        ),
    ]
| [
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((387, 480), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (403, 480), False, 'from django.db import migrations, models\n'), ((509, 540), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (525, 540), False, 'from django.db import migrations, models\n'), ((574, 605), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (590, 605), False, 'from django.db import migrations, models\n'), ((639, 682), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(16)'}), "(default='', max_length=16)\n", (655, 682), False, 'from django.db import migrations, models\n'), ((717, 769), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""localhost"""', 'max_length': '(16)'}), "(default='localhost', max_length=16)\n", (733, 769), False, 'from django.db import migrations, models\n'), ((801, 836), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (821, 836), False, 'from django.db import migrations, models\n'), ((873, 916), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(32)'}), "(default='', max_length=32)\n", (889, 916), False, 'from django.db import migrations, models\n'), ((950, 993), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(32)'}), "(default='', max_length=32)\n", (966, 993), False, 'from django.db import migrations, models\n'), ((1027, 1070), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(64)'}), "(default='', max_length=64)\n", (1043, 1070), False, 'from django.db import migrations, models\n'), ((1211, 1304), 
'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1227, 1304), False, 'from django.db import migrations, models\n'), ((1333, 1364), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (1349, 1364), False, 'from django.db import migrations, models\n'), ((1395, 1426), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1411, 1426), False, 'from django.db import migrations, models\n'), ((1460, 1491), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (1476, 1491), False, 'from django.db import migrations, models\n'), ((1525, 1556), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1541, 1556), False, 'from django.db import migrations, models\n'), ((1588, 1627), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1608, 1627), False, 'from django.db import migrations, models\n'), ((1664, 1707), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(32)'}), "(default='', max_length=32)\n", (1680, 1707), False, 'from django.db import migrations, models\n'), ((1741, 1784), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(32)'}), "(default='', max_length=32)\n", (1757, 1784), False, 'from django.db import migrations, models\n'), ((1818, 1849), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(48)'}), '(max_length=48)\n', (1834, 1849), False, 'from django.db import migrations, models\n')] |
import pyswan

# --- pyswan usage examples (commented out, kept for reference) ---
# pyswan.digitize("梁山一百零八好汉!")            # Chinese numerals -> digits
# pyswan.digitize("今天是周日")                   # weekday normalization
# pyswan.digitize("明天是劳动节")                 # TODO: holiday support
# pyswan.parse('十二点三十分', dim=['time', 'number'])
# -> [{'dim': 'time', 'body': '12点30分', ... 'value': '2022-01-06 12:30:51 +08:00'}]
# pyswan.parse('六加十三除以2再乘八等于多少', dim=['equation'])
# -> [{'type': 'equation', ... 'value': '6+13/2*8'}]
# pyswan.parse("徐汇区虹漕路461号58号楼5楼", dim=['place'])
# pyswan.parse('今天是二十号', dim=['time', 'number'])
# pyswan.parse('两天后是几号?', dim=['time'])
# pyswan.parse('周六是几号?', dim=['time'])

import arrow

# Demonstrate arrow's shift(): the current UTC time, the same instant one
# minute earlier, then 100 seconds later than that.
moment = arrow.utcnow()
print(moment)
moment = moment.shift(minutes=-1)
print(moment)
moment = moment.shift(seconds=100)
print(moment)
| [
"arrow.utcnow"
] | [((666, 680), 'arrow.utcnow', 'arrow.utcnow', ([], {}), '()\n', (678, 680), False, 'import arrow\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
class VideoInputStreamIterator:
    """
    Adapts a video input stream to Python's iterator protocol:
    every __next__ call forwards to the stream's grab_frame().
    """

    def __init__(self, input_stream):
        self._source = input_stream

    def __next__(self):
        # grab_frame() raises StopIteration when the stream is exhausted
        return self._source.grab_frame()
class VideoInputStream:
    '''
    Read-access wrapper around cv2.VideoCapture.

    Usage:
        stream = VideoInputStream()
        stream.open(video_path)
    or, more concisely:
        with Video.read(video_path) as stream:
            for timestamp, frame in stream:
                ...  # use frame
    '''
    def __init__(self):
        # the cv2.VideoCapture handle; None until open() is called
        self._capture = None
    def open(self, video_path):
        """Open *video_path* for reading; raises ValueError on failure."""
        self._capture = cv2.VideoCapture(video_path)
        if not self._capture.isOpened():
            raise ValueError(f'unable to open video {video_path}')
        return self
    def close(self):
        """Release the underlying capture handle."""
        self._capture.release()
    def grab_frame(self):
        """Return the next (timestamp_ms, frame) pair.

        Raises StopIteration when the stream is closed or exhausted, so it
        can back the iterator protocol directly.
        """
        if not self._capture.isOpened():
            raise StopIteration
        success, frame = self._capture.read()
        if not success:
            raise StopIteration
        # timestamp of the grabbed frame in milliseconds
        timestamp = self._capture.get(cv2.CAP_PROP_POS_MSEC)
        return timestamp, frame
    def __len__(self):
        """Return the real frame count (see the seek trick below)."""
        assert self._capture
        nb_frames = int(self._capture.get(cv2.CAP_PROP_FRAME_COUNT))
        # WARNING: CAP_PROP_FRAME_COUNT is inaccurate (based on duration and
        # FPS), so verify it: seek past the reported end and read back the
        # position OpenCV actually clamps to.
        current_position = int(self._capture.get(cv2.CAP_PROP_POS_FRAMES))
        self._capture.set(cv2.CAP_PROP_POS_FRAMES, nb_frames+1)
        nb_frames_actual = int(self._capture.get(cv2.CAP_PROP_POS_FRAMES))
        nb_frames = nb_frames_actual
        # dont forget to seek back at initial position
        self._capture.set(cv2.CAP_PROP_POS_FRAMES, current_position)
        return nb_frames
    def __iter__(self):
        assert self._capture
        return VideoInputStreamIterator(self)
    def __enter__(self):
        # nothing to be done: open() is called separately
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
class VideoOutputStream:
    '''
    Write-access wrapper around cv2.VideoWriter.

    Usage:
        with Video.write(video_path) as stream:
            stream.push_frame(frame)

    If no size is given at open() time, the writer creation is postponed
    until the first pushed frame, whose shape determines the size.
    '''

    def __init__(self):
        self._recorder = None
        self._video_path = None
        self._fps = None
        self._fourcc = None
        self._size = None

    def open(self, video_path, fps=30, fourcc='avc1', size=None):
        """Prepare writing to *video_path*; raises ValueError on failure."""
        self._video_path = video_path
        self._fps = fps
        self._fourcc = fourcc
        self._size = size
        if size is None:
            # size not known yet: postpone creating the writer until the
            # first frame is pushed (its shape determines the size)
            self._recorder = None
        else:
            self._recorder = cv2.VideoWriter(self._video_path, cv2.VideoWriter_fourcc(*self._fourcc), fps, size)
            if not self._recorder.isOpened():
                raise ValueError(f'unable to open video {video_path}')
        return self

    def close(self):
        """Finalize the output file, if a writer was ever created."""
        # BUG FIX: when size was None and no frame was pushed, _recorder is
        # still None; the original unconditionally called release() and
        # crashed with AttributeError on __exit__.
        if self._recorder is not None:
            self._recorder.release()

    def push_frame(self, frame):
        """Append *frame* to the output, creating the writer lazily."""
        if self._recorder is None:
            # derive (width, height) from the frame's (rows, cols)
            size = tuple(frame.shape[i] for i in [1, 0])
            self.open(self._video_path, self._fps, self._fourcc, size)
        self._recorder.write(frame)

    def __enter__(self):
        # nothing to be done: open() is called separately
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
class Video:
    """Convenience factory for opening input and output video streams."""

    @staticmethod
    def read(video_path):
        """Open *video_path* for reading and return the input stream."""
        return VideoInputStream().open(video_path)

    @staticmethod
    def write(video_path, fps=30, fourcc='avc1', size=None):
        """Open *video_path* for writing and return the output stream."""
        return VideoOutputStream().open(video_path, fps, fourcc, size)
# test
from tqdm import tqdm
if __name__ == '__main__':
    import urllib.request
    import os.path as path
    video_path_in = '/tmp/video_sample.mp4'
    # download a small sample video once, if not already cached
    if not path.exists(video_path_in):
        url_sample = 'https://www.sample-videos.com/video123/mp4/360/big_buck_bunny_360p_1mb.mp4'
        print(f'downloading samples ... \n\tfrom {url_sample}')
        urllib.request.urlretrieve(url_sample, video_path_in)
        print(f'done.')
    # 1) context-manager read: __len__ must match the frames iterated
    with Video.read(video_path_in) as video_stream:
        nb_read = 0
        nb_frames = len(video_stream)
        for timestamp, frame in tqdm(video_stream):
            cv2.imshow('frame', frame)
            cv2.waitKey(10)
            nb_read += 1
        assert nb_frames == nb_read
    # 2) same via the factory, with an explicit close()
    video_stream = Video.read(video_path_in)
    nb_frames = len(video_stream)
    nb_read = 0
    for timestamp, frame in tqdm(video_stream):
        cv2.imshow('frame', frame)
        cv2.waitKey(10)
        nb_read += 1
    video_stream.close()
    assert nb_frames == nb_read
    # 3) same via direct construction + iteration
    video_stream = VideoInputStream()
    video_stream.open(video_path_in)
    nb_frames = len(video_stream)
    nb_read = 0
    for timestamp, frame in tqdm(video_stream):
        cv2.imshow('frame', frame)
        cv2.waitKey(10)
        nb_read += 1
    video_stream.close()
    assert nb_frames == nb_read
    # 4) manual grab_frame() loop over an index range
    video_stream = VideoInputStream()
    video_stream.open(video_path_in)
    nb_frames = len(video_stream)
    nb_read = 0
    for idx in tqdm(range(len(video_stream))):
        timestamp, frame = video_stream.grab_frame()
        cv2.imshow('frame', frame)
        cv2.waitKey(10)
        nb_read += 1
    assert nb_frames == nb_read
    # 5) copy the input to an output stream frame by frame
    video_path_out = '/tmp/frame.mp4'
    with Video.read(video_path_in) as video_in, Video.write(video_path_out, fps=30) as video_out:
        # nb_frames intentionally taken from the stream opened in step 4,
        # which is still open here
        nb_frames = len(video_stream)
        nb_read = 0
        for timestamp, frame in tqdm(video_in):
            video_out.push_frame(frame)
            cv2.imshow('frame', frame)
            cv2.waitKey(10)
            nb_read += 1
        assert nb_frames == nb_read
    # close the stream left open by step 4
    video_stream.close()
"os.path.exists",
"tqdm.tqdm",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.waitKey"
] | [((4611, 4629), 'tqdm.tqdm', 'tqdm', (['video_stream'], {}), '(video_stream)\n', (4615, 4629), False, 'from tqdm import tqdm\n'), ((4922, 4940), 'tqdm.tqdm', 'tqdm', (['video_stream'], {}), '(video_stream)\n', (4926, 4940), False, 'from tqdm import tqdm\n'), ((677, 705), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (693, 705), False, 'import cv2\n'), ((3924, 3950), 'os.path.exists', 'path.exists', (['video_path_in'], {}), '(video_path_in)\n', (3935, 3950), True, 'import os.path as path\n'), ((4343, 4361), 'tqdm.tqdm', 'tqdm', (['video_stream'], {}), '(video_stream)\n', (4347, 4361), False, 'from tqdm import tqdm\n'), ((4639, 4665), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (4649, 4665), False, 'import cv2\n'), ((4674, 4689), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (4685, 4689), False, 'import cv2\n'), ((4950, 4976), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (4960, 4976), False, 'import cv2\n'), ((4985, 5000), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (4996, 5000), False, 'import cv2\n'), ((5313, 5339), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (5323, 5339), False, 'import cv2\n'), ((5348, 5363), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (5359, 5363), False, 'import cv2\n'), ((5644, 5658), 'tqdm.tqdm', 'tqdm', (['video_in'], {}), '(video_in)\n', (5648, 5658), False, 'from tqdm import tqdm\n'), ((4375, 4401), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (4385, 4401), False, 'import cv2\n'), ((4414, 4429), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (4425, 4429), False, 'import cv2\n'), ((5712, 5738), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (5722, 5738), False, 'import cv2\n'), ((5751, 5766), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (5762, 5766), False, 'import 
cv2\n'), ((2760, 2797), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['*self._fourcc'], {}), '(*self._fourcc)\n', (2782, 2797), False, 'import cv2\n')] |
"""Module contains an approximate garbage score estimator
See ags_.
"""
from typing import Optional
from FaceEngine import IAGSEstimatorPtr # pylint: disable=E0611,E0401
from lunavl.sdk.errors.errors import LunaVLError
from lunavl.sdk.errors.exceptions import CoreExceptionWrap, LunaSDKException
from lunavl.sdk.estimators.base_estimation import BaseEstimator
from lunavl.sdk.faceengine.facedetector import BoundingBox, FaceDetection
from lunavl.sdk.image_utils.image import VLImage
class AGSEstimator(BaseEstimator):
    """
    Approximate garbage score (AGS) estimator.
    """

    # pylint: disable=W0235
    def __init__(self, coreEstimator: IAGSEstimatorPtr):
        """
        Init.

        Args:
            coreEstimator: core estimator
        """
        super().__init__(coreEstimator)

    # pylint: disable=W0221
    @CoreExceptionWrap(LunaVLError.EstimationAGSError)
    def estimate(
        self,
        detection: Optional[FaceDetection] = None,
        image: Optional[VLImage] = None,
        boundingBox: Optional[BoundingBox] = None,
    ) -> float:
        """
        Estimate the approximate garbage score.

        Either a face detection, or an image together with its bounding box,
        must be supplied.

        Args:
            image: image in R8G8B8 format
            boundingBox: face bounding box corresponding to the image
            detection: face detection

        Returns:
            estimated ags, float in range [0, 1]

        Raises:
            LunaSDKException: if estimation failed
            ValueError: if detection is None and image or boundingBox is None
        """
        if detection is None:
            if image is None or boundingBox is None:
                # message typo fixed: "bust be" -> "must be"
                raise ValueError("image and boundingBox or detection must be not None")
            error, ags = self._coreEstimator.estimate(image.coreImage, boundingBox.coreEstimation)
        else:
            error, ags = self._coreEstimator.estimate(detection.image.coreImage, detection.boundingBox.coreEstimation)
        if error.isError:
            raise LunaSDKException(LunaVLError.fromSDKError(error))
        return ags
| [
"lunavl.sdk.errors.exceptions.CoreExceptionWrap",
"lunavl.sdk.errors.errors.LunaVLError.fromSDKError"
] | [((838, 887), 'lunavl.sdk.errors.exceptions.CoreExceptionWrap', 'CoreExceptionWrap', (['LunaVLError.EstimationAGSError'], {}), '(LunaVLError.EstimationAGSError)\n', (855, 887), False, 'from lunavl.sdk.errors.exceptions import CoreExceptionWrap, LunaSDKException\n'), ((1953, 1984), 'lunavl.sdk.errors.errors.LunaVLError.fromSDKError', 'LunaVLError.fromSDKError', (['error'], {}), '(error)\n', (1977, 1984), False, 'from lunavl.sdk.errors.errors import LunaVLError\n')] |
import gzip
import json
import re
import string
import unicodedata
from collections import defaultdict
from pathlib import Path
import pandas as pd
import click
from sentencepiece import SentencePieceProcessor
from tqdm import tqdm
import ast
import nltk
# fetch the nltk resources used below: tokenizer, stopword list, POS tagger
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
from collections import Counter
from nltk.corpus import stopwords

# English stopwords, used to filter review tokens
stop_words = set(stopwords.words('english'))
is_noun = lambda pos: pos[:2] == 'NN' or pos[:2] == 'NNS'
def extract_KeywordTitle(rev, title):
    """Return the words of *title* that rank among the 15 most frequent
    nouns across the given reviews, joined by single spaces.

    Args:
        rev: iterable of review strings.
        title: product title string.

    Returns:
        The filtered title, or "NaN" when tokenization/tagging fails.
    """
    try:
        reviews = " ".join(rev)
        tokens = nltk.word_tokenize(reviews)
        # keep alphabetic, non-stopword tokens only
        tokens = [w for w in tokens if not w.lower() in stop_words and w.isalpha()]
        nouns = [word for (word, pos) in nltk.pos_tag(tokens) if is_noun(pos)]
        # the 15 most common nouns, keeping only the words themselves
        mc = [word for word, _ in Counter(nouns).most_common(15)]
        title_tokens = nltk.word_tokenize(title)
        return " ".join(value for value in title_tokens if value.lower() in mc)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed
        return "NaN"
# asin -> title lookup table
df = pd.read_csv("../data/asintitle.csv")
# NOTE(review): DataFrame.set_index returns a *new* frame; this result is
# discarded, so `df` keeps its default integer index. get_title() below
# filters with a boolean mask anyway — confirm whether the index was meant
# to be assigned (df = df.set_index(...)).
df.set_index(["asin"])
def get_title(asin: str):
    """Look up the product title for *asin* in the module-level `df`.

    Returns:
        The title string, or "NaN" when the asin is missing or ambiguous
        (``.item()`` requires exactly one matching value).
    """
    try:
        return df.loc[df["asin"] == asin, "title"].values.item()
    except Exception:
        # narrowed from a bare `except:`; the lookup raises ValueError when
        # zero or multiple rows match
        return "NaN"
# `curr` tracks the business_id of the group currently being accumulated;
# seeded with the first id expected in the input file.
curr = "159985130X"
# buffered review records belonging to the current business_id
lines = []
def add(x, keywords):
    """Attach *keywords* to record *x* (mutated in place) and return the
    record serialized as a single JSONL line."""
    x["keyword"] = keywords
    serialized = json.dumps(x)
    return serialized + "\n"
def write_lines(fout, lines, keywords):
    """Serialize every record in *lines* with *keywords* attached and write
    the resulting JSONL lines to *fout*."""
    fout.writelines(add(record, keywords) for record in lines)
# Group consecutive reviews by business_id, extract title keywords for each
# group, and write the annotated records to train_title.jsonl.
with open("../data/amzn/train_title.jsonl", "w") as fout:
    with open("../data/amzn/train.jsonl", "r") as fp:
        p = tqdm()
        while True:
            raw = fp.readline()
            if not raw:
                break
            # each input line is a Python-literal dict; normalize it to JSON
            record = json.loads(json.dumps(ast.literal_eval(raw)))
            if curr != record["business_id"]:
                # a new business starts: flush the buffered group
                title = get_title(curr)
                rev = [str(x["text"]) for x in lines]
                keywords = extract_KeywordTitle(rev, title)
                print(keywords)
                write_lines(fout, lines, keywords)
                lines = []
                curr = record["business_id"]
            p.update()
            lines.append(record)
        # BUG FIX: flush the final group, which the original loop dropped
        # (it only wrote a group when the *next* business_id appeared)
        if lines:
            title = get_title(curr)
            rev = [str(x["text"]) for x in lines]
            keywords = extract_KeywordTitle(rev, title)
            print(keywords)
            write_lines(fout, lines, keywords)
        p.close()
        # the `with` blocks close both files; redundant close() calls removed
| [
"nltk.pos_tag",
"nltk.corpus.stopwords.words",
"nltk.word_tokenize",
"nltk.download",
"pandas.read_csv",
"json.dumps",
"tqdm.tqdm",
"collections.Counter",
"ast.literal_eval"
] | [((255, 277), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (268, 277), False, 'import nltk\n'), ((278, 304), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (291, 304), False, 'import nltk\n'), ((305, 348), 'nltk.download', 'nltk.download', (['"""averaged_perceptron_tagger"""'], {}), "('averaged_perceptron_tagger')\n", (318, 348), False, 'import nltk\n'), ((1115, 1151), 'pandas.read_csv', 'pd.read_csv', (['"""../data/asintitle.csv"""'], {}), "('../data/asintitle.csv')\n", (1126, 1151), True, 'import pandas as pd\n'), ((433, 459), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (448, 459), False, 'from nltk.corpus import stopwords\n'), ((621, 648), 'nltk.word_tokenize', 'nltk.word_tokenize', (['reviews'], {}), '(reviews)\n', (639, 648), False, 'import nltk\n'), ((830, 839), 'collections.Counter', 'Counter', ([], {}), '()\n', (837, 839), False, 'from collections import Counter\n'), ((975, 1000), 'nltk.word_tokenize', 'nltk.word_tokenize', (['title'], {}), '(title)\n', (993, 1000), False, 'import nltk\n'), ((1404, 1417), 'json.dumps', 'json.dumps', (['x'], {}), '(x)\n', (1414, 1417), False, 'import json\n'), ((1665, 1671), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (1669, 1671), False, 'from tqdm import tqdm\n'), ((776, 797), 'nltk.pos_tag', 'nltk.pos_tag', (['reviews'], {}), '(reviews)\n', (788, 797), False, 'import nltk\n'), ((1843, 1865), 'ast.literal_eval', 'ast.literal_eval', (['line'], {}), '(line)\n', (1859, 1865), False, 'import ast\n')] |
import random
import pygame
class GameObject(pygame.sprite.Sprite):
    '''
    Base class for every sprite in the game: loads the sprite image named
    "<name>.png" and exposes simple movement helpers.
    '''
    def __init__(self, game, name):
        '''
        Loads the sprite image and its bounding rect.
        '''
        # initialize the pygame Sprite machinery first
        super().__init__()
        self.name = name
        self.game = game
        self.image = pygame.image.load(name + '.png')
        # rect carries the image dimensions; moving the sprite means
        # updating rect.x / rect.y
        self.rect = self.image.get_rect()

    def update(self, dx, dy):
        # unconstrained relative movement
        self.rect.x += dx
        self.rect.y += dy

    def setCoordinates(self, x, y):
        # absolute positioning
        self.rect.x = x
        self.rect.y = y
class Hero(GameObject):
    '''
    Class which defines hero behavior in the game.

    Heroes line up along the bottom of the window, move within the window
    bounds, and hold a limited pool of attacks that regenerates over time.
    '''
    # maximum number of attacks a hero can hold at once
    maxAttacks = 3
    def __init__(self, game, name):
        '''
        :param game: Game object.
        :param name: Name is expected to be the string before .png extension in name of the matching sprite.
        Example : sprite name is 'spiderman.png' -> name is 'spiderman'
        '''
        GameObject.__init__(self, game, name)
        rect = self.image.get_rect()
        self.width = rect.width
        self.height = rect.height
        # automatically positions heroes to be in line
        # on the bottom of the window
        self.setCoordinates(len(self.game.heroes) * (self.width + 45) + 40, self.game.height - self.height)
        # hero starts with maximum attacks
        self.num = Hero.maxAttacks
    def update(self, dx, dy):
        # move only while the whole sprite stays inside the window
        if self.rect.x + dx >= 0 and self.rect.x + dx + self.width < self.game.width:
            self.rect.x += dx
        if self.rect.y + dy >= 0 and self.rect.y + dy < self.game.height: # maybe unnecessary
            self.rect.y += dy
    def attack(self):
        # fire only while attacks remain in the pool
        if self.num > 0:
            # schedule USEREVENT+1 in 5s — presumably attack regeneration;
            # confirm against the main event loop
            pygame.time.set_timer(pygame.USEREVENT + 1, 5000)
            sound = pygame.mixer.Sound('attack.ogg')
            self.game.attackSoundCh.play(sound)
            self.game.attacks.add(Attack(self.game))
            # NOTE(review): this removes a *newly constructed* Attack from
            # attacksLeft; sprite groups remove by identity, so this is a
            # no-op and attacksLeft only ever grows — confirm intent.
            self.game.attacksLeft.remove(Attack(self.game))
            self.num -= 1
    def regainAttack(self):
        if self.num < Hero.maxAttacks:
            self.game.attacksLeft.add(Attack(self.game))
            self.num += 1
    def drawAttacksLeft(self):
        # draw one attack icon per remaining attack near the bottom-right
        for i in range(self.num):
            self.game.display.blit(pygame.image.load('attack.png'), (650 + i * Attack.width, self.game.height - Attack.height))
class Rock(GameObject):
    '''
    Obstacle sprite: spawns at a random spot above the hero and plays a
    short explosion animation plus sound ("puff") when hit by an attack.
    '''
    width = 40  # real sprite size is 50x50 px
    height = 40
    puffImages = [pygame.image.load('puf.png'), pygame.image.load('puf1.png'), pygame.image.load('puf2.png')]
    puffTime = 15  # ms each puff frame stays on screen

    def __init__(self, game):
        GameObject.__init__(self, game, 'rock')
        # random horizontal spot, somewhere well above the hero
        spawn_x = random.randrange(Rock.width, self.game.width - Rock.width)
        spawn_y = self.game.hero.rect.y - random.randrange(self.game.height / 2, 2 * self.game.height)
        self.setCoordinates(spawn_x, spawn_y)

    def puff(self):
        self.game.hitSoundCh.play(pygame.mixer.Sound('puff.ogg'))
        # show the explosion frames in sequence at the rock's position
        for puff_frame in Rock.puffImages:
            self.game.display.blit(puff_frame, (self.rect.x, self.rect.y))
            pygame.display.update()
            pygame.time.wait(Rock.puffTime)
class Energy(GameObject):
    '''
    Collectible energy sprite: spawns at a random spot above the hero and
    plays a short pickup animation plus sound when collected.
    '''
    width = 40  # the real size
    height = 40
    energyImages = [pygame.image.load('energy.png'), pygame.image.load('energy1.png'), pygame.image.load('energy2.png')]
    energyTime = 15  # ms each animation frame stays on screen

    def __init__(self, game):
        GameObject.__init__(self, game, 'energy')
        # random horizontal spot, somewhere well above the hero
        # (uses Rock's size constants for the margins, as before)
        spawn_x = random.randrange(Rock.width, self.game.width - Rock.width)
        spawn_y = self.game.hero.rect.y - random.randrange(self.game.height / 2, 2 * self.game.height)
        self.setCoordinates(spawn_x, spawn_y)

    def collect(self):
        self.game.energySoundCh.play(pygame.mixer.Sound('energy.ogg'))
        # show the pickup frames in sequence at the sprite's position
        for energy_frame in Energy.energyImages:
            self.game.display.blit(energy_frame, (self.rect.x, self.rect.y))
            pygame.display.update()
            pygame.time.wait(Energy.energyTime)
class Attack(GameObject):
    '''
    Projectile fired by the hero; spawns at the hero's current position.
    Attacks are the only objects in the game that travel upwards.
    '''
    width = 30
    height = 30

    def __init__(self, game):
        GameObject.__init__(self, game, 'attack')
        self.hero = game.hero
        # identity comparison (`is not None`) instead of `!= None` (PEP 8)
        if self.hero is not None:
            self.setCoordinates(self.hero.rect.x, self.hero.rect.y)
class Music(GameObject):
    '''
    Toggle button for the background music (ON/OFF).
    '''
    width = 40
    height = 40

    def __init__(self, game):
        # pick the initial sprite according to whether music is playing
        initial = 'musicON' if pygame.mixer.music.get_busy() else 'musicOFF'
        GameObject.__init__(self, game, initial)
        # top-right corner of the window
        self.setCoordinates(self.game.width - Music.width - 20, 20)

    def change(self):
        '''Toggle playback and swap the button sprite accordingly.'''
        if self.name == 'musicON':
            self.name = 'musicOFF'
            pygame.mixer.music.stop()
        else:
            self.name = 'musicON'
            pygame.mixer.music.play()
        # the display image must be refreshed on every toggle
        self.image = pygame.image.load(self.name + '.png')
class Message(object):
    '''
    A piece of text that can be rendered onto the game display.
    '''
    def __init__(self, game, msg, x, y, font, size, color):
        '''
        :param msg: Message to be written.
        :param color: Font color.
        :param x: Top left x coordinate.
        :param y: Top left y coordinate.
        :param font: Font style.
        :param size: Font size.
        '''
        self.game = game
        self.msg = msg
        self.x = x
        self.y = y
        self.color = color
        self.font = font
        self.size = size

    def displayMessage(self):
        '''Render the current message and refresh the display.'''
        rendered = pygame.font.SysFont(self.font, self.size).render(self.msg, False, self.color)
        self.game.display.blit(rendered, (self.x, self.y))
        pygame.display.update()

    def update(self, msg):
        '''Replace the message text (does not redraw).'''
        self.msg = msg
| [
"pygame.time.set_timer",
"random.randrange",
"pygame.sprite.Sprite.__init__",
"pygame.time.wait",
"pygame.mixer.music.stop",
"pygame.mixer.music.get_busy",
"pygame.mixer.Sound",
"pygame.image.load",
"pygame.display.update",
"pygame.mixer.music.play",
"pygame.font.SysFont"
] | [((303, 338), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (332, 338), False, 'import pygame\n'), ((411, 443), 'pygame.image.load', 'pygame.image.load', (["(name + '.png')"], {}), "(name + '.png')\n", (428, 443), False, 'import pygame\n'), ((2890, 2918), 'pygame.image.load', 'pygame.image.load', (['"""puf.png"""'], {}), "('puf.png')\n", (2907, 2918), False, 'import pygame\n'), ((2920, 2949), 'pygame.image.load', 'pygame.image.load', (['"""puf1.png"""'], {}), "('puf1.png')\n", (2937, 2949), False, 'import pygame\n'), ((2951, 2980), 'pygame.image.load', 'pygame.image.load', (['"""puf2.png"""'], {}), "('puf2.png')\n", (2968, 2980), False, 'import pygame\n'), ((3402, 3432), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""puff.ogg"""'], {}), "('puff.ogg')\n", (3420, 3432), False, 'import pygame\n'), ((3755, 3786), 'pygame.image.load', 'pygame.image.load', (['"""energy.png"""'], {}), "('energy.png')\n", (3772, 3786), False, 'import pygame\n'), ((3788, 3820), 'pygame.image.load', 'pygame.image.load', (['"""energy1.png"""'], {}), "('energy1.png')\n", (3805, 3820), False, 'import pygame\n'), ((3822, 3854), 'pygame.image.load', 'pygame.image.load', (['"""energy2.png"""'], {}), "('energy2.png')\n", (3839, 3854), False, 'import pygame\n'), ((4283, 4315), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""energy.ogg"""'], {}), "('energy.ogg')\n", (4301, 4315), False, 'import pygame\n'), ((5105, 5134), 'pygame.mixer.music.get_busy', 'pygame.mixer.music.get_busy', ([], {}), '()\n', (5132, 5134), False, 'import pygame\n'), ((5561, 5598), 'pygame.image.load', 'pygame.image.load', (["(self.name + '.png')"], {}), "(self.name + '.png')\n", (5578, 5598), False, 'import pygame\n'), ((6188, 6229), 'pygame.font.SysFont', 'pygame.font.SysFont', (['self.font', 'self.size'], {}), '(self.font, self.size)\n', (6207, 6229), False, 'import pygame\n'), ((6364, 6387), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6385, 
6387), False, 'import pygame\n'), ((2003, 2052), 'pygame.time.set_timer', 'pygame.time.set_timer', (['(pygame.USEREVENT + 1)', '(5000)'], {}), '(pygame.USEREVENT + 1, 5000)\n', (2024, 2052), False, 'import pygame\n'), ((2073, 2105), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""attack.ogg"""'], {}), "('attack.ogg')\n", (2091, 2105), False, 'import pygame\n'), ((3217, 3275), 'random.randrange', 'random.randrange', (['Rock.width', '(self.game.width - Rock.width)'], {}), '(Rock.width, self.game.width - Rock.width)\n', (3233, 3275), False, 'import random\n'), ((3592, 3615), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3613, 3615), False, 'import pygame\n'), ((3628, 3659), 'pygame.time.wait', 'pygame.time.wait', (['Rock.puffTime'], {}), '(Rock.puffTime)\n', (3644, 3659), False, 'import pygame\n'), ((4098, 4156), 'random.randrange', 'random.randrange', (['Rock.width', '(self.game.width - Rock.width)'], {}), '(Rock.width, self.game.width - Rock.width)\n', (4114, 4156), False, 'import random\n'), ((4480, 4503), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4501, 4503), False, 'import pygame\n'), ((4516, 4551), 'pygame.time.wait', 'pygame.time.wait', (['Energy.energyTime'], {}), '(Energy.energyTime)\n', (4532, 4551), False, 'import pygame\n'), ((5428, 5453), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (5451, 5453), False, 'import pygame\n'), ((5514, 5539), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (5537, 5539), False, 'import pygame\n'), ((2546, 2577), 'pygame.image.load', 'pygame.image.load', (['"""attack.png"""'], {}), "('attack.png')\n", (2563, 2577), False, 'import pygame\n'), ((3301, 3361), 'random.randrange', 'random.randrange', (['(self.game.height / 2)', '(2 * self.game.height)'], {}), '(self.game.height / 2, 2 * self.game.height)\n', (3317, 3361), False, 'import random\n'), ((4182, 4242), 'random.randrange', 'random.randrange', (['(self.game.height / 2)', 
'(2 * self.game.height)'], {}), '(self.game.height / 2, 2 * self.game.height)\n', (4198, 4242), False, 'import random\n')] |
"""Collage renderer."""
import itertools
import logger
import numpy
from PIL import Image, ImageEnhance
from distance_matrix import imageMSE
ENABLE_POST_OPTIMIZATION = True
def adjustImage(image, parameters):
    """Returns a copy of *image* with brightness, contrast and saturation
    scaled by the given (brightness, contrast, saturation) tuple."""
    brightness, contrast, saturation = parameters
    result = ImageEnhance.Brightness(image).enhance(brightness)
    result = ImageEnhance.Contrast(result).enhance(contrast)
    return ImageEnhance.Color(result).enhance(saturation)
def postOptimize(image, goalImage):
    """Searches brightness/contrast/saturation settings for *image* that
    minimize the MSE against *goalImage*.

    Returns the best (brightness, contrast, saturation) tuple, or (1, 1, 1)
    when post-optimization is disabled.
    """
    if not ENABLE_POST_OPTIMIZATION:
        return (1, 1, 1)
    # Candidate adjustment grids, searched exhaustively.
    brightnessSet = numpy.arange(0.6, 1.3, 0.05)
    contrastSet = numpy.arange(0.9, 1.2, 0.05)
    saturationSet = numpy.arange(1.0, 1.3, 0.05)
    settings = itertools.product(brightnessSet, contrastSet, saturationSet)
    bestMSE = None
    bestParameters = None  # fixed: was unbound if the loop never ran
    for parameters in settings:
        newImage = adjustImage(image, parameters)
        MSE = imageMSE(newImage, goalImage)
        # Compare against None explicitly: the original `if not bestMSE`
        # treated a perfect match (MSE == 0) as "no best yet" and kept
        # overwriting it with worse candidates.
        if bestMSE is None or MSE < bestMSE:
            bestMSE = MSE
            bestParameters = parameters
    if bestParameters is None:
        raise Exception("Post-optimization failed")
    return bestParameters
def renderCollage(solution, grid, sampleGrid, imageLibrary, outputFile, cheatFactor=0):
    """Post-optimizes the solution and renders the output.

    For every grid cell, first finds per-tile brightness/contrast/saturation
    settings on the small sample grid, then renders the full-size collage and
    blends it with the goal image by *cheatFactor* (0 = pure collage).
    """
    logger.info("Post-optimizing ...")
    optimalParameters = {}
    for i in range(grid.imageCountX):
        logger.progress(i, grid.imageCountX)
        for j in range(grid.imageCountY):
            imageIndex = solution[i, j]
            image = imageLibrary.images[imageIndex]
            # Optimize on the cheaper sample resolution, not the final one.
            sampleImage = image.get(sampleGrid.imageWidth, sampleGrid.imageHeight).get()
            optimalParameters[i, j] = postOptimize(sampleImage, sampleGrid[i, j].get())
    logger.info("Rendering collage ...")
    # background holds the goal tiles, collage the adjusted library tiles.
    background = Image.new("RGB", grid.size, "white")
    collage = Image.new("RGB", grid.size, "white")
    for i in range(grid.imageCountX):
        logger.progress(i, grid.imageCountX)
        for j in range(grid.imageCountY):
            offset = (i * grid.imageWidth, j * grid.imageHeight)
            imageIndex = solution[i, j]
            image = imageLibrary.images[imageIndex]
            subImage = image.get(grid.imageWidth, grid.imageHeight).get()
            image = adjustImage(subImage, optimalParameters[i, j])
            background.paste(grid[i, j].get(), offset)
            collage.paste(image, offset)
    logger.info("Saving ...")
    output = Image.blend(collage, background, cheatFactor)
    output.save(outputFile)
| [
"distance_matrix.imageMSE",
"PIL.Image.new",
"PIL.Image.blend",
"itertools.product",
"logger.info",
"PIL.ImageEnhance.Brightness",
"PIL.ImageEnhance.Contrast",
"PIL.ImageEnhance.Color",
"logger.progress",
"numpy.arange"
] | [((898, 926), 'numpy.arange', 'numpy.arange', (['(0.6)', '(1.3)', '(0.05)'], {}), '(0.6, 1.3, 0.05)\n', (910, 926), False, 'import numpy\n'), ((943, 971), 'numpy.arange', 'numpy.arange', (['(0.9)', '(1.2)', '(0.05)'], {}), '(0.9, 1.2, 0.05)\n', (955, 971), False, 'import numpy\n'), ((990, 1018), 'numpy.arange', 'numpy.arange', (['(1.0)', '(1.3)', '(0.05)'], {}), '(1.0, 1.3, 0.05)\n', (1002, 1018), False, 'import numpy\n'), ((1032, 1092), 'itertools.product', 'itertools.product', (['brightnessSet', 'contrastSet', 'saturationSet'], {}), '(brightnessSet, contrastSet, saturationSet)\n', (1049, 1092), False, 'import itertools\n'), ((1569, 1603), 'logger.info', 'logger.info', (['"""Post-optimizing ..."""'], {}), "('Post-optimizing ...')\n", (1580, 1603), False, 'import logger\n'), ((1992, 2028), 'logger.info', 'logger.info', (['"""Rendering collage ..."""'], {}), "('Rendering collage ...')\n", (2003, 2028), False, 'import logger\n'), ((2044, 2080), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'grid.size', '"""white"""'], {}), "('RGB', grid.size, 'white')\n", (2053, 2080), False, 'from PIL import Image, ImageEnhance\n'), ((2093, 2129), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'grid.size', '"""white"""'], {}), "('RGB', grid.size, 'white')\n", (2102, 2129), False, 'from PIL import Image, ImageEnhance\n'), ((2600, 2625), 'logger.info', 'logger.info', (['"""Saving ..."""'], {}), "('Saving ...')\n", (2611, 2625), False, 'import logger\n'), ((2637, 2682), 'PIL.Image.blend', 'Image.blend', (['collage', 'background', 'cheatFactor'], {}), '(collage, background, cheatFactor)\n', (2648, 2682), False, 'from PIL import Image, ImageEnhance\n'), ((1197, 1226), 'distance_matrix.imageMSE', 'imageMSE', (['newImage', 'goalImage'], {}), '(newImage, goalImage)\n', (1205, 1226), False, 'from distance_matrix import imageMSE\n'), ((1669, 1705), 'logger.progress', 'logger.progress', (['i', 'grid.imageCountX'], {}), '(i, grid.imageCountX)\n', (1684, 1705), False, 'import logger\n'), 
((2170, 2206), 'logger.progress', 'logger.progress', (['i', 'grid.imageCountX'], {}), '(i, grid.imageCountX)\n', (2185, 2206), False, 'import logger\n'), ((353, 383), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['image'], {}), '(image)\n', (376, 383), False, 'from PIL import Image, ImageEnhance\n'), ((417, 448), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['newImage'], {}), '(newImage)\n', (438, 448), False, 'from PIL import Image, ImageEnhance\n'), ((480, 508), 'PIL.ImageEnhance.Color', 'ImageEnhance.Color', (['newImage'], {}), '(newImage)\n', (498, 508), False, 'from PIL import Image, ImageEnhance\n')] |
import aiohttp
import asyncio
import os
import sys
import time
import random
import contextlib
seaweedfs_url = 'http://127.0.0.1:9081'
def random_content():
    """Returns between 1 and 10 KiB of random bytes."""
    size_kb = random.randint(1, 10)
    return os.urandom(size_kb * 1024)
def random_fid(volumes):
    """Builds a random seaweedfs file id: '<volume>,<filekey_hex><cookie_hex>'."""
    volume_id = random.choice(volumes)
    key_hex = '%x' % random.randint(0, 1 << 24)
    cookie_hex = '00000000'
    return '{},{}{}'.format(volume_id, key_hex, cookie_hex)
class Reporter:
    """Collects per-request latencies (in milliseconds) and prints summaries."""

    def __init__(self):
        self.items = []

    @contextlib.contextmanager
    def report(self):
        """Context manager that records the elapsed wall time in ms."""
        t0 = time.monotonic()
        yield
        value = time.monotonic() - t0
        self.items.append(value * 1000)

    def summary(self, concurrency):
        """Prints avg / percentiles / estimated qps, then resets the samples."""
        n = len(self.items)
        s = sum(self.items)
        avg = s / n if n > 0 else 0
        s_items = list(sorted(self.items))
        result = [f'avg={avg:.1f}']
        p_s = [0.5, 0.8, 0.9, 0.95, 0.99]
        if n > 0:
            for p in p_s:
                v = s_items[int(n * p)]
                result.append('p{}={:.1f}'.format(int(p * 100), v))
        # Fixed: the original computed 1000/avg unconditionally and raised
        # ZeroDivisionError when summary() ran with no recorded samples.
        if avg > 0:
            qps = (1000 / avg) * concurrency
            result.append(f'qps={qps:.0f}')
        print(' '.join(result))
        self.items = []
READER_REPORTER = Reporter()
WRITER_REPORTER = Reporter()
async def put(session, fid: str, content: bytes):
    """Uploads *content* to the seaweedfs server under *fid*.

    Returns (http status, response body).
    """
    target = f'{seaweedfs_url}/{fid}'
    form = aiohttp.FormData()
    form.add_field(
        'file',
        content,
        content_type='application/gzip'
    )
    async with session.put(target, data=form) as response:
        body = await response.read()
        return response.status, body
async def get(session, fid: str):
    """Downloads the blob stored under *fid*; returns (http status, body)."""
    target = f'{seaweedfs_url}/{fid}'
    async with session.get(target) as response:
        payload = await response.read()
        return response.status, payload
async def reader_task(session, fid_s, n):
    """Fetches every fid once, in random order, timing each GET."""
    shuffled = list(fid_s)
    random.shuffle(shuffled)
    for fid in shuffled:
        with READER_REPORTER.report():
            status, body = await get(session, fid)
        assert status == 200, (status, body)
async def writer_task(session, fid_s, n):
    """Writes fresh random content to every fid, in random order, timing each PUT."""
    shuffled = list(fid_s)
    random.shuffle(shuffled)
    for fid in shuffled:
        payload = random_content()
        with WRITER_REPORTER.report():
            status, body = await put(session, fid, payload)
        assert status in (200, 201, 204), (status, body)
async def benchmark(session, num_volume, num_fid, num_round, concurrency):
    """Runs *num_round* write-then-read rounds against the seaweedfs server.

    The fid space is split evenly across *concurrency* concurrent tasks;
    each round first PUTs every fid, prints write stats, then GETs every
    fid back and prints read stats.
    """
    # Volume ids 20 .. 20+num_volume-1 — assumed to exist on the server.
    volumes = list(range(20, 20 + num_volume))
    fid_s_s = []
    for i in range(concurrency):
        fid_s = [random_fid(volumes) for _ in range(num_fid // concurrency)]
        fid_s_s.append(fid_s)
    loop = asyncio.get_event_loop()
    for n in range(num_round):
        print(f'{n} ' + '-' * 60)
        writer_tasks = []
        for i in range(concurrency):
            t = writer_task(session, fid_s_s[i], num_round)
            writer_tasks.append(loop.create_task(t))
        await asyncio.gather(*writer_tasks)
        WRITER_REPORTER.summary(concurrency)
        reader_tasks = []
        for i in range(concurrency):
            t = reader_task(session, fid_s_s[i], num_round)
            reader_tasks.append(loop.create_task(t))
        await asyncio.gather(*reader_tasks)
        READER_REPORTER.summary(concurrency)
async def async_main(num_volume, concurrency):
    """Opens one shared HTTP session and runs the benchmark with fixed sizes."""
    print(f'num_volume={num_volume} concurrency={concurrency}')
    async with aiohttp.ClientSession() as session:
        await benchmark(
            session,
            num_fid=1000,
            num_round=3,
            num_volume=num_volume,
            concurrency=concurrency,
        )
def main():
    """CLI entry point: usage `prog <num_volume> <concurrency>`."""
    volume_count = int(sys.argv[1])
    worker_count = int(sys.argv[2])
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(async_main(volume_count, worker_count))


if __name__ == "__main__":
    main()
| [
"aiohttp.ClientSession",
"random.choice",
"random.shuffle",
"asyncio.gather",
"time.monotonic",
"aiohttp.FormData",
"asyncio.get_event_loop",
"random.randint"
] | [((256, 278), 'random.choice', 'random.choice', (['volumes'], {}), '(volumes)\n', (269, 278), False, 'import random\n'), ((294, 320), 'random.randint', 'random.randint', (['(0)', '(1 << 24)'], {}), '(0, 1 << 24)\n', (308, 320), False, 'import random\n'), ((1376, 1394), 'aiohttp.FormData', 'aiohttp.FormData', ([], {}), '()\n', (1392, 1394), False, 'import aiohttp\n'), ((1894, 1915), 'random.shuffle', 'random.shuffle', (['fid_s'], {}), '(fid_s)\n', (1908, 1915), False, 'import random\n'), ((2143, 2164), 'random.shuffle', 'random.shuffle', (['fid_s'], {}), '(fid_s)\n', (2157, 2164), False, 'import random\n'), ((2668, 2692), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2690, 2692), False, 'import asyncio\n'), ((3725, 3749), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3747, 3749), False, 'import asyncio\n'), ((570, 586), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (584, 586), False, 'import time\n'), ((3416, 3439), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (3437, 3439), False, 'import aiohttp\n'), ((183, 204), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (197, 204), False, 'import random\n'), ((617, 633), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (631, 633), False, 'import time\n'), ((2948, 2977), 'asyncio.gather', 'asyncio.gather', (['*writer_tasks'], {}), '(*writer_tasks)\n', (2962, 2977), False, 'import asyncio\n'), ((3213, 3242), 'asyncio.gather', 'asyncio.gather', (['*reader_tasks'], {}), '(*reader_tasks)\n', (3227, 3242), False, 'import asyncio\n')] |
#-*-coding:UTF-8-*-
#Modules
from tkinter import *
from tkinter import messagebox as msg
from requests import *
#Function
def start():
    """Fetches "http://www." + the entry-box text with the User-Agent selected
    by the radio buttons, and shows the response object in a message box."""
    global usr
    url="http://www."+etry_url.get()
    #url="http://172.16.58.3/index.html"
    a=usr.get()
    # Map the selected radio button to a spoofed User-Agent header.
    if a==0:
        headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'}
    elif a==1:
        headers = {'User-Agent' : 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;'}
    elif a==2:
        headers = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'}
    else:
        headers={}
    # `get` here is requests.get (imported via `from requests import *`).
    r=get(url,headers=headers)
    msg.showinfo(title='Info',message=r)
#CreateWindow: fixed-size main application window
window=Tk()
window.geometry("500x500")
#Labels
lbl_url=Label(window,text="Url: http://www.")
lbl_url.grid(column=0,row=0)
#Entry: hostname typed by the user (the scheme prefix is added in start())
etry_url=Entry(window,width=20)
etry_url.grid(column=1,row=0)
#Header_RadioBtn: selects which User-Agent header start() will send
usr=IntVar()
usr_agt1=Radiobutton(window,text="Win/Mozilla",value=0,variable=usr)
usr_agt1.grid(column=1,row=1)
usr_agt2=Radiobutton(window,text="IE9.0",value=1,variable=usr)
usr_agt2.grid(column=2,row=1)
usr_agt3=Radiobutton(window,text="MAC/Mozilla",value=2,variable=usr)
usr_agt3.grid(column=3,row=1)
usr.set(1)
#Button: triggers the HTTP request via start()
btn=Button(window,command=start)
btn.grid(column=2,row=0)
#Mainloop: blocks until the window is closed
window.mainloop()
"tkinter.messagebox.showinfo"
] | [((680, 717), 'tkinter.messagebox.showinfo', 'msg.showinfo', ([], {'title': '"""Info"""', 'message': 'r'}), "(title='Info', message=r)\n", (692, 717), True, 'from tkinter import messagebox as msg\n')] |
import json
from django.conf import settings
from django.template.loader import render_to_string
from api_v3.factories import (
ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory)
from api_v3.factories.support import Faker
from api_v3.models import Subscriber, Action
from api_v3.serializers import SubscriberSerializer
from api_v3.serializers.mixins import ResponderSubscriberSerializer
from api_v3.views.subscribers import SubscribersEndpoint
from .support import ApiTestCase, APIClient, reverse
class SubscribersEndpointTestCase(ApiTestCase):
    """Tests for the /subscribers JSON:API endpoint.

    Covers create/delete/list/retrieve permission rules for requesters,
    responders, superusers and unrelated users, plus the email
    notification hook.
    """

    def setUp(self):
        # users[0]/users[1]: ticket requesters, users[2]: superuser,
        # users[3]: unrelated user with no ticket access.
        self.client = APIClient()
        self.users = [
            ProfileFactory.create(),
            ProfileFactory.create(),
            ProfileFactory.create(is_superuser=True),
            ProfileFactory.create()
        ]
        self.tickets = [
            TicketFactory.create(requester=self.users[0]),
            TicketFactory.create(requester=self.users[1])
        ]
        self.responders = [
            ResponderFactory.create(ticket=self.tickets[0], user=self.users[1]),
            ResponderFactory.create(ticket=self.tickets[1], user=self.users[2])
        ]
        # users[0] subscribes to their own ticket.
        self.subscriber = SubscriberFactory.create(
            ticket=self.tickets[0], user=self.users[0]
        )

    def test_create_arbitrary_user(self):
        # A user unrelated to the ticket cannot subscribe: validation
        # reports 'Ticket not found.' with a 422.
        self.client.force_authenticate(self.users[3])
        new_data = self.as_jsonapi_payload(
            SubscriberSerializer, self.subscriber)
        new_data['data']['attributes']['email'] = self.users[3].email
        response = self.client.post(
            reverse('subscriber-list'),
            data=json.dumps(new_data),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 422)
        response = json.loads(response.content)
        self.assertEqual(
            response['errors'][0]['detail']['data/attributes/ticket'],
            'Ticket not found.'
        )

    def test_create_superuser_user_is_subscriber(self):
        # Subscribing an already-subscribed user is rejected.
        self.client.force_authenticate(self.users[2])
        new_data = self.as_jsonapi_payload(
            SubscriberSerializer, self.subscriber)
        new_data['data']['attributes']['email'] = self.subscriber.user.email
        response = self.client.post(
            reverse('subscriber-list'),
            data=json.dumps(new_data),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 422)
        response = json.loads(response.content)
        self.assertEqual(
            response['errors'][0]['source']['pointer'],
            '/data/attributes/user'
        )
        self.assertEqual(
            response['errors'][0]['detail'],
            'Subscriber already exists.'
        )

    def test_create_superuser_user_is_responder(self):
        # A ticket responder cannot also be added as a subscriber.
        self.client.force_authenticate(self.users[2])
        new_data = self.as_jsonapi_payload(
            SubscriberSerializer, self.subscriber)
        new_data['data']['attributes']['email'] = self.users[1].email
        response = self.client.post(
            reverse('subscriber-list'),
            data=json.dumps(new_data),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 422)
        response = json.loads(response.content)
        self.assertEqual(
            response['errors'][0]['source']['pointer'],
            '/data/attributes/user'
        )
        self.assertEqual(
            response['errors'][0]['detail'],
            'User is a responder.'
        )

    def test_create_superuser(self):
        # A superuser may subscribe themselves to any ticket.
        self.client.force_authenticate(self.users[2])
        subscribers_count = Subscriber.objects.count()
        new_data = self.as_jsonapi_payload(
            SubscriberSerializer, self.subscriber)
        new_data['data']['attributes']['email'] = self.users[2].email
        response = self.client.post(
            reverse('subscriber-list'),
            data=json.dumps(new_data),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 201)
        self.assertEqual(Subscriber.objects.count(), subscribers_count + 1)

    def test_create_non_superuser(self):
        # The ticket requester may subscribe another user by email.
        self.client.force_authenticate(self.users[0])
        subscribers_count = Subscriber.objects.count()
        new_data = self.as_jsonapi_payload(
            SubscriberSerializer, self.subscriber)
        new_data['data']['attributes']['email'] = self.users[3].email
        response = self.client.post(
            reverse('subscriber-list'),
            data=json.dumps(new_data),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 201)
        self.assertEqual(Subscriber.objects.count(), subscribers_count + 1)

    def test_create_non_superuser_arbitrary_email(self):
        # An email with no matching profile creates a user-less subscriber.
        self.client.force_authenticate(self.users[0])
        subscribers_count = Subscriber.objects.count()
        new_data = self.as_jsonapi_payload(
            SubscriberSerializer, self.subscriber)
        new_email = Faker('email').generate({})
        new_data['data']['attributes']['user'] = {}
        new_data['data']['attributes']['email'] = new_email
        response = self.client.post(
            reverse('subscriber-list'),
            data=json.dumps(new_data),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 201)
        self.assertEqual(Subscriber.objects.count(), subscribers_count + 1)
        self.assertEqual(Subscriber.objects.last().email, new_email)

    def test_create_non_superuser_arbitrary_email_exists(self):
        # Re-subscribing an email that is already subscribed is rejected.
        self.client.force_authenticate(self.users[0])
        new_email = Faker('email').generate({})
        Subscriber.objects.create(
            ticket=self.subscriber.ticket, email=new_email)
        subscribers_count = Subscriber.objects.count()
        new_data = self.as_jsonapi_payload(
            SubscriberSerializer, self.subscriber)
        new_data['data']['attributes'].pop('user')
        new_data['data']['attributes']['email'] = new_email
        response = self.client.post(
            reverse('subscriber-list'),
            data=json.dumps(new_data),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 422)
        self.assertEqual(Subscriber.objects.count(), subscribers_count)
        data = json.loads(response.content)
        self.assertEqual(
            data['errors'][0]['source']['pointer'],
            '/data/attributes/email'
        )
        self.assertEqual(
            data['errors'][0]['detail'],
            ResponderSubscriberSerializer.EMAIL_SUBSCRIBER_ERROR_MESSAGE
        )

    def test_delete_non_superuser(self):
        # Unrelated users cannot even see the subscriber: 404, not 403.
        self.client.force_authenticate(self.users[3])
        response = self.client.delete(
            reverse('subscriber-detail', args=[self.subscriber.id]),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 404)

    def test_delete_self_subscriber(self):
        # A subscriber may remove their own subscription.
        self.client.force_authenticate(self.users[0])
        subscribers_count = Subscriber.objects.count()
        response = self.client.delete(
            reverse('subscriber-detail', args=[self.subscriber.id]),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 204)
        self.assertEqual(Subscriber.objects.count(), subscribers_count - 1)

    def test_delete_superuser(self):
        # A superuser may remove any subscription.
        self.client.force_authenticate(self.users[2])
        subscribers_count = Subscriber.objects.count()
        response = self.client.delete(
            reverse('subscriber-detail', args=[self.subscriber.id]),
            content_type=self.JSON_API_CONTENT_TYPE
        )
        self.assertEqual(response.status_code, 204)
        self.assertEqual(Subscriber.objects.count(), subscribers_count - 1)

    def test_list_anonymous(self):
        # Listing requires authentication.
        response = self.client.get(reverse('subscriber-list'))
        self.assertEqual(response.status_code, 401)

    def test_list_authenticated(self):
        # Users only see subscribers of tickets they have access to.
        self.client.force_authenticate(self.users[0])
        response = self.client.get(reverse('subscriber-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(json.loads(response.content)['data']), 1)
        self.assertEqual(
            json.loads(response.content)['data'][0]['id'],
            str(self.subscriber.id)
        )

    def test_get_authenticated_subscriber(self):
        # The subscribing user can retrieve their own subscription.
        self.client.force_authenticate(self.users[0])
        response = self.client.get(
            reverse('subscriber-detail', args=[self.subscriber.id]))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            json.loads(response.content)['data']['id'],
            str(self.subscriber.id)
        )

    def test_get_authenticated_responder(self):
        # A responder on the ticket can retrieve its subscriptions.
        self.client.force_authenticate(self.users[1])
        response = self.client.get(
            reverse('subscriber-detail', args=[self.subscriber.id]))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            json.loads(response.content)['data']['id'],
            str(self.subscriber.id)
        )

    def test_get_authenticated_without_access(self):
        # Unrelated users get a 404 for subscriptions they cannot see.
        self.client.force_authenticate(self.users[3])
        response = self.client.get(
            reverse('subscriber-detail', args=[self.subscriber.id]))
        self.assertEqual(response.status_code, 404)

    def test_email_notify(self):
        # email_notify should send exactly one email to the subscriber,
        # rendered from the subscriber_added template.
        controller = SubscribersEndpoint()
        activity = Action(
            actor=self.subscriber.user,
            action=self.users[0],
            target=self.subscriber.ticket,
            verb='test-subscriber-added'
        )
        count, emails = controller.email_notify(activity, self.subscriber)
        self.assertEqual(count, 1)
        self.assertEqual(emails[0], [
            controller.EMAIL_SUBJECT.format(self.tickets[0].id),
            render_to_string(
                'mail/subscriber_added.txt', {
                    'ticket': self.tickets[0],
                    'name': self.users[0].display_name,
                    'site_name': settings.SITE_NAME
                }
            ),
            settings.DEFAULT_FROM_EMAIL,
            [self.users[0].email]
        ])
| [
"json.loads",
"api_v3.models.Subscriber.objects.last",
"api_v3.factories.ResponderFactory.create",
"api_v3.factories.TicketFactory.create",
"json.dumps",
"api_v3.factories.SubscriberFactory.create",
"api_v3.models.Action",
"api_v3.models.Subscriber.objects.count",
"api_v3.factories.ProfileFactory.cr... | [((1200, 1268), 'api_v3.factories.SubscriberFactory.create', 'SubscriberFactory.create', ([], {'ticket': 'self.tickets[0]', 'user': 'self.users[0]'}), '(ticket=self.tickets[0], user=self.users[0])\n', (1224, 1268), False, 'from api_v3.factories import ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory\n'), ((1807, 1835), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (1817, 1835), False, 'import json\n'), ((2512, 2540), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (2522, 2540), False, 'import json\n'), ((3319, 3347), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (3329, 3347), False, 'import json\n'), ((3713, 3739), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (3737, 3739), False, 'from api_v3.models import Subscriber, Action\n'), ((4340, 4366), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (4364, 4366), False, 'from api_v3.models import Subscriber, Action\n'), ((4983, 5009), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (5007, 5009), False, 'from api_v3.models import Subscriber, Action\n'), ((5820, 5893), 'api_v3.models.Subscriber.objects.create', 'Subscriber.objects.create', ([], {'ticket': 'self.subscriber.ticket', 'email': 'new_email'}), '(ticket=self.subscriber.ticket, email=new_email)\n', (5845, 5893), False, 'from api_v3.models import Subscriber, Action\n'), ((5936, 5962), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (5960, 5962), False, 'from api_v3.models import Subscriber, Action\n'), ((6490, 6518), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (6500, 6518), False, 'import json\n'), ((7242, 7268), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (7266, 7268), 
False, 'from api_v3.models import Subscriber, Action\n'), ((7690, 7716), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (7714, 7716), False, 'from api_v3.models import Subscriber, Action\n'), ((9685, 9706), 'api_v3.views.subscribers.SubscribersEndpoint', 'SubscribersEndpoint', ([], {}), '()\n', (9704, 9706), False, 'from api_v3.views.subscribers import SubscribersEndpoint\n'), ((9727, 9849), 'api_v3.models.Action', 'Action', ([], {'actor': 'self.subscriber.user', 'action': 'self.users[0]', 'target': 'self.subscriber.ticket', 'verb': '"""test-subscriber-added"""'}), "(actor=self.subscriber.user, action=self.users[0], target=self.\n subscriber.ticket, verb='test-subscriber-added')\n", (9733, 9849), False, 'from api_v3.models import Subscriber, Action\n'), ((661, 684), 'api_v3.factories.ProfileFactory.create', 'ProfileFactory.create', ([], {}), '()\n', (682, 684), False, 'from api_v3.factories import ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory\n'), ((698, 721), 'api_v3.factories.ProfileFactory.create', 'ProfileFactory.create', ([], {}), '()\n', (719, 721), False, 'from api_v3.factories import ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory\n'), ((735, 775), 'api_v3.factories.ProfileFactory.create', 'ProfileFactory.create', ([], {'is_superuser': '(True)'}), '(is_superuser=True)\n', (756, 775), False, 'from api_v3.factories import ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory\n'), ((789, 812), 'api_v3.factories.ProfileFactory.create', 'ProfileFactory.create', ([], {}), '()\n', (810, 812), False, 'from api_v3.factories import ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory\n'), ((860, 905), 'api_v3.factories.TicketFactory.create', 'TicketFactory.create', ([], {'requester': 'self.users[0]'}), '(requester=self.users[0])\n', (880, 905), False, 'from api_v3.factories import ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory\n'), ((919, 
964), 'api_v3.factories.TicketFactory.create', 'TicketFactory.create', ([], {'requester': 'self.users[1]'}), '(requester=self.users[1])\n', (939, 964), False, 'from api_v3.factories import ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory\n'), ((1015, 1082), 'api_v3.factories.ResponderFactory.create', 'ResponderFactory.create', ([], {'ticket': 'self.tickets[0]', 'user': 'self.users[1]'}), '(ticket=self.tickets[0], user=self.users[1])\n', (1038, 1082), False, 'from api_v3.factories import ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory\n'), ((1096, 1163), 'api_v3.factories.ResponderFactory.create', 'ResponderFactory.create', ([], {'ticket': 'self.tickets[1]', 'user': 'self.users[2]'}), '(ticket=self.tickets[1], user=self.users[2])\n', (1119, 1163), False, 'from api_v3.factories import ProfileFactory, TicketFactory, ResponderFactory, SubscriberFactory\n'), ((4164, 4190), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (4188, 4190), False, 'from api_v3.models import Subscriber, Action\n'), ((4791, 4817), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (4815, 4817), False, 'from api_v3.models import Subscriber, Action\n'), ((5524, 5550), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (5548, 5550), False, 'from api_v3.models import Subscriber, Action\n'), ((6427, 6453), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (6451, 6453), False, 'from api_v3.models import Subscriber, Action\n'), ((7518, 7544), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (7542, 7544), False, 'from api_v3.models import Subscriber, Action\n'), ((7966, 7992), 'api_v3.models.Subscriber.objects.count', 'Subscriber.objects.count', ([], {}), '()\n', (7990, 7992), False, 'from api_v3.models import Subscriber, Action\n'), ((1650, 1670), 'json.dumps', 'json.dumps', 
(['new_data'], {}), '(new_data)\n', (1660, 1670), False, 'import json\n'), ((2355, 2375), 'json.dumps', 'json.dumps', (['new_data'], {}), '(new_data)\n', (2365, 2375), False, 'import json\n'), ((3163, 3183), 'json.dumps', 'json.dumps', (['new_data'], {}), '(new_data)\n', (3173, 3183), False, 'import json\n'), ((4002, 4022), 'json.dumps', 'json.dumps', (['new_data'], {}), '(new_data)\n', (4012, 4022), False, 'import json\n'), ((4629, 4649), 'json.dumps', 'json.dumps', (['new_data'], {}), '(new_data)\n', (4639, 4649), False, 'import json\n'), ((5127, 5141), 'api_v3.factories.support.Faker', 'Faker', (['"""email"""'], {}), "('email')\n", (5132, 5141), False, 'from api_v3.factories.support import Faker\n'), ((5362, 5382), 'json.dumps', 'json.dumps', (['new_data'], {}), '(new_data)\n', (5372, 5382), False, 'import json\n'), ((5600, 5625), 'api_v3.models.Subscriber.objects.last', 'Subscriber.objects.last', ([], {}), '()\n', (5623, 5625), False, 'from api_v3.models import Subscriber, Action\n'), ((5784, 5798), 'api_v3.factories.support.Faker', 'Faker', (['"""email"""'], {}), "('email')\n", (5789, 5798), False, 'from api_v3.factories.support import Faker\n'), ((6265, 6285), 'json.dumps', 'json.dumps', (['new_data'], {}), '(new_data)\n', (6275, 6285), False, 'import json\n'), ((10131, 10278), 'django.template.loader.render_to_string', 'render_to_string', (['"""mail/subscriber_added.txt"""', "{'ticket': self.tickets[0], 'name': self.users[0].display_name, 'site_name':\n settings.SITE_NAME}"], {}), "('mail/subscriber_added.txt', {'ticket': self.tickets[0],\n 'name': self.users[0].display_name, 'site_name': settings.SITE_NAME})\n", (10147, 10278), False, 'from django.template.loader import render_to_string\n'), ((8409, 8437), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (8419, 8437), False, 'import json\n'), ((8883, 8911), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (8893, 8911), False, 'import json\n'), 
((9273, 9301), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (9283, 9301), False, 'import json\n'), ((8489, 8517), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (8499, 8517), False, 'import json\n')] |
from time import sleep
import emoji
# Read the full name, then greet the user by first and last name.
nome = str(input('Qual seu nome completo? ')).strip()
nom = nome.split()
print('É um prazer conhecer você\n{}, {}'.format(nom[0].capitalize(), nom[len(nom) - 1].capitalize()))
# Render the :dizzy_face: shortcode as a real emoji before printing.
print(emoji.emojize('Serei seu mestre em alguns anos...não pera, estão me desligandooo..:dizzy_face:',use_aliases=True))
sleep(4)
| [
"emoji.emojize",
"time.sleep"
] | [((333, 341), 'time.sleep', 'sleep', (['(4)'], {}), '(4)\n', (338, 341), False, 'from time import sleep\n'), ((218, 342), 'emoji.emojize', 'emoji.emojize', (['"""Serei seu mestre em alguns anos...não pera, estão me desligandooo..:dizzy_face:"""'], {'use_aliases': '(True)'}), "(\n 'Serei seu mestre em alguns anos...não pera, estão me desligandooo..:dizzy_face:'\n , use_aliases=True)\n", (231, 342), False, 'import emoji\n')] |
def capitalcities(user_input):
    """Returns the capital city for *user_input* (a country name, matched
    case-insensitively).

    Scrapes the Wikipedia 'List of national capitals' page. If the country
    is not found, returns a usage-hint string instead.
    """
    import pandas as pd
    caps = pd.read_html('https://en.wikipedia.org/wiki/List_of_national_capitals')
    capital = caps[1]
    capital.drop('Notes', axis=1, inplace=True)
    capital.columns = ['Capital', 'Country']
    # Rows 0 and 62 are artifact rows in the scraped table.
    capital.drop([0, 62], inplace=True)

    def remove_extra(x):
        # Collapse annotated variants (footnoted names etc.) to the plain name.
        for country in capital.Country:
            if x in country:
                capital.Country.replace({country: x}, inplace=True)

    remove_extra('Israel')
    remove_extra('Palestine')
    remove_extra('Western Sahara')
    remove_extra('Kosovo')
    capital.Country.replace(
        {'Cocos (Keeling) Islands': 'Cocos Islands',
         'Guinea-Bissau': 'Guinea Bissau'}, inplace=True)
    # Lowercase every country name once for case-insensitive lookup.
    capital.Country = capital.Country.str.lower()
    countries = capital.Country.tolist()
    capitals = capital.Capital.tolist()
    # Fixed: the original wrapped this single-pass lookup in a pointless
    # `while True` loop with an unreachable `break` after `return`, and
    # evaluated a dead `capital[capital['Country'] == 'Sri Lanka']` no-op.
    user_input = user_input.lower()
    if user_input in countries:
        return f'{capitals[countries.index(user_input)]}'
    return 'Call the function with the country name to get the capital'
| [
"pandas.read_html"
] | [((69, 140), 'pandas.read_html', 'pd.read_html', (['"""https://en.wikipedia.org/wiki/List_of_national_capitals"""'], {}), "('https://en.wikipedia.org/wiki/List_of_national_capitals')\n", (81, 140), True, 'import pandas as pd\n')] |
import json
import os
import sys
# Silence TensorFlow C++ logging (3 = errors only); must be set before importing tf.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import sentencepiece as spm
# Record geometry: every example holds seq_length tokens and at most
# max_predictions_per_seq masked-LM targets.
seq_length = 256
max_predictions_per_seq = 20
# Feature spec used to parse BERT-style pre-training tf.Example records.
name_to_features = {
    'input_ids':
        tf.io.FixedLenFeature([seq_length], tf.int64),
    'input_mask':
        tf.io.FixedLenFeature([seq_length], tf.int64),
    'segment_ids':
        tf.io.FixedLenFeature([seq_length], tf.int64),
    'masked_lm_positions':
        tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
    'masked_lm_ids':
        tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
    'masked_lm_weights':
        tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),
    'next_sentence_labels':
        tf.io.FixedLenFeature([1], tf.int64),
}
def decode_record(record, name_to_features=name_to_features):
    """Decodes a serialized tf.Example into a dict of feature tensors."""
    example = tf.io.parse_single_example(record, name_to_features)
    # tf.Example only supports tf.int64, but the TPU only supports tf.int32,
    # so downcast every int64 feature in place.
    for key in list(example.keys()):
        value = example[key]
        if value.dtype == tf.int64:
            example[key] = tf.cast(value, tf.int32)
    return example
# Load the SentencePiece model given as the second CLI argument.
spm_name = sys.argv[2]
sp = spm.SentencePieceProcessor()
sp.Load(spm_name)
def splice(ids, masked, sp):
    """Renders token *ids* as pieces, substituting '[piece]' markers for
    masked slots.

    A token id of 0 marks a masked position and is replaced by the next id
    from *masked* (rendering stops once *masked* is exhausted).
    """
    # Fixed: dropped the unused `iit = iter(ids)` local.
    mit = iter(masked)
    out = []
    for i in ids:
        if i == 0:
            try:
                out.append(f"[{sp.id_to_piece(next(mit))}]")
            except StopIteration:
                break
        else:
            out.append(sp.id_to_piece(i))
    return "".join(out)
def rsplice(record, sp):
    """Reconstruct readable text from a parsed record, rendering each
    masked-out position as ``[piece]`` and mapping the SentencePiece
    word-boundary marker back to a space."""
    ids = [int(v) for v in record["input_ids"].numpy()]
    masked_ids = iter(int(v) for v in record["masked_lm_ids"].numpy())
    # A trailing 0 acts as a sentinel so the position iterator always has
    # one more value than there are real masked positions.
    positions = iter(list(record["masked_lm_positions"].numpy()) + [0])
    next_masked_pos = next(positions)
    pieces = []
    for idx, token_id in enumerate(ids):
        if idx == next_masked_pos:
            next_masked_pos = next(positions)
            pieces.append(f"[{sp.id_to_piece(next(masked_ids))}]")
        else:
            pieces.append(sp.id_to_piece(token_id))
    return "".join(pieces).replace("▁", " ")
def decode(record, sp):
    """Print the raw input ids of *record* and return its spliced text.

    :param record: parsed tf.Example feature dict (tensors with .numpy())
    :param sp: SentencePiece processor
    :return: string produced by :func:`splice`
    """
    # Fix: removed the unreachable ``pass`` and dead commented-out code
    # that followed the return statement in the original.
    ids = list(map(int, record["input_ids"].numpy()))
    print(ids)  # kept: the original deliberately dumps the raw ids
    masked = list(map(int, record["masked_lm_ids"].numpy()))
    return splice(ids, masked, sp)
# argv[1]: TFRecord file to dump in human-readable form.
name = sys.argv[1]
print("Opening", name)
d = tf.data.TFRecordDataset([name])
# Show only the first 20 records.
count = 20
for s in d:
    sample = decode_record(s)
    print(rsplice(sample, sp))
    print()
    count -= 1
    if count <= 0:
        break
| [
"tensorflow.data.TFRecordDataset",
"tensorflow.io.parse_single_example",
"sentencepiece.SentencePieceProcessor",
"tensorflow.io.FixedLenFeature",
"tensorflow.cast"
] | [((1243, 1271), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {}), '()\n', (1269, 1271), True, 'import sentencepiece as spm\n'), ((2703, 2734), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['[name]'], {}), '([name])\n', (2726, 2734), True, 'import tensorflow as tf\n'), ((222, 267), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (243, 267), True, 'import tensorflow as tf\n'), ((295, 340), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (316, 340), True, 'import tensorflow as tf\n'), ((369, 414), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (390, 414), True, 'import tensorflow as tf\n'), ((451, 509), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.int64'], {}), '([max_predictions_per_seq], tf.int64)\n', (472, 509), True, 'import tensorflow as tf\n'), ((540, 598), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.int64'], {}), '([max_predictions_per_seq], tf.int64)\n', (561, 598), True, 'import tensorflow as tf\n'), ((633, 693), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.float32'], {}), '([max_predictions_per_seq], tf.float32)\n', (654, 693), True, 'import tensorflow as tf\n'), ((731, 767), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[1]', 'tf.int64'], {}), '([1], tf.int64)\n', (752, 767), True, 'import tensorflow as tf\n'), ((896, 948), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['record', 'name_to_features'], {}), '(record, name_to_features)\n', (922, 948), True, 'import tensorflow as tf\n'), ((1153, 1173), 'tensorflow.cast', 'tf.cast', (['t', 'tf.int32'], {}), '(t, tf.int32)\n', (1160, 1173), True, 'import 
tensorflow as tf\n')] |
# Universidade Federal de Viçosa - Campus Rio Paranaíba
# Sistemas de Informação - Processamento Digital de Imagens
#
# Professor: <NAME>
# Autores:
# - MatheusRV (3929)
# - iguit0 (3902)
# - ThiagoMunich (3628)
#
# Filtragem espacial para suavização - Filtro de máximo e mínimo
# Como Executar:
# $ python med.py img_1.tif saida <mask_size>
# <mask_size> é um número inteiro. Exemplo: Se mask_size=3 então a máscara possui tamanho 3x3
import sys
import os
import numpy as np
from scipy.ndimage import filters
import matplotlib.pyplot as plt
from scipy import misc
from skimage import color, data, util
def loadImg(arg):
    """Read the image at path *arg* into a numpy array.

    NOTE(review): scipy.misc.imread was deprecated in SciPy 1.0 and
    removed in 1.2 — on modern SciPy this call fails; imageio.imread is
    the documented replacement.  Verify the pinned SciPy version.
    """
    return misc.imread(arg)
# CLI: argv[1] = input image, argv[2] = output basename, argv[3] = mask size.
img = loadImg(sys.argv[1])
saida = sys.argv[2] + '_mediana.tif'
ms = sys.argv[3]
ms = int(ms)
# Apply an ms x ms median filter (smoothing / salt-and-pepper removal).
img_mediana = filters.median_filter(img, size=ms)
img_saida = misc.imsave(saida, img_mediana)
| [
"scipy.ndimage.filters.median_filter",
"scipy.misc.imsave",
"scipy.misc.imread"
] | [((789, 824), 'scipy.ndimage.filters.median_filter', 'filters.median_filter', (['img'], {'size': 'ms'}), '(img, size=ms)\n', (810, 824), False, 'from scipy.ndimage import filters\n'), ((838, 869), 'scipy.misc.imsave', 'misc.imsave', (['saida', 'img_mediana'], {}), '(saida, img_mediana)\n', (849, 869), False, 'from scipy import misc\n'), ((662, 678), 'scipy.misc.imread', 'misc.imread', (['arg'], {}), '(arg)\n', (673, 678), False, 'from scipy import misc\n')] |
import torch
import torch.nn as nn
from .segmentation import deeplabv3_resnet50, deeplabv3_resnet101
__ALL__ = ["get_model"]
BatchNorm2d = nn.BatchNorm2d
BN_MOMENTUM = 0.01
class Transform(nn.Module):
    """Rescale raw [0, 255] pixel intensities into the [-1, 1] range."""

    def forward(self, input):
        # Same arithmetic order as the original ``2 * input / 255 - 1``.
        scaled = 2 * input / 255
        return scaled - 1
def load_pretrain_model(model, pretrain: str, city):
    """Copy shape-compatible weights from a checkpoint into *model*.

    An empty *pretrain* path is a no-op.  A literal ``{city}`` placeholder
    in the path is substituted with *city* before loading.  Only keys
    present in the model whose tensor shapes match are overwritten.
    """
    if pretrain == "":
        return
    if "{city}" in pretrain:
        pretrain = pretrain.format(city=city)
    checkpoint = torch.load(pretrain, map_location="cpu")
    state = model.state_dict()
    for key, tensor in checkpoint.items():
        if key in state and state[key].shape == tensor.shape:
            state[key] = tensor
    model.load_state_dict(state)
def get_hrnet(
    input_channels, num_classes, model_version, pos_weight=None, cfg_path="", **kwargs
):
    """Build an HRNet segmentation model adapted to this project.

    The stem conv is rebuilt to accept ``input_channels`` planes (with a
    pixel-scaling transform prepended unless the GaussRank variant is
    used), and the classification head is replaced by an upsampling
    decoder ending in a ``num_classes``-channel 1x1 conv.

    ``kwargs["cfg"]`` is required; ``kwargs["frozen_layers"]``, when
    truthy, freezes every backbone parameter *before* the new
    conv1/last_layer are attached, so the new layers stay trainable.
    """
    from .hrnet import get_cfg, get_seg_model

    model_cfg = get_cfg()
    if cfg_path != "":
        model_cfg.merge_from_file(cfg_path)
    model_cfg.NUM_CLASSES = num_classes
    model = get_seg_model(
        model_cfg,
        model_version=model_version,
        pos_weight=pos_weight,
        all_cfg=kwargs["cfg"],
    )
    # GaussRank inputs are already normalized, so no pixel scaling there.
    if model_version in ["HighResolutionNetGaussRank"]:
        input_transform = nn.Identity()
    else:
        input_transform = Transform()
    if "frozen_layers" in kwargs:
        if kwargs["frozen_layers"]:
            for param in model.parameters():
                param.requires_grad = False
    # Rebuild the stem so arbitrary input channel counts are accepted.
    model.conv1 = nn.Sequential(
        input_transform,
        nn.Conv2d(input_channels, 64, kernel_size=3, stride=2, padding=1, bias=False),
    )
    last_inp_channels = model.last_layer[0].in_channels
    # redefine last layer: two transposed convs upsample before the
    # final 1x1 classification conv.
    model.last_layer = nn.Sequential(
        nn.ConvTranspose2d(
            in_channels=last_inp_channels,
            out_channels=last_inp_channels // 2,
            kernel_size=3,
            stride=2,
            padding=1,
        ),
        BatchNorm2d(last_inp_channels // 2, momentum=BN_MOMENTUM),
        nn.ReLU(inplace=True),
        nn.ConvTranspose2d(
            in_channels=last_inp_channels // 2,
            out_channels=last_inp_channels // 2 // 2,  # config.NUM_CLASSES,
            kernel_size=3,
            stride=2,
            padding=0,
            output_padding=(0, 1),
        ),
        BatchNorm2d(last_inp_channels // 2 // 2, momentum=BN_MOMENTUM),
        nn.ReLU(inplace=True),
        nn.Conv2d(
            in_channels=last_inp_channels // 2 // 2,
            out_channels=num_classes,
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )
    return model
def get_unet(input_channels, num_classes, model_version, **kwargs):
    """Build a U-Net variant from the project's unet module.

    Extra kwargs are accepted for signature parity with the other
    builders and ignored here.
    """
    # NOTE: the local import intentionally shadows this function's name.
    from .unet import get_unet

    model = get_unet(
        model_version=model_version, in_channels=input_channels, classes=num_classes
    )
    return model
# useless
def get_deeplabv3(
    input_channels, num_classes, model_version="deeplabv3_resnet50", **kwargs
):
    """Build a DeepLabV3 model from the project's segmentation module and
    widen its stem conv to accept *input_channels* planes.

    Extra kwargs are accepted for signature parity with the other
    builders and ignored.
    """
    builders = {
        "deeplabv3_resnet50": deeplabv3_resnet50,
        "deeplabv3_resnet101": deeplabv3_resnet101,
    }
    net = builders[model_version](
        pretrained=False, progress=False, num_classes=num_classes, aux_loss=None
    )
    # Replace the first conv so inputs need not be 3-channel RGB.
    net.backbone.conv1 = nn.Conv2d(
        input_channels,
        64,
        kernel_size=(7, 7),
        stride=(2, 2),
        padding=(3, 3),
        bias=False,
    )
    return net
def replace_relu(model, activation="ReLU"):
    """Recursively replace every ``nn.ReLU`` in *model* with another
    torch.nn activation named by *activation*.

    ``GELU``/``CELU`` are constructed without arguments (matching the
    original behavior); every other activation gets ``inplace=True``.

    Fix: the original built the replacement with ``eval`` on a format
    string; ``getattr(nn, activation)`` does the same lookup without
    arbitrary code execution.
    """
    for child_name, child in model.named_children():
        if isinstance(child, nn.ReLU):
            act_cls = getattr(nn, activation)
            if activation in ("GELU", "CELU"):
                replacement = act_cls()
            else:
                replacement = act_cls(inplace=True)
            setattr(model, child_name, replacement)
        else:
            # Recurse into submodules that are not themselves ReLU.
            replace_relu(child, activation)
# Registry mapping cfg.MODEL.NAME (lower-cased) to its builder function.
MODEL_MAPS = {"hrnet": get_hrnet, "deeplabv3": get_deeplabv3, "unet": get_unet}


def get_model(
    cfg, city=None,
):
    """Build the model described by *cfg* and load its pretrained weights.

    *cfg* is the project's config object (yacs-style attribute access —
    TODO confirm); *city* is forwarded to the checkpoint-path template in
    :func:`load_pretrain_model`.
    """
    model_name = cfg.MODEL.NAME.lower()
    model_version = cfg.MODEL.MODEL_VERSION
    num_classes = cfg.DATASET.OUTPUT_CHANNELS  # .lower()
    input_channels = cfg.DATASET.INPUT_CHANNELS
    assert model_name in MODEL_MAPS, "model is not allowed"
    pos_weight = cfg.MODEL.POS_WEIGHT if cfg.MODEL.USE_POS_WEIGHT else None
    model = MODEL_MAPS[model_name](
        input_channels,
        num_classes,
        model_version,
        pos_weight=pos_weight,
        cfg_path=cfg.MODEL.MODEL_CONFIG_FILE,
        cfg=cfg,
        frozen_layers=cfg.MODEL.FROZEN_LAYERS,
    )
    load_pretrain_model(model, cfg.DIST.PRETRAIN_MODEL, city=city)
    # Optionally swap all hidden ReLUs for another activation.
    if cfg.MODEL.HIDDEN_ACTIVATION != "default":
        replace_relu(model, cfg.MODEL.HIDDEN_ACTIVATION)
    return model
| [
"torch.nn.ReLU",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.Identity",
"torch.nn.ConvTranspose2d"
] | [((3250, 3346), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(64)'], {'kernel_size': '(7, 7)', 'stride': '(2, 2)', 'padding': '(3, 3)', 'bias': '(False)'}), '(input_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3,\n 3), bias=False)\n', (3259, 3346), True, 'import torch.nn as nn\n'), ((458, 498), 'torch.load', 'torch.load', (['pretrain'], {'map_location': '"""cpu"""'}), "(pretrain, map_location='cpu')\n", (468, 498), False, 'import torch\n'), ((1241, 1254), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1252, 1254), True, 'import torch.nn as nn\n'), ((1530, 1607), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(64)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(input_channels, 64, kernel_size=3, stride=2, padding=1, bias=False)\n', (1539, 1607), True, 'import torch.nn as nn\n'), ((1745, 1872), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'last_inp_channels', 'out_channels': '(last_inp_channels // 2)', 'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=last_inp_channels, out_channels=\n last_inp_channels // 2, kernel_size=3, stride=2, padding=1)\n', (1763, 1872), True, 'import torch.nn as nn\n'), ((2015, 2036), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2022, 2036), True, 'import torch.nn as nn\n'), ((2046, 2210), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(last_inp_channels // 2)', 'out_channels': '(last_inp_channels // 2 // 2)', 'kernel_size': '(3)', 'stride': '(2)', 'padding': '(0)', 'output_padding': '(0, 1)'}), '(in_channels=last_inp_channels // 2, out_channels=\n last_inp_channels // 2 // 2, kernel_size=3, stride=2, padding=0,\n output_padding=(0, 1))\n', (2064, 2210), True, 'import torch.nn as nn\n'), ((2389, 2410), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2396, 2410), True, 'import torch.nn as nn\n'), ((2420, 2536), 'torch.nn.Conv2d', 
'nn.Conv2d', ([], {'in_channels': '(last_inp_channels // 2 // 2)', 'out_channels': 'num_classes', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=last_inp_channels // 2 // 2, out_channels=num_classes,\n kernel_size=1, stride=1, padding=0)\n', (2429, 2536), True, 'import torch.nn as nn\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 16:39:41 2021
@author: harsh
"""
import numpy as np
import math as mm
import opensees as op
import time as tt
##################################################################
# #
# Effective stress site response analysis for a layered #
# soil profile located on a 2% slope and underlain by an #
# elastic half-space. 9-node quadUP elements are used. #
# The finite rigidity of the elastic half space is #
# considered through the use of a viscous damper at the #
# base. #
# #
# Converted to openseespy by: <NAME> #
# The University of Manchester #
# #
# Created by: <NAME> #
# <NAME> #
# <NAME> #
# <NAME> #
# --University of Washington-- #
# #
# ---> Basic units are kN and m (unless specified) #
# #
##################################################################
#-----------------------------------------------------------------------------------------
# 1. DEFINE SOIL AND MESH GEOMETRY
#-----------------------------------------------------------------------------------------
op.wipe()
nodes_dict = dict()
#---SOIL GEOMETRY
# thicknesses of soil profile (m)
soilThick = 30.0
# number of soil layers
numLayers = 3
# layer thicknesses
layerThick=[20.0,8.0,2.0]
# depth of water table
waterTable = 2.0
# define layer boundaries
layerBound=np.zeros((numLayers,1))
layerBound[0]=layerThick[0];
for i in range(1,numLayers):
layerBound[i]=layerBound[i-1]+layerThick[i]
#---MESH GEOMETRY
# number of elements in horizontal direction
nElemX = 1
# number of nodes in horizontal direction
nNodeX =2 * nElemX+1
# horizontal element size (m)
sElemX = 2.0
# number of elements in vertical direction for each layer
nElemY = [40,16,4]
# total number of elements in vertical direction
nElemT = 60
sElemY = np.zeros((numLayers,1))
# vertical element size in each layer
for i in range(numLayers):
sElemY[i] = [layerThick[i-1]/nElemY[i-1]]
print('size:',sElemY[i])
# number of nodes in vertical direction
nNodeY = 2 * nElemT+1
# total number of nodes
nNodeT = nNodeX * nNodeY
#-----------------------------------------------------------------------------------------
# 2. CREATE PORE PRESSURE NODES AND FIXITIES
#-----------------------------------------------------------------------------------------
# Pore-pressure nodes: u-p formulation, 3 DOF (ux, uy, pore pressure).
op.model('basic', '-ndm', 2, '-ndf', 3)
count = 1
layerNodeCount = 0
# scratch arrays (oversized; trailing zeros are trimmed below)
dry_Node = np.zeros((500, 1))
node_save = np.zeros((500, 1))
# loop over soil layers
for k in range(1, numLayers + 1):
    # loop in horizontal direction (corner columns only: step 2)
    for i in range(1, nNodeX + 1, 2):
        # the bottom layer carries one extra node row at the base
        if k == 1:
            bump = 1
        else:
            bump = 0
        j_end = 2 * nElemY[k - 1] + bump
        for j in range(1, j_end + 1, 2):
            xCoord = (i - 1) * (sElemX / 2)
            yctr = j + layerNodeCount
            # Fix: np.float/np.int were aliases of the builtins and are
            # removed in NumPy >= 1.24; the builtins are exact equivalents.
            yCoord = (yctr - 1) * float(sElemY[k - 1]) / 2
            nodeNum = i + ((yctr - 1) * nNodeX)
            op.node(nodeNum, xCoord, yCoord)
            # output nodal information to data file
            nodes_dict[nodeNum] = (nodeNum, xCoord, yCoord)
            node_save[nodeNum] = int(nodeNum)
            # designate nodes above water table
            waterHeight = soilThick - waterTable
            if yCoord >= waterHeight:
                dry_Node[count] = nodeNum
                count = count + 1
        layerNodeCount = yctr + 1
dryNode = np.trim_zeros(dry_Node)
Node_d = np.unique(node_save)
Node_d = np.trim_zeros(Node_d)
np.savetxt('Node_record.txt', Node_d)
print('Finished creating all -ndf 3 nodes')
print('Number of Dry Nodes:', len(dryNode))
# fix the pore-pressure DOF (dof 3) for nodes above the water table
for i in range(count - 1):
    n_dryNode = int(dryNode[i])
    op.fix(n_dryNode, 0, 0, 1)
# base corner nodes: fix vertical displacement
op.fix(1, 0, 1, 0)
op.fix(3, 0, 1, 0)
print('Finished creating all -ndf 3 boundary conditions...')
# tie horizontal/vertical DOFs of left and right column corner nodes
for i in range(1, (3 * nNodeY) - 2, 6):
    op.equalDOF(i, i + 2, 1, 2)
print("Finished creating equalDOF for pore pressure nodes...")
#-----------------------------------------------------------------------------------------
# 3. CREATE INTERIOR NODES AND FIXITIES
#-----------------------------------------------------------------------------------------
# Interior (displacement-only) nodes: 2 DOF each.
op.model('basic', '-ndm', 2, '-ndf', 2)
# Fix: np.float was an alias of builtin float, removed in NumPy >= 1.24.
xCoord = float(sElemX / 2)
# interior nodes on the vertical element mid-line, layer by layer
layerNodeCount = 0
for k in range(1, numLayers + 1):
    # bottom layer carries one extra node row at the base
    if k == 1:
        bump = 1
    else:
        bump = 0
    j_end = 2 * nElemY[k - 1] + bump
    for j in range(1, j_end + 1, 1):
        yctr = j + layerNodeCount
        yCoord = (yctr - 1) * float(sElemY[k - 1]) / 2
        nodeNum = (3 * yctr) - 1
        op.node(nodeNum, xCoord, yCoord)
        # output nodal information to data file
        nodes_dict[nodeNum] = (nodeNum, xCoord, yCoord)
    layerNodeCount = yctr
# interior nodes on the element edges
# loop over layers
layerNodeCount = 0
for k in range(1, numLayers + 1):
    # loop in vertical direction
    for j in range(1, (nElemY[k - 1]) + 1):
        yctr = j + layerNodeCount
        yCoord = float(sElemY[k - 1]) * (yctr - 0.5)
        nodeNumL = (6 * yctr) - 2
        nodeNumR = nodeNumL + 2
        op.node(nodeNumL, 0.0, yCoord)
        op.node(nodeNumR, sElemX, yCoord)
        # output nodal information to data file
        nodes_dict[nodeNumL] = (nodeNumL, 0.0, yCoord)
        nodes_dict[nodeNumR] = (nodeNumR, sElemX, yCoord)
    layerNodeCount = yctr
print("Finished creating all -ndf 2 nodes...")
# define fixities for interior nodes at base of soil column
op.fix(2, 0, 1)
print('Finished creating all -ndf 2 boundary conditions...')
# define equal degrees of freedom which have not yet been defined
for i in range(1,((3*nNodeY)-6),6):
op.equalDOF(i , i+1, 1, 2)
op.equalDOF(i+3, i+4, 1, 2)
op.equalDOF(i+3, i+5, 1, 2)
op.equalDOF(nNodeT-2, nNodeT-1, 1, 2)
print('Finished creating equalDOF constraints...')
#-----------------------------------------------------------------------------------------
# 4. CREATE SOIL MATERIALS
#-----------------------------------------------------------------------------------------
# define grade of slope (%)
grade = 2.0
slope = mm.atan(grade/100.0)
g = -9.81
xwgt_var = g * (mm.sin(slope))
ywgt_var = g * (mm.cos(slope))
thick = [1.0,1.0,1.0]
xWgt = [xwgt_var, xwgt_var, xwgt_var]
yWgt = [ywgt_var, ywgt_var, ywgt_var]
uBulk = [6.88E6, 5.06E6, 5.0E-6]
hPerm = [1.0E-4, 1.0E-4, 1.0E-4]
vPerm = [1.0E-4, 1.0E-4, 1.0E-4]
# nDMaterial PressureDependMultiYield02
# nDMaterial('PressureDependMultiYield02', matTag, nd, rho, refShearModul, refBulkModul,\
# frictionAng, peakShearStra, refPress, pressDependCoe, PTAng,\
# contrac[0], contrac[2], dilat[0], dilat[2], noYieldSurf=20.0,\
# *yieldSurf=[], contrac[1]=5.0, dilat[1]=3.0, *liquefac=[1.0,0.0],e=0.6, \
# *params=[0.9, 0.02, 0.7, 101.0], c=0.1)
op.nDMaterial('PressureDependMultiYield02',3, 2, 1.8, 9.0e4, 2.2e5, 32, 0.1, \
101.0, 0.5, 26, 0.067, 0.23, 0.06, \
0.27, 20, 5.0, 3.0, 1.0, \
0.0, 0.77, 0.9, 0.02, 0.7, 101.0)
op.nDMaterial('PressureDependMultiYield02', 2, 2, 2.24, 9.0e4, 2.2e5, 32, 0.1, \
101.0, 0.5, 26, 0.067, 0.23, 0.06, \
0.27, 20, 5.0, 3.0, 1.0, \
0.0, 0.77, 0.9, 0.02, 0.7, 101.0)
op.nDMaterial('PressureDependMultiYield02',1, 2, 2.45, 1.3e5, 2.6e5, 39, 0.1, \
101.0, 0.5, 26, 0.010, 0.0, 0.35, \
0.0, 20, 5.0, 3.0, 1.0, \
0.0, 0.47, 0.9, 0.02, 0.7, 101.0)
print("Finished creating all soil materials...")
#-----------------------------------------------------------------------------------------
# 5. CREATE SOIL ELEMENTS
#-----------------------------------------------------------------------------------------
for j in range(1,nElemT+1):
nI = ( 6*j) - 5
nJ = nI + 2
nK = nI + 8
nL = nI + 6
nM = nI + 1
nN = nI + 5
nP = nI + 7
nQ = nI + 3
nR = nI + 4
lowerBound = 0.0
for i in range(1,numLayers+1):
if j * sElemY[i-1] <= layerBound[i-1] and j * sElemY[i-1] > lowerBound:
# permeabilities are initially set at 1.0 m/s for gravity analysis,
op.element('9_4_QuadUP', j, nI, nJ, nK, nL, nM, nN, nP, nQ, nR, \
thick[i-1], i, uBulk[i-1], 1.0, 1.0, 1.0, xWgt[i-1], yWgt[i-1])
lowerBound = layerBound[i-1]
print("Finished creating all soil elements...")
#-----------------------------------------------------------------------------------------
# 6. LYSMER DASHPOT
#-----------------------------------------------------------------------------------------
# define dashpot nodes (Lysmer viscous boundary at the column base)
dashF = nNodeT+1
dashS = nNodeT+2
op.node(dashF, 0.0, 0.0)
op.node(dashS, 0.0, 0.0)
# define fixities for dashpot nodes
op.fix(dashF, 1, 1)
op.fix(dashS, 0, 1)
# define equal DOF for dashpot and base soil node
op.equalDOF(1, dashS, 1)
print('Finished creating dashpot nodes and boundary conditions...')
# define dashpot material
# tributary base area: element width times out-of-plane thickness
colArea = sElemX * thick[0]
# bedrock shear-wave velocity and density — presumably m/s and Mg/m^3;
# verify units against the "kN and m" convention in the header.
rockVS = 700.0
rockDen = 2.5
# dashpot constant per unit area: c = rho * Vs
dashpotCoeff = rockVS * rockDen
#uniaxialMaterial('Viscous', matTag, C, alpha)
op.uniaxialMaterial('Viscous', numLayers+1, dashpotCoeff * colArea, 1)
# define dashpot element
op.element('zeroLength', nElemT+1, dashF, dashS, '-mat', numLayers+1, '-dir', 1)
print("Finished creating dashpot material and element...")
#-----------------------------------------------------------------------------------------
# 7. CREATE GRAVITY RECORDERS
#-----------------------------------------------------------------------------------------
# create list for pore pressure nodes
# Reload the node ids written out during mesh generation.
load_nodeList3 = np.loadtxt('Node_record.txt')
# Fix: np.int was an alias of builtin int, removed in NumPy >= 1.24.
nodeList3 = [int(node_id) for node_id in load_nodeList3]
# record nodal displacment, acceleration, and porepressure
op.recorder('Node','-file','Gdisplacement.txt','-time','-node',*nodeList3,'-dof', 1, 2, 'disp')
op.recorder('Node','-file','Gacceleration.txt','-time','-node',*nodeList3,'-dof', 1, 2, 'accel')
op.recorder('Node','-file','GporePressure.txt','-time','-node',*nodeList3,'-dof', 3, 'vel')
# record elemental stress and strain (files are names to reflect GiD gp numbering)
op.recorder('Element','-file','Gstress1.txt','-time','-eleRange', 1,nElemT,'material','1','stress')
op.recorder('Element','-file','Gstress2.txt','-time','-eleRange', 1,nElemT,'material','2','stress')
op.recorder('Element','-file','Gstress3.txt','-time','-eleRange', 1,nElemT,'material','3','stress')
op.recorder('Element','-file','Gstress4.txt','-time','-eleRange', 1,nElemT,'material','4','stress')
op.recorder('Element','-file','Gstress9.txt','-time','-eleRange', 1,nElemT,'material','9','stress')
op.recorder('Element','-file','Gstrain1.txt','-time','-eleRange', 1,nElemT,'material','1','strain')
op.recorder('Element','-file','Gstrain2.txt','-time','-eleRange', 1,nElemT,'material','2','strain')
op.recorder('Element','-file','Gstrain3.txt','-time','-eleRange', 1,nElemT,'material','3','strain')
op.recorder('Element','-file','Gstrain4.txt','-time','-eleRange', 1,nElemT,'material','4','strain')
op.recorder('Element','-file','Gstrain9.txt','-time','-eleRange', 1,nElemT,'material','9','strain')
print("Finished creating gravity recorders...")
#-----------------------------------------------------------------------------------------
# 8. DEFINE ANALYSIS PARAMETERS
#-----------------------------------------------------------------------------------------
#---GROUND MOTION PARAMETERS
# time step in ground motion record
motionDT = 0.005
# number of steps in ground motion record
motionSteps = 7990
#---RAYLEIGH DAMPING PARAMETERS
# damping ratio
damp = 0.02
# lower frequency
omega1 = 2 * np.pi * 0.2
# upper frequency
omega2 = 2 * np.pi * 20
# damping coefficients
a0 = 2*damp*omega1*omega2/(omega1 + omega2)
a1 = 2*damp/(omega1 + omega2)
print("Damping Coefficients: a_0 = $a0; a_1 = $a1")
#---DETERMINE STABLE ANALYSIS TIME STEP USING CFL CONDITION
# maximum shear wave velocity (m/s)
vsMax = 250.0
# duration of ground motion (s)
duration = motionDT*motionSteps
# minimum element size (controls the stable analysis time step)
minSize = sElemY[0]
for i in range(2, numLayers + 1):
    if sElemY[i - 1] <= minSize:
        minSize = sElemY[i - 1]
# trial analysis time step
kTrial = minSize / (vsMax ** 0.5)
# keep the ground-motion step if already stable, otherwise subdivide
if motionDT <= kTrial:
    nSteps = motionSteps
    dT = motionDT
else:
    # Fix: np.int was an alias of builtin int, removed in NumPy >= 1.24.
    nSteps = int(mm.floor(duration / kTrial) + 1)
    dT = duration / nSteps
print("Number of steps in analysis: $nSteps")
print("Analysis time step: $dT")
#---ANALYSIS PARAMETERS
# Newmark parameters
gamma = 0.5
beta = 0.25
#-----------------------------------------------------------------------------------------
# 9. GRAVITY ANALYSIS
#-----------------------------------------------------------------------------------------
# run gravity first with materials in elastic stage (stage 0)
op.updateMaterialStage('-material', 1, '-stage', 0)
op.updateMaterialStage('-material', 2, '-stage', 0)
op.updateMaterialStage('-material', 3, '-stage', 0)
op.constraints('Penalty', 1.0E14, 1.0E14)
op.test('NormDispIncr', 1e-4, 35, 1)
op.algorithm('KrylovNewton')
op.numberer('RCM')
op.system('ProfileSPD')
op.integrator('Newmark', gamma, beta)
op.analysis('Transient')
startT = tt.time()
# 10 large pseudo-time steps (5.0E2 each) to settle elastic self-weight
op.analyze(10, 5.0E2)
print('Finished with elastic gravity analysis...')
# update material to consider elastoplastic behavior (stage 1)
op.updateMaterialStage('-material', 1, '-stage', 1)
op.updateMaterialStage('-material', 2, '-stage', 1)
op.updateMaterialStage('-material', 3, '-stage', 1)
# plastic gravity loading
op.analyze(40, 5.0e2)
print('Finished with plastic gravity analysis...')
#-----------------------------------------------------------------------------------------
# 10. UPDATE ELEMENT PERMEABILITY VALUES FOR POST-GRAVITY ANALYSIS
#-----------------------------------------------------------------------------------------
# choose a base number for parameter IDs that is higher than any other
# tag used in the analysis
ctr = 10000.0
# register a (vPerm, hPerm) parameter pair for every element
for i in range(1, nElemT + 1):
    # Fix: np.int was an alias of builtin int, removed in NumPy >= 1.24.
    op.parameter(int(ctr + 1.0), 'element', i, 'vPerm')
    op.parameter(int(ctr + 2.0), 'element', i, 'hPerm')
    ctr = ctr + 2.0
# update permeability parameters for each element using the same IDs
ctr = 10000.0
for j in range(1, nElemT + 1):
    lowerBound = 0.0
    for i in range(1, numLayers + 1):
        # pick the soil layer this element row falls into
        if j * sElemY[i - 1] <= layerBound[i - 1] and j * sElemY[i - 1] > lowerBound:
            op.updateParameter(int(ctr + 1.0), vPerm[i - 1])
            op.updateParameter(int(ctr + 2.0), hPerm[i - 1])
            lowerBound = layerBound[i - 1]
    ctr = ctr + 2.0
print("Finished updating permeabilities for dynamic analysis...")
#-----------------------------------------------------------------------------------------
# 11. CREATE POST-GRAVITY RECORDERS
#-----------------------------------------------------------------------------------------
# reset time and analysis
op.setTime(0.0)
op.wipeAnalysis()
op.remove('recorders')
# recorder time step
recDT = 10*motionDT
# record nodal displacment, acceleration, and porepressure
op.recorder('Node','-file','displacement.txt','-time', '-dT',recDT,'-node',*nodeList3,'-dof', 1, 2, 'disp')
op.recorder('Node','-file','acceleration.txt','-time', '-dT',recDT,'-node',*nodeList3,'-dof', 1, 2, 'accel')
op.recorder('Node','-file','porePressure.txt','-time', '-dT',recDT,'-node',*nodeList3,'-dof', 3, 'vel')
# record elemental stress and strain (files are names to reflect GiD gp numbering)
op.recorder('Element','-file','stress1.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','1','stress')
op.recorder('Element','-file','stress2.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','2','stress')
op.recorder('Element','-file','stress3.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','3','stress')
op.recorder('Element','-file','stress4.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','4','stress')
op.recorder('Element','-file','stress9.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','9','stress')
op.recorder('Element','-file','strain1.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','1','strain')
op.recorder('Element','-file','strain2.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','2','strain')
op.recorder('Element','-file','strain3.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','3','strain')
op.recorder('Element','-file','strain4.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','4','strain')
op.recorder('Element','-file','strain9.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','9','strain')
print("Finished creating all recorders...")
#-----------------------------------------------------------------------------------------
# 12. DYNAMIC ANALYSIS
#-----------------------------------------------------------------------------------------
op.model('basic', '-ndm', 2, '-ndf', 3)
# define constant scaling factor for applied velocity
cFactor = colArea * dashpotCoeff
# define velocity time history file
velocityFile='velocityHistory';
data_gm=np.loadtxt('velocityHistory.txt')
#motionSteps=len(data_gm)
#print('Number of point for GM:',motionSteps)
# timeseries object for force history
op.timeSeries('Path', 2, '-dt', motionDT, '-filePath', velocityFile+'.txt', '-factor', cFactor)
op.pattern('Plain', 10, 2)
op.load(1, 1.0, 0.0, 0.0)
print( "Dynamic loading created...")
op.constraints('Penalty', 1.0E16, 1.0E16)
op.test('NormDispIncr', 1e-3, 35, 1)
op.algorithm('KrylovNewton')
op.numberer('RCM')
op.system('ProfileSPD')
op.integrator('Newmark', gamma, beta)
op.rayleigh(a0, a1, 0.0, 0.0)
op.analysis('Transient')
# perform the dynamic analysis; on non-convergence, halve the time step
# and resume from where the run stopped (at most twice)
ok = op.analyze(nSteps, dT)
if ok != 0:
    print("did not converge, reducing time step")
    curTime = op.getTime()
    mTime = curTime
    print("curTime: ", curTime)
    curStep = curTime / dT
    print("curStep: ", curStep)
    rStep = (nSteps - curStep) * 2.0
    # Fix: np.int was an alias of builtin int, removed in NumPy >= 1.24.
    remStep = int((nSteps - curStep) * 2.0)
    print("remStep: ", remStep)
    dT = dT / 2.0
    print("dT: ", dT)
    ok = op.analyze(remStep, dT)
    # second fallback: halve the step once more
    if ok != 0:
        print("did not converge, reducing time step")
        curTime = op.getTime()
        print("curTime: ", curTime)
        curStep = (curTime - mTime) / dT
        print("curStep: ", curStep)
        remStep = int((rStep - curStep) * 2.0)
        print("remStep: ", remStep)
        dT = dT / 2.0
        print("dT: ", dT)
        ok = op.analyze(remStep, dT)
endT = tt.time()
print("Finished with dynamic analysis...")
print("Analysis execution time: ",(endT-startT))
op.wipe()
| [
"opensees.updateMaterialStage",
"opensees.wipe",
"math.floor",
"opensees.nDMaterial",
"opensees.pattern",
"math.cos",
"opensees.getTime",
"opensees.numberer",
"opensees.integrator",
"opensees.constraints",
"math.atan",
"opensees.element",
"opensees.wipeAnalysis",
"opensees.timeSeries",
"... | [((1778, 1787), 'opensees.wipe', 'op.wipe', ([], {}), '()\n', (1785, 1787), True, 'import opensees as op\n'), ((2056, 2080), 'numpy.zeros', 'np.zeros', (['(numLayers, 1)'], {}), '((numLayers, 1))\n', (2064, 2080), True, 'import numpy as np\n'), ((2540, 2564), 'numpy.zeros', 'np.zeros', (['(numLayers, 1)'], {}), '((numLayers, 1))\n', (2548, 2564), True, 'import numpy as np\n'), ((3065, 3104), 'opensees.model', 'op.model', (['"""basic"""', '"""-ndm"""', '(2)', '"""-ndf"""', '(3)'], {}), "('basic', '-ndm', 2, '-ndf', 3)\n", (3073, 3104), True, 'import opensees as op\n'), ((3149, 3167), 'numpy.zeros', 'np.zeros', (['(500, 1)'], {}), '((500, 1))\n', (3157, 3167), True, 'import numpy as np\n'), ((3178, 3196), 'numpy.zeros', 'np.zeros', (['(500, 1)'], {}), '((500, 1))\n', (3186, 3196), True, 'import numpy as np\n'), ((4130, 4153), 'numpy.trim_zeros', 'np.trim_zeros', (['dry_Node'], {}), '(dry_Node)\n', (4143, 4153), True, 'import numpy as np\n'), ((4162, 4182), 'numpy.unique', 'np.unique', (['node_save'], {}), '(node_save)\n', (4171, 4182), True, 'import numpy as np\n'), ((4199, 4220), 'numpy.trim_zeros', 'np.trim_zeros', (['Node_d'], {}), '(Node_d)\n', (4212, 4220), True, 'import numpy as np\n'), ((4222, 4259), 'numpy.savetxt', 'np.savetxt', (['"""Node_record.txt"""', 'Node_d'], {}), "('Node_record.txt', Node_d)\n", (4232, 4259), True, 'import numpy as np\n'), ((4518, 4536), 'opensees.fix', 'op.fix', (['(1)', '(0)', '(1)', '(0)'], {}), '(1, 0, 1, 0)\n', (4524, 4536), True, 'import opensees as op\n'), ((4538, 4556), 'opensees.fix', 'op.fix', (['(3)', '(0)', '(1)', '(0)'], {}), '(3, 0, 1, 0)\n', (4544, 4556), True, 'import opensees as op\n'), ((5051, 5090), 'opensees.model', 'op.model', (['"""basic"""', '"""-ndm"""', '(2)', '"""-ndf"""', '(2)'], {}), "('basic', '-ndm', 2, '-ndf', 2)\n", (5059, 5090), True, 'import opensees as op\n'), ((5104, 5124), 'numpy.float', 'np.float', (['(sElemX / 2)'], {}), '(sElemX / 2)\n', (5112, 5124), True, 'import numpy as np\n'), 
((6413, 6428), 'opensees.fix', 'op.fix', (['(2)', '(0)', '(1)'], {}), '(2, 0, 1)\n', (6419, 6428), True, 'import opensees as op\n'), ((6699, 6740), 'opensees.equalDOF', 'op.equalDOF', (['(nNodeT - 2)', '(nNodeT - 1)', '(1)', '(2)'], {}), '(nNodeT - 2, nNodeT - 1, 1, 2)\n', (6710, 6740), True, 'import opensees as op\n'), ((7057, 7079), 'math.atan', 'mm.atan', (['(grade / 100.0)'], {}), '(grade / 100.0)\n', (7064, 7079), True, 'import math as mm\n'), ((7793, 7979), 'opensees.nDMaterial', 'op.nDMaterial', (['"""PressureDependMultiYield02"""', '(3)', '(2)', '(1.8)', '(90000.0)', '(220000.0)', '(32)', '(0.1)', '(101.0)', '(0.5)', '(26)', '(0.067)', '(0.23)', '(0.06)', '(0.27)', '(20)', '(5.0)', '(3.0)', '(1.0)', '(0.0)', '(0.77)', '(0.9)', '(0.02)', '(0.7)', '(101.0)'], {}), "('PressureDependMultiYield02', 3, 2, 1.8, 90000.0, 220000.0, \n 32, 0.1, 101.0, 0.5, 26, 0.067, 0.23, 0.06, 0.27, 20, 5.0, 3.0, 1.0, \n 0.0, 0.77, 0.9, 0.02, 0.7, 101.0)\n", (7806, 7979), True, 'import opensees as op\n'), ((8090, 8277), 'opensees.nDMaterial', 'op.nDMaterial', (['"""PressureDependMultiYield02"""', '(2)', '(2)', '(2.24)', '(90000.0)', '(220000.0)', '(32)', '(0.1)', '(101.0)', '(0.5)', '(26)', '(0.067)', '(0.23)', '(0.06)', '(0.27)', '(20)', '(5.0)', '(3.0)', '(1.0)', '(0.0)', '(0.77)', '(0.9)', '(0.02)', '(0.7)', '(101.0)'], {}), "('PressureDependMultiYield02', 2, 2, 2.24, 90000.0, 220000.0, \n 32, 0.1, 101.0, 0.5, 26, 0.067, 0.23, 0.06, 0.27, 20, 5.0, 3.0, 1.0, \n 0.0, 0.77, 0.9, 0.02, 0.7, 101.0)\n", (8103, 8277), True, 'import opensees as op\n'), ((8393, 8577), 'opensees.nDMaterial', 'op.nDMaterial', (['"""PressureDependMultiYield02"""', '(1)', '(2)', '(2.45)', '(130000.0)', '(260000.0)', '(39)', '(0.1)', '(101.0)', '(0.5)', '(26)', '(0.01)', '(0.0)', '(0.35)', '(0.0)', '(20)', '(5.0)', '(3.0)', '(1.0)', '(0.0)', '(0.47)', '(0.9)', '(0.02)', '(0.7)', '(101.0)'], {}), "('PressureDependMultiYield02', 1, 2, 2.45, 130000.0, 260000.0,\n 39, 0.1, 101.0, 0.5, 26, 0.01, 0.0, 0.35, 0.0, 
20, 5.0, 3.0, 1.0, 0.0, \n 0.47, 0.9, 0.02, 0.7, 101.0)\n", (8406, 8577), True, 'import opensees as op\n'), ((9933, 9957), 'opensees.node', 'op.node', (['dashF', '(0.0)', '(0.0)'], {}), '(dashF, 0.0, 0.0)\n', (9940, 9957), True, 'import opensees as op\n'), ((9960, 9984), 'opensees.node', 'op.node', (['dashS', '(0.0)', '(0.0)'], {}), '(dashS, 0.0, 0.0)\n', (9967, 9984), True, 'import opensees as op\n'), ((10026, 10045), 'opensees.fix', 'op.fix', (['dashF', '(1)', '(1)'], {}), '(dashF, 1, 1)\n', (10032, 10045), True, 'import opensees as op\n'), ((10047, 10066), 'opensees.fix', 'op.fix', (['dashS', '(0)', '(1)'], {}), '(dashS, 0, 1)\n', (10053, 10066), True, 'import opensees as op\n'), ((10121, 10145), 'opensees.equalDOF', 'op.equalDOF', (['(1)', 'dashS', '(1)'], {}), '(1, dashS, 1)\n', (10132, 10145), True, 'import opensees as op\n'), ((10405, 10477), 'opensees.uniaxialMaterial', 'op.uniaxialMaterial', (['"""Viscous"""', '(numLayers + 1)', '(dashpotCoeff * colArea)', '(1)'], {}), "('Viscous', numLayers + 1, dashpotCoeff * colArea, 1)\n", (10424, 10477), True, 'import opensees as op\n'), ((10505, 10593), 'opensees.element', 'op.element', (['"""zeroLength"""', '(nElemT + 1)', 'dashF', 'dashS', '"""-mat"""', '(numLayers + 1)', '"""-dir"""', '(1)'], {}), "('zeroLength', nElemT + 1, dashF, dashS, '-mat', numLayers + 1,\n '-dir', 1)\n", (10515, 10593), True, 'import opensees as op\n'), ((10923, 10952), 'numpy.loadtxt', 'np.loadtxt', (['"""Node_record.txt"""'], {}), "('Node_record.txt')\n", (10933, 10952), True, 'import numpy as np\n'), ((11117, 11223), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""Gdisplacement.txt"""', '"""-time"""', '"""-node"""', '*nodeList3', '"""-dof"""', '(1)', '(2)', '"""disp"""'], {}), "('Node', '-file', 'Gdisplacement.txt', '-time', '-node', *\n nodeList3, '-dof', 1, 2, 'disp')\n", (11128, 11223), True, 'import opensees as op\n'), ((11214, 11321), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', 
'"""Gacceleration.txt"""', '"""-time"""', '"""-node"""', '*nodeList3', '"""-dof"""', '(1)', '(2)', '"""accel"""'], {}), "('Node', '-file', 'Gacceleration.txt', '-time', '-node', *\n nodeList3, '-dof', 1, 2, 'accel')\n", (11225, 11321), True, 'import opensees as op\n'), ((11312, 11414), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""GporePressure.txt"""', '"""-time"""', '"""-node"""', '*nodeList3', '"""-dof"""', '(3)', '"""vel"""'], {}), "('Node', '-file', 'GporePressure.txt', '-time', '-node', *\n nodeList3, '-dof', 3, 'vel')\n", (11323, 11414), True, 'import opensees as op\n'), ((11491, 11602), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress1.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""1"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress1.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '1', 'stress')\n", (11502, 11602), True, 'import opensees as op\n'), ((11592, 11703), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress2.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""2"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress2.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '2', 'stress')\n", (11603, 11703), True, 'import opensees as op\n'), ((11693, 11804), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress3.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""3"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress3.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '3', 'stress')\n", (11704, 11804), True, 'import opensees as op\n'), ((11794, 11905), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress4.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""4"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress4.txt', '-time', '-eleRange', 1,\n nElemT, 
'material', '4', 'stress')\n", (11805, 11905), True, 'import opensees as op\n'), ((11895, 12006), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress9.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""9"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress9.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '9', 'stress')\n", (11906, 12006), True, 'import opensees as op\n'), ((11996, 12107), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstrain1.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""1"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain1.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '1', 'strain')\n", (12007, 12107), True, 'import opensees as op\n'), ((12097, 12208), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstrain2.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""2"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain2.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '2', 'strain')\n", (12108, 12208), True, 'import opensees as op\n'), ((12198, 12309), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstrain3.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""3"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain3.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '3', 'strain')\n", (12209, 12309), True, 'import opensees as op\n'), ((12299, 12410), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstrain4.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""4"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain4.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '4', 'strain')\n", (12310, 12410), True, 'import opensees as op\n'), ((12400, 12511), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', 
'"""Gstrain9.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""9"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain9.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '9', 'strain')\n", (12411, 12511), True, 'import opensees as op\n'), ((14249, 14300), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(1)', '"""-stage"""', '(0)'], {}), "('-material', 1, '-stage', 0)\n", (14271, 14300), True, 'import opensees as op\n'), ((14302, 14353), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(2)', '"""-stage"""', '(0)'], {}), "('-material', 2, '-stage', 0)\n", (14324, 14353), True, 'import opensees as op\n'), ((14355, 14406), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(3)', '"""-stage"""', '(0)'], {}), "('-material', 3, '-stage', 0)\n", (14377, 14406), True, 'import opensees as op\n'), ((14410, 14473), 'opensees.constraints', 'op.constraints', (['"""Penalty"""', '(100000000000000.0)', '(100000000000000.0)'], {}), "('Penalty', 100000000000000.0, 100000000000000.0)\n", (14424, 14473), True, 'import opensees as op\n'), ((14453, 14491), 'opensees.test', 'op.test', (['"""NormDispIncr"""', '(0.0001)', '(35)', '(1)'], {}), "('NormDispIncr', 0.0001, 35, 1)\n", (14460, 14491), True, 'import opensees as op\n'), ((14491, 14519), 'opensees.algorithm', 'op.algorithm', (['"""KrylovNewton"""'], {}), "('KrylovNewton')\n", (14503, 14519), True, 'import opensees as op\n'), ((14521, 14539), 'opensees.numberer', 'op.numberer', (['"""RCM"""'], {}), "('RCM')\n", (14532, 14539), True, 'import opensees as op\n'), ((14541, 14564), 'opensees.system', 'op.system', (['"""ProfileSPD"""'], {}), "('ProfileSPD')\n", (14550, 14564), True, 'import opensees as op\n'), ((14566, 14603), 'opensees.integrator', 'op.integrator', (['"""Newmark"""', 'gamma', 'beta'], {}), "('Newmark', gamma, beta)\n", (14579, 14603), True, 'import opensees as op\n'), ((14605, 14629), 
'opensees.analysis', 'op.analysis', (['"""Transient"""'], {}), "('Transient')\n", (14616, 14629), True, 'import opensees as op\n'), ((14642, 14651), 'time.time', 'tt.time', ([], {}), '()\n', (14649, 14651), True, 'import time as tt\n'), ((14653, 14674), 'opensees.analyze', 'op.analyze', (['(10)', '(500.0)'], {}), '(10, 500.0)\n', (14663, 14674), True, 'import opensees as op\n'), ((14784, 14835), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(1)', '"""-stage"""', '(1)'], {}), "('-material', 1, '-stage', 1)\n", (14806, 14835), True, 'import opensees as op\n'), ((14837, 14888), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(2)', '"""-stage"""', '(1)'], {}), "('-material', 2, '-stage', 1)\n", (14859, 14888), True, 'import opensees as op\n'), ((14890, 14941), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(3)', '"""-stage"""', '(1)'], {}), "('-material', 3, '-stage', 1)\n", (14912, 14941), True, 'import opensees as op\n'), ((14972, 14993), 'opensees.analyze', 'op.analyze', (['(40)', '(500.0)'], {}), '(40, 500.0)\n', (14982, 14993), True, 'import opensees as op\n'), ((16377, 16392), 'opensees.setTime', 'op.setTime', (['(0.0)'], {}), '(0.0)\n', (16387, 16392), True, 'import opensees as op\n'), ((16394, 16411), 'opensees.wipeAnalysis', 'op.wipeAnalysis', ([], {}), '()\n', (16409, 16411), True, 'import opensees as op\n'), ((16413, 16435), 'opensees.remove', 'op.remove', (['"""recorders"""'], {}), "('recorders')\n", (16422, 16435), True, 'import opensees as op\n'), ((16544, 16662), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""displacement.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-node"""', '*nodeList3', '"""-dof"""', '(1)', '(2)', '"""disp"""'], {}), "('Node', '-file', 'displacement.txt', '-time', '-dT', recDT,\n '-node', *nodeList3, '-dof', 1, 2, 'disp')\n", (16555, 16662), True, 'import opensees as op\n'), ((16653, 16772), 
'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""acceleration.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-node"""', '*nodeList3', '"""-dof"""', '(1)', '(2)', '"""accel"""'], {}), "('Node', '-file', 'acceleration.txt', '-time', '-dT', recDT,\n '-node', *nodeList3, '-dof', 1, 2, 'accel')\n", (16664, 16772), True, 'import opensees as op\n'), ((16763, 16877), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""porePressure.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-node"""', '*nodeList3', '"""-dof"""', '(3)', '"""vel"""'], {}), "('Node', '-file', 'porePressure.txt', '-time', '-dT', recDT,\n '-node', *nodeList3, '-dof', 3, 'vel')\n", (16774, 16877), True, 'import opensees as op\n'), ((16954, 17078), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""stress1.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""1"""', '"""stress"""'], {}), "('Element', '-file', 'stress1.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '1', 'stress')\n", (16965, 17078), True, 'import opensees as op\n'), ((17067, 17191), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""stress2.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""2"""', '"""stress"""'], {}), "('Element', '-file', 'stress2.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '2', 'stress')\n", (17078, 17191), True, 'import opensees as op\n'), ((17180, 17304), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""stress3.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""3"""', '"""stress"""'], {}), "('Element', '-file', 'stress3.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '3', 'stress')\n", (17191, 17304), True, 'import opensees as op\n'), ((17293, 17417), 'opensees.recorder', 'op.recorder', 
(['"""Element"""', '"""-file"""', '"""stress4.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""4"""', '"""stress"""'], {}), "('Element', '-file', 'stress4.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '4', 'stress')\n", (17304, 17417), True, 'import opensees as op\n'), ((17406, 17530), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""stress9.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""9"""', '"""stress"""'], {}), "('Element', '-file', 'stress9.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '9', 'stress')\n", (17417, 17530), True, 'import opensees as op\n'), ((17519, 17643), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""strain1.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""1"""', '"""strain"""'], {}), "('Element', '-file', 'strain1.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '1', 'strain')\n", (17530, 17643), True, 'import opensees as op\n'), ((17632, 17756), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""strain2.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""2"""', '"""strain"""'], {}), "('Element', '-file', 'strain2.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '2', 'strain')\n", (17643, 17756), True, 'import opensees as op\n'), ((17745, 17869), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""strain3.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""3"""', '"""strain"""'], {}), "('Element', '-file', 'strain3.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '3', 'strain')\n", (17756, 17869), True, 'import opensees as op\n'), ((17858, 17982), 'opensees.recorder', 'op.recorder', 
(['"""Element"""', '"""-file"""', '"""strain4.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""4"""', '"""strain"""'], {}), "('Element', '-file', 'strain4.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '4', 'strain')\n", (17869, 17982), True, 'import opensees as op\n'), ((17971, 18095), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""strain9.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""9"""', '"""strain"""'], {}), "('Element', '-file', 'strain9.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '9', 'strain')\n", (17982, 18095), True, 'import opensees as op\n'), ((18342, 18381), 'opensees.model', 'op.model', (['"""basic"""', '"""-ndm"""', '(2)', '"""-ndf"""', '(3)'], {}), "('basic', '-ndm', 2, '-ndf', 3)\n", (18350, 18381), True, 'import opensees as op\n'), ((18554, 18587), 'numpy.loadtxt', 'np.loadtxt', (['"""velocityHistory.txt"""'], {}), "('velocityHistory.txt')\n", (18564, 18587), True, 'import numpy as np\n'), ((18704, 18805), 'opensees.timeSeries', 'op.timeSeries', (['"""Path"""', '(2)', '"""-dt"""', 'motionDT', '"""-filePath"""', "(velocityFile + '.txt')", '"""-factor"""', 'cFactor'], {}), "('Path', 2, '-dt', motionDT, '-filePath', velocityFile +\n '.txt', '-factor', cFactor)\n", (18717, 18805), True, 'import opensees as op\n'), ((18801, 18827), 'opensees.pattern', 'op.pattern', (['"""Plain"""', '(10)', '(2)'], {}), "('Plain', 10, 2)\n", (18811, 18827), True, 'import opensees as op\n'), ((18829, 18854), 'opensees.load', 'op.load', (['(1)', '(1.0)', '(0.0)', '(0.0)'], {}), '(1, 1.0, 0.0, 0.0)\n', (18836, 18854), True, 'import opensees as op\n'), ((18898, 18937), 'opensees.constraints', 'op.constraints', (['"""Penalty"""', '(1e+16)', '(1e+16)'], {}), "('Penalty', 1e+16, 1e+16)\n", (18912, 18937), True, 'import opensees as op\n'), ((18941, 18978), 'opensees.test', 'op.test', 
(['"""NormDispIncr"""', '(0.001)', '(35)', '(1)'], {}), "('NormDispIncr', 0.001, 35, 1)\n", (18948, 18978), True, 'import opensees as op\n'), ((18979, 19007), 'opensees.algorithm', 'op.algorithm', (['"""KrylovNewton"""'], {}), "('KrylovNewton')\n", (18991, 19007), True, 'import opensees as op\n'), ((19009, 19027), 'opensees.numberer', 'op.numberer', (['"""RCM"""'], {}), "('RCM')\n", (19020, 19027), True, 'import opensees as op\n'), ((19029, 19052), 'opensees.system', 'op.system', (['"""ProfileSPD"""'], {}), "('ProfileSPD')\n", (19038, 19052), True, 'import opensees as op\n'), ((19054, 19091), 'opensees.integrator', 'op.integrator', (['"""Newmark"""', 'gamma', 'beta'], {}), "('Newmark', gamma, beta)\n", (19067, 19091), True, 'import opensees as op\n'), ((19093, 19122), 'opensees.rayleigh', 'op.rayleigh', (['a0', 'a1', '(0.0)', '(0.0)'], {}), '(a0, a1, 0.0, 0.0)\n', (19104, 19122), True, 'import opensees as op\n'), ((19124, 19148), 'opensees.analysis', 'op.analysis', (['"""Transient"""'], {}), "('Transient')\n", (19135, 19148), True, 'import opensees as op\n'), ((19206, 19228), 'opensees.analyze', 'op.analyze', (['nSteps', 'dT'], {}), '(nSteps, dT)\n', (19216, 19228), True, 'import opensees as op\n'), ((20172, 20181), 'time.time', 'tt.time', ([], {}), '()\n', (20179, 20181), True, 'import time as tt\n'), ((20277, 20286), 'opensees.wipe', 'op.wipe', ([], {}), '()\n', (20284, 20286), True, 'import opensees as op\n'), ((4460, 4478), 'numpy.int', 'np.int', (['dryNode[i]'], {}), '(dryNode[i])\n', (4466, 4478), True, 'import numpy as np\n'), ((4484, 4510), 'opensees.fix', 'op.fix', (['n_dryNode', '(0)', '(0)', '(1)'], {}), '(n_dryNode, 0, 0, 1)\n', (4490, 4510), True, 'import opensees as op\n'), ((4726, 4753), 'opensees.equalDOF', 'op.equalDOF', (['i', '(i + 2)', '(1)', '(2)'], {}), '(i, i + 2, 1, 2)\n', (4737, 4753), True, 'import opensees as op\n'), ((6602, 6629), 'opensees.equalDOF', 'op.equalDOF', (['i', '(i + 1)', '(1)', '(2)'], {}), '(i, i + 1, 1, 2)\n', (6613, 
6629), True, 'import opensees as op\n'), ((6635, 6666), 'opensees.equalDOF', 'op.equalDOF', (['(i + 3)', '(i + 4)', '(1)', '(2)'], {}), '(i + 3, i + 4, 1, 2)\n', (6646, 6666), True, 'import opensees as op\n'), ((6668, 6699), 'opensees.equalDOF', 'op.equalDOF', (['(i + 3)', '(i + 5)', '(1)', '(2)'], {}), '(i + 3, i + 5, 1, 2)\n', (6679, 6699), True, 'import opensees as op\n'), ((7112, 7125), 'math.sin', 'mm.sin', (['slope'], {}), '(slope)\n', (7118, 7125), True, 'import math as mm\n'), ((7144, 7157), 'math.cos', 'mm.cos', (['slope'], {}), '(slope)\n', (7150, 7157), True, 'import math as mm\n'), ((19373, 19385), 'opensees.getTime', 'op.getTime', ([], {}), '()\n', (19383, 19385), True, 'import opensees as op\n'), ((19550, 19582), 'numpy.int', 'np.int', (['((nSteps - curStep) * 2.0)'], {}), '((nSteps - curStep) * 2.0)\n', (19556, 19582), True, 'import numpy as np\n'), ((19664, 19687), 'opensees.analyze', 'op.analyze', (['remStep', 'dT'], {}), '(remStep, dT)\n', (19674, 19687), True, 'import opensees as op\n'), ((5470, 5502), 'opensees.node', 'op.node', (['nodeNum', 'xCoord', 'yCoord'], {}), '(nodeNum, xCoord, yCoord)\n', (5477, 5502), True, 'import opensees as op\n'), ((6014, 6044), 'opensees.node', 'op.node', (['nodeNumL', '(0.0)', 'yCoord'], {}), '(nodeNumL, 0.0, yCoord)\n', (6021, 6044), True, 'import opensees as op\n'), ((6054, 6087), 'opensees.node', 'op.node', (['nodeNumR', 'sElemX', 'yCoord'], {}), '(nodeNumR, sElemX, yCoord)\n', (6061, 6087), True, 'import opensees as op\n'), ((11029, 11054), 'numpy.int', 'np.int', (['load_nodeList3[i]'], {}), '(load_nodeList3[i])\n', (11035, 11054), True, 'import numpy as np\n'), ((15502, 15519), 'numpy.int', 'np.int', (['(ctr + 1.0)'], {}), '(ctr + 1.0)\n', (15508, 15519), True, 'import numpy as np\n'), ((15560, 15577), 'numpy.int', 'np.int', (['(ctr + 2.0)'], {}), '(ctr + 2.0)\n', (15566, 15577), True, 'import numpy as np\n'), ((19857, 19869), 'opensees.getTime', 'op.getTime', ([], {}), '()\n', (19867, 19869), True, 'import 
opensees as op\n'), ((20001, 20032), 'numpy.int', 'np.int', (['((rStep - curStep) * 2.0)'], {}), '((rStep - curStep) * 2.0)\n', (20007, 20032), True, 'import numpy as np\n'), ((20134, 20157), 'opensees.analyze', 'op.analyze', (['remStep', 'dT'], {}), '(remStep, dT)\n', (20144, 20157), True, 'import opensees as op\n'), ((3679, 3711), 'opensees.node', 'op.node', (['nodeNum', 'xCoord', 'yCoord'], {}), '(nodeNum, xCoord, yCoord)\n', (3686, 3711), True, 'import opensees as op\n'), ((3860, 3875), 'numpy.int', 'np.int', (['nodeNum'], {}), '(nodeNum)\n', (3866, 3875), True, 'import numpy as np\n'), ((5896, 5919), 'numpy.float', 'np.float', (['sElemY[k - 1]'], {}), '(sElemY[k - 1])\n', (5904, 5919), True, 'import numpy as np\n'), ((9383, 9523), 'opensees.element', 'op.element', (['"""9_4_QuadUP"""', 'j', 'nI', 'nJ', 'nK', 'nL', 'nM', 'nN', 'nP', 'nQ', 'nR', 'thick[i - 1]', 'i', 'uBulk[i - 1]', '(1.0)', '(1.0)', '(1.0)', 'xWgt[i - 1]', 'yWgt[i - 1]'], {}), "('9_4_QuadUP', j, nI, nJ, nK, nL, nM, nN, nP, nQ, nR, thick[i - 1\n ], i, uBulk[i - 1], 1.0, 1.0, 1.0, xWgt[i - 1], yWgt[i - 1])\n", (9393, 9523), True, 'import opensees as op\n'), ((13767, 13794), 'math.floor', 'mm.floor', (['(duration / kTrial)'], {}), '(duration / kTrial)\n', (13775, 13794), True, 'import math as mm\n'), ((5403, 5426), 'numpy.float', 'np.float', (['sElemY[k - 1]'], {}), '(sElemY[k - 1])\n', (5411, 5426), True, 'import numpy as np\n'), ((15907, 15924), 'numpy.int', 'np.int', (['(ctr + 1.0)'], {}), '(ctr + 1.0)\n', (15913, 15924), True, 'import numpy as np\n'), ((15968, 15985), 'numpy.int', 'np.int', (['(ctr + 2.0)'], {}), '(ctr + 2.0)\n', (15974, 15985), True, 'import numpy as np\n'), ((3594, 3617), 'numpy.float', 'np.float', (['sElemY[k - 1]'], {}), '(sElemY[k - 1])\n', (3602, 3617), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# sync-folders.py
from datetime import datetime, date, time
import os
from shutil import copyfile
import sys
# DEBUG
# import pdb
def usage():
    """Print command-line usage information on stdout.

    Documents the two supported commands: 'scan' (dry run reporting
    counts and sizes) and 'copy' (the actual synchronisation).
    """
    # NOTE: the dispatcher at the bottom of this script accepts 'scan' and
    # 'copy'; the help text previously advertised a non-existent 'run'
    # command — fixed here (also 'exiting' -> 'existing').
    print("\tsync-folders.py usage:\n\t$sync-folders.py [command] [source folder] [target folder]\n\n\tCommands:\n- scan: Will scan through the specified folders and return the number of files already existing at the target, the number of files to be copied and their size in bytes.\n- copy: This will copy all files from the source folder and its subfolders to the target folder with the same folder structure. Duplicates (already existing at the target) will be skipped.")
def scan_recursive(source_folder, target_folder):
    """Report how many files a sync would copy and how many already exist.

    Walks ``source_folder`` recursively; for every file, checks whether a
    file at the same relative path exists under ``target_folder``.

    :param source_folder: root of the tree to be synchronised.
    :param target_folder: root of the destination tree.
    :return: tuple ``(skipped, to_copy, total_bytes)`` (also printed).
    """
    copy_file_counter = 0
    skip_file_counter = 0
    file_size = 0
    # Traverse through source folder structure / tree
    for path, subdirs, files in os.walk(source_folder):
        for name in files:
            source_file_path = os.path.join(path, name)
            # Map the source path onto the target tree via the *relative*
            # path; the previous str.replace() approach misfires whenever
            # the source folder name reappears deeper inside the path.
            relative_path = os.path.relpath(source_file_path, source_folder)
            target_file_path = os.path.join(target_folder, relative_path)
            # Either count as a copy candidate or as already existing.
            if not os.path.isfile(target_file_path):
                file_size += os.path.getsize(source_file_path)
                copy_file_counter += 1
            else:
                skip_file_counter += 1
    print("Files existing: {0}\nFiles to copy: {1} ({2} Bytes)".format(skip_file_counter, copy_file_counter, file_size))
    return skip_file_counter, copy_file_counter, file_size
def copy_recursive(source_folder, target_folder):
    """Copy the whole source tree to the target, skipping existing files.

    Mirrors the folder structure of ``source_folder`` under
    ``target_folder``, creating any missing directories (including the
    target root itself, which the previous version forgot and which made
    the first run crash on a fresh target).

    :param source_folder: root of the tree to be synchronised.
    :param target_folder: root of the destination tree.
    :return: tuple ``(copied, skipped)`` (also printed).
    """
    copy_file_counter = 0
    skip_file_counter = 0
    # Traverse through source folder structure / tree
    for path, subdirs, files in os.walk(source_folder):
        # Mirror the current source directory in the target tree, using
        # the relative path rather than str.replace() (which misfires when
        # the source folder name recurs deeper inside the path).
        rel_dir = os.path.relpath(path, source_folder)
        target_path = os.path.normpath(os.path.join(target_folder, rel_dir))
        if not os.path.isdir(target_path):
            print("Creating target sub folder '{0}'".format(target_path))
            # makedirs creates intermediate directories as needed.
            os.makedirs(target_path)
        for name in files:
            source_file_path = os.path.join(path, name)
            target_file_path = os.path.join(target_path, name)
            # Either copy or skip, when already existing
            if not os.path.isfile(target_file_path):
                print("Copying '{0}' to '{1}'".format(source_file_path, target_file_path))
                copyfile(source_file_path, target_file_path)
                copy_file_counter += 1
            else:
                skip_file_counter += 1
    print("Copied {0} files. Skipped {1} files. Done.".format(copy_file_counter, skip_file_counter))
    return copy_file_counter, skip_file_counter
# ---- command-line entry point -------------------------------------------
# Expect exactly three arguments: command, source folder, target folder.
if len(sys.argv) != 4:
    usage()
    exit()

command, source_folder, target_folder = sys.argv[1:4]
print("Source folder is '{0}'. Target folder is '{1}'.".format(source_folder, target_folder))

# Dispatch on the requested command; anything unknown prints the usage.
if command == 'scan':
    scan_recursive(source_folder, target_folder)
elif command == 'copy':
    copy_recursive(source_folder, target_folder)
else:
    usage()
"os.path.getsize",
"os.path.join",
"os.path.isfile",
"shutil.copyfile",
"os.path.isdir",
"os.mkdir",
"os.walk"
] | [((849, 871), 'os.walk', 'os.walk', (['source_folder'], {}), '(source_folder)\n', (856, 871), False, 'import os\n'), ((1800, 1822), 'os.walk', 'os.walk', (['source_folder'], {}), '(source_folder)\n', (1807, 1822), False, 'import os\n'), ((1091, 1115), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (1103, 1115), False, 'import os\n'), ((2115, 2148), 'os.path.join', 'os.path.join', (['target_path', 'subdir'], {}), '(target_path, subdir)\n', (2127, 2148), False, 'import os\n'), ((2459, 2483), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (2471, 2483), False, 'import os\n'), ((1295, 1327), 'os.path.isfile', 'os.path.isfile', (['target_file_path'], {}), '(target_file_path)\n', (1309, 1327), False, 'import os\n'), ((1358, 1391), 'os.path.getsize', 'os.path.getsize', (['source_file_path'], {}), '(source_file_path)\n', (1373, 1391), False, 'import os\n'), ((2168, 2196), 'os.path.isdir', 'os.path.isdir', (['target_subdir'], {}), '(target_subdir)\n', (2181, 2196), False, 'import os\n'), ((2294, 2317), 'os.mkdir', 'os.mkdir', (['target_subdir'], {}), '(target_subdir)\n', (2302, 2317), False, 'import os\n'), ((2646, 2678), 'os.path.isfile', 'os.path.isfile', (['target_file_path'], {}), '(target_file_path)\n', (2660, 2678), False, 'import os\n'), ((2787, 2831), 'shutil.copyfile', 'copyfile', (['source_file_path', 'target_file_path'], {}), '(source_file_path, target_file_path)\n', (2795, 2831), False, 'from shutil import copyfile\n')] |
#! /root/anaconda3/bin/python
import time
from threading import Thread
from threading import current_thread
print('父线程%s启动' % current_thread().getName())
class MyThread(Thread):
    """Worker thread: announces start, sleeps five seconds, then reports
    its stored number and argument pair before announcing completion."""

    def __init__(self, num, name, args):
        super().__init__(name=name)
        self.num = num
        self.args = args

    def run(self):
        print('子线程%s启动' % current_thread().getName())
        time.sleep(5)
        print('arg1 = %d, arg2 = %d' % (self.args[0], self.args[1]))
        print(self.num)
        print('子线程%s结束' % current_thread().getName())
# Create and start the worker, then keep the parent alive long enough
# (5 s) for the child's run() to complete before the script exits.
mt = MyThread(10, name='mythread', args=(5, 8))
mt.start()
time.sleep(5)
| [
"threading.current_thread",
"time.sleep"
] | [((650, 663), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (660, 663), False, 'import time\n'), ((389, 402), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (399, 402), False, 'import time\n'), ((137, 153), 'threading.current_thread', 'current_thread', ([], {}), '()\n', (151, 153), False, 'from threading import current_thread\n'), ((363, 379), 'threading.current_thread', 'current_thread', ([], {}), '()\n', (377, 379), False, 'from threading import current_thread\n'), ((570, 586), 'threading.current_thread', 'current_thread', ([], {}), '()\n', (584, 586), False, 'from threading import current_thread\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016-2017 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Module dedicated to testing the scalars features.
"""
from pytest import raises, mark
from i3py.core.composition import customize
from i3py.core.features.enumerable import Enumerable
from i3py.core.features.scalars import Str, Int, Float
from i3py.core.limits import IntLimitsValidator, FloatLimitsValidator
from i3py.core.unit import get_unit_registry, UNIT_SUPPORT
from i3py.core.declarative import set_feat, limit
from i3py.core.errors import I3pyValueError, I3pyLimitsError
from ..testing_tools import DummyParent
from .test_mappings import TestMappingInit
from .test_feature import TestFeatureInit
from .test_limits_validated import TestLimitsValidatedInit
class TestEnumerableInit(TestFeatureInit):
    """Initialisation checks for the Enumerable feature."""

    cls = Enumerable

    parameters = {'values': (11, 2)}
class TestStrInit(TestEnumerableInit, TestMappingInit):
    """Initialisation checks for Str: enumerable values plus mappings."""

    cls = Str  # feature class under test
def test_str():
    """Str should enforce the allowed value set and return plain strings."""
    feat = Str(setter=True, values=['On', 'Off'])
    assert feat.pre_set(None, 'On') == 'On'
    # A value outside the declared set must be rejected on set.
    with raises(I3pyValueError):
        feat.pre_set(None, 'TEST')
    # Whatever the driver answers is converted to a string on get.
    assert isinstance(feat.post_get(None, 1), str)
def test_str_mapping():
    """Mapped Str features translate both ways between labels and codes."""
    feat = Str(mapping={'On': 1, 'Off': 2})
    for label, code in (('On', 1), ('Off', 2)):
        assert feat.post_get(None, code) == label
        assert feat.pre_set(None, label) == code
class TestIntInit(TestLimitsValidatedInit, TestEnumerableInit,
                  TestMappingInit):
    """Initialisation checks for Int: limits, values and mappings."""

    cls = Int  # feature class under test
class TestInt(object):
    """Behaviour tests for the Int feature."""

    def test_post_get(self):
        feat = Int()
        assert feat.post_get(None, '11') == 11

    def test_post_get_with_extract(self):
        feat = Int(extract='This is the value {}')
        assert feat.post_get(None, 'This is the value 11') == 11

    def test_with_values(self):
        feat = Int(setter=True, values=(1, 2, 3))
        assert feat.pre_set(None, 2) == 2
        with raises(ValueError):
            feat.pre_set(None, 5)
        # Deleting the customization disables value checking entirely.
        del feat.pre_set
        assert feat.pre_set(None, 5)

    def test_with_mapping(self):
        feat = Int(mapping={1: 'On', 2: 'Off'})
        for raw, mapped in ((1, 'On'), (2, 'Off')):
            assert feat.post_get(None, mapped) == raw
            assert feat.pre_set(None, raw) == mapped

    def test_with_static_limits(self):
        # Limits (2, 5, 2): range [2, 5] with step 2, plus the extra value 1.
        feat = Int(setter=True, values=(1,), limits=(2, 5, 2))
        with raises(ValueError):
            feat.pre_set(None, 1)
        assert feat.pre_set(None, 4)
        with raises(ValueError):
            feat.pre_set(None, 3)

    def test_with_dynamic_limits(self):

        class LimitsHolder(DummyParent):

            counter = 0

            @limit('test')
            def _limits_test(self):
                # Each (re)computation of the limits raises the lower bound.
                self.counter += 1
                return IntLimitsValidator(self.counter)

        holder = LimitsHolder()
        feat = Int(setter=True, limits='test')
        assert feat.pre_set(holder, 1)
        with raises(ValueError):
            feat.pre_set(holder, 0)
        # Discarding the cached limits forces a recomputation (bound -> 2).
        holder.discard_limits(('test', ))
        with raises(ValueError):
            feat.pre_set(holder, 1)
class CacheFloatTester(DummyParent):
    """Dummy object used as a base class for testing Float cache handling.
    """
    # Backing value read/written by the fl feature accessors below.
    val = 1.

    # Float feature with both getter and setter enabled.
    fl = Float(True, True)

    def __init__(self, caching_allowed=True):
        super(CacheFloatTester, self).__init__(caching_allowed)

    @customize('fl', 'get')
    def _get_fl(feat, driver):
        # The getter simply mirrors the stored backing value.
        return driver.val

    @customize('fl', 'set')
    def _set_fl(feat, driver, value):
        # The setter writes straight through to the backing attribute.
        driver.val = value
class UnitCacheFloatTester(CacheFloatTester):
    """Same as above but with a unit.
    """
    # Override the inherited feature declaration to attach the 'V' unit.
    fl = set_feat(unit='V')
class TestFloatInit(TestIntInit):
    """Initialisation checks for Float, adding the unit parameter."""

    cls = Float

    parameters = {'unit': 'V'}
class TestFloat(object):
    """Behaviour tests for the Float feature."""

    def test_post_get(self):
        feat = Float()
        assert feat.post_get(None, '0.1') == 0.1
def test_post_with_extract(self):
f = Float(extract='This is the value {}')
assert f.post_get(None, 'This is the value 1.1') == 1.1
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_post_get_with_unit(self):
class FloatHolder(DummyParent):
f = Float(unit='V')
f = FloatHolder.f
assert hasattr(f.post_get(FloatHolder(), 0.1), 'magnitude')
assert f.post_get(FloatHolder(), 0.1).to('mV').magnitude == 100.
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_post_get_with_extract_and_unit(self):
class FloatHolder(DummyParent):
f = Float(unit='V', extract='This is the value {}')
f = FloatHolder.f
val = f.post_get(FloatHolder(), 'This is the value 0.1')
assert hasattr(val, 'magnitude')
assert val.to('mV').magnitude == 100.
    @mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
    def test_post_get_with_unit_return_float(self):
        # Disabling the module-level UNIT_RETURN flag makes post_get return
        # a bare float instead of a Pint quantity.
        from i3py.core.features import scalars
        scalars.UNIT_RETURN = False

        class FloatHolder(DummyParent):

            f = Float(unit='V')

        try:
            assert FloatHolder.f.post_get(FloatHolder(), 0.1) == 0.1
        finally:
            # Always restore the global default for the other tests.
            scalars.UNIT_RETURN = True
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_post_get_settings_unit_return_float(self):
class FloatHolder(DummyParent):
f = Float(unit='V')
p = FloatHolder()
with p.temporary_setting('f', 'unit_return', False):
assert FloatHolder.f.post_get(p, 0.1) == 0.1
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_post_get_with_extract_and_unit_return_float(self):
from i3py.core.features import scalars
scalars.UNIT_RETURN = False
class FloatHolder(DummyParent):
f = Float(unit='V', extract='This is the value {}')
try:
val = FloatHolder.f.post_get(FloatHolder(),
'This is the value 0.1')
assert val == 0.1
finally:
scalars.UNIT_RETURN = True
def test_with_values(self):
f = Float(setter=True, values=(1.0, 2.4, 3.1))
assert f.pre_set(None, 2.4) == 2.4
with raises(ValueError):
f.pre_set(None, 5)
del f.pre_set
assert f.pre_set(None, 5)
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_with_values_and_units(self):
f = Float(setter=True, values=(1.0, 2.4, 3.1), unit='mV')
u = get_unit_registry()
assert f.pre_set(None, 1.0) == 1.0
assert f.pre_set(None, u.parse_expression('0.0024 V')) == 2.4
def test_with_mapping_no_units(self):
m = Float(mapping={1.0: 'On', 2.0: 'Off'})
assert m.post_get(None, 'On') == 1.0
assert m.post_get(None, 'Off') == 2.0
assert m.pre_set(None, 1.0) == 'On'
assert m.pre_set(None, 2.0) == 'Off'
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_with_mapping_units(self):
class FloatHolder(DummyParent):
m = Float(mapping={1.0: 'On', 2.0: 'Off'}, unit='mV')
m = FloatHolder.m
u = get_unit_registry()
assert m.post_get(FloatHolder(), 'On') == u.parse_expression('1.0 mV')
assert m.post_get(FloatHolder(), 'Off') == u.parse_expression('2.0 mV')
assert m.pre_set(None, u.parse_expression('0.001 V')) == 'On'
assert m.pre_set(None, u.parse_expression('0.002 V')) == 'Off'
def test_set_with_static_limits(self):
f = Float(setter=True, limits=(0.0, ))
assert f.pre_set(None, 0.1) == 0.1
with raises(I3pyLimitsError):
f.pre_set(None, -1.0)
def test_set_with_dynamic_limits(self):
class LimitsHolder(DummyParent):
n = 0.1
@limit('test')
def _limits_test(self):
self.n += .1
return FloatLimitsValidator(0.0, step=self.n)
o = LimitsHolder()
f = Float(setter=True, limits='test')
assert f.pre_set(o, .2)
with raises(ValueError):
f.pre_set(o, -0.5)
o.discard_limits(('test', ))
with raises(ValueError):
f.pre_set(o, 0.2)
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_set_with_unit(self):
f = Float(setter=True, unit='mV')
u = get_unit_registry()
assert f.pre_set(None, u.parse_expression('10 V')) == 10000.
f = Float(setter=True)
with raises(ValueError):
f.pre_set(None, u.parse_expression('10 V'))
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_with_static_limits_and_units(self):
f = Float(setter=True, unit='mV',
limits=FloatLimitsValidator(-1.0, 1.0, 0.01, unit='V'))
u = get_unit_registry()
assert f.pre_set(None, 10.0) == 10.0
with raises(ValueError):
f.pre_set(None, -2.0)
assert f.pre_set(None, u.parse_expression('10 mV')) == 10.
with raises(ValueError):
f.pre_set(None, u.parse_expression('0.1 mV'))
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_with_dynamic_limits_and_units(self):
class LimitsHolder(DummyParent):
n = 0.0
@limit('test')
def _limits_test(self):
self.n += 100
return FloatLimitsValidator(-1000., 1000., step=self.n,
unit='mV')
o = LimitsHolder()
f = Float(setter=True, limits='test', unit='V')
assert f.pre_set(o, .1) == 0.1
with raises(ValueError):
f.pre_set(o, -5)
o.discard_limits(('test', ))
with raises(ValueError):
f.pre_set(o, 0.1)
u = get_unit_registry()
assert f.pre_set(o, u.parse_expression('200 mV')) == 0.2
with raises(ValueError):
f.pre_set(o, u.parse_expression('100 mV'))
def test_cache_no_unit(self):
"""Test getting a cached value when no unit is specified.
"""
parent = CacheFloatTester()
aux = parent.fl
old_val = parent.val
parent.val += 1
assert parent.fl == aux
parent.fl = aux
assert parent.val != old_val
@mark.skipif(UNIT_SUPPORT is True, reason="Requires Pint absence")
def test_cache_unit_without_support(self):
"""Test getting a cached value with a unit in the absence of unit
support.
"""
parent = UnitCacheFloatTester()
aux = parent.fl
old_val = parent.val
parent.val += 1
assert parent.fl == aux
assert not hasattr(aux, 'magnitude')
parent.fl = aux
assert parent.val != old_val
parent.fl = 0.5
assert parent.val == 0.5
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_cache_get_unit_with_support(self):
"""Test getting a cached value with a unit in the presence of unit
support.
"""
parent = UnitCacheFloatTester()
ureg = get_unit_registry()
parent.val = 0.2
assert parent.val == 0.2
assert parent.fl == ureg.parse_expression('0.2 V')
parent.fl = 0.1
assert parent.fl == ureg.parse_expression('0.1 V')
parent.val = 1
parent.fl = ureg.parse_expression('0.1 V')
assert parent.val == 1
q = ureg.parse_expression('0.2 V')
parent.fl = q
assert parent.val == 0.2
assert parent.fl == q
parent.val = 1
parent.fl = 0.2
assert parent.val == 1
@mark.skipif(UNIT_SUPPORT is False, reason="Requires Pint")
def test_settings_support(self):
"""Test that we respect the unit return setting.
"""
parent = UnitCacheFloatTester()
ureg = get_unit_registry()
parent.val = 0.2
assert parent.val == 0.2
assert parent.fl == ureg.parse_expression('0.2 V')
with parent.temporary_setting('fl', 'unit_return', False):
print(parent, parent._settings)
assert parent.fl == 0.2
parent.clear_cache()
with parent.temporary_setting('fl', 'unit_return', False):
print(parent, parent._settings)
assert parent.fl == 0.2
assert parent.fl == ureg.parse_expression('0.2 V')
| [
"i3py.core.composition.customize",
"i3py.core.limits.FloatLimitsValidator",
"i3py.core.features.scalars.Str",
"i3py.core.declarative.limit",
"i3py.core.features.scalars.Int",
"i3py.core.unit.get_unit_registry",
"i3py.core.limits.IntLimitsValidator",
"pytest.raises",
"i3py.core.declarative.set_feat",... | [((1255, 1293), 'i3py.core.features.scalars.Str', 'Str', ([], {'setter': '(True)', 'values': "['On', 'Off']"}), "(setter=True, values=['On', 'Off'])\n", (1258, 1293), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((1487, 1519), 'i3py.core.features.scalars.Str', 'Str', ([], {'mapping': "{'On': 1, 'Off': 2}"}), "(mapping={'On': 1, 'Off': 2})\n", (1490, 1519), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((3468, 3485), 'i3py.core.features.scalars.Float', 'Float', (['(True)', '(True)'], {}), '(True, True)\n', (3473, 3485), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((3603, 3625), 'i3py.core.composition.customize', 'customize', (['"""fl"""', '"""get"""'], {}), "('fl', 'get')\n", (3612, 3625), False, 'from i3py.core.composition import customize\n'), ((3689, 3711), 'i3py.core.composition.customize', 'customize', (['"""fl"""', '"""set"""'], {}), "('fl', 'set')\n", (3698, 3711), False, 'from i3py.core.composition import customize\n'), ((3881, 3899), 'i3py.core.declarative.set_feat', 'set_feat', ([], {'unit': '"""V"""'}), "(unit='V')\n", (3889, 3899), False, 'from i3py.core.declarative import set_feat, limit\n'), ((4268, 4326), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (4279, 4326), False, 'from pytest import raises, mark\n'), ((4613, 4671), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (4624, 4671), False, 'from pytest import raises, mark\n'), ((5013, 5071), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (5024, 5071), False, 'from pytest import raises, mark\n'), ((5425, 5483), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is 
False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (5436, 5483), False, 'from pytest import raises, mark\n'), ((5764, 5822), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (5775, 5822), False, 'from pytest import raises, mark\n'), ((6554, 6612), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (6565, 6612), False, 'from pytest import raises, mark\n'), ((7147, 7205), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (7158, 7205), False, 'from pytest import raises, mark\n'), ((8457, 8515), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (8468, 8515), False, 'from pytest import raises, mark\n'), ((8820, 8878), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (8831, 8878), False, 'from pytest import raises, mark\n'), ((9352, 9410), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (9363, 9410), False, 'from pytest import raises, mark\n'), ((10543, 10608), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is True)'], {'reason': '"""Requires Pint absence"""'}), "(UNIT_SUPPORT is True, reason='Requires Pint absence')\n", (10554, 10608), False, 'from pytest import raises, mark\n'), ((11080, 11138), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (11091, 11138), False, 'from pytest 
import raises, mark\n'), ((11887, 11945), 'pytest.mark.skipif', 'mark.skipif', (['(UNIT_SUPPORT is False)'], {'reason': '"""Requires Pint"""'}), "(UNIT_SUPPORT is False, reason='Requires Pint')\n", (11898, 11945), False, 'from pytest import raises, mark\n'), ((1344, 1366), 'pytest.raises', 'raises', (['I3pyValueError'], {}), '(I3pyValueError)\n', (1350, 1366), False, 'from pytest import raises, mark\n'), ((1860, 1865), 'i3py.core.features.scalars.Int', 'Int', ([], {}), '()\n', (1863, 1865), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((1965, 2000), 'i3py.core.features.scalars.Int', 'Int', ([], {'extract': '"""This is the value {}"""'}), "(extract='This is the value {}')\n", (1968, 2000), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((2108, 2142), 'i3py.core.features.scalars.Int', 'Int', ([], {'setter': '(True)', 'values': '(1, 2, 3)'}), '(setter=True, values=(1, 2, 3))\n', (2111, 2142), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((2348, 2384), 'i3py.core.features.scalars.Int', 'Int', ([], {'mapping': "{(1): 'On', (2): 'Off'}"}), "(mapping={(1): 'On', (2): 'Off'})\n", (2351, 2384), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((2606, 2653), 'i3py.core.features.scalars.Int', 'Int', ([], {'setter': '(True)', 'values': '(1,)', 'limits': '(2, 5, 2)'}), '(setter=True, values=(1,), limits=(2, 5, 2))\n', (2609, 2653), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((3100, 3131), 'i3py.core.features.scalars.Int', 'Int', ([], {'setter': '(True)', 'limits': '"""test"""'}), "(setter=True, limits='test')\n", (3103, 3131), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((4055, 4062), 'i3py.core.features.scalars.Float', 'Float', ([], {}), '()\n', (4060, 4062), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((4160, 4197), 'i3py.core.features.scalars.Float', 'Float', ([], {'extract': '"""This is the value {}"""'}), 
"(extract='This is the value {}')\n", (4165, 4197), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((6342, 6384), 'i3py.core.features.scalars.Float', 'Float', ([], {'setter': '(True)', 'values': '(1.0, 2.4, 3.1)'}), '(setter=True, values=(1.0, 2.4, 3.1))\n', (6347, 6384), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((6667, 6720), 'i3py.core.features.scalars.Float', 'Float', ([], {'setter': '(True)', 'values': '(1.0, 2.4, 3.1)', 'unit': '"""mV"""'}), "(setter=True, values=(1.0, 2.4, 3.1), unit='mV')\n", (6672, 6720), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((6733, 6752), 'i3py.core.unit.get_unit_registry', 'get_unit_registry', ([], {}), '()\n', (6750, 6752), False, 'from i3py.core.unit import get_unit_registry, UNIT_SUPPORT\n'), ((6921, 6963), 'i3py.core.features.scalars.Float', 'Float', ([], {'mapping': "{(1.0): 'On', (2.0): 'Off'}"}), "(mapping={(1.0): 'On', (2.0): 'Off'})\n", (6926, 6963), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((7391, 7410), 'i3py.core.unit.get_unit_registry', 'get_unit_registry', ([], {}), '()\n', (7408, 7410), False, 'from i3py.core.unit import get_unit_registry, UNIT_SUPPORT\n'), ((7768, 7801), 'i3py.core.features.scalars.Float', 'Float', ([], {'setter': '(True)', 'limits': '(0.0,)'}), '(setter=True, limits=(0.0,))\n', (7773, 7801), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((8221, 8254), 'i3py.core.features.scalars.Float', 'Float', ([], {'setter': '(True)', 'limits': '"""test"""'}), "(setter=True, limits='test')\n", (8226, 8254), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((8562, 8591), 'i3py.core.features.scalars.Float', 'Float', ([], {'setter': '(True)', 'unit': '"""mV"""'}), "(setter=True, unit='mV')\n", (8567, 8591), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((8604, 8623), 'i3py.core.unit.get_unit_registry', 'get_unit_registry', ([], {}), '()\n', (8621, 8623), 
False, 'from i3py.core.unit import get_unit_registry, UNIT_SUPPORT\n'), ((8706, 8724), 'i3py.core.features.scalars.Float', 'Float', ([], {'setter': '(True)'}), '(setter=True)\n', (8711, 8724), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((9056, 9075), 'i3py.core.unit.get_unit_registry', 'get_unit_registry', ([], {}), '()\n', (9073, 9075), False, 'from i3py.core.unit import get_unit_registry, UNIT_SUPPORT\n'), ((9785, 9828), 'i3py.core.features.scalars.Float', 'Float', ([], {'setter': '(True)', 'limits': '"""test"""', 'unit': '"""V"""'}), "(setter=True, limits='test', unit='V')\n", (9790, 9828), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((10043, 10062), 'i3py.core.unit.get_unit_registry', 'get_unit_registry', ([], {}), '()\n', (10060, 10062), False, 'from i3py.core.unit import get_unit_registry, UNIT_SUPPORT\n'), ((11347, 11366), 'i3py.core.unit.get_unit_registry', 'get_unit_registry', ([], {}), '()\n', (11364, 11366), False, 'from i3py.core.unit import get_unit_registry, UNIT_SUPPORT\n'), ((12108, 12127), 'i3py.core.unit.get_unit_registry', 'get_unit_registry', ([], {}), '()\n', (12125, 12127), False, 'from i3py.core.unit import get_unit_registry, UNIT_SUPPORT\n'), ((2195, 2213), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2201, 2213), False, 'from pytest import raises, mark\n'), ((2667, 2685), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2673, 2685), False, 'from pytest import raises, mark\n'), ((2765, 2783), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2771, 2783), False, 'from pytest import raises, mark\n'), ((2932, 2945), 'i3py.core.declarative.limit', 'limit', (['"""test"""'], {}), "('test')\n", (2937, 2945), False, 'from i3py.core.declarative import set_feat, limit\n'), ((3176, 3194), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (3182, 3194), False, 'from pytest import raises, mark\n'), ((3274, 3292), 'pytest.raises', 
'raises', (['ValueError'], {}), '(ValueError)\n', (3280, 3292), False, 'from pytest import raises, mark\n'), ((4423, 4438), 'i3py.core.features.scalars.Float', 'Float', ([], {'unit': '"""V"""'}), "(unit='V')\n", (4428, 4438), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((4780, 4827), 'i3py.core.features.scalars.Float', 'Float', ([], {'unit': '"""V"""', 'extract': '"""This is the value {}"""'}), "(unit='V', extract='This is the value {}')\n", (4785, 4827), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((5264, 5279), 'i3py.core.features.scalars.Float', 'Float', ([], {'unit': '"""V"""'}), "(unit='V')\n", (5269, 5279), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((5597, 5612), 'i3py.core.features.scalars.Float', 'Float', ([], {'unit': '"""V"""'}), "(unit='V')\n", (5602, 5612), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((6027, 6074), 'i3py.core.features.scalars.Float', 'Float', ([], {'unit': '"""V"""', 'extract': '"""This is the value {}"""'}), "(unit='V', extract='This is the value {}')\n", (6032, 6074), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((6441, 6459), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (6447, 6459), False, 'from pytest import raises, mark\n'), ((7302, 7355), 'i3py.core.features.scalars.Float', 'Float', ([], {'mapping': "{(1.0): 'On', (2.0): 'Off'}", 'unit': '"""mV"""'}), "(mapping={(1.0): 'On', (2.0): 'Off'}, unit='mV')\n", (7307, 7355), False, 'from i3py.core.features.scalars import Str, Int, Float\n'), ((7859, 7882), 'pytest.raises', 'raises', (['I3pyLimitsError'], {}), '(I3pyLimitsError)\n', (7865, 7882), False, 'from pytest import raises, mark\n'), ((8040, 8053), 'i3py.core.declarative.limit', 'limit', (['"""test"""'], {}), "('test')\n", (8045, 8053), False, 'from i3py.core.declarative import set_feat, limit\n'), ((8300, 8318), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (8306, 
8318), False, 'from pytest import raises, mark\n'), ((8401, 8419), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (8407, 8419), False, 'from pytest import raises, mark\n'), ((8738, 8756), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (8744, 8756), False, 'from pytest import raises, mark\n'), ((9134, 9152), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (9140, 9152), False, 'from pytest import raises, mark\n'), ((9268, 9286), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (9274, 9286), False, 'from pytest import raises, mark\n'), ((9538, 9551), 'i3py.core.declarative.limit', 'limit', (['"""test"""'], {}), "('test')\n", (9543, 9551), False, 'from i3py.core.declarative import set_feat, limit\n'), ((9881, 9899), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (9887, 9899), False, 'from pytest import raises, mark\n'), ((9980, 9998), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (9986, 9998), False, 'from pytest import raises, mark\n'), ((10141, 10159), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (10147, 10159), False, 'from pytest import raises, mark\n'), ((3033, 3059), 'i3py.core.limits.IntLimitsValidator', 'IntLimitsValidator', (['self.n'], {}), '(self.n)\n', (3051, 3059), False, 'from i3py.core.limits import IntLimitsValidator, FloatLimitsValidator\n'), ((8142, 8180), 'i3py.core.limits.FloatLimitsValidator', 'FloatLimitsValidator', (['(0.0)'], {'step': 'self.n'}), '(0.0, step=self.n)\n', (8162, 8180), False, 'from i3py.core.limits import IntLimitsValidator, FloatLimitsValidator\n'), ((8995, 9042), 'i3py.core.limits.FloatLimitsValidator', 'FloatLimitsValidator', (['(-1.0)', '(1.0)', '(0.01)'], {'unit': '"""V"""'}), "(-1.0, 1.0, 0.01, unit='V')\n", (9015, 9042), False, 'from i3py.core.limits import IntLimitsValidator, FloatLimitsValidator\n'), ((9641, 9702), 'i3py.core.limits.FloatLimitsValidator', 'FloatLimitsValidator', 
(['(-1000.0)', '(1000.0)'], {'step': 'self.n', 'unit': '"""mV"""'}), "(-1000.0, 1000.0, step=self.n, unit='mV')\n", (9661, 9702), False, 'from i3py.core.limits import IntLimitsValidator, FloatLimitsValidator\n')] |
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.post_request import PostRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.time_entry.time_entry_command import TimeEntryCommand
from pyopenproject.model.form import Form
class UpdateForm(TimeEntryCommand):
    """Command that posts a time-entry form payload and wraps the response."""

    def __init__(self, connection, form):
        super().__init__(connection)
        self.form = form

    def execute(self):
        """Send the form to the API and return the resulting Form.

        Raises BusinessError (chained to the original RequestError) on failure.
        """
        try:
            request = PostRequest(connection=self.connection,
                                  # context=f"{self.CONTEXT}/:{self.time_entry}/form",
                                  context=f"{self.CONTEXT}/form",
                                  json=self.form.__dict__)
            return Form(request.execute())
        except RequestError as re:
            raise BusinessError(f"Error updating form: {self.form.name}") from re
| [
"pyopenproject.model.form.Form",
"pyopenproject.business.exception.business_error.BusinessError",
"pyopenproject.api_connection.requests.post_request.PostRequest"
] | [((860, 874), 'pyopenproject.model.form.Form', 'Form', (['json_obj'], {}), '(json_obj)\n', (864, 874), False, 'from pyopenproject.model.form import Form\n'), ((928, 983), 'pyopenproject.business.exception.business_error.BusinessError', 'BusinessError', (['f"""Error updating form: {self.form.name}"""'], {}), "(f'Error updating form: {self.form.name}')\n", (941, 983), False, 'from pyopenproject.business.exception.business_error import BusinessError\n'), ((576, 676), 'pyopenproject.api_connection.requests.post_request.PostRequest', 'PostRequest', ([], {'connection': 'self.connection', 'context': 'f"""{self.CONTEXT}/form"""', 'json': 'self.form.__dict__'}), "(connection=self.connection, context=f'{self.CONTEXT}/form',\n json=self.form.__dict__)\n", (587, 676), False, 'from pyopenproject.api_connection.requests.post_request import PostRequest\n')] |
import bpy
from bpy.props import *
from ...nodes.BASE.node_tree import RenderStackNode
def update_node(self, context):
    # Property-update callback: re-evaluate the node's parameters whenever
    # the 'engine' property changes.
    self.update_parms()
class RenderNodeSceneRenderEngine(RenderStackNode):
    """A simple input node"""
    bl_idname = 'RenderNodeSceneRenderEngine'
    bl_label = 'Scene Render Engine'
    # Class-level list that keeps the enum item tuples referenced: Blender
    # does not keep its own reference to strings returned by a dynamic
    # 'items' callback, so they must stay alive on the Python side.
    _enum_item_hack = []
    def init(self, context):
        # Single output socket feeding the task settings chain.
        self.outputs.new('RSNodeSocketTaskSettings', "Settings")
    def draw_buttons(self, context, layout):
        # Expose the engine selector in the node body.
        col = layout.column(align=1)
        col.prop(self, "engine")
    def process(self):
        # Apply the selected engine to the scene's render settings.
        self.compare(bpy.context.scene.render, 'engine', self.engine)
    def engine_enum_items(self, context):
        # Dynamic EnumProperty callback: built-in engines plus every render
        # engine registered by add-ons at call time.
        enum_items = RenderNodeSceneRenderEngine._enum_item_hack
        enum_items.clear()
        # append viewport engine
        enum_items.append(('BLENDER_EEVEE', 'Eevee', ''))
        enum_items.append(('BLENDER_WORKBENCH', 'Workbench', ''))
        addon = [engine.bl_idname for engine in bpy.types.RenderEngine.__subclasses__()]
        # append to enum_items
        for name in addon:
            enum_items.append((name, name.capitalize(), ''))
        return enum_items
    # Alias used so the function can be referenced before the annotation below.
    temp = engine_enum_items
    engine: EnumProperty(name='Engine', description='Render Eninge available',
                         items=temp, update=update_node)
def register():
    """Register the node class with Blender."""
    bpy.utils.register_class(RenderNodeSceneRenderEngine)
def unregister():
    """Unregister the node class from Blender."""
    bpy.utils.unregister_class(RenderNodeSceneRenderEngine)
| [
"bpy.utils.unregister_class",
"bpy.utils.register_class",
"bpy.types.RenderEngine.__subclasses__"
] | [((1362, 1415), 'bpy.utils.register_class', 'bpy.utils.register_class', (['RenderNodeSceneRenderEngine'], {}), '(RenderNodeSceneRenderEngine)\n', (1386, 1415), False, 'import bpy\n'), ((1440, 1495), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['RenderNodeSceneRenderEngine'], {}), '(RenderNodeSceneRenderEngine)\n', (1466, 1495), False, 'import bpy\n'), ((985, 1024), 'bpy.types.RenderEngine.__subclasses__', 'bpy.types.RenderEngine.__subclasses__', ([], {}), '()\n', (1022, 1024), False, 'import bpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=W0611, W0613, W0621, E1101
from __future__ import unicode_literals
import time
import json
from io import BytesIO
import pytest
import tabun_api as api
from tabun_api.compat import text
from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data
def test_get_posts_data_ok(user):
    """Posts parsed from the index page must match the reference JSON dump."""
    expected = json.loads(load_file('index_posts.json', template=False).decode('utf-8'))
    posts = list(reversed(user.get_posts('/')))
    assert len(posts) == len(expected)
    for reference, post in zip(expected, posts):
        assert post.post_id == reference['post_id']
        assert_data(post, reference)
def test_get_posts_data_ok_without_escape(user):
    """Same reference check, with topic-content escaping temporarily disabled."""
    def passthrough(data, may_be_short=False):
        return data

    saved_escape = api.utils.escape_topic_contents
    api.utils.escape_topic_contents = passthrough
    try:
        expected = json.loads(load_file('index_posts.json', template=False).decode('utf-8'))
        posts = list(reversed(user.get_posts('/')))
        assert len(posts) == len(expected)
        for reference, post in zip(expected, posts):
            assert post.post_id == reference['post_id']
            assert_data(post, reference)
    finally:
        # Always restore the real escaping function for the other tests.
        api.utils.escape_topic_contents = saved_escape
def test_get_posts_profile_data_ok(user, set_mock):
    """Posts parsed from a profile page must match the reference JSON dump."""
    set_mock({'/profile/test/created/topics/': 'profile_topics.html'})
    expected = json.loads(load_file('profile_topics.json', template=False).decode('utf-8'))
    posts = list(reversed(user.get_posts('/profile/test/created/topics/')))
    assert len(posts) == len(expected)
    for reference, post in zip(expected, posts):
        assert post.post_id == reference['post_id']
        assert_data(post, reference)
def test_get_posts_types_ok(user):
    """Every parsed post exposes its attributes with the documented types."""
    for post in reversed(user.get_posts('/')):
        assert isinstance(post.author, text)
        assert post.blog is None or isinstance(post.blog, text)
        assert isinstance(post.blog_name, text)
        assert isinstance(post.title, text)
        assert isinstance(post.raw_body, text)
        assert isinstance(post.tags[0], text)
        assert isinstance(post.comments_count, int)
        assert post.cut_text is None or isinstance(post.cut_text, text)
        assert isinstance(post.context, dict)
def test_get_posts_context_user_ok(user):
    """Context dict types for a logged-in user browsing a post list."""
    for post in reversed(user.get_posts('/')):
        ctx = post.context
        assert isinstance(ctx['username'], text)
        assert isinstance(ctx['http_host'], text)
        assert isinstance(ctx['url'], text)
        assert isinstance(ctx['can_comment'], type(None))  # not available on lists
        assert isinstance(ctx['can_edit'], bool)
        assert isinstance(ctx['can_delete'], bool)
        assert isinstance(ctx['can_vote'], bool)
        assert isinstance(ctx['vote_value'], (int, type(None)))  # None is not voted
        assert isinstance(ctx['favourited'], bool)
        assert isinstance(ctx['subscribed_to_comments'], type(None))  # not available on lists
        assert isinstance(ctx['unread_comments_count'], int)
def test_get_posts_context_guest_ok(user, as_guest):
    """Context dict types for an anonymous visitor browsing a post list."""
    for post in reversed(user.get_posts('/')):
        ctx = post.context
        assert isinstance(ctx['username'], type(None))
        assert isinstance(ctx['http_host'], text)
        assert isinstance(ctx['url'], text)
        assert isinstance(ctx['can_comment'], type(None))  # not available on lists
        assert isinstance(ctx['can_edit'], bool)
        assert isinstance(ctx['can_delete'], bool)
        assert isinstance(ctx['can_vote'], bool)
        assert isinstance(ctx['vote_value'], (int, type(None)))  # None is not voted
        assert isinstance(ctx['favourited'], bool)
        assert isinstance(ctx['subscribed_to_comments'], type(None))  # not available on lists
        assert isinstance(ctx['unread_comments_count'], int)
def test_get_post_ok(user):
    """Full parsing of the user's own draft post in a personal blog."""
    post = user.get_post(132085)
    assert post.post_id == 132085
    assert post.author == 'test'
    assert post.private is False
    assert post.blog is None
    assert post.draft is True
    assert post.short is False
    # Local time and UTC differ by the site's fixed +3h offset.
    assert time.strftime("%Y-%m-%d %H:%M:%S", post.time) == "2015-05-30 19:14:04"
    assert post.utctime.strftime('%Y-%m-%d %H:%M:%S') == '2015-05-30 16:14:04'
    assert post.title == 'Тест'
    assert post.raw_body == '<strong>Раз</strong><br/>\n<h4>Два</h4>И ломаем вёрстку <img src="http://ya.ru/" alt="'
    assert post.tags == ["тег1", "тег2"]
    assert post.cut_text is None
    assert post.comments_count == 5
    # Context of the author viewing his own draft: full rights, no voting.
    assert post.context['username'] == 'test'
    assert post.context['http_host'] == 'https://tabun.everypony.ru'
    assert post.context['url'] == 'https://tabun.everypony.ru/blog/132085.html'
    assert post.context['can_comment'] is True
    assert post.context['can_edit'] is True
    assert post.context['can_delete'] is True
    assert post.context['can_vote'] is False
    assert post.context['vote_value'] is None
    assert post.context['favourited'] is False
    assert post.context['subscribed_to_comments'] is True
    assert post.context['unread_comments_count'] == 0
def test_get_post_other_ok(user):
    """Full parsing of another user's published post in a closed blog."""
    post = user.get_post(138982, 'borderline')
    assert post.post_id == 138982
    assert post.author == 'test2'
    assert post.private is True
    assert post.blog == 'borderline'
    assert post.draft is False
    assert post.short is False
    assert time.strftime("%Y-%m-%d %H:%M:%S", post.time) == "2015-09-10 15:39:13"
    assert post.title == 'Тестирование ката'
    assert post.raw_body == '<img src="https://i.imgur.com/V3KzzyAs.png"/>Текст до ката<br/>\n<a></a> <br/>\nТекст после ката<img src="https://i.imgur.com/NAg929K.jpg"/>'
    assert post.tags == ["Луна", "аликорны", "новость"]
    assert post.comments_count == 0
    assert post.cut_text is None
    assert post.vote_count == 35
    assert post.vote_total == 36
    # Context of a reader without edit rights, who has already voted +1
    # and favourited the post.
    assert post.context['username'] == 'test'
    assert post.context['http_host'] == 'https://tabun.everypony.ru'
    assert post.context['url'] == 'https://tabun.everypony.ru/blog/borderline/138982.html'
    assert post.context['can_comment'] is False
    assert post.context['can_edit'] is False
    assert post.context['can_delete'] is False
    assert post.context['can_vote'] is False
    assert post.context['vote_value'] == 1
    assert post.context['favourited'] is True
    assert post.context['subscribed_to_comments'] is False
    assert post.context['unread_comments_count'] == 0
def test_get_post_other_blog_1(set_mock, user):
    """Requesting a personal-blog post through a wrong blog slug still yields blog=None."""
    set_mock({'/blog/news/132085.html': ('132085.html', {'url': '/blog/132085.html'})})
    post = user.get_post(132085, 'news')
    assert post.blog is None
def test_get_post_other_blog_2(set_mock, user):
    """Same as above with a different wrong slug: blog is still None."""
    set_mock({'/blog/blog/132085.html': ('132085.html', {'url': '/blog/132085.html'})})
    post = user.get_post(132085, 'blog')
    assert post.blog is None
@pytest.mark.parametrize("blog_id,blog,result_url,draft,tags,forbid_comment", [
    (6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2', 'Т3'], False),
    (6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2, Т3'], True),
    (None, None, 'https://tabun.everypony.ru/blog/1.html', True, ['Т2', 'Т3'], False)
])
def test_add_post_ok(form_intercept, set_mock, user, blog_id, blog, result_url, draft, tags, forbid_comment):
    """add_post sends the expected form fields and parses the redirect URL."""
    # Mock the server's 302 redirect to the newly created post.
    set_mock({
        '/topic/add/': (None, {
            'headers': {'location': result_url},
            'status': 302, 'status_msg': 'Found'
        }
    )})
    # Inspect the multipart form data actually sent by add_post.
    @form_intercept('/topic/add/')
    def topic_add(data, headers):
        assert data.get('blog_id') == [text(blog_id if blog_id is not None else 0).encode('utf-8')]
        assert data.get('security_ls_key') == [b'<KEY>']
        assert data.get('topic_title') == ['Т0'.encode('utf-8')]
        assert data.get('topic_text') == ['Б1'.encode('utf-8')]
        assert data.get('topic_tags') == ['Т2, Т3'.encode('utf-8')]
        if draft:
            assert data.get('submit_topic_save') == ['Сохранить в черновиках'.encode('utf-8')]
        else:
            assert data.get('submit_topic_publish') == ['Опубликовать'.encode('utf-8')]
        if forbid_comment:
            assert data.get('topic_forbid_comment') == [b'1']
        else:
            assert 'topic_forbid_comment' not in data
    result = user.add_post(blog_id, 'Т0', 'Б1', tags, forbid_comment, draft=draft)
    # The (blog slug, post id) pair is parsed from the Location header.
    assert result == (blog, 1)
@pytest.mark.parametrize("blog_id,blog,result_url,draft,tags,forbid_comment", [
    (6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2', 'Т3'], False),
    (6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2, Т3'], True),
    (None, None, 'https://tabun.everypony.ru/blog/1.html', True, ['Т2', 'Т3'], False)
])
def test_add_poll_ok(form_intercept, set_mock, user, blog_id, blog, result_url, draft, tags, forbid_comment):
    """add_poll sends the expected form fields (including answers) and parses the redirect URL."""
    # Mock the server's 302 redirect to the newly created poll.
    set_mock({
        '/question/add/': (None, {
            'headers': {'location': result_url},
            'status': 302, 'status_msg': 'Found'
        }
    )})
    # Inspect the multipart form data actually sent by add_poll.
    @form_intercept('/question/add/')
    def poll_add(data, headers):
        assert data.get('blog_id') == [text(blog_id if blog_id is not None else 0).encode('utf-8')]
        assert data.get('security_ls_key') == [b'<KEY>']
        assert data.get('topic_title') == ['Т0'.encode('utf-8')]
        assert data.get('answer[]') == [b'foo', b'bar']
        assert data.get('topic_text') == ['Б1'.encode('utf-8')]
        assert data.get('topic_tags') == ['Т2, Т3'.encode('utf-8')]
        if draft:
            assert data.get('submit_topic_save') == ['Сохранить в черновиках'.encode('utf-8')]
        else:
            assert data.get('submit_topic_publish') == ['Опубликовать'.encode('utf-8')]
        if forbid_comment:
            assert data.get('topic_forbid_comment') == [b'1']
        else:
            assert 'topic_forbid_comment' not in data
    result = user.add_poll(blog_id, 'Т0', ('foo', 'bar'), 'Б1', tags, forbid_comment, draft=draft)
    # The (blog slug, post id) pair is parsed from the Location header.
    assert result == (blog, 1)
def test_add_poll_error(set_mock, user):
set_mock({'/question/add/': 'topic_add_error.html'})
with pytest.raises(api.TabunResultError) as excinfo:
user.add_poll(None, '', ('foo', 'bar'), '', [])
# TODO: test len(choices) > 20
assert excinfo.value.message == 'Поле Заголовок слишком короткое (минимально допустимо 2 символов)'
@pytest.mark.parametrize("blog_id,blog,result_url,draft,tags,forbid_comment", [
(6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2', 'Т3'], False),
(6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2, Т3'], True),
(None, None, 'https://tabun.everypony.ru/blog/1.html', True, ['Т2', 'Т3'], False)
])
def test_edit_post_ok(form_intercept, set_mock, user, blog_id, blog, result_url, draft, tags, forbid_comment):
set_mock({
'/topic/edit/1/': (None, {
'headers': {'location': result_url},
'status': 302, 'status_msg': 'Found'
}
)})
@form_intercept('/topic/edit/1/')
def topic_edit(data, headers):
assert data.get('blog_id') == [text(blog_id if blog_id is not None else 0).encode('utf-8')]
assert data.get('security_ls_key') == [b'<KEY>']
assert data.get('topic_title') == ['Т0'.encode('utf-8')]
assert data.get('topic_text') == ['Б1'.encode('utf-8')]
assert data.get('topic_tags') == ['Т2, Т3'.encode('utf-8')]
if draft:
assert data.get('submit_topic_save') == ['Сохранить в черновиках'.encode('utf-8')]
else:
assert data.get('submit_topic_publish') == ['Опубликовать'.encode('utf-8')]
if forbid_comment:
assert data.get('topic_forbid_comment') == [b'1']
else:
assert 'topic_forbid_comment' not in data
result = user.edit_post(1, blog_id, 'Т0', 'Б1', tags, forbid_comment, draft=draft)
assert result == (blog, 1)
def test_edit_post_error(set_mock, user):
set_mock({'/topic/edit/1/': 'topic_add_error.html'})
with pytest.raises(api.TabunResultError) as excinfo:
user.edit_post(1, None, '', '', [])
assert excinfo.value.message == 'Поле Заголовок слишком короткое (минимально допустимо 2 символов)'
# Тесты hashsum гарантируют обратную совместимость, так что лучше их не трогать
def test_post_hashsum_default(user):
p = user.get_posts('/')
oldver_fields = ('post_id', 'time', 'draft', 'author', 'blog', 'title', 'body', 'tags')
assert p[0].post_id == 100000
assert p[0].hashsum(oldver_fields) == 'e93efead3145c59b9aac26037b9c5fcf'
assert p[1].post_id == 131909
assert p[1].hashsum(oldver_fields) == 'b6147c9ba6dbc7e8e07db958390108bd'
assert p[2].post_id == 131911
assert p[2].hashsum(oldver_fields) == '33b7a175c45eea8e5f68f4bc885f324b'
assert p[3].post_id == 131915
assert p[3].hashsum(oldver_fields) == '51b480ee57ee3166750e4f15f6a48f1f'
assert p[4].post_id == 131904
assert p[4].hashsum(oldver_fields) == 'd28e3ff695cd4cdc1f63e5919da95516'
assert p[5].post_id == 131937
assert p[5].hashsum(oldver_fields) == '93ef694d929b03b2f48b702ef68ce77b'
assert p[0].hashsum() == '2f452e09ee106a2beeb5a48927ad72b3'
assert p[1].hashsum() == '5308ccc03831ea4f4f3f3661440fcc75'
assert p[2].hashsum() == 'fb329febe4d073359b1d974098557994'
assert p[3].hashsum() == 'bed41b4d1ab3fa5b6b340f186067d6d5'
assert p[4].hashsum() == '2c49d10769e1fb28cb78cfaf8ac6cd0e'
assert p[5].hashsum() == '6c35ba542fd4f65ab9aac97943ca6672'
def test_post_hashsum_part(user):
p = user.get_posts('/')
assert p[0].post_id == 100000
assert p[0].hashsum(('title', 'body', 'tags')) == 'efeff4792ac7666c280b06d6d0ae1136'
assert p[1].post_id == 131909
assert p[1].hashsum(('title', 'body', 'tags')) == 'dacf2a4631636a1ab796681d607c11e0'
assert p[2].post_id == 131911
assert p[2].hashsum(('title', 'body', 'tags')) == '1381908ebf93038617b400f59d97646a'
assert p[3].post_id == 131915
assert p[3].hashsum(('title', 'body', 'tags')) == '9fbca162a43f2a2b1dff8c5764864fdf'
assert p[4].post_id == 131904
assert p[4].hashsum(('title', 'body', 'tags')) == '61a43c4d0f33313bfb4926fb86560450'
assert p[5].post_id == 131937
assert p[5].hashsum(('title', 'body', 'tags')) == 'a49976cb3879a540334a3e93f57a752e'
# Потому что смешивать \n и \r\n в файлах так же, как и на сайте, очень геморройно
p[0].raw_body = p[0].raw_body.replace('\n', '\r\n')
assert p[0].hashsum(('title', 'body', 'tags')) == '1364ee5a2fee913325d3b220d43623a5'
# TODO: rss
| [
"testutil.user.get_posts",
"testutil.user.add_post",
"time.strftime",
"testutil.load_file",
"pytest.mark.parametrize",
"testutil.user.edit_post",
"pytest.raises",
"testutil.user.get_post",
"tabun_api.compat.text",
"testutil.assert_data",
"testutil.set_mock",
"testutil.user.add_poll",
"testut... | [((6917, 7272), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""blog_id,blog,result_url,draft,tags,forbid_comment"""', "[(6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2',\n 'Т3'], False), (6, 'news',\n 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2, Т3'], True),\n (None, None, 'https://tabun.everypony.ru/blog/1.html', True, ['Т2',\n 'Т3'], False)]"], {}), "('blog_id,blog,result_url,draft,tags,forbid_comment',\n [(6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, [\n 'Т2', 'Т3'], False), (6, 'news',\n 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2, Т3'], True),\n (None, None, 'https://tabun.everypony.ru/blog/1.html', True, ['Т2',\n 'Т3'], False)])\n", (6940, 7272), False, 'import pytest\n'), ((8452, 8807), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""blog_id,blog,result_url,draft,tags,forbid_comment"""', "[(6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2',\n 'Т3'], False), (6, 'news',\n 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2, Т3'], True),\n (None, None, 'https://tabun.everypony.ru/blog/1.html', True, ['Т2',\n 'Т3'], False)]"], {}), "('blog_id,blog,result_url,draft,tags,forbid_comment',\n [(6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, [\n 'Т2', 'Т3'], False), (6, 'news',\n 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2, Т3'], True),\n (None, None, 'https://tabun.everypony.ru/blog/1.html', True, ['Т2',\n 'Т3'], False)])\n", (8475, 8807), False, 'import pytest\n'), ((10416, 10771), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""blog_id,blog,result_url,draft,tags,forbid_comment"""', "[(6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2',\n 'Т3'], False), (6, 'news',\n 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2, Т3'], True),\n (None, None, 'https://tabun.everypony.ru/blog/1.html', True, ['Т2',\n 'Т3'], False)]"], {}), 
"('blog_id,blog,result_url,draft,tags,forbid_comment',\n [(6, 'news', 'https://tabun.everypony.ru/blog/news/1.html', False, [\n 'Т2', 'Т3'], False), (6, 'news',\n 'https://tabun.everypony.ru/blog/news/1.html', False, ['Т2, Т3'], True),\n (None, None, 'https://tabun.everypony.ru/blog/1.html', True, ['Т2',\n 'Т3'], False)])\n", (10439, 10771), False, 'import pytest\n'), ((1362, 1428), 'testutil.set_mock', 'set_mock', (["{'/profile/test/created/topics/': 'profile_topics.html'}"], {}), "({'/profile/test/created/topics/': 'profile_topics.html'})\n", (1370, 1428), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((3948, 3969), 'testutil.user.get_post', 'user.get_post', (['(132085)'], {}), '(132085)\n', (3961, 3969), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((5211, 5246), 'testutil.user.get_post', 'user.get_post', (['(138982)', '"""borderline"""'], {}), "(138982, 'borderline')\n", (5224, 5246), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((6584, 6671), 'testutil.set_mock', 'set_mock', (["{'/blog/news/132085.html': ('132085.html', {'url': '/blog/132085.html'})}"], {}), "({'/blog/news/132085.html': ('132085.html', {'url':\n '/blog/132085.html'})})\n", (6592, 6671), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((6776, 6863), 'testutil.set_mock', 'set_mock', (["{'/blog/blog/132085.html': ('132085.html', {'url': '/blog/132085.html'})}"], {}), "({'/blog/blog/132085.html': ('132085.html', {'url':\n '/blog/132085.html'})})\n", (6784, 6863), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((7380, 7494), 'testutil.set_mock', 'set_mock', (["{'/topic/add/': (None, {'headers': {'location': result_url}, 'status': 302,\n 'status_msg': 'Found'})}"], {}), "({'/topic/add/': 
(None, {'headers': {'location': result_url},\n 'status': 302, 'status_msg': 'Found'})})\n", (7388, 7494), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((7544, 7573), 'testutil.form_intercept', 'form_intercept', (['"""/topic/add/"""'], {}), "('/topic/add/')\n", (7558, 7573), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((8348, 8417), 'testutil.user.add_post', 'user.add_post', (['blog_id', '"""Т0"""', '"""Б1"""', 'tags', 'forbid_comment'], {'draft': 'draft'}), "(blog_id, 'Т0', 'Б1', tags, forbid_comment, draft=draft)\n", (8361, 8417), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((8915, 9032), 'testutil.set_mock', 'set_mock', (["{'/question/add/': (None, {'headers': {'location': result_url}, 'status': \n 302, 'status_msg': 'Found'})}"], {}), "({'/question/add/': (None, {'headers': {'location': result_url},\n 'status': 302, 'status_msg': 'Found'})})\n", (8923, 9032), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((9082, 9114), 'testutil.form_intercept', 'form_intercept', (['"""/question/add/"""'], {}), "('/question/add/')\n", (9096, 9114), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((9944, 10033), 'testutil.user.add_poll', 'user.add_poll', (['blog_id', '"""Т0"""', "('foo', 'bar')", '"""Б1"""', 'tags', 'forbid_comment'], {'draft': 'draft'}), "(blog_id, 'Т0', ('foo', 'bar'), 'Б1', tags, forbid_comment,\n draft=draft)\n", (9957, 10033), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((10108, 10160), 'testutil.set_mock', 'set_mock', (["{'/question/add/': 'topic_add_error.html'}"], {}), "({'/question/add/': 'topic_add_error.html'})\n", (10116, 10160), False, 'from testutil import UserTest, 
load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((10880, 10997), 'testutil.set_mock', 'set_mock', (["{'/topic/edit/1/': (None, {'headers': {'location': result_url}, 'status': \n 302, 'status_msg': 'Found'})}"], {}), "({'/topic/edit/1/': (None, {'headers': {'location': result_url},\n 'status': 302, 'status_msg': 'Found'})})\n", (10888, 10997), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((11047, 11079), 'testutil.form_intercept', 'form_intercept', (['"""/topic/edit/1/"""'], {}), "('/topic/edit/1/')\n", (11061, 11079), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((11855, 11928), 'testutil.user.edit_post', 'user.edit_post', (['(1)', 'blog_id', '"""Т0"""', '"""Б1"""', 'tags', 'forbid_comment'], {'draft': 'draft'}), "(1, blog_id, 'Т0', 'Б1', tags, forbid_comment, draft=draft)\n", (11869, 11928), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((12008, 12060), 'testutil.set_mock', 'set_mock', (["{'/topic/edit/1/': 'topic_add_error.html'}"], {}), "({'/topic/edit/1/': 'topic_add_error.html'})\n", (12016, 12060), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((12395, 12414), 'testutil.user.get_posts', 'user.get_posts', (['"""/"""'], {}), "('/')\n", (12409, 12414), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((13602, 13621), 'testutil.user.get_posts', 'user.get_posts', (['"""/"""'], {}), "('/')\n", (13616, 13621), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((666, 689), 'testutil.assert_data', 'assert_data', (['post', 'data'], {}), '(post, data)\n', (677, 689), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((1740, 
1763), 'testutil.assert_data', 'assert_data', (['post', 'data'], {}), '(post, data)\n', (1751, 1763), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((1822, 1841), 'testutil.user.get_posts', 'user.get_posts', (['"""/"""'], {}), "('/')\n", (1836, 1841), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((2395, 2414), 'testutil.user.get_posts', 'user.get_posts', (['"""/"""'], {}), "('/')\n", (2409, 2414), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((3186, 3205), 'testutil.user.get_posts', 'user.get_posts', (['"""/"""'], {}), "('/')\n", (3200, 3205), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((4171, 4216), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""', 'post.time'], {}), "('%Y-%m-%d %H:%M:%S', post.time)\n", (4184, 4216), False, 'import time\n'), ((5457, 5502), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""', 'post.time'], {}), "('%Y-%m-%d %H:%M:%S', post.time)\n", (5470, 5502), False, 'import time\n'), ((10170, 10205), 'pytest.raises', 'pytest.raises', (['api.TabunResultError'], {}), '(api.TabunResultError)\n', (10183, 10205), False, 'import pytest\n'), ((10226, 10273), 'testutil.user.add_poll', 'user.add_poll', (['None', '""""""', "('foo', 'bar')", '""""""', '[]'], {}), "(None, '', ('foo', 'bar'), '', [])\n", (10239, 10273), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((12070, 12105), 'pytest.raises', 'pytest.raises', (['api.TabunResultError'], {}), '(api.TabunResultError)\n', (12083, 12105), False, 'import pytest\n'), ((12126, 12161), 'testutil.user.edit_post', 'user.edit_post', (['(1)', 'None', '""""""', '""""""', '[]'], {}), "(1, None, '', '', [])\n", (12140, 12161), False, 'from testutil import UserTest, load_file, 
form_intercept, as_guest, set_mock, user, assert_data\n'), ((503, 522), 'testutil.user.get_posts', 'user.get_posts', (['"""/"""'], {}), "('/')\n", (517, 522), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((1214, 1237), 'testutil.assert_data', 'assert_data', (['post', 'data'], {}), '(post, data)\n', (1225, 1237), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((1549, 1596), 'testutil.user.get_posts', 'user.get_posts', (['"""/profile/test/created/topics/"""'], {}), "('/profile/test/created/topics/')\n", (1563, 1596), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((6679, 6708), 'testutil.user.get_post', 'user.get_post', (['(132085)', '"""news"""'], {}), "(132085, 'news')\n", (6692, 6708), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((6871, 6900), 'testutil.user.get_post', 'user.get_post', (['(132085)', '"""blog"""'], {}), "(132085, 'blog')\n", (6884, 6900), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((414, 459), 'testutil.load_file', 'load_file', (['"""index_posts.json"""'], {'template': '(False)'}), "('index_posts.json', template=False)\n", (423, 459), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((1035, 1054), 'testutil.user.get_posts', 'user.get_posts', (['"""/"""'], {}), "('/')\n", (1049, 1054), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((1457, 1505), 'testutil.load_file', 'load_file', (['"""profile_topics.json"""'], {'template': '(False)'}), "('profile_topics.json', template=False)\n", (1466, 1505), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((942, 987), 
'testutil.load_file', 'load_file', (['"""index_posts.json"""'], {'template': '(False)'}), "('index_posts.json', template=False)\n", (951, 987), False, 'from testutil import UserTest, load_file, form_intercept, as_guest, set_mock, user, assert_data\n'), ((7647, 7690), 'tabun_api.compat.text', 'text', (['(blog_id if blog_id is not None else 0)'], {}), '(blog_id if blog_id is not None else 0)\n', (7651, 7690), False, 'from tabun_api.compat import text\n'), ((9187, 9230), 'tabun_api.compat.text', 'text', (['(blog_id if blog_id is not None else 0)'], {}), '(blog_id if blog_id is not None else 0)\n', (9191, 9230), False, 'from tabun_api.compat import text\n'), ((11154, 11197), 'tabun_api.compat.text', 'text', (['(blog_id if blog_id is not None else 0)'], {}), '(blog_id if blog_id is not None else 0)\n', (11158, 11197), False, 'from tabun_api.compat import text\n')] |
# -*- coding: utf-8 -*-
import numbers
import re

from django.utils import simplejson as json
# Regular expressions for parsing PostgreSQL geometric output strings.
# Every coordinate group is [\d\.\-]* so negative and fractional values parse.
rx_circle_float = re.compile(r'<\(([\d\.\-]*),([\d\.\-]*)\),([\d\.\-]*)>')
# Fixed: the second group previously used [\w\.\-] (letters slipped through)
# and the fourth used [\d\.\+] (a negative final coordinate such as
# "[(1,2), (3,-4)]" failed to match at all).
rx_line = re.compile(r'\[\(([\d\.\-]*),\s*([\d\.\-]*)\),\s*\(([\d\.\-]*),\s*([\d\.\-]*)\)\]')
rx_point = re.compile(r'\(([\d\.\-]*),\s*([\d\.\-]*)\)')
rx_box = re.compile(r'\(([\d\.\-]*),\s*([\d\.\-]*)\),\s*\(([\d\.\-]*),\s*([\d\.\-]*)\)')
# Group 1 is the opening bracket: "(" marks a closed path, "[" an open one.
rx_path_identify = re.compile(r'^((?:\(|\[))(.*)(?:\)|\])$')
class Point(object):
    """A 2D geometric point with numeric ``x`` and ``y`` coordinates.

    Accepts either two positional coordinates (``Point(1, 2)``) or a
    single two-element sequence (``Point((1, 2))``).  Raises
    ``ValueError`` when the coordinates are missing or not numeric.
    """

    x = None
    y = None

    def __init__(self, *args, **kwargs):
        if len(args) == 2:
            self.x, self.y = args
        elif len(args) == 1 and isinstance(args[0], (list, tuple)):
            self.x, self.y = args[0]
        self._validate()

    def _validate(self):
        """Raise ``ValueError`` unless both coordinates are real numbers."""
        # numbers.Real covers int/float (and long on Python 2) without
        # referencing the Python-2-only ``long`` builtin, which was a
        # NameError on Python 3.
        if not isinstance(self.x, numbers.Real) \
                or not isinstance(self.y, numbers.Real):
            raise ValueError("invalid data")

    def __repr__(self):
        return "<Point(%s,%s)>" % (self.x, self.y)

    def __iter__(self):
        yield self.x
        yield self.y

    def __lt__(self, val):
        return tuple(self) < tuple(val)

    def __gt__(self, val):
        return tuple(self) > tuple(val)

    def __eq__(self, val):
        return tuple(self) == tuple(val)

    def __ne__(self, val):
        # Python 2 does not derive ``!=`` from ``__eq__``; define it
        # explicitly so inequality stays consistent with equality.
        return not self.__eq__(val)
class Circle(object):
    """A circle described by a centre :class:`Point` and a radius ``r``.

    Accepts either ``Circle(x, y, r)`` or ``Circle((x, y), r)``.
    Raises ``ValueError`` for any other argument shape or a
    non-numeric radius.
    """

    point = None
    r = None

    def __init__(self, *args, **kwargs):
        if len(args) == 3:
            self.point = Point(args[:2])
            self.r = args[2]
        elif len(args) == 2:
            self.point = Point(*args[0])
            self.r = args[1]
        else:
            raise ValueError("invalid data")
        self._validate()

    def _validate(self):
        """Raise ``ValueError`` unless the radius is a real number."""
        # numbers.Real matches int/float (and long on Python 2) without
        # using the Python-2-only ``long`` builtin, which was a
        # NameError on Python 3.
        if not isinstance(self.r, numbers.Real):
            raise ValueError("invalid data")

    def __iter__(self):
        yield self.point.x
        yield self.point.y
        yield self.r

    def __repr__(self):
        return "<Circle(%s,%s)>" % (self.point, self.r)
class Line(object):
    """An infinite line defined by two endpoints.

    Accepts ``Line(x1, y1, x2, y2)`` or ``Line((x1, y1), (x2, y2))``.
    Iterating yields each endpoint as a plain ``(x, y)`` tuple.
    """

    init_point = None
    end_point = None

    def __init__(self, *args, **kwargs):
        if len(args) == 4:
            first, second = args[:2], args[2:]
        elif len(args) == 2:
            first, second = args
        else:
            raise ValueError("invalid content")
        self.init_point = Point(*first)
        self.end_point = Point(*second)

    def __iter__(self):
        for endpoint in (self.init_point, self.end_point):
            yield tuple(endpoint)

    def __repr__(self):
        return "<Line(%s, %s)>" % \
               (self.init_point, self.end_point)
class Lseg(Line):
    """A finite line segment; differs from :class:`Line` only in repr
    and in being adapted to the SQL ``lseg`` type."""

    def __repr__(self):
        return "<Lseg({0}, {1})>".format(self.init_point, self.end_point)
class Box(object):
    """An axis-aligned rectangle stored as two corner points.

    Accepts ``Box(x1, y1, x2, y2)`` or ``Box((x1, y1), (x2, y2))``.
    The corners are normalised so that ``first_vertex`` always holds
    the greater (per tuple comparison) corner.
    """

    first_vertex = None
    second_vertex = None

    def __init__(self, *args, **kwargs):
        if len(args) == 4:
            corners = Point(*args[:2]), Point(*args[2:])
        elif len(args) == 2:
            corners = Point(*args[0]), Point(*args[1])
        else:
            raise ValueError("invalid content")
        self.first_vertex, self.second_vertex = corners
        self._reorder()

    def _reorder(self):
        """Swap the corners so the greater one comes first."""
        if self.first_vertex < self.second_vertex:
            self.first_vertex, self.second_vertex = (
                self.second_vertex, self.first_vertex)

    def __iter__(self):
        for vertex in (self.first_vertex, self.second_vertex):
            yield tuple(vertex)

    def __repr__(self):
        return "<Box(%s,%s),(%s,%s)>" % (
            self.first_vertex.x, self.first_vertex.y,
            self.second_vertex.x, self.second_vertex.y)
class Path(object):
    """An open or closed sequence of ``(x, y)`` vertices.

    Each positional argument must be a tuple, list or :class:`Point`;
    pass ``closed=True`` to mark the path as closed.  Raises
    ``ValueError`` on an invalid vertex or when no vertices are given.
    """

    closed = False

    def __init__(self, *args, **kwargs):
        self.points = []
        for vertex in args:
            if not isinstance(vertex, (tuple, list, Point)):
                self.points = []
                raise ValueError("invalid content")
            self.points.append(tuple(vertex))
        self.closed = bool(kwargs.get('closed', False))
        if not self.points:
            raise ValueError("invalid content")

    def __iter__(self):
        return iter(self.points)

    def __repr__(self):
        return "<Path(%s) closed=%s>" % (len(self.points), self.closed)
class Polygon(Path):
    """A :class:`Path` adapted to the SQL ``polygon`` type instead of
    ``path``; only the repr differs."""

    def __repr__(self):
        return "<Polygon({0}) closed={1}>".format(len(self.points), self.closed)
from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type
""" PYTHON->SQL ADAPTATION """
def adapt_point(point):
    """Render a :class:`Point` as a quoted SQL ``point`` literal."""
    quoted_x = adapt(point.x)
    quoted_y = adapt(point.y)
    return AsIs("'(%s, %s)'::point" % (quoted_x, quoted_y))
def adapt_circle(c):
    """Render a :class:`Circle` as a quoted SQL ``circle`` literal."""
    parts = (adapt(c.point.x), adapt(c.point.y), adapt(c.r))
    return AsIs("'<(%s,%s),%s>'::circle" % parts)
def adapt_line(l):
    """Render a :class:`Line` as a quoted SQL ``line`` literal."""
    coords = (adapt(l.init_point.x), adapt(l.init_point.y),
              adapt(l.end_point.x), adapt(l.end_point.y))
    return AsIs("'[(%s,%s), (%s,%s)]'::line" % coords)
def adapt_lseg(l):
    """Render a :class:`Lseg` as a quoted SQL ``lseg`` literal."""
    coords = (adapt(l.init_point.x), adapt(l.init_point.y),
              adapt(l.end_point.x), adapt(l.end_point.y))
    return AsIs("'[(%s,%s), (%s,%s)]'::lseg" % coords)
def adapt_box(box):
    """Render a :class:`Box` as a quoted SQL ``box`` literal."""
    corners = (adapt(box.first_vertex.x), adapt(box.first_vertex.y),
               adapt(box.second_vertex.x), adapt(box.second_vertex.y))
    return AsIs("'(%s,%s),(%s,%s)'::box" % corners)
def adapt_path(path):
    """Render a :class:`Path` as a quoted SQL ``path`` literal.

    Closed paths are wrapped in parentheses, open ones in square
    brackets.  NOTE(review): coordinates are interpolated with plain
    %s (no adapt()); assumes Path vertices are numeric — verify
    callers never build paths from untrusted strings.
    """
    template = "'(%s)'::path" if path.closed else "'[%s]'::path"
    vertices = ",".join("(%s,%s)" % (x, y) for x, y in path)
    return AsIs(template % vertices)
def adapt_polygon(path):
    """Render a :class:`Polygon` as a quoted SQL ``polygon`` literal.

    NOTE(review): coordinates are interpolated with plain %s (no
    adapt()); assumes vertices are numeric — verify against callers.
    """
    vertices = ",".join("(%s,%s)" % (x, y) for x, y in path)
    return AsIs("'(%s)'::polygon" % vertices)
# Hook every geometric class into psycopg2's Python->SQL adaptation.
for _cls, _adapter in (
        (Point, adapt_point),
        (Circle, adapt_circle),
        (Line, adapt_line),
        (Box, adapt_box),
        (Path, adapt_path),
        (Polygon, adapt_polygon),
        (Lseg, adapt_lseg)):
    register_adapter(_cls, _adapter)
""" SQL->PYTHON ADAPTATION """
def cast_point(value, cur):
    """psycopg2 typecaster: SQL ``point`` text ``(x,y)`` -> :class:`Point`.

    Returns ``None`` for SQL NULL; raises ``ValueError`` when the text
    does not look like a point.
    """
    if value is None:
        return None
    match = rx_point.search(value)
    if match is None:
        raise ValueError("bad point representation: %r" % value)
    # Tokens with a "." become floats, everything else stays an int.
    coords = [float(g) if "." in g else int(g) for g in match.groups()]
    return Point(coords)
def cast_circle(value, cur):
    """psycopg2 typecaster: SQL ``circle`` text ``<(x,y),r>`` -> :class:`Circle`.

    Returns ``None`` for SQL NULL; raises ``ValueError`` on bad input.
    """
    if value is None:
        return None
    match = rx_circle_float.search(value)
    if match is None:
        raise ValueError("bad circle representation: %r" % value)
    parts = [float(g) if "." in g else int(g) for g in match.groups()]
    return Circle(*parts)
def cast_line(value, cur):
    """psycopg2 typecaster: SQL segment text ``[(x1,y1), (x2,y2)]`` -> :class:`Line`.

    Returns ``None`` for SQL NULL; raises ``ValueError`` on bad input.
    """
    if value is None:
        return None
    match = rx_line.search(value)
    if match is None:
        raise ValueError("bad line representation: %r" % value)
    coords = [float(g) if "." in g else int(g) for g in match.groups()]
    return Line(*coords)
def cast_lseg(value, cur):
    """psycopg2 typecaster: SQL ``lseg`` text ``[(x1,y1), (x2,y2)]`` -> :class:`Lseg`.

    Returns ``None`` for SQL NULL; raises ``ValueError`` on bad input.
    """
    if value is None:
        return None
    match = rx_line.search(value)
    if match is None:
        raise ValueError("bad lseg representation: %r" % value)
    coords = [float(g) if "." in g else int(g) for g in match.groups()]
    return Lseg(*coords)
def cast_box(value, cur):
    """psycopg2 typecaster: SQL ``box`` text ``(x1,y1),(x2,y2)`` -> :class:`Box`.

    Returns ``None`` for SQL NULL; raises ``ValueError`` on bad input.
    """
    if value is None:
        return None
    match = rx_box.search(value)
    if match is None:
        raise ValueError("bad box representation: %r" % value)
    coords = [float(g) if "." in g else int(g) for g in match.groups()]
    return Box(*coords)
def cast_path(value, cur):
    """psycopg2 typecaster: SQL ``path`` text -> :class:`Path`.

    ``(...)`` marks a closed path, ``[...]`` an open one.  Returns
    ``None`` for SQL NULL; raises ``ValueError`` when the wrapper or
    the vertex list is malformed.
    """
    if value is None:
        return None
    match = rx_path_identify.search(value)
    if match is None:
        raise ValueError("bad path representation: %r" % value)
    is_closed = match.group(1) == "("
    body = match.group(2)
    if not body.strip():
        raise ValueError("bad path representation: %r" % value)
    vertices = [
        (float(x) if "." in x else int(x),
         float(y) if "." in y else int(y))
        for x, y in rx_point.findall(body)]
    return Path(*vertices, closed=is_closed)
def cast_polygon(value, cur):
    """psycopg2 typecaster: SQL ``polygon`` text -> :class:`Polygon`.

    ``(...)`` marks a closed outline, ``[...]`` an open one.  Returns
    ``None`` for SQL NULL; raises ``ValueError`` when the wrapper or
    the vertex list is malformed.
    """
    if value is None:
        return None
    match = rx_path_identify.search(value)
    if match is None:
        raise ValueError("bad path representation: %r" % value)
    is_closed = match.group(1) == "("
    body = match.group(2)
    if not body.strip():
        raise ValueError("bad path representation: %r" % value)
    vertices = [
        (float(x) if "." in x else int(x),
         float(y) if "." in y else int(y))
        for x, y in rx_point.findall(body)]
    return Polygon(*vertices, closed=is_closed)
from django.db import connection

# Ask the server once for the OID of each geometric type so the
# matching typecasters can be registered below.
cur = connection.cursor()
cur.execute("SELECT NULL::point, NULL::circle, NULL::line, NULL::box, "
            "NULL::path, NULL::polygon, NULL::lseg")
# Each cursor.description entry is a column descriptor; index 1 is the
# column's type OID.
_oids = [_column[1] for _column in cur.description]
(point_oid, circle_oid, line_oid, box_oid,
 path_oid, polygon_oid, lseg_oid) = _oids
cur.close()
connection.close()
# Build one psycopg2 typecaster per geometric OID, keeping each caster
# available under its upper-case module-level name.
POINT = new_type((point_oid,), "POINT", cast_point)
CIRCLE = new_type((circle_oid,), "CIRCLE", cast_circle)
LINE = new_type((line_oid,), "LINE", cast_line)
BOX = new_type((box_oid,), "BOX", cast_box)
PATH = new_type((path_oid,), "PATH", cast_path)
POLYGON = new_type((polygon_oid,), "POLYGON", cast_polygon)
LSEG = new_type((lseg_oid,), "LSEG", cast_lseg)

# Register every caster globally so geometric columns come back as the
# classes defined above instead of raw strings.
for _caster in (POINT, CIRCLE, LINE, BOX, PATH, POLYGON, LSEG):
    register_type(_caster)
| [
"re.compile",
"psycopg2.extensions.register_type",
"django.db.connection.close",
"psycopg2.extensions.adapt",
"psycopg2.extensions.new_type",
"django.db.connection.cursor",
"psycopg2.extensions.register_adapter"
] | [((98, 164), 're.compile', 're.compile', (['"""<\\\\(([\\\\d\\\\.\\\\-]*),([\\\\d\\\\.\\\\-]*)\\\\),([\\\\d\\\\.\\\\-]*)>"""'], {}), "('<\\\\(([\\\\d\\\\.\\\\-]*),([\\\\d\\\\.\\\\-]*)\\\\),([\\\\d\\\\.\\\\-]*)>')\n", (108, 164), False, 'import re\n'), ((165, 278), 're.compile', 're.compile', (['"""\\\\[\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\w\\\\.\\\\-]*)\\\\),\\\\s*\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\d\\\\.\\\\+]*)\\\\)\\\\]"""'], {}), "(\n '\\\\[\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\w\\\\.\\\\-]*)\\\\),\\\\s*\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\d\\\\.\\\\+]*)\\\\)\\\\]'\n )\n", (175, 278), False, 'import re\n'), ((260, 313), 're.compile', 're.compile', (['"""\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\d\\\\.\\\\-]*)\\\\)"""'], {}), "('\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\d\\\\.\\\\-]*)\\\\)')\n", (270, 313), False, 'import re\n'), ((315, 422), 're.compile', 're.compile', (['"""\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\d\\\\.\\\\-]*)\\\\),\\\\s*\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\d\\\\.\\\\-]*)\\\\)"""'], {}), "(\n '\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\d\\\\.\\\\-]*)\\\\),\\\\s*\\\\(([\\\\d\\\\.\\\\-]*),\\\\s*([\\\\d\\\\.\\\\-]*)\\\\)'\n )\n", (325, 422), False, 'import re\n'), ((414, 458), 're.compile', 're.compile', (['"""^((?:\\\\(|\\\\[))(.*)(?:\\\\)|\\\\])$"""'], {}), "('^((?:\\\\(|\\\\[))(.*)(?:\\\\)|\\\\])$')\n", (424, 458), False, 'import re\n'), ((5861, 5897), 'psycopg2.extensions.register_adapter', 'register_adapter', (['Point', 'adapt_point'], {}), '(Point, adapt_point)\n', (5877, 5897), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5898, 5936), 'psycopg2.extensions.register_adapter', 'register_adapter', (['Circle', 'adapt_circle'], {}), '(Circle, adapt_circle)\n', (5914, 5936), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5937, 5971), 'psycopg2.extensions.register_adapter', 'register_adapter', (['Line', 'adapt_line'], {}), '(Line, 
adapt_line)\n', (5953, 5971), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5972, 6004), 'psycopg2.extensions.register_adapter', 'register_adapter', (['Box', 'adapt_box'], {}), '(Box, adapt_box)\n', (5988, 6004), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((6005, 6039), 'psycopg2.extensions.register_adapter', 'register_adapter', (['Path', 'adapt_path'], {}), '(Path, adapt_path)\n', (6021, 6039), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((6040, 6080), 'psycopg2.extensions.register_adapter', 'register_adapter', (['Polygon', 'adapt_polygon'], {}), '(Polygon, adapt_polygon)\n', (6056, 6080), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((6081, 6115), 'psycopg2.extensions.register_adapter', 'register_adapter', (['Lseg', 'adapt_lseg'], {}), '(Lseg, adapt_lseg)\n', (6097, 6115), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((8714, 8733), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (8731, 8733), False, 'from django.db import connection\n'), ((9155, 9173), 'django.db.connection.close', 'connection.close', ([], {}), '()\n', (9171, 9173), False, 'from django.db import connection\n'), ((9183, 9226), 'psycopg2.extensions.new_type', 'new_type', (['(point_oid,)', '"""POINT"""', 'cast_point'], {}), "((point_oid,), 'POINT', cast_point)\n", (9191, 9226), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9236, 9282), 'psycopg2.extensions.new_type', 'new_type', (['(circle_oid,)', '"""CIRCLE"""', 'cast_circle'], {}), "((circle_oid,), 'CIRCLE', cast_circle)\n", (9244, 9282), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9290, 9330), 'psycopg2.extensions.new_type', 'new_type', 
(['(line_oid,)', '"""LINE"""', 'cast_line'], {}), "((line_oid,), 'LINE', cast_line)\n", (9298, 9330), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9337, 9374), 'psycopg2.extensions.new_type', 'new_type', (['(box_oid,)', '"""BOX"""', 'cast_box'], {}), "((box_oid,), 'BOX', cast_box)\n", (9345, 9374), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9382, 9422), 'psycopg2.extensions.new_type', 'new_type', (['(path_oid,)', '"""PATH"""', 'cast_path'], {}), "((path_oid,), 'PATH', cast_path)\n", (9390, 9422), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9433, 9482), 'psycopg2.extensions.new_type', 'new_type', (['(polygon_oid,)', '"""POLYGON"""', 'cast_polygon'], {}), "((polygon_oid,), 'POLYGON', cast_polygon)\n", (9441, 9482), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9490, 9530), 'psycopg2.extensions.new_type', 'new_type', (['(lseg_oid,)', '"""LSEG"""', 'cast_lseg'], {}), "((lseg_oid,), 'LSEG', cast_lseg)\n", (9498, 9530), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9532, 9552), 'psycopg2.extensions.register_type', 'register_type', (['POINT'], {}), '(POINT)\n', (9545, 9552), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9553, 9574), 'psycopg2.extensions.register_type', 'register_type', (['CIRCLE'], {}), '(CIRCLE)\n', (9566, 9574), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9575, 9594), 'psycopg2.extensions.register_type', 'register_type', (['LINE'], {}), '(LINE)\n', (9588, 9594), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9595, 9613), 'psycopg2.extensions.register_type', 'register_type', (['BOX'], {}), '(BOX)\n', (9608, 
9613), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9614, 9633), 'psycopg2.extensions.register_type', 'register_type', (['PATH'], {}), '(PATH)\n', (9627, 9633), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9634, 9656), 'psycopg2.extensions.register_type', 'register_type', (['POLYGON'], {}), '(POLYGON)\n', (9647, 9656), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((9657, 9676), 'psycopg2.extensions.register_type', 'register_type', (['LSEG'], {}), '(LSEG)\n', (9670, 9676), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((4693, 4707), 'psycopg2.extensions.adapt', 'adapt', (['point.x'], {}), '(point.x)\n', (4698, 4707), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((4709, 4723), 'psycopg2.extensions.adapt', 'adapt', (['point.y'], {}), '(point.y)\n', (4714, 4723), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((4802, 4818), 'psycopg2.extensions.adapt', 'adapt', (['c.point.x'], {}), '(c.point.x)\n', (4807, 4818), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((4820, 4836), 'psycopg2.extensions.adapt', 'adapt', (['c.point.y'], {}), '(c.point.y)\n', (4825, 4836), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((4838, 4848), 'psycopg2.extensions.adapt', 'adapt', (['c.r'], {}), '(c.r)\n', (4843, 4848), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((4929, 4950), 'psycopg2.extensions.adapt', 'adapt', (['l.init_point.x'], {}), '(l.init_point.x)\n', (4934, 4950), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((4960, 4981), 
'psycopg2.extensions.adapt', 'adapt', (['l.init_point.y'], {}), '(l.init_point.y)\n', (4965, 4981), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((4991, 5011), 'psycopg2.extensions.adapt', 'adapt', (['l.end_point.x'], {}), '(l.end_point.x)\n', (4996, 5011), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5021, 5041), 'psycopg2.extensions.adapt', 'adapt', (['l.end_point.y'], {}), '(l.end_point.y)\n', (5026, 5041), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5127, 5148), 'psycopg2.extensions.adapt', 'adapt', (['l.init_point.x'], {}), '(l.init_point.x)\n', (5132, 5148), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5158, 5179), 'psycopg2.extensions.adapt', 'adapt', (['l.init_point.y'], {}), '(l.init_point.y)\n', (5163, 5179), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5189, 5209), 'psycopg2.extensions.adapt', 'adapt', (['l.end_point.x'], {}), '(l.end_point.x)\n', (5194, 5209), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5219, 5239), 'psycopg2.extensions.adapt', 'adapt', (['l.end_point.y'], {}), '(l.end_point.y)\n', (5224, 5239), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5321, 5346), 'psycopg2.extensions.adapt', 'adapt', (['box.first_vertex.x'], {}), '(box.first_vertex.x)\n', (5326, 5346), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5356, 5381), 'psycopg2.extensions.adapt', 'adapt', (['box.first_vertex.y'], {}), '(box.first_vertex.y)\n', (5361, 5381), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5391, 5417), 'psycopg2.extensions.adapt', 'adapt', 
(['box.second_vertex.x'], {}), '(box.second_vertex.x)\n', (5396, 5417), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n'), ((5427, 5453), 'psycopg2.extensions.adapt', 'adapt', (['box.second_vertex.y'], {}), '(box.second_vertex.y)\n', (5432, 5453), False, 'from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type\n')] |
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class RInstaller(ClusterSetup):
	"""StarCluster plugin that builds and installs R 3.1.1 from source on every node.

	For each node it installs the build dependencies with apt-get, downloads and
	compiles R, registers an environment-modules file, and then installs CRAN and
	Bioconductor package sets from scripts hosted on bitbucket.
	"""
	def run(self, nodes, master, user, user_shell, volumes):
		# Executed on cluster start; each command runs over SSH on the node.
		for node in nodes:
			log.info("Installing R 3.1.1 on %s" % (node.alias))
			# Phase 1: system build dependencies (TeX, image libs, X11, GTK).
			log.info("...installing dependencies")
			node.ssh.execute('apt-get install -y libreadline-dev ncurses-dev libpng-dev texinfo texlive texlive-base luatex texlive-latex-base texlive-luatex texlive-extra-utils texlive-latex-recommended texlive-fonts-extra freetype* libxml2 libxml2-dev libpng12-dev libcurl4-openssl-dev tk-dev xterm')
			node.ssh.execute('apt-get install -y libgtk2.0-dev xorg-dev')
			# Phase 2: download and unpack the R source tarball under /opt/software/R.
			log.info("...dependencies installed --> --> downloading R")
			node.ssh.execute('wget -c -P /opt/software/R http://cran.us.r-project.org/src/base/R-3/R-3.1.1.tar.gz')
			log.info("...R has downloaded --> decompressing files")
			node.ssh.execute('tar xvzf /opt/software/R/R-3.1.1.tar.gz -C /opt/software/R')
			# Phase 3: configure and compile (shared lib + recommended packages).
			log.info("...files decompressed --> running ./configure")
			node.ssh.execute('cd /opt/software/R/R-3.1.1 && ./configure --with-lapack --with-blas --with-pic --enable-threads --with-x=yes --enable-R-shlib --with-libpng --with-jpeglib --with-recommended-packages=yes')
			log.info("...configure has finished --> running make")
			node.ssh.execute('make -C /opt/software/R/R-3.1.1')
			# Phase 4: expose the build through an environment-modules file so
			# users can `module load R/3.1.1`.
			log.info("...make has finished --> creating modulefiles")
			node.ssh.execute('mkdir -p /usr/local/Modules/applications/R/;touch /usr/local/Modules/applications/R/3.1.1')
			node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/R/3.1.1')
			node.ssh.execute('echo "set root /opt/software/R/R-3.1.1" >> /usr/local/Modules/applications/R/3.1.1')
			node.ssh.execute('echo -e "prepend-path\tPATH\t\$root/bin" >> /usr/local/Modules/applications/R/3.1.1')
			# Phase 5: install R package sets via helper scripts pinned to a
			# specific bitbucket revision.
			log.info("...installing R packages")
			log.info("...installing packages from CRAN")
			node.ssh.execute('wget -c -P /opt/software/R https://bitbucket.org/sulab/omics_pipe/raw/e345e666dd70711f79d310fe451a361893626196/dist/AWS_customBuild/Rprofile')
			node.ssh.execute('cp /opt/software/R/Rprofile ~/.Rprofile')
			node.ssh.execute('wget -c -P /opt/software/R https://bitbucket.org/sulab/omics_pipe/raw/e345e666dd70711f79d310fe451a361893626196/dist/AWS_customBuild/packages_cran.R')
			node.ssh.execute('wget -c -P /opt/software/R https://bitbucket.org/sulab/omics_pipe/raw/e345e666dd70711f79d310fe451a361893626196/dist/AWS_customBuild/packages_bioc_1.R')
			node.ssh.execute('wget -c -P /opt/software/R https://bitbucket.org/sulab/omics_pipe/raw/e345e666dd70711f79d310fe451a361893626196/dist/AWS_customBuild/packages_bioc_2.R')
			node.ssh.execute('module load R/3.1.1 && Rscript /opt/software/R/packages_cran.R')
			log.info("...CRAN packages have been installed --> installing BioConductor packages")
			node.ssh.execute('module load R/3.1.1 && Rscript /opt/software/R/packages_bioc_1.R')
			log.info("...BioConductor1 packages have been installed")
			node.ssh.execute('module load R/3.1.1 && Rscript /opt/software/R/packages_bioc_2.R')
log.info("...BioConductor2 packages have been installed") | [
"starcluster.logger.log.info"
] | [((200, 249), 'starcluster.logger.log.info', 'log.info', (["('Installing R 3.1.1 on %s' % node.alias)"], {}), "('Installing R 3.1.1 on %s' % node.alias)\n", (208, 249), False, 'from starcluster.logger import log\n'), ((255, 293), 'starcluster.logger.log.info', 'log.info', (['"""...installing dependencies"""'], {}), "('...installing dependencies')\n", (263, 293), False, 'from starcluster.logger import log\n'), ((656, 715), 'starcluster.logger.log.info', 'log.info', (['"""...dependencies installed --> --> downloading R"""'], {}), "('...dependencies installed --> --> downloading R')\n", (664, 715), False, 'from starcluster.logger import log\n'), ((826, 881), 'starcluster.logger.log.info', 'log.info', (['"""...R has downloaded --> decompressing files"""'], {}), "('...R has downloaded --> decompressing files')\n", (834, 881), False, 'from starcluster.logger import log\n'), ((967, 1024), 'starcluster.logger.log.info', 'log.info', (['"""...files decompressed --> running ./configure"""'], {}), "('...files decompressed --> running ./configure')\n", (975, 1024), False, 'from starcluster.logger import log\n'), ((1238, 1292), 'starcluster.logger.log.info', 'log.info', (['"""...configure has finished --> running make"""'], {}), "('...configure has finished --> running make')\n", (1246, 1292), False, 'from starcluster.logger import log\n'), ((1351, 1408), 'starcluster.logger.log.info', 'log.info', (['"""...make has finished --> creating modulefiles"""'], {}), "('...make has finished --> creating modulefiles')\n", (1359, 1408), False, 'from starcluster.logger import log\n'), ((1822, 1858), 'starcluster.logger.log.info', 'log.info', (['"""...installing R packages"""'], {}), "('...installing R packages')\n", (1830, 1858), False, 'from starcluster.logger import log\n'), ((1862, 1906), 'starcluster.logger.log.info', 'log.info', (['"""...installing packages from CRAN"""'], {}), "('...installing packages from CRAN')\n", (1870, 1906), False, 'from starcluster.logger import log\n'), 
((2745, 2840), 'starcluster.logger.log.info', 'log.info', (['"""...CRAN packages have been installed --> installing BioConductor packages"""'], {}), "(\n '...CRAN packages have been installed --> installing BioConductor packages'\n )\n", (2753, 2840), False, 'from starcluster.logger import log\n'), ((2922, 2979), 'starcluster.logger.log.info', 'log.info', (['"""...BioConductor1 packages have been installed"""'], {}), "('...BioConductor1 packages have been installed')\n", (2930, 2979), False, 'from starcluster.logger import log\n'), ((3071, 3128), 'starcluster.logger.log.info', 'log.info', (['"""...BioConductor2 packages have been installed"""'], {}), "('...BioConductor2 packages have been installed')\n", (3079, 3128), False, 'from starcluster.logger import log\n')] |
import unittest
import os
from os import listdir, path
from pathlib import Path
class TestSwap(unittest.TestCase):
    """Exercises the download / trim / face-swap pipeline exposed by ``main``."""

    def test_imports(self):
        # The pipeline depends on pytube being importable at all.
        from pytube import YouTube

    def test_get_video(self):
        from main import get_video
        source_url = 'https://youtu.be/dQw4w9WgXcQ'
        downloaded = get_video(source_url)
        self.assertTrue(path.exists(downloaded))

    def test_cut_video(self):
        from main import cut_video
        clip_start = 30
        clip_length = 10
        source_video = Path("C:/Users/ru1072781/Repo/yt-swap/video_in/<NAME> - Never Gonna Give You Up (Video).mp4")
        trimmed = cut_video(source_video, clip_start, clip_length)
        self.assertTrue(path.exists(trimmed))

    def test_swap_faces(self):
        from main import swap_faces
        source_video = Path("C:/Users/ru1072781/Repo/yt-swap/cut_vid/<NAME> - Never Gonna Give You Up (Video).mp4")
        face_image = Path("C:/Users/ru1072781/Repo/yt-swap/image_in/obama.png")
        swapped = swap_faces(source_video, face_image)
        self.assertTrue(path.exists(swapped))
self.assertTrue(path.exists(final_path))
if __name__ == "__main__":
unittest.main() | [
"main.cut_video",
"os.path.exists",
"pathlib.Path",
"main.swap_faces",
"main.get_video",
"unittest.main"
] | [((1103, 1118), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1116, 1118), False, 'import unittest\n'), ((315, 334), 'main.get_video', 'get_video', (['rickroll'], {}), '(rickroll)\n', (324, 334), False, 'from main import get_video\n'), ((508, 611), 'pathlib.Path', 'Path', (['"""C:/Users/ru1072781/Repo/yt-swap/video_in/<NAME> - Never Gonna Give You Up (Video).mp4"""'], {}), "(\n 'C:/Users/ru1072781/Repo/yt-swap/video_in/<NAME> - Never Gonna Give You Up (Video).mp4'\n )\n", (512, 611), False, 'from pathlib import Path\n'), ((621, 655), 'main.cut_video', 'cut_video', (['vid_path', 'start', 'length'], {}), '(vid_path, start, length)\n', (630, 655), False, 'from main import cut_video\n'), ((789, 891), 'pathlib.Path', 'Path', (['"""C:/Users/ru1072781/Repo/yt-swap/cut_vid/<NAME> - Never Gonna Give You Up (Video).mp4"""'], {}), "(\n 'C:/Users/ru1072781/Repo/yt-swap/cut_vid/<NAME> - Never Gonna Give You Up (Video).mp4'\n )\n", (793, 891), False, 'from pathlib import Path\n'), ((900, 958), 'pathlib.Path', 'Path', (['"""C:/Users/ru1072781/Repo/yt-swap/image_in/obama.png"""'], {}), "('C:/Users/ru1072781/Repo/yt-swap/image_in/obama.png')\n", (904, 958), False, 'from pathlib import Path\n'), ((980, 1009), 'main.swap_faces', 'swap_faces', (['vid_path', 'im_path'], {}), '(vid_path, im_path)\n', (990, 1009), False, 'from main import swap_faces\n'), ((359, 382), 'os.path.exists', 'path.exists', (['video_path'], {}), '(video_path)\n', (370, 382), False, 'from os import listdir, path\n'), ((680, 701), 'os.path.exists', 'path.exists', (['cut_path'], {}), '(cut_path)\n', (691, 701), False, 'from os import listdir, path\n'), ((1034, 1057), 'os.path.exists', 'path.exists', (['final_path'], {}), '(final_path)\n', (1045, 1057), False, 'from os import listdir, path\n')] |
from helpers import Tags
# Controls
# Button tag class
class Button(Tags):
    """Represents an HTML ``<button>`` element."""

    def __init__(self, attrs):
        # Delegate to the shared Tags initialiser with a fixed tag name.
        super().__init__("button", attrs)
# Label tag class
class Label(Tags):
    """Represents an HTML ``<label>`` element."""

    def __init__(self, attrs):
        # Delegate to the shared Tags initialiser with a fixed tag name.
        super().__init__("label", attrs)
# Input tag class
class Input(Tags):
    """Represents an HTML ``<input>`` element carrying an explicit type."""

    def __init__(self, type, attrs):
        super().__init__("input", attrs)
        self.type = type

    def getType(self):
        """Return the input's type attribute (e.g. ``text``)."""
        return self.type

    def getPrintableTag(self):
        """Render the base tag, then append the type attribute."""
        rendered = super().getPrintableTag()
        return rendered + " type=" + self.type
# Hyperlink tag class
class HyperLink(Tags):
    """Represents an HTML anchor (``<a>``) element."""

    def __init__(self, href, attrs):
        super().__init__("a", attrs)
        self.href = href

    def getPrintableTag(self):
        """Render the tag with ``href`` and, when set, the ``data`` attribute."""
        pieces = [" href=" + self.href]
        if self.data:
            # self.data is presumably set by the Tags base class -- TODO confirm.
            pieces.append(" data=" + self.data)
        return super().getPrintableTag() + "".join(pieces)
# TextArea tag class
class TextArea(Tags):
    """Represents an HTML ``<textarea>`` element."""

    def __init__(self, attrs):
        # Delegate to the shared Tags initialiser with a fixed tag name.
        super().__init__("textarea", attrs)
| [
"helpers.Tags.__init__",
"helpers.Tags.getPrintableTag"
] | [((117, 153), 'helpers.Tags.__init__', 'Tags.__init__', (['self', '"""button"""', 'attrs'], {}), "(self, 'button', attrs)\n", (130, 153), False, 'from helpers import Tags\n'), ((233, 268), 'helpers.Tags.__init__', 'Tags.__init__', (['self', '"""label"""', 'attrs'], {}), "(self, 'label', attrs)\n", (246, 268), False, 'from helpers import Tags\n'), ((354, 389), 'helpers.Tags.__init__', 'Tags.__init__', (['self', '"""input"""', 'attrs'], {}), "(self, 'input', attrs)\n", (367, 389), False, 'from helpers import Tags\n'), ((654, 685), 'helpers.Tags.__init__', 'Tags.__init__', (['self', '"""a"""', 'attrs'], {}), "(self, 'a', attrs)\n", (667, 685), False, 'from helpers import Tags\n'), ((1006, 1044), 'helpers.Tags.__init__', 'Tags.__init__', (['self', '"""textarea"""', 'attrs'], {}), "(self, 'textarea', attrs)\n", (1019, 1044), False, 'from helpers import Tags\n'), ((881, 907), 'helpers.Tags.getPrintableTag', 'Tags.getPrintableTag', (['self'], {}), '(self)\n', (901, 907), False, 'from helpers import Tags\n'), ((511, 537), 'helpers.Tags.getPrintableTag', 'Tags.getPrintableTag', (['self'], {}), '(self)\n', (531, 537), False, 'from helpers import Tags\n')] |
import logging
from typing import Union, Tuple
import threading
import numpy as np
from pyobs.comm import RemoteException
from pyobs.interfaces import IFocuser, ICamera, IAutoFocus, IFilters, ICameraExposureTime, IImageType
from pyobs.events import FocusFoundEvent
from pyobs.object import get_object
from pyobs.mixins import CameraSettingsMixin
from pyobs.modules import timeout, Module
from pyobs.utils.enums import ImageType
from pyobs.utils.focusseries import FocusSeries
log = logging.getLogger(__name__)
class AutoFocusSeries(Module, CameraSettingsMixin, IAutoFocus):
    """Module for auto-focusing a telescope.

    Takes a series of exposures at focus values around an initial guess,
    feeds them to a FocusSeries analyser and finally moves the focuser to
    the fitted best focus (or best focus offset).
    """
    __module__ = 'pyobs.modules.focus'

    def __init__(self, focuser: Union[str, IFocuser], camera: Union[str, ICamera], filters: Union[str, IFilters],
                 series: FocusSeries, offset: bool = False, *args, **kwargs):
        """Initialize a new auto focus system.

        Args:
            focuser: Name of IFocuser.
            camera: Name of ICamera.
            filters: Name of IFilters, if any.
            series: FocusSeries instance (or config) used to collect and fit the series.
            offset: If True, offsets are used instead of absolute focus values.
        """
        Module.__init__(self, *args, **kwargs)

        # store focuser and camera
        self._focuser = focuser
        self._camera = camera
        self._filters = filters
        self._offset = offset
        self._abort = threading.Event()

        # create focus series
        self._series: FocusSeries = get_object(series, FocusSeries)

        # init camera settings mixin
        CameraSettingsMixin.__init__(self, *args, filters=filters, **kwargs)

    def open(self):
        """Open module"""
        Module.open(self)

        # register event
        self.comm.register_event(FocusFoundEvent)

        # check focuser and camera; proxies may not be available yet
        try:
            self.proxy(self._focuser, IFocuser)
            self.proxy(self._camera, ICamera)
        except ValueError:
            log.warning('Either camera or focuser do not exist or are not of correct type at the moment.')

    def close(self):
        """Close module."""

    @timeout(600)
    def auto_focus(self, count: int, step: float, exposure_time: int, *args, **kwargs) -> Tuple[float, float]:
        """Perform an auto-focus series.

        This method performs an auto-focus series with "count" images on each side of the initial guess and the given
        step size. With count=3, step=1 and guess=10, this takes images at the following focus values:
            7, 8, 9, 10, 11, 12, 13

        Args:
            count: Number of images to take on each side of the initial guess. Should be an odd number.
            step: Step size.
            exposure_time: Exposure time for images.

        Returns:
            Tuple of obtained best focus value and its uncertainty. Or Nones, if focus series failed.

        Raises:
            FileNotFoundException: If image could not be downloaded.
            ValueError: If the initial focus cannot be read, a focus value cannot
                be set, or no best focus could be fitted.
            InterruptedError: If abort() was called during the series.
        """
        log.info('Performing auto-focus...')

        # get focuser
        log.info('Getting proxy for focuser...')
        focuser: IFocuser = self.proxy(self._focuser, IFocuser)

        # get camera
        log.info('Getting proxy for camera...')
        camera: ICamera = self.proxy(self._camera, ICamera)

        # do camera settings
        self._do_camera_settings(camera)

        # get filter wheel and current filter (best effort, used only for the event)
        filter_name = 'unknown'
        try:
            filter_wheel: IFilters = self.proxy(self._filters, IFilters)
            filter_name = filter_wheel.get_filter().wait()
        except ValueError:
            log.warning('Filter module is not of type IFilters. Could not get filter.')

        # get focus as first guess
        try:
            if self._offset:
                guess = 0
                log.info('Using focus offset of 0mm as initial guess.')
            else:
                guess = focuser.get_focus().wait()
                log.info('Using current focus of %.2fmm as initial guess.', guess)
        except RemoteException:
            raise ValueError('Could not fetch current focus value.')

        # define array of focus values to iterate
        focus_values = np.linspace(guess - count * step, guess + count * step, 2 * count + 1)

        # define set_focus method: in offset mode we move relative to the base focus
        set_focus = focuser.set_focus_offset if self._offset else focuser.set_focus

        # reset series state and abort flag
        self._series.reset()
        self._abort = threading.Event()

        # loop focus values
        log.info('Starting focus series...')
        for foc in focus_values:
            # set focus
            log.info('Changing focus to %.2fmm...', foc)
            if self._abort.is_set():
                raise InterruptedError()
            try:
                set_focus(float(foc)).wait()
            except RemoteException:
                raise ValueError('Could not set new focus value.')

            # do exposure
            log.info('Taking picture...')
            if self._abort.is_set():
                raise InterruptedError()
            try:
                if isinstance(camera, ICameraExposureTime):
                    camera.set_exposure_time(exposure_time)
                if isinstance(camera, IImageType):
                    camera.set_image_type(ImageType.FOCUS)
                filename = camera.expose().wait()
            except RemoteException:
                # a failed exposure only skips this focus value
                log.error('Could not take image.')
                continue

            # download image
            log.info('Downloading image...')
            image = self.vfs.read_image(filename)

            # analyse
            log.info('Analysing picture...')
            try:
                self._series.analyse_image(image)
            except Exception:
                # broad but not bare, so Ctrl-C / SystemExit still propagate;
                # a failed analysis only skips this focus value
                log.error('Could not analyse image.')
                continue

        # fit focus
        if self._abort.is_set():
            raise InterruptedError()
        focus = self._series.fit_focus()

        # did focus series fail?
        if focus is None or focus[0] is None or np.isnan(focus[0]):
            log.warning('Focus series failed.')

            # reset to initial values; we must move back to *guess* here --
            # focus[0] is None/NaN at this point (the previous code passed
            # the failed fit value to the focuser)
            if self._offset:
                log.info('Resetting focus offset to initial guess of %.3f mm.', guess)
                focuser.set_focus_offset(guess).wait()
            else:
                log.info('Resetting focus to initial guess of %.3f mm.', guess)
                focuser.set_focus(guess).wait()

            # raise error
            raise ValueError('Could not find best focus.')

        # log and set focus; "absolute" is the absolute focus value, i.e. focus+offset
        if self._offset:
            log.info('Setting new focus offset of (%.3f+-%.3f) mm.', focus[0], focus[1])
            absolute = focus[0] + focuser.get_focus().wait()
            focuser.set_focus_offset(focus[0]).wait()
        else:
            log.info('Setting new focus value of (%.3f+-%.3f) mm.', focus[0], focus[1])
            absolute = focus[0] + focuser.get_focus_offset().wait()
            focuser.set_focus(focus[0]).wait()

        # send event
        self.comm.send_event(FocusFoundEvent(absolute, focus[1], filter_name))

        # return result
        return focus[0], focus[1]

    def auto_focus_status(self, *args, **kwargs) -> dict:
        """Returns current status of auto focus.

        Returned dictionary contains a list of focus/fwhm pairs in X and Y direction.

        Returns:
            Dictionary with current status.
        """
        return {}

    @timeout(20)
    def abort(self, *args, **kwargs):
        """Abort current actions."""
        self._abort.set()
__all__ = ['AutoFocusSeries']
| [
"logging.getLogger",
"pyobs.mixins.CameraSettingsMixin.__init__",
"pyobs.object.get_object",
"threading.Event",
"pyobs.modules.Module.open",
"pyobs.modules.timeout",
"numpy.linspace",
"numpy.isnan",
"pyobs.modules.Module.__init__",
"pyobs.events.FocusFoundEvent"
] | [((484, 511), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (501, 511), False, 'import logging\n'), ((2078, 2090), 'pyobs.modules.timeout', 'timeout', (['(600)'], {}), '(600)\n', (2085, 2090), False, 'from pyobs.modules import timeout, Module\n'), ((7549, 7560), 'pyobs.modules.timeout', 'timeout', (['(20)'], {}), '(20)\n', (7556, 7560), False, 'from pyobs.modules import timeout, Module\n'), ((1143, 1181), 'pyobs.modules.Module.__init__', 'Module.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1158, 1181), False, 'from pyobs.modules import timeout, Module\n'), ((1364, 1381), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1379, 1381), False, 'import threading\n'), ((1449, 1480), 'pyobs.object.get_object', 'get_object', (['series', 'FocusSeries'], {}), '(series, FocusSeries)\n', (1459, 1480), False, 'from pyobs.object import get_object\n'), ((1527, 1595), 'pyobs.mixins.CameraSettingsMixin.__init__', 'CameraSettingsMixin.__init__', (['self', '*args'], {'filters': 'filters'}), '(self, *args, filters=filters, **kwargs)\n', (1555, 1595), False, 'from pyobs.mixins import CameraSettingsMixin\n'), ((1651, 1668), 'pyobs.modules.Module.open', 'Module.open', (['self'], {}), '(self)\n', (1662, 1668), False, 'from pyobs.modules import timeout, Module\n'), ((4140, 4210), 'numpy.linspace', 'np.linspace', (['(guess - count * step)', '(guess + count * step)', '(2 * count + 1)'], {}), '(guess - count * step, guess + count * step, 2 * count + 1)\n', (4151, 4210), True, 'import numpy as np\n'), ((4398, 4415), 'threading.Event', 'threading.Event', ([], {}), '()\n', (4413, 4415), False, 'import threading\n'), ((6007, 6025), 'numpy.isnan', 'np.isnan', (['focus[0]'], {}), '(focus[0])\n', (6015, 6025), True, 'import numpy as np\n'), ((7147, 7195), 'pyobs.events.FocusFoundEvent', 'FocusFoundEvent', (['absolute', 'focus[1]', 'filter_name'], {}), '(absolute, focus[1], filter_name)\n', (7162, 7195), False, 'from pyobs.events import 
FocusFoundEvent\n')] |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from unittest import mock
from sasctl.services import model_publish as mp
def test_publish_name():
    """Names are stripped of spaces/invalid chars and never start with a digit."""
    cases = [
        ('Module Name', 'ModuleName'),  # Remove spaces
        ('1st Module', '_1stModule'),  # Cannot start with numbers
        ('$&^Va*li#d @Modu(le)!', 'ValidModule'),
    ]
    for raw, expected in cases:
        assert mp._publish_name(raw) == expected
def test_create_cas_destination():
    """The CAS destination payload posted to the service matches expectations."""
    expected = {
        'name': 'caslocal',
        'destinationType': 'cas',
        'casServerName': 'camelot',
        'casLibrary': 'round',
        'destinationTable': 'table',
        'description': None,
    }

    with mock.patch('sasctl._services.model_publish.ModelPublish.post') as post:
        mp.create_cas_destination(
            'caslocal', server='camelot', library='round', table='table'
        )

    assert post.called
    payload = post.call_args[1]['json']
    for key, value in payload.items():
        assert value == expected[key]
def test_create_mas_destination():
    """The MAS destination payload posted to the service matches expectations."""
    expected = {
        'name': 'spam',
        'destinationType': 'microAnalyticService',
        'masUri': 'http://spam.com',
        'description': 'Real-time spam',
    }

    with mock.patch('sasctl._services.model_publish.ModelPublish.post') as post:
        mp.create_mas_destination(
            expected['name'], expected['masUri'], expected['description']
        )

    assert post.called
    payload = post.call_args[1]['json']
    for key, value in payload.items():
        assert value == expected[key]
| [
"sasctl.services.model_publish.create_cas_destination",
"sasctl.services.model_publish.create_mas_destination",
"sasctl.services.model_publish._publish_name",
"unittest.mock.patch"
] | [((287, 318), 'sasctl.services.model_publish._publish_name', 'mp._publish_name', (['"""Module Name"""'], {}), "('Module Name')\n", (303, 318), True, 'from sasctl.services import model_publish as mp\n'), ((363, 393), 'sasctl.services.model_publish._publish_name', 'mp._publish_name', (['"""1st Module"""'], {}), "('1st Module')\n", (379, 393), True, 'from sasctl.services import model_publish as mp\n'), ((451, 494), 'sasctl.services.model_publish._publish_name', 'mp._publish_name', (['"""$&^Va*li#d @Modu(le)!"""'], {}), "('$&^Va*li#d @Modu(le)!')\n", (467, 494), True, 'from sasctl.services import model_publish as mp\n'), ((758, 820), 'unittest.mock.patch', 'mock.patch', (['"""sasctl._services.model_publish.ModelPublish.post"""'], {}), "('sasctl._services.model_publish.ModelPublish.post')\n", (768, 820), False, 'from unittest import mock\n'), ((838, 929), 'sasctl.services.model_publish.create_cas_destination', 'mp.create_cas_destination', (['"""caslocal"""'], {'server': '"""camelot"""', 'library': '"""round"""', 'table': '"""table"""'}), "('caslocal', server='camelot', library='round',\n table='table')\n", (863, 929), True, 'from sasctl.services import model_publish as mp\n'), ((1309, 1371), 'unittest.mock.patch', 'mock.patch', (['"""sasctl._services.model_publish.ModelPublish.post"""'], {}), "('sasctl._services.model_publish.ModelPublish.post')\n", (1319, 1371), False, 'from unittest import mock\n'), ((1389, 1476), 'sasctl.services.model_publish.create_mas_destination', 'mp.create_mas_destination', (["target['name']", "target['masUri']", "target['description']"], {}), "(target['name'], target['masUri'], target[\n 'description'])\n", (1414, 1476), True, 'from sasctl.services import model_publish as mp\n')] |
from flask import json
import unittest
from app import app
class BasicTestCase(unittest.TestCase):
    """Smoke tests for the /ping endpoints of the Flask app."""

    def test_ok(self):
        # Sanity check that the test harness itself runs.
        assert(1)

    def test_ping(self):
        client = app.test_client(self)
        resp = client.get('/ping', content_type='html/text')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data, b'pong')

    def test_ping_json(self):
        client = app.test_client(self)
        resp = client.get('/ping_json')
        self.assertEqual(resp.status_code, 200)
        payload = json.loads(resp.data)
        self.assertEqual(payload['ping'], 'pong')
| [
"flask.json.loads",
"app.app.test_client"
] | [((186, 207), 'app.app.test_client', 'app.test_client', (['self'], {}), '(self)\n', (201, 207), False, 'from app import app\n'), ((422, 443), 'app.app.test_client', 'app.test_client', (['self'], {}), '(self)\n', (437, 443), False, 'from app import app\n'), ((557, 582), 'flask.json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (567, 582), False, 'from flask import json\n')] |
import matplotlib.pylab as plt
import numpy as np
def plotFlow(env, policy, x2d):
    """Draw a quiver plot of the state-transition flow induced by *policy*.

    For every discrete state of *env*, the environment is reset to that state,
    the policy's action is applied, and an arrow from x2d(state) to
    x2d(next_state) is recorded.

    Returns the matplotlib quiver handle.
    """
    segments = []
    for state in range(env.nx):
        env.reset(state)
        origin = x2d(state)
        action = policy(state)
        next_state, reward = env.step(action)
        destination = x2d(next_state)
        # store [start point, displacement vector]
        segments.append([origin, destination - origin])
    arrows = np.array([np.concatenate(pair) for pair in segments])
    return plt.quiver(arrows[:, 0], arrows[:, 1], arrows[:, 2], arrows[:, 3])
| [
"matplotlib.pylab.quiver",
"numpy.concatenate"
] | [((342, 400), 'matplotlib.pylab.quiver', 'plt.quiver', (['flow[:, 0]', 'flow[:, 1]', 'flow[:, 2]', 'flow[:, 3]'], {}), '(flow[:, 0], flow[:, 1], flow[:, 2], flow[:, 3])\n', (352, 400), True, 'import matplotlib.pylab as plt\n'), ((299, 316), 'numpy.concatenate', 'np.concatenate', (['a'], {}), '(a)\n', (313, 316), True, 'import numpy as np\n')] |
#!/usr/local/bin/python3
import subprocess
import signal
import sys
from pathlib import Path
import click
import requests
MIGRATE_CHART_SCRIPT = '/migrate_chart.sh'
HELM_CMD = '/linux-amd64/helm'
CA_UPDATE_CMD = 'update-ca-certificates'
CHART_URL_PATTERN = "https://{host}/api/v2.0/projects/{project}/repositories/{name}/artifacts/{version}"
CHART_SOURCE_DIR = Path('/chart_storage')
errs = []
def print_exist_errs():
    """Echo every accumulated migration error to stderr, if there are any."""
    if not errs:
        return
    click.echo("Following errors exist", err=True)
    for err in errs:
        click.echo(err, err=True)
def graceful_exit(signum, frame):
    """Signal handler: report any collected errors, then exit the process."""
    print_exist_errs()
    sys.exit()
# Dump accumulated errors on Ctrl-C / SIGTERM before the process exits.
signal.signal(signal.SIGINT, graceful_exit)
signal.signal(signal.SIGTERM, graceful_exit)
class ChartV2:
    """A Helm v2 chart archive on disk, migratable to a Harbor OCI registry.

    The chart's project is derived from the parent directory and the
    name/version pair is parsed from the file stem.
    """

    def __init__(self, filepath: Path):
        self.filepath = filepath
        # Layout is <storage>/<project>/<name>-<version>.tgz, so the
        # project is the second-to-last path component.
        self.project = self.filepath.parts[-2]
        parts = self.filepath.stem.split('-')
        flag = False
        # Scan dash-separated parts from the end; the last part whose first
        # character is a digit is taken as the start of the version, so
        # e.g. "my-chart-1.2.0-beta" -> name "my-chart", version "1.2.0-beta".
        # NOTE(review): an empty part (double dash in the stem) would raise
        # IndexError on parts[i][0] -- presumably never occurs in practice.
        for i in range(len(parts)-1, -1, -1):
            if parts[i][0].isnumeric():
                self.name, self.version = '-'.join(parts[:i]), '-'.join(parts[i:])
                flag = True
                break
        if not flag:
            raise Exception('chart name: {} is illegal'.format('-'.join(parts)))

    def __check_exist(self, hostname, username, password):
        # Query the Harbor v2 artifact API for this chart; the caller
        # inspects the response status code.
        return requests.get(CHART_URL_PATTERN.format(
            host=hostname,
            project=self.project,
            name=self.name,
            version=self.version),
            auth=requests.auth.HTTPBasicAuth(username, password))

    def migrate(self, hostname, username, password):
        """Push this chart to Harbor unless it already exists there.

        Returns the CompletedProcess of the migration script; its stderr is
        non-empty on failure. Raises if the artifact exists or auth fails.
        """
        res = self.__check_exist(hostname, username, password)
        if res.status_code == 200:
            raise Exception("Artifact already exist in harbor")
        if res.status_code == 401:
            raise Exception(res.reason)
        # OCI reference the chart is pushed under.
        oci_ref = "{host}/{project}/{name}:{version}".format(
            host=hostname,
            project=self.project,
            name=self.name,
            version=self.version)
        # Delegate the actual push to the shell helper; stderr is captured
        # so the caller can collect per-chart errors.
        return subprocess.run([MIGRATE_CHART_SCRIPT, HELM_CMD, self.filepath, oci_ref],
                              text=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
@click.command()
# fix: the --hostname help text was copy-pasted from --password
@click.option('--hostname', default='127.0.0.1', help='the hostname of the harbor instance')
@click.option('--username', default='admin', help='The username to login harbor')
@click.option('--password', default='<PASSWORD>', help='the password to login harbor')
def migrate(hostname, username, password):
    """
    Migrate chart v2 to harbor oci registry
    """
    if username != 'admin':
        raise Exception('This operation only allowed for admin')
    # Trust the registry's CA and log helm into it before pushing anything.
    subprocess.run([CA_UPDATE_CMD])
    subprocess.run([HELM_CMD, 'registry', 'login', hostname, '--username', username, '--password', password])
    # Collect every chart archive under <storage>/<project>/, skipping the
    # repository index file.
    charts = [ChartV2(c) for p in CHART_SOURCE_DIR.iterdir() if p.is_dir() for c in p.iterdir() if c.is_file() and c.name != "index-cache.yaml"]
    with click.progressbar(charts, label="Migrating chart ...", length=len(charts),
        item_show_func=lambda x: "{}/{}:{} total errors: {}".format(x.project, x.name, x.version, len(errs)) if x else '') as bar:
        for chart in bar:
            # Errors are collected rather than raised so one bad chart does
            # not abort the whole migration.
            try:
                result = chart.migrate(hostname, username, password)
                if result.stderr:
                    errs.append("chart: {name}:{version} in {project} has err: {err}".format(
                        name=chart.name,
                        version=chart.version,
                        project=chart.project,
                        err=result.stderr
                    ))
            except Exception as e:
                errs.append("chart: {name}:{version} in {project} has err: {err}".format(
                    name=chart.name,
                    version=chart.version,
                    project=chart.project,
                    err=e))
    click.echo("Migration is Done.")
    print_exist_errs()
# CLI entry point.
if __name__ == '__main__':
    migrate()
| [
"signal.signal",
"requests.auth.HTTPBasicAuth",
"pathlib.Path",
"click.option",
"subprocess.run",
"click.echo",
"sys.exit",
"click.command"
] | [((364, 386), 'pathlib.Path', 'Path', (['"""/chart_storage"""'], {}), "('/chart_storage')\n", (368, 386), False, 'from pathlib import Path\n'), ((624, 667), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'graceful_exit'], {}), '(signal.SIGINT, graceful_exit)\n', (637, 667), False, 'import signal\n'), ((668, 712), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'graceful_exit'], {}), '(signal.SIGTERM, graceful_exit)\n', (681, 712), False, 'import signal\n'), ((2200, 2215), 'click.command', 'click.command', ([], {}), '()\n', (2213, 2215), False, 'import click\n'), ((2217, 2306), 'click.option', 'click.option', (['"""--hostname"""'], {'default': '"""127.0.0.1"""', 'help': '"""the password to login harbor"""'}), "('--hostname', default='127.0.0.1', help=\n 'the password to login harbor')\n", (2229, 2306), False, 'import click\n'), ((2303, 2388), 'click.option', 'click.option', (['"""--username"""'], {'default': '"""admin"""', 'help': '"""The username to login harbor"""'}), "('--username', default='admin', help='The username to login harbor'\n )\n", (2315, 2388), False, 'import click\n'), ((2385, 2475), 'click.option', 'click.option', (['"""--password"""'], {'default': '"""<PASSWORD>"""', 'help': '"""the password to login harbor"""'}), "('--password', default='<PASSWORD>', help=\n 'the password to login harbor')\n", (2397, 2475), False, 'import click\n'), ((612, 622), 'sys.exit', 'sys.exit', ([], {}), '()\n', (620, 622), False, 'import sys\n'), ((2671, 2702), 'subprocess.run', 'subprocess.run', (['[CA_UPDATE_CMD]'], {}), '([CA_UPDATE_CMD])\n', (2685, 2702), False, 'import subprocess\n'), ((2707, 2816), 'subprocess.run', 'subprocess.run', (["[HELM_CMD, 'registry', 'login', hostname, '--username', username,\n '--password', password]"], {}), "([HELM_CMD, 'registry', 'login', hostname, '--username',\n username, '--password', password])\n", (2721, 2816), False, 'import subprocess\n'), ((3889, 3921), 'click.echo', 'click.echo', (['"""Migration is Done."""'], 
{}), "('Migration is Done.')\n", (3899, 3921), False, 'import click\n'), ((444, 490), 'click.echo', 'click.echo', (['"""Following errors exist"""'], {'err': '(True)'}), "('Following errors exist', err=True)\n", (454, 490), False, 'import click\n'), ((2054, 2192), 'subprocess.run', 'subprocess.run', (['[MIGRATE_CHART_SCRIPT, HELM_CMD, self.filepath, oci_ref]'], {'text': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.PIPE'}), '([MIGRATE_CHART_SCRIPT, HELM_CMD, self.filepath, oci_ref],\n text=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)\n', (2068, 2192), False, 'import subprocess\n'), ((526, 549), 'click.echo', 'click.echo', (['e'], {'err': '(True)'}), '(e, err=True)\n', (536, 549), False, 'import click\n'), ((1512, 1559), 'requests.auth.HTTPBasicAuth', 'requests.auth.HTTPBasicAuth', (['username', 'password'], {}), '(username, password)\n', (1539, 1559), False, 'import requests\n')] |
# -*- coding: utf-8 -*-
"""
Created on July 2017
@author: JulienWuthrich
"""
import time
import datetime
import dateutil.parser
def val2date(val, nformat="%d-%m-%Y"):
    """Render a date-like value as a formatted string.

    Accepts a date string (parsed via dateutil), a ``datetime.date`` or a
    ``datetime.datetime``; any other type raises ``Val2DateException``.
    ``datetime.datetime`` is a subclass of ``datetime.date``, so a single
    isinstance check covers both.
    """
    if isinstance(val, str):
        parsed = dateutil.parser.parse(val).date()
        return parsed.strftime(nformat)
    if isinstance(val, (datetime.date, datetime.datetime)):
        return val.strftime(nformat)
    message = "Format of date {} unknow".format(type(val))
    raise Val2DateException(message)
def dateWscds(dt, scd):
    """Shift a date forward by a number of seconds.

    Args:
        dt: a ``datetime.date``/``datetime.datetime``, or a date string
            (parsed with dateutil).
        scd: seconds offset, given as an int or a numeric string.

    Returns:
        datetime.datetime: local datetime at ``mktime(dt) + scd``.

    Raises:
        DateWScdsException: if ``scd`` cannot be cast to an int, or the
            timestamp cannot be built from ``dt``.
    """
    def cast_scd(scd):
        # Bug fix: int("abc") raises ValueError, not TypeError -- the old
        # code only caught TypeError, so malformed strings escaped the
        # domain exception.  Catch both and wrap them.
        try:
            return int(scd)
        except (TypeError, ValueError):
            message = "Unknow type {} for scd".format(type(scd))
            raise DateWScdsException(message)

    def make_timestamp(dt, scd):
        # time.mktime needs a struct_time; wrap failures in the domain error.
        try:
            return time.mktime(dt.timetuple()) + scd
        except TypeError as te:
            raise DateWScdsException(te)

    if isinstance(dt, str):
        dt = dateutil.parser.parse(dt).date()
    if isinstance(scd, str):
        scd = cast_scd(scd)
    return datetime.datetime.fromtimestamp(make_timestamp(dt, scd))
def range_date(start, end):
    """Return every date from ``start`` to ``end`` inclusive.

    Yields an empty list when ``end`` precedes ``start``.
    """
    dates = []
    current = start
    one_day = datetime.timedelta(days=1)
    while current <= end:
        dates.append(current)
        current = current + one_day
    return dates
class Val2DateException(Exception):
    """Raised when ``val2date`` receives a value of an unsupported type."""

    def __call__(self, *args):
        # Calling an instance builds a fresh exception of the same class.
        return type(self)(*args)
class DateWScdsException(Exception):
    """Raised when ``dateWscds`` cannot build a timestamp from its inputs."""

    def __call__(self, *args):
        # Calling an instance builds a fresh exception of the same class.
        return type(self)(*args)
| [
"datetime.timedelta"
] | [((1285, 1311), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1303, 1311), False, 'import datetime\n')] |
import pytest
from metagraph.tests.util import default_plugin_resolver
from . import RoundTripper
from metagraph.plugins.python.types import PythonNodeSetType
from metagraph.plugins.numpy.types import NumpyNodeSet, NumpyNodeMap
import numpy as np
def test_nodeset_roundtrip(default_plugin_resolver):
    """A plain python node set survives a translation round trip."""
    round_tripper = RoundTripper(default_plugin_resolver)
    node_set = {2, 3, 55}
    round_tripper.verify_round_trip(node_set)
def test_np_nodemap_2_np_nodeset(default_plugin_resolver):
    """Translating a NumpyNodeMap yields the matching NumpyNodeSet."""
    dpr = default_plugin_resolver
    node_map = NumpyNodeMap(np.array([0, 10, 20]))
    assert len(node_map) == 3
    expected = NumpyNodeSet(np.array([0, 1, 2]))
    translated = dpr.translate(node_map, NumpyNodeSet)
    dpr.assert_equal(translated, expected)
def test_np_nodeset_2_py_nodeset(default_plugin_resolver):
    """Translating a NumpyNodeSet yields the matching python set."""
    dpr = default_plugin_resolver
    node_set = NumpyNodeSet(np.array([9, 5, 1]))
    assert len(node_set) == 3
    expected = {5, 1, 9}
    translated = dpr.translate(node_set, PythonNodeSetType)
    dpr.assert_equal(translated, expected)
def test_py_nodeset_2_np_nodeset(default_plugin_resolver):
    """Translating a python set yields the matching NumpyNodeSet."""
    dpr = default_plugin_resolver
    node_set = {2, 1, 5}
    assert len(node_set) == 3
    mask = np.array([False, True, True, False, False, True])
    expected = NumpyNodeSet.from_mask(mask)
    translated = dpr.translate(node_set, NumpyNodeSet)
    dpr.assert_equal(translated, expected)
| [
"numpy.array"
] | [((514, 535), 'numpy.array', 'np.array', (['[0, 10, 20]'], {}), '([0, 10, 20])\n', (522, 535), True, 'import numpy as np\n'), ((593, 612), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (601, 612), True, 'import numpy as np\n'), ((807, 826), 'numpy.array', 'np.array', (['[9, 5, 1]'], {}), '([9, 5, 1])\n', (815, 826), True, 'import numpy as np\n'), ((1149, 1198), 'numpy.array', 'np.array', (['[False, True, True, False, False, True]'], {}), '([False, True, True, False, False, True])\n', (1157, 1198), True, 'import numpy as np\n')] |
from types import ClassMethodDescriptorType
import requests, colorgram, os
import time as t
import spotipy.util as util
from PIL import Image, ImageDraw, ImageFont
from spotipy.oauth2 import SpotifyOAuth
# Get creds please enter your creds in creds.txt
# Module-wide configuration filled in by main() from creds.txt.
# NOTE(review): a ``global`` statement at module scope is a no-op in Python;
# it is kept only as documentation of which names the functions rebind.
global spotify_token, client_id, client_secret, username, display
client_id = ""      # Spotify application client id (from creds.txt)
client_secret = ""  # Spotify application client secret (from creds.txt)
spotify_token = ""  # OAuth token obtained by spotify_authenticate()
username = ""       # Spotify username (from creds.txt)
scope = "user-read-currently-playing"  # OAuth scope needed for the API call below
display = ""        # display size string "WxH"; main() replaces it with [W, H]
def main():
    """Load credentials from creds.txt, authenticate, and fetch the current song.

    Side effects: rebinds the module globals ``client_id``, ``client_secret``,
    ``username`` and ``display`` (the latter becomes a ``[width, height]``
    list after splitting on "x").
    """
    datadict = get_variables()
    global client_secret, colors, client_id, username, display
    client_id = datadict["client_id"]
    client_secret = datadict["client_secret"]
    username = datadict["spot_username"]
    display = datadict["display_size"]
    display = display.split("x")  # "1920x1080" -> ["1920", "1080"]
    spotify_authenticate()
    get_song_id()
def spotify_authenticate():
    """Obtain an OAuth token via spotipy and store it in ``spotify_token``.

    Exits the program when no token could be obtained.
    """
    global spotify_token
    # Opens a browser-based auth flow; the URL is the registered redirect URI.
    token = util.prompt_for_user_token(username, scope, client_id, client_secret, "https://www.google.com/")
    if token:
        spotify_token = token
    else:
        print("Couldn't get proper Spotify authentication")
        exit()
def get_song_id():
    """Query the currently-playing track and cache its cover art.

    Returns ``[track_id, track_name, artist_name]`` on success and writes the
    medium cover image to ./ImageCache/newCover.png.

    NOTE(review): error handling retries by calling itself recursively, and
    the recursive results are NOT returned -- on any retry path this function
    returns None and deep retries grow the call stack.  Callers index into
    the result, so a long outage would surface as a TypeError upstream.
    """
    header = {
        "Authorization": "Bearer {}".format(spotify_token)
    }
    get_id = requests.get("https://api.spotify.com/v1/me/player/currently-playing", headers=header)
    try:
        song_content = get_id.json()
        id = song_content['item']['id']
        if not id:
            # NOTE(review): this sleep does not retry; execution falls
            # through with a falsy id -- presumably a stopgap, confirm intent.
            t.sleep(1)
        name = song_content['item']['name']
        artistName = song_content['item']['album']['artists'][0]['name']
        # images[1] is the medium-size cover in Spotify's response layout.
        imageUrl = song_content['item']['album']['images'][1]['url']
        imageRequest = requests.get(str(imageUrl))
        file = open("./ImageCache/newCover.png", "wb")
        file.write(imageRequest.content)
        file.close()
        return [id, name, artistName]
    except KeyError:
        # Missing fields usually mean an expired token: re-auth and retry.
        spotify_authenticate()
        get_song_id()
    except TypeError:
        print("Spotify Error: make sure valid song is playing")
        print("Waiting for valid song to be played.")
        t.sleep(5)
        get_song_id()
    except ValueError:
        # Response body was not JSON (e.g. 204 No Content when idle).
        print("Error: looks like no song is playing")
        print("Waiting for song to be played.")
        t.sleep(5)
        get_song_id()
def get_variables():
    """Parse ``creds.txt`` into a ``{key: value}`` dict.

    Lines without an ``=`` are ignored.  Each remaining line is split on the
    FIRST ``=`` only (``str.partition``), so values that themselves contain
    ``=`` characters (e.g. base64-style secrets) are preserved intact -- the
    old ``line.split("=")`` aborted the whole program on such lines.

    Returns:
        dict: stripped key names mapped to stripped values.
    """
    creds = {}
    with open('creds.txt', 'r') as file:
        for line in file:
            if "=" in line:
                key, _, value = line.partition("=")
                creds[key.strip()] = value.strip()
    return creds
main()
# Main loop: every 5 seconds rebuild a wallpaper composed of the album cover
# centered over a two-tone background sampled from the cover's palette, then
# set it as the GNOME desktop background.
while 1:
    # Album image is scaled to 1/5 of the display width, centered in a
    # (width/5 x height/2) region.
    width = int(int(display[0]) / 5)
    height = int(int(display[1]) / 2)
    baseWidth = int(display[0])
    baseHeight = int(display[1])
    image = Image.open("./ImageCache/newCover.png")
    wpercent = (width/float(image.size[0]))
    hsize = int((float(image.size[1])*float(wpercent)))
    # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow releases
    # (use Image.LANCZOS) -- confirm the pinned Pillow version.
    image = image.resize((width,hsize), Image.ANTIALIAS)
    image.save('./ImageCache/albumImage.png')
    # Extract the two dominant colors from the resized cover.
    colors = colorgram.extract('./ImageCache/albumImage.png', 2)
    if len(colors) < 2:
        firstColor = colors[0]
        secondColor = colors[0]
    else:
        firstColor = colors[0]
        secondColor = colors[1]
    # Top half: dominant color plus the title/artist text.
    colorImageOne = Image.new('RGB', (baseWidth, int(baseHeight / 2)), (firstColor.rgb))
    titleArtist = ImageDraw.Draw(colorImageOne)
    # NOTE(review): two separate API round-trips for title and artist; a
    # single get_song_id() call would suffice.
    songTitle = get_song_id()[1]
    songArtist = get_song_id()[2]
    myFont = ImageFont.truetype("./fonts/Rubik.ttf", 40)
    titleArtist.text((50,50), (songTitle + "\n" + songArtist), font = myFont, fill = (255,255,255))
    colorImageOne.save('./ImageCache/firstColor.png')
    # Bottom half: secondary color.
    colorImageTwo = Image.new('RGB', (baseWidth, int(baseHeight / 2)), (secondColor.rgb))
    colorImageTwo.save('./ImageCache/secondColor.png')
    # Stack the two halves, then paste the cover in the center.
    background = Image.new('RGB', (colorImageOne.width, colorImageOne.height + colorImageTwo.height))
    background.paste(colorImageOne, (0, 0))
    background.paste(colorImageTwo, (0, colorImageOne.height))
    background.save('./ImageCache/background.png')
    finalImage = Image.new('RGB', (width, height))
    background.paste(image, ((int(background.width/2) - int(image.width / 2)), int((background.height/2) - int(image.height / 2))))
    background.save("./ImageCache/finalImage.png")
    # Apply as GNOME desktop background (Linux/GNOME specific).
    cwd = os.getcwd()
    os.system("gsettings set org.gnome.desktop.background picture-uri " + cwd + "/ImageCache/finalImage.png")
    t.sleep(5)
| [
"PIL.Image.open",
"colorgram.extract",
"PIL.Image.new",
"PIL.ImageFont.truetype",
"requests.get",
"spotipy.util.prompt_for_user_token",
"os.getcwd",
"time.sleep",
"PIL.ImageDraw.Draw",
"os.system"
] | [((868, 968), 'spotipy.util.prompt_for_user_token', 'util.prompt_for_user_token', (['username', 'scope', 'client_id', 'client_secret', '"""https://www.google.com/"""'], {}), "(username, scope, client_id, client_secret,\n 'https://www.google.com/')\n", (894, 968), True, 'import spotipy.util as util\n'), ((1208, 1298), 'requests.get', 'requests.get', (['"""https://api.spotify.com/v1/me/player/currently-playing"""'], {'headers': 'header'}), "('https://api.spotify.com/v1/me/player/currently-playing',\n headers=header)\n", (1220, 1298), False, 'import requests, colorgram, os\n'), ((2906, 2945), 'PIL.Image.open', 'Image.open', (['"""./ImageCache/newCover.png"""'], {}), "('./ImageCache/newCover.png')\n", (2916, 2945), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3192, 3243), 'colorgram.extract', 'colorgram.extract', (['"""./ImageCache/albumImage.png"""', '(2)'], {}), "('./ImageCache/albumImage.png', 2)\n", (3209, 3243), False, 'import requests, colorgram, os\n'), ((3548, 3577), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['colorImageOne'], {}), '(colorImageOne)\n', (3562, 3577), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3658, 3701), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""./fonts/Rubik.ttf"""', '(40)'], {}), "('./fonts/Rubik.ttf', 40)\n", (3676, 3701), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4042, 4131), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(colorImageOne.width, colorImageOne.height + colorImageTwo.height)'], {}), "('RGB', (colorImageOne.width, colorImageOne.height + colorImageTwo\n .height))\n", (4051, 4131), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4308, 4341), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)'], {}), "('RGB', (width, height))\n", (4317, 4341), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4552, 4563), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4561, 4563), False, 'import requests, colorgram, os\n'), ((4568, 4677), 'os.system', 
'os.system', (["('gsettings set org.gnome.desktop.background picture-uri ' + cwd +\n '/ImageCache/finalImage.png')"], {}), "('gsettings set org.gnome.desktop.background picture-uri ' + cwd +\n '/ImageCache/finalImage.png')\n", (4577, 4677), False, 'import requests, colorgram, os\n'), ((4679, 4689), 'time.sleep', 't.sleep', (['(5)'], {}), '(5)\n', (4686, 4689), True, 'import time as t\n'), ((1412, 1422), 'time.sleep', 't.sleep', (['(1)'], {}), '(1)\n', (1419, 1422), True, 'import time as t\n'), ((2060, 2070), 'time.sleep', 't.sleep', (['(5)'], {}), '(5)\n', (2067, 2070), True, 'import time as t\n'), ((2226, 2236), 'time.sleep', 't.sleep', (['(5)'], {}), '(5)\n', (2233, 2236), True, 'import time as t\n')] |
'''
Created on 25 sty 2015
@author: <NAME>
'''
import unittest
from probability.metric import ExpectedValue, Variation, StandardDeviation
class MetricTest(unittest.TestCase):
    """Checks the probability metric implementations against known values."""

    # Shared die-roll distribution used by every test case below.
    PROBS = {1: 0.004629629629629629, 2: 0.032407407407407406,
             3: 0.08796296296296297, 4: 0.1712962962962963,
             5: 0.2824074074074074, 6: 0.4212962962962963}

    def testShouldCalculateExpectedValue(self):
        """ExpectedValue.calculate matches the pre-computed result."""
        result = ExpectedValue().calculate(self.PROBS)
        self.assertEqual(result, 4.958333333333333, "Invalid expected value.")

    def testShouldCalculateVariation(self):
        """Variation.calculate matches the pre-computed result."""
        result = Variation().calculate(self.PROBS)
        self.assertEqual(result, 25.89351851851852, "Invalid variation.")

    def testShouldCalculateStandardDeviation(self):
        """StandardDeviation.calculate matches the pre-computed result."""
        result = StandardDeviation().calculate(self.PROBS)
        self.assertEqual(result, 5.088567432835937, "Invalid standard deviation.")
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
"unittest.main",
"probability.metric.StandardDeviation",
"probability.metric.ExpectedValue",
"probability.metric.Variation"
] | [((1692, 1707), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1705, 1707), False, 'import unittest\n'), ((275, 290), 'probability.metric.ExpectedValue', 'ExpectedValue', ([], {}), '()\n', (288, 290), False, 'from probability.metric import ExpectedValue, Variation, StandardDeviation\n'), ((746, 757), 'probability.metric.Variation', 'Variation', ([], {}), '()\n', (755, 757), False, 'from probability.metric import ExpectedValue, Variation, StandardDeviation\n'), ((1209, 1228), 'probability.metric.StandardDeviation', 'StandardDeviation', ([], {}), '()\n', (1226, 1228), False, 'from probability.metric import ExpectedValue, Variation, StandardDeviation\n')] |
import example
import pytest
def test_hunger_is_a_read_only_attribute():
    """A Pet() exposes ``hunger`` and forbids assigning to it."""
    odie = example.Pet("Odie")
    assert hasattr(odie, "hunger")
    with pytest.raises(AttributeError):
        odie.hunger = 5
def test_hunger_defaults_to_zero():
    """A freshly created Pet() starts out with zero hunger."""
    odie = example.Pet("Odie")
    assert odie.hunger == 0
def test_hunger_can_be_initialized_to_int():
    """The ``hunger`` keyword sets the initial hunger value."""
    odie = example.Pet("Odie", hunger=3)
    assert odie.hunger == 3
@pytest.mark.parametrize("bad_type", ["3", {}, [], 3.0, None])
def test_hunger_can_not_be_initialized_to_non_int(bad_type):
    """The ``hunger`` keyword must be an int.

    The full input space is infinite; a handful of common non-int types
    stand in for bad-type checking.
    """
    if isinstance(bad_type, (str, float)):
        # A user *could* cast these types to int themselves; doing so is
        # not an error and must not raise.
        example.Pet("Odie", hunger=int(bad_type))
    # Passing any of the raw non-int values directly must raise.
    with pytest.raises(TypeError):
        example.Pet("Odie", hunger=bad_type)
def test_pet_gets_hungry_after_walk():
    """Each walk() increments hunger by exactly one."""
    odie = example.Pet("Odie")
    for expected_hunger in (1, 2):
        odie.walk()
        assert odie.hunger == expected_hunger
def test_pet_gets_full_after_feeding():
    """feed() resets hunger back to zero after some walks."""
    odie = example.Pet("Odie")
    for expected_hunger in (1, 2):
        odie.walk()
        assert odie.hunger == expected_hunger
    odie.feed()
    assert odie.hunger == 0
| [
"pytest.raises",
"example.Pet"
] | [((193, 212), 'example.Pet', 'example.Pet', (['"""Odie"""'], {}), "('Odie')\n", (204, 212), False, 'import example\n'), ((442, 461), 'example.Pet', 'example.Pet', (['"""Odie"""'], {}), "('Odie')\n", (453, 461), False, 'import example\n'), ((628, 657), 'example.Pet', 'example.Pet', (['"""Odie"""'], {'hunger': '(3)'}), "('Odie', hunger=3)\n", (639, 657), False, 'import example\n'), ((1541, 1560), 'example.Pet', 'example.Pet', (['"""Odie"""'], {}), "('Odie')\n", (1552, 1560), False, 'import example\n'), ((1779, 1798), 'example.Pet', 'example.Pet', (['"""Odie"""'], {}), "('Odie')\n", (1790, 1798), False, 'import example\n'), ((256, 285), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (269, 285), False, 'import pytest\n'), ((1326, 1350), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1339, 1350), False, 'import pytest\n'), ((1366, 1402), 'example.Pet', 'example.Pet', (['"""Odie"""'], {'hunger': 'bad_type'}), "('Odie', hunger=bad_type)\n", (1377, 1402), False, 'import example\n')] |
# Copyright (c) 2021, Hitachi America Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from enum import Enum
from typing import List, Dict, Tuple, Optional
from collections import defaultdict
import tqdm
from pdf_struct.core.utils import get_filename
class ListAction(Enum):
    """Structural label attached to each annotated text line."""

    EXCLUDED = -1
    CONTINUOUS = 0
    SAME_LEVEL = 1
    DOWN = 2
    UP = 3
    ELIMINATE = 4
    ADDRESS = 5

    @staticmethod
    def contains(key: str) -> bool:
        """Return True when *key* is one of the recognized annotation keys."""
        return key in {'c', 'a', 'b', 's', 'd', 'e', 'x'}

    @classmethod
    def from_key(
            cls, key: str, pointer: Optional[int], use_address: bool=False) -> 'ListAction':
        """Translate an annotation key (plus optional pointer) to a ListAction.

        A non-None pointer always yields UP, after validating that the key
        admits a pointer at all (-1 is only legal with 's').
        """
        if pointer is not None:
            if key in ('e', 'x'):
                raise ValueError(f'Cannot use a pointer with {key}')
            if pointer == -1 and key != 's':
                raise ValueError(f'Cannot use -1 with {key}')
            return cls.UP
        if use_address and key == 'a':
            return cls.ADDRESS
        # Plain key -> action dispatch ('b'/'s' both mean SAME_LEVEL,
        # 'c'/'a' both mean CONTINUOUS when use_address is off).
        key_to_action = {
            'c': cls.CONTINUOUS,
            'a': cls.CONTINUOUS,
            'b': cls.SAME_LEVEL,
            's': cls.SAME_LEVEL,
            'x': cls.EXCLUDED,
            'd': cls.DOWN,
            'e': cls.ELIMINATE,
        }
        if key in key_to_action:
            return key_to_action[key]
        raise ValueError(f'Unknown key {key}')
def _load_anno(in_path: str, lines: List[str], offset: int) -> List[Tuple[ListAction, Optional[int]]]:
    """Parse annotation TSV lines into per-line (ListAction, pointer) pairs.

    Each input line has three tab-separated fields; field 1 is a 1-based
    pointer (0 = none, -1 = "document root") and field 2 is the annotation
    key.  ``offset`` shifts both line numbering (for error messages) and
    in-file pointers when this chunk does not start at the file's top.
    Raises ValueError on any malformed line.
    """
    ret = []
    # Indices of lines currently considered part of the document root level.
    root_line_indices = set()
    root_flg = True
    for i, line in enumerate(lines):
        line_num = i + 1 + offset  # 1-based position in the file, for error messages
        line = line.rstrip('\n').split('\t')
        if len(line) != 3:
            raise ValueError(
                f'Invalid line "{line}" in {line_num}-th line of "{in_path}".')
        if not ListAction.contains(line[2]):
            raise ValueError(
                f'Invalid label "{line[2]}" in {line_num}-th line of "{in_path}".')
        ptr = int(line[1])
        if offset > 0 and ptr > 0:
            # Pointers in the raw file are absolute; rebase onto this chunk.
            ptr -= offset
        if not (-1 <= ptr <= i):
            raise ValueError(
                f'Invalid pointer "{line[1]}" in {line_num}-th line of "{in_path}".')
        # Normalize: 0 -> no pointer; positive values become 0-based indices.
        if ptr == 0:
            ptr = None
        elif ptr > 0:
            ptr = ptr - 1
        # A concrete pointer must target a line labeled DOWN.
        if ptr is not None and ptr > 0 and ret[ptr][0] != ListAction.DOWN:
            raise ValueError(
                f'Pointer is pointing at "{ret[ptr][0]}" in {line_num}-th line of "{in_path}".')
        try:
            l = ListAction.from_key(line[2], ptr)
        except ValueError as e:
            raise ValueError(f'{e} in {line_num}-th line of "{in_path}".')
        # Pointing back at a root line re-enters the root level.
        if ptr is not None and ptr in root_line_indices:
            root_flg = True
        if root_flg:
            root_line_indices.add(i)
        if ptr == -1:
            # -1 resolves to the most recent root-level line.
            ptr = max(root_line_indices)
        if l == ListAction.DOWN:
            root_flg = False
        if ptr == i:
            print('Pointer pointing at root when it is already in root in '
                  f'{line_num}-th line of "{in_path}". Turning it into SAME_LEVEL.')
            ptr = None
            l = ListAction.SAME_LEVEL
        ret.append((l, ptr))
    return ret
return ret
AnnoListType = Dict[str, List[Tuple[ListAction, Optional[int]]]]
def load_annos(base_dir: str) -> AnnoListType:
    """Load every ``*.tsv`` annotation file under *base_dir*.

    Returns a dict keyed by file name (without extension) mapping to the
    parsed (ListAction, pointer) list for that file.
    """
    annos = dict()
    pattern = os.path.join(base_dir, '*.tsv')
    for path in tqdm.tqdm(glob.glob(pattern)):
        with open(path, 'r') as fin:
            lines = list(fin)
        annos[get_filename(path)] = _load_anno(path, lines, offset=0)
    return annos
def load_hocr_annos(base_dir: str) -> AnnoListType:
    """Load hOCR-derived ``*.tsv`` annotations, grouped by page/record id.

    Lines in each file are partitioned into runs sharing the same 5-character
    prefix; each run is parsed separately (with its starting offset) and the
    results are concatenated per file.
    NOTE(review): an empty file (or a single-line run) would hit the
    ``assert len(lines) > 1`` / an undefined ``lines`` -- presumably inputs
    are guaranteed non-trivial; confirm upstream.
    """
    annos = defaultdict(list)
    for path in tqdm.tqdm(glob.glob(os.path.join(base_dir, '*.tsv'))):
        filename = get_filename(path)
        with open(path, 'r') as fin:
            cur_id = None
            cur_idx = 0
            for i, line in enumerate(fin):
                # First 5 chars identify the record a line belongs to.
                if line[:5] != cur_id:
                    if cur_id is not None:
                        # Flush the previous run before starting a new one.
                        assert len(lines) > 1
                        a = _load_anno(path, lines, offset=cur_idx)
                        annos[filename].extend(a)
                    lines = []
                    cur_id = line[:5]
                    cur_idx = i
                lines.append(line)
            # Flush the final run.
            assert len(lines) > 1
            a = _load_anno(path, lines, offset=cur_idx)
            annos[filename].extend(a)
    return dict(annos)
def filter_text_blocks(text_blocks, labels, pointers):
    """Drop EXCLUDED entries and re-index the surviving pointers.

    Returns ``(kept_text_blocks, kept_labels, kept_pointers)`` where each
    concrete kept pointer indexes into the *filtered* lists.  ``None`` and
    the root sentinel ``-1`` are carried through unchanged.
    """
    _labels = []
    _pointers = []
    _text_boxes = []
    for i in range(len(labels)):
        if labels[i] != ListAction.EXCLUDED:
            _labels.append(labels[i])
            if pointers[i] is None:
                p = None
            elif pointers[i] == -1:
                # -1 is the "points at document root" sentinel.
                p = -1
            else:
                p = pointers[i]
                # A concrete pointer must reference an earlier kept element.
                assert len(_pointers) > p >= 0
            _pointers.append(p)
            _text_boxes.append(text_blocks[i])
        else:
            # Element i is dropped: shift down every pointer that refers
            # beyond the current end of the kept prefix.
            pointers_tmp = []
            for p in pointers:
                if p is None:
                    pointers_tmp.append(None)
                elif p > len(_labels):
                    pointers_tmp.append(p - 1)
                else:
                    pointers_tmp.append(p)
            pointers = pointers_tmp
    return _text_boxes, _labels, _pointers
| [
"os.path.join",
"pdf_struct.core.utils.get_filename",
"collections.defaultdict"
] | [((4245, 4262), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4256, 4262), False, 'from collections import defaultdict\n'), ((4115, 4133), 'pdf_struct.core.utils.get_filename', 'get_filename', (['path'], {}), '(path)\n', (4127, 4133), False, 'from pdf_struct.core.utils import get_filename\n'), ((4353, 4371), 'pdf_struct.core.utils.get_filename', 'get_filename', (['path'], {}), '(path)\n', (4365, 4371), False, 'from pdf_struct.core.utils import get_filename\n'), ((3941, 3972), 'os.path.join', 'os.path.join', (['base_dir', '"""*.tsv"""'], {}), "(base_dir, '*.tsv')\n", (3953, 3972), False, 'import os\n'), ((4299, 4330), 'os.path.join', 'os.path.join', (['base_dir', '"""*.tsv"""'], {}), "(base_dir, '*.tsv')\n", (4311, 4330), False, 'import os\n')] |
"""
Definition of direct collocation problem.
Authors: <NAME>, <NAME>
Date: 05/01/2021
"""
# third party imports
try:
import ipyopt
_ipyopt_imported = True
except:
_ipyopt_imported = False
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize, NonlinearConstraint
from scipy.integrate import solve_ivp
from sympy import Matrix, Symbol, lambdify
from sympy.core.function import BadArgumentsError
# pydcol imports
from .Objective import Objective
from .EqualityConstraints import EqualityConstraints
from .CollocMethods import *
from .Solution import Solution
class CollocationProblem:
	"""Direct-collocation transcription of an ODE trajectory problem.

	Builds symbolic objective and defect-constraint expressions from the
	user's ODE and chosen collocation scheme, then solves the resulting
	nonlinear program with scipy's trust-constr or Ipopt (via ipyopt).
	"""

	def __init__(self,
				state_vars,
				control_vars,
				ode,
				tspan,
				X_start,
				X_goal=None,
				colloc_method=HERM,
				custom_objective=None):
		"""Set up symbolic objective and collocation (defect) constraints.

		Parameters
		----------
		state_vars -- sympy symbols for the states
		control_vars -- sympy symbols for the controls
		ode -- list of sympy expressions, one time derivative per state
		tspan -- array of node times
		X_start -- initial state
		X_goal -- optional goal state used to seed the initial guess
		colloc_method -- one of the pydcol collocation-method constants
		custom_objective -- optional pre-built objective; default is control effort
		"""
		self.ode = ode
		self.state_vars = state_vars
		self.control_vars = control_vars
		# Numeric callable f(*states, *controls) -> xdot, for IVP evaluation.
		self.ode_fun = lambdify(self.state_vars+self.control_vars, Matrix(self.ode), 'numpy')
		self.colloc_method = colloc_method
		self.tspan = tspan
		self.objective = custom_objective
		self.X_start = X_start
		self.X_goal = X_goal

		# Get variable dimensions
		self.N = self.tspan.size
		self.Ntilde=self.tspan.size
		self.X_dim = len(state_vars)
		self.U_dim = len(control_vars)
		self.all_vars = state_vars + control_vars

		self.h = Symbol("h")  # symbolic time step
		self._h = self.tspan[1:] - self.tspan[:-1]  # numeric time steps

		# Create a set of "prev" and "mid" variables for accessing values at previous time step
		self.prev_all_vars = [Symbol(str(var)+"_prev") for var in self.all_vars]
		self.prev_dict = {}
		for i in range(len(self.all_vars)):
			self.prev_dict[self.all_vars[i]] = self.prev_all_vars[i]

		# Midpoint schemes introduce extra "mid" variables between nodes.
		if self.colloc_method in MIDPOINT_METHODS:
			self.mid_all_vars = [Symbol(str(var)+"_mid") for var in self.all_vars]
			self.mid_dict = {}
			for i in range(len(self.all_vars)):
				self.mid_dict[self.all_vars[i]] = self.mid_all_vars[i]
		else:
			self.mid_all_vars = []

		X = Matrix(state_vars)
		U = Matrix(control_vars)

		# Scalar Objective (default: integrated control effort per scheme)
		if self.objective is None:
			if self.colloc_method in [HERM]:
				Obj = 0
				for i in range(self.U_dim):
					effort = self.control_vars[i]**2
					Obj += (self.h/6.0) * (effort + 4.0 * effort.subs(self.mid_dict) + effort.subs(self.prev_dict))
			elif self.colloc_method in [RADAU]:
				Obj = 0
				for i in range(self.U_dim):
					effort = self.control_vars[i]**2
					Obj += (self.h/4.0) * (3.0 * effort.subs(self.mid_dict) + effort)
			else:
				effort = self.h * U.multiply_elementwise(U)
				Obj = np.sum(effort[:])

		# Equality Constraints (collocation defect residuals)
		C_eq = []
		if colloc_method == TRAP:
			# Trapezoid method
			for i in range(self.X_dim):
				C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - 0.5 * self.h * (ode[i] + ode[i].subs(self.prev_dict))]
		elif colloc_method == EB:
			# Euler Backward method
			for i in range(self.X_dim):
				C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - self.h * ode[i]]
		elif colloc_method == EF:
			# Euler Forward method
			for i in range(self.X_dim):
				C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - self.h * ode[i].subs(self.prev_dict)]
		elif colloc_method == HERM:
			# Hermite Simpson method
			self.Ntilde=self.Ntilde*2-1 # actual number of node points due to addition of "mid" points
			for i in range(self.X_dim):
				C_eq+=[state_vars[i].subs(self.mid_dict) - 0.5 * (state_vars[i] + state_vars[i].subs(self.prev_dict)) - (self.h/8.0) * (ode[i].subs(self.prev_dict) - ode[i])]
			for i in range(self.X_dim):
				C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - (self.h/6.0) * (ode[i] + 4.0 * ode[i].subs(self.mid_dict) + ode[i].subs(self.prev_dict))]
		elif colloc_method == RADAU:
			# Radau 3rd order
			self.Ntilde=self.Ntilde*2-1 # actual number of node points due to addition of "mid" points
			for i in range(self.X_dim):
				C_eq+=[state_vars[i].subs(self.mid_dict) - state_vars[i].subs(self.prev_dict)-5.0/12.0*self.h*ode[i].subs(self.mid_dict)+1.0/12.0*self.h*ode[i]] # intermediate point residue
			for i in range(self.X_dim):
				C_eq+=[state_vars[i] - state_vars[i].subs(self.prev_dict)-3.0/4.0*self.h*ode[i].subs(self.mid_dict)-1.0/4.0*self.h*ode[i]] # end point residue

		# Compile objective and equality constraints
		self.equality_constr = EqualityConstraints(self, Matrix(C_eq))
		if self.objective is None:
			self.objective = Objective(self, Obj)

	def solve(self, x0: np.array = None, bounds: list = None, solver: str='scipy')->Solution:
		"""
		Solve the direct collocation problem as a nonlinear program.

		Parameters
		----------
		x0 -- initial guess for solution, if not provided, an educated guess is based on initial/final state.
		bounds -- list of [upper, lower] bound lists, one for each variable (order should match x0)
		solver -- which optimizer to use (options: scipy, ipopt)

		Returns
		-------
		pydcol.Solution containing solution and problem metadata
		"""
		self.is_solved = False

		if x0 is None:
			# Initialize optimization variables: linear interpolation between
			# start and goal states plus a mid-range control guess.
			if bounds is not None:
				u_bounds = bounds[self.X_dim:]
				u_mid = []
				for ubnd in u_bounds:
					if ubnd[0] is not None and ubnd[1] is not None:
						u_mid += [(ubnd[0]+ubnd[1])/2.0]
					elif ubnd[1] is not None:
						u_mid += [ubnd[1]]
					elif ubnd[0] is not None:
						u_mid += [ubnd[0]]
					else:
						u_mid += [0.0]
			else:
				u_mid = [0.1] * self.U_dim

			x0 = [self.X_start.tolist() + u_mid]
			x0_mid = []
			for i in range(self.N - 1):
				if self.X_goal is not None:
					xnew = self.X_start + (self.X_goal - self.X_start) * i / self.Ntilde
				else:
					xnew = self.X_start + i / self.Ntilde
				x0.append(xnew.tolist() + u_mid)
				if self.N != self.Ntilde:
					# Midpoint schemes: seed mid variables as segment averages.
					x0_mid.append(0.5*(np.array(x0[-1]) + np.array(x0[-2])))
			x0 = np.array(x0 + x0_mid).ravel()

		if solver=='scipy':
			_bounds = bounds * self.Ntilde

			# Problem constraints
			constr_eq = NonlinearConstraint(self.equality_constr.eval,
								lb=0,
								ub=0,
								jac=self.equality_constr.jac,
								hess=self.equality_constr.hess)

			# Solve Problem
			sol_opt = minimize(self.objective.eval,
							x0,
							method="trust-constr",
							jac=self.objective.jac,
							hess=self.objective.hess,
							constraints=(constr_eq),
							bounds=_bounds,
							options={'sparse_jacobian': True})

			# convert scipy solution to our format
			self.sol_c = Solution(sol_opt, self.colloc_method, (self.N, self.Ntilde, self.X_dim, self.U_dim), self.tspan, solver)
			self.is_solved = sol_opt.success
		elif solver == "ipopt":
			if not _ipyopt_imported:
				raise(ImportError("Ipyopt could not be imported! Please use scipy solver."))
			# setup variable bounds (None -> +/-1e9 as "unbounded")
			nvar = self.Ntilde * len(bounds)
			x_L = np.zeros(nvar)
			x_U = np.zeros(nvar)
			v_idx = 0
			for i in range(self.Ntilde):
				for b_pair in bounds:
					if b_pair[0] is None:
						x_L[v_idx] = -1e9
					else:
						x_L[v_idx] = b_pair[0]
					if b_pair[1] is None:
						x_U[v_idx] = 1e9
					else:
						x_U[v_idx] = b_pair[1]
					v_idx += 1

			# setup equality constraints
			ncon = self.equality_constr.eval(x0).size
			g_L = np.zeros((ncon,))
			g_U = np.zeros((ncon,))

			# finding out which entries of the constraint jacobian and problem hessian are allways
			# nonzero.
			jac_g_idx = self.equality_constr.jac(x0, return_sparse_indices=True)

			lagrange = np.ones(ncon)
			h_obj_idx = self.objective.hess(x0, return_sparse_indices=True)
			h_con_idx = self.equality_constr.hess(x0, lagrange, return_sparse_indices=True)

			# merge objective and constraint hessian indices
			coords = set()
			for i in range(len(h_obj_idx[0])):
				coords.add((h_obj_idx[0][i], h_obj_idx[1][i]))
			for i in range(len(h_con_idx[0])):
				coords.add((h_con_idx[0][i], h_con_idx[1][i]))
			coords = np.array(list(coords))
			h_idx = (coords[:,0], coords[:,1])

			# ipyopt callback wrappers: each writes its result into `out`.
			def eval_grad_f(x, out):
				out[()] = self.objective.jac(x).ravel()
				return out

			def eval_g(x, out):
				out[()] = self.equality_constr.eval(x).ravel()
				return out

			def eval_jac_g(x, out):
				out[()] = self.equality_constr.jac(x).data
				return out

			def eval_h(x, lagrange, obj_factor, out):
				"""
				Combined hessian for the problem.
				"""
				H = self.objective.hess(x) * (obj_factor) + self.equality_constr.hess(x, lagrange)
				out[()] = H.data
				return out

			nlp = ipyopt.Problem(nvar, x_L, x_U,
							ncon, g_L, g_U,
							jac_g_idx, h_idx,
							self.objective.eval, eval_grad_f,
							eval_g, eval_jac_g, eval_h)

			# nlp.set(print_level=0)
			sol_x, obj, status = nlp.solve(x0)

			# convert scipy solution to our format
			self.sol_c = Solution(sol_x, self.colloc_method, (self.N, self.Ntilde, self.X_dim, self.U_dim), self.tspan, solver)
			self.is_solved = (status == 0) or (status == 1) # solver either succeeded or converged to acceptable accuracy
		else:
			raise(BadArgumentsError("Error unsupported solver!"))

		self.sol_c.obj = self.objective.eval(self.sol_c.opt_x)
		print("Done")
		if self.is_solved:
			print("Success :-)")
		else:
			print("Failure :-(")
		return self.sol_c

	def evaluate(self, ivp_method: str='RK45'):
		"""
		Creates a plot comparing the direct collocation solution to an implicit IVP solver solution
		generated by applying the U from the solution from the initial condition from t0 to tf.

		Parameters
		----------
		ivp_method -- string representing ivp solution method to use

		Returns
		-------
		None
		"""
		tspan = self.sol_c.t
		X = self.sol_c.x.copy()
		U = self.sol_c.u

		# Closed-loop dynamics using the solved control interpolant u_t.
		def system_eqs(t, x_t):
			U_t = self.sol_c.u_t(t)
			return self.ode_fun(*x_t, *U_t).ravel()

		eval_tspan = np.linspace(tspan[0],tspan[-1],100)
		sol_ivp = solve_ivp(system_eqs, [tspan[0],tspan[-1]], self.X_start, method=ivp_method, t_eval=eval_tspan)

		colors = ['k', 'g', 'b', 'r', 'c', 'm', 'y']

		_, axs = plt.subplots(2, 1)
		axs[0].set_title("Collocation Points vs. Integration Results")
		for i in range(self.X_dim):
			axs[0].plot(tspan, X[:,i],'o',color=colors[i],markersize=3)
			axs[0].plot(sol_ivp.t, sol_ivp.y[i,:],color=colors[i])
		axs[0].set_ylabel("State Variables")
		axs[0].plot([], [],'o',color='k',label='Colloc solution')
		axs[0].plot([], [],color='k',label='IVP solution')
		axs[0].legend()

		U_t = np.array(self.sol_c.u_t(sol_ivp.t)).T.reshape(-1, self.U_dim)
		for j in range(self.U_dim):
			axs[1].plot(tspan, U[:,j],'o',color=colors[j],markersize=3)
			axs[1].plot(sol_ivp.t, U_t[:,j],color=colors[j])
		axs[1].set_ylabel("Control Variables")
		axs[1].set_xlabel("Time [s]")
		plt.show()
| [
"sympy.Symbol",
"numpy.ones",
"sympy.core.function.BadArgumentsError",
"scipy.optimize.minimize",
"scipy.integrate.solve_ivp",
"sympy.Matrix",
"scipy.optimize.NonlinearConstraint",
"numpy.sum",
"numpy.linspace",
"numpy.zeros",
"numpy.array",
"ipyopt.Problem",
"matplotlib.pyplot.subplots",
... | [((1312, 1323), 'sympy.Symbol', 'Symbol', (['"""h"""'], {}), "('h')\n", (1318, 1323), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((1973, 1991), 'sympy.Matrix', 'Matrix', (['state_vars'], {}), '(state_vars)\n', (1979, 1991), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((1998, 2018), 'sympy.Matrix', 'Matrix', (['control_vars'], {}), '(control_vars)\n', (2004, 2018), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((9672, 9709), 'numpy.linspace', 'np.linspace', (['tspan[0]', 'tspan[-1]', '(100)'], {}), '(tspan[0], tspan[-1], 100)\n', (9683, 9709), True, 'import numpy as np\n'), ((9720, 9821), 'scipy.integrate.solve_ivp', 'solve_ivp', (['system_eqs', '[tspan[0], tspan[-1]]', 'self.X_start'], {'method': 'ivp_method', 't_eval': 'eval_tspan'}), '(system_eqs, [tspan[0], tspan[-1]], self.X_start, method=\n ivp_method, t_eval=eval_tspan)\n', (9729, 9821), False, 'from scipy.integrate import solve_ivp\n'), ((9876, 9894), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (9888, 9894), True, 'import matplotlib.pyplot as plt\n'), ((10573, 10583), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10581, 10583), True, 'import matplotlib.pyplot as plt\n'), ((935, 951), 'sympy.Matrix', 'Matrix', (['self.ode'], {}), '(self.ode)\n', (941, 951), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((4329, 4341), 'sympy.Matrix', 'Matrix', (['C_eq'], {}), '(C_eq)\n', (4335, 4341), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((5916, 6041), 'scipy.optimize.NonlinearConstraint', 'NonlinearConstraint', (['self.equality_constr.eval'], {'lb': '(0)', 'ub': '(0)', 'jac': 'self.equality_constr.jac', 'hess': 'self.equality_constr.hess'}), '(self.equality_constr.eval, lb=0, ub=0, jac=self.\n equality_constr.jac, hess=self.equality_constr.hess)\n', (5935, 6041), False, 'from scipy.optimize import minimize, NonlinearConstraint\n'), ((6114, 6303), 'scipy.optimize.minimize', 'minimize', (['self.objective.eval', 
'x0'], {'method': '"""trust-constr"""', 'jac': 'self.objective.jac', 'hess': 'self.objective.hess', 'constraints': 'constr_eq', 'bounds': '_bounds', 'options': "{'sparse_jacobian': True}"}), "(self.objective.eval, x0, method='trust-constr', jac=self.objective\n .jac, hess=self.objective.hess, constraints=constr_eq, bounds=_bounds,\n options={'sparse_jacobian': True})\n", (6122, 6303), False, 'from scipy.optimize import minimize, NonlinearConstraint\n'), ((6756, 6770), 'numpy.zeros', 'np.zeros', (['nvar'], {}), '(nvar)\n', (6764, 6770), True, 'import numpy as np\n'), ((6780, 6794), 'numpy.zeros', 'np.zeros', (['nvar'], {}), '(nvar)\n', (6788, 6794), True, 'import numpy as np\n'), ((7156, 7173), 'numpy.zeros', 'np.zeros', (['(ncon,)'], {}), '((ncon,))\n', (7164, 7173), True, 'import numpy as np\n'), ((7183, 7200), 'numpy.zeros', 'np.zeros', (['(ncon,)'], {}), '((ncon,))\n', (7191, 7200), True, 'import numpy as np\n'), ((7393, 7406), 'numpy.ones', 'np.ones', (['ncon'], {}), '(ncon)\n', (7400, 7406), True, 'import numpy as np\n'), ((8383, 8514), 'ipyopt.Problem', 'ipyopt.Problem', (['nvar', 'x_L', 'x_U', 'ncon', 'g_L', 'g_U', 'jac_g_idx', 'h_idx', 'self.objective.eval', 'eval_grad_f', 'eval_g', 'eval_jac_g', 'eval_h'], {}), '(nvar, x_L, x_U, ncon, g_L, g_U, jac_g_idx, h_idx, self.\n objective.eval, eval_grad_f, eval_g, eval_jac_g, eval_h)\n', (8397, 8514), False, 'import ipyopt\n'), ((8908, 8954), 'sympy.core.function.BadArgumentsError', 'BadArgumentsError', (['"""Error unsupported solver!"""'], {}), "('Error unsupported solver!')\n", (8925, 8954), False, 'from sympy.core.function import BadArgumentsError\n'), ((2548, 2565), 'numpy.sum', 'np.sum', (['effort[:]'], {}), '(effort[:])\n', (2554, 2565), True, 'import numpy as np\n'), ((5788, 5809), 'numpy.array', 'np.array', (['(x0 + x0_mid)'], {}), '(x0 + x0_mid)\n', (5796, 5809), True, 'import numpy as np\n'), ((5742, 5758), 'numpy.array', 'np.array', (['x0[-1]'], {}), '(x0[-1])\n', (5750, 5758), True, 'import numpy as 
np\n'), ((5761, 5777), 'numpy.array', 'np.array', (['x0[-2]'], {}), '(x0[-2])\n', (5769, 5777), True, 'import numpy as np\n')] |
from sqlalchemy import Column, Integer, String, DateTime
from .database import Base
class Website(Base):
    """ORM model describing a website processing/crawl job and its status."""
    __tablename__ = "websites"
    # Surrogate primary key (primary_key already implies unique + indexed;
    # the explicit flags are kept as-is).
    id = Column(Integer, primary_key=True, index=True, unique=True)
    # Target URL of the website.
    url = Column(String)
    # Timestamp when processing of this website began.
    started_at = Column(DateTime)
    # Processing state; rows start out as "pending".
    status = Column(String, default="pending")
    # Timestamp set once processing finishes; NULL while still running.
    completed_at = Column(DateTime, default=None)
| [
"sqlalchemy.Column"
] | [((149, 207), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'index': '(True)', 'unique': '(True)'}), '(Integer, primary_key=True, index=True, unique=True)\n', (155, 207), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((218, 232), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (224, 232), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((250, 266), 'sqlalchemy.Column', 'Column', (['DateTime'], {}), '(DateTime)\n', (256, 266), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((280, 313), 'sqlalchemy.Column', 'Column', (['String'], {'default': '"""pending"""'}), "(String, default='pending')\n", (286, 313), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((333, 363), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'None'}), '(DateTime, default=None)\n', (339, 363), False, 'from sqlalchemy import Column, Integer, String, DateTime\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
linear_math_tests.test_transform
"""
from __future__ import unicode_literals, print_function, absolute_import
import unittest
import math
import bullet
class TestTransform(unittest.TestCase):
    """Unit tests for bullet.btTransform: construction, multiplication,
    basis/rotation/origin accessors, identity and inverse operations."""
    def setUp(self):
        # Fresh fixtures per test: values 0..8 for a 3x3 matrix, the zero
        # vector, the identity quaternion, and five default transforms.
        self.v1 = tuple(float(i) for i in range(0, 9))
        self.vec = bullet.btVector3(0, 0, 0)
        self.q = bullet.btQuaternion.identity
        self.m = bullet.btMatrix3x3(*self.v1)
        self.a = bullet.btTransform()
        self.b = bullet.btTransform()
        self.c = bullet.btTransform()
        self.d = bullet.btTransform()
        self.e = bullet.btTransform()
    # Constructors from matrix / matrix+vector / quaternion / quaternion+vector.
    def test_ctors(self):
        self.a = bullet.btTransform(bullet.btMatrix3x3(*self.v1))
        self.b = bullet.btTransform(bullet.btMatrix3x3(*self.v1),
                                   bullet.btVector3(0, 0, 0))
        self.c = bullet.btTransform(bullet.btQuaternion.identity)
        self.d = bullet.btTransform(bullet.btQuaternion.identity,
                                   bullet.btVector3(0, 0, 0))
        self.assertEqual(self.a, self.b)
    def test_str(self):
        # Two transforms built from the same quaternion compare equal.
        self.a = bullet.btTransform(self.q)
        self.b = bullet.btTransform(self.q)
        self.assertEqual(self.a, self.b)
    def test_mult(self):
        # c.mult(a, b) stores a*b into c, which must differ from a here.
        self.a = bullet.btTransform(self.q)
        self.b = bullet.btTransform(bullet.btQuaternion(1, 1, 0, 0))
        self.c = bullet.btTransform(self.q)
        self.c.mult(self.a, self.b)
        self.assertFalse(self.c == self.a)
    def test_basis(self):
        # get_basis/set_basis round-trip, plus in-place row mutation.
        self.a = bullet.btTransform(self.m)
        self.assertEqual(self.a.get_basis(), self.m)
        w = (4, 5, 6)
        self.a.basis[0] = bullet.btVector3(*w)
        self.assertFalse(self.a.get_basis() == self.m)
        self.a.set_basis(self.m)
        self.assertEqual(self.a.get_basis(), self.m)
    def test_rotation(self):
        self.a = bullet.btTransform(self.q)
        self.assertEqual(self.a.get_rotation(), self.q)
        self.q = bullet.btQuaternion(0, 1, 1, 0)
        self.assertNotEqual(self.q, self.a.get_rotation())
        self.a.set_rotation(self.q)
        print(self.q.normalized(), self.a.get_rotation())
        # Hack to bypass numeric imprecision: compare string renderings.
        # TODO: Extend test case to implement assertAlmostEqual with matrices
        self.assertTrue(str(self.q.normalized()) == str(self.a.get_rotation()))
    def test_origin(self):
        # get_origin/set_origin round-trip and in-place origin mutation.
        self.a = bullet.btTransform(self.q)
        self.b = bullet.btTransform(self.q)
        self.vec = self.a.get_origin()
        self.assertEqual(self.vec, self.a.get_origin())
        self.assertEqual(self.a.get_origin(), bullet.btVector3(0, 0, 0))
        self.a.origin.set_value(1, 0, 0)
        self.assertEqual(self.a.get_origin(), bullet.btVector3(1, 0, 0))
        self.a.set_origin(bullet.btVector3(0, 1, 0))
        self.assertEqual(self.a.get_origin(), bullet.btVector3(0, 1, 0))
    def test_identit(self):
        # identity transform properties and set_identity() reset.
        self.a = bullet.btTransform.identity
        self.b = bullet.btTransform(bullet.btQuaternion(1, 0, 0, 0),
                                   bullet.btVector3(0, 1, 0))
        self.assertEqual(self.a.get_rotation(), bullet.btQuaternion.identity)
        self.assertEqual(self.a.get_origin(), self.vec)
        print(self.b.get_origin(), self.vec)
        self.assertFalse(self.b.get_origin() == self.vec)
        self.b.set_identity()
        self.assertEqual(self.a, self.b)
        self.assertEqual(bullet.btTransform.identity, self.a)
    def test_inverse(self):
        # inverse(), inv_xform() and inverse_times() against hand-computed values.
        self.a = bullet.btTransform(self.q, bullet.btVector3(0, 0, 1))
        self.b = bullet.btTransform(self.q, bullet.btVector3(0, 0, -1))
        self.c = self.a.inverse()
        self.d = bullet.btTransform(bullet.btQuaternion.identity,
                                   bullet.btVector3(1, 0, 0))
        self.assertEqual(self.b, self.c)
        self.vec = self.d.inv_xform(bullet.btVector3(0, 1, 0))
        self.assertEqual(self.vec, bullet.btVector3(-1, 1, 0))
        self.e = self.b.inverse_times(self.a)
        self.assertEqual(self.e, bullet.btTransform(
            bullet.btQuaternion.identity,
            bullet.btVector3(0, 0, 2)))
    def test_mul(self):
        # Operator forms: a * b and a *= b must agree.
        self.a = bullet.btTransform.identity
        self.b = bullet.btTransform.identity
        self.a.set_origin(bullet.btVector3(1, 1, 1))
        self.c = self.a * self.b
        self.a *= self.b
        self.assertEqual(self.c, self.a)
    def tearDown(self):
        del self.a
        del self.b
        del self.c
        del self.d
        del self.e
| [
"bullet.btMatrix3x3",
"bullet.btTransform",
"bullet.btQuaternion",
"bullet.btVector3"
] | [((341, 366), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (357, 366), False, 'import bullet\n'), ((430, 458), 'bullet.btMatrix3x3', 'bullet.btMatrix3x3', (['*self.v1'], {}), '(*self.v1)\n', (448, 458), False, 'import bullet\n'), ((477, 497), 'bullet.btTransform', 'bullet.btTransform', ([], {}), '()\n', (495, 497), False, 'import bullet\n'), ((515, 535), 'bullet.btTransform', 'bullet.btTransform', ([], {}), '()\n', (533, 535), False, 'import bullet\n'), ((553, 573), 'bullet.btTransform', 'bullet.btTransform', ([], {}), '()\n', (571, 573), False, 'import bullet\n'), ((591, 611), 'bullet.btTransform', 'bullet.btTransform', ([], {}), '()\n', (609, 611), False, 'import bullet\n'), ((629, 649), 'bullet.btTransform', 'bullet.btTransform', ([], {}), '()\n', (647, 649), False, 'import bullet\n'), ((889, 937), 'bullet.btTransform', 'bullet.btTransform', (['bullet.btQuaternion.identity'], {}), '(bullet.btQuaternion.identity)\n', (907, 937), False, 'import bullet\n'), ((1150, 1176), 'bullet.btTransform', 'bullet.btTransform', (['self.q'], {}), '(self.q)\n', (1168, 1176), False, 'import bullet\n'), ((1194, 1220), 'bullet.btTransform', 'bullet.btTransform', (['self.q'], {}), '(self.q)\n', (1212, 1220), False, 'import bullet\n'), ((1305, 1331), 'bullet.btTransform', 'bullet.btTransform', (['self.q'], {}), '(self.q)\n', (1323, 1331), False, 'import bullet\n'), ((1418, 1444), 'bullet.btTransform', 'bullet.btTransform', (['self.q'], {}), '(self.q)\n', (1436, 1444), False, 'import bullet\n'), ((1568, 1594), 'bullet.btTransform', 'bullet.btTransform', (['self.m'], {}), '(self.m)\n', (1586, 1594), False, 'import bullet\n'), ((1696, 1716), 'bullet.btVector3', 'bullet.btVector3', (['*w'], {}), '(*w)\n', (1712, 1716), False, 'import bullet\n'), ((1905, 1931), 'bullet.btTransform', 'bullet.btTransform', (['self.q'], {}), '(self.q)\n', (1923, 1931), False, 'import bullet\n'), ((2005, 2036), 'bullet.btQuaternion', 'bullet.btQuaternion', (['(0)', 
'(1)', '(1)', '(0)'], {}), '(0, 1, 1, 0)\n', (2024, 2036), False, 'import bullet\n'), ((2438, 2464), 'bullet.btTransform', 'bullet.btTransform', (['self.q'], {}), '(self.q)\n', (2456, 2464), False, 'import bullet\n'), ((2482, 2508), 'bullet.btTransform', 'bullet.btTransform', (['self.q'], {}), '(self.q)\n', (2500, 2508), False, 'import bullet\n'), ((713, 741), 'bullet.btMatrix3x3', 'bullet.btMatrix3x3', (['*self.v1'], {}), '(*self.v1)\n', (731, 741), False, 'import bullet\n'), ((779, 807), 'bullet.btMatrix3x3', 'bullet.btMatrix3x3', (['*self.v1'], {}), '(*self.v1)\n', (797, 807), False, 'import bullet\n'), ((845, 870), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (861, 870), False, 'import bullet\n'), ((1040, 1065), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1056, 1065), False, 'import bullet\n'), ((1368, 1399), 'bullet.btQuaternion', 'bullet.btQuaternion', (['(1)', '(1)', '(0)', '(0)'], {}), '(1, 1, 0, 0)\n', (1387, 1399), False, 'import bullet\n'), ((2650, 2675), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2666, 2675), False, 'import bullet\n'), ((2764, 2789), 'bullet.btVector3', 'bullet.btVector3', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (2780, 2789), False, 'import bullet\n'), ((2817, 2842), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (2833, 2842), False, 'import bullet\n'), ((2890, 2915), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (2906, 2915), False, 'import bullet\n'), ((3027, 3058), 'bullet.btQuaternion', 'bullet.btQuaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (3046, 3058), False, 'import bullet\n'), ((3096, 3121), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (3112, 3121), False, 'import bullet\n'), ((3566, 3591), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(0)', '(1)'], {}), '(0, 0, 
1)\n', (3582, 3591), False, 'import bullet\n'), ((3637, 3663), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(0)', '(-1)'], {}), '(0, 0, -1)\n', (3653, 3663), False, 'import bullet\n'), ((3801, 3826), 'bullet.btVector3', 'bullet.btVector3', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (3817, 3826), False, 'import bullet\n'), ((3905, 3930), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (3921, 3930), False, 'import bullet\n'), ((3967, 3993), 'bullet.btVector3', 'bullet.btVector3', (['(-1)', '(1)', '(0)'], {}), '(-1, 1, 0)\n', (3983, 3993), False, 'import bullet\n'), ((4343, 4368), 'bullet.btVector3', 'bullet.btVector3', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (4359, 4368), False, 'import bullet\n'), ((4174, 4199), 'bullet.btVector3', 'bullet.btVector3', (['(0)', '(0)', '(2)'], {}), '(0, 0, 2)\n', (4190, 4199), False, 'import bullet\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributed
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.autograd import Function
# ********************* range_trackers *********************
class RangeTracker(nn.Module):
    """Observes the (min, max) value range of tensors passed through it.

    Subclasses implement ``update_range`` to decide how newly observed
    extrema are folded into the stored range.

    Args:
        q_level: 'L' for one layer-wide scalar range, 'C' for one range
            per dim-0 slice (dims 1 and 2 are reduced with keepdim).
    """
    def __init__(self, q_level):
        super().__init__()
        self.q_level = q_level

    def update_range(self, min_val, max_val):
        # Fixed: the original raised the undefined name
        # ``NotImplementedErrorF``, which produced a NameError at runtime
        # instead of the intended NotImplementedError.
        raise NotImplementedError

    @torch.no_grad()
    def forward(self, input):
        # Gradients are never needed for range statistics, hence no_grad.
        if self.q_level == 'L':
            # Layer-wise: scalar extrema over the whole tensor.
            min_val = torch.min(input)
            max_val = torch.max(input)
        elif self.q_level == 'C':
            # Channel-wise: reduce dims 2 then 1 (keepdim), leaving one
            # extremum per dim-0 slice with shape (D0, 1, 1).
            min_val = torch.min(torch.min(input, 2, keepdim=True)[0], 1, keepdim=True)[0]
            max_val = torch.max(torch.max(input, 2, keepdim=True)[0], 1, keepdim=True)[0]
        self.update_range(min_val, max_val)
class GlobalRangeTracker(RangeTracker):
    """Per-channel tracker keeping the running global min/max ever seen.

    Buffers have shape (out_channels, 1, 1); typically used with
    q_level 'C' for convolution weights.
    """
    def __init__(self, q_level, out_channels):
        super().__init__(q_level)
        self.register_buffer('min_val', torch.zeros(out_channels, 1, 1))
        self.register_buffer('max_val', torch.zeros(out_channels, 1, 1))
        # Stays 0 until the first observation has been recorded.
        self.register_buffer('first_w', torch.zeros(1))

    def update_range(self, min_val, max_val):
        if self.first_w == 0:
            # First observation: adopt the extrema as-is.
            self.first_w.add_(1)
            self.min_val.add_(min_val)
            self.max_val.add_(max_val)
        else:
            # Fixed an aliasing bug: the original did
            #   temp_minval = self.min_val
            #   self.min_val.add_(-temp_minval).add_(torch.min(temp_minval, min_val))
            # but ``temp_minval`` was a *reference* to the buffer, not a
            # copy, so ``add_(-temp_minval)`` zeroed both and the running
            # extremum was computed against 0 instead of the previous
            # value.  Update in place against the genuine previous values.
            self.min_val.copy_(torch.min(self.min_val, min_val))
            self.max_val.copy_(torch.max(self.max_val, max_val))
class AveragedRangeTracker(RangeTracker):
    """Layer-wise range tracker using an exponential moving average (EMA)."""
    def __init__(self, q_level, momentum=0.1):
        super().__init__(q_level)
        self.momentum = momentum
        # Scalar running extrema plus a "seen first batch" flag.
        for buf_name in ('min_val', 'max_val', 'first_a'):
            self.register_buffer(buf_name, torch.zeros(1))

    def update_range(self, min_val, max_val):
        if self.first_a != 0:
            # EMA update: new = (1 - m) * old + m * observed.
            m = self.momentum
            self.min_val.mul_(1 - m).add_(min_val * m)
            self.max_val.mul_(1 - m).add_(max_val * m)
        else:
            # Seed the EMA with the very first observation.
            self.first_a.add_(1)
            self.min_val.add_(min_val)
            self.max_val.add_(max_val)
class LinearWeightRangeTracker(RangeTracker):
    """Range tracker that simply stores the latest observed extrema.

    Unlike the running/averaged trackers above, every call to
    ``update_range`` overwrites the previously stored range.
    """
    def __init__(self,q_level):
        super().__init__(q_level)
        self.register_buffer('min_val',torch.zeros(1))
        self.register_buffer('max_val',torch.zeros(1))
        # Kept for symmetry with the other trackers; never updated here.
        self.register_buffer('first_l', torch.zeros(1))
    def update_range(self, min_val, max_val):
        # Plain assignment replaces the registered buffers with the freshly
        # observed tensors (no in-place running statistics).
        self.min_val=min_val
        self.max_val=max_val
# ********************* quantizers*********************
class Round(Function):
    """Straight-through rounding: round to the nearest integer in the
    forward pass, pass the gradient through unchanged in the backward pass."""

    @staticmethod
    def forward(ctx, tensor):
        # Nearest-integer rounding (non-differentiable on its own).
        return torch.round(tensor)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: identity gradient.
        return grad_output.clone()
class Quantizer(nn.Module):
    """Base fake-quantizer: quantize -> round (straight-through) -> clamp
    -> dequantize, driven by a range tracker.

    Subclasses register ``min_val``/``max_val`` integer-bound buffers and
    implement ``update_params`` to derive ``scale``/``zero_point`` from the
    tracked float range.
    """
    def __init__(self, bits, range_tracker):
        super().__init__()
        self.bits = bits
        self.range_tracker = range_tracker
        # Populated lazily by update_params() on the first forward pass.
        self.register_buffer('scale', None)
        self.register_buffer('zero_point', None)
    def update_params(self):
        raise NotImplementedError
    def quantize(self, input):
        # Map float values into the integer grid.
        output = input / self.scale + self.zero_point
        return output
    def round(self, input):
        # Straight-through rounding (identity gradient in backward).
        output = Round.apply(input)
        return output
    def clamp(self, input):
        # min_val/max_val are the integer bounds set by the subclass.
        output = torch.clamp(input, self.min_val, self.max_val)
        return output
    def dequantize(self, input):
        # Map integers back to the float domain.
        output = (input - self.zero_point) * self.scale
        return output
    def forward(self, input):
        if self.bits == 32:
            # Full precision: pass through unchanged.
            output = input
        elif self.bits == 1:
            print('!Binary quantization is not supported !')
            assert self.bits != 1
        else:
            # Observe the current range, refresh scale/zero_point, then
            # run the full fake-quantization pipeline.
            self.range_tracker(input)
            self.update_params()
            output = self.quantize(input)
            output = self.round(output)
            output = self.clamp(output)
            output = self.dequantize(output)
        return output
return output
class SignedQuantizer(Quantizer):
    """Quantizer with a signed integer target range
    [-2^(bits-1), 2^(bits-1) - 1], e.g. [-128, 127] for 8 bits."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.register_buffer('min_val', torch.tensor(-(1 << (self.bits - 1))))
        self.register_buffer('max_val', torch.tensor((1 << (self.bits - 1)) - 1))
class UnsignedQuantizer(Quantizer):
    """Quantizer with an unsigned integer target range
    [0, 2^bits - 1], e.g. [0, 255] for 8 bits."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.register_buffer('min_val', torch.tensor(0))
        self.register_buffer('max_val', torch.tensor((1 << self.bits) - 1))
class SymmetricQuantizer(SignedQuantizer):
    """Symmetric quantizer: zero_point fixed at 0, scale derived from the
    largest tracked magnitude."""
    def update_params(self):
        # Usable integer magnitude (the tighter of |min_val| and |max_val|).
        quantized_range = torch.min(torch.abs(self.min_val), torch.abs(self.max_val))
        # Largest tracked float magnitude.
        float_range = torch.max(torch.abs(self.range_tracker.min_val), torch.abs(self.range_tracker.max_val))
        # Fixed: the scale was inverted (quantized_range / float_range).
        # ``Quantizer.quantize`` divides by ``scale`` (and ``dequantize``
        # multiplies by it), and AsymmetricQuantizer in this file computes
        # float_range / quantized_range, so the symmetric variant must use
        # the same orientation — otherwise inputs are mapped far outside
        # (or nowhere near) the integer range before clamping.
        self.scale = float_range / quantized_range
        self.zero_point = torch.zeros_like(self.scale)
class AsymmetricQuantizer(UnsignedQuantizer):
    """Affine (asymmetric) quantizer: per-range scale plus a non-zero
    zero_point so the full unsigned integer range is used."""
    def update_params(self):
        # Width of the integer range, e.g. 255 for 8 bits.
        quantized_range = self.max_val - self.min_val
        # Width of the tracked float range.
        float_range = self.range_tracker.max_val - self.range_tracker.min_val
        self.scale = float_range/quantized_range
        # Offset chosen so the tracked float max maps onto the integer max.
        self.zero_point = torch.round(self.max_val - self.range_tracker.max_val / self.scale)
# ********************* quantify the convolution operation *********************
class Conv1d_Q(nn.Conv1d):
    """1-D convolution with fake-quantized activations and weights.

    Activations use a layer-wise averaged range tracker; weights use a
    per-channel global range tracker.  ``q_type`` 0 selects symmetric,
    anything else asymmetric quantization.  When ``first_layer`` is
    truthy the input activations are left unquantized.
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        a_bits=8,
        w_bits=8,
        q_type=1,
        first_layer=0,
    ):
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias
        )
        # Pick the quantizer family once at construction time.
        if q_type == 0:
            self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
            self.weight_quantizer = SymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C', out_channels=out_channels))
        else:
            self.activation_quantizer = AsymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
            self.weight_quantizer = AsymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C', out_channels=out_channels))
        self.first_layer = first_layer
    def forward(self, input):
        # The very first layer receives raw (unquantized) input.
        if not self.first_layer:
            input = self.activation_quantizer(input)
        q_input = input
        q_weight = self.weight_quantizer(self.weight)
        # Standard convolution over the fake-quantized tensors; the bias
        # is applied in full precision.
        output = F.conv1d(
            input=q_input,
            weight=q_weight,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups
        )
        return output
def reshape_to_activation(input):
    """View ``input`` as shape (1, N, 1) for activation-style broadcasting."""
    return torch.reshape(input, (1, -1, 1))
def reshape_to_weight(input):
    """View ``input`` as shape (N, 1, 1) for weight-style broadcasting."""
    return torch.reshape(input, (-1, 1, 1))
def reshape_to_bias(input):
    """Flatten ``input`` to a 1-D bias-shaped tensor."""
    return torch.reshape(input, (-1,))
# ********************* quantify the full connection layer *********************
class Linear_Q(nn.Linear):
    """Fully-connected layer with fake-quantized activations and weights.

    Both use the latest-value LinearWeightRangeTracker; ``q_type`` 0
    selects symmetric, anything else asymmetric quantization.
    """
    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        a_bits =16,
        w_bits =8,
        q_type = 1,
    ):
        super(Linear_Q,self).__init__(
            in_features = in_features,
            out_features = out_features,
            bias = bias
        )
        # Pick the quantizer family once at construction time.
        if q_type == 0:
            self.activation_quantizer = SymmetricQuantizer(bits = a_bits, range_tracker=LinearWeightRangeTracker(q_level='L'))
            self.weight_quantizer = SymmetricQuantizer(bits = w_bits, range_tracker=LinearWeightRangeTracker(q_level='L'))
        else:
            self.activation_quantizer = AsymmetricQuantizer(bits = a_bits, range_tracker=LinearWeightRangeTracker(q_level='L'))
            self.weight_quantizer = AsymmetricQuantizer(bits = w_bits, range_tracker=LinearWeightRangeTracker(q_level='L'))
    def forward(self, input):
        # Quantize input and weight, then apply a standard linear layer;
        # the bias stays in full precision.
        input = self.activation_quantizer(input)
        q_input = input
        q_weight = self.weight_quantizer(self.weight)
        output = F.linear(
            input = q_input,
            weight = q_weight,
            bias = self.bias
        )
        return output
class AvgPool1d_Q(nn.AvgPool1d):
    """1-D average pooling with fake-quantized input activations.

    The input is quantized (symmetric for ``q_type`` 0, asymmetric
    otherwise) before pooling is applied.
    """
    def __init__(
        self,
        kernel_size,
        stride,
        padding = 0,
        a_bits =16,
        q_type = 1,
    ):
        super(AvgPool1d_Q,self).__init__(
            kernel_size = kernel_size,
            stride = stride,
            padding = padding
        )
        if q_type == 0:
            self.activation_quantizer = SymmetricQuantizer(bits = a_bits, range_tracker=LinearWeightRangeTracker(q_level='L'))
        else:
            self.activation_quantizer = AsymmetricQuantizer(bits = a_bits, range_tracker=LinearWeightRangeTracker(q_level='L'))
    def forward(self, input):
        input = self.activation_quantizer(input)
        q_input = input
        # Fixed: the original hard-coded kernel_size=2, stride=2 and
        # silently ignored the pooling parameters passed to the
        # constructor.  Honour the configured values instead (identical
        # behavior for the kernel_size=2/stride=2/padding=0 case).
        output = F.avg_pool1d(q_input, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding)
        return output
# *********************BN fusion and quantization *********************
class BNFold_Conv1d_Q(Conv1d_Q):
    """Quantized 1-D convolution with BatchNorm folded into the weights.

    In training mode the current batch statistics drive the fold and the
    running statistics are updated; in eval mode the stored running
    statistics are used.  The folded weight is then fake-quantized before
    the actual convolution.
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=False,
        eps=1e-5,
        momentum=0.01,
        a_bits=16,
        w_bits=8,
        q_type=1,
        first_layer=0
    ):
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias
        )
        self.eps = eps
        self.momentum = momentum
        # Learnable BatchNorm affine parameters.
        self.gamma = Parameter(torch.Tensor(out_channels))
        self.beta = Parameter(torch.Tensor(out_channels))
        # Running BN statistics plus a "seen first batch" flag.
        self.register_buffer('running_mean', torch.zeros(out_channels))
        self.register_buffer('running_var', torch.ones(out_channels))
        self.register_buffer('first_bn', torch.zeros(1))
        init.uniform_(self.gamma)
        init.zeros_(self.beta)
        if q_type == 0:
            self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
            self.weight_quantizer = SymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C', out_channels=out_channels))
        else:
            self.activation_quantizer = AsymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
            self.weight_quantizer = AsymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C', out_channels=out_channels))
        self.first_layer = first_layer
    def forward(self, input):
        if self.training:
            # Unquantized convolution first, only to obtain this batch's
            # per-channel statistics for the BN fold.
            output = F.conv1d(
                input=input,
                weight=self.weight,
                bias=self.bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups
            )
            # Reduce over batch and length dims (0 and 2), keep channels (1).
            dims = [dim for dim in range(3) if dim != 1]
            batch_mean = torch.mean(output, dim=dims)
            batch_var = torch.var(output, dim=dims)
            with torch.no_grad():
                # Update running statistics: plain copy on the very first
                # batch, exponential moving average afterwards.
                if self.first_bn == 0:
                    self.first_bn.add_(1)
                    self.running_mean.add_(batch_mean)
                    self.running_var.add_(batch_var)
                else:
                    self.running_mean.mul_(1 - self.momentum).add_(batch_mean * self.momentum)
                    self.running_var.mul_(1 - self.momentum).add_(batch_var * self.momentum)
            # Fold BN into conv using *batch* statistics.
            if self.bias is not None:  # i.e. constructed with bias=True
                bias = reshape_to_bias(self.beta + (self.bias - batch_mean) * (self.gamma / torch.sqrt(batch_var + self.eps)))
                weight = self.weight * reshape_to_weight(self.gamma / torch.sqrt(batch_var + self.eps))
            else:
                bias = reshape_to_bias(self.beta - batch_mean * (self.gamma / torch.sqrt(batch_var + self.eps)))
                weight = self.weight * reshape_to_weight(self.gamma / torch.sqrt(batch_var + self.eps))
        else:
            # Eval mode: fold BN using the stored *running* statistics.
            if self.bias is not None:  # i.e. constructed with bias=True
                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (self.gamma / torch.sqrt(self.running_var + self.eps)))
                weight = self.weight * reshape_to_weight(self.gamma / torch.sqrt(self.running_var + self.eps))
            else:
                bias = reshape_to_bias(self.beta - self.running_mean * (self.gamma / torch.sqrt(self.running_var + self.eps)))
                weight = self.weight * reshape_to_weight(self.gamma / torch.sqrt(self.running_var + self.eps))
        # Quantize activations (except in the very first layer) and the
        # folded weight, then convolve with the full-precision folded bias.
        if not self.first_layer:
            input = self.activation_quantizer(input)
        q_input = input
        q_weight = self.weight_quantizer(weight)
        if self.training:
            output = F.conv1d(
                input=q_input,
                weight=q_weight,
                bias = bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups
            )
        else:
            # NOTE(review): identical to the training branch above; kept
            # duplicated as-is to preserve behavior byte-for-byte.
            output = F.conv1d(
                input=q_input,
                weight=q_weight,
                bias = bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups
            )
        return output
class QuanConv1d(nn.Module):
    """Convenience wrapper: ReLU -> quantized conv (BN-folded or conv+BN).

    ``bn_fold`` 1 uses BNFold_Conv1d_Q; otherwise a Conv1d_Q followed by
    a separate BatchNorm1d.  The input ReLU is skipped for the first
    layer, and an output ReLU is applied when ``last_relu`` is truthy.
    """
    def __init__(self, input_channels, output_channels,
            kernel_size=-1, stride=-1, padding=-1, groups=1, last_relu=0, abits=8, wbits=8, bn_fold=0, q_type=1, first_layer=0):
        super(QuanConv1d, self).__init__()
        self.last_relu = last_relu
        self.bn_fold = bn_fold
        self.first_layer = first_layer
        if self.bn_fold == 1:
            # BN parameters are folded into the quantized convolution.
            self.bn_q_conv = BNFold_Conv1d_Q(input_channels, output_channels,
                    kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, a_bits=abits, w_bits=wbits, q_type=q_type, first_layer=first_layer)
        else:
            # Separate quantized conv followed by a regular BatchNorm1d.
            self.q_conv = Conv1d_Q(input_channels, output_channels,
                    kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, a_bits=abits, w_bits=wbits, q_type=q_type, first_layer=first_layer)
            self.bn = nn.BatchNorm1d(output_channels, momentum=0.01)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        # Pre-activation ReLU except for the network's first layer.
        if not self.first_layer:
            x = self.relu(x)
        if self.bn_fold == 1:
            x = self.bn_q_conv(x)
        else:
            x = self.q_conv(x)
            x = self.bn(x)
        if self.last_relu:
            x = self.relu(x)
        return x
| [
"torch.nn.ReLU",
"torch.nn.functional.conv1d",
"torch.max",
"torch.sqrt",
"torch.min",
"torch.nn.BatchNorm1d",
"torch.nn.functional.avg_pool1d",
"torch.nn.functional.linear",
"torch.mean",
"torch.nn.init.zeros_",
"torch.zeros_like",
"torch.nn.init.uniform_",
"torch.abs",
"torch.Tensor",
... | [((472, 487), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (485, 487), False, 'import torch\n'), ((2892, 2910), 'torch.round', 'torch.round', (['input'], {}), '(input)\n', (2903, 2910), False, 'import torch\n'), ((3622, 3668), 'torch.clamp', 'torch.clamp', (['input', 'self.min_val', 'self.max_val'], {}), '(input, self.min_val, self.max_val)\n', (3633, 3668), False, 'import torch\n'), ((5164, 5192), 'torch.zeros_like', 'torch.zeros_like', (['self.scale'], {}), '(self.scale)\n', (5180, 5192), False, 'import torch\n'), ((5477, 5544), 'torch.round', 'torch.round', (['(self.max_val - self.range_tracker.max_val / self.scale)'], {}), '(self.max_val - self.range_tracker.max_val / self.scale)\n', (5488, 5544), False, 'import torch\n'), ((7033, 7179), 'torch.nn.functional.conv1d', 'F.conv1d', ([], {'input': 'q_input', 'weight': 'q_weight', 'bias': 'self.bias', 'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': 'self.groups'}), '(input=q_input, weight=q_weight, bias=self.bias, stride=self.stride,\n padding=self.padding, dilation=self.dilation, groups=self.groups)\n', (7041, 7179), True, 'import torch.nn.functional as F\n'), ((8631, 8687), 'torch.nn.functional.linear', 'F.linear', ([], {'input': 'q_input', 'weight': 'q_weight', 'bias': 'self.bias'}), '(input=q_input, weight=q_weight, bias=self.bias)\n', (8639, 8687), True, 'import torch.nn.functional as F\n'), ((9526, 9572), 'torch.nn.functional.avg_pool1d', 'F.avg_pool1d', (['q_input'], {'kernel_size': '(2)', 'stride': '(2)'}), '(q_input, kernel_size=2, stride=2)\n', (9538, 9572), True, 'import torch.nn.functional as F\n'), ((10682, 10707), 'torch.nn.init.uniform_', 'init.uniform_', (['self.gamma'], {}), '(self.gamma)\n', (10695, 10707), False, 'from torch.nn import init\n'), ((10716, 10738), 'torch.nn.init.zeros_', 'init.zeros_', (['self.beta'], {}), '(self.beta)\n', (10727, 10738), False, 'from torch.nn import init\n'), ((15125, 15146), 'torch.nn.ReLU', 'nn.ReLU', ([], 
{'inplace': '(True)'}), '(inplace=True)\n', (15132, 15146), True, 'import torch.nn as nn\n'), ((572, 588), 'torch.min', 'torch.min', (['input'], {}), '(input)\n', (581, 588), False, 'import torch\n'), ((611, 627), 'torch.max', 'torch.max', (['input'], {}), '(input)\n', (620, 627), False, 'import torch\n'), ((1068, 1099), 'torch.zeros', 'torch.zeros', (['out_channels', '(1)', '(1)'], {}), '(out_channels, 1, 1)\n', (1079, 1099), False, 'import torch\n'), ((1141, 1172), 'torch.zeros', 'torch.zeros', (['out_channels', '(1)', '(1)'], {}), '(out_channels, 1, 1)\n', (1152, 1172), False, 'import torch\n'), ((1214, 1228), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1225, 1228), False, 'import torch\n'), ((1862, 1876), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1873, 1876), False, 'import torch\n'), ((1918, 1932), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1929, 1932), False, 'import torch\n'), ((1974, 1988), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1985, 1988), False, 'import torch\n'), ((2501, 2515), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (2512, 2515), False, 'import torch\n'), ((2556, 2570), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (2567, 2570), False, 'import torch\n'), ((2612, 2626), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (2623, 2626), False, 'import torch\n'), ((4444, 4479), 'torch.tensor', 'torch.tensor', (['(-(1 << self.bits - 1))'], {}), '(-(1 << self.bits - 1))\n', (4456, 4479), False, 'import torch\n'), ((4523, 4561), 'torch.tensor', 'torch.tensor', (['((1 << self.bits - 1) - 1)'], {}), '((1 << self.bits - 1) - 1)\n', (4535, 4561), False, 'import torch\n'), ((4724, 4739), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4736, 4739), False, 'import torch\n'), ((4781, 4815), 'torch.tensor', 'torch.tensor', (['((1 << self.bits) - 1)'], {}), '((1 << self.bits) - 1)\n', (4793, 4815), False, 'import torch\n'), ((4927, 4950), 'torch.abs', 'torch.abs', (['self.min_val'], 
{}), '(self.min_val)\n', (4936, 4950), False, 'import torch\n'), ((4952, 4975), 'torch.abs', 'torch.abs', (['self.max_val'], {}), '(self.max_val)\n', (4961, 4975), False, 'import torch\n'), ((5009, 5046), 'torch.abs', 'torch.abs', (['self.range_tracker.min_val'], {}), '(self.range_tracker.min_val)\n', (5018, 5046), False, 'import torch\n'), ((5048, 5085), 'torch.abs', 'torch.abs', (['self.range_tracker.max_val'], {}), '(self.range_tracker.max_val)\n', (5057, 5085), False, 'import torch\n'), ((10389, 10415), 'torch.Tensor', 'torch.Tensor', (['out_channels'], {}), '(out_channels)\n', (10401, 10415), False, 'import torch\n'), ((10447, 10473), 'torch.Tensor', 'torch.Tensor', (['out_channels'], {}), '(out_channels)\n', (10459, 10473), False, 'import torch\n'), ((10520, 10545), 'torch.zeros', 'torch.zeros', (['out_channels'], {}), '(out_channels)\n', (10531, 10545), False, 'import torch\n'), ((10591, 10615), 'torch.ones', 'torch.ones', (['out_channels'], {}), '(out_channels)\n', (10601, 10615), False, 'import torch\n'), ((10658, 10672), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (10669, 10672), False, 'import torch\n'), ((11422, 11570), 'torch.nn.functional.conv1d', 'F.conv1d', ([], {'input': 'input', 'weight': 'self.weight', 'bias': 'self.bias', 'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': 'self.groups'}), '(input=input, weight=self.weight, bias=self.bias, stride=self.\n stride, padding=self.padding, dilation=self.dilation, groups=self.groups)\n', (11430, 11570), True, 'import torch.nn.functional as F\n'), ((11774, 11802), 'torch.mean', 'torch.mean', (['output'], {'dim': 'dims'}), '(output, dim=dims)\n', (11784, 11802), False, 'import torch\n'), ((11827, 11854), 'torch.var', 'torch.var', (['output'], {'dim': 'dims'}), '(output, dim=dims)\n', (11836, 11854), False, 'import torch\n'), ((13597, 13738), 'torch.nn.functional.conv1d', 'F.conv1d', ([], {'input': 'q_input', 'weight': 'q_weight', 'bias': 'bias', 'stride': 
'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': 'self.groups'}), '(input=q_input, weight=q_weight, bias=bias, stride=self.stride,\n padding=self.padding, dilation=self.dilation, groups=self.groups)\n', (13605, 13738), True, 'import torch.nn.functional as F\n'), ((13882, 14023), 'torch.nn.functional.conv1d', 'F.conv1d', ([], {'input': 'q_input', 'weight': 'q_weight', 'bias': 'bias', 'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': 'self.groups'}), '(input=q_input, weight=q_weight, bias=bias, stride=self.stride,\n padding=self.padding, dilation=self.dilation, groups=self.groups)\n', (13890, 14023), True, 'import torch.nn.functional as F\n'), ((15057, 15103), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['output_channels'], {'momentum': '(0.01)'}), '(output_channels, momentum=0.01)\n', (15071, 15103), True, 'import torch.nn as nn\n'), ((1551, 1582), 'torch.min', 'torch.min', (['temp_minval', 'min_val'], {}), '(temp_minval, min_val)\n', (1560, 1582), False, 'import torch\n'), ((1633, 1664), 'torch.max', 'torch.max', (['temp_maxval', 'max_val'], {}), '(temp_maxval, max_val)\n', (1642, 1664), False, 'import torch\n'), ((11872, 11887), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11885, 11887), False, 'import torch\n'), ((694, 727), 'torch.min', 'torch.min', (['input', '(2)'], {'keepdim': '(True)'}), '(input, 2, keepdim=True)\n', (703, 727), False, 'import torch\n'), ((784, 817), 'torch.max', 'torch.max', (['input', '(2)'], {'keepdim': '(True)'}), '(input, 2, keepdim=True)\n', (793, 817), False, 'import torch\n'), ((12537, 12569), 'torch.sqrt', 'torch.sqrt', (['(batch_var + self.eps)'], {}), '(batch_var + self.eps)\n', (12547, 12569), False, 'import torch\n'), ((12790, 12822), 'torch.sqrt', 'torch.sqrt', (['(batch_var + self.eps)'], {}), '(batch_var + self.eps)\n', (12800, 12822), False, 'import torch\n'), ((13092, 13131), 'torch.sqrt', 'torch.sqrt', (['(self.running_var + self.eps)'], {}), 
'(self.running_var + self.eps)\n', (13102, 13131), False, 'import torch\n'), ((13348, 13387), 'torch.sqrt', 'torch.sqrt', (['(self.running_var + self.eps)'], {}), '(self.running_var + self.eps)\n', (13358, 13387), False, 'import torch\n'), ((12432, 12464), 'torch.sqrt', 'torch.sqrt', (['(batch_var + self.eps)'], {}), '(batch_var + self.eps)\n', (12442, 12464), False, 'import torch\n'), ((12685, 12717), 'torch.sqrt', 'torch.sqrt', (['(batch_var + self.eps)'], {}), '(batch_var + self.eps)\n', (12695, 12717), False, 'import torch\n'), ((12980, 13019), 'torch.sqrt', 'torch.sqrt', (['(self.running_var + self.eps)'], {}), '(self.running_var + self.eps)\n', (12990, 13019), False, 'import torch\n'), ((13236, 13275), 'torch.sqrt', 'torch.sqrt', (['(self.running_var + self.eps)'], {}), '(self.running_var + self.eps)\n', (13246, 13275), False, 'import torch\n')] |
import pandas as pd
def build_columns():
    """Return the 501 submission column names: 'pid', then trackuri_1..trackuri_500."""
    return ['pid'] + ['trackuri_%s' % i for i in range(1, 501)]
def build_output(df_list, id_cloumn, song_column):
    """Concatenate the formatted output of every DataFrame in ``df_list``.

    Args:
        df_list: iterable of DataFrames, one per batch of predictions.
        id_cloumn: name of the playlist-id column (misspelled name kept
            for backward compatibility with existing callers).
        song_column: name of the column holding the track URIs.

    Returns:
        One DataFrame in submission layout (pid + 500 track columns).
    """
    # Fixed: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0.  Build all pieces first and concatenate once — same
    # result, and O(n) instead of quadratic repeated copying.
    frames = [format_output(df, id_cloumn, song_column) for df in df_list]
    return pd.concat([pd.DataFrame(columns=build_columns()), *frames])
def format_output(df, id_cloumn, song_column):
    """Pivot one DataFrame of (playlist, track) rows into submission layout."""
    # The 500 track column labels (skip the leading 'pid').
    columns = build_columns()[1:501]
    output = df
    # Assigning the 500 labels row-for-row assumes df holds exactly 500
    # rows per playlist in rank order — TODO confirm against the caller.
    output['columns'] = columns
    # Wide format: one row per playlist id, one column per track slot.
    output = output.pivot(index=id_cloumn, columns='columns', values=song_column)
    output['pid'] = output.index
    # Reorder to the canonical pid + trackuri_1..trackuri_500 order.
    output = output[build_columns()]
    return output
def output_submission(df, file_name, team_name, contact_information, path='submissions/', challenge_track='main'):
file = path + file_name
first_row = pd.DataFrame(columns=build_columns())
first_row.loc[0] = build_first_row(team_name, contact_information, challenge_track)
output = pd.concat([first_row, df])
output = output[build_columns()]
output = output.set_index('pid')
output.to_csv(file)
def build_first_row(team_name, contact_information, challenge_track='main'):
row = ['team_info', challenge_track, team_name, contact_information]
for i in range(4, 501):
row.append(None)
return row
| [
"pandas.concat"
] | [((982, 1008), 'pandas.concat', 'pd.concat', (['[first_row, df]'], {}), '([first_row, df])\n', (991, 1008), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 12 01:38:00 2020
@author: 45063883
"""
import networkx as nx
from networkx import karate_club_graph, to_numpy_matrix
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Flatten,Embedding,Dropout
from keras.models import Sequential, Model
from keras import initializers, regularizers,activations,constraints
import keras.backend as k
from tensorflow.keras.layers import Layer,Input
from keras.optimizers import Adam
import numpy as np
from networkx import to_numpy_matrix, degree_centrality, betweenness_centrality, shortest_path_length,in_degree_centrality,out_degree_centrality,eigenvector_centrality,katz_centrality,closeness_centrality
import matplotlib.pyplot as plt
import NexGCN as venom
Gr = nx.gnm_random_graph(70,140)
exp=venom.ExperimentalGCN()
kernel=venom.feature_kernels()
#X=kernel.centrality_kernel(katz_centrality,Gr)
X=kernel.feature_random_weight_kernel(34,Gr)
#X=kernel.feature_distributions(np.random.poisson(4,9),Gr)
exp.create_network(Gr,X,True)
# Xs=np.matrix([
# [np.random.randn(),np.random.randn(),np.random.randn()]
# for j in range(exp.network.A.shape[0])
# ])
#
# exp.create_network(None,Xs,True)
#
predictions=exp.extract_binary_features(2048,2,keras.activations.sigmoid,'adam',5,20,1)
print(predictions)
exp.draw_graph(predictions,exp.network.F.shape[-1],300,True,90,90,'#00FFFF','#FF00FF')
output_class=exp.get_outcome(37)
print(output_class)
| [
"NexGCN.ExperimentalGCN",
"networkx.gnm_random_graph",
"NexGCN.feature_kernels"
] | [((793, 821), 'networkx.gnm_random_graph', 'nx.gnm_random_graph', (['(70)', '(140)'], {}), '(70, 140)\n', (812, 821), True, 'import networkx as nx\n'), ((825, 848), 'NexGCN.ExperimentalGCN', 'venom.ExperimentalGCN', ([], {}), '()\n', (846, 848), True, 'import NexGCN as venom\n'), ((856, 879), 'NexGCN.feature_kernels', 'venom.feature_kernels', ([], {}), '()\n', (877, 879), True, 'import NexGCN as venom\n')] |
#!/usr/bin/env python
from imutils.video import VideoStream
import argparse
import imagezmq
import socket
import time
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--server-ip', required=True,
help='IP address of server to which client will connect')
parser.add_argument('-p', '--pi-camera', type=bool, default=True,
help='Toggle use of Raspberry Pi camera module')
args = vars(parser.parse_args())
sender = imagezmq.ImageSender(connect_to=f'tcp://{args["server_ip"]}:5555')
host_name = socket.gethostname()
if args['pi_camera']:
vs = VideoStream(usePiCamera=True).start()
else:
vs = VideoStream(src=0).start()
time.sleep(2.0)
while True:
frame = vs.read()
sender.send_image(host_name, frame)
| [
"imutils.video.VideoStream",
"imagezmq.ImageSender",
"argparse.ArgumentParser",
"time.sleep",
"socket.gethostname"
] | [((128, 153), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (151, 153), False, 'import argparse\n'), ((466, 532), 'imagezmq.ImageSender', 'imagezmq.ImageSender', ([], {'connect_to': 'f"""tcp://{args[\'server_ip\']}:5555"""'}), '(connect_to=f"tcp://{args[\'server_ip\']}:5555")\n', (486, 532), False, 'import imagezmq\n'), ((546, 566), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (564, 566), False, 'import socket\n'), ((678, 693), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (688, 693), False, 'import time\n'), ((598, 627), 'imutils.video.VideoStream', 'VideoStream', ([], {'usePiCamera': '(True)'}), '(usePiCamera=True)\n', (609, 627), False, 'from imutils.video import VideoStream\n'), ((651, 669), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (662, 669), False, 'from imutils.video import VideoStream\n')] |