code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import ctypes
import ctypes.util
# Handle to the C runtime library; if_indextoname(3) lives there.
libc = ctypes.CDLL(ctypes.util.find_library('c'))
# Get network device's name
def if_indextoname (index):
    """Return the network interface name for a kernel interface index.

    :param index: interface index (as used by the kernel / netlink)
    :return: the interface name as ``bytes`` (the ``c_char_p`` restype
        converts the returned C string to Python bytes)
    :raises TypeError: if ``index`` is not an ``int``
    :raises RuntimeError: if no interface has this index (NULL return)
    """
    if not isinstance (index, int):
        raise TypeError ('Index must be an integer.')
    # Declare the C prototype: char *if_indextoname(unsigned int, char *)
    libc.if_indextoname.argtypes = [ctypes.c_uint32, ctypes.c_char_p]
    libc.if_indextoname.restype = ctypes.c_char_p
    # Output buffer; must hold at least IF_NAMESIZE bytes — 32 is ample.
    ifname = ctypes.create_string_buffer(32)
    ifname = libc.if_indextoname (index, ifname)
    # A NULL result comes back as None in ctypes -> unknown index.
    if not ifname:
        raise RuntimeError ("Invalid network interface index.")
    return ifname
# Generate socket id
def to_socket_id (addr1, addr1_str, addr2, addr2_str, port1, port2):
    """Build a canonical, direction-independent id for a socket pair.

    The endpoint with the smaller address comes first; ties on the
    address are broken by the port, so both directions of the same
    connection map to one id.
    """
    # Decide which endpoint leads; the other follows.
    if addr1 < addr2 or (addr1 == addr2 and port1 < port2):
        first, second = (addr1_str, port1), (addr2_str, port2)
    else:
        first, second = (addr2_str, port2), (addr1_str, port1)
    return "%s:%d-%s:%d" % (first + second)
| [
"ctypes.util.find_library",
"ctypes.create_string_buffer"
] | [((53, 82), 'ctypes.util.find_library', 'ctypes.util.find_library', (['"""c"""'], {}), "('c')\n", (77, 82), False, 'import ctypes\n'), ((365, 396), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['(32)'], {}), '(32)\n', (392, 396), False, 'import ctypes\n')] |
import nltk,re,codecs
from nltk.tokenize import word_tokenize,sent_tokenize
from backNode import BackNode
from nltk import Tree
def trace_tree(trace):
    """Serialize a binary parse-tree node into a bracketed string.

    A leaf (both children absent) renders as ``"<root> <word>"``; an
    internal node renders as ``"(<root>(<left>) (<right>))"``.

    :param trace: a node with ``root``, ``word``, ``left`` and ``right``
        attributes (e.g. a ``BackNode``)
    :return: the bracketed string representation
    """
    # `is None` is the correct identity test (the original used `== None`,
    # which can be fooled by a custom __eq__).
    if trace.left is None and trace.right is None:
        return "{} {}".format(trace.root, trace.word)
    return "({}({}) ({}))".format(
        trace.root, trace_tree(trace.left), trace_tree(trace.right)
    )
def data_preprosessing(path='F:/MTECH1/NLP/Assignment5/Training_set.txt'):
    """Read the training sentences, dropping the leading "<n> : " index
    and the trailing newline from each line.

    :param path: location of the training-set file; defaults to the
        original hard-coded location, so existing callers are unaffected.
    :return: list of cleaned sentence strings
    """
    with open(path) as f:
        lines = f.readlines()
    # Strip the "<number> : " prefix that labels each sentence.
    lines = [re.sub(r'\d+\s:\s', "", line) for line in lines]
    lines = [line.rstrip('\n') for line in lines]
    return lines
lines=data_preprosessing()
def grammer_parse():
    """Load the ATIS CFG, convert it to Chomsky normal form, and index it.

    :return: dict mapping a production's right-hand side (terminals /
        non-terminals joined by single spaces) to the list of left-hand
        non-terminals that can produce it — the lookup shape CKY needs.
    """
    grammer=(nltk.data.load("grammars/large_grammars/atis.cfg"))
    # CNF guarantees every rule is either A -> B C or A -> terminal.
    grammar=grammer.chomsky_normal_form(new_token_padding='#',flexible=False)
    grammar_dict={}
    for production in grammar.productions():
        prod=list(production.rhs())
        # Join the RHS symbols into a single space-separated key.
        prod_rhs=" "
        for i in prod:
            prod_rhs=prod_rhs+" "+str(i)
        prod_rhs=prod_rhs.strip()
        if prod_rhs in grammar_dict.keys():
            temp1=production.lhs()
            grammar_dict[prod_rhs].append(temp1)
        else:
            temp1=production.lhs()
            grammar_dict[prod_rhs]=[temp1]
    #print(len(grammar_dict))
    return grammar_dict
grammar=grammer_parse()
def parse(lines,grammar):
    """CKY-parse one sentence and draw every distinct parse tree.

    :param lines: preprocessed sentences; sentence 56 is parsed here
    :param grammar: dict mapping a CNF production's RHS string to the
        list of LHS non-terminals (see ``grammer_parse``)
    """
    line = lines[56].split()
    # The CKY tables below are 1-indexed, so pad position 0 with a dummy.
    line.insert(0, " ")
    length = len(line)
    print(line)
    tree_set = set()
    # parse_table[s][e]: non-terminals spanning words s..e;
    # back_table mirrors it with BackNode back-pointers for tree recovery.
    parse_table = [[set() for col in range(length + 1)] for row in range(length + 1)]
    back_table = [[[] for col in range(length + 1)] for row in range(length + 1)]
    # Base case: fill the diagonal from the terminal productions.
    for k in range(1, len(line)):
        if line[k] in grammar.keys():
            lhs = grammar[line[k]]
            for l in lhs:
                parse_table[k][k].add(l)
                back_table[k][k].append(BackNode(None, None, l, line[k]))
    # CKY recurrence: widen the span w, slide the start s, split at m.
    for w in range(2, length):
        for s in range(1, length - w + 1):
            end = w + s
            for m in range(s, end - 1):
                for p in parse_table[s][m]:
                    for q in parse_table[m + 1][end - 1]:
                        x = str(p) + " " + str(q)
                        if x in grammar.keys() and (len(x.split()) == 2):
                            lhs = grammar[x]
                            for l in lhs:
                                parse_table[s][end - 1].add(l)
                                prod = x.split()
                                # Pair every back-pointer combination that
                                # matches this binary production.
                                for r1 in back_table[s][m]:
                                    for r2 in back_table[m + 1][end - 1]:
                                        if (str(r1.root) == prod[0] and str(r2.root) == prod[1]):
                                            back_table[s][end - 1].append(BackNode(r1, r2, l, None))
    # A full parse exists iff the start symbol SIGMA spans the sentence.
    if ("SIGMA" in str(parse_table[1][length - 1])):
        for pointer in back_table[1][length - 1]:
            if (str(pointer.root) == "SIGMA"):
                value = trace_tree(pointer)
                tree_set.add(value)
        print(tree_set)
        print(len(tree_set))
        for result in tree_set:
            # BUG FIX: the original called Tree.fromstring(value) — the last
            # bracketing computed above — so every iteration drew the same
            # tree instead of each member of tree_set.
            trees = Tree.fromstring(result)
            trees.draw()
    else:
        print("No parse tree exist")
parse(lines,grammar)
| [
"backNode.BackNode",
"re.sub",
"nltk.Tree.fromstring",
"nltk.data.load"
] | [((1127, 1177), 'nltk.data.load', 'nltk.data.load', (['"""grammars/large_grammars/atis.cfg"""'], {}), "('grammars/large_grammars/atis.cfg')\n", (1141, 1177), False, 'import nltk, re, codecs\n'), ((680, 715), 're.sub', 're.sub', (['"""\\\\d+\\\\s:\\\\s"""', '""""""', 'lines[i]'], {}), "('\\\\d+\\\\s:\\\\s', '', lines[i])\n", (686, 715), False, 'import nltk, re, codecs\n'), ((3572, 3594), 'nltk.Tree.fromstring', 'Tree.fromstring', (['value'], {}), '(value)\n', (3587, 3594), False, 'from nltk import Tree\n'), ((2492, 2524), 'backNode.BackNode', 'BackNode', (['None', 'None', 'l', 'line[k]'], {}), '(None, None, l, line[k])\n', (2500, 2524), False, 'from backNode import BackNode\n'), ((3209, 3234), 'backNode.BackNode', 'BackNode', (['r1', 'r2', 'l', 'None'], {}), '(r1, r2, l, None)\n', (3217, 3234), False, 'from backNode import BackNode\n')] |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""
{{cookiecutter.project_slug}}.cli
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{{ cookiecutter.project_short_description }}
:copyright: © 2019 by the Choppy Team.
:license: AGPLv3+, see LICENSE for more details.
"""
"""Console script for {{cookiecutter.project_slug}}."""
import sys
import click
@click.command()
def main(args=None):
    """Console script for {{cookiecutter.project_slug}}."""
    # Emit the scaffold's placeholder messages (kept byte-identical).
    messages = (
        "Replace this message by putting your code into "
        "{{cookiecutter.project_slug}}.cli.main",
        "See click documentation at http://click.pocoo.org/",
    )
    for message in messages:
        click.echo(message)
    return 0
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| [
"click.echo",
"click.command"
] | [((361, 376), 'click.command', 'click.command', ([], {}), '()\n', (374, 376), False, 'import click\n'), ((462, 571), 'click.echo', 'click.echo', (['"""Replace this message by putting your code into {{cookiecutter.project_slug}}.cli.main"""'], {}), "(\n 'Replace this message by putting your code into {{cookiecutter.project_slug}}.cli.main'\n )\n", (472, 571), False, 'import click\n'), ((584, 648), 'click.echo', 'click.echo', (['"""See click documentation at http://click.pocoo.org/"""'], {}), "('See click documentation at http://click.pocoo.org/')\n", (594, 648), False, 'import click\n')] |
from __future__ import annotations
__all__ = ("executor",)
import inspect
import sys
from asyncio import get_running_loop
from concurrent.futures import Executor
from functools import partial, wraps
from typing import Awaitable, Callable, TypeVar, overload
from asphalt.core import Context
if sys.version_info >= (3, 10):
from typing import Concatenate, ParamSpec
else:
from typing_extensions import Concatenate, ParamSpec
T_Retval = TypeVar("T_Retval")
P = ParamSpec("P")
# Overload 1: called with an executor (or resource name) — returns a decorator.
@overload
def executor(
    func_or_executor: Executor | str,
) -> Callable[
    [Callable[Concatenate[Context, P], T_Retval]],
    Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]],
]:
    ...
# Overload 2: used bare as ``@executor`` — wraps the function directly.
@overload
def executor(
    func_or_executor: Callable[Concatenate[Context, P], T_Retval]
) -> Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]:
    ...
def executor(
    func_or_executor: Executor | str | Callable[Concatenate[Context, P], T_Retval]
) -> (
    Callable[
        [Callable[Concatenate[Context, P], T_Retval]],
        Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]],
    ]
    | Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]
):
    """
    Decorate a function to run in an executor.

    If no executor (or ``None``) is given, the current event loop's default executor is
    used. Otherwise, the argument must be a PEP 3148 compliant thread pool executor or
    the name of an :class:`~concurrent.futures.Executor` instance.

    If a decorated callable is called in a worker thread, the executor argument is
    ignored and the wrapped function is called directly.

    Callables wrapped with this decorator must be used with ``await`` when called in the
    event loop thread.

    Example use with the default executor (``None``)::

        @executor
        def this_runs_in_threadpool(ctx):
            return do_something_cpu_intensive()

        async def request_handler(ctx):
            result = await this_runs_in_threadpool(ctx)

    With a named :class:`~concurrent.futures.Executor` resource::

        @executor('special_ops')
        def this_runs_in_threadpool(ctx):
            return do_something_cpu_intensive()

        async def request_handler(ctx):
            result = await this_runs_in_threadpool(ctx)

    :param func_or_executor: either a callable (when used as a decorator), an executor
        instance or the name of an :class:`~concurrent.futures.Executor` resource
    """
    def outer(
        func: Callable[Concatenate[Context, P], T_Retval]
    ) -> Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]:
        def wrapper(
            ctx: Context, *args: P.args, **kwargs: P.kwargs
        ) -> T_Retval | Awaitable[T_Retval]:
            try:
                loop = get_running_loop()
            except RuntimeError:
                # Event loop not available -- we're in a worker thread
                return func(ctx, *args, **kwargs)
            # Resolve the executor resource name to an Executor instance
            _executor: Executor | None
            if isinstance(executor, str):
                _executor = ctx.require_resource(Executor, executor)
            else:
                # Either a concrete Executor, or None = loop's default executor.
                _executor = executor
            callback = partial(func, ctx, *args, **kwargs)
            return loop.run_in_executor(_executor, callback)
        # Coroutine functions would return a coroutine from the executor
        # thread instead of running there — reject them outright.
        assert not inspect.iscoroutinefunction(
            func
        ), "Cannot wrap coroutine functions to be run in an executor"
        return wraps(func)(wrapper)
    # NOTE: this local deliberately shadows the function name; `wrapper`
    # above closes over it to pick the executor at call time.
    executor: Executor | str | None = None
    if isinstance(func_or_executor, (str, Executor)):
        # Parametrized form: remember the executor, hand back the decorator.
        executor = func_or_executor
        return outer
    else:
        # Bare ``@executor`` form: decorate immediately with executor=None.
        return outer(func_or_executor)
| [
"inspect.iscoroutinefunction",
"functools.wraps",
"functools.partial",
"typing_extensions.ParamSpec",
"asyncio.get_running_loop",
"typing.TypeVar"
] | [((447, 466), 'typing.TypeVar', 'TypeVar', (['"""T_Retval"""'], {}), "('T_Retval')\n", (454, 466), False, 'from typing import Awaitable, Callable, TypeVar, overload\n'), ((471, 485), 'typing_extensions.ParamSpec', 'ParamSpec', (['"""P"""'], {}), "('P')\n", (480, 485), False, 'from typing_extensions import Concatenate, ParamSpec\n'), ((3272, 3307), 'functools.partial', 'partial', (['func', 'ctx', '*args'], {}), '(func, ctx, *args, **kwargs)\n', (3279, 3307), False, 'from functools import partial, wraps\n'), ((3389, 3422), 'inspect.iscoroutinefunction', 'inspect.iscoroutinefunction', (['func'], {}), '(func)\n', (3416, 3422), False, 'import inspect\n'), ((3520, 3531), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (3525, 3531), False, 'from functools import partial, wraps\n'), ((2796, 2814), 'asyncio.get_running_loop', 'get_running_loop', ([], {}), '()\n', (2812, 2814), False, 'from asyncio import get_running_loop\n')] |
import jinja2

# Template context for the "year" page.
page = {
    'title': 'Shkola',
    'item_path': '../src/',
    'google_signin_client_id': "",
    'google_site_verification': "",
}

# Four numbered buttons; colours alternate between two schemes and the
# links alternate between targets "1" and "2".
page['button'] = {
    'width': '137px',
    'height': '140px',
    'font_size': '111px',
    'margin': '10px',
    'choices': [],
}
for title, front_color, target in (
    ('1', '#ff6956', '1'),
    ('2', '#489cba', '2'),
    ('3', '#ff6956', '1'),
    ('4', '#489cba', '2'),
):
    page['button']['choices'].append({
        'title': title,
        'obj_type': 'A',
        'front_color': front_color,
        'back_color': '#f9f9f9',
        'link': 'href="%s"' % target,
    })

def _topic_submenu(menu_id):
    """Both year submenus carry the same topic options."""
    return {
        'id': menu_id,
        'options': [
            {'name': 'Brojevi', 'link': '1'},
            {'name': 'Geometrija', 'link': '2'},
            {'name': 'Razlomci', 'link': '3'},
        ],
    }

page['menu'] = [
    {
        'name': 'Zadaci',
        'submenu': {
            'id': 'zadaci',
            'options': [
                {'name': 'Cetvrti', 'link': 'C', 'submenu': _topic_submenu('cetvrti')},
                {'name': 'Treci', 'link': 'T', 'submenu': _topic_submenu('treci')},
            ],
        },
    },
    {'name': 'Rezultati', 'link': 'R'},
]

# Render the page template from the repository root.
env = jinja2.Environment(loader=jinja2.FileSystemLoader(".."))
template = env.get_template("rsc/year.html.j2")
print(template.render(template_params=page))
| [
"jinja2.FileSystemLoader",
"jinja2.Environment"
] | [((2134, 2163), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['""".."""'], {}), "('..')\n", (2157, 2163), False, 'import jinja2\n'), ((2170, 2208), 'jinja2.Environment', 'jinja2.Environment', ([], {'loader': 'file_loader'}), '(loader=file_loader)\n', (2188, 2208), False, 'import jinja2\n')] |
import logging
import json
import glob
import pandas as pd
import multiprocessing
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.model_selection import cross_val_predict
from sklearn.decomposition import IncrementalPCA
from scipy.stats import spearmanr
import config
from util import *
np.random.seed(config.RANDOM_SEED)
repo_lang = Repository_language()
def store_classification_result(model_name, language, model_classification_report, classification_results):
    """
    Stores the result of the classifier
    :param model_name: the classification type
    :param language: programming language
    :param model_classification_report: plain-text classification report
    :param classification_results: JSON-serializable results dict
    """
    raw_path = '{}classification_result_raw_{}_{}.txt'.format(config.PREDICTION_RESULT_PATH, model_name, language)
    json_path = '{}classification_result_json_{}_{}.json'.format(config.PREDICTION_RESULT_PATH, model_name, language)
    # Context managers ensure the files are flushed and closed — the
    # original `open(...).write(...)` chains leaked the handles.
    with open(raw_path, 'w') as raw_file:
        raw_file.write(model_classification_report)
    with open(json_path, 'w') as json_file:
        json_file.write(json.dumps(classification_results))
def data_classification_wo_cv(language, repo, data_train, label_train, data_test, label_test, random_seed=config.RANDOM_SEED, job_num=multiprocessing.cpu_count()):
    """
    Trains four tree-based classifiers on a fixed train split, tunes each
    with a grid search (inner CV), and evaluates on the held-out test split.
    :param language: programming language (recorded in each result row)
    :param repo: repository name (recorded in each result row)
    :param data_train: training features
    :param label_train: training labels
    :param data_test: test features
    :param label_test: test labels
    :param random_seed: the random seed
    :param job_num: the number of cores to use (NOTE: the default is
        evaluated once at import time, as Python defaults are)
    :return: list of metric dicts (see ``get_metrics``), one per model
    """
    # CV
    inner_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    # NOTE(review): outer_cv is created but never used in this function.
    outer_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    # Hyper-parameters
    tree_param = {'min_samples_leaf': config.MIN_SAMPLE_LEAVES, 'min_samples_split': config.MIN_SAMPLE_SPLIT,
                  'max_depth': config.TREE_MAX_DEPTH}
    forest_param = {'n_estimators': config.ESTIMATOR_NUM, 'min_samples_leaf': config.MIN_SAMPLE_LEAVES,
                    'min_samples_split': config.MIN_SAMPLE_SPLIT}
    boosting_param = {'n_estimators': config.ESTIMATOR_NUM, 'learning_rate': config.LEARNING_RATE}
    # Grid search definition
    grid_searches = [
        GridSearchCV(DecisionTreeClassifier(class_weight='balanced', random_state = random_seed),
                     tree_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION)
        , GridSearchCV(RandomForestClassifier(class_weight='balanced', n_jobs=job_num, random_state=random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION)
        , GridSearchCV(ExtraTreesClassifier(n_jobs=job_num, class_weight='balanced', random_state=random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(AdaBoostClassifier(base_estimator=DecisionTreeClassifier(class_weight = 'balanced',
                                                                              random_state=random_seed,
                                                                              max_depth=2),
                                        algorithm='SAMME.R', random_state=random_seed),
                     boosting_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION)
    ]
    # Fitting the classifiers
    classification_results = {}
    res = []
    for model in grid_searches:
        # Model training/testing
        model.score_sample_weight = True
        model.fit(data_train, label_train)
        # Extract the bare class name, e.g. "RandomForestClassifier".
        model_name = str(type(model.best_estimator_)).replace('<class \'', '').replace('\'>', '').split('.')[-1]
        model_best_param = model.best_params_
        predicted_label = model.best_estimator_.predict(data_test)
        t = get_metrics(label_test, predicted_label)
        t['model_name'] = model_name
        t['language'] = language
        t['repository'] = repo
        res.append(t)
    return res
def data_classification(language, data, label, random_seed=config.RANDOM_SEED, job_num=multiprocessing.cpu_count()):
    """
    Trains four tree-based classifiers with nested cross-validation
    (inner CV for the grid search, outer CV for prediction) and prints
    each model's classification report.
    :param language: programming language
    :param data: input data
    :param label: input labels
    :param random_seed: the random seed
    :param job_num: the number of cores to use (default evaluated once
        at import time, as Python defaults are)
    """
    # CV
    inner_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    outer_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    # Hyper-parameters
    tree_param = {'min_samples_leaf': config.MIN_SAMPLE_LEAVES, 'min_samples_split': config.MIN_SAMPLE_SPLIT,
                  'max_depth': config.TREE_MAX_DEPTH}
    forest_param = {'n_estimators': config.ESTIMATOR_NUM, 'min_samples_leaf': config.MIN_SAMPLE_LEAVES,
                    'min_samples_split': config.MIN_SAMPLE_SPLIT}
    boosting_param = {'n_estimators': config.ESTIMATOR_NUM, 'learning_rate': config.LEARNING_RATE}
    # Grid search definition
    grid_searches = [
        GridSearchCV(DecisionTreeClassifier(class_weight='balanced', random_state = random_seed),
                     tree_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(RandomForestClassifier(class_weight='balanced', n_jobs=job_num, random_state = random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(ExtraTreesClassifier(n_jobs=job_num, class_weight='balanced', random_state = random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(AdaBoostClassifier(base_estimator=DecisionTreeClassifier(class_weight = 'balanced',
                                                                              random_state = random_seed,
                                                                              max_depth=2),
                                        algorithm='SAMME.R', random_state=random_seed),
                     boosting_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION)
    ]
    # Fitting the classifiers
    classification_results = {}
    for model in grid_searches:
        # Model training/testing
        model.score_sample_weight = True
        model.fit(data, label)
        # Extract the bare class name, e.g. "RandomForestClassifier".
        model_name = str(type(model.best_estimator_)).replace('<class \'', '').replace('\'>', '').split('.')[-1]
        model_best_param = model.best_params_
        # Outer CV gives out-of-sample predictions for every row.
        predicted_label = cross_val_predict(model.best_estimator_, X=data, y=label, cv=outer_cv, n_jobs=job_num)
        model_accuracy = accuracy_score(label, predicted_label)
        model_confusion_matrix = confusion_matrix(label, predicted_label)
        model_classification_report = classification_report(label, predicted_label)
        classification_results[model_name] = {}
        classification_results[model_name]['best_params'] = model_best_param
        classification_results[model_name]['accuracy'] = model_accuracy
        classification_results[model_name]['confusion_matrix'] = model_confusion_matrix.tolist()
        classification_results[model_name]['classification_report'] = model_classification_report
        print(model_classification_report)
        ## Save the classification result
        #store_classification_result(model_name, language, model_classification_report, classification_results)
def get_best_decision_tree(data, label, random_seed=config.RANDOM_SEED, job_num=multiprocessing.cpu_count()):
    """
    Grid-searches a balanced decision tree and returns the best estimator.
    :param data: the data
    :param label: the labels
    :param random_seed: the random seed
    :param job_num: the number of cores to use
    :return: the best fitted ``DecisionTreeClassifier``
    """
    # CV
    inner_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    # Train/test
    tree_param = {'min_samples_leaf': config.MIN_SAMPLE_LEAVES, 'min_samples_split': config.MIN_SAMPLE_SPLIT,
                  'max_depth': config.TREE_MAX_DEPTH}
    grid_search = GridSearchCV(DecisionTreeClassifier(class_weight='balanced', random_state=random_seed),
                               tree_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION)
    grid_search.score_sample_weight = True
    grid_search.fit(data, label)
    return grid_search.best_estimator_
def get_feature_importance_by_model(model):
    """Expose a fitted estimator's per-feature importance scores.

    :param model: a fitted tree-based classifier
    :return: the estimator's ``feature_importances_`` sequence
    """
    importances = model.feature_importances_
    return importances
def get_feature_set(data):
    """
    Returns the feature sets separately
    :param data: 2-D numpy array; columns 0-27 hold the feature groups
        sliced below (multi-column groups are compressed to a single
        component with IncrementalPCA)
    :return: (feature_set_names, *one array per feature set)
    """
    # Data separation of feature sets
    parallel_changes = data[:, 0].reshape(-1, 1)
    commit_num = data[:, 1].reshape(-1, 1)
    commit_density = data[:, 2].reshape(-1, 1)
    # Columns 3-7: file-edit features -> one PCA component.
    file_edits = IncrementalPCA(n_components=1).fit_transform(data[:, 3:8])
    # Columns 8-9: line-edit features -> one PCA component.
    line_edits = IncrementalPCA(n_components=1).fit_transform(data[:, 8:10])
    dev_num = data[:, 10].reshape(-1, 1)
    # Columns 11-22: keyword features -> one PCA component.
    keywords = IncrementalPCA(n_components=1).fit_transform(data[:, 11:23])
    # Columns 23-26: commit-message features -> one PCA component.
    message = IncrementalPCA(n_components=1).fit_transform(data[:, 23:27])
    duration = data[:, 27].reshape(-1, 1)
    feature_sets = ['prl_changes', 'commit_num', 'commit_density', 'file_edits', 'line_edits', 'dev_num',
                    'keywords', 'message', 'duration']
    return feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords\
        , message, duration
def save_feature_correlation(language, data, label):
    """
    Store the Spearman correlation of each feature set with the label
    :param language: the programming language (used in the output filename)
    :param data: the data (see ``get_feature_set`` for the column layout)
    :param label: the label
    """
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message\
        , duration = get_feature_set(data)
    features = [parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message
        , duration]
    out_path = '{}feature_correlation_{}.txt'.format(config.PREDICTION_RESULT_PATH, language)
    # Open the file once with a context manager — the original re-opened
    # it (and leaked the handle) on every iteration.
    with open(out_path, 'a') as out_file:
        for i, feature in enumerate(features):
            corr, p_value = spearmanr(feature, label)
            out_file.write('{}:\t\t{} \t {}\n'.format(feature_sets[i], round(corr, 2), round(p_value, 2)))
def save_feature_correlation_dict(data, label):
    """
    Compute the Spearman correlation of each feature set with the label
    :param data: the data (see ``get_feature_set`` for the column layout)
    :param label: the label
    :return: dict mapping '<feature>_corr' / '<feature>_p_value' to values;
        possibly partial (or empty) if the computation fails midway
    """
    # The original first assigned a literal feature_sets list that was
    # immediately overwritten by the unpack below — dead code, removed.
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message\
        , duration = get_feature_set(data)
    features = [parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message
        , duration]
    correlation = {}
    try:
        for i, feature in enumerate(features):
            corr, p_value = spearmanr(feature, label)
            correlation[feature_sets[i] + '_corr'] = corr
            correlation[feature_sets[i] + '_p_value'] = p_value
    except Exception:
        # Deliberate best-effort: keep whatever was computed before the
        # failure (narrowed from the original bare ``except:``).
        pass
    return correlation
def save_feature_importance(repo_name, data, label):
    """
    Compute per-feature-set importances from the best decision tree
    :param repo_name: repository name (not used in the computation itself)
    :param data: the data as a pandas DataFrame (``.values`` is taken below)
    :param label: the label
    :return: the fitted tree's ``feature_importances_``, one value per
        feature set in ``get_feature_set`` order
    """
    data = data.values
    # feature_sets (the names) is unpacked but not needed here.
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message, duration \
        = get_feature_set(data)
    # Re-assemble the nine (n, 1) feature-set columns into one matrix.
    feature_data = np.concatenate((parallel_changes, commit_num, commit_density, file_edits, line_edits,
                                   dev_num, keywords, message, duration), axis=1)
    return get_feature_importance_by_model(get_best_decision_tree(feature_data, label))
def baseline_classification(language, data, label):
    """
    Classify the baseline data (parallel changed files)
    :param language: The programming language (suffixed with '__baseline'
        for result labelling)
    :param data: The data (see ``get_feature_set`` for the column layout)
    :param label: The labels
    """
    # Only the parallel_changes column is used; the remaining feature
    # sets are unpacked and discarded.
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message \
        , duration = get_feature_set(data)
    language = language + '__baseline'
    data_classification(language, parallel_changes, label)
############################################
############################################
from sklearn import metrics
import autosklearn.classification
from sklearn.svm import SVC
def get_metrics(label_test, predicted_labels):
    """
    Collect the standard evaluation metrics for one prediction run.
    :param label_test: ground-truth binary labels (1 = conflict)
    :param predicted_labels: hard predicted labels
    :return: dict of metric name -> value
    """
    result = {}
    # NOTE(review): roc_curve/roc_auc_score are fed hard 0/1 predictions
    # rather than probability scores, giving a degenerate curve — confirm
    # this is intentional.
    result['roc_curve'] = metrics.roc_curve(label_test, predicted_labels)
    result['confusion_matrix'] = metrics.confusion_matrix(label_test, predicted_labels)
    result['classification_report'] = metrics.classification_report(label_test, predicted_labels)
    result['accuracy_score'] = metrics.accuracy_score(label_test, predicted_labels)
    result['roc_auc_score'] = metrics.roc_auc_score(label_test, predicted_labels)
    # Precision/recall/F1, each for the conflict class (pos_label=1,
    # the sklearn default), the non-conflict class, and weighted average.
    result['precision_score_conflict'] = metrics.precision_score(label_test, predicted_labels)
    result['precision_score_not_conflict'] = metrics.precision_score(label_test, predicted_labels,pos_label=0)
    result['precision_score_average'] = metrics.precision_score(label_test, predicted_labels, average='weighted')
    result['recall_score_conflict'] = metrics.recall_score(label_test, predicted_labels)
    result['recall_score_not_conflict'] = metrics.recall_score(label_test, predicted_labels,pos_label=0)
    result['recall_score_average'] = metrics.recall_score(label_test, predicted_labels, average='weighted')
    result['f1_score_conflict'] = metrics.f1_score(label_test, predicted_labels)
    result['f1_score_not_conflict'] = metrics.f1_score(label_test, predicted_labels,pos_label=0)
    result['f1_score_average'] = metrics.f1_score(label_test, predicted_labels, average='weighted')
    # Fraction of actually-conflicting samples in the test set.
    result['conflict_rate'] = len([i for i in label_test if i == 1]) / len(label_test)
    return result
def get_decision_tree_result(data_train, label_train, data_test, label_test):
    """Train a balanced decision tree on the train split and return the
    evaluation metrics (see ``get_metrics``) on the test split."""
    model = DecisionTreeClassifier(class_weight='balanced')
    model.fit(data_train, label_train)
    return get_metrics(label_test, model.predict(data_test))
def get_random_forest_result(data_train, label_train, data_test, label_test):
    """Train a balanced random forest on the train split and return the
    evaluation metrics (see ``get_metrics``) on the test split."""
    model = RandomForestClassifier(class_weight='balanced')
    model.fit(data_train, label_train)
    return get_metrics(label_test, model.predict(data_test))
def get_svm_result(data_train, label_train, data_test, label_test):
    """Train a balanced linear-kernel SVM on the train split and return
    the evaluation metrics (see ``get_metrics``) on the test split."""
    model = SVC(C=1.0, kernel='linear', class_weight='balanced')
    model.fit(data_train, label_train)
    return get_metrics(label_test, model.predict(data_test))
def get_auto_scikit_result(data_train, label_train, data_test, label_test):
    """
    Run auto-sklearn AutoML (1h budget, 300s per candidate) on the train
    split and return the evaluation metrics plus the AutoML summaries.
    :return: ``get_metrics`` dict extended with 'show_models' and
        'sprint_statistics'
    """
    automl = autosklearn.classification.AutoSklearnClassifier(
        time_left_for_this_task= 60 * 60,
        per_run_time_limit=300,
        tmp_folder='/tmp/autosklearn_sequential_example_tmp1111',
        output_folder='/tmp/autosklearn_sequential_example_out1111',
    )
    # Optimize for ROC AUC, matching the metrics reported downstream.
    automl.fit(data_train, label_train, metric=autosklearn.metrics.roc_auc)
    predicted_labels = automl.predict(data_test)
    result = get_metrics(label_test, predicted_labels)
    result['show_models'] = automl.show_models()
    result['sprint_statistics'] = automl.sprint_statistics()
    return result
if __name__ == "__main__":
    # Logging
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s in %(threadName)s - %(asctime)s by %(name)-12s : %(message)s',
                        datefmt='%y-%m-%d %H:%M:%S')
    logging.info('Train/test of merge conflict prediction')
    # Data classification
    data_files = glob.glob(config.PREDICTION_CSV_PATH + 'data_*')
    label_files = glob.glob(config.PREDICTION_CSV_PATH + 'label_*')
    # Repository name is the 4th '_'-separated token of each file name.
    repos_set = [files.split('/')[-1].split('_')[3].replace('.csv', '') for files in data_files]
    classification_result = []
    feature_importance = []
    languages = []
    corr = []
    for ind, data_path in enumerate(data_files):
        # Sort both frames chronologically so train/test is a time split.
        data_tmp = pd.read_csv(data_path).sort_values(by=['merge_commit_date'])
        label_tmp = pd.read_csv(data_path.replace('data_prediction', 'label_prediction')).sort_values(by=['merge_commit_date'])
        data_tmp = data_tmp.drop('merge_commit_date', axis=1)
        label_tmp = label_tmp.drop('merge_commit_date', axis=1)
        # Correlation
        try:
            tmp_corr = save_feature_correlation_dict(data_tmp.to_numpy(), label_tmp.to_numpy())
            if len(tmp_corr) > 0:
                # NOTE(review): 'langugae' is a typo but it is a persisted
                # CSV column key — do not rename without migrating readers.
                tmp_corr['langugae'] = repo_lang.get_lang(repos_set[ind].lower())
                tmp_corr['repository'] = repos_set[ind]
                corr.append(tmp_corr)
        except:
            pass
        # NOTE(review): this unconditional `continue` makes everything
        # below it in the loop dead code — this run computes correlations
        # only; remove it to re-enable the classification pipeline.
        continue
        train_ind = int(data_tmp.shape[0] * config.TRAIN_RATE)
        data_train = data_tmp.iloc[0:train_ind, :]
        data_test = data_tmp.iloc[train_ind:-1, :]
        label_train = label_tmp.iloc[0:train_ind, :]['is_conflict'].tolist()
        label_test = label_tmp.iloc[train_ind:-1, :]['is_conflict'].tolist()
        # Sanity filters: skip repositories with unusable test splits.
        if len(label_test) != data_test.shape[0]:
            print('Inconsistent data: {}'.format(repos_set[ind]))
            continue
        if data_test.shape[0] < 50:
            print('Not enough merge scenarios: {}'.format(repos_set[ind]))
            continue
        if len(set(label_test)) != 2 or len(set(label_train)) != 2:
            print('One class is missed: {}'.format(repos_set[ind]))
            continue
        if len([i for i in label_test if i == 1]) < 10:
            print('Nor enough conflicting merge in the test batch for evaluation: {}'.format(repos_set[ind]))
            continue
        # k = k + data_tmp.shape[0]
        try:
            res = data_classification_wo_cv(repo_lang.get_lang(repos_set[ind].lower()), repos_set[ind] ,data_train, label_train, data_test, label_test)
            classification_result = classification_result + res
            feature_importance.append(save_feature_importance(repos_set[ind], data_train, label_train))
            languages.append(repo_lang.get_lang(repos_set[ind].lower()))
        except Exception as e:
            print('Error - {}'.format(e))
            continue
    corr_df = pd.DataFrame(corr)
    corr_df.to_csv(f'corr_{config.RANDOM_SEED}.csv')
    # NOTE(review): exit() here makes everything below dead code for this
    # correlation-only run; remove it to emit the importance/result CSVs.
    exit()
    # Feature importance
    feature_importance = pd.DataFrame(feature_importance, columns=['prl_changes', 'commit_num', 'commit_density', 'file_edits', 'line_edits', 'dev_num',
                                                            'keywords', 'message', 'duration'])
    feature_importance['language'] = pd.Series(languages)
    feature_importance['repository'] = pd.Series(repos_set)
    feature_importance.dropna()
    feature_importance.to_csv(f'feature_importance_{config.RANDOM_SEED}.csv')
    feature_importance_summery = feature_importance.drop('repository', axis=1).groupby('language').agg('median')
    feature_importance_summery.to_csv(f'feature_importance_summery_{config.RANDOM_SEED}.csv')
    # Classification result
    classification_result_df = pd.DataFrame(classification_result)
    classification_result_df.to_csv(f'res_{config.RANDOM_SEED}.csv')
| [
"sklearn.ensemble.ExtraTreesClassifier",
"pandas.read_csv",
"sklearn.metrics.classification_report",
"multiprocessing.cpu_count",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_curve",
"sklearn.model_selection.KFold",
"loggi... | [((666, 700), 'numpy.random.seed', 'np.random.seed', (['config.RANDOM_SEED'], {}), '(config.RANDOM_SEED)\n', (680, 700), True, 'import numpy as np\n'), ((1547, 1574), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1572, 1574), False, 'import multiprocessing\n'), ((1832, 1903), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'config.FOLD_NUM', 'shuffle': '(True)', 'random_state': 'random_seed'}), '(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)\n', (1837, 1903), False, 'from sklearn.model_selection import GridSearchCV, KFold\n'), ((1919, 1990), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'config.FOLD_NUM', 'shuffle': '(True)', 'random_state': 'random_seed'}), '(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)\n', (1924, 1990), False, 'from sklearn.model_selection import GridSearchCV, KFold\n'), ((4362, 4389), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4387, 4389), False, 'import multiprocessing\n'), ((4647, 4718), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'config.FOLD_NUM', 'shuffle': '(True)', 'random_state': 'random_seed'}), '(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)\n', (4652, 4718), False, 'from sklearn.model_selection import GridSearchCV, KFold\n'), ((4734, 4805), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'config.FOLD_NUM', 'shuffle': '(True)', 'random_state': 'random_seed'}), '(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)\n', (4739, 4805), False, 'from sklearn.model_selection import GridSearchCV, KFold\n'), ((7800, 7827), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (7825, 7827), False, 'import multiprocessing\n'), ((8059, 8130), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'config.FOLD_NUM', 'shuffle': '(True)', 'random_state': 'random_seed'}), '(n_splits=config.FOLD_NUM, shuffle=True, 
random_state=random_seed)\n', (8064, 8130), False, 'from sklearn.model_selection import GridSearchCV, KFold\n'), ((12024, 12160), 'numpy.concatenate', 'np.concatenate', (['(parallel_changes, commit_num, commit_density, file_edits, line_edits,\n dev_num, keywords, message, duration)'], {'axis': '(1)'}), '((parallel_changes, commit_num, commit_density, file_edits,\n line_edits, dev_num, keywords, message, duration), axis=1)\n', (12038, 12160), True, 'import numpy as np\n'), ((13043, 13090), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['label_test', 'predicted_labels'], {}), '(label_test, predicted_labels)\n', (13060, 13090), False, 'from sklearn import metrics\n'), ((13124, 13178), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['label_test', 'predicted_labels'], {}), '(label_test, predicted_labels)\n', (13148, 13178), False, 'from sklearn import metrics\n'), ((13217, 13276), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['label_test', 'predicted_labels'], {}), '(label_test, predicted_labels)\n', (13246, 13276), False, 'from sklearn import metrics\n'), ((13309, 13361), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['label_test', 'predicted_labels'], {}), '(label_test, predicted_labels)\n', (13331, 13361), False, 'from sklearn import metrics\n'), ((13392, 13443), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['label_test', 'predicted_labels'], {}), '(label_test, predicted_labels)\n', (13413, 13443), False, 'from sklearn import metrics\n'), ((13486, 13539), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['label_test', 'predicted_labels'], {}), '(label_test, predicted_labels)\n', (13509, 13539), False, 'from sklearn import metrics\n'), ((13585, 13651), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['label_test', 'predicted_labels'], {'pos_label': '(0)'}), '(label_test, predicted_labels, pos_label=0)\n', (13608, 13651), False, 'from sklearn import 
metrics\n'), ((13691, 13764), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['label_test', 'predicted_labels'], {'average': '"""weighted"""'}), "(label_test, predicted_labels, average='weighted')\n", (13714, 13764), False, 'from sklearn import metrics\n'), ((13804, 13854), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['label_test', 'predicted_labels'], {}), '(label_test, predicted_labels)\n', (13824, 13854), False, 'from sklearn import metrics\n'), ((13897, 13960), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['label_test', 'predicted_labels'], {'pos_label': '(0)'}), '(label_test, predicted_labels, pos_label=0)\n', (13917, 13960), False, 'from sklearn import metrics\n'), ((13997, 14067), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['label_test', 'predicted_labels'], {'average': '"""weighted"""'}), "(label_test, predicted_labels, average='weighted')\n", (14017, 14067), False, 'from sklearn import metrics\n'), ((14103, 14149), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['label_test', 'predicted_labels'], {}), '(label_test, predicted_labels)\n', (14119, 14149), False, 'from sklearn import metrics\n'), ((14188, 14247), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['label_test', 'predicted_labels'], {'pos_label': '(0)'}), '(label_test, predicted_labels, pos_label=0)\n', (14204, 14247), False, 'from sklearn import metrics\n'), ((14280, 14346), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['label_test', 'predicted_labels'], {'average': '"""weighted"""'}), "(label_test, predicted_labels, average='weighted')\n", (14296, 14346), False, 'from sklearn import metrics\n'), ((15967, 16131), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(levelname)s in %(threadName)s - %(asctime)s by %(name)-12s : %(message)s"""', 'datefmt': '"""%y-%m-%d %H:%M:%S"""'}), "(level=logging.INFO, format=\n '%(levelname)s in %(threadName)s - %(asctime)s by %(name)-12s : %(message)s'\n , 
datefmt='%y-%m-%d %H:%M:%S')\n", (15986, 16131), False, 'import logging\n'), ((16174, 16229), 'logging.info', 'logging.info', (['"""Train/test of merge conflict prediction"""'], {}), "('Train/test of merge conflict prediction')\n", (16186, 16229), False, 'import logging\n'), ((16275, 16323), 'glob.glob', 'glob.glob', (["(config.PREDICTION_CSV_PATH + 'data_*')"], {}), "(config.PREDICTION_CSV_PATH + 'data_*')\n", (16284, 16323), False, 'import glob\n'), ((16342, 16391), 'glob.glob', 'glob.glob', (["(config.PREDICTION_CSV_PATH + 'label_*')"], {}), "(config.PREDICTION_CSV_PATH + 'label_*')\n", (16351, 16391), False, 'import glob\n'), ((18853, 18871), 'pandas.DataFrame', 'pd.DataFrame', (['corr'], {}), '(corr)\n', (18865, 18871), True, 'import pandas as pd\n'), ((18987, 19158), 'pandas.DataFrame', 'pd.DataFrame', (['feature_importance'], {'columns': "['prl_changes', 'commit_num', 'commit_density', 'file_edits', 'line_edits',\n 'dev_num', 'keywords', 'message', 'duration']"}), "(feature_importance, columns=['prl_changes', 'commit_num',\n 'commit_density', 'file_edits', 'line_edits', 'dev_num', 'keywords',\n 'message', 'duration'])\n", (18999, 19158), True, 'import pandas as pd\n'), ((19208, 19228), 'pandas.Series', 'pd.Series', (['languages'], {}), '(languages)\n', (19217, 19228), True, 'import pandas as pd\n'), ((19268, 19288), 'pandas.Series', 'pd.Series', (['repos_set'], {}), '(repos_set)\n', (19277, 19288), True, 'import pandas as pd\n'), ((19666, 19701), 'pandas.DataFrame', 'pd.DataFrame', (['classification_result'], {}), '(classification_result)\n', (19678, 19701), True, 'import pandas as pd\n'), ((1375, 1409), 'json.dumps', 'json.dumps', (['classification_results'], {}), '(classification_results)\n', (1385, 1409), False, 'import json\n'), ((6818, 6908), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['model.best_estimator_'], {'X': 'data', 'y': 'label', 'cv': 'outer_cv', 'n_jobs': 'job_num'}), '(model.best_estimator_, X=data, y=label, 
cv=outer_cv,\n n_jobs=job_num)\n', (6835, 6908), False, 'from sklearn.model_selection import cross_val_predict\n'), ((6930, 6968), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['label', 'predicted_label'], {}), '(label, predicted_label)\n', (6944, 6968), False, 'from sklearn.metrics import accuracy_score\n'), ((7002, 7042), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['label', 'predicted_label'], {}), '(label, predicted_label)\n', (7018, 7042), False, 'from sklearn.metrics import confusion_matrix\n'), ((7081, 7126), 'sklearn.metrics.classification_report', 'classification_report', (['label', 'predicted_label'], {}), '(label, predicted_label)\n', (7102, 7126), False, 'from sklearn.metrics import classification_report\n'), ((8344, 8417), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '"""balanced"""', 'random_state': 'random_seed'}), "(class_weight='balanced', random_state=random_seed)\n", (8366, 8417), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((10440, 10465), 'scipy.stats.spearmanr', 'spearmanr', (['feature', 'label'], {}), '(feature, label)\n', (10449, 10465), False, 'from scipy.stats import spearmanr\n'), ((2521, 2594), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '"""balanced"""', 'random_state': 'random_seed'}), "(class_weight='balanced', random_state=random_seed)\n", (2543, 2594), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2713, 2806), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'class_weight': '"""balanced"""', 'n_jobs': 'job_num', 'random_state': 'random_seed'}), "(class_weight='balanced', n_jobs=job_num,\n random_state=random_seed)\n", (2735, 2806), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2924, 3016), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_jobs': 'job_num', 'class_weight': '"""balanced"""', 'random_state': 'random_seed'}), 
"(n_jobs=job_num, class_weight='balanced', random_state=\n random_seed)\n", (2944, 3016), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((5336, 5409), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '"""balanced"""', 'random_state': 'random_seed'}), "(class_weight='balanced', random_state=random_seed)\n", (5358, 5409), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((5527, 5620), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'class_weight': '"""balanced"""', 'n_jobs': 'job_num', 'random_state': 'random_seed'}), "(class_weight='balanced', n_jobs=job_num,\n random_state=random_seed)\n", (5549, 5620), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5739, 5831), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_jobs': 'job_num', 'class_weight': '"""balanced"""', 'random_state': 'random_seed'}), "(n_jobs=job_num, class_weight='balanced', random_state=\n random_seed)\n", (5759, 5831), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((9174, 9204), 'sklearn.decomposition.IncrementalPCA', 'IncrementalPCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (9188, 9204), False, 'from sklearn.decomposition import IncrementalPCA\n'), ((9250, 9280), 'sklearn.decomposition.IncrementalPCA', 'IncrementalPCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (9264, 9280), False, 'from sklearn.decomposition import IncrementalPCA\n'), ((9366, 9396), 'sklearn.decomposition.IncrementalPCA', 'IncrementalPCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (9380, 9396), False, 'from sklearn.decomposition import IncrementalPCA\n'), ((9441, 9471), 'sklearn.decomposition.IncrementalPCA', 'IncrementalPCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (9455, 9471), False, 'from sklearn.decomposition import IncrementalPCA\n'), ((11405, 11430), 'scipy.stats.spearmanr', 'spearmanr', (['feature', 'label'], {}), '(feature, 
label)\n', (11414, 11430), False, 'from scipy.stats import spearmanr\n'), ((14546, 14593), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced')\n", (14568, 14593), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((14813, 14860), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced')\n", (14835, 14860), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((15069, 15121), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(1.0)', 'kernel': '"""linear"""', 'class_weight': '"""balanced"""'}), "(C=1.0, kernel='linear', class_weight='balanced')\n", (15072, 15121), False, 'from sklearn.svm import SVC\n'), ((16656, 16678), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (16667, 16678), True, 'import pandas as pd\n'), ((3166, 3256), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '"""balanced"""', 'random_state': 'random_seed', 'max_depth': '(2)'}), "(class_weight='balanced', random_state=random_seed,\n max_depth=2)\n", (3188, 3256), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((5983, 6073), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '"""balanced"""', 'random_state': 'random_seed', 'max_depth': '(2)'}), "(class_weight='balanced', random_state=random_seed,\n max_depth=2)\n", (6005, 6073), False, 'from sklearn.tree import DecisionTreeClassifier\n')] |
# coding: utf-8
from __future__ import unicode_literals
import time
import logging
import traceback
from optparse import make_option
import json
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django_fuzzytest.runner import FuzzyRunner
logger = logging.getLogger(__file__)
class Command(BaseCommand):
help = 'Run fuzzytest'
option_list = BaseCommand.option_list + (
make_option("--exclude", "-e", action="append", default=[],
dest="exclude", help="Exclude applications from test"),
make_option("--cache", "-c", action="store_true", default='.fuzzycache',
dest="cache", help="Cache path. Default: .fuzzycache"),
)
params = {}
def _merge_cache(self, path):
fp = file(path)
for line in fp:
d = json.loads(line)
self.params.setdefault(
d['path'],
{"get":[],"post":[],"files":[]}
)
self.params[d['path']]['get'] = list(set( \
self.params[d['path']]['get'] + d['get']))
self.params[d['path']]['post'] = list(set( \
self.params[d['path']]['post'] + d['post']))
self.params[d['path']]['post'] = list(set( \
self.params[d['path']]['post'] + d['post']))
def handle(self, *args, **options):
exclude = options.get('exclude')
cache_path = options.get('cache')
self._merge_cache(cache_path)
runner = FuzzyRunner(self.params)
runner.run()
| [
"logging.getLogger",
"json.loads",
"optparse.make_option",
"django_fuzzytest.runner.FuzzyRunner"
] | [((304, 331), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (321, 331), False, 'import logging\n'), ((1519, 1543), 'django_fuzzytest.runner.FuzzyRunner', 'FuzzyRunner', (['self.params'], {}), '(self.params)\n', (1530, 1543), False, 'from django_fuzzytest.runner import FuzzyRunner\n'), ((444, 562), 'optparse.make_option', 'make_option', (['"""--exclude"""', '"""-e"""'], {'action': '"""append"""', 'default': '[]', 'dest': '"""exclude"""', 'help': '"""Exclude applications from test"""'}), "('--exclude', '-e', action='append', default=[], dest='exclude',\n help='Exclude applications from test')\n", (455, 562), False, 'from optparse import make_option\n'), ((581, 712), 'optparse.make_option', 'make_option', (['"""--cache"""', '"""-c"""'], {'action': '"""store_true"""', 'default': '""".fuzzycache"""', 'dest': '"""cache"""', 'help': '"""Cache path. Default: .fuzzycache"""'}), "('--cache', '-c', action='store_true', default='.fuzzycache',\n dest='cache', help='Cache path. Default: .fuzzycache')\n", (592, 712), False, 'from optparse import make_option\n'), ((846, 862), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (856, 862), False, 'import json\n')] |
# Generated by Django 2.0.7 on 2018-07-06 19:23
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('grading_system', '0002_student_email'),
]
operations = [
migrations.RenameField(
model_name='assignment',
old_name='assingment_date',
new_name='assignment_date',
),
migrations.AddField(
model_name='course',
name='slug',
field=django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from=models.CharField(max_length=60)),
),
migrations.AlterField(
model_name='assignmentresult',
name='grade',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=5),
),
]
| [
"django.db.models.DecimalField",
"django.db.migrations.RenameField",
"django.db.models.CharField"
] | [((272, 379), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""assignment"""', 'old_name': '"""assingment_date"""', 'new_name': '"""assignment_date"""'}), "(model_name='assignment', old_name='assingment_date',\n new_name='assignment_date')\n", (294, 379), False, 'from django.db import migrations, models\n'), ((776, 840), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'default': '(0.0)', 'max_digits': '(5)'}), '(decimal_places=2, default=0.0, max_digits=5)\n', (795, 840), False, 'from django.db import migrations, models\n'), ((613, 644), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (629, 644), False, 'from django.db import migrations, models\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AppliedLicenseResponse',
'CloneJobResponse',
'ComputeEngineTargetDefaultsResponse',
'ComputeEngineTargetDetailsResponse',
'ComputeSchedulingResponse',
'CutoverJobResponse',
'NetworkInterfaceResponse',
'ReplicationCycleResponse',
'ReplicationSyncResponse',
'SchedulePolicyResponse',
'SchedulingNodeAffinityResponse',
'StatusResponse',
'VmUtilizationInfoResponse',
'VmUtilizationMetricsResponse',
'VmwareSourceDetailsResponse',
'VmwareVmDetailsResponse',
]
@pulumi.output_type
class AppliedLicenseResponse(dict):
"""
AppliedLicense holds the license data returned by adaptation module report.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "osLicense":
suggest = "os_license"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AppliedLicenseResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AppliedLicenseResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AppliedLicenseResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
os_license: str,
type: str):
"""
AppliedLicense holds the license data returned by adaptation module report.
:param str os_license: The OS license returned from the adaptation module's report.
:param str type: The license type that was used in OS adaptation.
"""
pulumi.set(__self__, "os_license", os_license)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="osLicense")
def os_license(self) -> str:
"""
The OS license returned from the adaptation module's report.
"""
return pulumi.get(self, "os_license")
@property
@pulumi.getter
def type(self) -> str:
"""
The license type that was used in OS adaptation.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class CloneJobResponse(dict):
"""
CloneJob describes the process of creating a clone of a MigratingVM to the requested target based on the latest successful uploaded snapshots. While the migration cycles of a MigratingVm take place, it is possible to verify the uploaded VM can be started in the cloud, by creating a clone. The clone can be created without any downtime, and it is created using the latest snapshots which are already in the cloud. The cloneJob is only responsible for its work, not its products, which means once it is finished, it will never touch the instance it created. It will only delete it in case of the CloneJob being cancelled or upon failure to clone.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "computeEngineTargetDetails":
suggest = "compute_engine_target_details"
elif key == "createTime":
suggest = "create_time"
elif key == "stateTime":
suggest = "state_time"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CloneJobResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CloneJobResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CloneJobResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
compute_engine_target_details: 'outputs.ComputeEngineTargetDetailsResponse',
create_time: str,
error: 'outputs.StatusResponse',
name: str,
state: str,
state_time: str):
"""
CloneJob describes the process of creating a clone of a MigratingVM to the requested target based on the latest successful uploaded snapshots. While the migration cycles of a MigratingVm take place, it is possible to verify the uploaded VM can be started in the cloud, by creating a clone. The clone can be created without any downtime, and it is created using the latest snapshots which are already in the cloud. The cloneJob is only responsible for its work, not its products, which means once it is finished, it will never touch the instance it created. It will only delete it in case of the CloneJob being cancelled or upon failure to clone.
:param 'ComputeEngineTargetDetailsResponse' compute_engine_target_details: Details of the target VM in Compute Engine.
:param str create_time: The time the clone job was created (as an API call, not when it was actually created in the target).
:param 'StatusResponse' error: Provides details for the errors that led to the Clone Job's state.
:param str name: The name of the clone.
:param str state: State of the clone job.
:param str state_time: The time the state was last updated.
"""
pulumi.set(__self__, "compute_engine_target_details", compute_engine_target_details)
pulumi.set(__self__, "create_time", create_time)
pulumi.set(__self__, "error", error)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "state", state)
pulumi.set(__self__, "state_time", state_time)
@property
@pulumi.getter(name="computeEngineTargetDetails")
def compute_engine_target_details(self) -> 'outputs.ComputeEngineTargetDetailsResponse':
"""
Details of the target VM in Compute Engine.
"""
return pulumi.get(self, "compute_engine_target_details")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time the clone job was created (as an API call, not when it was actually created in the target).
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def error(self) -> 'outputs.StatusResponse':
"""
Provides details for the errors that led to the Clone Job's state.
"""
return pulumi.get(self, "error")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the clone.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
State of the clone job.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="stateTime")
def state_time(self) -> str:
"""
The time the state was last updated.
"""
return pulumi.get(self, "state_time")
@pulumi.output_type
class ComputeEngineTargetDefaultsResponse(dict):
"""
ComputeEngineTargetDefaults is a collection of details for creating a VM in a target Compute Engine project.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "additionalLicenses":
suggest = "additional_licenses"
elif key == "appliedLicense":
suggest = "applied_license"
elif key == "bootOption":
suggest = "boot_option"
elif key == "computeScheduling":
suggest = "compute_scheduling"
elif key == "diskType":
suggest = "disk_type"
elif key == "licenseType":
suggest = "license_type"
elif key == "machineType":
suggest = "machine_type"
elif key == "machineTypeSeries":
suggest = "machine_type_series"
elif key == "networkInterfaces":
suggest = "network_interfaces"
elif key == "networkTags":
suggest = "network_tags"
elif key == "secureBoot":
suggest = "secure_boot"
elif key == "serviceAccount":
suggest = "service_account"
elif key == "targetProject":
suggest = "target_project"
elif key == "vmName":
suggest = "vm_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ComputeEngineTargetDefaultsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ComputeEngineTargetDefaultsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ComputeEngineTargetDefaultsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
additional_licenses: Sequence[str],
applied_license: 'outputs.AppliedLicenseResponse',
boot_option: str,
compute_scheduling: 'outputs.ComputeSchedulingResponse',
disk_type: str,
labels: Mapping[str, str],
license_type: str,
machine_type: str,
machine_type_series: str,
metadata: Mapping[str, str],
network_interfaces: Sequence['outputs.NetworkInterfaceResponse'],
network_tags: Sequence[str],
secure_boot: bool,
service_account: str,
target_project: str,
vm_name: str,
zone: str):
"""
ComputeEngineTargetDefaults is a collection of details for creating a VM in a target Compute Engine project.
:param Sequence[str] additional_licenses: Additional licenses to assign to the VM.
:param 'AppliedLicenseResponse' applied_license: The OS license returned from the adaptation module report.
:param str boot_option: The VM Boot Option, as set in the source vm.
:param 'ComputeSchedulingResponse' compute_scheduling: Compute instance scheduling information (if empty default is used).
:param str disk_type: The disk type to use in the VM.
:param Mapping[str, str] labels: A map of labels to associate with the VM.
:param str license_type: The license type to use in OS adaptation.
:param str machine_type: The machine type to create the VM with.
:param str machine_type_series: The machine type series to create the VM with.
:param Mapping[str, str] metadata: The metadata key/value pairs to assign to the VM.
:param Sequence['NetworkInterfaceResponse'] network_interfaces: List of NICs connected to this VM.
:param Sequence[str] network_tags: A map of network tags to associate with the VM.
:param bool secure_boot: Defines whether the instance has Secure Boot enabled. This can be set to true only if the vm boot option is EFI.
:param str service_account: The service account to associate the VM with.
:param str target_project: The full path of the resource of type TargetProject which represents the Compute Engine project in which to create this VM.
:param str vm_name: The name of the VM to create.
:param str zone: The zone in which to create the VM.
"""
pulumi.set(__self__, "additional_licenses", additional_licenses)
pulumi.set(__self__, "applied_license", applied_license)
pulumi.set(__self__, "boot_option", boot_option)
pulumi.set(__self__, "compute_scheduling", compute_scheduling)
pulumi.set(__self__, "disk_type", disk_type)
pulumi.set(__self__, "labels", labels)
pulumi.set(__self__, "license_type", license_type)
pulumi.set(__self__, "machine_type", machine_type)
pulumi.set(__self__, "machine_type_series", machine_type_series)
pulumi.set(__self__, "metadata", metadata)
pulumi.set(__self__, "network_interfaces", network_interfaces)
pulumi.set(__self__, "network_tags", network_tags)
pulumi.set(__self__, "secure_boot", secure_boot)
pulumi.set(__self__, "service_account", service_account)
pulumi.set(__self__, "target_project", target_project)
pulumi.set(__self__, "vm_name", vm_name)
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter(name="additionalLicenses")
def additional_licenses(self) -> Sequence[str]:
"""
Additional licenses to assign to the VM.
"""
return pulumi.get(self, "additional_licenses")
@property
@pulumi.getter(name="appliedLicense")
def applied_license(self) -> 'outputs.AppliedLicenseResponse':
"""
The OS license returned from the adaptation module report.
"""
return pulumi.get(self, "applied_license")
@property
@pulumi.getter(name="bootOption")
def boot_option(self) -> str:
"""
The VM Boot Option, as set in the source vm.
"""
return pulumi.get(self, "boot_option")
@property
@pulumi.getter(name="computeScheduling")
def compute_scheduling(self) -> 'outputs.ComputeSchedulingResponse':
"""
Compute instance scheduling information (if empty default is used).
"""
return pulumi.get(self, "compute_scheduling")
@property
@pulumi.getter(name="diskType")
def disk_type(self) -> str:
"""
The disk type to use in the VM.
"""
return pulumi.get(self, "disk_type")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
A map of labels to associate with the VM.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> str:
"""
The license type to use in OS adaptation.
"""
return pulumi.get(self, "license_type")
@property
@pulumi.getter(name="machineType")
def machine_type(self) -> str:
"""
The machine type to create the VM with.
"""
return pulumi.get(self, "machine_type")
@property
@pulumi.getter(name="machineTypeSeries")
def machine_type_series(self) -> str:
"""
The machine type series to create the VM with.
"""
return pulumi.get(self, "machine_type_series")
@property
@pulumi.getter
def metadata(self) -> Mapping[str, str]:
"""
The metadata key/value pairs to assign to the VM.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
"""
List of NICs connected to this VM.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="networkTags")
def network_tags(self) -> Sequence[str]:
"""
A map of network tags to associate with the VM.
"""
return pulumi.get(self, "network_tags")
@property
@pulumi.getter(name="secureBoot")
def secure_boot(self) -> bool:
"""
Defines whether the instance has Secure Boot enabled. This can be set to true only if the vm boot option is EFI.
"""
return pulumi.get(self, "secure_boot")
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> str:
"""
The service account to associate the VM with.
"""
return pulumi.get(self, "service_account")
@property
@pulumi.getter(name="targetProject")
def target_project(self) -> str:
"""
The full path of the resource of type TargetProject which represents the Compute Engine project in which to create this VM.
"""
return pulumi.get(self, "target_project")
@property
@pulumi.getter(name="vmName")
def vm_name(self) -> str:
"""
The name of the VM to create.
"""
return pulumi.get(self, "vm_name")
@property
@pulumi.getter
def zone(self) -> str:
"""
The zone in which to create the VM.
"""
return pulumi.get(self, "zone")
@pulumi.output_type
class ComputeEngineTargetDetailsResponse(dict):
"""
ComputeEngineTargetDetails is a collection of details for creating a VM in a target Compute Engine project.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "additionalLicenses":
suggest = "additional_licenses"
elif key == "appliedLicense":
suggest = "applied_license"
elif key == "bootOption":
suggest = "boot_option"
elif key == "computeScheduling":
suggest = "compute_scheduling"
elif key == "diskType":
suggest = "disk_type"
elif key == "licenseType":
suggest = "license_type"
elif key == "machineType":
suggest = "machine_type"
elif key == "machineTypeSeries":
suggest = "machine_type_series"
elif key == "networkInterfaces":
suggest = "network_interfaces"
elif key == "networkTags":
suggest = "network_tags"
elif key == "secureBoot":
suggest = "secure_boot"
elif key == "serviceAccount":
suggest = "service_account"
elif key == "vmName":
suggest = "vm_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ComputeEngineTargetDetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ComputeEngineTargetDetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ComputeEngineTargetDetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
additional_licenses: Sequence[str],
applied_license: 'outputs.AppliedLicenseResponse',
boot_option: str,
compute_scheduling: 'outputs.ComputeSchedulingResponse',
disk_type: str,
labels: Mapping[str, str],
license_type: str,
machine_type: str,
machine_type_series: str,
metadata: Mapping[str, str],
network_interfaces: Sequence['outputs.NetworkInterfaceResponse'],
network_tags: Sequence[str],
project: str,
secure_boot: bool,
service_account: str,
vm_name: str,
zone: str):
"""
ComputeEngineTargetDetails is a collection of details for creating a VM in a target Compute Engine project.
:param Sequence[str] additional_licenses: Additional licenses to assign to the VM.
:param 'AppliedLicenseResponse' applied_license: The OS license returned from the adaptation module report.
:param str boot_option: The VM Boot Option, as set in the source vm.
:param 'ComputeSchedulingResponse' compute_scheduling: Compute instance scheduling information (if empty default is used).
:param str disk_type: The disk type to use in the VM.
:param Mapping[str, str] labels: A map of labels to associate with the VM.
:param str license_type: The license type to use in OS adaptation.
:param str machine_type: The machine type to create the VM with.
:param str machine_type_series: The machine type series to create the VM with.
:param Mapping[str, str] metadata: The metadata key/value pairs to assign to the VM.
:param Sequence['NetworkInterfaceResponse'] network_interfaces: List of NICs connected to this VM.
:param Sequence[str] network_tags: A map of network tags to associate with the VM.
:param str project: The GCP target project ID or project name.
:param bool secure_boot: Defines whether the instance has Secure Boot enabled. This can be set to true only if the vm boot option is EFI.
:param str service_account: The service account to associate the VM with.
:param str vm_name: The name of the VM to create.
:param str zone: The zone in which to create the VM.
"""
pulumi.set(__self__, "additional_licenses", additional_licenses)
pulumi.set(__self__, "applied_license", applied_license)
pulumi.set(__self__, "boot_option", boot_option)
pulumi.set(__self__, "compute_scheduling", compute_scheduling)
pulumi.set(__self__, "disk_type", disk_type)
pulumi.set(__self__, "labels", labels)
pulumi.set(__self__, "license_type", license_type)
pulumi.set(__self__, "machine_type", machine_type)
pulumi.set(__self__, "machine_type_series", machine_type_series)
pulumi.set(__self__, "metadata", metadata)
pulumi.set(__self__, "network_interfaces", network_interfaces)
pulumi.set(__self__, "network_tags", network_tags)
pulumi.set(__self__, "project", project)
pulumi.set(__self__, "secure_boot", secure_boot)
pulumi.set(__self__, "service_account", service_account)
pulumi.set(__self__, "vm_name", vm_name)
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter(name="additionalLicenses")
def additional_licenses(self) -> Sequence[str]:
"""
Additional licenses to assign to the VM.
"""
return pulumi.get(self, "additional_licenses")
@property
@pulumi.getter(name="appliedLicense")
def applied_license(self) -> 'outputs.AppliedLicenseResponse':
"""
The OS license returned from the adaptation module report.
"""
return pulumi.get(self, "applied_license")
@property
@pulumi.getter(name="bootOption")
def boot_option(self) -> str:
"""
The VM Boot Option, as set in the source vm.
"""
return pulumi.get(self, "boot_option")
@property
@pulumi.getter(name="computeScheduling")
def compute_scheduling(self) -> 'outputs.ComputeSchedulingResponse':
"""
Compute instance scheduling information (if empty default is used).
"""
return pulumi.get(self, "compute_scheduling")
@property
@pulumi.getter(name="diskType")
def disk_type(self) -> str:
"""
The disk type to use in the VM.
"""
return pulumi.get(self, "disk_type")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
A map of labels to associate with the VM.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> str:
"""
The license type to use in OS adaptation.
"""
return pulumi.get(self, "license_type")
@property
@pulumi.getter(name="machineType")
def machine_type(self) -> str:
"""
The machine type to create the VM with.
"""
return pulumi.get(self, "machine_type")
@property
@pulumi.getter(name="machineTypeSeries")
def machine_type_series(self) -> str:
"""
The machine type series to create the VM with.
"""
return pulumi.get(self, "machine_type_series")
@property
@pulumi.getter
def metadata(self) -> Mapping[str, str]:
"""
The metadata key/value pairs to assign to the VM.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
"""
List of NICs connected to this VM.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="networkTags")
def network_tags(self) -> Sequence[str]:
"""
A map of network tags to associate with the VM.
"""
return pulumi.get(self, "network_tags")
@property
@pulumi.getter
def project(self) -> str:
"""
The GCP target project ID or project name.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="secureBoot")
def secure_boot(self) -> bool:
"""
Defines whether the instance has Secure Boot enabled. This can be set to true only if the vm boot option is EFI.
"""
return pulumi.get(self, "secure_boot")
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> str:
"""
The service account to associate the VM with.
"""
return pulumi.get(self, "service_account")
@property
@pulumi.getter(name="vmName")
def vm_name(self) -> str:
"""
The name of the VM to create.
"""
return pulumi.get(self, "vm_name")
@property
@pulumi.getter
def zone(self) -> str:
"""
The zone in which to create the VM.
"""
return pulumi.get(self, "zone")
@pulumi.output_type
class ComputeSchedulingResponse(dict):
    """Scheduling information for VM on maintenance/restart behaviour and node allocation in sole tenant nodes."""

    @staticmethod
    def __key_warning(key: str):
        # Wire-format (camelCase) keys mapped to the snake_case property
        # getters that callers should use instead of raw dict access.
        suggest = {
            "automaticRestart": "automatic_restart",
            "minNodeCpus": "min_node_cpus",
            "nodeAffinities": "node_affinities",
            "onHostMaintenance": "on_host_maintenance",
            "restartType": "restart_type",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ComputeSchedulingResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ComputeSchedulingResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        ComputeSchedulingResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 automatic_restart: bool,
                 min_node_cpus: int,
                 node_affinities: Sequence['outputs.SchedulingNodeAffinityResponse'],
                 on_host_maintenance: str,
                 restart_type: str):
        """
        Scheduling information for VM on maintenance/restart behaviour and node allocation in sole tenant nodes.

        :param int min_node_cpus: The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. Ignored if no node_affinites are configured.
        :param Sequence['SchedulingNodeAffinityResponse'] node_affinities: A set of node affinity and anti-affinity configurations for sole tenant nodes.
        :param str on_host_maintenance: How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance.
        :param str restart_type: Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart.
        """
        for prop, value in (
            ("automatic_restart", automatic_restart),
            ("min_node_cpus", min_node_cpus),
            ("node_affinities", node_affinities),
            ("on_host_maintenance", on_host_maintenance),
            ("restart_type", restart_type),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="automaticRestart")
    def automatic_restart(self) -> bool:
        return pulumi.get(self, "automatic_restart")

    @property
    @pulumi.getter(name="minNodeCpus")
    def min_node_cpus(self) -> int:
        """The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. Ignored if no node_affinites are configured."""
        return pulumi.get(self, "min_node_cpus")

    @property
    @pulumi.getter(name="nodeAffinities")
    def node_affinities(self) -> Sequence['outputs.SchedulingNodeAffinityResponse']:
        """A set of node affinity and anti-affinity configurations for sole tenant nodes."""
        return pulumi.get(self, "node_affinities")

    @property
    @pulumi.getter(name="onHostMaintenance")
    def on_host_maintenance(self) -> str:
        """How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance."""
        return pulumi.get(self, "on_host_maintenance")

    @property
    @pulumi.getter(name="restartType")
    def restart_type(self) -> str:
        """Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart."""
        return pulumi.get(self, "restart_type")
@pulumi.output_type
class CutoverJobResponse(dict):
    """CutoverJob message describes a cutover of a migrating VM. The CutoverJob is the operation of shutting down the VM, creating a snapshot and clonning the VM using the replicated snapshot."""

    @staticmethod
    def __key_warning(key: str):
        # Wire-format (camelCase) keys mapped to the snake_case property
        # getters that callers should use instead of raw dict access.
        suggest = {
            "computeEngineTargetDetails": "compute_engine_target_details",
            "createTime": "create_time",
            "progressPercent": "progress_percent",
            "stateMessage": "state_message",
            "stateTime": "state_time",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CutoverJobResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        CutoverJobResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        CutoverJobResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 compute_engine_target_details: 'outputs.ComputeEngineTargetDetailsResponse',
                 create_time: str,
                 error: 'outputs.StatusResponse',
                 name: str,
                 progress: int,
                 progress_percent: int,
                 state: str,
                 state_message: str,
                 state_time: str):
        """
        CutoverJob message describes a cutover of a migrating VM. The CutoverJob is the operation of shutting down the VM, creating a snapshot and clonning the VM using the replicated snapshot.

        :param 'ComputeEngineTargetDetailsResponse' compute_engine_target_details: Details of the target VM in Compute Engine.
        :param str create_time: The time the cutover job was created (as an API call, not when it was actually created in the target).
        :param 'StatusResponse' error: Provides details for the errors that led to the Cutover Job's state.
        :param str name: The name of the cutover job.
        :param int progress: The current progress in percentage of the cutover job.
        :param int progress_percent: The current progress in percentage of the cutover job.
        :param str state: State of the cutover job.
        :param str state_message: A message providing possible extra details about the current state.
        :param str state_time: The time the state was last updated.
        """
        for prop, value in (
            ("compute_engine_target_details", compute_engine_target_details),
            ("create_time", create_time),
            ("error", error),
            ("name", name),
            ("progress", progress),
            ("progress_percent", progress_percent),
            ("state", state),
            ("state_message", state_message),
            ("state_time", state_time),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="computeEngineTargetDetails")
    def compute_engine_target_details(self) -> 'outputs.ComputeEngineTargetDetailsResponse':
        """Details of the target VM in Compute Engine."""
        return pulumi.get(self, "compute_engine_target_details")

    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> str:
        """The time the cutover job was created (as an API call, not when it was actually created in the target)."""
        return pulumi.get(self, "create_time")

    @property
    @pulumi.getter
    def error(self) -> 'outputs.StatusResponse':
        """Provides details for the errors that led to the Cutover Job's state."""
        return pulumi.get(self, "error")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the cutover job."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def progress(self) -> int:
        """The current progress in percentage of the cutover job."""
        return pulumi.get(self, "progress")

    @property
    @pulumi.getter(name="progressPercent")
    def progress_percent(self) -> int:
        """The current progress in percentage of the cutover job."""
        return pulumi.get(self, "progress_percent")

    @property
    @pulumi.getter
    def state(self) -> str:
        """State of the cutover job."""
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="stateMessage")
    def state_message(self) -> str:
        """A message providing possible extra details about the current state."""
        return pulumi.get(self, "state_message")

    @property
    @pulumi.getter(name="stateTime")
    def state_time(self) -> str:
        """The time the state was last updated."""
        return pulumi.get(self, "state_time")
@pulumi.output_type
class NetworkInterfaceResponse(dict):
    """NetworkInterface represents a NIC of a VM."""

    @staticmethod
    def __key_warning(key: str):
        # Wire-format (camelCase) keys mapped to the snake_case property
        # getters that callers should use instead of raw dict access.
        suggest = {
            "externalIp": "external_ip",
            "internalIp": "internal_ip",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkInterfaceResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        NetworkInterfaceResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 external_ip: str,
                 internal_ip: str,
                 network: str,
                 subnetwork: str):
        """
        NetworkInterface represents a NIC of a VM.

        :param str external_ip: The external IP to define in the NIC.
        :param str internal_ip: The internal IP to define in the NIC. The formats accepted are: `ephemeral` / ipv4 address / a named address resource full path.
        :param str network: The network to connect the NIC to.
        :param str subnetwork: The subnetwork to connect the NIC to.
        """
        for prop, value in (
            ("external_ip", external_ip),
            ("internal_ip", internal_ip),
            ("network", network),
            ("subnetwork", subnetwork),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="externalIp")
    def external_ip(self) -> str:
        """The external IP to define in the NIC."""
        return pulumi.get(self, "external_ip")

    @property
    @pulumi.getter(name="internalIp")
    def internal_ip(self) -> str:
        """The internal IP to define in the NIC. The formats accepted are: `ephemeral` / ipv4 address / a named address resource full path."""
        return pulumi.get(self, "internal_ip")

    @property
    @pulumi.getter
    def network(self) -> str:
        """The network to connect the NIC to."""
        return pulumi.get(self, "network")

    @property
    @pulumi.getter
    def subnetwork(self) -> str:
        """The subnetwork to connect the NIC to."""
        return pulumi.get(self, "subnetwork")
@pulumi.output_type
class ReplicationCycleResponse(dict):
    """ReplicationCycle contains information about the current replication cycle status."""

    @staticmethod
    def __key_warning(key: str):
        # Wire-format (camelCase) keys mapped to the snake_case property
        # getters that callers should use instead of raw dict access.
        suggest = {
            "progressPercent": "progress_percent",
            "startTime": "start_time",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ReplicationCycleResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ReplicationCycleResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        ReplicationCycleResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 progress: int,
                 progress_percent: int,
                 start_time: str):
        """
        ReplicationCycle contains information about the current replication cycle status.

        :param int progress: The current progress in percentage of this cycle.
        :param int progress_percent: The current progress in percentage of this cycle.
        :param str start_time: The time the replication cycle has started.
        """
        for prop, value in (
            ("progress", progress),
            ("progress_percent", progress_percent),
            ("start_time", start_time),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def progress(self) -> int:
        """The current progress in percentage of this cycle."""
        return pulumi.get(self, "progress")

    @property
    @pulumi.getter(name="progressPercent")
    def progress_percent(self) -> int:
        """The current progress in percentage of this cycle."""
        return pulumi.get(self, "progress_percent")

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> str:
        """The time the replication cycle has started."""
        return pulumi.get(self, "start_time")
@pulumi.output_type
class ReplicationSyncResponse(dict):
    """ReplicationSync contain information about the last replica sync to the cloud."""

    @staticmethod
    def __key_warning(key: str):
        # Only one wire-format (camelCase) key maps to a snake_case getter.
        suggest = {"lastSyncTime": "last_sync_time"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ReplicationSyncResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ReplicationSyncResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        ReplicationSyncResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 last_sync_time: str):
        """
        ReplicationSync contain information about the last replica sync to the cloud.

        :param str last_sync_time: The most updated snapshot created time in the source that finished replication.
        """
        pulumi.set(__self__, "last_sync_time", last_sync_time)

    @property
    @pulumi.getter(name="lastSyncTime")
    def last_sync_time(self) -> str:
        """The most updated snapshot created time in the source that finished replication."""
        return pulumi.get(self, "last_sync_time")
@pulumi.output_type
class SchedulePolicyResponse(dict):
    """A policy for scheduling replications."""

    @staticmethod
    def __key_warning(key: str):
        # Wire-format (camelCase) keys mapped to the snake_case property
        # getters that callers should use instead of raw dict access.
        suggest = {
            "idleDuration": "idle_duration",
            "skipOsAdaptation": "skip_os_adaptation",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SchedulePolicyResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SchedulePolicyResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        SchedulePolicyResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 idle_duration: str,
                 skip_os_adaptation: bool):
        """
        A policy for scheduling replications.

        :param str idle_duration: The idle duration between replication stages.
        :param bool skip_os_adaptation: A flag to indicate whether to skip OS adaptation during the replication sync. OS adaptation is a process where the VM's operating system undergoes changes and adaptations to fully function on Compute Engine.
        """
        pulumi.set(__self__, "idle_duration", idle_duration)
        pulumi.set(__self__, "skip_os_adaptation", skip_os_adaptation)

    @property
    @pulumi.getter(name="idleDuration")
    def idle_duration(self) -> str:
        """The idle duration between replication stages."""
        return pulumi.get(self, "idle_duration")

    @property
    @pulumi.getter(name="skipOsAdaptation")
    def skip_os_adaptation(self) -> bool:
        """A flag to indicate whether to skip OS adaptation during the replication sync. OS adaptation is a process where the VM's operating system undergoes changes and adaptations to fully function on Compute Engine."""
        return pulumi.get(self, "skip_os_adaptation")
@pulumi.output_type
class SchedulingNodeAffinityResponse(dict):
    """Node Affinity: the configuration of desired nodes onto which this Instance could be scheduled. Based on https://cloud.google.com/compute/docs/reference/rest/v1/instances/setScheduling"""

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Sequence[str]):
        """
        Node Affinity: the configuration of desired nodes onto which this Instance could be scheduled. Based on https://cloud.google.com/compute/docs/reference/rest/v1/instances/setScheduling

        :param str key: The label key of Node resource to reference.
        :param str operator: The operator to use for the node resources specified in the `values` parameter.
        :param Sequence[str] values: Corresponds to the label values of Node resource.
        """
        for prop, value in (("key", key), ("operator", operator), ("values", values)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key of Node resource to reference."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """The operator to use for the node resources specified in the `values` parameter."""
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Corresponds to the label values of Node resource."""
        return pulumi.get(self, "values")
@pulumi.output_type
class StatusResponse(dict):
    """The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors)."""

    def __init__(__self__, *,
                 code: int,
                 details: Sequence[Mapping[str, str]],
                 message: str):
        """
        The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).

        :param int code: The status code, which should be an enum value of google.rpc.Code.
        :param Sequence[Mapping[str, str]] details: A list of messages that carry the error details. There is a common set of message types for APIs to use.
        :param str message: A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
        """
        for prop, value in (("code", code), ("details", details), ("message", message)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def code(self) -> int:
        """The status code, which should be an enum value of google.rpc.Code."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def details(self) -> Sequence[Mapping[str, str]]:
        """A list of messages that carry the error details. There is a common set of message types for APIs to use."""
        return pulumi.get(self, "details")

    @property
    @pulumi.getter
    def message(self) -> str:
        """A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client."""
        return pulumi.get(self, "message")
@pulumi.output_type
class VmUtilizationInfoResponse(dict):
    """Utilization information of a single VM."""

    @staticmethod
    def __key_warning(key: str):
        # Wire-format (camelCase) keys mapped to the snake_case property
        # getters that callers should use instead of raw dict access.
        suggest = {
            "vmId": "vm_id",
            "vmwareVmDetails": "vmware_vm_details",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VmUtilizationInfoResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        VmUtilizationInfoResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        VmUtilizationInfoResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 utilization: 'outputs.VmUtilizationMetricsResponse',
                 vm_id: str,
                 vmware_vm_details: 'outputs.VmwareVmDetailsResponse'):
        """
        Utilization information of a single VM.

        :param 'VmUtilizationMetricsResponse' utilization: Utilization metrics for this VM.
        :param str vm_id: The VM's ID in the source.
        :param 'VmwareVmDetailsResponse' vmware_vm_details: The description of the VM in a Source of type Vmware.
        """
        for prop, value in (
            ("utilization", utilization),
            ("vm_id", vm_id),
            ("vmware_vm_details", vmware_vm_details),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def utilization(self) -> 'outputs.VmUtilizationMetricsResponse':
        """Utilization metrics for this VM."""
        return pulumi.get(self, "utilization")

    @property
    @pulumi.getter(name="vmId")
    def vm_id(self) -> str:
        """The VM's ID in the source."""
        return pulumi.get(self, "vm_id")

    @property
    @pulumi.getter(name="vmwareVmDetails")
    def vmware_vm_details(self) -> 'outputs.VmwareVmDetailsResponse':
        """The description of the VM in a Source of type Vmware."""
        return pulumi.get(self, "vmware_vm_details")
@pulumi.output_type
class VmUtilizationMetricsResponse(dict):
    """Utilization metrics values for a single VM."""

    @staticmethod
    def __key_warning(key: str):
        # Wire-format (camelCase) keys mapped to the snake_case property
        # getters that callers should use instead of raw dict access.
        suggest = {
            "cpuAverage": "cpu_average",
            "cpuAveragePercent": "cpu_average_percent",
            "cpuMax": "cpu_max",
            "cpuMaxPercent": "cpu_max_percent",
            "diskIoRateAverage": "disk_io_rate_average",
            "diskIoRateAverageKbps": "disk_io_rate_average_kbps",
            "diskIoRateMax": "disk_io_rate_max",
            "diskIoRateMaxKbps": "disk_io_rate_max_kbps",
            "memoryAverage": "memory_average",
            "memoryAveragePercent": "memory_average_percent",
            "memoryMax": "memory_max",
            "memoryMaxPercent": "memory_max_percent",
            "networkThroughputAverage": "network_throughput_average",
            "networkThroughputAverageKbps": "network_throughput_average_kbps",
            "networkThroughputMax": "network_throughput_max",
            "networkThroughputMaxKbps": "network_throughput_max_kbps",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VmUtilizationMetricsResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        VmUtilizationMetricsResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        VmUtilizationMetricsResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cpu_average: int,
                 cpu_average_percent: int,
                 cpu_max: int,
                 cpu_max_percent: int,
                 disk_io_rate_average: str,
                 disk_io_rate_average_kbps: str,
                 disk_io_rate_max: str,
                 disk_io_rate_max_kbps: str,
                 memory_average: int,
                 memory_average_percent: int,
                 memory_max: int,
                 memory_max_percent: int,
                 network_throughput_average: str,
                 network_throughput_average_kbps: str,
                 network_throughput_max: str,
                 network_throughput_max_kbps: str):
        """
        Utilization metrics values for a single VM.

        :param int cpu_average: Average CPU usage, percent.
        :param int cpu_average_percent: Average CPU usage, percent.
        :param int cpu_max: Max CPU usage, percent.
        :param int cpu_max_percent: Max CPU usage, percent.
        :param str disk_io_rate_average: Average disk IO rate, in kilobytes per second.
        :param str disk_io_rate_average_kbps: Average disk IO rate, in kilobytes per second.
        :param str disk_io_rate_max: Max disk IO rate, in kilobytes per second.
        :param str disk_io_rate_max_kbps: Max disk IO rate, in kilobytes per second.
        :param int memory_average: Average memory usage, percent.
        :param int memory_average_percent: Average memory usage, percent.
        :param int memory_max: Max memory usage, percent.
        :param int memory_max_percent: Max memory usage, percent.
        :param str network_throughput_average: Average network throughput (combined transmit-rates and receive-rates), in kilobytes per second.
        :param str network_throughput_average_kbps: Average network throughput (combined transmit-rates and receive-rates), in kilobytes per second.
        :param str network_throughput_max: Max network throughput (combined transmit-rates and receive-rates), in kilobytes per second.
        :param str network_throughput_max_kbps: Max network throughput (combined transmit-rates and receive-rates), in kilobytes per second.
        """
        for prop, value in (
            ("cpu_average", cpu_average),
            ("cpu_average_percent", cpu_average_percent),
            ("cpu_max", cpu_max),
            ("cpu_max_percent", cpu_max_percent),
            ("disk_io_rate_average", disk_io_rate_average),
            ("disk_io_rate_average_kbps", disk_io_rate_average_kbps),
            ("disk_io_rate_max", disk_io_rate_max),
            ("disk_io_rate_max_kbps", disk_io_rate_max_kbps),
            ("memory_average", memory_average),
            ("memory_average_percent", memory_average_percent),
            ("memory_max", memory_max),
            ("memory_max_percent", memory_max_percent),
            ("network_throughput_average", network_throughput_average),
            ("network_throughput_average_kbps", network_throughput_average_kbps),
            ("network_throughput_max", network_throughput_max),
            ("network_throughput_max_kbps", network_throughput_max_kbps),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="cpuAverage")
    def cpu_average(self) -> int:
        """Average CPU usage, percent."""
        return pulumi.get(self, "cpu_average")

    @property
    @pulumi.getter(name="cpuAveragePercent")
    def cpu_average_percent(self) -> int:
        """Average CPU usage, percent."""
        return pulumi.get(self, "cpu_average_percent")

    @property
    @pulumi.getter(name="cpuMax")
    def cpu_max(self) -> int:
        """Max CPU usage, percent."""
        return pulumi.get(self, "cpu_max")

    @property
    @pulumi.getter(name="cpuMaxPercent")
    def cpu_max_percent(self) -> int:
        """Max CPU usage, percent."""
        return pulumi.get(self, "cpu_max_percent")

    @property
    @pulumi.getter(name="diskIoRateAverage")
    def disk_io_rate_average(self) -> str:
        """Average disk IO rate, in kilobytes per second."""
        return pulumi.get(self, "disk_io_rate_average")

    @property
    @pulumi.getter(name="diskIoRateAverageKbps")
    def disk_io_rate_average_kbps(self) -> str:
        """Average disk IO rate, in kilobytes per second."""
        return pulumi.get(self, "disk_io_rate_average_kbps")

    @property
    @pulumi.getter(name="diskIoRateMax")
    def disk_io_rate_max(self) -> str:
        """Max disk IO rate, in kilobytes per second."""
        return pulumi.get(self, "disk_io_rate_max")

    @property
    @pulumi.getter(name="diskIoRateMaxKbps")
    def disk_io_rate_max_kbps(self) -> str:
        """Max disk IO rate, in kilobytes per second."""
        return pulumi.get(self, "disk_io_rate_max_kbps")

    @property
    @pulumi.getter(name="memoryAverage")
    def memory_average(self) -> int:
        """Average memory usage, percent."""
        return pulumi.get(self, "memory_average")

    @property
    @pulumi.getter(name="memoryAveragePercent")
    def memory_average_percent(self) -> int:
        """Average memory usage, percent."""
        return pulumi.get(self, "memory_average_percent")

    @property
    @pulumi.getter(name="memoryMax")
    def memory_max(self) -> int:
        """Max memory usage, percent."""
        return pulumi.get(self, "memory_max")

    @property
    @pulumi.getter(name="memoryMaxPercent")
    def memory_max_percent(self) -> int:
        """Max memory usage, percent."""
        return pulumi.get(self, "memory_max_percent")

    @property
    @pulumi.getter(name="networkThroughputAverage")
    def network_throughput_average(self) -> str:
        """Average network throughput (combined transmit-rates and receive-rates), in kilobytes per second."""
        return pulumi.get(self, "network_throughput_average")

    @property
    @pulumi.getter(name="networkThroughputAverageKbps")
    def network_throughput_average_kbps(self) -> str:
        """Average network throughput (combined transmit-rates and receive-rates), in kilobytes per second."""
        return pulumi.get(self, "network_throughput_average_kbps")

    @property
    @pulumi.getter(name="networkThroughputMax")
    def network_throughput_max(self) -> str:
        """Max network throughput (combined transmit-rates and receive-rates), in kilobytes per second."""
        return pulumi.get(self, "network_throughput_max")

    @property
    @pulumi.getter(name="networkThroughputMaxKbps")
    def network_throughput_max_kbps(self) -> str:
        """Max network throughput (combined transmit-rates and receive-rates), in kilobytes per second."""
        return pulumi.get(self, "network_throughput_max_kbps")
@pulumi.output_type
class VmwareSourceDetailsResponse(dict):
    """VmwareSourceDetails message describes a specific source details for the vmware source type."""

    @staticmethod
    def __key_warning(key: str):
        # Only one wire-format (camelCase) key maps to a snake_case getter.
        suggest = {"vcenterIp": "vcenter_ip"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VmwareSourceDetailsResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        VmwareSourceDetailsResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        VmwareSourceDetailsResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 password: str,
                 thumbprint: str,
                 username: str,
                 vcenter_ip: str):
        """
        VmwareSourceDetails message describes a specific source details for the vmware source type.

        :param str password: Input only. The credentials password. This is write only and can not be read in a GET operation.
        :param str thumbprint: The thumbprint representing the certificate for the vcenter.
        :param str username: The credentials username.
        :param str vcenter_ip: The ip address of the vcenter this Source represents.
        """
        for prop, value in (
            ("password", password),
            ("thumbprint", thumbprint),
            ("username", username),
            ("vcenter_ip", vcenter_ip),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def password(self) -> str:
        """Input only. The credentials password. This is write only and can not be read in a GET operation."""
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def thumbprint(self) -> str:
        """The thumbprint representing the certificate for the vcenter."""
        return pulumi.get(self, "thumbprint")

    @property
    @pulumi.getter
    def username(self) -> str:
        """The credentials username."""
        return pulumi.get(self, "username")

    @property
    @pulumi.getter(name="vcenterIp")
    def vcenter_ip(self) -> str:
        """The ip address of the vcenter this Source represents."""
        return pulumi.get(self, "vcenter_ip")
@pulumi.output_type
class VmwareVmDetailsResponse(dict):
    """
    VmwareVmDetails describes a VM in vCenter.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire-format keys mapped to their snake_case property names;
        # keys not present here (e.g. "uuid") yield None and produce no warning.
        suggest = {
            "bootOption": "boot_option",
            "committedStorage": "committed_storage",
            "committedStorageMb": "committed_storage_mb",
            "cpuCount": "cpu_count",
            "datacenterDescription": "datacenter_description",
            "datacenterId": "datacenter_id",
            "diskCount": "disk_count",
            "displayName": "display_name",
            "guestDescription": "guest_description",
            "memoryMb": "memory_mb",
            "powerState": "power_state",
            "vmId": "vm_id",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VmwareVmDetailsResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn when a raw camelCase key is used instead of the property getter.
        VmwareVmDetailsResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn when a raw camelCase key is used instead of the property getter.
        VmwareVmDetailsResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 boot_option: str,
                 committed_storage: str,
                 committed_storage_mb: str,
                 cpu_count: int,
                 datacenter_description: str,
                 datacenter_id: str,
                 disk_count: int,
                 display_name: str,
                 guest_description: str,
                 memory_mb: int,
                 power_state: str,
                 uuid: str,
                 vm_id: str):
        """
        VmwareVmDetails describes a VM in vCenter.
        :param str boot_option: The VM Boot Option.
        :param str committed_storage: The total size of the storage allocated to the VM in MB.
        :param str committed_storage_mb: The total size of the storage allocated to the VM in MB.
        :param int cpu_count: The number of cpus in the VM.
        :param str datacenter_description: The descriptive name of the vCenter's datacenter this VM is contained in.
        :param str datacenter_id: The id of the vCenter's datacenter this VM is contained in.
        :param int disk_count: The number of disks the VM has.
        :param str display_name: The display name of the VM. Note that this is not necessarily unique.
        :param str guest_description: The VM's OS. See for example https://pubs.vmware.com/vi-sdk/visdk250/ReferenceGuide/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html for types of strings this might hold.
        :param int memory_mb: The size of the memory of the VM in MB.
        :param str power_state: The power state of the VM at the moment list was taken.
        :param str uuid: The unique identifier of the VM in vCenter.
        :param str vm_id: The VM's id in the source (note that this is not the MigratingVm's id). This is the moref id of the VM.
        """
        # Register every field with the pulumi runtime under its snake_case name.
        for prop, value in (
                ("boot_option", boot_option),
                ("committed_storage", committed_storage),
                ("committed_storage_mb", committed_storage_mb),
                ("cpu_count", cpu_count),
                ("datacenter_description", datacenter_description),
                ("datacenter_id", datacenter_id),
                ("disk_count", disk_count),
                ("display_name", display_name),
                ("guest_description", guest_description),
                ("memory_mb", memory_mb),
                ("power_state", power_state),
                ("uuid", uuid),
                ("vm_id", vm_id)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="bootOption")
    def boot_option(self) -> str:
        """
        The VM Boot Option.
        """
        return pulumi.get(self, "boot_option")

    @property
    @pulumi.getter(name="committedStorage")
    def committed_storage(self) -> str:
        """
        Total size of the storage allocated to the VM, in MB.
        """
        return pulumi.get(self, "committed_storage")

    @property
    @pulumi.getter(name="committedStorageMb")
    def committed_storage_mb(self) -> str:
        """
        Total size of the storage allocated to the VM, in MB.
        """
        return pulumi.get(self, "committed_storage_mb")

    @property
    @pulumi.getter(name="cpuCount")
    def cpu_count(self) -> int:
        """
        Number of cpus in the VM.
        """
        return pulumi.get(self, "cpu_count")

    @property
    @pulumi.getter(name="datacenterDescription")
    def datacenter_description(self) -> str:
        """
        Descriptive name of the vCenter's datacenter containing this VM.
        """
        return pulumi.get(self, "datacenter_description")

    @property
    @pulumi.getter(name="datacenterId")
    def datacenter_id(self) -> str:
        """
        Id of the vCenter's datacenter containing this VM.
        """
        return pulumi.get(self, "datacenter_id")

    @property
    @pulumi.getter(name="diskCount")
    def disk_count(self) -> int:
        """
        Number of disks the VM has.
        """
        return pulumi.get(self, "disk_count")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        Display name of the VM. Note that this is not necessarily unique.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="guestDescription")
    def guest_description(self) -> str:
        """
        The VM's OS. See for example https://pubs.vmware.com/vi-sdk/visdk250/ReferenceGuide/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html for types of strings this might hold.
        """
        return pulumi.get(self, "guest_description")

    @property
    @pulumi.getter(name="memoryMb")
    def memory_mb(self) -> int:
        """
        Size of the VM's memory, in MB.
        """
        return pulumi.get(self, "memory_mb")

    @property
    @pulumi.getter(name="powerState")
    def power_state(self) -> str:
        """
        Power state of the VM at the moment the list was taken.
        """
        return pulumi.get(self, "power_state")

    @property
    @pulumi.getter
    def uuid(self) -> str:
        """
        Unique identifier of the VM in vCenter.
        """
        return pulumi.get(self, "uuid")

    @property
    @pulumi.getter(name="vmId")
    def vm_id(self) -> str:
        """
        The VM's id in the source (note that this is not the MigratingVm's id). This is the moref id of the VM.
        """
        return pulumi.get(self, "vm_id")
| [
"pulumi.getter",
"pulumi.log.warn",
"pulumi.set",
"pulumi.get"
] | [((2116, 2147), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""osLicense"""'}), "(name='osLicense')\n", (2129, 2147), False, 'import pulumi\n'), ((5829, 5877), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""computeEngineTargetDetails"""'}), "(name='computeEngineTargetDetails')\n", (5842, 5877), False, 'import pulumi\n'), ((6132, 6164), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""createTime"""'}), "(name='createTime')\n", (6145, 6164), False, 'import pulumi\n'), ((6937, 6968), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""stateTime"""'}), "(name='stateTime')\n", (6950, 6968), False, 'import pulumi\n'), ((12495, 12535), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""additionalLicenses"""'}), "(name='additionalLicenses')\n", (12508, 12535), False, 'import pulumi\n'), ((12736, 12772), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""appliedLicense"""'}), "(name='appliedLicense')\n", (12749, 12772), False, 'import pulumi\n'), ((13002, 13034), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""bootOption"""'}), "(name='bootOption')\n", (13015, 13034), False, 'import pulumi\n'), ((13213, 13252), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""computeScheduling"""'}), "(name='computeScheduling')\n", (13226, 13252), False, 'import pulumi\n'), ((13500, 13530), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""diskType"""'}), "(name='diskType')\n", (13513, 13530), False, 'import pulumi\n'), ((13885, 13918), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""licenseType"""'}), "(name='licenseType')\n", (13898, 13918), False, 'import pulumi\n'), ((14096, 14129), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""machineType"""'}), "(name='machineType')\n", (14109, 14129), False, 'import pulumi\n'), ((14305, 14344), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""machineTypeSeries"""'}), "(name='machineTypeSeries')\n", (14318, 14344), False, 'import pulumi\n'), ((14746, 14785), 'pulumi.getter', 'pulumi.getter', ([], {'name': 
'"""networkInterfaces"""'}), "(name='networkInterfaces')\n", (14759, 14785), False, 'import pulumi\n'), ((15009, 15042), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkTags"""'}), "(name='networkTags')\n", (15022, 15042), False, 'import pulumi\n'), ((15236, 15268), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""secureBoot"""'}), "(name='secureBoot')\n", (15249, 15268), False, 'import pulumi\n'), ((15516, 15552), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""serviceAccount"""'}), "(name='serviceAccount')\n", (15529, 15552), False, 'import pulumi\n'), ((15740, 15775), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""targetProject"""'}), "(name='targetProject')\n", (15753, 15775), False, 'import pulumi\n'), ((16039, 16067), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""vmName"""'}), "(name='vmName')\n", (16052, 16067), False, 'import pulumi\n'), ((21559, 21599), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""additionalLicenses"""'}), "(name='additionalLicenses')\n", (21572, 21599), False, 'import pulumi\n'), ((21800, 21836), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""appliedLicense"""'}), "(name='appliedLicense')\n", (21813, 21836), False, 'import pulumi\n'), ((22066, 22098), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""bootOption"""'}), "(name='bootOption')\n", (22079, 22098), False, 'import pulumi\n'), ((22277, 22316), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""computeScheduling"""'}), "(name='computeScheduling')\n", (22290, 22316), False, 'import pulumi\n'), ((22564, 22594), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""diskType"""'}), "(name='diskType')\n", (22577, 22594), False, 'import pulumi\n'), ((22949, 22982), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""licenseType"""'}), "(name='licenseType')\n", (22962, 22982), False, 'import pulumi\n'), ((23160, 23193), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""machineType"""'}), "(name='machineType')\n", (23173, 23193), False, 'import 
pulumi\n'), ((23369, 23408), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""machineTypeSeries"""'}), "(name='machineTypeSeries')\n", (23382, 23408), False, 'import pulumi\n'), ((23810, 23849), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkInterfaces"""'}), "(name='networkInterfaces')\n", (23823, 23849), False, 'import pulumi\n'), ((24073, 24106), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkTags"""'}), "(name='networkTags')\n", (24086, 24106), False, 'import pulumi\n'), ((24482, 24514), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""secureBoot"""'}), "(name='secureBoot')\n", (24495, 24514), False, 'import pulumi\n'), ((24762, 24798), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""serviceAccount"""'}), "(name='serviceAccount')\n", (24775, 24798), False, 'import pulumi\n'), ((24986, 25014), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""vmName"""'}), "(name='vmName')\n", (24999, 25014), False, 'import pulumi\n'), ((28067, 28105), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""automaticRestart"""'}), "(name='automaticRestart')\n", (28080, 28105), False, 'import pulumi\n'), ((28220, 28253), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""minNodeCpus"""'}), "(name='minNodeCpus')\n", (28233, 28253), False, 'import pulumi\n'), ((28534, 28570), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""nodeAffinities"""'}), "(name='nodeAffinities')\n", (28547, 28570), False, 'import pulumi\n'), ((28838, 28877), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""onHostMaintenance"""'}), "(name='onHostMaintenance')\n", (28851, 28877), False, 'import pulumi\n'), ((29152, 29185), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""restartType"""'}), "(name='restartType')\n", (29165, 29185), False, 'import pulumi\n'), ((32807, 32855), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""computeEngineTargetDetails"""'}), "(name='computeEngineTargetDetails')\n", (32820, 32855), False, 'import pulumi\n'), ((33110, 33142), 
'pulumi.getter', 'pulumi.getter', ([], {'name': '"""createTime"""'}), "(name='createTime')\n", (33123, 33142), False, 'import pulumi\n'), ((33962, 33999), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""progressPercent"""'}), "(name='progressPercent')\n", (33975, 33999), False, 'import pulumi\n'), ((34359, 34393), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""stateMessage"""'}), "(name='stateMessage')\n", (34372, 34393), False, 'import pulumi\n'), ((34599, 34630), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""stateTime"""'}), "(name='stateTime')\n", (34612, 34630), False, 'import pulumi\n'), ((36406, 36438), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""externalIp"""'}), "(name='externalIp')\n", (36419, 36438), False, 'import pulumi\n'), ((36610, 36642), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""internalIp"""'}), "(name='internalIp')\n", (36623, 36642), False, 'import pulumi\n'), ((38950, 38987), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""progressPercent"""'}), "(name='progressPercent')\n", (38963, 38987), False, 'import pulumi\n'), ((39181, 39212), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""startTime"""'}), "(name='startTime')\n", (39194, 39212), False, 'import pulumi\n'), ((40496, 40530), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""lastSyncTime"""'}), "(name='lastSyncTime')\n", (40509, 40530), False, 'import pulumi\n'), ((42180, 42214), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""idleDuration"""'}), "(name='idleDuration')\n", (42193, 42214), False, 'import pulumi\n'), ((42398, 42436), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""skipOsAdaptation"""'}), "(name='skipOsAdaptation')\n", (42411, 42436), False, 'import pulumi\n'), ((48612, 48638), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""vmId"""'}), "(name='vmId')\n", (48625, 48638), False, 'import pulumi\n'), ((48787, 48824), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""vmwareVmDetails"""'}), "(name='vmwareVmDetails')\n", 
(48800, 48824), False, 'import pulumi\n'), ((54455, 54487), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cpuAverage"""'}), "(name='cpuAverage')\n", (54468, 54487), False, 'import pulumi\n'), ((54649, 54688), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cpuAveragePercent"""'}), "(name='cpuAveragePercent')\n", (54662, 54688), False, 'import pulumi\n'), ((54866, 54894), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cpuMax"""'}), "(name='cpuMax')\n", (54879, 54894), False, 'import pulumi\n'), ((55044, 55079), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cpuMaxPercent"""'}), "(name='cpuMaxPercent')\n", (55057, 55079), False, 'import pulumi\n'), ((55245, 55284), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""diskIoRateAverage"""'}), "(name='diskIoRateAverage')\n", (55258, 55284), False, 'import pulumi\n'), ((55483, 55526), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""diskIoRateAverageKbps"""'}), "(name='diskIoRateAverageKbps')\n", (55496, 55526), False, 'import pulumi\n'), ((55735, 55770), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""diskIoRateMax"""'}), "(name='diskIoRateMax')\n", (55748, 55770), False, 'import pulumi\n'), ((55957, 55996), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""diskIoRateMaxKbps"""'}), "(name='diskIoRateMaxKbps')\n", (55970, 55996), False, 'import pulumi\n'), ((56193, 56228), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""memoryAverage"""'}), "(name='memoryAverage')\n", (56206, 56228), False, 'import pulumi\n'), ((56399, 56441), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""memoryAveragePercent"""'}), "(name='memoryAveragePercent')\n", (56412, 56441), False, 'import pulumi\n'), ((56628, 56659), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""memoryMax"""'}), "(name='memoryMax')\n", (56641, 56659), False, 'import pulumi\n'), ((56818, 56856), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""memoryMaxPercent"""'}), "(name='memoryMaxPercent')\n", (56831, 56856), False, 'import 
pulumi\n'), ((57031, 57077), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkThroughputAverage"""'}), "(name='networkThroughputAverage')\n", (57044, 57077), False, 'import pulumi\n'), ((57338, 57388), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkThroughputAverageKbps"""'}), "(name='networkThroughputAverageKbps')\n", (57351, 57388), False, 'import pulumi\n'), ((57659, 57701), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkThroughputMax"""'}), "(name='networkThroughputMax')\n", (57672, 57701), False, 'import pulumi\n'), ((57950, 57996), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkThroughputMaxKbps"""'}), "(name='networkThroughputMaxKbps')\n", (57963, 57996), False, 'import pulumi\n'), ((60497, 60528), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""vcenterIp"""'}), "(name='vcenterIp')\n", (60510, 60528), False, 'import pulumi\n'), ((64842, 64874), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""bootOption"""'}), "(name='bootOption')\n", (64855, 64874), False, 'import pulumi\n'), ((65028, 65066), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""committedStorage"""'}), "(name='committedStorage')\n", (65041, 65066), False, 'import pulumi\n'), ((65269, 65309), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""committedStorageMb"""'}), "(name='committedStorageMb')\n", (65282, 65309), False, 'import pulumi\n'), ((65518, 65548), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cpuCount"""'}), "(name='cpuCount')\n", (65531, 65548), False, 'import pulumi\n'), ((65708, 65751), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""datacenterDescription"""'}), "(name='datacenterDescription')\n", (65721, 65751), False, 'import pulumi\n'), ((65981, 66015), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""datacenterId"""'}), "(name='datacenterId')\n", (65994, 66015), False, 'import pulumi\n'), ((66213, 66244), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""diskCount"""'}), "(name='diskCount')\n", (66226, 
66244), False, 'import pulumi\n'), ((66408, 66441), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""displayName"""'}), "(name='displayName')\n", (66421, 66441), False, 'import pulumi\n'), ((66647, 66685), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""guestDescription"""'}), "(name='guestDescription')\n", (66660, 66685), False, 'import pulumi\n'), ((67001, 67031), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""memoryMb"""'}), "(name='memoryMb')\n", (67014, 67031), False, 'import pulumi\n'), ((67201, 67233), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""powerState"""'}), "(name='powerState')\n", (67214, 67233), False, 'import pulumi\n'), ((67600, 67626), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""vmId"""'}), "(name='vmId')\n", (67613, 67626), False, 'import pulumi\n'), ((2006, 2052), 'pulumi.set', 'pulumi.set', (['__self__', '"""os_license"""', 'os_license'], {}), "(__self__, 'os_license', os_license)\n", (2016, 2052), False, 'import pulumi\n'), ((2061, 2095), 'pulumi.set', 'pulumi.set', (['__self__', '"""type"""', 'type'], {}), "(__self__, 'type', type)\n", (2071, 2095), False, 'import pulumi\n'), ((2289, 2319), 'pulumi.get', 'pulumi.get', (['self', '"""os_license"""'], {}), "(self, 'os_license')\n", (2299, 2319), False, 'import pulumi\n'), ((2477, 2501), 'pulumi.get', 'pulumi.get', (['self', '"""type"""'], {}), "(self, 'type')\n", (2487, 2501), False, 'import pulumi\n'), ((5479, 5567), 'pulumi.set', 'pulumi.set', (['__self__', '"""compute_engine_target_details"""', 'compute_engine_target_details'], {}), "(__self__, 'compute_engine_target_details',\n compute_engine_target_details)\n", (5489, 5567), False, 'import pulumi\n'), ((5572, 5620), 'pulumi.set', 'pulumi.set', (['__self__', '"""create_time"""', 'create_time'], {}), "(__self__, 'create_time', create_time)\n", (5582, 5620), False, 'import pulumi\n'), ((5629, 5665), 'pulumi.set', 'pulumi.set', (['__self__', '"""error"""', 'error'], {}), "(__self__, 'error', error)\n", (5639, 5665), 
False, 'import pulumi\n'), ((5674, 5708), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (5684, 5708), False, 'import pulumi\n'), ((5717, 5753), 'pulumi.set', 'pulumi.set', (['__self__', '"""state"""', 'state'], {}), "(__self__, 'state', state)\n", (5727, 5753), False, 'import pulumi\n'), ((5762, 5808), 'pulumi.set', 'pulumi.set', (['__self__', '"""state_time"""', 'state_time'], {}), "(__self__, 'state_time', state_time)\n", (5772, 5808), False, 'import pulumi\n'), ((6062, 6111), 'pulumi.get', 'pulumi.get', (['self', '"""compute_engine_target_details"""'], {}), "(self, 'compute_engine_target_details')\n", (6072, 6111), False, 'import pulumi\n'), ((6347, 6378), 'pulumi.get', 'pulumi.get', (['self', '"""create_time"""'], {}), "(self, 'create_time')\n", (6357, 6378), False, 'import pulumi\n'), ((6576, 6601), 'pulumi.get', 'pulumi.get', (['self', '"""error"""'], {}), "(self, 'error')\n", (6586, 6601), False, 'import pulumi\n'), ((6733, 6757), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (6743, 6757), False, 'import pulumi\n'), ((6891, 6916), 'pulumi.get', 'pulumi.get', (['self', '"""state"""'], {}), "(self, 'state')\n", (6901, 6916), False, 'import pulumi\n'), ((7086, 7116), 'pulumi.get', 'pulumi.get', (['self', '"""state_time"""'], {}), "(self, 'state_time')\n", (7096, 7116), False, 'import pulumi\n'), ((11468, 11532), 'pulumi.set', 'pulumi.set', (['__self__', '"""additional_licenses"""', 'additional_licenses'], {}), "(__self__, 'additional_licenses', additional_licenses)\n", (11478, 11532), False, 'import pulumi\n'), ((11541, 11597), 'pulumi.set', 'pulumi.set', (['__self__', '"""applied_license"""', 'applied_license'], {}), "(__self__, 'applied_license', applied_license)\n", (11551, 11597), False, 'import pulumi\n'), ((11606, 11654), 'pulumi.set', 'pulumi.set', (['__self__', '"""boot_option"""', 'boot_option'], {}), "(__self__, 'boot_option', boot_option)\n", (11616, 11654), False, 
'import pulumi\n'), ((11663, 11725), 'pulumi.set', 'pulumi.set', (['__self__', '"""compute_scheduling"""', 'compute_scheduling'], {}), "(__self__, 'compute_scheduling', compute_scheduling)\n", (11673, 11725), False, 'import pulumi\n'), ((11734, 11778), 'pulumi.set', 'pulumi.set', (['__self__', '"""disk_type"""', 'disk_type'], {}), "(__self__, 'disk_type', disk_type)\n", (11744, 11778), False, 'import pulumi\n'), ((11787, 11825), 'pulumi.set', 'pulumi.set', (['__self__', '"""labels"""', 'labels'], {}), "(__self__, 'labels', labels)\n", (11797, 11825), False, 'import pulumi\n'), ((11834, 11884), 'pulumi.set', 'pulumi.set', (['__self__', '"""license_type"""', 'license_type'], {}), "(__self__, 'license_type', license_type)\n", (11844, 11884), False, 'import pulumi\n'), ((11893, 11943), 'pulumi.set', 'pulumi.set', (['__self__', '"""machine_type"""', 'machine_type'], {}), "(__self__, 'machine_type', machine_type)\n", (11903, 11943), False, 'import pulumi\n'), ((11952, 12016), 'pulumi.set', 'pulumi.set', (['__self__', '"""machine_type_series"""', 'machine_type_series'], {}), "(__self__, 'machine_type_series', machine_type_series)\n", (11962, 12016), False, 'import pulumi\n'), ((12025, 12067), 'pulumi.set', 'pulumi.set', (['__self__', '"""metadata"""', 'metadata'], {}), "(__self__, 'metadata', metadata)\n", (12035, 12067), False, 'import pulumi\n'), ((12076, 12138), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_interfaces"""', 'network_interfaces'], {}), "(__self__, 'network_interfaces', network_interfaces)\n", (12086, 12138), False, 'import pulumi\n'), ((12147, 12197), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_tags"""', 'network_tags'], {}), "(__self__, 'network_tags', network_tags)\n", (12157, 12197), False, 'import pulumi\n'), ((12206, 12254), 'pulumi.set', 'pulumi.set', (['__self__', '"""secure_boot"""', 'secure_boot'], {}), "(__self__, 'secure_boot', secure_boot)\n", (12216, 12254), False, 'import pulumi\n'), ((12263, 12319), 'pulumi.set', 
'pulumi.set', (['__self__', '"""service_account"""', 'service_account'], {}), "(__self__, 'service_account', service_account)\n", (12273, 12319), False, 'import pulumi\n'), ((12328, 12382), 'pulumi.set', 'pulumi.set', (['__self__', '"""target_project"""', 'target_project'], {}), "(__self__, 'target_project', target_project)\n", (12338, 12382), False, 'import pulumi\n'), ((12391, 12431), 'pulumi.set', 'pulumi.set', (['__self__', '"""vm_name"""', 'vm_name'], {}), "(__self__, 'vm_name', vm_name)\n", (12401, 12431), False, 'import pulumi\n'), ((12440, 12474), 'pulumi.set', 'pulumi.set', (['__self__', '"""zone"""', 'zone'], {}), "(__self__, 'zone', zone)\n", (12450, 12474), False, 'import pulumi\n'), ((12676, 12715), 'pulumi.get', 'pulumi.get', (['self', '"""additional_licenses"""'], {}), "(self, 'additional_licenses')\n", (12686, 12715), False, 'import pulumi\n'), ((12946, 12981), 'pulumi.get', 'pulumi.get', (['self', '"""applied_license"""'], {}), "(self, 'applied_license')\n", (12956, 12981), False, 'import pulumi\n'), ((13161, 13192), 'pulumi.get', 'pulumi.get', (['self', '"""boot_option"""'], {}), "(self, 'boot_option')\n", (13171, 13192), False, 'import pulumi\n'), ((13441, 13479), 'pulumi.get', 'pulumi.get', (['self', '"""compute_scheduling"""'], {}), "(self, 'compute_scheduling')\n", (13451, 13479), False, 'import pulumi\n'), ((13642, 13671), 'pulumi.get', 'pulumi.get', (['self', '"""disk_type"""'], {}), "(self, 'disk_type')\n", (13652, 13671), False, 'import pulumi\n'), ((13838, 13864), 'pulumi.get', 'pulumi.get', (['self', '"""labels"""'], {}), "(self, 'labels')\n", (13848, 13864), False, 'import pulumi\n'), ((14043, 14075), 'pulumi.get', 'pulumi.get', (['self', '"""license_type"""'], {}), "(self, 'license_type')\n", (14053, 14075), False, 'import pulumi\n'), ((14252, 14284), 'pulumi.get', 'pulumi.get', (['self', '"""machine_type"""'], {}), "(self, 'machine_type')\n", (14262, 14284), False, 'import pulumi\n'), ((14481, 14520), 'pulumi.get', 'pulumi.get', 
(['self', '"""machine_type_series"""'], {}), "(self, 'machine_type_series')\n", (14491, 14520), False, 'import pulumi\n'), ((14697, 14725), 'pulumi.get', 'pulumi.get', (['self', '"""metadata"""'], {}), "(self, 'metadata')\n", (14707, 14725), False, 'import pulumi\n'), ((14950, 14988), 'pulumi.get', 'pulumi.get', (['self', '"""network_interfaces"""'], {}), "(self, 'network_interfaces')\n", (14960, 14988), False, 'import pulumi\n'), ((15183, 15215), 'pulumi.get', 'pulumi.get', (['self', '"""network_tags"""'], {}), "(self, 'network_tags')\n", (15193, 15215), False, 'import pulumi\n'), ((15464, 15495), 'pulumi.get', 'pulumi.get', (['self', '"""secure_boot"""'], {}), "(self, 'secure_boot')\n", (15474, 15495), False, 'import pulumi\n'), ((15684, 15719), 'pulumi.get', 'pulumi.get', (['self', '"""service_account"""'], {}), "(self, 'service_account')\n", (15694, 15719), False, 'import pulumi\n'), ((15984, 16018), 'pulumi.get', 'pulumi.get', (['self', '"""target_project"""'], {}), "(self, 'target_project')\n", (15994, 16018), False, 'import pulumi\n'), ((16175, 16202), 'pulumi.get', 'pulumi.get', (['self', '"""vm_name"""'], {}), "(self, 'vm_name')\n", (16185, 16202), False, 'import pulumi\n'), ((16347, 16371), 'pulumi.get', 'pulumi.get', (['self', '"""zone"""'], {}), "(self, 'zone')\n", (16357, 16371), False, 'import pulumi\n'), ((20546, 20610), 'pulumi.set', 'pulumi.set', (['__self__', '"""additional_licenses"""', 'additional_licenses'], {}), "(__self__, 'additional_licenses', additional_licenses)\n", (20556, 20610), False, 'import pulumi\n'), ((20619, 20675), 'pulumi.set', 'pulumi.set', (['__self__', '"""applied_license"""', 'applied_license'], {}), "(__self__, 'applied_license', applied_license)\n", (20629, 20675), False, 'import pulumi\n'), ((20684, 20732), 'pulumi.set', 'pulumi.set', (['__self__', '"""boot_option"""', 'boot_option'], {}), "(__self__, 'boot_option', boot_option)\n", (20694, 20732), False, 'import pulumi\n'), ((20741, 20803), 'pulumi.set', 'pulumi.set', 
(['__self__', '"""compute_scheduling"""', 'compute_scheduling'], {}), "(__self__, 'compute_scheduling', compute_scheduling)\n", (20751, 20803), False, 'import pulumi\n'), ((20812, 20856), 'pulumi.set', 'pulumi.set', (['__self__', '"""disk_type"""', 'disk_type'], {}), "(__self__, 'disk_type', disk_type)\n", (20822, 20856), False, 'import pulumi\n'), ((20865, 20903), 'pulumi.set', 'pulumi.set', (['__self__', '"""labels"""', 'labels'], {}), "(__self__, 'labels', labels)\n", (20875, 20903), False, 'import pulumi\n'), ((20912, 20962), 'pulumi.set', 'pulumi.set', (['__self__', '"""license_type"""', 'license_type'], {}), "(__self__, 'license_type', license_type)\n", (20922, 20962), False, 'import pulumi\n'), ((20971, 21021), 'pulumi.set', 'pulumi.set', (['__self__', '"""machine_type"""', 'machine_type'], {}), "(__self__, 'machine_type', machine_type)\n", (20981, 21021), False, 'import pulumi\n'), ((21030, 21094), 'pulumi.set', 'pulumi.set', (['__self__', '"""machine_type_series"""', 'machine_type_series'], {}), "(__self__, 'machine_type_series', machine_type_series)\n", (21040, 21094), False, 'import pulumi\n'), ((21103, 21145), 'pulumi.set', 'pulumi.set', (['__self__', '"""metadata"""', 'metadata'], {}), "(__self__, 'metadata', metadata)\n", (21113, 21145), False, 'import pulumi\n'), ((21154, 21216), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_interfaces"""', 'network_interfaces'], {}), "(__self__, 'network_interfaces', network_interfaces)\n", (21164, 21216), False, 'import pulumi\n'), ((21225, 21275), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_tags"""', 'network_tags'], {}), "(__self__, 'network_tags', network_tags)\n", (21235, 21275), False, 'import pulumi\n'), ((21284, 21324), 'pulumi.set', 'pulumi.set', (['__self__', '"""project"""', 'project'], {}), "(__self__, 'project', project)\n", (21294, 21324), False, 'import pulumi\n'), ((21333, 21381), 'pulumi.set', 'pulumi.set', (['__self__', '"""secure_boot"""', 'secure_boot'], {}), "(__self__, 
'secure_boot', secure_boot)\n", (21343, 21381), False, 'import pulumi\n'), ((21390, 21446), 'pulumi.set', 'pulumi.set', (['__self__', '"""service_account"""', 'service_account'], {}), "(__self__, 'service_account', service_account)\n", (21400, 21446), False, 'import pulumi\n'), ((21455, 21495), 'pulumi.set', 'pulumi.set', (['__self__', '"""vm_name"""', 'vm_name'], {}), "(__self__, 'vm_name', vm_name)\n", (21465, 21495), False, 'import pulumi\n'), ((21504, 21538), 'pulumi.set', 'pulumi.set', (['__self__', '"""zone"""', 'zone'], {}), "(__self__, 'zone', zone)\n", (21514, 21538), False, 'import pulumi\n'), ((21740, 21779), 'pulumi.get', 'pulumi.get', (['self', '"""additional_licenses"""'], {}), "(self, 'additional_licenses')\n", (21750, 21779), False, 'import pulumi\n'), ((22010, 22045), 'pulumi.get', 'pulumi.get', (['self', '"""applied_license"""'], {}), "(self, 'applied_license')\n", (22020, 22045), False, 'import pulumi\n'), ((22225, 22256), 'pulumi.get', 'pulumi.get', (['self', '"""boot_option"""'], {}), "(self, 'boot_option')\n", (22235, 22256), False, 'import pulumi\n'), ((22505, 22543), 'pulumi.get', 'pulumi.get', (['self', '"""compute_scheduling"""'], {}), "(self, 'compute_scheduling')\n", (22515, 22543), False, 'import pulumi\n'), ((22706, 22735), 'pulumi.get', 'pulumi.get', (['self', '"""disk_type"""'], {}), "(self, 'disk_type')\n", (22716, 22735), False, 'import pulumi\n'), ((22902, 22928), 'pulumi.get', 'pulumi.get', (['self', '"""labels"""'], {}), "(self, 'labels')\n", (22912, 22928), False, 'import pulumi\n'), ((23107, 23139), 'pulumi.get', 'pulumi.get', (['self', '"""license_type"""'], {}), "(self, 'license_type')\n", (23117, 23139), False, 'import pulumi\n'), ((23316, 23348), 'pulumi.get', 'pulumi.get', (['self', '"""machine_type"""'], {}), "(self, 'machine_type')\n", (23326, 23348), False, 'import pulumi\n'), ((23545, 23584), 'pulumi.get', 'pulumi.get', (['self', '"""machine_type_series"""'], {}), "(self, 'machine_type_series')\n", (23555, 23584), 
False, 'import pulumi\n'), ((23761, 23789), 'pulumi.get', 'pulumi.get', (['self', '"""metadata"""'], {}), "(self, 'metadata')\n", (23771, 23789), False, 'import pulumi\n'), ((24014, 24052), 'pulumi.get', 'pulumi.get', (['self', '"""network_interfaces"""'], {}), "(self, 'network_interfaces')\n", (24024, 24052), False, 'import pulumi\n'), ((24247, 24279), 'pulumi.get', 'pulumi.get', (['self', '"""network_tags"""'], {}), "(self, 'network_tags')\n", (24257, 24279), False, 'import pulumi\n'), ((24434, 24461), 'pulumi.get', 'pulumi.get', (['self', '"""project"""'], {}), "(self, 'project')\n", (24444, 24461), False, 'import pulumi\n'), ((24710, 24741), 'pulumi.get', 'pulumi.get', (['self', '"""secure_boot"""'], {}), "(self, 'secure_boot')\n", (24720, 24741), False, 'import pulumi\n'), ((24930, 24965), 'pulumi.get', 'pulumi.get', (['self', '"""service_account"""'], {}), "(self, 'service_account')\n", (24940, 24965), False, 'import pulumi\n'), ((25122, 25149), 'pulumi.get', 'pulumi.get', (['self', '"""vm_name"""'], {}), "(self, 'vm_name')\n", (25132, 25149), False, 'import pulumi\n'), ((25294, 25318), 'pulumi.get', 'pulumi.get', (['self', '"""zone"""'], {}), "(self, 'zone')\n", (25304, 25318), False, 'import pulumi\n'), ((27728, 27788), 'pulumi.set', 'pulumi.set', (['__self__', '"""automatic_restart"""', 'automatic_restart'], {}), "(__self__, 'automatic_restart', automatic_restart)\n", (27738, 27788), False, 'import pulumi\n'), ((27797, 27849), 'pulumi.set', 'pulumi.set', (['__self__', '"""min_node_cpus"""', 'min_node_cpus'], {}), "(__self__, 'min_node_cpus', min_node_cpus)\n", (27807, 27849), False, 'import pulumi\n'), ((27858, 27914), 'pulumi.set', 'pulumi.set', (['__self__', '"""node_affinities"""', 'node_affinities'], {}), "(__self__, 'node_affinities', node_affinities)\n", (27868, 27914), False, 'import pulumi\n'), ((27923, 27987), 'pulumi.set', 'pulumi.set', (['__self__', '"""on_host_maintenance"""', 'on_host_maintenance'], {}), "(__self__, 'on_host_maintenance', 
on_host_maintenance)\n", (27933, 27987), False, 'import pulumi\n'), ((27996, 28046), 'pulumi.set', 'pulumi.set', (['__self__', '"""restart_type"""', 'restart_type'], {}), "(__self__, 'restart_type', restart_type)\n", (28006, 28046), False, 'import pulumi\n'), ((28162, 28199), 'pulumi.get', 'pulumi.get', (['self', '"""automatic_restart"""'], {}), "(self, 'automatic_restart')\n", (28172, 28199), False, 'import pulumi\n'), ((28480, 28513), 'pulumi.get', 'pulumi.get', (['self', '"""min_node_cpus"""'], {}), "(self, 'min_node_cpus')\n", (28490, 28513), False, 'import pulumi\n'), ((28782, 28817), 'pulumi.get', 'pulumi.get', (['self', '"""node_affinities"""'], {}), "(self, 'node_affinities')\n", (28792, 28817), False, 'import pulumi\n'), ((29092, 29131), 'pulumi.get', 'pulumi.get', (['self', '"""on_host_maintenance"""'], {}), "(self, 'on_host_maintenance')\n", (29102, 29131), False, 'import pulumi\n'), ((29627, 29659), 'pulumi.get', 'pulumi.get', (['self', '"""restart_type"""'], {}), "(self, 'restart_type')\n", (29637, 29659), False, 'import pulumi\n'), ((32278, 32366), 'pulumi.set', 'pulumi.set', (['__self__', '"""compute_engine_target_details"""', 'compute_engine_target_details'], {}), "(__self__, 'compute_engine_target_details',\n compute_engine_target_details)\n", (32288, 32366), False, 'import pulumi\n'), ((32371, 32419), 'pulumi.set', 'pulumi.set', (['__self__', '"""create_time"""', 'create_time'], {}), "(__self__, 'create_time', create_time)\n", (32381, 32419), False, 'import pulumi\n'), ((32428, 32464), 'pulumi.set', 'pulumi.set', (['__self__', '"""error"""', 'error'], {}), "(__self__, 'error', error)\n", (32438, 32464), False, 'import pulumi\n'), ((32473, 32507), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (32483, 32507), False, 'import pulumi\n'), ((32516, 32558), 'pulumi.set', 'pulumi.set', (['__self__', '"""progress"""', 'progress'], {}), "(__self__, 'progress', progress)\n", (32526, 32558), False, 'import 
pulumi\n'), ((32567, 32625), 'pulumi.set', 'pulumi.set', (['__self__', '"""progress_percent"""', 'progress_percent'], {}), "(__self__, 'progress_percent', progress_percent)\n", (32577, 32625), False, 'import pulumi\n'), ((32634, 32670), 'pulumi.set', 'pulumi.set', (['__self__', '"""state"""', 'state'], {}), "(__self__, 'state', state)\n", (32644, 32670), False, 'import pulumi\n'), ((32679, 32731), 'pulumi.set', 'pulumi.set', (['__self__', '"""state_message"""', 'state_message'], {}), "(__self__, 'state_message', state_message)\n", (32689, 32731), False, 'import pulumi\n'), ((32740, 32786), 'pulumi.set', 'pulumi.set', (['__self__', '"""state_time"""', 'state_time'], {}), "(__self__, 'state_time', state_time)\n", (32750, 32786), False, 'import pulumi\n'), ((33040, 33089), 'pulumi.get', 'pulumi.get', (['self', '"""compute_engine_target_details"""'], {}), "(self, 'compute_engine_target_details')\n", (33050, 33089), False, 'import pulumi\n'), ((33327, 33358), 'pulumi.get', 'pulumi.get', (['self', '"""create_time"""'], {}), "(self, 'create_time')\n", (33337, 33358), False, 'import pulumi\n'), ((33558, 33583), 'pulumi.get', 'pulumi.get', (['self', '"""error"""'], {}), "(self, 'error')\n", (33568, 33583), False, 'import pulumi\n'), ((33721, 33745), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (33731, 33745), False, 'import pulumi\n'), ((33913, 33941), 'pulumi.get', 'pulumi.get', (['self', '"""progress"""'], {}), "(self, 'progress')\n", (33923, 33941), False, 'import pulumi\n'), ((34141, 34177), 'pulumi.get', 'pulumi.get', (['self', '"""progress_percent"""'], {}), "(self, 'progress_percent')\n", (34151, 34177), False, 'import pulumi\n'), ((34313, 34338), 'pulumi.get', 'pulumi.get', (['self', '"""state"""'], {}), "(self, 'state')\n", (34323, 34338), False, 'import pulumi\n'), ((34545, 34578), 'pulumi.get', 'pulumi.get', (['self', '"""state_message"""'], {}), "(self, 'state_message')\n", (34555, 34578), False, 'import pulumi\n'), ((34748, 
34778), 'pulumi.get', 'pulumi.get', (['self', '"""state_time"""'], {}), "(self, 'state_time')\n", (34758, 34778), False, 'import pulumi\n'), ((36176, 36224), 'pulumi.set', 'pulumi.set', (['__self__', '"""external_ip"""', 'external_ip'], {}), "(__self__, 'external_ip', external_ip)\n", (36186, 36224), False, 'import pulumi\n'), ((36233, 36281), 'pulumi.set', 'pulumi.set', (['__self__', '"""internal_ip"""', 'internal_ip'], {}), "(__self__, 'internal_ip', internal_ip)\n", (36243, 36281), False, 'import pulumi\n'), ((36290, 36330), 'pulumi.set', 'pulumi.set', (['__self__', '"""network"""', 'network'], {}), "(__self__, 'network', network)\n", (36300, 36330), False, 'import pulumi\n'), ((36339, 36385), 'pulumi.set', 'pulumi.set', (['__self__', '"""subnetwork"""', 'subnetwork'], {}), "(__self__, 'subnetwork', subnetwork)\n", (36349, 36385), False, 'import pulumi\n'), ((36558, 36589), 'pulumi.get', 'pulumi.get', (['self', '"""external_ip"""'], {}), "(self, 'external_ip')\n", (36568, 36589), False, 'import pulumi\n'), ((36853, 36884), 'pulumi.get', 'pulumi.get', (['self', '"""internal_ip"""'], {}), "(self, 'internal_ip')\n", (36863, 36884), False, 'import pulumi\n'), ((37031, 37058), 'pulumi.get', 'pulumi.get', (['self', '"""network"""'], {}), "(self, 'network')\n", (37041, 37058), False, 'import pulumi\n'), ((37211, 37241), 'pulumi.get', 'pulumi.get', (['self', '"""subnetwork"""'], {}), "(self, 'subnetwork')\n", (37221, 37241), False, 'import pulumi\n'), ((38574, 38616), 'pulumi.set', 'pulumi.set', (['__self__', '"""progress"""', 'progress'], {}), "(__self__, 'progress', progress)\n", (38584, 38616), False, 'import pulumi\n'), ((38625, 38683), 'pulumi.set', 'pulumi.set', (['__self__', '"""progress_percent"""', 'progress_percent'], {}), "(__self__, 'progress_percent', progress_percent)\n", (38635, 38683), False, 'import pulumi\n'), ((38692, 38738), 'pulumi.set', 'pulumi.set', (['__self__', '"""start_time"""', 'start_time'], {}), "(__self__, 'start_time', start_time)\n", 
(38702, 38738), False, 'import pulumi\n'), ((38901, 38929), 'pulumi.get', 'pulumi.get', (['self', '"""progress"""'], {}), "(self, 'progress')\n", (38911, 38929), False, 'import pulumi\n'), ((39124, 39160), 'pulumi.get', 'pulumi.get', (['self', '"""progress_percent"""'], {}), "(self, 'progress_percent')\n", (39134, 39160), False, 'import pulumi\n'), ((39337, 39367), 'pulumi.get', 'pulumi.get', (['self', '"""start_time"""'], {}), "(self, 'start_time')\n", (39347, 39367), False, 'import pulumi\n'), ((40421, 40475), 'pulumi.set', 'pulumi.set', (['__self__', '"""last_sync_time"""', 'last_sync_time'], {}), "(__self__, 'last_sync_time', last_sync_time)\n", (40431, 40475), False, 'import pulumi\n'), ((40695, 40729), 'pulumi.get', 'pulumi.get', (['self', '"""last_sync_time"""'], {}), "(self, 'last_sync_time')\n", (40705, 40729), False, 'import pulumi\n'), ((42036, 42088), 'pulumi.set', 'pulumi.set', (['__self__', '"""idle_duration"""', 'idle_duration'], {}), "(__self__, 'idle_duration', idle_duration)\n", (42046, 42088), False, 'import pulumi\n'), ((42097, 42159), 'pulumi.set', 'pulumi.set', (['__self__', '"""skip_os_adaptation"""', 'skip_os_adaptation'], {}), "(__self__, 'skip_os_adaptation', skip_os_adaptation)\n", (42107, 42159), False, 'import pulumi\n'), ((42344, 42377), 'pulumi.get', 'pulumi.get', (['self', '"""idle_duration"""'], {}), "(self, 'idle_duration')\n", (42354, 42377), False, 'import pulumi\n'), ((42734, 42772), 'pulumi.get', 'pulumi.get', (['self', '"""skip_os_adaptation"""'], {}), "(self, 'skip_os_adaptation')\n", (42744, 42772), False, 'import pulumi\n'), ((43662, 43694), 'pulumi.set', 'pulumi.set', (['__self__', '"""key"""', 'key'], {}), "(__self__, 'key', key)\n", (43672, 43694), False, 'import pulumi\n'), ((43703, 43745), 'pulumi.set', 'pulumi.set', (['__self__', '"""operator"""', 'operator'], {}), "(__self__, 'operator', operator)\n", (43713, 43745), False, 'import pulumi\n'), ((43754, 43792), 'pulumi.set', 'pulumi.set', (['__self__', '"""values"""', 
'values'], {}), "(__self__, 'values', values)\n", (43764, 43792), False, 'import pulumi\n'), ((43945, 43968), 'pulumi.get', 'pulumi.get', (['self', '"""key"""'], {}), "(self, 'key')\n", (43955, 43968), False, 'import pulumi\n'), ((44161, 44189), 'pulumi.get', 'pulumi.get', (['self', '"""operator"""'], {}), "(self, 'operator')\n", (44171, 44189), False, 'import pulumi\n'), ((44360, 44386), 'pulumi.get', 'pulumi.get', (['self', '"""values"""'], {}), "(self, 'values')\n", (44370, 44386), False, 'import pulumi\n'), ((45960, 45994), 'pulumi.set', 'pulumi.set', (['__self__', '"""code"""', 'code'], {}), "(__self__, 'code', code)\n", (45970, 45994), False, 'import pulumi\n'), ((46003, 46043), 'pulumi.set', 'pulumi.set', (['__self__', '"""details"""', 'details'], {}), "(__self__, 'details', details)\n", (46013, 46043), False, 'import pulumi\n'), ((46052, 46092), 'pulumi.set', 'pulumi.set', (['__self__', '"""message"""', 'message'], {}), "(__self__, 'message', message)\n", (46062, 46092), False, 'import pulumi\n'), ((46268, 46292), 'pulumi.get', 'pulumi.get', (['self', '"""code"""'], {}), "(self, 'code')\n", (46278, 46292), False, 'import pulumi\n'), ((46533, 46560), 'pulumi.get', 'pulumi.get', (['self', '"""details"""'], {}), "(self, 'details')\n", (46543, 46560), False, 'import pulumi\n'), ((46861, 46888), 'pulumi.get', 'pulumi.get', (['self', '"""message"""'], {}), "(self, 'message')\n", (46871, 46888), False, 'import pulumi\n'), ((48214, 48262), 'pulumi.set', 'pulumi.set', (['__self__', '"""utilization"""', 'utilization'], {}), "(__self__, 'utilization', utilization)\n", (48224, 48262), False, 'import pulumi\n'), ((48271, 48307), 'pulumi.set', 'pulumi.set', (['__self__', '"""vm_id"""', 'vm_id'], {}), "(__self__, 'vm_id', vm_id)\n", (48281, 48307), False, 'import pulumi\n'), ((48316, 48376), 'pulumi.set', 'pulumi.set', (['__self__', '"""vmware_vm_details"""', 'vmware_vm_details'], {}), "(__self__, 'vmware_vm_details', vmware_vm_details)\n", (48326, 48376), False, 'import 
pulumi\n'), ((48560, 48591), 'pulumi.get', 'pulumi.get', (['self', '"""utilization"""'], {}), "(self, 'utilization')\n", (48570, 48591), False, 'import pulumi\n'), ((48741, 48766), 'pulumi.get', 'pulumi.get', (['self', '"""vm_id"""'], {}), "(self, 'vm_id')\n", (48751, 48766), False, 'import pulumi\n'), ((48996, 49033), 'pulumi.get', 'pulumi.get', (['self', '"""vmware_vm_details"""'], {}), "(self, 'vmware_vm_details')\n", (49006, 49033), False, 'import pulumi\n'), ((53275, 53323), 'pulumi.set', 'pulumi.set', (['__self__', '"""cpu_average"""', 'cpu_average'], {}), "(__self__, 'cpu_average', cpu_average)\n", (53285, 53323), False, 'import pulumi\n'), ((53332, 53396), 'pulumi.set', 'pulumi.set', (['__self__', '"""cpu_average_percent"""', 'cpu_average_percent'], {}), "(__self__, 'cpu_average_percent', cpu_average_percent)\n", (53342, 53396), False, 'import pulumi\n'), ((53405, 53445), 'pulumi.set', 'pulumi.set', (['__self__', '"""cpu_max"""', 'cpu_max'], {}), "(__self__, 'cpu_max', cpu_max)\n", (53415, 53445), False, 'import pulumi\n'), ((53454, 53510), 'pulumi.set', 'pulumi.set', (['__self__', '"""cpu_max_percent"""', 'cpu_max_percent'], {}), "(__self__, 'cpu_max_percent', cpu_max_percent)\n", (53464, 53510), False, 'import pulumi\n'), ((53519, 53585), 'pulumi.set', 'pulumi.set', (['__self__', '"""disk_io_rate_average"""', 'disk_io_rate_average'], {}), "(__self__, 'disk_io_rate_average', disk_io_rate_average)\n", (53529, 53585), False, 'import pulumi\n'), ((53594, 53670), 'pulumi.set', 'pulumi.set', (['__self__', '"""disk_io_rate_average_kbps"""', 'disk_io_rate_average_kbps'], {}), "(__self__, 'disk_io_rate_average_kbps', disk_io_rate_average_kbps)\n", (53604, 53670), False, 'import pulumi\n'), ((53679, 53737), 'pulumi.set', 'pulumi.set', (['__self__', '"""disk_io_rate_max"""', 'disk_io_rate_max'], {}), "(__self__, 'disk_io_rate_max', disk_io_rate_max)\n", (53689, 53737), False, 'import pulumi\n'), ((53746, 53814), 'pulumi.set', 'pulumi.set', (['__self__', 
'"""disk_io_rate_max_kbps"""', 'disk_io_rate_max_kbps'], {}), "(__self__, 'disk_io_rate_max_kbps', disk_io_rate_max_kbps)\n", (53756, 53814), False, 'import pulumi\n'), ((53823, 53877), 'pulumi.set', 'pulumi.set', (['__self__', '"""memory_average"""', 'memory_average'], {}), "(__self__, 'memory_average', memory_average)\n", (53833, 53877), False, 'import pulumi\n'), ((53886, 53956), 'pulumi.set', 'pulumi.set', (['__self__', '"""memory_average_percent"""', 'memory_average_percent'], {}), "(__self__, 'memory_average_percent', memory_average_percent)\n", (53896, 53956), False, 'import pulumi\n'), ((53965, 54011), 'pulumi.set', 'pulumi.set', (['__self__', '"""memory_max"""', 'memory_max'], {}), "(__self__, 'memory_max', memory_max)\n", (53975, 54011), False, 'import pulumi\n'), ((54020, 54082), 'pulumi.set', 'pulumi.set', (['__self__', '"""memory_max_percent"""', 'memory_max_percent'], {}), "(__self__, 'memory_max_percent', memory_max_percent)\n", (54030, 54082), False, 'import pulumi\n'), ((54091, 54169), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_throughput_average"""', 'network_throughput_average'], {}), "(__self__, 'network_throughput_average', network_throughput_average)\n", (54101, 54169), False, 'import pulumi\n'), ((54178, 54270), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_throughput_average_kbps"""', 'network_throughput_average_kbps'], {}), "(__self__, 'network_throughput_average_kbps',\n network_throughput_average_kbps)\n", (54188, 54270), False, 'import pulumi\n'), ((54275, 54345), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_throughput_max"""', 'network_throughput_max'], {}), "(__self__, 'network_throughput_max', network_throughput_max)\n", (54285, 54345), False, 'import pulumi\n'), ((54354, 54439), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_throughput_max_kbps"""', 'network_throughput_max_kbps'], {}), "(__self__, 'network_throughput_max_kbps', network_throughput_max_kbps\n )\n", (54364, 54439), False, 'import 
pulumi\n'), ((54597, 54628), 'pulumi.get', 'pulumi.get', (['self', '"""cpu_average"""'], {}), "(self, 'cpu_average')\n", (54607, 54628), False, 'import pulumi\n'), ((54806, 54845), 'pulumi.get', 'pulumi.get', (['self', '"""cpu_average_percent"""'], {}), "(self, 'cpu_average_percent')\n", (54816, 54845), False, 'import pulumi\n'), ((54996, 55023), 'pulumi.get', 'pulumi.get', (['self', '"""cpu_max"""'], {}), "(self, 'cpu_max')\n", (55006, 55023), False, 'import pulumi\n'), ((55189, 55224), 'pulumi.get', 'pulumi.get', (['self', '"""cpu_max_percent"""'], {}), "(self, 'cpu_max_percent')\n", (55199, 55224), False, 'import pulumi\n'), ((55422, 55462), 'pulumi.get', 'pulumi.get', (['self', '"""disk_io_rate_average"""'], {}), "(self, 'disk_io_rate_average')\n", (55432, 55462), False, 'import pulumi\n'), ((55669, 55714), 'pulumi.get', 'pulumi.get', (['self', '"""disk_io_rate_average_kbps"""'], {}), "(self, 'disk_io_rate_average_kbps')\n", (55679, 55714), False, 'import pulumi\n'), ((55900, 55936), 'pulumi.get', 'pulumi.get', (['self', '"""disk_io_rate_max"""'], {}), "(self, 'disk_io_rate_max')\n", (55910, 55936), False, 'import pulumi\n'), ((56131, 56172), 'pulumi.get', 'pulumi.get', (['self', '"""disk_io_rate_max_kbps"""'], {}), "(self, 'disk_io_rate_max_kbps')\n", (56141, 56172), False, 'import pulumi\n'), ((56344, 56378), 'pulumi.get', 'pulumi.get', (['self', '"""memory_average"""'], {}), "(self, 'memory_average')\n", (56354, 56378), False, 'import pulumi\n'), ((56565, 56607), 'pulumi.get', 'pulumi.get', (['self', '"""memory_average_percent"""'], {}), "(self, 'memory_average_percent')\n", (56575, 56607), False, 'import pulumi\n'), ((56767, 56797), 'pulumi.get', 'pulumi.get', (['self', '"""memory_max"""'], {}), "(self, 'memory_max')\n", (56777, 56797), False, 'import pulumi\n'), ((56972, 57010), 'pulumi.get', 'pulumi.get', (['self', '"""memory_max_percent"""'], {}), "(self, 'memory_max_percent')\n", (56982, 57010), False, 'import pulumi\n'), ((57271, 57317), 'pulumi.get', 
'pulumi.get', (['self', '"""network_throughput_average"""'], {}), "(self, 'network_throughput_average')\n", (57281, 57317), False, 'import pulumi\n'), ((57587, 57638), 'pulumi.get', 'pulumi.get', (['self', '"""network_throughput_average_kbps"""'], {}), "(self, 'network_throughput_average_kbps')\n", (57597, 57638), False, 'import pulumi\n'), ((57887, 57929), 'pulumi.get', 'pulumi.get', (['self', '"""network_throughput_max"""'], {}), "(self, 'network_throughput_max')\n", (57897, 57929), False, 'import pulumi\n'), ((58187, 58234), 'pulumi.get', 'pulumi.get', (['self', '"""network_throughput_max_kbps"""'], {}), "(self, 'network_throughput_max_kbps')\n", (58197, 58234), False, 'import pulumi\n'), ((59662, 59704), 'pulumi.set', 'pulumi.set', (['__self__', '"""password"""', 'password'], {}), "(__self__, 'password', password)\n", (59672, 59704), False, 'import pulumi\n'), ((59713, 59759), 'pulumi.set', 'pulumi.set', (['__self__', '"""thumbprint"""', 'thumbprint'], {}), "(__self__, 'thumbprint', thumbprint)\n", (59723, 59759), False, 'import pulumi\n'), ((59768, 59810), 'pulumi.set', 'pulumi.set', (['__self__', '"""username"""', 'username'], {}), "(__self__, 'username', username)\n", (59778, 59810), False, 'import pulumi\n'), ((59819, 59865), 'pulumi.set', 'pulumi.set', (['__self__', '"""vcenter_ip"""', 'vcenter_ip'], {}), "(__self__, 'vcenter_ip', vcenter_ip)\n", (59829, 59865), False, 'import pulumi\n'), ((60075, 60103), 'pulumi.get', 'pulumi.get', (['self', '"""password"""'], {}), "(self, 'password')\n", (60085, 60103), False, 'import pulumi\n'), ((60279, 60309), 'pulumi.get', 'pulumi.get', (['self', '"""thumbprint"""'], {}), "(self, 'thumbprint')\n", (60289, 60309), False, 'import pulumi\n'), ((60448, 60476), 'pulumi.get', 'pulumi.get', (['self', '"""username"""'], {}), "(self, 'username')\n", (60458, 60476), False, 'import pulumi\n'), ((60663, 60693), 'pulumi.get', 'pulumi.get', (['self', '"""vcenter_ip"""'], {}), "(self, 'vcenter_ip')\n", (60673, 60693), False, 
'import pulumi\n'), ((64055, 64103), 'pulumi.set', 'pulumi.set', (['__self__', '"""boot_option"""', 'boot_option'], {}), "(__self__, 'boot_option', boot_option)\n", (64065, 64103), False, 'import pulumi\n'), ((64112, 64172), 'pulumi.set', 'pulumi.set', (['__self__', '"""committed_storage"""', 'committed_storage'], {}), "(__self__, 'committed_storage', committed_storage)\n", (64122, 64172), False, 'import pulumi\n'), ((64181, 64247), 'pulumi.set', 'pulumi.set', (['__self__', '"""committed_storage_mb"""', 'committed_storage_mb'], {}), "(__self__, 'committed_storage_mb', committed_storage_mb)\n", (64191, 64247), False, 'import pulumi\n'), ((64256, 64300), 'pulumi.set', 'pulumi.set', (['__self__', '"""cpu_count"""', 'cpu_count'], {}), "(__self__, 'cpu_count', cpu_count)\n", (64266, 64300), False, 'import pulumi\n'), ((64309, 64379), 'pulumi.set', 'pulumi.set', (['__self__', '"""datacenter_description"""', 'datacenter_description'], {}), "(__self__, 'datacenter_description', datacenter_description)\n", (64319, 64379), False, 'import pulumi\n'), ((64388, 64440), 'pulumi.set', 'pulumi.set', (['__self__', '"""datacenter_id"""', 'datacenter_id'], {}), "(__self__, 'datacenter_id', datacenter_id)\n", (64398, 64440), False, 'import pulumi\n'), ((64449, 64495), 'pulumi.set', 'pulumi.set', (['__self__', '"""disk_count"""', 'disk_count'], {}), "(__self__, 'disk_count', disk_count)\n", (64459, 64495), False, 'import pulumi\n'), ((64504, 64554), 'pulumi.set', 'pulumi.set', (['__self__', '"""display_name"""', 'display_name'], {}), "(__self__, 'display_name', display_name)\n", (64514, 64554), False, 'import pulumi\n'), ((64563, 64623), 'pulumi.set', 'pulumi.set', (['__self__', '"""guest_description"""', 'guest_description'], {}), "(__self__, 'guest_description', guest_description)\n", (64573, 64623), False, 'import pulumi\n'), ((64632, 64676), 'pulumi.set', 'pulumi.set', (['__self__', '"""memory_mb"""', 'memory_mb'], {}), "(__self__, 'memory_mb', memory_mb)\n", (64642, 64676), False, 
'import pulumi\n'), ((64685, 64733), 'pulumi.set', 'pulumi.set', (['__self__', '"""power_state"""', 'power_state'], {}), "(__self__, 'power_state', power_state)\n", (64695, 64733), False, 'import pulumi\n'), ((64742, 64776), 'pulumi.set', 'pulumi.set', (['__self__', '"""uuid"""', 'uuid'], {}), "(__self__, 'uuid', uuid)\n", (64752, 64776), False, 'import pulumi\n'), ((64785, 64821), 'pulumi.set', 'pulumi.set', (['__self__', '"""vm_id"""', 'vm_id'], {}), "(__self__, 'vm_id', vm_id)\n", (64795, 64821), False, 'import pulumi\n'), ((64976, 65007), 'pulumi.get', 'pulumi.get', (['self', '"""boot_option"""'], {}), "(self, 'boot_option')\n", (64986, 65007), False, 'import pulumi\n'), ((65211, 65248), 'pulumi.get', 'pulumi.get', (['self', '"""committed_storage"""'], {}), "(self, 'committed_storage')\n", (65221, 65248), False, 'import pulumi\n'), ((65457, 65497), 'pulumi.get', 'pulumi.get', (['self', '"""committed_storage_mb"""'], {}), "(self, 'committed_storage_mb')\n", (65467, 65497), False, 'import pulumi\n'), ((65658, 65687), 'pulumi.get', 'pulumi.get', (['self', '"""cpu_count"""'], {}), "(self, 'cpu_count')\n", (65668, 65687), False, 'import pulumi\n'), ((65918, 65960), 'pulumi.get', 'pulumi.get', (['self', '"""datacenter_description"""'], {}), "(self, 'datacenter_description')\n", (65928, 65960), False, 'import pulumi\n'), ((66159, 66192), 'pulumi.get', 'pulumi.get', (['self', '"""datacenter_id"""'], {}), "(self, 'datacenter_id')\n", (66169, 66192), False, 'import pulumi\n'), ((66357, 66387), 'pulumi.get', 'pulumi.get', (['self', '"""disk_count"""'], {}), "(self, 'disk_count')\n", (66367, 66387), False, 'import pulumi\n'), ((66594, 66626), 'pulumi.get', 'pulumi.get', (['self', '"""display_name"""'], {}), "(self, 'display_name')\n", (66604, 66626), False, 'import pulumi\n'), ((66943, 66980), 'pulumi.get', 'pulumi.get', (['self', '"""guest_description"""'], {}), "(self, 'guest_description')\n", (66953, 66980), False, 'import pulumi\n'), ((67151, 67180), 'pulumi.get', 
'pulumi.get', (['self', '"""memory_mb"""'], {}), "(self, 'memory_mb')\n", (67161, 67180), False, 'import pulumi\n'), ((67371, 67402), 'pulumi.get', 'pulumi.get', (['self', '"""power_state"""'], {}), "(self, 'power_state')\n", (67381, 67402), False, 'import pulumi\n'), ((67555, 67579), 'pulumi.get', 'pulumi.get', (['self', '"""uuid"""'], {}), "(self, 'uuid')\n", (67565, 67579), False, 'import pulumi\n'), ((67806, 67831), 'pulumi.get', 'pulumi.get', (['self', '"""vm_id"""'], {}), "(self, 'vm_id')\n", (67816, 67831), False, 'import pulumi\n'), ((1220, 1360), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in AppliedLicenseResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in AppliedLicenseResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (1235, 1360), False, 'import pulumi\n'), ((3567, 3701), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in CloneJobResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in CloneJobResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (3582, 3701), False, 'import pulumi\n'), ((8477, 8630), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in ComputeEngineTargetDefaultsResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in ComputeEngineTargetDefaultsResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (8492, 8630), False, 'import pulumi\n'), ((17654, 17806), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in ComputeEngineTargetDetailsResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in ComputeEngineTargetDetailsResponse. 
Access the value via the \'{suggest}\' property getter instead."\n )\n', (17669, 17806), False, 'import pulumi\n'), ((26000, 26143), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in ComputeSchedulingResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in ComputeSchedulingResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (26015, 26143), False, 'import pulumi\n'), ((30421, 30557), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in CutoverJobResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in CutoverJobResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (30436, 30557), False, 'import pulumi\n'), ((35147, 35289), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in NetworkInterfaceResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in NetworkInterfaceResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (35162, 35289), False, 'import pulumi\n'), ((37657, 37799), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in ReplicationCycleResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in ReplicationCycleResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (37672, 37799), False, 'import pulumi\n'), ((39705, 39846), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in ReplicationSyncResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in ReplicationSyncResponse. 
Access the value via the \'{suggest}\' property getter instead."\n )\n', (39720, 39846), False, 'import pulumi\n'), ((41108, 41248), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in SchedulePolicyResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in SchedulePolicyResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (41123, 41248), False, 'import pulumi\n'), ((47254, 47397), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in VmUtilizationInfoResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in VmUtilizationInfoResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (47269, 47397), False, 'import pulumi\n'), ((50623, 50769), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in VmUtilizationMetricsResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in VmUtilizationMetricsResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (50638, 50769), False, 'import pulumi\n'), ((58583, 58728), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in VmwareSourceDetailsResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in VmwareSourceDetailsResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (58598, 58728), False, 'import pulumi\n'), ((61808, 61949), 'pulumi.log.warn', 'pulumi.log.warn', (['f"""Key \'{key}\' not found in VmwareVmDetailsResponse. Access the value via the \'{suggest}\' property getter instead."""'], {}), '(\n f"Key \'{key}\' not found in VmwareVmDetailsResponse. Access the value via the \'{suggest}\' property getter instead."\n )\n', (61823, 61949), False, 'import pulumi\n')] |
# coding=utf-8
#------------------------------------------------------------------------------#
import os
import time
import fuse
import errno
from .item import RelFuseItem
from .static_dir import StaticDirectory
#------------------------------------------------------------------------------#
class MountRoot(RelFuseItem):
    """Root of the mounted relfs filesystem.

    The root directory exposes one entry per registered repository plus a
    static ".relfs" control directory (which contains a "repos" backstage
    directory).
    """
    # --------------------------------------------------------------------------
    def __init__(self):
        RelFuseItem.__init__(self)
        # Remember when we were mounted; reported as the root's mtime.
        self._mount_time = time.time()
        # Static ".relfs" control directory with a nested "repos" entry.
        self._relfs_dir = StaticDirectory()
        self._repos_backstage = self._relfs_dir.add("repos", StaticDirectory())
        # Maps repository name -> repository root item.
        self._repos = {}
    # --------------------------------------------------------------------------
    def add_repo_root(self, name, item):
        """Register `item` as the root of the repository called `name`."""
        self._repos[name] = item
    # --------------------------------------------------------------------------
    def repos_backstage(self):
        """Return the backstage directory for repositories (".relfs/repos")."""
        return self._repos_backstage
    # --------------------------------------------------------------------------
    def find_item(self, split_path):
        """Resolve `split_path` (a list of path components) to an item.

        Returns this root for the empty path (or ["."]), delegates paths
        starting with ".relfs" to the static control directory, and all
        other paths to the matching repository root.  Returns None when
        nothing matches.
        """
        if not split_path or split_path == ["."]:
            return self
        if split_path[0] == ".relfs":
            return self._relfs_dir.find_item(split_path[1:])
        # Look the repository up first and only then descend into it, so a
        # KeyError raised inside the repository's own find_item() is not
        # silently swallowed (the previous broad try/except hid such errors).
        repo = self._repos.get(split_path[0])
        if repo is not None:
            return repo.find_item(split_path[1:])
        return None
    # --------------------------------------------------------------------------
    def readdir(self, fh):
        """Yield the names of all entries in the root directory."""
        yield ".."
        yield "."
        yield ".relfs"
        for name in self._repos:
            yield name
    # --------------------------------------------------------------------------
    def _modify_time(self):
        # The root's modification time is simply the mount time.
        return self._mount_time
    # --------------------------------------------------------------------------
    def access(self, mode):
        """Grant execute (directory search) access; defer other checks."""
        if mode & os.X_OK:
            return 0
        return RelFuseItem.access(self, mode)
    # --------------------------------------------------------------------------
    def _get_mode(self):
        # Directory (S_IFDIR) readable and searchable by owner and group.
        return 0o40550
#------------------------------------------------------------------------------#
| [
"time.time"
] | [((491, 502), 'time.time', 'time.time', ([], {}), '()\n', (500, 502), False, 'import time\n')] |
"""etravel URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from main import views
# URL routes for the main app. Keyword-argument spacing is normalized to
# PEP 8 style (name='...') — the original mixed `name = '...'` and
# `name='...'` across entries (E251).
urlpatterns = [
    # Admin and landing pages.
    path('admin/', admin.site.urls),
    path('', views.homepage, name='home'),
    # Authentication.
    path('login/', views.loginPage, name='login'),
    path('logout/', views.logoutUser, name='logout'),
    path('signup/', views.signupPage, name='signup'),
    # Hotel browsing and account management.
    path('browsehotel/', views.filterhotel, name='browsehotel'),
    path('myaccount/', views.accountpage, name='myaccount'),
    path('editprofile/', views.edit_profile, name='editprofile'),
    path('change-password/', views.change_password, name='editpassword'),
    # Booking workflow.
    path('hotel_booking/', views.bookhotel, name='bookhotel'),
    path('hotel/<int:hotel_id>', views.hotelpage, name='hotelpage'),
    path('cancelbooking/', views.cancelbooking, name='cancelbooking'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve uploaded media in development
| [
"django.conf.urls.static.static",
"django.urls.path"
] | [((1542, 1603), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1548, 1603), False, 'from django.conf.urls.static import static\n'), ((823, 854), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (827, 854), False, 'from django.urls import path, include\n'), ((860, 897), 'django.urls.path', 'path', (['""""""', 'views.homepage'], {'name': '"""home"""'}), "('', views.homepage, name='home')\n", (864, 897), False, 'from django.urls import path, include\n'), ((905, 950), 'django.urls.path', 'path', (['"""login/"""', 'views.loginPage'], {'name': '"""login"""'}), "('login/', views.loginPage, name='login')\n", (909, 950), False, 'from django.urls import path, include\n'), ((958, 1006), 'django.urls.path', 'path', (['"""logout/"""', 'views.logoutUser'], {'name': '"""logout"""'}), "('logout/', views.logoutUser, name='logout')\n", (962, 1006), False, 'from django.urls import path, include\n'), ((1014, 1062), 'django.urls.path', 'path', (['"""signup/"""', 'views.signupPage'], {'name': '"""signup"""'}), "('signup/', views.signupPage, name='signup')\n", (1018, 1062), False, 'from django.urls import path, include\n'), ((1070, 1129), 'django.urls.path', 'path', (['"""browsehotel/"""', 'views.filterhotel'], {'name': '"""browsehotel"""'}), "('browsehotel/', views.filterhotel, name='browsehotel')\n", (1074, 1129), False, 'from django.urls import path, include\n'), ((1137, 1192), 'django.urls.path', 'path', (['"""myaccount/"""', 'views.accountpage'], {'name': '"""myaccount"""'}), "('myaccount/', views.accountpage, name='myaccount')\n", (1141, 1192), False, 'from django.urls import path, include\n'), ((1198, 1258), 'django.urls.path', 'path', (['"""editprofile/"""', 'views.edit_profile'], {'name': '"""editprofile"""'}), "('editprofile/', views.edit_profile, name='editprofile')\n", (1202, 1258), False, 
'from django.urls import path, include\n'), ((1264, 1332), 'django.urls.path', 'path', (['"""change-password/"""', 'views.change_password'], {'name': '"""editpassword"""'}), "('change-password/', views.change_password, name='editpassword')\n", (1268, 1332), False, 'from django.urls import path, include\n'), ((1338, 1395), 'django.urls.path', 'path', (['"""hotel_booking/"""', 'views.bookhotel'], {'name': '"""bookhotel"""'}), "('hotel_booking/', views.bookhotel, name='bookhotel')\n", (1342, 1395), False, 'from django.urls import path, include\n'), ((1401, 1464), 'django.urls.path', 'path', (['"""hotel/<int:hotel_id>"""', 'views.hotelpage'], {'name': '"""hotelpage"""'}), "('hotel/<int:hotel_id>', views.hotelpage, name='hotelpage')\n", (1405, 1464), False, 'from django.urls import path, include\n'), ((1470, 1535), 'django.urls.path', 'path', (['"""cancelbooking/"""', 'views.cancelbooking'], {'name': '"""cancelbooking"""'}), "('cancelbooking/', views.cancelbooking, name='cancelbooking')\n", (1474, 1535), False, 'from django.urls import path, include\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.contrib import gan as tfgan
from GeneralTools.graph_funcs.my_session import MySession
from GeneralTools.math_funcs.graph_func_support import mean_cov_np, trace_sqrt_product_np
from GeneralTools.misc_fun import FLAGS
class GenerativeModelMetric(object):
    def __init__(self, image_format=None, model='v1', model_path=None):
        """ This class defines several metrics using pre-trained classifier inception v1.
        :param image_format: image layout ('channels_first'/'NCHW' or 'channels_last'/'NHWC');
            defaults to FLAGS.IMAGE_FORMAT.
        :param model: which bundled graph to load: 'v1', 'v3', or one of
            {'swd', 'ms_ssim', 'ssim'} (those metrics need no classifier graph).
        :param model_path: optional path to a custom frozen graph; overrides *model*.
        """
        if model_path is None:
            self.model = model
            if model == 'v1':
                self.inception_graph_def = tfgan.eval.get_graph_def_from_disk(FLAGS.INCEPTION_V1)
            elif model == 'v3':
                self.inception_graph_def = tfgan.eval.get_graph_def_from_disk(FLAGS.INCEPTION_V3)
            elif model in {'swd', 'ms_ssim', 'ssim'}:
                # These metrics operate on raw pixels; no classifier graph is loaded.
                pass
            else:
                raise NotImplementedError('Model {} not implemented.'.format(model))
        else:
            self.model = 'custom'
            self.inception_graph_def = tfgan.eval.get_graph_def_from_disk(model_path)
        if image_format is None:
            self.image_format = FLAGS.IMAGE_FORMAT
        else:
            self.image_format = image_format

        # preserved for inception v3
        self._pool3_v3_ = None
        self._logits_v3_ = None

    def inception_v1_one_batch(self, image, output_tensor=None):
        """ This function runs the inception v1 model on images and give logits output.
        Note: if other layers of inception model is needed, change the output_tensor option in tfgan.eval.run_inception
        :param image: one batch of images in [-1, 1], layout per self.image_format
        :param output_tensor: tensor names to fetch; defaults to ['logits:0', 'pool_3:0']
        :return: the fetched tensors for this batch
        """
        if output_tensor is None:
            output_tensor = ['logits:0', 'pool_3:0']
        image_size = tfgan.eval.INCEPTION_DEFAULT_IMAGE_SIZE
        if self.image_format in {'channels_first', 'NCHW'}:
            # run_inception expects NHWC; convert from NCHW first.
            image = tf.transpose(image, perm=(0, 2, 3, 1))
        if image.get_shape().as_list()[1] != image_size:
            image = tf.compat.v1.image.resize_bilinear(image, [image_size, image_size])
        # inception score uses the logits:0 while FID uses pool_3:0.
        return tfgan.eval.run_inception(
            image, graph_def=self.inception_graph_def, input_tensor='Mul:0', output_tensor=output_tensor)

    def inception_v1(self, images):
        """ This function runs the inception v1 model on images and give logits output.
        Note: if other layers of inception model is needed, change the output_tensor option in tfgan.eval.run_inception.
        Note: for large inputs, e.g. [10000, 64, 64, 3], it is better to run iterations containing this function.
        :param images: a batch of at most 2500 images
        :return: (logits, pool3) for the whole batch
        """
        num_images = images.get_shape().as_list()[0]
        if num_images > 2500:
            raise MemoryError('The input is too big to possibly fit into memory. Consider using multiple runs.')
        if num_images >= 400:
            # NOTE(review): looks like a leftover debug print — consider removing.
            print(num_images)
            # Note: need to validate the code below
            # somehow tfgan.eval.classifier_score does not work properly when splitting the datasets.
            # The following code is inspired by:
            # https://github.com/tensorflow/tensorflow/blob/r1.7/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
            if num_images % 100 == 0:
                # Batch count divides evenly: run all sub-batches through map_fn.
                generated_images_list = tf.split(images, num_or_size_splits=num_images // 100, axis=0)
                logits, pool3 = tf.map_fn(
                    fn=self.inception_v1_one_batch,
                    elems=tf.stack(generated_images_list),
                    dtype=(tf.float32, tf.float32),
                    parallel_iterations=1,
                    back_prop=False,
                    swap_memory=True,
                    name='RunClassifier')
                logits = tf.concat(tf.unstack(logits), 0)
                pool3 = tf.concat(tf.unstack(pool3), 0)
            else:
                # Uneven split: run the full-size sub-batches through map_fn and
                # the smaller remainder batch separately, then concatenate.
                generated_images_list = tf.split(
                    images, num_or_size_splits=[100] * (num_images // 100) + [num_images % 100], axis=0)
                # tf.stack requires the dimension of tensor in list to be the same
                logits, pool3 = tf.map_fn(
                    fn=self.inception_v1_one_batch,
                    elems=tf.stack(generated_images_list[0:-1]),
                    dtype=(tf.float32, tf.float32),
                    parallel_iterations=1,
                    back_prop=False,
                    swap_memory=True,
                    name='RunClassifier')
                logits_last, pool3_last = self.inception_v1_one_batch(generated_images_list[-1])
                logits = tf.concat(tf.unstack(logits) + [logits_last], 0)
                pool3 = tf.concat(tf.unstack(pool3) + [pool3_last], 0)
        else:
            logits, pool3 = self.inception_v1_one_batch(images)

        return logits, pool3

    @staticmethod
    def inception_score_from_logits(logits):
        """ This function estimates the inception score from logits output by inception_v1
        :param logits: tensor or numpy array of classifier logits
        :return: scalar tensor with the inception score
        """
        if type(logits) == np.ndarray:
            # Allow numpy inputs by wrapping them as constants.
            logits = tf.constant(logits, dtype=tf.float32)
        return tfgan.eval.classifier_score_from_logits(logits)

    @staticmethod
    def fid_from_pool3(x_pool3, y_pool3):
        """ This function estimates Fréchet inception distance from pool3 of inception model
        :param x_pool3: tensor or numpy array of pool_3 activations
        :param y_pool3: tensor or numpy array of pool_3 activations
        :return: scalar tensor with the FID
        """
        if type(x_pool3) == np.ndarray:
            x_pool3 = tf.constant(x_pool3, dtype=tf.float32)
        if type(y_pool3) == np.ndarray:
            y_pool3 = tf.constant(y_pool3, dtype=tf.float32)
        return tfgan.eval.frechet_classifier_distance_from_activations(x_pool3, y_pool3)

    @ staticmethod
    def my_fid_from_pool3(x_pool3_np, y_pool3_np):
        """ This function estimates Fréchet inception distance from pool3 of inception model.
        Different from fid_from_pool3, here pool3_np could be a list [mean, cov]
        :param x_pool3_np: activations array, or a precomputed [mean, cov] pair
        :param y_pool3_np: activations array, or a precomputed [mean, cov] pair
        :return: FID as a float
        """
        # from scipy.linalg import sqrtm
        x_mean, x_cov = x_pool3_np if isinstance(x_pool3_np, (list, tuple)) else mean_cov_np(x_pool3_np)
        y_mean, y_cov = y_pool3_np if isinstance(y_pool3_np, (list, tuple)) else mean_cov_np(y_pool3_np)
        # FID = ||mu_x - mu_y||^2 + Tr(C_x) + Tr(C_y) - 2 Tr(sqrt(C_x C_y))
        fid = np.sum((x_mean-y_mean) ** 2)+np.trace(x_cov)+np.trace(y_cov)-2.0*trace_sqrt_product_np(x_cov, y_cov)

        return fid
        # return np.sum((x_mean - y_mean) ** 2) + np.trace(x_cov + y_cov - 2.0 * sqrtm(np.dot(x_cov, y_cov)))

    def inception_score_and_fid_v1(self, x_batch, y_batch, num_batch=10, ckpt_folder=None, ckpt_file=None):
        """ This function calculates inception scores and FID based on inception v1.
        Note: batch_size * num_batch needs to be larger than 2048, otherwise the convariance matrix will be
        ill-conditioned.
        According to TensorFlow v1.7 (below), this is actually inception v3 model.
        Somehow the downloaded file says it's v1.
        code link: https://github.com/tensorflow/tensorflow/blob/r1.7/tensorflow/contrib \
        /gan/python/eval/python/classifier_metrics_impl.py
        Steps:
        1, the pool3 and logits are calculated for x_batch and y_batch with sess
        2, the pool3 and logits are passed to corresponding metrics
        :param ckpt_file:
        :param x_batch: tensor, one batch of x in range [-1, 1]
        :param y_batch: tensor, one batch of y in range [-1, 1]
        :param num_batch:
        :param ckpt_folder: check point folder
        :param ckpt_file: in case an older ckpt file is needed, provide it here, e.g. 'cifar.ckpt-6284'
        :return: [inception_score_x, inception_score_y, fid_xx, fid_xy]
        """
        assert self.model == 'v1', 'GenerativeModelMetric is not initialized with model="v1".'
        assert ckpt_folder is not None, 'ckpt_folder must be provided.'

        x_logits, x_pool3 = self.inception_v1(x_batch)
        y_logits, y_pool3 = self.inception_v1(y_batch)

        with MySession(load_ckpt=True) as sess:
            inception_outputs = sess.run_m_times(
                [x_logits, y_logits, x_pool3, y_pool3],
                ckpt_folder=ckpt_folder, ckpt_file=ckpt_file,
                max_iter=num_batch, trace=True)

        # get logits and pool3
        x_logits_np = np.concatenate([inc[0] for inc in inception_outputs], axis=0)
        y_logits_np = np.concatenate([inc[1] for inc in inception_outputs], axis=0)
        x_pool3_np = np.concatenate([inc[2] for inc in inception_outputs], axis=0)
        y_pool3_np = np.concatenate([inc[3] for inc in inception_outputs], axis=0)
        FLAGS.print('logits calculated. Shape = {}.'.format(x_logits_np.shape))
        FLAGS.print('pool3 calculated. Shape = {}.'.format(x_pool3_np.shape))

        # calculate scores
        inc_x = self.inception_score_from_logits(x_logits_np)
        inc_y = self.inception_score_from_logits(y_logits_np)
        # fid_xx compares the two halves of x against each other as a baseline.
        xp3_1, xp3_2 = np.split(x_pool3_np, indices_or_sections=2, axis=0)
        fid_xx = self.fid_from_pool3(xp3_1, xp3_2)
        fid_xy = self.fid_from_pool3(x_pool3_np, y_pool3_np)

        with MySession() as sess:
            scores = sess.run_once([inc_x, inc_y, fid_xx, fid_xy])

        return scores

    def sliced_wasserstein_distance(self, x_batch, y_batch, num_batch=128, ckpt_folder=None, ckpt_file=None):
        """ This function calculates the sliced wasserstein distance between real and fake images.
        This function does not work as expected, swd gives nan
        :param x_batch:
        :param y_batch:
        :param num_batch:
        :param ckpt_folder:
        :param ckpt_file:
        :return:
        """
        with MySession(load_ckpt=True) as sess:
            batches = sess.run_m_times(
                [x_batch, y_batch],
                ckpt_folder=ckpt_folder, ckpt_file=ckpt_file,
                max_iter=num_batch, trace=True)

        # get x_images and y_images; rescale from [-1, 1] to [0, 255].
        x_images = (tf.constant(np.concatenate([batch[0] for batch in batches], axis=0)) + 1.0) * 128.5
        y_images = (tf.constant(np.concatenate([batch[1] for batch in batches], axis=0)) + 1.0) * 128.5
        if self.image_format in {'channels_first', 'NCHW'}:
            x_images = tf.transpose(x_images, perm=(0, 2, 3, 1))
            y_images = tf.transpose(y_images, perm=(0, 2, 3, 1))
        print('images obtained, shape: {}'.format(x_images.shape))
        # sliced_wasserstein_distance returns a list of tuples (distance_real, distance_fake)
        # for each level of the Laplacian pyramid from the highest resolution to the lowest
        swd = tfgan.eval.sliced_wasserstein_distance(
            x_images, y_images, patches_per_image=64, random_sampling_count=4, use_svd=True)
        with MySession() as sess:
            swd = sess.run_once(swd)

        return swd

    def ms_ssim(self, x_batch, y_batch, num_batch=128, ckpt_folder=None, ckpt_file=None, image_size=256):
        """ This function calculates the multiscale structural similarity between a pair of images.
        The image is downscaled four times; at each scale, a 11x11 filter is applied to extract patches.
        USE WITH CAUTION !!!
        1. This code was lost once and redone. Need to test on real datasets to verify it.
        2. This code can be improved to calculate pairwise ms-ssim using tf.image.ssim. tf.image.ssim_multicale is just
        tf.image.ssim with pool downsampling.
        :param x_batch:
        :param y_batch:
        :param num_batch:
        :param ckpt_folder:
        :param ckpt_file:
        :param image_size: ssim is defined on images of size at least 176
        :return:
        """
        # get x_images and y_images; rescale from [-1, 1] to [0, 255].
        x_images = (x_batch + 1.0) * 128.5
        y_images = (y_batch + 1.0) * 128.5
        if self.image_format in {'channels_first', 'NCHW'}:
            x_images = tf.transpose(x_images, perm=(0, 2, 3, 1))
            y_images = tf.transpose(y_images, perm=(0, 2, 3, 1))
        if x_images.get_shape().as_list()[1] != 256:
            x_images = tf.compat.v1.image.resize_bilinear(x_images, [image_size, image_size])
            y_images = tf.compat.v1.image.resize_bilinear(y_images, [image_size, image_size])
        scores = tf.image.ssim_multiscale(x_images, y_images, max_val=255)  # scores in range [0, 1]

        with MySession(load_ckpt=True) as sess:
            scores = sess.run_m_times(
                scores,
                ckpt_folder=ckpt_folder, ckpt_file=ckpt_file,
                max_iter=num_batch, trace=True)

        ssim_score = np.mean(np.concatenate(scores, axis=0), axis=0)

        return ssim_score
| [
"tensorflow.unstack",
"numpy.trace",
"tensorflow.transpose",
"tensorflow.contrib.gan.eval.run_inception",
"tensorflow.split",
"tensorflow.contrib.gan.eval.get_graph_def_from_disk",
"tensorflow.contrib.gan.eval.sliced_wasserstein_distance",
"tensorflow.image.ssim_multiscale",
"numpy.concatenate",
"... | [((2250, 2372), 'tensorflow.contrib.gan.eval.run_inception', 'tfgan.eval.run_inception', (['image'], {'graph_def': 'self.inception_graph_def', 'input_tensor': '"""Mul:0"""', 'output_tensor': 'output_tensor'}), "(image, graph_def=self.inception_graph_def,\n input_tensor='Mul:0', output_tensor=output_tensor)\n", (2274, 2372), True, 'from tensorflow.contrib import gan as tfgan\n'), ((5310, 5357), 'tensorflow.contrib.gan.eval.classifier_score_from_logits', 'tfgan.eval.classifier_score_from_logits', (['logits'], {}), '(logits)\n', (5349, 5357), True, 'from tensorflow.contrib import gan as tfgan\n'), ((5807, 5880), 'tensorflow.contrib.gan.eval.frechet_classifier_distance_from_activations', 'tfgan.eval.frechet_classifier_distance_from_activations', (['x_pool3', 'y_pool3'], {}), '(x_pool3, y_pool3)\n', (5862, 5880), True, 'from tensorflow.contrib import gan as tfgan\n'), ((8440, 8501), 'numpy.concatenate', 'np.concatenate', (['[inc[0] for inc in inception_outputs]'], {'axis': '(0)'}), '([inc[0] for inc in inception_outputs], axis=0)\n', (8454, 8501), True, 'import numpy as np\n'), ((8524, 8585), 'numpy.concatenate', 'np.concatenate', (['[inc[1] for inc in inception_outputs]'], {'axis': '(0)'}), '([inc[1] for inc in inception_outputs], axis=0)\n', (8538, 8585), True, 'import numpy as np\n'), ((8607, 8668), 'numpy.concatenate', 'np.concatenate', (['[inc[2] for inc in inception_outputs]'], {'axis': '(0)'}), '([inc[2] for inc in inception_outputs], axis=0)\n', (8621, 8668), True, 'import numpy as np\n'), ((8690, 8751), 'numpy.concatenate', 'np.concatenate', (['[inc[3] for inc in inception_outputs]'], {'axis': '(0)'}), '([inc[3] for inc in inception_outputs], axis=0)\n', (8704, 8751), True, 'import numpy as np\n'), ((9084, 9135), 'numpy.split', 'np.split', (['x_pool3_np'], {'indices_or_sections': '(2)', 'axis': '(0)'}), '(x_pool3_np, indices_or_sections=2, axis=0)\n', (9092, 9135), True, 'import numpy as np\n'), ((10744, 10867), 
'tensorflow.contrib.gan.eval.sliced_wasserstein_distance', 'tfgan.eval.sliced_wasserstein_distance', (['x_images', 'y_images'], {'patches_per_image': '(64)', 'random_sampling_count': '(4)', 'use_svd': '(True)'}), '(x_images, y_images,\n patches_per_image=64, random_sampling_count=4, use_svd=True)\n', (10782, 10867), True, 'from tensorflow.contrib import gan as tfgan\n'), ((12372, 12429), 'tensorflow.image.ssim_multiscale', 'tf.image.ssim_multiscale', (['x_images', 'y_images'], {'max_val': '(255)'}), '(x_images, y_images, max_val=255)\n', (12396, 12429), True, 'import tensorflow as tf\n'), ((1104, 1150), 'tensorflow.contrib.gan.eval.get_graph_def_from_disk', 'tfgan.eval.get_graph_def_from_disk', (['model_path'], {}), '(model_path)\n', (1138, 1150), True, 'from tensorflow.contrib import gan as tfgan\n'), ((1981, 2019), 'tensorflow.transpose', 'tf.transpose', (['image'], {'perm': '(0, 2, 3, 1)'}), '(image, perm=(0, 2, 3, 1))\n', (1993, 2019), True, 'import tensorflow as tf\n'), ((2097, 2164), 'tensorflow.compat.v1.image.resize_bilinear', 'tf.compat.v1.image.resize_bilinear', (['image', '[image_size, image_size]'], {}), '(image, [image_size, image_size])\n', (2131, 2164), True, 'import tensorflow as tf\n'), ((5257, 5294), 'tensorflow.constant', 'tf.constant', (['logits'], {'dtype': 'tf.float32'}), '(logits, dtype=tf.float32)\n', (5268, 5294), True, 'import tensorflow as tf\n'), ((5652, 5690), 'tensorflow.constant', 'tf.constant', (['x_pool3'], {'dtype': 'tf.float32'}), '(x_pool3, dtype=tf.float32)\n', (5663, 5690), True, 'import tensorflow as tf\n'), ((5753, 5791), 'tensorflow.constant', 'tf.constant', (['y_pool3'], {'dtype': 'tf.float32'}), '(y_pool3, dtype=tf.float32)\n', (5764, 5791), True, 'import tensorflow as tf\n'), ((6333, 6356), 'GeneralTools.math_funcs.graph_func_support.mean_cov_np', 'mean_cov_np', (['x_pool3_np'], {}), '(x_pool3_np)\n', (6344, 6356), False, 'from GeneralTools.math_funcs.graph_func_support import mean_cov_np, trace_sqrt_product_np\n'), 
((6438, 6461), 'GeneralTools.math_funcs.graph_func_support.mean_cov_np', 'mean_cov_np', (['y_pool3_np'], {}), '(y_pool3_np)\n', (6449, 6461), False, 'from GeneralTools.math_funcs.graph_func_support import mean_cov_np, trace_sqrt_product_np\n'), ((8135, 8160), 'GeneralTools.graph_funcs.my_session.MySession', 'MySession', ([], {'load_ckpt': '(True)'}), '(load_ckpt=True)\n', (8144, 8160), False, 'from GeneralTools.graph_funcs.my_session import MySession\n'), ((9262, 9273), 'GeneralTools.graph_funcs.my_session.MySession', 'MySession', ([], {}), '()\n', (9271, 9273), False, 'from GeneralTools.graph_funcs.my_session import MySession\n'), ((9819, 9844), 'GeneralTools.graph_funcs.my_session.MySession', 'MySession', ([], {'load_ckpt': '(True)'}), '(load_ckpt=True)\n', (9828, 9844), False, 'from GeneralTools.graph_funcs.my_session import MySession\n'), ((10369, 10410), 'tensorflow.transpose', 'tf.transpose', (['x_images'], {'perm': '(0, 2, 3, 1)'}), '(x_images, perm=(0, 2, 3, 1))\n', (10381, 10410), True, 'import tensorflow as tf\n'), ((10434, 10475), 'tensorflow.transpose', 'tf.transpose', (['y_images'], {'perm': '(0, 2, 3, 1)'}), '(y_images, perm=(0, 2, 3, 1))\n', (10446, 10475), True, 'import tensorflow as tf\n'), ((10890, 10901), 'GeneralTools.graph_funcs.my_session.MySession', 'MySession', ([], {}), '()\n', (10899, 10901), False, 'from GeneralTools.graph_funcs.my_session import MySession\n'), ((12006, 12047), 'tensorflow.transpose', 'tf.transpose', (['x_images'], {'perm': '(0, 2, 3, 1)'}), '(x_images, perm=(0, 2, 3, 1))\n', (12018, 12047), True, 'import tensorflow as tf\n'), ((12071, 12112), 'tensorflow.transpose', 'tf.transpose', (['y_images'], {'perm': '(0, 2, 3, 1)'}), '(y_images, perm=(0, 2, 3, 1))\n', (12083, 12112), True, 'import tensorflow as tf\n'), ((12189, 12259), 'tensorflow.compat.v1.image.resize_bilinear', 'tf.compat.v1.image.resize_bilinear', (['x_images', '[image_size, image_size]'], {}), '(x_images, [image_size, image_size])\n', (12223, 12259), True, 
'import tensorflow as tf\n'), ((12283, 12353), 'tensorflow.compat.v1.image.resize_bilinear', 'tf.compat.v1.image.resize_bilinear', (['y_images', '[image_size, image_size]'], {}), '(y_images, [image_size, image_size])\n', (12317, 12353), True, 'import tensorflow as tf\n'), ((12470, 12495), 'GeneralTools.graph_funcs.my_session.MySession', 'MySession', ([], {'load_ckpt': '(True)'}), '(load_ckpt=True)\n', (12479, 12495), False, 'from GeneralTools.graph_funcs.my_session import MySession\n'), ((12708, 12738), 'numpy.concatenate', 'np.concatenate', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (12722, 12738), True, 'import numpy as np\n'), ((654, 708), 'tensorflow.contrib.gan.eval.get_graph_def_from_disk', 'tfgan.eval.get_graph_def_from_disk', (['FLAGS.INCEPTION_V1'], {}), '(FLAGS.INCEPTION_V1)\n', (688, 708), True, 'from tensorflow.contrib import gan as tfgan\n'), ((3468, 3530), 'tensorflow.split', 'tf.split', (['images'], {'num_or_size_splits': '(num_images // 100)', 'axis': '(0)'}), '(images, num_or_size_splits=num_images // 100, axis=0)\n', (3476, 3530), True, 'import tensorflow as tf\n'), ((4069, 4167), 'tensorflow.split', 'tf.split', (['images'], {'num_or_size_splits': '([100] * (num_images // 100) + [num_images % 100])', 'axis': '(0)'}), '(images, num_or_size_splits=[100] * (num_images // 100) + [\n num_images % 100], axis=0)\n', (4077, 4167), True, 'import tensorflow as tf\n'), ((6521, 6536), 'numpy.trace', 'np.trace', (['y_cov'], {}), '(y_cov)\n', (6529, 6536), True, 'import numpy as np\n'), ((6541, 6576), 'GeneralTools.math_funcs.graph_func_support.trace_sqrt_product_np', 'trace_sqrt_product_np', (['x_cov', 'y_cov'], {}), '(x_cov, y_cov)\n', (6562, 6576), False, 'from GeneralTools.math_funcs.graph_func_support import mean_cov_np, trace_sqrt_product_np\n'), ((784, 838), 'tensorflow.contrib.gan.eval.get_graph_def_from_disk', 'tfgan.eval.get_graph_def_from_disk', (['FLAGS.INCEPTION_V3'], {}), '(FLAGS.INCEPTION_V3)\n', (818, 838), True, 'from 
tensorflow.contrib import gan as tfgan\n'), ((3932, 3950), 'tensorflow.unstack', 'tf.unstack', (['logits'], {}), '(logits)\n', (3942, 3950), True, 'import tensorflow as tf\n'), ((3989, 4006), 'tensorflow.unstack', 'tf.unstack', (['pool3'], {}), '(pool3)\n', (3999, 4006), True, 'import tensorflow as tf\n'), ((6476, 6506), 'numpy.sum', 'np.sum', (['((x_mean - y_mean) ** 2)'], {}), '((x_mean - y_mean) ** 2)\n', (6482, 6506), True, 'import numpy as np\n'), ((6505, 6520), 'numpy.trace', 'np.trace', (['x_cov'], {}), '(x_cov)\n', (6513, 6520), True, 'import numpy as np\n'), ((10109, 10164), 'numpy.concatenate', 'np.concatenate', (['[batch[0] for batch in batches]'], {'axis': '(0)'}), '([batch[0] for batch in batches], axis=0)\n', (10123, 10164), True, 'import numpy as np\n'), ((10213, 10268), 'numpy.concatenate', 'np.concatenate', (['[batch[1] for batch in batches]'], {'axis': '(0)'}), '([batch[1] for batch in batches], axis=0)\n', (10227, 10268), True, 'import numpy as np\n'), ((3652, 3683), 'tensorflow.stack', 'tf.stack', (['generated_images_list'], {}), '(generated_images_list)\n', (3660, 3683), True, 'import tensorflow as tf\n'), ((4388, 4425), 'tensorflow.stack', 'tf.stack', (['generated_images_list[0:-1]'], {}), '(generated_images_list[0:-1])\n', (4396, 4425), True, 'import tensorflow as tf\n'), ((4771, 4789), 'tensorflow.unstack', 'tf.unstack', (['logits'], {}), '(logits)\n', (4781, 4789), True, 'import tensorflow as tf\n'), ((4844, 4861), 'tensorflow.unstack', 'tf.unstack', (['pool3'], {}), '(pool3)\n', (4854, 4861), True, 'import tensorflow as tf\n')] |
import logging
import pytest
from selene import Browser, Config
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
@pytest.fixture(scope='function')
def browser_func(choose_driver):
    """Browser that closes after each test function or method."""
    yield choose_driver
    # Teardown: quit the browser once the test has finished.
    choose_driver.quit()
@pytest.fixture(scope='class')
def browser_class(choose_driver):
    """Browser that closes after each test class."""
    yield choose_driver
    # Teardown: quit the browser once the whole class has run.
    choose_driver.quit()
@pytest.fixture(scope='module')
def browser_module(choose_driver):
    """Browser that closes after each test module."""
    yield choose_driver
    # Teardown: quit the browser once the whole module has run.
    choose_driver.quit()
@pytest.fixture(scope='session')
def choose_driver(is_remote, t_browser):
    """Remote or local browser selector fixture."""
    # Pick the driver factory once per session based on the is_remote flag.
    factory = remote_driver if is_remote else custom_driver
    return factory(t_browser)
def custom_driver(t_browser):
    """Build a locally-managed Chrome driver wrapped in a selene Browser."""
    logging.debug('custom driver config start')
    # Guard clause: only Chrome is supported locally.
    if t_browser != 'chrome':
        raise ValueError('t_browser does not set')
    driver = webdriver.Chrome(
        executable_path=ChromeDriverManager().install(),
        options=headless_chrome_options(),
    )
    driver.set_page_load_timeout(10)
    browser = Browser(Config(
        driver=driver,
        timeout=10,
        window_width=1366,
        window_height=1200,
    ))
    logging.debug('custom driver config finish')
    return browser
def headless_chrome_options():
    """Chrome options tuned for headless CI runs."""
    logging.info('set chromedriver options start')
    chrome_options = Options()
    chrome_options.set_capability("pageLoadStrategy", "eager")
    # Apply the hardening/headless flags in a single pass, preserving order.
    for flag in (
        "--no-sandbox",
        "--disable-gpu",
        "--disable-notifications",
        "--disable-extensions",
        "--disable-infobars",
        "--enable-automation",
        "--headless",
        "--disable-dev-shm-usage",
        "--disable-setuid-sandbox",
    ):
        chrome_options.add_argument(flag)
    logging.info('set chromedriver options finish')
    return chrome_options
def remote_driver(t_browser, page_load_strategy=None):
    """Build a selene Browser backed by a remote Selenium Grid node.

    :param t_browser: 'chrome' or 'firefox'
    :param page_load_strategy: if truthy, set the 'eager' page-load strategy
        on the browser options actually passed to the remote driver.
    :return: configured selene Browser
    """
    logging.debug('remote driver config start')
    remote_mapping = {
        'chrome': {
            'command_executor': 'http://selenium__standalone-chrome:4444/wd/hub',
            'options': webdriver.ChromeOptions()
        },
        'firefox': {
            'command_executor': 'http://selenium__standalone-firefox:4444/wd/hub',
            'options': webdriver.FirefoxOptions()
        }
    }
    if page_load_strategy:
        # Bug fix: the previous implementation built a DesiredCapabilities dict
        # that was never passed to webdriver.Remote (and mutated the shared
        # class-level CHROME dict), so page_load_strategy had no effect. Apply
        # the capability on the options object that is actually used instead.
        remote_mapping[t_browser]['options'].set_capability("pageLoadStrategy", "eager")
    driver = webdriver.Remote(command_executor=remote_mapping[t_browser]['command_executor'],
                              options=remote_mapping[t_browser]['options'])
    driver.set_page_load_timeout(20)
    browser = Browser(Config(
        driver=driver,
        timeout=10,
        window_width=1500,
        window_height=1200,
    ))
    logging.debug('remote driver config finish')
    return browser
| [
"selenium.webdriver.chrome.options.Options",
"selenium.webdriver.Remote",
"selene.Config",
"selenium.webdriver.ChromeOptions",
"logging.debug",
"webdriver_manager.chrome.ChromeDriverManager",
"selenium.webdriver.FirefoxOptions",
"pytest.fixture",
"selenium.webdriver.DesiredCapabilities",
"logging.... | [((210, 242), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (224, 242), False, 'import pytest\n'), ((394, 423), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (408, 423), False, 'import pytest\n'), ((563, 593), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (577, 593), False, 'import pytest\n'), ((735, 766), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (749, 766), False, 'import pytest\n'), ((1016, 1059), 'logging.debug', 'logging.debug', (['"""custom driver config start"""'], {}), "('custom driver config start')\n", (1029, 1059), False, 'import logging\n'), ((1479, 1523), 'logging.debug', 'logging.debug', (['"""custom driver config finish"""'], {}), "('custom driver config finish')\n", (1492, 1523), False, 'import logging\n'), ((1614, 1660), 'logging.info', 'logging.info', (['"""set chromedriver options start"""'], {}), "('set chromedriver options start')\n", (1626, 1660), False, 'import logging\n'), ((1682, 1691), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (1689, 1691), False, 'from selenium.webdriver.chrome.options import Options\n'), ((2245, 2292), 'logging.info', 'logging.info', (['"""set chromedriver options finish"""'], {}), "('set chromedriver options finish')\n", (2257, 2292), False, 'import logging\n'), ((2406, 2449), 'logging.debug', 'logging.debug', (['"""remote driver config start"""'], {}), "('remote driver config start')\n", (2419, 2449), False, 'import logging\n'), ((2975, 3106), 'selenium.webdriver.Remote', 'webdriver.Remote', ([], {'command_executor': "remote_mapping[t_browser]['command_executor']", 'options': "remote_mapping[t_browser]['options']"}), "(command_executor=remote_mapping[t_browser][\n 'command_executor'], options=remote_mapping[t_browser]['options'])\n", (2991, 3106), False, 'from selenium import 
webdriver\n'), ((3308, 3352), 'logging.debug', 'logging.debug', (['"""remote driver config finish"""'], {}), "('remote driver config finish')\n", (3321, 3352), False, 'import logging\n'), ((1362, 1434), 'selene.Config', 'Config', ([], {'driver': 'driver', 'timeout': '(10)', 'window_width': '(1366)', 'window_height': '(1200)'}), '(driver=driver, timeout=10, window_width=1366, window_height=1200)\n', (1368, 1434), False, 'from selene import Browser, Config\n'), ((3191, 3263), 'selene.Config', 'Config', ([], {'driver': 'driver', 'timeout': '(10)', 'window_width': '(1500)', 'window_height': '(1200)'}), '(driver=driver, timeout=10, window_width=1500, window_height=1200)\n', (3197, 3263), False, 'from selene import Browser, Config\n'), ((2598, 2623), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (2621, 2623), False, 'from selenium import webdriver\n'), ((2762, 2788), 'selenium.webdriver.FirefoxOptions', 'webdriver.FirefoxOptions', ([], {}), '()\n', (2786, 2788), False, 'from selenium import webdriver\n'), ((2863, 2894), 'selenium.webdriver.DesiredCapabilities', 'webdriver.DesiredCapabilities', ([], {}), '()\n', (2892, 2894), False, 'from selenium import webdriver\n'), ((1140, 1161), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (1159, 1161), False, 'from webdriver_manager.chrome import ChromeDriverManager\n')] |
from typing import Any, Callable
import matplotlib.pyplot as plt
from numpy import arange
from .membership import Membership
class BaseSet:
    """A fuzzy set: a named membership function plus an aggregation operator
    used when combining sets."""

    def __init__(
        self,
        name: str,
        membership: Membership,
        aggregation: Callable[[Any, Any], Any],
    ):
        self.name = name
        self.membership = membership
        self.aggregation = aggregation

    def __add__(self, arg: "BaseSet"):
        """Union of two sets: grades combined with this set's aggregation."""
        def combined(x):
            return self.aggregation(self.membership(x), arg.membership(x))

        joined_items = self.membership.items + arg.membership.items
        union_membership = Membership(combined, joined_items)
        return BaseSet(
            f"({self.name})_union_({arg.name})",
            union_membership,
            aggregation=self.aggregation,
        )

    def domain(self, step=0.05):
        """Sampled domain: an even grid over [first, last] merged with the
        membership's own anchor points, sorted ascending."""
        lo = self.membership.items[0]
        hi = self.membership.items[-1]
        points = list(arange(lo, hi, step))
        points.extend(self.membership.items)
        points.sort()
        return points

    def __iter__(self):
        for value in self.domain():
            yield value

    def __len__(self):
        return len(self.domain())

    def __str__(self) -> str:
        return self.name

    def graph(self, step: float = 0.05):
        """Plot the membership function over the domain and save it to
        ``set_<name>.png``."""
        xs = self.domain(step=step)
        ys = [self.membership(value) for value in xs]
        plt.figure()
        plt.title(self.name)
        plt.xlabel("Domain values")
        plt.ylabel("Membership grade")
        plt.plot(xs, ys)
        plt.savefig(f"set_{self.name}.png")
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.arange"
] | [((1352, 1364), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1362, 1364), True, 'import matplotlib.pyplot as plt\n'), ((1373, 1393), 'matplotlib.pyplot.title', 'plt.title', (['self.name'], {}), '(self.name)\n', (1382, 1393), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1429), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Domain values"""'], {}), "('Domain values')\n", (1412, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1438, 1468), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Membership grade"""'], {}), "('Membership grade')\n", (1448, 1468), True, 'import matplotlib.pyplot as plt\n'), ((1477, 1501), 'matplotlib.pyplot.plot', 'plt.plot', (['x_data', 'y_data'], {}), '(x_data, y_data)\n', (1485, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1510, 1545), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""set_{self.name}.png"""'], {}), "(f'set_{self.name}.png')\n", (1521, 1545), True, 'import matplotlib.pyplot as plt\n'), ((924, 948), 'numpy.arange', 'arange', (['start', 'end', 'step'], {}), '(start, end, step)\n', (930, 948), False, 'from numpy import arange\n')] |
import numpy as np
import chess
import chess.engine
from tkinter.filedialog import asksaveasfilename
from parsing.math_encode import tensor_encode, tensor_decode
from inference.infer_action import get_action
class PlayLoop:
    __doc__ = '''
    An interactive REPL environment for play with a trained chess AI
    '''
    # Fixed-width columns for one trace row:
    # (move, side label, source map, target map) for WHITE, then for BLACK.
    TRACE_FORMAT = '{:<7}{:<18}{:<42}{:<45}{:<7}{:<18}{:<42}{}'
    TRACE_HEADER = ('move', 'WHITE', 'source', 'target', 'move', 'BLACK', 'source', 'target')
    def __init__(self, policy, secondary_policy=None, engine_path='../engine/stockfish.exe'):
        '''
        Constructs a PlayLoop instance
        :param policy: PolicyModel - primary AI agent to simulate
        :param secondary_policy: None/PolicyModel/'same'/'stockfish' - Agent used to replace player moves
                                 (if None, human is playing; if 'same', secondary_policy=policy)
        :param engine_path: str - path to the Stockfish binary (only used when
                                  secondary_policy == 'stockfish')
        '''
        self.policy = policy
        self.player_white = None
        self.board = None
        self.keep_trace = False
        self.trace = None
        self.engine = None
        # Decide how the "player" side's moves are produced.
        if secondary_policy is not None:
            if secondary_policy == 'same':
                self.player_move_func = lambda: self._get_action_from_policy(policy)
            elif secondary_policy == 'stockfish':
                self.player_move_func = self._get_stockfish
                self.engine = chess.engine.SimpleEngine.popen_uci(engine_path)
            else:
                self.player_move_func = lambda: self._get_action_from_policy(secondary_policy)
        else:
            self.player_move_func = self._get_player_move
    def init_game(self, player_side, keep_trace=True):
        '''
        Sets up a game and indicating the side to play as by the player
        :param player_side: 'w'/'b' - side to play as
        :param keep_trace: bool - if True, accumulates the trace for the entire game
        :return: None
        '''
        if self.board is not None:
            raise RuntimeWarning('Board already initiatized, set force_new=True to force override')
        if player_side == 'w':
            self.player_white = True
        elif player_side == 'b':
            self.player_white = False
        else:
            raise ValueError(f'Expected "w" or "b" for player_side but given {player_side}')
        self.board = chess.Board()
        self.keep_trace = keep_trace
        if keep_trace:
            self.trace = list()
    def reset(self):
        '''
        Resets the PlayLoop state (except the trace)
        :return: None
        '''
        self.board = None
        self.keep_trace = False
    def loop(self, verbose=True):
        '''
        Runs the loop until the termination of a game
        :param verbose: bool - prints messages if True
        :return: None
        '''
        if self.board is None:
            raise RuntimeError('init_game was not called to configure game settings!')
        if self.board.is_game_over():
            raise RuntimeError('Game has already ended. Call reset and init_gram before calling loop')
        trace_collector = list()
        # When the player is black, the AI (white) makes the opening move.
        if not self.player_white:
            move, policy = self._get_action_from_policy()
            if self.keep_trace: self._store_trace(move, trace_collector, policy=policy)
            if verbose: print(f'\nAI made {move} move\n')
        while not self.board.is_game_over():
            if verbose: print(self.board)
            # player/secondary_policy move
            move, policy = self.player_move_func()
            if self.keep_trace: self._store_trace(move, trace_collector, policy=policy)
            if verbose: print(f'\nPlayer made {move} move\n')
            if self.board.is_game_over():
                break
            # policy move
            move, policy = self._get_action_from_policy()
            if self.keep_trace: self._store_trace(move, trace_collector, policy=policy)
            if verbose: print(f'\nAI made {move} move\n')
        # Flush any half-completed trace row (game ended mid-turn).
        if len(trace_collector) != 0:
            self._store_trace(move, trace_collector, policy=policy, force_flush=True)
        if verbose: print('Game completed')
    def get_trace(self, printable=True):
        '''
        Returns the trace
        :param printable: bool - If True, returns a printable and formamted string of the trace
        :return: str/list(str)
        '''
        if printable:
            return '\n'.join(self.trace)
        return self.trace
    def save_trace(self, file_path=None, interactive=True):
        '''
        Saves trace in a text file
        Automatically appends ".txt" at the end of the file_path if the suffix is not found
        :param file_path: None/str - file path to save to
        :param interactive: bool - if True, using Tkinter GUI to select file path
        :return: None
        '''
        if interactive:
            file_path = asksaveasfilename(filetypes=[('Text file', '*.txt')])
        if file_path[-4:] != '.txt':
            file_path = file_path + '.txt'
        with open(file_path, 'w') as f:
            f.write(self.get_trace(printable=True))
    def _get_action_from_policy(self, external_policy=None):
        '''
        Gets UCI representation of the move using the policy loaded and pushes the move on the board
        :param external_policy - None/PolicyModel - policy to use (if None, defaults to loaded policy)
        :return: str
        '''
        policy = self.policy
        flip = self.player_white
        if external_policy:  # player is an AI
            policy = external_policy
            flip = not flip
        src, tgt, _ = policy.infer(tensor_encode(self.board, mirror=flip))
        if flip:  # perform mirror flips so the output maps match board orientation
            src = np.flip(src[0, ...], 0)
            tgt = np.flip(tgt[0, ...], 0)
        else:
            src = src[0, ...]
            tgt = tgt[0, ...]
        move = get_action(self.board, src, tgt)
        self.board.push(chess.Move.from_uci(move))
        return move, (src, tgt)
    def _get_player_move(self):
        '''
        Obtains the move from the player by command line and pushes the move on the board
        :return: str, None
        '''
        while True:  # handles invalid player moves
            try:
                # NOTE(review): chess.Move.from_uci raises ValueError on
                # malformed input, which is NOT caught here — confirm intended.
                move_input = input('Enter your move: ')
                move = chess.Move.from_uci(move_input)
                if move in self.board.legal_moves:
                    self.board.push(move)
                else:
                    raise AssertionError(f'{move_input} is not a valid move')
            except AssertionError as e:
                print(f'ERROR: {e}')
            else:
                break
        return move_input, None
    def _get_stockfish(self, time=0.001, depth=1):
        '''
        Obtains the move from the Stockfish engine with the lowest ELO ratings
        :param time: float - time limit for the engine
        :param depth: int - maximum search depth
        :return: str, None
        '''
        move = self.engine.play(self.board, chess.engine.Limit(time=time, depth=depth),
                                ponder=False, options={'uci_elo': 1350}).move
        self.board.push(move)
        return move.uci(), None
    def _store_trace(self, move, trace_collector, policy=None, force_flush=False):
        '''
        Collects the trace onto trace_collector and once white and black has made the move,
        append to the main trace list
        :param move: str - UCI representation of the move
        :param trace_collector: list(str) - string accumulator
        :param policy: None/tuple(np.ndarray, np.ndarray) - policy output
        :param force_flush: bool - if True, appends incomplete trace
        :return: None
        '''
        trace_collector.append(str(self.board))
        trace_collector.append(move)
        if policy is None:
            # Pad with blank lines so the column zip below stays aligned.
            trace_collector.append('N/A\n\n\n\n\n\n\n')
            trace_collector.append('N/A\n\n\n\n\n\n\n')
        else:
            # Strip numpy array brackets so the maps render as plain grids.
            trace_collector.append(str(np.around(policy[0], 2)).replace('[[', '')
                                   .replace(' [ ', '').replace(' [', '').replace(']', ''))
            trace_collector.append(str(np.around(policy[1], 2)).replace('[[', '')
                                   .replace(' [ ', '').replace(' [', '').replace(']', ''))
        if len(trace_collector) == 8:  # two half-moves has been made
            self.trace.append(PlayLoop.TRACE_FORMAT.format(*PlayLoop.TRACE_HEADER))
            for b1, src1, tgt1, b2, src2, tgt2 in zip(trace_collector[0].split('\n'),
                                                      trace_collector[2].split('\n'),
                                                      trace_collector[3].split('\n'),
                                                      trace_collector[4].split('\n'),
                                                      trace_collector[6].split('\n'),
                                                      trace_collector[7].split('\n')):
                self.trace.append(PlayLoop.TRACE_FORMAT.format(trace_collector[1], b1, src1, tgt1,
                                                               trace_collector[5], b2, src2, tgt2))
                # Only print the move labels on the first row of the block.
                trace_collector[1] = ''
                trace_collector[5] = ''
            self.trace.append('\n')
            trace_collector.clear()
        elif force_flush:
            self.trace.append(PlayLoop.TRACE_FORMAT.format(*PlayLoop.TRACE_HEADER))
            for b1, src1, tgt1 in zip(trace_collector[0].split('\n'),
                                      trace_collector[2].split('\n'),
                                      trace_collector[3].split('\n')):
                self.trace.append(PlayLoop.TRACE_FORMAT.format(trace_collector[1], b1, src1, tgt1,
                                                               '', '', '', ''))
                trace_collector[1] = ''
            self.trace.append('\n')
| [
"numpy.flip",
"inference.infer_action.get_action",
"chess.Move.from_uci",
"chess.engine.SimpleEngine.popen_uci",
"tkinter.filedialog.asksaveasfilename",
"chess.Board",
"chess.engine.Limit",
"parsing.math_encode.tensor_encode",
"numpy.around"
] | [((2322, 2335), 'chess.Board', 'chess.Board', ([], {}), '()\n', (2333, 2335), False, 'import chess\n'), ((5869, 5901), 'inference.infer_action.get_action', 'get_action', (['self.board', 'src', 'tgt'], {}), '(self.board, src, tgt)\n', (5879, 5901), False, 'from inference.infer_action import get_action\n'), ((4863, 4916), 'tkinter.filedialog.asksaveasfilename', 'asksaveasfilename', ([], {'filetypes': "[('Text file', '*.txt')]"}), "(filetypes=[('Text file', '*.txt')])\n", (4880, 4916), False, 'from tkinter.filedialog import asksaveasfilename\n'), ((5613, 5651), 'parsing.math_encode.tensor_encode', 'tensor_encode', (['self.board'], {'mirror': 'flip'}), '(self.board, mirror=flip)\n', (5626, 5651), False, 'from parsing.math_encode import tensor_encode, tensor_decode\n'), ((5713, 5736), 'numpy.flip', 'np.flip', (['src[0, ...]', '(0)'], {}), '(src[0, ...], 0)\n', (5720, 5736), True, 'import numpy as np\n'), ((5755, 5778), 'numpy.flip', 'np.flip', (['tgt[0, ...]', '(0)'], {}), '(tgt[0, ...], 0)\n', (5762, 5778), True, 'import numpy as np\n'), ((5926, 5951), 'chess.Move.from_uci', 'chess.Move.from_uci', (['move'], {}), '(move)\n', (5945, 5951), False, 'import chess\n'), ((6309, 6340), 'chess.Move.from_uci', 'chess.Move.from_uci', (['move_input'], {}), '(move_input)\n', (6328, 6340), False, 'import chess\n'), ((7014, 7056), 'chess.engine.Limit', 'chess.engine.Limit', ([], {'time': 'time', 'depth': 'depth'}), '(time=time, depth=depth)\n', (7032, 7056), False, 'import chess\n'), ((1370, 1418), 'chess.engine.SimpleEngine.popen_uci', 'chess.engine.SimpleEngine.popen_uci', (['engine_path'], {}), '(engine_path)\n', (1405, 1418), False, 'import chess\n'), ((8005, 8028), 'numpy.around', 'np.around', (['policy[0]', '(2)'], {}), '(policy[0], 2)\n', (8014, 8028), True, 'import numpy as np\n'), ((8178, 8201), 'numpy.around', 'np.around', (['policy[1]', '(2)'], {}), '(policy[1], 2)\n', (8187, 8201), True, 'import numpy as np\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit test for the streaming wordcount example with debug."""
# pytype: skip-file
import unittest
import mock
import pytest
import apache_beam as beam
from apache_beam.examples import streaming_wordcount_debugging
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
# Protect against environments where the PubSub library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud import pubsub
except ImportError:
pubsub = None
# pylint: enable=wrong-import-order, wrong-import-position
@pytest.mark.examples_postcommit
class StreamingWordcountDebugging(unittest.TestCase):
  @unittest.skipIf(pubsub is None, 'GCP dependencies are not installed')
  @mock.patch('apache_beam.io.ReadFromPubSub')
  @mock.patch('apache_beam.io.WriteToPubSub')
  def test_streaming_wordcount_debugging(self, *unused_mocks):
    """Run the example end-to-end with PubSub replaced by in-memory fakes."""
    # Factory returning a fake ReadFromPubSub that checks the topic /
    # subscription it was configured with and feeds a TestStream instead.
    def FakeReadFromPubSub(topic=None, subscription=None, values=None):
      expected_topic = topic
      expected_subscription = subscription
      def _inner(topic=None, subscription=None):
        assert topic == expected_topic
        assert subscription == expected_subscription
        return TestStream().add_elements(values)
      return _inner
    # PTransform that asserts on the PCollection instead of writing it out.
    class AssertTransform(beam.PTransform):
      def __init__(self, matcher):
        self.matcher = matcher
      def expand(self, pcoll):
        assert_that(pcoll, self.matcher)
    def FakeWriteToPubSub(topic=None, values=None):
      expected_topic = topic
      def _inner(topic=None, subscription=None):
        assert topic == expected_topic
        return AssertTransform(equal_to(values))
      return _inner
    input_topic = 'projects/fake-beam-test-project/topic/intopic'
    input_values = [
        '150', '151', '152', '153', '154', '210', '211', '212', '213', '214'
    ]
    output_topic = 'projects/fake-beam-test-project/topic/outtopic'
    output_values = [
        '150: 1',
        '151: 1',
        '152: 1',
        '153: 1',
        '154: 1',
        '210: 1',
        '211: 1',
        '212: 1',
        '213: 1',
        '214: 1'
    ]
    # Monkeypatch the Beam I/O entry points the example uses; the
    # mock.patch decorators above restore the originals afterwards.
    beam.io.ReadFromPubSub = (
        FakeReadFromPubSub(
            topic=input_topic,
            values=list(x.encode('utf-8') for x in input_values)))
    beam.io.WriteToPubSub = (
        FakeWriteToPubSub(
            topic=output_topic,
            values=list(x.encode('utf-8') for x in output_values)))
    streaming_wordcount_debugging.run([
        '--input_topic',
        'projects/fake-beam-test-project/topic/intopic',
        '--output_topic',
        'projects/fake-beam-test-project/topic/outtopic'
    ],
                                      save_main_session=False)
if __name__ == '__main__':
  unittest.main()  # standard unittest entry point when run as a script
| [
"mock.patch",
"apache_beam.testing.util.equal_to",
"unittest.skipIf",
"apache_beam.testing.test_stream.TestStream",
"apache_beam.examples.streaming_wordcount_debugging.run",
"apache_beam.testing.util.assert_that",
"unittest.main"
] | [((1516, 1585), 'unittest.skipIf', 'unittest.skipIf', (['(pubsub is None)', '"""GCP dependencies are not installed"""'], {}), "(pubsub is None, 'GCP dependencies are not installed')\n", (1531, 1585), False, 'import unittest\n'), ((1589, 1632), 'mock.patch', 'mock.patch', (['"""apache_beam.io.ReadFromPubSub"""'], {}), "('apache_beam.io.ReadFromPubSub')\n", (1599, 1632), False, 'import mock\n'), ((1636, 1678), 'mock.patch', 'mock.patch', (['"""apache_beam.io.WriteToPubSub"""'], {}), "('apache_beam.io.WriteToPubSub')\n", (1646, 1678), False, 'import mock\n'), ((3589, 3604), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3602, 3604), False, 'import unittest\n'), ((3287, 3489), 'apache_beam.examples.streaming_wordcount_debugging.run', 'streaming_wordcount_debugging.run', (["['--input_topic', 'projects/fake-beam-test-project/topic/intopic',\n '--output_topic', 'projects/fake-beam-test-project/topic/outtopic']"], {'save_main_session': '(False)'}), "(['--input_topic',\n 'projects/fake-beam-test-project/topic/intopic', '--output_topic',\n 'projects/fake-beam-test-project/topic/outtopic'], save_main_session=False)\n", (3320, 3489), False, 'from apache_beam.examples import streaming_wordcount_debugging\n'), ((2249, 2281), 'apache_beam.testing.util.assert_that', 'assert_that', (['pcoll', 'self.matcher'], {}), '(pcoll, self.matcher)\n', (2260, 2281), False, 'from apache_beam.testing.util import assert_that\n'), ((2484, 2500), 'apache_beam.testing.util.equal_to', 'equal_to', (['values'], {}), '(values)\n', (2492, 2500), False, 'from apache_beam.testing.util import equal_to\n'), ((2043, 2055), 'apache_beam.testing.test_stream.TestStream', 'TestStream', ([], {}), '()\n', (2053, 2055), False, 'from apache_beam.testing.test_stream import TestStream\n')] |
# -*- coding: utf-8 -*-
import pickle
from sklearn.ensemble import RandomForestClassifier
from base_shallow_classifier import BaseShallowClassifier
class RFClassifier(BaseShallowClassifier):
    """Shallow image classifier backed by scikit-learn's random forest.

    On the FashionMNIST test split this setup reaches roughly 87.82%
    accuracy with ``n_estimators=160`` and ``min_samples_split=2``;
    the exact figure depends on the random seed.
    """

    def __init__(self, load_data=True):
        """Delegate to the base class, which optionally loads the datasets."""
        super().__init__(load_data)

    def get_algorithm(self):
        """Return a fresh, default-configured RFC (used by cross_validation)."""
        return RandomForestClassifier()

    def train_model(self, save_path, max_obs=None,
                    n_estimators=10, min_samples_split=2):
        """Fit a random forest on (a prefix of) the FashionMNIST training
        split and pickle the fitted model to ``save_path``.

        :param save_path: destination file for the pickled model
        :param max_obs: optional cap on the number of training observations
        :param n_estimators: number of trees in the forest
        :param min_samples_split: minimum samples required to split a node
        """
        if self.train_data is None or self.train_labels is None:
            raise ValueError('Fashion MNIST datasets is not loaded')
        limit = max_obs if max_obs else self.train_data.shape[0]
        features = self.train_data[:limit]
        labels = self.train_labels[:limit]
        self.model = RandomForestClassifier(
            n_estimators=n_estimators,
            min_samples_split=min_samples_split)
        self.model.fit(features, labels)
        with open(save_path, 'wb') as out:
            pickle.dump(self.model, out)
| [
"pickle.dumps",
"sklearn.ensemble.RandomForestClassifier"
] | [((867, 891), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (889, 891), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1537, 1628), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'min_samples_split': 'min_samples_split'}), '(n_estimators=n_estimators, min_samples_split=\n min_samples_split)\n', (1559, 1628), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1747, 1771), 'pickle.dumps', 'pickle.dumps', (['self.model'], {}), '(self.model)\n', (1759, 1771), False, 'import pickle\n')] |
from oslo_log import log
from oslo_config import cfg
from report import storage
from pecan import hooks
LOG = log.getLogger(__name__)  # module-level oslo.log logger
class RPCHook(hooks.PecanHook):
    """Pecan hook that exposes the RPC client on every incoming request."""
    def __init__(self, rcp_client):
        self._rpc_client = rcp_client
    def before(self, state):
        # Runs before the controller: handlers can reach the client via
        # ``request.rpc_client``.
        state.request.rpc_client = self._rpc_client
class DBHook(hooks.PecanHook):
    """Pecan hook that opens the storage connection once at startup and
    attaches it to every incoming request."""
    def __init__(self):
        self.storage_connection = storage.get_connection_from_config(cfg.CONF)
        if not self.storage_connection:
            # RuntimeError instead of the overly generic bare Exception;
            # it is still an Exception subclass, so any existing
            # ``except Exception`` caller keeps working.
            raise RuntimeError("Api failed to start. "
                               "Failed to connect to database.")
    def before(self, state):
        # Handlers can reach the connection via ``request.storage_conn``.
        state.request.storage_conn = self.storage_connection
| [
"report.storage.get_connection_from_config",
"oslo_log.log.getLogger"
] | [((111, 134), 'oslo_log.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (124, 134), False, 'from oslo_log import log\n'), ((417, 461), 'report.storage.get_connection_from_config', 'storage.get_connection_from_config', (['cfg.CONF'], {}), '(cfg.CONF)\n', (451, 461), False, 'from report import storage\n')] |
# Generated by Django 2.0.9 on 2018-11-25 11:21
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('products', '0011_auto_20181123_1446'),
    ]
    operations = [
        # Rename Item.amount to Item._amount (made "private"; presumably
        # now exposed through a property on the model — verify in models.py).
        migrations.RenameField(
            model_name='item',
            old_name='amount',
            new_name='_amount',
        ),
        # New optional per-item-type default amount.
        migrations.AddField(
            model_name='itemtype',
            name='default_amount',
            field=models.SmallIntegerField(blank=True, null=True),
        ),
    ]
| [
"django.db.migrations.RenameField",
"django.db.models.SmallIntegerField"
] | [((236, 321), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""item"""', 'old_name': '"""amount"""', 'new_name': '"""_amount"""'}), "(model_name='item', old_name='amount', new_name='_amount'\n )\n", (258, 321), False, 'from django.db import migrations, models\n'), ((482, 529), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (506, 529), False, 'from django.db import migrations, models\n')] |
from weibo import WeiboClient
from weibo.watchyou import fetch_replies
# Print the text of every fetched reply.
for r in fetch_replies(): # the "weibo" global that fetch_replies depends on lives in the watchyou module; the function cannot access globals defined in this module
    print(r['text'])
| [
"weibo.watchyou.fetch_replies"
] | [((84, 99), 'weibo.watchyou.fetch_replies', 'fetch_replies', ([], {}), '()\n', (97, 99), False, 'from weibo.watchyou import fetch_replies\n')] |
# ============================================================================
# FILE: kind.py
# AUTHOR: <NAME> <Shougo.Matsu at g<EMAIL>>
# License: MIT license
# ============================================================================
import json
import typing
from pathlib import Path
from defx.action import ActionAttr
from defx.action import ActionTable
from defx.action import do_action
from defx.context import Context
from defx.defx import Defx
from defx.session import Session
from defx.util import Nvim
from defx.view import View
# Global registry mapping action names to their ActionTable entries;
# populated as a side effect of the ``action`` decorator below.
_action_table: typing.Dict[str, ActionTable] = {}
# Signature shared by every defx action implementation.
ACTION_FUNC = typing.Callable[[View, Defx, Context], None]
def action(name: str, attr: ActionAttr = ActionAttr.NONE
           ) -> typing.Callable[[ACTION_FUNC], ACTION_FUNC]:
    """Decorator that registers ``func`` in ``_action_table`` under ``name``.

    Registration happens as a side effect at decoration time; the returned
    wrapper simply forwards all calls to the original function.
    """
    def wrapper(func: ACTION_FUNC) -> ACTION_FUNC:
        _action_table[name] = ActionTable(func=func, attr=attr)
        def inner_wrapper(view: View, defx: Defx, context: Context) -> None:
            return func(view, defx, context)
        return inner_wrapper
    return wrapper
class Base:
    """Base class for defx kinds; exposes the shared action table."""
    def __init__(self, vim: Nvim) -> None:
        self.vim = vim
        self.name = 'base'
    def get_actions(self) -> typing.Dict[str, ActionTable]:
        # All kinds currently share the module-level registry.
        return _action_table
@action(name='add_session', attr=ActionAttr.NO_TAGETS)
def _add_session(view: View, defx: Defx, context: Context) -> None:
    """Create (or refresh) a session for ``context.args[0]`` or the cwd,
    then persist all sessions via ``_save_session``."""
    path = context.args[0] if context.args else defx._cwd
    if path[-1] == '/':
        # Remove the last slash
        path = path[: -1]
    # Only the cwd variant records the currently opened trees.
    opened_candidates = [] if context.args else list(defx._opened_candidates)
    session: Session
    if path in view._sessions:
        # Refresh an existing session, keeping its display name.
        old_session = view._sessions[path]
        session = Session(
            name=old_session.name, path=old_session.path,
            opened_candidates=opened_candidates)
    else:
        name = Path(path).name
        session = Session(
            name=name, path=path,
            opened_candidates=opened_candidates)
        view.print_msg(f'session "{name}" is created')
    view._sessions[session.path] = session
    _save_session(view, defx, context)
@action(name='call', attr=ActionAttr.REDRAW)
def _call(view: View, defx: Defx, context: Context) -> None:
    """
    Invoke a user-supplied Vim function with the current context.
    """
    func_name = context.args[0] if context.args else None
    if not func_name:
        return
    payload = context._asdict()
    payload['cwd'] = defx._cwd
    payload['targets'] = [str(target['action__path'])
                          for target in context.targets]
    view._vim.call(func_name, payload)
@action(name='clear_select_all', attr=ActionAttr.MARK | ActionAttr.NO_TAGETS)
def _clear_select_all(view: View, defx: Defx, context: Context) -> None:
    """Drop the selection mark from every candidate of this defx buffer."""
    for candidate in view._candidates:
        if candidate['_defx_index'] == defx._index:
            candidate['is_selected'] = False
@action(name='close_tree', attr=ActionAttr.TREE | ActionAttr.CURSOR_TARGET)
def _close_tree(view: View, defx: Defx, context: Context) -> None:
    """Close each targeted open tree, or the tree containing the target."""
    for target in context.targets:
        path = target['action__path']
        if target['is_directory'] and target['is_opened_tree']:
            view.close_tree(path, defx._index)
        else:
            view.close_tree(path.parent, defx._index)
            view.search_file(path.parent, defx._index)
@action(name='delete_session', attr=ActionAttr.NO_TAGETS)
def _delete_session(view: View, defx: Defx, context: Context) -> None:
    """Remove the named session, if present, and persist the change."""
    if not context.args:
        return
    name = context.args[0]
    if name in view._sessions:
        view._sessions.pop(name)
        _save_session(view, defx, context)
@action(name='load_session', attr=ActionAttr.NO_TAGETS)
def _load_session(view: View, defx: Defx, context: Context) -> None:
    """Replace the in-memory sessions with those stored in the session file."""
    session_file = Path(context.session_file)
    if not context.session_file or not session_file.exists():
        return
    loaded_session = json.loads(session_file.read_text())
    if 'sessions' not in loaded_session:
        return
    view._sessions = {}
    for path, session in loaded_session['sessions'].items():
        view._sessions[path] = Session(**session)
    # Mirror the sessions into a buffer variable for the Vim side.
    view._vim.current.buffer.vars['defx#_sessions'] = [
        x._asdict() for x in view._sessions.values()
    ]
@action(name='multi')
def _multi(view: View, defx: Defx, context: Context) -> None:
    """Run several actions in sequence.

    Each entry in ``context.args`` is either an action name or a list
    ``[name, arg1, ...]``.
    """
    for entry in context.args:
        parts = entry if isinstance(entry, list) else [entry]
        do_action(view, defx, parts[0], context._replace(args=parts[1:]))
@action(name='check_redraw', attr=ActionAttr.NO_TAGETS)
def _nop(view: View, defx: Defx, context: Context) -> None:
    """Do nothing; exists so a redraw check can be triggered by name."""
@action(name='open_tree', attr=ActionAttr.TREE | ActionAttr.CURSOR_TARGET)
def _open_tree(view: View, defx: Defx, context: Context) -> None:
    """Open each targeted directory as a tree (non-recursively)."""
    for target in context.targets:
        if target['is_directory']:
            view.open_tree(target['action__path'], defx._index, 0)
@action(name='open_tree_recursive',
        attr=ActionAttr.TREE | ActionAttr.CURSOR_TARGET)
def _open_tree_recursive(view: View, defx: Defx, context: Context) -> None:
    """Open each targeted directory recursively (default depth 20)."""
    depth = int(context.args[0]) if context.args else 20
    for target in context.targets:
        if target['is_directory']:
            view.open_tree(target['action__path'], defx._index, depth)
@action(name='open_or_close_tree',
        attr=ActionAttr.TREE | ActionAttr.CURSOR_TARGET)
def _open_or_close_tree(view: View, defx: Defx, context: Context) -> None:
    """Toggle the tree state of each target."""
    for target in context.targets:
        single = context._replace(targets=[target])
        if target['is_directory'] and not target['is_opened_tree']:
            _open_tree(view, defx, single)
        else:
            _close_tree(view, defx, single)
@action(name='print')
def _print(view: View, defx: Defx, context: Context) -> None:
    """Echo the full path of every target."""
    for target in context.targets:
        view.print_msg(str(target['action__path']))
@action(name='quit', attr=ActionAttr.NO_TAGETS)
def _quit(view: View, defx: Defx, context: Context) -> None:
    """Close the defx window."""
    view.quit()
@action(name='redraw', attr=ActionAttr.NO_TAGETS)
def _redraw(view: View, defx: Defx, context: Context) -> None:
    """Force a full redraw of the view."""
    view.redraw(True)
@action(name='repeat', attr=ActionAttr.MARK)
def _repeat(view: View, defx: Defx, context: Context) -> None:
    """Re-run the most recently executed action."""
    do_action(view, defx, view._prev_action, context)
@action(name='save_session', attr=ActionAttr.NO_TAGETS)
def _save_session(view: View, defx: Defx, context: Context) -> None:
    """Persist the sessions to the buffer variable and the session file."""
    view._vim.current.buffer.vars['defx#_sessions'] = [
        x._asdict() for x in view._sessions.values()
    ]
    if not context.session_file:
        # No file configured: sessions live in the buffer variable only.
        return
    session_file = Path(context.session_file)
    session_file.write_text(json.dumps({
        'version': view._session_version,
        'sessions': {x: y._asdict() for x, y in view._sessions.items()}
    }))
@action(name='search', attr=ActionAttr.NO_TAGETS)
def _search(view: View, defx: Defx, context: Context) -> None:
    """Move the cursor to ``context.args[0]``, opening trees as needed."""
    if not context.args or not context.args[0]:
        return
    search_path = context.args[0]
    path = Path(search_path)
    # Walk upward until an ancestor is already displayed (or we hit the root).
    parents: typing.List[Path] = []
    while view.get_candidate_pos(
            path, defx._index) < 0 and path.parent != path:
        path = path.parent
        parents.append(path)
    # Open the collected ancestors top-down so the target becomes visible.
    for parent in reversed(parents):
        view.open_tree(parent, defx._index, 0)
    view.update_opened_candidates()
    view.redraw()
    view.search_file(Path(search_path), defx._index)
@action(name='toggle_columns', attr=ActionAttr.REDRAW)
def _toggle_columns(view: View, defx: Defx, context: Context) -> None:
    """
    Toggle between the requested columns and the default ones.
    """
    requested = context.args[0] if context.args else ''
    columns = requested.split(':')
    if not columns:
        return
    active = [column.name for column in view._columns]
    if columns == active:
        # The requested set is already shown -> fall back to the defaults.
        columns = context.columns.split(':')
    view._init_columns(columns)
@action(name='toggle_ignored_files', attr=ActionAttr.REDRAW)
def _toggle_ignored_files(view: View, defx: Defx, context: Context) -> None:
    """Show/hide ignored files for this defx instance."""
    defx._enabled_ignored_files = not defx._enabled_ignored_files
@action(name='toggle_select', attr=ActionAttr.MARK | ActionAttr.NO_TAGETS)
def _toggle_select(view: View, defx: Defx, context: Context) -> None:
    """Flip the selection mark of the candidate under the cursor."""
    cursor_candidate = view.get_cursor_candidate(context.cursor)
    if cursor_candidate:
        cursor_candidate['is_selected'] = not cursor_candidate['is_selected']
@action(name='toggle_select_all', attr=ActionAttr.MARK | ActionAttr.NO_TAGETS)
def _toggle_select_all(view: View, defx: Defx, context: Context) -> None:
    """Flip the selection mark of every non-root candidate of this buffer."""
    for candidate in view._candidates:
        if candidate['is_root'] or candidate['_defx_index'] != defx._index:
            continue
        candidate['is_selected'] = not candidate['is_selected']
@action(name='toggle_select_visual',
        attr=ActionAttr.MARK | ActionAttr.NO_TAGETS)
def _toggle_select_visual(view: View, defx: Defx, context: Context) -> None:
    """Flip the selection of candidates inside the last visual range."""
    if context.visual_start <= 0 or context.visual_end <= 0:
        return
    # Visual positions are 1-based lines; clamp the end to the candidates.
    start = context.visual_start - 1
    end = min([context.visual_end, len(view._candidates)])
    for candidate in [x for x in view._candidates[start:end]
                      if not x['is_root'] and
                      x['_defx_index'] == defx._index]:
        candidate['is_selected'] = not candidate['is_selected']
@action(name='toggle_sort', attr=ActionAttr.MARK | ActionAttr.NO_TAGETS)
def _toggle_sort(view: View, defx: Defx, context: Context) -> None:
    """
    Switch to the given sort method, or back to the default when it is
    already active.
    """
    requested = context.args[0] if context.args else ''
    already_active = requested == defx._sort_method
    defx._sort_method = context.sort if already_active else requested
@action(name='yank_path')
def _yank_path(view: View, defx: Defx, context: Context) -> None:
    """Yank the targets' paths to the unnamed (and clipboard) register."""
    paths = '\n'.join(str(target['action__path']) for target in context.targets)
    view._vim.call('setreg', '"', paths)
    # Short-circuit: only probe xterm_clipboard when +clipboard is absent.
    if view._vim.call('has', 'clipboard') or \
            view._vim.call('has', 'xterm_clipboard'):
        view._vim.call('setreg', '+', paths)
    view.print_msg('Yanked:\n' + paths)
| [
"defx.session.Session",
"defx.action.ActionTable",
"defx.action.do_action",
"pathlib.Path"
] | [((3830, 3856), 'pathlib.Path', 'Path', (['context.session_file'], {}), '(context.session_file)\n', (3834, 3856), False, 'from pathlib import Path\n'), ((6360, 6409), 'defx.action.do_action', 'do_action', (['view', 'defx', 'view._prev_action', 'context'], {}), '(view, defx, view._prev_action, context)\n', (6369, 6409), False, 'from defx.action import do_action\n'), ((6721, 6747), 'pathlib.Path', 'Path', (['context.session_file'], {}), '(context.session_file)\n', (6725, 6747), False, 'from pathlib import Path\n'), ((7135, 7152), 'pathlib.Path', 'Path', (['search_path'], {}), '(search_path)\n', (7139, 7152), False, 'from pathlib import Path\n'), ((858, 891), 'defx.action.ActionTable', 'ActionTable', ([], {'func': 'func', 'attr': 'attr'}), '(func=func, attr=attr)\n', (869, 891), False, 'from defx.action import ActionTable\n'), ((1719, 1814), 'defx.session.Session', 'Session', ([], {'name': 'old_session.name', 'path': 'old_session.path', 'opened_candidates': 'opened_candidates'}), '(name=old_session.name, path=old_session.path, opened_candidates=\n opened_candidates)\n', (1726, 1814), False, 'from defx.session import Session\n'), ((1894, 1960), 'defx.session.Session', 'Session', ([], {'name': 'name', 'path': 'path', 'opened_candidates': 'opened_candidates'}), '(name=name, path=path, opened_candidates=opened_candidates)\n', (1901, 1960), False, 'from defx.session import Session\n'), ((4166, 4184), 'defx.session.Session', 'Session', ([], {}), '(**session)\n', (4173, 4184), False, 'from defx.session import Session\n'), ((7500, 7517), 'pathlib.Path', 'Path', (['search_path'], {}), '(search_path)\n', (7504, 7517), False, 'from pathlib import Path\n'), ((1860, 1870), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1864, 1870), False, 'from pathlib import Path\n')] |
import os.path
from typing import List, Tuple
from mapfmclient import MarkedLocation, Problem
class MapParser:
    """Parses movingai-style MAPF problem files into mapfmclient Problem objects."""

    def __init__(self, root_folder: str):
        self.root_folder = root_folder

    def parse_map(self, name: str) -> Problem:
        """Parse a single problem file located at <root_folder>/<name>.

        File layout: "width W", "height H", H grid rows ('@' = wall),
        the agent count, one start per agent ("x y color"), a blank line,
        then one goal per agent ("x y color").
        """
        with open(os.path.join(self.root_folder, name)) as file:
            # Read map width
            width_line = file.readline()
            width = int(width_line.split(' ')[1])

            # Read map height
            height_line = file.readline()
            height = int(height_line.split(' ')[1])

            # Read map grid. Bug fix: the previous version iterated the raw
            # line including its trailing newline, which appended a spurious
            # free cell (0) to every row; strip the newline and clamp to width.
            grid = []
            for _ in range(height):
                row = file.readline().rstrip('\n')[:width]
                grid.append([1 if char == '@' else 0 for char in row])

            # Read number of agents
            num_agents = int(file.readline())

            # Read starting positions ("x y color" per line)
            starts: List[MarkedLocation] = []
            for _ in range(num_agents):
                line = file.readline().split(' ')
                starts.append(MarkedLocation(int(line[2]), int(line[0]), int(line[1])))

            # Empty separator line
            file.readline()

            # Read goal positions ("x y color" per line)
            goals: List[MarkedLocation] = []
            for _ in range(num_agents):
                line = file.readline().split(' ')
                goals.append(MarkedLocation(int(line[2]), int(line[0]), int(line[1])))
            return Problem(grid, width, height, starts, goals)

    def parse_batch(self, folder: str) -> List[Tuple[str, Problem]]:
        """Parse every file under <root_folder>/<folder>; returns (filename, Problem) pairs."""
        problems = []
        for file in os.listdir(f'{self.root_folder}/{folder}'):
            problems.append((str(file), self.parse_map(f'{folder}/{file}')))
        return problems
| [
"mapfmclient.Problem"
] | [((1397, 1440), 'mapfmclient.Problem', 'Problem', (['grid', 'width', 'height', 'starts', 'goals'], {}), '(grid, width, height, starts, goals)\n', (1404, 1440), False, 'from mapfmclient import MarkedLocation, Problem\n')] |
#!/usr/bin/python
# Copyright (c) 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_api_key
short_description: Upload and delete API signing key of a user in OCI
description:
- This module allows the user upload and delete API signing keys of a user in OCI. A PEM-format RSA credential for
securing requests to the Oracle Cloud Infrastructure REST API. Also known as an API signing key. Specifically,
this is the public key from the key pair. The private key remains with the user calling the API. For information
about generating a key pair in the required PEM format, see Required Keys and OCIDs.
Note that this is not the SSH key for accessing compute instances.
Each user can have a maximum of three API signing keys.
For more information about user credentials, see
U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
version_added: "2.5"
options:
user_id:
description: The OCID of the user whose API signing key needs to be created or deleted.
required: true
api_signing_key:
description: The public key. Must be an RSA key in PEM format. Required when the API signing key is
uploaded with I(state=present)
required: false
aliases: ['key']
api_key_id:
description: The API signing key's id. The Id must be of the format TENANCY_OCID/USER_OCID/KEY_FINGERPRINT.
required: false
aliases: ['id']
state:
description: The state of the api signing key that must be asserted to. When I(state=present), and the
api key doesn't exist, the api key is created with the provided C(api_signing_key).
When I(state=absent), the api signing key corresponding to the provided C(fingerprint) is deleted.
required: false
default: "present"
choices: ['present', 'absent']
author: "<NAME> (@sivakumart)"
extends_documentation_fragment: [ oracle, oracle_creatable_resource, oracle_wait_options ]
"""
EXAMPLES = """
- name: Upload a new api signing key for the specified user
oci_api_key:
user_id: "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
key: "-----BEGIN PUBLIC KEY-----cmdnMIIBIjANBgkqhkiG9w0BAQEFA......mwIDAQAB-----END PUBLIC KEY-----"
- name: Delete an API signing key for the specified user
oci_api_key:
user_id: "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
"id": "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx/ocid1.user.oc1..xxxxxEXAMPLExxxxx/08:07:fc00:db20:35b:7399::5:da"
state: "absent"
"""
RETURN = """
oci_api_key:
description: Details of the API signing key
returned: On success
type: dict
sample: {
"fingerprint": "08:07:a6:7d:06:b4:73:91:e9:2c:da:42:c8:cb:df:02",
"inactive_status": null,
"key_id": "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx/ocid1.user.oc1..xxxxxEXAMPLExxxxx/08:07:a6:7d:06:b4:<KEY>",
"key_value": "-----BEGIN PUBLIC KEY-----...urt/fN8jNz2nZwIDAQAB-----END PUBLIC KEY-----",
"lifecycle_state": "ACTIVE",
"time_created": "2018-01-08T09:33:59.705000+00:00",
"user_id": "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
import oci
from oci.identity.identity_client import IdentityClient
from oci.identity.models import CreateApiKeyDetails
from oci.util import to_dict
from oci.exceptions import ServiceError, MaximumWaitTimeExceeded
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = None
RESOURCE_NAME = "api_key"
def set_logger(provided_logger):
    """Install `provided_logger` as this module's global logger (read via get_logger)."""
    global logger
    logger = provided_logger
def get_logger():
    """Return the module-level logger previously installed by set_logger()."""
    return logger
def _get_api_key_from_id(identity_client, user_id, api_key_id, module):
    """Return the user's API key whose key_id equals `api_key_id`, or None.

    Lists the user's keys with retry/backoff; on a ServiceError the Ansible
    module is failed via fail_json.
    """
    try:
        resp = oci_utils.call_with_backoff(
            identity_client.list_api_keys, user_id=user_id
        )
        if resp is None:
            return None
        return next(
            (key for key in resp.data if key.key_id == api_key_id), None
        )
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
def delete_api_key(identity_client, user_id, id, module):
    """Delete the API signing key `id` belonging to `user_id`.

    Returns {"api_key": <deleted key as dict>, "changed": bool}. On a
    ServiceError the Ansible module is failed via fail_json.
    """
    result = {}
    changed = False
    try:
        api_key = _get_api_key_from_id(identity_client, user_id, id, module)
        oci_utils.call_with_backoff(
            identity_client.delete_api_key,
            user_id=user_id,
            fingerprint=api_key.fingerprint,
        )
        # Bug fix: the previous log message said "api password"; this module
        # manages API signing keys.
        get_logger().info("Deleted API signing key %s", id)
        changed = True
        # The API key is not returned by list_api_keys after it
        # is deleted, and so we currently reuse the earlier api key object and mark
        # its lifecycle state as DELETED.
        # Note: This current approach has problems around idempotency.
        # We also don't wait, as there is no state transition that we need to wait for.
        api_key.lifecycle_state = "DELETED"
        result[RESOURCE_NAME] = to_dict(api_key)
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
    result["changed"] = changed
    return result
def _is_api_key_active(api_keys, api_key_id):
result = [
api_key
for api_key in api_keys
if api_key.key_id == api_key_id and api_key.lifecycle_state == "ACTIVE"
]
return len(result) == 1
def create_api_key(identity_client, user_id, key, module):
    """Upload `key` (a PEM public key) as a new API signing key for `user_id`.

    Waits until the new key shows up as ACTIVE in list_api_keys, then
    returns the oci_utils result dict with the refreshed key under
    "api_key". Fails the module on ServiceError or wait timeout.
    """
    try:
        cakd = CreateApiKeyDetails()
        cakd.key = key
        result = oci_utils.create_resource(
            resource_type=RESOURCE_NAME,
            create_fn=identity_client.upload_api_key,
            kwargs_create={"user_id": user_id, "create_api_key_details": cakd},
            module=module,
        )
        resource = result[RESOURCE_NAME]
        api_key_id = resource["key_id"]
        get_logger().info("Created API signing key %s", to_dict(resource))
        # API keys don't have a get<resource> and so we can't use oci_utils.create_and_wait.
        # The following logic manually checks if the API key in `list_api_keys`
        # has reached the desired ACTIVE state.
        response = identity_client.list_api_keys(user_id)
        # wait until the created API Key reaches Active state
        oci.wait_until(
            identity_client,
            response,
            evaluate_response=lambda resp: _is_api_key_active(resp.data, api_key_id),
        )
        # Re-fetch so the returned resource reflects the final (ACTIVE) state.
        result[RESOURCE_NAME] = to_dict(
            _get_api_key_from_id(identity_client, user_id, api_key_id, module)
        )
        return result
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
    except MaximumWaitTimeExceeded as mwte:
        module.fail_json(msg=str(mwte))
def main():
    """Ansible entry point.

    state=present uploads a new API signing key (idempotently); state=absent
    deletes the key identified by api_key_id. Exits the module with the
    accumulated result dict.
    """
    set_logger(oci_utils.get_logger("oci_api_key"))
    module_args = oci_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    module_args.update(
        dict(
            user_id=dict(type="str", required=True),
            api_key_id=dict(type="str", required=False, aliases=["id"]),
            api_signing_key=dict(type="str", required=False, aliases=["key"]),
            state=dict(
                type="str",
                required=False,
                default="present",
                choices=["present", "absent"],
            ),
        )
    )
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False,
        required_if=[("state", "absent", ["api_key_id"])],
    )
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    identity_client = oci_utils.create_service_client(module, IdentityClient)
    state = module.params["state"]
    result = dict(changed=False)
    user_id = module.params.get("user_id", None)
    public_key = module.params.get("api_signing_key", None)
    api_key_id = module.params.get("api_key_id", None)
    if api_key_id is not None:
        # An explicit key id was supplied: only deletion makes sense, since
        # an existing API signing key cannot be modified in place.
        api_key = _get_api_key_from_id(identity_client, user_id, api_key_id, module)
        if state == "absent":
            get_logger().debug(
                "Delete api password %s for user %s requested", api_key_id, user_id
            )
            if api_key is not None:
                get_logger().debug("Deleting %s", api_key.key_id)
                result = delete_api_key(identity_client, user_id, api_key_id, module)
            else:
                get_logger().debug("API Signing Key %s already deleted.", api_key_id)
        elif state == "present":
            module.fail_json(msg="API signing key cannot be updated.")
    else:
        # No key id given: create the key unless an equivalent one already
        # exists (idempotence handled by check_and_create_resource).
        result = oci_utils.check_and_create_resource(
            resource_type=RESOURCE_NAME,
            create_fn=create_api_key,
            kwargs_create={
                "identity_client": identity_client,
                "user_id": user_id,
                "key": public_key,
                "module": module,
            },
            list_fn=identity_client.list_api_keys,
            kwargs_list={"user_id": user_id},
            module=module,
            model=CreateApiKeyDetails(),
            create_model_attr_to_get_model_mapping={"key": "key_value"},
        )
    module.exit_json(**result)
if __name__ == "__main__":
main()
| [
"ansible.module_utils.basic.AnsibleModule",
"ansible.module_utils.oracle.oci_utils.create_service_client",
"ansible.module_utils.oracle.oci_utils.create_resource",
"oci.identity.models.CreateApiKeyDetails",
"ansible.module_utils.oracle.oci_utils.call_with_backoff",
"oci.util.to_dict",
"ansible.module_ut... | [((7336, 7407), 'ansible.module_utils.oracle.oci_utils.get_common_arg_spec', 'oci_utils.get_common_arg_spec', ([], {'supports_create': '(True)', 'supports_wait': '(True)'}), '(supports_create=True, supports_wait=True)\n', (7365, 7407), False, 'from ansible.module_utils.oracle import oci_utils\n'), ((7876, 7998), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'module_args', 'supports_check_mode': '(False)', 'required_if': "[('state', 'absent', ['api_key_id'])]"}), "(argument_spec=module_args, supports_check_mode=False,\n required_if=[('state', 'absent', ['api_key_id'])])\n", (7889, 7998), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((8150, 8205), 'ansible.module_utils.oracle.oci_utils.create_service_client', 'oci_utils.create_service_client', (['module', 'IdentityClient'], {}), '(module, IdentityClient)\n', (8181, 8205), False, 'from ansible.module_utils.oracle import oci_utils\n'), ((4330, 4405), 'ansible.module_utils.oracle.oci_utils.call_with_backoff', 'oci_utils.call_with_backoff', (['identity_client.list_api_keys'], {'user_id': 'user_id'}), '(identity_client.list_api_keys, user_id=user_id)\n', (4357, 4405), False, 'from ansible.module_utils.oracle import oci_utils\n'), ((4861, 4974), 'ansible.module_utils.oracle.oci_utils.call_with_backoff', 'oci_utils.call_with_backoff', (['identity_client.delete_api_key'], {'user_id': 'user_id', 'fingerprint': 'api_key.fingerprint'}), '(identity_client.delete_api_key, user_id=user_id,\n fingerprint=api_key.fingerprint)\n', (4888, 4974), False, 'from ansible.module_utils.oracle import oci_utils\n'), ((5534, 5550), 'oci.util.to_dict', 'to_dict', (['api_key'], {}), '(api_key)\n', (5541, 5550), False, 'from oci.util import to_dict\n'), ((5984, 6005), 'oci.identity.models.CreateApiKeyDetails', 'CreateApiKeyDetails', ([], {}), '()\n', (6003, 6005), False, 'from oci.identity.models import CreateApiKeyDetails\n'), ((6046, 6234), 
'ansible.module_utils.oracle.oci_utils.create_resource', 'oci_utils.create_resource', ([], {'resource_type': 'RESOURCE_NAME', 'create_fn': 'identity_client.upload_api_key', 'kwargs_create': "{'user_id': user_id, 'create_api_key_details': cakd}", 'module': 'module'}), "(resource_type=RESOURCE_NAME, create_fn=\n identity_client.upload_api_key, kwargs_create={'user_id': user_id,\n 'create_api_key_details': cakd}, module=module)\n", (6071, 6234), False, 'from ansible.module_utils.oracle import oci_utils\n'), ((7280, 7315), 'ansible.module_utils.oracle.oci_utils.get_logger', 'oci_utils.get_logger', (['"""oci_api_key"""'], {}), "('oci_api_key')\n", (7300, 7315), False, 'from ansible.module_utils.oracle import oci_utils\n'), ((6422, 6439), 'oci.util.to_dict', 'to_dict', (['resource'], {}), '(resource)\n', (6429, 6439), False, 'from oci.util import to_dict\n'), ((9599, 9620), 'oci.identity.models.CreateApiKeyDetails', 'CreateApiKeyDetails', ([], {}), '()\n', (9618, 9620), False, 'from oci.identity.models import CreateApiKeyDetails\n')] |
import pytest
from dlms_cosem import enumerations, state
from dlms_cosem.exceptions import LocalDlmsProtocolError
from dlms_cosem.protocol import acse
from dlms_cosem.protocol.acse import UserInformation
from dlms_cosem.protocol.xdlms import Conformance, InitiateRequestApdu
def test_non_aarq_on_initial_raises_protocol_error():
    """A fresh connection must reject any event that is not an AARQ."""
    s = state.DlmsConnectionState()
    with pytest.raises(LocalDlmsProtocolError):
        s.process_event(acse.ReleaseResponseApdu())
def test_aarq_makes_dlms_waiting_for_aare():
    """Processing an AARQ moves the connection to AWAITING_ASSOCIATION_RESPONSE."""
    s = state.DlmsConnectionState()
    s.process_event(
        acse.ApplicationAssociationRequestApdu(
            user_information=UserInformation(
                InitiateRequestApdu(proposed_conformance=Conformance())
            )
        )
    )
    assert s.current_state == state.AWAITING_ASSOCIATION_RESPONSE
def test_aare_sets_ready_on_waiting_aare_response():
    """An accepting AARE while awaiting the response makes the connection READY."""
    s = state.DlmsConnectionState(current_state=state.AWAITING_ASSOCIATION_RESPONSE)
    s.process_event(
        acse.ApplicationAssociationResponseApdu(
            enumerations.AssociationResult.ACCEPTED,
            result_source_diagnostics=enumerations.AcseServiceUserDiagnostics.NULL,
        )
    )
    assert s.current_state == state.READY
| [
"dlms_cosem.protocol.acse.ApplicationAssociationResponseApdu",
"dlms_cosem.state.DlmsConnectionState",
"dlms_cosem.protocol.xdlms.Conformance",
"dlms_cosem.protocol.acse.ReleaseResponseApdu",
"pytest.raises"
] | [((340, 367), 'dlms_cosem.state.DlmsConnectionState', 'state.DlmsConnectionState', ([], {}), '()\n', (365, 367), False, 'from dlms_cosem import enumerations, state\n'), ((524, 551), 'dlms_cosem.state.DlmsConnectionState', 'state.DlmsConnectionState', ([], {}), '()\n', (549, 551), False, 'from dlms_cosem import enumerations, state\n'), ((898, 974), 'dlms_cosem.state.DlmsConnectionState', 'state.DlmsConnectionState', ([], {'current_state': 'state.AWAITING_ASSOCIATION_RESPONSE'}), '(current_state=state.AWAITING_ASSOCIATION_RESPONSE)\n', (923, 974), False, 'from dlms_cosem import enumerations, state\n'), ((378, 415), 'pytest.raises', 'pytest.raises', (['LocalDlmsProtocolError'], {}), '(LocalDlmsProtocolError)\n', (391, 415), False, 'import pytest\n'), ((1004, 1166), 'dlms_cosem.protocol.acse.ApplicationAssociationResponseApdu', 'acse.ApplicationAssociationResponseApdu', (['enumerations.AssociationResult.ACCEPTED'], {'result_source_diagnostics': 'enumerations.AcseServiceUserDiagnostics.NULL'}), '(enumerations.AssociationResult.\n ACCEPTED, result_source_diagnostics=enumerations.\n AcseServiceUserDiagnostics.NULL)\n', (1043, 1166), False, 'from dlms_cosem.protocol import acse\n'), ((441, 467), 'dlms_cosem.protocol.acse.ReleaseResponseApdu', 'acse.ReleaseResponseApdu', ([], {}), '()\n', (465, 467), False, 'from dlms_cosem.protocol import acse\n'), ((724, 737), 'dlms_cosem.protocol.xdlms.Conformance', 'Conformance', ([], {}), '()\n', (735, 737), False, 'from dlms_cosem.protocol.xdlms import Conformance, InitiateRequestApdu\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Optional
from epicteller.core import redis
from epicteller.core.model.credential import Credential
class CredentialDAO:
    """Redis-backed storage for access credentials and e-mail validation tokens."""

    r = redis.pool

    @classmethod
    async def set_access_credential(cls, credential: Credential):
        """Persist the credential under its token, expiring after its own TTL."""
        key = f'access_token:{credential.token}'
        await cls.r.pool.set(key, credential.json(), expire=credential.ttl)

    @classmethod
    async def revoke_access_credential(cls, token: str):
        """Shorten the stored credential's lifetime to 10 seconds (soft revoke)."""
        await cls.r.pool.expire(f'access_token:{token}', 10)

    @classmethod
    async def set_email_validate_token(cls, action: str, token: str, email: str):
        """Bind a validation token (per action) to an e-mail for 10 minutes."""
        key = f'email_validate:{action}:{token}'
        await cls.r.pool.set(key, email, expire=600)

    @classmethod
    async def get_email_validate_token(cls, action: str, token: str) -> Optional[str]:
        """Return the e-mail bound to a validation token, or None if absent/expired."""
        raw = await cls.r.pool.get(f'email_validate:{action}:{token}')
        return raw.decode('utf8') if raw else None

    @classmethod
    async def get_access_credential(cls, token: str) -> Optional[Credential]:
        """Load and deserialize the credential stored for `token`, if any."""
        raw = await cls.r.pool.get(f'access_token:{token}')
        return Credential.parse_raw(raw) if raw else None
| [
"epicteller.core.model.credential.Credential.parse_raw"
] | [((1185, 1211), 'epicteller.core.model.credential.Credential.parse_raw', 'Credential.parse_raw', (['data'], {}), '(data)\n', (1205, 1211), False, 'from epicteller.core.model.credential import Credential\n')] |
from common import *
from model import vocab
# Training configuration consumed by the framework: model width (edim),
# schedule (epochs, learningrate, sdt_decay_step), gradient clipping
# (maxgrad), batching, vocabulary size, fp16 mode, and save/log intervals.
option = dict(edim=256, epochs=1.5, maxgrad=1., learningrate=1e-3, sdt_decay_step=1, batchsize=8, vocabsize=vocab, fp16=2, saveInterval=10, logInterval=.4)
# Per-token cross-entropy; the last two dims of `out` are transposed so the
# class dimension comes before time, as F.cross_entropy expects.
# NOTE(review): `rewards=[]` is a mutable default argument — harmless only
# while it is never mutated; confirm callers treat it as read-only.
option['loss'] = lambda opt, model, y, out, *_, rewards=[]: F.cross_entropy(out.transpose(-1, -2), y, reduction='none')
# Error indicator: argmax over classes 1..vocab-1 (shifted back by +1) compared
# against the target, masked by `mask`; 1.0 where the prediction is wrong.
option['criterion'] = lambda y, out, mask, *_: (out[:,:,1:vocab].max(-1)[1] + 1).ne(y).float() * mask.float()
# Environment hooks: pass-through start, and a single-step "episode" stub.
option['startEnv'] = lambda x, y, l, *args: (x, y, l, *args)
option['stepEnv'] = lambda i, pred, l, *args: (False, 1., None, None) # done episode, fake reward, Null next input, Null length, Null args
option['cumOut'] = False # True to keep trajectory
option['devices'] = [0] if torch.cuda.is_available() else [] # list of GPUs
option['init_method'] = 'file:///tmp/sharedfile' # initial configuration for multiple-GPU training
try:
from qhoptim.pyt import QHAdam
option['newOptimizer'] = lambda opt, params, _: QHAdam(params, lr=opt.learningrate, nus=(.7, .8), betas=(0.995, 0.999))
except ImportError: pass
| [
"qhoptim.pyt.QHAdam"
] | [((949, 1022), 'qhoptim.pyt.QHAdam', 'QHAdam', (['params'], {'lr': 'opt.learningrate', 'nus': '(0.7, 0.8)', 'betas': '(0.995, 0.999)'}), '(params, lr=opt.learningrate, nus=(0.7, 0.8), betas=(0.995, 0.999))\n', (955, 1022), False, 'from qhoptim.pyt import QHAdam\n')] |
import bpy
from aiohttp import web
import numpy as np
from mathutils import Matrix, Vector
import asyncio
from cinebot_mini_render_server.blender_timer_executor import EXECUTOR
routes = web.RouteTableDef()
def delete_animation_helper(obj):
    """Strip location/scale/rotation f-curves from the object's active action.

    Returns True when the object had an action with f-curves (even if no
    transform curve matched), False when there was nothing to clean up.
    """
    anim = obj.animation_data
    if not anim or not anim.action or not anim.action.fcurves:
        return False
    action = anim.action
    transform_prefixes = ("location", "scale", "rotation")
    doomed = [fc for fc in action.fcurves
              if fc.data_path.startswith(transform_prefixes)]
    for fc in doomed:
        action.fcurves.remove(fc)
    return True
def handle_object_animation_get_helper(obj_name):
    """Sample the object's world matrix over its animated frame range.

    Frames run from the first f-curve's range start up to (but excluding)
    its range end; the scene is stepped to each frame before reading
    matrix_world. Returns a list of {"frame_number", "matrix_world"} dicts
    (matrices as nested lists, JSON-serializable).
    """
    scene = bpy.context.scene
    obj = bpy.data.objects[obj_name]
    # the first f-curve's range is taken as the animation's frame span
    fc = obj.animation_data.action.fcurves[0]
    start, end = fc.range()
    transforms = []
    for t in range(int(start), int(end)):
        scene.frame_set(t)
        matrix_world = np.array(obj.matrix_world)
        tf_data = {
            "frame_number": t,
            "matrix_world": matrix_world.tolist()
        }
        transforms.append(tf_data)
    return transforms
@routes.get('/api/object/{obj_name}/animation')
async def handle_object_animation_get(request):
    """GET the per-frame world matrices of a named Blender object as JSON."""
    obj_name = request.match_info.get('obj_name', "None")
    if obj_name not in bpy.data.objects:
        raise web.HTTPBadRequest()
    # Run the bpy work on EXECUTOR — presumably it marshals calls onto
    # Blender's main thread via the timer executor; confirm.
    loop = asyncio.get_event_loop()
    result = await loop.run_in_executor(EXECUTOR,
                                        handle_object_animation_get_helper, obj_name)
    data = {
        "result": result,
        "url": '/api/object/{}/animation'.format(obj_name),
        "method": "GET"
    }
    return web.json_response(data)
def handle_object_animation_put_helper(input_data, obj_name):
    """Replace the object's transform animation with the keyframes in input_data.

    input_data["transforms"] is a list of frames, each carrying
    "frame_number" plus either a 4x4 "matrix_world" or explicit
    "location"/"rotation_euler" values. Returns True on success, False when
    a frame has neither form (curves for earlier frames are already written
    by then).
    """
    scene = bpy.context.scene
    obj = bpy.data.objects[obj_name]
    # NOTE(review): leftover debug prints; consider removing.
    print("before delete")
    # drop any existing transform f-curves before writing new ones
    delete_animation_helper(obj)
    print("after delete")
    if not obj.animation_data:
        obj.animation_data_create()
    if not obj.animation_data.action:
        obj.animation_data.action = bpy.data.actions.new(name=obj_name + "_action")
    # one f-curve per channel (x, y, z) for location and euler rotation
    f_curves_loc = [obj.animation_data.action.fcurves.new(data_path="location", index=i) for i in range(3)]
    f_curves_rot = [obj.animation_data.action.fcurves.new(data_path="rotation_euler", index=i) for i in range(3)]
    # pre-allocate one keyframe point per incoming frame on every curve
    [x.keyframe_points.add(len(input_data["transforms"])) for x in f_curves_loc]
    [x.keyframe_points.add(len(input_data["transforms"])) for x in f_curves_rot]
    for i, frame in enumerate(input_data["transforms"]):
        frame_number = frame["frame_number"]
        location = None
        rotation_euler = None
        if "matrix_world" in frame:
            matrix_world = frame["matrix_world"]
            m = Matrix(matrix_world)
            location = m.to_translation()
            # NOTE(review): assumes the object uses an euler rotation mode —
            # confirm behavior for quaternion/axis-angle objects.
            rotation_euler = m.to_euler()
        elif "location" in frame and "rotation_euler" in frame:
            location = frame["location"]
            rotation_euler = frame["rotation_euler"]
        else:
            return False
        for j in range(3):
            f_curves_loc[j].keyframe_points[i].co = [float(frame_number), location[j]]
            f_curves_rot[j].keyframe_points[i].co = [float(frame_number), rotation_euler[j]]
    return True
@routes.put('/api/object/{obj_name}/animation')
async def handle_object_animation_put(request):
    """PUT new transform keyframes (JSON body) onto a named Blender object."""
    input_data = await request.json()
    obj_name = request.match_info.get('obj_name', "None")
    if obj_name not in bpy.data.objects:
        raise web.HTTPBadRequest()
    # Run the bpy mutation on EXECUTOR — presumably it marshals calls onto
    # Blender's main thread via the timer executor; confirm.
    loop = asyncio.get_event_loop()
    result = await loop.run_in_executor(EXECUTOR,
                                        handle_object_animation_put_helper, input_data, obj_name)
    data = {
        "result": "SUCCESS" if result else "FAILED",
        "url": '/api/object/{}/animation'.format(obj_name),
        "method": "PUT"
    }
    return web.json_response(data=data)
def handle_object_animation_delete_helper(obj_name):
    """Look up the object by name and strip its transform animation curves.

    Returns True when an action with f-curves existed, False otherwise.
    """
    # Dropped the unused `scene = bpy.context.scene` local.
    obj = bpy.data.objects[obj_name]
    return delete_animation_helper(obj)
@routes.delete('/api/object/{obj_name}/animation')
async def handle_object_animation_delete(request):
    """DELETE the transform animation curves of a named Blender object."""
    obj_name = request.match_info.get('obj_name', "None")
    if obj_name not in bpy.data.objects:
        raise web.HTTPBadRequest()
    # Run the bpy work on EXECUTOR — presumably it marshals calls onto
    # Blender's main thread via the timer executor; confirm.
    loop = asyncio.get_event_loop()
    result = await loop.run_in_executor(EXECUTOR,
                                        handle_object_animation_delete_helper, obj_name)
    data = {
        "result": "SUCCESS" if result else "FAILED",
        "url": '/api/object/{}/animation'.format(obj_name),
        "method": "DELETE"
    }
    return web.json_response(data=data)
| [
"numpy.array",
"aiohttp.web.RouteTableDef",
"aiohttp.web.json_response",
"mathutils.Matrix",
"aiohttp.web.HTTPBadRequest",
"asyncio.get_event_loop",
"bpy.data.actions.new"
] | [((187, 206), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (204, 206), False, 'from aiohttp import web\n'), ((1489, 1513), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1511, 1513), False, 'import asyncio\n'), ((1791, 1814), 'aiohttp.web.json_response', 'web.json_response', (['data'], {}), '(data)\n', (1808, 1814), False, 'from aiohttp import web\n'), ((3685, 3709), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3707, 3709), False, 'import asyncio\n'), ((3991, 4019), 'aiohttp.web.json_response', 'web.json_response', ([], {'data': 'data'}), '(data=data)\n', (4008, 4019), False, 'from aiohttp import web\n'), ((4452, 4476), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4474, 4476), False, 'import asyncio\n'), ((4752, 4780), 'aiohttp.web.json_response', 'web.json_response', ([], {'data': 'data'}), '(data=data)\n', (4769, 4780), False, 'from aiohttp import web\n'), ((1049, 1075), 'numpy.array', 'np.array', (['obj.matrix_world'], {}), '(obj.matrix_world)\n', (1057, 1075), True, 'import numpy as np\n'), ((1456, 1476), 'aiohttp.web.HTTPBadRequest', 'web.HTTPBadRequest', ([], {}), '()\n', (1474, 1476), False, 'from aiohttp import web\n'), ((2176, 2223), 'bpy.data.actions.new', 'bpy.data.actions.new', ([], {'name': "(obj_name + '_action')"}), "(name=obj_name + '_action')\n", (2196, 2223), False, 'import bpy\n'), ((3652, 3672), 'aiohttp.web.HTTPBadRequest', 'web.HTTPBadRequest', ([], {}), '()\n', (3670, 3672), False, 'from aiohttp import web\n'), ((4419, 4439), 'aiohttp.web.HTTPBadRequest', 'web.HTTPBadRequest', ([], {}), '()\n', (4437, 4439), False, 'from aiohttp import web\n'), ((2875, 2895), 'mathutils.Matrix', 'Matrix', (['matrix_world'], {}), '(matrix_world)\n', (2881, 2895), False, 'from mathutils import Matrix, Vector\n')] |
from copy import deepcopy
from numba import jit,njit
import numpy as np
import pymctdh.opfactory as opfactory
from pymctdh.cy.sparsemat import CSRmat#,matvec
@njit(fastmath=True)
def matvec(nrows,IA,JA,data,vec,outvec):
    """Accumulate the CSR matrix-vector product into `outvec` and return it.

    IA are the row pointers (length nrows+1), JA the column indices, `data`
    the nonzero values. Note the product is *added* to whatever `outvec`
    already contains, so callers pass a zeroed buffer for a plain matvec.
    The explicit loops (instead of numpy slicing) keep the function
    numba-jittable.
    """
    d_ind = 0  # running index into data/JA
    for i in range(nrows):
        ncol = IA[i+1]-IA[i]
        for j in range(ncol):
            col_ind = JA[d_ind]
            outvec[i] = outvec[i] + data[d_ind]*vec[col_ind]
            d_ind += 1
    return outvec
def matadd(nrows,op1,a,op2,b):
    """Return the CSR linear combination a*op1 + b*op2 as a new CSRmat.

    When op1 is None the result is simply b*op2 (deep-copied). Otherwise the
    two sparsity patterns are merged row by row via the union of their
    column indices.

    NOTE(review): the running ind1/ind2 data pointers assume each row's JA
    entries are sorted ascending (matching np.union1d's sorted output) —
    confirm CSRmat guarantees sorted column indices per row.
    """
    if op1 is None:
        opout = deepcopy(op2)
        opout.data *= b
    else:
        data = []
        JA = []
        IA = [0]
        ind1 = 0  # next unread nonzero of op1
        ind2 = 0  # next unread nonzero of op2
        for i in range(nrows):
            op1_col = op1.JA[op1.IA[i]:op1.IA[i+1]]
            op2_col = op2.JA[op2.IA[i]:op2.IA[i+1]]
            # sorted union of the two rows' column indices
            inds = np.union1d(op1_col,op2_col)
            IA.append( IA[i]+len(inds) )
            for ind in inds:
                JA.append( ind )
                dat = 0.0
                if ind in op1_col:
                    dat += a*op1.data[ind1]
                    ind1 +=1
                if ind in op2_col:
                    dat += b*op2.data[ind2]
                    ind2 +=1
                data.append( dat )
        data = np.array(data)
        IA = np.array(IA, dtype=np.intc)
        JA = np.array(JA, dtype=np.intc)
        opout = CSRmat(data, IA, JA)
    return opout
#@njit(fastmath=True)
def kron(nrows1,IA1,JA1,data1,nrows2,IA2,JA2,data2):
    """Kronecker product of two square CSR matrices, returned as a CSRmat.

    Output row i1*nrows2 + i2 pairs row i1 of matrix 1 with row i2 of
    matrix 2; a nonzero (a, b) lands in column JA1[a]*nrows2 + JA2[b]
    (square matrices assumed, so nrows2 doubles as matrix 2's column count).

    Bug fixes vs. the previous version: it incremented an undefined `d_ind`
    (NameError) and built the IA row pointers inside the wrong loop with the
    wrong row index, so it never produced a valid CSR structure.
    """
    data = []
    JA = []
    IA = [0]
    for i1 in range(nrows1):
        for i2 in range(nrows2):
            nnz_row = 0  # nonzeros emitted for this output row
            for a in range(IA1[i1], IA1[i1+1]):
                for b in range(IA2[i2], IA2[i2+1]):
                    data.append(data1[a]*data2[b])
                    JA.append(JA1[a]*nrows2 + JA2[b])
                    nnz_row += 1
            IA.append(IA[-1] + nnz_row)
    return CSRmat(np.array(data), np.array(IA, dtype=int), np.array(JA, dtype=int))
class PBasis:
    """Primitive basis for a single (possibly combined) mode.

    Stores the basis parameters, builds primitive operator matrices through
    opfactory, and applies one-body Hamiltonians/operators to spfs in either
    dense (numpy) or sparse (CSR) form.
    """

    def __init__(self, args, sparse=False):
        """Set up the primitive basis.

        args[0] names the basis ('ho', 'sinc', 'plane wave', ...); the
        remaining entries are basis-specific parameters (see the branches
        below). `sparse` selects CSR operator matrices.
        """
        self.params = {}
        self.params['basis'] = args[0].lower()
        self.sparse = sparse
        # Default: a single, non-combined mode unless a branch overrides it.
        self.combined = False

        # set up parameters for basis
        if self.params['basis'] == 'ho':
            self.params['npbf'] = args[1]
            self.params['mass'] = args[2]
            self.params['omega'] = args[3]
            if len(args) == 5:
                self.combined = args[4]
            if self.combined:
                # combined mode: npbf is a list; total size is the product
                self.npbfs = 1
                for n in self.params['npbf']:
                    self.npbfs *= n
                self.make_ops = opfactory.make_ho_ops_combined
                # broadcast scalar mass/omega to one entry per sub-mode
                if not isinstance(self.params['mass'], list):
                    self.params['mass'] = [args[2] for _ in range(len(args[1]))]
                if not isinstance(self.params['omega'], list):
                    # bug fix: the omega list was previously filled with the
                    # mass value (args[2]); omega is args[3]
                    self.params['omega'] = [args[3] for _ in range(len(args[1]))]
            else:
                self.npbfs = self.params['npbf']
                self.make_ops = opfactory.make_ho_ops
        elif self.params['basis'] == 'sinc':
            self.params['npbf'] = args[1]
            self.params['qmin'] = args[2]
            self.params['qmax'] = args[3]
            self.params['dq'] = args[4]
            self.params['mass'] = args[5]
            if isinstance(self.params['npbf'], list):
                self.combined = True
                self.npbfs = 1
                for n in self.params['npbf']:
                    self.npbfs *= n
                self.make_ops = opfactory.make_sinc_ops_combined
            else:
                self.npbfs = self.params['npbf']
                self.make_ops = opfactory.make_sinc_ops
                # bug fix: the grid was previously built from undefined
                # locals (qmin, qmax, dq), raising NameError; read the
                # stored parameters instead
                self.grid = np.arange(self.params['qmin'],
                                      self.params['qmax'] + self.params['dq'],
                                      self.params['dq'])
        elif self.params['basis'] == 'plane wave':
            # force an odd basis size so modes come in +/- pairs around k=0
            if args[1] % 2 == 0:
                self.params['npbf'] = args[1] + 1
            else:
                self.params['npbf'] = args[1]
            self.params['nm'] = int((args[1] - 1) / 2)
            self.params['mass'] = args[2]
            if len(args) == 4:
                self.combined = args[3]
            if self.combined:
                raise NotImplementedError
            self.npbfs = self.params['npbf']
            self.make_ops = opfactory.make_planewave_ops
        elif self.params['basis'] == 'plane wave dvr':
            raise NotImplementedError
        elif self.params['basis'] == 'radial':
            raise NotImplementedError
        else:
            raise ValueError("Not a valid basis.")

    def make_operators(self, ops, matrix=None):
        """Creates matrices for all the relevant operators used in the
        calculation. These matrices are then stored in a dictionary called
        self.ops.

        Input
        -----
        ops - list of strings, all the operators that are used for this pbf
        matrix - optional list (same length as ops) of precomputed matrices;
            entries that are None are generated via self.make_ops
        """
        # bug fix: replaced a bare try/except around the attribute probe
        # with an explicit existence check
        if not hasattr(self, 'ops'):
            self.ops = {}
        if matrix is None:
            matrix = [None for _ in range(len(ops))]
        for i, op in enumerate(ops):
            if op not in self.ops:
                if matrix[i] is None:
                    self.ops[op] = self.make_ops(self.params, op, sparse=self.sparse)
                else:
                    self.ops[op] = matrix[i]

    def make_1b_ham(self, nel, terms):
        """Make the 1-body hamiltonians that act on the spfs with this pbf.

        nel - number of electronic states
        terms - terms[alpha] is a list of dicts with 'coeff' and 'ops'
            (a single operator-name string) for state alpha
        The per-state matrices are cached as self.ops['1b'].
        """
        op1b = []
        for alpha in range(nel):
            if self.sparse:
                op = None  # matadd treats None as an all-zero first operand
            else:
                op = np.zeros((self.npbfs,) * 2)
            for term in terms[alpha]:
                opstr = term['ops'][0]
                coeff = term['coeff']
                if self.sparse:
                    op = matadd(self.npbfs, op, 1.0, self.ops[opstr], coeff)
                else:
                    # promote dtype (e.g. real -> complex) before accumulating
                    op = op.astype(type(coeff))
                    op += coeff * self.ops[opstr]
            op1b.append(op)
        self.ops['1b'] = op1b
        return

    def operate1b(self, spf, alpha):
        """Operate the single-body hamiltonian on a single spf."""
        if self.sparse:
            op = self.ops['1b'][alpha]
            outvec = np.zeros(op.nrows, dtype=complex)
            return matvec(op.nrows, op.IA, op.JA, op.data, spf, outvec)
        else:
            return np.dot(self.ops['1b'][alpha], spf)

    def operate(self, spf, term):
        """Operate a single-body term on a single spf."""
        if self.sparse:
            op = self.ops[term]
            outvec = np.zeros(op.nrows, dtype=complex)
            return matvec(op.nrows, op.IA, op.JA, op.data, spf, outvec)
        else:
            return np.dot(self.ops[term], spf)
if __name__ == "__main__":
    # Smoke test: build a few harmonic-oscillator bases and print the shapes
    # of the operator matrices they generate.

    # no mode combination
    pbf = PBasis(['ho',22,1.0,1.0])
    pbf.make_operators(['q','KE','q^2'])
    print(pbf.params['basis'])
    print(pbf.params['npbf'])
    print(pbf.params['mass'])
    print(pbf.params['omega'])
    opkeys = pbf.ops.keys()
    for op in opkeys:
        print(op)
        print(pbf.ops[op].shape)
        print('')
    print('')

    # mode combination (scalar mass/omega shared by both sub-modes)
    pbf = PBasis(['ho',[6,6],1.0,1.0,True])
    pbf.make_operators(['(q)*(1)','(1)*(q)'])
    print(pbf.params['basis'])
    print(pbf.params['npbf'])
    print(pbf.params['mass'])
    print(pbf.params['omega'])
    opkeys = pbf.ops.keys()
    for op in opkeys:
        print(op)
        print(pbf.ops[op].shape)
        print('')
    print('')

    # mode combination (per-sub-mode mass/omega lists)
    pbf = PBasis(['ho',[6,6],[1.0,2.0],[1.0,2.0],True])
    pbf.make_operators(['(q)*(1)','(1)*(q)'])
    print(pbf.params['basis'])
    print(pbf.params['npbf'])
    print(pbf.params['mass'])
    print(pbf.params['omega'])
    opkeys = pbf.ops.keys()
    for op in opkeys:
        print(op)
        print(pbf.ops[op].shape)
        print('')
    print('')
| [
"numpy.union1d",
"numba.njit",
"pymctdh.cy.sparsemat.CSRmat",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"copy.deepcopy",
"numpy.arange"
] | [((162, 181), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (166, 181), False, 'from numba import jit, njit\n'), ((557, 570), 'copy.deepcopy', 'deepcopy', (['op2'], {}), '(op2)\n', (565, 570), False, 'from copy import deepcopy\n'), ((1268, 1282), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1276, 1282), True, 'import numpy as np\n'), ((1299, 1326), 'numpy.array', 'np.array', (['IA'], {'dtype': 'np.intc'}), '(IA, dtype=np.intc)\n', (1307, 1326), True, 'import numpy as np\n'), ((1343, 1370), 'numpy.array', 'np.array', (['JA'], {'dtype': 'np.intc'}), '(JA, dtype=np.intc)\n', (1351, 1370), True, 'import numpy as np\n'), ((1387, 1407), 'pymctdh.cy.sparsemat.CSRmat', 'CSRmat', (['data', 'IA', 'JA'], {}), '(data, IA, JA)\n', (1393, 1407), False, 'from pymctdh.cy.sparsemat import CSRmat\n'), ((2081, 2095), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2089, 2095), True, 'import numpy as np\n'), ((2097, 2120), 'numpy.array', 'np.array', (['IA'], {'dtype': 'int'}), '(IA, dtype=int)\n', (2105, 2120), True, 'import numpy as np\n'), ((2122, 2145), 'numpy.array', 'np.array', (['JA'], {'dtype': 'int'}), '(JA, dtype=int)\n', (2130, 2145), True, 'import numpy as np\n'), ((844, 872), 'numpy.union1d', 'np.union1d', (['op1_col', 'op2_col'], {}), '(op1_col, op2_col)\n', (854, 872), True, 'import numpy as np\n'), ((7567, 7600), 'numpy.zeros', 'np.zeros', (['op.nrows'], {'dtype': 'complex'}), '(op.nrows, dtype=complex)\n', (7575, 7600), True, 'import numpy as np\n'), ((7736, 7770), 'numpy.dot', 'np.dot', (["self.ops['1b'][alpha]", 'spf'], {}), "(self.ops['1b'][alpha], spf)\n", (7742, 7770), True, 'import numpy as np\n'), ((7985, 8018), 'numpy.zeros', 'np.zeros', (['op.nrows'], {'dtype': 'complex'}), '(op.nrows, dtype=complex)\n', (7993, 8018), True, 'import numpy as np\n'), ((8167, 8194), 'numpy.dot', 'np.dot', (['self.ops[term]', 'spf'], {}), '(self.ops[term], spf)\n', (8173, 8194), True, 'import numpy as np\n'), ((3908, 3938), 
'numpy.arange', 'np.arange', (['qmin', '(qmax + dq)', 'dq'], {}), '(qmin, qmax + dq, dq)\n', (3917, 3938), True, 'import numpy as np\n'), ((6807, 6834), 'numpy.zeros', 'np.zeros', (['((self.npbfs,) * 2)'], {}), '((self.npbfs,) * 2)\n', (6815, 6834), True, 'import numpy as np\n')] |
# Copyright (c) 2020 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
# kaggle_environments licensed under Copyright 2020 Kaggle Inc. and the Apache License, Version 2.0
# (see https://github.com/Kaggle/kaggle-environments/blob/master/LICENSE for details)
# wrapper of Hungry Geese environment from kaggle
import random
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import handyrl.envs.kaggle.public_flood_goose as pfg
# You need to install kaggle_environments, requests
from kaggle_environments import make
from kaggle_environments.envs.hungry_geese.hungry_geese import Observation, Configuration, Action, GreedyAgent
from ...environment import BaseEnvironment
class TorusConv2d(nn.Module):
    """2-D convolution with circular ('torus') padding and optional batch norm.

    The 7x11 Hungry Geese board wraps around at its edges, so circular
    padding lets the kernel see across the board boundary.
    """
    def __init__(self, input_dim, output_dim, kernel_size, bn):
        super().__init__()
        # Pad by half the kernel on each axis so spatial size is preserved.
        self.edge_size = (kernel_size[0] // 2, kernel_size[1] // 2)
        self.conv = nn.Conv2d(
            input_dim,
            output_dim,
            kernel_size=kernel_size,
            padding=self.edge_size,
            padding_mode='circular',
        )
        self.bn = nn.BatchNorm2d(output_dim) if bn else None
    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        return out
'''
class GeeseNet(nn.Module):
def __init__(self):
super().__init__()
layers, filters = 12, 32
self.conv0 = TorusConv2d(53, filters, (3, 3), True) # TBD
self.blocks = nn.ModuleList([TorusConv2d(filters, filters, (3, 3), True) for _ in range(layers)])
self.head_p = nn.Linear(filters, 4, bias=False)
self.head_v = nn.Linear(filters * 2, 1, bias=False)
def forward(self, x, _=None):
h = F.relu_(self.conv0(x))
for block in self.blocks:
h = F.relu_(h + block(h))
h_head = (h * x[:,:1]).view(h.size(0), h.size(1), -1).sum(-1)
h_avg = h.view(h.size(0), h.size(1), -1).mean(-1)
p = self.head_p(h_head)
v = torch.tanh(self.head_v(torch.cat([h_head, h_avg], 1)))
return {'policy': p, 'value': v}
'''
class GeeseNet(nn.Module):
    """Residual torus-CNN producing a 4-way move policy and a scalar value.

    Input is the (53, 7, 11) observation tensor built by
    ``Environment.observation``; output is a dict with 'policy' (batch, 4)
    logits and 'value' (batch, 1) in [-1, 1].
    """
    def __init__(self):
        super().__init__()
        num_blocks, filters = 14, 32
        # 53 input planes match the observation's channel count.  # TBD
        self.conv0 = TorusConv2d(53, filters, (3, 3), True)
        self.blocks = nn.ModuleList(
            [TorusConv2d(filters, filters, (3, 3), True) for _ in range(num_blocks)]
        )
        self.head_p = nn.Linear(filters, 4, bias=False)
        self.head_v = nn.Linear(filters * 2, 1, bias=False)
    def forward(self, x, _=None):
        h = F.relu_(self.conv0(x))
        for block in self.blocks:
            # Residual connection around every block.
            h = F.relu_(h + block(h))
        batch, channels = h.size(0), h.size(1)
        # Features at the player's own head cell (plane 0 marks the own head).
        h_head = (h * x[:, :1]).view(batch, channels, -1).sum(-1)
        # Board-wide average features.
        h_avg = h.view(batch, channels, -1).mean(-1)
        policy = self.head_p(h_head)
        value = torch.tanh(self.head_v(torch.cat([h_head, h_avg], 1)))
        return {'policy': policy, 'value': value}
class Environment(BaseEnvironment):
    """HandyRL wrapper around kaggle_environments' Hungry Geese game."""
    # Action labels; the list index doubles as the integer action id.
    ACTION = ['NORTH', 'SOUTH', 'WEST', 'EAST']
    # (row, col) offsets on the 7x11 torus, aligned with ACTION above.
    DIRECTION = [[-1, 0], [1, 0], [0, -1], [0, 1]]
    NUM_AGENTS = 4
    # First letter of an action string -> kaggle Action enum member.
    ACTION_MAP = {'N': Action.NORTH, 'S': Action.SOUTH, 'W': Action.WEST, 'E': Action.EAST}
    # kaggle Action enum member -> action string (used by the flood-goose agent).
    pfg_action_map = { Action.NORTH: 'NORTH', Action.SOUTH: 'SOUTH', Action.WEST: 'WEST', Action.EAST: 'EAST'}
    def __init__(self, args={}):
        # NOTE(review): mutable default `args` is shared between calls; it is
        # unused here, but confirm before adding state to it.
        super().__init__()
        self.env = make("hungry_geese")
        self.reset()
    def reset(self, args={}):
        """Start a new episode and record its first observation."""
        obs = self.env.reset(num_agents=self.NUM_AGENTS)
        self.update((obs, {}), True)
    def update(self, info, reset):
        """Append an (observation, last_actions) step; clear history on reset."""
        obs, last_actions = info
        if reset:
            self.obs_list = []
        self.obs_list.append(obs)
        self.last_actions = last_actions
    def action2str(self, a, player=None):
        # Integer action id -> action label.
        return self.ACTION[a]
    def str2action(self, s, player=None):
        # Action label -> integer action id.
        return self.ACTION.index(s)
    def direction(self, pos_from, pos_to):
        """Return the ACTION index moving pos_from to pos_to on the torus, else None."""
        if pos_from is None or pos_to is None:
            return None
        x, y = pos_from // 11, pos_from % 11
        for i, d in enumerate(self.DIRECTION):
            # Wrap around board edges (7 rows, 11 columns).
            nx, ny = (x + d[0]) % 7, (y + d[1]) % 11
            if nx * 11 + ny == pos_to:
                return i
        return None
    def __str__(self):
        # output state: render the current board as colored ASCII art.
        obs = self.obs_list[-1][0]['observation']
        colors = ['\033[33m', '\033[34m', '\033[32m', '\033[31m']
        color_end = '\033[0m'
        def check_cell(pos):
            # Classify a cell: (goose idx, 'h'/'t'/neighbor directions), 'f' for food, or None.
            for i, geese in enumerate(obs['geese']):
                if pos in geese:
                    if pos == geese[0]:
                        return i, 'h'
                    if pos == geese[-1]:
                        return i, 't'
                    index = geese.index(pos)
                    pos_prev = geese[index - 1] if index > 0 else None
                    pos_next = geese[index + 1] if index < len(geese) - 1 else None
                    directions = [self.direction(pos, pos_prev), self.direction(pos, pos_next)]
                    return i, directions
            if pos in obs['food']:
                return 'f'
            return None
        def cell_string(cell):
            # Map a cell classification to a single display character.
            if cell is None:
                return '.'
            elif cell == 'f':
                return 'f'
            else:
                index, directions = cell
                if directions == 'h':
                    return colors[index] + '@' + color_end
                elif directions == 't':
                    return colors[index] + '*' + color_end
                elif max(directions) < 2:
                    # Both neighbors vertical (NORTH/SOUTH indices are 0 and 1).
                    return colors[index] + '|' + color_end
                elif min(directions) >= 2:
                    # Both neighbors horizontal (WEST/EAST indices are 2 and 3).
                    return colors[index] + '-' + color_end
                else:
                    return colors[index] + '+' + color_end
        cell_status = [check_cell(pos) for pos in range(7 * 11)]
        s = 'turn %d\n' % len(self.obs_list)
        for x in range(7):
            for y in range(11):
                pos = x * 11 + y
                s += cell_string(cell_status[pos])
            s += '\n'
        for i, geese in enumerate(obs['geese']):
            # Per-player goose length; '-' once the goose is dead (length 0).
            s += colors[i] + str(len(geese) or '-') + color_end + ' '
        return s
    def step(self, actions):
        # state transition: missing/None actions default to action id 0.
        obs = self.env.step([self.action2str(actions.get(p, None) or 0) for p in self.players()])
        self.update((obs, actions), False)
    def diff_info(self, _):
        """Return the minimal info needed to replay the latest transition."""
        return self.obs_list[-1], self.last_actions
    def turns(self):
        # players to move (still alive this turn)
        return [p for p in self.players() if self.obs_list[-1][p]['status'] == 'ACTIVE']
    def terminal(self):
        # check whether terminal state or not: episode ends when nobody is ACTIVE
        for obs in self.obs_list[-1]:
            if obs['status'] == 'ACTIVE':
                return False
        return True
    def outcome(self):
        # return terminal outcomes
        # 1st: 1.0 2nd: 0.33 3rd: -0.33 4th: -1.00
        rewards = {o['observation']['index']: o['reward'] for o in self.obs_list[-1]}
        outcomes = {p: 0 for p in self.players()}
        # Pairwise comparison of final rewards; ties contribute 0.
        for p, r in rewards.items():
            for pp, rr in rewards.items():
                if p != pp:
                    if r > rr:
                        outcomes[p] += 1 / (self.NUM_AGENTS - 1)
                    elif r < rr:
                        outcomes[p] -= 1 / (self.NUM_AGENTS - 1)
        return outcomes
    def legal_actions(self, player):
        # return legal action list (all four moves are always offered)
        return list(range(len(self.ACTION)))
    def action_length(self):
        # maximum action label (it determines output size of policy function)
        return len(self.ACTION)
    def players(self):
        """Return the list of player ids (0..NUM_AGENTS-1)."""
        return list(range(self.NUM_AGENTS))
    def rule_based_action(self, player):
        """Choose an action for `player` with kaggle's built-in GreedyAgent."""
        agent = GreedyAgent(Configuration({'rows': 7, 'columns': 11}))
        # Restore the agent's notion of its previous move so it avoids reversing.
        agent.last_action = self.ACTION_MAP[self.ACTION[self.last_actions[player]][0]] if player in self.last_actions else None
        obs = {**self.obs_list[-1][0]['observation'], **self.obs_list[-1][player]['observation']}
        action = agent(Observation(obs))
        return self.ACTION.index(action)
    def public_flood_goose_based_action(self, player):
        """Choose an action for `player` with the public flood-goose agent."""
        obs = {**self.obs_list[-1][0]['observation'], **self.obs_list[-1][player]['observation']}
        conf = {'rows': 7, 'columns': 11}
        if player in self.last_actions and len(self.obs_list) > 1:
            # Tell the agent where its head was last turn (row, col on the board).
            prev_obs = {**self.obs_list[-2][0]['observation'], **self.obs_list[-2][player]['observation']}
            pos_int = prev_obs['geese'][prev_obs['index']][0]
            pfg.public_flood_agent_goose.last_pos = pfg.Pos(pos_int//11, pos_int%11)
        else:
            pfg.public_flood_agent_goose.last_pos = None
        # print("prev action = ", pfg.public_flood_agent_goose.last_action)
        state = pfg.State.from_obs_conf(obs, conf)
        action = pfg.public_flood_agent_goose.step(state)
        # Convert the agent's target cell into a kaggle Action enum member.
        action = state.geo.action_to(state.my_goose.head, action)
        # print("action = ", action)
        # print("action = ",self.ACTION.index(self.pfg_action_map[action]))
        return self.ACTION.index(self.pfg_action_map[action])
    def net(self):
        """Return the neural network class used for this environment."""
        return GeeseNet
    def observation(self, player): # = None
        # Build the (53, 7, 11) float32 input tensor for `player`.
        # Channel layout (4 planes per group, rotated so the acting player
        # is always plane 0 of its group via (p - player) % NUM_AGENTS):
        #   0-3 heads, 4-7 whole bodies, 8-11 inner bodies, 12-15 tails,
        #   16-19 previous heads, 20-23 potential next head cells,
        #   24-27 lengths, 28-31 second-to-last cells, 32-35 third-to-last
        #   cells, 36-39 ordered body cells, 40-48 per-group aggregates,
        #   49 food, 50-52 step counters.
        # if player is None:
        #     player = 0
        b = np.zeros((self.NUM_AGENTS * 13 + 1, 7 * 11), dtype=np.float32) # TBD
        obs = self.obs_list[-1][0]['observation']
        for p, geese in enumerate(obs['geese']):
            # head position
            for pos in geese[:1]:
                b[0 + (p - player) % self.NUM_AGENTS, pos] = 1
            # whole position
            for pos in geese:
                b[4 + (p - player) % self.NUM_AGENTS, pos] = 1
            # body position
            for pos in geese[1:-1]:
                b[8 + (p - player) % self.NUM_AGENTS, pos] = 1
            # tip position
            for pos in geese[-1:]:
                b[12 + (p - player) % self.NUM_AGENTS, pos] = 1
            # previous head positon: see below
            # code attached below: line 16,17,18,19
            # potential next move
            for pos in geese[:1]:
                b[20 + (p - player) % self.NUM_AGENTS, (pos - 1)%77] = 1
                b[20 + (p - player) % self.NUM_AGENTS, (pos + 1)%77] = 1
                b[20 + (p - player) % self.NUM_AGENTS, (pos - 11)%77] = 1
                b[20 + (p - player) % self.NUM_AGENTS, (pos + 11)%77] = 1
                # the impossible part will be removed in the previous head positions
            # snake length for each player
            b[24 + (p - player) % self.NUM_AGENTS, :] = len(geese)/77
            # snake last second grid
            for pos in geese[-2:-1]:
                b[28 + (p - player) % self.NUM_AGENTS, pos] = 1
            # snake last third grid
            for pos in geese[-3:-2]:
                b[32 + (p - player) % self.NUM_AGENTS, pos] = 1
            # ordered grid snake
            for gridi, gridpos in enumerate(geese):
                b[36 + (p - player) % self.NUM_AGENTS, gridpos] = (len(geese) - gridi)/20
        # previous head position
        if len(self.obs_list) > 1:
            obs_prev = self.obs_list[-2][0]['observation']
            for p, geese in enumerate(obs_prev['geese']):
                for pos in geese[:1]:
                    b[16 + (p - player) % self.NUM_AGENTS, pos] = 1
                    # A goose cannot reverse onto its previous head cell.
                    b[20 + (p - player) % self.NUM_AGENTS, pos] = 0
        b[40, :] = b[0:4, :].sum(axis = 0) # all heads
        b[41, ] = b[4:8, :].sum(axis = 0) # all wholes
        b[42, ] = b[8:12, :].sum(axis = 0) # all bodies
        b[43, ] = b[12:16, :].sum(axis = 0) # all tails
        b[44, ] = b[16:20, :].sum(axis = 0) # all previous heads
        b[45, ] = b[20:24, :].max(axis = 0) # all potential steps
        b[46, ] = b[28:32, :].sum(axis = 0) # all last second grid
        b[47, ] = b[32:36, :].sum(axis = 0) # all last third grid
        b[48, ] = b[36:40, :].sum(axis = 0) # all ordered grid
        # food
        for pos in obs['food']:
            b[49, pos] = 1
        # step, distance to next starving
        b[50, :] = obs['step']%40 / 40
        # step, wether next turn will be starving
        b[51, :] = (obs['step']+1)% 40 == 0
        b[52, :] = obs['step']/200
        # TBD: centralizing
        # NOTE(review): the three locals below are computed but unused —
        # presumably groundwork for recentering the board on the player's head.
        player_head = obs['geese'][player][0]
        player_head_x = player_head//11
        player_head_y = player_head%11
        return b.reshape(-1, 7, 11)
if __name__ == '__main__':
    # Smoke-test the environment: play 100 episodes with uniformly random moves.
    env = Environment()
    for _ in range(100):
        env.reset()
        while not env.terminal():
            print(env)
            legal = {p: env.legal_actions(p) for p in env.turns()}
            print([[env.action2str(a, p) for a in moves] for p, moves in legal.items()])
            chosen = {p: random.choice(moves) for p, moves in legal.items()}
            env.step(chosen)
        print(env)
        print(env.outcome())
| [
"torch.nn.BatchNorm2d",
"random.choice",
"kaggle_environments.envs.hungry_geese.hungry_geese.Observation",
"handyrl.envs.kaggle.public_flood_goose.public_flood_agent_goose.step",
"torch.nn.Conv2d",
"torch.cat",
"numpy.zeros",
"torch.nn.Linear",
"handyrl.envs.kaggle.public_flood_goose.State.from_obs_... | [((962, 1073), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_dim', 'output_dim'], {'kernel_size': 'kernel_size', 'padding': 'self.edge_size', 'padding_mode': '"""circular"""'}), "(input_dim, output_dim, kernel_size=kernel_size, padding=self.\n edge_size, padding_mode='circular')\n", (971, 1073), True, 'import torch.nn as nn\n'), ((2383, 2416), 'torch.nn.Linear', 'nn.Linear', (['filters', '(4)'], {'bias': '(False)'}), '(filters, 4, bias=False)\n', (2392, 2416), True, 'import torch.nn as nn\n'), ((2439, 2476), 'torch.nn.Linear', 'nn.Linear', (['(filters * 2)', '(1)'], {'bias': '(False)'}), '(filters * 2, 1, bias=False)\n', (2448, 2476), True, 'import torch.nn as nn\n'), ((3336, 3356), 'kaggle_environments.make', 'make', (['"""hungry_geese"""'], {}), "('hungry_geese')\n", (3340, 3356), False, 'from kaggle_environments import make\n'), ((8813, 8847), 'handyrl.envs.kaggle.public_flood_goose.State.from_obs_conf', 'pfg.State.from_obs_conf', (['obs', 'conf'], {}), '(obs, conf)\n', (8836, 8847), True, 'import handyrl.envs.kaggle.public_flood_goose as pfg\n'), ((8865, 8905), 'handyrl.envs.kaggle.public_flood_goose.public_flood_agent_goose.step', 'pfg.public_flood_agent_goose.step', (['state'], {}), '(state)\n', (8898, 8905), True, 'import handyrl.envs.kaggle.public_flood_goose as pfg\n'), ((9303, 9365), 'numpy.zeros', 'np.zeros', (['(self.NUM_AGENTS * 13 + 1, 7 * 11)'], {'dtype': 'np.float32'}), '((self.NUM_AGENTS * 13 + 1, 7 * 11), dtype=np.float32)\n', (9311, 9365), True, 'import numpy as np\n'), ((1091, 1117), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['output_dim'], {}), '(output_dim)\n', (1105, 1117), True, 'import torch.nn as nn\n'), ((7762, 7803), 'kaggle_environments.envs.hungry_geese.hungry_geese.Configuration', 'Configuration', (["{'rows': 7, 'columns': 11}"], {}), "({'rows': 7, 'columns': 11})\n", (7775, 7803), False, 'from kaggle_environments.envs.hungry_geese.hungry_geese import Observation, Configuration, 
Action, GreedyAgent\n'), ((8054, 8070), 'kaggle_environments.envs.hungry_geese.hungry_geese.Observation', 'Observation', (['obs'], {}), '(obs)\n', (8065, 8070), False, 'from kaggle_environments.envs.hungry_geese.hungry_geese import Observation, Configuration, Action, GreedyAgent\n'), ((8617, 8653), 'handyrl.envs.kaggle.public_flood_goose.Pos', 'pfg.Pos', (['(pos_int // 11)', '(pos_int % 11)'], {}), '(pos_int // 11, pos_int % 11)\n', (8624, 8653), True, 'import handyrl.envs.kaggle.public_flood_goose as pfg\n'), ((2814, 2843), 'torch.cat', 'torch.cat', (['[h_head, h_avg]', '(1)'], {}), '([h_head, h_avg], 1)\n', (2823, 2843), False, 'import torch\n'), ((12919, 12939), 'random.choice', 'random.choice', (['alist'], {}), '(alist)\n', (12932, 12939), False, 'import random\n')] |
# =======================================================================================================================================
# VNU-HCM, University of Science
# Department Computer Science, Faculty of Information Technology
# Authors: <NAME> (<NAME>)
# © 2020
"""
Given a string name, e.g. "Bob", return a greeting of the form "Hello Bob!".
For example test case:
hello_name('Bob') → 'Hello Bob!'
hello_name('Alice') → 'Hello Alice!'
hello_name('X') → 'Hello X!'
"""
import unittest
def hello_name(name):
    """Return the greeting 'Hello <name>!' for the given name string."""
    parts = ["Hello ", name, "!"]
    return "".join(parts)
class TestHelloName(unittest.TestCase):
    """Unit tests for hello_name over a variety of name strings."""
    def _check(self, name, expected):
        # Shared assertion helper so every case stays a one-liner.
        self.assertEqual(hello_name(name), expected)
    def test_case_00(self):
        self._check('Bob', 'Hello Bob!')
    def test_case_01(self):
        self._check('Alice', 'Hello Alice!')
    def test_case_02(self):
        self._check('X', 'Hello X!')
    def test_case_03(self):
        self._check('Dolly', 'Hello Dolly!')
    def test_case_04(self):
        self._check('Alpha', 'Hello Alpha!')
    def test_case_05(self):
        self._check('Omega', 'Hello Omega!')
    def test_case_06(self):
        self._check('Goodbye', 'Hello Goodbye!')
    def test_case_07(self):
        self._check('ho ho ho', 'Hello ho ho ho!')
    def test_case_08(self):
        self._check('xyz!', 'Hello xyz!!')
    def test_case_09(self):
        self._check('Hello', 'Hello Hello!')
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"unittest.main"
] | [((1535, 1550), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1548, 1550), False, 'import unittest\n')] |
from pygears import gear, Intf
from pygears.common import czip
from pygears.typing import Tuple, Uint, Union, Queue
from pygears.common import fmap, demux, decoupler, fifo, union_collapse
from pygears.cookbook import priority_mux, replicate
# Config type: per-transaction reduce length plus the accumulator's initial value.
TCfg = Tuple[{'reduce_size': Uint['w_reduce_size'], 'init': 't_acc'}]
@gear
def reduce2(din, cfg: TCfg, *, f, max_size):
    """Hardware reduce over a queued input stream.

    Folds each incoming queue transaction on ``din`` with the binary
    function ``f``, seeded from ``cfg['init']``.  Partial results are fed
    back through an internal FIFO of depth ``max_size``; the returned
    interface carries the final reduced values (one queue level less than
    the input).
    """
    acctype = cfg.dtype['init']
    qtype = Queue[acctype, din.dtype.lvl - 1]
    # Feedback interface: partial accumulator values re-enter the datapath here.
    temp_res = Intf(dtype=qtype)
    cfg_rep = cfg | replicate
    # Second operand of `f`: presumably the replicated init value is taken
    # first and the fed-back partial result afterwards (priority_mux order)
    # — NOTE(review): confirm against pygears priority_mux semantics.
    sec_opnd = (cfg_rep, temp_res) \
        | priority_mux \
        | fmap(f=union_collapse, fcat=czip, lvl=1)
    result = czip(din, sec_opnd) | decoupler | fmap(f=f, fcat=czip, lvl=2)
    # Route partial results (acc) back into the FIFO, pass final results out.
    acc, fin_res = result | Union[qtype, qtype] | demux
    acc | fifo(intfs=[temp_res], depth=max_size)
    return fin_res
| [
"pygears.common.czip",
"pygears.Intf",
"pygears.common.fifo",
"pygears.common.fmap"
] | [((461, 478), 'pygears.Intf', 'Intf', ([], {'dtype': 'qtype'}), '(dtype=qtype)\n', (465, 478), False, 'from pygears import gear, Intf\n'), ((581, 621), 'pygears.common.fmap', 'fmap', ([], {'f': 'union_collapse', 'fcat': 'czip', 'lvl': '(1)'}), '(f=union_collapse, fcat=czip, lvl=1)\n', (585, 621), False, 'from pygears.common import fmap, demux, decoupler, fifo, union_collapse\n'), ((670, 697), 'pygears.common.fmap', 'fmap', ([], {'f': 'f', 'fcat': 'czip', 'lvl': '(2)'}), '(f=f, fcat=czip, lvl=2)\n', (674, 697), False, 'from pygears.common import fmap, demux, decoupler, fifo, union_collapse\n'), ((764, 802), 'pygears.common.fifo', 'fifo', ([], {'intfs': '[temp_res]', 'depth': 'max_size'}), '(intfs=[temp_res], depth=max_size)\n', (768, 802), False, 'from pygears.common import fmap, demux, decoupler, fifo, union_collapse\n'), ((636, 655), 'pygears.common.czip', 'czip', (['din', 'sec_opnd'], {}), '(din, sec_opnd)\n', (640, 655), False, 'from pygears.common import czip\n')] |
#
# Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import pathlib
import tempfile
from unittest import TestCase, mock
from mbed_tools.build.flash import flash_binary, _build_binary_file_path, _flash_dev
from mbed_tools.build.exceptions import BinaryFileNotFoundError
from tests.build.factories import DeviceFactory
@mock.patch("mbed_tools.build.flash._build_binary_file_path")
@mock.patch("mbed_tools.build.flash._flash_dev")
class TestFlashBinary(TestCase):
    """Tests for flash_binary with path resolution and flashing mocked out."""
    def test_check_flashing(self, _flash_dev, _build_binary_file_path):
        device = DeviceFactory()
        _flash_dev.return_value = True
        with tempfile.TemporaryDirectory() as tmp_dir:
            base_dir = pathlib.Path(tmp_dir)
            build_dir = base_dir / "cmake_build"
            build_dir.mkdir()
            # flash_binary expects the built artefact to exist on disk.
            bin_file = build_dir / (base_dir.name + ".bin")
            bin_file.touch()
            _build_binary_file_path.return_value = bin_file
            mount_point = device.mount_points[0].resolve()
            flash_binary(mount_point, base_dir, build_dir, "TEST", False)
            _build_binary_file_path.assert_called_once_with(base_dir, build_dir, False)
            _flash_dev.assert_called_once_with(mount_point, bin_file)
class TestBuildBinFilePath(TestCase):
    """Tests for _build_binary_file_path for .bin, .hex and missing outputs."""
    @staticmethod
    def _make_build_tree(base_dir, suffix=None):
        # Create <base>/cmake_build and optionally touch the expected artefact.
        build_dir = base_dir / "cmake_build"
        build_dir.mkdir()
        if suffix is None:
            return build_dir, None
        artefact = build_dir / (base_dir.name + suffix)
        artefact.touch()
        return build_dir, artefact
    def test_build_bin_file_path(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            base_dir = pathlib.Path(tmp_dir)
            build_dir, bin_file = self._make_build_tree(base_dir, ".bin")
            self.assertEqual(_build_binary_file_path(base_dir, build_dir, False), bin_file)
    def test_build_hex_file_path(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            base_dir = pathlib.Path(tmp_dir)
            build_dir, hex_file = self._make_build_tree(base_dir, ".hex")
            self.assertEqual(_build_binary_file_path(base_dir, build_dir, True), hex_file)
    def test_missing_binary_file(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            base_dir = pathlib.Path(tmp_dir)
            build_dir, _ = self._make_build_tree(base_dir)
            with self.assertRaises(BinaryFileNotFoundError):
                _build_binary_file_path(base_dir, build_dir, False)
@mock.patch("mbed_tools.build.flash.shutil.copy")
class TestCopyToDevice(TestCase):
    """Tests for _flash_dev, with the actual file copy mocked."""
    def test_copy_to_device(self, copy):
        device = DeviceFactory()
        with tempfile.TemporaryDirectory() as tmp_dir:
            base_dir = pathlib.Path(tmp_dir)
            build_dir = base_dir / "cmake_build"
            build_dir.mkdir()
            bin_file = build_dir / (base_dir.name + ".bin")
            bin_file.touch()
            mount_point = device.mount_points[0].resolve()
            _flash_dev(mount_point, bin_file)
            # Symlinks must be copied as-is, not followed.
            copy.assert_called_once_with(bin_file, mount_point, follow_symlinks=False)
| [
"tempfile.TemporaryDirectory",
"pathlib.Path",
"mbed_tools.build.flash._build_binary_file_path",
"tests.build.factories.DeviceFactory",
"mbed_tools.build.flash._build_binary_file_path.assert_called_once_with",
"unittest.mock.patch"
] | [((390, 450), 'unittest.mock.patch', 'mock.patch', (['"""mbed_tools.build.flash._build_binary_file_path"""'], {}), "('mbed_tools.build.flash._build_binary_file_path')\n", (400, 450), False, 'from unittest import TestCase, mock\n'), ((452, 499), 'unittest.mock.patch', 'mock.patch', (['"""mbed_tools.build.flash._flash_dev"""'], {}), "('mbed_tools.build.flash._flash_dev')\n", (462, 499), False, 'from unittest import TestCase, mock\n'), ((2575, 2623), 'unittest.mock.patch', 'mock.patch', (['"""mbed_tools.build.flash.shutil.copy"""'], {}), "('mbed_tools.build.flash.shutil.copy')\n", (2585, 2623), False, 'from unittest import TestCase, mock\n'), ((627, 642), 'tests.build.factories.DeviceFactory', 'DeviceFactory', ([], {}), '()\n', (640, 642), False, 'from tests.build.factories import DeviceFactory\n'), ((2721, 2736), 'tests.build.factories.DeviceFactory', 'DeviceFactory', ([], {}), '()\n', (2734, 2736), False, 'from tests.build.factories import DeviceFactory\n'), ((697, 726), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (724, 726), False, 'import tempfile\n'), ((761, 781), 'pathlib.Path', 'pathlib.Path', (['tmpDir'], {}), '(tmpDir)\n', (773, 781), False, 'import pathlib\n'), ((1154, 1229), 'mbed_tools.build.flash._build_binary_file_path.assert_called_once_with', '_build_binary_file_path.assert_called_once_with', (['base_dir', 'build_dir', '(False)'], {}), '(base_dir, build_dir, False)\n', (1201, 1229), False, 'from mbed_tools.build.flash import flash_binary, _build_binary_file_path, _flash_dev\n'), ((1419, 1448), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1446, 1448), False, 'import tempfile\n'), ((1483, 1503), 'pathlib.Path', 'pathlib.Path', (['tmpDir'], {}), '(tmpDir)\n', (1495, 1503), False, 'import pathlib\n'), ((1849, 1878), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1876, 1878), False, 'import tempfile\n'), ((1913, 1933), 'pathlib.Path', 
'pathlib.Path', (['tmpDir'], {}), '(tmpDir)\n', (1925, 1933), False, 'import pathlib\n'), ((2278, 2307), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2305, 2307), False, 'import tempfile\n'), ((2342, 2362), 'pathlib.Path', 'pathlib.Path', (['tmpDir'], {}), '(tmpDir)\n', (2354, 2362), False, 'import pathlib\n'), ((2751, 2780), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2778, 2780), False, 'import tempfile\n'), ((2815, 2835), 'pathlib.Path', 'pathlib.Path', (['tmpDir'], {}), '(tmpDir)\n', (2827, 2835), False, 'import pathlib\n'), ((1732, 1783), 'mbed_tools.build.flash._build_binary_file_path', '_build_binary_file_path', (['base_dir', 'build_dir', '(False)'], {}), '(base_dir, build_dir, False)\n', (1755, 1783), False, 'from mbed_tools.build.flash import flash_binary, _build_binary_file_path, _flash_dev\n'), ((2162, 2212), 'mbed_tools.build.flash._build_binary_file_path', '_build_binary_file_path', (['base_dir', 'build_dir', '(True)'], {}), '(base_dir, build_dir, True)\n', (2185, 2212), False, 'from mbed_tools.build.flash import flash_binary, _build_binary_file_path, _flash_dev\n'), ((2520, 2571), 'mbed_tools.build.flash._build_binary_file_path', '_build_binary_file_path', (['base_dir', 'build_dir', '(False)'], {}), '(base_dir, build_dir, False)\n', (2543, 2571), False, 'from mbed_tools.build.flash import flash_binary, _build_binary_file_path, _flash_dev\n')] |
import time
import typing
import requests
from sys import stderr
from datetime import datetime
from packettotal_sdk import packettotal_api
class SearchTools(packettotal_api.PacketTotalApi):
    """Higher-level search helpers built on top of the PacketTotal API client."""

    def __init__(self, api_key: str):
        """
        :param api_key: An API authentication token
        """
        super().__init__(api_key)

    def search_by_pcap(self, pcap_file_obj: typing.BinaryIO) -> requests.Response:
        """
        Search by a pcap/pcapng file, get a list of similar packet captures

        :param pcap_file_obj: A file like object that provides a .read() interface (E.G open('path_to_pcap.pcap', 'rb') )
        :return: A request.Response instance, containing a graph of similar pcaps with matched terms
        """
        response = super().analyze(pcap_file_obj)
        if response.status_code == 200:
            # Capture already analyzed; query similarity by its MD5 right away.
            sim_response = super().pcap_similar(response.json()['pcap_metadata']['md5'])
        elif response.status_code == 202:
            # Analysis accepted but still running; poll until results exist.
            pcap_id = response.json()['id']
            info_response = super().pcap_info(pcap_id)
            while info_response.status_code == 404:
                print('[{}] Waiting for {} to finish analyzing.'.format(datetime.utcnow(), pcap_id))
                info_response = super().pcap_info(response.json()['id'])
                time.sleep(10)
            print('[{}] Fetching results for {}.'.format(datetime.utcnow(), pcap_id))
            time.sleep(5)
            sim_response = super().pcap_similar(response.json()['id'])
        else:
            # Propagate unexpected responses (errors, rate limits, ...) unchanged.
            return response
        return sim_response

    def search_by_iocs(self, ioc_file: typing.TextIO) -> requests.Response:
        """
        Search up to 100 IOC terms at once, and get matching packet captures

        :param ioc_file: A file like object that provides a .read() interface (E.G open('path_to_iocs.txt', 'r')
                         contents are line delim
        :return: A request.Response instance containing the search results containing at least one matching IOC
        """
        text = ioc_file.read()
        # Guess the delimiter from the first 2KB of the file.
        delim = '\n'
        if '\r\n' in text[0:2048]:
            delim = '\r\n'
        elif '\r' in text[0:2048]:
            delim = '\r'
        elif ',' in text[0:2048]:
            delim = ','
        elif '\t' in text[0:2048]:
            delim = '\t'
        text_delimd = text.split(delim)
        search_str = ''
        # Bug fix: this slice was [0:-2], which silently dropped the
        # second-to-last IOC from every search.  [:-1] covers every term
        # except the final one, which is appended below without ' OR '.
        for i, ioc in enumerate(text_delimd[:-1]):
            search_str += '"{}" OR '.format(ioc.strip())
            if i >= 99:
                # Bug fix: the cap was `i > 100`, allowing 102 loop terms.
                # 99 loop terms + the final term below = 100 searched terms.
                print('Warning searching only the first 100 IOC terms of {}.'.format(len(text_delimd)), file=stderr)
                break
        search_str += '"{}"'.format(text_delimd[-1].strip())
        response = super().search(search_str)
        return response
| [
"time.sleep",
"datetime.datetime.utcnow"
] | [((1428, 1441), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1438, 1441), False, 'import time\n'), ((1315, 1329), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1325, 1329), False, 'import time\n'), ((1387, 1404), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1402, 1404), False, 'from datetime import datetime\n'), ((1197, 1214), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1212, 1214), False, 'from datetime import datetime\n')] |
from unittest.mock import Mock, patch
import pandas as pd
from sdgym.s3 import is_s3_path, parse_s3_path, write_csv, write_file
def test_is_s3_path_with_local_dir():
    """``is_s3_path`` returns ``False`` for a plain local directory path."""
    local_path = 'path/to/local/dir'
    assert not is_s3_path(local_path)
def test_is_s3_path_with_s3_bucket():
    """``is_s3_path`` returns ``True`` for an ``s3://`` URI."""
    s3_uri = 's3://my-bucket/my/path'
    assert is_s3_path(s3_uri)
def test_parse_s3_path_bucket_only():
    """A bucket-only s3 path parses to ``(bucket_name, '')``."""
    bucket_name, key_prefix = parse_s3_path('s3://my-bucket/')
    assert bucket_name == 'my-bucket'
    assert key_prefix == ''
def test_parse_s3_path_bucket_and_dir_path():
    """An s3 path with a subdirectory parses to ``(bucket_name, key_prefix)``."""
    bucket_name, key_prefix = parse_s3_path('s3://my-bucket/path/to/dir')
    assert bucket_name == 'my-bucket'
    assert key_prefix == 'path/to/dir'
def test_write_file(tmpdir):
    """``write_file`` with a local path creates the file with the given bytes."""
    expected = 'test_content'
    target = f'{tmpdir}/test.txt'
    write_file(expected.encode('utf-8'), target, None, None)
    with open(target, 'r') as handle:
        assert handle.read() == expected
@patch('sdgym.s3.boto3')
def test_write_file_s3(boto3_mock):
    """``write_file`` with an s3 path authenticates and uploads via boto3.

    The boto3 client must be created with the given credentials and
    ``put_object`` called once with the bucket, key and raw contents.
    """
    payload = 'test_content'.encode('utf-8')
    aws_key, aws_secret = 'my-key', 'my-secret'
    s3_client = Mock()
    boto3_mock.client.return_value = s3_client
    write_file(payload, 's3://my-bucket/test.txt', aws_key, aws_secret)
    boto3_mock.client.assert_called_once_with(
        's3',
        aws_access_key_id=aws_key,
        aws_secret_access_key=aws_secret
    )
    s3_client.put_object.assert_called_once_with(
        Bucket='my-bucket',
        Key='test.txt',
        Body=payload,
        ContentEncoding='',
    )
@patch('sdgym.s3.write_file')
def test_write_csv(write_file_mock):
    """``write_csv`` serialises the frame (no index) and delegates to ``write_file``."""
    frame = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
    write_csv(frame, 'tmp/path', None, None)
    # to_csv does not mutate the frame, so the same object gives the expected bytes.
    expected_content = frame.to_csv(index=False).encode('utf-8')
    write_file_mock.assert_called_once_with(
        expected_content,
        'tmp/path',
        None,
        None
    )
| [
"sdgym.s3.write_csv",
"unittest.mock.Mock",
"sdgym.s3.is_s3_path",
"pandas.DataFrame",
"unittest.mock.patch",
"sdgym.s3.parse_s3_path"
] | [((2827, 2850), 'unittest.mock.patch', 'patch', (['"""sdgym.s3.boto3"""'], {}), "('sdgym.s3.boto3')\n", (2832, 2850), False, 'from unittest.mock import Mock, patch\n'), ((4090, 4118), 'unittest.mock.patch', 'patch', (['"""sdgym.s3.write_file"""'], {}), "('sdgym.s3.write_file')\n", (4095, 4118), False, 'from unittest.mock import Mock, patch\n'), ((451, 467), 'sdgym.s3.is_s3_path', 'is_s3_path', (['path'], {}), '(path)\n', (461, 467), False, 'from sdgym.s3 import is_s3_path, parse_s3_path, write_csv, write_file\n'), ((821, 837), 'sdgym.s3.is_s3_path', 'is_s3_path', (['path'], {}), '(path)\n', (831, 837), False, 'from sdgym.s3 import is_s3_path, parse_s3_path, write_csv, write_file\n'), ((1353, 1372), 'sdgym.s3.parse_s3_path', 'parse_s3_path', (['path'], {}), '(path)\n', (1366, 1372), False, 'from sdgym.s3 import is_s3_path, parse_s3_path, write_csv, write_file\n'), ((2014, 2033), 'sdgym.s3.parse_s3_path', 'parse_s3_path', (['path'], {}), '(path)\n', (2027, 2033), False, 'from sdgym.s3 import is_s3_path, parse_s3_path, write_csv, write_file\n'), ((3626, 3632), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3630, 3632), False, 'from unittest.mock import Mock, patch\n'), ((4602, 4648), 'pandas.DataFrame', 'pd.DataFrame', (["{'col1': [1, 2], 'col2': [3, 4]}"], {}), "({'col1': [1, 2], 'col2': [3, 4]})\n", (4614, 4648), True, 'import pandas as pd\n'), ((4686, 4719), 'sdgym.s3.write_csv', 'write_csv', (['data', 'path', 'None', 'None'], {}), '(data, path, None, None)\n', (4695, 4719), False, 'from sdgym.s3 import is_s3_path, parse_s3_path, write_csv, write_file\n'), ((4752, 4798), 'pandas.DataFrame', 'pd.DataFrame', (["{'col1': [1, 2], 'col2': [3, 4]}"], {}), "({'col1': [1, 2], 'col2': [3, 4]})\n", (4764, 4798), True, 'import pandas as pd\n')] |
from cloud.permission import Permission, NeedPermission
# Define the input output format of the function.
# This information is used when creating the *SDK*.
info = {
    # Request parameters the generated SDK will send for this call.
    'input_format': {
        'session_ids': ['str'],
    },
    # Shape of the response body returned by do().
    'output_format': {
        'success': 'bool'
    },
    'description': 'Delete sessions'
}
}
@NeedPermission(Permission.Run.Auth.delete_sessions)
def do(data, resource):
    """Delete the sessions named in params['session_ids'] as one batch.

    Returns a body dict with a boolean 'success' flag.
    """
    session_ids = data['params'].get('session_ids')
    success = resource.db_delete_item_batch(session_ids)
    return {'success': success}
| [
"cloud.permission.NeedPermission"
] | [((328, 379), 'cloud.permission.NeedPermission', 'NeedPermission', (['Permission.Run.Auth.delete_sessions'], {}), '(Permission.Run.Auth.delete_sessions)\n', (342, 379), False, 'from cloud.permission import Permission, NeedPermission\n')] |
# coding=<utf-8>
import requests
import re
import socket
import base64
import psutil
import pywifi
from pywifi import const
import subprocess
import os
import time
def get_host_ip():
    """Return the local IPv4 address used for outbound traffic.

    "Connects" a UDP socket to a public resolver (no packet is actually
    sent for UDP) and reads back the source address the OS chose for
    that route.

    Returns:
        str: the host's outward-facing IPv4 address.

    Raises:
        OSError: if no route to the target address exists.
    """
    # BUGFIX: create the socket *before* the try block. In the original,
    # a failure inside socket.socket() left `s` unbound, so the `finally`
    # clause raised NameError on `s.close()` and masked the real error.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip
def encrypt(password):
    """Base64-encode *password*; the captive portal calls this "encryption"."""
    raw_bytes = password.encode('utf-8')
    return base64.b64encode(raw_bytes).decode('utf-8')
def getNetIfAddr(iface_name='wls1'):
    """Return the MAC address of the network interface *iface_name*.

    Iterates every interface reported by psutil, printing each adapter
    name (debug output kept from the original), and for the matching
    interface collects the link-layer (MAC), IPv4 and IPv6 addresses.

    Args:
        iface_name (str): interface to look up. Defaults to ``'wls1'``,
            the adapter name that was previously hard-coded.

    Returns:
        str: the MAC address, or ``''`` when the interface is not found.
    """
    dic = psutil.net_if_addrs()
    mac = ''
    for adapter in dic:
        print(adapter)  # debug: list every adapter that exists
        if adapter != iface_name:
            continue
        snicList = dic[adapter]
        mac = ''
        ipv4 = ''
        ipv6 = ''
        for snic in snicList:
            # AF_LINK (BSD/macOS) / AF_PACKET (Linux) families carry the MAC.
            if snic.family.name in {'AF_LINK', 'AF_PACKET'}:
                mac = snic.address
            elif snic.family.name == 'AF_INET':
                ipv4 = snic.address
            elif snic.family.name == 'AF_INET6':
                ipv6 = snic.address
        print('%s, %s, %s, %s' % (adapter, mac, ipv4, ipv6))
    return mac
def get_mac_address():
    """Return the wireless adapter's MAC address, lower-cased."""
    mac = getNetIfAddr()
    return mac.lower()
class AutoWHUT:
    """Builds and submits the login request for the WHUT campus captive portal."""

    def get_param(self, username: str, password: str, cookies: str):
        """Build the (headers, form-body) pair for the portal login POST.

        The body starts from a template query string and the credential /
        address fields are spliced in with regex substitutions. The
        ``{B}`` prefix marks the password as base64-encoded (see
        ``encrypt``).

        Returns:
            tuple: (header dict, urlencoded body string).
        """
        header = {
            'Origin': 'http://172.30.16.34',
            'Referer': 'http://172.30.16.34/srun_portal_pc.php?ac_id=1&cmd=login&switchip=172.30.14.104&mac=84:ef:18'
                       ':91:e5:5b&ip=' + get_host_ip() +
                       '&essid=WHUT-WLAN6&apname=JB-JH-J4-0901-E&apgroup=WHUT-WLAN-Dual&url=http://www.gstatic.com'
                       '/generate_204',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362',
            'Accept': '*/*',
            'Accept-Language': 'zh-CN',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'Accept-Encoding': 'gzip, deflate',
            'Host': '172.30.16.34',
            'Connection': 'Keep-Alive',
            'Pragma': 'no-cache',
            'Cookie': cookies
        }
        # Template body; each placeholder field below is filled by re.sub.
        data = 'action=login&username=&password=&ac_id=64&user_ip=&nas_ip=&user_mac=&save_me=1&ajax=1'
        data = re.sub("username=.*?&", "username=" + username + '&', data)
        data = re.sub("password=.*?&", "password={B}" + encrypt(password) + '&', data)
        data = re.sub("user_ip=.*?&", "user_ip=" + get_host_ip() + '&', data)
        data = re.sub("user_mac=.*?&", "user_mac=" + get_mac_address() + '&', data)
        return header, data
    def sign_in(self):
        """POST the login request to the portal, printing the outcome.

        All failures are swallowed and printed so the caller's watchdog
        loop keeps running. NOTE(review): username/password are blank —
        presumably meant to be filled in by the user before running.
        """
        try:
            username = ''
            password = ''
            # Session cookie captured from a browser login; likely expires.
            cookies = 'login=bQ0pOyR6IXU7PJaQQqRAcBPxGAvxAcrvEe0UJsVvdkTHxMBomR2HUS3oxriFtDiSt7XrDS' \
                      '%2BmurcIcGKHmgRZbb8fUGzw%2FUGvJFIjk0nAVIEwPGYVt7br7b5u1t4sMp' \
                      '%2BAfr4VZ5VcKPDr8eaBrOt2YRrH9Bdy6bogpY89dPj' \
                      '%2BzwrVuc4xmFUoWD8peECGHshewZRrIVvucbx652F2TRxF3VtHNL9H0fs5GjjmJjQMtecd; ' \
                      'NSC_tsvo_4l_TH=ffffffffaf160e3a45525d5f4f58455e445a4a423660; ' \
                      'login=bQ0pOyR6IXU7PJaQQqRAcBPxGAvxAcrvEe0UJsVvdkTHxMBomR2HUS3oxriFtDiSt7XrDS' \
                      '%2BmurcIcGKHmgRZbb8fUGzw%2FUGvJFIjk0nAVIEwPGYVt7br7b5u1t4sMp' \
                      '%2BAfr4VZ5VcKPDr8eaBrOt2YRrH9Bdy6bogpY89dPj' \
                      '%2BzwrVuc4xmFUoWD8peECGHshewZRrIVvucbx652F2TRxF3VtHNL9H0fs5GjjmJjQMtecd '
            header, data = self.get_param(username, password, cookies)
            print(data)
            result = requests.post('http://172.30.16.34/include/auth_action.php', headers=header, data=data)
            print(result.text, '\n{}\n'.format('*' * 79), result.encoding)
        except BaseException as arg:
            print(arg)
class WifiManager:
    """Watches the wireless link and re-runs the portal login whenever
    internet access is lost, sleeping an adaptive interval between checks."""

    def __init__(self):
        self.wifi = pywifi.PyWiFi()
        # NOTE(review): interface index 1 is hard-coded for this machine's
        # adapter ordering — verify on other hosts.
        self.ifaces = self.wifi.interfaces()[1]
        self.autoWHUT = AutoWHUT()
        self.sleepTime = 1

    def is_connected_wifi(self):
        """Return True when the wireless interface reports a connected state."""
        return const.IFACE_CONNECTED == self.ifaces.status()

    def get_current_wifi(self):
        """Return the SSID of the current connection (Windows netsh), or None."""
        proc = subprocess.Popen('netsh wlan show interfaces',
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True)
        output = proc.stdout.read().decode('gbk')
        pos = output.find("SSID")
        if pos <= 0:
            return None
        ssid_field = output[pos:].split(':')[1]
        return ssid_field.split('\r\n')[0].strip()

    def check_net(self):
        """Return True when the web is reachable without a portal redirect."""
        try:
            reply = requests.post('http://www.baidu.com')
            return "?cmd=redirect" not in reply.text
        except Exception:
            return False

    def auto_check(self):
        """Run one check cycle and update the sleep interval accordingly."""
        if not self.is_connected_wifi():
            self.sleepTime = 4
            print("no wifi")
            return
        if self.check_net():
            self.sleepTime = 60
            print("60s")
            return
        # Wifi is up but the internet is gated: (re)authenticate at the portal.
        self.autoWHUT.sign_in()
        print("2s")
        self.sleepTime = 2

    def start(self):
        """Loop forever: check connectivity, then sleep the chosen interval."""
        while True:
            self.auto_check()
            time.sleep(self.sleepTime)
if __name__ == '__main__':
    # Entry point: run the connectivity watchdog until interrupted.
    wifiManager = WifiManager()
    wifiManager.start()
| [
"pywifi.PyWiFi",
"requests.post",
"socket.socket",
"subprocess.Popen",
"time.sleep",
"re.sub",
"psutil.net_if_addrs"
] | [((518, 539), 'psutil.net_if_addrs', 'psutil.net_if_addrs', ([], {}), '()\n', (537, 539), False, 'import psutil\n'), ((206, 254), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (219, 254), False, 'import socket\n'), ((2352, 2411), 're.sub', 're.sub', (['"""username=.*?&"""', "('username=' + username + '&')", 'data'], {}), "('username=.*?&', 'username=' + username + '&', data)\n", (2358, 2411), False, 'import re\n'), ((3989, 4004), 'pywifi.PyWiFi', 'pywifi.PyWiFi', ([], {}), '()\n', (4002, 4004), False, 'import pywifi\n'), ((4298, 4407), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(True)'}), '(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr\n =subprocess.PIPE, shell=True)\n', (4314, 4407), False, 'import subprocess\n'), ((3700, 3791), 'requests.post', 'requests.post', (['"""http://172.30.16.34/include/auth_action.php"""'], {'headers': 'header', 'data': 'data'}), "('http://172.30.16.34/include/auth_action.php', headers=header,\n data=data)\n", (3713, 3791), False, 'import requests\n'), ((4804, 4841), 'requests.post', 'requests.post', (['"""http://www.baidu.com"""'], {}), "('http://www.baidu.com')\n", (4817, 4841), False, 'import requests\n'), ((5397, 5423), 'time.sleep', 'time.sleep', (['self.sleepTime'], {}), '(self.sleepTime)\n', (5407, 5423), False, 'import time\n')] |
"""Utilities for approximating gradients."""
import numpy as np
from utils.misc import process_inputs
from utils.simrunners import SimulationRunner
def local_linear_gradients(X, f, p=None, weights=None):
    """Estimate a collection of gradients from input/output pairs.

    Given a set of input/output pairs, choose subsets of neighboring points and
    build a local linear model for each subset. The gradients of these local
    linear models comprise estimates of sampled gradients.

    Parameters
    ----------
    X : ndarray
        M-by-m matrix that contains the m-dimensional inputs
    f : ndarray
        M-by-1 matrix that contains scalar outputs
    p : int, optional
        how many nearest neighbors to use when constructing the local linear
        model (default floor(1.7*m))
    weights : ndarray, optional
        M-by-1 matrix that contains the weights for each observation (default
        None, meaning uniform weights)

    Returns
    -------
    df : ndarray
        MM-by-m matrix that contains estimated partial derivatives approximated
        by the local linear models, where MM = min(ceil(10*m*log(m)), M-1)

    Notes
    -----
    If `p` is not specified, the default value is floor(1.7*m).
    """
    X, M, m = process_inputs(X)
    if M <= m:
        raise Exception('Not enough samples for local linear models.')
    if p is None:
        p = int(np.minimum(np.floor(1.7*m), M))
    elif not isinstance(p, int):
        raise TypeError('p must be an integer.')
    if p < m+1 or p > M:
        raise Exception('p must be between m+1 and M')
    if weights is None:
        weights = np.ones((M, 1)) / M

    MM = np.minimum(int(np.ceil(10*m*np.log(m))), M-1)
    df = np.zeros((MM, m))
    for i in range(MM):
        # Pick a random sample point as the center of the local model.
        ii = np.random.randint(M)
        x = X[ii,:]
        # Squared distances from the center to every sample.
        D2 = np.sum((X - x)**2, axis=1)
        ind = np.argsort(D2)
        # BUGFIX: drop the zero-distance point(s) — the center itself and any
        # duplicates — from the sorted neighbor list. The original
        # `ind[D2 != 0]` applied the positional mask of the *unsorted* D2 to
        # the *sorted* index array, removing an arbitrary neighbor instead.
        ind = ind[D2[ind] != 0]
        # Weighted least-squares fit of a linear model over the p nearest
        # neighbors: columns are [intercept, x_1, ..., x_m].
        A = np.hstack((np.ones((p,1)), X[ind[:p],:])) * np.sqrt(weights[ii])
        b = f[ind[:p]] * np.sqrt(weights[ii])
        u = np.linalg.lstsq(A, b)[0]
        # The slope coefficients (skipping the intercept) estimate the gradient.
        df[i,:] = u[1:].T
    return df
def finite_difference_gradients(X, fun, h=1e-6):
    """Compute finite difference gradients with a given interface.

    Parameters
    ----------
    X : ndarray
        M-by-m matrix that contains the points to estimate the gradients with
        finite differences
    fun : function
        function that returns the simulation's quantity of interest given inputs
    h : float, optional
        the finite difference step size (default 1e-6)

    Returns
    -------
    df : ndarray
        M-by-m matrix that contains estimated partial derivatives approximated
        by finite differences
    """
    X, M, m = process_inputs(X)

    # Stack the original points with m forward-perturbed copies of X:
    # row block 0 is X itself; block j (1..m) is X with h added to
    # coordinate j-1.
    directions = np.vstack((np.zeros((1, m)), np.eye(m)))
    XX = np.kron(np.ones((m+1, 1)), X) + h*np.kron(directions, np.ones((M, 1)))

    # Evaluate the quantity of interest at all (m+1)*M points.
    if isinstance(fun, SimulationRunner):
        F = fun.run(XX)
    else:
        F = SimulationRunner(fun).run(XX)

    # Forward differences: (f(x + h*e_j) - f(x)) / h, one column per coordinate.
    df = (F[M:].reshape((m, M)).transpose() - F[:M]) / h
    return df.reshape((M, m))
| [
"numpy.eye",
"numpy.sqrt",
"numpy.ones",
"numpy.log",
"numpy.floor",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"utils.simrunners.SimulationRunner",
"numpy.linalg.lstsq",
"utils.misc.process_inputs"
] | [((1186, 1203), 'utils.misc.process_inputs', 'process_inputs', (['X'], {}), '(X)\n', (1200, 1203), False, 'from utils.misc import process_inputs\n'), ((1646, 1663), 'numpy.zeros', 'np.zeros', (['(MM, m)'], {}), '((MM, m))\n', (1654, 1663), True, 'import numpy as np\n'), ((2664, 2681), 'utils.misc.process_inputs', 'process_inputs', (['X'], {}), '(X)\n', (2678, 2681), False, 'from utils.misc import process_inputs\n'), ((1701, 1721), 'numpy.random.randint', 'np.random.randint', (['M'], {}), '(M)\n', (1718, 1721), True, 'import numpy as np\n'), ((1755, 1783), 'numpy.sum', 'np.sum', (['((X - x) ** 2)'], {'axis': '(1)'}), '((X - x) ** 2, axis=1)\n', (1761, 1783), True, 'import numpy as np\n'), ((1796, 1810), 'numpy.argsort', 'np.argsort', (['D2'], {}), '(D2)\n', (1806, 1810), True, 'import numpy as np\n'), ((1561, 1576), 'numpy.ones', 'np.ones', (['(M, 1)'], {}), '((M, 1))\n', (1568, 1576), True, 'import numpy as np\n'), ((1894, 1914), 'numpy.sqrt', 'np.sqrt', (['weights[ii]'], {}), '(weights[ii])\n', (1901, 1914), True, 'import numpy as np\n'), ((1940, 1960), 'numpy.sqrt', 'np.sqrt', (['weights[ii]'], {}), '(weights[ii])\n', (1947, 1960), True, 'import numpy as np\n'), ((1973, 1994), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'b'], {}), '(A, b)\n', (1988, 1994), True, 'import numpy as np\n'), ((2763, 2782), 'numpy.ones', 'np.ones', (['(m + 1, 1)'], {}), '((m + 1, 1))\n', (2770, 2782), True, 'import numpy as np\n'), ((1326, 1343), 'numpy.floor', 'np.floor', (['(1.7 * m)'], {}), '(1.7 * m)\n', (1334, 1343), True, 'import numpy as np\n'), ((2848, 2863), 'numpy.ones', 'np.ones', (['(M, 1)'], {}), '((M, 1))\n', (2855, 2863), True, 'import numpy as np\n'), ((2979, 3000), 'utils.simrunners.SimulationRunner', 'SimulationRunner', (['fun'], {}), '(fun)\n', (2995, 3000), False, 'from utils.simrunners import SimulationRunner\n'), ((1619, 1628), 'numpy.log', 'np.log', (['m'], {}), '(m)\n', (1625, 1628), True, 'import numpy as np\n'), ((1861, 1876), 'numpy.ones', 'np.ones', 
(['(p, 1)'], {}), '((p, 1))\n', (1868, 1876), True, 'import numpy as np\n'), ((2817, 2833), 'numpy.zeros', 'np.zeros', (['(1, m)'], {}), '((1, m))\n', (2825, 2833), True, 'import numpy as np\n'), ((2835, 2844), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (2841, 2844), True, 'import numpy as np\n')] |
import csv
import sys
from pathlib import Path
from abc import abstractmethod
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import common.tf_utils as tf_utils
import metrics.manager as metric_manager
from common.model_loader import Ckpt
from common.utils import format_text
from common.utils import get_logger
from helper.base import AudioBase
from metrics.summaries import BaseSummaries
from metrics.summaries import Summaries
class Evaluator(object):
    """Base class for checkpoint-driven evaluation.

    Restores a checkpoint into *session*, runs metric ops over *dataset*,
    and keeps the best-scoring checkpoints on disk via a BestKeeper.
    Subclasses provide the dataset/task specifics through the abstract
    hooks below; `run_evaluation` is presumably supplied by a mixin or
    subclass (not visible here — confirm against the concrete class).
    """
    def __init__(self, model, session, args, dataset, dataset_name, name):
        self.log = get_logger(name)
        self.model = model
        self.session = session
        self.args = args
        self.dataset = dataset
        self.dataset_name = dataset_name
        # If a directory was given, resolve it to its latest checkpoint file.
        if Path(self.args.checkpoint_path).is_dir():
            latest_checkpoint = tf.train.latest_checkpoint(self.args.checkpoint_path)
            if latest_checkpoint is not None:
                self.args.checkpoint_path = latest_checkpoint
            self.log.info(f"Get latest checkpoint and update to it: {self.args.checkpoint_path}")
        self.watch_path = self._build_watch_path()
        # Initialize all variables before the checkpoint loader overwrites them.
        self.session.run(tf.global_variables_initializer())
        self.session.run(tf.local_variables_initializer())
        self.ckpt_loader = Ckpt(
            session=session,
            include_scopes=args.checkpoint_include_scopes,
            exclude_scopes=args.checkpoint_exclude_scopes,
            ignore_missing_vars=args.ignore_missing_vars,
            use_ema=self.args.use_ema,
            ema_decay=self.args.ema_decay,
        )
    @abstractmethod
    def setup_metric_manager(self):
        """Create self.metric_manager for the concrete dataset/task."""
        raise NotImplementedError
    @abstractmethod
    def setup_metric_ops(self):
        """Build the TensorFlow ops that compute the metrics."""
        raise NotImplementedError
    @abstractmethod
    def build_non_tensor_data_from_eval_dict(self, eval_dict, **kwargs):
        """Convert an evaluated feed dict into plain-Python metric inputs."""
        raise NotImplementedError
    @abstractmethod
    def setup_dataset_iterator(self):
        """Initialize the dataset's input iterator for a fresh pass."""
        raise NotImplementedError
    def _build_watch_path(self):
        # Directory that contains the checkpoints being evaluated.
        if Path(self.args.checkpoint_path).is_dir():
            return Path(self.args.checkpoint_path)
        else:
            return Path(self.args.checkpoint_path).parent
    def build_evaluation_step(self, checkpoint_path):
        """Parse the global step from a `...-<step>` checkpoint name (0 if absent)."""
        if "-" in checkpoint_path and checkpoint_path.split("-")[-1].isdigit():
            return int(checkpoint_path.split("-")[-1])
        else:
            return 0
    def build_checkpoint_paths(self, checkpoint_path):
        """Return (glob Path matching all checkpoint shards, checkpoint Path)."""
        checkpoint_glob = Path(checkpoint_path + "*")
        checkpoint_path = Path(checkpoint_path)
        return checkpoint_glob, checkpoint_path
    def build_miscellaneous_path(self, name):
        """Create (if needed) and return a per-dataset scratch directory."""
        target_dir = self.watch_path / "miscellaneous" / self.dataset_name / name
        if not target_dir.exists():
            target_dir.mkdir(parents=True)
        return target_dir
    def setup_best_keeper(self):
        """Instantiate the BestKeeper that tracks best metric values on disk."""
        metric_with_modes = self.metric_manager.get_best_keep_metric_with_modes()
        self.log.debug(metric_with_modes)
        self.best_keeper = tf_utils.BestKeeper(
            metric_with_modes,
            self.dataset_name,
            self.watch_path,
            self.log,
        )
    def evaluate_once(self, checkpoint_path):
        """Evaluate one checkpoint: restore, run metrics, keep best, write logs.

        Terminates the whole process (sys.exit) once the parsed step
        reaches ``args.max_step_from_restore``.
        """
        self.log.info("Evaluation started")
        self.setup_dataset_iterator()
        self.ckpt_loader.load(checkpoint_path)
        step = self.build_evaluation_step(checkpoint_path)
        checkpoint_glob, checkpoint_path = self.build_checkpoint_paths(checkpoint_path)
        # Reset streaming-metric accumulators before this evaluation pass.
        self.session.run(tf.local_variables_initializer())
        eval_metric_dict = self.run_evaluation(step, is_training=False)
        best_keep_metric_dict = self.metric_manager.filter_best_keep_metric(eval_metric_dict)
        is_keep, metrics_keep = self.best_keeper.monitor(self.dataset_name, best_keep_metric_dict)
        if self.args.save_best_keeper:
            meta_info = {
                "step": step,
                "model_size": self.model.total_params,
            }
            # Replace previously saved best checkpoints with the new winners.
            self.best_keeper.remove_old_best(self.dataset_name, metrics_keep)
            self.best_keeper.save_best(self.dataset_name, metrics_keep, checkpoint_glob)
            self.best_keeper.remove_temp_dir()
            self.best_keeper.save_scores(self.dataset_name, metrics_keep, best_keep_metric_dict, meta_info)
        self.metric_manager.write_evaluation_summaries(step=step,
                                                       collection_keys=[BaseSummaries.KEY_TYPES.DEFAULT])
        self.metric_manager.log_metrics(step=step)
        self.log.info("Evaluation finished")
        if step >= self.args.max_step_from_restore:
            self.log.info("Evaluation stopped")
            sys.exit()
    def build_train_directory(self):
        """Return (as str) the training directory that holds the checkpoints."""
        if Path(self.args.checkpoint_path).is_dir():
            return str(self.args.checkpoint_path)
        else:
            return str(Path(self.args.checkpoint_path).parent)
    @staticmethod
    def add_arguments(parser):
        """Register the evaluator's command-line flags on *parser*."""
        g = parser.add_argument_group("(Evaluator) arguments")
        g.add_argument("--valid_type", default="loop", type=str, choices=["loop", "once"])
        g.add_argument("--max_outputs", default=5, type=int)
        g.add_argument("--maximum_num_labels_for_metric", default=10, type=int,
                       help="Maximum number of labels for using class-specific metrics(e.g. precision/recall/f1score)")
        g.add_argument("--no-save_best_keeper", dest="save_best_keeper", action="store_false")
        g.add_argument("--save_best_keeper", dest="save_best_keeper", action="store_true")
        g.set_defaults(save_best_keeper=True)
        g.add_argument("--no-flatten_output", dest="flatten_output", action="store_false")
        g.add_argument("--flatten_output", dest="flatten_output", action="store_true")
        g.set_defaults(flatten_output=False)
        g.add_argument("--max_step_from_restore", default=1e20, type=int)
class SingleLabelAudioEvaluator(Evaluator, AudioBase):
    """Evaluator for single-label audio classification datasets."""
    def __init__(self, model, session, args, dataset, dataset_name):
        super().__init__(model, session, args, dataset, dataset_name, "SingleLabelAudioEvaluator")
        self.setup_dataset_related_attr()
        self.setup_metric_manager()
        self.setup_metric_ops()
        self.setup_best_keeper()
    def setup_dataset_related_attr(self):
        """Validate the label count and decide whether per-class metrics apply."""
        assert len(self.dataset.label_names) == self.args.num_classes
        # Per-class precision/recall/F1 only make sense for small label sets.
        self.use_class_metrics = len(self.dataset.label_names) < self.args.maximum_num_labels_for_metric
    def setup_metric_manager(self):
        """Create the audio metric manager wired with evaluation-mode summaries."""
        self.metric_manager = metric_manager.AudioMetricManager(
            is_training=False,
            use_class_metrics=self.use_class_metrics,
            exclude_metric_names=self.args.exclude_metric_names,
            summary=Summaries(
                session=self.session,
                train_dir=self.build_train_directory(),
                is_training=False,
                base_name=self.dataset.dataset_split_name,
                max_summary_outputs=self.args.max_summary_outputs,
            ),
        )
    def setup_metric_ops(self):
        """Build the metric ops, fed with losses and the raw input audio."""
        losses = self.build_basic_loss_ops()
        self.metric_tf_op = self.metric_manager.build_metric_ops({
            "dataset_split_name": self.dataset_name,
            "label_names": self.dataset.label_names,
            "losses": losses,
            "learning_rate": None,  # evaluation mode: no optimizer, so no LR
            "wavs": self.model.audio_original,
        })
    def build_non_tensor_data_from_eval_dict(self, eval_dict, **kwargs):
        """Extract the plain-Python inputs the metric manager consumes."""
        return {
            "dataset_split_name": self.dataset.dataset_split_name,
            "label_names": self.dataset.label_names,
            "predictions_onehot": eval_dict["predictions_onehot"],
            "labels_onehot": eval_dict["labels_onehot"],
        }
    def setup_dataset_iterator(self):
        """(Re)initialize the dataset iterator from its feed placeholders."""
        self.dataset.setup_iterator(
            self.session,
            self.dataset.placeholders,
            self.dataset.data,
        )
| [
"tensorflow.local_variables_initializer",
"common.tf_utils.BestKeeper",
"common.model_loader.Ckpt",
"sys.exit",
"pathlib.Path",
"tensorflow.global_variables_initializer",
"common.utils.get_logger",
"tensorflow.train.latest_checkpoint"
] | [((570, 586), 'common.utils.get_logger', 'get_logger', (['name'], {}), '(name)\n', (580, 586), False, 'from common.utils import get_logger\n'), ((1289, 1522), 'common.model_loader.Ckpt', 'Ckpt', ([], {'session': 'session', 'include_scopes': 'args.checkpoint_include_scopes', 'exclude_scopes': 'args.checkpoint_exclude_scopes', 'ignore_missing_vars': 'args.ignore_missing_vars', 'use_ema': 'self.args.use_ema', 'ema_decay': 'self.args.ema_decay'}), '(session=session, include_scopes=args.checkpoint_include_scopes,\n exclude_scopes=args.checkpoint_exclude_scopes, ignore_missing_vars=args\n .ignore_missing_vars, use_ema=self.args.use_ema, ema_decay=self.args.\n ema_decay)\n', (1293, 1522), False, 'from common.model_loader import Ckpt\n'), ((2508, 2535), 'pathlib.Path', 'Path', (["(checkpoint_path + '*')"], {}), "(checkpoint_path + '*')\n", (2512, 2535), False, 'from pathlib import Path\n'), ((2562, 2583), 'pathlib.Path', 'Path', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2566, 2583), False, 'from pathlib import Path\n'), ((3054, 3142), 'common.tf_utils.BestKeeper', 'tf_utils.BestKeeper', (['metric_with_modes', 'self.dataset_name', 'self.watch_path', 'self.log'], {}), '(metric_with_modes, self.dataset_name, self.watch_path,\n self.log)\n', (3073, 3142), True, 'import common.tf_utils as tf_utils\n'), ((829, 882), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.args.checkpoint_path'], {}), '(self.args.checkpoint_path)\n', (855, 882), True, 'import tensorflow as tf\n'), ((1167, 1200), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1198, 1200), True, 'import tensorflow as tf\n'), ((1227, 1259), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (1257, 1259), True, 'import tensorflow as tf\n'), ((2097, 2128), 'pathlib.Path', 'Path', (['self.args.checkpoint_path'], {}), '(self.args.checkpoint_path)\n', (2101, 2128), False, 'from pathlib import 
Path\n'), ((3547, 3579), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3577, 3579), True, 'import tensorflow as tf\n'), ((4717, 4727), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4725, 4727), False, 'import sys\n'), ((755, 786), 'pathlib.Path', 'Path', (['self.args.checkpoint_path'], {}), '(self.args.checkpoint_path)\n', (759, 786), False, 'from pathlib import Path\n'), ((2036, 2067), 'pathlib.Path', 'Path', (['self.args.checkpoint_path'], {}), '(self.args.checkpoint_path)\n', (2040, 2067), False, 'from pathlib import Path\n'), ((2162, 2193), 'pathlib.Path', 'Path', (['self.args.checkpoint_path'], {}), '(self.args.checkpoint_path)\n', (2166, 2193), False, 'from pathlib import Path\n'), ((4777, 4808), 'pathlib.Path', 'Path', (['self.args.checkpoint_path'], {}), '(self.args.checkpoint_path)\n', (4781, 4808), False, 'from pathlib import Path\n'), ((4906, 4937), 'pathlib.Path', 'Path', (['self.args.checkpoint_path'], {}), '(self.args.checkpoint_path)\n', (4910, 4937), False, 'from pathlib import Path\n')] |
from genshi.template import MarkupTemplate
from trac.core import *
from trac.web.chrome import Chrome
from trac.wiki.macros import WikiMacroBase
class GenshiMacro(WikiMacroBase):
    """Wiki macro that renders its body as a Genshi markup template,
    populated with Trac's standard chrome data for the current request."""

    def expand_macro(self, formatter, name, text, args):
        # Standard per-request template data (href, authname, ...).
        data = Chrome(self.env).populate_data(formatter.req, {})
        return MarkupTemplate(text).generate(**data)
| [
"trac.web.chrome.Chrome",
"genshi.template.MarkupTemplate"
] | [((257, 277), 'genshi.template.MarkupTemplate', 'MarkupTemplate', (['text'], {}), '(text)\n', (271, 277), False, 'from genshi.template import MarkupTemplate\n'), ((295, 311), 'trac.web.chrome.Chrome', 'Chrome', (['self.env'], {}), '(self.env)\n', (301, 311), False, 'from trac.web.chrome import Chrome\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Colony Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Colony Framework.
#
# Hive Colony Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Colony Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Colony Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import colony
class SchedulerPlugin(colony.Plugin):
    """
    The main class for the Scheduler plugin.

    Thin plugin facade: lifecycle methods delegate to the Scheduler system
    (scheduler_c.Scheduler) and console commands to ConsoleScheduler.
    """

    id = "pt.hive.colony.plugins.misc.scheduler"
    name = "Scheduler"
    description = "A plugin to manage the scheduling of tasks"
    version = "1.0.0"
    author = "Hive Solutions Lda. <<EMAIL>>"
    platforms = [
        colony.CPYTHON_ENVIRONMENT
    ]
    capabilities = [
        "main",
        "scheduler",
        "console_command_extension"
    ]
    dependencies = [
        colony.PluginDependency("pt.hive.colony.plugins.misc.guid"),
        colony.PluginDependency("pt.hive.colony.plugins.console")
    ]
    main_modules = [
        "scheduler_c"
    ]
    def load_plugin(self):
        """Create the scheduler system and its console extension."""
        colony.Plugin.load_plugin(self)
        import scheduler_c
        self.system = scheduler_c.Scheduler(self)
        self.console = scheduler_c.ConsoleScheduler(self)
        self.release_ready_semaphore()
    def end_load_plugin(self):
        """Start the scheduler once the plugin is fully loaded."""
        colony.Plugin.end_load_plugin(self)
        self.system.load_scheduler()
    def unload_plugin(self):
        """Stop the scheduler when the plugin is unloaded."""
        colony.Plugin.unload_plugin(self)
        self.system.unload_scheduler()
        self.release_ready_semaphore()
    def end_unload_plugin(self):
        """Finish the unload sequence."""
        colony.Plugin.end_unload_plugin(self)
        self.release_ready_semaphore()
    @colony.set_configuration_property
    def set_configuration_property(self, property_name, property):
        """Forward configuration property changes to the base plugin."""
        colony.Plugin.set_configuration_property(self, property_name, property)
    @colony.unset_configuration_property
    def unset_configuration_property(self, property_name):
        """Forward configuration property removals to the base plugin."""
        colony.Plugin.unset_configuration_property(self, property_name)
    def get_console_extension_name(self):
        """Return the console extension name (delegates to the console handler)."""
        return self.console.get_console_extension_name()
    def get_commands_map(self):
        """Return the console command map (delegates to the console handler)."""
        return self.console.get_commands_map()
    def register_task(self, task, time):
        """Schedule *task* to run after a relative time."""
        return self.system.register_task(task, time)
    def register_task_absolute(self, task, absolute_time):
        """Schedule *task* to run at an absolute timestamp."""
        return self.system.register_task_absolute(task, absolute_time)
    def register_task_date_time(self, task, date_time):
        """Schedule *task* using a relative date-time value."""
        return self.system.register_task_date_time(task, date_time)
    def register_task_date_time_absolute(self, task, absolute_date_time):
        """Schedule *task* using an absolute date-time value."""
        return self.system.register_task_date_time_absolute(task, absolute_date_time)
    def register_task_recursive(self, task, time, recursion_list):
        """Schedule a recurring *task* with a relative time and recursion spec."""
        return self.system.register_task_recursive(task, time, recursion_list)
    def register_task_absolute_recursive(self, task, absolute_time, recursion_list):
        """Schedule a recurring *task* with an absolute time and recursion spec."""
        return self.system.register_task_absolute_recursive(task, absolute_time, recursion_list)
    def register_task_date_time_recursive(self, task, date_time, recursion_list):
        """Schedule a recurring *task* with a relative date-time and recursion spec."""
        return self.system.register_task_date_time_recursive(task, date_time, recursion_list)
    def register_task_date_time_absolute_recursive(self, task, absolute_date_time, recursion_list):
        """Schedule a recurring *task* with an absolute date-time and recursion spec."""
        return self.system.register_task_date_time_absolute_recursive(task, absolute_date_time, recursion_list)
    def unregister_task(self, task):
        """Remove a previously registered *task* from the scheduler."""
        return self.system.unregister_task(task)
    def get_task_class(self):
        """
        Retrieves the class that represents
        a task in the current scope.

        :rtype: Class
        :return: The task class for the current scope.
        """
        return self.system.get_task_class()
    @colony.set_configuration_property_method("startup_configuration")
    def startup_configuration_set_configuration_property(self, property_name, property):
        """Apply the 'startup_configuration' property when it is set."""
        self.system.set_startup_configuration_property(property)
    @colony.unset_configuration_property_method("startup_configuration")
    def startup_configuration_unset_configuration_property(self, property_name):
        """Clear the 'startup_configuration' property when it is unset."""
        self.system.unset_startup_configuration_property()
| [
"colony.Plugin.set_configuration_property",
"scheduler_c.ConsoleScheduler",
"colony.Plugin.unset_configuration_property",
"colony.Plugin.end_load_plugin",
"colony.Plugin.end_unload_plugin",
"colony.unset_configuration_property_method",
"colony.PluginDependency",
"colony.Plugin.load_plugin",
"colony.... | [((4822, 4887), 'colony.set_configuration_property_method', 'colony.set_configuration_property_method', (['"""startup_configuration"""'], {}), "('startup_configuration')\n", (4862, 4887), False, 'import colony\n'), ((5052, 5119), 'colony.unset_configuration_property_method', 'colony.unset_configuration_property_method', (['"""startup_configuration"""'], {}), "('startup_configuration')\n", (5094, 5119), False, 'import colony\n'), ((1813, 1872), 'colony.PluginDependency', 'colony.PluginDependency', (['"""pt.hive.colony.plugins.misc.guid"""'], {}), "('pt.hive.colony.plugins.misc.guid')\n", (1836, 1872), False, 'import colony\n'), ((1883, 1940), 'colony.PluginDependency', 'colony.PluginDependency', (['"""pt.hive.colony.plugins.console"""'], {}), "('pt.hive.colony.plugins.console')\n", (1906, 1940), False, 'import colony\n'), ((2039, 2070), 'colony.Plugin.load_plugin', 'colony.Plugin.load_plugin', (['self'], {}), '(self)\n', (2064, 2070), False, 'import colony\n'), ((2122, 2149), 'scheduler_c.Scheduler', 'scheduler_c.Scheduler', (['self'], {}), '(self)\n', (2143, 2149), False, 'import scheduler_c\n'), ((2174, 2208), 'scheduler_c.ConsoleScheduler', 'scheduler_c.ConsoleScheduler', (['self'], {}), '(self)\n', (2202, 2208), False, 'import scheduler_c\n'), ((2292, 2327), 'colony.Plugin.end_load_plugin', 'colony.Plugin.end_load_plugin', (['self'], {}), '(self)\n', (2321, 2327), False, 'import colony\n'), ((2407, 2440), 'colony.Plugin.unload_plugin', 'colony.Plugin.unload_plugin', (['self'], {}), '(self)\n', (2434, 2440), False, 'import colony\n'), ((2566, 2603), 'colony.Plugin.end_unload_plugin', 'colony.Plugin.end_unload_plugin', (['self'], {}), '(self)\n', (2597, 2603), False, 'import colony\n'), ((2763, 2834), 'colony.Plugin.set_configuration_property', 'colony.Plugin.set_configuration_property', (['self', 'property_name', 'property'], {}), '(self, property_name, property)\n', (2803, 2834), False, 'import colony\n'), ((2948, 3011), 
'colony.Plugin.unset_configuration_property', 'colony.Plugin.unset_configuration_property', (['self', 'property_name'], {}), '(self, property_name)\n', (2990, 3011), False, 'import colony\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\SVNzhangy\fast-transfer\src\_sever.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
try:
    # PyQt4 exposes QString.fromUtf8; PySide works with native unicode
    # strings, so fall back to an identity function there.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    # Old PyQt4 API requires an explicit encoding argument for translate().
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer bindings (and PySide) drop the encoding parameter.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Qt Designer generated UI for the log-manager window.

    NOTE(review): this class is auto-generated by pyuic from a .ui file
    (see the header warning); prefer regenerating over hand-editing the
    widget-construction code below.
    """
    def setupUi(self, Form):
        """Create all widgets and layouts and install them on *Form*."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(798, 732)
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # --- "Search Setting" group: time range, IP and fuzzy-text filters ---
        self.groupBox_2 = QtGui.QGroupBox(Form)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        # Row 1: time-range filter (checkbox + start/end date-time editors).
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        spacerItem = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.checkBox_time = QtGui.QCheckBox(self.groupBox_2)
        self.checkBox_time.setObjectName(_fromUtf8("checkBox_time"))
        self.horizontalLayout.addWidget(self.checkBox_time)
        self.dateTimeEdit_start = QtGui.QDateTimeEdit(self.groupBox_2)
        self.dateTimeEdit_start.setDateTime(QtCore.QDateTime(QtCore.QDate(2017, 1, 1), QtCore.QTime(0, 0, 0)))
        self.dateTimeEdit_start.setCalendarPopup(True)
        self.dateTimeEdit_start.setObjectName(_fromUtf8("dateTimeEdit_start"))
        self.horizontalLayout.addWidget(self.dateTimeEdit_start)
        self.label_2 = QtGui.QLabel(self.groupBox_2)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.horizontalLayout.addWidget(self.label_2)
        self.dateTimeEdit_end = QtGui.QDateTimeEdit(self.groupBox_2)
        self.dateTimeEdit_end.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 1, 1), QtCore.QTime(0, 0, 0)))
        self.dateTimeEdit_end.setCalendarPopup(True)
        self.dateTimeEdit_end.setObjectName(_fromUtf8("dateTimeEdit_end"))
        self.horizontalLayout.addWidget(self.dateTimeEdit_end)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        # Row 2: IP filter (checkbox + line edit).
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        spacerItem2 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem2)
        self.checkBox_ip = QtGui.QCheckBox(self.groupBox_2)
        self.checkBox_ip.setObjectName(_fromUtf8("checkBox_ip"))
        self.horizontalLayout_3.addWidget(self.checkBox_ip)
        self.lineEdit_ip = QtGui.QLineEdit(self.groupBox_2)
        self.lineEdit_ip.setObjectName(_fromUtf8("lineEdit_ip"))
        self.horizontalLayout_3.addWidget(self.lineEdit_ip)
        spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem3)
        self.verticalLayout_2.addLayout(self.horizontalLayout_3)
        # Row 3: fuzzy-text filter (checkbox + line edit).
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        spacerItem4 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem4)
        self.checkBox_fuzzy = QtGui.QCheckBox(self.groupBox_2)
        self.checkBox_fuzzy.setObjectName(_fromUtf8("checkBox_fuzzy"))
        self.horizontalLayout_4.addWidget(self.checkBox_fuzzy)
        self.lineEdit_fuzzysearch = QtGui.QLineEdit(self.groupBox_2)
        self.lineEdit_fuzzysearch.setObjectName(_fromUtf8("lineEdit_fuzzysearch"))
        self.horizontalLayout_4.addWidget(self.lineEdit_fuzzysearch)
        spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem5)
        self.verticalLayout_2.addLayout(self.horizontalLayout_4)
        self.gridLayout.addWidget(self.groupBox_2, 1, 0, 1, 2)
        # --- "Log Display" group: log browser plus pagination controls ---
        self.groupBox = QtGui.QGroupBox(Form)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.textBrowser_log = QtGui.QTextBrowser(self.groupBox)
        self.textBrowser_log.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.IBeamCursor))
        self.textBrowser_log.setMouseTracking(True)
        self.textBrowser_log.setObjectName(_fromUtf8("textBrowser_log"))
        self.verticalLayout.addWidget(self.textBrowser_log)
        # Pagination row: page-number range edits + page up/down buttons.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.lineEdit_pagenumStart = QtGui.QLineEdit(self.groupBox)
        self.lineEdit_pagenumStart.setMaximumSize(QtCore.QSize(50, 16777215))
        self.lineEdit_pagenumStart.setObjectName(_fromUtf8("lineEdit_pagenumStart"))
        self.horizontalLayout_2.addWidget(self.lineEdit_pagenumStart)
        self.label_3 = QtGui.QLabel(self.groupBox)
        self.label_3.setMaximumSize(QtCore.QSize(20, 16777215))
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.horizontalLayout_2.addWidget(self.label_3)
        self.lineEdit_pagenumEnd = QtGui.QLineEdit(self.groupBox)
        self.lineEdit_pagenumEnd.setMaximumSize(QtCore.QSize(50, 16777215))
        self.lineEdit_pagenumEnd.setObjectName(_fromUtf8("lineEdit_pagenumEnd"))
        self.horizontalLayout_2.addWidget(self.lineEdit_pagenumEnd)
        spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem6)
        self.pushButton_pageup = QtGui.QPushButton(self.groupBox)
        self.pushButton_pageup.setObjectName(_fromUtf8("pushButton_pageup"))
        self.horizontalLayout_2.addWidget(self.pushButton_pageup)
        self.pushButton_pagedown = QtGui.QPushButton(self.groupBox)
        self.pushButton_pagedown.setObjectName(_fromUtf8("pushButton_pagedown"))
        self.horizontalLayout_2.addWidget(self.pushButton_pagedown)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 2)
        # --- Bottom row: notice label + "Check" action button ---
        self.horizontalLayout_5 = QtGui.QHBoxLayout()
        self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
        self.label_notice = QtGui.QLabel(Form)
        self.label_notice.setMinimumSize(QtCore.QSize(600, 0))
        self.label_notice.setObjectName(_fromUtf8("label_notice"))
        self.horizontalLayout_5.addWidget(self.label_notice)
        spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem7)
        self.pushButton_check = QtGui.QPushButton(Form)
        self.pushButton_check.setObjectName(_fromUtf8("pushButton_check"))
        self.horizontalLayout_5.addWidget(self.pushButton_check)
        self.gridLayout.addLayout(self.horizontalLayout_5, 2, 0, 1, 2)
        # Apply translated texts and auto-connect on_<object>_<signal> slots.
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set all user-visible (translatable) widget texts."""
        Form.setWindowTitle(_translate("Form", "LogManager", None))
        self.groupBox_2.setTitle(_translate("Form", "Search Setting", None))
        self.checkBox_time.setText(_translate("Form", "time:", None))
        self.label_2.setText(_translate("Form", "-----", None))
        self.checkBox_ip.setText(_translate("Form", "IP: ", None))
        self.checkBox_fuzzy.setText(_translate("Form", "fuzzy:", None))
        self.groupBox.setTitle(_translate("Form", "Log Display", None))
        self.label_3.setText(_translate("Form", "---", None))
        self.pushButton_pageup.setText(_translate("Form", "page up ", None))
        self.pushButton_pagedown.setText(_translate("Form", "page down", None))
        self.label_notice.setText(_translate("Form", "Notice:", None))
        self.pushButton_check.setText(_translate("Form", "Check", None))
| [
"PySide.QtCore.QDate",
"PySide.QtGui.QGridLayout",
"PySide.QtGui.QCheckBox",
"PySide.QtCore.QMetaObject.connectSlotsByName",
"PySide.QtGui.QTextBrowser",
"PySide.QtCore.QSize",
"PySide.QtGui.QHBoxLayout",
"PySide.QtGui.QDateTimeEdit",
"PySide.QtCore.QTime",
"PySide.QtGui.QPushButton",
"PySide.Qt... | [((480, 544), 'PySide.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['context', 'text', 'disambig', '_encoding'], {}), '(context, text, disambig, _encoding)\n', (508, 544), False, 'from PySide import QtCore, QtGui\n'), ((837, 860), 'PySide.QtGui.QGridLayout', 'QtGui.QGridLayout', (['Form'], {}), '(Form)\n', (854, 860), False, 'from PySide import QtCore, QtGui\n'), ((950, 971), 'PySide.QtGui.QGroupBox', 'QtGui.QGroupBox', (['Form'], {}), '(Form)\n', (965, 971), False, 'from PySide import QtCore, QtGui\n'), ((1067, 1101), 'PySide.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (1084, 1101), False, 'from PySide import QtCore, QtGui\n'), ((1209, 1228), 'PySide.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (1226, 1228), False, 'from PySide import QtCore, QtGui\n'), ((1325, 1404), 'PySide.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(20)', '(20)', 'QtGui.QSizePolicy.Minimum', 'QtGui.QSizePolicy.Minimum'], {}), '(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)\n', (1342, 1404), False, 'from PySide import QtCore, QtGui\n'), ((1484, 1516), 'PySide.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (1499, 1516), False, 'from PySide import QtCore, QtGui\n'), ((1680, 1716), 'PySide.QtGui.QDateTimeEdit', 'QtGui.QDateTimeEdit', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (1699, 1716), False, 'from PySide import QtCore, QtGui\n'), ((2050, 2079), 'PySide.QtGui.QLabel', 'QtGui.QLabel', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (2062, 2079), False, 'from PySide import QtCore, QtGui\n'), ((2223, 2259), 'PySide.QtGui.QDateTimeEdit', 'QtGui.QDateTimeEdit', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (2242, 2259), False, 'from PySide import QtCore, QtGui\n'), ((2582, 2668), 'PySide.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(40)', '(20)', 'QtGui.QSizePolicy.Expanding', 'QtGui.QSizePolicy.Minimum'], {}), '(40, 20, 
QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.\n Minimum)\n', (2599, 2668), False, 'from PySide import QtCore, QtGui\n'), ((2812, 2831), 'PySide.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (2829, 2831), False, 'from PySide import QtCore, QtGui\n'), ((2933, 3012), 'PySide.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(20)', '(20)', 'QtGui.QSizePolicy.Minimum', 'QtGui.QSizePolicy.Minimum'], {}), '(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)\n', (2950, 3012), False, 'from PySide import QtCore, QtGui\n'), ((3093, 3125), 'PySide.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (3108, 3125), False, 'from PySide import QtCore, QtGui\n'), ((3278, 3310), 'PySide.QtGui.QLineEdit', 'QtGui.QLineEdit', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (3293, 3310), False, 'from PySide import QtCore, QtGui\n'), ((3458, 3551), 'PySide.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(40)', '(20)', 'QtGui.QSizePolicy.MinimumExpanding', 'QtGui.QSizePolicy.Minimum'], {}), '(40, 20, QtGui.QSizePolicy.MinimumExpanding, QtGui.\n QSizePolicy.Minimum)\n', (3475, 3551), False, 'from PySide import QtCore, QtGui\n'), ((3699, 3718), 'PySide.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (3716, 3718), False, 'from PySide import QtCore, QtGui\n'), ((3820, 3899), 'PySide.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(20)', '(20)', 'QtGui.QSizePolicy.Minimum', 'QtGui.QSizePolicy.Minimum'], {}), '(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)\n', (3837, 3899), False, 'from PySide import QtCore, QtGui\n'), ((3983, 4015), 'PySide.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (3998, 4015), False, 'from PySide import QtCore, QtGui\n'), ((4186, 4218), 'PySide.QtGui.QLineEdit', 'QtGui.QLineEdit', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (4201, 4218), False, 'from PySide import QtCore, QtGui\n'), ((4393, 4479), 'PySide.QtGui.QSpacerItem', 'QtGui.QSpacerItem', 
(['(40)', '(20)', 'QtGui.QSizePolicy.Expanding', 'QtGui.QSizePolicy.Minimum'], {}), '(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.\n Minimum)\n', (4410, 4479), False, 'from PySide import QtCore, QtGui\n'), ((4680, 4701), 'PySide.QtGui.QGroupBox', 'QtGui.QGroupBox', (['Form'], {}), '(Form)\n', (4695, 4701), False, 'from PySide import QtCore, QtGui\n'), ((4791, 4823), 'PySide.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', (['self.groupBox'], {}), '(self.groupBox)\n', (4808, 4823), False, 'from PySide import QtCore, QtGui\n'), ((4926, 4959), 'PySide.QtGui.QTextBrowser', 'QtGui.QTextBrowser', (['self.groupBox'], {}), '(self.groupBox)\n', (4944, 4959), False, 'from PySide import QtCore, QtGui\n'), ((5279, 5298), 'PySide.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (5296, 5298), False, 'from PySide import QtCore, QtGui\n'), ((5415, 5445), 'PySide.QtGui.QLineEdit', 'QtGui.QLineEdit', (['self.groupBox'], {}), '(self.groupBox)\n', (5430, 5445), False, 'from PySide import QtCore, QtGui\n'), ((5702, 5729), 'PySide.QtGui.QLabel', 'QtGui.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (5714, 5729), False, 'from PySide import QtCore, QtGui\n'), ((5942, 5972), 'PySide.QtGui.QLineEdit', 'QtGui.QLineEdit', (['self.groupBox'], {}), '(self.groupBox)\n', (5957, 5972), False, 'from PySide import QtCore, QtGui\n'), ((6220, 6306), 'PySide.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(40)', '(20)', 'QtGui.QSizePolicy.Expanding', 'QtGui.QSizePolicy.Minimum'], {}), '(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.\n Minimum)\n', (6237, 6306), False, 'from PySide import QtCore, QtGui\n'), ((6388, 6420), 'PySide.QtGui.QPushButton', 'QtGui.QPushButton', (['self.groupBox'], {}), '(self.groupBox)\n', (6405, 6420), False, 'from PySide import QtCore, QtGui\n'), ((6599, 6631), 'PySide.QtGui.QPushButton', 'QtGui.QPushButton', (['self.groupBox'], {}), '(self.groupBox)\n', (6616, 6631), False, 'from PySide import QtCore, QtGui\n'), ((6939, 6958), 
'PySide.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (6956, 6958), False, 'from PySide import QtCore, QtGui\n'), ((7066, 7084), 'PySide.QtGui.QLabel', 'QtGui.QLabel', (['Form'], {}), '(Form)\n', (7078, 7084), False, 'from PySide import QtCore, QtGui\n'), ((7298, 7384), 'PySide.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(40)', '(20)', 'QtGui.QSizePolicy.Expanding', 'QtGui.QSizePolicy.Minimum'], {}), '(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.\n Minimum)\n', (7315, 7384), False, 'from PySide import QtCore, QtGui\n'), ((7465, 7488), 'PySide.QtGui.QPushButton', 'QtGui.QPushButton', (['Form'], {}), '(Form)\n', (7482, 7488), False, 'from PySide import QtCore, QtGui\n'), ((7742, 7785), 'PySide.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Form'], {}), '(Form)\n', (7779, 7785), False, 'from PySide import QtCore, QtGui\n'), ((628, 681), 'PySide.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['context', 'text', 'disambig'], {}), '(context, text, disambig)\n', (656, 681), False, 'from PySide import QtCore, QtGui\n'), ((5022, 5058), 'PySide.QtGui.QCursor', 'QtGui.QCursor', (['QtCore.Qt.IBeamCursor'], {}), '(QtCore.Qt.IBeamCursor)\n', (5035, 5058), False, 'from PySide import QtCore, QtGui\n'), ((5496, 5522), 'PySide.QtCore.QSize', 'QtCore.QSize', (['(50)', '(16777215)'], {}), '(50, 16777215)\n', (5508, 5522), False, 'from PySide import QtCore, QtGui\n'), ((5766, 5792), 'PySide.QtCore.QSize', 'QtCore.QSize', (['(20)', '(16777215)'], {}), '(20, 16777215)\n', (5778, 5792), False, 'from PySide import QtCore, QtGui\n'), ((6021, 6047), 'PySide.QtCore.QSize', 'QtCore.QSize', (['(50)', '(16777215)'], {}), '(50, 16777215)\n', (6033, 6047), False, 'from PySide import QtCore, QtGui\n'), ((7126, 7146), 'PySide.QtCore.QSize', 'QtCore.QSize', (['(600)', '(0)'], {}), '(600, 0)\n', (7138, 7146), False, 'from PySide import QtCore, QtGui\n'), ((1778, 1802), 'PySide.QtCore.QDate', 'QtCore.QDate', (['(2017)', 
'(1)', '(1)'], {}), '(2017, 1, 1)\n', (1790, 1802), False, 'from PySide import QtCore, QtGui\n'), ((1804, 1825), 'PySide.QtCore.QTime', 'QtCore.QTime', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1816, 1825), False, 'from PySide import QtCore, QtGui\n'), ((2319, 2343), 'PySide.QtCore.QDate', 'QtCore.QDate', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (2331, 2343), False, 'from PySide import QtCore, QtGui\n'), ((2345, 2366), 'PySide.QtCore.QTime', 'QtCore.QTime', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2357, 2366), False, 'from PySide import QtCore, QtGui\n')] |
import json
from aleph.tests.util import TestCase
class DocumentsApiTestCase(TestCase):
    """Integration tests for the ``/api/1/documents`` endpoints."""

    def setUp(self):
        super(DocumentsApiTestCase, self).setUp()
        self.load_fixtures('docs.yaml')

    def test_index(self):
        """Anonymous listing works; admins can filter by content hash."""
        response = self.client.get('/api/1/documents')
        assert response.status_code == 200, response
        self.login(is_admin=True)
        response = self.client.get('/api/1/documents')
        assert response.status_code == 200, response
        assert response.json['total'] == 4, response.json
        wanted_hash = '720badc9cfa9a80fc455239f86c56273dc5c8291'
        response = self.client.get('/api/1/documents?content_hash=%s' % wanted_hash)
        assert response.status_code == 200, response
        assert response.json['total'] == 1, response.json
        assert response.json['results'][0]['content_hash'] == wanted_hash, response.json

    def test_view(self):
        """A known document resolves; an unknown id yields 404."""
        doc_id = 1000
        response = self.client.get('/api/1/documents/%s' % doc_id)
        assert response.status_code == 200, response
        assert response.json['foreign_id'] == 'test1', response
        response = self.client.get('/api/1/documents/328984')
        assert response.status_code == 404, response

    def test_view_tables(self):
        """Table metadata is returned; out-of-range table index yields 404."""
        doc_id = 1003
        response = self.client.get('/api/1/documents/%s/tables/0' % doc_id)
        assert response.status_code == 200, response
        assert 'sheet_name' in response.json, response.json
        response = self.client.get('/api/1/documents/%s/tables/444' % doc_id)
        assert response.status_code == 404, response

    def test_view_records(self):
        """Record listing returns the fixture's ten rows."""
        response = self.client.get('/api/1/documents/1003/records')
        assert response.status_code == 200, response
        assert 'results' in response.json, response.json
        assert len(response.json['results']) == 10, response.json

    def test_view_record_by_id(self):
        """Individual records resolve by id; missing ids yield 404."""
        doc_id = 1000
        response = self.client.get('/api/1/documents/%s/records/1' % doc_id)
        assert response.status_code == 200, response
        assert 'banana' in response.json['text'], response
        assert 'total' not in response.json['text'], response
        response = self.client.get('/api/1/documents/%s/records/2' % doc_id)
        assert 'total' in response.json['text'], response
        response = self.client.get('/api/1/documents/%s/records/2000' % doc_id)
        assert response.status_code == 404, response

    def test_records_search(self):
        """Full-text query over records finds the single matching row."""
        response = self.client.get('/api/1/documents/1003/records?q=kwazulu')
        assert response.status_code == 200, response
        assert response.json['total'] == 1, response.json

    def test_view_pdf(self):
        """PDF view: 400 for a tabular document, 404 when no PDF exists."""
        response = self.client.get('/api/1/documents/1003/pdf')
        assert response.status_code == 400, response
        response = self.client.get('/api/1/documents/1000/pdf')
        assert response.status_code == 404, response

    def test_view_references(self):
        """References require admin rights."""
        doc_id = 1001
        response = self.client.get('/api/1/documents/%s/references' % doc_id)
        assert response.status_code == 403, response
        self.login(is_admin=True)
        response = self.client.get('/api/1/documents/%s/references' % doc_id)
        assert response.status_code == 200, response
        assert 'results' in response.json, response.json
        # assert len(response.json['results']) == 2, response.json

    def test_update_simple(self):
        """Updating a document is forbidden anonymously, allowed as admin."""
        url = '/api/1/documents/1000'
        response = self.client.get(url)
        assert response.status_code == 200, response
        payload = response.json
        response = self.client.post(url, data=json.dumps(payload),
                                    content_type='application/json')
        assert response.status_code == 403, response.json
        payload['title'] = 'Eaten by a pumpkin'
        self.login(is_admin=True)
        response = self.client.post(url, data=json.dumps(payload),
                                    content_type='application/json')
        assert response.status_code == 200, response.json
        assert response.json['title'] == payload['title'], response.json

    def test_update_invalid(self):
        """Each invalid field value is rejected with HTTP 400."""
        url = '/api/1/documents/1000'
        original = self.client.get(url)
        self.login(is_admin=True)
        for field, bad_value in (('countries', ['xz']),
                                 ('urls', ['lalala']),
                                 ('dates', ['2011-XX-XX'])):
            payload = original.json.copy()
            payload[field] = bad_value
            response = self.client.post(url, data=json.dumps(payload),
                                        content_type='application/json')
            assert response.status_code == 400, response.json
| [
"json.dumps"
] | [((3274, 3290), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3284, 3290), False, 'import json\n'), ((3525, 3541), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3535, 3541), False, 'import json\n'), ((3968, 3984), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3978, 3984), False, 'import json\n'), ((4206, 4222), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4216, 4222), False, 'import json\n'), ((4449, 4465), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4459, 4465), False, 'import json\n')] |
import numpy as np
import pandas as pd
from os.path import join as oj
import os
import pygsheets
import pandas as pd
import sys
import inspect
from datetime import datetime, timedelta
# Make the repository root and its modeling/ directory importable when this
# script is run directly, regardless of the current working directory.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
sys.path.append(parentdir + '/modeling')
import load_data
from fit_and_predict import fit_and_predict_ensemble
from functions import merge_data
from viz import viz_interactive
import matplotlib.pyplot as plt
import plotly.express as px
import plotly
def predictions_plot(df_county, NUM_DAYS_LIST, num_days_in_past, output_key):
    """Write an interactive scatter of predicted vs. actual county deaths.

    Renames the prediction/actual columns to human-readable axis labels,
    draws an identity line on log-log axes, and saves the figure to
    results/predictions.html without opening a browser.
    """
    today = datetime.today().strftime("%B %d")
    day_past = (datetime.now() - timedelta(days=num_days_in_past)).strftime("%B %d")
    pred_key = f'Predicted deaths by {today}\n(predicted on {day_past})'
    deaths_key = f'Actual deaths by {today}'
    frame = df_county.rename(columns={output_key: pred_key,
                                      'tot_deaths': deaths_key})

    # Endpoints of the y=x reference line (shifted above zero for log axes).
    axis_min = min(min(frame[pred_key]), min(frame[deaths_key])) + 1
    axis_max = max(max(frame[pred_key]), max(frame[deaths_key]))

    # Override plotly's first three default colors with a diverging palette.
    px.colors.DEFAULT_PLOTLY_COLORS[:3] = ['rgb(239,138,98)', 'rgb(247,247,247)', 'rgb(103,169,207)']
    fig = px.scatter(frame,
                     x=deaths_key,
                     y=pred_key,
                     size='PopulationEstimate2018',
                     hover_name="CountyName",
                     hover_data=["CountyName", 'StateName'],
                     log_x=True, log_y=True)
    identity_line = dict(type='line',
                         yref='y', y0=axis_min, y1=axis_max,
                         xref='x', x0=axis_min, x1=axis_max,
                         opacity=0.2)
    fig.update_layout(shapes=[identity_line])
    fig.update_layout(paper_bgcolor='rgba(0,0,0,255)',
                      plot_bgcolor='rgba(0,0,0,255)',
                      template='plotly_dark',
                      title='County-level predictions')
    plotly.offline.plot(fig,
                        filename=oj(parentdir, 'results', 'predictions.html'),
                        auto_open=False)
if __name__ == '__main__':
    print('loading data...')
    NUM_DAYS_LIST = [1, 2, 3, 4, 5, 6, 7]
    df_county = load_data.load_county_level(data_dir=oj(parentdir, 'data'))
    num_days_in_past = 3
    # Column that will hold the ensemble's N-day-ahead death predictions.
    # Fix: the literal had been garbled to '<PASSWORD>ed Deaths ...' (redaction
    # artifact); restore the intended 'Predicted Deaths {n}-day' key.
    output_key = f'Predicted Deaths {num_days_in_past}-day'
    df_county = fit_and_predict_ensemble(df_county,
                                      outcome='deaths',
                                      mode='eval_mode',
                                      target_day=np.array([num_days_in_past]),
                                      output_key=output_key)
    # Each cell is a length-1 array (single target day); unwrap the scalar.
    df_county[output_key] = [v[0] for v in df_county[output_key].values]
    predictions_plot(df_county, NUM_DAYS_LIST, num_days_in_past, output_key)
"plotly.express.scatter",
"inspect.currentframe",
"os.path.join",
"os.path.dirname",
"numpy.array",
"datetime.datetime.now",
"datetime.datetime.today",
"datetime.timedelta",
"sys.path.append"
] | [((284, 311), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (299, 311), False, 'import os\n'), ((312, 338), 'sys.path.append', 'sys.path.append', (['parentdir'], {}), '(parentdir)\n', (327, 338), False, 'import sys\n'), ((339, 379), 'sys.path.append', 'sys.path.append', (["(parentdir + '/modeling')"], {}), "(parentdir + '/modeling')\n", (354, 379), False, 'import sys\n'), ((1248, 1416), 'plotly.express.scatter', 'px.scatter', (['d'], {'x': 'deaths_key', 'y': 'pred_key', 'size': '"""PopulationEstimate2018"""', 'hover_name': '"""CountyName"""', 'hover_data': "['CountyName', 'StateName']", 'log_x': '(True)', 'log_y': '(True)'}), "(d, x=deaths_key, y=pred_key, size='PopulationEstimate2018',\n hover_name='CountyName', hover_data=['CountyName', 'StateName'], log_x=\n True, log_y=True)\n", (1258, 1416), True, 'import plotly.express as px\n'), ((246, 268), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (266, 268), False, 'import inspect\n'), ((681, 697), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (695, 697), False, 'from datetime import datetime, timedelta\n'), ((1969, 2013), 'os.path.join', 'oj', (['parentdir', '"""results"""', '"""predictions.html"""'], {}), "(parentdir, 'results', 'predictions.html')\n", (1971, 2013), True, 'from os.path import join as oj\n'), ((2188, 2209), 'os.path.join', 'oj', (['parentdir', '"""data"""'], {}), "(parentdir, 'data')\n", (2190, 2209), True, 'from os.path import join as oj\n'), ((2501, 2529), 'numpy.array', 'np.array', (['[num_days_in_past]'], {}), '([num_days_in_past])\n', (2509, 2529), True, 'import numpy as np\n'), ((732, 746), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (744, 746), False, 'from datetime import datetime, timedelta\n'), ((749, 781), 'datetime.timedelta', 'timedelta', ([], {'days': 'num_days_in_past'}), '(days=num_days_in_past)\n', (758, 781), False, 'from datetime import datetime, timedelta\n')] |
"""
.. module:: cnn_train
:synopsis: Example nuts-ml pipeline for training a MLP on MNIST
"""
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import nutsflow as nf
import nutsml as nm
import numpy as np
from nutsml.network import PytorchNetwork
from utils import download_mnist, load_mnist
class Model(nn.Module):
    """Three-layer fully connected classifier for 28x28 MNIST images."""

    def __init__(self, device):
        """Construct model on given device, e.g. 'cpu' or 'cuda'"""
        super().__init__()
        # 784 -> 500 -> 256 -> 10 (one logit per digit class).
        self.fc1 = nn.Linear(28 * 28, 500)
        self.fc2 = nn.Linear(500, 256)
        self.fc3 = nn.Linear(256, 10)
        self.to(device)  # move parameters before constructing the optimizer

        # Required properties for wrapping this model in a PytorchNetwork.
        self.device = device  # e.g. 'cpu', 'cuda' or 'cuda:0'
        self.losses = nn.CrossEntropyLoss()  # can be a list of loss functions
        self.optimizer = optim.Adam(self.parameters())

    def forward(self, x):
        """Forward pass through network for input x"""
        flattened = x.view(-1, 28 * 28)
        hidden = F.relu(self.fc1(flattened))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def accuracy(y_true, y_pred):
    """Compute classification accuracy in percent.

    :param y_true: iterable of true integer class labels.
    :param y_pred: iterable of per-sample score vectors; the predicted
        label for a sample is the argmax of its score vector.
    :return: percentage (0..100) of samples where argmax matches the label.
    """
    # The metric is a one-liner, so avoid importing scikit-learn (which is
    # not used anywhere else in this example) just for accuracy_score.
    y_true = list(y_true)
    correct = sum(int(yt == yp.argmax()) for yt, yp in zip(y_true, y_pred))
    return 100 * correct / len(y_true)
def evaluate(network, x, y):
    """Evaluate network performance (here accuracy)"""
    batcher = (nm.BuildBatch(64)
               .input(0, 'vector', 'float32')
               .output(1, 'number', 'int64'))
    # Stream (sample, label) pairs through the network and score them.
    return zip(x, y) >> batcher >> network.evaluate([accuracy])
def train(network, epochs=3):
    """Train network for the given number of epochs, printing metrics."""
    print('loading data...')
    filepath = download_mnist()
    x_train, y_train, x_test, y_test = load_mnist(filepath)

    plot = nm.PlotLines(None, every_sec=0.2)
    batcher = (nm.BuildBatch(128)
               .input(0, 'vector', 'float32')
               .output(1, 'number', 'int64'))

    for epoch in range(epochs):
        print('epoch', epoch + 1)
        # Shuffle samples, batch them, run a training pass, plot the losses.
        samples = zip(x_train, y_train) >> nf.PrintProgress(x_train) >> nf.Shuffle(1000)
        losses = samples >> batcher >> network.train() >> plot >> nf.Collect()

        acc_test = evaluate(network, x_test, y_test)
        acc_train = evaluate(network, x_train, y_train)
        print('train loss : {:.6f}'.format(np.mean(losses)))
        print('train acc : {:.1f}'.format(acc_train))
        print('test acc : {:.1f}'.format(acc_test))
if __name__ == '__main__':
    print('creating model...')
    # Prefer the GPU when one is available.
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    network = PytorchNetwork(Model(device))
    # network.load_weights()
    network.print_layers((28 * 28,))
    print('training network...')
    train(network, epochs=3)
| [
"numpy.mean",
"utils.load_mnist",
"torch.nn.CrossEntropyLoss",
"nutsml.network.PytorchNetwork",
"nutsflow.Collect",
"nutsml.PlotLines",
"utils.download_mnist",
"torch.cuda.is_available",
"nutsflow.Shuffle",
"nutsflow.PrintProgress",
"torch.nn.Linear",
"nutsml.BuildBatch",
"sklearn.metrics.ac... | [((1856, 1872), 'utils.download_mnist', 'download_mnist', ([], {}), '()\n', (1870, 1872), False, 'from utils import download_mnist, load_mnist\n'), ((1912, 1932), 'utils.load_mnist', 'load_mnist', (['filepath'], {}), '(filepath)\n', (1922, 1932), False, 'from utils import download_mnist, load_mnist\n'), ((1945, 1978), 'nutsml.PlotLines', 'nm.PlotLines', (['None'], {'every_sec': '(0.2)'}), '(None, every_sec=0.2)\n', (1957, 1978), True, 'import nutsml as nm\n'), ((2807, 2828), 'nutsml.network.PytorchNetwork', 'PytorchNetwork', (['model'], {}), '(model)\n', (2821, 2828), False, 'from nutsml.network import PytorchNetwork\n'), ((550, 573), 'torch.nn.Linear', 'nn.Linear', (['(28 * 28)', '(500)'], {}), '(28 * 28, 500)\n', (559, 573), True, 'import torch.nn as nn\n'), ((593, 612), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(256)'], {}), '(500, 256)\n', (602, 612), True, 'import torch.nn as nn\n'), ((632, 650), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(10)'], {}), '(256, 10)\n', (641, 650), True, 'import torch.nn as nn\n'), ((875, 896), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (894, 896), True, 'import torch.nn as nn\n'), ((1371, 1401), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1385, 1401), False, 'from sklearn.metrics import accuracy_score\n'), ((2730, 2755), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2753, 2755), False, 'import torch\n'), ((2354, 2366), 'nutsflow.Collect', 'nf.Collect', ([], {}), '()\n', (2364, 2366), True, 'import nutsflow as nf\n'), ((2520, 2535), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2527, 2535), True, 'import numpy as np\n'), ((1532, 1549), 'nutsml.BuildBatch', 'nm.BuildBatch', (['(64)'], {}), '(64)\n', (1545, 1549), True, 'import nutsml as nm\n'), ((1998, 2016), 'nutsml.BuildBatch', 'nm.BuildBatch', (['(128)'], {}), '(128)\n', (2011, 2016), True, 'import nutsml as nm\n'), ((2274, 
2290), 'nutsflow.Shuffle', 'nf.Shuffle', (['(1000)'], {}), '(1000)\n', (2284, 2290), True, 'import nutsflow as nf\n'), ((2227, 2252), 'nutsflow.PrintProgress', 'nf.PrintProgress', (['x_train'], {}), '(x_train)\n', (2243, 2252), True, 'import nutsflow as nf\n')] |
# Generated by Django 3.1.4 on 2020-12-27 15:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Product table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('short_description', models.TextField(max_length=255)),
                ('long_description', models.TextField()),
                # Images are stored under product_pictures/<year>/<month>/.
                ('image', models.ImageField(blank=True, null=True, upload_to='product_pictures/%Y/%m')),
                ('slug', models.SlugField(unique=True)),
                ('price_marketing', models.FloatField()),
                ('price_marketing_promotion', models.FloatField(default=0)),
                # NOTE(review): 'FIELDNAME' looks like a placeholder left in the
                # generated migration; the choices suggest a product-type flag
                # ('V' = variação/variation, 'S' = simples/simple) — confirm.
                ('FIELDNAME', models.CharField(choices=[('V', 'Variação'), ('S', 'Simples')], default='V', max_length=1)),
            ],
        ),
    ]
| [
"django.db.models.FloatField",
"django.db.models.TextField",
"django.db.models.SlugField",
"django.db.models.AutoField",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((303, 396), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (319, 396), False, 'from django.db import migrations, models\n'), ((420, 452), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (436, 452), False, 'from django.db import migrations, models\n'), ((493, 525), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (509, 525), False, 'from django.db import migrations, models\n'), ((565, 583), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (581, 583), False, 'from django.db import migrations, models\n'), ((612, 688), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""product_pictures/%Y/%m"""'}), "(blank=True, null=True, upload_to='product_pictures/%Y/%m')\n", (629, 688), False, 'from django.db import migrations, models\n'), ((716, 745), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique': '(True)'}), '(unique=True)\n', (732, 745), False, 'from django.db import migrations, models\n'), ((784, 803), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (801, 803), False, 'from django.db import migrations, models\n'), ((852, 880), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (869, 880), False, 'from django.db import migrations, models\n'), ((913, 1007), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('V', 'Variação'), ('S', 'Simples')]", 'default': '"""V"""', 'max_length': '(1)'}), "(choices=[('V', 'Variação'), ('S', 'Simples')], default='V',\n max_length=1)\n", (929, 1007), False, 'from django.db import migrations, models\n')] |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import os
import sys
from six import StringIO
from pants.reporting.html_reporter import HtmlReporter
from pants.reporting.plaintext_reporter import PlainTextReporter
from pants.reporting.quiet_reporter import QuietReporter
from pants.reporting.report import Report, ReportingError
from pants.reporting.reporting_server import ReportingServerManager
from pants.util.dirutil import safe_mkdir, safe_rmtree
def initial_reporting(config, run_tracker):
  """Sets up the initial reporting configuration.

  Console output is buffered until the cmd-line flags have been parsed,
  at which point `update_reporting` installs the real console reporter.
  """
  reports_base = os.path.join(config.getdefault('pants_workdir'), 'reports')
  latest_link = os.path.join(reports_base, 'latest')
  run_id = run_tracker.run_info.get_info('id')
  if run_id is None:
    raise ReportingError('No run_id set')
  run_dir = os.path.join(reports_base, run_id)
  safe_rmtree(run_dir)
  html_dir = os.path.join(run_dir, 'html')
  safe_mkdir(html_dir)
  try:
    if os.path.lexists(latest_link):
      os.unlink(latest_link)
    os.symlink(run_dir, latest_link)
  except OSError as e:
    # Another run may beat us to deletion or creation.
    if e.errno not in (errno.EEXIST, errno.ENOENT):
      raise
  report = Report()
  # Capture initial console reporting into a buffer; `update_reporting`
  # decides what to do with it once the cmd-line flag settings are known.
  buffered = StringIO()
  capture_settings = PlainTextReporter.Settings(outfile=buffered, log_level=Report.INFO,
                                              color=False, indent=True, timing=False,
                                              cache_stats=False)
  report.add_reporter('capturing', PlainTextReporter(run_tracker, capture_settings))
  # HTML reporting is always enabled.
  template_dir = config.get('reporting', 'reports_template_dir')
  html_settings = HtmlReporter.Settings(log_level=Report.INFO,
                                        html_dir=html_dir,
                                        template_dir=template_dir)
  html_reporter = HtmlReporter(run_tracker, html_settings)
  report.add_reporter('html', html_reporter)
  # Record useful RunInfo entries.
  run_tracker.run_info.add_info('default_report', html_reporter.report_path())
  (_, port) = ReportingServerManager.get_current_server_pid_and_port()
  if port:
    run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))
  return report
def update_reporting(options, is_quiet_task, run_tracker):
  """Updates reporting config once we've parsed cmd-line flags."""
  # Drain the console output buffered by `initial_reporting` and drop
  # the capturing reporter.
  capture = run_tracker.report.remove_reporter('capturing').settings.outfile
  capture.flush()
  buffered_output = capture.getvalue()
  capture.close()
  log_level = Report.log_level_from_string(options.level or 'info')
  # Ideally, we'd use terminfo or somesuch to discover whether a
  # terminal truly supports color, but most that don't set TERM=dumb.
  color = options.colors and (os.getenv('TERM') != 'dumb')
  timing = options.time
  cache_stats = options.time  # TODO: Separate flag for this?
  if options.quiet or is_quiet_task:
    console_reporter = QuietReporter(run_tracker,
                                     QuietReporter.Settings(log_level=log_level, color=color))
  else:
    # Set up the new console reporter.
    console_settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, color=color,
                                                indent=True, timing=timing, cache_stats=cache_stats)
    console_reporter = PlainTextReporter(run_tracker, console_settings)
  console_reporter.emit(buffered_output)
  console_reporter.flush()
  run_tracker.report.add_reporter('console', console_reporter)
  if options.logdir:
    # Also write plaintext logs to a file; this is entirely independent
    # of the html reports.
    safe_mkdir(options.logdir)
    run_id = run_tracker.run_info.get_info('id')
    logfile = open(os.path.join(options.logdir, '{}.log'.format(run_id)), 'w')
    logfile_settings = PlainTextReporter.Settings(log_level=log_level, outfile=logfile, color=False,
                                                indent=True, timing=True, cache_stats=True)
    logfile_reporter = PlainTextReporter(run_tracker, logfile_settings)
    logfile_reporter.emit(buffered_output)
    logfile_reporter.flush()
    run_tracker.report.add_reporter('logfile', logfile_reporter)
| [
"pants.reporting.plaintext_reporter.PlainTextReporter.Settings",
"pants.reporting.report.Report",
"pants.reporting.plaintext_reporter.PlainTextReporter",
"pants.reporting.reporting_server.ReportingServerManager.get_current_server_pid_and_port",
"os.getenv",
"pants.reporting.report.Report.log_level_from_st... | [((966, 1001), 'os.path.join', 'os.path.join', (['reports_dir', '"""latest"""'], {}), "(reports_dir, 'latest')\n", (978, 1001), False, 'import os\n'), ((1125, 1158), 'os.path.join', 'os.path.join', (['reports_dir', 'run_id'], {}), '(reports_dir, run_id)\n', (1137, 1158), False, 'import os\n'), ((1161, 1181), 'pants.util.dirutil.safe_rmtree', 'safe_rmtree', (['run_dir'], {}), '(run_dir)\n', (1172, 1181), False, 'from pants.util.dirutil import safe_mkdir, safe_rmtree\n'), ((1196, 1225), 'os.path.join', 'os.path.join', (['run_dir', '"""html"""'], {}), "(run_dir, 'html')\n", (1208, 1225), False, 'import os\n'), ((1228, 1248), 'pants.util.dirutil.safe_mkdir', 'safe_mkdir', (['html_dir'], {}), '(html_dir)\n', (1238, 1248), False, 'from pants.util.dirutil import safe_mkdir, safe_rmtree\n'), ((1536, 1544), 'pants.reporting.report.Report', 'Report', ([], {}), '()\n', (1542, 1544), False, 'from pants.reporting.report import Report, ReportingError\n'), ((1692, 1702), 'six.StringIO', 'StringIO', ([], {}), '()\n', (1700, 1702), False, 'from six import StringIO\n'), ((1735, 1865), 'pants.reporting.plaintext_reporter.PlainTextReporter.Settings', 'PlainTextReporter.Settings', ([], {'outfile': 'outfile', 'log_level': 'Report.INFO', 'color': '(False)', 'indent': '(True)', 'timing': '(False)', 'cache_stats': '(False)'}), '(outfile=outfile, log_level=Report.INFO, color=\n False, indent=True, timing=False, cache_stats=False)\n', (1761, 1865), False, 'from pants.reporting.plaintext_reporter import PlainTextReporter\n'), ((2002, 2061), 'pants.reporting.plaintext_reporter.PlainTextReporter', 'PlainTextReporter', (['run_tracker', 'capturing_reporter_settings'], {}), '(run_tracker, capturing_reporter_settings)\n', (2019, 2061), False, 'from pants.reporting.plaintext_reporter import PlainTextReporter\n'), ((2258, 2352), 'pants.reporting.html_reporter.HtmlReporter.Settings', 'HtmlReporter.Settings', ([], {'log_level': 'Report.INFO', 
'html_dir': 'html_dir', 'template_dir': 'template_dir'}), '(log_level=Report.INFO, html_dir=html_dir,\n template_dir=template_dir)\n', (2279, 2352), False, 'from pants.reporting.html_reporter import HtmlReporter\n'), ((2465, 2514), 'pants.reporting.html_reporter.HtmlReporter', 'HtmlReporter', (['run_tracker', 'html_reporter_settings'], {}), '(run_tracker, html_reporter_settings)\n', (2477, 2514), False, 'from pants.reporting.html_reporter import HtmlReporter\n'), ((2683, 2739), 'pants.reporting.reporting_server.ReportingServerManager.get_current_server_pid_and_port', 'ReportingServerManager.get_current_server_pid_and_port', ([], {}), '()\n', (2737, 2739), False, 'from pants.reporting.reporting_server import ReportingServerManager\n'), ((3259, 3312), 'pants.reporting.report.Report.log_level_from_string', 'Report.log_level_from_string', (["(options.level or 'info')"], {}), "(options.level or 'info')\n", (3287, 3312), False, 'from pants.reporting.report import Report, ReportingError\n'), ((1081, 1112), 'pants.reporting.report.ReportingError', 'ReportingError', (['"""No run_id set"""'], {}), "('No run_id set')\n", (1095, 1112), False, 'from pants.reporting.report import Report, ReportingError\n'), ((1264, 1295), 'os.path.lexists', 'os.path.lexists', (['link_to_latest'], {}), '(link_to_latest)\n', (1279, 1295), False, 'import os\n'), ((1333, 1368), 'os.symlink', 'os.symlink', (['run_dir', 'link_to_latest'], {}), '(run_dir, link_to_latest)\n', (1343, 1368), False, 'import os\n'), ((3840, 3978), 'pants.reporting.plaintext_reporter.PlainTextReporter.Settings', 'PlainTextReporter.Settings', ([], {'log_level': 'log_level', 'outfile': 'sys.stdout', 'color': 'color', 'indent': '(True)', 'timing': 'timing', 'cache_stats': 'cache_stats'}), '(log_level=log_level, outfile=sys.stdout, color=\n color, indent=True, timing=timing, cache_stats=cache_stats)\n', (3866, 3978), False, 'from pants.reporting.plaintext_reporter import PlainTextReporter\n'), ((4039, 4079), 
'pants.reporting.plaintext_reporter.PlainTextReporter', 'PlainTextReporter', (['run_tracker', 'settings'], {}), '(run_tracker, settings)\n', (4056, 4079), False, 'from pants.reporting.plaintext_reporter import PlainTextReporter\n'), ((4335, 4361), 'pants.util.dirutil.safe_mkdir', 'safe_mkdir', (['options.logdir'], {}), '(options.logdir)\n', (4345, 4361), False, 'from pants.util.dirutil import safe_mkdir, safe_rmtree\n'), ((4505, 4631), 'pants.reporting.plaintext_reporter.PlainTextReporter.Settings', 'PlainTextReporter.Settings', ([], {'log_level': 'log_level', 'outfile': 'outfile', 'color': '(False)', 'indent': '(True)', 'timing': '(True)', 'cache_stats': '(True)'}), '(log_level=log_level, outfile=outfile, color=\n False, indent=True, timing=True, cache_stats=True)\n', (4531, 4631), False, 'from pants.reporting.plaintext_reporter import PlainTextReporter\n'), ((4692, 4732), 'pants.reporting.plaintext_reporter.PlainTextReporter', 'PlainTextReporter', (['run_tracker', 'settings'], {}), '(run_tracker, settings)\n', (4709, 4732), False, 'from pants.reporting.plaintext_reporter import PlainTextReporter\n'), ((1303, 1328), 'os.unlink', 'os.unlink', (['link_to_latest'], {}), '(link_to_latest)\n', (1312, 1328), False, 'import os\n'), ((3480, 3497), 'os.getenv', 'os.getenv', (['"""TERM"""'], {}), "('TERM')\n", (3489, 3497), False, 'import os\n'), ((3720, 3776), 'pants.reporting.quiet_reporter.QuietReporter.Settings', 'QuietReporter.Settings', ([], {'log_level': 'log_level', 'color': 'color'}), '(log_level=log_level, color=color)\n', (3742, 3776), False, 'from pants.reporting.quiet_reporter import QuietReporter\n')] |
"""
All the data sources are scattered around the D drive, this script
organizes it and consolidates it into the "Data" subfolder in the
"Chapter 2 Dune Aspect Ratio" folder.
<NAME>, 5/6/2020
"""
import shutil as sh
import pandas as pd
import numpy as np
import os
# Directory (relative to this script's folder) that consolidated files are saved into
DATA_DIR = os.path.join('..', 'Data')
# Directory holding most of the XBeach simulation data
XB_DIR = os.path.join('..', '..', 'XBeach Modelling', 'Dune Complexity Experiments')
def bogue_lidar_data():
    """
    Load all Bogue Banks morphometrics from 1997-2016 and save a .csv
    of dune aspect ratios and natural dune volumes into DATA_DIR.
    """
    # Years with available LiDAR-derived morphometrics
    years = [1997, 1998, 1999, 2000, 2004, 2005, 2010, 2011, 2014, 2016]
    # Load each year's morphometrics and tag the rows with the year.
    # Collect the frames in a list and concatenate once at the end:
    # calling pd.concat inside the loop re-copies the accumulated data
    # every iteration (quadratic behavior).
    frames = []
    for year in years:
        path = os.path.join('..', '..', 'Chapter 1 Sand Fences', 'Data', f'Morphometrics for Bogue {year}.csv')
        temp = pd.read_csv(path, delimiter=',', header=0)
        temp['Year'] = year
        frames.append(temp)
    morpho = pd.concat(frames)
    # Keep just the year, dune aspect ratio (height / width), and volume
    data = pd.DataFrame()
    data['Year'] = morpho['Year']
    data['Ratio'] = (morpho['y_crest'] - morpho['y_toe']) / (morpho['x_heel'] - morpho['x_toe'])
    data['Volume'] = morpho['Natural Dune Volume']
    # Save the DataFrame to the data folder
    save_name = os.path.join(DATA_DIR, 'Bogue Banks Volumes and Aspect Ratios.csv')
    data.to_csv(save_name, index=False)
    print(f'File Saved: {save_name}')
def initial_profiles():
    """
    Collect the initial (pre-storm) profiles used by the simulations into
    a single .csv: a column for the experiment name, a column for the
    X-grid, and one column of bed elevations per aspect-ratio stretch.
    """
    # The initial profiles are the same regardless of the surge level,
    # so take them from the half-surge simulations.
    experiments = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
    # Aspect-ratio stretches, matching the simulation folder names
    # ('Dune Complexity <stretch> 1') and the output column names.
    stretches = [1, 20, 40, 60, -20, -40, -60]
    frames = []
    for experiment in experiments:
        profile_dir = os.path.join(XB_DIR, f'{experiment} Half Surge')
        # Load the cross-shore grid (shared by all stretches of this experiment)
        x_grid = np.loadtxt(os.path.join(profile_dir, 'Dune Complexity 1 1', 'x.grd'))
        # One column per stretched dune profile; looping here replaces
        # seven copy-pasted np.loadtxt calls.
        dune_dict = {
            'Experiment': experiment.replace('Joined', 'Aligned'),
            'X': x_grid,
        }
        for stretch in stretches:
            bed_file = os.path.join(profile_dir, f'Dune Complexity {stretch} 1', 'bed.dep')
            dune_dict[f'{stretch} pct'] = np.loadtxt(fname=bed_file)
        frames.append(pd.DataFrame(data=dune_dict))
    # Concatenate once at the end (concat inside the loop is quadratic)
    profiles = pd.concat(frames)
    # Save the DataFrame to the data folder
    save_name = os.path.join(DATA_DIR, 'Initial Profiles.csv')
    profiles.to_csv(save_name, index=False)
    print(f'File Saved: {save_name}')
def initial_ratios():
    """
    Make a .csv file with the initial dune aspect ratios and dune
    volumes for the profiles used in the simulations.
    """
    # The initial profiles are the same regardless of the surge level,
    # so take them from the half-surge simulations.
    experiments = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
    # Collect per-experiment frames and concatenate once at the end:
    # pd.concat inside the loop re-copies the accumulated data each pass.
    frames = []
    for experiment in experiments:
        # Load the initial dune ratios recorded during simulation setup
        init_ratio_fname = os.path.join(XB_DIR, f'{experiment} Half Surge', 'Setup Data', 'Initial Dune Ratios.csv')
        init_ratios = pd.read_csv(init_ratio_fname, delimiter=',', header=None, names=['Stretch', 'Ratio', 'Volume'])
        # Tag the rows with the experiment name
        init_ratios['Experiment'] = experiment.replace('Joined', 'Aligned')
        frames.append(init_ratios)
    ratios = pd.concat(frames)
    # Save the DataFrame to the data folder
    save_name = os.path.join(DATA_DIR, 'Initial Dune Ratios.csv')
    ratios.to_csv(save_name, index=False)
    print(f'File Saved: {save_name}')
def joaquin_and_florence():
    """
    Load the storm surge time series' from Tropical Storm Joaquin and
    Hurricane Florence, compute the non-tidal residual, and write each
    storm to its own .csv in the data folder.
    """
    # Per-storm (date columns to combine-and-parse, output column names)
    storm_specs = {
        'Joaquin': (['Date', 'Time'],
                    ['Time', 'Predicted', 'Observed']),
        'Florence': (['Date', 'Time (GMT)'],
                     ['Time', 'Predicted', 'Preliminary', 'Observed']),
    }
    for storm, (date_cols, column_names) in storm_specs.items():
        # Load the tide predictions and observations as a DataFrame
        filename = os.path.join(XB_DIR, 'Setup Data', f'{storm}.csv')
        frame = pd.read_csv(filename, delimiter=',', parse_dates=[date_cols], header=0)
        frame.columns = column_names
        # Non-tidal residual = observed water level minus tidal prediction
        frame['NTR'] = frame['Observed'] - frame['Predicted']
        # Human-readable timestamps down to the hour
        frame['String Times'] = [t.strftime('%Y-%m-%d %H') for t in frame['Time'].tolist()]
        # Save the DataFrame as a .csv
        frame.to_csv(os.path.join(DATA_DIR, f'{storm}.csv'), index=False)
def move_csv_output():
    """
    Copy each simulation's morphometrics .csv into the "Data" folder,
    organized into one subfolder per experiment.
    """
    # Dune configurations, storm surge modifications, storm duration
    # increases, and dune aspect ratio stretches used in the experiments
    dunes = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
    surges = ['Half', 'Normal', 'One Half']
    durations = [1, 12, 18, 24, 36, 48]
    stretches = [-60, -40, -20, 1, 20, 40, 60]
    for dune in dunes:
        for surge in surges:
            # Locate the experiment folder and make a matching target folder
            experiment_name = f'{dune} {surge} Surge'
            experiment_folder = os.path.join(XB_DIR, experiment_name)
            save_folder = os.path.join(DATA_DIR, 'XBeach Morphometrics', experiment_name)
            if not os.path.exists(save_folder):
                os.mkdir(save_folder)
            # Copy every simulation's morphometrics file into the target
            for stretch in stretches:
                for duration in durations:
                    run_name = f'Dune Complexity {stretch} {duration}'
                    source = os.path.join(experiment_folder, run_name, f'{run_name} Morphometrics.csv')
                    # Guard clauses: skip missing sources and existing targets
                    if not os.path.exists(source):
                        print(f'FILE DOES NOT EXIST: {source}')
                        continue
                    destination = os.path.join(save_folder, f'{run_name} Morphometrics.csv')
                    if os.path.exists(destination):
                        print(f'File already exists: {destination}')
                        continue
                    sh.copy(source, destination)
                    print(f'File Successfully Copied: {destination}')
def move_field_data():
    """
    Copy the 2017 and 2018 field-data morphometrics files into the
    data folder.
    """
    # Root of the Bogue Banks field data, organized by year
    field_dir = os.path.join('..', '..', 'Bogue Banks Field Data')
    for year in (2017, 2018):
        csv_name = f'Morphometrics for Bogue Banks {year}.csv'
        sh.copy(os.path.join(field_dir, str(year), csv_name),
                os.path.join(DATA_DIR, csv_name))
def move_netcdf_output():
    """
    Copy the netCDF output files into the "Data" folder, renaming them
    from "xboutput.nc" to the name of the simulation.
    """
    # Dune configurations, storm surge modifications, storm duration
    # increases, and dune aspect ratio stretches used in the experiments
    dunes = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
    surges = ['Half', 'Normal', 'One Half']
    durations = [1, 12, 18, 24, 36, 48]
    stretches = [-60, -40, -20, 1, 20, 40, 60]
    for dune in dunes:
        for surge in surges:
            # Locate the experiment folder and make a matching target folder
            experiment_name = f'{dune} {surge} Surge'
            experiment_folder = os.path.join(XB_DIR, experiment_name)
            save_folder = os.path.join(DATA_DIR, 'XBeach Output', experiment_name)
            if not os.path.exists(save_folder):
                os.mkdir(save_folder)
            # Copy every simulation's xboutput.nc into the target folder
            for stretch in stretches:
                for duration in durations:
                    run_name = f'Dune Complexity {stretch} {duration}'
                    source = os.path.join(experiment_folder, run_name, 'xboutput.nc')
                    # Guard clauses: skip missing sources and existing targets
                    if not os.path.exists(source):
                        print(f'FILE DOES NOT EXIST: {source}')
                        continue
                    destination = os.path.join(save_folder, f'{run_name}.nc')
                    if os.path.exists(destination):
                        print(f'File already exists: {destination}')
                        continue
                    sh.copy(source, destination)
                    print(f'File Successfully Copied: {destination}')
def surge_time_series():
    """
    Put all the storm surge time series' into one .csv file that can be
    loaded as a DataFrame (one column per surge-level/duration combo).
    """
    # Storm surge modifiers (and their folder labels) and the
    # storm duration increases used in the simulations
    surges, surge_labels = [0.5, 1.0, 1.5], ['Half', 'Normal', 'One Half']
    durations = [1, 12, 18, 24, 36, 48]
    surge_df = pd.DataFrame()
    for surge, label in zip(surges, surge_labels):
        for duration in durations:
            # The DataFrame requires equal-length columns, so pad every
            # series into a preset 125 "hour" long NaN array.
            # (np.full(125, ...) builds the 1-D array directly; the old
            # np.full((1, 125), ...)[0] made a 2-D array just to index
            # out its only row.)
            time_series = np.full(125, np.nan)
            # Load the non-tidal-residual series and overlay it on the NaNs
            filename = os.path.join(XB_DIR, f'Toes Joined {label} Surge', f'Dune Complexity 1 {duration}', 'ntr.txt')
            ntr = np.genfromtxt(filename, dtype=np.float32)
            time_series[:len(ntr)] = ntr
            surge_df[f'{label} {duration}'] = time_series
    # Save the DataFrame as a .csv file
    save_name = os.path.join(DATA_DIR, 'Storm Surge Time Series.csv')
    surge_df.to_csv(save_name, index=False)
def main():
    """
    Main program function to consolidate all the data sources.

    Steps that have already been run are left commented out, so
    re-running the script only moves the newest data.
    """
    # Make a .csv file with the initial profiles used
    # initial_profiles()
    # Make a .csv file with the initial dune ratios
    # initial_ratios()
    # Make a .csv file with all the natural dune volumes
    # and aspect ratios measured from Bogue Banks LiDAR
    # bogue_lidar_data()
    # Make a .csv file with the storm surge time
    # series' for all the model runs
    # surge_time_series()
    # Make a .csv file with storm surge data
    # for Tropical Storm Joaquin and <NAME>
    # joaquin_and_florence()
    # Move the netCDF output files into the Data folder
    # and rename them for the run name. Move the .csv
    # files with the morphometrics from the runs too
    # move_csv_output()
    # move_netcdf_output()
    # Move the Bogue Banks field data morphometrics
    # from 2017 and 2018 into the data folder
    move_field_data()
# Run the consolidation only when executed as a script.
if __name__ == '__main__':
    main()
| [
"os.path.exists",
"numpy.genfromtxt",
"pandas.read_csv",
"os.path.join",
"os.mkdir",
"shutil.copy",
"pandas.DataFrame",
"numpy.full",
"numpy.loadtxt",
"pandas.concat"
] | [((337, 363), 'os.path.join', 'os.path.join', (['""".."""', '"""Data"""'], {}), "('..', 'Data')\n", (349, 363), False, 'import os\n'), ((426, 501), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""XBeach Modelling"""', '"""Dune Complexity Experiments"""'], {}), "('..', '..', 'XBeach Modelling', 'Dune Complexity Experiments')\n", (438, 501), False, 'import os\n'), ((828, 842), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (840, 842), True, 'import pandas as pd\n'), ((1375, 1389), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1387, 1389), True, 'import pandas as pd\n'), ((1639, 1706), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""Bogue Banks Volumes and Aspect Ratios.csv"""'], {}), "(DATA_DIR, 'Bogue Banks Volumes and Aspect Ratios.csv')\n", (1651, 1706), False, 'import os\n'), ((2308, 2322), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2320, 2322), True, 'import pandas as pd\n'), ((3987, 4033), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""Initial Profiles.csv"""'], {}), "(DATA_DIR, 'Initial Profiles.csv')\n", (3999, 4033), False, 'import os\n'), ((4559, 4573), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4571, 4573), True, 'import pandas as pd\n'), ((5203, 5252), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""Initial Dune Ratios.csv"""'], {}), "(DATA_DIR, 'Initial Dune Ratios.csv')\n", (5215, 5252), False, 'import os\n'), ((8976, 9026), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""Bogue Banks Field Data"""'], {}), "('..', '..', 'Bogue Banks Field Data')\n", (8988, 9026), False, 'import os\n'), ((11958, 11972), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11970, 11972), True, 'import pandas as pd\n'), ((12864, 12917), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""Storm Surge Time Series.csv"""'], {}), "(DATA_DIR, 'Storm Surge Time Series.csv')\n", (12876, 12917), False, 'import os\n'), ((978, 1078), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""Chapter 1 
Sand Fences"""', '"""Data"""', 'f"""Morphometrics for Bogue {year}.csv"""'], {}), "('..', '..', 'Chapter 1 Sand Fences', 'Data',\n f'Morphometrics for Bogue {year}.csv')\n", (990, 1078), False, 'import os\n'), ((1091, 1133), 'pandas.read_csv', 'pd.read_csv', (['path'], {'delimiter': '""","""', 'header': '(0)'}), "(path, delimiter=',', header=0)\n", (1102, 1133), True, 'import pandas as pd\n'), ((1271, 1296), 'pandas.concat', 'pd.concat', (['[morpho, temp]'], {}), '([morpho, temp])\n', (1280, 1296), True, 'import pandas as pd\n'), ((2460, 2508), 'os.path.join', 'os.path.join', (['XB_DIR', 'f"""{experiment} Half Surge"""'], {}), "(XB_DIR, f'{experiment} Half Surge')\n", (2472, 2508), False, 'import os\n'), ((2562, 2619), 'os.path.join', 'os.path.join', (['PROFILE_DIR', '"""Dune Complexity 1 1"""', '"""x.grd"""'], {}), "(PROFILE_DIR, 'Dune Complexity 1 1', 'x.grd')\n", (2574, 2619), False, 'import os\n'), ((2638, 2662), 'numpy.loadtxt', 'np.loadtxt', (['x_grid_fname'], {}), '(x_grid_fname)\n', (2648, 2662), True, 'import numpy as np\n'), ((3801, 3829), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dune_dict'}), '(data=dune_dict)\n', (3813, 3829), True, 'import pandas as pd\n'), ((3890, 3922), 'pandas.concat', 'pd.concat', (['[profiles, dune_data]'], {}), '([profiles, dune_data])\n', (3899, 3922), True, 'import pandas as pd\n'), ((4718, 4811), 'os.path.join', 'os.path.join', (['XB_DIR', 'f"""{experiment} Half Surge"""', '"""Setup Data"""', '"""Initial Dune Ratios.csv"""'], {}), "(XB_DIR, f'{experiment} Half Surge', 'Setup Data',\n 'Initial Dune Ratios.csv')\n", (4730, 4811), False, 'import os\n'), ((4831, 4930), 'pandas.read_csv', 'pd.read_csv', (['init_ratio_fname'], {'delimiter': '""","""', 'header': 'None', 'names': "['Stretch', 'Ratio', 'Volume']"}), "(init_ratio_fname, delimiter=',', header=None, names=['Stretch',\n 'Ratio', 'Volume'])\n", (4842, 4930), True, 'import pandas as pd\n'), ((5106, 5138), 'pandas.concat', 'pd.concat', (['[ratios, init_ratios]'], 
{}), '([ratios, init_ratios])\n', (5115, 5138), True, 'import pandas as pd\n'), ((5685, 5735), 'os.path.join', 'os.path.join', (['XB_DIR', '"""Setup Data"""', 'f"""{storm}.csv"""'], {}), "(XB_DIR, 'Setup Data', f'{storm}.csv')\n", (5697, 5735), False, 'import os\n'), ((6042, 6120), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'delimiter': '""","""', 'parse_dates': '[parse_dates_cols]', 'header': '(0)'}), "(filename, delimiter=',', parse_dates=[parse_dates_cols], header=0)\n", (6053, 6120), True, 'import pandas as pd\n'), ((6473, 6511), 'os.path.join', 'os.path.join', (['DATA_DIR', 'f"""{storm}.csv"""'], {}), "(DATA_DIR, f'{storm}.csv')\n", (6485, 6511), False, 'import os\n'), ((9270, 9337), 'os.path.join', 'os.path.join', (['DATA_DIR', 'f"""Morphometrics for Bogue Banks {year}.csv"""'], {}), "(DATA_DIR, f'Morphometrics for Bogue Banks {year}.csv')\n", (9282, 9337), False, 'import os\n'), ((9374, 9402), 'shutil.copy', 'sh.copy', (['source', 'destination'], {}), '(source, destination)\n', (9381, 9402), True, 'import shutil as sh\n'), ((7324, 7361), 'os.path.join', 'os.path.join', (['XB_DIR', 'experiment_name'], {}), '(XB_DIR, experiment_name)\n', (7336, 7361), False, 'import os\n'), ((7449, 7512), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""XBeach Morphometrics"""', 'experiment_name'], {}), "(DATA_DIR, 'XBeach Morphometrics', experiment_name)\n", (7461, 7512), False, 'import os\n'), ((10175, 10212), 'os.path.join', 'os.path.join', (['XB_DIR', 'experiment_name'], {}), '(XB_DIR, experiment_name)\n', (10187, 10212), False, 'import os\n'), ((10300, 10356), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""XBeach Output"""', 'experiment_name'], {}), "(DATA_DIR, 'XBeach Output', experiment_name)\n", (10312, 10356), False, 'import os\n'), ((12496, 12594), 'os.path.join', 'os.path.join', (['XB_DIR', 'f"""Toes Joined {label} Surge"""', 'f"""Dune Complexity 1 {duration}"""', '"""ntr.txt"""'], {}), "(XB_DIR, f'Toes Joined {label} Surge',\n f'Dune Complexity 1 
{duration}', 'ntr.txt')\n", (12508, 12594), False, 'import os\n'), ((12610, 12651), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'dtype': 'np.float32'}), '(filename, dtype=np.float32)\n', (12623, 12651), True, 'import numpy as np\n'), ((2726, 2785), 'os.path.join', 'os.path.join', (['PROFILE_DIR', '"""Dune Complexity 1 1"""', '"""bed.dep"""'], {}), "(PROFILE_DIR, 'Dune Complexity 1 1', 'bed.dep')\n", (2738, 2785), False, 'import os\n'), ((2822, 2882), 'os.path.join', 'os.path.join', (['PROFILE_DIR', '"""Dune Complexity 20 1"""', '"""bed.dep"""'], {}), "(PROFILE_DIR, 'Dune Complexity 20 1', 'bed.dep')\n", (2834, 2882), False, 'import os\n'), ((2919, 2979), 'os.path.join', 'os.path.join', (['PROFILE_DIR', '"""Dune Complexity 40 1"""', '"""bed.dep"""'], {}), "(PROFILE_DIR, 'Dune Complexity 40 1', 'bed.dep')\n", (2931, 2979), False, 'import os\n'), ((3016, 3076), 'os.path.join', 'os.path.join', (['PROFILE_DIR', '"""Dune Complexity 60 1"""', '"""bed.dep"""'], {}), "(PROFILE_DIR, 'Dune Complexity 60 1', 'bed.dep')\n", (3028, 3076), False, 'import os\n'), ((3113, 3174), 'os.path.join', 'os.path.join', (['PROFILE_DIR', '"""Dune Complexity -20 1"""', '"""bed.dep"""'], {}), "(PROFILE_DIR, 'Dune Complexity -20 1', 'bed.dep')\n", (3125, 3174), False, 'import os\n'), ((3211, 3272), 'os.path.join', 'os.path.join', (['PROFILE_DIR', '"""Dune Complexity -40 1"""', '"""bed.dep"""'], {}), "(PROFILE_DIR, 'Dune Complexity -40 1', 'bed.dep')\n", (3223, 3272), False, 'import os\n'), ((3309, 3370), 'os.path.join', 'os.path.join', (['PROFILE_DIR', '"""Dune Complexity -60 1"""', '"""bed.dep"""'], {}), "(PROFILE_DIR, 'Dune Complexity -60 1', 'bed.dep')\n", (3321, 3370), False, 'import os\n'), ((7533, 7560), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (7547, 7560), False, 'import os\n'), ((7579, 7600), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (7587, 7600), False, 'import os\n'), ((10377, 10404), 'os.path.exists', 
'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (10391, 10404), False, 'import os\n'), ((10423, 10444), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (10431, 10444), False, 'import os\n'), ((12359, 12395), 'numpy.full', 'np.full', (['(1, 125)'], {'fill_value': 'np.nan'}), '((1, 125), fill_value=np.nan)\n', (12366, 12395), True, 'import numpy as np\n'), ((7924, 7965), 'os.path.join', 'os.path.join', (['experiment_folder', 'run_name'], {}), '(experiment_folder, run_name)\n', (7936, 7965), False, 'import os\n'), ((8136, 8200), 'os.path.join', 'os.path.join', (['simulation_folder', 'f"""{run_name} Morphometrics.csv"""'], {}), "(simulation_folder, f'{run_name} Morphometrics.csv')\n", (8148, 8200), False, 'import os\n'), ((8225, 8247), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (8239, 8247), False, 'import os\n'), ((10768, 10809), 'os.path.join', 'os.path.join', (['experiment_folder', 'run_name'], {}), '(experiment_folder, run_name)\n', (10780, 10809), False, 'import os\n'), ((10980, 11026), 'os.path.join', 'os.path.join', (['simulation_folder', '"""xboutput.nc"""'], {}), "(simulation_folder, 'xboutput.nc')\n", (10992, 11026), False, 'import os\n'), ((11051, 11073), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (11065, 11073), False, 'import os\n'), ((8288, 8346), 'os.path.join', 'os.path.join', (['save_folder', 'f"""{run_name} Morphometrics.csv"""'], {}), "(save_folder, f'{run_name} Morphometrics.csv')\n", (8300, 8346), False, 'import os\n'), ((11114, 11157), 'os.path.join', 'os.path.join', (['save_folder', 'f"""{run_name}.nc"""'], {}), "(save_folder, f'{run_name}.nc')\n", (11126, 11157), False, 'import os\n'), ((8379, 8406), 'os.path.exists', 'os.path.exists', (['destination'], {}), '(destination)\n', (8393, 8406), False, 'import os\n'), ((8437, 8465), 'shutil.copy', 'sh.copy', (['source', 'destination'], {}), '(source, destination)\n', (8444, 8465), True, 'import shutil as sh\n'), ((11190, 
11217), 'os.path.exists', 'os.path.exists', (['destination'], {}), '(destination)\n', (11204, 11217), False, 'import os\n'), ((11248, 11276), 'shutil.copy', 'sh.copy', (['source', 'destination'], {}), '(source, destination)\n', (11255, 11276), True, 'import shutil as sh\n')] |
def end_of_import():
    """Marker hook: called once module imports have completed."""
    return 0
def end_of_init():
    """Marker hook: called once the input data has been initialized."""
    return 0
def end_of_computing():
    """Marker hook: called once the computation has finished."""
    return 0
import numpy as np
from sklearn.linear_model import LinearRegression
end_of_import()
X = np.array(range(0,100000)).reshape(-1, 1)
# y = 2x + 3
y = np.dot(X, 2) + 3
end_of_init()
reg = LinearRegression().fit(X, y)
end_of_computing() | [
"numpy.dot",
"sklearn.linear_model.LinearRegression"
] | [((254, 266), 'numpy.dot', 'np.dot', (['X', '(2)'], {}), '(X, 2)\n', (260, 266), True, 'import numpy as np\n'), ((292, 310), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (308, 310), False, 'from sklearn.linear_model import LinearRegression\n')] |
import requests

# Fetch the detailed BTC/KRW ticker from the Korbit public API.
url = "https://api.korbit.co.kr/v1/ticker/detailed?currency_pair=btc_krw"
response = requests.get(url)
bitcoin = response.json()

# Dump the raw payload and its type, then each quote field on its own line.
print(bitcoin)
print(type(bitcoin))
for field in ('last', 'bid', 'ask', 'volume'):
    print(bitcoin[field])
| [
"requests.get"
] | [((95, 112), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (107, 112), False, 'import requests\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-11-06 01:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a boolean "is_used" flag
    # (default True) to the existing MineBtns model.

    # Must be applied after the migration that created MineBtns.
    dependencies = [
        ('myaxf', '0010_minebtns'),
    ]

    operations = [
        migrations.AddField(
            model_name='minebtns',
            name='is_used',
            field=models.BooleanField(default=True),
        ),
    ]
| [
"django.db.models.BooleanField"
] | [((390, 423), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (409, 423), False, 'from django.db import migrations, models\n')] |
"""
Utility to get generate all submission pipelines for all primitives.
This script assumes that `generate_annotations.py` has already been run.
"""
import os
import subprocess
import shutil
import fire
from kf_d3m_primitives.data_preprocessing.data_cleaning.data_cleaning_pipeline import DataCleaningPipeline
from kf_d3m_primitives.data_preprocessing.text_summarization.duke_pipeline import DukePipeline
from kf_d3m_primitives.data_preprocessing.geocoding_forward.goat_forward_pipeline import GoatForwardPipeline
from kf_d3m_primitives.data_preprocessing.geocoding_reverse.goat_reverse_pipeline import GoatReversePipeline
from kf_d3m_primitives.data_preprocessing.data_typing.simon_pipeline import SimonPipeline
from kf_d3m_primitives.clustering.spectral_clustering.spectral_clustering_pipeline import SpectralClusteringPipeline
from kf_d3m_primitives.clustering.k_means.storc_pipeline import StorcPipeline
from kf_d3m_primitives.clustering.hdbscan.hdbscan_pipeline import HdbscanPipeline
from kf_d3m_primitives.dimensionality_reduction.tsne.tsne_pipeline import TsnePipeline
from kf_d3m_primitives.feature_selection.pca_features.pca_features_pipeline import PcaFeaturesPipeline
from kf_d3m_primitives.feature_selection.rf_features.rf_features_pipeline import RfFeaturesPipeline
from kf_d3m_primitives.natural_language_processing.sent2vec.sent2vec_pipeline import Sent2VecPipeline
from kf_d3m_primitives.object_detection.retinanet.object_detection_retinanet_pipeline import ObjectDetectionRNPipeline
from kf_d3m_primitives.image_classification.imagenet_transfer_learning.gator_pipeline import GatorPipeline
from kf_d3m_primitives.ts_classification.knn.kanine_pipeline import KaninePipeline
from kf_d3m_primitives.ts_classification.lstm_fcn.lstm_fcn_pipeline import LstmFcnPipeline
from kf_d3m_primitives.ts_forecasting.vector_autoregression.var_pipeline import VarPipeline
from kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline import DeepARPipeline
from kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline import NBEATSPipeline
from kf_d3m_primitives.remote_sensing.classifier.mlp_classifier_pipeline import MlpClassifierPipeline
def generate_pipelines(gpu = False):
    """
    Generate submission pipelines, pipeline runs and (where applicable)
    pipeline scores for every primitive, writing the artifacts into each
    primitive's annotation directory under /annotations.

    :param gpu: when True, only GPU-dependent primitives are processed;
        when False, only CPU primitives are processed
    """
    # Primitives that need a GPU; selected or skipped depending on `gpu`.
    gpu_prims = [
        "d3m.primitives.classification.inceptionV3_image_feature.Gator",
        "d3m.primitives.object_detection.retina_net.ObjectDetectionRN",
        "d3m.primitives.time_series_classification.convolutional_neural_net.LSTM_FCN",
        "d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec",
        "d3m.primitives.remote_sensing.mlp.MlpClassifier"
    ]
    # Maps each primitive name to a list of (pipeline instance, dataset names).
    prims_to_pipelines = {
        "d3m.primitives.data_cleaning.column_type_profiler.Simon": [
            (SimonPipeline(), ('185_baseball_MIN_METADATA',))
        ],
        "d3m.primitives.data_cleaning.geocoding.Goat_forward": [
            (GoatForwardPipeline(), ('LL0_acled_reduced_MIN_METADATA',))
        ],
        "d3m.primitives.data_cleaning.geocoding.Goat_reverse": [
            (GoatReversePipeline(), ('LL0_acled_reduced_MIN_METADATA',))
        ],
        "d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec": [
            (Sent2VecPipeline(), ('LL1_TXT_CLS_apple_products_sentiment_MIN_METADATA',))
        ],
        "d3m.primitives.clustering.k_means.Sloth": [
            (StorcPipeline(), ('66_chlorineConcentration_MIN_METADATA',))
        ],
        "d3m.primitives.clustering.hdbscan.Hdbscan": [
            (HdbscanPipeline(), ('SEMI_1044_eye_movements_MIN_METADATA',))
        ],
        "d3m.primitives.clustering.spectral_graph.SpectralClustering": [
            (SpectralClusteringPipeline(), ('SEMI_1044_eye_movements_MIN_METADATA',))
        ],
        "d3m.primitives.dimensionality_reduction.t_distributed_stochastic_neighbor_embedding.Tsne": [
            (TsnePipeline(), ('SEMI_1044_eye_movements_MIN_METADATA',))
        ],
        "d3m.primitives.time_series_classification.k_neighbors.Kanine": [
            (KaninePipeline(), ('66_chlorineConcentration_MIN_METADATA',))
        ],
        "d3m.primitives.time_series_classification.convolutional_neural_net.LSTM_FCN": [
            (LstmFcnPipeline(), (
                '66_chlorineConcentration_MIN_METADATA',
                "LL1_Adiac_MIN_METADATA",
                "LL1_ArrowHead_MIN_METADATA",
                "LL1_Cricket_Y_MIN_METADATA",
                "LL1_ECG200_MIN_METADATA",
                "LL1_ElectricDevices_MIN_METADATA",
                "LL1_FISH_MIN_METADATA",
                "LL1_FaceFour_MIN_METADATA",
                "LL1_HandOutlines_MIN_METADATA",
                "LL1_Haptics_MIN_METADATA",
                "LL1_ItalyPowerDemand_MIN_METADATA",
                "LL1_Meat_MIN_METADATA",
                "LL1_OSULeaf_MIN_METADATA",
            )),
            (LstmFcnPipeline(attention_lstm=True), (
                '66_chlorineConcentration_MIN_METADATA',
                "LL1_Adiac_MIN_METADATA",
                "LL1_ArrowHead_MIN_METADATA",
                "LL1_Cricket_Y_MIN_METADATA",
                "LL1_ECG200_MIN_METADATA",
                "LL1_ElectricDevices_MIN_METADATA",
                "LL1_FISH_MIN_METADATA",
                "LL1_FaceFour_MIN_METADATA",
                "LL1_HandOutlines_MIN_METADATA",
                "LL1_Haptics_MIN_METADATA",
                "LL1_ItalyPowerDemand_MIN_METADATA",
                "LL1_Meat_MIN_METADATA",
                "LL1_OSULeaf_MIN_METADATA",
            ))
        ],
        "d3m.primitives.time_series_forecasting.vector_autoregression.VAR": [
            (VarPipeline(), (
                '56_sunspots_MIN_METADATA',
                '56_sunspots_monthly_MIN_METADATA',
                'LL1_736_population_spawn_MIN_METADATA',
                'LL1_736_stock_market_MIN_METADATA',
                'LL1_terra_canopy_height_long_form_s4_100_MIN_METADATA',
                "LL1_terra_canopy_height_long_form_s4_90_MIN_METADATA",
                "LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA",
                "LL1_terra_canopy_height_long_form_s4_70_MIN_METADATA",
                'LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA',
                'LL1_PHEM_Monthly_Malnutrition_MIN_METADATA',
                'LL1_PHEM_weeklyData_malnutrition_MIN_METADATA',
            ))
        ],
        "d3m.primitives.time_series_forecasting.lstm.DeepAR": [
            (DeepARPipeline(prediction_length = 21, context_length = 21), ('56_sunspots_MIN_METADATA',)),
            (DeepARPipeline(prediction_length = 38, context_length = 38), ('56_sunspots_monthly_MIN_METADATA',)),
            (DeepARPipeline(prediction_length = 60, context_length = 30), ('LL1_736_population_spawn_MIN_METADATA',)),
            (DeepARPipeline(prediction_length = 34, context_length = 17), ('LL1_736_stock_market_MIN_METADATA',)),
        ],
        "d3m.primitives.time_series_forecasting.feed_forward_neural_net.NBEATS": [
            (NBEATSPipeline(prediction_length = 21), ('56_sunspots_MIN_METADATA',)),
            (NBEATSPipeline(prediction_length = 38), ('56_sunspots_monthly_MIN_METADATA',)),
            (NBEATSPipeline(prediction_length = 60), ('LL1_736_population_spawn_MIN_METADATA',)),
            (NBEATSPipeline(prediction_length = 34), ('LL1_736_stock_market_MIN_METADATA',)),
        ],
        "d3m.primitives.object_detection.retina_net.ObjectDetectionRN": [
            (ObjectDetectionRNPipeline(), (
                'LL1_tidy_terra_panicle_detection_MIN_METADATA',
                'LL1_penn_fudan_pedestrian_MIN_METADATA'
            ))
        ],
        "d3m.primitives.data_cleaning.data_cleaning.Datacleaning": [
            (DataCleaningPipeline(), ('185_baseball_MIN_METADATA',))
        ],
        "d3m.primitives.data_cleaning.text_summarization.Duke": [
            (DukePipeline(), ('185_baseball_MIN_METADATA',))
        ],
        "d3m.primitives.feature_selection.pca_features.Pcafeatures": [
            (PcaFeaturesPipeline(), ('185_baseball_MIN_METADATA',))
        ],
        "d3m.primitives.feature_selection.rffeatures.Rffeatures": [
            (RfFeaturesPipeline(), ('185_baseball_MIN_METADATA',))
        ],
        "d3m.primitives.classification.inceptionV3_image_feature.Gator": [
            (GatorPipeline(), (
                "124_174_cifar10_MIN_METADATA",
                "124_188_usps_MIN_METADATA",
                "124_214_coil20_MIN_METADATA",
                "uu_101_object_categories_MIN_METADATA",
            ))
        ],
        "d3m.primitives.remote_sensing.mlp.MlpClassifier": [
            (MlpClassifierPipeline(), ('LL1_bigearth_landuse_detection',))
        ]
    }
    for primitive, pipelines in prims_to_pipelines.items():
        # Process only the GPU set, or only the CPU set, depending on `gpu`.
        if gpu:
            if primitive not in gpu_prims:
                continue
        else:
            if primitive in gpu_prims:
                continue
        os.chdir(f'/annotations/{primitive}')
        # Enter the (single) version sub-directory of the primitive annotation.
        os.chdir(os.listdir('.')[0])
        # Recreate empty 'pipelines' and 'pipeline_runs' output directories.
        if not os.path.isdir('pipelines'):
            os.mkdir('pipelines')
        else:
            [os.remove(f'pipelines/{pipeline}') for pipeline in os.listdir('pipelines')]
        if not os.path.isdir('pipeline_runs'):
            os.mkdir('pipeline_runs')
        else:
            [os.remove(f'pipeline_runs/{pipeline_run}') for pipeline_run in os.listdir('pipeline_runs')]
        if not os.path.isdir(f'/pipeline_scores/{primitive.split(".")[-1]}'):
            os.mkdir(f'/pipeline_scores/{primitive.split(".")[-1]}')
        for pipeline, datasets in pipelines:
            pipeline.write_pipeline(output_dir = './pipelines')
            for dataset in datasets:
                print(f'Generating pipeline for {primitive.split(".")[-1]} on {dataset}')
                # Duke and Sloth have no scoring step; only fit/produce them.
                if primitive.split(".")[-1] in ['Duke', 'Sloth']:
                    pipeline.fit_produce(
                        dataset,
                        output_yml_dir = './pipeline_runs',
                        submission = True
                    )
                else:
                    if primitive.split(".")[-1] == 'NBEATS':
                        # Clear stale NBEATS scratch state before each dataset.
                        shutil.rmtree(f'/scratch_dir/nbeats')
                    pipeline.fit_score(
                        dataset,
                        output_yml_dir = './pipeline_runs',
                        output_score_dir = f'/pipeline_scores/{primitive.split(".")[-1]}',
                        submission = True
                    )
        # Submission format requires gzipped pipeline-run files.
        os.system('gzip -r pipeline_runs')
if __name__ == '__main__':
fire.Fire(generate_pipelines) | [
"fire.Fire",
"kf_d3m_primitives.object_detection.retinanet.object_detection_retinanet_pipeline.ObjectDetectionRNPipeline",
"kf_d3m_primitives.data_preprocessing.data_cleaning.data_cleaning_pipeline.DataCleaningPipeline",
"kf_d3m_primitives.clustering.hdbscan.hdbscan_pipeline.HdbscanPipeline",
"kf_d3m_primit... | [((10424, 10453), 'fire.Fire', 'fire.Fire', (['generate_pipelines'], {}), '(generate_pipelines)\n', (10433, 10453), False, 'import fire\n'), ((8809, 8846), 'os.chdir', 'os.chdir', (['f"""/annotations/{primitive}"""'], {}), "(f'/annotations/{primitive}')\n", (8817, 8846), False, 'import os\n'), ((10357, 10391), 'os.system', 'os.system', (['"""gzip -r pipeline_runs"""'], {}), "('gzip -r pipeline_runs')\n", (10366, 10391), False, 'import os\n'), ((8899, 8925), 'os.path.isdir', 'os.path.isdir', (['"""pipelines"""'], {}), "('pipelines')\n", (8912, 8925), False, 'import os\n'), ((8939, 8960), 'os.mkdir', 'os.mkdir', (['"""pipelines"""'], {}), "('pipelines')\n", (8947, 8960), False, 'import os\n'), ((9079, 9109), 'os.path.isdir', 'os.path.isdir', (['"""pipeline_runs"""'], {}), "('pipeline_runs')\n", (9092, 9109), False, 'import os\n'), ((9123, 9148), 'os.mkdir', 'os.mkdir', (['"""pipeline_runs"""'], {}), "('pipeline_runs')\n", (9131, 9148), False, 'import os\n'), ((2678, 2693), 'kf_d3m_primitives.data_preprocessing.data_typing.simon_pipeline.SimonPipeline', 'SimonPipeline', ([], {}), '()\n', (2691, 2693), False, 'from kf_d3m_primitives.data_preprocessing.data_typing.simon_pipeline import SimonPipeline\n'), ((2816, 2837), 'kf_d3m_primitives.data_preprocessing.geocoding_forward.goat_forward_pipeline.GoatForwardPipeline', 'GoatForwardPipeline', ([], {}), '()\n', (2835, 2837), False, 'from kf_d3m_primitives.data_preprocessing.geocoding_forward.goat_forward_pipeline import GoatForwardPipeline\n'), ((2965, 2986), 'kf_d3m_primitives.data_preprocessing.geocoding_reverse.goat_reverse_pipeline.GoatReversePipeline', 'GoatReversePipeline', ([], {}), '()\n', (2984, 2986), False, 'from kf_d3m_primitives.data_preprocessing.geocoding_reverse.goat_reverse_pipeline import GoatReversePipeline\n'), ((3117, 3135), 'kf_d3m_primitives.natural_language_processing.sent2vec.sent2vec_pipeline.Sent2VecPipeline', 'Sent2VecPipeline', ([], {}), '()\n', (3133, 3135), False, 'from 
kf_d3m_primitives.natural_language_processing.sent2vec.sent2vec_pipeline import Sent2VecPipeline\n'), ((3270, 3285), 'kf_d3m_primitives.clustering.k_means.storc_pipeline.StorcPipeline', 'StorcPipeline', ([], {}), '()\n', (3283, 3285), False, 'from kf_d3m_primitives.clustering.k_means.storc_pipeline import StorcPipeline\n'), ((3410, 3427), 'kf_d3m_primitives.clustering.hdbscan.hdbscan_pipeline.HdbscanPipeline', 'HdbscanPipeline', ([], {}), '()\n', (3425, 3427), False, 'from kf_d3m_primitives.clustering.hdbscan.hdbscan_pipeline import HdbscanPipeline\n'), ((3569, 3597), 'kf_d3m_primitives.clustering.spectral_clustering.spectral_clustering_pipeline.SpectralClusteringPipeline', 'SpectralClusteringPipeline', ([], {}), '()\n', (3595, 3597), False, 'from kf_d3m_primitives.clustering.spectral_clustering.spectral_clustering_pipeline import SpectralClusteringPipeline\n'), ((3768, 3782), 'kf_d3m_primitives.dimensionality_reduction.tsne.tsne_pipeline.TsnePipeline', 'TsnePipeline', ([], {}), '()\n', (3780, 3782), False, 'from kf_d3m_primitives.dimensionality_reduction.tsne.tsne_pipeline import TsnePipeline\n'), ((3925, 3941), 'kf_d3m_primitives.ts_classification.knn.kanine_pipeline.KaninePipeline', 'KaninePipeline', ([], {}), '()\n', (3939, 3941), False, 'from kf_d3m_primitives.ts_classification.knn.kanine_pipeline import KaninePipeline\n'), ((4100, 4117), 'kf_d3m_primitives.ts_classification.lstm_fcn.lstm_fcn_pipeline.LstmFcnPipeline', 'LstmFcnPipeline', ([], {}), '()\n', (4115, 4117), False, 'from kf_d3m_primitives.ts_classification.lstm_fcn.lstm_fcn_pipeline import LstmFcnPipeline\n'), ((4753, 4789), 'kf_d3m_primitives.ts_classification.lstm_fcn.lstm_fcn_pipeline.LstmFcnPipeline', 'LstmFcnPipeline', ([], {'attention_lstm': '(True)'}), '(attention_lstm=True)\n', (4768, 4789), False, 'from kf_d3m_primitives.ts_classification.lstm_fcn.lstm_fcn_pipeline import LstmFcnPipeline\n'), ((5513, 5526), 'kf_d3m_primitives.ts_forecasting.vector_autoregression.var_pipeline.VarPipeline', 
'VarPipeline', ([], {}), '()\n', (5524, 5526), False, 'from kf_d3m_primitives.ts_forecasting.vector_autoregression.var_pipeline import VarPipeline\n'), ((6326, 6381), 'kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline.DeepARPipeline', 'DeepARPipeline', ([], {'prediction_length': '(21)', 'context_length': '(21)'}), '(prediction_length=21, context_length=21)\n', (6340, 6381), False, 'from kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline import DeepARPipeline\n'), ((6433, 6488), 'kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline.DeepARPipeline', 'DeepARPipeline', ([], {'prediction_length': '(38)', 'context_length': '(38)'}), '(prediction_length=38, context_length=38)\n', (6447, 6488), False, 'from kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline import DeepARPipeline\n'), ((6547, 6602), 'kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline.DeepARPipeline', 'DeepARPipeline', ([], {'prediction_length': '(60)', 'context_length': '(30)'}), '(prediction_length=60, context_length=30)\n', (6561, 6602), False, 'from kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline import DeepARPipeline\n'), ((6666, 6721), 'kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline.DeepARPipeline', 'DeepARPipeline', ([], {'prediction_length': '(34)', 'context_length': '(17)'}), '(prediction_length=34, context_length=17)\n', (6680, 6721), False, 'from kf_d3m_primitives.ts_forecasting.deep_ar.deepar_pipeline import DeepARPipeline\n'), ((6875, 6911), 'kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline.NBEATSPipeline', 'NBEATSPipeline', ([], {'prediction_length': '(21)'}), '(prediction_length=21)\n', (6889, 6911), False, 'from kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline import NBEATSPipeline\n'), ((6961, 6997), 'kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline.NBEATSPipeline', 'NBEATSPipeline', ([], {'prediction_length': '(38)'}), '(prediction_length=38)\n', (6975, 6997), False, 'from 
kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline import NBEATSPipeline\n'), ((7054, 7090), 'kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline.NBEATSPipeline', 'NBEATSPipeline', ([], {'prediction_length': '(60)'}), '(prediction_length=60)\n', (7068, 7090), False, 'from kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline import NBEATSPipeline\n'), ((7152, 7188), 'kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline.NBEATSPipeline', 'NBEATSPipeline', ([], {'prediction_length': '(34)'}), '(prediction_length=34)\n', (7166, 7188), False, 'from kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline import NBEATSPipeline\n'), ((7331, 7358), 'kf_d3m_primitives.object_detection.retinanet.object_detection_retinanet_pipeline.ObjectDetectionRNPipeline', 'ObjectDetectionRNPipeline', ([], {}), '()\n', (7356, 7358), False, 'from kf_d3m_primitives.object_detection.retinanet.object_detection_retinanet_pipeline import ObjectDetectionRNPipeline\n'), ((7593, 7615), 'kf_d3m_primitives.data_preprocessing.data_cleaning.data_cleaning_pipeline.DataCleaningPipeline', 'DataCleaningPipeline', ([], {}), '()\n', (7613, 7615), False, 'from kf_d3m_primitives.data_preprocessing.data_cleaning.data_cleaning_pipeline import DataCleaningPipeline\n'), ((7739, 7753), 'kf_d3m_primitives.data_preprocessing.text_summarization.duke_pipeline.DukePipeline', 'DukePipeline', ([], {}), '()\n', (7751, 7753), False, 'from kf_d3m_primitives.data_preprocessing.text_summarization.duke_pipeline import DukePipeline\n'), ((7882, 7903), 'kf_d3m_primitives.feature_selection.pca_features.pca_features_pipeline.PcaFeaturesPipeline', 'PcaFeaturesPipeline', ([], {}), '()\n', (7901, 7903), False, 'from kf_d3m_primitives.feature_selection.pca_features.pca_features_pipeline import PcaFeaturesPipeline\n'), ((8029, 8049), 'kf_d3m_primitives.feature_selection.rf_features.rf_features_pipeline.RfFeaturesPipeline', 'RfFeaturesPipeline', ([], {}), '()\n', (8047, 8049), False, 'from 
kf_d3m_primitives.feature_selection.rf_features.rf_features_pipeline import RfFeaturesPipeline\n'), ((8182, 8197), 'kf_d3m_primitives.image_classification.imagenet_transfer_learning.gator_pipeline.GatorPipeline', 'GatorPipeline', ([], {}), '()\n', (8195, 8197), False, 'from kf_d3m_primitives.image_classification.imagenet_transfer_learning.gator_pipeline import GatorPipeline\n'), ((8498, 8521), 'kf_d3m_primitives.remote_sensing.classifier.mlp_classifier_pipeline.MlpClassifierPipeline', 'MlpClassifierPipeline', ([], {}), '()\n', (8519, 8521), False, 'from kf_d3m_primitives.remote_sensing.classifier.mlp_classifier_pipeline import MlpClassifierPipeline\n'), ((8864, 8879), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (8874, 8879), False, 'import os\n'), ((8988, 9022), 'os.remove', 'os.remove', (['f"""pipelines/{pipeline}"""'], {}), "(f'pipelines/{pipeline}')\n", (8997, 9022), False, 'import os\n'), ((9176, 9218), 'os.remove', 'os.remove', (['f"""pipeline_runs/{pipeline_run}"""'], {}), "(f'pipeline_runs/{pipeline_run}')\n", (9185, 9218), False, 'import os\n'), ((9039, 9062), 'os.listdir', 'os.listdir', (['"""pipelines"""'], {}), "('pipelines')\n", (9049, 9062), False, 'import os\n'), ((9239, 9266), 'os.listdir', 'os.listdir', (['"""pipeline_runs"""'], {}), "('pipeline_runs')\n", (9249, 9266), False, 'import os\n'), ((10023, 10060), 'shutil.rmtree', 'shutil.rmtree', (['f"""/scratch_dir/nbeats"""'], {}), "(f'/scratch_dir/nbeats')\n", (10036, 10060), False, 'import shutil\n')] |
# Generated by Django 3.2.12 on 2022-02-15 02:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial Django migration: creates the SpotPrice table
    # (currency/amount pair with a date stamp).

    initial = True

    # No prior migrations required.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='SpotPrice',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('currency', models.CharField(max_length=200)),
                ('amount', models.FloatField()),
                ('timestamp', models.DateField()),
            ],
        ),
    ]
| [
"django.db.models.DateField",
"django.db.models.FloatField",
"django.db.models.CharField",
"django.db.models.BigAutoField"
] | [((306, 402), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (325, 402), False, 'from django.db import migrations, models\n'), ((430, 462), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (446, 462), False, 'from django.db import migrations, models\n'), ((492, 511), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (509, 511), False, 'from django.db import migrations, models\n'), ((544, 562), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (560, 562), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 <NAME>.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.views.generic import View
from django.urls import reverse
from django.http import HttpResponseRedirect
from Functie.rol import Rollen, rol_get_huidige
from .view_maand import get_url_huidige_maand
class KalenderLandingPageView(View):
    """Redirect-only landing page: forwards the user to the calendar page
    that matches their currently active role.
    """

    @staticmethod
    def get(request, *args, **kwargs):
        # Pick the destination based on the active role of this session.
        rol_nu = rol_get_huidige(request)
        if rol_nu == Rollen.ROL_BB:
            return HttpResponseRedirect(reverse('Kalender:manager'))
        if rol_nu == Rollen.ROL_HWL:
            return HttpResponseRedirect(reverse('Kalender:vereniging'))
        # Any other role lands on the current-month calendar view.
        return HttpResponseRedirect(get_url_huidige_maand())
# end of file
| [
"django.http.HttpResponseRedirect",
"Functie.rol.rol_get_huidige",
"django.urls.reverse"
] | [((598, 622), 'Functie.rol.rol_get_huidige', 'rol_get_huidige', (['request'], {}), '(request)\n', (613, 622), False, 'from Functie.rol import Rollen, rol_get_huidige\n'), ((868, 893), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['url'], {}), '(url)\n', (888, 893), False, 'from django.http import HttpResponseRedirect\n'), ((678, 705), 'django.urls.reverse', 'reverse', (['"""Kalender:manager"""'], {}), "('Kalender:manager')\n", (685, 705), False, 'from django.urls import reverse\n'), ((764, 794), 'django.urls.reverse', 'reverse', (['"""Kalender:vereniging"""'], {}), "('Kalender:vereniging')\n", (771, 794), False, 'from django.urls import reverse\n')] |
import argparse
import os
import csv
import random
from utils import ensure_dir, get_project_path
from collections import defaultdict
# POS-tag for irrelevant tag selection
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
__author__ = "<NAME>"
def write_tsv(intention_dir_path, filename, keys, dict):
    """
    Write a TSV file with the given header and column data.

    :param intention_dir_path: directory to write the file into
    :param filename: name of the output file
    :param keys: header row (column names)
    :param dict: mapping column name -> sequence of values; columns are
        zipped together so each output row holds one value per column
        (parameter name kept for backward compatibility although it
        shadows the builtin)
    """
    # 'with' guarantees the file is closed even on error (the original
    # leaked the handle); newline='' is the documented requirement for
    # file objects passed to csv.writer.
    with open(intention_dir_path + "/" + filename, 'wt', newline='') as file_test:
        dict_writer = csv.writer(file_test, delimiter='\t')
        dict_writer.writerow(keys)
        for row in zip(*dict.values()):
            dict_writer.writerow(row)
def make_dataset(root_data_dir, complete_data_dir, incomplete_data_dir, results_dir):
    """
    Build a joint complete + incomplete intention-classification dataset.

    For every leaf dataset directory under ``complete_data_dir`` the matching
    complete/incomplete train and test TSV files are merged: test sets keep
    only the incomplete sentences (with the complete sentence as target),
    while train sets contain both the incomplete and the complete sentence
    for each example.  Train rows are shuffled before writing.

    :param root_data_dir: directory to save data
    :param complete_data_dir: subdirectory with complete data
    :param incomplete_data_dir: subdirectory with incomplete data
    :param results_dir: subdirectory to write the merged dataset into
    :return: None
    """
    print("Making incomplete intention classification dataset...")
    complete_data_dir_path = root_data_dir + '/' + complete_data_dir
    incomplete_data_dir_path = root_data_dir + '/' + incomplete_data_dir
    results_dir_path = root_data_dir + '/' + results_dir
    ensure_dir(results_dir_path)

    # Map every leaf sub-directory (one per dataset) to the files it contains.
    files_dictionary = defaultdict(list)
    for dir_path, sub_dirs, file_names in os.walk(complete_data_dir_path):
        if not sub_dirs:  # leaf directory == one dataset
            data_name = dir_path.split('/')[-1]
            files_dictionary[data_name] = file_names

    for k, v in files_dictionary.items():
        save_path = results_dir_path + '/' + k
        ensure_dir(save_path)
        for comp_v_i, inc_v_i in zip(['test.tsv', 'train.tsv'], ['test_withMissingWords.tsv', 'train_withMissingWords.tsv']):
            # 'with' guarantees both TSV files are closed (the original
            # opened them and never closed either handle).
            with open(complete_data_dir_path + '/' + k + '/' + comp_v_i, 'r') as complete_tsv_file, \
                    open(incomplete_data_dir_path + '/' + k + '/' + inc_v_i, 'r') as incomplete_tsv_file:
                reader_complete = csv.reader(complete_tsv_file, delimiter='\t')
                reader_incomplete = csv.reader(incomplete_tsv_file, delimiter='\t')
                sentences, labels, missing_words_arr, targets = [], [], [], []
                for row_count, (row_comp, row_inc) in enumerate(zip(reader_complete, reader_incomplete)):
                    if row_count == 0:  # skip the header row
                        continue
                    # Incomplete sentence, paired with its complete target.
                    sentences.append(row_inc[0])
                    labels.append(row_inc[1])
                    missing_words_arr.append(row_inc[2])
                    targets.append(row_comp[0])
                    if 'train' in comp_v_i:
                        # Training split additionally sees the complete sentence.
                        sentences.append(row_comp[0])
                        labels.append(row_comp[1])
                        missing_words_arr.append('')
                        targets.append(row_comp[0])
            # Shuffle only the training split.
            if 'train' in comp_v_i:
                c = list(zip(sentences, labels, missing_words_arr, targets))
                random.shuffle(c)
                sentences, labels, missing_words_arr, targets = zip(*c)
            # Save the merged split in (sentence, label, missing, target) format.
            keys = ['sentence', 'label', 'missing', 'target']
            data_dict = {'sentence': sentences, 'label': labels, 'missing': missing_words_arr, 'target': targets}
            write_tsv(save_path, comp_v_i, keys, data_dict)
    print("Complete + Incomplete intention classification dataset completed")
def init_args():
    """Parse the command-line options for the dataset-making script."""
    parser = argparse.ArgumentParser(description="Script to make intention recognition dataset")
    # (flag, default value, help text) for every string option.
    options = (
        ('--root_data_dir', get_project_path() + "/data",
         'Directory to save subdirectories, needs to be an absolute path'),
        ('--complete_data_dir', "complete_data",
         'Subdirectory with complete data'),
        ('--incomplete_data_dir', "incomplete_data_tfidf_lower_0.8_noMissingTag",
         'Subdirectory with incomplete data'),
        ('--results_dir', "comp_with_incomplete_data_tfidf_lower_0.8_noMissingTag",
         'Subdirectory to save Joint Complete and Incomplete data'),
    )
    for flag, default, help_text in options:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    return parser.parse_args()
if __name__ == '__main__':
    # Entry point: parse CLI options, then build the merged dataset.
    args = init_args()
    make_dataset(args.root_data_dir, args.complete_data_dir, args.incomplete_data_dir, args.results_dir)
| [
"random.shuffle",
"nltk.download",
"argparse.ArgumentParser",
"csv.writer",
"utils.ensure_dir",
"utils.get_project_path",
"collections.defaultdict",
"csv.reader",
"os.walk"
] | [((187, 209), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (200, 209), False, 'import nltk\n'), ((210, 253), 'nltk.download', 'nltk.download', (['"""averaged_perceptron_tagger"""'], {}), "('averaged_perceptron_tagger')\n", (223, 253), False, 'import nltk\n'), ((418, 455), 'csv.writer', 'csv.writer', (['file_test'], {'delimiter': '"""\t"""'}), "(file_test, delimiter='\\t')\n", (428, 455), False, 'import csv\n'), ((1186, 1214), 'utils.ensure_dir', 'ensure_dir', (['results_dir_path'], {}), '(results_dir_path)\n', (1196, 1214), False, 'from utils import ensure_dir, get_project_path\n'), ((1274, 1298), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (1285, 1298), False, 'from collections import defaultdict\n'), ((1317, 1348), 'os.walk', 'os.walk', (['complete_data_dir_path'], {}), '(complete_data_dir_path)\n', (1324, 1348), False, 'import os\n'), ((3548, 3636), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script to make intention recognition dataset"""'}), "(description=\n 'Script to make intention recognition dataset')\n", (3571, 3636), False, 'import argparse\n'), ((1620, 1641), 'utils.ensure_dir', 'ensure_dir', (['save_path'], {}), '(save_path)\n', (1630, 1641), False, 'from utils import ensure_dir, get_project_path\n'), ((1987, 2032), 'csv.reader', 'csv.reader', (['complete_tsv_file'], {'delimiter': '"""\t"""'}), "(complete_tsv_file, delimiter='\\t')\n", (1997, 2032), False, 'import csv\n'), ((2065, 2112), 'csv.reader', 'csv.reader', (['incomplete_tsv_file'], {'delimiter': '"""\t"""'}), "(incomplete_tsv_file, delimiter='\\t')\n", (2075, 2112), False, 'import csv\n'), ((3033, 3050), 'random.shuffle', 'random.shuffle', (['c'], {}), '(c)\n', (3047, 3050), False, 'import random\n'), ((3693, 3711), 'utils.get_project_path', 'get_project_path', ([], {}), '()\n', (3709, 3711), False, 'from utils import ensure_dir, get_project_path\n')] |
###demo code provided by <NAME> at www.steves-internet-guide.com
##email <EMAIL>
###Free to use for any purpose
"""
implements data logging class
"""
import time, os, json, logging
###############
class m_logger(object):
    """Class for logging data to a file.

    You can set the maximum number of messages per file (default 1000);
    when a file is full a new file is created. Log files are stored under
    a root directory plus a sub-directory named after the creation
    timestamp. Log data is flushed to disk immediately so that data is
    not lost. Data can be stored as plain text or in JSON format.
    """

    def __init__(self, log_dir="mlogs", log_recs=1000, number_logs=0):
        """
        :param log_dir: root directory for the log files
        :param log_recs: maximum number of records per log file
        :param number_logs: maximum number of log files (0 = unlimited)
        """
        self.log_dir = log_dir
        self.log_recs = log_recs
        self.number_logs = number_logs
        self.count = 0  # index of the current log file
        self.log_dir = self.create_log_dir(self.log_dir)
        self.fo = self.get_log_name(self.log_dir, self.count)
        self.new_file_flag = 0
        self.writecount = 0  # records written to the current file
        self.timenow = time.time()
        self.flush_flag = True
        self.flush_time = 2  # flush logs to disk every 2 seconds

    def __flushlogs(self):
        """Force buffered log data onto disk so nothing is lost on crash."""
        self.fo.flush()
        os.fsync(self.fo.fileno())
        self.timenow = time.time()

    def __del__(self):
        if not self.fo.closed:
            print("closing log file")
            self.fo.close()

    def close_file(self):
        """Close the current log file if it is still open."""
        if not self.fo.closed:
            print("closing log file")
            self.fo.close()

    def create_log_dir(self, log_dir):
        """Create (if needed) and return the timestamped log sub-directory."""
        self.t = time.localtime(time.time())
        self.time_stamp = (str(self.t[1]) + "-" + str(self.t[2]) + "-" +
                           str(self.t[3]) + "-" + str(self.t[4]))
        logging.info("creating sub directory" + str(self.time_stamp))
        # makedirs(exist_ok=True) replaces the original stat/except probe,
        # whose bare 'except:' silently swallowed every kind of error.
        os.makedirs(self.log_dir, exist_ok=True)
        self.log_sub_dir = self.log_dir + "/" + self.time_stamp
        os.makedirs(self.log_sub_dir, exist_ok=True)
        return self.log_sub_dir

    def get_log_name(self, log_dir, count):
        """Open (truncating) and return the log file numbered *count*."""
        self.log_numbr = "{0:03d}".format(count)
        logging.info("s is" + str(self.log_numbr))
        self.file_name = self.log_dir + "/" + "log" + self.log_numbr
        logging.info("creating log file " + self.file_name)
        # 'w' truncates any existing file; the original opened the file
        # twice (once to clear, once to append) for the same effect.
        f = open(self.file_name, 'w')
        return f

    def log_json(self, data):
        """Serialize *data* as JSON and append it as one log line."""
        jdata = json.dumps(data) + "\n"
        self.log_data(jdata)

    def log_data(self, data):
        """Append *data* (a string) to the log, rolling files when full.

        :return: True on success, False if the write failed
        """
        self.data = data  # kept for backward compatibility
        try:
            self.fo.write(data)
            self.writecount += 1
            self.__flushlogs()
            if self.writecount >= self.log_recs:
                self.count += 1  # counts number of logs
                if self.count > self.number_logs and self.number_logs != 0:
                    logging.info("too many logs: starting from 0")
                    self.count = 0  # reset
                # close the full file before rolling (original leaked the handle)
                self.fo.close()
                self.fo = self.get_log_name(self.log_dir, self.count)
                self.writecount = 0
        except Exception as e:
            # Exception, not BaseException: don't swallow KeyboardInterrupt
            # or SystemExit as the original did.
            logging.error("Error on_data: %s" % str(e))
            return False
        return True
| [
"json.dumps",
"logging.info",
"os.mkdir",
"os.stat",
"time.time"
] | [((1043, 1054), 'time.time', 'time.time', ([], {}), '()\n', (1052, 1054), False, 'import time, os, json, logging\n'), ((1327, 1338), 'time.time', 'time.time', ([], {}), '()\n', (1336, 1338), False, 'import time, os, json, logging\n'), ((2581, 2632), 'logging.info', 'logging.info', (["('creating log file ' + self.file_name)"], {}), "('creating log file ' + self.file_name)\n", (2593, 2632), False, 'import time, os, json, logging\n'), ((1768, 1779), 'time.time', 'time.time', ([], {}), '()\n', (1777, 1779), False, 'import time, os, json, logging\n'), ((2022, 2043), 'os.stat', 'os.stat', (['self.log_dir'], {}), '(self.log_dir)\n', (2029, 2043), False, 'import time, os, json, logging\n'), ((2189, 2214), 'os.stat', 'os.stat', (['self.log_sub_dir'], {}), '(self.log_sub_dir)\n', (2196, 2214), False, 'import time, os, json, logging\n'), ((2828, 2844), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2838, 2844), False, 'import time, os, json, logging\n'), ((2074, 2096), 'os.mkdir', 'os.mkdir', (['self.log_dir'], {}), '(self.log_dir)\n', (2082, 2096), False, 'import time, os, json, logging\n'), ((2245, 2271), 'os.mkdir', 'os.mkdir', (['self.log_sub_dir'], {}), '(self.log_sub_dir)\n', (2253, 2271), False, 'import time, os, json, logging\n'), ((3260, 3306), 'logging.info', 'logging.info', (['"""too many logs: starting from 0"""'], {}), "('too many logs: starting from 0')\n", (3272, 3306), False, 'import time, os, json, logging\n')] |
"""
2018 (c) piteren
some little methods (but frequently used) for Python
"""
from collections import OrderedDict
import csv
import inspect
import json
import os
import pickle
import random
import shutil
import string
import time
from typing import List, Callable, Any, Optional
# prepares function parameters dictionary
def get_params(function: Callable):
params_dict = {'without_defaults':[], 'with_defaults':OrderedDict()}
if function:
specs = inspect.getfullargspec(function)
params = specs.args
if not params: params = []
vals = specs.defaults
if not vals: vals = ()
while len(params) > len(vals):
params_dict['without_defaults'].append(params.pop(0))
params_dict['with_defaults'] = {k: v for k,v in zip(params,vals)}
return params_dict
# short(compressed) scientific notation for floats
def short_scin(
fl: float,
precision:int= 1):
sh = f'{fl:.{precision}E}'
sh = sh.replace('+0','')
sh = sh.replace('+','')
sh = sh.replace('-0','-')
sh = sh.replace('E','e')
return sh
# returns sting from float, always of given width
def float_to_str(
num: float,
width: int= 7):
if width < 5: width = 5
scientific_decimals = width-6 if width>6 else 0
ff = f'{num:.{scientific_decimals}E}'
if 1000 > num > 0.0001: ff = str(num)[:width]
if len(ff)<width: ff += '0'*(width-len(ff))
return ff
# *********************************************************************************************** file readers / writers
# ********************************************* for raise_exception=False each reader will return None if file not found
def r_pickle( # pickle reader
file_path,
obj_type= None, # if obj_type is given checks for compatibility with given type
raise_exception= False):
if not os.path.isfile(file_path):
if raise_exception: raise FileNotFoundError(f'file {file_path} not exists!')
return None
# obj = pickle.load(open(file_path, 'rb')) << replaced by:
with open(file_path, 'rb') as file: obj = pickle.load(file)
if obj_type: assert type(obj) is obj_type, f'ERROR: obj from file is not {str(obj_type)} type !!!'
return obj
def w_pickle( # pickle writer
obj,
file_path):
with open(file_path, 'wb') as file:
pickle.dump(obj, file)
def r_json( # json reader
file_path,
raise_exception= False):
if not os.path.isfile(file_path):
if raise_exception: raise FileNotFoundError(f'file {file_path} not exists!')
return None
with open(file_path, 'r', encoding='utf-8') as file:
return json.load(file)
def w_json( # json writer
data: dict,
file_path):
with open(file_path, 'w', encoding='utf-8') as file:
json.dump(data, file, indent=4, ensure_ascii=False)
def r_jsonl( # jsonl reader
file_path,
raise_exception=False):
if not os.path.isfile(file_path):
if raise_exception: raise FileNotFoundError(f'file {file_path} not exists!')
return None
with open(file_path, 'r', encoding='utf-8') as file:
return [json.loads(line) for line in file]
def w_jsonl( # jsonl writer
data: List[dict],
file_path):
with open(file_path, 'w', encoding='utf-8') as file:
for d in data:
json.dump(d, file, ensure_ascii=False)
file.write('\n')
def r_csv( # csv reader
file_path,
raise_exception= False):
if not os.path.isfile(file_path):
if raise_exception: raise FileNotFoundError(f'file {file_path} not exists!')
return None
with open(file_path, newline='') as f:
reader = csv.reader(f)
return [row for row in reader][1:]
# returns timestamp string
def stamp(
year= False,
date= True,
letters: Optional[int]= 3):
random.seed(time.time())
if date:
if year: stp = time.strftime('%y%m%d_%H%M')
else: stp = time.strftime('%m%d_%H%M')
else: stp = ''
if letters:
if date: stp += '_'
stp += ''.join([random.choice(string.ascii_letters) for _ in range(letters)])
return stp
# returns nice string of given list
def list_str(ls: List[Any], limit:Optional[int]=200):
lstr = [str(e) for e in ls]
lstr = '; '.join(lstr)
if limit: lstr = lstr[:limit]
return lstr
# prints nested dict
def print_nested_dict(dc: dict, ind_scale=2, line_limit=200):
tpD = {
dict: 'D',
list: 'L',
tuple: 'T',
str: 'S'}
def __prn_root(root: dict, ind, ind_scale=2, line_limit=line_limit):
spacer = ' ' * ind * ind_scale
for k in sorted(list(root.keys())):
tp = tpD.get(type(root[k]),'O')
ln = len(root[k]) if tp in tpD.values() else ''
exmpl = ''
if tp!='D':
exmpl = str(root[k])
if line_limit:
if len(exmpl)>line_limit: exmpl = f'{exmpl[:line_limit]}..'
exmpl = f' : {exmpl}'
print(f'{spacer}{k} [{tp}.{ln}]{exmpl}')
if type(root[k]) is dict: __prn_root(root[k],ind+1,ind_scale)
__prn_root(dc,ind=0,ind_scale=ind_scale)
# prepares folder, creates or flushes
def prep_folder(
folder_path :str, # folder path
flush_non_empty= False):
if flush_non_empty and os.path.isdir(folder_path): shutil.rmtree(folder_path)
os.makedirs(folder_path, exist_ok=True)
# random <0;1> probability function
def prob(p: float) -> bool:
return random.random() < p
# terminal progress bar
def progress_ (
iteration: float or int, # current iteration
total: float or int, # total iterations
prefix: str= '', # prefix string
suffix: str= '', # suffix string
length: int= 20,
fill: str= '█',
print_end: str= ''):
prog = iteration / total
if prog > 1: prog = 1
filled_length = int(length * prog)
bar = fill * filled_length + '-' * (length - filled_length)
print(f'\r{prefix} |{bar}| {prog*100:.1f}% {suffix}', end = print_end)
if prog == 1: print() | [
"collections.OrderedDict",
"json.loads",
"pickle.dump",
"random.choice",
"os.makedirs",
"time.strftime",
"pickle.load",
"inspect.getfullargspec",
"shutil.rmtree",
"os.path.isfile",
"os.path.isdir",
"time.time",
"json.load",
"random.random",
"csv.reader",
"json.dump"
] | [((5563, 5602), 'os.makedirs', 'os.makedirs', (['folder_path'], {'exist_ok': '(True)'}), '(folder_path, exist_ok=True)\n', (5574, 5602), False, 'import os\n'), ((426, 439), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (437, 439), False, 'from collections import OrderedDict\n'), ((474, 506), 'inspect.getfullargspec', 'inspect.getfullargspec', (['function'], {}), '(function)\n', (496, 506), False, 'import inspect\n'), ((1898, 1923), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1912, 1923), False, 'import os\n'), ((2140, 2157), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2151, 2157), False, 'import pickle\n'), ((2389, 2411), 'pickle.dump', 'pickle.dump', (['obj', 'file'], {}), '(obj, file)\n', (2400, 2411), False, 'import pickle\n'), ((2505, 2530), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (2519, 2530), False, 'import os\n'), ((2709, 2724), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2718, 2724), False, 'import json\n'), ((2857, 2908), 'json.dump', 'json.dump', (['data', 'file'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(data, file, indent=4, ensure_ascii=False)\n', (2866, 2908), False, 'import json\n'), ((3000, 3025), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (3014, 3025), False, 'import os\n'), ((3566, 3591), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (3580, 3591), False, 'import os\n'), ((3758, 3771), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3768, 3771), False, 'import csv\n'), ((3984, 3995), 'time.time', 'time.time', ([], {}), '()\n', (3993, 3995), False, 'import time\n'), ((5504, 5530), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (5517, 5530), False, 'import os\n'), ((5532, 5558), 'shutil.rmtree', 'shutil.rmtree', (['folder_path'], {}), '(folder_path)\n', (5545, 5558), False, 'import shutil\n'), ((5679, 5694), 'random.random', 'random.random', ([], 
{}), '()\n', (5692, 5694), False, 'import random\n'), ((3205, 3221), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3215, 3221), False, 'import json\n'), ((3407, 3445), 'json.dump', 'json.dump', (['d', 'file'], {'ensure_ascii': '(False)'}), '(d, file, ensure_ascii=False)\n', (3416, 3445), False, 'import json\n'), ((4033, 4061), 'time.strftime', 'time.strftime', (['"""%y%m%d_%H%M"""'], {}), "('%y%m%d_%H%M')\n", (4046, 4061), False, 'import time\n'), ((4085, 4111), 'time.strftime', 'time.strftime', (['"""%m%d_%H%M"""'], {}), "('%m%d_%H%M')\n", (4098, 4111), False, 'import time\n'), ((4206, 4241), 'random.choice', 'random.choice', (['string.ascii_letters'], {}), '(string.ascii_letters)\n', (4219, 4241), False, 'import random\n')] |
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from botorch.test_functions.michalewicz import (
GLOBAL_MAXIMIZER,
GLOBAL_MAXIMUM,
neg_michalewicz,
)
class TestNegMichalewicz(unittest.TestCase):
def test_single_eval_neg_michalewicz(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.zeros(10, device=device, dtype=dtype)
res = neg_michalewicz(X)
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, device.type)
self.assertEqual(res.shape, torch.Size())
def test_single_eval_neg_michalewicz_cuda(self):
if torch.cuda.is_available():
self.test_single_eval_neg_michalewicz(cuda=True)
def test_batch_eval_neg_michalewicz(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.zeros(2, 10, device=device, dtype=dtype)
res = neg_michalewicz(X)
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, device.type)
self.assertEqual(res.shape, torch.Size([2]))
def test_batch_eval_neg_michalewicz_cuda(self):
if torch.cuda.is_available():
self.test_batch_eval_neg_michalewicz(cuda=True)
def test_neg_michalewicz_global_maximum(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.tensor(
GLOBAL_MAXIMIZER, device=device, dtype=dtype, requires_grad=True
)
res = neg_michalewicz(X)
res.backward()
self.assertAlmostEqual(res.item(), GLOBAL_MAXIMUM, places=4)
self.assertLess(X.grad.abs().max().item(), 1e-3)
def test_neg_michalewicz_global_maximum_cuda(self):
if torch.cuda.is_available():
self.test_neg_michalewicz_global_maximum(cuda=False)
| [
"botorch.test_functions.michalewicz.neg_michalewicz",
"torch.tensor",
"torch.cuda.is_available",
"torch.Size",
"torch.zeros",
"torch.device"
] | [((791, 816), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (814, 816), False, 'import torch\n'), ((1387, 1412), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1410, 1412), False, 'import torch\n'), ((2050, 2075), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2073, 2075), False, 'import torch\n'), ((365, 385), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (377, 385), False, 'import torch\n'), ((399, 418), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (411, 418), False, 'import torch\n'), ((485, 528), 'torch.zeros', 'torch.zeros', (['(10)'], {'device': 'device', 'dtype': 'dtype'}), '(10, device=device, dtype=dtype)\n', (496, 528), False, 'import torch\n'), ((547, 565), 'botorch.test_functions.michalewicz.neg_michalewicz', 'neg_michalewicz', (['X'], {}), '(X)\n', (562, 565), False, 'from botorch.test_functions.michalewicz import GLOBAL_MAXIMIZER, GLOBAL_MAXIMUM, neg_michalewicz\n'), ((956, 976), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (968, 976), False, 'import torch\n'), ((990, 1009), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1002, 1009), False, 'import torch\n'), ((1076, 1122), 'torch.zeros', 'torch.zeros', (['(2)', '(10)'], {'device': 'device', 'dtype': 'dtype'}), '(2, 10, device=device, dtype=dtype)\n', (1087, 1122), False, 'import torch\n'), ((1141, 1159), 'botorch.test_functions.michalewicz.neg_michalewicz', 'neg_michalewicz', (['X'], {}), '(X)\n', (1156, 1159), False, 'from botorch.test_functions.michalewicz import GLOBAL_MAXIMIZER, GLOBAL_MAXIMUM, neg_michalewicz\n'), ((1555, 1575), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1567, 1575), False, 'import torch\n'), ((1589, 1608), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1601, 1608), False, 'import torch\n'), ((1675, 1753), 'torch.tensor', 'torch.tensor', (['GLOBAL_MAXIMIZER'], 
{'device': 'device', 'dtype': 'dtype', 'requires_grad': '(True)'}), '(GLOBAL_MAXIMIZER, device=device, dtype=dtype, requires_grad=True)\n', (1687, 1753), False, 'import torch\n'), ((1802, 1820), 'botorch.test_functions.michalewicz.neg_michalewicz', 'neg_michalewicz', (['X'], {}), '(X)\n', (1817, 1820), False, 'from botorch.test_functions.michalewicz import GLOBAL_MAXIMIZER, GLOBAL_MAXIMUM, neg_michalewicz\n'), ((712, 724), 'torch.Size', 'torch.Size', ([], {}), '()\n', (722, 724), False, 'import torch\n'), ((1306, 1321), 'torch.Size', 'torch.Size', (['[2]'], {}), '([2])\n', (1316, 1321), False, 'import torch\n')] |
from decimal import Decimal
from typing import List, Any
from common.Enums import SortingType
from models import Message
from .engine import db_engine, DBEngine
class MessageDAO:
def __init__(self, engine: DBEngine):
self.engine = engine
@staticmethod
def __make_insert_values_from_messages_array(messages: List[Message]) -> List[tuple]:
return [
(
message.username,
message.text,
Decimal(message.timestamp),
message.reply_count,
message.reply_users_count,
message.reactions_rate,
message.thread_length,
message.channel_id,
)
for message in messages
]
@staticmethod
def __request_messages_to_message_class(request_messages: List[Any]) -> List[Message]:
return [Message(**message) for message in request_messages]
@staticmethod
def __make_link_update_values_from_messages_array(messages: List[Message]) -> List[tuple]:
return [(x.link, Decimal(x.timestamp), x.channel_id) for x in messages]
async def create_messages(self, messages: List[Message]) -> None:
request = f"""
INSERT INTO message (username, text, timestamp, reply_count, reply_users_count,
reactions_rate, thread_length, channel_id)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8);
"""
sequence = self.__make_insert_values_from_messages_array(messages)
await self.engine.make_execute_many(request, sequence)
async def upsert_messages(self, messages: List[Message]) -> None:
request = f"""
INSERT INTO message (username, text, timestamp, reply_count, reply_users_count,
reactions_rate, thread_length, channel_id)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (timestamp, channel_id)
DO UPDATE SET
reply_count = EXCLUDED.reply_count,
reply_users_count = EXCLUDED.reply_users_count,
reactions_rate = EXCLUDED.reactions_rate,
thread_length = EXCLUDED.thread_length;
"""
sequence = self.__make_insert_values_from_messages_array(messages)
await self.engine.make_execute_many(request, sequence)
async def get_messages_without_links(self) -> List[Message]:
request = f"SELECT * FROM message WHERE link IS NULL;"
messages = await self.engine.make_fetch_rows(request)
return self.__request_messages_to_message_class(messages)
async def update_message_links(self, messages: List[Message]) -> None:
request = f" UPDATE message SET link=($1) WHERE timestamp=($2) AND channel_id=($3)"
sequence = self.__make_link_update_values_from_messages_array(messages)
await self.engine.make_execute_many(request, sequence)
async def get_top_messages(
self,
after_ts: str,
user_id: str,
sorting_type: SortingType = SortingType.REPLIES,
top_count: int = 10
) -> List[Message]:
request = f"""
SELECT * FROM message
WHERE timestamp >= $1 AND username NOT IN
(SELECT ignore_username FROM IgnoreList WHERE author_username = $3)
ORDER BY {sorting_type.value} DESC
LIMIT $2;
"""
messages = await self.engine.make_fetch_rows(request, after_ts, top_count, user_id)
return self.__request_messages_to_message_class(messages)
async def get_top_messages_by_channel_id(
self,
channel_id: str,
after_ts: str,
user_id: str,
sorting_type: SortingType = SortingType.REPLIES,
top_count: int = 10,
) -> List[Message]:
request = f"""
SELECT * FROM message
WHERE
channel_id=$1
AND
timestamp >= $2
AND
username NOT IN (SELECT ignore_username FROM IgnoreList WHERE author_username = $4)
ORDER BY {sorting_type.value} DESC
LIMIT $3;
"""
messages = await self.engine.make_fetch_rows(
request, channel_id, after_ts, top_count, user_id
)
return self.__request_messages_to_message_class(messages)
async def get_top_messages_by_preset_name(
self,
preset_name: str,
after_ts: str,
user_id: str,
sorting_type: SortingType = SortingType.REPLIES,
top_count: int = 10,
) -> List[Message]:
request = f"""
WITH presets AS (
SELECT *
FROM preset
WHERE name = $1
AND (username = $2 OR username IS NULL)
ORDER BY username NULLS LAST
LIMIT 1
)
SELECT message.* FROM message
JOIN presets preset
ON message.channel_id=ANY(preset.channel_ids)
WHERE message.timestamp >= $3 AND message.username NOT IN
(SELECT ignore_username FROM IgnoreList WHERE author_username = $2)
ORDER BY {sorting_type.value} DESC
LIMIT $4;
"""
messages = await self.engine.make_fetch_rows(
request, preset_name, user_id, after_ts, top_count
)
return self.__request_messages_to_message_class(messages)
message_dao = MessageDAO(db_engine)
| [
"models.Message",
"decimal.Decimal"
] | [((882, 900), 'models.Message', 'Message', ([], {}), '(**message)\n', (889, 900), False, 'from models import Message\n'), ((473, 499), 'decimal.Decimal', 'Decimal', (['message.timestamp'], {}), '(message.timestamp)\n', (480, 499), False, 'from decimal import Decimal\n'), ((1073, 1093), 'decimal.Decimal', 'Decimal', (['x.timestamp'], {}), '(x.timestamp)\n', (1080, 1093), False, 'from decimal import Decimal\n')] |
#!/usr/bin/python
import socket
import sys
junk = 'A'*500
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connect = s.connect(('192.168.129.128',21))
s.recv(1024)
s.send('USER '+junk+'\r\n')
| [
"socket.socket"
] | [((62, 111), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (75, 111), False, 'import socket\n')] |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.http import urlquote
class ConcreteModel(models.Model):
name = models.CharField(max_length=10)
class ProxyModel(ConcreteModel):
class Meta:
proxy = True
@python_2_unicode_compatible
class FooWithoutUrl(models.Model):
"""
Fake model not defining ``get_absolute_url`` for
ContentTypesTests.test_shortcut_view_without_get_absolute_url()
"""
name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.name
class FooWithUrl(FooWithoutUrl):
"""
Fake model defining ``get_absolute_url`` for
ContentTypesTests.test_shortcut_view().
"""
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.name)
class FooWithBrokenAbsoluteUrl(FooWithoutUrl):
"""
Fake model defining a ``get_absolute_url`` method containing an error
"""
def get_absolute_url(self):
return "/users/%s/" % self.unknown_field
| [
"django.db.models.CharField",
"django.utils.http.urlquote"
] | [((178, 209), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (194, 209), False, 'from django.db import models\n'), ((496, 540), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)'}), '(max_length=30, unique=True)\n', (512, 540), False, 'from django.db import models\n'), ((797, 816), 'django.utils.http.urlquote', 'urlquote', (['self.name'], {}), '(self.name)\n', (805, 816), False, 'from django.utils.http import urlquote\n')] |
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
class HealthCheckMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if request.META["PATH_INFO"] == "/health-check/":
return HttpResponse("ok")
response = self.get_response(request)
return response | [
"django.http.HttpResponse"
] | [((311, 329), 'django.http.HttpResponse', 'HttpResponse', (['"""ok"""'], {}), "('ok')\n", (323, 329), False, 'from django.http import HttpResponse\n')] |
import random
from contextlib import suppress
from typing import Optional
from discord import AllowedMentions, Embed, Forbidden
from discord.ext import commands
from bot.constants import Cats, Colours, NEGATIVE_REPLIES
from bot.utils import helpers
class Catify(commands.Cog):
"""Cog for the catify command."""
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(aliases=["ᓚᘏᗢify", "ᓚᘏᗢ"])
@commands.cooldown(1, 5, commands.BucketType.user)
async def catify(self, ctx: commands.Context, *, text: Optional[str]) -> None:
"""
Convert the provided text into a cat themed sentence by interspercing cats throughout text.
If no text is given then the users nickname is edited.
"""
if not text:
display_name = ctx.author.display_name
if len(display_name) > 26:
embed = Embed(
title=random.choice(NEGATIVE_REPLIES),
description=(
"Your display name is too long to be catified! "
"Please change it to be under 26 characters."
),
color=Colours.soft_red
)
await ctx.send(embed=embed)
return
else:
display_name += f" | {random.choice(Cats.cats)}"
await ctx.send(f"Your catified nickname is: `{display_name}`", allowed_mentions=AllowedMentions.none())
with suppress(Forbidden):
await ctx.author.edit(nick=display_name)
else:
if len(text) >= 1500:
embed = Embed(
title=random.choice(NEGATIVE_REPLIES),
description="Submitted text was too large! Please submit something under 1500 characters.",
color=Colours.soft_red
)
await ctx.send(embed=embed)
return
string_list = text.split()
for index, name in enumerate(string_list):
name = name.lower()
if "cat" in name:
if random.randint(0, 5) == 5:
string_list[index] = name.replace("cat", f"**{random.choice(Cats.cats)}**")
else:
string_list[index] = name.replace("cat", random.choice(Cats.cats))
for element in Cats.cats:
if element in name:
string_list[index] = name.replace(element, "cat")
string_len = len(string_list) // 3 or len(string_list)
for _ in range(random.randint(1, string_len)):
# insert cat at random index
if random.randint(0, 5) == 5:
string_list.insert(random.randint(0, len(string_list)), f"**{random.choice(Cats.cats)}**")
else:
string_list.insert(random.randint(0, len(string_list)), random.choice(Cats.cats))
text = helpers.suppress_links(" ".join(string_list))
await ctx.send(
f">>> {text}",
allowed_mentions=AllowedMentions.none()
)
def setup(bot: commands.Bot) -> None:
"""Loads the catify cog."""
bot.add_cog(Catify(bot))
| [
"random.choice",
"discord.AllowedMentions.none",
"contextlib.suppress",
"discord.ext.commands.cooldown",
"discord.ext.commands.command",
"random.randint"
] | [((392, 435), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['ᓚᘏᗢify', 'ᓚᘏᗢ']"}), "(aliases=['ᓚᘏᗢify', 'ᓚᘏᗢ'])\n", (408, 435), False, 'from discord.ext import commands\n'), ((441, 490), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (458, 490), False, 'from discord.ext import commands\n'), ((2663, 2692), 'random.randint', 'random.randint', (['(1)', 'string_len'], {}), '(1, string_len)\n', (2677, 2692), False, 'import random\n'), ((1519, 1538), 'contextlib.suppress', 'suppress', (['Forbidden'], {}), '(Forbidden)\n', (1527, 1538), False, 'from contextlib import suppress\n'), ((2759, 2779), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (2773, 2779), False, 'import random\n'), ((931, 962), 'random.choice', 'random.choice', (['NEGATIVE_REPLIES'], {}), '(NEGATIVE_REPLIES)\n', (944, 962), False, 'import random\n'), ((1349, 1373), 'random.choice', 'random.choice', (['Cats.cats'], {}), '(Cats.cats)\n', (1362, 1373), False, 'import random\n'), ((1706, 1737), 'random.choice', 'random.choice', (['NEGATIVE_REPLIES'], {}), '(NEGATIVE_REPLIES)\n', (1719, 1737), False, 'import random\n'), ((2167, 2187), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (2181, 2187), False, 'import random\n'), ((2995, 3019), 'random.choice', 'random.choice', (['Cats.cats'], {}), '(Cats.cats)\n', (3008, 3019), False, 'import random\n'), ((3179, 3201), 'discord.AllowedMentions.none', 'AllowedMentions.none', ([], {}), '()\n', (3199, 3201), False, 'from discord import AllowedMentions, Embed, Forbidden\n'), ((1473, 1495), 'discord.AllowedMentions.none', 'AllowedMentions.none', ([], {}), '()\n', (1493, 1495), False, 'from discord import AllowedMentions, Embed, Forbidden\n'), ((2385, 2409), 'random.choice', 'random.choice', (['Cats.cats'], {}), '(Cats.cats)\n', (2398, 2409), False, 'import random\n'), ((2867, 2891), 'random.choice', 
'random.choice', (['Cats.cats'], {}), '(Cats.cats)\n', (2880, 2891), False, 'import random\n'), ((2264, 2288), 'random.choice', 'random.choice', (['Cats.cats'], {}), '(Cats.cats)\n', (2277, 2288), False, 'import random\n')] |
import os
import data_utils
from pathlib import Path
top_path = Path(os.path.dirname(os.path.abspath(__file__)))
EBM_NLP = Path('/Users/ben/Desktop/ebm_nlp/repo/ebm_nlp_2_00/')
NO_LABEL = '0'
def overwrite_tags(new_tags, tags):
for i, t in enumerate(new_tags):
if t != NO_LABEL:
tags[i] = t
def get_tags(d):
pmid_tags = {}
for e in ['participants', 'interventions', 'outcomes']:
for a in (EBM_NLP / 'annotations' / 'aggregated' / 'starting_spans' / e / d).glob('*.ann'):
pmid = a.stem.split('.')[0]
tags = a.open().read().split()
tags = [e[0] if t == '1' else NO_LABEL for t in tags]
if pmid not in pmid_tags:
pmid_tags[pmid] = tags
else:
overwrite_tags(tags, pmid_tags[pmid])
return pmid_tags
def get_words(pmids):
return { pmid: (EBM_NLP / 'documents' / '{}.tokens'.format(pmid)).open().read().split() for pmid in pmids }
def get_seqs(tag_d, word_d, keys):
tag_seqs = []
word_seqs = []
for k in keys:
words, tags = data_utils.generate_seqs(word_d[k], tag_d[k])
tag_seqs += tags
word_seqs += words
return word_seqs, tag_seqs
TRAIN_TAG_D = get_tags(Path('train/'))
TRAIN_PMIDS = sorted(TRAIN_TAG_D.keys())
TRAIN_WORD_D = get_words(TRAIN_PMIDS)
TRAIN_WORDS, TRAIN_TAGS = get_seqs(TRAIN_TAG_D, TRAIN_WORD_D, TRAIN_PMIDS)
TEST_TAG_D = get_tags(Path('test/gold/'))
TEST_PMIDS = sorted(TEST_TAG_D.keys())
TEST_WORD_D = get_words(TEST_PMIDS)
TEST_WORDS, TEST_TAGS = get_seqs(TEST_TAG_D, TEST_WORD_D, TEST_PMIDS)
def train_words():
return TRAIN_WORDS
def train_tags():
return TRAIN_TAGS
def test_words():
return TEST_WORDS
def test_tags():
return TEST_TAGS
def word_embeddings():
return ((top_path / '..' / 'embeddings' / 'glove.840B.300d.txt').open(), 300)
| [
"os.path.abspath",
"data_utils.generate_seqs",
"pathlib.Path"
] | [((124, 177), 'pathlib.Path', 'Path', (['"""/Users/ben/Desktop/ebm_nlp/repo/ebm_nlp_2_00/"""'], {}), "('/Users/ben/Desktop/ebm_nlp/repo/ebm_nlp_2_00/')\n", (128, 177), False, 'from pathlib import Path\n'), ((1145, 1159), 'pathlib.Path', 'Path', (['"""train/"""'], {}), "('train/')\n", (1149, 1159), False, 'from pathlib import Path\n'), ((1339, 1357), 'pathlib.Path', 'Path', (['"""test/gold/"""'], {}), "('test/gold/')\n", (1343, 1357), False, 'from pathlib import Path\n'), ((86, 111), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'import os\n'), ((1002, 1047), 'data_utils.generate_seqs', 'data_utils.generate_seqs', (['word_d[k]', 'tag_d[k]'], {}), '(word_d[k], tag_d[k])\n', (1026, 1047), False, 'import data_utils\n')] |
#
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sympy.ntheory import factorint
import numpy as np
from sympy.combinatorics import Permutation
import io
import math
from config.strtools import *
import itertools
import struct
import config.formats
# Conversion of double to fixed point values
#
# - 8000 gives 8000 in C (int16)
# So when it is multiplied it will give the wrong sign for the result
# of the multiplication except if DSPE instructions with saturation are used
# to compute the negate (and we should get 7FFF).
#
# So for cortex-m without DSP extension, we should try to use 8001
# It is done but not yet tested.
def to_q63(v, dspe):
    """Convert a float in [-1, 1) to a Q63 hex literal string.

    v    : value to convert
    dspe : True when the target has the DSP extension (saturating negate),
           in which case the exact minimum 0x8000000000000000 is allowed.
           Otherwise the minimum is nudged to 0x8000000000000001 so a
           non-saturating negate cannot produce the wrong sign
           (see the comment block above these converters).
    """
    r = int(round(v * 2**63))
    if r > 0x07FFFFFFFFFFFFFFF:
        r = 0x07FFFFFFFFFFFFFFF
    # <= (not <) so that the exact minimum (v == -1.0) is also remapped
    # on non-DSPE targets.
    if r <= -0x08000000000000000:
        r = -0x08000000000000000 if dspe else -0x07FFFFFFFFFFFFFFF
    return ("0x%s" % format(struct.unpack('<Q', struct.pack('<q', r))[0], '016X'))
def to_q31(v, dspe):
    """Convert a float in [-1, 1) to a Q31 hex literal string.

    v    : value to convert
    dspe : True when the target has the DSP extension (saturating negate),
           in which case the exact minimum 0x80000000 is allowed. Otherwise
           the minimum is nudged to 0x80000001 so a non-saturating negate
           cannot produce the wrong sign.
    """
    r = int(round(v * 2**31))
    if r > 0x07FFFFFFF:
        r = 0x07FFFFFFF
    # <= (not <) so that the exact minimum (v == -1.0) is also remapped
    # on non-DSPE targets.
    if r <= -0x080000000:
        r = -0x080000000 if dspe else -0x07FFFFFFF
    return ("0x%s" % format(struct.unpack('<I', struct.pack('<i', r))[0], '08X'))
def to_q15(v, dspe):
    """Convert a float in [-1, 1) to a Q15 hex literal string.

    v    : value to convert
    dspe : True when the target has the DSP extension (saturating negate),
           in which case the exact minimum 0x8000 is allowed. Otherwise the
           minimum is nudged to 0x8001 so a non-saturating negate cannot
           produce the wrong sign.
    """
    r = int(round(v * 2**15))
    if r > 0x07FFF:
        r = 0x07FFF
    # <= (not <) so that the exact minimum (v == -1.0) is also remapped
    # on non-DSPE targets.
    if r <= -0x08000:
        r = -0x08000 if dspe else -0x07FFF
    return ("0x%s" % format(struct.unpack('<H', struct.pack('<h', r))[0], '04X'))
def to_q7(v, dspe):
    """Convert a float in [-1, 1) to a Q7 hex literal string.

    v    : value to convert
    dspe : True when the target has the DSP extension (saturating negate),
           in which case the exact minimum 0x80 is allowed. Otherwise the
           minimum is nudged to 0x81 so a non-saturating negate cannot
           produce the wrong sign.
    """
    r = int(round(v * 2**7))
    if r > 0x07F:
        r = 0x07F
    # <= (not <) so that the exact minimum (v == -1.0) is also remapped
    # on non-DSPE targets.
    if r <= -0x080:
        r = -0x080 if dspe else -0x07F
    return ("0x%s" % format(struct.unpack('<B', struct.pack('<b', r))[0], '02X'))
# Numeric tags identifying the element datatypes handled by this generator
# (fixed point Q7/Q15/Q31 and floating point f16/f32/f64).
Q7=1
Q15=2
Q31=3
F16=4
F32=5
F64=6
# In the final C++ code, we have a loop for a given radix.
# The input list here has not grouped the factors.
# The list need to be transformed into a list of pair.
# The pair being (radix,exponent)
def groupFactors(factors):
    """Group a flat list of radix factors into (radix, exponent) pairs.

    The result is returned as a flat list, e.g. [2, 4, 4, 4] -> [2, 1, 4, 3],
    since the generated C++ loops consume it in that form (see comment above).

    Returns [] for an empty input. (The previous sentinel-based loop
    produced a spurious [-1, 0] pair in that case.)
    """
    result = []
    for radix, run in itertools.groupby(factors):
        result += [radix, len(list(run))]
    return result
# Compute the grouped factors for the the FFT length originaln
# where the only possible radix are in primitiveFactors list.
def getFactors(primitiveFactors, originaln):
    """Greedily decompose the FFT length originaln into radixes taken from
    primitiveFactors (largest radix tried first).

    Returns a tuple (groupedfactors, factors, length) where:
      groupedfactors : flat [radix, exponent, ...] list (see groupFactors)
      factors        : the individual radixes, smallest first
      length         : the remaining FFT length after each stage

    The caller's primitiveFactors list is left unmodified (the previous
    version sorted it in place).
    """
    factors = []
    length = []
    # Work on a sorted copy so the caller's list is neither reordered
    # nor consumed.
    candidates = sorted(primitiveFactors, reverse=True)
    n = originaln
    while (n > 1) and candidates:
        if (n % candidates[0] == 0):
            factors.append(candidates[0])
            n = n // candidates[0]
        else:
            candidates = candidates[1:]
    # When lowest factors are at the beginning (like 2)
    # we use a special implementation of the loopcore template
    # and it is removing some cycles.
    # So, we will get (for instance) 2x8x8x8 instead of 8x8x8x2
    factors.reverse()
    for f in factors:
        originaln = originaln // f
        length.append(originaln)
    groupedfactors = groupFactors(factors)
    return (groupedfactors, factors, length)
# Apply the radix decomposition to compute the input -> output permutation
# computed by the FFT.
def radixReverse(f, n):
    """Digit-reverse the indices 0..n-1 with respect to the radix
    decomposition f (a sequence of factors whose product is n).

    Returns a length-n integer array: index i is mapped by reading its
    mixed-radix digits in reverse order.
    """
    reversed_axes = tuple(reversed(range(len(f))))
    grid = np.arange(n).reshape(f)
    return np.transpose(grid, reversed_axes).reshape(n)
def radixPermutation(factors, n):
    # Decompose the digit-reversal permutation of 0..n-1 (for the given
    # radix decomposition) into a flat sequence of index transpositions.
    # Returns (swaps, vectorizable) where swaps is a flat int array of
    # pairs and vectorizable is False when any permutation cycle is longer
    # than 2 (a single vectorized exchange cannot realize it).
    a = radixReverse(factors,n)
    tps = []
    vectorizable=True
    # cyclic_form returns the permutation as disjoint cycles; each cycle of
    # length k is expanded into k-1 transpositions.
    # NOTE(review): consumers presumably apply these swaps in emitted order —
    # preserve the ordering produced here.
    for c in Permutation.from_sequence(a).cyclic_form:
        if (len(c)>2):
            vectorizable = False
        for i in range(len(c)-1,0,-1):
            # 2 because those are indexes in an array of complex numbers but
            # with a real type.
            tps.append([2*c[i], 2*c[i-1]])
    return(np.array(tps,dtype=int).flatten(),vectorizable)
# CFFT Twiddle table
def cfft_twiddle(n):
    """Build the CFFT twiddle table for length n.

    The values are exp(-2*pi*j*k/n) for k = 0..n-1, interleaved into one
    flat float array [re0, im0, re1, im1, ...].
    """
    angles = 2.0 * math.pi * np.linspace(0, n, num=n, endpoint=False) / n
    re = np.cos(-angles)
    im = np.sin(-angles)
    # Interleave real and imaginary parts into a single flat array.
    return np.column_stack((re, im)).reshape(-1)
# RFFT twiddle for the merge and split steps.
def rfft_twiddle(n):
    """Build the RFFT split/merge twiddle table for length n.

    The values are -j * exp(-2*pi*j*k/n) for k = 0..n/2-1, returned as a
    flat float64 view [re0, im0, re1, im1, ...].
    """
    k = np.arange(n // 2)
    twid = -1.0j * np.exp(-2.0j * math.pi * k / n)
    return twid.view(dtype=np.float64)
# Compute the twiddle tables
def twiddle(transform, n):
    """Return the twiddle table for the requested transform kind.

    transform is "CFFT" or "RFFT"; any other value yields None.
    """
    generators = {
        "CFFT": cfft_twiddle,
        "RFFT": rfft_twiddle,
    }
    builder = generators.get(transform)
    return builder(n) if builder is not None else None
# Number of values emitted per line in the generated C array initializers.
NB_ELEMS_PER_LINE=3
# Generate C array content for a given datatype
def printFloat64Array(f, n):
    """Write the samples of n to f as "%.20f, " items, breaking the line
    after every NB_ELEMS_PER_LINE values."""
    for count, sample in enumerate(n, start=1):
        f.write("%.20f, " % sample)
        if count % NB_ELEMS_PER_LINE == 0:
            f.write("\n")
def printFloat32Array(f, n):
    """Write the samples of n to f as "%.20ff, " items (float32 literals),
    breaking the line after every NB_ELEMS_PER_LINE values."""
    for count, sample in enumerate(n, start=1):
        f.write("%.20ff, " % sample)
        if count % NB_ELEMS_PER_LINE == 0:
            f.write("\n")
def printFloat16Array(f, n):
    """Write float16 samples to f as C 'f16' literals, NB_ELEMS_PER_LINE per line."""
    emitted = 0
    for sample in n:
        print("%.8ff16, " % sample, end="", file=f)
        emitted += 1
        if emitted == NB_ELEMS_PER_LINE:
            emitted = 0
            print("", file=f)
def printQ31Array(f, mode, n):
    """Write Q31 samples to f as C literals; mode "DSP" selects the
    saturating rendering implemented by to_q31."""
    dsp = mode == "DSP"
    emitted = 0
    for sample in n:
        print(to_q31(sample, dsp) + ", ", end="", file=f)
        emitted += 1
        if emitted == NB_ELEMS_PER_LINE:
            emitted = 0
            print("", file=f)
def printQ15Array(f, mode, n):
    """Write Q15 samples to f as C literals; mode "DSP" selects the
    saturating rendering implemented by to_q15."""
    dsp = mode == "DSP"
    emitted = 0
    for sample in n:
        print(to_q15(sample, dsp) + ", ", end="", file=f)
        emitted += 1
        if emitted == NB_ELEMS_PER_LINE:
            emitted = 0
            print("", file=f)
def printQ7Array(f, mode, n):
    """Write Q7 samples to f as C literals; mode "DSP" selects the
    saturating rendering implemented by to_q7."""
    dsp = mode == "DSP"
    emitted = 0
    for sample in n:
        print(to_q7(sample, dsp) + ", ", end="", file=f)
        emitted += 1
        if emitted == NB_ELEMS_PER_LINE:
            emitted = 0
            print("", file=f)
# Print a C array
# Using the type, dpse mode, name
# (dpse mode is for knowing if 0x8000 must be generated as 8000 or 8001
# to avoid sign issues when multiplying with the twiddles)
def printArray(f, ctype, mode, name, a):
    """Emit a complete C array definition for the samples in a.

    ctype selects the element formatter; mode ("DSP" or not) only matters
    for the fixed-point types, where it controls how saturation values
    such as 0x8000 are rendered. The array length macro is NB_<NAME>.
    """
    define = "NB_" + name.upper()
    flat = a.reshape(len(a))
    print("__ALIGNED(8) const %s %s[%s]={" % (ctype, name, define), file=f)
    if ctype == "float64_t":
        printFloat64Array(f, flat)
    elif ctype == "float32_t":
        printFloat32Array(f, flat)
    elif ctype == "float16_t":
        printFloat16Array(f, flat)
    elif ctype == "Q31":
        printQ31Array(f, mode, flat)
    elif ctype == "Q15":
        printQ15Array(f, mode, flat)
    elif ctype == "Q7":
        printQ7Array(f, mode, flat)
    print("};", file=f)
# Convert a float value to a given datatype.
def convertToDatatype(r, ctype, mode):
    """Render the float value r as a C source literal of type ctype.

    Args:
        r: value to convert.
        ctype: C element type name ("float64_t", "float32_t", "float16_t",
            "Q31", "Q15" or "Q7").
        mode: core mode; "DSP" selects the saturating rendering used by the
            fixed-point converters (to avoid sign issues when multiplying
            with the twiddles).

    Returns:
        The literal as a string, e.g. "0.50000000000000000000f".

    Raises:
        ValueError: if ctype is not one of the supported type names.
            (Previously an unknown ctype fell through to ``return(result)``
            with ``result`` unbound, raising a confusing UnboundLocalError.)
    """
    DSPE = mode == "DSP"
    if ctype == "float64_t":
        return "%.20f" % r
    if ctype == "float32_t":
        return "%.20ff" % r
    if ctype == "float16_t":
        return "%.20ff16" % r
    if ctype == "Q31":
        return "Q31(%s)" % to_q31(r, DSPE)
    if ctype == "Q15":
        return "Q15(%s)" % to_q15(r, DSPE)
    if ctype == "Q7":
        return "Q7(%s)" % to_q7(r, DSPE)
    raise ValueError("Unsupported ctype: %s" % ctype)
def printArrayHeader(f, ctype, name, nbSamples):
    """Emit the #define for the element count and the extern declaration
    of the array into header file f."""
    count_macro = "NB_" + name.upper()
    f.write("#define %s %d\n" % (count_macro, nbSamples))
    f.write("extern __ALIGNED(8) const %s %s[%s];\n\n" % (ctype, name, count_macro))
# Print UINT arrays for permutations.
def printUInt32Array(f, name, a):
    """Emit a C uint32_t array definition holding the values of a
    (used for the FFT permutation tables)."""
    count_macro = "NB_" + name.upper()
    flat = a.reshape(len(a))
    print("__ALIGNED(8) const uint32_t %s[%s]={" % (name, count_macro), file=f)
    emitted = 0
    for value in flat:
        print("%d, " % value, end="", file=f)
        emitted += 1
        if emitted == NB_ELEMS_PER_LINE:
            emitted = 0
            print("", file=f)
    print("};", file=f)
def printUInt16Array(f, name, a):
    """Emit a C uint16_t array definition holding the values of a
    (used for the FFT permutation tables)."""
    count_macro = "NB_" + name.upper()
    flat = a.reshape(len(a))
    print("__ALIGNED(8) const uint16_t %s[%s]={" % (name, count_macro), file=f)
    emitted = 0
    for value in flat:
        print("%d, " % value, end="", file=f)
        emitted += 1
        if emitted == NB_ELEMS_PER_LINE:
            emitted = 0
            print("", file=f)
    print("};", file=f)
def printUInt32ArrayHeader(f, name, a):
    """Emit the #define and extern declaration for a uint32_t array."""
    count_macro = "NB_" + name.upper()
    f.write("#define %s %d\n" % (count_macro, len(a)))
    f.write("extern __ALIGNED(8) const uint32_t %s[%s];\n\n" % (name, count_macro))
def printUInt16ArrayHeader(f, name, a):
    """Emit the #define and extern declaration for a uint16_t array."""
    count_macro = "NB_" + name.upper()
    f.write("#define %s %d\n" % (count_macro, len(a)))
    f.write("extern __ALIGNED(8) const uint16_t %s[%s];\n\n" % (name, count_macro))
def getCtype(t):
    """Map a short type tag (f64/f32/f16/q31/q15/q7) to its C type name.

    Unknown tags map to "void".
    """
    mapping = {
        'f64': "float64_t",
        'f32': "float32_t",
        'f16': "float16_t",
        'q31': "Q31",
        'q15': "Q15",
        'q7': "Q7",
    }
    return mapping.get(t, "void")
# Configuration structures for CFFT and RFFT
cfftconfig = """cfftconfig<%s> config%d={
.normalization=%s,
.nbPerms=%s,
.perms=perm%d,
.nbTwiddle=%s,
.twiddle=twiddle%d,
.nbGroupedFactors=%d,
.nbFactors=%d,
.factors=factors%d,
.lengths=lengths%d,
.format=%d,
.reversalVectorizable=%d
};"""
rfftconfig = """rfftconfig<%s> config%d={
.nbTwiddle=%s,
.twiddle=twiddle%d
};"""
fftconfigHeader = """extern %sconfig<%s> config%d;"""
fftFactorArray = """const uint16_t factors%d[%d]=%s;\n"""
fftLengthArray = """const uint16_t lengths%d[%d]=%s;\n"""
# Description of a permutation
class Perm:
    """Permutation descriptor for one FFT length.

    Each instance draws a unique ID from the PermID class counter; the ID
    is used to name the generated C arrays (perm<i>, factors<i>, ...).
    The radix decomposition of the length is computed eagerly, while the
    index permutation itself is computed lazily on first access to
    ``perms``.
    """
    PermID = 0
    # Grouped factors and factors.
    def getFactors(core,nb,datatype):
        # NOTE(review): defined without `self`, so this behaves as a
        # static-style helper taking the core as its first argument; the
        # `getFactors` called in the body resolves to the module-level
        # function of the same name, not to this method.
        _groupedFactors,_factors,_lens=getFactors(core.radix(datatype,nb),nb)
        return(_factors)
    def __init__(self,core,nb,datatype):
        """Record the radix decomposition of length nb for core/datatype."""
        Perm.PermID = Perm.PermID + 1
        self._nb=nb
        self._id = Perm.PermID
        self._radixUsed=set([])
        self._groupedFactors,self._factors,self._lens=getFactors(core.radix(datatype,nb),nb)
        self._perms = None  # computed lazily by permutations()
        self._core=core
        self._isvectorizable=False
    def permutations(self):
        """Compute the index permutation and the vectorizability flag."""
        _permFactors=list(itertools.chain(*[self._core.getPermFactor(x) for x in self._factors]))
        #print(_permFactors)
        self._perms,self._isvectorizable = radixPermutation(_permFactors[::-1],self._nb)
    @property
    def isVectorizable(self):
        # Only meaningful after permutations() has run (e.g. via `perms`).
        return(self._isvectorizable)
    @property
    def permID(self):
        return(self._id)
    @property
    def perms(self):
        # Lazily computed permutation (pairs of transposition indices).
        if self._perms is not None:
            return(self._perms)
        else:
            self.permutations()
            return(self._perms)
    @property
    def factors(self):
        return(self._factors)
    @property
    def nbGroupedFactors(self):
        # _groupedFactors stores (factor, count) pairs flattened, hence /2.
        return(int(len(self._groupedFactors)/2))
    @property
    def nbFactors(self):
        return(len(self._factors))
    def writePermHeader(self,h):
        """Write the extern declaration of the permutation array into h."""
        printUInt16ArrayHeader(h,"perm%d" % self.permID,self.perms)
    def writePermCode(self,c):
        """Write the permutation array definition into c."""
        printUInt16Array(c,"perm%d" % self.permID,self.perms)
    def writeFactorDesc(self,c):
        """Write the factor and per-stage length arrays into c."""
        radixList="{%s}" % joinStr([str(x) for x in self._groupedFactors])
        lengthList="{%s}" % joinStr([str(x) for x in self._lens])
        print(fftFactorArray % (self.permID,2*self.nbGroupedFactors,radixList),file=c);
        print(fftLengthArray % (self.permID,len(self._lens),lengthList),file=c);
class Twiddle:
    """Twiddle-factor table for one transform, computed lazily.

    A class-level counter hands out unique IDs, used to name the
    generated C arrays (twiddle<i>).
    """
    TwiddleId = 0
    def __init__(self, transform, nb, datatype, mode):
        """Describe a table of length nb for transform/datatype/mode."""
        Twiddle.TwiddleId += 1
        self._id = Twiddle.TwiddleId
        self._datatype = datatype
        self._nb = nb
        self._twiddle = None  # filled on first access to `samples`
        self._transform = transform
        self._mode = mode
    @property
    def twiddleID(self):
        """Unique numeric suffix used in the generated C identifiers."""
        return self._id
    @property
    def datatype(self):
        """Short type tag (e.g. "f32", "q31") of the generated table."""
        return self._datatype
    @property
    def samples(self):
        """Twiddle samples, computed lazily and cached."""
        if self._twiddle is None:
            self._twiddle = twiddle(self._transform, self._nb)
        return self._twiddle
    @property
    def nbSamples(self):
        """Transform length this table was built for."""
        return self._nb
    @property
    def nbTwiddles(self):
        """Number of complex twiddles (an RFFT table only stores half)."""
        return self._nb // 2 if self._transform == "RFFT" else self._nb
    def writeTwidHeader(self, h):
        """Write the extern declaration of the table into header file h."""
        ctype = getCtype(self.datatype)
        # Twiddle is a complex array, so 2 * nbTwiddles real elements.
        printArrayHeader(h, ctype, "twiddle%d" % self.twiddleID, 2 * self.nbTwiddles)
    def writeTwidCode(self, c):
        """Write the table definition into source file c."""
        ctype = getCtype(self.datatype)
        printArray(c, ctype, self._mode, "twiddle%d" % self.twiddleID, self.samples)
class Config:
    """Initialization-structure descriptor tying a twiddle table to a
    permutation for one generated FFT configuration.

    Each instance draws a unique ID from the ConfigID class counter, used
    to name the generated C `config<i>` variable.
    """
    ConfigID = 0
    def __init__(self,transform,twiddle,perms,coreMode):
        """Bind a twiddle table and (for CFFT) a permutation descriptor."""
        Config.ConfigID = Config.ConfigID + 1
        self._id = Config.ConfigID
        self._twiddle=twiddle
        self._perms=perms
        self._transform=transform
        self._coreMode=coreMode
    @property
    def transform(self):
        return(self._transform)
    @property
    def configID(self):
        return(self._id)
    @property
    def perms(self):
        return(self._perms)
    @property
    def twiddle(self):
        return(self._twiddle)
    @property
    def nbSamples(self):
        return(self.twiddle.nbSamples)
    def writeConfigHeader(self,c):
        """Write the extern declaration of the config structure into c."""
        ctype=getCtype(self.twiddle.datatype)
        print(fftconfigHeader % (self.transform.lower(),ctype,self.configID),file=c)
    def writeConfigCode(self,c):
        """Write the config structure definition into c.

        RFFT configs only reference their twiddle table; CFFT configs also
        carry the normalization factor, permutation, factor arrays and
        (for fixed point) the accumulated output format shift.
        """
        ctype=getCtype(self.twiddle.datatype)
        twiddleLen = "NB_" + ("twiddle%d"% self.twiddle.twiddleID).upper()
        if self.transform == "RFFT":
            print(rfftconfig % (ctype,self.configID,twiddleLen,self.twiddle.twiddleID),file=c)
        else:
            normfactor = 1.0 / self.twiddle.nbSamples
            normFactorStr = convertToDatatype(normfactor,ctype,self._coreMode)
            permsLen = "NB_" + ("perm%d"% self.perms.permID).upper()
            outputFormat = 0
            #print(self.twiddle.datatype)
            #print(self.twiddle.nbSamples)
            #print(self.perms.factors)
            # For fixed point, each stage will change the output format.
            # We need to compute the final format of the FFT
            # and record it in the initialization structure
            # so that the user can easily know how to recover the
            # input format (q31, q15). It is encoded as a shift value.
            # The shift to apply to recover the input format
            # But applying this shift will saturate the result in general.
            if self.twiddle.datatype == "q15" or self.twiddle.datatype == "q31":
                for f in self.perms.factors:
                    #print(f,self.twiddle.datatype,self._coreMode)
                    # The file "formats.py" is describing the format of each radix
                    # and is used to compute the format of the FFT based
                    # on the decomposition of its length.
                    #
                    # Currently (since there is no vector version for fixed point)
                    # this is not taking into account the format change that may
                    # be implied by the vectorization in case it may be different
                    # from the scalar version.
                    formatForSize = config.formats.formats[f][self._coreMode]
                    outputFormat += formatForSize[self.twiddle.datatype]
            vectorizable=0
            if self.perms.isVectorizable:
                vectorizable = 1
            print(cfftconfig % (ctype,self.configID,normFactorStr,permsLen,self.perms.permID,
                twiddleLen,self.twiddle.twiddleID,self.perms.nbGroupedFactors,self.perms.nbFactors,
                self.perms.permID,self.perms.permID,outputFormat,vectorizable
                ),file=c)
| [
"struct.pack",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"numpy.empty",
"numpy.cos",
"numpy.sin",
"numpy.transpose",
"sympy.combinatorics.Permutation.from_sequence"
] | [((4077, 4095), 'numpy.transpose', 'np.transpose', (['a', 'r'], {}), '(a, r)\n', (4089, 4095), True, 'import numpy as np\n'), ((4688, 4698), 'numpy.cos', 'np.cos', (['(-a)'], {}), '(-a)\n', (4694, 4698), True, 'import numpy as np\n'), ((4705, 4715), 'numpy.sin', 'np.sin', (['(-a)'], {}), '(-a)\n', (4711, 4715), True, 'import numpy as np\n'), ((4725, 4768), 'numpy.empty', 'np.empty', (['(c.size + s.size,)'], {'dtype': 'c.dtype'}), '((c.size + s.size,), dtype=c.dtype)\n', (4733, 4768), True, 'import numpy as np\n'), ((4234, 4262), 'sympy.combinatorics.Permutation.from_sequence', 'Permutation.from_sequence', (['a'], {}), '(a)\n', (4259, 4262), False, 'from sympy.combinatorics import Permutation\n'), ((4964, 4974), 'numpy.exp', 'np.exp', (['(-a)'], {}), '(-a)\n', (4970, 4974), True, 'import numpy as np\n'), ((4642, 4682), 'numpy.linspace', 'np.linspace', (['(0)', 'n'], {'num': 'n', 'endpoint': '(False)'}), '(0, n, num=n, endpoint=False)\n', (4653, 4682), True, 'import numpy as np\n'), ((4902, 4952), 'numpy.linspace', 'np.linspace', (['(0)', '(n // 2)'], {'num': '(n // 2)', 'endpoint': '(False)'}), '(0, n // 2, num=n // 2, endpoint=False)\n', (4913, 4952), True, 'import numpy as np\n'), ((4528, 4552), 'numpy.array', 'np.array', (['tps'], {'dtype': 'int'}), '(tps, dtype=int)\n', (4536, 4552), True, 'import numpy as np\n'), ((1534, 1554), 'struct.pack', 'struct.pack', (['"""<q"""', 'r'], {}), "('<q', r)\n", (1545, 1554), False, 'import struct\n'), ((1819, 1839), 'struct.pack', 'struct.pack', (['"""<i"""', 'r'], {}), "('<i', r)\n", (1830, 1839), False, 'import struct\n'), ((2085, 2105), 'struct.pack', 'struct.pack', (['"""<h"""', 'r'], {}), "('<h', r)\n", (2096, 2105), False, 'import struct\n'), ((2340, 2360), 'struct.pack', 'struct.pack', (['"""<b"""', 'r'], {}), "('<b', r)\n", (2351, 2360), False, 'import struct\n')] |
'''
Batch inference script for an ONNX ResNet-18 classifier.

Loads every image from ../sample_images, resizes it to 224x224, and runs
a single batched inference, printing the predicted ImageNet class per image.
'''
import onnx
import onnxruntime as ort
import numpy as np
import cv2
from imagenet_classlist import get_class
import os
model_path = 'resnet18.onnx'
model = onnx.load(model_path)
image_path = "../sample_images"
try:
    print("Checking model...")
    # Validate the model graph before creating a runtime session.
    onnx.checker.check_model(model)
    onnx.helper.printable_graph(model.graph)
    print("Model checked...")
    print("Running inference...")
    ort_session = ort.InferenceSession(model_path)
    img_list = []
    for image in os.listdir(image_path):
        img = cv2.imread(os.path.join(image_path, image), cv2.IMREAD_COLOR)
        img = cv2.resize(img, ((224, 224)))
        # HWC -> CHW, as expected by the exported model input layout.
        img = np.moveaxis(img, -1, 0)  # (Batch_size, channels, width, heigth)
        img_list.append(img/255.0)  # Normalize the image
    # NOTE(review): a Python list of arrays is passed as the input feed;
    # ONNX Runtime typically expects a single numpy array of shape
    # (batch, C, H, W) with the model's dtype (often float32, while /255.0
    # yields float64) — confirm this matches the exported model.
    outputs = ort_session.run(None, {"input":img_list})
    out = np.array(outputs)
    # out[0] holds the logits for the whole batch, one row per image.
    for image_num, image_name in zip(range(out.shape[1]), os.listdir(image_path)):
        index = out[0][image_num]
        print("Image : {0}, Class : {1}".format(image_name, get_class(np.argmax(index))))
except Exception as e:
    print("Exception occured : ", e)
"os.listdir",
"onnx.helper.printable_graph",
"onnxruntime.InferenceSession",
"os.path.join",
"numpy.argmax",
"numpy.array",
"onnx.load",
"numpy.moveaxis",
"cv2.resize",
"onnx.checker.check_model"
] | [((310, 331), 'onnx.load', 'onnx.load', (['model_path'], {}), '(model_path)\n', (319, 331), False, 'import onnx\n'), ((405, 436), 'onnx.checker.check_model', 'onnx.checker.check_model', (['model'], {}), '(model)\n', (429, 436), False, 'import onnx\n'), ((441, 481), 'onnx.helper.printable_graph', 'onnx.helper.printable_graph', (['model.graph'], {}), '(model.graph)\n', (468, 481), False, 'import onnx\n'), ((574, 606), 'onnxruntime.InferenceSession', 'ort.InferenceSession', (['model_path'], {}), '(model_path)\n', (594, 606), True, 'import onnxruntime as ort\n'), ((643, 665), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (653, 665), False, 'import os\n'), ((989, 1006), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (997, 1006), True, 'import numpy as np\n'), ((757, 784), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (767, 784), False, 'import cv2\n'), ((801, 824), 'numpy.moveaxis', 'np.moveaxis', (['img', '(-1)', '(0)'], {}), '(img, -1, 0)\n', (812, 824), True, 'import numpy as np\n'), ((1066, 1088), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (1076, 1088), False, 'import os\n'), ((692, 723), 'os.path.join', 'os.path.join', (['image_path', 'image'], {}), '(image_path, image)\n', (704, 723), False, 'import os\n'), ((1195, 1211), 'numpy.argmax', 'np.argmax', (['index'], {}), '(index)\n', (1204, 1211), True, 'import numpy as np\n')] |
from .functions1 import my_sum, factorial
from .constants import pi
from .print import myprint
from pkg_resources import get_distribution, DistributionNotFound
try:
    # Expose the installed distribution's version as __version__.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed
    pass
| [
"pkg_resources.get_distribution"
] | [((183, 209), 'pkg_resources.get_distribution', 'get_distribution', (['__name__'], {}), '(__name__)\n', (199, 209), False, 'from pkg_resources import get_distribution, DistributionNotFound\n')] |
# -*- coding: utf-8 -*-
import sys
import pytest
from snake.common import Frame, Point, BoundaryCollision, SelfCollision
from snake.config import GameConfig
from snake.model import SnakeModel
@pytest.fixture
def config():
    """Game configuration: solid walls, no initial or spawning food."""
    config = GameConfig()
    config.solid_walls = True
    config.initial_food_count = 0
    config.food_increase_interval = 0
    return config
@pytest.fixture
def model(config):
    """Initial state (T0)."""
    # Fresh single-segment snake on a 10x10 board.
    frame = Frame(10, 10)
    m = SnakeModel(frame, config)
    return m
class TestSnakeModelInitialState:
    """Invariants of a freshly created single-segment snake (state T0)."""
    def test_length(self, model):
        assert len(model) == 1
    def test_score(self, model):
        assert model.score == 0
    def test_occupied_locations(self, model):
        # The head is the only occupied cell.
        assert {model.head_location} == set(model.occupied_locations)
    def test_empty_locations(self, model):
        assert model.head_location not in model.empty_locations
    def test_available_food_locations(self, model):
        # Food may spawn anywhere the snake is not.
        assert model.available_food_locations == model.empty_locations
@pytest.fixture
def model2(model):
    """Initial state (T0) + 3 steps forward, where each spot had food."""
    model.face_up()
    # Place food on the three cells directly above the head, then walk
    # through them so the snake eats (and grows) on every step.
    model.food_locations.append(model.head_location + Point(0, 1))
    model.food_locations.append(model.head_location + Point(0, 2))
    model.food_locations.append(model.head_location + Point(0, 3))
    model.step()
    model.step()
    model.step()
    return model
class TestSnakeEatsAndGrows:
    """After eating three food items the snake has grown by three segments."""
    def test_length(self, model2):
        assert len(model2) == 4
    def test_score(self, model2):
        assert model2.score == 3
class TestBoundaryCollision:
    """Walking off each edge of a tiny 3x3 board must raise BoundaryCollision.

    Two steps are taken inside the `raises` block; on a 3x3 board this is
    assumed to be enough to hit the wall from the starting position in any
    direction (TODO confirm the spawn position guarantees this).
    """
    def test_raises_scenario_1(self, config):
        model = SnakeModel(Frame(3, 3), config)
        model.face_up()
        with pytest.raises(BoundaryCollision):
            model.step()
            model.step()
    def test_raises_scenario_2(self, config):
        model = SnakeModel(Frame(3, 3), config)
        model.face_down()
        with pytest.raises(BoundaryCollision):
            model.step()
            model.step()
    def test_raises_scenario_3(self, config):
        model = SnakeModel(Frame(3, 3), config)
        model.face_left()
        with pytest.raises(BoundaryCollision):
            model.step()
            model.step()
    def test_raises_scenario_4(self, config):
        model = SnakeModel(Frame(3, 3), config)
        model.face_right()
        with pytest.raises(BoundaryCollision):
            model.step()
            model.step()
class TestSelfCollision:
    """SelfCollision must be raised only when the snake actually overlaps
    its own body, not on mere direction changes next to it.

    Fix: the skip guards below used ``sys.version_info.major == 3`` with a
    reason blaming Python 2, which permanently disabled these tests on
    every Python 3 interpreter. The condition now matches the stated
    reason and skips on Python 2 instead.
    """
    def test_valid_scenario_raises(self, model):
        """Snake turns into itself."""
        model.face_up()
        model.step(should_grow=True)
        model.step(should_grow=True)
        model.step(should_grow=True)
        model.face_right()
        model.step()
        model.face_down()
        model.step()
        model.face_left()
        with pytest.raises(SelfCollision):
            model.step()
    # The scenarios below should never raise
    def test_scenario_1a(self, model):
        model.face_up()
        model.step(should_grow=True)
        model.face_down()
        model.step()
    def test_scenario_1b(self, model):
        model.face_down()
        model.step(should_grow=True)
        model.face_up()
        model.step()
    def test_scenario_1c(self, model):
        model.face_left()
        model.step(should_grow=True)
        model.face_right()
        model.step()
    def test_scenario_1d(self, model):
        model.face_right()
        model.step(should_grow=True)
        model.face_left()
        model.step()
    @pytest.mark.skipif(sys.version_info.major == 2, reason='Non-critical test failure from Python2.')
    def test_scenario_2a(self, model):
        model.face_up()
        model.step(should_grow=True)
        model.face_left()
        model.face_down()
        model.step()
    @pytest.mark.skipif(sys.version_info.major == 2, reason='Non-critical test failure from Python2.')
    def test_scenario_2b(self, model):
        model.face_up()
        model.step(should_grow=True)
        model.face_right()
        model.face_down()
        model.step()
    @pytest.mark.skipif(sys.version_info.major == 2, reason='Non-critical test failure from Python2.')
    def test_scenario_3a(self, model):
        model.face_down()
        model.step(should_grow=True)
        model.face_left()
        model.face_up()
        model.step()
    @pytest.mark.skipif(sys.version_info.major == 2, reason='Non-critical test failure from Python2.')
    def test_scenario_3b(self, model):
        model.face_down()
        model.step(should_grow=True)
        model.face_right()
        model.face_up()
        model.step()
    @pytest.mark.skipif(sys.version_info.major == 2, reason='Non-critical test failure from Python2.')
    def test_scenario_4a(self, model):
        model.face_left()
        model.step(should_grow=True)
        model.face_down()
        model.face_right()
        model.step()
    @pytest.mark.skipif(sys.version_info.major == 2, reason='Non-critical test failure from Python2.')
    def test_scenario_4b(self, model):
        model.face_left()
        model.step(should_grow=True)
        model.face_up()
        model.face_right()
        model.step()
    @pytest.mark.skipif(sys.version_info.major == 2, reason='Non-critical test failure from Python2.')
    def test_scenario_5a(self, model):
        model.face_right()
        model.step(should_grow=True)
        model.face_down()
        model.face_left()
        model.step()
    @pytest.mark.skipif(sys.version_info.major == 2, reason='Non-critical test failure from Python2.')
    def test_scenario_5b(self, model):
        model.face_right()
        model.step(should_grow=True)
        model.face_up()
        model.face_left()
        model.step()
| [
"snake.common.Frame",
"snake.model.SnakeModel",
"snake.common.Point",
"snake.config.GameConfig",
"pytest.raises",
"pytest.mark.skipif"
] | [((240, 252), 'snake.config.GameConfig', 'GameConfig', ([], {}), '()\n', (250, 252), False, 'from snake.config import GameConfig\n'), ((452, 465), 'snake.common.Frame', 'Frame', (['(10)', '(10)'], {}), '(10, 10)\n', (457, 465), False, 'from snake.common import Frame, Point, BoundaryCollision, SelfCollision\n'), ((474, 499), 'snake.model.SnakeModel', 'SnakeModel', (['frame', 'config'], {}), '(frame, config)\n', (484, 499), False, 'from snake.model import SnakeModel\n'), ((3585, 3687), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info.major == 3)'], {'reason': '"""Non-critical test failure from Python2."""'}), "(sys.version_info.major == 3, reason=\n 'Non-critical test failure from Python2.')\n", (3603, 3687), False, 'import pytest\n'), ((3862, 3964), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info.major == 3)'], {'reason': '"""Non-critical test failure from Python2."""'}), "(sys.version_info.major == 3, reason=\n 'Non-critical test failure from Python2.')\n", (3880, 3964), False, 'import pytest\n'), ((4140, 4242), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info.major == 3)'], {'reason': '"""Non-critical test failure from Python2."""'}), "(sys.version_info.major == 3, reason=\n 'Non-critical test failure from Python2.')\n", (4158, 4242), False, 'import pytest\n'), ((4417, 4519), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info.major == 3)'], {'reason': '"""Non-critical test failure from Python2."""'}), "(sys.version_info.major == 3, reason=\n 'Non-critical test failure from Python2.')\n", (4435, 4519), False, 'import pytest\n'), ((4695, 4797), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info.major == 3)'], {'reason': '"""Non-critical test failure from Python2."""'}), "(sys.version_info.major == 3, reason=\n 'Non-critical test failure from Python2.')\n", (4713, 4797), False, 'import pytest\n'), ((4975, 5077), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info.major == 
3)'], {'reason': '"""Non-critical test failure from Python2."""'}), "(sys.version_info.major == 3, reason=\n 'Non-critical test failure from Python2.')\n", (4993, 5077), False, 'import pytest\n'), ((5253, 5355), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info.major == 3)'], {'reason': '"""Non-critical test failure from Python2."""'}), "(sys.version_info.major == 3, reason=\n 'Non-critical test failure from Python2.')\n", (5271, 5355), False, 'import pytest\n'), ((5533, 5635), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info.major == 3)'], {'reason': '"""Non-critical test failure from Python2."""'}), "(sys.version_info.major == 3, reason=\n 'Non-critical test failure from Python2.')\n", (5551, 5635), False, 'import pytest\n'), ((1214, 1225), 'snake.common.Point', 'Point', (['(0)', '(1)'], {}), '(0, 1)\n', (1219, 1225), False, 'from snake.common import Frame, Point, BoundaryCollision, SelfCollision\n'), ((1281, 1292), 'snake.common.Point', 'Point', (['(0)', '(2)'], {}), '(0, 2)\n', (1286, 1292), False, 'from snake.common import Frame, Point, BoundaryCollision, SelfCollision\n'), ((1348, 1359), 'snake.common.Point', 'Point', (['(0)', '(3)'], {}), '(0, 3)\n', (1353, 1359), False, 'from snake.common import Frame, Point, BoundaryCollision, SelfCollision\n'), ((1699, 1710), 'snake.common.Frame', 'Frame', (['(3)', '(3)'], {}), '(3, 3)\n', (1704, 1710), False, 'from snake.common import Frame, Point, BoundaryCollision, SelfCollision\n'), ((1757, 1789), 'pytest.raises', 'pytest.raises', (['BoundaryCollision'], {}), '(BoundaryCollision)\n', (1770, 1789), False, 'import pytest\n'), ((1915, 1926), 'snake.common.Frame', 'Frame', (['(3)', '(3)'], {}), '(3, 3)\n', (1920, 1926), False, 'from snake.common import Frame, Point, BoundaryCollision, SelfCollision\n'), ((1975, 2007), 'pytest.raises', 'pytest.raises', (['BoundaryCollision'], {}), '(BoundaryCollision)\n', (1988, 2007), False, 'import pytest\n'), ((2133, 2144), 'snake.common.Frame', 'Frame', 
(['(3)', '(3)'], {}), '(3, 3)\n', (2138, 2144), False, 'from snake.common import Frame, Point, BoundaryCollision, SelfCollision\n'), ((2193, 2225), 'pytest.raises', 'pytest.raises', (['BoundaryCollision'], {}), '(BoundaryCollision)\n', (2206, 2225), False, 'import pytest\n'), ((2351, 2362), 'snake.common.Frame', 'Frame', (['(3)', '(3)'], {}), '(3, 3)\n', (2356, 2362), False, 'from snake.common import Frame, Point, BoundaryCollision, SelfCollision\n'), ((2412, 2444), 'pytest.raises', 'pytest.raises', (['BoundaryCollision'], {}), '(BoundaryCollision)\n', (2425, 2444), False, 'import pytest\n'), ((2880, 2908), 'pytest.raises', 'pytest.raises', (['SelfCollision'], {}), '(SelfCollision)\n', (2893, 2908), False, 'import pytest\n')] |
import numpy as np
import tensorflow as tf
from tqdm import trange
from fedsimul.utils.model_utils import batch_data
from fedsimul.utils.tf_utils import graph_size
from fedsimul.utils.tf_utils import process_grad
class Model(object):
    '''
    This is the tf model for the MNIST dataset with multiple class learner regression
    (a single dense layer with softmax cross-entropy, i.e. multinomial
    logistic regression). Images are 28px by 28px (784 flattened features).
    Built with TF1 graph/session style: each instance owns its own graph
    and session.
    '''
    def __init__(self, num_classes, optimizer, gpu_id=0, seed=1):
        """ Initialize the learner.
        Args:
            num_classes: int
            optimizer: tf.train.Optimizer
            gpu_id: int, default 0
            seed: int, default 1
        """
        # params
        self.num_classes = num_classes
        # create computation graph
        self.graph = tf.Graph()
        with self.graph.as_default():
            tf.set_random_seed(123 + seed)
            _created = self.create_model(optimizer)
            self.features = _created[0]
            self.labels = _created[1]
            self.train_op = _created[2]
            self.grads = _created[3]
            self.eval_metric_ops = _created[4]
            self.loss = _created[5]
            self.saver = tf.train.Saver()
        # set the gpu resources
        # allow_growth avoids grabbing all GPU memory up front.
        gpu_options = tf.compat.v1.GPUOptions(visible_device_list="{}".format(gpu_id), allow_growth=True)
        config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
        self.sess = tf.Session(graph=self.graph, config=config)
        # self.sess = tf.Session(graph=self.graph)
        # REVIEW: find memory footprint and compute cost of the model
        self.size = graph_size(self.graph)
        with self.graph.as_default():
            self.sess.run(tf.global_variables_initializer())
            # FLOP count of one forward pass, used to report computation cost.
            metadata = tf.RunMetadata()
            opts = tf.profiler.ProfileOptionBuilder.float_operation()
            self.flops = tf.profiler.profile(self.graph, run_meta=metadata, cmd='scope', options=opts).total_float_ops
    def create_model(self, optimizer):
        """ Model function for Logistic Regression.
        Args:
            optimizer: tf.train.Optimizer
        Returns:
            tuple: (features, labels, train_op, grads, eval_metric_ops, loss)
        """
        features = tf.placeholder(tf.float32, shape=[None, 784], name='features')
        labels = tf.placeholder(tf.int64, shape=[None, ], name='labels')
        # Single dense layer with L2 weight regularization.
        logits = tf.layers.dense(inputs=features,
                                units=self.num_classes,
                                kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
        predictions = {
            "classes": tf.argmax(input=logits, axis=1),
            "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
        }
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        grads_and_vars = optimizer.compute_gradients(loss)
        grads, _ = zip(*grads_and_vars)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=tf.train.get_global_step())
        # Number of correct predictions in the batch.
        eval_metric_ops = tf.count_nonzero(tf.equal(labels, predictions["classes"]))
        return features, labels, train_op, grads, eval_metric_ops, loss
    def set_params(self, latest_params=None, momentum=False, gamma=0.9):
        """ Set parameters from server
        Args:
            latest_params: list
                list of tf.Variables
            momentum: boolean
                if True, blend each variable as gamma*current + (1-gamma)*new
                instead of overwriting it
            gamma: float
        TODO: update variable with its local variable and the value from
            latest_params
        TODO: DO NOT set_params from the global, instead, use the global gradient to update
        """
        if latest_params is not None:
            with self.graph.as_default():
                # previous gradient
                all_vars = tf.trainable_variables()
                for variable, value in zip(all_vars, latest_params):
                    if momentum:
                        curr_val = self.sess.run(variable)
                        new_val = gamma * curr_val + (1 - gamma) * value
                        # TODO: use `assign` function instead of `load`
                        variable.load(new_val, self.sess)
                    else:
                        variable.load(value, self.sess)
    def get_params(self):
        """ Get model parameters.
        Returns:
            model_params: list
                list of tf.Variables
        """
        with self.graph.as_default():
            model_params = self.sess.run(tf.trainable_variables())
        return model_params
    def get_gradients(self, data, model_len):
        """ Access gradients of a given dataset.
        Args:
            data: dict
            model_len: int
        Returns:
            num_samples: int
            grads: tuple
        """
        grads = np.zeros(model_len)
        num_samples = len(data['y'])
        with self.graph.as_default():
            model_grads = self.sess.run(self.grads, feed_dict={self.features: data['x'],
                                                               self.labels: data['y']})
            # Flatten the per-variable gradients into a single vector.
            grads = process_grad(model_grads)
        return num_samples, grads
    def solve_inner(self, data, num_epochs=1, batch_size=32):
        '''Solves local optimization problem.
        Args:
            data: dict with format {'x':[], 'y':[]}
            num_epochs: int
            batch_size: int
        Returns:
            soln: list
            comp: float
                estimated computation cost (FLOPs over all processed batches)
        '''
        for _ in trange(num_epochs, desc='Epoch: ', leave=False, ncols=120):
            for X, y in batch_data(data, batch_size):
                with self.graph.as_default():
                    self.sess.run(self.train_op, feed_dict={self.features: X, self.labels: y})
        soln = self.get_params()
        comp = num_epochs * (len(data['y']) // batch_size) * batch_size * self.flops
        return soln, comp
    def test(self, data):
        '''
        Args:
            data: dict of the form {'x': [], 'y': []}
        Returns:
            tot_correct: int
            loss: float
        '''
        with self.graph.as_default():
            tot_correct, loss = self.sess.run([self.eval_metric_ops, self.loss],
                                              feed_dict={self.features: data['x'], self.labels: data['y']})
        return tot_correct, loss
    def close(self):
        # Release the TF session and its GPU resources.
        self.sess.close()
| [
"tensorflow.equal",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.nn.softmax",
"tensorflow.set_random_seed",
"tensorflow.RunMetadata",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.placeholder",
"fedsimul.utils.model_utils.batch_data",
"tensorflow.train.get_global_step",
"tenso... | [((751, 761), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (759, 761), True, 'import tensorflow as tf\n'), ((1331, 1380), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (1355, 1380), True, 'import tensorflow as tf\n'), ((1401, 1444), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph', 'config': 'config'}), '(graph=self.graph, config=config)\n', (1411, 1444), True, 'import tensorflow as tf\n'), ((1587, 1609), 'fedsimul.utils.tf_utils.graph_size', 'graph_size', (['self.graph'], {}), '(self.graph)\n', (1597, 1609), False, 'from fedsimul.utils.tf_utils import graph_size\n'), ((2214, 2276), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 784]', 'name': '"""features"""'}), "(tf.float32, shape=[None, 784], name='features')\n", (2228, 2276), True, 'import tensorflow as tf\n'), ((2294, 2347), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None]', 'name': '"""labels"""'}), "(tf.int64, shape=[None], name='labels')\n", (2308, 2347), True, 'import tensorflow as tf\n'), ((2729, 2797), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (2767, 2797), True, 'import tensorflow as tf\n'), ((4781, 4800), 'numpy.zeros', 'np.zeros', (['model_len'], {}), '(model_len)\n', (4789, 4800), True, 'import numpy as np\n'), ((5461, 5519), 'tqdm.trange', 'trange', (['num_epochs'], {'desc': '"""Epoch: """', 'leave': '(False)', 'ncols': '(120)'}), "(num_epochs, desc='Epoch: ', leave=False, ncols=120)\n", (5467, 5519), False, 'from tqdm import trange\n'), ((812, 842), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(123 + seed)'], {}), '(123 + seed)\n', (830, 842), True, 'import tensorflow as tf\n'), ((1158, 1174), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1172, 1174), True, 'import 
tensorflow as tf\n'), ((1732, 1748), 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), '()\n', (1746, 1748), True, 'import tensorflow as tf\n'), ((1768, 1818), 'tensorflow.profiler.ProfileOptionBuilder.float_operation', 'tf.profiler.ProfileOptionBuilder.float_operation', ([], {}), '()\n', (1816, 1818), True, 'import tensorflow as tf\n'), ((2597, 2628), 'tensorflow.argmax', 'tf.argmax', ([], {'input': 'logits', 'axis': '(1)'}), '(input=logits, axis=1)\n', (2606, 2628), True, 'import tensorflow as tf\n'), ((2659, 2703), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""softmax_tensor"""'}), "(logits, name='softmax_tensor')\n", (2672, 2703), True, 'import tensorflow as tf\n'), ((3042, 3082), 'tensorflow.equal', 'tf.equal', (['labels', "predictions['classes']"], {}), "(labels, predictions['classes'])\n", (3050, 3082), True, 'import tensorflow as tf\n'), ((5074, 5099), 'fedsimul.utils.tf_utils.process_grad', 'process_grad', (['model_grads'], {}), '(model_grads)\n', (5086, 5099), False, 'from fedsimul.utils.tf_utils import process_grad\n'), ((5545, 5573), 'fedsimul.utils.model_utils.batch_data', 'batch_data', (['data', 'batch_size'], {}), '(data, batch_size)\n', (5555, 5573), False, 'from fedsimul.utils.model_utils import batch_data\n'), ((1674, 1707), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1705, 1707), True, 'import tensorflow as tf\n'), ((1844, 1921), 'tensorflow.profiler.profile', 'tf.profiler.profile', (['self.graph'], {'run_meta': 'metadata', 'cmd': '"""scope"""', 'options': 'opts'}), "(self.graph, run_meta=metadata, cmd='scope', options=opts)\n", (1863, 1921), True, 'import tensorflow as tf\n'), ((2509, 2548), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.001)'], {}), '(0.001)\n', (2541, 2548), True, 'import tensorflow as tf\n'), ((2971, 2997), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (2995, 2997), True, 
'import tensorflow as tf\n'), ((3757, 3781), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3779, 3781), True, 'import tensorflow as tf\n'), ((4466, 4490), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4488, 4490), True, 'import tensorflow as tf\n')] |
""" Runs the server """
from aaxus import app
app.run()
| [
"aaxus.app.run"
] | [((47, 56), 'aaxus.app.run', 'app.run', ([], {}), '()\n', (54, 56), False, 'from aaxus import app\n')] |
import argparse
import logging
import math
from pathlib import Path
import torch.multiprocessing as mp
import os
from datetime import datetime
import nltk
import pandas as pd
import transformers
from torch import nn
import torch.distributed
from torch._C._distributed_c10d import HashStore
from torch.utils.data import DataLoader
from tqdm import tqdm
from sentence_transformers import InputExampleDocument, BiEncoder
from sentence_transformers import LoggingHandler
from eval_agritrop import create_evaluator
# torch.distributed.init_process_group(backend="nccl",store=HashStore(), world_size=8, rank=0)
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
os.putenv("TOKENIZERS_PARALLELISM", "true")
logger = logging.getLogger(__name__)
#### /print debug information to stdout
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train / evaluate baseline indexing system on abstracts')
parser.add_argument('--dataset', '-d', type=str, nargs=1,
help='Path to the TSV corpus to use', dest='dataset',
default=['datasets/corpus_agritrop_transformers_abstract.tsv'])
parser.add_argument('--save-prefix', '-s', type=str, nargs=1,
help='Prefix for the model save directory', dest='save_prefix',
default=['output/training_agritrop_transformer_baseline-'])
parser.add_argument('--epochs', '-e', type=int, nargs=1, help="The number of epochs (for training)", dest='epochs',
default=[100])
parser.add_argument('--eval', '-l', type=str, nargs=1, help="Load model from directory and evaluate", dest='eval',
default=[])
args = parser.parse_args()
# dataset's path
agritrop_dataset_path = args.dataset[0]
# Define our Cross-Encoder
train_batch_size = 1
num_epochs = args.epochs[0]
load = len(args.eval) > 0
model_save_path = args.save_prefix[0] + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Read Agritrop's dataset
logger.info("Read Agritrop's train dataset")
df_transformer = pd.read_csv(agritrop_dataset_path, sep='\t')
# list sample
train_samples = []
dev_samples = []
test_samples = []
df_document_groups = df_transformer.groupby("doc_ids")
for group in tqdm(df_document_groups):
abstract = group[1]['abstract'].iloc[0]
concept_labels = []
labels = []
for index, row in group[1].iterrows():
split_concept_labels = list(row['sentence2'].split(","))
concate_concept = " ".join(split_concept_labels)
concept_labels.append([concate_concept])
labels.append(int(row['score']))
input_example = InputExampleDocument(document_sentences=[abstract], concept_labels=concept_labels,
labels=labels)
split = group[1]['split'].iloc[0]
if split == 'dev':
dev_samples.append(input_example)
elif split == 'test':
test_samples.append(input_example)
else:
train_samples.append(input_example)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=False, batch_size=train_batch_size)
# print(len(train_dataloader.dataset))
# We use bert-base-cased as base model and set num_labels=1, which predicts a continuous score between 0 and 1
if not load:
logger.info("Training model using 'squeezebert/squeezebert-uncased'...")
model = BiEncoder('squeezebert/squeezebert-uncased', num_labels=1, max_length=512, device="cuda:1",
freeze_transformer=False)
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
# mp.spawn(fit_model, args=(model, train_dataloader,
# None, # evaluator,
# 4, # epochs
# warmup_steps,
# model_save_path,
# True), # use amp
# nprocs=8, join=True)
model.save(model_save_path)
model.fit(train_dataloader=train_dataloader,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path, use_amp=False)
model.save(model_save_path)
else:
load_path = args.eval[0]
logger.info(f"Loading model from {load_path}")
model = BiEncoder(load_path, num_labels=1, max_length=512, device="cpu",
freeze_transformer=False)
logger.info("Evaluating...")
evaluator_dev, evaluator_test = create_evaluator(df_transformer, text_field="abstract", device="cpu")
evaluator_dev(model)
evaluator_test(model)
| [
"logging.getLogger",
"sentence_transformers.BiEncoder",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.putenv",
"tqdm.tqdm",
"sentence_transformers.LoggingHandler",
"datetime.datetime.now",
"eval_agritrop.create_evaluator",
"torch.utils.data.DataLoader",
"sentence_transformers.InputExampleDoc... | [((863, 906), 'os.putenv', 'os.putenv', (['"""TOKENIZERS_PARALLELISM"""', '"""true"""'], {}), "('TOKENIZERS_PARALLELISM', 'true')\n", (872, 906), False, 'import os\n'), ((917, 944), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (934, 944), False, 'import logging\n'), ((1026, 1124), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train / evaluate baseline indexing system on abstracts"""'}), "(description=\n 'Train / evaluate baseline indexing system on abstracts')\n", (1049, 1124), False, 'import argparse\n'), ((2310, 2354), 'pandas.read_csv', 'pd.read_csv', (['agritrop_dataset_path'], {'sep': '"""\t"""'}), "(agritrop_dataset_path, sep='\\t')\n", (2321, 2354), True, 'import pandas as pd\n'), ((2518, 2542), 'tqdm.tqdm', 'tqdm', (['df_document_groups'], {}), '(df_document_groups)\n', (2522, 2542), False, 'from tqdm import tqdm\n'), ((3446, 3515), 'torch.utils.data.DataLoader', 'DataLoader', (['train_samples'], {'shuffle': '(False)', 'batch_size': 'train_batch_size'}), '(train_samples, shuffle=False, batch_size=train_batch_size)\n', (3456, 3515), False, 'from torch.utils.data import DataLoader\n'), ((2939, 3041), 'sentence_transformers.InputExampleDocument', 'InputExampleDocument', ([], {'document_sentences': '[abstract]', 'concept_labels': 'concept_labels', 'labels': 'labels'}), '(document_sentences=[abstract], concept_labels=\n concept_labels, labels=labels)\n', (2959, 3041), False, 'from sentence_transformers import InputExampleDocument, BiEncoder\n'), ((3790, 3911), 'sentence_transformers.BiEncoder', 'BiEncoder', (['"""squeezebert/squeezebert-uncased"""'], {'num_labels': '(1)', 'max_length': '(512)', 'device': '"""cuda:1"""', 'freeze_transformer': '(False)'}), "('squeezebert/squeezebert-uncased', num_labels=1, max_length=512,\n device='cuda:1', freeze_transformer=False)\n", (3799, 3911), False, 'from sentence_transformers import InputExampleDocument, 
BiEncoder\n'), ((4908, 5002), 'sentence_transformers.BiEncoder', 'BiEncoder', (['load_path'], {'num_labels': '(1)', 'max_length': '(512)', 'device': '"""cpu"""', 'freeze_transformer': '(False)'}), "(load_path, num_labels=1, max_length=512, device='cpu',\n freeze_transformer=False)\n", (4917, 5002), False, 'from sentence_transformers import InputExampleDocument, BiEncoder\n'), ((5103, 5172), 'eval_agritrop.create_evaluator', 'create_evaluator', (['df_transformer'], {'text_field': '"""abstract"""', 'device': '"""cpu"""'}), "(df_transformer, text_field='abstract', device='cpu')\n", (5119, 5172), False, 'from eval_agritrop import create_evaluator\n'), ((843, 859), 'sentence_transformers.LoggingHandler', 'LoggingHandler', ([], {}), '()\n', (857, 859), False, 'from sentence_transformers import LoggingHandler\n'), ((2164, 2178), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2176, 2178), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
import time, random
def isort(i_list):
for i in range(1, len(i_list)):
for j in range(i,0, -1):
if i_list[j] < i_list[j-1]:
i_list[j], i_list[j-1] = i_list[j-1], i_list[j]
else:
break
if __name__ == "__main__":
alist = []
for i in range(50000):
alist.append(random.randint(1, 100))
start_time = time.time()
isort(alist)
end_time = time.time() - start_time
print("cost time: %ss" % (end_time)) | [
"time.time",
"random.randint"
] | [((409, 420), 'time.time', 'time.time', ([], {}), '()\n', (418, 420), False, 'import time, random\n'), ((453, 464), 'time.time', 'time.time', ([], {}), '()\n', (462, 464), False, 'import time, random\n'), ((368, 390), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (382, 390), False, 'import time, random\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import math
import networkx as nx
import functools
import scipy.stats
import random
import sys
import copy
import numpy as np
import torch
import utils
try:
sys.path.append('/opt/MatterSim/build/') # local docker or Philly
import MatterSim
except:
# local conda env only
sys.path.append('/home/hoyeung/Documents/vnla/code/build')
import MatterSim
class ShortestPathOracle(object):
''' Shortest navigation teacher '''
def __init__(self, agent_nav_actions, env_nav_actions=None):
self.scans = set()
self.graph = {}
self.paths = {}
self.distances = {}
self.agent_nav_actions = agent_nav_actions
if env_nav_actions is not None:
self.env_nav_actions = env_nav_actions
def add_scans(self, scans, path=None):
new_scans = set.difference(scans, self.scans)
if new_scans:
print('Loading navigation graphs for %d scans' % len(new_scans))
for scan in new_scans:
graph, paths, distances = self._compute_shortest_paths(scan, path=path)
self.graph[scan] = graph
self.paths[scan] = paths
self.distances[scan] = distances
self.scans.update(new_scans)
def _compute_shortest_paths(self, scan, path=None):
''' Load connectivity graph for each scan, useful for reasoning about shortest paths '''
graph = utils.load_nav_graphs(scan, path=path)
paths = dict(nx.all_pairs_dijkstra_path(graph))
distances = dict(nx.all_pairs_dijkstra_path_length(graph))
return graph, paths, distances
def _find_nearest_point(self, scan, start_point, end_points):
best_d = 1e9
best_point = None
for end_point in end_points:
d = self.distances[scan][start_point][end_point]
if d < best_d:
best_d = d
best_point = end_point
return best_d, best_point
def _find_nearest_point_on_a_path(self, scan, current_point, start_point, goal_point):
path = self.paths[scan][start_point][goal_point]
return self._find_nearest_point(scan, current_point, path)
def _shortest_path_action(self, ob):
''' Determine next action on the shortest path to goals. '''
scan = ob['scan']
start_point = ob['viewpoint']
# Find nearest goal
_, goal_point = self._find_nearest_point(scan, start_point, ob['goal_viewpoints'])
# Stop if a goal is reached
if start_point == goal_point:
return (0, 0, 0)
path = self.paths[scan][start_point][goal_point]
next_point = path[1]
# Can we see the next viewpoint?
for i, loc in enumerate(ob['navigableLocations']):
if loc.viewpointId == next_point:
# Look directly at the viewpoint before moving
if loc.rel_heading > math.pi/6.0:
return (0, 1, 0) # Turn right
elif loc.rel_heading < -math.pi/6.0:
return (0,-1, 0) # Turn left
elif loc.rel_elevation > math.pi/6.0 and ob['viewIndex'] // 12 < 2:
return (0, 0, 1) # Look up
elif loc.rel_elevation < -math.pi/6.0 and ob['viewIndex'] // 12 > 0:
return (0, 0,-1) # Look down
else:
return (i, 0, 0) # Move
# Can't see it - first neutralize camera elevation
if ob['viewIndex'] // 12 == 0:
return (0, 0, 1) # Look up
elif ob['viewIndex'] // 12 == 2:
return (0, 0,-1) # Look down
# If camera is already neutralized, decide which way to turn
target_rel = self.graph[ob['scan']].node[next_point]['position'] - ob['point'] # state.location.point
# 180deg -
target_heading = math.pi / 2.0 - math.atan2(target_rel[1], target_rel[0])
if target_heading < 0:
target_heading += 2.0 * math.pi
if ob['heading'] > target_heading and ob['heading'] - target_heading < math.pi:
return (0, -1, 0) # Turn left
if target_heading > ob['heading'] and target_heading - ob['heading'] > math.pi:
return (0, -1, 0) # Turn left
return (0, 1, 0) # Turn right
def _map_env_action_to_agent_action(self, action, ob):
ix, heading_chg, elevation_chg = action
if heading_chg > 0:
return self.agent_nav_actions.index('right')
if heading_chg < 0:
return self.agent_nav_actions.index('left')
if elevation_chg > 0:
return self.agent_nav_actions.index('up')
if elevation_chg < 0:
return self.agent_nav_actions.index('down')
if ix > 0:
return self.agent_nav_actions.index('forward')
if ob['ended']:
return self.agent_nav_actions.index('<ignore>')
return self.agent_nav_actions.index('<end>')
def interpret_agent_action(self, action_idx, ob):
'''Translate action index back to env action for simulator to take'''
# If the action is not `forward`, simply map it to the simulator's
# action space
if action_idx != self.agent_nav_actions.index('forward'):
return self.env_nav_actions[action_idx]
# If the action is forward, more complicated
scan = ob['scan']
start_point = ob['viewpoint']
# Find nearest goal view point
_, goal_point = self._find_nearest_point(scan, start_point, ob['goal_viewpoints'])
optimal_path = self.paths[scan][start_point][goal_point]
# If the goal is right in front of us, go to it.
# The dataset guarantees that the goal is always reachable.
if len(optimal_path) < 2:
return (1, 0, 0)
next_optimal_point = optimal_path[1]
# If the next optimal viewpoint is within 30 degrees of
# the center of the view, go to it.
for i, loc in enumerate(ob['navigableLocations']):
if loc.viewpointId == next_optimal_point:
if loc.rel_heading > math.pi/6.0 or loc.rel_heading < -math.pi/6.0 or \
(loc.rel_elevation > math.pi/6.0 and ob['viewIndex'] // 12 < 2) or \
(loc.rel_elevation < -math.pi/6.0 and ob['viewIndex'] // 12 > 0):
continue
else:
return (i, 0, 0)
# Otherwise, go the navigable (seeable) viewpt that has the least angular distance from the center of the current image (viewpt).
return (1, 0, 0)
def __call__(self, obs):
self.actions = list(map(self._shortest_path_action, obs))
return list(map(self._map_env_action_to_agent_action, self.actions, obs))
class FrontierShortestPathsOracle(ShortestPathOracle):
def __init__(self, agent_nav_actions, env_nav_actions=None):
super(FrontierShortestPathsOracle, self).__init__(agent_nav_actions, env_nav_actions)
# self.env_nav_actions = env_nav_actions
self.valid_rotation_action_indices = [self.agent_nav_actions.index(r) for r in ('left', 'right', 'up', 'down', '<ignore>')]
# inherit parent add_scans() function
def interpret_agent_rotations(self, rotation_action_indices, ob):
'''
rotation_action_indices : a list of int action indices
Returns:
list of fixed length agent.max_macro_action_seq_len (e.g. 8)
e.g. [(0, 1, 0), (0, 1, -1), ..... (0,0,0)]
e.g. [(0,0,0), ... (0,0,0)] if ob has ended.
'''
max_macro_action_seq_len = len(rotation_action_indices)
# [(0,0,0)] * 8
macro_rotations = [self.env_nav_actions[self.agent_nav_actions.index('<ignore>')]] * max_macro_action_seq_len
if not ob['ended']:
for i, action_idx in enumerate(rotation_action_indices):
assert action_idx in self.valid_rotation_action_indices
macro_rotations[i] = self.env_nav_actions[action_idx]
return macro_rotations
def interpret_agent_forward(self, ob):
'''
Returns:
(0, 0, 0) to ignore if trajectory has already ended
or
(1, 0, 0) to step forward to the direct facing vertex
'''
if ob['ended']:
return self.env_nav_actions[self.agent_nav_actions.index('<ignore>')]
else:
return self.env_nav_actions[self.agent_nav_actions.index('forward')]
def make_explore_instructions(self, obs):
'''
Make env level rotation instructions of each ob to explore its own panoramic sphere. The output should be informative enough for agent to collect information from all 36 facets of its panoramic sphere.
Returns:
heading_adjusts: list len=batch_size, each an env action tuple.
elevation_adjusts_1: same.
elevation_adjusts_2: list len=batch_size, each either a single action tuple e.g.(0,1,0), or double action tuple e.g.((0,0,-1), (0,0,-1)).
'''
batch_size = len(obs)
# How agent explore the entire pano sphere
# Right*11, Up/Down, Right*11, Up/Down (*2), Right*11
heading_adjusts = [()] * batch_size
elevation_adjusts_1 = [()] * batch_size
elevation_adjusts_2 = [()] * batch_size
# (0,0,1)
up_tup = self.env_nav_actions[self.agent_nav_actions.index('up')]
# (0,0,-1)
down_tup = self.env_nav_actions[self.agent_nav_actions.index('down')]
# (0,1,0)
right_tup = self.env_nav_actions[self.agent_nav_actions.index('right')]
# (0,0,0)
ignore_tup = self.env_nav_actions[self.agent_nav_actions.index('<ignore>')]
# Loop through each ob in the batch
for i, ob in enumerate(obs):
if ob['ended']:
# don't move at all.
heading_adjusts[i] = ignore_tup
elevation_adjusts_1[i] = ignore_tup
elevation_adjusts_2[i] = ignore_tup
else:
# turn right for 11 times at every elevation level.
heading_adjusts[i] = right_tup
# check agent elevation
if ob['viewIndex'] // 12 == 0:
# facing down, so need to look up twice.
elevation_adjusts_1[i] = up_tup
elevation_adjusts_2[i] = up_tup
elif ob['viewIndex'] // 12 == 2:
# facing up, so need to look down twice.
elevation_adjusts_1[i] = down_tup
elevation_adjusts_2[i] = down_tup
else:
# neutral, so need to look up once, and then look down twice
elevation_adjusts_1[i] = up_tup
elevation_adjusts_2[i] = (down_tup, down_tup)
return heading_adjusts, elevation_adjusts_1, elevation_adjusts_2
def compute_frontier_cost_single(self, ob, next_viewpoint_index_str):
'''
next_viewpoint_index_str: single str indicating viewpoint index.
e.g. '1e6b606b44df4a6086c0f97e826d4d15'
'''
# current point to next point
cost_stepping = self.distances[ob['scan']][ob['viewpoint']][next_viewpoint_index_str]
# next point to the closest goal
cost_togo, _ = self._find_nearest_point(ob['scan'], next_viewpoint_index_str, ob['goal_viewpoints'])
assert cost_stepping > 0 and cost_togo >= 0
return cost_togo , cost_stepping
def compute_frontier_costs(self, obs, viewix_next_vertex_map, timestep=None):
'''
For each ob, compute:
cost = cost-to-go + cost-stepping for all reachable vertices
'''
assert len(obs) == len(viewix_next_vertex_map)
# arr shape (batch_size, 36)
q_values_target_batch = np.ones((len(obs), len(viewix_next_vertex_map[0]))) * 1e9
# arr shape (batch_size, 36)
cost_togos_batch = np.ones((len(obs), len(viewix_next_vertex_map[0]))) * 1e9
# arr shape (batch_size, 36)
cost_stepping_batch = np.ones((len(obs), len(viewix_next_vertex_map[0]))) * 1e9
# arr shape (batch_size, )
end_target_batch = np.array([False for _ in range(len(obs))])
# Loop through batch
for i, ob in enumerate(obs):
# NOTE ended ob won't be added to hist buffer for training
if not ob['ended']:
costs = []
cost_togos = []
cost_steppings = []
for proposed_vertex in viewix_next_vertex_map[i]:
if proposed_vertex == '':
costs.append(1e9)
cost_togos.append(1e9)
cost_steppings.append(1e9)
else:
# add up cost-togo + cost-stepping
cost_togo , cost_stepping = self.compute_frontier_cost_single(ob, proposed_vertex)
costs.append(cost_togo + cost_stepping)
# keep tab cost-togo to determine ending later
cost_togos.append(cost_togo)
cost_steppings.append(cost_stepping)
assert len(cost_togos) == len(viewix_next_vertex_map[0]) # 36
assert len(cost_steppings) == len(viewix_next_vertex_map[0]) # 36
assert len(costs) == len(viewix_next_vertex_map[0]) # 36
q_values_target_batch[i, :] = costs
# get min costs for each row
# if the min index of costs also has a cost-togo = 0, then mark end for this row in end_target
end_target_batch[i] = cost_togos[costs.index(min(costs))] == 0
# for results logging
cost_togos_batch[i] = cost_togos
cost_stepping_batch[i] = cost_steppings
return q_values_target_batch, end_target_batch, cost_togos_batch, cost_stepping_batch
def _map_env_action_to_agent_action(self, action):
'''
Translate rotation env action seq into agent action index seq.
'''
ix, heading_chg, elevation_chg = action
assert ix == 0, 'Accept only rotation or ignore actions'
assert heading_chg == 0 or elevation_chg == 0, 'Accept only one rotation action at a time'
if heading_chg > 0:
return self.agent_nav_actions.index('right')
if heading_chg < 0:
return self.agent_nav_actions.index('left')
if elevation_chg > 0:
return self.agent_nav_actions.index('up')
if elevation_chg < 0:
return self.agent_nav_actions.index('down')
else:
return self.agent_nav_actions.index('<ignore>')
def translate_env_actions(self, obs, viewix_env_actions_map, max_macro_action_seq_len, sphere_size):
'''
viewix_env_actions_map : list (batch_size, 36, varies). Each [(0,1,0), (0,0,-1), ...]
Returns:
viewix_actions_map : array shape(36, batch_size, self.max_macro_action_seq_len)
'''
# tensor shape(36, batch_size, self.max_macro_action_seq_len)
viewix_actions_map = np.ones((sphere_size, len(obs), max_macro_action_seq_len), dtype='int') * \
self.agent_nav_actions.index('<ignore>')
for i, ob in enumerate(obs): # 1-100
if not ob['ended']:
for j, env_action_tup_seq in enumerate(viewix_env_actions_map[i]): # 1-36
assert len(env_action_tup_seq) <= 8
# map seq, length varies
agent_action_seq = list(map(self._map_env_action_to_agent_action, env_action_tup_seq))
assert len(agent_action_seq) <= 8
# assign action index, seq is already padded to 8 during initialization
viewix_actions_map[j, i, :len(agent_action_seq)] = agent_action_seq
return viewix_actions_map
class AskOracle(object):
DONT_ASK = 0
ASK = 1
def __init__(self, hparams, agent_ask_actions):
self.deviate_threshold = hparams.deviate_threshold
self.uncertain_threshold = hparams.uncertain_threshold
self.unmoved_threshold = hparams.unmoved_threshold
self.agent_ask_actions = agent_ask_actions
self.rule_a_e = hasattr(hparams, 'rule_a_e') and hparams.rule_a_e
self.rule_b_d = hasattr(hparams, 'rule_b_d') and hparams.rule_b_d
def _should_ask_rule_a_e(self, ob, nav_oracle=None):
if ob['queries_unused'] <= 0:
return self.DONT_ASK, 'exceed'
scan = ob['scan']
current_point = ob['viewpoint']
_, goal_point = nav_oracle._find_nearest_point(scan, current_point, ob['goal_viewpoints'])
agent_decision = int(np.argmax(ob['nav_dist']))
if current_point == goal_point and \
agent_decision == nav_oracle.agent_nav_actions.index('forward'):
return self.ASK, 'arrive'
start_point = ob['init_viewpoint']
d, _ = nav_oracle._find_nearest_point_on_a_path(scan, current_point, start_point, goal_point)
if d > self.deviate_threshold:
return self.ASK, 'deviate'
return self.DONT_ASK, 'pass'
def _should_ask_rule_b_d(self, ob, nav_oracle=None):
if ob['queries_unused'] <= 0:
return self.DONT_ASK, 'exceed'
agent_dist = ob['nav_dist']
uniform = [1. / len(agent_dist)] * len(agent_dist)
entropy_gap = scipy.stats.entropy(uniform) - scipy.stats.entropy(agent_dist)
if entropy_gap < self.uncertain_threshold - 1e-9:
return self.ASK, 'uncertain'
if len(ob['agent_path']) >= self.unmoved_threshold:
last_nodes = [t[0] for t in ob['agent_path']][-self.unmoved_threshold:]
if all(node == last_nodes[0] for node in last_nodes):
return self.ASK, 'unmoved'
if ob['queries_unused'] >= ob['traj_len'] - ob['time_step']:
return self.ASK, 'why_not'
return self.DONT_ASK, 'pass'
def _should_ask(self, ob, nav_oracle=None):
if self.rule_a_e:
return self._should_ask_rule_a_e(ob, nav_oracle=nav_oracle)
if self.rule_b_d:
return self._should_ask_rule_b_d(ob, nav_oracle=nav_oracle)
if ob['queries_unused'] <= 0:
return self.DONT_ASK, 'exceed'
# Find nearest point on the current shortest path
scan = ob['scan']
current_point = ob['viewpoint']
# Find nearest goal to current point
_, goal_point = nav_oracle._find_nearest_point(scan, current_point, ob['goal_viewpoints'])
# Rule (e): ask if the goal has been reached but the agent decides to
# go forward
agent_decision = int(np.argmax(ob['nav_dist']))
if current_point == goal_point and \
agent_decision == nav_oracle.agent_nav_actions.index('forward'):
return self.ASK, 'arrive'
start_point = ob['init_viewpoint']
# Find closest point to the current point on the path from start point
# to goal point
d, _ = nav_oracle._find_nearest_point_on_a_path(scan, current_point,
start_point, goal_point)
# Rule (a): ask if the agent deviates too far from the optimal path
if d > self.deviate_threshold:
return self.ASK, 'deviate'
# Rule (b): ask if uncertain
agent_dist = ob['nav_dist']
uniform = [1. / len(agent_dist)] * len(agent_dist)
entropy_gap = scipy.stats.entropy(uniform) - scipy.stats.entropy(agent_dist)
if entropy_gap < self.uncertain_threshold - 1e-9:
return self.ASK, 'uncertain'
# Rule (c): ask if not moving for too long
if len(ob['agent_path']) >= self.unmoved_threshold:
last_nodes = [t[0] for t in ob['agent_path']][-self.unmoved_threshold:]
if all(node == last_nodes[0] for node in last_nodes):
return self.ASK, 'unmoved'
# Rule (d): ask to spend all budget at the end
if ob['queries_unused'] >= ob['traj_len'] - ob['time_step']:
return self.ASK, 'why_not'
return self.DONT_ASK, 'pass'
def _map_env_action_to_agent_action(self, action, ob):
if ob['ended']:
return self.agent_ask_actions.index('<ignore>')
if action == self.DONT_ASK:
return self.agent_ask_actions.index('dont_ask')
return self.agent_ask_actions.index('ask')
def __call__(self, obs, nav_oracle):
should_ask_fn = functools.partial(self._should_ask, nav_oracle=nav_oracle)
actions, reasons = zip(*list(map(should_ask_fn, obs)))
actions = list(map(self._map_env_action_to_agent_action, actions, obs))
return actions, reasons
class MultistepShortestPathOracle(ShortestPathOracle):
'''For Ask Agents with direct advisors'''
def __init__(self, n_steps, agent_nav_actions, env_nav_actions):
super(MultistepShortestPathOracle, self).__init__(agent_nav_actions)
self.sim = MatterSim.Simulator()
self.sim.setRenderingEnabled(False)
self.sim.setDiscretizedViewingAngles(True)
self.sim.setCameraResolution(640, 480)
self.sim.setCameraVFOV(math.radians(60))
self.sim.setNavGraphPath(
os.path.join(os.getenv('PT_DATA_DIR'), 'connectivity'))
self.sim.init()
self.n_steps = n_steps
self.env_nav_actions = env_nav_actions
def _shortest_path_actions(self, ob):
actions = []
self.sim.newEpisode(ob['scan'], ob['viewpoint'], ob['heading'], ob['elevation'])
assert not ob['ended']
for _ in range(self.n_steps):
# Query oracle for next action
action = self._shortest_path_action(ob)
# Convert to agent action
agent_action = self._map_env_action_to_agent_action(action, ob)
actions.append(agent_action)
# Take action
self.sim.makeAction(*action)
if action == (0, 0, 0):
break
state = self.sim.getState()
ob = {
'viewpoint': state.location.viewpointId,
'viewIndex': state.viewIndex,
'heading' : state.heading,
'elevation': state.elevation,
'navigableLocations': state.navigableLocations,
'point' : state.location.point,
'ended' : ob['ended'] or action == (0, 0, 0),
'goal_viewpoints': ob['goal_viewpoints'],
'scan' : ob['scan']
}
return actions
def __call__(self, ob):
return self._shortest_path_actions(ob)
class NextOptimalOracle(object):
def __init__(self, hparams, agent_nav_actions, env_nav_actions,
agent_ask_actions):
self.type = 'next_optimal'
self.ask_oracle = make_oracle('ask', hparams, agent_ask_actions)
self.nav_oracle = make_oracle('shortest', agent_nav_actions, env_nav_actions)
def __call__(self, obs):
ask_actions, ask_reasons = self.ask_oracle(obs, self.nav_oracle)
self.nav_oracle.add_scans(set(ob['scan'] for ob in obs))
nav_actions = self.nav_oracle(obs)
return nav_actions, ask_actions, ask_reasons
def add_scans(self, scans):
self.nav_oracle.add_scans(scans)
def next_ask(self, obs):
return self.ask_oracle(obs, self.nav_oracle)
def next_nav(self, obs):
return self.nav_oracle(obs)
def interpret_agent_action(self, *args, **kwargs):
return self.nav_oracle.interpret_agent_action(*args, **kwargs)
class StepByStepSubgoalOracle(object):
def __init__(self, n_steps, agent_nav_actions, env_nav_actions, mode=None):
self.type = 'step_by_step'
self.nav_oracle = make_oracle('direct', n_steps, agent_nav_actions, env_nav_actions)
self.agent_nav_actions = agent_nav_actions
if mode == 'easy':
self._map_actions_to_instruction = self._map_actions_to_instruction_easy
elif mode == 'hard':
self._map_actions_to_instruction = self._map_actions_to_instruction_hard
else:
sys.exit('unknown step by step mode!')
def add_scans(self, scans):
self.nav_oracle.add_scans(scans)
def _make_action_name(self, a):
action_name = self.agent_nav_actions[a]
if action_name in ['up', 'down']:
return 'look ' + action_name
elif action_name in ['left', 'right']:
return 'turn ' + action_name
elif action_name == 'forward':
return 'go ' + action_name
elif action_name == '<end>':
return 'stop'
elif action_name == '<ignore>':
return ''
return None
def _map_actions_to_instruction_hard(self, actions):
agg_actions = []
cnt = 1
for i in range(1, len(actions)):
if actions[i] != actions[i - 1]:
agg_actions.append((actions[i - 1], cnt))
cnt = 1
else:
cnt += 1
agg_actions.append((actions[-1], cnt))
instruction = []
for a, c in agg_actions:
action_name = self._make_action_name(a)
if c > 1:
if 'turn' in action_name:
degree = 30 * c
if 'left' in action_name:
instruction.append('turn %d degrees left' % degree)
elif 'right' in action_name:
instruction.append('turn %d degrees right' % degree)
else:
raise ValueError('action name {} error'.format(action_name))
elif 'go' in action_name:
instruction.append('%s %d steps' % (action_name, c))
elif action_name != '':
instruction.append(action_name)
return ' , '.join(instruction)
def _map_actions_to_instruction_easy(self, actions):
instruction = []
for a in actions:
instruction.append(self._make_action_name(a))
return ' , '.join(instruction)
def __call__(self, ob):
action_seq = self.nav_oracle(ob)
verbal_instruction = self._map_actions_to_instruction(action_seq)
return action_seq, verbal_instruction
def make_oracle(oracle_type, *args, **kwargs):
if oracle_type == 'shortest':
return ShortestPathOracle(*args, **kwargs)
if oracle_type == 'next_optimal':
return NextOptimalOracle(*args, **kwargs)
if oracle_type == 'ask':
return AskOracle(*args, **kwargs)
if oracle_type == 'direct':
return MultistepShortestPathOracle(*args, **kwargs)
if oracle_type == 'verbal':
return StepByStepSubgoalOracle(*args, **kwargs)
if oracle_type == 'frontier_shortest':
return FrontierShortestPathsOracle(*args, **kwargs)
# TODO implement next
# if oracle_type == 'diverse_shortest':
# return DiverseShortestPathsOracle(*args, **kwargs)
return None
| [
"networkx.all_pairs_dijkstra_path",
"os.getenv",
"MatterSim.Simulator",
"utils.load_nav_graphs",
"numpy.argmax",
"math.radians",
"functools.partial",
"math.atan2",
"sys.exit",
"networkx.all_pairs_dijkstra_path_length",
"sys.path.append"
] | [((247, 287), 'sys.path.append', 'sys.path.append', (['"""/opt/MatterSim/build/"""'], {}), "('/opt/MatterSim/build/')\n", (262, 287), False, 'import sys\n'), ((375, 433), 'sys.path.append', 'sys.path.append', (['"""/home/hoyeung/Documents/vnla/code/build"""'], {}), "('/home/hoyeung/Documents/vnla/code/build')\n", (390, 433), False, 'import sys\n'), ((1506, 1544), 'utils.load_nav_graphs', 'utils.load_nav_graphs', (['scan'], {'path': 'path'}), '(scan, path=path)\n', (1527, 1544), False, 'import utils\n'), ((20699, 20757), 'functools.partial', 'functools.partial', (['self._should_ask'], {'nav_oracle': 'nav_oracle'}), '(self._should_ask, nav_oracle=nav_oracle)\n', (20716, 20757), False, 'import functools\n'), ((21202, 21223), 'MatterSim.Simulator', 'MatterSim.Simulator', ([], {}), '()\n', (21221, 21223), False, 'import MatterSim\n'), ((1566, 1599), 'networkx.all_pairs_dijkstra_path', 'nx.all_pairs_dijkstra_path', (['graph'], {}), '(graph)\n', (1592, 1599), True, 'import networkx as nx\n'), ((1626, 1666), 'networkx.all_pairs_dijkstra_path_length', 'nx.all_pairs_dijkstra_path_length', (['graph'], {}), '(graph)\n', (1659, 1666), True, 'import networkx as nx\n'), ((3965, 4005), 'math.atan2', 'math.atan2', (['target_rel[1]', 'target_rel[0]'], {}), '(target_rel[1], target_rel[0])\n', (3975, 4005), False, 'import math\n'), ((16921, 16946), 'numpy.argmax', 'np.argmax', (["ob['nav_dist']"], {}), "(ob['nav_dist'])\n", (16930, 16946), True, 'import numpy as np\n'), ((18917, 18942), 'numpy.argmax', 'np.argmax', (["ob['nav_dist']"], {}), "(ob['nav_dist'])\n", (18926, 18942), True, 'import numpy as np\n'), ((21397, 21413), 'math.radians', 'math.radians', (['(60)'], {}), '(60)\n', (21409, 21413), False, 'import math\n'), ((21474, 21498), 'os.getenv', 'os.getenv', (['"""PT_DATA_DIR"""'], {}), "('PT_DATA_DIR')\n", (21483, 21498), False, 'import os\n'), ((24408, 24446), 'sys.exit', 'sys.exit', (['"""unknown step by step mode!"""'], {}), "('unknown step by step mode!')\n", (24416, 
24446), False, 'import sys\n')] |
import tensorflow as tf
import numpy as np
import unittest
from dnc.controller import BaseController
class DummyController(BaseController):
def network_vars(self):
self.W = tf.Variable(tf.truncated_normal([self.nn_input_size, 64]))
self.b = tf.Variable(tf.zeros([64]))
def network_op(self, X):
return tf.matmul(X, self.W) + self.b
class DummyRecurrentController(BaseController):
def network_vars(self):
self.lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(64)
self.state = tf.Variable(tf.zeros([self.batch_size, 64]), trainable=False)
self.output = tf.Variable(tf.zeros([self.batch_size, 64]), trainable=False)
def network_op(self, X, state):
X = tf.convert_to_tensor(X)
return self.lstm_cell(X, state)
def update_state(self, new_state):
return tf.group(
self.output.assign(new_state[0]),
self.state.assign(new_state[1])
)
def get_state(self):
return (self.output, self.state)
class DNCControllerTest(unittest.TestCase):
def test_construction(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
rcontroller = DummyRecurrentController(10, 10, 2, 5, 1)
self.assertFalse(controller.has_recurrent_nn)
self.assertEqual(controller.nn_input_size, 20)
self.assertEqual(controller.interface_vector_size, 38)
self.assertEqual(controller.interface_weights.get_shape().as_list(), [64, 38])
self.assertEqual(controller.nn_output_weights.get_shape().as_list(), [64, 10])
self.assertEqual(controller.mem_output_weights.get_shape().as_list(), [10, 10])
self.assertTrue(rcontroller.has_recurrent_nn)
self.assertEqual(rcontroller.nn_input_size, 20)
self.assertEqual(rcontroller.interface_vector_size, 38)
self.assertEqual(rcontroller.interface_weights.get_shape().as_list(), [64, 38])
self.assertEqual(rcontroller.nn_output_weights.get_shape().as_list(), [64, 10])
self.assertEqual(rcontroller.mem_output_weights.get_shape().as_list(), [10, 10])
def test_get_nn_output_size(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as Session:
controller = DummyController(10, 10, 2, 5)
rcontroller = DummyRecurrentController(10, 10, 2, 5, 1)
self.assertEqual(controller.get_nn_output_size(), 64)
self.assertEqual(rcontroller.get_nn_output_size(), 64)
def test_parse_interface_vector(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
zeta = np.random.uniform(-2, 2, (2, 38)).astype(np.float32)
read_keys = np.reshape(zeta[:, :10], (-1, 5, 2))
read_strengths = 1 + np.log(np.exp(np.reshape(zeta[:, 10:12], (-1, 2, ))) + 1)
write_key = np.reshape(zeta[:, 12:17], (-1, 5, 1))
write_strength = 1 + np.log(np.exp(np.reshape(zeta[:, 17], (-1, 1))) + 1)
erase_vector = 1.0 / (1 + np.exp(-1 * np.reshape(zeta[:, 18:23], (-1, 5))))
write_vector = np.reshape(zeta[:, 23:28], (-1, 5))
free_gates = 1.0 / (1 + np.exp(-1 * np.reshape(zeta[:, 28:30], (-1, 2))))
allocation_gate = 1.0 / (1 + np.exp(-1 * zeta[:, 30, np.newaxis]))
write_gate = 1.0 / (1 + np.exp(-1 * zeta[:, 31, np.newaxis]))
read_modes = np.reshape(zeta[:, 32:], (-1, 3, 2))
read_modes = np.transpose(read_modes, [0, 2, 1])
read_modes = np.reshape(read_modes, (-1, 3))
read_modes = np.exp(read_modes) / np.sum(np.exp(read_modes), axis=-1, keepdims=True)
read_modes = np.reshape(read_modes, (2, 2, 3))
read_modes = np.transpose(read_modes, [0, 2, 1])
op = controller.parse_interface_vector(zeta)
session.run(tf.initialize_all_variables())
parsed = session.run(op)
self.assertTrue(np.allclose(parsed['read_keys'], read_keys))
self.assertTrue(np.allclose(parsed['read_strengths'], read_strengths))
self.assertTrue(np.allclose(parsed['write_key'], write_key))
self.assertTrue(np.allclose(parsed['write_strength'], write_strength))
self.assertTrue(np.allclose(parsed['erase_vector'], erase_vector))
self.assertTrue(np.allclose(parsed['write_vector'], write_vector))
self.assertTrue(np.allclose(parsed['free_gates'], free_gates))
self.assertTrue(np.allclose(parsed['allocation_gate'], allocation_gate))
self.assertTrue(np.allclose(parsed['write_gate'], write_gate))
self.assertTrue(np.allclose(parsed['read_modes'], read_modes))
def test_process_input(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
rcontroller = DummyRecurrentController(10, 10, 2, 5, 2)
input_batch = np.random.uniform(0, 1, (2, 10)).astype(np.float32)
last_read_vectors = np.random.uniform(-1, 1, (2, 5, 2)).astype(np.float32)
v_op, zeta_op = controller.process_input(input_batch, last_read_vectors)
rv_op, rzeta_op, rs_op = rcontroller.process_input(input_batch, last_read_vectors, rcontroller.get_state())
session.run(tf.initialize_all_variables())
v, zeta = session.run([v_op, zeta_op])
rv, rzeta, rs = session.run([rv_op, rzeta_op, rs_op])
self.assertEqual(v.shape, (2, 10))
self.assertEqual(np.concatenate([np.reshape(val, (2, -1)) for _, val in zeta.items()], axis=1).shape, (2, 38))
self.assertEqual(rv.shape, (2, 10))
self.assertEqual(np.concatenate([np.reshape(val, (2, -1)) for _, val in rzeta.items()], axis=1).shape, (2, 38))
self.assertEqual([_s.shape for _s in rs], [(2, 64), (2, 64)])
def test_final_output(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
output_batch = np.random.uniform(0, 1, (2, 10)).astype(np.float32)
new_read_vectors = np.random.uniform(-1, 1, (2, 5, 2)).astype(np.float32)
op = controller.final_output(output_batch, new_read_vectors)
session.run(tf.initialize_all_variables())
y = session.run(op)
self.assertEqual(y.shape, (2, 10))
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
"tensorflow.Graph",
"tensorflow.initialize_all_variables",
"numpy.reshape",
"numpy.allclose",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.convert_to_tensor",
"tensorflow.Session",
"numpy.exp",
"tensorflow.matmul",
"numpy.random.uniform",
"unittest.main",
"numpy.transpose",
"tensorflo... | [((7097, 7123), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (7110, 7123), False, 'import unittest\n'), ((469, 501), 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', (['(64)'], {}), '(64)\n', (497, 501), True, 'import tensorflow as tf\n'), ((718, 741), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['X'], {}), '(X)\n', (738, 741), True, 'import tensorflow as tf\n'), ((1110, 1120), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1118, 1120), True, 'import tensorflow as tf\n'), ((2368, 2378), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2376, 2378), True, 'import tensorflow as tf\n'), ((2800, 2810), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2808, 2810), True, 'import tensorflow as tf\n'), ((5218, 5228), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5226, 5228), True, 'import tensorflow as tf\n'), ((6509, 6519), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6517, 6519), True, 'import tensorflow as tf\n'), ((199, 244), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[self.nn_input_size, 64]'], {}), '([self.nn_input_size, 64])\n', (218, 244), True, 'import tensorflow as tf\n'), ((275, 289), 'tensorflow.zeros', 'tf.zeros', (['[64]'], {}), '([64])\n', (283, 289), True, 'import tensorflow as tf\n'), ((336, 356), 'tensorflow.matmul', 'tf.matmul', (['X', 'self.W'], {}), '(X, self.W)\n', (345, 356), True, 'import tensorflow as tf\n'), ((535, 566), 'tensorflow.zeros', 'tf.zeros', (['[self.batch_size, 64]'], {}), '([self.batch_size, 64])\n', (543, 566), True, 'import tensorflow as tf\n'), ((619, 650), 'tensorflow.zeros', 'tf.zeros', (['[self.batch_size, 64]'], {}), '([self.batch_size, 64])\n', (627, 650), True, 'import tensorflow as tf\n'), ((1171, 1194), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (1181, 1194), True, 'import tensorflow as tf\n'), ((2429, 2452), 'tensorflow.Session', 'tf.Session', ([], {'graph': 
'graph'}), '(graph=graph)\n', (2439, 2452), True, 'import tensorflow as tf\n'), ((2861, 2884), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (2871, 2884), True, 'import tensorflow as tf\n'), ((3062, 3098), 'numpy.reshape', 'np.reshape', (['zeta[:, :10]', '(-1, 5, 2)'], {}), '(zeta[:, :10], (-1, 5, 2))\n', (3072, 3098), True, 'import numpy as np\n'), ((3222, 3260), 'numpy.reshape', 'np.reshape', (['zeta[:, 12:17]', '(-1, 5, 1)'], {}), '(zeta[:, 12:17], (-1, 5, 1))\n', (3232, 3260), True, 'import numpy as np\n'), ((3474, 3509), 'numpy.reshape', 'np.reshape', (['zeta[:, 23:28]', '(-1, 5)'], {}), '(zeta[:, 23:28], (-1, 5))\n', (3484, 3509), True, 'import numpy as np\n'), ((3790, 3826), 'numpy.reshape', 'np.reshape', (['zeta[:, 32:]', '(-1, 3, 2)'], {}), '(zeta[:, 32:], (-1, 3, 2))\n', (3800, 3826), True, 'import numpy as np\n'), ((3857, 3892), 'numpy.transpose', 'np.transpose', (['read_modes', '[0, 2, 1]'], {}), '(read_modes, [0, 2, 1])\n', (3869, 3892), True, 'import numpy as np\n'), ((3922, 3953), 'numpy.reshape', 'np.reshape', (['read_modes', '(-1, 3)'], {}), '(read_modes, (-1, 3))\n', (3932, 3953), True, 'import numpy as np\n'), ((4084, 4117), 'numpy.reshape', 'np.reshape', (['read_modes', '(2, 2, 3)'], {}), '(read_modes, (2, 2, 3))\n', (4094, 4117), True, 'import numpy as np\n'), ((4147, 4182), 'numpy.transpose', 'np.transpose', (['read_modes', '[0, 2, 1]'], {}), '(read_modes, [0, 2, 1])\n', (4159, 4182), True, 'import numpy as np\n'), ((5279, 5302), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (5289, 5302), True, 'import tensorflow as tf\n'), ((6570, 6593), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (6580, 6593), True, 'import tensorflow as tf\n'), ((3983, 4001), 'numpy.exp', 'np.exp', (['read_modes'], {}), '(read_modes)\n', (3989, 4001), True, 'import numpy as np\n'), ((4273, 4302), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', 
([], {}), '()\n', (4300, 4302), True, 'import tensorflow as tf\n'), ((4378, 4421), 'numpy.allclose', 'np.allclose', (["parsed['read_keys']", 'read_keys'], {}), "(parsed['read_keys'], read_keys)\n", (4389, 4421), True, 'import numpy as np\n'), ((4455, 4508), 'numpy.allclose', 'np.allclose', (["parsed['read_strengths']", 'read_strengths'], {}), "(parsed['read_strengths'], read_strengths)\n", (4466, 4508), True, 'import numpy as np\n'), ((4542, 4585), 'numpy.allclose', 'np.allclose', (["parsed['write_key']", 'write_key'], {}), "(parsed['write_key'], write_key)\n", (4553, 4585), True, 'import numpy as np\n'), ((4619, 4672), 'numpy.allclose', 'np.allclose', (["parsed['write_strength']", 'write_strength'], {}), "(parsed['write_strength'], write_strength)\n", (4630, 4672), True, 'import numpy as np\n'), ((4706, 4755), 'numpy.allclose', 'np.allclose', (["parsed['erase_vector']", 'erase_vector'], {}), "(parsed['erase_vector'], erase_vector)\n", (4717, 4755), True, 'import numpy as np\n'), ((4789, 4838), 'numpy.allclose', 'np.allclose', (["parsed['write_vector']", 'write_vector'], {}), "(parsed['write_vector'], write_vector)\n", (4800, 4838), True, 'import numpy as np\n'), ((4872, 4917), 'numpy.allclose', 'np.allclose', (["parsed['free_gates']", 'free_gates'], {}), "(parsed['free_gates'], free_gates)\n", (4883, 4917), True, 'import numpy as np\n'), ((4951, 5006), 'numpy.allclose', 'np.allclose', (["parsed['allocation_gate']", 'allocation_gate'], {}), "(parsed['allocation_gate'], allocation_gate)\n", (4962, 5006), True, 'import numpy as np\n'), ((5040, 5085), 'numpy.allclose', 'np.allclose', (["parsed['write_gate']", 'write_gate'], {}), "(parsed['write_gate'], write_gate)\n", (5051, 5085), True, 'import numpy as np\n'), ((5119, 5164), 'numpy.allclose', 'np.allclose', (["parsed['read_modes']", 'read_modes'], {}), "(parsed['read_modes'], read_modes)\n", (5130, 5164), True, 'import numpy as np\n'), ((5864, 5893), 'tensorflow.initialize_all_variables', 
'tf.initialize_all_variables', ([], {}), '()\n', (5891, 5893), True, 'import tensorflow as tf\n'), ((6945, 6974), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (6972, 6974), True, 'import tensorflow as tf\n'), ((2980, 3013), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(2)', '(2, 38)'], {}), '(-2, 2, (2, 38))\n', (2997, 3013), True, 'import numpy as np\n'), ((3645, 3681), 'numpy.exp', 'np.exp', (['(-1 * zeta[:, 30, np.newaxis])'], {}), '(-1 * zeta[:, 30, np.newaxis])\n', (3651, 3681), True, 'import numpy as np\n'), ((3723, 3759), 'numpy.exp', 'np.exp', (['(-1 * zeta[:, 31, np.newaxis])'], {}), '(-1 * zeta[:, 31, np.newaxis])\n', (3729, 3759), True, 'import numpy as np\n'), ((4011, 4029), 'numpy.exp', 'np.exp', (['read_modes'], {}), '(read_modes)\n', (4017, 4029), True, 'import numpy as np\n'), ((5478, 5510), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(2, 10)'], {}), '(0, 1, (2, 10))\n', (5495, 5510), True, 'import numpy as np\n'), ((5566, 5601), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(2, 5, 2)'], {}), '(-1, 1, (2, 5, 2))\n', (5583, 5601), True, 'import numpy as np\n'), ((6697, 6729), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(2, 10)'], {}), '(0, 1, (2, 10))\n', (6714, 6729), True, 'import numpy as np\n'), ((6784, 6819), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(2, 5, 2)'], {}), '(-1, 1, (2, 5, 2))\n', (6801, 6819), True, 'import numpy as np\n'), ((3150, 3185), 'numpy.reshape', 'np.reshape', (['zeta[:, 10:12]', '(-1, 2)'], {}), '(zeta[:, 10:12], (-1, 2))\n', (3160, 3185), True, 'import numpy as np\n'), ((3312, 3344), 'numpy.reshape', 'np.reshape', (['zeta[:, 17]', '(-1, 1)'], {}), '(zeta[:, 17], (-1, 1))\n', (3322, 3344), True, 'import numpy as np\n'), ((3405, 3440), 'numpy.reshape', 'np.reshape', (['zeta[:, 18:23]', '(-1, 5)'], {}), '(zeta[:, 18:23], (-1, 5))\n', (3415, 3440), True, 'import numpy as np\n'), ((3562, 3597), 
'numpy.reshape', 'np.reshape', (['zeta[:, 28:30]', '(-1, 2)'], {}), '(zeta[:, 28:30], (-1, 2))\n', (3572, 3597), True, 'import numpy as np\n'), ((6121, 6145), 'numpy.reshape', 'np.reshape', (['val', '(2, -1)'], {}), '(val, (2, -1))\n', (6131, 6145), True, 'import numpy as np\n'), ((6301, 6325), 'numpy.reshape', 'np.reshape', (['val', '(2, -1)'], {}), '(val, (2, -1))\n', (6311, 6325), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Join, MapCompose
from scrapy.spiders import CrawlSpider, Rule
from movies.items import MoviesItem
class DoubanSpider(CrawlSpider):
name = 'douban'
allowed_domains = ['douban.com']
start_urls = ['https://movie.douban.com/top250']
rules = (
Rule(LinkExtractor(restrict_xpaths='//*[contains(@rel, "next")]')),
Rule(LinkExtractor(
restrict_xpaths='//*[contains(@class, "pic")]'), callback='parse_item')
)
def parse_item(self, response):
""" This function parses a property page.
@url https://movie.douban.com/top250
@returns items 1
@scrapes name score category url year
"""
# create the loader using the response
l = ItemLoader(item=MoviesItem(), response=response)
# Load fields using Xpath expressions
l.add_xpath('name', '//h1[1]/span[1]/text()',
MapCompose(str.strip, str.title))
l.add_xpath('score', '//*[contains(@class, "ll rating_num")]//text()',
MapCompose(lambda i: i.replace(',', ''), float), re='[.0-9]+')
l.add_xpath('category', '//*[contains(@property, "v:genre")]//text()',
MapCompose(str.strip), Join())
l.add_xpath('year', '//*[@id="content"]/h1/span[2]/text()', MapCompose(int), re='[0-9]+')
l.add_value('url', response.url)
return l.load_item()
| [
"scrapy.linkextractors.LinkExtractor",
"scrapy.loader.processors.MapCompose",
"movies.items.MoviesItem",
"scrapy.loader.processors.Join"
] | [((418, 478), 'scrapy.linkextractors.LinkExtractor', 'LinkExtractor', ([], {'restrict_xpaths': '"""//*[contains(@rel, "next")]"""'}), '(restrict_xpaths=\'//*[contains(@rel, "next")]\')\n', (431, 478), False, 'from scrapy.linkextractors import LinkExtractor\n'), ((494, 555), 'scrapy.linkextractors.LinkExtractor', 'LinkExtractor', ([], {'restrict_xpaths': '"""//*[contains(@class, "pic")]"""'}), '(restrict_xpaths=\'//*[contains(@class, "pic")]\')\n', (507, 555), False, 'from scrapy.linkextractors import LinkExtractor\n'), ((1043, 1075), 'scrapy.loader.processors.MapCompose', 'MapCompose', (['str.strip', 'str.title'], {}), '(str.strip, str.title)\n', (1053, 1075), False, 'from scrapy.loader.processors import Join, MapCompose\n'), ((1338, 1359), 'scrapy.loader.processors.MapCompose', 'MapCompose', (['str.strip'], {}), '(str.strip)\n', (1348, 1359), False, 'from scrapy.loader.processors import Join, MapCompose\n'), ((1361, 1367), 'scrapy.loader.processors.Join', 'Join', ([], {}), '()\n', (1365, 1367), False, 'from scrapy.loader.processors import Join, MapCompose\n'), ((1437, 1452), 'scrapy.loader.processors.MapCompose', 'MapCompose', (['int'], {}), '(int)\n', (1447, 1452), False, 'from scrapy.loader.processors import Join, MapCompose\n'), ((889, 901), 'movies.items.MoviesItem', 'MoviesItem', ([], {}), '()\n', (899, 901), False, 'from movies.items import MoviesItem\n')] |
import json
import os
import shutil
import urllib.request
import traceback
import logging
import psutil
from collections import defaultdict
from typing import List, Dict, Tuple
from multiprocessing import Semaphore, Pool
from subprocess import Popen, PIPE
from datetime import datetime, timedelta
from lxml import etree
from lxml.etree import Element
import pyarrow as pa
import pyarrow.parquet as pq
from google.cloud import storage
from diachronic import global_conf, Tags
PROCESS_MEM = psutil.virtual_memory().total / psutil.cpu_count()
# Fraction of (total_mem/cpu_count) that a given process uses before flushing buffer
PROCESS_MEM_LIMIT = .1
DOWNLOAD_SEMAPHORE = Semaphore(global_conf.download_parallelism)
FAILURES = []
def make_path(path: str) -> None:
if not os.path.exists(path):
os.makedirs(path)
def get_wiki_from_filename(wiki_file: str) -> str:
return wiki_file.split("-")[0]
class WikiHandler(object):
def __init__(self):
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
make_path(global_conf.input_path)
def get_filenames(self) -> List[str]:
filenames = []
for wiki in global_conf.wikis:
url_prefix = global_conf.get_url_prefix(wiki)
url = "{}dumpstatus.json".format(url_prefix)
logging.info("Grabbing filenames from {}".format(url))
conn = urllib.request.urlopen(url)
data = json.loads(conn.read().decode())
conn.close()
filenames.extend(list(data["jobs"]["metahistory7zdump"]["files"].keys()))
return filenames
def get_files_to_skip(self) -> List[str]:
client = storage.Client()
return [blob.name for blob in client.get_bucket(global_conf.bucket).list_blobs()]
def get_files_to_run(self, overwrite=False) -> List[str]:
all_filenames = self.get_filenames()
if overwrite:
logging.info("Overwrite enabled, running all {} files".format(len(all_filenames)))
return all_filenames
skipfiles = self.get_files_to_skip()
files_to_run = [f for f in all_filenames
if "{}.{}".format(f, global_conf.output_suffix) not in skipfiles]
skip_count = len(all_filenames) - len(files_to_run)
logging.info("Running {} files and skipping {}".format(len(files_to_run), skip_count))
return files_to_run
def download(self, wiki_file: str) -> None:
logging.info("Downloading {}".format(wiki_file))
wiki = get_wiki_from_filename(wiki_file)
url_prefix = global_conf.get_url_prefix(wiki)
response = urllib.request.urlopen(url_prefix + wiki_file)
download_file = open(global_conf.input_path + wiki_file, 'wb')
shutil.copyfileobj(response, download_file)
response.close()
download_file.close()
logging.info("Downloaded {}".format(wiki_file))
def run_file(self, wiki_file: str) -> None:
try:
with DOWNLOAD_SEMAPHORE:
self.download(wiki_file)
parser = WikiFileParser(wiki_file)
parser.run()
except Exception:
logging.info("Caught exception on {}".format(wiki_file))
logging.error(traceback.format_exc())
FAILURES.append(wiki_file)
os.remove(global_conf.input_path + wiki_file)
def run(self) -> None:
logging.info("Running {}".format(global_conf.month_source))
filenames_to_run = self.get_files_to_run()
pool = Pool()
pool.map_async(self.run_file, filenames_to_run, error_callback=self._on_error)
pool.close()
pool.join()
logging.info("{} Run completed. Failures: {}".format(global_conf.month_source, FAILURES))
def _on_error(self, ex: Exception):
raise ex
class WikiFileParser(object):
def __init__(self, wiki_file: str):
self.arrow_cols = ("namespace", "title", "timestamp", "text")
self.wiki_file = wiki_file
self.wiki = get_wiki_from_filename(self.wiki_file)
output_prefix = global_conf.get_output_prefix(self.wiki)
make_path(global_conf.output_path + output_prefix)
self.output_file = "{}{}.{}".format(output_prefix,
self.wiki_file,
global_conf.output_suffix)
# State trackers
self.arrow_buff = {colname: [] for colname in self.arrow_cols}
self.arrow_row, self.cur_date, self.current_revision = self.iter_reset()
self.schema: pq.ParquetSchema = None
self.writer: pq.ParquetWriter = None
def iter_reset(self) -> Tuple[Dict[str, None], datetime, None]:
self.arrow_row = {colname: None for colname in self.arrow_cols}
self.cur_date = global_conf.datetime_init
self.current_revision = None
return self.arrow_row, self.cur_date, self.current_revision
@property
def func_dict(self) -> Dict[str, callable]:
d = {
Tags.Revision.nstag: self.parse_revision,
Tags.Namespace.nstag: self.parse_namespace,
Tags.Page.nstag: self.parse_finish,
Tags.Title.nstag: self.parse_title
}
return defaultdict(lambda: (lambda x: None), **d)
def parse_title(self, elem: Element) -> None:
self.arrow_row["title"] = elem.text
def parse_namespace(self, elem: Element) -> None:
self.arrow_row["namespace"] = elem.text
def parse_revision(self, elem: Element) -> None:
if self.arrow_row["namespace"] == "0":
timestamp = datetime.strptime(elem.find(Tags.Timestamp.nstag).text[:-1], "%Y-%m-%dT%H:%M:%S")
if timestamp >= self.cur_date:
self.cur_date = datetime.combine(timestamp.date(), datetime.min.time()) + timedelta(days=1)
text = elem.find(Tags.Text.nstag).text or ""
self.arrow_row["text"] = text
self.arrow_row["timestamp"] = timestamp
for col, val in self.arrow_row.items():
self.arrow_buff[col].append(val)
elem.clear()
def parse_finish(self, elem: Element) -> None:
self.iter_reset()
# Determine whether buffer needs to be flushed based on available memory
process = psutil.Process(os.getpid())
if process.memory_info().rss / PROCESS_MEM >= PROCESS_MEM_LIMIT:
self.write()
elem.clear()
def stream(self) -> None:
stdout = Popen(["7z", "e", "-so", global_conf.input_path + self.wiki_file], stdout=PIPE).stdout
for event, elem in etree.iterparse(stdout, huge_tree=True):
self.func_dict[elem.tag](elem)
def write(self) -> None:
arrow_arrays = {colname: pa.array(arr) for colname, arr in self.arrow_buff.items()}
arrow_table = pa.Table.from_arrays(arrays=list(arrow_arrays.values()), names=list(arrow_arrays.keys()))
if not self.writer:
self.writer = pq.ParquetWriter(global_conf.output_path + self.output_file,
arrow_table.schema, compression='brotli')
self.writer.write_table(arrow_table)
self.arrow_buff = {colname: [] for colname in self.arrow_cols}
def upload(self) -> None:
client = storage.Client()
bucket = client.get_bucket(global_conf.bucket)
blob = bucket.blob(self.output_file)
with open(global_conf.output_path + self.output_file, 'rb') as pq_file:
blob.upload_from_file(pq_file)
def cleanup(self) -> None:
os.remove(global_conf.input_path + self.wiki_file)
os.remove(global_conf.output_path + self.output_file)
def run(self) -> None:
logging.info("Started parsing {}".format(self.wiki_file))
self.stream()
# Clear leftover buffer
self.write()
self.writer.close()
self.upload()
self.cleanup()
logging.info("Finished parsing {}".format(self.wiki_file))
if __name__ == "__main__":
WikiHandler().run()
| [
"psutil.virtual_memory",
"lxml.etree.iterparse",
"pyarrow.parquet.ParquetWriter",
"datetime.timedelta",
"os.remove",
"os.path.exists",
"subprocess.Popen",
"diachronic.global_conf.get_output_prefix",
"os.getpid",
"pyarrow.array",
"shutil.copyfileobj",
"logging.basicConfig",
"google.cloud.stor... | [((673, 716), 'multiprocessing.Semaphore', 'Semaphore', (['global_conf.download_parallelism'], {}), '(global_conf.download_parallelism)\n', (682, 716), False, 'from multiprocessing import Semaphore, Pool\n'), ((524, 542), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (540, 542), False, 'import psutil\n'), ((492, 515), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (513, 515), False, 'import psutil\n'), ((778, 798), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (792, 798), False, 'import os\n'), ((808, 825), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (819, 825), False, 'import os\n'), ((975, 1071), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (994, 1071), False, 'import logging\n'), ((1695, 1711), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (1709, 1711), False, 'from google.cloud import storage\n'), ((2604, 2636), 'diachronic.global_conf.get_url_prefix', 'global_conf.get_url_prefix', (['wiki'], {}), '(wiki)\n', (2630, 2636), False, 'from diachronic import global_conf, Tags\n'), ((2782, 2825), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response', 'download_file'], {}), '(response, download_file)\n', (2800, 2825), False, 'import shutil\n'), ((3557, 3563), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (3561, 3563), False, 'from multiprocessing import Semaphore, Pool\n'), ((4110, 4150), 'diachronic.global_conf.get_output_prefix', 'global_conf.get_output_prefix', (['self.wiki'], {}), '(self.wiki)\n', (4139, 4150), False, 'from diachronic import global_conf, Tags\n'), ((5272, 5313), 'collections.defaultdict', 'defaultdict', (['(lambda : lambda x: None)'], {}), '(lambda : lambda x: None, **d)\n', (5283, 5313), False, 'from collections import defaultdict\n'), ((6651, 6690), 
'lxml.etree.iterparse', 'etree.iterparse', (['stdout'], {'huge_tree': '(True)'}), '(stdout, huge_tree=True)\n', (6666, 6690), False, 'from lxml import etree\n'), ((7333, 7349), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (7347, 7349), False, 'from google.cloud import storage\n'), ((7613, 7663), 'os.remove', 'os.remove', (['(global_conf.input_path + self.wiki_file)'], {}), '(global_conf.input_path + self.wiki_file)\n', (7622, 7663), False, 'import os\n'), ((7672, 7725), 'os.remove', 'os.remove', (['(global_conf.output_path + self.output_file)'], {}), '(global_conf.output_path + self.output_file)\n', (7681, 7725), False, 'import os\n'), ((1239, 1271), 'diachronic.global_conf.get_url_prefix', 'global_conf.get_url_prefix', (['wiki'], {}), '(wiki)\n', (1265, 1271), False, 'from diachronic import global_conf, Tags\n'), ((6357, 6368), 'os.getpid', 'os.getpid', ([], {}), '()\n', (6366, 6368), False, 'import os\n'), ((6537, 6616), 'subprocess.Popen', 'Popen', (["['7z', 'e', '-so', global_conf.input_path + self.wiki_file]"], {'stdout': 'PIPE'}), "(['7z', 'e', '-so', global_conf.input_path + self.wiki_file], stdout=PIPE)\n", (6542, 6616), False, 'from subprocess import Popen, PIPE\n'), ((6798, 6811), 'pyarrow.array', 'pa.array', (['arr'], {}), '(arr)\n', (6806, 6811), True, 'import pyarrow as pa\n'), ((7023, 7130), 'pyarrow.parquet.ParquetWriter', 'pq.ParquetWriter', (['(global_conf.output_path + self.output_file)', 'arrow_table.schema'], {'compression': '"""brotli"""'}), "(global_conf.output_path + self.output_file, arrow_table.\n schema, compression='brotli')\n", (7039, 7130), True, 'import pyarrow.parquet as pq\n'), ((3349, 3394), 'os.remove', 'os.remove', (['(global_conf.input_path + wiki_file)'], {}), '(global_conf.input_path + wiki_file)\n', (3358, 3394), False, 'import os\n'), ((3274, 3296), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3294, 3296), False, 'import traceback\n'), ((5853, 5870), 'datetime.timedelta', 
'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5862, 5870), False, 'from datetime import datetime, timedelta\n'), ((5830, 5849), 'datetime.datetime.min.time', 'datetime.min.time', ([], {}), '()\n', (5847, 5849), False, 'from datetime import datetime, timedelta\n')] |
import inspect
import pytest
import numpy as np
from datar.core.backends.pandas import Categorical, DataFrame, Series
from datar.core.backends.pandas.testing import assert_frame_equal
from datar.core.backends.pandas.core.groupby import SeriesGroupBy
from datar.core.factory import func_factory
from datar.core.tibble import (
SeriesCategorical,
SeriesRowwise,
TibbleGrouped,
TibbleRowwise,
)
from datar.tibble import tibble
from ..conftest import assert_iterable_equal
def test_transform_default():
@func_factory("transform", "x")
def double(x):
return x * 2
# scalar
out = double(3)
assert out[0] == 6
out = double(np.array([1, 2], dtype=int))
assert_iterable_equal(out, [2, 4])
@func_factory("transform", "x")
def double(x):
return x * 2
out = double([1, 2])
assert_iterable_equal(out, [2, 4])
# default on series
x = Series([2, 3], index=["a", "b"])
out = double(x)
assert isinstance(out, Series)
assert_iterable_equal(out.index, ["a", "b"])
assert_iterable_equal(out, [4, 6])
# default on dataframe
x = DataFrame({"a": [3, 4]})
out = double(x)
assert isinstance(out, DataFrame)
assert_iterable_equal(out.a, [6, 8])
# default on seriesgroupby
x = Series([1, 2, 1, 2]).groupby([1, 1, 2, 2])
out = double(x)
assert isinstance(out, SeriesGroupBy)
assert_iterable_equal(out.obj, [2, 4, 2, 4])
assert out.grouper.ngroups == 2
# on tibble grouped
x = tibble(x=[1, 2, 1, 2], g=[1, 1, 2, 2]).group_by("g")
out = double(x)
# grouping variables not included
assert_iterable_equal(out.x.obj, [2, 4, 2, 4])
x = tibble(x=[1, 2, 1, 2], g=[1, 1, 2, 2]).rowwise("g")
out = double(x)
assert isinstance(out, TibbleRowwise)
assert_frame_equal(out, out._datar["grouped"].obj)
assert_iterable_equal(out.x.obj, [2, 4, 2, 4])
assert_iterable_equal(out.group_vars, ["g"])
def test_transform_register():
@func_factory(kind="transform", data_args="x")
def double(x):
return x * 2
@double.register(DataFrame)
def _(x):
return x * 3
x = Series([2, 3])
out = double(x)
assert_iterable_equal(out, [4, 6])
double.register(Series, lambda x: x * 4)
out = double(x)
assert_iterable_equal(out, [8, 12])
x = tibble(a=[1, 3])
out = double(x)
assert_iterable_equal(out.a, [3, 9])
out = double([1, 4])
assert_iterable_equal(out, [4, 16])
# register an available string func for tranform
double.register(SeriesGroupBy, "sum")
x = Series([1, -2]).groupby([1, 2])
out = double(x)
assert_iterable_equal(out.obj, [1, -2])
# seriesrowwise
double.register(SeriesRowwise, lambda x: x + 1)
x.is_rowwise = True
out = double(x)
assert_iterable_equal(out.obj, [2, -1])
assert out.is_rowwise
def test_transform_hooks():
    """pre/post hooks wrap a registered transform.

    `pre` rewrites (x, args, kwargs) before the call, `post` rewrites the
    result; `func=None` keeps the currently registered function.  Each
    registration below replaces the previous one for that backend type, so
    statement order matters.
    """
    @func_factory(kind="transform", data_args="x")
    def times(x, t):
        return x * t
    # A non-callable `pre` is rejected up front.
    with pytest.raises(ValueError):
        times.register(Series, meta=False, pre=1, func=None)
    # pre negates t, post adds t back: times(x, -1) -> x*1 + (-1) + ... per hook.
    times.register(
        Series,
        func=None,
        pre=lambda x, t: (x, (-t,), {}),
        post=lambda out, x, t: out + t,
    )
    x = Series([1, 2])
    out = times(x, -1)
    assert_iterable_equal(out, [2, 3])
    # meta=False replaces the function and drops the hooks.
    @times.register(Series, meta=False)
    def _(x, t):
        return x + t
    out = times(x, 10)
    assert_iterable_equal(out, [11, 12])
    # meta=True keeps hook metadata while replacing the function.
    @times.register(SeriesGroupBy, meta=True)
    def _(x, t):
        return x + 10
    x = Series([1, 2, 1, 2]).groupby([1, 1, 2, 2])
    out = times(x, 1)
    assert_iterable_equal(out.obj, [11, 12, 11, 12])
    # New hooks over the kept function: pre bumps t, post is identity.
    times.register(
        SeriesGroupBy,
        func=None,
        pre=lambda x, t: (x, (t + 1,), {}),
        post=lambda out, x, *args, **kwargs: out,
    )
    out = times(x, 1)
    assert_iterable_equal(out, [2, 4, 2, 4])
    # A `pre` returning None leaves the arguments unchanged.
    times.register(
        Series,
        func=None,
        pre=lambda *args, **kwargs: None,
        post=lambda out, x, t: out + t,
    )
    x = Series([1, 2])
    out = times(x, 3)
    assert_iterable_equal(out, [4, 5])
    @times.register(DataFrame, meta=True)
    def _(x, t):
        return x ** t
    x = tibble(a=[1, 2], b=[2, 3])
    out = times(x, 3)
    assert_iterable_equal(out.a, [1, 8])
    assert_iterable_equal(out.b, [8, 27])
    # TibbleGrouped
    times.register(
        TibbleGrouped,
        func=None,
        pre=lambda x, t: (x, (t - 1,), {}),
        post=lambda out, x, t: out.reindex([1, 0]),
    )
    x = x.group_by("a")
    out = times(x, 3)
    assert_iterable_equal(out.b, [6, 4])
    @times.register(
        TibbleGrouped,
        meta=False,
    )
    def _(x, t):
        out = x.transform(lambda d, t: d * t, 0, t - 1)
        out.iloc[0, 1] = 10
        return out
    # x = tibble(a=[1, 2], b=[2, 3]) # grouped by a
    out = times(x, 3)
    assert isinstance(out, TibbleGrouped)
    assert_iterable_equal(out.group_vars, ["a"])
    assert_iterable_equal(out.b.obj, [10, 6])
def test_agg():
    """An "agg" factory aggregates over each group/row of the input.

    Built via keyword arguments (name/func/signature) rather than as a
    decorator; per-backend implementations are registered afterwards and
    later registrations replace earlier ones.
    """
    men = func_factory(
        "agg",
        "a",
        name="men",
        func=np.mean,
        signature=inspect.signature(lambda a: None),
    )
    x = [1, 2, 3]
    out = men(x)
    assert out == 2.0
    x = Series([1, 2, 3])
    out = men(x)
    assert out == 2.0
    # SeriesGroupBy
    men.register(SeriesGroupBy, func="mean")
    x = Series([1, 2, 4]).groupby([1, 2, 2])
    out = men(x)
    assert_iterable_equal(out.index, [1, 2])
    assert_iterable_equal(out, [1.0, 3.0])
    # SeriesRowwise
    df = tibble(x=[1, 2, 4]).rowwise()
    out = men(df.x)
    assert_iterable_equal(out, df.x.obj)
    men.register(SeriesRowwise, func="sum")
    out = men(df.x)
    assert_iterable_equal(out.index, [0, 1, 2])
    assert_iterable_equal(out, [1.0, 2.0, 4.0])
    # TibbleRowwise
    x = tibble(a=[1, 2, 3], b=[4, 5, 6]).rowwise()
    out = men(x)
    assert_iterable_equal(out, [2.5, 3.5, 4.5])
    # TibbleGrouped
    x = tibble(a=[1, 2, 3], b=[4, 5, 5]).group_by("b")
    out = men(x)
    assert_iterable_equal(out.a, [1.0, 2.5])
def test_varargs_data_args():
@func_factory("agg", {"x", "args[0]"})
def mulsum(x, *args):
return (x + args[0]) * args[1]
out = mulsum([1, 2], 2, 3)
assert_iterable_equal(out, [9, 12])
@func_factory("agg", {"x", "args"})
def mulsum(x, *args):
return x + args[0] + args[1]
out = mulsum([1, 2], [1, 2], [2, 3])
assert_iterable_equal(out, [4, 7])
def test_dataargs_not_exist():
    """Naming a data arg that is not in the signature raises at call time."""
    factory = func_factory("agg", "y")
    bad_fun = factory(lambda x: None)
    with pytest.raises(ValueError):
        bad_fun(1)
def test_args_frame():
@func_factory("agg", {"x", "y"})
def frame(x, y, __args_frame=None):
return __args_frame
out = frame(1, 2)
assert_iterable_equal(sorted(out.columns), ["x", "y"])
def test_args_raw():
@func_factory("agg", {"x"})
def raw(x, __args_raw=None):
return x, __args_raw["x"]
outx, rawx = raw(1)
assert isinstance(outx, Series)
assert rawx == 1
def test_apply():
    """An "apply" factory maps the function over each row of a rowwise tibble."""
    @func_factory("apply", "x")
    def rn(x):
        return tibble(x=[1, 2, 3])
    rowwise_df = tibble(a=[1, 2], b=[2, 3]).rowwise()
    applied = rn(rowwise_df)
    # One result per row; each result is the 3x1 frame returned above.
    assert applied.shape == (2,)
    assert applied.iloc[0].shape == (3, 1)
def test_no_func_registered():
    """Registering with func=None and meta=False leaves nothing to call."""
    agg_fun = func_factory("agg", "x", func=lambda x: None)
    with pytest.raises(ValueError):
        agg_fun.register(SeriesGroupBy, func=None, meta=False)
def test_run_error():
    """Exceptions from the registered function surface as ValueError."""
    @func_factory("agg", "x")
    def error(x):
        raise RuntimeError
    # The wrapper re-raises with a message pointing at the registered function.
    with pytest.raises(ValueError, match="registered function"):
        error(1)
def test_series_cat():
    """Categorical inputs dispatch to a SeriesCategorical registration."""
    @func_factory("agg", "x")
    def sum1(x):
        return x.sum()
    # Categoricals get their own implementation (first element here).
    @sum1.register(SeriesCategorical)
    def _(x):
        return x[0]
    plain_result = sum1([1, 2])
    assert plain_result == 3
    cat_result = sum1(Categorical([1, 2]))
    assert cat_result == 1
def test_str_fun():
    """func may be the string name of a pandas aggregation (here "sum")."""
    factory_kwargs = dict(
        name="sum2",
        qualname="sum2",
        func="sum",
        signature=inspect.signature(lambda x: None),
    )
    sum2 = func_factory("agg", "x", **factory_kwargs)
    assert sum2([1, 2, 3]) == 6
| [
"datar.tibble.tibble",
"inspect.signature",
"datar.core.backends.pandas.testing.assert_frame_equal",
"datar.core.backends.pandas.DataFrame",
"numpy.array",
"datar.core.backends.pandas.Categorical",
"pytest.raises",
"datar.core.backends.pandas.Series",
"datar.core.factory.func_factory"
] | [((524, 554), 'datar.core.factory.func_factory', 'func_factory', (['"""transform"""', '"""x"""'], {}), "('transform', 'x')\n", (536, 554), False, 'from datar.core.factory import func_factory\n'), ((744, 774), 'datar.core.factory.func_factory', 'func_factory', (['"""transform"""', '"""x"""'], {}), "('transform', 'x')\n", (756, 774), False, 'from datar.core.factory import func_factory\n'), ((913, 945), 'datar.core.backends.pandas.Series', 'Series', (['[2, 3]'], {'index': "['a', 'b']"}), "([2, 3], index=['a', 'b'])\n", (919, 945), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((1125, 1149), 'datar.core.backends.pandas.DataFrame', 'DataFrame', (["{'a': [3, 4]}"], {}), "({'a': [3, 4]})\n", (1134, 1149), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((1801, 1851), 'datar.core.backends.pandas.testing.assert_frame_equal', 'assert_frame_equal', (['out', "out._datar['grouped'].obj"], {}), "(out, out._datar['grouped'].obj)\n", (1819, 1851), False, 'from datar.core.backends.pandas.testing import assert_frame_equal\n'), ((1990, 2035), 'datar.core.factory.func_factory', 'func_factory', ([], {'kind': '"""transform"""', 'data_args': '"""x"""'}), "(kind='transform', data_args='x')\n", (2002, 2035), False, 'from datar.core.factory import func_factory\n'), ((2153, 2167), 'datar.core.backends.pandas.Series', 'Series', (['[2, 3]'], {}), '([2, 3])\n', (2159, 2167), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((2343, 2359), 'datar.tibble.tibble', 'tibble', ([], {'a': '[1, 3]'}), '(a=[1, 3])\n', (2349, 2359), False, 'from datar.tibble import tibble\n'), ((2909, 2954), 'datar.core.factory.func_factory', 'func_factory', ([], {'kind': '"""transform"""', 'data_args': '"""x"""'}), "(kind='transform', data_args='x')\n", (2921, 2954), False, 'from datar.core.factory import func_factory\n'), ((3247, 3261), 'datar.core.backends.pandas.Series', 'Series', (['[1, 2]'], {}), '([1, 
2])\n', (3253, 3261), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((4063, 4077), 'datar.core.backends.pandas.Series', 'Series', (['[1, 2]'], {}), '([1, 2])\n', (4069, 4077), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((4230, 4256), 'datar.tibble.tibble', 'tibble', ([], {'a': '[1, 2]', 'b': '[2, 3]'}), '(a=[1, 2], b=[2, 3])\n', (4236, 4256), False, 'from datar.tibble import tibble\n'), ((5276, 5293), 'datar.core.backends.pandas.Series', 'Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (5282, 5293), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((6143, 6180), 'datar.core.factory.func_factory', 'func_factory', (['"""agg"""', "{'x', 'args[0]'}"], {}), "('agg', {'x', 'args[0]'})\n", (6155, 6180), False, 'from datar.core.factory import func_factory\n'), ((6324, 6358), 'datar.core.factory.func_factory', 'func_factory', (['"""agg"""', "{'x', 'args'}"], {}), "('agg', {'x', 'args'})\n", (6336, 6358), False, 'from datar.core.factory import func_factory\n'), ((6668, 6699), 'datar.core.factory.func_factory', 'func_factory', (['"""agg"""', "{'x', 'y'}"], {}), "('agg', {'x', 'y'})\n", (6680, 6699), False, 'from datar.core.factory import func_factory\n'), ((6878, 6904), 'datar.core.factory.func_factory', 'func_factory', (['"""agg"""', "{'x'}"], {}), "('agg', {'x'})\n", (6890, 6904), False, 'from datar.core.factory import func_factory\n'), ((7079, 7105), 'datar.core.factory.func_factory', 'func_factory', (['"""apply"""', '"""x"""'], {}), "('apply', 'x')\n", (7091, 7105), False, 'from datar.core.factory import func_factory\n'), ((7329, 7374), 'datar.core.factory.func_factory', 'func_factory', (['"""agg"""', '"""x"""'], {'func': '(lambda x: None)'}), "('agg', 'x', func=lambda x: None)\n", (7341, 7374), False, 'from datar.core.factory import func_factory\n'), ((7499, 7523), 'datar.core.factory.func_factory', 'func_factory', (['"""agg"""', '"""x"""'], {}), "('agg', 
'x')\n", (7511, 7523), False, 'from datar.core.factory import func_factory\n'), ((7682, 7706), 'datar.core.factory.func_factory', 'func_factory', (['"""agg"""', '"""x"""'], {}), "('agg', 'x')\n", (7694, 7706), False, 'from datar.core.factory import func_factory\n'), ((670, 697), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'int'}), '([1, 2], dtype=int)\n', (678, 697), True, 'import numpy as np\n'), ((3007, 3032), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3020, 3032), False, 'import pytest\n'), ((6546, 6570), 'datar.core.factory.func_factory', 'func_factory', (['"""agg"""', '"""y"""'], {}), "('agg', 'y')\n", (6558, 6570), False, 'from datar.core.factory import func_factory\n'), ((6596, 6621), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6609, 6621), False, 'import pytest\n'), ((7136, 7155), 'datar.tibble.tibble', 'tibble', ([], {'x': '[1, 2, 3]'}), '(x=[1, 2, 3])\n', (7142, 7155), False, 'from datar.tibble import tibble\n'), ((7384, 7409), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7397, 7409), False, 'import pytest\n'), ((7579, 7633), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""registered function"""'}), "(ValueError, match='registered function')\n", (7592, 7633), False, 'import pytest\n'), ((7880, 7899), 'datar.core.backends.pandas.Categorical', 'Categorical', (['[1, 2]'], {}), '([1, 2])\n', (7891, 7899), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((1289, 1309), 'datar.core.backends.pandas.Series', 'Series', (['[1, 2, 1, 2]'], {}), '([1, 2, 1, 2])\n', (1295, 1309), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((1512, 1550), 'datar.tibble.tibble', 'tibble', ([], {'x': '[1, 2, 1, 2]', 'g': '[1, 1, 2, 2]'}), '(x=[1, 2, 1, 2], g=[1, 1, 2, 2])\n', (1518, 1550), False, 'from datar.tibble import tibble\n'), ((1683, 1721), 'datar.tibble.tibble', 'tibble', ([], {'x': '[1, 2, 1, 
2]', 'g': '[1, 1, 2, 2]'}), '(x=[1, 2, 1, 2], g=[1, 1, 2, 2])\n', (1689, 1721), False, 'from datar.tibble import tibble\n'), ((2591, 2606), 'datar.core.backends.pandas.Series', 'Series', (['[1, -2]'], {}), '([1, -2])\n', (2597, 2606), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((3563, 3583), 'datar.core.backends.pandas.Series', 'Series', (['[1, 2, 1, 2]'], {}), '([1, 2, 1, 2])\n', (3569, 3583), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((5168, 5201), 'inspect.signature', 'inspect.signature', (['(lambda a: None)'], {}), '(lambda a: None)\n', (5185, 5201), False, 'import inspect\n'), ((5407, 5424), 'datar.core.backends.pandas.Series', 'Series', (['[1, 2, 4]'], {}), '([1, 2, 4])\n', (5413, 5424), False, 'from datar.core.backends.pandas import Categorical, DataFrame, Series\n'), ((5579, 5598), 'datar.tibble.tibble', 'tibble', ([], {'x': '[1, 2, 4]'}), '(x=[1, 2, 4])\n', (5585, 5598), False, 'from datar.tibble import tibble\n'), ((5860, 5892), 'datar.tibble.tibble', 'tibble', ([], {'a': '[1, 2, 3]', 'b': '[4, 5, 6]'}), '(a=[1, 2, 3], b=[4, 5, 6])\n', (5866, 5892), False, 'from datar.tibble import tibble\n'), ((5997, 6029), 'datar.tibble.tibble', 'tibble', ([], {'a': '[1, 2, 3]', 'b': '[4, 5, 5]'}), '(a=[1, 2, 3], b=[4, 5, 5])\n', (6003, 6029), False, 'from datar.tibble import tibble\n'), ((7165, 7191), 'datar.tibble.tibble', 'tibble', ([], {'a': '[1, 2]', 'b': '[2, 3]'}), '(a=[1, 2], b=[2, 3])\n', (7171, 7191), False, 'from datar.tibble import tibble\n'), ((8080, 8113), 'inspect.signature', 'inspect.signature', (['(lambda x: None)'], {}), '(lambda x: None)\n', (8097, 8113), False, 'import inspect\n')] |
import os
import rasterio
import argparse
from PIL import Image
import subprocess
import pathlib
import shutil
from glob import glob
from numba import njit, prange
from OpenVisus import *
### Configuration
ext_name = ".tif"   # file extension of the input tiles
dtype = "uint8[3]"  # OpenVisus field dtype: 3 channels of 8-bit (RGB)
limit = 1000        # process at most this many tiles
###--------------
@njit(parallel=True)
def blend_rgb_ann(a, b):
    # Paint annotated pixels red, in place: wherever the mask `b` is
    # non-zero, set the channel-first RGB image `a` (indexed as
    # a[channel][row][col]) to (255, 0, 0).
    # Vectorized equivalent, kept for reference:
    #a[b[b>0]] = [255,0,0]
    for i in prange(a[0].shape[0]):
        for j in prange(a[0].shape[1]):
            if(b[i][j] > 0):
                a[0][i][j]=255
                a[1][i][j]=0
                a[2][i][j]=0
class tile():
    """Record describing one geotiff tile of the mosaic.

    Attributes:
        path:  path to the tile file on disk.
        name:  basename of the tile (used to derive output names).
        frame: geographic bounds [minx, maxx, miny, maxy], filled in later.
        size:  pixel dimensions [width, height], filled in later.
    """
    def __init__(self, path, name):
        self.path = path
        self.name = name
        # Placeholders; populated while scanning tile metadata.
        self.frame = [0, 0, 0, 0]
        self.size = [0, 0]
# Command-line interface: -rgb and -out are mandatory, -ann is optional.
parser = argparse.ArgumentParser(description='Parse set of geotiff')
parser.add_argument('-rgb', type=str, nargs = 1, help ='rbg image path', required = True)
parser.add_argument('-ann', type=str, nargs = 1, help ='ann image path', required = False)
parser.add_argument('-out', type=str, nargs = 1, help ='output name', required = True)
args = parser.parse_args()
rgb_dir = args.rgb[0]
outdir = args.out[0]
# Scratch directory for intermediate files produced during conversion.
pathlib.Path(outdir+"/temp").mkdir(parents=True, exist_ok=True)
# The dataset name is the last path component of the output directory;
# fall back to the previous component when the path has a trailing slash.
outname = outdir.split("/")[-1]
if(outname==""):
    outname = outdir.split("/")[-2]
# When an annotation directory is given, burn the rasterized annotations
# into each RGB tile (red overlay) and write the blended tiles to `outdir`;
# the rest of the pipeline then reads from `idir`.
if(args.ann):
    ann_dir = args.ann[0]
    # Blend rgb and annotations
    for f in os.listdir(rgb_dir):
        if f.endswith(ext_name):
            rgb_path=rgb_dir+"/"+f
            # Annotation tiles follow the "<name>image_rasterized.tif" convention.
            ann_path=ann_dir+"/"+f.replace("image.tif", "image_rasterized.tif")
            ageo = rasterio.open(rgb_path)
            a = ageo.read()
            bgeo = rasterio.open(ann_path)
            b = bgeo.read()
            print("Blending ", rgb_path, "and", ann_path, "...")
            # In-place: paints a red pixel wherever the first mask band is non-zero.
            blend_rgb_ann(a, b[0])
            #tiff.imsave(outdir+"/"+f,a)
            # Write the blended tile, preserving the source georeferencing.
            with rasterio.open(
                outdir+"/"+f,
                'w',
                driver='GTiff',
                height=ageo.height,
                width=ageo.width,
                count=3,
                dtype=a.dtype,
                crs='+proj=latlong',
                transform=ageo.transform,
            ) as dst:
                dst.write(a)
    idir = outdir
else:
    idir = rgb_dir
# Convert and stitch
# Collect every tile in the input directory as a `tile` record; geographic
# frames and pixel sizes are filled in by the metadata scan below.
images = []
for f in os.listdir(idir):
    if f.endswith(ext_name):
        filepath=idir+"/"+f
        s = os.path.basename(f)
        # filepath = filepath.replace('(','\(')
        # filepath = filepath.replace(')','\)')
        images.append(tile(filepath,s))
# Scan tile metadata and compute the global geographic bounding box.
# bbox layout is [minx, maxx, miny, maxy]; the 0 initializers for the maxima
# assume positive coordinates -- NOTE(review): confirm for datasets with
# negative longitudes/latitudes.
bbox = [99999999, 0, 99999999, 0]
count = 0
for img in images:
    if count > limit:
        break
    count += 1
    try:
        ds = rasterio.open(img.path)
        width = ds.width
        height = ds.height
        bounds = ds.bounds
    except Exception:
        # BUGFIX: without this `continue` the loop went on to reuse stale
        # width/height/bounds from the previous tile (or crashed with a
        # NameError on the very first tile) despite printing "skipping".
        print("ERROR: metadata failure, skipping "+img.path)
        continue
    minx = bounds.left
    miny = bounds.top
    maxx = bounds.right
    maxy = bounds.bottom
    img.frame = [minx, maxx, miny, maxy]
    img.size = [width, height]
    #print("found gdal data", gt, "size", [height, width], "frame", [minx, maxx, miny, maxy], "psize", [maxx-minx, maxy-miny])
    print("frame", img.frame)#, "psize", [(maxx-minx)/width, (maxy-miny)/height])
    bbox[0] = min(bbox[0], minx)
    bbox[2] = min(bbox[2], miny)
    bbox[1] = max(bbox[1], maxx)
    bbox[3] = max(bbox[3], maxy)
# Geo units per pixel, taken from the last tile processed: all tiles are
# assumed to share the same resolution.
ratio=[(maxx-minx)/width,(maxy-miny)/height]
# Mosaic extents: in geo units (out_size) and in pixels (img_size).
out_size = [bbox[1]-bbox[0], bbox[3]-bbox[2]]
img_size = [int(out_size[0]/ratio[0]), int(out_size[1]/ratio[1])]
gbox = "0 "+str(img_size[0]-1)+" 0 "+str(img_size[1]-1)
# Build the OpenVisus .midx manifest that places every tile at its pixel
# offset inside the global mosaic, and convert each tile to its own .idx.
midx_name=outdir+"/global.midx"
midx_out = open(midx_name,"wt")
midx_out.write("<dataset typename='IdxMultipleDataset'>\n")
midx_out.write('<field name="voronoi">\n <code>output=voronoi()</code>\n</field>')
cwd = os.getcwd()
count = 0
for img in images:
    if count > limit:
        break
    count += 1
    # Local pixel box of this tile and its anchor (offset) in mosaic pixels.
    lbox = "0 "+str(img.size[0]-1)+" 0 "+str(img.size[1]-1)
    ancp = [int((img.frame[0]-bbox[0])/ratio[0]), int((img.frame[2]-bbox[2])/ratio[1])]
    #print(ancp)
    dbox = str(ancp[0])+ " " +str(ancp[0]+img.size[0]-1)+ " "+str(ancp[1])+ " "+str(ancp[1]+img.size[1]-1)
    #midx_out.write('\t<dataset url="file://'+outdir+"/"+img.name+'exp.idx" name="'+img.name+'"> <M><translate x="'+str(ancp[0])+'" y="'+str(ancp[1])+'"/></M> </dataset>\n')
    midx_out.write('\t<dataset url="file://'+outdir+"/"+img.name+'exp.idx" name="'+img.name+'" offset="'+str(ancp[0])+' '+str(ancp[1])+'"/>\n')
    # Create a per-tile .idx dataset and write the tile pixels into it.
    exp_idx = outdir+"/"+img.name+"exp.idx"
    field=Field("data",dtype,"row_major")
    CreateIdx(url=exp_idx,dims=img.size,fields=[field])
    db=PyDataset(exp_idx)
    #convertCommand(["create", exp_idx, "--box", lbox, "--fields", 'data '+dtype,"--time","0 0 time%03d/"])
    #convert.runFromArgs(["create", exp_idx, "--box", lbox, "--fields", 'data '+dtype,"--time","0 0 time%03d/"])
    print("Converting "+str(count)+"/"+str(min(limit, len(images)))+"...")
    data=numpy.asarray(Image.open(img.path))
    db.write(data)
    #convertCommand(["import",img.path,"--dims",str(img.size[0]),str(img.size[1])," --dtype ",dtype,"--export",exp_idx," --box ",lbox, "--time", "0"])
    #convert.runFromArgs(["import",img.path,"--dims",str(img.size[0]),str(img.size[1])," --dtype ",dtype,"--export",exp_idx," --box ",lbox, "--time", "0"])
midx_out.write('</dataset>')
midx_out.close();
print("Done conversion of tiles, now generating final mosaic")
def midxToIdx(filename, filename_idx):
    """Flatten a multi-dataset .midx into a single .idx mosaic.

    Reads the "voronoi" blend field from `filename` tile by tile and writes
    the full-resolution data into a fresh single-field dataset saved at
    `filename_idx`.  Relies on the OpenVisus star import (LoadIdxDataset,
    Field, Assert, Time, ...).
    """
    field="output=voronoi()"
    # in case it's an expression
    tile_size=int(eval("4*1024"))
    # Upper-case names refer to the source (.midx), lower-case to the
    # destination (.idx).
    DATASET = LoadIdxDataset(filename)
    FIELD=DATASET.getFieldByName(field)
    TIME=DATASET.getDefaultTime()
    Assert(FIELD.valid())
    # save the new idx file
    idxfile=DATASET.idxfile
    idxfile.filename_template = "" # //force guess
    idxfile.time_template = "" #force guess
    idxfile.fields.clear()
    idxfile.fields.push_back(Field("DATA", dtype, "rowmajor")) # compression is left empty while writing (compress afterwards if desired)
    idxfile.save(filename_idx)
    dataset = LoadIdxDataset(filename_idx)
    Assert(dataset)
    field=dataset.getDefaultField()
    time=dataset.getDefaultTime()
    Assert(field.valid())
    ACCESS = DATASET.createAccess()
    access = dataset.createAccess()
    print("Generating tiles...",tile_size)
    TILES = DATASET.generateTiles(tile_size)
    TOT_TILES=TILES.size()
    T1 = Time.now()  # NOTE(review): never read afterwards
    # Copy each tile: read from the .midx, write into the .idx.
    for TILE_ID in range(TOT_TILES):
        TILE = TILES[TILE_ID]
        t1 = Time.now()
        buffer = DATASET.readFullResolutionData(ACCESS, FIELD, TIME, TILE)
        msec_read = t1.elapsedMsec()
        if not buffer:
            continue
        t1 = Time.now()
        dataset.writeFullResolutionData(access, field, time, buffer, TILE)
        msec_write = t1.elapsedMsec()
        print("done", TILE_ID, "/", TOT_TILES, "msec_read", msec_read, "msec_write", msec_write)
#dataset.compressDataset("jpg-JPEG_QUALITYGOOD-JPEG_SUBSAMPLING_420-JPEG_OPTIMIZE")
#dataset.compressDataset("jpg-JPEG_QUALITYSUPERB-JPEG_SUBSAMPLING_420-JPEG_OPTIMIZE")
#dataset.compressDataset("jpg-JPEG_QUALITYSUPERB-JPEG_SUBSAMPLING_444-JPEG_OPTIMIZE")
#dataset.compressDataset("jpg-JPEG_QUALITYGOOD-JPEG_SUBSAMPLING_444-JPEG_OPTIMIZE")
# Make one big photomosaic
midxToIdx(os.path.abspath(midx_name), os.path.abspath(outdir+"/"+outname+".idx"))
# Move conversion clutter into "outdir/temp".
# BUGFIX: the original called `glob.glob(...)`, but this file does
# `from glob import glob`, so `glob` is the function, not the module, and
# the attribute access only worked if the OpenVisus star import happened to
# shadow the name.  pathlib.Path.glob is unambiguous, and shutil.move is a
# portable replacement for spawning `mv`.
temp_dir = os.path.join(outdir, "temp")
for pattern in ("*tifexp*", "*.tif"):
    for f in pathlib.Path(outdir).glob(pattern):
        shutil.move(str(f), temp_dir)
shutil.move(os.path.join(outdir, "global.midx"), temp_dir)
# delete temp folder at the end
#subprocess.run(["rm","-R", outdir+"/temp"])
print("DONE")
| [
"os.listdir",
"PIL.Image.open",
"argparse.ArgumentParser",
"pathlib.Path",
"glob.glob.glob",
"rasterio.open",
"subprocess.run",
"numba.njit",
"os.getcwd",
"os.path.basename",
"os.path.abspath",
"numba.prange"
] | [((296, 315), 'numba.njit', 'njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (300, 315), False, 'from numba import njit, prange\n'), ((698, 757), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse set of geotiff"""'}), "(description='Parse set of geotiff')\n", (721, 757), False, 'import argparse\n'), ((2168, 2184), 'os.listdir', 'os.listdir', (['idir'], {}), '(idir)\n', (2178, 2184), False, 'import os\n'), ((3692, 3703), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3701, 3703), False, 'import os\n'), ((7280, 7311), 'glob.glob.glob', 'glob.glob', (["(outdir + '/*tifexp*')"], {}), "(outdir + '/*tifexp*')\n", (7289, 7311), False, 'from glob import glob\n'), ((7365, 7393), 'glob.glob.glob', 'glob.glob', (["(outdir + '/*.tif')"], {}), "(outdir + '/*.tif')\n", (7374, 7393), False, 'from glob import glob\n'), ((7438, 7504), 'subprocess.run', 'subprocess.run', (["['mv', outdir + '/global.midx', outdir + '/temp/']"], {}), "(['mv', outdir + '/global.midx', outdir + '/temp/'])\n", (7452, 7504), False, 'import subprocess\n'), ((380, 401), 'numba.prange', 'prange', (['a[0].shape[0]'], {}), '(a[0].shape[0])\n', (386, 401), False, 'from numba import njit, prange\n'), ((1348, 1367), 'os.listdir', 'os.listdir', (['rgb_dir'], {}), '(rgb_dir)\n', (1358, 1367), False, 'import os\n'), ((7154, 7180), 'os.path.abspath', 'os.path.abspath', (['midx_name'], {}), '(midx_name)\n', (7169, 7180), False, 'import os\n'), ((7182, 7230), 'os.path.abspath', 'os.path.abspath', (["(outdir + '/' + outname + '.idx')"], {}), "(outdir + '/' + outname + '.idx')\n", (7197, 7230), False, 'import os\n'), ((7314, 7358), 'subprocess.run', 'subprocess.run', (["['mv', f, outdir + '/temp/']"], {}), "(['mv', f, outdir + '/temp/'])\n", (7328, 7358), False, 'import subprocess\n'), ((7396, 7440), 'subprocess.run', 'subprocess.run', (["['mv', f, outdir + '/temp/']"], {}), "(['mv', f, outdir + '/temp/'])\n", (7410, 7440), False, 'import subprocess\n'), ((417, 438), 
'numba.prange', 'prange', (['a[0].shape[1]'], {}), '(a[0].shape[1])\n', (423, 438), False, 'from numba import njit, prange\n'), ((1109, 1139), 'pathlib.Path', 'pathlib.Path', (["(outdir + '/temp')"], {}), "(outdir + '/temp')\n", (1121, 1139), False, 'import pathlib\n'), ((2248, 2267), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (2264, 2267), False, 'import os\n'), ((2531, 2554), 'rasterio.open', 'rasterio.open', (['img.path'], {}), '(img.path)\n', (2544, 2554), False, 'import rasterio\n'), ((4858, 4878), 'PIL.Image.open', 'Image.open', (['img.path'], {}), '(img.path)\n', (4868, 4878), False, 'from PIL import Image\n'), ((1526, 1549), 'rasterio.open', 'rasterio.open', (['rgb_path'], {}), '(rgb_path)\n', (1539, 1549), False, 'import rasterio\n'), ((1587, 1610), 'rasterio.open', 'rasterio.open', (['ann_path'], {}), '(ann_path)\n', (1600, 1610), False, 'import rasterio\n'), ((1774, 1943), 'rasterio.open', 'rasterio.open', (["(outdir + '/' + f)", '"""w"""'], {'driver': '"""GTiff"""', 'height': 'ageo.height', 'width': 'ageo.width', 'count': '(3)', 'dtype': 'a.dtype', 'crs': '"""+proj=latlong"""', 'transform': 'ageo.transform'}), "(outdir + '/' + f, 'w', driver='GTiff', height=ageo.height,\n width=ageo.width, count=3, dtype=a.dtype, crs='+proj=latlong',\n transform=ageo.transform)\n", (1787, 1943), False, 'import rasterio\n')] |
#!/usr/bin/env python3
"""
Created on Tue Sep 1 2020
@author: kstoreyf
"""
import numpy as np
import nbodykit
import pandas as pd
import pickle
from nbodykit import cosmology
def main():
    """Generate the training and the test cosmology datasets and pickle them.

    The training grid and the random test draws go through the same
    pipeline: sample parameters -> compute correlation functions -> format
    into data frames -> bundle -> pickle.
    """
    jobs = [
        ('../data/cosmology_train.pickle',
         lambda: generate_training_parameters(n_train=1000)),
        ('../data/cosmology_test.pickle',
         lambda: generate_testing_parameters(n_test=100)),
    ]
    for save_fn, make_params in jobs:
        params = make_params()
        outputs, extra_input = generate_data(params)
        input_df, output_df = format_data(params, outputs, objs_id=None)
        payload = make_data_to_save(input_df, output_df, extra_input)
        save_data(payload, save_fn)
# Generate the parameters that govern the output training set data
def generate_training_parameters(n_train=1000):
    """Build a regular 3-D grid of (Omega_m, sigma_8, Omega_b) samples.

    `n_train` must be a perfect cube: each parameter axis carries
    n_train**(1/3) evenly spaced points.  Returns an array of shape
    (3, n_train) whose rows are Omega_m, sigma_8 and Omega_b.
    """
    n_params = 3
    points_per_dim = n_train**(1./float(n_params))
    assert abs(round(points_per_dim) - points_per_dim) < 1e-12, f"n_train must be a power of {n_params} because we're making a high-dimensional grid."
    points_per_dim = round(points_per_dim)
    axes = (
        np.linspace(0.26, 0.34, points_per_dim),    # Omega_m
        np.linspace(0.7, 0.95, points_per_dim),     # sigma_8
        np.linspace(0.038, 0.058, points_per_dim),  # Omega_b
    )
    mesh = np.meshgrid(*axes)
    # One flattened row per parameter: shape (n_params, n_train).
    return np.stack([g.ravel() for g in mesh])
# Generate the parameters that govern the output testing set data
def generate_testing_parameters(n_test=100):
    """Draw `n_test` random (Omega_m, sigma_8, Omega_b) triples.

    Each parameter is sampled uniformly within the same bounds as the
    training grid.  Returns an array of shape (3, n_test).
    """
    # Keep the sampling order (Omega_m, sigma_8, Omega_b) so the RNG stream
    # is consumed exactly as before.
    bounds = ((0.26, 0.34), (0.7, 0.95), (0.038, 0.058))
    return np.array([random_between(lo, hi, n_test) for lo, hi in bounds])
def random_between(xmin, xmax, n):
    """Return `n` uniform random floats in the half-open interval [xmin, xmax)."""
    span = xmax - xmin
    return xmin + np.random.rand(n) * span
# Generate the output data that we're interested in emulating
def generate_data(x):
    """Compute the linear-theory correlation function for each cosmology.

    `x` has shape (3, n): rows are Omega_m, sigma_8 and Omega_b.  Returns
    (y, extra_input) where y has shape (len(r_vals), n) -- one xi(r) column
    per cosmology -- and extra_input records the redshift and r grid.
    """
    redshift = 0.0
    r_vals = np.linspace(50, 140, 10)
    extra_input = {'redshift': redshift, 'r_vals': r_vals}
    n_cosmo = x.shape[1]
    y = np.empty((len(r_vals), n_cosmo))
    for i, (om, s8, ob) in enumerate(x.T):
        print(i)
        # nbodykit's Cosmology takes Omega_cdm, so remove the baryon share.
        ocdm = om - ob
        m_ncdm = []  # no massive neutrinos
        cosmo = cosmology.Cosmology(Omega0_b=ob, Omega0_cdm=ocdm, m_ncdm=m_ncdm)
        cosmo = cosmo.match(sigma8=s8)
        plin = cosmology.LinearPower(cosmo, redshift, transfer='EisensteinHu')
        cf = cosmology.correlation.CorrelationFunction(plin)
        y[:, i] = cf(r_vals)
    return y, extra_input
# Format data into pandas data frames
def format_data(x_input, y_output, objs_id=None):
    """Pack parameter and output arrays into two pandas data frames.

    `x_input` has shape (3, n_objs) with rows Omega_m, sigma_8, Omega_b;
    `y_output` has shape (n_outputs, n_objs).  When `objs_id` is None,
    synthetic ids "obj_0", "obj_1", ... are generated.  Returns
    (input_data, output_data), both keyed by an 'object_id' column.
    """
    n_objs = x_input.shape[1]
    n_outputs = y_output.shape[0]
    if objs_id is None:
        objs_id = ['obj_%d' % i for i in range(n_objs)]
    input_data = pd.DataFrame({
        'object_id': objs_id,
        r'$\Omega_m$': x_input[0, :],
        r'$\sigma_8$': x_input[1, :],
        r'$\Omega_b$': x_input[2, :],
    })
    output_data = pd.DataFrame({'object_id': objs_id})
    for i in range(n_outputs):
        output_data[r'$\xi(r_{})$'.format(i)] = y_output[i, :]
    return input_data, output_data
# Format the data to save it
def make_data_to_save(input_data, output_data, extra_input=None):
    """Bundle the input/output frames (plus optional extras) for pickling."""
    bundle = {'input_data': input_data, 'output_data': output_data}
    if extra_input is not None:
        bundle['extra_input'] = extra_input
    return bundle
# Save the data to a file
def save_data(data, save_fn):
    """Pickle `data` to the path `save_fn` using protocol 3."""
    with open(save_fn, 'wb') as fh:
        pickle.dump(data, fh, protocol=3)
# Entry point when executed as a script (no work happens on import).
if __name__=='__main__':
    main()
| [
"nbodykit.cosmology.correlation.CorrelationFunction",
"pickle.dump",
"numpy.random.rand",
"nbodykit.cosmology.Cosmology",
"numpy.array",
"numpy.linspace",
"nbodykit.cosmology.LinearPower",
"pandas.DataFrame",
"numpy.meshgrid",
"numpy.arange"
] | [((1335, 1368), 'numpy.linspace', 'np.linspace', (['(0.26)', '(0.34)', 'n_points'], {}), '(0.26, 0.34, n_points)\n', (1346, 1368), True, 'import numpy as np\n'), ((1383, 1415), 'numpy.linspace', 'np.linspace', (['(0.7)', '(0.95)', 'n_points'], {}), '(0.7, 0.95, n_points)\n', (1394, 1415), True, 'import numpy as np\n'), ((1430, 1465), 'numpy.linspace', 'np.linspace', (['(0.038)', '(0.058)', 'n_points'], {}), '(0.038, 0.058, n_points)\n', (1441, 1465), True, 'import numpy as np\n'), ((1477, 1515), 'numpy.meshgrid', 'np.meshgrid', (['omega_m', 'sigma_8', 'omega_b'], {}), '(omega_m, sigma_8, omega_b)\n', (1488, 1515), True, 'import numpy as np\n'), ((1972, 2009), 'numpy.array', 'np.array', (['[omega_m, sigma_8, omega_b]'], {}), '([omega_m, sigma_8, omega_b])\n', (1980, 2009), True, 'import numpy as np\n'), ((2224, 2248), 'numpy.linspace', 'np.linspace', (['(50)', '(140)', '(10)'], {}), '(50, 140, 10)\n', (2235, 2248), True, 'import numpy as np\n'), ((3099, 3113), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3111, 3113), True, 'import pandas as pd\n'), ((3310, 3324), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3322, 3324), True, 'import pandas as pd\n'), ((3377, 3402), 'numpy.arange', 'np.arange', (['number_outputs'], {}), '(number_outputs)\n', (3386, 3402), True, 'import numpy as np\n'), ((2527, 2591), 'nbodykit.cosmology.Cosmology', 'cosmology.Cosmology', ([], {'Omega0_b': 'ob', 'Omega0_cdm': 'ocdm', 'm_ncdm': 'm_ncdm'}), '(Omega0_b=ob, Omega0_cdm=ocdm, m_ncdm=m_ncdm)\n', (2546, 2591), False, 'from nbodykit import cosmology\n'), ((2646, 2709), 'nbodykit.cosmology.LinearPower', 'cosmology.LinearPower', (['cosmo', 'redshift'], {'transfer': '"""EisensteinHu"""'}), "(cosmo, redshift, transfer='EisensteinHu')\n", (2667, 2709), False, 'from nbodykit import cosmology\n'), ((2723, 2770), 'nbodykit.cosmology.correlation.CorrelationFunction', 'cosmology.correlation.CorrelationFunction', (['plin'], {}), '(plin)\n', (2764, 2770), False, 'from nbodykit 
import cosmology\n'), ((3901, 3933), 'pickle.dump', 'pickle.dump', (['data', 'f'], {'protocol': '(3)'}), '(data, f, protocol=3)\n', (3912, 3933), False, 'import pickle\n'), ((2071, 2088), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2085, 2088), True, 'import numpy as np\n'), ((3053, 3075), 'numpy.arange', 'np.arange', (['number_objs'], {}), '(number_objs)\n', (3062, 3075), True, 'import numpy as np\n')] |
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import fnmatch
import logging
import os
import shutil
import stat
import subprocess
import uuid
from pathlib import Path
from urllib import request
class AppRunSetupError(RuntimeError):
    """Raised when the AppRun runtime cannot be deployed into the AppDir."""
class AppRun:
    """Deploys the AppRun launcher and its hook library into an AppDir.

    Downloads prebuilt AppRun binaries from the AppImageCrafters GitHub
    releases (cached in `cache_dir`), matches them to the architecture of
    the bundled libc, and writes the `.env` runtime configuration file.
    """
    # Runtime environment written to <AppDir>/.env.
    # NOTE: this is a CLASS attribute that __init__ mutates, so the values
    # are effectively shared across instances.
    env = {
        "APPIMAGE_UUID": None,
        "SYSTEM_INTERP": None,
        "XDG_DATA_DIRS": "$APPDIR/usr/local/share:$APPDIR/usr/share:$XDG_CONFIG_DIRS",
        "XDG_CONFIG_DIRS": "$APPDIR/etc/xdg:$XDG_CONFIG_DIRS",
        "LD_PRELOAD": "libapprun_hooks.so",
    }
    # arch mappings from the file command output to the debian format
    archs_mapping = {
        "ARM aarch64": "aarch64",
        "ARM": "gnueabihf",
        "Intel 80386": "i386",
        "x86-64": "x86_64",
    }
    sections = {}
    def __init__(
        self,
        version,
        debug,
        app_dir,
        exec_path,
        exec_args="$@",
        cache_dir="appimage-builder-cache/runtime",
    ):
        """Configure the deployment.

        version:   AppRun release tag to download.
        debug:     pick the Debug build when True, Release otherwise.
        app_dir:   path of the AppDir to deploy into.
        exec_path: entry-point path relative to the AppDir.
        exec_args: arguments forwarded to the entry point.
        cache_dir: where downloaded AppRun assets are cached.
        """
        self.app_dir = Path(app_dir).absolute()
        self.apprun_version = version
        self.apprun_build_type = "Debug" if debug else "Release"
        self.env["APPIMAGE_UUID"] = str(uuid.uuid4())
        self.env["EXEC_PATH"] = "$APPDIR/%s" % exec_path
        self.env["EXEC_ARGS"] = exec_args
        self.cache_dir = Path(cache_dir).absolute()
    def deploy(self):
        """Install AppRun, the per-arch hook libraries and the .env file."""
        embed_archs = self._get_embed_libc_archs()
        # deploy AppRun
        apprun_path = self._get_apprun_binary(embed_archs[0])
        apprun_deploy_path = self.app_dir / "AppRun"
        logging.info("Deploying: %s => %s" % (apprun_path, self.app_dir / "AppRun"))
        shutil.copy(apprun_path, apprun_deploy_path)
        # rwx for the owner, r-x for group and others.
        apprun_deploy_path.chmod(
            stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH | stat.S_IROTH
        )
        for arch in embed_archs:
            hooks_lib = self._get_apprun_hooks_library(arch)
            # NOTE(review): may be None when no matching lib dir is found,
            # which would make the copy below fail.
            target_lib_dir = self._find_hooks_lib_target_lib_dir(arch)
            logging.info("Deploying: %s => %s" % (hooks_lib, target_lib_dir))
            shutil.copy(hooks_lib, os.path.join(target_lib_dir, "libapprun_hooks.so"))
        self._generate_env_file()
    def _get_embed_libc_archs(self):
        """Return the architectures of every libc bundled in the AppDir."""
        libc_paths = self._find_libc_paths()
        if not libc_paths:
            raise AppRunSetupError("Unable to locate libc at: %s" % self.app_dir)
        archs = set()
        for path in libc_paths:
            arch = self._get_elf_arch(path)
            if arch:
                archs.add(arch)
        return list(archs)
    def _generate_env_file(self):
        """Write the runtime environment as KEY=VALUE lines to <AppDir>/.env."""
        with open(os.path.join(self.app_dir, ".env"), "w") as f:
            for k, v in self.env.items():
                f.write("%s=%s\n" % (k, v))
    def _get_elf_arch(self, file):
        """Extract the architecture string from the `file` command output.

        LC_ALL=C pins the output language; the arch is the second
        comma-separated field, stripped of "shared object"/"executable".
        """
        proc_env = os.environ.copy()
        proc_env["LC_ALL"] = "C"
        proc = subprocess.run(
            ["file", "-b", file], stdout=subprocess.PIPE, env=proc_env
        )
        output = proc.stdout.decode("utf-8")
        parts = output.split(",")
        signature = ",".join(parts[1:2])
        signature = signature.replace("shared object", "")
        signature = signature.replace("executable", "")
        return signature.strip(" ")
    def _find_libc_paths(self):
        """Walk the AppDir and collect every libc.so / libc-*.so path."""
        paths = []
        for base_path, dirs, files in os.walk(self.app_dir):
            for file in files:
                abs_path = os.path.join(base_path, file)
                if fnmatch.fnmatch(abs_path, "*/libc.so*"):
                    paths.append(abs_path)
                if fnmatch.fnmatch(abs_path, "*/libc-*.so*"):
                    paths.append(abs_path)
        return paths
    def _find_hooks_lib_target_lib_dir(self, arch):
        """Return the first APPDIR_LIBRARY_PATH dir holding libs of `arch`.

        NOTE(review): env["APPDIR_LIBRARY_PATH"] is not set anywhere in this
        class -- it is expected to be injected by the caller before deploy();
        otherwise this raises KeyError.  Returns None when no dir matches.
        """
        lib_dirs = self.env["APPDIR_LIBRARY_PATH"]
        lib_dirs = lib_dirs.replace("$APPDIR", str(self.app_dir))
        # (redundant second replace, kept as-is)
        lib_dirs = lib_dirs.replace("$APPDIR", str(self.app_dir))
        lib_dirs = lib_dirs.split(":")
        for lib_dir in lib_dirs:
            for file in os.listdir(lib_dir):
                file_path = os.path.join(lib_dir, file)
                if os.path.isfile(file_path):
                    file_arch = self._get_elf_arch(file_path)
                    if file_arch == arch:
                        return lib_dir
    def _get_apprun_binary(self, arch):
        """Return the cached AppRun binary for `arch`, downloading if needed."""
        if arch not in self.archs_mapping:
            raise AppRunSetupError("Non-supported architecture: '%s'" % arch)
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        apprun_asset = "AppRun-%s-%s" % (
            self.apprun_build_type,
            self.archs_mapping[arch],
        )
        apprun_file = self.cache_dir / apprun_asset
        if not apprun_file.exists():
            url = (
                "https://github.com/AppImageCrafters/AppRun/releases/download/%s/%s"
                % (self.apprun_version, apprun_asset)
            )
            logging.info("Downloading: %s" % url)
            request.urlretrieve(url, apprun_file)
        return apprun_file
    def _get_apprun_hooks_library(self, arch):
        """Return the cached hooks library for `arch`, downloading if needed."""
        if arch not in self.archs_mapping:
            raise AppRunSetupError("Non-supported architecture: '%s'" % arch)
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        asset = "libapprun_hooks-%s-%s.so" % (
            self.apprun_build_type,
            self.archs_mapping[arch],
        )
        file = self.cache_dir / asset
        if not file.exists():
            url = (
                "https://github.com/AppImageCrafters/AppRun/releases/download/%s/%s"
                % (self.apprun_version, asset)
            )
            logging.info("Downloading: %s" % url)
            request.urlretrieve(url, file)
        return file
| [
"os.listdir",
"urllib.request.urlretrieve",
"pathlib.Path",
"subprocess.run",
"os.path.join",
"os.environ.copy",
"uuid.uuid4",
"os.path.isfile",
"fnmatch.fnmatch",
"shutil.copy",
"logging.info",
"os.walk"
] | [((2102, 2178), 'logging.info', 'logging.info', (["('Deploying: %s => %s' % (apprun_path, self.app_dir / 'AppRun'))"], {}), "('Deploying: %s => %s' % (apprun_path, self.app_dir / 'AppRun'))\n", (2114, 2178), False, 'import logging\n'), ((2187, 2231), 'shutil.copy', 'shutil.copy', (['apprun_path', 'apprun_deploy_path'], {}), '(apprun_path, apprun_deploy_path)\n', (2198, 2231), False, 'import shutil\n'), ((3339, 3356), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (3354, 3356), False, 'import os\n'), ((3405, 3479), 'subprocess.run', 'subprocess.run', (["['file', '-b', file]"], {'stdout': 'subprocess.PIPE', 'env': 'proc_env'}), "(['file', '-b', file], stdout=subprocess.PIPE, env=proc_env)\n", (3419, 3479), False, 'import subprocess\n'), ((3864, 3885), 'os.walk', 'os.walk', (['self.app_dir'], {}), '(self.app_dir)\n', (3871, 3885), False, 'import os\n'), ((1715, 1727), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1725, 1727), False, 'import uuid\n'), ((2539, 2604), 'logging.info', 'logging.info', (["('Deploying: %s => %s' % (hooks_lib, target_lib_dir))"], {}), "('Deploying: %s => %s' % (hooks_lib, target_lib_dir))\n", (2551, 2604), False, 'import logging\n'), ((4536, 4555), 'os.listdir', 'os.listdir', (['lib_dir'], {}), '(lib_dir)\n', (4546, 4555), False, 'import os\n'), ((5424, 5461), 'logging.info', 'logging.info', (["('Downloading: %s' % url)"], {}), "('Downloading: %s' % url)\n", (5436, 5461), False, 'import logging\n'), ((5474, 5511), 'urllib.request.urlretrieve', 'request.urlretrieve', (['url', 'apprun_file'], {}), '(url, apprun_file)\n', (5493, 5511), False, 'from urllib import request\n'), ((6146, 6183), 'logging.info', 'logging.info', (["('Downloading: %s' % url)"], {}), "('Downloading: %s' % url)\n", (6158, 6183), False, 'import logging\n'), ((6196, 6226), 'urllib.request.urlretrieve', 'request.urlretrieve', (['url', 'file'], {}), '(url, file)\n', (6215, 6226), False, 'from urllib import request\n'), ((1547, 1560), 'pathlib.Path', 'Path', 
(['app_dir'], {}), '(app_dir)\n', (1551, 1560), False, 'from pathlib import Path\n'), ((1853, 1868), 'pathlib.Path', 'Path', (['cache_dir'], {}), '(cache_dir)\n', (1857, 1868), False, 'from pathlib import Path\n'), ((2640, 2690), 'os.path.join', 'os.path.join', (['target_lib_dir', '"""libapprun_hooks.so"""'], {}), "(target_lib_dir, 'libapprun_hooks.so')\n", (2652, 2690), False, 'import os\n'), ((3151, 3185), 'os.path.join', 'os.path.join', (['self.app_dir', '""".env"""'], {}), "(self.app_dir, '.env')\n", (3163, 3185), False, 'import os\n'), ((3945, 3974), 'os.path.join', 'os.path.join', (['base_path', 'file'], {}), '(base_path, file)\n', (3957, 3974), False, 'import os\n'), ((3994, 4033), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['abs_path', '"""*/libc.so*"""'], {}), "(abs_path, '*/libc.so*')\n", (4009, 4033), False, 'import fnmatch\n'), ((4097, 4138), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['abs_path', '"""*/libc-*.so*"""'], {}), "(abs_path, '*/libc-*.so*')\n", (4112, 4138), False, 'import fnmatch\n'), ((4585, 4612), 'os.path.join', 'os.path.join', (['lib_dir', 'file'], {}), '(lib_dir, file)\n', (4597, 4612), False, 'import os\n'), ((4632, 4657), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (4646, 4657), False, 'import os\n')] |
import os
import yaml
import copy
import logging
from pathlib import Path
import torch
from torch.nn import *
from torch.optim import *
import torch.distributed as dist
from torch.optim.lr_scheduler import *
from torch.nn.parallel import DistributedDataParallel
from utils.metrics import *
from models import _get_model
torch.backends.cudnn.benchmark = True
class Argments(object):
    """Experiment configuration loaded from a YAML file.

    Provides path-style access ("setup/gpus") to the parsed YAML dict, sets
    up torch.distributed, and instantiates the model / optimizer / scheduler /
    losses named in the config by resolving their names with eval() against
    the wildcard imports at module level.
    """
    @staticmethod
    def _file_load(yaml_file):
        """Parse *yaml_file* and return the raw config dict."""
        with open(fr'{yaml_file}') as f:
            y = yaml.safe_load(f)
        return y
    @staticmethod
    def _module_load(d, part, **kargs):
        """Instantiate d[part]['name'] with d[part] (minus 'name') plus *kargs*.

        NOTE(review): the class name is resolved with eval(), so config files
        must come from a trusted source.
        """
        module_obj = eval(d[part]['name'])
        module_args = copy.deepcopy(d[part])
        module_args.update(kargs)
        del module_args['name']
        part = module_obj(**module_args)
        return part
    def _modules_load(self):
        """Build every config section whose key contains 'module'.

        Outside the 'infer' phase this wires up optimizer, LR scheduler,
        weighted losses and metrics; in 'infer' only the DDP-wrapped model
        is created.
        """
        for k, v in self._y.items():
            if 'module' in k:
                setattr(self, k, dict())
                module = self.__dict__[k]
                module['model'] = _get_model(**v['model'], model_type=self['setup/model_type']).cuda()
                if self['setup/phase'] != 'infer':
                    # The optimizer must see the raw parameters before the
                    # model is wrapped in DistributedDataParallel.
                    module['optim'] = self._module_load(v, part='optim',
                                                        params=module['model'].parameters())
                    module['model'] = DistributedDataParallel(module['model'],
                                                            [self['setup/rank']])
                    module['lr_scheduler'] = self._module_load(v, part='lr_scheduler',
                                                               optimizer=module['optim'])
                    loss = [eval(l)(**v['loss_args'][l]) for l in v['loss']]
                    module['loss_with_weight'] = list(zip(loss, v['loss_weight']))
                    module['val_metric'] = eval(v['val_metric'])(**v['metric_args'])
                    module['test_metric'] = eval(v['test_metric'])(**v['metric_args'])
                else:
                    module['model'] = DistributedDataParallel(module['model'],
                                                            [self['setup/rank']])
    def __init__(self, yaml_file, cmd_args):
        """Load config, apply CLI overrides, init torch.distributed, build modules."""
        self.file_name = yaml_file
        self._y = self._file_load(yaml_file)
        if cmd_args.gpus != "-1":
            self['setup/gpus'] = cmd_args.gpus
        # Must be set before any CUDA context is created.
        os.environ["CUDA_VISIBLE_DEVICES"] = self["setup/gpus"]
        self['setup/index'] = cmd_args.index
        self['setup/phase'] = cmd_args.phase
        self['setup/local_rank'] = cmd_args.local_rank
        # World size = number of GPU ids listed in setup/gpus.
        world_size = len(self["setup/gpus"].replace(',', "").replace("'", ""))
        model_path = f"outs/{self['setup/model_type']}/{self['module/model/name']}"
        model_path += f"/{self['path/dataset']}"
        if self['setup/index'] != -1:
            model_path += f"_{self['setup/index']}"
        if self['path/postfix'] != 'none':
            model_path += f"_{self['path/postfix']}"
        self['path/model_path'] = model_path
        Path(model_path).mkdir(parents=True, exist_ok=True)
        torch.cuda.set_device(cmd_args.local_rank)
        # File-based rendezvous shared by all ranks.
        torch.distributed.init_process_group(backend='nccl',
                                             init_method=f'file://{Path(model_path).resolve()}/sharedfile',
                                             world_size=world_size,
                                             rank=self['setup/local_rank'])
        self['setup/rank'] = dist.get_rank()
        self['setup/dist_size'] = dist.get_world_size()
        self._modules_load()
    def reset(self):
        """Drop all built modules, free CUDA memory, and rebuild from config."""
        for k, v in list(self.__dict__.items()):
            if 'module' in k:
                del self.__dict__[k]
        torch.cuda.empty_cache()
        self._modules_load()
    def _get(self, *keys):
        """Walk nested dicts: _get('a', 'b') -> self._y['a']['b']."""
        v = self._y
        for k in keys:
            v = v[k]
        return v
    def _update(self, *keys, value):
        """Set a nested value, creating intermediate dicts as needed."""
        k = self._y
        for i in range(len(keys) - 1):
            k.setdefault(keys[i], {})
            k = k[keys[i]]
        k[keys[-1]] = value
    def __str__(self):
        return f'{self.file_name}\n{self._y}'
    def __contains__(self, item):
        """True if *item* appears as a key anywhere in the nested config.

        Bug fix: the original discarded the result of the recursive call,
        so keys nested below the top level were never found.
        """
        def search_recursively(d, t):
            for k, v in d.items():
                if k == t:
                    return True
                if isinstance(v, dict) and search_recursively(v, t):
                    return True
            return False
        return search_recursively(self._y, item)
    def __getitem__(self, key):
        """Path-style read access: self['a/b/c'] -> self._y['a']['b']['c']."""
        return self._get(*key.split('/'))
    def __setitem__(self, key, value):
        """Path-style write access: self['a/b/c'] = v."""
        self._update(*key.split('/'), value=value)
if __name__ == '__main__':
    # Smoke test for the path-style config accessors, logged to console
    # and to log.log.
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler()
    file_handler = logging.FileHandler('log.log')
    file_handler.setLevel(logging.INFO)
    log.addHandler(stream_handler)
    log.addHandler(file_handler)
    # NOTE(review): Argments.__init__ requires a second `cmd_args` argument
    # (with .gpus/.index/.phase/.local_rank); this call as written raises
    # TypeError — confirm intended usage.
    Args = Argments('test.yaml')
    Args._update('path', 'abcd', 'efgh', value='zzzz')
    Args['path/cccc/dddd'] = 'ffff'
    log.debug(Args)
    log.debug(Args['path/cccc/dddd'])
    # print(Args)
    # print('path' in Args)
    # print(Args['path/abcd/efgh'])
    # print(Args['path/cccc/dddd'])
    # print(Args.module['lr_scheduler'])
| [
"logging.getLogger",
"logging.StreamHandler",
"torch.distributed.get_rank",
"pathlib.Path",
"torch.nn.parallel.DistributedDataParallel",
"yaml.safe_load",
"logging.FileHandler",
"copy.deepcopy",
"torch.cuda.set_device",
"torch.cuda.empty_cache",
"models._get_model",
"torch.distributed.get_worl... | [((4679, 4706), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4696, 4706), False, 'import logging\n'), ((4760, 4783), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (4781, 4783), False, 'import logging\n'), ((4803, 4833), 'logging.FileHandler', 'logging.FileHandler', (['"""log.log"""'], {}), "('log.log')\n", (4822, 4833), False, 'import logging\n'), ((653, 675), 'copy.deepcopy', 'copy.deepcopy', (['d[part]'], {}), '(d[part])\n', (666, 675), False, 'import copy\n'), ((3089, 3131), 'torch.cuda.set_device', 'torch.cuda.set_device', (['cmd_args.local_rank'], {}), '(cmd_args.local_rank)\n', (3110, 3131), False, 'import torch\n'), ((3474, 3489), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3487, 3489), True, 'import torch.distributed as dist\n'), ((3524, 3545), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3543, 3545), True, 'import torch.distributed as dist\n'), ((3722, 3746), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3744, 3746), False, 'import torch\n'), ((494, 511), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (508, 511), False, 'import yaml\n'), ((3028, 3044), 'pathlib.Path', 'Path', (['model_path'], {}), '(model_path)\n', (3032, 3044), False, 'from pathlib import Path\n'), ((1341, 1403), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (["module['model']", "[self['setup/rank']]"], {}), "(module['model'], [self['setup/rank']])\n", (1364, 1403), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((2035, 2097), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (["module['model']", "[self['setup/rank']]"], {}), "(module['model'], [self['setup/rank']])\n", (2058, 2097), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((1017, 1078), 'models._get_model', '_get_model', ([], {'model_type': 
"self['setup/model_type']"}), "(**v['model'], model_type=self['setup/model_type'])\n", (1027, 1078), False, 'from models import _get_model\n'), ((3260, 3276), 'pathlib.Path', 'Path', (['model_path'], {}), '(model_path)\n', (3264, 3276), False, 'from pathlib import Path\n')] |
from graph_peak_caller.multiplegraphscallpeaks import MultipleGraphsCallpeaks
from graph_peak_caller.intervals import Intervals
from graph_peak_caller import Configuration
from graph_peak_caller.reporter import Reporter
from offsetbasedgraph import GraphWithReversals as Graph, \
DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval
import unittest
from graph_peak_caller.control.linearmap import LinearMap
from pyvg.sequences import SequenceRetriever
import logging
from graph_peak_caller.logging_config import set_logging_config
#set_logging_config(1)
import os
from graph_peak_caller.command_line_interface import run_argument_parser
class TestMultipleGraphsCallPeaks(unittest.TestCase):
    """End-to-end test of MultipleGraphsCallpeaks on tiny synthetic graphs."""

    def setUp(self):
        """Build one small 3-node graph plus reads/controls per chromosome."""
        self.chromosomes = ["1", "2", "3", "X", "Y"]
        self.fragment_length = 5
        self.read_length = 2
        self.sample_reads = []
        self.control_reads = []
        self.linear_maps = []
        self.sequence_retrievers = []
        self.peaks = []
        for chrom in self.chromosomes:
            # Remove stale result files from previous runs.
            if os.path.isfile("multigraphs_%s_pvalues_indexes.npy" % chrom):
                os.remove("multigraphs_%s_pvalues_indexes.npy" % chrom)
                os.remove("multigraphs_%s_pvalues_values.npy" % chrom)
            if os.path.isfile("multigraphs_%s_max_paths.intervalcollection" % chrom):
                os.remove("multigraphs_%s_max_paths.intervalcollection" % chrom)
        self._create_data()
        self.config = Configuration()
        self.config.fragment_length = self.fragment_length
        self.config.read_length = self.read_length
        self.config.has_control = False
        self.config.min_background = 0.33
        self.reporter = Reporter("multigraphs_")

    def _create_data(self):
        """Create per-chromosome graphs, linear maps, sequences and reads on disk."""
        node_offset = 1
        for chrom_number, chromosome in enumerate(self.chromosomes):
            graph = Graph(
                {i + node_offset: Block(10) for i in range(0, 3)},
                {i + node_offset: [i + 1 + node_offset] for i in range(0, 2)})
            linear_map = LinearMap.from_graph(graph)
            linear_map_file_name = "linear_map_%s.npz" % chromosome
            linear_map.to_file(linear_map_file_name)
            self.linear_maps.append(linear_map_file_name)
            self.sequence_retrievers.append(
                SequenceRetriever({i + node_offset: "A" * 10
                                   for i in range(0, 3)})
            )
            self._create_reads(chrom_number, chromosome, graph)
            node_offset += 3
            graph.convert_to_numpy_backend()
            SequenceGraph.create_empty_from_ob_graph(graph).to_file(chromosome + ".nobg.sequences")
            graph.to_file(chromosome + ".nobg")

    def _create_reads(self, chrom_number, chrom, graph):
        """Create 10 forward/reverse read pairs covering one known peak.

        The expected peak for each chromosome is stored in self.peaks so
        do_asserts() can verify it is recovered.
        """
        sample_reads = []
        control_reads = []
        # Nodes for chromosome k are [1 + 3k, 2 + 3k, 3 + 3k].
        peaks = [DirectedInterval(
            7, 2, [1 + 3 * chrom_number, 2 + 3 * chrom_number], graph)]
        self.peaks.append(peaks)
        for peak in peaks:
            # NOTE: the original shadowed the chromosome index with this
            # loop variable; use an anonymous counter instead.
            for _ in range(10):
                left_sub = peak.get_subinterval(0, self.read_length)
                sample_reads.append(left_sub)
                control_reads.append(left_sub)
                right_sub = peak.get_subinterval(
                    self.fragment_length - self.read_length,
                    self.fragment_length)
                right_sub_reverse = right_sub.get_reverse()
                sample_reads.append(right_sub_reverse)
                control_reads.append(right_sub_reverse)
        self.sample_reads.append(Intervals(sample_reads))
        self.control_reads.append(Intervals(control_reads))

    def test_run_from_init(self):
        """Single-pass run through the whole pipeline."""
        caller = MultipleGraphsCallpeaks(
            self.chromosomes,
            [chrom + ".nobg" for chrom in self.chromosomes],
            self.sample_reads,
            self.control_reads,
            self.linear_maps,
            self.config,
            self.reporter
        )
        caller.run()
        self.do_asserts()

    def test_run_from_init_in_two_steps(self):
        """Stop after p-values, then resume per-chromosome from stored p-values."""
        set_logging_config(2)
        caller = MultipleGraphsCallpeaks(
            self.chromosomes,
            [chrom + ".nobg" for chrom in self.chromosomes],
            self.sample_reads,
            self.control_reads,
            self.linear_maps,
            self.config,
            self.reporter,
            stop_after_p_values=True
        )
        caller.run()
        for chromosome in self.chromosomes:
            caller = MultipleGraphsCallpeaks(
                self.chromosomes,
                [chrom + ".nobg" for chrom in self.chromosomes],
                None,
                None,
                None,
                self.config,
                self.reporter
            )
            caller.create_joined_q_value_mapping()
            caller.run_from_p_values(only_chromosome=chromosome)
        self.do_asserts()

    def do_asserts(self):
        """Every expected peak must appear in its chromosome's max-paths output."""
        for i, chromosome in enumerate(self.chromosomes):
            final_peaks = IntervalCollection.create_list_from_file(
                "multigraphs_" + chromosome + "_max_paths.intervalcollection")
            for peak in self.peaks[i]:
                # assertIn (not bare assert) survives python -O and reports
                # a useful failure message.
                self.assertIn(peak, final_peaks)
class TestMultipleGraphsCallPeaksCommandLine(TestMultipleGraphsCallPeaks):
    # Same test, but using command line interface
    def _create_reads(self, *args):
        """Create reads as the parent does, then write each chromosome's sample
        reads to a text interval file so the CLI can pick them up."""
        super(TestMultipleGraphsCallPeaksCommandLine, self)._create_reads(*args)
        for intervals, chrom in zip(self.sample_reads, self.chromosomes):
            IntervalCollection(intervals._intervals).to_file("test_sample_" + chrom + ".intervalcollection", text_file=True)
    def test_typical_run(self):
        """Run peak calling end-to-end through the command-line entry point."""
        print(" ========= Running start ====")
        # First pass: compute and store p-values for all chromosomes.
        run_argument_parser(["callpeaks",
                             "-g", "*.nobg",
                             "-s", "test_sample_*.intervalcollection",
                             "-f", "%s" % self.fragment_length,
                             "-r", "%s" % self.read_length,
                             "-u", "100",
                             "-G", "150",
                             "-n", "multigraphs_",
                             "-p", "True",
                             "-D", "True"])
        # Second pass: finish peak calling per chromosome from stored p-values.
        for i, chromosome in enumerate(self.chromosomes):
            run_argument_parser(["callpeaks_whole_genome_from_p_values", chromosome,
                                 "-d", "./",
                                 "-f", "%s" % self.fragment_length,
                                 "-r", "%s" % self.read_length,
                                 "-n", "multigraphs_"])
        self.do_asserts()
    def test_count_unique_reads(self):
        """count_number_of_unique_reads should report 3 unique among these
        4 reads (presumably the reverse interval counts as a duplicate of
        one forward read — verify against the implementation)."""
        reads = [
            IntervalCollection([
                Interval(4, 10, [1, 2, 3]),
                Interval(4, 5, [1]),
                Interval(5, 5, [1]),
                Interval(6, 2, [-3, -2, -1])
            ])
        ]
        unique = MultipleGraphsCallpeaks.count_number_of_unique_reads(reads)
        self.assertEqual(unique, 3)
if __name__ == "__main__":
    # Discover and run all test cases in this module.
    unittest.main()
| [
"offsetbasedgraph.IntervalCollection.create_list_from_file",
"offsetbasedgraph.Interval",
"graph_peak_caller.reporter.Reporter",
"graph_peak_caller.Configuration",
"offsetbasedgraph.Block",
"graph_peak_caller.logging_config.set_logging_config",
"offsetbasedgraph.DirectedInterval",
"os.path.isfile",
... | [((7169, 7184), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7182, 7184), False, 'import unittest\n'), ((1570, 1585), 'graph_peak_caller.Configuration', 'Configuration', ([], {}), '()\n', (1583, 1585), False, 'from graph_peak_caller import Configuration\n'), ((1802, 1826), 'graph_peak_caller.reporter.Reporter', 'Reporter', (['"""multigraphs_"""'], {}), "('multigraphs_')\n", (1810, 1826), False, 'from graph_peak_caller.reporter import Reporter\n'), ((3766, 3952), 'graph_peak_caller.multiplegraphscallpeaks.MultipleGraphsCallpeaks', 'MultipleGraphsCallpeaks', (['self.chromosomes', "[(chrom + '.nobg') for chrom in self.chromosomes]", 'self.sample_reads', 'self.control_reads', 'self.linear_maps', 'self.config', 'self.reporter'], {}), "(self.chromosomes, [(chrom + '.nobg') for chrom in\n self.chromosomes], self.sample_reads, self.control_reads, self.\n linear_maps, self.config, self.reporter)\n", (3789, 3952), False, 'from graph_peak_caller.multiplegraphscallpeaks import MultipleGraphsCallpeaks\n'), ((4140, 4161), 'graph_peak_caller.logging_config.set_logging_config', 'set_logging_config', (['(2)'], {}), '(2)\n', (4158, 4161), False, 'from graph_peak_caller.logging_config import set_logging_config\n'), ((4179, 4391), 'graph_peak_caller.multiplegraphscallpeaks.MultipleGraphsCallpeaks', 'MultipleGraphsCallpeaks', (['self.chromosomes', "[(chrom + '.nobg') for chrom in self.chromosomes]", 'self.sample_reads', 'self.control_reads', 'self.linear_maps', 'self.config', 'self.reporter'], {'stop_after_p_values': '(True)'}), "(self.chromosomes, [(chrom + '.nobg') for chrom in\n self.chromosomes], self.sample_reads, self.control_reads, self.\n linear_maps, self.config, self.reporter, stop_after_p_values=True)\n", (4202, 4391), False, 'from graph_peak_caller.multiplegraphscallpeaks import MultipleGraphsCallpeaks\n'), ((5842, 6088), 'graph_peak_caller.command_line_interface.run_argument_parser', 'run_argument_parser', (["['callpeaks', '-g', '*.nobg', '-s', 
'test_sample_*.intervalcollection',\n '-f', '%s' % self.fragment_length, '-r', '%s' % self.read_length, '-u',\n '100', '-G', '150', '-n', 'multigraphs_', '-p', 'True', '-D', 'True']"], {}), "(['callpeaks', '-g', '*.nobg', '-s',\n 'test_sample_*.intervalcollection', '-f', '%s' % self.fragment_length,\n '-r', '%s' % self.read_length, '-u', '100', '-G', '150', '-n',\n 'multigraphs_', '-p', 'True', '-D', 'True'])\n", (5861, 6088), False, 'from graph_peak_caller.command_line_interface import run_argument_parser\n'), ((7038, 7097), 'graph_peak_caller.multiplegraphscallpeaks.MultipleGraphsCallpeaks.count_number_of_unique_reads', 'MultipleGraphsCallpeaks.count_number_of_unique_reads', (['reads'], {}), '(reads)\n', (7090, 7097), False, 'from graph_peak_caller.multiplegraphscallpeaks import MultipleGraphsCallpeaks\n'), ((1103, 1163), 'os.path.isfile', 'os.path.isfile', (["('multigraphs_%s_pvalues_indexes.npy' % chrom)"], {}), "('multigraphs_%s_pvalues_indexes.npy' % chrom)\n", (1117, 1163), False, 'import os\n'), ((1367, 1436), 'os.path.isfile', 'os.path.isfile', (["('multigraphs_%s_max_paths.intervalcollection' % chrom)"], {}), "('multigraphs_%s_max_paths.intervalcollection' % chrom)\n", (1381, 1436), False, 'import os\n'), ((2142, 2169), 'graph_peak_caller.control.linearmap.LinearMap.from_graph', 'LinearMap.from_graph', (['graph'], {}), '(graph)\n', (2162, 2169), False, 'from graph_peak_caller.control.linearmap import LinearMap\n'), ((2964, 3017), 'offsetbasedgraph.DirectedInterval', 'DirectedInterval', (['(7)', '(2)', '[1 + 3 * i, 2 + 3 * i]', 'graph'], {}), '(7, 2, [1 + 3 * i, 2 + 3 * i], graph)\n', (2980, 3017), False, 'from offsetbasedgraph import GraphWithReversals as Graph, DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval\n'), ((3629, 3652), 'graph_peak_caller.intervals.Intervals', 'Intervals', (['sample_reads'], {}), '(sample_reads)\n', (3638, 3652), False, 'from graph_peak_caller.intervals import Intervals\n'), ((3688, 3712), 
'graph_peak_caller.intervals.Intervals', 'Intervals', (['control_reads'], {}), '(control_reads)\n', (3697, 3712), False, 'from graph_peak_caller.intervals import Intervals\n'), ((4588, 4730), 'graph_peak_caller.multiplegraphscallpeaks.MultipleGraphsCallpeaks', 'MultipleGraphsCallpeaks', (['self.chromosomes', "[(chrom + '.nobg') for chrom in self.chromosomes]", 'None', 'None', 'None', 'self.config', 'self.reporter'], {}), "(self.chromosomes, [(chrom + '.nobg') for chrom in\n self.chromosomes], None, None, None, self.config, self.reporter)\n", (4611, 4730), False, 'from graph_peak_caller.multiplegraphscallpeaks import MultipleGraphsCallpeaks\n'), ((5105, 5212), 'offsetbasedgraph.IntervalCollection.create_list_from_file', 'IntervalCollection.create_list_from_file', (["('multigraphs_' + chromosome + '_max_paths.intervalcollection')"], {}), "('multigraphs_' + chromosome +\n '_max_paths.intervalcollection')\n", (5145, 5212), False, 'from offsetbasedgraph import GraphWithReversals as Graph, DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval\n'), ((6410, 6592), 'graph_peak_caller.command_line_interface.run_argument_parser', 'run_argument_parser', (["['callpeaks_whole_genome_from_p_values', chromosome, '-d', './', '-f', '%s' %\n self.fragment_length, '-r', '%s' % self.read_length, '-n', 'multigraphs_']"], {}), "(['callpeaks_whole_genome_from_p_values', chromosome,\n '-d', './', '-f', '%s' % self.fragment_length, '-r', '%s' % self.\n read_length, '-n', 'multigraphs_'])\n", (6429, 6592), False, 'from graph_peak_caller.command_line_interface import run_argument_parser\n'), ((1181, 1236), 'os.remove', 'os.remove', (["('multigraphs_%s_pvalues_indexes.npy' % chrom)"], {}), "('multigraphs_%s_pvalues_indexes.npy' % chrom)\n", (1190, 1236), False, 'import os\n'), ((1253, 1307), 'os.remove', 'os.remove', (["('multigraphs_%s_pvalues_values.npy' % chrom)"], {}), "('multigraphs_%s_pvalues_values.npy' % chrom)\n", (1262, 1307), False, 'import os\n'), ((1454, 1518), 
'os.remove', 'os.remove', (["('multigraphs_%s_max_paths.intervalcollection' % chrom)"], {}), "('multigraphs_%s_max_paths.intervalcollection' % chrom)\n", (1463, 1518), False, 'import os\n'), ((2010, 2019), 'offsetbasedgraph.Block', 'Block', (['(10)'], {}), '(10)\n', (2015, 2019), False, 'from offsetbasedgraph import GraphWithReversals as Graph, DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval\n'), ((2675, 2722), 'offsetbasedgraph.SequenceGraph.create_empty_from_ob_graph', 'SequenceGraph.create_empty_from_ob_graph', (['graph'], {}), '(graph)\n', (2715, 2722), False, 'from offsetbasedgraph import GraphWithReversals as Graph, DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval\n'), ((5640, 5680), 'offsetbasedgraph.IntervalCollection', 'IntervalCollection', (['intervals._intervals'], {}), '(intervals._intervals)\n', (5658, 5680), False, 'from offsetbasedgraph import GraphWithReversals as Graph, DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval\n'), ((6849, 6875), 'offsetbasedgraph.Interval', 'Interval', (['(4)', '(10)', '[1, 2, 3]'], {}), '(4, 10, [1, 2, 3])\n', (6857, 6875), False, 'from offsetbasedgraph import GraphWithReversals as Graph, DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval\n'), ((6893, 6912), 'offsetbasedgraph.Interval', 'Interval', (['(4)', '(5)', '[1]'], {}), '(4, 5, [1])\n', (6901, 6912), False, 'from offsetbasedgraph import GraphWithReversals as Graph, DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval\n'), ((6930, 6949), 'offsetbasedgraph.Interval', 'Interval', (['(5)', '(5)', '[1]'], {}), '(5, 5, [1])\n', (6938, 6949), False, 'from offsetbasedgraph import GraphWithReversals as Graph, DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval\n'), ((6967, 6995), 'offsetbasedgraph.Interval', 'Interval', (['(6)', '(2)', '[-3, -2, -1]'], {}), '(6, 2, [-3, -2, -1])\n', (6975, 6995), False, 'from offsetbasedgraph import GraphWithReversals as Graph, 
DirectedInterval, IntervalCollection, Block, SequenceGraph, Interval\n')] |
import copy
import numpy as np
import torch
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.callbacks import Callback
class BestEpochCallback(Callback):
    """Lightning callback that records the epoch with the best monitored metric.

    After each validation batch (on epochs where validation ran), the value
    of ``trainer.callback_metrics[monitor]`` is compared against the best
    value seen so far according to *mode*, and the winning epoch index is
    kept in ``self.best_epoch``.
    """

    # np.inf (lowercase): np.Inf was removed in NumPy 2.0.
    TORCH_INF = torch_inf = torch.tensor(np.inf)
    MODE_DICT = {
        "min": (torch_inf, "min"),
        "max": (-torch_inf, "max"),
    }
    MONITOR_OP_DICT = {"min": torch.lt, "max": torch.gt}

    def __init__(self, monitor="", mode="min"):
        """monitor: key in trainer.callback_metrics; mode: 'min'/'max'/'auto'."""
        super(BestEpochCallback, self).__init__()
        self.monitor = monitor
        self.__init_monitor_mode(monitor, mode)
        self.best_epoch = 0

    def __init_monitor_mode(self, monitor, mode):
        """Resolve *mode*, warning and falling back to 'auto' when unknown."""
        if mode not in self.MODE_DICT and mode != "auto":
            rank_zero_warn(
                f"PrintBestEpochMetrics mode {mode} is unknown, fallback to auto mode",
                RuntimeWarning,
            )
            mode = "auto"
        if mode == "auto":
            rank_zero_warn(
                "mode='auto' is deprecated in v1.1 and will be removed in v1.3."
                " Default value for mode with be 'min' in v1.3.",
                DeprecationWarning,
            )
            # Accuracy/F-measure style metrics are maximized; others minimized.
            self.MODE_DICT["auto"] = (
                (-self.TORCH_INF, "max")
                if monitor is not None and ("acc" in monitor or monitor.startswith("fmeasure"))
                else (self.TORCH_INF, "min")
            )
        self.best_value, self.mode = self.MODE_DICT[mode]

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        """Update best value/epoch when this epoch's monitored metric improves."""
        # Skip epochs where validation was not scheduled.
        if (trainer.current_epoch + 1) % trainer.check_val_every_n_epoch != 0:
            return
        monitor_op = self.MONITOR_OP_DICT[self.mode]
        metrics_dict = copy.copy(trainer.callback_metrics)
        monitor_value = metrics_dict.get(self.monitor, self.best_value)
        if monitor_op(monitor_value.type_as(self.best_value), self.best_value):
            self.best_value = monitor_value
            self.best_epoch = trainer.current_epoch
| [
"torch.tensor",
"copy.copy",
"pytorch_lightning.utilities.rank_zero_warn"
] | [((213, 233), 'torch.tensor', 'torch.tensor', (['np.Inf'], {}), '(np.Inf)\n', (225, 233), False, 'import torch\n'), ((1742, 1777), 'copy.copy', 'copy.copy', (['trainer.callback_metrics'], {}), '(trainer.callback_metrics)\n', (1751, 1777), False, 'import copy\n'), ((744, 855), 'pytorch_lightning.utilities.rank_zero_warn', 'rank_zero_warn', (['f"""PrintBestEpochMetrics mode {mode} is unknown, fallback to auto mode"""', 'RuntimeWarning'], {}), "(\n f'PrintBestEpochMetrics mode {mode} is unknown, fallback to auto mode',\n RuntimeWarning)\n", (758, 855), False, 'from pytorch_lightning.utilities import rank_zero_warn\n'), ((959, 1115), 'pytorch_lightning.utilities.rank_zero_warn', 'rank_zero_warn', (['"""mode=\'auto\' is deprecated in v1.1 and will be removed in v1.3. Default value for mode with be \'min\' in v1.3."""', 'DeprecationWarning'], {}), '(\n "mode=\'auto\' is deprecated in v1.1 and will be removed in v1.3. Default value for mode with be \'min\' in v1.3."\n , DeprecationWarning)\n', (973, 1115), False, 'from pytorch_lightning.utilities import rank_zero_warn\n')] |
import tensorflow as tf
import tensorflow_hub as hub
from tf_agents.networks import network
# Bert needs this (I think) TODO: Check?
import tensorflow_text as text
# TF-Hub module handles used by the policies below.
# NNLM sentence embedding (128-dim, normalized) for HubPolicyFC.
embedding = "https://tfhub.dev/google/nnlm-en-dim128-with-normalization/2"
# Small BERT encoder and its matching preprocessing model for HubPolicyBert.
tfhub_handle_encoder = (
    "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1"
)
tfhub_handle_preprocess = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
class HubPolicyFC(network.Network):
    """Policy for DQN agent utilizing pre-trained NNLM embedding into FC layers.

    Text observations are embedded with a TF-Hub NNLM layer, passed through
    two batch-norm/dense/dropout stages, projected to separate verb and
    object Q-value heads, and combined into a flat num_verb*num_obj
    Q-value vector by broadcast addition.
    """

    def __init__(
        self, input_tensor_spec, action_spec, num_verb, num_obj, name="ActorNetwork"
    ):
        """action_spec must span exactly num_verb * num_obj discrete actions."""
        super().__init__()
        num_actions = action_spec.maximum - action_spec.minimum + 1
        assert num_actions == num_verb * num_obj
        self.num_verb = num_verb
        self.num_obj = num_obj
        self.hub_layer = hub.KerasLayer(
            embedding,
            input_shape=[],
            dtype=tf.string,
            trainable=True
        )
        self.fc1 = tf.keras.layers.Dense(128, activation="relu")
        self.fc2 = tf.keras.layers.Dense(64, activation="relu")
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.do1 = tf.keras.layers.Dropout(0.1)
        self.do2 = tf.keras.layers.Dropout(0.1)
        self.verb_layer = tf.keras.layers.Dense(num_verb, activation=None)
        self.obj_layer = tf.keras.layers.Dense(num_obj, activation=None)
        self.number_of_strings = input_tensor_spec.shape[0]

    def call(self, observation, network_state=(), training=False):
        """A wrapper around `Network.call`.

        Args:
            observation: The input, matching `self.input_tensor_spec`
            network_state: A state to pass through the network
            training: Optional argument to set to training mode
        Returns:
            A tuple `(q_values, new_network_state)`.
        """
        if network_state is not None and len(network_state) == 0:
            network_state = None
        # Flatten so the hub layer sees a 1-D batch of strings.
        flattened_observation = tf.reshape(observation, (-1))
        embedded_observations = self.hub_layer(flattened_observation, training=training)
        embedded_observations = tf.reshape(
            embedded_observations, (observation.shape[0], observation.shape[1], 128)
        )
        out = self.bn1(embedded_observations, training=training)
        out = self.fc1(out, training=training)
        # Bug fix: the dropout results were previously discarded, so the
        # Dropout layers had no effect; keep the returned tensors.
        out = self.do1(out, training=training)
        out = self.bn2(out, training=training)
        out = self.fc2(out, training=training)
        out = self.do2(out, training=training)
        verb_q_value = self.verb_layer(out, training=training)
        obj_q_value = self.obj_layer(out, training=training)
        # Broadcast-add the factored heads into a (verb, obj) grid, sum over
        # the per-string axis, then flatten to (batch, num_verb * num_obj).
        verb_q_value = tf.reshape(verb_q_value, (observation.shape[0], observation.shape[1], verb_q_value.shape[2], 1))
        obj_q_value = tf.reshape(obj_q_value, (observation.shape[0], observation.shape[1], 1, obj_q_value.shape[2]))
        q_values_added = tf.add(verb_q_value, obj_q_value)
        q_values_added = tf.math.reduce_sum(q_values_added, axis=1)
        q_values = tf.reshape(q_values_added, (observation.shape[0], -1))
        return q_values, ()
class HubPolicyBert(network.Network):
    """Policy for DQN agent utilizing pre-trained smallBert into FC layers. """
    def __init__(
        self, input_tensor_spec, action_spec, num_verb, num_obj, name="ActorNetwork"
    ):
        """action_spec must span exactly num_verb * num_obj discrete actions."""
        super().__init__()
        num_actions = action_spec.maximum - action_spec.minimum + 1
        assert num_actions == num_verb * num_obj
        self.num_verb = num_verb
        self.num_obj = num_obj
        # BERT preprocessing (tokenization) and encoder from TF-Hub.
        self.bert_preprocess_model = hub.KerasLayer(
            tfhub_handle_preprocess,
            input_shape=[],
            dtype=tf.string,
        )
        self.bert_model = hub.KerasLayer(tfhub_handle_encoder, trainable=True)
        # NOTE(review): fc1/do1 are unused in call() (their applications are
        # commented out below) and verbobj_layer is never applied — confirm
        # whether these are intentionally kept for experimentation.
        self.fc1 = tf.keras.layers.Dense(128, activation="relu")
        self.do1 = tf.keras.layers.Dropout(0.1)
        self.verb_layer = tf.keras.layers.Dense(num_verb, activation=None)
        self.obj_layer = tf.keras.layers.Dense(num_obj, activation=None)
        self.verbobj_layer = tf.keras.layers.Dense(num_actions, activation=None)
        self.number_of_strings = input_tensor_spec.shape[0]
    def call(self, observation, network_state=(), training=False):
        """A wrapper around `Network.call`.

        Encodes the string observations with BERT, projects the pooled
        output to separate verb and object Q-value heads, and combines them
        by broadcast addition into a flat Q-value vector.

        Args:
            observation: The input to `self.call`, matching `self.input_tensor_spec`
            network_state: A state to pass through the network
            training: Optional argument to set to training mode
        Returns:
            A tuple `(q_values, new_network_state)`.
        """
        if network_state is not None and len(network_state) == 0:
            network_state = None
        # Flatten so the preprocessing layer sees a 1-D batch of strings.
        flattened_observation = tf.reshape(observation, (-1))
        encoder_inputs = self.bert_preprocess_model(flattened_observation)
        outputs = self.bert_model(encoder_inputs, training=training)
        out = outputs["pooled_output"]
        out = tf.reshape(out, (observation.shape[0], observation.shape[1], 128))
        # out = self.do1(out, training=training)
        # out = self.fc1(out, training=training)
        verb_q_value = self.verb_layer(out, training=training)
        obj_q_value = self.obj_layer(out, training=training)
        # q_value_multiplied = tf.matmul(verb_q_value, obj_q_value, transpose_a=True)
        # q_values = tf.reshape(q_value_multiplied, (observation.shape[0], -1))
        # Broadcast-add the factored heads into a (verb, obj) grid, sum over
        # axis 1, then flatten to (batch, num_verb * num_obj).
        verb_q_value = tf.reshape(verb_q_value, (observation.shape[0], observation.shape[1], verb_q_value.shape[2], 1))
        obj_q_value = tf.reshape(obj_q_value, (observation.shape[0], observation.shape[1], 1, obj_q_value.shape[2]))
        q_values_added = tf.add(verb_q_value, obj_q_value)
        q_values_added = tf.math.reduce_sum(q_values_added, axis=1)
        q_values = tf.reshape(q_values_added, (observation.shape[0], -1))
        return q_values, ()
| [
"tensorflow.keras.layers.Dropout",
"tensorflow.add",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow_hub.KerasLayer",
"tensorflow.math.reduce_sum"
] | [((900, 974), 'tensorflow_hub.KerasLayer', 'hub.KerasLayer', (['embedding'], {'input_shape': '[]', 'dtype': 'tf.string', 'trainable': '(True)'}), '(embedding, input_shape=[], dtype=tf.string, trainable=True)\n', (914, 974), True, 'import tensorflow_hub as hub\n'), ((1053, 1098), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1074, 1098), True, 'import tensorflow as tf\n'), ((1118, 1162), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1139, 1162), True, 'import tensorflow as tf\n'), ((1182, 1218), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (1216, 1218), True, 'import tensorflow as tf\n'), ((1238, 1274), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (1272, 1274), True, 'import tensorflow as tf\n'), ((1294, 1322), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (1317, 1322), True, 'import tensorflow as tf\n'), ((1342, 1370), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (1365, 1370), True, 'import tensorflow as tf\n'), ((1398, 1446), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_verb'], {'activation': 'None'}), '(num_verb, activation=None)\n', (1419, 1446), True, 'import tensorflow as tf\n'), ((1472, 1519), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_obj'], {'activation': 'None'}), '(num_obj, activation=None)\n', (1493, 1519), True, 'import tensorflow as tf\n'), ((2141, 2168), 'tensorflow.reshape', 'tf.reshape', (['observation', '(-1)'], {}), '(observation, -1)\n', (2151, 2168), True, 'import tensorflow as tf\n'), ((2293, 2382), 'tensorflow.reshape', 'tf.reshape', (['embedded_observations', '(observation.shape[0], observation.shape[1], 128)'], {}), 
'(embedded_observations, (observation.shape[0], observation.shape[\n 1], 128))\n', (2303, 2382), True, 'import tensorflow as tf\n'), ((3004, 3104), 'tensorflow.reshape', 'tf.reshape', (['verb_q_value', '(observation.shape[0], observation.shape[1], verb_q_value.shape[2], 1)'], {}), '(verb_q_value, (observation.shape[0], observation.shape[1],\n verb_q_value.shape[2], 1))\n', (3014, 3104), True, 'import tensorflow as tf\n'), ((3124, 3222), 'tensorflow.reshape', 'tf.reshape', (['obj_q_value', '(observation.shape[0], observation.shape[1], 1, obj_q_value.shape[2])'], {}), '(obj_q_value, (observation.shape[0], observation.shape[1], 1,\n obj_q_value.shape[2]))\n', (3134, 3222), True, 'import tensorflow as tf\n'), ((3245, 3278), 'tensorflow.add', 'tf.add', (['verb_q_value', 'obj_q_value'], {}), '(verb_q_value, obj_q_value)\n', (3251, 3278), True, 'import tensorflow as tf\n'), ((3304, 3346), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['q_values_added'], {'axis': '(1)'}), '(q_values_added, axis=1)\n', (3322, 3346), True, 'import tensorflow as tf\n'), ((3366, 3420), 'tensorflow.reshape', 'tf.reshape', (['q_values_added', '(observation.shape[0], -1)'], {}), '(q_values_added, (observation.shape[0], -1))\n', (3376, 3420), True, 'import tensorflow as tf\n'), ((3928, 4000), 'tensorflow_hub.KerasLayer', 'hub.KerasLayer', (['tfhub_handle_preprocess'], {'input_shape': '[]', 'dtype': 'tf.string'}), '(tfhub_handle_preprocess, input_shape=[], dtype=tf.string)\n', (3942, 4000), True, 'import tensorflow_hub as hub\n'), ((4075, 4127), 'tensorflow_hub.KerasLayer', 'hub.KerasLayer', (['tfhub_handle_encoder'], {'trainable': '(True)'}), '(tfhub_handle_encoder, trainable=True)\n', (4089, 4127), True, 'import tensorflow_hub as hub\n'), ((4148, 4193), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (4169, 4193), True, 'import tensorflow as tf\n'), ((4213, 4241), 'tensorflow.keras.layers.Dropout', 
'tf.keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (4236, 4241), True, 'import tensorflow as tf\n'), ((4269, 4317), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_verb'], {'activation': 'None'}), '(num_verb, activation=None)\n', (4290, 4317), True, 'import tensorflow as tf\n'), ((4343, 4390), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_obj'], {'activation': 'None'}), '(num_obj, activation=None)\n', (4364, 4390), True, 'import tensorflow as tf\n'), ((4420, 4471), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_actions'], {'activation': 'None'}), '(num_actions, activation=None)\n', (4441, 4471), True, 'import tensorflow as tf\n'), ((5098, 5125), 'tensorflow.reshape', 'tf.reshape', (['observation', '(-1)'], {}), '(observation, -1)\n', (5108, 5125), True, 'import tensorflow as tf\n'), ((5326, 5392), 'tensorflow.reshape', 'tf.reshape', (['out', '(observation.shape[0], observation.shape[1], 128)'], {}), '(out, (observation.shape[0], observation.shape[1], 128))\n', (5336, 5392), True, 'import tensorflow as tf\n'), ((5807, 5907), 'tensorflow.reshape', 'tf.reshape', (['verb_q_value', '(observation.shape[0], observation.shape[1], verb_q_value.shape[2], 1)'], {}), '(verb_q_value, (observation.shape[0], observation.shape[1],\n verb_q_value.shape[2], 1))\n', (5817, 5907), True, 'import tensorflow as tf\n'), ((5927, 6025), 'tensorflow.reshape', 'tf.reshape', (['obj_q_value', '(observation.shape[0], observation.shape[1], 1, obj_q_value.shape[2])'], {}), '(obj_q_value, (observation.shape[0], observation.shape[1], 1,\n obj_q_value.shape[2]))\n', (5937, 6025), True, 'import tensorflow as tf\n'), ((6048, 6081), 'tensorflow.add', 'tf.add', (['verb_q_value', 'obj_q_value'], {}), '(verb_q_value, obj_q_value)\n', (6054, 6081), True, 'import tensorflow as tf\n'), ((6107, 6149), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['q_values_added'], {'axis': '(1)'}), '(q_values_added, axis=1)\n', (6125, 6149), True, 'import 
tensorflow as tf\n'), ((6169, 6223), 'tensorflow.reshape', 'tf.reshape', (['q_values_added', '(observation.shape[0], -1)'], {}), '(q_values_added, (observation.shape[0], -1))\n', (6179, 6223), True, 'import tensorflow as tf\n')] |
import threading
import time
import RPi.GPIO as GPIO
import logging
import logging.config
# declare logger parameters
logger = logging.getLogger(__name__)
class PWMController(threading.Thread):
""" Thread class with a stop() method.
Handy class to implement PWM on digital output pins """
def __init__(self, thread_id, pin, on_time, off_time):
threading.Thread.__init__(self)
self.__thread_id = thread_id
self.__pin = pin
self.__on_time = on_time
self.__off_time = off_time
self.__stop_event = threading.Event()
# TODO: Setting up the pins should be moved to the main script 'Controller.py'
# GPIO.setmode(GPIO.BCM)
# GPIO.setwarnings(False)
# GPIO.setup(pin, GPIO.OUT)
def stop(self):
self.__stop_event.set()
# print(str(self.__thread_id) + ": set the stop event")
def stopped(self):
return self.__stop_event.is_set()
def run(self):
while True:
if self.stopped():
# print(str(self.__thread_id) + ": thread has stopped. exiting")
break;
logger.debug(str(self.__pin) + ": ON--" + str(self.__on_time))
if self.__on_time > 0.02:
GPIO.output(self.__pin, GPIO.HIGH)
logger.debug("On wait time: %.3f" % self.__on_time)
time.sleep(self.__on_time)
logger.debug(str(self.__pin) + ": OFF--" + str(self.__off_time))
if self.__off_time > 0.02:
GPIO.output(self.__pin, GPIO.LOW)
logger.debug("Off wait time: %.3f" % self.__off_time)
time.sleep(self.__off_time)
| [
"logging.getLogger",
"threading.Thread.__init__",
"RPi.GPIO.output",
"time.sleep",
"threading.Event"
] | [((128, 155), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (145, 155), False, 'import logging\n'), ((372, 403), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (397, 403), False, 'import threading\n'), ((562, 579), 'threading.Event', 'threading.Event', ([], {}), '()\n', (577, 579), False, 'import threading\n'), ((1258, 1292), 'RPi.GPIO.output', 'GPIO.output', (['self.__pin', 'GPIO.HIGH'], {}), '(self.__pin, GPIO.HIGH)\n', (1269, 1292), True, 'import RPi.GPIO as GPIO\n'), ((1377, 1403), 'time.sleep', 'time.sleep', (['self.__on_time'], {}), '(self.__on_time)\n', (1387, 1403), False, 'import time\n'), ((1536, 1569), 'RPi.GPIO.output', 'GPIO.output', (['self.__pin', 'GPIO.LOW'], {}), '(self.__pin, GPIO.LOW)\n', (1547, 1569), True, 'import RPi.GPIO as GPIO\n'), ((1656, 1683), 'time.sleep', 'time.sleep', (['self.__off_time'], {}), '(self.__off_time)\n', (1666, 1683), False, 'import time\n')] |
import math
a = float(input('insira um valor'))
print('a porção inteira do valor {} é {}'.format(a,math.trunc(a))) | [
"math.trunc"
] | [((103, 116), 'math.trunc', 'math.trunc', (['a'], {}), '(a)\n', (113, 116), False, 'import math\n')] |
#from data_loader import *
from scipy import signal
import matplotlib.pyplot as plt
import copy
import os
import shutil
import numpy as np
def data_filter(exp_path, probe_type='point', Xtype='loc',ytype='f',num_point=0):
shutil.rmtree(exp_path+probe_type+'_expand', ignore_errors=True)
os.mkdir(exp_path+probe_type+'_expand')
for i in range(num_point):
#load force/torque data
force_path = exp_path+probe_type+'/force_'+str(i)+'.txt'
new_force_path = exp_path+probe_type+'_expand'+'/force_'+str(i)+'.txt'
force=[]
torque=[]
force_normal=[]
torque_normal=[]
displacement=[]
dataFile=open(force_path,'r')
for line in dataFile:
line=line.rstrip()
l=[num for num in line.split(' ')]
l2=[float(num) for num in l]
force.append(l2[0:3])
force_normal.append(l2[3])
displacement.append(l2[4])
dataFile.close()
if probe_type == 'line':
torque_path = exp_path+probe_type+'/torque_'+str(i)+'.txt'
dataFile=open(torque_path,'r')
for line in dataFile:
line=line.rstrip()
l=[num for num in line.split(' ')]
l2=[float(num) for num in l]
torque.append(l2[0:3])
torque_normal.append(l2[3])
dataFile.close()
elif probe_type == 'ellipse':
torque_path = exp_path+probe_type+'/torque_'+str(i)+'.txt'
dataFile=open(torque_path,'r')
for line in dataFile:
line=line.rstrip()
l=[num for num in line.split(' ')]
l2=[float(num) for num in l]
torque.append(l2[0:3])
displacement.append(l2[3])
dataFile.close()
force_normal_1d =np.array(force_normal)
#to np
force=np.array(force,ndmin=2)
torque=np.array(torque,ndmin=2)
force_normal=np.array(force_normal,ndmin=2).T
torque_normal=np.array(torque_normal,ndmin=2).T
displacement=np.array(displacement)
#filter
Wn=0.01
[b,a]=signal.butter(5,Wn,'low')
for i in range(3):
tmp_filteredForces=signal.filtfilt(b,a,force[:,i].T,padlen=150)
if i == 0:
filteredForces = np.array(tmp_filteredForces,ndmin=2).T
print(filteredForces.shape)
else:
filteredForces = np.hstack((filteredForces,np.array(tmp_filteredForces,ndmin=2).T))
if probe_type == 'line' or probe_type == 'ellipse':
for i in range(3):
tmp_filteredTorques=signal.filtfilt(b,a,torque[:,i].T,padlen=150)
if i == 0:
filteredTorques = tmp_filteredTorques.T
else:
filteredTorques = np.hstack((filteredTorques,tmp_filteredTorques.T))
filtered_force_normal=signal.filtfilt(b,a,force_normal.T,padlen=150)
if probe_type == 'line':
filtered_torque_normal=signal.filtfilt(b,a,torque_normal.T,padlen=150)
#filtered_force_normal = filtered_force_normal.T
print(filtered_force_normal.shape)
new_dataFile=open(new_force_path,'w+')
num_data = len(displacement)
#delta_d = (displacement[num_data-1]-displacement[num_data-101])/1
delta_d = 0.0002
d_expand_start = displacement[num_data-1] + delta_d
d_expand_end = 0.020
d_expand = np.arange(d_expand_start,d_expand_end,delta_d)
num_expand = d_expand.shape[0]
print('[*]',num_expand)
slope = (force_normal[num_data-1] - force_normal[num_data-301])/(displacement[num_data-1]-displacement[num_data-301])
sd = slope*delta_d
fn_expand_start = force_normal[num_data-1] + sd*1
fn_expand_end = force_normal[num_data-1] + sd*(num_expand+1)
force_normal_expand = np.arange(fn_expand_start,fn_expand_end,sd)
print('[*]',len(d_expand))
d_all = displacement.tolist()+d_expand.tolist()
fn_all = force_normal_1d.tolist()+force_normal_expand.tolist()
num_all = len(d_all) - 2
print(num_all)
d_all = d_all[0:num_all]
fn_all = fn_all[0:num_all]
for i in range(num_all):
new_dataFile.write(str(0)+' '+str(0)+' '+str(0)+' ')
new_dataFile.write(str(fn_all[i])+' '+str(d_all[i])+'\n')
new_dataFile.close()
'''
for i in range(displacement.shape[0]):
new_dataFile.write(str(filteredForces[i,0])+' '+str(filteredForces[i,1])+' '+str(filteredForces[i,2])+' ')
new_dataFile.write(str(filtered_force_normal[0,i])+' '+str(displacement[i])+'\n')
new_dataFile.close()
'''
return d_all, fn_all
d,fn = data_filter('./', probe_type='point', Xtype='loc',ytype='fn',num_point=94)
print(len(d),len(fn))
plt.plot(np.array(d),np.array(fn),color='b',marker='o',markersize=1)
plt.show() | [
"numpy.hstack",
"scipy.signal.filtfilt",
"scipy.signal.butter",
"numpy.array",
"os.mkdir",
"shutil.rmtree",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((5134, 5144), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5142, 5144), True, 'import matplotlib.pyplot as plt\n'), ((226, 294), 'shutil.rmtree', 'shutil.rmtree', (["(exp_path + probe_type + '_expand')"], {'ignore_errors': '(True)'}), "(exp_path + probe_type + '_expand', ignore_errors=True)\n", (239, 294), False, 'import shutil\n'), ((295, 338), 'os.mkdir', 'os.mkdir', (["(exp_path + probe_type + '_expand')"], {}), "(exp_path + probe_type + '_expand')\n", (303, 338), False, 'import os\n'), ((5074, 5085), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (5082, 5085), True, 'import numpy as np\n'), ((5086, 5098), 'numpy.array', 'np.array', (['fn'], {}), '(fn)\n', (5094, 5098), True, 'import numpy as np\n'), ((1898, 1920), 'numpy.array', 'np.array', (['force_normal'], {}), '(force_normal)\n', (1906, 1920), True, 'import numpy as np\n'), ((1950, 1974), 'numpy.array', 'np.array', (['force'], {'ndmin': '(2)'}), '(force, ndmin=2)\n', (1958, 1974), True, 'import numpy as np\n'), ((1989, 2014), 'numpy.array', 'np.array', (['torque'], {'ndmin': '(2)'}), '(torque, ndmin=2)\n', (1997, 2014), True, 'import numpy as np\n'), ((2145, 2167), 'numpy.array', 'np.array', (['displacement'], {}), '(displacement)\n', (2153, 2167), True, 'import numpy as np\n'), ((2223, 2250), 'scipy.signal.butter', 'signal.butter', (['(5)', 'Wn', '"""low"""'], {}), "(5, Wn, 'low')\n", (2236, 2250), False, 'from scipy import signal\n'), ((3037, 3086), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'force_normal.T'], {'padlen': '(150)'}), '(b, a, force_normal.T, padlen=150)\n', (3052, 3086), False, 'from scipy import signal\n'), ((3637, 3685), 'numpy.arange', 'np.arange', (['d_expand_start', 'd_expand_end', 'delta_d'], {}), '(d_expand_start, d_expand_end, delta_d)\n', (3646, 3685), True, 'import numpy as np\n'), ((4083, 4128), 'numpy.arange', 'np.arange', (['fn_expand_start', 'fn_expand_end', 'sd'], {}), '(fn_expand_start, fn_expand_end, sd)\n', (4092, 4128), True, 'import numpy 
as np\n'), ((2035, 2066), 'numpy.array', 'np.array', (['force_normal'], {'ndmin': '(2)'}), '(force_normal, ndmin=2)\n', (2043, 2066), True, 'import numpy as np\n'), ((2090, 2122), 'numpy.array', 'np.array', (['torque_normal'], {'ndmin': '(2)'}), '(torque_normal, ndmin=2)\n', (2098, 2122), True, 'import numpy as np\n'), ((2316, 2364), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'force[:, i].T'], {'padlen': '(150)'}), '(b, a, force[:, i].T, padlen=150)\n', (2331, 2364), False, 'from scipy import signal\n'), ((3161, 3211), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'torque_normal.T'], {'padlen': '(150)'}), '(b, a, torque_normal.T, padlen=150)\n', (3176, 3211), False, 'from scipy import signal\n'), ((2754, 2803), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'torque[:, i].T'], {'padlen': '(150)'}), '(b, a, torque[:, i].T, padlen=150)\n', (2769, 2803), False, 'from scipy import signal\n'), ((2417, 2454), 'numpy.array', 'np.array', (['tmp_filteredForces'], {'ndmin': '(2)'}), '(tmp_filteredForces, ndmin=2)\n', (2425, 2454), True, 'import numpy as np\n'), ((2947, 2998), 'numpy.hstack', 'np.hstack', (['(filteredTorques, tmp_filteredTorques.T)'], {}), '((filteredTorques, tmp_filteredTorques.T))\n', (2956, 2998), True, 'import numpy as np\n'), ((2577, 2614), 'numpy.array', 'np.array', (['tmp_filteredForces'], {'ndmin': '(2)'}), '(tmp_filteredForces, ndmin=2)\n', (2585, 2614), True, 'import numpy as np\n')] |
# Copyright (c) 2012 by Zuse-Institute Berlin and the Technical University of Denmark.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
# Direct execution requires top level directory on python path
if __name__ == "__main__":
import os, sys, inspect
scriptdir = os.path.split(inspect.getfile( inspect.currentframe() ))[0]
packagedir = os.path.realpath(os.path.abspath(os.path.join(scriptdir,'..')))
if packagedir not in sys.path:
sys.path.insert(0, packagedir)
import os, sys, inspect, tarfile, glob, stat, getopt
from data.CBFset import CBFset
from filter import filter
def addwritepermission(tarinfo):
tarinfo.mode = tarinfo.mode | stat.S_IWRITE
return tarinfo
def pack(packname, filtexpr, setexpr, packall):
# tarfile 'filter' requires v2.7
if sys.version_info < (2,7):
raise Exception('Python 2.7 or later required..')
# Get the root directory of cblib
scriptdir = os.path.split(inspect.getfile( inspect.currentframe() ))[0]
rootdir = os.path.join(scriptdir,'..','..')
if not packall and setexpr != None:
if os.path.isfile(setexpr):
rootdir = os.path.dirname(setexpr)
else:
rootdir = setexpr
# Find all instances
files = list()
cbfset = CBFset()
cbfset.read(setexpr)
filter(filtexpr, None, cbfset, lambda x: files.append(x))
if packall:
# Find all instance information
files = files + glob.glob(os.path.join(rootdir,'instances','*.csv'))
files = files + glob.glob(os.path.join(rootdir,'instances','*.bib'))
# Find all source files from 'tools'
files = files + glob.glob(os.path.join(rootdir,'tools','*.c'))
files = files + glob.glob(os.path.join(rootdir,'tools','*.h'))
files = files + glob.glob(os.path.join(rootdir,'tools','Makefile.*'))
# Find all documents from 'docs'
files = files + glob.glob(os.path.join(rootdir,'docs','*.pdf'))
# Find all python files from 'scripts'
files = files + glob.glob(os.path.join(rootdir,'scripts','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','admin','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','data','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','dist','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','filters','*.py'))
files = files + glob.glob(os.path.join(rootdir,'scripts','solvers','*.py'))
# Find all other important files
files.append(os.path.join(rootdir,'README'))
files.append(os.path.join(rootdir,'instances','cbf','README'))
# Create compressed tar file
print('Writing '+packname+'.tar.gz')
tar = tarfile.open(os.path.join(scriptdir,packname+'.tar.gz'), 'w:gz')
for f in files:
extractname = os.path.join(packname, os.path.relpath(f, rootdir))
print(extractname)
tar.add(f, arcname=extractname, filter=addwritepermission)
tar.close()
if __name__ == "__main__":
try:
# Verify command line arguments
opts, args = getopt.gnu_getopt(sys.argv[1:], "n:s:a", "filter=")
if len(args) >= 1:
raise Exception('Incorrect usage!')
except Exception as e:
print(str(e))
raise Exception(''.join([
'Incorrect usage, try all instances', '\n',
' python ', sys.argv[0], ' -n cblib', '\n',
'or try all mixed-integer second order cone instances:', '\n',
' python ', sys.argv[0], ' -n cblib-misoco --filter="||int|| and ||cones|so|| and not ||psdcones||"']))
sys.exit(2)
packname = None
filtexpr = ""
setexpr = None
packall = False
for opt, arg in opts:
if opt == '-n':
packname = arg
elif opt == "-s":
setexpr = arg
elif opt == "-a":
packall = True
elif opt == "--filter":
filtexpr = arg
try:
if not packname:
if setexpr and os.path.exists(setexpr) and not os.path.isfile(setexpr):
packname = os.path.basename(setexpr)
if not packname:
packname = os.path.basename(os.path.dirname(setexpr))
else:
raise Exception('No pack name specified!')
print(setexpr)
pack(packname, filtexpr, setexpr, packall)
except Exception as e:
print(str(e))
| [
"os.path.exists",
"sys.path.insert",
"data.CBFset.CBFset",
"inspect.currentframe",
"os.path.join",
"os.path.isfile",
"os.path.dirname",
"os.path.basename",
"sys.exit",
"getopt.gnu_getopt",
"os.path.relpath"
] | [((1775, 1810), 'os.path.join', 'os.path.join', (['scriptdir', '""".."""', '""".."""'], {}), "(scriptdir, '..', '..')\n", (1787, 1810), False, 'import os, sys, inspect\n'), ((2007, 2015), 'data.CBFset.CBFset', 'CBFset', ([], {}), '()\n', (2013, 2015), False, 'from data.CBFset import CBFset\n'), ((1243, 1273), 'sys.path.insert', 'sys.path.insert', (['(0)', 'packagedir'], {}), '(0, packagedir)\n', (1258, 1273), False, 'import os, sys, inspect\n'), ((1855, 1878), 'os.path.isfile', 'os.path.isfile', (['setexpr'], {}), '(setexpr)\n', (1869, 1878), False, 'import os, sys, inspect\n'), ((3412, 3457), 'os.path.join', 'os.path.join', (['scriptdir', "(packname + '.tar.gz')"], {}), "(scriptdir, packname + '.tar.gz')\n", (3424, 3457), False, 'import os, sys, inspect\n'), ((3741, 3792), 'getopt.gnu_getopt', 'getopt.gnu_getopt', (['sys.argv[1:]', '"""n:s:a"""', '"""filter="""'], {}), "(sys.argv[1:], 'n:s:a', 'filter=')\n", (3758, 3792), False, 'import os, sys, inspect, tarfile, glob, stat, getopt\n'), ((1175, 1204), 'os.path.join', 'os.path.join', (['scriptdir', '""".."""'], {}), "(scriptdir, '..')\n", (1187, 1204), False, 'import os, sys, inspect\n'), ((1896, 1920), 'os.path.dirname', 'os.path.dirname', (['setexpr'], {}), '(setexpr)\n', (1911, 1920), False, 'import os, sys, inspect\n'), ((3221, 3252), 'os.path.join', 'os.path.join', (['rootdir', '"""README"""'], {}), "(rootdir, 'README')\n", (3233, 3252), False, 'import os, sys, inspect\n'), ((3270, 3321), 'os.path.join', 'os.path.join', (['rootdir', '"""instances"""', '"""cbf"""', '"""README"""'], {}), "(rootdir, 'instances', 'cbf', 'README')\n", (3282, 3321), False, 'import os, sys, inspect\n'), ((3523, 3550), 'os.path.relpath', 'os.path.relpath', (['f', 'rootdir'], {}), '(f, rootdir)\n', (3538, 3550), False, 'import os, sys, inspect\n'), ((4232, 4243), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (4240, 4243), False, 'import os, sys, inspect\n'), ((1098, 1120), 'inspect.currentframe', 'inspect.currentframe', ([], {}), 
'()\n', (1118, 1120), False, 'import os, sys, inspect\n'), ((1734, 1756), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1754, 1756), False, 'import os, sys, inspect\n'), ((2180, 2223), 'os.path.join', 'os.path.join', (['rootdir', '"""instances"""', '"""*.csv"""'], {}), "(rootdir, 'instances', '*.csv')\n", (2192, 2223), False, 'import os, sys, inspect\n'), ((2253, 2296), 'os.path.join', 'os.path.join', (['rootdir', '"""instances"""', '"""*.bib"""'], {}), "(rootdir, 'instances', '*.bib')\n", (2265, 2296), False, 'import os, sys, inspect\n'), ((2370, 2407), 'os.path.join', 'os.path.join', (['rootdir', '"""tools"""', '"""*.c"""'], {}), "(rootdir, 'tools', '*.c')\n", (2382, 2407), False, 'import os, sys, inspect\n'), ((2437, 2474), 'os.path.join', 'os.path.join', (['rootdir', '"""tools"""', '"""*.h"""'], {}), "(rootdir, 'tools', '*.h')\n", (2449, 2474), False, 'import os, sys, inspect\n'), ((2504, 2548), 'os.path.join', 'os.path.join', (['rootdir', '"""tools"""', '"""Makefile.*"""'], {}), "(rootdir, 'tools', 'Makefile.*')\n", (2516, 2548), False, 'import os, sys, inspect\n'), ((2618, 2656), 'os.path.join', 'os.path.join', (['rootdir', '"""docs"""', '"""*.pdf"""'], {}), "(rootdir, 'docs', '*.pdf')\n", (2630, 2656), False, 'import os, sys, inspect\n'), ((2732, 2772), 'os.path.join', 'os.path.join', (['rootdir', '"""scripts"""', '"""*.py"""'], {}), "(rootdir, 'scripts', '*.py')\n", (2744, 2772), False, 'import os, sys, inspect\n'), ((2802, 2851), 'os.path.join', 'os.path.join', (['rootdir', '"""scripts"""', '"""admin"""', '"""*.py"""'], {}), "(rootdir, 'scripts', 'admin', '*.py')\n", (2814, 2851), False, 'import os, sys, inspect\n'), ((2880, 2928), 'os.path.join', 'os.path.join', (['rootdir', '"""scripts"""', '"""data"""', '"""*.py"""'], {}), "(rootdir, 'scripts', 'data', '*.py')\n", (2892, 2928), False, 'import os, sys, inspect\n'), ((2957, 3005), 'os.path.join', 'os.path.join', (['rootdir', '"""scripts"""', '"""dist"""', '"""*.py"""'], {}), 
"(rootdir, 'scripts', 'dist', '*.py')\n", (2969, 3005), False, 'import os, sys, inspect\n'), ((3034, 3085), 'os.path.join', 'os.path.join', (['rootdir', '"""scripts"""', '"""filters"""', '"""*.py"""'], {}), "(rootdir, 'scripts', 'filters', '*.py')\n", (3046, 3085), False, 'import os, sys, inspect\n'), ((3114, 3165), 'os.path.join', 'os.path.join', (['rootdir', '"""scripts"""', '"""solvers"""', '"""*.py"""'], {}), "(rootdir, 'scripts', 'solvers', '*.py')\n", (3126, 3165), False, 'import os, sys, inspect\n'), ((4566, 4589), 'os.path.exists', 'os.path.exists', (['setexpr'], {}), '(setexpr)\n', (4580, 4589), False, 'import os, sys, inspect\n'), ((4642, 4667), 'os.path.basename', 'os.path.basename', (['setexpr'], {}), '(setexpr)\n', (4658, 4667), False, 'import os, sys, inspect\n'), ((4598, 4621), 'os.path.isfile', 'os.path.isfile', (['setexpr'], {}), '(setexpr)\n', (4612, 4621), False, 'import os, sys, inspect\n'), ((4731, 4755), 'os.path.dirname', 'os.path.dirname', (['setexpr'], {}), '(setexpr)\n', (4746, 4755), False, 'import os, sys, inspect\n')] |
from copy import deepcopy
import os
import re
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui
from GUI.Visualization import Ui_Visualization
from FAE.FeatureAnalysis.Classifier import *
from FAE.FeatureAnalysis.FeaturePipeline import FeatureAnalysisPipelines, OnePipeline
from FAE.Description.Description import Description
from FAE.Visualization.DrawROCList import DrawROCList
from FAE.Visualization.PlotMetricVsFeatureNumber import DrawCurve, DrawBar
from FAE.Visualization.FeatureSort import GeneralFeatureSort, SortRadiomicsFeature
from Utility.EcLog import eclog
class VisualizationConnection(QWidget, Ui_Visualization):
def __init__(self, parent=None):
self._root_folder = ''
self._fae = FeatureAnalysisPipelines()
self.sheet_dict = dict()
self.logger = eclog(os.path.split(__file__)[-1]).GetLogger()
self.__is_ui_ready = False
super(VisualizationConnection, self).__init__(parent)
self.setupUi(self)
self.buttonLoadResult.clicked.connect(self.LoadAll)
self.buttonClearResult.clicked.connect(self.ClearAll)
self.buttonSave.clicked.connect(self.Save)
self.buttonGenerateDescription.clicked.connect(self.GenerateDescription)
self.__plt_roc = self.canvasROC.getFigure().add_subplot(111)
self.__plt_plot = self.canvasPlot.getFigure().add_subplot(111)
self.__contribution = self.canvasFeature.getFigure().add_subplot(111)
# Update Sheet
self.tableClinicalStatistic.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tableClinicalStatistic.setSelectionBehavior(QAbstractItemView.SelectRows)
self.comboSheet.currentIndexChanged.connect(self.UpdateSheet)
self.checkMaxFeatureNumber.stateChanged.connect(self.UpdateSheet)
# self.tableClinicalStatistic.doubleClicked.connect(self.ShowOneResult)
self.tableClinicalStatistic.itemSelectionChanged.connect(self.ShowOneResult)
# Update ROC canvas
self.comboNormalizer.currentIndexChanged.connect(self.UpdateROC)
self.comboDimensionReduction.currentIndexChanged.connect(self.UpdateROC)
self.comboFeatureSelector.currentIndexChanged.connect(self.UpdateROC)
self.comboClassifier.currentIndexChanged.connect(self.UpdateROC)
self.spinBoxFeatureNumber.valueChanged.connect(self.UpdateROC)
self.checkROCCVTrain.stateChanged.connect(self.UpdateROC)
self.checkROCCVValidation.stateChanged.connect(self.UpdateROC)
self.checkROCTrain.stateChanged.connect(self.UpdateROC)
self.checkROCTest.stateChanged.connect(self.UpdateROC)
# Update Plot canvas
self.comboPlotX.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotY.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotNormalizer.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotDimensionReduction.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotFeatureSelector.currentIndexChanged.connect(self.UpdatePlot)
self.comboPlotClassifier.currentIndexChanged.connect(self.UpdatePlot)
self.spinPlotFeatureNumber.valueChanged.connect(self.UpdatePlot)
self.checkPlotCVTrain.stateChanged.connect(self.UpdatePlot)
self.checkPlotCVValidation.stateChanged.connect(self.UpdatePlot)
self.checkPlotTrain.stateChanged.connect(self.UpdatePlot)
# self.checkPlotTest.stateChanged.connect(self.UpdatePlot)
# Update Contribution canvas
self.radioContributionFeatureSelector.toggled.connect(self.UpdateContribution)
self.radioContributionClassifier.toggled.connect(self.UpdateContribution)
self.comboContributionNormalizor.currentIndexChanged.connect(self.UpdateContribution)
self.comboContributionDimension.currentIndexChanged.connect(self.UpdateContribution)
self.comboContributionFeatureSelector.currentIndexChanged.connect(self.UpdateContribution)
self.comboContributionClassifier.currentIndexChanged.connect(self.UpdateContribution)
self.spinContributeFeatureNumber.valueChanged.connect(self.UpdateContribution)
def LoadAll(self):
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.DirectoryOnly)
dlg.setOption(QFileDialog.ShowDirsOnly)
if dlg.exec_():
self._root_folder = dlg.selectedFiles()[0]
if not os.path.exists(self._root_folder):
return
if not r'.FAEresult4129074093819729087' in os.listdir(self._root_folder):
QMessageBox.about(self, 'Load Error', 'This folder is not supported for import')
return
try:
self.lineEditResultPath.setText(self._root_folder)
self._fae.LoadAll(self._root_folder)
self.SetResultDescription()
self.SetResultTable()
self.InitialUi()
except Exception as ex:
QMessageBox.about(self, "Load Error", ex.__str__())
self.logger.log('Load Error, The reason is ' + str(ex))
self.ClearAll()
return
self.buttonClearResult.setEnabled(True)
self.buttonSave.setEnabled(True)
self.buttonLoadResult.setEnabled(False)
def ClearAll(self):
self.buttonLoadResult.setEnabled(True)
self.buttonSave.setEnabled(False)
self.buttonClearResult.setEnabled(False)
self.checkROCCVTrain.setChecked(False)
self.checkROCCVValidation.setChecked(False)
self.checkROCTrain.setChecked(False)
self.checkROCTest.setChecked(False)
self.checkPlotCVTrain.setChecked(False)
self.checkPlotCVValidation.setChecked(False)
self.checkPlotTrain.setChecked(False)
# self.checkPlotTest.setChecked(False)
self.radioContributionFeatureSelector.setChecked(True)
self.radioContributionFeatureSelector.setChecked(False)
self.checkMaxFeatureNumber.setChecked(False)
self.canvasROC.getFigure().clear()
self.canvasPlot.getFigure().clear()
self.canvasFeature.getFigure().clear()
self.__plt_roc = self.canvasROC.getFigure().add_subplot(111)
self.__plt_plot = self.canvasPlot.getFigure().add_subplot(111)
self.__contribution = self.canvasFeature.getFigure().add_subplot(111)
self.canvasROC.draw()
self.canvasPlot.draw()
self.canvasFeature.draw()
self.textEditDescription.clear()
self.lineEditResultPath.clear()
self.comboSheet.clear()
self.comboClassifier.clear()
self.comboDimensionReduction.clear()
self.comboNormalizer.clear()
self.comboFeatureSelector.clear()
self.comboPlotClassifier.clear()
self.comboPlotDimensionReduction.clear()
self.comboPlotFeatureSelector.clear()
self.comboPlotNormalizer.clear()
self.comboPlotX.clear()
self.comboPlotY.clear()
self.comboContributionNormalizor.clear()
self.comboContributionDimension.clear()
self.comboContributionClassifier.clear()
self.comboContributionFeatureSelector.clear()
self.spinBoxFeatureNumber.setValue(0)
self.spinPlotFeatureNumber.setValue(0)
self.spinPlotFeatureNumber.setEnabled(False)
self.spinContributeFeatureNumber.setValue(1)
self.tableClinicalStatistic.clear()
self.tableClinicalStatistic.setRowCount(0)
self.tableClinicalStatistic.setColumnCount(0)
self.tableClinicalStatistic.setHorizontalHeaderLabels(list([]))
self.tableClinicalStatistic.setVerticalHeaderLabels(list([]))
self._fae = FeatureAnalysisPipelines()
self._root_folder = ''
self.sheet_dict = dict()
self.__is_ui_ready = False
def Save(self):
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.DirectoryOnly)
dlg.setOption(QFileDialog.ShowDirsOnly)
if dlg.exec_():
store_folder = dlg.selectedFiles()[0]
try:
self.canvasROC.getFigure().savefig(os.path.join(store_folder, 'ROC.eps'), dpi=1200)
self.canvasROC.getFigure().savefig(os.path.join(store_folder, 'ROC.jpg'), dpi=300)
except Exception as e:
QMessageBox.about(self, 'Save Figure Failed', 'There is no ROC figure.\n' + e.__str__())
try:
self.canvasPlot.getFigure().savefig(os.path.join(store_folder, 'Compare.eps'), dpi=1200)
self.canvasPlot.getFigure().savefig(os.path.join(store_folder, 'Compare.jpg'), dpi=300)
except Exception as e:
QMessageBox.about(self, 'Save Figure Failed', 'There is no AUC comparison figure.\n' + e.__str__())
try:
self.canvasFeature.getFigure().savefig(os.path.join(store_folder, 'FeatureWeights.eps'), dpi=1200)
self.canvasFeature.getFigure().savefig(os.path.join(store_folder, 'FeatureWeights.jpg'), dpi=300)
except Exception as e:
QMessageBox.about(self, 'Save Figure Failed', 'There is no Feature Contribution figure.\n' + e.__str__())
def InitialUi(self):
# Update ROC canvers
for normalizer in self._fae.GetNormalizerList():
self.comboNormalizer.addItem(normalizer.GetName())
for dimension_reduction in self._fae.GetDimensionReductionList():
self.comboDimensionReduction.addItem(dimension_reduction.GetName())
for classifier in self._fae.GetClassifierList():
self.comboClassifier.addItem(classifier.GetName())
for feature_selector in self._fae.GetFeatureSelectorList():
self.comboFeatureSelector.addItem(feature_selector.GetName())
self.spinBoxFeatureNumber.setMinimum(int(self._fae.GetFeatureNumberList()[0]))
self.spinBoxFeatureNumber.setMaximum(int(self._fae.GetFeatureNumberList()[-1]))
# Update Plot canvars
if len(self._fae.GetNormalizerList()) > 1:
self.comboPlotX.addItem('Normaliaztion')
if len(self._fae.GetDimensionReductionList()) > 1:
self.comboPlotX.addItem('Dimension Reduction')
if len(self._fae.GetFeatureSelectorList()) > 1:
self.comboPlotX.addItem('Feature Selector')
if len(self._fae.GetClassifierList()) > 1:
self.comboPlotX.addItem('Classifier')
if len(self._fae.GetFeatureNumberList()) > 1:
self.comboPlotX.addItem('Feature Number')
self.comboPlotY.addItem('AUC')
for index in self._fae.GetNormalizerList():
self.comboPlotNormalizer.addItem(index.GetName())
for index in self._fae.GetDimensionReductionList():
self.comboPlotDimensionReduction.addItem(index.GetName())
for index in self._fae.GetFeatureSelectorList():
self.comboPlotFeatureSelector.addItem(index.GetName())
for index in self._fae.GetClassifierList():
self.comboPlotClassifier.addItem(index.GetName())
self.spinPlotFeatureNumber.setMinimum(int(self._fae.GetFeatureNumberList()[0]))
self.spinPlotFeatureNumber.setMaximum(int(self._fae.GetFeatureNumberList()[-1]))
# Update Contribution canvas
for index in self._fae.GetNormalizerList():
self.comboContributionNormalizor.addItem(index.GetName())
for index in self._fae.GetDimensionReductionList():
self.comboContributionDimension.addItem(index.GetName())
for selector in self._fae.GetFeatureSelectorList():
self.comboContributionFeatureSelector.addItem(selector.GetName())
for classifier in self._fae.GetClassifierList():
specific_name = classifier.GetName() + '_coef.csv'
if self._SearchSpecificFile(int(self._fae.GetFeatureNumberList()[0]), specific_name):
self.comboContributionClassifier.addItem(classifier.GetName())
self.spinContributeFeatureNumber.setMinimum(int(self._fae.GetFeatureNumberList()[0]))
self.spinContributeFeatureNumber.setMaximum(int(self._fae.GetFeatureNumberList()[-1]))
self.__is_ui_ready = True
def UpdateROC(self):
if not self.__is_ui_ready:
return
if (self.comboNormalizer.count() == 0) or \
(self.comboDimensionReduction.count() == 0) or \
(self.comboFeatureSelector.count() == 0) or \
(self.comboClassifier.count() == 0) or \
(self.spinBoxFeatureNumber.value() == 0):
return
case_name = self.comboNormalizer.currentText() + '_' + \
self.comboDimensionReduction.currentText() + '_' + \
self.comboFeatureSelector.currentText() + '_' + \
str(self.spinBoxFeatureNumber.value()) + '_' + \
self.comboClassifier.currentText()
case_folder = os.path.join(self._root_folder, case_name)
pred_list, label_list, name_list = [], [], []
if self.checkROCCVTrain.isChecked():
train_pred = np.load(os.path.join(case_folder, 'train_predict.npy'))
train_label = np.load(os.path.join(case_folder, 'train_label.npy'))
pred_list.append(train_pred)
label_list.append(train_label)
name_list.append('CV Train')
if self.checkROCCVValidation.isChecked():
val_pred = np.load(os.path.join(case_folder, 'val_predict.npy'))
val_label = np.load(os.path.join(case_folder, 'val_label.npy'))
pred_list.append(val_pred)
label_list.append(val_label)
name_list.append('CV Validation')
if self.checkROCTrain.isChecked():
all_train_pred = np.load(os.path.join(case_folder, 'all_train_predict.npy'))
all_train_label = np.load(os.path.join(case_folder, 'all_train_label.npy'))
pred_list.append(all_train_pred)
label_list.append(all_train_label)
name_list.append('Train')
if self.checkROCTest.isChecked():
if os.path.exists(os.path.join(case_folder, 'test_label.npy')):
test_pred = np.load(os.path.join(case_folder, 'test_predict.npy'))
test_label = np.load(os.path.join(case_folder, 'test_label.npy'))
pred_list.append(test_pred)
label_list.append(test_label)
name_list.append('Test')
if len(pred_list) > 0:
DrawROCList(pred_list, label_list, name_list=name_list, is_show=False, fig=self.canvasROC.getFigure())
self.canvasROC.draw()
def _UpdatePlotButtons(self, selected_index):
index = [0, 0, 0, 0, 0]
self.comboPlotNormalizer.setEnabled(True)
self.comboPlotDimensionReduction.setEnabled(True)
self.comboPlotFeatureSelector.setEnabled(True)
self.comboPlotClassifier.setEnabled(True)
self.spinPlotFeatureNumber.setEnabled(True)
index[0] = self.comboPlotNormalizer.currentIndex()
index[1] = self.comboPlotDimensionReduction.currentIndex()
index[2] = self.comboPlotFeatureSelector.currentIndex()
index[4] = self.comboPlotClassifier.currentIndex()
index[3] = self.spinPlotFeatureNumber.value() - int(self._fae.GetFeatureNumberList()[0])
if selected_index == 0:
self.comboPlotNormalizer.setEnabled(False)
index[0] = [temp for temp in range(len(self._fae.GetNormalizerList()))]
elif selected_index == 1:
self.comboPlotDimensionReduction.setEnabled(False)
index[1] = [temp for temp in range(len(self._fae.GetDimensionReductionList()))]
elif selected_index == 2:
self.comboPlotFeatureSelector.setEnabled(False)
index[2] = [temp for temp in range(len(self._fae.GetFeatureSelectorList()))]
elif selected_index == 4:
self.comboPlotClassifier.setEnabled(False)
index[4] = [temp for temp in range(len(self._fae.GetClassifierList()))]
elif selected_index == 3:
self.spinPlotFeatureNumber.setEnabled(False)
index[3] = [temp for temp in range(len(self._fae.GetFeatureNumberList()))]
return index
def UpdatePlot(self):
if not self.__is_ui_ready:
return
if self.comboPlotX.count() == 0:
return
x_ticks = []
x_label = ''
selected_index = -1
if self.comboPlotX.currentText() == 'Normaliaztion':
selected_index = 0
x_ticks = [instance.GetName() for instance in self._fae.GetNormalizerList()]
x_label = 'Normalization Method'
elif self.comboPlotX.currentText() == 'Dimension Reduction':
selected_index = 1
x_ticks = [instance.GetName() for instance in self._fae.GetDimensionReductionList()]
x_label = 'Dimension Reduction Method'
elif self.comboPlotX.currentText() == 'Feature Selector':
selected_index = 2
x_ticks = [instance.GetName() for instance in self._fae.GetFeatureSelectorList()]
x_label = 'Feature Selecotr Method'
elif self.comboPlotX.currentText() == 'Classifier':
selected_index = 4
x_ticks = [instance.GetName() for instance in self._fae.GetClassifierList()]
x_label = 'Classifier Method'
elif self.comboPlotX.currentText() == 'Feature Number':
selected_index = 3
x_ticks = list(map(int, self._fae.GetFeatureNumberList()))
x_label = 'Feature Number'
max_axis_list = [0, 1, 2, 3, 4]
max_axis_list.remove(selected_index)
max_axis = tuple(max_axis_list)
index = self._UpdatePlotButtons(selected_index)
show_data = []
show_data_std =[]
name_list = []
if self.comboPlotY.currentText() == 'AUC':
if self.checkPlotCVTrain.isChecked():
temp = deepcopy(self._fae.GetAUCMetric()['train'])
auc_std = deepcopy(self._fae.GetAUCstdMetric()['train'])
show_data.append(temp[tuple(index)].tolist())
show_data_std.append(auc_std[tuple(index)].tolist())
name_list.append('CV Train')
if self.checkPlotCVValidation.isChecked():
temp = deepcopy(self._fae.GetAUCMetric()['val'])
auc_std = deepcopy(self._fae.GetAUCstdMetric()['val'])
show_data.append(temp[tuple(index)].tolist())
show_data_std.append(auc_std[tuple(index)].tolist())
name_list.append('CV Validation')
if self.checkPlotTrain.isChecked():
temp = deepcopy(self._fae.GetAUCMetric()['all_train'])
auc_std = deepcopy(self._fae.GetAUCstdMetric()['all_train'])
show_data.append(temp[tuple(index)].tolist())
show_data_std.append(auc_std[tuple(index)].tolist())
name_list.append('Train')
# if self.checkPlotTest.isChecked():
# temp = deepcopy(self._fae.GetAUCMetric()['test'])
# auc_std = deepcopy(self._fae.GetAUCstdMetric()['test'])
# if temp.size > 0:
# show_data.append(temp[tuple(index)].tolist())
# show_data_std.append(auc_std[tuple(index)].tolist())
# name_list.append('Test')
if len(show_data) > 0:
if selected_index == 3:
DrawCurve(x_ticks, show_data, show_data_std, xlabel=x_label, ylabel=self.comboPlotY.currentText(),
name_list=name_list, is_show=False, fig=self.canvasPlot.getFigure())
else:
DrawBar(x_ticks, show_data, ylabel=self.comboPlotY.currentText(),
name_list=name_list, is_show=False, fig=self.canvasPlot.getFigure())
self.canvasPlot.draw()
def UpdateContribution(self):
if not self.__is_ui_ready:
return
try:
one_result_folder_name = self.comboContributionNormalizor.currentText() + '_' + \
self.comboContributionDimension.currentText() + '_' + \
self.comboContributionFeatureSelector.currentText() + '_' + \
str(self.spinContributeFeatureNumber.value()) + '_' + \
self.comboContributionClassifier.currentText()
one_result_folder = os.path.join(self._root_folder, one_result_folder_name)
# This is compatible with the previous version
if not os.path.exists(one_result_folder):
one_result_folder_name = self.comboContributionNormalizor.currentText() + '_Cos_' + \
self.comboContributionFeatureSelector.currentText() + '_' + \
str(self.spinContributeFeatureNumber.value()) + '_' + \
self.comboContributionClassifier.currentText()
one_result_folder = os.path.join(self._root_folder, one_result_folder_name)
if self.radioContributionFeatureSelector.isChecked():
file_name = self.comboContributionFeatureSelector.currentText() + '_sort.csv'
file_path = os.path.join(one_result_folder, file_name)
if not os.path.exists(file_path):
file_name = self.comboContributionFeatureSelector.currentText().lower() + '_sort.csv'
file_path = os.path.join(one_result_folder, file_name)
if file_path:
df = pd.read_csv(file_path, index_col=0)
value = list(np.abs(df.iloc[:, 0]))
#add positive and negatiove info for coef
processed_feature_name = list(df.index)
original_value = list(df.iloc[:, 0])
for index in range(len(original_value)):
if original_value[index] > 0:
processed_feature_name[index] = processed_feature_name[index] + ' P'
else:
processed_feature_name[index] = processed_feature_name[index] + ' N'
GeneralFeatureSort(processed_feature_name, value, max_num=self.spinContributeFeatureNumber.value(),
is_show=False, fig=self.canvasFeature.getFigure())
elif self.radioContributionClassifier.isChecked():
specific_name = self.comboContributionClassifier.currentText() + '_coef.csv'
file_path = os.path.join(one_result_folder, specific_name)
if not os.path.exists(file_path):
specific_name = self.comboContributionClassifier.currentText().lower() + '_coef.csv'
file_path = os.path.join(one_result_folder, specific_name)
if file_path:
df = pd.read_csv(file_path, index_col=0)
feature_name = list(df.index)
value = list(np.abs(df.iloc[:, 0]))
#add positive and negatiove info for coef
processed_feature_name = list(df.index)
original_value = list(df.iloc[:, 0])
for index in range(len(original_value)):
if original_value[index] > 0:
processed_feature_name[index] = processed_feature_name[index] + ' P'
else:
processed_feature_name[index] = processed_feature_name[index] + ' N'
# try:
# SortRadiomicsFeature(processed_feature_name, value, is_show=False, fig=self.canvasFeature.getFigure())
# except:
GeneralFeatureSort(processed_feature_name, value,
is_show=False, fig=self.canvasFeature.getFigure())
self.canvasFeature.draw()
except Exception as e:
content = 'In Visualization, UpdateContribution failed'
self.logger.error('{}{}'.format(content, str(e)))
QMessageBox.about(self, content, e.__str__())
def SetResultDescription(self):
text = "Normalizer:\n"
for index in self._fae.GetNormalizerList():
text += (index.GetName() + '\n')
text += '\n'
text += "Dimension Reduction:\n"
for index in self._fae.GetDimensionReductionList():
text += (index.GetName() + '\n')
text += '\n'
text += "Feature Selector:\n"
for index in self._fae.GetFeatureSelectorList():
text += (index.GetName() + '\n')
text += '\n'
text += "Feature Number:\n"
text += "{:s} - {:s}\n".format(self._fae.GetFeatureNumberList()[0], self._fae.GetFeatureNumberList()[-1])
text += '\n'
text += "Classifier:\n"
for index in self._fae.GetClassifierList():
text += (index.GetName() + '\n')
text += '\n'
text += 'Cross Validation: ' + self._fae.GetCrossValidation().GetName()
self.textEditDescription.setPlainText(text)
def UpdateSheet(self):
if self.checkMaxFeatureNumber.isChecked():
self.comboSheet.setEnabled(False)
else:
self.comboSheet.setEnabled(True)
self.tableClinicalStatistic.clear()
self.tableClinicalStatistic.setSortingEnabled(False)
if self.comboSheet.currentText() == 'Train':
df = self.sheet_dict['train']
elif self.comboSheet.currentText() == 'Validation':
df = self.sheet_dict['val']
elif self.comboSheet.currentText() == 'Test':
df = self.sheet_dict['test']
else:
return
if self.checkMaxFeatureNumber.isChecked():
self.sheet_dict['test'] = pd.read_csv(os.path.join(self._root_folder, 'test_result.csv'), index_col=0)
data = self._fae.GetAUCMetric()['val']
std_data = self._fae.GetAUCstdMetric()['val']
df_val = self.sheet_dict['val']
df_test = self.sheet_dict['test']
name_list = []
for normalizer_index, normalizer in enumerate(self._fae.GetNormalizerList()):
for dimension_reducer_index, dimension_reducer in enumerate(self._fae.GetDimensionReductionList()):
for feature_selector_index, feature_selector in enumerate(self._fae.GetFeatureSelectorList()):
for classifier_index, classifier in enumerate(self._fae.GetClassifierList()):
sub_auc = data[normalizer_index, dimension_reducer_index, feature_selector_index, :,
classifier_index]
sub_auc_std = std_data[normalizer_index, dimension_reducer_index, feature_selector_index, :,
classifier_index]
one_se = max(sub_auc)-sub_auc_std[np.argmax(sub_auc)]
for feature_number_index in range(len(self._fae.GetFeatureNumberList())):
if data[normalizer_index, dimension_reducer_index,
feature_selector_index, feature_number_index, classifier_index] >= one_se:
name = normalizer.GetName() + '_' + dimension_reducer.GetName() + '_' + \
feature_selector.GetName() + '_' + str(self._fae.GetFeatureNumberList()[feature_number_index]) + '_' + \
classifier.GetName()
name_list.append(name)
break
# choose the selected models from all test result
df_val = df_val.loc[name_list]
max_index = df_val['auc'].idxmax()
sub_serise = df_val.loc[max_index]
max_array = sub_serise.get_values().reshape(1, -1)
max_auc_df = pd.DataFrame(data=max_array, columns=sub_serise.index.tolist(), index=[max_index])
max_auc_95ci = max_auc_df.at[max_index, 'auc 95% CIs']
max_auc_95ci = re.findall(r"\d+\.?\d*", max_auc_95ci)
sub_val_df = df_val[(df_val['auc'] >= float(max_auc_95ci[0])) & (df_val['auc'] <= float(max_auc_95ci[1]))]
index_by_val = sub_val_df.index.tolist()
df = df_test.loc[index_by_val]
df.sort_index(inplace=True)
self.tableClinicalStatistic.setRowCount(df.shape[0])
self.tableClinicalStatistic.setColumnCount(df.shape[1]+1)
headerlabels = df.columns.tolist()
headerlabels.insert(0, 'models name')
self.tableClinicalStatistic.setHorizontalHeaderLabels(headerlabels)
# self.tableClinicalStatistic.setVerticalHeaderLabels(list(df.index))
for row_index in range(df.shape[0]):
for col_index in range(df.shape[1]+1):
if col_index == 0:
self.tableClinicalStatistic.setItem(row_index, col_index,
QTableWidgetItem(df.index[row_index]))
else:
self.tableClinicalStatistic.setItem(row_index, col_index,
QTableWidgetItem(str(df.iloc[row_index, col_index-1])))
self.tableClinicalStatistic.setSortingEnabled(True)
def SetResultTable(self):
self.sheet_dict['train'] = pd.read_csv(os.path.join(self._root_folder, 'train_result.csv'), index_col=0)
self.comboSheet.addItem('Train')
self.sheet_dict['val'] = pd.read_csv(os.path.join(self._root_folder, 'val_result.csv'), index_col=0)
self.comboSheet.addItem('Validation')
if os.path.exists(os.path.join(self._root_folder, 'test_result.csv')):
self.sheet_dict['test'] = pd.read_csv(os.path.join(self._root_folder, 'test_result.csv'), index_col=0)
self.comboSheet.addItem('Test')
self.UpdateSheet()
def _SearchSpecificFile(self, feature_number, specific_file_name, specific_file_name2=''):
for rt, folder, files in os.walk(self._root_folder):
for file_name in files:
# print(file_name)
if specific_file_name2:
if (file_name.lower() == specific_file_name.lower()) and \
('_{:d}_'.format(feature_number) in rt) and \
(specific_file_name2 in rt):
return os.path.join(rt, file_name)
else:
if (file_name.lower() == specific_file_name.lower()) and ('_{:d}_'.format(feature_number) in rt):
return os.path.join(rt, file_name)
return ''
def ShowOneResult(self):
try:
# for index in self.tableClinicalStatistic.selectedIndexes():
index = self.tableClinicalStatistic.selectedIndexes()[0]
row = index.row()
one_item = self.tableClinicalStatistic.item(row, 0)
text = str(one_item.text())
current_normalizer, current_dimension_reducer, current_feature_selector, current_feature_number, current_classifier = \
text.split('_')
self.comboNormalizer.setCurrentText(current_normalizer)
self.comboDimensionReduction.setCurrentText(current_dimension_reducer)
self.comboFeatureSelector.setCurrentText(current_feature_selector)
self.comboClassifier.setCurrentText(current_classifier)
self.spinBoxFeatureNumber.setValue(int(current_feature_number))
if not (self.checkROCTrain.isChecked() or self.checkROCCVTrain.isChecked() or
self.checkROCCVValidation.isChecked() or self.checkROCTrain.isChecked()):
self.checkROCCVTrain.setCheckState(True)
self.checkROCCVValidation.setCheckState(True)
self.UpdateROC()
# Update the AUC versus feature number
self.comboPlotNormalizer.setCurrentText(current_normalizer)
self.comboPlotDimensionReduction.setCurrentText(current_dimension_reducer)
self.comboPlotFeatureSelector.setCurrentText(current_feature_selector)
self.comboPlotClassifier.setCurrentText(current_classifier)
self.comboPlotX.setCurrentText('Feature Number')
if not (self.checkPlotTrain.isChecked() or
self.checkPlotCVTrain.isChecked() or
self.checkPlotCVValidation.isChecked()):
self.checkPlotCVValidation.setCheckState(True)
self.UpdatePlot()
# Update the Contribution
self.comboContributionNormalizor.setCurrentText(current_normalizer)
self.comboContributionDimension.setCurrentText(current_dimension_reducer)
self.comboContributionFeatureSelector.setCurrentText(current_feature_selector)
self.comboContributionClassifier.setCurrentText(current_classifier)
self.spinContributeFeatureNumber.setValue(int(current_feature_number))
self.UpdateContribution()
except Exception as e:
content = 'Visualization, ShowOneResult failed: '
self.logger.error('{}{}'.format(content, str(e)))
QMessageBox.about(self, content, e.__str__())
def GenerateDescription(self):
if (self.comboNormalizer.count() == 0) or \
(self.comboDimensionReduction.count() == 0) or \
(self.comboFeatureSelector.count() == 0) or \
(self.comboClassifier.count() == 0) or \
(self.spinBoxFeatureNumber.value() == 0):
return
case_name = self.comboNormalizer.currentText() + '_' + \
self.comboDimensionReduction.currentText() + '_' + \
self.comboFeatureSelector.currentText() + '_' + \
str(self.spinBoxFeatureNumber.value()) + '_' + \
self.comboClassifier.currentText()
case_folder = os.path.join(self._root_folder, case_name)
current_pipeline = OnePipeline()
try:
current_pipeline.LoadPipeline(os.path.join(case_folder, 'pipeline_info.csv'))
except Exception as ex:
QMessageBox.about(self, "In Description, Load Pipeline_info Error", ex.__str__())
self.logger.error('Load Pipeline Error, The reason is ' + str(ex))
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.DirectoryOnly)
dlg.setOption(QFileDialog.ShowDirsOnly)
if dlg.exec_():
store_folder = dlg.selectedFiles()[0]
roc_path = os.path.join(store_folder, 'ROC.jpg')
self.canvasROC.getFigure().savefig(roc_path, dpi=300)
report = Description()
try:
report.Run(current_pipeline, self._root_folder, store_folder)
os.system("explorer.exe {:s}".format(os.path.normpath(store_folder)))
except Exception as ex:
QMessageBox.about(self, 'Description Generate Error: ', ex.__str__())
self.logger.log('Description Generate Error: ' + str(ex)) | [
"os.path.exists",
"os.listdir",
"FAE.FeatureAnalysis.FeaturePipeline.FeatureAnalysisPipelines",
"FAE.Description.Description.Description",
"os.path.join",
"os.path.split",
"os.path.normpath",
"FAE.FeatureAnalysis.FeaturePipeline.OnePipeline",
"re.findall",
"os.walk"
] | [((732, 758), 'FAE.FeatureAnalysis.FeaturePipeline.FeatureAnalysisPipelines', 'FeatureAnalysisPipelines', ([], {}), '()\n', (756, 758), False, 'from FAE.FeatureAnalysis.FeaturePipeline import FeatureAnalysisPipelines, OnePipeline\n'), ((7706, 7732), 'FAE.FeatureAnalysis.FeaturePipeline.FeatureAnalysisPipelines', 'FeatureAnalysisPipelines', ([], {}), '()\n', (7730, 7732), False, 'from FAE.FeatureAnalysis.FeaturePipeline import FeatureAnalysisPipelines, OnePipeline\n'), ((12930, 12972), 'os.path.join', 'os.path.join', (['self._root_folder', 'case_name'], {}), '(self._root_folder, case_name)\n', (12942, 12972), False, 'import os\n'), ((30293, 30319), 'os.walk', 'os.walk', (['self._root_folder'], {}), '(self._root_folder)\n', (30300, 30319), False, 'import os\n'), ((34218, 34260), 'os.path.join', 'os.path.join', (['self._root_folder', 'case_name'], {}), '(self._root_folder, case_name)\n', (34230, 34260), False, 'import os\n'), ((34288, 34301), 'FAE.FeatureAnalysis.FeaturePipeline.OnePipeline', 'OnePipeline', ([], {}), '()\n', (34299, 34301), False, 'from FAE.FeatureAnalysis.FeaturePipeline import FeatureAnalysisPipelines, OnePipeline\n'), ((20510, 20565), 'os.path.join', 'os.path.join', (['self._root_folder', 'one_result_folder_name'], {}), '(self._root_folder, one_result_folder_name)\n', (20522, 20565), False, 'import os\n'), ((28316, 28356), 're.findall', 're.findall', (['"""\\\\d+\\\\.?\\\\d*"""', 'max_auc_95ci'], {}), "('\\\\d+\\\\.?\\\\d*', max_auc_95ci)\n", (28326, 28356), False, 'import re\n'), ((29636, 29687), 'os.path.join', 'os.path.join', (['self._root_folder', '"""train_result.csv"""'], {}), "(self._root_folder, 'train_result.csv')\n", (29648, 29687), False, 'import os\n'), ((29788, 29837), 'os.path.join', 'os.path.join', (['self._root_folder', '"""val_result.csv"""'], {}), "(self._root_folder, 'val_result.csv')\n", (29800, 29837), False, 'import os\n'), ((29924, 29974), 'os.path.join', 'os.path.join', (['self._root_folder', '"""test_result.csv"""'], 
{}), "(self._root_folder, 'test_result.csv')\n", (29936, 29974), False, 'import os\n'), ((34836, 34873), 'os.path.join', 'os.path.join', (['store_folder', '"""ROC.jpg"""'], {}), "(store_folder, 'ROC.jpg')\n", (34848, 34873), False, 'import os\n'), ((34962, 34975), 'FAE.Description.Description.Description', 'Description', ([], {}), '()\n', (34973, 34975), False, 'from FAE.Description.Description import Description\n'), ((4401, 4434), 'os.path.exists', 'os.path.exists', (['self._root_folder'], {}), '(self._root_folder)\n', (4415, 4434), False, 'import os\n'), ((13106, 13152), 'os.path.join', 'os.path.join', (['case_folder', '"""train_predict.npy"""'], {}), "(case_folder, 'train_predict.npy')\n", (13118, 13152), False, 'import os\n'), ((13188, 13232), 'os.path.join', 'os.path.join', (['case_folder', '"""train_label.npy"""'], {}), "(case_folder, 'train_label.npy')\n", (13200, 13232), False, 'import os\n'), ((13440, 13484), 'os.path.join', 'os.path.join', (['case_folder', '"""val_predict.npy"""'], {}), "(case_folder, 'val_predict.npy')\n", (13452, 13484), False, 'import os\n'), ((13518, 13560), 'os.path.join', 'os.path.join', (['case_folder', '"""val_label.npy"""'], {}), "(case_folder, 'val_label.npy')\n", (13530, 13560), False, 'import os\n'), ((13768, 13818), 'os.path.join', 'os.path.join', (['case_folder', '"""all_train_predict.npy"""'], {}), "(case_folder, 'all_train_predict.npy')\n", (13780, 13818), False, 'import os\n'), ((13858, 13906), 'os.path.join', 'os.path.join', (['case_folder', '"""all_train_label.npy"""'], {}), "(case_folder, 'all_train_label.npy')\n", (13870, 13906), False, 'import os\n'), ((14110, 14153), 'os.path.join', 'os.path.join', (['case_folder', '"""test_label.npy"""'], {}), "(case_folder, 'test_label.npy')\n", (14122, 14153), False, 'import os\n'), ((20644, 20677), 'os.path.exists', 'os.path.exists', (['one_result_folder'], {}), '(one_result_folder)\n', (20658, 20677), False, 'import os\n'), ((21105, 21160), 'os.path.join', 'os.path.join', 
(['self._root_folder', 'one_result_folder_name'], {}), '(self._root_folder, one_result_folder_name)\n', (21117, 21160), False, 'import os\n'), ((21350, 21392), 'os.path.join', 'os.path.join', (['one_result_folder', 'file_name'], {}), '(one_result_folder, file_name)\n', (21362, 21392), False, 'import os\n'), ((25983, 26033), 'os.path.join', 'os.path.join', (['self._root_folder', '"""test_result.csv"""'], {}), "(self._root_folder, 'test_result.csv')\n", (25995, 26033), False, 'import os\n'), ((30027, 30077), 'os.path.join', 'os.path.join', (['self._root_folder', '"""test_result.csv"""'], {}), "(self._root_folder, 'test_result.csv')\n", (30039, 30077), False, 'import os\n'), ((34357, 34403), 'os.path.join', 'os.path.join', (['case_folder', '"""pipeline_info.csv"""'], {}), "(case_folder, 'pipeline_info.csv')\n", (34369, 34403), False, 'import os\n'), ((4514, 4543), 'os.listdir', 'os.listdir', (['self._root_folder'], {}), '(self._root_folder)\n', (4524, 4543), False, 'import os\n'), ((8123, 8160), 'os.path.join', 'os.path.join', (['store_folder', '"""ROC.eps"""'], {}), "(store_folder, 'ROC.eps')\n", (8135, 8160), False, 'import os\n'), ((8223, 8260), 'os.path.join', 'os.path.join', (['store_folder', '"""ROC.jpg"""'], {}), "(store_folder, 'ROC.jpg')\n", (8235, 8260), False, 'import os\n'), ((8481, 8522), 'os.path.join', 'os.path.join', (['store_folder', '"""Compare.eps"""'], {}), "(store_folder, 'Compare.eps')\n", (8493, 8522), False, 'import os\n'), ((8586, 8627), 'os.path.join', 'os.path.join', (['store_folder', '"""Compare.jpg"""'], {}), "(store_folder, 'Compare.jpg')\n", (8598, 8627), False, 'import os\n'), ((8862, 8910), 'os.path.join', 'os.path.join', (['store_folder', '"""FeatureWeights.eps"""'], {}), "(store_folder, 'FeatureWeights.eps')\n", (8874, 8910), False, 'import os\n'), ((8977, 9025), 'os.path.join', 'os.path.join', (['store_folder', '"""FeatureWeights.jpg"""'], {}), "(store_folder, 'FeatureWeights.jpg')\n", (8989, 9025), False, 'import os\n'), ((14192, 
14237), 'os.path.join', 'os.path.join', (['case_folder', '"""test_predict.npy"""'], {}), "(case_folder, 'test_predict.npy')\n", (14204, 14237), False, 'import os\n'), ((14276, 14319), 'os.path.join', 'os.path.join', (['case_folder', '"""test_label.npy"""'], {}), "(case_folder, 'test_label.npy')\n", (14288, 14319), False, 'import os\n'), ((21417, 21442), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (21431, 21442), False, 'import os\n'), ((21582, 21624), 'os.path.join', 'os.path.join', (['one_result_folder', 'file_name'], {}), '(one_result_folder, file_name)\n', (21594, 21624), False, 'import os\n'), ((22688, 22734), 'os.path.join', 'os.path.join', (['one_result_folder', 'specific_name'], {}), '(one_result_folder, specific_name)\n', (22700, 22734), False, 'import os\n'), ((820, 843), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (833, 843), False, 'import os\n'), ((22759, 22784), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (22773, 22784), False, 'import os\n'), ((22923, 22969), 'os.path.join', 'os.path.join', (['one_result_folder', 'specific_name'], {}), '(one_result_folder, specific_name)\n', (22935, 22969), False, 'import os\n'), ((30673, 30700), 'os.path.join', 'os.path.join', (['rt', 'file_name'], {}), '(rt, file_name)\n', (30685, 30700), False, 'import os\n'), ((30872, 30899), 'os.path.join', 'os.path.join', (['rt', 'file_name'], {}), '(rt, file_name)\n', (30884, 30899), False, 'import os\n'), ((35124, 35154), 'os.path.normpath', 'os.path.normpath', (['store_folder'], {}), '(store_folder)\n', (35140, 35154), False, 'import os\n')] |
# Copyright 2017 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management.base import BaseCommand, CommandError
from bookstore.models import Book, Rating
import random
import threading
class Command(BaseCommand):
    """Stress-test command: spawn client threads that each create a number
    of random book ratings."""

    help = "Create some test commands."

    def add_arguments(self, parser):
        """Register the two optional positional arguments."""
        parser.add_argument("clients", default=5, nargs="?", type=int)
        parser.add_argument("requests_per_client", default=20, nargs="?", type=int)

    def handle(self, *args, **options):
        """Start one ClientThread per client and wait for all of them."""
        threads = [ClientThread(options["requests_per_client"])
                   for _ in range(options["clients"])]
        # Plain loops (not a comprehension) for the side-effecting calls.
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
class ClientThread(threading.Thread):
    """Worker thread that posts a fixed number of random ratings."""

    def __init__(self, max_requests):
        super().__init__()
        self._requests = 0
        self._max_requests = max_requests

    def run(self):
        """Create one random 1-5 star rating per request until the budget
        is exhausted."""
        while self._requests < self._max_requests:
            candidates = Book.objects.all()
            chosen = random.choice(candidates)
            stars = random.randint(1, 5)
            rating = Rating()
            rating.book = chosen
            rating.stars = stars
            rating.save()
            self._requests += 1
| [
"bookstore.models.Rating",
"random.choice",
"random.randint",
"bookstore.models.Book.objects.all"
] | [((1495, 1513), 'bookstore.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (1511, 1513), False, 'from bookstore.models import Book, Rating\n'), ((1533, 1553), 'random.choice', 'random.choice', (['books'], {}), '(books)\n', (1546, 1553), False, 'import random\n'), ((1573, 1593), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (1587, 1593), False, 'import random\n'), ((1615, 1623), 'bookstore.models.Rating', 'Rating', ([], {}), '()\n', (1621, 1623), False, 'from bookstore.models import Book, Rating\n')] |
# Copyright (c) 2021, Fruiti Limited
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
def convert_custom_timestamp_range(timestamp_range: str) -> list:
    """Split a "<start>_<end>" timestamp pair and convert the first two
    fields to ISO datetime strings, returning the resulting list."""
    parts = timestamp_range.split("_")
    parts[0] = convert_timestamp_to_iso_datetime(parts[0])
    parts[1] = convert_timestamp_to_iso_datetime(parts[1])
    return parts
def convert_iso_datetime_to_timestamp(iso_datetime: str) -> int:
    """Parse an ISO-8601 datetime string and return its Unix timestamp
    truncated to whole seconds."""
    parsed = datetime.fromisoformat(iso_datetime)
    return int(parsed.timestamp())
def convert_timestamp_to_iso_datetime(timestamp: int) -> str:
return str(datetime.fromtimestamp(int(timestamp)).isoformat())
| [
"datetime.datetime.fromisoformat"
] | [((554, 590), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['iso_datetime'], {}), '(iso_datetime)\n', (576, 590), False, 'from datetime import datetime\n')] |
import numpy as np
import cv2
import argparse
from collections import deque
import keyboard as kb
import time
from pynput.keyboard import Key, Controller, Listener
class points(object):
    """Minimal 2-D coordinate holder used for marker positions."""

    def __init__(self, x, y):
        # Plain attribute storage; no behavior attached.
        self.x, self.y = x, y
# Movement thresholds in pixels: a bounding box smaller than `sm_threshold`
# means the marker has come to rest; a travel larger than `lg_threshold`
# counts as a deliberate swipe.
sm_threshold = 100
lg_threshold = 200
guiding = True  # toggled by 'q'/'w' below; NOTE(review): never read afterwards
keyboard = Controller()  # pynput controller used to synthesize arrow-key presses
cap = cv2.VideoCapture(0)  # default webcam
pts = deque(maxlen=64)  # recent marker centers, drawn as a fading trail
# HSV colour bounds of the tracked marker.
# NOTE(review): hue 110-130 is the *blue* band in OpenCV HSV despite the names.
Lower_green = np.array([110, 50, 50])
Upper_green = np.array([130, 255, 255])
startPoint =endPoint = points(0,0)
recentPoints = deque()  # sliding window (16 entries) of recent marker positions
# counter = 0
# prev_x = 0
# prev_y = 0
while True:
    # 'q' pauses gesture guiding, 'w' resumes it.
    if kb.is_pressed('q'):
        guiding = False
    if kb.is_pressed('w'):
        guiding = True
    # NOTE(review): `ret` is not checked; a failed frame grab would crash cvtColor.
    ret, img = cap.read()
    # Segment the marker colour in HSV space and clean the mask with
    # erosion, morphological opening and dilation.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.inRange(hsv, Lower_green, Upper_green)
    mask = cv2.erode(mask, kernel, iterations=2)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)
    mask = cv2.dilate(mask, kernel, iterations=1)
    res = cv2.bitwise_and(img, img, mask=mask)
    # [-2:] keeps this working across OpenCV versions whose findContours
    # return signatures differ.
    cnts, heir = cv2.findContours(
        mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    center = None
    if len(cnts) > 0:
        # Track the largest detected contour as the marker.
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # Added code
        recentPoints.append(points(x,y))
        if len(recentPoints)>16:
            recentPoints.popleft()
        if len(recentPoints) == 16:
            # Bounding box of the window: a small box means the marker has
            # settled, which ends a potential swipe gesture.
            min_X = min([p.x for p in recentPoints])
            max_X = max([p.x for p in recentPoints])
            min_Y = min([p.y for p in recentPoints])
            max_Y = max([p.y for p in recentPoints])
            if max_X-min_X <= sm_threshold or max_Y-min_Y<=sm_threshold:
                # EndPoint as average of recentPoints
                # endPoint_X = sum([p.x for p in recentPoints])/len(recentPoints)
                # endPoint_Y = sum([p.y for p in recentPoints])/ len(recentPoints)
                # endPoint = points(endPoint_X, endPoint_Y)
                endPoint = points(x,y)
                # Dominant axis picks horizontal vs. vertical swipe; the
                # 0.625 factor presumably compensates for the frame's
                # aspect ratio -- TODO confirm.
                if abs(startPoint.x-endPoint.x)*0.625 > abs(startPoint.y- endPoint.y):
                    if startPoint.x - endPoint.x > lg_threshold:
                        print('right')
                        keyboard.press(Key.right)
                        keyboard.release(Key.right)
                        startPoint = endPoint
                        recentPoints = deque()
                    elif startPoint.x - endPoint.x < -lg_threshold:
                        print('left')
                        keyboard.press(Key.left)
                        keyboard.release(Key.left)
                        startPoint = endPoint
                        recentPoints = deque()
                else:
                    if startPoint.y - endPoint.y > lg_threshold*0.625 :
                        print('up')
                        keyboard.press(Key.up)
                        keyboard.release(Key.up)
                        startPoint = endPoint
                        recentPoints = deque()
                    elif startPoint.y - endPoint.y < -lg_threshold*0.625:
                        print('down')
                        keyboard.press(Key.down)
                        keyboard.release(Key.down)
                        startPoint = endPoint
                        recentPoints = deque()
        #print(x, y)
        # time.sleep(0.1)
        # counter += 1
        # if counter == 32:
        #     prev_x = x
        #     prev_y = y
        # if counter > 32:
        #     if abs(x - prev_x) > abs(y - prev_y):
        #         if x - prev_x > 100:
        #             print('left')
        #             keyboard.press(Key.left)
        #             keyboard.release(Key.left)
        #             # time.sleep(0.7)
        #             counter = 0
        #         elif x - prev_x < -100:
        #             print('right')
        #             keyboard.press(Key.right)
        #             keyboard.release(Key.right)
        #             counter = 0
        #     else:
        #         if y - prev_y > 100:
        #             print('down')
        #             keyboard.press(Key.down)
        #             keyboard.release(Key.down)
        #             counter = 0
        #             # time.sleep(0.7)
        #         elif y - prev_y < -100:
        #             print('up')
        #             keyboard.press(Key.up)
        #             keyboard.release(Key.up)
        #             counter = 0
        #             # time.sleep(0.7)
        # Draw the enclosing circle and its centroid onto the preview frame.
        if radius > 5:
            cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(img, center, 5, (0, 0, 255), -1)
    pts.appendleft(center)
    # Render the motion trail, thicker for more recent points.
    for i in range(1, len(pts)):
        if pts[i - 1]is None or pts[i] is None:
            continue
        thick = int(np.sqrt(len(pts) / float(i + 1)) * 2.5)
        cv2.line(img, pts[i - 1], pts[i], (0, 0, 225), thick)
    cv2.imshow("Frame", img)
    # cv2.imshow("mask",mask)
    # cv2.imshow("res",res)
    k = cv2.waitKey(1) & 0xFF
    if k == ord("p"):
        break
# cleanup the camera and close any open windows
cap.release()
cv2.destroyAllWindows()
| [
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"collections.deque",
"cv2.erode",
"cv2.line",
"cv2.waitKey",
"numpy.ones",
"cv2.minEnclosingCircle",
"keyboard.is_pressed",
"cv2.morphologyEx",
"cv2.circle",
"cv2.cvtColor",
"cv2.moments",
"pynput.keyboard.Controller",
"cv2.inRange"... | [((324, 336), 'pynput.keyboard.Controller', 'Controller', ([], {}), '()\n', (334, 336), False, 'from pynput.keyboard import Key, Controller, Listener\n'), ((344, 363), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (360, 363), False, 'import cv2\n'), ((371, 387), 'collections.deque', 'deque', ([], {'maxlen': '(64)'}), '(maxlen=64)\n', (376, 387), False, 'from collections import deque\n'), ((403, 426), 'numpy.array', 'np.array', (['[110, 50, 50]'], {}), '([110, 50, 50])\n', (411, 426), True, 'import numpy as np\n'), ((441, 466), 'numpy.array', 'np.array', (['[130, 255, 255]'], {}), '([130, 255, 255])\n', (449, 466), True, 'import numpy as np\n'), ((520, 527), 'collections.deque', 'deque', ([], {}), '()\n', (525, 527), False, 'from collections import deque\n'), ((5289, 5312), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5310, 5312), False, 'import cv2\n'), ((588, 606), 'keyboard.is_pressed', 'kb.is_pressed', (['"""q"""'], {}), "('q')\n", (601, 606), True, 'import keyboard as kb\n'), ((639, 657), 'keyboard.is_pressed', 'kb.is_pressed', (['"""w"""'], {}), "('w')\n", (652, 657), True, 'import keyboard as kb\n'), ((718, 754), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (730, 754), False, 'import cv2\n'), ((768, 793), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (775, 793), True, 'import numpy as np\n'), ((805, 847), 'cv2.inRange', 'cv2.inRange', (['hsv', 'Lower_green', 'Upper_green'], {}), '(hsv, Lower_green, Upper_green)\n', (816, 847), False, 'import cv2\n'), ((859, 896), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {'iterations': '(2)'}), '(mask, kernel, iterations=2)\n', (868, 896), False, 'import cv2\n'), ((908, 954), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(mask, cv2.MORPH_OPEN, kernel)\n', (924, 954), False, 'import cv2\n'), ((1023, 1061), 'cv2.dilate', 'cv2.dilate', 
(['mask', 'kernel'], {'iterations': '(1)'}), '(mask, kernel, iterations=1)\n', (1033, 1061), False, 'import cv2\n'), ((1072, 1108), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (1087, 1108), False, 'import cv2\n'), ((5077, 5101), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'img'], {}), "('Frame', img)\n", (5087, 5101), False, 'import cv2\n'), ((1325, 1350), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (1347, 1350), False, 'import cv2\n'), ((1363, 1377), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (1374, 1377), False, 'import cv2\n'), ((5018, 5071), 'cv2.line', 'cv2.line', (['img', 'pts[i - 1]', 'pts[i]', '(0, 0, 225)', 'thick'], {}), '(img, pts[i - 1], pts[i], (0, 0, 225), thick)\n', (5026, 5071), False, 'import cv2\n'), ((5169, 5183), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5180, 5183), False, 'import cv2\n'), ((4776, 4819), 'cv2.circle', 'cv2.circle', (['img', 'center', '(5)', '(0, 0, 255)', '(-1)'], {}), '(img, center, 5, (0, 0, 255), -1)\n', (4786, 4819), False, 'import cv2\n'), ((2574, 2581), 'collections.deque', 'deque', ([], {}), '()\n', (2579, 2581), False, 'from collections import deque\n'), ((3145, 3152), 'collections.deque', 'deque', ([], {}), '()\n', (3150, 3152), False, 'from collections import deque\n'), ((2851, 2858), 'collections.deque', 'deque', ([], {}), '()\n', (2856, 2858), False, 'from collections import deque\n'), ((3461, 3468), 'collections.deque', 'deque', ([], {}), '()\n', (3466, 3468), False, 'from collections import deque\n')] |
import re
# Exercise script: sum every integer embedded in the text file.
numlist = []
# `with` guarantees the file handle is closed (the original leaked it).
with open('regex_sum_46353.txt') as data:
    for line in data:
        # Collect every maximal run of digits on the line.  float() is
        # kept so the printed total matches the original output format.
        for token in re.findall('[0-9]+', line):
            numlist.append(float(token))
num_sum = sum(numlist)
print(num_sum)
| [
"re.findall"
] | [((120, 146), 're.findall', 're.findall', (['"""[0-9]+"""', 'line'], {}), "('[0-9]+', line)\n", (130, 146), False, 'import re\n')] |
import json
import time
import datetime
import string
import calendar
from helpers import get_cpu_temp, check_login, password_hash
import web
import gv # Gain access to ospi's settings
from urls import urls # Gain access to ospi's URL list
from webpages import ProtectedPage, WebPage
##############
## New URLs ##

# Register the mobile-app JSON endpoints on OSPi's global URL map
# (path, handler-class pairs, flat as web.py expects).
urls.extend([
    '/jo', 'plugins.mobile_app.options',
    '/jc', 'plugins.mobile_app.cur_settings',
    '/js', 'plugins.mobile_app.station_state',
    '/jp', 'plugins.mobile_app.program_info',
    '/jn', 'plugins.mobile_app.station_info',
    '/jl', 'plugins.mobile_app.get_logs',
    '/sp', 'plugins.mobile_app.set_password'])
#######################
## Class definitions ##
class options(WebPage):  # /jo
    """Returns device options as json."""

    def GET(self):
        web.header('Access-Control-Allow-Origin', '*')
        web.header('Content-Type', 'application/json')
        web.header('Cache-Control', 'no-cache')
        # Unauthenticated callers only learn the firmware version string.
        if not check_login():
            return json.dumps({"fwv": gv.ver_str + '-OSPi'})
        return json.dumps({
            "fwv": gv.ver_str + '-OSPi',
            "tz": gv.sd['tz'],
            "ext": gv.sd['nbrd'] - 1,
            "seq": gv.sd['seq'],
            "sdt": gv.sd['sdt'],
            "mas": gv.sd['mas'],
            "mton": gv.sd['mton'],
            "mtof": gv.sd['mtoff'],
            "urs": gv.sd['urs'],
            "rso": gv.sd['rst'],
            "wl": gv.sd['wl'],
            "ipas": gv.sd['ipas'],
            "reset": gv.sd['rbt'],
            "lg": gv.sd['lg'],
        })
class cur_settings(ProtectedPage):  # /jc
    """Returns current settings as json."""

    def GET(self):
        web.header('Access-Control-Allow-Origin', '*')
        web.header('Content-Type', 'application/json')
        web.header('Cache-Control', 'no-cache')
        payload = {
            "devt": gv.now,
            "nbrd": gv.sd['nbrd'],
            "en": gv.sd['en'],
            "rd": gv.sd['rd'],
            "rs": gv.sd['rs'],
            "mm": gv.sd['mm'],
            "rdst": gv.sd['rdst'],
            "loc": gv.sd['loc'],
            "sbits": gv.sbits,
            "ps": gv.ps,
            "lrun": gv.lrun,
            "ct": get_cpu_temp(gv.sd['tu']),  # current CPU temperature reading
            "tu": gv.sd['tu'],
        }
        return json.dumps(payload)
class station_state(ProtectedPage):  # /js
    """Returns station status and total number of stations as json."""

    def GET(self):
        web.header('Access-Control-Allow-Origin', '*')
        web.header('Content-Type', 'application/json')
        web.header('Cache-Control', 'no-cache')
        return json.dumps({"sn": gv.srvals, "nstations": gv.sd['nst']})
class program_info(ProtectedPage):  # /jp
    """Returns program data as json."""

    def GET(self):
        local_programs = []
        # Days since the Unix epoch, shifted by the configured tz offset.
        dse = int((time.time() + ((gv.sd['tz'] / 4) - 12) * 3600) / 86400)
        for prog in gv.pd:
            entry = prog[:]  # copy so the stored program is not mutated
            if entry[1] >= 128 and entry[2] > 1:
                # Presumably interval programs (flag bit 128): re-express
                # the start day relative to today -- verify against gv.pd.
                rel_rem = (((entry[1] - 128) + entry[2]) - (dse % entry[2])) % entry[2]
                entry[1] = rel_rem + 128
            local_programs.append(entry)
        web.header('Access-Control-Allow-Origin', '*')
        web.header('Content-Type', 'application/json')
        web.header('Cache-Control', 'no-cache')
        return json.dumps({
            "nprogs": gv.sd['nprogs'] - 1,
            "nboards": gv.sd['nbrd'],
            "mnp": 9999,
            'pd': local_programs,
        })
class station_info(ProtectedPage):  # /jn
    """Returns station information as json."""

    def GET(self):
        # Invert each 'show' bitmask byte to get the disabled-station mask.
        disable = [~byte & 255 for byte in gv.sd['show']]
        web.header('Access-Control-Allow-Origin', '*')
        web.header('Content-Type', 'application/json')
        web.header('Cache-Control', 'no-cache')
        return json.dumps({
            "snames": gv.snames,
            "ignore_rain": gv.sd['ir'],
            "masop": gv.sd['mo'],
            "stn_dis": disable,
            "maxlen": gv.sd['snlen'],
        })
class get_logs(ProtectedPage):  # /jl
    """Returns log information for specified date range."""

    def GET(self):
        # Returns [pid, station, duration_seconds, timestamp] rows for log
        # entries whose date lies in the 'start'..'end' query-string range
        # (Unix timestamps).
        records = self.read_log()
        data = []
        qdict = web.input()
        web.header('Access-Control-Allow-Origin', '*')
        web.header('Content-Type', 'application/json')
        web.header('Cache-Control', 'no-cache')
        # NOTE(review): this path returns a list while the normal path
        # returns a JSON string -- confirm callers tolerate both.
        if 'start' not in qdict or 'end' not in qdict:
            return []
        for r in records:
            event = json.loads(r)
            # Midnight of the entry's date as a local-time Unix timestamp.
            date = time.mktime(datetime.datetime.strptime(event["date"], "%Y-%m-%d").timetuple())
            if int(qdict["start"]) <= int(date) <= int(qdict["end"]):
                pid = event["program"]
                # Pseudo-program ids used by the mobile app.
                if pid == "Run-once":
                    pid = 98
                if pid == "Manual":
                    pid = 99
                pid = int(pid)
                station = int(event["station"])
                # Duration "a:b" becomes a*60 + b.  NOTE: string.split is a
                # Python 2 idiom (the function was removed in Python 3).
                duration = string.split(event["duration"], ":")
                duration = (int(duration[0]) * 60) + int(duration[1])
                timestamp = int(time.mktime(utc_to_local(datetime.datetime.strptime(event["date"] + " " + event["start"], "%Y-%m-%d %H:%M:%S")).timetuple()))
                data.append([pid, station, duration, timestamp])
        return json.dumps(data)

    def read_log(self):
        # Raw JSON log lines from disk; empty list when there is no log.
        try:
            with open('./data/log.json') as logf:
                records = logf.readlines()
            return records
        except IOError:
            return []
class set_password():
    """Save changes to device password"""

    def GET(self):
        """Validate the old password and store a hash of the new one.

        Result codes: 1 = success, 2 = old password wrong,
        3 = missing/empty fields, 4 = confirmation missing or mismatched.
        """
        qdict = web.input()
        web.header('Access-Control-Allow-Origin', '*')
        web.header('Content-Type', 'application/json')
        web.header('Cache-Control', 'no-cache')
        # `in` instead of dict.has_key(), which was removed in Python 3.
        if 'pw' not in qdict or 'npw' not in qdict or 'cpw' not in qdict:
            return json.dumps({"result": 3})
        if password_hash(qdict['pw'], gv.sd['salt']) == gv.sd['password']:
            if qdict['npw'] == "":
                return json.dumps({"result": 3})
            elif qdict['cpw'] != '' and qdict['cpw'] == qdict['npw']:
                gv.sd['password'] = password_hash(qdict['npw'], gv.sd['salt'])
            else:
                return json.dumps({"result": 4})
        else:
            return json.dumps({"result": 2})
        return json.dumps({"result": 1})
def utc_to_local(utc_dt):
# get integer timestamp to avoid precision lost
timestamp = calendar.timegm(utc_dt.timetuple())
local_dt = datetime.datetime.fromtimestamp(timestamp)
assert utc_dt.resolution >= datetime.timedelta(microseconds=1)
return local_dt.replace(microsecond=utc_dt.microsecond)
| [
"urls.urls.extend",
"helpers.get_cpu_temp",
"datetime.datetime.fromtimestamp",
"json.loads",
"helpers.password_hash",
"datetime.datetime.strptime",
"string.split",
"json.dumps",
"helpers.check_login",
"web.input",
"datetime.timedelta",
"time.time",
"web.header"
] | [((334, 653), 'urls.urls.extend', 'urls.extend', (["['/jo', 'plugins.mobile_app.options', '/jc',\n 'plugins.mobile_app.cur_settings', '/js',\n 'plugins.mobile_app.station_state', '/jp',\n 'plugins.mobile_app.program_info', '/jn',\n 'plugins.mobile_app.station_info', '/jl', 'plugins.mobile_app.get_logs',\n '/sp', 'plugins.mobile_app.set_password']"], {}), "(['/jo', 'plugins.mobile_app.options', '/jc',\n 'plugins.mobile_app.cur_settings', '/js',\n 'plugins.mobile_app.station_state', '/jp',\n 'plugins.mobile_app.program_info', '/jn',\n 'plugins.mobile_app.station_info', '/jl', 'plugins.mobile_app.get_logs',\n '/sp', 'plugins.mobile_app.set_password'])\n", (345, 653), False, 'from urls import urls\n'), ((7032, 7074), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (7063, 7074), False, 'import datetime\n'), ((830, 876), 'web.header', 'web.header', (['"""Access-Control-Allow-Origin"""', '"""*"""'], {}), "('Access-Control-Allow-Origin', '*')\n", (840, 876), False, 'import web\n'), ((886, 932), 'web.header', 'web.header', (['"""Content-Type"""', '"""application/json"""'], {}), "('Content-Type', 'application/json')\n", (896, 932), False, 'import web\n'), ((942, 981), 'web.header', 'web.header', (['"""Cache-Control"""', '"""no-cache"""'], {}), "('Cache-Control', 'no-cache')\n", (952, 981), False, 'import web\n'), ((994, 1007), 'helpers.check_login', 'check_login', ([], {}), '()\n', (1005, 1007), False, 'from helpers import get_cpu_temp, check_login, password_hash\n'), ((1707, 1724), 'json.dumps', 'json.dumps', (['jopts'], {}), '(jopts)\n', (1717, 1724), False, 'import json\n'), ((1844, 1890), 'web.header', 'web.header', (['"""Access-Control-Allow-Origin"""', '"""*"""'], {}), "('Access-Control-Allow-Origin', '*')\n", (1854, 1890), False, 'import web\n'), ((1900, 1946), 'web.header', 'web.header', (['"""Content-Type"""', '"""application/json"""'], {}), "('Content-Type', 'application/json')\n", (1910, 1946), False, 
'import web\n'), ((1956, 1995), 'web.header', 'web.header', (['"""Cache-Control"""', '"""no-cache"""'], {}), "('Cache-Control', 'no-cache')\n", (1966, 1995), False, 'import web\n'), ((2476, 2497), 'json.dumps', 'json.dumps', (['jsettings'], {}), '(jsettings)\n', (2486, 2497), False, 'import json\n'), ((2647, 2693), 'web.header', 'web.header', (['"""Access-Control-Allow-Origin"""', '"""*"""'], {}), "('Access-Control-Allow-Origin', '*')\n", (2657, 2693), False, 'import web\n'), ((2703, 2749), 'web.header', 'web.header', (['"""Content-Type"""', '"""application/json"""'], {}), "('Content-Type', 'application/json')\n", (2713, 2749), False, 'import web\n'), ((2759, 2798), 'web.header', 'web.header', (['"""Cache-Control"""', '"""no-cache"""'], {}), "('Cache-Control', 'no-cache')\n", (2769, 2798), False, 'import web\n'), ((2917, 2935), 'json.dumps', 'json.dumps', (['jstate'], {}), '(jstate)\n', (2927, 2935), False, 'import json\n'), ((3446, 3492), 'web.header', 'web.header', (['"""Access-Control-Allow-Origin"""', '"""*"""'], {}), "('Access-Control-Allow-Origin', '*')\n", (3456, 3492), False, 'import web\n'), ((3502, 3548), 'web.header', 'web.header', (['"""Content-Type"""', '"""application/json"""'], {}), "('Content-Type', 'application/json')\n", (3512, 3548), False, 'import web\n'), ((3558, 3597), 'web.header', 'web.header', (['"""Cache-Control"""', '"""no-cache"""'], {}), "('Cache-Control', 'no-cache')\n", (3568, 3597), False, 'import web\n'), ((3777, 3795), 'json.dumps', 'json.dumps', (['jpinfo'], {}), '(jpinfo)\n', (3787, 3795), False, 'import json\n'), ((4021, 4067), 'web.header', 'web.header', (['"""Access-Control-Allow-Origin"""', '"""*"""'], {}), "('Access-Control-Allow-Origin', '*')\n", (4031, 4067), False, 'import web\n'), ((4077, 4123), 'web.header', 'web.header', (['"""Content-Type"""', '"""application/json"""'], {}), "('Content-Type', 'application/json')\n", (4087, 4123), False, 'import web\n'), ((4133, 4172), 'web.header', 'web.header', 
(['"""Cache-Control"""', '"""no-cache"""'], {}), "('Cache-Control', 'no-cache')\n", (4143, 4172), False, 'import web\n'), ((4403, 4421), 'json.dumps', 'json.dumps', (['jpinfo'], {}), '(jpinfo)\n', (4413, 4421), False, 'import json\n'), ((4617, 4628), 'web.input', 'web.input', ([], {}), '()\n', (4626, 4628), False, 'import web\n'), ((4640, 4686), 'web.header', 'web.header', (['"""Access-Control-Allow-Origin"""', '"""*"""'], {}), "('Access-Control-Allow-Origin', '*')\n", (4650, 4686), False, 'import web\n'), ((4696, 4742), 'web.header', 'web.header', (['"""Content-Type"""', '"""application/json"""'], {}), "('Content-Type', 'application/json')\n", (4706, 4742), False, 'import web\n'), ((4752, 4791), 'web.header', 'web.header', (['"""Cache-Control"""', '"""no-cache"""'], {}), "('Cache-Control', 'no-cache')\n", (4762, 4791), False, 'import web\n'), ((5747, 5763), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (5757, 5763), False, 'import json\n'), ((6081, 6092), 'web.input', 'web.input', ([], {}), '()\n', (6090, 6092), False, 'import web\n'), ((6102, 6148), 'web.header', 'web.header', (['"""Access-Control-Allow-Origin"""', '"""*"""'], {}), "('Access-Control-Allow-Origin', '*')\n", (6112, 6148), False, 'import web\n'), ((6158, 6204), 'web.header', 'web.header', (['"""Content-Type"""', '"""application/json"""'], {}), "('Content-Type', 'application/json')\n", (6168, 6204), False, 'import web\n'), ((6214, 6253), 'web.header', 'web.header', (['"""Cache-Control"""', '"""no-cache"""'], {}), "('Cache-Control', 'no-cache')\n", (6224, 6253), False, 'import web\n'), ((6856, 6881), 'json.dumps', 'json.dumps', (["{'result': 1}"], {}), "({'result': 1})\n", (6866, 6881), False, 'import json\n'), ((7108, 7142), 'datetime.timedelta', 'datetime.timedelta', ([], {'microseconds': '(1)'}), '(microseconds=1)\n', (7126, 7142), False, 'import datetime\n'), ((2389, 2414), 'helpers.get_cpu_temp', 'get_cpu_temp', (["gv.sd['tu']"], {}), "(gv.sd['tu'])\n", (2401, 2414), False, 'from 
helpers import get_cpu_temp, check_login, password_hash\n'), ((4923, 4936), 'json.loads', 'json.loads', (['r'], {}), '(r)\n', (4933, 4936), False, 'import json\n'), ((6372, 6397), 'json.dumps', 'json.dumps', (["{'result': 3}"], {}), "({'result': 3})\n", (6382, 6397), False, 'import json\n'), ((6411, 6452), 'helpers.password_hash', 'password_hash', (["qdict['pw']", "gv.sd['salt']"], {}), "(qdict['pw'], gv.sd['salt'])\n", (6424, 6452), False, 'from helpers import get_cpu_temp, check_login, password_hash\n'), ((6813, 6838), 'json.dumps', 'json.dumps', (["{'result': 2}"], {}), "({'result': 2})\n", (6823, 6838), False, 'import json\n'), ((5394, 5430), 'string.split', 'string.split', (["event['duration']", '""":"""'], {}), "(event['duration'], ':')\n", (5406, 5430), False, 'import string\n'), ((6535, 6560), 'json.dumps', 'json.dumps', (["{'result': 3}"], {}), "({'result': 3})\n", (6545, 6560), False, 'import json\n'), ((3104, 3115), 'time.time', 'time.time', ([], {}), '()\n', (3113, 3115), False, 'import time\n'), ((6667, 6709), 'helpers.password_hash', 'password_hash', (["qdict['npw']", "gv.sd['salt']"], {}), "(qdict['npw'], gv.sd['salt'])\n", (6680, 6709), False, 'from helpers import get_cpu_temp, check_login, password_hash\n'), ((6753, 6778), 'json.dumps', 'json.dumps', (["{'result': 4}"], {}), "({'result': 4})\n", (6763, 6778), False, 'import json\n'), ((4969, 5022), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["event['date']", '"""%Y-%m-%d"""'], {}), "(event['date'], '%Y-%m-%d')\n", (4995, 5022), False, 'import datetime\n'), ((5560, 5649), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["(event['date'] + ' ' + event['start'])", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(event['date'] + ' ' + event['start'],\n '%Y-%m-%d %H:%M:%S')\n", (5586, 5649), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
# import json
from tibanna_ffcommon.exceptions import exception_coordinator
from tibanna_cgap.start_run import start_run
from tibanna_cgap.vars import AWS_REGION, LAMBDA_TYPE
# Deployment configuration for the start_run AWS Lambda function.
config = {
    'function_name': 'start_run_' + LAMBDA_TYPE,
    'function_module': 'service',
    'function_handler': 'handler',
    'handler': 'service.handler',
    'region': AWS_REGION,
    'runtime': 'python3.6',
    'role': 'lambda_full_s3',
    'description': 'Tibanna zebra start_run',
    'timeout': 300,
    'memory_size': 256
}
def metadata_only(event):
    """Handle a metadata-only run; the event must already list output files."""
    # Relies on the event carrying an 'output_files' key with the outputs.
    assert event['metadata_only']
    assert event['output_files']
    return real_handler(event, None)
@exception_coordinator('start_run', metadata_only)
def handler(event, context):
    """Lambda entry point; normalizes push_error_to_end before delegating."""
    # Default to pushing errors to the end of the workflow (pony behavior);
    # an explicitly falsy value in the event is left untouched.
    push_errors = event.get('push_error_to_end', True)
    if push_errors:
        event['push_error_to_end'] = True
    return real_handler(event, context)
def real_handler(event, context):
    """Delegate to tibanna_cgap.start_run; `context` is unused here."""
    return start_run(event)
| [
"tibanna_ffcommon.exceptions.exception_coordinator",
"tibanna_cgap.start_run.start_run"
] | [((757, 806), 'tibanna_ffcommon.exceptions.exception_coordinator', 'exception_coordinator', (['"""start_run"""', 'metadata_only'], {}), "('start_run', metadata_only)\n", (778, 806), False, 'from tibanna_ffcommon.exceptions import exception_coordinator\n'), ((1051, 1067), 'tibanna_cgap.start_run.start_run', 'start_run', (['event'], {}), '(event)\n', (1060, 1067), False, 'from tibanna_cgap.start_run import start_run\n')] |
# from https://github.com/django-extensions/django-extensions/blob/master/run_tests.py
from django.conf import settings
from django.core.management import call_command
def main():
    """Configure a minimal in-memory Django environment and run the
    ajaxuploader test suite."""
    test_settings = dict(
        INSTALLED_APPS=(
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.admin',
            'django.contrib.sessions',
            'ajaxuploader',
        ),
        # Django replaces this, but it still wants it. *shrugs*
        DATABASE_ENGINE='django.db.backends.sqlite3',
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
            }
        },
        MEDIA_ROOT='/tmp/ajaxuploader_test_media/',
        MEDIA_PATH='/media/',
        ROOT_URLCONF='ajaxuploader.urls',
        DEBUG=True,
        TEMPLATE_DEBUG=True,
    )
    settings.configure(**test_settings)
    # Fire off the tests
    call_command('test', 'ajaxuploader')
# Script entry point.
if __name__ == '__main__':
    main()
| [
"django.core.management.call_command",
"django.conf.settings.configure"
] | [((295, 722), 'django.conf.settings.configure', 'settings.configure', ([], {'INSTALLED_APPS': "('django.contrib.auth', 'django.contrib.contenttypes',\n 'django.contrib.admin', 'django.contrib.sessions', 'ajaxuploader')", 'DATABASE_ENGINE': '"""django.db.backends.sqlite3"""', 'DATABASES': "{'default': {'ENGINE': 'django.db.backends.sqlite3'}}", 'MEDIA_ROOT': '"""/tmp/ajaxuploader_test_media/"""', 'MEDIA_PATH': '"""/media/"""', 'ROOT_URLCONF': '"""ajaxuploader.urls"""', 'DEBUG': '(True)', 'TEMPLATE_DEBUG': '(True)'}), "(INSTALLED_APPS=('django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.admin',\n 'django.contrib.sessions', 'ajaxuploader'), DATABASE_ENGINE=\n 'django.db.backends.sqlite3', DATABASES={'default': {'ENGINE':\n 'django.db.backends.sqlite3'}}, MEDIA_ROOT=\n '/tmp/ajaxuploader_test_media/', MEDIA_PATH='/media/', ROOT_URLCONF=\n 'ajaxuploader.urls', DEBUG=True, TEMPLATE_DEBUG=True)\n", (313, 722), False, 'from django.conf import settings\n'), ((1002, 1038), 'django.core.management.call_command', 'call_command', (['"""test"""', '"""ajaxuploader"""'], {}), "('test', 'ajaxuploader')\n", (1014, 1038), False, 'from django.core.management import call_command\n')] |
#!/usr/bin/env python3
"""Simulation of Shor's algorithm for integer factorization."""
import cmath
import math
import numpy as np
import random
class QuMem:
    """Representation of the memory of the quantum computer."""

    def __init__(self, t, n):
        """Allocate t + n qubits, enumerating all 2^(t+n) basis states.

        For Shor's algorithm t satisfies N^2 <= 2^t < 2N^2.  The state is
        stored explicitly: amplitudes[i] is the amplitude of the basis
        state |fst[i], lst[i]>.  The memory starts in |0, 0>.
        """
        self.t = t  # width of the first register
        self.n = n  # width of the second register
        total = 2 ** (t + n)
        width_lst = 2 ** n
        # Basis states in lexicographic order: index i maps to
        # fst = i // 2^n, lst = i % 2^n.
        self.fst = [i // width_lst for i in range(total)]
        self.lst = [i % width_lst for i in range(total)]
        self.amplitudes = [0] * total
        self.amplitudes[0] = 1  # definite initial state |0, 0>

    def measure(self):
        """Measure the first t qubits: a weighted random choice of the
        possible first-register values, weighted by |amplitude|^2."""
        weights = [abs(a) ** 2 for a in self.amplitudes]
        return np.random.choice(self.fst, p=weights)

    def __len__(self):
        """Equal to 2^(t+n); here for convenience."""
        return len(self.amplitudes)

    def __iter__(self):
        """Iterate over (amplitude, fst, lst) triples; for convenience."""
        return iter(zip(self.amplitudes, self.fst, self.lst))

    def __repr__(self):
        """Linear-combination form: a0 |0,0> + a1 |0,1> + ..."""
        terms = ["{:.4f} |{},{}>".format(a, f, l) for a, f, l in self]
        return " + ".join(terms)
def hadamard(mem):
    """Apply the Hadamard gate to the first t qubits.

    Afterwards the measuring probability is equidistributed over the
    first register, while the last n qubits stay in |0>.
    """
    for i, (_, _, lst) in enumerate(mem):
        # Only basis states whose second register is |0> get amplitude.
        if lst == 0:
            mem.amplitudes[i] = 1 / math.sqrt(2 ** mem.t)
    return mem
def mod_exp(mem, x, N):
    """Apply |j, k> -> |j, k + x^j mod N>.

    In Shor's algorithm k is always 0, so this simply writes the modular
    exponentiation x^j mod N into the second register.
    """
    for i, (_, j, _) in enumerate(mem):
        mem.lst[i] = pow(x, j, N)
    return mem
def qft(mem):
    """Apply the quantum Fourier transform to the first t qubits."""
    N = 2 ** mem.t
    # Primitive N-th root of unity, computed once; complex
    # exponentiation is expensive, so powers are taken below instead.
    root = cmath.exp(2 * math.pi * 1j / N)
    transformed = []
    for k in range(len(mem)):
        total = 0
        for j in range(N):
            total += root ** (j * k) * mem.amplitudes[j]
        transformed.append(total / math.sqrt(N))
    mem.amplitudes = transformed
    return mem
def denominator(x, qmax):
    """Return the denominator q of the best rational approximation p/q
    for x with q < qmax (continued-fraction expansion)."""
    y = x
    prev_q, cur_q = 0, 1
    while True:
        frac = y - math.floor(y)  # fractional part of y
        if frac < 0.5 / qmax ** 2:
            return cur_q
        y = 1 / frac
        next_q = math.floor(y) * cur_q + prev_q
        if next_q >= qmax:
            return cur_q
        prev_q, cur_q = cur_q, next_q
def shor(N, a):
    """Simulation of Shor's algorithm for order finding."""
    assert 1 < a < N
    while True:
        # Register sizes: n work qubits, and t such that N^2 <= 2^t < 2N^2.
        n = N.bit_length()
        t = math.ceil(math.log(N**2, 2))
        mem = QuMem(t, n)
        hadamard(mem)
        mod_exp(mem, a, N)
        qft(mem)
        measured = mem.measure()
        if measured == 0:
            print("| measured zero, trying again ...")
            continue
        # Best rational approximation of measured / 2^t with denominator < N.
        c = measured / 2**t
        q = denominator(c, N)
        p = math.floor(q * c + 0.5)
        print("| measured {}, approximation for {} is {}/{}"
              .format(measured, c, p, q))
        mod = pow(a, q, N)
        print("| {}^{} mod {} = {}".format(a, q, N, mod))
        if mod == 1:
            print("| got {}".format(q))
            return q
        print("| failed, trying again ...")
def prime(n):
    """Primality test by trial division."""
    if n == 2:
        return True
    if n < 2 or n % 2 == 0:
        return False
    # Only odd divisors up to sqrt(n) need checking.
    limit = math.ceil(math.sqrt(n)) + 1
    for divisor in range(3, limit, 2):
        if n % divisor == 0:
            return False
    return True
def odd_prime_power(n):
    """Test if n is a power of an odd prime."""
    if n < 3:
        return False
    # Smallest odd divisor of n up to sqrt(n), if any.
    smallest = 0
    for candidate in range(3, math.ceil(math.sqrt(n)) + 1, 2):
        if n % candidate == 0:
            smallest = candidate
            break
    if smallest == 0:
        return False
    # n is a power of `smallest` iff some exponent >= 2 reproduces it.
    return any(smallest ** exp == n
               for exp in range(2, math.ceil(math.log(n, smallest)) + 1))
def factorize(N):
    """Applies Shor's algorithm to the problem of integer factorization.

    Prints progress to stdout.  Returns a non-trivial factor of N when
    the reduction applies; for even N, primes and odd prime powers it
    only prints a diagnostic (these cases are excluded up front because
    Shor's order-finding reduction does not cover them).
    """
    assert N > 1
    if N % 2 == 0:
        print(N, "is even")
    elif prime(N):
        print(N, "is prime")
    elif odd_prime_power(N):
        print(N, "is a power of an odd prime")
    else:
        while True:
            # Random base a; if gcd(a, N) != 1 we have found a factor
            # without needing the quantum part at all.
            a = random.randint(2, N - 1)
            d = math.gcd(a, N)
            print("| picked random a =", a)
            if d != 1:
                print("| got lucky, {} = {} * {}, trying again...".format(
                    N, d, N // d))
                print("|---------------------------------------------")
            else:
                r = shor(N, a)
                # NOTE(review): shor() as written never returns None;
                # this branch looks defensive/dead — confirm.
                if r is None:
                    print("| trying again ...")
                    print("|-----------------------------------------------")
                    continue
                y = r // 2
                if r % 2 == 1:
                    # Odd order: a^(r/2) is not an integer power, retry.
                    print("| order {} is odd, trying again ...".format(r))
                    print("|-----------------------------------------------")
                elif not 1 < y < N - 1:
                    print("| 1 < {} < {} - 1 is false, trying again".format(
                        y, N))
                    print("|-----------------------------------------------")
                else:
                    # NOTE(review): textbook Shor uses
                    # gcd(a**(r//2) +/- 1, N); using y +/- 1 directly
                    # looks suspect — verify against a reference
                    # implementation before relying on this.
                    factor = max(math.gcd(y - 1, N), math.gcd(y + 1, N))
                    if factor == 1:
                        print("| factor is one, trying again ...")
                        print("|---------------------------------------------")
                    else:
                        print("| found factor: {} = {} * {}".format(
                            N, factor, N // factor))
                        return factor
if __name__ == '__main__':
    # CLI entry point: factor the integer given as the first argument.
    import sys
    if len(sys.argv) < 2:
        print("USAGE: shor.py <input>")
    else:
        print(factorize(int(sys.argv[1])))
| [
"math.floor",
"math.gcd",
"math.sqrt",
"math.log",
"cmath.exp",
"random.randint"
] | [((3073, 3106), 'cmath.exp', 'cmath.exp', (['(2 * math.pi * 1.0j / N)'], {}), '(2 * math.pi * 1.0j / N)\n', (3082, 3106), False, 'import cmath\n'), ((3544, 3557), 'math.floor', 'math.floor', (['y'], {}), '(y)\n', (3554, 3557), False, 'import math\n'), ((3923, 3942), 'math.log', 'math.log', (['(N ** 2)', '(2)'], {}), '(N ** 2, 2)\n', (3931, 3942), False, 'import math\n'), ((4269, 4292), 'math.floor', 'math.floor', (['(q * c + 0.5)'], {}), '(q * c + 0.5)\n', (4279, 4292), False, 'import math\n'), ((2549, 2570), 'math.sqrt', 'math.sqrt', (['(2 ** mem.t)'], {}), '(2 ** mem.t)\n', (2558, 2570), False, 'import math\n'), ((3285, 3297), 'math.sqrt', 'math.sqrt', (['N'], {}), '(N)\n', (3294, 3297), False, 'import math\n'), ((3663, 3676), 'math.floor', 'math.floor', (['y'], {}), '(y)\n', (3673, 3676), False, 'import math\n'), ((5082, 5094), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (5091, 5094), False, 'import math\n'), ((5242, 5261), 'math.log', 'math.log', (['n', 'factor'], {}), '(n, factor)\n', (5250, 5261), False, 'import math\n'), ((5668, 5692), 'random.randint', 'random.randint', (['(2)', '(N - 1)'], {}), '(2, N - 1)\n', (5682, 5692), False, 'import random\n'), ((5709, 5723), 'math.gcd', 'math.gcd', (['a', 'N'], {}), '(a, N)\n', (5717, 5723), False, 'import math\n'), ((6702, 6720), 'math.gcd', 'math.gcd', (['(y - 1)', 'N'], {}), '(y - 1, N)\n', (6710, 6720), False, 'import math\n'), ((6722, 6740), 'math.gcd', 'math.gcd', (['(y + 1)', 'N'], {}), '(y + 1, N)\n', (6730, 6740), False, 'import math\n'), ((4903, 4915), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (4912, 4915), False, 'import math\n')] |
import os
import cv2
from WordSegmentation import wordSegmentation, prepareImg
import json
import editdistance
from path import Path
from DataLoaderIAM import DataLoaderIAM, Batch
from Model import Model, DecoderType
from SamplePreprocessor import preprocess
import argparse
import tensorflow as tf
class FilePaths:
    """Central registry of filesystem locations used by the recognizer."""
    # character list the model was trained with
    fnCharList = 'D:/SimpleHTR/model/charList.txt'
    # JSON summary of training metrics
    fnSummary = 'D:/SimpleHTR/model/summary.json'
    # image the inference step reads from
    fnInfer = 'D:/SimpleHTR/data/test.png'
    # text corpus for the word-beam-search decoder
    fnCorpus = 'D:/SimpleHTR/data/corpus.txt'
def infer(model, fnImg):
    """Recognize text in the image at fnImg and append it to the output file.

    Args:
        model: trained Model instance used for decoding.
        fnImg: path to a grayscale word image on disk.
    """
    img = preprocess(cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE), Model.imgSize)
    batch = Batch(None, [img])
    (recognized, probability) = model.inferBatch(batch, True)
    print(f'Recognized: "{recognized[0]}"')
    print(f'Probability: {probability[0]}')
    # Context manager closes the handle even if the write fails; the
    # original left the file open on error.
    with open("D:/SimpleHTR/data/output.txt", "a") as apex:
        apex.write(recognized[0] + " ")
def main():
    """Reads images from the input directory, segments each into words,
    and runs recognition (via main.py) on every word image.

    Side effects: writes per-word crops to data/test.png, a summary
    image with bounding boxes to data/summary.png, and appends to
    data/output.txt.
    """
    # read input images from the 'input' directory
    imgFiles = os.listdir('D:/SimpleHTR/input/')
    for (i, f) in enumerate(imgFiles):
        print('Segmenting words of sample %s' % f)
        # read image, resize to fixed height and convert to grayscale
        img = prepareImg(cv2.imread('D:/SimpleHTR/input/%s' % f), 50)
        # execute segmentation with given parameters
        # -kernelSize: size of filter kernel (odd integer)
        # -sigma: standard deviation of Gaussian function used for filter kernel
        # -theta: approximated width/height ratio of words, filter function is distorted by this factor
        # -minArea: ignore word candidates smaller than specified area
        res = wordSegmentation(img, kernelSize=25, sigma=11, theta=7, minArea=100)
        print('Segmented into %d words' % len(res))
        for (j, word) in enumerate(res):
            (wordBox, wordImg) = word
            # Renamed w/h -> bw/bh: the original unpacking shadowed the
            # loop variable `w`.
            (x, y, bw, bh) = wordBox
            cv2.imwrite('D:/SimpleHTR/data/test.png', wordImg)  # save word
            # draw bounding box in summary image
            cv2.rectangle(img, (x, y), (x + bw, y + bh), 0, 1)
            tf.compat.v1.reset_default_graph()
            # HACK: re-executes the recognizer script per word; fragile
            # (depends on cwd containing main.py) but preserved as-is.
            with open('main.py') as script:
                exec(script.read())
    # output summary image with bounding boxes around words
    cv2.imwrite('D:/SimpleHTR/data/summary.png', img)
    with open("D:/SimpleHTR/data/output.txt", "a") as apex:
        apex.write("\n")
if __name__ == '__main__':
main() | [
"cv2.rectangle",
"cv2.imwrite",
"os.listdir",
"DataLoaderIAM.Batch",
"os.path.dirname",
"WordSegmentation.wordSegmentation",
"tensorflow.compat.v1.reset_default_graph",
"cv2.imread"
] | [((708, 726), 'DataLoaderIAM.Batch', 'Batch', (['None', '[img]'], {}), '(None, [img])\n', (713, 726), False, 'from DataLoaderIAM import DataLoaderIAM, Batch\n'), ((1128, 1161), 'os.listdir', 'os.listdir', (['"""D:/SimpleHTR/input/"""'], {}), "('D:/SimpleHTR/input/')\n", (1138, 1161), False, 'import os\n'), ((640, 679), 'cv2.imread', 'cv2.imread', (['fnImg', 'cv2.IMREAD_GRAYSCALE'], {}), '(fnImg, cv2.IMREAD_GRAYSCALE)\n', (650, 679), False, 'import cv2\n'), ((1813, 1881), 'WordSegmentation.wordSegmentation', 'wordSegmentation', (['img'], {'kernelSize': '(25)', 'sigma': '(11)', 'theta': '(7)', 'minArea': '(100)'}), '(img, kernelSize=25, sigma=11, theta=7, minArea=100)\n', (1829, 1881), False, 'from WordSegmentation import wordSegmentation, prepareImg\n'), ((2667, 2716), 'cv2.imwrite', 'cv2.imwrite', (['"""D:/SimpleHTR/data/summary.png"""', 'img'], {}), "('D:/SimpleHTR/data/summary.png', img)\n", (2678, 2716), False, 'import cv2\n'), ((1378, 1417), 'cv2.imread', 'cv2.imread', (["('D:/SimpleHTR/input/%s' % f)"], {}), "('D:/SimpleHTR/input/%s' % f)\n", (1388, 1417), False, 'import cv2\n'), ((2277, 2327), 'cv2.imwrite', 'cv2.imwrite', (['"""D:/SimpleHTR/data/test.png"""', 'wordImg'], {}), "('D:/SimpleHTR/data/test.png', wordImg)\n", (2288, 2327), False, 'import cv2\n'), ((2352, 2400), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0)', '(1)'], {}), '(img, (x, y), (x + w, y + h), 0, 1)\n', (2365, 2400), False, 'import cv2\n'), ((2510, 2544), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (2542, 2544), True, 'import tensorflow as tf\n'), ((2453, 2496), 'os.path.dirname', 'os.path.dirname', (['"""D:/SimpleHTR/src/main.py"""'], {}), "('D:/SimpleHTR/src/main.py')\n", (2468, 2496), False, 'import os\n')] |
#!/usr/bin/env python
"""
Module for holding information about an audio file and doing basic conversions
"""
import hashlib
import logging
import os
import subprocess
from asrtoolkit.file_utils.name_cleaners import (
generate_segmented_file_name,
sanitize_hyphens,
strip_extension,
)
from asrtoolkit.file_utils.script_input_validation import valid_input_file
LOGGER = logging.getLogger()
def cut_utterance(
    source_audio_file, target_audio_file, start_time, end_time, sample_rate=16000
):
    """
    source_audio_file: str, path to file
    target_audio_file: str, path to file
    start_time: float or str
    end_time: float or str
    sample_rate: int, default 16000; audio sample rate in Hz

    uses sox to segment source_audio_file to create target_audio_file that
    contains audio from start_time to end_time
    with audio sample rate set to sample_rate
    """
    # Argument-list invocation (no shell): file names containing spaces
    # or shell metacharacters are passed through verbatim, and no shell
    # injection is possible.
    subprocess.call(
        [
            "sox",
            "-V1",
            source_audio_file,
            "-r",
            str(sample_rate),
            "-b",
            "16",
            "-c",
            "1",
            target_audio_file,
            "trim",
            str(start_time),
            "={}".format(end_time),
        ]
    )
def degrade_audio(source_audio_file, target_audio_file=None):
    """
    Degrades audio to typical G711 level.
    Useful if models need to target this audio quality.

    Writes the result to target_audio_file (defaults to overwriting the
    source).  Intermediate files are removed even if a sox step fails.
    """
    valid_input_file(source_audio_file, ["mp3", "sph", "wav", "au", "raw"])
    if target_audio_file is None:
        target_audio_file = source_audio_file
    base = ".".join(source_audio_file.split(".")[:-1])
    tmp1 = base + "_tmp1.wav"
    tmp2 = base + "_tmp2.wav"
    try:
        # degrade to 8k a-law (argument lists avoid shell quoting issues)
        subprocess.call(
            ["sox", "-V1", source_audio_file, "-r", "8000", "-e", "a-law", tmp1]
        )
        # convert to u-law
        subprocess.call(
            ["sox", "-V1", tmp1, "--rate", "8000", "-e", "u-law", tmp2]
        )
        # upgrade to 16k signed 16-bit mono
        subprocess.call(
            [
                "sox", "-V1", tmp2,
                "--rate", "16000", "-e", "signed", "-b", "16",
                "--channel", "1", target_audio_file,
            ]
        )
    finally:
        # The original removed these unconditionally and could crash if a
        # step failed before producing them.
        for tmp in (tmp1, tmp2):
            if os.path.exists(tmp):
                os.remove(tmp)
def combine_audio(audio_files, output_file, gain=False):
    """
    Combine audio files with possible renormalization to 0dB

    audio_files: iterable of input file paths (mixed with sox -m)
    output_file: path of the combined output
    gain: when True, renormalize the mix to 0 dB
    """
    # Argument list instead of a shell string: paths with spaces or
    # metacharacters are safe, and no shell is spawned.
    cmd = ["sox", "-V1", "-m", *audio_files, output_file]
    if gain:
        cmd += ["gain", "-n", "0"]
    subprocess.call(cmd)
class audio_file(object):
    """
    Create a audio_file object for
    - storing location
    - retrieving a unique hash
    - resampling for training
    - splitting into segments given an STM file
    """

    def __init__(self, location=""):
        """
        Populate file location info.

        Raises FileNotFoundError if no file exists at `location`.
        """
        self.location = None
        if not os.path.exists(location):
            raise FileNotFoundError('Could not find file at "{}"'.format(location))
        self.location = location

    def hash(self):
        """
        Returns a sha1 hex digest of the file contents.
        """
        if self.location:
            with open(self.location, "rb") as f:
                return hashlib.sha1(f.read()).hexdigest()
        # No location recorded: stable hash of the empty string.
        return hashlib.sha1("".encode()).hexdigest()

    def prepare_for_training(self, file_name, sample_rate=16000):
        """
        Converts to single channel (from channel 1) audio file
        in SPH file format.

        Returns audio_file object on success, else None
        """
        if file_name.split(".")[-1] != "sph":
            LOGGER.warning(
                "Forcing training data to use SPH file format for %s", file_name
            )
            file_name = strip_extension(file_name) + ".sph"
        file_name = sanitize_hyphens(file_name)
        # Argument-list invocation (no shell) so paths with spaces or
        # metacharacters are handled safely; sox exits 0 on success.
        ret = subprocess.call(
            [
                "sox", "-V1", self.location, file_name,
                "rate", str(sample_rate), "remix", "-",
            ]
        )
        return audio_file(file_name) if ret == 0 else None

    def split(self, transcript, target_dir):
        """
        Split audio file and transcript into many pieces based on
        valid segments of transcript
        """
        os.makedirs(target_dir, exist_ok=True)
        for iseg, seg in enumerate(transcript.segments):
            cut_utterance(
                self.location,
                generate_segmented_file_name(target_dir, self.location, iseg),
                seg.start,
                seg.stop,
            )
        transcript.split(target_dir)
        return
| [
"logging.getLogger",
"os.path.exists",
"asrtoolkit.file_utils.name_cleaners.generate_segmented_file_name",
"asrtoolkit.file_utils.script_input_validation.valid_input_file",
"os.makedirs",
"asrtoolkit.file_utils.name_cleaners.strip_extension",
"asrtoolkit.file_utils.name_cleaners.sanitize_hyphens",
"os... | [((383, 402), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (400, 402), False, 'import logging\n'), ((1332, 1403), 'asrtoolkit.file_utils.script_input_validation.valid_input_file', 'valid_input_file', (['source_audio_file', "['mp3', 'sph', 'wav', 'au', 'raw']"], {}), "(source_audio_file, ['mp3', 'sph', 'wav', 'au', 'raw'])\n", (1348, 1403), False, 'from asrtoolkit.file_utils.script_input_validation import valid_input_file\n'), ((2138, 2153), 'os.remove', 'os.remove', (['tmp1'], {}), '(tmp1)\n', (2147, 2153), False, 'import os\n'), ((2158, 2173), 'os.remove', 'os.remove', (['tmp2'], {}), '(tmp2)\n', (2167, 2173), False, 'import os\n'), ((3775, 3802), 'asrtoolkit.file_utils.name_cleaners.sanitize_hyphens', 'sanitize_hyphens', (['file_name'], {}), '(file_name)\n', (3791, 3802), False, 'from asrtoolkit.file_utils.name_cleaners import generate_segmented_file_name, sanitize_hyphens, strip_extension\n'), ((4393, 4431), 'os.makedirs', 'os.makedirs', (['target_dir'], {'exist_ok': '(True)'}), '(target_dir, exist_ok=True)\n', (4404, 4431), False, 'import os\n'), ((2856, 2880), 'os.path.exists', 'os.path.exists', (['location'], {}), '(location)\n', (2870, 2880), False, 'import os\n'), ((3718, 3744), 'asrtoolkit.file_utils.name_cleaners.strip_extension', 'strip_extension', (['file_name'], {}), '(file_name)\n', (3733, 3744), False, 'from asrtoolkit.file_utils.name_cleaners import generate_segmented_file_name, sanitize_hyphens, strip_extension\n'), ((4563, 4624), 'asrtoolkit.file_utils.name_cleaners.generate_segmented_file_name', 'generate_segmented_file_name', (['target_dir', 'self.location', 'iseg'], {}), '(target_dir, self.location, iseg)\n', (4591, 4624), False, 'from asrtoolkit.file_utils.name_cleaners import generate_segmented_file_name, sanitize_hyphens, strip_extension\n')] |
from flask.ext.wtf import Form
from wtforms import (
TextField, IntegerField, HiddenField, SubmitField, validators
)
class MonkeyForm(Form):
    """WTForms form for creating or editing a monkey record."""
    # Primary key of the record being edited; empty for a new record.
    id = HiddenField()
    # Display name; must be provided.
    name = TextField('Name', validators=[validators.InputRequired()])
    # Age must be a non-negative integer.
    age = IntegerField(
        'Age', validators=[
            validators.InputRequired(message='Age should be an integer.'),
            validators.NumberRange(min=0)
        ]
    )
    # Required, syntactically valid email address.
    email = TextField(
        'Email', validators=[validators.InputRequired(), validators.Email()]
    )
    submit_button = SubmitField('Submit')
| [
"wtforms.validators.NumberRange",
"wtforms.validators.Email",
"wtforms.SubmitField",
"wtforms.HiddenField",
"wtforms.validators.InputRequired"
] | [((156, 169), 'wtforms.HiddenField', 'HiddenField', ([], {}), '()\n', (167, 169), False, 'from wtforms import TextField, IntegerField, HiddenField, SubmitField, validators\n'), ((552, 573), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (563, 573), False, 'from wtforms import TextField, IntegerField, HiddenField, SubmitField, validators\n'), ((211, 237), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (235, 237), False, 'from wtforms import TextField, IntegerField, HiddenField, SubmitField, validators\n'), ((304, 365), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {'message': '"""Age should be an integer."""'}), "(message='Age should be an integer.')\n", (328, 365), False, 'from wtforms import TextField, IntegerField, HiddenField, SubmitField, validators\n'), ((379, 408), 'wtforms.validators.NumberRange', 'validators.NumberRange', ([], {'min': '(0)'}), '(min=0)\n', (401, 408), False, 'from wtforms import TextField, IntegerField, HiddenField, SubmitField, validators\n'), ((477, 503), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (501, 503), False, 'from wtforms import TextField, IntegerField, HiddenField, SubmitField, validators\n'), ((505, 523), 'wtforms.validators.Email', 'validators.Email', ([], {}), '()\n', (521, 523), False, 'from wtforms import TextField, IntegerField, HiddenField, SubmitField, validators\n')] |
import os
import shutil
import tensorflow as tf
from tensorflow import keras
from logs import logDecorator as lD
import jsonref
import numpy as np
import pickle
import warnings
from tqdm import tqdm
from modules.data import getData
# Derive this module's logger name from the shared project config.
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.model.getPretrained'
### turn off tensorflow info/warning/error or all python warnings
# '3' silences TF INFO/WARNING/ERROR C++ logs; must be set before TF starts.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings("ignore")
@lD.log(logBase + '.model')
def modelImageNet(logger, modelName, weightsFile=None, input_shape=(224, 224, 3)):
    """Load a Keras ImageNet architecture, usually without its classifier top.

    Parameters
    ----------
    logger : injected by the @lD.log decorator.
    modelName : str
        One of the keys of the constructor table below (or
        'VGG16_includeTop' for the full VGG16).
    weightsFile : str, optional
        Path to a local weights file; falls back to the 'imagenet'
        download when None.
    input_shape : tuple
        Input tensor shape, default (224, 224, 3).

    Returns
    -------
    The Keras model on success, None on failure (error is logged).
    """
    try:
        weights = weightsFile if weightsFile is not None else 'imagenet'
        # Dispatch table replaces the long if/elif chain; an unknown
        # name now raises a descriptive KeyError (previously it died
        # with an unbound-local NameError) which is caught and logged.
        constructors = {
            'Xception': keras.applications.xception.Xception,
            'VGG16': keras.applications.vgg16.VGG16,
            'VGG19': keras.applications.vgg19.VGG19,
            'ResNet50': keras.applications.resnet50.ResNet50,
            'InceptionV3': keras.applications.inception_v3.InceptionV3,
            'InceptionResNetV2': keras.applications.inception_resnet_v2.InceptionResNetV2,
            'MobileNet': keras.applications.mobilenet.MobileNet,
            'DenseNet': keras.applications.densenet.DenseNet121,
            'NASNet': keras.applications.nasnet.NASNetMobile,
        }
        if modelName == 'VGG16_includeTop':
            # Special case: the only variant loaded with its top layers.
            return keras.applications.vgg16.VGG16(
                input_shape=input_shape, include_top=True, weights=weights)
        return constructors[modelName](
            input_shape=input_shape, include_top=False, weights=weights)
    except Exception as e:
        logger.error('Unable to get model: {} \n{}'.format(modelName, str(e)))
@lD.log(logBase + '.outputTensorBoard')
def outputTensorBoard(logger, subfolder=None):
    """Write the current (empty) default graph into the TensorBoard log dir.

    Any existing log directory is wiped first so TensorBoard shows a
    clean run.
    """
    try:
        log_dir = '../notebooks/tensorlog/'
        if subfolder is not None:
            log_dir = os.path.join(log_dir, subfolder)
        if os.path.exists(log_dir):
            shutil.rmtree(log_dir)
        os.makedirs(log_dir)
        with tf.Session() as sess:
            writer = tf.summary.FileWriter(log_dir, sess.graph)
            writer.close()
    except Exception as e:
        logger.error('Unable to output tensorboard \n{}'.format(str(e)))
@lD.log(logBase + '.visualise_graph')
def visualise_graph(logger, modelName, subfolder=None):
    """Build the named pretrained model and dump its graph for TensorBoard.

    The model is constructed purely for its side effect of populating
    the default TF graph, which is then written to
    ../notebooks/tensorlog/[subfolder].
    """
    try:
        tf.keras.backend.clear_session()
        tfboardFolder = '../notebooks/tensorlog/'
        if subfolder is not None:
            tfboardFolder = os.path.join(tfboardFolder, subfolder)
        if os.path.exists(tfboardFolder):
            shutil.rmtree(tfboardFolder)
        os.makedirs(tfboardFolder)
        # (Removed an unused random image array from the original; it had
        # no effect on the graph that gets written.)
        modelDict = getModelFileDict()
        # Loading the model registers its ops in the default graph.
        modelLoaded = modelImageNet(modelName, modelDict[modelName])
        with tf.Session() as sess:
            tfWriter = tf.summary.FileWriter(tfboardFolder, sess.graph)
            tfWriter.close()
    except Exception as e:
        logger.error('Unable to write graph into tensorboard\n{}'.format(str(e)))
@lD.log(logBase + '.visualise_layers')
def visualise_layers(logger, sess, listOfTensorNodes, inputData):
    """Evaluate the given tensor nodes on inputData and plot each result.

    Assumes the graph's input placeholder is named 'input_1:0' (the
    Keras default for the first input layer) — confirm for other graphs.
    """
    try:
        feed = {'input_1:0': inputData}
        results = sess.run(listOfTensorNodes, feed_dict=feed)
        for node, value in zip(listOfTensorNodes, results):
            print('-' * 50)
            print('node: {}; shape: {}'.format(node, value[0].shape))
            getData.visualiseStackedArray(value[0], cmap=None)
    except Exception as e:
        logger.error('Unable to visualise layers \n{}'.format(str(e)))
@lD.log(logBase + '.getModelFileDict')
def getModelFileDict(logger):
    """Return a mapping from supported model names to local weight files."""
    try:
        base = '../models/'
        fileNames = {
            'Xception': 'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
            'VGG16': 'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
            'VGG16_includeTop': 'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
            'VGG19': 'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
            'InceptionV3': 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
            'MobileNet': 'mobilenet_1_0_224_tf_no_top.h5',
            'DenseNet': 'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
            'NASNet': 'nasnet_mobile_no_top.h5',
            'ResNet50': 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
            'InceptionResNetV2': 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5',
        }
        return {name: base + fname for name, fname in fileNames.items()}
    except Exception as e:
        logger.error('Unable to get model file dictionary \n{}'.format(str(e)))
@lD.log(logBase + '.checkReady')
def checkReady(logger):
    """Smoke-test that every supported pretrained model can be loaded
    from its local weights file, printing one status line per model."""
    try:
        modelString = ['Xception', 'VGG16', 'VGG19', 'InceptionV3', 'MobileNet', 'DenseNet', 'NASNet',
                       'ResNet50', 'InceptionResNetV2', 'VGG16_includeTop']
        modelDict = getModelFileDict()
        for m in modelString:
            try:
                print('{} loading from {}...'.format(m, modelDict[m]), end='', flush=True)
                modelLoaded = modelImageNet(modelName=m, weightsFile=modelDict[m])
                # fixed: message had a typo and a no-op .format(m) with
                # no placeholder
                print('successfully! ', end='', flush=True)
                print('type: {}'.format(type(modelLoaded)))
            except Exception as e:
                # fixed: the original format string had one placeholder
                # for two arguments, silently dropping the error text
                print('{} failed. --> {}'.format(m, str(e)))
    except Exception as e:
        logger.error('Unable to check ready \n{}'.format(str(e)))
@lD.log(logBase + '.main')
def main(logger, resultsDict):
    """Module entry point: runs the pretrained-model availability check.

    Args:
        logger: injected by the @lD.log decorator.
        resultsDict: shared results mapping (unused here; kept for the
            module-runner call signature).
    """
    try:
        checkReady()
    except Exception as e:
        logger.error('Unable to run main \n{}'.format(str(e)))
if __name__ == '__main__':
print('tf.__version__ :', tf.__version__)
print('keras.__version__:', keras.__version__) | [
"logs.logDecorator.log",
"modules.data.getData.visualiseStackedArray",
"tensorflow.keras.applications.xception.Xception",
"tensorflow.keras.applications.nasnet.NASNetMobile",
"os.path.exists",
"tensorflow.Session",
"tensorflow.keras.applications.mobilenet.MobileNet",
"tensorflow.keras.applications.vgg... | [((479, 512), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (502, 512), False, 'import warnings\n'), ((515, 541), 'logs.logDecorator.log', 'lD.log', (["(logBase + '.model')"], {}), "(logBase + '.model')\n", (521, 541), True, 'from logs import logDecorator as lD\n'), ((2539, 2577), 'logs.logDecorator.log', 'lD.log', (["(logBase + '.outputTensorBoard')"], {}), "(logBase + '.outputTensorBoard')\n", (2545, 2577), True, 'from logs import logDecorator as lD\n'), ((3149, 3185), 'logs.logDecorator.log', 'lD.log', (["(logBase + '.visualise_graph')"], {}), "(logBase + '.visualise_graph')\n", (3155, 3185), True, 'from logs import logDecorator as lD\n'), ((4000, 4037), 'logs.logDecorator.log', 'lD.log', (["(logBase + '.visualise_layers')"], {}), "(logBase + '.visualise_layers')\n", (4006, 4037), True, 'from logs import logDecorator as lD\n'), ((4668, 4705), 'logs.logDecorator.log', 'lD.log', (["(logBase + '.getModelFileDict')"], {}), "(logBase + '.getModelFileDict')\n", (4674, 4705), True, 'from logs import logDecorator as lD\n'), ((5867, 5898), 'logs.logDecorator.log', 'lD.log', (["(logBase + '.checkReady')"], {}), "(logBase + '.checkReady')\n", (5873, 5898), True, 'from logs import logDecorator as lD\n'), ((6702, 6727), 'logs.logDecorator.log', 'lD.log', (["(logBase + '.main')"], {}), "(logBase + '.main')\n", (6708, 6727), True, 'from logs import logDecorator as lD\n'), ((2800, 2829), 'os.path.exists', 'os.path.exists', (['tfboardFolder'], {}), '(tfboardFolder)\n', (2814, 2829), False, 'import os\n'), ((2881, 2907), 'os.makedirs', 'os.makedirs', (['tfboardFolder'], {}), '(tfboardFolder)\n', (2892, 2907), False, 'import os\n'), ((3260, 3292), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (3290, 3292), True, 'import tensorflow as tf\n'), ((3458, 3487), 'os.path.exists', 'os.path.exists', (['tfboardFolder'], {}), '(tfboardFolder)\n', (3472, 
3487), False, 'import os\n'), ((3539, 3565), 'os.makedirs', 'os.makedirs', (['tfboardFolder'], {}), '(tfboardFolder)\n', (3550, 3565), False, 'import os\n'), ((3591, 3632), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(1, 224, 224, 3)'], {}), '(0, 5, (1, 224, 224, 3))\n', (3608, 3632), True, 'import numpy as np\n'), ((818, 920), 'tensorflow.keras.applications.xception.Xception', 'keras.applications.xception.Xception', ([], {'input_shape': 'input_shape', 'include_top': '(False)', 'weights': 'weights'}), '(input_shape=input_shape, include_top=\n False, weights=weights)\n', (854, 920), False, 'from tensorflow import keras\n'), ((2749, 2787), 'os.path.join', 'os.path.join', (['tfboardFolder', 'subfolder'], {}), '(tfboardFolder, subfolder)\n', (2761, 2787), False, 'import os\n'), ((2843, 2871), 'shutil.rmtree', 'shutil.rmtree', (['tfboardFolder'], {}), '(tfboardFolder)\n', (2856, 2871), False, 'import shutil\n'), ((2922, 2934), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2932, 2934), True, 'import tensorflow as tf\n'), ((2967, 3015), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['tfboardFolder', 'sess.graph'], {}), '(tfboardFolder, sess.graph)\n', (2988, 3015), True, 'import tensorflow as tf\n'), ((3407, 3445), 'os.path.join', 'os.path.join', (['tfboardFolder', 'subfolder'], {}), '(tfboardFolder, subfolder)\n', (3419, 3445), False, 'import os\n'), ((3501, 3529), 'shutil.rmtree', 'shutil.rmtree', (['tfboardFolder'], {}), '(tfboardFolder)\n', (3514, 3529), False, 'import shutil\n'), ((3762, 3774), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3772, 3774), True, 'import tensorflow as tf\n'), ((3809, 3857), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['tfboardFolder', 'sess.graph'], {}), '(tfboardFolder, sess.graph)\n', (3830, 3857), True, 'import tensorflow as tf\n'), ((4517, 4565), 'modules.data.getData.visualiseStackedArray', 'getData.visualiseStackedArray', (['res[0]'], {'cmap': 'None'}), '(res[0], 
cmap=None)\n', (4546, 4565), False, 'from modules.data import getData\n'), ((977, 1072), 'tensorflow.keras.applications.vgg16.VGG16', 'keras.applications.vgg16.VGG16', ([], {'input_shape': 'input_shape', 'include_top': '(False)', 'weights': 'weights'}), '(input_shape=input_shape, include_top=False,\n weights=weights)\n', (1007, 1072), False, 'from tensorflow import keras\n'), ((1141, 1235), 'tensorflow.keras.applications.vgg16.VGG16', 'keras.applications.vgg16.VGG16', ([], {'input_shape': 'input_shape', 'include_top': '(True)', 'weights': 'weights'}), '(input_shape=input_shape, include_top=True,\n weights=weights)\n', (1171, 1235), False, 'from tensorflow import keras\n'), ((1293, 1388), 'tensorflow.keras.applications.vgg19.VGG19', 'keras.applications.vgg19.VGG19', ([], {'input_shape': 'input_shape', 'include_top': '(False)', 'weights': 'weights'}), '(input_shape=input_shape, include_top=False,\n weights=weights)\n', (1323, 1388), False, 'from tensorflow import keras\n'), ((1449, 1551), 'tensorflow.keras.applications.resnet50.ResNet50', 'keras.applications.resnet50.ResNet50', ([], {'input_shape': 'input_shape', 'include_top': '(False)', 'weights': 'weights'}), '(input_shape=input_shape, include_top=\n False, weights=weights)\n', (1485, 1551), False, 'from tensorflow import keras\n'), ((1614, 1722), 'tensorflow.keras.applications.inception_v3.InceptionV3', 'keras.applications.inception_v3.InceptionV3', ([], {'input_shape': 'input_shape', 'include_top': '(False)', 'weights': 'weights'}), '(input_shape=input_shape,\n include_top=False, weights=weights)\n', (1657, 1722), False, 'from tensorflow import keras\n'), ((1792, 1914), 'tensorflow.keras.applications.inception_resnet_v2.InceptionResNetV2', 'keras.applications.inception_resnet_v2.InceptionResNetV2', ([], {'input_shape': 'input_shape', 'include_top': '(False)', 'weights': 'weights'}), '(input_shape=\n input_shape, include_top=False, weights=weights)\n', (1848, 1914), False, 'from tensorflow import keras\n'), 
((1975, 2079), 'tensorflow.keras.applications.mobilenet.MobileNet', 'keras.applications.mobilenet.MobileNet', ([], {'input_shape': 'input_shape', 'include_top': '(False)', 'weights': 'weights'}), '(input_shape=input_shape, include_top\n =False, weights=weights)\n', (2013, 2079), False, 'from tensorflow import keras\n'), ((2139, 2243), 'tensorflow.keras.applications.densenet.DenseNet121', 'keras.applications.densenet.DenseNet121', ([], {'input_shape': 'input_shape', 'include_top': '(False)', 'weights': 'weights'}), '(input_shape=input_shape,\n include_top=False, weights=weights)\n', (2178, 2243), False, 'from tensorflow import keras\n'), ((2302, 2406), 'tensorflow.keras.applications.nasnet.NASNetMobile', 'keras.applications.nasnet.NASNetMobile', ([], {'input_shape': 'input_shape', 'include_top': '(False)', 'weights': 'weights'}), '(input_shape=input_shape, include_top\n =False, weights=weights)\n', (2340, 2406), False, 'from tensorflow import keras\n')] |