code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import os
import numpy as np
from sys import platform, path
if platform == "linux" or platform == "linux2":
path.insert(1, os.path.dirname(os.getcwd()) + "/src")
FILE_NAME = os.path.dirname(os.getcwd()) + "/data" + "/xAPI-Edu-Data-Edited.csv"
elif platform == "win32":
path.insert(1, os.path.dirname(os.getcwd()) + "\\src")
FILE_NAME = os.path.dirname(os.getcwd()) + "\\data" + "\\xAPI-Edu-Data-Edited.csv"
elif platform == "darwin":
path.insert(1, os.path.dirname(os.getcwd()) + "/src")
FILE_NAME = os.path.dirname(os.getcwd()) + "/data" + "/xAPI-Edu-Data-Edited.csv"
from DataPreprocessing import Preprocess, FeaturePreprocess
from DataProcessing import ModelTuning, ModelValidating, save_file, load_file
CATEGORICAL_COLUMNS = ["Gender", "Nationality", "PlaceofBirth", "StageID", "GradeID", "SectionID", "Topic",
"Semester", "Relation", "ParentAnsweringSurvey", "ParentSchoolSatisfaction",
"StudentAbsenceDays"]
PREFIXES = ["Gender", "Nationality", "PlaceofBirth", "Stage", "Grade", "Section", "Topic",
"Semester", "Relation", "Survey", "ParentSatisfaction",
"Absence"]
REMOVE_VALUES = ["G-05", "G-09"]
def preprocess_data(count_missing=False, replace_values=True, remove_values=False, encode=True,
categorical_columns=CATEGORICAL_COLUMNS,
prefixes=PREFIXES):
"""Preprocesses the raw dataset
Parameters
----------
count_missing : bool, default=False
Counts all missing values in the dataset
replace_values : bool, default=True
Replaces non significative values in the columns "Nationality" and "PlaceofBirth" with "Other"
remove_values : bool, default=False
Replaces rows with non significative values in the columns "GradeID"
encode : bool, default=True
One Hot encodes categorical columns
categorical_columns : list of str, defaut=(categorical columns of the dataset)
Columns to apply one hot encode to
prefixes : list of str, default="["Gender", "Nationality", "PlaceofBirth", "Stage", "Grade", "Section", "Topic",
"Semester", "Relation", "Survey", "ParentSatisfaction",
"Absence"]"
Prefixes for one hot encoding
Returns
----------
X_data : pandas df
feature columns
y_data : pandas df
target columns
y_labels : {ndarray, sparse matrix}
class labels
"""
preprocess = Preprocess(data=FILE_NAME)
if count_missing:
print(f"Number of rows missing values: {preprocess.check_missing_values()}")
if replace_values:
preprocess.replace_values("Nationality",
["Lybia", "Iraq", "Lebanon", "Tunisia", "SaudiArabia", "Egypt", "USA", "Venezuela",
"Iran", "Morocco", "Syria", "Palestine"], "Other")
preprocess.replace_values("PlaceofBirth",
["Lybia", "Iraq", "Lebanon", "Tunisia", "SaudiArabia", "Egypt", "USA", "Venezuela",
"Iran", "Morocco", "Syria", "Palestine"], "Other")
if remove_values:
preprocess.remove_values("GradeID", REMOVE_VALUES)
if encode:
preprocess.target_encode()
preprocess.one_hot_encode(columns=categorical_columns, prefix=prefixes)
X_data, y_data = preprocess.get_data()
y_labels = preprocess.target_decode()
return X_data, y_data, y_labels
X_data, y_data = preprocess.get_data()
return X_data, y_data
def preprocess_features(X_data, scaler_type="standard", n_components=None, plot_pca=False, threshold=0.85,
savefig=True):
"""
processes feature columns with a scaler and pca
Parameters
----------
X_data : pandas df
feature Columns
scaler_type : str, default="standard"
scalar to use ('standard'/'min_max')
n_components : int, default=None
pca components to use, if 'None' uses all components
plot_pca : bool, defaut=True
specifies if pca should be plotted
threshold : float range(0,1), default=0.85
pca variance threshold to plot vertical line at
savefig : bool, default=True
specifies if pca plot should be saved
Returns
----------
X_transformed : ndarray
preprocessed feature columns
feature_preprocess : feature_preprocess object
feature_preprocess object used (for the pipeline)
"""
if n_components is None:
n_components = len(X_data.columns)
feature_preprocess = FeaturePreprocess(X_data, n_components=n_components, scaler_type=scaler_type)
X_transformed = feature_preprocess.transform_data()
if plot_pca:
feature_preprocess.plot_pca(threshold=threshold, savefig=savefig)
return X_transformed, feature_preprocess
def create_estimators(X_data, y_data, train_size=0.7, hyperparam_tune=True, boosting=True, random_state=42,
verbose=1):
"""Splits the data in train, test and val, trains three different estimators: Decision Tree, Support Vector Machine
and Random Forest, can also tune the hyper parameters and boost the estimators with Adaboost
Parameters
----------
X_data : pandas df
feature Columns
y_data : pandas df
target column
train_size : float
Percentage for train
hyperparam_tune : bool, default=True
specifies if hyper params should be tuned
boosting : bool, default=True
specifies if estimators should be boosted
random_state : int, default=42
random state
verbose : int, default=1
verbosity level
Returns
----------
estimators : list of estimators
trained estimators
mt : ModelTuning object
ModelTuning object used (for validation set)
"""
estimators = []
mt = ModelTuning(X_data, y_data, train_size, random_state=random_state)
if verbose > 0:
print("Creating Basic Estimators...\n")
dt = mt.create_weak_learner(random_state, verbose, model_type="dt", )
svm = mt.create_weak_learner(random_state, verbose, model_type="svm")
rf = mt.create_random_forest(random_state, verbose)
estimators.extend([dt, svm, rf])
if hyperparam_tune:
if verbose > 0:
print("Tunning Hyperparams...\n")
tuned_dt = mt.tune_hyperparam(dt, random_state, verbose)
tuned_svm = mt.tune_hyperparam(svm, random_state, verbose)
tuned_rf = mt.tune_hyperparam(rf, random_state, verbose)
estimators.extend([tuned_dt, tuned_svm, tuned_rf])
if boosting:
if verbose > 0:
print("Boosting...\n")
print("Boosted dt:")
boosted_dt = mt.boost_weak_learners(tuned_dt, random_state, verbose)
if verbose > 0:
print("Boosted svm:")
boosted_svm = mt.boost_weak_learners(tuned_svm, random_state, verbose)
if verbose > 0:
print("Boosted rf:")
boosted_rf = mt.boost_weak_learners(tuned_rf, random_state, verbose)
estimators.extend([boosted_dt, boosted_svm, boosted_rf])
return estimators, mt
def get_x_y_set(mt, type="test"):
"""Gets data set from ModelTuning object
Parameters
----------
mt : ModelTuning object
ModelTuning object used
type : str, default="test"
specifies which set to return ('train'/'test'/'val')
Returns
----------
X_data, y_data : ndarray
"""
if type == "val":
return mt.get_validation_set()
if type == "train":
return mt.get_train_set()
if type == "test":
return mt.get_test_set()
def validate_estimators(estimators, X_val, y_val, y_labels, scaler_type="", plot_cf=True, clas_report=True,
savefig=True):
"""Validates estimators
Parameters
----------
estimators : list of estimators
estimators to validate
X_val : ndarray
validation data
y_val : ndarray
validation labels
y_labels : {ndarray, sparse matrix}
decoded labels
scaler_type : str, optional
scaler used ('standard'/'min_max') (for plots)
plot_cf : bool, default=True
specifies if confusion matrix should be plot
clas_report : bool, default=True
specifies if Classification Report should be printed
savefig : bool, default=True
specifies if confusion matrix should be saved as .png
"""
for est in estimators:
mv = ModelValidating(est, X_val, y_val, y_labels=y_labels, scaler=scaler_type)
if plot_cf:
mv.plot_confusion_matrix(savefig=savefig)
if clas_report:
report = mv.classification_report()
print(f"Classification Report: {est}\n{report}")
def get_n_best(estimators, X_val, y_val, y_labels, best_n=3, score="f1_score"):
"""Gets best estimators from list
Parameters
----------
estimators : list of estimators
list of trained estimators
X_val : ndarray
validation data
y_val : ndarray
validation labels
y_labels : {ndarray, sparse matrix}
decoded labels
best_n : int, default=3
number of estimators to pick
score : str, default="f1_score"
metric to use for picking best estimators ('accuracy'/'f1_score')
Returns
----------
best_est : list of estimators of len=´best_n´
"""
best_scores = []
for est in estimators:
mv = ModelValidating(est, X_val, y_val, y_labels=y_labels, scaler="")
indv_scores = mv.get_scores()
if score == "accuracy":
best_scores.append(indv_scores[0])
if score == "f1_score":
best_scores.append(indv_scores[1])
best_idx = np.argpartition(best_scores, -best_n)[-best_n:]
best_est = []
for index in best_idx:
best_est.append(estimators[index])
return best_est
def save(models, file_name=None, suffix=None):
"""Saves estimator
Parameters
----------
file_name : str, optional
name for the file if None model will be saved with suffix
suffix : str, optional
suffix to be added
"""
if file_name is None:
for model in models:
save_file(model, suffix=suffix)
else:
save_file(models, file_name=file_name)
def load(model_name):
"""Loads and returns pickle File
"""
return load_file(model_name)
| [
"numpy.argpartition",
"DataPreprocessing.FeaturePreprocess",
"os.getcwd",
"DataProcessing.save_file",
"DataProcessing.load_file",
"DataProcessing.ModelValidating",
"DataPreprocessing.Preprocess",
"DataProcessing.ModelTuning"
] | [((2478, 2504), 'DataPreprocessing.Preprocess', 'Preprocess', ([], {'data': 'FILE_NAME'}), '(data=FILE_NAME)\n', (2488, 2504), False, 'from DataPreprocessing import Preprocess, FeaturePreprocess\n'), ((4600, 4677), 'DataPreprocessing.FeaturePreprocess', 'FeaturePreprocess', (['X_data'], {'n_components': 'n_components', 'scaler_type': 'scaler_type'}), '(X_data, n_components=n_components, scaler_type=scaler_type)\n', (4617, 4677), False, 'from DataPreprocessing import Preprocess, FeaturePreprocess\n'), ((5903, 5969), 'DataProcessing.ModelTuning', 'ModelTuning', (['X_data', 'y_data', 'train_size'], {'random_state': 'random_state'}), '(X_data, y_data, train_size, random_state=random_state)\n', (5914, 5969), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((10446, 10467), 'DataProcessing.load_file', 'load_file', (['model_name'], {}), '(model_name)\n', (10455, 10467), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((8534, 8607), 'DataProcessing.ModelValidating', 'ModelValidating', (['est', 'X_val', 'y_val'], {'y_labels': 'y_labels', 'scaler': 'scaler_type'}), '(est, X_val, y_val, y_labels=y_labels, scaler=scaler_type)\n', (8549, 8607), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((9514, 9578), 'DataProcessing.ModelValidating', 'ModelValidating', (['est', 'X_val', 'y_val'], {'y_labels': 'y_labels', 'scaler': '""""""'}), "(est, X_val, y_val, y_labels=y_labels, scaler='')\n", (9529, 9578), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((9792, 9829), 'numpy.argpartition', 'np.argpartition', (['best_scores', '(-best_n)'], {}), '(best_scores, -best_n)\n', (9807, 9829), True, 'import numpy as np\n'), ((10327, 10365), 'DataProcessing.save_file', 'save_file', (['models'], {'file_name': 'file_name'}), '(models, file_name=file_name)\n', (10336, 10365), False, 'from DataProcessing import ModelTuning, 
ModelValidating, save_file, load_file\n'), ((10277, 10308), 'DataProcessing.save_file', 'save_file', (['model'], {'suffix': 'suffix'}), '(model, suffix=suffix)\n', (10286, 10308), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((144, 155), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (153, 155), False, 'import os\n'), ((199, 210), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (208, 210), False, 'import os\n'), ((313, 324), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (322, 324), False, 'import os\n'), ((369, 380), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (378, 380), False, 'import os\n'), ((486, 497), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (495, 497), False, 'import os\n'), ((541, 552), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (550, 552), False, 'import os\n')] |
import io
import time
import wx
from .screenshot_thread import ScreenshotThread, EVT_SCREENSHOT
class DeviceFrame(wx.Frame):
def __init__(self, device,
png=True,
resize_quality=wx.IMAGE_QUALITY_NORMAL,
size_divisor=640,
*args, **kwargs):
super().__init__(None, title=device.name, *args, **kwargs)
self.device = device
self.screen_size = device.wm.get_size()
self.screen_aspect = device.wm.get_aspect()
self.resize_quality = resize_quality
self.screenshot = None
self.Bind(wx.EVT_PAINT, self.on_paint)
self.Bind(wx.EVT_SIZE, self.on_size)
self.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.Bind(wx.EVT_LEFT_UP, self.on_mouse_up)
self.Bind(wx.EVT_MOTION, self.on_mouse_move)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.SetClientSize(self.FromDIP(self.choose_size(size_divisor)))
self.pos = None
self.timestamp = None
self.moved = False
self.SetBackgroundStyle(wx.BG_STYLE_PAINT)
self.statusbar = self.CreateStatusBar(2)
self.Bind(EVT_SCREENSHOT, self.update_screenshot)
self.screenshot_thread = ScreenshotThread(self, device, png)
def choose_size(self, size_divisor):
size = self.screen_size
divisor = max(size) // size_divisor
return [ s // divisor for s in size ]
def get_coord(self, pos):
w, h = self.screen_size
cw, ch = self.GetClientSize()
acw, ach = w / cw, h / ch
x, y = pos
return int(x * acw), int(y * ach)
def get_coord_inside(self, pos):
w, h = self.screen_size
x, y = self.get_coord(pos)
inside = (0 <= x < w) and (0 <= y < h)
return x, y, inside
def get_coord_clipped(self, pos):
w, h = self.screen_size
x, y = self.get_coord(pos)
x = min(max(x, 0), w - 1)
y = min(max(y, 0), h - 1)
return x, y
def on_mouse_down(self, event):
x, y, inside = self.get_coord_inside(event.GetPosition())
if inside:
self.pos = x, y
self.timestamp = event.Timestamp
self.statusbar.SetStatusText("[down] {}".format(self.pos))
def on_mouse_move(self, event):
if self.pos:
# Skip events out of the screen
x, y, inside = self.get_coord_inside(event.GetPosition())
if inside and self.pos != (x, y):
self.moved = True
self.statusbar.SetStatusText("[drag] {}".format((x, y)))
def on_mouse_up(self, event):
x, y, inside = self.get_coord_inside(event.GetPosition())
if inside and self.pos:
new_pos = x, y
self.statusbar.SetStatusText("[up] {}".format(new_pos))
if self.moved:
elapsed = event.Timestamp - self.timestamp
self.device.input.swipe(self.pos, new_pos, elapsed)
else:
self.device.input.tap(new_pos)
self.pos = None
self.timestamp = None
self.moved = False
def on_size(self, event):
width, _ = self.GetClientSize()
size = (width, int(width / self.screen_aspect))
if self.GetClientSize() != size:
self.SetClientSize(size)
def on_paint(self, event):
dc = wx.AutoBufferedPaintDC(self)
if self.screenshot:
img = self.screenshot
img = img.Scale(*self.GetClientSize(), self.resize_quality)
dc.DrawBitmap(img.ConvertToBitmap(), 0, 0)
def update_screenshot(self, event):
self.screenshot = event.screenshot
self.statusbar.SetStatusText("%.2f FPS" % event.fps, 1)
self.Refresh()
def on_close(self, event):
self.screenshot_thread.stop()
event.Skip()
| [
"wx.AutoBufferedPaintDC"
] | [((3362, 3390), 'wx.AutoBufferedPaintDC', 'wx.AutoBufferedPaintDC', (['self'], {}), '(self)\n', (3384, 3390), False, 'import wx\n')] |
from django.template import Library
from django.utils.safestring import mark_safe
register = Library()
@register.filter
def version_name(revision, autoescape=None):
name = "<strong>%s</strong> " % revision.version_name \
if revision.version_name else ""
return mark_safe("%srev. %d" % (name, revision.revision_number))
version_name.needs_autoescape = True
| [
"django.utils.safestring.mark_safe",
"django.template.Library"
] | [((94, 103), 'django.template.Library', 'Library', ([], {}), '()\n', (101, 103), False, 'from django.template import Library\n'), ((284, 341), 'django.utils.safestring.mark_safe', 'mark_safe', (["('%srev. %d' % (name, revision.revision_number))"], {}), "('%srev. %d' % (name, revision.revision_number))\n", (293, 341), False, 'from django.utils.safestring import mark_safe\n')] |
import toml
def pipfile_decode(data):
parsed_toml = toml.loads(data)
res = dict()
# print(parsed_toml)
res["dependencies"] = parsed_toml["packages"]
res["dev-dependencies"] = parsed_toml["dev-packages"]
return res
def pipfile_encode(data):
res = dict()
res["packages"] = dict()
for i in data["dependencies"]:
res["packages"][i] = data["dependencies"][i]
new_toml = toml.dumps(res)
return new_toml
| [
"toml.loads",
"toml.dumps"
] | [((58, 74), 'toml.loads', 'toml.loads', (['data'], {}), '(data)\n', (68, 74), False, 'import toml\n'), ((419, 434), 'toml.dumps', 'toml.dumps', (['res'], {}), '(res)\n', (429, 434), False, 'import toml\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql import SQLContext
from py4j.java_gateway import java_import
def register(sc):
java_import(sc._gateway.jvm, "org.apache.spark.sql.hbase.HBaseSQLContext")
__all__ = ["HBaseSQLContext"]
class HBaseSQLContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in HBase.
"""
def __init__(self, sparkContext):
"""Create a new HbaseContext.
@param sparkContext: The SparkContext to wrap.
"""
SQLContext.__init__(self, sparkContext)
self._scala_HBaseSQLContext = self._get_hbase_ctx()
@property
def _ssql_ctx(self):
if self._scala_HBaseSQLContext is None:
print ("loading hbase context ..")
self._scala_HBaseSQLContext = self._get_hbase_ctx()
if self._scala_SQLContext is None:
self._scala_SQLContext = self._scala_HBaseSQLContext
return self._scala_HBaseSQLContext
def _get_hbase_ctx(self):
return self._jvm.HBaseSQLContext(self._jsc.sc())
#TODO: add tests if for main
| [
"py4j.java_gateway.java_import",
"pyspark.sql.SQLContext.__init__"
] | [((885, 959), 'py4j.java_gateway.java_import', 'java_import', (['sc._gateway.jvm', '"""org.apache.spark.sql.hbase.HBaseSQLContext"""'], {}), "(sc._gateway.jvm, 'org.apache.spark.sql.hbase.HBaseSQLContext')\n", (896, 959), False, 'from py4j.java_gateway import java_import\n'), ((1253, 1292), 'pyspark.sql.SQLContext.__init__', 'SQLContext.__init__', (['self', 'sparkContext'], {}), '(self, sparkContext)\n', (1272, 1292), False, 'from pyspark.sql import SQLContext\n')] |
import gym
from baselines import deepq
from baselines.common.atari_wrappers_deprecated import wrap_dqn, ScaledFloatFrame
from cloud_environment import CloudEnvironment
import numpy as np
import collections
import os
import csv
import pandas as pd
#Logging
def logger_callback(locals,globals):
done = locals['done']
num_episodes = locals['num_episodes']
log_action_l = locals['log_action_l'] # actions chosen in current episode step
log_action_l.append(locals['action'])
if done:
action_counter = collections.Counter(log_action_l).items()
reward_sum = np.sum(locals['episode_rewards'])
reward_mean = np.mean(locals['episode_rewards'])
c_reward_sum = np.sum(locals['cumulative_episode_rewards'])
c_reward_mean = np.mean(locals['cumulative_episode_rewards'])
path = locals['test_file_path']
print("Writing episode {} log to ".format(num_episodes), path)
with open(path, 'a') as f:
env = locals['env']
actions_np = np.zeros(env.action_space.n)
for k, v in action_counter:
actions_np[k] = v
action_count_header = ['action_count{}'.format(i) for i in range(env.action_space.n)]
#action_q_header = ['mean_action_q{}'.format(i) for i in range(len(episode_q_t.tolist()))]
headers = ['episode','reward_sum','reward_mean','c_reward_sum','c_reward_mean']
#headers = headers + action_q_header+action_count_header
headers = headers + action_count_header
action_counts = list(actions_np)
#actions_qs = [q for q in episode_q_t.tolist()]
#output_list = [num_episodes]+[steps]+[rew100]+[rew50]+[rew10]+[episode_q_t_selected]+[episode_q_t_targets]+[episode_td_errors]+[episode_errors]+ actions_qs+action_counts
output_list = [num_episodes] + [reward_sum] + [reward_mean] + [c_reward_sum] + [c_reward_mean] + action_counts
print(headers)
print(output_list)
w = csv.writer(f)
if os.stat(path).st_size == 0:
w.writerow(headers)
w.writerow(output_list)
return False
def result_callback(ci_list,episode_list,locals,globals):
nps = len(episode_list[0])-len(ci_list[0])
cis_l = [[np.nan]*nps + cil for cil in ci_list]
e_df = pd.concat(episode_list,axis=0).reset_index(drop=True)
ci_df = pd.DataFrame(np.concatenate(cis_l),columns=['ci']).reset_index(drop=True)
output_df = pd.concat([e_df,ci_df],axis=1)
output_df.dropna(inplace=True)
output_df = output_df.reset_index(drop=True)
output_df.to_csv('eval_predictions.csv')
def main():
load_cpk="/home/nox/Masterarbeit/thesis_data/baseline_rl/simple_rl/7_unbalanced_test/experiments_unbalanced/cloud_model.pkl"
channels=3
seq_length=2
img_size=84
env = CloudEnvironment(img_size=img_size,radius=[12,13],sequence_stride=1,channels=channels,sequence_length=seq_length,ramp_step=0.1,action_type=1,action_nr=3,stochastic_irradiance=True,save_images=True)
#Note: cloud speed can be changes but may also require different ramps.. default, speed of cloud per frame at least 1 pixel in y direction
model = deepq.models.cnn_to_mlp(
convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
hiddens=[256],
dueling=True,
channels=channels,
seq_length=seq_length,
img_size=img_size
)
deepq.test(load_cpk=load_cpk,
result_callback=result_callback,
env=env,
q_func=model,
log_callback=logger_callback,
episode_n=1
)
if __name__ == '__main__':
main()
| [
"numpy.mean",
"cloud_environment.CloudEnvironment",
"csv.writer",
"baselines.deepq.test",
"numpy.sum",
"numpy.zeros",
"collections.Counter",
"baselines.deepq.models.cnn_to_mlp",
"numpy.concatenate",
"os.stat",
"pandas.concat"
] | [((2638, 2670), 'pandas.concat', 'pd.concat', (['[e_df, ci_df]'], {'axis': '(1)'}), '([e_df, ci_df], axis=1)\n', (2647, 2670), True, 'import pandas as pd\n'), ((3001, 3216), 'cloud_environment.CloudEnvironment', 'CloudEnvironment', ([], {'img_size': 'img_size', 'radius': '[12, 13]', 'sequence_stride': '(1)', 'channels': 'channels', 'sequence_length': 'seq_length', 'ramp_step': '(0.1)', 'action_type': '(1)', 'action_nr': '(3)', 'stochastic_irradiance': '(True)', 'save_images': '(True)'}), '(img_size=img_size, radius=[12, 13], sequence_stride=1,\n channels=channels, sequence_length=seq_length, ramp_step=0.1,\n action_type=1, action_nr=3, stochastic_irradiance=True, save_images=True)\n', (3017, 3216), False, 'from cloud_environment import CloudEnvironment\n'), ((3355, 3521), 'baselines.deepq.models.cnn_to_mlp', 'deepq.models.cnn_to_mlp', ([], {'convs': '[(32, 8, 4), (64, 4, 2), (64, 3, 1)]', 'hiddens': '[256]', 'dueling': '(True)', 'channels': 'channels', 'seq_length': 'seq_length', 'img_size': 'img_size'}), '(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], hiddens\n =[256], dueling=True, channels=channels, seq_length=seq_length,\n img_size=img_size)\n', (3378, 3521), False, 'from baselines import deepq\n'), ((3571, 3703), 'baselines.deepq.test', 'deepq.test', ([], {'load_cpk': 'load_cpk', 'result_callback': 'result_callback', 'env': 'env', 'q_func': 'model', 'log_callback': 'logger_callback', 'episode_n': '(1)'}), '(load_cpk=load_cpk, result_callback=result_callback, env=env,\n q_func=model, log_callback=logger_callback, episode_n=1)\n', (3581, 3703), False, 'from baselines import deepq\n'), ((611, 644), 'numpy.sum', 'np.sum', (["locals['episode_rewards']"], {}), "(locals['episode_rewards'])\n", (617, 644), True, 'import numpy as np\n'), ((671, 705), 'numpy.mean', 'np.mean', (["locals['episode_rewards']"], {}), "(locals['episode_rewards'])\n", (678, 705), True, 'import numpy as np\n'), ((733, 777), 'numpy.sum', 'np.sum', (["locals['cumulative_episode_rewards']"], 
{}), "(locals['cumulative_episode_rewards'])\n", (739, 777), True, 'import numpy as np\n'), ((806, 851), 'numpy.mean', 'np.mean', (["locals['cumulative_episode_rewards']"], {}), "(locals['cumulative_episode_rewards'])\n", (813, 851), True, 'import numpy as np\n'), ((1080, 1108), 'numpy.zeros', 'np.zeros', (['env.action_space.n'], {}), '(env.action_space.n)\n', (1088, 1108), True, 'import numpy as np\n'), ((2147, 2160), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2157, 2160), False, 'import csv\n'), ((2480, 2511), 'pandas.concat', 'pd.concat', (['episode_list'], {'axis': '(0)'}), '(episode_list, axis=0)\n', (2489, 2511), True, 'import pandas as pd\n'), ((543, 576), 'collections.Counter', 'collections.Counter', (['log_action_l'], {}), '(log_action_l)\n', (562, 576), False, 'import collections\n'), ((2560, 2581), 'numpy.concatenate', 'np.concatenate', (['cis_l'], {}), '(cis_l)\n', (2574, 2581), True, 'import numpy as np\n'), ((2181, 2194), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (2188, 2194), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author : <NAME>
@file : at_app.py
@time : 2019/01/02
@site :
@software: PyCharm
,----------------, ,---------,
,-----------------------, ," ,"|
," ,"| ," ," |
+-----------------------+ | ," ," |
| .-----------------. | | +---------+ |
| | | | | | -==----'| |
| | $ sudo rm -rf / | | | | | |
| | | | |/----|`---= | |
| | | | | ,/|==== ooo | ;
| | | | | // |(((( [33]| ,"
| `-----------------' |," .;'| |(((( | ,"
+-----------------------+ ;; | | |,"
/_)______________(_/ //' | +---------+
___________________________/___ `,
/ oooooooooooooooo .o. oooo /, \,"-----------
/ ==ooooooooooooooo==.o. ooo= // ,`\--{)B ,"
/_==__==========__==_ooo__ooo=_/' /___________,"
"""
import colorama
from colorama import Fore, Style, Back
from linktools import utils, logger
from linktools.android import Device, AdbError, AdbArgumentParser, Package, Permission, Component, Activity, Service, \
Receiver, Provider, IntentFilter
from linktools.decorator import entry_point
class PrintLevel:
min = 0
useless = 100
normal = 200
dangerous_normal = 250
dangerous = 300
title = 400
max = 1000
class PrintStream(PrintLevel):
def __init__(self, max_level=PrintLevel.max, min_level=PrintLevel.min, file=None):
self.max = max_level
self.min = min_level
self.file = file
def print(self, text: str = "", indent: int = 0, level=PrintLevel.normal):
if not self.min <= level <= self.max:
pass
elif level == PrintLevel.title:
logger.message(text, style=Style.BRIGHT, indent=indent)
elif level == PrintLevel.dangerous:
logger.message(text, fore=Fore.RED, back=Back.WHITE, style=Style.BRIGHT, indent=indent)
elif level == PrintLevel.useless:
logger.message(text, fore=Fore.YELLOW, back=Back.WHITE, style=Style.BRIGHT, indent=indent)
else:
logger.message(text, indent=indent)
def print_line(self):
logger.message()
class PrintStreamWrapper(PrintLevel):
def __init__(self, stream: PrintStream, max_level: int = PrintLevel.max, min_level: int = PrintLevel.min):
self.stream = stream
self.max_level = max_level
self.min_level = min_level
def print(self, text: str = "", indent: int = 0, level=PrintLevel.normal):
if level > self.max_level:
level = self.max_level
elif level < self.min_level:
level = self.min_level
self.stream.print(text, indent=indent, level=level)
def print_line(self):
self.stream.print_line()
def create(self, max_level: int = PrintLevel.max, min_level: int = PrintLevel.min):
if max_level > self.max_level:
max_level = self.max_level
elif min_level < self.min_level:
min_level = self.min_level
return PrintStreamWrapper(self.stream, max_level=max_level, min_level=min_level)
class PackagePrinter:
def __init__(self, stream: PrintStream, package: Package):
self.package = package
self.max_level = PrintLevel.max if self.package.enabled else PrintLevel.useless
self.min_level = PrintLevel.min
self.stream = PrintStreamWrapper(stream, max_level=self.max_level, min_level=self.min_level)
def print_package(self, indent: int = 0):
self.stream.print("Package [%s]" % self.package, indent=indent, level=self.stream.title)
self.stream.print("name=%s" % self.package.appName, indent=indent + 4, level=self.stream.normal)
self.stream.print("userId=%s" % self.package.userId, indent=indent + 4, level=self.stream.normal)
self.stream.print("gids=%s" % self.package.gids, indent=indent + 4, level=self.stream.normal)
self.stream.print("sourceDir=%s" % self.package.sourceDir, indent=indent + 4, level=self.stream.normal)
self.stream.print("versionCode=%s" % self.package.versionCode, indent=indent + 4, level=self.stream.normal)
self.stream.print("versionName=%s" % self.package.versionName, indent=indent + 4, level=self.stream.normal)
self.stream.print("enabled=%s" % self.package.enabled, indent=indent + 4, level=self.stream.normal)
self.stream.print("system=%s" % self.package.system, indent=indent + 4, level=self.stream.normal)
self.stream.print("debuggable=%s" % self.package.debuggable, indent=indent + 4,
level=self.stream.dangerous if self.package.debuggable else self.stream.normal)
self.stream.print("allowBackup=%s" % self.package.allowBackup, indent=indent + 4,
level=self.stream.dangerous if self.package.allowBackup else self.stream.normal)
self.stream.print_line()
def print_requested_permissions(self, indent: int = 4):
if not utils.is_empty(self.package.requestedPermissions):
stream = self.stream.create(max_level=PrintLevel.normal)
self.stream.print("RequestedPermissions:", indent=indent, level=self.stream.title)
for permission in self.package.requestedPermissions:
self._print_permission(stream, permission, indent=indent + 4, identity="RequestedPermission")
self.stream.print_line()
def print_permissions(self, indent: int = 4):
if not utils.is_empty(self.package.permissions):
self.stream.print("Permissions:", indent=indent, level=self.stream.title)
for permission in self.package.permissions:
self._print_permission(self.stream, permission, indent=indent + 4, identity="Permission")
self.stream.print_line()
def print_activities(self, indent: int = 4):
if not utils.is_empty(self.package.activities):
self.stream.print("Activities:", indent=indent, level=self.stream.title)
for activity in self.package.activities:
self._print_component(self.stream, activity, indent=indent + 4, identity="Activity")
self.stream.print_line()
def print_services(self, indent: int = 4):
if not utils.is_empty(self.package.services):
self.stream.print("Services:", indent=indent, level=self.stream.title)
for service in self.package.services:
self._print_component(self.stream, service, indent=indent + 4, identity="Service")
self.stream.print_line()
def print_receivers(self, indent: int = 4):
if not utils.is_empty(self.package.receivers):
self.stream.print("Receivers:", indent=indent, level=self.stream.title)
for receiver in self.package.receivers:
self._print_component(self.stream, receiver, indent=indent + 4, identity="Receiver")
self.stream.print_line()
def print_providers(self, indent: int = 4):
if not utils.is_empty(self.package.providers):
self.stream.print("Providers:", indent=indent, level=self.stream.title)
for provider in self.package.providers:
self._print_component(self.stream, provider, indent=indent + 4, identity="Provider")
self.stream.print_line()
@staticmethod
def _print_permission(stream: PrintStreamWrapper, permission: Permission, indent: int = 0, identity: str = None):
if permission.is_defined():
stream.print("%s [%s] %s" % (identity, permission, permission.protection), indent=indent,
level=stream.dangerous if permission.is_dangerous() else stream.normal)
@staticmethod
def _print_component(stream: PrintStreamWrapper, component: Component, indent: int = 0, identity: str = None):
if not component.enabled:
description = "disabled"
level = stream.useless
stream = stream.create(max_level=stream.useless)
elif component.is_dangerous():
description = "exported"
level = stream.dangerous if component.is_dangerous() else stream.normal
stream = stream.create(min_level=stream.dangerous_normal)
else:
description = "exported" if component.exported else ""
level = stream.normal
stream = stream.create(max_level=stream.normal)
stream.print("%s [%s] %s" % (identity, component, description), indent=indent, level=level)
if isinstance(component, Activity) or isinstance(component, Service) or isinstance(component, Receiver):
PackagePrinter._print_permission(stream, component.permission, indent=indent + 4, identity="Permission")
elif isinstance(component, Provider):
stream.print("Authority [%s]" % component.authority, indent=indent + 4, level=level)
PackagePrinter._print_permission(stream, component.readPermission, indent=indent + 4,
identity="ReadPermission")
PackagePrinter._print_permission(stream, component.writePermission, indent=indent + 4,
identity="writePermission")
for pattern in component.uriPermissionPatterns:
stream.print("UriPermissionPattern [%s]" % pattern, indent=indent + 4, level=level)
for permission in component.pathPermissions:
stream.print("PathPermission [%s]" % permission, indent=indent + 4,
level=stream.dangerous if permission.is_dangerous() else stream.normal)
PackagePrinter._print_permission(stream, permission.readPermission, indent=indent + 8,
identity="ReadPermission")
PackagePrinter._print_permission(stream, permission.writePermission, indent=indent + 8,
identity="writePermission")
if not utils.is_empty(component.intents):
for intent in component.intents:
PackagePrinter._print_intent(stream, intent, indent=indent + 4, level=level)
@staticmethod
def _print_intent(stream: PrintStreamWrapper, intent: IntentFilter, indent: int = 0,
level: int = PrintLevel.normal):
stream.print("IntentFilter:", indent=indent, level=level)
for action in intent.actions:
stream.print("Action [%s]" % action, indent=indent + 4, level=level)
for category in intent.categories:
stream.print("Category [%s]" % category, indent=indent + 4, level=level)
for scheme in intent.dataSchemes:
stream.print("Scheme [%s]" % scheme, indent=indent + 4, level=level)
for scheme in intent.dataSchemeSpecificParts:
stream.print("Scheme [%s]" % scheme, indent=indent + 4, level=level)
for authority in intent.dataAuthorities:
stream.print("Authority [%s]" % authority, indent=indent + 4, level=level)
for path in intent.dataPaths:
stream.print("Path [%s]" % path, indent=indent + 4, level=level)
for type in intent.dataTypes:
stream.print("Type [%s]" % type, indent=indent + 4, level=level)
@entry_point(known_errors=[AdbError])
def main():
    """CLI entry point: fetch package info from an adb device and print it.

    Package-selection flags are mutually exclusive (exactly one required).
    With -dang/--dangerous, only packages flagged dangerous are printed, and
    only their dangerous sections.
    """
    parser = AdbArgumentParser(description='fetch application info')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-a', '--all', action='store_true', default=False,
                       help='fetch all apps')
    group.add_argument('-t', '--top', action='store_true', default=False,
                       help='fetch top-level app only')
    group.add_argument('-p', '--packages', metavar="pkg", action='store', nargs='+', default=None,
                       help='fetch target apps only')
    group.add_argument('--system', action='store_true', default=False,
                       help='fetch system apps only')
    group.add_argument('--non-system', action='store_true', default=False,
                       help='fetch non-system apps only')
    parser.add_argument('-b', '--basic-info', action='store_true', default=False,
                        help='display basic info only')
    parser.add_argument('-dang', '--dangerous', action='store_true', default=False,
                        help='display dangerous permissions and components only')
    parser.add_argument('-o', '--order-by', metavar="field", action='store', nargs='+', default=['userId', 'name'],
                        choices=['name', 'appName', 'userId', 'sourceDir',
                                 'enabled', 'system', 'debuggable', 'allowBackup'],
                        help='order by target field')
    args = parser.parse_args()
    device = Device(args.parse_adb_serial())
    # Resolve the package set according to whichever selection flag was given;
    # the final else corresponds to -a/--all.
    if args.top:
        packages = device.get_packages(device.get_top_package_name(), basic_info=args.basic_info)
    elif not utils.is_empty(args.packages):
        packages = device.get_packages(*args.packages, basic_info=args.basic_info)
    elif args.system:
        packages = device.get_packages(system=True, basic_info=args.basic_info)
    elif args.non_system:
        packages = device.get_packages(non_system=True, basic_info=args.basic_info)
    else:
        packages = device.get_packages(basic_info=args.basic_info)
    # Multi-key sort: missing fields fall back to "" so sorting never raises.
    if not utils.is_empty(args.order_by):
        packages = sorted(packages, key=lambda x: [utils.get_item(x, k, default="") for k in args.order_by])
    min_level = PrintLevel.min
    if args.dangerous:
        min_level = PrintLevel.dangerous_normal
    stream = PrintStream(min_level=min_level)
    for package in packages:
        printer = PackagePrinter(stream, package)
        if not args.dangerous:
            # Full report for every package.
            printer.print_package()
            printer.print_requested_permissions()
            printer.print_permissions()
            printer.print_activities()
            printer.print_services()
            printer.print_receivers()
            printer.print_providers()
            continue
        # Dangerous-only mode: print just the sections flagged as risky.
        if package.is_dangerous():
            printer.print_package()
            if package.has_dangerous_permission():
                printer.print_permissions()
            if package.has_dangerous_activity():
                printer.print_activities()
            if package.has_dangerous_service():
                printer.print_services()
            if package.has_dangerous_receiver():
                printer.print_receivers()
            if package.has_dangerous_provider():
                printer.print_providers()
# Run the CLI only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"linktools.logger.message",
"linktools.decorator.entry_point",
"linktools.android.AdbArgumentParser",
"linktools.utils.is_empty",
"linktools.utils.get_item"
] | [((11455, 11491), 'linktools.decorator.entry_point', 'entry_point', ([], {'known_errors': '[AdbError]'}), '(known_errors=[AdbError])\n', (11466, 11491), False, 'from linktools.decorator import entry_point\n'), ((11517, 11572), 'linktools.android.AdbArgumentParser', 'AdbArgumentParser', ([], {'description': '"""fetch application info"""'}), "(description='fetch application info')\n", (11534, 11572), False, 'from linktools.android import Device, AdbError, AdbArgumentParser, Package, Permission, Component, Activity, Service, Receiver, Provider, IntentFilter\n'), ((2369, 2385), 'linktools.logger.message', 'logger.message', ([], {}), '()\n', (2383, 2385), False, 'from linktools import utils, logger\n'), ((13553, 13582), 'linktools.utils.is_empty', 'utils.is_empty', (['args.order_by'], {}), '(args.order_by)\n', (13567, 13582), False, 'from linktools import utils, logger\n'), ((5178, 5227), 'linktools.utils.is_empty', 'utils.is_empty', (['self.package.requestedPermissions'], {}), '(self.package.requestedPermissions)\n', (5192, 5227), False, 'from linktools import utils, logger\n'), ((5671, 5711), 'linktools.utils.is_empty', 'utils.is_empty', (['self.package.permissions'], {}), '(self.package.permissions)\n', (5685, 5711), False, 'from linktools import utils, logger\n'), ((6063, 6102), 'linktools.utils.is_empty', 'utils.is_empty', (['self.package.activities'], {}), '(self.package.activities)\n', (6077, 6102), False, 'from linktools import utils, logger\n'), ((6443, 6480), 'linktools.utils.is_empty', 'utils.is_empty', (['self.package.services'], {}), '(self.package.services)\n', (6457, 6480), False, 'from linktools import utils, logger\n'), ((6815, 6853), 'linktools.utils.is_empty', 'utils.is_empty', (['self.package.receivers'], {}), '(self.package.receivers)\n', (6829, 6853), False, 'from linktools import utils, logger\n'), ((7193, 7231), 'linktools.utils.is_empty', 'utils.is_empty', (['self.package.providers'], {}), '(self.package.providers)\n', (7207, 7231), False, 
'from linktools import utils, logger\n'), ((10179, 10212), 'linktools.utils.is_empty', 'utils.is_empty', (['component.intents'], {}), '(component.intents)\n', (10193, 10212), False, 'from linktools import utils, logger\n'), ((13138, 13167), 'linktools.utils.is_empty', 'utils.is_empty', (['args.packages'], {}), '(args.packages)\n', (13152, 13167), False, 'from linktools import utils, logger\n'), ((1927, 1982), 'linktools.logger.message', 'logger.message', (['text'], {'style': 'Style.BRIGHT', 'indent': 'indent'}), '(text, style=Style.BRIGHT, indent=indent)\n', (1941, 1982), False, 'from linktools import utils, logger\n'), ((2039, 2130), 'linktools.logger.message', 'logger.message', (['text'], {'fore': 'Fore.RED', 'back': 'Back.WHITE', 'style': 'Style.BRIGHT', 'indent': 'indent'}), '(text, fore=Fore.RED, back=Back.WHITE, style=Style.BRIGHT,\n indent=indent)\n', (2053, 2130), False, 'from linktools import utils, logger\n'), ((2181, 2275), 'linktools.logger.message', 'logger.message', (['text'], {'fore': 'Fore.YELLOW', 'back': 'Back.WHITE', 'style': 'Style.BRIGHT', 'indent': 'indent'}), '(text, fore=Fore.YELLOW, back=Back.WHITE, style=Style.BRIGHT,\n indent=indent)\n', (2195, 2275), False, 'from linktools import utils, logger\n'), ((2298, 2333), 'linktools.logger.message', 'logger.message', (['text'], {'indent': 'indent'}), '(text, indent=indent)\n', (2312, 2333), False, 'from linktools import utils, logger\n'), ((13635, 13667), 'linktools.utils.get_item', 'utils.get_item', (['x', 'k'], {'default': '""""""'}), "(x, k, default='')\n", (13649, 13667), False, 'from linktools import utils, logger\n')] |
from itertools import product
from Tables import query
from .ImageHelper import *
get_width = lambda v: int(21 + 4*(v - 1))
def get_position_info(version:int) -> list:
    """Return (row, col, color) triples for the three 7x7 finder patterns."""
    width = int(21 + 4 * (version - 1))  # inlined get_width
    pattern = [
        [1, 1, 1, 1, 1, 1, 1],
        [1, 0, 0, 0, 0, 0, 1],
        [1, 0, 1, 1, 1, 0, 1],
        [1, 0, 1, 1, 1, 0, 1],
        [1, 0, 1, 1, 1, 0, 1],
        [1, 0, 0, 0, 0, 0, 1],
        [1, 1, 1, 1, 1, 1, 1]
    ]
    base = [(r, c, pattern[r][c]) for r in range(7) for c in range(7)]
    info = list(base)                                          # upper-left
    info.extend((r, c + width - 7, v) for r, c, v in base)     # upper-right
    info.extend((r + width - 7, c, v) for r, c, v in base)     # lower-left
    return info
def get_separator_info(version:int) -> list:
    """Return (row, col, 0) triples for the white separator strips.

    Three L-shaped strips, one around each finder pattern (corner labels in
    the generator are informal; the coordinates below match the original).
    """
    width = int(21 + 4 * (version - 1))  # inlined get_width
    info = []
    info.extend((x, 7, 0) for x in range(8))
    info.extend((7, x, 0) for x in range(7))
    info.extend((width - x - 1, 7, 0) for x in range(8))
    info.extend((width - 8, x, 0) for x in range(7))
    info.extend((x, width - 8, 0) for x in range(8))
    info.extend((7, width - x - 1, 0) for x in range(7))
    return info
def get_alignment_info(version:int) -> list:
    """Return (row, col, color) triples for every alignment pattern.

    Candidate 5x5 centers come from the version's alignment-position table;
    a candidate is dropped entirely if any of its 25 modules would overlap a
    finder pattern or separator.
    """
    if version == 1:
        # Version 1 has no alignment patterns.
        return list()
    alignment_pos = query.get_position_of_alignment_patterns(version)
    width = get_width(version)  # NOTE(review): unused in this function
    info = list()
    # 5x5 alignment pattern, indexed relative to its center (offsets -2..2).
    color = [
        [1, 1, 1, 1, 1],
        [1, 0, 0, 0, 1],
        [1, 0, 1, 0, 1],
        [1, 0, 0, 0, 1],
        [1, 1, 1, 1, 1]
    ]
    alignment = [(r, c, color[r+2][c+2]) for r in range(-2, 3) for c in range(-2, 3)]
    possible_center_pos = list(product(alignment_pos, repeat=2))
    occupied_info = get_position_info(version) + get_separator_info(version)
    occupied_pos = [(x[0], x[1]) for x in occupied_info]
    for r, c in possible_center_pos:
        possible_pos = [(r+x[0], c+x[1], x[2]) for x in alignment]
        for pr, pc, _ in possible_pos:
            if (pr, pc) in occupied_pos:
                break
        else:
            # for-else: only keep the block when no module overlapped.
            info += possible_pos
    return info
def get_timing_info(version:int) -> list:
    """Alternating timing-pattern modules along row 6 and column 6.

    Modules at even indices are dark; the boolean `not i % 2` compares equal
    to the 1/0 colors used elsewhere.
    """
    width = int(21 + 4 * (version - 1))  # inlined get_width
    horizontal = [(6, c, not c % 2) for c in range(8, width - 8)]
    vertical = [(r, 6, not r % 2) for r in range(8, width - 8)]
    return horizontal + vertical
def get_dark_module_info(version:int) -> list:
    """Return the single always-dark module at row 4*version + 9, column 8."""
    row = 4 * version + 9
    return [(row, 8, 1)]
def get_format_info(version:int, ec_level:str='', mask_string:str='') -> list:
    """Return (row, col, bit) placements for the 15-bit format information.

    Empty ec_level / mask_string default to 'L' and '000'.  The bit string is
    laid out twice: once around the upper-left finder and once split between
    the upper-right and lower-left finders.
    """
    width = int(21 + 4 * (version - 1))  # inlined get_width
    if not mask_string:
        mask_string = '000'
    if not ec_level:
        ec_level = 'L'
    bits = query.get_format_info_string(ec_level, mask_string)
    positions = []
    positions += [(r, 8) for r in range(9) if r != 6]
    positions += list(reversed([(8, c) for c in range(8) if c != 6]))
    positions += list(reversed([(8, c) for c in range(width - 8, width)]))
    positions += [(r, 8) for r in range(width - 7, width)]
    # The reversed bit string is doubled so zip covers both copies.
    return [(r, c, int(bit)) for bit, (r, c) in zip(bits[::-1] * 2, positions)]
def get_version_info(version:int) -> list:
    """Return (row, col, bit) placements for the 18-bit version information.

    Versions below 7 carry no version information block.
    """
    if version < 7:
        return []
    width = int(21 + 4 * (version - 1))  # inlined get_width
    bits = query.get_version_info_string(version)
    positions = (
        [(r, c) for r in range(6) for c in range(width - 11, width - 8)]
        + [(r, c) for c in range(6) for r in range(width - 11, width - 8)]
    )
    # The reversed bit string is doubled so zip covers both copies.
    return [(r, c, int(bit)) for bit, (r, c) in zip(bits[::-1] * 2, positions)]
def get_message_info(version:int, final_message:str) -> list:
    """Place the final data/EC bit string into the zig-zag data region.

    Walks two-module-wide column pairs from the bottom-right corner,
    alternating upward and downward, skipping every module occupied by a
    function pattern (finders, separators, alignment, timing, dark module,
    version and format areas).  Returns (row, col, bit) triples.
    """
    width = get_width(version)
    info = list()
    non_info = list()
    non_info += get_position_info(version)
    non_info += get_separator_info(version)
    non_info += get_alignment_info(version)
    non_info += get_timing_info(version)
    non_info += get_dark_module_info(version)
    non_info += get_version_info(version)
    non_info += get_format_info(version)
    forbid_pos = [ (x[0], x[1]) for x in non_info ]
    i = 0
    # is_first tracks whether we are in the right-hand column of the current
    # column pair; traversal starts at the bottom-right module moving up.
    is_upward, is_first, r, c = True, True, width - 1, width - 1
    while True:
        if (r, c) not in forbid_pos:
            info += [(r, c, int(final_message[i]))]
            i += 1
            if i == len(final_message):
                break
        if is_first:
            # Step left into the second column of the pair.
            r, c = r, c - 1
            if c < 0:
                r, c = r - (1 if is_upward else -1), c + 1
            else:
                is_first = False
        else:
            # Step back right and one row in the travel direction.
            r, c = r - (1 if is_upward else -1), c + 1
            if (r < 0) if is_upward else (r >= width):
                # Ran off the top/bottom: move to the next column pair and
                # reverse the vertical direction.
                r, c = r + (1 if is_upward else -1), c - 2
                is_upward = not is_upward
            if c == 6:
                # Column 6 holds the vertical timing pattern; skip over it.
                c = c - 1
                is_first = True
    return info
class QRCodeCanvas:
    """Mutable QR-code module matrix with masking, scoring and rendering.

    Gray levels are inverted relative to the usual convention: WHITE modules
    are stored as 0 and BLACK modules as 255; rendering code compensates
    (e.g. ``255 - data`` or ``cmap='binary'``).  GRAY marks modules that
    have not been painted yet.
    """
    # Pixel values used in self.data.
    WHITE = 0
    GRAY  = 130
    BLACK = 255
    def __init__(self, version:int) -> None:
        # Allocate an all-GRAY (unpainted) width x width integer canvas.
        width = get_width(version)
        self.version = version
        self.data = np.zeros((width, width), dtype=int) + QRCodeCanvas.GRAY
    def copy(self) -> object:
        """Return an independent copy (the data array is duplicated)."""
        copied = QRCodeCanvas(1)
        copied.version = self.version
        copied.data = self.data.copy()
        return copied
    def set_val(self, row:int, col:int, val:int) -> None:
        """Set one module to an arbitrary gray value."""
        self.data[row][col] = val
    def set_white(self, row:int, col:int) -> None:
        self.set_val(row, col, QRCodeCanvas.WHITE)
    def set_gray(self, row:int, col:int) -> None:
        self.set_val(row, col, QRCodeCanvas.GRAY)
    def set_black(self, row:int, col:int) -> None:
        self.set_val(row, col, QRCodeCanvas.BLACK)
    def set_by_list(self, info) -> None:
        """Paint modules from (row, col, color) triples; color 1 means black."""
        for r, c, color in info:
            if color == 1:
                self.set_black(r, c)
            else:
                self.set_white(r, c)
    def do_masking(self, mask_func:object) -> None:
        """XOR every data module with mask_func(r, c), skipping function patterns."""
        info = list()
        info += get_position_info(self.version)
        info += get_separator_info(self.version)
        info += get_alignment_info(self.version)
        info += get_timing_info(self.version)
        info += get_dark_module_info(self.version)
        info += get_version_info(self.version)
        info += get_format_info(self.version)
        forbid_pos = [ (x[0], x[1]) for x in info ]
        for r in range(self.data.shape[0]):
            for c in range(self.data.shape[1]):
                if (r, c) not in forbid_pos:
                    # Map 0/255 down to 0/1, XOR with the mask bit, scale back.
                    self.data[r][c] = (self.data[r][c]//255 ^ mask_func(r, c)) * QRCodeCanvas.BLACK
    def evaluate_mask_penalty(self) -> int:
        """Score the canvas with the four standard QR mask penalty rules.

        Rule 1: runs of five-or-more same-colored modules in a row/column.
        Rule 2: 2x2 blocks of a single color.
        Rule 3: finder-like 1:1:3:1:1 pattern flanked by four light modules.
        Rule 4: deviation of the module color balance from 50%.
        """
        width = self.data.shape[0]
        # penalty[k] accumulates the rule-k score; n[k] holds the weights.
        penalty = [0, 0, 0, 0, 0]
        n = [0, 3, 3, 40, 10]
        def evaluate_feature_1():
            ## test row
            for r in range(width):
                curr_color = self.data[r][0]
                count = 1
                for c in range(1, width):
                    if self.data[r][c] == curr_color:
                        count += 1
                    else:
                        curr_color = self.data[r][c]
                        if count >= 5:
                            penalty[1] += n[1] + count - 5
                        count = 1
                # Flush the final run of the row.
                if count >= 5:
                    penalty[1] += n[1] + count - 5
            ## test column
            for c in range(width):
                curr_color = self.data[0][c]
                count = 1
                for r in range(1, width):
                    if self.data[r][c] == curr_color:
                        count += 1
                    else:
                        curr_color = self.data[r][c]
                        if count >= 5:
                            penalty[1] += n[1] + count - 5
                        count = 1
                # Flush the final run of the column.
                if count >= 5:
                    penalty[1] += n[1] + count - 5
        def evaluate_feature_2():
            # A 2x2 block is uniform iff its sum is 4*BLACK or 4*WHITE.
            block_sum = [ QRCodeCanvas.BLACK * 4, QRCodeCanvas.WHITE * 4 ]
            for r in range(width-1):
                for c in range(width-1):
                    if np.sum(self.data[r:r+2, c:c+2]) in block_sum:
                        penalty[2] += n[2]
        def evaluate_feature_3():
            # The 1:1:3:1:1 finder-like run (11 modules incl. 4 light ones).
            ratio = [
                QRCodeCanvas.BLACK,
                QRCodeCanvas.WHITE,
                QRCodeCanvas.BLACK,
                QRCodeCanvas.BLACK,
                QRCodeCanvas.BLACK,
                QRCodeCanvas.WHITE,
                QRCodeCanvas.BLACK
            ]
            patterns = [
                ratio + [QRCodeCanvas.WHITE]*4,
                [QRCodeCanvas.WHITE]*4 + ratio
            ]
            for r in range(width):
                for c in range(width-10):
                    # evaluate horizontally
                    if list(self.data[r][c:c+11].reshape(-1)) in patterns:
                        penalty[3] += n[3]
                    # evaluate vertivally
                    if list(self.data.T[r][c:c+11].reshape(-1)) in patterns:
                        penalty[3] += n[3]
        def evaluate_feature_4():
            total_module = self.data.size
            # NOTE(review): despite the name this counts WHITE-coded modules;
            # the penalty depends only on |proportion - 50|, so the result is
            # the same as counting black ones.
            total_black_module = np.sum(self.data == QRCodeCanvas.WHITE)
            proportion = total_black_module / total_module * 100
            k = int(abs(proportion - 50) // 5)
            penalty[4] += k * n[4]
        evaluate_feature_1()
        evaluate_feature_2()
        evaluate_feature_3()
        evaluate_feature_4()
        return sum(penalty)
    def show(self) -> None:
        """Display the raw canvas (note inverted colors; see class docstring)."""
        show_array(self.data)
    def show_with_line(self) -> None:
        # XXX: debug helper — renders a 9x-scaled view with an orange grid
        # overlaid on module boundaries.
        def add_quiet_zone(data:np.ndarray) -> np.ndarray:
            # One-module white border (the real quiet zone in save() is 4).
            width = data.shape[0]
            new_width = width + 2
            new_data = np.zeros((new_width, new_width), dtype=int) + QRCodeCanvas.WHITE
            new_data[1:width+1, 1:width+1] = data
            return new_data
        def resize(data:np.ndarray) -> np.ndarray:
            # Nearest-neighbor upscale by a factor of 9.
            width = data.shape[0]
            new_width = width * 9
            new_data = np.zeros((new_width, new_width), dtype=int)
            for r in range(width):
                for c in range(width):
                    new_data[9*r:9*(r+1), 9*c:9*(c+1)] = data[r][c]
            return new_data
        # Invert to conventional colors, upscale, then draw grid lines.
        new_data = np.array(Image.fromarray(resize(255 - self.data)).convert('RGB'))
        for r in range(0, new_data.shape[0], 9):
            for c in range(new_data.shape[1]):
                if r == 0:
                    continue
                new_data[r][c] = np.asarray([255, 115, 0])
        for c in range(0, new_data.shape[0], 9):
            for r in range(new_data.shape[1]):
                if c == 0:
                    continue
                new_data[r][c] = np.asarray([255, 115, 0])
        show_array(new_data, cmap='viridis')
    def save(self, path:str, mul:int=9) -> None:
        """Write the code to *path* with a 4-module quiet zone, scaled by *mul*."""
        def add_quiet_zone(data:np.ndarray) -> np.ndarray:
            # Standard 4-module quiet zone on every side.
            width = data.shape[0]
            new_width = width + 8
            new_data = np.zeros((new_width, new_width), dtype=int) + QRCodeCanvas.WHITE
            new_data[4:width+4, 4:width+4] = data
            return new_data
        def resize(data:np.ndarray) -> np.ndarray:
            # Nearest-neighbor upscale by a factor of mul.
            width = data.shape[0]
            new_width = width * mul
            new_data = np.zeros((new_width, new_width), dtype=int)
            for r in range(width):
                for c in range(width):
                    new_data[mul*r:mul*(r+1), mul*c:mul*(c+1)] = data[r][c]
            return new_data
        save_array_as_image(resize(add_quiet_zone(self.data)) / 255, path)
class QRCodeExtractor:
    """Recover the binary module matrix of a QR code from an image file."""
    def __init__(self):
        # NOTE(review): self.a appears unused by the visible methods.
        self.a = 0
        return
    def __remove_quiet_zone(self, array):
        """Crop the surrounding quiet zone by locating extreme black pixels."""
        upper_left, upper_right, lower_left = None, None, None
        # search the upper-left black pixel
        for r in range(array.shape[0]):
            for c in range(array.shape[1]):
                if array[r][c] == 1:
                    upper_left = (r, c)
                    break
            if upper_left is not None:
                break
        # search the upper-right black pixel
        for r in range(array.shape[0]):
            for c in reversed(range(array.shape[1])):
                if array[r][c] == 1:
                    upper_right = (r, c)
                    break
            if upper_right is not None:
                break
        # search the lower-left black pixel
        for r in reversed(range(array.shape[0])):
            for c in range(array.shape[1]):
                if array[r][c] == 1:
                    lower_left = (r, c)
                    break
            if lower_left is not None:
                break
        # determine the boundaries of the area that excludes the quiet zone
        upper_boundary = min(upper_left[0], upper_right[0])
        lower_boundary = lower_left[0]
        left_boundary = min(upper_right[1], lower_left[1])
        right_boundary = upper_right[1]
        # allocate the new matrix without the quiet zone
        new_array = list()
        for r in range(upper_boundary, lower_boundary+1):
            row = list()
            for c in range(left_boundary, right_boundary+1):
                row.append(array[r][c])
            new_array.append(row)
        new_array = np.asarray(new_array)
        assert new_array.shape[0] == new_array.shape[1], 'This QR Code is not valid.'
        return new_array
    def __normalize_matrix(self, array):
        """Collapse each size x size pixel block down to a single module.

        Assumes the top-left pixel is black (true after quiet-zone removal),
        so the diagonal scan finds the first white pixel at index = module
        size in pixels.
        """
        # check the size for representing a module
        for size in range(array.shape[0]+1):
            if array[size][size] == 0:
                break
        # allocate the normalized matrix
        new_array = list()
        for r in range(0, array.shape[0], size):
            row = list()
            for c in range(0, array.shape[1], size):
                row.append(array[r][c])
            new_array.append(row)
        return np.asarray(new_array)
    def extract(self, path):
        """Extract the module matrix, trying the lossless reader first.

        NOTE(review): the bare ``except:`` silently falls back to the image
        pipeline on *any* error (even KeyboardInterrupt); narrowing it to
        ``except Exception:`` would be safer.
        """
        try:
            array = self.extract_from_original(path)
        except:
            array = self.extract_from_image(path)
        # A valid QR code is square with width 21 + 4k.
        assert array.shape[0] == array.shape[1], 'extraction fail'
        assert (array.shape[0] - 21) % 4 == 0, 'extraction fail'
        return array
    def extract_from_original(self, path):
        """Fast path for pixel-perfect generated images: crop and downsample."""
        array = load_image_as_array(path)
        array = self.__remove_quiet_zone(array)
        array = self.__normalize_matrix(array)
        return array
    def extract_from_image(self, path):
        """Full image-processing pipeline for photographed/scanned codes.

        Steps: grayscale load, thresholding, Sobel edges, finder-pattern
        detection, vertex estimation, perspective rectification, resampling.
        Displays intermediate results via show_array at every stage.
        """
        print('-'*20, path, '-'*20)
        print('Process: Reading image')
        show_array(np.asarray(Image.open(path).convert('RGB')))
        img = np.asarray(Image.open(path).convert('L'))
        h, w = img.shape
        # Downscale very large images to keep pattern detection tractable.
        if img.size > 500000:
            img = bilinear_interpolation(img, 0.5)
        show_array(img)
        print('Process: Gray level slicing')
        img_g = gray_level_slicing(img, 100)
        show_array(img_g)
        print('Process: Convoluting with Sobel operator in x and y direction')
        img_conv = sobel_operation(img_g)
        img_g_conv = gray_level_slicing(scale(img_conv), 200)
        show_array(img_g_conv)
        print('Process: Finding patterns')
        import time
        starttime = time.time()
        group = pattern_detector(img_g_conv)
        interval = time.time() - starttime
        print('Running Time: %dm %ds' % (interval // 60, interval % 60))
        print('\n\nProcess: Finding the locations of the three position patterns')
        position = find_position(group)
        tmat = img_g_conv.copy()
        for r, c in position[0]:
            highlight_pos = [(r + x, c + y) for x in range(-10, 11) for y in range(-10, 11)]
            for r, c in highlight_pos:
                tmat[r][c] = 255
        show_array(tmat)
        tmat = np.zeros_like(img_g_conv)
        for r, c in position[1][0] + position[1][1] + position[1][2]:
            tmat[r][c] = 255
        for r, c in position[0]:
            highlight_pos = [(r + x, c + y) for x in range(-3, 4) for y in range(-3, 4)]
            for r, c in highlight_pos:
                tmat[r][c] = 255
        show_array(tmat)
        print('Process: Find the four vertices of QR Code')
        from_points = get_qrcode_vertex(img_g_conv, *position)
        tmat = img_g_conv.copy()
        for r, c in from_points:
            highlight_pos = [(r + x, c + y) for x in range(-3, 4) for y in range(-3, 4)]
            for r, c in highlight_pos:
                tmat[r][c] = 255
        show_array(tmat)
        img_fp = img.copy()
        for r, c in from_points:
            highlight_pos = [(r + x, c + y) for x in range(-3, 4) for y in range(-3, 4)]
            for r, c in highlight_pos:
                img_fp[r][c] = 255
        show_array(img_fp)
        print('Process: Perspective transform')
        # Vertices are swapped from (row, col) to (x, y) for the transform.
        from_points = [(y, x) for x, y in from_points]
        img_p = perspective_transform(img, from_points)
        img_p = gray_level_slicing(img_p, 100)
        show_array(np.logical_not((img_p / 255)).astype(int), cmap='binary')
        print('Process: Resampling')
        img_r = resampling(img_p)
        print('Process: Result')
        # Invert so 1 means a dark module, matching the generator convention.
        img_r = np.logical_not((img_r / 255)).astype(int)
        show_array(img_r, cmap='binary')
        return img_r
| [
"itertools.product",
"Tables.query.get_format_info_string",
"Tables.query.get_version_info_string",
"time.time",
"Tables.query.get_position_of_alignment_patterns"
] | [((1321, 1370), 'Tables.query.get_position_of_alignment_patterns', 'query.get_position_of_alignment_patterns', (['version'], {}), '(version)\n', (1361, 1370), False, 'from Tables import query\n'), ((2712, 2763), 'Tables.query.get_format_info_string', 'query.get_format_info_string', (['ec_level', 'mask_string'], {}), '(ec_level, mask_string)\n', (2740, 2763), False, 'from Tables import query\n'), ((3286, 3324), 'Tables.query.get_version_info_string', 'query.get_version_info_string', (['version'], {}), '(version)\n', (3315, 3324), False, 'from Tables import query\n'), ((1686, 1718), 'itertools.product', 'product', (['alignment_pos'], {'repeat': '(2)'}), '(alignment_pos, repeat=2)\n', (1693, 1718), False, 'from itertools import product\n'), ((15379, 15390), 'time.time', 'time.time', ([], {}), '()\n', (15388, 15390), False, 'import time\n'), ((15455, 15466), 'time.time', 'time.time', ([], {}), '()\n', (15464, 15466), False, 'import time\n')] |
#!/usr/bin/env python
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import scipy.io
import glob
import os
import csv
import random
import tensorflow as tf
import transition_model_common as tm
# import sys
# sys.path.append('./tensorflow_hmm')
# import tensorflow_hmm.hmm as hmm
def train_model():
    """Train the transition model and save a checkpoint.

    Minimizes post-state reconstruction error plus next-action cross entropy
    with L2 weight regularization, logs accuracy periodically, and writes the
    result to ./models/transition/model.ckpt.  Uses TF1 graph/session APIs.
    """
    dl = tm.DataLoader()
    n_examples = dl.num_examples
    n_input = dl.feature_len
    n_classes = dl.num_labels
    # Parameters
    learning_rate = 0.01
    training_epochs = 5000
    batch_size = 100
    display_step = 50
    tmm = tm.create_model(n_input, n_classes, train=True)
    # Define loss and optimizer
    # residual_pre = tf.reduce_mean(tf.squared_difference(x_pre, ae_pre_out))
    residual_post = tf.reduce_mean(tf.squared_difference(tmm.x_post, tmm.ae_post_out))
    # cost_current = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_current, labels=y_current))
    cost_next = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tmm.pred_next, labels=tmm.y_next))
    # L2 regularization over all prediction-head weights.
    regularizer = tf.nn.l2_loss(tmm.pred_weights[0])
    for i in range(1, len(tmm.pred_weights)):
        regularizer += tf.nn.l2_loss(tmm.pred_weights[i])
    # total_loss = 0.01 * (residual_pre + residual_post) + cost_current + cost_next
    total_loss = 0.01 * (residual_post) + cost_next + 0.001 * regularizer
    # total_loss = cost_next + cost_current
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss)
    # Initializing the variables
    init = tf.global_variables_initializer()
    # Calculate accuracy
    # correct_pred_current = tf.equal(tf.argmax(pred_current, 1), tf.argmax(y_current, 1))
    correct_pred_next = tf.equal(tf.argmax(tmm.pred_next, 1), tf.argmax(tmm.y_next, 1))
    # accuracy_current = tf.reduce_mean(tf.cast(correct_pred_current, 'float'))
    accuracy_next = tf.reduce_mean(tf.cast(correct_pred_next, 'float'))
    # Launch the graph
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        writer = tf.summary.FileWriter("tensorboard/train", sess.graph)
        # Training cycle
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(n_examples/batch_size)
            # Loop over all batches
            for i in range(total_batch):
                x_pre_batch, x_post_batch, y_current_batch, y_next_batch = dl.next_training_batch(batch_size)
                # Run optimization op (backprop) and cost op (to get loss value)
                # feed = {x_pre: x_pre_batch, x_post: x_post_batch, y_current: y_current_batch, y_next: y_next_batch }
                feed = {tmm.x_post: x_post_batch,
                        tmm.y_current: y_current_batch,
                        tmm.y_next: y_next_batch,
                        tmm.keep_prob: 0.7}
                _, c = sess.run([optimizer, total_loss], feed_dict=feed)
                # Compute average loss
                avg_cost += c / total_batch
            # Display logs per epoch step
            if epoch % display_step == 0:
                print('Epoch: {:04d} cost: {:.9f}'.format(epoch, avg_cost))
                # print('    train accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.training_post_data, y_next: dl.training_next_action})))
                # print('    test accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.testing_post_data, y_next: dl.testing_next_action})))
                print('    train accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.training_post_data,
                                                                                     tmm.y_current: dl.training_current_action,
                                                                                     tmm.y_next: dl.training_next_action,
                                                                                     tmm.keep_prob: 1.0})))
                print('    test accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.testing_post_data,
                                                                                    tmm.y_current: dl.testing_current_action,
                                                                                    tmm.y_next: dl.testing_next_action,
                                                                                    tmm.keep_prob: 1.0})))
                # print('    train accuracy (current): {:.9f}'.format(accuracy_current.eval({x_pre: dl.training_pre_data, tmm.x_post: dl.training_post_data, y_current: dl.training_current_action})))
                # print('    test accuracy (current): {:.9f}'.format(accuracy_current.eval({x_pre: dl.testing_pre_data, tmm.x_post: dl.testing_post_data, y_current: dl.testing_current_action})))
                # Per-action breakdown (helper defined elsewhere in this module).
                test_action_accuracy(accuracy_next, tmm, dl, training=False)
        print("Optimization Finished!")
        if not os.path.exists('./models/transition'):
            os.mkdir('./models/transition')
        saver.save(sess, './models/transition/model.ckpt')
        writer.close()
def train_mapping():
    """Train the robot-to-human state mapping on top of a trained transition model.

    Restores ./models/transition/model.ckpt, then fits only the mapping (and
    batch-norm beta) variables to minimize the L2 distance between mapped
    robot states and ground-truth human encodings, logging next-action
    accuracy through the frozen transition head.  Saves ./models/map.
    """
    dl = tm.DataLoader()
    n_input = dl.feature_len
    n_classes = dl.num_labels
    tmm = tm.create_model(n_input, n_classes, train=True)
    # Launch the graph
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './models/transition/model.ckpt')
        rdl = tm.RobotDataLoader(dl, tmm.x_post, tmm.ae_post_enc, tmm.keep_prob)
        # NOTE(review): human_test_data is unused below.
        robot_test_data, human_test_data, y_current_test_data, y_next_test_data = rdl.extract_data_as_arrays(train=False)
        n_dim1 = rdl.human_enc_dim
        n_dim2 = rdl.robot_dim
        # tf Graph input
        # x = tf.placeholder('float', [None, n_dim2], name='x_robot_enc')
        y_gt = tf.placeholder('float', [None, n_dim1], name='y_human_gt')
        # y = create_mapping_model(x, n_dim2, n_dim1, train=True)
        x = tmm.x_map_input
        y = tmm.y_map_output
        # Parameters
        learning_rate = 0.001
        training_epochs = 10000
        batch_size = 100
        display_step = 50
        total_batch = 20
        # Define loss and optimizer
        # cost_next = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_next, labels=y_next))
        residual = tf.reduce_mean(tf.squared_difference(y, y_gt))
        # L2 regularization over the mapping-network weights only.
        regularizers = tf.nn.l2_loss(tmm.mapping_weights[0])
        for i in range(1, len(tmm.mapping_weights)):
            regularizers += tf.nn.l2_loss(tmm.mapping_weights[i])
        total_loss = residual + 0.001 * regularizers
        # total_loss = residual
        # total_loss = 0.01 * residual + cost_next
        # optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss, var_list=[ae_post_out, y_current, y_next, x, y_gt, keep_prob])
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss)
        # Only initialize the mapping/beta variables so the restored
        # transition-model weights are left untouched.
        new_vars = []
        for var in tf.global_variables():
            if 'mapping' in var.name or 'beta' in var.name:
                new_vars.append(var)
        # Initializing the variables
        #init = tf.global_variables_initializer()
        # init = tf.initialize_variables(new_vars)
        init = tf.variables_initializer(new_vars)
        # Launch the graph
        sess.run(init)
        writer = tf.summary.FileWriter("tensorboard/map", sess.graph)
        # Calculate accuracy
        # correct_pred_current = tf.equal(tf.argmax(pred_current, 1), tf.argmax(y_current, 1))
        correct_pred_next = tf.equal(tf.argmax(tmm.pred_next, 1), tf.argmax(tmm.y_next, 1))
        # accuracy_current = tf.reduce_mean(tf.cast(correct_pred_current, 'float'))
        accuracy_next = tf.reduce_mean(tf.cast(correct_pred_next, 'float'))
        num_training = training_epochs * total_batch * batch_size
        # robot data projected to human subspace
        mapped_robot_data = np.zeros((num_training, n_dim1), dtype=np.float)
        action_idx_data = np.full((num_training, len(dl.index_name)), -2, dtype=np.int)
        next_action_idx_data = np.full((num_training, len(dl.index_name)), -2, dtype=np.int)
        data_idx = 0
        # Training cycle
        for epoch in range(training_epochs):
            avg_cost = 0.
            # Loop over all batches
            for i in range(total_batch):
                x_batch, y_batch, action_idx_batch, next_action_idx_batch = rdl.next_training_batch(batch_size)
                # Run optimization op (backprop) and cost op (to get loss value)
                feed = {x: x_batch, y_gt: y_batch, tmm.keep_prob: 0.7}
                _, c = sess.run([optimizer, total_loss], feed_dict=feed)
                # Compute average loss
                avg_cost += c / total_batch
                # collect data to feed to accuracy eval (testing)
                mapped_robot_enc_test = tmm.y_map_output.eval({tmm.x_map_input: robot_test_data, tmm.keep_prob: 1.0})
                action_idx_test = dl.one_hot(y_current_test_data, len(dl.index_name))
                next_action_idx_test = dl.one_hot(y_next_test_data, len(dl.index_name))
                # collect data to feed to accuracy eval (training)
                mapped_robot_enc = tmm.y_map_output.eval({tmm.x_map_input: x_batch, tmm.keep_prob: 1.0})
                action_idx = dl.one_hot(action_idx_batch, len(dl.index_name))
                next_action_idx = dl.one_hot(next_action_idx_batch, len(dl.index_name))
                mapped_robot_data[data_idx:data_idx+batch_size,:] = mapped_robot_enc
                action_idx_data[data_idx:data_idx+batch_size,:] = action_idx
                next_action_idx_data[data_idx:data_idx+batch_size,:] = next_action_idx
                data_idx += batch_size
            # Display logs per epoch step
            if epoch % display_step == 0:
                print('Epoch: {:04d} cost: {:.9f}'.format(epoch, avg_cost))
                print('    accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.ae_post_enc: mapped_robot_data[0:data_idx],
                                                                               tmm.y_current: action_idx_data[0:data_idx],
                                                                               tmm.y_next: next_action_idx_data[0:data_idx]})))
                # test_action_accuracy_map(accuracy_next, ae_post_enc, y_current, y_next,
                #                          mapped_robot_data[0:data_idx], action_idx_data[0:data_idx],
                #                          next_action_idx_data[0:data_idx], dl, train=True)
                # Per-action breakdown (helper defined elsewhere in this module).
                test_action_accuracy_map(accuracy_next, tmm, mapped_robot_enc_test,
                                         action_idx_test, next_action_idx_test, dl, False)
        print("Optimization Finished!")
        if not os.path.exists('./models/map'):
            os.mkdir('./models/map')
        saver.save(sess, './models/map/model.ckpt')
        writer.close()
'''
Map from robot state to human (encoded) state
'''
def run_mapping():
    """Restore the trained robot-to-human mapping and spot-check it.

    Loads the transition model plus the saved mapping checkpoint, then for 10
    random robot/human sample pairs maps the robot post-condition features
    into the human encoding space, predicts the next action, and prints the
    prediction next to the ground truth along with the full probability table.

    Changes vs. original: removed unused locals ``n_dim1``/``n_dim2`` and the
    unused loop index; the unused human sample from ``get_random_pair`` is
    bound to ``_y_human`` to make the dead value explicit.
    """
    dl = tm.DataLoader()
    n_input = dl.feature_len
    n_classes = dl.num_labels
    tmm = tm.create_model(n_input, n_classes, train=True)
    pred_next_sm = tf.nn.softmax(tmm.pred_next)
    # Launch the graph
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './models/map/model.ckpt')
        rdl = tm.RobotDataLoader(dl, tmm.x_post, tmm.ae_post_enc)
        for _ in range(10):
            # The human-side sample is not needed here; the mapping is
            # evaluated from the raw robot features only.
            _y_human, x_robot, action_idx, next_action_idx = rdl.get_random_pair()
            y_action = dl.one_hot(np.full((1,), action_idx), len(dl.index_name))
            y_output_post = tmm.y_map_output.eval({tmm.x_map_input: np.reshape(x_robot, (1, 7)),
                                                   tmm.keep_prob: 1.0})
            res_next = pred_next_sm.eval({tmm.ae_post_enc: y_output_post,
                                          tmm.y_current: y_action,
                                          tmm.keep_prob: 1.0})
            res_next_idx = np.argmax(res_next)
            print('Prediction next: {} {}, true {} {}'.format(res_next_idx, dl.index_name[res_next_idx],
                                                              next_action_idx, dl.index_name[next_action_idx]))
            print(' Probabilities (next):')
            for j in range(len(dl.index_name)):
                name = dl.index_name[j]
                tab_str = get_tab_str(name)
                print(' {}{}{:.6f}'.format(name, tab_str, res_next[0, j]))
def run_demo():
    """Run the robot-to-human mapping on synthetic robot measurements.

    Builds the model, restores the mapping checkpoint, fabricates one random
    robot post-condition measurement plus a random current action, and prints
    the predicted next action with its probability table.

    Fixes vs. original:
    - ``random.randint(1, 13)`` is inclusive on both ends, so it could emit
      13; ``tm.DataLoader.one_hot(action, 13)`` only supports indices 0-12
      (``index_name`` has 13 entries). The upper bound is now 12.
    - The mapping-network eval no longer feeds ``y_current`` (the mapping
      takes only robot features + keep_prob, matching ``run_mapping``).
    - Removed unused locals ``n_dim1``/``n_dim2`` and the dead
      ``x_robot_pre`` sample.
    """
    index_name = ['end', 'approach', 'move', 'grasp_left', 'grasp_right', 'ungrasp_left', 'ungrasp_right',
                  'twist', 'push', 'neutral', 'pull', 'pinch', 'unpinch']
    n_input = 159
    n_classes = 13
    tmm = tm.create_model(n_input, n_classes, train=True)
    pred_next_sm = tf.nn.softmax(tmm.pred_next)
    # Launch the graph
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './models/map/model.ckpt')
        # INSERT ACTUAL ROBOT MEASUREMENTS HERE
        # NOTE: if running from actual robot data, don't forget to divide the gripper
        # state by 255 (last dimension of feature vector)
        x_robot_post = np.random.normal(size=(1, 7))
        # Valid action indices are 1..12 ('end' at index 0 is excluded).
        action = np.full((1,), random.randint(1, 12))
        y_action = tm.DataLoader.one_hot(action, 13)
        y_output_post = tmm.y_map_output.eval({tmm.x_map_input: x_robot_post,
                                               tmm.keep_prob: 1.0})
        res_next = pred_next_sm.eval({tmm.ae_post_enc: y_output_post,
                                      tmm.y_current: y_action,
                                      tmm.keep_prob: 1.0})
        res_next_idx = np.argmax(res_next)
        print('Prediction next: {} {}'.format(res_next_idx, index_name[res_next_idx]))
        print(' Probabilities (next):')
        for j in range(len(index_name)):
            name = index_name[j]
            tab_str = get_tab_str(name)
            print(' {}{}{:.6f}'.format(name, tab_str, res_next[0,j]))
'''
Tests the accuracy of a single action's encoding/decoding
'''
def test_action_accuracy(accuracy_next, tmm, dl=None,
                         training=False):
    """Print per-action next-prediction accuracy over the train or test split.

    :param accuracy_next: TF scalar op measuring next-action accuracy.
    :param tmm: transition model bundle holding the input placeholders.
    :param dl: optional ``tm.DataLoader``. The original signature used
        ``dl=tm.DataLoader()``, which constructed (and shared) a loader at
        import time; the default is now created lazily per call, which is
        backward-compatible for every caller that passed ``dl`` explicitly.
    :param training: evaluate on training data when True, else testing data.
    """
    if dl is None:
        dl = tm.DataLoader()
    if training:
        post_data = dl.training_post_data
        current_action = dl.training_current_action
        next_action = dl.training_next_action
        type_str = 'training'
    else:
        post_data = dl.testing_post_data
        current_action = dl.testing_current_action
        next_action = dl.testing_next_action
        type_str = 'testing'
    # Index 0 ('end') is skipped: iterate the real actions only.
    for action_idx in range(1, len(dl.index_name)):
        # find matching indices for this action
        index_arr = np.full((1, 1), action_idx)
        action_one_hot = dl.one_hot(index_arr, len(dl.index_name))
        action_indices = np.where((current_action == action_one_hot).all(axis=1))[0]
        tab_str = get_tab_str(dl.index_name[action_idx])
        print(' {}:{} {} accuracy (next): {:.9f}'.format(dl.index_name[action_idx],
                                                         tab_str,
                                                         type_str,
                                                         accuracy_next.eval({tmm.x_post: post_data[action_indices,:],
                                                                             tmm.y_current: current_action[action_indices,:],
                                                                             tmm.y_next: next_action[action_indices,:],
                                                                             tmm.keep_prob: 1.0})))
'''
Tests the accuracy of a single action's encoding/decoding during mapping
'''
def test_action_accuracy_map(accuracy_next, tmm, mapped_robot_data,
                             action_idx_data, next_action_idx_data, dl=None, train=False):
    """Print per-action next-prediction accuracy for mapped robot encodings.

    :param accuracy_next: TF scalar op measuring next-action accuracy.
    :param tmm: transition model bundle holding the input placeholders.
    :param mapped_robot_data: robot states already mapped into the human
        encoding space (fed to ``tmm.ae_post_enc``).
    :param action_idx_data: one-hot current actions, row-aligned with
        ``mapped_robot_data``.
    :param next_action_idx_data: one-hot next actions, row-aligned.
    :param dl: optional ``tm.DataLoader``. The original default
        ``dl=tm.DataLoader()`` was evaluated at import time (shared instance
        and import-time side effect); it is now created lazily per call.
    :param train: label the output as 'training' when True, else 'testing'.
    """
    if dl is None:
        dl = tm.DataLoader()
    for action_idx in range(1, len(dl.index_name)):
        # find matching indices for this action
        index_arr = np.full((1, 1), action_idx)
        action_one_hot = dl.one_hot(index_arr, len(dl.index_name))
        action_indices = np.where((action_idx_data == action_one_hot).all(axis=1))[0]
        if train:
            type_str = 'training'
        else:
            type_str = 'testing'
        tab_str = get_tab_str(dl.index_name[action_idx])
        print(' {}:{} {} accuracy (next): {:.9f}'.format(dl.index_name[action_idx],
                                                         tab_str,
                                                         type_str,
                                                         accuracy_next.eval({tmm.ae_post_enc: mapped_robot_data[action_indices,:],
                                                                             tmm.y_current: action_idx_data[action_indices,:],
                                                                             tmm.y_next: next_action_idx_data[action_indices,:],
                                                                             tmm.keep_prob: 1.0})))
def test_model():
    """Restore the saved model and report next-action accuracy.

    Builds the transition model sized from the data loader, restores the
    './models/map/model.ckpt' checkpoint, and prints overall accuracy on the
    training split and the testing split.
    """
    loader = tm.DataLoader()
    tmm = tm.create_model(loader.feature_len, loader.num_labels)
    # Accuracy = fraction of samples whose argmax prediction matches ground truth.
    hits = tf.equal(tf.argmax(tmm.pred_next, 1), tf.argmax(tmm.y_next, 1))
    accuracy_next = tf.reduce_mean(tf.cast(hits, 'float'))
    # Launch the graph
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './models/map/model.ckpt')
        train_acc = accuracy_next.eval({tmm.x_post: loader.training_post_data,
                                        tmm.y_current: loader.training_current_action,
                                        tmm.y_next: loader.training_next_action})
        print(' train accuracy (next): {:.9f}'.format(train_acc))
        test_acc = accuracy_next.eval({tmm.x_post: loader.testing_post_data,
                                       tmm.y_current: loader.testing_current_action,
                                       tmm.y_next: loader.testing_next_action})
        print(' test accuracy (next): {:.9f}'.format(test_acc))
'''
Print out the probability tables of the current pre and post-condition observations
'''
def test_sequence():
    """Print the next-action probability table for the first training sample.

    Restores './models/transition/model.ckpt' and evaluates the softmax over
    next actions for training sample 0.

    Changes vs. original: the loop that unconditionally hit ``break`` after
    one iteration is now an explicit single-sample guard, and the dead
    ``x_pre_data`` local (only referenced by commented-out code) is removed.
    """
    dl = tm.DataLoader()
    tmm = tm.create_model(dl.feature_len, dl.num_labels)
    pred_next_sm = tf.nn.softmax(tmm.pred_next)
    # Launch the graph
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './models/transition/model.ckpt')
        if dl.training_pre_data.shape[0] > 0:
            x_post_data = np.expand_dims(dl.training_post_data[0,:], axis=0)
            y_action = np.reshape(dl.training_current_action[0,:], (1, len(dl.index_name)))
            res_next = pred_next_sm.eval({tmm.x_post: x_post_data,
                                          tmm.y_current: y_action})
            res_next_idx = np.argmax(res_next)
            print('Prediction next: {} {}'.format(res_next_idx, dl.index_name[res_next_idx]))
            print(' Probabilities (next):')
            for j in range(len(dl.index_name)):
                name = dl.index_name[j]
                tab_str = get_tab_str(name)
                print(' {}:{}{:.6f}'.format(name, tab_str, res_next[0,j]))
'''
Encode the human measurements into low-dimensional subspace
'''
def encode_human():
    """Print the low-dimensional autoencoder encoding of training sample 0.

    Restores './models/transition/model.ckpt' and evaluates the
    post-condition encoder on the first training sample.

    Changes vs. original: the loop that unconditionally hit ``break`` after
    one iteration is now an explicit single-sample guard, and the dead
    ``x_pre_data`` local (only referenced by commented-out code) is removed.
    """
    dl = tm.DataLoader()
    tmm = tm.create_model(dl.feature_len, dl.num_labels)
    # Launch the graph
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './models/transition/model.ckpt')
        if dl.training_pre_data.shape[0] > 0:
            x_post_data = np.expand_dims(dl.training_post_data[0,:], axis=0)
            y_enc_post = tmm.ae_post_enc.eval({tmm.x_post: x_post_data})
            # Print the low-dimensional representation
            print(y_enc_post.tolist())
def get_tab_str(action_name):
    """Return tab padding that aligns an action name in console output.

    Names shorter than 7 characters get three tabs, names 7-9 characters
    long get two, and anything longer gets one.

    :param action_name: action label string.
    :return: a string of 1-3 tab characters.
    """
    if len(action_name) < 7:
        return '\t\t\t'
    # len >= 7 is already guaranteed here, so the original's redundant
    # lower-bound check is dropped.
    if len(action_name) < 10:
        return '\t\t'
    return '\t'
def parse_args():
    """Parse the command-line arguments.

    :return: the parsed namespace; ``mode`` selects which entry point runs.
    """
    # Parse input arguments
    arg_parser = ArgumentParser(description=__doc__,
                               formatter_class=ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('mode', default=None,
                            help='train | trainmap | runmap | test | seq | encode')
    return arg_parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # Dispatch table replaces the if/elif ladder; an unrecognized mode is a
    # silent no-op, exactly as before.
    mode_handlers = {
        'train': train_model,
        'test': test_model,
        'seq': test_sequence,
        'encode': encode_human,
        'trainmap': train_mapping,
        'runmap': run_mapping,
        'demo': run_demo,
    }
    handler = mode_handlers.get(args.mode)
    if handler is not None:
        handler()
| [
"tensorflow.nn.softmax",
"transition_model_common.create_model",
"tensorflow.cast",
"tensorflow.variables_initializer",
"transition_model_common.RobotDataLoader",
"os.path.exists",
"numpy.reshape",
"argparse.ArgumentParser",
"tensorflow.squared_difference",
"tensorflow.Session",
"tensorflow.plac... | [((350, 365), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (363, 365), True, 'import transition_model_common as tm\n'), ((583, 630), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {'train': '(True)'}), '(n_input, n_classes, train=True)\n', (598, 630), True, 'import transition_model_common as tm\n'), ((1076, 1110), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tmm.pred_weights[0]'], {}), '(tmm.pred_weights[0])\n', (1089, 1110), True, 'import tensorflow as tf\n'), ((1553, 1586), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1584, 1586), True, 'import tensorflow as tf\n'), ((1980, 1996), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1994, 1996), True, 'import tensorflow as tf\n'), ((5130, 5145), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (5143, 5145), True, 'import transition_model_common as tm\n'), ((5216, 5263), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {'train': '(True)'}), '(n_input, n_classes, train=True)\n', (5231, 5263), True, 'import transition_model_common as tm\n'), ((5300, 5316), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5314, 5316), True, 'import tensorflow as tf\n'), ((11028, 11043), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (11041, 11043), True, 'import transition_model_common as tm\n'), ((11144, 11191), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {'train': '(True)'}), '(n_input, n_classes, train=True)\n', (11159, 11191), True, 'import transition_model_common as tm\n'), ((11211, 11239), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['tmm.pred_next'], {}), '(tmm.pred_next)\n', (11224, 11239), True, 'import tensorflow as tf\n'), ((11483, 11499), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11497, 11499), True, 'import 
tensorflow as tf\n'), ((13273, 13320), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {'train': '(True)'}), '(n_input, n_classes, train=True)\n', (13288, 13320), True, 'import transition_model_common as tm\n'), ((13340, 13368), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['tmm.pred_next'], {}), '(tmm.pred_next)\n', (13353, 13368), True, 'import tensorflow as tf\n'), ((13457, 13473), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (13471, 13473), True, 'import tensorflow as tf\n'), ((15068, 15083), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (15081, 15083), True, 'import transition_model_common as tm\n'), ((16836, 16851), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (16849, 16851), True, 'import transition_model_common as tm\n'), ((18056, 18071), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (18069, 18071), True, 'import transition_model_common as tm\n'), ((18143, 18178), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {}), '(n_input, n_classes)\n', (18158, 18178), True, 'import transition_model_common as tm\n'), ((18572, 18588), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (18586, 18588), True, 'import tensorflow as tf\n'), ((19848, 19863), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (19861, 19863), True, 'import transition_model_common as tm\n'), ((19935, 19970), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {}), '(n_input, n_classes)\n', (19950, 19970), True, 'import transition_model_common as tm\n'), ((19991, 20019), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['tmm.pred_next'], {}), '(tmm.pred_next)\n', (20004, 20019), True, 'import tensorflow as tf\n'), ((20108, 20124), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (20122, 20124), True, 'import tensorflow as tf\n'), ((21405, 21420), 
'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (21418, 21420), True, 'import transition_model_common as tm\n'), ((21492, 21527), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {}), '(n_input, n_classes)\n', (21507, 21527), True, 'import transition_model_common as tm\n'), ((21564, 21580), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (21578, 21580), True, 'import tensorflow as tf\n'), ((22464, 22551), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'ArgumentDefaultsHelpFormatter'}), '(description=__doc__, formatter_class=\n ArgumentDefaultsHelpFormatter)\n', (22478, 22551), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((777, 827), 'tensorflow.squared_difference', 'tf.squared_difference', (['tmm.x_post', 'tmm.ae_post_out'], {}), '(tmm.x_post, tmm.ae_post_out)\n', (798, 827), True, 'import tensorflow as tf\n'), ((976, 1061), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'tmm.pred_next', 'labels': 'tmm.y_next'}), '(logits=tmm.pred_next, labels=tmm.y_next\n )\n', (1015, 1061), True, 'import tensorflow as tf\n'), ((1180, 1214), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tmm.pred_weights[i]'], {}), '(tmm.pred_weights[i])\n', (1193, 1214), True, 'import tensorflow as tf\n'), ((1737, 1764), 'tensorflow.argmax', 'tf.argmax', (['tmm.pred_next', '(1)'], {}), '(tmm.pred_next, 1)\n', (1746, 1764), True, 'import tensorflow as tf\n'), ((1766, 1790), 'tensorflow.argmax', 'tf.argmax', (['tmm.y_next', '(1)'], {}), '(tmm.y_next, 1)\n', (1775, 1790), True, 'import tensorflow as tf\n'), ((1907, 1942), 'tensorflow.cast', 'tf.cast', (['correct_pred_next', '"""float"""'], {}), "(correct_pred_next, 'float')\n", (1914, 1942), True, 'import tensorflow as tf\n'), ((2006, 2018), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2016, 2018), True, 'import 
tensorflow as tf\n'), ((2069, 2123), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""tensorboard/train"""', 'sess.graph'], {}), "('tensorboard/train', sess.graph)\n", (2090, 2123), True, 'import tensorflow as tf\n'), ((5326, 5338), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5336, 5338), True, 'import tensorflow as tf\n'), ((5425, 5491), 'transition_model_common.RobotDataLoader', 'tm.RobotDataLoader', (['dl', 'tmm.x_post', 'tmm.ae_post_enc', 'tmm.keep_prob'], {}), '(dl, tmm.x_post, tmm.ae_post_enc, tmm.keep_prob)\n', (5443, 5491), True, 'import transition_model_common as tm\n'), ((5797, 5855), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, n_dim1]'], {'name': '"""y_human_gt"""'}), "('float', [None, n_dim1], name='y_human_gt')\n", (5811, 5855), True, 'import tensorflow as tf\n'), ((6376, 6413), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tmm.mapping_weights[0]'], {}), '(tmm.mapping_weights[0])\n', (6389, 6413), True, 'import tensorflow as tf\n'), ((6963, 6984), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6982, 6984), True, 'import tensorflow as tf\n'), ((7237, 7271), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['new_vars'], {}), '(new_vars)\n', (7261, 7271), True, 'import tensorflow as tf\n'), ((7341, 7393), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""tensorboard/map"""', 'sess.graph'], {}), "('tensorboard/map', sess.graph)\n", (7362, 7393), True, 'import tensorflow as tf\n'), ((7915, 7963), 'numpy.zeros', 'np.zeros', (['(num_training, n_dim1)'], {'dtype': 'np.float'}), '((num_training, n_dim1), dtype=np.float)\n', (7923, 7963), True, 'import numpy as np\n'), ((11509, 11521), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11519, 11521), True, 'import tensorflow as tf\n'), ((11601, 11652), 'transition_model_common.RobotDataLoader', 'tm.RobotDataLoader', (['dl', 'tmm.x_post', 'tmm.ae_post_enc'], {}), '(dl, tmm.x_post, tmm.ae_post_enc)\n', 
(11619, 11652), True, 'import transition_model_common as tm\n'), ((13483, 13495), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13493, 13495), True, 'import tensorflow as tf\n'), ((13775, 13804), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 7)'}), '(size=(1, 7))\n', (13791, 13804), True, 'import numpy as np\n'), ((13827, 13856), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 7)'}), '(size=(1, 7))\n', (13843, 13856), True, 'import numpy as np\n'), ((13928, 13961), 'transition_model_common.DataLoader.one_hot', 'tm.DataLoader.one_hot', (['action', '(13)'], {}), '(action, 13)\n', (13949, 13961), True, 'import transition_model_common as tm\n'), ((14621, 14640), 'numpy.argmax', 'np.argmax', (['res_next'], {}), '(res_next)\n', (14630, 14640), True, 'import numpy as np\n'), ((15695, 15722), 'numpy.full', 'np.full', (['(1, 1)', 'action_idx'], {}), '((1, 1), action_idx)\n', (15702, 15722), True, 'import numpy as np\n'), ((16989, 17016), 'numpy.full', 'np.full', (['(1, 1)', 'action_idx'], {}), '((1, 1), action_idx)\n', (16996, 17016), True, 'import numpy as np\n'), ((18329, 18356), 'tensorflow.argmax', 'tf.argmax', (['tmm.pred_next', '(1)'], {}), '(tmm.pred_next, 1)\n', (18338, 18356), True, 'import tensorflow as tf\n'), ((18358, 18382), 'tensorflow.argmax', 'tf.argmax', (['tmm.y_next', '(1)'], {}), '(tmm.y_next, 1)\n', (18367, 18382), True, 'import tensorflow as tf\n'), ((18499, 18534), 'tensorflow.cast', 'tf.cast', (['correct_pred_next', '"""float"""'], {}), "(correct_pred_next, 'float')\n", (18506, 18534), True, 'import tensorflow as tf\n'), ((18598, 18610), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (18608, 18610), True, 'import tensorflow as tf\n'), ((20134, 20146), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (20144, 20146), True, 'import tensorflow as tf\n'), ((21590, 21602), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (21600, 21602), True, 'import tensorflow as tf\n'), ((1435, 1486), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1457, 1486), True, 'import tensorflow as tf\n'), ((4932, 4969), 'os.path.exists', 'os.path.exists', (['"""./models/transition"""'], {}), "('./models/transition')\n", (4946, 4969), False, 'import os\n'), ((4983, 5014), 'os.mkdir', 'os.mkdir', (['"""./models/transition"""'], {}), "('./models/transition')\n", (4991, 5014), False, 'import os\n'), ((6321, 6351), 'tensorflow.squared_difference', 'tf.squared_difference', (['y', 'y_gt'], {}), '(y, y_gt)\n', (6342, 6351), True, 'import tensorflow as tf\n'), ((6495, 6532), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tmm.mapping_weights[i]'], {}), '(tmm.mapping_weights[i])\n', (6508, 6532), True, 'import tensorflow as tf\n'), ((7556, 7583), 'tensorflow.argmax', 'tf.argmax', (['tmm.pred_next', '(1)'], {}), '(tmm.pred_next, 1)\n', (7565, 7583), True, 'import tensorflow as tf\n'), ((7585, 7609), 'tensorflow.argmax', 'tf.argmax', (['tmm.y_next', '(1)'], {}), '(tmm.y_next, 1)\n', (7594, 7609), True, 'import tensorflow as tf\n'), ((7734, 7769), 'tensorflow.cast', 'tf.cast', (['correct_pred_next', '"""float"""'], {}), "(correct_pred_next, 'float')\n", (7741, 7769), True, 'import tensorflow as tf\n'), ((10798, 10828), 'os.path.exists', 'os.path.exists', (['"""./models/map"""'], {}), "('./models/map')\n", (10812, 10828), False, 'import os\n'), ((10842, 10866), 'os.mkdir', 'os.mkdir', (['"""./models/map"""'], {}), "('./models/map')\n", (10850, 10866), False, 'import os\n'), ((12506, 12525), 'numpy.argmax', 'np.argmax', (['res_next'], {}), '(res_next)\n', (12515, 12525), True, 'import numpy as np\n'), ((13887, 13908), 'random.randint', 'random.randint', (['(1)', '(13)'], {}), '(1, 13)\n', (13901, 13908), False, 'import random\n'), ((20299, 20349), 'numpy.expand_dims', 'np.expand_dims', (['dl.training_pre_data[i, :]'], {'axis': '(0)'}), '(dl.training_pre_data[i, :], axis=0)\n', (20313, 20349), True, 
'import numpy as np\n'), ((20375, 20426), 'numpy.expand_dims', 'np.expand_dims', (['dl.training_post_data[i, :]'], {'axis': '(0)'}), '(dl.training_post_data[i, :], axis=0)\n', (20389, 20426), True, 'import numpy as np\n'), ((20921, 20940), 'numpy.argmax', 'np.argmax', (['res_next'], {}), '(res_next)\n', (20930, 20940), True, 'import numpy as np\n'), ((21755, 21805), 'numpy.expand_dims', 'np.expand_dims', (['dl.training_pre_data[i, :]'], {'axis': '(0)'}), '(dl.training_pre_data[i, :], axis=0)\n', (21769, 21805), True, 'import numpy as np\n'), ((21831, 21882), 'numpy.expand_dims', 'np.expand_dims', (['dl.training_post_data[i, :]'], {'axis': '(0)'}), '(dl.training_post_data[i, :], axis=0)\n', (21845, 21882), True, 'import numpy as np\n'), ((6848, 6899), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (6870, 6899), True, 'import tensorflow as tf\n'), ((11898, 11923), 'numpy.full', 'np.full', (['(1,)', 'action_idx'], {}), '((1,), action_idx)\n', (11905, 11923), True, 'import numpy as np\n'), ((12013, 12040), 'numpy.reshape', 'np.reshape', (['x_robot', '(1, 7)'], {}), '(x_robot, (1, 7))\n', (12023, 12040), True, 'import numpy as np\n')] |
# !/usr/bin/env python
# coding=UTF-8
"""
@Author: <NAME>
@LastEditors: <NAME>
@Description:
@Date: 2021-08-18
@LastEditTime: 2022-03-19
"""
import zipfile
import pickle
import gzip
import json
import re
from pathlib import Path
from typing import Union, Optional, Sequence
from ..strings import normalize_language, LANGUAGE
_DIR_PATH = Path(__file__).absolute().parent
__all__ = [
"fetch",
]
def fetch(name: str, **kwargs):
    """Load a bundled dictionary resource by name.

    The name is lowercased, any trailing ``_dict``/``-dict`` suffix is
    stripped, dashes become underscores, and the matching ``_fetch_<name>``
    loader in this module is invoked with ``kwargs``.

    Fix vs. original: the loader was invoked via ``eval()`` on a string built
    from caller input; it is now resolved through ``globals()``, which cannot
    execute arbitrary expressions. An unknown name now raises ``KeyError``
    instead of ``NameError``.
    """
    func_name = f"""_fetch_{re.sub("[_-]+dict", "", name.lower()).replace("-", "_")}"""
    return globals()[func_name](**kwargs)
def _fetch_cilin() -> dict:
    """Load the Cilin dictionary pickled inside the bundled zip archive."""
    archive_path = _DIR_PATH / "cilin_dict.zip"
    with zipfile.ZipFile(archive_path, "r") as zf:
        payload = zf.read("cilin_dict.pkl")
    return pickle.loads(payload)
def _fetch_fyh() -> tuple:
    """Load the FYH data: (traditional, variant, hot-word) dictionaries."""
    archive_path = _DIR_PATH / "fyh_dict.zip"
    member_names = ("tra_dict.pkl", "var_dict.pkl", "hot_dict.pkl")
    with zipfile.ZipFile(archive_path, "r") as zf:
        tra_dict, var_dict, hot_dict = (
            pickle.loads(zf.read(member)) for member in member_names
        )
    return tra_dict, var_dict, hot_dict
def _fetch_stopwords(language: str) -> list:
    """Return the stopword list for the given (normalized) language."""
    with gzip.open(_DIR_PATH / "stopwords.json.gz", "rt") as fh:
        table = json.load(fh)
    return table[normalize_language(language).value]
def _fetch_stopwords_zh() -> list:
    """Return the Chinese stopword list."""
    return _fetch_stopwords("zh")
def _fetch_stopwords_en() -> list:
    """Return the English stopword list."""
    return _fetch_stopwords("en")
def _fetch_sim() -> dict:
    """Load the pickled similarity dictionary."""
    raw_bytes = (_DIR_PATH / "sim_dict.pkl").read_bytes()
    return pickle.loads(raw_bytes)
def _fetch_hownet_en() -> dict:
    """Load the English HowNet candidate dictionary from its zip archive."""
    archive_path = _DIR_PATH / "hownet_en.zip"
    with zipfile.ZipFile(archive_path, "r") as zf:
        payload = zf.read("hownet_candidate/hownet_candidate.pkl")
    return pickle.loads(payload)
def _fetch_hownet_zh() -> dict:
    """Load the Chinese HowNet candidate dictionary from gzipped JSON."""
    gz_path = _DIR_PATH / "hownet_zh.json.gz"
    with gzip.open(gz_path, "rt", encoding="utf-8") as fh:
        return json.load(fh)
def _fetch_hownet(language: Union[str, LANGUAGE]) -> dict:
    """Dispatch to the language-specific HowNet loader.

    Implicitly returns None for languages other than English/Chinese,
    matching the original behavior.
    """
    lang = normalize_language(language)
    if lang == LANGUAGE.CHINESE:
        return _fetch_hownet_zh()
    if lang == LANGUAGE.ENGLISH:
        return _fetch_hownet_en()
def _fetch_checklist(keys: Optional[Union[Sequence[str], str]] = None) -> dict:
    """Load the CheckList substitution tables, optionally filtered by key.

    :param keys: None (return everything), a single key string (return just
        that table; raises KeyError if absent), or a sequence of keys (return
        a sub-dict; unknown keys are silently dropped — note this asymmetry
        with the single-key case).

    Improvement vs. original: the sequence filter used list membership
    (O(n*m)); keys are now collected into a set for O(1) lookups.
    """
    checklist_path = _DIR_PATH / "checklist_subs.json.gz"
    with gzip.open(checklist_path, "rt", encoding="utf-8") as f:
        checklist_subs = json.load(f)
    if keys is None:
        return checklist_subs
    if isinstance(keys, str):
        return checklist_subs[keys.upper()]
    wanted = {k.upper() for k in keys}
    return {k: v for k, v in checklist_subs.items() if k in wanted}
def _fetch_checklist_subs(keys: Optional[Union[Sequence[str], str]] = None) -> dict:
    """Alias for _fetch_checklist, kept so fetch("checklist-subs") resolves."""
    return _fetch_checklist(keys)
def _fetch_dces() -> dict:
    """Load the DCES resources (descriptions, nearest-neighbor index, vector
    column names) from the bundled zip archive.

    The pickled NearestNeighbors index requires scikit-learn to be importable
    at unpickle time; if that fails, a fresh, unfitted index is created with
    the parameters shown below.
    """
    dces_path = _DIR_PATH / "DCES.zip"
    with zipfile.ZipFile(dces_path, "r") as archive:
        descs = pickle.loads(archive.read("descs.pkl"))
        try:
            neigh = pickle.loads(archive.read("neigh.pkl"))
        except ModuleNotFoundError:
            # Unpickling the stored index failed (sklearn missing or moved);
            # fall back to constructing an equivalent index from scratch.
            # NOTE(review): the fallback index is NOT fitted to any data —
            # presumably the caller fits it later; confirm.
            print("failed to load DCES neighbor. Init from sklearn.")
            from sklearn.neighbors import NearestNeighbors
            neigh = NearestNeighbors(
                **{
                    "algorithm": "auto",
                    "leaf_size": 30,
                    "metric": "euclidean",
                    "metric_params": None,
                    "n_jobs": None,
                    "n_neighbors": 5,
                    "p": 2,
                    "radius": 1.0,
                }
            )
        vec_colnames = pickle.loads(archive.read("vec_colnames.pkl"))
    ret = {
        "descs": descs,
        "neigh": neigh,
        "vec_colnames": vec_colnames,
    }
    return ret
| [
"zipfile.ZipFile",
"pathlib.Path",
"gzip.open",
"sklearn.neighbors.NearestNeighbors",
"json.load"
] | [((686, 718), 'zipfile.ZipFile', 'zipfile.ZipFile', (['cilin_path', '"""r"""'], {}), "(cilin_path, 'r')\n", (701, 718), False, 'import zipfile\n'), ((911, 941), 'zipfile.ZipFile', 'zipfile.ZipFile', (['fyh_path', '"""r"""'], {}), "(fyh_path, 'r')\n", (926, 941), False, 'import zipfile\n'), ((1301, 1332), 'gzip.open', 'gzip.open', (['stopwords_path', '"""rt"""'], {}), "(stopwords_path, 'rt')\n", (1310, 1332), False, 'import gzip\n'), ((1365, 1383), 'json.load', 'json.load', (['gz_file'], {}), '(gz_file)\n', (1374, 1383), False, 'import json\n'), ((1857, 1886), 'zipfile.ZipFile', 'zipfile.ZipFile', (['hc_path', '"""r"""'], {}), "(hc_path, 'r')\n", (1872, 1886), False, 'import zipfile\n'), ((2095, 2137), 'gzip.open', 'gzip.open', (['hc_path', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(hc_path, 'rt', encoding='utf-8')\n", (2104, 2137), False, 'import gzip\n'), ((2157, 2169), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2166, 2169), False, 'import json\n'), ((2585, 2634), 'gzip.open', 'gzip.open', (['checklist_path', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(checklist_path, 'rt', encoding='utf-8')\n", (2594, 2634), False, 'import gzip\n'), ((2666, 2678), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2675, 2678), False, 'import json\n'), ((3147, 3178), 'zipfile.ZipFile', 'zipfile.ZipFile', (['dces_path', '"""r"""'], {}), "(dces_path, 'r')\n", (3162, 3178), False, 'import zipfile\n'), ((341, 355), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (345, 355), False, 'from pathlib import Path\n'), ((3506, 3675), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {}), "(**{'algorithm': 'auto', 'leaf_size': 30, 'metric':\n 'euclidean', 'metric_params': None, 'n_jobs': None, 'n_neighbors': 5,\n 'p': 2, 'radius': 1.0})\n", (3522, 3675), False, 'from sklearn.neighbors import NearestNeighbors\n')] |
from django import template
from django_extras.utils import humanize
register = template.Library()
@register.filter(is_safe=True)
def describe_seconds(value):
    """
    Template filter: convert a seconds value into a human-readable
    (i.e. week, day, hour) value by delegating to
    ``django_extras.utils.humanize.describe_seconds``.

    :param value: integer value of the number of seconds.
    :return: a string with the humanized value.
    """
    return humanize.describe_seconds(value)
| [
"django.template.Library",
"django_extras.utils.humanize.describe_seconds"
] | [((82, 100), 'django.template.Library', 'template.Library', ([], {}), '()\n', (98, 100), False, 'from django import template\n'), ((374, 406), 'django_extras.utils.humanize.describe_seconds', 'humanize.describe_seconds', (['value'], {}), '(value)\n', (399, 406), False, 'from django_extras.utils import humanize\n')] |
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Friend relation.
Viewfinder friends define a relationship between two users predicated on confirmation of photo
sharing permission. Each friend has an associated 'status', which can be:
- 'friend': user has been marked as a friend; however, that user may not have the reverse
friendship object.
- 'muted': a friend who has attained special status as an unwanted irritant. Content shared
from these friends is not shown, though still received and can be retrieved.
- 'blocked': a friend who has attained special status as an unwanted irritant. These users will
not show up in suggestions lists and cannot be contacted for sharing.
Friends are different than contacts. Contacts are the full spectrum of social connections. A
contact doesn't become a viewfinder friend until a share has been completed.
NOTE: Next comment is outdated, but we may re-enable something similar in future.
The 'colocated_shares', 'total_shares', 'last_colocated' and 'last_share' values are used to
quantify the strength of the sharing relationship. Each time the users in a friend relationship
are co-located, 'colocated_shares' is decayed based on 'last_colocated' and the current time
and updated either with a +1 if the sharing occurs or a -1 if not. 'total_shares' is similarly
updated, though not just when the users are co-located, but on every share that a user initiates.
Friend: viewfinder friend information
"""
__authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import logging
import math
from functools import partial
from tornado import gen
from viewfinder.backend.base import util
from viewfinder.backend.base.exceptions import NotFoundError
from viewfinder.backend.db import db_client, vf_schema
from viewfinder.backend.db.base import DBObject
from viewfinder.backend.db.range_base import DBRangeObject
from viewfinder.backend.op.notification_manager import NotificationManager
@DBObject.map_table_attributes
class Friend(DBRangeObject):
"""Viewfinder friend data object."""
__slots__ = []
_table = DBObject._schema.GetTable(vf_schema.FRIEND)
FRIEND = 'friend'
MUTED = 'muted'
BLOCKED = 'blocked'
FRIEND_ATTRIBUTES = set(['nickname'])
"""Subset of friend attributes that should be projected to the user."""
_SHARE_HALF_LIFE = 60 * 60 * 24 * 30 # 1 month
def __init__(self, user_id=None, friend_id=None):
  """Create the directed friend edge user_id -> friend_id.

  The relationship status starts as FRIEND; MUTED/BLOCKED are set later.
  """
  super(Friend, self).__init__()
  self.user_id = user_id
  self.friend_id = friend_id
  self.status = Friend.FRIEND
def IsBlocked(self):
  """Returns true if the "friend" identified by self.friend_id is blocked."""
  is_blocked = (self.status == Friend.BLOCKED)
  return is_blocked
def DecayShares(self, timestamp):
  """Decays 'total_shares' and 'colocated_shares' based on 'timestamp'. Updates 'last_share'
  and 'last_colocated' to 'timestamp'.
  """
  def _ComputeDecay(shares, last_time):
    # First-ever update: no prior timestamp implies no prior share count.
    if last_time is None:
      assert shares is None, shares
      return 0
    # Exponential decay with half-life _SHARE_HALF_LIFE: the weight halves
    # for every half-life elapsed since the last update.
    # NOTE(review): a 'timestamp' earlier than 'last_time' would *grow* the
    # value; presumably timestamps are monotonic — confirm at call sites.
    decay = math.exp(-math.log(2) * (timestamp - last_time) /
                     Friend._SHARE_HALF_LIFE)
    return shares * decay
  self.total_shares = _ComputeDecay(self.total_shares, self.last_share)
  self.last_share = timestamp
  self.colocated_shares = _ComputeDecay(self.colocated_shares, self.last_colocated)
  self.last_colocated = timestamp
def IncrementShares(self, timestamp, shared, colocated):
"""Decays and updates 'total_shares' and 'last_share' based on whether sharing occurred
('shared'==True). If 'colocated', the 'colocated_shares' and 'last_colocated' are updated
similarly.
"""
self.DecayShares(timestamp)
self.total_shares += (1.0 if shared else -1.0)
if colocated:
self.colocated_shares += (1.0 if shared else -1.0)
@classmethod
@gen.engine
def MakeFriends(cls, client, user_id, friend_id, callback):
"""Creates a bi-directional friendship between user_id and friend_id if it does not already
exist. Invokes the callback with the pair of friendship objects:
[(user_id=>friend_id), (friend_id=>user_id)]
"""
from viewfinder.backend.db.user import User
# Determine whether one or both sides of the friendship are missing.
forward_friend, reverse_friend = \
yield [gen.Task(Friend.Query, client, user_id, friend_id, None, must_exist=False),
gen.Task(Friend.Query, client, friend_id, user_id, None, must_exist=False)]
# Make sure that both sides of the friendship have been created.
if forward_friend is None:
forward_friend = Friend.CreateFromKeywords(user_id=user_id, friend_id=friend_id, status=Friend.FRIEND)
yield gen.Task(forward_friend.Update, client)
if reverse_friend is None:
reverse_friend = Friend.CreateFromKeywords(user_id=friend_id, friend_id=user_id, status=Friend.FRIEND)
yield gen.Task(reverse_friend.Update, client)
callback((forward_friend, reverse_friend))
@classmethod
@gen.engine
def MakeFriendsWithGroup(cls, client, user_ids, callback):
"""Creates bi-directional friendships between all the specified users. Each user will be
friends with every other user.
"""
yield [gen.Task(Friend.MakeFriends, client, user_id, friend_id)
for index, user_id in enumerate(user_ids)
for friend_id in user_ids[index + 1:]
if user_id != friend_id]
callback()
@classmethod
@gen.engine
def MakeFriendAndUpdate(cls, client, user_id, friend_dict, callback):
"""Ensures that the given user has at least a one-way friend relationship with the given
friend. Updates the friend relationship attributes with those given in "friend_dict".
"""
from viewfinder.backend.db.user import User
friend = yield gen.Task(Friend.Query, client, user_id, friend_dict['user_id'], None, must_exist=False)
if friend is None:
# Ensure that the friend exists as user in the system.
friend_user = yield gen.Task(User.Query, client, friend_dict['user_id'], None, must_exist=False)
if friend_user is None:
raise NotFoundError('User %d does not exist.' % friend_dict['user_id'])
# Create a one-way friend relationship from the calling user to the friend user.
friend = Friend.CreateFromKeywords(user_id=user_id, friend_id=friend_dict['user_id'], status=Friend.FRIEND)
# Update all given attributes.
assert friend_dict['user_id'] == friend.friend_id, (friend_dict, friend)
for key, value in friend_dict.iteritems():
if key != 'user_id':
assert key in Friend.FRIEND_ATTRIBUTES, friend_dict
setattr(friend, key, value)
yield gen.Task(friend.Update, client)
callback()
@classmethod
@gen.engine
def UpdateOperation(cls, client, callback, user_id, friend):
"""Updates friend metadata for the relationship between the given user and friend."""
# Update the metadata.
yield gen.Task(Friend.MakeFriendAndUpdate, client, user_id, friend)
# Send notifications to all the calling user's devices.
yield NotificationManager.NotifyUpdateFriend(client, friend)
callback()
| [
"math.log",
"viewfinder.backend.op.notification_manager.NotificationManager.NotifyUpdateFriend",
"viewfinder.backend.base.exceptions.NotFoundError",
"tornado.gen.Task",
"viewfinder.backend.db.base.DBObject._schema.GetTable"
] | [((2141, 2184), 'viewfinder.backend.db.base.DBObject._schema.GetTable', 'DBObject._schema.GetTable', (['vf_schema.FRIEND'], {}), '(vf_schema.FRIEND)\n', (2166, 2184), False, 'from viewfinder.backend.db.base import DBObject\n'), ((5805, 5896), 'tornado.gen.Task', 'gen.Task', (['Friend.Query', 'client', 'user_id', "friend_dict['user_id']", 'None'], {'must_exist': '(False)'}), "(Friend.Query, client, user_id, friend_dict['user_id'], None,\n must_exist=False)\n", (5813, 5896), False, 'from tornado import gen\n'), ((6687, 6718), 'tornado.gen.Task', 'gen.Task', (['friend.Update', 'client'], {}), '(friend.Update, client)\n', (6695, 6718), False, 'from tornado import gen\n'), ((6954, 7015), 'tornado.gen.Task', 'gen.Task', (['Friend.MakeFriendAndUpdate', 'client', 'user_id', 'friend'], {}), '(Friend.MakeFriendAndUpdate, client, user_id, friend)\n', (6962, 7015), False, 'from tornado import gen\n'), ((7087, 7141), 'viewfinder.backend.op.notification_manager.NotificationManager.NotifyUpdateFriend', 'NotificationManager.NotifyUpdateFriend', (['client', 'friend'], {}), '(client, friend)\n', (7125, 7141), False, 'from viewfinder.backend.op.notification_manager import NotificationManager\n'), ((4328, 4402), 'tornado.gen.Task', 'gen.Task', (['Friend.Query', 'client', 'user_id', 'friend_id', 'None'], {'must_exist': '(False)'}), '(Friend.Query, client, user_id, friend_id, None, must_exist=False)\n', (4336, 4402), False, 'from tornado import gen\n'), ((4417, 4491), 'tornado.gen.Task', 'gen.Task', (['Friend.Query', 'client', 'friend_id', 'user_id', 'None'], {'must_exist': '(False)'}), '(Friend.Query, client, friend_id, user_id, None, must_exist=False)\n', (4425, 4491), False, 'from tornado import gen\n'), ((4715, 4754), 'tornado.gen.Task', 'gen.Task', (['forward_friend.Update', 'client'], {}), '(forward_friend.Update, client)\n', (4723, 4754), False, 'from tornado import gen\n'), ((4908, 4947), 'tornado.gen.Task', 'gen.Task', (['reverse_friend.Update', 'client'], {}), 
'(reverse_friend.Update, client)\n', (4916, 4947), False, 'from tornado import gen\n'), ((5234, 5290), 'tornado.gen.Task', 'gen.Task', (['Friend.MakeFriends', 'client', 'user_id', 'friend_id'], {}), '(Friend.MakeFriends, client, user_id, friend_id)\n', (5242, 5290), False, 'from tornado import gen\n'), ((6004, 6080), 'tornado.gen.Task', 'gen.Task', (['User.Query', 'client', "friend_dict['user_id']", 'None'], {'must_exist': '(False)'}), "(User.Query, client, friend_dict['user_id'], None, must_exist=False)\n", (6012, 6080), False, 'from tornado import gen\n'), ((6125, 6190), 'viewfinder.backend.base.exceptions.NotFoundError', 'NotFoundError', (["('User %d does not exist.' % friend_dict['user_id'])"], {}), "('User %d does not exist.' % friend_dict['user_id'])\n", (6138, 6190), False, 'from viewfinder.backend.base.exceptions import NotFoundError\n'), ((3065, 3076), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (3073, 3076), False, 'import math\n')] |
from ensmallen_graph import EnsmallenGraph # pylint: disable=no-name-in-module
def load_hpo() -> EnsmallenGraph:
    """Load the HPO test graph from TSV fixtures and enable accelerations."""
    csv_options = dict(
        edge_path="./pytests/data/edges.tsv",
        sources_column="subject",
        destinations_column="object",
        directed=False,
        edge_types_column="edge_label",
        node_path="./pytests/data/nodes.tsv",
        nodes_column="id",
        node_types_column="category",
        default_edge_type='biolink:interacts_with',
        default_node_type='biolink:NamedThing',
        name="HPO",
    )
    hpo_graph = EnsmallenGraph.from_unsorted_csv(**csv_options)
    hpo_graph.enable()
    return hpo_graph
def load_pathway() -> EnsmallenGraph:
    """Load the undirected Pathway test graph and enable accelerations."""
    pathway_graph = EnsmallenGraph.from_unsorted_csv(
        edge_path="./pytests/data/pathway.tsv",
        sources_column="Gene_A",
        destinations_column="Gene_B",
        directed=False,
        name="Pathway",
    )
    pathway_graph.enable()
    return pathway_graph
| [
"ensmallen_graph.EnsmallenGraph.from_unsorted_csv"
] | [((174, 553), 'ensmallen_graph.EnsmallenGraph.from_unsorted_csv', 'EnsmallenGraph.from_unsorted_csv', ([], {'edge_path': '"""./pytests/data/edges.tsv"""', 'sources_column': '"""subject"""', 'destinations_column': '"""object"""', 'directed': '(False)', 'edge_types_column': '"""edge_label"""', 'node_path': '"""./pytests/data/nodes.tsv"""', 'nodes_column': '"""id"""', 'node_types_column': '"""category"""', 'default_edge_type': '"""biolink:interacts_with"""', 'default_node_type': '"""biolink:NamedThing"""', 'name': '"""HPO"""'}), "(edge_path='./pytests/data/edges.tsv',\n sources_column='subject', destinations_column='object', directed=False,\n edge_types_column='edge_label', node_path='./pytests/data/nodes.tsv',\n nodes_column='id', node_types_column='category', default_edge_type=\n 'biolink:interacts_with', default_node_type='biolink:NamedThing', name=\n 'HPO')\n", (206, 553), False, 'from ensmallen_graph import EnsmallenGraph\n'), ((757, 924), 'ensmallen_graph.EnsmallenGraph.from_unsorted_csv', 'EnsmallenGraph.from_unsorted_csv', ([], {'edge_path': '"""./pytests/data/pathway.tsv"""', 'sources_column': '"""Gene_A"""', 'destinations_column': '"""Gene_B"""', 'directed': '(False)', 'name': '"""Pathway"""'}), "(edge_path='./pytests/data/pathway.tsv',\n sources_column='Gene_A', destinations_column='Gene_B', directed=False,\n name='Pathway')\n", (789, 924), False, 'from ensmallen_graph import EnsmallenGraph\n')] |
import sys
import getopt
import subprocess
def main(argv):
    """Parse command-line flags and shell out to the ``rancher`` binary.

    Flags:
      -o/--rancher_options  global options placed before the COMMAND
      -c/--rancher_command  the rancher COMMAND to run
      -a/--rancher_args     COMMAND arguments placed after the COMMAND
      --help                print usage plus ``rancher --help`` and exit
    Exits with rancher's exit code, or 1 on a 10-minute timeout.
    """
    rancher_options = ''
    rancher_command = ''
    rancher_args = ''
    try:
        opts, _unused = getopt.getopt(argv, "o:c:a:", ["help", "rancher_options=", "rancher_command=", "rancher_args="])
    except getopt.GetoptError:
        # Unknown flag: print usage and abort with a non-zero status.
        print('Unrecognized Argument, See Usage Below.')
        print('rancher-cli.py -o "<OPTIONS>" -c "<COMMAND>" -a "<args>"')
        print('to see rancher cli help for a run rancher-cli.py help with no additional command line args')
        print('to see rancher cli help for a COMMAND run rancher-cli.py -c "<COMMAND>" -a "--help" with no additional command line args')
        sys.exit(2)
    for flag, value in opts:
        if flag == "--help":
            print('rancher-cli.py -o <rancher_options> -c <rancher_command> -a <rancher_args>')
            print('rancher_options - arguments to pass to rancher, --debug --version')
            print('rancher_command - arguments to pass to rancher, inspect')
            print('rancher_args - COMMAND arguments to pass to rancher')
            # Show rancher's own help before exiting successfully.
            subprocess.Popen(['rancher --help'], shell=True).communicate()
            sys.exit()
        elif flag in ("-o", "--rancher_options"):
            rancher_options = value
        elif flag in ("-c", "--rancher_command"):
            rancher_command = value
        elif flag in ("-a", "--rancher_args"):
            rancher_args = value
    # Assemble a single shell command string: rancher <options> <command> <args>.
    cli = ['rancher ' + rancher_options + ' ' + rancher_command + ' ' + rancher_args]
    try:
        # Stream output straight through to our stdout/stderr.
        exitcode = cli_proc = subprocess.Popen(cli, shell=True, stdout=sys.stdout, stderr=sys.stderr).wait(timeout=10*60)  # Timeout after 10 minutes
    except subprocess.TimeoutExpired:
        print("Timeout during upgrade")
        exitcode = 1
    exit(exitcode)


if __name__ == "__main__":
    main(sys.argv[1:])
| [
"subprocess.Popen",
"getopt.getopt",
"sys.exit"
] | [((163, 263), 'getopt.getopt', 'getopt.getopt', (['argv', '"""o:c:a:"""', "['help', 'rancher_options=', 'rancher_command=', 'rancher_args=']"], {}), "(argv, 'o:c:a:', ['help', 'rancher_options=',\n 'rancher_command=', 'rancher_args='])\n", (176, 263), False, 'import getopt\n'), ((676, 687), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (684, 687), False, 'import sys\n'), ((1135, 1172), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1151, 1172), False, 'import subprocess\n'), ((1233, 1243), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1241, 1243), False, 'import sys\n'), ((1606, 1681), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'sys.stdout', 'stderr': 'sys.stderr'}), '(command, shell=True, stdout=sys.stdout, stderr=sys.stderr)\n', (1622, 1681), False, 'import subprocess\n')] |
import unittest
import os, sys
from flask import Flask
from app import auth, api, db, home, create_app
import threading
import pytest
import requests
global flask
@pytest.fixture()
def server():
    """Build the Flask app and serve it on localhost:5000 (blocking)."""
    create_app().run(host="localhost", port=5000)
class TestApp(unittest.TestCase):
    """Placeholder smoke tests for the app package.

    Most app-specific checks are still commented out; the live assertions
    only exercise basic string behavior.
    """

    def setUp(self, *args):
        # No fixtures required for these placeholder tests.
        return

    def tearDown(self):
        pass

    def test_init(self):
        # self.assertEqual(type(flask), Flask)
        self.assertEqual('foo'.upper(), 'FOO')

    def test_db(self):
        # database = db.init_app(flask)
        # self.assertEqual(type(database), type(None))
        self.assertTrue('FOO'.isupper())
        self.assertFalse('Foo'.isupper())

    def test_api(self):
        phrase = 'hello world'
        self.assertEqual(phrase.split(), ['hello', 'world'])
        # splitting with a non-string separator must raise TypeError
        with self.assertRaises(TypeError):
            phrase.split(2)

    def test_auth(self):
        pass

    def test_home(self):
        pass


if __name__ == '__main__':
    unittest.main()
| [
"pytest.fixture",
"unittest.main",
"app.create_app"
] | [((166, 182), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (180, 182), False, 'import pytest\n'), ((209, 221), 'app.create_app', 'create_app', ([], {}), '()\n', (219, 221), False, 'from app import auth, api, db, home, create_app\n'), ((1059, 1074), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1072, 1074), False, 'import unittest\n')] |
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
# Apply seaborn's default plot styling.
sns.set()
# NOTE(review): 'path' is assigned but never used below -- pd.read_excel()
# loads "Data.xlsx" from the current working directory instead; confirm
# which location is intended.
path = r'C:\Users\HP\PycharmProjects\Operaciones_Calor\Data.xlsx'
df = pd.read_excel("Data.xlsx")
# NOTE(review): df2 (time data) is loaded but not used in this script.
df2 = pd.read_excel("time.xlsx")
# Transpose the data matrix so rows/columns are swapped before plotting.
df3 = np.transpose(df)
# Render the transposed matrix as a heatmap and display the figure.
ax = sns.heatmap(data=df3)
plt.show()
"seaborn.set",
"seaborn.heatmap",
"pandas.read_excel",
"numpy.transpose",
"matplotlib.pyplot.show"
] | [((94, 103), 'seaborn.set', 'sns.set', ([], {}), '()\n', (101, 103), True, 'import seaborn as sns\n'), ((175, 201), 'pandas.read_excel', 'pd.read_excel', (['"""Data.xlsx"""'], {}), "('Data.xlsx')\n", (188, 201), True, 'import pandas as pd\n'), ((208, 234), 'pandas.read_excel', 'pd.read_excel', (['"""time.xlsx"""'], {}), "('time.xlsx')\n", (221, 234), True, 'import pandas as pd\n'), ((241, 257), 'numpy.transpose', 'np.transpose', (['df'], {}), '(df)\n', (253, 257), True, 'import numpy as np\n'), ((264, 285), 'seaborn.heatmap', 'sns.heatmap', ([], {'data': 'df3'}), '(data=df3)\n', (275, 285), True, 'import seaborn as sns\n'), ((286, 296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (294, 296), True, 'import matplotlib.pyplot as plt\n')] |
from fe_store.models import (
ProductType, Attribute, Measure, Product, Association, Value
)
# Demo product records (id/name/type/price plus a feature mapping).
# NOTE(review): presumably display/sample data -- the database is seeded
# separately by create_fake_db(); confirm this list is still referenced.
products = [
    {
        "id": "1",
        "name": "Ноутбук Xiaomi Mi Notebook Pro 15.6",
        "type": "Ноутбук",
        "price": "54 000",
        "features": {
            "Процессор": "Core i5",
            "Частота процессора": "1600 МГц",
            "Объем оперативной памяти": "8 Гб",
            "Объем жесткого диска": "256 Гб"
        },
    },
    {
        "id": "2",
        "name": "Ноутбук Xiaomi Mi Notebook Pro 15.6",
        "type": "Ноутбук",
        "price": "58 000",
        "features": {
            "Процессор": "Core i7",
            "Частота процессора": "1800 МГц",
            "Объем оперативной памяти": "16 Гб",
            "Объем жесткого диска": "256 Гб"
        },
    },
    {
        "id": "3",
        "name": "Ноутбук Xiaomi Mi Notebook Air 12.5\"",
        "type": "Ноутбук",
        "price": "37 149",
        "features": {
            "Процессор": "Core M3",
            "Частота процессора": "1200 МГц",
            "Объем оперативной памяти": "8 Гб",
            "Объем жесткого диска": "256 Гб"
        },
    },
    {
        "id": "4",
        "name": "Планшет Apple iPad 32Gb Wi-Fi",
        "type": "Планшет",
        "price": "18 490",
        "features": {
            "Операционная система": "iOS",
            "Процессор": "Apple A9 1800 МГц",
            "Количество ядер": "2",
            "Вычислительное ядро": "ARM8"
        },
    },
]
def create_fake_db(db):
    """Reset the database and seed it with the demo catalogue.

    Drops and recreates all tables, then inserts product types,
    attributes, measures, attribute values, four demo products, and the
    product/attribute/value associations, committing everything at the end.

    Parameters
    ----------
    db
        SQLAlchemy-style database handle exposing ``drop_all``,
        ``create_all`` and ``session``.
    """
    db.drop_all()
    db.create_all()
    # Insertion order matters: the association triples below refer to the
    # auto-generated primary keys by position.
    db.session.add_all([
        ProductType(name="Ноутбук", mp_name="Ноутбуки"),
        ProductType(name="Планшет", mp_name="Планшеты"),
    ])
    db.session.add_all([
        Attribute(name="Процессор"),
        Attribute(name="Частота процессора", measure_id=1),
        Attribute(name="Объем оперативной памяти", measure_id=2),
        Attribute(name="Объем жесткого диска", measure_id=2),
        Attribute(name="Операционная система"),
        Attribute(name="Количество ядер"),
        Attribute(name="Вычислительное ядро"),
        Attribute(name="Базовая цена", measure_id=3),
    ])
    db.session.add_all([Measure(name="МГц"), Measure(name="Гб"), Measure(name="руб")])
    value_names = [
        "Core i5", "1600", "8", "256",
        "Core i7", "1800", "16",
        "Core M3", "1200",
        "iOS", "Apple A9 1800 МГц", "2", "ARM8",
        "54000.00", "58000.00", "37149.00", "18490.00",
    ]
    db.session.add_all([Value(name=value_name) for value_name in value_names])
    # Both Pro 15.6 variants share the same product image.
    pro_image = "https://avatars.mds.yandex.net/get-mpic/397397/img_id5839825920611580155.jpeg/5hq"
    db.session.add_all([
        Product(
            name='Ноутбук Xiaomi Mi Notebook Pro 15.6 (Intel Core i5)',
            product_type_id=1,
            image=pro_image,
        ),
        Product(
            name='Ноутбук Xiaomi Mi Notebook Pro 15.6 (Intel Core i7)',
            product_type_id=1,
            image=pro_image,
        ),
        Product(
            name='Ноутбук Xiaomi Mi Notebook Air 12.5',
            product_type_id=1,
            image="https://avatars.mds.yandex.net/get-mpic/200316/img_id1017256079183483194/6hq",
        ),
        Product(
            name='Планшет Apple iPad 32Gb Wi-Fi',
            product_type_id=2,
            image="https://avatars.mds.yandex.net/get-mpic/397397/img_id3899168262031019869/6hq",
        ),
    ])
    # (product_id, attribute_id, value_id) triples tying products to values.
    association_triples = [
        (1, 1, 1), (1, 2, 2), (1, 3, 3), (1, 4, 4), (1, 8, 14),
        (2, 1, 5), (2, 2, 6), (2, 3, 7), (2, 4, 4), (2, 8, 15),
        (3, 1, 8), (3, 2, 9), (3, 3, 3), (3, 4, 4), (3, 8, 16),
        (4, 5, 10), (4, 1, 11), (4, 6, 12), (4, 7, 13), (4, 8, 17),
    ]
    db.session.add_all([
        Association(product_id=product_id, attribute_id=attribute_id, value_id=value_id)
        for product_id, attribute_id, value_id in association_triples
    ])
    db.session.commit()
| [
"fe_store.models.Measure",
"fe_store.models.Association",
"fe_store.models.ProductType",
"fe_store.models.Value",
"fe_store.models.Attribute",
"fe_store.models.Product"
] | [((3030, 3221), 'fe_store.models.Product', 'Product', ([], {'name': '"""Ноутбук Xiaomi Mi Notebook Pro 15.6 (Intel Core i5)"""', 'product_type_id': '(1)', 'image': '"""https://avatars.mds.yandex.net/get-mpic/397397/img_id5839825920611580155.jpeg/5hq"""'}), "(name='Ноутбук Xiaomi Mi Notebook Pro 15.6 (Intel Core i5)',\n product_type_id=1, image=\n 'https://avatars.mds.yandex.net/get-mpic/397397/img_id5839825920611580155.jpeg/5hq'\n )\n", (3037, 3221), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((3251, 3442), 'fe_store.models.Product', 'Product', ([], {'name': '"""Ноутбук Xiaomi Mi Notebook Pro 15.6 (Intel Core i7)"""', 'product_type_id': '(1)', 'image': '"""https://avatars.mds.yandex.net/get-mpic/397397/img_id5839825920611580155.jpeg/5hq"""'}), "(name='Ноутбук Xiaomi Mi Notebook Pro 15.6 (Intel Core i7)',\n product_type_id=1, image=\n 'https://avatars.mds.yandex.net/get-mpic/397397/img_id5839825920611580155.jpeg/5hq'\n )\n", (3258, 3442), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((3472, 3642), 'fe_store.models.Product', 'Product', ([], {'name': '"""Ноутбук Xiaomi Mi Notebook Air 12.5"""', 'product_type_id': '(1)', 'image': '"""https://avatars.mds.yandex.net/get-mpic/200316/img_id1017256079183483194/6hq"""'}), "(name='Ноутбук Xiaomi Mi Notebook Air 12.5', product_type_id=1,\n image=\n 'https://avatars.mds.yandex.net/get-mpic/200316/img_id1017256079183483194/6hq'\n )\n", (3479, 3642), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((3672, 3832), 'fe_store.models.Product', 'Product', ([], {'name': '"""Планшет Apple iPad 32Gb Wi-Fi"""', 'product_type_id': '(2)', 'image': '"""https://avatars.mds.yandex.net/get-mpic/397397/img_id3899168262031019869/6hq"""'}), "(name='Планшет Apple iPad 32Gb Wi-Fi', product_type_id=2, image=\n 
'https://avatars.mds.yandex.net/get-mpic/397397/img_id3899168262031019869/6hq'\n )\n", (3679, 3832), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((1581, 1628), 'fe_store.models.ProductType', 'ProductType', ([], {'name': '"""Ноутбук"""', 'mp_name': '"""Ноутбуки"""'}), "(name='Ноутбук', mp_name='Ноутбуки')\n", (1592, 1628), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((1649, 1696), 'fe_store.models.ProductType', 'ProductType', ([], {'name': '"""Планшет"""', 'mp_name': '"""Планшеты"""'}), "(name='Планшет', mp_name='Планшеты')\n", (1660, 1696), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((1718, 1745), 'fe_store.models.Attribute', 'Attribute', ([], {'name': '"""Процессор"""'}), "(name='Процессор')\n", (1727, 1745), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((1766, 1816), 'fe_store.models.Attribute', 'Attribute', ([], {'name': '"""Частота процессора"""', 'measure_id': '(1)'}), "(name='Частота процессора', measure_id=1)\n", (1775, 1816), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((1837, 1893), 'fe_store.models.Attribute', 'Attribute', ([], {'name': '"""Объем оперативной памяти"""', 'measure_id': '(2)'}), "(name='Объем оперативной памяти', measure_id=2)\n", (1846, 1893), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((1914, 1966), 'fe_store.models.Attribute', 'Attribute', ([], {'name': '"""Объем жесткого диска"""', 'measure_id': '(2)'}), "(name='Объем жесткого диска', measure_id=2)\n", (1923, 1966), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((1987, 2025), 'fe_store.models.Attribute', 'Attribute', ([], {'name': '"""Операционная система"""'}), "(name='Операционная 
система')\n", (1996, 2025), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2046, 2079), 'fe_store.models.Attribute', 'Attribute', ([], {'name': '"""Количество ядер"""'}), "(name='Количество ядер')\n", (2055, 2079), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2100, 2137), 'fe_store.models.Attribute', 'Attribute', ([], {'name': '"""Вычислительное ядро"""'}), "(name='Вычислительное ядро')\n", (2109, 2137), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2158, 2202), 'fe_store.models.Attribute', 'Attribute', ([], {'name': '"""Базовая цена"""', 'measure_id': '(3)'}), "(name='Базовая цена', measure_id=3)\n", (2167, 2202), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2224, 2243), 'fe_store.models.Measure', 'Measure', ([], {'name': '"""МГц"""'}), "(name='МГц')\n", (2231, 2243), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2264, 2282), 'fe_store.models.Measure', 'Measure', ([], {'name': '"""Гб"""'}), "(name='Гб')\n", (2271, 2282), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2303, 2322), 'fe_store.models.Measure', 'Measure', ([], {'name': '"""руб"""'}), "(name='руб')\n", (2310, 2322), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2344, 2365), 'fe_store.models.Value', 'Value', ([], {'name': '"""Core i5"""'}), "(name='Core i5')\n", (2349, 2365), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2386, 2404), 'fe_store.models.Value', 'Value', ([], {'name': '"""1600"""'}), "(name='1600')\n", (2391, 2404), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2425, 2440), 
'fe_store.models.Value', 'Value', ([], {'name': '"""8"""'}), "(name='8')\n", (2430, 2440), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2461, 2478), 'fe_store.models.Value', 'Value', ([], {'name': '"""256"""'}), "(name='256')\n", (2466, 2478), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2499, 2520), 'fe_store.models.Value', 'Value', ([], {'name': '"""Core i7"""'}), "(name='Core i7')\n", (2504, 2520), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2541, 2559), 'fe_store.models.Value', 'Value', ([], {'name': '"""1800"""'}), "(name='1800')\n", (2546, 2559), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2580, 2596), 'fe_store.models.Value', 'Value', ([], {'name': '"""16"""'}), "(name='16')\n", (2585, 2596), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2617, 2638), 'fe_store.models.Value', 'Value', ([], {'name': '"""Core M3"""'}), "(name='Core M3')\n", (2622, 2638), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2659, 2677), 'fe_store.models.Value', 'Value', ([], {'name': '"""1200"""'}), "(name='1200')\n", (2664, 2677), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2698, 2715), 'fe_store.models.Value', 'Value', ([], {'name': '"""iOS"""'}), "(name='iOS')\n", (2703, 2715), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2736, 2767), 'fe_store.models.Value', 'Value', ([], {'name': '"""Apple A9 1800 МГц"""'}), "(name='Apple A9 1800 МГц')\n", (2741, 2767), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2788, 2803), 'fe_store.models.Value', 'Value', ([], {'name': 
'"""2"""'}), "(name='2')\n", (2793, 2803), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2824, 2842), 'fe_store.models.Value', 'Value', ([], {'name': '"""ARM8"""'}), "(name='ARM8')\n", (2829, 2842), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2863, 2885), 'fe_store.models.Value', 'Value', ([], {'name': '"""54000.00"""'}), "(name='54000.00')\n", (2868, 2885), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2906, 2928), 'fe_store.models.Value', 'Value', ([], {'name': '"""58000.00"""'}), "(name='58000.00')\n", (2911, 2928), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2949, 2971), 'fe_store.models.Value', 'Value', ([], {'name': '"""37149.00"""'}), "(name='37149.00')\n", (2954, 2971), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((2992, 3014), 'fe_store.models.Value', 'Value', ([], {'name': '"""18490.00"""'}), "(name='18490.00')\n", (2997, 3014), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n'), ((5220, 5329), 'fe_store.models.Association', 'Association', ([], {'product_id': "attr['product_id']", 'attribute_id': "attr['attribute_id']", 'value_id': "attr['value_id']"}), "(product_id=attr['product_id'], attribute_id=attr['attribute_id'\n ], value_id=attr['value_id'])\n", (5231, 5329), False, 'from fe_store.models import ProductType, Attribute, Measure, Product, Association, Value\n')] |
"""
This module deals with querying and downloading LAT FITS files.
"""
# Scientific Library
import numpy as np
import pandas as pd
# Requests Urls and Manupilate Files
from astropy.utils.data import download_files_in_parallel, download_file
from astroquery import fermi
from tqdm import tqdm
import requests
import shutil
import os
# import pathlib
import functools
# Logging
import logging
# logger_info = logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(level=logging.WARNING)
# from retry import retry
from retry.api import retry_call
# See https://stackoverflow.com/questions/492519/timeout-on-a-function-call
import signal
class Download:
    """Query and download Fermi-LAT FITS files for a table of GRBs."""
    # Sentinel recorded in the tracking table when a step fails or data is absent.
    MISSING = pd.NA
    # Column of the GRB table that holds the GRB name.
    NAME = 'GCNNAME'
    # Sentinel recorded when a step completed successfully.
    DONE = 'DONE'
    # Wait time in seconds -- TODO confirm where this is consumed.
    WAIT = 2
    # Time buffer (seconds) subtracted/added around the query window (see Query_url).
    TBUFF = 30.
    # URL prefix of the LAT query-results info page.
    INFOPRE = "https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/QueryResults.cgi?id="
    # Number of retry attempts for network calls via retry_call.
    TRIES = 3
    def __init__(self, grbs):
        # grbs: table of GRBs (read/written via .at, so presumably a pandas
        # DataFrame -- confirm against callers).
        self.grbs = grbs
    # Transform a method into a static method. A static method does not receive an implicit first argument.
    # https://docs.python.org/3/library/functions.html#staticmethod
@staticmethod
def Filename(path, sep="/"):
"""Retrieve the file name (without directory) from a full path.
Parameters
----------
path : str
Path to a GRB FITS file.
sep : str, optional
Seperator of directory, by default '/' (in Unix).
Returns
-------
str
File name without directory.
"""
return path.rsplit(sep, 1)[1]
def GRB_record(self, row: int, col: str, value):
"""Record information for the for the given grb.
Parameters
----------
row : int
Row index of the given GRB.
col : str
Colume index of the given GRB.
value
Any value to save to the unit.
"""
try:
self.grbs.at[row, col] = value
except Exception as e:
print("Fail to record: ", e)
print(type(value), value)
def Missing(self, row: int, col: str):
"""Check if the data in the give unit is missing.
Parameters
----------
row : int
Row index of the given GRB.
col : str
Colume index of the given GRB.
Returns
-------
bool
True if missing; else, False.
"""
res = pd.isna(self.grbs.at[row, col])
return np.any(res)
def Urls_resolve(self, row):
"""Retrive urls from the given row.
Parameters
----------
row : int
Row index of the given GRB.
Returns
-------
list of str, or None
Urls for a given LAT GRB FITS photon (PH) and spacecraft (SC) files.
"""
try:
urls = eval(self.grbs.at[row, 'urls'])
return urls
except Exception as e:
print("Fail to resolve urls: ", e)
print(self.grbs._repr_html_)
'''
functions for single DataFrame GRB
'''
    def Query_url(self, row: int, period: float, E_MeV_i: float, E_MeV_f: float, trigtime: str, tpeak: str, timeout: float=-1.):
        """Query urls for downloading.

        Queries the Fermi-LAT server for the GRB in the given row over the
        window [trigtime - TBUFF, trigtime + tpeak + period + TBUFF] (MET)
        and stores the resulting url list (as a string) in the 'urls' column.

        Parameters
        ----------
        row : int
            Row index of the given GRB.
        period : float
            Period after initial time in second.
        E_MeV_i : float
            Start energy in MeV.
        E_MeV_f : float
            End energy in MeV.
        trigtime : str
            Column name holding Mission Elapsed Time in second.
        tpeak : str
            Column name holding first low peak time in second.
        timeout : float, optional
            Time for timeout in second, by default -1 (no timeout).
        """
        col = 'urls'
        timesys = 'MET'
        name = self.grbs.at[row, self.NAME]
        missing = self.Missing(row, col)
        # Skip GRBs whose urls are already recorded.
        if not missing:
            logging.info('{}query already done'.format(' ' * 9))
            return self.DONE
        grb_name = 'GRB' + name
        met_i = self.grbs.at[row, trigtime]
        delta_t = self.grbs.at[row, tpeak]
        met_f = met_i + delta_t + period # "window of 90 seconds" as appears in XHW2018.
        # Pad the window on both sides by TBUFF seconds.
        start = met_i - self.TBUFF
        stop = met_f + self.TBUFF
        met = '{}, {}'.format(start, stop)
        E_MeV = '{}, {}'.format(E_MeV_i, E_MeV_f)
        if timeout > 0:
            # NOTE(review): signal.alarm() expects an int and needs a SIGALRM
            # handler installed elsewhere; a float here raises TypeError --
            # confirm callers pass ints and that a handler exists.
            signal.alarm(timeout)
        try:
            # Retry the astroquery LAT query up to TRIES times.
            fits_urls = retry_call(
                fermi.FermiLAT.query_object,
                fargs=[grb_name],
                fkwargs={
                    'energyrange_MeV': E_MeV,
                    'obsdates': met,
                    'timesys': timesys},
                tries=self.TRIES)
        except Exception as e:
            # Record the failure so later passes can retry this GRB.
            self.GRB_record(row, col, self.MISSING)
            logging.warning('{}Query_url failed while receiving:\n{}'.format(' ' * 9, e))
            return self.MISSING
        #! save urls (list) as str; Please extract urls with eval() later
        self.GRB_record(row, col, str(fits_urls))
        print(self)
        logging.info('{}query finished'.format(' ' * 9))
def Download_fits(self, row: int, out_dir, timeout: float=-1.):
    """Download fits files provided in urls and save to out_dir.

    Parameters
    ----------
    row : int
        Row index of the given GRB.
    out_dir : pathlib.Path
        Output directory for the FITS files.
    timeout : float, optional
        Time for timeout in second, by default -1 (no timeout).

    Returns
    -------
    self.DONE or self.MISSING
        self.DONE: if succeeded; self.MISSING: if failed.
    """
    urls = self.Urls_resolve(row)
    col = 'fits'
    if not self.Missing(row, col):
        logging.info('{}fits already saved'.format(' ' * 9))
        return self.DONE
    if timeout > 0:
        # signal.alarm() needs whole seconds (see Query_url); alarm(0)
        # would cancel the alarm, so round small timeouts up to 1 s.
        signal.alarm(max(1, int(timeout)))
    try:
        # First try to fetch every file in one parallel batch.
        file_list = retry_call(
            # astropy.utils.data.download_files_in_parallel
            download_files_in_parallel,
            fargs=[urls],
            tries=self.TRIES)
    except Exception:
        # Fall back to downloading the files one by one.
        try:
            file_list = []
            for url in urls:
                file_list.append(
                    retry_call(
                        # astropy.utils.data.download_file
                        download_file,
                        fargs=[url],
                        tries=self.TRIES)
                )
        except Exception as e:
            self.GRB_record(row, col, self.MISSING)
            logging.warning("{}while downloading fits got:\n{}".format(' ' * 9, e))
            print("urls failed to download: ", urls)
            return self.MISSING
    # Following https://stackoverflow.com/questions/12517451/automatically-creating-directories-with-file-output
    os.makedirs(out_dir, exist_ok=True)
    for i, url in enumerate(urls):
        filename = out_dir / self.Filename(url)
        if filename.exists():
            # Already copied on a previous run.
            continue
        try:
            # Copy from the download cache into out_dir.
            # Following https://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
            shutil.copyfile(file_list[i], filename)
            logging.info(filename.as_posix() + ' saved')
        except Exception as e:
            logging.warning(e)
    self.GRB_record(row, col, self.DONE)
    return self.DONE
def Download_info(self, row, out_dir, pre=INFOPRE, wait=WAIT, timeout: float=-1.):
    """Request query page in url, and save tables to out_dir.

    Parameters
    ----------
    row : int
        Row index of the given GRB.
    out_dir : pathlib.Path
        Output directory for the info/criteria CSV files.
    pre : str, optional
        Prefix in url, by default INFOPRE.
    wait : int or float, optional
        Wait time in second, by default WAIT (kept for interface
        compatibility; not used here).
    timeout : float, optional
        Time for timeout in second, by default -1 (no timeout).

    Returns
    -------
    self.DONE or self.MISSING
        self.DONE: if succeeded; self.MISSING: if failed.
    """
    col = 'info'
    if not self.Missing(row, col):
        logging.info('{}info already saved'.format(' ' * 9))
        return self.DONE
    urls = self.Urls_resolve(row)
    try:
        url = urls[0]
    except (TypeError, IndexError):
        # Urls_resolve returned None or an empty list.
        logging.info("{}urls missing".format(' ' * 9))
        return self.MISSING
    # The query ID is the leading token of the first FITS filename.
    ID = self.Filename(url).split("_")[0]
    query_url = pre + ID
    if timeout > 0:
        # signal.alarm() needs whole seconds (see Query_url).
        signal.alarm(max(1, int(timeout)))
    try:
        r = retry_call(
            requests.get,
            fargs=[query_url],
            tries=self.TRIES)
    except Exception:
        self.GRB_record(row, col, self.MISSING)
        logging.info("{}query page downloading failed".format(' ' * 9))
        return self.MISSING
    query_info = r.text
    dfs = pd.read_html(query_info)
    status = dfs[1]
    position_in_queue = status['Position in Queue']
    if any(position_in_queue != 'Query complete'):
        logging.info("{}Query incomplete.".format(' ' * 9))
        return self.MISSING
    else:
        # Table 0: the query criteria; table 2: the file info.
        criteria = dfs[0]
        filename = out_dir / 'criteria.csv'
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        criteria.to_csv(filename)
        info = dfs[2]
        filename = out_dir / 'info.csv'
        info.to_csv(filename)
        self.GRB_record(row, col, self.DONE)
        logging.info("{}query page downloaded".format(' ' * 9))
        return self.DONE
class Query(Download):
    """Drive the whole query/download pipeline over a table of GRBs."""

    FAKE = 'fake'
    PERIOD = 90.  # seconds after tpeak; the analysis window length
    EMIN = 1e2  # 1e3 MeV / 10, as 1 + z < 10
    EMAX = 5e5  # The highest energy available in LAT
    TIMESYS = 'MET'

    def __init__(self, grbs, out_dir, init=False, retry=True, timeout: float=-1.):
        """Run the pipeline on construction.

        Parameters
        ----------
        grbs : pandas.DataFrame
            Table of GRBs with 'urls', 'fits' and 'info' columns.
        out_dir : pathlib.Path
            Output directory for downloaded files.
        init : bool, optional
            Any value other than False resets the table first.
        retry : bool, optional
            Requery once for rows that are still missing, by default True.
        timeout : float, optional
            Per-step timeout in second, by default -1 (no timeout).
        """
        self.grbs = grbs
        self.out_dir = out_dir
        self.init = init
        self.timeout = timeout
        if self.init != False:
            self.Reset(self.init)
        self.Main_loop(outer_dir=out_dir)
        if retry and np.sum(self._Which_missing()) > 0:
            logging.info("Querying for missing information")
            self.Requery()

    def _repr_html_(self):
        """Pretty HTML rendering in notebooks (delegates to the table)."""
        return self.grbs._repr_html_()

    def Row_index(self, name):
        """Return the index of the GRB with the given name."""
        index_np = self.grbs[self.grbs[self.NAME] == name].index
        return index_np[0]

    def _Which_missing(self):
        """Find locations of missing information.

        Returns
        -------
        list of bool
            True where any of 'urls', 'fits' or 'info' is missing.
        """
        urls = self.grbs['urls'].isna()
        fits = self.grbs['fits'].isna()
        info = self.grbs['info'].isna()
        where = functools.reduce(np.logical_or, [urls, fits, info])
        num = np.sum(where)
        if num > 0:
            print("{}\n{} GRB{} missing".format('-' * 15, num, 's' if num > 1 else ''))
            print("Please Run .Requery() with(out) .Reset(init) for several times.\nIf those do not help, please download missing files manually.")
        return where

    def Which_missing(self):
        """Return the rows (GRBs) with missing information."""
        return self.grbs[self._Which_missing()]

    def Main_loop(self, outer_dir):
        """Main loop to download all required data.

        Parameters
        ----------
        outer_dir : pathlib.PosixPath
            Output directory; each GRB goes into a per-date subfolder.
        """
        period = self.PERIOD
        E_MeV_i = self.EMIN
        E_MeV_f = self.EMAX
        row_index = self.grbs.index
        for row in tqdm(row_index):
            name = self.grbs.at[row, self.NAME]
            # Group output by the first six characters (the GRB date).
            out_dir = outer_dir / name[:6]
            timeout = self.timeout
            logging.info(name + ':')
            self.Query_url(row=row, period=period, E_MeV_i=E_MeV_i, E_MeV_f=E_MeV_f, trigtime='GBM_MET', tpeak='tpeak_ref', timeout=timeout)
            status = self.grbs.at[row, 'urls']
            # Bug fix: compare by value, not identity. The cell holds a
            # string *equal* to self.MISSING, which need not be the same
            # object after a round-trip through the DataFrame.
            if status != self.MISSING:
                self.Download_info(row=row, out_dir=out_dir, timeout=timeout)
                self.Download_fits(row=row, out_dir=out_dir, timeout=timeout)
        rows = self._Which_missing()
        if np.sum(rows) > 0:
            # NOTE(review): display() is an IPython builtin; this branch
            # assumes a notebook environment. Pretty printing per
            # https://stackoverflow.com/questions/19124601
            display(self.grbs.loc[rows])
        else:
            logging.info("Congratulations! All information downloaded successfully")

    def Reset(self, init=False):
        """Initialize urls of grbs with missing fits or info.

        Parameters
        ----------
        init : bool, optional
            False resets only missing rows; any other value resets all.
        """
        rows = self._Which_missing() if init == False else self.grbs.index
        self.grbs.loc[rows, ('urls', 'fits', 'info')] = self.MISSING

    def Requery(self):
        """Remove queried urls and run Main_loop for missing grbs."""
        self.Main_loop(outer_dir=self.out_dir)
"logging.basicConfig",
"os.makedirs",
"functools.reduce",
"tqdm.tqdm",
"logging.warning",
"numpy.any",
"numpy.sum",
"shutil.copyfile",
"os.path.dirname",
"signal.alarm",
"retry.api.retry_call",
"pandas.read_html",
"pandas.isna",
"logging.info"
] | [((457, 499), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARNING'}), '(level=logging.WARNING)\n', (476, 499), False, 'import logging\n'), ((2439, 2470), 'pandas.isna', 'pd.isna', (['self.grbs.at[row, col]'], {}), '(self.grbs.at[row, col])\n', (2446, 2470), True, 'import pandas as pd\n'), ((2486, 2497), 'numpy.any', 'np.any', (['res'], {}), '(res)\n', (2492, 2497), True, 'import numpy as np\n'), ((7176, 7211), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (7187, 7211), False, 'import os\n'), ((9658, 9682), 'pandas.read_html', 'pd.read_html', (['query_info'], {}), '(query_info)\n', (9670, 9682), True, 'import pandas as pd\n'), ((11805, 11856), 'functools.reduce', 'functools.reduce', (['np.logical_or', '[urls, fits, info]'], {}), '(np.logical_or, [urls, fits, info])\n', (11821, 11856), False, 'import functools\n'), ((11871, 11884), 'numpy.sum', 'np.sum', (['where'], {}), '(where)\n', (11877, 11884), True, 'import numpy as np\n'), ((12855, 12870), 'tqdm.tqdm', 'tqdm', (['row_index'], {}), '(row_index)\n', (12859, 12870), False, 'from tqdm import tqdm\n'), ((4497, 4518), 'signal.alarm', 'signal.alarm', (['timeout'], {}), '(timeout)\n', (4509, 4518), False, 'import signal\n'), ((4556, 4714), 'retry.api.retry_call', 'retry_call', (['fermi.FermiLAT.query_object'], {'fargs': '[grb_name]', 'fkwargs': "{'energyrange_MeV': E_MeV, 'obsdates': met, 'timesys': timesys}", 'tries': 'self.TRIES'}), "(fermi.FermiLAT.query_object, fargs=[grb_name], fkwargs={\n 'energyrange_MeV': E_MeV, 'obsdates': met, 'timesys': timesys}, tries=\n self.TRIES)\n", (4566, 4714), False, 'from retry.api import retry_call\n'), ((6138, 6159), 'signal.alarm', 'signal.alarm', (['timeout'], {}), '(timeout)\n', (6150, 6159), False, 'import signal\n'), ((6224, 6294), 'retry.api.retry_call', 'retry_call', (['download_files_in_parallel'], {'fargs': '[urls]', 'tries': 'self.TRIES'}), '(download_files_in_parallel, fargs=[urls], 
tries=self.TRIES)\n', (6234, 6294), False, 'from retry.api import retry_call\n'), ((9277, 9298), 'signal.alarm', 'signal.alarm', (['timeout'], {}), '(timeout)\n', (9289, 9298), False, 'import signal\n'), ((9328, 9389), 'retry.api.retry_call', 'retry_call', (['requests.get'], {'fargs': '[query_url]', 'tries': 'self.TRIES'}), '(requests.get, fargs=[query_url], tries=self.TRIES)\n', (9338, 9389), False, 'from retry.api import retry_call\n'), ((11080, 11128), 'logging.info', 'logging.info', (['"""Querying for missing information"""'], {}), "('Querying for missing information')\n", (11092, 11128), False, 'import logging\n'), ((13106, 13130), 'logging.info', 'logging.info', (["(name + ':')"], {}), "(name + ':')\n", (13118, 13130), False, 'import logging\n'), ((13719, 13731), 'numpy.sum', 'np.sum', (['rows'], {}), '(rows)\n', (13725, 13731), True, 'import numpy as np\n'), ((13957, 14029), 'logging.info', 'logging.info', (['"""Congratulations! All information downloaded successfully"""'], {}), "('Congratulations! All information downloaded successfully')\n", (13969, 14029), False, 'import logging\n'), ((7645, 7684), 'shutil.copyfile', 'shutil.copyfile', (['file_list[i]', 'filename'], {}), '(file_list[i], filename)\n', (7660, 7684), False, 'import shutil\n'), ((10107, 10132), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (10122, 10132), False, 'import os\n'), ((7865, 7883), 'logging.warning', 'logging.warning', (['e'], {}), '(e)\n', (7880, 7883), False, 'import logging\n'), ((6567, 6623), 'retry.api.retry_call', 'retry_call', (['download_file'], {'fargs': '[url]', 'tries': 'self.TRIES'}), '(download_file, fargs=[url], tries=self.TRIES)\n', (6577, 6623), False, 'from retry.api import retry_call\n')] |
# Copyright 2022 VMware, Inc.
# SPDX-License-Identifier: Apache License 2.0
from .models import Client, Product, TestCase, UIEvent, Capture, Console
from rest_framework import serializers
class ClientSerializer(serializers.ModelSerializer):
    """Serializes Client records: id, uuid, role and locale."""

    class Meta:
        model = Client
        fields = ('id', 'uuid', 'role', 'locale')
class ProductSerializer(serializers.ModelSerializer):
    """Serializes Product records, including bug-tracker and HPQC metadata."""

    class Meta:
        model = Product
        fields = ('id', 'name', 'bu_name', 'reported_issue_target', 'bug_product_name', 'str_supported_browsers',
                  'str_supported_features', 'hpqc_domain', 'hpqc_project')
class TestCaseSerializer(serializers.ModelSerializer):
    """Serializes TestCase records; flattens the related product to its name."""

    # Expose the related Product's name instead of its primary key.
    product = serializers.CharField(source='product.name')
    max_event_retry = serializers.IntegerField(required=False)
    # Timestamps rendered without microseconds or timezone suffix.
    createtime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)
    lastruntime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)

    class Meta:
        model = TestCase
        fields = ('id', 'name', 'apptype', 'softdeleted', 'uuid', 'product', 'browser', 'status', 'build', 'resolution',
                  'locales', 'leader_locale', 'start_url', 'add_host', 'glossary', 'run_id', 'user', 'pool', 'max_event_retry',
                  'accessibility_data', 'createtime', 'lastruntime', 'access_urllist')
class UIEventSerializer(serializers.ModelSerializer):
    """Full serializer for recorded UI events, including element locators."""

    # Product name resolved through the owning test case.
    product = serializers.CharField(source='testcase.product.name', required=False)
    # Whitespace can be significant in recorded values, so keep it.
    obj_value = serializers.CharField(trim_whitespace=False, required=False, allow_blank=True)
    recordtime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)

    class Meta:
        model = UIEvent
        fields = ('id', 'testcase', 'testname', 'product', 'run_id', 'event', 'action', 'button', 'obj_text', 'obj_class',
                  'obj_value', 'obj_x', 'obj_y', 'obj_top', 'obj_left', 'obj_right', 'obj_bottom', 'obj_selector', 'obj_id',
                  'obj_xpath', 'obj_xpath2', 'obj_xpath3', 'obj_scrolltop', 'obj_scrollleft', 'platform', 'recordtime',
                  'obj_assert', 'obj_parent', 'obj_brother', 'obj_child', 'verify_type', 'verify_value', 'obj_xpath4',
                  'captureid')
        # Preserve raw whitespace in obj_x too (it carries key data for
        # keydown events; see UIEventKeydownSerializer).
        extra_kwargs = {"obj_x": {"trim_whitespace": False}}
class UIEventConfigSerializer(serializers.ModelSerializer):
    """UIEvent view focused on replay configuration fields."""

    # Whitespace can be significant in recorded values, so keep it.
    obj_value = serializers.CharField(trim_whitespace=False, required=False, allow_blank=True)

    class Meta:
        model = UIEvent
        fields = ('id', 'testname', 'run_id', 'event', 'action', 'button', 'obj_text', 'obj_class', 'obj_value', 'obj_x',
                  'obj_y', 'obj_top', 'obj_left', 'obj_right', 'obj_bottom', 'obj_selector', 'obj_id', 'obj_xpath',
                  'obj_xpath2', 'obj_xpath3', 'obj_xpath4', 'obj_scrolltop', 'obj_scrollleft', 'obj_assert', 'verify_type',
                  'verify_value', 'replayconfig', 'replayoption', 'presleeptime', 'userxpath')
class UIEventOpenSerializer(serializers.ModelSerializer):
    """Open-action view of a UIEvent; obj_xpath is exposed as 'openurl'."""

    openurl = serializers.CharField(source='obj_xpath')

    class Meta:
        model = UIEvent
        fields = ('id', 'action', 'openurl')
class UIEventDirectSerializer(serializers.ModelSerializer):
    """Direct-navigation view; identical shape to the open-action serializer."""

    openurl = serializers.CharField(source='obj_xpath')

    class Meta:
        model = UIEvent
        fields = ('id', 'action', 'openurl')
class UIEventMousedownSerializer(serializers.ModelSerializer):
    """Mousedown-action view: coordinates, button and element locators."""

    class Meta:
        model = UIEvent
        fields = ("id", "action", "obj_value", "button", "obj_x", "obj_y", "obj_selector", "obj_id", "obj_xpath", "obj_xpath2",
                  "obj_xpath3", "obj_xpath4", "verify_value", "percentX", "percentY")
class UIEventKeydownSerializer(serializers.ModelSerializer):
    """Keydown-action view; the pressed key is stored in obj_x."""

    key = serializers.CharField(source='obj_x')

    class Meta:
        model = UIEvent
        fields = ('id', 'action', 'key')
class UIEventTypeSerializer(serializers.ModelSerializer):
    """Type-action view: the typed value plus element locators."""

    class Meta:
        model = UIEvent
        fields = ("id", "action", "obj_value", "obj_selector", "obj_id", "obj_xpath", "obj_xpath2", "obj_xpath3", "obj_xpath4",
                  "verify_value")
class UIEventScreenshotSerializer(serializers.ModelSerializer):
    """Screenshot-action view; area type and name are stored in assert/text."""

    areatype = serializers.CharField(source='obj_assert')
    name = serializers.CharField(source='obj_text')

    class Meta:
        model = UIEvent
        fields = ('id', 'action', 'areatype', 'name')
class UIEventElementScreenshotSerializer(serializers.ModelSerializer):
    """Element-screenshot view: screenshot metadata plus element locators."""

    areatype = serializers.CharField(source='obj_assert')
    name = serializers.CharField(source='obj_text')

    class Meta:
        model = UIEvent
        fields = ('id', 'action', 'areatype', 'name', "obj_value", "obj_selector", "obj_id", "obj_xpath", "obj_xpath2",
                  "obj_xpath3", "obj_xpath4", "verify_value")
class UIEventMouseoverSerializer(serializers.ModelSerializer):
    """Mouseover-action view; same field set as the mousedown serializer."""

    class Meta:
        model = UIEvent
        fields = ("id", "action", "obj_value", "button", "obj_x", "obj_y", "obj_selector", "obj_id", "obj_xpath", "obj_xpath2",
                  "obj_xpath3", "obj_xpath4", "verify_value", "percentX", "percentY")
class UIEventSelectSerializer(serializers.ModelSerializer):
    """Select-action view; the reference option text is stored in obj_text."""

    reference = serializers.CharField(source='obj_text')

    class Meta:
        model = UIEvent
        fields = ("id", "action", "obj_value", "obj_selector", "obj_id", "obj_xpath", "obj_xpath2", "obj_xpath3", "obj_xpath4",
                  "verify_value", "reference", "percentX", "percentY")
class UIEventAssertSerializer(serializers.ModelSerializer):
    """Assert-action view: locators plus the assertion payload."""

    class Meta:
        model = UIEvent
        fields = ("id", "action", "obj_value", "obj_selector", "obj_id", "obj_xpath", "obj_xpath2", "obj_xpath3", "obj_xpath4",
                  "verify_value", "obj_assert")
class UIEventExecuteSerializer(serializers.ModelSerializer):
    """Execute-action view; command data is packed into generic obj_* fields."""

    host = serializers.CharField(source='obj_brother')
    command = serializers.CharField(source='verify_value')
    expect = serializers.CharField(source='obj_child')
    name = serializers.CharField(source='obj_parent')

    class Meta:
        model = UIEvent
        fields = ("id", "action", "host", "command", "expect", "name")
class UIEventAccessibilitySerializer(serializers.ModelSerializer):
    """Accessibility-check view: locators plus the expected value."""

    class Meta:
        model = UIEvent
        fields = ("id", "action", "obj_value", "obj_selector", "obj_id", "obj_xpath", "obj_xpath2", "obj_xpath3", "obj_xpath4",
                  "verify_value")
class UIEventBrowserpromptSerializer(serializers.ModelSerializer):
    """Browser-prompt view; prompt type/value are stored in text/value."""

    prompt_type = serializers.CharField(source='obj_text')
    prompt_value = serializers.CharField(source='obj_value')

    class Meta:
        model = UIEvent
        fields = ("id", "action", "prompt_type", "prompt_value")
class UIEventTabswitchSerializer(serializers.ModelSerializer):
    """Tab-switch view; only id and action are needed for replay."""

    class Meta:
        model = UIEvent
        fields = ("id", "action")
class CaptureSerializer(serializers.ModelSerializer):
    """Serializes screen captures: id, content and capture time."""

    capturetime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)

    class Meta:
        model = Capture
        fields = ('captureid', 'content', 'capturetime')
class ConsoleSerializer(serializers.ModelSerializer):
    """Serializes Console records with every model field plus report/VNC extras."""

    createtime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)
    runtime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)
    starttime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)
    stoptime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)

    class Meta:
        model = Console
        # All Console model fields, discovered at import time, plus
        # computed/extra attributes.
        fields = tuple([f.name for f in Console._meta.get_fields()] + [
            'report',
            'vnc_host',
            'vnc_port',
            'vnc_protocol',
        ])
class SimpleConsoleSerializer(serializers.ModelSerializer):
    """Lightweight Console view with an explicit, fixed field subset."""

    createtime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)
    runtime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)
    starttime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)
    stoptime = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S", required=False)

    class Meta:
        model = Console
        fields = ('uuid', 'appname', 'role', 'status',
                  'browser', 'locale', 'resolution',
                  'createtime', 'runtime', 'starttime', 'stoptime',
                  'report', 'vnc_host', 'vnc_port', 'vnc_protocol')
| [
"rest_framework.serializers.DateTimeField",
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.CharField"
] | [((689, 733), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""product.name"""'}), "(source='product.name')\n", (710, 733), False, 'from rest_framework import serializers\n'), ((756, 796), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (780, 796), False, 'from rest_framework import serializers\n'), ((814, 883), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (839, 883), False, 'from rest_framework import serializers\n'), ((902, 971), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (927, 971), False, 'from rest_framework import serializers\n'), ((1420, 1489), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""testcase.product.name"""', 'required': '(False)'}), "(source='testcase.product.name', required=False)\n", (1441, 1489), False, 'from rest_framework import serializers\n'), ((1506, 1584), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'trim_whitespace': '(False)', 'required': '(False)', 'allow_blank': '(True)'}), '(trim_whitespace=False, required=False, allow_blank=True)\n', (1527, 1584), False, 'from rest_framework import serializers\n'), ((1602, 1671), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (1627, 1671), False, 'from rest_framework import serializers\n'), ((2370, 2448), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'trim_whitespace': '(False)', 'required': '(False)', 'allow_blank': '(True)'}), '(trim_whitespace=False, required=False, 
allow_blank=True)\n', (2391, 2448), False, 'from rest_framework import serializers\n'), ((3021, 3062), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_xpath"""'}), "(source='obj_xpath')\n", (3042, 3062), False, 'from rest_framework import serializers\n'), ((3225, 3266), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_xpath"""'}), "(source='obj_xpath')\n", (3246, 3266), False, 'from rest_framework import serializers\n'), ((3745, 3782), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_x"""'}), "(source='obj_x')\n", (3766, 3782), False, 'from rest_framework import serializers\n'), ((4208, 4250), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_assert"""'}), "(source='obj_assert')\n", (4229, 4250), False, 'from rest_framework import serializers\n'), ((4262, 4302), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_text"""'}), "(source='obj_text')\n", (4283, 4302), False, 'from rest_framework import serializers\n'), ((4486, 4528), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_assert"""'}), "(source='obj_assert')\n", (4507, 4528), False, 'from rest_framework import serializers\n'), ((4540, 4580), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_text"""'}), "(source='obj_text')\n", (4561, 4580), False, 'from rest_framework import serializers\n'), ((5201, 5241), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_text"""'}), "(source='obj_text')\n", (5222, 5241), False, 'from rest_framework import serializers\n'), ((5834, 5877), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_brother"""'}), "(source='obj_brother')\n", (5855, 5877), False, 'from rest_framework import serializers\n'), ((5892, 5936), 
'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""verify_value"""'}), "(source='verify_value')\n", (5913, 5936), False, 'from rest_framework import serializers\n'), ((5950, 5991), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_child"""'}), "(source='obj_child')\n", (5971, 5991), False, 'from rest_framework import serializers\n'), ((6003, 6045), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_parent"""'}), "(source='obj_parent')\n", (6024, 6045), False, 'from rest_framework import serializers\n'), ((6516, 6556), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_text"""'}), "(source='obj_text')\n", (6537, 6556), False, 'from rest_framework import serializers\n'), ((6576, 6617), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""obj_value"""'}), "(source='obj_value')\n", (6597, 6617), False, 'from rest_framework import serializers\n'), ((6937, 7006), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (6962, 7006), False, 'from rest_framework import serializers\n'), ((7178, 7247), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (7203, 7247), False, 'from rest_framework import serializers\n'), ((7262, 7331), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (7287, 7331), False, 'from rest_framework import serializers\n'), ((7348, 7417), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': 
'(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (7373, 7417), False, 'from rest_framework import serializers\n'), ((7433, 7502), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (7458, 7502), False, 'from rest_framework import serializers\n'), ((7803, 7872), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (7828, 7872), False, 'from rest_framework import serializers\n'), ((7887, 7956), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (7912, 7956), False, 'from rest_framework import serializers\n'), ((7973, 8042), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (7998, 8042), False, 'from rest_framework import serializers\n'), ((8058, 8127), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%dT%H:%M:%S"""', 'required': '(False)'}), "(format='%Y-%m-%dT%H:%M:%S', required=False)\n", (8083, 8127), False, 'from rest_framework import serializers\n')] |
# Import modules
import groupdocs_annotation_cloud
from Common import Common
class MoveFolder:
    """Example: move a folder within cloud storage via the Folder API."""

    @classmethod
    def Run(cls):
        """Build a FolderApi client and move 'annotationdocs1' under 'annotationdocs'."""
        # Build the API client from the shared configuration.
        folder_api = groupdocs_annotation_cloud.FolderApi.from_config(Common.GetConfig())
        try:
            move_request = groupdocs_annotation_cloud.MoveFolderRequest(
                "annotationdocs1",
                "annotationdocs1\\annotationdocs",
                Common.myStorage,
                Common.myStorage,
            )
            folder_api.move_folder(move_request)
        except groupdocs_annotation_cloud.ApiException as e:
            print("Exception while calling API: {0}".format(e.message))
        else:
            print("Expected response type is Void: 'annotationdocs1' folder moved to 'annotationdocs/annotationdocs1'.")
"groupdocs_annotation_cloud.MoveFolderRequest",
"Common.Common.GetConfig"
] | [((236, 254), 'Common.Common.GetConfig', 'Common.GetConfig', ([], {}), '()\n', (252, 254), False, 'from Common import Common\n'), ((300, 438), 'groupdocs_annotation_cloud.MoveFolderRequest', 'groupdocs_annotation_cloud.MoveFolderRequest', (['"""annotationdocs1"""', '"""annotationdocs1\\\\annotationdocs"""', 'Common.myStorage', 'Common.myStorage'], {}), "('annotationdocs1',\n 'annotationdocs1\\\\annotationdocs', Common.myStorage, Common.myStorage)\n", (344, 438), False, 'import groupdocs_annotation_cloud\n')] |
"""
--- Day 15: Oxygen System ---
https://adventofcode.com/2019/day/15
"""
from collections import deque
from aocd import data
from aoc_wim.aoc2019 import IntComputer
from aoc_wim.search import AStar
from aoc_wim.zgrid import ZGrid
# Droid status codes reported by the IntComputer after a movement command.
NACK = 0  # hit a wall, did not move
ACK = 1   # moved one step
GOAL = 2  # moved one step and found the oxygen system
# Map a complex-plane step (imaginary axis = rows) to a movement command.
neighbours = {
    -1j: 1,  # NORTH
    +1j: 2,  # SOUTH
    -1: 3,  # WEST
    +1: 4,  # EAST
}
class Q15AStar(AStar):
    """A* search that maps the droid maze by driving an IntComputer."""

    def __init__(self, data):
        # The start position 0j is known open floor.
        self.grid = ZGrid({0j: "."})
        self.comp = IntComputer(data)
        # Keep only the most recent status code emitted by the droid.
        self.comp.output = deque(maxlen=1)
        # Snapshot of the computer state at each visited position, so the
        # droid can be teleported back when A* expands that node.
        self.freezer = {0j: self.comp.freeze()}
        state0 = 0j
        # target=None: the goal is discovered during expansion.
        AStar.__init__(self, state0, None)

    def adjacent(self, z):
        """Yield open neighbours of z, probing the droid in each direction."""
        # Restore the computer to the state it had at position z.
        self.comp.unfreeze(self.freezer[z])
        for dz, input_val in neighbours.items():
            self.comp.input.append(input_val)
            self.comp.run(until=IntComputer.op_output)
            [rc] = self.comp.output
            if rc == NACK:
                # Wall: the droid did not move.
                self.grid[z + dz] = "#"
            else:
                self.grid[z + dz] = "."
                self.freezer[z + dz] = self.comp.freeze()
                # Step back so the droid sits at z for the next probe.
                back = neighbours[-dz]
                self.comp.input.append(back)
                self.comp.run(until=IntComputer.op_output)
                # Moving back onto visited floor must succeed (non-zero rc).
                assert self.comp.output[0]
                if rc == GOAL:
                    # Found the oxygen system.
                    self.target = z + dz
                yield z + dz

    def draw(self):
        """Render the explored grid, marking origin 'O' and target 'T'."""
        overlay = {0: "O"}
        if self.target is not None:
            overlay[self.target] = "T"
        self.grid.draw(overlay=overlay)
search = Q15AStar(data)
path = search.run()
# Part a: fewest movement commands from the origin to the oxygen system.
print("part a", search.path_length)
search.draw()
# Part b: fill time = the eccentricity of the oxygen system. Re-run the
# search from the target with a never-satisfied goal so every reachable
# cell's distance (gscore) gets computed; the answer is the maximum.
s0 = search.freezer[search.target]
search.state0 = search.target
search.target_reached = lambda *args: False
search.comp.unfreeze(s0)
# Clear all per-run A* bookkeeping before restarting.
search.fscore.clear()
search.gscore.clear()
search.gscore[search.state0] = 0
search.came_from.clear()
search.closed.clear()
search.run()
print("part b", max(search.gscore.values()))
search.draw()
| [
"collections.deque",
"aoc_wim.search.AStar.__init__",
"aoc_wim.aoc2019.IntComputer",
"aoc_wim.zgrid.ZGrid"
] | [((434, 454), 'aoc_wim.zgrid.ZGrid', 'ZGrid', (["{(0.0j): '.'}"], {}), "({(0.0j): '.'})\n", (439, 454), False, 'from aoc_wim.zgrid import ZGrid\n'), ((471, 488), 'aoc_wim.aoc2019.IntComputer', 'IntComputer', (['data'], {}), '(data)\n', (482, 488), False, 'from aoc_wim.aoc2019 import IntComputer\n'), ((516, 531), 'collections.deque', 'deque', ([], {'maxlen': '(1)'}), '(maxlen=1)\n', (521, 531), False, 'from collections import deque\n'), ((608, 642), 'aoc_wim.search.AStar.__init__', 'AStar.__init__', (['self', 'state0', 'None'], {}), '(self, state0, None)\n', (622, 642), False, 'from aoc_wim.search import AStar\n')] |
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from typing import Dict, Optional, Sequence
import numpy as np
import torch as to
from init_args_serializer import Serializable
from torch import nn as nn
from torch.functional import Tensor
from tqdm import tqdm
import pyrado
from pyrado.algorithms.base import Algorithm
from pyrado.algorithms.step_based.svpg import SVPGBuilder, SVPGHyperparams
from pyrado.domain_randomization.domain_parameter import DomainParam
from pyrado.environment_wrappers.base import EnvWrapper
from pyrado.environment_wrappers.utils import inner_env
from pyrado.environments.base import Env
from pyrado.logger.step import StepLogger
from pyrado.policies.base import Policy
from pyrado.policies.recurrent.rnn import LSTMPolicy
from pyrado.sampling.parallel_evaluation import eval_domain_params
from pyrado.sampling.sampler_pool import SamplerPool
from pyrado.sampling.step_sequence import StepSequence
from pyrado.spaces.base import Space
from pyrado.spaces.box import BoxSpace
from pyrado.utils.data_types import EnvSpec
class ADR(Algorithm):
"""
Active Domain Randomization (ADR)
.. seealso::
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "Active Domain Randomization", arXiv, 2019
"""
name: str = "adr"
def __init__(
self,
ex_dir: pyrado.PathLike,
env: Env,
subrtn: Algorithm,
adr_hp: Dict,
svpg_hp: SVPGHyperparams,
reward_generator_hp: Dict,
max_iter: int,
num_discriminator_epoch: int,
batch_size: int,
svpg_warmup: int = 0,
num_workers: int = 4,
num_trajs_per_config: int = 8,
log_exploration: bool = False,
randomized_params: Sequence[str] = None,
logger: Optional[StepLogger] = None,
):
"""
Constructor
:param save_dir: directory to save the snapshots i.e. the results in
:param env: the environment to train in
:param subrtn: algorithm which performs the policy / value-function optimization
:param max_iter: maximum number of iterations
:param svpg_particle_hparam: SVPG particle hyperparameters
:param num_svpg_particles: number of SVPG particles
:param num_discriminator_epoch: epochs in discriminator training
:param batch_size: batch size for training
:param svpg_learning_rate: SVPG particle optimizers' learning rate
:param svpg_temperature: SVPG temperature coefficient (how strong is the influence of the particles on each other)
:param svpg_evaluation_steps: how many configurations to sample between training
:param svpg_horizon: how many steps until the particles are reset
:param svpg_kl_factor: kl reward coefficient
:param svpg_warmup: number of iterations without SVPG training in the beginning
:param svpg_serial: serial mode (see SVPG)
:param num_workers: number of environments for parallel sampling
:param num_trajs_per_config: number of trajectories to sample from each config
:param max_step_length: maximum change of physics parameters per step
:param randomized_params: which parameters to randomize
:param logger: logger for every step of the algorithm, if `None` the default logger will be created
"""
if not isinstance(env, Env):
raise pyrado.TypeErr(given=env, expected_type=Env)
if not isinstance(subrtn, Algorithm):
raise pyrado.TypeErr(given=subrtn, expected_type=Algorithm)
if not isinstance(subrtn.policy, Policy):
raise pyrado.TypeErr(given=subrtn.policy, expected_type=Policy)
# Call Algorithm's constructor
super().__init__(ex_dir, max_iter, subrtn.policy, logger)
self.log_loss = True
# Store the inputs
self.env = env
self._subrtn = subrtn
self._subrtn.save_name = "subrtn"
self.num_discriminator_epoch = num_discriminator_epoch
self.batch_size = batch_size
self.num_trajs_per_config = num_trajs_per_config
self.warm_up_time = svpg_warmup
self.log_exploration = log_exploration
self.curr_time_step = 0
randomized_params = adr_hp["randomized_params"]
# Get the number of params
if isinstance(randomized_params, list) and len(randomized_params) == 0:
randomized_params = inner_env(self.env).get_nominal_domain_param().keys()
self.params = [DomainParam(param, 1) for param in randomized_params]
self.num_params = len(self.params)
# Initialize reward generator
self.reward_generator = RewardGenerator(env.spec, logger=self.logger, **reward_generator_hp)
# Initialize logbook
self.sim_instances_full_horizon = np.random.random_sample(
(
svpg_hp["algo"]["num_particles"],
svpg_hp["algo"]["horizon"],
adr_hp["evaluation_steps"],
self.num_params,
)
)
# Initialize SVPG adapter
self.svpg_wrapper = SVPGAdapter(
env,
self.params,
subrtn.expl_strat,
self.reward_generator,
svpg_hp["algo"]["num_particles"],
horizon=svpg_hp["algo"]["horizon"],
num_rollouts_per_config=self.num_trajs_per_config,
step_length=adr_hp["step_length"],
num_workers=num_workers,
)
# Generate SVPG with default architecture using SVPGBuilder
self.svpg = SVPGBuilder(ex_dir, self.svpg_wrapper, svpg_hp).svpg
@property
def sample_count(self) -> int:
return self._subrtn.sample_count
def compute_params(self, sim_instances: to.Tensor, t: int):
"""
Compute the parameters.
:param sim_instances: Physics configurations rollout
:param t: time step to chose
:return: parameters at the time
"""
nominal = self.svpg_wrapper.nominal_dict()
keys = nominal.keys()
assert len(keys) == sim_instances[t][0].shape[0]
params = []
for sim_instance in sim_instances[t]:
d = {k: (sim_instance[i] + 0.5) * (nominal[k]) for i, k in enumerate(keys)}
params.append(d)
return params
def step(self, snapshot_mode: str, meta_info: dict = None):
rand_trajs = []
ref_trajs = []
ros = []
for i, p in enumerate(self.svpg.iter_particles):
done = False
svpg_env = self.svpg_wrapper
state = svpg_env.reset(i)
states = []
actions = []
rewards = []
infos = []
rand_trajs_now = []
exploration_logbook = []
with to.no_grad():
while not done:
action = p.expl_strat(to.as_tensor(state, dtype=to.get_default_dtype())).detach().cpu().numpy()
state, reward, done, info = svpg_env.step(action, i)
state_dict = svpg_env.array_to_dict((state + 0.5) * svpg_env.nominal())
print(state_dict, " => ", reward)
# Log visited states as dict
if self.log_exploration:
exploration_logbook.append(state_dict)
# Store rollout results
states.append(state)
rewards.append(reward)
actions.append(action)
infos.append(info)
# Extract trajectories from info
rand_trajs_now.extend(info["rand"])
rand_trajs += info["rand"]
ref_trajs += info["ref"]
ros.append(StepSequence(observations=states, actions=actions, rewards=rewards))
self.logger.add_value(f"SVPG_agent_{i}_mean_reward", np.mean(rewards))
ros[i].torch(data_type=to.DoubleTensor)
# rand_trajs_now = StepSequence.concat(rand_trajs_now)
for rt in rand_trajs_now:
self.convert_and_detach(rt)
self._subrtn.update(rand_trajs_now)
# Logging
rets = [ro.undiscounted_return() for ro in rand_trajs]
ret_avg = np.mean(rets)
ret_med = np.median(rets)
ret_std = np.std(rets)
self.logger.add_value("avg rollout len", np.mean([ro.length for ro in rand_trajs]))
self.logger.add_value("avg return", ret_avg)
self.logger.add_value("median return", ret_med)
self.logger.add_value("std return", ret_std)
# Flatten and combine all randomized and reference trajectories for discriminator
flattened_randomized = StepSequence.concat(rand_trajs)
flattened_randomized.torch(data_type=to.double)
flattened_reference = StepSequence.concat(ref_trajs)
flattened_reference.torch(data_type=to.double)
self.reward_generator.train(flattened_reference, flattened_randomized, self.num_discriminator_epoch)
pyrado.save(
self.reward_generator.discriminator, "discriminator.pt", self.save_dir, prefix="adr", use_state_dict=True
)
if self.curr_time_step > self.warm_up_time:
# Update the particles
# List of lists to comply with interface
self.svpg.update(list(map(lambda x: [x], ros)))
self.convert_and_detach(flattened_randomized)
# np.save(f'{self.save_dir}actions{self.curr_iter}', flattened_randomized.actions)
self.make_snapshot(snapshot_mode, float(ret_avg), meta_info)
self._subrtn.make_snapshot(snapshot_mode="best", curr_avg_ret=float(ret_avg))
self.curr_time_step += 1
def convert_and_detach(self, arg0):
arg0.torch(data_type=to.float)
arg0.observations = arg0.observations.float().detach()
arg0.actions = arg0.actions.float().detach()
def save_snapshot(self, meta_info: dict = None):
super().save_snapshot(meta_info)
if meta_info is not None:
raise pyrado.ValueErr(msg=f"{self.name} is not supposed be run as a subrtn!")
# This algorithm instance is not a subrtn of another algorithm
pyrado.save(self.env, "env.pkl", self.save_dir)
self._subrtn.save_snapshot(meta_info=meta_info)
# self.svpg.save_snapshot(meta_info)
class SVPGAdapter(EnvWrapper, Serializable):
"""Wrapper to encapsulate the domain parameter search as an RL task."""
def __init__(
self,
wrapped_env: Env,
parameters: Sequence[DomainParam],
inner_policy: Policy,
discriminator,
num_particles: int,
step_length: float = 0.01,
horizon: int = 50,
num_rollouts_per_config: int = 8,
num_workers: int = 4,
max_steps: int = 8,
):
"""
Constructor
:param wrapped_env: the environment to wrap
:param parameters: which physics parameters should be randomized
:param inner_policy: the policy to train the subrtn on
:param discriminator: the discriminator to distinguish reference environments from randomized ones
:param step_length: the step size
:param horizon: an svpg horizon
:param num_rollouts_per_config: number of trajectories to sample per physics configuration
:param num_workers: number of environments for parallel sampling
"""
Serializable._init(self, locals())
EnvWrapper.__init__(self, wrapped_env)
self.parameters: Sequence[DomainParam] = parameters
try:
self.pool = SamplerPool(num_workers)
except AssertionError:
Warning("THIS IS NOT MEANT TO BE PARALLEL SAMPLED")
self.inner_policy = inner_policy
self.num_particles = num_particles
self.inner_parameter_state: np.ndarray = np.zeros((self.num_particles, len(self.parameters)))
self.count = np.zeros(self.num_particles)
self.num_trajs = num_rollouts_per_config
self.svpg_max_step_length = step_length
self.discriminator = discriminator
self.max_steps = max_steps
self._adapter_obs_space = BoxSpace(-np.ones(len(parameters)), np.ones(len(parameters)))
self._adapter_act_space = BoxSpace(-np.ones(len(parameters)), np.ones(len(parameters)))
self.horizon = horizon
self.horizon_count = 0
self.reset()
@property
def obs_space(self) -> Space:
return self._adapter_obs_space
@property
def act_space(self) -> Space:
return self._adapter_act_space
def reset(self, i=None, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if i is not None:
assert domain_param is None
self.count[i] = 0
if init_state is None:
self.inner_parameter_state[i] = np.random.random_sample(len(self.parameters))
else:
self.inner_parameter_state[i] = init_state
return self.inner_parameter_state[i]
assert domain_param is None
self.count = np.zeros(self.num_particles)
if init_state is None:
self.inner_parameter_state = np.random.random_sample((self.num_particles, len(self.parameters)))
else:
self.inner_parameter_state = init_state
return self.inner_parameter_state
def step(self, act: np.ndarray, i: int) -> tuple:
if i is not None:
# Clip the action according to the maximum step length
action = np.clip(act, -1, 1) * self.svpg_max_step_length
# Perform step by moving into direction of action
self.inner_parameter_state[i] = np.clip(self.inner_parameter_state[i] + action, 0, 1)
param_norm = self.inner_parameter_state[i] + 0.5
random_parameters = [self.array_to_dict(param_norm * self.nominal())] * self.num_trajs
nominal_parameters = [self.nominal_dict()] * self.num_trajs
# Sample trajectories from random and reference environments
rand = eval_domain_params(self.pool, self.wrapped_env, self.inner_policy, random_parameters)
ref = eval_domain_params(self.pool, self.wrapped_env, self.inner_policy, nominal_parameters)
# Calculate the rewards for each trajectory
rewards = [self.discriminator.get_reward(traj) for traj in rand]
reward = np.mean(rewards)
info = dict(rand=rand, ref=ref)
# Handle step count management
done = self.count[i] >= self.max_steps - 1
self.count[i] += 1
self.horizon_count += 1
if self.count[i] % self.horizon == 0:
self.inner_parameter_state[i] = np.random.random_sample(len(self.parameters))
return self.inner_parameter_state[i], reward, done, info
raise NotImplementedError("Not parallelizable")
def eval_states(self, states: Sequence[np.ndarray]):
"""
Evaluate the states.
:param states: the states to evaluate
:return: respective rewards and according trajectories
"""
flatten = lambda l: [item for sublist in l for item in sublist]
sstates = flatten([[self.array_to_dict((state + 0.5) * self.nominal())] * self.num_trajs for state in states])
rand = eval_domain_params(self.pool, self.wrapped_env, self.inner_policy, sstates)
ref = eval_domain_params(
self.pool, self.wrapped_env, self.inner_policy, [self.nominal_dict()] * (self.num_trajs * len(states))
)
rewards = [self.discriminator.get_reward(traj) for traj in rand]
rewards = [np.mean(rewards[i * self.num_trajs : (i + 1) * self.num_trajs]) for i in range(len(states))]
return rewards, rand, ref
def params(self):
return [param.name for param in self.parameters]
def nominal(self):
return [inner_env(self.wrapped_env).get_nominal_domain_param()[k] for k in self.params()]
def nominal_dict(self):
return {k: inner_env(self.wrapped_env).get_nominal_domain_param()[k] for k in self.params()}
def array_to_dict(self, arr):
return {k: a for k, a in zip(self.params(), arr)}
class RewardGenerator:
"""Class for generating the discriminator rewards in ADR. Generates a reward using a trained discriminator network."""
def __init__(
self,
env_spec: EnvSpec,
batch_size: int,
reward_multiplier: float,
lr: float = 3e-3,
hidden_size=256,
logger: StepLogger = None,
device: str = "cuda" if to.cuda.is_available() else "cpu",
):
"""
Constructor
:param env_spec: environment specification
:param batch_size: batch size for each update step
:param reward_multiplier: factor for the predicted probability
:param lr: learning rate
:param logger: logger for every step of the algorithm, if `None` the default logger will be created
"""
self.device = device
self.batch_size = batch_size
self.reward_multiplier = reward_multiplier
self.lr = lr
spec = EnvSpec(
obs_space=BoxSpace.cat([env_spec.obs_space, env_spec.act_space]),
act_space=BoxSpace(bound_lo=[0], bound_up=[1]),
)
self.discriminator = LSTMPolicy(
spec=spec, hidden_size=hidden_size, num_recurrent_layers=1, output_nonlin=to.sigmoid
)
self.loss_fcn = nn.BCELoss()
self.optimizer = to.optim.Adam(self.discriminator.parameters(), lr=lr, eps=1e-5)
self.logger = logger
def get_reward(self, traj: StepSequence) -> to.Tensor:
"""Compute the reward of a trajectory.
Trajectories considered as not fixed yield a high reward.
:param traj: trajectory to evaluate
:return: a score
:rtype: to.Tensor
"""
traj = preprocess_rollout(traj)
with to.no_grad():
reward = self.discriminator.forward(traj)[0]
return to.log(reward.mean()) * self.reward_multiplier
def train(
self, reference_trajectory: StepSequence, randomized_trajectory: StepSequence, num_epoch: int
) -> to.Tensor:
reference_batch_generator = reference_trajectory.iterate_rollouts()
random_batch_generator = randomized_trajectory.iterate_rollouts()
loss = None
for _ in tqdm(range(num_epoch), "Discriminator Epoch", num_epoch):
for reference_batch, random_batch in zip(reference_batch_generator, random_batch_generator):
reference_batch = preprocess_rollout(reference_batch).float()
random_batch = preprocess_rollout(random_batch).float()
random_results = self.discriminator(random_batch)[0]
reference_results = self.discriminator(reference_batch)[0]
self.optimizer.zero_grad()
loss = self.loss_fcn(random_results, to.ones(random_results.shape[0], 1)) + self.loss_fcn(
reference_results, to.zeros(reference_results.shape[0], 1)
)
loss.backward()
self.optimizer.step()
# Logging
if self.logger is not None:
self.logger.add_value("discriminator_loss", loss)
return loss
def preprocess_rollout(rollout: StepSequence) -> Tensor:
"""
Extract observations and actions from a `StepSequence` and packs them into a PyTorch tensor.
:param rollout: a `StepSequence` instance containing a trajectory
:return: a PyTorch tensor` containing the trajectory
"""
if not isinstance(rollout, StepSequence):
raise pyrado.TypeErr(given=rollout, expected_type=StepSequence)
# Convert data type
rollout.torch(to.get_default_dtype())
# Extract the data
state = rollout.get_data_values("observations")[:-1]
next_state = rollout.get_data_values("observations")[1:]
action = rollout.get_data_values("actions").narrow(0, 0, next_state.shape[0])
return to.cat((state, action), 1)
| [
"numpy.clip",
"pyrado.policies.recurrent.rnn.LSTMPolicy",
"pyrado.environment_wrappers.base.EnvWrapper.__init__",
"torch.cuda.is_available",
"pyrado.algorithms.step_based.svpg.SVPGBuilder",
"pyrado.save",
"pyrado.sampling.parallel_evaluation.eval_domain_params",
"numpy.mean",
"pyrado.sampling.sample... | [((21755, 21781), 'torch.cat', 'to.cat', (['(state, action)', '(1)'], {}), '((state, action), 1)\n', (21761, 21781), True, 'import torch as to\n'), ((6471, 6608), 'numpy.random.random_sample', 'np.random.random_sample', (["(svpg_hp['algo']['num_particles'], svpg_hp['algo']['horizon'], adr_hp[\n 'evaluation_steps'], self.num_params)"], {}), "((svpg_hp['algo']['num_particles'], svpg_hp['algo'][\n 'horizon'], adr_hp['evaluation_steps'], self.num_params))\n", (6494, 6608), True, 'import numpy as np\n'), ((9932, 9945), 'numpy.mean', 'np.mean', (['rets'], {}), '(rets)\n', (9939, 9945), True, 'import numpy as np\n'), ((9964, 9979), 'numpy.median', 'np.median', (['rets'], {}), '(rets)\n', (9973, 9979), True, 'import numpy as np\n'), ((9998, 10010), 'numpy.std', 'np.std', (['rets'], {}), '(rets)\n', (10004, 10010), True, 'import numpy as np\n'), ((10387, 10418), 'pyrado.sampling.step_sequence.StepSequence.concat', 'StepSequence.concat', (['rand_trajs'], {}), '(rand_trajs)\n', (10406, 10418), False, 'from pyrado.sampling.step_sequence import StepSequence\n'), ((10505, 10535), 'pyrado.sampling.step_sequence.StepSequence.concat', 'StepSequence.concat', (['ref_trajs'], {}), '(ref_trajs)\n', (10524, 10535), False, 'from pyrado.sampling.step_sequence import StepSequence\n'), ((10708, 10831), 'pyrado.save', 'pyrado.save', (['self.reward_generator.discriminator', '"""discriminator.pt"""', 'self.save_dir'], {'prefix': '"""adr"""', 'use_state_dict': '(True)'}), "(self.reward_generator.discriminator, 'discriminator.pt', self.\n save_dir, prefix='adr', use_state_dict=True)\n", (10719, 10831), False, 'import pyrado\n'), ((11879, 11926), 'pyrado.save', 'pyrado.save', (['self.env', '"""env.pkl"""', 'self.save_dir'], {}), "(self.env, 'env.pkl', self.save_dir)\n", (11890, 11926), False, 'import pyrado\n'), ((13149, 13187), 'pyrado.environment_wrappers.base.EnvWrapper.__init__', 'EnvWrapper.__init__', (['self', 'wrapped_env'], {}), '(self, wrapped_env)\n', 
(13168, 13187), False, 'from pyrado.environment_wrappers.base import EnvWrapper\n'), ((13613, 13641), 'numpy.zeros', 'np.zeros', (['self.num_particles'], {}), '(self.num_particles)\n', (13621, 13641), True, 'import numpy as np\n'), ((14780, 14808), 'numpy.zeros', 'np.zeros', (['self.num_particles'], {}), '(self.num_particles)\n', (14788, 14808), True, 'import numpy as np\n'), ((17031, 17106), 'pyrado.sampling.parallel_evaluation.eval_domain_params', 'eval_domain_params', (['self.pool', 'self.wrapped_env', 'self.inner_policy', 'sstates'], {}), '(self.pool, self.wrapped_env, self.inner_policy, sstates)\n', (17049, 17106), False, 'from pyrado.sampling.parallel_evaluation import eval_domain_params\n'), ((19044, 19144), 'pyrado.policies.recurrent.rnn.LSTMPolicy', 'LSTMPolicy', ([], {'spec': 'spec', 'hidden_size': 'hidden_size', 'num_recurrent_layers': '(1)', 'output_nonlin': 'to.sigmoid'}), '(spec=spec, hidden_size=hidden_size, num_recurrent_layers=1,\n output_nonlin=to.sigmoid)\n', (19054, 19144), False, 'from pyrado.policies.recurrent.rnn import LSTMPolicy\n'), ((19187, 19199), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (19197, 19199), True, 'from torch import nn as nn\n'), ((21394, 21451), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'rollout', 'expected_type': 'StepSequence'}), '(given=rollout, expected_type=StepSequence)\n', (21408, 21451), False, 'import pyrado\n'), ((21495, 21517), 'torch.get_default_dtype', 'to.get_default_dtype', ([], {}), '()\n', (21515, 21517), True, 'import torch as to\n'), ((5056, 5100), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'env', 'expected_type': 'Env'}), '(given=env, expected_type=Env)\n', (5070, 5100), False, 'import pyrado\n'), ((5165, 5218), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'subrtn', 'expected_type': 'Algorithm'}), '(given=subrtn, expected_type=Algorithm)\n', (5179, 5218), False, 'import pyrado\n'), ((5287, 5344), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'subrtn.policy', 
'expected_type': 'Policy'}), '(given=subrtn.policy, expected_type=Policy)\n', (5301, 5344), False, 'import pyrado\n'), ((6162, 6183), 'pyrado.domain_randomization.domain_parameter.DomainParam', 'DomainParam', (['param', '(1)'], {}), '(param, 1)\n', (6173, 6183), False, 'from pyrado.domain_randomization.domain_parameter import DomainParam\n'), ((7230, 7277), 'pyrado.algorithms.step_based.svpg.SVPGBuilder', 'SVPGBuilder', (['ex_dir', 'self.svpg_wrapper', 'svpg_hp'], {}), '(ex_dir, self.svpg_wrapper, svpg_hp)\n', (7241, 7277), False, 'from pyrado.algorithms.step_based.svpg import SVPGBuilder, SVPGHyperparams\n'), ((10060, 10101), 'numpy.mean', 'np.mean', (['[ro.length for ro in rand_trajs]'], {}), '([ro.length for ro in rand_trajs])\n', (10067, 10101), True, 'import numpy as np\n'), ((11727, 11798), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'msg': 'f"""{self.name} is not supposed be run as a subrtn!"""'}), "(msg=f'{self.name} is not supposed be run as a subrtn!')\n", (11742, 11798), False, 'import pyrado\n'), ((13286, 13310), 'pyrado.sampling.sampler_pool.SamplerPool', 'SamplerPool', (['num_workers'], {}), '(num_workers)\n', (13297, 13310), False, 'from pyrado.sampling.sampler_pool import SamplerPool\n'), ((15381, 15434), 'numpy.clip', 'np.clip', (['(self.inner_parameter_state[i] + action)', '(0)', '(1)'], {}), '(self.inner_parameter_state[i] + action, 0, 1)\n', (15388, 15434), True, 'import numpy as np\n'), ((15760, 15849), 'pyrado.sampling.parallel_evaluation.eval_domain_params', 'eval_domain_params', (['self.pool', 'self.wrapped_env', 'self.inner_policy', 'random_parameters'], {}), '(self.pool, self.wrapped_env, self.inner_policy,\n random_parameters)\n', (15778, 15849), False, 'from pyrado.sampling.parallel_evaluation import eval_domain_params\n'), ((15864, 15954), 'pyrado.sampling.parallel_evaluation.eval_domain_params', 'eval_domain_params', (['self.pool', 'self.wrapped_env', 'self.inner_policy', 'nominal_parameters'], {}), '(self.pool, self.wrapped_env, 
self.inner_policy,\n nominal_parameters)\n', (15882, 15954), False, 'from pyrado.sampling.parallel_evaluation import eval_domain_params\n'), ((16106, 16122), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (16113, 16122), True, 'import numpy as np\n'), ((17358, 17419), 'numpy.mean', 'np.mean', (['rewards[i * self.num_trajs:(i + 1) * self.num_trajs]'], {}), '(rewards[i * self.num_trajs:(i + 1) * self.num_trajs])\n', (17365, 17419), True, 'import numpy as np\n'), ((18295, 18317), 'torch.cuda.is_available', 'to.cuda.is_available', ([], {}), '()\n', (18315, 18317), True, 'import torch as to\n'), ((19652, 19664), 'torch.no_grad', 'to.no_grad', ([], {}), '()\n', (19662, 19664), True, 'import torch as to\n'), ((8452, 8464), 'torch.no_grad', 'to.no_grad', ([], {}), '()\n', (8462, 8464), True, 'import torch as to\n'), ((9565, 9581), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (9572, 9581), True, 'import numpy as np\n'), ((15226, 15245), 'numpy.clip', 'np.clip', (['act', '(-1)', '(1)'], {}), '(act, -1, 1)\n', (15233, 15245), True, 'import numpy as np\n'), ((18889, 18943), 'pyrado.spaces.box.BoxSpace.cat', 'BoxSpace.cat', (['[env_spec.obs_space, env_spec.act_space]'], {}), '([env_spec.obs_space, env_spec.act_space])\n', (18901, 18943), False, 'from pyrado.spaces.box import BoxSpace\n'), ((18967, 19003), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', ([], {'bound_lo': '[0]', 'bound_up': '[1]'}), '(bound_lo=[0], bound_up=[1])\n', (18975, 19003), False, 'from pyrado.spaces.box import BoxSpace\n'), ((9431, 9498), 'pyrado.sampling.step_sequence.StepSequence', 'StepSequence', ([], {'observations': 'states', 'actions': 'actions', 'rewards': 'rewards'}), '(observations=states, actions=actions, rewards=rewards)\n', (9443, 9498), False, 'from pyrado.sampling.step_sequence import StepSequence\n'), ((17605, 17632), 'pyrado.environment_wrappers.utils.inner_env', 'inner_env', (['self.wrapped_env'], {}), '(self.wrapped_env)\n', (17614, 17632), False, 'from 
pyrado.environment_wrappers.utils import inner_env\n'), ((17735, 17762), 'pyrado.environment_wrappers.utils.inner_env', 'inner_env', (['self.wrapped_env'], {}), '(self.wrapped_env)\n', (17744, 17762), False, 'from pyrado.environment_wrappers.utils import inner_env\n'), ((20669, 20704), 'torch.ones', 'to.ones', (['random_results.shape[0]', '(1)'], {}), '(random_results.shape[0], 1)\n', (20676, 20704), True, 'import torch as to\n'), ((20762, 20801), 'torch.zeros', 'to.zeros', (['reference_results.shape[0]', '(1)'], {}), '(reference_results.shape[0], 1)\n', (20770, 20801), True, 'import torch as to\n'), ((6085, 6104), 'pyrado.environment_wrappers.utils.inner_env', 'inner_env', (['self.env'], {}), '(self.env)\n', (6094, 6104), False, 'from pyrado.environment_wrappers.utils import inner_env\n'), ((8566, 8588), 'torch.get_default_dtype', 'to.get_default_dtype', ([], {}), '()\n', (8586, 8588), True, 'import torch as to\n')] |
# This file is Public Domain and may be used without restrictions.
import _jpype
import jpype
from jpype.types import *
from jpype import java
import jpype.dbapi2 as dbapi2
import common
import time
try:
import zlib
except ImportError:
zlib = None
class SQLModuleTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def assertIsSubclass(self, a, b):
self.assertTrue(issubclass(a, b), "`%s` is not a subclass of `%s`" % (a.__name__, b.__name__))
def testConstants(self):
self.assertEqual(dbapi2.apilevel, "2.0")
self.assertEqual(dbapi2.threadsafety, 2)
self.assertEqual(dbapi2.paramstyle, "qmark")
def testExceptions(self):
self.assertIsSubclass(dbapi2.Warning, Exception)
self.assertIsSubclass(dbapi2.Error, Exception)
self.assertIsSubclass(dbapi2.InterfaceError, dbapi2.Error)
self.assertIsSubclass(dbapi2.DatabaseError, dbapi2.Error)
self.assertIsSubclass(dbapi2._SQLException, dbapi2.Error)
self.assertIsSubclass(dbapi2.DataError, dbapi2.DatabaseError)
self.assertIsSubclass(dbapi2.OperationalError, dbapi2.DatabaseError)
self.assertIsSubclass(dbapi2.IntegrityError, dbapi2.DatabaseError)
self.assertIsSubclass(dbapi2.InternalError, dbapi2.DatabaseError)
self.assertIsSubclass(dbapi2.InternalError, dbapi2.DatabaseError)
self.assertIsSubclass(dbapi2.ProgrammingError, dbapi2.DatabaseError)
self.assertIsSubclass(dbapi2.NotSupportedError, dbapi2.DatabaseError)
def testConnectionExceptions(self):
cx = dbapi2.Connection
self.assertEqual(cx.Warning, dbapi2.Warning)
self.assertEqual(cx.Error, dbapi2.Error)
self.assertEqual(cx.InterfaceError, dbapi2.InterfaceError)
self.assertEqual(cx.DatabaseError, dbapi2.DatabaseError)
self.assertEqual(cx.DataError, dbapi2.DataError)
self.assertEqual(cx.OperationalError, dbapi2.OperationalError)
self.assertEqual(cx.IntegrityError, dbapi2.IntegrityError)
self.assertEqual(cx.InternalError, dbapi2.InternalError)
self.assertEqual(cx.InternalError, dbapi2.InternalError)
self.assertEqual(cx.ProgrammingError, dbapi2.ProgrammingError)
self.assertEqual(cx.NotSupportedError, dbapi2.NotSupportedError)
def test_Date(self):
d1 = dbapi2.Date(2002, 12, 25) # noqa F841
d2 = dbapi2.DateFromTicks( # noqa F841
time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(d1),str(d2))
def test_Time(self):
t1 = dbapi2.Time(13, 45, 30) # noqa F841
t2 = dbapi2.TimeFromTicks( # noqa F841
time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Timestamp(self):
t1 = dbapi2.Timestamp(2002, 12, 25, 13, 45, 30) # noqa F841
t2 = dbapi2.TimestampFromTicks( # noqa F841
time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Binary(self):
b = dbapi2.Binary(b"Something")
b = dbapi2.Binary(b"") # noqa F841
def test_STRING(self):
self.assertTrue(hasattr(dbapi2, "STRING"), "module.STRING must be defined")
def test_BINARY(self):
self.assertTrue(
hasattr(dbapi2, "BINARY"), "module.BINARY must be defined."
)
def test_NUMBER(self):
self.assertTrue(
hasattr(dbapi2, "NUMBER"), "module.NUMBER must be defined."
)
def test_DATETIME(self):
self.assertTrue(
hasattr(dbapi2, "DATETIME"), "module.DATETIME must be defined."
)
def test_ROWID(self):
self.assertTrue(hasattr(dbapi2, "ROWID"), "module.ROWID must be defined.")
class SQLTablesTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def testStr(self):
for i in dbapi2._types:
self.assertIsInstance(str(i), str)
def testRepr(self):
for i in dbapi2._types:
self.assertIsInstance(repr(i), str)
| [
"jpype.dbapi2.Time",
"common.JPypeTestCase.setUp",
"time.mktime",
"jpype.dbapi2.Timestamp",
"jpype.dbapi2.Date",
"jpype.dbapi2.Binary"
] | [((335, 367), 'common.JPypeTestCase.setUp', 'common.JPypeTestCase.setUp', (['self'], {}), '(self)\n', (361, 367), False, 'import common\n'), ((2372, 2397), 'jpype.dbapi2.Date', 'dbapi2.Date', (['(2002)', '(12)', '(25)'], {}), '(2002, 12, 25)\n', (2383, 2397), True, 'import jpype.dbapi2 as dbapi2\n'), ((2682, 2705), 'jpype.dbapi2.Time', 'dbapi2.Time', (['(13)', '(45)', '(30)'], {}), '(13, 45, 30)\n', (2693, 2705), True, 'import jpype.dbapi2 as dbapi2\n'), ((2996, 3038), 'jpype.dbapi2.Timestamp', 'dbapi2.Timestamp', (['(2002)', '(12)', '(25)', '(13)', '(45)', '(30)'], {}), '(2002, 12, 25, 13, 45, 30)\n', (3012, 3038), True, 'import jpype.dbapi2 as dbapi2\n'), ((3332, 3359), 'jpype.dbapi2.Binary', 'dbapi2.Binary', (["b'Something'"], {}), "(b'Something')\n", (3345, 3359), True, 'import jpype.dbapi2 as dbapi2\n'), ((3372, 3390), 'jpype.dbapi2.Binary', 'dbapi2.Binary', (["b''"], {}), "(b'')\n", (3385, 3390), True, 'import jpype.dbapi2 as dbapi2\n'), ((4115, 4147), 'common.JPypeTestCase.setUp', 'common.JPypeTestCase.setUp', (['self'], {}), '(self)\n', (4141, 4147), False, 'import common\n'), ((2471, 2516), 'time.mktime', 'time.mktime', (['(2002, 12, 25, 0, 0, 0, 0, 0, 0)'], {}), '((2002, 12, 25, 0, 0, 0, 0, 0, 0))\n', (2482, 2516), False, 'import time\n'), ((2779, 2825), 'time.mktime', 'time.mktime', (['(2001, 1, 1, 13, 45, 30, 0, 0, 0)'], {}), '((2001, 1, 1, 13, 45, 30, 0, 0, 0))\n', (2790, 2825), False, 'import time\n'), ((3117, 3165), 'time.mktime', 'time.mktime', (['(2002, 12, 25, 13, 45, 30, 0, 0, 0)'], {}), '((2002, 12, 25, 13, 45, 30, 0, 0, 0))\n', (3128, 3165), False, 'import time\n')] |
"""
Stats stuff!
"""
import textwrap
import numpy as np
import pandas as pd
from scipy import stats
from IPython.display import display
from .boxes import *
from .table_display import *
__DEBUG__ = False
def debug(*args, **kwargs):
    """Forward all arguments to print(), but only while the module-level
    __DEBUG__ flag is set; otherwise do nothing."""
    if not __DEBUG__:
        return
    print(*args, **kwargs)
class Chi2Result(object):
    """Holds the outcome of a Chi² independence test.

    Exists mainly so notebooks can render the result nicely, both as
    plain text (``__repr__``) and as HTML (``_repr_html_``).
    """
    def __init__(self, name1: str, name2: str, xs: pd.DataFrame, dof: int,
                 p: float, alpha=0.05):
        """Store the test inputs and outputs.

        name1, name2: names of the two compared variables.
        xs: the contingency table the statistic was computed from.
        dof: degrees of freedom.
        p: the p-value; alpha: significance threshold for the verdict.
        """
        self.name1 = name1
        self.name2 = name2
        self.xs = xs
        self.dof = dof
        self.p = p
        self.alpha = alpha
    def _verdict(self):
        # Significance phrasing shared by both renderers; note the
        # comparison is `p <= alpha`, kept identical in both places.
        return (f'p ≤ {self.alpha}' if self.p <= self.alpha
                else f'p > {self.alpha}')
    def __repr__(self):
        """Return the plain-text summary of this result."""
        parts = [
            '',
            f'Chi2 analysis between {self.name1} and {self.name2}',
            f'p = {self.p:.4f} with {self.dof} degree(s) of freedom.',
            self._verdict(),
            '',
        ]
        return '\n'.join(parts)
    def _repr_html_(self):
        """Return the HTML summary; highlighted when significant."""
        p_conclusion = self._verdict()
        tpl = f"""
        <div style="font-family: courier; padding: 0px 10px;">
            <div style="text-align:center">
                Chi² analysis between <b>{self.name1}</b> and
                <b>{self.name2}</b></div>
            <div>p-value: <b>{self.p:.4f}</b> with
                <b>{self.dof}</b> degree(s) of freedom.</div>
            <div>{p_conclusion}</div>
        </div>
        """
        if self.p <= self.alpha:
            return info(tpl, raw=True)
        return box(tpl, '#efefef', '#cfcfcf', raw=True)
def Chi2(col1: pd.Series, col2: pd.Series, show_crosstab=False) -> Chi2Result:
    """Run a Chi² test of independence between two categorical columns.

    col1, col2: the two series to cross-tabulate.
    show_crosstab: when True, also display the contingency table.
    Returns a Chi2Result wrapping the column names, table, dof and p-value.
    """
    contingency = pd.crosstab(col1, col2)
    # chi2_contingency also returns the statistic and expected
    # frequencies; only p and dof are needed for the report.
    _, p, dof, _expected = stats.chi2_contingency(contingency)
    if show_crosstab:
        display(contingency)
    return Chi2Result(col1.name, col2.name, contingency, dof, p)
class CMHResult(object):
    """Outcome of a Cochran-Mantel-Haenszel (CMH) Chi² analysis."""
    def __init__(self, STATISTIC, df, p, var1, var2, stratifier, alpha=0.05):
        """Store the CMH test result.

        STATISTIC: the M^2 test statistic.
        df: degrees of freedom.
        p: p-value.
        var1, var2: names of the two associated variables.
        stratifier: name of the stratifying variable.
        alpha: significance threshold used when rendering.
        """
        self.STATISTIC = STATISTIC
        self.df = df
        self.p = p
        self.var1 = var1
        self.var2 = var2
        self.stratifier = stratifier
        self.alpha = alpha
    def __repr__(self):
        """Return the plain-text summary of this result."""
        stat = round(self.STATISTIC, 5)
        pval = round(self.p, 4)
        parts = [
            '',
            'Cochran-Mantel-Haenszel Chi2 test',
            f'"{self.var1}" x "{self.var2}", stratified by "{self.stratifier}"',
            f'Cochran-Mantel-Haenszel M^2 = {stat}, df = {self.df}, p-value = {pval}',
            '',
        ]
        return '\n'.join(parts)
    def _repr_html_(self):
        """Return the HTML summary; blue box when p <= alpha, grey otherwise."""
        stat = round(self.STATISTIC, 5)
        pval = round(self.p, 4)
        df = self.df
        tpl = f"""
        <div style="font-family: courier; font-size: 10pt; padding: 0px 10px;">
            <div style="text-align:center">
                Cochran-Mantel-Haenszel Chi² test
            </div>
            <div>
                <b>{self.var1}</b> x <b>{self.var2}</b>,
                stratified by <b>{self.stratifier}</b>
            </div>
            <div>
                Cochran-Mantel-Haenszel
                M^2 = {stat},
                df = {df},
                p-value = <b>{pval}</b>
            </div>
        </div>
        """
        # Grey when not significant, blue when it is.
        if pval > self.alpha:
            colors = ('#efefef', '#cfcfcf')
        else:
            colors = ('#b0cbe9', '#4393e1')
        return box(tpl, *colors)
def CMH(df: pd.DataFrame, var: str, outcome: str, stratifier: str, raw=False):
    """Compute the CMH statistic.

    Based on "Categorical Data Analysis", page 295 by Agresti (2002) and
    the R implementation of mantelhaen.test().
    """
    data = df.copy()
    # Cast all three columns to categoricals so every level is represented
    # (even levels absent from some strata).
    for column in (outcome, var, stratifier):
        data[column] = data[column].astype('category')
    # Contingency-table dimensions: I x J cells over K strata.
    I = len(data[outcome].cat.categories)
    J = len(data[var].cat.categories)
    K = len(data[stratifier].cat.categories)
    contingency_tables = np.zeros((I, J, K), dtype='float')
    # One partial contingency table per stratum level.
    for k, level in enumerate(data[stratifier].cat.categories):
        subset = data.loc[data[stratifier] == level, [var, outcome]]
        contingency_tables[:, :, k] = pd.crosstab(
            subset[outcome], subset[var], dropna=False
        )
    # Delegate the actual CMH computation.
    stat, dof, pval = CMH_numpy(contingency_tables)
    if raw:
        return stat, dof, pval
    return CMHResult(stat, dof, pval, var, outcome, stratifier)
def CMH_numpy(X):
    """Compute the CMH statistic.

    Based on "Categorical Data Analysis", page 295 by Agresti (2002) and
    R implementation of mantelhaen.test().

    Parameters
    ----------
    X : np.ndarray
        Stacked contingency tables of shape (I, J, K): I rows, J columns,
        K strata (one partial table per stratum).

    Returns
    -------
    tuple
        (STATISTIC, df, pval): the generalized CMH M^2 statistic, its
        degrees of freedom (I-1)*(J-1), and the chi-squared p-value.
    """
    # I: nr. of rows
    # J: nr. of columns
    # K: nr. of strata
    # ⚠️ Note: this does *not* match the format used when printing!
    I, J, K = X.shape
    debug(f"I: {I}, J: {J}, K: {K}")
    debug()
    df = (I - 1) * (J - 1)
    debug(f'{df} degree(s) of freedom')
    # Initialize m and n to a vector(0) of length df
    n = np.zeros(df)
    m = np.zeros(df)
    V = np.zeros((df, df))
    # iterate over the strata
    for k in range(K):
        debug(f'partial {k}')
        # f holds partial contigency table k
        f = X[:, :, k]
        # debuggin'
        debug(' f:')
        debug(f)
        debug()
        # Sum of *all* values in the partial table
        ntot = f.sum()
        debug(f' ntot: {ntot}')
        # Compute the sum over all row/column entries *excluding* the last
        # entry. The last entries are excluded, as they hold redundant
        # information in combination with the row/column totals.
        colsums = f.sum(axis=0)[:-1]
        rowsums = f.sum(axis=1)[:-1]
        debug(' rowsums:', rowsums)
        debug(' colsums:', colsums)
        # f[-I, -J] holds the partial matrix, excluding the last row & column.
        # The result is reshaped into a vector.
        debug(' f[:-1, :-1].reshape(-1): ', f[:-1, :-1].reshape(-1))
        # Accumulate the observed (truncated) cell counts over strata.
        n = n + f[:-1, :-1].reshape(-1)
        # Take the outer product of the row- and colsums, divide it by the
        # total of the partial table. Yields a vector of length df. This holds
        # the expected value under the assumption of conditional independence.
        m_k = (np.outer(rowsums, colsums) / ntot).reshape(-1)
        m = m + m_k
        debug(' m_k:', m_k)
        debug()
        # V_k holds the null covariance matrix (matrices).
        # (The [:J, :J] / [:I, :I] slices are no-ops kept from the reference
        # implementation; np.diag of the truncated sums is already square.)
        k1 = np.diag(ntot * colsums)[:J, :J] - np.outer(colsums, colsums)
        k2 = np.diag(ntot * rowsums)[:I, :I] - np.outer(rowsums, rowsums)
        debug('np.kron(k1, k2):')
        debug(np.kron(k1, k2))
        debug()
        V_k = np.kron(k1, k2) / (ntot**2 * (ntot - 1))
        debug(' V_k:')
        debug(V_k)
        V = V + V_k
        debug()
    # Subtract the mean from the entries
    n = n - m
    debug(f'n: {n}')
    debug()
    debug('np.linalg.solve(V, n):')
    debug(np.linalg.solve(V, n))
    debug()
    # Quadratic form n' V^{-1} n, solved rather than inverting V explicitly.
    STATISTIC = np.inner(n, np.linalg.solve(V, n).transpose())
    debug('STATISTIC:', STATISTIC)
    pval = 1 - stats.chi2.cdf(STATISTIC, df)
    return STATISTIC, df, pval
def table1(df, vars, outcome, p_name='p', p_precision=None, title=''):
    """Prepare a clinical-style "Table 1".

    For every variable in *vars*, cross-tabulates it against *outcome*,
    reports absolute counts with column percentages in each cell, and adds
    a chi-squared p-value shown on the first row of each variable's block.

    Parameters
    ----------
    df : pd.DataFrame
        Source data containing *vars* and *outcome* columns.
    vars : list of str
        Column names to tabulate against the outcome.
    outcome : str
        Name of the (categorical) outcome column.
    p_name : str, default 'p'
        Header of the p-value column.
    p_precision : int, optional
        If given, p-values are stored as strings with this many decimals;
        otherwise the raw float is stored (NaN on non-first rows).
    title : str, default ''
        Optional extra top level for the column MultiIndex.

    Returns
    -------
    pd.DataFrame
        Concatenation of all per-variable tables with a
        ('variable', 'values') row MultiIndex.
    """
    def replace(string, dict_):
        # Exact-match translation with passthrough for unknown keys.
        return dict_.get(string, string)

    # We're going to create multiple tables, one for each variable.
    tables = []
    col2 = df[outcome]
    totals = col2.value_counts()
    # Series.iteritems() was removed in pandas 2.0; items() is the
    # long-supported equivalent.
    headers = {
        header: f'{header} (n={total})' for header, total in totals.items()
    }
    # Iterate over the variables
    for v in vars:
        col1 = df[v]
        # Crosstab with absolute numbers
        x1 = pd.crosstab(col1, col2)
        # Crosstab with percentages
        x2 = pd.crosstab(col1, col2, normalize='columns')
        x2 = (x2 * 100).round(1)
        # Chi2 is calculated using absolute nrs.
        chi2, p, dof, expected = stats.chi2_contingency(x1)
        # Combine absolute nrs. with percentages in a single cell.
        xs = x1.astype('str') + ' (' + x2.applymap('{:3.1f}'.format) + ')'
        # Add the totals ('n={total}') to the headers
        xs.columns = [replace(h, headers) for h in list(xs.columns)]
        # If title is provided, add a level to the column index and put
        # it there (on top).
        if title:
            xs.columns = pd.MultiIndex.from_product(
                [[title, ], list(xs.columns)],
            )
        # Add the p-value in a new column, but only in the top row.
        xs[p_name] = ''
        if p_precision:
            p_tpl = f"{{:.{p_precision}f}}"
            xs.iloc[0, len(xs.columns) - 1] = p_tpl.format(p)
        else:
            xs[p_name] = np.nan
            xs.iloc[0, len(xs.columns) - 1] = p
        # Prepend the name of the current variable to the row index, so we
        # can concat the tables later ...
        xs.index = pd.MultiIndex.from_product(
            [[v, ], list(xs.index)],
            names=['variable', 'values']
        )
        tables.append(xs)
    return pd.concat(tables)
| [
"textwrap.dedent",
"IPython.display.display",
"numpy.linalg.solve",
"scipy.stats.chi2.cdf",
"scipy.stats.chi2_contingency",
"pandas.crosstab",
"numpy.diag",
"numpy.kron",
"numpy.zeros",
"numpy.outer",
"pandas.concat"
] | [((2089, 2112), 'pandas.crosstab', 'pd.crosstab', (['col1', 'col2'], {}), '(col1, col2)\n', (2100, 2112), True, 'import pandas as pd\n'), ((2139, 2165), 'scipy.stats.chi2_contingency', 'stats.chi2_contingency', (['xs'], {}), '(xs)\n', (2161, 2165), False, 'from scipy import stats\n'), ((4582, 4616), 'numpy.zeros', 'np.zeros', (['(I, J, K)'], {'dtype': '"""float"""'}), "((I, J, K), dtype='float')\n", (4590, 4616), True, 'import numpy as np\n'), ((5626, 5638), 'numpy.zeros', 'np.zeros', (['df'], {}), '(df)\n', (5634, 5638), True, 'import numpy as np\n'), ((5647, 5659), 'numpy.zeros', 'np.zeros', (['df'], {}), '(df)\n', (5655, 5659), True, 'import numpy as np\n'), ((5668, 5686), 'numpy.zeros', 'np.zeros', (['(df, df)'], {}), '((df, df))\n', (5676, 5686), True, 'import numpy as np\n'), ((9807, 9824), 'pandas.concat', 'pd.concat', (['tables'], {}), '(tables)\n', (9816, 9824), True, 'import pandas as pd\n'), ((1136, 1154), 'textwrap.dedent', 'textwrap.dedent', (['s'], {}), '(s)\n', (1151, 1154), False, 'import textwrap\n'), ((2197, 2208), 'IPython.display.display', 'display', (['xs'], {}), '(xs)\n', (2204, 2208), False, 'from IPython.display import display\n'), ((2926, 3168), 'textwrap.dedent', 'textwrap.dedent', (['f"""\n Cochran-Mantel-Haenszel Chi2 test\n\n "{self.var1}" x "{self.var2}", stratified by "{self.stratifier}"\n\n Cochran-Mantel-Haenszel M^2 = {stat}, df = {df}, p-value = {pval}\n """'], {}), '(\n f"""\n Cochran-Mantel-Haenszel Chi2 test\n\n "{self.var1}" x "{self.var2}", stratified by "{self.stratifier}"\n\n Cochran-Mantel-Haenszel M^2 = {stat}, df = {df}, p-value = {pval}\n """\n )\n', (2941, 3168), False, 'import textwrap\n'), ((4808, 4863), 'pandas.crosstab', 'pd.crosstab', (['subset[outcome]', 'subset[var]'], {'dropna': '(False)'}), '(subset[outcome], subset[var], dropna=False)\n', (4819, 4863), True, 'import pandas as pd\n'), ((7547, 7568), 'numpy.linalg.solve', 'np.linalg.solve', (['V', 'n'], {}), '(V, n)\n', (7562, 7568), True, 'import numpy as 
np\n'), ((7697, 7726), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['STATISTIC', 'df'], {}), '(STATISTIC, df)\n', (7711, 7726), False, 'from scipy import stats\n'), ((8399, 8422), 'pandas.crosstab', 'pd.crosstab', (['col1', 'col2'], {}), '(col1, col2)\n', (8410, 8422), True, 'import pandas as pd\n'), ((8473, 8517), 'pandas.crosstab', 'pd.crosstab', (['col1', 'col2'], {'normalize': '"""columns"""'}), "(col1, col2, normalize='columns')\n", (8484, 8517), True, 'import pandas as pd\n'), ((8634, 8660), 'scipy.stats.chi2_contingency', 'stats.chi2_contingency', (['x1'], {}), '(x1)\n', (8656, 8660), False, 'from scipy import stats\n'), ((7090, 7116), 'numpy.outer', 'np.outer', (['colsums', 'colsums'], {}), '(colsums, colsums)\n', (7098, 7116), True, 'import numpy as np\n'), ((7164, 7190), 'numpy.outer', 'np.outer', (['rowsums', 'rowsums'], {}), '(rowsums, rowsums)\n', (7172, 7190), True, 'import numpy as np\n'), ((7240, 7255), 'numpy.kron', 'np.kron', (['k1', 'k2'], {}), '(k1, k2)\n', (7247, 7255), True, 'import numpy as np\n'), ((7288, 7303), 'numpy.kron', 'np.kron', (['k1', 'k2'], {}), '(k1, k2)\n', (7295, 7303), True, 'import numpy as np\n'), ((7056, 7079), 'numpy.diag', 'np.diag', (['(ntot * colsums)'], {}), '(ntot * colsums)\n', (7063, 7079), True, 'import numpy as np\n'), ((7130, 7153), 'numpy.diag', 'np.diag', (['(ntot * rowsums)'], {}), '(ntot * rowsums)\n', (7137, 7153), True, 'import numpy as np\n'), ((7611, 7632), 'numpy.linalg.solve', 'np.linalg.solve', (['V', 'n'], {}), '(V, n)\n', (7626, 7632), True, 'import numpy as np\n'), ((6871, 6897), 'numpy.outer', 'np.outer', (['rowsums', 'colsums'], {}), '(rowsums, colsums)\n', (6879, 6897), True, 'import numpy as np\n')] |
import os
import numpy as np
import matplotlib.pyplot as plt
from .kshell_utilities import atomic_numbers, loadtxt
from .general_utilities import create_spin_parity_list, gamma_strength_function_average
class LEE:
    """Low-energy enhancement (LEE) analysis over a directory of KSHELL runs.

    Indexes per-element subdirectories containing ``summary*`` data files,
    then compares the summed gamma strength function (gsf) below 2 MeV to
    the 2-6 MeV range for each isotope.
    """
    def __init__(self, directory):
        """Scan *directory* and index all summary files per element.

        Parameters
        ----------
        directory : str
            Path containing one subdirectory per element, each holding
            KSHELL ``summary`` data files.
        """
        self.bin_width = 0.2
        self.E_max = 30
        self.Ex_min = 0   # Lower limit for emitted gamma energy [MeV].
        self.Ex_max = 30  # Upper limit for emitted gamma energy [MeV].
        n_bins = int(np.ceil(self.E_max/self.bin_width))
        E_max_adjusted = self.bin_width*n_bins
        bins = np.linspace(0, E_max_adjusted, n_bins + 1)
        self.bins_middle = (bins[0: -1] + bins[1:])/2
        self.all_fnames = {}
        self.directory = directory
        for element in sorted(os.listdir(self.directory)):
            # Only element directories are of interest.
            if not os.path.isdir(f"{self.directory}/{element}"):
                continue
            self.all_fnames[element] = []  # Blank entry for current element.
            for isotope in os.listdir(f"{self.directory}/{element}"):
                # Only summary data files are of interest.
                if not isotope.startswith("summary"):
                    continue
                try:
                    # Example: O16 (single-letter element symbol).
                    n_neutrons = int(isotope[9:11])
                except ValueError:
                    # Example: Ne20 (two-letter element symbol).
                    n_neutrons = int(isotope[10:12])
                n_neutrons -= atomic_numbers[element.split("_")[1]]
                self.all_fnames[element].append([f"{element}/{isotope}", n_neutrons])
        for key in self.all_fnames:
            # Sort each list in the dict by the number of neutrons.
            self.all_fnames[key].sort(key=lambda tup: tup[1])

    def calculate_low_energy_enhancement(self, filter=None):
        """Compute the low/high-energy gsf ratio for every indexed isotope.

        Recreates the figure from Jorgen's article.  Fills ``self.labels``,
        ``self.n_neutrons`` and ``self.ratios`` for later plotting.

        Parameters
        ----------
        filter : iterable of str, optional
            Element symbols to include; all other elements are skipped.
        """
        self.labels = []  # Suggested labels for plotting.
        self.ratios = []
        self.n_neutrons = []
        for key in self.all_fnames:
            # Loop over all elements.
            fnames = self.all_fnames[key]
            if filter is not None:
                if key.split("_")[1] not in filter:
                    # Skip elements not in filter.
                    continue
            ratios = []  # Reset ratio for every new element.
            for i in range(len(fnames)):
                # Loop over all isotopes per element.
                try:
                    res = loadtxt(f"{self.directory}/{fnames[i][0]}")
                except FileNotFoundError:
                    print(f"File {fnames[i][0]} skipped! File not found.")
                    ratios.append(None)  # Maintain correct list length for plotting.
                    continue
                Jpi_list = create_spin_parity_list(
                    spins = res.levels[:, 1],
                    parities = res.levels[:, 2]
                )
                E_gs = res.levels[0, 0]
                try:
                    res.transitions[:, 2] += E_gs  # Add ground state energy.
                except IndexError:
                    print(f"File {fnames[i][0]} skipped! Too few / no energy levels are present in this data file.")
                    ratios.append(None)  # Maintain correct list length for plotting.
                    continue
                try:
                    # BUG FIX: this previously called the undefined name
                    # 'strength_function_average'; the helper imported at the
                    # top of the file is 'gamma_strength_function_average'
                    # (assumed to take the same arguments — confirm).
                    gsf = gamma_strength_function_average(
                        levels = res.levels,
                        transitions = res.transitions,
                        Jpi_list = Jpi_list,
                        bin_width = self.bin_width,
                        Ex_min = self.Ex_min,  # [MeV].
                        Ex_max = self.Ex_max,  # [MeV].
                        multipole_type = "M1"
                    )
                except IndexError:
                    print(f"File {fnames[i][0]} skipped! That unknown index out of bounds error in ksutil.")
                    ratios.append(None)
                    continue
                # Sum gsf for low and high energy range and take the ratio.
                bin_slice = self.bins_middle[0:len(gsf)]
                low_idx = (bin_slice <= 2)
                high_idx = (bin_slice <= 6) == (2 <= bin_slice)
                low = np.sum(gsf[low_idx])
                high = np.sum(gsf[high_idx])
                ratios.append(low/high)
                print(f"{fnames[i][0]} loaded")
            if all(elem is None for elem in ratios):
                # Skip current element if no ratios are calculated.
                continue
            self.labels.append(fnames[0][0][:fnames[0][0].index("/")])
            self.n_neutrons.append([n_neutrons for _, n_neutrons in fnames])
            self.ratios.append(ratios)

    def quick_plot(self):
        """Plot the low/high gsf ratio vs. neutron number per element."""
        _, ax = plt.subplots()
        for i in range(len(self.n_neutrons)):
            ax.plot(self.n_neutrons[i], self.ratios[i], ".--", label=self.labels[i])
        ax.set_yscale("log")
        ax.set_xlabel("N")
        ax.set_ylabel("Rel. amount of low-energy strength")
        ax.legend()
        plt.show()
def low_energy_enhancement(directory):
    """Convenience wrapper: build an LEE instance and run the calculation.

    Parameters
    ----------
    directory : string
        Directory containing subfolders with KSHELL data.

    Returns
    -------
    lee : kshell_utilities.low_energy_enhancement.LEE
        Class instance containing LEE data.
    """
    lee = LEE(directory)
    lee.calculate_low_energy_enhancement()
    return lee
"numpy.ceil",
"os.listdir",
"numpy.sum",
"numpy.linspace",
"os.path.isdir",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((563, 605), 'numpy.linspace', 'np.linspace', (['(0)', 'E_max_adjusted', '(n_bins + 1)'], {}), '(0, E_max_adjusted, n_bins + 1)\n', (574, 605), True, 'import numpy as np\n'), ((7272, 7286), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7284, 7286), True, 'import matplotlib.pyplot as plt\n'), ((7579, 7589), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7587, 7589), True, 'import matplotlib.pyplot as plt\n'), ((465, 501), 'numpy.ceil', 'np.ceil', (['(self.E_max / self.bin_width)'], {}), '(self.E_max / self.bin_width)\n', (472, 501), True, 'import numpy as np\n'), ((757, 783), 'os.listdir', 'os.listdir', (['self.directory'], {}), '(self.directory)\n', (767, 783), False, 'import os\n'), ((881, 925), 'os.path.isdir', 'os.path.isdir', (['f"""{self.directory}/{element}"""'], {}), "(f'{self.directory}/{element}')\n", (894, 925), False, 'import os\n'), ((1169, 1210), 'os.listdir', 'os.listdir', (['f"""{self.directory}/{element}"""'], {}), "(f'{self.directory}/{element}')\n", (1179, 1210), False, 'import os\n'), ((6638, 6658), 'numpy.sum', 'np.sum', (['gsf[low_idx]'], {}), '(gsf[low_idx])\n', (6644, 6658), True, 'import numpy as np\n'), ((6682, 6703), 'numpy.sum', 'np.sum', (['gsf[high_idx]'], {}), '(gsf[high_idx])\n', (6688, 6703), True, 'import numpy as np\n')] |
# vim: ts=4 sw=4 expandtab
import cgi
import os.path
import re
import unittest
# Valid JavaScript identifier: leading letter/_/$, then letters/digits/_/$.
_identifier = re.compile('^[A-Za-z_$][A-Za-z0-9_$]*$')

# Media types treated as (ECMA/Java)Script in <script type=...> attributes.
_contenttypes = (
    'text/javascript',
    'text/ecmascript',
    'application/javascript',
    'application/ecmascript',
    'application/x-javascript',
)


class JSVersion:
    """A JavaScript language version plus the E4X extension flag."""

    def __init__(self, jsversion, is_e4x):
        self.version = jsversion
        self.e4x = is_e4x

    def __eq__(self, other):
        return self.version == other.version and \
               self.e4x == other.e4x

    @classmethod
    def default(klass):
        """Return the default (unversioned, non-E4X) JavaScript version."""
        return klass('default', False)

    @classmethod
    def fromattr(klass, attr, default_version=None):
        """Derive a version from a script tag's type=/language= attributes."""
        if attr.get('type'):
            return klass.fromtype(attr['type'])
        if attr.get('language'):
            return klass.fromlanguage(attr['language'])
        return default_version

    @classmethod
    def fromtype(klass, type_):
        """Parse a type such as 'text/javascript;version=1.7;e4x=1'.

        Returns None when the media type is not a JavaScript type.
        """
        # cgi.parse_header() was removed in Python 3.13 (PEP 594); the email
        # header parser implements the same RFC 2045 parameter parsing.
        from email.message import Message
        msg = Message()
        msg['content-type'] = type_
        params = msg.get_params() or [('', '')]
        typestr = params[0][0]
        typeparms = dict(params[1:])
        if typestr.lower() in _contenttypes:
            jsversion = typeparms.get('version', 'default')
            is_e4x = typeparms.get('e4x') == '1'
            return klass(jsversion, is_e4x)
        return None

    @classmethod
    def fromlanguage(klass, language):
        """Parse a legacy language= attribute such as 'JavaScript1.5'."""
        if language.lower() in ('javascript', 'livescript', 'mocha'):
            return klass.default()
        # Simplistic parsing of javascript/x.y
        if language.lower().startswith('javascript'):
            language = language[len('javascript'):]
            if language.replace('.', '').isdigit():
                return klass(language, False)
        return None
def isidentifier(text):
    """Return a truthy match object when *text* is a valid JS identifier."""
    return re.match(r'^[A-Za-z_$][A-Za-z0-9_$]*$', text)
def _encode_error_keyword(s):
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
s = s.replace("'", "\\'")
s = s.replace("\t", "\\t")
s = s.replace("\r", "\\r")
s = s.replace("\n", "\\n")
return s
def format_error(output_format, path, line, col, errname, errdesc):
errprefix = 'warning' #TODO
replacements = {
'__FILE__': path,
'__FILENAME__': os.path.basename(path),
'__LINE__': str(line+1),
'__COL__': str(col),
'__ERROR__': '%s: %s' % (errprefix, errdesc),
'__ERROR_NAME__': errname,
'__ERROR_PREFIX__': errprefix,
'__ERROR_MSG__': errdesc,
'__ERROR_MSGENC__': errdesc,
}
formatted_error = output_format
# If the output format starts with encode:, all of the keywords should be
# encoded.
if formatted_error.startswith('encode:'):
formatted_error = formatted_error[len('encode:'):]
encoded_keywords = replacements.keys()
else:
encoded_keywords = ['__ERROR_MSGENC__']
for keyword in encoded_keywords:
replacements[keyword] = _encode_error_keyword(replacements[keyword])
regexp = '|'.join(replacements.keys())
return re.sub(regexp, lambda match: replacements[match.group(0)],
formatted_error)
class TestUtil(unittest.TestCase):
    """Unit tests for the identifier check and error-formatting helpers."""

    def testIdentifier(self):
        assert not isidentifier('')
        assert not isidentifier('0a')
        assert not isidentifier('a b')
        assert isidentifier('a')
        assert isidentifier('$0')

    def testEncodeKeyword(self):
        # assertEquals was a deprecated alias removed in Python 3.12;
        # assertEqual is the supported spelling.
        self.assertEqual(_encode_error_keyword(r'normal text'), 'normal text')
        self.assertEqual(_encode_error_keyword(r'a\b'), r'a\\b')
        self.assertEqual(_encode_error_keyword(r"identifier's"), r"identifier\'s")
        self.assertEqual(_encode_error_keyword(r'"i"'), r'\"i\"')
        self.assertEqual(_encode_error_keyword('a\tb'), r'a\tb')
        self.assertEqual(_encode_error_keyword('a\rb'), r'a\rb')
        self.assertEqual(_encode_error_keyword('a\nb'), r'a\nb')

    def testFormattedError(self):
        self.assertEqual(format_error('__FILE__', '__LINE__', 1, 2, 'name', 'desc'),
                         '__LINE__')
        self.assertEqual(format_error('__FILE__', r'c:\my\file', 1, 2, 'name', 'desc'),
                         r'c:\my\file')
        self.assertEqual(format_error('encode:__FILE__', r'c:\my\file', 1, 2, 'name', 'desc'),
                         r'c:\\my\\file')
        self.assertEqual(format_error('__ERROR_MSGENC__', r'c:\my\file', 1, 2, 'name', r'a\b'),
                         r'a\\b')
        self.assertEqual(format_error('encode:__ERROR_MSGENC__', r'c:\my\file', 1, 2, 'name', r'a\b'),
                         r'a\\b')


if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"cgi.parse_header",
"re.compile"
] | [((94, 134), 're.compile', 're.compile', (['"""^[A-Za-z_$][A-Za-z0-9_$]*$"""'], {}), "('^[A-Za-z_$][A-Za-z0-9_$]*$')\n", (104, 134), False, 'import re\n'), ((4500, 4515), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4513, 4515), False, 'import unittest\n'), ((960, 983), 'cgi.parse_header', 'cgi.parse_header', (['type_'], {}), '(type_)\n', (976, 983), False, 'import cgi\n')] |
from .Node import Node
from blinker import signal
###################################################################################################
# Pipeline:
#
### The main process pipeline. Responsible for orchestrating a sequence of discrete tasks, as can
### be defined as an acyclic directed graph. Each node runs a singular operation then passes/flows
### it's results downstream onto the next. Data is collected within the node's themselves until all
### it's parents upstream have finished their own respective tasks. Some nodes can produce subsets
### of their own data, indicating that all nodes downstream from it should be run that many more
### times.
###################################################################################################
class Pipeline():
    """Process flow pipeline.

    Orchestrates a directed acyclic graph of task nodes: each node runs a
    single operation, then flows its results downstream to its children.
    Root nodes (no parents) are re-run in passes until every node reports
    that it has consumed all of its data (batched sources).
    """

    def __init__(self, nodes=None, global_vars=None, roots=None):
        """Pipeline initialization. Optionally seed nodes, global_vars, roots."""
        # Graph: node -> list of (parent_terminal, child_terminal, child_node).
        self.nodes = nodes if nodes is not None else {}
        # Nodes with no parents; maintained incrementally so passes do not
        # need to search the whole graph.
        self.roots = roots if roots is not None else {}
        # Variables optionally shared across nodes and batches/runs/passes.
        self.global_vars = global_vars if global_vars is not None else {}
        # event_id -> list of callbacks, fired via resolve_event().
        self.event_callbacks = {}
        self.results = {}
        # Enable debug prints.
        self.verbose = False

    def add(self, node):
        """Add a new node to the pipeline and register its event callbacks."""
        self.nodes[node] = []
        self.roots[node] = node
        if len(node.event_callbacks) > 0:
            if self.verbose:
                # BUG FIX: this used to be an ungated leftover debug print.
                print(node.event_callbacks)
            for event_id, event_callback in node.event_callbacks.items():
                self.event_callbacks.setdefault(event_id, []).append(event_callback)

    def connect(self, parent=None, child=None):
        """Form a parent->child relationship; data flows parent to child.

        Parameters
        ----------
        parent : tuple
            (node, terminal): the upstream node and its output name.
        child : tuple
            (node, terminal): the downstream node and its input name.

        Returns the (parent_terminal, child_terminal, child_node) edge tuple.
        """
        parent_node, parent_terminal = parent
        child_node, child_terminal = child
        # The child cannot run until this input terminal receives data.
        child_node.ready[child_terminal] = False
        child_node.default_ready[child_terminal] = False
        if parent_node not in self.nodes:
            self.nodes[parent_node] = []
        self.nodes[parent_node].append((parent_terminal, child_terminal, child_node))
        # A connected child is no longer a root.
        if child_node in self.roots:
            self.roots.pop(child_node)
        return self.nodes[parent_node][-1]

    def start(self):
        """Initialize all nodes, run passes until done, then finalize.

        Returns True once every node has processed all of its data.
        """
        if self.verbose:
            print(self.nodes)
        print("############################ Starting ############################")
        # Run each node's start function once at the beginning.
        for node in self.nodes:
            node.start()
        # BUG FIX: 'results' was previously unassigned (UnboundLocalError on
        # return) when the pipeline had no roots; an empty pipeline is done.
        results = len(self.roots) == 0
        while not results:
            signal('start').send(self)
            results = self.run_pass(True)
        for node in self.nodes:
            node.end()
        signal('end').send(self)
        print("############################ Finishing ############################")
        return results

    def run_pass(self, done):
        """Run every root node once; return True only when all report done.

        Input nodes may produce their data in batches, so several passes
        over the graph can be required before everything is consumed.
        """
        for root in self.roots:
            _, node_done = self.run_node(root, {})
            done = done and node_done
        return bool(done)

    def run_node(self, node, results=None):
        """Run *node* if all of its inputs are ready, then recurse downstream.

        Returns (results, node.done) where results maps node -> output dict.
        """
        if results is None:
            results = {}
        if all(node.ready.values()):
            signal('node start').send(self, name=node.node_id)
            node.global_vars = self.global_vars
            results[node] = node.process()
            self.global_vars = node.global_vars
            signal('node complete').send(self, name=node.node_id)
            if len(node.events_fired) > 0:
                for event_id, event_data in node.events_fired.items():
                    self.resolve_event(event_id, event_data)
                node.events_fired = {}
            if node in self.nodes:  # This node is a parent of other nodes.
                for parent_terminal, child_terminal, child in self.nodes[node]:
                    if parent_terminal in results[node]:
                        # Flow the output downstream and mark the input ready.
                        child.args[child_terminal] = results[node][parent_terminal]
                        child.ready[child_terminal] = True
                        self.run_node(child, results)
            node.reset()
        return results, node.done

    def resolve_event(self, event_id, event_data):
        """Invoke every callback registered for *event_id*."""
        for callback in self.event_callbacks.get(event_id, []):
            callback(event_id, event_data)
# result = pipeline.run(node)
# pipeline.run(node.child[0]) | [
"blinker.signal"
] | [((4722, 4735), 'blinker.signal', 'signal', (['"""end"""'], {}), "('end')\n", (4728, 4735), False, 'from blinker import signal\n'), ((6546, 6566), 'blinker.signal', 'signal', (['"""node start"""'], {}), "('node start')\n", (6552, 6566), False, 'from blinker import signal\n'), ((6750, 6773), 'blinker.signal', 'signal', (['"""node complete"""'], {}), "('node complete')\n", (6756, 6773), False, 'from blinker import signal\n'), ((4576, 4591), 'blinker.signal', 'signal', (['"""start"""'], {}), "('start')\n", (4582, 4591), False, 'from blinker import signal\n')] |
from simulation.framework import DiscreteSimulation
from tests.helpers import TestCase
class TestSimulationFramework(TestCase):
    """Tests for DiscreteSimulation.reset() behaviour."""

    def setUp(self):
        self.sim = DiscreteSimulation(max_duration=8, available_actions=[])

    def test_reset_creates_new_timeline(self):
        """A reset must replace the timeline with a fresh one."""
        previous_timeline = self.sim.timeline
        self.sim.reset()
        self.assertNotEqual(self.sim.timeline, previous_timeline)

    def test_reset_applies_initial_values(self):
        """Initial values passed to reset() seed the current state."""
        self.sim.reset(initial_values={"water": 5000})
        self.assertDictEqual(self.sim.timeline.current_state.values, {"water": 5000})
| [
"simulation.framework.DiscreteSimulation"
] | [((170, 226), 'simulation.framework.DiscreteSimulation', 'DiscreteSimulation', ([], {'max_duration': '(8)', 'available_actions': '[]'}), '(max_duration=8, available_actions=[])\n', (188, 226), False, 'from simulation.framework import DiscreteSimulation\n')] |
import os
import time
# Keep the game client alive: relaunch it whenever it exits, with a short
# pause between restarts.
COMMAND = "pipenv run start --token 1<PASSWORD> --board 1 --time-factor=1 --logic LowerRight"
while True:
    os.system(COMMAND)
    time.sleep(1)
"os.system",
"time.sleep"
] | [((41, 145), 'os.system', 'os.system', (['"""pipenv run start --token 1<PASSWORD> --board 1 --time-factor=1 --logic LowerRight"""'], {}), "(\n 'pipenv run start --token 1<PASSWORD> --board 1 --time-factor=1 --logic LowerRight'\n )\n", (50, 145), False, 'import os\n'), ((140, 153), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (150, 153), False, 'import time\n')] |
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from .models import Item
from datetime import datetime
def active_auction(function):
    """View decorator that only allows access while the auction is running.

    Looks up the Item by the view's ``slug`` kwarg; if its end_of_auction
    is still in the future the wrapped view runs, otherwise
    PermissionDenied is raised.
    """
    # functools.wraps replaces the manual __doc__/__name__ copying and also
    # preserves __module__, __qualname__, __dict__ and sets __wrapped__.
    from functools import wraps

    @wraps(function)
    def wrap(request, *args, **kwargs):
        item = Item.objects.get(slug=kwargs['slug'])
        if item.end_of_auction > timezone.now():
            return function(request, *args, **kwargs)
        raise PermissionDenied
    return wrap
"django.utils.timezone.now"
] | [((299, 313), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (311, 313), False, 'from django.utils import timezone\n')] |
#!/usr/bin/python3
# To generate an Office365 token:
# python3
# from O365 import Account
# account = Account(credentials=('yourregisteredappname', 'yoursecret'))
# account.authenticate(scopes=['files.read', 'user.read', 'offline_access'])
# It will return a URL, go to this in a browser, accept the permissions, then paste in the URL you are redirected to
# YOU MAY HAVE TO SWITCH TO THE 'OLD' VIEW TO DO THIS!
import pandas as pd
from O365 import Account
# Generated on the app registration portal
registered_app_name = 'yourregisteredappname'
registered_app_secret = 'yoursecret'

# File to download, and location to download to
dl_path = '/path/to/download'
f_name = 'myfile.xlsx'

print("Connecting to O365")
account = Account(
    credentials=(registered_app_name, registered_app_secret),
    scopes=['files.read', 'user.read', 'offline_access'],
)

# The storage instance handles all the storage options.
storage = account.storage()
# Get the default drive.
my_drive = storage.get_default_drive()

print(f"Searching for {f_name}...")
files = my_drive.search(f_name, limit=1)
if not files:
    print("File not found!")
    exit()

number_doc = files[0]
print("... copying to local machine")
operation = number_doc.download(to_path=dl_path)

print("Reading sheet to dataframe")
df = pd.read_excel(f'{dl_path}/{f_name}')
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    print(df)
| [
"pandas.option_context",
"O365.Account",
"pandas.read_excel"
] | [((722, 846), 'O365.Account', 'Account', ([], {'credentials': '(registered_app_name, registered_app_secret)', 'scopes': "['files.read', 'user.read', 'offline_access']"}), "(credentials=(registered_app_name, registered_app_secret), scopes=[\n 'files.read', 'user.read', 'offline_access'])\n", (729, 846), False, 'from O365 import Account\n'), ((1304, 1340), 'pandas.read_excel', 'pd.read_excel', (['f"""{dl_path}/{f_name}"""'], {}), "(f'{dl_path}/{f_name}')\n", (1317, 1340), True, 'import pandas as pd\n'), ((1347, 1419), 'pandas.option_context', 'pd.option_context', (['"""display.max_rows"""', 'None', '"""display.max_columns"""', 'None'], {}), "('display.max_rows', None, 'display.max_columns', None)\n", (1364, 1419), True, 'import pandas as pd\n')] |
from flask import Blueprint
from flask.ext.restful import Api
from .endpoints.goal import Goal
from .endpoints.weight import Weight
from .endpoints.calories import Calories
api_blueprint = Blueprint('api', __name__)
api = Api(prefix='/api/v1.0')
# Register the endpoints
api.add_resource(Goal, '/goal', '/goal')
api.add_resource(Weight, '/weight/<string:id>', '/weight')
api.add_resource(Calories, '/calories/<string:id>', '/calories') | [
"flask.Blueprint",
"flask.ext.restful.Api"
] | [((190, 216), 'flask.Blueprint', 'Blueprint', (['"""api"""', '__name__'], {}), "('api', __name__)\n", (199, 216), False, 'from flask import Blueprint\n'), ((223, 246), 'flask.ext.restful.Api', 'Api', ([], {'prefix': '"""/api/v1.0"""'}), "(prefix='/api/v1.0')\n", (226, 246), False, 'from flask.ext.restful import Api\n')] |
"""Tests for Pipeline class"""
import pytest
from cognigraph.nodes.pipeline import Pipeline
import numpy as np
from numpy.testing import assert_array_equal
from cognigraph.tests.prepare_pipeline_tests import (
create_dummy_info,
ConcreteSource,
ConcreteProcessor,
ConcreteOutput,
)
@pytest.fixture(scope="function")
def pipeline():
source = ConcreteSource()
processor = ConcreteProcessor()
output = ConcreteOutput()
pipeline = Pipeline()
pipeline.add_child(source)
source.add_child(processor)
processor.add_child(output)
return pipeline
def test_pipeline_initialization(pipeline):
pipeline.chain_initialize()
source = pipeline._children[0]
processor = source._children[0]
output = processor._children[0]
assert source._initialized
assert source.mne_info is not None
assert source.mne_info["nchan"] == source.nchan
assert processor._initialized
assert output._initialized
def test_pipeline_update(pipeline):
"""Update all pipeline nodes twice and check outputs"""
pipeline.chain_initialize()
src = pipeline._children[0]
proc = src._children[0]
out = proc._children[0]
nch = src.nchan
nsamp = src.nsamp
pr_inc = proc.increment
out_inc = out.increment
pipeline.update()
assert_array_equal(src.output, np.zeros([nch, nsamp]))
assert_array_equal(proc.output, np.zeros([nch, nsamp]) + pr_inc)
assert_array_equal(out.output, proc.output + out_inc)
pipeline.update()
assert_array_equal(src.output, np.ones([nch, nsamp]))
assert_array_equal(proc.output, np.ones([nch, nsamp]) + pr_inc * 2)
assert_array_equal(out.output, proc.output + out_inc * 2)
def test_reset_mechanics(pipeline):
"""
Test if upstream output shape changes when changing number of channels in
source via _mne_info, test if src._on_critical_attr_changed is called when
this happens, test if it triggers reinitialization for node for
which _mne_info is in UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION,
finally test if history invalidation mechanics works.
"""
src = pipeline._children[0]
proc = src._children[0]
out = proc._children[0]
pipeline.chain_initialize()
pipeline.update()
new_nchan = 43
new_info = create_dummy_info(nchan=new_nchan)
assert src.n_resets == 0
assert proc.n_initializations == 1
assert proc.n_hist_invalidations == 0
src._mne_info = new_info
pipeline.update()
assert src.n_resets == 1
for i in range(3):
pipeline.update()
pipeline.update()
assert np.all(out.output)
assert out.output.shape[0] == new_nchan
assert proc.n_initializations == 2
assert proc.n_hist_invalidations == 1
def test_add_child_on_the_fly(pipeline):
src = pipeline._children[0]
pipeline.chain_initialize()
pipeline.update()
new_processor = ConcreteProcessor(increment=0.2)
src.add_child(new_processor, initialize=True)
pipeline.update()
nch = src.nchan
nsamp = src.nsamp
assert_array_equal(
new_processor.output, np.ones([nch, nsamp]) + new_processor.increment
)
assert new_processor._root is pipeline
# def test_critical_upstream_change_happened(pipeline):
# src = pipeline._children[0]
# proc = src._children[0]
# pipeline.chain_initialize()
# pipeline.update()
| [
"cognigraph.tests.prepare_pipeline_tests.ConcreteSource",
"cognigraph.tests.prepare_pipeline_tests.create_dummy_info",
"numpy.ones",
"cognigraph.tests.prepare_pipeline_tests.ConcreteProcessor",
"cognigraph.tests.prepare_pipeline_tests.ConcreteOutput",
"numpy.zeros",
"pytest.fixture",
"numpy.all",
"c... | [((301, 333), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (315, 333), False, 'import pytest\n'), ((363, 379), 'cognigraph.tests.prepare_pipeline_tests.ConcreteSource', 'ConcreteSource', ([], {}), '()\n', (377, 379), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((396, 415), 'cognigraph.tests.prepare_pipeline_tests.ConcreteProcessor', 'ConcreteProcessor', ([], {}), '()\n', (413, 415), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((429, 445), 'cognigraph.tests.prepare_pipeline_tests.ConcreteOutput', 'ConcreteOutput', ([], {}), '()\n', (443, 445), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((461, 471), 'cognigraph.nodes.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (469, 471), False, 'from cognigraph.nodes.pipeline import Pipeline\n'), ((1432, 1485), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['out.output', '(proc.output + out_inc)'], {}), '(out.output, proc.output + out_inc)\n', (1450, 1485), False, 'from numpy.testing import assert_array_equal\n'), ((1644, 1701), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['out.output', '(proc.output + out_inc * 2)'], {}), '(out.output, proc.output + out_inc * 2)\n', (1662, 1701), False, 'from numpy.testing import assert_array_equal\n'), ((2295, 2329), 'cognigraph.tests.prepare_pipeline_tests.create_dummy_info', 'create_dummy_info', ([], {'nchan': 'new_nchan'}), '(nchan=new_nchan)\n', (2312, 2329), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((2603, 2621), 'numpy.all', 'np.all', (['out.output'], {}), '(out.output)\n', (2609, 2621), True, 'import numpy as np\n'), ((2896, 2928), 
'cognigraph.tests.prepare_pipeline_tests.ConcreteProcessor', 'ConcreteProcessor', ([], {'increment': '(0.2)'}), '(increment=0.2)\n', (2913, 2928), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((1335, 1357), 'numpy.zeros', 'np.zeros', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (1343, 1357), True, 'import numpy as np\n'), ((1545, 1566), 'numpy.ones', 'np.ones', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (1552, 1566), True, 'import numpy as np\n'), ((1395, 1417), 'numpy.zeros', 'np.zeros', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (1403, 1417), True, 'import numpy as np\n'), ((1604, 1625), 'numpy.ones', 'np.ones', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (1611, 1625), True, 'import numpy as np\n'), ((3098, 3119), 'numpy.ones', 'np.ones', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (3105, 3119), True, 'import numpy as np\n')] |
#!/usr/bin/env python
'''Testing for read_rockstar.py
@author: <NAME>
@contact: <EMAIL>
@status: Development
'''
import glob
from mock import call, patch
import numpy as np
import numpy.testing as npt
import os
import pdb
import pytest
import unittest
import galaxy_dive.read_data.rockstar as read_rockstar
import galaxy_dive.utils.utilities as utilities
########################################################################
########################################################################
class TestRockstarReader( unittest.TestCase ):
def setUp( self ):
self.rockstar_reader = read_rockstar.RockstarReader(
'./tests/data/rockstar_dir',
)
########################################################################
def test_get_halos( self ):
self.rockstar_reader.get_halos( 600 )
expected = 51
actual = self.rockstar_reader.halos['Np'][6723]
npt.assert_allclose( expected, actual )
| [
"numpy.testing.assert_allclose",
"galaxy_dive.read_data.rockstar.RockstarReader"
] | [((603, 660), 'galaxy_dive.read_data.rockstar.RockstarReader', 'read_rockstar.RockstarReader', (['"""./tests/data/rockstar_dir"""'], {}), "('./tests/data/rockstar_dir')\n", (631, 660), True, 'import galaxy_dive.read_data.rockstar as read_rockstar\n'), ((901, 938), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (920, 938), True, 'import numpy.testing as npt\n')] |
from django import forms
from .models import Recipe
class RecipeForm(forms.ModelForm):
class Meta:
model = Recipe
fields = (
'title',
'tags',
'cooking_time',
'text',
'image',
)
widgets = {
'tags': forms.CheckboxSelectMultiple(),
}
| [
"django.forms.CheckboxSelectMultiple"
] | [((308, 338), 'django.forms.CheckboxSelectMultiple', 'forms.CheckboxSelectMultiple', ([], {}), '()\n', (336, 338), False, 'from django import forms\n')] |
import math
from pandas_util import values_of, subset_by_value
def entropy(samples, bins=None):
"""
Compute entropy of given set of sample data points
Parameters:
samples - Pandas DataFrame; last column is taken as the class labels
Keyword Args:
bins - Number of bins/quantiles to have for continuous data
"""
# Empty sets have no entropy
if samples.empty:
return 0
# Determine class label values
class_labels = values_of(samples, samples.columns[-1], bins)
# Calculate entropy
entropy_sum = 0
for class_label in class_labels:
# Create subset of samples by filtering items based on the class label
samples_v = subset_by_value(samples, samples.columns[-1], class_label)
# Calculate part of entropy sum for class label
probabilty = len(samples_v)/len(samples)
try:
entropy_sum = entropy_sum + -1 * probabilty * math.log2(probabilty)
except ValueError:
entropy_sum = entropy_sum + 0
return entropy_sum
def info_gain(samples, feature, bins=None):
"""
Compute information gain on set of samples if split based on
provided feature.
Parameters:
samples - Pandas DataFrame; last column is taken as the class labels
feature - Name of feature; should correspond to column in samples
Keyword Args:
bins - Number of bins/quantiles to have for continuous data
"""
# Determine possible values of feature
values = values_of(samples, feature, bins)
# Calculate information gain
entropy_sum = 0
for value in values:
# Create samples subset by filtering items based on the feature value
samples_v = subset_by_value(samples, feature, value)
# Calculate weighted entropy of subset and add to sum
entropy_sum = entropy_sum + \
len(samples_v)/len(samples) * entropy(samples_v, bins)
return entropy(samples, bins) - entropy_sum
| [
"pandas_util.subset_by_value",
"pandas_util.values_of",
"math.log2"
] | [((469, 514), 'pandas_util.values_of', 'values_of', (['samples', 'samples.columns[-1]', 'bins'], {}), '(samples, samples.columns[-1], bins)\n', (478, 514), False, 'from pandas_util import values_of, subset_by_value\n'), ((1494, 1527), 'pandas_util.values_of', 'values_of', (['samples', 'feature', 'bins'], {}), '(samples, feature, bins)\n', (1503, 1527), False, 'from pandas_util import values_of, subset_by_value\n'), ((696, 754), 'pandas_util.subset_by_value', 'subset_by_value', (['samples', 'samples.columns[-1]', 'class_label'], {}), '(samples, samples.columns[-1], class_label)\n', (711, 754), False, 'from pandas_util import values_of, subset_by_value\n'), ((1705, 1745), 'pandas_util.subset_by_value', 'subset_by_value', (['samples', 'feature', 'value'], {}), '(samples, feature, value)\n', (1720, 1745), False, 'from pandas_util import values_of, subset_by_value\n'), ((932, 953), 'math.log2', 'math.log2', (['probabilty'], {}), '(probabilty)\n', (941, 953), False, 'import math\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Python imports.
import logging
# Django imports.
from django.conf.urls import url
# Rest Framework imports.
# Third Party Library imports
# local imports.
from qzzzme_app.swagger import schema_view
app_name = 'qzzzme_app'
urlpatterns = [
url(r'^docs/$', schema_view, name="schema_view"),
] | [
"django.conf.urls.url"
] | [((336, 383), 'django.conf.urls.url', 'url', (['"""^docs/$"""', 'schema_view'], {'name': '"""schema_view"""'}), "('^docs/$', schema_view, name='schema_view')\n", (339, 383), False, 'from django.conf.urls import url\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <bitbar.title>GitHub Zen</bitbar.title>
# <bitbar.version>v1.0.0</bitbar.version>
# <bitbar.author>Josh</bitbar.author>
# <bitbar.author.github>andjosh</bitbar.author.github>
# <bitbar.desc>GitHub zen in your menu bar!</bitbar.desc>
# <bitbar.dependencies>python</bitbar.dependencies>
# <bitbar.image>http://i.imgur.com/U4OHxDm.png</bitbar.image>
import urllib2
import os
api_key = os.getenv('GITHUB_TOKEN', 'Enter your GitHub.com Personal Access Token here...')
url = 'https://api.github.com/zen'
request = urllib2.Request( url, headers = { 'Authorization': 'token ' + api_key } )
response = urllib2.urlopen( request )
print ( '%s' % (response.read())).encode( 'utf-8' )
| [
"urllib2.Request",
"urllib2.urlopen",
"os.getenv"
] | [((433, 518), 'os.getenv', 'os.getenv', (['"""GITHUB_TOKEN"""', '"""Enter your GitHub.com Personal Access Token here..."""'], {}), "('GITHUB_TOKEN', 'Enter your GitHub.com Personal Access Token here...'\n )\n", (442, 518), False, 'import os\n'), ((560, 627), 'urllib2.Request', 'urllib2.Request', (['url'], {'headers': "{'Authorization': 'token ' + api_key}"}), "(url, headers={'Authorization': 'token ' + api_key})\n", (575, 627), False, 'import urllib2\n'), ((645, 669), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {}), '(request)\n', (660, 669), False, 'import urllib2\n')] |
from multiprocessing.sharedctypes import Value
import sympy as sp
a = sp.symbols('alpha')
def _phase2golden(alpha_l, alpha_u, functionOfAlpha, error, k, debug): #implementação do SLIDE 13
currentError = error + 1 # força o algorítimo a passar para o loop while
k += 1
while abs(currentError) > error: # Compara o erro atual com a tolerância desejada
alpha = (alpha_u+alpha_l) / 2
fk_current = functionOfAlpha.subs(a, alpha)
tau = 0.618
alpha_a = alpha_l + (1-tau) * (alpha_u-alpha_l)
alpha_b = alpha_l + tau * (alpha_u-alpha_l)
fka = functionOfAlpha.subs(a, alpha_a)
fkb = functionOfAlpha.subs(a, alpha_b)
if fka < fkb:
alpha_u = alpha_b
elif fka > fkb:
alpha_l = alpha_a
alpha = (alpha_u+alpha_l) / 2
fk_new = functionOfAlpha.subs(a, alpha)
currentError = abs(fk_current - fk_new)
k += 1
if k > 1000:
return ValueError('Convergence not reached until 1000 iterations')
if debug:
print(f'Iteration {k}')
print(f'Number of iterations {k}')
print(f'optimum alpha = {alpha}')
print(f'f(alpha*) = {fk_new}')
#print(f'Number of iterations {k}')
#print(f'optimum alpha = {alpha}')
#print(f'f(alpha*) = {fk_new}')
return alpha
def goldenSearch(functionOfAlpha, delta, debug, error):
'''
Parameters
----------
functionOfAlpha : sympy.core.add.Add
A sympy made equation.
It should be a equation parametrized in function of alpha
debug : bool
To see printed partial results
error : float, default 10e-4
Error allowed in convergence
delta : float , default 0.1
Iteration step
Returns
-------
alpha : float
returns the optimum value of alpha
'''
currentError = 100 # Número grande qualquer
k = 1 # número de steps
q = 0 # usado no método golden search
alpha = 0 # Delta acumulado
while abs(currentError) > error: # Compara o erro atual com a tolerância desejada
alpha0 = alpha + delta * 1.618 ** q
alpha1 = alpha0 + delta * 1.618 ** (q+1)
alpha2 = alpha1 + delta * 1.618 ** (q+2)
fk0 = functionOfAlpha.subs(a, alpha0) #step k
fk1 = functionOfAlpha.subs(a, alpha1) # step k+1
fk2 = functionOfAlpha.subs(a, alpha2) # step k+1
if fk1 < fk2 :
alpha_l = alpha0
alpha_u = alpha2
alpha = _phase2golden(alpha_l, alpha_u, functionOfAlpha, error, k, debug) #final alpha
break
else:
q += 1
alpha = alpha0 # garante a continuação do algoritmo
k += 1
if debug:
print(f'alpha0 = {alpha0}')
print(f'fk0 = {fk0}')
print(f'alpha1 = {alpha1}')
print(f'fk1 = {fk1}')
print(f'alpha2 = {alpha2}')
print(f'fk2 = {fk2}')
if k > 1000:
return ValueError('Convergence not reached until 1000 iterations')
return alpha | [
"sympy.symbols"
] | [((72, 91), 'sympy.symbols', 'sp.symbols', (['"""alpha"""'], {}), "('alpha')\n", (82, 91), True, 'import sympy as sp\n')] |
#!/usr/bin/env python3
import rulegenerator
from random import randint
if __name__ == '__main__':
generations = 500
width = 501
rule = randint(0, 255)
init_config = randint(0, 32)
init_config = bin(init_config)[2:]
grid = rulegenerator.generate_rule_wrap(generations, init_config, rule, width)
print("Writing to image...")
rulegenerator.to_image(grid, './examples/' + str(rule) + '.png')
print("Done.")
| [
"rulegenerator.generate_rule_wrap",
"random.randint"
] | [((142, 157), 'random.randint', 'randint', (['(0)', '(255)'], {}), '(0, 255)\n', (149, 157), False, 'from random import randint\n'), ((175, 189), 'random.randint', 'randint', (['(0)', '(32)'], {}), '(0, 32)\n', (182, 189), False, 'from random import randint\n'), ((236, 307), 'rulegenerator.generate_rule_wrap', 'rulegenerator.generate_rule_wrap', (['generations', 'init_config', 'rule', 'width'], {}), '(generations, init_config, rule, width)\n', (268, 307), False, 'import rulegenerator\n')] |
import configparser as cfg
import src.scripts.index as scripts
import os
try:
cel_det = scripts.get_env_variable('REDIS_URL')
# we want to use the heroku set add-on redis server if it's available,
# but if we can't find it, then it isn't set and we must be running
# the dev server, so we connect to it instead
except Exception:
cel_det = 'redis://localhost:6379/0'
# we run redis automatically from the /dev.sh screen, which means we
# should have an existing server running which we can connect to.
# config
config = cfg.ConfigParser()
def init():
try:
with open('config.ini', 'r+') as f:
config.read_file(f)
except IOError:
print("Can't open the config file. Writing a default")
config['MAIN'] = {'UPLOADS_FOLDER': '/tmp',
'PROJ_ROOT': os.getcwd()+"/",
'CELERY_BROKER_URL': cel_det,
'CELERY_RESULT_BACKEND': cel_det,
'COLUMNS_VIEW_ALL':'3',
'AWS_BUCKET_NAME': 'sermon-skeleton',
'AWS_PROFILE_NAME': 'sermon-skeleton',
'ORG_NAME': 'Crossroads Christian Church',
'ORG_EMAIL': '<EMAIL>',
'ORG_LINK': 'https://crossroads.org.au'}
write_config()
def write_config():
try:
with open('config.ini', 'w') as conf:
config.write(conf)
return True
except IOError:
return False
def read_config(section, option):
# get the aws details
config = cfg.ConfigParser()
config.read('config.ini')
try:
return config.get(section, option)
except cfg.NoOptionError as error:
print("Failed to get option "+option)
except cfg.NoSectionError as error:
print("Failed to get section "+section)
return False
| [
"src.scripts.index.get_env_variable",
"configparser.ConfigParser",
"os.getcwd"
] | [((549, 567), 'configparser.ConfigParser', 'cfg.ConfigParser', ([], {}), '()\n', (565, 567), True, 'import configparser as cfg\n'), ((93, 130), 'src.scripts.index.get_env_variable', 'scripts.get_env_variable', (['"""REDIS_URL"""'], {}), "('REDIS_URL')\n", (117, 130), True, 'import src.scripts.index as scripts\n'), ((1606, 1624), 'configparser.ConfigParser', 'cfg.ConfigParser', ([], {}), '()\n', (1622, 1624), True, 'import configparser as cfg\n'), ((841, 852), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (850, 852), False, 'import os\n')] |
import click
from ui.cli.command import Command
class Input(Command):
def __init__(self, title: str, value=None):
self.title = title
self.value = value
def render(self):
click.echo("%s\t%s" % (self.title, self.value))
| [
"click.echo"
] | [((206, 253), 'click.echo', 'click.echo', (["('%s\\t%s' % (self.title, self.value))"], {}), "('%s\\t%s' % (self.title, self.value))\n", (216, 253), False, 'import click\n')] |
import requests
import json
import time
import mysql.connector
from mysql.connector import Error
def saveindicador(conexion):
cur=conexion.cursor()
sqlconsulta="SELECT JSON_EXTRACT(datosjson,'$.internal_type') as internal_type,sum(JSON_EXTRACT(datosjson,'$.amount')) as total_amount FROM apidata group by JSON_EXTRACT(datosjson,'$.internal_type');"
cur.execute(sqlconsulta)
data = cur.fetchall()
print(data)
def savedatabasemsql(conexion,my_dict):
cur = conexion.cursor()
cont=0
for dato in my_dict:
json_string=(json.dumps(dato))
json_string2=json.loads(json_string)
json_final=json.dumps(json_string2,ensure_ascii=False)
sql1="insert into apidata(datosjson,categoria) values ('"+json_final+"','funcion_publica_presupuesto_1')"
try:
cur.execute(sql1)
except:
cont+=1
# print('entro')
# print(sql1)
# print(dato['description'])
if cont>0:
print("Errores: ",cont)
def consultarapicomprasmsql(apiurl,conexion):
my_dict={'data':['prueba']}
cont=1
while len(my_dict['data'])>0:
entry_url=apiurl+str(cont)
try:
r = requests.get(entry_url)
my_dict = r.json()
if len(my_dict['data'])==0:
continue
savedatabasemsql(conexion,my_dict['data'])
conexion.commit()
print('entro: '+str(cont))
cont+=1
except:
print("Ha ocurrido un error")
time.sleep(5)
apiurl = "https://datosabiertos.compraspublicas.gob.ec/PLATAFORMA/api/search_ocds?year=2021&search=&page="
try:
connection = mysql.connector.connect(host='localhost',
database='ofpindicadores',
user='ofpuser',
password='<PASSWORD>@!')
if connection.is_connected():
consultarapicomprasmsql(apiurl,connection)
db_Info = connection.get_server_info()
print("Connected to MySQL Server version ", db_Info)
except Error as e:
print("Error while connecting to MySQL", e)
finally:
if connection.is_connected():
connection.close()
print("MySQL connection is closed")
| [
"json.loads",
"json.dumps",
"time.sleep",
"requests.get"
] | [((555, 571), 'json.dumps', 'json.dumps', (['dato'], {}), '(dato)\n', (565, 571), False, 'import json\n'), ((594, 617), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (604, 617), False, 'import json\n'), ((637, 681), 'json.dumps', 'json.dumps', (['json_string2'], {'ensure_ascii': '(False)'}), '(json_string2, ensure_ascii=False)\n', (647, 681), False, 'import json\n'), ((1217, 1240), 'requests.get', 'requests.get', (['entry_url'], {}), '(entry_url)\n', (1229, 1240), False, 'import requests\n'), ((1551, 1564), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1561, 1564), False, 'import time\n')] |
import pytest
from jobs.statuses import JobStatus
@pytest.mark.parametrize(
"code,value",
[
(JobStatus.SENT_TO_DPS, "SENT_TO_DPS"),
(JobStatus.PROCESSED_BY_DPS, "PROCESSED_BY_DPS"),
(JobStatus.COMPLETE, "COMPLETE"),
(JobStatus.NOTIFIED_VALIDATION_FAILED, "NOTIFIED_VALIDATION_FAILED"),
(JobStatus.CLEANED_UP, "CLEANED_UP"),
],
)
def test_statuses_have_correct_value(code, value):
assert code.value == value
| [
"pytest.mark.parametrize"
] | [((53, 337), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""code,value"""', "[(JobStatus.SENT_TO_DPS, 'SENT_TO_DPS'), (JobStatus.PROCESSED_BY_DPS,\n 'PROCESSED_BY_DPS'), (JobStatus.COMPLETE, 'COMPLETE'), (JobStatus.\n NOTIFIED_VALIDATION_FAILED, 'NOTIFIED_VALIDATION_FAILED'), (JobStatus.\n CLEANED_UP, 'CLEANED_UP')]"], {}), "('code,value', [(JobStatus.SENT_TO_DPS,\n 'SENT_TO_DPS'), (JobStatus.PROCESSED_BY_DPS, 'PROCESSED_BY_DPS'), (\n JobStatus.COMPLETE, 'COMPLETE'), (JobStatus.NOTIFIED_VALIDATION_FAILED,\n 'NOTIFIED_VALIDATION_FAILED'), (JobStatus.CLEANED_UP, 'CLEANED_UP')])\n", (76, 337), False, 'import pytest\n')] |
from os import environ
from http.server import BaseHTTPRequestHandler, HTTPServer
from fritzconnection.lib.fritzstatus import FritzStatus
from fritzconnection import FritzConnection
if "FritzBoxUri" not in environ:
print("FritzBoxUri Missing")
quit()
if "FritzBoxUser" not in environ:
print("FritzBoxUser Missing")
quit()
if "FritzBoxPassword" not in environ:
print("FritzBoxPassword Missing")
quit()
fc = FritzConnection(address=environ["FritzBoxUri"], user=environ["FritzBoxUser"], password=environ["FritzBoxPassword"])
fs = FritzStatus(fc)
class Server(BaseHTTPRequestHandler):
def do_GET(self):
ip = fs.external_ip if fs.external_ip else fc.call_action("WANPPPConnection1", "GetInfo")["NewExternalIPAddress"]
if self.path == "/ip/v4":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(ip.encode())
elif self.path == "/ip/v6":
ip = fs.external_ipv6 if fs.external_ipv6 else "Not Available"
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(ip.encode())
httpd = HTTPServer(("0.0.0.0", 8080), Server)
httpd.serve_forever()
| [
"fritzconnection.lib.fritzstatus.FritzStatus",
"fritzconnection.FritzConnection",
"http.server.HTTPServer"
] | [((451, 571), 'fritzconnection.FritzConnection', 'FritzConnection', ([], {'address': "environ['FritzBoxUri']", 'user': "environ['FritzBoxUser']", 'password': "environ['FritzBoxPassword']"}), "(address=environ['FritzBoxUri'], user=environ['FritzBoxUser'\n ], password=environ['FritzBoxPassword'])\n", (466, 571), False, 'from fritzconnection import FritzConnection\n'), ((573, 588), 'fritzconnection.lib.fritzstatus.FritzStatus', 'FritzStatus', (['fc'], {}), '(fc)\n', (584, 588), False, 'from fritzconnection.lib.fritzstatus import FritzStatus\n'), ((1293, 1330), 'http.server.HTTPServer', 'HTTPServer', (["('0.0.0.0', 8080)", 'Server'], {}), "(('0.0.0.0', 8080), Server)\n", (1303, 1330), False, 'from http.server import BaseHTTPRequestHandler, HTTPServer\n')] |
from django.urls import path
from . import views
urlpatterns = [
path(
"model-attrs/<int:content_type_id>/",
views.model_attrs,
name="django_signals_model_attrs",
),
]
| [
"django.urls.path"
] | [((70, 171), 'django.urls.path', 'path', (['"""model-attrs/<int:content_type_id>/"""', 'views.model_attrs'], {'name': '"""django_signals_model_attrs"""'}), "('model-attrs/<int:content_type_id>/', views.model_attrs, name=\n 'django_signals_model_attrs')\n", (74, 171), False, 'from django.urls import path\n')] |
import base64
import simplejson
from aws_xray_sdk.core import patch_all, xray_recorder
from botocore.exceptions import ClientError
from okdata.aws.logging import (
logging_wrapper,
log_add,
log_exception,
)
from okdata.resource_auth import ResourceAuthorizer
from status.StatusData import StatusData
from status.common import (
response,
response_error,
extract_bearer_token,
extract_dataset_id,
)
patch_all()
resource_authorizer = ResourceAuthorizer()
@logging_wrapper
@xray_recorder.capture("get_status_from_s3_path")
def handler(event, context):
params = event["pathParameters"]
# The s3_path parameter MUST be base64 encoded since it can contain "/"
# and any other character known to man.....
path = base64.b64decode(params["s3_path"]).decode("utf-8", "ignore")
log_add(s3_path=path)
db = StatusData()
try:
status_item = db.get_status_from_s3_path(path)
if status_item is None:
error = "Could not find item"
return response_error(404, error)
dataset_id = extract_dataset_id(status_item)
bearer_token = extract_bearer_token(event)
log_add(trace_id=status_item["trace_id"], dataset_id=dataset_id)
if dataset_id and resource_authorizer.has_access(
bearer_token,
"okdata:dataset:write",
f"okdata:dataset:{dataset_id}",
use_whitelist=True,
):
ret = {
# TODO: Return both id and trace_id until
# all clients are updated
"id": status_item["trace_id"],
"trace_id": status_item["trace_id"],
}
return response(200, simplejson.dumps(ret))
error = "Access denied"
return response_error(403, error)
except ClientError as ce:
log_exception(ce)
error_msg = f"Could not get status: {ce}"
return response_error(404, error_msg)
| [
"okdata.aws.logging.log_exception",
"aws_xray_sdk.core.patch_all",
"simplejson.dumps",
"status.common.extract_bearer_token",
"status.common.extract_dataset_id",
"base64.b64decode",
"status.StatusData.StatusData",
"aws_xray_sdk.core.xray_recorder.capture",
"okdata.aws.logging.log_add",
"okdata.reso... | [((429, 440), 'aws_xray_sdk.core.patch_all', 'patch_all', ([], {}), '()\n', (438, 440), False, 'from aws_xray_sdk.core import patch_all, xray_recorder\n'), ((463, 483), 'okdata.resource_auth.ResourceAuthorizer', 'ResourceAuthorizer', ([], {}), '()\n', (481, 483), False, 'from okdata.resource_auth import ResourceAuthorizer\n'), ((504, 552), 'aws_xray_sdk.core.xray_recorder.capture', 'xray_recorder.capture', (['"""get_status_from_s3_path"""'], {}), "('get_status_from_s3_path')\n", (525, 552), False, 'from aws_xray_sdk.core import patch_all, xray_recorder\n'), ((820, 841), 'okdata.aws.logging.log_add', 'log_add', ([], {'s3_path': 'path'}), '(s3_path=path)\n', (827, 841), False, 'from okdata.aws.logging import logging_wrapper, log_add, log_exception\n'), ((851, 863), 'status.StatusData.StatusData', 'StatusData', ([], {}), '()\n', (861, 863), False, 'from status.StatusData import StatusData\n'), ((1071, 1102), 'status.common.extract_dataset_id', 'extract_dataset_id', (['status_item'], {}), '(status_item)\n', (1089, 1102), False, 'from status.common import response, response_error, extract_bearer_token, extract_dataset_id\n'), ((1126, 1153), 'status.common.extract_bearer_token', 'extract_bearer_token', (['event'], {}), '(event)\n', (1146, 1153), False, 'from status.common import response, response_error, extract_bearer_token, extract_dataset_id\n'), ((1162, 1226), 'okdata.aws.logging.log_add', 'log_add', ([], {'trace_id': "status_item['trace_id']", 'dataset_id': 'dataset_id'}), "(trace_id=status_item['trace_id'], dataset_id=dataset_id)\n", (1169, 1226), False, 'from okdata.aws.logging import logging_wrapper, log_add, log_exception\n'), ((1771, 1797), 'status.common.response_error', 'response_error', (['(403)', 'error'], {}), '(403, error)\n', (1785, 1797), False, 'from status.common import response, response_error, extract_bearer_token, extract_dataset_id\n'), ((754, 789), 'base64.b64decode', 'base64.b64decode', (["params['s3_path']"], {}), 
"(params['s3_path'])\n", (770, 789), False, 'import base64\n'), ((1022, 1048), 'status.common.response_error', 'response_error', (['(404)', 'error'], {}), '(404, error)\n', (1036, 1048), False, 'from status.common import response, response_error, extract_bearer_token, extract_dataset_id\n'), ((1837, 1854), 'okdata.aws.logging.log_exception', 'log_exception', (['ce'], {}), '(ce)\n', (1850, 1854), False, 'from okdata.aws.logging import logging_wrapper, log_add, log_exception\n'), ((1920, 1950), 'status.common.response_error', 'response_error', (['(404)', 'error_msg'], {}), '(404, error_msg)\n', (1934, 1950), False, 'from status.common import response, response_error, extract_bearer_token, extract_dataset_id\n'), ((1701, 1722), 'simplejson.dumps', 'simplejson.dumps', (['ret'], {}), '(ret)\n', (1717, 1722), False, 'import simplejson\n')] |
"""
Keystone API shims. Requires v3 API. See `Keystone HTTP API
<https://developer.openstack.org/api-ref/identity/v3/>`_
"""
from requests import HTTPError
from hammers.osrest.base import BaseAPI
API = BaseAPI('identity')
def project(auth, id):
"""Retrieve project by ID"""
response = API.get(auth, '/projects/{}'.format(id))
return response.json()['project']
def projects(auth, **params):
"""
Retrieve multiple projects, optionally filtered by `params`. Keyed by ID.
Example params: ``name``, ``enabled``, or stuff from
https://developer.openstack.org/api-ref/identity/v3/?expanded=list-projects-detail#list-projects
"""
response = API.get(auth, '/projects'.format(id), params=params)
return {p['id']: p for p in response.json()['projects']}
def project_lookup(auth, name_or_id):
"""Tries to find a single project by name or ID. Raises an error if
none or multiple projects found."""
try:
return keystone_project(auth, name_or_id)
except HTTPError:
# failed lookup assuming it was an id, must be a name?
pass
projects = keystone_projects(auth, name=name_or_id)
if len(projects) < 1:
raise RuntimeError('no projects found')
elif len(projects) > 1:
raise RuntimeError('multiple projects matched provided name')
id, project = projects.popitem()
return project
def user(auth, id):
"""Retrieve information about a user by ID"""
response = API.get(auth, '/v3/users/{}'.format(id))
return response.json()['user']
def users(auth, enabled=None, name=None):
    """Retrieve multiple users, optionally filtered."""
    # Only include filters that the caller actually supplied.
    params = {
        key: value
        for key, value in (('name', name), ('enabled', enabled))
        if value is not None
    }
    response = API.get(auth, '/v3/users', params=params)
    return {u['id']: u for u in response.json()['users']}
def user_lookup(auth, name_or_id):
    """Tries to find a single user by name or ID. Raises an error if none
    or multiple users are found."""
    try:
        return keystone_user(auth, name_or_id)
    except HTTPError:
        pass  # the value was not a valid ID; try it as a name instead
    candidates = keystone_users(auth, name=name_or_id)
    if not candidates:
        raise RuntimeError('no users found')
    if len(candidates) > 1:
        raise RuntimeError('multiple users matched provided name')
    _, found = candidates.popitem()
    return found
__all__ = [
    'keystone_project',
    'keystone_projects',
    'keystone_project_lookup',
    'keystone_user',
    'keystone_users',
    'keystone_user_lookup',
]
# Public aliases: export 'keystone_'-prefixed names (matching __all__ above)
# while keeping the short definitions earlier in the module readable.
keystone_project = project
keystone_projects = projects
keystone_project_lookup = project_lookup
keystone_user = user
keystone_users = users
keystone_user_lookup = user_lookup
| [
"hammers.osrest.base.BaseAPI"
] | [((204, 223), 'hammers.osrest.base.BaseAPI', 'BaseAPI', (['"""identity"""'], {}), "('identity')\n", (211, 223), False, 'from hammers.osrest.base import BaseAPI\n')] |
import json
from data_query_engine import DataQueryEngine
class Sql4Json(object):
    """Run an SQL-like query (via DataQueryEngine) against a JSON document."""
    def __init__(self, json_str, sql_str):
        """Parse *json_str* and prepare a query engine for *sql_str*.
        Bug fix: the old ``encoding="latin1"`` argument to ``json.loads``
        has been ignored since Python 3 and raises a ``TypeError`` on
        Python 3.9+, so it was dropped.
        """
        self.data = json.loads(json_str)
        self.query_engine = DataQueryEngine(self.data, sql_str)
    def get_results(self):
        """Return the query engine's result set."""
        return self.query_engine.get_results()
    def __str__(self):
        """Pretty-printed JSON rendering of the query results."""
        return json.dumps(self.query_engine.get_results(), sort_keys=True, indent=4, separators=(',', ': '))
"json.loads",
"data_query_engine.DataQueryEngine"
] | [((148, 187), 'json.loads', 'json.loads', (['json_str'], {'encoding': '"""latin1"""'}), "(json_str, encoding='latin1')\n", (158, 187), False, 'import json\n'), ((216, 251), 'data_query_engine.DataQueryEngine', 'DataQueryEngine', (['self.data', 'sql_str'], {}), '(self.data, sql_str)\n', (231, 251), False, 'from data_query_engine import DataQueryEngine\n')] |
"""Contains derivative calculation with BackPACK."""
from test.core.derivatives.implementation.base import DerivativesImplementation
from test.utils import chunk_sizes
from typing import List
from torch import Tensor, einsum, zeros
from backpack.utils.subsampling import subsample
class BackpackDerivatives(DerivativesImplementation):
    """Derivative implementations with BackPACK."""
    def __init__(self, problem):
        """Initialization.
        Args:
            problem: test problem
        """
        # extend() installs the BackPACK hooks needed for backward quantities.
        problem.extend()
        super().__init__(problem)
    def store_forward_io(self):
        """Do one forward pass.
        This implicitly saves relevant quantities for backward pass.
        """
        self.problem.forward_pass()
    def jac_mat_prod(self, mat):  # noqa: D102
        self.store_forward_io()
        return self.problem.derivative.jac_mat_prod(
            self.problem.module, None, None, mat
        )
    def jac_t_mat_prod(
        self, mat: Tensor, subsampling: List[int]
    ) -> Tensor:  # noqa: D102
        self.store_forward_io()
        return self.problem.derivative.jac_t_mat_prod(
            self.problem.module, None, None, mat, subsampling=subsampling
        )
    def param_mjp(
        self,
        param_str: str,
        mat: Tensor,
        sum_batch: bool,
        subsampling: List[int] = None,
    ) -> Tensor:  # noqa: D102
        self.store_forward_io()
        return self.problem.derivative.param_mjp(
            param_str,
            self.problem.module,
            None,
            None,
            mat,
            sum_batch=sum_batch,
            subsampling=subsampling,
        )
    def weight_jac_mat_prod(self, mat):  # noqa: D102
        self.store_forward_io()
        return self.problem.derivative.weight_jac_mat_prod(
            self.problem.module, None, None, mat
        )
    def bias_jac_mat_prod(self, mat):  # noqa: D102
        self.store_forward_io()
        return self.problem.derivative.bias_jac_mat_prod(
            self.problem.module, None, None, mat
        )
    def ea_jac_t_mat_jac_prod(self, mat):  # noqa: D102
        self.store_forward_io()
        return self.problem.derivative.ea_jac_t_mat_jac_prod(
            self.problem.module, None, None, mat
        )
    def sum_hessian(self):  # noqa: D102
        self.store_forward_io()
        return self.problem.derivative.sum_hessian(self.problem.module, None, None)
    def input_hessian_via_sqrt_hessian(
        self, mc_samples: int = None, chunks: int = 1, subsampling: List[int] = None
    ) -> Tensor:
        """Computes the Hessian w.r.t. to the input from its matrix square root.
        Args:
            mc_samples: If int, uses an MC approximation with the specified
                number of samples. If None, uses the exact hessian. Defaults to None.
            chunks: Maximum sequential split of the computation. Default: ``1``.
                Only used if mc_samples is specified.
            subsampling: Indices of active samples. ``None`` uses all samples.
        Returns:
            Hessian with respect to the input. Has shape
            ``[N, A, B, ..., N, A, B, ...]`` where ``N`` is the batch size or number
            of active samples when sub-sampling is used, and ``[A, B, ...]`` are the
            input's feature dimensions.
        """
        self.store_forward_io()
        if mc_samples is not None:
            # Split the MC samples into chunks and average the per-chunk
            # estimates, weighting each chunk by its share of the samples.
            chunk_samples = chunk_sizes(mc_samples, chunks)
            chunk_weights = [samples / mc_samples for samples in chunk_samples]
            individual_hessians: Tensor = sum(
                weight
                * self._sample_hessians_from_sqrt(
                    self.problem.derivative.sqrt_hessian_sampled(
                        self.problem.module,
                        None,
                        None,
                        mc_samples=samples,
                        subsampling=subsampling,
                    )
                )
                for weight, samples in zip(chunk_weights, chunk_samples)
            )
        else:
            sqrt_hessian = self.problem.derivative.sqrt_hessian(
                self.problem.module, None, None, subsampling=subsampling
            )
            individual_hessians = self._sample_hessians_from_sqrt(sqrt_hessian)
        input0 = subsample(self.problem.module.input0, subsampling=subsampling)
        return self._embed_sample_hessians(individual_hessians, input0)
    def hessian_is_zero(self) -> bool:  # noqa: D102
        return self.problem.derivative.hessian_is_zero(self.problem.module)
    def _sample_hessians_from_sqrt(self, sqrt: Tensor) -> Tensor:
        """Convert individual matrix square root into individual full matrix.
        Args:
            sqrt: individual square root of hessian
        Returns:
            Individual Hessians of shape ``[N, A, B, ..., A, B, ...]`` where
            ``input.shape[1:] = [A, B, ...]`` are the input feature dimensions
            and ``N`` is the batch size.
        """
        N, input_dims = sqrt.shape[1], sqrt.shape[2:]
        sqrt_flat = sqrt.flatten(start_dim=2)
        # Contract over the leading 'v' (square-root) dimension:
        # H[n] = Sᵀ S per sample, with S flattened over feature dims.
        sample_hessians = einsum("vni,vnj->nij", sqrt_flat, sqrt_flat)
        return sample_hessians.reshape(N, *input_dims, *input_dims)
    def _embed_sample_hessians(
        self, individual_hessians: Tensor, input: Tensor
    ) -> Tensor:
        """Embed Hessians w.r.t. individual samples into Hessian w.r.t. all samples.
        Args:
            individual_hessians: Hessians w.r.t. individual samples in the input.
            input: Inputs for the for samples whose individual Hessians are passed.
                Has shape ``[N, A, B, ..., A, B, ...]`` where ``N`` is the number of
                active samples and ``[A, B, ...]`` are the feature dimensions.
        Returns:
            Hessian that contains the individual Hessians as diagonal blocks.
            Has shape ``[N, A, B, ..., N, A, B, ...]``.
        """
        N, D = input.shape[0], input.shape[1:].numel()
        hessian = zeros(N, D, N, D, device=input.device, dtype=input.dtype)
        # Place each per-sample Hessian on the block diagonal; cross-sample
        # blocks stay zero because samples do not interact.
        for n in range(N):
            hessian[n, :, n, :] = individual_hessians[n].reshape(D, D)
        return hessian.reshape(*input.shape, *input.shape)
    def hessian_mat_prod(self, mat: Tensor) -> Tensor:  # noqa: D102
        self.store_forward_io()
        hmp = self.problem.derivative.make_hessian_mat_prod(
            self.problem.module, None, None
        )
        return hmp(mat)
| [
"test.utils.chunk_sizes",
"torch.einsum",
"torch.zeros",
"backpack.utils.subsampling.subsample"
] | [((4342, 4404), 'backpack.utils.subsampling.subsample', 'subsample', (['self.problem.module.input0'], {'subsampling': 'subsampling'}), '(self.problem.module.input0, subsampling=subsampling)\n', (4351, 4404), False, 'from backpack.utils.subsampling import subsample\n'), ((5173, 5217), 'torch.einsum', 'einsum', (['"""vni,vnj->nij"""', 'sqrt_flat', 'sqrt_flat'], {}), "('vni,vnj->nij', sqrt_flat, sqrt_flat)\n", (5179, 5217), False, 'from torch import Tensor, einsum, zeros\n'), ((6061, 6118), 'torch.zeros', 'zeros', (['N', 'D', 'N', 'D'], {'device': 'input.device', 'dtype': 'input.dtype'}), '(N, D, N, D, device=input.device, dtype=input.dtype)\n', (6066, 6118), False, 'from torch import Tensor, einsum, zeros\n'), ((3453, 3484), 'test.utils.chunk_sizes', 'chunk_sizes', (['mc_samples', 'chunks'], {}), '(mc_samples, chunks)\n', (3464, 3484), False, 'from test.utils import chunk_sizes\n')] |
from rest_framework import serializers
from rest_framework.serializers import raise_errors_on_nested_writes
from rest_framework.utils import model_meta
import logging
# NOTE(review): getLogger(__file__) keys the logger by file path; the usual
# convention is getLogger(__name__) — confirm log configuration does not rely
# on the current name before changing.
logger = logging.getLogger(__file__)
class BaseSerializer(serializers.ModelSerializer):
    """Model serializer that stamps the acting user onto created/updated rows."""
    def update(self, instance, validated_data):
        """Apply *validated_data* to *instance*, record ``update_by``, and save.
        Mirrors DRF's default ``ModelSerializer.update`` with the addition of
        recording the requesting user's name before saving.
        """
        raise_errors_on_nested_writes('update', self, validated_data)
        info = model_meta.get_field_info(instance)
        # Simply set each attribute on the instance, and then save it.
        # Note that unlike `.create()` we don't need to treat many-to-many
        # relationships as being a special case. During updates we already
        # have an instance pk for the relationships to be associated with.
        for attr, value in validated_data.items():
            if attr in info.relations and info.relations[attr].to_many:
                field = getattr(instance, attr)
                field.set(value)
            else:
                setattr(instance, attr, value)
        instance.update_by = self.context["request"].user.username
        instance.save()
        return instance
    def create(self, validated_data):
        """Create the instance, then (best-effort) record ``create_by``."""
        instance = super(BaseSerializer, self).create(validated_data)
        try:
            username = self.context["request"].user.username
            # Bug fix: the original assigned ``instance.createBy`` (a
            # nonexistent attribute) and repeated the identical block twice;
            # the model field is ``create_by``.
            if hasattr(instance, 'create_by'):
                instance.create_by = username
        except Exception as e:
            # Deliberately best-effort: a missing request/user must not
            # prevent the instance from being created.
            logger.error("create add default msg error: %s" % str(e))
        instance.save()
        return instance
| [
"logging.getLogger",
"rest_framework.utils.model_meta.get_field_info",
"rest_framework.serializers.raise_errors_on_nested_writes"
] | [((182, 209), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (199, 209), False, 'import logging\n'), ((326, 387), 'rest_framework.serializers.raise_errors_on_nested_writes', 'raise_errors_on_nested_writes', (['"""update"""', 'self', 'validated_data'], {}), "('update', self, validated_data)\n", (355, 387), False, 'from rest_framework.serializers import raise_errors_on_nested_writes\n'), ((404, 439), 'rest_framework.utils.model_meta.get_field_info', 'model_meta.get_field_info', (['instance'], {}), '(instance)\n', (429, 439), False, 'from rest_framework.utils import model_meta\n')] |
import os
import sys
import pytest
from . import pytest_utils
HERE = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.parametrize('o1,o2', [
    (1, 2),
    (3, 3.),
    ({'inject_test': 'foo bar{{inject_me}}', 'inject_me': 'beezbooz'},
     {'inject_test': 'foo barbeezbooz', 'inject_me': 'beezbooz'}),
])
def test_assert_objects_equal_throws_assertion(o1, o2):
    """Each unequal pair must make assert_objects_equal raise AssertionError."""
    # TODO: pass match=... to pytest.raises to also pin the failure message.
    with pytest.raises(AssertionError):
        pytest_utils.assert_objects_equal(o1, o2)
| [
"os.path.abspath",
"pytest.mark.parametrize",
"pytest.raises"
] | [((116, 305), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""o1,o2"""', "[(1, 2), (3, 3.0), ({'inject_test': 'foo bar{{inject_me}}', 'inject_me':\n 'beezbooz'}, {'inject_test': 'foo barbeezbooz', 'inject_me': 'beezbooz'})]"], {}), "('o1,o2', [(1, 2), (3, 3.0), ({'inject_test':\n 'foo bar{{inject_me}}', 'inject_me': 'beezbooz'}, {'inject_test':\n 'foo barbeezbooz', 'inject_me': 'beezbooz'})])\n", (139, 305), False, 'import pytest\n'), ((86, 111), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'import os\n'), ((407, 436), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (420, 436), False, 'import pytest\n')] |
"""Filter and convert annotations"""
import json
import os
import cv2
# Artifact class labels the converter accepts; regions with any other
# 'artifact' attribute are silently dropped.
USED_ARTF_NAME = ["backpack", "phone", "survivor", "drill", "fire_extinguisher", "vent", "helmet", "rope", "breadcrumb",
                  "robot", "cube", "nothing"]
BLACKLIST = [
"../virtual/helmet/local_J01_000.jpg",
"../virtual/helmet/local_J01_001.jpg",
"../virtual/helmet/local_J01_012.jpg",
"../virtual/helmet/local_J01_013.jpg",
"../virtual/helmet/local_J01_014.jpg",
"../virtual/helmet/local_J01_015.jpg",
"../virtual/helmet/local_J01_016.jpg",
"../virtual/helmet/local_J03_000.jpg",
"../virtual/helmet/local_J03_001.jpg",
"../virtual/helmet/local_J03_002.jpg",
"../virtual/helmet/local_J03_003.jpg",
"../virtual/helmet/local_J03_004.jpg",
"../virtual/helmet/local_J03_005.jpg",
"../virtual/helmet/local_J03_006.jpg",
"../virtual/helmet/local_J03_007.jpg",
"../virtual/helmet/local_J03_008.jpg",
"../virtual/helmet/local_J03_009.jpg",
"../virtual/helmet/local_J03_010.jpg",
"../virtual/helmet/local_J03_011.jpg",
"../virtual/helmet/local_J03_012.jpg",
"../virtual/helmet/local_J03_013.jpg",
"../virtual/helmet/local_J03_014.jpg",
"../virtual/helmet/local_J03_015.jpg",
"../virtual/helmet/local_J03_016.jpg",
"../virtual/helmet/local_J03_017.jpg",
"../virtual/helmet/local_J03_018.jpg",
"../virtual/drill/drill_tunel_p_013.jpg",
"../virtual/drill/drill_tunel_p_014.jpg",
"../virtual/drill/drill_tunel_p_015.jpg",
"../virtual/drill/drill_tunel_p_016.jpg",
"../virtual/drill/drill_tunel_p_039.jpg",
"../virtual/drill/drill_tunel_p_040.jpg",
"../virtual/extinguisher/ext_tunel_p_000.jpg",
"../virtual/extinguisher/ext_tunel_p_001.jpg",
"../virtual/extinguisher/ext_tunel_p_002.jpg",
"../virtual/extinguisher/ext_tunel_p_003.jpg",
"../virtual/drill/drill_tunel_s2_000.jpg",
"../virtual/drill/drill_tunel_s2_001.jpg",
"../virtual/drill/drill_tunel_s2_002.jpg",
"../virtual/drill/drill_tunel_s2_003.jpg",
"../virtual/drill/drill_tunel_s2_004.jpg",
"../virtual/extinguisher/ext_tunel_s2_000.jpg",
"../virtual/extinguisher/ext_tunel_s2_001.jpg",
"../virtual/extinguisher/ext_tunel_s2_002.jpg",
"../virtual/extinguisher/ext_tunel_s2_004.jpg",
"../virtual/backpack/fq-99-freyja-000.jpg",
"../virtual/backpack/fq-99-freyja-001.jpg",
"../virtual/backpack/fq-99-freyja-002.jpg",
"../virtual/backpack/fq-99-freyja-003.jpg",
"../virtual/backpack/fq-99-freyja-004.jpg",
"../virtual/backpack/fq-99-freyja-005.jpg",
"../virtual/backpack/fq-99-freyja-006.jpg",
"../virtual/backpack/fq-99-freyja-007.jpg",
"../virtual/backpack/fq-99-freyja-008.jpg",
"../virtual/backpack/fq-99-freyja-029.jpg",
"../virtual/backpack/fq-99-freyja-032.jpg",
"../virtual/backpack/backpack_fp3_000.jpg",
"../virtual/backpack/backpack_fp3_001.jpg",
"../virtual/backpack/backpack_fp3_002.jpg",
"../virtual/backpack/backpack_fp3_004.jpg",
"../virtual/backpack/backpack_fp3_014.jpg",
"../virtual/backpack/backpack_fp3_015.jpg",
"../virtual/helmet/helmet_fp3_001.jpg",
"../virtual/helmet/helmet_fp3_002.jpg"
]
def manual_sorting(data, annotations_dir):
    """Interactively label annotations in an OpenCV window.
    Keys: n = next image, b = previous, t = mark "train", e = mark "eval",
    d = mark "None" (discard), q = save and quit.
    Args:
        data: list of ``[file_name, artf_name, bbox, use_for]`` entries;
            the ``use_for`` field is edited in place.
        annotations_dir: directory that the image paths in ``data`` are
            relative to.
    Returns:
        The same ``data`` list with updated ``use_for`` labels.
    """
    ii = 0
    while True:
        # Clamp the cursor so n/b cannot run past either end of the list.
        if ii < 0:
            ii = 0
        if ii >= len(data):
            ii = len(data) - 1
        file_name, artf_name, bbox, use_for = data[ii]
        x, y, xw, yh = bbox
        img = cv2.imread(os.path.join(annotations_dir, file_name), 1)
        # Draw the bounding box, its label, and the current use_for tag.
        cv2.rectangle(img, (x, y), (xw, yh), (0, 0, 255))
        cv2.putText(img, artf_name, (x, y), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255))
        cv2.putText(img, use_for, (10, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0))
        cv2.imshow("win", img)
        k = cv2.waitKey(0) & 0xFF
        if k == ord("n"):  # next img
            ii += 1
        elif k == ord("b"):  # back one img
            ii -= 1
        elif k == ord("t"):
            data[ii][3] = "train"  # use for training
        elif k == ord("e"):
            data[ii][3] = "eval"  # use for evaluation
        elif k == ord("d"):
            data[ii][3] = "None"  # do not use
        elif k == ord("q"):  # close and save
            break
    cv2.destroyAllWindows()
    return data
def main(annotation_file, out_prefix):
    """Convert VIA-style JSON annotations into train/eval CSV files.
    Args:
        annotation_file: path to the JSON annotation file; images referenced
            by the annotations are resolved relative to its directory.
        out_prefix: prefix for ``<prefix>_train.csv`` (and, in manual mode,
            ``<prefix>_eval.csv``).
    Reads the module-level ``g_manual`` flag: when True, annotations are
    first labelled interactively via ``manual_sorting``.
    """
    annotations_dir = os.path.dirname(annotation_file)
    data = []
    # Bug fix: the annotation file was never closed; use a context manager.
    with open(annotation_file) as f:
        json_data = json.load(f)
    for item in json_data.values():
        file_name = item['filename']
        if file_name in BLACKLIST:
            print(file_name)
            continue
        for reg in item['regions']:
            artf_name = reg['region_attributes']['artifact']
            if artf_name not in USED_ARTF_NAME:
                continue
            x = reg['shape_attributes']['x']
            y = reg['shape_attributes']['y']
            width = reg['shape_attributes']['width']
            height = reg['shape_attributes']['height']
            # In manual mode every record starts as "None" (unused) until the
            # operator labels it; otherwise everything goes to training.
            use_for = "None" if g_manual else "train"
            data.append([file_name, artf_name, [x, y, x + width, y + height], use_for])
    if g_manual:
        data = manual_sorting(data, annotations_dir)
    header = "filename,class,xmin,ymin,xmax,ymax\r\n"
    # Bug fix: both CSV files were left open (and leaked on error); the eval
    # file is only opened in manual mode and is always closed via finally.
    with open(out_prefix + "_train.csv", "w") as out_train:
        out_train.write(header)
        out_eval = open(out_prefix + "_eval.csv", "w") if g_manual else None
        try:
            if out_eval is not None:
                out_eval.write(header)
            for file_name, artf_name, (x, y, xw, yh), use_for in data:
                row = "%s,%s,%d,%d,%d,%d\r\n" % (file_name, artf_name, x, y, xw, yh)
                if use_for == "train":
                    out_train.write(row)
                elif use_for == "eval" and out_eval is not None:
                    out_eval.write(row)
        finally:
            if out_eval is not None:
                out_eval.close()
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Filter and convert annotations.')
    parser.add_argument('annotation', help='json - annotations')
    # Bug fix: corrected the user-facing "outpput" typo in the help text.
    parser.add_argument('--out', help='output csv filename', default='annotation')
    parser.add_argument('--manual', help='Manual sorting', action='store_true')
    args = parser.parse_args()
    annotation_file = args.annotation
    out_csv = args.out
    # Module-level flag read by main(); store_true already yields a bool,
    # so no if/else dance is needed.
    g_manual = args.manual
    main(annotation_file, out_csv)
| [
"cv2.rectangle",
"argparse.ArgumentParser",
"os.path.join",
"cv2.putText",
"cv2.imshow",
"os.path.dirname",
"cv2.destroyAllWindows",
"json.load",
"cv2.waitKey"
] | [((4043, 4066), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4064, 4066), False, 'import cv2\n'), ((4147, 4179), 'os.path.dirname', 'os.path.dirname', (['annotation_file'], {}), '(annotation_file)\n', (4162, 4179), False, 'import os\n'), ((5997, 6067), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Filter and convert annotations."""'}), "(description='Filter and convert annotations.')\n", (6020, 6067), False, 'import argparse\n'), ((3274, 3323), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(xw, yh)', '(0, 0, 255)'], {}), '(img, (x, y), (xw, yh), (0, 0, 255))\n', (3287, 3323), False, 'import cv2\n'), ((3332, 3438), 'cv2.putText', 'cv2.putText', (['img', 'artf_name', '(x, y)'], {'fontFace': 'cv2.FONT_HERSHEY_SIMPLEX', 'fontScale': '(1)', 'color': '(0, 0, 255)'}), '(img, artf_name, (x, y), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=(0, 0, 255))\n', (3343, 3438), False, 'import cv2\n'), ((3443, 3549), 'cv2.putText', 'cv2.putText', (['img', 'use_for', '(10, 50)'], {'fontFace': 'cv2.FONT_HERSHEY_SIMPLEX', 'fontScale': '(1)', 'color': '(255, 0, 0)'}), '(img, use_for, (10, 50), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=(255, 0, 0))\n', (3454, 3549), False, 'import cv2\n'), ((3554, 3576), 'cv2.imshow', 'cv2.imshow', (['"""win"""', 'img'], {}), "('win', img)\n", (3564, 3576), False, 'import cv2\n'), ((4251, 4263), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4260, 4263), False, 'import json\n'), ((3221, 3261), 'os.path.join', 'os.path.join', (['annotations_dir', 'file_name'], {}), '(annotations_dir, file_name)\n', (3233, 3261), False, 'import os\n'), ((3590, 3604), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3601, 3604), False, 'import cv2\n')] |
from flask import current_app
from functools import wraps
from app import db
def transactional(func):
    """Decorator: commit the DB session after *func* succeeds; on any
    exception, log it via the Flask app logger, roll back, and re-raise."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            # Commit inside the try so a failing commit also triggers rollback.
            result = func(*args, **kwargs)
            db.session.commit()
            return result
        except Exception as exc:
            current_app.logger.error(exc)
            db.session.rollback()
            raise
    return wrapper
| [
"app.db.session.commit",
"app.db.session.rollback",
"functools.wraps",
"flask.current_app.logger.error"
] | [((110, 121), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (115, 121), False, 'from functools import wraps\n'), ((232, 251), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (249, 251), False, 'from app import db\n'), ((318, 345), 'flask.current_app.logger.error', 'current_app.logger.error', (['e'], {}), '(e)\n', (342, 345), False, 'from flask import current_app\n'), ((358, 379), 'app.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (377, 379), False, 'from app import db\n')] |
import os, threading
import tkinter.ttk as ttk
import tkinter as tk
class Tree(ttk.Treeview):
    """Filesystem browser built on ttk.Treeview with lazy directory loading."""
    def __init__(self, master, double_click=None, single_click=None, startpath=None, *args, **kwargs):
        """Configure columns, embedded icons, and event bindings; open *startpath* if given."""
        super().__init__(master, *args, **kwargs)
        self.master = master
        self.base = master.base
        self.double_click = double_click
        self.single_click = single_click
        self.path = startpath
        # Icons are embedded as base64 PNG data so no image files are needed.
        self.file_icn = tk.PhotoImage(data="""
            iVBORw0KGgoAAAANSUhEUgAAAA0AAAARCAYAAAAG/<KEY>MAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB
            3d3cuaW5rc2NhcGUub3Jnm+48GgAAAUNJREFUKJHVzzFLQmEUxvH/uaipF0EbW1rEsWyLJCirKShszIKMoqnFrU9Re+YSak
            MtTU3XhnCLhj5GIEJeu5re06KScpXWnunlvM+P876SuvmIBEPhK1UyQIzRdPSbleqR+fp7aASC4UtVjj0AQED81DYr9tIIE
            sgAqMuulTXFypoCmgNQoQj4XFfvtir23BABs0Cnemg+jq8R9EWVU5B4zxUr/fA1P0AArsfTAKgemEWEM9AEjr6vlpsLxqQy
            gKrEAax9syDKOWjEr1LzeZUFw1EUgYt02d5G6SogIh1VNT1Rr9N+MvyBGsIyyuLwwnVdRPBEz7lYA0iNzzdK9huQnPqnSfk
            nSP5S1n7fAOrAzPqtvTMNrJWaSSAB1CVdsq+Bk/7CT9CuhxEg2j8XfG2nlQ+GwoYqe6BRDzBIA7hvO638D0khbw04aabsAA
            AAAElFTkSuQmCC""")
        self.folder_icn = tk.PhotoImage(data="""
            iVBORw0KGgoAAAANSUhEUgAAABAAAAAMCAYAAABr5z2BAAAACXBIWXMAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB
            3d3cuaW5rc2NhcGUub3Jnm+48GgAAAJBJREFUKJHdzTEKwkAUhOF/loCFRbAVr+IhLAWLCPaW3sFGPIOm1Bt4hxSSEwRs7Z
            UdayErmnROO++bp93htJK0BUa8pxEq1ovZhQ/R/ni+G/LWEjW2y4Stx4NnmUU7l9R6YTxBbFLfb49sGlL4m9ieh84aAA17D
            sCfDLiHdwDqrlpwDTHGAqiA+IONQIW0fAFkySdEGFdeCgAAAABJRU5ErkJggg==""")
        # Hidden columns store per-node bookkeeping (full path and node type).
        self.config(
            show="tree", columns=("fullpath", "type"), displaycolumns='')
        if startpath:
            self.open_directory(startpath)
        else:
            self.insert('', 0, text='You have not yet opened a folder.')
        self.bind('<Double-Button-1>', self.double_click)
        self.bind("<<TreeviewSelect>>", self.check_single_click)
        self.bind("<<TreeviewOpen>>", self.update_tree)
    def check_single_click(self, _):
        """On selection: fire the single_click callback for files, toggle folders."""
        if self.item_type(self.focus()) == 'file':
            if self.single_click:
                self.single_click(self.item_fullpath(self.focus()))
        else:
            self.toggle_node(self.focus())
    def is_open(self, node):
        """Return truthy if *node* is currently expanded."""
        return self.item(node, 'open')
    def toggle_node(self, node):
        """Expand/collapse a directory node; (re)load its children on expand."""
        if self.item_type(node) == 'directory':
            if self.is_open(node):
                self.item(node, open=False)
            else:
                self.item(node, open=True)
                self.update_node(node)
    def clear_node(self, node):
        """Remove all children of *node*."""
        self.delete(*self.get_children(node))
    def clear_tree(self):
        """Remove every node in the tree."""
        self.clear_node('')
    def fill_node(self, node, path):
        """Populate *node* with the entries of directory *path* (dirs first, sorted)."""
        self.clear_node(node)
        items = [os.path.join(path, p) for p in os.listdir(path)]
        directories = sorted([p for p in items if os.path.isdir(p)])
        files = sorted([p for p in items if os.path.isfile(p)])
        for p in directories:
            name = os.path.split(p)[1]
            oid = self.insert(node, tk.END, text=f" {name}", values=[p, 'directory'], image=self.folder_icn)
            # Dummy child makes the expander arrow appear; real children are
            # loaded lazily when the node is opened (<<TreeviewOpen>>).
            self.insert(oid, 0, text='dummy')
        for p in files:
            if os.path.isfile(p):
                name = os.path.split(p)[1]
                oid = self.insert(node, tk.END, text=f" {name}", values=[p, 'file'], image=self.file_icn)
    def update_node(self, node):
        """Reload the children of a directory node from disk."""
        if self.set(node, "type") != 'directory':
            return
        path = self.set(node, "fullpath")
        self.fill_node(node, path)
    def update_tree(self, *_):
        """<<TreeviewOpen>> handler: refresh the node being expanded."""
        self.update_node(self.focus())
    def create_root(self, path):
        """Rebuild the whole tree rooted at *path*."""
        self.clear_tree()
        self.fill_node('', path)
    def item_type(self, item):
        """Return the stored node type ('file' or 'directory')."""
        return self.set(item, "type")
    def item_fullpath(self, item):
        """Return the stored absolute path of *item*."""
        return self.set(item, "fullpath")
    def open_directory(self, path):
        """Open *path* as the tree root; the directory scan runs in a thread."""
        self.path = os.path.abspath(path)
        threading.Thread(target=self.create_root, args=[self.path]).start()
    def refresh_tree(self):
        """Re-scan the currently opened directory."""
        self.open_directory(self.path)
    def collapse_all(self):
        """Collapse all top-level nodes."""
        for node in self.get_children():
            self.item(node, open=False)
# def add_node(self):
# name = enterbox("Enter file name")
# selected = self.focus() or ''
# # parent = self.parent(selected)
# # if parent == '':
# # parent = self.path
# path = os.path.join(self.item_fullpath(selected), name)
# # fullpath = os.path.join(parent_path, name)
# with open(path, 'w') as f:
# f.write("")
# self.update_node(selected)
| [
"os.listdir",
"os.path.join",
"os.path.split",
"os.path.isfile",
"os.path.isdir",
"os.path.abspath",
"tkinter.PhotoImage",
"threading.Thread"
] | [((448, 1123), 'tkinter.PhotoImage', 'tk.PhotoImage', ([], {'data': '"""\n iVBORw0KGgoAAAANSUhEUgAAAA0AAAARCAYAAAAG/<KEY>MAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB\n 3d3cuaW5rc2NhcGUub3Jnm+48GgAAAUNJREFUKJHVzzFLQmEUxvH/uaipF0EbW1rEsWyLJCirKShszIKMoqnFrU9Re+YSak\n MtTU3XhnCLhj5GIEJeu5re06KScpXWnunlvM+P876SuvmIBEPhK1UyQIzRdPSbleqR+fp7aASC4UtVjj0AQED81DYr9tIIE\n sgAqMuulTXFypoCmgNQoQj4XFfvtir23BABs0Cnemg+jq8R9EWVU5B4zxUr/fA1P0AArsfTAKgemEWEM9AEjr6vlpsLxqQy\n gKrEAax9syDKOWjEr1LzeZUFw1EUgYt02d5G6SogIh1VNT1Rr9N+MvyBGsIyyuLwwnVdRPBEz7lYA0iNzzdK9huQnPqnSfk\n nSP5S1n7fAOrAzPqtvTMNrJWaSSAB1CVdsq+Bk/7CT9CuhxEg2j8XfG2nlQ+GwoYqe6BRDzBIA7hvO638D0khbw04aabsAA\n AAAElFTkSuQmCC"""'}), '(data=\n """\n iVBORw0KGgoAAAANSUhEUgAAAA0AAAARCAYAAAAG/<KEY>MAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB\n 3d3cuaW5rc2NhcGUub3Jnm+48GgAAAUNJREFUKJHVzzFLQmEUxvH/uaipF0EbW1rEsWyLJCirKShszIKMoqnFrU9Re+YSak\n MtTU3XhnCLhj5GIEJeu5re06KScpXWnunlvM+P876SuvmIBEPhK1UyQIzRdPSbleqR+fp7aASC4UtVjj0AQED81DYr9tIIE\n sgAqMuulTXFypoCmgNQoQj4XFfvtir23BABs0Cnemg+jq8R9EWVU5B4zxUr/fA1P0AArsfTAKgemEWEM9AEjr6vlpsLxqQy\n gKrEAax9syDKOWjEr1LzeZUFw1EUgYt02d5G6SogIh1VNT1Rr9N+MvyBGsIyyuLwwnVdRPBEz7lYA0iNzzdK9huQnPqnSfk\n nSP5S1n7fAOrAzPqtvTMNrJWaSSAB1CVdsq+Bk/7CT9CuhxEg2j8XfG2nlQ+GwoYqe6BRDzBIA7hvO638D0khbw04aabsAA\n AAAElFTkSuQmCC"""\n )\n', (461, 1123), True, 'import tkinter as tk\n'), ((1140, 1560), 'tkinter.PhotoImage', 'tk.PhotoImage', ([], {'data': '"""\n iVBORw0KGgoAAAANSUhEUgAAABAAAAAMCAYAAABr5z2BAAAACXBIWXMAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB\n 3d3cuaW5rc2NhcGUub3Jnm+48GgAAAJBJREFUKJHdzTEKwkAUhOF/loCFRbAVr+IhLAWLCPaW3sFGPIOm1Bt4hxSSEwRs7Z\n UdayErmnROO++bp93htJK0BUa8pxEq1ovZhQ/R/ni+G/LWEjW2y4Stx4NnmUU7l9R6YTxBbFLfb49sGlL4m9ieh84aAA17D\n sCfDLiHdwDqrlpwDTHGAqiA+IONQIW0fAFkySdEGFdeCgAAAABJRU5ErkJggg=="""'}), '(data=\n """\n iVBORw0KGgoAAAANSUhEUgAAABAAAAAMCAYAAABr5z2BAAAACXBIWXMAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB\n 
3d3cuaW5rc2NhcGUub3Jnm+48GgAAAJBJREFUKJHdzTEKwkAUhOF/loCFRbAVr+IhLAWLCPaW3sFGPIOm1Bt4hxSSEwRs7Z\n UdayErmnROO++bp93htJK0BUa8pxEq1ovZhQ/R/ni+G/LWEjW2y4Stx4NnmUU7l9R6YTxBbFLfb49sGlL4m9ieh84aAA17D\n sCfDLiHdwDqrlpwDTHGAqiA+IONQIW0fAFkySdEGFdeCgAAAABJRU5ErkJggg=="""\n )\n', (1153, 1560), True, 'import tkinter as tk\n'), ((3991, 4012), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (4006, 4012), False, 'import os, threading\n'), ((2810, 2831), 'os.path.join', 'os.path.join', (['path', 'p'], {}), '(path, p)\n', (2822, 2831), False, 'import os, threading\n'), ((3263, 3280), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (3277, 3280), False, 'import os, threading\n'), ((2841, 2857), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2851, 2857), False, 'import os, threading\n'), ((3043, 3059), 'os.path.split', 'os.path.split', (['p'], {}), '(p)\n', (3056, 3059), False, 'import os, threading\n'), ((4021, 4080), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.create_root', 'args': '[self.path]'}), '(target=self.create_root, args=[self.path])\n', (4037, 4080), False, 'import os, threading\n'), ((2910, 2926), 'os.path.isdir', 'os.path.isdir', (['p'], {}), '(p)\n', (2923, 2926), False, 'import os, threading\n'), ((2973, 2990), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (2987, 2990), False, 'import os, threading\n'), ((3305, 3321), 'os.path.split', 'os.path.split', (['p'], {}), '(p)\n', (3318, 3321), False, 'import os, threading\n')] |
from typing import IO
import yaml
class YamlFileDatabaseMixin:
    """Mixin adding YAML load/dump helpers for file-backed databases."""
    async def load_yaml(self, file: IO) -> dict:
        """Parse and return the YAML content of *file* (safe loader)."""
        return yaml.safe_load(file)
    async def dump_yaml(self, data: dict, file: IO):
        """Serialize *data* into *file* using the safe YAML dumper."""
        return yaml.safe_dump(data, file)
| [
"yaml.safe_load",
"yaml.safe_dump"
] | [((130, 150), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (144, 150), False, 'import yaml\n'), ((220, 246), 'yaml.safe_dump', 'yaml.safe_dump', (['data', 'file'], {}), '(data, file)\n', (234, 246), False, 'import yaml\n')] |
from typing import Union, Optional, Sequence, Any, Mapping, List, Tuple, Callable
from collections.abc import Iterable
import operator
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
def pie_marker(
    ratios: Sequence[float],
    res: int = 50,
    direction: str = "+",
    start: float = 0.0,
) -> Tuple[list, list]:
    """
    Create each slice of pie as a separate marker.

    Parameters:
        ratios(list): List of ratios that add up to 1.
        res: Number of points around the circle.
        direction: '+' for counter-clockwise, or '-' for clockwise.
        start: Starting position in radians.

    Returns:
        xys, ss: Tuple of list of xy points and sizes of each slice in the pie marker.

    Raises:
        ValueError: If ``direction`` is neither '+' nor '-'.
    """
    if np.abs(np.sum(ratios) - 1) > 0.01:
        print("Warning: Ratios do not add up to 1.")

    if direction == '+':
        op = operator.add
    elif direction == '-':
        op = operator.sub
    else:
        # Bug fix: an unknown direction used to fall through and crash later
        # with UnboundLocalError; fail fast with a clear message instead.
        raise ValueError("direction must be '+' or '-', got %r" % (direction,))

    xys = []  # list of xy points of each slice
    ss = []   # list of size of each slice
    angle = float(start)
    for ratio in ratios:
        # Arc end angle; the slice polygon is the origin plus the arc points.
        end = op(angle, 2 * np.pi * ratio)
        n = round(ratio * res)  # number of points forming the arc
        x = [0] + np.cos(np.linspace(angle, end, n)).tolist()
        y = [0] + np.sin(np.linspace(angle, end, n)).tolist()
        xy = np.column_stack([x, y])
        xys.append(xy)
        # Record max |coordinate| per slice so marker sizes can be kept
        # consistent after matplotlib normalizes custom marker paths.
        ss.append(np.abs(xy).max())
        angle = end
    return xys, ss
def scatter_pie(
    x: Union[int, float, Sequence[int], Sequence[float]],
    y: Union[int, float, Sequence[int], Sequence[float]],
    ratios: Union[Sequence[float], Sequence[Sequence[float]]],
    colors: Union[List, str] = "tab10",
    res: int = 50,
    direction: str = "+",
    start: float = 0.0,
    ax=None,
    size=100,
    edgecolor="none",
    **kwargs) -> Axes:
    """
    Plot scatter pie plots.

    Parameters:
        x: list/array of x values
        y: list/array of y values
        ratios: List of lists of ratios that add up to 1.
        colors: List of colors in order, or name of colormap.
        res: Number of points around the circle.
        direction: '+' for counter-clockwise, or '-' for clockwise.
        start: Starting position in radians.
        kwargs: Arguments passed to :func:`matplotlib.pyplot.scatter`

    Returns:
        A :class:`~matplotlib.axes.Axes`
    """
    if ax is None:
        _, ax = plt.subplots()
    # Convert arguments to iterables when there is only one point.
    # Bug fix: the original condition included ``type(ratios[0] == float)``,
    # which evaluates to ``bool`` (always truthy) — the intended behavior is
    # simply to wrap scalar x/y and the flat ratios list.
    if not isinstance(x, Iterable):
        print("Plotting single point")
        x = [x]
        y = [y]
        ratios = [ratios]
    # Set colors (accepting str subclasses for colormap names).
    if isinstance(colors, str):
        cmap = plt.get_cmap(colors)
        colors = [cmap(i) for i in range(len(ratios[0]))]
    # Make one pie marker per unique set of ratios.
    df = pd.DataFrame({'x': x, 'y': y, 'ratios': ratios})
    df.ratios = df.ratios.apply(tuple)
    gb = df.groupby("ratios")
    for ratio in gb.groups:
        group = gb.get_group(ratio)
        xys, ss = pie_marker(ratio, res=res, direction=direction, start=start)
        for xy, s, color in zip(xys, ss, colors):
            # plot non-zero slices only
            if s != 0:
                ax.scatter(group.x, group.y, marker=xy, s=[s * s * size],
                           facecolor=color, edgecolor=edgecolor, **kwargs)
    return ax
def get_palette(categories, cmap):
    """
    Generate dictionary mapping categories to color.
    """
    colormap = plt.get_cmap(cmap)
    if len(categories) > len(colormap.colors):
        raise ValueError("Number of categories more than number of colors in cmap.")
    return {category: colormap(index) for index, category in enumerate(categories)}
def scatter_pie_from_df(
    df: pd.DataFrame,
    x: str,
    y: str,
    cols: Optional[list] = [],  # never mutated, so the shared default list is safe here
    normalize: bool = True,
    return_df: bool = False,
    palette: Optional[dict] = None,
    cmap: Optional[str] = "tab10",
    **kwargs,
) -> Axes:
    """
    Plot scatter pie based on columns in a DataFrame.
    Parameters:
        df: Dataframe containing x, y, and additional count columns.
        x: Column to use as x-values.
        y: Column to use as y-values.
        cols: List of columns in dataframe to use as ratios and plotting.
            If [], uses all columns besides x and y.
        normalize: If True, calculate ratios using selected columns.
            (BUGFIX: this flag was previously ignored and normalization
            always ran; it is now honored.)
        return_df: If True, also return normalized dataframe.
        palette: Dictionary mapping column name to color.
            If None, create mapping using cmap.
        cmap: Name of colormap to use if palette not provided.
        kwargs: Arguments passed to :func:`scatter_pie`
    Returns:
        A :class:`~matplotlib.axes.Axes` and normalized df if `return_df` is True.
    """
    # make copy of dataframe and set xy as index
    df = df.copy().set_index([x, y])
    # BUGFIX: use short-circuiting ``and`` instead of bitwise ``&`` so the
    # length test is never evaluated for non-sized ``cols``.
    if (type(cols) == list) and (len(cols) > 1):
        # use the specified list of columns
        df = df.loc[:, cols]
    elif cols != []:
        raise ValueError("cols must be a list of more than one column headers")
    categories = df.columns
    if normalize:
        # row normalize so each point's slice ratios sum to 1
        df = df.div(df.sum(axis=1), axis=0).fillna(0)
    df = df.reset_index()
    # generate mapping of category to color
    if palette is None:
        palette = get_palette(categories, cmap)
    ratios = df[categories].to_records(index=False).tolist()
    colors = [palette[cat] for cat in categories]
    ax = scatter_pie(df[x].values, df[y].values, ratios, colors, **kwargs)
    # generate legend as separate figure
    if return_df:
        return ax, df
    return ax
def scatter_legend(ax, labels, palette, **kwargs):
    """Attach a legend to *ax* with one colored proxy dot per label."""
    proxies = []
    for label in labels:
        proxies.append(plt.scatter([], [], color=palette[label], label=label))
    ax.legend(handles=proxies, **kwargs)
def expand_xlim(ax, percent=0.1):
    """Widen the x-axis limits of *ax* symmetrically by *percent* of the span."""
    low, high = ax.get_xlim()
    pad = (high - low) * percent
    ax.set_xlim(low - pad, high + pad)
    return
def expand_ylim(ax, percent=0.1):
    """Widen the y-axis limits of *ax* symmetrically by *percent* of the span."""
    low, high = ax.get_ylim()
    pad = (high - low) * percent
    ax.set_ylim(low - pad, high + pad)
    return
"numpy.abs",
"numpy.column_stack",
"numpy.sum",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.get_cmap"
] | [((3013, 3061), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'ratios': ratios}"], {}), "({'x': x, 'y': y, 'ratios': ratios})\n", (3025, 3061), True, 'import pandas as pd\n'), ((3646, 3664), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (3658, 3664), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1494), 'numpy.column_stack', 'np.column_stack', (['[x, y]'], {}), '([x, y])\n', (1486, 1494), True, 'import numpy as np\n'), ((2562, 2576), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2574, 2576), True, 'import matplotlib.pyplot as plt\n'), ((2872, 2892), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['colors'], {}), '(colors)\n', (2884, 2892), True, 'import matplotlib.pyplot as plt\n'), ((5854, 5900), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'color': 'palette[l]', 'label': 'l'}), '([], [], color=palette[l], label=l)\n', (5865, 5900), True, 'import matplotlib.pyplot as plt\n'), ((824, 838), 'numpy.sum', 'np.sum', (['ratios'], {}), '(ratios)\n', (830, 838), True, 'import numpy as np\n'), ((1536, 1546), 'numpy.abs', 'np.abs', (['xy'], {}), '(xy)\n', (1542, 1546), True, 'import numpy as np\n'), ((1359, 1385), 'numpy.linspace', 'np.linspace', (['start', 'end', 'n'], {}), '(start, end, n)\n', (1370, 1385), True, 'import numpy as np\n'), ((1421, 1447), 'numpy.linspace', 'np.linspace', (['start', 'end', 'n'], {}), '(start, end, n)\n', (1432, 1447), True, 'import numpy as np\n')] |
from glacier import glacier
def f1(name: str, verbose: bool = False) -> None:
    """Stub handler wired to the ``run`` entry (no behavior yet)."""
    pass
def f2(name: str, verbose: bool = False) -> None:
    """Stub handler wired to the ``build`` entry (no behavior yet)."""
    pass
def f3(name: str, verbose: bool = False) -> None:
    """Stub handler wired to the ``test`` entry (no behavior yet)."""
    pass
if __name__ == '__main__':
    # glacier is called with a mapping of names to handlers; presumably it
    # exposes each entry as a CLI subcommand -- TODO confirm against docs
    glacier({
        'run': f1,
        'build': f2,
        'test': f3,
    })
| [
"glacier.glacier"
] | [((244, 289), 'glacier.glacier', 'glacier', (["{'run': f1, 'build': f2, 'test': f3}"], {}), "({'run': f1, 'build': f2, 'test': f3})\n", (251, 289), False, 'from glacier import glacier\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import mysql.connector
import datetime
def searchComment(momentId, startPoint, addPoint, cnx):
    """Fetch one page (5 rows) of comments for a moment, newest first.

    The offset into the result set is ``startPoint * 5 + addPoint``.
    Returns the list of row dicts, or the string '0' on a database error.
    """
    query = 'SELECT * FROM moment_comment WHERE moment_id = %s ORDER BY comment_id DESC LIMIT %s, 5'
    offset = startPoint * 5 + addPoint
    try:
        cursor = cnx.cursor(dictionary=True)
        cursor.execute(query, (momentId, offset))
        return cursor.fetchall()
    # '0' signals a database failure to the caller
    except mysql.connector.Error as err:
        print('Something went wrong: {}'.format(err))
        return '0'
    finally:
        cursor.close()
def createComment(userId, momentId, content, cnx):
    """Insert a single comment stamped with today's date.

    Returns '1' on success, '0' on a database error (after rollback).
    """
    today = datetime.datetime.now().date()
    query = (
        'INSERT INTO moment_comment (comment_content, moment_id, user_id, comment_time) VALUES '
        '(%s, %s, %s, %s)'
    )
    try:
        cursor = cnx.cursor()
        cursor.execute(query, (content, momentId, userId, today))
        cnx.commit()
        return '1'
    except mysql.connector.Error as err:
        print('Something went wrong: {}'.format(err))
        # undo the partial insert before reporting failure
        cnx.rollback()
        return '0'
    finally:
        cursor.close()
def userComments(userId, pin, cnx):
    """Fetch up to 20 distinct moment ids the user has commented on.

    Results are ordered newest first, starting at offset ``pin``.
    Returns a list of row tuples, or the string '0' on a database error.
    """
    query = 'SELECT DISTINCT(moment_id) FROM moment_comment WHERE user_id = %s ORDER by moment_id DESC LIMIT %s, 20'
    try:
        cursor = cnx.cursor()
        cursor.execute(query, (userId, pin))
        return cursor.fetchall()
    # '0' signals a database failure to the caller
    except mysql.connector.Error as err:
        print('Something went wrong: {}'.format(err))
        return '0'
    finally:
        cursor.close()
| [
"datetime.datetime.now"
] | [((806, 829), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (827, 829), False, 'import datetime\n')] |
import argparse
import pickle
from pathlib import Path
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split

# Command-line interface: input CSVs and output artifact paths.
# BUGFIX: every help string previously read "an integer for the accumulator"
# (argparse-tutorial copy-paste); replaced with accurate descriptions.
parser = argparse.ArgumentParser(description='Train a logistic-regression survival model on the Titanic dataset.')
parser.add_argument('--input-train-data-path', type=str, help='CSV with training data (must contain a Survived column)')
parser.add_argument('--input-test-data-path', type=str, help='CSV with test data (Survived and PassengerId columns expected)')
parser.add_argument('--output-predictions-data-path', type=str, help='where to write the predictions CSV')
parser.add_argument('--output-model-path', type=str, help='where to pickle the fitted model')
args = parser.parse_args()

# Ensure output directories exist (the predictions dir was created twice before).
Path(args.output_predictions_data_path).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_model_path).parent.mkdir(parents=True, exist_ok=True)

train = pd.read_csv(args.input_train_data_path)
test = pd.read_csv(args.input_test_data_path)

# Hold out 20% of the training data for validation.
train, valid = train_test_split(train, train_size=0.8, random_state=150)
x_train = train.drop(['Survived'], axis=1).copy()
y_train = train['Survived'].copy()
x_valid = valid.drop(['Survived'], axis=1).copy()
y_valid = valid['Survived'].copy()
x_test = test.drop(['Survived', 'PassengerId'], axis=1).copy()
y_test = test['Survived'].copy()

model = LogisticRegression(solver='liblinear', random_state=42)
model.fit(x_train, y_train)

# Validation metrics. BUGFIX: accuracy and the confusion matrix were computed
# but silently discarded; they are now printed alongside the report.
pred_valid = model.predict(x_valid)
print('validation accuracy:', accuracy_score(y_valid, pred_valid))
print(confusion_matrix(y_valid, pred_valid))
print(classification_report(y_valid, pred_valid))

# Actual Test Prediction
pred_test = model.predict(x_test).astype(int)
output = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': pred_test})
output.to_csv(args.output_predictions_data_path, index=False)
print("Your submission was successfully saved!")

# BUGFIX: the original pickle.dump(model, open(path, 'wb')) leaked the file
# handle; the context manager guarantees it is flushed and closed.
with open(args.output_model_path, 'wb') as model_file:
    pickle.dump(model, model_file)
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"pathlib.Path",
"sklearn.linear_model.LogisticRegression",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] | [((275, 336), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (298, 336), False, 'import argparse\n'), ((994, 1033), 'pandas.read_csv', 'pd.read_csv', (['args.input_train_data_path'], {}), '(args.input_train_data_path)\n', (1005, 1033), True, 'import pandas as pd\n'), ((1041, 1079), 'pandas.read_csv', 'pd.read_csv', (['args.input_test_data_path'], {}), '(args.input_test_data_path)\n', (1052, 1079), True, 'import pandas as pd\n'), ((1096, 1153), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train'], {'train_size': '(0.8)', 'random_state': '(150)'}), '(train, train_size=0.8, random_state=150)\n', (1112, 1153), False, 'from sklearn.model_selection import train_test_split\n'), ((1430, 1485), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'random_state': '(42)'}), "(solver='liblinear', random_state=42)\n", (1448, 1485), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1553, 1588), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_valid', 'pred_valid'], {}), '(y_valid, pred_valid)\n', (1567, 1588), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n'), ((1590, 1627), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_valid', 'pred_valid'], {}), '(y_valid, pred_valid)\n', (1606, 1627), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n'), ((1760, 1830), 'pandas.DataFrame', 'pd.DataFrame', (["{'PassengerId': test.PassengerId, 'Survived': pred_test}"], {}), "({'PassengerId': test.PassengerId, 'Survived': pred_test})\n", (1772, 1830), True, 'import pandas as pd\n'), ((1635, 1677), 'sklearn.metrics.classification_report', 'classification_report', (['y_valid', 'pred_valid'], {}), '(y_valid, pred_valid)\n', (1656, 1677), False, 'from sklearn.metrics import accuracy_score, 
confusion_matrix, classification_report\n'), ((750, 789), 'pathlib.Path', 'Path', (['args.output_predictions_data_path'], {}), '(args.output_predictions_data_path)\n', (754, 789), False, 'from pathlib import Path\n'), ((832, 860), 'pathlib.Path', 'Path', (['args.output_model_path'], {}), '(args.output_model_path)\n', (836, 860), False, 'from pathlib import Path\n'), ((903, 942), 'pathlib.Path', 'Path', (['args.output_predictions_data_path'], {}), '(args.output_predictions_data_path)\n', (907, 942), False, 'from pathlib import Path\n')] |
import wave
import pyaudio
# Recording configuration: 16-bit mono audio at 16 kHz for 10 seconds.
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
RECORD_SECONDS = 10
CHUNK = 16000
WAV_OUTPUT_FILENAME = "audioFile2.wav"
EXTERNAL_MIC_NAME = 'USB audio CODEC: Audio (hw:1,0)'
mic_index = None
audio = pyaudio.PyAudio()
# Scan input-capable devices on host API 0 looking for the external USB mic.
info = audio.get_host_api_info_by_index(0)
numdevices = info.get('deviceCount')
for i in range(0, numdevices):
    print(audio.get_device_info_by_host_api_device_index(0, i).get('name'))
    if (audio.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:
        if audio.get_device_info_by_host_api_device_index(0, i).get('name') == EXTERNAL_MIC_NAME:
            mic_index = i
            break
print(mic_index)
print('USB Audio Device found, recording!')
# NOTE(review): if the mic was not found, mic_index is still None here and
# audio.open presumably falls back to the default input device -- confirm.
stream = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True,
                    frames_per_buffer=CHUNK, input_device_index=mic_index)
frames = []
# With CHUNK == RATE each read returns one second of audio.
for i in range(int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)
# Stops the recording
stream.stop_stream()
stream.close()
audio.terminate()
# Write the captured frames as a 16-bit (2-byte sample width) mono WAV file.
wav_file = wave.open(WAV_OUTPUT_FILENAME, 'wb')
wav_file.setnchannels(CHANNELS)
wav_file.setsampwidth(2)
wav_file.setframerate(RATE)
wav_file.writeframes(b''.join(frames))
wav_file.close()
"wave.open",
"pyaudio.PyAudio"
] | [((233, 250), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (248, 250), False, 'import pyaudio\n'), ((1096, 1132), 'wave.open', 'wave.open', (['WAV_OUTPUT_FILENAME', '"""wb"""'], {}), "(WAV_OUTPUT_FILENAME, 'wb')\n", (1105, 1132), False, 'import wave\n')] |
"""
Test script for data.py classes.
"""
import os
import numpy as np
import pytest
from bilby.core.prior import PriorDict, Uniform
from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood
class TestTargetedPulsarLikelhood(object):
    """
    Tests for the TargetedPulsarLikelihood class.
    """
    # NOTE(review): class name is missing an 'i' ("Likelhood"); pytest still
    # collects it because of the "Test" prefix.
    parfile = "J0123+3456.par"
    # fake heterodyned time series: one day of 1-minute samples, complex data
    # stored as two real columns
    times = np.linspace(1000000000.0, 1000086340.0, 1440)
    data = np.random.normal(0.0, 1e-25, size=(1440, 2))
    onesdata = np.ones((1440, 2))
    detector = "H1"
    @classmethod
    def setup_class(cls):
        # create a pulsar parameter file
        parcontent = """\
PSRJ J0123+3456
RAJ 01:23:45.6789
DECJ 34:56:54.321
F0 567.89
F1 -1.2e-12
PEPOCH 56789
H0 9.87e-26
COSIOTA 0.3
PSI 1.1
PHI0 2.4
"""
        # add content to the par file
        with open("J0123+3456.par", "w") as fp:
            fp.write(parcontent)
    @classmethod
    def teardown_class(cls):
        # remove the temporary par file created in setup_class
        os.remove("J0123+3456.par")
    def test_wrong_inputs(self):
        """
        Test that exceptions are raised for incorrect inputs to the
        TargetedPulsarLikelihood.
        """
        with pytest.raises(TypeError):
            TargetedPulsarLikelihood(None, None)
        # create HeterodynedData object (no par file)
        het = HeterodynedData(self.data, times=self.times, detector=self.detector)
        priors = dict()
        priors["h0"] = Uniform(0.0, 1.0e-23, "h0")
        # error with no par file
        with pytest.raises(ValueError):
            TargetedPulsarLikelihood(het, PriorDict(priors))
        het = HeterodynedData(
            self.data, times=self.times, detector=self.detector, par=self.parfile
        )
        mhet = MultiHeterodynedData(het)  # multihet object for testing
        # a prior dictionary (not None) is required in both cases
        with pytest.raises(TypeError):
            TargetedPulsarLikelihood(het, None)
        with pytest.raises(TypeError):
            TargetedPulsarLikelihood(mhet, None)
    def test_priors(self):
        """
        Test the parsed priors.
        """
        # bad priors (unexpected parameter names)
        priors = dict()
        priors["a"] = Uniform(0.0, 1.0, "blah")
        priors["b"] = 2.0
        het = HeterodynedData(
            self.data, times=self.times, detector=self.detector, par=self.parfile
        )
        with pytest.raises(ValueError):
            _ = TargetedPulsarLikelihood(het, PriorDict(priors))
    def test_wrong_likelihood(self):
        """
        Test with a bad likelihood name.
        """
        het = HeterodynedData(
            self.data, times=self.times, detector=self.detector, par=self.parfile
        )
        priors = dict()
        priors["h0"] = Uniform(0.0, 1.0e-23, "h0")
        # only known likelihood names should be accepted
        with pytest.raises(ValueError):
            _ = TargetedPulsarLikelihood(het, PriorDict(priors), likelihood="blah")
    def test_likelihood_null_likelihood(self):
        """
        Test likelihood and null likelihood.
        """
        het = HeterodynedData(
            self.data, times=self.times, detector=self.detector, par=self.parfile
        )
        priors = dict()
        priors["h0"] = Uniform(0.0, 1.0e-23, "h0")
        for likelihood in ["gaussian", "studentst"]:
            like = TargetedPulsarLikelihood(
                het, PriorDict(priors), likelihood=likelihood
            )
            like.parameters = {"h0": 0.0}
            # with h0 = 0 the signal vanishes, so the likelihood must equal
            # the noise-only likelihood
            assert like.log_likelihood() == like.noise_log_likelihood()
    def test_numba_likelihood(self):
        """
        Test likelihood using numba against the standard likelihood.
        """
        het = HeterodynedData(
            self.data, times=self.times, detector=self.detector, par=self.parfile
        )
        priors = dict()
        priors["h0"] = Uniform(0.0, 1.0e-23, "h0")
        for likelihood in ["gaussian", "studentst"]:
            like1 = TargetedPulsarLikelihood(
                het, PriorDict(priors), likelihood=likelihood
            )
            like1.parameters = {"h0": 1e-24}
            like2 = TargetedPulsarLikelihood(
                het, PriorDict(priors), likelihood=likelihood, numba=True
            )
            like2.parameters = {"h0": 1e-24}
            # the numba and pure-Python implementations should agree to
            # floating-point tolerance
            assert np.allclose(
                [like1.log_likelihood()], [like2.log_likelihood()], atol=1e-10, rtol=0.0
            )
| [
"numpy.random.normal",
"cwinpy.HeterodynedData",
"numpy.ones",
"cwinpy.MultiHeterodynedData",
"cwinpy.TargetedPulsarLikelihood",
"numpy.linspace",
"pytest.raises",
"bilby.core.prior.PriorDict",
"bilby.core.prior.Uniform",
"os.remove"
] | [((372, 417), 'numpy.linspace', 'np.linspace', (['(1000000000.0)', '(1000086340.0)', '(1440)'], {}), '(1000000000.0, 1000086340.0, 1440)\n', (383, 417), True, 'import numpy as np\n'), ((429, 473), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1e-25)'], {'size': '(1440, 2)'}), '(0.0, 1e-25, size=(1440, 2))\n', (445, 473), True, 'import numpy as np\n'), ((489, 507), 'numpy.ones', 'np.ones', (['(1440, 2)'], {}), '((1440, 2))\n', (496, 507), True, 'import numpy as np\n'), ((989, 1016), 'os.remove', 'os.remove', (['"""J0123+3456.par"""'], {}), "('J0123+3456.par')\n", (998, 1016), False, 'import os\n'), ((1335, 1403), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector'}), '(self.data, times=self.times, detector=self.detector)\n', (1350, 1403), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((1452, 1477), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1e-23)', '"""h0"""'], {}), "(0.0, 1e-23, 'h0')\n", (1459, 1477), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((1630, 1721), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (1645, 1721), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((1754, 1779), 'cwinpy.MultiHeterodynedData', 'MultiHeterodynedData', (['het'], {}), '(het)\n', (1774, 1779), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((2169, 2194), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1.0)', '"""blah"""'], {}), "(0.0, 1.0, 'blah')\n", (2176, 2194), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((2236, 2327), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 
'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (2251, 2327), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((2569, 2660), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (2584, 2660), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((2726, 2751), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1e-23)', '"""h0"""'], {}), "(0.0, 1e-23, 'h0')\n", (2733, 2751), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((3011, 3102), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (3026, 3102), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((3168, 3193), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1e-23)', '"""h0"""'], {}), "(0.0, 1e-23, 'h0')\n", (3175, 3193), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((3632, 3723), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (3647, 3723), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((3789, 3814), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1e-23)', '"""h0"""'], {}), "(0.0, 1e-23, 'h0')\n", (3796, 3814), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((1191, 1215), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1204, 1215), False, 'import pytest\n'), ((1229, 1265), 'cwinpy.TargetedPulsarLikelihood', 
'TargetedPulsarLikelihood', (['None', 'None'], {}), '(None, None)\n', (1253, 1265), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((1527, 1552), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1540, 1552), False, 'import pytest\n'), ((1825, 1849), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1838, 1849), False, 'import pytest\n'), ((1863, 1898), 'cwinpy.TargetedPulsarLikelihood', 'TargetedPulsarLikelihood', (['het', 'None'], {}), '(het, None)\n', (1887, 1898), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((1913, 1937), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1926, 1937), False, 'import pytest\n'), ((1951, 1987), 'cwinpy.TargetedPulsarLikelihood', 'TargetedPulsarLikelihood', (['mhet', 'None'], {}), '(mhet, None)\n', (1975, 1987), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((2359, 2384), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2372, 2384), False, 'import pytest\n'), ((2768, 2793), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2781, 2793), False, 'import pytest\n'), ((1596, 1613), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (1605, 1613), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((2432, 2449), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (2441, 2449), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((2841, 2858), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (2850, 2858), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((3316, 3333), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (3325, 3333), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((3938, 3955), 'bilby.core.prior.PriorDict', 
'PriorDict', (['priors'], {}), '(priors)\n', (3947, 3955), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((4106, 4123), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (4115, 4123), False, 'from bilby.core.prior import PriorDict, Uniform\n')] |
import pickle
import torch
import numpy as np
from transformers import RobertaModel, RobertaTokenizerFast
from retriever_train.dataset_config import DATASET_CONFIG, BASE_CONFIG
from retriever_train.data_utils import Instance
from retriever_train.utils import init_parent_model
class PrefixSuffixWrapper(object):
    """Inference wrapper around a trained retriever checkpoint.

    Loads training arguments (and optionally the model/tokenizer) from
    ``model_path`` and exposes helpers that encode raw sentences into
    prefix or suffix vectors via the model's ``get_vectors``.
    """
    def __init__(self, model_path, config_only=False):
        # model_path: checkpoint directory containing training_args.bin
        # config_only: when True, skip loading model weights/tokenizer
        self.model_path = model_path
        self.args = torch.load("{}/training_args.bin".format(self.model_path))
        if "prefix_truncate_dir" not in self.args:
            # hack for backward compatability with checkpoints saved before
            # this option was introduced
            self.args.prefix_truncate_dir = "left"
        # NOTE(review): assumes a CUDA device is available -- confirm before
        # running on CPU-only hosts.
        self.args.device = torch.cuda.current_device()
        # fall back to the base config when the training data dir is unknown
        if self.args.data_dir in DATASET_CONFIG:
            self.config = DATASET_CONFIG[self.args.data_dir]
        else:
            self.config = BASE_CONFIG
        print(self.config)
        if not config_only:
            self.model, self.tokenizer = init_parent_model(checkpoint_dir=model_path,
                                                           args=self.args,
                                                           model_class=RobertaModel,
                                                           tokenizer_class=RobertaTokenizerFast)
    def preprocess_sentences(self, contexts, vectors_type="prefix"):
        """Tokenize ``contexts`` and wrap them in a preprocessed ``Instance``.

        For ``vectors_type="suffix"`` each context is paired with a fixed
        placeholder prefix containing a ``<mask>`` token; otherwise the
        context is paired with itself.
        """
        args = self.args
        tokenizer = self.tokenizer
        instances = []  # NOTE(review): unused local, kept as-is
        all_context_ids = []
        for context in contexts:
            # collapse whitespace runs to single spaces
            context = " ".join(context.split())
            context_ids = tokenizer(context)["input_ids"]
            if vectors_type == "suffix":
                placeholder_prefix_ids = tokenizer("left context <mask> right context")["input_ids"]
                all_context_ids.append([placeholder_prefix_ids, context_ids])
            else:
                all_context_ids.append([context_ids, context_ids])
        instance = Instance(
            self.args, self.config, all_context_ids
        )
        instance.preprocess(tokenizer)
        return instance
    def encode_batch(self, contexts, vectors_type="prefix"):
        """Encode a batch of sentences and return their vectors.

        Returns whatever ``self.model.get_vectors`` produces for the
        requested ``vectors_type`` ("prefix" or "suffix").
        """
        args = self.args
        instance = self.preprocess_sentences(contexts, vectors_type)
        # unsqueeze(0) adds the batch dimension expected by the model
        input_tensors = {
            "prefices": torch.tensor(instance.prefices).unsqueeze(0),
            "prefix_masks": torch.tensor(instance.prefix_masks).unsqueeze(0),
            "suffices": torch.tensor(instance.suffices).unsqueeze(0),
            "suffix_masks": torch.tensor(instance.suffix_masks).unsqueeze(0)
        }
        return self.model.get_vectors(input_tensors, vectors_type=vectors_type)
| [
"retriever_train.utils.init_parent_model",
"torch.tensor",
"retriever_train.data_utils.Instance",
"torch.cuda.current_device"
] | [((666, 693), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (691, 693), False, 'import torch\n'), ((1902, 1951), 'retriever_train.data_utils.Instance', 'Instance', (['self.args', 'self.config', 'all_context_ids'], {}), '(self.args, self.config, all_context_ids)\n', (1910, 1951), False, 'from retriever_train.data_utils import Instance\n'), ((953, 1082), 'retriever_train.utils.init_parent_model', 'init_parent_model', ([], {'checkpoint_dir': 'model_path', 'args': 'self.args', 'model_class': 'RobertaModel', 'tokenizer_class': 'RobertaTokenizerFast'}), '(checkpoint_dir=model_path, args=self.args, model_class=\n RobertaModel, tokenizer_class=RobertaTokenizerFast)\n', (970, 1082), False, 'from retriever_train.utils import init_parent_model\n'), ((2244, 2275), 'torch.tensor', 'torch.tensor', (['instance.prefices'], {}), '(instance.prefices)\n', (2256, 2275), False, 'import torch\n'), ((2318, 2353), 'torch.tensor', 'torch.tensor', (['instance.prefix_masks'], {}), '(instance.prefix_masks)\n', (2330, 2353), False, 'import torch\n'), ((2392, 2423), 'torch.tensor', 'torch.tensor', (['instance.suffices'], {}), '(instance.suffices)\n', (2404, 2423), False, 'import torch\n'), ((2466, 2501), 'torch.tensor', 'torch.tensor', (['instance.suffix_masks'], {}), '(instance.suffix_masks)\n', (2478, 2501), False, 'import torch\n')] |
#! /usr/bin/env python3
import os
from random import choice
import filetype
from telegram.ext import Updater, CommandHandler
from tinydb import TinyDB, where
db = TinyDB('db.json')
admins_table = db.table("admins")  # chat ids registered as admins
given_questions = db.table("given")  # (chat_id, category) pairs already served
quiet_categories = ['meme']  # categories that are not forwarded to admins
def register_admin(update, context):
    """Record the sender's chat id in the admins table and confirm."""
    admin_chat = update.message.chat_id
    admins_table.insert({'chat_id': admin_chat})
    context.bot.send_message(chat_id=admin_chat, text="Added you to admins list")
def start(update, context):
    """Reply to /start with a usage example."""
    context.bot.send_message(chat_id=update.message.chat_id,
                             text="Использование: /question 3")
def question(update, context):
    """Hand out one random question file for the requested category.

    The category is the last word of the message.  Each chat receives at
    most one question per category (tracked in given_questions).  Unless
    the category is quiet, the file is also forwarded to all admins.
    """
    chat_id = update.message.chat_id
    category = update.message.text.split()[-1].lower()
    question_requested: bool = False
    # a category is valid iff a directory with that name exists under questions/
    if category not in os.listdir('questions'):
        text = "Не смог найти такую категорию вопросов"
    else:
        if given_questions.search((where('chat_id') == chat_id) & (where('category') == category)):
            text = "Уже выдал вам задачу на эту категорию"
        else:
            text = f"Вот вопрос на {category}"
            question_requested = True
    context.bot.send_message(chat_id=chat_id, text=text)
    if not question_requested:
        return
    # pick a random file from the category directory
    filename = f"questions/{category}/" + choice(os.listdir(f'questions/{category}/'))
    print(filename)
    with open(filename, 'rb') as doc:
        if category not in quiet_categories:
            # notify every admin with the requesting user and the file itself
            for admin in admins_table.all():
                context.bot.send_message(chat_id=admin['chat_id'],
                                         text=f'User {update.message.from_user} (category: {category}):')
                context.bot.send_document(chat_id=admin['chat_id'], document=doc)
                # rewind so the same handle can be sent again
                doc.seek(0)
        given_questions.insert({'chat_id': chat_id, 'category': category})
        if filetype.is_image(filename):
            # send images as photos so Telegram renders an inline preview
            context.bot.send_photo(chat_id=chat_id, photo=doc)
        else:
            context.bot.send_document(chat_id=chat_id, document=doc)
def main():
    """Build the bot from environment config, register handlers, and poll."""
    telegram_token = os.getenv('TELEGRAM_BOT_TOKEN')
    assert telegram_token is not None, "env TELEGRAM_BOT_TOKEN is not set"
    telegram_password = os.getenv('TELEGRAM_BOT_ADMIN_PASSWORD')
    assert telegram_password is not None, "env TELEGRAM_BOT_ADMIN_PASSWORD is not set"
    updater = Updater(telegram_token, use_context=True)
    dispatcher = updater.dispatcher
    # /question, /start, and the password-protected admin registration command
    for handler in (
        CommandHandler('question', question),
        CommandHandler('start', start),
        CommandHandler(f'register_admin_{telegram_password}', register_admin),
    ):
        dispatcher.add_handler(handler)
    updater.start_polling()
    updater.idle()
if __name__ == '__main__':
    main()
| [
"os.listdir",
"tinydb.TinyDB",
"filetype.is_image",
"os.getenv",
"tinydb.where",
"telegram.ext.CommandHandler",
"telegram.ext.Updater"
] | [((165, 182), 'tinydb.TinyDB', 'TinyDB', (['"""db.json"""'], {}), "('db.json')\n", (171, 182), False, 'from tinydb import TinyDB, where\n'), ((2088, 2119), 'os.getenv', 'os.getenv', (['"""TELEGRAM_BOT_TOKEN"""'], {}), "('TELEGRAM_BOT_TOKEN')\n", (2097, 2119), False, 'import os\n'), ((2220, 2260), 'os.getenv', 'os.getenv', (['"""TELEGRAM_BOT_ADMIN_PASSWORD"""'], {}), "('TELEGRAM_BOT_ADMIN_PASSWORD')\n", (2229, 2260), False, 'import os\n'), ((2363, 2404), 'telegram.ext.Updater', 'Updater', (['telegram_token'], {'use_context': '(True)'}), '(telegram_token, use_context=True)\n', (2370, 2404), False, 'from telegram.ext import Updater, CommandHandler\n'), ((816, 839), 'os.listdir', 'os.listdir', (['"""questions"""'], {}), "('questions')\n", (826, 839), False, 'import os\n'), ((1878, 1905), 'filetype.is_image', 'filetype.is_image', (['filename'], {}), '(filename)\n', (1895, 1905), False, 'import filetype\n'), ((2452, 2488), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""question"""', 'question'], {}), "('question', question)\n", (2466, 2488), False, 'from telegram.ext import Updater, CommandHandler\n'), ((2509, 2539), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start'], {}), "('start', start)\n", (2523, 2539), False, 'from telegram.ext import Updater, CommandHandler\n'), ((2560, 2629), 'telegram.ext.CommandHandler', 'CommandHandler', (['f"""register_admin_{telegram_password}"""', 'register_admin'], {}), "(f'register_admin_{telegram_password}', register_admin)\n", (2574, 2629), False, 'from telegram.ext import Updater, CommandHandler\n'), ((1318, 1354), 'os.listdir', 'os.listdir', (['f"""questions/{category}/"""'], {}), "(f'questions/{category}/')\n", (1328, 1354), False, 'import os\n'), ((942, 958), 'tinydb.where', 'where', (['"""chat_id"""'], {}), "('chat_id')\n", (947, 958), False, 'from tinydb import TinyDB, where\n'), ((974, 991), 'tinydb.where', 'where', (['"""category"""'], {}), "('category')\n", (979, 991), False, 'from tinydb 
import TinyDB, where\n')] |
###############################################################################
# mockDensData.py: generate mock data following a given density
###############################################################################
import os, os.path
import pickle
import multiprocessing
from optparse import OptionParser
import numpy
from scipy import ndimage
import fitsio
from galpy.util import bovy_coords, multi
import mwdust
import define_rcsample
import fitDens
import densprofiles
# Module-level caches populated lazily by generate():
dmap= None  # extinction map currently in use
dmapg15= None  # Green15 map (default / fill-in for other maps)
apo= None  # APOGEE selection function restored from disk
def generate(locations,
             type='exp',
             sample='lowlow',
             extmap='green15',
             nls=101,
             nmock=1000,
             H0=-1.49,
             _dmapg15=None,
             ncpu=1):
    """
    NAME:
       generate
    PURPOSE:
       generate mock data following a given density
    INPUT:
       locations - locations to be included in the sample
       type= ('exp') type of density profile to sample from
       sample= ('lowlow') for selecting mock parameters
       extmap= ('green15') extinction map to use ('marshall06' and others use Green15 to fill in unobserved regions)
       nls= (101) number of longitude bins to use for each field
       nmock= (1000) number of mock data points to generate
       H0= (-1.49) absolute magnitude (can be array w/ sampling spread)
       ncpu= (1) number of cpus to use to compute the probability
    OUTPUT:
       mockdata recarray with tags 'RC_GALR_H', 'RC_GALPHI_H', 'RC_GALZ_H'
    HISTORY:
       2015-04-03 - Written - Bovy (IAS)
    """
    if isinstance(H0,float): H0= [H0]
    # Setup the density function and its initial parameters
    rdensfunc= fitDens._setup_densfunc(type)
    mockparams= _setup_mockparams_densfunc(type,sample)
    densfunc= lambda x,y,z: rdensfunc(x,y,z,params=mockparams)
    # Setup the extinction map
    global dmap
    global dmapg15
    if _dmapg15 is None: dmapg15= mwdust.Green15(filter='2MASS H')
    else: dmapg15= _dmapg15
    if isinstance(extmap,mwdust.DustMap3D.DustMap3D):
        dmap= extmap
    elif extmap.lower() == 'green15':
        dmap= dmapg15
    elif extmap.lower() == 'marshall06':
        dmap= mwdust.Marshall06(filter='2MASS H')
    elif extmap.lower() == 'sale14':
        dmap= mwdust.Sale14(filter='2MASS H')
    elif extmap.lower() == 'drimmel03':
        dmap= mwdust.Drimmel03(filter='2MASS H')
    # Use brute-force rejection sampling to make no approximations
    # First need to estimate the max probability to use in rejection;
    # Loop through all locations and compute sampling probability on grid in
    # (l,b,D)
    # First restore the APOGEE selection function (assumed pre-computed)
    global apo
    selectFile= '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile,'rb') as savefile:
            apo= pickle.load(savefile)
    # Now compute the necessary coordinate transformations and evaluate the
    # maximum probability
    # Distance-modulus grid 7..15.5 mag; ds are the corresponding distances in kpc
    distmods= numpy.linspace(7.,15.5,301)
    ds= 10.**(distmods/5-2.)
    nbs= nls
    lnprobs= numpy.empty((len(locations),len(distmods),nbs,nls))
    radii= []
    lcens, bcens= [], []
    # ln p(l,b,D) per field, evaluated in parallel over locations
    lnprobs= multi.parallel_map(lambda x: _calc_lnprob(locations[x],nls,nbs,
                                                       ds,distmods,
                                                       H0,
                                                       densfunc),
                               range(len(locations)),
                               numcores=numpy.amin([len(locations),
                                                   multiprocessing.cpu_count(),ncpu]))
    lnprobs= numpy.array(lnprobs)
    for ll, loc in enumerate(locations):
        lcen, bcen= apo.glonGlat(loc)
        rad= apo.radius(loc)
        radii.append(rad) # save for later
        lcens.append(lcen[0])
        bcens.append(bcen[0])
    # Rejection-sampling envelope: global max of p, padded by 10%
    maxp= (numpy.exp(numpy.nanmax(lnprobs))-10.**-8.)*1.1 # Just to be sure
    # Now generate mock data using rejection sampling
    nout= 0
    arlocations= numpy.array(locations)
    arradii= numpy.array(radii)
    arlcens= numpy.array(lcens)
    arbcens= numpy.array(bcens)
    out= numpy.recarray((nmock,),
                        dtype=[('RC_DIST_H','f8'),
                               ('RC_DM_H','f8'),
                               ('RC_GALR_H','f8'),
                               ('RC_GALPHI_H','f8'),
                               ('RC_GALZ_H','f8')])
    while nout < nmock:
        # Oversample by 2x the remaining need; excess is trimmed below
        nnew= 2*(nmock-nout)
        # nnew new locations
        locIndx= numpy.floor(numpy.random.uniform(size=nnew)*len(locations)).astype('int')
        newlocations= arlocations[locIndx]
        # Point within these locations
        newds_coord= numpy.random.uniform(size=nnew)
        newds= 10.**((newds_coord*(numpy.amax(distmods)-numpy.amin(distmods))\
                          +numpy.amin(distmods))/5.-2.)
        newdls_coord= numpy.random.uniform(size=nnew)
        newdls= newdls_coord*2.*arradii[locIndx]\
            -arradii[locIndx]
        newdbs_coord= numpy.random.uniform(size=nnew)
        newdbs= newdbs_coord*2.*arradii[locIndx]\
            -arradii[locIndx]
        # Keep only draws inside the circular field of each location
        newr2s= newdls**2.+newdbs**2.
        keepIndx= newr2s < arradii[locIndx]**2.
        newlocations= newlocations[keepIndx]
        newds_coord= newds_coord[keepIndx]
        newdls_coord= newdls_coord[keepIndx]
        newdbs_coord= newdbs_coord[keepIndx]
        newds= newds[keepIndx]
        newdls= newdls[keepIndx]
        newdbs= newdbs[keepIndx]
        newls= newdls+arlcens[locIndx][keepIndx]
        newbs= newdbs+arbcens[locIndx][keepIndx]
        # Reject?
        # Interpolate ln p on the per-field grid at each candidate's
        # fractional (D, b, l) coordinates
        tps= numpy.zeros_like(newds)
        for nloc in list(set(newlocations)):
            lindx= newlocations == nloc
            pindx= arlocations == nloc
            coord= numpy.array([newds_coord[lindx]*(len(distmods)-1.),
                                newdbs_coord[lindx]*(nbs-1.),
                                newdls_coord[lindx]*(nls-1.)])
            tps[lindx]= \
                numpy.exp(ndimage.interpolation.map_coordinates(\
                    lnprobs[pindx][0],
                    coord,cval=-10.,
                    order=1))-10.**-8.
        XYZ= bovy_coords.lbd_to_XYZ(newls,newbs,newds,degree=True)
        Rphiz= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
                                            Xsun=define_rcsample._R0,
                                            Ysun=0.,
                                            Zsun=define_rcsample._Z0)
        # Accept candidates whose p exceeds a uniform draw against the envelope
        testp= numpy.random.uniform(size=len(newds))*maxp
        keepIndx= tps > testp
        if numpy.sum(keepIndx) > nmock-nout:
            # Trim acceptances so we never overflow the output recarray
            rangeIndx= numpy.zeros(len(keepIndx),dtype='int')
            rangeIndx[keepIndx]= numpy.arange(numpy.sum(keepIndx))
            keepIndx*= (rangeIndx < nmock-nout)
        out['RC_DIST_H'][nout:nout+numpy.sum(keepIndx)]= newds[keepIndx]
        out['RC_DM_H'][nout:nout+numpy.sum(keepIndx)]= newds_coord[keepIndx]*(numpy.amax(distmods)-numpy.amin(distmods))\
            +numpy.amin(distmods)
        out['RC_GALR_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[0][keepIndx]
        out['RC_GALPHI_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[1][keepIndx]
        out['RC_GALZ_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[2][keepIndx]
        nout= nout+numpy.sum(keepIndx)
    return (out,lnprobs)
def _setup_mockparams_densfunc(type,sample):
"""Return the parameters of the mock density for this type"""
if type.lower() == 'exp':
if sample.lower() == 'lowlow':
return [0.,1./0.3]
elif sample.lower() == 'solar':
return [1./3.,1./0.3]
else:
return [1./3.,1./0.3]
elif type.lower() == 'expplusconst':
if sample.lower() == 'lowlow':
return [0.,1./0.3,numpy.log(0.1)]
else:
return [1./3.,1./0.3,numpy.log(0.1)]
elif type.lower() == 'twoexp':
return [1./3.,1./0.3,1./4.,1./0.5,densprofiles.logit(0.5)]
elif type.lower() == 'brokenexp':
if sample.lower() == 'lowlow':
return [-0.2,1./.3,0.2,numpy.log(11.)]
elif sample.lower() == 'solar':
return [-1./6.,1./0.3,1./2.,numpy.log(8.)]
else:
return [-1./6.,1./0.3,1./2.,numpy.log(6.)]
elif type.lower() == 'brokenexpflare':
if sample.lower() == 'lowlow':
return [-0.2,1./.3,0.2,numpy.log(11.),-0.1]
elif sample.lower() == 'solar':
return [-1./6.,1./0.3,1./2.,numpy.log(8.),-0.1]
else:
return [-1./6.,1./0.3,1./2.,numpy.log(6.),-0.1]
elif type.lower() == 'gaussexp':
if sample.lower() == 'lowlow':
return [.4,1./0.3,numpy.log(11.)]
else:
return [1./3.,1./0.3,numpy.log(10.)]
def _calc_lnprob(loc,nls,nbs,ds,distmods,H0,densfunc):
    """Compute ln p(l,b,D) on an (nls x nbs x len(ds)) grid for one field.

    Uses the module globals 'apo' (APOGEE selection function) and
    'dmap'/'dmapg15' (extinction maps) set up by generate().
    Returns an array of shape (len(distmods), nbs, nls).
    """
    lcen, bcen= apo.glonGlat(loc)
    rad= apo.radius(loc)
    # Longitude/latitude grid spanning the circular field of this location
    ls= numpy.linspace(lcen-rad,lcen+rad,nls)
    bs= numpy.linspace(bcen-rad,bcen+rad,nbs)
    # Tile these
    tls= numpy.tile(ls,(len(ds),len(bs),1))
    tbs= numpy.swapaxes(numpy.tile(bs,(len(ds),len(ls),1)),1,2)
    tds= numpy.tile(ds,(len(ls),len(bs),1)).T
    XYZ= bovy_coords.lbd_to_XYZ(tls.flatten(),
                                tbs.flatten(),
                                tds.flatten(),
                                degree=True)
    Rphiz= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
                                        Xsun=define_rcsample._R0,
                                        Ysun=0.,
                                        Zsun=define_rcsample._Z0)
    # Evaluate probability density
    # Apparent H magnitude = distance modulus + extinction + absolute mag H0[0]
    tH= numpy.tile(distmods.T,(1,len(ls),len(bs),1))[0].T
    for ii in range(tH.shape[1]):
        for jj in range(tH.shape[2]):
            try:
                tH[:,ii,jj]+= dmap(ls[jj],bs[ii],ds)
            except (IndexError, TypeError,ValueError):
                # Fall back to the Green15 map where 'dmap' has no coverage
                try:
                    tH[:,ii,jj]+= dmapg15(ls[jj],bs[ii],ds)
                except IndexError: # assume zero outside
                    pass
    tH= tH.flatten()+H0[0]
    # density x selection function x cos(b) Jacobian x D^3 volume factor
    ps= densfunc(Rphiz[0],Rphiz[1],Rphiz[2])*apo(loc,tH)\
        *numpy.fabs(numpy.cos(tbs.flatten()/180.*numpy.pi))\
        *tds.flatten()**3.
    # Small floor keeps the log finite where p == 0
    return numpy.log(numpy.reshape(ps,(len(distmods),nbs,nls))\
                     +10.**-8.)
def get_options():
    """Build the command-line parser for the mock-data generator.

    Returns
    -------
    optparse.OptionParser
        parser whose defaults mirror generate()'s keyword arguments
        (except nmock, which defaults to 20000 on the CLI).

    Notes
    -----
    Fixes the --extmap help text: it advertised 'Marshall03', but the
    code matches 'marshall06' (mwdust.Marshall06).
    """
    usage = "usage: %prog [options] <savefilename>\n\nsavefilename= name of the file that the mock data will be saved to"
    parser = OptionParser(usage=usage)
    parser.add_option("--type",dest='type',default='exp',
                      help="Type of density profile")
    parser.add_option("--sample",dest='sample',default='lowlow',
                      help="Sample parameter for mock parameters")
    parser.add_option("--H0",dest='H0',default=-1.49,type='float',
                      help="RC absolute magnitude")
    parser.add_option("--nls",dest='nls',default=101,type='int',
                      help="Number of longitudes to bin each field in")
    parser.add_option("--nmock",dest='nmock',default=20000,type='int',
                      help="Number of mock samples to generate")
    # Dust map to use
    parser.add_option("--extmap",dest='extmap',default='green15',
                      help="Dust map to use ('Green15', 'Marshall06', 'Drimmel03', 'Sale14', or 'zero')")
    # Multiprocessing?
    parser.add_option("-m","--multi",dest='multi',default=1,type='int',
                      help="number of cpus to use")
    return parser
if __name__ == '__main__':
    # args[0] is the output FITS filename for the generated mock sample
    parser= get_options()
    options, args= parser.parse_args()
    # Sample every APOGEE field present in the red-clump catalog
    data= define_rcsample.get_rcsample()
    locations= list(set(list(data['LOCATION_ID'])))
    #locations= [4240,4242]
    out= generate(locations,
                  type=options.type,
                  sample=options.sample,
                  extmap=options.extmap,
                  nls=options.nls,
                  nmock=options.nmock,
                  H0=options.H0,
                  ncpu=options.multi)
    # generate() returns (mock recarray, lnprobs); only the mock data is saved
    fitsio.write(args[0],out[0],clobber=True)
| [
"mwdust.Drimmel03",
"define_rcsample.get_rcsample",
"densprofiles.logit",
"numpy.log",
"multiprocessing.cpu_count",
"numpy.array",
"mwdust.Green15",
"os.path.exists",
"numpy.linspace",
"numpy.nanmax",
"numpy.amax",
"numpy.amin",
"mwdust.Marshall06",
"pickle.load",
"galpy.util.bovy_coords... | [((1685, 1714), 'fitDens._setup_densfunc', 'fitDens._setup_densfunc', (['type'], {}), '(type)\n', (1708, 1714), False, 'import fitDens\n'), ((2767, 2793), 'os.path.exists', 'os.path.exists', (['selectFile'], {}), '(selectFile)\n', (2781, 2793), False, 'import os, os.path\n'), ((2999, 3029), 'numpy.linspace', 'numpy.linspace', (['(7.0)', '(15.5)', '(301)'], {}), '(7.0, 15.5, 301)\n', (3013, 3029), False, 'import numpy\n'), ((3669, 3689), 'numpy.array', 'numpy.array', (['lnprobs'], {}), '(lnprobs)\n', (3680, 3689), False, 'import numpy\n'), ((4060, 4082), 'numpy.array', 'numpy.array', (['locations'], {}), '(locations)\n', (4071, 4082), False, 'import numpy\n'), ((4096, 4114), 'numpy.array', 'numpy.array', (['radii'], {}), '(radii)\n', (4107, 4114), False, 'import numpy\n'), ((4128, 4146), 'numpy.array', 'numpy.array', (['lcens'], {}), '(lcens)\n', (4139, 4146), False, 'import numpy\n'), ((4160, 4178), 'numpy.array', 'numpy.array', (['bcens'], {}), '(bcens)\n', (4171, 4178), False, 'import numpy\n'), ((4188, 4330), 'numpy.recarray', 'numpy.recarray', (['(nmock,)'], {'dtype': "[('RC_DIST_H', 'f8'), ('RC_DM_H', 'f8'), ('RC_GALR_H', 'f8'), (\n 'RC_GALPHI_H', 'f8'), ('RC_GALZ_H', 'f8')]"}), "((nmock,), dtype=[('RC_DIST_H', 'f8'), ('RC_DM_H', 'f8'), (\n 'RC_GALR_H', 'f8'), ('RC_GALPHI_H', 'f8'), ('RC_GALZ_H', 'f8')])\n", (4202, 4330), False, 'import numpy\n'), ((8910, 8953), 'numpy.linspace', 'numpy.linspace', (['(lcen - rad)', '(lcen + rad)', 'nls'], {}), '(lcen - rad, lcen + rad, nls)\n', (8924, 8953), False, 'import numpy\n'), ((8956, 8999), 'numpy.linspace', 'numpy.linspace', (['(bcen - rad)', '(bcen + rad)', 'nbs'], {}), '(bcen - rad, bcen + rad, nbs)\n', (8970, 8999), False, 'import numpy\n'), ((9362, 9490), 'galpy.util.bovy_coords.XYZ_to_galcencyl', 'bovy_coords.XYZ_to_galcencyl', (['XYZ[:, 0]', 'XYZ[:, 1]', 'XYZ[:, 2]'], {'Xsun': 'define_rcsample._R0', 'Ysun': '(0.0)', 'Zsun': 'define_rcsample._Z0'}), '(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], 
Xsun=\n define_rcsample._R0, Ysun=0.0, Zsun=define_rcsample._Z0)\n', (9390, 9490), False, 'from galpy.util import bovy_coords, multi\n'), ((10481, 10506), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (10493, 10506), False, 'from optparse import OptionParser\n'), ((11604, 11634), 'define_rcsample.get_rcsample', 'define_rcsample.get_rcsample', ([], {}), '()\n', (11632, 11634), False, 'import define_rcsample\n'), ((12012, 12055), 'fitsio.write', 'fitsio.write', (['args[0]', 'out[0]'], {'clobber': '(True)'}), '(args[0], out[0], clobber=True)\n', (12024, 12055), False, 'import fitsio\n'), ((1937, 1969), 'mwdust.Green15', 'mwdust.Green15', ([], {'filter': '"""2MASS H"""'}), "(filter='2MASS H')\n", (1951, 1969), False, 'import mwdust\n'), ((4745, 4776), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'nnew'}), '(size=nnew)\n', (4765, 4776), False, 'import numpy\n'), ((4920, 4951), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'nnew'}), '(size=nnew)\n', (4940, 4951), False, 'import numpy\n'), ((5054, 5085), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'nnew'}), '(size=nnew)\n', (5074, 5085), False, 'import numpy\n'), ((5656, 5679), 'numpy.zeros_like', 'numpy.zeros_like', (['newds'], {}), '(newds)\n', (5672, 5679), False, 'import numpy\n'), ((6220, 6276), 'galpy.util.bovy_coords.lbd_to_XYZ', 'bovy_coords.lbd_to_XYZ', (['newls', 'newbs', 'newds'], {'degree': '(True)'}), '(newls, newbs, newds, degree=True)\n', (6242, 6276), False, 'from galpy.util import bovy_coords, multi\n'), ((6289, 6417), 'galpy.util.bovy_coords.XYZ_to_galcencyl', 'bovy_coords.XYZ_to_galcencyl', (['XYZ[:, 0]', 'XYZ[:, 1]', 'XYZ[:, 2]'], {'Xsun': 'define_rcsample._R0', 'Ysun': '(0.0)', 'Zsun': 'define_rcsample._Z0'}), '(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], Xsun=\n define_rcsample._R0, Ysun=0.0, Zsun=define_rcsample._Z0)\n', (6317, 6417), False, 'from galpy.util import bovy_coords, multi\n'), ((2860, 2881), 'pickle.load', 
'pickle.load', (['savefile'], {}), '(savefile)\n', (2871, 2881), False, 'import pickle\n'), ((6639, 6658), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (6648, 6658), False, 'import numpy\n'), ((7058, 7078), 'numpy.amin', 'numpy.amin', (['distmods'], {}), '(distmods)\n', (7068, 7078), False, 'import numpy\n'), ((7328, 7347), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (7337, 7347), False, 'import numpy\n'), ((2188, 2223), 'mwdust.Marshall06', 'mwdust.Marshall06', ([], {'filter': '"""2MASS H"""'}), "(filter='2MASS H')\n", (2205, 2223), False, 'import mwdust\n'), ((3922, 3943), 'numpy.nanmax', 'numpy.nanmax', (['lnprobs'], {}), '(lnprobs)\n', (3934, 3943), False, 'import numpy\n'), ((6781, 6800), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (6790, 6800), False, 'import numpy\n'), ((2275, 2306), 'mwdust.Sale14', 'mwdust.Sale14', ([], {'filter': '"""2MASS H"""'}), "(filter='2MASS H')\n", (2288, 2306), False, 'import mwdust\n'), ((3620, 3647), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3645, 3647), False, 'import multiprocessing\n'), ((6052, 6140), 'scipy.ndimage.interpolation.map_coordinates', 'ndimage.interpolation.map_coordinates', (['lnprobs[pindx][0]', 'coord'], {'cval': '(-10.0)', 'order': '(1)'}), '(lnprobs[pindx][0], coord, cval=-10.0,\n order=1)\n', (6089, 6140), False, 'from scipy import ndimage\n'), ((6885, 6904), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (6894, 6904), False, 'import numpy\n'), ((6956, 6975), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (6965, 6975), False, 'import numpy\n'), ((7001, 7021), 'numpy.amax', 'numpy.amax', (['distmods'], {}), '(distmods)\n', (7011, 7021), False, 'import numpy\n'), ((7022, 7042), 'numpy.amin', 'numpy.amin', (['distmods'], {}), '(distmods)\n', (7032, 7042), False, 'import numpy\n'), ((7114, 7133), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (7123, 7133), False, 'import 
numpy\n'), ((7192, 7211), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (7201, 7211), False, 'import numpy\n'), ((7268, 7287), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (7277, 7287), False, 'import numpy\n'), ((7817, 7831), 'numpy.log', 'numpy.log', (['(0.1)'], {}), '(0.1)\n', (7826, 7831), False, 'import numpy\n'), ((7880, 7894), 'numpy.log', 'numpy.log', (['(0.1)'], {}), '(0.1)\n', (7889, 7894), False, 'import numpy\n'), ((7973, 7996), 'densprofiles.logit', 'densprofiles.logit', (['(0.5)'], {}), '(0.5)\n', (7991, 7996), False, 'import densprofiles\n'), ((2361, 2395), 'mwdust.Drimmel03', 'mwdust.Drimmel03', ([], {'filter': '"""2MASS H"""'}), "(filter='2MASS H')\n", (2377, 2395), False, 'import mwdust\n'), ((4580, 4611), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'nnew'}), '(size=nnew)\n', (4600, 4611), False, 'import numpy\n'), ((4869, 4889), 'numpy.amin', 'numpy.amin', (['distmods'], {}), '(distmods)\n', (4879, 4889), False, 'import numpy\n'), ((8110, 8125), 'numpy.log', 'numpy.log', (['(11.0)'], {}), '(11.0)\n', (8119, 8125), False, 'import numpy\n'), ((4812, 4832), 'numpy.amax', 'numpy.amax', (['distmods'], {}), '(distmods)\n', (4822, 4832), False, 'import numpy\n'), ((4833, 4853), 'numpy.amin', 'numpy.amin', (['distmods'], {}), '(distmods)\n', (4843, 4853), False, 'import numpy\n'), ((8206, 8220), 'numpy.log', 'numpy.log', (['(8.0)'], {}), '(8.0)\n', (8215, 8220), False, 'import numpy\n'), ((8275, 8289), 'numpy.log', 'numpy.log', (['(6.0)'], {}), '(6.0)\n', (8284, 8289), False, 'import numpy\n'), ((8407, 8422), 'numpy.log', 'numpy.log', (['(11.0)'], {}), '(11.0)\n', (8416, 8422), False, 'import numpy\n'), ((8508, 8522), 'numpy.log', 'numpy.log', (['(8.0)'], {}), '(8.0)\n', (8517, 8522), False, 'import numpy\n'), ((8582, 8596), 'numpy.log', 'numpy.log', (['(6.0)'], {}), '(6.0)\n', (8591, 8596), False, 'import numpy\n'), ((8708, 8723), 'numpy.log', 'numpy.log', (['(11.0)'], {}), '(11.0)\n', (8717, 8723), 
False, 'import numpy\n'), ((8771, 8786), 'numpy.log', 'numpy.log', (['(10.0)'], {}), '(10.0)\n', (8780, 8786), False, 'import numpy\n')] |
#!/usr/bin/env python
# coding=utf-8
import eventlet
# BGPSpeaker needs sockets patched -> breaks SRL registration if done too late
# eventlet.monkey_patch( socket=True, select=True ) # adding only ( socket=True ) allows SRL, but then BGP doesn't work :(
eventlet.monkey_patch() # need thread too
# Google core libraries don't support eventlet; workaround
import grpc
from grpc.experimental import eventlet as grpc_eventlet
grpc_eventlet.init_eventlet() # Fix gRPC eventlet interworking, early
# May need to start a separate Python process for BGP
from datetime import datetime, timezone, timedelta
import time
import sys
import logging
import socket
import os
import re
import struct
import ipaddress
import json
import traceback
import subprocess
from threading import Timer
import pwd
# sys.path.append('/usr/lib/python3.6/site-packages/sdk_protos')
import sdk_service_pb2
import sdk_service_pb2_grpc
import lldp_service_pb2
import config_service_pb2
import sdk_common_pb2
# Local gNMI connection
from pygnmi.client import gNMIclient, telemetryParser
# To report state back
import telemetry_service_pb2
import telemetry_service_pb2_grpc
from logging.handlers import RotatingFileHandler
#
# BGP imports
#
import netns
import signal
from ryu.services.protocols.bgp.bgpspeaker import (BGPSpeaker,
EVPN_MULTICAST_ETAG_ROUTE,
EVPN_MAC_IP_ADV_ROUTE,
RF_L2_EVPN,
PMSI_TYPE_INGRESS_REP)
from ryu.lib.packet.bgp import (EvpnNLRI, BGPEvpnMacMobilityExtendedCommunity,
BGP_ATTR_TYPE_ORIGINATOR_ID,
BGP_ATTR_TYPE_EXTENDED_COMMUNITIES)
# Ryu has its own threading model
from ryu.lib import hub
#
# eBPF ARP filter imports
#
from bcc import BPF
from ryu.lib.packet import packet, ipv4, udp, vxlan, ethernet, arp, tcp
from ryu.ofproto import ether, inet
# Linux socket-option numbers for packet receive timestamps
# (presumably used by the ARP listener socket; confirm against its setsockopt calls)
SO_TIMESTAMP = 29 # us precision
SO_TIMESTAMPNS = 35 # Higher ns precision
############################################################
## Agent will start with this name
############################################################
agent_name='static_vxlan_agent'
############################################################
## Open a GRPC channel to connect to sdk_mgr on the dut
## sdk_mgr will be listening on 50053
############################################################
#channel = grpc.insecure_channel('unix:///opt/srlinux/var/run/sr_sdk_service_manager:50053')
channel = grpc.insecure_channel('127.0.0.1:50053')
# Every SDK RPC identifies this agent via gRPC metadata
metadata = [('agent_name', agent_name)]
stub = sdk_service_pb2_grpc.SdkMgrServiceStub(channel)
# Try global gNMI connection
#gnmi = gNMIclient(target=('unix:///opt/srlinux/var/run/sr_gnmi_server',57400),
# username="admin",password="<PASSWORD>",
# insecure=True, debug=False)
#gnmi.connect()
############################################################
## Subscribe to required event
## This proc handles subscription of: Interface, LLDP,
## Route, Network Instance, Config
############################################################
def Subscribe(stream_id, option):
    """Register this agent for notifications of the given kind.

    Parameters:
      stream_id: notification stream id obtained from sdk_mgr
      option: subscription kind; only 'cfg' (config changes) is supported

    Raises:
      ValueError: for an unsupported 'option'. Previously any other value
        fell through to an opaque NameError on the undefined 'entry'.
    """
    # XXX Does not pass pylint
    op = sdk_service_pb2.NotificationRegisterRequest.AddSubscription
    if option == 'cfg':
        entry = config_service_pb2.ConfigSubscriptionRequest()
        # entry.key.js_path = '.' + agent_name
        request = sdk_service_pb2.NotificationRegisterRequest(op=op, stream_id=stream_id, config=entry)
    else:
        raise ValueError(f"Unsupported subscription option: {option!r}")
    subscription_response = stub.NotificationRegister(request=request, metadata=metadata)
    logging.info( f'Status of subscription response for {option}:: {subscription_response.status}' )
############################################################
## Subscribe to all the events that Agent needs
############################################################
def Subscribe_Notifications(stream_id):
    """Register all notification subscriptions this agent needs.

    Returns False (and logs) when no stream id was supplied; otherwise
    subscribes to configuration changes.
    """
    if stream_id:
        # Subscribe to config changes, first
        Subscribe(stream_id, 'cfg')
    else:
        logging.info("Stream ID not sent.")
        return False
def Add_Telemetry( path_obj_list ):
    """Publish agent state via the SDK telemetry service.

    path_obj_list: iterable of (js_path, obj) pairs; each obj is
    JSON-serialized and attached to its js_path in one update request.
    Returns the telemetry service response.
    """
    tel_stub = telemetry_service_pb2_grpc.SdkMgrTelemetryServiceStub(channel)
    update_req = telemetry_service_pb2.TelemetryUpdateRequest()
    for js_path, obj in path_obj_list:
        state_entry = update_req.state.add()
        state_entry.key.js_path = js_path
        state_entry.data.json_content = json.dumps(obj)
    logging.info(f"Telemetry_Update_Request :: {update_req}")
    return tel_stub.TelemetryAddOrUpdate(request=update_req, metadata=metadata)
def Remove_Telemetry(js_paths):
    """Delete previously published agent state at the given js_paths.

    js_paths: iterable of telemetry path strings to remove.
    Returns the telemetry service response.
    """
    tel_stub = telemetry_service_pb2_grpc.SdkMgrTelemetryServiceStub(channel)
    delete_req = telemetry_service_pb2.TelemetryDeleteRequest()
    for js_path in js_paths:
        delete_req.key.add().js_path = js_path
    logging.info(f"Telemetry_Delete_Request :: {delete_req}")
    return tel_stub.TelemetryDelete(request=delete_req, metadata=metadata)
def Configure_BFD(state,remote_evpn_vtep):
    """Provision a BFD-protected /32 static route towards another EVPN proxy.

    Creates (via gNMI set) a next-hop-group for 'remote_evpn_vtep' with BFD
    failure detection sourced from this proxy's peer_address, plus a static
    /32 route using that group, and enables BFD on system0.0.
    """
    logging.info(f"Configure_BFD :: remote_evpn_vtep={remote_evpn_vtep}")
    nh_group_name = f"vtep-{remote_evpn_vtep}"
    static_route = {
      "static-routes": {
       "route": [
        {
          "prefix": f"{remote_evpn_vtep}/32",
          "admin-state": "enable",
          "next-hop-group": nh_group_name
        }
       ]
      },
      "next-hop-groups": {
        "group": [
         {
          "name": nh_group_name,
          "nexthop": [
           {
            "index": 0,
            "ip-address": f"{remote_evpn_vtep}",
            "admin-state": "enable",
            "failure-detection": {
              "enable-bfd": {
                # XXX Need to specify local VTEP IP in config, TODO read this
                # using c.get( system0.0 IP )
                "local-address": f"{state.params[ 'peer_address' ]}"
              }
            }
           }
          ]
         }
        ]
      }
    }
    updates = [
      ('/bfd/subinterface[name=system0.0]', { 'admin-state': 'enable' } ),
      ('/network-instance[name=default]', static_route)
    ]
    # NOTE(review): credentials are hardcoded; "<PASSWORD>" looks like a
    # scrubbed placeholder -- confirm and move to configuration/secrets.
    with gNMIclient(target=('unix:///opt/srlinux/var/run/sr_gnmi_server',57400),
                    username="admin",password="<PASSWORD>",insecure=True) as c:
       c.set( encoding='json_ietf', update=updates )
    #global gnmi
    #gnmi.set( encoding='json_ietf', update=updates )
def AnnounceMulticastRoute( state, rd, vtep_ip, vni ):
    """Announce an EVPN RT3 (inclusive multicast) route on behalf of a
    static VTEP 'vtep_ip', with route distinguisher 'rd' and VXLAN 'vni'.

    The originator IP is this proxy's source_address, letting peers
    distinguish proxied routes from ones the VTEP sent itself.
    """
    state.speaker.evpn_prefix_add(
        route_type=EVPN_MULTICAST_ETAG_ROUTE,
        route_dist=rd,
        # esi=0, # should be ignored
        ethernet_tag_id=0,
        # mac_addr='00:11:22:33:44:55', # not relevant for MC route
        ip_addr=state.params['source_address'], # originator == proxy IP
        tunnel_type='vxlan',
        vni=vni, # Sent as label
        gw_ip_addr=vtep_ip,
        next_hop=vtep_ip, # on behalf of remote VTEP
        pmsi_tunnel_type=PMSI_TYPE_INGRESS_REP,
        # Added via patch
        tunnel_endpoint_ip=vtep_ip
    )
def WithdrawMulticastRoute( state, rd, vtep_ip ):
    """Withdraw the EVPN RT3 (inclusive multicast) route previously
    announced with route distinguisher 'rd'; errors are logged, not raised.
    ('vtep_ip' is accepted for symmetry with the announce call but the
    withdraw is keyed on the RD.)
    """
    try:
        state.speaker.evpn_prefix_del(
            route_type=EVPN_MULTICAST_ETAG_ROUTE, # RT3
            route_dist=rd, # original RD
            # vni=mac_vrf['vni'], # not used/allowed in withdraw
            ethernet_tag_id=0
        )
    except Exception as ex:
        logging.error( ex )
def AnnounceRoute( state, mac_vrf, vtep_ip, mac, ip, mobility_seq ):
    """Announce an EVPN RT2 (MAC/IP) route for 'mac' on behalf of static
    VTEP 'vtep_ip' in the given mac_vrf.

    The IP is only included when the 'include_ip' parameter is set
    (enables remote proxy-ARP); 'mobility_seq' is the MAC-mobility
    sequence number carried in the route.
    """
    state.speaker.evpn_prefix_add(
        route_type=EVPN_MAC_IP_ADV_ROUTE, # RT2
        route_dist=AutoRouteDistinguisher(vtep_ip,mac_vrf),
        esi=0, # Single homed
        ethernet_tag_id=0,
        mac_addr=mac,
        ip_addr=ip if state.params['include_ip'] else None, # Enables remote peers to perform proxy ARP
        next_hop=vtep_ip, # on behalf of remote VTEP
        tunnel_type='vxlan',
        vni=mac_vrf['vni'],
        gw_ip_addr=vtep_ip,
        mac_mobility=mobility_seq # Sequence number for MAC mobility
    )
def WithdrawRoute( state, mac_vrf, vtep_ip, mac, ip=None ):
    """Withdraw the EVPN RT2 (MAC/IP) route for 'mac' announced on behalf
    of 'vtep_ip', and delete the matching telemetry state entry.

    BGP errors are logged and swallowed so telemetry cleanup still runs.
    """
    try:
        state.speaker.evpn_prefix_del(
            route_type=EVPN_MAC_IP_ADV_ROUTE, # RT2
            route_dist=AutoRouteDistinguisher(vtep_ip,mac_vrf), # original RD
            # vni=mac_vrf['vni'], # not used/allowed in withdraw
            ethernet_tag_id=0,
            mac_addr=mac,
            ip_addr=ip if state.params['include_ip'] else None
        )
    except Exception as ex:
        logging.error( ex )
    # Also remove telemetry
    js_path = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{vtep_ip}"}}.mac_vrf{{.name=="{mac_vrf["name"]}"}}.mac{{.address=="{mac}"}}'
    Remove_Telemetry( [js_path] )
def UpdateMACVRF( state, mac_vrf, new_vni=None, new_evi=None ):
    """Reconcile BGP announcements for a mac-vrf after a config change.

    An EVI change invalidates the auto-derived RDs, so every static VTEP
    is torn down (keeping its MACs) and re-added; a VNI change requires
    withdrawing and re-announcing all routes with the new label. Finally,
    when the mac-vrf is admin-enabled, all VTEPs/MACs are (re)announced.
    MAC status strings: 'static' = configured, 'static_announced' =
    currently advertised in BGP.
    """
    logging.info( f"UpdateMACVRF mac_vrf={mac_vrf} new_vni={new_vni} new_evi={new_evi}" )
    if new_evi:
        # Clean up old VTEPs, RDs need to be changed
        for static_vtep in list( mac_vrf['vxlan_vteps'].keys() ):
            Remove_Static_VTEP( state, mac_vrf, static_vtep, clear_macs=False )
        mac_vrf['evi'] = new_evi
    if new_vni:
        # Clean up old EVPN routes, VNI needs to be changed
        for vtep_ip,macs in mac_vrf['vxlan_vteps'].items():
            rd = AutoRouteDistinguisher( vtep_ip, mac_vrf )
            WithdrawMulticastRoute( state, rd, vtep_ip )
            for mac,status in macs.items():
                if status=='static_announced':
                    WithdrawRoute( state, mac_vrf, vtep_ip, mac )
                # Reset to un-announced; re-announcement happens below
                mac_vrf['vxlan_vteps'][ vtep_ip ][ mac ] = 'static'
        mac_vrf['vni'] = new_vni
    # Make sure all VTEPs exist
    if mac_vrf['admin_state'] == "enable":
        for vtep_ip,macs in mac_vrf['vxlan_vteps'].items():
            Add_Static_VTEP( state, mac_vrf, vtep_ip )
            for mac,status in macs.items():
                if status != 'static_announced' or new_evi:
                    AnnounceRoute( state, mac_vrf, vtep_ip, mac, ip=None, mobility_seq=-1 )
                    mac_vrf['vxlan_vteps'][ vtep_ip ][ mac ] = 'static_announced'
    else:
        logging.info( "UpdateMACVRF: admin-state not 'enable'" )
# Updates a single static VTEP
def UpdateMACVRF_StaticVTEP( state, mac_vrf, vtep_ip, macs ):
    """Reconcile one static VTEP's MAC list against BGP state.

    Withdraws routes for MACs no longer configured, adds the VTEP if new,
    and announces routes for MACs not yet in 'static_announced' state.
    If BGP isn't running yet (no state.speaker), only the local mac_vrf
    bookkeeping is updated; announcement happens when BGP comes up.
    """
    logging.info( f"UpdateMACVRF_StaticVTEP mac_vrf={mac_vrf} vtep_ip={vtep_ip} macs={macs}" )
    vteps = mac_vrf['vxlan_vteps']
    vtep = vteps[ vtep_ip ] if vtep_ip in vteps else None
    if hasattr( state, 'speaker' ): # BGP running?
        if vtep:
            # Clean up old MAC routes
            macs_to_keep = list( macs.keys() )
            for mac in vtep.keys():
                if mac not in macs_to_keep:
                    WithdrawRoute( state, mac_vrf, vtep_ip, mac )
        else:
            Add_Static_VTEP( state, mac_vrf, vtep_ip )
        # Announce new MACs
        for mac in macs.keys():
            if not vtep or mac not in vtep or vtep[mac] != 'static_announced':
                AnnounceRoute( state, mac_vrf, vtep_ip, mac, ip=None, mobility_seq=-1 )
                macs[ mac ] = 'static_announced'
    # Replace the stored MAC map wholesale with the (possibly annotated) new one
    vteps[ vtep_ip ] = macs
#
# Runs BGP EVPN as a separate thread>, using Ryu hub
#
#from threading import Thread
#class BGPEVPNThread(Thread):
# def __init__(self):
# Thread.__init__(self)
def runBGPThread( state ):
LOCAL_LOOPBACK = state.params['source_address']
NEIGHBOR = state.params[ 'peer_address' ]
if NEIGHBOR=="127.0.0.1": # Connect to 127.0.0.1 does not work
NEIGHBOR = LOCAL_LOOPBACK
evpn_vteps = {}
def best_path_change_handler(event):
logging.info( f'BGP best path changed: {event.path} prefix={event.prefix} NLRI={event.path.nlri}' )
# event.remote_as, event.prefix, event.nexthop, event.is_withdraw, event.path )
try:
# Could remove VTEP IP upon withdraw too
if not event.is_withdraw:
originator_id = event.path.get_pattr(BGP_ATTR_TYPE_ORIGINATOR_ID)
if event.path.nlri.type == EvpnNLRI.INCLUSIVE_MULTICAST_ETHERNET_TAG:
# SRL EVPN VTEP does not normally include an 'originator' attribute
if originator_id and originator_id.value != event.nexthop:
logging.info( f"Detected another EVPN proxy: {originator_id.value}" )
# TODO if (state.enabled), remove upon withdraw
# Fails: timeout
# Configure_BFD(state,originator_id.value)
else:
logging.info( f"Multicast route from EVPN VTEP: {event.nexthop}" )
evpn_vteps[ event.nexthop ] = event.remote_as
# Could withdraw routes and remove static MACs if this IP matches
# a static vtep in our configuration
data = { 'evpn_vteps': sorted(evpn_vteps.keys()) }
Add_Telemetry( [('.vxlan_proxy', data)] )
# check for RT2 MAC moves between static VTEPs and EVPN VTEPs
# event.label is reduced to the 20-bit MPLS label
elif hasattr( event.path.nlri, 'vni'):
vni = event.path.nlri.vni
if vni not in state.mac_vrfs:
logging.warning( f"BGP: No mac-vrf mapping for VNI: {vni}" )
return
mac_vrf = state.mac_vrfs[ vni ]
logging.info( f"Received EVPN route update for VNI {vni}: {mac_vrf}" )
mac = event.path.nlri.mac_addr
if mac in mac_vrf['macs']:
cur = mac_vrf['macs'][ mac ]
# Don't bother checking IP; SRL MAC-VRF doesn't send it
# Only other proxies do
if cur['vtep'] != event.nexthop and cur['vtep'] != 'tbd':
logging.info( f"EVPN MAC-move detected {cur['vtep']} -> {event.nexthop}" )
# if this is from an EVPN VTEP, withdraw our route - our job is done
if not originator_id or originator_id.value == event.nexthop:
logging.info( f"Removing MAC moved to EVPN VTEP {event.nexthop} from EVPN proxy: {mac}" )
WithdrawRoute( state, mac_vrf, cur['vtep'], mac, cur['ip'] )
del mac_vrf['macs'][ mac ]
# else (from other EVPN proxy) only withdraw if VTEP IP changed, but don't remove MAC
# as we need to keep track of the mobility sequence number
elif originator_id and originator_id.value != event.nexthop:
# Check Mobility sequence - route may be stale
def GetMACMobility():
ext_comms = event.path.get_pattr(BGP_ATTR_TYPE_EXTENDED_COMMUNITIES)
for c in ext_comms.communities:
if isinstance( c, BGPEvpnMacMobilityExtendedCommunity ):
return c.sequence_number
return -1 # not present
if GetMACMobility() < cur['seq']:
logging.info( f"Local mobility sequence {cur['seq']} higher than peer - keeping route" )
return
logging.info( f"Withdrawing MAC {mac} route announced by other EVPN proxy {originator_id.value} with different VTEP: {event.nexthop}" )
WithdrawRoute( state, mac_vrf, cur['vtep'], mac, cur['ip'] )
cur['vtep'] = "tbd" # Mark as withdrawn
else:
logging.warning( "TODO: Compare/update mobility sequence number, even if same VTEP nexthop?" )
else:
logging.info( "Not multicast and no VNI -> ignoring" )
# Never remove EVPN VTEP from list, assume once EVPN = always EVPN
except Exception as ex:
tb_str = ''.join(traceback.format_tb(ex.__traceback__))
logging.error( f"Exception in best_path_change_handler: {ex} ~ {tb_str}" )
def peer_up_handler(router_id, remote_as):
logging.warning( f'Peer UP: {router_id} {remote_as}' )
# Start ARP thread if not already
if not hasattr(state,'arp_threads') and state.params['vxlan_interfaces']!=[]:
logging.info( "Starting ARP listener thread(s)..." )
state.arp_threads = {}
for i in state.params['vxlan_interfaces']:
state.arp_threads[i] = {}
state.arp_threads[i]['thread'] = hub.spawn( ARP_receiver_thread, state, i, evpn_vteps )
def peer_down_handler(router_id, remote_as):
    """Callback from BGPSpeaker when a peering session is lost; log only."""
    logging.warning(f'Peer DOWN: {router_id} {remote_as}')
# need to create socket on localhost on a non-default port, not port 179
# Need to connect from loopback IP, not 127.0.0.x
# Router ID is used as tunnel endpoint in BGP UPDATEs
# => Code updated to allow any tunnel endpoint IP
# Wait for gNMI socket to exist
# while not os.path.exists('/opt/srlinux/var/run/sr_gnmi_server'):
# logging.info("Waiting for gNMI unix socket to be created...")
# eventlet.sleep(1)
# During system startup, wait for netns to be created
while not os.path.exists('/var/run/netns/srbase-default'):
logging.info("Waiting for srbase-default netns to be created...")
eventlet.sleep(1)
logging.info("Starting BGP thread in srbase-default netns...")
# Requires root permissions
# Ryu modified to support net_ns parameter, needed for reconnections
# with netns.NetNS(nsname="srbase-default"):
logging.info("Starting BGPSpeaker in netns...")
state.speaker = BGPSpeaker(bgp_server_hosts=[LOCAL_LOOPBACK],
bgp_server_port=1179,
net_ns="srbase-default", # custom addition
as_number=state.params['local_as'],
local_pref=state.params['local_preference'],
router_id=LOCAL_LOOPBACK,
best_path_change_handler=best_path_change_handler,
peer_up_handler=peer_up_handler,
peer_down_handler=peer_down_handler)
# Add any static VTEPs/VNIs, before starting ARP thread
for vni,mac_vrf in state.mac_vrfs.items():
UpdateMACVRF( state, mac_vrf )
logging.info( f"Connecting to neighbor {NEIGHBOR}..." )
# TODO enable_four_octet_as_number=True, enable_enhanced_refresh=True
state.speaker.neighbor_add( NEIGHBOR,
remote_as=state.params['peer_as'],
local_as=state.params['local_as'],
enable_ipv4=False, enable_evpn=True,
connect_mode='active') # iBGP with SRL
# After connecting to BGP peer, start ARP thread (in different netns)
eventlet.sleep(10) # Wait for peer_up event using peer_up_handler
# hub.spawn( ARP_receiver_thread, speaker, params, evpn_vteps )
while True:
logging.info( "eventlet sleep loop..." )
eventlet.sleep(30) # every 30s wake up
def AutoRouteDistinguisher( vtep_ip, mac_vrf ):
    """Build the EVPN Route Distinguisher "<vtep_ip>:<evi>" for a static VTEP.

    The static VTEP's own IP is used — exactly what the VTEP would advertise
    if it ran EVPN itself — which is why the agent creates one local VRF per
    static VTEP.
    """
    evi = mac_vrf['evi']
    return f"{vtep_ip}:{evi}"
def AutoRouteTarget( state, mac_vrf ):
    """Build the EVPN Route Target "<local-as>:<evi>" for a mac-vrf."""
    local_as = state.params['local_as']
    return f"{local_as}:{mac_vrf['evi']}"
def Add_Static_VTEP( state, mac_vrf, remote_ip, dynamic=False ):
    """Provision a static (non-EVPN) VTEP in the given mac-vrf.

    Creates a per-VTEP BGP VRF if one does not exist yet, records the VTEP
    in telemetry, and announces an EVPN inclusive-multicast route on the
    static VTEP's behalf.

    Parameters
    ----------
    state : State
        Global agent state (BGP speaker handle, `bgp_vrfs` bookkeeping).
    mac_vrf : dict
        The mac-vrf this VTEP belongs to; reads 'name', 'evi' and 'vni'.
    remote_ip : str
        IP address of the static VTEP; also used to derive the RD.
    dynamic : bool, default False
        True when the VTEP was auto-discovered from ARP snooping rather
        than statically configured (flagged as such in telemetry).

    Returns
    -------
    bool
        Always True.
    """
    # RD is derived from the static VTEP's own IP -> one VRF per static VTEP
    rd = AutoRouteDistinguisher( remote_ip, mac_vrf )
    if rd not in state.bgp_vrfs:
        rt = AutoRouteTarget(state,mac_vrf)
        logging.info(f"Add_Static_VTEP: Adding VRF...RD={rd} RT={rt}")
        state.speaker.vrf_add(route_dist=rd,import_rts=[rt],export_rts=[rt],route_family=RF_L2_EVPN)
        state.bgp_vrfs[ rd ] = remote_ip
    else:
        logging.info(f"Add_Static_VTEP: Assuming VRF for RD={rd} exists...")
    # Reflect the VTEP (and its mac-vrf membership) in agent telemetry state
    js_path = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{remote_ip}"}}'
    now_ts = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
    data = {
        'last_update' : { "value" : now_ts },
    }
    if dynamic:
        data['dynamic'] = { "value" : True }
    js_path2 = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{remote_ip}"}}.mac_vrf{{.name=="{mac_vrf["name"]}"}}'
    data2 = { 'evi': { 'value': mac_vrf['evi'] }, 'vni': { 'value': mac_vrf['vni'] } }
    Add_Telemetry( [(js_path, data),(js_path2,data2)] )
    logging.info("Adding EVPN multicast route...")
    #
    # For RD use the static VTEP's IP, just like it would do if it was
    # EVPN enabled itself. That way, any proxy will announce the same
    # route
    #
    AnnounceMulticastRoute( state, rd, remote_ip, mac_vrf['vni'] )
    return True
def Remove_Static_VTEP( state, mac_vrf, remote_ip, clear_macs=True ):
    """Tear down a static VTEP: withdraw its multicast route, delete its
    per-VTEP BGP VRF and remove the associated telemetry state.

    Parameters
    ----------
    state : State
        Global agent state (BGP speaker handle, `bgp_vrfs` bookkeeping).
    mac_vrf : dict
        The mac-vrf the VTEP belongs to; reads 'name' and 'vxlan_vteps'.
    remote_ip : str
        IP address of the static VTEP being removed.
    clear_macs : bool, default True
        When True, also drop the VTEP entry from mac_vrf['vxlan_vteps'].

    Returns
    -------
    bool
        False when no BGP VRF exists for this VTEP, True otherwise.
    """
    rd = AutoRouteDistinguisher( remote_ip, mac_vrf )
    if rd not in state.bgp_vrfs:
        logging.warning( f"Remove_Static_VTEP: BGP MAC VRF does not exists: {rd}" )
        return False
    logging.info(f"Remove_Static_VTEP: Removing VRF...RD={rd}")
    # Deleting the VRF should withdraw all routes too? Doesn't look like it,
    # so withdraw the multicast route explicitly first
    WithdrawMulticastRoute(state,rd,remote_ip)
    state.speaker.vrf_del(route_dist=rd)
    # This isn't sufficient
    js_path = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{remote_ip}"}}'
    js_path2 = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{remote_ip}"}}.mac_vrf{{.name=="{mac_vrf["name"]}"}}'
    Remove_Telemetry( [js_path,js_path2] )
    if clear_macs:
        del mac_vrf['vxlan_vteps'][ remote_ip ]
    del state.bgp_vrfs[ rd ]
    return True
def HandleTCPTimestamps( ipHeaders, tcpHeaders, ancdata ):
    """Log TCP timestamp options together with any SO_TIMESTAMP ancillary data.

    Scans `ancdata` (as returned by socket.recvmsg) for an SO_TIMESTAMP
    control message to recover the kernel receive time, collects the
    (ts_val, ts_ecr) pairs from the TCP header's timestamp options, and
    logs both together with the IP src/dst.
    """
    ts_sec = 0
    ts_ns = 0
    for level, ctype, cdata in ancdata:
        logging.info(f'HandleTCPTimestamps ancdata: cmsg_level={level}, cmsg_type={ctype}, cmsg_data({len(cdata)})={cdata})')
        if level == socket.SOL_SOCKET and ctype == SO_TIMESTAMP:  # Removed 'NS'
            unpacked = struct.unpack("iiii", cdata)
            ts_sec = unpacked[0]
            ts_ns = unpacked[2]
            break
    ts = [ (opt.ts_val, opt.ts_ecr)
           for opt in tcpHeaders.option
           if opt.kind == tcp.TCP_OPTION_KIND_TIMESTAMPS ]
    logging.info(f"HandleTCPTimestamps: {ts_sec}.{ts_ns}={ts} IP {ipHeaders.src}>{ipHeaders.dst}")
def ARP_receiver_thread( state, vxlan_intf, evpn_vteps ):
    """Snoop ARP-over-VXLAN on `vxlan_intf` and proxy EVPN MAC routes.

    Runs as a green thread, one per VXLAN interface: attaches an eBPF
    socket filter (filter-vxlan-arp.c) in the 'srbase' netns, then loops
    forever reading filtered frames. Inner ARP packets originated by
    static (non-EVPN) VTEPs are used to learn MAC/IP -> VTEP bindings,
    which are announced as EVPN MAC/IP routes on their behalf, including
    MAC-mobility sequence handling on moves. Only exits on an exception
    on the socket (e.g. when it is closed during config cleanup).
    """
    logging.info( f"Starting ARP listener on interface={vxlan_intf} params {state.params}" )
    # initialize BPF - load source code from filter-vxlan-arp.c
    _self = state.arp_threads[vxlan_intf]
    _self['bpf'] = bpf = BPF(src_file = "filter-vxlan-arp.c",debug = 0)
    #load eBPF program http_filter of type SOCKET_FILTER into the kernel eBPF vm
    #more info about eBPF program types
    #http://man7.org/linux/man-pages/man2/bpf.2.html
    function_arp_filter = bpf.load_func("vxlan_arp_filter", BPF.SOCKET_FILTER)
    #create raw socket, bind it to interface
    #attach bpf program to socket created
    with netns.NetNS(nsname="srbase"):
        BPF.attach_raw_socket(function_arp_filter, vxlan_intf)
    socket_fd = function_arp_filter.sock
    sock = socket.fromfd(socket_fd,socket.PF_PACKET,socket.SOCK_RAW,socket.IPPROTO_IP)
    # sock.setsockopt(socket.SOL_SOCKET, SO_TIMESTAMP, 1) # Not NS
    sock.setblocking(True)
    # To make sendto work?
    # sock.bind((vxlan_intf, 0x0800))
    _self['socket'] = sock # Used for close()
    try:
        while 1:
            packet_str = os.read(socket_fd,2048)
            packet_bytearray = bytearray(packet_str)
            try:
                # or recvmmsg for multiple?
                # raw_data, ancdata, flags, address = sock.recvmsg(65535, 1024)
                # packet_bytearray = bytearray(raw_data)
                pkt = packet.Packet( packet_bytearray )
                #
                # 6 layers:
                # 0: ethernet
                # 1: IP -> VTEP IP (other side, local VTEP)
                # 2: UDP
                # 3: VXLAN -> VNI
                # 4: ethernet (inner)
                # 5: ARP -> MAC, IP
                #
                for p in pkt:
                    logging.debug( f"ARP packet:{p.protocol_name}={p}" )
                    if p.protocol_name == 'vlan':
                        logging.debug( f'vlan id = {p.vid}' )
                    elif p.protocol_name == 'vxlan':
                        logging.info( f'vni = {p.vni}' )
                _ip = pkt.get_protocol( ipv4.ipv4 )
                #_tcp = pkt.get_protocol( tcp.tcp )
                #if _tcp:
                #    HandleTCPTimestamps( _ip, _tcp, ancdata )
                #    continue
                _vxlan = pkt.get_protocol( vxlan.vxlan )
                _arp = pkt.get_protocol( arp.arp )
                vni = _vxlan.vni
                if vni not in state.mac_vrfs:
                    logging.info( f"VNI not enabled for proxy EVPN: {vni}" )
                    continue;
                mac_vrf = state.mac_vrfs[ vni ]
                # To compensate for lack of VXLAN flow hashing, we vary the src IP
                # Correct it by removing the added entropy (IP ID) in 2nd octet
                # if _arp.opcode == 24:
                #    digits = [ int(i) for i in _ip.src.split('.') ]
                #    digits[1] ^= _ip.identification % 256
                #    _ip.src = ".".join( map(str,digits) )
                if _ip.src in evpn_vteps:
                    # From an EVPN VTEP: only interesting as an ECMP path probe
                    if (state.params['ecmp_path_probes'] and _ip.dst in evpn_vteps
                        and _arp.opcode==24): # Ignore regular responses
                        ReplyARPProbe( state, sock, pkt, _ip.src, _ip.dst, _arp.opcode, mac_vrf )
                    else:
                        logging.info( f"ARP({'req' if _arp.opcode==1 else 'res'}) from EVPN VTEP {_ip.src} -> ignoring" )
                    continue
                elif _ip.dst in evpn_vteps: # typically == us, always? not when routing VXLAN to other VTEPs
                    static_vtep = _ip.src
                    mac = _arp.src_mac # Same field in both request and response packets
                    ip = _arp.src_ip
                    logging.info( f"ARP({'req' if _arp.opcode==1 else 'res'}) from static VTEP: {mac} {ip}" )
                else:
                    logging.info( f"ARP packet:neither src={_ip.src} nor dst={_ip.dst} is EVPN vtep? {evpn_vteps}" )
                    continue;
                # Check that the static VTEP is configured. Could dynamically add VTEPs
                # upon discovery (but requires ARP snooping)
                if static_vtep not in mac_vrf['vxlan_vteps']:
                    if not state.params[ "auto_discover_static_vteps" ]:
                        logging.info( f"VTEP {static_vtep} not configured in mac-vrf and auto-discovery disabled" )
                        continue
                    else:
                        logging.info( f"Dynamically adding auto-discovered VTEP {static_vtep}" )
                        Add_Static_VTEP( state, mac_vrf, static_vtep, dynamic=True )
                        mac_vrf['vxlan_vteps'][ static_vtep ] = "dynamic-from-arp"
                # Announce EVPN route(s)
                mobility_seq = None # First time: no attribute
                if mac in mac_vrf['macs']:
                    cur = mac_vrf['macs'][ mac ]
                    logging.info( f"MAC {mac} already announced: {cur}, checking for MAC move" )
                    # TODO various cases: different IP, different VTEP, ...
                    if cur['vtep'] == static_vtep:
                        logging.info( f"VNI {vni}: MAC {mac} already announced with VTEP {static_vtep}" )
                        # If IP remains the same, do nothing
                        if cur['ip'] == ip:
                            continue
                        # Could also opt to keep both routes: MAC -> [ip],
                        # Spec says: "If there are multiple IP addresses associated with a MAC address,
                        # then multiple MAC/IP Advertisement routes MUST be generated, one for
                        # each IP address. For instance, this may be the case when there are
                        # both an IPv4 and an IPv6 address associated with the same MAC address
                        # for dual-IP-stack scenarios. When the IP address is dissociated with
                        # the MAC address, then the MAC/IP Advertisement route with that
                        # particular IP address MUST be withdrawn."
                        #
                        # For the purpose of this EVPN proxy application (L2 reachability)
                        # it is sufficient to keep 1 IP address association
                        # Maybe keep track of sequence number per IP, with newer ones having a higher sequence?
                        logging.info( f"IP change detected: {cur['ip']}->{ip}, updating EVPN" )
                    # RFC talks about different ESI as reason for mobility seq inc
                    # We have ESI 0 == single homed
                    mobility_seq = cur['seq'] + 1
                    #
                    # If this is the last MAC route for this VTEP, could also remove the VRF
                    # and withdraw the multicast route? (for dynamically added VRFs)
                    #
                    if cur['vtep'] != "tbd":
                        logging.info( f"VTEP changed {cur['vtep']}->{static_vtep}, withdrawing my route" )
                        WithdrawRoute( state, mac_vrf, cur['vtep'], mac, cur['ip'] )
                        mac_vrf['ips'].pop( cur['ip'], None ) # Remove any IP mapping too
                    else:
                        logging.info( f"EVPN route for {mac} already withdrawn triggered by other EVPN proxy route" )
                    # Could add a timestamp (last seen) + aging
                    logging.info( f"VNI {vni}: MAC {mac} moved to {static_vtep} new mobility_seq={mobility_seq}" )
                    cur.update( { 'vtep' : static_vtep, 'ip': ip, 'seq' : mobility_seq } )
                else:
                    logging.info( f"VNI {vni}: MAC {mac} never seen before, associating with VTEP {static_vtep}" )
                    mac_vrf['macs'].update( { mac : { 'vtep': static_vtep, 'ip': ip, 'seq': -1 } } )
                # Publish the learned MAC in telemetry, then announce via BGP
                js_path = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{static_vtep}"}}.mac_vrf{{.name=="{mac_vrf["name"]}"}}.mac{{.address=="{mac}"}}'
                now_ts = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
                data = {
                    'last_update' : { "value" : now_ts },
                    'ip' : { "value" : ip },
                    'evpn_mac_mobility' : { "value": mobility_seq } # None -> not shown
                }
                Add_Telemetry( [(js_path, data)] )
                logging.info( f"Announcing EVPN MAC route...evpn_vteps={evpn_vteps}" )
                AnnounceRoute(state, mac_vrf, static_vtep, mac, ip, mobility_seq)
                if state.params['include_ip']:
                    mac_vrf['ips'].update( { ip: { 'mac' : mac, 'vtep' : static_vtep } } ) # Also track IP mobility
            except Exception as e:
                # Per-packet failures are logged but do not kill the listener
                tb_str = ''.join(traceback.format_tb(e.__traceback__))
                logging.error( f"Error processing ARP: {e} ~ {tb_str}" )
            # Debug - requires '/sys/kernel/debug/tracing/trace_pipe' to be mounted
            # (task, pid, cpu, flags, ts, msg) = bpf.trace_fields( nonblocking=True )
            # print( f'trace_fields: {msg}' )
    except Exception as ex:
        tb_str = ''.join(traceback.format_tb(ex.__traceback__))
        logging.error( f"Exiting ARP socket while loop: {ex} ~ {tb_str}" )
    # Only happens upon exception
    bpf.cleanup()
def ReplyARPProbe(state,socket,rx_pkt,dest_vtep_ip,local_vtep_ip,opcode,mac_vrf):
    """
    Replies to a special ARP probe packet over VXLAN to another agent, to measure RTT and packet loss.
    Uses MAC addresses / IPs gleaned from the ARP packet on the wire.

    A probe is recognized by the second hex digit of the ARP source MAC
    being '2'; the reply encodes the path index, a '1' phase marker and a
    40-bit microsecond timestamp in the ARP source MAC, and is sent back
    out through `socket` (the raw socket bound to the VXLAN interface).
    """
    logging.debug( f"ReplyARPProbe dest_vtep_ip={dest_vtep_ip} local_vtep_ip={local_vtep_ip}" )
    # TODO Use Linux kernel? https://www.kernel.org/doc/html/latest/networking/timestamping.html
    # SO_TIMESTAMPNS = 35
    # s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(3))
    # s.setsockopt(socket.SOL_SOCKET, SO_TIMESTAMPNS, 1)
    # raw_data, ancdata, flags, address = s.recvmsg(65535, 1024)
    def get_timestamp_us(): # 40-bit
        # Microseconds since the POSIX epoch, truncated to 40 bits
        # (fits into 5 of the 6 MAC-address octets)
        now = datetime.now(timezone.utc)
        # epoch = datetime(1970, 1, 1, tzinfo=timezone.utc) # use POSIX epoch
        # posix_timestamp_micros = (now - epoch) // timedelta(microseconds=1)
        # posix_timestamp_millis = posix_timestamp_micros // 1000
        # return posix_timestamp_millis
        return int( int(now.timestamp() * 1000000) & 0xffffffffff )
    # Not currently used
    def start_timer():
        # After 1s, aggregate the probe results for this peer VTEP into telemetry
        def on_timer():
            now_ts = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
            key = f'{{.mac_vrf=="{mac_vrf["name"]}"}}{{.vtep_ip=="{dest_vtep_ip}"}}{{.vni=={mac_vrf["vni"]}}}'
            js_path = f'.vxlan_proxy.path_probe_to{key}.at{{.timestamp=="{now_ts}"}}'
            values = list( mac_vrf['path_probes'][ dest_vtep_ip ]['paths'].values() )
            good = list( [ i for i in values if i!="missing" ] )
            lost = len(values)-len(good)
            if len(values)==0:
                loss = 100.0
            else:
                loss = 100.0 * (lost / len(values))
            # NOTE(review): divides by zero when every probe is "missing"
            # (good == []) — confirm whether that can occur here
            avg = sum(good) // len(good)
            data = {
                'result' : { "value" : f"Avg rtt latency: {avg} us loss: {loss:.1f}% probes: {mac_vrf['path_probes'][ dest_vtep_ip ]['paths']}" },
                'latency' : avg,
                'sent' : len(values),
                'lost' : lost,
                'probes' : sorted(good),
                'peer_uplinks' : [ f"{mac} = {i['count']} probes, {i['hops']} hop(s) away"
                                   for mac,i in mac_vrf['path_probes'][ dest_vtep_ip ]['interfaces'].items() ]
            }
            Add_Telemetry( [(js_path, data)] )
            mac_vrf['path_probes'].pop( dest_vtep_ip, None )
        # Reset bookkeeping: mark all 3 probe paths missing until replies arrive
        mac_vrf['path_probes'][ dest_vtep_ip ] = { 'paths': {}, 'interfaces': {} }
        for path in range(1,4):
            mac_vrf['path_probes'][ dest_vtep_ip ][ 'paths' ][ path ] = "missing"
        Timer( 1, on_timer ).start()
    _eths = rx_pkt.get_protocols( ethernet.ethernet ) # outer+inner
    # Check for probe request or reflected probe
    RFC5494_EXP1 = 24 # See https://datatracker.ietf.org/doc/html/rfc5494
    _arp = rx_pkt.get_protocol( arp.arp )
    phase = int(_arp.src_mac[1],16) # vxping sends MAC with '2'
    if phase!=2:
        return
    path = int(_arp.src_mac[0],16)
    # Decode received IP header, for identification and TTL
    _ip = rx_pkt.get_protocol( ipv4.ipv4 )
    _udp = rx_pkt.get_protocol( udp.udp )
    e = ethernet.ethernet(dst=_eths[0].src, # nexthop MAC, per vxlan_intf
                          src=_eths[0].dst, # source interface MAC, per uplink
                          ethertype=ether.ETH_TYPE_IP)
    # Add entropy to source IP for hashing of return flow
    digits = [ int(i) for i in local_vtep_ip.split('.') ]
    digits[1] ^= _ip.identification % 256
    local_vtep_ip_hashed = ".".join( map(str,digits) )
    i = ipv4.ipv4(dst=dest_vtep_ip,src=local_vtep_ip_hashed,proto=inet.IPPROTO_UDP,
                  tos=0xc0,identification=_ip.identification,flags=(1<<1)) # Set DF
    u = udp.udp(src_port=_udp.src_port,dst_port=4789) # vary source == timestamp
    v = vxlan.vxlan(vni=mac_vrf['vni'])
    # src == interface MAC, to measure ECMP spread
    e2 = ethernet.ethernet(dst=_eths[0].src,src=_eths[0].dst,ethertype=ether.ETH_TYPE_ARP)
    e2._MIN_PAYLOAD_LEN = 0 # Avoid padding
    # Reflect timestamp for ARP replies, include IP TTL
    dst_mac = f'{_ip.ttl:02x}:{_eths[1].src[3:]}'
    ts = get_timestamp_us()
    ts_mac = ""
    for b in range(0,5): # 40 bit
        ts_mac = f":{(ts%256):02x}" + ts_mac
        ts = ts // 256
    src_mac = f'{path:1x}1' + ts_mac # Reply -> use '1'
    a = arp.arp(hwtype=1, proto=0x0800, hlen=6, plen=4, opcode=RFC5494_EXP1,
                src_mac=src_mac, src_ip=local_vtep_ip,
                dst_mac=dst_mac, dst_ip=dest_vtep_ip )
    p = packet.Packet()
    for h in [e,i,u,v,e2,a]:
        p.add_protocol(h)
    p.serialize()
    # logging.debug( f"Sending/reflecting ARP probe response: {pkt}" )
    socket.sendall( p.data )
##################################################################
## Proc to process the config Notifications received by auto_config_agent
## At present processing config from js_path containing agent_name
##################################################################
def Handle_Notification(obj, state):
    """Dispatch a single SDK notification.

    Handles config notifications for three js_paths:
      * .network_instance.protocols.static_vxlan_agent — global agent
        parameters; (re)starts or stops the BGP thread and ARP listeners.
      * ...bgp_evpn.bgp_instance.static_vxlan_agent — per mac-vrf
        enable/disable and VNI/EVI changes.
      * ...static_vxlan_agent.static_vtep — add/remove static VTEPs.

    Returns True after handling global agent config, False otherwise.
    """
    if obj.HasField('config'):
        logging.info(f"GOT CONFIG :: {obj.config.key.js_path}")
        # SDK delivers JSON with single quotes; normalize before parsing
        json_str = obj.config.data.json.replace("'", "\"")
        data = json.loads(json_str) if json_str != "" else {}
        # net_inst = obj.config.key.keys[0] # always "default"
        if obj.config.key.js_path == ".network_instance.protocols.static_vxlan_agent":
            logging.info(f"Got config for agent, now will handle it :: \n{obj.config}\
                            Operation :: {obj.config.op}\nData :: {obj.config.data.json}")
            if obj.config.op == 2:
                logging.info(f"Delete config scenario")
                # TODO if this is the last namespace, unregister?
                # response=stub.AgentUnRegister(request=sdk_service_pb2.AgentRegistrationRequest(), metadata=metadata)
                # logging.info( f'Handle_Config: Unregister response:: {response}' )
                # state = State() # Reset state, works?
                state.params[ "admin_state" ] = "disable" # Only stop service for this namespace
            else:
                # JvB there should be a helper for this
                if 'admin_state' in data:
                    # strip the "admin-state-" prefix from the enum string
                    state.params[ "admin_state" ] = data['admin_state'][12:]
                if 'local_as' in data:
                    state.params[ "local_as" ] = int( data['local_as']['value'] )
                if 'peer_as' in data:
                    state.params[ "peer_as" ] = int( data['peer_as']['value'] )
                if 'local_preference' in data:
                    state.params[ "local_preference" ] = int( data['local_preference']['value'] )
                if 'source_address' in data:
                    state.params[ "source_address" ] = data['source_address']['value']
                if 'peer_address' in data:
                    state.params[ "peer_address" ] = data['peer_address']['value']
                # Proof-of-concept options default to off unless configured
                state.params[ "vxlan_interfaces" ] = []
                state.params[ "include_ip" ] = False
                state.params[ "ecmp_path_probes" ] = False
                state.params[ "auto_discover_static_vteps" ] = False
                if 'proof_of_concept' in data:
                    poc = data['proof_of_concept']
                    if 'vxlan_arp_learning_interfaces' in poc:
                        state.params[ "vxlan_interfaces" ] = [ i['value'] for i in poc['vxlan_arp_learning_interfaces'] ]
                    if 'include_ip' in poc:
                        state.params[ "include_ip" ] = bool( poc['include_ip']['value'] )
                    if 'ecmp_path_probes' in poc:
                        state.params[ "ecmp_path_probes" ] = bool( poc['ecmp_path_probes']['value'] )
                    if 'auto_discover_static_vteps' in poc:
                        state.params[ "auto_discover_static_vteps" ] = bool( poc['auto_discover_static_vteps']['value'] )
            # cleanup ARP thread always, use link()?
            if hasattr( state, 'arp_threads' ):
                logging.info( f"Cleaning up ARP threads and sockets: {state.arp_threads}" )
                for t in state.arp_threads.values():
                    t['socket'].close() # This ends the thread and cleans up bpf? Nope
                    t['bpf'].cleanup()
                    t['thread'].kill()
                del state.arp_threads
            # if enabled, start separate thread for BGP EVPN interactions
            def shutdown_bgp():
                # Stop the BGP speaker and its green thread, reset BGP VRF state
                state.speaker.shutdown()
                del state.speaker
                state.bgp_vrfs = {} # Reset
                # state.mac_vrfs = {} do not clean this
                hub.kill( state.bgpThread )
            if state.params[ "admin_state" ] == "enable":
                # BGPEVPNThread().start()
                if hasattr( state, 'bgpThread' ):
                    shutdown_bgp()
                    logging.info( "old BGP thread shutdown" )
                state.bgpThread = hub.spawn( runBGPThread, state )
            elif hasattr( state, 'bgpThread' ):
                shutdown_bgp()
                del state.bgpThread
                Remove_Telemetry( [".vxlan_proxy"] ) # Works?
                logging.info( "BGP shutdown" )
            return True
        elif obj.config.key.js_path == ".network_instance.protocols.bgp_evpn.bgp_instance.static_vxlan_agent":
            mac_vrf_name = obj.config.key.keys[0]
            admin_state = data['admin_state'][12:] if 'admin_state' in data else None
            vni = int( data['vni']['value'] ) if 'vni' in data else None
            evi = int( data['evi']['value'] ) if 'evi' in data else None
            # Index by VNI
            if vni:
                # Support VNI/EVI modifications
                new_vni = None
                new_evi = None
                if admin_state == "enable":
                    if vni not in state.mac_vrfs and mac_vrf_name not in state.mac_vrfs:
                        # First time this mac-vrf is seen: register under both VNI and name
                        vrf = { 'name': mac_vrf_name,
                                'admin_state': admin_state, 'vni': vni, 'evi': evi,
                                'macs': {}, 'ips': {}, 'vxlan_vteps': {}, 'path_probes': {} }
                        state.mac_vrfs[ vni ] = state.mac_vrfs[ mac_vrf_name ] = vrf
                    else:
                        if vni not in state.mac_vrfs:
                            # VNI changed: re-index the existing vrf under the new VNI
                            orig_vrf = state.mac_vrfs[ mac_vrf_name ]
                            new_vni = vni
                            logging.info( f"VNI modified on {mac_vrf_name}: {orig_vrf['vni']}->{vni}" )
                            state.mac_vrfs[ vni ] = orig_vrf
                            state.mac_vrfs.pop( orig_vrf['vni'], None )
                        if evi != state.mac_vrfs[ vni ][ 'evi' ]:
                            new_evi = evi
                            logging.info( f"EVI modified on {mac_vrf_name}: {state.mac_vrfs[ vni ][ 'evi' ]}->{new_evi}" )
                        state.mac_vrfs[ vni ][ 'admin_state' ] = "enable"
                    if hasattr( state, 'speaker' ): # BGP running?
                        UpdateMACVRF( state, state.mac_vrfs[ vni ], new_vni=new_vni, new_evi=new_evi )
                    else:
                        logging.info( "BGP thread not running yet, postponing UpdateMACVRF" )
                else:
                    logging.info( f"mac-vrf {mac_vrf_name} disabled, removing state" )
                    if vni in state.mac_vrfs:
                        old_vrf = state.mac_vrfs[ vni ]
                    elif mac_vrf_name in state.mac_vrfs:
                        old_vrf = state.mac_vrfs[ mac_vrf_name ]
                        vni = old_vrf['vni']
                    else:
                        return
                    old_vrf[ "admin_state" ] = "disable"
                    UpdateMACVRF( state, old_vrf )
                    # Drop both index entries (by VNI and by name)
                    state.mac_vrfs.pop( vni, None )
                    state.mac_vrfs.pop( old_vrf['name'], None )
        elif obj.config.key.js_path == ".network_instance.protocols.bgp_evpn.bgp_instance.static_vxlan_agent.static_vtep":
            mac_vrf_name = obj.config.key.keys[0]
            vtep_ip = obj.config.key.keys[2]
            if mac_vrf_name in state.mac_vrfs:
                mac_vrf = state.mac_vrfs[ mac_vrf_name ]
                if obj.config.op == 2: # delete static VTEP
                    # All MAC routes get withdrawn too
                    Remove_Static_VTEP( state, mac_vrf, vtep_ip, clear_macs=True )
                else:
                    static_macs = {}
                    if 'static_vtep' in data and 'static_macs' in data['static_vtep']:
                        static_macs = { m['value'] : "static"
                                        for m in data['static_vtep']['static_macs'] }
                    UpdateMACVRF_StaticVTEP( state, mac_vrf, vtep_ip, static_macs )
            else:
                logging.error( f"mac-vrf not found in state: {mac_vrf_name}" )
    else:
        logging.info(f"Unexpected notification : {obj}")
    return False
class State(object):
    """Mutable run-time state shared by all notification handlers."""

    def __init__(self):
        # Agent configuration, populated from received config notifications
        self.params = {}
        # RD -> static VTEP IP, for the BGP VRFs this agent created
        self.bgp_vrfs = {}
        # VNI (and mac-vrf name) -> mac-vrf dict { vxlan_vteps, evi, learned macs }
        self.mac_vrfs = {}
        # self.vni_2_evi = {} # Mapping of VNI to EVI

    def __str__(self):
        return f"{self.__class__}: {self.__dict__}"
##################################################################################################
## This is the main proc where all processing for the EVPN proxy agent starts.
## Agent registration, notification registration, subscription to notifications.
## Waits on the subscribed notifications and once any config is received, handles that config.
## If there are critical errors, unregisters the agent gracefully.
##################################################################################################
def Run():
    """Register this agent with the SDK manager, subscribe to config
    notifications and process the stream until it ends or fails.

    On stream failure the agent unregisters and exits the process via
    sys.exit(); the trailing `return True` lines are therefore only
    reached if sys.exit() is somehow suppressed.
    """
    sub_stub = sdk_service_pb2_grpc.SdkNotificationServiceStub(channel)
    # optional agent_liveliness=<seconds> to have system kill unresponsive agents
    response = stub.AgentRegister(request=sdk_service_pb2.AgentRegistrationRequest(), metadata=metadata)
    logging.info(f"Registration response : {response.status}")
    request=sdk_service_pb2.NotificationRegisterRequest(op=sdk_service_pb2.NotificationRegisterRequest.Create)
    create_subscription_response = stub.NotificationRegister(request=request, metadata=metadata)
    stream_id = create_subscription_response.stream_id
    logging.info(f"Create subscription response received. stream_id : {stream_id}")
    try:
        Subscribe_Notifications(stream_id)
        stream_request = sdk_service_pb2.NotificationStreamRequest(stream_id=stream_id)
        stream_response = sub_stub.NotificationStream(stream_request, metadata=metadata)
        state = State()
        count = 1
        # Blocks here; each received batch may contain multiple notifications
        for r in stream_response:
            logging.info(f"Count :: {count} NOTIFICATION:: \n{r.notification}")
            count += 1
            for obj in r.notification:
                if obj.HasField('config') and obj.config.key.js_path == ".commit.end":
                    logging.info('TO DO -commit.end config')
                else:
                    Handle_Notification(obj, state)
                    logging.info(f'Updated state: {state}')
    except grpc._channel._Rendezvous as err:
        logging.info(f'GOING TO EXIT NOW: {err}')
    except Exception as e:
        logging.error(f'Exception caught :: {e}')
        #if file_name != None:
        #    Update_Result(file_name, action='delete')
        try:
            # Best-effort unregister before exiting
            response = stub.AgentUnRegister(request=sdk_service_pb2.AgentRegistrationRequest(), metadata=metadata)
            logging.error(f'Run try: Unregister response:: {response}')
        except grpc._channel._Rendezvous as err:
            logging.info(f'GOING TO EXIT NOW: {err}')
            sys.exit()
        return True
    sys.exit()
    return True
############################################################
## Gracefully handle SIGTERM signal
## When called, will unregister Agent and gracefully exit
############################################################
def Exit_Gracefully(signum, frame):
    """SIGTERM handler: unregister the agent from the SDK manager, then exit.

    The process exits via sys.exit() in the finally clause regardless of
    whether the unregister call succeeds.
    """
    logging.info(f"Caught signal :: {signum}\n will unregister EVPN proxy agent")
    try:
        unregister_request = sdk_service_pb2.AgentRegistrationRequest()
        response = stub.AgentUnRegister(request=unregister_request, metadata=metadata)
        logging.info(f'Exit_Gracefully: Unregister response:: {response}')
    finally:
        logging.info('GOING TO EXIT NOW')
        sys.exit()
##################################################################################################
## Main from where the Agent starts
## Log file is written to: /var/log/srlinux/stdout/evpn_proxy_agent.log
## Signals handled for graceful exit: SIGTERM
##################################################################################################
if __name__ == '__main__':
    # grpc_eventlet.init_eventlet() # Fix gRPC eventlet interworking
    # hostname = socket.gethostname()
    stdout_dir = '/var/log/srlinux/stdout' # PyTEnv.SRL_STDOUT_DIR
    # Unregister cleanly when the system stops the agent
    signal.signal(signal.SIGTERM, Exit_Gracefully)
    if not os.path.exists(stdout_dir):
        os.makedirs(stdout_dir, exist_ok=True)
    log_filename = f'{stdout_dir}/{agent_name}.log'
    # Rotate log at ~3MB, keeping 5 backups
    logging.basicConfig(
        handlers=[RotatingFileHandler(log_filename, maxBytes=3000000,backupCount=5)],
        format='%(asctime)s,%(msecs)03d %(name)s %(levelname)s %(message)s',
        datefmt='%H:%M:%S', level=logging.INFO)
    logging.info("START TIME :: {}".format(datetime.now()))
    if Run():
        logging.info('Agent unregistered and BGP shutdown')
    else:
        logging.info('Should not happen')
| [
"socket.sendall",
"pygnmi.client.gNMIclient",
"traceback.format_tb",
"logging.debug",
"telemetry_service_pb2_grpc.SdkMgrTelemetryServiceStub",
"eventlet.monkey_patch",
"sys.exit",
"bcc.BPF.attach_raw_socket",
"os.read",
"logging.info",
"logging.error",
"sdk_service_pb2.AgentRegistrationRequest... | [((257, 280), 'eventlet.monkey_patch', 'eventlet.monkey_patch', ([], {}), '()\n', (278, 280), False, 'import eventlet\n'), ((428, 457), 'grpc.experimental.eventlet.init_eventlet', 'grpc_eventlet.init_eventlet', ([], {}), '()\n', (455, 457), True, 'from grpc.experimental import eventlet as grpc_eventlet\n'), ((2608, 2648), 'grpc.insecure_channel', 'grpc.insecure_channel', (['"""127.0.0.1:50053"""'], {}), "('127.0.0.1:50053')\n", (2629, 2648), False, 'import grpc\n'), ((2696, 2743), 'sdk_service_pb2_grpc.SdkMgrServiceStub', 'sdk_service_pb2_grpc.SdkMgrServiceStub', (['channel'], {}), '(channel)\n', (2734, 2743), False, 'import sdk_service_pb2_grpc\n'), ((3708, 3812), 'logging.info', 'logging.info', (['f"""Status of subscription response for {option}:: {subscription_response.status}"""'], {}), "(\n f'Status of subscription response for {option}:: {subscription_response.status}'\n )\n", (3720, 3812), False, 'import logging\n'), ((4316, 4378), 'telemetry_service_pb2_grpc.SdkMgrTelemetryServiceStub', 'telemetry_service_pb2_grpc.SdkMgrTelemetryServiceStub', (['channel'], {}), '(channel)\n', (4369, 4378), False, 'import telemetry_service_pb2_grpc\n'), ((4410, 4456), 'telemetry_service_pb2.TelemetryUpdateRequest', 'telemetry_service_pb2.TelemetryUpdateRequest', ([], {}), '()\n', (4454, 4456), False, 'import telemetry_service_pb2\n'), ((4659, 4730), 'logging.info', 'logging.info', (['f"""Telemetry_Update_Request :: {telemetry_update_request}"""'], {}), "(f'Telemetry_Update_Request :: {telemetry_update_request}')\n", (4671, 4730), False, 'import logging\n'), ((4929, 4991), 'telemetry_service_pb2_grpc.SdkMgrTelemetryServiceStub', 'telemetry_service_pb2_grpc.SdkMgrTelemetryServiceStub', (['channel'], {}), '(channel)\n', (4982, 4991), False, 'import telemetry_service_pb2_grpc\n'), ((5020, 5066), 'telemetry_service_pb2.TelemetryDeleteRequest', 'telemetry_service_pb2.TelemetryDeleteRequest', ([], {}), '()\n', (5064, 5066), False, 
'import telemetry_service_pb2\n'), ((5186, 5254), 'logging.info', 'logging.info', (['f"""Telemetry_Delete_Request :: {telemetry_del_request}"""'], {}), "(f'Telemetry_Delete_Request :: {telemetry_del_request}')\n", (5198, 5254), False, 'import logging\n'), ((5438, 5507), 'logging.info', 'logging.info', (['f"""Configure_BFD :: remote_evpn_vtep={remote_evpn_vtep}"""'], {}), "(f'Configure_BFD :: remote_evpn_vtep={remote_evpn_vtep}')\n", (5450, 5507), False, 'import logging\n'), ((9033, 9121), 'logging.info', 'logging.info', (['f"""UpdateMACVRF mac_vrf={mac_vrf} new_vni={new_vni} new_evi={new_evi}"""'], {}), "(\n f'UpdateMACVRF mac_vrf={mac_vrf} new_vni={new_vni} new_evi={new_evi}')\n", (9045, 9121), False, 'import logging\n'), ((10452, 10545), 'logging.info', 'logging.info', (['f"""UpdateMACVRF_StaticVTEP mac_vrf={mac_vrf} vtep_ip={vtep_ip} macs={macs}"""'], {}), "(\n f'UpdateMACVRF_StaticVTEP mac_vrf={mac_vrf} vtep_ip={vtep_ip} macs={macs}')\n", (10464, 10545), False, 'import logging\n'), ((17172, 17234), 'logging.info', 'logging.info', (['"""Starting BGP thread in srbase-default netns..."""'], {}), "('Starting BGP thread in srbase-default netns...')\n", (17184, 17234), False, 'import logging\n'), ((17385, 17432), 'logging.info', 'logging.info', (['"""Starting BGPSpeaker in netns..."""'], {}), "('Starting BGPSpeaker in netns...')\n", (17397, 17432), False, 'import logging\n'), ((17452, 17791), 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker', 'BGPSpeaker', ([], {'bgp_server_hosts': '[LOCAL_LOOPBACK]', 'bgp_server_port': '(1179)', 'net_ns': '"""srbase-default"""', 'as_number': "state.params['local_as']", 'local_pref': "state.params['local_preference']", 'router_id': 'LOCAL_LOOPBACK', 'best_path_change_handler': 'best_path_change_handler', 'peer_up_handler': 'peer_up_handler', 'peer_down_handler': 'peer_down_handler'}), "(bgp_server_hosts=[LOCAL_LOOPBACK], bgp_server_port=1179, net_ns=\n 'srbase-default', as_number=state.params['local_as'], local_pref=state.\n 
params['local_preference'], router_id=LOCAL_LOOPBACK,\n best_path_change_handler=best_path_change_handler, peer_up_handler=\n peer_up_handler, peer_down_handler=peer_down_handler)\n", (17462, 17791), False, 'from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker, EVPN_MULTICAST_ETAG_ROUTE, EVPN_MAC_IP_ADV_ROUTE, RF_L2_EVPN, PMSI_TYPE_INGRESS_REP\n'), ((18166, 18219), 'logging.info', 'logging.info', (['f"""Connecting to neighbor {NEIGHBOR}..."""'], {}), "(f'Connecting to neighbor {NEIGHBOR}...')\n", (18178, 18219), False, 'import logging\n'), ((18675, 18693), 'eventlet.sleep', 'eventlet.sleep', (['(10)'], {}), '(10)\n', (18689, 18693), False, 'import eventlet\n'), ((20278, 20324), 'logging.info', 'logging.info', (['"""Adding EVPN multicast route..."""'], {}), "('Adding EVPN multicast route...')\n", (20290, 20324), False, 'import logging\n'), ((20842, 20901), 'logging.info', 'logging.info', (['f"""Remove_Static_VTEP: Removing VRF...RD={rd}"""'], {}), "(f'Remove_Static_VTEP: Removing VRF...RD={rd}')\n", (20854, 20901), False, 'import logging\n'), ((21957, 22061), 'logging.info', 'logging.info', (['f"""HandleTCPTimestamps: {ts_sec}.{ts_ns}={ts} IP {ipHeaders.src}>{ipHeaders.dst}"""'], {}), "(\n f'HandleTCPTimestamps: {ts_sec}.{ts_ns}={ts} IP {ipHeaders.src}>{ipHeaders.dst}'\n )\n", (21969, 22061), False, 'import logging\n'), ((22117, 22208), 'logging.info', 'logging.info', (['f"""Starting ARP listener on interface={vxlan_intf} params {state.params}"""'], {}), "(\n f'Starting ARP listener on interface={vxlan_intf} params {state.params}')\n", (22129, 22208), False, 'import logging\n'), ((22337, 22380), 'bcc.BPF', 'BPF', ([], {'src_file': '"""filter-vxlan-arp.c"""', 'debug': '(0)'}), "(src_file='filter-vxlan-arp.c', debug=0)\n", (22340, 22380), False, 'from bcc import BPF\n'), ((22878, 22956), 'socket.fromfd', 'socket.fromfd', (['socket_fd', 'socket.PF_PACKET', 'socket.SOCK_RAW', 'socket.IPPROTO_IP'], {}), '(socket_fd, socket.PF_PACKET, socket.SOCK_RAW, 
socket.IPPROTO_IP)\n', (22891, 22956), False, 'import socket\n'), ((30809, 30903), 'logging.debug', 'logging.debug', (['f"""ReplyARPProbe dest_vtep_ip={dest_vtep_ip} local_vtep_ip={local_vtep_ip}"""'], {}), "(\n f'ReplyARPProbe dest_vtep_ip={dest_vtep_ip} local_vtep_ip={local_vtep_ip}')\n", (30822, 30903), False, 'import logging\n'), ((33660, 33747), 'ryu.lib.packet.ethernet.ethernet', 'ethernet.ethernet', ([], {'dst': '_eths[0].src', 'src': '_eths[0].dst', 'ethertype': 'ether.ETH_TYPE_IP'}), '(dst=_eths[0].src, src=_eths[0].dst, ethertype=ether.\n ETH_TYPE_IP)\n', (33677, 33747), False, 'from ryu.lib.packet import packet, ipv4, udp, vxlan, ethernet, arp, tcp\n'), ((34076, 34216), 'ryu.lib.packet.ipv4.ipv4', 'ipv4.ipv4', ([], {'dst': 'dest_vtep_ip', 'src': 'local_vtep_ip_hashed', 'proto': 'inet.IPPROTO_UDP', 'tos': '(192)', 'identification': '_ip.identification', 'flags': '(1 << 1)'}), '(dst=dest_vtep_ip, src=local_vtep_ip_hashed, proto=inet.\n IPPROTO_UDP, tos=192, identification=_ip.identification, flags=1 << 1)\n', (34085, 34216), False, 'from ryu.lib.packet import packet, ipv4, udp, vxlan, ethernet, arp, tcp\n'), ((34242, 34288), 'ryu.lib.packet.udp.udp', 'udp.udp', ([], {'src_port': '_udp.src_port', 'dst_port': '(4789)'}), '(src_port=_udp.src_port, dst_port=4789)\n', (34249, 34288), False, 'from ryu.lib.packet import packet, ipv4, udp, vxlan, ethernet, arp, tcp\n'), ((34322, 34353), 'ryu.lib.packet.vxlan.vxlan', 'vxlan.vxlan', ([], {'vni': "mac_vrf['vni']"}), "(vni=mac_vrf['vni'])\n", (34333, 34353), False, 'from ryu.lib.packet import packet, ipv4, udp, vxlan, ethernet, arp, tcp\n'), ((34413, 34501), 'ryu.lib.packet.ethernet.ethernet', 'ethernet.ethernet', ([], {'dst': '_eths[0].src', 'src': '_eths[0].dst', 'ethertype': 'ether.ETH_TYPE_ARP'}), '(dst=_eths[0].src, src=_eths[0].dst, ethertype=ether.\n ETH_TYPE_ARP)\n', (34430, 34501), False, 'from ryu.lib.packet import packet, ipv4, udp, vxlan, ethernet, arp, tcp\n'), ((34845, 34993), 'ryu.lib.packet.arp.arp', 
'arp.arp', ([], {'hwtype': '(1)', 'proto': '(2048)', 'hlen': '(6)', 'plen': '(4)', 'opcode': 'RFC5494_EXP1', 'src_mac': 'src_mac', 'src_ip': 'local_vtep_ip', 'dst_mac': 'dst_mac', 'dst_ip': 'dest_vtep_ip'}), '(hwtype=1, proto=2048, hlen=6, plen=4, opcode=RFC5494_EXP1, src_mac=\n src_mac, src_ip=local_vtep_ip, dst_mac=dst_mac, dst_ip=dest_vtep_ip)\n', (34852, 34993), False, 'from ryu.lib.packet import packet, ipv4, udp, vxlan, ethernet, arp, tcp\n'), ((35029, 35044), 'ryu.lib.packet.packet.Packet', 'packet.Packet', ([], {}), '()\n', (35042, 35044), False, 'from ryu.lib.packet import packet, ipv4, udp, vxlan, ethernet, arp, tcp\n'), ((35188, 35210), 'socket.sendall', 'socket.sendall', (['p.data'], {}), '(p.data)\n', (35202, 35210), False, 'import socket\n'), ((44092, 44148), 'sdk_service_pb2_grpc.SdkNotificationServiceStub', 'sdk_service_pb2_grpc.SdkNotificationServiceStub', (['channel'], {}), '(channel)\n', (44139, 44148), False, 'import sdk_service_pb2_grpc\n'), ((44341, 44399), 'logging.info', 'logging.info', (['f"""Registration response : {response.status}"""'], {}), "(f'Registration response : {response.status}')\n", (44353, 44399), False, 'import logging\n'), ((44413, 44516), 'sdk_service_pb2.NotificationRegisterRequest', 'sdk_service_pb2.NotificationRegisterRequest', ([], {'op': 'sdk_service_pb2.NotificationRegisterRequest.Create'}), '(op=sdk_service_pb2.\n NotificationRegisterRequest.Create)\n', (44456, 44516), False, 'import sdk_service_pb2\n'), ((44668, 44747), 'logging.info', 'logging.info', (['f"""Create subscription response received. stream_id : {stream_id}"""'], {}), "(f'Create subscription response received. 
stream_id : {stream_id}')\n", (44680, 44747), False, 'import logging\n'), ((46048, 46058), 'sys.exit', 'sys.exit', ([], {}), '()\n', (46056, 46058), False, 'import sys\n'), ((46331, 46416), 'logging.info', 'logging.info', (['f"""Caught signal :: {signum}\n will unregister EVPN proxy agent"""'], {}), '(f"""Caught signal :: {signum}\n will unregister EVPN proxy agent"""\n )\n', (46343, 46416), False, 'import logging\n'), ((47243, 47289), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'Exit_Gracefully'], {}), '(signal.SIGTERM, Exit_Gracefully)\n', (47256, 47289), False, 'import signal\n'), ((3415, 3461), 'config_service_pb2.ConfigSubscriptionRequest', 'config_service_pb2.ConfigSubscriptionRequest', ([], {}), '()\n', (3459, 3461), False, 'import config_service_pb2\n'), ((3527, 3616), 'sdk_service_pb2.NotificationRegisterRequest', 'sdk_service_pb2.NotificationRegisterRequest', ([], {'op': 'op', 'stream_id': 'stream_id', 'config': 'entry'}), '(op=op, stream_id=stream_id,\n config=entry)\n', (3570, 3616), False, 'import sdk_service_pb2\n'), ((4127, 4162), 'logging.info', 'logging.info', (['"""Stream ID not sent."""'], {}), "('Stream ID not sent.')\n", (4139, 4162), False, 'import logging\n'), ((4639, 4654), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (4649, 4654), False, 'import json\n'), ((6494, 6626), 'pygnmi.client.gNMIclient', 'gNMIclient', ([], {'target': "('unix:///opt/srlinux/var/run/sr_gnmi_server', 57400)", 'username': '"""admin"""', 'password': '"""<PASSWORD>"""', 'insecure': '(True)'}), "(target=('unix:///opt/srlinux/var/run/sr_gnmi_server', 57400),\n username='admin', password='<PASSWORD>', insecure=True)\n", (6504, 6626), False, 'from pygnmi.client import gNMIclient, telemetryParser\n'), ((10298, 10352), 'logging.info', 'logging.info', (['"""UpdateMACVRF: admin-state not \'enable\'"""'], {}), '("UpdateMACVRF: admin-state not \'enable\'")\n', (10310, 10352), False, 'import logging\n'), ((11727, 11834), 'logging.info', 'logging.info', 
(['f"""BGP best path changed: {event.path} prefix={event.prefix} NLRI={event.path.nlri}"""'], {}), "(\n f'BGP best path changed: {event.path} prefix={event.prefix} NLRI={event.path.nlri}'\n )\n", (11739, 11834), False, 'import logging\n'), ((15952, 16004), 'logging.warning', 'logging.warning', (['f"""Peer UP: {router_id} {remote_as}"""'], {}), "(f'Peer UP: {router_id} {remote_as}')\n", (15967, 16004), False, 'import logging\n'), ((16469, 16523), 'logging.warning', 'logging.warning', (['f"""Peer DOWN: {router_id} {remote_as}"""'], {}), "(f'Peer DOWN: {router_id} {remote_as}')\n", (16484, 16523), False, 'import logging\n'), ((17026, 17073), 'os.path.exists', 'os.path.exists', (['"""/var/run/netns/srbase-default"""'], {}), "('/var/run/netns/srbase-default')\n", (17040, 17073), False, 'import os\n'), ((17080, 17145), 'logging.info', 'logging.info', (['"""Waiting for srbase-default netns to be created..."""'], {}), "('Waiting for srbase-default netns to be created...')\n", (17092, 17145), False, 'import logging\n'), ((17151, 17168), 'eventlet.sleep', 'eventlet.sleep', (['(1)'], {}), '(1)\n', (17165, 17168), False, 'import eventlet\n'), ((18827, 18865), 'logging.info', 'logging.info', (['"""eventlet sleep loop..."""'], {}), "('eventlet sleep loop...')\n", (18839, 18865), False, 'import logging\n'), ((18873, 18891), 'eventlet.sleep', 'eventlet.sleep', (['(30)'], {}), '(30)\n', (18887, 18891), False, 'import eventlet\n'), ((19480, 19542), 'logging.info', 'logging.info', (['f"""Add_Static_VTEP: Adding VRF...RD={rd} RT={rt}"""'], {}), "(f'Add_Static_VTEP: Adding VRF...RD={rd} RT={rt}')\n", (19492, 19542), False, 'import logging\n'), ((19700, 19768), 'logging.info', 'logging.info', (['f"""Add_Static_VTEP: Assuming VRF for RD={rd} exists..."""'], {}), "(f'Add_Static_VTEP: Assuming VRF for RD={rd} exists...')\n", (19712, 19768), False, 'import logging\n'), ((20740, 20813), 'logging.warning', 'logging.warning', (['f"""Remove_Static_VTEP: BGP MAC VRF does not exists: {rd}"""'], 
{}), "(f'Remove_Static_VTEP: BGP MAC VRF does not exists: {rd}')\n", (20755, 20813), False, 'import logging\n'), ((22735, 22763), 'netns.NetNS', 'netns.NetNS', ([], {'nsname': '"""srbase"""'}), "(nsname='srbase')\n", (22746, 22763), False, 'import netns\n'), ((22771, 22825), 'bcc.BPF.attach_raw_socket', 'BPF.attach_raw_socket', (['function_arp_filter', 'vxlan_intf'], {}), '(function_arp_filter, vxlan_intf)\n', (22792, 22825), False, 'from bcc import BPF\n'), ((31267, 31293), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (31279, 31293), False, 'from datetime import datetime, timezone, timedelta\n'), ((35565, 35620), 'logging.info', 'logging.info', (['f"""GOT CONFIG :: {obj.config.key.js_path}"""'], {}), "(f'GOT CONFIG :: {obj.config.key.js_path}')\n", (35577, 35620), False, 'import logging\n'), ((43131, 43179), 'logging.info', 'logging.info', (['f"""Unexpected notification : {obj}"""'], {}), "(f'Unexpected notification : {obj}')\n", (43143, 43179), False, 'import logging\n'), ((44823, 44885), 'sdk_service_pb2.NotificationStreamRequest', 'sdk_service_pb2.NotificationStreamRequest', ([], {'stream_id': 'stream_id'}), '(stream_id=stream_id)\n', (44864, 44885), False, 'import sdk_service_pb2\n'), ((46537, 46603), 'logging.info', 'logging.info', (['f"""Exit_Gracefully: Unregister response:: {response}"""'], {}), "(f'Exit_Gracefully: Unregister response:: {response}')\n", (46549, 46603), False, 'import logging\n'), ((46627, 46661), 'logging.info', 'logging.info', (['f"""GOING TO EXIT NOW"""'], {}), "(f'GOING TO EXIT NOW')\n", (46639, 46661), False, 'import logging\n'), ((46672, 46682), 'sys.exit', 'sys.exit', ([], {}), '()\n', (46680, 46682), False, 'import sys\n'), ((47301, 47327), 'os.path.exists', 'os.path.exists', (['stdout_dir'], {}), '(stdout_dir)\n', (47315, 47327), False, 'import os\n'), ((47337, 47375), 'os.makedirs', 'os.makedirs', (['stdout_dir'], {'exist_ok': '(True)'}), '(stdout_dir, exist_ok=True)\n', (47348, 47375), 
False, 'import os\n'), ((47740, 47791), 'logging.info', 'logging.info', (['"""Agent unregistered and BGP shutdown"""'], {}), "('Agent unregistered and BGP shutdown')\n", (47752, 47791), False, 'import logging\n'), ((47810, 47843), 'logging.info', 'logging.info', (['"""Should not happen"""'], {}), "('Should not happen')\n", (47822, 47843), False, 'import logging\n'), ((7693, 7710), 'logging.error', 'logging.error', (['ex'], {}), '(ex)\n', (7706, 7710), False, 'import logging\n'), ((8752, 8769), 'logging.error', 'logging.error', (['ex'], {}), '(ex)\n', (8765, 8769), False, 'import logging\n'), ((16140, 16190), 'logging.info', 'logging.info', (['"""Starting ARP listener thread(s)..."""'], {}), "('Starting ARP listener thread(s)...')\n", (16152, 16190), False, 'import logging\n'), ((19852, 19866), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19864, 19866), False, 'from datetime import datetime, timezone, timedelta\n'), ((21776, 21803), 'struct.unpack', 'struct.unpack', (['"""iiii"""', 'i[2]'], {}), "('iiii', i[2])\n", (21789, 21803), False, 'import struct\n'), ((23204, 23228), 'os.read', 'os.read', (['socket_fd', '(2048)'], {}), '(socket_fd, 2048)\n', (23211, 23228), False, 'import os\n'), ((30424, 30488), 'logging.error', 'logging.error', (['f"""Exiting ARP socket while loop: {ex} ~ {tb_str}"""'], {}), "(f'Exiting ARP socket while loop: {ex} ~ {tb_str}')\n", (30437, 30488), False, 'import logging\n'), ((35696, 35716), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (35706, 35716), False, 'import json\n'), ((35906, 36081), 'logging.info', 'logging.info', (['f"""Got config for agent, now will handle it :: \n{obj.config} Operation :: {obj.config.op}\nData :: {obj.config.data.json}"""'], {}), '(\n f"""Got config for agent, now will handle it :: \n{obj.config} Operation :: {obj.config.op}\nData :: {obj.config.data.json}"""\n )\n', (35918, 36081), False, 'import logging\n'), ((44274, 44316), 'sdk_service_pb2.AgentRegistrationRequest', 
'sdk_service_pb2.AgentRegistrationRequest', ([], {}), '()\n', (44314, 44316), False, 'import sdk_service_pb2\n'), ((45053, 45124), 'logging.info', 'logging.info', (['f"""Count :: {count} NOTIFICATION:: \n{r.notification}"""'], {}), '(f"""Count :: {count} NOTIFICATION:: \n{r.notification}""")\n', (45065, 45124), False, 'import logging\n'), ((45492, 45533), 'logging.info', 'logging.info', (['f"""GOING TO EXIT NOW: {err}"""'], {}), "(f'GOING TO EXIT NOW: {err}')\n", (45504, 45533), False, 'import logging\n'), ((45570, 45611), 'logging.error', 'logging.error', (['f"""Exception caught :: {e}"""'], {}), "(f'Exception caught :: {e}')\n", (45583, 45611), False, 'import logging\n'), ((47701, 47715), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (47713, 47715), False, 'from datetime import datetime, timezone, timedelta\n'), ((15825, 15897), 'logging.error', 'logging.error', (['f"""Exception in best_path_change_handler: {ex} ~ {tb_str}"""'], {}), "(f'Exception in best_path_change_handler: {ex} ~ {tb_str}')\n", (15838, 15897), False, 'import logging\n'), ((16360, 16412), 'ryu.lib.hub.spawn', 'hub.spawn', (['ARP_receiver_thread', 'state', 'i', 'evpn_vteps'], {}), '(ARP_receiver_thread, state, i, evpn_vteps)\n', (16369, 16412), False, 'from ryu.lib import hub\n'), ((23457, 23488), 'ryu.lib.packet.packet.Packet', 'packet.Packet', (['packet_bytearray'], {}), '(packet_bytearray)\n', (23470, 23488), False, 'from ryu.lib.packet import packet, ipv4, udp, vxlan, ethernet, arp, tcp\n'), ((29672, 29740), 'logging.info', 'logging.info', (['f"""Announcing EVPN MAC route...evpn_vteps={evpn_vteps}"""'], {}), "(f'Announcing EVPN MAC route...evpn_vteps={evpn_vteps}')\n", (29684, 29740), False, 'import logging\n'), ((30378, 30415), 'traceback.format_tb', 'traceback.format_tb', (['ex.__traceback__'], {}), '(ex.__traceback__)\n', (30397, 30415), False, 'import traceback\n'), ((33121, 33139), 'threading.Timer', 'Timer', (['(1)', 'on_timer'], {}), '(1, on_timer)\n', (33126, 33139), 
False, 'from threading import Timer\n'), ((36123, 36162), 'logging.info', 'logging.info', (['f"""Delete config scenario"""'], {}), "(f'Delete config scenario')\n", (36135, 36162), False, 'import logging\n'), ((38523, 38596), 'logging.info', 'logging.info', (['f"""Cleaning up ARP threads and sockets: {state.arp_threads}"""'], {}), "(f'Cleaning up ARP threads and sockets: {state.arp_threads}')\n", (38535, 38596), False, 'import logging\n'), ((39145, 39170), 'ryu.lib.hub.kill', 'hub.kill', (['state.bgpThread'], {}), '(state.bgpThread)\n', (39153, 39170), False, 'from ryu.lib import hub\n'), ((39451, 39481), 'ryu.lib.hub.spawn', 'hub.spawn', (['runBGPThread', 'state'], {}), '(runBGPThread, state)\n', (39460, 39481), False, 'from ryu.lib import hub\n'), ((45838, 45897), 'logging.error', 'logging.error', (['f"""Run try: Unregister response:: {response}"""'], {}), "(f'Run try: Unregister response:: {response}')\n", (45851, 45897), False, 'import logging\n'), ((46466, 46508), 'sdk_service_pb2.AgentRegistrationRequest', 'sdk_service_pb2.AgentRegistrationRequest', ([], {}), '()\n', (46506, 46508), False, 'import sdk_service_pb2\n'), ((47469, 47535), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', (['log_filename'], {'maxBytes': '(3000000)', 'backupCount': '(5)'}), '(log_filename, maxBytes=3000000, backupCount=5)\n', (47488, 47535), False, 'from logging.handlers import RotatingFileHandler\n'), ((15780, 15817), 'traceback.format_tb', 'traceback.format_tb', (['ex.__traceback__'], {}), '(ex.__traceback__)\n', (15799, 15817), False, 'import traceback\n'), ((23787, 23837), 'logging.debug', 'logging.debug', (['f"""ARP packet:{p.protocol_name}={p}"""'], {}), "(f'ARP packet:{p.protocol_name}={p}')\n", (23800, 23837), False, 'import logging\n'), ((24382, 24436), 'logging.info', 'logging.info', (['f"""VNI not enabled for proxy EVPN: {vni}"""'], {}), "(f'VNI not enabled for proxy EVPN: {vni}')\n", (24394, 24436), False, 'import logging\n'), ((26582, 26656), 
'logging.info', 'logging.info', (['f"""MAC {mac} already announced: {cur}, checking for MAC move"""'], {}), "(f'MAC {mac} already announced: {cur}, checking for MAC move')\n", (26594, 26656), False, 'import logging\n'), ((28827, 28929), 'logging.info', 'logging.info', (['f"""VNI {vni}: MAC {mac} moved to {static_vtep} new mobility_seq={mobility_seq}"""'], {}), "(\n f'VNI {vni}: MAC {mac} moved to {static_vtep} new mobility_seq={mobility_seq}'\n )\n", (28839, 28929), False, 'import logging\n'), ((29031, 29133), 'logging.info', 'logging.info', (['f"""VNI {vni}: MAC {mac} never seen before, associating with VTEP {static_vtep}"""'], {}), "(\n f'VNI {vni}: MAC {mac} never seen before, associating with VTEP {static_vtep}'\n )\n", (29043, 29133), False, 'import logging\n'), ((30063, 30117), 'logging.error', 'logging.error', (['f"""Error processing ARP: {e} ~ {tb_str}"""'], {}), "(f'Error processing ARP: {e} ~ {tb_str}')\n", (30076, 30117), False, 'import logging\n'), ((31705, 31719), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (31717, 31719), False, 'from datetime import datetime, timezone, timedelta\n'), ((39375, 39414), 'logging.info', 'logging.info', (['"""old BGP thread shutdown"""'], {}), "('old BGP thread shutdown')\n", (39387, 39414), False, 'import logging\n'), ((39673, 39701), 'logging.info', 'logging.info', (['"""BGP shutdown"""'], {}), "('BGP shutdown')\n", (39685, 39701), False, 'import logging\n'), ((45275, 45315), 'logging.info', 'logging.info', (['"""TO DO -commit.end config"""'], {}), "('TO DO -commit.end config')\n", (45287, 45315), False, 'import logging\n'), ((45398, 45437), 'logging.info', 'logging.info', (['f"""Updated state: {state}"""'], {}), "(f'Updated state: {state}')\n", (45410, 45437), False, 'import logging\n'), ((45959, 46000), 'logging.info', 'logging.info', (['f"""GOING TO EXIT NOW: {err}"""'], {}), "(f'GOING TO EXIT NOW: {err}')\n", (45971, 46000), False, 'import logging\n'), ((46013, 46023), 'sys.exit', 'sys.exit', ([], 
{}), '()\n', (46021, 46023), False, 'import sys\n'), ((12325, 12392), 'logging.info', 'logging.info', (['f"""Detected another EVPN proxy: {originator_id.value}"""'], {}), "(f'Detected another EVPN proxy: {originator_id.value}')\n", (12337, 12392), False, 'import logging\n'), ((12582, 12646), 'logging.info', 'logging.info', (['f"""Multicast route from EVPN VTEP: {event.nexthop}"""'], {}), "(f'Multicast route from EVPN VTEP: {event.nexthop}')\n", (12594, 12646), False, 'import logging\n'), ((13376, 13444), 'logging.info', 'logging.info', (['f"""Received EVPN route update for VNI {vni}: {mac_vrf}"""'], {}), "(f'Received EVPN route update for VNI {vni}: {mac_vrf}')\n", (13388, 13444), False, 'import logging\n'), ((15600, 15652), 'logging.info', 'logging.info', (['"""Not multicast and no VNI -> ignoring"""'], {}), "('Not multicast and no VNI -> ignoring')\n", (15612, 15652), False, 'import logging\n'), ((23898, 23933), 'logging.debug', 'logging.debug', (['f"""vlan id = {p.vid}"""'], {}), "(f'vlan id = {p.vid}')\n", (23911, 23933), False, 'import logging\n'), ((25171, 25278), 'logging.info', 'logging.info', (['f"""ARP({\'req\' if _arp.opcode == 1 else \'res\'}) from EVPN VTEP {_ip.src} -> ignoring"""'], {}), '(\n f"ARP({\'req\' if _arp.opcode == 1 else \'res\'}) from EVPN VTEP {_ip.src} -> ignoring"\n )\n', (25183, 25278), False, 'import logging\n'), ((25542, 25641), 'logging.info', 'logging.info', (['f"""ARP({\'req\' if _arp.opcode == 1 else \'res\'}) from static VTEP: {mac} {ip}"""'], {}), '(\n f"ARP({\'req\' if _arp.opcode == 1 else \'res\'}) from static VTEP: {mac} {ip}"\n )\n', (25554, 25641), False, 'import logging\n'), ((25657, 25761), 'logging.info', 'logging.info', (['f"""ARP packet:neither src={_ip.src} nor dst={_ip.dst} is EVPN vtep? {evpn_vteps}"""'], {}), "(\n f'ARP packet:neither src={_ip.src} nor dst={_ip.dst} is EVPN vtep? 
{evpn_vteps}'\n )\n", (25669, 25761), False, 'import logging\n'), ((26040, 26139), 'logging.info', 'logging.info', (['f"""VTEP {static_vtep} not configured in mac-vrf and auto-discovery disabled"""'], {}), "(\n f'VTEP {static_vtep} not configured in mac-vrf and auto-discovery disabled'\n )\n", (26052, 26139), False, 'import logging\n'), ((26184, 26254), 'logging.info', 'logging.info', (['f"""Dynamically adding auto-discovered VTEP {static_vtep}"""'], {}), "(f'Dynamically adding auto-discovered VTEP {static_vtep}')\n", (26196, 26254), False, 'import logging\n'), ((26785, 26864), 'logging.info', 'logging.info', (['f"""VNI {vni}: MAC {mac} already announced with VTEP {static_vtep}"""'], {}), "(f'VNI {vni}: MAC {mac} already announced with VTEP {static_vtep}')\n", (26797, 26864), False, 'import logging\n'), ((27914, 27983), 'logging.info', 'logging.info', (['f"""IP change detected: {cur[\'ip\']}->{ip}, updating EVPN"""'], {}), '(f"IP change detected: {cur[\'ip\']}->{ip}, updating EVPN")\n', (27926, 27983), False, 'import logging\n'), ((28391, 28476), 'logging.info', 'logging.info', (['f"""VTEP changed {cur[\'vtep\']}->{static_vtep}, withdrawing my route"""'], {}), '(f"VTEP changed {cur[\'vtep\']}->{static_vtep}, withdrawing my route"\n )\n', (28403, 28476), False, 'import logging\n'), ((28664, 28765), 'logging.info', 'logging.info', (['f"""EVPN route for {mac} already withdrawn triggered by other EVPN proxy route"""'], {}), "(\n f'EVPN route for {mac} already withdrawn triggered by other EVPN proxy route'\n )\n", (28676, 28765), False, 'import logging\n'), ((29375, 29389), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (29387, 29389), False, 'from datetime import datetime, timezone, timedelta\n'), ((30017, 30053), 'traceback.format_tb', 'traceback.format_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (30036, 30053), False, 'import traceback\n'), ((41622, 41686), 'logging.info', 'logging.info', (['f"""mac-vrf {mac_vrf_name} disabled, removing 
state"""'], {}), "(f'mac-vrf {mac_vrf_name} disabled, removing state')\n", (41634, 41686), False, 'import logging\n'), ((43049, 43109), 'logging.error', 'logging.error', (['f"""mac-vrf not found in state: {mac_vrf_name}"""'], {}), "(f'mac-vrf not found in state: {mac_vrf_name}')\n", (43062, 43109), False, 'import logging\n'), ((45763, 45805), 'sdk_service_pb2.AgentRegistrationRequest', 'sdk_service_pb2.AgentRegistrationRequest', ([], {}), '()\n', (45803, 45805), False, 'import sdk_service_pb2\n'), ((13238, 13296), 'logging.warning', 'logging.warning', (['f"""BGP: No mac-vrf mapping for VNI: {vni}"""'], {}), "(f'BGP: No mac-vrf mapping for VNI: {vni}')\n", (13253, 13296), False, 'import logging\n'), ((23997, 24027), 'logging.info', 'logging.info', (['f"""vni = {p.vni}"""'], {}), "(f'vni = {p.vni}')\n", (24009, 24027), False, 'import logging\n'), ((41519, 41586), 'logging.info', 'logging.info', (['"""BGP thread not running yet, postponing UpdateMACVRF"""'], {}), "('BGP thread not running yet, postponing UpdateMACVRF')\n", (41531, 41586), False, 'import logging\n'), ((13763, 13835), 'logging.info', 'logging.info', (['f"""EVPN MAC-move detected {cur[\'vtep\']} -> {event.nexthop}"""'], {}), '(f"EVPN MAC-move detected {cur[\'vtep\']} -> {event.nexthop}")\n', (13775, 13835), False, 'import logging\n'), ((40856, 40929), 'logging.info', 'logging.info', (['f"""VNI modified on {mac_vrf_name}: {orig_vrf[\'vni\']}->{vni}"""'], {}), '(f"VNI modified on {mac_vrf_name}: {orig_vrf[\'vni\']}->{vni}")\n', (40868, 40929), False, 'import logging\n'), ((41163, 41256), 'logging.info', 'logging.info', (['f"""EVI modified on {mac_vrf_name}: {state.mac_vrfs[vni][\'evi\']}->{new_evi}"""'], {}), '(\n f"EVI modified on {mac_vrf_name}: {state.mac_vrfs[vni][\'evi\']}->{new_evi}")\n', (41175, 41256), False, 'import logging\n'), ((14024, 14116), 'logging.info', 'logging.info', (['f"""Removing MAC moved to EVPN VTEP {event.nexthop} from EVPN proxy: {mac}"""'], {}), "(\n f'Removing MAC moved to EVPN 
VTEP {event.nexthop} from EVPN proxy: {mac}')\n", (14036, 14116), False, 'import logging\n'), ((15159, 15302), 'logging.info', 'logging.info', (['f"""Withdrawing MAC {mac} route announced by other EVPN proxy {originator_id.value} with different VTEP: {event.nexthop}"""'], {}), "(\n f'Withdrawing MAC {mac} route announced by other EVPN proxy {originator_id.value} with different VTEP: {event.nexthop}'\n )\n", (15171, 15302), False, 'import logging\n'), ((15479, 15581), 'logging.warning', 'logging.warning', (['"""TODO: Compare/update mobility sequence number, even if same VTEP nexthop?"""'], {}), "(\n 'TODO: Compare/update mobility sequence number, even if same VTEP nexthop?'\n )\n", (15494, 15581), False, 'import logging\n'), ((15019, 15110), 'logging.info', 'logging.info', (['f"""Local mobility sequence {cur[\'seq\']} higher than peer - keeping route"""'], {}), '(\n f"Local mobility sequence {cur[\'seq\']} higher than peer - keeping route")\n', (15031, 15110), False, 'import logging\n')] |
"""
My commenly used Modules:
(1) get_directory_name(caption) -- browses for directory name
(2) get_file_name(caption) -- browses for a specific file
(3) save_file_name(caption) -- saves named file to selected directory
(4) get_exif(fn) -- gets inag exif data
(5) get_gps_deg(exf) -- extracts GPS data from exif
(6) get_r_of_phi(gps) -- given gps coordinase calaculate Polar and Lateral radius of earth in feet
(7) calculate_distance(end_gps_deg,start_gps_deg) -- Calculates distance in feet between 2 GPS coordinats
(8) date_to_nth_day(date, format='%Y\%m\%d') -- calculates days since begining of year
(9) calculate_sun_angle(gps_deg,dt) -- calculates sun angle (declination and azmeth)
(10)
"""
import tkinter
from tkinter import *
from tkinter import filedialog
import PIL
from PIL import Image,ImageChops
from PIL.ExifTags import TAGS
from DateTime import DateTime
from datetime import datetime
import pandas as pd
import numpy
import math
# Shared hidden Tk root window used as the parent of all file dialogs below.
root = tkinter.Tk()
# Camera field of view in degrees (NOTE(review): inline comment previously
# said 78.9 while the value is 78.8 -- verify against the DJI Mavic Pro spec).
FOV = 78.8 #Mavic Pro camer field of view 78.9 Deg
EarthMeanRadius = 6371.01 # In km
AstronomicalUnit = 149597890 # In km
def get_directory_name(caption):
    """Show a directory-chooser dialog titled *caption* and return the path.

    Returns an empty string when the user cancels the dialog.
    """
    selection = tkinter.filedialog.askdirectory(parent=root, initialdir="/", title=caption)
    if selection:
        print(' You chose %s' % selection)
    return selection
def get_file_name(caption):
    """Show an open-file dialog titled *caption* and return the file object.

    The file is opened in binary mode ('rb'). Returns None when the user
    cancels the dialog.

    BUG FIX: the original read the whole file just to report its size and
    then returned the handle positioned at EOF, so callers could not read
    any data from it. The handle is now rewound before being returned.
    """
    file = tkinter.filedialog.askopenfile(parent=root, mode='rb', title=caption)
    if file is not None:
        data = file.read()
        print(" I got %d bytes from this file." % len(data))
        file.seek(0)  # rewind so the caller can actually read the contents
    return file
def save_file_name(caption):
    """Show a save-file dialog titled *caption* and return the chosen path.

    Returns an empty string when the user cancels the dialog.

    BUG FIX: the confirmation print referenced the undefined name
    'nomFichier' (a NameError whenever a file was chosen); it now prints
    the selected file name.
    """
    myFormats = [
        ('Windows Bitmap', '*.bmp'),
        ('Portable Network Graphics', '*.png'),
        ('JPEG / JFIF', '*.jpg'),
        ('CompuServer GIF', '*.gif'),
    ]
    fileName = tkinter.filedialog.asksaveasfilename(parent=root, filetypes=myFormats, title=caption)
    if len(fileName) > 0:
        print('Now saving under %s' % fileName)
    return fileName
def get_exif(fn):
    """Return a dict mapping EXIF tag names to their values for image *fn*.

    Tags without a known name in PIL.ExifTags.TAGS are keyed by their
    numeric tag id instead.

    ROBUSTNESS FIX: Image._getexif() returns None for images with no EXIF
    data, which made the original crash with a TypeError on .items();
    an empty dict is now returned in that case.
    """
    ret = {}
    img = PIL.Image.open(fn)
    info = img._getexif()  # private PIL API; None when the image has no EXIF
    if info is None:
        return ret
    for tag, value in info.items():
        decoded = TAGS.get(tag, tag)
        ret[decoded] = value
    return ret
def get_gps_deg(exf):
    """Convert the 'GPSInfo' EXIF block into [lat_deg, lon_deg, altitude].

    Latitude is negated for the southern hemisphere ('S') and longitude
    for the western hemisphere ('W'); altitude is returned as-is from the
    GPS rational value.
    """
    gps = exf['GPSInfo']

    def dms_to_degrees(entry):
        # entry holds three (numerator, denominator) rationals: deg, min, sec
        degrees = float(entry[0][0]) / float(entry[0][1])
        minutes = float(entry[1][0]) / (60. * float(entry[1][1]))
        seconds = float(entry[2][0]) / (3600 * float(entry[2][1]))
        return degrees + minutes + seconds

    latitude = dms_to_degrees(gps[2])
    if gps[1] == u'S':
        latitude = -latitude
    longitude = dms_to_degrees(gps[4])
    if gps[3] == u'W':
        longitude = -longitude
    altitude = float(gps[6][0]) / float(gps[6][1])
    return [latitude, longitude, altitude]
def get_r_of_phi(gps):
    """Return [r_meridian_ft, r_parallel_ft] of the Earth at latitude gps[0].

    Uses the geocentric-radius formula for an ellipsoid,
        r(phi) = a*b / sqrt(a^2 - (a^2 - b^2) * cos(phi)^2),
    which correctly reduces to the equatorial radius a at phi = 0 and the
    polar radius b at phi = 90 deg. The second element is the radius of the
    circle of latitude, r(phi) * cos(phi).

    BUG FIX: the original used cos(phi) instead of cos(phi)**2 under the
    square root, which only happened to agree at phi = 0 and 90 degrees.
    """
    phi = (math.pi / 180.) * gps[0]
    a = 7923.00 * 2640  # equatorial radius of Earth in feet (diameter mi * 2640 ft)
    b = 7899.86 * 2640  # polar radius of Earth in feet
    cos_phi = math.cos(phi)
    r = []
    r1 = a * b / math.sqrt(a * a - (a * a - b * b) * cos_phi * cos_phi)
    r.append(r1)
    r.append(r1 * cos_phi)
    return r
def calculate_distance(end_gps_deg, start_gps_deg):
    """Return [north_offset_ft, east_offset_ft] from start to end GPS point.

    The local Earth radii at the start latitude (from get_r_of_phi) scale
    the latitude/longitude differences from degrees to feet.
    """
    deg_to_rad = math.pi / 180
    radii = get_r_of_phi(start_gps_deg)
    delta_lat = deg_to_rad * radii[0] * (end_gps_deg[0] - start_gps_deg[0])
    delta_lon = deg_to_rad * radii[1] * (end_gps_deg[1] - start_gps_deg[1])
    return [delta_lat, delta_lon]
def date_to_nth_day(date, format='%Y/%m/%d'):
    """Return the 1-based day-of-year for *date* (a string matching *format*).

    BUG FIX: the default format was '%Y\\%m\\%d', which contains literal
    backslash characters and can never match the 'YYYY/MM/DD' strings that
    the call site (calculate_sun_angle, via DateTime.Date()) passes in;
    '/' is the intended separator. Explicit *format* arguments are
    unaffected.
    """
    date = pd.to_datetime(date, format=format)
    new_year_day = pd.Timestamp(year=date.year, month=1, day=1)
    return (date - new_year_day).days + 1
def calculate_sun_angle(gps_deg,dt):
    """Compute the sun's position for GPS location *gps_deg* at time *dt*.

    Parameters
    ----------
    gps_deg : [latitude_deg, longitude_deg, altitude], as from get_gps_deg()
    dt : a date/time value accepted by DateTime()

    Returns
    -------
    [altitude_angle, azimuth_angle] in radians; azimuth is in [0, 2*pi).
    """
    t = DateTime(dt) # Day of year calculation
    n = date_to_nth_day(t.Date())
    #print 'Day of the year', n
    azimuth_angle = 0.
    # NOTE(review): the 8.0 hard-codes a GMT-4 timezone (+12 for a 24-hour
    # clock), so this is wrong for other timezones/DST -- verify.
    solar_noon = 8.0 - (24.0/360.0)*gps_deg[1] # 8 = GMT-4 + 12 for 24 hour clock
    # Hours past local solar noon, converted to the hour angle at 15 deg/hour.
    angle_hour = DateTime.hour(dt) + DateTime.minute(dt)/60 + DateTime.second(dt)/3600 - solar_noon
    hour_angle = (math.pi/180.)*15.0*angle_hour
    #print 'solar noon', solar_noon, ' now ', dt, ' angle_hour ',angle_hour,' hour_angle ',hour_angle
    # Standard day-of-year approximation for solar declination:
    # 23.45 deg * sin(360 * (284 + n) / 365).
    declination_angle = 23.45*(math.pi/180.)*math.sin((math.pi/180.)*360.0*(284.0+n)/365.0)
    #print 'day ', n,'declination_angle', (180.0/math.pi)*declination_angle
    sin_declination_angle = math.sin(declination_angle)
    cos_declination_angle = math.cos(declination_angle)
    latitude_angle = (math.pi/180.)*gps_deg[0]
    cos_latatude_angle = math.cos(latitude_angle)
    sin_latatude_angle = math.sin(latitude_angle)
    cos_hour_angle = math.cos(hour_angle)
    # Solar elevation: sin(alt) = cos(lat)*cos(h)*cos(dec) + sin(dec)*sin(lat)
    altitude_angle = cos_latatude_angle*cos_hour_angle*cos_declination_angle
    altitude_angle += sin_declination_angle*sin_latatude_angle
    altitude_angle = math.asin(altitude_angle)
    #altitude_angle = (math.pi/180.0)*52.17
    # Azimuth from atan2 of the sun vector components, wrapped to [0, 2*pi).
    dY = -math.sin(hour_angle)
    dX = math.tan(declination_angle)*cos_latatude_angle - sin_latatude_angle*cos_hour_angle
    azimuth_angle = math.atan2(dY,dX)
    if (azimuth_angle < 0.0):
        azimuth_angle += 2.0*math.pi
    # parallax correction (computed but currently NOT applied -- see the
    # commented-out azimuth_angle += Parallax line below)
    Parallax=(EarthMeanRadius/AstronomicalUnit)*math.sin(altitude_angle)
    #print 'Parallax',Parallax
    #azimuth_angle += Parallax
    # both radii are in km, which does not matter since only their ratio is used
    #print 'altitude_angle',(180./math.pi)*altitude_angle, ' azimuth_angle ',(180./math.pi)* azimuth_angle
    return [altitude_angle, azimuth_angle]
| [
"tkinter.filedialog.askdirectory",
"PIL.Image.open",
"math.tan",
"DateTime.DateTime",
"math.asin",
"tkinter.filedialog.asksaveasfilename",
"PIL.ExifTags.TAGS.get",
"DateTime.DateTime.hour",
"math.cos",
"DateTime.DateTime.second",
"tkinter.Tk",
"DateTime.DateTime.minute",
"math.atan2",
"tki... | [((992, 1004), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (1002, 1004), False, 'import tkinter\n'), ((1186, 1261), 'tkinter.filedialog.askdirectory', 'tkinter.filedialog.askdirectory', ([], {'parent': 'root', 'initialdir': '"""/"""', 'title': 'caption'}), "(parent=root, initialdir='/', title=caption)\n", (1217, 1261), False, 'import tkinter\n'), ((1393, 1462), 'tkinter.filedialog.askopenfile', 'tkinter.filedialog.askopenfile', ([], {'parent': 'root', 'mode': '"""rb"""', 'title': 'caption'}), "(parent=root, mode='rb', title=caption)\n", (1423, 1462), False, 'import tkinter\n'), ((1821, 1910), 'tkinter.filedialog.asksaveasfilename', 'tkinter.filedialog.asksaveasfilename', ([], {'parent': 'root', 'filetypes': 'myFormats', 'title': 'caption'}), '(parent=root, filetypes=myFormats,\n title=caption)\n', (1857, 1910), False, 'import tkinter\n'), ((2051, 2069), 'PIL.Image.open', 'PIL.Image.open', (['fn'], {}), '(fn)\n', (2065, 2069), False, 'import PIL\n'), ((3559, 3594), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {'format': 'format'}), '(date, format=format)\n', (3573, 3594), True, 'import pandas as pd\n'), ((3619, 3663), 'pandas.Timestamp', 'pd.Timestamp', ([], {'year': 'date.year', 'month': '(1)', 'day': '(1)'}), '(year=date.year, month=1, day=1)\n', (3631, 3663), True, 'import pandas as pd\n'), ((3764, 3776), 'DateTime.DateTime', 'DateTime', (['dt'], {}), '(dt)\n', (3772, 3776), False, 'from DateTime import DateTime\n'), ((4433, 4460), 'math.sin', 'math.sin', (['declination_angle'], {}), '(declination_angle)\n', (4441, 4460), False, 'import math\n'), ((4491, 4518), 'math.cos', 'math.cos', (['declination_angle'], {}), '(declination_angle)\n', (4499, 4518), False, 'import math\n'), ((4593, 4617), 'math.cos', 'math.cos', (['latitude_angle'], {}), '(latitude_angle)\n', (4601, 4617), False, 'import math\n'), ((4644, 4668), 'math.sin', 'math.sin', (['latitude_angle'], {}), '(latitude_angle)\n', (4652, 4668), False, 'import math\n'), ((4695, 4715), 
'math.cos', 'math.cos', (['hour_angle'], {}), '(hour_angle)\n', (4703, 4715), False, 'import math\n'), ((4891, 4916), 'math.asin', 'math.asin', (['altitude_angle'], {}), '(altitude_angle)\n', (4900, 4916), False, 'import math\n'), ((5149, 5167), 'math.atan2', 'math.atan2', (['dY', 'dX'], {}), '(dY, dX)\n', (5159, 5167), False, 'import math\n'), ((2151, 2169), 'PIL.ExifTags.TAGS.get', 'TAGS.get', (['tag', 'tag'], {}), '(tag, tag)\n', (2159, 2169), False, 'from PIL.ExifTags import TAGS\n'), ((3112, 3125), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (3120, 3125), False, 'import math\n'), ((4280, 4335), 'math.sin', 'math.sin', (['(math.pi / 180.0 * 360.0 * (284.0 + n) / 365.0)'], {}), '(math.pi / 180.0 * 360.0 * (284.0 + n) / 365.0)\n', (4288, 4335), False, 'import math\n'), ((4993, 5013), 'math.sin', 'math.sin', (['hour_angle'], {}), '(hour_angle)\n', (5001, 5013), False, 'import math\n'), ((5316, 5340), 'math.sin', 'math.sin', (['altitude_angle'], {}), '(altitude_angle)\n', (5324, 5340), False, 'import math\n'), ((5040, 5067), 'math.tan', 'math.tan', (['declination_angle'], {}), '(declination_angle)\n', (5048, 5067), False, 'import math\n'), ((3998, 4015), 'DateTime.DateTime.hour', 'DateTime.hour', (['dt'], {}), '(dt)\n', (4011, 4015), False, 'from DateTime import DateTime\n'), ((4043, 4062), 'DateTime.DateTime.second', 'DateTime.second', (['dt'], {}), '(dt)\n', (4058, 4062), False, 'from DateTime import DateTime\n'), ((3066, 3079), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (3074, 3079), False, 'import math\n'), ((4018, 4037), 'DateTime.DateTime.minute', 'DateTime.minute', (['dt'], {}), '(dt)\n', (4033, 4037), False, 'from DateTime import DateTime\n')] |
import os
import numpy as np
import pandas as pd
import yaml
from . import model as model_lib
from . import training, tensorize, io_local
def main():
    """Train the iRT (retention time) and MS/MS fragmentation models end to end.

    Reads a preprocessed training CSV, tensorizes it, writes/reloads the data
    as HDF5, trains both models, and saves their weights under filenames that
    encode the epoch count and final validation loss.

    Side effects: reads/writes files under /root/training/ and mutates the
    model config YAML on disk. No return value.
    """
    #Turn off warnings:
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    ###Load training data - Put the path to your own data here
    training_data_path = "/root/training/training_preprocessed.csv"
    training_df = pd.read_csv(training_data_path)
    ###Dump all Peptides containing selenocystein
    training_df = training_df.loc[~training_df.modified_sequence.str.contains("U")]
    print("CSV Loaded, shape is {}.".format(training_df.shape))
    ###Load Untrained Retention Time Model and prepare its training data
    iRT_model_dir = "/root/training/iRT/"
    iRT_model, iRT_config = model_lib.load(iRT_model_dir, trained=False)
    iRT_callbacks = training.get_callbacks(iRT_model_dir)
    # Mean/variance of the raw retention times; stored in the config so
    # predictions can later be rescaled back from normalized space.
    iRT_raw_mean = training_df.uRT.mean()
    iRT_raw_var = training_df.uRT.var()
    iRT_config['iRT_rescaling_mean'] = float(iRT_raw_mean)
    iRT_config['iRT_rescaling_var'] = float(iRT_raw_var)
    with open(iRT_model_dir + "config_new.yml", "w") as config_outfile:
        yaml.dump(iRT_config, config_outfile)
    ###Load Untrained Fragmentation Model and prepare its training data
    msms_model_dir = "/root/training/msms/"
    msms_model, msms_config = model_lib.load(msms_model_dir, trained=False)
    msms_callbacks = training.get_callbacks(msms_model_dir)
    #The intensity lists are already in proper order, but might have some missing values and need to be padded to the correct length
    #(Only a peptide of the maximal length 29 will have 522 values, but all lists need to be of this length)
    intensities_length = 522
    print("iRT and Fragmentation Intensity Models Loaded.")
    #Compile the models once, and then call fit separately - useful if you lack memory or space and have to partition your training data
    training.compile_model(iRT_model, iRT_config)
    training.compile_model(msms_model, msms_config)
    training_tensorized = tensorize.csv(training_df[['modified_sequence', 'collision_energy', 'precursor_charge']], nlosses=3)
    print("CSV Tensorized.")
    # Normalize retention times to zero mean / unit variance, shaped (n, 1).
    training_tensorized['prediction'] = np.reshape(
        np.asarray((training_df.uRT - iRT_raw_mean) / np.sqrt(iRT_raw_var)),
        (-1,1))
    # NOTE(review): eval() executes arbitrary text from the CSV column;
    # ast.literal_eval would be safer — confirm the input file is trusted.
    training_df.relative_intensities = training_df.relative_intensities.apply(eval)
    # Pad each intensity list with -1 up to intensities_length, mapping NaN to -1.
    training_df.relative_intensities = training_df.relative_intensities.apply(
        lambda ls: np.nan_to_num(np.pad(ls, pad_width=(0,intensities_length-len(ls)),constant_values=-1, mode="constant"),-1))
    training_tensorized['intensities_raw'] = np.stack(training_df.relative_intensities)
    ###Write and reload training data in hdf5 format
    hdf5_path = "/root/training/training_data.hdf5"
    io_local.to_hdf5(training_tensorized,hdf5_path)
    print("Training Data Written to HDF5 File.")
    #Load the hdf5 again
    training_loaded = io_local.from_hdf5(hdf5_path)
    print("Training Data Reloaded from HDF5 File.\nCommencing Training of iRT Model...")
    ###Train both models
    iRT_history = training.train_model(training_loaded, iRT_model, iRT_config, iRT_callbacks)
    iRT_epochs = len(iRT_history.history['val_loss'])
    iRT_val_loss = iRT_history.history['val_loss'][-1]
    # Weight filename encodes epochs and final validation loss for traceability.
    iRT_weights_filename = "{}/weight_{:02d}_{:.5f}.hdf5".format(iRT_model_dir, iRT_epochs, iRT_val_loss)
    iRT_model.save_weights(iRT_weights_filename)
    print("Training of iRT Model Complete.\nCommencing Training of Fragmentation Intensity Model...")
    msms_history = training.train_model(training_loaded, msms_model, msms_config, msms_callbacks)
    #Save the weights to a file named by the val_loss and the epochs
    msms_epochs = len(msms_history.history['val_loss'])
    msms_val_loss = msms_history.history['val_loss'][-1]
    msms_weights_filename = "{}/weight_{:02d}_{:.5f}.hdf5".format(msms_model_dir, msms_epochs, msms_val_loss)
    msms_model.save_weights(msms_weights_filename)
    print("Training of Fragmentation Intensity Model Complete.")
    print("Done! You may now use these models for your predictions.")
# Script entry point: run the full training pipeline.
if __name__ == '__main__':
    main()
| [
"numpy.stack",
"numpy.sqrt",
"pandas.read_csv",
"yaml.dump"
] | [((368, 399), 'pandas.read_csv', 'pd.read_csv', (['training_data_path'], {}), '(training_data_path)\n', (379, 399), True, 'import pandas as pd\n'), ((2552, 2594), 'numpy.stack', 'np.stack', (['training_df.relative_intensities'], {}), '(training_df.relative_intensities)\n', (2560, 2594), True, 'import numpy as np\n'), ((1097, 1134), 'yaml.dump', 'yaml.dump', (['iRT_config', 'config_outfile'], {}), '(iRT_config, config_outfile)\n', (1106, 1134), False, 'import yaml\n'), ((2194, 2214), 'numpy.sqrt', 'np.sqrt', (['iRT_raw_var'], {}), '(iRT_raw_var)\n', (2201, 2214), True, 'import numpy as np\n')] |
from secml.testing import CUnitTest
from secml.core.attr_utils import extract_attr
class TestAttributeUtilities(CUnitTest):
    """Unit tests for attribute extraction in secml.core.attr_utils."""

    def test_extract_attr(self):
        """extract_attr must filter attributes by their accessibility code."""

        class _Probe:
            """Toy object with one attribute of each accessibility kind."""

            def __init__(self):
                self.a = 5    # public
                self._b = 5   # read-only (property below, no setter)
                self._c = 5   # read/write (property with setter below)
                self._d = 5   # protected (no property at all)

            @property
            def b(self):
                pass

            @property
            def c(self):
                pass

            @c.setter
            def c(self):
                pass

        probe = _Probe()

        self.logger.info(
            "Testing attributes extraction based on accessibility...")

        def assert_extracted(code, expected):
            extracted = {attr for attr in extract_attr(probe, code)}
            self.assertTrue(extracted == expected)

        # (accessibility code, expected attribute names)
        cases = [
            ('pub', {'a'}),
            ('r', {'_b'}),
            ('rw', {'_c'}),
            ('pub+r', {'a', '_b'}),
            ('pub+rw', {'a', '_c'}),
            ('pub+pro', {'a', '_d'}),
            ('r+rw', {'_b', '_c'}),
            ('r+pro', {'_b', '_d'}),
            ('rw+pro', {'_c', '_d'}),
            ('pub+r+rw', {'a', '_b', '_c'}),
            ('pub+r+pro', {'a', '_b', '_d'}),
            ('pub+rw+pro', {'a', '_c', '_d'}),
            ('pub+r+rw+pro', {'a', '_b', '_c', '_d'}),
        ]
        for code, expected in cases:
            assert_extracted(code, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    CUnitTest.main()
| [
"secml.core.attr_utils.extract_attr",
"secml.testing.CUnitTest.main"
] | [((1480, 1496), 'secml.testing.CUnitTest.main', 'CUnitTest.main', ([], {}), '()\n', (1494, 1496), False, 'from secml.testing import CUnitTest\n'), ((834, 855), 'secml.core.attr_utils.extract_attr', 'extract_attr', (['f', 'code'], {}), '(f, code)\n', (846, 855), False, 'from secml.core.attr_utils import extract_attr\n')] |
from time import perf_counter
import numpy as np
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import *
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from hpsklearn import HyperoptEstimator, svc, random_forest, knn
from hyperopt import tpe
from sklearn.metrics import f1_score
def scorer(yt, yp):
    """Hyperopt loss: 1 - macro F1, so minimizing the loss maximizes F1."""
    macro_f1 = f1_score(yt, yp, average='macro')
    return 1 - macro_f1
if __name__=='__main__':
    # Fixed seed for reproducible hyperparameter search.
    np.random.seed(42)
    # Pre-split feature/label arrays saved as .npy files.
    train_X = np.load('data/train_X.npy')
    test_X = np.load('data/test_X.npy')
    train_Y = np.load('data/train_Y.npy')
    test_Y = np.load('data/test_Y.npy')
    # TPE search over random-forest hyperparameters, minimizing 1 - macro F1.
    estim = HyperoptEstimator(classifier=random_forest('rf'),algo=tpe.suggest,loss_fn=scorer,max_evals=200,trial_timeout=1200)
    estim.fit(train_X, train_Y)
    yp = estim.predict(test_X)
    # Report held-out macro F1 of the best model found.
    print(f1_score(test_Y, yp, average='macro'))
"sklearn.metrics.f1_score",
"numpy.load",
"numpy.random.seed",
"hpsklearn.random_forest"
] | [((545, 563), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (559, 563), True, 'import numpy as np\n'), ((578, 605), 'numpy.load', 'np.load', (['"""data/train_X.npy"""'], {}), "('data/train_X.npy')\n", (585, 605), True, 'import numpy as np\n'), ((619, 645), 'numpy.load', 'np.load', (['"""data/test_X.npy"""'], {}), "('data/test_X.npy')\n", (626, 645), True, 'import numpy as np\n'), ((660, 687), 'numpy.load', 'np.load', (['"""data/train_Y.npy"""'], {}), "('data/train_Y.npy')\n", (667, 687), True, 'import numpy as np\n'), ((701, 727), 'numpy.load', 'np.load', (['"""data/test_Y.npy"""'], {}), "('data/test_Y.npy')\n", (708, 727), True, 'import numpy as np\n'), ((481, 514), 'sklearn.metrics.f1_score', 'f1_score', (['yt', 'yp'], {'average': '"""macro"""'}), "(yt, yp, average='macro')\n", (489, 514), False, 'from sklearn.metrics import f1_score\n'), ((929, 966), 'sklearn.metrics.f1_score', 'f1_score', (['test_Y', 'yp'], {'average': '"""macro"""'}), "(test_Y, yp, average='macro')\n", (937, 966), False, 'from sklearn.metrics import f1_score\n'), ((770, 789), 'hpsklearn.random_forest', 'random_forest', (['"""rf"""'], {}), "('rf')\n", (783, 789), False, 'from hpsklearn import HyperoptEstimator, svc, random_forest, knn\n')] |
import os
import sys
import shutil
import re
def print_m(msg):
    """Write *msg* to stdout in the terminal's default (reset) color."""
    reset = '\033[0m'
    sys.stdout.write(f'{reset}{msg}{reset}\n')
def print_w(warning):
    """Write *warning* to stdout highlighted in yellow, then reset the color."""
    yellow = '\033[93m'
    reset = '\033[0m'
    sys.stdout.write(f'{yellow}Warning: {warning} {reset}\n')
def print_e(error):
    """Write *error* to stdout highlighted in red, then reset the color."""
    red = '\033[91m'
    reset = '\033[0m'
    sys.stdout.write(f'{red}Error: {error} {reset}\n')
def file_exist(file_path, ext=''):
    """Return True if *file_path* is an existing regular file.

    When *ext* is non-empty, additionally require that *ext* occurs inside
    the file's extension (substring match, e.g. ext='txt' matches '.txt').
    """
    if not os.path.isfile(file_path):
        # Missing path or not a regular file.
        return False
    suffix = os.path.splitext(file_path)[1]
    return not ext or ext in suffix
def folder_exist(folder_path):
    """Return True if *folder_path* exists and is not a regular file."""
    return os.path.exists(folder_path) and not os.path.isfile(folder_path)
def make_clean_folder(path_folder):
    """Ensure *path_folder* exists and is empty, deleting any prior contents."""
    if os.path.exists(path_folder):
        # Wipe the existing directory tree before recreating it.
        shutil.rmtree(path_folder)
    os.makedirs(path_folder)
def sorted_alphanum(file_list):
    """Sort file names so that embedded digit runs compare numerically.

    :param file_list: a list of file names
    :return: the list sorted in natural (alphanumeric) order
    """
    if len(file_list) <= 1:
        # Nothing to sort; return the input unchanged.
        return file_list

    def natural_key(name):
        # Split on digit runs (kept via the capturing group) and convert
        # each numeric token to int so '10' sorts after '2'.
        return [int(tok) if tok.isdigit() else tok
                for tok in re.split('([0-9]+)', name)]

    return sorted(file_list, key=natural_key)
def get_file_list(path, ext=''):
    """Return the files directly inside *path*, naturally sorted.

    When *ext* is non-empty, keep only files whose extension contains *ext*
    (substring match). Raises OSError if *path* does not exist.
    """
    if not os.path.exists(path):
        raise OSError('Path {} not exist!'.format(path))
    matches = []
    for name in os.listdir(path):
        full = os.path.join(path, name)
        suffix = os.path.splitext(name)[1]
        if (not ext or ext in suffix) and os.path.isfile(full):
            matches.append(full)
    return sorted_alphanum(matches)
"os.path.exists",
"re.split",
"os.listdir",
"os.makedirs",
"os.path.splitext",
"os.path.join",
"os.path.isfile",
"shutil.rmtree",
"sys.stdout.write"
] | [((95, 145), 'sys.stdout.write', 'sys.stdout.write', (['f"""{msg_color}{msg}{msg_color}\n"""'], {}), "(f'{msg_color}{msg}{msg_color}\\n')\n", (111, 145), False, 'import sys\n'), ((228, 293), 'sys.stdout.write', 'sys.stdout.write', (['f"""{warn_color}Warning: {warning} {msg_color}\n"""'], {}), "(f'{warn_color}Warning: {warning} {msg_color}\\n')\n", (244, 293), False, 'import sys\n'), ((373, 433), 'sys.stdout.write', 'sys.stdout.write', (['f"""{err_color}Error: {error} {msg_color}\n"""'], {}), "(f'{err_color}Error: {error} {msg_color}\\n')\n", (389, 433), False, 'import sys\n'), ((1601, 1617), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1611, 1617), False, 'import os\n'), ((734, 761), 'os.path.isfile', 'os.path.isfile', (['folder_path'], {}), '(folder_path)\n', (748, 761), False, 'import os\n'), ((863, 890), 'os.path.exists', 'os.path.exists', (['path_folder'], {}), '(path_folder)\n', (877, 890), False, 'import os\n'), ((900, 924), 'os.makedirs', 'os.makedirs', (['path_folder'], {}), '(path_folder)\n', (911, 924), False, 'import os\n'), ((943, 969), 'shutil.rmtree', 'shutil.rmtree', (['path_folder'], {}), '(path_folder)\n', (956, 969), False, 'import shutil\n'), ((978, 1002), 'os.makedirs', 'os.makedirs', (['path_folder'], {}), '(path_folder)\n', (989, 1002), False, 'import os\n'), ((1482, 1502), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1496, 1502), False, 'import os\n'), ((482, 507), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (496, 507), False, 'import os\n'), ((515, 540), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (529, 540), False, 'import os\n'), ((703, 730), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (717, 730), False, 'import os\n'), ((1638, 1664), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1654, 1664), False, 'import os\n'), ((1362, 1387), 're.split', 're.split', (['"""([0-9]+)"""', 'key'], 
{}), "('([0-9]+)', key)\n", (1370, 1387), False, 'import re\n'), ((1727, 1755), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1739, 1755), False, 'import os\n'), ((1787, 1815), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1799, 1815), False, 'import os\n'), ((579, 606), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (595, 606), False, 'import os\n')] |
import time
import base64
import hashlib
import hmac
from requests.auth import AuthBase
class CoinbaseProAuth(AuthBase):
    """Attach Coinbase Pro (CB-ACCESS-*) authentication headers to a request.

    Implements the signing scheme documented by Coinbase Pro:
    https://docs.pro.coinbase.com/?python#signing-a-message
    """

    def __init__(self, api_key, secret_key, passphrase):
        self.api_key = api_key
        self.secret_key = secret_key
        self.passphrase = passphrase

    def __call__(self, request):
        # Prehash string: timestamp + method + path + body (empty if no body).
        timestamp = str(time.time())
        payload = request.body or ""
        prehash = (timestamp + request.method + request.path_url + payload).encode("ascii")
        # HMAC-SHA256 over the prehash, keyed with the base64-decoded secret.
        decoded_secret = base64.b64decode(self.secret_key)
        digest = hmac.new(decoded_secret, prehash, hashlib.sha256).digest()
        signature_b64 = base64.b64encode(digest)
        request.headers.update(
            {
                "CB-ACCESS-SIGN": signature_b64,
                "CB-ACCESS-TIMESTAMP": timestamp,
                "CB-ACCESS-KEY": self.api_key,
                "CB-ACCESS-PASSPHRASE": self.passphrase,
                "Content-Type": "application/json",
            }
        )
        return request
| [
"hmac.new",
"time.time",
"base64.b64decode"
] | [((635, 668), 'base64.b64decode', 'base64.b64decode', (['self.secret_key'], {}), '(self.secret_key)\n', (651, 668), False, 'import base64\n'), ((689, 732), 'hmac.new', 'hmac.new', (['hmac_key', 'message', 'hashlib.sha256'], {}), '(hmac_key, message, hashlib.sha256)\n', (697, 732), False, 'import hmac\n'), ((474, 485), 'time.time', 'time.time', ([], {}), '()\n', (483, 485), False, 'import time\n')] |
from django.conf import settings
from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post written by a site user."""
    # Deleting the author cascades and removes their posts.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # NOTE(review): auto_now=True re-stamps this on every save, so despite the
    # name it behaves as "last modified" — confirm auto_now_add wasn't intended.
    created_date = models.DateTimeField(auto_now=True)
    # Null until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)
    views = models.BigIntegerField(default=0)
    # Cover image stored under a year/month subfolder.
    cover = models.ImageField(blank=True, default=None, upload_to='images/%Y/%m/')
    attachment = models.FileField(blank=True, default=None, upload_to='attachment')
    tags = models.ManyToManyField('blog.Tag')
    def publish(self):
        # Stamp the publication time and persist immediately.
        self.published_date = timezone.now()
        self.save()
    def approved_comments(self):
        # Uses the 'comments' reverse relation declared on Comment.post.
        return self.comments.filter(approved_comment=True)
    def likes_count(self):
        # Number of PostLike rows pointing at this post.
        return PostLike.objects.filter(post=self).count()
    def __str__(self):
        return '{} ({})'.format(self.title, self.author)
class Tag(models.Model):
    """A short label that can be attached to any number of posts."""
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
class PostLike(models.Model):
    """A single user's "like" of a single post."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    post = models.ForeignKey('blog.Post', on_delete=models.CASCADE)
    # NOTE(review): auto_now=True updates on every save; for a like this is
    # effectively creation time only if rows are never re-saved — confirm.
    created_date = models.DateTimeField(auto_now=True)
    def __str__(self):
        return '{} - {}'.format(self.post.title, self.user)
class Comment(models.Model):
    """A reader comment on a post; hidden until a moderator approves it."""
    # related_name='comments' enables post.comments reverse lookups.
    post = models.ForeignKey('blog.Post', on_delete=models.CASCADE, related_name='comments')
    # Free-text author name (not a user foreign key).
    author = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    approved_comment = models.BooleanField(default=False)
    def approve(self):
        # Mark as approved and persist immediately.
        self.approved_comment = True
        self.save()
    def __str__(self):
        return self.text
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.FileField",
"django.db.models.BooleanField",
"django.utils.timezone.now",
"django.db.models.ImageField",
"django.db.models.BigIntegerField",
"django.db.models.DateTimeField",
"django.... | [((136, 205), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE'}), '(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n', (153, 205), False, 'from django.db import models\n'), ((218, 250), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (234, 250), False, 'from django.db import models\n'), ((262, 280), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (278, 280), False, 'from django.db import models\n'), ((300, 335), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (320, 335), False, 'from django.db import models\n'), ((357, 400), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (377, 400), False, 'from django.db import models\n'), ((413, 446), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (435, 446), False, 'from django.db import models\n'), ((459, 529), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'default': 'None', 'upload_to': '"""images/%Y/%m/"""'}), "(blank=True, default=None, upload_to='images/%Y/%m/')\n", (476, 529), False, 'from django.db import models\n'), ((547, 613), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'default': 'None', 'upload_to': '"""attachment"""'}), "(blank=True, default=None, upload_to='attachment')\n", (563, 613), False, 'from django.db import models\n'), ((625, 659), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""blog.Tag"""'], {}), "('blog.Tag')\n", (647, 659), False, 'from django.db import models\n'), ((1046, 1077), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (1062, 1077), False, 'from django.db import models\n'), ((1169, 1238), 
'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE'}), '(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n', (1186, 1238), False, 'from django.db import models\n'), ((1250, 1306), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""blog.Post"""'], {'on_delete': 'models.CASCADE'}), "('blog.Post', on_delete=models.CASCADE)\n", (1267, 1306), False, 'from django.db import models\n'), ((1326, 1361), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1346, 1361), False, 'from django.db import models\n'), ((1487, 1573), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""blog.Post"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""comments"""'}), "('blog.Post', on_delete=models.CASCADE, related_name=\n 'comments')\n", (1504, 1573), False, 'from django.db import models\n'), ((1582, 1614), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1598, 1614), False, 'from django.db import models\n'), ((1626, 1644), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1642, 1644), False, 'from django.db import models\n'), ((1664, 1706), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (1684, 1706), False, 'from django.db import models\n'), ((1730, 1764), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1749, 1764), False, 'from django.db import models\n'), ((714, 728), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (726, 728), False, 'from django.utils import timezone\n')] |
import socket
import threading
import sys
import subprocess
from colorama import init
from colorama import Fore, Back, Style
init()
def sending(server):
    """Read lines from the user and send them to *server* until 'quit'.

    Runs forever in its own thread; exits the whole process on 'quit'.
    """
    while True:
        # Current username: last component of `whoami` output (strips the
        # Windows DOMAIN\\ prefix when present).
        user = subprocess.run(['whoami'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip().split('\\')[-1]
        print(Fore.GREEN + user + ":", end=" ")
        msg = input()
        if msg == 'quit':
            # NOTE(review): `server.close` is never called — missing `()`,
            # so the socket is not actually closed before exiting.
            server.close
            sys.exit()
        #print ('outgoing:', msg)
        colonstring = ": "
        # Prefix with newline + color so the remote side renders it cleanly.
        toSend = "\n" + Fore.GREEN + user + colonstring + msg
        server.sendall(toSend.encode('utf-8'))
def listening(server):
    """Continuously receive messages from *server* and print them.

    Runs forever in its own thread; blocks on recv between messages.
    """
    while True:
        print(server.recv(1024).decode('utf-8') + "\n")
        # Re-print the green prompt color after each incoming message.
        print(Fore.GREEN, end=" ")
# Create a TCP socket for the chat connection.
server = socket.socket()
# Ask the user which local port the chat server listens on.
port = int(input('Enter port\n'))
# Connect to the chat server on localhost.
server.connect(('127.0.0.1', port))
print('creating threads')
# One thread receives messages, the other reads stdin and sends.
listener = threading.Thread(target = listening, args = [server])
sender = threading.Thread(target = sending, args = [server])
listener.start()
sender.start()
| [
"socket.socket",
"subprocess.run",
"sys.exit",
"threading.Thread",
"colorama.init"
] | [((125, 131), 'colorama.init', 'init', ([], {}), '()\n', (129, 131), False, 'from colorama import init\n'), ((787, 802), 'socket.socket', 'socket.socket', ([], {}), '()\n', (800, 802), False, 'import socket\n'), ((1003, 1052), 'threading.Thread', 'threading.Thread', ([], {'target': 'listening', 'args': '[server]'}), '(target=listening, args=[server])\n', (1019, 1052), False, 'import threading\n'), ((1066, 1113), 'threading.Thread', 'threading.Thread', ([], {'target': 'sending', 'args': '[server]'}), '(target=sending, args=[server])\n', (1082, 1113), False, 'import threading\n'), ((434, 444), 'sys.exit', 'sys.exit', ([], {}), '()\n', (442, 444), False, 'import sys\n'), ((186, 236), 'subprocess.run', 'subprocess.run', (["['whoami']"], {'stdout': 'subprocess.PIPE'}), "(['whoami'], stdout=subprocess.PIPE)\n", (200, 236), False, 'import subprocess\n')] |
#!/usr/bin/env python
# coding: utf-8
import random
import matplotlib.pyplot as plt
# The six cities and the symmetric distance matrix between them,
# represented as a dictionary of per-city distance dictionaries.
cities = ['A', 'B', 'C', 'E', 'M', 'S']
A = {'A': 0, 'B': 10, 'C': 15, 'E': 14, 'M': 11, 'S': 10}
B = {'A': 10, 'B': 0, 'C': 8, 'E': 13, 'M': 15, 'S': 9}
C = {'A': 15, 'B': 8, 'C': 0, 'E': 11, 'M': 16, 'S': 10}
E = {'A': 14, 'B': 13, 'C': 11, 'E': 0, 'M': 9, 'S': 6}
M = {'A': 11, 'B': 15, 'C': 16, 'E': 9, 'M': 0, 'S': 9}
S = {'A': 10, 'B': 9, 'C': 10, 'E': 6, 'M': 9, 'S': 0}
Graph = {'A': A, 'B': B, 'C': C, 'E': E, 'M': M, 'S': S}
# Every unordered pair of cities is a candidate swap move
# (15 pairs for 6 cities).
all_swaps = [(cities[i], cities[j])
             for i in range(len(cities))
             for j in range(i + 1, len(cities))]
# Total length of the closed tour that visits the cities in `path` order.
def total_distance(path):
    """Return the length of the cycle path[0] -> ... -> path[-1] -> path[0].

    :param path: a sequence of city names (a path, not a cycle)
    """
    tour = list(path) + [path[0]]
    return sum(Graph[a][b] for a, b in zip(tour, tour[1:]))
# Neighbourhood move: exchange the positions of two cities in the path.
# If no pair is supplied, two cities are chosen at random.
def swap_neighbour(path, swap=None):
    """Return a copy of *path* with the two cities in *swap* exchanged.

    :param path: current tour as a list of city names
    :param swap: optional pair of cities to exchange; random when None
    """
    if swap is None:
        swap = random.sample(path, 2)
    first = path.index(swap[0])
    second = path.index(swap[1])
    neighbour = list(path)
    neighbour[first], neighbour[second] = neighbour[second], neighbour[first]
    return neighbour
return new_path
# We need to keep all neighbours of a path as candidates
# compute their fit and find the best fitting neighbour
class Neighbour:
def __init__(self, path, swap):
self.swap = swap
self.path = swap_neighbour(path, swap)
self.fpath = total_distance(self.path)
# Initialization
MaxIter = 20
# TABU list holds the two most recent dis-allowed swaps; seeded with
# dummy pairs so indexing [0] and [-1] works from the first iteration.
TABU_list = [('A','A'), ('B', 'B')] # A list of two elements; initial values not relevant
# Initial solution construction phase
# Generate an initial solution at random
# Note that the solution is represented as a sequence of cities (a path not a cycle)
sol = random.sample(cities, 6)
fsol = total_distance(sol)
solbest = sol
fsolbest = fsol
# keep a list of fsol and fsolbest for plotting
y_fsol = [fsol]
y_fsolbest = [fsolbest]
print("Iteration: 0, Best solution: " + '-'.join(solbest) + ", Best Fit: " + str(fsolbest))
# Improvement phase
for iteration in range(MaxIter):
    # find all neighbours of sol (one per possible swap)
    all_neighbours = []
    for swap in all_swaps:
        all_neighbours.append(Neighbour(sol, swap))
    # sort the neighbours in increasing order of fit/total distance
    sorted_neighbours = sorted(all_neighbours, key=lambda obj: obj.fpath)
    # if the top element is in tabu list don't take it
    # but we need to check for Aspiration; that means if the tabu neighbour gives better solution than the
    # fsolbest then we should take it
    for neigh in sorted_neighbours:
        # check if it is in tabu list
        if neigh.swap in TABU_list:
            # check for Aspiration: a tabu move is allowed when it beats the best-so-far
            if neigh.fpath < fsolbest:
                sol = neigh.path
                fsol = neigh.fpath
                solbest = neigh.path
                fsolbest = neigh.fpath
                # modify tabu list: shift and record the swap just taken
                TABU_list[-1] = TABU_list[0]
                TABU_list[0] = neigh.swap
                break
            # else go to the next element in the candidate list of neighbours
        else:
            if neigh.fpath < fsolbest:
                # update the best fit
                solbest = neigh.path
                fsolbest = neigh.fpath
            # we should treat the top non-tabu neighbour as the new sol,
            # even if it is worse (this is how tabu search escapes optima)
            sol = neigh.path
            fsol = neigh.fpath
            # modify tabu list: shift and record the swap just taken
            TABU_list[-1] = TABU_list[0]
            TABU_list[0] = neigh.swap
            break
    # Add to the list for plotting
    y_fsol.append(fsol)
    y_fsolbest.append(fsolbest)
    print("Iteration: " + str(iteration+1) + " Best solution: " + '-'.join(solbest) + " Best Fit: "+str(fsolbest))
print("Best solution: " + '-'.join(solbest))
print("Shortest Distance: " + str(fsolbest))
# Plotting in matplotlib: current fit (solid blue) vs best fit (dotted red)
x = range(1, MaxIter+2)
plt.plot(x, y_fsol, 'b', label='fSol')
plt.plot(x, y_fsolbest, ':r', label='fSolBest')
plt.xlabel('Iteration')
plt.ylabel('Total Distance')
plt.title('TABU Search')
plt.legend()
plt.show()
| [
"random.sample",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((2439, 2463), 'random.sample', 'random.sample', (['cities', '(6)'], {}), '(cities, 6)\n', (2452, 2463), False, 'import random\n'), ((4586, 4624), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_fsol', '"""b"""'], {'label': '"""fSol"""'}), "(x, y_fsol, 'b', label='fSol')\n", (4594, 4624), True, 'import matplotlib.pyplot as plt\n'), ((4625, 4672), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_fsolbest', '""":r"""'], {'label': '"""fSolBest"""'}), "(x, y_fsolbest, ':r', label='fSolBest')\n", (4633, 4672), True, 'import matplotlib.pyplot as plt\n'), ((4673, 4696), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (4683, 4696), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4725), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Distance"""'], {}), "('Total Distance')\n", (4707, 4725), True, 'import matplotlib.pyplot as plt\n'), ((4726, 4750), 'matplotlib.pyplot.title', 'plt.title', (['"""TABU Search"""'], {}), "('TABU Search')\n", (4735, 4750), True, 'import matplotlib.pyplot as plt\n'), ((4751, 4763), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4761, 4763), True, 'import matplotlib.pyplot as plt\n'), ((4764, 4774), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4772, 4774), True, 'import matplotlib.pyplot as plt\n'), ((1468, 1490), 'random.sample', 'random.sample', (['path', '(2)'], {}), '(path, 2)\n', (1481, 1490), False, 'import random\n')] |
from django.contrib import admin
from . import models
class SightingAdmin(admin.ModelAdmin):
    """Admin configuration for Sighting: columns, date drill-down, search."""
    list_display = ('superhero', 'power', 'location', 'sighted_on')
    # Drill down sightings by the sighted_on date.
    date_hierarchy = 'sighted_on'
    search_fields = ['superhero']
    ordering = ['superhero']
# Register models with the admin site; Sighting gets the customized admin.
admin.site.register(models.Origin)
admin.site.register(models.Location)
admin.site.register(models.Sighting, SightingAdmin)
| [
"django.contrib.admin.site.register"
] | [((262, 296), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Origin'], {}), '(models.Origin)\n', (281, 296), False, 'from django.contrib import admin\n'), ((297, 333), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Location'], {}), '(models.Location)\n', (316, 333), False, 'from django.contrib import admin\n'), ((334, 385), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Sighting', 'SightingAdmin'], {}), '(models.Sighting, SightingAdmin)\n', (353, 385), False, 'from django.contrib import admin\n')] |
print ('mp4を動画と音声に分割する')
import os
import datetime
import ffmpeg
from pydub import AudioSegment
from ConfigX import ConfigX
# Search `s` from the right for `mark` and return the text after it.
# @param string s target string
# @param string mark marker string
# @return the substring to the right of the last occurrence of mark
def stringRightRev(s, mark):
    cut = s.rfind(mark)
    return s[cut + len(mark):]
# Search `s` from the right for `mark` and return the text before it.
# @param string s target string
# @param string mark marker string
# @return the substring to the left of the last occurrence of mark
def stringLeftRev(s, mark):
    cut = s.rfind(mark)
    return s[0:cut]
# Split an mp4 into a silent mp4 (video only) and an mp3 (audio only).
# @param string input_fp path of the input video file
# @param string output_mp4_fp path of the output video file (mp4 without audio)
# @param string output_mp3_fp path of the output audio file (mp3)
# NOTE(review): this function uses cv2 (OpenCV), but `import cv2` is missing
# from this file's imports — it will raise NameError at runtime.
def splitVideoAndSound(input_fp, output_mp4_fp, output_mp3_fp):
    sound = AudioSegment.from_file(input_fp, "mp4")
    sound_sec = sound.duration_seconds # playback length in seconds
    print(f'音声ファイルの長さ={str(sound_sec)}')
    movie = cv2.VideoCapture(input_fp)
    width = movie.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = movie.get(cv2.CAP_PROP_FRAME_HEIGHT)
    video_frame_count = movie.get(cv2.CAP_PROP_FRAME_COUNT) # total frame count
    video_fps = movie.get(cv2.CAP_PROP_FPS)
    print(f'入力動画ファイルのFPS={str(video_fps)}')
    #video_time = (video_fps * video_frame_count) / 1000
    print(f'入力動画ファイルのフレーム数={str(video_frame_count)}')
    # Recompute the output fps so video length matches the audio length.
    output_video_fps = video_frame_count / sound_sec
    print(f'出力動画ファイルのfps={str(output_video_fps)}')
    # Prepare the output video file (mp4)
    fourcc = cv2.VideoWriter_fourcc('m','p','4','v')
    output = cv2.VideoWriter(output_mp4_fp, int(fourcc), output_video_fps, (int(width), int(height)))
    print(f'{output_mp4_fp}の出力開始')
    while True:
        ret, frame = movie.read()
        # NOTE(review): the frame is written before `ret` is checked, so one
        # invalid frame is passed to the writer at end of stream — confirm.
        output.write(frame)
        if not ret:
            break
    # Release the capture object
    movie.release()
    print(f'{output_mp4_fp}の出力完了')
    # Export the audio track
    sound.export(output_mp3_fp, format="mp3")
    print(f'{output_mp3_fp}の出力完了')
# Timestamp suffix (minutes+seconds) used to make output filenames unique.
dt = datetime.datetime.now()
u = dt.strftime("%M%S")
configX = ConfigX();
configs = configX.getConfigs('./config.txt');
input_mp4_fp = configs['input_mp4_fp'];
print('入力ファイルパスmp4→' + input_mp4_fp)
# position = configs['position']
# volume = configs['volume']
# print('position=' + position)
# print('volume=' + volume)
# position = int(position);
# volume = int(volume);
# Abort unless the input file has an .mp4 extension (case-insensitive).
ext = stringRightRev(input_mp4_fp, '.')
ext = ext.lower()
if ext != 'mp4':
    print('mp4ファイルでありません。処理を中断します。')
    exit()
# Derive output paths by appending the timestamp suffix to the base name.
left_path = stringLeftRev(input_mp4_fp, '.')
output_mp4_fp = left_path + '_' + u + '.mp4'
output_mp3_fp = left_path + '_' + u + '.mp3'
print('出力ファイルパスmp4→' + output_mp4_fp)
print('出力ファイルパスmp3→' + output_mp3_fp)
# Probe the video's playback duration via ffmpeg.
probe = ffmpeg.probe(input_mp4_fp)
movInfo = probe['streams'][0];
mov_duration = float(movInfo['duration']); # video playback duration
# Extract the original audio track from the video.
print('mp4読み込み中')
audio1 = AudioSegment.from_file(input_mp4_fp, "mp4")
audio1_duration =audio1.duration_seconds ; # playback duration of audio1
# If the audio is noticeably shorter than the video, pad it with silence.
if mov_duration > audio1_duration + 2:
    print('動画の再生時間→' + str(mov_duration))
    print('音声の再生時間→' + str(audio1_duration))
    add_ms = (mov_duration - audio1_duration) * 1000;
    print(audio1_duration)
    add_silent = AudioSegment.silent(duration=add_ms) # silence of add_ms milliseconds
    audio1 = audio1 + add_silent;
    print(audio1.duration_seconds )
print('mp3読み込み中')
# NOTE(review): `input_mp3_fp` is never defined anywhere in this script —
# this line raises NameError at runtime.
audio2 = AudioSegment.from_file(input_mp3_fp, "mp3")
print('mp3の音量調整')
audio2 = audio2 - 10 # lower the volume by 10 dB
# Mix the original audio with the added track.
print('音声のミキシング中')
# NOTE(review): `position` is undefined — its assignment is commented out
# in the config-reading section above.
audio3 = audio1.overlay(audio2, position=position)
print('ミキシングした音声を仮出力')
# NOTE(review): `tmp_fn` (temporary mixed-audio filename) is never defined.
audio3.export(tmp_fn, format="mp3")
# Remove a pre-existing output file, if any.
print('既存ファイルを除去')
if os.path.exists(output_mp4_fp):
    os.remove(output_mp4_fp)
# Re-mux the original video stream with the mixed audio (no re-encode).
print('映像と音声の再結合処理中...')
stream_video = ffmpeg.input(input_mp4_fp)
stream_audio = ffmpeg.input(tmp_fn)
stream = ffmpeg.output(stream_video, stream_audio, output_mp4_fp, vcodec="copy", acodec="copy")
print('最終出力中...')
ffmpeg.run(stream)
os.remove(tmp_fn)
input_fp = 'test_data/MVI_0887.MP4'
output_mp4_fp = 'test_data/output10.mp4'
output_mp3_fp = 'test_data/output10.mp3'
splitVideoAndSound(input_fp, output_mp4_fp, output_mp3_fp)
print('Success')
print('Success!')
| [
"os.path.exists",
"ffmpeg.output",
"ffmpeg.input",
"ffmpeg.run",
"datetime.datetime.now",
"ffmpeg.probe",
"pydub.AudioSegment.from_file",
"pydub.AudioSegment.silent",
"ConfigX.ConfigX",
"os.remove"
] | [((1905, 1928), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1926, 1928), False, 'import datetime\n'), ((1964, 1973), 'ConfigX.ConfigX', 'ConfigX', ([], {}), '()\n', (1971, 1973), False, 'from ConfigX import ConfigX\n'), ((2645, 2671), 'ffmpeg.probe', 'ffmpeg.probe', (['input_mp4_fp'], {}), '(input_mp4_fp)\n', (2657, 2671), False, 'import ffmpeg\n'), ((2796, 2839), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['input_mp4_fp', '"""mp4"""'], {}), "(input_mp4_fp, 'mp4')\n", (2818, 2839), False, 'from pydub import AudioSegment\n'), ((3264, 3307), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['input_mp3_fp', '"""mp3"""'], {}), "(input_mp3_fp, 'mp3')\n", (3286, 3307), False, 'from pydub import AudioSegment\n'), ((3539, 3568), 'os.path.exists', 'os.path.exists', (['output_mp4_fp'], {}), '(output_mp4_fp)\n', (3553, 3568), False, 'import os\n'), ((3650, 3676), 'ffmpeg.input', 'ffmpeg.input', (['input_mp4_fp'], {}), '(input_mp4_fp)\n', (3662, 3676), False, 'import ffmpeg\n'), ((3692, 3712), 'ffmpeg.input', 'ffmpeg.input', (['tmp_fn'], {}), '(tmp_fn)\n', (3704, 3712), False, 'import ffmpeg\n'), ((3722, 3812), 'ffmpeg.output', 'ffmpeg.output', (['stream_video', 'stream_audio', 'output_mp4_fp'], {'vcodec': '"""copy"""', 'acodec': '"""copy"""'}), "(stream_video, stream_audio, output_mp4_fp, vcodec='copy',\n acodec='copy')\n", (3735, 3812), False, 'import ffmpeg\n'), ((3827, 3845), 'ffmpeg.run', 'ffmpeg.run', (['stream'], {}), '(stream)\n', (3837, 3845), False, 'import ffmpeg\n'), ((3847, 3864), 'os.remove', 'os.remove', (['tmp_fn'], {}), '(tmp_fn)\n', (3856, 3864), False, 'import os\n'), ((731, 770), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['input_fp', '"""mp4"""'], {}), "(input_fp, 'mp4')\n", (753, 770), False, 'from pydub import AudioSegment\n'), ((3125, 3161), 'pydub.AudioSegment.silent', 'AudioSegment.silent', ([], {'duration': 'add_ms'}), '(duration=add_ms)\n', (3144, 3161), False, 'from pydub import 
AudioSegment\n'), ((3574, 3598), 'os.remove', 'os.remove', (['output_mp4_fp'], {}), '(output_mp4_fp)\n', (3583, 3598), False, 'import os\n')] |
"""
Module:
unicon.plugins.nd
Authors:
<NAME> (<EMAIL>)
Description:
This subpackage implements ND
"""
# from unicon.plugins.linux import LinuxConnection
from unicon.plugins.linux import LinuxConnection,LinuxServiceList
from unicon.plugins.linux.statemachine import LinuxStateMachine
from unicon.plugins.linux.connection_provider import LinuxConnectionProvider
from unicon.plugins.linux.settings import LinuxSettings
# from unicon.plugins.confd import ConfdConnection, ConfdServiceList, ConfdConnectionProvider
# from unicon.plugins.confd.settings import ConfdSettings
class NDConnection(LinuxConnection):
    """
    Connection class for ND connections.
    Extends the Linux connection to function with 'nd' os.

    All behavior (state machine, connection provider, services, settings)
    is reused unchanged from the Linux plugin; only the ``os`` token
    differs so Unicon selects this plugin for devices declared with
    ``os='nd'``.
    """
    # Plugin selection key: devices with os='nd' are routed here.
    os = 'nd'
    # Reuse the Linux plugin machinery wholesale.
    state_machine_class = LinuxStateMachine
    connection_provider_class = LinuxConnectionProvider
    subcommand_list = LinuxServiceList
    # NOTE(review): instantiated at class level, so this settings object is
    # shared by every connection of this class — confirm that is intended.
    settings = LinuxSettings()
| [
"unicon.plugins.linux.settings.LinuxSettings"
] | [((908, 923), 'unicon.plugins.linux.settings.LinuxSettings', 'LinuxSettings', ([], {}), '()\n', (921, 923), False, 'from unicon.plugins.linux.settings import LinuxSettings\n')] |
"""Test responses from Denon/Marantz."""
from pyavreceiver.denon.response import DenonMessage
def test_separate(message_none):
    """Test separation of messages into (command, parameter, value)."""
    cases = [
        ("PWON", ("PW", None, "ON")),
        ("PWSTANDBY", ("PW", None, "STANDBY")),
        ("MVMAX 80", ("MV", "MAX", "80")),
        ("CVFL 60 ", ("CV", "FL", "60")),
        ("CVFL60", ("CV", "FL", "60")),
        ("CV FHL 44", ("CV", "FHL", "44")),
        ("CVNEW SPEC 55", ("CV", "NEW SPEC", "55")),
        ("CVUNKNOWNCOMMAND55", ("CV", "UNKNOWNCOMMAND55", None)),
        ("MUON", ("MU", None, "ON")),
        ("SIPHONO", ("SI", None, "PHONO")),
        ("SI PHONO ", ("SI", None, "PHONO")),
        ("SIUSB DIRECT", ("SI", None, "USB DIRECT")),
        ("SINEW SOURCE VARIETY", ("SI", None, "NEW SOURCE VARIETY")),
        ("SLPOFF", ("SLP", None, "OFF")),
        ("SLP OFF", ("SLP", None, "OFF")),
        ("MSDOLBY D+ +PL2X C", ("MS", None, "DOLBY D+ +PL2X C")),
        ("MSYET ANOTHER POINTLESS DSP", ("MS", None, "YET ANOTHER POINTLESS DSP")),
        ("PSDELAY 000", ("PS", "DELAY", "000")),
        ("PSTONE CTRL ON", ("PS", "TONE CTRL", "ON")),
        ("PSTONE CTRLOFF", ("PS", "TONE CTRL", "OFF")),
        ("PSSB MTRX ON", ("PS", "SB", "MTRX ON")),
        ("PSSB ON", ("PS", "SB", "ON")),
        ("PSMULTEQ BYP.LR", ("PS", "MULTEQ", "BYP.LR")),
        ("PSDCO OFF", ("PS", "DCO", "OFF")),
        ("PSLFE -8", ("PS", "LFE", "-8")),
        ("PSNEWPARAM OK", ("PS", "NEWPARAM", "OK")),
        ("PSUNKNOWNCOMMAND55", ("PS", "UNKNOWNCOMMAND55", None)),
        ("MV60", ("MV", None, "60")),
        ("MV595", ("MV", None, "595")),
        ("Z2PSBAS 51", ("Z2PS", "BAS", "51")),
        ("Z260", ("Z2", None, "60")),
        ("Z2ON", ("Z2", None, "ON")),
        ("Z2PHONO", ("Z2", None, "PHONO")),
        ("Z3PSBAS 51", ("Z3PS", "BAS", "51")),
        ("Z360", ("Z3", None, "60")),
        ("Z3ON", ("Z3", None, "ON")),
        ("Z3PHONO", ("Z3", None, "PHONO")),
        ("NEWCMD 50", ("NEWCMD", None, "50")),
        ("NEWCMD WITH PARAMS 50", ("NEWCMD WITH PARAMS", None, "50")),
        ("UNPARSABLE", ("UNPARSABLE", None, None)),
        ("FAKEFOR TESTS", ("FAKEFO", None, "R TESTS")),
        ("FAKENORTEST", ("FAKEN", "OR", "TEST")),
    ]
    # Each raw message must split into the exact expected triple.
    for raw, expected in cases:
        assert message_none.separate(raw) == expected, raw
def test_format_db(message_none):
    """Test conversion of raw volume/level strings to decibel values."""
    expectations = [
        ("MV", None, "60", -20),
        ("MV", None, "595", -20.5),
        ("MV", None, "80", 0),
        ("MV", None, "805", 0.5),
        ("MV", None, "00", -80),
        ("MV", "MAX", "80", 0),
        ("CV", "FL", "50", 0),
        ("CV", "SL", "39", -11),
        ("CV", "FHL", "545", 4.5),
        ("SSLEV", "FL", "50", 0),
        ("PS", "BAS", "50", 0),
        ("PS", "BAS", "39", -11),
        ("PS", "TRE", "545", 4.5),
        ("PS", "LFE", "-6", -6),
        ("Z2", None, "60", -20),
        ("Z2", None, "595", -20.5),
        ("Z2", None, "80", 0),
        ("Z2", None, "805", 0.5),
        ("Z2", None, "00", -80),
    ]
    for cmd, prm, val, decibel in expectations:
        assert message_none.parse_value(cmd, prm, val) == decibel, (cmd, prm, val)
def test_attribute_assignment(command_dict):
    """Test assignment of message attributes after parsing."""

    def check(raw, parsed, *, text=None, message=None, raw_value=None,
              group=None):
        # Build one message and verify only the expectations supplied.
        msg = DenonMessage(raw, command_dict)
        assert msg.parsed == parsed
        if text is not None:
            assert str(msg) == text
            assert repr(msg) == text
        if message is not None:
            assert msg.message == message
        if raw_value is not None:
            assert msg.raw_value == raw_value
        if group is not None:
            assert msg.group == group

    check("PWON", ("PW", None, "ON"), text="PWON", group="PW")
    check("MV75", ("MV", None, -5), message="MV75", raw_value="75")
    check("MVMAX 80", ("MV", "MAX", 0), message="MVMAX 80", raw_value="80")
    check("CVFL 51", ("CV", "FL", 1), message="CVFL 51", raw_value="51",
          group="CVFL")
    check("MSDOLBY D+ +PL2X C", ("MS", None, "DOLBY D+ +PL2X C"))
    check("PSDYNVOL LOW", ("PS", "DYNVOL", "LOW"), message="PSDYNVOL LOW",
          raw_value="LOW", group="PSDYNVOL")
    check("PSDELAY 000", ("PS", "DELAY", "000"), message="PSDELAY 000",
          raw_value="000", group="PSDELAY")
def test_state_update_dict(command_dict):
    """Test creation of the state-update dict from raw messages."""
    table = {
        "PWON": {"power": True},
        "MVMAX 80": {"max_volume": 0},
        "PWSTANDBY": {"power": False},
        "MV75": {"volume": -5},
        "MV56": {"volume": -24},
        "CVFL 51": {"channel_level_fl": 1},
        "SSLEVFL 50": {"channel_level_fl": 0},
        "PSNEWPARAM LOW": {"PS_NEWPARAM": "LOW"},
        "MSDOLBY D+ +PL2X C": {"sound_mode": "DOLBY D+ +PL2X C"},
        "PSBAS 39": {"bass": -11},
        "MUON": {"mute": True},
        "SIPHONO": {"source": "PHONO"},
        "SIBD": {"source": "BD"},
        "SINEW SOURCE TYPE": {"source": "NEW SOURCE TYPE"},
        "DCAUTO": {"digital_signal_mode": "AUTO"},
        "PSTONE CTRL ON": {"tone_control": True},
        "PSSBMTRX ON": {"surround_back": "MTRX ON"},
        "PSDYNVOL MED": {"dsp_dynamic_range_control": "medium"},
        "NEWPARAM ANYVALUE": {"NEWPARAM": "ANYVALUE"},
        "PSNEWPARAM ANYVALUE": {"PS_NEWPARAM": "ANYVALUE"},
        "PSNEWPARAM": {"PS_NEWPARAM": None},
    }
    for raw, expected in table.items():
        assert DenonMessage(raw, command_dict).state_update == expected, raw
def test_bad_value_handling(command_dict):
    """Test error handling for values that don't conform to spec."""
    # A non-numeric value yields a None entry for the mapped attribute.
    update = DenonMessage("MVSTRING", command_dict).state_update
    assert update == {"volume_string": None}
    # An out-of-range numeric value yields no state update at all.
    update = DenonMessage("MV1000", command_dict).state_update
    assert update == {}
def test_multiple_types(command_dict):
    """Test handling multiple value types for the same parameter."""
    # PSDIL accepts both on/off tokens and numeric level strings.
    for raw, level in [
        ("PSDIL OFF", False),
        ("PSDIL ON", True),
        ("PSDIL 55", 5),
        ("PSDIL 45", -5),
    ]:
        assert DenonMessage(raw, command_dict).state_update == {
            "dialog_level": level
        }, raw
def test_unnamed_param(command_dict):
    """Test an unnamed parsed parameter falls back to the raw key/value."""
    update = DenonMessage("PSDELAY 000", command_dict).state_update
    assert update == {"PS_DELAY": "000"}
def test_zones(command_dict):
    """Test parsing of main-zone and multi-zone commands."""
    zone_cases = [
        ("ZMON", {"zone1_power": True}),
        ("ZMOFF", {"zone1_power": False}),
        ("Z2PSBAS 51", {"zone2_bass": 1}),
        ("Z3PSTRE 445", {"zone3_treble": -5.5}),
        ("Z260", {"zone2_volume": -20}),
        ("Z2ON", {"zone2_power": True}),
        ("Z2PHONO", {"zone2_source": "PHONO"}),
        ("Z2SOURCE", {"zone2_source": "SOURCE"}),
        ("Z360", {"zone3_volume": -20}),
        ("Z3OFF", {"zone3_power": False}),
        ("Z3SOURCE", {"zone3_source": "SOURCE"}),
    ]
    for raw, expected in zone_cases:
        assert DenonMessage(raw, command_dict).state_update == expected, raw
def test_sequence(command_dict):
    """Test that parsing a long sequence of commands never raises.

    Bug fix: the original ``seq`` list was written without commas between
    the string literals, so Python's implicit string-literal concatenation
    collapsed the whole "sequence" into a single garbage command — only one
    message was ever parsed.  Each command is now a separate element.
    """
    seq = [
        "PW?",
        "PWON",
        "MV56",
        "MVMAX 80",
        "MUOFF",
        "SITV",
        "SVOFF",
        "PSDYNVOL OFF",
        "PWON",
        "PWON",
        "MV56",
        "MVMAX 80",
    ]
    for command in seq:
        DenonMessage(command, command_dict)
    # Garbage and edge-case input must be tolerated without raising too.
    invalid_seq = [
        "90f9jf3^F*)UF(U(*#fjliuF(#)U(F@ujniljf(@#)&%T^GHkjbJBVKjY*(Y#*(@&5-00193ljl",
        "",
        " ",
        " b b b ",
        ".:':>,",
        "578934",
        "None",
        "\r",
        "MV ",
        " MV",
    ]
    for command in invalid_seq:
        DenonMessage(command, command_dict)
def test_learning_commands(command_dict):
    """Test saving learned (previously unknown) commands."""
    # A fully known command learns nothing.
    assert DenonMessage("PWON", command_dict).new_command is None

    learned = DenonMessage("PWSCREENSAVER", command_dict)
    assert learned.new_command == {
        "cmd": "PW",
        "prm": None,
        "val": "SCREENSAVER",
    }

    learned = DenonMessage("PSNEW", command_dict)
    assert learned.new_command == {
        "cmd": "PS",
        "prm": "NEW",
        "val": None,
    }

    # The parser matches param to "EFF" and then sees "ECT" as value
    # - this is not ideal behavior - the parser should know that "ECT"
    # as an argument should be preceded by a space
    assert DenonMessage("PSEFFECT", command_dict).parsed == ("PS", "EFF", "ECT")
    assert DenonMessage("PSEFF ECT", command_dict).parsed == ("PS", "EFF", "ECT")

    learned = DenonMessage("CVATMOS RIGHT 52", command_dict)
    assert learned.new_command == {
        "cmd": "CV",
        "prm": "ATMOS RIGHT",
        "val": "52",
    }

    learned = DenonMessage("NEWCMD MEDIUM", command_dict)
    assert learned.new_command == {
        "cmd": "NEWCMD",
        "prm": None,
        "val": "MEDIUM",
    }

    # Unparsable input must not produce a learned command either.
    assert DenonMessage("UNPARSABLE", command_dict).new_command is None
| [
"pyavreceiver.denon.response.DenonMessage"
] | [((4813, 4847), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PWON"""', 'command_dict'], {}), "('PWON', command_dict)\n", (4825, 4847), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((4993, 5027), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MV75"""', 'command_dict'], {}), "('MV75', command_dict)\n", (5005, 5027), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((5147, 5185), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MVMAX 80"""', 'command_dict'], {}), "('MVMAX 80', command_dict)\n", (5159, 5185), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((5309, 5346), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""CVFL 51"""', 'command_dict'], {}), "('CVFL 51', command_dict)\n", (5321, 5346), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((5499, 5547), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MSDOLBY D+ +PL2X C"""', 'command_dict'], {}), "('MSDOLBY D+ +PL2X C', command_dict)\n", (5511, 5547), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((5617, 5659), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSDYNVOL LOW"""', 'command_dict'], {}), "('PSDYNVOL LOW', command_dict)\n", (5629, 5659), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((5830, 5871), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSDELAY 000"""', 'command_dict'], {}), "('PSDELAY 000', command_dict)\n", (5842, 5871), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((10494, 10529), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['command', 'command_dict'], {}), '(command, command_dict)\n', (10506, 10529), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((10892, 10927), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['command', 'command_dict'], {}), '(command, command_dict)\n', (10904, 
10927), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6122, 6156), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PWON"""', 'command_dict'], {}), "('PWON', command_dict)\n", (6134, 6156), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6200, 6238), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MVMAX 80"""', 'command_dict'], {}), "('MVMAX 80', command_dict)\n", (6212, 6238), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6284, 6323), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PWSTANDBY"""', 'command_dict'], {}), "('PWSTANDBY', command_dict)\n", (6296, 6323), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6368, 6402), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MV75"""', 'command_dict'], {}), "('MV75', command_dict)\n", (6380, 6402), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6445, 6479), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MV56"""', 'command_dict'], {}), "('MV56', command_dict)\n", (6457, 6479), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6523, 6560), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""CVFL 51"""', 'command_dict'], {}), "('CVFL 51', command_dict)\n", (6535, 6560), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6612, 6652), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""SSLEVFL 50"""', 'command_dict'], {}), "('SSLEVFL 50', command_dict)\n", (6624, 6652), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6718, 6762), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSNEWPARAM LOW"""', 'command_dict'], {}), "('PSNEWPARAM LOW', command_dict)\n", (6730, 6762), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6827, 6875), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MSDOLBY D+ +PL2X 
C"""', 'command_dict'], {}), "('MSDOLBY D+ +PL2X C', command_dict)\n", (6839, 6875), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((6952, 6990), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSBAS 39"""', 'command_dict'], {}), "('PSBAS 39', command_dict)\n", (6964, 6990), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((7032, 7066), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MUON"""', 'command_dict'], {}), "('MUON', command_dict)\n", (7044, 7066), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((7109, 7146), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""SIPHONO"""', 'command_dict'], {}), "('SIPHONO', command_dict)\n", (7121, 7146), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((7194, 7228), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""SIBD"""', 'command_dict'], {}), "('SIBD', command_dict)\n", (7206, 7228), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((7273, 7320), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""SINEW SOURCE TYPE"""', 'command_dict'], {}), "('SINEW SOURCE TYPE', command_dict)\n", (7285, 7320), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((7392, 7428), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""DCAUTO"""', 'command_dict'], {}), "('DCAUTO', command_dict)\n", (7404, 7428), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((7502, 7546), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSTONE CTRL ON"""', 'command_dict'], {}), "('PSTONE CTRL ON', command_dict)\n", (7514, 7546), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((7611, 7652), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSSBMTRX ON"""', 'command_dict'], {}), "('PSSBMTRX ON', command_dict)\n", (7623, 7652), False, 'from pyavreceiver.denon.response import DenonMessage\n'), 
((7723, 7765), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSDYNVOL MED"""', 'command_dict'], {}), "('PSDYNVOL MED', command_dict)\n", (7735, 7765), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((7847, 7894), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""NEWPARAM ANYVALUE"""', 'command_dict'], {}), "('NEWPARAM ANYVALUE', command_dict)\n", (7859, 7894), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((7961, 8010), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSNEWPARAM ANYVALUE"""', 'command_dict'], {}), "('PSNEWPARAM ANYVALUE', command_dict)\n", (7973, 8010), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((8080, 8120), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSNEWPARAM"""', 'command_dict'], {}), "('PSNEWPARAM', command_dict)\n", (8092, 8120), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((8298, 8336), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MVSTRING"""', 'command_dict'], {}), "('MVSTRING', command_dict)\n", (8310, 8336), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((8402, 8438), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""MV1000"""', 'command_dict'], {}), "('MV1000', command_dict)\n", (8414, 8438), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((8559, 8598), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSDIL OFF"""', 'command_dict'], {}), "('PSDIL OFF', command_dict)\n", (8571, 8598), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((8664, 8702), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSDIL ON"""', 'command_dict'], {}), "('PSDIL ON', command_dict)\n", (8676, 8702), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((8753, 8791), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSDIL 55"""', 'command_dict'], {}), 
"('PSDIL 55', command_dict)\n", (8765, 8791), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((8839, 8877), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSDIL 45"""', 'command_dict'], {}), "('PSDIL 45', command_dict)\n", (8851, 8877), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9010, 9051), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSDELAY 000"""', 'command_dict'], {}), "('PSDELAY 000', command_dict)\n", (9022, 9051), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9169, 9203), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""ZMON"""', 'command_dict'], {}), "('ZMON', command_dict)\n", (9181, 9203), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9253, 9288), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""ZMOFF"""', 'command_dict'], {}), "('ZMOFF', command_dict)\n", (9265, 9288), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9340, 9380), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""Z2PSBAS 51"""', 'command_dict'], {}), "('Z2PSBAS 51', command_dict)\n", (9352, 9380), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9426, 9467), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""Z3PSTRE 445"""', 'command_dict'], {}), "('Z3PSTRE 445', command_dict)\n", (9438, 9467), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9533, 9567), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""Z260"""', 'command_dict'], {}), "('Z260', command_dict)\n", (9545, 9567), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9617, 9651), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""Z2ON"""', 'command_dict'], {}), "('Z2ON', command_dict)\n", (9629, 9651), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9701, 9738), 'pyavreceiver.denon.response.DenonMessage', 
'DenonMessage', (['"""Z2PHONO"""', 'command_dict'], {}), "('Z2PHONO', command_dict)\n", (9713, 9738), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9806, 9844), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""Z2SOURCE"""', 'command_dict'], {}), "('Z2SOURCE', command_dict)\n", (9818, 9844), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9914, 9948), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""Z360"""', 'command_dict'], {}), "('Z360', command_dict)\n", (9926, 9948), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((9998, 10033), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""Z3OFF"""', 'command_dict'], {}), "('Z3OFF', command_dict)\n", (10010, 10033), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((10084, 10122), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""Z3SOURCE"""', 'command_dict'], {}), "('Z3SOURCE', command_dict)\n", (10096, 10122), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((11023, 11057), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PWON"""', 'command_dict'], {}), "('PWON', command_dict)\n", (11035, 11057), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((11089, 11132), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PWSCREENSAVER"""', 'command_dict'], {}), "('PWSCREENSAVER', command_dict)\n", (11101, 11132), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((11239, 11274), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSNEW"""', 'command_dict'], {}), "('PSNEW', command_dict)\n", (11251, 11274), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((11567, 11605), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSEFFECT"""', 'command_dict'], {}), "('PSEFFECT', command_dict)\n", (11579, 11605), False, 'from pyavreceiver.denon.response import 
DenonMessage\n'), ((11648, 11687), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""PSEFF ECT"""', 'command_dict'], {}), "('PSEFF ECT', command_dict)\n", (11660, 11687), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((11731, 11777), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""CVATMOS RIGHT 52"""', 'command_dict'], {}), "('CVATMOS RIGHT 52', command_dict)\n", (11743, 11777), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((11884, 11927), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""NEWCMD MEDIUM"""', 'command_dict'], {}), "('NEWCMD MEDIUM', command_dict)\n", (11896, 11927), False, 'from pyavreceiver.denon.response import DenonMessage\n'), ((12033, 12073), 'pyavreceiver.denon.response.DenonMessage', 'DenonMessage', (['"""UNPARSABLE"""', 'command_dict'], {}), "('UNPARSABLE', command_dict)\n", (12045, 12073), False, 'from pyavreceiver.denon.response import DenonMessage\n')] |
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
def rbf_kernel_pca(X, gamma, n_components):
    """
    RBF kernel PCA implementation.

    Parameters
    ------------
    X: {NumPy ndarray}, shape = [n_samples, n_features]
    gamma: float
        Tuning parameter of the RBF kernel
    n_components: int
        Number of principal components to return

    Returns
    ------------
    X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
        Projected dataset.  Column i is sqrt(lambda_i) * a_i, i.e. the
        normalized eigenvector a_i of the centered kernel matrix scaled
        by the square root of its eigenvalue — the samples expressed in
        the i-th kernel principal-component direction (matches the
        scaling used by scikit-learn's KernelPCA transform).
    """
    # Pairwise squared Euclidean distances in the MxN dimensional
    # dataset, converted into a symmetric square matrix.
    sq_dists = pdist(X, 'sqeuclidean')
    mat_sq_dists = squareform(sq_dists)
    # Symmetric RBF kernel matrix.  np.exp replaces the scipy.exp alias,
    # which was deprecated and removed from the SciPy namespace.
    K = np.exp(-gamma * mat_sq_dists)
    # Center the kernel matrix in feature space:
    #   K' = K - 1_n K - K 1_n + 1_n K 1_n
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # scipy.linalg.eigh returns eigenpairs in ascending order, so the
    # top components sit at the end of the arrays.
    eigvals, eigvecs = eigh(K)
    # Projection: X v_i = sigma_i a_i with sigma_i = sqrt(lambda_i), so
    # scaling each top eigenvector by sqrt(lambda_i) yields the data in
    # the principal-component basis.  (Debug prints of the top two
    # singular values were removed from library code.)
    X_pc = np.column_stack([np.sqrt(eigvals[-i]) * eigvecs[:, -i]
                     for i in range(1, n_components + 1)])
    return X_pc
##
# 本文の後で出てくるバージョン
# 計算は一緒で返すものが違うだけ
# 固有値と固有ベクトルを返す
#
def rbf_kernel_pca2(X, gamma, n_components):
    """
    RBF kernel PCA implementation returning eigenpairs.

    Same computation as ``rbf_kernel_pca`` but returns the unscaled top
    eigenvectors together with their eigenvalues, so callers can project
    new samples themselves.

    Parameters
    ------------
    X: {NumPy ndarray}, shape = [n_samples, n_features]
    gamma: float
        Tuning parameter of the RBF kernel
    n_components: int
        Number of principal components to return

    Returns
    ------------
    alphas: {NumPy ndarray}, shape = [n_samples, k_features]
        Normalized top eigenvectors of the centered kernel matrix
    lambdas: list
        Corresponding eigenvalues, largest first
    """
    # Pairwise squared Euclidean distances, as a square matrix.
    sq_dists = pdist(X, 'sqeuclidean')
    mat_sq_dists = squareform(sq_dists)
    # Symmetric RBF kernel matrix.  np.exp replaces the scipy.exp alias,
    # which was deprecated and removed from the SciPy namespace.
    K = np.exp(-gamma * mat_sq_dists)
    # Center the kernel matrix in feature space.
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # eigh returns eigenpairs in ascending order; collect the top k.
    # (A list comprehension replaces the generator previously fed to
    # np.column_stack, which NumPy only supported incidentally.)
    eigvals, eigvecs = eigh(K)
    alphas = np.column_stack([eigvecs[:, -i]
                     for i in range(1, n_components + 1)])
    lambdas = [eigvals[-i] for i in range(1, n_components + 1)]
    return alphas, lambdas
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
##
# カーネルを線形にしてみた
# test_kpca.py で使う
#
def linear_kernel_pca(X, n_components):
    """
    Linear kernel PCA implementation (equivalent to standard PCA).

    Parameters
    ------------
    X: {NumPy ndarray}, shape = [n_samples, n_features]
    n_components: int
        Number of principal components to return

    Returns
    ------------
    X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
        Projected dataset.  Column i is sqrt(lambda_i) * a_i — the
        samples expressed in the i-th principal-component direction.
    """
    # Linear kernel: K[i, j] = <x_i, x_j>.  One matrix product replaces
    # the original O(N^2) Python double loop; cast to float so integer
    # input cannot produce an integer Gram matrix.
    Xf = np.asarray(X, dtype=float)
    K = Xf @ Xf.T
    # Center the kernel matrix (equivalent to centering the data).
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # eigh returns eigenpairs in ascending order.
    eigvals, eigvecs = eigh(K)
    # X v_i = sigma_i a_i with sigma_i = sqrt(lambda_i): scaling each top
    # eigenvector by sqrt(lambda_i) yields the projected samples.
    # (Debug prints of the top two singular values were removed.)
    X_pc = np.column_stack([np.sqrt(eigvals[-i]) * eigvecs[:, -i]
                     for i in range(1, n_components + 1)])
    return X_pc
| [
"scipy.linalg.eigh",
"scipy.spatial.distance.squareform",
"numpy.sqrt",
"numpy.ones",
"scipy.exp",
"scipy.spatial.distance.pdist",
"numpy.dot"
] | [((660, 683), 'scipy.spatial.distance.pdist', 'pdist', (['X', '"""sqeuclidean"""'], {}), "(X, 'sqeuclidean')\n", (665, 683), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((759, 779), 'scipy.spatial.distance.squareform', 'squareform', (['sq_dists'], {}), '(sq_dists)\n', (769, 779), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((832, 858), 'scipy.exp', 'exp', (['(-gamma * mat_sq_dists)'], {}), '(-gamma * mat_sq_dists)\n', (835, 858), False, 'from scipy import exp\n'), ((1138, 1145), 'scipy.linalg.eigh', 'eigh', (['K'], {}), '(K)\n', (1142, 1145), False, 'from scipy.linalg import eigh\n'), ((2460, 2483), 'scipy.spatial.distance.pdist', 'pdist', (['X', '"""sqeuclidean"""'], {}), "(X, 'sqeuclidean')\n", (2465, 2483), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2559, 2579), 'scipy.spatial.distance.squareform', 'squareform', (['sq_dists'], {}), '(sq_dists)\n', (2569, 2579), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2632, 2658), 'scipy.exp', 'exp', (['(-gamma * mat_sq_dists)'], {}), '(-gamma * mat_sq_dists)\n', (2635, 2658), False, 'from scipy import exp\n'), ((2938, 2945), 'scipy.linalg.eigh', 'eigh', (['K'], {}), '(K)\n', (2942, 2945), False, 'from scipy.linalg import eigh\n'), ((3906, 3921), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (3913, 3921), True, 'import numpy as np\n'), ((4317, 4324), 'scipy.linalg.eigh', 'eigh', (['K'], {}), '(K)\n', (4321, 4324), False, 'from scipy.linalg import eigh\n'), ((923, 938), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (930, 938), True, 'import numpy as np\n'), ((1156, 1176), 'numpy.sqrt', 'np.sqrt', (['eigvals[-1]'], {}), '(eigvals[-1])\n', (1163, 1176), True, 'import numpy as np\n'), ((1188, 1208), 'numpy.sqrt', 'np.sqrt', (['eigvals[-2]'], {}), '(eigvals[-2])\n', (1195, 1208), True, 'import numpy as np\n'), ((2723, 2738), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (2730, 2738), True, 'import numpy 
as np\n'), ((4102, 4117), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (4109, 4117), True, 'import numpy as np\n'), ((4335, 4355), 'numpy.sqrt', 'np.sqrt', (['eigvals[-1]'], {}), '(eigvals[-1])\n', (4342, 4355), True, 'import numpy as np\n'), ((4367, 4387), 'numpy.sqrt', 'np.sqrt', (['eigvals[-2]'], {}), '(eigvals[-2])\n', (4374, 4387), True, 'import numpy as np\n'), ((3994, 4018), 'numpy.dot', 'np.dot', (['X[i, :]', 'X[j, :]'], {}), '(X[i, :], X[j, :])\n', (4000, 4018), True, 'import numpy as np\n'), ((1516, 1536), 'numpy.sqrt', 'np.sqrt', (['eigvals[-i]'], {}), '(eigvals[-i])\n', (1523, 1536), True, 'import numpy as np\n'), ((4695, 4715), 'numpy.sqrt', 'np.sqrt', (['eigvals[-i]'], {}), '(eigvals[-i])\n', (4702, 4715), True, 'import numpy as np\n')] |
# Generated by Django 3.0.4 on 2020-03-26 14:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a two-letter ``lang`` field (default ``'en'``) to ``business.Request``."""

    # Must be applied after the previous business-app migration.
    dependencies = [
        ('business', '0030_auto_20200326_1533'),
    ]

    operations = [
        # Language code column; existing rows are backfilled with 'en'.
        migrations.AddField(
            model_name='request',
            name='lang',
            field=models.CharField(default='en', max_length=2),
        ),
    ]
| [
"django.db.models.CharField"
] | [((334, 378), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""en"""', 'max_length': '(2)'}), "(default='en', max_length=2)\n", (350, 378), False, 'from django.db import migrations, models\n')] |
"""The test for the sensibo update platform."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import patch
from pysensibo.model import SensiboData
from pytest import MonkeyPatch
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.util import dt
from tests.common import async_fire_time_changed
async def test_select(
    hass: HomeAssistant,
    load_int: ConfigEntry,
    monkeypatch: MonkeyPatch,
    get_data: SensiboData,
) -> None:
    """Test the Sensibo update entities before and after a firmware upgrade."""
    # Hallway device reports installed SKY30046 with SKY30048 available -> ON.
    state1 = hass.states.get("update.hallway_update_available")
    state2 = hass.states.get("update.kitchen_update_available")
    assert state1.state == STATE_ON
    assert state1.attributes["installed_version"] == "SKY30046"
    assert state1.attributes["latest_version"] == "SKY30048"
    assert state1.attributes["title"] == "skyv2"
    assert state2.state == STATE_OFF
    # Simulate the hallway device finishing its firmware upgrade.
    monkeypatch.setattr(get_data.parsed["ABC999111"], "fw_ver", "SKY30048")
    with patch(
        "homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
        return_value=get_data,
    ):
        # Advance time past the coordinator's refresh interval so the
        # patched device data is fetched.
        async_fire_time_changed(
            hass,
            dt.utcnow() + timedelta(minutes=5),
        )
        await hass.async_block_till_done()
    # Installed version now matches the latest -> no update pending.
    state1 = hass.states.get("update.hallway_update_available")
    assert state1.state == STATE_OFF
| [
"homeassistant.util.dt.utcnow",
"datetime.timedelta",
"unittest.mock.patch"
] | [((1097, 1220), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data"""'], {'return_value': 'get_data'}), "(\n 'homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data'\n , return_value=get_data)\n", (1102, 1220), False, 'from unittest.mock import patch\n'), ((1298, 1309), 'homeassistant.util.dt.utcnow', 'dt.utcnow', ([], {}), '()\n', (1307, 1309), False, 'from homeassistant.util import dt\n'), ((1312, 1332), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (1321, 1332), False, 'from datetime import timedelta\n')] |
import subprocess
from flask import Flask
from os import environ
# Script started as the bot child process.
BOT_START_FILE = 'run_bot.py'

# Interpreter used to spawn the bot.
# for start PYTHON_PROCESS = 'python3'
# for testing
PYTHON_PROCESS = r"C:\Python3.7\python.exe"

app = Flask(__name__)


@app.route("/", methods=["GET"])
def index():
    """Health-check endpoint confirming the wrapper is alive."""
    return "Bot is On"


print(f"Running {BOT_START_FILE}")
# Launch the bot in parallel; the Flask app below only serves the status page.
bot_process = subprocess.Popen([PYTHON_PROCESS, BOT_START_FILE])
app.run()
| [
"subprocess.Popen",
"flask.Flask"
] | [((201, 216), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (206, 216), False, 'from flask import Flask\n'), ((339, 389), 'subprocess.Popen', 'subprocess.Popen', (['[PYTHON_PROCESS, BOT_START_FILE]'], {}), '([PYTHON_PROCESS, BOT_START_FILE])\n', (355, 389), False, 'import subprocess\n')] |
__all__ = [
"BoltQuoteHelper",
"rewrite_traceback",
"fake_traceback",
"internal",
"INTERNAL_CODE",
"SAFE_BUILTINS",
]
from bisect import bisect
from dataclasses import dataclass, field
from types import CodeType, TracebackType
from typing import Dict, List, Set, TypeVar
from mecha.utils import QuoteHelperWithUnicode, string_to_number
T = TypeVar("T")

# Code objects whose frames are hidden from rewritten tracebacks.
INTERNAL_CODE: Set[CodeType] = {string_to_number.__code__}


def internal(f: T) -> T:
    """Register *f* as internal so rewrite_traceback() strips its frames."""
    INTERNAL_CODE.add(f.__code__)  # type: ignore
    return f
SAFE_BUILTINS: List[str] = [
"abs",
"all",
"any",
"ascii",
"bin",
"bool",
"callable",
"chr",
"dict",
"divmod",
"enumerate",
"filter",
"float",
"frozenset",
"hasattr",
"hash",
"hex",
"id",
"int",
"isinstance",
"issubclass",
"iter",
"len",
"list",
"map",
"max",
"min",
"next",
"object",
"oct",
"ord",
"pow",
"print",
"range",
"repr",
"reversed",
"round",
"set",
"slice",
"sorted",
"str",
"sum",
"tuple",
"type",
"zip",
]
@dataclass
class BoltQuoteHelper(QuoteHelperWithUnicode):
    """Quote helper used for bolt."""

    # Backslash escape sequences recognised in bolt string literals, mapped
    # to the character they produce; default_factory keeps the mutable dict
    # per-instance.
    escape_sequences: Dict[str, str] = field(
        default_factory=lambda: {
            r"\\": "\\",
            r"\f": "\f",
            r"\n": "\n",
            r"\r": "\r",
            r"\t": "\t",
        }
    )
def rewrite_traceback(exc: Exception) -> Exception:
    """Strip internal frames from *exc*'s traceback and remap line numbers.

    Frames whose code object is registered in ``INTERNAL_CODE`` are dropped.
    Frames whose globals carry ``_mecha_lineno`` (two parallel sequences
    mapping generated to original line numbers) get a synthetic traceback
    pointing at the translated line. Returns *exc* with the rebuilt chain.
    """
    # Skip the outermost frame (this rewriting machinery itself).
    tb = exc.__traceback__ and exc.__traceback__.tb_next
    stack: List[TracebackType] = []
    while tb is not None:
        if tb.tb_frame.f_code in INTERNAL_CODE:
            tb = tb.tb_next
            continue
        line_numbers = tb.tb_frame.f_globals.get("_mecha_lineno")
        if line_numbers:
            n1, n2 = line_numbers
            # Translate the generated tb_lineno back to the source line.
            lineno = n2[bisect(n1, tb.tb_lineno) - 1]
            stack.append(fake_traceback(exc, tb, lineno))
        else:
            stack.append(tb)
        tb = tb.tb_next
    # Relink the surviving frames; after the loop ``tb`` is the outermost one.
    tb_next = None
    for tb in reversed(stack):
        tb.tb_next = tb_next
        tb_next = tb
    return exc.with_traceback(tb)
def fake_traceback(exc: Exception, tb: TracebackType, lineno: int) -> TracebackType:  # type: ignore
    """Build a traceback object pointing at *lineno* of *tb*'s file.

    Tracebacks cannot be created with arbitrary line numbers directly, so a
    throwaway module of ``lineno - 1`` newlines followed by
    ``raise _mecha_exc`` is compiled and executed; the traceback captured
    from that raise reports the desired location and function name.
    """
    name = tb.tb_frame.f_code.co_name
    filename = tb.tb_frame.f_globals["__file__"]
    if name == "<module>":
        name = tb.tb_frame.f_globals.get("__name__")
    code = compile("\n" * (lineno - 1) + "raise _mecha_exc", filename, "exec")
    if name:
        # Reuse the original function name so the frame looks authentic.
        code = code.replace(co_name=name)
    try:
        exec(code, {"_mecha_exc": exc})
    except Exception as exc:
        # Drop the exec() frame itself; keep the synthetic raise frame.
        return exc.__traceback__.tb_next  # type: ignore
| [
"bisect.bisect",
"dataclasses.field",
"typing.TypeVar"
] | [((368, 380), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (375, 380), False, 'from typing import Dict, List, Set, TypeVar\n'), ((1271, 1375), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : {'\\\\\\\\': '\\\\', '\\\\f': '\\x0c', '\\\\n': '\\n', '\\\\r': '\\r', '\\\\t': '\\t'})"}), "(default_factory=lambda : {'\\\\\\\\': '\\\\', '\\\\f': '\\x0c', '\\\\n': '\\n',\n '\\\\r': '\\r', '\\\\t': '\\t'})\n", (1276, 1375), False, 'from dataclasses import dataclass, field\n'), ((1876, 1900), 'bisect.bisect', 'bisect', (['n1', 'tb.tb_lineno'], {}), '(n1, tb.tb_lineno)\n', (1882, 1900), False, 'from bisect import bisect\n')] |
#!/bin/python
import os
import js2py
from pathlib import Path
from configStrategies import cS
from configIndicators import cI
class _settings:
def __init__(self, **entries):
'''
print(entries)
def iterate(self, DATA):
for W in DATA.keys():
if type(DATA[W]) == dict:
iterate(self,DATA[W])
else:
self.__dict__.update(DATA)
iterate(self,entries)
'''
self.__dict__.update(entries)
def getstrat(self, name):
return self.strategies[name]
def getSettings(specific=None):
    """Return the evolution/optimization configuration.

    With ``specific=None`` the raw nested dict is returned. Otherwise a
    ``_settings`` wrapper is returned: for a falsy non-None value the whole
    dict is wrapped, for a section name (e.g. ``'Global'``, ``'bayesian'``)
    only that section is wrapped.
    """
    HOME = str(Path.home())
    s = {
        'Global': {'gekkoPath': HOME + '/gekko', 'configFilename': 'example-config.js', 'save_dir': "output", 'log_name': 'evolution_gen.csv', 'RemoteAWS': '../AmazonSetup/hosts', 'GekkoURLs': ['http://localhost:3000'], 'showFailedStrategies': False},
        # Hosts list of remote machines running gekko, to distribute evaluation load;
        # option values: path to HOSTS file list OR False;
        # Your gekko local URL - CHECK THIS!
        # genetic algorithm settings
        'generations': {'gekkoDebug': True, 'showIndividualEvaluationInfo': False, 'parameter_spread': 60, 'POP_SIZE': 30, 'NBEPOCH': 800, 'evaluateSettingsPeriodically': 20, 'deltaDays': 90, 'ParallelCandlestickDataset': 1, 'cxpb': 0.8, 'mutpb': 0.2, '_lambda': 7, 'PRoFIGA_beta': 0.005, 'ageBoundaries': (9, 19), 'candleSize': 10, 'proofSize': 12, 'DRP': 70, 'ParallelBacktests': 6, 'finaltest': {'NBBESTINDS': 1, 'NBADDITIONALINDS': 4}, 'chromosome': {'GeneSize': 2, 'Density': 3}, 'weights': {'profit': 1.0, 'sharpe': 0.1}, 'interpreteBacktestProfit': 'v3'},
        # show gekko verbose (strat info) - gekko must start with -d flag;
        # Verbose single evaluation results;
        # if parameter is set to value rather than tuple limits at settings, make the value
        # a tuple based on chosen spread value (percents); value: 10 --spread=50--> value: (5,15)
        # Initial population size, per locale
        # number of epochs to run
        # show current best settings on every X epochs. (or False)
        # time window size on days of candlesticks for each evaluation
        # Number of candlestick data loaded simultaneously in each locale;
        # slower EPOCHS, theoretical better evolution;
        # seems broken. values other than 1 makes evolution worse.
        # -- Genetic Algorithm Parameters # Probabilty of crossover # Probability of mutation; # size of offspring generated per epoch; # weight of PRoFIGA calculations on variability of population size # minimum age to die, age when everyone dies (on EPOCHS) # candle size for gekko backtest, in minutes
        # Date range persistence; Number of subsequent rounds
        # until another time range in dataset is selected;
        # mode of profit interpretation: v1, v2 or v3.
        # please check the first functions at evaluation.gekko.backtest
        # to understand what is this. has big impact on evolutionary agenda.
        # bayesian optimization settings
        'bayesian': {'gekkoDebug': False, 'deltaDays': 60, 'num_rounds': 10, 'random_state': 2017, 'num_iter': 50, 'init_points': 9, 'parallel': False, 'show_chart': False, 'save': True, 'parameter_spread': 100, 'candleSize': 30, 'historySize': 10, 'watch': {"exchange": "poloniex", "currency": 'USDT', "asset": 'BTC'}, 'interpreteBacktestProfit': 'v3'},
        # show gekko verbose (strat info) - gekko must start with -d flag;
        # time window size on days of candlesticks for each evaluation
        # number of evaluation rounds
        # seed for randomziation of values
        # number of iterations on each round
        # number of random values to start bayes evaluation
        # candleSize & historySize on Gekko, for all evals
        'dataset': {'dataset_source': None, '!dataset_source': {"exchange": "kraken", "currency": 'USD', "asset": 'LTC'}, 'eval_dataset_source': None, 'dataset_span': 0, 'eval_dataset_span': 0},
        # -- Gekko Dataset Settings
        # leave the ! on the ignored entry as convenient;
        # dataset_source can be set to None so it searches from any source; # in case of specifying exchange-currency-asset, rename this removing the '!', and del the original key above.
        # span in days from the end of dataset to the beggining. Or zero.
        # (to restrain length);
        'strategies': cS,
        'indicators': cI,
        'skeletons': {'ontrend': {"SMA_long": 1000, "SMA_short": 50}},
    }
    if specific != None:
        if not specific:
            return _settings(**s)
        else:
            return _settings(** s[specific])
    return s
def get_configjs(filename="example-config.js"):
    """Load a gekko JavaScript config file and return it as a Python dict.

    The trailing ``module.exports = config;`` statement is rewritten to a
    bare ``config;`` expression so evaluating the script yields the config
    object itself.
    """
    with open(filename, "r") as handle:
        source = handle.read()
    evaluated = js2py.eval_js(
        source.replace("module.exports = config;", "config;"))
    return evaluated.to_dict()
| [
"pathlib.Path.home",
"js2py.eval_js"
] | [((634, 645), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (643, 645), False, 'from pathlib import Path\n'), ((4901, 4920), 'js2py.eval_js', 'js2py.eval_js', (['text'], {}), '(text)\n', (4914, 4920), False, 'import js2py\n')] |
import sys
import hypothesis.strategies as st
from hypothesis import given
def is_pytest():
    """Report whether the pytest framework has been imported into this process."""
    return any(module_name == "pytest" for module_name in sys.modules)
def pytest_configure(config):
    # Workaround for Hypothesis bug causing flaky tests if they use characters
    # or text: https://github.com/HypothesisWorks/hypothesis/issues/2108
    # Running one throwaway text-strategy example at configure time warms
    # Hypothesis' caches before any real test executes.
    @given(st.text())
    def foo(x):
        pass
    foo()
    return
return
# PYTEST_RUNNING = False
# def pytest_configure(config):
# global PYTEST_RUNNING
# PYTEST_RUNNING = True
# return
# def pytest_unconfigure(config):
# global PYTEST_RUNNING
# PYTEST_RUNNING = False
# return
| [
"hypothesis.strategies.text"
] | [((324, 333), 'hypothesis.strategies.text', 'st.text', ([], {}), '()\n', (331, 333), True, 'import hypothesis.strategies as st\n')] |
#!/usr/bin/env python3
#Basic Diffie Hellman
import random, numpy, sys, argparse
# Hand-rolled --help handling (argparse is imported but never configured).
if "-h" in sys.argv or "--help" in sys.argv:
    print("Usage:")
    print("./Diffie_Hellman.py")
    print("./Diffie_Hellman.py XOR")
    print()
    print("XOR option replaces (mod) with ^, and outputs how often it works.")
    exit()

# Global mode flag: True swaps modular exponentiation for bitwise XOR when
# deriving public keys (to demonstrate how rarely that "works").
if "XOR" in sys.argv:
    XOR=True
else:
    XOR=False
#thanks, StackOverflow
def primesfromXtoN(low, high):
    """Return all primes p with low <= p < high, via trial division.

    Bug fix: the previous version tested divisors only in
    ``range(low, x)``, so any composite whose factors are all smaller
    than *low* (e.g. 100, 102, 104 for low=100) was wrongly reported as
    prime -- fatal for Diffie-Hellman, which needs a genuinely prime
    modulus. Divisors are now tested over 2..sqrt(x), and values below 2
    are never reported prime.
    """
    return [x for x in range(max(low, 2), high)
            if all(x % y != 0 for y in range(2, int(x ** 0.5) + 1))]
LargePrimes = primesfromXtoN(1000,2000)
primes = primesfromXtoN(100,600)
def initialise():
    """Pick fresh public Diffie-Hellman parameters.

    p: a random large prime (the modulus); g: a random base in [3, p-1).
    Both are public; only each client's exponent stays secret.
    """
    global primes , LargePrimes , p , g
    p = random.choice(LargePrimes)
    g = random.choice(range(3,p-1))
class client:
    """One Diffie-Hellman participant.

    Draws a private exponent and publishes a public key derived from the
    shared module-level (p, g). In XOR mode the public key uses ^ instead
    of modular exponentiation.
    """

    def __init__(self):
        global p , g
        self.secret = random.randrange(3,p-1)
        if XOR:
            self.Pkey = int((g^self.secret) % p)
        else:
            self.Pkey = int((g**self.secret) % p)

    # Note: always uses modular exponentiation, even in XOR mode -- that
    # asymmetry is why the XOR experiment rarely agrees on a shared key.
    def recieve(self,b):
        self.s = (b**self.secret) % p
def run():
    """Perform one full key exchange; return True when both sides agree.

    Also bumps the module-level ``ran``/``worked`` counters used by the
    success-rate display in the main loop.
    """
    global worked, ran
    ran+=1
    initialise()
    c1 = client()
    c2 = client()
    c1.recieve(c2.Pkey)
    c2.recieve(c1.Pkey)
    if c1.s == c2.s:
        worked+=1
        return True
    else:
        return False
if __name__ == "__main__":
    worked=0
    ran = 0
    # Run exchanges forever: in XOR mode print the success percentage,
    # otherwise just print the running count of successful exchanges.
    while True:
        run()
        if XOR:
            print(str(worked/ran*100)+"%")
        else:
            print(worked)
| [
"random.choice",
"random.randrange"
] | [((663, 689), 'random.choice', 'random.choice', (['LargePrimes'], {}), '(LargePrimes)\n', (676, 689), False, 'import random, numpy, sys, argparse\n'), ((809, 835), 'random.randrange', 'random.randrange', (['(3)', '(p - 1)'], {}), '(3, p - 1)\n', (825, 835), False, 'import random, numpy, sys, argparse\n')] |
from collections.abc import Container, Sequence
from ipaddress import (IPv4Address, IPv4Network, IPv6Address, IPv6Network,
ip_address, ip_network)
from .exceptions import IncorrectIPCount, UntrustedIP
MSG = ("Trusted list should be a sequence of sets "
"with either addresses or networks.")
IP_CLASSES = (IPv4Address, IPv6Address, IPv4Network, IPv6Network)
def parse_trusted_list(lst):
    """Normalize a user-supplied trusted-proxy list.

    *lst* must be a non-string sequence whose elements are either
    ``Ellipsis`` (wildcard, allowed only at the end) or non-string
    containers of IP addresses/networks (objects or strings). Returns a
    new list where every string has been parsed into an ``ipaddress``
    object. Raises TypeError/ValueError on malformed input.
    """
    if isinstance(lst, str) or not isinstance(lst, Sequence):
        raise TypeError(MSG)
    out = []
    has_ellipsis = False
    for elem in lst:
        if elem is ...:
            has_ellipsis = True
            new_elem = ...
        else:
            # A concrete rule may not follow a wildcard.
            if has_ellipsis:
                raise ValueError(
                    "Ellipsis is allowed only at the end of list")
            if isinstance(elem, str) or not isinstance(elem, Container):
                raise TypeError(MSG)
            new_elem = []
            for item in elem:
                # Already-parsed address/network objects pass through.
                if isinstance(item, IP_CLASSES):
                    new_elem.append(item)
                    continue
                # Try to parse as a single address first, then a network.
                try:
                    new_elem.append(ip_address(item))
                except ValueError:
                    try:
                        new_elem.append(ip_network(item))
                    except ValueError:
                        raise ValueError(
                            "{!r} is not IPv4 or IPv6 address or network"
                            .format(item))
        out.append(new_elem)
    return out
def remote_ip(trusted, ips):
    """Resolve the client IP from a proxy chain.

    *ips* must contain exactly one more address than *trusted* (the client
    plus one hop per trusted proxy). Each hop is validated against its
    trust rule; a rule of ``...`` short-circuits and returns the address
    at that position. Otherwise the last address is returned.
    """
    if len(ips) != len(trusted) + 1:
        raise IncorrectIPCount(len(trusted) + 1, ips)
    for rule, candidate in zip(trusted, ips):
        if rule is ...:
            return candidate
        check_ip(rule, candidate)
    return ips[-1]
def check_ip(trusted, ip):
    """Raise UntrustedIP unless *ip* matches an entry of *trusted*.

    Plain addresses are compared for equality; anything else is treated as
    a network and tested for membership. Returns None on the first match.
    """
    for entry in trusted:
        if isinstance(entry, (IPv4Address, IPv6Address)):
            matched = entry == ip
        else:
            matched = ip in entry
        if matched:
            return
    raise UntrustedIP(ip, trusted)
| [
"ipaddress.ip_network",
"ipaddress.ip_address"
] | [((1141, 1157), 'ipaddress.ip_address', 'ip_address', (['item'], {}), '(item)\n', (1151, 1157), False, 'from ipaddress import IPv4Address, IPv4Network, IPv6Address, IPv6Network, ip_address, ip_network\n'), ((1259, 1275), 'ipaddress.ip_network', 'ip_network', (['item'], {}), '(item)\n', (1269, 1275), False, 'from ipaddress import IPv4Address, IPv4Network, IPv6Address, IPv6Network, ip_address, ip_network\n')] |
from django.db.models.signals import post_save
from django.dispatch import receiver
from communique.utils.utils_signals import generate_notifications
from user.models import NotificationRegistration
from .models import Program
@receiver(post_save, sender=Program)
def post_program_save_callback(sender, **kwargs):
    """Notify subscribed users whenever a Program is created or updated.

    Fires on Django's ``post_save`` signal for ``Program`` and forwards
    the signal kwargs to the shared notification generator.
    """
    generate_notifications(NotificationRegistration.PROGRAMS, kwargs) | [
generate_notifications(NotificationRegistration.PROGRAMS, kwargs) | [
"django.dispatch.receiver",
"communique.utils.utils_signals.generate_notifications"
] | [((231, 266), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Program'}), '(post_save, sender=Program)\n', (239, 266), False, 'from django.dispatch import receiver\n'), ((452, 517), 'communique.utils.utils_signals.generate_notifications', 'generate_notifications', (['NotificationRegistration.PROGRAMS', 'kwargs'], {}), '(NotificationRegistration.PROGRAMS, kwargs)\n', (474, 517), False, 'from communique.utils.utils_signals import generate_notifications\n')] |
import os
from smsapi.client import SmsApiPlClient
# API token comes from the environment so it never lives in source control.
access_token = os.getenv('SMSAPI_ACCESS_TOKEN')

client = SmsApiPlClient(access_token=access_token)


def send_push():
    """Send a demo push notification and print the delivery summary.

    NOTE(review): 'app id' is a placeholder -- replace it with a real app
    id before running.
    """
    r = client.push.send(app_id='app id', alert='push notification text')
    print(r.id, r.date_created, r.scheduled_date, r.summary.points, r.summary.recipients_count,
          r.summary.error_code, r.app.name, r.payload.alert) | [
"smsapi.client.SmsApiPlClient",
"os.getenv"
] | [((70, 102), 'os.getenv', 'os.getenv', (['"""SMSAPI_ACCESS_TOKEN"""'], {}), "('SMSAPI_ACCESS_TOKEN')\n", (79, 102), False, 'import os\n'), ((114, 155), 'smsapi.client.SmsApiPlClient', 'SmsApiPlClient', ([], {'access_token': 'access_token'}), '(access_token=access_token)\n', (128, 155), False, 'from smsapi.client import SmsApiPlClient\n')] |
from collections import deque
import string
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import torch.optim as optim
from datasets import TextDataset
from model import LSTMAE
from generate import generate
import numpy as np
# --- Hyper-parameters -------------------------------------------------------
data_root = 'Data'
max_length = 50
batch_size = 50
num_epochs = 100
learning_rate = 0.0003
print_interval = 2
dataset = TextDataset(data_root, max_length)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
vocab_size = len(dataset.get_charset())
# NOTE(review): this deque is shadowed by the identical assignment a few
# lines below -- one of the two is redundant.
losses = deque([], maxlen=print_interval)
input_size = max_length
hidden_size = 16
model = LSTMAE(vocab_size, input_size, hidden_size, num_layers=1, isCuda=False)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
loss_func = nn.CrossEntropyLoss()
losses = deque([], maxlen=print_interval)
# --- Training loop ----------------------------------------------------------
for epoch in range(num_epochs):
    batch_index = 0
    for batch_i, samples in enumerate(dataloader):
        model.zero_grad()
        word_set = samples
        batch_index += 1
        print('This is the {}th batch.'.format(batch_index))
        loss = 0.
        output = model(word_set)
        # Next-character objective: targets are the inputs shifted by one.
        label = word_set[:, 1:].reshape(-1)
        loss = loss_func(output[:,:-1,:].reshape(-1, len(dataset.get_charset())), label)
        # for char_ind in range(batch_size):
        #     output = model(samples[char_ind])
        #     target = samples[char_ind].clone()
        #     loss += loss_func(output, target)
        losses.append(loss.item())
        loss.backward()
        optimizer.step()
        if batch_i % print_interval == 0:
            print(loss.item())
            # Sample from the model (seeded with one random character) and
            # append the generated text to output.txt for inspection.
            with open("output.txt", "a") as f:
                auto_write = generate([np.random.choice(list(range(len(dataset.get_charset()))))], dataset, model)
                f.write(" ".join(auto_write))
                print(" ".join(auto_write))
        '''
        if batch_i % print_interval == 0:
            print(generate('Charles', dataset, model))
            print('[%03d] %05d/%05d Loss: %.4f' % (
                epoch + 1,
                batch_i,
                len(dataset) // batch_size,
                sum(losses) / len(losses)
            ))
        '''
    # Checkpoint after every epoch (overwrites the previous file).
    torch.save(model.state_dict(), 'model.pt')
| [
"collections.deque",
"torch.nn.CrossEntropyLoss",
"model.LSTMAE",
"torch.utils.data.DataLoader",
"datasets.TextDataset"
] | [((377, 411), 'datasets.TextDataset', 'TextDataset', (['data_root', 'max_length'], {}), '(data_root, max_length)\n', (388, 411), False, 'from datasets import TextDataset\n'), ((425, 481), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True)\n', (435, 481), False, 'from torch.utils.data import DataLoader\n'), ((531, 563), 'collections.deque', 'deque', (['[]'], {'maxlen': 'print_interval'}), '([], maxlen=print_interval)\n', (536, 563), False, 'from collections import deque\n'), ((614, 685), 'model.LSTMAE', 'LSTMAE', (['vocab_size', 'input_size', 'hidden_size'], {'num_layers': '(1)', 'isCuda': '(False)'}), '(vocab_size, input_size, hidden_size, num_layers=1, isCuda=False)\n', (620, 685), False, 'from model import LSTMAE\n'), ((759, 780), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (778, 780), True, 'import torch.nn as nn\n'), ((791, 823), 'collections.deque', 'deque', (['[]'], {'maxlen': 'print_interval'}), '([], maxlen=print_interval)\n', (796, 823), False, 'from collections import deque\n')] |
# %%
import timeit
import tqdm
from os import path
import inspect
import numpy as np
import dill
import init
import fastg3.crisp as g3crisp
from plot_utils import plot_bench
from constants import N_REPEATS, N_STEPS, DILL_FOLDER
from number_utils import format_number
from dataset_utils import AVAILABLE_DATASETS, load_dataset
MAX_SYN = 100000000
def gen_setup(dataset_name, f):
    """Return the setup source used by the timeit/approx benchmarks.

    *f* is the fraction of rows to sample. The returned string is exec'd
    (or passed as the ``timeit`` setup) and binds ``df``, ``X`` and ``Y``;
    a fixed random_state keeps samples reproducible across runs.
    """
    return f'''
import init
import fastg3.crisp as g3crisp
from sql_utils import g3_sql_bench
from dataset_utils import load_dataset
df, X, Y = load_dataset('{dataset_name}', n_tuples_syn={MAX_SYN})
df = df.sample(frac={str(f)}, replace=False, random_state=27)
'''
def time_test(dataset_name, frac_samples):
    """Time each benchmarked command over increasing sample fractions.

    Returns (to_benchmark, labels, yaxis_name) where to_benchmark maps a
    command string to a list of mean per-run durations in milliseconds.
    """
    to_benchmark, labels = init.gen_time_benchmark()
    for f in tqdm.tqdm(frac_samples):
        setup=gen_setup(dataset_name, f)
        for cmd in to_benchmark:
            if cmd != 'G3_SQL':
                # timeit returns total seconds for N_REPEATS runs -> ms/run.
                duration_mean = timeit.timeit(cmd, setup=setup, number=N_REPEATS)/N_REPEATS*1000
                to_benchmark[cmd].append(duration_mean)
            else:
                # The SQL benchmark times itself; exec/eval run the
                # internally generated setup/bench strings (trusted input).
                exec(setup)
                to_benchmark[cmd].append(1000*eval(f'g3_sql_bench(df, X, Y, n_repeats={N_REPEATS})'))
    yaxis_name=f"Average time on {str(N_REPEATS)} runs (ms)"
    return to_benchmark, labels, yaxis_name
def approx_test(dataset_name, frac_samples):
    """Measure absolute error of each estimator against the exact g3 value.

    For every sample fraction the exact g3 is computed with g3_hash, then
    each benchmarked expression's result is compared to it.
    """
    to_benchmark, labels = init.gen_sampling_benchmark()
    for f in tqdm.tqdm(frac_samples):
        setup=gen_setup(dataset_name, f)
        # exec/eval run internally generated strings (trusted input).
        exec(setup)
        true_g3=eval('g3crisp.g3_hash(df, X, Y)')
        for cmd in to_benchmark:
            to_benchmark[cmd].append(abs(true_g3-eval(cmd)))
    yaxis_name=f"Absolute error"# mean on {str(N_REPEATS)} runs"
    return to_benchmark, labels, yaxis_name
if __name__ == '__main__':
    # Sample fractions STEP, 2*STEP, ..., 1.0.
    STEP=1/N_STEPS
    frac_samples = list(np.arange(STEP, 1+STEP, STEP))
    for dataset_name in AVAILABLE_DATASETS:
        for test_name in ['time', 'approx']:
            script_name = inspect.stack()[0].filename.split('.')[0]
            file_path = './'+path.join(DILL_FOLDER, f'{script_name}_{test_name}_{dataset_name}.d')
            # Results are cached on disk; skip combinations already done.
            if path.isfile(file_path):
                print(f'{file_path} found! Skipping...')
                continue
            else:
                print(f'{file_path} in progress...')
            if test_name=='time':
                to_benchmark, labels, yaxis_name = time_test(dataset_name, frac_samples)
            else:
                to_benchmark, labels, yaxis_name = approx_test(dataset_name, frac_samples)
            fig, ax = plot_bench(to_benchmark,
                frac_samples,
                labels,
                xlabel="Number of tuples",
                ylabel=yaxis_name,
                logy=False,
                savefig=False
            )
            if dataset_name=='syn':
                dataset_size=MAX_SYN
            else:
                dataset_size = len(load_dataset(dataset_name)[0].index)
            # X axis shows absolute tuple counts instead of fractions.
            ax.xaxis.set_major_formatter(lambda x, pos: format_number(x*dataset_size))
            dill.dump((fig, {"dataset_size": dataset_size}), open(file_path, "wb")) | [
"init.gen_sampling_benchmark",
"number_utils.format_number",
"inspect.stack",
"tqdm.tqdm",
"os.path.join",
"os.path.isfile",
"dataset_utils.load_dataset",
"timeit.timeit",
"plot_utils.plot_bench",
"numpy.arange",
"init.gen_time_benchmark"
] | [((718, 743), 'init.gen_time_benchmark', 'init.gen_time_benchmark', ([], {}), '()\n', (741, 743), False, 'import init\n'), ((757, 780), 'tqdm.tqdm', 'tqdm.tqdm', (['frac_samples'], {}), '(frac_samples)\n', (766, 780), False, 'import tqdm\n'), ((1367, 1396), 'init.gen_sampling_benchmark', 'init.gen_sampling_benchmark', ([], {}), '()\n', (1394, 1396), False, 'import init\n'), ((1410, 1433), 'tqdm.tqdm', 'tqdm.tqdm', (['frac_samples'], {}), '(frac_samples)\n', (1419, 1433), False, 'import tqdm\n'), ((1824, 1855), 'numpy.arange', 'np.arange', (['STEP', '(1 + STEP)', 'STEP'], {}), '(STEP, 1 + STEP, STEP)\n', (1833, 1855), True, 'import numpy as np\n'), ((2126, 2148), 'os.path.isfile', 'path.isfile', (['file_path'], {}), '(file_path)\n', (2137, 2148), False, 'from os import path\n'), ((2558, 2681), 'plot_utils.plot_bench', 'plot_bench', (['to_benchmark', 'frac_samples', 'labels'], {'xlabel': '"""Number of tuples"""', 'ylabel': 'yaxis_name', 'logy': '(False)', 'savefig': '(False)'}), "(to_benchmark, frac_samples, labels, xlabel='Number of tuples',\n ylabel=yaxis_name, logy=False, savefig=False)\n", (2568, 2681), False, 'from plot_utils import plot_bench\n'), ((2041, 2110), 'os.path.join', 'path.join', (['DILL_FOLDER', 'f"""{script_name}_{test_name}_{dataset_name}.d"""'], {}), "(DILL_FOLDER, f'{script_name}_{test_name}_{dataset_name}.d')\n", (2050, 2110), False, 'from os import path\n'), ((3010, 3041), 'number_utils.format_number', 'format_number', (['(x * dataset_size)'], {}), '(x * dataset_size)\n', (3023, 3041), False, 'from number_utils import format_number\n'), ((920, 969), 'timeit.timeit', 'timeit.timeit', (['cmd'], {'setup': 'setup', 'number': 'N_REPEATS'}), '(cmd, setup=setup, number=N_REPEATS)\n', (933, 969), False, 'import timeit\n'), ((2917, 2943), 'dataset_utils.load_dataset', 'load_dataset', (['dataset_name'], {}), '(dataset_name)\n', (2929, 2943), False, 'from dataset_utils import AVAILABLE_DATASETS, load_dataset\n'), ((1970, 1985), 'inspect.stack', 
'inspect.stack', ([], {}), '()\n', (1983, 1985), False, 'import inspect\n')] |
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
import datetime
from frappe.utils import flt
from erpnext.accounts.utils import get_balance_on
from frappe.utils import (flt, getdate, get_url, now,
nowtime, get_time, today, get_datetime, add_days)
def execute(filters=None):
    """Report entry point: return (columns, rows) for the bank summary."""
    cols = get_columns()
    return cols, get_data(filters, cols)
def get_columns():
    """Return the report column definitions (Arabic labels, English fieldnames)."""
    return [
        {
            "label": _("الحساب البنكي"),
            "fieldname": "account",
            "fieldtype": "Link",
            "options": "Account",
            "width": 250
        },
        {
            "label": _("أول المدة"),
            "fieldname": "opening",
            "fieldtype": "Currency",
            "width": 130
        },
        {
            "label": _("مدين"),
            "fieldname": "incoming",
            "fieldtype": "Currency",
            "width": 130
        },
        {
            "label": _("تحت التحصيل"),
            "fieldname": "receivable",
            "fieldtype": "Currency",
            "width": 130
        },
        {
            "label": _("دائن"),
            "fieldname": "outgoing",
            "fieldtype": "Currency",
            "width": 130
        },
        {
            "label": _("برسم الدفع"),
            "fieldname": "payable",
            "fieldtype": "Currency",
            "width": 130
        },
        {
            "label": _("الإجمالي"),
            "fieldname": "total",
            "fieldtype": "Currency",
            "width": 130
        }
    ]
def get_data(filters, columns):
    """Collect the report rows; *columns* is accepted for API symmetry but unused."""
    return get_item_price_qty_data(filters)
def get_item_price_qty_data(filters):
    """Build one summary row per enabled, non-group Bank account.

    Each row carries:
      * opening    -- balance as of the day before ``from_date``
      * incoming   -- total GL debits within the period
      * outgoing   -- total GL credits within the period
      * receivable -- received cheques still under collection
      * payable    -- issued cheques still pending payment
      * total      -- opening + incoming + receivable - outgoing - payable

    Security fix: the per-account queries previously interpolated the
    account name and the user-supplied report dates with ``str.format``;
    they are now passed to ``frappe.db.sql`` as bind parameters
    (``%(name)s`` placeholders) to prevent SQL injection.
    """
    to_date = filters.get("to_date")
    from_date = filters.get("from_date")
    # Opening balance is measured at the close of the previous day.
    from_dateo = getdate(from_date) - datetime.timedelta(days=1)

    result = []
    item_results = frappe.db.sql("""
        SELECT
            `tabAccount`.name as account
        FROM
            `tabAccount`
        WHERE
            `tabAccount`.disabled = 0
            and `tabAccount`.is_group = 0
            and `tabAccount`.account_type = 'Bank'
            and `tabAccount`.name not in ("1331 - اوراق قبض مستلمة - CA", "1332 - اوراق قبض برسم التحصيل - CA", "1334 - اوراق قبض مندوبي البيع - CA", "1910 - حساب افتتاحي مؤقت - CA", "2140 - اوراق الدفع - CA")
        """, filters, as_dict=1)

    for item_dict in item_results:
        params = {
            "accounto": item_dict.account,
            "from_date": from_date,
            "to_date": to_date,
        }

        opening = get_balance_on(account=item_dict.account, date=getdate(from_dateo), party_type=None, party=None, company=None,
            in_account_currency=True, cost_center=None, ignore_account_permission=False)

        # Period debits on the account.
        incoming = frappe.db.sql("""select
            ifnull(sum(`tabGL Entry`.debit), 0) as debit
            from `tabGL Entry`
            where `tabGL Entry`.is_cancelled = 0
            and `tabGL Entry`.account = %(accounto)s
            and `tabGL Entry`.posting_date between %(from_date)s and %(to_date)s
            """, params, as_dict=0)

        # Period credits on the account.
        outgoing = frappe.db.sql("""select
            ifnull(sum(`tabGL Entry`.credit), 0) as credit
            from `tabGL Entry`
            where `tabGL Entry`.is_cancelled = 0
            and `tabGL Entry`.account = %(accounto)s
            and `tabGL Entry`.posting_date between %(from_date)s and %(to_date)s
            """, params, as_dict=0)

        # Received cheques still under collection.
        receivable = frappe.db.sql("""select
            ifnull(sum(`tabPayment Entry`.paid_amount), 0) as paid_amount
            from `tabPayment Entry`
            where `tabPayment Entry`.docstatus = 1
            and `tabPayment Entry`.payment_type = "Receive"
            and `tabPayment Entry`.mode_of_payment_type = "Cheque"
            and `tabPayment Entry`.cheque_status = "تحت التحصيل"
            and `tabPayment Entry`.account = %(accounto)s
            and `tabPayment Entry`.reference_date between %(from_date)s and %(to_date)s
            """, params, as_dict=0)

        # Issued cheques still pending payment.
        payable = frappe.db.sql("""select
            ifnull(sum(`tabPayment Entry`.paid_amount), 0) as paid_amount
            from `tabPayment Entry`
            where `tabPayment Entry`.docstatus = 1
            and `tabPayment Entry`.payment_type = "Pay"
            and `tabPayment Entry`.mode_of_payment_type = "Cheque"
            and `tabPayment Entry`.cheque_status_pay = "حافظة شيكات برسم الدفع"
            and `tabPayment Entry`.account = %(accounto)s
            and `tabPayment Entry`.reference_date between %(from_date)s and %(to_date)s
            """, params, as_dict=0)

        incoming_val = incoming[0][0]
        outgoing_val = outgoing[0][0]
        receivable_val = receivable[0][0]
        payable_val = payable[0][0]

        result.append({
            'account': item_dict.account,
            'opening': opening,
            'incoming': incoming_val,
            'receivable': receivable_val,
            'outgoing': outgoing_val,
            'payable': payable_val,
            'total': opening + incoming_val + receivable_val - outgoing_val - payable_val,
        })

    return result
| [
"datetime.timedelta",
"frappe.db.sql",
"frappe.utils.getdate",
"frappe._"
] | [((1963, 2512), 'frappe.db.sql', 'frappe.db.sql', (['"""\n SELECT\n `tabAccount`.name as account\n FROM\n `tabAccount`\n WHERE\n `tabAccount`.disabled = 0\n and `tabAccount`.is_group = 0\n and `tabAccount`.account_type = \'Bank\'\n and `tabAccount`.name not in ("1331 - اوراق قبض مستلمة - CA", "1332 - اوراق قبض برسم التحصيل - CA", "1334 - اوراق قبض مندوبي البيع - CA", "1910 - حساب افتتاحي مؤقت - CA", "2140 - اوراق الدفع - CA")\n """', 'filters'], {'as_dict': '(1)'}), '(\n """\n SELECT\n `tabAccount`.name as account\n FROM\n `tabAccount`\n WHERE\n `tabAccount`.disabled = 0\n and `tabAccount`.is_group = 0\n and `tabAccount`.account_type = \'Bank\'\n and `tabAccount`.name not in ("1331 - اوراق قبض مستلمة - CA", "1332 - اوراق قبض برسم التحصيل - CA", "1334 - اوراق قبض مندوبي البيع - CA", "1910 - حساب افتتاحي مؤقت - CA", "2140 - اوراق الدفع - CA")\n """\n , filters, as_dict=1)\n', (1976, 2512), False, 'import frappe\n'), ((1879, 1897), 'frappe.utils.getdate', 'getdate', (['from_date'], {}), '(from_date)\n', (1886, 1897), False, 'from frappe.utils import flt, getdate, get_url, now, nowtime, get_time, today, get_datetime, add_days\n'), ((1900, 1926), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1918, 1926), False, 'import datetime\n'), ((496, 514), 'frappe._', '_', (['"""الحساب البنكي"""'], {}), "('الحساب البنكي')\n", (497, 514), False, 'from frappe import msgprint, _\n'), ((686, 700), 'frappe._', '_', (['"""أول المدة"""'], {}), "('أول المدة')\n", (687, 700), False, 'from frappe import msgprint, _\n'), ((842, 851), 'frappe._', '_', (['"""مدين"""'], {}), "('مدين')\n", (843, 851), False, 'from frappe import msgprint, _\n'), ((994, 1010), 'frappe._', '_', (['"""تحت التحصيل"""'], {}), "('تحت التحصيل')\n", (995, 1010), False, 'from frappe import msgprint, _\n'), ((1155, 1164), 'frappe._', '_', (['"""دائن"""'], {}), "('دائن')\n", (1156, 1164), False, 'from frappe import msgprint, _\n'), ((1307, 1322), 'frappe._', '_', (['"""برسم 
الدفع"""'], {}), "('برسم الدفع')\n", (1308, 1322), False, 'from frappe import msgprint, _\n'), ((1464, 1477), 'frappe._', '_', (['"""الإجمالي"""'], {}), "('الإجمالي')\n", (1465, 1477), False, 'from frappe import msgprint, _\n'), ((2714, 2733), 'frappe.utils.getdate', 'getdate', (['from_dateo'], {}), '(from_dateo)\n', (2721, 2733), False, 'from frappe.utils import flt, getdate, get_url, now, nowtime, get_time, today, get_datetime, add_days\n')] |
"""
Contabo API
The version of the OpenAPI document: 1.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import unittest
import pfruck_contabo
from pfruck_contabo.api.instance_actions_audits_api import InstanceActionsAuditsApi # noqa: E501
class TestInstanceActionsAuditsApi(unittest.TestCase):
    """InstanceActionsAuditsApi unit test stubs (generated scaffolding)."""

    def setUp(self):
        # Fresh client per test; no configuration/auth is supplied here.
        self.api = InstanceActionsAuditsApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_retrieve_instances_actions_audits_list(self):
        """Test case for retrieve_instances_actions_audits_list

        List history about your actions (audit) triggered via the API  # noqa: E501
        """
        pass


if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"pfruck_contabo.api.instance_actions_audits_api.InstanceActionsAuditsApi"
] | [((777, 792), 'unittest.main', 'unittest.main', ([], {}), '()\n', (790, 792), False, 'import unittest\n'), ((431, 457), 'pfruck_contabo.api.instance_actions_audits_api.InstanceActionsAuditsApi', 'InstanceActionsAuditsApi', ([], {}), '()\n', (455, 457), False, 'from pfruck_contabo.api.instance_actions_audits_api import InstanceActionsAuditsApi\n')] |
import prime
# Project Euler 23: sum of all positive integers that cannot be written
# as the sum of two abundant numbers. Every integer >= 28124 can be.
MAX = 28124

# BUGFIX: use integer division; MAX/2 is a float on Python 3 and the
# sieve refresh expects an integral bound.
prime._refresh(MAX // 2)

# n is abundant when the sum of its proper divisors exceeds n, i.e. when
# the sum of ALL its divisors (including n itself) exceeds 2n.
abundants = [n for n in range(1, MAX) if sum(prime.all_factors(n)) > n + n]
# A set gives O(1) membership tests (was a dict built via dict.fromkeys).
abundant_set = set(abundants)

total = 0
for n in range(1, MAX):
    expressible = False
    for a in abundants:
        if a > n:
            break
        if n - a in abundant_set:
            expressible = True
            break
    if not expressible:
        total = total + n
print(total) | [
"prime._refresh",
"prime.all_factors"
] | [((27, 50), 'prime._refresh', 'prime._refresh', (['(MAX / 2)'], {}), '(MAX / 2)\n', (41, 50), False, 'import prime\n'), ((95, 115), 'prime.all_factors', 'prime.all_factors', (['n'], {}), '(n)\n', (112, 115), False, 'import prime\n')] |
# We often don't use all members of all the pyuv callbacks
# pylint: disable=unused-argument
import sys, hashlib
import logging
import os
import pickle
import signal
import argparse
import re
import pyuv
from ..__main__ import getObjectFileHash
class HashCache:
    """Cache of object-file hashes, invalidated via pyuv filesystem events.

    Each watched directory maps basenames to their cached hash; when the
    directory reports a change for a cached file, that entry is dropped so
    the next request recomputes the hash.
    """
    def __init__(self, loop, excludePatterns, disableWatching):
        self._loop = loop
        # dirname -> {basename: hashsum}
        self._watchedDirectories = {}
        # Keep FSEvent handles alive so they are not garbage collected.
        self._handlers = []
        self._excludePatterns = excludePatterns or []
        self._disableWatching = disableWatching
        # Number of hash entries currently held in the cache.
        self._count = 0

    def getFileHash(self, path):
        """Return the hash for *path*, computing and caching it on a miss."""
        logging.debug("getting hash for %s", path)
        dirname, basename = os.path.split(os.path.normcase(path))
        watchedDirectory = self._watchedDirectories.get(dirname, {})
        hashsum = watchedDirectory.get(basename)
        if hashsum:
            logging.debug("using cached hashsum %s", hashsum)
            return hashsum
        hashsum = getObjectFileHash(path)
        watchedDirectory[basename] = hashsum
        if dirname not in self._watchedDirectories and not self.isExcluded(dirname) and not self._disableWatching:
            logging.debug("starting to watch directory %s for changes", dirname)
            self._startWatching(dirname)
        self._watchedDirectories[dirname] = watchedDirectory
        logging.debug("calculated and stored hashsum %s", hashsum)
        self._count += 1
        return hashsum

    def _startWatching(self, dirname):
        ev = pyuv.fs.FSEvent(self._loop)
        ev.start(dirname, 0, self._onPathChange)
        self._handlers.append(ev)

    def _onPathChange(self, handle, filename, events, error):
        """pyuv callback: drop the cached hash of a file that changed."""
        watchedDirectory = self._watchedDirectories[handle.path]
        logging.info("detected modifications in %s", handle.path)
        if filename in watchedDirectory:
            logging.debug("invalidating cached hashsum for %s", os.path.join(handle.path, filename))
            # BUGFIX: each filename holds exactly one cached hash, so the
            # cache shrinks by one entry. Previously this subtracted
            # len(hash-string), corrupting the counter reported by 'count'.
            self._count -= 1
            del watchedDirectory[filename]

    def __del__(self):
        for ev in self._handlers:
            ev.stop()

    def isExcluded(self, dirname):
        """True when *dirname* matches one of the exclude regexes."""
        # As long as we do not have more than _MAXCACHE regexes we can
        # rely on the internal caching of re.search.
        excluded = any(re.search(pattern, dirname, re.IGNORECASE) for pattern in self._excludePatterns)
        if excluded:
            logging.info("NOT watching %s", dirname)
        return excluded
class file_buffer:
    """Hash cache keyed both by filename and by a stat-derived 'uniq' key
    (name|mtime|size|ctime), so a modified file is detected and re-hashed."""
    def __init__(self):
        self._buffer = {}

    def get(self, filename):
        """Return 'hash|1' when freshly computed, 'hash|0' on a validated
        cache hit, or None when *filename* cannot be stat'ed."""
        h = self._buffer.get(filename)
        if h:
            uniq = self.uniq(filename)
            if uniq:
                h = self._buffer.get(uniq)
                if not h:
                    h = self.get_file_hash(filename)
                    self._buffer[filename] = self._buffer[uniq] = h
                    return '|'.join((h, '1'))
                else:
                    return '|'.join((h, '0'))
        else:
            uniq = self.uniq(filename)
            if uniq:
                h = self.get_file_hash(filename)
                self._buffer[filename] = self._buffer[uniq] = h
                return '|'.join((h, '1'))

    def get2(self, filename):
        """Like get() but returns the bare hash without the '|flag' suffix."""
        h = self._buffer.get(filename)
        if h:
            uniq = self.uniq(filename)
            if uniq:
                h = self._buffer.get(uniq)
                if h:
                    return h
                h = self.get_file_hash(filename)
                self._buffer[filename] = self._buffer[uniq] = h
                return h
        else:
            uniq = self.uniq(filename)
            if uniq:
                h = self.get_file_hash(filename)
                self._buffer[filename] = self._buffer[uniq] = h
                return h

    def add(self, filename, hash):
        """Store *hash* under the filename and, if stat succeeds, its uniq key."""
        self._buffer[filename] = hash
        u = self.uniq(filename)
        if u:
            self._buffer[u] = hash

    def uniq(self, filename):
        """Stat-derived cache key for *filename*; None when stat fails."""
        try:
            stat = os.stat(filename)
        except (OSError, ValueError):
            # BUGFIX: was a bare `except`, which also swallowed
            # KeyboardInterrupt/SystemExit. os.stat raises OSError
            # subclasses (and ValueError for embedded NULs).
            return None
        return '|'.join([filename, str(stat.st_mtime_ns), str(stat.st_size), str(stat.st_ctime_ns)])

    def __len__(self):
        return len(self._buffer)

    def get_file_hash(self, path):
        # Delegates to the shared object-file hashing routine.
        return getObjectFileHash(path)
class Connection:
    """One client connection on the named pipe.

    Protocol (inferred from this handler): the client sends UTF-8 lines and
    terminates the request with a single byte — 0x00 for a hash request,
    0x01 for a command request whose first line names the method to invoke.
    """
    # Class-level: one process-wide stat-keyed hash buffer shared by all
    # connections.
    _buffer = file_buffer()
    def __init__(self, pipe, cache, onCloseCallback):
        self._readBuffer = b''
        self._pipe = pipe
        self._cache = cache
        # Invoked after the reply is written, so the server can forget us.
        self._onCloseCallback = onCloseCallback
        pipe.start_read(self._onClientRead)
    def _onClientRead(self, pipe, data, error):
        # Accumulate chunks until a terminator byte arrives.
        self._readBuffer += data
        if self._readBuffer.endswith(b'\x00'):
            paths = self._readBuffer[:-1].decode('utf-8').splitlines()
            logging.debug("received request to hash %d paths", len(paths))
            try:
                hashes = map(self._cache.getFileHash, paths)
                response = '\n'.join(hashes).encode('utf-8')
            except OSError as e:
                # '!' prefix tells the client the payload is a pickled error.
                response = b'!' + pickle.dumps(e)
            pipe.write(response + b'\x00', self._onWriteDone)
        elif self._readBuffer.endswith(b'\x01'):
            data = self._readBuffer[:-1].decode('utf-8').splitlines()
            if data:
                # NOTE(review): this dispatches to ANY name in the class
                # __dict__ based on client input; consider an explicit
                # whitelist of command methods.
                self.__class__.__dict__[data[0]](self, pipe, data[1:])
    def close(self, pipe, data):
        # Command: terminate the whole server process.
        logging.info('exit command')
        sys.exit(0)
    def get_buffer_hash(self, pipe, data):
        # Command: reply with 'hash|freshflag' lines for resolvable paths.
        result = []
        for file in data:
            r = self._buffer.get(file)
            if r: result.append(r)
        response = '\n'.join(result).encode('utf-8')
        pipe.write(response + b'\x00', self._onWriteDone)
    def get_buffer_hash2(self, pipe, data):
        # Command: like get_buffer_hash, but replies with bare hashes.
        result = []
        for file in data:
            r = self._buffer.get2(file)
            if r: result.append(r)
        response = '\n'.join(result).encode('utf-8')
        pipe.write(response + b'\x00', self._onWriteDone)
    def add_buffer_hash(self, pipe, data):
        # Command: each item is 'filename|hash'; reply with the item count.
        for item in data:
            file, hash = item.split('|')
            self._buffer.add(file, hash)
        pipe.write(str(len(data)).encode('utf-8') + b'\x00', self._onWriteDone)
    def count(self, pipe, data):
        # Command: number of entries in the directory-watching cache.
        pipe.write( str(self._cache._count).encode('utf-8') + b'\x00', self._onWriteDone )
    def count2(self, pipe, data):
        # Command: number of entries in the shared stat-keyed buffer.
        pipe.write( str(len(self._buffer)).encode('utf-8') + b'\x00', self._onWriteDone )
    def _onWriteDone(self, pipe, error):
        logging.debug("sent response to client, closing connection")
        self._pipe.close()
        self._onCloseCallback(self)
class PipeServer:
    """Listens on a named pipe and spawns a Connection per client."""
    def __init__(self, loop, address, cache):
        self._pipeServer = pyuv.Pipe(loop)
        self._pipeServer.bind(address)
        self._connections = []
        self._cache = cache

    def listen(self):
        """Start accepting client connections."""
        self._pipeServer.listen(self._onConnection)

    def _onConnection(self, pipe, error):
        logging.debug("detected incoming connection")
        clientPipe = pyuv.Pipe(self._pipeServer.loop)
        pipe.accept(clientPipe)
        # The connection removes itself from our list when it closes.
        conn = Connection(clientPipe, self._cache, self._connections.remove)
        self._connections.append(conn)
def closeHandlers(handle):
    """Close every handle registered on the loop owning *handle*."""
    for pending in handle.loop.handles:
        pending.close()
def onSigint(handle, signum):
    # Graceful shutdown on Ctrl+C: closing all handles lets the loop exit.
    logging.info("Ctrl+C detected, shutting down")
    closeHandlers(handle)
def onSigterm(handle, signum):
    # Graceful shutdown on SIGTERM, same mechanism as SIGINT.
    logging.info("Server was killed by SIGTERM")
    closeHandlers(handle)
def main():
    """Entry point: parse options, build the hash cache and serve hash
    requests on the \\\\.\\pipe\\clcache_srv named pipe until SIGINT/SIGTERM."""
    logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=logging.INFO)
    parser = argparse.ArgumentParser(description='Server process for clcache to cache hash values of headers \
and observe them for changes.')
    parser.add_argument('--exclude', metavar='REGEX', action='append', \
        help='Regex ( re.search() ) for exluding of directory watching. Can be specified \
        multiple times. Example: --exclude \\\\build\\\\')
    parser.add_argument('--disable_watching', action='store_true', help='Disable watching of directories which \
        we have in the cache.')
    args = parser.parse_args()
    for pattern in args.exclude or []:
        logging.info("Not watching paths which match: %s", pattern)
    if args.disable_watching:
        logging.info("Disabled directory watching")
    eventLoop = pyuv.Loop.default_loop()
    # Idiom fix: access parsed options as attributes instead of
    # vars(args)['exclude'].
    cache = HashCache(eventLoop, args.exclude, args.disable_watching)
    server = PipeServer(eventLoop, r'\\.\pipe\clcache_srv', cache)
    server.listen()
    # A single Signal handle serves both signals; the handlers close every
    # loop handle, which ends eventLoop.run().
    signalHandle = pyuv.Signal(eventLoop)
    signalHandle.start(onSigint, signal.SIGINT)
    signalHandle.start(onSigterm, signal.SIGTERM)
    logging.info("clcachesrv started")
    eventLoop.run()
if __name__ == '__main__':
    main()
| [
"logging.basicConfig",
"pyuv.fs.FSEvent",
"logging.debug",
"argparse.ArgumentParser",
"pickle.dumps",
"pyuv.Signal",
"os.path.join",
"pyuv.Pipe",
"pyuv.Loop.default_loop",
"sys.exit",
"os.stat",
"os.path.normcase",
"logging.info",
"re.search"
] | [((7319, 7365), 'logging.info', 'logging.info', (['"""Ctrl+C detected, shutting down"""'], {}), "('Ctrl+C detected, shutting down')\n", (7331, 7365), False, 'import logging\n'), ((7429, 7473), 'logging.info', 'logging.info', (['"""Server was killed by SIGTERM"""'], {}), "('Server was killed by SIGTERM')\n", (7441, 7473), False, 'import logging\n'), ((7518, 7612), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s [%(levelname)s]: %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s [%(levelname)s]: %(message)s',\n level=logging.INFO)\n", (7537, 7612), False, 'import logging\n'), ((7623, 7810), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Server process for clcache to cache hash values of headers and observe them for changes."""'}), "(description=\n 'Server process for clcache to cache hash values of headers and observe them for changes.'\n )\n", (7646, 7810), False, 'import argparse\n'), ((8465, 8489), 'pyuv.Loop.default_loop', 'pyuv.Loop.default_loop', ([], {}), '()\n', (8487, 8489), False, 'import pyuv\n'), ((8678, 8700), 'pyuv.Signal', 'pyuv.Signal', (['eventLoop'], {}), '(eventLoop)\n', (8689, 8700), False, 'import pyuv\n'), ((8804, 8838), 'logging.info', 'logging.info', (['"""clcachesrv started"""'], {}), "('clcachesrv started')\n", (8816, 8838), False, 'import logging\n'), ((588, 630), 'logging.debug', 'logging.debug', (['"""getting hash for %s"""', 'path'], {}), "('getting hash for %s', path)\n", (601, 630), False, 'import logging\n'), ((1322, 1380), 'logging.debug', 'logging.debug', (['"""calculated and stored hashsum %s"""', 'hashsum'], {}), "('calculated and stored hashsum %s', hashsum)\n", (1335, 1380), False, 'import logging\n'), ((1482, 1509), 'pyuv.fs.FSEvent', 'pyuv.fs.FSEvent', (['self._loop'], {}), '(self._loop)\n', (1497, 1509), False, 'import pyuv\n'), ((1729, 1786), 'logging.info', 'logging.info', (['"""detected modifications in %s"""', 'handle.path'], {}), "('detected 
modifications in %s', handle.path)\n", (1741, 1786), False, 'import logging\n'), ((5394, 5422), 'logging.info', 'logging.info', (['"""exit command"""'], {}), "('exit command')\n", (5406, 5422), False, 'import logging\n'), ((5431, 5442), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5439, 5442), False, 'import sys, hashlib\n'), ((6529, 6589), 'logging.debug', 'logging.debug', (['"""sent response to client, closing connection"""'], {}), "('sent response to client, closing connection')\n", (6542, 6589), False, 'import logging\n'), ((6746, 6761), 'pyuv.Pipe', 'pyuv.Pipe', (['loop'], {}), '(loop)\n', (6755, 6761), False, 'import pyuv\n'), ((6986, 7031), 'logging.debug', 'logging.debug', (['"""detected incoming connection"""'], {}), "('detected incoming connection')\n", (6999, 7031), False, 'import logging\n'), ((7049, 7081), 'pyuv.Pipe', 'pyuv.Pipe', (['self._pipeServer.loop'], {}), '(self._pipeServer.loop)\n', (7058, 7081), False, 'import pyuv\n'), ((8305, 8364), 'logging.info', 'logging.info', (['"""Not watching paths which match: %s"""', 'pattern'], {}), "('Not watching paths which match: %s', pattern)\n", (8317, 8364), False, 'import logging\n'), ((8404, 8447), 'logging.info', 'logging.info', (['"""Disabled directory watching"""'], {}), "('Disabled directory watching')\n", (8416, 8447), False, 'import logging\n'), ((673, 695), 'os.path.normcase', 'os.path.normcase', (['path'], {}), '(path)\n', (689, 695), False, 'import os\n'), ((848, 897), 'logging.debug', 'logging.debug', (['"""using cached hashsum %s"""', 'hashsum'], {}), "('using cached hashsum %s', hashsum)\n", (861, 897), False, 'import logging\n'), ((1141, 1209), 'logging.debug', 'logging.debug', (['"""starting to watch directory %s for changes"""', 'dirname'], {}), "('starting to watch directory %s for changes', dirname)\n", (1154, 1209), False, 'import logging\n'), ((2405, 2445), 'logging.info', 'logging.info', (['"""NOT watching %s"""', 'dirname'], {}), "('NOT watching %s', dirname)\n", (2417, 2445), 
False, 'import logging\n'), ((4029, 4046), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (4036, 4046), False, 'import os\n'), ((1892, 1927), 'os.path.join', 'os.path.join', (['handle.path', 'filename'], {}), '(handle.path, filename)\n', (1904, 1927), False, 'import os\n'), ((2291, 2333), 're.search', 're.search', (['pattern', 'dirname', 're.IGNORECASE'], {}), '(pattern, dirname, re.IGNORECASE)\n', (2300, 2333), False, 'import re\n'), ((5063, 5078), 'pickle.dumps', 'pickle.dumps', (['e'], {}), '(e)\n', (5075, 5078), False, 'import pickle\n')] |
from dataclasses import dataclass
from pathlib import PurePath
from typing import Optional
@dataclass(frozen=True)
class EnvironmentSettings:
    """Immutable bundle of filesystem locations plus the catalog endpoint.

    project_dir is Optional: None presumably means no project context is
    active — confirm with callers.
    """
    working_dir: PurePath
    project_dir: Optional[PurePath]
    app_dir: PurePath
    cache_dir: PurePath
    catalog_url: str
| [
"dataclasses.dataclass"
] | [((94, 116), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (103, 116), False, 'from dataclasses import dataclass\n')] |
#-*- coding: utf-8 -*-
import xlrd
import xlwt
import re
# Check whether a row satisfies all the application requirements.
def check(row_value):
    """True when the major (col 11) and degree (col 13) requirements match
    and no special requirement (cols 16-18) is flagged."""
    return (checkZY(row_value[11])
            and checkXW(row_value[13])
            and not checkSpecial(row_value))
# Check whether the major requirement is satisfied.
def checkZY(value):
    """True when the major field is unrestricted or one of the accepted majors."""
    return re.search(u'不限|限制|生物工程|化工', value) is not None
# Check whether the degree requirement is satisfied.
def checkXW(value):
    """True when a bachelor's degree suffices or no degree is required."""
    return re.search(u'学士|不|无', value) is not None
# Check whether any special requirement is flagged for this row.
def checkSpecial(row_value):
    """True when any of columns 16-18 contains '是' (yes)."""
    pat = re.compile(u'是')
    return any(re.search(pat, row_value[i]) for i in range(16, 19))
# Filter job positions according to the eligibility rules above.
def filterTitle():
    """Read gjgwy.xls, keep row index 2 unconditionally (presumably the
    header row — confirm) and every other row passing check(), then write
    the survivors to output.xls."""
    data = xlrd.open_workbook('gjgwy.xls')
    output = xlwt.Workbook(encoding='utf-8')
    for sheet in data.sheets():
        output_sheet = output.add_sheet(sheet.name)
        output_row = 1
        for row in range(sheet.nrows):
            row_value = sheet.row_values(row)
            if len(row_value) < 11:
                continue
            if row != 2 and not check(row_value):
                continue
            for col in range(sheet.ncols):
                output_sheet.row(output_row).write(col, sheet.cell(row, col).value)
            output_sheet.flush_row_data()
            output_row += 1
    output.save('output.xls')
if __name__ == '__main__':
    filterTitle()
| [
"re.search",
"xlrd.open_workbook",
"xlwt.Workbook",
"re.compile"
] | [((346, 374), 're.compile', 're.compile', (['u"""不限|限制|生物工程|化工"""'], {}), "(u'不限|限制|生物工程|化工')\n", (356, 374), False, 'import re\n'), ((382, 403), 're.search', 're.search', (['pat', 'value'], {}), '(pat, value)\n', (391, 403), False, 'import re\n'), ((487, 508), 're.compile', 're.compile', (['u"""学士|不|无"""'], {}), "(u'学士|不|无')\n", (497, 508), False, 'import re\n'), ((516, 537), 're.search', 're.search', (['pat', 'value'], {}), '(pat, value)\n', (525, 537), False, 'import re\n'), ((632, 648), 're.compile', 're.compile', (['u"""是"""'], {}), "(u'是')\n", (642, 648), False, 'import re\n'), ((825, 856), 'xlrd.open_workbook', 'xlrd.open_workbook', (['"""gjgwy.xls"""'], {}), "('gjgwy.xls')\n", (843, 856), False, 'import xlrd\n'), ((870, 901), 'xlwt.Workbook', 'xlwt.Workbook', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (883, 901), False, 'import xlwt\n'), ((717, 738), 're.search', 're.search', (['pat', 'value'], {}), '(pat, value)\n', (726, 738), False, 'import re\n')] |
from typing import List, Optional
from athenian.api.models.web.base_model_ import AllOf, Model
from athenian.api.models.web.jira_epic_issue_common import JIRAEpicIssueCommon
from athenian.api.models.web.pull_request import PullRequest
class _JIRAIssueSpecials(Model):
    """Details specific to JIRA issues."""
    # OpenAPI-style metadata: field name -> Python type, consumed by the
    # base Model's (de)serialization machinery.
    openapi_types = {
        "type": str,
        "project": str,
        "prs": Optional[List[PullRequest]],
    }
    # Field name -> wire (JSON) name.
    attribute_map = {
        "type": "type",
        "project": "project",
        "prs": "prs",
    }
    # Presumably disables __slots__ generation so AllOf can merge this
    # mixin with JIRAEpicIssueCommon — confirm in the Model base class.
    __enable_slots__ = False
    def __init__(self,
                 type: Optional[str] = None,
                 project: Optional[str] = None,
                 prs: Optional[List[PullRequest]] = None):
        """JIRAIssue - a model defined in OpenAPI

        :param type: The type of this JIRAIssue.
        :param project: The project of this JIRAIssue.
        :param prs: The prs of this JIRAIssue.
        """
        # NOTE(review): assigning the private attributes directly bypasses
        # the setters' None validation below (generated-model convention).
        self._type = type
        self._project = project
        self._prs = prs
    @property
    def type(self) -> str:
        """Gets the type of this JIRAIssue.

        Name of the issue type. The details are returned in `FilteredJIRAStuff.issue_types`.

        :return: The type of this JIRAIssue.
        """
        return self._type
    @type.setter
    def type(self, type: str):
        """Sets the type of this JIRAIssue.

        Name of the issue type. The details are returned in `FilteredJIRAStuff.issue_types`.

        :param type: The type of this JIRAIssue.
        """
        if type is None:
            raise ValueError("Invalid value for `type`, must not be `None`")
        self._type = type
    @property
    def project(self) -> str:
        """Gets the project of this JIRAIssue.

        Identifier of the project where this issue exists.

        :return: The project of this JIRAIssue.
        """
        return self._project
    @project.setter
    def project(self, project: str):
        """Sets the project of this JIRAIssue.

        Identifier of the project where this issue exists.

        :param project: The project of this JIRAIssue.
        """
        if project is None:
            raise ValueError("Invalid value for `project`, must not be `None`")
        self._project = project
    @property
    def prs(self) -> Optional[List[PullRequest]]:
        """Gets the prs of this JIRAIssue.

        Details about the mapped PRs. `jira` field is unfilled.

        :return: The prs of this JIRAIssue.
        """
        return self._prs
    @prs.setter
    def prs(self, prs: Optional[List[PullRequest]]):
        """Sets the prs of this JIRAIssue.

        Details about the mapped PRs. `jira` field is unfilled.

        :param prs: The prs of this JIRAIssue.
        """
        self._prs = prs
# Public model: common epic/issue fields merged with the specials above.
JIRAIssue = AllOf(JIRAEpicIssueCommon, _JIRAIssueSpecials, name="JIRAIssue", module=__name__)
| [
"athenian.api.models.web.base_model_.AllOf"
] | [((2827, 2913), 'athenian.api.models.web.base_model_.AllOf', 'AllOf', (['JIRAEpicIssueCommon', '_JIRAIssueSpecials'], {'name': '"""JIRAIssue"""', 'module': '__name__'}), "(JIRAEpicIssueCommon, _JIRAIssueSpecials, name='JIRAIssue', module=\n __name__)\n", (2832, 2913), False, 'from athenian.api.models.web.base_model_ import AllOf, Model\n')] |
'''
##vpc-isolate
What it does: disables DNS support for the VPC,
replaces the VPC's network ACL associations with a new empty ACL that denies all traffic,
and attaches an IAM policy to every user in the account that denies EC2 and security-group use within the VPC.
Usage: AUTO: vpc_isolate
Limitation: None
'''
import boto3
from botocore.exceptions import ClientError
import json
text_output = str()
def create_acl(ec2_client, vpc_id):
    """Re-associate every subnet in the VPC with a freshly created network
    ACL; a new ACL denies all traffic by default."""
    global text_output
    subnet_association_ids = []
    try:
        # Collect the subnet associations from every ACL in the VPC.
        acls = ec2_client.describe_network_acls(
            Filters=[{
                'Name': 'vpc-id',
                'Values': [
                    vpc_id,
                ]},
            ],)
        for acl in acls.get('NetworkAcls'):
            for association in acl.get('Associations') or []:
                subnet_association_ids.append(association.get('NetworkAclAssociationId'))
        if subnet_association_ids:
            network_acl = ec2_client.create_network_acl(VpcId=vpc_id)
            for association_id in subnet_association_ids:
                ec2_client.replace_network_acl_association(
                    AssociationId=association_id,
                    DryRun=False,
                    NetworkAclId=network_acl.get('NetworkAclId')
                )
    except ClientError as e:
        text_output = f'Unexpected error: {e}\n'
    return text_output
def attach_policy_to_all_users(boto_session, policy_arn):
    """Attach the isolation policy to every IAM user in the account."""
    global text_output
    iam_client = boto_session.client('iam')
    users = iam_client.list_users()['Users']
    try:
        for user in users:
            iam_client.attach_user_policy(
                UserName=user.get('UserName'),
                PolicyArn=policy_arn,
            )
        text_output = 'Vpc isolated successfully!'
    except ClientError as e:
        text_output = f'Unexpected error: {e}\n'
    return text_output
def create_deny_policy(boto_session, region, vpc_id):
    """Create an IAM policy denying EC2/security-group actions in the VPC."""
    global text_output
    iam_client = boto_session.client('iam')
    vpc_arn = f"arn:aws:ec2:{region}:*:vpc/{vpc_id}"
    # Deny EC2 actions on the VPC itself and on all security groups scoped
    # to it; without the security-group resource in the deny, a user could
    # still launch an instance with an existing security group.
    deny_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Action": "ec2:*",
                "Effect": "Deny",
                "Resource": [
                    vpc_arn,
                    f"arn:aws:ec2:{region}:*:security-group/*"
                ],
                "Condition": {
                    "ArnEquals": {
                        "ec2:Vpc": vpc_arn
                    }
                }
            }
        ]
    }
    try:
        iam_client.create_policy(
            PolicyName=f'isolate_deny_{vpc_id}_access_policy',
            PolicyDocument=json.dumps(deny_policy)
        )
    except ClientError as e:
        text_output = f'Unexpected error: {e}\n'
    return text_output
# Look up the deny policy in the account; the caller creates it if absent.
def check_for_policy(boto_session, policy_arn):
    """Return True when the deny policy already exists, False when it is
    missing, or an error string on unexpected failures."""
    iam_client = boto_session.client('iam')
    try:
        iam_client.get_policy(PolicyArn=policy_arn)
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchEntity':
            return False
        return f'Unexpected error: {e}\n'
    return True
def run_action(boto_session, rule, entity, params):
    """Isolate the VPC named by *entity*: disable DNS, deny-all the ACLs,
    and (if missing) create and attach the user deny policy.
    Returns a status/error string via the module-level text_output."""
    # Parameters needed by all the helpers below.
    vpc_id = entity.get('id')
    region = entity.get('region')
    global text_output
    try:
        # Fetch the VPC to learn the owning account for the policy ARN.
        ec2_client = boto_session.client('ec2')
        vpc = ec2_client.describe_vpcs(VpcIds=[vpc_id,],).get('Vpcs')[0]
        account_id = vpc.get('OwnerId')
        policy_arn = f"arn:aws:iam::{account_id}:policy/isolate_deny_{vpc_id}_access_policy"
        # Disable the VPC's DNS support.
        ec2_client.modify_vpc_attribute(EnableDnsSupport={
            'Value': False
        }, VpcId=vpc_id)
        # Replace subnet ACL associations with a new deny-all ACL.
        text_output = create_acl(ec2_client, vpc_id)
        # Proceed only when ACL replacement succeeded and the deny policy
        # does not exist yet.
        # NOTE(review): when the policy already exists, the attach step is
        # skipped entirely — confirm that is intentional.
        if text_output == '' and not check_for_policy(boto_session, policy_arn):
            # Policy is missing: create it, then attach it to all users.
            text_output = create_deny_policy(boto_session, region, vpc_id)
            if 'error' in text_output:
                return text_output
            text_output = attach_policy_to_all_users(boto_session, policy_arn)
    except ClientError as e:
        text_output = f'Unexpected error: {e}\n'
    return text_output
| [
"json.dumps"
] | [((3585, 3608), 'json.dumps', 'json.dumps', (['deny_policy'], {}), '(deny_policy)\n', (3595, 3608), False, 'import json\n')] |