code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import logging
from typing import Callable
from typing import List
import numpy as np
import torch.utils.data
from .video_dataset import VideoDataset
from .video_dataset import VideoRecord
LOG = logging.getLogger(__name__)
# line_profiler injects a "profile" into __builtins__. When not running under
# line_profiler we need to inject our own passthrough
if type(__builtins__) is not dict or "profile" not in __builtins__:
profile = lambda f: f
class TsnDataset(torch.utils.data.Dataset):
    """
    Wraps a :class:`VideoDataset` to implement TSN sampling
    """

    def __init__(
        self,
        dataset: VideoDataset,
        num_segments: int = 3,
        segment_length: int = 1,
        transform: Callable = None,
        random_shift: bool = True,
        test_mode: bool = False,
    ):
        """
        Args:
            dataset: Video dataset to load TSN-sampled segments from.
            num_segments: Number of segments per clip.
            segment_length: Length of segment in number of frames.
            transform: A transform applied to the list of frames sampled
                from the clip.
            random_shift: Whether segment start offsets are sampled randomly
                (training) or placed deterministically (validation). Ignored
                when ``test_mode`` is set.
            test_mode: Whether to return center sampled frames from each segment.
        """
        self.dataset = dataset
        self.num_segments = num_segments
        self.segment_length = segment_length
        self.transform = transform
        self.random_shift = random_shift
        self.test_mode = test_mode

    def __getitem__(self, index):
        record = self.dataset.video_records[index]
        # Test mode is always deterministic center sampling; otherwise the
        # choice between random and deterministic offsets follows random_shift.
        if self.test_mode:
            segment_start_idxs = self._get_test_indices(record)
        else:
            segment_start_idxs = (
                self._sample_indices(record)
                if self.random_shift
                else self._get_val_indices(record)
            )
        return self._get(record, segment_start_idxs)

    def __len__(self):
        return len(self.dataset)

    @profile
    def _get(self, record: VideoRecord, segment_start_idxs: List[int]):
        """Load the frames for the given segment starts, apply the transform
        (if any), and return ``(images, metadata)``."""
        images = self.dataset.load_frames(
            record, self._get_frame_idxs(segment_start_idxs, record)
        )
        if self.transform is not None:
            images = self.transform(images)
        metadata = record.metadata
        return images, metadata

    def _sample_indices(self, record: VideoRecord):
        """Randomly sample one segment start index per segment (training)."""
        average_duration = (
            record.num_frames - self.segment_length + 1
        ) // self.num_segments
        if average_duration > 0:
            # Evenly spaced segments, each with a random shift within itself.
            offsets = np.multiply(
                list(range(self.num_segments)), average_duration
            ) + np.random.randint(average_duration, size=self.num_segments)
        elif record.num_frames > self.num_segments:
            # Not enough frames for full segments: draw sorted random starts.
            offsets = np.sort(
                np.random.randint(
                    record.num_frames - self.segment_length + 1, size=self.num_segments
                )
            )
        else:
            # Fewer frames than segments: every segment starts at frame 0.
            offsets = np.zeros((self.num_segments,))
        return offsets

    def _get_val_indices(self, record: VideoRecord):
        """Deterministic segment start indices (validation)."""
        if record.num_frames > self.num_segments + self.segment_length - 1:
            # Same centered placement as test mode; reuse it rather than
            # duplicating the formula.
            offsets = self._get_test_indices(record)
        else:
            offsets = np.zeros((self.num_segments,))
        return offsets

    def _get_test_indices(self, record: VideoRecord):
        """Center-sampled segment start indices (test mode)."""
        tick = (record.num_frames - self.segment_length + 1) / float(self.num_segments)
        offsets = np.array(
            [int(tick / 2.0 + tick * x) for x in range(self.num_segments)]
        )
        return offsets

    def _get_frame_idxs(
        self, segment_start_idxs: List[int], record: VideoRecord
    ) -> List[int]:
        """Expand each segment start into ``segment_length`` consecutive frame
        indices, holding the last index once the clip end is reached.

        NOTE(review): the guard allows indices up to ``record.num_frames``,
        which suggests 1-based frame indexing — confirm against
        ``VideoDataset.load_frames``.
        """
        seg_idxs = []
        for seg_ind in segment_start_idxs:
            p = int(seg_ind)
            for i in range(self.segment_length):
                seg_idxs.append(p)
                if p < record.num_frames:
                    p += 1
        return seg_idxs
| [
"logging.getLogger",
"numpy.zeros",
"numpy.random.randint"
] | [((199, 226), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (216, 226), False, 'import logging\n'), ((3419, 3449), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (3427, 3449), True, 'import numpy as np\n'), ((2618, 2677), 'numpy.random.randint', 'np.random.randint', (['average_duration'], {'size': 'self.num_segments'}), '(average_duration, size=self.num_segments)\n', (2635, 2677), True, 'import numpy as np\n'), ((2952, 2982), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (2960, 2982), True, 'import numpy as np\n'), ((2777, 2868), 'numpy.random.randint', 'np.random.randint', (['(record.num_frames - self.segment_length + 1)'], {'size': 'self.num_segments'}), '(record.num_frames - self.segment_length + 1, size=self.\n num_segments)\n', (2794, 2868), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import webob
import tg.decorators
from decorator import decorator
from pylons import request
import mock
import simplejson
from allura.lib import helpers as h
_patched = False
def apply():
    """Install all of the TurboGears/pylons monkeypatches, exactly once."""
    global _patched
    # Idempotence guard: apply() may be called multiple times (e.g. in tests).
    if _patched:
        return
    _patched = True
    old_lookup_template_engine = tg.decorators.Decoration.lookup_template_engine
    @h.monkeypatch(tg.decorators.Decoration)
    def lookup_template_engine(self, request):
        '''Wrapper to handle totally borked-up HTTP-ACCEPT headers'''
        try:
            return old_lookup_template_engine(self, request)
        except:
            # Fall through and retry below with a sanitized Accept header.
            pass
        environ = dict(request.environ, HTTP_ACCEPT='*/*')
        request = webob.Request(environ)
        return old_lookup_template_engine(self, request)
    @h.monkeypatch(tg, tg.decorators)
    def override_template(controller, template):
        '''Copy-pasted patch to allow multiple colons in a template spec'''
        if hasattr(controller, 'decoration'):
            decoration = controller.decoration
        else:
            return
        if hasattr(decoration, 'engines'):
            engines = decoration.engines
        else:
            return
        for content_type, content_engine in engines.iteritems():
            # maxsplit=1 is the actual fix: keep colons inside the template path.
            template = template.split(':', 1)
            template.extend(content_engine[2:])
            try:
                override_mapping = request._override_mapping
            except AttributeError:
                override_mapping = request._override_mapping = {}
            override_mapping[controller.im_func] = {content_type: template}
    @h.monkeypatch(tg, tg.decorators)
    @decorator
    def without_trailing_slash(func, *args, **kwargs):
        '''Monkey-patched to use 301 redirects for SEO, and handle query strings'''
        response_type = getattr(request, 'response_type', None)
        if (request.method == 'GET' and request.path.endswith('/') and not response_type):
            location = request.path_url[:-1]
            if request.query_string:
                location += '?' + request.query_string
            raise webob.exc.HTTPMovedPermanently(location=location)
        return func(*args, **kwargs)
    @h.monkeypatch(tg, tg.decorators)
    @decorator
    def with_trailing_slash(func, *args, **kwargs):
        '''Monkey-patched to use 301 redirects for SEO, and handle query strings'''
        response_type = getattr(request, 'response_type', None)
        if (request.method == 'GET' and not request.path.endswith('/') and not response_type):
            location = request.path_url + '/'
            if request.query_string:
                location += '?' + request.query_string
            raise webob.exc.HTTPMovedPermanently(location=location)
        return func(*args, **kwargs)
    # http://blog.watchfire.com/wfblog/2011/10/json-based-xss-exploitation.html
    # change < to its unicode escape when rendering JSON out of turbogears
    # This is to avoid IE9 and earlier, which don't know the json content type
    # and may attempt to render JSON data as HTML if the URL ends in .html
    original_tg_jsonify_GenericJSON_encode = tg.jsonify.GenericJSON.encode
    escape_pattern_with_lt = re.compile(
        simplejson.encoder.ESCAPE.pattern.rstrip(']') + '<' + ']')
    @h.monkeypatch(tg.jsonify.GenericJSON)
    def encode(self, o):
        # ensure_ascii=False forces encode_basestring() to be called instead of
        # encode_basestring_ascii() and encode_basestring_ascii may likely be c-compiled
        # and thus not monkeypatchable
        with h.push_config(self, ensure_ascii=False), \
                h.push_config(simplejson.encoder, ESCAPE=escape_pattern_with_lt), \
                mock.patch.dict(simplejson.encoder.ESCAPE_DCT, {'<': r'\u003C'}):
            return original_tg_jsonify_GenericJSON_encode(self, o)
# must be saved outside the newrelic() method so that multiple newrelic()
# calls (e.g. during tests) don't cause the patching to get applied to itself
# over and over
old_controller_call = tg.controllers.DecoratedController._call
def newrelic():
    """Patch DecoratedController._call so NewRelic names transactions by
    controller instead of the generic dispatcher.

    ``old_controller_call`` is captured at module level (see above this
    function in the file) so repeated calls don't re-wrap the wrapper.
    """
    # Patch both module paths under which TG exposes DecoratedController so
    # the override takes effect regardless of which one is used at dispatch.
    @h.monkeypatch(tg.controllers.DecoratedController,
                  tg.controllers.decoratedcontroller.DecoratedController)
    def _call(self, controller, *args, **kwargs):
        '''Set NewRelic transaction name to actual controller name'''
        # Imported lazily: newrelic is only needed when this patch is active.
        import newrelic.agent
        newrelic.agent.set_transaction_name(
            newrelic.agent.callable_name(controller))
        return old_controller_call(self, controller, *args, **kwargs)
| [
"allura.lib.helpers.monkeypatch",
"webob.Request",
"mock.patch.dict",
"pylons.request.path.endswith",
"allura.lib.helpers.push_config",
"simplejson.encoder.ESCAPE.pattern.rstrip",
"webob.exc.HTTPMovedPermanently"
] | [((1232, 1271), 'allura.lib.helpers.monkeypatch', 'h.monkeypatch', (['tg.decorators.Decoration'], {}), '(tg.decorators.Decoration)\n', (1245, 1271), True, 'from allura.lib import helpers as h\n'), ((1659, 1691), 'allura.lib.helpers.monkeypatch', 'h.monkeypatch', (['tg', 'tg.decorators'], {}), '(tg, tg.decorators)\n', (1672, 1691), True, 'from allura.lib import helpers as h\n'), ((2481, 2513), 'allura.lib.helpers.monkeypatch', 'h.monkeypatch', (['tg', 'tg.decorators'], {}), '(tg, tg.decorators)\n', (2494, 2513), True, 'from allura.lib import helpers as h\n'), ((3071, 3103), 'allura.lib.helpers.monkeypatch', 'h.monkeypatch', (['tg', 'tg.decorators'], {}), '(tg, tg.decorators)\n', (3084, 3103), True, 'from allura.lib import helpers as h\n'), ((4156, 4193), 'allura.lib.helpers.monkeypatch', 'h.monkeypatch', (['tg.jsonify.GenericJSON'], {}), '(tg.jsonify.GenericJSON)\n', (4169, 4193), True, 'from allura.lib import helpers as h\n'), ((4972, 5082), 'allura.lib.helpers.monkeypatch', 'h.monkeypatch', (['tg.controllers.DecoratedController', 'tg.controllers.decoratedcontroller.DecoratedController'], {}), '(tg.controllers.DecoratedController, tg.controllers.\n decoratedcontroller.DecoratedController)\n', (4985, 5082), True, 'from allura.lib import helpers as h\n'), ((1573, 1595), 'webob.Request', 'webob.Request', (['environ'], {}), '(environ)\n', (1586, 1595), False, 'import webob\n'), ((2772, 2798), 'pylons.request.path.endswith', 'request.path.endswith', (['"""/"""'], {}), "('/')\n", (2793, 2798), False, 'from pylons import request\n'), ((2978, 3027), 'webob.exc.HTTPMovedPermanently', 'webob.exc.HTTPMovedPermanently', ([], {'location': 'location'}), '(location=location)\n', (3008, 3027), False, 'import webob\n'), ((3570, 3619), 'webob.exc.HTTPMovedPermanently', 'webob.exc.HTTPMovedPermanently', ([], {'location': 'location'}), '(location=location)\n', (3600, 3619), False, 'import webob\n'), ((4440, 4479), 'allura.lib.helpers.push_config', 'h.push_config', (['self'], 
{'ensure_ascii': '(False)'}), '(self, ensure_ascii=False)\n', (4453, 4479), True, 'from allura.lib import helpers as h\n'), ((4499, 4563), 'allura.lib.helpers.push_config', 'h.push_config', (['simplejson.encoder'], {'ESCAPE': 'escape_pattern_with_lt'}), '(simplejson.encoder, ESCAPE=escape_pattern_with_lt)\n', (4512, 4563), True, 'from allura.lib import helpers as h\n'), ((4583, 4647), 'mock.patch.dict', 'mock.patch.dict', (['simplejson.encoder.ESCAPE_DCT', "{'<': '\\\\u003C'}"], {}), "(simplejson.encoder.ESCAPE_DCT, {'<': '\\\\u003C'})\n", (4598, 4647), False, 'import mock\n'), ((3363, 3389), 'pylons.request.path.endswith', 'request.path.endswith', (['"""/"""'], {}), "('/')\n", (3384, 3389), False, 'from pylons import request\n'), ((4091, 4136), 'simplejson.encoder.ESCAPE.pattern.rstrip', 'simplejson.encoder.ESCAPE.pattern.rstrip', (['"""]"""'], {}), "(']')\n", (4131, 4136), False, 'import simplejson\n')] |
#!/usr/bin/python3
import cartinit
from kivy.app import App
from kivy.uix.screenmanager import Screen, ScreenManager, SlideTransition
from kivy.lang import Builder
from buttons import RoundedButton
# Project-level initialisation; its side effects are not visible from this
# file — presumably it must run before the UI is built (TODO confirm).
cartinit.init()
# create ScreenManager as root, put all screens into
sm = ScreenManager()
sm.transition = SlideTransition()
# screens collects every Screen instance created at runtime (see __main__).
screens = []
# load kv files
Builder.load_file('screens.kv')
class DefaultScreen(Screen):
    # DefaultScreen, other screen should be subclass of DefaultScreen
    # (shared base for app screens; currently adds nothing beyond kivy Screen)
    pass
class MainScreen(DefaultScreen):
    # main menu on startup
    # Layout/widgets are expected to come from screens.kv (loaded at import).
    pass
class CartApp(App):
    # main app
    def build(self):
        # The module-level ScreenManager is the root widget of the app.
        return sm
if __name__ == '__main__':
    app = CartApp()
    # Create the main menu screen, register it, and make it active before
    # entering the kivy event loop.
    screens.append(MainScreen())
    sm.switch_to(screens[-1])
    app.run()
| [
"kivy.uix.screenmanager.ScreenManager",
"kivy.lang.Builder.load_file",
"cartinit.init",
"kivy.uix.screenmanager.SlideTransition"
] | [((200, 215), 'cartinit.init', 'cartinit.init', ([], {}), '()\n', (213, 215), False, 'import cartinit\n'), ((275, 290), 'kivy.uix.screenmanager.ScreenManager', 'ScreenManager', ([], {}), '()\n', (288, 290), False, 'from kivy.uix.screenmanager import Screen, ScreenManager, SlideTransition\n'), ((307, 324), 'kivy.uix.screenmanager.SlideTransition', 'SlideTransition', ([], {}), '()\n', (322, 324), False, 'from kivy.uix.screenmanager import Screen, ScreenManager, SlideTransition\n'), ((355, 386), 'kivy.lang.Builder.load_file', 'Builder.load_file', (['"""screens.kv"""'], {}), "('screens.kv')\n", (372, 386), False, 'from kivy.lang import Builder\n')] |
# The MIT License (MIT)
#
# Copyright © 2021 <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import numpy as np
from random import random, seed
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
# FrankeFunction: a two-variables function to create the dataset of our vanilla problem
def FrankeFunction(x, y):
    """Evaluate the Franke function at (x, y); vectorized over numpy arrays.

    A weighted sum of four Gaussian bumps, commonly used as a synthetic
    regression surface on the unit square.
    """
    bump1 = 0.75 * np.exp(-(0.25 * (9 * x - 2) ** 2) - 0.25 * ((9 * y - 2) ** 2))
    bump2 = 0.75 * np.exp(-((9 * x + 1) ** 2) / 49.0 - 0.1 * (9 * y + 1))
    bump3 = 0.5 * np.exp(-(9 * x - 7) ** 2 / 4.0 - 0.25 * ((9 * y - 3) ** 2))
    bump4 = -0.2 * np.exp(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)
    return bump1 + bump2 + bump3 + bump4
# 3D plot of FrankeFunction
def Plot_FrankeFunction(x, y, z, title="Dataset"):
    """3D surface plot of z over the (x, y) grid.

    Args:
        x, y: 2D coordinate grids (e.g. from ``np.meshgrid``).
        z: surface values, same shape as ``x`` and ``y``.
        title: figure title.
    """
    fig = plt.figure(figsize=(8, 7))
    # ``fig.gca(projection="3d")`` was deprecated in matplotlib 3.4 and
    # removed in 3.6; ``add_subplot`` is the supported way to get 3D axes.
    ax = fig.add_subplot(projection="3d")
    # Plot the surface.
    surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
    # Customize the z axis.
    ax.set_zlim(-0.10, 1.40)
    ax.set_xlabel(r"$x$")
    ax.set_ylabel(r"$y$")
    ax.set_zlabel(r"$z$")
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.title(title)
    plt.show()
# Create xyz dataset from the FrankeFunction with a added normal distributed noise
def create_xyz_dataset(n, mu_N, sigma_N):
    """Build an n-by-n grid on [0, 1]^2 and the Franke surface with noise.

    Per-point Gaussian noise with mean ``mu_N`` and std ``sigma_N`` is added
    to the surface values.

    Returns:
        (x, y, z): the two coordinate grids and the noisy surface.
    """
    axis = np.linspace(0, 1, n)
    x, y = np.meshgrid(axis, axis)
    z = FrankeFunction(x, y) + mu_N + sigma_N * np.random.randn(n, n)
    return x, y, z
# Error analysis: MSE and R2 score
def R2(z_data, z_model):
    """Coefficient of determination (R^2) of ``z_model`` against ``z_data``."""
    residual_ss = np.sum((z_data - z_model) ** 2)
    total_ss = np.sum((z_data - np.mean(z_data)) ** 2)
    return 1 - residual_ss / total_ss
def MSE(z_data, z_model):
    """Mean squared error between observations and model predictions."""
    squared_errors = (z_data - z_model) ** 2
    return np.sum(squared_errors) / np.size(z_model)
# SVD theorem
def SVD(A):
    """Reconstruct ``A`` from its singular value decomposition U @ D @ VT.

    Mainly a sanity check that the SVD factors reproduce the matrix.

    Args:
        A: 2D array of shape (m, n).
    Returns:
        The (m, n) product U @ D @ VT, equal to ``A`` up to round-off.
    """
    U, S, VT = np.linalg.svd(A, full_matrices=True)
    # Build the (m, n) rectangular diagonal matrix of singular values.
    # The previous ``np.eye(m, n) * S`` broadcast only worked when m >= n
    # (it needs len(S) == n); fill_diagonal handles any shape, and the
    # leftover debug prints / commented-out loop are removed.
    D = np.zeros((U.shape[0], VT.shape[0]))
    np.fill_diagonal(D, S)
    return U @ D @ VT
# SVD inversion
def SVDinv(A):
    """Moore-Penrose pseudoinverse of ``A`` computed via its SVD.

    For an invertible square matrix this equals the ordinary inverse.
    Note: zero singular values cause a division by zero, as in the original
    implementation — no tolerance cut-off is applied.
    """
    U, s, VT = np.linalg.svd(A)
    # reciprocals of singular values of s
    d = 1.0 / s
    # Rectangular (m, n) matrix with 1/s on its diagonal.
    # The previous ``D[:n, :n] = np.diag(d)`` assumed len(s) == A.shape[1],
    # which fails for wide matrices (m < n); fill the true diagonal instead.
    D = np.zeros(A.shape)
    np.fill_diagonal(D, d)
    UT = np.transpose(U)
    V = np.transpose(VT)
    return np.matmul(V, np.matmul(D.T, UT))
# Design matrix for two indipendent variables x,y
def create_X(x, y, n):
    """Design matrix for a degree-``n`` bivariate polynomial in (x, y).

    Columns are ordered 1, x, y, x^2, xy, y^2, x^3, x^2 y, ... — one block
    per total degree.
    """
    if len(x.shape) > 1:
        x = np.ravel(x)
        y = np.ravel(y)
    num_points = len(x)
    # Number of monomials of total degree <= n in two variables.
    num_features = int((n + 1) * (n + 2) / 2)
    X = np.ones((num_points, num_features))
    for degree in range(1, n + 1):
        first_col = int(degree * (degree + 1) / 2)  # start of this degree block
        for k in range(degree + 1):
            X[:, first_col + k] = x ** (degree - k) * y ** k
    return X
def scale_Xz(X_train, X_test, z_train, z_test, with_std=False):
    """Center (and optionally scale) X and z using statistics fit on train.

    The scalers are fit on the training data only and then applied to both
    splits, avoiding test-set leakage.
    """
    x_scaler = StandardScaler(with_std=with_std)
    x_scaler.fit(X_train)
    X_train = x_scaler.transform(X_train)
    X_test = x_scaler.transform(X_test)
    z_scaler = StandardScaler(with_std=with_std)
    # z is reshaped to a column because sklearn scalers expect 2D input.
    z_train = np.squeeze(z_scaler.fit_transform(z_train.reshape(-1, 1)))
    z_test = np.squeeze(z_scaler.transform(z_test.reshape(-1, 1)))
    return X_train, X_test, z_train, z_test
# Splitting and rescaling data (rescaling is optional)
# Default values: 20% of test data and the scaler is StandardScaler without std.dev.
def Split_and_Scale(X, z, test_size=0.2, scale=True, with_std=False):
    """Train/test split of (X, z), optionally standardised via ``scale_Xz``.

    Defaults: 20% test data, centering without std-scaling.
    """
    X_train, X_test, z_train, z_test = train_test_split(X, z, test_size=test_size)
    if not scale:
        return X_train, X_test, z_train, z_test
    return scale_Xz(X_train, X_test, z_train, z_test, with_std=with_std)
# OLS equation
def OLS_solver(X_train, X_test, z_train, z_test):
    """Ordinary least squares fit via the Moore-Penrose pseudoinverse.

    Returns:
        (ols_beta, z_tilde, z_predict): the fitted coefficients and the
        model's predictions on the train and test design matrices.
        ``z_test`` is accepted for interface symmetry but unused.
    """
    # pinv tolerates a singular X^T X; SVDinv would work equivalently here.
    gram_pinv = np.linalg.pinv(X_train.T @ X_train)
    ols_beta = gram_pinv @ X_train.T @ z_train
    z_tilde = X_train @ ols_beta    # prediction on the training data
    z_predict = X_test @ ols_beta   # prediction on the test data
    return ols_beta, z_tilde, z_predict
# Return the rolling mean of a vector and two values at one sigma from the rolling average
def Rolling_Mean(vector, windows=3):
    """Rolling mean of ``vector`` and the one-sigma band around it.

    Returns:
        (mean, mean - std, mean + std) as numpy arrays; the first
        ``windows - 1`` entries are NaN because the window is incomplete.
    """
    rolling = pd.DataFrame({'vector': vector}).vector.rolling(windows)
    mean = rolling.mean().to_numpy()
    std = rolling.std().to_numpy()
    return mean, mean - std, mean + std
# Plot MSE in function of complexity of the model (rolling mean)
def plot_ols_complexity(x, y, z, maxdegree = 20, title="MSE as a function of model complexity"):
    """Plot train/test MSE of OLS fits for polynomial degrees 0..maxdegree.

    For each degree the data is re-split at random, an OLS model is fitted,
    and the train/test MSE recorded; the plotted curves are rolling means
    with a one-sigma band, plus the raw values as dashed lines.
    """
    complexity = np.arange(0,maxdegree+1)
    MSE_train_set = []
    MSE_test_set = []
    for degree in complexity:
        X = create_X(x, y, degree)
        X_train, X_test, z_train, z_test = Split_and_Scale(X,np.ravel(z)) #StardardScaler, test_size=0.2, scale=true
        ols_beta, z_tilde,z_predict = OLS_solver(X_train, X_test, z_train, z_test)
        MSE_train_set.append(MSE(z_train,z_tilde))
        MSE_test_set.append(MSE(z_test,z_predict))
    plt.figure( figsize = ( 10, 7))
    MSE_train_mean, MSE_train_down, MSE_train_up = Rolling_Mean(MSE_train_set)
    plt.plot(complexity, MSE_train_mean, label ="Train (rolling ave.)", color="purple")
    plt.fill_between(complexity, MSE_train_down, MSE_train_up, alpha=0.2, color="purple")
    MSE_test_mean, MSE_test_down, MSE_test_up = Rolling_Mean(MSE_test_set)
    plt.plot(complexity, MSE_test_mean, label ="Test (rolling ave.)", color="orange")
    plt.fill_between(complexity, MSE_test_down, MSE_test_up, alpha=0.2, color="orange")
    plt.plot(complexity, MSE_train_set, '--', alpha=0.3, color="purple", label ="Train (actual values)")
    plt.plot(complexity, MSE_test_set, '--', alpha=0.3, color="orange", label ="Test (actual values)")
    plt.xlabel("Complexity")
    plt.ylabel("MSE")
    # Skip the leading NaNs the rolling window produces when choosing x-limits.
    plt.xlim(complexity[~np.isnan(MSE_train_mean)][0]-1,complexity[-1]+1)
    plt.title("Plot of the MSE as a function of complexity of the model\n– Rolling mean and one-sigma region –")
    plt.legend()
    plt.grid()
    plt.show()
def ridge_reg(X_train, X_test, z_train, z_test, lmd = 10**(-12)):
    """Ridge regression with L2 penalty ``lmd``.

    Returns:
        (ridge_beta, z_model, z_predict): fitted coefficients and the
        predictions on the train and test design matrices. ``z_test`` is
        accepted for interface symmetry but unused.
    """
    # Closed-form ridge solution: (X^T X + lmd I)^(-1) X^T z, via pinv so a
    # singular Gram matrix at lmd=0 still works. (Dead commented-out code
    # referencing undefined `lambdas`/`MSE_values` removed.)
    ridge_beta = np.linalg.pinv(X_train.T @ X_train + lmd*np.eye(len(X_train.T))) @ X_train.T @ z_train
    z_model = X_train @ ridge_beta      # prediction on the training data
    z_predict = X_test @ ridge_beta     # prediction on the test data
    return ridge_beta, z_model, z_predict
def lasso_reg(X_train, X_test, z_train, z_test, lmd = 10**(-12)):
    """Lasso regression (scikit-learn) with L1 penalty ``lmd``.

    Returns:
        (z_model, z_predict): predictions on the train and test design
        matrices. ``z_test`` is accepted for interface symmetry but unused.
    """
    lasso = linear_model.Lasso(lmd)
    lasso.fit(X_train, z_train)
    z_model = lasso.predict(X_train)
    z_predict = lasso.predict(X_test)
    return z_model, z_predict
| [
"matplotlib.pyplot.grid",
"numpy.linalg.pinv",
"sklearn.linear_model.Lasso",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.linspace",
"numpy.matmul",
"pandas.DataFrame",
... | [((1979, 2005), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 7)'}), '(figsize=(8, 7))\n', (1989, 2005), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2516), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2509, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2531), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2529, 2531), True, 'import matplotlib.pyplot as plt\n'), ((2669, 2689), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (2680, 2689), True, 'import numpy as np\n'), ((2696, 2716), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (2707, 2716), True, 'import numpy as np\n'), ((2726, 2743), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2737, 2743), True, 'import numpy as np\n'), ((3012, 3028), 'numpy.size', 'np.size', (['z_model'], {}), '(z_model)\n', (3019, 3028), True, 'import numpy as np\n'), ((3112, 3148), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {'full_matrices': '(True)'}), '(A, full_matrices=True)\n', (3125, 3148), True, 'import numpy as np\n'), ((3517, 3533), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (3530, 3533), True, 'import numpy as np\n'), ((3628, 3645), 'numpy.zeros', 'np.zeros', (['A.shape'], {}), '(A.shape)\n', (3636, 3645), True, 'import numpy as np\n'), ((3724, 3734), 'numpy.diag', 'np.diag', (['d'], {}), '(d)\n', (3731, 3734), True, 'import numpy as np\n'), ((3744, 3759), 'numpy.transpose', 'np.transpose', (['U'], {}), '(U)\n', (3756, 3759), True, 'import numpy as np\n'), ((3768, 3784), 'numpy.transpose', 'np.transpose', (['VT'], {}), '(VT)\n', (3780, 3784), True, 'import numpy as np\n'), ((4075, 4090), 'numpy.ones', 'np.ones', (['(N, l)'], {}), '((N, l))\n', (4082, 4090), True, 'import numpy as np\n'), ((4284, 4317), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (4298, 4317), False, 'from sklearn.preprocessing 
import StandardScaler\n'), ((4458, 4491), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (4472, 4491), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5046, 5089), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'z'], {'test_size': 'test_size'}), '(X, z, test_size=test_size)\n', (5062, 5089), False, 'from sklearn.model_selection import train_test_split\n'), ((6015, 6047), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': vector}"], {}), "({'vector': vector})\n", (6027, 6047), True, 'import pandas as pd\n'), ((6608, 6635), 'numpy.arange', 'np.arange', (['(0)', '(maxdegree + 1)'], {}), '(0, maxdegree + 1)\n', (6617, 6635), True, 'import numpy as np\n'), ((7057, 7084), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (7067, 7084), True, 'import matplotlib.pyplot as plt\n'), ((7181, 7268), 'matplotlib.pyplot.plot', 'plt.plot', (['complexity', 'MSE_train_mean'], {'label': '"""Train (rolling ave.)"""', 'color': '"""purple"""'}), "(complexity, MSE_train_mean, label='Train (rolling ave.)', color=\n 'purple')\n", (7189, 7268), True, 'import matplotlib.pyplot as plt\n'), ((7269, 7359), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['complexity', 'MSE_train_down', 'MSE_train_up'], {'alpha': '(0.2)', 'color': '"""purple"""'}), "(complexity, MSE_train_down, MSE_train_up, alpha=0.2, color\n ='purple')\n", (7285, 7359), True, 'import matplotlib.pyplot as plt\n'), ((7434, 7519), 'matplotlib.pyplot.plot', 'plt.plot', (['complexity', 'MSE_test_mean'], {'label': '"""Test (rolling ave.)"""', 'color': '"""orange"""'}), "(complexity, MSE_test_mean, label='Test (rolling ave.)', color='orange'\n )\n", (7442, 7519), True, 'import matplotlib.pyplot as plt\n'), ((7520, 7608), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['complexity', 'MSE_test_down', 'MSE_test_up'], {'alpha': '(0.2)', 'color': '"""orange"""'}), 
"(complexity, MSE_test_down, MSE_test_up, alpha=0.2, color=\n 'orange')\n", (7536, 7608), True, 'import matplotlib.pyplot as plt\n'), ((7613, 7717), 'matplotlib.pyplot.plot', 'plt.plot', (['complexity', 'MSE_train_set', '"""--"""'], {'alpha': '(0.3)', 'color': '"""purple"""', 'label': '"""Train (actual values)"""'}), "(complexity, MSE_train_set, '--', alpha=0.3, color='purple', label=\n 'Train (actual values)')\n", (7621, 7717), True, 'import matplotlib.pyplot as plt\n'), ((7718, 7820), 'matplotlib.pyplot.plot', 'plt.plot', (['complexity', 'MSE_test_set', '"""--"""'], {'alpha': '(0.3)', 'color': '"""orange"""', 'label': '"""Test (actual values)"""'}), "(complexity, MSE_test_set, '--', alpha=0.3, color='orange', label=\n 'Test (actual values)')\n", (7726, 7820), True, 'import matplotlib.pyplot as plt\n'), ((7827, 7851), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Complexity"""'], {}), "('Complexity')\n", (7837, 7851), True, 'import matplotlib.pyplot as plt\n'), ((7856, 7873), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSE"""'], {}), "('MSE')\n", (7866, 7873), True, 'import matplotlib.pyplot as plt\n'), ((7952, 8073), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot of the MSE as a function of complexity of the model\n– Rolling mean and one-sigma region –"""'], {}), '(\n """Plot of the MSE as a function of complexity of the model\n– Rolling mean and one-sigma region –"""\n )\n', (7961, 8073), True, 'import matplotlib.pyplot as plt\n'), ((8065, 8077), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8075, 8077), True, 'import matplotlib.pyplot as plt\n'), ((8082, 8092), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8090, 8092), True, 'import matplotlib.pyplot as plt\n'), ((8097, 8107), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8105, 8107), True, 'import matplotlib.pyplot as plt\n'), ((8637, 8660), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (['lmd'], {}), '(lmd)\n', (8655, 8660), False, 'from sklearn import 
linear_model\n'), ((1646, 1706), 'numpy.exp', 'np.exp', (['(-(0.25 * (9 * x - 2) ** 2) - 0.25 * (9 * y - 2) ** 2)'], {}), '(-(0.25 * (9 * x - 2) ** 2) - 0.25 * (9 * y - 2) ** 2)\n', (1652, 1706), True, 'import numpy as np\n'), ((1707, 1759), 'numpy.exp', 'np.exp', (['(-(9 * x + 1) ** 2 / 49.0 - 0.1 * (9 * y + 1))'], {}), '(-(9 * x + 1) ** 2 / 49.0 - 0.1 * (9 * y + 1))\n', (1713, 1759), True, 'import numpy as np\n'), ((1761, 1818), 'numpy.exp', 'np.exp', (['(-(9 * x - 7) ** 2 / 4.0 - 0.25 * (9 * y - 3) ** 2)'], {}), '(-(9 * x - 7) ** 2 / 4.0 - 0.25 * (9 * y - 3) ** 2)\n', (1767, 1818), True, 'import numpy as np\n'), ((1819, 1863), 'numpy.exp', 'np.exp', (['(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)'], {}), '(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)\n', (1825, 1863), True, 'import numpy as np\n'), ((2318, 2335), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (2331, 2335), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((2370, 2397), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (2388, 2397), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((3040, 3071), 'numpy.sum', 'np.sum', (['((z_data - z_model) ** 2)'], {}), '((z_data - z_model) ** 2)\n', (3046, 3071), True, 'import numpy as np\n'), ((3206, 3217), 'numpy.shape', 'np.shape', (['D'], {}), '(D)\n', (3214, 3217), True, 'import numpy as np\n'), ((3241, 3252), 'numpy.shape', 'np.shape', (['S'], {}), '(S)\n', (3249, 3252), True, 'import numpy as np\n'), ((3808, 3826), 'numpy.matmul', 'np.matmul', (['D.T', 'UT'], {}), '(D.T, UT)\n', (3817, 3826), True, 'import numpy as np\n'), ((3930, 3941), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (3938, 3941), True, 'import numpy as np\n'), ((3948, 3959), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (3956, 3959), True, 'import numpy as np\n'), ((2786, 2807), 'numpy.random.randn', 'np.random.randn', (['n', 'n'], {}), '(n, n)\n', (2801, 
2807), True, 'import numpy as np\n'), ((2905, 2936), 'numpy.sum', 'np.sum', (['((z_data - z_model) ** 2)'], {}), '((z_data - z_model) ** 2)\n', (2911, 2936), True, 'import numpy as np\n'), ((5644, 5679), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(X_train.T @ X_train)'], {}), '(X_train.T @ X_train)\n', (5658, 5679), True, 'import numpy as np\n'), ((6806, 6817), 'numpy.ravel', 'np.ravel', (['z'], {}), '(z)\n', (6814, 6817), True, 'import numpy as np\n'), ((2956, 2971), 'numpy.mean', 'np.mean', (['z_data'], {}), '(z_data)\n', (2963, 2971), True, 'import numpy as np\n'), ((7899, 7923), 'numpy.isnan', 'np.isnan', (['MSE_train_mean'], {}), '(MSE_train_mean)\n', (7907, 7923), True, 'import numpy as np\n')] |
""" Runs tests for Ptyhon Odin SDK """
import unittest
from os import environ
import random
from pymongo import MongoClient
import pyodin as odin
class OdinSdkTest(unittest.TestCase):
    """Integration tests for the Python Odin SDK.

    Requires a reachable MongoDB (ODIN_MONGODB env var) and a job.yml config
    resolvable relative to the working directory.
    """
    def setUp(self):
        client = MongoClient(environ.get('ODIN_MONGODB'))
        mongodb = client['odin']
        self.collection = mongodb['observability']
    def tearDown(self):
        # NOTE(review): cleanup matches on id "test_id" while the tests insert
        # documents identified by random descriptions — confirm those records
        # are removed elsewhere or actually carry this id.
        self.collection.delete_many({"id" : "test_id"})
    @staticmethod
    def _unique_desc():
        """Random description so repeated/parallel runs cannot collide.

        Extracted to remove the random-suffix boilerplate duplicated in
        every test method.
        """
        return 'test_desc' + str(random.randint(100000, 999999))
    def test_condition_not_odin_env(self):
        """ Run condition operation outside of Odin Env """
        test_desc = self._unique_desc()
        odin_test = odin.Odin(config="job.yml", path_type="relative")
        cond = odin_test.condition(test_desc, True)
        result = self.collection.find_one({"description" : test_desc})
        # Outside the Odin environment nothing is written to the DB.
        self.assertEqual(cond, True)
        self.assertEqual(None, result)
    def test_watch_not_odin_env(self):
        """ Run watch operation outside of Odin Env """
        test_desc = self._unique_desc()
        odin_test = odin.Odin(config="job.yml", path_type="relative")
        odin_test.watch(test_desc, True)
        result = self.collection.find_one({"description" : test_desc})
        self.assertEqual(None, result)
    def test_condition(self):
        """ Run condition operation inside Odin Env """
        test_desc = self._unique_desc()
        # test=True sets odin exc env to true and in turn enables logging everything to the DB
        odin_test = odin.Odin(test=True, config="job.yml", path_type="relative")
        cond = odin_test.condition(test_desc, True)
        result = self.collection.find_one({"description" : test_desc})
        self.assertEqual(cond, True)
        self.assertEqual(test_desc, result['description'])
    def test_watch(self):
        """ Run watch operation inside Odin Env """
        test_desc = self._unique_desc()
        # test=True sets odin exc env to true and in turn enables logging everything to the DB
        odin_test = odin.Odin(test=True, config="job.yml", path_type="relative")
        odin_test.watch(test_desc, True)
        result = self.collection.find_one({"description" : test_desc})
        self.assertEqual(test_desc, result['description'])
# Allow running this module directly as a script.
if __name__ == "__main__":
    unittest.main() # run all tests
| [
"unittest.main",
"pyodin.Odin",
"os.environ.get",
"random.randint"
] | [((2522, 2537), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2535, 2537), False, 'import unittest\n'), ((597, 627), 'random.randint', 'random.randint', (['(100000)', '(999999)'], {}), '(100000, 999999)\n', (611, 627), False, 'import random\n'), ((699, 748), 'pyodin.Odin', 'odin.Odin', ([], {'config': '"""job.yml"""', 'path_type': '"""relative"""'}), "(config='job.yml', path_type='relative')\n", (708, 748), True, 'import pyodin as odin\n'), ((1067, 1097), 'random.randint', 'random.randint', (['(100000)', '(999999)'], {}), '(100000, 999999)\n', (1081, 1097), False, 'import random\n'), ((1169, 1218), 'pyodin.Odin', 'odin.Odin', ([], {'config': '"""job.yml"""', 'path_type': '"""relative"""'}), "(config='job.yml', path_type='relative')\n", (1178, 1218), True, 'import pyodin as odin\n'), ((1480, 1510), 'random.randint', 'random.randint', (['(100000)', '(999999)'], {}), '(100000, 999999)\n', (1494, 1510), False, 'import random\n'), ((1677, 1737), 'pyodin.Odin', 'odin.Odin', ([], {'test': '(True)', 'config': '"""job.yml"""', 'path_type': '"""relative"""'}), "(test=True, config='job.yml', path_type='relative')\n", (1686, 1737), True, 'import pyodin as odin\n'), ((2059, 2089), 'random.randint', 'random.randint', (['(100000)', '(999999)'], {}), '(100000, 999999)\n', (2073, 2089), False, 'import random\n'), ((2256, 2316), 'pyodin.Odin', 'odin.Odin', ([], {'test': '(True)', 'config': '"""job.yml"""', 'path_type': '"""relative"""'}), "(test=True, config='job.yml', path_type='relative')\n", (2265, 2316), True, 'import pyodin as odin\n'), ((278, 305), 'os.environ.get', 'environ.get', (['"""ODIN_MONGODB"""'], {}), "('ODIN_MONGODB')\n", (289, 305), False, 'from os import environ\n')] |
from artemis.general.dict_ops import cross_dict_dicts, merge_dicts
__author__ = 'peter'
def test_cross_dict_dicts():
    """Every pair of outer keys yields a tuple key with the inner dicts merged."""
    left = {'a': {'aa': 1}, 'b': {'bb': 2}}
    right = {'c': {'cc': 3}, 'd': {'dd': 4}}
    expected = {
        ('a', 'c'): {'aa': 1, 'cc': 3},
        ('a', 'd'): {'aa': 1, 'dd': 4},
        ('b', 'c'): {'bb': 2, 'cc': 3},
        ('b', 'd'): {'bb': 2, 'dd': 4},
    }
    assert cross_dict_dicts(left, right) == expected
def test_dict_merge():
    """Later dicts override earlier ones, applied left to right."""
    merged = merge_dicts({'a': 1, 'b': 2, 'c': 3}, {'c': 4, 'd': 5}, {'d': 6, 'e': 7})
    assert merged == {'a': 1, 'b': 2, 'c': 4, 'd': 6, 'e': 7}
# Run both checks when invoked as a script.
if __name__ == "__main__":
    test_dict_merge()
    test_cross_dict_dicts()
| [
"artemis.general.dict_ops.cross_dict_dicts",
"artemis.general.dict_ops.merge_dicts"
] | [((131, 220), 'artemis.general.dict_ops.cross_dict_dicts', 'cross_dict_dicts', (["{'a': {'aa': 1}, 'b': {'bb': 2}}", "{'c': {'cc': 3}, 'd': {'dd': 4}}"], {}), "({'a': {'aa': 1}, 'b': {'bb': 2}}, {'c': {'cc': 3}, 'd': {\n 'dd': 4}})\n", (147, 220), False, 'from artemis.general.dict_ops import cross_dict_dicts, merge_dicts\n'), ((409, 482), 'artemis.general.dict_ops.merge_dicts', 'merge_dicts', (["{'a': 1, 'b': 2, 'c': 3}", "{'c': 4, 'd': 5}", "{'d': 6, 'e': 7}"], {}), "({'a': 1, 'b': 2, 'c': 3}, {'c': 4, 'd': 5}, {'d': 6, 'e': 7})\n", (420, 482), False, 'from artemis.general.dict_ops import cross_dict_dicts, merge_dicts\n')] |
import numpy as np
import pandas as pd
import os.path as path
import abydos.distance as abd
import abydos.phonetic as abp
import pytest
from scipy.sparse import csc_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
import name_matching.name_matcher as nm
@pytest.fixture
def name_match():
    """A NameMatcher loaded with the raw test names, not yet processed."""
    root = path.abspath(__file__)
    for _ in range(3):
        root = path.dirname(root)
    master = pd.read_csv(path.join(root, 'test', 'test_names.csv'))
    matcher = nm.NameMatcher()
    matcher.load_and_process_master_data(
        'company_name', master, start_processing=False, transform=False)
    return matcher
@pytest.fixture
def adjusted_name():
    """The adjusted test names loaded as a DataFrame."""
    root = path.abspath(__file__)
    for _ in range(3):
        root = path.dirname(root)
    return pd.read_csv(path.join(root, 'test', 'adjusted_test_names.csv'))
@pytest.fixture
def words():
    """A fixed bag of words with known repetition counts."""
    text = ('fun small pool fun small pool sign small pool sign sign '
            'small pool sign paper oppose paper oppose brown pig fat '
            'oppose paper oppose brown pig fat snail')
    return text.split()
@pytest.mark.parametrize("method", ["", None, 'no_method'])
def test_make_distance_metrics_error(name_match, method):
    """Unknown or non-string metric names must raise a TypeError."""
    with pytest.raises(TypeError):
        name_match.set_distance_metrics([method])
@pytest.mark.parametrize("method, result",
                         [['indel', abd.Indel()],
                          ['discounted_levenshtein', abd.DiscountedLevenshtein()],
                          ['tichy', abd.Tichy()],
                          ['cormodeL_z', abd.CormodeLZ()],
                          ['iterative_sub_string', abd.IterativeSubString()],
                          ['baulieu_xiii', abd.BaulieuXIII()],
                          ['clement', abd.Clement()],
                          ['dice_asymmetricI', abd.DiceAsymmetricI()],
                          ['kuhns_iii', abd.KuhnsIII()],
                          ['overlap', abd.Overlap()],
                          ['pearson_ii', abd.PearsonII()],
                          ['weighted_jaccard', abd.WeightedJaccard()],
                          ['warrens_iv', abd.WarrensIV()],
                          ['bag', abd.Bag()],
                          ['rouge_l', abd.RougeL()],
                          ['ratcliff_obershelp', abd.RatcliffObershelp()],
                          ['ncd_bz2', abd.NCDbz2()],
                          ['fuzzy_wuzzy_partial_string',
                              abd.FuzzyWuzzyPartialString()],
                          ['fuzzy_wuzzy_token_sort', abd.FuzzyWuzzyTokenSort()],
                          ['fuzzy_wuzzy_token_set', abd.FuzzyWuzzyTokenSet()],
                          ['editex', abd.Editex()],
                          ['typo', abd.Typo()],
                          ['lig_3', abd.LIG3()],
                          ['ssk', abd.SSK()],
                          ['refined_soundex', abd.PhoneticDistance(transforms=abp.RefinedSoundex(
                              max_length=30), metric=abd.Levenshtein(), encode_alpha=True)],
                          ['double_metaphone', abd.PhoneticDistance(transforms=abp.DoubleMetaphone(max_length=30), metric=abd.Levenshtein(), encode_alpha=True)]]
                         )
def test_make_distance_metrics(name_match, method, result):
    """Setting a metric by name stores an instance of the matching abydos class."""
    name_match.set_distance_metrics([method])
    assert type(name_match._distance_metrics.popitem()[1][0]) == type(result)
@pytest.mark.parametrize("kwargs_str, result_1, result_2, result_3, result_4",
                         [[{"ngrams": (4, 5)}, 0, False, (4, 5), 5000],
                          [{"low_memory": True}, 0, True, (2, 3), 5000],
                          [{"legal_suffixes": True}, 244, False, (2, 3), 5000],
                          [{"legal_suffixes": True, "number_of_rows": 8,
                              "ngrams": (1, 2, 3)}, 244, False, (1, 2, 3), 8],
                          ])
def test_initialisation(kwargs_str, result_1, result_2, result_3, result_4):
    """Constructor kwargs set the expected internal state on NameMatcher."""
    name_match = nm.NameMatcher(**kwargs_str)
    assert len(name_match._word_set) == result_1
    assert name_match._low_memory == result_2
    assert name_match._vec.ngram_range == result_3
    assert name_match._number_of_rows == result_4
@pytest.mark.parametrize("occ, result_1, result_2, result_3, result_4, result_5",
                         [[1, '', '', '', '', ''],
                          [2, 'a-nd', 'Hndkiewicz,2Nicolas',
                              'Tashirian', '<NAME>', 'Marquardt,'],
                          [3, '<NAME>-nd', 'Hndkiewicz,2Nicolas',
                              'Runolfsson, <NAME>', '<NAME>', '<NAME>,'],
                          ])
def test_preprocess_reduce(name_match, adjusted_name, occ, result_1, result_2, result_3, result_4, result_5):
    """Spot-check _preprocess_reduce output for several occurence counts."""
    name_match._column_matching = 'company_name'
    new_names = name_match._preprocess_reduce(
        adjusted_name, occurence_count=occ)
    assert new_names.loc[1866, 'company_name'] == result_1
    assert new_names.loc[1423, 'company_name'] == result_2
    assert new_names.loc[268, 'company_name'] == result_3
    assert new_names.loc[859, 'company_name'] == result_4
    assert new_names.loc[1918, 'company_name'] == result_5
@pytest.mark.parametrize("col, start_pro, transform",
                         [['company_name', False, False],
                          ['no_name', False, False],
                          ['company_name', True, False],
                          ['company_name', True, True],
                          ['company_name', True, True],
                          ])
def test_load_and_process_master_data(adjusted_name, col, start_pro, transform):
    """Loading master data stores the column/frame and honours the flags."""
    name_matcher = nm.NameMatcher()
    name_matcher.load_and_process_master_data(
        column=col,
        df_matching_data=adjusted_name,
        start_processing=start_pro,
        transform=transform)
    assert name_matcher._column == col
    pd.testing.assert_frame_equal(
        name_matcher._df_matching_data, adjusted_name)
    assert name_matcher._preprocessed == start_pro
    # The n-gram matrix only exists when both processing and transform ran.
    if transform & start_pro:
        assert type(name_matcher._n_grams_matching) == csc_matrix
@pytest.mark.parametrize("trans, common",
                         [[False, False],
                          [True, False],
                          [False, True],
                          [True, True],
                          ])
def test_process_matching_data(name_match, trans, common):
    """Processing marks data preprocessed; transform builds the n-gram matrix."""
    name_match._postprocess_common_words = common
    name_match._process_matching_data(transform=trans)
    assert name_match._preprocessed
    if trans:
        assert type(name_match._n_grams_matching) == csc_matrix
    else:
        assert name_match._n_grams_matching is None
    # The common-word set is only populated when postprocessing is enabled.
    if common:
        assert len(name_match._word_set) > 0
    else:
        assert len(name_match._word_set) == 0
@pytest.mark.parametrize("lower_case, punctuations, ascii, result_1, result_2, result_3",
                         [[False, False, False, 'Schumm PLC', 'Towne, Johnston and Murray', 'Ösinski-Schinner'],
                          [True, False, False, 'schumm plc',
                              'towne, johnston and murray', 'ösinski-schinner'],
                          [False, True, False, 'Schumm PLC',
                              'Towne Johnston and Murray', 'ÖsinskiSchinner'],
                          [False, False, True, 'Schumm PLC',
                              'Towne, Johnston and Murray', 'Osinski-Schinner'],
                          [False, True, True, 'Schumm PLC',
                              'Towne Johnston and Murray', 'OsinskiSchinner'],
                          [True, False, True, 'schumm plc',
                              'towne, johnston and murray', 'osinski-schinner'],
                          [True, True, False, 'schumm plc',
                              'towne johnston and murray', 'ösinskischinner'],
                          [True, True, True, 'schumm plc',
                              'towne johnston and murray', 'osinskischinner'],
                          ])
def test_preprocess(name_match, lower_case, punctuations, ascii, result_1, result_2, result_3):
    """Lower-casing, punctuation stripping and ASCII folding combine per flags."""
    name_match._preprocess_lowercase = lower_case
    name_match._preprocess_punctuations = punctuations
    name_match._preprocess_ascii = ascii
    new_df = name_match.preprocess(
        name_match._df_matching_data, 'company_name')
    assert new_df.loc[0, 'company_name'] == result_1
    assert new_df.loc[2, 'company_name'] == result_2
    assert new_df.loc[784, 'company_name'] == result_3
@pytest.mark.parametrize("low_memory, ngrams, result_1, result_2, result_3",
                         [[1, (5, 6), 0.02579, 0.00781, 0.01738],
                          [6, (2, 3), 0.009695, 0.01022, 0.01120],
                          [8, (1, 2), 0.027087, 0.02765, 0.02910],
                          [0, (5, 6), 0.02579, 0.00781, 0.01738],
                          [0, (2, 3), 0.009695, 0.01022, 0.01120],
                          [0, (1, 2), 0.027087, 0.02765, 0.02910],
                          ])
def test_transform_data(name_match, low_memory, ngrams, result_1, result_2, result_3):
    """The tf-idf n-gram matrix holds the same values with or without low memory."""
    name_match._low_memory = low_memory
    name_match._vec = TfidfVectorizer(
        lowercase=False, analyzer="char", ngram_range=ngrams)
    name_match._process_matching_data(transform=False)
    name_match.transform_data()
    # Spot-check three entries of the sparse matrix data array.
    assert name_match._n_grams_matching.data[10] == pytest.approx(
        result_1, 0.001)
    assert name_match._n_grams_matching.data[181] == pytest.approx(
        result_2, 0.001)
    assert name_match._n_grams_matching.data[1000] == pytest.approx(
        result_3, 0.001)
@pytest.mark.parametrize("to_be_matched, possible_matches, metrics, result",
                         [('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein'], 5),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 7),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'overlap', 'bag'], 11),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',
                                                     'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein'], 4),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'overlap', 'bag'], 6),
                          ('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',
                                          'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
                          ('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman', 'Gerlach and Sons',
                                          'Bank de Nederlandsche'], ['weighted_jaccard', 'discounted_levenshtein'], 4),
                          ('Schumm PLC', ['Torphy-Corkery', '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
                          ('Schumm PLC', ['Torphy-Corkery', '<NAME> and Tillman', 'Gerlach and Sons',
                                          'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'iterative_sub_string'], 8),
                          ('Schumm PLC', ['Torphy-Corkery', '<NAME>', 'Gerlach and Sons',
                                          'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 8)
                          ])
def test_score_matches(to_be_matched, possible_matches, metrics, result):
    """Spot-check the argmax of the raw score matrix for metric combinations."""
    name_match = nm.NameMatcher()
    name_match.set_distance_metrics(metrics)
    assert np.argmax(name_match._score_matches(
        to_be_matched, possible_matches)) == result
@pytest.mark.parametrize("number_of_matches, match_score, metrics, result",
                         [(1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1]]), ['weighted_jaccard'], [0]),
                          (2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
                              'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
                          (3, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]), [
                              'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], [2, 1, 1]),
                          (2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
                              1, 0.2, 0.3, 0.2, 0.1]]), ['tichy', 'overlap', 'bag'], [2, 1]),
                          (2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
                              'overlap', 'bag'], [0, 2]),
                          (1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
                              1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'iterative_sub_string'], [1]),
                          (2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
                              1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'bag'], [1, 0]),
                          (1, np.array([[0.3, 0.3, 0.8, 0.2, 0.2]]), [
                              'weighted_jaccard'], [0]),
                          (3, np.array([[0.3, 0.3, 0.8, 0.2, 0.2], [0.3, 0.3, 0.8, 0.1, 0.1]]), [
                              'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
                          (2, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.1, 0.1, 0.2, 0.3, 0.02]]), [
                              'weighted_jaccard', 'iterative_sub_string'], [0, 0]),
                          (1, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.3, 0.3, 0.2, 0.3, 0.02]]), [
                              'overlap', 'iterative_sub_string'], [1]),
                          (1, np.array(
                              [[-0.5, -0.8, -0.3, -0.7, 0, 2]]), ['bag'], [0]),
                          (3, np.array([[10, 8, 7, 6, 12, 15, 14, 88]]), [
                              'weighted_jaccard'], [0]),
                          (2, np.array([[1, 0.3], [0.1, 0.4]]), [
                              'weighted_jaccard', 'discounted_levenshtein'], [0, 1])
                          ])
def test_rate_matches(number_of_matches, match_score, metrics, result):
    """_rate_matches picks the expected candidate rows from a score matrix."""
    name_match = nm.NameMatcher()
    name_match._number_of_matches = number_of_matches
    name_match.set_distance_metrics(metrics)
    ind = name_match._rate_matches(match_score)
    # Debug print(ind) removed.
    # No more matches can be returned than there are scored rows.
    assert len(ind) == np.min([number_of_matches, match_score.shape[0]])
    assert list(ind) == result
def test_vectorise_data(name_match):
    """Fitting the vectoriser must produce a non-empty n-gram vocabulary."""
    name_match._vectorise_data(transform=False)
    vocabulary = name_match._vec.vocabulary_
    assert len(vocabulary) > 0
@pytest.mark.parametrize("match, number_of_matches, word_set, score, result",
                         [(pd.Series(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=['match_name_0', 'score_0', 'match_index_0', 'original_name']), 1, set(['De', 'Bank', 'nl']), 0, 94.553),
                          (pd.Series(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=[
                              'match_name_0', 'score_0', 'match_index_0', 'original_name']), 1, set(['komt', 'niet', 'voor']), 0, 69.713),
                          (pd.Series(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank'], index=[
                              'match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1', 'match_index_1', 'original_name']), 1, set(['De', 'Bank', 'nl']), 1, 0.4),
                          (pd.Series(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank'], index=[
                              'match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1', 'match_index_1', 'original_name']), 1, set(['De', 'Bank', 'nl']), 0, 86.031),
                          ])
def test_postprocess(name_match, match, number_of_matches, word_set, score, result):
    """Spot-check score adjustment by common-word postprocessing."""
    name_match._number_of_matches = number_of_matches
    name_match._word_set = word_set
    new_match = name_match.postprocess(match)
    assert new_match.loc[f'score_{score}'] == pytest.approx(result, 0.0001)
@pytest.mark.parametrize("indicator, punctuations, word_set, cut_off, result_1, result_2",
                         [('legal', False, set(), 0.01, 'plc.', 'bedrijf'),
                          ('legal', True, set(), 0.01, 'plc', 'bedrijf'),
                          ('legal', True, set(['bedrijf']),
                           0.01, 'bedrijf', 'Group'),
                          ('common', True, set(), 0.01, 'Group', 'West'),
                          ('common', True, set(), 0.3, 'and', 'Group'),
                          ('common', True, set(['West']),
                           0.3, 'West', 'bedrijf'),
                          ('someting', True, set(['key']), 0.01, 'key', 'val')
                          ])
def test_make_no_scoring_words(name_match, indicator, punctuations, word_set, cut_off, result_1, result_2):
    """The no-scoring word set contains result_1 and excludes result_2."""
    name_match._preprocess_punctuations = punctuations
    new_word_set = name_match._make_no_scoring_words(
        indicator, word_set, cut_off)
    # Debug print(new_word_set) removed.
    assert new_word_set.issuperset(set([result_1]))
    assert not new_word_set.issuperset(set([result_2]))
def test_search_for_possible_matches_error(adjusted_name):
    """Searching before the master data is processed raises RuntimeError."""
    matcher = nm.NameMatcher()
    with pytest.raises(RuntimeError):
        matcher._search_for_possible_matches(adjusted_name)
@pytest.mark.parametrize("top_n, low_memory, result_1, result_2",
                         [(10, 0, 1518, 144),
                          (50, 0, 1992, 9),
                          (100, 0, 1999, 6),
                          (1, 0, 44, 144),
                          (10, 8, 1518, 144),
                          (50, 8, 1992, 9),
                          (100, 8, 1999, 6),
                          (1, 8, 44, 144)
                          ])
def test_search_for_possible_matches(name_match, adjusted_name, top_n, low_memory, result_1, result_2):
    """The candidate search returns top_n in-range integer indices per name."""
    name_match._column_matching = 'company_name'
    name_match._low_memory = low_memory
    name_match._top_n = top_n
    name_match._process_matching_data(True)
    possible_match = name_match._search_for_possible_matches(adjusted_name)
    assert possible_match.shape[1] == top_n
    assert np.max(possible_match) < len(adjusted_name)
    # All candidate indices must be whole numbers.
    assert np.all(possible_match.astype(int) == possible_match)
    assert np.max(possible_match[44, :]) == result_1
    assert np.min(possible_match[144, :]) == result_2
@pytest.mark.parametrize("common_words, num_matches, possible_matches, matching_series, result_0, result_1",
                         [(True, 3, np.array([29, 343, 727, 855, 1702]), pd.Series(
                             ['Company and Sons'], index=['company_name']), 36.03, 31.33),
                          (False, 2, np.array([29, 343, 727, ]), pd.Series(
                              ['Company and Sons'], index=['company_name']), 71.28, 68.6),
                          (False, 2, np.array([29, 343]), pd.Series(
                              ['Company and Sons'], index=['company_name']), 71.28, 68.6),
                          (False, 2, np.array([[29, 343], [0, 0]]), pd.Series(
                              ['Company and Sons'], index=['company_name']), 71.28, 68.6),
                          (False, 2, np.array([29, 343, 727, 855, 1702]), pd.Series(
                              ['Company and Sons'], index=['company_name']), 72.28, 71.28)
                          ])
def test_fuzzy_matches(name_match, common_words, num_matches, possible_matches, matching_series, result_0, result_1):
    """Fuzzy matching scores candidates and keeps indices from the pool."""
    name_match._column_matching = 'company_name'
    name_match._number_of_matches = num_matches
    name_match._postprocess_common_words = common_words
    name_match._word_set = set(['Sons', 'and'])
    match = name_match.fuzzy_matches(possible_matches, matching_series)
    assert match['score_0'] == pytest.approx(result_0, 0.0001)
    assert match['score_1'] == pytest.approx(result_1, 0.0001)
    assert match['match_index_0'] in possible_matches
    assert match['match_index_1'] in possible_matches
def test_do_name_matching_full(name_match, adjusted_name):
    """1922 of the adjusted names should match back to their own row."""
    matches = name_match.match_names(adjusted_name, 'company_name')
    correct = np.sum(matches['match_index'] == matches.index)
    assert correct == 1922
def test_do_name_matching_split(name_match, adjusted_name):
    """With word-splitting enabled, row 44 still matches itself."""
    name_match._preprocess_split = True
    matched = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
    assert np.any(matched['match_index'] == 44)
def test_do_name_matching_series(name_match, adjusted_name):
    """Matching a single Series row finds the row's own index."""
    matched = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
    assert np.any(matched['match_index'] == 44)
def test_do_name_matching_error(adjusted_name):
    """Matching without loaded master data raises ValueError."""
    matcher = nm.NameMatcher()
    with pytest.raises(ValueError):
        matcher.match_names(adjusted_name, 'company_name')
@pytest.mark.parametrize("verbose", [True, False])
def test_do_name_matching_print(capfd, name_match, adjusted_name, verbose):
    """Verbose matching reports progress on stdout; quiet mode prints nothing."""
    name_match._verbose = verbose
    name_match.match_names(adjusted_name.iloc[:5].copy(), 'company_name')
    out, _ = capfd.readouterr()
    if verbose:
        for keyword in ('preprocessing', 'searching', 'possible', 'fuzzy', 'done'):
            assert out.find(keyword) > -1
    else:
        assert out == ''
@pytest.mark.parametrize("word, occurence_count, result",
                         [['fun snail pool', 2, 'snail'],
                          ['fun snail pool', 3, 'fun snail'],
                          ['fun snail pool', 1, ''],
                          ['fun small pool', 3, 'fun small pool'],
                          ['fun snail', 3, 'fun snail'],
                          ['fun small pool', 5, 'fun small pool']])
def test_select_top_words(word, words, occurence_count, result):
    """Spot-check _select_top_words for several occurrence cut-offs."""
    counts = pd.Series(words).value_counts()
    matcher = nm.NameMatcher()
    selected = matcher._select_top_words(word.split(), counts, occurence_count)
    assert selected == result
@pytest.mark.parametrize("match, num_of_matches, result",
                         [[{'match_name_1': 'fun', 'match_name_2': 'dog',
                            'match_name_0': 'cat'}, 3, ['cat', 'fun', 'dog']],
                          [{'match_name_1': 'fun', 'match_name_2': 'dog',
                            'match_name_0': 'cat'}, 2, ['cat', 'fun']],
                          [{'match_name_1': 'fun', 'match_name_0': 'cat'},
                           2, ['cat', 'fun']],
                          [{'match_name_1': 'fun', 'match_name_2': 'dog', 'match_name_0': 'cat'}, 0, []]])
def test_get_alternative_names(match, num_of_matches, result):
    """Match names 0..n-1 are collected into an ordered list."""
    matcher = nm.NameMatcher(number_of_matches=num_of_matches)
    assert matcher._get_alternative_names(pd.Series(match)) == result
@pytest.mark.parametrize("preprocess_punctuations, output, input, x",
                         [[True, '_blame_', {'test': ['fun...', 'done'], 'num':['_.blame._']}, 2],
                          [True, 'done', {'test': ['fun. . . ',
                                                   'done'], 'num':['_.blame._']}, 1],
                          [True, 'fun', {
                              'test': ['fun. . . ', 'done'], 'num':['_.blame._']}, 0],
                          [False, 'fun. . .', {
                              'test': ['fun. . . ', 'done'], 'num':['_.blame._']}, 0],
                          [False, 'fun. . .', {
                              'num': ['_.blame._'], 'test': ['fun. . . ', 'done']}, 1]
                          ])
def test_preprocess_word_list(preprocess_punctuations, output, input, x):
    """Word dicts are flattened, optionally with punctuation stripped."""
    name_match = nm.NameMatcher(punctuations=preprocess_punctuations)
    res = name_match._preprocess_word_list(input)
    # Debug print(res) removed.
    assert res[x] == output
@pytest.mark.parametrize("num_matches, match_score, match, result, y",
                         [[3, np.array([[1, 1, 1], [1, 1, 1], [0, 0, 0]]), pd.Series(dtype=float), 100, 0],
                          [2, np.array([[1, 1], [0.4, 0.4], [0, 0]]),
                           pd.Series(dtype=float), 40, 1],
                          [1, np.array([[1, 1], [1, 1], [0, 0]]),
                           pd.Series(dtype=float), 100, 0]
                          ])
def test_adjust_scores(num_matches, match_score, match, result, y):
    """Spot-check that raw metric scores are rescaled onto a 0-100 range."""
    name_match = nm.NameMatcher(number_of_matches=num_matches)
    match = name_match._adjust_scores(match_score, match)
    assert match[y] == result
@pytest.mark.parametrize("string, stringlist, result_1, result_2, y",
                         [['know sign first', ['know', 'know sign', 'know sign first'], 'know first', 'know first', 2],
                          ['know sign first', ['know', 'know sign',
                                               'know sign first'], 'know first', 'know', 1],
                          ['know sign first', ['know', 'know sign',
                                               'know sign first'], 'know first', 'know', 0],
                          ['know first', ['know', 'know', 'know'],
                           'know first', 'know', 1],
                          ['pool sign small', ['sign small',
                                               'small pool sign', 'small'], '', '', 0],
                          ['pool sign small know', ['sign small',
                                                    'small pool sign', 'small'], 'know', '', 0],
                          ['know pool sign small', ['sign small',
                                                    'small pool sign', 'small'], 'know', '', 0],
                          ['pool sign small', ['sign small',
                                               'small pool know sign', 'small'], '', 'know', 1],
                          ])
def test_process_words(words, string, stringlist, result_1, result_2, y):
    """Spot-check removal of word-set words from a string and a string list."""
    name_match = nm.NameMatcher()
    name_match._word_set = set(words)
    string, stringlist = name_match._process_words(string, stringlist)
    assert string == result_1
    assert stringlist[y] == result_2
@pytest.mark.parametrize("word_set, cut_off, result_1, result_2",
                         [[set(), 0, 1518, 'Group'],
                          [set(), 0, 1518, 'and'],
                          [set(), 0.1, 7, 'Group'],
                          [set(), 0.1, 7, 'LLC'],
                          [set(), 0.12, 6, 'LLC'],
                          [set(), 0.2, 1, 'and'],
                          [set(['apple']), 1, 1, 'apple'],
                          [set(['apple']), 0, 1519, 'apple'],
                          [set(['apple']), 0, 1519, 'Group']
                          ])
def test_process_common_words(name_match, word_set, cut_off, result_1, result_2):
    """Spot-check the common-word set for several frequency cut-offs."""
    words = name_match._process_common_words(word_set, cut_off)
    assert result_2 in words
    assert len(words) == result_1
@pytest.mark.parametrize("word_set, preprocess, result_1, result_2",
                         [[set(), True, 244, 'company'],
                          [set(), True, 244, '3ao'],
                          [set(), True, 244, 'gmbh'],
                          [set(), False, 312, '& company'],
                          [set(), False, 312, '3ao'],
                          [set(), False, 312, 'g.m.b.h.'],
                          [set(['apple']), True, 245, 'apple'],
                          [set(['apple']), False, 313, 'apple'],
                          [set(['apple..']), True, 245, 'apple..'],
                          [set(['apple..']), False, 313, 'apple..']
                          ])
def test_process_legal_words(word_set, preprocess, result_1, result_2):
    """Legal suffixes are merged into the word set, punctuation-stripped or not."""
    name_match = nm.NameMatcher()
    name_match._preprocess_punctuations = preprocess
    words = name_match._process_legal_words(word_set)
    assert result_2 in words
    assert len(words) == result_1
| [
"abydos.distance.Overlap",
"abydos.phonetic.RefinedSoundex",
"numpy.array",
"abydos.distance.Levenshtein",
"abydos.distance.KuhnsIII",
"abydos.distance.BaulieuXIII",
"name_matching.name_matcher.NameMatcher",
"pandas.testing.assert_frame_equal",
"abydos.distance.WeightedJaccard",
"abydos.phonetic.D... | [((1163, 1221), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['', None, 'no_method']"], {}), "('method', ['', None, 'no_method'])\n", (1186, 1221), False, 'import pytest\n'), ((3589, 3929), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kwargs_str, result_1, result_2, result_3, result_4"""', "[[{'ngrams': (4, 5)}, 0, False, (4, 5), 5000], [{'low_memory': True}, 0, \n True, (2, 3), 5000], [{'legal_suffixes': True}, 244, False, (2, 3), \n 5000], [{'legal_suffixes': True, 'number_of_rows': 8, 'ngrams': (1, 2, \n 3)}, 244, False, (1, 2, 3), 8]]"], {}), "('kwargs_str, result_1, result_2, result_3, result_4',\n [[{'ngrams': (4, 5)}, 0, False, (4, 5), 5000], [{'low_memory': True}, 0,\n True, (2, 3), 5000], [{'legal_suffixes': True}, 244, False, (2, 3), \n 5000], [{'legal_suffixes': True, 'number_of_rows': 8, 'ngrams': (1, 2, \n 3)}, 244, False, (1, 2, 3), 8]])\n", (3612, 3929), False, 'import pytest\n'), ((4395, 4672), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""occ, result_1, result_2, result_3, result_4, result_5"""', "[[1, '', '', '', '', ''], [2, 'a-nd', 'Hndkiewicz,2Nicolas', 'Tashirian',\n '<NAME>', 'Marquardt,'], [3, '<NAME>-nd', 'Hndkiewicz,2Nicolas',\n 'Runolfsson, <NAME>', '<NAME>', '<NAME>,']]"], {}), "('occ, result_1, result_2, result_3, result_4, result_5'\n , [[1, '', '', '', '', ''], [2, 'a-nd', 'Hndkiewicz,2Nicolas',\n 'Tashirian', '<NAME>', 'Marquardt,'], [3, '<NAME>-nd',\n 'Hndkiewicz,2Nicolas', 'Runolfsson, <NAME>', '<NAME>', '<NAME>,']])\n", (4418, 4672), False, 'import pytest\n'), ((5372, 5585), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""col, start_pro, transform"""', "[['company_name', False, False], ['no_name', False, False], ['company_name',\n True, False], ['company_name', True, True], ['company_name', True, True]]"], {}), "('col, start_pro, transform', [['company_name', \n False, False], ['no_name', False, False], ['company_name', True, False],\n 
['company_name', True, True], ['company_name', True, True]])\n", (5395, 5585), False, 'import pytest\n'), ((6303, 6410), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trans, common"""', '[[False, False], [True, False], [False, True], [True, True]]'], {}), "('trans, common', [[False, False], [True, False], [\n False, True], [True, True]])\n", (6326, 6410), False, 'import pytest\n'), ((6997, 7814), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lower_case, punctuations, ascii, result_1, result_2, result_3"""', "[[False, False, False, 'Schumm PLC', 'Towne, Johnston and Murray',\n 'Ösinski-Schinner'], [True, False, False, 'schumm plc',\n 'towne, johnston and murray', 'ösinski-schinner'], [False, True, False,\n 'Schumm PLC', 'Towne Johnston and Murray', 'ÖsinskiSchinner'], [False, \n False, True, 'Schumm PLC', 'Towne, Johnston and Murray',\n 'Osinski-Schinner'], [False, True, True, 'Schumm PLC',\n 'Towne Johnston and Murray', 'OsinskiSchinner'], [True, False, True,\n 'schumm plc', 'towne, johnston and murray', 'osinski-schinner'], [True,\n True, False, 'schumm plc', 'towne johnston and murray',\n 'ösinskischinner'], [True, True, True, 'schumm plc',\n 'towne johnston and murray', 'osinskischinner']]"], {}), "(\n 'lower_case, punctuations, ascii, result_1, result_2, result_3', [[\n False, False, False, 'Schumm PLC', 'Towne, Johnston and Murray',\n 'Ösinski-Schinner'], [True, False, False, 'schumm plc',\n 'towne, johnston and murray', 'ösinski-schinner'], [False, True, False,\n 'Schumm PLC', 'Towne Johnston and Murray', 'ÖsinskiSchinner'], [False, \n False, True, 'Schumm PLC', 'Towne, Johnston and Murray',\n 'Osinski-Schinner'], [False, True, True, 'Schumm PLC',\n 'Towne Johnston and Murray', 'OsinskiSchinner'], [True, False, True,\n 'schumm plc', 'towne, johnston and murray', 'osinski-schinner'], [True,\n True, False, 'schumm plc', 'towne johnston and murray',\n 'ösinskischinner'], [True, True, True, 'schumm plc',\n 'towne johnston and murray', 
'osinskischinner']])\n", (7020, 7814), False, 'import pytest\n'), ((8705, 9040), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""low_memory, ngrams, result_1, result_2, result_3"""', '[[1, (5, 6), 0.02579, 0.00781, 0.01738], [6, (2, 3), 0.009695, 0.01022, \n 0.0112], [8, (1, 2), 0.027087, 0.02765, 0.0291], [0, (5, 6), 0.02579, \n 0.00781, 0.01738], [0, (2, 3), 0.009695, 0.01022, 0.0112], [0, (1, 2), \n 0.027087, 0.02765, 0.0291]]'], {}), "('low_memory, ngrams, result_1, result_2, result_3',\n [[1, (5, 6), 0.02579, 0.00781, 0.01738], [6, (2, 3), 0.009695, 0.01022,\n 0.0112], [8, (1, 2), 0.027087, 0.02765, 0.0291], [0, (5, 6), 0.02579, \n 0.00781, 0.01738], [0, (2, 3), 0.009695, 0.01022, 0.0112], [0, (1, 2), \n 0.027087, 0.02765, 0.0291]])\n", (8728, 9040), False, 'import pytest\n'), ((9808, 12548), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""to_be_matched, possible_matches, metrics, result"""', "[('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], \n 2), ('De Nederlandsche Bank', ['Nederlandsche Bank',\n 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'],\n ['weighted_jaccard', 'discounted_levenshtein'], 5), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein', 'iterative_sub_string'], 7), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 6), ('De Nederlandsche Bank', [\n 'Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank',\n 'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 11),\n ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'],\n 2), ('De 
Nederlandsche Bank', ['Nederlandsche Bank',\n 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'\n ], ['weighted_jaccard', 'discounted_levenshtein'], 4), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein', 'iterative_sub_string'], 6), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 6), ('De Nederlandsche Bank', [\n 'Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank',\n 'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 6), (\n 'Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),\n ('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein'], 4), ('Schumm PLC', ['Torphy-Corkery',\n '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'], [\n 'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], \n 6), ('Schumm PLC', ['Torphy-Corkery', '<NAME> and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 8), ('Schumm PLC', [\n 'Torphy-Corkery', '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'\n ], ['weighted_jaccard', 'overlap', 'bag'], 8)]"], {}), "('to_be_matched, possible_matches, metrics, result',\n [('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], \n 2), ('De Nederlandsche Bank', ['Nederlandsche Bank',\n 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'],\n ['weighted_jaccard', 'discounted_levenshtein'], 5), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh 
Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein', 'iterative_sub_string'], 7), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 6), ('De Nederlandsche Bank', [\n 'Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank',\n 'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 11),\n ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'],\n 2), ('De Nederlandsche Bank', ['Nederlandsche Bank',\n 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'\n ], ['weighted_jaccard', 'discounted_levenshtein'], 4), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein', 'iterative_sub_string'], 6), (\n 'De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',\n 'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 6), ('De Nederlandsche Bank', [\n 'Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank',\n 'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 6), (\n 'Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),\n ('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard',\n 'discounted_levenshtein'], 4), ('Schumm PLC', ['Torphy-Corkery',\n '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'], [\n 'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], \n 6), ('Schumm PLC', ['Torphy-Corkery', '<NAME> and Tillman',\n 'Gerlach and Sons', 'Bank de Nederlandsche'], 
['weighted_jaccard',\n 'overlap', 'iterative_sub_string'], 8), ('Schumm PLC', [\n 'Torphy-Corkery', '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'\n ], ['weighted_jaccard', 'overlap', 'bag'], 8)])\n", (9831, 12548), False, 'import pytest\n'), ((19203, 19427), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""top_n, low_memory, result_1, result_2"""', '[(10, 0, 1518, 144), (50, 0, 1992, 9), (100, 0, 1999, 6), (1, 0, 44, 144),\n (10, 8, 1518, 144), (50, 8, 1992, 9), (100, 8, 1999, 6), (1, 8, 44, 144)]'], {}), "('top_n, low_memory, result_1, result_2', [(10, 0, \n 1518, 144), (50, 0, 1992, 9), (100, 0, 1999, 6), (1, 0, 44, 144), (10, \n 8, 1518, 144), (50, 8, 1992, 9), (100, 8, 1999, 6), (1, 8, 44, 144)])\n", (19226, 19427), False, 'import pytest\n'), ((22674, 22723), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""verbose"""', '[True, False]'], {}), "('verbose', [True, False])\n", (22697, 22723), False, 'import pytest\n'), ((23200, 23478), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""word, occurence_count, result"""', "[['fun snail pool', 2, 'snail'], ['fun snail pool', 3, 'fun snail'], [\n 'fun snail pool', 1, ''], ['fun small pool', 3, 'fun small pool'], [\n 'fun snail', 3, 'fun snail'], ['fun small pool', 5, 'fun small pool']]"], {}), "('word, occurence_count, result', [['fun snail pool',\n 2, 'snail'], ['fun snail pool', 3, 'fun snail'], ['fun snail pool', 1,\n ''], ['fun small pool', 3, 'fun small pool'], ['fun snail', 3,\n 'fun snail'], ['fun small pool', 5, 'fun small pool']])\n", (23223, 23478), False, 'import pytest\n'), ((23901, 24319), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""match, num_of_matches, result"""', "[[{'match_name_1': 'fun', 'match_name_2': 'dog', 'match_name_0': 'cat'}, 3,\n ['cat', 'fun', 'dog']], [{'match_name_1': 'fun', 'match_name_2': 'dog',\n 'match_name_0': 'cat'}, 2, ['cat', 'fun']], [{'match_name_1': 'fun',\n 'match_name_0': 'cat'}, 2, ['cat', 'fun']], [{'match_name_1': 
'fun',\n 'match_name_2': 'dog', 'match_name_0': 'cat'}, 0, []]]"], {}), "('match, num_of_matches, result', [[{'match_name_1':\n 'fun', 'match_name_2': 'dog', 'match_name_0': 'cat'}, 3, ['cat', 'fun',\n 'dog']], [{'match_name_1': 'fun', 'match_name_2': 'dog', 'match_name_0':\n 'cat'}, 2, ['cat', 'fun']], [{'match_name_1': 'fun', 'match_name_0':\n 'cat'}, 2, ['cat', 'fun']], [{'match_name_1': 'fun', 'match_name_2':\n 'dog', 'match_name_0': 'cat'}, 0, []]])\n", (23924, 24319), False, 'import pytest\n'), ((24708, 25183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preprocess_punctuations, output, input, x"""', "[[True, '_blame_', {'test': ['fun...', 'done'], 'num': ['_.blame._']}, 2],\n [True, 'done', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._']}, 1\n ], [True, 'fun', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._']},\n 0], [False, 'fun. . .', {'test': ['fun. . . ', 'done'], 'num': [\n '_.blame._']}, 0], [False, 'fun. . .', {'num': ['_.blame._'], 'test': [\n 'fun. . . ', 'done']}, 1]]"], {}), "('preprocess_punctuations, output, input, x', [[True,\n '_blame_', {'test': ['fun...', 'done'], 'num': ['_.blame._']}, 2], [\n True, 'done', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._']}, 1],\n [True, 'fun', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._']}, 0],\n [False, 'fun. . .', {'test': ['fun. . . ', 'done'], 'num': ['_.blame._'\n ]}, 0], [False, 'fun. . .', {'num': ['_.blame._'], 'test': ['fun. . . 
',\n 'done']}, 1]])\n", (24731, 25183), False, 'import pytest\n'), ((26401, 27187), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""string, stringlist, result_1, result_2, y"""', "[['know sign first', ['know', 'know sign', 'know sign first'], 'know first',\n 'know first', 2], ['know sign first', ['know', 'know sign',\n 'know sign first'], 'know first', 'know', 1], ['know sign first', [\n 'know', 'know sign', 'know sign first'], 'know first', 'know', 0], [\n 'know first', ['know', 'know', 'know'], 'know first', 'know', 1], [\n 'pool sign small', ['sign small', 'small pool sign', 'small'], '', '', \n 0], ['pool sign small know', ['sign small', 'small pool sign', 'small'],\n 'know', '', 0], ['know pool sign small', ['sign small',\n 'small pool sign', 'small'], 'know', '', 0], ['pool sign small', [\n 'sign small', 'small pool know sign', 'small'], '', 'know', 1]]"], {}), "('string, stringlist, result_1, result_2, y', [[\n 'know sign first', ['know', 'know sign', 'know sign first'],\n 'know first', 'know first', 2], ['know sign first', ['know',\n 'know sign', 'know sign first'], 'know first', 'know', 1], [\n 'know sign first', ['know', 'know sign', 'know sign first'],\n 'know first', 'know', 0], ['know first', ['know', 'know', 'know'],\n 'know first', 'know', 1], ['pool sign small', ['sign small',\n 'small pool sign', 'small'], '', '', 0], ['pool sign small know', [\n 'sign small', 'small pool sign', 'small'], 'know', '', 0], [\n 'know pool sign small', ['sign small', 'small pool sign', 'small'],\n 'know', '', 0], ['pool sign small', ['sign small',\n 'small pool know sign', 'small'], '', 'know', 1]])\n", (26424, 27187), False, 'import pytest\n'), ((483, 499), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (497, 499), True, 'import name_matching.name_matcher as nm\n'), ((4167, 4195), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '(**kwargs_str)\n', (4181, 4195), True, 'import name_matching.name_matcher as 
nm\n'), ((5834, 5850), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (5848, 5850), True, 'import name_matching.name_matcher as nm\n'), ((6067, 6143), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['name_matcher._df_matching_data', 'adjusted_name'], {}), '(name_matcher._df_matching_data, adjusted_name)\n', (6096, 6143), True, 'import pandas as pd\n'), ((9359, 9428), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'lowercase': '(False)', 'analyzer': '"""char"""', 'ngram_range': 'ngrams'}), "(lowercase=False, analyzer='char', ngram_range=ngrams)\n", (9374, 9428), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((13365, 13381), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (13379, 13381), True, 'import name_matching.name_matcher as nm\n'), ((16040, 16056), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (16054, 16056), True, 'import name_matching.name_matcher as nm\n'), ((19080, 19096), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (19094, 19096), True, 'import name_matching.name_matcher as nm\n'), ((22264, 22299), 'numpy.any', 'np.any', (["(result['match_index'] == 44)"], {}), "(result['match_index'] == 44)\n", (22270, 22299), True, 'import numpy as np\n'), ((22453, 22488), 'numpy.any', 'np.any', (["(result['match_index'] == 44)"], {}), "(result['match_index'] == 44)\n", (22459, 22488), True, 'import numpy as np\n'), ((22556, 22572), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (22570, 22572), True, 'import name_matching.name_matcher as nm\n'), ((23754, 23770), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (23768, 23770), True, 'import name_matching.name_matcher as nm\n'), ((24569, 24617), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {'number_of_matches': 'num_of_matches'}), 
'(number_of_matches=num_of_matches)\n', (24583, 24617), True, 'import name_matching.name_matcher as nm\n'), ((25563, 25615), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {'punctuations': 'preprocess_punctuations'}), '(punctuations=preprocess_punctuations)\n', (25577, 25615), True, 'import name_matching.name_matcher as nm\n'), ((26264, 26309), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {'number_of_matches': 'num_matches'}), '(number_of_matches=num_matches)\n', (26278, 26309), True, 'import name_matching.name_matcher as nm\n'), ((27824, 27840), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (27838, 27840), True, 'import name_matching.name_matcher as nm\n'), ((29648, 29664), 'name_matching.name_matcher.NameMatcher', 'nm.NameMatcher', ([], {}), '()\n', (29662, 29664), True, 'import name_matching.name_matcher as nm\n'), ((415, 463), 'os.path.join', 'path.join', (['package_dir', '"""test"""', '"""test_names.csv"""'], {}), "(package_dir, 'test', 'test_names.csv')\n", (424, 463), True, 'import os.path as path\n'), ((787, 844), 'os.path.join', 'path.join', (['package_dir', '"""test"""', '"""adjusted_test_names.csv"""'], {}), "(package_dir, 'test', 'adjusted_test_names.csv')\n", (796, 844), True, 'import os.path as path\n'), ((1392, 1416), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1405, 1416), False, 'import pytest\n'), ((9578, 9608), 'pytest.approx', 'pytest.approx', (['result_1', '(0.001)'], {}), '(result_1, 0.001)\n', (9591, 9608), False, 'import pytest\n'), ((9671, 9701), 'pytest.approx', 'pytest.approx', (['result_2', '(0.001)'], {}), '(result_2, 0.001)\n', (9684, 9701), False, 'import pytest\n'), ((9765, 9795), 'pytest.approx', 'pytest.approx', (['result_3', '(0.001)'], {}), '(result_3, 0.001)\n', (9778, 9795), False, 'import pytest\n'), ((16242, 16291), 'numpy.min', 'np.min', (['[number_of_matches, match_score.shape[0]]'], {}), '([number_of_matches, 
match_score.shape[0]])\n', (16248, 16291), True, 'import numpy as np\n'), ((17864, 17893), 'pytest.approx', 'pytest.approx', (['result', '(0.0001)'], {}), '(result, 0.0001)\n', (17877, 17893), False, 'import pytest\n'), ((19106, 19133), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (19119, 19133), False, 'import pytest\n'), ((20050, 20072), 'numpy.max', 'np.max', (['possible_match'], {}), '(possible_match)\n', (20056, 20072), True, 'import numpy as np\n'), ((20169, 20198), 'numpy.max', 'np.max', (['possible_match[44, :]'], {}), '(possible_match[44, :])\n', (20175, 20198), True, 'import numpy as np\n'), ((20222, 20252), 'numpy.min', 'np.min', (['possible_match[144, :]'], {}), '(possible_match[144, :])\n', (20228, 20252), True, 'import numpy as np\n'), ((21676, 21707), 'pytest.approx', 'pytest.approx', (['result_0', '(0.0001)'], {}), '(result_0, 0.0001)\n', (21689, 21707), False, 'import pytest\n'), ((21739, 21770), 'pytest.approx', 'pytest.approx', (['result_1', '(0.0001)'], {}), '(result_1, 0.0001)\n', (21752, 21770), False, 'import pytest\n'), ((22018, 22063), 'numpy.sum', 'np.sum', (["(result['match_index'] == result.index)"], {}), "(result['match_index'] == result.index)\n", (22024, 22063), True, 'import numpy as np\n'), ((22582, 22607), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (22595, 22607), False, 'import pytest\n'), ((24662, 24678), 'pandas.Series', 'pd.Series', (['match'], {}), '(match)\n', (24671, 24678), True, 'import pandas as pd\n'), ((1549, 1560), 'abydos.distance.Indel', 'abd.Indel', ([], {}), '()\n', (1558, 1560), True, 'import abydos.distance as abd\n'), ((1616, 1643), 'abydos.distance.DiscountedLevenshtein', 'abd.DiscountedLevenshtein', ([], {}), '()\n', (1641, 1643), True, 'import abydos.distance as abd\n'), ((1682, 1693), 'abydos.distance.Tichy', 'abd.Tichy', ([], {}), '()\n', (1691, 1693), True, 'import abydos.distance as abd\n'), ((1737, 1752), 'abydos.distance.CormodeLZ', 
'abd.CormodeLZ', ([], {}), '()\n', (1750, 1752), True, 'import abydos.distance as abd\n'), ((1806, 1830), 'abydos.distance.IterativeSubString', 'abd.IterativeSubString', ([], {}), '()\n', (1828, 1830), True, 'import abydos.distance as abd\n'), ((1876, 1893), 'abydos.distance.BaulieuXIII', 'abd.BaulieuXIII', ([], {}), '()\n', (1891, 1893), True, 'import abydos.distance as abd\n'), ((1934, 1947), 'abydos.distance.Clement', 'abd.Clement', ([], {}), '()\n', (1945, 1947), True, 'import abydos.distance as abd\n'), ((1997, 2018), 'abydos.distance.DiceAsymmetricI', 'abd.DiceAsymmetricI', ([], {}), '()\n', (2016, 2018), True, 'import abydos.distance as abd\n'), ((2061, 2075), 'abydos.distance.KuhnsIII', 'abd.KuhnsIII', ([], {}), '()\n', (2073, 2075), True, 'import abydos.distance as abd\n'), ((2116, 2129), 'abydos.distance.Overlap', 'abd.Overlap', ([], {}), '()\n', (2127, 2129), True, 'import abydos.distance as abd\n'), ((2173, 2188), 'abydos.distance.PearsonII', 'abd.PearsonII', ([], {}), '()\n', (2186, 2188), True, 'import abydos.distance as abd\n'), ((2238, 2259), 'abydos.distance.WeightedJaccard', 'abd.WeightedJaccard', ([], {}), '()\n', (2257, 2259), True, 'import abydos.distance as abd\n'), ((2303, 2318), 'abydos.distance.WarrensIV', 'abd.WarrensIV', ([], {}), '()\n', (2316, 2318), True, 'import abydos.distance as abd\n'), ((2355, 2364), 'abydos.distance.Bag', 'abd.Bag', ([], {}), '()\n', (2362, 2364), True, 'import abydos.distance as abd\n'), ((2405, 2417), 'abydos.distance.RougeL', 'abd.RougeL', ([], {}), '()\n', (2415, 2417), True, 'import abydos.distance as abd\n'), ((2469, 2492), 'abydos.distance.RatcliffObershelp', 'abd.RatcliffObershelp', ([], {}), '()\n', (2490, 2492), True, 'import abydos.distance as abd\n'), ((2533, 2545), 'abydos.distance.NCDbz2', 'abd.NCDbz2', ([], {}), '()\n', (2543, 2545), True, 'import abydos.distance as abd\n'), ((2635, 2664), 'abydos.distance.FuzzyWuzzyPartialString', 'abd.FuzzyWuzzyPartialString', ([], {}), '()\n', (2662, 2664), 
True, 'import abydos.distance as abd\n'), ((2720, 2745), 'abydos.distance.FuzzyWuzzyTokenSort', 'abd.FuzzyWuzzyTokenSort', ([], {}), '()\n', (2743, 2745), True, 'import abydos.distance as abd\n'), ((2800, 2824), 'abydos.distance.FuzzyWuzzyTokenSet', 'abd.FuzzyWuzzyTokenSet', ([], {}), '()\n', (2822, 2824), True, 'import abydos.distance as abd\n'), ((2864, 2876), 'abydos.distance.Editex', 'abd.Editex', ([], {}), '()\n', (2874, 2876), True, 'import abydos.distance as abd\n'), ((2914, 2924), 'abydos.distance.Typo', 'abd.Typo', ([], {}), '()\n', (2922, 2924), True, 'import abydos.distance as abd\n'), ((2963, 2973), 'abydos.distance.LIG3', 'abd.LIG3', ([], {}), '()\n', (2971, 2973), True, 'import abydos.distance as abd\n'), ((3010, 3019), 'abydos.distance.SSK', 'abd.SSK', ([], {}), '()\n', (3017, 3019), True, 'import abydos.distance as abd\n'), ((13635, 13672), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1]])\n', (13643, 13672), True, 'import numpy as np\n'), ((13732, 13796), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]])\n', (13740, 13796), True, 'import numpy as np\n'), ((13913, 14007), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, \n 0.3, 0.2, 0.1]])\n', (13921, 14007), True, 'import numpy as np\n'), ((14146, 14240), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, \n 0.3, 0.2, 0.1]])\n', (14154, 14240), True, 'import numpy as np\n'), ((14333, 14397), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]])\n', (14341, 14397), True, 'import numpy as np\n'), 
((14486, 14580), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, \n 0.3, 0.2, 0.1]])\n', (14494, 14580), True, 'import numpy as np\n'), ((14698, 14792), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]'], {}), '([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, \n 0.3, 0.2, 0.1]])\n', (14706, 14792), True, 'import numpy as np\n'), ((14896, 14933), 'numpy.array', 'np.array', (['[[0.3, 0.3, 0.8, 0.2, 0.2]]'], {}), '([[0.3, 0.3, 0.8, 0.2, 0.2]])\n', (14904, 14933), True, 'import numpy as np\n'), ((15021, 15085), 'numpy.array', 'np.array', (['[[0.3, 0.3, 0.8, 0.2, 0.2], [0.3, 0.3, 0.8, 0.1, 0.1]]'], {}), '([[0.3, 0.3, 0.8, 0.2, 0.2], [0.3, 0.3, 0.8, 0.1, 0.1]])\n', (15029, 15085), True, 'import numpy as np\n'), ((15202, 15268), 'numpy.array', 'np.array', (['[[0.3, 0.3, 0.2, 0.1, 0.02], [0.1, 0.1, 0.2, 0.3, 0.02]]'], {}), '([[0.3, 0.3, 0.2, 0.1, 0.02], [0.1, 0.1, 0.2, 0.3, 0.02]])\n', (15210, 15268), True, 'import numpy as np\n'), ((15383, 15449), 'numpy.array', 'np.array', (['[[0.3, 0.3, 0.2, 0.1, 0.02], [0.3, 0.3, 0.2, 0.3, 0.02]]'], {}), '([[0.3, 0.3, 0.2, 0.1, 0.02], [0.3, 0.3, 0.2, 0.3, 0.02]])\n', (15391, 15449), True, 'import numpy as np\n'), ((15552, 15594), 'numpy.array', 'np.array', (['[[-0.5, -0.8, -0.3, -0.7, 0, 2]]'], {}), '([[-0.5, -0.8, -0.3, -0.7, 0, 2]])\n', (15560, 15594), True, 'import numpy as np\n'), ((15672, 15713), 'numpy.array', 'np.array', (['[[10, 8, 7, 6, 12, 15, 14, 88]]'], {}), '([[10, 8, 7, 6, 12, 15, 14, 88]])\n', (15680, 15713), True, 'import numpy as np\n'), ((15801, 15833), 'numpy.array', 'np.array', (['[[1, 0.3], [0.1, 0.4]]'], {}), '([[1, 0.3], [0.1, 0.4]])\n', (15809, 15833), True, 'import numpy as np\n'), ((16565, 16697), 'pandas.Series', 'pd.Series', (["['Nederandsche', 0, 2, 'De Nederlandsche Bank']"], {'index': 
"['match_name_0', 'score_0', 'match_index_0', 'original_name']"}), "(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=[\n 'match_name_0', 'score_0', 'match_index_0', 'original_name'])\n", (16574, 16697), True, 'import pandas as pd\n'), ((16763, 16895), 'pandas.Series', 'pd.Series', (["['Nederandsche', 0, 2, 'De Nederlandsche Bank']"], {'index': "['match_name_0', 'score_0', 'match_index_0', 'original_name']"}), "(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=[\n 'match_name_0', 'score_0', 'match_index_0', 'original_name'])\n", (16772, 16895), True, 'import pandas as pd\n'), ((16996, 17207), 'pandas.Series', 'pd.Series', (["['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank']"], {'index': "['match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1',\n 'match_index_1', 'original_name']"}), "(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3,\n 'De Nederlandsche Bank'], index=['match_name_0', 'score_0',\n 'match_index_0', 'match_name_1', 'score_1', 'match_index_1',\n 'original_name'])\n", (17005, 17207), True, 'import pandas as pd\n'), ((17294, 17505), 'pandas.Series', 'pd.Series', (["['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank']"], {'index': "['match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1',\n 'match_index_1', 'original_name']"}), "(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3,\n 'De Nederlandsche Bank'], index=['match_name_0', 'score_0',\n 'match_index_0', 'match_name_1', 'score_1', 'match_index_1',\n 'original_name'])\n", (17303, 17505), True, 'import pandas as pd\n'), ((20412, 20447), 'numpy.array', 'np.array', (['[29, 343, 727, 855, 1702]'], {}), '([29, 343, 727, 855, 1702])\n', (20420, 20447), True, 'import numpy as np\n'), ((20449, 20504), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (20458, 20504), True, 'import pandas as pd\n'), ((20589, 20613), 'numpy.array', 
'np.array', (['[29, 343, 727]'], {}), '([29, 343, 727])\n', (20597, 20613), True, 'import numpy as np\n'), ((20617, 20672), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (20626, 20672), True, 'import pandas as pd\n'), ((20756, 20775), 'numpy.array', 'np.array', (['[29, 343]'], {}), '([29, 343])\n', (20764, 20775), True, 'import numpy as np\n'), ((20777, 20832), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (20786, 20832), True, 'import pandas as pd\n'), ((20916, 20945), 'numpy.array', 'np.array', (['[[29, 343], [0, 0]]'], {}), '([[29, 343], [0, 0]])\n', (20924, 20945), True, 'import numpy as np\n'), ((20947, 21002), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (20956, 21002), True, 'import pandas as pd\n'), ((21086, 21121), 'numpy.array', 'np.array', (['[29, 343, 727, 855, 1702]'], {}), '([29, 343, 727, 855, 1702])\n', (21094, 21121), True, 'import numpy as np\n'), ((21123, 21178), 'pandas.Series', 'pd.Series', (["['Company and Sons']"], {'index': "['company_name']"}), "(['Company and Sons'], index=['company_name'])\n", (21132, 21178), True, 'import pandas as pd\n'), ((23705, 23721), 'pandas.Series', 'pd.Series', (['words'], {}), '(words)\n', (23714, 23721), True, 'import pandas as pd\n'), ((25812, 25855), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1], [0, 0, 0]]'], {}), '([[1, 1, 1], [1, 1, 1], [0, 0, 0]])\n', (25820, 25855), True, 'import numpy as np\n'), ((25857, 25879), 'pandas.Series', 'pd.Series', ([], {'dtype': 'float'}), '(dtype=float)\n', (25866, 25879), True, 'import pandas as pd\n'), ((25920, 25958), 'numpy.array', 'np.array', (['[[1, 1], [0.4, 0.4], [0, 0]]'], {}), '([[1, 1], [0.4, 0.4], [0, 0]])\n', (25928, 25958), True, 'import numpy as np\n'), ((25987, 26009), 
'pandas.Series', 'pd.Series', ([], {'dtype': 'float'}), '(dtype=float)\n', (25996, 26009), True, 'import pandas as pd\n'), ((26052, 26086), 'numpy.array', 'np.array', (['[[1, 1], [1, 1], [0, 0]]'], {}), '([[1, 1], [1, 1], [0, 0]])\n', (26060, 26086), True, 'import numpy as np\n'), ((26118, 26140), 'pandas.Series', 'pd.Series', ([], {'dtype': 'float'}), '(dtype=float)\n', (26127, 26140), True, 'import pandas as pd\n'), ((366, 388), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (378, 388), True, 'import os.path as path\n'), ((738, 760), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (750, 760), True, 'import os.path as path\n'), ((3100, 3133), 'abydos.phonetic.RefinedSoundex', 'abp.RefinedSoundex', ([], {'max_length': '(30)'}), '(max_length=30)\n', (3118, 3133), True, 'import abydos.phonetic as abp\n'), ((3173, 3190), 'abydos.distance.Levenshtein', 'abd.Levenshtein', ([], {}), '()\n', (3188, 3190), True, 'import abydos.distance as abd\n'), ((3292, 3326), 'abydos.phonetic.DoubleMetaphone', 'abp.DoubleMetaphone', ([], {'max_length': '(30)'}), '(max_length=30)\n', (3311, 3326), True, 'import abydos.phonetic as abp\n'), ((3335, 3352), 'abydos.distance.Levenshtein', 'abd.Levenshtein', ([], {}), '()\n', (3350, 3352), True, 'import abydos.distance as abd\n')] |
import numpy as np
import math
import ROOT
import sys
class DistrReader:
    """Per-channel photo-electron occupancy (lambda) reader.

    For every channel of a 4-ASIC x 16-channel readout, reads 'signal' and
    'noise' charge histograms from ROOT files under ``dataset`` and estimates
    lambda from the zero-photo-electron probability P0 below a threshold
    placed between the 0-pe and 1-pe peaks:

        lambda = -(ln(P0_signal) - ln(P0_noise))

    i.e. the Poisson mean occupancy corrected for the noise-only (pedestal)
    zero probability.  Statistical and systematic uncertainties are summed
    in quadrature over all 64 channels.  All work happens in ``__init__``
    via ``CalcLambda``; the ``Get*`` methods only return stored results.
    """
    def __init__(self, dataset):
        # Squared-uncertainty accumulators while CalcLambda runs; converted
        # to plain uncertainties (sqrt) at the end of CalcLambda.
        self.stat_error = 0
        self.sys_error = 0
        # Sum of the per-channel lambda values over all 64 channels.
        self.plambda = 0
        # Directory containing the hist_as<asic>_ch<channel>.root files.
        self.dataset = str(dataset)
        # Distribution of the 64 per-channel lambda values.
        self.hist = ROOT.TH1D('','', 100, -0.2, 0.2)
        # Lambda vs. flat channel index (asic * 16 + channel).
        self.distr = ROOT.TH1D('','', 64, 0, 64)
        self.CalcLambda()
    def GetStatError(self):
        """Return the total statistical uncertainty on the summed lambda."""
        return self.stat_error
    def GetSysError(self):
        """Return the total systematic uncertainty on the summed lambda."""
        return self.sys_error
    def GetLambda(self):
        """Return the summed lambda over all 64 channels."""
        return self.plambda
    def Reset(self):
        """Zero the accumulated results and forget the dataset path.

        NOTE(review): does not clear self.hist / self.distr, so a later
        CalcLambda call would fill on top of the old histogram entries --
        confirm this is intended before reusing an instance.
        """
        self.stat_error = 0
        self.sys_error = 0
        self.plambda = 0
        self.dataset = ''
    def CalcLambda(self):
        """Accumulate lambda and its uncertainties over all 64 channels."""
        for asic in range(4):
            for channel in range(16):
                # Load the per-channel histograms and detach them from the
                # file (SetDirectory(0)) so they survive hfile.Close().
                hfile = ROOT.TFile("%s/hist_as%d_ch%d.root" %(self.dataset, asic, channel))
                self.hNoise = hfile.Get('noise')
                self.hSignal = hfile.Get('signal')
                self.hNoise.SetDirectory(0)
                self.hSignal.SetDirectory(0)
                hfile.Close()
                # Locate the 0-pe and 1-pe peak bins on clones so the
                # temporary axis-range changes don't touch the originals.
                hist_s = self.hSignal.Clone()
                hist_n = self.hNoise.Clone()
                hist_s.GetXaxis().SetRangeUser(-40, 100) # 0pe position
                p0 = hist_s.GetMaximumBin()
                hist_s.GetXaxis().SetRangeUser(120, 250) # 1pe position
                p1 = hist_s.GetMaximumBin()
                # Threshold bin between the two peaks; dividing by 1.9 (not
                # 2) biases it slightly above the midpoint -- presumably a
                # deliberate tuning, confirm with the analysis note.
                thrsh = int((p0+p1)/1.9)
                del hist_s
                del hist_n
                hist_s = self.hSignal
                hist_n = self.hNoise
                # Zero-pe counts below the threshold, plus variants with the
                # threshold shifted by +/-30 (axis units) for the systematic.
                N0_s = hist_s.Integral(1, thrsh)
                N0_su = hist_s.Integral(1, hist_s.FindBin(hist_s.GetXaxis().GetBinCenter(thrsh) + 30))
                N0_sl = hist_s.Integral(1, hist_s.FindBin(hist_s.GetXaxis().GetBinCenter(thrsh) - 30))
                N0_n = hist_n.Integral(1, thrsh)
                N0_nu = hist_n.Integral(1, hist_n.FindBin(hist_n.GetXaxis().GetBinCenter(thrsh) + 30))
                N0_nl = hist_n.Integral(1, hist_n.FindBin(hist_n.GetXaxis().GetBinCenter(thrsh) - 30))
                # Totals include the overflow bin (GetNbinsX() + 1).
                N_s = hist_s.Integral() + hist_s.GetBinContent(hist_s.GetNbinsX() + 1)
                N_n = hist_n.Integral() + hist_n.GetBinContent(hist_n.GetNbinsX() + 1)
                # Zero-pe probabilities: nominal and shifted-threshold ones.
                P0_s = N0_s / N_s
                P0_su = N0_su / N_s
                P0_sl = N0_sl / N_s
                P0_n = N0_n / N_n
                P0_nu = N0_nu / N_n
                P0_nl = N0_nl / N_n
                # Binomial statistical error on ln(P0): sqrt(N p (1-p)) / N0.
                err_s_stat = np.sqrt(N_s * (1 - P0_s) * P0_s) / N0_s
                err_n_stat = np.sqrt(N_n * (1 - P0_n) * P0_n) / N0_n
                # Systematic: change of ln(P0) under the +/-30 threshold shift.
                err_s_sys = ROOT.TMath.Log(P0_sl) - ROOT.TMath.Log(P0_su)
                err_n_sys = ROOT.TMath.Log(P0_nl) - ROOT.TMath.Log(P0_nu)
                err_tot_sys = np.sqrt(np.power(err_s_sys, 2) + np.power(err_n_sys, 2))
                err_tot_stat = np.sqrt(np.power(err_s_stat, 2) + np.power(err_n_stat, 2))
                # Accumulate squared errors; sqrt is taken after the loops.
                self.sys_error += np.power(err_tot_sys, 2)
                self.stat_error += np.power(err_tot_stat, 2)
                # Noise-corrected Poisson mean: lambda = ln(P0_n) - ln(P0_s).
                Plambda = - (ROOT.TMath.Log(P0_s) - ROOT.TMath.Log(P0_n))
                self.plambda += Plambda
                self.hist.Fill(Plambda)
                self.distr.Fill(asic * 16 + channel, Plambda)
                hist_s.Delete()
                hist_n.Delete()
        # Convert the quadratic sums into total uncertainties.
        self.stat_error = np.sqrt(self.GetStatError())
        self.sys_error = np.sqrt(self.GetSysError())
    def GetLambdaHist(self):
        """Return the TH1D distribution of per-channel lambda values."""
        return self.hist
    def GetLambdaDistr(self):
        """Return the TH1D of lambda vs. flat channel index (asic*16+channel)."""
        return self.distr
# #
# PEd = DistrReader('/Volumes/Untitled/zenin/linearity_465/linearity_465_sipm/hists/3500_4_465')
#
# total = PEd.GetLambda()
# stat_err = PEd.GetStatError()
# sys_err = PEd.GetSysError()
#
# print('total lambda = %f \u00B1 %f stat \u00B1 %f sys'%(total, stat_err, sys_err))
# print('relative uncertainty = %f%% stat + %f%% sys'%(stat_err/total*100, sys_err/total*100))
#
# h = PEd.GetLambdaDistr().Clone()
# print(h.GetBinContent(9))
# h.Draw()
| [
"numpy.sqrt",
"ROOT.TH1D",
"numpy.power",
"ROOT.TMath.Log",
"ROOT.TFile"
] | [((243, 276), 'ROOT.TH1D', 'ROOT.TH1D', (['""""""', '""""""', '(100)', '(-0.2)', '(0.2)'], {}), "('', '', 100, -0.2, 0.2)\n", (252, 276), False, 'import ROOT\n'), ((297, 325), 'ROOT.TH1D', 'ROOT.TH1D', (['""""""', '""""""', '(64)', '(0)', '(64)'], {}), "('', '', 64, 0, 64)\n", (306, 325), False, 'import ROOT\n'), ((814, 882), 'ROOT.TFile', 'ROOT.TFile', (["('%s/hist_as%d_ch%d.root' % (self.dataset, asic, channel))"], {}), "('%s/hist_as%d_ch%d.root' % (self.dataset, asic, channel))\n", (824, 882), False, 'import ROOT\n'), ((3065, 3089), 'numpy.power', 'np.power', (['err_tot_sys', '(2)'], {}), '(err_tot_sys, 2)\n', (3073, 3089), True, 'import numpy as np\n'), ((3126, 3151), 'numpy.power', 'np.power', (['err_tot_stat', '(2)'], {}), '(err_tot_stat, 2)\n', (3134, 3151), True, 'import numpy as np\n'), ((2594, 2626), 'numpy.sqrt', 'np.sqrt', (['(N_s * (1 - P0_s) * P0_s)'], {}), '(N_s * (1 - P0_s) * P0_s)\n', (2601, 2626), True, 'import numpy as np\n'), ((2663, 2695), 'numpy.sqrt', 'np.sqrt', (['(N_n * (1 - P0_n) * P0_n)'], {}), '(N_n * (1 - P0_n) * P0_n)\n', (2670, 2695), True, 'import numpy as np\n'), ((2732, 2753), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_sl'], {}), '(P0_sl)\n', (2746, 2753), False, 'import ROOT\n'), ((2756, 2777), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_su'], {}), '(P0_su)\n', (2770, 2777), False, 'import ROOT\n'), ((2806, 2827), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_nl'], {}), '(P0_nl)\n', (2820, 2827), False, 'import ROOT\n'), ((2830, 2851), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_nu'], {}), '(P0_nu)\n', (2844, 2851), False, 'import ROOT\n'), ((2891, 2913), 'numpy.power', 'np.power', (['err_s_sys', '(2)'], {}), '(err_s_sys, 2)\n', (2899, 2913), True, 'import numpy as np\n'), ((2916, 2938), 'numpy.power', 'np.power', (['err_n_sys', '(2)'], {}), '(err_n_sys, 2)\n', (2924, 2938), True, 'import numpy as np\n'), ((2979, 3002), 'numpy.power', 'np.power', (['err_s_stat', '(2)'], {}), '(err_s_stat, 2)\n', (2987, 3002), True, 'import numpy as 
np\n'), ((3005, 3028), 'numpy.power', 'np.power', (['err_n_stat', '(2)'], {}), '(err_n_stat, 2)\n', (3013, 3028), True, 'import numpy as np\n'), ((3186, 3206), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_s'], {}), '(P0_s)\n', (3200, 3206), False, 'import ROOT\n'), ((3209, 3229), 'ROOT.TMath.Log', 'ROOT.TMath.Log', (['P0_n'], {}), '(P0_n)\n', (3223, 3229), False, 'import ROOT\n')] |
"""
Bad Apps Blog
Author: <NAME> (a.k.a. 7UR7L3)
(Initial commit is based on the official Flask tutorial)
About: This app began as an (essentially) exact copy
of the official Flask tutorial (linked below). It is
intended as an opportunity to practice application
security, secure design, and secure coding techniques.
At the end of the Flask tutorial, the interested student
is challenged to implement several features. In order to
achieve that goal, we will attempt to implement those features
while "pushing left" (security-wise) in the process.
Official Flask tutorial : https://flask.palletsprojects.com/en/2.0.x/tutorial/
"""
import os
import secrets
from flask import Flask
import logging
# Configure root logging once at import time so every app/module logger
# shares the same timestamped format.
logging.basicConfig(level=logging.INFO,format='%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
def create_app(test_config=None):
    """Application factory for the Bad Apps Blog.

    Args:
        test_config: optional mapping used instead of the instance
            ``config.py``; a fresh SECRET_KEY and a short CSRF token
            lifetime are injected for tests.

    Returns:
        The configured :class:`~flask.Flask` application.
    """
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        APP_VERSION = '0.0.1',
        DB_VERSION = '0.0.1',
        DATABASE=os.path.join(app.instance_path, 'bad_apps_blog.sqlite'),
        CSRF_TOKEN_AGE = 3600 # seconds
    )
    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py', silent=True)
        # (fixed typo in the log message: "configuraion")
        app.logger.info('loading configuration from config.py in instance folder')
    else:
        # load the test config if passed in; every test run gets a fresh key
        test_config['SECRET_KEY'] = secrets.token_hex(32)
        test_config['CSRF_TOKEN_AGE'] = 2
        app.config.from_mapping(test_config)
        app.logger.info('generating test configuration')
    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
        app.logger.info('created instance folder')
    except OSError:
        # folder already exists -- nothing to do (unused `as e` removed)
        app.logger.info('instance folder already exists')
    # register the config generator with the current app instance
    from . import gen_config
    gen_config.init_app(app)
    # register the DBs with the current app instance
    from . import db
    db.init_app(app)
    # register the authorization blueprint
    from . import auth
    app.register_blueprint(auth.bp)
    # register the blog blueprint
    from . import blog
    app.register_blueprint(blog.bp)
    app.add_url_rule('/', endpoint='index')
    return app
| [
"logging.basicConfig",
"secrets.token_hex",
"os.makedirs",
"flask.Flask",
"os.path.join"
] | [((703, 821), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')\n", (722, 821), False, 'import logging\n'), ((896, 942), 'flask.Flask', 'Flask', (['__name__'], {'instance_relative_config': '(True)'}), '(__name__, instance_relative_config=True)\n', (901, 942), False, 'from flask import Flask\n'), ((1478, 1499), 'secrets.token_hex', 'secrets.token_hex', (['(32)'], {}), '(32)\n', (1495, 1499), False, 'import secrets\n'), ((1703, 1733), 'os.makedirs', 'os.makedirs', (['app.instance_path'], {}), '(app.instance_path)\n', (1714, 1733), False, 'import os\n'), ((1050, 1105), 'os.path.join', 'os.path.join', (['app.instance_path', '"""bad_apps_blog.sqlite"""'], {}), "(app.instance_path, 'bad_apps_blog.sqlite')\n", (1062, 1105), False, 'import os\n')] |
from django.conf.urls import patterns, url
from status import views
# Status endpoints for the UPS and Tor monitors.
# NOTE(review): ``patterns()`` was deprecated in Django 1.8 and removed in
# 1.10 -- on modern Django this would be a plain list of url()/path() calls.
urlpatterns = patterns('',
    url(r'^ups$', views.ups_status, name='ups_status'),
    url(r'^tor$', views.tor_status, name='tor_status'),
)
| [
"django.conf.urls.url"
] | [((97, 146), 'django.conf.urls.url', 'url', (['"""^ups$"""', 'views.ups_status'], {'name': '"""ups_status"""'}), "('^ups$', views.ups_status, name='ups_status')\n", (100, 146), False, 'from django.conf.urls import patterns, url\n'), ((150, 199), 'django.conf.urls.url', 'url', (['"""^tor$"""', 'views.tor_status'], {'name': '"""tor_status"""'}), "('^tor$', views.tor_status, name='tor_status')\n", (153, 199), False, 'from django.conf.urls import patterns, url\n')] |
import os
import json
from common import update_json_file, get_logger, exec_cmd
from yamlparser import Parser
from pathlib import Path
# Module-wide logger; the name shows up in every log line emitted below.
logger = get_logger("update-image")
# Functions that work to update gluu_versions.json
def determine_final_official_and_dev_version(tag_list):
    """
    Determine official version i.e 4.1.0 , 4.2.2..etc using oxauths repo

    Assumes tags shaped like ``X.Y.Z_NN`` / ``X.Y.Z_dev`` -- TODO confirm:
    tag[4:5] is read as the patch digit and tag[6:8] as the build number.
    @param tag_list:
    @return: (highest stable tag, matching "<X.Y.Z>_dev" tag)
    """
    # Check for the highest major.minor.patch i.e 4.2.0 vs 4.2.2
    dev_image = ""
    patch_list = []
    for tag in tag_list:
        patch_list.append(int(tag[4:5]))
    # Remove duplicates
    patch_list = list(set(patch_list))
    # Sort
    patch_list.sort()
    highest_major_minor_patch_number = str(patch_list[-1])
    versions_list = []
    for tag in tag_list:
        if "dev" in tag and tag[4:5] == highest_major_minor_patch_number:
            dev_image = tag[0:5] + "_dev"
        # Exclude any tag with the following
        if "dev" not in tag and "a" not in tag and tag[4:5] == highest_major_minor_patch_number:
            versions_list.append(int(tag[6:8]))
    # A case were only a dev version of a new patch is available then a lower stable patch should be checked.
    # i.e there is no 4.3.0_01 but there is 4.2.2_dev
    if not versions_list:
        highest_major_minor_patch_number = str(int(highest_major_minor_patch_number) - 1)
        for tag in tag_list:
            if not dev_image and "dev" in tag and tag[4:5] == highest_major_minor_patch_number:
                dev_image = tag[0:5] + "_dev"
            # Exclude any tag with the following
            if "dev" not in tag and "a" not in tag and tag[4:5] == highest_major_minor_patch_number:
                versions_list.append(int(tag[6:8]))
    # Remove duplicates
    versions_list = list(set(versions_list))
    # Sort
    versions_list.sort()
    # Return highest patch
    highest_major_minor_patch_image_patch = str(versions_list[-1])
    # Zero-pad single-digit build numbers so they match the "_NN" tag suffix.
    if len(highest_major_minor_patch_image_patch) == 1:
        highest_major_minor_patch_image_patch = "0" + highest_major_minor_patch_image_patch
    highest_major_minor_patch_image = ""
    for tag in tag_list:
        # Pick the stable tag carrying the highest build number.
        if "dev" not in tag and highest_major_minor_patch_image_patch in tag \
                and tag[4:5] == highest_major_minor_patch_number:
            highest_major_minor_patch_image = tag
    return highest_major_minor_patch_image, dev_image
def determine_major_version(all_repos_tags):
    """
    Determine official major version i.e 4.1 , 4.2..etc using oxauths repo
    @param all_repos_tags:
    @return: highest "X.Y" value as a float
    """
    excluded_markers = ("dev", "latest", "secret", "gluu-engine")
    # Collect the unique leading "X.Y" of every plain release tag.
    majors = {
        float(tag[0:3])
        for tag in all_repos_tags["oxauth"]
        if not any(marker in tag for marker in excluded_markers)
    }
    # Highest major.minor wins.
    return sorted(majors)[-1]
def get_docker_repo_tag(org, repo):
    """
    Returns a dictionary of all available tags for a certain repo
    :param org: Docker Hub organisation, e.g. "gluufederation"
    :param repo: repository name
    :return: {repo: [tag, ...]}
    """
    logger.info("Getting docker tag for repository {}.".format(repo))
    exec_get_repo_tag_curl_command = ["curl", "-s",
                                      "https://hub.docker.com/v2/repositories/{}/{}/tags/?page_size=100".format(org,
                                                                                                                 repo)]
    stdout, stderr, retcode = None, None, None
    try:
        stdout, stderr, retcode = exec_cmd(" ".join(exec_get_repo_tag_curl_command))
    # NOTE(review): Exception already covers IndexError, so this tuple is
    # redundant; if we land here, stdout stays None and json.loads(stdout)
    # below raises TypeError -- confirm this hard-fail is intended.
    except (IndexError, Exception):
        manual_curl_command = " ".join(exec_get_repo_tag_curl_command)
        logger.error("Failed to curl\n{}".format(manual_curl_command))
    # Docker Hub v2 API returns the tag objects under "results".
    all_tags = json.loads(stdout)["results"]
    image_tags = []
    for tag in all_tags:
        image_tags.append(tag["name"])
    image_tags_dict = dict()
    image_tags_dict[repo] = image_tags
    return image_tags_dict
def filter_all_repo_dictionary_tags(all_repos_tags, major_official_version):
    """
    Analyze the dictionary containing all repos and keeps only the list of tags and versions matching the major version
    @param all_repos_tags:
    @param major_official_version:
    """
    # Keep, per repo, only the tags whose leading "X.Y" equals the wanted major.
    return {
        repo: [tag for tag in tags if tag[0:3] == major_official_version]
        for repo, tags in all_repos_tags.items()
    }
def analyze_filtered_dict_return_final_dict(filtered_all_repos_tags, major_official_version):
    """
    Analyze filtered dictionary and return the final dict with only one official version and one dev version.
    Also rewrites image repository/tag entries in the main Gluu helm values.yaml.
    @param filtered_all_repos_tags:
    @param major_official_version:
    """
    final_official_version_dict = dict()
    final_dev_version_dict = dict()
    # Gluus main values.yaml
    gluu_values_file = Path("../pygluu/kubernetes/templates/helm/gluu/values.yaml").resolve()
    gluu_values_file_parser = Parser(gluu_values_file, True)
    dev_version = ""
    def update_dicts_and_yamls(name, rep, tags_list, helm_name=None):
        # Record the stable + dev tags for this image and patch the helm values.
        final_tag, final_dev_tag = determine_final_official_and_dev_version(tags_list)
        final_official_version_dict[name + "_IMAGE_NAME"] = "gluufederation/" + rep
        final_dev_version_dict[name + "_IMAGE_NAME"] = "gluufederation/" + rep
        final_official_version_dict[name + "_IMAGE_TAG"], final_dev_version_dict[name + "_IMAGE_TAG"] \
            = final_tag, final_dev_tag
        # The "upgrade" image has no helm chart entry to patch.
        if rep != "upgrade":
            if helm_name:
                gluu_values_file_parser[helm_name]["image"]["repository"] = "gluufederation/" + rep
                gluu_values_file_parser[helm_name]["image"]["tag"] = final_tag
            else:
                gluu_values_file_parser[rep]["image"]["repository"] = "gluufederation/" + rep
                gluu_values_file_parser[rep]["image"]["tag"] = final_tag
    for repo, tag_list in filtered_all_repos_tags.items():
        # NOTE(review): official_version is unused here, and dev_version keeps
        # only the value from the LAST repo iterated -- it becomes the dev key
        # of the returned dict below.  Confirm that is intended.
        official_version, dev_version = determine_final_official_and_dev_version(tag_list)
        if repo == "casa":
            update_dicts_and_yamls("CASA", repo, tag_list)
        elif repo == "oxd-server":
            update_dicts_and_yamls("OXD", repo, tag_list)
        elif repo == "fido2":
            update_dicts_and_yamls("FIDO2", repo, tag_list)
        elif repo == "scim":
            update_dicts_and_yamls("SCIM", repo, tag_list)
        elif repo == "config-init":
            update_dicts_and_yamls("CONFIG", repo, tag_list, "config")
        elif repo == "cr-rotate":
            update_dicts_and_yamls("CACHE_REFRESH_ROTATE", repo, tag_list)
        elif repo == "certmanager":
            update_dicts_and_yamls("CERT_MANAGER", repo, tag_list, "oxauth-key-rotation")
        elif repo == "opendj":
            update_dicts_and_yamls("LDAP", repo, tag_list, "opendj")
        elif repo == "jackrabbit":
            update_dicts_and_yamls("JACKRABBIT", repo, tag_list)
        elif repo == "oxauth":
            update_dicts_and_yamls("OXAUTH", repo, tag_list)
        elif repo == "oxpassport":
            update_dicts_and_yamls("OXPASSPORT", repo, tag_list)
        elif repo == "oxshibboleth":
            update_dicts_and_yamls("OXSHIBBOLETH", repo, tag_list)
        elif repo == "oxtrust":
            update_dicts_and_yamls("OXTRUST", repo, tag_list)
        elif repo == "persistence":
            update_dicts_and_yamls("PERSISTENCE", repo, tag_list)
        elif repo == "upgrade":
            update_dicts_and_yamls("UPGRADE", repo, tag_list)
    gluu_versions_dict = {major_official_version: final_official_version_dict,
                          dev_version: final_dev_version_dict}
    gluu_values_file_parser.dump_it()
    return gluu_versions_dict
def main():
    """Collect Docker Hub tags for every Gluu image, pick the current
    official + dev versions, and write gluu_versions.json."""
    all_repos_tags = dict()
    # Organisation can be overridden for forks/mirrors.
    org = os.environ.get("ORG_NAME", "gluufederation")
    gluu_docker_repositories_names_used_in_cn = ["casa", "fido2", "scim", "config-init",
                                                 "cr-rotate", "certmanager", "opendj", "jackrabbit", "oxauth",
                                                 "oxd-server", "oxpassport", "oxshibboleth",
                                                 "oxtrust", "persistence", "upgrade"]
    for repo in gluu_docker_repositories_names_used_in_cn:
        all_repos_tags.update(get_docker_repo_tag(org, repo))
    major_official_version = str(determine_major_version(all_repos_tags))
    filtered_all_repos_tags = filter_all_repo_dictionary_tags(all_repos_tags, major_official_version)
    final_gluu_versions_dict = analyze_filtered_dict_return_final_dict(filtered_all_repos_tags, major_official_version)
    update_json_file(final_gluu_versions_dict, '../pygluu/kubernetes/templates/gluu_versions.json')
if __name__ == '__main__':
    main()
| [
"json.loads",
"pathlib.Path",
"os.environ.get",
"yamlparser.Parser",
"common.update_json_file",
"common.get_logger"
] | [((145, 171), 'common.get_logger', 'get_logger', (['"""update-image"""'], {}), "('update-image')\n", (155, 171), False, 'from common import update_json_file, get_logger, exec_cmd\n'), ((5283, 5313), 'yamlparser.Parser', 'Parser', (['gluu_values_file', '(True)'], {}), '(gluu_values_file, True)\n', (5289, 5313), False, 'from yamlparser import Parser\n'), ((8116, 8160), 'os.environ.get', 'os.environ.get', (['"""ORG_NAME"""', '"""gluufederation"""'], {}), "('ORG_NAME', 'gluufederation')\n", (8130, 8160), False, 'import os\n'), ((8964, 9063), 'common.update_json_file', 'update_json_file', (['final_gluu_versions_dict', '"""../pygluu/kubernetes/templates/gluu_versions.json"""'], {}), "(final_gluu_versions_dict,\n '../pygluu/kubernetes/templates/gluu_versions.json')\n", (8980, 9063), False, 'from common import update_json_file, get_logger, exec_cmd\n'), ((3921, 3939), 'json.loads', 'json.loads', (['stdout'], {}), '(stdout)\n', (3931, 3939), False, 'import json\n'), ((5182, 5242), 'pathlib.Path', 'Path', (['"""../pygluu/kubernetes/templates/helm/gluu/values.yaml"""'], {}), "('../pygluu/kubernetes/templates/helm/gluu/values.yaml')\n", (5186, 5242), False, 'from pathlib import Path\n')] |
from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen
from datetime import date
import math
import openpyxl
import pandas as pd
# Scrape governing.com's state minority-population table and append the five
# percentage columns to the local states_info.xlsx workbook.
fname = 'https://www.governing.com/gov-data/census/state-minority-population-data-estimates.html'
# A browser User-Agent is sent because the site rejects default urllib clients.
req = Request(fname, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req)
page_soup = soup(webpage, "html.parser")
containers = page_soup.findAll("table")
# The second table on the page holds the per-state data.
container = containers[1]
A = container.findAll("tr")
# One accumulator list per data column (5 demographic percentages).
tmp_list = [[], [], [], [], []]
# Rows 1..51 cover the states; row 9 is skipped -- presumably a non-state row
# (e.g. D.C. or a header), TODO confirm against the live table.
for x in range(1, 52):
    if x == 9:
        continue
    B = A[x].findAll("td")
    # Columns 1..5 hold the percentages; strip the <td> markup and '%' sign.
    for c in range(1, 6):
        s = str(B[c])
        s1 = s.replace('<td>', '')
        s2 = s1.replace('</td>', '')
        s3 = s2.replace('%', '')
        tmp_list[c-1].append(float(s3))
df = pd.read_excel('states_info.xlsx')
headers_list = ['hispanic', 'white', 'black', 'asian', 'american indian']
for pos in range(5):
    df[headers_list[pos]] = tmp_list[pos]
# Overwrites the workbook in place.
df.to_excel('states_info.xlsx')
| [
"bs4.BeautifulSoup",
"urllib.request.Request",
"urllib.request.urlopen",
"pandas.read_excel"
] | [((261, 314), 'urllib.request.Request', 'Request', (['fname'], {'headers': "{'User-Agent': 'Mozilla/5.0'}"}), "(fname, headers={'User-Agent': 'Mozilla/5.0'})\n", (268, 314), False, 'from urllib.request import Request, urlopen\n'), ((325, 337), 'urllib.request.urlopen', 'urlopen', (['req'], {}), '(req)\n', (332, 337), False, 'from urllib.request import Request, urlopen\n'), ((350, 378), 'bs4.BeautifulSoup', 'soup', (['webpage', '"""html.parser"""'], {}), "(webpage, 'html.parser')\n", (354, 378), True, 'from bs4 import BeautifulSoup as soup\n'), ((788, 821), 'pandas.read_excel', 'pd.read_excel', (['"""states_info.xlsx"""'], {}), "('states_info.xlsx')\n", (801, 821), True, 'import pandas as pd\n')] |
from django.core.exceptions import FieldError
from staff.models import Staff
import re
def get_choices():
    """Return the status dropdown choices (kept in one place for easy edits)."""
    return (
        ('', ''),
        ('Test', 'Test'),
        ('Fertig', 'Fertig'),
        ('Löschen', 'Löschen'),
        ('Vertrieb', 'Vertrieb'),
        ('Produktion', 'Produktion'),
        ('Bearbeitung', 'Bearbeitung'),
    )
# Build (initials, name) choice sets from all Staff rows.
# NOTE(review): evaluated once at module import -- staff added later are not
# picked up until the process restarts; confirm that is acceptable.
STAFFCHOICESONE = set()
for staff in Staff.objects.all():
    STAFFCHOICESONE.add((staff.initialies, staff.name))
# Second variant additionally contains an empty choice.
STAFFCHOICESTWO = set()
STAFFCHOICESTWO.add(('', ''))
for staff in Staff.objects.all():
    STAFFCHOICESTWO.add((staff.initialies, staff.name))
def check_for_update(queryset):
    """Recompute and persist ``time_in_weeks`` for every object in *queryset*.

    ``time_in_weeks`` is the whole number of weeks between ``created_at``
    and ``finished_until``.  The update is best-effort: an object that is
    missing a date (or otherwise fails) is skipped.  The previous bare
    ``except: pass`` wrapped the whole loop, so one bad object silently
    aborted the update of every remaining object.
    """
    for obj in queryset:
        try:
            delta_weeks = (obj.finished_until - obj.created_at) / 7
            obj.time_in_weeks = delta_weeks.days
            obj.save()
        except Exception:
            # Best-effort by design: skip objects we cannot update.
            continue
def check_form_and_db(form, queryset):
    """
    Compare the submitted form data against the stored object and report
    whether a tracked field changed.
    :param form: bound form carrying the new values on ``form.instance``
    :param queryset: the currently stored object
    :return: boolean update

    NOTE(review): because this is one elif chain, only the FIRST differing
    field decides the result.  If e.g. ``status`` differs (mapped to False)
    a later change in ``staff`` is never seen -- confirm this is intended.
    status / optional_status / production_remark deliberately do not count
    as updates.
    """
    update = False
    if queryset.box != form.instance.box:
        update = True
    elif queryset.customer != form.instance.customer:
        update = True
    elif queryset.hardware != form.instance.hardware:
        update = True
    elif queryset.created_at != form.instance.created_at:
        update = True
    elif queryset.status != form.instance.status:
        update = False
    elif queryset.finished_until != form.instance.finished_until:
        update = True
    elif queryset.optional_status != form.instance.optional_status:
        update = False
    # NOTE(review): duplicate of the finished_until check above -- this
    # branch is unreachable and can be removed.
    elif queryset.finished_until != form.instance.finished_until:
        update = True
    elif queryset.staff != form.instance.staff:
        update = True
    elif queryset.time_in_weeks != int(form.instance.time_in_weeks):
        update = True
    elif queryset.remark != form.instance.remark:
        update = True
    elif queryset.production_remark != form.instance.production_remark:
        update = False
    return update
def update_time_in_weeks(date1, date2):
    """Return the span from *date1* to *date2*: plain day count when it is
    at most a week, otherwise the (float) number of weeks."""
    day_count = (date2 - date1).days
    if day_count <= 7:
        return day_count
    return day_count / 7
# Hex background colour used to render each status value in the UI.
COLORS = {
    'Fertig': '#33cc00',
    'Test': '#99ff99',
    'Bearbeitung': '#ffff00',
    'Produktion': '#ffffcc',
    'Vertrieb': '#ff99ff',
    'Löschen': '#ffffff'
}
def searching(model, search_string, *args, **kwargs):
    '''
    usage e.g.:
    t = searching(ModelName, search_string, 'Foo', 'Bar', **kwargs)
    tmp = ModelName.objects.none()
    for i in t:
        tmp = i | tmp #merge Querysets
    :param model: Django Modelobject
    :param search_string: self explaning
    :param args: datatypes that should be excluded
    :param kwargs: can contain exlude or exact as key with a list of values containing the field name/-s
    :return: list of querysets gte 1
    '''
    # Map every model field name to its internal type (e.g. 'CharField').
    types = [field.get_internal_type() for field in model._meta.get_fields()]
    names = [f.name for f in [field for field in model._meta.get_fields()]]
    field_name_dict = dict(zip(names, types))
    excat_fields = []
    foreignKeyFields = None
    special_filter = None
    if kwargs:
        # Each option is optional; missing keys are simply ignored.
        try:
            foreignKeyFields = kwargs['foreignKeyFields']
        except KeyError:
            pass
        try:
            special_filter = kwargs['filter']
        except KeyError:
            pass
        try:
            field_name_dict = remove_items_dict(field_name_dict, kwargs['exclude'])
        except KeyError:
            pass
        try:
            excat_fields = kwargs['exact']
        except KeyError:
            pass
    # to use following e.g. in function call:
    # data = {'exclude': liste['foo', ]}
    # searching(modelname, searchstring, kwargs=data)
    try:
        if 'exclude' in kwargs['kwargs']:
            field_name_dict = remove_items_dict(field_name_dict, kwargs['kwargs']['exclude'])
        elif 'exact' in kwargs:
            excat_fields = kwargs['exact']
    except KeyError:
        pass
    if args:
        field_name_dict = remove_items_dict(field_name_dict, args)
    # Optional pre-filter restricting the base queryset, given as (field, value).
    if special_filter is not None:
        tmp = model.objects.filter(**{special_filter[0]: special_filter[1]})
    else:
        tmp = model.objects.all()
    liste = []
    # NOTE(review): the local name ``filter`` shadows the builtin below, and
    # every queryset is evaluated twice (once for len(), once appended).
    for key, value in field_name_dict.items():
        if value != 'ForeignKey' and value != 'ManyToManyField':
            if key in excat_fields:
                filter = f'{key}__iexact'
                if len(tmp.filter(**{filter: search_string})) > 0:
                    liste.append(tmp.filter(**{filter: search_string}))
            else:
                filter = f'{key}__icontains'
                if len(tmp.filter(**{filter: search_string})) > 0:
                    liste.append(tmp.filter(**{filter: search_string}))
        elif value == 'ManyToManyField' and key == 'customer_collar':
            # Special case: collars are matched by serial number first,
            # falling back to primary key.
            filter = f'{key}__serialno__icontains'
            if len(tmp.filter(**{filter: search_string})) > 0:
                liste.append(tmp.filter(**{filter: search_string}))
            else:
                filter = f'{key}__pk__iexact'
                if len(tmp.filter(**{filter: search_string})) > 0:
                    liste.append(tmp.filter(**{filter: search_string}))
        else:
            # Foreign keys: search the caller-supplied related fields, or
            # a related ``name`` field by default.
            if foreignKeyFields is not None:
                for keyfield in foreignKeyFields:
                    filter = f'{key}__{keyfield}__icontains'
                    try:
                        if len(tmp.filter(**{filter: search_string})) > 0:
                            liste.append(tmp.filter(**{filter: search_string}))
                    except FieldError:
                        # Related model lacks this field -- skip it.
                        pass
            else:
                filter = f'{key}__name__icontains'
                if len(tmp.filter(**{filter: search_string})) > 0:
                    liste.append(tmp.filter(**{filter: search_string}))
    return liste
def remove_items_dict(dictionary, keys):
    """Return a copy of *dictionary* without any entry whose key OR value
    appears in *keys*."""
    filtered = {}
    for key, value in dictionary.items():
        if key in keys or value in keys:
            continue
        filtered[key] = value
    return filtered
def move_ids_from_remark_to_ids(text):
    """Extract device ids from a free-form production remark.

    Single ids (more than four digits) come first, then id ranges such as
    "123-456", all joined by '; '.
    """
    ranges = re.findall(r'[0-9]*-[0-9]*', text)
    remainder = re.sub(r'[0-9]*-[0-9]*', '', text)
    singles = [token for token in re.findall(r'[\d]*', remainder) if len(token) > 4]
    return '; '.join(singles) + '; ' + '; '.join(ranges)
def filter_ids(obj, id):
    '''
    Find the pk of the first row of *obj* whose ``ids`` field contains *id*,
    either literally or inside a "start-end" range token.
    :param obj: Django model class exposing ``objects``
    :param id: id to look up (string of digits)
    :return: matching pk, or None
    '''
    queryset = obj.objects.all().only('pk', 'ids')
    for i in queryset:
        if i.ids is not None:
            if '-' in i.ids:
                # Tokens are '; '-separated; keep only the range tokens.
                x = i.ids.split('; ')
                x = list(filter(lambda x: '-' in x, x))
                for ids in x:
                    # NOTE(review): this condition is almost always true
                    # (e.g. any id below both bounds satisfies the second
                    # half).  It looks like a containment test
                    # ``start <= id <= end`` was intended -- confirm.
                    if int(ids.split('-')[0]) > int(id) or int(id) < int(ids.split('-')[1]):
                        return i.pk
                    else:
                        if id in i.ids:
                            return i.pk
                        else:
                            return None
                else:
                    # for/else: only reached when x is empty (no range tokens).
                    if id in i.ids:
                        return i.pk
    return None
| [
"re.sub",
"re.findall",
"staff.models.Staff.objects.all"
] | [((476, 495), 'staff.models.Staff.objects.all', 'Staff.objects.all', ([], {}), '()\n', (493, 495), False, 'from staff.models import Staff\n'), ((621, 640), 'staff.models.Staff.objects.all', 'Staff.objects.all', ([], {}), '()\n', (638, 640), False, 'from staff.models import Staff\n'), ((6602, 6635), 're.findall', 're.findall', (['"""[0-9]*-[0-9]*"""', 'text'], {}), "('[0-9]*-[0-9]*', text)\n", (6612, 6635), False, 'import re\n'), ((6686, 6719), 're.sub', 're.sub', (['"""[0-9]*-[0-9]*"""', '""""""', 'text'], {}), "('[0-9]*-[0-9]*', '', text)\n", (6692, 6719), False, 'import re\n'), ((6782, 6807), 're.findall', 're.findall', (['"""[\\\\d]*"""', 'tmp'], {}), "('[\\\\d]*', tmp)\n", (6792, 6807), False, 'import re\n')] |
from threading import Thread
from flask import current_app,render_template
from flask_mail import Message
from . import mail
def send_async_email(app,msg):
    # Runs in a worker thread: an app context must be pushed manually
    # before flask_mail can send.
    with app.app_context():
        mail.send(msg)
def send_email(to,subject,template,**kwargs):
    """Render *template* (.txt and .html variants) and mail it to *to*
    from a background thread; returns the started Thread."""
    app = current_app._get_current_object()
    subject_line = app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject
    msg = Message(subject_line,
                  sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    mail_thread = Thread(target=send_async_email, args=[app, msg])
    mail_thread.start()
    return mail_thread
"flask.render_template",
"flask_mail.Message",
"flask.current_app._get_current_object",
"threading.Thread"
] | [((263, 296), 'flask.current_app._get_current_object', 'current_app._get_current_object', ([], {}), '()\n', (294, 296), False, 'from flask import current_app, render_template\n'), ((305, 433), 'flask_mail.Message', 'Message', (["(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject)"], {'sender': "app.config['FLASKY_MAIL_SENDER']", 'recipients': '[to]'}), "(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject, sender=\n app.config['FLASKY_MAIL_SENDER'], recipients=[to])\n", (312, 433), False, 'from flask_mail import Message\n'), ((453, 497), 'flask.render_template', 'render_template', (["(template + '.txt')"], {}), "(template + '.txt', **kwargs)\n", (468, 497), False, 'from flask import current_app, render_template\n'), ((508, 553), 'flask.render_template', 'render_template', (["(template + '.html')"], {}), "(template + '.html', **kwargs)\n", (523, 553), False, 'from flask import current_app, render_template\n'), ((559, 607), 'threading.Thread', 'Thread', ([], {'target': 'send_async_email', 'args': '[app, msg]'}), '(target=send_async_email, args=[app, msg])\n', (565, 607), False, 'from threading import Thread\n')] |
import pickle
from copy import deepcopy
from graphviz import Digraph
from torch.nn import Conv2d, MaxPool2d, ELU, Dropout, BatchNorm2d
import pandas as pd
from EEGNAS.model_generation.abstract_layers import IdentityLayer, ConvLayer, PoolingLayer, ActivationLayer
from EEGNAS.model_generation.custom_modules import IdentityModule
# One-letter abbreviation used when rendering a layer in tables and graphs.
SHORT_NAMES = {Conv2d: 'C',
               MaxPool2d: 'M',
               ELU: 'E',
               Dropout: 'D',
               BatchNorm2d: 'B'}
def get_layer_stats(layer, delimiter):
    """Format the interesting hyper-parameters of *layer*, prefixed by
    *delimiter*; layer types without such parameters yield ''."""
    layer_type = type(layer)
    if layer_type == Conv2d:
        return f'{delimiter}f:{layer.out_channels},k:{layer.kernel_size[0]}'
    if layer_type == MaxPool2d:
        return f'{delimiter}k:{layer.kernel_size[0]},s:{layer.stride[0]}'
    # ELU / BatchNorm2d / Dropout (and anything else) carry no stats here.
    return ''
def export_eegnas_table(models, filename):
    """Write a CSV with one column per model, each cell a layer summary.

    A cell is the layer's one-letter code (see SHORT_NAMES) plus its key
    hyper-parameters.  The last module and IdentityModule placeholders are
    skipped, mirroring plot_eegnas_model.

    Fix: the original built a *named* Series per model, set its name, then
    discarded it and appended a second, unnamed Series -- dead code removed
    (columns are relabelled below anyway).
    """
    model_series = []
    for model in models:
        layer_list = []
        module_list = list(model._modules.values())[:-1]
        module_list = [l for l in module_list if type(l) != IdentityModule]
        for layer in module_list:
            layer_list.append(f'{SHORT_NAMES[type(layer)]}' + get_layer_stats(layer, ' '))
        model_series.append(pd.Series(layer_list))
    df = pd.DataFrame(model_series).transpose()
    # Columns are labelled 1-based for human consumption.
    df.columns = [f'Model {i+1}' for i in range(len(models))]
    df.to_csv(filename)
def plot_eegnas_model(model, f, subgraph_idx, nodes):
    """Append one model's layers as a node chain to graphviz digraph *f*.

    Consecutive layers of the same parameter-free type (BatchNorm2d /
    Dropout / ELU) are collapsed into a single node labelled "X n".
    *nodes* is the chain built so far; it is copied, not mutated.
    """
    nodes = deepcopy(nodes)
    multiplier = 1
    # Drop the final module and any IdentityModule placeholders.
    module_list = list(model._modules.values())[:-1]
    module_list = [l for l in module_list if type(l) != IdentityModule]
    for layer_idx, layer in enumerate(module_list):
        if type(layer) == BatchNorm2d or type(layer) == Dropout or type(layer) == ELU:
            # Same type follows next: just count it, emit nothing yet.
            if layer_idx < len(module_list) - 1 and type(module_list[layer_idx + 1]) == type(layer):
                multiplier += 1
                continue
        layer_str = f'{SHORT_NAMES[type(layer)]}'
        layer_str += get_layer_stats(layer, ',')
        # <<B>...</B>> is graphviz HTML-like bold label syntax.
        layer_str = f'<<B>{layer_str}</B>>'
        if multiplier > 1:
            f.node(f'{subgraph_idx}_{layer_idx}', label=layer_str, xlabel=f'<<B>X {multiplier}</B>>')
        else:
            f.node(f'{subgraph_idx}_{layer_idx}', label=layer_str)
        nodes.append(f'{subgraph_idx}_{layer_idx}')
        if type(layer) == BatchNorm2d or type(layer) == Dropout or type(layer) == ELU:
            # Run of identical layers ended: reset the collapse counter.
            if layer_idx < len(module_list) - 1 and type(module_list[layer_idx + 1]) != type(layer):
                multiplier = 1
    nodes.append('output')
    # Chain every node to its successor.
    for idx in range(len(nodes) - 1):
        f.edge(nodes[idx], nodes[idx+1])
def create_ensemble_digraph(weighted_population, n_members):
    """Render the first *n_members* models of the population as one PNG
    graph ('test_eegnas_graphviz.png')."""
    f = Digraph('EEGNAS model', filename='EEGNAS_model.gv', graph_attr={'dpi':'300'}, format='png')
    f.attr('node', shape='box')
    # Shared input/output nodes bracket every member's layer chain.
    f.node(f'input', label='<<B>Input: (Bsize, 240, 22)</B>>')
    f.node(f'output', label='<<B>Output: (Bsize, 5, 22)</B>>')
    nodes = ['input']
    for i in range(n_members):
        plot_eegnas_model(weighted_population[i]['finalized_model'], f, i, nodes)
    f.render('test_eegnas_graphviz', view=False)
# --- ad-hoc driver -------------------------------------------------------
# NOTE(review): hard-coded absolute paths; pickle.load is only safe on
# trusted files.
sum_path = "/home/user/Documents/eladr/netflowinsights/CDN_overflow_prediction/eegnas_models/195_10_input_height_240_normalized_handovers_all_inheritance_fold9_architectures_iteration_1.p"
per_path = '/home/user/Documents/eladr/netflowinsights/CDN_overflow_prediction/eegnas_models/197_10_input_height_240_normalized_per_handover_handovers_all_inheritance_fold9_architectures_iteration_1.p'
weighted_population_per = pickle.load(open(per_path, 'rb'))
weighted_population_sum = pickle.load(open(sum_path, 'rb'))
# export_eegnas_table([weighted_population_per[i]['finalized_model'] for i in range(5)], 'per_architectures.csv')
# export_eegnas_table([weighted_population_sum[i]['finalized_model'] for i in range(5)], 'sum_architectures.csv')
create_ensemble_digraph(weighted_population_per, 5)
| [
"graphviz.Digraph",
"pandas.Series",
"pandas.DataFrame",
"copy.deepcopy"
] | [((1689, 1704), 'copy.deepcopy', 'deepcopy', (['nodes'], {}), '(nodes)\n', (1697, 1704), False, 'from copy import deepcopy\n'), ((2947, 3043), 'graphviz.Digraph', 'Digraph', (['"""EEGNAS model"""'], {'filename': '"""EEGNAS_model.gv"""', 'graph_attr': "{'dpi': '300'}", 'format': '"""png"""'}), "('EEGNAS model', filename='EEGNAS_model.gv', graph_attr={'dpi':\n '300'}, format='png')\n", (2954, 3043), False, 'from graphviz import Digraph\n'), ((1365, 1386), 'pandas.Series', 'pd.Series', (['layer_list'], {}), '(layer_list)\n', (1374, 1386), True, 'import pandas as pd\n'), ((1464, 1485), 'pandas.Series', 'pd.Series', (['layer_list'], {}), '(layer_list)\n', (1473, 1485), True, 'import pandas as pd\n'), ((1496, 1522), 'pandas.DataFrame', 'pd.DataFrame', (['model_series'], {}), '(model_series)\n', (1508, 1522), True, 'import pandas as pd\n')] |
import base64
import hashlib
import hmac
import urllib
import urlparse

import requests
from requests.auth import AuthBase
#add your custom auth handler class to this module
class MyEncryptedCredentialsAuthHAndler(AuthBase):
    # Unfilled template: currently a no-op pass-through auth handler.
    # (Class name spelling kept as-is -- handlers are looked up by name.)
    def __init__(self,**args):
        # setup any auth-related data here
        #self.username = args['username']
        #self.password = args['password']
        pass
    def __call__(self, r):
        # modify and return the request
        #r.headers['foouser'] = self.username
        #r.headers['foopass'] = self.password
        return r
#template
class MyCustomAuth(AuthBase):
    # Copy-me template for new handlers: currently a no-op pass-through.
    def __init__(self,**args):
        # setup any auth-related data here
        #self.username = args['username']
        #self.password = args['password']
        pass
    def __call__(self, r):
        # modify and return the request
        #r.headers['foouser'] = self.username
        #r.headers['foopass'] = self.password
        return r
class MyCustomOpsViewAuth(AuthBase):
    """Opsview auth handler: exchanges username/password for a token via a
    PUT to *url*, then sends it in the X-Opsview-* request headers."""
    def __init__(self,**args):
        self.username = args['username']
        self.password = args['password']
        self.url = args['url']
    def __call__(self, r):
        # Issue a PUT request (not a GET) to the auth endpoint.
        payload = {'username': self.username, 'password': self.password}
        # Fix: ``verify=false`` was a NameError -- Python's literal is False.
        # (``requests`` is now imported at module level.)
        auth_response = requests.put(self.url, params=payload, verify=False)
        # TODO: extract the real token from auth_response -- consult the
        # Opsview REST API documentation; "mytoken" is still a placeholder.
        tokenstring = "mytoken"
        headers = {'X-Opsview-Username': self.username, 'X-Opsview-Token': tokenstring}
        r.headers = headers
        return r
class MyUnifyAuth(AuthBase):
    """Unify auth handler: logs in once and replays the session cookies on
    the outgoing request."""
    def __init__(self,**args):
        self.username = args['username']
        self.password = args['password']
        self.url = args['url']
    def __call__(self, r):
        # Fix: the original lacked parentheses, so ``% self.url`` formatted
        # only one value into three %s slots and the trailing names made the
        # whole expression a tuple instead of a URL string.
        login_url = '%s?username=%s&login=login&password=%s' % (
            self.url, self.username, self.password)
        login_response = requests.get(login_url)
        cookies = login_response.cookies
        if cookies:
            r.cookies = cookies
        return r
#example of adding a client certificate
class MyAzureCertAuthHAndler(AuthBase):
    """Attach a client certificate (path supplied as ``certPath``) to the
    outgoing request."""
    def __init__(self, **kwargs):
        self.cert = kwargs['certPath']
    def __call__(self, request):
        request.cert = self.cert
        return request
#example of adding a client certificate
class GoogleBigQueryCertAuthHandler(AuthBase):
    """Attach a client certificate (path supplied as ``certPath``) to the
    outgoing request."""
    def __init__(self, **kwargs):
        self.cert = kwargs['certPath']
    def __call__(self, request):
        request.cert = self.cert
        return request
#cloudstack auth example
class CloudstackAuth(AuthBase):
    # Signs each request with an HMAC-SHA1 signature derived from the
    # secret key, presumably following the CloudStack API signing scheme --
    # confirm against the CloudStack developer docs.  (Python 2 code:
    # relies on the urlparse module and urllib.quote_plus.)
    def __init__(self,**args):
        # setup any auth-related data here
        self.apikey = args['apikey']
        self.secretkey = args['secretkey']
        pass
    def __call__(self, r):
        # modify and return the request
        parsed = urlparse.urlparse(r.url)
        # Base URL without the query string.
        url = parsed.geturl().split('?',1)[0]
        url_params= urlparse.parse_qs(parsed.query)
        #normalize the list value
        for param in url_params:
            url_params[param] = url_params[param][0]
        url_params['apikey'] = self.apikey
        # Parameters must be sorted before signing.
        keys = sorted(url_params.keys())
        sig_params = []
        for k in keys:
            # Spaces are encoded as %20, not '+', for the signature.
            sig_params.append(k + '=' + urllib.quote_plus(url_params[k]).replace("+", "%20"))
        query = '&'.join(sig_params)
        # HMAC-SHA1 over the lower-cased query, base64-encoded.
        signature = base64.b64encode(hmac.new(
            self.secretkey,
            msg=query.lower(),
            digestmod=hashlib.sha1
        ).digest())
        query += '&signature=' + urllib.quote_plus(signature)
        r.url = url + '?' + query
        return r
"urlparse.parse_qs",
"urllib.quote_plus",
"urlparse.urlparse"
] | [((3075, 3099), 'urlparse.urlparse', 'urlparse.urlparse', (['r.url'], {}), '(r.url)\n', (3092, 3099), False, 'import urlparse\n'), ((3166, 3197), 'urlparse.parse_qs', 'urlparse.parse_qs', (['parsed.query'], {}), '(parsed.query)\n', (3183, 3197), False, 'import urlparse\n'), ((3821, 3849), 'urllib.quote_plus', 'urllib.quote_plus', (['signature'], {}), '(signature)\n', (3838, 3849), False, 'import urllib\n'), ((3517, 3549), 'urllib.quote_plus', 'urllib.quote_plus', (['url_params[k]'], {}), '(url_params[k])\n', (3534, 3549), False, 'import urllib\n')] |
import sys
from setuptools import setup
# Base dependencies; a platform-specific keyboard backend is appended below.
required_packages = ["boombox", "Pillow", "PyYAML", "rich"]
win_packages = ["keyboard"]
unix_packages = ["pynput"]
# sys.platform values we dispatch on.
WIN = "win32"
LINUX = "linux"
MACOS = "darwin"
if sys.platform == WIN:
    required_packages += win_packages
elif sys.platform in (LINUX, MACOS):
    required_packages += unix_packages
# Package metadata; install_requires was assembled above per platform.
setup(
    name="pantheras_box",
    version="0.1.0",
    packages=[
        "pantheras_box",
        "pantheras_box.story",
        "pantheras_box.sounds",
        "pantheras_box.backend",
        "pantheras_box.frontend",
        "pantheras_box.keyboard_handlers",
    ],
    url="",
    license="MIT",
    author="<NAME>",
    author_email="",
    description="Pantheras box TUI game.",
    install_requires=required_packages,
    entry_points={
        "console_scripts": [
            "pantheras-box = pantheras_box.run:run_game",
        ],
    },
    # Ship the bundled story/sound/image assets alongside the code.
    package_data={"": ["**/*.txt", "**/*.yaml", "**/*.png", "**/*.wav"]},
    include_package_data=True,
)
| [
"setuptools.setup"
] | [((345, 897), 'setuptools.setup', 'setup', ([], {'name': '"""pantheras_box"""', 'version': '"""0.1.0"""', 'packages': "['pantheras_box', 'pantheras_box.story', 'pantheras_box.sounds',\n 'pantheras_box.backend', 'pantheras_box.frontend',\n 'pantheras_box.keyboard_handlers']", 'url': '""""""', 'license': '"""MIT"""', 'author': '"""<NAME>"""', 'author_email': '""""""', 'description': '"""Pantheras box TUI game."""', 'install_requires': 'required_packages', 'entry_points': "{'console_scripts': ['pantheras-box = pantheras_box.run:run_game']}", 'package_data': "{'': ['**/*.txt', '**/*.yaml', '**/*.png', '**/*.wav']}", 'include_package_data': '(True)'}), "(name='pantheras_box', version='0.1.0', packages=['pantheras_box',\n 'pantheras_box.story', 'pantheras_box.sounds', 'pantheras_box.backend',\n 'pantheras_box.frontend', 'pantheras_box.keyboard_handlers'], url='',\n license='MIT', author='<NAME>', author_email='', description=\n 'Pantheras box TUI game.', install_requires=required_packages,\n entry_points={'console_scripts': [\n 'pantheras-box = pantheras_box.run:run_game']}, package_data={'': [\n '**/*.txt', '**/*.yaml', '**/*.png', '**/*.wav']}, include_package_data\n =True)\n", (350, 897), False, 'from setuptools import setup\n')] |
from zipcodes import is_valid
from random import randint
from all_lunch_locs import call_lunch_api
default_max = 30
default_range = 20
def random_zip():
    """Return a random, valid five-digit US zip code as a string."""
    # Good food beats close food, so pick anywhere in the country.
    candidate = 0
    # The zipcodes module validates string input only.
    while not is_valid(str(candidate)):
        candidate = randint(10000, 99999)
    return str(candidate)
def within_lunch_range(input_number):
    """Return True when *input_number* (int or numeric str) is at most the default max distance."""
    return default_max >= int(input_number)
def set_values_with_default(loc=None, range=default_range):
    """Build the lunch-search parameter dict.

    :param loc: zip code string; when omitted, a fresh random zip is drawn
        per call.  (Bug fix: the old default ``loc=random_zip()`` was
        evaluated once at import time, so every default call reused the
        same "random" zip code.)
    :param range: search radius in miles (name kept for caller compatibility,
        even though it shadows the builtin).
    :returns: ``{'location': ..., 'range': ...}``
    """
    if loc is None:
        loc = random_zip()
    return {'location': loc, 'range': range}
def two_params(first_param, second_param):
    """Build the search dict from a (zip, range) pair, falling back to defaults when either is invalid."""
    if not (is_valid(first_param) and within_lunch_range(second_param)):
        return set_values_with_default()
    return set_values_with_default(first_param, second_param)
def split_params(param_text):
    """Parse the slash-command text into a location/range dict.

    No text -> random zip with the default range; one token -> treated as a
    zip if valid; two tokens -> zip plus range.
    """
    if not param_text:
        return set_values_with_default()
    pieces = param_text.split()
    if len(pieces) == 2:
        return two_params(pieces[0], pieces[1])
    if len(pieces) == 1 and is_valid(pieces[0]):
        return set_values_with_default(loc=pieces[0])
    return set_values_with_default()
def select_random_location(lunch_response):
    """Pick one business uniformly at random from a search-response dict."""
    businesses = lunch_response['businesses']
    return businesses[randint(0, len(businesses) - 1)]
def build_response_text(loc_dict):
    """Format the winning location as the Slack announcement string."""
    address = " ".join(loc_dict["location"]["display_address"])
    return f'The Wheel of Lunch has selected {loc_dict["name"]} at {address}'
def create_lunch_event(request):
    """Handle a /lunch slash-command payload and return the response text."""
    params = split_params(request.get('text'))
    api_response = call_lunch_api(location=params['location'], range=params['range'])
    return build_response_text(select_random_location(api_response.json()))
if __name__ == '__main__':
    # Manual smoke test: exercises the full pipeline against the live API.
    # format of the json
    # CombinedMultiDict([ImmutableMultiDict([]), ImmutableMultiDict(
    # [('token', 'workspace token'), ('team_id', 'team_id'), ('team_domain', 'some_string_name'),
    # ('channel_id', 'some_channel_id'), ('channel_name', 'some_channel_name'), ('user_id', 'user_id_requested'), ('user_name', 'user_name_requested'),
    # ('command', '/lunch'), ('text', '80233'),  #<---- args
    # ('response_url', 'response url'),
    # ('trigger_id', 'slash trigger command')])])
    print(create_lunch_event({'text': '80020 20'}))
    print(create_lunch_event({'text': '20'}))
| [
"zipcodes.is_valid",
"all_lunch_locs.call_lunch_api",
"random.randint"
] | [((1451, 1478), 'random.randint', 'randint', (['(0)', '(number_locs - 1)'], {}), '(0, number_locs - 1)\n', (1458, 1478), False, 'from random import randint\n'), ((1791, 1865), 'all_lunch_locs.call_lunch_api', 'call_lunch_api', ([], {'location': "param_dict['location']", 'range': "param_dict['range']"}), "(location=param_dict['location'], range=param_dict['range'])\n", (1805, 1865), False, 'from all_lunch_locs import call_lunch_api\n'), ((411, 442), 'random.randint', 'randint', (['range_start', 'range_end'], {}), '(range_start, range_end)\n', (418, 442), False, 'from random import randint\n'), ((722, 743), 'zipcodes.is_valid', 'is_valid', (['first_param'], {}), '(first_param)\n', (730, 743), False, 'from zipcodes import is_valid\n'), ((1193, 1212), 'zipcodes.is_valid', 'is_valid', (['params[0]'], {}), '(params[0])\n', (1201, 1212), False, 'from zipcodes import is_valid\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from lottery.branch import base
import models.registry
from pruning.mask import Mask
from pruning.pruned_model import PrunedModel
from training import train
from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank
from platforms.platform import get_platform
from foundations import paths
import json
import os
import datasets.registry
import copy
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import tqdm
import seaborn as sns
import pandas as pd
import numpy as np
from utils.tensor_utils import generate_mask_active, erank, shuffle_tensor, mutual_coherence
sns.set_style("whitegrid")
class Branch(base.Branch):
    def branch_function(self, seed: int, erank_path: str = '', coherence_path: str = '',
                        frobenius_path: str = '', min_singular_path: str = '', nuclear_path: str = '',
                        normalized: bool = False, batch_average: int = 1):
        """Plot singular-value spectra of final intermediate features for masks
        chosen by several criteria (erank, mutual coherence, frobenius, min
        singular, nuclear) against the LTH mask and a fixed-seed random mask.

        The *_path arguments name .npy files (layer x seed score matrices)
        resolved relative to the parent of the level root; per layer, the
        best-scoring seed for each criterion is used to regenerate that
        criterion's shuffled mask.  Saves svd_plot.pdf into the branch root.
        """
        # Randomize the mask.
        orig_mask = Mask.load(self.level_root)
        best_mask = Mask()  # NOTE(review): unused — leftover from an earlier version?
        start_step = self.lottery_desc.str_to_step('0ep')
        # Use level 0 model for dense pre-pruned model
        if not get_platform().is_primary_process: return
        base_model = models.registry.load(self.level_root.replace(f'level_{self.level}', 'level_0'), start_step,
                                         self.lottery_desc.model_hparams)
        orig_model = PrunedModel(base_model, Mask.ones_like(base_model))
        model_graduate = copy.deepcopy(orig_model)
        model = copy.deepcopy(orig_model)  # NOTE(review): unused
        lth_model = PrunedModel(copy.deepcopy(base_model), orig_mask)
        # Randomize while keeping the same layerwise proportions as the original mask.
        # Only non-conv prunable layers are considered for shuffling.
        prunable_tensors = set(orig_model.prunable_layer_names) - set(orig_model.prunable_conv_names)
        # k[6:] strips a 6-char prefix (presumably "model.") — TODO confirm.
        tensors = {k[6:]: v.clone() for k, v in orig_model.state_dict().items() if k[6:] in prunable_tensors}
        train_loader = datasets.registry.get(self.lottery_desc.dataset_hparams, train=True)
        input = []
        # Skip batch 0 when averaging over several batches.
        offset = 1 if batch_average > 1 else 0
        for b in range(batch_average):
            input.append(list(train_loader)[b+offset][0])
        singular_values = []
        eranks_values = []
        # lth_features = lth_model.intermediate(input)
        # _, s, _ = torch.svd(lth_features[-1], compute_uv=False)
        # if normalized:
        #     s = s / s[0]
        # singular_values.append(s)
        # Per-layer score matrices produced by earlier runs (layers x seeds).
        eranks = np.load(os.path.join(self.level_root, '../', erank_path), allow_pickle=True)
        coherence = np.load(os.path.join(self.level_root, '../', coherence_path), allow_pickle=True)
        frobenius = np.load(os.path.join(self.level_root, '../', frobenius_path), allow_pickle=True)
        min_singular = np.load(os.path.join(self.level_root, '../', min_singular_path), allow_pickle=True)
        nuclear = np.load(os.path.join(self.level_root, '../', nuclear_path), allow_pickle=True)
        erank_seeds = []
        coherence_seeds = []
        frobenius_seeds = []
        min_singular_seeds = []
        nuclear_seeds = []
        # For each layer, pick the seed that maximizes each criterion.
        for layer in range(eranks.shape[0]):
            erank_seeds.append(np.argmax(eranks[layer, :]))
            coherence_seeds.append(np.argmax(coherence[layer, :]))
            frobenius_seeds.append(np.argmax(frobenius[layer, :]))
            min_singular_seeds.append(np.argmax(min_singular[layer, :]))
            nuclear_seeds.append(np.argmax(nuclear[layer, :]))
        # Assign all masks to model
        for b in range(batch_average):
            # Reference spectrum: the lottery-ticket (LTH) mask.
            lth_features = lth_model.intermediate(input[b])
            _, s, _ = torch.svd(lth_features[-1], compute_uv=False)
            if normalized:
                s = s / s[0]
            eranks_values.append(erank(lth_features[-1]))
            singular_values.append(s)
            # One spectrum per criterion, plus a fixed-seed random mask last.
            for seeds in [erank_seeds, coherence_seeds, frobenius_seeds, min_singular_seeds, nuclear_seeds, [seed] * len(erank_seeds)]:
                curr_mask = Mask()
                for i, (name, param) in enumerate(tensors.items()):
                    # Shuffle each layer's original mask with its chosen seed.
                    curr_mask[name] = shuffle_tensor(orig_mask[name], int(seed + seeds[i])).int()
                    model_graduate.register_buffer(PrunedModel.to_mask_name(name), curr_mask[name].float())
                features = model_graduate.intermediate(input[b])
                _, s, _ = torch.svd(features[-1], compute_uv=False)
                if normalized:
                    s = s / s[0]
                eranks_values.append(erank(features[-1]))
                singular_values.append(s)
                # Reset mask buffers before applying the next criterion.
                model_graduate = copy.deepcopy(orig_model)
        # features = lth_model(in)
        types = ['lth', 'erank', 'mutual coherence', 'frobenius', 'min singular', 'nuclear', 'random']
        # Long-form frame: one row per singular value, labelled by mask type.
        data = pd.concat([pd.DataFrame(
            {'svd_value': list(singular_values[i].detach().numpy()), 'type': [types[i % len(types)]] * len(singular_values[i]),
             'svd_index': list(range(len(singular_values[i])))}) for i in range(len(types) * batch_average)], ignore_index=True)
        # The nuclear criterion is excluded from the plot.
        f = sns.lineplot(data=data.loc[data['type'] != 'nuclear'], x='svd_index', y='svd_value', hue='type', markers=True, dashes=False, style="type")
        f.set(yscale='log')
        f.get_figure().savefig(os.path.join(self.branch_root, 'svd_plot.pdf'))
    @staticmethod
    def description():
        return "Plot singular values."
    @staticmethod
    def name():
        return 'singular_values'
| [
"utils.tensor_utils.erank",
"pruning.pruned_model.PrunedModel.to_mask_name",
"matplotlib.use",
"pruning.mask.Mask.load",
"pruning.mask.Mask.ones_like",
"os.path.join",
"numpy.argmax",
"seaborn.set_style",
"pruning.mask.Mask",
"seaborn.lineplot",
"torch.svd",
"copy.deepcopy",
"platforms.platf... | [((689, 710), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (703, 710), False, 'import matplotlib\n'), ((909, 935), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (922, 935), True, 'import seaborn as sns\n'), ((1281, 1307), 'pruning.mask.Mask.load', 'Mask.load', (['self.level_root'], {}), '(self.level_root)\n', (1290, 1307), False, 'from pruning.mask import Mask\n'), ((1328, 1334), 'pruning.mask.Mask', 'Mask', ([], {}), '()\n', (1332, 1334), False, 'from pruning.mask import Mask\n'), ((1791, 1816), 'copy.deepcopy', 'copy.deepcopy', (['orig_model'], {}), '(orig_model)\n', (1804, 1816), False, 'import copy\n'), ((1833, 1858), 'copy.deepcopy', 'copy.deepcopy', (['orig_model'], {}), '(orig_model)\n', (1846, 1858), False, 'import copy\n'), ((5386, 5529), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': "data.loc[data['type'] != 'nuclear']", 'x': '"""svd_index"""', 'y': '"""svd_value"""', 'hue': '"""type"""', 'markers': '(True)', 'dashes': '(False)', 'style': '"""type"""'}), "(data=data.loc[data['type'] != 'nuclear'], x='svd_index', y=\n 'svd_value', hue='type', markers=True, dashes=False, style='type')\n", (5398, 5529), True, 'import seaborn as sns\n'), ((1738, 1764), 'pruning.mask.Mask.ones_like', 'Mask.ones_like', (['base_model'], {}), '(base_model)\n', (1752, 1764), False, 'from pruning.mask import Mask\n'), ((1891, 1916), 'copy.deepcopy', 'copy.deepcopy', (['base_model'], {}), '(base_model)\n', (1904, 1916), False, 'import copy\n'), ((2778, 2826), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 'erank_path'], {}), "(self.level_root, '../', erank_path)\n", (2790, 2826), False, 'import os\n'), ((2875, 2927), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 'coherence_path'], {}), "(self.level_root, '../', coherence_path)\n", (2887, 2927), False, 'import os\n'), ((2976, 3028), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 
'frobenius_path'], {}), "(self.level_root, '../', frobenius_path)\n", (2988, 3028), False, 'import os\n'), ((3080, 3135), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 'min_singular_path'], {}), "(self.level_root, '../', min_singular_path)\n", (3092, 3135), False, 'import os\n'), ((3182, 3232), 'os.path.join', 'os.path.join', (['self.level_root', '"""../"""', 'nuclear_path'], {}), "(self.level_root, '../', nuclear_path)\n", (3194, 3232), False, 'import os\n'), ((3929, 3974), 'torch.svd', 'torch.svd', (['lth_features[-1]'], {'compute_uv': '(False)'}), '(lth_features[-1], compute_uv=False)\n', (3938, 3974), False, 'import torch\n'), ((5584, 5630), 'os.path.join', 'os.path.join', (['self.branch_root', '"""svd_plot.pdf"""'], {}), "(self.branch_root, 'svd_plot.pdf')\n", (5596, 5630), False, 'import os\n'), ((1463, 1477), 'platforms.platform.get_platform', 'get_platform', ([], {}), '()\n', (1475, 1477), False, 'from platforms.platform import get_platform\n'), ((3472, 3499), 'numpy.argmax', 'np.argmax', (['eranks[layer, :]'], {}), '(eranks[layer, :])\n', (3481, 3499), True, 'import numpy as np\n'), ((3536, 3566), 'numpy.argmax', 'np.argmax', (['coherence[layer, :]'], {}), '(coherence[layer, :])\n', (3545, 3566), True, 'import numpy as np\n'), ((3603, 3633), 'numpy.argmax', 'np.argmax', (['frobenius[layer, :]'], {}), '(frobenius[layer, :])\n', (3612, 3633), True, 'import numpy as np\n'), ((3673, 3706), 'numpy.argmax', 'np.argmax', (['min_singular[layer, :]'], {}), '(min_singular[layer, :])\n', (3682, 3706), True, 'import numpy as np\n'), ((3741, 3769), 'numpy.argmax', 'np.argmax', (['nuclear[layer, :]'], {}), '(nuclear[layer, :])\n', (3750, 3769), True, 'import numpy as np\n'), ((4064, 4087), 'utils.tensor_utils.erank', 'erank', (['lth_features[-1]'], {}), '(lth_features[-1])\n', (4069, 4087), False, 'from utils.tensor_utils import generate_mask_active, erank, shuffle_tensor, mutual_coherence\n'), ((4291, 4297), 'pruning.mask.Mask', 'Mask', ([], {}), 
'()\n', (4295, 4297), False, 'from pruning.mask import Mask\n'), ((4663, 4704), 'torch.svd', 'torch.svd', (['features[-1]'], {'compute_uv': '(False)'}), '(features[-1], compute_uv=False)\n', (4672, 4704), False, 'import torch\n'), ((4902, 4927), 'copy.deepcopy', 'copy.deepcopy', (['orig_model'], {}), '(orig_model)\n', (4915, 4927), False, 'import copy\n'), ((4806, 4825), 'utils.tensor_utils.erank', 'erank', (['features[-1]'], {}), '(features[-1])\n', (4811, 4825), False, 'from utils.tensor_utils import generate_mask_active, erank, shuffle_tensor, mutual_coherence\n'), ((4515, 4545), 'pruning.pruned_model.PrunedModel.to_mask_name', 'PrunedModel.to_mask_name', (['name'], {}), '(name)\n', (4539, 4545), False, 'from pruning.pruned_model import PrunedModel\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from collections import OrderedDict
from decimal import Decimal
from parser_data import InlineList, DuplicationList
from state import State, StateMachine
from type_check import is_int, is_float, is_sci_notation
from format import format
from error import DMPException
class ParserStateMachine(StateMachine):
    """State machine that drives the parser and owns the parsed data dict."""

    def __init__(self, options):
        parsed = OrderedDict()
        self.data = parsed
        # The neutral state is its own parent: finishing it loops back to itself.
        root_state = NeutralState(parsed)
        root_state.parent = root_state
        super(ParserStateMachine, self).__init__(root_state, options)

    def get_data(self):
        """Return the ordered mapping accumulated so far."""
        return self.data

    def preprocess(self, val):
        """Trim surrounding whitespace before a line is fed to a state."""
        return val.strip()
class DataState(State):
    """Base state that carries a reference to the shared parse-result mapping."""
    def __init__(self, data):
        super(DataState, self).__init__()
        # Shared mutable mapping; concrete states write parsed values into it.
        self.data = data
class NeutralState(DataState):
    """Default state: parses ``key = value`` lines and defers ``Name { ... }``
    blocks to :class:`DictState`."""

    def run(self, line):
        if '=' in line:
            key, val = [val.strip() for val in line.split('=')]
            old_data = self.data.get(key, None)
            if old_data is None:
                # First time we got said data, just add it in
                self.data[key] = self.read_data(val)
            elif isinstance(old_data, DuplicationList):
                # The stored data is a list, append to it.
                # Bug fix: parse the value first so every list entry is typed
                # consistently with the first-seen (parsed) value.
                self.data[key].append(self.read_data(val))
            else:
                # Same key seen again: promote the stored value to a list.
                old_val = self.data[key]
                self.data[key] = DuplicationList()
                self.data[key].append(old_val)
                # Bug fix: the new value must be parsed like the old one was.
                self.data[key].append(self.read_data(val))
            return self.finish_state()
        else:
            self.debug('= DICT =')
            return self.rerun_with_state(
                DictState(self.data).set_parent(self.parent)
            )

    def read_data(self, val):
        """Coerce a raw value string to InlineList/bool/Decimal; fall back to str."""
        if ',' in val:
            # Remember whether the source used ", " so dump() can round-trip it.
            space_formatted = ', ' in val
            val = [subval.strip() for subval in val.split(',')]
            val = [self.read_data(subval) for subval in val]
            val = InlineList(val)
            val.space_formatted = space_formatted
        elif val == 'True':
            val = True
        elif val == 'False':
            val = False
        elif is_sci_notation(val):
            val = Decimal(val)
        elif is_int(val):
            val = Decimal(val)
        elif is_float(val):
            val = Decimal(val)
        return val
class DictState(DataState):
    """Parses a named ``Name { key = value ... }`` block into an OrderedDict.

    Implemented as a mini state machine: ``run`` is rebound as each token
    (name, open brace, body lines, close brace) is consumed.
    """
    def __init__(self, data):
        super(DictState, self).__init__(data)
        # Accumulates the block's parsed contents.
        self.val = OrderedDict()
        self.run = self.state_name
    def state_name(self, val):
        # First line is the block's name.
        self.debug('= NAME = ')
        self.name = val
        self.run = self.state_open
    def state_open(self, val):
        # Second line must be the opening brace.
        self.debug('= OPEN = ')
        if val != '{':
            raise State.Error("Expected dict open brace")
        # NOTE(review): self.depth is never initialized here — presumably set
        # by the State base class; confirm.
        self.depth += 1
        self.run = self.state_data
    def state_data(self, val):
        if val == '}':
            self.debug('= CLOSED = ')
            # Blocks with the same name accumulate into a DuplicationList.
            if not self.data.get(self.name, False):
                self.data[self.name] = DuplicationList()
            self.data.get(self.name).append(self.val)
            self.depth -= 1
            return self.finish_state()
        else:
            # Body lines are parsed by a nested NeutralState writing into self.val.
            self.debug('= DATA = ')
            return self.rerun_with_state(
                NeutralState(self.val).set_parent(self)
            )
class PostProcessor(object):
    '''
    Applies schema-specific fix-ups to parsed data after the state machine
    has run.  Processors are registered per scenario name in PROCESSORS.
    '''
    PROCESSORS = {}
    def register_processor(mapping, name):
        # Decorator factory: registers the decorated function under `name`.
        def wrapper(func):
            mapping[name] = func
            return func
        return wrapper
    @classmethod
    def run(Class, data):
        return Class().process(data)
    def process(self, data):
        '''
        Does special post-processing based on a file schema: either a full
        GAME file containing SCENARIO entries, or a single named scenario.
        '''
        # Full save file: post-process each contained scenario.
        if "GAME" in data.keys():
            scenarios = data["GAME"][0]["SCENARIO"]
            for scenario in scenarios:
                # NOTE(review): this checks "name" on the top-level `data`,
                # not on `scenario` — looks like it should be
                # `scenario.keys()`; confirm intent.
                if "name" in data.keys():
                    self.process_scenario(scenario)
        elif "name" in data.keys():
            self.process_scenario(data)
        return data
    def process_scenario(self, scenario):
        # Dispatch to the processor registered for this scenario's name, if any.
        processor = self.PROCESSORS.get(scenario["name"], False)
        if processor:
            processor(self, scenario)
    @register_processor(PROCESSORS, "ResearchAndDevelopment")
    def process_rnd(self, scenario):
        # We know for sure that each tech has a list of parts
        # but the list is a duplication list (therefore sometimes parses as a single item)
        for tech in scenario.get("Tech", {}):
            if "part" in tech.keys() and not isinstance(tech["part"], list):
                tech["part"] = DuplicationList([tech["part"]])
def load(fp, options=None):
    """Parse an open file-like object and return the post-processed data.

    :param fp: iterable of lines to parse.
    :param options: optional dict merged over the default machine config.
    :raises DMPException: wrapping any parser State.Error.
    """
    config = {
        # 'verbose': True,
    }
    if options is not None:
        config.update(options)
    parser = ParserStateMachine(config)
    try:
        parser.runAll(fp)
    except State.Error as err:
        raise DMPException.wraps(err)
    return PostProcessor.run(parser.get_data())
def dump(data, options=None):
    """Serialize parsed *data* back to the text format.

    :param data: ordered mapping produced by :func:`load`.
    :param options: optional dict merged over the default config (currently unused).
    :returns: the serialized text, ending with a trailing newline.
    """
    config = {
        # 'verbose': True,
    }
    if options is not None:
        config.update(options)
    lines = []
    # Fix: .iteritems() is Python-2-only; .items() works on both 2 and 3.
    for key, val in data.items():
        lines += format(key, val)
    # Adds Trailing newline
    lines.append('')
    return '\n'.join(lines)
def _test(infile, outfile):
    # Round-trip check: parse `infile`, dump it back out, and show the diff
    # against the original.  Relies on external `diff`/`rm`, so POSIX-only.
    with open(infile, 'r') as fp:
        data = load(fp)
    with open(infile, 'r') as fp:
        raw = fp.read()  # NOTE(review): `raw` is never used below.
    # print json.dumps(data, indent=4)
    out = dump(data)
    with open(outfile, 'w') as fp:
        fp.write(out)
    import subprocess
    subprocess.call(['diff', infile, outfile])
    subprocess.call(['rm', outfile])
if __name__ == "__main__":
ALL_DATA = [
"ContractSystem.txt",
"Funding.txt",
"PCScenario.txt",
"ProgressTracking.txt",
"Reputation.txt",
"ResearchAndDevelopment.txt",
"ResourceScenario.txt",
"ScenarioDestructibles.txt",
"ScenarioNewGameIntro.txt",
"ScenarioUpgradeableFacilities.txt",
"StrategySystem.txt",
"VesselRecovery.txt",
]
outfile = './tmp.txt'
import os.path
for filename in ALL_DATA:
infile = os.path.join('../Universe/Scenarios/Saevon/', filename)
_test(infile, outfile)
| [
"parser_data.DuplicationList",
"type_check.is_sci_notation",
"collections.OrderedDict",
"type_check.is_int",
"error.DMPException.wraps",
"state.State.Error",
"type_check.is_float",
"subprocess.call",
"parser_data.InlineList",
"format.format",
"decimal.Decimal"
] | [((5702, 5744), 'subprocess.call', 'subprocess.call', (["['diff', infile, outfile]"], {}), "(['diff', infile, outfile])\n", (5717, 5744), False, 'import subprocess\n'), ((5749, 5781), 'subprocess.call', 'subprocess.call', (["['rm', outfile]"], {}), "(['rm', outfile])\n", (5764, 5781), False, 'import subprocess\n'), ((411, 424), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (422, 424), False, 'from collections import OrderedDict\n'), ((2517, 2530), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2528, 2530), False, 'from collections import OrderedDict\n'), ((5312, 5328), 'format.format', 'format', (['key', 'val'], {}), '(key, val)\n', (5318, 5328), False, 'from format import format\n'), ((2018, 2033), 'parser_data.InlineList', 'InlineList', (['val'], {}), '(val)\n', (2028, 2033), False, 'from parser_data import InlineList, DuplicationList\n'), ((2798, 2837), 'state.State.Error', 'State.Error', (['"""Expected dict open brace"""'], {}), "('Expected dict open brace')\n", (2809, 2837), False, 'from state import State, StateMachine\n'), ((5029, 5052), 'error.DMPException.wraps', 'DMPException.wraps', (['err'], {}), '(err)\n', (5047, 5052), False, 'from error import DMPException\n'), ((3083, 3100), 'parser_data.DuplicationList', 'DuplicationList', ([], {}), '()\n', (3098, 3100), False, 'from parser_data import InlineList, DuplicationList\n'), ((4737, 4768), 'parser_data.DuplicationList', 'DuplicationList', (["[tech['part']]"], {}), "([tech['part']])\n", (4752, 4768), False, 'from parser_data import InlineList, DuplicationList\n'), ((1464, 1481), 'parser_data.DuplicationList', 'DuplicationList', ([], {}), '()\n', (1479, 1481), False, 'from parser_data import InlineList, DuplicationList\n'), ((2202, 2222), 'type_check.is_sci_notation', 'is_sci_notation', (['val'], {}), '(val)\n', (2217, 2222), False, 'from type_check import is_int, is_float, is_sci_notation\n'), ((2242, 2254), 'decimal.Decimal', 'Decimal', (['val'], {}), '(val)\n', (2249, 
2254), False, 'from decimal import Decimal\n'), ((2268, 2279), 'type_check.is_int', 'is_int', (['val'], {}), '(val)\n', (2274, 2279), False, 'from type_check import is_int, is_float, is_sci_notation\n'), ((2299, 2311), 'decimal.Decimal', 'Decimal', (['val'], {}), '(val)\n', (2306, 2311), False, 'from decimal import Decimal\n'), ((2325, 2338), 'type_check.is_float', 'is_float', (['val'], {}), '(val)\n', (2333, 2338), False, 'from type_check import is_int, is_float, is_sci_notation\n'), ((2358, 2370), 'decimal.Decimal', 'Decimal', (['val'], {}), '(val)\n', (2365, 2370), False, 'from decimal import Decimal\n')] |
import torch
import math
from .grids import *
from .conversions import *
# =============================================================================
# Equirectangular mapping functions
# =============================================================================
#
# Note that there is no concept of padding for spherical images because there
# are no image boundaries.
# #
def equirectangular_kernel(shape, kernel_size, dilation=1):
    """
    Returns a kernel sampling grid with angular spacing according to the provided shape (and associated computed angular resolution) of an equirectangular image
    shape: (H, W)
    kernel_size: (kh, kw)
    """
    kh, kw = kernel_size
    # Angular resolution per pixel of the equirectangular image
    res_lon, res_lat = get_equirectangular_grid_resolution(shape)
    dlon = torch.zeros(kernel_size)
    dlat = torch.zeros(kernel_size)
    # Integer offsets of each kernel tap from the kernel center
    row_offsets = [i - (kh // 2) for i in range(kh)]
    col_offsets = [j - (kw // 2) for j in range(kw)]
    for row, dv in enumerate(row_offsets):
        for col, du in enumerate(col_offsets):
            dlon[row, col] = du * dilation * res_lon
            # Sign flip because +Y is down in image space
            dlat[row, col] = dv * dilation * -res_lat
    # Returns the kernel differentials as kh x kw
    return dlon, dlat
def grid_projection_map(shape, kernel_size, stride=1, dilation=1):
    """Naive sampling map: offsets each pixel's (lon, lat) directly by the
    kernel's angular differentials, then converts back to pixel coordinates.

    Returns a strided H x W x kh*kw x 2 pixel sampling map.
    """
    H, W = shape
    kh, kw = kernel_size
    # Per-pixel spherical coordinates and the kernel's angular offsets
    lon, lat = spherical_meshgrid(shape)
    dlon, dlat = equirectangular_kernel(shape, kernel_size, dilation)
    # Broadcast pixel centers (H, W, 1) against kernel offsets (1, 1, kh*kw)
    lat = lat.view(H, W, 1)
    lon = lon.view(H, W, 1)
    dlon = dlon.view(1, 1, kh * kw)
    dlat = dlat.view(1, 1, kh * kw)
    sample_lat = lat + dlat
    sample_lon = lon + dlon
    # Convert to pixel coordinates: H x W x kh*kw x 2
    map_pixels = convert_spherical_to_image(
        torch.stack((sample_lon, sample_lat), -1), shape)
    # Apply the output stride
    map_pixels = map_pixels[::stride, ::stride, ...].contiguous()
    return map_pixels
def inverse_gnomonic_projection_map(shape, kernel_size, stride=1, dilation=1):
    """Sampling map that warps the kernel by the inverse gnomonic projection
    at every pixel of an equirectangular image.

    Returns a strided H x W x kh*kw x 2 pixel sampling map.
    """
    # For convenience
    H, W = shape
    kh, kw = kernel_size
    # Get lat/lon mesh grid and resolution
    lon, lat = spherical_meshgrid(shape)
    # Get the kernel differentials
    dlon, dlat = equirectangular_kernel(shape, kernel_size, dilation)
    # Equalize views
    lat = lat.view(H, W, 1)
    lon = lon.view(H, W, 1)
    dlon = dlon.view(1, 1, kh * kw)
    dlat = dlat.view(1, 1, kh * kw)
    # Compute the inverse gnomonic projection of each tangent grid (the kernel) back onto sphere at each pixel of the equirectangular image.
    rho = (dlon**2 + dlat**2).sqrt()
    nu = rho.atan()
    map_lat = (nu.cos() * lat.sin() + dlat * nu.sin() * lat.cos() / rho).asin()
    map_lon = lon + torch.atan2(
        dlon * nu.sin(),
        rho * lat.cos() * nu.cos() - dlat * lat.sin() * nu.sin())
    # Handle the (0,0) case: the center tap has rho == 0 (division above is
    # undefined there), so it maps exactly to the pixel itself.
    map_lat[..., [kh * kw // 2]] = lat
    map_lon[..., [kh * kw // 2]] = lon
    # Compensate for longitudinal wrap around
    map_lon = ((map_lon + math.pi) % (2 * math.pi)) - math.pi
    # Convert the spherical coordinates to pixel coordinates
    # H x W x KH*KW x 2
    map_pixels = convert_spherical_to_image(
        torch.stack((map_lon, map_lat), -1), shape)
    # Adjust the stride of the map accordingly
    map_pixels = map_pixels[::stride, ::stride, ...].contiguous()
    # Return the pixel sampling map
    # H x W x KH*KW x 2
    return map_pixels
def inverse_equirectangular_projection_map(shape,
                                           kernel_size,
                                           stride=1,
                                           dilation=1):
    """Sampling map that warps the kernel by the inverse equirectangular
    projection at every pixel (longitude offsets scaled by 1/cos(lat)).

    Returns a strided H x W x kh*kw x 2 pixel sampling map.
    """
    # For convenience
    H, W = shape
    kh, kw = kernel_size
    # Get lat/lon mesh grid and resolution
    lon, lat = spherical_meshgrid(shape)
    # Get the kernel differentials
    dlon, dlat = equirectangular_kernel(shape, kernel_size, dilation)
    # Equalize views
    lat = lat.view(H, W, 1)
    lon = lon.view(H, W, 1)
    dlon = dlon.view(1, 1, kh * kw)
    dlat = dlat.view(1, 1, kh * kw)
    # Compute the inverse equirectangular projection of each tangent grid (the kernel) back onto sphere at each pixel of the equirectangular image.
    # Compute the projection back onto sphere
    map_lat = lat + dlat
    # Longitude offsets stretch toward the poles (1/cos(lat) scaling).
    map_lon = lon + dlon / map_lat.cos()
    # Compensate for longitudinal wrap around
    map_lon = ((map_lon + math.pi) % (2 * math.pi)) - math.pi
    # Convert the spherical coordinates to pixel coordinates
    # H x W x KH*KW x 2
    map_pixels = convert_spherical_to_image(
        torch.stack((map_lon, map_lat), -1), shape)
    # Adjust the stride of the map accordingly
    map_pixels = map_pixels[::stride, ::stride, ...].contiguous()
    # Return the pixel sampling map
    # H x W x KH*KW x 2
    return map_pixels
# =============================================================================
# Cube map mapping functions
# =============================================================================
def cube_kernel(cube_dim, kernel_size, dilation=1):
    """
    Returns a kernel sampling grid with angular spacing according to the provided cube dimension (and associated computed angular resolution) of a cube map
    cube_dim: length of side of square face of cube map
    kernel_size: (kh, kw)
    """
    kh, kw = kernel_size
    # Per-pixel step on a unit-length cube face
    step = 1 / cube_dim
    dx = torch.zeros(kernel_size)
    dy = torch.zeros(kernel_size)
    half_h, half_w = kh // 2, kw // 2
    for row in range(kh):
        for col in range(kw):
            dx[row, col] = (col - half_w) * dilation * step
            # Sign flip because +Y is down in image coordinates
            dy[row, col] = (row - half_h) * dilation * -step
    # Returns the kernel differentials as kh x kw
    return dx, dy
def inverse_cube_face_projection_map(cube_dim,
                                     kernel_size,
                                     stride=1,
                                     dilation=1,
                                     polar=False):
    """
    Creates a sampling map which models each face of the cube as an gnomonic projection (equatorial aspect) of the sphere. Warps the kernel according to the inverse gnomonic projection for the face.
    """
    # For convenience
    kh, kw = kernel_size
    # Get a meshgrid of a cube face in terms of spherical coordinates
    face_lon, face_lat = cube_face_spherical_meshgrid(cube_dim, polar)
    # Get the kernel differentials
    dx, dy = cube_kernel(cube_dim, kernel_size, dilation)
    # Equalize views
    face_lat = face_lat.view(cube_dim, cube_dim, 1)
    face_lon = face_lon.view(cube_dim, cube_dim, 1)
    dx = dx.view(1, 1, kh * kw)
    dy = dy.view(1, 1, kh * kw)
    # Compute the inverse gnomonic projection of each tangent grid (the kernel) back onto sphere at each pixel of the cube face
    rho = (dx**2 + dy**2).sqrt()
    nu = rho.atan()
    map_lat = (nu.cos() * face_lat.sin() +
               dy * nu.sin() * face_lat.cos() / rho).asin()
    map_lon = face_lon + torch.atan2(
        dx * nu.sin(),
        rho * face_lat.cos() * nu.cos() - dy * face_lat.sin() * nu.sin())
    # Handle the (0,0) case: the center tap has rho == 0 (undefined above),
    # so it maps exactly to the face pixel itself.
    map_lat[..., [kh * kw // 2]] = face_lat
    map_lon[..., [kh * kw // 2]] = face_lon
    # Create the sample map in terms of spherical coordinates
    map_face = torch.stack((map_lon, map_lat), -1)
    # Convert the cube coordinates on the sphere to pixels in the cube map
    map_pixels = convert_spherical_to_cube_face(map_face, cube_dim)
    # Adjust the stride of the map accordingly
    map_pixels = map_pixels[::stride, ::stride, ...].contiguous()
    # Return the pixel sampling map
    # cube_dime x cube_dim x KH*KW x 2
    return map_pixels
"torch.stack",
"torch.zeros"
] | [((922, 946), 'torch.zeros', 'torch.zeros', (['kernel_size'], {}), '(kernel_size)\n', (933, 946), False, 'import torch\n'), ((958, 982), 'torch.zeros', 'torch.zeros', (['kernel_size'], {}), '(kernel_size)\n', (969, 982), False, 'import torch\n'), ((5778, 5802), 'torch.zeros', 'torch.zeros', (['kernel_size'], {}), '(kernel_size)\n', (5789, 5802), False, 'import torch\n'), ((5812, 5836), 'torch.zeros', 'torch.zeros', (['kernel_size'], {}), '(kernel_size)\n', (5823, 5836), False, 'import torch\n'), ((7720, 7755), 'torch.stack', 'torch.stack', (['(map_lon, map_lat)', '(-1)'], {}), '((map_lon, map_lat), -1)\n', (7731, 7755), False, 'import torch\n'), ((2018, 2053), 'torch.stack', 'torch.stack', (['(map_lon, map_lat)', '(-1)'], {}), '((map_lon, map_lat), -1)\n', (2029, 2053), False, 'import torch\n'), ((3503, 3538), 'torch.stack', 'torch.stack', (['(map_lon, map_lat)', '(-1)'], {}), '((map_lon, map_lat), -1)\n', (3514, 3538), False, 'import torch\n'), ((4875, 4910), 'torch.stack', 'torch.stack', (['(map_lon, map_lat)', '(-1)'], {}), '((map_lon, map_lat), -1)\n', (4886, 4910), False, 'import torch\n')] |
import json
from typing import Any, Callable, Dict, Optional
import attr
from .interfaces import Event, Router
@attr.s(kw_only=True)
class SingleRoute(Router):
    """
    Router that always dispatches to one registered callable, unconditionally.

    :param route: The single defined route. Only set via ``add_route``.
    """

    route: Optional[Callable] = attr.ib(init=False, default=None)

    def add_route(self, *, fn: Callable) -> None:
        """
        Registers the one and only route.

        :param fn: The callable to route to.
        :type fn: callable
        :raises ValueError: Raised when a single route has already been defined.
        """
        if self.route is not None:
            raise ValueError("Single route is already defined. SingleRoute can only have a single defined route.")
        self.route = fn

    def get_route(self, *, event: Optional[Event]) -> Callable:
        """
        Returns the registered route; the event is ignored.

        :raises ValueError: Raised if no route is defined.
        :rtype: callable
        """
        route = self.route
        if route is None:
            raise ValueError("No route defined.")
        return route

    def dispatch(self, *, event: Event) -> Any:
        """
        Looks up the route and invokes it with the event.

        :param event: The event to pass to the callable route.
        """
        return self.get_route(event=event)(event=event)
@attr.s(kw_only=True)
class EventField(Router):
    """
    Routes on a the value of the specified top-level ``key`` in the
    given ``Event.raw`` dict.
    :param key: The name of the top-level key to look for when routing.
    :param routes: The routes mapping. Only set via ``add_route``
    """
    key: str = attr.ib(kw_only=True)
    routes: Dict[str, Callable] = attr.ib(init=False, factory=dict)
    def add_route(self, *, fn: Callable, key: str) -> None:
        """
        Adds the route with the given key.
        :param fn: The callable to route to.
        :type fn: callable
        :param key: The key to associate the route with.
        :type fn: str
        """
        # Re-registering the same key silently replaces the previous route.
        self.routes[key] = fn
    def get_route(self, *, event: Event) -> Callable:
        """
        Returns the matching route for the value of the ``key`` in the
        given event.
        :raises ValueError: Raised if no route is defined or routing key is
            not present in the event.
        :rtype: callable
        """
        field_value: str = event.raw.get(self.key, None)
        if field_value is None:
            raise ValueError(f"Routing key ({self.key}) not present in the event.")
        try:
            return self.routes[field_value]
        except KeyError:
            raise ValueError(f"No route configured for given field ({field_value}).")
    def dispatch(self, *, event: Event) -> Any:
        """
        Gets the configured route and invokes the callable.
        :param event: The event to pass to the callable route.
        """
        route = self.get_route(event=event)
        return route(event=event)
@attr.s(kw_only=True)
class SQSMessage:
    """Parsed view of a single raw SQS message record."""

    # System attributes plus any leftover top-level keys from the record.
    meta: Dict[str, Any] = attr.ib(factory=dict)
    # JSON-decoded message body.
    body: Dict[str, Any] = attr.ib(factory=dict)
    # Routing key extracted from the message attributes (may be None).
    key: str = attr.ib()
    # The originating event, carried through for handlers.
    event: Event = attr.ib()

    @classmethod
    def from_raw_sqs_message(cls, *, raw_message: Dict[str, Any], key_name: str, event: Event):
        """
        Builds an SQSMessage from a raw SQS record dict (consumes the dict).

        :param raw_message: A single record from the SQS event payload.
        :param key_name: Name of the message attribute holding the routing key.
        :param event: The originating event.
        """
        meta = {}
        attributes = raw_message.pop("attributes", None)
        if attributes:
            meta.update(attributes)
        # Fix: was a duplicated `body = body = ...` assignment.
        body = raw_message.pop("body", "")
        message_attributes = raw_message.pop("messageAttributes", None)
        key = None
        if message_attributes:
            key_attribute = message_attributes.get(key_name, None)
            if key_attribute is not None:
                key = key_attribute["stringValue"]
        for k, value in raw_message.items():
            meta[k] = value
        # Decode the JSON body; a missing/empty body becomes {} (matching the
        # dict default of `body`) instead of raising json.JSONDecodeError.
        body = json.loads(body) if body else {}
        return cls(meta=meta, body=body, key=key, event=event)
@attr.s(kw_only=True)
class SQSMessageField(Router):
    """
    Processes every message record in a given ``Event``, routing each one
    by the configured message-attribute ``key``.
    :param key: The name of the message-level key to look for when routing.
    :param routes: The routes mapping. Only set via ``add_route``
    """
    key: str = attr.ib(kw_only=True)
    routes: Dict[str, Callable] = attr.ib(init=False, factory=dict)

    def _get_message(self, raw_message: Dict[str, Any], event: Event) -> SQSMessage:
        """Wrap one raw record dict in an SQSMessage."""
        return SQSMessage.from_raw_sqs_message(raw_message=raw_message, key_name=self.key, event=event)

    def add_route(self, *, fn: Callable, key: str) -> None:
        """
        Register *fn* as the route for *key*.
        :param fn: The callable to route to.
        :type fn: callable
        :param key: The key to associate the route with.
        :type key: str
        """
        self.routes[key] = fn

    def get_route(self, *, message: SQSMessage) -> Callable:
        """
        Return the route matching the routing key of the given message.
        :raises ValueError: Raised if no route is defined or routing key is
            not present in the message.
        :rtype: callable
        """
        field_value: str = message.key
        if field_value is None:
            raise ValueError(f"Routing key ({self.key}) not present in the message.")
        if field_value not in self.routes:
            raise ValueError(f"No route configured for given field ({field_value}).")
        return self.routes[field_value]

    def dispatch(self, *, event: Event) -> Any:
        """
        Route and invoke the matching callable for every record in the event.
        :param event: The event to parse for messages.
        :raises ValueError: If the event carries no ``Records``.
        """
        raw_messages = event.raw.get("Records", None)
        if raw_messages is None:
            raise ValueError("No messages present in Event.")
        for raw_message in raw_messages:
            message = self._get_message(raw_message, event=event)
            # Process each message now.
            self.get_route(message=message)(message=message)
        # SQS Lambdas don't return a value.
        return None
@attr.s(kw_only=True)
class GenericSQSMessage(Router):
    """
    Processes every message record in a given ``Event``, sending each one
    to a single unconditional route.
    :param route: The single defined route. Only set via ``add_route``.
    """
    route: Optional[Callable] = attr.ib(init=False, default=None)

    def _get_message(self, raw_message: Dict[str, Any], event: Event) -> SQSMessage:
        """Wrap one raw record dict in an SQSMessage (no routing key)."""
        return SQSMessage.from_raw_sqs_message(raw_message=raw_message, key_name=None, event=event)

    def add_route(self, *, fn: Callable) -> None:
        """
        Register the single route.
        :param fn: The callable to route to.
        :type fn: callable
        :raises ValueError: Raised when a single route has already been defined.
        """
        if self.route is not None:
            raise ValueError("Single route is already defined. SingleRoute can only have a single defined route.")
        self.route = fn

    def get_route(self, *, message: SQSMessage) -> Callable:
        """
        Return the single registered route.
        :raises ValueError: Raised if no route is defined.
        :rtype: callable
        """
        route = self.route
        if route is None:
            raise ValueError("No route defined.")
        return route

    def dispatch(self, *, event: Event) -> Any:
        """
        Invoke the single route for every record in the event.
        :param event: The event to parse for messages.
        :raises ValueError: If the event carries no ``Records``.
        """
        raw_messages = event.raw.get("Records", None)
        if raw_messages is None:
            raise ValueError("No messages present in Event.")
        for raw_message in raw_messages:
            message = self._get_message(raw_message, event=event)
            # Process each message now.
            self.get_route(message=message)(message=message)
        # SQS Lambdas don't return a value.
        return None
| [
"json.loads",
"attr.s",
"attr.ib"
] | [((117, 137), 'attr.s', 'attr.s', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (123, 137), False, 'import attr\n'), ((1411, 1431), 'attr.s', 'attr.s', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (1417, 1431), False, 'import attr\n'), ((3057, 3077), 'attr.s', 'attr.s', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (3063, 3077), False, 'import attr\n'), ((4039, 4059), 'attr.s', 'attr.s', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (4045, 4059), False, 'import attr\n'), ((6326, 6346), 'attr.s', 'attr.s', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (6332, 6346), False, 'import attr\n'), ((348, 381), 'attr.ib', 'attr.ib', ([], {'init': '(False)', 'default': 'None'}), '(init=False, default=None)\n', (355, 381), False, 'import attr\n'), ((1727, 1748), 'attr.ib', 'attr.ib', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (1734, 1748), False, 'import attr\n'), ((1783, 1816), 'attr.ib', 'attr.ib', ([], {'init': '(False)', 'factory': 'dict'}), '(init=False, factory=dict)\n', (1790, 1816), False, 'import attr\n'), ((3123, 3144), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (3130, 3144), False, 'import attr\n'), ((3172, 3193), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (3179, 3193), False, 'import attr\n'), ((3209, 3218), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3216, 3218), False, 'import attr\n'), ((3238, 3247), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3245, 3247), False, 'import attr\n'), ((4371, 4392), 'attr.ib', 'attr.ib', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (4378, 4392), False, 'import attr\n'), ((4427, 4460), 'attr.ib', 'attr.ib', ([], {'init': '(False)', 'factory': 'dict'}), '(init=False, factory=dict)\n', (4434, 4460), False, 'import attr\n'), ((6563, 6596), 'attr.ib', 'attr.ib', ([], {'init': '(False)', 'default': 'None'}), '(init=False, default=None)\n', (6570, 6596), False, 'import attr\n'), ((3956, 3972), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (3966, 3972), 
False, 'import json\n')] |
"""autogenerated by genpy from multi_map_server/VerticalOccupancyGridList.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class VerticalOccupancyGridList(genpy.Message):
  """Autogenerated ROS message class for multi_map_server/VerticalOccupancyGridList.

  Wire layout (little-endian): two float32 (x, y) followed by three
  length-prefixed int32 arrays (upper, lower, mass).
  """
  _md5sum = "7ef85cc95b82747f51eb01a16bd7c795"
  _type = "multi_map_server/VerticalOccupancyGridList"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """float32 x
float32 y
int32[] upper
int32[] lower
int32[] mass
"""
  __slots__ = ['x','y','upper','lower','mass']
  _slot_types = ['float32','float32','int32[]','int32[]','int32[]']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       x,y,upper,lower,mass
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(VerticalOccupancyGridList, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.x is None:
        self.x = 0.
      if self.y is None:
        self.y = 0.
      if self.upper is None:
        self.upper = []
      if self.lower is None:
        self.lower = []
      if self.mass is None:
        self.mass = []
    else:
      self.x = 0.
      self.y = 0.
      self.upper = []
      self.lower = []
      self.mass = []
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # Fixed-size header: two packed float32 values (x, y).
      buff.write(_struct_2f.pack(_x.x, _x.y))
      # Each array is written as a uint32 length followed by the int32 data.
      length = len(self.upper)
      buff.write(_struct_I.pack(length))
      pattern = '<%si'%length
      buff.write(struct.pack(pattern, *self.upper))
      length = len(self.lower)
      buff.write(_struct_I.pack(length))
      pattern = '<%si'%length
      buff.write(struct.pack(pattern, *self.lower))
      length = len(self.mass)
      buff.write(_struct_I.pack(length))
      pattern = '<%si'%length
      buff.write(struct.pack(pattern, *self.mass))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      _x = self
      start = end
      end += 8
      # 8 bytes: two packed float32 values (x, y).
      (_x.x, _x.y,) = _struct_2f.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%si'%length
      start = end
      end += struct.calcsize(pattern)
      self.upper = struct.unpack(pattern, str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%si'%length
      start = end
      end += struct.calcsize(pattern)
      self.lower = struct.unpack(pattern, str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%si'%length
      start = end
      end += struct.calcsize(pattern)
      self.mass = struct.unpack(pattern, str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_struct_2f.pack(_x.x, _x.y))
      # Arrays are numpy arrays here; tostring() emits the raw int32 bytes.
      length = len(self.upper)
      buff.write(_struct_I.pack(length))
      pattern = '<%si'%length
      buff.write(self.upper.tostring())
      length = len(self.lower)
      buff.write(_struct_I.pack(length))
      pattern = '<%si'%length
      buff.write(self.lower.tostring())
      length = len(self.mass)
      buff.write(_struct_I.pack(length))
      pattern = '<%si'%length
      buff.write(self.mass.tostring())
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      _x = self
      start = end
      end += 8
      # 8 bytes: two packed float32 values (x, y).
      (_x.x, _x.y,) = _struct_2f.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%si'%length
      start = end
      end += struct.calcsize(pattern)
      self.upper = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%si'%length
      start = end
      end += struct.calcsize(pattern)
      self.lower = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%si'%length
      start = end
      end += struct.calcsize(pattern)
      self.mass = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I  # shared Struct for the 4-byte array-length prefix
_struct_2f = struct.Struct("<2f")  # little-endian pair of float32 (x, y)
| [
"struct.calcsize",
"struct.pack",
"struct.unpack",
"genpy.DeserializationError",
"struct.Struct"
] | [((6005, 6025), 'struct.Struct', 'struct.Struct', (['"""<2f"""'], {}), "('<2f')\n", (6018, 6025), False, 'import struct\n'), ((3115, 3139), 'struct.calcsize', 'struct.calcsize', (['pattern'], {}), '(pattern)\n', (3130, 3139), False, 'import struct\n'), ((3159, 3197), 'struct.unpack', 'struct.unpack', (['pattern', 'str[start:end]'], {}), '(pattern, str[start:end])\n', (3172, 3197), False, 'import struct\n'), ((3343, 3367), 'struct.calcsize', 'struct.calcsize', (['pattern'], {}), '(pattern)\n', (3358, 3367), False, 'import struct\n'), ((3387, 3425), 'struct.unpack', 'struct.unpack', (['pattern', 'str[start:end]'], {}), '(pattern, str[start:end])\n', (3400, 3425), False, 'import struct\n'), ((3571, 3595), 'struct.calcsize', 'struct.calcsize', (['pattern'], {}), '(pattern)\n', (3586, 3595), False, 'import struct\n'), ((3614, 3652), 'struct.unpack', 'struct.unpack', (['pattern', 'str[start:end]'], {}), '(pattern, str[start:end])\n', (3627, 3652), False, 'import struct\n'), ((5225, 5249), 'struct.calcsize', 'struct.calcsize', (['pattern'], {}), '(pattern)\n', (5240, 5249), False, 'import struct\n'), ((5480, 5504), 'struct.calcsize', 'struct.calcsize', (['pattern'], {}), '(pattern)\n', (5495, 5504), False, 'import struct\n'), ((5735, 5759), 'struct.calcsize', 'struct.calcsize', (['pattern'], {}), '(pattern)\n', (5750, 5759), False, 'import struct\n'), ((2089, 2122), 'struct.pack', 'struct.pack', (['pattern', '*self.upper'], {}), '(pattern, *self.upper)\n', (2100, 2122), False, 'import struct\n'), ((2243, 2276), 'struct.pack', 'struct.pack', (['pattern', '*self.lower'], {}), '(pattern, *self.lower)\n', (2254, 2276), False, 'import struct\n'), ((2396, 2428), 'struct.pack', 'struct.pack', (['pattern', '*self.mass'], {}), '(pattern, *self.mass)\n', (2407, 2428), False, 'import struct\n'), ((3713, 3742), 'genpy.DeserializationError', 'genpy.DeserializationError', (['e'], {}), '(e)\n', (3739, 3742), False, 'import genpy\n'), ((5904, 5933), 'genpy.DeserializationError', 
'genpy.DeserializationError', (['e'], {}), '(e)\n', (5930, 5933), False, 'import genpy\n')] |
"""
==============
GLVQ Benchmark
==============
This example shows the differences between the 4 different GLVQ implementations and LMNN.
The Image Segmentation dataset is used for training and test. Each plot shows the projection
and classification from each implementation. Because Glvq can't project the data on its own
a PCA is used.
"""
from __future__ import with_statement
import numpy as np
import matplotlib.pyplot as plt
from metric_learn import LMNN
from sklearn.decomposition import PCA
from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel
from sklearn_lvq.utils import _to_tango_colors, _tango_color
print(__doc__)
def plot(data, target, target_p, prototype, prototype_label, p):
    """Scatter-plot projected samples and prototypes on axes *p*.

    Samples are drawn twice: colored by true label (translucent) and by
    predicted label (dots). Prototypes are drawn as diamonds with their
    labels overlaid as colored dots.

    :param data: 2-d projected samples, indexed as data[:, 0]/data[:, 1].
    :param target: True labels.
    :param target_p: Predicted labels.
    :param prototype: 2-d projected prototype positions.
    :param prototype_label: Prototype labels (sequence, or a scalar for a
        single prototype).
    :param p: Matplotlib axes to draw on.
    """
    p.scatter(data[:, 0], data[:, 1], c=_to_tango_colors(target, 0), alpha=0.5)
    p.scatter(data[:, 0], data[:, 1], c=_to_tango_colors(target_p, 0),
              marker='.')
    p.scatter(prototype[:, 0], prototype[:, 1],
              c=_tango_color('aluminium', 5), marker='D')
    try:
        p.scatter(prototype[:, 0], prototype[:, 1], s=60,
                  c=_to_tango_colors(prototype_label, 0), marker='.')
    except Exception:
        # Fall back for a scalar prototype label. The previous bare
        # ``except:`` also swallowed KeyboardInterrupt/SystemExit.
        p.scatter(prototype[:, 0], prototype[:, 1], s=60,
                  c=_tango_color(prototype_label), marker='.')
    p.axis('equal')
# Load the Image Segmentation dataset: first CSV column is the class
# label, the remaining columns are the numeric features.
y = []
x = []
with open('segmentation.data') as f:
    for line in f:
        v = line.split(',')
        y.append(v[0])
        x.append(v[1:])
x = np.asarray(x, dtype='float64')
y = np.asarray(y)
# LMNN baseline: learn a metric, then show the transformed data.
lmnn = LMNN(k=5, learn_rate=1e-6)
lmnn.fit(x, y)
x_t = lmnn.transform(x)
p1 = plt.subplot(231)
p1.scatter(x_t[:, 0], x_t[:, 1], c=_to_tango_colors(y, 0))
p1.axis('equal')
p1.set_title('LMNN')
# GLVQ
# GLVQ cannot project on its own, so PCA provides the 2-d view.
glvq = GlvqModel()
glvq.fit(x, y)
p2 = plt.subplot(232)
p2.set_title('GLVQ')
plot(PCA().fit_transform(x), y, glvq.predict(x), glvq.w_, glvq.c_w_, p2)
# GRLVQ
grlvq = GrlvqModel()
grlvq.fit(x, y)
p3 = plt.subplot(233)
p3.set_title('GRLVQ')
plot(grlvq.project(x, 2),
     y, grlvq.predict(x), grlvq.project(grlvq.w_, 2),
     grlvq.c_w_, p3)
# GMLVQ
gmlvq = GmlvqModel()
gmlvq.fit(x, y)
p4 = plt.subplot(234)
p4.set_title('GMLVQ')
plot(gmlvq.project(x, 2),
     y, gmlvq.predict(x), gmlvq.project(gmlvq.w_, 2),
     gmlvq.c_w_, p4)
# LGMLVQ
# LGMLVQ learns one matrix per prototype; show projections for two of them.
lgmlvq = LgmlvqModel()
lgmlvq.fit(x, y)
p5 = plt.subplot(235)
elem_set = list(set(lgmlvq.c_w_))
p5.set_title('LGMLVQ 1')
plot(lgmlvq.project(x, 1, 2, True),
     y, lgmlvq.predict(x), lgmlvq.project(np.array([lgmlvq.w_[1]]), 1, 2),
     elem_set.index(lgmlvq.c_w_[1]), p5)
p6 = plt.subplot(236)
p6.set_title('LGMLVQ 2')
plot(lgmlvq.project(x, 6, 2, True),
     y, lgmlvq.predict(x), lgmlvq.project(np.array([lgmlvq.w_[6]]), 6, 2),
     elem_set.index(lgmlvq.c_w_[6]), p6)
plt.show()
| [
"sklearn_lvq.GmlvqModel",
"sklearn_lvq.LgmlvqModel",
"sklearn.decomposition.PCA",
"numpy.asarray",
"sklearn_lvq.utils._to_tango_colors",
"numpy.array",
"metric_learn.LMNN",
"sklearn_lvq.utils._tango_color",
"sklearn_lvq.GlvqModel",
"matplotlib.pyplot.subplot",
"sklearn_lvq.GrlvqModel",
"matplo... | [((1441, 1471), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': '"""float64"""'}), "(x, dtype='float64')\n", (1451, 1471), True, 'import numpy as np\n'), ((1476, 1489), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1486, 1489), True, 'import numpy as np\n'), ((1498, 1525), 'metric_learn.LMNN', 'LMNN', ([], {'k': '(5)', 'learn_rate': '(1e-06)'}), '(k=5, learn_rate=1e-06)\n', (1502, 1525), False, 'from metric_learn import LMNN\n'), ((1570, 1586), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (1581, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1710), 'sklearn_lvq.GlvqModel', 'GlvqModel', ([], {}), '()\n', (1708, 1710), False, 'from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel\n'), ((1731, 1747), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (1742, 1747), True, 'import matplotlib.pyplot as plt\n'), ((1859, 1871), 'sklearn_lvq.GrlvqModel', 'GrlvqModel', ([], {}), '()\n', (1869, 1871), False, 'from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel\n'), ((1893, 1909), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (1904, 1909), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2062), 'sklearn_lvq.GmlvqModel', 'GmlvqModel', ([], {}), '()\n', (2060, 2062), False, 'from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel\n'), ((2084, 2100), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (2095, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2243, 2256), 'sklearn_lvq.LgmlvqModel', 'LgmlvqModel', ([], {}), '()\n', (2254, 2256), False, 'from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel\n'), ((2279, 2295), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (2290, 2295), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2528), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (2523, 2528), True, 'import matplotlib.pyplot 
as plt\n'), ((2707, 2717), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2715, 2717), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1644), 'sklearn_lvq.utils._to_tango_colors', '_to_tango_colors', (['y', '(0)'], {}), '(y, 0)\n', (1638, 1644), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), ((2433, 2457), 'numpy.array', 'np.array', (['[lgmlvq.w_[1]]'], {}), '([lgmlvq.w_[1]])\n', (2441, 2457), True, 'import numpy as np\n'), ((2632, 2656), 'numpy.array', 'np.array', (['[lgmlvq.w_[6]]'], {}), '([lgmlvq.w_[6]])\n', (2640, 2656), True, 'import numpy as np\n'), ((757, 784), 'sklearn_lvq.utils._to_tango_colors', '_to_tango_colors', (['target', '(0)'], {}), '(target, 0)\n', (773, 784), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), ((837, 866), 'sklearn_lvq.utils._to_tango_colors', '_to_tango_colors', (['target_p', '(0)'], {}), '(target_p, 0)\n', (853, 866), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), ((958, 986), 'sklearn_lvq.utils._tango_color', '_tango_color', (['"""aluminium"""', '(5)'], {}), "('aluminium', 5)\n", (970, 986), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), ((1774, 1779), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (1777, 1779), False, 'from sklearn.decomposition import PCA\n'), ((1087, 1123), 'sklearn_lvq.utils._to_tango_colors', '_to_tango_colors', (['prototype_label', '(0)'], {}), '(prototype_label, 0)\n', (1103, 1123), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n'), ((1227, 1256), 'sklearn_lvq.utils._tango_color', '_tango_color', (['prototype_label'], {}), '(prototype_label)\n', (1239, 1256), False, 'from sklearn_lvq.utils import _to_tango_colors, _tango_color\n')] |
def setopts(defaults, given):
    """Merge *given* keyword options into *defaults* and return *defaults*.

    Nested dictionaries are merged recursively. A warning is shown for any
    key in *given* that does not exist in *defaults*; such keys are ignored.
    Typical usage: ``kwargs = setopts(defaults, kwargs)``.
    """
    for name in given:
        value = given[name]
        if type(value) == dict:
            # Recurse into nested option dictionaries.
            setopts(defaults[name], value)
        elif name in defaults:
            defaults[name] = value
        else:
            warning('Ignoring invalid keyword option "%s".' % name)
    return defaults
def log_test():
    """Exercise log() once with logging enabled and once disabled."""
    log("Test 1", {"logging": True})
    log("Test 2", {"logging": False})
def log(msg, opts):
    """Write *msg* to the console or to a file-like object.

    The destination is chosen by ``opts['logging']``: True prints to
    stdout, an object with a ``write`` attribute receives the message via
    ``write()``/``flush()``, and anything else discards the message.
    The message is prefixed with the calling function's name.
    """
    import os
    import sys

    if 'logging' not in opts:
        # Work on a copy so the caller's dict is not mutated.
        opts = opts.copy()
        opts['logging'] = False

    prefix = sys._getframe(1).f_code.co_name + '(): '
    dest = opts['logging']

    if isinstance(dest, bool) and dest:
        if pythonshell() == 'jupyter-notebook':
            # Don't show full path information.
            msg = msg.replace(opts['cachedir'] + os.path.sep, '')
            msg = msg.replace(opts['cachedir'], '')
        print(prefix + msg)
    elif hasattr(dest, 'write'):
        dest.write(prefix + msg + "\n")
        dest.flush()
def jsonparse(res, url):
    """Decode the response body of *res* as UTF-8 JSON and return it.

    :param res: File-like object whose ``read()`` returns UTF-8 bytes.
    :param url: URL the response came from; used only in the error message.

    Calls error() with a short message if the body cannot be read or parsed.
    """
    from json import loads
    try:
        return loads(res.read().decode('utf-8'))
    except Exception:
        # Was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.
        error('Could not parse JSON from %s' % url)
def pythonshell():
    """Determine which Python shell is running.

    Returns one of:
      'shell'            - plain ``python`` on the command line
      'ipython'          - ``ipython`` on the command line
      'ipython-notebook' - Spyder or ``ipython qtconsole``
      'jupyter-notebook' - a Jupyter notebook launched via the
                           ``jupyter-notebook`` executable

    On Windows, jupyter-notebook cannot be detected and ipython-notebook
    will be returned.

    See also https://stackoverflow.com/a/37661854
    """
    import os

    program = ''
    if '_' in os.environ:
        program = os.path.basename(os.environ['_'])

    try:
        # get_ipython() only exists inside IPython-based shells.
        shell_name = get_ipython().__class__.__name__
    except Exception:
        return 'shell'

    if shell_name == 'TerminalInteractiveShell':
        return 'ipython'
    if shell_name == 'ZMQInteractiveShell':
        if 'jupyter-notebook' in program:
            return 'jupyter-notebook'
        return 'ipython-notebook'
    return 'shell'
def warning_test():
    """Emit interleaved normal and HAPI warnings for visual inspection.

    Warnings should appear in order, and only the HAPI pair should use
    the short custom format.
    """
    from warnings import warn
    warn('Normal warning 1')
    warn('Normal warning 2')
    warning('HAPI Warning 1')
    warning('HAPI Warning 2')
    warn('Normal warning 3')
    warn('Normal warning 4')
def warning(*args):
    """Display a short warning message.

    warning(message) raises a warning of type HAPIWarning and displays
    "Warning: " + message. Use for warnings when a full stack trace is not
    needed.

    warning(message, fname) uses *fname* as the displayed file name instead
    of the caller's file name.
    """
    import warnings
    from os import path
    from sys import stderr
    from inspect import stack

    message = args[0]
    if len(args) > 1:
        fname = args[1]
    else:
        fname = stack()[1][1]
    #line = stack()[1][2]
    fname = path.basename(fname)

    class HAPIWarning(Warning):
        pass

    # Copy default showwarning function BEFORE installing the custom one.
    # BUG FIX: the reset line previously executed before this assignment,
    # raising UnboundLocalError on every call.
    showwarning_default = warnings.showwarning

    # Custom warning format function
    def _warning(message, category=UserWarning, filename='', lineno=-1, file=None, line=''):
        if category.__name__ == "HAPIWarning":
            stderr.write("\x1b[31mWarning in " + fname + "\x1b[0m: " + str(message) + "\n")
        else:
            # Use default showwarning function.
            showwarning_default(message, category=UserWarning,
                                filename='', lineno=-1,
                                file=None, line='')
        stderr.flush()

    # Use custom warning function instead of default
    warnings.showwarning = _warning

    # Raise warning
    warnings.warn(message, HAPIWarning)

    # Reset showwarning function to default
    warnings.showwarning = showwarning_default
class HAPIError(Exception):
    """Exception raised by error() for short, user-facing error messages."""
    pass
def error(msg, debug=False):
    """Display a short error message.

    error(message) raises an error of type HAPIError and displays
    "Error: " + message. Use for errors when a full stack trace is not needed.

    If debug=True, the full stack trace is shown.
    """
    import sys
    from inspect import stack
    from os import path

    # BUG FIX: a stray "debug = False" here used to shadow the keyword
    # argument, so debug=True could never take effect.

    if pythonshell() != 'shell':
        try:
            from IPython.core.interactiveshell import InteractiveShell
        except Exception:
            pass

    sys.stdout.flush()

    fname = stack()[1][1]
    fname = path.basename(fname)
    #line = stack()[1][2]

    def exception_handler_ipython(self, exc_tuple=None,
                                  filename=None, tb_offset=None,
                                  exception_only=False,
                                  running_compiled_code=False):
        """Show only the short message for HAPIError unless debugging."""
        #import traceback
        exception = sys.exc_info()
        if not debug and exception[0].__name__ == "HAPIError":
            sys.stderr.write("\033[0;31mHAPIError:\033[0m " + str(exception[1]))
        else:
            # Use default
            showtraceback_default(self, exc_tuple=None,
                                  filename=None, tb_offset=None,
                                  exception_only=False,
                                  running_compiled_code=False)
        sys.stderr.flush()
        # Reset back to default
        InteractiveShell.showtraceback = showtraceback_default

    def exception_handler(exception_type, exception, traceback):
        """Show only the short message for HAPIError unless debugging."""
        if not debug and exception_type.__name__ == "HAPIError":
            print("\033[0;31mHAPIError:\033[0m %s" % exception)
        else:
            # Use default.
            sys.__excepthook__(exception_type, exception, traceback)
        sys.stderr.flush()
        # Reset back to default
        sys.excepthook = sys.__excepthook__

    if pythonshell() == 'shell':
        sys.excepthook = exception_handler
    else:
        try:
            # Copy default function
            showtraceback_default = InteractiveShell.showtraceback
            # TODO: Use set_custom_exc
            # https://ipython.readthedocs.io/en/stable/api/generated/IPython.core.interactiveshell.html
            InteractiveShell.showtraceback = exception_handler_ipython
        except Exception:
            # IPython over-rides this, so this does nothing in IPython shell.
            # https://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook
            # Don't need to copy default function as it is provided as sys.__excepthook__.
            sys.excepthook = exception_handler

    raise HAPIError(msg)
def head(url):
    """Perform an HTTP HEAD request on *url* and return the response headers.

    :raises Exception: If the server does not respond with HTTP 200.
    """
    # Removed a no-op ``except Exception as e: raise e`` wrapper and an
    # unreachable trailing ``return`` after it.
    import urllib3
    http = urllib3.PoolManager()
    res = http.request('HEAD', url, retries=2)
    if res.status != 200:
        raise Exception('Head request failed on ' + url)
    return res.headers
def urlopen(url):
    """Wrapper to request.get() in urllib3"""
    # Issues a GET (with the body left unread, preload_content=False) and
    # returns the urllib3 response object. Any non-200 status or transport
    # failure is turned into a short error() message that includes a
    # pointer to the server contact list.
    import sys
    from json import load
    # https://stackoverflow.com/a/2020083
    def get_full_class_name(obj):
        # Fully-qualified class name of obj, omitting the module for builtins.
        module = obj.__class__.__module__
        if module is None or module == str.__class__.__module__:
            return obj.__class__.__name__
        return module + '.' + obj.__class__.__name__
    import urllib3
    # Common suffix appended to every error message.
    c = " If problem persists, a contact email for the server may be listed "
    c = c + "at http://hapi-server.org/servers/"
    try:
        http = urllib3.PoolManager()
        res = http.request('GET', url, preload_content=False, retries=2)
        if res.status != 200:
            # On a non-200 status, try to extract a HAPI JSON error message
            # from the response body; fall back to a generic message.
            try:
                jres = load(res)
                if 'status' in jres:
                    if 'message' in jres['status']:
                        error('\n%s\n %s\n' % (url, jres['status']['message']))
                error("Problem with " + url + \
                      ". Server responded with non-200 HTTP status (" \
                      + str(res.status) + \
                      ") and invalid HAPI JSON error message in response body." + c)
            except:
                error("Problem with " + url + \
                      ". Server responded with non-200 HTTP status (" + \
                      str(res.status) + \
                      ") and no HAPI JSON error message in response body." + c)
    except urllib3.exceptions.NewConnectionError:
        error('Connection error for : ' + url + c)
    except urllib3.exceptions.ConnectTimeoutError:
        error('Connection timeout for: ' + url + c)
    except urllib3.exceptions.MaxRetryError:
        error('Failed to connect to: ' + url + c)
    except urllib3.exceptions.ReadTimeoutError:
        error('Read timeout for: ' + url + c)
    except urllib3.exceptions.LocationParseError:
        error('Could not parse URL: ' + url)
    except urllib3.exceptions.LocationValueError:
        error('Invalid URL: ' + url)
    except urllib3.exceptions.HTTPError as e:
        error('Exception ' + get_full_class_name(e) + " for: " + url)
    except Exception as e:
        error(type(sys.exc_info()[1]).__name__ + ': ' \
              + str(e) + ' for URL: ' + url)
    return res
def urlretrieve(url, fname, check_last_modified=False, **kwargs):
    """Download URL to file

    urlretrieve(url, fname, check_last_modified=False, **kwargs)

    If check_last_modified=True, `fname` is found, URL returns Last-Modfied
    header, and `fname` timestamp is after Last-Modfied timestamp, the URL
    is not downloaded.
    """
    import shutil
    from os import path, utime, makedirs
    from time import mktime, strptime
    if check_last_modified:
        if modified(url, fname, **kwargs):
            log('Downloading ' + url + ' to ' + fname, kwargs)
            res = urlretrieve(url, fname, check_last_modified=False)
            if "Last-Modified" in res.headers:
                # Change access and modfied time to match that on server.
                # TODO: Won't need if using file.head in modified().
                urlLastModified = mktime(strptime(res.headers["Last-Modified"],
                                                  "%a, %d %b %Y %H:%M:%S GMT"))
                utime(fname, (urlLastModified, urlLastModified))
        else:
            log('Local version of ' + fname + ' is up-to-date; using it.', kwargs)
    # NOTE(review): this unconditional download also runs after the
    # check_last_modified branch above, so the file appears to be fetched
    # even when reported up-to-date (and twice after the recursive call).
    # Confirm whether an early return is missing above.
    dirname = path.dirname(fname)
    if not path.exists(dirname):
        makedirs(dirname)
    with open(fname, 'wb') as out:
        res = urlopen(url)
        shutil.copyfileobj(res, out)
    return res
def modified(url, fname, **kwargs):
    """Return True if the server copy of *url* is newer than local *fname*.

    Issues a HEAD request and compares its Last-Modified header with the
    modification time of *fname*. Also returns True when the local file is
    missing or the server sends no Last-Modified header.
    """
    from os import stat, path
    from time import mktime, strptime

    debug = False

    if not path.exists(fname):
        # No local copy yet, so a download is required.
        return True

    # HEAD request on url
    log('Making head request on ' + url, kwargs)
    headers = head(url)
    # TODO: Write headers to file.head
    if debug:
        print("Header:\n--\n")
        print(headers)
        print("--")

    # TODO: Get this from file.head if found
    fileLastModified = stat(fname).st_mtime

    if "Last-Modified" not in headers:
        if debug:
            print("No Last-Modified header. Will re-download")
        # TODO: Read file.head and compare etag
        return True

    urlLastModified = mktime(strptime(headers["Last-Modified"],
                                      "%a, %d %b %Y %H:%M:%S GMT"))
    if debug:
        print("File Last Modified = %s" % fileLastModified)
        print("URL Last Modified = %s" % urlLastModified)
    return urlLastModified > fileLastModified
def urlquote(url):
    """Python 2/3 urlquote compatability function.

    Uses urllib.parse.quote on Python 3 and urllib.quote on Python 2.
    """
    import sys

    if sys.version_info[0] > 2:
        import urllib.parse
        return urllib.parse.quote(url)

    from urllib import quote
    return quote(url)
| [
"os.path.exists",
"time.strptime",
"shutil.copyfileobj",
"os.makedirs",
"inspect.stack",
"sys.stderr.flush",
"sys._getframe",
"os.utime",
"urllib.quote",
"os.path.dirname",
"sys.exc_info",
"json.load",
"urllib3.PoolManager",
"os.path.basename",
"warnings.warn",
"os.stat",
"sys.stdout... | [((3033, 3057), 'warnings.warn', 'warn', (['"""Normal warning 1"""'], {}), "('Normal warning 1')\n", (3037, 3057), False, 'from warnings import warn\n'), ((3062, 3086), 'warnings.warn', 'warn', (['"""Normal warning 2"""'], {}), "('Normal warning 2')\n", (3066, 3086), False, 'from warnings import warn\n'), ((3153, 3177), 'warnings.warn', 'warn', (['"""Normal warning 3"""'], {}), "('Normal warning 3')\n", (3157, 3177), False, 'from warnings import warn\n'), ((3182, 3206), 'warnings.warn', 'warn', (['"""Normal warning 4"""'], {}), "('Normal warning 4')\n", (3186, 3206), False, 'from warnings import warn\n'), ((3687, 3707), 'os.path.basename', 'path.basename', (['fname'], {}), '(fname)\n', (3700, 3707), False, 'from os import stat, path\n'), ((4584, 4619), 'warnings.warn', 'warnings.warn', (['message', 'HAPIWarning'], {}), '(message, HAPIWarning)\n', (4597, 4619), False, 'import warnings\n'), ((5173, 5191), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5189, 5191), False, 'import sys\n'), ((5231, 5251), 'os.path.basename', 'path.basename', (['fname'], {}), '(fname)\n', (5244, 5251), False, 'from os import stat, path\n'), ((7389, 7410), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (7408, 7410), False, 'import urllib3\n'), ((11085, 11104), 'os.path.dirname', 'path.dirname', (['fname'], {}), '(fname)\n', (11097, 11104), False, 'from os import stat, path\n'), ((2258, 2284), 'os.path.basename', 'os.path.basename', (["env['_']"], {}), "(env['_'])\n", (2274, 2284), False, 'import os\n'), ((4220, 4234), 'sys.stderr.flush', 'stderr.flush', ([], {}), '()\n', (4232, 4234), False, 'from sys import stderr\n'), ((5567, 5581), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5579, 5581), False, 'import sys\n'), ((6015, 6033), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (6031, 6033), False, 'import sys\n'), ((6444, 6462), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (6460, 6462), False, 'import sys\n'), 
((8214, 8235), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (8233, 8235), False, 'import urllib3\n'), ((11116, 11136), 'os.path.exists', 'path.exists', (['dirname'], {}), '(dirname)\n', (11127, 11136), False, 'from os import stat, path\n'), ((11146, 11163), 'os.makedirs', 'makedirs', (['dirname'], {}), '(dirname)\n', (11154, 11163), False, 'from os import path, utime, makedirs\n'), ((11235, 11263), 'shutil.copyfileobj', 'shutil.copyfileobj', (['res', 'out'], {}), '(res, out)\n', (11253, 11263), False, 'import shutil\n'), ((11501, 11519), 'os.path.exists', 'path.exists', (['fname'], {}), '(fname)\n', (11512, 11519), False, 'from os import stat, path\n'), ((11838, 11849), 'os.stat', 'stat', (['fname'], {}), '(fname)\n', (11842, 11849), False, 'from os import stat, path\n'), ((12710, 12720), 'urllib.quote', 'quote', (['url'], {}), '(url)\n', (12715, 12720), False, 'from urllib import quote\n'), ((5205, 5212), 'inspect.stack', 'stack', ([], {}), '()\n', (5210, 5212), False, 'from inspect import stack\n'), ((6378, 6434), 'sys.__excepthook__', 'sys.__excepthook__', (['exception_type', 'exception', 'traceback'], {}), '(exception_type, exception, traceback)\n', (6396, 6434), False, 'import sys\n'), ((11927, 11990), 'time.strptime', 'strptime', (["headers['Last-Modified']", '"""%a, %d %b %Y %H:%M:%S GMT"""'], {}), "(headers['Last-Modified'], '%a, %d %b %Y %H:%M:%S GMT')\n", (11935, 11990), False, 'from time import mktime, strptime\n'), ((823, 839), 'sys._getframe', 'sys._getframe', (['(1)'], {}), '(1)\n', (836, 839), False, 'import sys\n'), ((3633, 3640), 'inspect.stack', 'stack', ([], {}), '()\n', (3638, 3640), False, 'from inspect import stack\n'), ((8379, 8388), 'json.load', 'load', (['res'], {}), '(res)\n', (8383, 8388), False, 'from json import load\n'), ((10924, 10972), 'os.utime', 'utime', (['fname', '(urlLastModified, urlLastModified)'], {}), '(fname, (urlLastModified, urlLastModified))\n', (10929, 10972), False, 'from os import path, utime, 
makedirs\n'), ((10789, 10856), 'time.strptime', 'strptime', (["res.headers['Last-Modified']", '"""%a, %d %b %Y %H:%M:%S GMT"""'], {}), "(res.headers['Last-Modified'], '%a, %d %b %Y %H:%M:%S GMT')\n", (10797, 10856), False, 'from time import mktime, strptime\n'), ((9813, 9827), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9825, 9827), False, 'import sys\n')] |
from __future__ import unicode_literals, print_function, division
import feedparser
import dataset
from twisted.internet.reactor import callLater
from threading import Thread
import twisted.internet.error
import logging
logger = logging.getLogger('module_rss')
DATABASE = None
updater = None
botref = None
config = {}
def init(bot, testing=False):
''' Initialize updater '''
global DATABASE
global config
global botref
global updater
global logger
if testing:
DATABASE = dataset.connect('sqlite:///:memory:')
else:
DATABASE = dataset.connect('sqlite:///databases/rss.db')
logger.info('RSS module initialized')
botref = bot
config = bot.config.get('rss', {})
finalize()
# As there's no signal if this is a rehash or restart
# update feeds in 30 seconds
updater = callLater(30, update_feeds)
def finalize():
''' Finalize updater (rehash etc) so we don't leave an updater running '''
global updater
global logger
logger.info('RSS module finalized')
if updater:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = None
def get_feeds(**kwargs):
''' Get feeds from database '''
return [
Feed(f['network'], f['channel'], f['id'])
for f in list(DATABASE['feeds'].find(**kwargs))
]
def find_feed(network, channel, **kwargs):
''' Find specific feed from database '''
f = DATABASE['feeds'].find_one(network=network, channel=channel, **kwargs)
if not f:
return
return Feed(f['network'], f['channel'], f['id'])
def add_feed(network, channel, url):
''' Add feed to database '''
f = Feed(network=network, channel=channel, url=url)
return (f.initialized, f.read())
def remove_feed(network, channel, id):
''' Remove feed from database '''
f = find_feed(network=network, channel=channel, id=int(id))
if not f:
return
DATABASE['feeds'].delete(id=f.id)
DATABASE['items_%i' % (f.id)].drop()
return f
def update_feeds(cancel=True, **kwargs):
# from time import sleep
''' Update all feeds in the DB '''
global config
global updater
global logger
logger.info('Updating RSS feeds started')
for f in get_feeds(**kwargs):
Thread(target=f.update).start()
# If we get a cancel, cancel the existing updater
# and start a new one
# NOTE: Not sure if needed, as atm cancel isn't used in any command...
if cancel:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = callLater(5 * 60, update_feeds)
def command_rss(bot, user, channel, args):
commands = ['list', 'add', 'remove', 'latest', 'update']
args = args.split()
if not args or args[0] not in commands:
return bot.say(channel, 'rss: valid arguments are [%s]' % (', '.join(commands)))
command = args[0]
network = bot.network.alias
# Get latest feed item from database
# Not needed? mainly for debugging
# Possibly useful for checking if feed still exists?
if command == 'latest':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss latest <id from list>"')
feed = find_feed(network=network, channel=channel, id=int(args[1]))
if not feed:
return bot.say(channel, 'feed not found, no action taken')
item = feed.get_latest()
if not item:
return bot.say(channel, 'no items in feed')
return bot.say(channel, feed.get_item_str(item))
# List all feeds for current network && channel
if command == 'list':
feeds = get_feeds(network=network, channel=channel)
if not feeds:
return bot.say(channel, 'no feeds set up')
for f in feeds:
bot.say(channel, '%02i: %s <%s>' % (f.id, f.name, f.url))
return
# Rest of the commands are only for admins
if not bot.factory.isAdmin(user):
return bot.say(channel, 'only "latest" and "list" available for non-admins')
# Add new feed for channel
if command == 'add':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss add url"')
init, items = add_feed(network, channel, url=args[1])
if not init:
return bot.say(channel, 'feed already added')
return bot.say(channel, 'feed added with %i items' % len(items))
# remove feed from channel
if command == 'remove':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss remove <id from list>"')
feed = remove_feed(network, channel, id=args[1])
if not feed:
return bot.say(channel, 'feed not found, no action taken')
return bot.say(channel, 'feed "%s" <%s> removed' % (feed.name, feed.url))
# If there's no args, update all feeds (even for other networks)
# If arg exists, try to update the feed...
if command == 'update':
if len(args) < 2:
bot.say(channel, 'feeds updating')
update_feeds()
return
feed = find_feed(network, channel, id=int(args[1]))
if not feed:
return bot.say(channel, 'feed not found, no action taken')
feed.update()
return
class Feed(object):
''' Feed object to simplify feed handling '''
def __init__(self, network, channel, id=None, url=None):
# Not sure if (this complex) init is needed...
self.id = id
self.network = network
self.channel = channel
self.url = url
if url:
self.url = url
self.initialized = False
# load feed details from database
self._get_feed_from_db()
def __repr__(self):
return '(%s, %s, %s)' % (self.url, self.channel, self.network)
def __unicode__(self):
return '%i - %s' % (self.id, self.url)
def __init_feed(self):
''' Initialize databases for feed '''
DATABASE['feeds'].insert({
'network': self.network,
'channel': self.channel,
'url': self.url,
'name': '',
})
# Update feed to match the created
feed = self._get_feed_from_db()
# Initialize item-database for feed
self.__save_item({
'title': 'PLACEHOLDER',
'link': 'https://github.com/lepinkainen/pyfibot/',
'printed': True,
})
self.initialized = True
return feed
def __get_items_tbl(self):
''' Get table for feeds items '''
return DATABASE[('items_%i' % (self.id))]
def __parse_feed(self):
''' Parse items from feed '''
f = feedparser.parse(self.url)
if self.initialized:
self.update_feed_info({'name': f['channel']['title']})
items = [{
'title': i['title'],
'link': i['link'],
} for i in f['items']]
return (f, items)
def __save_item(self, item, table=None):
''' Save item to feeds database '''
if table is None:
table = self.__get_items_tbl()
# If override is set or the item cannot be found, it's a new one
if not table.find_one(title=item['title'], link=item['link']):
# If printed isn't set, set it to the value in self.initialized (True, if initializing, else False)
# This is to prevent flooding when adding a new feed...
if 'printed' not in item:
item['printed'] = self.initialized
table.insert(item)
def __mark_printed(self, item, table=None):
''' Mark item as printed '''
if table is None:
table = self.__get_items_tbl()
table.update({'id': item['id'], 'printed': True}, ['id'])
def _get_feed_from_db(self):
''' Get self from database '''
feed = None
if self.url and not self.id:
feed = DATABASE['feeds'].find_one(network=self.network, channel=self.channel, url=self.url)
if self.id:
feed = DATABASE['feeds'].find_one(network=self.network, channel=self.channel, id=self.id)
if not feed:
feed = self.__init_feed()
self.id = feed['id']
self.network = feed['network']
self.channel = feed['channel']
self.url = feed['url']
# TODO: Name could just be the domain part of url?
self.name = feed['name']
return feed
def get_item_str(self, item):
return '[%s] %s <%s>' % (''.join([c for c in self.name][0:18]), item['title'], item['link'])
def get_latest(self):
tbl = self.__get_items_tbl()
items = [i for i in list(tbl.find(order_by='id'))]
if not items:
return
return items[-1]
def update_feed_info(self, data):
''' Update feed information '''
data['id'] = self.id
if 'url' in data:
self.url = data['url']
DATABASE['feeds'].update(data, ['id'])
# Update self to match new...
self._get_feed_from_db()
def read(self):
''' Read new items from feed '''
f, items = self.__parse_feed()
# Get table -reference to speed up stuff...
tbl = self.__get_items_tbl()
# Save items in DB, saving takes care of duplicate checks
for i in reversed(items):
self.__save_item(i, tbl)
# Set initialized to False, as we have read everything...
self.initialized = False
return items
def get_new_items(self, mark_printed=False):
''' Get all items which are not marked as printed, if mark_printed is set, update printed also. '''
tbl = self.__get_items_tbl()
items = [i for i in list(tbl.find(printed=False))]
if mark_printed:
for i in items:
self.__mark_printed(i, tbl)
return items
def update(self):
global logger
global botref
# If botref isn't defined, bot isn't running, no need to run
# (used for tests?)
if not botref:
return
# Read all items for feed
logger.debug('Feed "%s" updating' % (self.name))
self.read()
# Get number of unprinted items (and don't mark as printed)
items = self.get_new_items(False)
if len(items) == 0:
logger.debug('Feed "%s" containes no new items, doing nothing.' % (self.name))
return
logger.debug('Feed "%s" updated with %i new items' % (self.name, len(items)))
# If bot instance isn't found, don't print anything
bot_instance = botref.find_bot_for_network(self.network)
if not bot_instance:
logger.error('Bot instance for "%s" not found, not printing' % (self.name))
return
logger.debug('Printing new items for "%s"' % (self.name))
# Get all new (not printed) items and print them
items = self.get_new_items(True)
for i in items:
bot_instance.say(self.channel, self.get_item_str(i))
if __name__ == '__main__':
f = Feed('ircnet', '#pyfibot', 'http://feeds.feedburner.com/ampparit-kaikki?format=xml')
f.read()
for i in f.get_new_items(True):
print(i)
| [
"logging.getLogger",
"feedparser.parse",
"twisted.internet.reactor.callLater",
"threading.Thread",
"dataset.connect"
] | [((231, 262), 'logging.getLogger', 'logging.getLogger', (['"""module_rss"""'], {}), "('module_rss')\n", (248, 262), False, 'import logging\n'), ((844, 871), 'twisted.internet.reactor.callLater', 'callLater', (['(30)', 'update_feeds'], {}), '(30, update_feeds)\n', (853, 871), False, 'from twisted.internet.reactor import callLater\n'), ((512, 549), 'dataset.connect', 'dataset.connect', (['"""sqlite:///:memory:"""'], {}), "('sqlite:///:memory:')\n", (527, 549), False, 'import dataset\n'), ((579, 624), 'dataset.connect', 'dataset.connect', (['"""sqlite:///databases/rss.db"""'], {}), "('sqlite:///databases/rss.db')\n", (594, 624), False, 'import dataset\n'), ((2648, 2679), 'twisted.internet.reactor.callLater', 'callLater', (['(5 * 60)', 'update_feeds'], {}), '(5 * 60, update_feeds)\n', (2657, 2679), False, 'from twisted.internet.reactor import callLater\n'), ((6716, 6742), 'feedparser.parse', 'feedparser.parse', (['self.url'], {}), '(self.url)\n', (6732, 6742), False, 'import feedparser\n'), ((2315, 2338), 'threading.Thread', 'Thread', ([], {'target': 'f.update'}), '(target=f.update)\n', (2321, 2338), False, 'from threading import Thread\n')] |
__author__ = "<NAME> - 16600748"
from capstone import *
import pefile, os
# samplePaths = ["testSamples/" + sample for sample in os.listdir("testSamples")]
samplePaths = ["../bin-utf8-vec/benignSamples/" + sample for sample in os.listdir("../bin-utf8-vec/benignSamples")] + \
["../bin-utf8-vec/malwareSamples/" + sample for sample in os.listdir("../bin-utf8-vec/malwareSamples")] + \
["../bin-utf8-vec/ransomwareSamples/" + sample for sample in os.listdir("../bin-utf8-vec/ransomwareSamples")]
opcodeSet = set()
opCodeDicts = []
opCodeFreqs = {}
nSamples = len(samplePaths)
count = 1
for sample in samplePaths:
try:
pe = pefile.PE(sample, fast_load=True)
entryPoint = pe.OPTIONAL_HEADER.AddressOfEntryPoint
data = pe.get_memory_mapped_image()[entryPoint:]
cs = Cs(CS_ARCH_X86, CS_MODE_32)
opcodes = []
for i in cs.disasm(data, 0x1000):
opcodes.append(i.mnemonic)
opcodeDict = {}
total = len(opcodes)
opcodeSet = set(list(opcodeSet) + opcodes)
for opcode in opcodeSet:
freq = 1
for op in opcodes:
if opcode == op:
freq += 1
try:
opCodeFreqs[opcode] += freq
except:
opCodeFreqs[opcode] = freq
opcodeDict[opcode] = round((freq / total) * 100, 2)
opCodeDicts.append(opcodeDict)
os.system("clear")
print(str((count / nSamples) * 100) + "%")
count += 1
except Exception as e:
print(e)
# for opcode in opcodeSet:
# print(opcode, str(opcodeDict[opcode]) + "%")
# for opcodeDict in opCodeDicts:
# freqSorted = sorted(opcodeDict, key=opcodeDict.get)[-1:0:-1]
# print(opcodeDict[freqSorted[0]], opcodeDict[freqSorted[1]], opcodeDict[freqSorted[2]], freqSorted)
opCodeFreqsSorted = sorted(opCodeFreqs, key=opCodeFreqs.get)[-1:0:-1]
with open("top50opcodes.csv", "w") as f:
f.write("opcode, frequency\n")
for opcode in opCodeFreqsSorted[:50]:
f.write(str(opcode) + ", " + str(opCodeFreqs[opcode]) + "\n")
print(opcode, opCodeFreqs[opcode])
| [
"os.system",
"os.listdir",
"pefile.PE"
] | [((639, 672), 'pefile.PE', 'pefile.PE', (['sample'], {'fast_load': '(True)'}), '(sample, fast_load=True)\n', (648, 672), False, 'import pefile, os\n'), ((1426, 1444), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1435, 1444), False, 'import pefile, os\n'), ((447, 494), 'os.listdir', 'os.listdir', (['"""../bin-utf8-vec/ransomwareSamples"""'], {}), "('../bin-utf8-vec/ransomwareSamples')\n", (457, 494), False, 'import pefile, os\n'), ((229, 272), 'os.listdir', 'os.listdir', (['"""../bin-utf8-vec/benignSamples"""'], {}), "('../bin-utf8-vec/benignSamples')\n", (239, 272), False, 'import pefile, os\n'), ((336, 380), 'os.listdir', 'os.listdir', (['"""../bin-utf8-vec/malwareSamples"""'], {}), "('../bin-utf8-vec/malwareSamples')\n", (346, 380), False, 'import pefile, os\n')] |
import unittest
import os
import sys
import StringIO
path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lvsm')))
from lvsm.modules import keepalived
class Keepalived(unittest.TestCase):
"""Tests for the functionality of the keepalived module"""
def setUp(self):
args = {'keepalived-mib': 'KEEPALIVED-MIB',
'snmp_community': 'private',
'snmp_host': 'localhost',
'snmp_user': '',
'snmp_password': '',
'cache_dir': path + '/cache'
}
self.director = keepalived.Keepalived(path + '/scripts/ipvsadm3',
path + '/etc/keepalived.conf',
restart_cmd='',
nodes='',
args=args)
def test_show(self):
self.maxDiff = None
# Testing show on non-standard ports
expected_result = ['',
'Layer 4 Load balancing',
'======================',
'TCP 192.0.2.2:8888 rr ',
' -> 192.0.2.200:8888 Masq 1 0 0 ',
' -> 192.0.2.201:8888 Masq 1 0 0 ',
'',
'UDP 192.0.2.2:domain rr ',
' -> 192.0.2.202:domain Masq 1 0 0 ',
' -> 192.0.2.203:domain Masq 1 0 0 ',
'',
'']
self.assertEqual(self.director.show(numeric=False, color=False), expected_result)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"os.path.dirname",
"lvsm.modules.keepalived.Keepalived"
] | [((77, 102), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (92, 102), False, 'import os\n'), ((2035, 2050), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2048, 2050), False, 'import unittest\n'), ((649, 770), 'lvsm.modules.keepalived.Keepalived', 'keepalived.Keepalived', (["(path + '/scripts/ipvsadm3')", "(path + '/etc/keepalived.conf')"], {'restart_cmd': '""""""', 'nodes': '""""""', 'args': 'args'}), "(path + '/scripts/ipvsadm3', path +\n '/etc/keepalived.conf', restart_cmd='', nodes='', args=args)\n", (670, 770), False, 'from lvsm.modules import keepalived\n'), ((152, 177), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (167, 177), False, 'import os\n')] |
"""
Copyright 2014-2018 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: auditResults/urlsCSV.py
Author: <NAME>
"""
# reports/urls.py
from __future__ import absolute_import
from django.conf.urls import url
from .viewsCSV import GroupResultsViewCSV
from .viewsCSV import GroupResultsAuditGroupViewCSV
from .viewsCSV import GroupRuleGroupResultsViewCSV
urlpatterns = [
url(r'^all/(?P<result_slug>[\w-]+)/(?P<rule_grouping>[\w-]+)/$',
GroupResultsViewCSV,
name='group_results_csv'),
url(r'^all/(?P<result_slug>[\w-]+)/(?P<rule_grouping>[\w-]+)/g/(?P<audit_group_slug>[\w-]+)/$',
GroupResultsAuditGroupViewCSV,
name='group_results_audit_group_csv'),
# Rule grouping result views
url(r'^some/(?P<result_slug>[\w-]+)/(?P<rule_grouping>[\w-]+)/rg/(?P<rule_group_slug>[\w-]+)/$',
GroupRuleGroupResultsViewCSV,
name='group_rule_group_results_csv')
]
| [
"django.conf.urls.url"
] | [((886, 1002), 'django.conf.urls.url', 'url', (['"""^all/(?P<result_slug>[\\\\w-]+)/(?P<rule_grouping>[\\\\w-]+)/$"""', 'GroupResultsViewCSV'], {'name': '"""group_results_csv"""'}), "('^all/(?P<result_slug>[\\\\w-]+)/(?P<rule_grouping>[\\\\w-]+)/$',\n GroupResultsViewCSV, name='group_results_csv')\n", (889, 1002), False, 'from django.conf.urls import url\n'), ((1016, 1187), 'django.conf.urls.url', 'url', (['"""^all/(?P<result_slug>[\\\\w-]+)/(?P<rule_grouping>[\\\\w-]+)/g/(?P<audit_group_slug>[\\\\w-]+)/$"""', 'GroupResultsAuditGroupViewCSV'], {'name': '"""group_results_audit_group_csv"""'}), "('^all/(?P<result_slug>[\\\\w-]+)/(?P<rule_grouping>[\\\\w-]+)/g/(?P<audit_group_slug>[\\\\w-]+)/$'\n , GroupResultsAuditGroupViewCSV, name='group_results_audit_group_csv')\n", (1019, 1187), False, 'from django.conf.urls import url\n'), ((1229, 1399), 'django.conf.urls.url', 'url', (['"""^some/(?P<result_slug>[\\\\w-]+)/(?P<rule_grouping>[\\\\w-]+)/rg/(?P<rule_group_slug>[\\\\w-]+)/$"""', 'GroupRuleGroupResultsViewCSV'], {'name': '"""group_rule_group_results_csv"""'}), "('^some/(?P<result_slug>[\\\\w-]+)/(?P<rule_grouping>[\\\\w-]+)/rg/(?P<rule_group_slug>[\\\\w-]+)/$'\n , GroupRuleGroupResultsViewCSV, name='group_rule_group_results_csv')\n", (1232, 1399), False, 'from django.conf.urls import url\n')] |
"""Test."""
import os
import unittest
import pytest
from utils import www
TEST_JSON_URL = os.path.join(
'https://raw.githubusercontent.com',
'nuuuwan/misc-sl-data/master',
'sl_power_station_info.json',
)
TEST_TSV_URL = os.path.join(
'https://raw.githubusercontent.com',
'nuuuwan/gig-data/master',
'province.tsv',
)
TEST_INVALID_URL = 'http://www.29df.c'
TEST_IMAGE_LINK = 'https://www.python.org/static/img/python-logo@2x.png'
class testWWW(unittest.TestCase):
"""Test."""
@pytest.mark.slow
def test_read(self):
"""Test."""
data = www.read(TEST_JSON_URL)
self.assertIn('Station', data)
data_selenium = www.read(TEST_JSON_URL, use_selenium=True)
self.assertIn(data, data_selenium)
def test_read_json(self):
"""Test."""
data = www.read_json(TEST_JSON_URL)
self.assertIn('Station', data[0])
def test_read_tsv(self):
"""Test."""
data = www.read_tsv(TEST_TSV_URL)
self.assertEqual(len(data), 9)
self.assertEqual(data[0]['province_id'], 'LK-1')
def test_invalid_url(self):
"""Test."""
data = www.read_json(TEST_INVALID_URL)
self.assertEqual(data, None)
def test_download_binary(self):
"""Test."""
file_name = '/tmp/utils.test_www.file.png'
www.download_binary(
TEST_IMAGE_LINK,
file_name,
)
@pytest.mark.slow
def test_exists(self):
"""Test."""
self.assertTrue(www.exists('https://www.python.org/'))
self.assertFalse(www.exists('https://www.python123.org/'))
def test_get_all_urls(self):
"""Test."""
self.assertGreater(
len(www.get_all_urls('https://www.python.org/')),
50,
)
| [
"utils.www.exists",
"utils.www.read",
"utils.www.download_binary",
"os.path.join",
"utils.www.get_all_urls",
"utils.www.read_tsv",
"utils.www.read_json"
] | [((93, 207), 'os.path.join', 'os.path.join', (['"""https://raw.githubusercontent.com"""', '"""nuuuwan/misc-sl-data/master"""', '"""sl_power_station_info.json"""'], {}), "('https://raw.githubusercontent.com',\n 'nuuuwan/misc-sl-data/master', 'sl_power_station_info.json')\n", (105, 207), False, 'import os\n'), ((235, 331), 'os.path.join', 'os.path.join', (['"""https://raw.githubusercontent.com"""', '"""nuuuwan/gig-data/master"""', '"""province.tsv"""'], {}), "('https://raw.githubusercontent.com', 'nuuuwan/gig-data/master',\n 'province.tsv')\n", (247, 331), False, 'import os\n'), ((592, 615), 'utils.www.read', 'www.read', (['TEST_JSON_URL'], {}), '(TEST_JSON_URL)\n', (600, 615), False, 'from utils import www\n'), ((679, 721), 'utils.www.read', 'www.read', (['TEST_JSON_URL'], {'use_selenium': '(True)'}), '(TEST_JSON_URL, use_selenium=True)\n', (687, 721), False, 'from utils import www\n'), ((831, 859), 'utils.www.read_json', 'www.read_json', (['TEST_JSON_URL'], {}), '(TEST_JSON_URL)\n', (844, 859), False, 'from utils import www\n'), ((967, 993), 'utils.www.read_tsv', 'www.read_tsv', (['TEST_TSV_URL'], {}), '(TEST_TSV_URL)\n', (979, 993), False, 'from utils import www\n'), ((1158, 1189), 'utils.www.read_json', 'www.read_json', (['TEST_INVALID_URL'], {}), '(TEST_INVALID_URL)\n', (1171, 1189), False, 'from utils import www\n'), ((1343, 1390), 'utils.www.download_binary', 'www.download_binary', (['TEST_IMAGE_LINK', 'file_name'], {}), '(TEST_IMAGE_LINK, file_name)\n', (1362, 1390), False, 'from utils import www\n'), ((1520, 1557), 'utils.www.exists', 'www.exists', (['"""https://www.python.org/"""'], {}), "('https://www.python.org/')\n", (1530, 1557), False, 'from utils import www\n'), ((1584, 1624), 'utils.www.exists', 'www.exists', (['"""https://www.python123.org/"""'], {}), "('https://www.python123.org/')\n", (1594, 1624), False, 'from utils import www\n'), ((1724, 1767), 'utils.www.get_all_urls', 'www.get_all_urls', (['"""https://www.python.org/"""'], {}), 
"('https://www.python.org/')\n", (1740, 1767), False, 'from utils import www\n')] |
import pytest
import re
import unittest
import metric_learn
import numpy as np
from sklearn import clone
from test.test_utils import ids_metric_learners, metric_learners, remove_y
from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22
def remove_spaces(s):
return re.sub(r'\s+', '', s)
def sk_repr_kwargs(def_kwargs, nndef_kwargs):
"""Given the non-default arguments, and the default
keywords arguments, build the string that will appear
in the __repr__ of the estimator, depending on the
version of scikit-learn.
"""
if SKLEARN_AT_LEAST_0_22:
def_kwargs = {}
def_kwargs.update(nndef_kwargs)
args_str = ",".join(f"{key}={repr(value)}"
for key, value in def_kwargs.items())
return args_str
class TestStringRepr(unittest.TestCase):
def test_covariance(self):
def_kwargs = {'preprocessor': None}
nndef_kwargs = {}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.Covariance())),
remove_spaces(f"Covariance({merged_kwargs})"))
def test_lmnn(self):
def_kwargs = {'convergence_tol': 0.001, 'init': 'auto', 'k': 3,
'learn_rate': 1e-07, 'max_iter': 1000, 'min_iter': 50,
'n_components': None, 'preprocessor': None,
'random_state': None, 'regularization': 0.5,
'verbose': False}
nndef_kwargs = {'convergence_tol': 0.01, 'k': 6}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LMNN(convergence_tol=0.01, k=6))),
remove_spaces(f"LMNN({merged_kwargs})"))
def test_nca(self):
def_kwargs = {'init': 'auto', 'max_iter': 100, 'n_components': None,
'preprocessor': None, 'random_state': None, 'tol': None,
'verbose': False}
nndef_kwargs = {'max_iter': 42}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.NCA(max_iter=42))),
remove_spaces(f"NCA({merged_kwargs})"))
def test_lfda(self):
def_kwargs = {'embedding_type': 'weighted', 'k': None,
'n_components': None, 'preprocessor': None}
nndef_kwargs = {'k': 2}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LFDA(k=2))),
remove_spaces(f"LFDA({merged_kwargs})"))
def test_itml(self):
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'preprocessor': None,
'prior': 'identity', 'random_state': None, 'verbose': False}
nndef_kwargs = {'gamma': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.ITML(gamma=0.5))),
remove_spaces(f"ITML({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'verbose': False}
nndef_kwargs = {'num_constraints': 7}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.ITML_Supervised(num_constraints=7))),
remove_spaces(f"ITML_Supervised({merged_kwargs})"))
def test_lsml(self):
def_kwargs = {'max_iter': 1000, 'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False}
nndef_kwargs = {'tol': 0.1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LSML(tol=0.1))),
remove_spaces(f"LSML({merged_kwargs})"))
def_kwargs = {'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False,
'weights': None}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LSML_Supervised(verbose=True))),
remove_spaces(f"LSML_Supervised({merged_kwargs})"))
def test_sdml(self):
def_kwargs = {'balance_param': 0.5, 'preprocessor': None,
'prior': 'identity', 'random_state': None,
'sparsity_param': 0.01, 'verbose': False}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.SDML(verbose=True))),
remove_spaces(f"SDML({merged_kwargs})"))
def_kwargs = {'balance_param': 0.5, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'sparsity_param': 0.01,
'verbose': False}
nndef_kwargs = {'sparsity_param': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.SDML_Supervised(sparsity_param=0.5))),
remove_spaces(f"SDML_Supervised({merged_kwargs})"))
def test_rca(self):
def_kwargs = {'n_components': None, 'preprocessor': None}
nndef_kwargs = {'n_components': 3}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.RCA(n_components=3))),
remove_spaces(f"RCA({merged_kwargs})"))
def_kwargs = {'chunk_size': 2, 'n_components': None, 'num_chunks': 100,
'preprocessor': None, 'random_state': None}
nndef_kwargs = {'num_chunks': 5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.RCA_Supervised(num_chunks=5))),
remove_spaces(f"RCA_Supervised({merged_kwargs})"))
def test_mlkr(self):
def_kwargs = {'init': 'auto', 'max_iter': 1000,
'n_components': None, 'preprocessor': None,
'random_state': None, 'tol': None, 'verbose': False}
nndef_kwargs = {'max_iter': 777}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MLKR(max_iter=777))),
remove_spaces(f"MLKR({merged_kwargs})"))
def test_mmc(self):
def_kwargs = {'convergence_threshold': 0.001, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'preprocessor': None,
'random_state': None, 'verbose': False}
nndef_kwargs = {'diagonal': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MMC(diagonal=True))),
remove_spaces(f"MMC({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 1e-06, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'num_constraints': None,
'preprocessor': None, 'random_state': None,
'verbose': False}
nndef_kwargs = {'max_iter': 1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.MMC_Supervised(max_iter=1))),
remove_spaces(f"MMC_Supervised({merged_kwargs})"))
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_is_independent_from_metric_learner(estimator,
build_dataset):
"""Tests that the get_metric method returns a function that is independent
from the original metric learner"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
# we fit the metric learner on it and then we compute the metric on some
# points
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
score = metric(X[0], X[1])
# then we refit the estimator on another dataset
model.fit(*remove_y(model, np.sin(input_data), labels))
# we recompute the distance between the two points: it should be the same
score_bis = metric(X[0], X[1])
assert score_bis == score
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_raises_error(estimator, build_dataset):
"""Tests that the metric returned by get_metric raises errors similar to
the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_raises = [(X[0].tolist() + [5.2], X[1]), # vectors with
# different dimensions
(X[0:4], X[1:5]), # 2D vectors
(X[0].tolist() + [5.2], X[1] + [7.2])]
# vectors of same dimension but incompatible with what the metric learner
# was trained on
for u, v in list_test_get_metric_raises:
with pytest.raises(ValueError):
metric(u, v)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_works_does_not_raise(estimator, build_dataset):
"""Tests that the metric returned by get_metric does not raise errors (or
warnings) similarly to the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_doesnt_raise = [(X[0], X[1]),
(X[0].tolist(), X[1].tolist()),
(X[0][None], X[1][None])]
for u, v in list_test_get_metric_doesnt_raise:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
# Test that the scalar case works
model.components_ = np.array([3.1])
metric = model.get_metric()
for u, v in [(5, 6.7), ([5], [6.7]), ([[5]], [[6.7]])]:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_n_components(estimator, build_dataset):
"""Check that estimators that have a n_components parameters can use it
and that it actually works as expected"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
if hasattr(model, 'n_components'):
set_random_state(model)
model.set_params(n_components=None)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1], X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] - 1)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1] - 1, X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] + 1)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=0)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
if __name__ == '__main__':
unittest.main()
| [
"metric_learn.SDML",
"metric_learn.sklearn_shims.set_random_state",
"numpy.array",
"sklearn.clone",
"numpy.sin",
"unittest.main",
"metric_learn.MMC",
"metric_learn.MMC_Supervised",
"metric_learn.MLKR",
"metric_learn.NCA",
"test.test_utils.remove_y",
"metric_learn.SDML_Supervised",
"metric_le... | [((7493, 7591), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""estimator, build_dataset"""', 'metric_learners'], {'ids': 'ids_metric_learners'}), "('estimator, build_dataset', metric_learners, ids=\n ids_metric_learners)\n", (7516, 7591), False, 'import pytest\n'), ((8409, 8507), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""estimator, build_dataset"""', 'metric_learners'], {'ids': 'ids_metric_learners'}), "('estimator, build_dataset', metric_learners, ids=\n ids_metric_learners)\n", (8432, 8507), False, 'import pytest\n'), ((9366, 9464), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""estimator, build_dataset"""', 'metric_learners'], {'ids': 'ids_metric_learners'}), "('estimator, build_dataset', metric_learners, ids=\n ids_metric_learners)\n", (9389, 9464), False, 'import pytest\n'), ((10462, 10560), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""estimator, build_dataset"""', 'metric_learners'], {'ids': 'ids_metric_learners'}), "('estimator, build_dataset', metric_learners, ids=\n ids_metric_learners)\n", (10485, 10560), False, 'import pytest\n'), ((292, 313), 're.sub', 're.sub', (['"""\\\\s+"""', '""""""', 's'], {}), "('\\\\s+', '', s)\n", (298, 313), False, 'import re\n'), ((7919, 7935), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (7924, 7935), False, 'from sklearn import clone\n'), ((7938, 7961), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (7954, 7961), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((8772, 8788), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (8777, 8788), False, 'from sklearn import clone\n'), ((8791, 8814), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (8807, 8814), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((9761, 9777), 'sklearn.clone', 
'clone', (['estimator'], {}), '(estimator)\n', (9766, 9777), False, 'from sklearn import clone\n'), ((9780, 9803), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (9796, 9803), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((10269, 10284), 'numpy.array', 'np.array', (['[3.1]'], {}), '([3.1])\n', (10277, 10284), True, 'import numpy as np\n'), ((10803, 10819), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (10808, 10819), False, 'from sklearn import clone\n'), ((11942, 11957), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11955, 11957), False, 'import unittest\n'), ((10862, 10885), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (10878, 10885), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((11054, 11070), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (11059, 11070), False, 'from sklearn import clone\n'), ((11075, 11098), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (11091, 11098), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((11281, 11297), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (11286, 11297), False, 'from sklearn import clone\n'), ((11302, 11325), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (11318, 11325), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((11609, 11625), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (11614, 11625), False, 'from sklearn import clone\n'), ((11630, 11653), 'metric_learn.sklearn_shims.set_random_state', 'set_random_state', (['model'], {}), '(model)\n', (11646, 11653), False, 'from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22\n'), ((8062, 8097), 
'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (8070, 8097), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((8828, 8863), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (8836, 8863), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((9317, 9342), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9330, 9342), False, 'import pytest\n'), ((9817, 9852), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (9825, 9852), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((10133, 10151), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (10145, 10151), False, 'import pytest\n'), ((10382, 10400), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (10394, 10400), False, 'import pytest\n'), ((11385, 11410), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11398, 11410), False, 'import pytest\n'), ((11700, 11725), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11713, 11725), False, 'import pytest\n'), ((8239, 8257), 'numpy.sin', 'np.sin', (['input_data'], {}), '(input_data)\n', (8245, 8257), True, 'import numpy as np\n'), ((10941, 10976), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (10949, 10976), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((11164, 11199), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (11172, 11199), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((1022, 1047), 'metric_learn.Covariance', 'metric_learn.Covariance', ([], {}), '()\n', 
(1045, 1047), False, 'import metric_learn\n'), ((1607, 1651), 'metric_learn.LMNN', 'metric_learn.LMNN', ([], {'convergence_tol': '(0.01)', 'k': '(6)'}), '(convergence_tol=0.01, k=6)\n', (1624, 1651), False, 'import metric_learn\n'), ((2047, 2076), 'metric_learn.NCA', 'metric_learn.NCA', ([], {'max_iter': '(42)'}), '(max_iter=42)\n', (2063, 2076), False, 'import metric_learn\n'), ((2414, 2436), 'metric_learn.LFDA', 'metric_learn.LFDA', ([], {'k': '(2)'}), '(k=2)\n', (2431, 2436), False, 'import metric_learn\n'), ((2861, 2889), 'metric_learn.ITML', 'metric_learn.ITML', ([], {'gamma': '(0.5)'}), '(gamma=0.5)\n', (2878, 2889), False, 'import metric_learn\n'), ((3350, 3397), 'metric_learn.ITML_Supervised', 'metric_learn.ITML_Supervised', ([], {'num_constraints': '(7)'}), '(num_constraints=7)\n', (3378, 3397), False, 'import metric_learn\n'), ((3768, 3794), 'metric_learn.LSML', 'metric_learn.LSML', ([], {'tol': '(0.1)'}), '(tol=0.1)\n', (3785, 3794), False, 'import metric_learn\n'), ((4235, 4277), 'metric_learn.LSML_Supervised', 'metric_learn.LSML_Supervised', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4263, 4277), False, 'import metric_learn\n'), ((4685, 4716), 'metric_learn.SDML', 'metric_learn.SDML', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4702, 4716), False, 'import metric_learn\n'), ((5160, 5208), 'metric_learn.SDML_Supervised', 'metric_learn.SDML_Supervised', ([], {'sparsity_param': '(0.5)'}), '(sparsity_param=0.5)\n', (5188, 5208), False, 'import metric_learn\n'), ((5496, 5528), 'metric_learn.RCA', 'metric_learn.RCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (5512, 5528), False, 'import metric_learn\n'), ((5877, 5918), 'metric_learn.RCA_Supervised', 'metric_learn.RCA_Supervised', ([], {'num_chunks': '(5)'}), '(num_chunks=5)\n', (5904, 5918), False, 'import metric_learn\n'), ((6327, 6358), 'metric_learn.MLKR', 'metric_learn.MLKR', ([], {'max_iter': '(777)'}), '(max_iter=777)\n', (6344, 6358), False, 'import metric_learn\n'), ((6845, 
6876), 'metric_learn.MMC', 'metric_learn.MMC', ([], {'diagonal': '(True)'}), '(diagonal=True)\n', (6861, 6876), False, 'import metric_learn\n'), ((7388, 7427), 'metric_learn.MMC_Supervised', 'metric_learn.MMC_Supervised', ([], {'max_iter': '(1)'}), '(max_iter=1)\n', (7415, 7427), False, 'import metric_learn\n'), ((11445, 11480), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (11453, 11480), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n'), ((11760, 11795), 'test.test_utils.remove_y', 'remove_y', (['model', 'input_data', 'labels'], {}), '(model, input_data, labels)\n', (11768, 11795), False, 'from test.test_utils import ids_metric_learners, metric_learners, remove_y\n')] |
import numpy as np
import pandas as pd
import scipy as sc
from scipy.stats import randint, norm, multivariate_normal, ortho_group
from scipy import linalg
from scipy.linalg import subspace_angles, orth
from scipy.optimize import fmin
import math
from statistics import mean
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import itertools as it
import seaborn as sns
import matplotlib.pyplot as plt
from cluster.selfrepresentation import ElasticNetSubspaceClustering
import time
# functions for simulate data
def first_simulation(p, dim, k):
    """Draw k + 1 independent random p x dim bases with orthonormal columns."""
    return [orth(np.random.rand(p, dim)) for _ in range(k + 1)]
def find_theta_max(b, t, k):
    """Return t times the mean, over all pairs of bases b[1..k], of the
    largest principal angle between the pair's subspaces."""
    largest_angles = [
        subspace_angles(b[i], b[j]).max()
        for i in range(1, k + 1)
        for j in range(1, i)
    ]
    return mean(largest_angles) * t
def second_simulation(p, k, dim, theta, b):
    """Blend each basis b[i] (i >= 1) with the common basis b[0] so that the
    mean of the largest principal angles between the blended subspaces
    equals the target `theta`.

    The mixing weight `a` is found by bisection on [0, 1]; this assumes the
    objective below changes sign on that interval -- TODO confirm for all t.
    """
    def find_a_for_theta(a, b=b, k=k, theta=theta):
        # Mean largest principal angle over all pairs, minus the target.
        temp_theta = []
        for i in range(1, k + 1):
            for j in range(1, i):
                temp_theta.append(subspace_angles(b[0] * (1 - a) + b[i] * a, b[0] * (1 - a) + b[j] * a).max())
        return mean(temp_theta) - theta
    a = sc.optimize.bisect(find_a_for_theta, 0, 1)
    # Return the k blended bases (b[0] is the shared component).
    B = [b[0] * (1 - a) + b[i] * a for i in range(1, k + 1)]
    return B
def third_simulation(n, p, dim, B, k, theta):
    """Sample n points: each point gets a uniform random cluster z[i], latent
    coordinates w[i] in that cluster's dim-dimensional subspace, and unit
    Gaussian noise in the ambient p-dimensional space."""
    z = np.random.randint(0, k, n)  # cluster label per sample
    # Latent within-subspace coordinates, standard normal in dim dimensions.
    w = np.random.multivariate_normal(mean=np.zeros(dim), cov=np.diag(np.ones(dim)), size=n)
    X = np.zeros((n, p))
    for i in range(n):
        # Embed w[i] into ambient space via the cluster basis, add unit noise.
        X[i,] = np.random.multivariate_normal(mean=np.array(np.dot(np.array(w[i, :]), B[z[i]].T)).flatten(),
                                            cov=np.diag(np.ones(p)))  # sigma value is missing
    return n, p, dim, theta, X, z, B
# data simulation
def final_data_simulation(k):
    """Simulate datasets over a grid of (p, dim, theta, n) settings.

    Args:
        k: number of subspaces/clusters per dataset.
    Returns:
        A DataFrame with one row per setting and columns
        ['n', 'p', 'dim', 'theta', 'X', 'z', 'B'].
    """
    nn = [2 ** j for j in range(3, 11)]   # sample sizes
    pp = [2 ** j for j in range(4, 8)]    # ambient dimensions
    dd = [2 ** -j for j in range(1, 5)]   # subspace-dimension fractions of p
    tt = [10 ** -j for j in range(0, 3)]  # angle scale factors
    rows = []
    for p in pp:
        for d in dd:
            dim = int(d * p)
            b = first_simulation(p=p, dim=dim, k=k)
            for t in tt:
                theta = find_theta_max(b=b, t=t, k=k)
                for n in nn:
                    B = second_simulation(p=p, k=k, dim=dim, theta=theta, b=b)
                    rows.append(third_simulation(n=n, p=p, dim=dim, B=B, k=k, theta=theta)[0:7])
    # DataFrame.append was removed in pandas 2.0; build all rows in one go.
    return pd.DataFrame(rows, columns=['n', 'p', 'dim', 'theta', 'X', 'z', 'B'])
# Exploratory/scratch code: inspect one simulated dataset and try out the
# best-permutation accuracy computation by hand.
df = final_data_simulation(4)
X = df['X'][31]
z = df['z'][31]
z
dim = 4
p = 16
k = 4
kmeans = KMeans(n_clusters=k)
kmeans
temp_df = pd.DataFrame(X)
temp_df['cluster'] = kmeans.fit_predict(X)
# for i in range(k) :
i = 1
# Rows of cluster i, without the label column.
df_new = temp_df[temp_df['cluster'] == i].drop(['cluster'], axis=1)
cluster_kmean = KMeans(n_clusters=k).fit_predict(X)
data = {'cluster1': z, 'cluster2': cluster_kmean}
clusters = pd.DataFrame(data, index=range(len(z)))
# Try every relabeling of the k-means labels and count label agreements.
all_per = list(it.permutations(range(k)))
accuracy_rate_all_per = np.zeros(len(all_per))
c = [i for i in range(k)]
for l, p in enumerate(all_per):
    dic = dict(zip(c, p))
    clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
    # Confusion table; the trace counts samples where the labels agree.
    m = clusters.groupby(['cluster1', 'premut_cluster']).size().unstack(fill_value=0)
    accuracy_rate_all_per[l] = np.trace(m)
accuracy_rate_all_per.max(), len(cluster_kmean)
# Inspect one particular permutation in detail.
per = all_per[2]
dic = dict(zip(c, per))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
clusters.groupby(['cluster2', 'premut_cluster']).size()
# find kmeans clusters and subspaces
def pca_subspace(df, i, dim):
    """Fit a PCA basis to the members of cluster i and return it as a
    (p, n_components) matrix of column vectors."""
    members = df[df['cluster'] == i].drop(['cluster'], axis=1)
    # PCA can extract at most n_samples - 1 components, so shrink dim if needed.
    n_components = dim if len(members) >= dim else len(members) - 1
    pca = PCA(n_components=n_components)
    pca.fit_transform(members)
    return pca.components_.T
def find_kmeans_subspace(X, k, dim):
    """Cluster X with k-means and return one PCA subspace basis per cluster."""
    clustered = pd.DataFrame(X)
    clustered['cluster'] = KMeans(n_clusters=k).fit_predict(X)
    return [pca_subspace(clustered, i, dim) for i in range(k)]
def find_ensc_subspace(X, k, dim):
    """Cluster X with elastic-net subspace clustering and return one PCA
    subspace basis per cluster."""
    clustered = pd.DataFrame(X)
    # NOTE(review): this stores the fitted estimator object (not its labels_)
    # in the 'cluster' column, which pca_subspace then compares against ints --
    # looks suspect; confirm whether .fit(X.T).labels_ was intended.
    clustered['cluster'] = ElasticNetSubspaceClustering(
        n_clusters=k, algorithm='lasso_lars', gamma=50).fit(X.T)
    return [pca_subspace(clustered, i, dim) for i in range(k)]
# Recovery Performance
def performance_measure1(k, B1, B2):
    """Subspace-recovery score: over all assignments of estimated to true
    subspaces, the best sum of squared cosines of the largest principal
    angle between matched subspaces (k at perfect recovery)."""
    perms = list(it.permutations(range(k)))
    scores = np.zeros(len(perms))
    for idx, assignment in enumerate(perms):
        for i in range(k):
            estimate = B2[assignment[i]]
            if estimate.shape[1] > 0:  # skip empty clusters (no basis columns)
                worst = subspace_angles(B1[i], estimate).max()
                scores[idx] += math.cos(worst) ** 2
    return scores.max()
# TODO: decide how empty clusters should be handled here.
def performance_measure2(k, cluster1, cluster2):
    """Clustering accuracy: the agreement rate between the two labelings
    under the best relabeling (permutation) of cluster2."""
    clusters = pd.DataFrame({'cluster1': cluster1, 'cluster2': cluster2},
                            index=range(len(cluster1)))
    labels = list(range(k))
    perms = list(it.permutations(labels))
    matches = np.zeros(len(perms))
    for idx, perm in enumerate(perms):
        relabel = dict(zip(labels, perm))
        clusters['premut_cluster'] = clusters['cluster2'].transform(
            lambda x: relabel[x] if x in relabel else None)
        # Confusion table; the trace counts samples where labels agree.
        confusion = clusters.groupby(['cluster1', 'premut_cluster']).size().unstack(fill_value=0)
        matches[idx] = np.trace(confusion)
    return matches.max() / len(cluster1)
def all_process(k):
    """Simulate the full grid of datasets and attach the k-means subspace
    estimates and cluster labels as extra columns."""
    df = final_data_simulation(k)
    df['B_kmean'] = df.apply(lambda row: find_kmeans_subspace(row['X'], k, row['dim']), axis=1)
    # try to return the clusters in "find_kmeans_subspace"
    df['cluster_kmean'] = df.apply(lambda row: KMeans(n_clusters=k).fit_predict(row['X']),
                                   axis=1)
    # df['B_ensc'] = df.apply(lambda x: find_ensc_subspace(x['X'], k, x['dim']), axis=1)
    # df['cluster_ensc']=df.apply(lambda x: ElasticNetSubspaceClustering(n_clusters=k,algorithm='lasso_lars',gamma=50).fit(x['X'].T), axis=1)
    return df
# Run the whole pipeline twice and average the two performance measures
# (one inserted column per repetition).
measure1_kmean = pd.DataFrame()
measure2_kmean = pd.DataFrame()
k = 4
for iter in range(2):
    df = all_process(k)
    measure1_kmean.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_kmean']), axis=1), True)
    measure2_kmean.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_kmean']), axis=1),
                          True)
    # measure1_ensc.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_ensc']), axis=1), True)
    # measure2_ensc.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_ensc']), axis=1), True)
# Row-wise mean over the repetitions, attached to the last simulated df.
df['measure1_kmean'] = measure1_kmean.apply(lambda x: mean(x), axis=1)
df['measure2_kmean'] = measure2_kmean.apply(lambda x: mean(x), axis=1)
# df['measure1_ensc'] = measure1_ensc.apply(lambda x: mean(x), axis=1)
# df['measure2_ensc'] = measure2_ensc.apply(lambda x: mean(x), axis=1)
# Angle in degrees, used as the heatmap row axis when plotting.
df['theta_degree'] = df.apply(lambda x: math.degrees(x['theta']), axis=1)
# plotting
def plotting_performance_measure(df, measure):
    """Draw a 4x4 grid of heatmaps of `measure` over theta_degree (rows) and
    n (columns), one panel per (p, dim) combination."""
    p_values = [2 ** j for j in range(4, 8)]
    dim_fractions = [2 ** -j for j in range(1, 5)]
    plt.title("PERFORMANCE MEASURE1 - KMEANS")
    panel = 1
    for p in p_values:
        for frac in dim_fractions:
            dim = int(frac * p)
            subset = df[(df['p'] == p) & (df['dim'] == dim)]
            heat = subset.pivot("theta_degree", "n", measure)
            plt.subplot(4, 4, panel)
            sns.heatmap(heat)
            plt.title('p= {p} ,dim= {dim} '.format(p=p, dim=dim))
            panel += 1
# Plot the k-means performance measures.
plotting_performance_measure(df, "measure1_kmean")
plotting_performance_measure(df, "measure2_kmean")
# The ENSC measures are only computed when the ENSC branch of all_process()
# is enabled (it is commented out above), so guard against the missing
# columns instead of crashing with a KeyError in DataFrame.pivot.
if "measure1_ensc" in df.columns:
    plotting_performance_measure(df, "measure1_ensc")
if "measure2_ensc" in df.columns:
    plotting_performance_measure(df, "measure2_ensc")
| [
"sklearn.cluster.KMeans",
"statistics.mean",
"numpy.trace",
"scipy.optimize.bisect",
"numpy.random.rand",
"cluster.selfrepresentation.ElasticNetSubspaceClustering",
"numpy.ones",
"sklearn.decomposition.PCA",
"math.degrees",
"seaborn.heatmap",
"numpy.array",
"numpy.random.randint",
"numpy.zer... | [((2804, 2824), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (2810, 2824), False, 'from sklearn.cluster import KMeans\n'), ((2842, 2857), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (2854, 2857), True, 'import pandas as pd\n'), ((6547, 6561), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6559, 6561), True, 'import pandas as pd\n'), ((6579, 6593), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6591, 6593), True, 'import pandas as pd\n'), ((859, 874), 'statistics.mean', 'mean', (['theta_max'], {}), '(theta_max)\n', (863, 874), False, 'from statistics import mean\n'), ((1272, 1314), 'scipy.optimize.bisect', 'sc.optimize.bisect', (['find_a_for_theta', '(0)', '(1)'], {}), '(find_a_for_theta, 0, 1)\n', (1290, 1314), True, 'import scipy as sc\n'), ((1445, 1471), 'numpy.random.randint', 'np.random.randint', (['(0)', 'k', 'n'], {}), '(0, k, n)\n', (1462, 1471), True, 'import numpy as np\n'), ((1573, 1589), 'numpy.zeros', 'np.zeros', (['(n, p)'], {}), '((n, p))\n', (1581, 1589), True, 'import numpy as np\n'), ((2075, 2138), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['n', 'p', 'dim', 'theta', 'X', 'z', 'B']"}), "(columns=['n', 'p', 'dim', 'theta', 'X', 'z', 'B'])\n", (2087, 2138), True, 'import pandas as pd\n'), ((3544, 3555), 'numpy.trace', 'np.trace', (['m'], {}), '(m)\n', (3552, 3555), True, 'import numpy as np\n'), ((4058, 4097), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'pca_components_number'}), '(n_components=pca_components_number)\n', (4061, 4097), False, 'from sklearn.decomposition import PCA\n'), ((4233, 4253), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (4239, 4253), False, 'from sklearn.cluster import KMeans\n'), ((4268, 4283), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (4280, 4283), True, 'import pandas as pd\n'), ((4465, 4480), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (4477, 4480), 
True, 'import pandas as pd\n'), ((7647, 7689), 'matplotlib.pyplot.title', 'plt.title', (['"""PERFORMANCE MEASURE1 - KMEANS"""'], {}), "('PERFORMANCE MEASURE1 - KMEANS')\n", (7656, 7689), True, 'import matplotlib.pyplot as plt\n'), ((3013, 3033), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (3019, 3033), False, 'from sklearn.cluster import KMeans\n'), ((5850, 5861), 'numpy.trace', 'np.trace', (['m'], {}), '(m)\n', (5858, 5861), True, 'import numpy as np\n'), ((7201, 7208), 'statistics.mean', 'mean', (['x'], {}), '(x)\n', (7205, 7208), False, 'from statistics import mean\n'), ((7272, 7279), 'statistics.mean', 'mean', (['x'], {}), '(x)\n', (7276, 7279), False, 'from statistics import mean\n'), ((7471, 7495), 'math.degrees', 'math.degrees', (["x['theta']"], {}), "(x['theta'])\n", (7483, 7495), False, 'import math\n'), ((605, 627), 'numpy.random.rand', 'np.random.rand', (['p', 'dim'], {}), '(p, dim)\n', (619, 627), True, 'import numpy as np\n'), ((1238, 1254), 'statistics.mean', 'mean', (['temp_theta'], {}), '(temp_theta)\n', (1242, 1254), False, 'from statistics import mean\n'), ((1515, 1528), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (1523, 1528), True, 'import numpy as np\n'), ((4506, 4582), 'cluster.selfrepresentation.ElasticNetSubspaceClustering', 'ElasticNetSubspaceClustering', ([], {'n_clusters': 'k', 'algorithm': '"""lasso_lars"""', 'gamma': '(50)'}), "(n_clusters=k, algorithm='lasso_lars', gamma=50)\n", (4534, 4582), False, 'from cluster.selfrepresentation import ElasticNetSubspaceClustering\n'), ((7904, 7924), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(4)', 'i'], {}), '(4, 4, i)\n', (7915, 7924), True, 'import matplotlib.pyplot as plt\n'), ((7942, 7961), 'seaborn.heatmap', 'sns.heatmap', (['sns_df'], {}), '(sns_df)\n', (7953, 7961), True, 'import seaborn as sns\n'), ((1542, 1554), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (1549, 1554), True, 'import numpy as np\n'), ((1780, 1790), 
'numpy.ones', 'np.ones', (['p'], {}), '(p)\n', (1787, 1790), True, 'import numpy as np\n'), ((6142, 6162), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (6148, 6162), False, 'from sklearn.cluster import KMeans\n'), ((804, 831), 'scipy.linalg.subspace_angles', 'subspace_angles', (['b[i]', 'b[j]'], {}), '(b[i], b[j])\n', (819, 831), False, 'from scipy.linalg import subspace_angles, orth\n'), ((1146, 1215), 'scipy.linalg.subspace_angles', 'subspace_angles', (['(b[0] * (1 - a) + b[i] * a)', '(b[0] * (1 - a) + b[j] * a)'], {}), '(b[0] * (1 - a) + b[i] * a, b[0] * (1 - a) + b[j] * a)\n', (1161, 1215), False, 'from scipy.linalg import subspace_angles, orth\n'), ((1680, 1697), 'numpy.array', 'np.array', (['w[i, :]'], {}), '(w[i, :])\n', (1688, 1697), True, 'import numpy as np\n'), ((5046, 5080), 'scipy.linalg.subspace_angles', 'subspace_angles', (['B1[i]', 'B2[val[i]]'], {}), '(B1[i], B2[val[i]])\n', (5061, 5080), False, 'from scipy.linalg import subspace_angles, orth\n')] |
import numba as nb
import numpy as np
import torch
from torch.autograd import Function
from Constants import MPS_KERNEL as w
from Constants import BASE_RADIUS, ND_RAIUS, GRAD_RADIUS, LAP_RADIUS
class DivOp(Function):
    """Compute the divergence of a given physics value.
    Implement in terms of pytorch autograd function because we need to minimize the
    compressibility during training"""
    @staticmethod
    def forward(ctx, val, Adj_arr, N0):
        # val: (n, 3) per-particle vector field (tensor or ndarray).
        # Adj_arr: sparse adjacency operator, indexable per axis as Adj_arr[dim].
        # N0: reference particle number density -- presumably the MPS n0; confirm.
        if not isinstance(val, torch.Tensor):
            val = torch.from_numpy(val)
        A = Adj_arr.clone() * (3. / N0)  # scale by the divergence coefficient 3/N0
        # NOTE(review): `require_grad` is a typo for `requires_grad`; this line
        # only sets an unused attribute and has no autograd effect.
        val.require_grad = True
        div_val = torch.zeros((val.size(0), 1), dtype=torch.float32)
        ctx.save_for_backward(A)
        # Accumulate the contribution of each spatial axis into one (n, 1) column.
        for dim in range(3):
            sliced_val = val[:, dim].view(-1, 1)
            div_val += torch.sparse.mm(A[dim], sliced_val).view(-1, 1)
        return div_val
    @staticmethod
    def backward(ctx, grad_input):
        # grad_input: gradient w.r.t. the (n, 1) divergence output.
        # NOTE(review): the result of .double() is discarded; likely intended
        # grad_input = grad_input.double().
        grad_input.double()
        A, = ctx.saved_tensors
        grad_output = []
        for dim in range(3):
            # NOTE(review): applies A rather than A^T; exact only if each
            # A[dim] is symmetric up to the intended sign -- confirm.
            grad_output += [torch.sparse.mm(
                A[dim], grad_input).view(-1, 1)]
        grad_output = torch.stack(grad_output).squeeze().view(-1, 3)
        # forward took (val, Adj_arr, N0): only val receives a gradient.
        return grad_output, None, None
class LapOp(Function):
    """Laplacian of a physics value: applies the operator (2*3)/(N0*lam) * A."""
    @staticmethod
    def forward(ctx, val, Adj_arr, N0, lam):
        """Apply the scaled adjacency operator to `val`.

        Args:
            val: (n, d) tensor (or ndarray) of particle values.
            Adj_arr: sparse (n, n) adjacency/weight matrix.
            N0: reference particle number density.
            lam: Laplacian model coefficient (lambda).
        Returns:
            (n, d) tensor with the Laplacian model applied.
        """
        if not isinstance(val, torch.Tensor):
            val = torch.from_numpy(val)
        # Laplacian coefficient 2*d/(N0*lambda) with d = 3 spatial dimensions.
        A = Adj_arr * (2. * 3.) / (N0 * lam)
        out = torch.sparse.mm(A, val)
        ctx.save_for_backward(A)
        return out
    @staticmethod
    def backward(ctx, grad_input):
        A, = ctx.saved_tensors
        # NOTE(review): applies A rather than A^T; exact only if A is symmetric.
        grad_output = torch.sparse.mm(A, grad_input)
        # forward() takes 4 inputs (val, Adj_arr, N0, lam), so backward must
        # return exactly 4 gradients. The original returned 5, which makes
        # autograd raise "returned an incorrect number of gradients".
        return grad_output, None, None, None
# Functional aliases so callers can invoke the autograd ops directly,
# e.g. Divergence(val, Adj_arr, N0) and Laplacian(val, Adj_arr, N0, lam).
Divergence = DivOp.apply
Laplacian = LapOp.apply
class GradientOp(object):
    """Per-axis gradient operator: (3/N0) * (A[dim] @ val - A_diag[dim] @ val_min)."""
    @staticmethod
    def forward(val, val_min, A, A_diag, N0, to_numpy=True):
        """Return an (n, 3) gradient field for the scalar field `val`.

        Args:
            val: (n,) scalar field (tensor or ndarray).
            val_min: (n,) tensor of per-particle reference minima.
            A, A_diag: per-axis sparse operators, indexable by dim in 0..2.
            N0: reference particle number density.
            to_numpy: if True, return a detached numpy array.
        """
        if not isinstance(val, torch.Tensor):
            val = torch.from_numpy(val)
        column = val.float().view(-1, 1)
        column_min = val_min.view(-1, 1)
        grad_val = torch.zeros((column.size(0), 3), dtype=torch.float32)
        scale = 3. / N0
        for axis in range(3):
            diff = torch.sparse.mm(A[axis], column) - torch.sparse.mm(A_diag[axis], column_min)
            grad_val[:, axis] = scale * diff.view(-1)
        return grad_val.detach().numpy() if to_numpy else grad_val
class CollisionOp(object):
    """Collision-response correction based on a restitution coefficient."""
    @staticmethod
    def forward(vel, Adj_arr, coef_rest):
        """Return the velocity correction -((coef_rest + 1)/2) * A @ (A @ vel)."""
        if not isinstance(vel, torch.Tensor):
            vel = torch.from_numpy(vel)
        impulse = -torch.sparse.mm(Adj_arr, vel)
        impulse = impulse * ((coef_rest + 1.0) / 2.0)
        return torch.sparse.mm(Adj_arr, impulse)
class SumOp(object):
    """Row sums of a sparse operator, computed as A @ 1."""
    @staticmethod
    def forward(Adj_arr, device='cpu', to_numpy=True):
        """Return the per-row sum of Adj_arr as an (n, 1) numpy array or tensor."""
        operator = Adj_arr.clone()
        ones = torch.ones((operator.size(0), 1), dtype=torch.float32).to(device)
        row_sums = torch.sparse.mm(operator, ones)
        return row_sums.cpu().numpy() if to_numpy else row_sums
| [
"torch.zeros_like",
"torch.stack",
"torch.from_numpy",
"torch.sparse.mm"
] | [((1501, 1524), 'torch.sparse.mm', 'torch.sparse.mm', (['A', 'val'], {}), '(A, val)\n', (1516, 1524), False, 'import torch\n'), ((1712, 1742), 'torch.sparse.mm', 'torch.sparse.mm', (['A', 'grad_input'], {}), '(A, grad_input)\n', (1727, 1742), False, 'import torch\n'), ((2704, 2725), 'torch.zeros_like', 'torch.zeros_like', (['vel'], {}), '(vel)\n', (2720, 2725), False, 'import torch\n'), ((2741, 2770), 'torch.sparse.mm', 'torch.sparse.mm', (['Adj_arr', 'vel'], {}), '(Adj_arr, vel)\n', (2756, 2770), False, 'import torch\n'), ((2831, 2860), 'torch.sparse.mm', 'torch.sparse.mm', (['Adj_arr', 'fdt'], {}), '(Adj_arr, fdt)\n', (2846, 2860), False, 'import torch\n'), ((3096, 3117), 'torch.sparse.mm', 'torch.sparse.mm', (['A', 'I'], {}), '(A, I)\n', (3111, 3117), False, 'import torch\n'), ((528, 549), 'torch.from_numpy', 'torch.from_numpy', (['val'], {}), '(val)\n', (544, 549), False, 'import torch\n'), ((1422, 1443), 'torch.from_numpy', 'torch.from_numpy', (['val'], {}), '(val)\n', (1438, 1443), False, 'import torch\n'), ((2016, 2037), 'torch.from_numpy', 'torch.from_numpy', (['val'], {}), '(val)\n', (2032, 2037), False, 'import torch\n'), ((2668, 2689), 'torch.from_numpy', 'torch.from_numpy', (['vel'], {}), '(vel)\n', (2684, 2689), False, 'import torch\n'), ((825, 860), 'torch.sparse.mm', 'torch.sparse.mm', (['A[dim]', 'sliced_val'], {}), '(A[dim], sliced_val)\n', (840, 860), False, 'import torch\n'), ((1091, 1126), 'torch.sparse.mm', 'torch.sparse.mm', (['A[dim]', 'grad_input'], {}), '(A[dim], grad_input)\n', (1106, 1126), False, 'import torch\n'), ((1184, 1208), 'torch.stack', 'torch.stack', (['grad_output'], {}), '(grad_output)\n', (1195, 1208), False, 'import torch\n'), ((2327, 2355), 'torch.sparse.mm', 'torch.sparse.mm', (['A[dim]', 'val'], {}), '(A[dim], val)\n', (2342, 2355), False, 'import torch\n'), ((2358, 2395), 'torch.sparse.mm', 'torch.sparse.mm', (['A_diag[dim]', 'val_min'], {}), '(A_diag[dim], val_min)\n', (2373, 2395), False, 'import torch\n')] |
# LinearRegression.py
# March 2018
#
# This script builds a Linear regression class to analyse data.
# It supports a continuous response and several continuous features.
# The class has a constructor building and fitting the model, and
# a plotting method for residuals.
#
# Dependencies:
#
# Usage:
# from pythia.LinearRegression import LinearRegression
# lm = LinearRegression(X,y)
# print(lm.weights)
# plot_pythia(lm)
## Imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
import pandas as pd
import numpy as np
import numpy.random as random
## The LinearRegression class
class LinearRegression:
    """
    LinearRegression is a class performing a linear regression on a data frame
    containing continuous features.
    Its attributes are the coefficients estimates, the fitted values
    and the residuals from fitting a linear regression of y on X.
    Args:
        X: a pandas.DataFrame containing the features (numeric columns are used)
        y: a pandas.Series (or array-like) of same length containing the response
    Attributes:
        weights: a dict mapping feature name (plus 'intercept') to its estimate
        fitted: a numpy array of fitted values, shape (n, 1)
        residuals: a numpy array of residuals, shape (n, 1)
    """
    def __init__(self, X, y):
        # Keep only the numeric features; the model cannot use the others.
        X_mat = X.select_dtypes(include=[np.number], exclude=None)
        if X_mat.shape[1] == 0:
            raise NameError("You need at least one continuous features")
        try:
            for var in X_mat.columns:
                assert np.all(X_mat[[var]].notnull())
        except AssertionError:
            raise NameError("Some of your numeric features contain missing values. Please deal with them (remove, impute...) before using this function.")
        else:
            # Work on a copy so the caller's frame is not mutated.
            X_mat = X_mat.copy()
            # Add an intercept column and convert the frame to a matrix.
            n = X_mat.shape[0]
            X_mat['intercept'] = pd.Series(np.ones(n), index=X_mat.index)
            names = X_mat.columns
            # DataFrame.as_matrix() was removed from pandas; use to_numpy().
            X_arr = X_mat.to_numpy()
            # Bug fix: the response length was hard-coded to 10; use (-1, 1)
            # so any sample size works.
            y = np.array(y).reshape((-1, 1))
            # Solve the least-squares problem in closed form instead of the
            # slow hand-rolled gradient descent: exact, fast and stable.
            weights, *_ = np.linalg.lstsq(X_arr, y, rcond=None)
            self.weights = {names[i]: weights[i, 0] for i in range(len(weights))}
            # Fitted values and residuals of the regression.
            self.fitted = np.dot(X_arr, weights)
            self.residuals = y - self.fitted
    def plot_residuals(self):
        """
        This script makes various diagnostic plots for linear regression analysis.
        It supports a continuous response and several continuous features.
        Returns:
            Residuals vs Fitted Plot
            Normal Q-Q Plot
        """
        assert len(self.residuals) > 0, "There are no residuals"
        assert len(self.fitted) > 0, "There are no fitted values"
        assert len(self.residuals) == len(self.fitted), "The number of residuals and fitted values do not match"
        residuals = self.residuals.flatten()
        fitted = self.fitted.flatten()
        # Residuals vs fitted values.
        plt.figure(figsize=(10, 6))
        plt.scatter(fitted, residuals, color='grey')
        plt.axhline(y=0, linewidth=1, color='red')
        plt.xlabel('Fitted Values')
        plt.ylabel('Residuals')
        plt.title('Residuals vs. Fitted Values')
        resfit = plt.show()
        # Normal Q-Q plot: sorted residuals against sorted standard-normal draws.
        res = np.asarray(residuals)
        res.sort()
        ndist = random.normal(loc=0, scale=1, size=len(res))
        ndist.sort()
        # Fit a straight trendline through the Q-Q points.
        fit = np.polyfit(ndist, res, 1)
        func = np.poly1d(fit.tolist())
        trendline_y = func(ndist)
        plt.figure(figsize=(10, 6))
        plt.scatter(ndist, res, color='grey')
        plt.plot(ndist, trendline_y, color='red')
        plt.title("Normal QQ Plot")
        plt.xlabel("Theoretical quantiles")
        plt.ylabel("Experimental quantiles")  # typo "Expreimental" fixed
        qqplot = plt.show()
        return (resfit, qqplot)
| [
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"numpy.array",
"numpy.poly1d",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.axhline",
"numpy.dot",
"matplotlib.pyplot.scatter",
"numpy.abs",
"numpy.ones",
"matplotlib.use",
"matplotlib.pyplot.title",
... | [((468, 489), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (482, 489), False, 'import matplotlib\n'), ((563, 583), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (578, 583), False, 'import os\n'), ((604, 626), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (619, 626), False, 'import os\n'), ((4297, 4324), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4307, 4324), True, 'import matplotlib.pyplot as plt\n'), ((4332, 4376), 'matplotlib.pyplot.scatter', 'plt.scatter', (['fitted', 'residuals'], {'color': '"""grey"""'}), "(fitted, residuals, color='grey')\n", (4343, 4376), True, 'import matplotlib.pyplot as plt\n'), ((4386, 4428), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'linewidth': '(1)', 'color': '"""red"""'}), "(y=0, linewidth=1, color='red')\n", (4397, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4443, 4470), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fitted Values"""'], {}), "('Fitted Values')\n", (4453, 4470), True, 'import matplotlib.pyplot as plt\n'), ((4479, 4502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Residuals"""'], {}), "('Residuals')\n", (4489, 4502), True, 'import matplotlib.pyplot as plt\n'), ((4511, 4551), 'matplotlib.pyplot.title', 'plt.title', (['"""Residuals vs. Fitted Values"""'], {}), "('Residuals vs. 
Fitted Values')\n", (4520, 4551), True, 'import matplotlib.pyplot as plt\n'), ((4569, 4579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4577, 4579), True, 'import matplotlib.pyplot as plt\n'), ((4620, 4641), 'numpy.asarray', 'np.asarray', (['residuals'], {}), '(residuals)\n', (4630, 4641), True, 'import numpy as np\n'), ((4836, 4861), 'numpy.polyfit', 'np.polyfit', (['ndist', 'res', '(1)'], {}), '(ndist, res, 1)\n', (4846, 4861), True, 'import numpy as np\n'), ((4904, 4918), 'numpy.poly1d', 'np.poly1d', (['fit'], {}), '(fit)\n', (4913, 4918), True, 'import numpy as np\n'), ((4962, 4989), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4972, 4989), True, 'import matplotlib.pyplot as plt\n'), ((4997, 5034), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ndist', 'res'], {'color': '"""grey"""'}), "(ndist, res, color='grey')\n", (5008, 5034), True, 'import matplotlib.pyplot as plt\n'), ((5045, 5086), 'matplotlib.pyplot.plot', 'plt.plot', (['ndist', 'trendline_y'], {'color': '"""red"""'}), "(ndist, trendline_y, color='red')\n", (5053, 5086), True, 'import matplotlib.pyplot as plt\n'), ((5097, 5124), 'matplotlib.pyplot.title', 'plt.title', (['"""Normal QQ Plot"""'], {}), "('Normal QQ Plot')\n", (5106, 5124), True, 'import matplotlib.pyplot as plt\n'), ((5133, 5168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Theoretical quantiles"""'], {}), "('Theoretical quantiles')\n", (5143, 5168), True, 'import matplotlib.pyplot as plt\n'), ((5177, 5213), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expreimental quantiles"""'], {}), "('Expreimental quantiles')\n", (5187, 5213), True, 'import matplotlib.pyplot as plt\n'), ((5231, 5241), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5239, 5241), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3179), 'numpy.dot', 'np.dot', (['X_mat', 'weights'], {}), '(X_mat, weights)\n', (3163, 3179), True, 'import numpy as np\n'), ((2092, 2102), 'numpy.ones', 
'np.ones', (['n'], {}), '(n)\n', (2099, 2102), True, 'import numpy as np\n'), ((2242, 2253), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2250, 2253), True, 'import numpy as np\n'), ((2468, 2487), 'numpy.transpose', 'np.transpose', (['X_mat'], {}), '(X_mat)\n', (2480, 2487), True, 'import numpy as np\n'), ((2611, 2620), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2617, 2620), True, 'import numpy as np\n'), ((2700, 2711), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (2708, 2711), True, 'import numpy as np\n'), ((2489, 2505), 'numpy.dot', 'np.dot', (['X_mat', 'w'], {}), '(X_mat, w)\n', (2495, 2505), True, 'import numpy as np\n')] |
# encoding: UTF-8
"""Physics Applications Utility.

Convenience module for physics application scripts: re-exports the public
names of ``machine`` and ``phylib.libCore`` via wildcard imports and
configures the root logger's message format.
"""
__copyright__ = "Copyright (c) 2015, Facility for Rare Isotope Beams"
__author__ = "<NAME>"
__version__ = "0.0.1"
import logging
import phylib
import machine
# NOTE(review): wildcard imports re-export everything public from these
# modules; consider declaring __all__ to pin the intended API.
from machine import *
from phylib.libCore import *
# configure the root logger
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(name)s: %(message)s")
| [
"logging.basicConfig"
] | [((295, 374), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(asctime)s: %(name)s: %(message)s"""'}), "(format='%(levelname)s: %(asctime)s: %(name)s: %(message)s')\n", (314, 374), False, 'import logging\n')] |
#!python3
#
# Copyright (C) 2014-2015 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
PYPOWER-Dynamics
Functions for standard blocks (solves a step)
"""
import numpy as np
def gain_block(yi, p):
    """Gain block: scale the input by a scalar gain coefficient.

    yo = p * yi

    NOTE(review): a second ``gain_block`` (the divide block) is defined
    further down in this module and shadows this definition at import
    time -- confirm which behavior callers actually rely on.
    """
    return yi * p
# Divide block
# yo = yi / p
# p is a scalar divisor coefficient
#
# BUG NOTE: this redefinition reuses the name of the multiplicative
# gain_block defined above and shadows it at import time, so after import
# gain_block() performs division. The duplicate name should be resolved
# (e.g. renamed to div_block), but the name is kept here so existing
# callers are not broken.
def gain_block(yi, p):
    """Divide block: yo = yi / p.

    Guards against division by zero: when p == 0 the division is skipped,
    an error message is printed, and the input is passed through unchanged.
    """
    if p != 0:
        yo = yi / p
    else:
        print('Error: division by zero, ignoring dividion operation')
        yo = yi
    return yo
def int_block(h, x0, yi, p):
    """Integrator block K / sT, advanced one explicit-Euler step of size h.

    p = [K, T]; x0 is the current state and yi the block input.
    Returns (yo, x1, f): the output, the updated state and the state
    derivative. For a pure integrator the output equals the state.
    """
    deriv = yi * p[0] / p[1]
    new_state = x0 + h * deriv
    return new_state, new_state, deriv
def lag_block(h, x0, yi, p):
    """First-order lag block K / (1 + sT), one explicit-Euler step of size h.

    p = [K, T]; x0 is the current state and yi the block input.
    Returns (yo, x1, f): the scaled output K * x1, the updated state and
    the state derivative.
    """
    deriv = (yi - x0) / p[1]
    new_state = x0 + h * deriv
    return p[0] * new_state, new_state, deriv
def leadlag_block(h, x0, yi, p):
    """Lead-lag block (1 + sTa) / (1 + sTb), one explicit-Euler step of size h.

    p = [Ta, Tb]; x0 is the current state and yi the block input.
    Returns (yo, x1, f): the lead-compensated output, the updated state
    and the state derivative.
    """
    deriv = (yi - x0) / p[1]
    new_state = x0 + h * deriv
    # Keep the original evaluation order (p[0] * (yi - x0) / p[1]) so the
    # floating-point result is bit-identical to the reference form.
    output = new_state + p[0] * (yi - x0) / p[1]
    return output, new_state, deriv
def lim_block(yi, p):
    """Limiter block: clamp the input between a lower and an upper bound.

    p = [min_lim, max_lim]. Returns min_lim when yi falls below it,
    max_lim when yi exceeds it, and yi itself otherwise.
    """
    # Guard clauses preserve the original branch order (lower bound is
    # checked first), which matters if p[0] > p[1].
    if yi < p[0]:
        return p[0]
    if yi > p[1]:
        return p[1]
    return yi
def mult_block(yi):
    """Multiplication block: return the product of all inputs.

    yi = [yi1, yi2, ... yin]; yo = yi1 * yi2 * ... * yin.
    """
    return np.prod(yi)
def sum_block(yi):
    """Summation block: return the sum of all inputs.

    yi = [yi1, yi2, ... yin]; yo = yi1 + yi2 + ... + yin.
    """
    return sum(yi)
def wout_block(h, x0, yi, p):
    """Washout block s / (1 + sT), one explicit-Euler step of size h.

    p is the time constant T; x0 is the current state and yi the input.
    Returns (yo, x1, f): the washed-out output, the updated state and
    the state derivative.
    """
    deriv = (yi - x0) / p
    new_state = x0 + h * deriv
    return (yi - new_state) / p, new_state, deriv
| [
"numpy.prod"
] | [((1560, 1571), 'numpy.prod', 'np.prod', (['yi'], {}), '(yi)\n', (1567, 1571), True, 'import numpy as np\n')] |
from collections import namedtuple
import pytest
from nesta.packages.examples.example_package import some_func
@pytest.fixture
def mocked_row():
    """Pytest fixture returning a factory for lightweight row objects.

    The factory accepts keyword-only ``id`` and ``name`` and returns a
    namedtuple with those two fields, mimicking a database result row.
    """
    def _build(*, id, name):
        row_cls = namedtuple('Row', ['id', 'name'])
        return row_cls(id=id, name=name)
    return _build
class TestSomeFunc:
    """Unit tests for :func:`some_func` from the example package.

    Each test builds a mocked row via the ``mocked_row`` fixture and
    checks the dict returned by ``some_func``; the ``data`` flag is True
    only when the start string occurs in the row's name.
    """
    def test_some_func_returns_true_when_start_string_in_name(self, mocked_row):
        mocked_row = mocked_row(id=1, name='cat')
        assert some_func('cat', mocked_row) == {'my_id': 1, 'data': True}
    def test_some_func_returns_false_when_start_string_not_in_name(self, mocked_row):
        mocked_row = mocked_row(id=2, name='cat')
        assert some_func('dog', mocked_row) == {'my_id': 2, 'data': False}
    def test_some_func_returns_false_when_name_is_none(self, mocked_row):
        # None name must be handled gracefully and reported as no match
        mocked_row = mocked_row(id=3, name=None)
        assert some_func('cat', mocked_row) == {'my_id': 3, 'data': False}
| [
"nesta.packages.examples.example_package.some_func",
"collections.namedtuple"
] | [((197, 230), 'collections.namedtuple', 'namedtuple', (['"""Row"""', "['id', 'name']"], {}), "('Row', ['id', 'name'])\n", (207, 230), False, 'from collections import namedtuple\n'), ((459, 487), 'nesta.packages.examples.example_package.some_func', 'some_func', (['"""cat"""', 'mocked_row'], {}), "('cat', mocked_row)\n", (468, 487), False, 'from nesta.packages.examples.example_package import some_func\n'), ((670, 698), 'nesta.packages.examples.example_package.some_func', 'some_func', (['"""dog"""', 'mocked_row'], {}), "('dog', mocked_row)\n", (679, 698), False, 'from nesta.packages.examples.example_package import some_func\n'), ((869, 897), 'nesta.packages.examples.example_package.some_func', 'some_func', (['"""cat"""', 'mocked_row'], {}), "('cat', mocked_row)\n", (878, 897), False, 'from nesta.packages.examples.example_package import some_func\n')] |
import asyncio
import unittest
import unittest.mock
import shc.misc
from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable
class MiscTests(unittest.TestCase):
    """Tests for the connectable helper objects in ``shc.misc``.

    Covers TwoWayPipe, BreakableSubscription, Hysteresis, FadeStepAdapter
    and ConvertSubscription using the Example* mock connectables from the
    test helpers.
    """
    @async_test
    async def test_two_way_pipe(self) -> None:
        # A value published on one side must reach only the opposite side's
        # subscribers, carrying the pipe endpoint in the origin list.
        pipe = shc.misc.TwoWayPipe(float)
        pub_left = ExampleSubscribable(float)
        pub_right = ExampleSubscribable(float)
        sub_left = ExampleWritable(float)
        sub_right = ExampleWritable(float)
        pipe.connect_left(pub_left)
        sub_left.connect(pipe)
        pipe.connect_right(pub_right)
        pipe.connect_right(sub_right)
        await pub_left.publish(42.0, [self])
        sub_right._write.assert_called_once_with(42.0, [self, pub_left, pipe.right])
        sub_left._write.assert_not_called()
        sub_right._write.reset_mock()
        await pub_right.publish(36.0, [self])
        sub_left._write.assert_called_once_with(36.0, [self, pub_right, pipe.left])
        sub_right._write.assert_not_called()
    @async_test
    async def test_two_way_pipe_concurrent_update(self) -> None:
        # Simultaneous writes from both ends must converge to one value.
        var1 = shc.Variable(int)
        pipe = shc.misc.TwoWayPipe(int).connect_left(var1)
        var2 = shc.Variable(int).connect(pipe.right)
        await asyncio.gather(var1.write(42, []), var2.write(56, []))
        self.assertEqual(await var1.read(), await var2.read())
    @async_test
    async def test_breakable_subscription_simple(self) -> None:
        # Values pass through only while the control value reads True.
        pub = ExampleSubscribable(float)
        control = ExampleReadable(bool, True)
        sub = ExampleWritable(float)
        sub.connect(shc.misc.BreakableSubscription(pub, control))
        await pub.publish(42.0, [self])
        sub._write.assert_called_once_with(42.0, [self, pub, unittest.mock.ANY])
        sub._write.reset_mock()
        control.read.side_effect = (False,)
        await pub.publish(36.0, [self])
        sub._write.assert_not_called()
        sub._write.reset_mock()
        control.read.side_effect = (True,)
        await pub.publish(56.0, [self])
        sub._write.assert_called_once_with(56, unittest.mock.ANY)
    @async_test
    async def test_breakable_subscription_readsubscribable(self) -> None:
        # With Readable+Subscribable sources, re-enabling the control
        # forwards the source's current value (if initialized).
        pub = shc.Variable(float)
        control = shc.Variable(bool, initial_value=False)
        sub = ExampleWritable(float)
        sub.connect(shc.misc.BreakableSubscription(pub, control))
        # pub is uninitialized, so we should not receive anything, when control changes to True
        await control.write(True, [self])
        await asyncio.sleep(0.01)
        sub._write.assert_not_called()
        await pub.write(42.0, [self])
        await asyncio.sleep(0.01)
        sub._write.assert_called_once_with(42.0, [self, pub, unittest.mock.ANY])
        sub._write.reset_mock()
        await control.write(False, [self])
        await pub.write(56.0, [self])
        await asyncio.sleep(0.01)
        sub._write.assert_not_called()
        await control.write(True, [self])
        await asyncio.sleep(0.01)
        sub._write.assert_called_once_with(56.0, [self, control, unittest.mock.ANY])
    @async_test
    async def test_hysteresis(self) -> None:
        # Output switches True above the upper threshold (56.0) and back
        # to False below the lower threshold (42.0); only edges publish.
        pub = ExampleSubscribable(float)
        hystersis = shc.misc.Hysteresis(pub, 42.0, 56.0)
        sub = ExampleWritable(bool).connect(hystersis)
        # Check initial value
        self.assertEqual(False, await hystersis.read())
        # Check climbing value
        await pub.publish(41.0, [self])
        await pub.publish(43.5, [self])
        await pub.publish(44.5, [self])
        self.assertEqual(False, await hystersis.read())
        sub._write.assert_not_called()
        await pub.publish(57.4, [self])
        sub._write.assert_called_once_with(True, [self, pub, hystersis])
        self.assertEqual(True, await hystersis.read())
        sub._write.reset_mock()
        await pub.publish(58, [self])
        sub._write.assert_not_called()
        self.assertEqual(True, await hystersis.read())
        # Check descending value
        await pub.publish(44.5, [self])
        self.assertEqual(True, await hystersis.read())
        sub._write.assert_not_called()
        await pub.publish(41.4, [self])
        sub._write.assert_called_once_with(False, [self, pub, hystersis])
        self.assertEqual(False, await hystersis.read())
        sub._write.reset_mock()
        await pub.publish(40.0, [self])
        sub._write.assert_not_called()
        self.assertEqual(False, await hystersis.read())
        # Check jumps
        await pub.publish(57.4, [self])
        sub._write.assert_called_once_with(True, [self, pub, hystersis])
        self.assertEqual(True, await hystersis.read())
        sub._write.reset_mock()
        await pub.publish(41.4, [self])
        sub._write.assert_called_once_with(False, [self, pub, hystersis])
        self.assertEqual(False, await hystersis.read())
    @async_test
    async def test_fade_step_adapter(self) -> None:
        # FadeSteps applied to an uninitialized variable must be rejected
        # with a log message; afterwards each step shifts the range value.
        subscribable1 = ExampleSubscribable(shc.datatypes.FadeStep)
        variable1 = shc.Variable(shc.datatypes.RangeFloat1)\
            .connect(shc.misc.FadeStepAdapter(subscribable1))
        with self.assertLogs() as logs:
            await subscribable1.publish(shc.datatypes.FadeStep(0.5), [self])
            await asyncio.sleep(0.05)
        self.assertIn("Cannot apply FadeStep", logs.records[0].msg)  # type: ignore
        await variable1.write(shc.datatypes.RangeFloat1(0.5), [self])
        await asyncio.sleep(0.05)
        await subscribable1.publish(shc.datatypes.FadeStep(0.25), [self])
        await asyncio.sleep(0.05)
        self.assertEqual(shc.datatypes.RangeFloat1(0.75), await variable1.read())
        # Steps past the upper bound must saturate at 1.0
        await subscribable1.publish(shc.datatypes.FadeStep(0.5), [self])
        await asyncio.sleep(0.05)
        self.assertEqual(shc.datatypes.RangeFloat1(1.0), await variable1.read())
    @async_test
    async def test_convert_subscription(self) -> None:
        # Published values must arrive converted to the target type.
        pub = ExampleSubscribable(shc.datatypes.RangeUInt8)
        sub = ExampleWritable(shc.datatypes.RangeFloat1)
        sub.connect(shc.misc.ConvertSubscription(pub, shc.datatypes.RangeFloat1))
        await pub.publish(shc.datatypes.RangeUInt8(255), [self])
        sub._write.assert_called_once_with(shc.datatypes.RangeFloat1(1.0), [self, pub, unittest.mock.ANY])
        self.assertIsInstance(sub._write.call_args[0][0], shc.datatypes.RangeFloat1)
| [
"test._helper.ExampleWritable",
"test._helper.ExampleSubscribable",
"test._helper.ExampleReadable",
"asyncio.sleep"
] | [((324, 350), 'test._helper.ExampleSubscribable', 'ExampleSubscribable', (['float'], {}), '(float)\n', (343, 350), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((371, 397), 'test._helper.ExampleSubscribable', 'ExampleSubscribable', (['float'], {}), '(float)\n', (390, 397), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((417, 439), 'test._helper.ExampleWritable', 'ExampleWritable', (['float'], {}), '(float)\n', (432, 439), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((460, 482), 'test._helper.ExampleWritable', 'ExampleWritable', (['float'], {}), '(float)\n', (475, 482), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((1471, 1497), 'test._helper.ExampleSubscribable', 'ExampleSubscribable', (['float'], {}), '(float)\n', (1490, 1497), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((1516, 1543), 'test._helper.ExampleReadable', 'ExampleReadable', (['bool', '(True)'], {}), '(bool, True)\n', (1531, 1543), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((1558, 1580), 'test._helper.ExampleWritable', 'ExampleWritable', (['float'], {}), '(float)\n', (1573, 1580), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((2305, 2327), 'test._helper.ExampleWritable', 'ExampleWritable', (['float'], {}), '(float)\n', (2320, 2327), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((3186, 3212), 'test._helper.ExampleSubscribable', 'ExampleSubscribable', (['float'], {}), '(float)\n', (3205, 3212), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((4986, 5029), 
'test._helper.ExampleSubscribable', 'ExampleSubscribable', (['shc.datatypes.FadeStep'], {}), '(shc.datatypes.FadeStep)\n', (5005, 5029), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((5964, 6009), 'test._helper.ExampleSubscribable', 'ExampleSubscribable', (['shc.datatypes.RangeUInt8'], {}), '(shc.datatypes.RangeUInt8)\n', (5983, 6009), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((6024, 6066), 'test._helper.ExampleWritable', 'ExampleWritable', (['shc.datatypes.RangeFloat1'], {}), '(shc.datatypes.RangeFloat1)\n', (6039, 6066), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((2548, 2567), 'asyncio.sleep', 'asyncio.sleep', (['(0.01)'], {}), '(0.01)\n', (2561, 2567), False, 'import asyncio\n'), ((2660, 2679), 'asyncio.sleep', 'asyncio.sleep', (['(0.01)'], {}), '(0.01)\n', (2673, 2679), False, 'import asyncio\n'), ((2889, 2908), 'asyncio.sleep', 'asyncio.sleep', (['(0.01)'], {}), '(0.01)\n', (2902, 2908), False, 'import asyncio\n'), ((3005, 3024), 'asyncio.sleep', 'asyncio.sleep', (['(0.01)'], {}), '(0.01)\n', (3018, 3024), False, 'import asyncio\n'), ((5478, 5497), 'asyncio.sleep', 'asyncio.sleep', (['(0.05)'], {}), '(0.05)\n', (5491, 5497), False, 'import asyncio\n'), ((5587, 5606), 'asyncio.sleep', 'asyncio.sleep', (['(0.05)'], {}), '(0.05)\n', (5600, 5606), False, 'import asyncio\n'), ((5777, 5796), 'asyncio.sleep', 'asyncio.sleep', (['(0.05)'], {}), '(0.05)\n', (5790, 5796), False, 'import asyncio\n'), ((3284, 3305), 'test._helper.ExampleWritable', 'ExampleWritable', (['bool'], {}), '(bool)\n', (3299, 3305), False, 'from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable\n'), ((5289, 5308), 'asyncio.sleep', 'asyncio.sleep', (['(0.05)'], {}), '(0.05)\n', (5302, 5308), False, 'import asyncio\n')] |
# -*- coding: utf-8 -*-
"""Supports F10.7 index values. Downloads data from LASP and the SWPC.
Properties
----------
platform
'sw'
name
'f107'
tag
- 'historic' LASP F10.7 data (downloads by month, loads by day)
- 'prelim' Preliminary SWPC daily solar indices
- 'daily' Daily SWPC solar indices (contains last 30 days)
- 'forecast' Grab forecast data from SWPC (next 3 days)
- '45day' 45-Day Forecast data from the Air Force
Example
-------
Download and load all of the historic F10.7 data. Note that it will not
stop on the current date, but a point in the past when post-processing has
been successfully completed.
::
f107 = pysat.Instrument('sw', 'f107', tag='historic')
f107.download(start=f107.lasp_stime, stop=f107.today(), freq='MS')
f107.load(date=f107.lasp_stime, end_date=f107.today())
Note
----
The forecast data is stored by generation date, where each file contains the
forecast for the next three days. Forecast data downloads are only supported
for the current day. When loading forecast data, the date specified with the
load command is the date the forecast was generated. The data loaded will span
three days. To always ensure you are loading the most recent data, load
the data with tomorrow's date.
::
f107 = pysat.Instrument('sw', 'f107', tag='forecast')
f107.download()
f107.load(date=f107.tomorrow())
Warnings
--------
The 'forecast' F10.7 data loads three days at a time. Loading multiple files,
loading multiple days, the data padding feature, and multi_file_day feature
available from the pyast.Instrument object is not appropriate for 'forecast'
data.
Like 'forecast', the '45day' forecast loads a specific period of time (45 days)
and subsequent files contain overlapping data. Thus, loading multiple files,
loading multiple days, the data padding feature, and multi_file_day feature
available from the pysat.Instrument object is not appropriate for '45day' data.
"""
import datetime as dt
import ftplib
import json
import numpy as np
import os
import requests
import sys
import warnings
import pandas as pds
import pysat
from pysatSpaceWeather.instruments.methods import f107 as mm_f107
from pysatSpaceWeather.instruments.methods.ace import load_csv_data
from pysatSpaceWeather.instruments.methods import general
logger = pysat.logger
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'sw'
name = 'f107'
# Supported tags and their human-readable descriptions
tags = {'historic': 'Daily LASP value of F10.7',
        'prelim': 'Preliminary SWPC daily solar indices',
        'daily': 'Daily SWPC solar indices (contains last 30 days)',
        'forecast': 'SWPC Forecast F107 data next (3 days)',
        '45day': 'Air Force 45-day Forecast'}
# Dict keyed by inst_id that lists supported tags for each inst_id
inst_ids = {'': [tag for tag in tags.keys()]}
# Dict keyed by inst_id that lists supported tags and a good day of test data
# generate todays date to support loading forecast data
now = dt.datetime.utcnow()
today = dt.datetime(now.year, now.month, now.day)
tomorrow = today + pds.DateOffset(days=1)
# The LASP archive start day is also important
lasp_stime = dt.datetime(1947, 2, 14)
# ----------------------------------------------------------------------------
# Instrument test attributes
# Dates with known-good data for each tag, used by the pysat test suite
_test_dates = {'': {'historic': dt.datetime(2009, 1, 1),
                    'prelim': dt.datetime(2009, 1, 1),
                    'daily': tomorrow,
                    'forecast': tomorrow,
                    '45day': tomorrow}}
# Other tags assumed to be True
_test_download_travis = {'': {'prelim': False}}
# ----------------------------------------------------------------------------
# Instrument methods
# Reuse the shared space-weather preprocessing routine
preprocess = general.preprocess
def init(self):
    """Set instrument-specific attributes on the Instrument object.

    Called once by pysat when the Instrument is instantiated: records the
    acknowledgements and references for this name/tag, logs the
    acknowledgements, and -- for the 'historic' tag only -- stores the
    LASP archive start time on the instance.
    """
    # Record the data-source credits for this name/tag combination
    self.acknowledgements = mm_f107.acknowledgements(self.name, self.tag)
    logger.info(self.acknowledgements)
    self.references = mm_f107.references(self.name, self.tag)

    # Only the LASP historic archive has a fixed start date
    if self.tag == 'historic':
        self.lasp_stime = lasp_stime
    return
def clean(self):
    """No-op cleaning routine for Space Weather indices.

    Note
    ----
    The F10.7 index requires no cleaning, so this simply returns.

    """
    return
# ----------------------------------------------------------------------------
# Instrument functions
def load(fnames, tag=None, inst_id=None):
    """Load F10.7 index files.

    Parameters
    ----------
    fnames : pandas.Series
        Series of filenames
    tag : str or NoneType
        tag or None (default=None)
    inst_id : str or NoneType
        satellite id or None (default=None)

    Returns
    -------
    data : pandas.DataFrame
        Object containing satellite data
    meta : pysat.Meta
        Object containing metadata such as column names and units

    Note
    ----
    Called by pysat. Not intended for direct use by user.

    For 'historic' and 'prelim' the file names carry an appended
    '_YYYY-MM-DD' date suffix (see ``list_files``); the suffix is
    stripped to find the unique data files and kept to sub-select the
    requested days.

    """
    # Get the desired file dates and file names from the daily indexed list
    file_dates = list()
    if tag in ['historic', 'prelim']:
        unique_files = list()
        for fname in fnames:
            # Last 10 characters are the appended '_YYYY-MM-DD' load date
            file_dates.append(dt.datetime.strptime(fname[-10:], '%Y-%m-%d'))
            if fname[0:-11] not in unique_files:
                unique_files.append(fname[0:-11])
        fnames = unique_files
    # Load the CSV data files
    data = load_csv_data(fnames, read_csv_kwargs={"index_col": 0,
                                                 "parse_dates": True})
    # If there is a date range, downselect here
    if len(file_dates) > 0:
        idx, = np.where((data.index >= min(file_dates))
                        & (data.index < max(file_dates) + dt.timedelta(days=1)))
        data = data.iloc[idx, :]
    # Initialize the metadata
    meta = pysat.Meta()
    meta['f107'] = {meta.labels.units: 'SFU',
                    meta.labels.name: 'F10.7 cm solar index',
                    meta.labels.notes: '',
                    meta.labels.desc:
                    'F10.7 cm radio flux in Solar Flux Units (SFU)',
                    meta.labels.fill_val: np.nan,
                    meta.labels.min_val: 0,
                    meta.labels.max_val: np.inf}
    # The 45-day forecast also carries Ap; the daily/preliminary SWPC
    # products carry the full set of solar activity indices
    if tag == '45day':
        meta['ap'] = {meta.labels.units: '',
                      meta.labels.name: 'Daily Ap index',
                      meta.labels.notes: '',
                      meta.labels.desc: 'Daily average of 3-h ap indices',
                      meta.labels.fill_val: np.nan,
                      meta.labels.min_val: 0,
                      meta.labels.max_val: 400}
    elif tag == 'daily' or tag == 'prelim':
        meta['ssn'] = {meta.labels.units: '',
                       meta.labels.name: 'Sunspot Number',
                       meta.labels.notes: '',
                       meta.labels.desc: 'SESC Sunspot Number',
                       meta.labels.fill_val: -999,
                       meta.labels.min_val: 0,
                       meta.labels.max_val: np.inf}
        meta['ss_area'] = {meta.labels.units: '10$^-6$ Solar Hemisphere',
                           meta.labels.name: 'Sunspot Area',
                           meta.labels.notes: '',
                           meta.labels.desc:
                           ''.join(['Sunspot Area in Millionths of the ',
                                    'Visible Hemisphere']),
                           meta.labels.fill_val: -999,
                           meta.labels.min_val: 0,
                           meta.labels.max_val: 1.0e6}
        meta['new_reg'] = {meta.labels.units: '',
                           meta.labels.name: 'New Regions',
                           meta.labels.notes: '',
                           meta.labels.desc: 'New active solar regions',
                           meta.labels.fill_val: -999,
                           meta.labels.min_val: 0,
                           meta.labels.max_val: np.inf}
        meta['smf'] = {meta.labels.units: 'G',
                       meta.labels.name: 'Solar Mean Field',
                       meta.labels.notes: '',
                       meta.labels.desc: 'Standford Solar Mean Field',
                       meta.labels.fill_val: -999,
                       meta.labels.min_val: 0,
                       meta.labels.max_val: np.inf}
        meta['goes_bgd_flux'] = {meta.labels.units: 'W/m^2',
                                 meta.labels.name: 'X-ray Background Flux',
                                 meta.labels.notes: '',
                                 meta.labels.desc:
                                 'GOES15 X-ray Background Flux',
                                 meta.labels.fill_val: '*',
                                 meta.labels.min_val: -np.inf,
                                 meta.labels.max_val: np.inf}
        meta['c_flare'] = {meta.labels.units: '',
                           meta.labels.name: 'C X-Ray Flares',
                           meta.labels.notes: '',
                           meta.labels.desc: 'C-class X-Ray Flares',
                           meta.labels.fill_val: -1,
                           meta.labels.min_val: 0,
                           meta.labels.max_val: 9}
        meta['m_flare'] = {meta.labels.units: '',
                           meta.labels.name: 'M X-Ray Flares',
                           meta.labels.notes: '',
                           meta.labels.desc: 'M-class X-Ray Flares',
                           meta.labels.fill_val: -1,
                           meta.labels.min_val: 0,
                           meta.labels.max_val: 9}
        meta['x_flare'] = {meta.labels.units: '',
                           meta.labels.name: 'X X-Ray Flares',
                           meta.labels.notes: '',
                           meta.labels.desc: 'X-class X-Ray Flares',
                           meta.labels.fill_val: -1,
                           meta.labels.min_val: 0,
                           meta.labels.max_val: 9}
        meta['o1_flare'] = {meta.labels.units: '',
                            meta.labels.name: '1 Optical Flares',
                            meta.labels.notes: '',
                            meta.labels.desc: '1-class Optical Flares',
                            meta.labels.fill_val: -1,
                            meta.labels.min_val: 0,
                            meta.labels.max_val: 9}
        meta['o2_flare'] = {meta.labels.units: '',
                            meta.labels.name: '2 Optical Flares',
                            meta.labels.notes: '',
                            meta.labels.desc: '2-class Optical Flares',
                            meta.labels.fill_val: -1,
                            meta.labels.min_val: 0,
                            meta.labels.max_val: 9}
        meta['o3_flare'] = {meta.labels.units: '',
                            meta.labels.name: '3 Optical Flares',
                            meta.labels.notes: '',
                            meta.labels.desc: '3-class Optical Flares',
                            meta.labels.fill_val: -1,
                            meta.labels.min_val: 0,
                            meta.labels.max_val: 9}
    return data, meta
def list_files(tag=None, inst_id=None, data_path=None, format_str=None):
    """Return a Pandas Series of every file for F10.7 data.

    Parameters
    ----------
    tag : string or NoneType
        Denotes type of file to load.
        (default=None)
    inst_id : string or NoneType
        Specifies the satellite ID for a constellation. Not used.
        (default=None)
    data_path : string or NoneType
        Path to data directory. If None is specified, the value previously
        set in Instrument.files.data_path is used. (default=None)
    format_str : string or NoneType
        User specified file format. If None is specified, the default
        formats associated with the supplied tags are used. (default=None)

    Returns
    -------
    out_files : pysat._files.Files
        A class containing the verified available files

    Raises
    ------
    ValueError
        If ``data_path`` is None or the tag is not recognized.

    Note
    ----
    Called by pysat. Not intended for direct use by user.

    Files covering multiple days are expanded to a daily cadence with an
    appended '_YYYY-MM-DD' suffix so ``load`` can sub-select days.

    """
    if data_path is not None:
        if tag == 'historic':
            # Files are by month, going to add date to monthly filename for
            # each day of the month. The load routine will load a month of
            # data and use the appended date to select out appropriate data.
            if format_str is None:
                format_str = 'f107_monthly_{year:04d}-{month:02d}.txt'
            out_files = pysat.Files.from_os(data_path=data_path,
                                            format_str=format_str)
            if not out_files.empty:
                out_files.loc[out_files.index[-1] + pds.DateOffset(months=1)
                              - pds.DateOffset(days=1)] = out_files.iloc[-1]
                out_files = out_files.asfreq('D', 'pad')
                out_files = out_files + '_' + out_files.index.strftime(
                    '%Y-%m-%d')
        elif tag == 'prelim':
            # Files are by year (and quarter)
            if format_str is None:
                format_str = ''.join(['f107_prelim_{year:04d}_{month:02d}',
                                      '_v{version:01d}.txt'])
            out_files = pysat.Files.from_os(data_path=data_path,
                                            format_str=format_str)
            if not out_files.empty:
                # Set each file's valid length at a 1-day resolution
                orig_files = out_files.sort_index().copy()
                new_files = list()
                for orig in orig_files.iteritems():
                    # Version determines each file's valid length:
                    # v2 files span a year, v1 files span a quarter
                    version = int(orig[1].split("_v")[1][0])
                    doff = pds.DateOffset(years=1) if version == 2 \
                        else pds.DateOffset(months=3)
                    istart = orig[0]
                    iend = istart + doff - pds.DateOffset(days=1)
                    # Ensure the end time does not extend past the number of
                    # possible days included based on the file's download time
                    fname = os.path.join(data_path, orig[1])
                    dend = dt.datetime.utcfromtimestamp(os.path.getctime(fname))
                    dend = dend - pds.DateOffset(days=1)
                    if dend < iend:
                        iend = dend
                    # Pad the original file index
                    out_files.loc[iend] = orig[1]
                    out_files = out_files.sort_index()
                    # Save the files at a daily cadence over the desired period
                    new_files.append(out_files.loc[istart:
                                                   iend].asfreq('D', 'pad'))
                # Add the newly indexed files to the file output
                out_files = pds.concat(new_files, sort=True)
                out_files = out_files.dropna()
                out_files = out_files.sort_index()
                out_files = out_files + '_' + out_files.index.strftime(
                    '%Y-%m-%d')
        elif tag in ['daily', 'forecast', '45day']:
            format_str = ''.join(['f107_', tag,
                                  '_{year:04d}-{month:02d}-{day:02d}.txt'])
            out_files = pysat.Files.from_os(data_path=data_path,
                                            format_str=format_str)
            # Pad list of files data to include most recent file under tomorrow
            if not out_files.empty:
                pds_off = pds.DateOffset(days=1)
                # NOTE(review): this assignment runs twice, and the second
                # call re-reads the (already extended) last index, padding
                # the list by two days total -- presumably intentional for
                # forecast coverage; confirm before changing.
                out_files.loc[out_files.index[-1]
                              + pds_off] = out_files.values[-1]
                out_files.loc[out_files.index[-1]
                              + pds_off] = out_files.values[-1]
        else:
            raise ValueError(' '.join(('Unrecognized tag name for Space',
                                       'Weather Index F107:', tag)))
    else:
        raise ValueError(' '.join(('A data_path must be passed to the loading',
                                   'routine for F107')))
    return out_files
def download(date_array, tag, inst_id, data_path, update_files=False):
    """Routine to download F107 index data.

    Parameters
    -----------
    date_array : list-like
        Sequence of dates to download date for.
    tag : string or NoneType
        Denotes type of file to load.
    inst_id : string or NoneType
        Specifies the satellite ID for a constellation.
    data_path : string or NoneType
        Path to data directory.
    update_files : bool
        Re-download data for files that already exist if True (default=False)

    Note
    ----
    Called by pysat. Not intended for direct use by user.

    Data sources by tag: 'historic' from the LASP LISIRD JSON service
    (monthly), 'prelim' from the SWPC FTP archive (yearly/quarterly),
    'daily', 'forecast' and '45day' from SWPC/Air Force text products.

    Warnings
    --------
    Only able to download current forecast data, not archived forecasts.

    """
    # download standard F107 data
    if tag == 'historic':
        # Test the date array, updating it if necessary
        if date_array.freq != 'MS':
            warnings.warn(''.join(['Historic F10.7 downloads should be invoked',
                                   " with the `freq='MS'` option."]))
            date_array = pysat.utils.time.create_date_range(
                dt.datetime(date_array[0].year, date_array[0].month, 1),
                date_array[-1], freq='MS')
        # Download from LASP, by month
        for dl_date in date_array:
            # Create the name to which the local file will be saved
            str_date = dl_date.strftime('%Y-%m')
            data_file = os.path.join(data_path,
                                     'f107_monthly_{:s}.txt'.format(str_date))
            if update_files or not os.path.isfile(data_file):
                # Set the download webpage, requesting one calendar month
                dstr = ''.join(['http://lasp.colorado.edu/lisird/latis/dap/',
                                'noaa_radio_flux.json?time%3E=',
                                dl_date.strftime('%Y-%m-%d'),
                                'T00:00:00.000Z&time%3C=',
                                (dl_date + pds.DateOffset(months=1)
                                 - pds.DateOffset(days=1)).strftime('%Y-%m-%d'),
                                'T00:00:00.000Z'])
                # The data is returned as a JSON file
                req = requests.get(dstr)
                # Process the JSON file
                raw_dict = json.loads(req.text)['noaa_radio_flux']
                data = pds.DataFrame.from_dict(raw_dict['samples'])
                if data.empty:
                    warnings.warn("no data for {:}".format(dl_date),
                                  UserWarning)
                else:
                    # The file format changed over time
                    try:
                        # This is the new data format
                        times = [dt.datetime.strptime(time, '%Y%m%d')
                                 for time in data.pop('time')]
                    except ValueError:
                        # Accepts old file formats
                        times = [dt.datetime.strptime(time, '%Y %m %d')
                                 for time in data.pop('time')]
                    data.index = times
                    # Replace fill value with NaNs
                    idx, = np.where(data['f107'] == -99999.0)
                    data.iloc[idx, :] = np.nan
                    # Create a local CSV file
                    data.to_csv(data_file, header=True)
    elif tag == 'prelim':
        ftp = ftplib.FTP('ftp.swpc.noaa.gov')  # connect to host, default port
        ftp.login()  # anonymous login
        ftp.cwd('/pub/indices/old_indices')
        bad_fname = list()
        # Get the local files, to ensure that the version 1 files are
        # downloaded again if more data has been added
        local_files = list_files(tag, inst_id, data_path)
        # To avoid downloading multiple files, cycle dates based on file length
        dl_date = date_array[0]
        while dl_date <= date_array[-1]:
            # The file name changes, depending on how recent the requested
            # data is
            qnum = (dl_date.month - 1) // 3 + 1  # Integer floor division
            qmonth = (qnum - 1) * 3 + 1
            quar = 'Q{:d}_'.format(qnum)
            # Try the yearly (v2) file first, then the quarterly (v1) file
            fnames = ['{:04d}{:s}DSD.txt'.format(dl_date.year, ss)
                      for ss in ['_', quar]]
            versions = ["01_v2", "{:02d}_v1".format(qmonth)]
            vend = [dt.datetime(dl_date.year, 12, 31),
                    dt.datetime(dl_date.year, qmonth, 1)
                    + pds.DateOffset(months=3) - pds.DateOffset(days=1)]
            downloaded = False
            rewritten = False
            # Attempt the download(s)
            for iname, fname in enumerate(fnames):
                # Test to see if we already tried this filename
                if fname in bad_fname:
                    continue
                local_fname = fname
                saved_fname = os.path.join(data_path, local_fname)
                ofile = '_'.join(['f107', 'prelim',
                                  '{:04d}'.format(dl_date.year),
                                  '{:s}.txt'.format(versions[iname])])
                outfile = os.path.join(data_path, ofile)
                if os.path.isfile(outfile):
                    downloaded = True
                    # Check the date to see if this should be rewritten
                    checkfile = os.path.split(outfile)[-1]
                    has_file = local_files == checkfile
                    if np.any(has_file):
                        if has_file[has_file].index[-1] < vend[iname]:
                            # This file will be updated again, but only attempt
                            # to do so if enough time has passed from the
                            # last time it was downloaded
                            yesterday = today - pds.DateOffset(days=1)
                            if has_file[has_file].index[-1] < yesterday:
                                rewritten = True
                else:
                    # The file does not exist, if it can be downloaded, it
                    # should be 'rewritten'
                    rewritten = True
                # Attempt to download if the file does not exist or if the
                # file has been updated
                if rewritten or not downloaded:
                    try:
                        sys.stdout.flush()
                        ftp.retrbinary('RETR ' + fname,
                                       open(saved_fname, 'wb').write)
                        downloaded = True
                        logger.info(' '.join(('Downloaded file for ',
                                              dl_date.strftime('%x'))))
                    except ftplib.error_perm as exception:
                        # Could not fetch, so cannot rewrite
                        rewritten = False
                        # Test for an error; 550 means the file is absent
                        if str(exception.args[0]).split(" ", 1)[0] != '550':
                            raise RuntimeError(exception)
                        else:
                            # file isn't actually there, try the next name
                            os.remove(saved_fname)
                            # Save this so we don't try again
                            # Because there are two possible filenames for
                            # each time, it's ok if one isn't there. We just
                            # don't want to keep looking for it.
                            bad_fname.append(fname)
                # If the first file worked, don't try again
                if downloaded:
                    break
            if not downloaded:
                logger.info(' '.join(('File not available for',
                                      dl_date.strftime('%x'))))
            elif rewritten:
                # Convert the raw FTP payload into the local file format
                with open(saved_fname, 'r') as fprelim:
                    lines = fprelim.read()
                mm_f107.rewrite_daily_file(dl_date.year, outfile, lines)
                os.remove(saved_fname)
            # Cycle to the next date
            dl_date = vend[iname] + pds.DateOffset(days=1)
        # Close connection after downloading all dates
        ftp.close()
    elif tag == 'daily':
        logger.info('This routine can only download the latest 30 day file')
        # Set the download webpage
        furl = 'https://services.swpc.noaa.gov/text/daily-solar-indices.txt'
        req = requests.get(furl)
        # Save the output
        data_file = 'f107_daily_{:s}.txt'.format(today.strftime('%Y-%m-%d'))
        outfile = os.path.join(data_path, data_file)
        mm_f107.rewrite_daily_file(today.year, outfile, req.text)
    elif tag == 'forecast':
        logger.info(' '.join(('This routine can only download the current',
                              'forecast, not archived forecasts')))
        # Set the download webpage
        furl = ''.join(('https://services.swpc.noaa.gov/text/',
                        '3-day-solar-geomag-predictions.txt'))
        req = requests.get(furl)
        # Parse text to get the date the prediction was generated
        date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
        dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
        # Get starting date of the forecasts
        raw_data = req.text.split(':Prediction_dates:')[-1]
        forecast_date = dt.datetime.strptime(raw_data[3:14], '%Y %b %d')
        # Set the times for output data
        times = pds.date_range(forecast_date, periods=3, freq='1D')
        # String data is the forecast value for the next three days,
        # extracted from fixed column positions in the product line
        raw_data = req.text.split('10cm_flux:')[-1]
        raw_data = raw_data.split('\n')[1]
        val1 = int(raw_data[24:27])
        val2 = int(raw_data[38:41])
        val3 = int(raw_data[52:])
        # Put data into nicer DataFrame
        data = pds.DataFrame([val1, val2, val3], index=times, columns=['f107'])
        # Write out as a file
        data_file = 'f107_forecast_{:s}.txt'.format(
            dl_date.strftime('%Y-%m-%d'))
        data.to_csv(os.path.join(data_path, data_file), header=True)
    elif tag == '45day':
        logger.info(' '.join(('This routine can only download the current',
                              'forecast, not archived forecasts')))
        # Set the download webpage
        furl = 'https://services.swpc.noaa.gov/text/45-day-ap-forecast.txt'
        req = requests.get(furl)
        # Parse text to get the date the prediction was generated
        date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
        dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
        # Get to the forecast data
        raw_data = req.text.split('45-DAY AP FORECAST')[-1]
        # Grab AP part
        raw_ap = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[0]
        raw_ap = raw_ap.split('\n')[1:-1]
        # Get the F107
        raw_f107 = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[-1]
        raw_f107 = raw_f107.split('\n')[1:-4]
        # Parse the AP data
        ap_times, ap = mm_f107.parse_45day_block(raw_ap)
        # Parse the F10.7 data
        f107_times, f107 = mm_f107.parse_45day_block(raw_f107)
        # Collect into DataFrame
        data = pds.DataFrame(f107, index=f107_times, columns=['f107'])
        data['ap'] = ap
        # Write out as a file
        data_file = 'f107_45day_{:s}.txt'.format(dl_date.strftime('%Y-%m-%d'))
        data.to_csv(os.path.join(data_path, data_file), header=True)
    return
| [
"pysat.Files.from_os",
"datetime.timedelta",
"pysatSpaceWeather.instruments.methods.f107.rewrite_daily_file",
"pandas.date_range",
"os.remove",
"datetime.datetime",
"ftplib.FTP",
"numpy.where",
"pandas.DataFrame.from_dict",
"os.path.split",
"pandas.DataFrame",
"sys.stdout.flush",
"json.loads... | [((3004, 3024), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (3022, 3024), True, 'import datetime as dt\n'), ((3033, 3074), 'datetime.datetime', 'dt.datetime', (['now.year', 'now.month', 'now.day'], {}), '(now.year, now.month, now.day)\n', (3044, 3074), True, 'import datetime as dt\n'), ((3178, 3202), 'datetime.datetime', 'dt.datetime', (['(1947)', '(2)', '(14)'], {}), '(1947, 2, 14)\n', (3189, 3202), True, 'import datetime as dt\n'), ((3094, 3116), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (3108, 3116), True, 'import pandas as pds\n'), ((3926, 3971), 'pysatSpaceWeather.instruments.methods.f107.acknowledgements', 'mm_f107.acknowledgements', (['self.name', 'self.tag'], {}), '(self.name, self.tag)\n', (3950, 3971), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((3994, 4033), 'pysatSpaceWeather.instruments.methods.f107.references', 'mm_f107.references', (['self.name', 'self.tag'], {}), '(self.name, self.tag)\n', (4012, 4033), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((5456, 5532), 'pysatSpaceWeather.instruments.methods.ace.load_csv_data', 'load_csv_data', (['fnames'], {'read_csv_kwargs': "{'index_col': 0, 'parse_dates': True}"}), "(fnames, read_csv_kwargs={'index_col': 0, 'parse_dates': True})\n", (5469, 5532), False, 'from pysatSpaceWeather.instruments.methods.ace import load_csv_data\n'), ((5872, 5884), 'pysat.Meta', 'pysat.Meta', ([], {}), '()\n', (5882, 5884), False, 'import pysat\n'), ((3345, 3368), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (3356, 3368), True, 'import datetime as dt\n'), ((3400, 3423), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (3411, 3423), True, 'import datetime as dt\n'), ((12614, 12677), 'pysat.Files.from_os', 'pysat.Files.from_os', ([], {'data_path': 'data_path', 'format_str': 'format_str'}), '(data_path=data_path, 
format_str=format_str)\n', (12633, 12677), False, 'import pysat\n'), ((19601, 19632), 'ftplib.FTP', 'ftplib.FTP', (['"""ftp.swpc.noaa.gov"""'], {}), "('ftp.swpc.noaa.gov')\n", (19611, 19632), False, 'import ftplib\n'), ((5238, 5283), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['fname[-10:]', '"""%Y-%m-%d"""'], {}), "(fname[-10:], '%Y-%m-%d')\n", (5258, 5283), True, 'import datetime as dt\n'), ((13347, 13410), 'pysat.Files.from_os', 'pysat.Files.from_os', ([], {'data_path': 'data_path', 'format_str': 'format_str'}), '(data_path=data_path, format_str=format_str)\n', (13366, 13410), False, 'import pysat\n'), ((17335, 17390), 'datetime.datetime', 'dt.datetime', (['date_array[0].year', 'date_array[0].month', '(1)'], {}), '(date_array[0].year, date_array[0].month, 1)\n', (17346, 17390), True, 'import datetime as dt\n'), ((18401, 18419), 'requests.get', 'requests.get', (['dstr'], {}), '(dstr)\n', (18413, 18419), False, 'import requests\n'), ((18551, 18595), 'pandas.DataFrame.from_dict', 'pds.DataFrame.from_dict', (["raw_dict['samples']"], {}), "(raw_dict['samples'])\n", (18574, 18595), True, 'import pandas as pds\n'), ((24637, 24655), 'requests.get', 'requests.get', (['furl'], {}), '(furl)\n', (24649, 24655), False, 'import requests\n'), ((24778, 24812), 'os.path.join', 'os.path.join', (['data_path', 'data_file'], {}), '(data_path, data_file)\n', (24790, 24812), False, 'import os\n'), ((24821, 24878), 'pysatSpaceWeather.instruments.methods.f107.rewrite_daily_file', 'mm_f107.rewrite_daily_file', (['today.year', 'outfile', 'req.text'], {}), '(today.year, outfile, req.text)\n', (24847, 24878), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((14955, 14987), 'pandas.concat', 'pds.concat', (['new_files'], {'sort': '(True)'}), '(new_files, sort=True)\n', (14965, 14987), True, 'import pandas as pds\n'), ((15391, 15454), 'pysat.Files.from_os', 'pysat.Files.from_os', ([], {'data_path': 'data_path', 'format_str': 'format_str'}), 
'(data_path=data_path, format_str=format_str)\n', (15410, 15454), False, 'import pysat\n'), ((17790, 17815), 'os.path.isfile', 'os.path.isfile', (['data_file'], {}), '(data_file)\n', (17804, 17815), False, 'import os\n'), ((18488, 18508), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (18498, 18508), False, 'import json\n'), ((19376, 19410), 'numpy.where', 'np.where', (["(data['f107'] == -99999.0)"], {}), "(data['f107'] == -99999.0)\n", (19384, 19410), True, 'import numpy as np\n'), ((20579, 20612), 'datetime.datetime', 'dt.datetime', (['dl_date.year', '(12)', '(31)'], {}), '(dl_date.year, 12, 31)\n', (20590, 20612), True, 'import datetime as dt\n'), ((21094, 21130), 'os.path.join', 'os.path.join', (['data_path', 'local_fname'], {}), '(data_path, local_fname)\n', (21106, 21130), False, 'import os\n'), ((21345, 21375), 'os.path.join', 'os.path.join', (['data_path', 'ofile'], {}), '(data_path, ofile)\n', (21357, 21375), False, 'import os\n'), ((21396, 21419), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (21410, 21419), False, 'import os\n'), ((24308, 24330), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (24322, 24330), True, 'import pandas as pds\n'), ((25228, 25246), 'requests.get', 'requests.get', (['furl'], {}), '(furl)\n', (25240, 25246), False, 'import requests\n'), ((25400, 25447), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['date_str', '"""%Y %b %d %H%M"""'], {}), "(date_str, '%Y %b %d %H%M')\n", (25420, 25447), True, 'import datetime as dt\n'), ((25578, 25626), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['raw_data[3:14]', '"""%Y %b %d"""'], {}), "(raw_data[3:14], '%Y %b %d')\n", (25598, 25626), True, 'import datetime as dt\n'), ((25684, 25735), 'pandas.date_range', 'pds.date_range', (['forecast_date'], {'periods': '(3)', 'freq': '"""1D"""'}), "(forecast_date, periods=3, freq='1D')\n", (25698, 25735), True, 'import pandas as pds\n'), ((26062, 26126), 
'pandas.DataFrame', 'pds.DataFrame', (['[val1, val2, val3]'], {'index': 'times', 'columns': "['f107']"}), "([val1, val2, val3], index=times, columns=['f107'])\n", (26075, 26126), True, 'import pandas as pds\n'), ((5774, 5794), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5786, 5794), True, 'import datetime as dt\n'), ((12867, 12889), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (12881, 12889), True, 'import pandas as pds\n'), ((14246, 14278), 'os.path.join', 'os.path.join', (['data_path', 'orig[1]'], {}), '(data_path, orig[1])\n', (14258, 14278), False, 'import os\n'), ((15642, 15664), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (15656, 15664), True, 'import pandas as pds\n'), ((20720, 20742), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (20734, 20742), True, 'import pandas as pds\n'), ((21670, 21686), 'numpy.any', 'np.any', (['has_file'], {}), '(has_file)\n', (21676, 21686), True, 'import numpy as np\n'), ((24138, 24194), 'pysatSpaceWeather.instruments.methods.f107.rewrite_daily_file', 'mm_f107.rewrite_daily_file', (['dl_date.year', 'outfile', 'lines'], {}), '(dl_date.year, outfile, lines)\n', (24164, 24194), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((24211, 24233), 'os.remove', 'os.remove', (['saved_fname'], {}), '(saved_fname)\n', (24220, 24233), False, 'import os\n'), ((26273, 26307), 'os.path.join', 'os.path.join', (['data_path', 'data_file'], {}), '(data_path, data_file)\n', (26285, 26307), False, 'import os\n'), ((26618, 26636), 'requests.get', 'requests.get', (['furl'], {}), '(furl)\n', (26630, 26636), False, 'import requests\n'), ((26790, 26837), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['date_str', '"""%Y %b %d %H%M"""'], {}), "(date_str, '%Y %b %d %H%M')\n", (26810, 26837), True, 'import datetime as dt\n'), ((27261, 27294), 
'pysatSpaceWeather.instruments.methods.f107.parse_45day_block', 'mm_f107.parse_45day_block', (['raw_ap'], {}), '(raw_ap)\n', (27286, 27294), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((27354, 27389), 'pysatSpaceWeather.instruments.methods.f107.parse_45day_block', 'mm_f107.parse_45day_block', (['raw_f107'], {}), '(raw_f107)\n', (27379, 27389), True, 'from pysatSpaceWeather.instruments.methods import f107 as mm_f107\n'), ((27439, 27494), 'pandas.DataFrame', 'pds.DataFrame', (['f107'], {'index': 'f107_times', 'columns': "['f107']"}), "(f107, index=f107_times, columns=['f107'])\n", (27452, 27494), True, 'import pandas as pds\n'), ((12810, 12834), 'pandas.DateOffset', 'pds.DateOffset', ([], {'months': '(1)'}), '(months=1)\n', (12824, 12834), True, 'import pandas as pds\n'), ((13862, 13885), 'pandas.DateOffset', 'pds.DateOffset', ([], {'years': '(1)'}), '(years=1)\n', (13876, 13885), True, 'import pandas as pds\n'), ((13933, 13957), 'pandas.DateOffset', 'pds.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (13947, 13957), True, 'import pandas as pds\n'), ((14038, 14060), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (14052, 14060), True, 'import pandas as pds\n'), ((14335, 14358), 'os.path.getctime', 'os.path.getctime', (['fname'], {}), '(fname)\n', (14351, 14358), False, 'import os\n'), ((14394, 14416), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (14408, 14416), True, 'import pandas as pds\n'), ((18933, 18969), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['time', '"""%Y%m%d"""'], {}), "(time, '%Y%m%d')\n", (18953, 18969), True, 'import datetime as dt\n'), ((20634, 20670), 'datetime.datetime', 'dt.datetime', (['dl_date.year', 'qmonth', '(1)'], {}), '(dl_date.year, qmonth, 1)\n', (20645, 20670), True, 'import datetime as dt\n'), ((20693, 20717), 'pandas.DateOffset', 'pds.DateOffset', ([], {'months': '(3)'}), '(months=3)\n', (20707, 20717), True, 'import pandas 
as pds\n'), ((21564, 21586), 'os.path.split', 'os.path.split', (['outfile'], {}), '(outfile)\n', (21577, 21586), False, 'import os\n'), ((22555, 22573), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (22571, 22573), False, 'import sys\n'), ((27649, 27683), 'os.path.join', 'os.path.join', (['data_path', 'data_file'], {}), '(data_path, data_file)\n', (27661, 27683), False, 'import os\n'), ((19156, 19194), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['time', '"""%Y %m %d"""'], {}), "(time, '%Y %m %d')\n", (19176, 19194), True, 'import datetime as dt\n'), ((18227, 18249), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (18241, 18249), True, 'import pandas as pds\n'), ((22019, 22041), 'pandas.DateOffset', 'pds.DateOffset', ([], {'days': '(1)'}), '(days=1)\n', (22033, 22041), True, 'import pandas as pds\n'), ((23360, 23382), 'os.remove', 'os.remove', (['saved_fname'], {}), '(saved_fname)\n', (23369, 23382), False, 'import os\n'), ((18167, 18191), 'pandas.DateOffset', 'pds.DateOffset', ([], {'months': '(1)'}), '(months=1)\n', (18181, 18191), True, 'import pandas as pds\n')] |
# Generated by Django 3.0.5 on 2020-08-13 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter ``User.emailConfirmToken`` field options."""
    dependencies = [
        ('user', '0003_auto_20200813_1943'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='emailConfirmToken',
            # NOTE(review): default looks like a redacted placeholder ('-<KEY>') — confirm
            # the real token default before relying on it.
            field=models.TextField(default='-<KEY>', max_length=30, verbose_name='Токен подтверждения почты'),
        ),
    ]
| [
"django.db.models.TextField"
] | [((342, 438), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""-<KEY>"""', 'max_length': '(30)', 'verbose_name': '"""Токен подтверждения почты"""'}), "(default='-<KEY>', max_length=30, verbose_name=\n 'Токен подтверждения почты')\n", (358, 438), False, 'from django.db import migrations, models\n')] |
import glfw
import OpenGL.GL as gl
import imgui
from imgui.integrations.glfw import GlfwRenderer
def app(render):
    """Run *render* each frame inside a minimal ImGui/GLFW main loop.

    Creates the ImGui context and window, processes input, clears the
    framebuffer, renders the user callback, and tears everything down
    once the window is closed.
    """
    imgui.create_context()
    window = impl_glfw_init()
    renderer = GlfwRenderer(window)

    while not glfw.window_should_close(window):
        glfw.poll_events()
        renderer.process_inputs()

        # Clear to a fixed greenish background before drawing the UI.
        gl.glClearColor(0.2, 0.5, 0.2, 0.6)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)

        imgui.new_frame()
        render()
        imgui.render()
        renderer.render(imgui.get_draw_data())
        glfw.swap_buffers(window)

    renderer.shutdown()
    glfw.terminate()
def impl_glfw_init():
    """Create a 1280x720 GLFW window with a 3.3 core OpenGL context.

    Returns:
        The GLFW window handle with its context made current.

    Exits the process with status 1 if GLFW cannot be initialized or the
    window cannot be created.
    """
    width, height = 1280, 720
    window_name = "minimal ImGui/GLFW3 example"

    if not glfw.init():
        print("Could not initialize OpenGL context")
        exit(1)

    # OS X supports only forward-compatible core profiles from 3.2
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)

    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)

    # Create a windowed mode window and its OpenGL context
    window = glfw.create_window(
        int(width), int(height), window_name, None, None
    )

    # BUG FIX: validate the window handle *before* making its context
    # current — the original called glfw.make_context_current(window)
    # first, which is invalid when window creation failed (window is None).
    if not window:
        glfw.terminate()
        print("Could not initialize Window")
        exit(1)

    glfw.make_context_current(window)
    return window
| [
"glfw.make_context_current",
"glfw.swap_buffers",
"glfw.window_should_close",
"imgui.integrations.glfw.GlfwRenderer",
"glfw.poll_events",
"imgui.create_context",
"OpenGL.GL.glClear",
"glfw.window_hint",
"imgui.render",
"imgui.get_draw_data",
"OpenGL.GL.glClearColor",
"imgui.new_frame",
"glfw... | [((120, 142), 'imgui.create_context', 'imgui.create_context', ([], {}), '()\n', (140, 142), False, 'import imgui\n'), ((184, 204), 'imgui.integrations.glfw.GlfwRenderer', 'GlfwRenderer', (['window'], {}), '(window)\n', (196, 204), False, 'from imgui.integrations.glfw import GlfwRenderer\n'), ((561, 577), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (575, 577), False, 'import glfw\n'), ((846, 893), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MAJOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MAJOR, 3)\n', (862, 893), False, 'import glfw\n'), ((898, 945), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MINOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MINOR, 3)\n', (914, 945), False, 'import glfw\n'), ((950, 1013), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_PROFILE', 'glfw.OPENGL_CORE_PROFILE'], {}), '(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n', (966, 1013), False, 'import glfw\n'), ((1019, 1075), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_FORWARD_COMPAT', 'gl.GL_TRUE'], {}), '(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)\n', (1035, 1075), False, 'import glfw\n'), ((1235, 1268), 'glfw.make_context_current', 'glfw.make_context_current', (['window'], {}), '(window)\n', (1260, 1268), False, 'import glfw\n'), ((219, 251), 'glfw.window_should_close', 'glfw.window_should_close', (['window'], {}), '(window)\n', (243, 251), False, 'import glfw\n'), ((261, 279), 'glfw.poll_events', 'glfw.poll_events', ([], {}), '()\n', (277, 279), False, 'import glfw\n'), ((318, 353), 'OpenGL.GL.glClearColor', 'gl.glClearColor', (['(0.2)', '(0.5)', '(0.2)', '(0.6)'], {}), '(0.2, 0.5, 0.2, 0.6)\n', (333, 353), True, 'import OpenGL.GL as gl\n'), ((359, 393), 'OpenGL.GL.glClear', 'gl.glClear', (['gl.GL_COLOR_BUFFER_BIT'], {}), '(gl.GL_COLOR_BUFFER_BIT)\n', (369, 393), True, 'import OpenGL.GL as gl\n'), ((402, 419), 'imgui.new_frame', 'imgui.new_frame', ([], {}), '()\n', (417, 419), False, 'import imgui\n'), ((445, 459), 
'imgui.render', 'imgui.render', ([], {}), '()\n', (457, 459), False, 'import imgui\n'), ((511, 536), 'glfw.swap_buffers', 'glfw.swap_buffers', (['window'], {}), '(window)\n', (528, 536), False, 'import glfw\n'), ((692, 703), 'glfw.init', 'glfw.init', ([], {}), '()\n', (701, 703), False, 'import glfw\n'), ((1297, 1313), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (1311, 1313), False, 'import glfw\n'), ((480, 501), 'imgui.get_draw_data', 'imgui.get_draw_data', ([], {}), '()\n', (499, 501), False, 'import imgui\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Usage: %(scriptName) <bug_report_file> <data_prefix>
"""
import json
from timeit import default_timer
import datetime
import numpy as np
import pickle
import sys
from multiprocessing import Pool
from operator import itemgetter
from scipy import sparse
from sklearn.feature_extraction.text import TfidfTransformer
from tqdm import tqdm
from unqlite import UnQLite
from date_utils import convert_commit_date
def main():
    """CLI entry point.

    Usage: script <bug_report_file> <data_prefix>.  Vectorizes the
    enriched-API representation for every bug-fixing commit, printing
    start/end timestamps and the elapsed wall time.
    """
    print("Start", datetime.datetime.now().isoformat())
    start_time = default_timer()

    bug_report_file_path = sys.argv[1]
    print("bug report file path", bug_report_file_path)
    data_prefix = sys.argv[2]
    print("data prefix", data_prefix)

    vectorize_enriched_api(extract_fixes_list(bug_report_file_path), data_prefix)

    end_time = default_timer()
    print("End", datetime.datetime.now().isoformat())
    print("total time", end_time - start_time)
def load_bug_reports(bug_report_file_path):
    """Parse the JSON bug-report file (produced from XML) and return it."""
    with open(bug_report_file_path) as handle:
        return json.load(handle)
def sort_bug_reports_by_commit_date(bug_reports):
    """Return the fixing-commit shas ordered by commit date (oldest first).

    Args:
        bug_reports: dict keyed by bug-report id, each value holding
            ``['commit']['metadata']`` with 'sha' and 'date' strings.

    Returns:
        List of cleaned sha strings sorted by their parsed commit date.
    """
    commit_dates = []
    # FIX: the original used `enumerate()` but never used the index, and
    # looked up the metadata dict repeatedly; both cleaned up here.
    for commit in tqdm(bug_reports):
        metadata = bug_reports[commit]['commit']['metadata']
        sha = metadata['sha'].replace('commit ', '').strip()
        commit_date = convert_commit_date(
            metadata['date'].replace('Date:', '').strip())
        commit_dates.append((sha, commit_date))
    # Sort by date only; Python's sort is stable, so ties keep input order.
    sorted_commit_dates = sorted(commit_dates, key=itemgetter(1))
    return [sha for sha, _ in sorted_commit_dates]
def extract_fixes_list(bug_report_file_path):
    """Load the bug-report JSON and return fixing commits sorted by date."""
    return sort_bug_reports_by_commit_date(load_bug_reports(bug_report_file_path))
def find_supertype_shas(types, class_name_lookup, variable_sha):
    """Return shas of a type's direct supertypes (superclasses then interfaces).

    Unknown ``variable_sha`` yields an empty list; supertype names that are
    not present in ``class_name_lookup`` are silently skipped.
    """
    if variable_sha not in types:
        return []
    type_record = pickle.loads(types[variable_sha])
    candidate_names = (type_record['superclassNames']
                       + type_record['interfaceNames'])
    return [class_name_lookup[name]
            for name in candidate_names
            if name in class_name_lookup]
def find_types_shas(types, class_name_lookup, sha):
    """Breadth-first walk of the supertype graph starting at *sha*.

    Returns every reachable sha (including *sha* itself) in visit order,
    with duplicates suppressed.
    """
    visited = []
    queue = [sha]
    while queue:
        current = queue.pop(0)
        if current in visited:
            continue
        visited.append(current)
        queue.extend(find_supertype_shas(types, class_name_lookup, current))
    return visited
def get_indexes(asts, shas):
    """Return the 'source' row index stored in each sha's pickled AST record."""
    return [pickle.loads(asts[sha])['source'] for sha in shas]
def add_types_source_to_bug_report_data(data, data_prefix, class_name_lookup, ast_sha):
    """Build the "enriched API" feature rows for one class (``ast_sha``).

    For each method of the class, sums the raw-count rows of every type
    (plus its transitive supertypes) used by the method's variables, then
    appends one final row aggregating all per-method rows for the class.

    Args:
        data: sparse raw-count matrix; rows are indexed by AST 'source' ids.
        data_prefix: path prefix for the UnQLite index databases.
        class_name_lookup: maps class name -> AST sha.
        ast_sha: sha of the class whose methods are vectorized.

    Returns:
        (sparse matrix of method rows + one class row,
         lookup dict with 'enrichedApiStart'/'enrichedApiEnd' row offsets,
         ast_sha)
    """
    # NOTE(review): flags 0x00000100 | 0x00000001 presumably open the DBs
    # read-only — confirm against the UnQLite flag constants.
    asts = UnQLite(data_prefix+"_ast_index_collection_index_db", flags = 0x00000100 | 0x00000001)
    types = UnQLite(data_prefix+"_ast_types_collection_index_db", flags = 0x00000100 | 0x00000001)
    # current_type = types[ast_sha]
    # print "searching", ast_sha
    current_type = pickle.loads(types[ast_sha])
    # print "found", ast_sha
    # print current_type['methodVariableTypes']
    # exit(0)
    types_per_method = current_type['methodVariableTypes']
    cl = data.shape[1]  # feature-vector width, reused for all-zero rows
    current_index = 0
    start = current_index  # always 0: rows for this class start at offset 0
    enriched_apis = []
    for method_types in types_per_method:
        # Resolve the method's variable type names to AST shas (unknown names skipped).
        method_type_shas = []
        for method_type in method_types:
            if method_type in class_name_lookup:
                method_type_shas.append(class_name_lookup[method_type])
        # Expand each type to the full set of its transitive supertypes.
        supertypes_shas_per_type = [set(find_types_shas(types, class_name_lookup, s)) for s in method_type_shas]
        indexes = []
        for supertypes in supertypes_shas_per_type:
            indexes.extend(get_indexes(asts, supertypes))
        if indexes == []:
            # No resolvable types for this method: emit an all-zero row.
            method_enriched_api = sparse.coo_matrix(np.zeros(cl).reshape(1,cl))
        else:
            # Sum the raw-count rows of all involved types into one row.
            method_enriched_api = sparse.coo_matrix(np.sum((data[indexes,:]), axis = 0))
        enriched_apis.append(method_enriched_api)
    # One extra row aggregating all per-method rows (zero row if no methods).
    if enriched_apis == []:
        class_enriched_api = sparse.coo_matrix(np.zeros(cl).reshape(1,cl))
    else:
        class_enriched_api = sparse.coo_matrix(np.sum(enriched_apis, axis = 0))
    enriched_apis.append(class_enriched_api)
    current_index += len(enriched_apis)
    asts.close()
    types.close()
    lookup = {}
    lookup['enrichedApiStart'] = start
    lookup['enrichedApiEnd'] = current_index - 1
    enriched_apis_matrix = sparse.vstack(enriched_apis)
    return (enriched_apis_matrix, lookup, ast_sha)
def vectorize_enriched_api(bug_report_fixing_commits, data_prefix):
    """Vectorize every fixing commit in a 12-worker process pool.

    Progress is reported with tqdm; results are only counted, not returned.
    """
    work = [(data_prefix, sha) for sha in bug_report_fixing_commits]
    pool = Pool(12, maxtasksperchild=1)
    results = list(tqdm(pool.imap(_f, work), total=len(work)))
    print("r", len(results))
def _f(args):
    """Pool adapter: unpack (data_prefix, fixing_commit) and delegate."""
    data_prefix, fixing_commit = args
    return extract_enriched_api(data_prefix, fixing_commit)
def extract_enriched_api(data_prefix, bug_report_full_sha):
    """Produce the enriched-API matrices for one bug-fixing commit.

    Stacks the per-class enriched-API rows for every file tied to the
    commit, appends the bug report's own raw-count row, then writes:
      * <prefix>_<id>_partial_enriched_api.npz  (raw counts)
      * <prefix>_<id>_partial_enriched_api_index_lookup  (JSON row ranges)
      * <prefix>_<id>_tfidf_enriched_api.npz  (tf-idf transformed)

    Returns:
        The 7-character bug-report id (prefix of the full sha).
    """
    data = sparse.load_npz(data_prefix+'_raw_count_data.npz')
    # NOTE(review): flags presumably open the DB read-only — confirm.
    bug_report_files_collection_db = UnQLite(data_prefix+"_bug_report_files_collection_db", flags = 0x00000100 | 0x00000001)
    current_files = pickle.loads(bug_report_files_collection_db[bug_report_full_sha])
    bug_report_files_collection_db.close()
    bug_report_id = bug_report_full_sha[0:7]
    shas = current_files['shas']
    class_name_lookup = current_files['class_name_to_sha']
    bug_report_data = []
    bug_report_lookup = {}
    n_rows = 0  # running row offset in the stacked matrix
    for ast_sha in shas:
        ast_data, lookup, current_ast_sha = add_types_source_to_bug_report_data(data, data_prefix, class_name_lookup, ast_sha)
        current_index = n_rows
        bug_report_data.append(ast_data)
        # Shift the class-local row offsets into global matrix coordinates.
        for k in lookup:
            lookup[k] += current_index
        bug_report_lookup[current_ast_sha] = lookup
        n_rows += ast_data.shape[0]
    # Append the bug report's own text row as the final row.
    bug_report_row = get_bug_report(data_prefix, data, bug_report_id)
    bug_report_data.append(bug_report_row)
    bug_report_data_matrix = sparse.vstack(bug_report_data)
    sparse.save_npz(data_prefix+'_'+bug_report_id+'_partial_enriched_api', bug_report_data_matrix)
    with open(data_prefix+'_'+bug_report_id+'_partial_enriched_api_index_lookup', 'w') as outfile:
        json.dump(bug_report_lookup, outfile)
    # Fit tf-idf over this commit's matrix only (per-commit weighting).
    transformer = TfidfTransformer()
    tf_idf_data = transformer.fit_transform(bug_report_data_matrix)
    sparse.save_npz(data_prefix+'_'+bug_report_id+'_tfidf_enriched_api', tf_idf_data)
    # print "bug_report_id", bug_report_id
    return bug_report_id
def get_bug_report(data_prefix, vectorized_data, bug_report_id):
    """Return the vectorized row corresponding to the bug report's text."""
    index_db = UnQLite(data_prefix + "_bug_report_index_collection_index_db")
    record = pickle.loads(index_db[bug_report_id])
    index_db.close()
    return vectorized_data[record['report'], :]
if __name__ == '__main__':
    # Script entry point (no side effects on import).
    main()
| [
"sklearn.feature_extraction.text.TfidfTransformer",
"unqlite.UnQLite",
"timeit.default_timer",
"scipy.sparse.load_npz",
"tqdm.tqdm",
"operator.itemgetter",
"json.load",
"numpy.sum",
"datetime.datetime.now",
"numpy.zeros",
"multiprocessing.Pool",
"pickle.loads",
"scipy.sparse.save_npz",
"sc... | [((543, 558), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (556, 558), False, 'from timeit import default_timer\n'), ((846, 861), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (859, 861), False, 'from timeit import default_timer\n'), ((2114, 2147), 'pickle.loads', 'pickle.loads', (['types[variable_sha]'], {}), '(types[variable_sha])\n', (2126, 2147), False, 'import pickle\n'), ((3143, 3213), 'unqlite.UnQLite', 'UnQLite', (["(data_prefix + '_ast_index_collection_index_db')"], {'flags': '(256 | 1)'}), "(data_prefix + '_ast_index_collection_index_db', flags=256 | 1)\n", (3150, 3213), False, 'from unqlite import UnQLite\n'), ((3242, 3312), 'unqlite.UnQLite', 'UnQLite', (["(data_prefix + '_ast_types_collection_index_db')"], {'flags': '(256 | 1)'}), "(data_prefix + '_ast_types_collection_index_db', flags=256 | 1)\n", (3249, 3312), False, 'from unqlite import UnQLite\n'), ((3416, 3444), 'pickle.loads', 'pickle.loads', (['types[ast_sha]'], {}), '(types[ast_sha])\n', (3428, 3444), False, 'import pickle\n'), ((4883, 4911), 'scipy.sparse.vstack', 'sparse.vstack', (['enriched_apis'], {}), '(enriched_apis)\n', (4896, 4911), False, 'from scipy import sparse\n'), ((5174, 5202), 'multiprocessing.Pool', 'Pool', (['(12)'], {'maxtasksperchild': '(1)'}), '(12, maxtasksperchild=1)\n', (5178, 5202), False, 'from multiprocessing import Pool\n'), ((5420, 5472), 'scipy.sparse.load_npz', 'sparse.load_npz', (["(data_prefix + '_raw_count_data.npz')"], {}), "(data_prefix + '_raw_count_data.npz')\n", (5435, 5472), False, 'from scipy import sparse\n'), ((5508, 5579), 'unqlite.UnQLite', 'UnQLite', (["(data_prefix + '_bug_report_files_collection_db')"], {'flags': '(256 | 1)'}), "(data_prefix + '_bug_report_files_collection_db', flags=256 | 1)\n", (5515, 5579), False, 'from unqlite import UnQLite\n'), ((5617, 5682), 'pickle.loads', 'pickle.loads', (['bug_report_files_collection_db[bug_report_full_sha]'], {}), 
'(bug_report_files_collection_db[bug_report_full_sha])\n', (5629, 5682), False, 'import pickle\n'), ((6456, 6486), 'scipy.sparse.vstack', 'sparse.vstack', (['bug_report_data'], {}), '(bug_report_data)\n', (6469, 6486), False, 'from scipy import sparse\n'), ((6492, 6596), 'scipy.sparse.save_npz', 'sparse.save_npz', (["(data_prefix + '_' + bug_report_id + '_partial_enriched_api')", 'bug_report_data_matrix'], {}), "(data_prefix + '_' + bug_report_id + '_partial_enriched_api',\n bug_report_data_matrix)\n", (6507, 6596), False, 'from scipy import sparse\n'), ((6751, 6769), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (6767, 6769), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((6842, 6933), 'scipy.sparse.save_npz', 'sparse.save_npz', (["(data_prefix + '_' + bug_report_id + '_tfidf_enriched_api')", 'tf_idf_data'], {}), "(data_prefix + '_' + bug_report_id + '_tfidf_enriched_api',\n tf_idf_data)\n", (6857, 6933), False, 'from scipy import sparse\n'), ((7093, 7155), 'unqlite.UnQLite', 'UnQLite', (["(data_prefix + '_bug_report_index_collection_index_db')"], {}), "(data_prefix + '_bug_report_index_collection_index_db')\n", (7100, 7155), False, 'from unqlite import UnQLite\n'), ((7171, 7227), 'pickle.loads', 'pickle.loads', (['bug_report_index_collection[bug_report_id]'], {}), '(bug_report_index_collection[bug_report_id])\n', (7183, 7227), False, 'import pickle\n'), ((1159, 1185), 'json.load', 'json.load', (['bug_report_file'], {}), '(bug_report_file)\n', (1168, 1185), False, 'import json\n'), ((1321, 1338), 'tqdm.tqdm', 'tqdm', (['bug_reports'], {}), '(bug_reports)\n', (1325, 1338), False, 'from tqdm import tqdm\n'), ((6694, 6731), 'json.dump', 'json.dump', (['bug_report_lookup', 'outfile'], {}), '(bug_report_lookup, outfile)\n', (6703, 6731), False, 'import json\n'), ((1655, 1668), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (1665, 1668), False, 'from operator import itemgetter\n'), 
((2953, 2976), 'pickle.loads', 'pickle.loads', (['asts[sha]'], {}), '(asts[sha])\n', (2965, 2976), False, 'import pickle\n'), ((4593, 4622), 'numpy.sum', 'np.sum', (['enriched_apis'], {'axis': '(0)'}), '(enriched_apis, axis=0)\n', (4599, 4622), True, 'import numpy as np\n'), ((493, 516), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (514, 516), False, 'import datetime\n'), ((906, 929), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (927, 929), False, 'import datetime\n'), ((4344, 4376), 'numpy.sum', 'np.sum', (['data[indexes, :]'], {'axis': '(0)'}), '(data[indexes, :], axis=0)\n', (4350, 4376), True, 'import numpy as np\n'), ((4508, 4520), 'numpy.zeros', 'np.zeros', (['cl'], {}), '(cl)\n', (4516, 4520), True, 'import numpy as np\n'), ((4250, 4262), 'numpy.zeros', 'np.zeros', (['cl'], {}), '(cl)\n', (4258, 4262), True, 'import numpy as np\n')] |
from .db import db
from .userfollower import UserFollower
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from sqlalchemy import Table, Column, Integer, ForeignKey, or_
from .directmessage import DirectMessage
from .userequipment import UserEquipment
from .equipment import Equipment
from .message import Message
from .messagereceiver import MessageReceiver
from sqlalchemy.orm import validates
class User(db.Model, UserMixin):
    """SQLAlchemy model for an application user (Flask-Login compatible)."""
    __tablename__ = 'Users'
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.String(40), nullable = False, unique = True)
    name = db.Column(db.String(100), nullable=True)
    email = db.Column(db.String(255), nullable = False, unique = True)
    # Stores a werkzeug password hash; exposed via the `password` property.
    hashed_password = db.Column(db.String(255), nullable = False)
    bio = db.Column(db.Text, nullable=True)
    # NOTE(review): a non-null default of "www.google.com" looks like a
    # placeholder — confirm this is intentional.
    websiteUrl = db.Column(db.Text, nullable=False, default="www.google.com")
    userType = db.Column(db.Integer, nullable=True, default=0)
    profilePicUrl = db.Column(db.Text, nullable=True)
    createdAt = db.Column(db.DateTime(timezone=True), server_default=db.func.now()) #func.sysdate())
    updatedAt = db.Column(db.DateTime(timezone=True), server_default=db.func.now(), server_onupdate=db.func.now())
    # Relationships to posts, comments, and messages this user owns or is tagged in.
    ownPosts = db.relationship('Post', foreign_keys='Post.userId')
    ownComments = db.relationship('Comment', foreign_keys='Comment.userId')
    taggedInPosts = db.relationship('Post', secondary='taggedusers')
    likedPosts = db.relationship('Post', secondary='likedposts')
    savedPosts = db.relationship('Post', secondary='savedposts')
    sentMessages = db.relationship('DirectMessage', foreign_keys='DirectMessage.senderId')
    receivedMessages = db.relationship('DirectMessage', foreign_keys='DirectMessage.receiverId')
    likedComments = db.relationship('Comment', secondary='commentlikes')
    taggedInComments = db.relationship('Comment', secondary='commenttaggedusers')
    # Plain lists populated lazily by get_followers()/get_following()
    # instead of SQLAlchemy relationships (see commented alternatives).
    followers = [] #db.relationship('User', secondary='userfollowers', foreign_keys='UserFollower.followerId')
    following = [] #db.relationship('User', secondary='userfollowers', foreign_keys='UserFollower.userId')
    # Populated lazily by the get_*messages/conversations helpers below.
    allMessages = []
    # equipmentList = []
    equipmentList = db.relationship('Equipment', secondary="UserEquipments")

    # @validates('username', 'email')
    # def convert_lower(self, key, value):
    #     return value.lower()
    @property
    def password(self):
        """Read-only alias for the stored password hash."""
        return self.hashed_password

    @password.setter
    def password(self, password):
        """Hash the plaintext password and store only the hash."""
        self.hashed_password = generate_password_hash(password)
    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password, password)
def get_followers(self):
ufs = UserFollower.query.filter(UserFollower.userId == self.id).all()
self.followers = [uf.follower for uf in ufs]
def get_following(self):
ufs = UserFollower.query.filter(UserFollower.followerId == self.id).all()
self.following = [uf.person for uf in ufs]
def get_messages(self):
msgs = DirectMessage.query\
.filter(or_(DirectMessage.senderId == self.id, \
DirectMessage.receiverId == self.id)).order_by(DirectMessage.id).all()
self.allMessages = msgs
def get_conversations(self):
convos = MessageReceiver.query\
.filter(or_(MessageReceiver.senderId == self.id, \
MessageReceiver.receiverId == self.id)).order_by(MessageReceiver.id).all()
uniqueConvos = []
if len(convos):
messageIdSet = set()
for convo in convos:
if convo.senderId != self.id:
uniqueConvos.append(convo)
else:
if convo.messageId not in messageIdSet:
uniqueConvos.append(convo)
messageIdSet.add(convo.messageId)
self.allMessages = uniqueConvos
def get_last_conversation(self):
convo = MessageReceiver.query\
.filter(or_(MessageReceiver.senderId == self.id, \
MessageReceiver.receiverId == self.id)).order_by(-MessageReceiver.id).first()
self.allMessages = [convo]
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
}
def to_dict_with_posts_and_follows(self):
self.get_followers()
self.get_following()
return {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"ownPosts": [post.to_dict() for post in self.ownPosts],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
def to_dict_with_posts(self):
return {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"ownPosts": [post.to_dict() for post in self.ownPosts],
}
def to_dict_with_posts_fast(self):
user_as_dict_basic = {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
}
user_as_dict = user_as_dict_basic.copy()
user_as_dict["ownPosts"] = [post.to_dict_fast_own_user(user_as_dict_basic) for post in self.ownPosts]
return user_as_dict
# "ownPosts": [post.to_dict_fast() for post in self.ownPosts],
def to_dict_feed(self):
self.get_following()
return {
"followingIds": [int(follow.id) for follow in self.following]
}
def to_dict_for_mentions(self):
return {
"id": self.id,
"displayName": self.name,
"name": self.username,
"profilePicUrl": self.profilePicUrl,
}
def to_dict_no_posts(self):
#no posts so if a post has this user, there is no infinite circular references
return {
"id": self.id,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
}
def to_dict_for_self(self):
self.get_followers()
self.get_following()
# self.get_messages()
self.get_conversations()
return {
"id": self.id,
"username": self.username,
"name": self.name,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"userType": self.userType,
"ownPosts": [post.to_dict() for post in self.ownPosts],
"likedPosts": [post.to_dict() for post in self.likedPosts],
"savedPosts": [post.to_dict() for post in self.savedPosts],
"taggedInPosts": [post.to_dict() for post in self.taggedInPosts],
"messages": [m.to_dict() for m in self.allMessages], #[sentMsg.to_dict() for sentMsg in self.sentMessages] + [recvdMsg.to_dict() for recvdMsg in self.receivedMessages],
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"likedComments": [comment.to_dict() for comment in self.likedComments],
"taggedInComments": [comment.to_dict() for comment in self.taggedInComments],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
def to_dict_as_generic_profile(self):
'''
compared to "for_self" this does not include:
- messages
and more later
'''
self.get_followers()
self.get_following()
return {
"id": self.id,
"username": self.username,
"name": self.name,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"ownPosts": [post.to_dict() for post in self.ownPosts],
"likedPosts": [post.to_dict() for post in self.likedPosts],
"savedPosts": [post.to_dict() for post in self.savedPosts],
"taggedInPosts": [post.to_dict() for post in self.taggedInPosts],
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"likedComments": [comment.to_dict() for comment in self.likedComments],
"taggedInComments": [comment.to_dict() for comment in self.taggedInComments],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
'''
mapper(
User, t_users,
properties={
'followers': relation(
User,
secondary=t_follows,
primaryjoin=(t_follows.c.followee_id==t_users.c.id),
secondaryjoin=(t_follows.c.follower_id==t_users.c.id),
),
'followees': relation(
User,
secondary=t_follows,
primaryjoin=(t_follows.c.follower_id==t_users.c.id),
secondaryjoin=(t_follows.c.followee_id==t_users.c.id),
),
},
)
'''
| [
"sqlalchemy.or_",
"werkzeug.security.generate_password_hash",
"werkzeug.security.check_password_hash"
] | [((2516, 2548), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {}), '(password)\n', (2538, 2548), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((2600, 2644), 'werkzeug.security.check_password_hash', 'check_password_hash', (['self.password', 'password'], {}), '(self.password, password)\n', (2619, 2644), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((3024, 3099), 'sqlalchemy.or_', 'or_', (['(DirectMessage.senderId == self.id)', '(DirectMessage.receiverId == self.id)'], {}), '(DirectMessage.senderId == self.id, DirectMessage.receiverId == self.id)\n', (3027, 3099), False, 'from sqlalchemy import Table, Column, Integer, ForeignKey, or_\n'), ((3254, 3333), 'sqlalchemy.or_', 'or_', (['(MessageReceiver.senderId == self.id)', '(MessageReceiver.receiverId == self.id)'], {}), '(MessageReceiver.senderId == self.id, MessageReceiver.receiverId == self.id)\n', (3257, 3333), False, 'from sqlalchemy import Table, Column, Integer, ForeignKey, or_\n'), ((3823, 3902), 'sqlalchemy.or_', 'or_', (['(MessageReceiver.senderId == self.id)', '(MessageReceiver.receiverId == self.id)'], {}), '(MessageReceiver.senderId == self.id, MessageReceiver.receiverId == self.id)\n', (3826, 3902), False, 'from sqlalchemy import Table, Column, Integer, ForeignKey, or_\n')] |
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import str
from importlib import import_module
import numpy as np
import os.path
import pandas as pd
import sys
import unittest
from tests.common_functions import add_components_and_load_data
from gridpath.project.operations.operational_types.common_functions import \
determine_relevant_timepoints
# Root of the on-disk fixtures consumed by the test cases below.
TEST_DATA_DIRECTORY = \
    os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_data")
# Import prerequisite modules
# These gridpath modules must be importable before the test model can be
# built; the tests themselves add no further components.
PREREQUISITE_MODULE_NAMES = [
    "temporal.operations.timepoints", "temporal.operations.horizons",
    "geography.load_zones",
    "project.__init__"
]
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
    try:
        imported_module = import_module("." + str(mdl), package='gridpath')
        IMPORTED_PREREQ_MODULES.append(imported_module)
    except ImportError:
        # Abort immediately: every test depends on all prerequisite modules.
        print("ERROR! Module " + str(mdl) + " not found.")
        sys.exit(1)
class TestOperationalTypeCommonFunctions(unittest.TestCase):
    """
    Test the common_functions module in the operational types package.
    """
    def test_determine_relevant_timepoints(self):
        """
        Check that the list of relevant timepoints is as expected based on
        the current timepoint and the minimum up/down time (and, on the data
        side, the duration of other timepoints). Add any other cases to
        check that the 'determine_relevant_timepoints' function gives the
        expected results.
        """
        m, data = add_components_and_load_data(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=None, # No need to name since not adding components
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage=""
        )
        instance = m.create_instance(data)
        # Each case: a project ("g"), a current timepoint ("tmp"), a minimum
        # up/down time, and the timepoints expected back (current timepoint
        # first, then walking backwards through the horizon).
        test_cases = {
            1: {"min_time": 4, "g": "Gas_CCGT", "tmp": 20200103,
                "relevant_timepoints": [20200103, 20200102]},
            2: {"min_time": 5, "g": "Gas_CCGT", "tmp": 20200103,
                "relevant_timepoints":
                    [20200103, 20200102, 20200101, 20200124, 20200123]},
            3: {"min_time": 8, "g": "Gas_CCGT", "tmp": 20200103,
                "relevant_timepoints":
                    [20200103, 20200102, 20200101, 20200124, 20200123,
                     20200122, 20200121]},
            4: {"min_time": 1, "g": "Gas_CCGT", "tmp": 20200120,
                "relevant_timepoints": [20200120, 20200119, 20200118]},
            5: {"min_time": 2, "g": "Gas_CCGT", "tmp": 20200120,
                "relevant_timepoints":
                    [20200120, 20200119, 20200118, 20200117]},
            6: {"min_time": 3, "g": "Gas_CCGT", "tmp": 20200120,
                "relevant_timepoints":
                    [20200120, 20200119, 20200118, 20200117, 20200116]},
            # Test min times of longer duration than the horizon in a
            # 'circular' horizon setting
            7: {"min_time": 100, "g": "Gas_CCGT", "tmp": 20200101,
                "relevant_timepoints":
                    [20200101, 20200124, 20200123, 20200122, 20200121,
                     20200120, 20200119, 20200118, 20200117, 20200116,
                     20200115, 20200114, 20200113, 20200112, 20200111,
                     20200110, 20200109, 20200108, 20200107, 20200106,
                     20200105, 20200104, 20200103, 20200102, 20200101]},
            # If we're in the first timepoint of a linear horizon, test that
            # we only get that timepoint (i.e. that we break out of the loop
            # before adding any more timepoints)
            8: {"min_time": 100, "g": "Gas_CCGT", "tmp": 20200201,
                "relevant_timepoints": [20200201]},
            # Test that we break out of the loop with min times that reach the
            # first horizon timepoint in a 'linear' horizon setting
            9: {"min_time": 100, "g": "Gas_CCGT", "tmp": 20200202,
                "relevant_timepoints": [20200202, 20200201]}
        }
        for test_case in test_cases.keys():
            expected_list = test_cases[test_case]["relevant_timepoints"]
            actual_list, actual_linked_tmps = determine_relevant_timepoints(
                mod=instance,
                g=test_cases[test_case]["g"],
                tmp=test_cases[test_case]["tmp"],
                min_time=test_cases[test_case]["min_time"]
            )
            self.assertListEqual(expected_list, actual_list)
            # No linked timepoints, so check that the list is empty in every
            # test case
            self.assertListEqual([], actual_linked_tmps)
    def test_determine_relevant_linked_timepoints(self):
        """
        Check that the lists of relevant timepoints and relevant linked
        timepoints are as expected based on the current timepoint and the
        minimum up/down time (and, on the data side, the duration of other
        timepoints).
        """
        m, data = add_components_and_load_data(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=None, # No need to name since not adding components
            test_data_dir=os.path.join(TEST_DATA_DIRECTORY, "subproblems"),
            subproblem="202002",
            stage=""
        )
        instance = m.create_instance(data)
        # Linked timepoints are identified by non-positive ids (0, -1, ...)
        # preceding the subproblem's first timepoint.
        test_cases = {
            1: {"min_time": 4, "g": "Gas_CCGT", "tmp": 20200203,
                "relevant_timepoints": [20200203, 20200202, 20200201],
                "relevant_linked_timepoints": [0]},
            2: {"min_time": 5, "g": "Gas_CCGT", "tmp": 20200203,
                "relevant_timepoints": [20200203, 20200202, 20200201],
                "relevant_linked_timepoints": [0, -1]},
            # Stop at the last included linked timepoint if the min time is
            # longer than the total duration of the current timepoint to the
            # last linked timepoint
            3: {"min_time": 24, "g": "Gas_CCGT", "tmp": 20200203,
                "relevant_timepoints": [20200203, 20200202, 20200201],
                "relevant_linked_timepoints": [0, -1, -2, -3, -4, -5, -6,
                                               -7, -8, -9, -10, -11]},
            # No linked timepoint if min time does not reach them
            4: {"min_time": 1, "g": "Gas_CCGT", "tmp": 20200203,
                "relevant_timepoints": [20200203],
                "relevant_linked_timepoints": []},
            # Starting in the first timepoint of the horizon
            5: {"min_time": 4, "g": "Gas_CCGT", "tmp": 20200201,
                "relevant_timepoints": [20200201],
                "relevant_linked_timepoints": [0, -1, -2]},
        }
        for test_case in test_cases.keys():
            expected_rel_tmp_list = test_cases[test_case][
                "relevant_timepoints"]
            expected_rel_linked_tmp_list = test_cases[test_case][
                "relevant_linked_timepoints"]
            actual_rel_tmp_list, actual_rel_linked_tmp_list = \
                determine_relevant_timepoints(
                    mod=instance,
                    g=test_cases[test_case]["g"],
                    tmp=test_cases[test_case]["tmp"],
                    min_time=test_cases[test_case]["min_time"]
                )
            self.assertListEqual(expected_rel_tmp_list, actual_rel_tmp_list)
            self.assertListEqual(actual_rel_linked_tmp_list,
                                 expected_rel_linked_tmp_list)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| [
"gridpath.project.operations.operational_types.common_functions.determine_relevant_timepoints",
"tests.common_functions.add_components_and_load_data",
"builtins.str",
"sys.exit",
"unittest.main"
] | [((8114, 8129), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8127, 8129), False, 'import unittest\n'), ((2120, 2277), 'tests.common_functions.add_components_and_load_data', 'add_components_and_load_data', ([], {'prereq_modules': 'IMPORTED_PREREQ_MODULES', 'module_to_test': 'None', 'test_data_dir': 'TEST_DATA_DIRECTORY', 'subproblem': '""""""', 'stage': '""""""'}), "(prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=None, test_data_dir=TEST_DATA_DIRECTORY, subproblem='',\n stage='')\n", (2148, 2277), False, 'from tests.common_functions import add_components_and_load_data\n'), ((1541, 1552), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1549, 1552), False, 'import sys\n'), ((4802, 4962), 'gridpath.project.operations.operational_types.common_functions.determine_relevant_timepoints', 'determine_relevant_timepoints', ([], {'mod': 'instance', 'g': "test_cases[test_case]['g']", 'tmp': "test_cases[test_case]['tmp']", 'min_time': "test_cases[test_case]['min_time']"}), "(mod=instance, g=test_cases[test_case]['g'],\n tmp=test_cases[test_case]['tmp'], min_time=test_cases[test_case][\n 'min_time'])\n", (4831, 4962), False, 'from gridpath.project.operations.operational_types.common_functions import determine_relevant_timepoints\n'), ((7629, 7789), 'gridpath.project.operations.operational_types.common_functions.determine_relevant_timepoints', 'determine_relevant_timepoints', ([], {'mod': 'instance', 'g': "test_cases[test_case]['g']", 'tmp': "test_cases[test_case]['tmp']", 'min_time': "test_cases[test_case]['min_time']"}), "(mod=instance, g=test_cases[test_case]['g'],\n tmp=test_cases[test_case]['tmp'], min_time=test_cases[test_case][\n 'min_time'])\n", (7658, 7789), False, 'from gridpath.project.operations.operational_types.common_functions import determine_relevant_timepoints\n'), ((1364, 1372), 'builtins.str', 'str', (['mdl'], {}), '(mdl)\n', (1367, 1372), False, 'from builtins import str\n'), ((1507, 1515), 'builtins.str', 'str', (['mdl'], {}), '(mdl)\n', 
(1510, 1515), False, 'from builtins import str\n')] |
"""
Flask application settings.
"""
import os
DEBUG = True
# Output un-merged files in debug mode.
#ASSETS_DEBUG = DEBUG
SECRET_KEY = os.environ.get('SECRET_KEY', None)
MY_VAR = os.environ.get('MY_VAR', None)
#: Mongodb settings
MONGODB_SETTINGS = {'DB' : 'digfont'}
#: CSRF key
SECRET_KEY = "dig.font.s3cr3t"
| [
"os.environ.get"
] | [((138, 172), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""', 'None'], {}), "('SECRET_KEY', None)\n", (152, 172), False, 'import os\n'), ((182, 212), 'os.environ.get', 'os.environ.get', (['"""MY_VAR"""', 'None'], {}), "('MY_VAR', None)\n", (196, 212), False, 'import os\n')] |
import math
import time
from compas_fab.backends import RosClient
from compas.artists import Artist
from compas.geometry import Frame
with RosClient("localhost") as client:
robot = client.load_robot(load_geometry=True)
group = robot.main_group_name
frame = Frame((0.4, 0.3, 0.05), (-1, 0, 0), (0, 1, 0))
tolerance_position = 0.001
tolerance_axes = [math.radians(1)] * 3
start_configuration = robot.zero_configuration()
start_configuration.joint_values = (-0.106, 5.351, 2.231, -2.869, 4.712, 1.465)
# create goal constraints from frame
goal_constraints = robot.constraints_from_frame(frame, tolerance_position, tolerance_axes, group)
trajectory = robot.plan_motion(goal_constraints, start_configuration, group, options=dict(planner_id="RRT"))
print("Computed kinematic path with %d configurations." % len(trajectory.points))
print("Executing this path at full speed would take approx. %.3f seconds." % trajectory.time_from_start)
artist = Artist(robot.model)
for tp in trajectory.points:
config = robot.zero_configuration()
config.joint_values = tp.joint_values
artist.update(config)
artist.draw_visual()
artist.redraw()
time.sleep(0.02)
| [
"compas.geometry.Frame",
"compas.artists.Artist",
"time.sleep",
"math.radians",
"compas_fab.backends.RosClient"
] | [((142, 164), 'compas_fab.backends.RosClient', 'RosClient', (['"""localhost"""'], {}), "('localhost')\n", (151, 164), False, 'from compas_fab.backends import RosClient\n'), ((273, 319), 'compas.geometry.Frame', 'Frame', (['(0.4, 0.3, 0.05)', '(-1, 0, 0)', '(0, 1, 0)'], {}), '((0.4, 0.3, 0.05), (-1, 0, 0), (0, 1, 0))\n', (278, 319), False, 'from compas.geometry import Frame\n'), ((1000, 1019), 'compas.artists.Artist', 'Artist', (['robot.model'], {}), '(robot.model)\n', (1006, 1019), False, 'from compas.artists import Artist\n'), ((1235, 1251), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (1245, 1251), False, 'import time\n'), ((373, 388), 'math.radians', 'math.radians', (['(1)'], {}), '(1)\n', (385, 388), False, 'import math\n')] |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.from_target import FromTarget
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.what_changed import WhatChanged
from pants.backend.core.wrapped_globs import RGlobs
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.source_root import SourceRoot
from pants.goal.workspace import Workspace
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseWhatChangedTest(ConsoleTaskTestBase):
  """Shared scaffolding for `what-changed` tests: registers the BUILD-file
  symbols the test data uses and fakes the SCM workspace."""
  @property
  def alias_groups(self):
    # Target types, factories, and objects that may appear in test BUILD files.
    return BuildFileAliases.create(
      targets={
        'java_library': JavaLibrary,
        'python_library': PythonLibrary,
        'jar_library': JarLibrary,
        'unpacked_jars': UnpackedJars,
        'resources': Resources,
        'java_thrift_library': JavaThriftLibrary,
        'java_protobuf_library': JavaProtobufLibrary,
        'python_thrift_library': PythonThriftLibrary,
      },
      context_aware_object_factories={
        'source_root': SourceRoot.factory,
        'rglobs': RGlobs,
        'from_target': FromTarget,
      },
      objects={
        'jar': JarDependency,
      }
    )
  @classmethod
  def task_type(cls):
    # The console task under test.
    return WhatChanged
  def assert_console_output(self, *output, **kwargs):
    # Ensure the task always sees its two required options, merging in any
    # per-test overrides.
    options = {'spec_excludes': [], 'exclude_target_regexp': []}
    if 'options' in kwargs:
      options.update(kwargs['options'])
    kwargs['options'] = options
    super(BaseWhatChangedTest, self).assert_console_output(*output, **kwargs)
  def workspace(self, files=None, parent=None, diffspec=None, diff_files=None):
    # Build a fake Workspace returning canned file lists, asserting the task
    # queried it with the expected parent rev / diffspec.
    class MockWorkspace(Workspace):
      def touched_files(_, p):
        self.assertEqual(parent or 'HEAD', p)
        return files or []
      def changes_in(_, ds):
        self.assertEqual(diffspec, ds)
        return diff_files or []
    return MockWorkspace()
class WhatChangedTestBasic(BaseWhatChangedTest):
  """Smoke tests exercising option plumbing without any BUILD files."""
  def test_nochanges(self):
    # An empty workspace should produce no output.
    self.assert_console_output(workspace=self.workspace())
  def test_parent(self):
    # --changes-since is forwarded to the workspace as the parent rev.
    self.assert_console_output(options={'changes_since': '42'},
                               workspace=self.workspace(parent='42'))
  def test_files(self):
    # With --files, the changed files themselves are printed, not targets.
    self.assert_console_output(
      'a/b/c',
      'd',
      'e/f',
      options={'files': True},
      workspace=self.workspace(files=['a/b/c', 'd', 'e/f'])
    )
class WhatChangedTest(BaseWhatChangedTest):
  """End-to-end tests: given a fake set of changed files, what-changed should
  print exactly the owning targets (plus dependees/excludes as requested)."""
  def setUp(self):
    super(WhatChangedTest, self).setUp()
    # Lay out a small fake repo of BUILD files covering the target types and
    # source-root layouts the tests below rely on.
    self.add_to_build_file('root', dedent("""
      source_root('src/py', python_library, resources)
      source_root('resources/a1', resources)
    """))
    self.add_to_build_file('root/src/py/a', dedent("""
      python_library(
        name='alpha',
        sources=['b/c', 'd'],
        resources=['test.resources']
      )
      jar_library(
        name='beta',
        jars=[
          jar(org='gamma', name='ray', rev='1.137.bruce_banner')
        ]
      )
    """))
    self.add_to_build_file('root/src/py/1', dedent("""
      python_library(
        name='numeric',
        sources=['2']
      )
    """))
    # a <- b <- c dependency chain, used by the dependee-inclusion tests.
    self.add_to_build_file('root/src/py/dependency_tree/a', dedent("""
      python_library(
        name='a',
        sources=['a.py'],
      )
    """))
    self.add_to_build_file('root/src/py/dependency_tree/b', dedent("""
      python_library(
        name='b',
        sources=['b.py'],
        dependencies=['root/src/py/dependency_tree/a']
      )
    """))
    self.add_to_build_file('root/src/py/dependency_tree/c', dedent("""
      python_library(
        name='c',
        sources=['c.py'],
        dependencies=['root/src/py/dependency_tree/b']
      )
    """))
    # Two targets sharing one source file, for the multiple-owners test.
    self.add_to_build_file('root/src/thrift', dedent("""
      java_thrift_library(
        name='thrift',
        sources=['a.thrift']
      )
      python_thrift_library(
        name='py-thrift',
        sources=['a.thrift']
      )
    """))
    self.add_to_build_file('root/resources/a', dedent("""
      resources(
        name='a_resources',
        sources=['a.resources']
      )
    """))
    self.add_to_build_file('root/src/java/a', dedent("""
      java_library(
        name='a_java',
        sources=rglobs("*.java"),
      )
    """))
    self.add_to_build_file('root/3rdparty/BUILD.twitter', dedent("""
      jar_library(
        name='dummy',
        jars=[
          jar(org='foo', name='ray', rev='1.45')
        ])
    """))
    self.add_to_build_file('root/3rdparty/BUILD', dedent("""
      jar_library(
        name='dummy1',
        jars=[
          jar(org='foo1', name='ray', rev='1.45')
        ])
    """))
    # This is a directory that might confuse case insensitive file systems (on macs for example).
    # It should not be treated as a BUILD file.
    self.create_dir('root/scripts/a/build')
    self.add_to_build_file('root/scripts/BUILD', dedent("""
      java_library(
        name='scripts',
        sources=['a/build/scripts.java'],
      )
    """))
  def test_spec_excludes(self):
    self.assert_console_output(
      'root/src/py/a:alpha',
      options={'spec_excludes': 'root/src/py/1'},
      workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d'])
    )
  def test_owned(self):
    self.assert_console_output(
      'root/src/py/a:alpha',
      'root/src/py/1:numeric',
      workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'])
    )
  def test_multiply_owned(self):
    # One changed source file owned by two targets lists both of them.
    self.assert_console_output(
      'root/src/thrift:thrift',
      'root/src/thrift:py-thrift',
      workspace=self.workspace(files=['root/src/thrift/a.thrift'])
    )
  def test_build(self):
    # Touching a BUILD file implicates every target it defines.
    self.assert_console_output(
      'root/src/py/a:alpha',
      'root/src/py/a:beta',
      workspace=self.workspace(files=['root/src/py/a/BUILD'])
    )
  def test_resource_changed(self):
    self.assert_console_output(
      'root/src/py/a:alpha',
      workspace=self.workspace(files=['root/src/py/a/test.resources'])
    )
  def test_resource_changed_for_java_lib(self):
    self.assert_console_output(
      'root/resources/a:a_resources',
      workspace=self.workspace(files=['root/resources/a/a.resources'])
    )
  def test_build_sibling(self):
    self.assert_console_output(
      'root/3rdparty:dummy',
      workspace=self.workspace(files=['root/3rdparty/BUILD.twitter'])
    )
  def test_resource_type_error(self):
    # A malformed BUILD file (non-string resource) should surface as an error.
    self.add_to_build_file('root/resources/a1', dedent("""
      java_library(
        name='a1',
        sources=['a1.test'],
        resources=[1]
      )
    """))
    self.assert_console_raises(
      Exception,
      workspace=self.workspace(files=['root/resources/a1/a1.test'])
    )
  def test_build_directory(self):
    # This should ensure that a directory named the same as build files does not cause an exception.
    self.assert_console_output(
      'root/scripts:scripts',
      workspace=self.workspace(files=['root/scripts/a/build', 'root/scripts/a/build/scripts.java'])
    )
  def test_fast(self):
    self.assert_console_output(
      'root/src/py/a:alpha',
      'root/src/py/1:numeric',
      options={'fast': True},
      workspace=self.workspace(
        files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
      ),
    )
  def test_diffspec(self):
    # With --diffspec, changes come from Workspace.changes_in, not touched_files.
    self.assert_console_output(
      'root/src/py/a:alpha',
      'root/src/py/1:numeric',
      options={'diffspec': '42'},
      workspace=self.workspace(
        diffspec='42',
        diff_files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
      ),
    )
  def test_diffspec_removed_files(self):
    # A deleted file still maps back to the (rglobs-based) owning target.
    self.assert_console_output(
      'root/src/java/a:a_java',
      options={'diffspec': '42'},
      workspace=self.workspace(
        diffspec='42',
        diff_files=['root/src/java/a/b/c/Foo.java'],
      ),
    )
  def test_include_dependees(self):
    # direct -> immediate dependees only; transitive -> the whole chain.
    self.assert_console_output(
      'root/src/py/dependency_tree/a:a',
      workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
    )
    self.assert_console_output(
      'root/src/py/dependency_tree/a:a',
      'root/src/py/dependency_tree/b:b',
      options={'include_dependees': 'direct'},
      workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
    )
    self.assert_console_output(
      'root/src/py/dependency_tree/a:a',
      'root/src/py/dependency_tree/b:b',
      'root/src/py/dependency_tree/c:c',
      options={'include_dependees': 'transitive'},
      workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
    )
  def test_exclude(self):
    self.assert_console_output(
      'root/src/py/dependency_tree/a:a',
      'root/src/py/dependency_tree/b:b',
      'root/src/py/dependency_tree/c:c',
      options={'include_dependees': 'transitive'},
      workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
    )
    # --exclude-target-regexp filters matching targets out of the output.
    self.assert_console_output(
      'root/src/py/dependency_tree/a:a',
      'root/src/py/dependency_tree/c:c',
      options={'include_dependees': 'transitive', 'exclude_target_regexp': [':b']},
      workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
    )
  def test_deferred_sources(self):
    # Targets whose sources come from_target() another target: all three
    # targets in the chain are implicated by a BUILD change.
    self.add_to_build_file('root/proto', dedent("""
      java_protobuf_library(name='unpacked_jars',
        sources=from_target(':external-source'),
      )
      unpacked_jars(name='external-source',
        libraries=[':external-source-jars'],
        include_patterns=[
          'com/squareup/testing/**/*.proto',
        ],
      )
      jar_library(name='external-source-jars',
        jars=[
          jar(org='com.squareup.testing.protolib', name='protolib-external-test', rev='0.0.2'),
        ],
      )
    """))
    self.assert_console_output(
      'root/proto:unpacked_jars',
      'root/proto:external-source',
      'root/proto:external-source-jars',
      workspace=self.workspace(files=['root/proto/BUILD'])
    )
| [
"pants.base.build_file_aliases.BuildFileAliases.create",
"textwrap.dedent"
] | [((1442, 1926), 'pants.base.build_file_aliases.BuildFileAliases.create', 'BuildFileAliases.create', ([], {'targets': "{'java_library': JavaLibrary, 'python_library': PythonLibrary,\n 'jar_library': JarLibrary, 'unpacked_jars': UnpackedJars, 'resources':\n Resources, 'java_thrift_library': JavaThriftLibrary,\n 'java_protobuf_library': JavaProtobufLibrary, 'python_thrift_library':\n PythonThriftLibrary}", 'context_aware_object_factories': "{'source_root': SourceRoot.factory, 'rglobs': RGlobs, 'from_target': FromTarget\n }", 'objects': "{'jar': JarDependency}"}), "(targets={'java_library': JavaLibrary,\n 'python_library': PythonLibrary, 'jar_library': JarLibrary,\n 'unpacked_jars': UnpackedJars, 'resources': Resources,\n 'java_thrift_library': JavaThriftLibrary, 'java_protobuf_library':\n JavaProtobufLibrary, 'python_thrift_library': PythonThriftLibrary},\n context_aware_object_factories={'source_root': SourceRoot.factory,\n 'rglobs': RGlobs, 'from_target': FromTarget}, objects={'jar':\n JarDependency})\n", (1465, 1926), False, 'from pants.base.build_file_aliases import BuildFileAliases\n'), ((3390, 3519), 'textwrap.dedent', 'dedent', (['"""\n source_root(\'src/py\', python_library, resources)\n source_root(\'resources/a1\', resources)\n """'], {}), '(\n """\n source_root(\'src/py\', python_library, resources)\n source_root(\'resources/a1\', resources)\n """\n )\n', (3396, 3519), False, 'from textwrap import dedent\n'), ((3556, 3843), 'textwrap.dedent', 'dedent', (['"""\n python_library(\n name=\'alpha\',\n sources=[\'b/c\', \'d\'],\n resources=[\'test.resources\']\n )\n\n jar_library(\n name=\'beta\',\n jars=[\n jar(org=\'gamma\', name=\'ray\', rev=\'1.137.bruce_banner\')\n ]\n )\n """'], {}), '(\n """\n python_library(\n name=\'alpha\',\n sources=[\'b/c\', \'d\'],\n resources=[\'test.resources\']\n )\n\n jar_library(\n name=\'beta\',\n jars=[\n jar(org=\'gamma\', name=\'ray\', rev=\'1.137.bruce_banner\')\n ]\n )\n """\n )\n', (3562, 3843), False, 'from textwrap 
import dedent\n'), ((3880, 3985), 'textwrap.dedent', 'dedent', (['"""\n python_library(\n name=\'numeric\',\n sources=[\'2\']\n )\n """'], {}), '(\n """\n python_library(\n name=\'numeric\',\n sources=[\'2\']\n )\n """\n )\n', (3886, 3985), False, 'from textwrap import dedent\n'), ((4038, 4141), 'textwrap.dedent', 'dedent', (['"""\n python_library(\n name=\'a\',\n sources=[\'a.py\'],\n )\n """'], {}), '(\n """\n python_library(\n name=\'a\',\n sources=[\'a.py\'],\n )\n """\n )\n', (4044, 4141), False, 'from textwrap import dedent\n'), ((4194, 4352), 'textwrap.dedent', 'dedent', (['"""\n python_library(\n name=\'b\',\n sources=[\'b.py\'],\n dependencies=[\'root/src/py/dependency_tree/a\']\n )\n """'], {}), '(\n """\n python_library(\n name=\'b\',\n sources=[\'b.py\'],\n dependencies=[\'root/src/py/dependency_tree/a\']\n )\n """\n )\n', (4200, 4352), False, 'from textwrap import dedent\n'), ((4405, 4563), 'textwrap.dedent', 'dedent', (['"""\n python_library(\n name=\'c\',\n sources=[\'c.py\'],\n dependencies=[\'root/src/py/dependency_tree/b\']\n )\n """'], {}), '(\n """\n python_library(\n name=\'c\',\n sources=[\'c.py\'],\n dependencies=[\'root/src/py/dependency_tree/b\']\n )\n """\n )\n', (4411, 4563), False, 'from textwrap import dedent\n'), ((4602, 4811), 'textwrap.dedent', 'dedent', (['"""\n java_thrift_library(\n name=\'thrift\',\n sources=[\'a.thrift\']\n )\n\n python_thrift_library(\n name=\'py-thrift\',\n sources=[\'a.thrift\']\n )\n """'], {}), '(\n """\n java_thrift_library(\n name=\'thrift\',\n sources=[\'a.thrift\']\n )\n\n python_thrift_library(\n name=\'py-thrift\',\n sources=[\'a.thrift\']\n )\n """\n )\n', (4608, 4811), False, 'from textwrap import dedent\n'), ((4851, 4965), 'textwrap.dedent', 'dedent', (['"""\n resources(\n name=\'a_resources\',\n sources=[\'a.resources\']\n )\n """'], {}), '(\n """\n resources(\n name=\'a_resources\',\n sources=[\'a.resources\']\n )\n """\n )\n', (4857, 4965), False, 'from textwrap import dedent\n'), ((5004, 5118), 
'textwrap.dedent', 'dedent', (['"""\n java_library(\n name=\'a_java\',\n sources=rglobs("*.java"),\n )\n """'], {}), '(\n """\n java_library(\n name=\'a_java\',\n sources=rglobs("*.java"),\n )\n """\n )\n', (5010, 5118), False, 'from textwrap import dedent\n'), ((5169, 5314), 'textwrap.dedent', 'dedent', (['"""\n jar_library(\n name=\'dummy\',\n jars=[\n jar(org=\'foo\', name=\'ray\', rev=\'1.45\')\n ])\n """'], {}), '(\n """\n jar_library(\n name=\'dummy\',\n jars=[\n jar(org=\'foo\', name=\'ray\', rev=\'1.45\')\n ])\n """\n )\n', (5175, 5314), False, 'from textwrap import dedent\n'), ((5357, 5504), 'textwrap.dedent', 'dedent', (['"""\n jar_library(\n name=\'dummy1\',\n jars=[\n jar(org=\'foo1\', name=\'ray\', rev=\'1.45\')\n ])\n """'], {}), '(\n """\n jar_library(\n name=\'dummy1\',\n jars=[\n jar(org=\'foo1\', name=\'ray\', rev=\'1.45\')\n ])\n """\n )\n', (5363, 5504), False, 'from textwrap import dedent\n'), ((5737, 5860), 'textwrap.dedent', 'dedent', (['"""\n java_library(\n name=\'scripts\',\n sources=[\'a/build/scripts.java\'],\n )\n """'], {}), '(\n """\n java_library(\n name=\'scripts\',\n sources=[\'a/build/scripts.java\'],\n )\n """\n )\n', (5743, 5860), False, 'from textwrap import dedent\n'), ((7317, 7444), 'textwrap.dedent', 'dedent', (['"""\n java_library(\n name=\'a1\',\n sources=[\'a1.test\'],\n resources=[1]\n )\n """'], {}), '(\n """\n java_library(\n name=\'a1\',\n sources=[\'a1.test\'],\n resources=[1]\n )\n """\n )\n', (7323, 7444), False, 'from textwrap import dedent\n'), ((10101, 10596), 'textwrap.dedent', 'dedent', (['"""\n java_protobuf_library(name=\'unpacked_jars\',\n sources=from_target(\':external-source\'),\n )\n\n unpacked_jars(name=\'external-source\',\n libraries=[\':external-source-jars\'],\n include_patterns=[\n \'com/squareup/testing/**/*.proto\',\n ],\n )\n\n jar_library(name=\'external-source-jars\',\n jars=[\n jar(org=\'com.squareup.testing.protolib\', name=\'protolib-external-test\', rev=\'0.0.2\'),\n ],\n )\n """'], {}), 
'(\n """\n java_protobuf_library(name=\'unpacked_jars\',\n sources=from_target(\':external-source\'),\n )\n\n unpacked_jars(name=\'external-source\',\n libraries=[\':external-source-jars\'],\n include_patterns=[\n \'com/squareup/testing/**/*.proto\',\n ],\n )\n\n jar_library(name=\'external-source-jars\',\n jars=[\n jar(org=\'com.squareup.testing.protolib\', name=\'protolib-external-test\', rev=\'0.0.2\'),\n ],\n )\n """\n )\n', (10107, 10596), False, 'from textwrap import dedent\n')] |
import PIL
import numpy as np
def to_grayscale(img):
return np.dot(img, [0.299, 0.587, 0.144])
def zero_center(img):
return img - 127.0
def crop(img, bottom=12, left=6, right=6):
height, width = img.shape
return img[0: height - bottom, left: width - right]
def save(img, path):
pil_img = PIL.Image.fromarray(img)
pil_img.save(path)
| [
"numpy.dot",
"PIL.Image.fromarray"
] | [((66, 100), 'numpy.dot', 'np.dot', (['img', '[0.299, 0.587, 0.144]'], {}), '(img, [0.299, 0.587, 0.144])\n', (72, 100), True, 'import numpy as np\n'), ((316, 340), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['img'], {}), '(img)\n', (335, 340), False, 'import PIL\n')] |
"""
:mod:`pyffi.formats.tga` --- Targa (.tga)
=========================================
Implementation
--------------
.. autoclass:: TgaFormat
:show-inheritance:
:members:
Regression tests
----------------
Read a TGA file
^^^^^^^^^^^^^^^
>>> # check and read tga file
>>> import os
>>> from os.path import dirname
>>> dirpath = __file__
>>> for i in range(4): #recurse up to root repo dir
... dirpath = dirname(dirpath)
>>> repo_root = dirpath
>>> format_root = os.path.join(repo_root, 'tests', 'formats', 'tga')
>>> file = os.path.join(format_root, 'test.tga').replace("\\\\", "/")
>>> stream = open(file, 'rb')
>>> data = TgaFormat.Data()
>>> data.inspect(stream)
>>> data.read(stream)
>>> stream.close()
>>> data.header.width
60
>>> data.header.height
20
Parse all TGA files in a directory tree
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> for stream, data in TgaFormat.walkData(format_root):
... try:
... # the replace call makes the doctest also pass on windows
... os_path = stream.name
... split = (os_path.split(os.sep))[-4:]
... rejoin = os.path.join(*split).replace("\\\\", "/")
... print("reading %s" % rejoin)
... except Exception:
... print(
... "Warning: read failed due corrupt file,"
... " corrupt format description, or bug.") # doctest: +REPORT_NDIFF
reading tests/formats/tga/test.tga
reading tests/formats/tga/test_footer.tga
Create a TGA file from scratch and write to file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> data = TgaFormat.Data()
>>> from tempfile import TemporaryFile
>>> stream = TemporaryFile()
>>> data.write(stream)
>>> stream.close()
"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, Python File Format Interface
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Python File Format Interface
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import struct, os, re
import pyffi.object_models.xml
import pyffi.object_models.common
import pyffi.object_models.xml.basic
import pyffi.object_models.xml.struct_
import pyffi.object_models
import pyffi.utils.graph
from pyffi.utils.graph import EdgeFilter
class TgaFormat(pyffi.object_models.xml.FileFormat):
"""This class implements the TGA format."""
xml_file_name = 'tga.xml'
# where to look for tga.xml and in what order:
# TGAXMLPATH env var, or TgaFormat module directory
xml_file_path = [os.getenv('TGAXMLPATH'), os.path.dirname(__file__)]
# filter for recognizing tga files by extension
RE_FILENAME = re.compile(r'^.*\.tga$', re.IGNORECASE)
# basic types
int = pyffi.object_models.common.Int
uint = pyffi.object_models.common.UInt
byte = pyffi.object_models.common.Byte
ubyte = pyffi.object_models.common.UByte
char = pyffi.object_models.common.Char
short = pyffi.object_models.common.Short
ushort = pyffi.object_models.common.UShort
float = pyffi.object_models.common.Float
PixelData = pyffi.object_models.common.UndecodedData
class FooterString(pyffi.object_models.xml.basic.BasicBase):
"""The Targa footer signature."""
def __str__(self):
return 'TRUEVISION-XFILE.\x00'
def read(self, stream, data):
"""Read signature from stream.
:param stream: The stream to read from.
:type stream: file
"""
signat = stream.read(18)
if signat != self.__str__().encode("ascii"):
raise ValueError(
"invalid Targa signature: expected '%s' but got '%s'"
%(self.__str__(), signat))
def write(self, stream, data):
"""Write signature to stream.
:param stream: The stream to read from.
:type stream: file
"""
stream.write(self.__str__().encode("ascii"))
def get_value(self):
"""Get signature.
:return: The signature.
"""
return self.__str__()
def set_value(self, value):
"""Set signature.
:param value: The value to assign.
:type value: str
"""
if value != self.__str__():
raise ValueError(
"invalid Targa signature: expected '%s' but got '%s'"
%(self.__str__(), value))
def get_size(self, data=None):
"""Return number of bytes that the signature occupies in a file.
:return: Number of bytes.
"""
return 18
def get_hash(self, data=None):
"""Return a hash value for the signature.
:return: An immutable object that can be used as a hash.
"""
return self.__str__()
class Image(pyffi.utils.graph.GlobalNode):
def __init__(self):
# children are either individual pixels, or RLE packets
self.children = []
def read(self, stream, data):
data = data
if data.header.image_type in (TgaFormat.ImageType.INDEXED,
TgaFormat.ImageType.RGB,
TgaFormat.ImageType.GREY):
self.children = [
TgaFormat.Pixel(argument=data.header.pixel_size)
for i in range(data.header.width
* data.header.height)]
for pixel in self.children:
pixel.read(stream, data)
else:
self.children = []
count = 0
while count < data.header.width * data.header.height:
pixel = TgaFormat.RLEPixels(
argument=data.header.pixel_size)
pixel.read(stream, data)
self.children.append(pixel)
count += pixel.header.count + 1
def write(self, stream, data):
data = data
for child in self.children:
child.arg = data.header.pixel_size
child.write(stream, data)
def get_detail_child_nodes(self, edge_filter=EdgeFilter()):
for child in self.children:
yield child
def get_detail_child_names(self, edge_filter=EdgeFilter()):
for i in range(len(self.children)):
yield str(i)
class Data(pyffi.object_models.FileFormat.Data):
def __init__(self):
self.header = TgaFormat.Header()
self.image = TgaFormat.Image()
self.footer = None # TgaFormat.Footer() is optional
def inspect(self, stream):
"""Quick heuristic check if stream contains Targa data,
by looking at the first 18 bytes.
:param stream: The stream to inspect.
:type stream: file
"""
# XXX todo: set some of the actual fields of the header
pos = stream.tell()
# read header
try:
id_length, colormap_type, image_type, \
colormap_index, colormap_length, colormap_size, \
x_origin, y_origin, width, height, \
pixel_size, flags = struct.unpack("<BBBHHBHHHHBB",
stream.read(18))
except struct.error:
# could not read 18 bytes
# not a TGA file
raise ValueError("Not a Targa file.")
finally:
stream.seek(pos)
# check if tga type is valid
# check pixel size
# check width and height
if not(image_type in (1, 2, 3, 9, 10, 11)
and pixel_size in (8, 24, 32)
and width <= 100000
and height <= 100000):
raise ValueError("Not a Targa file.")
# this looks like a tga file!
def read(self, stream):
"""Read a tga file.
:param stream: The stream from which to read.
:type stream: ``file``
"""
# read the file
self.inspect(stream) # quick check
# header
self.header.read(stream, self)
# image
self.image.read(stream, self)
# check if we are at the end of the file
if not stream.read(1):
self.footer = None
return
# footer
stream.seek(-26, os.SEEK_END)
self.footer = TgaFormat.Footer()
self.footer.read(stream, self)
def write(self, stream):
"""Write a tga file.
:param stream: The stream to write to.
:type stream: ``file``
"""
self.header.write(stream, self)
self.image.write(stream, self)
if self.footer:
self.footer.write(stream, self)
def get_global_child_nodes(self, edge_filter=EdgeFilter()):
yield self.header
yield self.image
if self.footer:
yield self.footer
def get_global_child_names(self, edge_filter=EdgeFilter()):
yield "Header"
yield "Image"
if self.footer:
yield "Footer"
if __name__ == '__main__':
import doctest
doctest.testmod()
| [
"pyffi.utils.graph.EdgeFilter",
"os.getenv",
"re.compile",
"os.path.dirname",
"doctest.testmod"
] | [((4018, 4057), 're.compile', 're.compile', (['"""^.*\\\\.tga$"""', 're.IGNORECASE'], {}), "('^.*\\\\.tga$', re.IGNORECASE)\n", (4028, 4057), False, 'import struct, os, re\n'), ((10857, 10874), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (10872, 10874), False, 'import doctest\n'), ((3896, 3919), 'os.getenv', 'os.getenv', (['"""TGAXMLPATH"""'], {}), "('TGAXMLPATH')\n", (3905, 3919), False, 'import struct, os, re\n'), ((3921, 3946), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3936, 3946), False, 'import struct, os, re\n'), ((7643, 7655), 'pyffi.utils.graph.EdgeFilter', 'EdgeFilter', ([], {}), '()\n', (7653, 7655), False, 'from pyffi.utils.graph import EdgeFilter\n'), ((7780, 7792), 'pyffi.utils.graph.EdgeFilter', 'EdgeFilter', ([], {}), '()\n', (7790, 7792), False, 'from pyffi.utils.graph import EdgeFilter\n'), ((10489, 10501), 'pyffi.utils.graph.EdgeFilter', 'EdgeFilter', ([], {}), '()\n', (10499, 10501), False, 'from pyffi.utils.graph import EdgeFilter\n'), ((10679, 10691), 'pyffi.utils.graph.EdgeFilter', 'EdgeFilter', ([], {}), '()\n', (10689, 10691), False, 'from pyffi.utils.graph import EdgeFilter\n')] |
import telegram
import os
def main():
token = os.getenv("TOKEN", None)
message = os.getenv("MESSAGE", "No message, please set MESSAGE env")
chat_id = os.getenv("CHAT_ID", None)
bot = telegram.Bot(token=token)
bot.send_message(chat_id=chat_id, text=message, parse_mode=telegram.ParseMode.HTML)
if __name__ == "__main__":
main() | [
"telegram.Bot",
"os.getenv"
] | [((52, 76), 'os.getenv', 'os.getenv', (['"""TOKEN"""', 'None'], {}), "('TOKEN', None)\n", (61, 76), False, 'import os\n'), ((91, 149), 'os.getenv', 'os.getenv', (['"""MESSAGE"""', '"""No message, please set MESSAGE env"""'], {}), "('MESSAGE', 'No message, please set MESSAGE env')\n", (100, 149), False, 'import os\n'), ((164, 190), 'os.getenv', 'os.getenv', (['"""CHAT_ID"""', 'None'], {}), "('CHAT_ID', None)\n", (173, 190), False, 'import os\n'), ((202, 227), 'telegram.Bot', 'telegram.Bot', ([], {'token': 'token'}), '(token=token)\n', (214, 227), False, 'import telegram\n')] |
# -*- encoding: utf-8 -*-
"""
Filename :train.py
Description :获取小作文摘要
Time :2021/06/22 15:21:08
Author :hwa
Version :1.0
"""
from app.lib.duplication_check.reply_database import ReplyDatabase
import time
def train_data():
start_time = time.time()
db = ReplyDatabase.load_from_json("data/bilibili_cnki_reply.json")
db.dump_to_image("database.dat")
end_time = time.time()
print("train cost {} s".format(end_time - start_time))
if __name__ == "__main__":
train_data()
| [
"app.lib.duplication_check.reply_database.ReplyDatabase.load_from_json",
"time.time"
] | [((285, 296), 'time.time', 'time.time', ([], {}), '()\n', (294, 296), False, 'import time\n'), ((306, 367), 'app.lib.duplication_check.reply_database.ReplyDatabase.load_from_json', 'ReplyDatabase.load_from_json', (['"""data/bilibili_cnki_reply.json"""'], {}), "('data/bilibili_cnki_reply.json')\n", (334, 367), False, 'from app.lib.duplication_check.reply_database import ReplyDatabase\n'), ((420, 431), 'time.time', 'time.time', ([], {}), '()\n', (429, 431), False, 'import time\n')] |
"""Builder for generating evergreen configuration."""
from threading import Lock
from typing import Set, List, Dict
import inject
from shrub.v2 import ShrubProject, BuildVariant, ExistingTask, Task
from buildscripts.patch_builds.task_generation import validate_task_generation_limit
from buildscripts.task_generation.constants import ACTIVATE_ARCHIVE_DIST_TEST_DEBUG_TASK
from buildscripts.task_generation.gen_task_service import GenTaskService, \
GenTaskOptions, ResmokeGenTaskParams, FuzzerGenTaskParams
from buildscripts.task_generation.generated_config import GeneratedFile, GeneratedConfiguration
from buildscripts.task_generation.resmoke_proxy import ResmokeProxyService
from buildscripts.task_generation.suite_split import SuiteSplitService, GeneratedSuite, \
SuiteSplitParameters
from buildscripts.task_generation.task_types.fuzzer_tasks import FuzzerTask
# pylint: disable=too-many-instance-attributes
class EvgConfigBuilder:
"""A builder class for building evergreen configuration."""
@inject.autoparams()
def __init__(
self,
resmoke_proxy: ResmokeProxyService,
suite_split_service: SuiteSplitService,
evg_config_gen_service: GenTaskService,
gen_options: GenTaskOptions,
) -> None:
"""
Initialize a new builder.
:param resmoke_proxy: Proxy to access resmoke data.
:param suite_split_service: Service to split suites into sub-suites.
:param evg_config_gen_service: Service to generate evergreen configuration.
:param gen_options: Global options for generating evergreen configuration.
"""
self.resmoke_proxy = resmoke_proxy
self.suite_split_service = suite_split_service
self.evg_config_gen_service = evg_config_gen_service
self.gen_options = gen_options
self.shrub_config = ShrubProject.empty()
self.build_variants: Dict[str, BuildVariant] = {}
self.generated_files: List[GeneratedFile] = []
self.lock = Lock()
def get_build_variant(self, build_variant: str) -> BuildVariant:
"""
Get the build variant object, creating it if it doesn't exist.
NOTE: The `lock` should be held by any functions calling this one.
:param build_variant: Name of build variant.
:return: BuildVariant object being created.
"""
if build_variant not in self.build_variants:
self.build_variants[build_variant] = BuildVariant(build_variant, activate=False)
return self.build_variants[build_variant]
def generate_suite(self, split_params: SuiteSplitParameters,
gen_params: ResmokeGenTaskParams) -> None:
"""
Add configuration to generate a split version of the specified resmoke suite.
:param split_params: Parameters of how resmoke suite should be split.
:param gen_params: Parameters of how evergreen configuration should be generated.
"""
generated_suite = self.suite_split_service.split_suite(split_params)
with self.lock:
build_variant = self.get_build_variant(generated_suite.build_variant)
resmoke_tasks = self.evg_config_gen_service.generate_task(generated_suite,
build_variant, gen_params)
self.generated_files.extend(self.resmoke_proxy.render_suite_files(resmoke_tasks))
def generate_fuzzer(self, fuzzer_params: FuzzerGenTaskParams) -> FuzzerTask:
"""
Add configuration to generate the specified fuzzer task.
:param fuzzer_params: Parameters of how the fuzzer suite should generated.
"""
with self.lock:
build_variant = self.get_build_variant(fuzzer_params.variant)
return self.evg_config_gen_service.generate_fuzzer_task(fuzzer_params, build_variant)
def add_display_task(self, display_task_name: str, execution_task_names: Set[str],
build_variant: str) -> None:
"""
Add configuration to generate the specified display task.
:param display_task_name: Name of display task to create.
:param execution_task_names: Name of execution tasks to include in display task.
:param build_variant: Name of build variant to add to.
"""
execution_tasks = {ExistingTask(task_name) for task_name in execution_task_names}
with self.lock:
build_variant = self.get_build_variant(build_variant)
build_variant.display_task(display_task_name, execution_existing_tasks=execution_tasks)
def generate_archive_dist_test_debug_activator_task(self, variant: str):
"""
Generate dummy task to activate the task that archives debug symbols.
We can't activate it directly as it's not generated.
"""
with self.lock:
build_variant = self.get_build_variant(variant)
build_variant.add_existing_task(ExistingTask(ACTIVATE_ARCHIVE_DIST_TEST_DEBUG_TASK))
def build(self, config_file_name: str) -> GeneratedConfiguration:
"""
Build the specified configuration and return the files needed to create it.
:param config_file_name: Filename to use for evergreen configuration.
:return: Dictionary of files and contents that are needed to create configuration.
"""
for build_variant in self.build_variants.values():
self.shrub_config.add_build_variant(build_variant)
if not validate_task_generation_limit(self.shrub_config):
raise ValueError("Attempting to generate more than max tasks in single generator")
self.generated_files.append(GeneratedFile(config_file_name, self.shrub_config.json()))
return GeneratedConfiguration(self.generated_files)
| [
"shrub.v2.ShrubProject.empty",
"buildscripts.patch_builds.task_generation.validate_task_generation_limit",
"threading.Lock",
"buildscripts.task_generation.generated_config.GeneratedConfiguration",
"inject.autoparams",
"shrub.v2.ExistingTask",
"shrub.v2.BuildVariant"
] | [((1018, 1037), 'inject.autoparams', 'inject.autoparams', ([], {}), '()\n', (1035, 1037), False, 'import inject\n'), ((1872, 1892), 'shrub.v2.ShrubProject.empty', 'ShrubProject.empty', ([], {}), '()\n', (1890, 1892), False, 'from shrub.v2 import ShrubProject, BuildVariant, ExistingTask, Task\n'), ((2026, 2032), 'threading.Lock', 'Lock', ([], {}), '()\n', (2030, 2032), False, 'from threading import Lock\n'), ((5792, 5836), 'buildscripts.task_generation.generated_config.GeneratedConfiguration', 'GeneratedConfiguration', (['self.generated_files'], {}), '(self.generated_files)\n', (5814, 5836), False, 'from buildscripts.task_generation.generated_config import GeneratedFile, GeneratedConfiguration\n'), ((2482, 2525), 'shrub.v2.BuildVariant', 'BuildVariant', (['build_variant'], {'activate': '(False)'}), '(build_variant, activate=False)\n', (2494, 2525), False, 'from shrub.v2 import ShrubProject, BuildVariant, ExistingTask, Task\n'), ((4373, 4396), 'shrub.v2.ExistingTask', 'ExistingTask', (['task_name'], {}), '(task_name)\n', (4385, 4396), False, 'from shrub.v2 import ShrubProject, BuildVariant, ExistingTask, Task\n'), ((5535, 5584), 'buildscripts.patch_builds.task_generation.validate_task_generation_limit', 'validate_task_generation_limit', (['self.shrub_config'], {}), '(self.shrub_config)\n', (5565, 5584), False, 'from buildscripts.patch_builds.task_generation import validate_task_generation_limit\n'), ((4996, 5047), 'shrub.v2.ExistingTask', 'ExistingTask', (['ACTIVATE_ARCHIVE_DIST_TEST_DEBUG_TASK'], {}), '(ACTIVATE_ARCHIVE_DIST_TEST_DEBUG_TASK)\n', (5008, 5047), False, 'from shrub.v2 import ShrubProject, BuildVariant, ExistingTask, Task\n')] |
"""
Routes and views for the flask application.
"""
import os
import json
from flask import Flask, redirect, request, render_template, flash
from pathlib import Path
from flask_wtf import FlaskForm
from wtforms import StringField,SelectField,PasswordField,BooleanField
from wtforms.validators import InputRequired,ValidationError
from . import app
source_dexcom = 'dexcom'
source_nightscout = 'nightscout'
LOG_FILENAME="sugarpidisplay.log"
folder_name = '.sugarpidisplay'
config_file = 'config.json'
pi_sugar_path = os.path.join(str(Path.home()), folder_name)
Path(pi_sugar_path).mkdir(exist_ok=True)
def dexcom_field_check(form, field):
if (form.data_source.data == source_dexcom):
if (not field.data):
raise ValidationError('Field cannot be empty')
def nightscout_field_check(form, field):
if (form.data_source.data == source_nightscout):
if (not field.data):
raise ValidationError('Field cannot be empty')
class MyForm(FlaskForm):
class Meta:
csrf = False
data_source = SelectField(
'Data Source',
choices=[(source_dexcom, 'Dexcom'), (source_nightscout, 'Nightscout')]
)
use_animation = BooleanField('Use Animation')
dexcom_user = StringField('Dexcom UserName', validators=[dexcom_field_check])
dexcom_pass = PasswordField('<PASSWORD>', validators=[dexcom_field_check])
ns_url = StringField('Nightscout URL', validators=[nightscout_field_check])
ns_token = StringField('Nightscout Access Token', validators=[nightscout_field_check])
@app.route('/hello')
def hello_world():
return 'Hello, World!'
@app.route('/success')
def success():
return 'Your device is configured. Now cycle the power and it will use the new settings'
@app.route('/', methods=('GET', 'POST'))
def setup():
form = MyForm()
if request.method == 'POST':
if form.validate() == False:
flash('Fields are missing.')
return render_template('setup.html', form=form)
else:
handle_submit(form)
return redirect('/success')
#if form.is_submitted():
loadData(form)
return render_template('setup.html', form=form)
def handle_submit(form):
config = { 'data_source': form.data_source.data }
config['use_animation'] = form.use_animation.data
if (form.data_source.data == source_dexcom):
config['dexcom_username'] = form.dexcom_user.data
config['dexcom_password'] = form.dexcom_pass.data
else:
config['nightscout_url'] = form.ns_url.data
config['nightscout_access_token'] = form.ns_token.data
#__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
f = open(os.path.join(pi_sugar_path, config_file), "w")
json.dump(config, f, indent = 4)
f.close()
def loadData(form):
config_full_path = os.path.join(pi_sugar_path, config_file)
if (not Path(config_full_path).exists()):
return
try:
f = open(config_full_path, "r")
config = json.load(f)
f.close()
if ('data_source' in config):
form.data_source.data = config['data_source']
if (config['data_source'] == source_dexcom):
if ('dexcom_username' in config):
form.dexcom_user.data = config['dexcom_username']
if ('dexcom_password' in config):
form.dexcom_pass.data = config['dexcom_password']
if (config['data_source'] == source_nightscout):
if ('nightscout_url' in config):
form.ns_url.data = config['nightscout_url']
if ('nightscout_access_token' in config):
form.ns_token.data = config['nightscout_access_token']
form.use_animation.data = config['use_animation']
except:
pass
| [
"flask.render_template",
"flask.flash",
"pathlib.Path",
"wtforms.validators.ValidationError",
"wtforms.BooleanField",
"wtforms.PasswordField",
"pathlib.Path.home",
"os.path.join",
"wtforms.StringField",
"flask.redirect",
"json.load",
"wtforms.SelectField",
"json.dump"
] | [((1044, 1147), 'wtforms.SelectField', 'SelectField', (['"""Data Source"""'], {'choices': "[(source_dexcom, 'Dexcom'), (source_nightscout, 'Nightscout')]"}), "('Data Source', choices=[(source_dexcom, 'Dexcom'), (\n source_nightscout, 'Nightscout')])\n", (1055, 1147), False, 'from wtforms import StringField, SelectField, PasswordField, BooleanField\n'), ((1185, 1214), 'wtforms.BooleanField', 'BooleanField', (['"""Use Animation"""'], {}), "('Use Animation')\n", (1197, 1214), False, 'from wtforms import StringField, SelectField, PasswordField, BooleanField\n'), ((1233, 1296), 'wtforms.StringField', 'StringField', (['"""Dexcom UserName"""'], {'validators': '[dexcom_field_check]'}), "('Dexcom UserName', validators=[dexcom_field_check])\n", (1244, 1296), False, 'from wtforms import StringField, SelectField, PasswordField, BooleanField\n'), ((1315, 1375), 'wtforms.PasswordField', 'PasswordField', (['"""<PASSWORD>"""'], {'validators': '[dexcom_field_check]'}), "('<PASSWORD>', validators=[dexcom_field_check])\n", (1328, 1375), False, 'from wtforms import StringField, SelectField, PasswordField, BooleanField\n'), ((1389, 1455), 'wtforms.StringField', 'StringField', (['"""Nightscout URL"""'], {'validators': '[nightscout_field_check]'}), "('Nightscout URL', validators=[nightscout_field_check])\n", (1400, 1455), False, 'from wtforms import StringField, SelectField, PasswordField, BooleanField\n'), ((1471, 1546), 'wtforms.StringField', 'StringField', (['"""Nightscout Access Token"""'], {'validators': '[nightscout_field_check]'}), "('Nightscout Access Token', validators=[nightscout_field_check])\n", (1482, 1546), False, 'from wtforms import StringField, SelectField, PasswordField, BooleanField\n'), ((2140, 2180), 'flask.render_template', 'render_template', (['"""setup.html"""'], {'form': 'form'}), "('setup.html', form=form)\n", (2155, 2180), False, 'from flask import Flask, redirect, request, render_template, flash\n'), ((2761, 2791), 'json.dump', 'json.dump', (['config', 
'f'], {'indent': '(4)'}), '(config, f, indent=4)\n', (2770, 2791), False, 'import json\n'), ((2852, 2892), 'os.path.join', 'os.path.join', (['pi_sugar_path', 'config_file'], {}), '(pi_sugar_path, config_file)\n', (2864, 2892), False, 'import os\n'), ((535, 546), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (544, 546), False, 'from pathlib import Path\n'), ((562, 581), 'pathlib.Path', 'Path', (['pi_sugar_path'], {}), '(pi_sugar_path)\n', (566, 581), False, 'from pathlib import Path\n'), ((2710, 2750), 'os.path.join', 'os.path.join', (['pi_sugar_path', 'config_file'], {}), '(pi_sugar_path, config_file)\n', (2722, 2750), False, 'import os\n'), ((3020, 3032), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3029, 3032), False, 'import json\n'), ((738, 778), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Field cannot be empty"""'], {}), "('Field cannot be empty')\n", (753, 778), False, 'from wtforms.validators import InputRequired, ValidationError\n'), ((921, 961), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Field cannot be empty"""'], {}), "('Field cannot be empty')\n", (936, 961), False, 'from wtforms.validators import InputRequired, ValidationError\n'), ((1906, 1934), 'flask.flash', 'flash', (['"""Fields are missing."""'], {}), "('Fields are missing.')\n", (1911, 1934), False, 'from flask import Flask, redirect, request, render_template, flash\n'), ((1954, 1994), 'flask.render_template', 'render_template', (['"""setup.html"""'], {'form': 'form'}), "('setup.html', form=form)\n", (1969, 1994), False, 'from flask import Flask, redirect, request, render_template, flash\n'), ((2060, 2080), 'flask.redirect', 'redirect', (['"""/success"""'], {}), "('/success')\n", (2068, 2080), False, 'from flask import Flask, redirect, request, render_template, flash\n'), ((2905, 2927), 'pathlib.Path', 'Path', (['config_full_path'], {}), '(config_full_path)\n', (2909, 2927), False, 'from pathlib import Path\n')] |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
VirtualMachineHtSharing = Enum(
'any',
'internal',
'none',
)
| [
"pyvisdk.thirdparty.Enum"
] | [((188, 219), 'pyvisdk.thirdparty.Enum', 'Enum', (['"""any"""', '"""internal"""', '"""none"""'], {}), "('any', 'internal', 'none')\n", (192, 219), False, 'from pyvisdk.thirdparty import Enum\n')] |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""
Blender-CoD: Blender Add-On for Call of Duty modding
Version: alpha 3
Copyright (c) 2011 CoDEmanX, Flybynyt -- <EMAIL>
http://code.google.com/p/blender-cod/
TODO
- UI for xmodel and xanim import (planned for alpha 4/5)
"""
bl_info = {
"name": "Blender-CoD - Add-On for Call of Duty modding (alpha 3)",
"author": "CoDEmanX, Flybynyt",
"version": (0, 3, 5),
"blender": (2, 62, 0),
"location": "File > Import | File > Export",
"description": "Export models to *.XMODEL_EXPORT and animations to *.XANIM_EXPORT",
"warning": "Alpha version, please report any bugs!",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Call_of_Duty_IO",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"support": "TESTING",
"category": "Import-Export"
}
# To support reload properly, try to access a package var, if it's there, reload everything
if "bpy" in locals():
import imp
if "import_xmodel" in locals():
imp.reload(import_xmodel)
if "export_xmodel" in locals():
imp.reload(export_xmodel)
if "import_xanim" in locals():
imp.reload(import_xanim)
if "export_xanim" in locals():
imp.reload(export_xanim)
import bpy
from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty
import bpy_extras.io_utils
from bpy_extras.io_utils import ExportHelper, ImportHelper
import time
# Planned for alpha 4/5
class ImportXmodel(bpy.types.Operator, ImportHelper):
"""Load a CoD XMODEL_EXPORT File"""
bl_idname = "import_scene.xmodel"
bl_label = "Import XMODEL_EXPORT"
bl_options = {'PRESET'}
filename_ext = ".XMODEL_EXPORT"
filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
#use_meshes = BoolProperty(name="Meshes", description="Import meshes", default=True)
#use_armature = BoolProperty(name="Armature", description="Import Armature", default=True)
#use_bind_armature = BoolProperty(name="Bind Meshes to Armature", description="Parent imported meshes to armature", default=True)
#use_split_objects = BoolProperty(name="Object", description="Import OBJ Objects into Blender Objects", default=True)
#use_split_groups = BoolProperty(name="Group", description="Import OBJ Groups into Blender Objects", default=True)
#use_image_search = BoolProperty(name="Image Search", description="Search subdirs for any associated images (Warning, may be slow)", default=True)
def execute(self, context):
from . import import_xmodel
start_time = time.clock()
result = import_xmodel.load(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Import finished in %.4f sec." % (time.clock() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
"""
def draw(self, context):
layout = self.layout
col = layout.column()
col.prop(self, "use_meshes")
col.prop(self, "use_armature")
row = layout.row()
row.active = self.use_meshes and self.use_armature
row.prop(self, "use_bind_armature")
"""
@classmethod
def poll(self, context):
return (context.scene is not None)
class ImportXanim(bpy.types.Operator, ImportHelper):
"""Load a CoD XANIM_EXPORT File"""
bl_idname = "import_scene.xanim"
bl_label = "Import XANIM_EXPORT"
bl_options = {'PRESET'}
filename_ext = ".XANIM_EXPORT"
filter_glob = StringProperty(default="*.XANIM_EXPORT;*.NT_EXPORT", options={'HIDDEN'})
def execute(self, context):
# print("Selected: " + context.active_object.name)
from . import import_xanim
return import_xanim.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
class ExportXmodel(bpy.types.Operator, ExportHelper):
    """Save a CoD XMODEL_EXPORT File"""
    bl_idname = "export_scene.xmodel"
    bl_label = 'Export XMODEL_EXPORT'
    bl_options = {'PRESET'}
    filename_ext = ".XMODEL_EXPORT"
    filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.
    use_version = EnumProperty(
        name="Format Version",
        description="XMODEL_EXPORT format version for export",
        items=(('5', "Version 5", "vCoD, CoD:UO"),
               ('6', "Version 6", "CoD2, CoD4, CoD5, CoD7")),
        default='6',
    )
    use_selection = BoolProperty(
        name="Selection only",
        description="Export selected meshes only (object or weight paint mode)",
        default=False
    )
    use_vertex_colors = BoolProperty(
        name="Vertex colors",
        description="Export vertex colors (if disabled, white color will be used)",
        default=True
    )
    use_vertex_colors_alpha = BoolProperty(
        name="As alpha",
        description="Turn RGB vertex colors into grayscale (average value) and use it as alpha transparency. White is 1 (opaque), black 0 (invisible)",
        default=False
    )
    use_apply_modifiers = BoolProperty(
        name="Apply Modifiers",
        description="Apply all mesh modifiers except Armature (preview resolution)",
        default=True
    )
    use_armature = BoolProperty(
        name="Armature",
        description="Export bones (if disabled, only a 'tag_origin' bone will be written)",
        default=True
    )
    use_vertex_cleanup = BoolProperty(
        name="Clean up vertices",
        description="Try this if you have problems converting to xmodel. Skips vertices which aren't used by any face and updates references.",
        default=False
    )
    use_armature_pose = BoolProperty(
        name="Pose animation to models",
        description="Export meshes with Armature modifier applied as a series of XMODEL_EXPORT files",
        default=False
    )
    use_frame_start = IntProperty(
        name="Start",
        description="First frame to export",
        default=1,
        min=0
    )
    use_frame_end = IntProperty(
        name="End",
        description="Last frame to export",
        default=250,
        min=0
    )
    use_weight_min = BoolProperty(
        name="Minimum bone weight",
        description="Try this if you get 'too small weight' errors when converting",
        default=False,
    )
    use_weight_min_threshold = FloatProperty(
        name="Threshold",
        description="Smallest allowed weight (minimum value)",
        default=0.010097,
        min=0.0,
        max=1.0,
        precision=6
    )

    def execute(self, context):
        """Run the exporter and report success/elapsed time or the error string."""
        from . import export_xmodel
        # NOTE(review): time.clock() was removed in Python 3.8 — confirm the
        # target Blender/Python version, or switch to time.perf_counter().
        start_time = time.clock()
        result = export_xmodel.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
        if not result:
            self.report({'INFO'}, "Export finished in %.4f sec." % (time.clock() - start_time))
            return {'FINISHED'}
        else:
            self.report({'ERROR'}, result)
            return {'CANCELLED'}

    # Extend ExportHelper invoke function to support dynamic default values
    def invoke(self, context, event):
        """Seed the frame range from the current frame before opening the file dialog."""
        #self.use_frame_start = context.scene.frame_start
        self.use_frame_start = context.scene.frame_current
        #self.use_frame_end = context.scene.frame_end
        self.use_frame_end = context.scene.frame_current
        return super().invoke(context, event)

    def draw(self, context):
        """Build the export-options panel shown in the file dialog."""
        layout = self.layout
        row = layout.row(align=True)
        row.prop(self, "use_version", expand=True)
        # Calculate number of selected mesh objects
        if context.mode in {'OBJECT', 'PAINT_WEIGHT'}:
            meshes_selected = len([m for m in bpy.data.objects if m.type == 'MESH' and m.select])
        else:
            meshes_selected = 0
        col = layout.column(align=True)
        col.prop(self, "use_selection", "Selection only (%i meshes)" % meshes_selected)
        col.enabled = bool(meshes_selected)
        col = layout.column(align=True)
        col.prop(self, "use_apply_modifiers")
        col = layout.column(align=True)
        col.enabled = not self.use_armature_pose
        if self.use_armature and self.use_armature_pose:
            col.prop(self, "use_armature", "Armature (disabled)")
        else:
            col.prop(self, "use_armature")
        # Vertex colors only exist in the version-6 format.
        if self.use_version == '6':
            row = layout.row(align=True)
            row.prop(self, "use_vertex_colors")
            sub = row.split()
            sub.active = self.use_vertex_colors
            sub.prop(self, "use_vertex_colors_alpha")
        col = layout.column(align=True)
        col.label("Advanced:")
        col = layout.column(align=True)
        col.prop(self, "use_vertex_cleanup")
        box = layout.box()
        col = box.column(align=True)
        col.prop(self, "use_armature_pose")
        sub = box.column()
        sub.active = self.use_armature_pose
        sub.label(text="Frame range: (%i frames)" % (abs(self.use_frame_end - self.use_frame_start) + 1))
        row = sub.row(align=True)
        row.prop(self, "use_frame_start")
        row.prop(self, "use_frame_end")
        box = layout.box()
        col = box.column(align=True)
        col.prop(self, "use_weight_min")
        sub = box.column()
        sub.enabled = self.use_weight_min
        sub.prop(self, "use_weight_min_threshold")

    @classmethod
    def poll(self, context):
        # Exporter is available whenever a scene exists.
        return (context.scene is not None)
class ExportXanim(bpy.types.Operator, ExportHelper):
    """Save a XMODEL_XANIM File"""
    bl_idname = "export_scene.xanim"
    bl_label = 'Export XANIM_EXPORT'
    bl_options = {'PRESET'}
    filename_ext = ".XANIM_EXPORT"
    filter_glob = StringProperty(default="*.XANIM_EXPORT", options={'HIDDEN'})
    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.
    use_selection = BoolProperty(
        name="Selection only",
        description="Export selected bones only (pose mode)",
        default=False
    )
    use_framerate = IntProperty(
        name="Framerate",
        description="Set frames per second for export, 30 fps is commonly used.",
        default=24,
        min=1,
        max=100
    )
    use_frame_start = IntProperty(
        name="Start",
        description="First frame to export",
        default=1,
        min=0
    )
    use_frame_end = IntProperty(
        name="End",
        description="Last frame to export",
        default=250,
        min=0
    )
    use_notetrack = BoolProperty(
        name="Notetrack",
        description="Export timeline markers as notetrack nodes",
        default=True
    )
    use_notetrack_format = EnumProperty(
        name="Notetrack format",
        description="Notetrack format to use. Always set 'CoD 7' for Black Ops, even if not using notetrack!",
        items=(('5', "CoD 5", "Separate NT_EXPORT notetrack file for 'World at War'"),
               ('7', "CoD 7", "Separate NT_EXPORT notetrack file for 'Black Ops'"),
               ('1', "all other", "Inline notetrack data for all CoD versions except WaW and BO")),
        default='1',
    )

    def execute(self, context):
        """Run the animation exporter and report success/elapsed time or the error string."""
        from . import export_xanim
        # NOTE(review): time.clock() was removed in Python 3.8 — confirm the
        # target Blender/Python version, or switch to time.perf_counter().
        start_time = time.clock()
        result = export_xanim.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
        if not result:
            self.report({'INFO'}, "Export finished in %.4f sec." % (time.clock() - start_time))
            return {'FINISHED'}
        else:
            self.report({'ERROR'}, result)
            return {'CANCELLED'}

    # Extend ExportHelper invoke function to support dynamic default values
    def invoke(self, context, event):
        """Seed frame range and framerate from the scene before opening the dialog."""
        self.use_frame_start = context.scene.frame_start
        self.use_frame_end = context.scene.frame_end
        self.use_framerate = round(context.scene.render.fps / context.scene.render.fps_base)
        return super().invoke(context, event)

    def draw(self, context):
        """Build the export-options panel (armature info, frame range, notetrack)."""
        layout = self.layout
        bones_selected = 0
        armature = None
        # Take the first armature
        for ob in bpy.data.objects:
            if ob.type == 'ARMATURE' and len(ob.data.bones) > 0:
                armature = ob.data
                # Calculate number of selected bones if in pose-mode
                if context.mode == 'POSE':
                    bones_selected = len([b for b in armature.bones if b.select])
                # Prepare info string
                armature_info = "%s (%i bones)" % (ob.name, len(armature.bones))
                break
        else:
            armature_info = "Not found!"
        if armature:
            icon = 'NONE'
        else:
            icon = 'ERROR'
        col = layout.column(align=True)
        col.label("Armature: %s" % armature_info, icon)
        col = layout.column(align=True)
        col.prop(self, "use_selection", "Selection only (%i bones)" % bones_selected)
        col.enabled = bool(bones_selected)
        layout.label(text="Frame range: (%i frames)" % (abs(self.use_frame_end - self.use_frame_start) + 1))
        row = layout.row(align=True)
        row.prop(self, "use_frame_start")
        row.prop(self, "use_frame_end")
        col = layout.column(align=True)
        col.prop(self, "use_framerate")
        # Calculate number of markers in export range
        frame_min = min(self.use_frame_start, self.use_frame_end)
        frame_max = max(self.use_frame_start, self.use_frame_end)
        num_markers = len([m for m in context.scene.timeline_markers if frame_max >= m.frame >= frame_min])
        col = layout.column(align=True)
        col.prop(self, "use_notetrack", text="Notetrack (%i nodes)" % num_markers)
        col = layout.column(align=True)
        col.prop(self, "use_notetrack_format", expand=True)

    @classmethod
    def poll(self, context):
        # Exporter is available whenever a scene exists.
        return (context.scene is not None)
def menu_func_xmodel_import(self, context):
    """Add the xmodel importer entry to the File > Import menu."""
    self.layout.operator(ImportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")

# Disabled xanim import menu entry kept for reference.
"""
def menu_func_xanim_import(self, context):
    self.layout.operator(ImportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
"""
def menu_func_xmodel_export(self, context):
    """Add the xmodel exporter entry to the File > Export menu."""
    layout = self.layout
    layout.operator(ExportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")
def menu_func_xanim_export(self, context):
    """Add the xanim exporter entry to the File > Export menu."""
    layout = self.layout
    layout.operator(ExportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
def register():
    """Register the add-on's operator classes and hook the import/export menus."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func_xmodel_import)
    #bpy.types.INFO_MT_file_import.append(menu_func_xanim_import)
    bpy.types.INFO_MT_file_export.append(menu_func_xmodel_export)
    bpy.types.INFO_MT_file_export.append(menu_func_xanim_export)
def unregister():
    """Unregister the operator classes and remove the menu entries (mirror of register)."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func_xmodel_import)
    #bpy.types.INFO_MT_file_import.remove(menu_func_xanim_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_xmodel_export)
    bpy.types.INFO_MT_file_export.remove(menu_func_xanim_export)
if __name__ == "__main__":
register()
| [
"bpy.props.IntProperty",
"bpy.props.BoolProperty",
"bpy.props.StringProperty",
"bpy.utils.unregister_module",
"time.clock",
"bpy.types.INFO_MT_file_export.append",
"bpy.types.INFO_MT_file_export.remove",
"imp.reload",
"bpy.props.FloatProperty",
"bpy.props.EnumProperty",
"bpy.utils.register_modul... | [((2562, 2623), 'bpy.props.StringProperty', 'StringProperty', ([], {'default': '"""*.XMODEL_EXPORT"""', 'options': "{'HIDDEN'}"}), "(default='*.XMODEL_EXPORT', options={'HIDDEN'})\n", (2576, 2623), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((4447, 4519), 'bpy.props.StringProperty', 'StringProperty', ([], {'default': '"""*.XANIM_EXPORT;*.NT_EXPORT"""', 'options': "{'HIDDEN'}"}), "(default='*.XANIM_EXPORT;*.NT_EXPORT', options={'HIDDEN'})\n", (4461, 4519), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((4996, 5057), 'bpy.props.StringProperty', 'StringProperty', ([], {'default': '"""*.XMODEL_EXPORT"""', 'options': "{'HIDDEN'}"}), "(default='*.XMODEL_EXPORT', options={'HIDDEN'})\n", (5010, 5057), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((5216, 5423), 'bpy.props.EnumProperty', 'EnumProperty', ([], {'name': '"""Format Version"""', 'description': '"""XMODEL_EXPORT format version for export"""', 'items': "(('5', 'Version 5', 'vCoD, CoD:UO'), ('6', 'Version 6',\n 'CoD2, CoD4, CoD5, CoD7'))", 'default': '"""6"""'}), "(name='Format Version', description=\n 'XMODEL_EXPORT format version for export', items=(('5', 'Version 5',\n 'vCoD, CoD:UO'), ('6', 'Version 6', 'CoD2, CoD4, CoD5, CoD7')), default='6'\n )\n", (5228, 5423), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((5489, 5617), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Selection only"""', 'description': '"""Export selected meshes only (object or weight paint mode)"""', 'default': '(False)'}), "(name='Selection only', description=\n 'Export selected meshes only (object or weight paint mode)', default=False)\n", (5501, 5617), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), 
((5672, 5806), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Vertex colors"""', 'description': '"""Export vertex colors (if disabled, white color will be used)"""', 'default': '(True)'}), "(name='Vertex colors', description=\n 'Export vertex colors (if disabled, white color will be used)', default\n =True)\n", (5684, 5806), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((5862, 6060), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""As alpha"""', 'description': '"""Turn RGB vertex colors into grayscale (average value) and use it as alpha transparency. White is 1 (opaque), black 0 (invisible)"""', 'default': '(False)'}), "(name='As alpha', description=\n 'Turn RGB vertex colors into grayscale (average value) and use it as alpha transparency. White is 1 (opaque), black 0 (invisible)'\n , default=False)\n", (5874, 6060), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((6112, 6248), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Apply Modifiers"""', 'description': '"""Apply all mesh modifiers except Armature (preview resolution)"""', 'default': '(True)'}), "(name='Apply Modifiers', description=\n 'Apply all mesh modifiers except Armature (preview resolution)',\n default=True)\n", (6124, 6248), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((6294, 6430), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Armature"""', 'description': '"""Export bones (if disabled, only a \'tag_origin\' bone will be written)"""', 'default': '(True)'}), '(name=\'Armature\', description=\n "Export bones (if disabled, only a \'tag_origin\' bone will be written)",\n default=True)\n', (6306, 6430), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((6482, 6681), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': 
'"""Clean up vertices"""', 'description': '"""Try this if you have problems converting to xmodel. Skips vertices which aren\'t used by any face and updates references."""', 'default': '(False)'}), '(name=\'Clean up vertices\', description=\n "Try this if you have problems converting to xmodel. Skips vertices which aren\'t used by any face and updates references."\n , default=False)\n', (6494, 6681), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((6731, 6896), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Pose animation to models"""', 'description': '"""Export meshes with Armature modifier applied as a series of XMODEL_EXPORT files"""', 'default': '(False)'}), "(name='Pose animation to models', description=\n 'Export meshes with Armature modifier applied as a series of XMODEL_EXPORT files'\n , default=False)\n", (6743, 6896), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((6944, 7029), 'bpy.props.IntProperty', 'IntProperty', ([], {'name': '"""Start"""', 'description': '"""First frame to export"""', 'default': '(1)', 'min': '(0)'}), "(name='Start', description='First frame to export', default=1, min=0\n )\n", (6955, 7029), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((7088, 7167), 'bpy.props.IntProperty', 'IntProperty', ([], {'name': '"""End"""', 'description': '"""Last frame to export"""', 'default': '(250)', 'min': '(0)'}), "(name='End', description='Last frame to export', default=250, min=0)\n", (7099, 7167), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((7232, 7373), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Minimum bone weight"""', 'description': '"""Try this if you get \'too small weight\' errors when converting"""', 'default': '(False)'}), '(name=\'Minimum bone weight\', description=\n "Try 
this if you get \'too small weight\' errors when converting",\n default=False)\n', (7244, 7373), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((7432, 7576), 'bpy.props.FloatProperty', 'FloatProperty', ([], {'name': '"""Threshold"""', 'description': '"""Smallest allowed weight (minimum value)"""', 'default': '(0.010097)', 'min': '(0.0)', 'max': '(1.0)', 'precision': '(6)'}), "(name='Threshold', description=\n 'Smallest allowed weight (minimum value)', default=0.010097, min=0.0,\n max=1.0, precision=6)\n", (7445, 7576), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((10794, 10854), 'bpy.props.StringProperty', 'StringProperty', ([], {'default': '"""*.XANIM_EXPORT"""', 'options': "{'HIDDEN'}"}), "(default='*.XANIM_EXPORT', options={'HIDDEN'})\n", (10808, 10854), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((11015, 11124), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Selection only"""', 'description': '"""Export selected bones only (pose mode)"""', 'default': '(False)'}), "(name='Selection only', description=\n 'Export selected bones only (pose mode)', default=False)\n", (11027, 11124), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((11175, 11316), 'bpy.props.IntProperty', 'IntProperty', ([], {'name': '"""Framerate"""', 'description': '"""Set frames per second for export, 30 fps is commonly used."""', 'default': '(24)', 'min': '(1)', 'max': '(100)'}), "(name='Framerate', description=\n 'Set frames per second for export, 30 fps is commonly used.', default=\n 24, min=1, max=100)\n", (11186, 11316), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((11380, 11465), 'bpy.props.IntProperty', 'IntProperty', ([], {'name': '"""Start"""', 'description': '"""First frame 
to export"""', 'default': '(1)', 'min': '(0)'}), "(name='Start', description='First frame to export', default=1, min=0\n )\n", (11391, 11465), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((11524, 11603), 'bpy.props.IntProperty', 'IntProperty', ([], {'name': '"""End"""', 'description': '"""Last frame to export"""', 'default': '(250)', 'min': '(0)'}), "(name='End', description='Last frame to export', default=250, min=0)\n", (11535, 11603), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((11667, 11774), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Notetrack"""', 'description': '"""Export timeline markers as notetrack nodes"""', 'default': '(True)'}), "(name='Notetrack', description=\n 'Export timeline markers as notetrack nodes', default=True)\n", (11679, 11774), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((11832, 12244), 'bpy.props.EnumProperty', 'EnumProperty', ([], {'name': '"""Notetrack format"""', 'description': '"""Notetrack format to use. Always set \'CoD 7\' for Black Ops, even if not using notetrack!"""', 'items': '((\'5\', \'CoD 5\', "Separate NT_EXPORT notetrack file for \'World at War\'"), (\n \'7\', \'CoD 7\', "Separate NT_EXPORT notetrack file for \'Black Ops\'"), (\n \'1\', \'all other\',\n \'Inline notetrack data for all CoD versions except WaW and BO\'))', 'default': '"""1"""'}), '(name=\'Notetrack format\', description=\n "Notetrack format to use. 
Always set \'CoD 7\' for Black Ops, even if not using notetrack!"\n , items=((\'5\', \'CoD 5\',\n "Separate NT_EXPORT notetrack file for \'World at War\'"), (\'7\', \'CoD 7\',\n "Separate NT_EXPORT notetrack file for \'Black Ops\'"), (\'1\', \'all other\',\n \'Inline notetrack data for all CoD versions except WaW and BO\')),\n default=\'1\')\n', (11844, 12244), False, 'from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty\n'), ((15605, 15640), 'bpy.utils.register_module', 'bpy.utils.register_module', (['__name__'], {}), '(__name__)\n', (15630, 15640), False, 'import bpy\n'), ((15646, 15707), 'bpy.types.INFO_MT_file_import.append', 'bpy.types.INFO_MT_file_import.append', (['menu_func_xmodel_import'], {}), '(menu_func_xmodel_import)\n', (15682, 15707), False, 'import bpy\n'), ((15778, 15839), 'bpy.types.INFO_MT_file_export.append', 'bpy.types.INFO_MT_file_export.append', (['menu_func_xmodel_export'], {}), '(menu_func_xmodel_export)\n', (15814, 15839), False, 'import bpy\n'), ((15844, 15904), 'bpy.types.INFO_MT_file_export.append', 'bpy.types.INFO_MT_file_export.append', (['menu_func_xanim_export'], {}), '(menu_func_xanim_export)\n', (15880, 15904), False, 'import bpy\n'), ((15928, 15965), 'bpy.utils.unregister_module', 'bpy.utils.unregister_module', (['__name__'], {}), '(__name__)\n', (15955, 15965), False, 'import bpy\n'), ((15971, 16032), 'bpy.types.INFO_MT_file_import.remove', 'bpy.types.INFO_MT_file_import.remove', (['menu_func_xmodel_import'], {}), '(menu_func_xmodel_import)\n', (16007, 16032), False, 'import bpy\n'), ((16103, 16164), 'bpy.types.INFO_MT_file_export.remove', 'bpy.types.INFO_MT_file_export.remove', (['menu_func_xmodel_export'], {}), '(menu_func_xmodel_export)\n', (16139, 16164), False, 'import bpy\n'), ((16169, 16229), 'bpy.types.INFO_MT_file_export.remove', 'bpy.types.INFO_MT_file_export.remove', (['menu_func_xanim_export'], {}), '(menu_func_xanim_export)\n', (16205, 16229), False, 'import bpy\n'), ((1849, 
1874), 'imp.reload', 'imp.reload', (['import_xmodel'], {}), '(import_xmodel)\n', (1859, 1874), False, 'import imp\n'), ((1919, 1944), 'imp.reload', 'imp.reload', (['export_xmodel'], {}), '(export_xmodel)\n', (1929, 1944), False, 'import imp\n'), ((1988, 2012), 'imp.reload', 'imp.reload', (['import_xanim'], {}), '(import_xanim)\n', (1998, 2012), False, 'import imp\n'), ((2056, 2080), 'imp.reload', 'imp.reload', (['export_xanim'], {}), '(export_xanim)\n', (2066, 2080), False, 'import imp\n'), ((3427, 3439), 'time.clock', 'time.clock', ([], {}), '()\n', (3437, 3439), False, 'import time\n'), ((7716, 7728), 'time.clock', 'time.clock', ([], {}), '()\n', (7726, 7728), False, 'import time\n'), ((12381, 12393), 'time.clock', 'time.clock', ([], {}), '()\n', (12391, 12393), False, 'import time\n'), ((3645, 3657), 'time.clock', 'time.clock', ([], {}), '()\n', (3655, 3657), False, 'import time\n'), ((7934, 7946), 'time.clock', 'time.clock', ([], {}), '()\n', (7944, 7946), False, 'import time\n'), ((12598, 12610), 'time.clock', 'time.clock', ([], {}), '()\n', (12608, 12610), False, 'import time\n')] |
from collections import deque
class Solution(object):
    def longestOnes(self, A, K):
        """Return the length of the longest run of 1s obtainable by
        flipping at most K zeros in the binary list A.

        Sliding window: remember the indices of zeros inside the window;
        once the K-zero budget is spent, each new zero advances the window
        start just past the oldest zero.

        :type A: List[int]
        :type K: int
        :rtype: int
        """
        window_start = 0
        best = 0
        zero_positions = deque()
        for idx, value in enumerate(A):
            if value != 0:
                continue
            zero_positions.append(idx)
            if K == 0:
                # Budget exhausted: close the window before this zero.
                best = max(best, idx - window_start)
                window_start = zero_positions.popleft() + 1
            else:
                K -= 1
        # The final (still-open) window reaches the end of A.
        return max(best, len(A) - window_start)
def test_long_ones():
    """Spot-check longestOnes against the known LeetCode examples."""
    solver = Solution()
    cases = [
        (([1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0], 2), 6),
        (([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1], 3), 10),
        (([0, 0, 1, 1, 1, 0, 0], 0), 3),
        (([0, 0, 0, 1], 4), 4),
    ]
    for (arr, k), expected in cases:
        assert expected == solver.longestOnes(arr, k)
| [
"collections.deque"
] | [((223, 230), 'collections.deque', 'deque', ([], {}), '()\n', (228, 230), False, 'from collections import deque\n')] |
#
# This script allows the user to control an Anki car using Python
# To control multiple cars at once, open a seperate Command Line Window for each car
# and call this script with the approriate car mac address.
# This script attempts to save lap times into local mysql db running on the pi
# Author: jstucken
# Created: 23-2-2021
#
SCRIPT_TITLE="Lap timer saving to Mysql"
# import required modules
import loader.bootstrapper
import time
from overdrive import Overdrive
from php_communicator import PhpCommunicator
from network import Network
# Setup our car
car = Overdrive(12) # init overdrive object
car.enableLocationData()
# get car mac address from our class object
car_mac = car.getMacAddress()
car_id = car.getCarId()
username = car.getUsername()
student_id = car.getStudentId()
# count number of laps completed
lap_count = 0
# start the car off
# usage: car.changeSpeed(speed, accel)
car.changeSpeed(400, 800)
last_lap_time = 0
last_lap_count = -1  # -1 forces the first lap-count print
# race 3 laps and time each one
while lap_count !=3:
    time.sleep(0.1)
    # lap count is incremented when cars pass over the finish line
    lap_count = car.getLapCount()
    # count laps done (only print when the count changes)
    if last_lap_count != lap_count:
        last_lap_count = lap_count
        print()
        print("lap_count: "+str(lap_count))
    # get lap time
    prev_lap_time = car.getLapTime()
    # only act when a new lap time has appeared
    if last_lap_time != prev_lap_time:
        print()
        print("prev_lap_time: "+str(prev_lap_time))
        # if car has completed at least 1 lap
        if lap_count > 0:
            # Save last_lap_time time to database now
            # get cars current location and speed
            location = car.getLocation()
            speed = car.getSpeed()
            # data to be sent to API
            data = {
                'student_id':student_id,
                'car_id':car_id,
                'lap_time':prev_lap_time,
                'lap_count':lap_count,
                'speed':speed
            }
            # get the local IP address of the server machine
            local_ip_address = Network.getLocalIPAddress()
            # build our PHP script URL where data will be sent to be saved
            # eg "http://192.168.0.10/lap_times_save.php"
            url = "http://"+local_ip_address+"/python_communicator/lap_times_save.php"
            # Send data to PHP to save to database
            php = PhpCommunicator()
            return_text = php.getResponse(url, data) # get the response from PHP
            # extracting response text
            print("Response from PHP script: %s"%return_text)
        # end if
        print()
        print("*****")
        last_lap_time = prev_lap_time
# stop the car
car.stopCarFast()
print("Stopping as car has done the required number of laps")
car.disconnect()
quit()
"overdrive.Overdrive",
"php_communicator.PhpCommunicator",
"network.Network.getLocalIPAddress",
"time.sleep"
] | [((594, 607), 'overdrive.Overdrive', 'Overdrive', (['(12)'], {}), '(12)\n', (603, 607), False, 'from overdrive import Overdrive\n'), ((1074, 1089), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1084, 1089), False, 'import time\n'), ((2209, 2236), 'network.Network.getLocalIPAddress', 'Network.getLocalIPAddress', ([], {}), '()\n', (2234, 2236), False, 'from network import Network\n'), ((2536, 2553), 'php_communicator.PhpCommunicator', 'PhpCommunicator', ([], {}), '()\n', (2551, 2553), False, 'from php_communicator import PhpCommunicator\n')] |
import board
from i2cperipheral import I2CPeripheral
from analogio import AnalogOut
from digitalio import DigitalInOut, Direction, Pull
import struct
import math
import time
# --- Module-level state for the simulated reaction-wheel I2C peripheral ---
regs = [0] * 16
index = 0
i2c_addr = 0x68  # 7-bit I2C address this peripheral answers on
frame_id = 0  # last telecommand/telemetry frame id received
motor_control_mode = 0
backup_mode = 0
motor_switch_state = 0
hall_switch_state = 0
encoder_switch_state = 0
error_flag = 0
unused = 0  # reserved low bits of the status byte
# Individual wheel error flags, packed by send_tlm_wheel_status_flags().
invalidTelemetryFlag = 0
invalidTelecommandFlag = 0
encoderError = 0
uartError = 0
i2cError = 0
canError = 0
configurationError = 0
speedError = 0
reference_speed = 0
wheel_current = 290 # mA
wheel_speed = math.floor(100/2) #rpm
wheel_duty = 5
wheel_speed_backup = wheel_speed
def send_tlm_identification():
    """Build the identification telemetry frame.

    Frame layout: fixed header bytes 8, 0, 9, 8 followed by two packed
    unsigned 16-bit values (1111 and 8888). Returns a list of ints.
    """
    payload = bytearray([8, 0, 9, 8])
    payload += struct.pack("H", 1111)
    payload += struct.pack("H", 8888)
    return list(payload)
def send_tlm_identification_ext():
    """Build the extended identification frame.

    Frame layout: packed unsigned 16-bit value 1234 followed by the two
    bytes 68 and 0xFF. Returns a list of ints.
    """
    frame = bytearray(struct.pack("H", 1234))
    frame.extend((68, 0xFF))
    return list(frame)
def send_tlm_status(motor_control_mode, backup_mode, motor_switch_state, hall_switch_state, encoder_switch_state, error_flag):
    """Build the status telemetry frame.

    Frame layout: uint16 1111, uint16 8888, two zero bytes, the control
    mode, then one packed status byte:
    bit7=backup, bit6=motor, bit5=hall, bit4=encoder, bit3=error, with
    the module-level ``unused`` value OR-ed into the remaining bits.
    Returns a bytes object.
    """
    packed = ((backup_mode & 0x1) << 7
              | (motor_switch_state & 0x1) << 6
              | (hall_switch_state & 0x1) << 5
              | (encoder_switch_state & 0x1) << 4
              | (error_flag & 0x1) << 3
              | unused)  # ``unused`` is a module global (reserved bits)
    return struct.pack("H", 1111) + struct.pack("H", 8888) + bytearray([0, 0, motor_control_mode, packed])
def send_tlm_wheel_data_full(wheel_speed, wheel_reference_speed, wheel_current):
    """Build the full wheel-data frame: three packed signed 16-bit fields
    (measured speed, reference speed, current). Returns a list of ints."""
    frame = bytearray()
    for field in (wheel_speed, wheel_reference_speed, wheel_current):
        frame += struct.pack("h", field)
    return list(frame)
def send_tlm_wheel_data_additional(wheel_duty, wheel_speed_backup):
    """Build the additional wheel-data frame: packed signed 16-bit duty
    cycle followed by the backup-controller speed. Returns a list of ints.

    Bug fix: the original packed ``wheel_duty`` twice and silently ignored
    the ``wheel_speed_backup`` parameter, even though the caller passes it
    (``send_tlm_wheel_data_additional(wheel_duty, wheel_speed_backup)``).
    The second field is now the backup speed.
    """
    output = []
    output += struct.pack("h", wheel_duty) + struct.pack("h", wheel_speed_backup)
    return output
def send_tlm_wheel_status_flags(invalidTelemetryFlag=0, invalidTelecommandFlag=0, encoderError=0, uartError=0, i2cError=0, canError=0, configurationError=0, speedError=0):
    """Pack the eight wheel error flags into one status byte.

    Bit 0 = invalid telemetry ... bit 7 = speed error, in parameter order.
    Returns a single-byte bytearray.
    """
    flags = (invalidTelemetryFlag, invalidTelecommandFlag, encoderError,
             uartError, i2cError, canError, configurationError, speedError)
    status = 0
    for bit, flag in enumerate(flags):
        status |= (flag & 0x01) << bit
    return bytearray([status])
def voltage_to_dac(voltage):
    """Convert a voltage (3.3 V full scale) to a DAC register value.

    The 10-bit count (voltage * 1024 / 3.3) is scaled by 64 to occupy the
    upper bits of the 16-bit register, then floored to an int.
    """
    counts = (voltage * 1024) / 3.3
    return math.floor(counts * 64)
# Drive a fixed test voltage on the analog output.
vout = 0.95
dac_value = voltage_to_dac(vout)
print("Set analog output for testing: {0:f} ({1:d}) V".format(vout, dac_value))
analog_out = AnalogOut(board.A0)
analog_out.value = dac_value
# Wheel-enable input pin; the script idles until it goes high.
enable_pin = DigitalInOut(board.D8)
enable_pin.direction = Direction.INPUT
# enable_pin.pull = Pull.DOWN
print("Waiting for wheel enable")
while enable_pin.value == False:
    time.sleep(0.1)
print("Starting I2C response")
# Main loop: act as an I2C peripheral, decoding telecommand writes
# (frame_id < 40) and answering telemetry reads (frame_id >= 128) on the
# restart of a combined transfer.
with I2CPeripheral(board.SCL, board.SDA, (i2c_addr,)) as device:
    while True:
        r = device.request()
        if not r:
            # Maybe do some housekeeping
            continue
        with r: # Closes the transfer if necessary by sending a NACK or feeding dummy bytes
            if r.address == i2c_addr:
                if not r.is_read: # Main write which is Selected read
                    # First byte of every write is the frame id.
                    b = r.read(1)
                    if b:
                        frame_id = struct.unpack("B", b)[0]
                        print("Recieved frame ID: " + str(frame_id))
                        if frame_id < 40:
                            # --- Telecommand decoding ---
                            if frame_id == 1:
                                reset_id = struct.unpack("B", r.read(1))[0]
                            elif frame_id == 2:
                                reference_speed = struct.unpack("h", r.read(2))[0]
                                reference_speed_rpm = float(reference_speed/2.0)
                                wheel_speed = reference_speed + 5
                            elif frame_id == 3:
                                wheel_duty = struct.unpack("h", r.read(2))[0]
                            elif frame_id == 7:
                                motor_switch_state = r.read(1)
                            elif frame_id == 8:
                                encoder_switch_state = r.read(1)
                            # NOTE(review): duplicate condition — this
                            # branch (hall power state) is unreachable;
                            # it probably should test a different frame id.
                            elif frame_id == 8:
                                hall_switch_state = r.read(1)
                            elif frame_id == 10:
                                motor_control_mode = struct.unpack("B", r.read(1))[0]
                            elif frame_id == 12:
                                backup_mode = r.read(1)
                            elif frame_id == 20:
                                clear_errors = r.read(1)
                                # NOTE(review): r.read(1) returns bytes, so
                                # comparing to the int 85 is always False —
                                # likely needs struct.unpack first.
                                if clear_errors == 85:
                                    invalidTelemetryFlag = 0
                                    invalidTelecommandFlag = 0
                                    encoderError = 0
                                    uartError = 0
                                    i2cError = 0
                                    canError = 0
                                    configurationError = 0
                                    speedError = 0
                            elif frame_id == 31:
                                new_i2c_addr = r.read(1)
                            elif frame_id == 33:
                                new_can_mask = r.read(1)
                            # NOTE(review): duplicate condition — the PWM
                            # gain branch below is unreachable.
                            elif frame_id == 33:
                                b = r.read(3)
                            elif frame_id == 34:
                                b = r.read(6)
                            elif frame_id == 35:
                                b = r.read(6)
                            else:
                                invalidTelecommandFlag = 1
                    else:
                        # No data to read
                        continue
                elif r.is_restart: # Combined transfer: This is the Main read message
                    # --- Telemetry responses for the previously written frame id ---
                    n = 0
                    if frame_id == 128:
                        n = r.write(bytes(send_tlm_identification()))
                    elif frame_id == 129:
                        n = r.write(bytes(send_tlm_identification_ext()))
                    elif frame_id == 130:
                        n = r.write(bytes(send_tlm_status(motor_control_mode, backup_mode, motor_switch_state, hall_switch_state, encoder_switch_state, error_flag)))
                    elif frame_id == 133:
                        n = r.write(bytes(2))
                    elif frame_id == 134:
                        n = r.write(bytes(2))
                    elif frame_id == 135:
                        n = r.write(bytes(2))
                    elif frame_id == 137:
                        n = r.write(bytes(send_tlm_wheel_data_full(wheel_speed, reference_speed, wheel_current)))
                    elif frame_id == 138:
                        n = r.write(bytes(send_tlm_wheel_data_additional(wheel_duty, wheel_speed_backup)))
                    elif frame_id == 139:
                        n = r.write(bytearray([9,8,7]))
                    elif frame_id == 140:
                        n = r.write(bytearray([1,2,3,4,5,6]))
                    elif frame_id == 141:
                        n = r.write(bytearray([10, 11, 12, 13, 14, 15]))
                    elif frame_id == 145:
                        n = r.write(bytes(send_tlm_wheel_status_flags(invalidTelemetryFlag, invalidTelecommandFlag, encoderError, uartError, i2cError, canError, configurationError, speedError)))
                    else:
                        invalidTelemetryFlag = 1
| [
"i2cperipheral.I2CPeripheral",
"math.floor",
"time.sleep",
"struct.pack",
"struct.unpack",
"analogio.AnalogOut",
"digitalio.DigitalInOut"
] | [((575, 594), 'math.floor', 'math.floor', (['(100 / 2)'], {}), '(100 / 2)\n', (585, 594), False, 'import math\n'), ((3089, 3108), 'analogio.AnalogOut', 'AnalogOut', (['board.A0'], {}), '(board.A0)\n', (3098, 3108), False, 'from analogio import AnalogOut\n'), ((3152, 3174), 'digitalio.DigitalInOut', 'DigitalInOut', (['board.D8'], {}), '(board.D8)\n', (3164, 3174), False, 'from digitalio import DigitalInOut, Direction, Pull\n'), ((2914, 2951), 'math.floor', 'math.floor', (['(voltage * 1024 / 3.3 * 64)'], {}), '(voltage * 1024 / 3.3 * 64)\n', (2924, 2951), False, 'import math\n'), ((3316, 3331), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3326, 3331), False, 'import time\n'), ((3370, 3418), 'i2cperipheral.I2CPeripheral', 'I2CPeripheral', (['board.SCL', 'board.SDA', '(i2c_addr,)'], {}), '(board.SCL, board.SDA, (i2c_addr,))\n', (3383, 3418), False, 'from i2cperipheral import I2CPeripheral\n'), ((799, 821), 'struct.pack', 'struct.pack', (['"""H"""', '(8888)'], {}), "('H', 8888)\n", (810, 821), False, 'import struct\n'), ((949, 971), 'struct.pack', 'struct.pack', (['"""H"""', '(1234)'], {}), "('H', 1234)\n", (960, 971), False, 'import struct\n'), ((2046, 2077), 'struct.pack', 'struct.pack', (['"""h"""', 'wheel_current'], {}), "('h', wheel_current)\n", (2057, 2077), False, 'import struct\n'), ((2241, 2269), 'struct.pack', 'struct.pack', (['"""h"""', 'wheel_duty'], {}), "('h', wheel_duty)\n", (2252, 2269), False, 'import struct\n'), ((2272, 2300), 'struct.pack', 'struct.pack', (['"""h"""', 'wheel_duty'], {}), "('h', wheel_duty)\n", (2283, 2300), False, 'import struct\n'), ((774, 796), 'struct.pack', 'struct.pack', (['"""H"""', '(1111)'], {}), "('H', 1111)\n", (785, 796), False, 'import struct\n'), ((1706, 1728), 'struct.pack', 'struct.pack', (['"""H"""', '(1111)'], {}), "('H', 1111)\n", (1717, 1728), False, 'import struct\n'), ((1731, 1753), 'struct.pack', 'struct.pack', (['"""H"""', '(8888)'], {}), "('H', 8888)\n", (1742, 1753), False, 'import struct\n'), 
((1972, 2001), 'struct.pack', 'struct.pack', (['"""h"""', 'wheel_speed'], {}), "('h', wheel_speed)\n", (1983, 2001), False, 'import struct\n'), ((2004, 2043), 'struct.pack', 'struct.pack', (['"""h"""', 'wheel_reference_speed'], {}), "('h', wheel_reference_speed)\n", (2015, 2043), False, 'import struct\n'), ((4059, 4080), 'struct.unpack', 'struct.unpack', (['"""B"""', 'b'], {}), "('B', b)\n", (4072, 4080), False, 'import struct\n')] |
import threading
import time
import random
from multiprocessing.pool import ThreadPool
from PyQt5 import QtCore, QtGui, QtWidgets
bandera = False  # shared race-finished flag: set True by the first horse to cross the line
val1 = ""  # NOTE(review): declared global in caballo.__init__ but never read/written in this chunk
msg = 'Caballo ganador es: {}'  # winner-announcement template; presumably formatted by the GUI code -- not used here
# Horse class (one thread per horse)
class caballo(threading.Thread):
    """A racing horse: each instance is a thread that repeatedly moves a
    PyQt button to the right by a random step until it reaches x >= 600.

    The global ``bandera`` acts as a shared "race over" flag: the first
    horse to finish sets it, which stops every other horse's run loop.
    """
    def __init__(self, num, b1,resultado):
        # num: horse number shown in the finish message
        # b1: the QPushButton widget this horse moves across the window
        # NOTE(review): the `resultado` parameter is ignored -- it is
        # immediately overwritten with 20.0 below; confirm intent.
        global val1,bandera
        threading.Thread.__init__(self)
        bandera = False
        self.resultado = 20.0
        self.tiempo_inicio = time.time()  # race start timestamp
        self.tiempo_final = ""
        self.tiempo_total = ""
        self.num = num
        self.valor = 0  # accumulated x-distance travelled
        self.boton = b1
        self.eleccion= ""
    # Pick a random step size: 10, 20, 30 or 40 pixels
    def aleatorio(self):
        mylist = ["10","20","30","40"]
        self.eleccion = random.choice(mylist)
    # Move the horse's button right by the chosen step, then pause
    def movimiento(self):
        self.p = self.boton.pos()
        self.p += QtCore.QPoint(int(self.eleccion), 0)
        self.valor += int(self.eleccion)
        self.boton.move(self.p)
        time.sleep(0.75)
    def retorno(self):
        # NOTE(review): bare expression, no `return` -- this statement has
        # no effect; callers probably expect `return self.resultado`.
        self.resultado
    # Thread body: race until this horse finishes or another one already did
    def run(self):
        global bandera
        while(True):
            if bandera == True:
                break
            else:
                self.aleatorio()
                self.movimiento()
                if self.valor >= 600:
                    self.tiempo_final = time.time()
                    self.resultado = self.tiempo_final-self.tiempo_inicio
                    print("\nEl caballo: " + str(self.num)+" cruzó la meta!!, Tiempo: "+str(self.resultado))
                    bandera=True
                    break
| [
"threading.Thread.__init__",
"random.choice",
"time.time",
"time.sleep"
] | [((337, 368), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (362, 368), False, 'import threading\n'), ((449, 460), 'time.time', 'time.time', ([], {}), '()\n', (458, 460), False, 'import time\n'), ((757, 778), 'random.choice', 'random.choice', (['mylist'], {}), '(mylist)\n', (770, 778), False, 'import random\n'), ((1029, 1045), 'time.sleep', 'time.sleep', (['(0.75)'], {}), '(0.75)\n', (1039, 1045), False, 'import time\n'), ((1371, 1382), 'time.time', 'time.time', ([], {}), '()\n', (1380, 1382), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import json
from datetime import datetime
import re
import koji
from kobo.rpmlib import parse_nvr
import semver
from freshmaker import db, conf, log
from freshmaker.handlers import ContainerBuildHandler
from freshmaker.events import BotasErrataShippedEvent, ManualBundleRebuild
from freshmaker.lightblue import ContainerImage
from freshmaker.models import ArtifactBuild, ArtifactType, Event
from freshmaker.types import EventState, ArtifactBuildState, RebuildReason
from freshmaker.pyxis import Pyxis
from freshmaker.kojiservice import KojiService
from freshmaker.errata import Errata
class HandleBotasAdvisory(ContainerBuildHandler):
    """
    Handles event that was created by transition of an advisory filed by
    BOTAS to SHIPPED_LIVE state
    """
    name = "HandleBotasAdvisory"
    # This prefix is added to the event reason when skipping the event,
    # because Release Driver inspects the event's reason for certain
    # prefixes to determine whether bundle processing failed.
    _no_bundle_prefix = "No bundles to rebuild: "
def __init__(self, pyxis=None):
super().__init__()
if pyxis:
self._pyxis = pyxis
else:
if not conf.pyxis_server_url:
raise ValueError("'PYXIS_SERVER_URL' parameter should be set")
self._pyxis = Pyxis(conf.pyxis_server_url)
if not conf.freshmaker_root_url or "://" not in conf.freshmaker_root_url:
raise ValueError("'FRESHMAKER_ROOT_URL' parameter should be set to "
"a valid URL")
# Currently processed event
self.event = None
def can_handle(self, event):
if (isinstance(event, BotasErrataShippedEvent) and
'docker' in event.advisory.content_types):
return True
# This handler can handle manual bundle rebuilds too
if isinstance(event, ManualBundleRebuild):
return True
return False
    def handle(self, event):
        """Process the event: plan and start rebuilds of affected bundles.

        :param event: BotasErrataShippedEvent or ManualBundleRebuild
        :return: always an empty list; progress is tracked via the DB event
        """
        if event.dry_run:
            self.force_dry_run()
        self.event = event
        db_event = Event.get_or_create_from_event(db.session, event)
        self.set_context(db_event)
        # Check if event is allowed by internal policies
        if not self.event.is_allowed(self):
            msg = ("This image rebuild is not allowed by internal policy. "
                   f"message_id: {event.msg_id}")
            db_event.transition(EventState.SKIPPED, msg)
            self.log_info(msg)
            return []
        # Release Driver rebuild requests carry a 'bundle_images' attribute.
        if isinstance(event, ManualBundleRebuild) and \
                hasattr(event, 'bundle_images'):
            bundles_to_rebuild = self._handle_release_driver_rebuild(db_event)
        # automatic rebuild and manual bundle rebuild(triggered by post request)
        else:
            bundles_to_rebuild = self._handle_bundle_rebuild(db_event)
        if not bundles_to_rebuild:
            return []
        builds = self._prepare_builds(db_event, bundles_to_rebuild)
        # Reset context to db_event.
        self.set_context(db_event)
        self.start_to_build_images(builds)
        if all([b.state == ArtifactBuildState.FAILED.value for b in builds]):
            db_event.transition(EventState.FAILED, "All bundle rebuilds failed")
        else:
            msg = f"Advisory {db_event.search_key}: Rebuilding " \
                  f"{len(db_event.builds.all())} bundle images."
            db_event.transition(EventState.BUILDING, msg)
        return []
    def _handle_bundle_rebuild(self, db_event):
        """
        Handle auto rebuild for an advisory created by Botas
        OR manually triggered rebuild
        :param db_event: database event that represents the rebuild event
        :rtype: list
        :return: list of bundle-data dicts describing the bundles to rebuild
        """
        # Mapping of operators' original build nvrs to rebuilt nvrs in advisory
        nvrs_mapping = self._create_original_to_rebuilt_nvrs_map()
        original_nvrs = nvrs_mapping.keys()
        self.log_info(
            "Orignial nvrs of build in the advisory #{0} are: {1}".format(
                self.event.advisory.errata_id, " ".join(original_nvrs)))
        # Get image manifest_list_digest for all original images, manifest_list_digest is used
        # in pullspecs in bundle's related images
        original_digests_by_nvr = {}
        original_nvrs_by_digest = {}
        for nvr in original_nvrs:
            digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr)
            if digest:
                original_digests_by_nvr[nvr] = digest
                original_nvrs_by_digest[digest] = nvr
            else:
                log.warning(
                    f"Image manifest_list_digest not found for original image {nvr} in Pyxis, "
                    "skip this image"
                )
        if not original_digests_by_nvr:
            msg = f"None of the original images have digests in Pyxis: {','.join(original_nvrs)}"
            log.warning(msg)
            db_event.transition(EventState.SKIPPED, msg)
            return []
        # Get image manifest_list_digest for all rebuilt images, manifest_list_digest is used
        # in pullspecs of bundle's related images
        rebuilt_digests_by_nvr = {}
        rebuilt_nvrs = nvrs_mapping.values()
        for nvr in rebuilt_nvrs:
            # Don't require that the manifest list digest be published in this case because
            # there's a delay from after an advisory is shipped and when the published repositories
            # entry is populated
            digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr, must_be_published=False)
            if digest:
                rebuilt_digests_by_nvr[nvr] = digest
            else:
                log.warning(
                    f"Image manifest_list_digest not found for rebuilt image {nvr} in Pyxis, "
                    "skip this image"
                )
        if not rebuilt_digests_by_nvr:
            msg = f"None of the rebuilt images have digests in Pyxis: {','.join(rebuilt_nvrs)}"
            log.warning(msg)
            db_event.transition(EventState.SKIPPED, msg)
            return []
        index_images = self._pyxis.get_operator_indices()
        # get latest bundle images per channel per index image filtered
        # by the highest semantic version
        all_bundles = self._pyxis.get_latest_bundles(index_images)
        self.log_debug(
            "There are %d bundles that are latest in a channel in the found index images",
            len(all_bundles),
        )
        # A mapping of digests to bundle metadata. This metadata is used to
        # for the CSV metadata updates.
        bundle_mds_by_digest = {}
        # get bundle digests for original images
        bundle_digests_by_related_nvr = {}
        for image_nvr, image_digest in original_digests_by_nvr.items():
            bundles = self._pyxis.get_bundles_by_related_image_digest(
                image_digest, all_bundles
            )
            if not bundles:
                log.info(f"No latest bundle image with the related image of {image_nvr}")
                continue
            for bundle in bundles:
                bundle_digest = bundle['bundle_path_digest']
                bundle_mds_by_digest[bundle_digest] = bundle
                bundle_digests_by_related_nvr.setdefault(image_nvr, []).append(bundle_digest)
        if not bundle_digests_by_related_nvr:
            msg = "None of the original images have related bundles, skip."
            log.warning(msg)
            db_event.transition(EventState.SKIPPED, msg)
            return []
        self.log_info(
            "Found %d bundles with relevant related images", len(bundle_digests_by_related_nvr)
        )
        # Mapping of bundle digest to bundle data
        # {
        #     digest: {
        #         "images": [image_amd64, image_aarch64],
        #         "nvr": NVR,
        #         "auto_rebuild": True/False,
        #         "osbs_pinning": True/False,
        #         "pullspecs": [...],
        #     }
        # }
        bundles_by_digest = {}
        default_bundle_data = {
            'images': [],
            'nvr': None,
            'auto_rebuild': False,
            'osbs_pinning': False,
            # CSV modifications for the rebuilt bundle image
            'pullspec_replacements': [],
            'update': {},
        }
        # Get images for each bundle digest, a bundle digest can have multiple images
        # with different arches.
        for digest in bundle_mds_by_digest:
            bundles = self._pyxis.get_images_by_digest(digest)
            # If no bundle image found, just skip this bundle digest
            if not bundles:
                self.log_warn('The bundle digest %r was not found in Pyxis. Skipping.', digest)
                continue
            bundle_nvr = bundles[0]['brew']['build']
            # If specific container images where requested to rebuild, process only them
            if (isinstance(self.event, ManualBundleRebuild)
                    and self.event.container_images  # noqa: W503
                    and bundle_nvr not in self.event.container_images):  # noqa: W503
                self.log_debug("Ignoring '%s', because it's not in requested rebuilds"
                               " (container_images in request)", bundle_nvr)
                continue
            # Filter out builds from dependent event that were rebuilt recently
            done_build = db_event.get_artifact_build_from_event_dependencies(
                bundle_nvr)
            if done_build:
                self.log_debug("Ignoring '%s' bundle, because it was already rebuilt"
                               " in dependent event", bundle_nvr)
                continue
            # deepcopy so each digest gets its own mutable bundle-data dict
            bundles_by_digest.setdefault(digest, copy.deepcopy(default_bundle_data))
            bundles_by_digest[digest]['nvr'] = bundle_nvr
            bundles_by_digest[digest]['images'] = bundles
        # Unauthenticated koji session to fetch build info of bundles
        koji_api = KojiService(conf.koji_profile)
        # For each bundle, check whether it should be rebuilt by comparing the
        # auto_rebuild_tags of repository and bundle's tags
        for digest, bundle_data in bundles_by_digest.items():
            bundle_nvr = bundle_data['nvr']
            # Images are for different arches, just check against the first image
            image = bundle_data['images'][0]
            if self.image_has_auto_rebuild_tag(image):
                bundle_data['auto_rebuild'] = True
            # Fetch buildinfo
            buildinfo = koji_api.get_build(bundle_nvr)
            related_images = (
                buildinfo.get('extra', {})
                .get('image', {})
                .get('operator_manifests', {})
                .get('related_images', {})
            )
            bundle_data['osbs_pinning'] = related_images.get('created_by_osbs', False)
            # Save the original pullspecs
            bundle_data['pullspec_replacements'] = related_images.get('pullspecs', [])
        # Digests of bundles to be rebuilt
        to_rebuild_digests = set()
        # Now for each bundle, replace the original digest with rebuilt
        # digest (override pullspecs)
        for digest, bundle_data in bundles_by_digest.items():
            # Override pullspecs only when auto_rebuild is enabled and OSBS-pinning
            # mechanism is used.
            if not (bundle_data['auto_rebuild'] and bundle_data['osbs_pinning']):
                self.log_info(
                    'The bundle %r does not have auto-rebuild tags (%r) and/or OSBS pinning (%r)',
                    bundle_data['nvr'],
                    bundle_data['auto_rebuild'],
                    bundle_data['osbs_pinning'],
                )
                continue
            csv_name = bundle_mds_by_digest[digest]['csv_name']
            version = bundle_mds_by_digest[digest]['version_original']
            bundle_data.update(self._get_csv_updates(csv_name, version))
            for pullspec in bundle_data['pullspec_replacements']:
                # A pullspec item example:
                # {
                #   'new': 'registry.exampe.io/repo/example-operator@sha256:<sha256-value>',
                #   'original': 'registry.example.io/repo/example-operator:v2.2.0',
                #   'pinned': True,
                #   # value used for internal purpose during manual rebuilds, it's an old pullspec that was replaced
                #   '_old': 'registry.exampe.io/repo/example-operator@sha256:<previous-sha256-value>,
                # }
                # A pullspec path is in format of "registry/repository@digest"
                pullspec_elems = pullspec.get('new').split('@')
                old_digest = pullspec_elems[1]
                if old_digest not in original_nvrs_by_digest:
                    # This related image is not one of the original images
                    continue
                # This related image is one of our original images
                old_nvr = original_nvrs_by_digest[old_digest]
                new_nvr = nvrs_mapping[old_nvr]
                new_digest = rebuilt_digests_by_nvr[new_nvr]
                # save pullspec that image had before rebuild
                pullspec['_old'] = pullspec.get('new')
                # Replace the old digest with new digest
                pullspec_elems[1] = new_digest
                new_pullspec = '@'.join(pullspec_elems)
                pullspec['new'] = new_pullspec
                # Always set pinned to True when it was replaced by Freshmaker
                # since it indicates that the pullspec was modified from the
                # original pullspec
                pullspec['pinned'] = True
                # Once a pullspec in this bundle has been overrided, add this bundle
                # to rebuild list
                self.log_info(
                    'Changing pullspec %r to %r in the bundle %r',
                    pullspec['_old'],
                    pullspec['new'],
                    bundle_data['nvr'],
                )
                to_rebuild_digests.add(digest)
        if not to_rebuild_digests:
            msg = self._no_bundle_prefix + "No bundle images to rebuild for " \
                f"advisory {self.event.advisory.name}"
            self.log_info(msg)
            db_event.transition(EventState.SKIPPED, msg)
            db.session.commit()
            return []
        bundles_to_rebuild = list(map(lambda x: bundles_by_digest[x],
                                      to_rebuild_digests))
        return bundles_to_rebuild
    def _handle_release_driver_rebuild(self, db_event):
        """
        Handle manual rebuild submitted by Release Driver for an advisory created by Botas
        :param db_event: database event that represents a rebuild event
        :rtype: list
        :return: list of bundle-data dicts describing the bundles to rebuild
        """
        old_to_new_pullspec_map = self._get_pullspecs_mapping()
        if not old_to_new_pullspec_map:
            msg = self._no_bundle_prefix + 'None of the bundle images have ' \
                                           'applicable pullspecs to replace'
            log.warning(msg)
            db_event.transition(EventState.SKIPPED, msg)
            return []
        # Unauthenticated koji session to fetch build info of bundles
        koji_api = KojiService(conf.koji_profile)
        rebuild_nvr_to_pullspecs_map = dict()
        # compare replaced pullspecs with pullspecs in 'container_images' and
        # create map for bundles that should be rebuilt with their nvrs
        for container_image_nvr in self.event.container_images:
            artifact_build = db.session.query(ArtifactBuild).filter(
                ArtifactBuild.rebuilt_nvr == container_image_nvr,
                ArtifactBuild.type == ArtifactType.IMAGE.value,
            ).one_or_none()
            pullspecs = []
            # Try to find build in FM database, if it's not there check in Brew
            if artifact_build:
                self.log_info(
                    "%s in the container_images list was found in the database", container_image_nvr
                )
                pullspecs = artifact_build.bundle_pullspec_overrides["pullspec_replacements"]
            else:
                self.log_info(
                    "%s in the container_images list is not in the database. Searching in Brew "
                    "instead.",
                    container_image_nvr,
                )
                # Fetch buildinfo from Koji
                buildinfo = koji_api.get_build(container_image_nvr)
                # Get the original pullspecs
                pullspecs = (
                    buildinfo.get('extra', {})
                    .get('image', {})
                    .get('operator_manifests', {})
                    .get('related_images', {})
                    .get('pullspecs', [])
                )
            for pullspec in pullspecs:
                if pullspec.get('new') not in old_to_new_pullspec_map:
                    self.log_debug("The pullspec %s is not getting replaced", pullspec.get('new'))
                    continue
                # use newer pullspecs in the image
                self.log_info(
                    "Replacing the pullspec %s with %s on %s",
                    pullspec['new'],
                    old_to_new_pullspec_map[pullspec['new']],
                    container_image_nvr,
                )
                pullspec['new'] = old_to_new_pullspec_map[pullspec['new']]
                rebuild_nvr_to_pullspecs_map[container_image_nvr] = pullspecs
        if not rebuild_nvr_to_pullspecs_map:
            msg = self._no_bundle_prefix + 'None of the container images have ' \
                                           'applicable pullspecs from the input bundle images'
            log.info(msg)
            db_event.transition(EventState.SKIPPED, msg)
            return []
        # list with metadata about every bundle to do rebuild
        to_rebuild_bundles = []
        # fill 'append' and 'update' fields for bundles to rebuild
        for nvr, pullspecs in rebuild_nvr_to_pullspecs_map.items():
            self.log_debug("Getting the manifest list digest for %s", nvr)
            bundle_digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr)
            if bundle_digest is not None:
                self.log_debug("The manifest list digest for %s is %s", nvr, bundle_digest)
                bundles = self._pyxis.get_bundles_by_digest(bundle_digest)
                if not bundles:
                    self.log_error(
                        "The manifest_list_digest %s is not available on the bundles API endpoint",
                        bundle_digest,
                    )
                    continue
                temp_bundle = bundles[0]
                csv_updates = (self._get_csv_updates(temp_bundle['csv_name'],
                                                     temp_bundle['version_original']))
                to_rebuild_bundles.append({
                    'nvr': nvr,
                    'update': csv_updates['update'],
                    'pullspec_replacements': pullspecs,
                })
            else:
                log.warning('Can\'t find manifest_list_digest for bundle '
                            f'"{nvr}" in Pyxis')
        if not to_rebuild_bundles:
            msg = 'Can\'t find digests for any of the bundles to rebuild'
            log.warning(msg)
            db_event.transition(EventState.FAILED, msg)
            return []
        return to_rebuild_bundles
def _get_pullspecs_mapping(self):
"""
Get map of all replaced pullspecs from 'bundle_images' provided in an event.
:rtype: dict
:return: map of all '_old' pullspecs that was replaced by 'new'
pullspecs in previous Freshmaker rebuilds
"""
old_to_new_pullspec_map = dict()
for bundle_nvr in self.event.bundle_images:
artifact_build = db.session.query(ArtifactBuild).filter(
ArtifactBuild.rebuilt_nvr == bundle_nvr,
ArtifactBuild.type == ArtifactType.IMAGE.value,
).one_or_none()
if artifact_build is None:
log.warning(
f'Can\'t find build for a bundle image "{bundle_nvr}"')
continue
pullspec_overrides = artifact_build.bundle_pullspec_overrides
for pullspec in pullspec_overrides['pullspec_replacements']:
old_pullspec = pullspec.get('_old', None)
if old_pullspec is None:
continue
old_to_new_pullspec_map[old_pullspec] = pullspec['new']
return old_to_new_pullspec_map
@classmethod
def _get_csv_updates(cls, csv_name, version):
"""
Determine the CSV updates required for the bundle image.
:param str csv_name: the name field in the bundle's ClusterServiceVersion file
:param str version: the version of the bundle image being rebuilt
:return: a dictionary of the CSV updates needed
:rtype: dict
"""
csv_modifications = {}
new_version, fm_suffix = cls._get_rebuild_bundle_version(version)
new_csv_name = cls._get_csv_name(csv_name, version, new_version, fm_suffix)
csv_modifications['update'] = {
'metadata': {
# Update the name of the CSV to something uniquely identify the rebuild
'name': new_csv_name,
# Declare that this rebuild is a substitute of the bundle being rebuilt
'annotations': {'olm.substitutesFor': csv_name}
},
'spec': {
# Update the version of the rebuild to be unique and a newer version than the
# the version of the bundle being rebuilt
'version': new_version,
}
}
return csv_modifications
    @classmethod
    def _get_rebuild_bundle_version(cls, version):
        """
        Get a bundle version for the Freshmaker rebuild of the bundle image.
        Examples:
            1.2.3 => 1.2.3+0.$timestamp.p (no build ID and not a rebuild)
            1.2.3+48273 => 1.2.3+48273.0.$timestamp.p (build ID and not a rebuild)
            1.2.3+48273.0.1616457250.p => 1.2.3+48273.0.$timestamp.p (build ID and a rebuild)
        :param str version: the version of the bundle image being rebuilt
        :return: a tuple of the bundle version of the Freshmaker rebuild of the bundle image and
            the suffix that was added by Freshmaker
        :rtype: tuple(str, str)
        """
        parsed_version = semver.VersionInfo.parse(version)
        # Strip off the microseconds of the timestamp
        timestamp = int(datetime.utcnow().timestamp())
        new_fm_suffix = f'0.{timestamp}.p'
        if parsed_version.build:
            # Check if the bundle was a Freshmaker rebuild. Include .patched
            # for backwards compatibility with the old suffix.
            fm_suffix_search = re.search(
                r'(?P<fm_suffix>0\.\d+\.(?:p|patched))$', parsed_version.build
            )
            if fm_suffix_search:
                fm_suffix = fm_suffix_search.groupdict()['fm_suffix']
                # Get the build without the Freshmaker suffix. This may include a build ID
                # from the original build before Freshmaker rebuilt it or be empty.
                build_wo_fm_suffix = parsed_version.build[:- len(fm_suffix)]
                new_build = f"{build_wo_fm_suffix}{new_fm_suffix}"
            else:
                # This was not previously rebuilt by Freshmaker so just append the suffix
                # to the existing build ID with '.' separating it.
                new_build = f"{parsed_version.build}.{new_fm_suffix}"
        else:
            # If there is existing build ID, then make the Freshmaker suffix the build ID
            new_build = new_fm_suffix
        # Don't use the replace method in order to support semver 2.8.1
        new_version_dict = parsed_version._asdict()
        new_version_dict["build"] = new_build
        new_version = str(semver.VersionInfo(**new_version_dict))
        return new_version, new_fm_suffix
@staticmethod
def _get_csv_name(csv_name, version, rebuild_version, fm_suffix):
"""
Get a bundle CSV name for the Freshmaker rebuild of the bundle image.
:param str csv_name: the name of the ClusterServiceVersion (CSV) file of the bundle image
:param str version: the version of the bundle image being rebuilt
:param str rebuild_version: the new version being assigned by Freshmaker for the rebuild
:param str fm_suffix: the portion of rebuild_version that was generated by Freshmaker
:return: the bundle ClusterServiceVersion (CSV) name of the Freshmaker rebuild of the bundle
image
:rtype: str
"""
# The CSV name must be in the format of a valid DNS name, which means the + from the
# build ID must be replaced. In the event this was a previous Freshmaker rebuild, version
# may have a build ID that would be the DNS safe version in the CSV name.
dns_safe_version = version.replace('+', '-')
if dns_safe_version in csv_name:
dns_safe_rebuild_version = rebuild_version.replace('+', '-')
return csv_name.replace(dns_safe_version, dns_safe_rebuild_version)
else:
return f'{csv_name}.{fm_suffix}'
    def get_published_original_nvr(self, rebuilt_nvr):
        """
        Search for an original build, that has been built and published to a
        repository, and get original_nvr from it
        :param str rebuilt_nvr: rebuilt NVR to look build by
        :rtype: str or None
        :return: original NVR from the first published FM build for given NVR
        """
        original_nvr = None
        # artifact build should be only one in database, or raise an error
        artifact_build = db.session.query(ArtifactBuild).filter(
            ArtifactBuild.rebuilt_nvr == rebuilt_nvr,
            ArtifactBuild.type == ArtifactType.IMAGE.value,
        ).one_or_none()
        # recursively search for original artifact build
        if artifact_build is not None:
            original_nvr = artifact_build.original_nvr
            # check if image is published
            request_params = {'include': 'data.repositories',
                              'page_size': 1}
            images = self._pyxis._pagination(f'images/nvr/{original_nvr}',
                                             request_params)
            if not images:
                return None
            # stop recursion if the image is published in some repo
            if any(repo['published'] for repo in images[0].get('repositories')):
                return original_nvr
            # otherwise walk further back along the rebuild chain
            next_nvr = self.get_published_original_nvr(original_nvr)
            if next_nvr is not None:
                original_nvr = next_nvr
        return original_nvr
def image_has_auto_rebuild_tag(self, image):
""" Check if image has a tag enabled for auto rebuild.
:param dict image: Dict representation of an image entity in Pyxis.
:rtype: bool
:return: True if image has a tag enabled for auto rebuild in repository, otherwise False.
"""
for repo in image['repositories']:
# Skip unpublished repository
if not repo['published']:
continue
auto_rebuild_tags = self._pyxis.get_auto_rebuild_tags(
repo['registry'], repo['repository']
)
tags = [t['name'] for t in repo.get('tags', [])]
if set(auto_rebuild_tags) & set(tags):
return True
# It'd be more efficient to do this check first, but the exceptions are edge cases
# (e.g. testing) and it's best to not use it unless absolutely necessary
nvr = image['brew']['build']
parsed_nvr = parse_nvr(nvr)
nv = f'{parsed_nvr["name"]}-{parsed_nvr["version"]}'
if nv in conf.bundle_autorebuild_tag_exceptions:
self.log_info(
'The bundle %r has an exception for being tagged with an auto-rebuild tag', nvr
)
return True
return False
    def _create_original_to_rebuilt_nvrs_map(self):
        """
        Creates mapping of original operator build NVRs to rebuilt NVRs in advisory.
        Including NVRs of the builds from the blocking advisories
        :rtype: dict
        :return: map of the original NVRs as keys and rebuilt NVRs as values
        """
        nvrs_mapping = {}
        # Get builds from all blocking advisories
        blocking_advisories_builds = \
            Errata().get_blocking_advisories_builds(self.event.advisory.errata_id)
        # Get builds NVRs from the advisory attached to the message/event and
        # then get original NVR for every build
        for product_info in self.event.advisory.builds.values():
            for build in product_info['builds']:
                # Each build is a one key/value pair, and key is the build NVR
                build_nvr = next(iter(build))
                # Search for the first build that triggered the chain of rebuilds
                # for every shipped NVR to get original NVR from it
                original_nvr = self.get_published_original_nvr(build_nvr)
                if original_nvr is None:
                    continue
                nvrs_mapping[original_nvr] = build_nvr
                parsed_build_nvr = parse_nvr(build_nvr)
                # Check builds from blocking advisories and add to the mapping
                # all of them, that have overlapping package names
                for block_build in blocking_advisories_builds:
                    block_build_nvr = parse_nvr(block_build)
                    if (block_build_nvr['name'] == parsed_build_nvr['name']
                            and block_build_nvr['version'] == parsed_build_nvr['version']):  # noqa: W503
                        nvrs_mapping[block_build] = build_nvr
        return nvrs_mapping
    def _prepare_builds(self, db_event, to_rebuild_bundles):
        """
        Prepare models.ArtifactBuild instance for every bundle that will be
        rebuilt
        :param models.Event db_event: database event that will contain builds
        :param list to_rebuild_bundles: bundles to rebuild
        :return: builds that already in database and ready to be submitted to brew
        :rtype: list
        """
        builds = []
        csv_mod_url = conf.freshmaker_root_url + "/api/2/pullspec_overrides/{}"
        for bundle in to_rebuild_bundles:
            # Reset context to db_event for each iteration before
            # the ArtifactBuild is created.
            self.set_context(db_event)
            rebuild_reason = RebuildReason.DIRECTLY_AFFECTED.value
            bundle_name = koji.parse_NVR(bundle["nvr"])["name"]
            build = self.record_build(
                db_event, bundle_name, ArtifactType.IMAGE,
                state=ArtifactBuildState.PLANNED.value,
                original_nvr=bundle["nvr"],
                rebuild_reason=rebuild_reason)
            # Set context to particular build so logging shows this build
            # in case of error.
            self.set_context(build)
            build.transition(ArtifactBuildState.PLANNED.value, "")
            # Build arguments come from the original build's Koji metadata.
            additional_data = ContainerImage.get_additional_data_from_koji(bundle["nvr"])
            build.build_args = json.dumps({
                "repository": additional_data["repository"],
                "commit": additional_data["commit"],
                "target": additional_data["target"],
                "branch": additional_data["git_branch"],
                "arches": additional_data["arches"],
                # The build system always enforces that bundle images build from
                # "scratch", so there is no parent image. See:
                # https://osbs.readthedocs.io/en/latest/users.html?#operator-manifest-bundle-builds
                "original_parent": None,
                "operator_csv_modifications_url": csv_mod_url.format(build.id),
            })
            build.bundle_pullspec_overrides = {
                "pullspec_replacements": bundle["pullspec_replacements"],
                "update": bundle["update"],
            }
            db.session.commit()
            builds.append(build)
        return builds
| [
"freshmaker.kojiservice.KojiService",
"freshmaker.db.session.commit",
"re.search",
"semver.VersionInfo",
"freshmaker.log.info",
"freshmaker.lightblue.ContainerImage.get_additional_data_from_koji",
"koji.parse_NVR",
"datetime.datetime.utcnow",
"freshmaker.models.Event.get_or_create_from_event",
"se... | [((3228, 3277), 'freshmaker.models.Event.get_or_create_from_event', 'Event.get_or_create_from_event', (['db.session', 'event'], {}), '(db.session, event)\n', (3258, 3277), False, 'from freshmaker.models import ArtifactBuild, ArtifactType, Event\n'), ((11258, 11288), 'freshmaker.kojiservice.KojiService', 'KojiService', (['conf.koji_profile'], {}), '(conf.koji_profile)\n', (11269, 11288), False, 'from freshmaker.kojiservice import KojiService\n'), ((16679, 16709), 'freshmaker.kojiservice.KojiService', 'KojiService', (['conf.koji_profile'], {}), '(conf.koji_profile)\n', (16690, 16709), False, 'from freshmaker.kojiservice import KojiService\n'), ((24058, 24091), 'semver.VersionInfo.parse', 'semver.VersionInfo.parse', (['version'], {}), '(version)\n', (24082, 24091), False, 'import semver\n'), ((29427, 29441), 'kobo.rpmlib.parse_nvr', 'parse_nvr', (['nvr'], {}), '(nvr)\n', (29436, 29441), False, 'from kobo.rpmlib import parse_nvr\n'), ((2459, 2487), 'freshmaker.pyxis.Pyxis', 'Pyxis', (['conf.pyxis_server_url'], {}), '(conf.pyxis_server_url)\n', (2464, 2487), False, 'from freshmaker.pyxis import Pyxis\n'), ((6119, 6135), 'freshmaker.log.warning', 'log.warning', (['msg'], {}), '(msg)\n', (6130, 6135), False, 'from freshmaker import db, conf, log\n'), ((7216, 7232), 'freshmaker.log.warning', 'log.warning', (['msg'], {}), '(msg)\n', (7227, 7232), False, 'from freshmaker import db, conf, log\n'), ((8680, 8696), 'freshmaker.log.warning', 'log.warning', (['msg'], {}), '(msg)\n', (8691, 8696), False, 'from freshmaker import db, conf, log\n'), ((15689, 15708), 'freshmaker.db.session.commit', 'db.session.commit', ([], {}), '()\n', (15706, 15708), False, 'from freshmaker import db, conf, log\n'), ((16493, 16509), 'freshmaker.log.warning', 'log.warning', (['msg'], {}), '(msg)\n', (16504, 16509), False, 'from freshmaker import db, conf, log\n'), ((19212, 19225), 'freshmaker.log.info', 'log.info', (['msg'], {}), '(msg)\n', (19220, 19225), False, 'from freshmaker import db, 
conf, log\n'), ((20828, 20844), 'freshmaker.log.warning', 'log.warning', (['msg'], {}), '(msg)\n', (20839, 20844), False, 'from freshmaker import db, conf, log\n'), ((24448, 24523), 're.search', 're.search', (['"""(?P<fm_suffix>0\\\\.\\\\d+\\\\.(?:p|patched))$"""', 'parsed_version.build'], {}), "('(?P<fm_suffix>0\\\\.\\\\d+\\\\.(?:p|patched))$', parsed_version.build)\n", (24457, 24523), False, 'import re\n'), ((25558, 25596), 'semver.VersionInfo', 'semver.VersionInfo', ([], {}), '(**new_version_dict)\n', (25576, 25596), False, 'import semver\n'), ((32917, 32976), 'freshmaker.lightblue.ContainerImage.get_additional_data_from_koji', 'ContainerImage.get_additional_data_from_koji', (["bundle['nvr']"], {}), "(bundle['nvr'])\n", (32961, 32976), False, 'from freshmaker.lightblue import ContainerImage\n'), ((33871, 33890), 'freshmaker.db.session.commit', 'db.session.commit', ([], {}), '()\n', (33888, 33890), False, 'from freshmaker import db, conf, log\n'), ((5803, 5916), 'freshmaker.log.warning', 'log.warning', (['f"""Image manifest_list_digest not found for original image {nvr} in Pyxis, skip this image"""'], {}), "(\n f'Image manifest_list_digest not found for original image {nvr} in Pyxis, skip this image'\n )\n", (5814, 5916), False, 'from freshmaker import db, conf, log\n'), ((6904, 7016), 'freshmaker.log.warning', 'log.warning', (['f"""Image manifest_list_digest not found for rebuilt image {nvr} in Pyxis, skip this image"""'], {}), "(\n f'Image manifest_list_digest not found for rebuilt image {nvr} in Pyxis, skip this image'\n )\n", (6915, 7016), False, 'from freshmaker import db, conf, log\n'), ((8194, 8267), 'freshmaker.log.info', 'log.info', (['f"""No latest bundle image with the related image of {image_nvr}"""'], {}), "(f'No latest bundle image with the related image of {image_nvr}')\n", (8202, 8267), False, 'from freshmaker import db, conf, log\n'), ((11016, 11050), 'copy.deepcopy', 'copy.deepcopy', (['default_bundle_data'], {}), '(default_bundle_data)\n', 
(11029, 11050), False, 'import copy\n'), ((20598, 20674), 'freshmaker.log.warning', 'log.warning', (['f"""Can\'t find manifest_list_digest for bundle "{nvr}" in Pyxis"""'], {}), '(f\'Can\\\'t find manifest_list_digest for bundle "{nvr}" in Pyxis\')\n', (20609, 20674), False, 'from freshmaker import db, conf, log\n'), ((21620, 21687), 'freshmaker.log.warning', 'log.warning', (['f"""Can\'t find build for a bundle image "{bundle_nvr}\\""""'], {}), '(f\'Can\\\'t find build for a bundle image "{bundle_nvr}"\')\n', (21631, 21687), False, 'from freshmaker import db, conf, log\n'), ((30198, 30206), 'freshmaker.errata.Errata', 'Errata', ([], {}), '()\n', (30204, 30206), False, 'from freshmaker.errata import Errata\n'), ((31019, 31039), 'kobo.rpmlib.parse_nvr', 'parse_nvr', (['build_nvr'], {}), '(build_nvr)\n', (31028, 31039), False, 'from kobo.rpmlib import parse_nvr\n'), ((32391, 32420), 'koji.parse_NVR', 'koji.parse_NVR', (["bundle['nvr']"], {}), "(bundle['nvr'])\n", (32405, 32420), False, 'import koji\n'), ((24170, 24187), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (24185, 24187), False, 'from datetime import datetime\n'), ((31288, 31310), 'kobo.rpmlib.parse_nvr', 'parse_nvr', (['block_build'], {}), '(block_build)\n', (31297, 31310), False, 'from kobo.rpmlib import parse_nvr\n'), ((27420, 27451), 'freshmaker.db.session.query', 'db.session.query', (['ArtifactBuild'], {}), '(ArtifactBuild)\n', (27436, 27451), False, 'from freshmaker import db, conf, log\n'), ((16999, 17030), 'freshmaker.db.session.query', 'db.session.query', (['ArtifactBuild'], {}), '(ArtifactBuild)\n', (17015, 17030), False, 'from freshmaker import db, conf, log\n'), ((21376, 21407), 'freshmaker.db.session.query', 'db.session.query', (['ArtifactBuild'], {}), '(ArtifactBuild)\n', (21392, 21407), False, 'from freshmaker import db, conf, log\n')] |
'''
' Python Regular Expression 正则表达式
'
'''
import re
def test_match():
s = 'hello python Hello'
p = 'hello'
o = re.match(p, s)
print(o)
print(dir(o))
print(o.group()) # 返回匹配的字符串
print(o.span()) # 范围
print(o.start()) # 开始处
print('*' * 30, 'flags参数的使用')
o2 = re.match(p, s, re.L)
print(o2.group()) # 返回匹配的字符串
# 常用字符的使用
def test_match_character():
print('-' * 30, ' . 匹配任意一个字符')
print(re.match('.', 'abv'))
print(re.match('.', '12'))
print(re.match('.', '\n'))
print('-' * 30, ' \d 匹配数字 0-9')
print(re.match('\d', 'abc456'))
print(re.match('\d', '234svd'))
print('-' * 30, ' \D 匹配非数字 0-9')
print(re.match('\D', 'abc456'))
print(re.match('\D', '234svd'))
print('-' * 30, ' \s 匹配空白字符')
print(re.match('\s', '\n12\t'))
print(re.match('\s', '\t'))
print(re.match('\s', 'addd'))
print('-' * 30, ' \S 匹配非空白字符')
print(re.match('\S', '\n12\t'))
print(re.match('\S', '\t'))
print(re.match('\S', 'addd'))
print('-' * 30, ' \w 匹配字母、数字')
print(re.match('\w', 'AB'))
print(re.match('\w', 'ab'))
print(re.match('\w', '12'))
print(re.match('\w', '__'))
print(re.match('\w', '##'))
print('-' * 30, ' \W 匹配非 字母、数字')
print(re.match('\W', 'AB'))
print(re.match('\W', 'ab'))
print(re.match('\W', '12'))
print(re.match('\W', '__'))
print(re.match('\W', '##'))
print('-' * 30, ' \[] 匹配列表中的字符')
print(re.match('[2468]', '22'))
print(re.match('[2468]', '33'))
print(re.match('[2468]', '83'))
print(re.match('[2468]', '38'))
def test_match_phone():
print('-' * 30, ' 匹配手机号')
patten = '\d\d\d\d\d\d\d\d\d\d\d'
print(re.match(patten, '13466669999'))
print(re.match('1[345789]\d\d\d\d\d\d\d\d\d', '13466669999'))
# 限定符
def test_match_qualifier():
print('-' * 30, ' * 匹配零次或多次')
print(re.match('\d*', '123abc')) # 匹配开头的数字
print(re.match('\d*', 'abc'))
print('-' * 30, ' + |匹配一次或多次')
print(re.match('\d+', '123abc')) # 匹配开头的数字
print(re.match('\d+', 'abc'))
print('-' * 30, ' ? |匹配一次或零次')
print(re.match('\d?', '1abc'))
print(re.match('\d?', '123abc')) # 匹配开头的数字
print(re.match('\d?', 'abc'))
print('-' * 30, ' {m} |重复m次')
print(re.match('\d{2}', '123abc')) # 匹配开头2个数字
print(re.match('\d{2}', '12abc'))
print(re.match('\d{2}', '1abc'))
print(re.match('\d{2}', 'abc'))
print('-' * 30, '{m,n}|重复m到n次')
print(re.match('\d{1,3}', '1234abc')) # 匹配开头2个数字
print(re.match('\d{1,3}', '123abc'))
print(re.match('\d{1,3}', '12abc'))
print(re.match('\d{1,3}', '1abc'))
print(re.match('\d{1,3}', 'abc'))
print('-' * 30, '{m,}|至少m次')
print(re.match('\d{2,}', '1234abc')) # 匹配开头2个数字
print(re.match('\d{2,}', '123abc'))
print(re.match('\d{2,}', '12abc'))
print(re.match('\d{2,}', '1abc'))
print(re.match('\d{2,}', 'abc'))
print('-' * 30, '案例1 首字母为大写字符,其他小写字符')
print(re.match('[A-Z][a-z]*', 'abc'))
print(re.match('[A-Z][a-z]*', 'ABC'))
print(re.match('[A-Z][a-z]*', 'Abc'))
print(re.match('[A-Z][a-z]*', 'AbC'))
print('-' * 30, '案例2 有效变量名 字母数字下划线,数字不开头')
print(re.match('[a-zA-Z_][a-zA-Z0-9_]*', 'abc'))
print(re.match('[a-zA-Z_]\w*', 'abc'))
print(re.match('[a-zA-Z_][a-zA-Z0-9_]*', 'abc123'))
print(re.match('[a-zA-Z_]\w*', '123abc'))
print(re.match('[a-zA-Z_]\w*', '_123abc'))
print('-' * 30, '案例2 1-99的数字')
print(re.match('[1-9]\d?', '23abc'))
print(re.match('[1-9]\d?', '100'))
print(re.match('[1-9]\d?', '11'))
print(re.match('[1-9]\d?', '1'))
print(re.match('[1-9]\d?', '0'))
print(re.match('[1-9]\d?', '09'))
print('-' * 30, '案例2 8-20随机密码 大写,小写,下划线,数字')
print(re.match('\w{8,20}', '1234567'))
print(re.match('\w{8,20}', '1234567$$'))
print(re.match('\w{8,20}', '1234567abc_'))
print(re.match('\w{8,20}', '1234567abc#'))
print(re.match('\w{8,20}', '12345678901234567890zx'))
# 转义字符 原生字符
def escape_character():
print('C:\t\d\e')
print('C:\\t\\d\\e')
print(r'C:\t\d\e')
# 边界字符
def boundary():
print('-' * 30, '$ 匹配字符串结尾')
print(re.match('[1-9]\d{4,<EMAIL>', '<EMAIL>'))
print(re.match('[1-9]\d{4,9}@qq.<EMAIL>', '<EMAIL>'))
print(re.match(r'[1-9]\d{4,9}@qq.<EMAIL>$', '<EMAIL>'))
print(re.match(r'[1-9]\d{<EMAIL>$', '<EMAIL>'))
print('-' * 30, ' ^ 匹配字符串开头')
print(re.match(r'^hello.*', 'hello abc'))
print(re.match(r'^hello.*', 'abc hello abc'))
print('-' * 30, ' \b 匹配单词的边界')
print(re.match(r'.*\bab', '123 aabc')) # 单词 ab 开始
print(re.match(r'.*\bab', '123 abcd'))
print(re.match(r'.*\bab', '123 aaa'))
print(re.match(r'.*\bab', '123 abcd cdab'))
print(re.match(r'.*ab\b', '123 abc')) # 单词 ab 结尾
print(re.match(r'.*ab\b', '123 aaa'))
print(re.match(r'.*ab\b', '123 ab'))
print(re.match(r'.*ab\b', '123 cdab'))
print(re.match(r'.*ab\b', '123 abcd cdab'))
def test_search():
print(re.match(r'hello', 'hello python'))
print(re.search(r'hello', 'hello python'))
print(re.match(r'hello', 'python hello'))
print(re.search(r'hello', 'python hello '))
print(re.match('aa|bb|cc', 'aa'))
print(re.match('aa|bb|cc', 'bbb'))
print(re.match('aa|bb|cc', 'ccc'))
print(re.match('aa|bb|cc', 'a bb ccc'))
print(re.search('aa|bb|cc', 'a bb ccc'))
# 多个字符
def test_multi_character():
print('-' * 30, '案例 0-100之间的数字: 0-99 | 100')
print(re.match('[1-9]?\d|100', '1'))
print(re.match('[1-9]?\d|100', '11'))
print(re.match('[1-9]?\d|100', '100'))
print(re.match('[1-9]?\d$|100$', '100'))
print(re.match('[1-9]?\d$|100$', '1000'))
print('-' * 30, '案例 ')
print(re.match('[ab][cd]', 'ab'))
print(re.match('[ab][cd]', 'ac'))
print(re.match('[ab][cd]', 'ad'))
print(re.match('ab|cd', 'abc'))
print(re.match('ab|cd', 'ac'))
# 匹配分组
def test_group():
print('-' * 30, '座机号码 区号{3,4} 号码{5,8} 010-0000 0791-222222')
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '010-10086'))
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '010-88888888'))
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '1111-10086'))
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '1111-88888888'))
print('-' * 30, ' 匹配分组')
o = re.match(r'(\d{3,4})-([1-9]\d{4,7})', '1111-88888888')
print(o)
print(o.group(0), o.group(1), o.group(2))
print(o.groups(), o.groups()[0], o.groups()[1])
print('-' * 30, 'html 标签')
print(re.match(r'<.+><.+>.+</.+></.+>', '<html><a>abc</a></html>'))
print(re.match(r'<.+><.+>.+</.+></.+>', '<html><a>abc</b></html>'))
print(re.match(r'<(.*)><(.*)>.*</\2></\1>', '<html><a>abc</b></html>'))
print(re.match(r'<(.*)><(.*)>.*</\2></\1>', '<html><d>abc</d></html>'))
print('-' * 30, 'html 标签 - 别名')
print(re.match(r'<(?P<k_html>.+)><(?P<k_head>.+)>.*</(?P=k_head)></(?P=k_html)>', '<html><d>abc</d></html>'))
## 搜索与替换
def test_sub():
print('-' * 30, ' 替换')
print(re.sub(r'#.*$', '', '2004-222-23322 # 这是个什么')) # 替换#开头的部分
print(re.sub(r'#\D*', '', '2004-222-23322 # 这是个什么'))
print('-' * 30, ' 替换 subn')
print(re.subn(r'#\D*', '', '2004-222-23322 # 这是个什么'))
print(re.subn(r'#.*$', '', '2004-222-23322 # 这是个什么'))
def test_compile():
print('-' * 30, ' compile的使用')
regex = re.compile(r'\w+') # 匹配字母或数字
print(regex.match('1223dfdf'))
print(regex.match('##1223dfdf'))
def test_findall():
print('-' * 30, ' findall 返回数组')
print(re.findall(r'\w', '##1223dfdf')) # 匹配字母或数字 f
print(re.findall(r'\w+', '## 1223 df df 1'))
print('-' * 30, ' finditer 返回迭代器')
print(re.finditer(r'\w+', '## 1223 df df 1'))
for i in re.finditer(r'\w+', '## 1223 df df 1'):
print(i, i.group())
def test_split():
print('-' * 30, ' split 返回数组')
print(re.split(r'\d+', '123abc123abc'))
print(re.split(r'\d+', '123 abc 123 abc'))
print(re.split(r'\d+', 'abc123 abc 123 abc'))
print(re.split(r'\d+', 'abc 123 abc 123 abc',1))
def greedy_mode():
print('-' * 30, ' 贪婪模式')
result = re.match(r'(.+)(\d+-\d+-\d+)', 'this is my tel: 122-1244-1242')
print(result.group(1))
print(result.group(2))
print('-' * 30, ' 非贪婪模式 尽可能少的匹配')
result = re.match(r'(.+?)(\d+-\d+-\d+)', 'this is my tel: 122-1244-1242')
print(result.group(1))
print(result.group(2))
print('-' * 30, ' 贪婪模式')
print(re.match(r'abc(\d+)', 'abc123456'))
print(re.match(r'abc(\d+?)', 'abc123456'))
if __name__ == '__main__':
# test_match()
# test_match_character()
# test_match_phone()
# test_match_qualifier()
# escape_character()
# boundary()
# test_search()
# test_multi_character()
# test_group()
# test_sub()
# test_compile()
# test_findall()
# test_split()
# greedy_mode()
# <.+><.+>.+</.+></.+>
s = '<link href="../assets/css/app.css?t=20112455" type="text/css" rel="stylesheet">'
mathched = re.findall(r'\S+assets/css/\S+.css\S+"', s)
for m in mathched:
print(m, m.index('.css'))
s = s.replace(m, m[:m.index('.css')] + '.css?t=00000"')
print(s)
| [
"re.subn",
"re.split",
"re.compile",
"re.match",
"re.finditer",
"re.sub",
"re.findall",
"re.search"
] | [((122, 136), 're.match', 're.match', (['p', 's'], {}), '(p, s)\n', (130, 136), False, 'import re\n'), ((284, 304), 're.match', 're.match', (['p', 's', 're.L'], {}), '(p, s, re.L)\n', (292, 304), False, 'import re\n'), ((5937, 5992), 're.match', 're.match', (['"""(\\\\d{3,4})-([1-9]\\\\d{4,7})"""', '"""1111-88888888"""'], {}), "('(\\\\d{3,4})-([1-9]\\\\d{4,7})', '1111-88888888')\n", (5945, 5992), False, 'import re\n'), ((6941, 6959), 're.compile', 're.compile', (['"""\\\\w+"""'], {}), "('\\\\w+')\n", (6951, 6959), False, 'import re\n'), ((7293, 7331), 're.finditer', 're.finditer', (['"""\\\\w+"""', '"""## 1223 df df 1"""'], {}), "('\\\\w+', '## 1223 df df 1')\n", (7304, 7331), False, 'import re\n'), ((7654, 7719), 're.match', 're.match', (['"""(.+)(\\\\d+-\\\\d+-\\\\d+)"""', '"""this is my tel: 122-1244-1242"""'], {}), "('(.+)(\\\\d+-\\\\d+-\\\\d+)', 'this is my tel: 122-1244-1242')\n", (7662, 7719), False, 'import re\n'), ((7815, 7881), 're.match', 're.match', (['"""(.+?)(\\\\d+-\\\\d+-\\\\d+)"""', '"""this is my tel: 122-1244-1242"""'], {}), "('(.+?)(\\\\d+-\\\\d+-\\\\d+)', 'this is my tel: 122-1244-1242')\n", (7823, 7881), False, 'import re\n'), ((8482, 8527), 're.findall', 're.findall', (['"""\\\\S+assets/css/\\\\S+.css\\\\S+\\""""', 's'], {}), '(\'\\\\S+assets/css/\\\\S+.css\\\\S+"\', s)\n', (8492, 8527), False, 'import re\n'), ((418, 438), 're.match', 're.match', (['"""."""', '"""abv"""'], {}), "('.', 'abv')\n", (426, 438), False, 'import re\n'), ((448, 467), 're.match', 're.match', (['"""."""', '"""12"""'], {}), "('.', '12')\n", (456, 467), False, 'import re\n'), ((477, 496), 're.match', 're.match', (['"""."""', '"""\n"""'], {}), "('.', '\\n')\n", (485, 496), False, 'import re\n'), ((541, 566), 're.match', 're.match', (['"""\\\\d"""', '"""abc456"""'], {}), "('\\\\d', 'abc456')\n", (549, 566), False, 'import re\n'), ((575, 600), 're.match', 're.match', (['"""\\\\d"""', '"""234svd"""'], {}), "('\\\\d', '234svd')\n", (583, 600), False, 'import re\n'), 
((645, 670), 're.match', 're.match', (['"""\\\\D"""', '"""abc456"""'], {}), "('\\\\D', 'abc456')\n", (653, 670), False, 'import re\n'), ((679, 704), 're.match', 're.match', (['"""\\\\D"""', '"""234svd"""'], {}), "('\\\\D', '234svd')\n", (687, 704), False, 'import re\n'), ((746, 771), 're.match', 're.match', (['"""\\\\s"""', '"""\n12\t"""'], {}), "('\\\\s', '\\n12\\t')\n", (754, 771), False, 'import re\n'), ((780, 801), 're.match', 're.match', (['"""\\\\s"""', '"""\t"""'], {}), "('\\\\s', '\\t')\n", (788, 801), False, 'import re\n'), ((810, 833), 're.match', 're.match', (['"""\\\\s"""', '"""addd"""'], {}), "('\\\\s', 'addd')\n", (818, 833), False, 'import re\n'), ((876, 901), 're.match', 're.match', (['"""\\\\S"""', '"""\n12\t"""'], {}), "('\\\\S', '\\n12\\t')\n", (884, 901), False, 'import re\n'), ((910, 931), 're.match', 're.match', (['"""\\\\S"""', '"""\t"""'], {}), "('\\\\S', '\\t')\n", (918, 931), False, 'import re\n'), ((940, 963), 're.match', 're.match', (['"""\\\\S"""', '"""addd"""'], {}), "('\\\\S', 'addd')\n", (948, 963), False, 'import re\n'), ((1006, 1027), 're.match', 're.match', (['"""\\\\w"""', '"""AB"""'], {}), "('\\\\w', 'AB')\n", (1014, 1027), False, 'import re\n'), ((1036, 1057), 're.match', 're.match', (['"""\\\\w"""', '"""ab"""'], {}), "('\\\\w', 'ab')\n", (1044, 1057), False, 'import re\n'), ((1066, 1087), 're.match', 're.match', (['"""\\\\w"""', '"""12"""'], {}), "('\\\\w', '12')\n", (1074, 1087), False, 'import re\n'), ((1096, 1117), 're.match', 're.match', (['"""\\\\w"""', '"""__"""'], {}), "('\\\\w', '__')\n", (1104, 1117), False, 'import re\n'), ((1126, 1147), 're.match', 're.match', (['"""\\\\w"""', '"""##"""'], {}), "('\\\\w', '##')\n", (1134, 1147), False, 'import re\n'), ((1192, 1213), 're.match', 're.match', (['"""\\\\W"""', '"""AB"""'], {}), "('\\\\W', 'AB')\n", (1200, 1213), False, 'import re\n'), ((1222, 1243), 're.match', 're.match', (['"""\\\\W"""', '"""ab"""'], {}), "('\\\\W', 'ab')\n", (1230, 1243), False, 'import re\n'), 
((1252, 1273), 're.match', 're.match', (['"""\\\\W"""', '"""12"""'], {}), "('\\\\W', '12')\n", (1260, 1273), False, 'import re\n'), ((1282, 1303), 're.match', 're.match', (['"""\\\\W"""', '"""__"""'], {}), "('\\\\W', '__')\n", (1290, 1303), False, 'import re\n'), ((1312, 1333), 're.match', 're.match', (['"""\\\\W"""', '"""##"""'], {}), "('\\\\W', '##')\n", (1320, 1333), False, 'import re\n'), ((1378, 1402), 're.match', 're.match', (['"""[2468]"""', '"""22"""'], {}), "('[2468]', '22')\n", (1386, 1402), False, 'import re\n'), ((1412, 1436), 're.match', 're.match', (['"""[2468]"""', '"""33"""'], {}), "('[2468]', '33')\n", (1420, 1436), False, 'import re\n'), ((1446, 1470), 're.match', 're.match', (['"""[2468]"""', '"""83"""'], {}), "('[2468]', '83')\n", (1454, 1470), False, 'import re\n'), ((1480, 1504), 're.match', 're.match', (['"""[2468]"""', '"""38"""'], {}), "('[2468]', '38')\n", (1488, 1504), False, 'import re\n'), ((1604, 1635), 're.match', 're.match', (['patten', '"""13466669999"""'], {}), "(patten, '13466669999')\n", (1612, 1635), False, 'import re\n'), ((1645, 1708), 're.match', 're.match', (['"""1[345789]\\\\d\\\\d\\\\d\\\\d\\\\d\\\\d\\\\d\\\\d\\\\d"""', '"""13466669999"""'], {}), "('1[345789]\\\\d\\\\d\\\\d\\\\d\\\\d\\\\d\\\\d\\\\d\\\\d', '13466669999')\n", (1653, 1708), False, 'import re\n'), ((1777, 1803), 're.match', 're.match', (['"""\\\\d*"""', '"""123abc"""'], {}), "('\\\\d*', '123abc')\n", (1785, 1803), False, 'import re\n'), ((1823, 1846), 're.match', 're.match', (['"""\\\\d*"""', '"""abc"""'], {}), "('\\\\d*', 'abc')\n", (1831, 1846), False, 'import re\n'), ((1888, 1914), 're.match', 're.match', (['"""\\\\d+"""', '"""123abc"""'], {}), "('\\\\d+', '123abc')\n", (1896, 1914), False, 'import re\n'), ((1934, 1957), 're.match', 're.match', (['"""\\\\d+"""', '"""abc"""'], {}), "('\\\\d+', 'abc')\n", (1942, 1957), False, 'import re\n'), ((1999, 2023), 're.match', 're.match', (['"""\\\\d?"""', '"""1abc"""'], {}), "('\\\\d?', '1abc')\n", (2007, 2023), 
False, 'import re\n'), ((2032, 2058), 're.match', 're.match', (['"""\\\\d?"""', '"""123abc"""'], {}), "('\\\\d?', '123abc')\n", (2040, 2058), False, 'import re\n'), ((2078, 2101), 're.match', 're.match', (['"""\\\\d?"""', '"""abc"""'], {}), "('\\\\d?', 'abc')\n", (2086, 2101), False, 'import re\n'), ((2143, 2171), 're.match', 're.match', (['"""\\\\d{2}"""', '"""123abc"""'], {}), "('\\\\d{2}', '123abc')\n", (2151, 2171), False, 'import re\n'), ((2192, 2219), 're.match', 're.match', (['"""\\\\d{2}"""', '"""12abc"""'], {}), "('\\\\d{2}', '12abc')\n", (2200, 2219), False, 'import re\n'), ((2228, 2254), 're.match', 're.match', (['"""\\\\d{2}"""', '"""1abc"""'], {}), "('\\\\d{2}', '1abc')\n", (2236, 2254), False, 'import re\n'), ((2263, 2288), 're.match', 're.match', (['"""\\\\d{2}"""', '"""abc"""'], {}), "('\\\\d{2}', 'abc')\n", (2271, 2288), False, 'import re\n'), ((2332, 2363), 're.match', 're.match', (['"""\\\\d{1,3}"""', '"""1234abc"""'], {}), "('\\\\d{1,3}', '1234abc')\n", (2340, 2363), False, 'import re\n'), ((2384, 2414), 're.match', 're.match', (['"""\\\\d{1,3}"""', '"""123abc"""'], {}), "('\\\\d{1,3}', '123abc')\n", (2392, 2414), False, 'import re\n'), ((2423, 2452), 're.match', 're.match', (['"""\\\\d{1,3}"""', '"""12abc"""'], {}), "('\\\\d{1,3}', '12abc')\n", (2431, 2452), False, 'import re\n'), ((2461, 2489), 're.match', 're.match', (['"""\\\\d{1,3}"""', '"""1abc"""'], {}), "('\\\\d{1,3}', '1abc')\n", (2469, 2489), False, 'import re\n'), ((2498, 2525), 're.match', 're.match', (['"""\\\\d{1,3}"""', '"""abc"""'], {}), "('\\\\d{1,3}', 'abc')\n", (2506, 2525), False, 'import re\n'), ((2566, 2596), 're.match', 're.match', (['"""\\\\d{2,}"""', '"""1234abc"""'], {}), "('\\\\d{2,}', '1234abc')\n", (2574, 2596), False, 'import re\n'), ((2617, 2646), 're.match', 're.match', (['"""\\\\d{2,}"""', '"""123abc"""'], {}), "('\\\\d{2,}', '123abc')\n", (2625, 2646), False, 'import re\n'), ((2655, 2683), 're.match', 're.match', (['"""\\\\d{2,}"""', '"""12abc"""'], {}), 
"('\\\\d{2,}', '12abc')\n", (2663, 2683), False, 'import re\n'), ((2692, 2719), 're.match', 're.match', (['"""\\\\d{2,}"""', '"""1abc"""'], {}), "('\\\\d{2,}', '1abc')\n", (2700, 2719), False, 'import re\n'), ((2728, 2754), 're.match', 're.match', (['"""\\\\d{2,}"""', '"""abc"""'], {}), "('\\\\d{2,}', 'abc')\n", (2736, 2754), False, 'import re\n'), ((2805, 2835), 're.match', 're.match', (['"""[A-Z][a-z]*"""', '"""abc"""'], {}), "('[A-Z][a-z]*', 'abc')\n", (2813, 2835), False, 'import re\n'), ((2845, 2875), 're.match', 're.match', (['"""[A-Z][a-z]*"""', '"""ABC"""'], {}), "('[A-Z][a-z]*', 'ABC')\n", (2853, 2875), False, 'import re\n'), ((2885, 2915), 're.match', 're.match', (['"""[A-Z][a-z]*"""', '"""Abc"""'], {}), "('[A-Z][a-z]*', 'Abc')\n", (2893, 2915), False, 'import re\n'), ((2925, 2955), 're.match', 're.match', (['"""[A-Z][a-z]*"""', '"""AbC"""'], {}), "('[A-Z][a-z]*', 'AbC')\n", (2933, 2955), False, 'import re\n'), ((3010, 3051), 're.match', 're.match', (['"""[a-zA-Z_][a-zA-Z0-9_]*"""', '"""abc"""'], {}), "('[a-zA-Z_][a-zA-Z0-9_]*', 'abc')\n", (3018, 3051), False, 'import re\n'), ((3061, 3093), 're.match', 're.match', (['"""[a-zA-Z_]\\\\w*"""', '"""abc"""'], {}), "('[a-zA-Z_]\\\\w*', 'abc')\n", (3069, 3093), False, 'import re\n'), ((3102, 3146), 're.match', 're.match', (['"""[a-zA-Z_][a-zA-Z0-9_]*"""', '"""abc123"""'], {}), "('[a-zA-Z_][a-zA-Z0-9_]*', 'abc123')\n", (3110, 3146), False, 'import re\n'), ((3156, 3191), 're.match', 're.match', (['"""[a-zA-Z_]\\\\w*"""', '"""123abc"""'], {}), "('[a-zA-Z_]\\\\w*', '123abc')\n", (3164, 3191), False, 'import re\n'), ((3200, 3236), 're.match', 're.match', (['"""[a-zA-Z_]\\\\w*"""', '"""_123abc"""'], {}), "('[a-zA-Z_]\\\\w*', '_123abc')\n", (3208, 3236), False, 'import re\n'), ((3279, 3309), 're.match', 're.match', (['"""[1-9]\\\\d?"""', '"""23abc"""'], {}), "('[1-9]\\\\d?', '23abc')\n", (3287, 3309), False, 'import re\n'), ((3318, 3346), 're.match', 're.match', (['"""[1-9]\\\\d?"""', '"""100"""'], {}), 
"('[1-9]\\\\d?', '100')\n", (3326, 3346), False, 'import re\n'), ((3355, 3382), 're.match', 're.match', (['"""[1-9]\\\\d?"""', '"""11"""'], {}), "('[1-9]\\\\d?', '11')\n", (3363, 3382), False, 'import re\n'), ((3391, 3417), 're.match', 're.match', (['"""[1-9]\\\\d?"""', '"""1"""'], {}), "('[1-9]\\\\d?', '1')\n", (3399, 3417), False, 'import re\n'), ((3426, 3452), 're.match', 're.match', (['"""[1-9]\\\\d?"""', '"""0"""'], {}), "('[1-9]\\\\d?', '0')\n", (3434, 3452), False, 'import re\n'), ((3461, 3488), 're.match', 're.match', (['"""[1-9]\\\\d?"""', '"""09"""'], {}), "('[1-9]\\\\d?', '09')\n", (3469, 3488), False, 'import re\n'), ((3545, 3577), 're.match', 're.match', (['"""\\\\w{8,20}"""', '"""1234567"""'], {}), "('\\\\w{8,20}', '1234567')\n", (3553, 3577), False, 'import re\n'), ((3586, 3620), 're.match', 're.match', (['"""\\\\w{8,20}"""', '"""1234567$$"""'], {}), "('\\\\w{8,20}', '1234567$$')\n", (3594, 3620), False, 'import re\n'), ((3629, 3665), 're.match', 're.match', (['"""\\\\w{8,20}"""', '"""1234567abc_"""'], {}), "('\\\\w{8,20}', '1234567abc_')\n", (3637, 3665), False, 'import re\n'), ((3674, 3710), 're.match', 're.match', (['"""\\\\w{8,20}"""', '"""1234567abc#"""'], {}), "('\\\\w{8,20}', '1234567abc#')\n", (3682, 3710), False, 'import re\n'), ((3719, 3766), 're.match', 're.match', (['"""\\\\w{8,20}"""', '"""12345678901234567890zx"""'], {}), "('\\\\w{8,20}', '12345678901234567890zx')\n", (3727, 3766), False, 'import re\n'), ((3933, 3974), 're.match', 're.match', (['"""[1-9]\\\\d{4,<EMAIL>"""', '"""<EMAIL>"""'], {}), "('[1-9]\\\\d{4,<EMAIL>', '<EMAIL>')\n", (3941, 3974), False, 'import re\n'), ((3983, 4030), 're.match', 're.match', (['"""[1-9]\\\\d{4,9}@qq.<EMAIL>"""', '"""<EMAIL>"""'], {}), "('[1-9]\\\\d{4,9}@qq.<EMAIL>', '<EMAIL>')\n", (3991, 4030), False, 'import re\n'), ((4039, 4087), 're.match', 're.match', (['"""[1-9]\\\\d{4,9}@qq.<EMAIL>$"""', '"""<EMAIL>"""'], {}), "('[1-9]\\\\d{4,9}@qq.<EMAIL>$', '<EMAIL>')\n", (4047, 4087), False, 'import re\n'), 
((4097, 4137), 're.match', 're.match', (['"""[1-9]\\\\d{<EMAIL>$"""', '"""<EMAIL>"""'], {}), "('[1-9]\\\\d{<EMAIL>$', '<EMAIL>')\n", (4105, 4137), False, 'import re\n'), ((4179, 4212), 're.match', 're.match', (['"""^hello.*"""', '"""hello abc"""'], {}), "('^hello.*', 'hello abc')\n", (4187, 4212), False, 'import re\n'), ((4223, 4260), 're.match', 're.match', (['"""^hello.*"""', '"""abc hello abc"""'], {}), "('^hello.*', 'abc hello abc')\n", (4231, 4260), False, 'import re\n'), ((4304, 4335), 're.match', 're.match', (['""".*\\\\bab"""', '"""123 aabc"""'], {}), "('.*\\\\bab', '123 aabc')\n", (4312, 4335), False, 'import re\n'), ((4357, 4388), 're.match', 're.match', (['""".*\\\\bab"""', '"""123 abcd"""'], {}), "('.*\\\\bab', '123 abcd')\n", (4365, 4388), False, 'import re\n'), ((4398, 4428), 're.match', 're.match', (['""".*\\\\bab"""', '"""123 aaa"""'], {}), "('.*\\\\bab', '123 aaa')\n", (4406, 4428), False, 'import re\n'), ((4438, 4474), 're.match', 're.match', (['""".*\\\\bab"""', '"""123 abcd cdab"""'], {}), "('.*\\\\bab', '123 abcd cdab')\n", (4446, 4474), False, 'import re\n'), ((4484, 4514), 're.match', 're.match', (['""".*ab\\\\b"""', '"""123 abc"""'], {}), "('.*ab\\\\b', '123 abc')\n", (4492, 4514), False, 'import re\n'), ((4536, 4566), 're.match', 're.match', (['""".*ab\\\\b"""', '"""123 aaa"""'], {}), "('.*ab\\\\b', '123 aaa')\n", (4544, 4566), False, 'import re\n'), ((4576, 4605), 're.match', 're.match', (['""".*ab\\\\b"""', '"""123 ab"""'], {}), "('.*ab\\\\b', '123 ab')\n", (4584, 4605), False, 'import re\n'), ((4615, 4646), 're.match', 're.match', (['""".*ab\\\\b"""', '"""123 cdab"""'], {}), "('.*ab\\\\b', '123 cdab')\n", (4623, 4646), False, 'import re\n'), ((4656, 4692), 're.match', 're.match', (['""".*ab\\\\b"""', '"""123 abcd cdab"""'], {}), "('.*ab\\\\b', '123 abcd cdab')\n", (4664, 4692), False, 'import re\n'), ((4723, 4756), 're.match', 're.match', (['"""hello"""', '"""hello python"""'], {}), "('hello', 'hello python')\n", (4731, 4756), False, 
'import re\n'), ((4767, 4801), 're.search', 're.search', (['"""hello"""', '"""hello python"""'], {}), "('hello', 'hello python')\n", (4776, 4801), False, 'import re\n'), ((4812, 4845), 're.match', 're.match', (['"""hello"""', '"""python hello"""'], {}), "('hello', 'python hello')\n", (4820, 4845), False, 'import re\n'), ((4856, 4891), 're.search', 're.search', (['"""hello"""', '"""python hello """'], {}), "('hello', 'python hello ')\n", (4865, 4891), False, 'import re\n'), ((4902, 4928), 're.match', 're.match', (['"""aa|bb|cc"""', '"""aa"""'], {}), "('aa|bb|cc', 'aa')\n", (4910, 4928), False, 'import re\n'), ((4938, 4965), 're.match', 're.match', (['"""aa|bb|cc"""', '"""bbb"""'], {}), "('aa|bb|cc', 'bbb')\n", (4946, 4965), False, 'import re\n'), ((4975, 5002), 're.match', 're.match', (['"""aa|bb|cc"""', '"""ccc"""'], {}), "('aa|bb|cc', 'ccc')\n", (4983, 5002), False, 'import re\n'), ((5012, 5044), 're.match', 're.match', (['"""aa|bb|cc"""', '"""a bb ccc"""'], {}), "('aa|bb|cc', 'a bb ccc')\n", (5020, 5044), False, 'import re\n'), ((5054, 5087), 're.search', 're.search', (['"""aa|bb|cc"""', '"""a bb ccc"""'], {}), "('aa|bb|cc', 'a bb ccc')\n", (5063, 5087), False, 'import re\n'), ((5181, 5211), 're.match', 're.match', (['"""[1-9]?\\\\d|100"""', '"""1"""'], {}), "('[1-9]?\\\\d|100', '1')\n", (5189, 5211), False, 'import re\n'), ((5220, 5251), 're.match', 're.match', (['"""[1-9]?\\\\d|100"""', '"""11"""'], {}), "('[1-9]?\\\\d|100', '11')\n", (5228, 5251), False, 'import re\n'), ((5260, 5292), 're.match', 're.match', (['"""[1-9]?\\\\d|100"""', '"""100"""'], {}), "('[1-9]?\\\\d|100', '100')\n", (5268, 5292), False, 'import re\n'), ((5301, 5335), 're.match', 're.match', (['"""[1-9]?\\\\d$|100$"""', '"""100"""'], {}), "('[1-9]?\\\\d$|100$', '100')\n", (5309, 5335), False, 'import re\n'), ((5344, 5379), 're.match', 're.match', (['"""[1-9]?\\\\d$|100$"""', '"""1000"""'], {}), "('[1-9]?\\\\d$|100$', '1000')\n", (5352, 5379), False, 'import re\n'), ((5413, 5439), 're.match', 
're.match', (['"""[ab][cd]"""', '"""ab"""'], {}), "('[ab][cd]', 'ab')\n", (5421, 5439), False, 'import re\n'), ((5449, 5475), 're.match', 're.match', (['"""[ab][cd]"""', '"""ac"""'], {}), "('[ab][cd]', 'ac')\n", (5457, 5475), False, 'import re\n'), ((5485, 5511), 're.match', 're.match', (['"""[ab][cd]"""', '"""ad"""'], {}), "('[ab][cd]', 'ad')\n", (5493, 5511), False, 'import re\n'), ((5521, 5545), 're.match', 're.match', (['"""ab|cd"""', '"""abc"""'], {}), "('ab|cd', 'abc')\n", (5529, 5545), False, 'import re\n'), ((5555, 5578), 're.match', 're.match', (['"""ab|cd"""', '"""ac"""'], {}), "('ab|cd', 'ac')\n", (5563, 5578), False, 'import re\n'), ((5679, 5726), 're.match', 're.match', (['"""\\\\d{3,4}-[1-9]\\\\d{4,7}"""', '"""010-10086"""'], {}), "('\\\\d{3,4}-[1-9]\\\\d{4,7}', '010-10086')\n", (5687, 5726), False, 'import re\n'), ((5735, 5785), 're.match', 're.match', (['"""\\\\d{3,4}-[1-9]\\\\d{4,7}"""', '"""010-88888888"""'], {}), "('\\\\d{3,4}-[1-9]\\\\d{4,7}', '010-88888888')\n", (5743, 5785), False, 'import re\n'), ((5794, 5842), 're.match', 're.match', (['"""\\\\d{3,4}-[1-9]\\\\d{4,7}"""', '"""1111-10086"""'], {}), "('\\\\d{3,4}-[1-9]\\\\d{4,7}', '1111-10086')\n", (5802, 5842), False, 'import re\n'), ((5851, 5902), 're.match', 're.match', (['"""\\\\d{3,4}-[1-9]\\\\d{4,7}"""', '"""1111-88888888"""'], {}), "('\\\\d{3,4}-[1-9]\\\\d{4,7}', '1111-88888888')\n", (5859, 5902), False, 'import re\n'), ((6134, 6193), 're.match', 're.match', (['"""<.+><.+>.+</.+></.+>"""', '"""<html><a>abc</a></html>"""'], {}), "('<.+><.+>.+</.+></.+>', '<html><a>abc</a></html>')\n", (6142, 6193), False, 'import re\n'), ((6204, 6263), 're.match', 're.match', (['"""<.+><.+>.+</.+></.+>"""', '"""<html><a>abc</b></html>"""'], {}), "('<.+><.+>.+</.+></.+>', '<html><a>abc</b></html>')\n", (6212, 6263), False, 'import re\n'), ((6274, 6339), 're.match', 're.match', (['"""<(.*)><(.*)>.*</\\\\2></\\\\1>"""', '"""<html><a>abc</b></html>"""'], {}), "('<(.*)><(.*)>.*</\\\\2></\\\\1>', 
'<html><a>abc</b></html>')\n", (6282, 6339), False, 'import re\n'), ((6348, 6413), 're.match', 're.match', (['"""<(.*)><(.*)>.*</\\\\2></\\\\1>"""', '"""<html><d>abc</d></html>"""'], {}), "('<(.*)><(.*)>.*</\\\\2></\\\\1>', '<html><d>abc</d></html>')\n", (6356, 6413), False, 'import re\n'), ((6456, 6561), 're.match', 're.match', (['"""<(?P<k_html>.+)><(?P<k_head>.+)>.*</(?P=k_head)></(?P=k_html)>"""', '"""<html><d>abc</d></html>"""'], {}), "('<(?P<k_html>.+)><(?P<k_head>.+)>.*</(?P=k_head)></(?P=k_html)>',\n '<html><d>abc</d></html>')\n", (6464, 6561), False, 'import re\n'), ((6620, 6664), 're.sub', 're.sub', (['"""#.*$"""', '""""""', '"""2004-222-23322 # 这是个什么"""'], {}), "('#.*$', '', '2004-222-23322 # 这是个什么')\n", (6626, 6664), False, 'import re\n'), ((6687, 6732), 're.sub', 're.sub', (['"""#\\\\D*"""', '""""""', '"""2004-222-23322 # 这是个什么"""'], {}), "('#\\\\D*', '', '2004-222-23322 # 这是个什么')\n", (6693, 6732), False, 'import re\n'), ((6772, 6818), 're.subn', 're.subn', (['"""#\\\\D*"""', '""""""', '"""2004-222-23322 # 这是个什么"""'], {}), "('#\\\\D*', '', '2004-222-23322 # 这是个什么')\n", (6779, 6818), False, 'import re\n'), ((6828, 6873), 're.subn', 're.subn', (['"""#.*$"""', '""""""', '"""2004-222-23322 # 这是个什么"""'], {}), "('#.*$', '', '2004-222-23322 # 这是个什么')\n", (6835, 6873), False, 'import re\n'), ((7104, 7135), 're.findall', 're.findall', (['"""\\\\w"""', '"""##1223dfdf"""'], {}), "('\\\\w', '##1223dfdf')\n", (7114, 7135), False, 'import re\n'), ((7158, 7195), 're.findall', 're.findall', (['"""\\\\w+"""', '"""## 1223 df df 1"""'], {}), "('\\\\w+', '## 1223 df df 1')\n", (7168, 7195), False, 'import re\n'), ((7242, 7280), 're.finditer', 're.finditer', (['"""\\\\w+"""', '"""## 1223 df df 1"""'], {}), "('\\\\w+', '## 1223 df df 1')\n", (7253, 7280), False, 'import re\n'), ((7418, 7450), 're.split', 're.split', (['"""\\\\d+"""', '"""123abc123abc"""'], {}), "('\\\\d+', '123abc123abc')\n", (7426, 7450), False, 'import re\n'), ((7460, 7495), 're.split', 're.split', 
(['"""\\\\d+"""', '"""123 abc 123 abc"""'], {}), "('\\\\d+', '123 abc 123 abc')\n", (7468, 7495), False, 'import re\n'), ((7505, 7543), 're.split', 're.split', (['"""\\\\d+"""', '"""abc123 abc 123 abc"""'], {}), "('\\\\d+', 'abc123 abc 123 abc')\n", (7513, 7543), False, 'import re\n'), ((7553, 7595), 're.split', 're.split', (['"""\\\\d+"""', '"""abc 123 abc 123 abc"""', '(1)'], {}), "('\\\\d+', 'abc 123 abc 123 abc', 1)\n", (7561, 7595), False, 'import re\n'), ((7966, 8000), 're.match', 're.match', (['"""abc(\\\\d+)"""', '"""abc123456"""'], {}), "('abc(\\\\d+)', 'abc123456')\n", (7974, 8000), False, 'import re\n'), ((8010, 8045), 're.match', 're.match', (['"""abc(\\\\d+?)"""', '"""abc123456"""'], {}), "('abc(\\\\d+?)', 'abc123456')\n", (8018, 8045), False, 'import re\n')] |
#! /usr/bin/env python2
# -*- coding: utf8 -*-
from subprocess import check_output
def get_pass():
return check_output("pass gmail/me", shell=True).strip("\n")
| [
"subprocess.check_output"
] | [((112, 153), 'subprocess.check_output', 'check_output', (['"""pass gmail/me"""'], {'shell': '(True)'}), "('pass gmail/me', shell=True)\n", (124, 153), False, 'from subprocess import check_output\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-12 17:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('menuCreate', '0001_initial'),
('menu', '0002_remove_menu_slug'),
]
operations = [
migrations.CreateModel(
name='MenuResponseModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comments', models.CharField(max_length=200)),
('date', models.DateField(auto_now_add=True)),
('MenuID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menuCreate.MenuCreateModel')),
('option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.Menu')),
],
),
]
| [
"django.db.models.DateField",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((495, 588), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (511, 588), False, 'from django.db import migrations, models\n'), ((616, 648), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (632, 648), False, 'from django.db import migrations, models\n'), ((676, 711), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (692, 711), False, 'from django.db import migrations, models\n'), ((741, 841), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""menuCreate.MenuCreateModel"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'menuCreate.MenuCreateModel')\n", (758, 841), False, 'from django.db import migrations, models\n'), ((866, 944), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""menu.Menu"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='menu.Menu')\n", (883, 944), False, 'from django.db import migrations, models\n')] |
"""
S3AIO Class
Array access to a single S3 object
"""
from __future__ import absolute_import
import SharedArray as sa
import zstd
from itertools import repeat, product
import numpy as np
from pathos.multiprocessing import ProcessingPool
from six.moves import zip
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from .s3io import S3IO, generate_array_name
class S3AIO(object):
    """Array-style access (point/slice reads) to a single S3 object.

    Wraps the byte-level :class:`S3IO` store with N-dimensional index and
    slice semantics.  When ``enable_compression`` is True, byte-range reads
    are not possible, so whole objects are downloaded and zstd-decompressed
    before slicing.
    """

    def __init__(self, enable_compression=True, enable_s3=True, file_path=None, num_workers=30):
        """Initialise the S3 array IO interface.

        :param bool enable_compression: Whether stored objects are zstd-compressed.
            True: whole-object download + decompress on every read.
            False: reads use S3 byte ranges.
        :param bool enable_s3: Flag to store objects in s3 or disk.
            True: store in S3
            False: store on disk (for testing purposes)
        :param str file_path: The root directory for the emulated s3 buckets when enable_s3 is set to False.
        :param int num_workers: The number of workers for parallel IO.
        """
        self.s3io = S3IO(enable_s3, file_path, num_workers)
        # Worker pool used by get_slice_mp for parallel block reads.
        self.pool = ProcessingPool(num_workers)
        self.enable_compression = enable_compression

    def to_1d(self, index, shape):
        """Converts nD index to 1D index (C-order flattening).

        :param tuple index: N-D Index to be converted.
        :param tuple shape: Shape to be used for conversion.
        :return: Returns the 1D index.
        """
        return np.ravel_multi_index(index, shape)

    def to_nd(self, index, shape):
        """Converts 1D index to nD index.

        :param tuple index: 1D Index to be converted.
        :param tuple shape: Shape to be used for conversion.
        :return: Returns the ND index.
        """
        return np.unravel_index(index, shape)

    def get_point(self, index_point, shape, dtype, s3_bucket, s3_key):
        """Gets a single point of the nd array stored in S3.

        :param tuple index_point: Index of the point to be retrieved.
        :param tuple shape: Shape of the stored data.
        :param numpy.dtype dtype: dtype of the stored data.
        :param str s3_bucket: S3 bucket name
        :param str s3_key: S3 key name
        :return: Returns the point data (1-element numpy array).
        """
        item_size = np.dtype(dtype).itemsize
        # Byte offset of the point in the flattened (C-order) array.
        idx = self.to_1d(index_point, shape) * item_size
        if self.enable_compression:
            # Compressed objects cannot be range-read: fetch the whole
            # object, decompress, then slice out the wanted bytes.
            b = self.s3io.get_bytes(s3_bucket, s3_key)
            cctx = zstd.ZstdDecompressor()
            b = cctx.decompress(b)[idx:idx + item_size]
        else:
            b = self.s3io.get_byte_range(s3_bucket, s3_key, idx, idx + item_size)
        a = np.frombuffer(b, dtype=dtype, count=-1, offset=0)
        return a

    def cdims(self, slices, shape):
        # For each dimension, True when its slice spans the full extent with
        # unit step, i.e. that dimension is stored contiguously in C-order.
        return [sl.start == 0 and sl.stop == sh and (sl.step is None or sl.step == 1)
                for sl, sh in zip(slices, shape)]

    def get_slice(self, array_slice, shape, dtype, s3_bucket, s3_key):  # pylint: disable=too-many-locals
        """Gets a slice of the nd array stored in S3.

        Uses S3 byte-range reads when compression is off; otherwise
        delegates to :meth:`get_slice_by_bbox` (whole-object download).

        :param tuple array_slice: tuple of slices to retrieve.
        :param tuple shape: Shape of the stored data.
        :param numpy.dtype dtype: dtype of the stored data.
        :param str s3_bucket: S3 bucket name
        :param str s3_key: S3 key name
        :return: Returns the data slice.
        """
        # convert array_slice into into sub-slices of maximum contiguous blocks
        # Todo:
        #   - parallelise reads and writes
        #     - option 1. get memory rows in parallel and merge
        #     - option 2. smarter byte range subsets depending on:
        #       - data size
        #       - data contiguity
        if self.enable_compression:
            return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)

        # truncate array_slice to shape
        # array_slice = [slice(max(0, s.start) - min(sh, s.stop)) for s, sh in zip(array_sliced, shape)]
        array_slice = [slice(max(0, s.start), min(sh, s.stop)) for s, sh in zip(array_slice, shape)]

        # Find the longest run of trailing dimensions that can be read as one
        # contiguous byte range per outer cell.
        cdim = self.cdims(array_slice, shape)

        try:
            end = cdim[::-1].index(False) + 1
        except ValueError:
            # All dimensions fully covered: the entire slice is contiguous.
            end = len(shape)

        start = len(shape) - end
        outer = array_slice[:-end]
        outer_ranges = [range(s.start, s.stop) for s in outer]
        # One (outer cell, trailing sub-slice) pair per byte-range read.
        outer_cells = list(product(*outer_ranges))
        blocks = list(zip(outer_cells, repeat(array_slice[start:])))

        item_size = np.dtype(dtype).itemsize
        results = []
        for cell, sub_range in blocks:
            # print(cell, sub_range)
            # First/last element of this block in flattened byte offsets.
            s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
            s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
            # print(s3_start, s3_end)
            data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)
            results.append((cell, sub_range, data))

        result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)
        offset = [s.start for s in array_slice]
        for cell, sub_range, data in results:
            # Shift absolute indices into result-array coordinates.
            t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
                 zip(cell + tuple(sub_range), offset)]
            if data.dtype != dtype:
                # Reinterpret the raw byte buffer as the target dtype.
                data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
            result[t] = data.reshape([s.stop - s.start for s in sub_range])

        return result

    def get_slice_mp(self, array_slice, shape, dtype, s3_bucket, s3_key):  # pylint: disable=too-many-locals
        """Gets a slice of the nd array stored in S3 in parallel.

        Workers write their blocks directly into a shared-memory array;
        falls back to :meth:`get_slice_by_bbox` when compression is on.

        :param tuple array_slice: tuple of slices to retrieve.
        :param tuple shape: Shape of the stored data.
        :param numpy.dtype dtype: dtype of the stored data.
        :param str s3_bucket: S3 bucket name
        :param str s3_key: S3 key name
        :return: Returns the data slice.
        """
        # pylint: disable=too-many-locals
        def work_get_slice(block, array_name, offset, s3_bucket, s3_key, shape, dtype):
            # Runs in a pool worker: attach to the shared result array and
            # fill in the block this worker is responsible for.
            result = sa.attach(array_name)
            cell, sub_range = block

            item_size = np.dtype(dtype).itemsize
            s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
            s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
            data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)

            t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
                 zip(cell + tuple(sub_range), offset)]

            if data.dtype != dtype:
                data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)

            # data = data.reshape([s.stop - s.start for s in sub_range])
            result[t] = data.reshape([s.stop - s.start for s in sub_range])

        if self.enable_compression:
            return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)

        # Same contiguous-block decomposition as get_slice.
        cdim = self.cdims(array_slice, shape)

        try:
            end = cdim[::-1].index(False) + 1
        except ValueError:
            end = len(shape)

        start = len(shape) - end
        outer = array_slice[:-end]
        outer_ranges = [range(s.start, s.stop) for s in outer]
        outer_cells = list(product(*outer_ranges))
        blocks = list(zip(outer_cells, repeat(array_slice[start:])))
        offset = [s.start for s in array_slice]

        # Shared-memory array so pool workers can write results in place.
        array_name = generate_array_name('S3AIO')
        sa.create(array_name, shape=[s.stop - s.start for s in array_slice], dtype=dtype)
        shared_array = sa.attach(array_name)

        self.pool.map(work_get_slice, blocks, repeat(array_name), repeat(offset), repeat(s3_bucket),
                      repeat(s3_key), repeat(shape), repeat(dtype))

        # Unlink the named segment; the attached view stays valid until
        # shared_array is garbage collected.
        sa.delete(array_name)
        return shared_array

    def get_slice_by_bbox(self, array_slice, shape, dtype, s3_bucket, s3_key):  # pylint: disable=too-many-locals
        """Gets a slice of the nd array stored in S3 by bounding box.

        Downloads the whole object (decompressing if required), keeps only
        the bounding-box byte range, then assembles the blocks locally.

        :param tuple array_slice: tuple of slices to retrieve.
        :param tuple shape: Shape of the stored data.
        :param numpy.dtype dtype: dtype of the stored data.
        :param str s3_bucket: S3 bucket name
        :param str s3_key: S3 key name
        :return: Returns the data slice.
        """
        # Todo:
        #   - parallelise reads and writes
        #     - option 1. use get_byte_range_mp
        #     - option 2. smarter byte range subsets depending on:
        #       - data size
        #       - data contiguity
        item_size = np.dtype(dtype).itemsize
        # Byte extent of the slice's bounding box in the flattened array.
        s3_begin = (np.ravel_multi_index(tuple([s.start for s in array_slice]), shape)) * item_size
        s3_end = (np.ravel_multi_index(tuple([s.stop - 1 for s in array_slice]), shape) + 1) * item_size

        # if s3_end-s3_begin <= 5*1024*1024:
        #     d = self.s3io.get_byte_range(s3_bucket, s3_key, s3_begin, s3_end)
        # else:
        #     d = self.s3io.get_byte_range_mp(s3_bucket, s3_key, s3_begin, s3_end, 5*1024*1024)
        d = self.s3io.get_bytes(s3_bucket, s3_key)
        if self.enable_compression:
            cctx = zstd.ZstdDecompressor()
            d = cctx.decompress(d)
        d = np.frombuffer(d, dtype=np.uint8, count=-1, offset=0)
        # Keep only the bounding box; block offsets below are relative to it.
        d = d[s3_begin:s3_end]

        # Same contiguous-block decomposition as get_slice.
        cdim = self.cdims(array_slice, shape)

        try:
            end = cdim[::-1].index(False) + 1
        except ValueError:
            end = len(shape)

        start = len(shape) - end
        outer = array_slice[:-end]
        outer_ranges = [range(s.start, s.stop) for s in outer]
        outer_cells = list(product(*outer_ranges))
        blocks = list(zip(outer_cells, repeat(array_slice[start:])))

        item_size = np.dtype(dtype).itemsize
        results = []
        for cell, sub_range in blocks:
            s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
            s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
            # Slice out of the in-memory buffer (offsets relative to bbox).
            data = d[s3_start - s3_begin:s3_end - s3_begin]
            results.append((cell, sub_range, data))

        result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)
        offset = [s.start for s in array_slice]
        for cell, sub_range, data in results:
            t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
                 zip(cell + tuple(sub_range), offset)]
            if data.dtype != dtype:
                data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
            result[t] = data.reshape([s.stop - s.start for s in sub_range])

        return result
| [
"SharedArray.create",
"numpy.ravel_multi_index",
"itertools.product",
"zstd.ZstdDecompressor",
"SharedArray.delete",
"numpy.empty",
"numpy.unravel_index",
"numpy.frombuffer",
"numpy.dtype",
"SharedArray.attach",
"six.moves.zip",
"pathos.multiprocessing.ProcessingPool",
"itertools.repeat"
] | [((998, 1025), 'pathos.multiprocessing.ProcessingPool', 'ProcessingPool', (['num_workers'], {}), '(num_workers)\n', (1012, 1025), False, 'from pathos.multiprocessing import ProcessingPool\n'), ((1340, 1374), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['index', 'shape'], {}), '(index, shape)\n', (1360, 1374), True, 'import numpy as np\n'), ((1635, 1665), 'numpy.unravel_index', 'np.unravel_index', (['index', 'shape'], {}), '(index, shape)\n', (1651, 1665), True, 'import numpy as np\n'), ((2551, 2600), 'numpy.frombuffer', 'np.frombuffer', (['b'], {'dtype': 'dtype', 'count': '(-1)', 'offset': '(0)'}), '(b, dtype=dtype, count=-1, offset=0)\n', (2564, 2600), True, 'import numpy as np\n'), ((4980, 5044), 'numpy.empty', 'np.empty', (['[(s.stop - s.start) for s in array_slice]'], {'dtype': 'dtype'}), '([(s.stop - s.start) for s in array_slice], dtype=dtype)\n', (4988, 5044), True, 'import numpy as np\n'), ((7656, 7743), 'SharedArray.create', 'sa.create', (['array_name'], {'shape': '[(s.stop - s.start) for s in array_slice]', 'dtype': 'dtype'}), '(array_name, shape=[(s.stop - s.start) for s in array_slice],\n dtype=dtype)\n', (7665, 7743), True, 'import SharedArray as sa\n'), ((7761, 7782), 'SharedArray.attach', 'sa.attach', (['array_name'], {}), '(array_name)\n', (7770, 7782), True, 'import SharedArray as sa\n'), ((7962, 7983), 'SharedArray.delete', 'sa.delete', (['array_name'], {}), '(array_name)\n', (7971, 7983), True, 'import SharedArray as sa\n'), ((9411, 9463), 'numpy.frombuffer', 'np.frombuffer', (['d'], {'dtype': 'np.uint8', 'count': '(-1)', 'offset': '(0)'}), '(d, dtype=np.uint8, count=-1, offset=0)\n', (9424, 9463), True, 'import numpy as np\n'), ((10370, 10434), 'numpy.empty', 'np.empty', (['[(s.stop - s.start) for s in array_slice]'], {'dtype': 'dtype'}), '([(s.stop - s.start) for s in array_slice], dtype=dtype)\n', (10378, 10434), True, 'import numpy as np\n'), ((2171, 2186), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2179, 2186), 
True, 'import numpy as np\n'), ((2363, 2386), 'zstd.ZstdDecompressor', 'zstd.ZstdDecompressor', ([], {}), '()\n', (2384, 2386), False, 'import zstd\n'), ((4332, 4354), 'itertools.product', 'product', (['*outer_ranges'], {}), '(*outer_ranges)\n', (4339, 4354), False, 'from itertools import repeat, product\n'), ((4445, 4460), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (4453, 4460), True, 'import numpy as np\n'), ((6179, 6200), 'SharedArray.attach', 'sa.attach', (['array_name'], {}), '(array_name)\n', (6188, 6200), True, 'import SharedArray as sa\n'), ((7456, 7478), 'itertools.product', 'product', (['*outer_ranges'], {}), '(*outer_ranges)\n', (7463, 7478), False, 'from itertools import repeat, product\n'), ((7830, 7848), 'itertools.repeat', 'repeat', (['array_name'], {}), '(array_name)\n', (7836, 7848), False, 'from itertools import repeat, product\n'), ((7850, 7864), 'itertools.repeat', 'repeat', (['offset'], {}), '(offset)\n', (7856, 7864), False, 'from itertools import repeat, product\n'), ((7866, 7883), 'itertools.repeat', 'repeat', (['s3_bucket'], {}), '(s3_bucket)\n', (7872, 7883), False, 'from itertools import repeat, product\n'), ((7907, 7921), 'itertools.repeat', 'repeat', (['s3_key'], {}), '(s3_key)\n', (7913, 7921), False, 'from itertools import repeat, product\n'), ((7923, 7936), 'itertools.repeat', 'repeat', (['shape'], {}), '(shape)\n', (7929, 7936), False, 'from itertools import repeat, product\n'), ((7938, 7951), 'itertools.repeat', 'repeat', (['dtype'], {}), '(dtype)\n', (7944, 7951), False, 'from itertools import repeat, product\n'), ((8763, 8778), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (8771, 8778), True, 'import numpy as np\n'), ((9339, 9362), 'zstd.ZstdDecompressor', 'zstd.ZstdDecompressor', ([], {}), '()\n', (9360, 9362), False, 'import zstd\n'), ((9818, 9840), 'itertools.product', 'product', (['*outer_ranges'], {}), '(*outer_ranges)\n', (9825, 9840), False, 'from itertools import repeat, product\n'), ((9931, 
9946), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (9939, 9946), True, 'import numpy as np\n'), ((2771, 2789), 'six.moves.zip', 'zip', (['slices', 'shape'], {}), '(slices, shape)\n', (2774, 2789), False, 'from six.moves import zip\n'), ((3984, 4007), 'six.moves.zip', 'zip', (['array_slice', 'shape'], {}), '(array_slice, shape)\n', (3987, 4007), False, 'from six.moves import zip\n'), ((4395, 4422), 'itertools.repeat', 'repeat', (['array_slice[start:]'], {}), '(array_slice[start:])\n', (4401, 4422), False, 'from itertools import repeat, product\n'), ((5347, 5399), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'dtype', 'count': '(-1)', 'offset': '(0)'}), '(data, dtype=dtype, count=-1, offset=0)\n', (5360, 5399), True, 'import numpy as np\n'), ((6262, 6277), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (6270, 6277), True, 'import numpy as np\n'), ((6801, 6853), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'dtype', 'count': '(-1)', 'offset': '(0)'}), '(data, dtype=dtype, count=-1, offset=0)\n', (6814, 6853), True, 'import numpy as np\n'), ((7519, 7546), 'itertools.repeat', 'repeat', (['array_slice[start:]'], {}), '(array_slice[start:])\n', (7525, 7546), False, 'from itertools import repeat, product\n'), ((9881, 9908), 'itertools.repeat', 'repeat', (['array_slice[start:]'], {}), '(array_slice[start:])\n', (9887, 9908), False, 'from itertools import repeat, product\n'), ((10737, 10789), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'dtype', 'count': '(-1)', 'offset': '(0)'}), '(data, dtype=dtype, count=-1, offset=0)\n', (10750, 10789), True, 'import numpy as np\n')] |
import pickle
from unittest import mock
from nose2.tools.params import params
import numpy as np
import tensorflow as tf
from garage.tf.envs import TfEnv
from garage.tf.policies import GaussianMLPPolicyWithModel
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.models import SimpleGaussianMLPModel
class TestGaussianMLPPolicyWithModel(TfGraphTestCase):
    """Tests for GaussianMLPPolicyWithModel against a stubbed model.

    ``SimpleGaussianMLPModel`` is patched in so the policy produces
    deterministic outputs (mean and log_std filled with 0.5, sampled
    action filled with 0.75), allowing exact assertions.
    """

    @params(
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    )
    def test_get_action(self, obs_dim, action_dim):
        """get_action/get_actions yield valid actions and dist params."""
        env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                        'gaussian_mlp_policy_with_model.GaussianMLPModel'),
                        new=SimpleGaussianMLPModel):
            policy = GaussianMLPPolicyWithModel(env_spec=env.spec)

        env.reset()
        obs, _, _, _ = env.step(1)

        action, prob = policy.get_action(obs)

        expected_action = np.full(action_dim, 0.75)
        expected_mean = np.full(action_dim, 0.5)
        expected_log_std = np.full(action_dim, 0.5)

        assert env.action_space.contains(action)
        assert np.array_equal(action, expected_action)
        assert np.array_equal(prob['mean'], expected_mean)
        assert np.array_equal(prob['log_std'], expected_log_std)

        actions, probs = policy.get_actions([obs, obs, obs])
        for action, mean, log_std in zip(actions, probs['mean'],
                                         probs['log_std']):
            assert env.action_space.contains(action)
            assert np.array_equal(action, expected_action)
            # Bug fix: previously asserted against `prob` (the single-action
            # result) instead of the per-sample values yielded by the loop.
            assert np.array_equal(mean, expected_mean)
            assert np.array_equal(log_std, expected_log_std)

    @params(
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    )
    def test_dist_info_sym(self, obs_dim, action_dim):
        """dist_info_sym builds a symbolic dist matching the stub's params."""
        env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                        'gaussian_mlp_policy_with_model.GaussianMLPModel'),
                        new=SimpleGaussianMLPModel):
            policy = GaussianMLPPolicyWithModel(env_spec=env.spec)

        env.reset()
        obs, _, _, _ = env.step(1)

        obs_dim = env.spec.observation_space.flat_dim
        obs_ph = tf.placeholder(tf.float32, shape=(None, obs_dim))

        dist1_sym = policy.dist_info_sym(obs_ph, name='p1_sym')

        expected_mean = np.full(action_dim, 0.5)
        expected_log_std = np.full(action_dim, 0.5)

        prob = self.sess.run(dist1_sym, feed_dict={obs_ph: [obs.flatten()]})

        assert np.array_equal(prob['mean'], expected_mean)
        assert np.array_equal(prob['log_std'], expected_log_std)

    @params(
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    )
    def test_is_pickleable(self, obs_dim, action_dim):
        """A pickled/unpickled policy reproduces the same outputs."""
        env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                        'gaussian_mlp_policy_with_model.GaussianMLPModel'),
                        new=SimpleGaussianMLPModel):
            policy = GaussianMLPPolicyWithModel(env_spec=env.spec)

        env.reset()
        obs, _, _, _ = env.step(1)

        action1, prob1 = policy.get_action(obs)

        p = pickle.dumps(policy)
        # Fresh graph so the unpickled policy rebuilds its own variables.
        with tf.Session(graph=tf.Graph()):
            policy_pickled = pickle.loads(p)
            action2, prob2 = policy_pickled.get_action(obs)

        assert env.action_space.contains(action1)
        assert np.array_equal(action1, action2)
        assert np.array_equal(prob1['mean'], prob2['mean'])
        assert np.array_equal(prob1['log_std'], prob2['log_std'])
| [
"tensorflow.Graph",
"pickle.dumps",
"tensorflow.placeholder",
"numpy.array_equal",
"nose2.tools.params.params",
"tests.fixtures.envs.dummy.DummyBoxEnv",
"pickle.loads",
"numpy.full",
"garage.tf.policies.GaussianMLPPolicyWithModel",
"unittest.mock.patch"
] | [((426, 532), 'nose2.tools.params.params', 'params', (['((1,), (1,))', '((1,), (2,))', '((2,), (2,))', '((1, 1), (1, 1))', '((1, 1), (2, 2))', '((2, 2), (2, 2))'], {}), '(((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1),\n (2, 2)), ((2, 2), (2, 2)))\n', (432, 532), False, 'from nose2.tools.params import params\n'), ((1882, 1988), 'nose2.tools.params.params', 'params', (['((1,), (1,))', '((1,), (2,))', '((2,), (2,))', '((1, 1), (1, 1))', '((1, 1), (2, 2))', '((2, 2), (2, 2))'], {}), '(((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1),\n (2, 2)), ((2, 2), (2, 2)))\n', (1888, 1988), False, 'from nose2.tools.params import params\n'), ((2972, 3078), 'nose2.tools.params.params', 'params', (['((1,), (1,))', '((1,), (2,))', '((2,), (2,))', '((1, 1), (1, 1))', '((1, 1), (2, 2))', '((2, 2), (2, 2))'], {}), '(((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1),\n (2, 2)), ((2, 2), (2, 2)))\n', (2978, 3078), False, 'from nose2.tools.params import params\n'), ((1089, 1114), 'numpy.full', 'np.full', (['action_dim', '(0.75)'], {}), '(action_dim, 0.75)\n', (1096, 1114), True, 'import numpy as np\n'), ((1139, 1163), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (1146, 1163), True, 'import numpy as np\n'), ((1191, 1215), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (1198, 1215), True, 'import numpy as np\n'), ((1281, 1320), 'numpy.array_equal', 'np.array_equal', (['action', 'expected_action'], {}), '(action, expected_action)\n', (1295, 1320), True, 'import numpy as np\n'), ((1336, 1379), 'numpy.array_equal', 'np.array_equal', (["prob['mean']", 'expected_mean'], {}), "(prob['mean'], expected_mean)\n", (1350, 1379), True, 'import numpy as np\n'), ((1395, 1444), 'numpy.array_equal', 'np.array_equal', (["prob['log_std']", 'expected_log_std'], {}), "(prob['log_std'], expected_log_std)\n", (1409, 1444), True, 'import numpy as np\n'), ((2546, 2595), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, obs_dim)'}), '(tf.float32, shape=(None, obs_dim))\n', (2560, 2595), True, 'import tensorflow as tf\n'), ((2686, 2710), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (2693, 2710), True, 'import numpy as np\n'), ((2738, 2762), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (2745, 2762), True, 'import numpy as np\n'), ((2857, 2900), 'numpy.array_equal', 'np.array_equal', (["prob['mean']", 'expected_mean'], {}), "(prob['mean'], expected_mean)\n", (2871, 2900), True, 'import numpy as np\n'), ((2916, 2965), 'numpy.array_equal', 'np.array_equal', (["prob['log_std']", 'expected_log_std'], {}), "(prob['log_std'], expected_log_std)\n", (2930, 2965), True, 'import numpy as np\n'), ((3680, 3700), 'pickle.dumps', 'pickle.dumps', (['policy'], {}), '(policy)\n', (3692, 3700), False, 'import pickle\n'), ((3915, 3947), 'numpy.array_equal', 'np.array_equal', (['action1', 'action2'], {}), '(action1, action2)\n', (3929, 3947), True, 'import numpy as np\n'), ((3963, 4007), 'numpy.array_equal', 'np.array_equal', (["prob1['mean']", "prob2['mean']"], {}), "(prob1['mean'], prob2['mean'])\n", (3977, 4007), True, 'import numpy as np\n'), ((4023, 4073), 'numpy.array_equal', 'np.array_equal', (["prob1['log_std']", "prob2['log_std']"], {}), "(prob1['log_std'], prob2['log_std'])\n", (4037, 4073), True, 'import numpy as np\n'), ((662, 713), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {'obs_dim': 'obs_dim', 'action_dim': 'action_dim'}), '(obs_dim=obs_dim, action_dim=action_dim)\n', (673, 713), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((728, 841), 'unittest.mock.patch', 'mock.patch', (['"""garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel"""'], {'new': 'SimpleGaussianMLPModel'}), "('garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel'\n , new=SimpleGaussianMLPModel)\n", (738, 841), 
False, 'from unittest import mock\n'), ((913, 958), 'garage.tf.policies.GaussianMLPPolicyWithModel', 'GaussianMLPPolicyWithModel', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (939, 958), False, 'from garage.tf.policies import GaussianMLPPolicyWithModel\n'), ((1704, 1743), 'numpy.array_equal', 'np.array_equal', (['action', 'expected_action'], {}), '(action, expected_action)\n', (1718, 1743), True, 'import numpy as np\n'), ((1763, 1806), 'numpy.array_equal', 'np.array_equal', (["prob['mean']", 'expected_mean'], {}), "(prob['mean'], expected_mean)\n", (1777, 1806), True, 'import numpy as np\n'), ((1826, 1875), 'numpy.array_equal', 'np.array_equal', (["prob['log_std']", 'expected_log_std'], {}), "(prob['log_std'], expected_log_std)\n", (1840, 1875), True, 'import numpy as np\n'), ((2121, 2172), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {'obs_dim': 'obs_dim', 'action_dim': 'action_dim'}), '(obs_dim=obs_dim, action_dim=action_dim)\n', (2132, 2172), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((2187, 2300), 'unittest.mock.patch', 'mock.patch', (['"""garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel"""'], {'new': 'SimpleGaussianMLPModel'}), "('garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel'\n , new=SimpleGaussianMLPModel)\n", (2197, 2300), False, 'from unittest import mock\n'), ((2372, 2417), 'garage.tf.policies.GaussianMLPPolicyWithModel', 'GaussianMLPPolicyWithModel', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (2398, 2417), False, 'from garage.tf.policies import GaussianMLPPolicyWithModel\n'), ((3211, 3262), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {'obs_dim': 'obs_dim', 'action_dim': 'action_dim'}), '(obs_dim=obs_dim, action_dim=action_dim)\n', (3222, 3262), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((3277, 3390), 'unittest.mock.patch', 'mock.patch', (['"""garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel"""'], 
{'new': 'SimpleGaussianMLPModel'}), "('garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel'\n , new=SimpleGaussianMLPModel)\n", (3287, 3390), False, 'from unittest import mock\n'), ((3462, 3507), 'garage.tf.policies.GaussianMLPPolicyWithModel', 'GaussianMLPPolicyWithModel', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (3488, 3507), False, 'from garage.tf.policies import GaussianMLPPolicyWithModel\n'), ((3773, 3788), 'pickle.loads', 'pickle.loads', (['p'], {}), '(p)\n', (3785, 3788), False, 'import pickle\n'), ((3731, 3741), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3739, 3741), True, 'import tensorflow as tf\n')] |
from cinebot_mini import SERVERS
import requests
import numpy as np
import json
def base_url():
    """Return the root URL of the Blender HTTP API server."""
    cfg = SERVERS["blender"]
    return "http://{}:{}".format(cfg["host"], cfg["port"])
def handshake():
    """Ping the server up to five times; True on first valid response."""
    url = base_url() + "/api/ping"
    attempts = 0
    while attempts < 5:
        attempts += 1
        try:
            response = requests.get(url, timeout=1.0)
            payload = response.json()
            assert payload["url"] == "/api/ping"
            return True
        except Exception:
            # Any failure (timeout, bad JSON, wrong echo) counts as a miss.
            continue
    return False
def create_object(name, type="CAMERA"):
    """Create a single object in the Blender scene.

    :param str name: Desired object name.
    :param str type: Blender object type (e.g. "CAMERA").
    :return: The name the server assigned, or None on failure.
    """
    url = base_url() + "/api/create"
    data = {
        "type": type,
        "name": name
    }
    r = requests.put(url, data=json.dumps(data))
    r_data = r.json()
    obj_dict = r_data['result']
    if "name" in obj_dict:
        return obj_dict["name"]
    else:
        # Bug fix: previously referenced the undefined name `obj_name`
        # (NameError) and passed the format string and value as two
        # separate print arguments instead of formatting them.
        print("Creating {} failed!".format(name))
def create_objects(type="CAMERA", num=4, base_name="screen_camera_"):
    """Create several objects named ``base_name + i`` for i in range(num).

    :param str type: Blender object type (e.g. "CAMERA").
    :param int num: Number of objects to create.
    :param str base_name: Prefix for generated object names.
    :return: List of the names the server assigned (failures skipped).
    """
    url = base_url() + "/api/create"
    obj_names = []
    for i in range(num):
        obj_name = base_name + str(i)
        data = {
            "type": type,
            "name": obj_name
        }
        r = requests.put(url, data=json.dumps(data))
        r_data = r.json()
        obj_dict = r_data['result']
        if "name" in obj_dict:
            obj_names.append(obj_dict["name"])
        else:
            # Bug fix: print previously received the format string and the
            # name as two separate arguments instead of formatting them.
            print("Creating {} failed!".format(obj_name))
    return obj_names
def set_transform_euler(obj_name, loc, rot, degree=True):
    """Set an object's location and Euler rotation.

    Rotation is given in degrees unless ``degree`` is False.
    """
    url = "{}/api/object/{}/property".format(base_url(), obj_name)
    rotation = list(rot)
    if degree:
        # Convert degrees to radians before sending.
        rotation = (np.array(rot) / 180.0 * np.pi).tolist()
    payload = {
        "properties": {
            "location": list(loc),
            "rotation_euler": list(rotation)
        }
    }
    response = requests.put(url, data=json.dumps(payload))
    return response.json()["result"]
def set_transform_matrix(obj_name, matrix):
    """Set an object's world transform matrix (expects an array with .tolist())."""
    url = "{}/api/object/{}/property".format(base_url(), obj_name)
    payload = {"properties": {"matrix_world": matrix.tolist()}}
    response = requests.put(url, data=json.dumps(payload))
    return response.json()["result"]
# NOTE(review): this is an exact duplicate of set_transform_matrix defined
# immediately above; this redefinition shadows the first. Consider removing
# one of the two definitions.
def set_transform_matrix(obj_name, matrix):
    """Set an object's world transform matrix (duplicate definition)."""
    url = base_url() + "/api/object/" + obj_name + "/property"
    data = {
        "properties": {
            "matrix_world": matrix.tolist()
        }
    }
    r = requests.put(url, data=json.dumps(data))
    r_data = r.json()
    return r_data["result"]
def set_property(obj_name, key, val, prop_type="properties"):
    """Set a single property on an object.

    :param str prop_type: Property group, e.g. "properties" or
        "data_properties".
    """
    url = "{}/api/object/{}/property".format(base_url(), obj_name)
    payload = {prop_type: {key: val}}
    response = requests.put(url, data=json.dumps(payload))
    return response.json()["result"]
def get_property(obj_name):
    """Fetch all properties of an object from the server."""
    response = requests.get("{}/api/object/{}/property".format(base_url(), obj_name))
    return response.json()["result"]
def test_object_exist(obj_name):
    """Return True when the object exists (server did not answer 404)."""
    url = "{}/api/object/{}/property".format(base_url(), obj_name)
    response = requests.get(url, data=json.dumps({}))
    return response.status_code != 404
def set_animation_euler(obj_name, locs, rots, degree=True):
    """Key-frame per-frame locations and Euler rotations starting at frame 0.

    Rotations are in degrees unless ``degree`` is False; ``locs``/``rots``
    are expected to be array-likes with per-frame .tolist() elements.
    """
    url = "{}/api/object/{}/animation".format(base_url(), obj_name)
    rot_data = rots
    if degree:
        rot_data = rots / 180.0 * np.pi
    transforms = [
        {
            "frame_number": t,
            "location": locs[t].tolist(),
            "rotation_euler": rot_data[t].tolist(),
        }
        for t in range(len(locs))
    ]
    response = requests.put(url, data=json.dumps({"transforms": transforms}))
    return response.json()["result"]
def set_animation_matrix(obj_name, matrices):
    """Key-frame per-frame world matrices starting at frame 0."""
    url = "{}/api/object/{}/animation".format(base_url(), obj_name)
    transforms = [
        {"frame_number": t, "matrix_world": matrices[t].tolist()}
        for t in range(len(matrices))
    ]
    response = requests.put(url, data=json.dumps({"transforms": transforms}))
    return response.json()["result"]
def get_animation_dict(obj_name):
    """Fetch an object's animation as {frame_number: matrix_world array}."""
    url = "{}/api/object/{}/animation".format(base_url(), obj_name)
    animation = requests.get(url).json()["result"]
    return {
        frame["frame_number"]: np.array(frame["matrix_world"])
        for frame in animation
    }
def get_animation(obj_name):
    """Fetch an object's animation as a list of matrix_world arrays."""
    url = "{}/api/object/{}/animation".format(base_url(), obj_name)
    animation = requests.get(url).json()["result"]
    return [np.array(frame["matrix_world"]) for frame in animation]
def delete_animation(obj_name):
    """Remove all key-frames from an object."""
    response = requests.delete("{}/api/object/{}/animation".format(base_url(), obj_name))
    return response.json()["result"]
def delete_object(obj_name):
    """Delete an object from the Blender scene."""
    response = requests.delete("{}/api/object/{}".format(base_url(), obj_name))
    return response.json()["result"]
def render_animation(file_name, frame_start, frame_end):
    """Render frames [frame_start, frame_end] to the given output path."""
    payload = {
        "output_file_path": file_name,
        "frame_start": frame_start,
        "frame_end": frame_end,
    }
    response = requests.put(base_url() + "/api/render/animation",
                            data=json.dumps(payload))
    return response.json()["result"]
def set_render_resolution(pixel_dim):
    """Set the render resolution from an (x, y) pair; True on success."""
    width, height = pixel_dim
    payload = {
        "properties": {
            "resolution_x": width,
            "resolution_y": height,
        }
    }
    response = requests.put(base_url() + "/api/render/property",
                            data=json.dumps(payload))
    return response.json()["result"] == "SUCCESS"
def set_camera_properties(cam_name, focal_length_m, sensor_dims_m):
    """Set camera lens and sensor data, converting metres to millimetres.

    :param str cam_name: Camera object name.
    :param float focal_length_m: Focal length in metres.
    :param sensor_dims_m: (width, height) sensor dimensions in metres.
    :return: True on success.
    """
    url = "{}/api/object/{}/property".format(base_url(), cam_name)
    width_mm, height_mm = np.array(sensor_dims_m) * 1000
    payload = {
        "data_properties": {
            "lens": focal_length_m * 1000,
            "sensor_width": width_mm,
            "sensor_height": height_mm,
        }
    }
    response = requests.put(url, data=json.dumps(payload))
    return response.json()["result"] == "SUCCESS"
def set_active_camera(cam_name):
    """Make *cam_name* the active render camera; True iff the server reports SUCCESS."""
    body = {
        "name": cam_name
    }
    response = requests.put(base_url() + "/api/render/active_camera", data=json.dumps(body))
    return response.json()["result"] == "SUCCESS"
| [
"numpy.array",
"json.dumps",
"requests.get",
"requests.delete"
] | [((2923, 2940), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2935, 2940), False, 'import requests\n'), ((4368, 4385), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4380, 4385), False, 'import requests\n'), ((4714, 4731), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4726, 4731), False, 'import requests\n'), ((5028, 5048), 'requests.delete', 'requests.delete', (['url'], {}), '(url)\n', (5043, 5048), False, 'import requests\n'), ((5187, 5207), 'requests.delete', 'requests.delete', (['url'], {}), '(url)\n', (5202, 5207), False, 'import requests\n'), ((4537, 4568), 'numpy.array', 'np.array', (["frame['matrix_world']"], {}), "(frame['matrix_world'])\n", (4545, 4568), True, 'import numpy as np\n'), ((4845, 4876), 'numpy.array', 'np.array', (["frame['matrix_world']"], {}), "(frame['matrix_world'])\n", (4853, 4876), True, 'import numpy as np\n'), ((6098, 6121), 'numpy.array', 'np.array', (['sensor_dims_m'], {}), '(sensor_dims_m)\n', (6106, 6121), True, 'import numpy as np\n'), ((342, 372), 'requests.get', 'requests.get', (['url'], {'timeout': '(1.0)'}), '(url, timeout=1.0)\n', (354, 372), False, 'import requests\n'), ((717, 733), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (727, 733), False, 'import json\n'), ((1847, 1863), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1857, 1863), False, 'import json\n'), ((2152, 2168), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2162, 2168), False, 'import json\n'), ((2457, 2473), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2467, 2473), False, 'import json\n'), ((2754, 2770), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2764, 2770), False, 'import json\n'), ((3138, 3154), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3148, 3154), False, 'import json\n'), ((3723, 3739), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3733, 3739), False, 'import json\n'), ((4192, 4208), 'json.dumps', 'json.dumps', (['data'], {}), 
'(data)\n', (4202, 4208), False, 'import json\n'), ((5520, 5536), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (5530, 5536), False, 'import json\n'), ((5840, 5856), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (5850, 5856), False, 'import json\n'), ((6306, 6322), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (6316, 6322), False, 'import json\n'), ((6548, 6564), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (6558, 6564), False, 'import json\n'), ((1214, 1230), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1224, 1230), False, 'import json\n'), ((1643, 1656), 'numpy.array', 'np.array', (['rot'], {}), '(rot)\n', (1651, 1656), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 The Regents of the University of California
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
from __future__ import print_function
import argparse
import m5
from m5.objects import TimingSimpleCPU, DerivO3CPU
from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory
from m5.objects import Root
from m5.objects import *
from system import BaseTestSystem
from system import InfMemory, SingleCycleMemory, SlowMemory
# Branch predictor params
# If indirect Predictor is disabled use BTB with these params
btbEntries = 512  # branch target buffer entries, shared by every CPU config below
btbTagSize = 19   # BTB tag width in bits
class IndirectPred(SimpleIndirectPredictor):
    """Tuned indirect-branch target predictor configuration."""
    indirectSets = 256 # Cache sets for indirect predictor
    indirectWays = 2 # Ways for indirect predictor
    indirectTagSize = 16 # Indirect target cache tag bits
    indirectPathLength = 3 # Previous indirect targets to use for path history
    indirectGHRBits = 13 # Indirect GHR number of bits
# Shared indirect-predictor instance wired into every branchPred below.
# NOTE(review): this instantiates the plain SimpleIndirectPredictor defaults;
# the tuned ``IndirectPred`` subclass defined above is never used -- verify intent.
ipred = SimpleIndirectPredictor()
#CPU Configs
class Simple_LocalBP(TimingSimpleCPU):
    """TimingSimpleCPU with a local branch predictor (2-bit counters)."""
    branchPred = LocalBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictor
    branchPred.localPredictorSize = 2048
    branchPred.localCtrBits = 2
class DefaultO3_LocalBP(DerivO3CPU):
    """Out-of-order (DerivO3) CPU with a local branch predictor (2-bit counters)."""
    branchPred = LocalBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictor
    branchPred.localPredictorSize = 2048
    branchPred.localCtrBits = 2
class Simple_BiModeBP(TimingSimpleCPU):
    """TimingSimpleCPU with a bi-mode branch predictor (global + choice tables)."""
    branchPred = BiModeBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictor
    branchPred.globalPredictorSize = 8192
    branchPred.globalCtrBits = 2
    branchPred.choicePredictorSize = 8192
    branchPred.choiceCtrBits = 2
class DefaultO3_BiModeBP(DerivO3CPU):
    """Out-of-order (DerivO3) CPU with a bi-mode branch predictor (global + choice tables)."""
    branchPred = BiModeBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictor
    branchPred.globalPredictorSize = 8192
    branchPred.globalCtrBits = 2
    branchPred.choicePredictorSize = 8192
    branchPred.choiceCtrBits = 2
class Simple_TournamentBP(TimingSimpleCPU):
    """TimingSimpleCPU with a tournament predictor (local + global tables and a chooser)."""
    branchPred = TournamentBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictor
    branchPred.localPredictorSize = 2048
    branchPred.localCtrBits = 2
    branchPred.localHistoryTableSize = 2048
    branchPred.globalPredictorSize = 8192
    branchPred.globalCtrBits = 2
    branchPred.choicePredictorSize = 8192
    branchPred.choiceCtrBits = 2
class DefaultO3_TournamentBP(DerivO3CPU):
    """Out-of-order (DerivO3) CPU with a tournament predictor (local + global tables and a chooser)."""
    branchPred = TournamentBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictor
    branchPred.localPredictorSize = 2048
    branchPred.localCtrBits = 2
    branchPred.localHistoryTableSize = 2048
    branchPred.globalPredictorSize = 8192
    branchPred.globalCtrBits = 2
    branchPred.choicePredictorSize = 8192
    branchPred.choiceCtrBits = 2
class Simple_LTAGEBP(TimingSimpleCPU):
    """TimingSimpleCPU with an L-TAGE predictor; predictor internals keep the m5 defaults."""
    branchPred = LTAGE()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictor
class DefaultO3_LTAGEBP(DerivO3CPU):
    """Out-of-order (DerivO3) CPU with an L-TAGE predictor; predictor internals keep the m5 defaults."""
    branchPred = LTAGE()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictor
# Add more CPUs Configs under test before this
valid_configs = [Simple_LocalBP, Simple_BiModeBP, Simple_TournamentBP, Simple_LTAGEBP, DefaultO3_LocalBP, DefaultO3_BiModeBP, DefaultO3_TournamentBP, DefaultO3_LTAGEBP]
# CLI choice -> class; [:-2] strips the trailing "BP" from each class name.
valid_configs = {cls.__name__[:-2]:cls for cls in valid_configs}
# Add more Memories under test before this
valid_memories = [InfMemory, SingleCycleMemory, SlowMemory]
# CLI choice -> class; [:-6] strips the trailing "Memory" from each class name.
valid_memories = {cls.__name__[:-6]:cls for cls in valid_memories}
parser = argparse.ArgumentParser()
parser.add_argument('config', choices = valid_configs.keys())
parser.add_argument('memory_model', choices = valid_memories.keys())
parser.add_argument('binary', type = str, help = "Path to binary to run")
args = parser.parse_args()
class MySystem(BaseTestSystem):
    """Test system wired with the CPU and memory models selected on the command line."""
    _CPUModel = valid_configs[args.config]
    _MemoryModel = valid_memories[args.memory_model]
system = MySystem()
system.setTestBinary(args.binary)
root = Root(full_system = False, system = system)
m5.instantiate()
exit_event = m5.simulate()
# Any exit cause other than normal program termination counts as a failure.
if exit_event.getCause() != 'exiting with last active thread context':
    print("Benchmark failed with bad exit cause.")
    print(exit_event.getCause())
    exit(1)
if exit_event.getCode() != 0:
    print("Benchmark failed with bad exit code.")
    print("Exit code {}".format(exit_event.getCode()))
    exit(1)
# NOTE(review): label says "ms"; curTick()/1e9 is milliseconds only if the tick
# rate is one tick per picosecond -- confirm against the configured clock.
print("{} ms".format(m5.curTick()/1e9))
| [
"m5.objects.SimpleIndirectPredictor",
"m5.simulate",
"m5.objects.LTAGE",
"argparse.ArgumentParser",
"m5.curTick",
"m5.objects.LocalBP",
"m5.objects.BiModeBP",
"m5.objects.Root",
"m5.objects.TournamentBP",
"m5.instantiate"
] | [((2439, 2464), 'm5.objects.SimpleIndirectPredictor', 'SimpleIndirectPredictor', ([], {}), '()\n', (2462, 2464), False, 'from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory\n'), ((5829, 5854), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5852, 5854), False, 'import argparse\n'), ((6279, 6317), 'm5.objects.Root', 'Root', ([], {'full_system': '(False)', 'system': 'system'}), '(full_system=False, system=system)\n', (6283, 6317), False, 'from m5.objects import Root\n'), ((6322, 6338), 'm5.instantiate', 'm5.instantiate', ([], {}), '()\n', (6336, 6338), False, 'import m5\n'), ((6353, 6366), 'm5.simulate', 'm5.simulate', ([], {}), '()\n', (6364, 6366), False, 'import m5\n'), ((2535, 2544), 'm5.objects.LocalBP', 'LocalBP', ([], {}), '()\n', (2542, 2544), False, 'from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory\n'), ((2843, 2852), 'm5.objects.LocalBP', 'LocalBP', ([], {}), '()\n', (2850, 2852), False, 'from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory\n'), ((3154, 3164), 'm5.objects.BiModeBP', 'BiModeBP', ([], {}), '()\n', (3162, 3164), False, 'from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory\n'), ((3541, 3551), 'm5.objects.BiModeBP', 'BiModeBP', ([], {}), '()\n', (3549, 3551), False, 'from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory\n'), ((3934, 3948), 'm5.objects.TournamentBP', 'TournamentBP', ([], {}), '()\n', (3946, 3948), False, 'from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory\n'), ((4446, 4460), 'm5.objects.TournamentBP', 'TournamentBP', ([], {}), '()\n', (4458, 4460), False, 'from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory\n'), ((4955, 4962), 'm5.objects.LTAGE', 'LTAGE', ([], {}), 
'()\n', (4960, 4962), False, 'from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory\n'), ((5188, 5195), 'm5.objects.LTAGE', 'LTAGE', ([], {}), '()\n', (5193, 5195), False, 'from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory\n'), ((6704, 6716), 'm5.curTick', 'm5.curTick', ([], {}), '()\n', (6714, 6716), False, 'import m5\n')] |
from celerybeatmongo.schedulers import MongoScheduler
from mist.api.sharding.mixins import ShardManagerMixin
from mist.api.poller.models import PollingSchedule
from mist.api.poller.models import OwnerPollingSchedule
from mist.api.poller.models import CloudPollingSchedule
from mist.api.poller.models import MachinePollingSchedule
import datetime
class PollingScheduler(MongoScheduler):
    """Celery-beat scheduler backed by generic PollingSchedule documents in MongoDB."""
    Model = PollingSchedule
    UPDATE_INTERVAL = datetime.timedelta(seconds=20)  # how often beat re-reads schedules
class OwnerPollingScheduler(MongoScheduler):
    """Celery-beat scheduler backed by OwnerPollingSchedule documents."""
    Model = OwnerPollingSchedule
    UPDATE_INTERVAL = datetime.timedelta(seconds=20)  # how often beat re-reads schedules
class CloudPollingScheduler(MongoScheduler):
    """Celery-beat scheduler backed by CloudPollingSchedule documents."""
    Model = CloudPollingSchedule
    UPDATE_INTERVAL = datetime.timedelta(seconds=20)  # how often beat re-reads schedules
class MachinePollingScheduler(MongoScheduler):
    """Celery-beat scheduler backed by MachinePollingSchedule documents."""
    Model = MachinePollingSchedule
    UPDATE_INTERVAL = datetime.timedelta(seconds=20)  # how often beat re-reads schedules
class ShardedOwnerScheduler(ShardManagerMixin, OwnerPollingScheduler):
    """Owner polling scheduler distributed across shards via ShardManagerMixin."""
    pass
class ShardedCloudScheduler(ShardManagerMixin, CloudPollingScheduler):
    """Cloud polling scheduler distributed across shards via ShardManagerMixin."""
    pass
class ShardedMachineScheduler(ShardManagerMixin, MachinePollingScheduler):
    """Machine polling scheduler distributed across shards via ShardManagerMixin."""
    pass
| [
"datetime.timedelta"
] | [((441, 471), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (459, 471), False, 'import datetime\n'), ((574, 604), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (592, 604), False, 'import datetime\n'), ((707, 737), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (725, 737), False, 'import datetime\n'), ((844, 874), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (862, 874), False, 'import datetime\n')] |
from collections import Counter
import json
from pathlib import Path
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from bootstrap.lib.logger import Logger
from bootstrap.datasets import transforms as bootstrap_tf
# Import HDD whether this module is run as part of the package or as a script.
# Catch only ImportError: a bare ``except`` would also swallow
# KeyboardInterrupt/SystemExit and mask unrelated failures inside hdd.py.
try:
    from .hdd import HDD
except ImportError:
    from hdd import HDD
class HDDClassif(HDD):
    """HDD dataset for frame-level driver-event classification.

    Each item is a temporal window of ``win_size`` consecutive frames whose
    target frame sits at index ``frame_position`` inside the window, together
    with navigation features (past/future ego trajectories, blinker state)
    and the event label of the target frame for the chosen annotation layer
    ("goal" or "cause").
    """
    def __init__(self,
            dir_data,
            split,
            win_size,
            im_size,
            layer, # "goal" or "cause"
            frame_position,
            traintest_mode,
            fps=10,
            horizon=2, # in seconds
            extract_mode=False,  # NOTE(review): accepted but never used in this class
            batch_size=2,
            debug=False,
            shuffle=False,
            pin_memory=False,
            nb_threads=0):
        # Set before the parent __init__, which presumably calls build_index()
        # and therefore needs win_size/frame_position -- verify in HDD.
        self.win_size = win_size
        self.frame_position = frame_position
        super(HDDClassif, self).__init__(dir_data,
                                         split,
                                         im_size,
                                         fps,
                                         horizon, # in seconds
                                         batch_size,
                                         debug,
                                         shuffle,
                                         pin_memory,
                                         nb_threads)
        self.layer = layer
        # layer_id indexes into annot['labels']; classid_to_ix maps contiguous
        # class ids to raw event indices (-1 is the background class).
        if self.layer == "cause":
            self.layer_id = '1'
            self.classid_to_ix = [-1, 16, 17, 18, 19, 20, 22]
        elif self.layer == "goal":
            self.layer_id = '0'
            self.classid_to_ix = [-1, 0, 1, 2, 3, 4, 5, 7, 8, 10, 11, 12]
        else:
            raise ValueError(self.layer)
        # The classid 0 is the background class
        self.ix_to_classid = dict((ix, classid) for classid, ix in enumerate(self.classid_to_ix))
        self.class_freq = self.get_class_freq()
        self.collate_fn = bootstrap_tf.Compose([
            bootstrap_tf.ListDictsToDictLists(),
            bootstrap_tf.StackTensors()
        ])
        self.dir_navig_features = self.dir_processed_annot
        # Normalization constants match the Kinetics pretraining statistics
        # used by torchvision video models -- verify against the model in use.
        self.im_transform = transforms.Compose([transforms.Resize((self.im_h, self.im_w)),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean = [0.43216, 0.394666, 0.37645],
                                                         std = [0.22803, 0.22145, 0.216989])])
        self.traintest_mode = traintest_mode
        if self.traintest_mode:
            self.make_batch_loader = self._make_batch_loader_traintest
        else:
            self.make_batch_loader = self._make_batch_loader
    def classid_to_classname(self, classid):
        """Map a contiguous class id back to its human-readable event name."""
        ix = self.classid_to_ix[classid]
        if ix == -1:
            return '__background__'
        else:
            return self.ix_to_event[ix]
    def _make_batch_loader(self, batch_size=None, shuffle=None, num_samples=200000):
        """Build a DataLoader; when shuffling, sample with replacement, capped at num_samples."""
        nb_threads = self.nb_threads
        batch_size = self.batch_size if batch_size is None else batch_size
        shuffle = self.shuffle if shuffle is None else shuffle
        if shuffle:
            # DataLoader forbids passing both shuffle and a sampler, hence shuffle=None.
            sampler = data.RandomSampler(self, replacement=True, num_samples=min(num_samples, len(self)))
            shuffle = None
        else:
            sampler = None
        batch_loader = data.DataLoader(
            dataset=self,
            batch_size=batch_size,
            shuffle=shuffle,
            pin_memory=self.pin_memory,
            num_workers=nb_threads,
            collate_fn=self.collate_fn,
            sampler=sampler)
        return batch_loader
    def _make_batch_loader_traintest(self, batch_size=None, shuffle=None):
        """Build a DataLoader for train/test runs; shuffling draws batch_size*70000 samples."""
        nb_threads = self.nb_threads
        batch_size = self.batch_size if batch_size is None else batch_size
        num_samples = batch_size*70000
        shuffle = self.shuffle if shuffle is None else shuffle
        if shuffle:
            sampler = data.RandomSampler(self, replacement=True, num_samples=num_samples)
            shuffle = None
        else:
            sampler = None
        batch_loader = data.DataLoader(
            dataset=self,
            batch_size=batch_size,
            shuffle=shuffle,
            pin_memory=self.pin_memory,
            num_workers=nb_threads,
            collate_fn=self.collate_fn,
            sampler=sampler)
        return batch_loader
    def build_index(self):
        """Build sliding windows of per-frame annotation paths for every session.

        Each index entry is a list of ``win_size`` consecutive annotation
        paths; videos are padded with ``None`` at both ends so that every
        frame can be the target at ``frame_position``.
        """
        Logger()('Building index for %s split...' % self.split)
        split_file = self.dir_data.joinpath(self.split+'.txt')
        index = []
        session_template = "{0}-{1}-{2}-{3}-{4}"
        self.vid_to_index = []
        self.vidname_to_vidid = {}
        for idx, session_id in enumerate(open(split_file)):
            # Session ids are packed timestamps: YYYYMMDDHHMM -> YYYY-MM-DD-HH-MM.
            name = session_template.format(session_id[:4],
                                           session_id[4:6],
                                           session_id[6:8],
                                           session_id[8:10],
                                           session_id[10:12])
            annot_paths = list(filter(lambda x:name in x.as_posix(),
                                      self.dir_processed_annot.iterdir()))
            if len(annot_paths) == 0:
                continue
            assert len(annot_paths) == 1
            annot_path = annot_paths[0]
            if annot_path.exists():
                frame_annots = sorted(annot_path.iterdir())
                frame_annots = [None]*self.frame_position + frame_annots + [None]*(self.win_size-self.frame_position-1) # None-padding of the full video, such that each frame can get a context
                L = [frame_annots[i:i+self.win_size] for i in range(0, len(frame_annots)-self.win_size+1)]
                self.vid_to_index.append((len(index), len(index)+len(L)))
                self.vidname_to_vidid[annot_path.name] = len(index)
                index += L
            # if self.debug:
            #     index += frame_annots[5000:7000]
            #     break
            # else:
            #     index += frame_annots
            if self.debug and idx==1:
                break
        Logger()('Done')
        return index
    def get_class_freq(self):
        """Load cached class frequencies; compute and cache them (train split only) on a miss."""
        class_freq_path = self.dir_processed_annot.joinpath('%s_class_freq.json' % self.layer)
        if class_freq_path.exists():
            Logger()('Loading class frequency')
            class_freq = json.load(open(class_freq_path))
            Logger()('Loaded class frequency')
        else:
            Logger()('Computing class frequency')
            if self.split != "train":
                raise NotImplementedError('Extract class weigths on train set first')
            class_freq = self.compute_class_freq()
            with open(class_freq_path, 'w') as F:
                F.write(json.dumps(class_freq))
        return class_freq
    def compute_class_freq(self):
        """Return the relative frequency of each class id over the dataset.

        NOTE(review): frequencies are computed from each window's *last*
        frame (paths[-1]), not the frame at ``frame_position`` -- verify
        that this matches how labels are consumed downstream.
        """
        class_freq = Counter()
        S = 0
        for paths in self.index:
            annot_path = paths[-1]
            if annot_path is None:
                continue
            annot = json.load(open(annot_path))
            event = annot['labels'][self.layer_id]
            # Events outside classid_to_ix fall back to the background class (0).
            classid = self.ix_to_classid.get(event, 0)
            class_freq[classid] += 1
            S += 1
        for classid in class_freq:
            class_freq[classid] = class_freq[classid] / S
        return class_freq
    def get_navig(self, annot):
        """Assemble past/future trajectory tensors and the blinker label for one frame.

        Trajectories shorter than ``self.length`` are zero-padded: past ones
        at the front, future ones at the back.
        """
        item = {}
        if len(annot['prev_xy']) == self.length:
            prev_xy = torch.Tensor(annot['prev_xy'])
            r_prev_xy = torch.Tensor(annot['r_prev_xy'])
        else:
            # should be padded before
            n = len(annot['prev_xy'])
            prev_xy = torch.Tensor(self.length,2).zero_()
            r_prev_xy = torch.Tensor(self.length,2).zero_()
            if n>0:
                prev_xy[self.length - n:] = torch.Tensor(annot['prev_xy'])
                r_prev_xy[self.length - n:] = torch.Tensor(annot['r_prev_xy'])
        item['prev_xy'] = prev_xy
        item['r_prev_xy'] = r_prev_xy
        if len(annot['next_xy']) == self.length:
            next_xy = torch.Tensor(annot['next_xy'])
            r_next_xy = torch.Tensor(annot['r_next_xy'])
        else:
            # should be padded after
            n = len(annot['next_xy'])
            next_xy = torch.Tensor(self.length,2).zero_()
            r_next_xy = torch.Tensor(self.length,2).zero_()
            if n>0:
                next_xy[:n] = torch.Tensor(annot['next_xy'])
                r_next_xy[:n] = torch.Tensor(annot['r_next_xy'])
        item['next_xy'] = next_xy
        item['r_next_xy'] = r_next_xy
        item['blinkers'] = torch.LongTensor([self.blinkers_to_ix[annot['blinkers']]])
        return item
    def get_navig_path(self, annot_path):
        """Return the navigation-annotation path for *annot_path*, tolerating sampling gaps."""
        # Sometimes, due to sampling considerations, the navig annotation doesn't exist.
        # We simply take the navig annotation for the closest existing sample
        annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
                                                            annot_path.name)
        if not annot_navig_path.exists():
            annot_num = int(annot_path.stem)
            annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
                                                                f"{annot_num-1:06d}.json")
            if not annot_navig_path.exists():
                annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
                                                                    f"{annot_num+1:06d}.json")
                if not annot_navig_path.exists():
                    annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
                                                                        f"{annot_num-2:06d}.json")
        return annot_navig_path
    def __getitem__(self, idx):
        """Return the frame window, per-frame labels, and navigation features for index *idx*.

        Padding frames (``None`` paths) keep label -1 and all-(-1) navig
        tensors; the '*_all' keys hold the full window while the plain keys
        hold only the target frame at ``frame_position``.
        """
        paths = self.index[idx]
        y_true = torch.LongTensor(self.win_size).zero_() -1
        frames = None
        navig = None
        item = {}
        for frame_id, annot_path in enumerate(paths):
            if annot_path is None:
                continue
            # Annotation stems are 0-based; frame image files are 1-based.
            frame_number = int(annot_path.stem) + 1
            frames_folder = self.dir_processed_img.joinpath(annot_path.parent.name)
            frame_path = frames_folder.joinpath(f"{frame_number:06d}.jpg")
            im = Image.open(frame_path)
            im = self.im_transform(im)
            if frames is None:
                frames = torch.Tensor(self.win_size, 3, self.im_h, self.im_w).zero_()
            frames[frame_id] = im
            annot = json.load(open(annot_path))
            event = annot['labels'][self.layer_id]
            y_true[frame_id] = self.ix_to_classid.get(event, 0)
            if navig is None:
                # Lazily allocate all-(-1) navig tensors on the first real frame.
                navig = {'prev_xy':torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
                        'next_xy':torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
                        'r_prev_xy':torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
                        'r_next_xy':torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
                        'xy_polynom':torch.Tensor(self.win_size, 5, 2).zero_() - 1,
                        'blinkers':torch.LongTensor(self.win_size).zero_() - 1}
            annot_navig_path = self.get_navig_path(annot_path)
            annot_navig = json.load(open(annot_navig_path))
            _navig = self.get_navig(annot_navig)
            for k in _navig:
                navig[k][frame_id] = _navig[k]
        item.update(navig)
        item['frames'] = frames
        item['idx'] = idx
        item['paths'] = paths
        item['frame_path'] = paths[self.frame_position]
        item['y_true_all'] = y_true
        item['y_true'] = y_true[self.frame_position]
        for k in navig:
            item[k+'_all'] = item[k]
            item[k] = item[k+'_all'][self.frame_position]
        item['frame_position'] = torch.LongTensor([self.frame_position])
        return item
if __name__ == "__main__":
    # Ad-hoc smoke test: build a val-split dataset and group window indices by video.
    split = "val"
    fps = 3
    dir_data = Path("/datasets_local/HDD")
    nb_threads = 0
    horizon = 2
    win_size = 21
    layer = "goal"
    batch_size = 12
    use_navig = False
    im_size = "small"
    # NOTE(review): HDDClassif.__init__ has no ``use_navig`` parameter and
    # requires ``frame_position``/``traintest_mode`` -- this call would raise
    # a TypeError as written; appears to predate a signature change.
    dataset = HDDClassif(dir_data,
                split,
                win_size,
                im_size,
                layer, # "goal" or "cause"
                use_navig=use_navig,
                fps=fps,
                horizon=horizon, # in seconds
                batch_size=batch_size,
                debug=False,
                shuffle=False,
                pin_memory=False,
                nb_threads=0)
    vidname_to_index = {}
    for idx, sequence in enumerate(dataset.index):
        vid_name = sequence[0].parent.name
        if vid_name not in vidname_to_index:
            vidname_to_index[vid_name] = []
        vidname_to_index[vid_name].append(idx)
    # NOTE(review): SequentialBatchSampler is not defined or imported in this
    # module -- verify where it is meant to come from.
    batch_sampler = SequentialBatchSampler(vidname_to_index, batch_size)
    N = 0
    for batch in batch_sampler:
        print(batch)
        N += 1
    # item = dataset[5]
    # loader = dataset.make_batch_loader(batch_size,
    #                                 shuffle=False)
    # for idx, batch in enumerate(loader):
# break | [
"PIL.Image.open",
"pathlib.Path",
"torch.LongTensor",
"torchvision.transforms.Resize",
"torch.Tensor",
"bootstrap.datasets.transforms.StackTensors",
"torch.utils.data.RandomSampler",
"json.dumps",
"collections.Counter",
"bootstrap.datasets.transforms.ListDictsToDictLists",
"torchvision.transform... | [((12678, 12705), 'pathlib.Path', 'Path', (['"""/datasets_local/HDD"""'], {}), "('/datasets_local/HDD')\n", (12682, 12705), False, 'from pathlib import Path\n'), ((3636, 3811), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'self', 'batch_size': 'batch_size', 'shuffle': 'shuffle', 'pin_memory': 'self.pin_memory', 'num_workers': 'nb_threads', 'collate_fn': 'self.collate_fn', 'sampler': 'sampler'}), '(dataset=self, batch_size=batch_size, shuffle=shuffle,\n pin_memory=self.pin_memory, num_workers=nb_threads, collate_fn=self.\n collate_fn, sampler=sampler)\n', (3651, 3811), True, 'import torch.utils.data as data\n'), ((4431, 4606), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'self', 'batch_size': 'batch_size', 'shuffle': 'shuffle', 'pin_memory': 'self.pin_memory', 'num_workers': 'nb_threads', 'collate_fn': 'self.collate_fn', 'sampler': 'sampler'}), '(dataset=self, batch_size=batch_size, shuffle=shuffle,\n pin_memory=self.pin_memory, num_workers=nb_threads, collate_fn=self.\n collate_fn, sampler=sampler)\n', (4446, 4606), True, 'import torch.utils.data as data\n'), ((7328, 7337), 'collections.Counter', 'Counter', ([], {}), '()\n', (7335, 7337), False, 'from collections import Counter\n'), ((9141, 9199), 'torch.LongTensor', 'torch.LongTensor', (["[self.blinkers_to_ix[annot['blinkers']]]"], {}), "([self.blinkers_to_ix[annot['blinkers']]])\n", (9157, 9199), False, 'import torch\n'), ((12533, 12572), 'torch.LongTensor', 'torch.LongTensor', (['[self.frame_position]'], {}), '([self.frame_position])\n', (12549, 12572), False, 'import torch\n'), ((4266, 4333), 'torch.utils.data.RandomSampler', 'data.RandomSampler', (['self'], {'replacement': '(True)', 'num_samples': 'num_samples'}), '(self, replacement=True, num_samples=num_samples)\n', (4284, 4333), True, 'import torch.utils.data as data\n'), ((4760, 4768), 'bootstrap.lib.logger.Logger', 'Logger', ([], {}), '()\n', (4766, 4768), False, 'from 
bootstrap.lib.logger import Logger\n'), ((6534, 6542), 'bootstrap.lib.logger.Logger', 'Logger', ([], {}), '()\n', (6540, 6542), False, 'from bootstrap.lib.logger import Logger\n'), ((7961, 7991), 'torch.Tensor', 'torch.Tensor', (["annot['prev_xy']"], {}), "(annot['prev_xy'])\n", (7973, 7991), False, 'import torch\n'), ((8017, 8049), 'torch.Tensor', 'torch.Tensor', (["annot['r_prev_xy']"], {}), "(annot['r_prev_xy'])\n", (8029, 8049), False, 'import torch\n'), ((8589, 8619), 'torch.Tensor', 'torch.Tensor', (["annot['next_xy']"], {}), "(annot['next_xy'])\n", (8601, 8619), False, 'import torch\n'), ((8645, 8677), 'torch.Tensor', 'torch.Tensor', (["annot['r_next_xy']"], {}), "(annot['r_next_xy'])\n", (8657, 8677), False, 'import torch\n'), ((10902, 10924), 'PIL.Image.open', 'Image.open', (['frame_path'], {}), '(frame_path)\n', (10912, 10924), False, 'from PIL import Image\n'), ((2167, 2202), 'bootstrap.datasets.transforms.ListDictsToDictLists', 'bootstrap_tf.ListDictsToDictLists', ([], {}), '()\n', (2200, 2202), True, 'from bootstrap.datasets import transforms as bootstrap_tf\n'), ((2217, 2244), 'bootstrap.datasets.transforms.StackTensors', 'bootstrap_tf.StackTensors', ([], {}), '()\n', (2242, 2244), True, 'from bootstrap.datasets import transforms as bootstrap_tf\n'), ((2372, 2413), 'torchvision.transforms.Resize', 'transforms.Resize', (['(self.im_h, self.im_w)'], {}), '((self.im_h, self.im_w))\n', (2389, 2413), True, 'import torchvision.transforms as transforms\n'), ((2464, 2485), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2483, 2485), True, 'import torchvision.transforms as transforms\n'), ((2537, 2631), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.43216, 0.394666, 0.37645]', 'std': '[0.22803, 0.22145, 0.216989]'}), '(mean=[0.43216, 0.394666, 0.37645], std=[0.22803, \n 0.22145, 0.216989])\n', (2557, 2631), True, 'import torchvision.transforms as transforms\n'), ((6753, 6761), 
'bootstrap.lib.logger.Logger', 'Logger', ([], {}), '()\n', (6759, 6761), False, 'from bootstrap.lib.logger import Logger\n'), ((6861, 6869), 'bootstrap.lib.logger.Logger', 'Logger', ([], {}), '()\n', (6867, 6869), False, 'from bootstrap.lib.logger import Logger\n'), ((6924, 6932), 'bootstrap.lib.logger.Logger', 'Logger', ([], {}), '()\n', (6930, 6932), False, 'from bootstrap.lib.logger import Logger\n'), ((8329, 8359), 'torch.Tensor', 'torch.Tensor', (["annot['prev_xy']"], {}), "(annot['prev_xy'])\n", (8341, 8359), False, 'import torch\n'), ((8407, 8439), 'torch.Tensor', 'torch.Tensor', (["annot['r_prev_xy']"], {}), "(annot['r_prev_xy'])\n", (8419, 8439), False, 'import torch\n'), ((8942, 8972), 'torch.Tensor', 'torch.Tensor', (["annot['next_xy']"], {}), "(annot['next_xy'])\n", (8954, 8972), False, 'import torch\n'), ((9006, 9038), 'torch.Tensor', 'torch.Tensor', (["annot['r_next_xy']"], {}), "(annot['r_next_xy'])\n", (9018, 9038), False, 'import torch\n'), ((7216, 7238), 'json.dumps', 'json.dumps', (['class_freq'], {}), '(class_freq)\n', (7226, 7238), False, 'import json\n'), ((8166, 8194), 'torch.Tensor', 'torch.Tensor', (['self.length', '(2)'], {}), '(self.length, 2)\n', (8178, 8194), False, 'import torch\n'), ((8227, 8255), 'torch.Tensor', 'torch.Tensor', (['self.length', '(2)'], {}), '(self.length, 2)\n', (8239, 8255), False, 'import torch\n'), ((8793, 8821), 'torch.Tensor', 'torch.Tensor', (['self.length', '(2)'], {}), '(self.length, 2)\n', (8805, 8821), False, 'import torch\n'), ((8854, 8882), 'torch.Tensor', 'torch.Tensor', (['self.length', '(2)'], {}), '(self.length, 2)\n', (8866, 8882), False, 'import torch\n'), ((10441, 10472), 'torch.LongTensor', 'torch.LongTensor', (['self.win_size'], {}), '(self.win_size)\n', (10457, 10472), False, 'import torch\n'), ((11023, 11075), 'torch.Tensor', 'torch.Tensor', (['self.win_size', '(3)', 'self.im_h', 'self.im_w'], {}), '(self.win_size, 3, self.im_h, self.im_w)\n', (11035, 11075), False, 'import torch\n'), ((11352, 
11395), 'torch.Tensor', 'torch.Tensor', (['self.win_size', 'self.length', '(2)'], {}), '(self.win_size, self.length, 2)\n', (11364, 11395), False, 'import torch\n'), ((11444, 11487), 'torch.Tensor', 'torch.Tensor', (['self.win_size', 'self.length', '(2)'], {}), '(self.win_size, self.length, 2)\n', (11456, 11487), False, 'import torch\n'), ((11538, 11581), 'torch.Tensor', 'torch.Tensor', (['self.win_size', 'self.length', '(2)'], {}), '(self.win_size, self.length, 2)\n', (11550, 11581), False, 'import torch\n'), ((11632, 11675), 'torch.Tensor', 'torch.Tensor', (['self.win_size', 'self.length', '(2)'], {}), '(self.win_size, self.length, 2)\n', (11644, 11675), False, 'import torch\n'), ((11727, 11760), 'torch.Tensor', 'torch.Tensor', (['self.win_size', '(5)', '(2)'], {}), '(self.win_size, 5, 2)\n', (11739, 11760), False, 'import torch\n'), ((11810, 11841), 'torch.LongTensor', 'torch.LongTensor', (['self.win_size'], {}), '(self.win_size)\n', (11826, 11841), False, 'import torch\n')] |
# https://www.hackerrank.com/challenges/one-week-preparation-kit-jesse-and-cookies/problem
#!/bin/python3
import math
import os
import random
import re
import sys
import heapq
#
# Complete the 'cookies' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER k
# 2. INTEGER_ARRAY A
#
def cookies(k, A, z=0):
    """Return the minimum number of mixing operations until every cookie in
    *A* has sweetness >= k, or -1 if that is impossible.

    One operation pops the two least sweet cookies and pushes
    ``least + 2 * second_least``. *A* is heapified in place.
    """
    heapq.heapify(A)
    ops = z
    while True:
        least = heapq.heappop(A)
        if least >= k:
            return ops
        if not A:
            return -1
        second = heapq.heappop(A)
        heapq.heappush(A, least + 2 * second)
        ops += 1
if __name__ == '__main__':
    # HackerRank harness: read "n k" then the cookie array from stdin and
    # write the answer to the file named by the OUTPUT_PATH env variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    first_multiple_input = input().rstrip().split()
    n = int(first_multiple_input[0])  # n is read but unused; len(A) serves instead
    k = int(first_multiple_input[1])
    A = list(map(int, input().rstrip().split()))
    result = cookies(k, A)
    fptr.write(str(result) + '\n')
fptr.close() | [
"heapq.heappush",
"heapq.heappop",
"heapq.heapify"
] | [((387, 403), 'heapq.heapify', 'heapq.heapify', (['A'], {}), '(A)\n', (400, 403), False, 'import heapq\n'), ((432, 448), 'heapq.heappop', 'heapq.heappop', (['A'], {}), '(A)\n', (445, 448), False, 'import heapq\n'), ((521, 537), 'heapq.heappop', 'heapq.heappop', (['A'], {}), '(A)\n', (534, 537), False, 'import heapq\n'), ((546, 574), 'heapq.heappush', 'heapq.heappush', (['A', '(a + 2 * b)'], {}), '(A, a + 2 * b)\n', (560, 574), False, 'import heapq\n')] |
import bpy
import glob
from bpy.types import Panel, Operator
from bpy.app.handlers import persistent
import os
import threading
from queue import Queue
from pathlib import Path
from . mix_ops import *
from . matgan_ops import *
from . neural_ops import *
cache_path = os.path.join(Path(__file__).parent.resolve(), '.cache')
# Redraw all function
def redraw_all(context):
for area in context.screen.areas:
if area.type in ['NODE_EDITOR']:
area.tag_redraw()
# Thread function for reading output
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line.decode('utf-8').strip())
out.close()
@persistent
def on_addon_save(dummy):
for mat in bpy.data.materials:
if "matgan" in mat.name:
match = re.match(".+?(?=_matgan_mat)", mat.name)
obj_name = match[0] if match else ""
if obj_name in bpy.data.objects:
obj = bpy.data.objects[obj_name]
dir = os.path.join(obj["MaterialGAN_Path"], 'out')
update_matgan(obj, dir)
elif "neural" in mat.name:
match = re.match(".+?(?=_neural_mat)", mat.name)
obj_name = match[0] if match else ""
if obj_name in bpy.data.objects:
obj = bpy.data.objects[obj_name]
dir = os.path.join(obj["Neural_Path"], 'out')
update_neural(obj, dir)
elif "mix" in mat.name:
match = re.match(".+?(?=_mix_mat)", mat.name)
obj_name = match[0] if match else ""
if obj_name in bpy.data.objects:
obj = bpy.data.objects[obj_name]
dir = os.path.join(obj["Algorithmic_Path"], 'out')
update_mix(obj, dir)
@persistent
def on_addon_load(dummy):
MAT_OT_MATGAN_GetInterpolations._popen = None
MAT_OT_MATGAN_Generator._popen = None
MAT_OT_MATGAN_InputFromFlashImage._popen = None
MAT_OT_MATGAN_SuperResolution._popen = None
blender_path = os.path.join(Path(__file__).parent.resolve(), 'final.blend')
with bpy.data.libraries.load(blender_path, link=False) as (data_from, data_to):
data_to.materials = [mat for mat in data_from.materials]
group_list = ['photo_to_pbr', 'Aluminium', 'Wood', 'Plastic', 'Plaster', 'Leather', 'Silk', 'Concrete', 'Marble']
data_to.node_groups = [n for n in data_from.node_groups if n in group_list]
if not os.path.exists(cache_path):
os.makedirs(cache_path)
else:
for root, dirs, files in os.walk(cache_path):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
# Load mix images
names = ['Aluminium', 'Wood', 'Plastic', 'Plaster', 'Leather', 'Silk', 'Concrete', 'Marble']
for i in names:
img = bpy.data.images.load(os.path.join(Path(__file__).parent.resolve(), f'algorithmic/{i}.png'))
img.name = i
img.preview_ensure()
def update_active_mat(self, context):
active_obj = bpy.context.active_object
if active_obj:
if context.scene.SelectWorkflow == 'MatGAN':
base_name = "matgan_mat"
elif context.scene.SelectWorkflow == 'NeuralMAT':
base_name = "neural_mat"
elif context.scene.SelectWorkflow == 'MixMAT':
base_name = "mix_mat"
name = f"{active_obj.name}_{base_name}"
if name not in bpy.data.materials:
mat = bpy.data.materials[base_name].copy()
mat.name = name
else:
mat = bpy.data.materials[name]
active_obj.active_material = mat
if context.scene.SelectWorkflow == 'MatGAN' and 'MaterialGAN_Path' in active_obj:
bpy.context.scene.matgan_properties.directory = active_obj['MaterialGAN_Path']
elif context.scene.SelectWorkflow == 'NeuralMAT' and 'Neural_Path' in active_obj:
bpy.context.scene.neural_properties.directory = active_obj['Neural_Path']
elif context.scene.SelectWorkflow == 'MixMAT' and 'Algorithmic_Path' in active_obj:
bpy.context.scene.mixmat_properties.directory = active_obj['Algorithmic_Path']
# Copy files to .cache folder
def copy_to_cache(src_path, name):
dst_path = os.path.join(cache_path, name)
if not os.path.exists(dst_path):
os.makedirs(dst_path)
if os.path.isdir(src_path):
for file in os.listdir(os.fsencode(src_path)):
f = os.fsdecode(file)
if f.endswith(".png") or f.endswith(".pt") or f.endswith('.ckpt'):
shutil.copyfile(os.path.join(src_path, f), os.path.join(dst_path, f))
def register():
if on_addon_load not in bpy.app.handlers.load_post:
bpy.app.handlers.load_post.append(on_addon_load)
if on_addon_save not in bpy.app.handlers.save_pre:
bpy.app.handlers.save_pre.append(on_addon_save)
bpy.types.Scene.SelectWorkflow = bpy.props.EnumProperty(
name='Material System Select',
description='Selected Material System for editing and generation.',
items={
('MatGAN', 'MaterialGAN + LIIF', 'Using MaterialGAN for generation and LIIF model for upscaling. ' \
+ 'Editing implemented as vector space exploration.'),
('NeuralMAT', 'Neural Material', 'Using Neural Material model for generatiog. ' \
+ 'Editing implemented as material interpolations.'),
('MixMAT', 'Algorithmic generation', 'Using a Blender shader nodes approach for ' \
+ 'generating textures from albedo with mix blender shader nodes for editing.')
},
default='MatGAN',
update=update_active_mat
)
def unregister():
if on_addon_load in bpy.app.handlers.load_post:
bpy.app.handlers.load_post.remove(on_addon_load)
if on_addon_save in bpy.app.handlers.save_pre:
bpy.app.handlers.save_pre.remove(on_addon_save)
class MAT_PT_GeneratorPanel(Panel):
bl_space_type = "NODE_EDITOR"
bl_region_type = "UI"
bl_label = "Modifier operations"
bl_category = "MaterialGenerator Util"
thumb_scale = 8.0
check_existing = False
mix_preview = None
def draw_matgan(self, context):
layout = self.layout
matgan = bpy.context.scene.matgan_properties
# ================================================
# Draw MaterialGAN props and operators
# ================================================
row = layout.row()
row.prop(matgan, "progress", emboss=False, text="Status")
row = layout.row()
col = row.column()
col.prop(matgan, "num_rend", text="Num of images")
col = row.column()
col.prop(matgan, "epochs", text="Epochs")
row = layout.row()
row.prop(matgan, "directory", text="Directory")
row.operator("matgan.file_browser", icon="FILE_FOLDER", text="")
row = layout.row()
col = row.column()
col.operator("matgan.input_from_images", text="Format flash images")
row = layout.row()
col = row.column()
col.operator("matgan.mat_from_images", text="Generate Material")
col = row.column()
col.operator("matgan.stop_generator", text="", icon="PAUSE")
layout.separator()
# ================================================
# Draw Upscale LIIF
# ================================================
row = layout.row()
col = row.column()
col.prop(matgan, "h_res", text="Height resolution")
col = row.column()
col.prop(matgan, "w_res", text="Width resolution")
row = layout.row()
row.operator("matgan.super_res", text="Upscale material")
layout.separator()
row = layout.row()
row.operator("matgan.get_interpolations", text="Get interpolations")
layout.separator()
# ================================================
# Draw Gallery view
# ================================================
if MAT_OT_MATGAN_GetInterpolations._popen is None and MAT_OT_MATGAN_Generator._popen is None:
row = layout.row()
row.operator("matgan.revert_material", text="Revert material to previous")
self.draw_gallery(context, matgan, "matgan")
def draw_gallery(self, context, gan, mode):
x = MAT_OT_GalleryDirection.direction
interp_dir = os.path.join(gan.directory, 'interps')
out_dir = os.path.join(gan.directory, 'out')
rname = f"{bpy.context.active_object.name}_{mode}" if bpy.context.active_object else mode
if f'7_{x}_render.png' in bpy.data.images and f"{rname}_render.png" in bpy.data.images:
layout = self.layout
row = layout.row()
sign = '+' if MAT_OT_GalleryDirection.direction == 1 else '-'
row.operator("wm.edit_direction_toggle", text="Toggle direction")
box = layout.box()
cols = box.column_flow(columns=3)
# Get images
dir_list = sorted(glob.glob(interp_dir + f'/*_{x}_render.png'))
id = 0
for dir in dir_list:
if id == 4:
in_box = cols.box()
col = in_box.column()
img = bpy.data.images[f'{rname}_render.png']
img.preview_ensure()
col.template_icon(icon_value=img.preview.icon_id, scale=10)
col.label(text="Current material")
name = os.path.split(dir)[1]
img = bpy.data.images[name]
img.preview_ensure()
in_box = cols.box()
col = in_box.column()
col.template_icon(icon_value=img.preview.icon_id, scale=10)
operator = col.operator(f'{mode}.edit_move', text=f"Semantic {sign}{name[0]}")
operator.direction = name[0]
id += 1
def draw_neural(self, context):
layout = self.layout
neural = bpy.context.scene.neural_properties
# ================================================
# Draw NeuralMaterial props and operators
# ================================================
row = layout.row()
row.prop(neural, "progress", emboss=False, text="Status")
row = layout.row()
col = row.column()
col.prop(neural, "num_rend", text="Images")
col = row.column()
col.prop(neural, "epochs", text="Epochs")
col = row.column()
col.prop(neural, "seed", text="Seed")
row = layout.row()
col = row.column()
col.prop(neural, "h_res", text="Height resolution")
col = row.column()
col.prop(neural, "w_res", text="Width resolution")
row = layout.row()
row.prop(neural, "directory", text="Directory")
row.operator("neural.file_browser", icon="FILE_FOLDER", text="")
row = layout.row()
col = row.column()
col.operator("neural.generator", text="Generate Material")
col = row.column()
col.operator("neural.stop_generator", text="", icon="PAUSE")
row = layout.row()
col = row.column()
col.operator("neural.reseed", text="Upscale Material")
layout.separator()
# ================================================
# Draw NeuralMaterial interpolations operator
# ================================================
row = layout.row()
row.operator("neural.get_interpolations", text="Get interpolations")
layout.separator()
# ================================================
# Draw Gallery view
# ================================================
if MAT_OT_NEURAL_GetInterpolations._popen is None and MAT_OT_NEURAL_Generator._popen is None:
row = layout.row()
row.operator("neural.revert_material", text="Revert material to previous")
self.draw_gallery(context, neural, "neural")
def draw_mixmat(self, context):
layout = self.layout
mix = bpy.context.scene.mixmat_properties
# ================================================
# Draw Mix Materials generator operator
# ================================================
row = layout.row()
row.prop(mix, "progress", emboss=False, text="Status")
row = layout.row()
row.prop(mix, "directory", text="Directory")
row.operator("mixmat.file_browser", icon="FILE_FOLDER", text="")
row = layout.row()
row.operator("mixmat.generator", text="Generate")
layout.separator()
# ================================================
# Draw Mix material interpolations operator
# ================================================
row = layout.row()
row.prop(mix, "material", text="Select")
if 'Material' in mix.progress:
row.prop(mix, "value", text="Mix level")
layout.separator()
row = layout.row()
img = bpy.data.images[mix.material]
row.template_icon(icon_value=img.preview.icon_id, scale=10)
def draw(self, context):
self.layout.prop(context.scene, 'SelectWorkflow')
if context.scene.SelectWorkflow == 'MatGAN':
self.draw_matgan(context)
elif context.scene.SelectWorkflow == 'NeuralMAT':
self.draw_neural(context)
elif context.scene.SelectWorkflow == 'MixMAT':
self.draw_mixmat(context)
class MAT_OT_StatusUpdater(Operator):
"""Operator which runs its self from a timer"""
bl_idname = "wm.modal_status_updater"
bl_label = "Modal Status Updater"
_sTime = 0
_timer = None
_thread = None
_q = Queue()
def modal(self, context, event):
gan = bpy.context.scene.matgan_properties
if event.type == 'TIMER':
if MAT_OT_MATGAN_Generator._popen:
if MAT_OT_MATGAN_Generator._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
update_matgan(bpy.context.active_object, os.path.join(gan.directory, 'out'))
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
name = f"{bpy.context.active_object.name}_matgan" if bpy.context.active_object else "matgan"
copy_to_cache(os.path.join(gan.directory, 'out'), name)
update_matgan(bpy.context.active_object, os.path.join(cache_path, name))
gan.progress = "Material generated."
redraw_all(context)
MAT_OT_MATGAN_Generator._popen = None
self.cancel(context)
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
return {'CANCELLED'}
elif MAT_OT_MATGAN_InputFromFlashImage._popen:
if MAT_OT_MATGAN_InputFromFlashImage._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
gan.progress = "Input ready."
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
MAT_OT_MATGAN_InputFromFlashImage._popen = None
self.cancel(context)
return {'CANCELLED'}
elif MAT_OT_MATGAN_SuperResolution._popen:
if MAT_OT_MATGAN_SuperResolution._popen.poll() is not None:
gan.progress = "Material upscaled."
name = f"{bpy.context.active_object.name}_matgan" if bpy.context.active_object else "matgan"
copy_to_cache(os.path.join(gan.directory, 'out'), name)
update_matgan(bpy.context.active_object, os.path.join(cache_path, name))
redraw_all(context)
MAT_OT_MATGAN_SuperResolution._popen = None
self._thread = None
self.cancel(context)
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
return {'CANCELLED'}
elif MAT_OT_MATGAN_GetInterpolations._popen:
if MAT_OT_MATGAN_GetInterpolations._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
name = f"{bpy.context.active_object.name}_matgan" if bpy.context.active_object else "matgan"
check_remove_img(f'{name}_render.png')
img = bpy.data.images.load(os.path.join(gan.directory, 'out') + '/render.png')
img.name = f'{name}_render.png'
interp_path = os.path.join(gan.directory, 'interps')
dir_list = sorted(glob.glob(interp_path + '/*_*_render.png'))
for dir in dir_list:
check_remove_img(os.path.split(dir)[1])
img = bpy.data.images.load(dir)
img.name = os.path.split(dir)[1]
gan.progress = "Material interpolations generated."
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
MAT_OT_MATGAN_GetInterpolations._popen = None
self.cancel(context)
return {'CANCELLED'}
elif MAT_OT_NEURAL_Generator._popen:
gan = bpy.context.scene.neural_properties
if MAT_OT_NEURAL_Generator._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
update_neural(bpy.context.active_object, os.path.join(gan.directory, 'out'))
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
name = f"{bpy.context.active_object.name}_neural" if bpy.context.active_object else "neural"
copy_to_cache(os.path.join(gan.directory, 'out'), name)
update_neural(bpy.context.active_object, os.path.join(cache_path, name))
gan.progress = "Material generated."
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
MAT_OT_NEURAL_Generator._popen = None
self.cancel(context)
return {'CANCELLED'}
elif MAT_OT_NEURAL_GetInterpolations._popen:
gan = bpy.context.scene.neural_properties
if MAT_OT_NEURAL_GetInterpolations._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
name = f"{bpy.context.active_object.name}_neural" if bpy.context.active_object else "neural"
check_remove_img(f'{name}_render.png')
img = bpy.data.images.load(os.path.join(gan.directory, 'out') + '/render.png')
img.name = f'{name}_render.png'
interp_path = os.path.join(gan.directory, 'interps')
dir_list = sorted(glob.glob(interp_path + '/*_*_render.png'))
for dir in dir_list:
check_remove_img(os.path.split(dir)[1])
img = bpy.data.images.load(dir)
img.name = os.path.split(dir)[1]
gan.progress = "Material interpolations generated."
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
copy_to_cache(os.path.join(gan.directory, 'out'), name)
update_neural(bpy.context.active_object, os.path.join(cache_path, name))
redraw_all(context)
MAT_OT_NEURAL_GetInterpolations._popen = None
self.cancel(context)
return {'CANCELLED'}
else:
self.cancel(context)
return {'CANCELLED'}
return {'PASS_THROUGH'}
def execute(self, context):
self._sTime = time.time()
wm = context.window_manager
self._timer = wm.event_timer_add(0.1, window=context.window)
wm.modal_handler_add(self)
if MAT_OT_MATGAN_Generator._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_Generator._popen.stdout, self._q), daemon=True)
elif MAT_OT_MATGAN_InputFromFlashImage._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_InputFromFlashImage._popen.stdout, self._q), daemon=True)
elif MAT_OT_MATGAN_GetInterpolations._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_GetInterpolations._popen.stdout, self._q), daemon=True)
elif MAT_OT_MATGAN_SuperResolution._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_SuperResolution._popen.stdout, self._q), daemon=True)
elif MAT_OT_NEURAL_Generator._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_NEURAL_Generator._popen.stdout, self._q), daemon=True)
elif MAT_OT_NEURAL_GetInterpolations._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_NEURAL_GetInterpolations._popen.stdout, self._q), daemon=True)
self._thread.start()
return {'RUNNING_MODAL'}
def cancel(self, context):
wm = context.window_manager
wm.event_timer_remove(self._timer)
class MAT_OT_GalleryDirection(Operator):
"""Operator which switches gallery edit direction"""
bl_idname = "wm.edit_direction_toggle"
bl_label = "Direction switch operator"
direction = 1
def execute(self, context):
if MAT_OT_GalleryDirection.direction == 1:
MAT_OT_GalleryDirection.direction = 2
bpy.context.scene.matgan_properties.direction = MAT_OT_GalleryDirection.direction = 2
bpy.context.scene.neural_properties.direction = MAT_OT_GalleryDirection.direction = 2
else:
MAT_OT_GalleryDirection.direction = 1
bpy.context.scene.matgan_properties.direction = MAT_OT_GalleryDirection.direction = 1
bpy.context.scene.neural_properties.direction = MAT_OT_GalleryDirection.direction = 1
return {'FINISHED'} | [
"bpy.app.handlers.save_pre.append",
"bpy.data.libraries.load",
"os.fsencode",
"bpy.app.handlers.load_post.append",
"os.fsdecode",
"bpy.data.images.load",
"os.walk",
"os.path.exists",
"pathlib.Path",
"os.path.split",
"os.path.isdir",
"bpy.app.handlers.save_pre.remove",
"bpy.app.handlers.load_... | [((4321, 4351), 'os.path.join', 'os.path.join', (['cache_path', 'name'], {}), '(cache_path, name)\n', (4333, 4351), False, 'import os\n'), ((4426, 4449), 'os.path.isdir', 'os.path.isdir', (['src_path'], {}), '(src_path)\n', (4439, 4449), False, 'import os\n'), ((4986, 5650), 'bpy.props.EnumProperty', 'bpy.props.EnumProperty', ([], {'name': '"""Material System Select"""', 'description': '"""Selected Material System for editing and generation."""', 'items': "{('MatGAN', 'MaterialGAN + LIIF', \n 'Using MaterialGAN for generation and LIIF model for upscaling. ' +\n 'Editing implemented as vector space exploration.'), ('NeuralMAT',\n 'Neural Material', 'Using Neural Material model for generatiog. ' +\n 'Editing implemented as material interpolations.'), ('MixMAT',\n 'Algorithmic generation', 'Using a Blender shader nodes approach for ' +\n 'generating textures from albedo with mix blender shader nodes for editing.'\n )}", 'default': '"""MatGAN"""', 'update': 'update_active_mat'}), "(name='Material System Select', description=\n 'Selected Material System for editing and generation.', items={(\n 'MatGAN', 'MaterialGAN + LIIF', \n 'Using MaterialGAN for generation and LIIF model for upscaling. ' +\n 'Editing implemented as vector space exploration.'), ('NeuralMAT',\n 'Neural Material', 'Using Neural Material model for generatiog. 
' +\n 'Editing implemented as material interpolations.'), ('MixMAT',\n 'Algorithmic generation', 'Using a Blender shader nodes approach for ' +\n 'generating textures from albedo with mix blender shader nodes for editing.'\n )}, default='MatGAN', update=update_active_mat)\n", (5008, 5650), False, 'import bpy\n'), ((13983, 13990), 'queue.Queue', 'Queue', ([], {}), '()\n', (13988, 13990), False, 'from queue import Queue\n'), ((2102, 2151), 'bpy.data.libraries.load', 'bpy.data.libraries.load', (['blender_path'], {'link': '(False)'}), '(blender_path, link=False)\n', (2125, 2151), False, 'import bpy\n'), ((2462, 2488), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (2476, 2488), False, 'import os\n'), ((2498, 2521), 'os.makedirs', 'os.makedirs', (['cache_path'], {}), '(cache_path)\n', (2509, 2521), False, 'import os\n'), ((2565, 2584), 'os.walk', 'os.walk', (['cache_path'], {}), '(cache_path)\n', (2572, 2584), False, 'import os\n'), ((4363, 4387), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), '(dst_path)\n', (4377, 4387), False, 'import os\n'), ((4397, 4418), 'os.makedirs', 'os.makedirs', (['dst_path'], {}), '(dst_path)\n', (4408, 4418), False, 'import os\n'), ((4787, 4835), 'bpy.app.handlers.load_post.append', 'bpy.app.handlers.load_post.append', (['on_addon_load'], {}), '(on_addon_load)\n', (4820, 4835), False, 'import bpy\n'), ((4900, 4947), 'bpy.app.handlers.save_pre.append', 'bpy.app.handlers.save_pre.append', (['on_addon_save'], {}), '(on_addon_save)\n', (4932, 4947), False, 'import bpy\n'), ((5838, 5886), 'bpy.app.handlers.load_post.remove', 'bpy.app.handlers.load_post.remove', (['on_addon_load'], {}), '(on_addon_load)\n', (5871, 5886), False, 'import bpy\n'), ((5946, 5993), 'bpy.app.handlers.save_pre.remove', 'bpy.app.handlers.save_pre.remove', (['on_addon_save'], {}), '(on_addon_save)\n', (5978, 5993), False, 'import bpy\n'), ((8550, 8588), 'os.path.join', 'os.path.join', (['gan.directory', '"""interps"""'], {}), 
"(gan.directory, 'interps')\n", (8562, 8588), False, 'import os\n'), ((8608, 8642), 'os.path.join', 'os.path.join', (['gan.directory', '"""out"""'], {}), "(gan.directory, 'out')\n", (8620, 8642), False, 'import os\n'), ((4482, 4503), 'os.fsencode', 'os.fsencode', (['src_path'], {}), '(src_path)\n', (4493, 4503), False, 'import os\n'), ((4522, 4539), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (4533, 4539), False, 'import os\n'), ((21921, 22033), 'threading.Thread', 'threading.Thread', ([], {'target': 'enqueue_output', 'args': '(MAT_OT_MATGAN_Generator._popen.stdout, self._q)', 'daemon': '(True)'}), '(target=enqueue_output, args=(MAT_OT_MATGAN_Generator.\n _popen.stdout, self._q), daemon=True)\n', (21937, 22033), False, 'import threading\n'), ((283, 297), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (287, 297), False, 'from pathlib import Path\n'), ((992, 1036), 'os.path.join', 'os.path.join', (["obj['MaterialGAN_Path']", '"""out"""'], {}), "(obj['MaterialGAN_Path'], 'out')\n", (1004, 1036), False, 'import os\n'), ((9188, 9232), 'glob.glob', 'glob.glob', (["(interp_dir + f'/*_{x}_render.png')"], {}), "(interp_dir + f'/*_{x}_render.png')\n", (9197, 9232), False, 'import glob\n'), ((22111, 22233), 'threading.Thread', 'threading.Thread', ([], {'target': 'enqueue_output', 'args': '(MAT_OT_MATGAN_InputFromFlashImage._popen.stdout, self._q)', 'daemon': '(True)'}), '(target=enqueue_output, args=(\n MAT_OT_MATGAN_InputFromFlashImage._popen.stdout, self._q), daemon=True)\n', (22127, 22233), False, 'import threading\n'), ((1351, 1390), 'os.path.join', 'os.path.join', (["obj['Neural_Path']", '"""out"""'], {}), "(obj['Neural_Path'], 'out')\n", (1363, 1390), False, 'import os\n'), ((2045, 2059), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2049, 2059), False, 'from pathlib import Path\n'), ((2640, 2661), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (2652, 2661), False, 'import os\n'), ((2720, 2741), 
'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (2732, 2741), False, 'import os\n'), ((4652, 4677), 'os.path.join', 'os.path.join', (['src_path', 'f'], {}), '(src_path, f)\n', (4664, 4677), False, 'import os\n'), ((4679, 4704), 'os.path.join', 'os.path.join', (['dst_path', 'f'], {}), '(dst_path, f)\n', (4691, 4704), False, 'import os\n'), ((9661, 9679), 'os.path.split', 'os.path.split', (['dir'], {}), '(dir)\n', (9674, 9679), False, 'import os\n'), ((22309, 22429), 'threading.Thread', 'threading.Thread', ([], {'target': 'enqueue_output', 'args': '(MAT_OT_MATGAN_GetInterpolations._popen.stdout, self._q)', 'daemon': '(True)'}), '(target=enqueue_output, args=(\n MAT_OT_MATGAN_GetInterpolations._popen.stdout, self._q), daemon=True)\n', (22325, 22429), False, 'import threading\n'), ((1699, 1743), 'os.path.join', 'os.path.join', (["obj['Algorithmic_Path']", '"""out"""'], {}), "(obj['Algorithmic_Path'], 'out')\n", (1711, 1743), False, 'import os\n'), ((14867, 14901), 'os.path.join', 'os.path.join', (['gan.directory', '"""out"""'], {}), "(gan.directory, 'out')\n", (14879, 14901), False, 'import os\n'), ((14970, 15000), 'os.path.join', 'os.path.join', (['cache_path', 'name'], {}), '(cache_path, name)\n', (14982, 15000), False, 'import os\n'), ((22503, 22621), 'threading.Thread', 'threading.Thread', ([], {'target': 'enqueue_output', 'args': '(MAT_OT_MATGAN_SuperResolution._popen.stdout, self._q)', 'daemon': '(True)'}), '(target=enqueue_output, args=(MAT_OT_MATGAN_SuperResolution\n ._popen.stdout, self._q), daemon=True)\n', (22519, 22621), False, 'import threading\n'), ((2931, 2945), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2935, 2945), False, 'from pathlib import Path\n'), ((14429, 14463), 'os.path.join', 'os.path.join', (['gan.directory', '"""out"""'], {}), "(gan.directory, 'out')\n", (14441, 14463), False, 'import os\n'), ((22689, 22801), 'threading.Thread', 'threading.Thread', ([], {'target': 'enqueue_output', 'args': 
'(MAT_OT_NEURAL_Generator._popen.stdout, self._q)', 'daemon': '(True)'}), '(target=enqueue_output, args=(MAT_OT_NEURAL_Generator.\n _popen.stdout, self._q), daemon=True)\n', (22705, 22801), False, 'import threading\n'), ((16498, 16532), 'os.path.join', 'os.path.join', (['gan.directory', '"""out"""'], {}), "(gan.directory, 'out')\n", (16510, 16532), False, 'import os\n'), ((16601, 16631), 'os.path.join', 'os.path.join', (['cache_path', 'name'], {}), '(cache_path, name)\n', (16613, 16631), False, 'import os\n'), ((17813, 17851), 'os.path.join', 'os.path.join', (['gan.directory', '"""interps"""'], {}), "(gan.directory, 'interps')\n", (17825, 17851), False, 'import os\n'), ((22877, 22997), 'threading.Thread', 'threading.Thread', ([], {'target': 'enqueue_output', 'args': '(MAT_OT_NEURAL_GetInterpolations._popen.stdout, self._q)', 'daemon': '(True)'}), '(target=enqueue_output, args=(\n MAT_OT_NEURAL_GetInterpolations._popen.stdout, self._q), daemon=True)\n', (22893, 22997), False, 'import threading\n'), ((17890, 17932), 'glob.glob', 'glob.glob', (["(interp_path + '/*_*_render.png')"], {}), "(interp_path + '/*_*_render.png')\n", (17899, 17932), False, 'import glob\n'), ((18069, 18094), 'bpy.data.images.load', 'bpy.data.images.load', (['dir'], {}), '(dir)\n', (18089, 18094), False, 'import bpy\n'), ((17674, 17708), 'os.path.join', 'os.path.join', (['gan.directory', '"""out"""'], {}), "(gan.directory, 'out')\n", (17686, 17708), False, 'import os\n'), ((18130, 18148), 'os.path.split', 'os.path.split', (['dir'], {}), '(dir)\n', (18143, 18148), False, 'import os\n'), ((19286, 19320), 'os.path.join', 'os.path.join', (['gan.directory', '"""out"""'], {}), "(gan.directory, 'out')\n", (19298, 19320), False, 'import os\n'), ((19389, 19419), 'os.path.join', 'os.path.join', (['cache_path', 'name'], {}), '(cache_path, name)\n', (19401, 19419), False, 'import os\n'), ((20658, 20696), 'os.path.join', 'os.path.join', (['gan.directory', '"""interps"""'], {}), "(gan.directory, 
'interps')\n", (20670, 20696), False, 'import os\n'), ((18016, 18034), 'os.path.split', 'os.path.split', (['dir'], {}), '(dir)\n', (18029, 18034), False, 'import os\n'), ((18848, 18882), 'os.path.join', 'os.path.join', (['gan.directory', '"""out"""'], {}), "(gan.directory, 'out')\n", (18860, 18882), False, 'import os\n'), ((20735, 20777), 'glob.glob', 'glob.glob', (["(interp_path + '/*_*_render.png')"], {}), "(interp_path + '/*_*_render.png')\n", (20744, 20777), False, 'import glob\n'), ((20914, 20939), 'bpy.data.images.load', 'bpy.data.images.load', (['dir'], {}), '(dir)\n', (20934, 20939), False, 'import bpy\n'), ((21187, 21221), 'os.path.join', 'os.path.join', (['gan.directory', '"""out"""'], {}), "(gan.directory, 'out')\n", (21199, 21221), False, 'import os\n'), ((21290, 21320), 'os.path.join', 'os.path.join', (['cache_path', 'name'], {}), '(cache_path, name)\n', (21302, 21320), False, 'import os\n'), ((20519, 20553), 'os.path.join', 'os.path.join', (['gan.directory', '"""out"""'], {}), "(gan.directory, 'out')\n", (20531, 20553), False, 'import os\n'), ((20975, 20993), 'os.path.split', 'os.path.split', (['dir'], {}), '(dir)\n', (20988, 20993), False, 'import os\n'), ((20861, 20879), 'os.path.split', 'os.path.split', (['dir'], {}), '(dir)\n', (20874, 20879), False, 'import os\n')] |
import re
def part1(lines, yourbag="shiny gold"):
# A nice little regex that will extract a list of all bags in a given line.
# The first is the outermost bag, and the rest are inner bags.
pattern = re.compile(r"(?:\d*)\s*(.*?)\s*bags?[.,]?(?: contain)?\s*")
# We're going to use an adjacency list mapping each bag type to the bag
# types that can contain it.
contained_by = dict()
for line in lines:
outer, *innards = pattern.findall(line)
for inner in innards:
if inner != 'no other':
if inner in contained_by:
contained_by[inner].append(outer)
else:
contained_by[inner] = [outer]
# We're going to start at our bag type. Ask which bag types can contain it,
# add those to as stack, and then add our bag type to the set of all
# "working" outer bag types. Then pop the top bag type of the stack and
# repeat the above process. This continues until the stack is empty.
#
# The answer is then the number of bags in our set (less 1 for our inital
# bag).
#
# This is an alternative to using recursion. Really, though, it's just
# doing the recursion manually. The pushing and the popping off of the
# stack is done for you when you use recursion... you just can't see the
# stack... it's maintained internally. For more information google "call
# stack".
stack = [yourbag]
works = set()
while len(stack) != 0:
bag = stack.pop()
if bag not in works:
if bag in contained_by:
stack.extend(contained_by[bag])
works.add(bag)
return len(works) - 1
def part2(lines, yourbag="shiny gold"):
# This regex is similar to part 1 except it includes the number of times an
# inner bag type must occur.
pattern = re.compile(r"(\d*)\s*(.*?)\s*bags?[.,]?(?: contain)?\s*")
# We'll be keeping an adjacency list mapping each outer bag type to a list
# of the required inner bags and their multiplicies.
must_contain = dict()
for line in lines:
(_, outer), *innards = pattern.findall(line)
for (n, inner) in innards:
if inner != 'no other':
if outer in must_contain:
must_contain[outer].append((inner, int(n)))
else:
must_contain[outer] = [(inner, int(n))]
# I'll leave it to you to work this one out. ;-)
stack = [(yourbag, 1)]
numbags = 0
while len(stack) != 0:
bag, n = stack.pop()
numbags += n
if bag in must_contain:
for innerbag, m in must_contain[bag]:
stack.append((innerbag, n * m))
return numbags - 1
if __name__ == '__main__':
with open("test.txt") as handle:
lines = handle.readlines()
print("Part I: ", part1(lines))
print("Part II:", part2(lines))
| [
"re.compile"
] | [((213, 275), 're.compile', 're.compile', (['"""(?:\\\\d*)\\\\s*(.*?)\\\\s*bags?[.,]?(?: contain)?\\\\s*"""'], {}), "('(?:\\\\d*)\\\\s*(.*?)\\\\s*bags?[.,]?(?: contain)?\\\\s*')\n", (223, 275), False, 'import re\n'), ((1866, 1926), 're.compile', 're.compile', (['"""(\\\\d*)\\\\s*(.*?)\\\\s*bags?[.,]?(?: contain)?\\\\s*"""'], {}), "('(\\\\d*)\\\\s*(.*?)\\\\s*bags?[.,]?(?: contain)?\\\\s*')\n", (1876, 1926), False, 'import re\n')] |
from backbone import entry_point
if __name__ == '__main__':
entry_point.main()
| [
"backbone.entry_point.main"
] | [((65, 83), 'backbone.entry_point.main', 'entry_point.main', ([], {}), '()\n', (81, 83), False, 'from backbone import entry_point\n')] |
import scrapy,json,re,time,os,glob
from scrapy.exceptions import CloseSpider
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
#get all the imdb xpaths from xpaths.json file
with open('./locators/xpaths.json') as f:
xpaths = json.load(f)
imdb = xpaths["imdb"][0]
#define all the required variables
movie_name = ''
project_path = r'/Users/eshwar/Documents/projects/sentiment_analysis_on_movie_reviews/'
scraped_reviews_path = project_path + "data/scraped_reviews/"
predicted_reviews_path = project_path + "data/predicted_reviews/"
chrome_driver_path = project_path+"scrape_reviews/chrome_driver/chromedriver"
class IMDBSpider(scrapy.Spider):
name = 'imdb_spider'
allowed_domains = ["imdb.com"]
start_urls = [
'https://www.imdb.com/find?ref_=nv_sr_fn&q='
]
def start_requests(self):
for url in self.start_urls:
yield scrapy.Request(url+self.ip+"&s=tt" , dont_filter=True)
def parse(self, response):
#get all the globally defined variables
global movie_name, project_path, scraped_reviews_path, chrome_driver_path
#get first title
first_title = response.xpath(imdb["first_title"]).extract()
#extract title id from first title
for each_split in first_title[0].split("/"):
if each_split.startswith("tt"):
title_id = each_split
#extract movie name from first title
movie_name = str(re.search(r'">(.+?)</a>', str(first_title[0])).group(1)).replace(" ","_")
temp_movie_name = movie_name
#put timestamp
epoch = time.time()
movie_name+="$#$"+str(epoch)
# create temp file to store movie name temporarily
with open(scraped_reviews_path + "temp.txt", 'w') as f:
f.write(movie_name)
#check timestamp
current_dir = os.getcwd()
change_dir = scraped_reviews_path
os.chdir(change_dir)
temp = temp_movie_name+"$#$"
old_file_name = glob.glob(temp+"*")
diff = 0
#flag determines if searched movie is already searched within a week or not
#flag = 0 (file available)
#flag = 1 (new search)
flag = 1
if len(old_file_name) > 0:
old_file_name = old_file_name[0]
old_timestamp = old_file_name.split("$#$")[1][:-5]
diff = epoch - float(old_timestamp)
if diff < 604800:
flag = 0
with open(project_path+"flag.txt", "w") as f:
f.write(str(flag))
raise CloseSpider('file available')
else:
os.remove(scraped_reviews_path+old_file_name)
os.remove(predicted_reviews_path+old_file_name)
os.chdir(current_dir)
#form imdb reviews link
reviews_link = imdb["urv_link_part_1"] + title_id + imdb["urv_link_part_2"]
#get chrome driver executable
options = Options()
options.headless = True
chrome_driver = webdriver.Chrome(chrome_driver_path, chrome_options=options)
#go to reviews link
chrome_driver.get(reviews_link)
#click load more button until the button exists
while True:
try:
WebDriverWait(chrome_driver, 10).until(EC.element_to_be_clickable((By.XPATH, imdb["load_more_button"]))).click()
except TimeoutException as ex:
break
#get the number of reviews
num_of_reviews = chrome_driver.find_element_by_xpath(imdb["number_of_reviews"]).text
reviews_no = num_of_reviews.split()[0]
print(reviews_no)
#open all the spoilers
spoiler_click = chrome_driver.find_elements_by_xpath(imdb["spoiler_open"])
for i in range(0, len(spoiler_click)):
if spoiler_click[i].is_displayed():
spoiler_click[i].click()
#get all the reviews
reviews = chrome_driver.find_elements_by_xpath(imdb["reviews"])
#convert reviews to list
reviews_list = [str(review.text).replace("\n"," ") for review in reviews]
#get all the authors
authors = chrome_driver.find_elements_by_xpath(imdb["authors"])
#convert authors to list
authors_list = [a.text for a in authors]
#get all the review dates
review_dates = chrome_driver.find_elements_by_xpath(imdb["review_dates"])
#convert review dates to list
review_dates_list = [rd.text for rd in review_dates]
#get all the titles
titles = chrome_driver.find_elements_by_xpath(imdb["titles"])
#convert titles to list
titles_list = [str(t.text).replace("\n", " ") for t in titles]
#create json_data variable with authors, review dates, titles and reviews
json_data = [
{
"author" : a,
"review_date" : rd,
"title" : t,
"review" : re
} for a, rd, t, re in zip(authors_list, review_dates_list, titles_list, reviews_list)
]
output_filename = scraped_reviews_path + movie_name + ".json"
with open(output_filename, 'w') as f:
json.dump(json_data, f, ensure_ascii=False, indent=4)
#close the chrome driver
chrome_driver.close()
| [
"scrapy.exceptions.CloseSpider",
"selenium.webdriver.chrome.options.Options",
"selenium.webdriver.support.ui.WebDriverWait",
"selenium.webdriver.Chrome",
"json.dump",
"selenium.webdriver.support.expected_conditions.element_to_be_clickable",
"os.getcwd",
"os.chdir",
"scrapy.Request",
"json.load",
... | [((486, 498), 'json.load', 'json.load', (['f'], {}), '(f)\n', (495, 498), False, 'import scrapy, json, re, time, os, glob\n'), ((1839, 1850), 'time.time', 'time.time', ([], {}), '()\n', (1848, 1850), False, 'import scrapy, json, re, time, os, glob\n'), ((2092, 2103), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2101, 2103), False, 'import scrapy, json, re, time, os, glob\n'), ((2154, 2174), 'os.chdir', 'os.chdir', (['change_dir'], {}), '(change_dir)\n', (2162, 2174), False, 'import scrapy, json, re, time, os, glob\n'), ((2236, 2257), 'glob.glob', 'glob.glob', (["(temp + '*')"], {}), "(temp + '*')\n", (2245, 2257), False, 'import scrapy, json, re, time, os, glob\n'), ((2991, 3012), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (2999, 3012), False, 'import scrapy, json, re, time, os, glob\n'), ((3187, 3196), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (3194, 3196), False, 'from selenium.webdriver.chrome.options import Options\n'), ((3253, 3313), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['chrome_driver_path'], {'chrome_options': 'options'}), '(chrome_driver_path, chrome_options=options)\n', (3269, 3313), False, 'from selenium import webdriver\n'), ((5428, 5481), 'json.dump', 'json.dump', (['json_data', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(json_data, f, ensure_ascii=False, indent=4)\n', (5437, 5481), False, 'import scrapy, json, re, time, os, glob\n'), ((1127, 1184), 'scrapy.Request', 'scrapy.Request', (["(url + self.ip + '&s=tt')"], {'dont_filter': '(True)'}), "(url + self.ip + '&s=tt', dont_filter=True)\n", (1141, 1184), False, 'import scrapy, json, re, time, os, glob\n'), ((2809, 2838), 'scrapy.exceptions.CloseSpider', 'CloseSpider', (['"""file available"""'], {}), "('file available')\n", (2820, 2838), False, 'from scrapy.exceptions import CloseSpider\n'), ((2873, 2920), 'os.remove', 'os.remove', (['(scraped_reviews_path + old_file_name)'], {}), '(scraped_reviews_path + 
old_file_name)\n', (2882, 2920), False, 'import scrapy, json, re, time, os, glob\n'), ((2935, 2984), 'os.remove', 'os.remove', (['(predicted_reviews_path + old_file_name)'], {}), '(predicted_reviews_path + old_file_name)\n', (2944, 2984), False, 'import scrapy, json, re, time, os, glob\n'), ((3532, 3596), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (["(By.XPATH, imdb['load_more_button'])"], {}), "((By.XPATH, imdb['load_more_button']))\n", (3558, 3596), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((3493, 3525), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['chrome_driver', '(10)'], {}), '(chrome_driver, 10)\n', (3506, 3525), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')] |
from django.contrib import admin
from notifications.models import Notification
@admin.register(Notification)
class NotificationAdmin(admin.ModelAdmin):
list_display = (
'sender',
'recipient',
'creation_time',
'verb',
'unread',
)
list_filter = (
'sender',
'recipient',
'unread',
'verb',
)
search_fields = (
'verb',
)
| [
"django.contrib.admin.register"
] | [((83, 111), 'django.contrib.admin.register', 'admin.register', (['Notification'], {}), '(Notification)\n', (97, 111), False, 'from django.contrib import admin\n')] |
# --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df=pd.read_csv(path)
print(df.head())
X=df.drop(columns='insuranceclaim')
y=df['insuranceclaim']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=6)
# Code ends here
# --------------
import matplotlib.pyplot as plt
# Code starts here
plt.boxplot(X_train['bmi'])
plt.show()
q_value=X_train['bmi'].quantile(0.95)
print(y_train.value_counts())
# Code ends here
# --------------
import seaborn as sns
# Code starts here
relation=X_train.corr()
print(relation)
sns.pairplot(X_train)
plt.show()
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
cols=['children','sex','region','smoker']
fig,axes=plt.subplots(2,2)
for i in range(2):
for j in range(2):
col=cols[i*2+j]
sns.countplot(X_train[col],hue=y_train,ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
lr=LogisticRegression(random_state=9)
grid=GridSearchCV(estimator=lr,param_grid=parameters)
grid.fit(X_train,y_train)
y_pred=grid.predict(X_test)
accuracy=accuracy_score(y_test,y_pred)
print(accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Code starts here
score=roc_auc_score(y_test,y_pred)
y_pred_proba=grid.predict_proba(X_test)[:,1]
fpr,tpr,_=metrics.roc_curve(y_test,y_pred)
roc_auc=roc_auc_score(y_test,y_pred_proba)
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
# Code ends here
| [
"matplotlib.pyplot.boxplot",
"sklearn.model_selection.GridSearchCV",
"sklearn.metrics.accuracy_score",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.subplots",... | [((170, 203), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (193, 203), False, 'import warnings\n'), ((227, 244), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (238, 244), True, 'import pandas as pd\n'), ((351, 404), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(6)'}), '(X, y, test_size=0.2, random_state=6)\n', (367, 404), False, 'from sklearn.model_selection import train_test_split\n'), ((492, 519), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (["X_train['bmi']"], {}), "(X_train['bmi'])\n", (503, 519), True, 'import matplotlib.pyplot as plt\n'), ((520, 530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (528, 530), True, 'import matplotlib.pyplot as plt\n'), ((718, 739), 'seaborn.pairplot', 'sns.pairplot', (['X_train'], {}), '(X_train)\n', (730, 739), True, 'import seaborn as sns\n'), ((740, 750), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (748, 750), True, 'import matplotlib.pyplot as plt\n'), ((912, 930), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (924, 930), True, 'import matplotlib.pyplot as plt\n'), ((1345, 1379), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(9)'}), '(random_state=9)\n', (1363, 1379), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1385, 1434), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'lr', 'param_grid': 'parameters'}), '(estimator=lr, param_grid=parameters)\n', (1397, 1434), False, 'from sklearn.model_selection import GridSearchCV, RandomizedSearchCV\n'), ((1497, 1527), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1511, 1527), False, 'from sklearn.metrics import accuracy_score\n'), ((1675, 1704), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 
'y_pred'], {}), '(y_test, y_pred)\n', (1688, 1704), False, 'from sklearn.metrics import roc_auc_score\n'), ((1760, 1793), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1777, 1793), False, 'from sklearn import metrics\n'), ((1801, 1836), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_proba'], {}), '(y_test, y_pred_proba)\n', (1814, 1836), False, 'from sklearn.metrics import roc_auc_score\n'), ((1004, 1059), 'seaborn.countplot', 'sns.countplot', (['X_train[col]'], {'hue': 'y_train', 'ax': 'axes[i, j]'}), '(X_train[col], hue=y_train, ax=axes[i, j])\n', (1017, 1059), True, 'import seaborn as sns\n')] |
"""Convert a Caffe model file to TensorFlow checkpoint format.
Assume that the network built is a equivalent (or a sub-) to the Caffe
definition.
"""
import tensorflow as tf
from nets import caffe_scope
from nets import nets_factory
slim = tf.contrib.slim
# =========================================================================== #
# Main flags.
# =========================================================================== #
tf.app.flags.DEFINE_string(
'model_name', 'ssd_300_vgg', 'Name of the model to convert.')
tf.app.flags.DEFINE_string(
'num_classes', 21, 'Number of classes in the dataset.')
tf.app.flags.DEFINE_string(
'caffemodel_path', None,
'The path to the Caffe model file to convert.')
FLAGS = tf.app.flags.FLAGS
# =========================================================================== #
# Main converting routine.
# =========================================================================== #
def main(_):
# Caffe scope...
caffemodel = caffe_scope.CaffeScope()
caffemodel.load(FLAGS.caffemodel_path)
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
global_step = slim.create_global_step()
num_classes = int(FLAGS.num_classes)
# Select the network.
ssd_class = nets_factory.get_network(FLAGS.model_name)
ssd_params = ssd_class.default_params._replace(num_classes=num_classes)
ssd_net = ssd_class(ssd_params)
ssd_shape = ssd_net.params.img_shape
# Image placeholder and model.
shape = (1, ssd_shape[0], ssd_shape[1], 3)
img_input = tf.placeholder(shape=shape, dtype=tf.float32)
# Create model.
with slim.arg_scope(ssd_net.arg_scope_caffe(caffemodel)):
ssd_net.net(img_input, is_training=False)
init_op = tf.global_variables_initializer()
with tf.Session() as session:
# Run the init operation.
session.run(init_op)
# Save model in checkpoint.
saver = tf.train.Saver()
ckpt_path = FLAGS.caffemodel_path.replace('.caffemodel', '.ckpt')
saver.save(session, ckpt_path, write_meta_graph=False)
if __name__ == '__main__':
tf.app.run()
| [
"tensorflow.Graph",
"nets.nets_factory.get_network",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.logging.set_verbosity",
"tensorflow.train.Saver",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.global_variables_initializer",
"nets.caffe_scope.CaffeScope",
"tensorflow.app.run"
] | [((434, 526), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""model_name"""', '"""ssd_300_vgg"""', '"""Name of the model to convert."""'], {}), "('model_name', 'ssd_300_vgg',\n 'Name of the model to convert.')\n", (460, 526), True, 'import tensorflow as tf\n'), ((528, 614), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""num_classes"""', '(21)', '"""Number of classes in the dataset."""'], {}), "('num_classes', 21,\n 'Number of classes in the dataset.')\n", (554, 614), True, 'import tensorflow as tf\n'), ((616, 719), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""caffemodel_path"""', 'None', '"""The path to the Caffe model file to convert."""'], {}), "('caffemodel_path', None,\n 'The path to the Caffe model file to convert.')\n", (642, 719), True, 'import tensorflow as tf\n'), ((993, 1017), 'nets.caffe_scope.CaffeScope', 'caffe_scope.CaffeScope', ([], {}), '()\n', (1015, 1017), False, 'from nets import caffe_scope\n'), ((1066, 1107), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (1090, 1107), True, 'import tensorflow as tf\n'), ((2213, 2225), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2223, 2225), True, 'import tensorflow as tf\n'), ((1286, 1328), 'nets.nets_factory.get_network', 'nets_factory.get_network', (['FLAGS.model_name'], {}), '(FLAGS.model_name)\n', (1310, 1328), False, 'from nets import nets_factory\n'), ((1605, 1650), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': 'shape', 'dtype': 'tf.float32'}), '(shape=shape, dtype=tf.float32)\n', (1619, 1650), True, 'import tensorflow as tf\n'), ((1814, 1847), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1845, 1847), True, 'import tensorflow as tf\n'), ((1861, 1873), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1871, 1873), True, 'import tensorflow as tf\n'), ((2018, 2034), 
'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2032, 2034), True, 'import tensorflow as tf\n'), ((1117, 1127), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1125, 1127), True, 'import tensorflow as tf\n')] |
import tensorflow as tf
from detection.utils.misc import *
class PyramidROIAlign(tf.keras.layers.Layer):
def __init__(self, pool_shape, **kwargs):
'''
Implements ROI Pooling on multiple levels of the feature pyramid.
Attributes
---
pool_shape: (height, width) of the output pooled regions.
Example: (7, 7)
'''
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def call(self, inputs, training=True):
'''
Args
---
rois_list: list of [num_rois, (y1, x1, y2, x2)] in normalized coordinates.
feature_map_list: List of [batch, height, width, channels].
feature maps from different levels of the pyramid.
img_metas: [batch_size, 11]
Returns
---
pooled_rois_list: list of [num_rois, pooled_height, pooled_width, channels].
The width and height are those specific in the pool_shape in the layer
constructor.
'''
rois_list, feature_map_list, img_metas = inputs # [2000 ,4], list:[P2, P3, P4, P5]
pad_shapes = calc_pad_shapes(img_metas)
pad_areas = pad_shapes[:, 0] * pad_shapes[:, 1] # 1216*1216
num_rois_list = [rois.shape.as_list()[0] for rois in rois_list] # data:[2000]
roi_indices = tf.constant(
[i for i in range(len(rois_list)) for _ in range(rois_list[i].shape.as_list()[0])],
dtype=tf.int32
) #[0.....], shape:[2000]
areas = tf.constant(# range(1) range(2000)
[pad_areas[i] for i in range(pad_areas.shape[0]) for _ in range(num_rois_list[i])],
dtype=tf.float32
)#[1216*1216, 1216*1216,...], shape:[2000]
rois = tf.concat(rois_list, axis=0) # [2000, 4]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(rois, 4, axis=1) # 4 of [2000, 1]
h = y2 - y1 # [2000, 1]
w = x2 - x1 # [2000, 1]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
roi_level = tf.math.log( # [2000]
tf.sqrt(tf.squeeze(h * w, 1))
/ tf.cast((224.0 / tf.sqrt(areas * 1.0)), tf.float32)
) / tf.math.log(2.0)
roi_level = tf.minimum(5, tf.maximum( # [2000], clamp to [2-5]
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
# roi_level will indicates which level of feature to use
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled_rois = []
roi_to_level = []
for i, level in enumerate(range(2, 6)): # 2,3,4,5
ix = tf.where(tf.equal(roi_level, level)) # [1999, 1], means 1999 of 2000 select P2
level_rois = tf.gather_nd(rois, ix) # boxes to crop, [1999, 4]
# ROI indices for crop_and_resize.
level_roi_indices = tf.gather_nd(roi_indices, ix) # [19999], data:[0....0]
# Keep track of which roi is mapped to which level
roi_to_level.append(ix)
# Stop gradient propogation to ROI proposals
level_rois = tf.stop_gradient(level_rois)
level_roi_indices = tf.stop_gradient(level_roi_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_rois, pool_height, pool_width, channels]
pooled_rois.append(tf.image.crop_and_resize(
feature_map_list[i], level_rois, level_roi_indices, self.pool_shape,
method="bilinear")) # [1, 304, 304, 256], [1999, 4], [1999], [2]=[7,7]=>[1999,7,7,256]
# [1999, 7, 7, 256], [], [], [1,7,7,256] => [2000, 7, 7, 256]
# Pack pooled features into one tensor
pooled_rois = tf.concat(pooled_rois, axis=0)
# Pack roi_to_level mapping into one array and add another
# column representing the order of pooled rois
roi_to_level = tf.concat(roi_to_level, axis=0) # [2000, 1], 1999 of P2, and 1 other P
roi_range = tf.expand_dims(tf.range(tf.shape(roi_to_level)[0]), 1) # [2000, 1], 0~1999
roi_to_level = tf.concat([tf.cast(roi_to_level, tf.int32), roi_range],
axis=1) # [2000, 2], (P, range)
# Rearrange pooled features to match the order of the original rois
# Sort roi_to_level by batch then roi indextf.Tensor([ 0 100001 200002 ... 199801997 199901998 20101999], shape=(2000,), dtype=int32)
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = roi_to_level[:, 0] * 100000 + roi_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape( # k=2000
roi_to_level)[0]).indices[::-1]# reverse the order
ix = tf.gather(roi_to_level[:, 1], ix) # [2000]
pooled_rois = tf.gather(pooled_rois, ix) # [2000, 7, 7, 256]
# 2000 of [7, 7, 256]
pooled_rois_list = tf.split(pooled_rois, num_rois_list, axis=0)
return pooled_rois_list
| [
"tensorflow.round",
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.math.log",
"tensorflow.split",
"tensorflow.concat",
"tensorflow.stop_gradient",
"tensorflow.gather",
"tensorflow.image.crop_and_resize",
"tensorflow.sqrt",
"tensorflow.cast",
"tensorflow.gather_nd",
"tensorflow.squeeze"
... | [((1880, 1908), 'tensorflow.concat', 'tf.concat', (['rois_list'], {'axis': '(0)'}), '(rois_list, axis=0)\n', (1889, 1908), True, 'import tensorflow as tf\n'), ((2030, 2055), 'tensorflow.split', 'tf.split', (['rois', '(4)'], {'axis': '(1)'}), '(rois, 4, axis=1)\n', (2038, 2055), True, 'import tensorflow as tf\n'), ((4428, 4458), 'tensorflow.concat', 'tf.concat', (['pooled_rois'], {'axis': '(0)'}), '(pooled_rois, axis=0)\n', (4437, 4458), True, 'import tensorflow as tf\n'), ((4605, 4636), 'tensorflow.concat', 'tf.concat', (['roi_to_level'], {'axis': '(0)'}), '(roi_to_level, axis=0)\n', (4614, 4636), True, 'import tensorflow as tf\n'), ((5440, 5473), 'tensorflow.gather', 'tf.gather', (['roi_to_level[:, 1]', 'ix'], {}), '(roi_to_level[:, 1], ix)\n', (5449, 5473), True, 'import tensorflow as tf\n'), ((5505, 5531), 'tensorflow.gather', 'tf.gather', (['pooled_rois', 'ix'], {}), '(pooled_rois, ix)\n', (5514, 5531), True, 'import tensorflow as tf\n'), ((5609, 5653), 'tensorflow.split', 'tf.split', (['pooled_rois', 'num_rois_list'], {'axis': '(0)'}), '(pooled_rois, num_rois_list, axis=0)\n', (5617, 5653), True, 'import tensorflow as tf\n'), ((2522, 2538), 'tensorflow.math.log', 'tf.math.log', (['(2.0)'], {}), '(2.0)\n', (2533, 2538), True, 'import tensorflow as tf\n'), ((3046, 3068), 'tensorflow.gather_nd', 'tf.gather_nd', (['rois', 'ix'], {}), '(rois, ix)\n', (3058, 3068), True, 'import tensorflow as tf\n'), ((3176, 3205), 'tensorflow.gather_nd', 'tf.gather_nd', (['roi_indices', 'ix'], {}), '(roi_indices, ix)\n', (3188, 3205), True, 'import tensorflow as tf\n'), ((3414, 3442), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['level_rois'], {}), '(level_rois)\n', (3430, 3442), True, 'import tensorflow as tf\n'), ((3475, 3510), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['level_roi_indices'], {}), '(level_roi_indices)\n', (3491, 3510), True, 'import tensorflow as tf\n'), ((2951, 2977), 'tensorflow.equal', 'tf.equal', (['roi_level', 'level'], {}), '(roi_level, 
level)\n', (2959, 2977), True, 'import tensorflow as tf\n'), ((4075, 4191), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (['feature_map_list[i]', 'level_rois', 'level_roi_indices', 'self.pool_shape'], {'method': '"""bilinear"""'}), "(feature_map_list[i], level_rois, level_roi_indices,\n self.pool_shape, method='bilinear')\n", (4099, 4191), True, 'import tensorflow as tf\n'), ((4805, 4836), 'tensorflow.cast', 'tf.cast', (['roi_to_level', 'tf.int32'], {}), '(roi_to_level, tf.int32)\n', (4812, 4836), True, 'import tensorflow as tf\n'), ((4720, 4742), 'tensorflow.shape', 'tf.shape', (['roi_to_level'], {}), '(roi_to_level)\n', (4728, 4742), True, 'import tensorflow as tf\n'), ((2402, 2422), 'tensorflow.squeeze', 'tf.squeeze', (['(h * w)', '(1)'], {}), '(h * w, 1)\n', (2412, 2422), True, 'import tensorflow as tf\n'), ((2637, 2656), 'tensorflow.round', 'tf.round', (['roi_level'], {}), '(roi_level)\n', (2645, 2656), True, 'import tensorflow as tf\n'), ((2463, 2483), 'tensorflow.sqrt', 'tf.sqrt', (['(areas * 1.0)'], {}), '(areas * 1.0)\n', (2470, 2483), True, 'import tensorflow as tf\n'), ((5345, 5367), 'tensorflow.shape', 'tf.shape', (['roi_to_level'], {}), '(roi_to_level)\n', (5353, 5367), True, 'import tensorflow as tf\n')] |
""" find the Schwarzschild radius of the Sun in m using pint"""
import pint
class Sun:
""" Class to describe a star based on its mass in terms of solar masses """
def __init__(self, mass):
self.ureg = pint.UnitRegistry()
self.ureg.define("Msolar = 1.98855*10**30 * kilogram")
self.mass = mass * self.ureg.Msolar
def schwarz(self):
""" Find the Schwarzchild radius for the class """
g_newt = self.ureg.newtonian_constant_of_gravitation
msun = self.mass
r_sch = 2 * g_newt * msun / self.ureg.speed_of_light**2
return r_sch.to_base_units()
def schwarz_rad(mass):
""" Given a mass, find the Schwarzschild radius """
star = Sun(mass)
radius = star.schwarz()
return radius
if __name__ == "__main__":
MASS = 1.0
RAD = schwarz_rad(MASS)
print(RAD)
| [
"pint.UnitRegistry"
] | [((219, 238), 'pint.UnitRegistry', 'pint.UnitRegistry', ([], {}), '()\n', (236, 238), False, 'import pint\n')] |
from dsa.parsing.line_parsing import line_parser
from dsa.parsing.token_parsing import make_parser
_parser = line_parser(
'Huffman table entry',
make_parser(
'Huffman table entry data',
('integer', 'encoded bit sequence'),
('hexdump', 'decoded bytes')
)
)
class HuffmanTable:
def __init__(self, decode, encode):
self._decode = decode
self._encode = encode
def _decode_gen(self, stream):
read_byte = stream.read(1)[0]
bit_offset = 0
value = 1
while True:
if value in self._decode:
encoded = self._decode[value]
yield encoded
if encoded[-1] == 0:
return
value = 1 # clear composed value
# append a bit to the composed value
value = (value << 1) | ((read_byte >> bit_offset) & 1)
bit_offset += 1
if bit_offset == 8:
bit_offset = 0
read_byte = stream.read(1)[0]
def decode(self, stream):
return b''.join(self._decode_gen(stream))
class Loader:
def __init__(self):
self._decode = {}
self._encode = {}
def line(self, tokens):
compressed, uncompressed = _parser(tokens)[0]
self._decode[compressed] = uncompressed
self._encode[uncompressed] = compressed
def result(self):
return HuffmanTable(self._decode, self._encode)
| [
"dsa.parsing.token_parsing.make_parser"
] | [((161, 271), 'dsa.parsing.token_parsing.make_parser', 'make_parser', (['"""Huffman table entry data"""', "('integer', 'encoded bit sequence')", "('hexdump', 'decoded bytes')"], {}), "('Huffman table entry data', ('integer', 'encoded bit sequence'),\n ('hexdump', 'decoded bytes'))\n", (172, 271), False, 'from dsa.parsing.token_parsing import make_parser\n')] |
import argparse
import os.path as osp
from glob import glob
import cv2
import pandas as pd
from tqdm import tqdm
from gwd.converters import kaggle2coco
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--image-pattern", default="/data/SPIKE_images/*jpg")
parser.add_argument("--annotation-root", default="/data/SPIKE_annotations")
parser.add_argument("--kaggle_output_path", default="/data/spike.csv")
parser.add_argument("--coco_output_path", default="/data/coco_spike.json")
return parser.parse_args()
def main():
args = parse_args()
img_paths = glob(args.image_pattern)
annotations = []
for img_path in tqdm(img_paths):
ann_path = osp.join(args.annotation_root, (osp.basename(img_path.replace("jpg", "bboxes.tsv"))))
ann = pd.read_csv(ann_path, sep="\t", names=["x_min", "y_min", "x_max", "y_max"])
h, w = cv2.imread(img_path).shape[:2]
ann[["x_min", "x_max"]] = ann[["x_min", "x_max"]].clip(0, w)
ann[["y_min", "y_max"]] = ann[["y_min", "y_max"]].clip(0, h)
ann["height"] = h
ann["width"] = w
ann["bbox_width"] = ann["x_max"] - ann["x_min"]
ann["bbox_height"] = ann["y_max"] - ann["y_min"]
ann = ann[(ann["bbox_width"] > 0) & (ann["bbox_height"] > 0)].copy()
ann["bbox"] = ann[["x_min", "y_min", "bbox_width", "bbox_height"]].values.tolist()
ann["image_id"] = osp.basename(img_path).split(".")[0]
annotations.append(ann)
annotations = pd.concat(annotations)
annotations["source"] = "spike"
print(annotations.head())
annotations[["image_id", "source", "width", "height", "bbox"]].to_csv(args.kaggle_output_path, index=False)
kaggle2coco.main(args.kaggle_output_path, args.coco_output_path)
if __name__ == "__main__":
main()
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"tqdm.tqdm",
"os.path.basename",
"gwd.converters.kaggle2coco.main",
"pandas.concat",
"glob.glob",
"cv2.imread"
] | [((187, 212), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (210, 212), False, 'import argparse\n'), ((610, 634), 'glob.glob', 'glob', (['args.image_pattern'], {}), '(args.image_pattern)\n', (614, 634), False, 'from glob import glob\n'), ((676, 691), 'tqdm.tqdm', 'tqdm', (['img_paths'], {}), '(img_paths)\n', (680, 691), False, 'from tqdm import tqdm\n'), ((1517, 1539), 'pandas.concat', 'pd.concat', (['annotations'], {}), '(annotations)\n', (1526, 1539), True, 'import pandas as pd\n'), ((1722, 1786), 'gwd.converters.kaggle2coco.main', 'kaggle2coco.main', (['args.kaggle_output_path', 'args.coco_output_path'], {}), '(args.kaggle_output_path, args.coco_output_path)\n', (1738, 1786), False, 'from gwd.converters import kaggle2coco\n'), ((812, 887), 'pandas.read_csv', 'pd.read_csv', (['ann_path'], {'sep': '"""\t"""', 'names': "['x_min', 'y_min', 'x_max', 'y_max']"}), "(ann_path, sep='\\t', names=['x_min', 'y_min', 'x_max', 'y_max'])\n", (823, 887), True, 'import pandas as pd\n'), ((903, 923), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (913, 923), False, 'import cv2\n'), ((1430, 1452), 'os.path.basename', 'osp.basename', (['img_path'], {}), '(img_path)\n', (1442, 1452), True, 'import os.path as osp\n')] |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the shell console
:author: <NAME>
"""
# Pelix
from pelix.utilities import to_str, to_bytes
# Standard library
import random
import string
import sys
import threading
import time
# Tests
try:
import unittest2 as unittest
except ImportError:
import unittest
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
try:
import subprocess
except ImportError:
# Can't run the test if we can't start another process
pass
else:
    class ShellStandaloneTest(unittest.TestCase):
        """
        Tests the console shell when started as a script
        (``python -m pelix.shell``), driven through stdin/stdout pipes.
        """
        @staticmethod
        def random_str():
            """
            Generates a random string
            :return: A random string (a shuffle of the ASCII letters)
            """
            data = list(string.ascii_letters)
            random.shuffle(data)
            return ''.join(data)
        def test_echo(self):
            """
            Tests the console shell 'echo' method
            """
            # Get shell PS1 (static method)
            import pelix.shell.core
            ps1 = pelix.shell.core._ShellService.get_ps1()
            # Start the shell process
            process = subprocess.Popen(
                [sys.executable, '-m', 'pelix.shell'],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            # Avoid being blocked...
            # Safety net: terminate the child if no prompt shows up in 5s.
            timer = threading.Timer(5, process.terminate)
            timer.start()
            # Wait for prompt
            # Read one byte at a time until the PS1 prompt is seen.
            got = ""
            while ps1 not in got:
                char = to_str(process.stdout.read(1))
                if not char:
                    if sys.version_info[0] == 2:
                        self.skipTest("Shell console test doesn't work on "
                                      "Python 2.7 with Travis")
                    else:
                        # NOTE(review): poll() returns 0 (falsy) on a clean
                        # exit, so this reports "<no output>" even when the
                        # process ended normally with data pending — confirm
                        # whether "process.poll() is not None" was intended.
                        if process.poll():
                            output = to_str(process.stdout.read())
                        else:
                            output = "<no output>"
                        self.fail("Can't read from stdout (rc={})\n{}"
                                  .format(process.returncode, output))
                else:
                    got += char
            # We should be good
            timer.cancel()
            try:
                # Try echoing
                data = self.random_str()
                # Write command
                process.stdin.write(to_bytes("echo {}\n".format(data)))
                process.stdin.flush()
                # Read result
                last_line = to_str(process.stdout.readline()).rstrip()
                self.assertEqual(last_line, data, "Wrong output")
                # Stop the process
                process.stdin.write(to_bytes("exit\n"))
                process.stdin.flush()
                # Wait for the process to stop (1 second max)
                delta = 0
                start = time.time()
                while delta <= 1:
                    delta = time.time() - start
                    if process.poll() is not None:
                        break
                    time.sleep(.1)
                else:
                    # while/else: the loop timed out without hitting break.
                    self.fail("Process took too long to stop")
            finally:
                try:
                    # Kill it in any case
                    process.terminate()
                except OSError:
                    # Process was already stopped
                    pass
        def test_properties(self):
            """
            Tests the console shell properties parameter
            """
            # Prepare some properties
            # 5-char keys, full-length random values.
            key1 = self.random_str()[:5]
            key2 = self.random_str()[:5]
            val1 = self.random_str()
            val2 = self.random_str()
            # Start the shell process
            # Properties are passed on the command line as -D key=value pairs.
            process = subprocess.Popen(
                [sys.executable, '-m', 'pelix.shell',
                 '-D', '{}={}'.format(key1, val1), '{}={}'.format(key2, val2)],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            try:
                # List properties, stop and get output
                output = to_str(process.communicate(to_bytes("properties"))[0])
                found = 0
                for line in output.splitlines(False):
                    if key1 in line:
                        self.assertIn(val1, line)
                        found += 1
                    elif key2 in line:
                        self.assertIn(val2, line)
                        found += 1
                self.assertEqual(found, 2, "Wrong number of properties")
            finally:
                try:
                    # Kill it in any case
                    process.terminate()
                except OSError:
                    # Process was already stopped
                    pass
| [
"random.shuffle",
"subprocess.Popen",
"threading.Timer",
"time.sleep",
"pelix.utilities.to_bytes",
"time.time"
] | [((1121, 1141), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (1135, 1141), False, 'import random\n'), ((1487, 1620), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, '-m', 'pelix.shell']"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "([sys.executable, '-m', 'pelix.shell'], stdin=subprocess.\n PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n", (1503, 1620), False, 'import subprocess\n'), ((1723, 1760), 'threading.Timer', 'threading.Timer', (['(5)', 'process.terminate'], {}), '(5, process.terminate)\n', (1738, 1760), False, 'import threading\n'), ((3262, 3273), 'time.time', 'time.time', ([], {}), '()\n', (3271, 3273), False, 'import time\n'), ((3091, 3109), 'pelix.utilities.to_bytes', 'to_bytes', (['"""exit\n"""'], {}), "('exit\\n')\n", (3099, 3109), False, 'from pelix.utilities import to_str, to_bytes\n'), ((3457, 3472), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3467, 3472), False, 'import time\n'), ((3336, 3347), 'time.time', 'time.time', ([], {}), '()\n', (3345, 3347), False, 'import time\n'), ((4509, 4531), 'pelix.utilities.to_bytes', 'to_bytes', (['"""properties"""'], {}), "('properties')\n", (4517, 4531), False, 'from pelix.utilities import to_str, to_bytes\n')] |
"""
PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
through function annotations. There is a strong suggestion in this document
that only the type of type hinting defined in PEP0484 should be allowed
as annotations in future python versions.
"""
import re
from parso import ParserSyntaxError, parse
from jedi._compatibility import force_unicode
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
from jedi.evaluate.gradual.typing import TypeVar, LazyGenericClass, \
AbstractAnnotatedClass
from jedi.evaluate.gradual.typing import GenericClass
from jedi.evaluate.helpers import is_string
from jedi.evaluate.compiled import builtin_from_name
from jedi import debug
from jedi import parser_utils
def eval_annotation(context, annotation):
    """
    Evaluate an annotation node, i.e. the ``int`` part of ``foo: int = 3``.

    A single resulting string context is treated as a forward reference
    (PEP 484) and parsed/evaluated again.
    """
    inferred = context.eval_node(annotation)
    if len(inferred) != 1:
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      " not %s" % (annotation, inferred))
        return inferred
    single = list(inferred)[0]
    if not is_string(single):
        return inferred
    forward_node = _get_forward_reference_node(context, single.get_safe_value())
    if forward_node is None:
        return inferred
    return context.eval_node(forward_node)
def _evaluate_annotation_string(context, string, index=None):
    """
    Parse *string* as a forward-referenced annotation and evaluate it.

    When *index* is given, the string is expected to evaluate to a tuple
    and only the element at that position is returned.
    """
    parsed = _get_forward_reference_node(context, string)
    if parsed is None:
        return NO_CONTEXTS
    result = context.eval_node(parsed)
    if index is None:
        return result

    def _is_indexable_tuple(c):
        # Only tuples that are long enough can provide the requested item.
        return c.array_type == u'tuple' and len(list(c.py__iter__())) >= index

    return result.filter(_is_indexable_tuple).py__simple_getitem__(index)
def _get_forward_reference_node(context, string):
    """
    Parse *string* as an expression and graft the resulting node onto the
    tree of *context*; return ``None`` if it is not valid Python.
    """
    grammar = context.evaluator.grammar
    try:
        node = grammar.parse(
            force_unicode(string),
            start_symbol='eval_input',
            error_recovery=False,
        )
    except ParserSyntaxError:
        debug.warning('Annotation not parsed: %s' % string)
        return None
    # Re-anchor the freshly parsed node behind the end of the module.
    root = context.tree_node.get_root_node()
    parser_utils.move(node, root.end_pos[0])
    node.parent = context.tree_node
    return node
def _split_comment_param_declaration(decl_text):
    """
    Split *decl_text* on commas while keeping generic expressions intact.

    For example, ``"foo, Bar[baz, biz]"`` yields ``['foo', 'Bar[baz, biz]']``.
    """
    try:
        node = parse(decl_text, error_recovery=False).children[0]
    except ParserSyntaxError:
        debug.warning('Comment annotation is not valid Python: %s' % decl_text)
        return []
    if node.type == 'name':
        # A single bare name has no children to iterate.
        return [node.get_code().strip()]
    children = getattr(node, 'children', None)
    if children is None:
        return []
    return [
        child.get_code().strip()
        for child in children
        if child.type in ('name', 'atom_expr', 'power')
    ]
@evaluator_method_cache()
def infer_param(execution_context, param):
    """
    Infers the contexts of a function parameter, wrapping ``*args`` /
    ``**kwargs`` parameters into ``tuple`` / ``dict`` generics.

    :param execution_context: The function execution the param belongs to.
    :param param: The parameter tree node.
    :return: A ``ContextSet`` of the inferred contexts.
    """
    contexts = _infer_param(execution_context, param)
    evaluator = execution_context.evaluator
    if param.star_count == 1:
        # *args -> Tuple[<annotation>, ...]
        tuple_ = builtin_from_name(evaluator, 'tuple')
        # NOTE(review): the loop variable ``c`` is unused — one generic class
        # is built per inferred context; confirm the duplication is intended.
        return ContextSet([GenericClass(
            tuple_,
            generics=(contexts,),
        ) for c in contexts])
    elif param.star_count == 2:
        # **kwargs -> Dict[str, <annotation>]
        dct = builtin_from_name(evaluator, 'dict')
        # (The unreachable ``pass`` after this return was removed.)
        return ContextSet([GenericClass(
            dct,
            generics=(ContextSet([builtin_from_name(evaluator, 'str')]), contexts),
        ) for c in contexts])
    return contexts
def _infer_param(execution_context, param):
    """
    Infers the type of a function parameter, using type annotations.

    Falls back to a Python 2-style ``# type: (...) -> ...`` comment on the
    function definition line when no PEP 3107 annotation is present.
    """
    annotation = param.annotation
    if annotation is None:
        # If no Python 3-style annotation, look for a Python 2-style comment
        # annotation.
        # Identify parameters to function in the same sequence as they would
        # appear in a type comment.
        all_params = [child for child in param.parent.children
                      if child.type == 'param']
        node = param.parent.parent
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_CONTEXTS
        match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
        if not match:
            return NO_CONTEXTS
        params_comments = _split_comment_param_declaration(match.group(1))
        # Find the specific param being investigated
        index = all_params.index(param)
        # If the number of parameters doesn't match length of type comment,
        # ignore first parameter (assume it's self).
        if len(params_comments) != len(all_params):
            debug.warning(
                "Comments length != Params length %s %s",
                params_comments, all_params
            )
        # Imported here, presumably to avoid a circular import at module load.
        from jedi.evaluate.context.instance import InstanceArguments
        if isinstance(execution_context.var_args, InstanceArguments):
            if index == 0:
                # Assume it's self, which is already handled
                return NO_CONTEXTS
            # Bound call: the comment does not describe ``self``, shift left.
            index -= 1
        if index >= len(params_comments):
            return NO_CONTEXTS
        param_comment = params_comments[index]
        return _evaluate_annotation_string(
            execution_context.function_context.get_default_param_context(),
            param_comment
        )
    # Annotations are like default params and resolve in the same way.
    context = execution_context.function_context.get_default_param_context()
    return eval_annotation(context, annotation)
def py__annotations__(funcdef):
    """
    Collect the annotation nodes of *funcdef*, keyed by parameter name.

    The return annotation, if present, is stored under the key ``'return'``.
    """
    annotations = {
        function_param.name.value: function_param.annotation
        for function_param in funcdef.get_params()
        if function_param.annotation is not None
    }
    if funcdef.annotation:
        annotations['return'] = funcdef.annotation
    return annotations
@evaluator_method_cache()
def infer_return_types(function_execution_context):
    """
    Infers the type of a function's return value,
    according to type annotations.

    Checks, in order: a Python 3 ``->`` annotation, then a Python 2
    ``# type: (...) -> ...`` comment; unresolved function-level type vars
    are bound from the execution state before executing the annotation.
    """
    all_annotations = py__annotations__(function_execution_context.tree_node)
    annotation = all_annotations.get("return", None)
    if annotation is None:
        # If there is no Python 3-type annotation, look for a Python 2-type annotation
        node = function_execution_context.tree_node
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_CONTEXTS
        match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
        if not match:
            return NO_CONTEXTS
        return _evaluate_annotation_string(
            function_execution_context.function_context.get_default_param_context(),
            match.group(1).strip()
        ).execute_annotation()
    # (A second ``if annotation is None`` guard used to sit here; it was
    # unreachable because every path above either returns or leaves a
    # non-None annotation, so it was removed.)
    context = function_execution_context.function_context.get_default_param_context()
    unknown_type_vars = list(find_unknown_type_vars(context, annotation))
    annotation_contexts = eval_annotation(context, annotation)
    if not unknown_type_vars:
        return annotation_contexts.execute_annotation()
    type_var_dict = infer_type_vars_for_execution(function_execution_context, all_annotations)
    return ContextSet.from_sets(
        ann.define_generics(type_var_dict)
        if isinstance(ann, (AbstractAnnotatedClass, TypeVar)) else ContextSet({ann})
        for ann in annotation_contexts
    ).execute_annotation()
def infer_type_vars_for_execution(execution_context, annotation_dict):
    """
    Some functions use type vars that are not defined by the class, but rather
    only defined in the function. See for example `iter`. In those cases we
    want to:

    1. Search for undefined type vars.
    2. Infer type vars with the execution state we have.
    3. Return the union of all type vars that have been found.

    :param execution_context: The function execution being analysed.
    :param annotation_dict: Mapping of param name -> annotation node
        (as produced by ``py__annotations__``).
    :return: dict of type var name -> set of inferred contexts.
    """
    context = execution_context.function_context.get_default_param_context()
    annotation_variable_results = {}
    executed_params, _ = execution_context.get_executed_params_and_issues()
    for executed_param in executed_params:
        try:
            annotation_node = annotation_dict[executed_param.string_name]
        except KeyError:
            continue
        annotation_variables = find_unknown_type_vars(context, annotation_node)
        if annotation_variables:
            # Infer unknown type var
            annotation_context_set = context.eval_node(annotation_node)
            star_count = executed_param._param_node.star_count
            actual_context_set = executed_param.infer(use_hints=False)
            # For *args / **kwargs the annotation describes the element /
            # value type, so unpack the actual contexts accordingly.
            if star_count == 1:
                actual_context_set = actual_context_set.merge_types_of_iterate()
            elif star_count == 2:
                # TODO _dict_values is not public.
                actual_context_set = actual_context_set.try_merge('_dict_values')
            for ann in annotation_context_set:
                _merge_type_var_dicts(
                    annotation_variable_results,
                    _infer_type_vars(ann, actual_context_set),
                )
    return annotation_variable_results
def _merge_type_var_dicts(base_dict, new_dict):
for type_var_name, contexts in new_dict.items():
try:
base_dict[type_var_name] |= contexts
except KeyError:
base_dict[type_var_name] = contexts
def _infer_type_vars(annotation_context, context_set):
    """
    This function tries to find information about undefined type vars and
    returns a dict from type var name to context set.

    This is for example important to understand what `iter([1])` returns.
    According to typeshed, `iter` returns an `Iterator[_T]`:

        def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...

    This functions would generate `int` for `_T` in this case, because it
    unpacks the `Iterable`.
    """
    type_var_dict = {}
    if isinstance(annotation_context, TypeVar):
        # A bare type var: it binds directly to the class of the actual value.
        return {annotation_context.py__name__(): context_set.py__class__()}
    elif isinstance(annotation_context, LazyGenericClass):
        name = annotation_context.py__name__()
        if name == 'Iterable':
            given = annotation_context.get_generics()
            if given:
                # Iterable[T]: bind T against the element types of the value.
                for nested_annotation_context in given[0]:
                    _merge_type_var_dicts(
                        type_var_dict,
                        _infer_type_vars(
                            nested_annotation_context,
                            context_set.merge_types_of_iterate()
                        )
                    )
        elif name == 'Mapping':
            given = annotation_context.get_generics()
            if len(given) == 2:
                # Mapping[K, V]: bind K and V against the key/value contexts.
                for context in context_set:
                    try:
                        method = context.get_mapping_item_contexts
                    except AttributeError:
                        # Not a mapping-like context; nothing to bind.
                        continue
                    key_contexts, value_contexts = method()
                    for nested_annotation_context in given[0]:
                        _merge_type_var_dicts(
                            type_var_dict,
                            _infer_type_vars(
                                nested_annotation_context,
                                key_contexts,
                            )
                        )
                    for nested_annotation_context in given[1]:
                        _merge_type_var_dicts(
                            type_var_dict,
                            _infer_type_vars(
                                nested_annotation_context,
                                value_contexts,
                            )
                        )
    return type_var_dict
def find_type_from_comment_hint_for(context, node, name):
    """Type-comment lookup for a ``for`` statement; the target is children[1]."""
    target = node.children[1]
    return _find_type_from_comment_hint(context, node, target, name)
def find_type_from_comment_hint_with(context, node, name):
    """
    Type-comment lookup for a ``with`` statement.

    Only the ``foo() as f`` form is handled; the bound name is then the
    third child of ``children[1]``.
    """
    with_item = node.children[1]
    assert len(with_item.children) == 3, \
        "Can only be here when children[1] is 'foo() as f'"
    varlist = with_item.children[2]
    return _find_type_from_comment_hint(context, node, varlist, name)
def find_type_from_comment_hint_assign(context, node, name):
    """Type-comment lookup for an assignment; the target list is children[0]."""
    targets = node.children[0]
    return _find_type_from_comment_hint(context, node, targets, name)
def _find_type_from_comment_hint(context, node, varlist, name):
    """
    Evaluate a ``# type:`` comment attached to *node* for the target *name*.

    For tuple-style targets (``a, b = ...``) the position of *name* among
    the targets selects the matching element of the annotated tuple.
    """
    index = None
    if varlist.type in ("testlist_star_expr", "exprlist", "testlist"):
        # something like "a, b = 1, 2": locate the position of ``name``,
        # skipping the comma operators between the targets.
        index = 0
        for child in varlist.children:
            if child == name:
                break
            if child.type == "operator":
                continue
            index += 1
        else:
            # ``name`` is not one of the assignment targets.
            return []
    comment = parser_utils.get_following_comment_same_line(node)
    if comment is None:
        return []
    match = re.match(r"^#\s*type:\s*([^#]*)", comment)
    if match is None:
        return []
    annotation_text = match.group(1).strip()
    return _evaluate_annotation_string(
        context, annotation_text, index
    ).execute_annotation()
def find_unknown_type_vars(context, node):
    """
    Collect the ``TypeVar`` objects referenced by an annotation node, in
    the order they first appear.
    """
    found = []  # A list instead of a set: the discovery order matters.

    def _visit(current):
        if current.type in ('atom_expr', 'power'):
            trailer = current.children[-1]
            # Descend into subscripted generics like ``Mapping[K, V]``.
            if trailer.type == 'trailer' and trailer.children[0] == '[':
                for subscript_node in _unpack_subscriptlist(trailer.children[1]):
                    _visit(subscript_node)
        else:
            for inferred in context.eval_node(current):
                if isinstance(inferred, TypeVar) and inferred not in found:
                    found.append(inferred)

    _visit(node)
    return found
def _unpack_subscriptlist(subscriptlist):
if subscriptlist.type == 'subscriptlist':
for subscript in subscriptlist.children[::2]:
if subscript.type != 'subscript':
yield subscript
else:
if subscriptlist.type != 'subscript':
yield subscriptlist
| [
"jedi.evaluate.cache.evaluator_method_cache",
"jedi.debug.warning",
"jedi.evaluate.base_context.ContextSet",
"jedi.parser_utils.move",
"re.match",
"jedi._compatibility.force_unicode",
"jedi.evaluate.compiled.builtin_from_name",
"parso.parse",
"jedi.evaluate.gradual.typing.GenericClass",
"jedi.eval... | [((3310, 3334), 'jedi.evaluate.cache.evaluator_method_cache', 'evaluator_method_cache', ([], {}), '()\n', (3332, 3334), False, 'from jedi.evaluate.cache import evaluator_method_cache\n'), ((6407, 6431), 'jedi.evaluate.cache.evaluator_method_cache', 'evaluator_method_cache', ([], {}), '()\n', (6429, 6431), False, 'from jedi.evaluate.cache import evaluator_method_cache\n'), ((1308, 1333), 'jedi.evaluate.helpers.is_string', 'is_string', (['evaled_context'], {}), '(evaled_context)\n', (1317, 1333), False, 'from jedi.evaluate.helpers import is_string\n'), ((13306, 13356), 'jedi.parser_utils.get_following_comment_same_line', 'parser_utils.get_following_comment_same_line', (['node'], {}), '(node)\n', (13350, 13356), False, 'from jedi import parser_utils\n'), ((13411, 13454), 're.match', 're.match', (['"""^#\\\\s*type:\\\\s*([^#]*)"""', 'comment'], {}), "('^#\\\\s*type:\\\\s*([^#]*)', comment)\n", (13419, 13454), False, 'import re\n'), ((1104, 1209), 'jedi.debug.warning', 'debug.warning', (['("Eval\'ed typing index %s should lead to 1 object, not %s" % (annotation,\n context_set))'], {}), '("Eval\'ed typing index %s should lead to 1 object, not %s" %\n (annotation, context_set))\n', (1117, 1209), False, 'from jedi import debug\n'), ((2410, 2456), 'jedi.parser_utils.move', 'parser_utils.move', (['new_node', 'module.end_pos[0]'], {}), '(new_node, module.end_pos[0])\n', (2427, 2456), False, 'from jedi import parser_utils\n'), ((3523, 3560), 'jedi.evaluate.compiled.builtin_from_name', 'builtin_from_name', (['evaluator', '"""tuple"""'], {}), "(evaluator, 'tuple')\n", (3540, 3560), False, 'from jedi.evaluate.compiled import builtin_from_name\n'), ((4543, 4593), 'jedi.parser_utils.get_following_comment_same_line', 'parser_utils.get_following_comment_same_line', (['node'], {}), '(node)\n', (4587, 4593), False, 'from jedi import parser_utils\n'), ((4670, 4725), 're.match', 're.match', (['"""^#\\\\s*type:\\\\s*\\\\(([^#]*)\\\\)\\\\s*->"""', 'comment'], {}), 
"('^#\\\\s*type:\\\\s*\\\\(([^#]*)\\\\)\\\\s*->', comment)\n", (4678, 4725), False, 'import re\n'), ((6900, 6950), 'jedi.parser_utils.get_following_comment_same_line', 'parser_utils.get_following_comment_same_line', (['node'], {}), '(node)\n', (6944, 6950), False, 'from jedi import parser_utils\n'), ((7027, 7091), 're.match', 're.match', (['"""^#\\\\s*type:\\\\s*\\\\([^#]*\\\\)\\\\s*->\\\\s*([^#]*)"""', 'comment'], {}), "('^#\\\\s*type:\\\\s*\\\\([^#]*\\\\)\\\\s*->\\\\s*([^#]*)', comment)\n", (7035, 7091), False, 'import re\n'), ((2126, 2147), 'jedi._compatibility.force_unicode', 'force_unicode', (['string'], {}), '(string)\n', (2139, 2147), False, 'from jedi._compatibility import force_unicode\n'), ((2269, 2320), 'jedi.debug.warning', 'debug.warning', (["('Annotation not parsed: %s' % string)"], {}), "('Annotation not parsed: %s' % string)\n", (2282, 2320), False, 'from jedi import debug\n'), ((2866, 2937), 'jedi.debug.warning', 'debug.warning', (["('Comment annotation is not valid Python: %s' % decl_text)"], {}), "('Comment annotation is not valid Python: %s' % decl_text)\n", (2879, 2937), False, 'from jedi import debug\n'), ((3732, 3768), 'jedi.evaluate.compiled.builtin_from_name', 'builtin_from_name', (['evaluator', '"""dict"""'], {}), "(evaluator, 'dict')\n", (3749, 3768), False, 'from jedi.evaluate.compiled import builtin_from_name\n'), ((5137, 5225), 'jedi.debug.warning', 'debug.warning', (['"""Comments length != Params length %s %s"""', 'params_comments', 'all_params'], {}), "('Comments length != Params length %s %s', params_comments,\n all_params)\n", (5150, 5225), False, 'from jedi import debug\n'), ((2777, 2815), 'parso.parse', 'parse', (['decl_text'], {'error_recovery': '(False)'}), '(decl_text, error_recovery=False)\n', (2782, 2815), False, 'from parso import ParserSyntaxError, parse\n'), ((3588, 3630), 'jedi.evaluate.gradual.typing.GenericClass', 'GenericClass', (['tuple_'], {'generics': '(contexts,)'}), '(tuple_, generics=(contexts,))\n', (3600, 
3630), False, 'from jedi.evaluate.gradual.typing import GenericClass\n'), ((7948, 7965), 'jedi.evaluate.base_context.ContextSet', 'ContextSet', (['{ann}'], {}), '({ann})\n', (7958, 7965), False, 'from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS\n'), ((3861, 3896), 'jedi.evaluate.compiled.builtin_from_name', 'builtin_from_name', (['evaluator', '"""str"""'], {}), "(evaluator, 'str')\n", (3878, 3896), False, 'from jedi.evaluate.compiled import builtin_from_name\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from mmocr.utils import list_from_file, list_to_file
# Fixtures shared by both tests: empty lists, whitespace-only lines,
# non-string values (the tests compare them via str()) and non-ASCII text
# in several scripts.
lists = [
    [],
    [' '],
    ['\t'],
    ['a'],
    [1],
    [1.],
    ['a', 'b'],
    ['a', 1, 1.],
    [1, 1., 'a'],
    ['啊', '啊啊'],
    ['選択', 'noël', 'Информацией', 'ÄÆä'],
]
def test_list_to_file():
    """Round-trip every fixture through ``list_to_file`` and compare lines."""
    with tempfile.TemporaryDirectory() as tmpdirname:
        for i, lines in enumerate(lists):
            filename = f'{tmpdirname}/{i}.txt'
            list_to_file(filename, lines)
            # Read back via a context manager so the handle is closed
            # deterministically (the original left the file open).
            with open(filename, 'r', encoding='utf-8') as f:
                lines2 = [line.rstrip('\r\n') for line in f.readlines()]
            lines = list(map(str, lines))
            assert len(lines) == len(lines2)
            assert all(line1 == line2 for line1, line2 in zip(lines, lines2))
def test_list_from_file():
    """``list_from_file`` must cope with BOMs and both line-ending styles."""
    with tempfile.TemporaryDirectory() as tmpdirname:
        for codec in ['utf-8', 'utf-8-sig']:
            for newline in ['\n', '\r\n']:
                for idx, fixture in enumerate(lists):
                    path = f'{tmpdirname}/{idx}.txt'
                    with open(path, 'w', encoding=codec) as handle:
                        handle.writelines(f'{item}{newline}' for item in fixture)
                    read_back = list_from_file(path, encoding=codec)
                    expected = [str(item) for item in fixture]
                    assert len(expected) == len(read_back)
                    assert all(a == b for a, b in zip(expected, read_back))
| [
"tempfile.TemporaryDirectory",
"mmocr.utils.list_from_file",
"mmocr.utils.list_to_file"
] | [((339, 368), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (366, 368), False, 'import tempfile\n'), ((869, 898), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (896, 898), False, 'import tempfile\n'), ((485, 514), 'mmocr.utils.list_to_file', 'list_to_file', (['filename', 'lines'], {}), '(filename, lines)\n', (497, 514), False, 'from mmocr.utils import list_from_file, list_to_file\n'), ((1284, 1327), 'mmocr.utils.list_from_file', 'list_from_file', (['filename'], {'encoding': 'encoding'}), '(filename, encoding=encoding)\n', (1298, 1327), False, 'from mmocr.utils import list_from_file, list_to_file\n')] |
import cv2
from Text_Detection import detect_characters, detect_string, detect_words
import re
from live_recognition import facial_recognition
#
def rescale_frame(frame, percent=75):
    """
    Return *frame* resized to *percent* of its original size.

    Uses cv2.INTER_AREA interpolation.
    """
    new_w = int(frame.shape[1] * percent / 100)
    new_h = int(frame.shape[0] * percent / 100)
    return cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
####################################################
frameWidth = 640
frameHeight = 480
# Haar cascade trained for Russian licence plates (shipped with OpenCV).
nPlateCascade = cv2.CascadeClassifier("../../Resources/haarcascade_russian_plate_number.xml")
minArea=500
color=(255,0,255)
name=None
# count = 0
# Two-letter state codes (upper- and lower-case) used to sanity-check the
# OCR output of a detected plate.
state_codes = ['AP', 'AR', 'AS', 'BR', 'CG', 'GA', 'GJ', 'HR', 'HP', 'JH', 'KA', 'KL', 'MP', 'MH', 'MN', 'ML', 'MZ', 'NL', 'OD', 'PB', 'RJ', 'SK', 'TN', 'TR', 'UP', 'WB', 'TS','ap', 'ar', 'as', 'br', 'cg', 'ga', 'gj', 'hr', 'hp', 'jh', 'ka', 'kl', 'mp', 'mh', 'mn', 'ml', 'mz', 'nl', 'od', 'pb', 'rj', 'sk', 'tn', 'tr', 'up', 'wb', 'ts']
######################################################
# cap = cv2.VideoCapture("C:\\Users\\jaira\\PycharmProjects\\opencv_tutorial\\Resources\\test.mp4")
cap=cv2.VideoCapture(0,cv2.CAP_DSHOW)
cap.set(3, frameWidth)  # property 3: frame width
cap.set(4, frameHeight)  # property 4: frame height
cap.set(10,150)  # presumably CAP_PROP_BRIGHTNESS (id 10) — confirm
success, img = cap.read()
while success:
    success, img = cap.read()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    numberPlates = nPlateCascade.detectMultiScale(imgGray, 1.1, 4)
    for (x, y, w, h) in numberPlates:
        area = w*h
        # Ignore detections that are too small to be a readable plate.
        if area > minArea:
            cv2.rectangle(img=img,pt1=(x,y),pt2=(x+w,y+h),
                          color=color,thickness=2)
            # cv2.putText(img=img,text="Number Plate",org=(x,y-5),fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,color=color,fontScale=1,thickness=2)
            imgRoi=img[y:y+h,x:x+w]
            cv2.moveWindow("ROI",40,30)
            cv2.imshow(winname="ROI",mat=imgRoi)
            # OCR the cropped plate region.
            temp=detect_words(imgRoi)
            for i in state_codes:
                if i in temp:
                    # NOTE(review): temp2 is computed but never used.
                    temp2 = ''.join(ch for ch in temp if ch.isalnum() and ch!="." and ch!="_")
                    # Accept only plate-shaped strings: 10 characters, with
                    # digits at positions 2-3 and at the end.
                    if temp[-2:].isnumeric() and temp[2:4].isnumeric() and len(temp)==10:
                        cv2.putText(img=img,text=temp,org=(x,y-5),fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,color=color,fontScale=1,thickness=2)
                        print(temp)
    # Face recognition runs each frame until a non-None name is returned.
    # NOTE(review): "name == None" — "name is None" is the idiomatic test.
    if name==None:
        name,face_img=facial_recognition(img)
        cv2.imshow("Face Recognition",face_img)
    cv2.imshow("Result", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # except:
    #     break
cv2.destroyAllWindows()
"cv2.rectangle",
"cv2.moveWindow",
"cv2.imshow",
"Text_Detection.detect_words",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"live_recognition.facial_recognition",
"cv2.CascadeClassifier",
"cv2.resize",
"cv2.waitKey"
] | [((488, 565), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""../../Resources/haarcascade_russian_plate_number.xml"""'], {}), "('../../Resources/haarcascade_russian_plate_number.xml')\n", (509, 565), False, 'import cv2\n'), ((1123, 1157), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)', 'cv2.CAP_DSHOW'], {}), '(0, cv2.CAP_DSHOW)\n', (1139, 1157), False, 'import cv2\n'), ((2615, 2638), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2636, 2638), False, 'import cv2\n'), ((324, 376), 'cv2.resize', 'cv2.resize', (['frame', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, dim, interpolation=cv2.INTER_AREA)\n', (334, 376), False, 'import cv2\n'), ((1318, 1355), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1330, 1355), False, 'import cv2\n'), ((2498, 2523), 'cv2.imshow', 'cv2.imshow', (['"""Result"""', 'img'], {}), "('Result', img)\n", (2508, 2523), False, 'import cv2\n'), ((2418, 2441), 'live_recognition.facial_recognition', 'facial_recognition', (['img'], {}), '(img)\n', (2436, 2441), False, 'from live_recognition import facial_recognition\n'), ((2451, 2491), 'cv2.imshow', 'cv2.imshow', (['"""Face Recognition"""', 'face_img'], {}), "('Face Recognition', face_img)\n", (2461, 2491), False, 'import cv2\n'), ((1527, 1612), 'cv2.rectangle', 'cv2.rectangle', ([], {'img': 'img', 'pt1': '(x, y)', 'pt2': '(x + w, y + h)', 'color': 'color', 'thickness': '(2)'}), '(img=img, pt1=(x, y), pt2=(x + w, y + h), color=color, thickness=2\n )\n', (1540, 1612), False, 'import cv2\n'), ((1820, 1849), 'cv2.moveWindow', 'cv2.moveWindow', (['"""ROI"""', '(40)', '(30)'], {}), "('ROI', 40, 30)\n", (1834, 1849), False, 'import cv2\n'), ((1861, 1898), 'cv2.imshow', 'cv2.imshow', ([], {'winname': '"""ROI"""', 'mat': 'imgRoi'}), "(winname='ROI', mat=imgRoi)\n", (1871, 1898), False, 'import cv2\n'), ((1918, 1938), 'Text_Detection.detect_words', 'detect_words', (['imgRoi'], {}), '(imgRoi)\n', (1930, 1938), 
False, 'from Text_Detection import detect_characters, detect_string, detect_words\n'), ((2532, 2546), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2543, 2546), False, 'import cv2\n'), ((2217, 2349), 'cv2.putText', 'cv2.putText', ([], {'img': 'img', 'text': 'temp', 'org': '(x, y - 5)', 'fontFace': 'cv2.FONT_HERSHEY_COMPLEX_SMALL', 'color': 'color', 'fontScale': '(1)', 'thickness': '(2)'}), '(img=img, text=temp, org=(x, y - 5), fontFace=cv2.\n FONT_HERSHEY_COMPLEX_SMALL, color=color, fontScale=1, thickness=2)\n', (2228, 2349), False, 'import cv2\n')] |
##! python3
##==============================================================================
## Copyright (c) 2021 COMPAL Electronic Inc. All rights reserved.
## This program contains proprietary and confidential information.
## All rights reserved except as may be permitted by prior written consent.
##
## Compal STiD NPSD Test Program Release Notification.
##
## ModuleName:
## LTE.py (Log to Excel)
##
## Abstract:
## Parsing log info to a excel with 4 sheets.
## 1. Read log file: parse -> store (a list of dict)
## 2. Read the INI threshold data: store as dict
## 3. New excel workbook: by openpyxl
## 4. Set worksheet according to Step 1: by dict and DataFrame
## 5. Set condition formating for each sheet
## according to Step 2: by dict
## 6. Save the workbook to xlsx file
##
## Author:
## 25-Oct-2021 <NAME>
##
## Revision History:
## Rev 1.0.0.1 25-Oct-2021 Willy
## First create.
##==============================================================================
import re
import os
import sys
import pandas as pd
import codecs
import time
import configparser
import openpyxl
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Font, Fill, colors
from openpyxl.formatting.rule import CellIsRule
# [Main]
g_strVersion = "3.0.0.1"
#[ParseLogPath]
g_strLogDir = "./Log/Pass"
class cLogParser:
listKey = ["Power_dBm_CH15", "Power_dBm_CH21", "Power_dBm_CH24", "Current_mA_CH15", "Current_mA_CH21", "Current_mA_CH24", "dBm_LNA_ON", "dBm_LNA_Off",
"Current_mA_3G_CH9750", "Current_mA_3G_CH2787", "Current_mA_2G_CH124", "dBm_CH9750", "dBm_CH2787", "dBm_2G_CH124", "dBm_CH124"]
listInfo, listLTE, listZigbee = [], [], []
def __init__(self):
# get directory names of TryingLog (first layer)
listSN = os.listdir(g_strLogDir)
# iterate through log files in a SN folder (second layer)
self.parseLog(listSN)
# merge data from two different log files
self.mergeLogs()
def parseLog(self, listSN):
printLog("[I][parseLog] ------- Start Parsing Log -------")
strLTEName, strZigbeeName = "GFI20_RF_LTE.log", "GFI20_RF_Zigbee.log"
try:
for strSN in listSN:
dictLTE = {
"SN" : strSN,
"dBm_CH9750" : None,
"dBm_CH2787" : None,
"dBm_2G_CH124" : None,
"Current_mA_3G_CH9750" : None,
"Current_mA_3G_CH2787" : None,
"Current_mA_2G_CH124" : None,
"dBm_CH124" : None }
dictZigbee = {
"SN" : strSN,
"Power_dBm_CH15" : None,
"Power_dBm_CH21" : None,
"Power_dBm_CH24" : None,
"dBm_LNA_ON" : None,
"dBm_LNA_Off" : None,
"Current_mA_CH15" : None,
"Current_mA_CH21" : None,
"Current_mA_CH24" : None }
b_hasLTE, b_hasZigbee = False, False # flag for checking if the target log exists
strSNLog = os.path.join(g_strLogDir, strSN) # set abspath for SN logs
for strLogName in os.listdir(strSNLog):
strLogPath = os.path.join(strSNLog, strLogName)
# check GFI20_RF_LTE.log exists. If not, flag = False and parse only SN.
reMatch = re.fullmatch("^.*RF_LTE\.log", strLogName)
if(reMatch != None):
self.parseLTE(dictLTE, strLogPath, strSN)
b_hasLTE = True
# parse GFI20_RF_Zigbee.log files
reMatch = re.fullmatch("^.*RF_Zigbee\.log", strLogName)
if(reMatch != None):
self.parseZigbee(dictZigbee, strLogPath, strSN)
b_hasZigbee = True
# if log not exists, append initial dict
self.listLTE.append(dictLTE)
self.listZigbee.append(dictZigbee)
# if there is no target log file in the folder, parse only SN
if not b_hasLTE:
#listLTE.append({"SN": strSN})
printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strLTEName))
if not b_hasZigbee:
#listZigbee.append({"SN" : strSN})
printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strZigbeeName))
printLog("[I][parseLog] ------- Finish Parsing Log -------")
except Exception as e:
printLog("[E][parseLog] Unexpected Error: " + str(e))
def parseLTE(self, dictLTE, strLTEPath, strSN):
printLog("[I][parseLTE] Parse LTE log: %s" % strLTEPath)
try:
listPostfix = [" \n", " A\n", " dBm\n"]
with open(strLTEPath, encoding='big5') as log: # big5 for windows
content = log.readlines()
for line in content:
re_power = "Power: [+-]?[0-9]+\.?[0-9]*"
re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
if re.search("-+ LTE_3G Freq 897.4 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[11], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[8], listPostfix[1], 1000, False)
if re.search("-+ LTE_3G Freq 1950 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[12], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[9], listPostfix[1], 1000, False)
if re.search("-+ LTE_2G Freq 914.8 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[13], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[10], listPostfix[1], 1000, False)
if re.search("-+ LTE_2G Freq 959.8 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_RX_RSSI, self.listKey[14], listPostfix[2], 1, True)
except Exception as e:
printLog("[E][parseLTE] Unexpected Error: " + str(e))
def parseZigbee(self, dictZigbee, strZigBeePath, strSN):
printLog("[I][parseZigbee] Parse Zigbee log: %s" % strZigBeePath)
try:
listPostfix = ["dBm\n", " A\n", " dBm\n"]
with open(strZigBeePath, encoding="big5") as Zigbee: # big5 for windows
content = Zigbee.readlines()
for line in content:
re_power = "Power: [+-]?[0-9]+\.?[0-9]* dBm"
re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
if re.search("-+ ZIGBEE_2450 Freq 2425 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[0], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[3], listPostfix[1], 1000, False)
if re.search("-+ ZIGBEE_2450 Freq 2455 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[1], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[4], listPostfix[1], 1000, False)
if re.search("-+ ZIGBEE_2450 Freq 2470 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[2], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[5], listPostfix[1], 1000, False)
if re.search("-+ LNA ON -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[6], listPostfix[2], 1, False)
if re.search("-+ LNA OFF -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[7], listPostfix[2], 1, False)
except Exception as e:
printLog("[E][parseZigbee] Unexpected Error: " + str(e))
def get_log_value(self, cut_content, dictInfo, re_target, strKey, strPostfix, nUnit, b_getMulti):
for line in cut_content:
# search pattern like "Power: (int/float) dBm"
if re.search(re_target, line) != None:
# get the figure of the line like "Power: 8.817 dBm\n"
fValue = eval(line.split(": ")[1].strip(strPostfix))
dictInfo[strKey] = fValue * nUnit
if not b_getMulti:
break;
# merge two list of dict to single list of dict
def mergeLogs(self):
try:
printLog("[I][mergeLogs] ------- Merging two Log data -------")
# listLTE and listZigbee both has same length
self.listInfo = [None] * len(self.listLTE)
for i in range (0, len(self.listLTE)):
self.listLTE[i].update(self.listZigbee[i]) # merge two dict
self.listInfo[i] = self.listLTE[i]
printLog("[I][mergeLogs] ------- Merged two Log data -------")
except Exception as e:
printLog("[E][mergeLogs] Unexpected Error: " + str(e))
#/====================================================================\#
#| Functions of parsing log to excel |#
#\====================================================================/#
def log_to_excel(self):
printLog("[I][log_to_excel] ------- Parsing Log to Excel -------")
dictThreshold = {} # store INI threshold ata for setting conditional formating
try:
# ========== get the threshold data from INI ==========
printLog("[I][log_to_excel] ----- INI reading -----")
for key in self.listKey:
dictThreshold[key] = self.readINI(key)
printLog("[I][log_to_excel] ----- INI read -----")
# ========== New Excel workbook and sheets ==========
df_logInfo = pd.DataFrame(self.listInfo) # listInfo -> list of dict
listSheetName = ["Zigbee_Power_Current", "Zigbee_LAN", "LTE_Current", "LTE_dBm"]
listCol = [self.listKey[:6], self.listKey[6:8], self.listKey[8:11], self.listKey[11:15]] # columns for each sheet above
wb = openpyxl.Workbook() # 新增 Excel 活頁
wb.remove(wb['Sheet']) # remove the default sheet when start a workbook
printLog("[I][log_to_excel] ----- Excel Sheet Creating -----")
for i in range(0, len(listSheetName)):
self.newSheet(wb, listSheetName[i], df_logInfo[["SN"] + listCol[i]])
printLog("[I][log_to_excel] ----- Excel Sheet Created -----")
# modify cell font-color according to thershold that parsed from INI
self.set_threshold_to_excel(wb, dictThreshold)
wb.save('LTEV2.xlsx') # save the worksheet as excel file
printLog("[I][log_to_excel] ------- Parsed Log to Excel -------")
except Exception as e:
printLog("[E][log_to_excel] Unexpected Error: " + str(e))
# read INI values one by one by giving keys, then store to var dictThreshold
def readINI(self, strKey):
try:
config = configparser.ConfigParser()
config.read(g_strINIPath)
strMethod = 'Method%s' % g_nMethodIndex
strValue = config.get(strMethod, strKey)
# search pattern like "+-(int/float),+-(int/float)"
if re.fullmatch("[+-]?[0-9]+\.?[0-9]*,[+-]?[0-9]+\.?[0-9]*", strValue):
printLog("[I][readINI] %s = %s" % (strKey, strValue))
return strValue
else:
printLog("[W][readINI] Read %s Fail !!" % strKey)
sys.exit("Read %s Fail !!" % strKey)
except Exception as e:
printLog("[E][readINI] Error: %s" % str(e))
sys.exit("Error: %s" % str(e))
# new worksheets by DataFrame
def newSheet(self, workbook, strSheetName, df_SheetCol):
try:
workbook.create_sheet(strSheetName)
for row in dataframe_to_rows(df_SheetCol, index=False, header=True):
workbook[strSheetName].append(row)
printLog("[I][newSheet] Sheet: %s Created" % strSheetName)
except Exception as e:
printLog("[E][newSheet] Unexpected Error: " + str(e))
# set conditional formating for sheets by dictionay containg thershold data
def set_threshold_to_excel(self, workbook, dictThreshold):
try:
printLog("[I][set_threshold_to_excel] ----- threshold setting -----")
# iterate through every worksheet to set conditional formatting
for ws in workbook.worksheets:
printLog("[I][set_threshold_to_excel] setting worksheet: %s" % ws.title)
# iterate from Col 2 since Col 1 is the Serial Number(SN)
for col in ws.iter_cols(min_row=1, max_row=ws.max_row, min_col=2, max_col=ws.max_column):
strStart, strEnd = None, None # set the test range for cell e.g. A1:A10
istInterval = [] # set the threshold range for the formula below
# check the column is not empty, col[0] is column name
if len(col) > 1:
strStart = col[1].coordinate # set starting cell for thershold testing
strEnd = col[-1].coordinate # set ending cell
# get the thershold and store as interval for the formula below
strThreshold = dictThreshold[col[0].value] # get the test thershold by the column name(col[0])
listInterval = strThreshold.split(",")
red_text = Font(color="9C0006") # font-color: RED
range_string = "%s:%s" % (strStart, strEnd) # the value would be like A1:A10
ws.conditional_formatting.add(range_string,
CellIsRule(operator='notBetween', formula=listInterval, stopIfTrue=True, font=red_text))
printLog("[I][set_threshold_to_excel] ----- threshold set -----")
except Exception as e:
printLog("[E][set_threshold_to_excel] Unexpected Error: " + str(e))
#/====================================================================\#
#| Functions of printing log of LTE.py |#
#\====================================================================/#
def getDateTimeFormat():
strDateTime = "[%s]" % (time.strftime("%Y/%m/%d %H:%M:%S", time.localtime()))
return strDateTime
def printLog(strPrintLine):
strFileName = os.path.basename(__file__).split('.')[0]
fileLog = codecs.open(g_strFileName + ".log", 'a', "utf-8")
print(strPrintLine)
fileLog.write("%s%s\r\n" % (getDateTimeFormat(), strPrintLine))
fileLog.close()
if __name__ == "__main__":
global g_strFileName, g_strINIPath, g_nMethodIndex
g_strFileName = os.path.basename(__file__).split('.')[0]
g_strINIPath = os.path.join(os.getcwd(), g_strFileName + ".ini")
g_nMethodIndex = 1
printLog("========== Start ==========")
printLog("[I][main] Python " + sys.version)
printLog("[I][main] %s.py %s" % (g_strFileName, g_strVersion))
# ------------ find the target file --------------
try:
LogParser = cLogParser()
LogParser.log_to_excel()
except Exception as e:
printLog("[E][main] Unexpected Error: " + str(e))
printLog("========== End ==========")
| [
"time.localtime",
"os.listdir",
"configparser.ConfigParser",
"openpyxl.utils.dataframe.dataframe_to_rows",
"os.path.join",
"openpyxl.styles.Font",
"os.getcwd",
"re.fullmatch",
"openpyxl.Workbook",
"os.path.basename",
"sys.exit",
"pandas.DataFrame",
"openpyxl.formatting.rule.CellIsRule",
"c... | [((16446, 16495), 'codecs.open', 'codecs.open', (["(g_strFileName + '.log')", '"""a"""', '"""utf-8"""'], {}), "(g_strFileName + '.log', 'a', 'utf-8')\n", (16457, 16495), False, 'import codecs\n'), ((1950, 1973), 'os.listdir', 'os.listdir', (['g_strLogDir'], {}), '(g_strLogDir)\n', (1960, 1973), False, 'import os\n'), ((16785, 16796), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16794, 16796), False, 'import os\n'), ((11579, 11606), 'pandas.DataFrame', 'pd.DataFrame', (['self.listInfo'], {}), '(self.listInfo)\n', (11591, 11606), True, 'import pandas as pd\n'), ((11884, 11903), 'openpyxl.Workbook', 'openpyxl.Workbook', ([], {}), '()\n', (11901, 11903), False, 'import openpyxl\n'), ((12846, 12873), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (12871, 12873), False, 'import configparser\n'), ((13118, 13187), 're.fullmatch', 're.fullmatch', (['"""[+-]?[0-9]+\\\\.?[0-9]*,[+-]?[0-9]+\\\\.?[0-9]*"""', 'strValue'], {}), "('[+-]?[0-9]+\\\\.?[0-9]*,[+-]?[0-9]+\\\\.?[0-9]*', strValue)\n", (13130, 13187), False, 'import re\n'), ((13768, 13824), 'openpyxl.utils.dataframe.dataframe_to_rows', 'dataframe_to_rows', (['df_SheetCol'], {'index': '(False)', 'header': '(True)'}), '(df_SheetCol, index=False, header=True)\n', (13785, 13824), False, 'from openpyxl.utils.dataframe import dataframe_to_rows\n'), ((16302, 16318), 'time.localtime', 'time.localtime', ([], {}), '()\n', (16316, 16318), False, 'import time\n'), ((3312, 3344), 'os.path.join', 'os.path.join', (['g_strLogDir', 'strSN'], {}), '(g_strLogDir, strSN)\n', (3324, 3344), False, 'import os\n'), ((3410, 3430), 'os.listdir', 'os.listdir', (['strSNLog'], {}), '(strSNLog)\n', (3420, 3430), False, 'import os\n'), ((9837, 9863), 're.search', 're.search', (['re_target', 'line'], {}), '(re_target, line)\n', (9846, 9863), False, 'import re\n'), ((13409, 13445), 'sys.exit', 'sys.exit', (["('Read %s Fail !!' % strKey)"], {}), "('Read %s Fail !!' 
% strKey)\n", (13417, 13445), False, 'import sys\n'), ((16391, 16417), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (16407, 16417), False, 'import os\n'), ((16712, 16738), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (16728, 16738), False, 'import os\n'), ((3465, 3499), 'os.path.join', 'os.path.join', (['strSNLog', 'strLogName'], {}), '(strSNLog, strLogName)\n', (3477, 3499), False, 'import os\n'), ((3624, 3667), 're.fullmatch', 're.fullmatch', (['"""^.*RF_LTE\\\\.log"""', 'strLogName'], {}), "('^.*RF_LTE\\\\.log', strLogName)\n", (3636, 3667), False, 'import re\n'), ((3900, 3946), 're.fullmatch', 're.fullmatch', (['"""^.*RF_Zigbee\\\\.log"""', 'strLogName'], {}), "('^.*RF_Zigbee\\\\.log', strLogName)\n", (3912, 3946), False, 'import re\n'), ((15469, 15489), 'openpyxl.styles.Font', 'Font', ([], {'color': '"""9C0006"""'}), "(color='9C0006')\n", (15473, 15489), False, 'from openpyxl.styles import Font, Fill, colors\n'), ((5447, 5489), 're.search', 're.search', (['"""-+ LTE_3G Freq 897.4 -+"""', 'line'], {}), "('-+ LTE_3G Freq 897.4 -+', line)\n", (5456, 5489), False, 'import re\n'), ((5867, 5908), 're.search', 're.search', (['"""-+ LTE_3G Freq 1950 -+"""', 'line'], {}), "('-+ LTE_3G Freq 1950 -+', line)\n", (5876, 5908), False, 'import re\n'), ((6286, 6328), 're.search', 're.search', (['"""-+ LTE_2G Freq 914.8 -+"""', 'line'], {}), "('-+ LTE_2G Freq 914.8 -+', line)\n", (6295, 6328), False, 'import re\n'), ((6707, 6749), 're.search', 're.search', (['"""-+ LTE_2G Freq 959.8 -+"""', 'line'], {}), "('-+ LTE_2G Freq 959.8 -+', line)\n", (6716, 6749), False, 'import re\n'), ((7679, 7725), 're.search', 're.search', (['"""-+ ZIGBEE_2450 Freq 2425 -+"""', 'line'], {}), "('-+ ZIGBEE_2450 Freq 2425 -+', line)\n", (7688, 7725), False, 'import re\n'), ((8108, 8154), 're.search', 're.search', (['"""-+ ZIGBEE_2450 Freq 2455 -+"""', 'line'], {}), "('-+ ZIGBEE_2450 Freq 2455 -+', line)\n", (8117, 8154), False, 'import 
re\n'), ((8537, 8583), 're.search', 're.search', (['"""-+ ZIGBEE_2450 Freq 2470 -+"""', 'line'], {}), "('-+ ZIGBEE_2450 Freq 2470 -+', line)\n", (8546, 8583), False, 'import re\n'), ((8966, 8997), 're.search', 're.search', (['"""-+ LNA ON -+"""', 'line'], {}), "('-+ LNA ON -+', line)\n", (8975, 8997), False, 'import re\n'), ((9257, 9289), 're.search', 're.search', (['"""-+ LNA OFF -+"""', 'line'], {}), "('-+ LNA OFF -+', line)\n", (9266, 9289), False, 'import re\n'), ((15713, 15804), 'openpyxl.formatting.rule.CellIsRule', 'CellIsRule', ([], {'operator': '"""notBetween"""', 'formula': 'listInterval', 'stopIfTrue': '(True)', 'font': 'red_text'}), "(operator='notBetween', formula=listInterval, stopIfTrue=True,\n font=red_text)\n", (15723, 15804), False, 'from openpyxl.formatting.rule import CellIsRule\n'), ((4486, 4517), 'os.path.join', 'os.path.join', (['strSN', 'strLTEName'], {}), '(strSN, strLTEName)\n', (4498, 4517), False, 'import os\n'), ((4677, 4711), 'os.path.join', 'os.path.join', (['strSN', 'strZigbeeName'], {}), '(strSN, strZigbeeName)\n', (4689, 4711), False, 'import os\n')] |
from wagtail.admin.edit_handlers import (
InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel,
PageChooserPanel,
)
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from core.helpers import make_translated_interface
from core.panels import SearchEngineOptimisationPanel
class InternationalCapitalInvestLandingPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('breadcrumbs_label'),
MultiFieldPanel(
heading="Hero",
children=[
ImageChooserPanel('hero_image'),
FieldPanel('hero_title'),
FieldPanel('hero_subheading'),
FieldPanel('hero_subtitle'),
FieldPanel('hero_cta_text'),
FieldPanel('hero_cta_link'),
]
),
MultiFieldPanel(
heading="Reason to invest in the UK section",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Reason to Invest Title, Reason to Invest Content'),
FieldPanel('reason_to_invest_section_title'),
FieldPanel('reason_to_invest_section_intro'),
FieldPanel('reason_to_invest_section_content'),
ImageChooserPanel('reason_to_invest_section_image'),
FieldPanel('how_we_help_title'),
FieldPanel('how_we_help_intro'),
HelpPanel('Each icon requires corresponding text to show '
'on page'),
FieldRowPanel([
MultiFieldPanel([
ImageChooserPanel('how_we_help_one_icon'),
FieldPanel('how_we_help_one_text'),
]),
MultiFieldPanel([
ImageChooserPanel('how_we_help_two_icon'),
FieldPanel('how_we_help_two_text'),
]),
]),
FieldRowPanel([
MultiFieldPanel([
ImageChooserPanel('how_we_help_three_icon'),
FieldPanel('how_we_help_three_text'),
]),
MultiFieldPanel([
ImageChooserPanel('how_we_help_four_icon'),
FieldPanel('how_we_help_four_text'),
]),
]),
FieldPanel('how_we_help_cta_text'),
FieldPanel('how_we_help_cta_link'),
]
),
MultiFieldPanel(
heading="Investment Opportunities by regions",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Region Opportunity Title, 1 Related Region'),
FieldPanel('region_ops_section_title'),
FieldPanel('region_ops_section_intro'),
InlinePanel(
'added_region_card_fields',
label="Region card fields"
),
]
),
MultiFieldPanel(
heading="Informative banner",
children=[
FieldPanel('banner_information')
],
),
MultiFieldPanel(
heading="Related region pages",
classname='collapsible collapsed',
children=[
HelpPanel('Please use this to link to a related region, '
'rather than adding in manually the region title, '
'image and text in the above section when the '
'capital invest region pages are available'),
InlinePanel(
'added_regions',
label="Related Regions"
),
]
),
MultiFieldPanel(
heading="Energy Sector",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Energy Sector Title, Energy Sector Content'),
FieldPanel('energy_sector_title'),
FieldPanel('energy_sector_content'),
ImageChooserPanel('energy_sector_image'),
HelpPanel('CTA requires text and PDF to show on teh page.'),
FieldPanel('energy_sector_cta_text'),
DocumentChooserPanel('energy_sector_pdf_document'),
]
),
MultiFieldPanel(
heading="Homes in England Section",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Homes In England Section Title, Title and PDF '
'for each card'),
FieldPanel('homes_in_england_section_title'),
InlinePanel(
'added_homes_in_england_card_fields',
label="Homes In England cards"
)
]
),
MultiFieldPanel(
heading="Contact Section",
classname='collapsible collapsed',
children=[
HelpPanel('Required fields for section to show: '
'Contact Title, Contact Text'),
FieldPanel('contact_section_title'),
FieldPanel('contact_section_text'),
FieldPanel('contact_section_cta_text')
]
),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels
)
class CapitalInvestRegionPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('breadcrumbs_label'),
MultiFieldPanel(
heading="Hero",
children=[
FieldPanel('hero_title'),
ImageChooserPanel('hero_image'),
],
),
FieldPanel('featured_description'),
MultiFieldPanel(
heading="Region summary",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Region Summary Section Content'),
ImageChooserPanel('region_summary_section_image'),
FieldPanel('region_summary_section_intro'),
FieldPanel('region_summary_section_content'),
],
),
MultiFieldPanel(
heading="Investment opportunities",
classname='collapsible collapsed',
children=[
FieldPanel('investment_opps_title'),
FieldPanel('investment_opps_intro'),
]
),
MultiFieldPanel(
heading="Economics Statistics",
classname='collapsible',
children=[
HelpPanel('Required: at least 4 statistics for the section to show'),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('economics_stat_1_heading'),
FieldPanel('economics_stat_1_number'),
FieldPanel('economics_stat_1_smallprint'),
]),
MultiFieldPanel([
FieldPanel('economics_stat_2_heading'),
FieldPanel('economics_stat_2_number'),
FieldPanel('economics_stat_2_smallprint'),
]),
MultiFieldPanel([
FieldPanel('economics_stat_3_heading'),
FieldPanel('economics_stat_3_number'),
FieldPanel('economics_stat_3_smallprint'),
]),
]),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('economics_stat_4_heading'),
FieldPanel('economics_stat_4_number'),
FieldPanel('economics_stat_4_smallprint'),
]),
MultiFieldPanel([
FieldPanel('economics_stat_5_heading'),
FieldPanel('economics_stat_5_number'),
FieldPanel('economics_stat_5_smallprint'),
]),
MultiFieldPanel([
FieldPanel('economics_stat_6_heading'),
FieldPanel('economics_stat_6_number'),
FieldPanel('economics_stat_6_smallprint'),
]),
]),
],
),
MultiFieldPanel(
heading="Location Statistics",
classname='collapsible',
children=[
HelpPanel('Required: at least 4 statistics for the section to show'),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('location_stat_1_heading'),
FieldPanel('location_stat_1_number'),
FieldPanel('location_stat_1_smallprint'),
]),
MultiFieldPanel([
FieldPanel('location_stat_2_heading'),
FieldPanel('location_stat_2_number'),
FieldPanel('location_stat_2_smallprint'),
]),
MultiFieldPanel([
FieldPanel('location_stat_3_heading'),
FieldPanel('location_stat_3_number'),
FieldPanel('location_stat_3_smallprint'),
]),
]),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('location_stat_4_heading'),
FieldPanel('location_stat_4_number'),
FieldPanel('location_stat_4_smallprint'),
]),
MultiFieldPanel([
FieldPanel('location_stat_5_heading'),
FieldPanel('location_stat_5_number'),
FieldPanel('location_stat_5_smallprint'),
]),
MultiFieldPanel([
FieldPanel('location_stat_6_heading'),
FieldPanel('location_stat_6_number'),
FieldPanel('location_stat_6_smallprint'),
]),
]),
],
),
MultiFieldPanel(
heading="Extra optional Property and Infrastructure section",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Property and Infrastructure Section Title, '
'Property and Infrastructure Section Content'),
ImageChooserPanel('property_and_infrastructure_section_image'),
FieldPanel('property_and_infrastructure_section_title'),
FieldPanel('property_and_infrastructure_section_content'),
],
),
MultiFieldPanel(
heading="Accordions subsections",
classname='collapsible collapsed',
children=[
HelpPanel('Required: subsections title and at least one title and content for an accordion to show'),
FieldPanel('subsections_title'),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('sub_section_one_title'),
ImageChooserPanel('sub_section_one_icon'),
FieldPanel('sub_section_one_content')
]),
MultiFieldPanel([
FieldPanel('sub_section_two_title'),
ImageChooserPanel('sub_section_two_icon'),
FieldPanel('sub_section_two_content')
]),
MultiFieldPanel([
FieldPanel('sub_section_three_title'),
ImageChooserPanel('sub_section_three_icon'),
FieldPanel('sub_section_three_content')
]),
]),
]
),
MultiFieldPanel(
heading="Case study",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Case Study Image, Case Study Title'),
ImageChooserPanel('case_study_image'),
FieldPanel('case_study_title'),
FieldPanel('case_study_text'),
HelpPanel('Cta\'s require both text and a link to show '
'on page. '),
FieldPanel('case_study_cta_text'),
FieldPanel('case_study_cta_link'),
],
),
MultiFieldPanel(
heading="Contact",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Contact Title, Contact Text'),
FieldPanel('contact_title'),
FieldPanel('contact_text'),
FieldPanel('contact_cta_text'),
FieldPanel('contact_cta_link'),
],
),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels
)
class CapitalInvestOpportunityListingPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('breadcrumbs_label'),
FieldPanel('search_results_title'),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels
)
class CapitalInvestOpportunityPagePanels:
content_panels = [
FieldPanel('title'),
MultiFieldPanel(
heading="Related sector",
classname='collapsible collapsed',
children=[
InlinePanel('related_sectors', label="Related Sectors"),
],
),
MultiFieldPanel(
heading="Related region",
classname='collapsible collapsed',
children=[
PageChooserPanel(
'related_region',
[
'great_international.'
'AboutUkRegionPage'
]
),
],
),
FieldPanel('breadcrumbs_label'),
MultiFieldPanel(
heading="Hero",
children=[
ImageChooserPanel('hero_image'),
FieldPanel('hero_title'),
],
),
MultiFieldPanel(
heading="Opportunity summary",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Opportunity Summary Intro'),
FieldPanel('opportunity_summary_intro'),
FieldPanel('opportunity_summary_content'),
ImageChooserPanel('opportunity_summary_image'),
],
),
MultiFieldPanel(
heading="Opportunity Details",
classname='collapsible',
children=[
HelpPanel('Icons require the corresponding text to show on '
'page'),
FieldRowPanel([
MultiFieldPanel([
ImageChooserPanel('location_icon'),
FieldPanel('location_heading'),
FieldPanel('location'),
]),
MultiFieldPanel([
ImageChooserPanel('project_promoter_icon'),
FieldPanel('project_promoter_heading'),
FieldPanel('project_promoter'),
]),
MultiFieldPanel([
ImageChooserPanel('scale_icon'),
FieldPanel('scale_heading'),
FieldPanel('scale'),
FieldPanel('scale_value'),
]),
]),
FieldRowPanel([
MultiFieldPanel([
ImageChooserPanel('sector_icon'),
FieldPanel('sector_heading'),
InlinePanel('related_sub_sectors',
label="Related Sectors"),
]),
MultiFieldPanel([
ImageChooserPanel('investment_type_icon'),
FieldPanel('investment_type_heading'),
FieldPanel('investment_type'),
]),
MultiFieldPanel([
ImageChooserPanel('planning_status_icon'),
FieldPanel('planning_status_heading'),
FieldPanel('planning_status'),
]),
]),
],
),
MultiFieldPanel(
heading="Project Details",
classname='collapsible',
children=[
HelpPanel('Title requires corresponding text to show on page'),
FieldPanel('project_background_title'),
FieldPanel('project_background_intro'),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('project_description_title'),
FieldPanel('project_description_content'),
]),
MultiFieldPanel([
FieldPanel('project_promoter_title'),
FieldPanel('project_promoter_content'),
]),
]),
ImageChooserPanel('project_image')
],
),
MultiFieldPanel(
heading="Similar projects",
classname='collapsible',
children=[
HelpPanel('Section shows if there are opportunities with the same related sector. '
'They are chosen randomly. Cta\'s require both text and a link to show '
'on page. '),
FieldPanel('similar_projects_cta_text'),
FieldPanel('similar_projects_cta_link'),
],
),
MultiFieldPanel(
heading="Case study",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Case Study Image, Case Study Title'),
ImageChooserPanel('case_study_image'),
FieldPanel('case_study_title'),
FieldPanel('case_study_text'),
HelpPanel('Cta\'s require both text and a link to show '
'on page. '),
FieldPanel('case_study_cta_text'),
FieldPanel('case_study_cta_link'),
],
),
MultiFieldPanel(
heading="Contact",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Contact Title, Contact Text'),
FieldPanel('contact_title'),
FieldPanel('contact_text'),
],
),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels,
)
class CapitalInvestContactFormPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('breadcrumbs_label'),
FieldPanel('heading'),
FieldPanel('intro'),
FieldPanel('comment'),
FieldPanel('cta_text'),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels,
)
class CapitalInvestContactFormSuccessPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('message_box_heading'),
FieldPanel('message_box_description'),
FieldPanel('what_happens_next_description')
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels,
)
| [
"wagtail.images.edit_handlers.ImageChooserPanel",
"wagtail.admin.edit_handlers.PageChooserPanel",
"wagtail.documents.edit_handlers.DocumentChooserPanel",
"core.helpers.make_translated_interface",
"core.panels.SearchEngineOptimisationPanel",
"wagtail.admin.edit_handlers.FieldPanel",
"wagtail.admin.edit_h... | [((5735, 5829), 'core.helpers.make_translated_interface', 'make_translated_interface', ([], {'content_panels': 'content_panels', 'settings_panels': 'settings_panels'}), '(content_panels=content_panels, settings_panels=\n settings_panels)\n', (5760, 5829), False, 'from core.helpers import make_translated_interface\n'), ((13689, 13783), 'core.helpers.make_translated_interface', 'make_translated_interface', ([], {'content_panels': 'content_panels', 'settings_panels': 'settings_panels'}), '(content_panels=content_panels, settings_panels=\n settings_panels)\n', (13714, 13783), False, 'from core.helpers import make_translated_interface\n'), ((14115, 14209), 'core.helpers.make_translated_interface', 'make_translated_interface', ([], {'content_panels': 'content_panels', 'settings_panels': 'settings_panels'}), '(content_panels=content_panels, settings_panels=\n settings_panels)\n', (14140, 14209), False, 'from core.helpers import make_translated_interface\n'), ((19986, 20080), 'core.helpers.make_translated_interface', 'make_translated_interface', ([], {'content_panels': 'content_panels', 'settings_panels': 'settings_panels'}), '(content_panels=content_panels, settings_panels=\n settings_panels)\n', (20011, 20080), False, 'from core.helpers import make_translated_interface\n'), ((20484, 20578), 'core.helpers.make_translated_interface', 'make_translated_interface', ([], {'content_panels': 'content_panels', 'settings_panels': 'settings_panels'}), '(content_panels=content_panels, settings_panels=\n settings_panels)\n', (20509, 20578), False, 'from core.helpers import make_translated_interface\n'), ((20927, 21021), 'core.helpers.make_translated_interface', 'make_translated_interface', ([], {'content_panels': 'content_panels', 'settings_panels': 'settings_panels'}), '(content_panels=content_panels, settings_panels=\n settings_panels)\n', (20952, 21021), False, 'from core.helpers import make_translated_interface\n'), ((453, 472), 
'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (463, 472), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((482, 513), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""breadcrumbs_label"""'], {}), "('breadcrumbs_label')\n", (492, 513), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((5618, 5649), 'core.panels.SearchEngineOptimisationPanel', 'SearchEngineOptimisationPanel', ([], {}), '()\n', (5647, 5649), False, 'from core.panels import SearchEngineOptimisationPanel\n'), ((5689, 5707), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""slug"""'], {}), "('slug')\n", (5699, 5707), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((5918, 5937), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (5928, 5937), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((5947, 5978), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""breadcrumbs_label"""'], {}), "('breadcrumbs_label')\n", (5957, 5978), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((6181, 6215), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""featured_description"""'], {}), "('featured_description')\n", (6191, 6215), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13572, 13603), 'core.panels.SearchEngineOptimisationPanel', 'SearchEngineOptimisationPanel', ([], {}), '()\n', (13601, 13603), False, 'from core.panels import 
SearchEngineOptimisationPanel\n'), ((13643, 13661), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""slug"""'], {}), "('slug')\n", (13653, 13661), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13884, 13903), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (13894, 13903), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13913, 13944), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""breadcrumbs_label"""'], {}), "('breadcrumbs_label')\n", (13923, 13944), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13954, 13988), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""search_results_title"""'], {}), "('search_results_title')\n", (13964, 13988), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13998, 14029), 'core.panels.SearchEngineOptimisationPanel', 'SearchEngineOptimisationPanel', ([], {}), '()\n', (14027, 14029), False, 'from core.panels import SearchEngineOptimisationPanel\n'), ((14069, 14087), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""slug"""'], {}), "('slug')\n", (14079, 14087), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((14303, 14322), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (14313, 14322), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((14949, 14980), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""breadcrumbs_label"""'], {}), 
"('breadcrumbs_label')\n", (14959, 14980), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19869, 19900), 'core.panels.SearchEngineOptimisationPanel', 'SearchEngineOptimisationPanel', ([], {}), '()\n', (19898, 19900), False, 'from core.panels import SearchEngineOptimisationPanel\n'), ((19940, 19958), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""slug"""'], {}), "('slug')\n", (19950, 19958), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20174, 20193), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (20184, 20193), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20203, 20234), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""breadcrumbs_label"""'], {}), "('breadcrumbs_label')\n", (20213, 20234), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20244, 20265), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""heading"""'], {}), "('heading')\n", (20254, 20265), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20275, 20294), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""intro"""'], {}), "('intro')\n", (20285, 20294), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20304, 20325), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""comment"""'], {}), "('comment')\n", (20314, 20325), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), 
((20335, 20357), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""cta_text"""'], {}), "('cta_text')\n", (20345, 20357), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20367, 20398), 'core.panels.SearchEngineOptimisationPanel', 'SearchEngineOptimisationPanel', ([], {}), '()\n', (20396, 20398), False, 'from core.panels import SearchEngineOptimisationPanel\n'), ((20438, 20456), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""slug"""'], {}), "('slug')\n", (20448, 20456), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20679, 20698), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (20689, 20698), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20708, 20741), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""message_box_heading"""'], {}), "('message_box_heading')\n", (20718, 20741), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20751, 20788), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""message_box_description"""'], {}), "('message_box_description')\n", (20761, 20788), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20798, 20841), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""what_happens_next_description"""'], {}), "('what_happens_next_description')\n", (20808, 20841), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((20881, 20899), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""slug"""'], {}), 
"('slug')\n", (20891, 20899), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((607, 638), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""hero_image"""'], {}), "('hero_image')\n", (624, 638), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((656, 680), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""hero_title"""'], {}), "('hero_title')\n", (666, 680), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((698, 727), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""hero_subheading"""'], {}), "('hero_subheading')\n", (708, 727), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((745, 772), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""hero_subtitle"""'], {}), "('hero_subtitle')\n", (755, 772), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((790, 817), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""hero_cta_text"""'], {}), "('hero_cta_text')\n", (800, 817), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((835, 862), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""hero_cta_link"""'], {}), "('hero_cta_link')\n", (845, 862), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((1048, 1156), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Reason to Invest Title, Reason to Invest Content"""'], {}), "(\n 'Required fields for section to show: Reason to Invest Title, Reason to 
Invest Content'\n )\n", (1057, 1156), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((1193, 1237), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""reason_to_invest_section_title"""'], {}), "('reason_to_invest_section_title')\n", (1203, 1237), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((1255, 1299), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""reason_to_invest_section_intro"""'], {}), "('reason_to_invest_section_intro')\n", (1265, 1299), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((1317, 1363), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""reason_to_invest_section_content"""'], {}), "('reason_to_invest_section_content')\n", (1327, 1363), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((1381, 1432), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""reason_to_invest_section_image"""'], {}), "('reason_to_invest_section_image')\n", (1398, 1432), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((1450, 1481), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""how_we_help_title"""'], {}), "('how_we_help_title')\n", (1460, 1481), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((1499, 1530), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""how_we_help_intro"""'], {}), "('how_we_help_intro')\n", (1509, 1530), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((1548, 1614), 'wagtail.admin.edit_handlers.HelpPanel', 
'HelpPanel', (['"""Each icon requires corresponding text to show on page"""'], {}), "('Each icon requires corresponding text to show on page')\n", (1557, 1614), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((2527, 2561), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""how_we_help_cta_text"""'], {}), "('how_we_help_cta_text')\n", (2537, 2561), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((2579, 2613), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""how_we_help_cta_link"""'], {}), "('how_we_help_cta_link')\n", (2589, 2613), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((2800, 2902), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Region Opportunity Title, 1 Related Region"""'], {}), "(\n 'Required fields for section to show: Region Opportunity Title, 1 Related Region'\n )\n", (2809, 2902), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((2939, 2977), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""region_ops_section_title"""'], {}), "('region_ops_section_title')\n", (2949, 2977), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((2995, 3033), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""region_ops_section_intro"""'], {}), "('region_ops_section_intro')\n", (3005, 3033), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((3051, 3118), 'wagtail.admin.edit_handlers.InlinePanel', 'InlinePanel', 
(['"""added_region_card_fields"""'], {'label': '"""Region card fields"""'}), "('added_region_card_fields', label='Region card fields')\n", (3062, 3118), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((3309, 3341), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""banner_information"""'], {}), "('banner_information')\n", (3319, 3341), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((3523, 3726), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Please use this to link to a related region, rather than adding in manually the region title, image and text in the above section when the capital invest region pages are available"""'], {}), "(\n 'Please use this to link to a related region, rather than adding in manually the region title, image and text in the above section when the capital invest region pages are available'\n )\n", (3532, 3726), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((3821, 3874), 'wagtail.admin.edit_handlers.InlinePanel', 'InlinePanel', (['"""added_regions"""'], {'label': '"""Related Regions"""'}), "('added_regions', label='Related Regions')\n", (3832, 3874), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((4097, 4199), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Energy Sector Title, Energy Sector Content"""'], {}), "(\n 'Required fields for section to show: Energy Sector Title, Energy Sector Content'\n )\n", (4106, 4199), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((4236, 4269), 'wagtail.admin.edit_handlers.FieldPanel', 
'FieldPanel', (['"""energy_sector_title"""'], {}), "('energy_sector_title')\n", (4246, 4269), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((4287, 4322), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""energy_sector_content"""'], {}), "('energy_sector_content')\n", (4297, 4322), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((4340, 4380), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""energy_sector_image"""'], {}), "('energy_sector_image')\n", (4357, 4380), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((4398, 4457), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""CTA requires text and PDF to show on teh page."""'], {}), "('CTA requires text and PDF to show on teh page.')\n", (4407, 4457), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((4475, 4511), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""energy_sector_cta_text"""'], {}), "('energy_sector_cta_text')\n", (4485, 4511), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((4529, 4579), 'wagtail.documents.edit_handlers.DocumentChooserPanel', 'DocumentChooserPanel', (['"""energy_sector_pdf_document"""'], {}), "('energy_sector_pdf_document')\n", (4549, 4579), False, 'from wagtail.documents.edit_handlers import DocumentChooserPanel\n'), ((4755, 4874), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Homes In England Section Title, Title and PDF for each card"""'], {}), "(\n 'Required fields for section to show: Homes In England Section Title, Title and PDF for each card'\n )\n", (4764, 4874), False, 'from 
wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((4940, 4984), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""homes_in_england_section_title"""'], {}), "('homes_in_england_section_title')\n", (4950, 4984), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((5002, 5088), 'wagtail.admin.edit_handlers.InlinePanel', 'InlinePanel', (['"""added_homes_in_england_card_fields"""'], {'label': '"""Homes In England cards"""'}), "('added_homes_in_england_card_fields', label=\n 'Homes In England cards')\n", (5013, 5088), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((5317, 5394), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Contact Title, Contact Text"""'], {}), "('Required fields for section to show: Contact Title, Contact Text')\n", (5326, 5394), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((5441, 5476), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""contact_section_title"""'], {}), "('contact_section_title')\n", (5451, 5476), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((5494, 5528), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""contact_section_text"""'], {}), "('contact_section_text')\n", (5504, 5528), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((5546, 5584), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""contact_section_cta_text"""'], {}), "('contact_section_cta_text')\n", (5556, 5584), False, 'from wagtail.admin.edit_handlers 
import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((6072, 6096), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""hero_title"""'], {}), "('hero_title')\n", (6082, 6096), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((6114, 6145), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""hero_image"""'], {}), "('hero_image')\n", (6131, 6145), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((6356, 6441), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Region Summary Section Content"""'], {}), "('Required fields for section to show: Region Summary Section Content'\n )\n", (6365, 6441), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((6483, 6532), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""region_summary_section_image"""'], {}), "('region_summary_section_image')\n", (6500, 6532), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((6550, 6592), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""region_summary_section_intro"""'], {}), "('region_summary_section_intro')\n", (6560, 6592), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((6610, 6654), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""region_summary_section_content"""'], {}), "('region_summary_section_content')\n", (6620, 6654), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((6841, 6876), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""investment_opps_title"""'], {}), "('investment_opps_title')\n", (6851, 
6876), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((6894, 6929), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""investment_opps_intro"""'], {}), "('investment_opps_intro')\n", (6904, 6929), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((7101, 7169), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required: at least 4 statistics for the section to show"""'], {}), "('Required: at least 4 statistics for the section to show')\n", (7110, 7169), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8982, 9050), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required: at least 4 statistics for the section to show"""'], {}), "('Required: at least 4 statistics for the section to show')\n", (8991, 9050), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((10875, 11021), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Property and Infrastructure Section Title, Property and Infrastructure Section Content"""'], {}), "(\n 'Required fields for section to show: Property and Infrastructure Section Title, Property and Infrastructure Section Content'\n )\n", (10884, 11021), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((11087, 11149), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""property_and_infrastructure_section_image"""'], {}), "('property_and_infrastructure_section_image')\n", (11104, 11149), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((11167, 11222), 
'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""property_and_infrastructure_section_title"""'], {}), "('property_and_infrastructure_section_title')\n", (11177, 11222), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((11240, 11297), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""property_and_infrastructure_section_content"""'], {}), "('property_and_infrastructure_section_content')\n", (11250, 11297), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((11482, 11592), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required: subsections title and at least one title and content for an accordion to show"""'], {}), "(\n 'Required: subsections title and at least one title and content for an accordion to show'\n )\n", (11491, 11592), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((11600, 11631), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""subsections_title"""'], {}), "('subsections_title')\n", (11610, 11631), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((12607, 12696), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Case Study Image, Case Study Title"""'], {}), "(\n 'Required fields for section to show: Case Study Image, Case Study Title')\n", (12616, 12696), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((12738, 12775), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""case_study_image"""'], {}), "('case_study_image')\n", (12755, 12775), False, 'from wagtail.images.edit_handlers import 
ImageChooserPanel\n'), ((12793, 12823), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""case_study_title"""'], {}), "('case_study_title')\n", (12803, 12823), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((12841, 12870), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""case_study_text"""'], {}), "('case_study_text')\n", (12851, 12870), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((12888, 12953), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Cta\'s require both text and a link to show on page. """'], {}), '("Cta\'s require both text and a link to show on page. ")\n', (12897, 12953), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13001, 13034), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""case_study_cta_text"""'], {}), "('case_study_cta_text')\n", (13011, 13034), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13052, 13085), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""case_study_cta_link"""'], {}), "('case_study_cta_link')\n", (13062, 13085), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13245, 13322), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Contact Title, Contact Text"""'], {}), "('Required fields for section to show: Contact Title, Contact Text')\n", (13254, 13322), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13369, 13396), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', 
(['"""contact_title"""'], {}), "('contact_title')\n", (13379, 13396), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13414, 13440), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""contact_text"""'], {}), "('contact_text')\n", (13424, 13440), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13458, 13488), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""contact_cta_text"""'], {}), "('contact_cta_text')\n", (13468, 13488), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((13506, 13536), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""contact_cta_link"""'], {}), "('contact_cta_link')\n", (13516, 13536), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((14473, 14528), 'wagtail.admin.edit_handlers.InlinePanel', 'InlinePanel', (['"""related_sectors"""'], {'label': '"""Related Sectors"""'}), "('related_sectors', label='Related Sectors')\n", (14484, 14528), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((14705, 14782), 'wagtail.admin.edit_handlers.PageChooserPanel', 'PageChooserPanel', (['"""related_region"""', "['great_international.AboutUkRegionPage']"], {}), "('related_region', ['great_international.AboutUkRegionPage'])\n", (14721, 14782), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((15074, 15105), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""hero_image"""'], {}), "('hero_image')\n", (15091, 15105), False, 'from wagtail.images.edit_handlers import 
ImageChooserPanel\n'), ((15123, 15147), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""hero_title"""'], {}), "('hero_title')\n", (15133, 15147), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((15319, 15394), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Opportunity Summary Intro"""'], {}), "('Required fields for section to show: Opportunity Summary Intro')\n", (15328, 15394), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((15441, 15480), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""opportunity_summary_intro"""'], {}), "('opportunity_summary_intro')\n", (15451, 15480), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((15498, 15539), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""opportunity_summary_content"""'], {}), "('opportunity_summary_content')\n", (15508, 15539), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((15557, 15603), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""opportunity_summary_image"""'], {}), "('opportunity_summary_image')\n", (15574, 15603), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((15775, 15840), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Icons require the corresponding text to show on page"""'], {}), "('Icons require the corresponding text to show on page')\n", (15784, 15840), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((17674, 17736), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Title requires 
corresponding text to show on page"""'], {}), "('Title requires corresponding text to show on page')\n", (17683, 17736), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((17754, 17792), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""project_background_title"""'], {}), "('project_background_title')\n", (17764, 17792), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((17810, 17848), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""project_background_intro"""'], {}), "('project_background_intro')\n", (17820, 17848), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((18300, 18334), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""project_image"""'], {}), "('project_image')\n", (18317, 18334), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((18502, 18674), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Section shows if there are opportunities with the same related sector. They are chosen randomly. Cta\'s require both text and a link to show on page. """'], {}), '(\n "Section shows if there are opportunities with the same related sector. They are chosen randomly. Cta\'s require both text and a link to show on page. 
"\n )\n', (18511, 18674), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((18741, 18780), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""similar_projects_cta_text"""'], {}), "('similar_projects_cta_text')\n", (18751, 18780), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((18798, 18837), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""similar_projects_cta_link"""'], {}), "('similar_projects_cta_link')\n", (18808, 18837), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19000, 19089), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Case Study Image, Case Study Title"""'], {}), "(\n 'Required fields for section to show: Case Study Image, Case Study Title')\n", (19009, 19089), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19131, 19168), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""case_study_image"""'], {}), "('case_study_image')\n", (19148, 19168), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((19186, 19216), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""case_study_title"""'], {}), "('case_study_title')\n", (19196, 19216), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19234, 19263), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""case_study_text"""'], {}), "('case_study_text')\n", (19244, 19263), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19281, 19346), 
'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Cta\'s require both text and a link to show on page. """'], {}), '("Cta\'s require both text and a link to show on page. ")\n', (19290, 19346), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19394, 19427), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""case_study_cta_text"""'], {}), "('case_study_cta_text')\n", (19404, 19427), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19445, 19478), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""case_study_cta_link"""'], {}), "('case_study_cta_link')\n", (19455, 19478), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19638, 19715), 'wagtail.admin.edit_handlers.HelpPanel', 'HelpPanel', (['"""Required fields for section to show: Contact Title, Contact Text"""'], {}), "('Required fields for section to show: Contact Title, Contact Text')\n", (19647, 19715), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19762, 19789), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""contact_title"""'], {}), "('contact_title')\n", (19772, 19789), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((19807, 19833), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""contact_text"""'], {}), "('contact_text')\n", (19817, 19833), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((1739, 1780), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""how_we_help_one_icon"""'], {}), 
"('how_we_help_one_icon')\n", (1756, 1780), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((1806, 1840), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""how_we_help_one_text"""'], {}), "('how_we_help_one_text')\n", (1816, 1840), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((1928, 1969), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""how_we_help_two_icon"""'], {}), "('how_we_help_two_icon')\n", (1945, 1969), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((1995, 2029), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""how_we_help_two_text"""'], {}), "('how_we_help_two_text')\n", (2005, 2029), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((2169, 2212), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""how_we_help_three_icon"""'], {}), "('how_we_help_three_icon')\n", (2186, 2212), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((2238, 2274), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""how_we_help_three_text"""'], {}), "('how_we_help_three_text')\n", (2248, 2274), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((2362, 2404), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""how_we_help_four_icon"""'], {}), "('how_we_help_four_icon')\n", (2379, 2404), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((2430, 2465), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""how_we_help_four_text"""'], {}), "('how_we_help_four_text')\n", (2440, 2465), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, 
PageChooserPanel\n'), ((7265, 7303), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_1_heading"""'], {}), "('economics_stat_1_heading')\n", (7275, 7303), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((7329, 7366), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_1_number"""'], {}), "('economics_stat_1_number')\n", (7339, 7366), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((7392, 7433), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_1_smallprint"""'], {}), "('economics_stat_1_smallprint')\n", (7402, 7433), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((7521, 7559), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_2_heading"""'], {}), "('economics_stat_2_heading')\n", (7531, 7559), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((7585, 7622), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_2_number"""'], {}), "('economics_stat_2_number')\n", (7595, 7622), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((7648, 7689), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_2_smallprint"""'], {}), "('economics_stat_2_smallprint')\n", (7658, 7689), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((7777, 7815), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_3_heading"""'], {}), "('economics_stat_3_heading')\n", (7787, 7815), False, 'from 
wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((7841, 7878), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_3_number"""'], {}), "('economics_stat_3_number')\n", (7851, 7878), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((7904, 7945), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_3_smallprint"""'], {}), "('economics_stat_3_smallprint')\n", (7914, 7945), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8085, 8123), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_4_heading"""'], {}), "('economics_stat_4_heading')\n", (8095, 8123), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8149, 8186), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_4_number"""'], {}), "('economics_stat_4_number')\n", (8159, 8186), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8212, 8253), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_4_smallprint"""'], {}), "('economics_stat_4_smallprint')\n", (8222, 8253), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8341, 8379), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_5_heading"""'], {}), "('economics_stat_5_heading')\n", (8351, 8379), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8405, 8442), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', 
(['"""economics_stat_5_number"""'], {}), "('economics_stat_5_number')\n", (8415, 8442), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8468, 8509), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_5_smallprint"""'], {}), "('economics_stat_5_smallprint')\n", (8478, 8509), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8597, 8635), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_6_heading"""'], {}), "('economics_stat_6_heading')\n", (8607, 8635), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8661, 8698), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_6_number"""'], {}), "('economics_stat_6_number')\n", (8671, 8698), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((8724, 8765), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""economics_stat_6_smallprint"""'], {}), "('economics_stat_6_smallprint')\n", (8734, 8765), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((9146, 9183), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_1_heading"""'], {}), "('location_stat_1_heading')\n", (9156, 9183), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((9209, 9245), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_1_number"""'], {}), "('location_stat_1_number')\n", (9219, 9245), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, 
MultiFieldPanel, PageChooserPanel\n'), ((9271, 9311), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_1_smallprint"""'], {}), "('location_stat_1_smallprint')\n", (9281, 9311), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((9399, 9436), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_2_heading"""'], {}), "('location_stat_2_heading')\n", (9409, 9436), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((9462, 9498), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_2_number"""'], {}), "('location_stat_2_number')\n", (9472, 9498), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((9524, 9564), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_2_smallprint"""'], {}), "('location_stat_2_smallprint')\n", (9534, 9564), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((9652, 9689), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_3_heading"""'], {}), "('location_stat_3_heading')\n", (9662, 9689), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((9715, 9751), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_3_number"""'], {}), "('location_stat_3_number')\n", (9725, 9751), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((9777, 9817), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_3_smallprint"""'], {}), "('location_stat_3_smallprint')\n", (9787, 9817), 
False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((9957, 9994), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_4_heading"""'], {}), "('location_stat_4_heading')\n", (9967, 9994), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((10020, 10056), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_4_number"""'], {}), "('location_stat_4_number')\n", (10030, 10056), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((10082, 10122), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_4_smallprint"""'], {}), "('location_stat_4_smallprint')\n", (10092, 10122), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((10210, 10247), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_5_heading"""'], {}), "('location_stat_5_heading')\n", (10220, 10247), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((10273, 10309), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_5_number"""'], {}), "('location_stat_5_number')\n", (10283, 10309), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((10335, 10375), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_5_smallprint"""'], {}), "('location_stat_5_smallprint')\n", (10345, 10375), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((10463, 10500), 
'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_6_heading"""'], {}), "('location_stat_6_heading')\n", (10473, 10500), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((10526, 10562), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_6_number"""'], {}), "('location_stat_6_number')\n", (10536, 10562), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((10588, 10628), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_stat_6_smallprint"""'], {}), "('location_stat_6_smallprint')\n", (10598, 10628), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((11727, 11762), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""sub_section_one_title"""'], {}), "('sub_section_one_title')\n", (11737, 11762), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((11788, 11829), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""sub_section_one_icon"""'], {}), "('sub_section_one_icon')\n", (11805, 11829), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((11855, 11892), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""sub_section_one_content"""'], {}), "('sub_section_one_content')\n", (11865, 11892), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((11979, 12014), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""sub_section_two_title"""'], {}), "('sub_section_two_title')\n", (11989, 12014), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, 
MultiFieldPanel, PageChooserPanel\n'), ((12040, 12081), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""sub_section_two_icon"""'], {}), "('sub_section_two_icon')\n", (12057, 12081), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((12107, 12144), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""sub_section_two_content"""'], {}), "('sub_section_two_content')\n", (12117, 12144), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((12231, 12268), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""sub_section_three_title"""'], {}), "('sub_section_three_title')\n", (12241, 12268), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((12294, 12337), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""sub_section_three_icon"""'], {}), "('sub_section_three_icon')\n", (12311, 12337), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((12363, 12402), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""sub_section_three_content"""'], {}), "('sub_section_three_content')\n", (12373, 12402), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((15965, 15999), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""location_icon"""'], {}), "('location_icon')\n", (15982, 15999), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((16025, 16055), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location_heading"""'], {}), "('location_heading')\n", (16035, 16055), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((16081, 16103), 
'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""location"""'], {}), "('location')\n", (16091, 16103), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((16191, 16233), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""project_promoter_icon"""'], {}), "('project_promoter_icon')\n", (16208, 16233), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((16259, 16297), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""project_promoter_heading"""'], {}), "('project_promoter_heading')\n", (16269, 16297), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((16323, 16353), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""project_promoter"""'], {}), "('project_promoter')\n", (16333, 16353), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((16441, 16472), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""scale_icon"""'], {}), "('scale_icon')\n", (16458, 16472), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((16498, 16525), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""scale_heading"""'], {}), "('scale_heading')\n", (16508, 16525), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((16551, 16570), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""scale"""'], {}), "('scale')\n", (16561, 16570), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((16596, 16621), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""scale_value"""'], {}), "('scale_value')\n", (16606, 16621), 
False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((16761, 16793), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""sector_icon"""'], {}), "('sector_icon')\n", (16778, 16793), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((16819, 16847), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""sector_heading"""'], {}), "('sector_heading')\n", (16829, 16847), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((16873, 16932), 'wagtail.admin.edit_handlers.InlinePanel', 'InlinePanel', (['"""related_sub_sectors"""'], {'label': '"""Related Sectors"""'}), "('related_sub_sectors', label='Related Sectors')\n", (16884, 16932), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((17056, 17097), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""investment_type_icon"""'], {}), "('investment_type_icon')\n", (17073, 17097), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((17123, 17160), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""investment_type_heading"""'], {}), "('investment_type_heading')\n", (17133, 17160), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((17186, 17215), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""investment_type"""'], {}), "('investment_type')\n", (17196, 17215), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((17303, 17344), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""planning_status_icon"""'], {}), "('planning_status_icon')\n", (17320, 
17344), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((17370, 17407), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""planning_status_heading"""'], {}), "('planning_status_heading')\n", (17380, 17407), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((17433, 17462), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""planning_status"""'], {}), "('planning_status')\n", (17443, 17462), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((17944, 17983), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""project_description_title"""'], {}), "('project_description_title')\n", (17954, 17983), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((18009, 18050), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""project_description_content"""'], {}), "('project_description_content')\n", (18019, 18050), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((18138, 18174), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""project_promoter_title"""'], {}), "('project_promoter_title')\n", (18148, 18174), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n'), ((18200, 18238), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""project_promoter_content"""'], {}), "('project_promoter_content')\n", (18210, 18238), False, 'from wagtail.admin.edit_handlers import InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel, PageChooserPanel\n')] |
from floodsystem.geo import stations_within_radius
from floodsystem.stationdata import build_station_list
def run():
    """Print, in alphabetical order, the names of monitoring stations
    within 10 km of Cambridge city centre (Task 1C)."""
    cambridge_centre = (52.2053, 0.1218)
    search_radius_km = 10
    stations = build_station_list()
    nearby = stations_within_radius(stations, cambridge_centre, search_radius_km)
    names = [s.name for s in nearby]
    names.sort()
    print(names)
if __name__ == "__main__":
    print("*** Task 1C: CUED Part IA Flood Warning System ***")
    run()
"floodsystem.stationdata.build_station_list",
"floodsystem.geo.stations_within_radius"
] | [((198, 218), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (216, 218), False, 'from floodsystem.stationdata import build_station_list\n'), ((375, 426), 'floodsystem.geo.stations_within_radius', 'stations_within_radius', (['stations', 'CambCoord', 'radius'], {}), '(stations, CambCoord, radius)\n', (397, 426), False, 'from floodsystem.geo import stations_within_radius\n')] |
from model.objects import Objects
import time
class ProjectHelper:
    """Page-object style helper that creates, lists and deletes projects
    through the web UI via the app's selenium webdriver."""
    def __init__(self, app):
        self.app = app
    # Class-level cache of scraped projects; reset to None after a mutating
    # operation so the next get_project_list() re-reads the table.
    project_cache = None
    def get_project_list(self):
        """Return the list of projects, scraping the Manage Projects table on
        first use (name from column 0, description from column 4)."""
        if self.project_cache is None:
            wd = self.app.wd
            self.open_manage_project_page()
            self.project_cache = []
            for row in wd.find_elements_by_xpath("//div[@id='content']/div[2]/table/tbody/tr"):
                cells = row.find_elements_by_tag_name("td")
                pname = cells[0].text
                description = cells[4].text
                self.project_cache.append(Objects(pname=pname, description=description))
        return list(self.project_cache)
    def open_manage_project_page(self):
        """Navigate: Manage -> Manage Projects."""
        wd = self.app.wd
        wd.find_element_by_link_text("Manage").click()
        wd.find_element_by_link_text("Manage Projects").click()
    def create(self, objects):
        """Create a new project from the given model and invalidate the cache."""
        wd = self.app.wd
        self.open_manage_project_page()
        wd.find_element_by_xpath("//input[@value='Create New Project']").click()
        self.fill_form(objects)
        wd.find_element_by_xpath("//input[@value='Add Project']").click()
        # Fixed wait for the page to settle after form submission.
        time.sleep(4)
        self.project_cache = None
    def fill_form(self, objects):
        """Fill the project form fields from the model's attributes."""
        self.change_field_value("name", objects.pname)
        self.change_field_value("description", objects.description)
    def change_field_value(self, field_name, text):
        """Replace the content of the named input; no-op when text is None."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)
    def delete_project(self):
        """Delete the first project shown on the Manage Projects page.
        NOTE(review): the Delete button is clicked twice -- presumably the
        second click confirms a deletion dialog; confirm against the UI.
        Unlike create(), this does not reset project_cache.
        """
        wd = self.app.wd
        wd.find_element_by_xpath("//div[@id='sidebar']/ul/li[7]/a/i").click()
        wd.find_element_by_link_text("Manage Projects").click()
        wd.find_element_by_css_selector("td > a").click()
        wd.find_element_by_xpath("//input[@value='Delete Project']").click()
        wd.find_element_by_xpath("//input[@value='Delete Project']").click()
| [
"model.objects.Objects",
"time.sleep"
] | [((1173, 1186), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (1183, 1186), False, 'import time\n'), ((609, 654), 'model.objects.Objects', 'Objects', ([], {'pname': 'pname', 'description': 'description'}), '(pname=pname, description=description)\n', (616, 654), False, 'from model.objects import Objects\n')] |
# Generated by Django 3.1.1 on 2020-09-09 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for carbon_quiz.Mission: removes the
    # old `description` and `status` fields and adds `action` (free text,
    # nullable) and `clicks_needed` (integer, default 1).
    dependencies = [
        ("carbon_quiz", "0009_auto_20200908_2201"),
    ]
    operations = [
        migrations.RemoveField(
            model_name="mission",
            name="description",
        ),
        migrations.RemoveField(
            model_name="mission",
            name="status",
        ),
        migrations.AddField(
            model_name="mission",
            name="action",
            field=models.CharField(
                help_text="Describes what the user needs to do.",
                max_length=500,
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="mission",
            name="clicks_needed",
            field=models.IntegerField(
                default=1, help_text="Number of the links user needs to click."
            ),
        ),
    ]
| [
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((239, 303), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""mission"""', 'name': '"""description"""'}), "(model_name='mission', name='description')\n", (261, 303), False, 'from django.db import migrations, models\n'), ((348, 407), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""mission"""', 'name': '"""status"""'}), "(model_name='mission', name='status')\n", (370, 407), False, 'from django.db import migrations, models\n'), ((552, 649), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Describes what the user needs to do."""', 'max_length': '(500)', 'null': '(True)'}), "(help_text='Describes what the user needs to do.',\n max_length=500, null=True)\n", (568, 649), False, 'from django.db import migrations, models\n'), ((836, 925), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'help_text': '"""Number of the links user needs to click."""'}), "(default=1, help_text=\n 'Number of the links user needs to click.')\n", (855, 925), False, 'from django.db import migrations, models\n')] |
import json
from ..webstack import run as webstack_run
def process(json_file_list):
    """Load each JSON file and run the webstack step for any file whose
    document contains a truthy 'webstack' section."""
    for path in json_file_list:
        with open(path) as handle:
            document = json.load(handle)
        section = document.get('webstack')
        if section:
            webstack_run(section)
| [
"json.load"
] | [((197, 217), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (206, 217), False, 'import json\n')] |
import trio
import os
import json
from itertools import count
# Experiment with generating Chrome Event Trace format, which can be browsed
# through chrome://tracing or other mechanisms.
#
# Screenshot: https://files.gitter.im/python-trio/general/fp6w/image.png
#
# Trace format docs: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#
#
# Things learned so far:
# - I don't understand how the ph="s"/ph="f" flow events work – I think
# they're supposed to show up as arrows, and I'm emitting them between tasks
# that wake each other up, but they're not showing up.
# - I think writing out json synchronously from each event is creating gaps in
# the trace; maybe better to batch them up to write up all at once at the
# end
# - including tracebacks would be cool
# - there doesn't seem to be any good way to group together tasks based on
# nurseries. this really limits the value of this particular trace
# format+viewer for us. (also maybe we should have an instrumentation event
# when a nursery is opened/closed?)
# - task._counter should maybe be public
# - I don't know how to best show task lifetime, scheduling times, and what
# the task is actually doing on the same plot. if we want to show particular
# events like "called stream.send_all", then the chrome trace format won't
# let us also show "task is running", because neither kind of event is
# strictly nested inside the other
class Trace(trio.abc.Instrument):
    """Trio instrument that writes scheduler events in Chrome Event Trace
    (JSON array) format to *out*, for viewing in chrome://tracing.
    Each task becomes a trace "thread" keyed by the task's private
    ``_counter``; tid -1 stands for the I/O manager.
    NOTE: the opening "[" is written here but no closing "]" is emitted by
    this class, as mentioned in the module comment above.
    """
    def __init__(self, out):
        self.out = out
        self.out.write("[\n")
        self.ids = count()  # fresh ids for flow-event (ph="s"/"f") pairs
        self._task_metadata(-1, "I/O manager")
    def _write(self, **ev):
        """Serialize one event; fills in pid and, for non-metadata events,
        a timestamp (trace format wants microseconds; trio clock is seconds,
        hence the * 1e6)."""
        ev.setdefault("pid", os.getpid())
        if ev["ph"] != "M":
            ev.setdefault("ts", trio.current_time() * 1e6)
        self.out.write(json.dumps(ev))
        self.out.write(",\n")
    def _task_metadata(self, tid, name):
        """Emit metadata (ph="M") events naming and ordering a trace thread."""
        self._write(
            name="thread_name",
            ph="M",
            tid=tid,
            args={"name": name},
        )
        self._write(
            name="thread_sort_index",
            ph="M",
            tid=tid,
            args={"sort_index": tid},
        )
    def task_spawned(self, task):
        # Open a "task lifetime" duration span for the new task.
        self._task_metadata(task._counter, task.name)
        self._write(
            name="task lifetime",
            ph="B",
            tid=task._counter,
        )
    def task_exited(self, task):
        # Close the "task lifetime" span.
        self._write(
            name="task lifetime",
            ph="E",
            tid=task._counter,
        )
    def before_task_step(self, task):
        # Begin a "running" span covering one scheduler step of the task.
        self._write(
            name="running",
            ph="B",
            tid=task._counter,
        )
    def after_task_step(self, task):
        # End the "running" span opened in before_task_step.
        self._write(
            name="running",
            ph="E",
            tid=task._counter,
        )
    def task_scheduled(self, task):
        """Emit a flow-event pair (ph="s" then ph="f") from the waking task
        to the woken task, when the wakeup happens inside a task context."""
        try:
            waker = trio.lowlevel.current_task()
        except RuntimeError:
            # Woken from outside any task context; no arrow to draw.
            pass
        else:
            id = next(self.ids)
            self._write(
                ph="s",
                cat="wakeup",
                id=id,
                tid=waker._counter,
            )
            self._write(
                cat="wakeup",
                ph="f",
                id=id,
                tid=task._counter,
            )
    def before_io_wait(self, timeout):
        # I/O waits are attributed to the synthetic tid -1 thread.
        self._write(
            name=f"I/O wait",
            ph="B",
            tid=-1,
        )
    def after_io_wait(self, timeout):
        self._write(
            name=f"I/O wait",
            ph="E",
            tid=-1,
        )
async def child1():
    """Demo task: announce itself, sleep one second, then exit."""
    print("  child1: started! sleeping now...")
    await trio.sleep(1)
    print("  child1: exiting!")
async def child2():
    """Demo task: announce itself, sleep one second, then exit."""
    print("  child2: started! sleeping now...")
    await trio.sleep(1)
    print("  child2: exiting!")
async def parent():
    """Demo task: spawn child1 and child2 in a nursery and wait for both."""
    print("parent: started!")
    async with trio.open_nursery() as nursery:
        print("parent: spawning child1...")
        nursery.start_soon(child1)
        print("parent: spawning child2...")
        nursery.start_soon(child2)
        print("parent: waiting for children to finish...")
        # -- we exit the nursery block here --
    print("parent: all done!")
# Run the demo under the tracing instrument, writing the Chrome trace to
# /tmp/t.json (the file handle is intentionally left to the OS to close).
t = Trace(open("/tmp/t.json", "w"))
trio.run(parent, instruments=[t])
| [
"trio.current_time",
"trio.run",
"trio.sleep",
"json.dumps",
"trio.open_nursery",
"trio.lowlevel.current_task",
"itertools.count",
"os.getpid"
] | [((4310, 4343), 'trio.run', 'trio.run', (['parent'], {'instruments': '[t]'}), '(parent, instruments=[t])\n', (4318, 4343), False, 'import trio\n'), ((1591, 1598), 'itertools.count', 'count', ([], {}), '()\n', (1596, 1598), False, 'from itertools import count\n'), ((3707, 3720), 'trio.sleep', 'trio.sleep', (['(1)'], {}), '(1)\n', (3717, 3720), False, 'import trio\n'), ((3832, 3845), 'trio.sleep', 'trio.sleep', (['(1)'], {}), '(1)\n', (3842, 3845), False, 'import trio\n'), ((3944, 3963), 'trio.open_nursery', 'trio.open_nursery', ([], {}), '()\n', (3961, 3963), False, 'import trio\n'), ((1704, 1715), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1713, 1715), False, 'import os\n'), ((1827, 1841), 'json.dumps', 'json.dumps', (['ev'], {}), '(ev)\n', (1837, 1841), False, 'import json\n'), ((2922, 2950), 'trio.lowlevel.current_task', 'trio.lowlevel.current_task', ([], {}), '()\n', (2948, 2950), False, 'import trio\n'), ((1777, 1796), 'trio.current_time', 'trio.current_time', ([], {}), '()\n', (1794, 1796), False, 'import trio\n')] |
from zeep import Client
from models import RequestParameter
class Caller:
    """Exercises the three say_hello SOAP operations exposed by the local
    server, each using a different argument-passing style."""
    def __init__(self):
        self._name = "dummy_name"
        self._times = 3
        self._client = Client("http://0.0.0.0:8080/?wsdl")
    def call_say_hello_1(self):
        """Invoke say_hello_1 with positional arguments and print the reply."""
        print(self._client.service.say_hello_1(self._name, self._times))
    def call_say_hello_2(self):
        """Invoke say_hello_2 with a plain dict payload and print the reply."""
        payload = {"name": self._name, "times": self._times}
        print(self._client.service.say_hello_2(payload))
    def call_say_hello_3(self):
        """Invoke say_hello_3 with a RequestParameter model, printing the
        reply and its type."""
        param = RequestParameter(name=self._name, times=self._times)
        reply = self._client.service.say_hello_3(param.as_dict())
        print(reply)
        print(type(reply))
def main():
    # Exercise each of the three say_hello call styles in turn, separated
    # by a divider line.
    caller = Caller()
    caller.call_say_hello_1()
    print("=====================")
    caller.call_say_hello_2()
    print("=====================")
    caller.call_say_hello_3()
    print("=====================")
if __name__ == '__main__':
    main()
| [
"models.RequestParameter",
"zeep.Client"
] | [((229, 245), 'zeep.Client', 'Client', (['wsdl_url'], {}), '(wsdl_url)\n', (235, 245), False, 'from zeep import Client\n'), ((667, 719), 'models.RequestParameter', 'RequestParameter', ([], {'name': 'self._name', 'times': 'self._times'}), '(name=self._name, times=self._times)\n', (683, 719), False, 'from models import RequestParameter\n')] |
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateIPSecTunnelBgpSessionDetails(object):
    """
    CreateIPSecTunnelBgpSessionDetails model: the BGP session parameters
    (interface IPs and customer ASN) for one IPSec tunnel. All three
    attributes are optional strings exposed as properties below.
    """
    def __init__(self, **kwargs):
        """
        Initializes a new CreateIPSecTunnelBgpSessionDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param oracle_interface_ip:
            The value to assign to the oracle_interface_ip property of this CreateIPSecTunnelBgpSessionDetails.
        :type oracle_interface_ip: str
        :param customer_interface_ip:
            The value to assign to the customer_interface_ip property of this CreateIPSecTunnelBgpSessionDetails.
        :type customer_interface_ip: str
        :param customer_bgp_asn:
            The value to assign to the customer_bgp_asn property of this CreateIPSecTunnelBgpSessionDetails.
        :type customer_bgp_asn: str
        """
        # Swagger type of each attribute (used by the SDK (de)serializer).
        self.swagger_types = {
            'oracle_interface_ip': 'str',
            'customer_interface_ip': 'str',
            'customer_bgp_asn': 'str'
        }
        # Maps each Python attribute name to its JSON (camelCase) field name.
        self.attribute_map = {
            'oracle_interface_ip': 'oracleInterfaceIp',
            'customer_interface_ip': 'customerInterfaceIp',
            'customer_bgp_asn': 'customerBgpAsn'
        }
        # Backing fields; populated from kwargs by @init_model_state_from_kwargs.
        self._oracle_interface_ip = None
        self._customer_interface_ip = None
        self._customer_bgp_asn = None
    @property
    def oracle_interface_ip(self):
        """
        Gets the oracle_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
        The IP address for the Oracle end of the inside tunnel interface.
        If the tunnel's `routing` attribute is set to `BGP`
        (see :class:`IPSecConnectionTunnel`), this IP address
        is required and used for the tunnel's BGP session.
        If `routing` is instead set to `STATIC`, this IP address is optional. You can set this IP
        address to troubleshoot or monitor the tunnel.
        The value must be a /30 or /31.
        Example: `10.0.0.4/31`
        :return: The oracle_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
        :rtype: str
        """
        return self._oracle_interface_ip
    @oracle_interface_ip.setter
    def oracle_interface_ip(self, oracle_interface_ip):
        """
        Sets the oracle_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
        The IP address for the Oracle end of the inside tunnel interface.
        If the tunnel's `routing` attribute is set to `BGP`
        (see :class:`IPSecConnectionTunnel`), this IP address
        is required and used for the tunnel's BGP session.
        If `routing` is instead set to `STATIC`, this IP address is optional. You can set this IP
        address to troubleshoot or monitor the tunnel.
        The value must be a /30 or /31.
        Example: `10.0.0.4/31`
        :param oracle_interface_ip: The oracle_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
        :type: str
        """
        self._oracle_interface_ip = oracle_interface_ip
    @property
    def customer_interface_ip(self):
        """
        Gets the customer_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
        The IP address for the CPE end of the inside tunnel interface.
        If the tunnel's `routing` attribute is set to `BGP`
        (see :class:`IPSecConnectionTunnel`), this IP address
        is required and used for the tunnel's BGP session.
        If `routing` is instead set to `STATIC`, this IP address is optional. You can set this IP
        address to troubleshoot or monitor the tunnel.
        The value must be a /30 or /31.
        Example: `10.0.0.5/31`
        :return: The customer_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
        :rtype: str
        """
        return self._customer_interface_ip
    @customer_interface_ip.setter
    def customer_interface_ip(self, customer_interface_ip):
        """
        Sets the customer_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
        The IP address for the CPE end of the inside tunnel interface.
        If the tunnel's `routing` attribute is set to `BGP`
        (see :class:`IPSecConnectionTunnel`), this IP address
        is required and used for the tunnel's BGP session.
        If `routing` is instead set to `STATIC`, this IP address is optional. You can set this IP
        address to troubleshoot or monitor the tunnel.
        The value must be a /30 or /31.
        Example: `10.0.0.5/31`
        :param customer_interface_ip: The customer_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
        :type: str
        """
        self._customer_interface_ip = customer_interface_ip
    @property
    def customer_bgp_asn(self):
        """
        Gets the customer_bgp_asn of this CreateIPSecTunnelBgpSessionDetails.
        If the tunnel's `routing` attribute is set to `BGP`
        (see :class:`IPSecConnectionTunnel`), this ASN
        is required and used for the tunnel's BGP session. This is the ASN of the network on the
        CPE end of the BGP session. Can be a 2-byte or 4-byte ASN. Uses \"asplain\" format.
        If the tunnel's `routing` attribute is set to `STATIC`, the `customerBgpAsn` must be null.
        Example: `12345` (2-byte) or `1587232876` (4-byte)
        :return: The customer_bgp_asn of this CreateIPSecTunnelBgpSessionDetails.
        :rtype: str
        """
        return self._customer_bgp_asn
    @customer_bgp_asn.setter
    def customer_bgp_asn(self, customer_bgp_asn):
        """
        Sets the customer_bgp_asn of this CreateIPSecTunnelBgpSessionDetails.
        If the tunnel's `routing` attribute is set to `BGP`
        (see :class:`IPSecConnectionTunnel`), this ASN
        is required and used for the tunnel's BGP session. This is the ASN of the network on the
        CPE end of the BGP session. Can be a 2-byte or 4-byte ASN. Uses \"asplain\" format.
        If the tunnel's `routing` attribute is set to `STATIC`, the `customerBgpAsn` must be null.
        Example: `12345` (2-byte) or `1587232876` (4-byte)
        :param customer_bgp_asn: The customer_bgp_asn of this CreateIPSecTunnelBgpSessionDetails.
        :type: str
        """
        self._customer_bgp_asn = customer_bgp_asn
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| [
"oci.util.formatted_flat_dict"
] | [((6895, 6920), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (6914, 6920), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')] |
import balanced
# Configure the Balanced API client with a (test-mode) API secret, then
# fetch a single order by its resource URI.
balanced.configure('ak-test-1o9QKwUCrwstHW<KEY>')
order = balanced.Order.fetch('/orders/OR7qAh5x1cFzX0U9hD628LPa')
"balanced.configure",
"balanced.Order.fetch"
] | [((17, 66), 'balanced.configure', 'balanced.configure', (['"""ak-test-1o9QKwUCrwstHW<KEY>"""'], {}), "('ak-test-1o9QKwUCrwstHW<KEY>')\n", (35, 66), False, 'import balanced\n'), ((76, 132), 'balanced.Order.fetch', 'balanced.Order.fetch', (['"""/orders/OR7qAh5x1cFzX0U9hD628LPa"""'], {}), "('/orders/OR7qAh5x1cFzX0U9hD628LPa')\n", (96, 132), False, 'import balanced\n')] |
from __future__ import print_function
import tensorflow as tf
import numpy as np
from collections import namedtuple, OrderedDict
from subprocess import call
import scipy.io.wavfile as wavfile
import argparse
import codecs
import timeit
import struct
import toml
import re
import sys
import os
def _int64_feature(value):
    """Wrap a single int in a tf.train.Feature (int64_list of length 1)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature (bytes_list of length 1)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def slice_signal(signal, window_size, stride=0.5):
    """Cut a 1-D signal into overlapping fixed-size windows.

    Windows start every ``int(window_size * stride)`` samples; trailing
    samples that do not fill a complete window are discarded.

    Args:
        signal: 1-D array of samples.
        window_size: Number of samples per window.
        stride: Fraction of ``window_size`` to advance between windows.

    Returns:
        int32 array of shape (num_windows, window_size); empty when the
        signal is shorter than one window.
    """
    assert signal.ndim == 1, signal.ndim
    n_samples = signal.shape[0]
    offset = int(window_size * stride)
    # Keep only windows that fit entirely inside the signal. (The original
    # loop's `end_i - beg_i < window_size: break` branch was dead code: both
    # zipped range endpoints advanced by `offset`, so the difference was
    # always exactly `window_size`.)
    slices = [signal[beg:beg + window_size]
              for beg in range(0, n_samples, offset)
              if beg + window_size <= n_samples]
    return np.array(slices, dtype=np.int32)
def read_and_slice(filename, wav_canvas_size, stride=0.5):
    """Load a wav file (which must be sampled at 16 kHz) and slice it into
    overlapping windows of ``wav_canvas_size`` samples.

    Raises:
        ValueError: if the file's sampling rate is not 16000 Hz.
    """
    sample_rate, samples = wavfile.read(filename)
    if sample_rate != 16000:
        raise ValueError('Sampling rate is expected to be 16kHz!')
    return slice_signal(samples, wav_canvas_size, stride)
def encoder_proc(wav_filename, noisy_path, out_file, wav_canvas_size, baseline_dir=None):
    """Slice a clean wav and its noisy (and optional baseline) counterpart
    into aligned windows and serialize each window tuple to TFRecords.

    Args:
        wav_filename: Path to the clean wav file.
        noisy_path: Directory holding the noisy wav with the same basename.
        out_file: An open TFRecordWriter.
        wav_canvas_size: Window size (in samples) passed to read_and_slice.
        baseline_dir: Optional directory with a baseline wav of the same
            basename; when given, a 'baseline_raw' feature is also written.
    """
    ppath, wav_fullname = os.path.split(wav_filename)
    noisy_filename = os.path.join(noisy_path, wav_fullname)
    wav_signals = read_and_slice(wav_filename, wav_canvas_size)
    noisy_signals = read_and_slice(noisy_filename, wav_canvas_size)
    baseline_signals = None
    if baseline_dir is not None:
        baseline_filename = os.path.join(baseline_dir, wav_fullname)
        baseline_signals = read_and_slice(baseline_filename, wav_canvas_size)
    assert wav_signals.shape == noisy_signals.shape, noisy_signals.shape
    # Single serialization loop (the original duplicated it in both
    # branches); tobytes() replaces the deprecated tostring() alias and
    # produces identical output.
    for i, (wav, noisy) in enumerate(zip(wav_signals, noisy_signals)):
        feature = {
            'wav_raw': _bytes_feature(wav.tobytes()),
            'noisy_raw': _bytes_feature(noisy.tobytes()),
        }
        if baseline_signals is not None:
            feature['baseline_raw'] = _bytes_feature(baseline_signals[i].tobytes())
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        out_file.write(example.SerializeToString())
def main(opts):
    """Build a TFRecords file of aligned clean/noisy (and optional baseline)
    wav slices, as described by the TOML config in ``opts.cfg``.

    Raises:
        ValueError: if the output file already exists and --force-gen was
            not supplied.
    """
    if not os.path.exists(opts.save_path):
        # make save path if it does not exist
        os.makedirs(opts.save_path)
    # set up the output filepath, ensuring a .tfrecords extension (the
    # original's else-branch split/rejoin was a no-op and has been removed)
    out_filepath = os.path.join(opts.save_path, opts.out_file)
    if os.path.splitext(out_filepath)[1] != '.tfrecords':
        out_filepath += '.tfrecords'
    # check if out_file exists and if force flag is set
    if os.path.exists(out_filepath) and not opts.force_gen:
        raise ValueError('ERROR: {} already exists. Set force flag (--force-gen) to '
                         'overwrite. Skipping this speaker.'.format(out_filepath))
    elif os.path.exists(out_filepath) and opts.force_gen:
        print('Will overwrite previously existing tfrecords')
        os.unlink(out_filepath)
    with open(opts.cfg) as cfh:
        # read the configuration description
        cfg_desc = toml.loads(cfh.read())
        beg_enc_t = timeit.default_timer()
        out_file = tf.python_io.TFRecordWriter(out_filepath)
        # process the acoustic and textual data now.
        # NOTE: .items() rather than the Python-2-only .iteritems(), so the
        # script also runs under Python 3 (identical iteration under Py2).
        for dset_i, (dset, dset_desc) in enumerate(cfg_desc.items()):
            print('-' * 50)
            wav_dir = dset_desc['clean']
            wav_files = [os.path.join(wav_dir, wav) for wav in
                         os.listdir(wav_dir) if wav.endswith('.wav')]
            noisy_dir = dset_desc['noisy']
            # optional third stream of baseline-enhanced wavs
            baseline_dir = dset_desc.get('baseline')
            nfiles = len(wav_files)
            for m, wav_file in enumerate(wav_files):
                print('Processing wav file {}/{} {}{}'.format(m + 1,
                                                              nfiles,
                                                              wav_file,
                                                              ' ' * 10),
                      end='\r')
                sys.stdout.flush()
                encoder_proc(wav_file, noisy_dir, out_file, 2 ** 14, baseline_dir)
        out_file.close()
        end_enc_t = timeit.default_timer() - beg_enc_t
        print('')
        print('*' * 50)
        print('Total processing and writing time: {} s'.format(end_enc_t))
if __name__ == '__main__':
    # Command-line entry point: datasets are described by a TOML config and
    # the resulting TFRecords file is written under --save_path.
    parser = argparse.ArgumentParser(description='Convert the set of txt and '
                                                'wavs to TFRecords')
    parser.add_argument('--cfg', type=str, default='cfg/e2e_maker.cfg',
                        help='File containing the description of datasets '
                             'to extract the info to make the TFRecords.')
    parser.add_argument('--save_path', type=str, default='data/',
                        help='Path to save the dataset')
    parser.add_argument('--out_file', type=str, default='segan.tfrecords',
                        help='Output filename')
    parser.add_argument('--force-gen', dest='force_gen', action='store_true',
                        help='Flag to force overwriting existing dataset.')
    parser.set_defaults(force_gen=False)
    opts = parser.parse_args()
    main(opts)
| [
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.makedirs",
"timeit.default_timer",
"os.path.join",
"os.path.splitext",
"tensorflow.train.Int64List",
"os.path.split",
"tensorflow.train.BytesList",
"numpy.array",
"scipy.io.wavfile.read",
"os.unlink",
"tensorflow.python_io.TFRe... | [((1126, 1158), 'numpy.array', 'np.array', (['slices'], {'dtype': 'np.int32'}), '(slices, dtype=np.int32)\n', (1134, 1158), True, 'import numpy as np\n'), ((1238, 1260), 'scipy.io.wavfile.read', 'wavfile.read', (['filename'], {}), '(filename)\n', (1250, 1260), True, 'import scipy.io.wavfile as wavfile\n'), ((1660, 1687), 'os.path.split', 'os.path.split', (['wav_filename'], {}), '(wav_filename)\n', (1673, 1687), False, 'import os\n'), ((1709, 1747), 'os.path.join', 'os.path.join', (['noisy_path', 'wav_fullname'], {}), '(noisy_path, wav_fullname)\n', (1721, 1747), False, 'import os\n'), ((3275, 3318), 'os.path.join', 'os.path.join', (['opts.save_path', 'opts.out_file'], {}), '(opts.save_path, opts.out_file)\n', (3287, 3318), False, 'import os\n'), ((5522, 5610), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert the set of txt and wavs to TFRecords"""'}), "(description=\n 'Convert the set of txt and wavs to TFRecords')\n", (5545, 5610), False, 'import argparse\n'), ((1941, 1981), 'os.path.join', 'os.path.join', (['baseline_dir', 'wav_fullname'], {}), '(baseline_dir, wav_fullname)\n', (1953, 1981), False, 'import os\n'), ((3109, 3139), 'os.path.exists', 'os.path.exists', (['opts.save_path'], {}), '(opts.save_path)\n', (3123, 3139), False, 'import os\n'), ((3195, 3222), 'os.makedirs', 'os.makedirs', (['opts.save_path'], {}), '(opts.save_path)\n', (3206, 3222), False, 'import os\n'), ((3522, 3552), 'os.path.splitext', 'os.path.splitext', (['out_filepath'], {}), '(out_filepath)\n', (3538, 3552), False, 'import os\n'), ((3658, 3686), 'os.path.exists', 'os.path.exists', (['out_filepath'], {}), '(out_filepath)\n', (3672, 3686), False, 'import os\n'), ((4171, 4193), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4191, 4193), False, 'import timeit\n'), ((4213, 4254), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['out_filepath'], {}), '(out_filepath)\n', (4240, 
4254), True, 'import tensorflow as tf\n'), ((359, 392), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (377, 392), True, 'import tensorflow as tf\n'), ((459, 492), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (477, 492), True, 'import tensorflow as tf\n'), ((3326, 3356), 'os.path.splitext', 'os.path.splitext', (['out_filepath'], {}), '(out_filepath)\n', (3342, 3356), False, 'import os\n'), ((3889, 3917), 'os.path.exists', 'os.path.exists', (['out_filepath'], {}), '(out_filepath)\n', (3903, 3917), False, 'import os\n'), ((4008, 4031), 'os.unlink', 'os.unlink', (['out_filepath'], {}), '(out_filepath)\n', (4017, 4031), False, 'import os\n'), ((5329, 5351), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5349, 5351), False, 'import timeit\n'), ((4475, 4501), 'os.path.join', 'os.path.join', (['wav_dir', 'wav'], {}), '(wav_dir, wav)\n', (4487, 4501), False, 'import os\n'), ((5182, 5200), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5198, 5200), False, 'import sys\n'), ((4540, 4559), 'os.listdir', 'os.listdir', (['wav_dir'], {}), '(wav_dir)\n', (4550, 4559), False, 'import os\n')] |
import xNormal
xNormal.run("piano_high.obj", "piano_low.obj", "piano.png", width=256, height=256, gen_normals = True, gen_ao = True) | [
"xNormal.run"
] | [((15, 132), 'xNormal.run', 'xNormal.run', (['"""piano_high.obj"""', '"""piano_low.obj"""', '"""piano.png"""'], {'width': '(256)', 'height': '(256)', 'gen_normals': '(True)', 'gen_ao': '(True)'}), "('piano_high.obj', 'piano_low.obj', 'piano.png', width=256,\n height=256, gen_normals=True, gen_ao=True)\n", (26, 132), False, 'import xNormal\n')] |
from copy import deepcopy
from typing import Tuple
import jax.numpy as jnp
from jax.scipy.linalg import cho_factor, cho_solve
from multipledispatch import dispatch
from .types import Array
def I(n: int) -> "Array":
    """
    Build the n x n identity matrix.

    :param n: The number of rows (and columns) of the matrix.
    :return: An n x n identity matrix.
    """
    return jnp.identity(n)
def concat_dictionaries(a: dict, b: dict) -> dict:
    """
    Combine two dictionaries into a new one. When a key occurs in both inputs,
    the value from the second dictionary takes precedence. Neither input is
    modified.
    """
    combined = dict(a)
    combined.update(b)
    return combined
def merge_dictionaries(base_dict: dict, in_dict: dict) -> dict:
    """
    Overwrite values of ``base_dict`` with those from ``in_dict``.

    Only keys already present in ``base_dict`` are updated; keys that exist
    solely in ``in_dict`` are ignored. The intended use is refreshing a
    complete parameter dictionary from a partial one.

    :param base_dict: Complete dictionary of key-value pairs (mutated in place).
    :param in_dict: Subset of key-value pairs whose values take precedence.
    :return: The (mutated) ``base_dict``.
    """
    # Idiomatic membership test (`k in d`, not `k in d.keys()`); iterating the
    # keys is enough since the original values are only overwritten, not read.
    for k in base_dict:
        if k in in_dict:
            base_dict[k] = in_dict[k]
    return base_dict
def sort_dictionary(base_dict: dict) -> dict:
    """
    Return a new dictionary whose keys appear in sorted order.

    :param base_dict: The unsorted dictionary.
    :return: A dictionary with the same key-value pairs, keys sorted.
    """
    return {key: base_dict[key] for key in sorted(base_dict)}
@dispatch(jnp.DeviceArray)
def standardise(x: jnp.DeviceArray) -> Tuple[jnp.DeviceArray, jnp.DeviceArray, jnp.DeviceArray]:
    """
    Standardise a matrix column-wise to zero mean and unit variance.

    Intended for standardising a training dataset; the computed mean and
    standard deviation are returned so related data can be mapped with them.

    :param x: A matrix of unstandardised values.
    :return: A tuple of (standardised matrix, column means, column stds).
    """
    mu = jnp.mean(x, axis=0)
    sigma = jnp.std(x, axis=0)
    return (x - mu) / sigma, mu, sigma
@dispatch(jnp.DeviceArray, jnp.DeviceArray, jnp.DeviceArray)
def standardise(
    x: jnp.DeviceArray, xmean: jnp.DeviceArray, xstd: jnp.DeviceArray
) -> jnp.DeviceArray:
    """
    Standardise a matrix against a precomputed mean and standard deviation.

    Typically used to map a test set onto the scale of the training data.

    :param x: A matrix of unstandardised values.
    :param xmean: Precomputed column means.
    :param xstd: Precomputed column standard deviations.
    :return: The standardised matrix.
    """
    centred = x - xmean
    return centred / xstd
def unstandardise(
    x: "jnp.DeviceArray", xmean: "jnp.DeviceArray", xstd: "jnp.DeviceArray"
) -> "jnp.DeviceArray":
    """
    Map a standardised matrix back onto its original scale.

    Inverse of the mean/std standardisation: each column is multiplied by its
    standard deviation and shifted by its mean.

    :param x: A standardised matrix.
    :param xmean: The mean vector used for standardisation.
    :param xstd: The standard-deviation vector used for standardisation.
    :return: The matrix on its original scale.
    """
    return x * xstd + xmean
def as_constant(parameter_set: dict, params: list) -> Tuple[dict, dict]:
    """
    Split a parameter dictionary into trainable and constant parts.

    The input dictionary is deep-copied first, so the caller's dictionary is
    never modified.

    :param parameter_set: Full dictionary of parameters.
    :param params: Names of the parameters to extract as constants.
    :return: A tuple of (remaining parameters, extracted constant parameters).
    """
    remaining = deepcopy(parameter_set)
    extracted = {name: remaining.pop(name) for name in params}
    return remaining, extracted
| [
"jax.numpy.eye",
"jax.numpy.std",
"multipledispatch.dispatch",
"copy.deepcopy",
"jax.numpy.mean"
] | [((1704, 1729), 'multipledispatch.dispatch', 'dispatch', (['jnp.DeviceArray'], {}), '(jnp.DeviceArray)\n', (1712, 1729), False, 'from multipledispatch import dispatch\n'), ((2222, 2281), 'multipledispatch.dispatch', 'dispatch', (['jnp.DeviceArray', 'jnp.DeviceArray', 'jnp.DeviceArray'], {}), '(jnp.DeviceArray, jnp.DeviceArray, jnp.DeviceArray)\n', (2230, 2281), False, 'from multipledispatch import dispatch\n'), ((362, 372), 'jax.numpy.eye', 'jnp.eye', (['n'], {}), '(n)\n', (369, 372), True, 'import jax.numpy as jnp\n'), ((2126, 2145), 'jax.numpy.mean', 'jnp.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2134, 2145), True, 'import jax.numpy as jnp\n'), ((2157, 2175), 'jax.numpy.std', 'jnp.std', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2164, 2175), True, 'import jax.numpy as jnp\n'), ((3410, 3433), 'copy.deepcopy', 'deepcopy', (['parameter_set'], {}), '(parameter_set)\n', (3418, 3433), False, 'from copy import deepcopy\n')] |
import os
def replace_version(old_version, new_version):
    """Rewrite version strings in every source file under ``../caer``.

    Walks the ``../caer`` tree and, in every ``.py``/``.cpp``/``.c``/``.h``/
    ``.hpp`` file, replaces occurrences of ``version <old>`` with
    ``version <new>``. Each rewritten file's absolute path is printed.

    :param old_version: Current version as a tuple, e.g. ``(1, 8, 0)``.
    :param new_version: Replacement version as a tuple, e.g. ``(3, 9, 1)``.
    :raises ValueError: If either argument is not a tuple of at least three
        components.
    """
    if not isinstance(old_version, tuple) or not isinstance(new_version, tuple):
        raise ValueError("`old_version` and `new_version` must be version tuples, e.g. (1, 2, 3)")
    # Explicit length check: slicing a short tuple would otherwise fail with a
    # cryptic unpacking error below.
    if len(old_version) < 3 or len(new_version) < 3:
        raise ValueError("Version tuples need at least (major, minor, micro) components")

    major, minor, micro = old_version[:3]
    old_str = f'{major}.{minor}.{micro}'
    major, minor, micro = new_version[:3]
    new_str = f'{major}.{minor}.{micro}'
    print(f"New version = {new_str}")

    for root, _, files in os.walk('../caer'):
        for file in files:
            if file.endswith(('.py', '.cpp', '.c', '.h', '.hpp')):
                # Compute the path once instead of three times.
                filepath = os.path.abspath(os.path.join(root, file))
                with open(filepath, 'r') as f:
                    new_text = f.read().replace('version ' + old_str, 'version ' + new_str)
                # Reopen for writing only after the read handle is closed;
                # the original nested the truncating 'w' open inside the 'r'
                # context manager.
                with open(filepath, 'w') as f:
                    print(filepath)
                    f.write(new_text)


replace_version((1,8,0), (3,9,1))
| [
"os.path.join",
"os.walk"
] | [((481, 499), 'os.walk', 'os.walk', (['"""../caer"""'], {}), "('../caer')\n", (488, 499), False, 'import os\n'), ((637, 661), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (649, 661), False, 'import os\n'), ((818, 842), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (830, 842), False, 'import os\n'), ((898, 922), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (910, 922), False, 'import os\n')] |
import argparse
import math
import matplotlib.pyplot as plt
import os
import numpy as np
import shutil
import pandas as pd
import seaborn as sns
# Global seaborn styling for every figure this script produces.
sns.set()
sns.set_context("talk")
NUM_BINS = 100  # NOTE(review): not referenced in the visible code -- confirm before removing
# CSV with per-chunk VMAF scores for every encoding of the Pensieve video.
path = '../Data/Video_Info/Pensieve_Info/PenieveVideo_video_info'
# Maps a logged bitrate (kbps, as a string) to the CSV column that holds the
# VMAF score of the corresponding resolution/framerate encoding.
video_mappings = {}
video_mappings['300'] = '320x180x30_vmaf_score'
video_mappings['750'] = '640x360x30_vmaf_score'
video_mappings['1200'] = '768x432x30_vmaf_score'
video_mappings['1850'] = '1024x576x30_vmaf_score'
video_mappings['2850'] = '1280x720x30_vmaf_score'
video_mappings['4300'] = '1280x720x60_vmaf_score'
# Metrics aggregated per experiment; the "reward_*" ones also get CDF plots.
metric_list = ["reward_vmaf", "reward_br", "rebuf", "br_avg", "vmaf_avg", "switching_vmaf", "switching_br"]
#MINERVA
# QoE model coefficients used in get_qoe():
#   reward_vmaf += vmaf - rebuf_penalty*rebuf - switching_penalty*|delta vmaf|
rebuf_penalty = 25
switching_penalty = 2.5
segment_lenght = 4.0  # segment duration in seconds ("lenght" spelling kept: it is code)
def load_csv():
    """Read the per-chunk VMAF table for the Pensieve video from `path`."""
    return pd.read_csv(path)
pensieve_video_csv = load_csv()
def get_qoe(abr, trace):
    """Parse one experiment's reward log and compute its QoE statistics.

    Reads ``<result_dir>/<abr>-<trace>/result/<abr>_rewards_0.log`` and
    accumulates, over all chunks except the first, both the bitrate-based and
    the VMAF-based (MINERVA-style) QoE.

    :param abr: ABR scheme name (also the log-file prefix).
    :param trace: network trace name.
    :return: 7-tuple (reward_vmaf, reward_bitrate, total_rebuffering,
        bitrate_switching_avg, vmaf_switching_avg, vmaf_avg, bitrate_avg),
        or a tuple of seven Nones when the log does not contain exactly
        ``args.video_chunks`` lines.
    """
    logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
    logfile = os.path.join(logdir, abr + "_rewards_0.log")
    reward_vmaf = 0
    reward_bitrate = 0
    total_rebuffering = 0.0
    vmaf_avg = 0.0
    vmaf_switching_avg = 0.0
    bitrate_avg = 0.0
    bitrate_switching_avg = 0.0
    with open(logfile, "r") as fin:
        reward_lines = fin.readlines()
        # A short log means the experiment did not finish: remember it so
        # clean() can remove its directory later.
        if (len(reward_lines) != args.video_chunks):
            if len(reward_lines) < args.video_chunks:
                to_clean.append(logfile)
            print("{} has {} chunks instead of {}".format(logfile, len(reward_lines), args.video_chunks))
            print("Skip, please")
            return None, None, None, None, None, None, None
        for i, r_line in enumerate(reward_lines):
            # Log format: whitespace-separated fields; data[1] is the chunk
            # bitrate (kbps), data[3] presumably the rebuffering time and
            # data[-1] the per-chunk reward -- TODO confirm the log layout.
            data = r_line.split()
            if i == 0:
                # The first chunk only seeds the "previous" values; it is not
                # counted in any of the sums below.
                br = int(data[1])
                br_previous = br
                vmaf_previous = pensieve_video_csv.loc[i, video_mappings[str(br)]]
            else: # skip first
                br = int(data[1])
                bitrate_avg += br
                bitrate_switching_avg += abs(br - br_previous)
                reward_bitrate += float(data[-1])
                total_rebuffering += float(data[3])
                # VMAF for this chunk at the chosen bitrate, looked up in the
                # per-chunk table loaded at import time.
                vmaf_current = pensieve_video_csv.loc[i, video_mappings[str(br)]]
                vmaf_avg += vmaf_current
                vmaf_switching_avg += abs(vmaf_current - vmaf_previous)
                # MINERVA-style per-chunk QoE:
                #   vmaf - rebuf_penalty*rebuf - switching_penalty*|delta vmaf|
                reward_vmaf += (float(vmaf_current) -
                                rebuf_penalty*(float(data[3])) -
                                switching_penalty*(abs(vmaf_current - vmaf_previous)))
                vmaf_previous = vmaf_current
                br_previous = br
    # NOTE(review): the switching and vmaf averages are normalised by
    # segment_lenght*video_chunks (total seconds) while bitrate_avg is per
    # chunk -- confirm this asymmetry is intended.
    return reward_vmaf,\
           reward_bitrate,\
           total_rebuffering,\
           bitrate_switching_avg/(segment_lenght*args.video_chunks),\
           vmaf_switching_avg/(segment_lenght*args.video_chunks),\
           vmaf_avg/(segment_lenght*args.video_chunks),\
           bitrate_avg/args.video_chunks
#
#def get_qoe(abr, trace):
# logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
# logfile = os.path.join(logdir, abr + "_rewards_0.log")
#
# reward = 0
#
#
# with open(logfile, "r") as fin:
# reward_lines = fin.readlines()
#
# if (len(reward_lines) != args.video_chunks):
# if len(reward_lines) < args.video_chunks:
# to_clean.append(logfile)
# print("{} has {} chunks instead of {}".format(logfile, len(reward_lines), args.video_chunks))
# print("Skip, please")
# return None
#
# for i, r_line in enumerate(reward_lines):
# if i > 0: # skip first
# reward += float(r_line.split()[-1])
#
# return reward
def get_qoes(abrs_list, traces_list):
    """Collect QoE metrics for every (abr, trace) combination.

    :param abrs_list: list of ABR scheme names.
    :param traces_list: list of network trace names.
    :return: dict mapping each abr name to a dict of metric name -> list of
        per-trace values. Traces whose logs are incomplete are skipped.
    """
    # Keys in the same order as the tuple returned by get_qoe().
    metric_keys = ('reward_vmaf', 'reward_br', 'rebuf', 'switching_br',
                   'switching_vmaf', 'vmaf_avg', 'br_avg')
    global_results = {}
    for abr in abrs_list:
        # One list of per-trace values per metric. (The original code first
        # assigned a throwaway list here, which was dead code.)
        global_results[abr] = {key: [] for key in metric_keys}
        for trace in traces_list:
            values = get_qoe(abr, trace)
            # get_qoe returns a tuple of Nones when the log is incomplete.
            if values[0] is not None:
                for key, value in zip(metric_keys, values):
                    global_results[abr][key].append(value)
    return global_results
def get_qoes_partial(abrs_list, traces_list):
    """Aggregate QoE metrics for the experiments that have already run.

    Unlike get_qoes(), a (abr, trace) pair is only evaluated when its result
    directory exists, a progress line is printed, and schemes without any
    completed experiment are dropped from the output.

    :param abrs_list: list of ABR scheme names.
    :param traces_list: list of network trace names.
    :return: dict mapping abr -> dict of metric name -> list of per-trace values.
    """
    total_experiments_expected = len(args.abrs) * len(args.traces)
    experiments_executed_so_far = 0
    # Keys in the same order as the tuple returned by get_qoe().
    metric_keys = ('reward_vmaf', 'reward_br', 'rebuf', 'switching_br',
                   'switching_vmaf', 'vmaf_avg', 'br_avg')
    partial_results = {}
    for abr in abrs_list:
        partial_results[abr] = {key: [] for key in metric_keys}
        for trace in traces_list:
            logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
            if os.path.exists(logdir):
                values = get_qoe(abr, trace)
                # get_qoe returns a tuple of Nones when the log is incomplete.
                if values[0] is not None:
                    for key, value in zip(metric_keys, values):
                        partial_results[abr][key].append(value)
                # Counted whenever the result dir exists, even if incomplete.
                experiments_executed_so_far += 1
        # BUG FIX: the original compared this dict against [], which is never
        # true, so schemes with no results were never dropped as intended.
        if all(len(v) == 0 for v in partial_results[abr].values()):
            del partial_results[abr]
    print("Experiment executed: {}/{}".format(experiments_executed_so_far, total_experiments_expected))
    return partial_results
def plot_cdf(results, reward_key):
    """Plot the per-scheme CDF of ``reward_key`` and save it as a PNG.

    Every scheme's rewards are divided by the absolute mean reward of the
    best-performing scheme, so the x-axis is a normalised total reward.

    :param results: dict abr -> dict of metric name -> per-trace values.
    :param reward_key: which reward metric to plot (e.g. "reward_vmaf").
    """
    fig = plt.figure(figsize=(16.0, 10.0))
    ax = fig.add_subplot(111)
    def average_of_the_best():
        # Returns |mean reward| of the scheme with the highest mean reward,
        # used below as a common normalisation constant for all curves.
        avg_best = -1000000000000
        abr_best = ''
        for scheme in results.keys():
            avg_tmp = np.mean(results[scheme][reward_key])
            if avg_best < avg_tmp:
                avg_best = avg_tmp
                abr_best = scheme
        print("Best provider in average is {} with {}".format(abr_best, avg_best))
        return abs(avg_best)
    schemes = []
    norm = average_of_the_best()
    markers = ['.', ',', 'o', 'v', '^', '>', '<', 's', 'x', 'D', 'd', '*', '_', '']
    for i, scheme in enumerate(results.keys()):
        # Normalise, histogram, then cumulate to get an empirical CDF in %.
        # (The comprehension's `i` shadows the loop variable only inside the
        # comprehension scope, so the marker index below is unaffected.)
        values = [float(i)/norm for i in results[scheme][reward_key]]
        values, base = np.histogram(values, bins=len(values))
        cumulative = np.cumsum(values)
        cumulative = [float(i) / len(values) * 100 for i in cumulative]
        # Cycle through the marker styles so schemes stay distinguishable.
        marker_index = i % len(markers)
        ax.plot(base[:-1], cumulative, linewidth=3, marker=markers[marker_index], markevery=2, markersize=15)
        schemes.append(scheme)
    ax.legend(schemes, loc=2)
    ax.set_xlim(-1.0, 1.8)
    plt.ylabel('CDF')
    plt.xlabel('total reward')
    fig.savefig(os.path.join(args.store_dir, 'cdf_{}.png'.format(reward_key)))
def plot_bar(results, metric):
    """Save a bar chart of the per-scheme average of ``metric``.

    :param results: dict abr -> dict of metric name -> per-trace values.
    :param metric: which metric to average and plot.
    """
    averages = {scheme: np.mean(data[metric]) for scheme, data in results.items()}
    fig = plt.figure(figsize=(16.0, 10.0))
    ax = fig.add_subplot(111)
    positions = np.arange(len(averages))
    ax.bar(positions, averages.values())
    ax.set_xticks(positions)
    ax.set_xticklabels(averages.keys())
    fig.savefig(os.path.join(args.store_dir, 'bar_{}.png'.format(metric)))
def clean():
    """Delete result directories of experiments flagged as incomplete.

    Each entry of the global ``to_clean`` list is a reward log whose chunk
    count was short (see get_qoe). The most recently modified one is spared
    unless --include_last was given, since it may still be running.
    """
    timestamps = []
    for c in to_clean:
        # os.path.getmtime is the last-modification time, used here as a
        # proxy for when the experiment ran.
        timestamp_creation = os.path.getmtime(c)
        timestamps.append(timestamp_creation)
        print("File {} was created at {}".format(c, timestamp_creation))
    timestamps.sort()
    if not args.include_last and len(timestamps) >= 1:
        # Spare the newest log: it might belong to a still-running experiment.
        print("Skipping file created at {}: might be still running".format(timestamps[-1]))
        del timestamps[-1]
    removing = []
    for t in timestamps:
        for c in to_clean:
            # Match logs back to their timestamps. NOTE(review): identical
            # mtimes would make the spared file eligible again -- confirm
            # that is acceptable.
            if os.path.getmtime(c) == t:
                # The log lives in <exp_dir>/result/, so remove <exp_dir>.
                print("Removing {}".format(os.path.dirname(os.path.dirname(c))))
                removing.append(os.path.dirname(os.path.dirname(c)))
    for r in removing:
        shutil.rmtree(r)
def main():
    """CLI entry point: parse arguments, aggregate QoE results and plot them.

    Positional args: result_dir, store_dir, video_chunks; optional --abrs,
    --traces, --partial, --allow_cleaning, --include_last.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('result_dir', help='result directory', type=str)
    # BUG FIX: the two help strings below were copy-pasted from result_dir.
    parser.add_argument('store_dir', help='directory where plots are saved', type=str)
    parser.add_argument('video_chunks', help='expected number of chunks per experiment log', type=int)
    parser.add_argument("--abrs", nargs="+", help='ABR list')
    parser.add_argument("--traces", nargs="+", help='Traces list')
    parser.add_argument('--partial', action="store_true", help="get the partial results")
    parser.add_argument('--allow_cleaning', action="store_true", help="if enabled, cleans the experiments that failed, a part of the most recent one (might still be running")
    parser.add_argument('--include_last', action="store_true", help="if enabled, also the last is getting cleaned")
    # args and to_clean are read as globals by the rest of the module.
    global args
    args = parser.parse_args()
    global to_clean
    to_clean = []
    # exist_ok avoids a race between the existence check and the creation.
    os.makedirs(args.store_dir, exist_ok=True)
    if args.partial:
        res = get_qoes_partial(args.abrs, args.traces)
    else:
        res = get_qoes(args.abrs, args.traces)
    for metric in metric_list:
        # CDFs are only produced for the reward metrics; bars for all.
        if "reward" in metric:
            plot_cdf(res, metric)
        plot_bar(res, metric)
    if args.allow_cleaning:
        print("Executing cleaning")
        clean()


if __name__ == "__main__":
    main()
| [
"numpy.mean",
"seaborn.set",
"os.path.exists",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"argparse.ArgumentParser",
"os.makedirs",
"matplotlib.pyplot.xlabel",
"seaborn.set_context",
"os.path.join",
"os.path.getmtime",
"os.path.dirname",
"matplotlib.pyplot.figure",
"shutil.rmtree",
"... | [((146, 155), 'seaborn.set', 'sns.set', ([], {}), '()\n', (153, 155), True, 'import seaborn as sns\n'), ((156, 179), 'seaborn.set_context', 'sns.set_context', (['"""talk"""'], {}), "('talk')\n", (171, 179), True, 'import seaborn as sns\n'), ((798, 815), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (809, 815), True, 'import pandas as pd\n'), ((910, 968), 'os.path.join', 'os.path.join', (['args.result_dir', "(abr + '-' + trace)", '"""result"""'], {}), "(args.result_dir, abr + '-' + trace, 'result')\n", (922, 968), False, 'import os\n'), ((983, 1027), 'os.path.join', 'os.path.join', (['logdir', "(abr + '_rewards_0.log')"], {}), "(logdir, abr + '_rewards_0.log')\n", (995, 1027), False, 'import os\n'), ((6675, 6707), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0, 10.0)'}), '(figsize=(16.0, 10.0))\n', (6685, 6707), True, 'import matplotlib.pyplot as plt\n'), ((7825, 7842), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (7835, 7842), True, 'import matplotlib.pyplot as plt\n'), ((7847, 7873), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""total reward"""'], {}), "('total reward')\n", (7857, 7873), True, 'import matplotlib.pyplot as plt\n'), ((8135, 8167), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0, 10.0)'}), '(figsize=(16.0, 10.0))\n', (8145, 8167), True, 'import matplotlib.pyplot as plt\n'), ((9251, 9276), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9274, 9276), False, 'import argparse\n'), ((7492, 7509), 'numpy.cumsum', 'np.cumsum', (['values'], {}), '(values)\n', (7501, 7509), True, 'import numpy as np\n'), ((8091, 8123), 'numpy.mean', 'np.mean', (['results[scheme][metric]'], {}), '(results[scheme][metric])\n', (8098, 8123), True, 'import numpy as np\n'), ((8538, 8557), 'os.path.getmtime', 'os.path.getmtime', (['c'], {}), '(c)\n', (8554, 8557), False, 'import os\n'), ((9177, 9193), 'shutil.rmtree', 'shutil.rmtree', (['r'], {}), '(r)\n', (9190, 
9193), False, 'import shutil\n'), ((10156, 10186), 'os.path.exists', 'os.path.exists', (['args.store_dir'], {}), '(args.store_dir)\n', (10170, 10186), False, 'import os\n'), ((10196, 10223), 'os.makedirs', 'os.makedirs', (['args.store_dir'], {}), '(args.store_dir)\n', (10207, 10223), False, 'import os\n'), ((5595, 5653), 'os.path.join', 'os.path.join', (['args.result_dir', "(abr + '-' + trace)", '"""result"""'], {}), "(args.result_dir, abr + '-' + trace, 'result')\n", (5607, 5653), False, 'import os\n'), ((5669, 5691), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (5683, 5691), False, 'import os\n'), ((6890, 6926), 'numpy.mean', 'np.mean', (['results[scheme][reward_key]'], {}), '(results[scheme][reward_key])\n', (6897, 6926), True, 'import numpy as np\n'), ((8970, 8989), 'os.path.getmtime', 'os.path.getmtime', (['c'], {}), '(c)\n', (8986, 8989), False, 'import os\n'), ((9125, 9143), 'os.path.dirname', 'os.path.dirname', (['c'], {}), '(c)\n', (9140, 9143), False, 'import os\n'), ((9055, 9073), 'os.path.dirname', 'os.path.dirname', (['c'], {}), '(c)\n', (9070, 9073), False, 'import os\n')] |