# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
is_visual = True  # print label distributions while processing
is_to_csv = True  # write the generated splits out to CSV files
def visulize_distribution(df):
    # Print the per-label sample counts; flip the branch to plot a histogram instead.
    if 1:
        print(df.target.value_counts())
    else:
        import matplotlib.pyplot as plt
        df['target'].plot.hist(width=0.1)
        plt.show()
def read_20newsgroups(data_file=None, test_file=None, dataset=None, test_size=0.2):
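    """Load 20newsgroups data from CSV files, from a DataFrame passed as
    `dataset`, or by downloading it via sklearn, and return the
    (train_texts, valid_texts, train_labels, valid_labels) split together
    with the list of label names."""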
if test_file is not None:
testset = pd.read_csv(test_file)
testset = testset.dropna()
if is_visual:
visulize_distribution(testset)
valid_texts = list(testset['text'])
valid_labels = np.array(testset['target'])
classifier_types = list(testset['title'].unique())
dataset = pd.read_csv(data_file)
dataset = dataset.dropna()
train_texts = list(dataset['text'])
train_labels = np.array(dataset['target'])
classifier_types = list(dataset['title'].unique())
if is_visual:
visulize_distribution(dataset)
return (train_texts, valid_texts, train_labels, valid_labels), classifier_types
else:
if data_file is not None:
print(data_file)
dataset = pd.read_csv(data_file)
#https://stackoverflow.com/questions/63517293/valueerror-textencodeinput-must-be-uniontextinputsequence-tupleinputsequence
dataset = dataset.dropna()
#print(dataset.shape)
if dataset is not None:
#print(dataset.shape)
#print(dataset.columns)
documents = list(dataset['text'])
labels = np.array(dataset['target'])
classifier_types = list(dataset['title'].unique())
#print(type(documents), len(documents), documents[0])
#print(type(labels), len(labels), labels[0])
#print(classifier_types, len(classifier_types))
else:
# download & load 20newsgroups dataset from sklearn's repos
dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
print(type(dataset))
documents = dataset.data
labels = dataset.target
classifier_types = dataset.target_names
#print(type(labels), len(labels), labels[0])
#print(type(dataset.target_names), dataset.target_names, len(dataset.target_names))
        # split into training & testing and return data as well as label names
print(type(documents), len(documents))
print('>>', documents[0])
print('>>', documents[1])
return train_test_split(documents, labels, test_size=test_size), classifier_types
def twenty_newsgroup_to_csv(subset=None):
    # Fetch the requested 20newsgroups split ('train', 'test' or 'all'; headers,
    # footers and quote blocks stripped) and flatten it into a DataFrame.
    if subset is None:
        subset = "all"
    newsgroups = fetch_20newsgroups(subset=subset, remove=("headers", "footers", "quotes"))
df = pd.DataFrame([newsgroups.data, newsgroups.target.tolist()]).T
df.columns = ['text', 'target']
targets = pd.DataFrame( newsgroups.target_names)
targets.columns=['title']
out = pd.merge(df, targets, left_on='target', right_index=True)
print(out.shape, out.columns)
#out.describe(include=['target'])
#out.to_csv('20_newsgroup.csv')
#out.groupby('target').count().plot.bar()
if is_visual:
visulize_distribution(out)
return out
def test_20newsgroups(dataset):
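    """Dump the held-out split to test_20newsgroups.csv (when is_to_csv is set)."""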
if is_to_csv:
dataset.to_csv('test_20newsgroups.csv', index=False)
def iid_20newsgroups(dataset, num_users):
"""
Sample I.I.D. client data from 20newsgroups dataset
:param dataset:
:param num_users:
:return: dict of users' dataset
"""
num_items = int(len(dataset)/num_users)
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
print(dict_users, num_items)
for i in range(num_users):
chosen_idxs = np.random.choice(all_idxs, num_items, replace=False)
dict_users[i] = dataset.iloc[chosen_idxs]
all_idxs = list(set(all_idxs) - set(chosen_idxs))
#print({x for i, x in enumerate(dict_users[i]) if i < 5})
if is_visual:
print(dict_users[i].head(), dict_users[i].shape)
visulize_distribution(dict_users[i])
if is_to_csv:
dict_users[i].to_csv('iid_20newsgroups_'+str(i)+'.csv', index=False)
#print(dict_users.keys())
return dict_users
def noniid_label_20newsgroups(dataset, num_users, alpha=None):
"""
Sample non-I.I.D client data from 20newsgroups dataset: label imbalance, quantity uniform
:param dataset:
:param num_users:
:alpha: label ratio, total number = 20lables
:return:
"""
if is_visual:
visulize_distribution(dataset)
#dict_users, all_idxs = {}, [i for i in range(len(dataset))]
dict_users = {i: np.array([]) for i in range(num_users)}
labels = np.array(dataset['target'])
num_samples = len(dataset)
num_labels = 20
num_shards = int(len(dataset)/num_labels)
idxs = np.arange(num_samples)
print(dict_users)
print(labels, len(labels))
print(idxs, len(idxs))
    # pair each sample index with its label (the argsort-based label sorting is left disabled)
    idxs_labels = np.vstack((idxs, labels))
    #idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
safe_idxs = []
seed_idxs = {}
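    # First pass: collect up to 3 "seed" sample indices per label so that every
    # client ends up with at least a few examples of all 20 classes.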
    for i in range(len(dataset)):
key = idxs_labels[1][i]
if key in seed_idxs:
if seed_idxs[key] < 3:
safe_idxs.append(idxs_labels[0][i])
seed_idxs[key] += 1
else:
safe_idxs.append(idxs_labels[0][i])
seed_idxs[key] = 1
#seed_idxs[idxs_labels[1][i]] = idxs_labels[0][i]
print('seed_idxs', seed_idxs)
    chosen_idxs = {i: [] for i in range(num_users)}
    # Assign samples by label parity: even-numbered labels go to user 0,
    # odd-numbered labels to user 1 (this split only supports two users).
    for i in range(len(dataset)):
        user_id = idxs_labels[1][i] % 2
        chosen_idxs[user_id].append(idxs_labels[0][i])
for i in range(num_users):
dict_users[i] = dataset.iloc[chosen_idxs[i] + safe_idxs]
#all_idxs = list(set(all_idxs) - set(chosen_idxs))
#print({x for i, x in enumerate(dict_users[i]) if i < 5})
if is_visual:
print(dict_users[i].head(), dict_users[i].shape)
visulize_distribution(dict_users[i])
if is_to_csv:
dict_users[i].to_csv('noniid_label_20newsgroups_alpha'+ str(alpha)+ '_'+str(i)+'.csv', index=False)
return dict_users
def noniid_quantity_20newsgroups(dataset, num_users=2, beta=None):
"""
Sample non-I.I.D client data from 20newsgroups dataset: quantity imbalance, label uniform
:param dataset:
:param num_users:
:return:
"""
if is_visual:
visulize_distribution(dataset)
#dict_users, all_idxs = {}, [i for i in range(len(dataset))]
num_items = {} #int(len(dataset)/num_users)
for i in range(len(beta)):
num_items[i] = int(len(dataset) * beta[i])
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
print(dict_users, num_items)
for i in range(num_users):
chosen_idxs = np.random.choice(all_idxs, num_items[i], replace=False)
dict_users[i] = dataset.iloc[chosen_idxs]
all_idxs = list(set(all_idxs) - set(chosen_idxs))
#print({x for i, x in enumerate(dict_users[i]) if i < 5})
if is_visual:
print(dict_users[i].head(), dict_users[i].shape)
visulize_distribution(dict_users[i])
if is_to_csv:
dict_users[i].to_csv('noniid_quantity_20newsgroups_beta'+ str(beta[i])+ '_'+str(i)+'.csv', index=False)
#print(dict_users.keys())
return dict_users
if __name__ == '__main__':
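    # Each `if 0:` / `if 1:` block below is a manual toggle for one workflow:
    # loading splits, generating IID or non-IID partitions, or dumping the test set.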
if 0:
(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups()
print(type(train_texts), len(train_texts))
print(type(train_labels), len(train_labels))
if 0:
start=0
valid_sample_n = 2
sample_n = valid_sample_n*5
train_texts = train_texts[start:sample_n]
train_labels = train_labels[start:sample_n]
valid_texts = valid_texts[start:valid_sample_n]
valid_labels = valid_labels[start:valid_sample_n]
print(len(train_texts), len(train_labels))
print(len(valid_texts), len(valid_labels))
#print(valid_texts, valid_labels)
print(target_names)
if 0: #generate iid-dataset
dataset = twenty_newsgroup_to_csv()
#print(dataset.head(10))
#dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
dict_user = iid_20newsgroups(dataset, 2)
read_20newsgroups(dict_user[0])
read_20newsgroups()
if 0: #load dataset via read_20newsgroups
#(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups(data_file=None)
#(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups(data_file='iid_20newsgroups_1.csv')
(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups(data_file='noniid_label_20newsgroups_alpha0.5_0.csv', test_file='test_20newsgroups.csv')
print(type(train_texts), len(train_texts))
print(type(train_labels), len(train_labels))
print(train_labels[:2])
if 1:
dataset = twenty_newsgroup_to_csv(subset='train')
#print(dataset.head(10))
#dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
#dict_user = noniid_20newsgroups(dataset, 2)
noniid_label_20newsgroups(dataset, 2, alpha=0.5)
num_users = 2
#noniid_quantity_20newsgroups(dataset, beta=[0.1, 0.9])
if 0:
dataset = twenty_newsgroup_to_csv(subset='test')
test_20newsgroups(dataset)
# (c) MIT License Copyright 2014 <NAME>
# Please reuse, modify or distribute freely.
from collections import OrderedDict
import tkinter as tk
class StripChart( tk.Frame ):
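    """A scrolling strip chart: each named track is drawn in its own color and
    the whole plot shifts left by two pixels each time new values are plotted."""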
def __init__( self, parent, scale, historySize, trackColors, *args, **opts ):
# Initialize
super().__init__( parent, *args, **opts )
self._trackHist = OrderedDict() # Map: TrackName -> list of canvas objID
self._trackColor = trackColors # Map: Track Name -> color
self._chartHeight = scale + 1
self._chartLength = historySize * 2 # Stretch for readability
self._canvas = tk.Canvas( self, height=self._chartHeight + 17,
width=self._chartLength, background='black' )
self._canvas.grid( sticky=tk.N+tk.S+tk.E+tk.W )
# Draw horizontal to divide plot from tick labels
x, y = 0, self._chartHeight + 2
x2, y2 = self._chartLength, y
self._baseLine = self._canvas.create_line( x, y, x2, y2, fill='white' )
# Init track def and histories lists
self._trackColor.update( { 'tick':'white', 'tickline':'white',
'ticklabel':'white' } )
for trackName in self._trackColor.keys():
self._trackHist[ trackName ] = [ None for x in range(historySize) ]
def plotValues( self, **vals ):
for trackName, trackHistory in self._trackHist.items():
# Scroll left-wards
self._canvas.delete( trackHistory.pop(0) )
# Remove left-most canvas objs
self._canvas.move( trackName, -2, 0 )
# Scroll canvas objs 2 pixels left
# Plot the new values
try:
val = vals[ trackName ]
x = self._chartLength
y = self._chartHeight - val
color = self._trackColor[ trackName ]
objId = self._canvas.create_line( x, y, x+1, y, fill=color,
width=3, tags=trackName )
trackHistory.append( objId )
            except KeyError:
                # No new value was supplied for this track on this tick.
                trackHistory.append( None )
def drawTick( self, text=None, **lineOpts ):
# draw vertical tick line
x = self._chartLength
y = 1
x2 = x
y2 = self._chartHeight
color = self._trackColor[ 'tickline' ]
objId = self._canvas.create_line( x, y, x2, y2, fill=color,
tags='tick', **lineOpts )
self._trackHist[ 'tickline' ].append( objId )
# draw tick label
if text is not None:
x = self._chartLength
y = self._chartHeight + 10
color = self._trackColor[ 'ticklabel' ]
objId = self._canvas.create_text( x, y, text=text,
fill=color, tags='tick' )
self._trackHist[ 'ticklabel' ].append( objId )
def configTrackColors( self, **trackColors ):
# Change plotted data color
for trackName, colorName in trackColors.items( ):
self._canvas.itemconfigure( trackName, fill=colorName )
# Change settings so future data has the new color
self._trackColor.update( trackColors )
if __name__ == '__main__':
top = tk.Tk( )
graph = StripChart( top, 100, 300, { 'A':'blue', 'B':'green', 'C':'red' } )
graph.grid( )
val_A = 0
val_B = 0
val_C = 0
delta = [ -3, -2, -1, 0, 1, 2, 3 ] # randomly vary the values by one of these
tickCount = 0
def nextVal( current, lowerBound, upperBound ):
from random import choice
current += choice( delta )
if current < lowerBound:
return lowerBound
elif current > upperBound:
return upperBound
else:
return current
def plotNextVals( ):
global val_A, val_B, val_C, tickCount
if tickCount % 50 == 0:
graph.drawTick( text=str(tickCount), dash=(1,4) )
tickCount += 1
val_A = nextVal( val_A, 0, 99 )
val_B = nextVal( val_B, 0, 99 )
val_C = nextVal( val_C, 0, 99 )
graph.plotValues( A=val_A, B=val_B, C=val_C )
#changeColor = { 800: 'black',
#1200: 'yellow',
#1600: 'orange',
#2000: 'white',
#2400: 'brown',
#2800: 'blue' }
#if tickCount in changeColor:
#graph.configTrackColors( A=changeColor[tickCount] )
top.after( 1, plotNextVals )
top.after( 1, plotNextVals )
top.mainloop( )
from apps.quiver.models import AnalyticsService, AnalyticsServiceExecution
from django.shortcuts import render, HttpResponseRedirect
from django.core.exceptions import PermissionDenied
from django.views.generic import FormView, CreateView, ListView, DetailView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import AnalyticsServiceForm
from django.core import serializers
from django.utils.encoding import uri_to_iri
from apps.calc.measurement import measurement_obj
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
import json
from apps.analysis.json import NumPyArangeEncoder
from apps.projects.models import Experiment, Project, Datarow, Value
from apps.projects.serializer import project_serialize
from django.conf import settings
import numpy as np
import random
from apps.quiver import service_executor
# Create your views here.
class NewAnalyticsService(LoginRequiredMixin, CreateView):
form_class = AnalyticsServiceForm
template_name = 'quiver/analyticsservice_create.html'
def get_context_data(self, **kwargs):
data = super(NewAnalyticsService, self).get_context_data(**kwargs)
return data
def form_valid(self, form):
user = self.request.user
form.instance.user = user
context = self.get_context_data()
self.object = form.save()
return super(NewAnalyticsService, self).form_valid(form)
class UpdateAnalyticsService(LoginRequiredMixin, UpdateView):
model = AnalyticsService
form_class = AnalyticsServiceForm
pk_url_kwarg = 'id'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if not self.object.user == self.request.user and not self.object.visibility:
raise PermissionDenied()
return super(UpdateAnalyticsService, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
data = super(UpdateAnalyticsService, self).get_context_data(**kwargs)
return data
def form_valid(self, form):
context = self.get_context_data()
return super(UpdateAnalyticsService, self).form_valid(form)
class MyAnalyticsService(LoginRequiredMixin, ListView):
model = AnalyticsService
allow_empty = True
paginate_by = 10
def get_queryset(self):
user = self.request.user
return AnalyticsService.objects.filter(user=user).order_by('updated')
class AnalyticsServiceDetail(DetailView):
model = AnalyticsService
pk_url_kwarg = 'id'
def get_context_data(self, **kwargs):
user = self.request.user
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the projects
context['project_list'] = Project.objects.filter(user=user).order_by('updated')
return context
#def get(self, request, *args, **kwargs):
# self.object = self.get_object()
# if self.object.user != self.request.user and not self.object.visibility:
# raise PermissionDenied()
# return super(AnalyticsServiceDetail, self).get(request, *args, **kwargs)
def delete_analytics_service(request, analytics_service_id):
AnalyticsService.objects.get(id=analytics_service_id).delete()
return HttpResponseRedirect('/quiver/')
@login_required
def analytics_service_detail(request, experimentId):
if request.method != 'POST':
return HttpResponseRedirect('/dashboard/')
# current user
curruser_id = request.user.id
projectId = Experiment.objects.get(id=experimentId).project_id
# owner of experiment
expowner_id = Project.objects.get(id=projectId).user_id
# read graph visibility from post
graph_visibility = request.POST.get("graphVisibilities", "").split(',')
# Read Data from DB
header_list = np.asarray(Datarow.objects.filter(experiment_id=experimentId).values_list('name', flat=True))
einheiten_list = np.asarray(Datarow.objects.filter(experiment_id=experimentId).values_list('unit', flat=True))
mInstruments_list = np.asarray(
Datarow.objects.filter(experiment_id=experimentId).values_list('measuring_instrument', flat=True))
experimentName = Experiment.objects.get(id=experimentId).name
dateCreated = Experiment.objects.get(id=experimentId).created
timerow = Experiment.objects.get(id=experimentId).timerow
datarow_id = Datarow.objects.filter(experiment_id=experimentId).values_list('id', flat=True)
value_amount = len(Value.objects.filter(datarow_id=datarow_id[0]))
datarow_amount = len(datarow_id)
# values in the right order will be put in here, but for now initialize with 0
values_wo = [0] * datarow_amount
#fill values_wo with only datarow_amount-times of database fetches
i = 0
while i < datarow_amount:
values_wo[i] = Value.objects.filter(datarow_id=datarow_id[i]).values_list('value', flat=True)
i += 1
# order the values in values_wo, so that they can be used without database fetching
data = np.transpose(values_wo).astype(float)
# Create/Initialize the measurement object
measurement = measurement_obj.Measurement(json.dumps(data, cls=NumPyArangeEncoder),json.dumps(header_list, cls=NumPyArangeEncoder),
json.dumps(einheiten_list, cls=NumPyArangeEncoder),timerow)
# Prepare the Data for Rendering
dataForRender = {
'jsonData': json.dumps(measurement.data, cls=NumPyArangeEncoder),
'jsonHeader': json.dumps(measurement.colNames, cls=NumPyArangeEncoder),
'jsonEinheiten': json.dumps(measurement.colUnits, cls=NumPyArangeEncoder),
'jsonZeitreihenSpalte': json.dumps(measurement.timeIndex, cls=NumPyArangeEncoder),
'jsonMeasurementInstruments': json.dumps(mInstruments_list, cls=NumPyArangeEncoder),
'experimentId': experimentId,
'experimentName': experimentName,
'projectId': projectId,
'dateCreated': dateCreated,
'current_user_id': curruser_id,
'experiment_owner_id': expowner_id,
'graphVisibility': json.dumps(graph_visibility, cls=NumPyArangeEncoder),
}
# save experimentId to get it in ajax call when refreshing graph
request.session['experimentId'] = experimentId
return render(request, "quiver/index.html", dataForRender)
#def analyticsService(request):
#
# if request.method == 'POST':
# form = AnalyticsServiceForm(request.POST)
# if form.is_valid():
# print('hi')
#
# form = AnalyticsServiceForm()
#
# return render(request, 'analytics_service_detail.html', {'form': form})
def execute_service(request, analytics_service_id):
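    # Serialize the chosen project, then drive the service's state machine
    # (READY -> RUNNING -> DONE) via service_executor and return the decoded
    # result to the caller as JSON.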
    # read the project id and parameter count from the POST payload
if request.method == 'POST':
project_id = request.POST.get("project_id", )
rowcounter = int(request.POST.get("rowcounter", ))
        # read the parameter rows sent via ajax and prepare them for the service call
        input = []
        parameter = []
        i = 0
while i < rowcounter:
param_attributes = {
'name': request.POST.get('parameter_name_' + str(i), ),
'value': request.POST.get('parameter_value_' + str(i), ),
'type': request.POST.get('type_select_' + str(i), )
}
parameter.append(param_attributes)
            i += 1
        # serialize the project as preparation for sending it to the service
        input = project_serialize(project_id)
        # generate a random number between 0 and 9999 as task_id
        # (note: task_id is not actually passed to the executor below)
        task_id = random.randrange(0, 10000, 1)
service = AnalyticsService.objects.get(id=analytics_service_id)
status = service_executor.get_status_for_service(service)
if status == service_executor.ServiceState.READY:
user = request.user
service_execution = AnalyticsServiceExecution(service=service, last_state=1, user=user)
service_execution.save()
#while service_execution.last_state != service_executor.ServiceState.DONE:
if service_execution.last_state == service_executor.ServiceState.READY:
task_url = service_executor.execute_next_state(service_execution, None, input, parameter)
if service_execution.last_state == service_executor.ServiceState.RUNNING:
result = service_executor.execute_next_state(service_execution, task_url, None, None).decode('ascii')
return JsonResponse(result, safe=False)
        else:
            raise ValueError('Service is not ready for execution right now.')
return
"""
Mapping from OOType opcodes to JVM MicroInstructions. Most of these
come from the oosupport directory.
"""
from pypy.translator.oosupport.metavm import \
PushArg, PushAllArgs, StoreResult, InstructionList, New, DoNothing, Call,\
SetField, GetField, DownCast, RuntimeNew, OOString, OOUnicode, \
CastTo, PushPrimitive
from pypy.translator.jvm.metavm import \
IndirectCall, JvmCallMethod, NewCustomDict, \
CastPrimitive, PushPyPy
from pypy.rpython.ootypesystem import ootype
import pypy.translator.jvm.generator as jvmgen
import pypy.translator.jvm.typesystem as jvmtype
def _proc(val):
if isinstance(val, list):
# Lists of instructions we leave alone:
return InstructionList(val)
elif isinstance(val, jvmgen.Method) and not val.is_static():
# For virtual methods, we first push an instance of the relevant
# class, then the arguments, and then invoke the method. Note
# that we only allow virtual methods of certain pre-designated
# classes to be in the table.
if val.class_name == jvmtype.jPyPy.name:
return InstructionList(
(PushPyPy, PushAllArgs, val, StoreResult))
else:
raise Exception("Unknown class for non-static method")
# For anything else (static methods, strings, etc) we first push
# all arguments, then invoke the emit() routine, and finally
# store the result.
return InstructionList((PushAllArgs, val, StoreResult))
def _proc_dict(original):
""" Function which is used to post-process each entry in the
opcodes table."""
res = {}
for key, val in original.items():
res[key] = _proc(val)
return res
def _check_zer(op):
# Note: we convert from Java's ArithmeticException to RPython's
# ZeroDivisionError in the *catch* code, not here where the
# exception is generated. See introduce_exception_conversions()
# in node.py for details.
return op
def _check_ovf(op):
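    # Overflow checks are performed by the *OVF micro-instructions themselves
    # (e.g. IADDOVF), so no extra wrapping is needed here.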
return op
Ignore = []
# This table maps the opcodes to micro-ops for processing them.
# It is post-processed by _proc.
opcodes = _proc_dict({
# __________ object oriented operations __________
'new': [New, StoreResult],
'runtimenew': [RuntimeNew, StoreResult],
'oosetfield': [SetField],
'oogetfield': [GetField, StoreResult],
'oosend': [JvmCallMethod, StoreResult],
'ooupcast': DoNothing,
'oodowncast': [DownCast, StoreResult],
'oois': 'ref_is_eq',
'oononnull': 'is_not_null',
'instanceof': [CastTo, StoreResult],
'subclassof': [PushAllArgs, jvmgen.SWAP, jvmgen.CLASSISASSIGNABLEFROM, StoreResult],
'ooidentityhash': [PushAllArgs, jvmgen.OBJHASHCODE, StoreResult],
'oohash': [PushAllArgs, jvmgen.OBJHASHCODE, StoreResult],
'oostring': [OOString, StoreResult],
'oounicode': [OOUnicode, StoreResult],
'ooparse_float': jvmgen.PYPYOOPARSEFLOAT,
'oonewcustomdict': [NewCustomDict, StoreResult],
'same_as': DoNothing,
'hint': [PushArg(0), StoreResult],
'direct_call': [Call, StoreResult],
'indirect_call': [PushAllArgs, IndirectCall, StoreResult],
'gc__collect': jvmgen.SYSTEMGC,
'gc_set_max_heap_size': Ignore,
'resume_point': Ignore,
'debug_assert': [], # TODO: implement?
# __________ numeric operations __________
'bool_not': 'logical_not',
'char_lt': 'less_than',
'char_le': 'less_equals',
'char_eq': 'equals',
'char_ne': 'not_equals',
'char_gt': 'greater_than',
'char_ge': 'greater_equals',
'unichar_eq': 'equals',
'unichar_ne': 'not_equals',
'int_is_true': 'not_equals_zero',
'int_neg': jvmgen.INEG,
'int_neg_ovf': jvmgen.INEGOVF,
'int_abs': 'iabs',
'int_abs_ovf': jvmgen.IABSOVF,
'int_invert': 'bitwise_negate',
'int_add': jvmgen.IADD,
'int_sub': jvmgen.ISUB,
'int_mul': jvmgen.IMUL,
'int_floordiv': jvmgen.IDIV,
'int_floordiv_zer': _check_zer(jvmgen.IDIV),
'int_mod': jvmgen.IREM,
'int_lt': 'less_than',
'int_le': 'less_equals',
'int_eq': 'equals',
'int_ne': 'not_equals',
'int_gt': 'greater_than',
'int_ge': 'greater_equals',
'int_and': jvmgen.IAND,
'int_or': jvmgen.IOR,
'int_lshift': jvmgen.ISHL,
'int_rshift': jvmgen.ISHR,
'int_xor': jvmgen.IXOR,
'int_add_ovf': jvmgen.IADDOVF,
'int_add_nonneg_ovf': jvmgen.IADDOVF,
'int_sub_ovf': jvmgen.ISUBOVF,
'int_mul_ovf': jvmgen.IMULOVF,
'int_floordiv_ovf': jvmgen.IDIV, # these can't overflow!
'int_mod_zer': _check_zer(jvmgen.IREM),
'int_mod_ovf': jvmgen.IREMOVF,
'int_lt_ovf': 'less_than',
'int_le_ovf': 'less_equals',
'int_eq_ovf': 'equals',
'int_ne_ovf': 'not_equals',
'int_gt_ovf': 'greater_than',
'int_ge_ovf': 'greater_equals',
'int_and_ovf': jvmgen.IAND,
'int_or_ovf': jvmgen.IOR,
'int_lshift_ovf': jvmgen.ISHLOVF,
'int_lshift_ovf_val': jvmgen.ISHLOVF, # VAL... what is val used for??
'int_rshift_ovf': jvmgen.ISHR, # these can't overflow!
'int_xor_ovf': jvmgen.IXOR,
'int_floordiv_ovf_zer': _check_zer(jvmgen.IDIV),
'int_mod_ovf_zer': _check_zer(jvmgen.IREMOVF),
'uint_is_true': 'not_equals_zero',
'uint_invert': 'bitwise_negate',
'uint_add': jvmgen.IADD,
'uint_sub': jvmgen.ISUB,
'uint_mul': jvmgen.PYPYUINTMUL,
'uint_div': jvmgen.PYPYUINTDIV,
'uint_truediv': None, # TODO
'uint_floordiv': jvmgen.PYPYUINTDIV,
'uint_mod': jvmgen.PYPYUINTMOD,
'uint_lt': 'u_less_than',
'uint_le': 'u_less_equals',
'uint_eq': 'u_equals',
'uint_ne': 'u_not_equals',
'uint_gt': 'u_greater_than',
'uint_ge': 'u_greater_equals',
'uint_and': jvmgen.IAND,
'uint_or': jvmgen.IOR,
'uint_lshift': jvmgen.ISHL,
'uint_rshift': jvmgen.IUSHR,
'uint_xor': jvmgen.IXOR,
'float_is_true': [PushAllArgs, jvmgen.DCONST_0, 'dbl_not_equals', StoreResult],
'float_neg': jvmgen.DNEG,
'float_abs': 'dbl_abs',
'float_add': jvmgen.DADD,
'float_sub': jvmgen.DSUB,
'float_mul': jvmgen.DMUL,
'float_truediv': jvmgen.DDIV,
'float_lt': 'dbl_less_than',
'float_le': 'dbl_less_equals',
'float_eq': 'dbl_equals',
'float_ne': 'dbl_not_equals',
'float_gt': 'dbl_greater_than',
'float_ge': 'dbl_greater_equals',
'llong_is_true': [PushAllArgs, jvmgen.LCONST_0, 'long_not_equals', StoreResult],
'llong_neg': jvmgen.LNEG,
'llong_neg_ovf': jvmgen.LNEGOVF,
'llong_abs': jvmgen.MATHLABS,
'llong_abs_ovf': jvmgen.LABSOVF,
'llong_invert': jvmgen.PYPYLONGBITWISENEGATE,
'llong_add': jvmgen.LADD,
'llong_sub': jvmgen.LSUB,
'llong_mul': jvmgen.LMUL,
'llong_div': jvmgen.LDIV,
'llong_truediv': None, # TODO
'llong_floordiv': jvmgen.LDIV,
'llong_floordiv_zer': _check_zer(jvmgen.LDIV),
'llong_mod': jvmgen.LREM,
'llong_mod_zer': _check_zer(jvmgen.LREM),
'llong_lt': 'long_less_than',
'llong_le': 'long_less_equals',
'llong_eq': 'long_equals',
'llong_ne': 'long_not_equals',
'llong_gt': 'long_greater_than',
'llong_ge': 'long_greater_equals',
'llong_and': jvmgen.LAND,
'llong_or': jvmgen.LOR,
'llong_lshift': [PushAllArgs, jvmgen.L2I, jvmgen.LSHL, StoreResult], # XXX - do we care about shifts of >(1<<32) bits??
'llong_rshift': [PushAllArgs, jvmgen.L2I, jvmgen.LSHR, StoreResult],
'llong_xor': jvmgen.LXOR,
'llong_floordiv_ovf': jvmgen.LDIV, # these can't overflow!
'llong_mod_ovf': jvmgen.LREMOVF,
'llong_lshift_ovf': jvmgen.LSHLOVF,
'ullong_is_true': [PushAllArgs, jvmgen.LCONST_0, 'long_not_equals', StoreResult],
'ullong_invert': jvmgen.PYPYLONGBITWISENEGATE,
'ullong_add': jvmgen.LADD,
'ullong_sub': jvmgen.LSUB,
'ullong_mul': jvmgen.LMUL,
'ullong_div': jvmgen.LDIV, # valid?
'ullong_truediv': None, # TODO
'ullong_floordiv': jvmgen.LDIV, # valid?
'ullong_mod': jvmgen.PYPYULONGMOD,
'ullong_lt': 'ulong_less_than',
'ullong_le': 'ulong_less_equals',
'ullong_eq': 'ulong_equals',
'ullong_ne': 'ulong_not_equals',
'ullong_gt': 'ulong_greater_than',
'ullong_ge': 'ulong_greater_equals',
'ullong_lshift': [PushAllArgs, jvmgen.L2I, jvmgen.LSHL, StoreResult],
'ullong_rshift': [PushAllArgs, jvmgen.L2I, jvmgen.LUSHR, StoreResult],
'ullong_mod_zer': jvmgen.PYPYULONGMOD,
# when casting from bool we want that every truth value is casted
# to 1: we can't simply DoNothing, because the CLI stack could
# contains a truth value not equal to 1, so we should use the !=0
# trick. #THIS COMMENT NEEDS TO BE VALIDATED AND UPDATED
'cast_bool_to_int': DoNothing,
'cast_bool_to_uint': DoNothing,
'cast_bool_to_float': jvmgen.PYPYBOOLTODOUBLE, #PAUL, inefficient
'cast_char_to_int': DoNothing,
'cast_unichar_to_int': DoNothing,
'cast_int_to_char': DoNothing,
'cast_int_to_unichar': DoNothing,
'cast_int_to_uint': DoNothing,
'cast_int_to_float': jvmgen.I2D,
'cast_int_to_longlong': jvmgen.I2L,
'cast_uint_to_int': DoNothing,
'cast_uint_to_float': jvmgen.PYPYUINTTODOUBLE,
'cast_float_to_int': jvmgen.D2I,
'cast_float_to_longlong': jvmgen.PYPYDOUBLETOLONG, #PAUL
'cast_float_to_uint': jvmgen.PYPYDOUBLETOUINT,
'truncate_longlong_to_int': jvmgen.L2I,
'cast_longlong_to_float': jvmgen.L2D,
'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult],
'is_early_constant': [PushPrimitive(ootype.Bool, False), StoreResult]
})
from hamcrest import *
from tests.helpers.sql import sql_query
class TestSqlQueries:
def test_sql_select(self, namespace, index, item):
# Given("Create namespace with item")
db, namespace_name = namespace
item_definition = item
# When ("Execute SQL query SELECT")
query = f'SELECT * FROM {namespace_name}'
item_list = sql_query(namespace, query)
# Then ("Check that selected item is in result")
assert_that(item_list, has_item(equal_to(item_definition)), "Can't SQL select data")
def test_sql_select_with_join(self, namespace, second_namespace_for_join, index, items):
# Given("Create two namespaces")
db, namespace_name = namespace
second_namespace_name, second_ns_item_definition_join = second_namespace_for_join
# When ("Execute SQL query SELECT with JOIN")
query = f'SELECT id FROM {namespace_name} INNER JOIN {second_namespace_name} ON {namespace_name}.id = {second_namespace_name}.id'
item_list = sql_query(namespace, query)
# Then ("Check that selected item is in result")
assert_that(item_list,
has_item(equal_to({'id': 1, f'joined_{second_namespace_name}': [second_ns_item_definition_join]})),
"Can't SQL select data with JOIN")
def test_sql_select_with_condition(self, namespace, index, items):
# Given("Create namespace with item")
db, namespace_name = namespace
# When ("Execute SQL query SELECT")
query = f'SELECT * FROM {namespace_name} WHERE id=3'
item_list = sql_query(namespace, query)
# Then ("Check that selected item is in result")
assert_that(item_list, has_item(equal_to({'id': 3, 'val': 'testval3'})), "Can't SQL select data with condition")
def test_sql_update(self, namespace, index, item):
# Given("Create namespace with item")
db, namespace_name = namespace
# When ("Execute SQL query UPDATE")
query = f"UPDATE {namespace_name} SET \"val\" = 'new_val' WHERE id = 100"
item_list = sql_query(namespace, query)
# Then ("Check that item is updated")
assert_that(item_list, has_item(equal_to({'id': 100, 'val': 'new_val'})), "Can't SQL update data")
def test_sql_delete(self, namespace, index, item):
# Given("Create namespace with item")
db, namespace_name = namespace
# When ("Execute SQL query DELETE")
query_delete = f"DELETE FROM {namespace_name} WHERE id = 100"
sql_query(namespace, query_delete)
# Then ("Check that item is deleted")
query_select = f"SELECT * FROM {namespace_name}"
item_list = sql_query(namespace, query_select)
assert_that(item_list, equal_to([]), "Can't SQL delete data")
def test_sql_select_with_syntax_error(self, namespace, index, item):
# Given("Create namespace with item")
# When ("Execute SQL query SELECT with incorrect syntax")
query = f'SELECT *'
# Then ("Check that selected item is in result")
assert_that(calling(sql_query).with_args(namespace, query),
raises(Exception, matching=has_string(string_contains_in_order(
"Expected", "but found"))), "Error wasn't raised when syntax was incorrect")
| [
"tests.helpers.sql.sql_query"
] | [((373, 400), 'tests.helpers.sql.sql_query', 'sql_query', (['namespace', 'query'], {}), '(namespace, query)\n', (382, 400), False, 'from tests.helpers.sql import sql_query\n'), ((1027, 1054), 'tests.helpers.sql.sql_query', 'sql_query', (['namespace', 'query'], {}), '(namespace, query)\n', (1036, 1054), False, 'from tests.helpers.sql import sql_query\n'), ((1600, 1627), 'tests.helpers.sql.sql_query', 'sql_query', (['namespace', 'query'], {}), '(namespace, query)\n', (1609, 1627), False, 'from tests.helpers.sql import sql_query\n'), ((2093, 2120), 'tests.helpers.sql.sql_query', 'sql_query', (['namespace', 'query'], {}), '(namespace, query)\n', (2102, 2120), False, 'from tests.helpers.sql import sql_query\n'), ((2537, 2571), 'tests.helpers.sql.sql_query', 'sql_query', (['namespace', 'query_delete'], {}), '(namespace, query_delete)\n', (2546, 2571), False, 'from tests.helpers.sql import sql_query\n'), ((2695, 2729), 'tests.helpers.sql.sql_query', 'sql_query', (['namespace', 'query_select'], {}), '(namespace, query_select)\n', (2704, 2729), False, 'from tests.helpers.sql import sql_query\n')] |
from collections import defaultdict
from localstack.utils.common import to_str
def multi_value_dict_for_list(elements):
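    """Group a list of (key, value) pairs, or a plain mapping, into a dict
    that maps each key to a tuple of all values seen for it."""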
temp_mv_dict = defaultdict(list)
for key in elements:
if isinstance(key, (list, tuple)):
key, value = key
else:
value = elements[key]
key = to_str(key)
temp_mv_dict[key].append(value)
return dict((k, tuple(v)) for k, v in temp_mv_dict.items())
## Copyright 2019 <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import tensorflow as tf
import dgp.dgp as dgp
import dgp.sorf_transform as sorf_transform
class Dgp_Sorf_Optim(dgp.Dgp):
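    """DGP variant using Structured Orthogonal Random Features (SORF) in which
    the Rademacher scaling vectors d1, d2, d3 are trainable and regularized
    towards their random initialization with prior variance p_sigma2_d."""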
def __init__(self, feature_dim, d_out, nb_gp_blocks=1, ratio_nrf_df=1, keep_prob=0.5, p_sigma2_d=0.01):
# Initialize for superclass
super(Dgp_Sorf_Optim, self).__init__(feature_dim=feature_dim, d_out=d_out, nb_gp_blocks=nb_gp_blocks, ratio_nrf_df=ratio_nrf_df, keep_prob=keep_prob)
# Set p_sigma2_d
self.p_sigma2_d = p_sigma2_d
# Define the initialized value d1_init, d2_init and d3_init
self.d1_init, self.d2_init, self.d3_init = self.create_init_value_d()
# Define variable d1, d2, d3
self.d1, self.d2, self.d3 = self.get_variable_d()
self.omegas = self.d1 + self.d2 + self.d3 + self.d1_init + self.d2_init + self.d3_init
def create_binary_scaling_vector(self, d):
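        # Draw a length-d Rademacher vector: each entry is +1 or -1 with equal probability.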
r_u = tf.random_uniform([1, d], minval=0, maxval=1.0, dtype=tf.float32)
ones = tf.ones([1, d])
means = tf.multiply(0.5, ones)
B = tf.cast(tf.where(r_u > means, ones, tf.multiply(-1.0, ones)), tf.float32)
return B
# Define initialized value for variable d1, d2 and d3
def create_init_value_d(self):
d1 = [tf.Variable(self.create_binary_scaling_vector(self.d_omegas_out[i]), dtype=tf.float32, trainable=False) for i in range(self.nb_gp_blocks)]
d2 = [tf.Variable(self.create_binary_scaling_vector(self.d_omegas_out[i]), dtype=tf.float32, trainable=False) for i in range(self.nb_gp_blocks)]
d3 = [tf.Variable(self.create_binary_scaling_vector(self.d_omegas_out[i]), dtype=tf.float32, trainable=False) for i in range(self.nb_gp_blocks)]
return d1, d2, d3
# Define variable d1, d2 and d3
def get_variable_d(self):
d1 = [tf.Variable(self.d1_init[i], dtype=tf.float32) for i in range(self.nb_gp_blocks)]
d2 = [tf.Variable(self.d2_init[i], dtype=tf.float32) for i in range(self.nb_gp_blocks)]
d3 = [tf.Variable(self.d3_init[i], dtype=tf.float32) for i in range(self.nb_gp_blocks)]
return d1, d2, d3
def get_name(self):
return "dgpsorfoptimrelu" + str(self.nb_gp_blocks) + "nb_gp_blocks"
def get_omegas(self):
return self.omegas
def compute_layer_times_omega(self, x, id_nb_gp_blocks):
layer_times_omega = 1 / (tf.exp(self.log_theta_lengthscales[id_nb_gp_blocks]) * self.d_omegas_in[id_nb_gp_blocks]) \
* sorf_transform.sorf_transform(self.layers[id_nb_gp_blocks], self.d1[id_nb_gp_blocks], self.d2[id_nb_gp_blocks], self.d3[id_nb_gp_blocks])
return layer_times_omega
def get_regu_loss(self):
regu_loss = 0.0
for i in range(self.nb_gp_blocks):
regu_loss = regu_loss + tf.nn.l2_loss(tf.subtract(self.d1[i], self.d1_init[i])) / self.p_sigma2_d
regu_loss = regu_loss + tf.nn.l2_loss(tf.subtract(self.d2[i], self.d2_init[i])) / self.p_sigma2_d
regu_loss = regu_loss + tf.nn.l2_loss(tf.subtract(self.d3[i], self.d3_init[i])) / self.p_sigma2_d
regu_loss = regu_loss + self.keep_prob * tf.nn.l2_loss(self.w[i])
return regu_loss
| [
"tensorflow.Variable",
"tensorflow.ones",
"tensorflow.multiply",
"tensorflow.nn.l2_loss",
"tensorflow.random_uniform",
"dgp.sorf_transform.sorf_transform",
"tensorflow.subtract",
"tensorflow.exp"
] | [((1451, 1516), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1, d]'], {'minval': '(0)', 'maxval': '(1.0)', 'dtype': 'tf.float32'}), '([1, d], minval=0, maxval=1.0, dtype=tf.float32)\n', (1468, 1516), True, 'import tensorflow as tf\n'), ((1526, 1541), 'tensorflow.ones', 'tf.ones', (['[1, d]'], {}), '([1, d])\n', (1533, 1541), True, 'import tensorflow as tf\n'), ((1552, 1574), 'tensorflow.multiply', 'tf.multiply', (['(0.5)', 'ones'], {}), '(0.5, ones)\n', (1563, 1574), True, 'import tensorflow as tf\n'), ((2285, 2331), 'tensorflow.Variable', 'tf.Variable', (['self.d1_init[i]'], {'dtype': 'tf.float32'}), '(self.d1_init[i], dtype=tf.float32)\n', (2296, 2331), True, 'import tensorflow as tf\n'), ((2375, 2421), 'tensorflow.Variable', 'tf.Variable', (['self.d2_init[i]'], {'dtype': 'tf.float32'}), '(self.d2_init[i], dtype=tf.float32)\n', (2386, 2421), True, 'import tensorflow as tf\n'), ((2465, 2511), 'tensorflow.Variable', 'tf.Variable', (['self.d3_init[i]'], {'dtype': 'tf.float32'}), '(self.d3_init[i], dtype=tf.float32)\n', (2476, 2511), True, 'import tensorflow as tf\n'), ((2909, 3051), 'dgp.sorf_transform.sorf_transform', 'sorf_transform.sorf_transform', (['self.layers[id_nb_gp_blocks]', 'self.d1[id_nb_gp_blocks]', 'self.d2[id_nb_gp_blocks]', 'self.d3[id_nb_gp_blocks]'], {}), '(self.layers[id_nb_gp_blocks], self.d1[\n id_nb_gp_blocks], self.d2[id_nb_gp_blocks], self.d3[id_nb_gp_blocks])\n', (2938, 3051), True, 'import dgp.sorf_transform as sorf_transform\n'), ((1617, 1640), 'tensorflow.multiply', 'tf.multiply', (['(-1.0)', 'ones'], {}), '(-1.0, ones)\n', (1628, 1640), True, 'import tensorflow as tf\n'), ((2792, 2844), 'tensorflow.exp', 'tf.exp', (['self.log_theta_lengthscales[id_nb_gp_blocks]'], {}), '(self.log_theta_lengthscales[id_nb_gp_blocks])\n', (2798, 2844), True, 'import tensorflow as tf\n'), ((3504, 3528), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.w[i]'], {}), '(self.w[i])\n', (3517, 3528), True, 'import tensorflow as tf\n'), ((3198, 3238), 'tensorflow.subtract', 'tf.subtract', (['self.d1[i]', 'self.d1_init[i]'], {}), '(self.d1[i], self.d1_init[i])\n', (3209, 3238), True, 'import tensorflow as tf\n'), ((3299, 3339), 'tensorflow.subtract', 'tf.subtract', (['self.d2[i]', 'self.d2_init[i]'], {}), '(self.d2[i], self.d2_init[i])\n', (3310, 3339), True, 'import tensorflow as tf\n'), ((3400, 3440), 'tensorflow.subtract', 'tf.subtract', (['self.d3[i]', 'self.d3_init[i]'], {}), '(self.d3[i], self.d3_init[i])\n', (3411, 3440), True, 'import tensorflow as tf\n')] |
import cv2
import numpy as np
from .augmentor import DataAugment
import math
class Rotate(DataAugment):
"""
    Continuous rotation.
The sample size for x- and y-axes should be at least sqrt(2) times larger
than the input size to make sure there is no non-valid region after center-crop.
Args:
p (float): probability of applying the augmentation
"""
def __init__(self, p=0.5):
super(Rotate, self).__init__(p=p)
self.image_interpolation = cv2.INTER_LINEAR
self.label_interpolation = cv2.INTER_NEAREST
self.border_mode = cv2.BORDER_CONSTANT
self.set_params()
def set_params(self):
self.sample_params['ratio'] = [1.0, 1.42, 1.42]
def rotate(self, imgs, M, interpolation):
height, width = imgs.shape[-2:]
if imgs.ndim == 4:
channels = imgs.shape[-4]
slices = imgs.shape[-3]
        elif imgs.ndim == 3:
channels = 1
slices = imgs.shape[-3]
transformedimgs = np.copy(imgs)
for z in range(slices):
if channels == 1:
img = transformedimgs[z, :, :]
                dst = cv2.warpAffine(img, M, (width, height), flags=interpolation, borderMode=self.border_mode)  # cv2 dsize is (width, height)
transformedimgs[z, :, :] = dst
elif channels == 3:
img = transformedimgs[:, z, :, :]
img = np.moveaxis(img, 0, -1)
                dst = cv2.warpAffine(img, M, (width, height), flags=interpolation, borderMode=self.border_mode)
transformedimgs[:, z, :, :] = np.moveaxis(dst, -1, 0)
else:
raise Exception('Unknown number of channels in 2d slice')
return transformedimgs
def rotation_matrix(self, axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta degrees.
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
theta = float(theta) * np.pi / 180.0
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
def __call__(self, data, random_state=None):
if random_state is None:
random_state = np.random.RandomState()
image = data['image']
height, width = image.shape[-2:]
angle = random_state.rand()*360.0
        # getRotationMatrix2D expects an (x, y) center and the angle in degrees
        M = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1)
output = {}
for key, val in data.items():
if key in ['label', 'skeleton', 'weight', 'context', 'skeleton_probability']:
output[key] = self.rotate(val, M, self.label_interpolation)
elif key == 'flux':
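                # Rotating a vector field takes two steps: rotate the spatial
                # grid like an image, then rotate every 3-vector itself by the
                # same angle (rotation_matrix about axis (1, 0, 0), applied to
                # the flattened (3, N) field).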
r_img = self.rotate(val, M, self.image_interpolation)
r_mat = self.rotation_matrix((1, 0, 0), angle)
r_field = np.matmul(r_mat, r_img.reshape((3, -1)))
output[key] = r_field.reshape(val.shape)
elif key == 'image':
output[key] = self.rotate(val, M, self.image_interpolation)
else:
raise TypeError('Input data key not identified, Key was: ' + key)
return output | [
"numpy.copy",
"cv2.warpAffine",
"numpy.asarray",
"math.cos",
"numpy.array",
"numpy.dot",
"numpy.moveaxis",
"cv2.getRotationMatrix2D",
"math.sin",
"numpy.random.RandomState"
] | [((1025, 1038), 'numpy.copy', 'np.copy', (['imgs'], {}), '(imgs)\n', (1032, 1038), True, 'import numpy as np\n'), ((1960, 1976), 'numpy.asarray', 'np.asarray', (['axis'], {}), '(axis)\n', (1970, 1976), True, 'import numpy as np\n'), ((2086, 2107), 'math.cos', 'math.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (2094, 2107), False, 'import math\n'), ((2297, 2468), 'numpy.array', 'np.array', (['[[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa + cc -\n bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]'], {}), '([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad),\n aa + cc - bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa +\n dd - bb - cc]])\n', (2305, 2468), True, 'import numpy as np\n'), ((2771, 2829), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(height / 2, width / 2)', 'angle', '(1)'], {}), '((height / 2, width / 2), angle, 1)\n', (2794, 2829), False, 'import cv2\n'), ((2134, 2155), 'math.sin', 'math.sin', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (2142, 2155), False, 'import math\n'), ((2621, 2644), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (2642, 2644), True, 'import numpy as np\n'), ((1171, 1269), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(height, width)', '(1.0)'], {'flags': 'interpolation', 'borderMode': 'self.border_mode'}), '(img, M, (height, width), 1.0, flags=interpolation,\n borderMode=self.border_mode)\n', (1185, 1269), False, 'import cv2\n'), ((2009, 2027), 'numpy.dot', 'np.dot', (['axis', 'axis'], {}), '(axis, axis)\n', (2015, 2027), True, 'import numpy as np\n'), ((1417, 1440), 'numpy.moveaxis', 'np.moveaxis', (['img', '(0)', '(-1)'], {}), '(img, 0, -1)\n', (1428, 1440), True, 'import numpy as np\n'), ((1463, 1561), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(height, width)', '(1.0)'], {'flags': 'interpolation', 'borderMode': 'self.border_mode'}), '(img, M, (height, width), 1.0, flags=interpolation,\n borderMode=self.border_mode)\n', (1477, 1561), False, 'import cv2\n'), ((1604, 1627), 'numpy.moveaxis', 'np.moveaxis', (['dst', '(-1)', '(0)'], {}), '(dst, -1, 0)\n', (1615, 1627), True, 'import numpy as np\n')] |
import re
# Used to access the DATA_TYPES dictionary
INT = "INT"
FLOAT = "FLOAT"
BOOLEAN = "BOOLEAN"
INT_LIST = "INT_LIST"
FLOAT_LIST = "FLOAT_LIST"
BOOLEAN_LIST = "BOOLEAN_LIST"
VOID = "VOID"
OBJECT = "OBJECT"
SEMANTIC_ERROR = 99
# Regular expressions to match data types
REGEX_BOOLEAN = r'true|false'
regex_boolean = re.compile(REGEX_BOOLEAN)
REGEX_INT = r'[0-9][0-9]*'
regex_int = re.compile(REGEX_INT)
REGEX_FLOAT = r'[0-9]*[\.][0-9]+'
regex_float = re.compile(REGEX_FLOAT)
REGEX_OBJECT = r'cube|sphere'
regex_object = re.compile(REGEX_OBJECT)
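# Usage sketch (hypothetical, not from the original source): the compiled
# patterns are matched against token text from its start, e.g.
#   regex_float.match('3.14')  # -> match object
#   regex_float.match('42')    # -> None (no decimal point)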
# Data types as integers used during compilation
DATA_TYPES = {
INT : 0,
FLOAT : 1,
BOOLEAN : 3,
INT_LIST : 4,
FLOAT_LIST : 5,
BOOLEAN_LIST : 6,
VOID : 8,
OBJECT : 9
}
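# Usage sketch (hypothetical): semantic checks look up the integer codes,
# e.g. DATA_TYPES[INT] -> 0, DATA_TYPES[FLOAT_LIST] -> 5.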
# Operators as integers used during compilation
OPERATORS = {
# Arithmetic
"+" : 0,
"-" : 1,
"/" : 2,
"*" : 3,
"=" : 4,
# Relational
"==" : 5,
"<" : 6,
">" : 7,
"<=" : 8,
">=" : 9,
"!=" : 10,
"||" : 11,
"&&" : 12,
# Unary
"!" : 13,
"~" : 14,
} | [
"re.compile"
] | [((322, 347), 're.compile', 're.compile', (['REGEX_BOOLEAN'], {}), '(REGEX_BOOLEAN)\n', (332, 347), False, 'import re\n'), ((388, 409), 're.compile', 're.compile', (['REGEX_INT'], {}), '(REGEX_INT)\n', (398, 409), False, 'import re\n'), ((459, 482), 're.compile', 're.compile', (['REGEX_FLOAT'], {}), '(REGEX_FLOAT)\n', (469, 482), False, 'import re\n'), ((529, 553), 're.compile', 're.compile', (['REGEX_OBJECT'], {}), '(REGEX_OBJECT)\n', (539, 553), False, 'import re\n')] |
import FWCore.ParameterSet.Config as cms
simEcalDigis = cms.EDProducer("EcalSelectiveReadoutProducer",
# Label of input EB and EE digi collections
digiProducer = cms.string('simEcalUnsuppressedDigis'),
    # Instance name of the input EB digi collection
EBdigiCollection = cms.string(''),
    # Instance name of the input EE digi collection
EEdigiCollection = cms.string(''),
# Instance name of output EB SR flags collection
EBSrFlagCollection = cms.string('ebSrFlags'),
# Instance name of output EE SR flags collection
EESrFlagCollection = cms.string('eeSrFlags'),
# Instance name of output EB digis collection
EBSRPdigiCollection = cms.string('ebDigis'),
# Instance name of output EE digis collection
EESRPdigiCollection = cms.string('eeDigis'),
# Label name of input ECAL trigger primitive collection
trigPrimProducer = cms.string('simEcalTriggerPrimitiveDigis'),
# Instance name of ECAL trigger primitive collection
trigPrimCollection = cms.string(''),
# Neighbour eta range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaEta = cms.int32(1),
    # Neighbour phi range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaPhi = cms.int32(1),
    # Index of the time sample (starting from 1) to which the first DCC weight is applied
ecalDccZs1stSample = cms.int32(3),
# ADC to GeV conversion factor used in ZS filter for EB
ebDccAdcToGeV = cms.double(0.035),
# ADC to GeV conversion factor used in ZS filter for EE
eeDccAdcToGeV = cms.double(0.06),
    # DCC ZS FIR weights.
    # Default: the value set of the DCC firmware used in CRUZET and CRAFT
dccNormalizedWeights = cms.vdouble(-1.1865, 0.0195, 0.2900, 0.3477, 0.3008,
0.2266),
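    # (these six weights sum to approximately zero, so a constant pedestal
    # cancels out of the filtered amplitude)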
    # Switch to use a symmetric zero suppression (cut on the absolute value).
    # For studies only; for the time being it is not supported by the hardware.
symetricZS = cms.bool(False),
# ZS energy threshold in GeV to apply to low interest channels of barrel
srpBarrelLowInterestChannelZS = cms.double(3*.035),
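    # (i.e. 3 ADC counts x ebDccAdcToGeV = 3 x 0.035 = 0.105 GeV; the endcap
    # value below is likewise 3 x eeDccAdcToGeV = 0.18 GeV)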
# ZS energy threshold in GeV to apply to low interest channels of endcap
srpEndcapLowInterestChannelZS = cms.double(3*0.06),
# ZS energy threshold in GeV to apply to high interest channels of barrel
srpBarrelHighInterestChannelZS = cms.double(-1.e9),
# ZS energy threshold in GeV to apply to high interest channels of endcap
srpEndcapHighInterestChannelZS = cms.double(-1.e9),
    # Switch to run without trigger primitives. For debug use only
trigPrimBypass = cms.bool(False),
#for debug mode only:
trigPrimBypassLTH = cms.double(1.0),
#for debug mode only:
trigPrimBypassHTH = cms.double(1.0),
#for debug mode only
trigPrimBypassWithPeakFinder = cms.bool(True),
    # Mode selection for the "trig bypass" mode
    # 0: TT thresholds applied to the sum of crystal Et's
    # 1: TT thresholds applied to the compressed Et from the trigger primitive
    # @see trigPrimBypass_ switch
trigPrimBypassMode = cms.int32(0),
    # Number of events whose TT and SR flags must be dumped (for debugging):
dumpFlags = cms.untracked.int32(0),
#logical flag to write out SrFlags
writeSrFlags = cms.untracked.bool(True),
    # Switch to apply the selective readout decision to the digis and produce
    # the "suppressed" digis
produceDigis = cms.untracked.bool(True),
    # Trigger Tower Flag to use when a flag is not found in the input
    # Trigger Primitive collection. Must be one of the following values:
# 0: low interest, 1: mid interest, 3: high interest
# 4: forced low interest, 5: forced mid interest, 7: forced high interest
defaultTtf_ = cms.int32(4),
# SR->action flag map
actions = cms.vint32(1, 3, 3, 3, 5, 7, 7, 7)
)
| [
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.double",
"FWCore.ParameterSet.Config.vint32",
"FWCore.ParameterSet.Config.vdouble",
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.int32",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Confi... | [((171, 209), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""simEcalUnsuppressedDigis"""'], {}), "('simEcalUnsuppressedDigis')\n", (181, 209), True, 'import FWCore.ParameterSet.Config as cms\n'), ((284, 298), 'FWCore.ParameterSet.Config.string', 'cms.string', (['""""""'], {}), "('')\n", (294, 298), True, 'import FWCore.ParameterSet.Config as cms\n'), ((373, 387), 'FWCore.ParameterSet.Config.string', 'cms.string', (['""""""'], {}), "('')\n", (383, 387), True, 'import FWCore.ParameterSet.Config as cms\n'), ((468, 491), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""ebSrFlags"""'], {}), "('ebSrFlags')\n", (478, 491), True, 'import FWCore.ParameterSet.Config as cms\n'), ((572, 595), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""eeSrFlags"""'], {}), "('eeSrFlags')\n", (582, 595), True, 'import FWCore.ParameterSet.Config as cms\n'), ((674, 695), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""ebDigis"""'], {}), "('ebDigis')\n", (684, 695), True, 'import FWCore.ParameterSet.Config as cms\n'), ((774, 795), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""eeDigis"""'], {}), "('eeDigis')\n", (784, 795), True, 'import FWCore.ParameterSet.Config as cms\n'), ((881, 923), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""simEcalTriggerPrimitiveDigis"""'], {}), "('simEcalTriggerPrimitiveDigis')\n", (891, 923), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1008, 1022), 'FWCore.ParameterSet.Config.string', 'cms.string', (['""""""'], {}), "('')\n", (1018, 1022), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1111, 1123), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(1)'], {}), '(1)\n', (1120, 1123), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1215, 1227), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(1)'], {}), '(1)\n', (1224, 1227), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1332, 1344), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(3)'], {}), '(3)\n', (1341, 1344), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1427, 1444), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.035)'], {}), '(0.035)\n', (1437, 1444), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1527, 1543), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.06)'], {}), '(0.06)\n', (1537, 1543), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1663, 1721), 'FWCore.ParameterSet.Config.vdouble', 'cms.vdouble', (['(-1.1865)', '(0.0195)', '(0.29)', '(0.3477)', '(0.3008)', '(0.2266)'], {}), '(-1.1865, 0.0195, 0.29, 0.3477, 0.3008, 0.2266)\n', (1674, 1721), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1932, 1947), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (1940, 1947), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2063, 2084), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(3 * 0.035)'], {}), '(3 * 0.035)\n', (2073, 2084), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2197, 2217), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(3 * 0.06)'], {}), '(3 * 0.06)\n', (2207, 2217), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2333, 2358), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(-1000000000.0)'], {}), '(-1000000000.0)\n', (2343, 2358), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2468, 2493), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(-1000000000.0)'], {}), '(-1000000000.0)\n', (2478, 2493), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2570, 
2585), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (2578, 2585), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2668, 2683), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.0)'], {}), '(1.0)\n', (2678, 2683), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2736, 2751), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.0)'], {}), '(1.0)\n', (2746, 2751), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2814, 2828), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (2822, 2828), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3058, 3070), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(0)'], {}), '(0)\n', (3067, 3070), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3199, 3221), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(0)'], {}), '(0)\n', (3218, 3221), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3312, 3336), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (3330, 3336), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3459, 3483), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (3477, 3483), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3782, 3794), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(4)'], {}), '(4)\n', (3791, 3794), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3837, 3871), 'FWCore.ParameterSet.Config.vint32', 'cms.vint32', (['(1)', '(3)', '(3)', '(3)', '(5)', '(7)', '(7)', '(7)'], {}), '(1, 3, 3, 3, 5, 7, 7, 7)\n', (3847, 3871), True, 'import FWCore.ParameterSet.Config as cms\n')] |
# Generated from jsgParser.g4 by ANTLR 4.9
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
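# serializedATN() below returns this parser's augmented transition network
# (ATN) as a serialized string; the ATNDeserializer call in jsgParser further
# down rebuilds the runtime prediction automaton from it.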
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\'")
buf.write("\u0143\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\3\2\5\2F\n\2\3\2\7\2I\n")
buf.write("\2\f\2\16\2L\13\2\3\2\7\2O\n\2\f\2\16\2R\13\2\3\2\5\2")
buf.write("U\n\2\3\2\3\2\3\3\3\3\3\3\5\3\\\n\3\3\3\3\3\3\4\3\4\6")
buf.write("\4b\n\4\r\4\16\4c\3\5\3\5\7\5h\n\5\f\5\16\5k\13\5\3\5")
buf.write("\3\5\3\6\3\6\3\6\3\6\5\6s\n\6\3\7\3\7\3\7\3\b\3\b\5\b")
buf.write("z\n\b\3\b\3\b\3\b\5\b\177\n\b\3\b\3\b\3\b\5\b\u0084\n")
buf.write("\b\3\b\3\b\5\b\u0088\n\b\3\t\3\t\6\t\u008c\n\t\r\t\16")
buf.write("\t\u008d\3\t\3\t\7\t\u0092\n\t\f\t\16\t\u0095\13\t\3\t")
buf.write("\3\t\5\t\u0099\n\t\5\t\u009b\n\t\3\n\7\n\u009e\n\n\f\n")
buf.write("\16\n\u00a1\13\n\3\13\3\13\5\13\u00a5\n\13\3\f\3\f\3\r")
buf.write("\3\r\3\r\3\r\5\r\u00ad\n\r\3\r\3\r\5\r\u00b1\n\r\3\r\3")
buf.write("\r\6\r\u00b5\n\r\r\r\16\r\u00b6\3\r\3\r\3\r\3\r\5\r\u00bd")
buf.write("\n\r\5\r\u00bf\n\r\3\16\3\16\3\17\3\17\3\17\3\20\3\20")
buf.write("\3\20\3\20\7\20\u00ca\n\20\f\20\16\20\u00cd\13\20\3\20")
buf.write("\5\20\u00d0\n\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3")
buf.write("\22\3\22\3\22\3\22\3\22\7\22\u00de\n\22\f\22\16\22\u00e1")
buf.write("\13\22\3\22\3\22\3\23\3\23\3\24\3\24\5\24\u00e9\n\24\3")
buf.write("\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\5\25\u00f4")
buf.write("\n\25\3\26\3\26\3\26\6\26\u00f9\n\26\r\26\16\26\u00fa")
buf.write("\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\5\30\u0106")
buf.write("\n\30\5\30\u0108\n\30\3\30\5\30\u010b\n\30\3\31\3\31\7")
buf.write("\31\u010f\n\31\f\31\16\31\u0112\13\31\3\32\3\32\3\32\3")
buf.write("\32\3\32\3\33\3\33\5\33\u011b\n\33\3\34\3\34\3\34\7\34")
buf.write("\u0120\n\34\f\34\16\34\u0123\13\34\3\35\3\35\5\35\u0127")
buf.write("\n\35\3\36\6\36\u012a\n\36\r\36\16\36\u012b\3\37\3\37")
buf.write("\5\37\u0130\n\37\3\37\3\37\5\37\u0134\n\37\5\37\u0136")
buf.write("\n\37\3 \3 \3 \3 \3!\3!\3!\5!\u013f\n!\3\"\3\"\3\"\2\2")
buf.write("#\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62")
buf.write("\64\668:<>@B\2\7\4\2\3\3\7\7\3\2\4\5\4\2\7\7\f\22\4\2")
buf.write("\6\6\32\32\4\2\5\5$$\2\u0154\2E\3\2\2\2\4X\3\2\2\2\6_")
buf.write("\3\2\2\2\be\3\2\2\2\nr\3\2\2\2\ft\3\2\2\2\16\u0087\3\2")
buf.write("\2\2\20\u009a\3\2\2\2\22\u009f\3\2\2\2\24\u00a2\3\2\2")
buf.write("\2\26\u00a6\3\2\2\2\30\u00be\3\2\2\2\32\u00c0\3\2\2\2")
buf.write("\34\u00c2\3\2\2\2\36\u00c5\3\2\2\2 \u00d3\3\2\2\2\"\u00d8")
buf.write("\3\2\2\2$\u00e4\3\2\2\2&\u00e8\3\2\2\2(\u00f3\3\2\2\2")
buf.write("*\u00f5\3\2\2\2,\u00fc\3\2\2\2.\u010a\3\2\2\2\60\u010c")
buf.write("\3\2\2\2\62\u0113\3\2\2\2\64\u0118\3\2\2\2\66\u011c\3")
buf.write("\2\2\28\u0126\3\2\2\2:\u0129\3\2\2\2<\u0135\3\2\2\2>\u0137")
buf.write("\3\2\2\2@\u013e\3\2\2\2B\u0140\3\2\2\2DF\5\4\3\2ED\3\2")
buf.write("\2\2EF\3\2\2\2FJ\3\2\2\2GI\5\b\5\2HG\3\2\2\2IL\3\2\2\2")
buf.write("JH\3\2\2\2JK\3\2\2\2KP\3\2\2\2LJ\3\2\2\2MO\5\n\6\2NM\3")
buf.write("\2\2\2OR\3\2\2\2PN\3\2\2\2PQ\3\2\2\2QT\3\2\2\2RP\3\2\2")
buf.write("\2SU\5\60\31\2TS\3\2\2\2TU\3\2\2\2UV\3\2\2\2VW\7\2\2\3")
buf.write("W\3\3\2\2\2XY\7\t\2\2Y[\5\32\16\2Z\\\5\6\4\2[Z\3\2\2\2")
buf.write("[\\\3\2\2\2\\]\3\2\2\2]^\7\25\2\2^\5\3\2\2\2_a\7\26\2")
buf.write("\2`b\5,\27\2a`\3\2\2\2bc\3\2\2\2ca\3\2\2\2cd\3\2\2\2d")
buf.write("\7\3\2\2\2ei\7\n\2\2fh\5\32\16\2gf\3\2\2\2hk\3\2\2\2i")
buf.write("g\3\2\2\2ij\3\2\2\2jl\3\2\2\2ki\3\2\2\2lm\7\25\2\2m\t")
buf.write("\3\2\2\2ns\5\f\7\2os\5\34\17\2ps\5 \21\2qs\5\"\22\2rn")
buf.write("\3\2\2\2ro\3\2\2\2rp\3\2\2\2rq\3\2\2\2s\13\3\2\2\2tu\7")
buf.write("\4\2\2uv\5\16\b\2v\r\3\2\2\2wy\7\27\2\2xz\5\20\t\2yx\3")
buf.write("\2\2\2yz\3\2\2\2z{\3\2\2\2{\u0088\7\30\2\2|~\7\27\2\2")
buf.write("}\177\t\2\2\2~}\3\2\2\2~\177\3\2\2\2\177\u0080\3\2\2\2")
buf.write("\u0080\u0081\7\13\2\2\u0081\u0083\5&\24\2\u0082\u0084")
buf.write("\5.\30\2\u0083\u0082\3\2\2\2\u0083\u0084\3\2\2\2\u0084")
buf.write("\u0085\3\2\2\2\u0085\u0086\7\30\2\2\u0086\u0088\3\2\2")
buf.write("\2\u0087w\3\2\2\2\u0087|\3\2\2\2\u0088\17\3\2\2\2\u0089")
buf.write("\u009b\7\31\2\2\u008a\u008c\5\24\13\2\u008b\u008a\3\2")
buf.write("\2\2\u008c\u008d\3\2\2\2\u008d\u008b\3\2\2\2\u008d\u008e")
buf.write("\3\2\2\2\u008e\u0093\3\2\2\2\u008f\u0090\7\37\2\2\u0090")
buf.write("\u0092\5\22\n\2\u0091\u008f\3\2\2\2\u0092\u0095\3\2\2")
buf.write("\2\u0093\u0091\3\2\2\2\u0093\u0094\3\2\2\2\u0094\u0098")
buf.write("\3\2\2\2\u0095\u0093\3\2\2\2\u0096\u0097\7\37\2\2\u0097")
buf.write("\u0099\5\26\f\2\u0098\u0096\3\2\2\2\u0098\u0099\3\2\2")
buf.write("\2\u0099\u009b\3\2\2\2\u009a\u0089\3\2\2\2\u009a\u008b")
buf.write("\3\2\2\2\u009b\21\3\2\2\2\u009c\u009e\5\24\13\2\u009d")
buf.write("\u009c\3\2\2\2\u009e\u00a1\3\2\2\2\u009f\u009d\3\2\2\2")
buf.write("\u009f\u00a0\3\2\2\2\u00a0\23\3\2\2\2\u00a1\u009f\3\2")
buf.write("\2\2\u00a2\u00a4\5\30\r\2\u00a3\u00a5\7\31\2\2\u00a4\u00a3")
buf.write("\3\2\2\2\u00a4\u00a5\3\2\2\2\u00a5\25\3\2\2\2\u00a6\u00a7")
buf.write("\7\31\2\2\u00a7\27\3\2\2\2\u00a8\u00a9\5\32\16\2\u00a9")
buf.write("\u00aa\7 \2\2\u00aa\u00ac\5&\24\2\u00ab\u00ad\5.\30\2")
buf.write("\u00ac\u00ab\3\2\2\2\u00ac\u00ad\3\2\2\2\u00ad\u00bf\3")
buf.write("\2\2\2\u00ae\u00b0\5,\27\2\u00af\u00b1\5.\30\2\u00b0\u00af")
buf.write("\3\2\2\2\u00b0\u00b1\3\2\2\2\u00b1\u00bf\3\2\2\2\u00b2")
buf.write("\u00b4\7\35\2\2\u00b3\u00b5\5\32\16\2\u00b4\u00b3\3\2")
buf.write("\2\2\u00b5\u00b6\3\2\2\2\u00b6\u00b4\3\2\2\2\u00b6\u00b7")
buf.write("\3\2\2\2\u00b7\u00b8\3\2\2\2\u00b8\u00b9\7\36\2\2\u00b9")
buf.write("\u00ba\7 \2\2\u00ba\u00bc\5&\24\2\u00bb\u00bd\5.\30\2")
buf.write("\u00bc\u00bb\3\2\2\2\u00bc\u00bd\3\2\2\2\u00bd\u00bf\3")
buf.write("\2\2\2\u00be\u00a8\3\2\2\2\u00be\u00ae\3\2\2\2\u00be\u00b2")
buf.write("\3\2\2\2\u00bf\31\3\2\2\2\u00c0\u00c1\t\3\2\2\u00c1\33")
buf.write("\3\2\2\2\u00c2\u00c3\7\4\2\2\u00c3\u00c4\5\36\20\2\u00c4")
buf.write("\35\3\2\2\2\u00c5\u00c6\7\23\2\2\u00c6\u00cb\5&\24\2\u00c7")
buf.write("\u00c8\7\37\2\2\u00c8\u00ca\5&\24\2\u00c9\u00c7\3\2\2")
buf.write("\2\u00ca\u00cd\3\2\2\2\u00cb\u00c9\3\2\2\2\u00cb\u00cc")
buf.write("\3\2\2\2\u00cc\u00cf\3\2\2\2\u00cd\u00cb\3\2\2\2\u00ce")
buf.write("\u00d0\5.\30\2\u00cf\u00ce\3\2\2\2\u00cf\u00d0\3\2\2\2")
buf.write("\u00d0\u00d1\3\2\2\2\u00d1\u00d2\7\24\2\2\u00d2\37\3\2")
buf.write("\2\2\u00d3\u00d4\7\4\2\2\u00d4\u00d5\7!\2\2\u00d5\u00d6")
buf.write("\5\20\t\2\u00d6\u00d7\7\25\2\2\u00d7!\3\2\2\2\u00d8\u00d9")
buf.write("\7\4\2\2\u00d9\u00da\7!\2\2\u00da\u00df\5(\25\2\u00db")
buf.write("\u00dc\7\37\2\2\u00dc\u00de\5(\25\2\u00dd\u00db\3\2\2")
buf.write("\2\u00de\u00e1\3\2\2\2\u00df\u00dd\3\2\2\2\u00df\u00e0")
buf.write("\3\2\2\2\u00e0\u00e2\3\2\2\2\u00e1\u00df\3\2\2\2\u00e2")
buf.write("\u00e3\7\25\2\2\u00e3#\3\2\2\2\u00e4\u00e5\t\4\2\2\u00e5")
buf.write("%\3\2\2\2\u00e6\u00e9\5,\27\2\u00e7\u00e9\5(\25\2\u00e8")
buf.write("\u00e6\3\2\2\2\u00e8\u00e7\3\2\2\2\u00e9\'\3\2\2\2\u00ea")
buf.write("\u00f4\7\3\2\2\u00eb\u00f4\7\5\2\2\u00ec\u00f4\5$\23\2")
buf.write("\u00ed\u00f4\5\16\b\2\u00ee\u00f4\5\36\20\2\u00ef\u00f0")
buf.write("\7\35\2\2\u00f0\u00f1\5*\26\2\u00f1\u00f2\7\36\2\2\u00f2")
buf.write("\u00f4\3\2\2\2\u00f3\u00ea\3\2\2\2\u00f3\u00eb\3\2\2\2")
buf.write("\u00f3\u00ec\3\2\2\2\u00f3\u00ed\3\2\2\2\u00f3\u00ee\3")
buf.write("\2\2\2\u00f3\u00ef\3\2\2\2\u00f4)\3\2\2\2\u00f5\u00f8")
buf.write("\5&\24\2\u00f6\u00f7\7\37\2\2\u00f7\u00f9\5&\24\2\u00f8")
buf.write("\u00f6\3\2\2\2\u00f9\u00fa\3\2\2\2\u00fa\u00f8\3\2\2\2")
buf.write("\u00fa\u00fb\3\2\2\2\u00fb+\3\2\2\2\u00fc\u00fd\7\4\2")
buf.write("\2\u00fd-\3\2\2\2\u00fe\u010b\7\33\2\2\u00ff\u010b\7\32")
buf.write("\2\2\u0100\u010b\7\34\2\2\u0101\u0102\7\27\2\2\u0102\u0107")
buf.write("\7\6\2\2\u0103\u0105\7\31\2\2\u0104\u0106\t\5\2\2\u0105")
buf.write("\u0104\3\2\2\2\u0105\u0106\3\2\2\2\u0106\u0108\3\2\2\2")
buf.write("\u0107\u0103\3\2\2\2\u0107\u0108\3\2\2\2\u0108\u0109\3")
buf.write("\2\2\2\u0109\u010b\7\30\2\2\u010a\u00fe\3\2\2\2\u010a")
buf.write("\u00ff\3\2\2\2\u010a\u0100\3\2\2\2\u010a\u0101\3\2\2\2")
buf.write("\u010b/\3\2\2\2\u010c\u0110\7\b\2\2\u010d\u010f\5\62\32")
buf.write("\2\u010e\u010d\3\2\2\2\u010f\u0112\3\2\2\2\u0110\u010e")
buf.write("\3\2\2\2\u0110\u0111\3\2\2\2\u0111\61\3\2\2\2\u0112\u0110")
buf.write("\3\2\2\2\u0113\u0114\7$\2\2\u0114\u0115\7 \2\2\u0115\u0116")
buf.write("\5\64\33\2\u0116\u0117\7\25\2\2\u0117\63\3\2\2\2\u0118")
buf.write("\u011a\5\66\34\2\u0119\u011b\5$\23\2\u011a\u0119\3\2\2")
buf.write("\2\u011a\u011b\3\2\2\2\u011b\65\3\2\2\2\u011c\u0121\5")
buf.write("8\35\2\u011d\u011e\7\37\2\2\u011e\u0120\58\35\2\u011f")
buf.write("\u011d\3\2\2\2\u0120\u0123\3\2\2\2\u0121\u011f\3\2\2\2")
buf.write("\u0121\u0122\3\2\2\2\u0122\67\3\2\2\2\u0123\u0121\3\2")
buf.write("\2\2\u0124\u0127\5:\36\2\u0125\u0127\3\2\2\2\u0126\u0124")
buf.write("\3\2\2\2\u0126\u0125\3\2\2\2\u01279\3\2\2\2\u0128\u012a")
buf.write("\5<\37\2\u0129\u0128\3\2\2\2\u012a\u012b\3\2\2\2\u012b")
buf.write("\u0129\3\2\2\2\u012b\u012c\3\2\2\2\u012c;\3\2\2\2\u012d")
buf.write("\u012f\5@!\2\u012e\u0130\5.\30\2\u012f\u012e\3\2\2\2\u012f")
buf.write("\u0130\3\2\2\2\u0130\u0136\3\2\2\2\u0131\u0133\5> \2\u0132")
buf.write("\u0134\5.\30\2\u0133\u0132\3\2\2\2\u0133\u0134\3\2\2\2")
buf.write("\u0134\u0136\3\2\2\2\u0135\u012d\3\2\2\2\u0135\u0131\3")
buf.write("\2\2\2\u0136=\3\2\2\2\u0137\u0138\7\35\2\2\u0138\u0139")
buf.write("\5\66\34\2\u0139\u013a\7\36\2\2\u013a?\3\2\2\2\u013b\u013f")
buf.write("\5B\"\2\u013c\u013f\7%\2\2\u013d\u013f\7\7\2\2\u013e\u013b")
buf.write("\3\2\2\2\u013e\u013c\3\2\2\2\u013e\u013d\3\2\2\2\u013f")
buf.write("A\3\2\2\2\u0140\u0141\t\6\2\2\u0141C\3\2\2\2+EJPT[cir")
buf.write("y~\u0083\u0087\u008d\u0093\u0098\u009a\u009f\u00a4\u00ac")
buf.write("\u00b0\u00b6\u00bc\u00be\u00cb\u00cf\u00df\u00e8\u00f3")
buf.write("\u00fa\u0105\u0107\u010a\u0110\u011a\u0121\u0126\u012b")
buf.write("\u012f\u0133\u0135\u013e")
return buf.getvalue()
class jsgParser ( Parser ):
grammarFileName = "jsgParser.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "'@terminals'", "'.TYPE'",
"'.IGNORE'", "'->'", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"'['", "']'", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "'='" ]
symbolicNames = [ "<INVALID>", "LEXER_ID_REF", "ID", "STRING", "INT",
"ANY", "TERMINALS", "TYPE", "IGNORE", "MAPSTO", "JSON_STRING",
"JSON_NUMBER", "JSON_INT", "JSON_BOOL", "JSON_NULL",
"JSON_ARRAY", "JSON_OBJECT", "OBRACKET", "CBRACKET",
"SEMI", "DASH", "OBRACE", "CBRACE", "COMMA", "STAR",
"QMARK", "PLUS", "OPREN", "CPREN", "BAR", "COLON",
"EQUALS", "PASS", "COMMENT", "LEXER_ID", "LEXER_CHAR_SET",
"LEXER_PASS", "LEXER_COMMENT" ]
RULE_doc = 0
RULE_typeDirective = 1
RULE_typeExceptions = 2
RULE_ignoreDirective = 3
RULE_grammarElt = 4
RULE_objectDef = 5
RULE_objectExpr = 6
RULE_membersDef = 7
RULE_altMemberDef = 8
RULE_member = 9
RULE_lastComma = 10
RULE_pairDef = 11
RULE_name = 12
RULE_arrayDef = 13
RULE_arrayExpr = 14
RULE_objectMacro = 15
RULE_valueTypeMacro = 16
RULE_builtinValueType = 17
RULE_valueType = 18
RULE_nonRefValueType = 19
RULE_typeAlternatives = 20
RULE_idref = 21
RULE_ebnfSuffix = 22
RULE_lexerRules = 23
RULE_lexerRuleSpec = 24
RULE_lexerRuleBlock = 25
RULE_lexerAltList = 26
RULE_lexerAlt = 27
RULE_lexerElements = 28
RULE_lexerElement = 29
RULE_lexerBlock = 30
RULE_lexerAtom = 31
RULE_lexerTerminal = 32
ruleNames = [ "doc", "typeDirective", "typeExceptions", "ignoreDirective",
"grammarElt", "objectDef", "objectExpr", "membersDef",
"altMemberDef", "member", "lastComma", "pairDef", "name",
"arrayDef", "arrayExpr", "objectMacro", "valueTypeMacro",
"builtinValueType", "valueType", "nonRefValueType", "typeAlternatives",
"idref", "ebnfSuffix", "lexerRules", "lexerRuleSpec",
"lexerRuleBlock", "lexerAltList", "lexerAlt", "lexerElements",
"lexerElement", "lexerBlock", "lexerAtom", "lexerTerminal" ]
EOF = Token.EOF
LEXER_ID_REF=1
ID=2
STRING=3
INT=4
ANY=5
TERMINALS=6
TYPE=7
IGNORE=8
MAPSTO=9
JSON_STRING=10
JSON_NUMBER=11
JSON_INT=12
JSON_BOOL=13
JSON_NULL=14
JSON_ARRAY=15
JSON_OBJECT=16
OBRACKET=17
CBRACKET=18
SEMI=19
DASH=20
OBRACE=21
CBRACE=22
COMMA=23
STAR=24
QMARK=25
PLUS=26
OPREN=27
CPREN=28
BAR=29
COLON=30
EQUALS=31
PASS=32
COMMENT=33
LEXER_ID=34
LEXER_CHAR_SET=35
LEXER_PASS=36
LEXER_COMMENT=37
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class DocContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EOF(self):
return self.getToken(jsgParser.EOF, 0)
def typeDirective(self):
return self.getTypedRuleContext(jsgParser.TypeDirectiveContext,0)
def ignoreDirective(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.IgnoreDirectiveContext)
else:
return self.getTypedRuleContext(jsgParser.IgnoreDirectiveContext,i)
def grammarElt(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.GrammarEltContext)
else:
return self.getTypedRuleContext(jsgParser.GrammarEltContext,i)
def lexerRules(self):
return self.getTypedRuleContext(jsgParser.LexerRulesContext,0)
def getRuleIndex(self):
return jsgParser.RULE_doc
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDoc" ):
return visitor.visitDoc(self)
else:
return visitor.visitChildren(self)
def doc(self):
localctx = jsgParser.DocContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_doc)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 67
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.TYPE:
self.state = 66
self.typeDirective()
self.state = 72
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.IGNORE:
self.state = 69
self.ignoreDirective()
self.state = 74
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 78
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.ID:
self.state = 75
self.grammarElt()
self.state = 80
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 82
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.TERMINALS:
self.state = 81
self.lexerRules()
self.state = 84
self.match(jsgParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeDirectiveContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TYPE(self):
return self.getToken(jsgParser.TYPE, 0)
def name(self):
return self.getTypedRuleContext(jsgParser.NameContext,0)
def SEMI(self):
return self.getToken(jsgParser.SEMI, 0)
def typeExceptions(self):
return self.getTypedRuleContext(jsgParser.TypeExceptionsContext,0)
def getRuleIndex(self):
return jsgParser.RULE_typeDirective
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeDirective" ):
return visitor.visitTypeDirective(self)
else:
return visitor.visitChildren(self)
def typeDirective(self):
localctx = jsgParser.TypeDirectiveContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_typeDirective)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 86
self.match(jsgParser.TYPE)
self.state = 87
self.name()
self.state = 89
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.DASH:
self.state = 88
self.typeExceptions()
self.state = 91
self.match(jsgParser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeExceptionsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def DASH(self):
return self.getToken(jsgParser.DASH, 0)
def idref(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.IdrefContext)
else:
return self.getTypedRuleContext(jsgParser.IdrefContext,i)
def getRuleIndex(self):
return jsgParser.RULE_typeExceptions
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeExceptions" ):
return visitor.visitTypeExceptions(self)
else:
return visitor.visitChildren(self)
def typeExceptions(self):
localctx = jsgParser.TypeExceptionsContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_typeExceptions)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 93
self.match(jsgParser.DASH)
self.state = 95
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 94
self.idref()
self.state = 97
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==jsgParser.ID):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IgnoreDirectiveContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IGNORE(self):
return self.getToken(jsgParser.IGNORE, 0)
def SEMI(self):
return self.getToken(jsgParser.SEMI, 0)
def name(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.NameContext)
else:
return self.getTypedRuleContext(jsgParser.NameContext,i)
def getRuleIndex(self):
return jsgParser.RULE_ignoreDirective
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIgnoreDirective" ):
return visitor.visitIgnoreDirective(self)
else:
return visitor.visitChildren(self)
def ignoreDirective(self):
localctx = jsgParser.IgnoreDirectiveContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_ignoreDirective)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 99
self.match(jsgParser.IGNORE)
self.state = 103
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.ID or _la==jsgParser.STRING:
self.state = 100
self.name()
self.state = 105
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 106
self.match(jsgParser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GrammarEltContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def objectDef(self):
return self.getTypedRuleContext(jsgParser.ObjectDefContext,0)
def arrayDef(self):
return self.getTypedRuleContext(jsgParser.ArrayDefContext,0)
def objectMacro(self):
return self.getTypedRuleContext(jsgParser.ObjectMacroContext,0)
def valueTypeMacro(self):
return self.getTypedRuleContext(jsgParser.ValueTypeMacroContext,0)
def getRuleIndex(self):
return jsgParser.RULE_grammarElt
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGrammarElt" ):
return visitor.visitGrammarElt(self)
else:
return visitor.visitChildren(self)
def grammarElt(self):
localctx = jsgParser.GrammarEltContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_grammarElt)
try:
self.state = 112
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 108
self.objectDef()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 109
self.arrayDef()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 110
self.objectMacro()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 111
self.valueTypeMacro()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ObjectDefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def objectExpr(self):
return self.getTypedRuleContext(jsgParser.ObjectExprContext,0)
def getRuleIndex(self):
return jsgParser.RULE_objectDef
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitObjectDef" ):
return visitor.visitObjectDef(self)
else:
return visitor.visitChildren(self)
def objectDef(self):
localctx = jsgParser.ObjectDefContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_objectDef)
try:
self.enterOuterAlt(localctx, 1)
self.state = 114
self.match(jsgParser.ID)
self.state = 115
self.objectExpr()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ObjectExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OBRACE(self):
return self.getToken(jsgParser.OBRACE, 0)
def CBRACE(self):
return self.getToken(jsgParser.CBRACE, 0)
def membersDef(self):
return self.getTypedRuleContext(jsgParser.MembersDefContext,0)
def MAPSTO(self):
return self.getToken(jsgParser.MAPSTO, 0)
def valueType(self):
return self.getTypedRuleContext(jsgParser.ValueTypeContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(jsgParser.EbnfSuffixContext,0)
def LEXER_ID_REF(self):
return self.getToken(jsgParser.LEXER_ID_REF, 0)
def ANY(self):
return self.getToken(jsgParser.ANY, 0)
def getRuleIndex(self):
return jsgParser.RULE_objectExpr
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitObjectExpr" ):
return visitor.visitObjectExpr(self)
else:
return visitor.visitChildren(self)
def objectExpr(self):
localctx = jsgParser.ObjectExprContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_objectExpr)
self._la = 0 # Token type
try:
self.state = 133
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,11,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 117
self.match(jsgParser.OBRACE)
self.state = 119
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ID) | (1 << jsgParser.STRING) | (1 << jsgParser.COMMA) | (1 << jsgParser.OPREN))) != 0):
self.state = 118
self.membersDef()
self.state = 121
self.match(jsgParser.CBRACE)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 122
self.match(jsgParser.OBRACE)
self.state = 124
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.LEXER_ID_REF or _la==jsgParser.ANY:
self.state = 123
_la = self._input.LA(1)
if not(_la==jsgParser.LEXER_ID_REF or _la==jsgParser.ANY):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 126
self.match(jsgParser.MAPSTO)
self.state = 127
self.valueType()
self.state = 129
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 128
self.ebnfSuffix()
self.state = 131
self.match(jsgParser.CBRACE)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class MembersDefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def COMMA(self):
return self.getToken(jsgParser.COMMA, 0)
def member(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.MemberContext)
else:
return self.getTypedRuleContext(jsgParser.MemberContext,i)
def BAR(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.BAR)
else:
return self.getToken(jsgParser.BAR, i)
def altMemberDef(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.AltMemberDefContext)
else:
return self.getTypedRuleContext(jsgParser.AltMemberDefContext,i)
def lastComma(self):
return self.getTypedRuleContext(jsgParser.LastCommaContext,0)
def getRuleIndex(self):
return jsgParser.RULE_membersDef
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMembersDef" ):
return visitor.visitMembersDef(self)
else:
return visitor.visitChildren(self)
def membersDef(self):
localctx = jsgParser.MembersDefContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_membersDef)
self._la = 0 # Token type
try:
self.state = 152
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.COMMA]:
self.enterOuterAlt(localctx, 1)
self.state = 135
self.match(jsgParser.COMMA)
pass
elif token in [jsgParser.ID, jsgParser.STRING, jsgParser.OPREN]:
self.enterOuterAlt(localctx, 2)
self.state = 137
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 136
self.member()
self.state = 139
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ID) | (1 << jsgParser.STRING) | (1 << jsgParser.OPREN))) != 0)):
break
self.state = 145
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,13,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 141
self.match(jsgParser.BAR)
self.state = 142
self.altMemberDef()
self.state = 147
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,13,self._ctx)
self.state = 150
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.BAR:
self.state = 148
self.match(jsgParser.BAR)
self.state = 149
self.lastComma()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AltMemberDefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def member(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.MemberContext)
else:
return self.getTypedRuleContext(jsgParser.MemberContext,i)
def getRuleIndex(self):
return jsgParser.RULE_altMemberDef
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAltMemberDef" ):
return visitor.visitAltMemberDef(self)
else:
return visitor.visitChildren(self)
def altMemberDef(self):
localctx = jsgParser.AltMemberDefContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_altMemberDef)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 157
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ID) | (1 << jsgParser.STRING) | (1 << jsgParser.OPREN))) != 0):
self.state = 154
self.member()
self.state = 159
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class MemberContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def pairDef(self):
return self.getTypedRuleContext(jsgParser.PairDefContext,0)
def COMMA(self):
return self.getToken(jsgParser.COMMA, 0)
def getRuleIndex(self):
return jsgParser.RULE_member
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMember" ):
return visitor.visitMember(self)
else:
return visitor.visitChildren(self)
def member(self):
localctx = jsgParser.MemberContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_member)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 160
self.pairDef()
self.state = 162
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.COMMA:
self.state = 161
self.match(jsgParser.COMMA)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LastCommaContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def COMMA(self):
return self.getToken(jsgParser.COMMA, 0)
def getRuleIndex(self):
return jsgParser.RULE_lastComma
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLastComma" ):
return visitor.visitLastComma(self)
else:
return visitor.visitChildren(self)
def lastComma(self):
localctx = jsgParser.LastCommaContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_lastComma)
try:
self.enterOuterAlt(localctx, 1)
self.state = 164
self.match(jsgParser.COMMA)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PairDefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def name(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.NameContext)
else:
return self.getTypedRuleContext(jsgParser.NameContext,i)
def COLON(self):
return self.getToken(jsgParser.COLON, 0)
def valueType(self):
return self.getTypedRuleContext(jsgParser.ValueTypeContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(jsgParser.EbnfSuffixContext,0)
def idref(self):
return self.getTypedRuleContext(jsgParser.IdrefContext,0)
def OPREN(self):
return self.getToken(jsgParser.OPREN, 0)
def CPREN(self):
return self.getToken(jsgParser.CPREN, 0)
def getRuleIndex(self):
return jsgParser.RULE_pairDef
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPairDef" ):
return visitor.visitPairDef(self)
else:
return visitor.visitChildren(self)
def pairDef(self):
localctx = jsgParser.PairDefContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_pairDef)
self._la = 0 # Token type
try:
self.state = 188
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,22,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 166
self.name()
self.state = 167
self.match(jsgParser.COLON)
self.state = 168
self.valueType()
self.state = 170
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 169
self.ebnfSuffix()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 172
self.idref()
self.state = 174
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 173
self.ebnfSuffix()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 176
self.match(jsgParser.OPREN)
self.state = 178
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 177
self.name()
self.state = 180
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==jsgParser.ID or _la==jsgParser.STRING):
break
self.state = 182
self.match(jsgParser.CPREN)
self.state = 183
self.match(jsgParser.COLON)
self.state = 184
self.valueType()
self.state = 186
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 185
self.ebnfSuffix()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def STRING(self):
return self.getToken(jsgParser.STRING, 0)
def getRuleIndex(self):
return jsgParser.RULE_name
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitName" ):
return visitor.visitName(self)
else:
return visitor.visitChildren(self)
def name(self):
localctx = jsgParser.NameContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_name)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 190
_la = self._input.LA(1)
if not(_la==jsgParser.ID or _la==jsgParser.STRING):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ArrayDefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def arrayExpr(self):
return self.getTypedRuleContext(jsgParser.ArrayExprContext,0)
def getRuleIndex(self):
return jsgParser.RULE_arrayDef
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitArrayDef" ):
return visitor.visitArrayDef(self)
else:
return visitor.visitChildren(self)
def arrayDef(self):
localctx = jsgParser.ArrayDefContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_arrayDef)
try:
self.enterOuterAlt(localctx, 1)
self.state = 192
self.match(jsgParser.ID)
self.state = 193
self.arrayExpr()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ArrayExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OBRACKET(self):
return self.getToken(jsgParser.OBRACKET, 0)
def valueType(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.ValueTypeContext)
else:
return self.getTypedRuleContext(jsgParser.ValueTypeContext,i)
def CBRACKET(self):
return self.getToken(jsgParser.CBRACKET, 0)
def BAR(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.BAR)
else:
return self.getToken(jsgParser.BAR, i)
def ebnfSuffix(self):
return self.getTypedRuleContext(jsgParser.EbnfSuffixContext,0)
def getRuleIndex(self):
return jsgParser.RULE_arrayExpr
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitArrayExpr" ):
return visitor.visitArrayExpr(self)
else:
return visitor.visitChildren(self)
def arrayExpr(self):
localctx = jsgParser.ArrayExprContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_arrayExpr)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 195
self.match(jsgParser.OBRACKET)
self.state = 196
self.valueType()
self.state = 201
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.BAR:
self.state = 197
self.match(jsgParser.BAR)
self.state = 198
self.valueType()
self.state = 203
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 205
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 204
self.ebnfSuffix()
self.state = 207
self.match(jsgParser.CBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ObjectMacroContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def EQUALS(self):
return self.getToken(jsgParser.EQUALS, 0)
def membersDef(self):
return self.getTypedRuleContext(jsgParser.MembersDefContext,0)
def SEMI(self):
return self.getToken(jsgParser.SEMI, 0)
def getRuleIndex(self):
return jsgParser.RULE_objectMacro
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitObjectMacro" ):
return visitor.visitObjectMacro(self)
else:
return visitor.visitChildren(self)
def objectMacro(self):
localctx = jsgParser.ObjectMacroContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_objectMacro)
try:
self.enterOuterAlt(localctx, 1)
self.state = 209
self.match(jsgParser.ID)
self.state = 210
self.match(jsgParser.EQUALS)
self.state = 211
self.membersDef()
self.state = 212
self.match(jsgParser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ValueTypeMacroContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def EQUALS(self):
return self.getToken(jsgParser.EQUALS, 0)
def nonRefValueType(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.NonRefValueTypeContext)
else:
return self.getTypedRuleContext(jsgParser.NonRefValueTypeContext,i)
def SEMI(self):
return self.getToken(jsgParser.SEMI, 0)
def BAR(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.BAR)
else:
return self.getToken(jsgParser.BAR, i)
def getRuleIndex(self):
return jsgParser.RULE_valueTypeMacro
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitValueTypeMacro" ):
return visitor.visitValueTypeMacro(self)
else:
return visitor.visitChildren(self)
def valueTypeMacro(self):
localctx = jsgParser.ValueTypeMacroContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_valueTypeMacro)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 214
self.match(jsgParser.ID)
self.state = 215
self.match(jsgParser.EQUALS)
self.state = 216
self.nonRefValueType()
self.state = 221
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.BAR:
self.state = 217
self.match(jsgParser.BAR)
self.state = 218
self.nonRefValueType()
self.state = 223
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 224
self.match(jsgParser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BuiltinValueTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def JSON_STRING(self):
return self.getToken(jsgParser.JSON_STRING, 0)
def JSON_NUMBER(self):
return self.getToken(jsgParser.JSON_NUMBER, 0)
def JSON_INT(self):
return self.getToken(jsgParser.JSON_INT, 0)
def JSON_BOOL(self):
return self.getToken(jsgParser.JSON_BOOL, 0)
def JSON_NULL(self):
return self.getToken(jsgParser.JSON_NULL, 0)
def JSON_ARRAY(self):
return self.getToken(jsgParser.JSON_ARRAY, 0)
def JSON_OBJECT(self):
return self.getToken(jsgParser.JSON_OBJECT, 0)
def ANY(self):
return self.getToken(jsgParser.ANY, 0)
def getRuleIndex(self):
return jsgParser.RULE_builtinValueType
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBuiltinValueType" ):
return visitor.visitBuiltinValueType(self)
else:
return visitor.visitChildren(self)
def builtinValueType(self):
localctx = jsgParser.BuiltinValueTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_builtinValueType)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 226
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ANY) | (1 << jsgParser.JSON_STRING) | (1 << jsgParser.JSON_NUMBER) | (1 << jsgParser.JSON_INT) | (1 << jsgParser.JSON_BOOL) | (1 << jsgParser.JSON_NULL) | (1 << jsgParser.JSON_ARRAY) | (1 << jsgParser.JSON_OBJECT))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ValueTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def idref(self):
return self.getTypedRuleContext(jsgParser.IdrefContext,0)
def nonRefValueType(self):
return self.getTypedRuleContext(jsgParser.NonRefValueTypeContext,0)
def getRuleIndex(self):
return jsgParser.RULE_valueType
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitValueType" ):
return visitor.visitValueType(self)
else:
return visitor.visitChildren(self)
def valueType(self):
localctx = jsgParser.ValueTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_valueType)
try:
self.state = 230
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.ID]:
self.enterOuterAlt(localctx, 1)
self.state = 228
self.idref()
pass
elif token in [jsgParser.LEXER_ID_REF, jsgParser.STRING, jsgParser.ANY, jsgParser.JSON_STRING, jsgParser.JSON_NUMBER, jsgParser.JSON_INT, jsgParser.JSON_BOOL, jsgParser.JSON_NULL, jsgParser.JSON_ARRAY, jsgParser.JSON_OBJECT, jsgParser.OBRACKET, jsgParser.OBRACE, jsgParser.OPREN]:
self.enterOuterAlt(localctx, 2)
self.state = 229
self.nonRefValueType()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NonRefValueTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LEXER_ID_REF(self):
return self.getToken(jsgParser.LEXER_ID_REF, 0)
def STRING(self):
return self.getToken(jsgParser.STRING, 0)
def builtinValueType(self):
return self.getTypedRuleContext(jsgParser.BuiltinValueTypeContext,0)
def objectExpr(self):
return self.getTypedRuleContext(jsgParser.ObjectExprContext,0)
def arrayExpr(self):
return self.getTypedRuleContext(jsgParser.ArrayExprContext,0)
def OPREN(self):
return self.getToken(jsgParser.OPREN, 0)
def typeAlternatives(self):
return self.getTypedRuleContext(jsgParser.TypeAlternativesContext,0)
def CPREN(self):
return self.getToken(jsgParser.CPREN, 0)
def getRuleIndex(self):
return jsgParser.RULE_nonRefValueType
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNonRefValueType" ):
return visitor.visitNonRefValueType(self)
else:
return visitor.visitChildren(self)
def nonRefValueType(self):
localctx = jsgParser.NonRefValueTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_nonRefValueType)
try:
self.state = 241
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.LEXER_ID_REF]:
self.enterOuterAlt(localctx, 1)
self.state = 232
self.match(jsgParser.LEXER_ID_REF)
pass
elif token in [jsgParser.STRING]:
self.enterOuterAlt(localctx, 2)
self.state = 233
self.match(jsgParser.STRING)
pass
elif token in [jsgParser.ANY, jsgParser.JSON_STRING, jsgParser.JSON_NUMBER, jsgParser.JSON_INT, jsgParser.JSON_BOOL, jsgParser.JSON_NULL, jsgParser.JSON_ARRAY, jsgParser.JSON_OBJECT]:
self.enterOuterAlt(localctx, 3)
self.state = 234
self.builtinValueType()
pass
elif token in [jsgParser.OBRACE]:
self.enterOuterAlt(localctx, 4)
self.state = 235
self.objectExpr()
pass
elif token in [jsgParser.OBRACKET]:
self.enterOuterAlt(localctx, 5)
self.state = 236
self.arrayExpr()
pass
elif token in [jsgParser.OPREN]:
self.enterOuterAlt(localctx, 6)
self.state = 237
self.match(jsgParser.OPREN)
self.state = 238
self.typeAlternatives()
self.state = 239
self.match(jsgParser.CPREN)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeAlternativesContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def valueType(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.ValueTypeContext)
else:
return self.getTypedRuleContext(jsgParser.ValueTypeContext,i)
def BAR(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.BAR)
else:
return self.getToken(jsgParser.BAR, i)
def getRuleIndex(self):
return jsgParser.RULE_typeAlternatives
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeAlternatives" ):
return visitor.visitTypeAlternatives(self)
else:
return visitor.visitChildren(self)
def typeAlternatives(self):
localctx = jsgParser.TypeAlternativesContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_typeAlternatives)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 243
self.valueType()
self.state = 246
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 244
self.match(jsgParser.BAR)
self.state = 245
self.valueType()
self.state = 248
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==jsgParser.BAR):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdrefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(jsgParser.ID, 0)
def getRuleIndex(self):
return jsgParser.RULE_idref
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIdref" ):
return visitor.visitIdref(self)
else:
return visitor.visitChildren(self)
def idref(self):
localctx = jsgParser.IdrefContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_idref)
try:
self.enterOuterAlt(localctx, 1)
self.state = 250
self.match(jsgParser.ID)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EbnfSuffixContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def QMARK(self):
return self.getToken(jsgParser.QMARK, 0)
def STAR(self):
return self.getToken(jsgParser.STAR, 0)
def PLUS(self):
return self.getToken(jsgParser.PLUS, 0)
def OBRACE(self):
return self.getToken(jsgParser.OBRACE, 0)
def INT(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.INT)
else:
return self.getToken(jsgParser.INT, i)
def CBRACE(self):
return self.getToken(jsgParser.CBRACE, 0)
def COMMA(self):
return self.getToken(jsgParser.COMMA, 0)
def getRuleIndex(self):
return jsgParser.RULE_ebnfSuffix
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEbnfSuffix" ):
return visitor.visitEbnfSuffix(self)
else:
return visitor.visitChildren(self)
def ebnfSuffix(self):
localctx = jsgParser.EbnfSuffixContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_ebnfSuffix)
self._la = 0 # Token type
try:
self.state = 264
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.QMARK]:
self.enterOuterAlt(localctx, 1)
self.state = 252
self.match(jsgParser.QMARK)
pass
elif token in [jsgParser.STAR]:
self.enterOuterAlt(localctx, 2)
self.state = 253
self.match(jsgParser.STAR)
pass
elif token in [jsgParser.PLUS]:
self.enterOuterAlt(localctx, 3)
self.state = 254
self.match(jsgParser.PLUS)
pass
elif token in [jsgParser.OBRACE]:
self.enterOuterAlt(localctx, 4)
self.state = 255
self.match(jsgParser.OBRACE)
self.state = 256
self.match(jsgParser.INT)
self.state = 261
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.COMMA:
self.state = 257
self.match(jsgParser.COMMA)
self.state = 259
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==jsgParser.INT or _la==jsgParser.STAR:
self.state = 258
_la = self._input.LA(1)
if not(_la==jsgParser.INT or _la==jsgParser.STAR):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 263
self.match(jsgParser.CBRACE)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRulesContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TERMINALS(self):
return self.getToken(jsgParser.TERMINALS, 0)
def lexerRuleSpec(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.LexerRuleSpecContext)
else:
return self.getTypedRuleContext(jsgParser.LexerRuleSpecContext,i)
def getRuleIndex(self):
return jsgParser.RULE_lexerRules
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRules" ):
return visitor.visitLexerRules(self)
else:
return visitor.visitChildren(self)
def lexerRules(self):
localctx = jsgParser.LexerRulesContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_lexerRules)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 266
self.match(jsgParser.TERMINALS)
self.state = 270
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.LEXER_ID:
self.state = 267
self.lexerRuleSpec()
self.state = 272
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRuleSpecContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LEXER_ID(self):
return self.getToken(jsgParser.LEXER_ID, 0)
def COLON(self):
return self.getToken(jsgParser.COLON, 0)
def lexerRuleBlock(self):
return self.getTypedRuleContext(jsgParser.LexerRuleBlockContext,0)
def SEMI(self):
return self.getToken(jsgParser.SEMI, 0)
def getRuleIndex(self):
return jsgParser.RULE_lexerRuleSpec
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRuleSpec" ):
return visitor.visitLexerRuleSpec(self)
else:
return visitor.visitChildren(self)
def lexerRuleSpec(self):
localctx = jsgParser.LexerRuleSpecContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_lexerRuleSpec)
try:
self.enterOuterAlt(localctx, 1)
self.state = 273
self.match(jsgParser.LEXER_ID)
self.state = 274
self.match(jsgParser.COLON)
self.state = 275
self.lexerRuleBlock()
self.state = 276
self.match(jsgParser.SEMI)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerRuleBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerAltList(self):
return self.getTypedRuleContext(jsgParser.LexerAltListContext,0)
def builtinValueType(self):
return self.getTypedRuleContext(jsgParser.BuiltinValueTypeContext,0)
def getRuleIndex(self):
return jsgParser.RULE_lexerRuleBlock
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerRuleBlock" ):
return visitor.visitLexerRuleBlock(self)
else:
return visitor.visitChildren(self)
def lexerRuleBlock(self):
localctx = jsgParser.LexerRuleBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_lexerRuleBlock)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 278
self.lexerAltList()
self.state = 280
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.ANY) | (1 << jsgParser.JSON_STRING) | (1 << jsgParser.JSON_NUMBER) | (1 << jsgParser.JSON_INT) | (1 << jsgParser.JSON_BOOL) | (1 << jsgParser.JSON_NULL) | (1 << jsgParser.JSON_ARRAY) | (1 << jsgParser.JSON_OBJECT))) != 0):
self.state = 279
self.builtinValueType()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerAltListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerAlt(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.LexerAltContext)
else:
return self.getTypedRuleContext(jsgParser.LexerAltContext,i)
def BAR(self, i:int=None):
if i is None:
return self.getTokens(jsgParser.BAR)
else:
return self.getToken(jsgParser.BAR, i)
def getRuleIndex(self):
return jsgParser.RULE_lexerAltList
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAltList" ):
return visitor.visitLexerAltList(self)
else:
return visitor.visitChildren(self)
def lexerAltList(self):
localctx = jsgParser.LexerAltListContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_lexerAltList)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 282
self.lexerAlt()
self.state = 287
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==jsgParser.BAR:
self.state = 283
self.match(jsgParser.BAR)
self.state = 284
self.lexerAlt()
self.state = 289
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerAltContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerElements(self):
return self.getTypedRuleContext(jsgParser.LexerElementsContext,0)
def getRuleIndex(self):
return jsgParser.RULE_lexerAlt
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAlt" ):
return visitor.visitLexerAlt(self)
else:
return visitor.visitChildren(self)
def lexerAlt(self):
localctx = jsgParser.LexerAltContext(self, self._ctx, self.state)
self.enterRule(localctx, 54, self.RULE_lexerAlt)
try:
self.state = 292
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,35,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 290
self.lexerElements()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerElementsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerElement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(jsgParser.LexerElementContext)
else:
return self.getTypedRuleContext(jsgParser.LexerElementContext,i)
def getRuleIndex(self):
return jsgParser.RULE_lexerElements
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElements" ):
return visitor.visitLexerElements(self)
else:
return visitor.visitChildren(self)
def lexerElements(self):
localctx = jsgParser.LexerElementsContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_lexerElements)
try:
self.enterOuterAlt(localctx, 1)
self.state = 295
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 294
self.lexerElement()
else:
raise NoViableAltException(self)
self.state = 297
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,36,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerAtom(self):
return self.getTypedRuleContext(jsgParser.LexerAtomContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(jsgParser.EbnfSuffixContext,0)
def lexerBlock(self):
return self.getTypedRuleContext(jsgParser.LexerBlockContext,0)
def getRuleIndex(self):
return jsgParser.RULE_lexerElement
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElement" ):
return visitor.visitLexerElement(self)
else:
return visitor.visitChildren(self)
def lexerElement(self):
localctx = jsgParser.LexerElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_lexerElement)
self._la = 0 # Token type
try:
self.state = 307
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.STRING, jsgParser.ANY, jsgParser.LEXER_ID, jsgParser.LEXER_CHAR_SET]:
self.enterOuterAlt(localctx, 1)
self.state = 299
self.lexerAtom()
self.state = 301
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 300
self.ebnfSuffix()
pass
elif token in [jsgParser.OPREN]:
self.enterOuterAlt(localctx, 2)
self.state = 303
self.lexerBlock()
self.state = 305
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jsgParser.OBRACE) | (1 << jsgParser.STAR) | (1 << jsgParser.QMARK) | (1 << jsgParser.PLUS))) != 0):
self.state = 304
self.ebnfSuffix()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OPREN(self):
return self.getToken(jsgParser.OPREN, 0)
def lexerAltList(self):
return self.getTypedRuleContext(jsgParser.LexerAltListContext,0)
def CPREN(self):
return self.getToken(jsgParser.CPREN, 0)
def getRuleIndex(self):
return jsgParser.RULE_lexerBlock
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerBlock" ):
return visitor.visitLexerBlock(self)
else:
return visitor.visitChildren(self)
def lexerBlock(self):
localctx = jsgParser.LexerBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 60, self.RULE_lexerBlock)
try:
self.enterOuterAlt(localctx, 1)
self.state = 309
self.match(jsgParser.OPREN)
self.state = 310
self.lexerAltList()
self.state = 311
self.match(jsgParser.CPREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerAtomContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def lexerTerminal(self):
return self.getTypedRuleContext(jsgParser.LexerTerminalContext,0)
def LEXER_CHAR_SET(self):
return self.getToken(jsgParser.LEXER_CHAR_SET, 0)
def ANY(self):
return self.getToken(jsgParser.ANY, 0)
def getRuleIndex(self):
return jsgParser.RULE_lexerAtom
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtom" ):
return visitor.visitLexerAtom(self)
else:
return visitor.visitChildren(self)
def lexerAtom(self):
localctx = jsgParser.LexerAtomContext(self, self._ctx, self.state)
self.enterRule(localctx, 62, self.RULE_lexerAtom)
try:
self.state = 316
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [jsgParser.STRING, jsgParser.LEXER_ID]:
self.enterOuterAlt(localctx, 1)
self.state = 313
self.lexerTerminal()
pass
elif token in [jsgParser.LEXER_CHAR_SET]:
self.enterOuterAlt(localctx, 2)
self.state = 314
self.match(jsgParser.LEXER_CHAR_SET)
pass
elif token in [jsgParser.ANY]:
self.enterOuterAlt(localctx, 3)
self.state = 315
self.match(jsgParser.ANY)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LexerTerminalContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LEXER_ID(self):
return self.getToken(jsgParser.LEXER_ID, 0)
def STRING(self):
return self.getToken(jsgParser.STRING, 0)
def getRuleIndex(self):
return jsgParser.RULE_lexerTerminal
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerTerminal" ):
return visitor.visitLexerTerminal(self)
else:
return visitor.visitChildren(self)
def lexerTerminal(self):
localctx = jsgParser.LexerTerminalContext(self, self._ctx, self.state)
self.enterRule(localctx, 64, self.RULE_lexerTerminal)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 318
_la = self._input.LA(1)
if not(_la==jsgParser.STRING or _la==jsgParser.LEXER_ID):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
| [
"io.StringIO"
] | [((240, 250), 'io.StringIO', 'StringIO', ([], {}), '()\n', (248, 250), False, 'from io import StringIO\n')] |
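A minimal sketch of how generated rule classes like the ones above are normally driven, assuming the matching generated lexer (called jsgLexer here — it is not part of this record) and the antlr4-python3-runtime package:

from antlr4 import InputStream, CommonTokenStream

def parse_value_type(text):
    lexer = jsgLexer(InputStream(text))           # assumed generated lexer class
    parser = jsgParser(CommonTokenStream(lexer))   # parser whose rule contexts appear above
    return parser.valueType()                       # start at any rule, e.g. valueType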
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Modified by <NAME> at SNU Software Platform Lab for
# SWPP fall 2020 lecture.
import sys
import re
import os
from functools import wraps
"""Baby Names exercise
Implement the babyname parser class that parses the popular names and their ranks from a html file.
1) At first, you need to implement a decorator that checks whether the html file exists or not.
2) Also, the parser should extract tuples of (rank, male-name, female-name) from the file by using regex.
For writing regex, it's nice to include a copy of the target text for inspiration.
3) Finally, you need to implement `parse` method in `BabynameParser` class that parses the extracted tuples
with the given lambda and return a list of processed results.
"""
class BabynameFileNotFoundException(Exception):
"""
A custom exception for the cases that the babyname file does not exist.
"""
pass
def check_filename_existence(func):
    """
    (1 point)
    A decorator that catches a non-existing filename argument and raises a custom `BabynameFileNotFoundException`.
    Args:
        func: The function to decorate.
    Raises:
        BabynameFileNotFoundException: if there is no such file while func tries to open a file.
        We assume func receives a directory path and year to generate a filename to open.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except FileNotFoundError as err:
            raise BabynameFileNotFoundException("No such file: {}".format(err.filename))
    return wrapper
class BabynameParser:
@check_filename_existence
def __init__(self, dirname, year):
"""
(3 points)
Given directory path and year, extracts the name of a file to open the corresponding file
and a list of the (rank, male-name, female-name) tuples from the file read by using regex.
[('1', 'Michael', 'Jessica'), ('2', 'Christopher', 'Ashley'), ....]
Args:
dirname: The name of the directory where baby name html files are stored
year: The year number. int.
"""
        pathname = os.path.join(dirname, "{}.html".format(year))
        with open(pathname, 'r') as f:  # close the file even if reading fails
            text = f.read()
        self.year = year
        regex = re.compile(r"<td>\w{1,60}</td>")
        # strip the "<td>"/"</td>" wrappers, then group the cells into
        # (rank, male-name, female-name) triples
        cells = [cell[4:-5] for cell in regex.findall(text)]
        self.rank_to_names_tuples = [
            tuple(cells[i:i + 3]) for i in range(0, len(cells) - 2, 3)
        ]
    def parse(self, parsing_lambda):
        """
        (2 points)
        Collects a list of babynames parsed from the (rank, male-name, female-name) tuples.
        The list must contain all results processed with the given lambda.
        Args:
            parsing_lambda: The parsing lambda.
            It must process a single (string, string, string) tuple and return something.
        Returns:
            A list of the lambda function's outputs
        """
        return [parsing_lambda(row) for row in self.rank_to_names_tuples]
| [
"functools.wraps",
"re.compile"
] | [((1155, 1166), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1160, 1166), False, 'from functools import wraps\n'), ((2576, 2608), 're.compile', 're.compile', (['"""<td>\\\\w{1,60}</td>"""'], {}), "('<td>\\\\w{1,60}</td>')\n", (2586, 2608), False, 'import re\n')] |
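A short usage sketch for the parser above, assuming HTML files named `<dirname>/<year>.html` exist (the directory name and year below are made up):

parser = BabynameParser("babynames", 2006)             # reads babynames/2006.html
male_names = parser.parse(lambda t: t[1])              # second field of each triple
ranked_girls = parser.parse(lambda t: (int(t[0]), t[2]))
print(male_names[:5], ranked_girls[:5])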
from typing import Optional
import requests
from app.core import config
from app.services.bgm_tv.model import UserInfo, SubjectWithEps
class BgmApi:
def __init__(self, mirror=False):
self.session = requests.Session()
if mirror:
self.host = "mirror.api.bgm.rin.cat"
self.session.headers["user-agent"] = config.REQUEST_SERVICE_USER_AGENT
else:
self.host = "api.bgm.tv"
self.session.headers["user-agent"] = config.REQUEST_USER_AGENT
def url(self, path):
return f"https://{self.host}{path}"
@staticmethod
def error_in_response(data: dict):
return "error" in data
def subject_eps(self, subject_id: int) -> Optional[SubjectWithEps]:
r = self.session.get(self.url(f"/subject/{subject_id}/ep")).json()
if self.error_in_response(r):
return None
return SubjectWithEps.parse_obj(r)
def get_user_info(self, user_id: str) -> Optional[UserInfo]:
r = self.session.get(self.url(f"/user/{user_id}")).json()
if self.error_in_response(r):
return None
return UserInfo.parse_obj(r)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.session.close()
| [
"app.services.bgm_tv.model.UserInfo.parse_obj",
"requests.Session",
"app.services.bgm_tv.model.SubjectWithEps.parse_obj"
] | [((214, 232), 'requests.Session', 'requests.Session', ([], {}), '()\n', (230, 232), False, 'import requests\n'), ((894, 921), 'app.services.bgm_tv.model.SubjectWithEps.parse_obj', 'SubjectWithEps.parse_obj', (['r'], {}), '(r)\n', (918, 921), False, 'from app.services.bgm_tv.model import UserInfo, SubjectWithEps\n'), ((1131, 1152), 'app.services.bgm_tv.model.UserInfo.parse_obj', 'UserInfo.parse_obj', (['r'], {}), '(r)\n', (1149, 1152), False, 'from app.services.bgm_tv.model import UserInfo, SubjectWithEps\n')] |
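Because BgmApi defines __enter__/__exit__, it can be used as a context manager, which guarantees the underlying requests.Session gets closed; the user id below is an arbitrary placeholder:

with BgmApi(mirror=False) as api:
    info = api.get_user_info("example_user")  # placeholder id
    if info is None:
        print("bgm.tv returned an error")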
''' The most basic way to use the Private API. I recommend renaming the sample env file
to .env and filling out the gemini api key information. The dotenv package loads the .env
file, and the os.environ mapping then exposes the values read from it.
'''
import os
import robin_stocks.gemini as g
from dotenv import load_dotenv
##
load_dotenv()  # populate os.environ from the .env file before the keys are read
ticker = "btcusd"
##
g.login(os.environ['gemini_account_key'], os.environ['gemini_account_secret'])
my_trades, error = g.get_trades_for_crypto(ticker, jsonify=True)
if error:
print("oh my an error")
else:
print("no errors here")
print(my_trades)
| [
"robin_stocks.gemini.login",
"robin_stocks.gemini.get_trades_for_crypto"
] | [((352, 430), 'robin_stocks.gemini.login', 'g.login', (["os.environ['gemini_account_key']", "os.environ['gemini_account_secret']"], {}), "(os.environ['gemini_account_key'], os.environ['gemini_account_secret'])\n", (359, 430), True, 'import robin_stocks.gemini as g\n'), ((450, 495), 'robin_stocks.gemini.get_trades_for_crypto', 'g.get_trades_for_crypto', (['ticker'], {'jsonify': '(True)'}), '(ticker, jsonify=True)\n', (473, 495), True, 'import robin_stocks.gemini as g\n')] |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.validators import MinValueValidator
# Create your models here.
class Profile(models.Model):
""" Extend built-in Django User model with cash value """
user = models.OneToOneField(
User,
on_delete=models.CASCADE,
)
cash = models.FloatField(
default=10000,
validators=[MinValueValidator(0)],
)
def __str__(self):
return f'{self.user.username}'
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
""" When a new user is created, also create a new Profile """
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
""" Save the OneToOne linked Profile on the User instance """
instance.profile.save()
class Stock(models.Model):
symbol = models.CharField(
max_length=10,
unique=True,
)
name = models.CharField(
max_length=80,
)
def __str__(self):
return f'{self.symbol} - {self.name[:10]}'
class Transaction(models.Model):
""" Stores an append-only list of transactions. """
user = models.ForeignKey(
'Profile',
on_delete=models.CASCADE,
related_name='transactions',
)
stock = models.ForeignKey(
'Stock',
on_delete=models.CASCADE,
)
quantity = models.FloatField()
price = models.FloatField()
time = models.DateTimeField(
auto_now_add=True,
)
def __str__(self):
buy_sell = 'BUY' if self.quantity > 0 else 'SELL'
return f'{self.user} - {buy_sell} {self.stock}'
| [
"django.db.models.OneToOneField",
"django.db.models.FloatField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.dispatch.receiver",
"django.core.validators.MinValueValidator",
"django.db.models.CharField"
] | [((584, 616), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (592, 616), False, 'from django.dispatch import receiver\n'), ((810, 842), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (818, 842), False, 'from django.dispatch import receiver\n'), ((340, 392), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (360, 392), False, 'from django.db import models\n'), ((1030, 1074), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'unique': '(True)'}), '(max_length=10, unique=True)\n', (1046, 1074), False, 'from django.db import models\n'), ((1109, 1140), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(80)'}), '(max_length=80)\n', (1125, 1140), False, 'from django.db import models\n'), ((1333, 1421), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Profile"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""transactions"""'}), "('Profile', on_delete=models.CASCADE, related_name=\n 'transactions')\n", (1350, 1421), False, 'from django.db import models\n'), ((1460, 1512), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Stock"""'], {'on_delete': 'models.CASCADE'}), "('Stock', on_delete=models.CASCADE)\n", (1477, 1512), False, 'from django.db import models\n'), ((1551, 1570), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1568, 1570), False, 'from django.db import models\n'), ((1583, 1602), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1600, 1602), False, 'from django.db import models\n'), ((1614, 1653), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1634, 1653), False, 'from django.db import models\n'), ((489, 509), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (506, 509), False, 'from django.core.validators import MinValueValidator\n')] |
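A sketch of how these models interact, e.g. in a Django shell after migrations; the username, symbol, and prices are made-up values:

user = User.objects.create_user(username="alice")  # post_save signal auto-creates user.profile
stock, _ = Stock.objects.get_or_create(symbol="AAPL", defaults={"name": "Apple Inc."})
Transaction.objects.create(user=user.profile, stock=stock, quantity=10, price=150.0)
# A sell is simply a transaction with negative quantity:
Transaction.objects.create(user=user.profile, stock=stock, quantity=-4, price=155.5)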
import re
from .Node import Node
class XGBoostNode(Node):
    FLOAT_REGEX = r'[+-]?\d+(\.\d+)?([eE][+-]?\d+)?'
    BRANCH_REGEX = re.compile(rf'(?P<branch>\d+):\[(?P<feature>\w+)(?P<comp><)(?P<value>{FLOAT_REGEX})\]')
    LEAF_REGEX = re.compile(rf'(?P<leaf>\d+):leaf=(?P<value>{FLOAT_REGEX})')
    FEATURE_REGEX = re.compile(r'\w(?P<id>\d+)')
def __init__(self, parent=None, line='', feature_index_dict=None):
super().__init__(parent=parent)
# propagate any feature index dict
self.feature_index_dict = None
if feature_index_dict or parent:
self.feature_index_dict = feature_index_dict or parent.feature_index_dict
match_leaf = self.LEAF_REGEX.search(line)
if match_leaf:
self.weight = float(match_leaf.groupdict().get('value'))
self.final = True
else:
self.weight = 0
self.final = False
match_branch = self.BRANCH_REGEX.search(line)
if match_branch:
self.cut_value = float(match_branch.groupdict().get('value'))
self.feature = match_branch.groupdict().get('feature')
if self.feature_index_dict:
self.feature_index = self.feature_index_dict[self.feature]
else:
feature_match = self.FEATURE_REGEX.search(self.feature)
if not feature_match:
raise ValueError(f'Feature {self.feature} needs to be '
'matched with its correct position in the feature '
'value vector. Please give a list of feature names'
' in the correct order with `--feature-names`.')
self.feature_index = feature_match.groupdict().get('id')
else:
self.cut_value = None
self.feature = None
self.feature_index = None
def get_feature_names(lines):
features = set()
for l in lines:
match_branch = XGBoostNode.BRANCH_REGEX.search(l)
if match_branch:
features.add(match_branch.groupdict().get('feature'))
return features
def parse_model(filename, feature_names):
trees = []
with open(filename, 'r') as f:
lines = f.readlines()
# build the feature name dict if neccessary
if feature_names:
# check that the feature names are in line with the names found in
# the tree
if not set(feature_names) >= get_feature_names(lines):
raise ValueError('The given feature names do not properly describe'
'the features found in the model. Please check that your '
'argument for `--feature-names` is a proper superset of the '
'feature names used in the model.\nThese features have been '
f'found in the model:\n{" ".join(get_feature_names(lines))}')
feature_index_dict = {name: i for i, name in enumerate(feature_names)}
else:
feature_index_dict = None
node = None
for i, line in enumerate(lines):
# save finished tree
if line.startswith('booster'):
if node:
trees.append(node.root)
node = None
continue
# start a new tree
if node is None:
node = XGBoostNode(line=line, feature_index_dict=feature_index_dict)
continue
# move upwards if a leaf is reached
while node.final or (node.parent and node.left and node.right):
node = node.parent
# fill left and right leaf
if not node.left:
node.left = XGBoostNode(parent=node, line=line)
node = node.left
continue
if not node.right:
node.right = XGBoostNode(parent=node, line=line)
node = node.right
continue
trees.append(node.root)
return trees
| [
"re.compile"
] | [((133, 233), 're.compile', 're.compile', (['f"""(?P<branch>\\\\d+):\\\\[(?P<feature>\\\\w+)(?P<comp><)(?P<value>{FLOAT_REGEX})\\\\]"""'], {}), "(\n f'(?P<branch>\\\\d+):\\\\[(?P<feature>\\\\w+)(?P<comp><)(?P<value>{FLOAT_REGEX})\\\\]'\n )\n", (143, 233), False, 'import re\n'), ((237, 296), 're.compile', 're.compile', (['f"""(?P<leaf>\\\\d+):leaf=(?P<value>{FLOAT_REGEX})"""'], {}), "(f'(?P<leaf>\\\\d+):leaf=(?P<value>{FLOAT_REGEX})')\n", (247, 296), False, 'import re\n'), ((316, 345), 're.compile', 're.compile', (['"""\\\\w(?P<id>\\\\d+)"""'], {}), "('\\\\w(?P<id>\\\\d+)')\n", (326, 345), False, 'import re\n')] |
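A sketch of producing a text dump that parse_model can read, assuming the xgboost package is available; the training data, file name, and feature names are placeholders:

import numpy as np
import xgboost as xgb  # assumed dependency

X, y = np.random.rand(100, 3), np.random.randint(0, 2, 100)
booster = xgb.train({"max_depth": 2}, xgb.DMatrix(X, label=y), num_boost_round=3)
booster.dump_model("model_dump.txt")                 # text dump in the format parsed above
trees = parse_model("model_dump.txt", feature_names=["f0", "f1", "f2"])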
import flask_login
from .application import app
from .models import DbUser
login_manager = flask_login.LoginManager() # pylint: disable=invalid-name
login_manager.init_app(app)
login_manager.user_loader(lambda user_id: DbUser.query.filter_by(id=user_id).first())
login_required = flask_login.login_required
@login_manager.unauthorized_handler
def unauthorized_handler():
from flask import redirect, request, url_for
return redirect(url_for('login', request_uri=request.path))
@app.before_request
def redirect_https():
from flask import redirect, request
if 'X-Arr-Ssl' not in request.headers and not app.config['is_local_server']:
        redirect_url = request.url.replace('http://', 'https://', 1)  # only swap the scheme
return redirect(redirect_url)
@app.route('/', methods=['GET'])
def index():
from flask import render_template
byline = 'Morocco - An automation service runs on Azure Batch.\n'
return render_template('index.html', byline=byline, title='Azure CLI')
@app.route('/login', methods=['GET'])
def login():
"""Redirect user agent to Azure AD sign-in page"""
import morocco.auth
return morocco.auth.openid_login()
@app.route('/signin-callback', methods=['POST'])
def signin_callback():
"""Redirect from AAD sign in page"""
def get_or_add_user(user_id: str):
from .application import db
from .models import DbUser
user = DbUser.query.filter_by(id=user_id).first()
if not user:
user = DbUser(user_id)
db.session.add(user)
db.session.commit()
return user
import morocco.auth
return morocco.auth.openid_callback(get_or_add_user)
@app.route('/logout', methods=['POST'])
def logout():
"""Logout from both this application as well as Azure OpenID sign in."""
import morocco.auth
return morocco.auth.openid_logout()
| [
"flask.render_template",
"flask_login.LoginManager",
"flask.request.url.replace",
"flask.url_for",
"flask.redirect"
] | [((92, 118), 'flask_login.LoginManager', 'flask_login.LoginManager', ([], {}), '()\n', (116, 118), False, 'import flask_login\n'), ((921, 984), 'flask.render_template', 'render_template', (['"""index.html"""'], {'byline': 'byline', 'title': '"""Azure CLI"""'}), "('index.html', byline=byline, title='Azure CLI')\n", (936, 984), False, 'from flask import render_template\n'), ((445, 487), 'flask.url_for', 'url_for', (['"""login"""'], {'request_uri': 'request.path'}), "('login', request_uri=request.path)\n", (452, 487), False, 'from flask import redirect, request, url_for\n'), ((678, 714), 'flask.request.url.replace', 'request.url.replace', (['"""http"""', '"""https"""'], {}), "('http', 'https')\n", (697, 714), False, 'from flask import redirect, request\n'), ((730, 752), 'flask.redirect', 'redirect', (['redirect_url'], {}), '(redirect_url)\n', (738, 752), False, 'from flask import redirect, request\n')] |
#!/usr/bin/env python
import json
import os
import subprocess
import sys
def fileExistsAndNonEmpty(filename):
if not os.path.exists(filename):
return False
return os.stat(filename).st_size > 0
class AssemblerRunner(object):
def __init__(self, sample_id, sample_seq, bam_file):
with open("startplugin.json", "r") as fh:
self.config = json.load(fh)
self.params = self.config['pluginconfig']
# launch.sh creates a symlink to the input BAM file in this directory
self.output_dir = self.config['runinfo']['results_dir']
self.sample_id = sample_id
self.sample_seq = sample_seq
self.sample_name = sample_id + "." + sample_seq
self.sample_output_dir = os.path.join(self.output_dir, self.sample_name)
self.bam_file = bam_file
self.bam_rel_path = os.path.join(self.sample_name, self.bam_file)
# relative path to the input bam file
self.bam_to_assemble = os.path.join(self.output_dir, self.bam_rel_path)
        # how much to downsample (the step is skipped if it equals 1)
        if self.params.has_key('fraction_of_reads'):
            self.fraction_of_reads = float(self.params['fraction_of_reads'])
        else:
            self.fraction_of_reads = 1.0  # default: no downsampling
# all executables are located in bin/ subdirectory
self.assembler_path = os.path.join(os.environ['DIRNAME'], 'bin')
# where to output HTML with results
self.url_root = self.config['runinfo']['url_root']
# skip assembly (and run only QUAST) if contigs exist
self.quast_only = self.params.has_key('quastOnly')
# information will be printed to "info.json"
self.info = { 'params' : self.params, 'executedCommands' : [] }
if sample_id != '' and sample_seq != '':
self.info['sampleId'] = sample_id
self.info['sampleSeq'] = sample_seq
self.info['sampleName'] = self.sample_name
# Prints 'pluginconfig' section of 'startplugin.json'
def printAssemblyParameters(self):
print("AssemblerSPAdes run parameters:")
print(self.params)
def writeInfo(self, json_filename):
with open(json_filename, 'w+') as f:
json.dump(self.info, f, indent=4)
def runCommand(self, command, description=None):
if description:
print(description)
else:
print(command)
sys.stdout.flush()
os.system(command)
self.info['executedCommands'].append(command)
def runDownsampling(self):
print("\nSubsampling using Picard")
# downsampler = os.path.join(self.assembler_path, 'DownsampleSam.jar')
downsampler = "/opt/picard/picard-tools-current/picard.jar"
out = os.path.join(self.sample_output_dir, self.bam_file + "_scaled")
cmd = ("java -Xmx2g -jar {downsampler} "
"DownsampleSam "
"INPUT={self.bam_to_assemble} OUTPUT={out} "
"PROBABILITY={self.fraction_of_reads}").format(**locals())
self.runCommand(cmd)
cmd = ("mv {out} {self.bam_to_assemble}").format(**locals())
self.runCommand(cmd)
def execute(self):
self.printAssemblyParameters()
read_count_cmd = "samtools view -c " + self.bam_rel_path
read_count_process = subprocess.Popen(read_count_cmd, shell=True,
stdout=subprocess.PIPE)
num_reads = int(read_count_process.communicate()[0])
def tooFewReads():
if not self.params.has_key('min_reads'):
return False
self.min_reads = int(self.params['min_reads'])
return num_reads <= self.min_reads
print("%d reads in %s" % (num_reads, self.bam_file))
if tooFewReads():
print(("\tDoes not have more than %d reads. "
"Skipping this file") % (self.min_reads,))
return
if self.fraction_of_reads < 1:
self.runDownsampling()
# if self.params.has_key('runSpades'):
self.runSPAdes()
def runSPAdes(self):
if self.params.has_key('spadesversion'):
version = self.params['spadesversion']
else:
version = "3.1.0"
assert(version >= "3.0.0")
rel_path = os.path.join("SPAdes-%s-Linux" % version, "bin", "spades.py")
spades_path = os.path.join(self.assembler_path, rel_path)
output_dir = os.path.join(self.sample_name, "spades")
contigs_fn = os.path.join(output_dir, "contigs.fasta")
scaffolds_fn = os.path.join(output_dir, "scaffolds.fasta")
log_fn = os.path.join(output_dir, "spades.log")
skip_assembly = self.quast_only and fileExistsAndNonEmpty(contigs_fn)
if self.params.has_key('spadesOptions'):
user_options = self.params['spadesOptions']
else:
user_options = "-k 21,33,55,77,99"
spades_info = {'contigs' : contigs_fn,
'scaffolds' : scaffolds_fn,
'log' : log_fn,
'userOptions' : user_options,
'version' : version }
pid = os.getpid()
if not skip_assembly:
cmd = ("{spades_path} --iontorrent --tmp-dir /tmp/{pid} "
"-s {self.bam_to_assemble} -o {output_dir} "
"{user_options} > /dev/null").format(**locals())
print("Running AssemblerSPAdes - SPAdes %s" % version)
self.runCommand(cmd)
report_dir = self.createQuastReport(contigs_fn, output_dir)
spades_info['quastReportDir'] = report_dir
self.info['spades'] = spades_info
def createQuastReport(self, contigs_fn, output_dir):
version = "2.3"
rel_path = os.path.join("quast-%s" % version, "quast.py")
quast_path = os.path.join(self.assembler_path, rel_path)
# quast_reference = self.params['bgenome']
quast_reference = "None"
quast_results_dir = os.path.join(output_dir, "quast_results")
print("Running QUAST %s" % version)
reference_param = ("-R " + quast_reference) if quast_reference!="None" else " "
cmd = ("{quast_path} -o {quast_results_dir} "
"{reference_param} {contigs_fn}").format(**locals())
self.runCommand(cmd)
try:
if os.path.isfile(os.path.join(quast_results_dir, "report.html")):
return os.path.abspath(quast_results_dir)
else:
return None
except:
return None
if __name__ == "__main__":
if len(sys.argv) == 5:
sample_id = sys.argv[1]
sample_seq = sys.argv[2]
bam_file = sys.argv[3]
out_dir = sys.argv[4]
runner = AssemblerRunner(sample_id, sample_seq, bam_file)
runner.execute()
runner.writeInfo("%s/info_%s.%s.json" % (out_dir,sample_id, sample_seq))
else:
assert(len(sys.argv) == 3) # not a barcode run
bam_file = sys.argv[1]
out_dir = sys.argv[2]
# HACK: sample_name = '.' => essentially vanishes from all paths
runner = AssemblerRunner('', '', bam_file)
runner.execute()
runner.writeInfo("%s/info.json" % (out_dir))
| [
"os.path.exists",
"os.stat",
"subprocess.Popen",
"os.path.join",
"os.getpid",
"json.load",
"os.system",
"sys.stdout.flush",
"os.path.abspath",
"json.dump"
] | [((122, 146), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (136, 146), False, 'import os\n'), ((747, 794), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.sample_name'], {}), '(self.output_dir, self.sample_name)\n', (759, 794), False, 'import os\n'), ((856, 901), 'os.path.join', 'os.path.join', (['self.sample_name', 'self.bam_file'], {}), '(self.sample_name, self.bam_file)\n', (868, 901), False, 'import os\n'), ((980, 1028), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.bam_rel_path'], {}), '(self.output_dir, self.bam_rel_path)\n', (992, 1028), False, 'import os\n'), ((1323, 1365), 'os.path.join', 'os.path.join', (["os.environ['DIRNAME']", '"""bin"""'], {}), "(os.environ['DIRNAME'], 'bin')\n", (1335, 1365), False, 'import os\n'), ((2380, 2398), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2396, 2398), False, 'import sys\n'), ((2407, 2425), 'os.system', 'os.system', (['command'], {}), '(command)\n', (2416, 2425), False, 'import os\n'), ((2717, 2780), 'os.path.join', 'os.path.join', (['self.sample_output_dir', "(self.bam_file + '_scaled')"], {}), "(self.sample_output_dir, self.bam_file + '_scaled')\n", (2729, 2780), False, 'import os\n'), ((3282, 3350), 'subprocess.Popen', 'subprocess.Popen', (['read_count_cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(read_count_cmd, shell=True, stdout=subprocess.PIPE)\n', (3298, 3350), False, 'import subprocess\n'), ((4299, 4360), 'os.path.join', 'os.path.join', (["('SPAdes-%s-Linux' % version)", '"""bin"""', '"""spades.py"""'], {}), "('SPAdes-%s-Linux' % version, 'bin', 'spades.py')\n", (4311, 4360), False, 'import os\n'), ((4383, 4426), 'os.path.join', 'os.path.join', (['self.assembler_path', 'rel_path'], {}), '(self.assembler_path, rel_path)\n', (4395, 4426), False, 'import os\n'), ((4449, 4489), 'os.path.join', 'os.path.join', (['self.sample_name', '"""spades"""'], {}), "(self.sample_name, 'spades')\n", (4461, 4489), False, 'import os\n'), ((4511, 4552), 'os.path.join', 'os.path.join', (['output_dir', '"""contigs.fasta"""'], {}), "(output_dir, 'contigs.fasta')\n", (4523, 4552), False, 'import os\n'), ((4576, 4619), 'os.path.join', 'os.path.join', (['output_dir', '"""scaffolds.fasta"""'], {}), "(output_dir, 'scaffolds.fasta')\n", (4588, 4619), False, 'import os\n'), ((4637, 4675), 'os.path.join', 'os.path.join', (['output_dir', '"""spades.log"""'], {}), "(output_dir, 'spades.log')\n", (4649, 4675), False, 'import os\n'), ((5178, 5189), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5187, 5189), False, 'import os\n'), ((5785, 5831), 'os.path.join', 'os.path.join', (["('quast-%s' % version)", '"""quast.py"""'], {}), "('quast-%s' % version, 'quast.py')\n", (5797, 5831), False, 'import os\n'), ((5853, 5896), 'os.path.join', 'os.path.join', (['self.assembler_path', 'rel_path'], {}), '(self.assembler_path, rel_path)\n', (5865, 5896), False, 'import os\n'), ((6008, 6049), 'os.path.join', 'os.path.join', (['output_dir', '"""quast_results"""'], {}), "(output_dir, 'quast_results')\n", (6020, 6049), False, 'import os\n'), ((180, 197), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (187, 197), False, 'import os\n'), ((375, 388), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (384, 388), False, 'import json\n'), ((2188, 2221), 'json.dump', 'json.dump', (['self.info', 'f'], {'indent': '(4)'}), '(self.info, f, indent=4)\n', (2197, 2221), False, 'import json\n'), ((6378, 6424), 'os.path.join', 'os.path.join', (['quast_results_dir', '"""report.html"""'], {}), "(quast_results_dir, 'report.html')\n", (6390, 6424), False, 'import os\n'), ((6450, 6484), 'os.path.abspath', 'os.path.abspath', (['quast_results_dir'], {}), '(quast_results_dir)\n', (6465, 6484), False, 'import os\n')] |
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import logging
def logger_fn(name, filepath, level=logging.DEBUG):
""" Function for creating log manager
Args:
name: name for log manager
filepath: file path for log file
level: log level (CRITICAL > ERROR > WARNING > INFO > DEBUG)
Return:
log manager
"""
logger = logging.getLogger(name)
logger.setLevel(level)
sh = logging.StreamHandler(sys.stdout)
    fh = logging.FileHandler(filepath, mode='w')
# formatter = logging.Formatter('[%(asctime)s][%(levelname)s][%(filename)s][line:%(lineno)d] %(message)s')
# formatter = logging.Formatter('[%(asctime)s][%(filename)s][line:%(lineno)d] %(message)s')
formatter = logging.Formatter('[%(asctime)s] %(message)s')
"""
    %(levelno)s: numeric log level
    %(levelname)s: log level name
    %(pathname)s: path of the running program, i.e. sys.argv[0]
    %(filename)s: file name of the running program
    %(funcName)s: function that issued the log call
    %(lineno)d: line number of the log call
    %(asctime)s: timestamp of the log record
    %(thread)d: thread ID
    %(threadName)s: thread name
    %(process)d: process ID
    %(message)s: the log message
"""
sh.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(sh)
logger.addHandler(fh)
    return logger
| [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler",
"logging.FileHandler"
] | [((442, 465), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (459, 465), False, 'import logging\n'), ((497, 530), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (518, 530), False, 'import logging\n'), ((537, 576), 'logging.FileHandler', 'logging.FileHandler', (['filepath'], {'mode': '"""w"""'}), "(filepath, mode='w')\n", (556, 576), False, 'import logging\n'), ((794, 840), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] %(message)s"""'], {}), "('[%(asctime)s] %(message)s')\n", (811, 840), False, 'import logging\n')] |
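A brief usage sketch of the function above; the logger name and file path are arbitrary:

logger = logger_fn("train", "train.log", level=logging.INFO)
logger.info("epoch 1 finished")  # goes to stdout and to train.log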
from drf_extra_fields.geo_fields import PointField
from rest_framework import serializers
from phr.insteducativa.models import InstitucionEducativa
from phr.ubigeo.models import UbigeoDepartamento, UbigeoDistrito, UbigeoProvincia
class InstEducativaSerializer(serializers.ModelSerializer):
ubicacion = PointField(required=False)
departamento_nombre = serializers.SerializerMethodField()
provincia_nombre = serializers.SerializerMethodField()
distrito_nombre = serializers.SerializerMethodField()
class Meta:
model = InstitucionEducativa
fields = ('codigo_colegio', 'codigo_modular', 'nombre', 'ubigeo', 'direccion', 'nivel', 'nivel_descripcion',
'tipo', 'tipo_descripcion', 'nombre_ugel', 'establecimiento_renaes', 'establecimiento_nombre',
'ubicacion', 'departamento_nombre', 'provincia_nombre', 'distrito_nombre',)
def get_departamento_nombre(self, obj):
if obj.ubigeo:
try:
departamento = UbigeoDepartamento.objects.get(cod_ubigeo_inei_departamento=obj.ubigeo[:2])
return departamento.ubigeo_departamento
except UbigeoDepartamento.DoesNotExist:
return ''
def get_provincia_nombre(self, obj):
if obj.ubigeo:
try:
provincia = UbigeoProvincia.objects.get(cod_ubigeo_inei_provincia=obj.ubigeo[:4])
return provincia.ubigeo_provincia
except UbigeoProvincia.DoesNotExist:
return ''
def get_distrito_nombre(self, obj):
if obj.ubigeo:
try:
distrito = UbigeoDistrito.objects.get(cod_ubigeo_inei_distrito=obj.ubigeo)
return distrito.ubigeo_distrito
except UbigeoDistrito.DoesNotExist:
return ''
| [
"phr.ubigeo.models.UbigeoProvincia.objects.get",
"rest_framework.serializers.SerializerMethodField",
"phr.ubigeo.models.UbigeoDepartamento.objects.get",
"drf_extra_fields.geo_fields.PointField",
"phr.ubigeo.models.UbigeoDistrito.objects.get"
] | [((309, 335), 'drf_extra_fields.geo_fields.PointField', 'PointField', ([], {'required': '(False)'}), '(required=False)\n', (319, 335), False, 'from drf_extra_fields.geo_fields import PointField\n'), ((362, 397), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (395, 397), False, 'from rest_framework import serializers\n'), ((421, 456), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (454, 456), False, 'from rest_framework import serializers\n'), ((479, 514), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (512, 514), False, 'from rest_framework import serializers\n'), ((1009, 1084), 'phr.ubigeo.models.UbigeoDepartamento.objects.get', 'UbigeoDepartamento.objects.get', ([], {'cod_ubigeo_inei_departamento': 'obj.ubigeo[:2]'}), '(cod_ubigeo_inei_departamento=obj.ubigeo[:2])\n', (1039, 1084), False, 'from phr.ubigeo.models import UbigeoDepartamento, UbigeoDistrito, UbigeoProvincia\n'), ((1329, 1398), 'phr.ubigeo.models.UbigeoProvincia.objects.get', 'UbigeoProvincia.objects.get', ([], {'cod_ubigeo_inei_provincia': 'obj.ubigeo[:4]'}), '(cod_ubigeo_inei_provincia=obj.ubigeo[:4])\n', (1356, 1398), False, 'from phr.ubigeo.models import UbigeoDepartamento, UbigeoDistrito, UbigeoProvincia\n'), ((1632, 1695), 'phr.ubigeo.models.UbigeoDistrito.objects.get', 'UbigeoDistrito.objects.get', ([], {'cod_ubigeo_inei_distrito': 'obj.ubigeo'}), '(cod_ubigeo_inei_distrito=obj.ubigeo)\n', (1658, 1695), False, 'from phr.ubigeo.models import UbigeoDepartamento, UbigeoDistrito, UbigeoProvincia\n')] |
## list
array = [1,2,3,"four","five","six",True]
print(array[:3])
dust = {
'영등포구': 50,
'강남구' : 40
}
## Dictionary
print(dust['영등포구'])
dust2 = dict(abc=50)
print(dust2)
## Randomly pick 3 coffee menu items
import random
coffee = ['아아','뜨아','라떼','믹스','핫초코']
coffee_fav = coffee[1:4]  # print a slice of my favorite menu items
print(coffee_fav)
ls = []
while True:
a = random.choice(coffee)
if a not in ls:
ls.append(a)
if len(ls) ==3:
break
print(ls)
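# Note: random.sample(coffee, 3) would give 3 unique picks without the loop,
# assuming uniqueness is all we need.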
### range
b= list(range(1,10))
print(b)
### Randomly print today's lunch restaurant and its phone number
import random
manu = ['20층','양자강','김밥카페','순남시래기','바나프레소']
phone_book = {
'20층' : '02-1233-4444',
'양자강' : '02-4444-5555',
'김밥카페' : '02-6666-7777',
'순남시래기' : '02-8888-9999',
'바나프레소' : '02-1000-2222'
}
today_manu = random.choice(manu)
today_num = phone_book[today_manu]
print("오늘의 메뉴:{}, 전화번호는:{}".format(today_manu, today_num))
# print(dir(random))
| [
"random.choice"
] | [((748, 767), 'random.choice', 'random.choice', (['manu'], {}), '(manu)\n', (761, 767), False, 'import random\n'), ((344, 365), 'random.choice', 'random.choice', (['coffee'], {}), '(coffee)\n', (357, 365), False, 'import random\n')] |
from gevent import monkey
monkey.patch_all()
import sys
import purpledrop.server as server
from purpledrop.purpledrop import list_purpledrop_devices, PurpleDropDevice, PurpleDropController
devices = list_purpledrop_devices()
if len(devices) == 0:
    print("No PurpleDrop USB device found")
    sys.exit(1)
elif len(devices) > 1:
    print("Multiple PurpleDrop devices found. Please amend the software to allow selection by serial number")
for d in devices:
print(f"{d.device}: Serial {d.serial_number}")
sys.exit(1)
dev = PurpleDropDevice(devices[0].device)
controller = PurpleDropController(dev)
server.run_server(controller, "localhost:5000")
| [
"purpledrop.server.run_server",
"purpledrop.purpledrop.PurpleDropDevice",
"purpledrop.purpledrop.list_purpledrop_devices",
"gevent.monkey.patch_all",
"purpledrop.purpledrop.PurpleDropController",
"sys.exit"
] | [((27, 45), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (43, 45), False, 'from gevent import monkey\n'), ((201, 226), 'purpledrop.purpledrop.list_purpledrop_devices', 'list_purpledrop_devices', ([], {}), '()\n', (224, 226), False, 'from purpledrop.purpledrop import list_purpledrop_devices, PurpleDropDevice, PurpleDropController\n'), ((540, 575), 'purpledrop.purpledrop.PurpleDropDevice', 'PurpleDropDevice', (['devices[0].device'], {}), '(devices[0].device)\n', (556, 575), False, 'from purpledrop.purpledrop import list_purpledrop_devices, PurpleDropDevice, PurpleDropController\n'), ((589, 614), 'purpledrop.purpledrop.PurpleDropController', 'PurpleDropController', (['dev'], {}), '(dev)\n', (609, 614), False, 'from purpledrop.purpledrop import list_purpledrop_devices, PurpleDropDevice, PurpleDropController\n'), ((616, 663), 'purpledrop.server.run_server', 'server.run_server', (['controller', '"""localhost:5000"""'], {}), "(controller, 'localhost:5000')\n", (633, 663), True, 'import purpledrop.server as server\n'), ((298, 309), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (306, 309), False, 'import sys\n'), ((522, 533), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (530, 533), False, 'import sys\n')] |
from typing import Any
from urllib.parse import urlparse
from rediscluster import RedisCluster
from .redis import RedisStorage
class RedisClusterStorage(RedisStorage):
"""
Rate limit storage with redis cluster as backend.
Depends on `redis-py-cluster` library.
"""
@classmethod
def from_uri(cls, uri: str, **options: Any) -> "RedisClusterStorage":
"""
:param uri: URI of the form `redis+cluster://[:password]@host:port,host:port`
:param options: All remaining keyword arguments are passed directly to the constructor
of :class:`rediscluster.RedisCluster`.
"""
        parsed_uri = urlparse(uri)
        cluster_hosts = []
        # The netloc may carry credentials (":password@..."); keep only the host list.
        for loc in parsed_uri.netloc.rpartition("@")[-1].split(","):
            host, port = loc.split(":")
            cluster_hosts.append({"host": host, "port": int(port)})
        if parsed_uri.password:
            options.setdefault("password", parsed_uri.password)
        options.setdefault("max_connections", 1000)
        options["startup_nodes"] = cluster_hosts
client = RedisCluster(**options)
return cls(client)
def reset(self) -> None:
"""
Redis Clusters are sharded and deleting across shards
can't be done atomically. Because of this, this reset loops over all
keys that are prefixed with 'LIMITER' and calls delete on them, one at
a time.
.. warning::
This operation was not tested with extremely large data sets.
On a large production based system, care should be taken with its
usage as it could be slow on very large data sets.
"""
keys = self._client.keys("LIMITER*")
for key in keys:
self._client.delete(key.decode("utf-8"))
__all__ = [
"RedisClusterStorage",
]
| [
"rediscluster.RedisCluster",
"urllib.parse.urlparse"
] | [((668, 681), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (676, 681), False, 'from urllib.parse import urlparse\n'), ((986, 1009), 'rediscluster.RedisCluster', 'RedisCluster', ([], {}), '(**options)\n', (998, 1009), False, 'from rediscluster import RedisCluster\n')] |
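A usage sketch matching the documented URI form; the hosts and ports are placeholders, and any extra keyword arguments are forwarded to rediscluster.RedisCluster:

storage = RedisClusterStorage.from_uri("redis+cluster://localhost:7000,localhost:7001")
storage.reset()  # wipes all LIMITER* keys, one delete per key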
def alien_order(words):
# Underspecified input 0
if not words:
return []
# Underspecified input 1
if len(words) == 1:
return "".join(sorted(set(list(words[0]))))
nodes = []
adj_list = []
chars = set()
# 1. Take each word pair
for i, word1 in enumerate(words[:-1]):
chars.union(set([ch for ch in word1]))
len1 = len(word1)
for j, word2 in enumerate(words[i:]):
chars.union(set([ch for ch in word2]))
len2 = len(word2)
# 2. Find first character which differs
for k in range(min(len1, len2)):
ch1 = word1[k]
# 3.1. Optionally register node1 for ch1
if ch1 not in nodes:
nodes.append(ch1)
adj_list.append([])
node1 = nodes.index(ch1)
# 3.2. Optionally register node2 for ch2
ch2 = word2[k]
if ch2 not in nodes:
nodes.append(ch2)
adj_list.append([])
node2 = nodes.index(ch2)
if ch1 != ch2:
# Means a graph edge
# 3.3. Check if invalid (direct circle)
if node1 in adj_list[node2]:
return ""
# 3.4. Register edge
if node2 not in adj_list[node1]:
adj_list[node1].append(node2)
break
left_out = chars - set(nodes)
for ch in left_out:
nodes.append(ch)
adj_list.append([])
n = len(nodes)
# Underspecified input 2
if not adj_list or all(not l for l in adj_list):
return "".join(nodes)
print(nodes, adj_list)
order = [0] * n
# 4. Topological sort - need iterative
# 4.1 Find a good starting point for underspecified cases
for start, ch in enumerate(nodes):
if adj_list[start] and not order[start]:
visited = [False] * n
q = [(start, 1)]
while q:
v, level = q.pop()
order[v] -= level
for neighbor in adj_list[v]:
if not visited[neighbor]:
q.append((neighbor, level + 1))
else:
order[v] -= level
    # 5. Non-involved (underspecified) characters are deprioritized
for i, o in enumerate(order):
if not o:
order[i] = -100000
    # 6. Construct the alphabet string
    ordered = sorted(zip(nodes, order), key=lambda x: -x[1])
    print(ordered)
abc = ""
for v in ordered:
abc += v[0]
return abc
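# For cross-checking, a minimal Kahn's-algorithm sketch over the same
# nodes/adj_list representation built above; an illustrative alternative,
# not the heuristic under test:
def kahn_order(nodes, adj_list):
    indegree = [0] * len(nodes)
    for edges in adj_list:
        for v in edges:
            indegree[v] += 1
    queue = [i for i, d in enumerate(indegree) if d == 0]
    out = []
    while queue:
        u = queue.pop(0)
        out.append(nodes[u])
        for v in adj_list[u]:
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    # emitting fewer than len(nodes) characters means a cycle -> no valid order
    return "".join(out) if len(out) == len(nodes) else ""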
import pytest
@pytest.mark.parametrize("words,expected", [
(["zy", "zx"], "yxz"),
(["wrt", "wrf", "er", "ett", "rftt"], "wertf"),
(["ac", "ab", "b"], "acb"),
])
def test_alien_dict(words, expected):
assert(alien_order(words) == expected)
pytest.main()
| [
"pytest.mark.parametrize",
"pytest.main"
] | [((2731, 2877), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""words,expected"""', "[(['zy', 'zx'], 'yxz'), (['wrt', 'wrf', 'er', 'ett', 'rftt'], 'wertf'), ([\n 'ac', 'ab', 'b'], 'acb')]"], {}), "('words,expected', [(['zy', 'zx'], 'yxz'), (['wrt',\n 'wrf', 'er', 'ett', 'rftt'], 'wertf'), (['ac', 'ab', 'b'], 'acb')])\n", (2754, 2877), False, 'import pytest\n'), ((2972, 2985), 'pytest.main', 'pytest.main', ([], {}), '()\n', (2983, 2985), False, 'import pytest\n')] |
from pyspark import Row
from pyspark.sql import SparkSession
"""
Row class introduction:
Row class extends the tuple hence it takes variable number of arguments, Row() is used to create the row object.
Once the row object created, we can retrieve the data from Row using index similar to tuple.
Key Points of Row Class:
- Earlier to Spark 3.0, when used Row class with named arguments, the fields are sorted by name.
- Since 3.0, Rows created from named arguments are not sorted alphabetically instead they will be ordered in
the position entered.
- To enable sorting by names, set the environment variable PYSPARK_ROW_FIELD_SORTING_ENABLED to true.
- Row class provides a way to create a struct-type column as well.
Note that, here we called the elements in a row "field", not column. Because column only make sense in a dataframe.
"""
"""Exp1 Row Object creation
Row object can have primitive field, array field, map field and struct field
"""
def exp1():
    # We can create a row object without giving field names, and access fields by index
print("Exp1 output simple row object without name: ")
row1 = Row("Alice", 18)
print("name:{},age:{}".format(row1[0], str(row1[1])))
    # We can also specify field names when creating a row object; then we can access each field by its name
print("Exp1 output row object with field name: ")
row2 = Row(name="Bob", age=38)
print("name:{},age:{}".format(row2.name, str(row2.age)))
# Row object can have primitive field, array field, map field and struct field
# To access struct field, use ".", to access array field use [index], to access map field use .get(key)
row3 = Row(name=Row(fname="Alice", lname="Liu"), score=[10, 20, 40], properties={"hair": "black", "eye": "bleu"})
print("first_name:{}, last_name:{}, 1st_score:{}, eye:{}".format(row3.name.fname, row3.name.lname, row3.score[0],
row3.properties.get("eye")))
""" Exp2 : Create custom class from Row
We can create a custom class by using Row(*fieldName)
"""
def exp2():
#
Student = Row("name", "age")
s1 = Student("alice", 18)
s2 = Student("Bob", 38)
print("Student1: name={},age={}".format(s1.name, str(s1.age)))
print("Student2: name={},age={}".format(s2.name, str(s2.age)))
""" Exp3: Create RDD by using row
We can create an RDD by using a list of Rows. rdd.collect() will return a list of row.
"""
def exp3(spark):
# data is a list of rows
data = [Row(name="James,,Smith", lang=["Java", "Scala", "C++"], state="CA"),
Row(name="Michael,Rose,", lang=["Spark", "Java", "C++"], state="NJ"),
Row(name="Robert,,Williams", lang=["CSharp", "VB"], state="NV")]
# parallelize turn the list to rdd
rdd = spark.sparkContext.parallelize(data)
print("Exp3 rdd has type:{}".format(str(type(rdd))))
# collect turn the rdd back to list
rowList = rdd.collect()
print("Exp3 row has type:{}".format(str(type(rowList))))
print("Exp3 row has value:")
for row in rowList:
print("name: {}, lang: {}, state: {}".format(row.name, str(row.lang), row.state))
""" Exp4 : Create a dataframe by using row
"""
def exp4(spark):
    # we use the custom class to create a list of Student rows
    # the advantage of a custom class is that we don't need to repeat the field names in each row.
Student = Row("name", "score", "properties")
data = [Student(Row(fname="James", lname="Smith"), [10, 20, 30], {'hair': 'black', 'eye': 'brown'}),
Student(Row(fname="Michael", lname="Rose"), [20, 30, 40], {'hair': 'brown', 'eye': 'black'}),
Student(Row(fname="Robert", lname="Williams"), [30, 20, 50], {'hair': 'black', 'eye': 'blue'})]
df = spark.createDataFrame(data)
df.printSchema()
df.show(truncate=False)
# we can access these field
df.select(df.name.fname.alias("first_name"), df.name.lname.alias("last_name"), df.score.getItem(0).alias("score_0"),
df.properties.getItem("hair").alias("hair")).show()
def main():
spark = SparkSession.builder \
.master("local[2]") \
.appName("Row class example") \
.config("spark.executor.memory", "4g") \
.getOrCreate()
# run exp1
# exp1()
# run exp2
# exp2()
# run exp3
# exp3(spark)
# run exp4
exp4(spark)
if __name__ == "__main__":
main()
| [
"pyspark.Row",
"pyspark.sql.SparkSession.builder.master"
] | [((1134, 1150), 'pyspark.Row', 'Row', (['"""Alice"""', '(18)'], {}), "('Alice', 18)\n", (1137, 1150), False, 'from pyspark import Row\n'), ((1378, 1401), 'pyspark.Row', 'Row', ([], {'name': '"""Bob"""', 'age': '(38)'}), "(name='Bob', age=38)\n", (1381, 1401), False, 'from pyspark import Row\n'), ((2124, 2142), 'pyspark.Row', 'Row', (['"""name"""', '"""age"""'], {}), "('name', 'age')\n", (2127, 2142), False, 'from pyspark import Row\n'), ((3402, 3436), 'pyspark.Row', 'Row', (['"""name"""', '"""score"""', '"""properties"""'], {}), "('name', 'score', 'properties')\n", (3405, 3436), False, 'from pyspark import Row\n'), ((2522, 2589), 'pyspark.Row', 'Row', ([], {'name': '"""James,,Smith"""', 'lang': "['Java', 'Scala', 'C++']", 'state': '"""CA"""'}), "(name='James,,Smith', lang=['Java', 'Scala', 'C++'], state='CA')\n", (2525, 2589), False, 'from pyspark import Row\n'), ((2603, 2671), 'pyspark.Row', 'Row', ([], {'name': '"""Michael,Rose,"""', 'lang': "['Spark', 'Java', 'C++']", 'state': '"""NJ"""'}), "(name='Michael,Rose,', lang=['Spark', 'Java', 'C++'], state='NJ')\n", (2606, 2671), False, 'from pyspark import Row\n'), ((2685, 2748), 'pyspark.Row', 'Row', ([], {'name': '"""Robert,,Williams"""', 'lang': "['CSharp', 'VB']", 'state': '"""NV"""'}), "(name='Robert,,Williams', lang=['CSharp', 'VB'], state='NV')\n", (2688, 2748), False, 'from pyspark import Row\n'), ((1675, 1706), 'pyspark.Row', 'Row', ([], {'fname': '"""Alice"""', 'lname': '"""Liu"""'}), "(fname='Alice', lname='Liu')\n", (1678, 1706), False, 'from pyspark import Row\n'), ((3457, 3490), 'pyspark.Row', 'Row', ([], {'fname': '"""James"""', 'lname': '"""Smith"""'}), "(fname='James', lname='Smith')\n", (3460, 3490), False, 'from pyspark import Row\n'), ((3562, 3596), 'pyspark.Row', 'Row', ([], {'fname': '"""Michael"""', 'lname': '"""Rose"""'}), "(fname='Michael', lname='Rose')\n", (3565, 3596), False, 'from pyspark import Row\n'), ((3668, 3705), 'pyspark.Row', 'Row', ([], {'fname': '"""Robert"""', 'lname': '"""Williams"""'}), "(fname='Robert', lname='Williams')\n", (3671, 3705), False, 'from pyspark import Row\n'), ((4087, 4126), 'pyspark.sql.SparkSession.builder.master', 'SparkSession.builder.master', (['"""local[2]"""'], {}), "('local[2]')\n", (4114, 4126), False, 'from pyspark.sql import SparkSession\n')] |
# models.py
from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
from app import db
class District(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(120), unique=True)
coordinates = db.Column(db.String(120), nullable=True)
def __init__(self, name, coordinates=None):
self.name = name
self.coordinates = coordinates
def __repr__(self):
return self.name
class Hospital(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(120), unique=True)
address = db.Column(db.String(120), nullable=True)
phone = db.Column(db.String(120), nullable=True)
coordinates = db.Column(db.String(120), nullable=True)
district_id = db.Column(db.Integer, db.ForeignKey('district.id'))
district = db.relationship('District',
backref=db.backref('hospitals', lazy='dynamic', uselist=True))
def __init__(self, name, district, address=None, phone=None, coordinates=None):
self.name = name
self.district = district
self.address = address
self.phone = phone
self.coordinates = coordinates
def __repr__(self):
return self.name
class Disease(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, unique=True)
def __init__(self, name, description=None):
self.name = name
        self.description = description  # note: not a mapped column, so this value is not persisted
class DiseasePopulation(db.Model):
id = db.Column(db.Integer, primary_key=True)
year = db.Column(db.Integer, nullable=True)
children = db.Column(db.Integer, nullable=True)
children_observed = db.Column(db.Integer, nullable=True)
adults = db.Column(db.Integer, nullable=True)
adults_observed = db.Column(db.Integer, nullable=True)
hospital_id = db.Column(db.Integer, db.ForeignKey('hospital.id'))
hospital = db.relationship('Hospital',
backref=db.backref('population', lazy='dynamic',
uselist=True))
disease_id = db.Column(db.Integer, db.ForeignKey('disease.id'))
disease = db.relationship('Disease',
backref=db.backref('population', lazy='dynamic', uselist=True))
def __init__(self, disease, hospital, year, adults=0, adults_observed=0,
children=0, children_observed=0):
self.disease = disease
self.hospital = hospital
self.year = int(year) if year else 0
self.children = int(children)
self.children_observed = int(children_observed)
self.adults = int(adults)
self.adults_observed = int(adults_observed)
self.all = self.children + self.adults
self.all_observed = self.children_observed + self.adults_observed
def __repr__(self):
return '{0}{1}'.format(self.name, self.year)
class Population(db.Model):
id = db.Column(db.Integer, primary_key=True)
year = db.Column(db.Integer)
all = db.Column(db.Integer)
men = db.Column(db.Integer)
women = db.Column(db.Integer)
children = db.Column(db.Integer)
adults = db.Column(db.Integer)
employable = db.Column(db.Integer)
employable_men = db.Column(db.Integer)
employable_women = db.Column(db.Integer)
district_id = db.Column(db.Integer, db.ForeignKey('district.id'))
district = db.relationship('District',
backref=db.backref('population', lazy='dynamic', uselist=True))
def __init__(self, year, district,
men=0, women=0, children=0, employable_men=0, employable_women=0, district_id=0):
self.district = district
self.year = int(year)
self.men = int(men)
self.women = int(women)
self.children = int(children)
self.employable_men = int(employable_men)
self.employable_women = int(employable_women)
self.all = self.men + self.women
self.adults = self.all - self.children
self.employable = self.employable_men + self.employable_women
def __repr__(self):
return '{}:{}'.format(self.year, self.all)
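# Illustrative use of the derived fields computed in Population.__init__ above
# (a minimal sketch: the district name and all numbers are made up, and nothing
# is committed to the database):
def _population_demo():
    d = District(name="Central")
    p = Population(2020, d, men=500, women=520, children=230,
                   employable_men=310, employable_women=290)
    return p.all, p.adults, p.employable  # (1020, 790, 600)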
| [
"app.db.String",
"os.path.dirname",
"app.db.backref",
"app.db.Column",
"app.db.ForeignKey"
] | [((81, 93), 'os.path.dirname', 'dir', (['path[0]'], {}), '(path[0])\n', (84, 93), True, 'from os.path import dirname as dir\n'), ((152, 191), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (161, 191), False, 'from app import db\n'), ((501, 540), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (510, 540), False, 'from app import db\n'), ((1292, 1331), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1301, 1331), False, 'from app import db\n'), ((1343, 1374), 'app.db.Column', 'db.Column', (['db.Text'], {'unique': '(True)'}), '(db.Text, unique=True)\n', (1352, 1374), False, 'from app import db\n'), ((1534, 1573), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1543, 1573), False, 'from app import db\n'), ((1586, 1622), 'app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(True)'}), '(db.Integer, nullable=True)\n', (1595, 1622), False, 'from app import db\n'), ((1638, 1674), 'app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(True)'}), '(db.Integer, nullable=True)\n', (1647, 1674), False, 'from app import db\n'), ((1699, 1735), 'app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(True)'}), '(db.Integer, nullable=True)\n', (1708, 1735), False, 'from app import db\n'), ((1749, 1785), 'app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(True)'}), '(db.Integer, nullable=True)\n', (1758, 1785), False, 'from app import db\n'), ((1808, 1844), 'app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(True)'}), '(db.Integer, nullable=True)\n', (1817, 1844), False, 'from app import db\n'), ((2969, 3008), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (2978, 3008), False, 'from app import db\n'), ((3020, 3041), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (3029, 3041), False, 'from app import db\n'), ((3052, 3073), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (3061, 3073), False, 'from app import db\n'), ((3084, 3105), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (3093, 3105), False, 'from app import db\n'), ((3118, 3139), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (3127, 3139), False, 'from app import db\n'), ((3155, 3176), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (3164, 3176), False, 'from app import db\n'), ((3190, 3211), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (3199, 3211), False, 'from app import db\n'), ((3229, 3250), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (3238, 3250), False, 'from app import db\n'), ((3272, 3293), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (3281, 3293), False, 'from app import db\n'), ((3317, 3338), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (3326, 3338), False, 'from app import db\n'), ((213, 227), 'app.db.String', 'db.String', (['(120)'], {}), '(120)\n', (222, 227), False, 'from app import db\n'), ((270, 284), 'app.db.String', 'db.String', (['(120)'], {}), '(120)\n', (279, 284), False, 'from app import db\n'), ((562, 576), 'app.db.String', 'db.String', (['(120)'], {}), '(120)\n', (571, 576), False, 'from app import db\n'), ((615, 629), 'app.db.String', 'db.String', (['(120)'], {}), '(120)\n', (624, 629), False, 'from app import db\n'), ((668, 682), 'app.db.String', 'db.String', (['(120)'], {}), '(120)\n', (677, 682), False, 'from app import db\n'), ((727, 741), 'app.db.String', 'db.String', (['(120)'], {}), '(120)\n', (736, 741), False, 'from app import db\n'), ((799, 827), 'app.db.ForeignKey', 'db.ForeignKey', (['"""district.id"""'], {}), "('district.id')\n", (812, 827), False, 'from app import db\n'), ((1886, 1914), 'app.db.ForeignKey', 'db.ForeignKey', (['"""hospital.id"""'], {}), "('hospital.id')\n", (1899, 1914), False, 'from app import db\n'), ((2146, 2173), 'app.db.ForeignKey', 'db.ForeignKey', (['"""disease.id"""'], {}), "('disease.id')\n", (2159, 2173), False, 'from app import db\n'), ((3380, 3408), 'app.db.ForeignKey', 'db.ForeignKey', (['"""district.id"""'], {}), "('district.id')\n", (3393, 3408), False, 'from app import db\n'), ((911, 964), 'app.db.backref', 'db.backref', (['"""hospitals"""'], {'lazy': '"""dynamic"""', 'uselist': '(True)'}), "('hospitals', lazy='dynamic', uselist=True)\n", (921, 964), False, 'from app import db\n'), ((1999, 2053), 'app.db.backref', 'db.backref', (['"""population"""'], {'lazy': '"""dynamic"""', 'uselist': '(True)'}), "('population', lazy='dynamic', uselist=True)\n", (2009, 2053), False, 'from app import db\n'), ((2254, 2308), 'app.db.backref', 'db.backref', (['"""population"""'], {'lazy': '"""dynamic"""', 'uselist': '(True)'}), "('population', lazy='dynamic', uselist=True)\n", (2264, 2308), False, 'from app import db\n'), ((3492, 3546), 'app.db.backref', 'db.backref', (['"""population"""'], {'lazy': '"""dynamic"""', 'uselist': '(True)'}), "('population', lazy='dynamic', uselist=True)\n", (3502, 3546), False, 'from app import db\n')]
#!/usr/bin/env python3
import os, sys
dr = os.getenv('DISBATCH_ROOT')
if dr and dr not in sys.path:
sys.path.append(dr)
try:
import disbatch
except:
print(f'disBatch environment is incomplete. Check:\n\tDISBATCH_ROOT {dr!r}.', file=sys.stderr)
sys.exit(1)
dbExec = os.path.join(os.path.dirname(disbatch.__file__), 'disBatch.py')
os.execv(sys.executable, [sys.executable, dbExec] + sys.argv[1:])
| [
"os.getenv",
"os.path.dirname",
"os.execv",
"sys.exit",
"sys.path.append"
] | [((45, 71), 'os.getenv', 'os.getenv', (['"""DISBATCH_ROOT"""'], {}), "('DISBATCH_ROOT')\n", (54, 71), False, 'import os, sys\n'), ((357, 422), 'os.execv', 'os.execv', (['sys.executable', '([sys.executable, dbExec] + sys.argv[1:])'], {}), '(sys.executable, [sys.executable, dbExec] + sys.argv[1:])\n', (365, 422), False, 'import os, sys\n'), ((106, 125), 'sys.path.append', 'sys.path.append', (['dr'], {}), '(dr)\n', (121, 125), False, 'import os, sys\n'), ((306, 340), 'os.path.dirname', 'os.path.dirname', (['disbatch.__file__'], {}), '(disbatch.__file__)\n', (321, 340), False, 'import os, sys\n'), ((271, 282), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (279, 282), False, 'import os, sys\n')] |
from aiohttp import web
from prometheus_client import generate_latest
from prometheus_client.core import REGISTRY
def metric_to_text():
return generate_latest(REGISTRY).decode('utf-8')
async def handle_metrics(_request):
return web.Response(text=metric_to_text())
async def handle_health(_request):
health_text = 'ok'
health_status = 200
return web.Response(status=health_status, text=health_text)
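# A minimal wiring sketch for the handlers above; the route paths are
# assumptions, not part of the original module:
def create_app() -> web.Application:
    app = web.Application()
    app.add_routes([
        web.get('/metrics', handle_metrics),
        web.get('/health', handle_health),
    ])
    return app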
| [
"aiohttp.web.Response",
"prometheus_client.generate_latest"
] | [((372, 424), 'aiohttp.web.Response', 'web.Response', ([], {'status': 'health_status', 'text': 'health_text'}), '(status=health_status, text=health_text)\n', (384, 424), False, 'from aiohttp import web\n'), ((150, 175), 'prometheus_client.generate_latest', 'generate_latest', (['REGISTRY'], {}), '(REGISTRY)\n', (165, 175), False, 'from prometheus_client import generate_latest\n')] |
from django.http.response import Http404
from django.http import HttpResponse
from blogs.helpers import unmark, clean_text
from blogs.views.blog import resolve_address
from feedgen.feed import FeedGenerator
import mistune
def feed(request):
blog = resolve_address(request)
if not blog:
raise Http404("Blog does not exist")
all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')
fg = FeedGenerator()
fg.id(blog.useful_domain())
fg.author({'name': blog.subdomain, 'email': 'hidden'})
fg.title(blog.title)
fg.subtitle(blog.meta_description or clean_text(unmark(blog.content)[:160]) or blog.title)
fg.link(href=f"{blog.useful_domain()}/", rel='alternate')
for post in all_posts:
fe = fg.add_entry()
fe.id(f"{blog.useful_domain()}/{post.slug}/")
fe.title(post.title)
fe.author({'name': blog.subdomain, 'email': 'hidden'})
fe.link(href=f"{blog.useful_domain()}/{post.slug}/")
fe.content(clean_text(mistune.html(post.content)), type="html")
fe.published(post.published_date)
fe.updated(post.published_date)
if request.GET.get('type') == 'rss':
fg.link(href=f"{blog.useful_domain()}/feed/?type=rss", rel='self')
rssfeed = fg.rss_str(pretty=True)
return HttpResponse(rssfeed, content_type='application/rss+xml')
else:
fg.link(href=f"{blog.useful_domain()}/feed/", rel='self')
atomfeed = fg.atom_str(pretty=True)
return HttpResponse(atomfeed, content_type='application/atom+xml')
| [
"django.http.HttpResponse",
"blogs.views.blog.resolve_address",
"mistune.html",
"blogs.helpers.unmark",
"django.http.response.Http404",
"feedgen.feed.FeedGenerator"
] | [((256, 280), 'blogs.views.blog.resolve_address', 'resolve_address', (['request'], {}), '(request)\n', (271, 280), False, 'from blogs.views.blog import resolve_address\n'), ((449, 464), 'feedgen.feed.FeedGenerator', 'FeedGenerator', ([], {}), '()\n', (462, 464), False, 'from feedgen.feed import FeedGenerator\n'), ((313, 343), 'django.http.response.Http404', 'Http404', (['"""Blog does not exist"""'], {}), "('Blog does not exist')\n", (320, 343), False, 'from django.http.response import Http404\n'), ((1329, 1386), 'django.http.HttpResponse', 'HttpResponse', (['rssfeed'], {'content_type': '"""application/rss+xml"""'}), "(rssfeed, content_type='application/rss+xml')\n", (1341, 1386), False, 'from django.http import HttpResponse\n'), ((1522, 1581), 'django.http.HttpResponse', 'HttpResponse', (['atomfeed'], {'content_type': '"""application/atom+xml"""'}), "(atomfeed, content_type='application/atom+xml')\n", (1534, 1581), False, 'from django.http import HttpResponse\n'), ((1031, 1057), 'mistune.html', 'mistune.html', (['post.content'], {}), '(post.content)\n', (1043, 1057), False, 'import mistune\n'), ((633, 653), 'blogs.helpers.unmark', 'unmark', (['blog.content'], {}), '(blog.content)\n', (639, 653), False, 'from blogs.helpers import unmark, clean_text\n')] |
from time import strftime, gmtime, time
from collections import defaultdict
import tensorflow as tf
from utility.aggregator import Aggregator
from utility.display import pwc
def timeit(func, *args, name=None, to_print=True,
return_duration=False, **kwargs):
start_time = gmtime()
start = time()
result = func(*args, **kwargs)
end = time()
end_time = gmtime()
if to_print:
pwc(f'{name if name else func.__name__}: '
f'Start "{strftime("%d %b %H:%M:%S", start_time)}"',
f'End "{strftime("%d %b %H:%M:%S", end_time)}" '
f'Duration "{end - start:.3g}s"', color='blue')
if return_duration:
return end - start, result
else:
return result
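# Illustrative use of `timeit` above (the workload is an arbitrary example):
def _timeit_demo():
    duration, total = timeit(sum, range(1_000_000), name='sum-demo',
                           to_print=False, return_duration=True)
    return duration, total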
class Timer:
aggregators = defaultdict(Aggregator)
def __init__(self, summary_name, period=None, mode='average', to_record=True):
self._to_log = to_record
if self._to_log:
self._summary_name = summary_name
self._period = period
assert mode in ['average', 'sum']
self._mode = mode
def __enter__(self):
if self._to_log:
self._start = time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._to_log:
duration = time() - self._start
aggregator = self.aggregators[self._summary_name]
aggregator.add(duration)
if self._period is not None and aggregator.count >= self._period:
if self._mode == 'average':
duration = aggregator.average()
duration = (f'{duration*1000:.3g}ms' if duration < 1e-1
else f'{duration:.3g}s')
pwc(f'{self._summary_name} duration: "{duration}" averaged over {self._period} times', color='blue')
aggregator.reset()
else:
duration = aggregator.sum
pwc(f'{self._summary_name} duration: "{duration}" for {aggregator.count} times', color='blue')
def reset(self):
aggregator = self.aggregators[self._summary_name]
aggregator.reset()
def average(self):
return self.aggregators[self._summary_name].average()
def last(self):
return self.aggregators[self._summary_name].last
def total(self):
return self.aggregators[self._summary_name].total
class TBTimer:
aggregators = defaultdict(Aggregator)
def __init__(self, summary_name, period=1, to_record=True, print_terminal_info=False):
self._to_log = to_record
if self._to_log:
self._summary_name = summary_name
self._period = period
self._print_terminal_info = print_terminal_info
def __enter__(self):
if self._to_log:
self._start = time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._to_log:
duration = time() - self._start
aggregator = self.aggregators[self._summary_name]
aggregator.add(duration)
if aggregator.count >= self._period:
duration = aggregator.average()
step = tf.summary.experimental.get_step()
tf.summary.scalar(f'timer/{self._summary_name}', duration, step=step)
aggregator.reset()
if self._print_terminal_info:
pwc(f'{self._summary_name} duration: "{duration}" averaged over {self._period} times', color='blue')
class LoggerTimer:
def __init__(self, logger, summary_name, to_record=True):
self._to_log = to_record
if self._to_log:
self._logger = logger
self._summary_name = summary_name
def __enter__(self):
if self._to_log:
self._start = time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._to_log:
duration = time() - self._start
self._logger.store(**{self._summary_name: duration})
class Every:
def __init__(self, period, start=0):
self._period = period
self._next = start
def __call__(self, step):
if step >= self._next:
while step >= self._next:
self._next += self._period
return True
return False
def step(self):
return self._next - self._period
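# Illustrative check of `Every` semantics (the period/start values are made up):
def _every_demo():
    every = Every(period=5, start=10)
    # steps 9 and 12 fall inside a period; 10 and 15 cross a boundary
    return [every(s) for s in (9, 10, 12, 15)]  # [False, True, False, True]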
| [
"utility.display.pwc",
"time.strftime",
"tensorflow.summary.experimental.get_step",
"collections.defaultdict",
"time.time",
"tensorflow.summary.scalar",
"time.gmtime"
] | [((287, 295), 'time.gmtime', 'gmtime', ([], {}), '()\n', (293, 295), False, 'from time import strftime, gmtime, time\n'), ((308, 314), 'time.time', 'time', ([], {}), '()\n', (312, 314), False, 'from time import strftime, gmtime, time\n'), ((360, 366), 'time.time', 'time', ([], {}), '()\n', (364, 366), False, 'from time import strftime, gmtime, time\n'), ((382, 390), 'time.gmtime', 'gmtime', ([], {}), '()\n', (388, 390), False, 'from time import strftime, gmtime, time\n'), ((773, 796), 'collections.defaultdict', 'defaultdict', (['Aggregator'], {}), '(Aggregator)\n', (784, 796), False, 'from collections import defaultdict\n'), ((2480, 2503), 'collections.defaultdict', 'defaultdict', (['Aggregator'], {}), '(Aggregator)\n', (2491, 2503), False, 'from collections import defaultdict\n'), ((1172, 1178), 'time.time', 'time', ([], {}), '()\n', (1176, 1178), False, 'from time import strftime, gmtime, time\n'), ((2871, 2877), 'time.time', 'time', ([], {}), '()\n', (2875, 2877), False, 'from time import strftime, gmtime, time\n'), ((3868, 3874), 'time.time', 'time', ([], {}), '()\n', (3872, 3874), False, 'from time import strftime, gmtime, time\n'), ((1308, 1314), 'time.time', 'time', ([], {}), '()\n', (1312, 1314), False, 'from time import strftime, gmtime, time\n'), ((3007, 3013), 'time.time', 'time', ([], {}), '()\n', (3011, 3013), False, 'from time import strftime, gmtime, time\n'), ((3247, 3281), 'tensorflow.summary.experimental.get_step', 'tf.summary.experimental.get_step', ([], {}), '()\n', (3279, 3281), True, 'import tensorflow as tf\n'), ((3298, 3367), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['f"""timer/{self._summary_name}"""', 'duration'], {'step': 'step'}), "(f'timer/{self._summary_name}', duration, step=step)\n", (3315, 3367), True, 'import tensorflow as tf\n'), ((4004, 4010), 'time.time', 'time', ([], {}), '()\n', (4008, 4010), False, 'from time import strftime, gmtime, time\n'), ((482, 520), 'time.strftime', 'strftime', (['"""%d %b %H:%M:%S"""', 'start_time'], {}), "('%d %b %H:%M:%S', start_time)\n", (490, 520), False, 'from time import strftime, gmtime, time\n'), ((546, 582), 'time.strftime', 'strftime', (['"""%d %b %H:%M:%S"""', 'end_time'], {}), "('%d %b %H:%M:%S', end_time)\n", (554, 582), False, 'from time import strftime, gmtime, time\n'), ((1756, 1861), 'utility.display.pwc', 'pwc', (['f"""{self._summary_name} duration: "{duration}" averaged over {self._period} times"""'], {'color': '"""blue"""'}), '(f\'{self._summary_name} duration: "{duration}" averaged over {self._period} times\'\n , color=\'blue\')\n', (1759, 1861), False, 'from utility.display import pwc\n'), ((1984, 2083), 'utility.display.pwc', 'pwc', (['f"""{self._summary_name} duration: "{duration}" for {aggregator.count} times"""'], {'color': '"""blue"""'}), '(f\'{self._summary_name} duration: "{duration}" for {aggregator.count} times\'\n , color=\'blue\')\n', (1987, 2083), False, 'from utility.display import pwc\n'), ((3469, 3574), 'utility.display.pwc', 'pwc', (['f"""{self._summary_name} duration: "{duration}" averaged over {self._period} times"""'], {'color': '"""blue"""'}), '(f\'{self._summary_name} duration: "{duration}" averaged over {self._period} times\'\n , color=\'blue\')\n', (3472, 3574), False, 'from utility.display import pwc\n')] |
import io
import multiprocessing
import pathlib
from urllib.parse import quote_plus
import pytest
import yappi
semaphore = multiprocessing.Semaphore(1)
class PytestProfiler:
def __init__(self, outdir):
self.func_stats_summary = io.StringIO()
self.outdir = pathlib.Path(outdir)
def pytest_sessionstart(self, session):
pass
def pytest_sessionfinish(self):
        yappi.clear_stats()
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write("\n" + "=" * 80 + "\n")
terminalreporter.write(str(self.func_stats_summary.getvalue()))
terminalreporter.write("\n" + "=" * 80 + "\n")
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
yappi.start(builtins=False, profile_threads=True)
yield
yappi.stop()
func_stats = yappi.get_func_stats()
self.outdir.mkdir(parents=True, exist_ok=True)
path = (
self.outdir / pathlib.Path(quote_plus(item.name, safe="/[],._") + ".prof")
).absolute()
func_stats.save(str(path), type="PSTAT")
self.func_stats_summary.write("\n" + "-" * 80 + "\n")
self.func_stats_summary.write(
f"function statistics\n{item.name}\n{str(path)}\n"
)
func_stats.sort("ttot").print_all(self.func_stats_summary)
self.func_stats_summary.write("\n" + "-" * 80 + "\n")
func_stats.clear()
def pytest_addoption(parser):
group = parser.getgroup("Profiling")
group.addoption(
"--profile",
action="store_true",
default=False,
help="generate profiling information",
)
group.addoption(
"--profile-outdir",
default="prof",
help="output directory (places pstat .prof files here)",
)
def pytest_configure(config):
with semaphore:
profiling_enabled = bool(config.getoption("--profile"))
if profiling_enabled:
if not config.pluginmanager.is_registered("pytest_profiler"):
config.pluginmanager.register(
PytestProfiler(config.getoption("--profile-outdir"))
)
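# A minimal sketch for inspecting one of the emitted PSTAT files afterwards;
# the file path is hypothetical, and yappi's "PSTAT" output is assumed to be
# pstats-compatible:
def print_profile(path="prof/test_example.prof", limit=20):
    import pstats
    pstats.Stats(path).sort_stats("cumulative").print_stats(limit)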
| [
"yappi.start",
"pathlib.Path",
"yappi.stop",
"yappi.get_func_stats",
"multiprocessing.Semaphore",
"io.StringIO",
"pytest.hookimpl",
"urllib.parse.quote_plus",
"yappi.clear_stats"
] | [((125, 153), 'multiprocessing.Semaphore', 'multiprocessing.Semaphore', (['(1)'], {}), '(1)\n', (150, 153), False, 'import multiprocessing\n'), ((683, 716), 'pytest.hookimpl', 'pytest.hookimpl', ([], {'hookwrapper': '(True)'}), '(hookwrapper=True)\n', (698, 716), False, 'import pytest\n'), ((244, 257), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (255, 257), False, 'import io\n'), ((280, 300), 'pathlib.Path', 'pathlib.Path', (['outdir'], {}), '(outdir)\n', (292, 300), False, 'import pathlib\n'), ((404, 423), 'yappi.clear_stats', 'yappi.clear_stats', ([], {}), '()\n', (421, 423), False, 'import yappi\n'), ((766, 815), 'yappi.start', 'yappi.start', ([], {'builtins': '(False)', 'profile_threads': '(True)'}), '(builtins=False, profile_threads=True)\n', (777, 815), False, 'import yappi\n'), ((838, 850), 'yappi.stop', 'yappi.stop', ([], {}), '()\n', (848, 850), False, 'import yappi\n'), ((873, 895), 'yappi.get_func_stats', 'yappi.get_func_stats', ([], {}), '()\n', (893, 895), False, 'import yappi\n'), ((1007, 1043), 'urllib.parse.quote_plus', 'quote_plus', (['item.name'], {'safe': '"""/[],._"""'}), "(item.name, safe='/[],._')\n", (1017, 1043), False, 'from urllib.parse import quote_plus\n')] |
#/bin/python3
## Step 1: scan recursively over all files
import os
import re
import pdb
import datetime
path = "./notes"
dest = "_posts"
magic_prefix = "Active-"
def extractModifiedDate(string):
regexp = r"\d+-\d+-\d+T\d+:\d+:\d+.\d+Z"
date_strings_all = re.findall(regexp,string)
date = None
if (len(date_strings_all) == 1):
date = datetime.datetime.strptime(date_strings_all[0], "%Y-%m-%dT%H:%M:%S.%fZ")
return date
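# Illustrative check of the parser above (the timestamp value is made up);
# expected result: datetime.datetime(2021, 3, 4, 10, 11, 12, 345000).
def _extract_demo():
    return extractModifiedDate("modified: 2021-03-04T10:11:12.345Z")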
def insert_str(string, str_to_insert, index):
return string[:index] + str_to_insert + string[index:]
def processFile(src, dest):
state_none = 0
state_hdr_start = 1
state_hdr_stop = 2
state_post_start = 3
modified_date = None
print("Process file ", src, " -> ", dest)
state = state_none
skiplines = 0
with open(src, "r") as f_in, open(dest, "w+") as f_out:
src_lines = f_in.readlines()
for line in src_lines:
#pdb.set_trace()
if ("---" in line):
state = state + 1
if (state == state_post_start):
break
skiplines = skiplines + 1
if state == state_hdr_start:
if ("modified" in line):
modified_date = extractModifiedDate(line)
dest_lines = src_lines[skiplines:]
for i in range(0, len(dest_lines)):
if state == state_post_start:
line = dest_lines[i]
# find lines with single $
start_pos = 0
pos = line.find('$', start_pos)
while (pos != -1):
if pos + 1 < len(line):
if (line[pos + 1] != '$'):
line = insert_str(line, '$' ,pos)
pos = pos + 1
else:
while(line[pos + 1] == '$'):
pos = pos + 1
else:
line = insert_str(line, '$' ,pos)
pos = pos + 1
start_pos = pos + 1
pos = line.find('$', start_pos)
dest_lines[i] = line
for i in dest_lines:
f_out.write(i)
if (modified_date is not None):
f_out.write(os.linesep)
f_out.write("*Last update:" + modified_date.strftime("%d %B %Y") + "*" + os.linesep)
f_in.close()
f_out.close()
for root,d_names,f_names in os.walk(path):
if ("notes" in root):
category = os.path.split(os.path.split(root)[0])[1]
for post_fn in f_names:
## Find all with name Active...dd
#print(root, post_fn, f_names)
if ((post_fn.startswith(magic_prefix)) and (".bak" not in post_fn)):
#print (root, post_fn)
new_filename = post_fn[len(magic_prefix):]
src = os.path.join(root, post_fn)
dest_filename = os.path.join(dest, new_filename)
print (root, category, src, "->", dest_filename)
processFile(src, dest_filename)
## Copy file with new name without Active prefix
## extract tag, remove first line
| [
"datetime.datetime.strptime",
"os.path.join",
"os.path.split",
"re.findall",
"os.walk"
] | [((2537, 2550), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2544, 2550), False, 'import os\n'), ((264, 290), 're.findall', 're.findall', (['regexp', 'string'], {}), '(regexp, string)\n', (274, 290), False, 'import re\n'), ((358, 430), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_strings_all[0]', '"""%Y-%m-%dT%H:%M:%S.%fZ"""'], {}), "(date_strings_all[0], '%Y-%m-%dT%H:%M:%S.%fZ')\n", (384, 430), False, 'import datetime\n'), ((2973, 3000), 'os.path.join', 'os.path.join', (['root', 'post_fn'], {}), '(root, post_fn)\n', (2985, 3000), False, 'import os\n'), ((3033, 3065), 'os.path.join', 'os.path.join', (['dest', 'new_filename'], {}), '(dest, new_filename)\n', (3045, 3065), False, 'import os\n'), ((2615, 2634), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (2628, 2634), False, 'import os\n')] |
#!/usr/bin/env python
import unittest
# import your test modules
import test_unittest_01
import test_unittest_02
import test_unittest_03
import test_unittest_04
if __name__ == '__main__':
# initialize the test suite
loader = unittest.TestLoader()
suite = unittest.TestSuite()
# add tests to the test suite
suite.addTests(loader.loadTestsFromModule(test_unittest_01))
suite.addTests(loader.loadTestsFromModule(test_unittest_02))
suite.addTests(loader.loadTestsFromModule(test_unittest_03))
suite.addTests(loader.loadTestsFromModule(test_unittest_04))
# initialize a runner, pass it your suite and run it
runner = unittest.TextTestRunner(verbosity=3)
result = runner.run(suite)
| [
"unittest.TestSuite",
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((236, 257), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (255, 257), False, 'import unittest\n'), ((271, 291), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (289, 291), False, 'import unittest\n'), ((658, 694), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(3)'}), '(verbosity=3)\n', (681, 694), False, 'import unittest\n')] |
#!python3
# from https://stackoverflow.com/questions/21241708/python-get-a-list-of-selected-files-in-explorer-windows-7/52959617#52959617
import win32gui, time
from win32con import PAGE_READWRITE, MEM_COMMIT, MEM_RESERVE, MEM_RELEASE, PROCESS_ALL_ACCESS, WM_GETTEXTLENGTH, WM_GETTEXT
from commctrl import LVS_OWNERDATA, LVM_GETITEMCOUNT, LVM_GETNEXTITEM, LVNI_SELECTED
import os
import struct
import ctypes
import win32api
import datetime
import win32com.client as win32
import win32ui
import psutil
import subprocess
import time
import urllib.parse
clsid = '{9BA05972-F6A8-11CF-A442-00A0C90A8F39}' #Valid for IE as well!
def getEditText(hwnd):
# api returns 16 bit characters so buffer needs 1 more char for null and twice the num of chars
buf_size = (win32gui.SendMessage(hwnd, WM_GETTEXTLENGTH, 0, 0) +1 ) * 2
target_buff = ctypes.create_string_buffer(buf_size)
win32gui.SendMessage(hwnd, WM_GETTEXT, buf_size, ctypes.addressof(target_buff))
return target_buff.raw.decode('utf16')[:-1]# remove the null char on the end
def _normaliseText(controlText):
'''Remove '&' characters, and lower case.
Useful for matching control text.'''
return controlText.lower().replace('&', '')
def _windowEnumerationHandler(hwnd, resultList):
'''Pass to win32gui.EnumWindows() to generate list of window handle,
window text, window class tuples.'''
resultList.append((hwnd, win32gui.GetWindowText(hwnd), win32gui.GetClassName(hwnd)))
def searchChildWindows(currentHwnd,
wantedText=None,
wantedClass=None,
selectionFunction=None):
results = []
childWindows = []
try:
win32gui.EnumChildWindows(currentHwnd,
_windowEnumerationHandler,
childWindows)
except win32gui.error:
# This seems to mean that the control *cannot* have child windows,
# i.e. not a container.
return
for childHwnd, windowText, windowClass in childWindows:
descendentMatchingHwnds = searchChildWindows(childHwnd)
if descendentMatchingHwnds:
results += descendentMatchingHwnds
if wantedText and \
not _normaliseText(wantedText) in _normaliseText(windowText):
continue
if wantedClass and \
not windowClass == wantedClass:
continue
if selectionFunction and \
not selectionFunction(childHwnd):
continue
results.append(childHwnd)
return results
def explorer_fileselection():
global clsid
address_1=""
files = []
shellwindows = win32.Dispatch(clsid)
w=win32gui
window = w.GetForegroundWindow()
#print("window: %s" % window)
if (window != 0):
if (w.GetClassName(window) == 'CabinetWClass'): # the main explorer window
#print("class: %s" % w.GetClassName(window))
#print("text: %s " %w.GetWindowText(window))
children = list(set(searchChildWindows(window)))
addr_edit = None
file_view = None
for child in children:
if (w.GetClassName(child) == 'WorkerW'): # the address bar
addr_children = list(set(searchChildWindows(child)))
for addr_child in addr_children:
if (w.GetClassName(addr_child) == 'ReBarWindow32'):
addr_edit = addr_child
addr_children = list(set(searchChildWindows(child)))
for addr_child in addr_children:
if (w.GetClassName(addr_child) == 'Address Band Root'):
addr_edit = addr_child
addr_children = list(set(searchChildWindows(child)))
for addr_child in addr_children:
if (w.GetClassName(addr_child) == 'msctls_progress32'):
addr_edit = addr_child
addr_children = list(set(searchChildWindows(child)))
for addr_child in addr_children:
if (w.GetClassName(addr_child) == 'Breadcrumb Parent'):
addr_edit = addr_child
addr_children = list(set(searchChildWindows(child)))
for addr_child in addr_children:
if (w.GetClassName(addr_child) == 'ToolbarWindow32'):
text=getEditText(addr_child)
if "\\" in text:
address_1=getEditText(addr_child)[text.index(" ")+1:]
print("Address --> "+address_1)
for window in range(shellwindows.Count):
window_URL = urllib.parse.unquote(shellwindows[window].LocationURL,encoding='ISO 8859-1')
window_dir = window_URL.split("///")[1].replace("/", "\\")
print("Directory --> "+window_dir)
if window_dir==address_1:
selected_files = shellwindows[window].Document.SelectedItems()
for file in range(selected_files.Count):
files.append(selected_files.Item(file).Path)
print("Files --> "+str(files))
while True:
explorer_fileselection()
time.sleep(1) | [
"ctypes.addressof",
"win32com.client.Dispatch",
"win32gui.SendMessage",
"time.sleep",
"ctypes.create_string_buffer",
"win32gui.GetWindowText",
"win32gui.GetClassName",
"win32gui.EnumChildWindows"
] | [((841, 878), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['buf_size'], {}), '(buf_size)\n', (868, 878), False, 'import ctypes\n'), ((2629, 2650), 'win32com.client.Dispatch', 'win32.Dispatch', (['clsid'], {}), '(clsid)\n', (2643, 2650), True, 'import win32com.client as win32\n'), ((5647, 5660), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5657, 5660), False, 'import time\n'), ((932, 961), 'ctypes.addressof', 'ctypes.addressof', (['target_buff'], {}), '(target_buff)\n', (948, 961), False, 'import ctypes\n'), ((1664, 1743), 'win32gui.EnumChildWindows', 'win32gui.EnumChildWindows', (['currentHwnd', '_windowEnumerationHandler', 'childWindows'], {}), '(currentHwnd, _windowEnumerationHandler, childWindows)\n', (1689, 1743), False, 'import win32gui, time\n'), ((763, 813), 'win32gui.SendMessage', 'win32gui.SendMessage', (['hwnd', 'WM_GETTEXTLENGTH', '(0)', '(0)'], {}), '(hwnd, WM_GETTEXTLENGTH, 0, 0)\n', (783, 813), False, 'import win32gui, time\n'), ((1406, 1434), 'win32gui.GetWindowText', 'win32gui.GetWindowText', (['hwnd'], {}), '(hwnd)\n', (1428, 1434), False, 'import win32gui, time\n'), ((1436, 1463), 'win32gui.GetClassName', 'win32gui.GetClassName', (['hwnd'], {}), '(hwnd)\n', (1457, 1463), False, 'import win32gui, time\n')] |
from django.shortcuts import render, render_to_response
from django.core.mail import mail_admins
from django.contrib import messages
from django.template import RequestContext
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.views.generic.base import TemplateView
from .forms import ContactForm
class PageView(TemplateView):
template_name = "404.html"
def get_context_data(self, **kwargs):
context = super(PageView, self).get_context_data(**kwargs)
context['contact_form'] = ContactForm()
return context
def contact(request):
form = ContactForm()
if request.POST:
form = ContactForm(request.POST)
if form.is_valid():
subject = form.cleaned_data['subject']
email = form.cleaned_data['email']
message = '{} from {}'.format(form.cleaned_data['feedback'], email)
            subject = 'Feedback: {}'.format(subject)  # `unicode` is Python 2-only; use a plain str
mail_admins(subject, message)
_next = request.POST.get('next')
messages.success(request, 'Thanks for the feedback!')
if _next:
return HttpResponseRedirect(_next)
_next = ""
if request.GET.get('next'):
_next = request.GET.get('next')
context = {'form': form, 'next': _next}
return render_to_response('pages/contact.html',
context,
context_instance=RequestContext(request)) | [
"django.http.HttpResponseRedirect",
"django.template.RequestContext",
"django.core.mail.mail_admins",
"django.contrib.messages.success"
] | [((963, 992), 'django.core.mail.mail_admins', 'mail_admins', (['subject', 'message'], {}), '(subject, message)\n', (974, 992), False, 'from django.core.mail import mail_admins\n'), ((1050, 1103), 'django.contrib.messages.success', 'messages.success', (['request', '"""Thanks for the feedback!"""'], {}), "(request, 'Thanks for the feedback!')\n", (1066, 1103), False, 'from django.contrib import messages\n'), ((1448, 1471), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (1462, 1471), False, 'from django.template import RequestContext\n'), ((1149, 1176), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['_next'], {}), '(_next)\n', (1169, 1176), False, 'from django.http import HttpResponseRedirect, Http404, HttpResponse\n')] |
from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name = 'dogey',
version = '0.1',
description = 'A pythonic dogehouse API.',
long_description = long_description,
long_description_content_type = 'text/markdown',
author = 'Shadofer#7312',
author_email = '<EMAIL>',
python_requires = '>=3.8.0',
url = 'https://github.com/Shadofer/dogey',
packages = ['dogey'],
install_requires = ['websockets'],
extras_require = {
'sound': ['pymediasoup']
},
license = 'MIT'
)
| [
"setuptools.setup"
] | [((97, 503), 'setuptools.setup', 'setup', ([], {'name': '"""dogey"""', 'version': '"""0.1"""', 'description': '"""A pythonic dogehouse API."""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'author': '"""Shadofer#7312"""', 'author_email': '"""<EMAIL>"""', 'python_requires': '""">=3.8.0"""', 'url': '"""https://github.com/Shadofer/dogey"""', 'packages': "['dogey']", 'install_requires': "['websockets']", 'extras_require': "{'sound': ['pymediasoup']}", 'license': '"""MIT"""'}), "(name='dogey', version='0.1', description='A pythonic dogehouse API.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Shadofer#7312', author_email='<EMAIL>',\n python_requires='>=3.8.0', url='https://github.com/Shadofer/dogey',\n packages=['dogey'], install_requires=['websockets'], extras_require={\n 'sound': ['pymediasoup']}, license='MIT')\n", (102, 503), False, 'from setuptools import setup\n')] |
# coding: utf-8
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from h1.configuration import Configuration
class Billing(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'period': 'str',
'price': 'float',
'quantity': 'float',
'project': 'str',
'one_time': 'bool',
'service': 'BillingService',
'resource': 'BillingResource',
'charges': 'list[BillingCharges]'
}
attribute_map = {
'id': 'id',
'period': 'period',
'price': 'price',
'quantity': 'quantity',
'project': 'project',
'one_time': 'oneTime',
'service': 'service',
'resource': 'resource',
'charges': 'charges'
}
def __init__(self, id=None, period=None, price=None, quantity=None, project=None, one_time=None, service=None, resource=None, charges=None, local_vars_configuration=None): # noqa: E501
"""Billing - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._period = None
self._price = None
self._quantity = None
self._project = None
self._one_time = None
self._service = None
self._resource = None
self._charges = None
self.discriminator = None
if id is not None:
self.id = id
if period is not None:
self.period = period
if price is not None:
self.price = price
if quantity is not None:
self.quantity = quantity
if project is not None:
self.project = project
if one_time is not None:
self.one_time = one_time
if service is not None:
self.service = service
if resource is not None:
self.resource = resource
if charges is not None:
self.charges = charges
@property
def id(self):
"""Gets the id of this Billing. # noqa: E501
:return: The id of this Billing. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Billing.
:param id: The id of this Billing. # noqa: E501
:type: str
"""
self._id = id
@property
def period(self):
"""Gets the period of this Billing. # noqa: E501
:return: The period of this Billing. # noqa: E501
:rtype: str
"""
return self._period
@period.setter
def period(self, period):
"""Sets the period of this Billing.
:param period: The period of this Billing. # noqa: E501
:type: str
"""
self._period = period
@property
def price(self):
"""Gets the price of this Billing. # noqa: E501
:return: The price of this Billing. # noqa: E501
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this Billing.
:param price: The price of this Billing. # noqa: E501
:type: float
"""
self._price = price
@property
def quantity(self):
"""Gets the quantity of this Billing. # noqa: E501
:return: The quantity of this Billing. # noqa: E501
:rtype: float
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""Sets the quantity of this Billing.
:param quantity: The quantity of this Billing. # noqa: E501
:type: float
"""
self._quantity = quantity
@property
def project(self):
"""Gets the project of this Billing. # noqa: E501
:return: The project of this Billing. # noqa: E501
:rtype: str
"""
return self._project
@project.setter
def project(self, project):
"""Sets the project of this Billing.
:param project: The project of this Billing. # noqa: E501
:type: str
"""
self._project = project
@property
def one_time(self):
"""Gets the one_time of this Billing. # noqa: E501
:return: The one_time of this Billing. # noqa: E501
:rtype: bool
"""
return self._one_time
@one_time.setter
def one_time(self, one_time):
"""Sets the one_time of this Billing.
:param one_time: The one_time of this Billing. # noqa: E501
:type: bool
"""
self._one_time = one_time
@property
def service(self):
"""Gets the service of this Billing. # noqa: E501
:return: The service of this Billing. # noqa: E501
:rtype: BillingService
"""
return self._service
@service.setter
def service(self, service):
"""Sets the service of this Billing.
:param service: The service of this Billing. # noqa: E501
:type: BillingService
"""
self._service = service
@property
def resource(self):
"""Gets the resource of this Billing. # noqa: E501
:return: The resource of this Billing. # noqa: E501
:rtype: BillingResource
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this Billing.
:param resource: The resource of this Billing. # noqa: E501
:type: BillingResource
"""
self._resource = resource
@property
def charges(self):
"""Gets the charges of this Billing. # noqa: E501
:return: The charges of this Billing. # noqa: E501
:rtype: list[BillingCharges]
"""
return self._charges
@charges.setter
def charges(self, charges):
"""Sets the charges of this Billing.
:param charges: The charges of this Billing. # noqa: E501
:type: list[BillingCharges]
"""
self._charges = charges
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Billing):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Billing):
return True
return self.to_dict() != other.to_dict()
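# Illustrative round-trip through the generated model above (the field values
# are made up):
def _billing_demo():
    b = Billing(id="bil-1", period="2020-01", price=9.99, quantity=3.0)
    return b.to_dict()["price"]  # 9.99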
| [
"six.iteritems",
"h1.configuration.Configuration"
] | [((6807, 6840), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (6820, 6840), False, 'import six\n'), ((1636, 1651), 'h1.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1649, 1651), False, 'from h1.configuration import Configuration\n')] |
#!/usr/bin/env python
# -*- test-case-name: twisted.names.test.test_examples -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Print the IP address for a given hostname, e.g.
    python gethostbyname.py www.google.com
This script does a host lookup using the default Twisted Names
resolver, a chained resolver, which attempts to look up a name from:
 * the local hosts file
 * a memory cache of previous lookup results
 * the system's recursive DNS servers
"""
import sys
from twisted.names import client, error
from twisted.internet.task import react
from twisted.python import usage
class Options(usage.Options):
synopsis = "Usage: gethostbyname.py HOSTNAME"
def parseArgs(self, hostname):
self["hostname"] = hostname
def printResult(address, hostname):
"""
Print the IP address or an error message if an IP address was not
found.
"""
if address:
sys.stdout.write(address + "\n")
else:
sys.stderr.write(
"ERROR: No IP addresses found for name {!r}\n".format(hostname)
)
def printError(failure, hostname):
"""
Print a friendly error message if the hostname could not be
resolved.
"""
failure.trap(error.DNSNameError)
sys.stderr.write("ERROR: hostname not found {!r}\n".format(hostname))
def main(reactor, *argv):
options = Options()
try:
options.parseOptions(argv)
except usage.UsageError as errortext:
sys.stderr.write(str(options) + "\n")
sys.stderr.write("ERROR: {}\n".format(errortext))
raise SystemExit(1)
hostname = options["hostname"]
d = client.getHostByName(hostname)
d.addCallback(printResult, hostname)
d.addErrback(printError, hostname)
return d
if __name__ == "__main__":
react(main, sys.argv[1:])
| [
"twisted.names.client.getHostByName",
"twisted.internet.task.react",
"sys.stdout.write"
] | [((1627, 1657), 'twisted.names.client.getHostByName', 'client.getHostByName', (['hostname'], {}), '(hostname)\n', (1647, 1657), False, 'from twisted.names import client, error\n'), ((1784, 1809), 'twisted.internet.task.react', 'react', (['main', 'sys.argv[1:]'], {}), '(main, sys.argv[1:])\n', (1789, 1809), False, 'from twisted.internet.task import react\n'), ((916, 948), 'sys.stdout.write', 'sys.stdout.write', (["(address + '\\n')"], {}), "(address + '\\n')\n", (932, 948), False, 'import sys\n')] |
import swapper
from accelerator_abstract.models.base_ethno_racial_identity import (
BaseEthnoRacialIdentity,
)
class EthnoRacialIdentity(BaseEthnoRacialIdentity):
class Meta(BaseEthnoRacialIdentity.Meta):
swappable = swapper.swappable_setting(
BaseEthnoRacialIdentity.Meta.app_label, 'EthnoRacialIdentity')
| [
"swapper.swappable_setting"
] | [((235, 327), 'swapper.swappable_setting', 'swapper.swappable_setting', (['BaseEthnoRacialIdentity.Meta.app_label', '"""EthnoRacialIdentity"""'], {}), "(BaseEthnoRacialIdentity.Meta.app_label,\n 'EthnoRacialIdentity')\n", (260, 327), False, 'import swapper\n')] |
from __future__ import annotations
import logging
from datetime import date, datetime, timedelta
from typing import Dict, Generator, List, Optional, Union, Tuple
import pandas as pd
import metrics
from api.models import ( # noqa
ChangeDeleteLog,
County,
ProductionHorizontal,
ProductionMasterHorizontal,
ProductionMasterVertical,
ProductionVertical,
WellHorizontal,
WellMasterHorizontal,
WellMasterVertical,
WellVertical,
)
from collector import ExportJob # noqa
from collector import (
CDExporter,
Collector,
Endpoint,
ExportBuilder,
ExportParameter,
ExportRetriever,
ProductionList,
ProductionTransformer,
WellboreTransformer,
WellList,
XMLParser,
)
from collector.identity_list import IdentityList
from collector.task import Task
from config import ExportDataTypes, IdentityTemplates, get_active_config
from exc import CollectorError, NoIdsError
from ihs import create_app
logger = logging.getLogger(__name__)
conf = get_active_config()
endpoints = Endpoint.load_from_config(conf)
def run_endpoint_task(
endpoint_name: str, task_name: str
) -> Generator[dict, None, None]:
""" Unpack task options and assemble metadata for job configuration """
endpoint = endpoints[endpoint_name]
task = endpoint.tasks[task_name]
metrics.post(
"task.execution", 1, tags={"endpoint": endpoint_name, "task": task_name}
)
for config in task.configs:
yield config
def submit_job(job_options: dict, metadata: dict) -> Optional[ExportJob]:
endpoint_name = metadata.get("endpoint")
endpoint = endpoints[endpoint_name]
# name = metadata.get("name", None)
target_model = metadata.get("target_model", None)
task_name = metadata.get("task", None)
source_name = metadata.get("source_name", None)
try:
ep = ExportParameter(**job_options)
requestor = ExportBuilder(endpoint)
job = requestor.submit(ep, metadata=metadata or {})
return job
except CollectorError as e:
logger.warning(
f"({target_model}) Skipping job {task_name} -> {source_name}: {e}"
)
return None
def collect(job: Union[dict, ExportJob]):
if isinstance(job, dict):
job = ExportJob(**job)
is_identity_export = IdentityTemplates.has_member(job.template)
data = get_job_results(job)
if is_identity_export:
collect_identities(job, data)
else:
collect_data(job, data)
def get_job_results(job: Union[ExportJob, dict]) -> bytes:
if not isinstance(job, ExportJob):
job = ExportJob(**job)
retr = ExportRetriever(job, base_url=job.url, endpoint=endpoints[job.endpoint])
data = retr.get(auto_delete=True)
return data
def collect_data(job: ExportJob, xml: bytes):
if xml:
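        # Parse the raw XML export into a document, then flatten it into dicts
        # shaped for the target model before saving.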
parser = XMLParser.load_from_config(conf.PARSER_CONFIG)
document = parser.parse(xml)
model = endpoints[job.endpoint].model
collector = Collector(model)
data: List[Dict] = []
if job.data_type == ExportDataTypes.WELL.value:
data = WellboreTransformer.extract_from_collection(document, model=model)
elif job.data_type == ExportDataTypes.PRODUCTION.value:
data = ProductionTransformer.extract_from_collection(document, model=model)
metrics.post("job.collection.success", len(data), tags=job.limited_dict())
collector.save(data, replace=True)
def collect_identities(job: ExportJob, data: bytes) -> IdentityList:
interface = None
if job.data_type == ExportDataTypes.WELL.value:
interface = WellList(job.name, job.hole_direction)
interface.ids = data
elif job.data_type == ExportDataTypes.PRODUCTION.value:
interface = ProductionList(job.name, job.hole_direction)
interface.ids = data
return interface
# def delete_job(job: ExportJob) -> bool:
# endpoint = endpoints[job.endpoint]
# requestor = ExportBuilder(endpoint)
# result = False
# if requestor.job_exists(job):
# result = requestor.delete_job(job)
# return result
def purge_remote_exports() -> bool:
eb = ExportBuilder(None)
eb.delete_all_jobs()
return True
def calc_remote_export_capacity() -> Dict[str, Union[float, int]]:
"""Calculate the amount of storage space currently consumed by job exports on IHS' servers.
Returns:
        dict -- {
            remote.capacity.used: space used in bytes,
            remote.capacity.available: space remaining in bytes,
            remote.capacity.total: total capacity in bytes,
            remote.jobs: number of existing completed jobs,
        }
    """
mean_doc_size_bytes = (
18000 * conf.TASK_BATCH_SIZE
) # average single entity document size
    inflation_pct: float = 0.1  # overestimate the used capacity by this percentage
doc_size_bytes = mean_doc_size_bytes + (inflation_pct * mean_doc_size_bytes)
remote_capacity_bytes: int = 1000000000 # 1 GB
eb = ExportBuilder(None)
try:
njobs = len(eb.list_completed_jobs())
except CollectorError as e:
logger.exception(f"Unable to calculate export capacity -- {e}", stack_info=True)
return {}
return {
"remote.capacity.used": njobs * doc_size_bytes,
"remote.capacity.available": remote_capacity_bytes - (njobs * doc_size_bytes),
"remote.capacity.total": remote_capacity_bytes,
"remote.jobs": njobs,
}
def download_changes_and_deletes() -> int:
max_date = ChangeDeleteLog.max_date()
max_sequence = ChangeDeleteLog.max_sequence() or 0
today = datetime.now()
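    # Re-pull from one day before the newest stored record, or fall back to a
    # 30-day window on the first run.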
if max_date:
last_date = max_date - timedelta(days=1)
else:
last_date = date.today() - timedelta(days=30)
cde = CDExporter(from_date=last_date, to_date=today)
results = cde.get_all()
logger.info(f"Downloaded {len(results)} changes and deletes")
records: List[Dict] = []
for r in results:
new = {}
for k, v in r.items():
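            # Normalize vendor field names to snake_case and stringify UWI values.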
if v is not None:
if "uwi" in k:
v = str(v)
if k == "reasoncode":
k = "reason_code"
elif k == "activecode":
k = "active_code"
elif k == "referenceuwi":
k = "reference_uwi"
elif k == "newuwi":
k = "new_uwi"
new[k] = v
if new.get("sequence", 0) > max_sequence:
new["processed"] = False
records.append(new)
logger.info(
f"Found {len(records)} changes and deletes (filtered {len(results) - len(records)})"
)
collector = Collector(ChangeDeleteLog)
return collector.save(records)
# def process_changes_and_deletes():
# # reason_action_map = {
# # "no_action": [0, 6],
# # "update_to_new_uwi": [1, 5, 7, 8, 9],
# # "update_to_ref_uwi": [2],
# # "delete": [3, 4],
# # }
# reason_action_map = {
# 0: "no_action",
# 1: "update_to_new_uwi",
# 2: "update_to_ref_uwi",
# 3: "delete",
# 4: "delete",
# 5: "update_to_new_uwi",
# 6: "no_action",
# 7: "update_to_new_uwi",
# 8: "update_to_new_uwi",
# 9: "update_to_new_uwi",
# }
# objs = ChangeDeleteLog.objects(processed=False)
# obj = objs[len(objs) - 80]
# obj._data
# #! unfinished
# # for obj in objs:
# # if obj.processed is False:
# # action = reason_action_map[obj.reason_code]
# # if action == "delete":
# # document = WellHorizontal.objects(api14=obj.uwi).first()
# # document = WellVertical.objects(api14=obj.uwi).first()
def synchronize_master_lists():
county_model_name = County.__name__.split(".")[-1]
master_counties = County.as_df().index.tolist()
for model in [
WellMasterHorizontal,
WellMasterVertical,
ProductionMasterHorizontal,
ProductionMasterVertical,
]:
target_model_name = model.__name__.split(".")[-1]
model_counties = model.as_df().index.tolist()
missing_from_model = [x for x in master_counties if x not in model_counties]
# add missing counties to model
added = []
for county in missing_from_model:
i = model(name=county)
i.save()
added.append(county)
if added:
logger.info(
f"({target_model_name}) Added {len(added)} entries from {county_model_name} master: {added}" # noqa
)
missing_from_master = [x for x in model_counties if x not in master_counties]
if missing_from_master:
logger.info(
f"({target_model_name}) has {len(missing_from_master)} entries missing from {county_model_name} master" # noqa
)
logger.info(f"({target_model_name}) synchronized to {county_model_name} master")
def refresh_master_lists() -> List[Tuple[List[Dict], str, str]]:
endpoints = Endpoint.from_yaml(conf.COLLECTOR_CONFIG_PATH)
endpoints = {
k: v for k, v in endpoints.items() if "master" in v.model.__name__.lower()
}
all_endpoint_configs: List[Tuple[List[Dict], str, str]] = []
for endpoint_name, endpoint in endpoints.items():
# endpoint_name, endpoint = list(endpoints.items())[0]
target_model_name = endpoint.model.__name__.split(".")[-1]
county_record_dict = (
County.as_df().loc[:, ["county_code", "state_code"]].to_dict(orient="index")
)
task = endpoint.tasks["sync"]
task.options.matrix = county_record_dict # override the yaml defined matrix
configs = task.configs
logger.warning(f"({target_model_name}) refreshing {len(configs)} counties")
all_endpoint_configs.append((configs, endpoint_name, task.task_name))
return all_endpoint_configs
# job_options, metadata = task.configs[0].values()
# ep = ExportParameter(**job_options)
# print(ep.params["Query"])
if __name__ == "__main__":
import loggers
loggers.config(10)
logging.getLogger("collector.parser").setLevel(30)
logging.getLogger("zeep").setLevel(30)
from time import sleep
# from uuid import UUID
from ihs import create_app
logging.basicConfig(level=10)
app = create_app()
app.app_context().push()
# endpoint_name = "well_master_vertical"
# endpoint_name = "well_master_vertical"
# task_name = "sync"
# endpoint = endpoints[endpoint_name]
# task = endpoint.tasks[task_name]
# # configs =
# job_options, metadata = task.configs[0].values()
# for configs, endpoint_name, task_name in refresh_master_lists():
# for job
# ep = ExportParameter(**job_options)
# print(ep.params["Query"])
# requestor = ExportBuilder(endpoint)
# job = submit_job(job_options=job_options, metadata=metadata)
# # job.to_dict()
# sleep(5)
# if job:
# collect(job)
# xml = get_job_results(job)
# parser = XMLParser.load_from_config(conf.PARSER_CONFIG)
# document = parser.parse(xml)
# model = endpoint.model
# data = WellboreTransformer.extract_from_collection(document, model=model)
# len(data)
# [x["api14"] for x in data]
# collector = Collector(model)
# collector.save(data, replace=True)
# from api.models import County, WellMasterHorizontal
# import pandas as pd
# df = pd.DataFrame([x._data for x in County.objects.all()]).set_index("name")
# df.columns
# df = df.drop(columns=["state_code", "county_code"]).sort_values("well_h_last_run")
# df.shape
# hz_ids = (
# pd.DataFrame([x._data for x in WellMasterHorizontal.objects.all()])
# .set_index("name")
# .sort_index()
# )
# hz_ids.loc[~hz_ids.index.str.contains("County")].shape
# joined = df.join(hz_ids.ids)
# joined[joined.ids.isna()]
# # data[7]
# self = task.options
| [
"logging.getLogger",
"collector.Collector",
"collector.Endpoint.from_yaml",
"collector.XMLParser.load_from_config",
"datetime.timedelta",
"loggers.config",
"collector.CDExporter",
"metrics.post",
"config.get_active_config",
"collector.Endpoint.load_from_config",
"api.models.County.__name__.split... | [((975, 1002), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (992, 1002), False, 'import logging\n'), ((1011, 1030), 'config.get_active_config', 'get_active_config', ([], {}), '()\n', (1028, 1030), False, 'from config import ExportDataTypes, IdentityTemplates, get_active_config\n'), ((1043, 1074), 'collector.Endpoint.load_from_config', 'Endpoint.load_from_config', (['conf'], {}), '(conf)\n', (1068, 1074), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((1330, 1420), 'metrics.post', 'metrics.post', (['"""task.execution"""', '(1)'], {'tags': "{'endpoint': endpoint_name, 'task': task_name}"}), "('task.execution', 1, tags={'endpoint': endpoint_name, 'task':\n task_name})\n", (1342, 1420), False, 'import metrics\n'), ((2307, 2349), 'config.IdentityTemplates.has_member', 'IdentityTemplates.has_member', (['job.template'], {}), '(job.template)\n', (2335, 2349), False, 'from config import ExportDataTypes, IdentityTemplates, get_active_config\n'), ((2632, 2704), 'collector.ExportRetriever', 'ExportRetriever', (['job'], {'base_url': 'job.url', 'endpoint': 'endpoints[job.endpoint]'}), '(job, base_url=job.url, endpoint=endpoints[job.endpoint])\n', (2647, 2704), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((4158, 4177), 'collector.ExportBuilder', 'ExportBuilder', (['None'], {}), '(None)\n', (4171, 4177), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((4869, 4888), 'collector.ExportBuilder', 'ExportBuilder', (['None'], {}), '(None)\n', (4882, 4888), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((5392, 5418), 'api.models.ChangeDeleteLog.max_date', 'ChangeDeleteLog.max_date', ([], {}), '()\n', (5416, 5418), False, 'from api.models import ChangeDeleteLog, County, ProductionHorizontal, ProductionMasterHorizontal, ProductionMasterVertical, ProductionVertical, WellHorizontal, WellMasterHorizontal, WellMasterVertical, WellVertical\n'), ((5487, 5501), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5499, 5501), False, 'from datetime import date, datetime, timedelta\n'), ((5644, 5690), 'collector.CDExporter', 'CDExporter', ([], {'from_date': 'last_date', 'to_date': 'today'}), '(from_date=last_date, to_date=today)\n', (5654, 5690), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((6567, 6593), 'collector.Collector', 'Collector', (['ChangeDeleteLog'], {}), '(ChangeDeleteLog)\n', (6576, 6593), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((9071, 9117), 'collector.Endpoint.from_yaml', 'Endpoint.from_yaml', (['conf.COLLECTOR_CONFIG_PATH'], {}), '(conf.COLLECTOR_CONFIG_PATH)\n', (9089, 9117), False, 'from collector import CDExporter, Collector, 
Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((10139, 10157), 'loggers.config', 'loggers.config', (['(10)'], {}), '(10)\n', (10153, 10157), False, 'import loggers\n'), ((10348, 10377), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '(10)'}), '(level=10)\n', (10367, 10377), False, 'import logging\n'), ((10388, 10400), 'ihs.create_app', 'create_app', ([], {}), '()\n', (10398, 10400), False, 'from ihs import create_app\n'), ((1857, 1887), 'collector.ExportParameter', 'ExportParameter', ([], {}), '(**job_options)\n', (1872, 1887), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((1908, 1931), 'collector.ExportBuilder', 'ExportBuilder', (['endpoint'], {}), '(endpoint)\n', (1921, 1931), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((2264, 2280), 'collector.ExportJob', 'ExportJob', ([], {}), '(**job)\n', (2273, 2280), False, 'from collector import ExportJob\n'), ((2604, 2620), 'collector.ExportJob', 'ExportJob', ([], {}), '(**job)\n', (2613, 2620), False, 'from collector import ExportJob\n'), ((2836, 2882), 'collector.XMLParser.load_from_config', 'XMLParser.load_from_config', (['conf.PARSER_CONFIG'], {}), '(conf.PARSER_CONFIG)\n', (2862, 2882), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((2986, 3002), 'collector.Collector', 'Collector', (['model'], {}), '(model)\n', (2995, 3002), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((3618, 3656), 'collector.WellList', 'WellList', (['job.name', 'job.hole_direction'], {}), '(job.name, job.hole_direction)\n', (3626, 3656), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((5438, 5468), 'api.models.ChangeDeleteLog.max_sequence', 'ChangeDeleteLog.max_sequence', ([], {}), '()\n', (5466, 5468), False, 'from api.models import ChangeDeleteLog, County, ProductionHorizontal, ProductionMasterHorizontal, ProductionMasterVertical, ProductionVertical, WellHorizontal, WellMasterHorizontal, WellMasterVertical, WellVertical\n'), ((7812, 7838), 'api.models.County.__name__.split', 'County.__name__.split', (['"""."""'], {}), "('.')\n", (7833, 7838), False, 'from api.models import ChangeDeleteLog, County, ProductionHorizontal, ProductionMasterHorizontal, ProductionMasterVertical, ProductionVertical, WellHorizontal, WellMasterHorizontal, WellMasterVertical, WellVertical\n'), ((3108, 3174), 'collector.WellboreTransformer.extract_from_collection', 'WellboreTransformer.extract_from_collection', (['document'], {'model': 'model'}), '(document, model=model)\n', (3151, 3174), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((3766, 3810), 'collector.ProductionList', 'ProductionList', 
(['job.name', 'job.hole_direction'], {}), '(job.name, job.hole_direction)\n', (3780, 3810), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((5551, 5568), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5560, 5568), False, 'from datetime import date, datetime, timedelta\n'), ((5599, 5611), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5609, 5611), False, 'from datetime import date, datetime, timedelta\n'), ((5614, 5632), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (5623, 5632), False, 'from datetime import date, datetime, timedelta\n'), ((10162, 10199), 'logging.getLogger', 'logging.getLogger', (['"""collector.parser"""'], {}), "('collector.parser')\n", (10179, 10199), False, 'import logging\n'), ((10217, 10242), 'logging.getLogger', 'logging.getLogger', (['"""zeep"""'], {}), "('zeep')\n", (10234, 10242), False, 'import logging\n'), ((3258, 3326), 'collector.ProductionTransformer.extract_from_collection', 'ProductionTransformer.extract_from_collection', (['document'], {'model': 'model'}), '(document, model=model)\n', (3303, 3326), False, 'from collector import CDExporter, Collector, Endpoint, ExportBuilder, ExportParameter, ExportRetriever, ProductionList, ProductionTransformer, WellboreTransformer, WellList, XMLParser\n'), ((7865, 7879), 'api.models.County.as_df', 'County.as_df', ([], {}), '()\n', (7877, 7879), False, 'from api.models import ChangeDeleteLog, County, ProductionHorizontal, ProductionMasterHorizontal, ProductionMasterVertical, ProductionVertical, WellHorizontal, WellMasterHorizontal, WellMasterVertical, WellVertical\n'), ((9519, 9533), 'api.models.County.as_df', 'County.as_df', ([], {}), '()\n', (9531, 9533), False, 'from api.models import ChangeDeleteLog, County, ProductionHorizontal, ProductionMasterHorizontal, ProductionMasterVertical, ProductionVertical, WellHorizontal, WellMasterHorizontal, WellMasterVertical, WellVertical\n')] |
import numpy as np
import random
import pandas as pd
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import select
from sqlalchemy import and_
from sqlalchemy import between
from sqlalchemy.sql import exists
from sqlalchemy import desc
from datetime import datetime, timezone, timedelta
from damadicsDBMapping import *
from sequenced_data_handler import SequenceDataHandler
# IP Address: 172.16.17.32
# User: dbAdmin
# Password: <PASSWORD>
# Database: damadics
class ValveDataHandler(SequenceDataHandler):
'''
TODO: column information here
'''
#Method definition
def __init__(self, start_time, end_time, selected_features, sequence_length = 1, sequence_stride = 1, data_scaler = None):
#Public properties
self._start_time = start_time
self._end_time = end_time
self._selected_features = selected_features
self._rectify_labels = False
self._data_scaler = data_scaler
# Database connection
# self._db_connection = mysql.connector.connect(user = 'root', password = '<PASSWORD>#', database = 'damadics')
self._load_from_db = True
self._column_names = {0: 'timestamp', 1: 'externalControllerOutput', 2: 'undisturbedMediumFlow', 3: 'pressureValveInlet', 4:'pressureValveOutlet',
5: 'mediumTemperature', 6: 'rodDisplacement', 7: 'disturbedMediumFlow', 8: 'selectedFault', 9: 'faultType', 10: 'faultIntensity'}
# Entire Dataset
self._df = None
self._X = None
self._y = None
# Splitting. This is what is used to train
self._df_train = None
self._df_test = None
#create one time session
self._sqlsession = None
print("init")
#super init
super().__init__(sequence_length, sequence_stride, len(selected_features), data_scaler)
def connect_to_db(self,username,pasw,host,dbname):
# self.username = username
# self.pasw = pasw
# self.host = host
self.dbname = dbname
databaseString = "mysql+mysqldb://"+username+":"+pasw+"@"+host+"/"+dbname
self._sqlsession = None
try:
sqlengine = sqlalchemy.create_engine(databaseString)
SQLSession = sessionmaker(bind=sqlengine)
self._sqlsession = SQLSession()
print("Connection to " + databaseString + " successfull")
except Exception as e:
print("e:", e)
print("Error in connection to the database")
def extract_data_from_db(self):
startTime = datetime.now()
self._df = self._sqlsession.query(ValveReading).filter(ValveReading.timestamp.between (self._start_time,self._end_time) )
self._df = pd.read_sql(self._df.statement, self._df.session.bind)
#dataPoints = self._sqlsession.query(exists().where(ValveReading.timestamp == '2018-07-27 15:56:22')).scalar()
#dataPoints = self._sqlsession.query(ValveReading).order_by(ValveReading.timestamp)
# TODO: need to check whether dataPoints is of type DataFrame. Needs to be in type DataFrame
# TODO: check whether column names are extracted out
# All the data with selected features is saved in this variable
# TODO: check if self._selected_features is an array of indexes or strings
# self._df = df.iloc[:, self._selected_features].values
# Assumption that the output is only one column and is located at the last column out of all the selected features
# Below if self._selected_features is an array of indexes
column_names = ['externalControllerOutput', 'pressureValveInlet',
'pressureValveOutlet', 'mediumTemperature','rodDisplacement', 'disturbedMediumFlow', 'selectedFault']
self._X = self._df.loc[:, column_names[:-1]].values
self._y = self._df.loc[:, column_names[len(column_names) - 1]].values
# Below if self._selected_features is an array of strings
# inputs = df.loc[:, column_names[:-1]].values
# outputs = df.loc[:, column_names[len(column_names) - 1]].values
# for data in self._df:
# print(self._df)
print("Extracting data from database runtime:", datetime.now() - startTime)
def one_hot_encode(self, num_readings):
startTime = datetime.now()
fault_column = list()
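        # 20 fault classes: row i gets a 1 in the column for its (1-indexed) fault type.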
one_hot_matrix = np.zeros((num_readings, 20))
fault_column = self._y
for i in range(num_readings):
one_hot_matrix[i, int(fault_column[i] - 1)] = 1
print("One-hot-encoding:", datetime.now() - startTime)
return one_hot_matrix
# Private
def find_samples(self, data_samples):
'''
        Assumptions made when using this function
        1.) The valve always starts off as NOT BROKEN. The first faultType value is 20.
        2.) The function is applied to the entire dataset, not to chunks
        '''
        # TODO: handle cases when the first readings start off as a broken valve
# TODO: ask David if he wants a minimum amount of samples in the dataset
startTime = datetime.now()
small_list, big_list = list(), list()
normal_status = 20.0
isBroken = False
counter = 0
for i in range(len(self._y)):
# If True, then the current status of the valve is that it is broken
if (isBroken):
# The valve has been fixed and is back to its normal status
if (self._y[i] == normal_status):
isBroken = False
counter += 1
# Save everything from the small_list into the big_list
small_list = np.vstack(small_list)
big_list.append(small_list)
small_list = list()
small_list.append(data_samples[i, :])
# The current status of the valve is that it is not broken
else:
if (self._y[i] != normal_status):
isBroken = True
# small_list = np.append(data_samples[i, :], small_list)
small_list.append(data_samples[i, :])
print("Splitting into samples:",datetime.now() - startTime)
print("counter:", counter)
return big_list, counter
#
#
#
#
#
#
#
# # Private
# def find_samples(self, data_samples):
#
# '''
# Assumptions made when using this function
    # 1.) The valve always starts off as NOT BROKEN. First faultType value is 20.
# 2.) Function is used to entire dataset and not in chunks
# '''
#
    # # TODO: handle cases when the first readings start off as a broken valve
# # TODO: ask David if he wants a minimum amount of samples in the dataset
#
    # small_list, big_list = list(), list()
# normal_status = 20.0
# isBroken = False
# # Counter for the number of samples there are in the dataset
# counter = 0
#
# for i in range(len(self._y)):
# # If True, then the current status of the valve is that it is broken
# if (isBroken):
# # The valve has been fixed and is back to its normal status
# if (self._y[i] == normal_status):
# isBroken = False
# counter += 1
# # Save everything from the small_list into the big_list
# small_list = np.vstack(small_list)
# big_list.append(small_list)
# # Clear the small_list (reinitialize)
# small_list = list()
# small_list.append(data_samples[i, :])
# # The current status of the valve is that it is not broken
# else:
# # Broken valve discovered
# if (self._y[i] != normal_status):
# isBroken = True
# small_list.append(data_samples[i, :])
#
    # SPECIAL CASE: the simulation does not end with a fixed valve. Therefore we take whatever is inside the small_list and treat it as an entire sample
# if (self._y[i] != 20):
# counter += 1
# small_list = np.vstack(small_list)
# big_list.append(small_list)
#
# return big_list, counter
# Public
def load_data(self, verbose = 0, cross_validation_ratio = 0, test_ratio = 0, unroll = True):
"""Load the data using the specified parameters"""
'''
TODO: extracting data from MySQL database using SQLALCHEMY
Functions called here: generate_df_with_rul(self, df), generate_train_arrays(self, cross_validation_ratio = 0), generate_test_arrays(self),
create_sequenced_train_data(self), create_sequenced_test_data(self)
X: df[timestamp, ..., selectedFault]
y: df['faultType']
'''
# dataPoints = self._sqlsession.query(ValveReading)
if verbose == 1:
print("Loading data for dataset {} with window_size of {}, stride of {}. Cros-Validation ratio {}".format(self._dataset_number,
self._sequence_length, self._sequence_stride, cross_validation_ratio))
if cross_validation_ratio < 0 or cross_validation_ratio > 1:
print("Error, cross validation must be between 0 and 1")
return
if test_ratio < 0 or test_ratio > 1:
print("Error, test ratio must be between 0 and 1")
return
if cross_validation_ratio + test_ratio > 1:
print("Sum of cross validation and test ratios is greater than 1. Need to pick smaller ratios.")
return
if self._load_from_db == True:
print("Loading data from database")
# These variables are where the entire data is saved at
self.extract_data_from_db()
# One hot encoding
output_one_hot_matrix = self.one_hot_encode(self._df.shape[0])
# Finds samples within the inputs
self._X, num_samples = self.find_samples(self._X)
self._y, _ = self.find_samples(output_one_hot_matrix)
# self._df_train = self.load_db_into_df(self._file_train_data)
# self._df_test = self.load_db_into_df(self._file_test_data)
# self._df_train, num_units, trimmed_rul_train = self.generate_df_with_rul(self._df_train)
else:
print("Loading data from memory")
#Reset arrays
"""
self._X_train_list = list()
self._X_crossVal_list = list()
self._X_test_list = list()
self._y_train_list = list()
self._y_crossVal_list = list()
self._y_test_list = list()
"""
# Split up the data into its different samples
#Modify properties in the parent class, and let the parent class finish the data processing
self.train_cv_test_split(cross_validation_ratio, test_ratio, num_samples)
self.print_sequence_shapes()
# Unroll = True for ANN
# Unroll = False for RNN
self.generate_train_data(unroll)
self.generate_crossValidation_data(unroll)
self.generate_test_data(unroll)
#
self._load_from_db = False # As long as the dataframe doesnt change, there is no need to reload from file
# Private
def train_cv_test_split(self, cross_validation_ratio, test_ratio, num_samples):
''' From the dataframes generate the feature arrays and their labels'''
print("split_samples num_samples:", num_samples)
print("cross_validation_ratio:", cross_validation_ratio)
print("test_ratio:", test_ratio)
startTime = datetime.now()
X_train_list, y_train_list = list(), list()
X_crossVal_list, y_crossVal_list = list(), list()
X_test_list, y_test_list = list(), list()
if cross_validation_ratio < 0 or cross_validation_ratio > 1:
print("Error, cross validation must be between 0 and 1")
return
if test_ratio < 0 or test_ratio > 1:
print("Error, test ratio must be between 0 and 1")
return
if cross_validation_ratio != 0 or test_ratio != 0:
self._X_train_list, self._y_train_list, self._X_crossVal_list, self._y_crossVal_list, self._X_test_list, self._y_test_list = self.split_samples(cross_validation_ratio, test_ratio, num_samples)
print("Train, cv, and test splitting:",datetime.now() - startTime)
print()
# Private
def split_samples(self, cross_validation_ratio, test_ratio, num_samples):
'''Split the samples according to their respective ratios'''
shuffled_samples = list(range(0, num_samples))
random.shuffle(shuffled_samples)
num_crossVal = int(cross_validation_ratio * num_samples)
#print("num_crossVal:", num_crossVal)
num_test = int(test_ratio * num_samples)
#print("num_test:", num_test)
num_train = num_samples - num_crossVal - num_test
#print("num_train:", num_train)
X_train_list, y_train_list = list(), list()
X_crossVal_list, y_crossVal_list = list(), list()
X_test_list, y_test_list = list(), list()
print(self._y[0])
for i in range(num_train):
#print("i:", i)
X_train_list.append(self._X[shuffled_samples[i]])
y_train_list.append(self._y[shuffled_samples[i]])
# y_train_list.append(self._y[shuffled_samples[i]][-1].reshape(1, 20))
# x = 0
# while(len(y_train_list) == 0):
# if (self._y[shuffled_samples[i]][x][19] != 1):
# y_train_list.append(self._y[shuffled_samples[i]])
# x += 1
# for x in range(self._y[shuffled_samples[i]].shape[0]):
# if (self._y[shuffled_samples[i]][x][19] != 1 and len(y_train_list) == 0):
# y_train_list.append(self._y[shuffled_samples[i]])
# print(len(y_train_list))
for j in range(num_train, num_train + num_crossVal):
#print("j:", j)
X_crossVal_list.append(self._X[shuffled_samples[j]])
y_crossVal_list.append(self._y[shuffled_samples[j]][-1].reshape(1, 20))
# y = 0
# while(len(y_train_list) == 0):
# if (self._y[shuffled_samples[i]][y][19] != 1):
# y_crossVal_list.append(self._y[shuffled_samples[i]])
# y += 1
# for y in range(self._y[shuffled_samples[j]].shape[0]):
# if (self._y[shuffled_samples[j]][y][19] != 1 and len(y_crossVal_list) == 0):
# y_crossVal_list.append(self._y[shuffled_samples[j]])
for k in range(num_train + num_crossVal, num_samples):
#print("k:", k)
X_test_list.append(self._X[shuffled_samples[k]])
y_test_list.append(self._y[shuffled_samples[k]][-1].reshape(1, 20))
# z = 0
# while(len(y_train_list) == 0):
# if (self._y[shuffled_samples[i]][x][19] != 1):
# y_test_list.append(self._y[shuffled_samples[i]])
# z += 1
# for z in range(self._y[shuffled_samples[k]].shape[0]):
# if (self._y[shuffled_samples[k]][z][19] != 1 and len(y_test_list) == 0):
# y_test_list.append(self._y[shuffled_samples[k]])
#print("X_test_list shape:", len(X_test_list[0].shape))
return X_train_list, y_train_list, X_crossVal_list, y_crossVal_list, X_test_list, y_test_list
# def train_cv_test_split(self, cross_validation_ratio = 0, test_ratio = 0, num_samples):
# """From the dataframes generate the feature arrays and their labels"""
#
# '''
# Functions called here: split_samples(self, df, splitting_ratio), generate_cross_validation_from_df(self, df, sequence_length)
# '''
#
# X_train_list, y_train_list = list(), list()
# X_crossVal_list, y_crossVal_list = list(), list()
# X_test_list, y_test_list = list()
#
# if cross_validation_ratio < 0 or cross_validation_ratio > 1 :
# print("Error, cross validation must be between 0 and 1")
# return
#
# if test_ratio < 0 or test_ratio > 1 :
# print("Error, test ratio must be between 0 and 1")
# return
#
# if cross_validation_ratio != 0 or test_ratio != 0:
# X_train_list, X_test_list, X_crossVal_list, y_crossVal_list, y_train_list, y_test_list = self.split_samples(cross_validation_ratio, test_ratio, num_samples)
#
# return X_train_list, y_train_list, X_crossVal_list, y_crossVal_list, X_test_list, y_test_list
# Private
# def split_samples(self, cross_validation_ratio, test_ratio, num_samples):
# """Split the samples according to their respective ratios"""
#
# shuffled_samples = list(range(0, num_samples))
# random.shuffle(shuffled_samples)
#
# num_crossVal = int(cross_validation_ratio * num_samples)
# num_test = int(test_ratio * num_samples)
# num_train = num_samples - num_crossVal - num_test
#
# X_train_list, y_train_list = list(), list()
# X_crossVal, y_crossVal_list = list(), list()
# X_test_list, y_test_list = list(), list()
#
# for i in range(num_train):
# X_train_list.append(self._X[shuffled_samples[i]])
# y_train_list.append(self._y[shuffled_samples[i]])
#
# for j in range(num_train, num_train + num_crossVal):
# X_crossVal.append(self._X[shuffled_samples[j]])
# y_crossVal_list.append(self._y[shuffled_samples[j]])
#
# for k in range(num_train + num_crossVal, num_samples):
# X_test.append(self._X[shuffled_samples[k]])
# y_test_list.append(self._y[shuffled_samples[k]])
#
# return X_train_list, X_test, X_crossVal, y_crossVal_list, y_train_list, y_test
#Property definition
@property
def df(self):
return self._df
@df.setter
def df(self, df):
self._df = df
@property
def X(self):
        return self._X
@X.setter
def X(self, X):
        self._X = X
@property
def y(self):
return self._y
@y.setter
    def y(self, y):
self._y = y
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self,start_time):
self._start_time = start_time
@property
def sqlsession(self):
return self._sqlsession
@sqlsession.setter
def sqlsession(self,sqlsession):
self._sqlsession = sqlsession
def __str__(self):
return "<ValveReading(timestamp='%s',externalControllerOutput='%s',undisturbedMediumFlow='%s',pressureValveInlet='%s',pressureValveOutlet='%s',mediumTemperature='%s',\
rodDisplacement='%s',disturbedMediumFlow='%s',selectedFault='%s',faultType='%s',faultIntensity='%s')>"\
%(str(self._timestamp),self._externalControllerOutput,self._undisturbedMediumFlow,self.pressureValveInlet,\
self.pressureValveOutlet,self.mediumTemperature,self.rodDisplacement,self.disturbedMediumFlow,self.selectedFault,\
self.faultType,self.faultIntensity)
# def selectedFeatures(self):
# return self._selectedFeatures
#
# @selectedFeatures.setter
# def selectedFeatures(self, selectedFeatures):
# self._selectedFeatures = selectedFeatures
#
# @property
# def max_rul(self):
# return self._max_rul
#
# @max_rul.setter
# def max_rul(self, max_rul):
# self._max_rul = max_rul
#
# @property
# def rectify_labels(self):
# return self._rectify_labels
#
# @rectify_labels.setter
# def rectify_labels(self, rectify_labels):
# self._rectify_labels = rectify_labels
#
# #ReadOnly Properties
#
# @property
# def dataset_number(self):
# return self._dataset_number
#
# @property
# def data_folder(self):
# return self._data_folder
#
# @property
# def file_train_data(self):
# return self._file_train_data
#
# @property
# def file_test_data(self):
# return self._file_test_data
#
# @property
# def file_rul(self):
# return self._file_rul
#
# @property
# def load_from_file(self):
# return self._load_from_db
#
# @property
# def column_names(self):
# return self._column_names
#
# @property
# def df_train(self):
# return self._df_train
#
# @property
# def df_test(self):
# return self._df_test
#
#
#
# #Auxiliary functions
#
# def compute_training_rul(df_row, *args):
# """Compute the RUL at each entry of the DF"""
#
# max_rul = args[1]
# rul_vector = args[0]
# rul_vector_index = int(df_row['Unit Number']) - 1
#
#
# if max_rul > 0 and rul_vector[rul_vector_index] - df_row['Cycle'] > max_rul:
# return max_rul
# else:
# return rul_vector[rul_vector_index] - df_row['Cycle']
| [
"sqlalchemy.orm.sessionmaker",
"random.shuffle",
"sqlalchemy.create_engine",
"datetime.datetime.now",
"numpy.zeros",
"numpy.vstack",
"pandas.read_sql"
] | [((2315, 2329), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2327, 2329), False, 'from datetime import datetime, timezone, timedelta\n'), ((2468, 2522), 'pandas.read_sql', 'pd.read_sql', (['self._df.statement', 'self._df.session.bind'], {}), '(self._df.statement, self._df.session.bind)\n', (2479, 2522), True, 'import pandas as pd\n'), ((3929, 3943), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3941, 3943), False, 'from datetime import datetime, timezone, timedelta\n'), ((3988, 4016), 'numpy.zeros', 'np.zeros', (['(num_readings, 20)'], {}), '((num_readings, 20))\n', (3996, 4016), True, 'import numpy as np\n'), ((4619, 4633), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4631, 4633), False, 'from datetime import datetime, timezone, timedelta\n'), ((10186, 10200), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10198, 10200), False, 'from datetime import datetime, timezone, timedelta\n'), ((11114, 11146), 'random.shuffle', 'random.shuffle', (['shuffled_samples'], {}), '(shuffled_samples)\n', (11128, 11146), False, 'import random\n'), ((1993, 2033), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['databaseString'], {}), '(databaseString)\n', (2017, 2033), False, 'import sqlalchemy\n'), ((2050, 2078), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'sqlengine'}), '(bind=sqlengine)\n', (2062, 2078), False, 'from sqlalchemy.orm import sessionmaker\n'), ((3843, 3857), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3855, 3857), False, 'from datetime import datetime, timezone, timedelta\n'), ((4156, 4170), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4168, 4170), False, 'from datetime import datetime, timezone, timedelta\n'), ((5465, 5479), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5477, 5479), False, 'from datetime import datetime, timezone, timedelta\n'), ((10874, 10888), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10886, 10888), False, 'from datetime import datetime, timezone, timedelta\n'), ((5075, 5096), 'numpy.vstack', 'np.vstack', (['small_list'], {}), '(small_list)\n', (5084, 5096), True, 'import numpy as np\n')] |
from __future__ import annotations
import os
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, Dict
from PySide2.QtGui import QIcon, QKeySequence
from bw_tools.common.bw_node import BWNode
from bw_tools.modules.bw_settings.bw_settings import BWModuleSettings
from PySide2.QtWidgets import QAction
from sd.api import sdbasetypes
from sd.api.sdgraph import SDGraph
from sd.api.sdgraphobject import SDGraphObject
from sd.api.sdgraphobjectframe import SDGraphObjectFrame
from sd.api.sdhistoryutils import SDHistoryUtils
from sd.api.sdnode import SDNode
if TYPE_CHECKING:
from bw_tools.common.bw_api_tool import BWAPITool
class BWFramerSettings(BWModuleSettings):
def __init__(self, file_path: Path):
super().__init__(file_path)
self.hotkey: str = self.get("Hotkey;value")
self.margin: float = self.get("Margin;value")
self.default_color: list = self.get("Default Color;value")
self.default_title: str = self.get("Default Title;value")
self.default_description: str = self.get("Default Description;value")
def get_frames(graph_objects: list[SDGraphObject]) -> list[SDGraphObjectFrame]:
return [obj for obj in graph_objects if isinstance(obj, SDGraphObjectFrame)]
def delete_frames(
graph: SDGraph,
frames: list[SDGraphObjectFrame],
):
[graph.deleteGraphObject(frame) for frame in frames]
def run_framer(
nodes: list[SDNode],
graph_objects: list[SDGraphObject],
graph: SDGraph,
settings: BWFramerSettings,
):
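    # Locate the extreme nodes on each axis; they define the bounding box the
    # frame must enclose.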
x0 = min(nodes, key=lambda node: node.getPosition().x)
x1 = max(nodes, key=lambda node: node.getPosition().x)
y0 = max(nodes, key=lambda node: node.getPosition().y)
y1 = min(nodes, key=lambda node: node.getPosition().y)
x0 = BWNode(x0)
x1 = BWNode(x1)
y0 = BWNode(y0)
y1 = BWNode(y1)
min_x = x0.pos.x - x0.width / 2
max_x = x1.pos.x - x1.width / 2
    min_y = y1.pos.y - y1.height / 2  # vertical extents come from node height, not width
    max_y = y0.pos.y - y0.height / 2
width = (max_x - min_x) + x1.width + settings.margin * 2
height = (max_y - min_y) + y0.height + settings.margin * 3
frames = get_frames(graph_objects)
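    # Reuse the left-most existing frame and delete any extras; otherwise a new
    # frame is created below with the configured defaults.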
if frames:
frames.sort(key=lambda f: f.getPosition().x)
frame = frames[0]
delete_frames(graph, frames[1:])
else:
frame: SDGraphObjectFrame = SDGraphObjectFrame.sNew(graph)
frame.setTitle(settings.default_title)
frame.setColor(
sdbasetypes.ColorRGBA(
settings.default_color[0],
settings.default_color[1],
settings.default_color[2],
settings.default_color[3],
)
)
frame.setDescription(settings.default_description)
frame.setPosition(sdbasetypes.float2(min_x - settings.margin, min_y - settings.margin * 2))
frame.setSize(sdbasetypes.float2(width, height))
def on_clicked_run_framer(api: BWAPITool):
if not api.current_graph_is_supported:
api.log.error("Graph type is unsupported")
return
pkg = api.current_package
file_path = Path(pkg.getFilePath())
if not os.access(file_path, os.W_OK):
api.log.error("Permission denied to write to package")
return
with SDHistoryUtils.UndoGroup("Framer"):
settings = BWFramerSettings(Path(__file__).parent / "bw_framer_settings.json")
nodes = api.current_node_selection
if len(nodes) == 0:
return
run_framer(
nodes,
api.current_graph_object_selection,
api.current_graph,
settings,
)
def on_graph_view_created(graph_view_id, api: BWAPITool):
toolbar = api.get_graph_view_toolbar(graph_view_id)
settings = BWFramerSettings(Path(__file__).parent / "bw_framer_settings.json")
icon = Path(__file__).parent / "resources" / "bw_framer_icon.png"
tooltip = f"""
Frames the selected nodes by reusing an existing frame, or drawing
a new one.
Shortcut: {settings.hotkey}
"""
action = QAction()
action.setIcon(QIcon(str(icon.resolve())))
action.setToolTip(tooltip)
action.setShortcut(QKeySequence(settings.hotkey))
action.triggered.connect(lambda: on_clicked_run_framer(api))
toolbar.add_action("bw_framer", action)
def on_initialize(api: BWAPITool):
api.register_on_graph_view_created_callback(partial(on_graph_view_created, api=api))
def get_default_settings() -> Dict:
return {
"Hotkey": {"widget": 1, "value": "Alt+D"},
"Margin": {"widget": 2, "value": 32},
"Default Color": {"widget": 6, "value": [0.0, 0.0, 0.0, 0.25]},
"Default Title": {"widget": 1, "value": ""},
"Default Description": {"widget": 1, "value": ""},
}
| [
"sd.api.sdbasetypes.float2",
"pathlib.Path",
"bw_tools.common.bw_node.BWNode",
"os.access",
"sd.api.sdbasetypes.ColorRGBA",
"functools.partial",
"sd.api.sdgraphobjectframe.SDGraphObjectFrame.sNew",
"sd.api.sdhistoryutils.SDHistoryUtils.UndoGroup",
"PySide2.QtGui.QKeySequence",
"PySide2.QtWidgets.Q... | [((1788, 1798), 'bw_tools.common.bw_node.BWNode', 'BWNode', (['x0'], {}), '(x0)\n', (1794, 1798), False, 'from bw_tools.common.bw_node import BWNode\n'), ((1808, 1818), 'bw_tools.common.bw_node.BWNode', 'BWNode', (['x1'], {}), '(x1)\n', (1814, 1818), False, 'from bw_tools.common.bw_node import BWNode\n'), ((1828, 1838), 'bw_tools.common.bw_node.BWNode', 'BWNode', (['y0'], {}), '(y0)\n', (1834, 1838), False, 'from bw_tools.common.bw_node import BWNode\n'), ((1848, 1858), 'bw_tools.common.bw_node.BWNode', 'BWNode', (['y1'], {}), '(y1)\n', (1854, 1858), False, 'from bw_tools.common.bw_node import BWNode\n'), ((4038, 4047), 'PySide2.QtWidgets.QAction', 'QAction', ([], {}), '()\n', (4045, 4047), False, 'from PySide2.QtWidgets import QAction\n'), ((2349, 2379), 'sd.api.sdgraphobjectframe.SDGraphObjectFrame.sNew', 'SDGraphObjectFrame.sNew', (['graph'], {}), '(graph)\n', (2372, 2379), False, 'from sd.api.sdgraphobjectframe import SDGraphObjectFrame\n'), ((2764, 2836), 'sd.api.sdbasetypes.float2', 'sdbasetypes.float2', (['(min_x - settings.margin)', '(min_y - settings.margin * 2)'], {}), '(min_x - settings.margin, min_y - settings.margin * 2)\n', (2782, 2836), False, 'from sd.api import sdbasetypes\n'), ((2856, 2889), 'sd.api.sdbasetypes.float2', 'sdbasetypes.float2', (['width', 'height'], {}), '(width, height)\n', (2874, 2889), False, 'from sd.api import sdbasetypes\n'), ((3127, 3156), 'os.access', 'os.access', (['file_path', 'os.W_OK'], {}), '(file_path, os.W_OK)\n', (3136, 3156), False, 'import os\n'), ((3246, 3280), 'sd.api.sdhistoryutils.SDHistoryUtils.UndoGroup', 'SDHistoryUtils.UndoGroup', (['"""Framer"""'], {}), "('Framer')\n", (3270, 3280), False, 'from sd.api.sdhistoryutils import SDHistoryUtils\n'), ((4149, 4178), 'PySide2.QtGui.QKeySequence', 'QKeySequence', (['settings.hotkey'], {}), '(settings.hotkey)\n', (4161, 4178), False, 'from PySide2.QtGui import QIcon, QKeySequence\n'), ((4374, 4413), 'functools.partial', 'partial', (['on_graph_view_created'], {'api': 'api'}), '(on_graph_view_created, api=api)\n', (4381, 4413), False, 'from functools import partial\n'), ((2463, 2596), 'sd.api.sdbasetypes.ColorRGBA', 'sdbasetypes.ColorRGBA', (['settings.default_color[0]', 'settings.default_color[1]', 'settings.default_color[2]', 'settings.default_color[3]'], {}), '(settings.default_color[0], settings.default_color[1],\n settings.default_color[2], settings.default_color[3])\n', (2484, 2596), False, 'from sd.api import sdbasetypes\n'), ((3758, 3772), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3762, 3772), False, 'from pathlib import Path\n'), ((3820, 3834), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3824, 3834), False, 'from pathlib import Path\n'), ((3318, 3332), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3322, 3332), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Github attached AWS Code Pipeline"""
__author__ = '<NAME>'
__version__ = '0.1'
import boto3
import os
import sys
import subprocess
import logging
import yaml  # main() below calls yaml.load(); imported here in case util does not re-export it
from typing import List  # used by the parameters_interactive() annotation; same caveat
from troposphere import Parameter, Ref, Template, iam
from troposphere.iam import Role
from troposphere.s3 import Bucket
from troposphere.codepipeline import (
Pipeline, Stages, Actions, ActionTypeId, OutputArtifacts, InputArtifacts, Webhook,
WebhookAuthConfiguration, WebhookFilterRule,
ArtifactStore, DisableInboundStageTransitions)
import troposphere.codebuild as cb
import argparse
from awacs.aws import Allow, Statement, Principal, PolicyDocument, Policy
from awacs.sts import AssumeRole
from util import *
def create_codebuild_project(template) -> cb.Project:
from troposphere.codebuild import Project, Environment, Artifacts, Source
environment = Environment(
ComputeType='BUILD_GENERAL1_SMALL',
Image='aws/codebuild/standard:3.0',
Type='LINUX_CONTAINER',
)
codebuild_role = template.add_resource(
Role(
"CodeBuildRole",
AssumeRolePolicyDocument=Policy(
Statement=[
Statement(
Effect=Allow,
Action=[AssumeRole],
Principal=Principal("Service", ["codebuild.amazonaws.com"])
)
]
),
ManagedPolicyArns=[
'arn:aws:iam::aws:policy/AmazonS3FullAccess',
'arn:aws:iam::aws:policy/CloudWatchFullAccess',
'arn:aws:iam::aws:policy/AWSCodeBuildAdminAccess',
],
)
)
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-source.html
return Project(
"ContinuousCodeBuild",
Name = "ContinuousCodeBuild",
        Description = 'Continuous pipeline',
Artifacts = Artifacts(Type='CODEPIPELINE'),
Environment = environment,
Source = Source(Type='CODEPIPELINE'),
ServiceRole = Ref(codebuild_role)
)
def create_pipeline_template(config) -> Template:
t = Template()
github_token = t.add_parameter(Parameter(
"GithubToken",
Type = "String"
))
github_owner = t.add_parameter(Parameter(
"GitHubOwner",
Type = 'String',
Default = 'aiengines',
AllowedPattern = "[A-Za-z0-9-_]+"
))
github_repo = t.add_parameter(Parameter(
"GitHubRepo",
Type = 'String',
Default = 'codebuild_pipeline_skeleton',
AllowedPattern = "[A-Za-z0-9-_]+"
))
github_branch = t.add_parameter(Parameter(
"GitHubBranch",
Type = 'String',
Default = 'master',
AllowedPattern = "[A-Za-z0-9-_]+"
))
artifact_store_s3_bucket = t.add_resource(Bucket(
"S3Bucket",
))
cloudformationrole = t.add_resource(Role(
"CloudformationRole",
AssumeRolePolicyDocument = PolicyDocument(
Version = "2012-10-17",
Statement = [
Statement(
Effect = Allow,
Action = [AssumeRole],
Principal = Principal("Service", ["cloudformation.amazonaws.com"])
)
]
),
ManagedPolicyArns = ['arn:aws:iam::aws:policy/AdministratorAccess']
))
codepipelinerole = t.add_resource(Role(
"CodePipelineRole",
AssumeRolePolicyDocument = PolicyDocument(
Statement = [
Statement(
Effect = Allow,
Action = [AssumeRole],
Principal = Principal("Service", ["codepipeline.amazonaws.com"])
)
]
),
ManagedPolicyArns = ['arn:aws:iam::aws:policy/AdministratorAccess']
))
codebuild_project = t.add_resource(create_codebuild_project(t))
pipeline = t.add_resource(Pipeline(
"CDPipeline",
ArtifactStore = ArtifactStore(
Type = "S3",
Location = Ref(artifact_store_s3_bucket)
),
# DisableInboundStageTransitions = [
# DisableInboundStageTransitions(
# StageName = "Release",
# Reason = "Disabling the transition until "
# "integration tests are completed"
# )
# ],
RestartExecutionOnUpdate = True,
RoleArn = codepipelinerole.GetAtt('Arn'),
Stages = [
Stages(
Name = "Source",
Actions = [
Actions(
Name = "SourceAction",
ActionTypeId = ActionTypeId(
Category = "Source",
Owner = "ThirdParty",
Provider = "GitHub",
Version = "1",
),
OutputArtifacts = [
OutputArtifacts(
Name = "GitHubSourceCode"
)
],
Configuration = {
'Owner': Ref(github_owner),
'Repo': Ref(github_repo),
'Branch': Ref(github_branch),
'PollForSourceChanges': False,
'OAuthToken': Ref(github_token)
},
RunOrder = "1"
)
]
),
Stages(
Name = "Build",
Actions = [
Actions(
Name = "BuildAction",
ActionTypeId = ActionTypeId(
Category = "Build",
Owner = "AWS",
Provider = "CodeBuild",
Version = "1"
),
InputArtifacts = [
InputArtifacts(
Name = "GitHubSourceCode"
)
],
OutputArtifacts = [
OutputArtifacts(
Name = "BuildArtifacts"
)
],
Configuration = {
'ProjectName': Ref(codebuild_project),
},
RunOrder = "1"
)
]
),
],
))
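    # Register a GitHub webhook so pushes trigger the pipeline instead of
    # relying on source polling.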
t.add_resource(Webhook(
"GitHubWebHook",
Authentication = 'GITHUB_HMAC',
AuthenticationConfiguration = WebhookAuthConfiguration(
SecretToken = Ref(github_token)
),
Filters = [
WebhookFilterRule(
JsonPath = '$.ref',
MatchEquals = 'refs/heads/{Branch}'
)
],
TargetPipeline = Ref(pipeline),
TargetAction = 'Source',
TargetPipelineVersion = pipeline.GetAtt('Version')
))
return t
def parameters_interactive(template: Template) -> List[dict]:
"""
Fill template parameters from standard input
    :param template: the troposphere Template whose parameters should be collected
    :return: A list of parameter dictionaries suitable for instantiating the template
"""
print("Please provide values for the Cloud Formation template parameters.")
parameter_values = []
for name, parameter in template.parameters.items():
paramdict = parameter.to_dict()
if 'Default' in paramdict:
default_value = paramdict['Default']
param_value = input(f"{name} [{default_value}]: ")
if not param_value:
param_value = default_value
else:
param_value = input(f"{name}: ")
parameter_values.append({'ParameterKey': name, 'ParameterValue': param_value})
return parameter_values
def config_logging():
import time
logging.getLogger().setLevel(os.environ.get('LOGLEVEL', logging.INFO))
logging.getLogger("requests").setLevel(logging.WARNING)
logging.basicConfig(format='{}: %(asctime)sZ %(levelname)s %(message)s'.format(script_name()))
logging.Formatter.converter = time.gmtime
def script_name() -> str:
""":returns: script name with leading paths removed"""
return os.path.split(sys.argv[0])[1]
def config_argparse() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Code pipeline",
epilog="""
""")
parser.add_argument('config', nargs='?', help='config file', default='config.yaml')
return parser
def main():
config_logging()
parser = config_argparse()
args = parser.parse_args()
with open(args.config, 'r') as fh:
config = yaml.load(fh, Loader=yaml.SafeLoader)
boto3.setup_default_session(region_name=config['aws_region'], profile_name=config['aws_profile'])
template = create_pipeline_template(config)
client = boto3.client('cloudformation')
logging.info(f"Creating stack {config['stack_name']}")
client = boto3.client('cloudformation')
delete_stack(client, config['stack_name'])
param_values_dict = parameters_interactive(template)
tparams = dict(
TemplateBody = template.to_yaml(),
Parameters = param_values_dict,
Capabilities=['CAPABILITY_IAM'],
#OnFailure = 'DELETE',
)
instantiate_CF_template(template, config['stack_name'], **tparams)
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"logging.getLogger",
"troposphere.codepipeline.ActionTypeId",
"troposphere.codebuild.Environment",
"boto3.client",
"troposphere.Parameter",
"troposphere.codepipeline.OutputArtifacts",
"troposphere.Ref",
"troposphere.Template",
"logging.info",
"argparse.ArgumentParser",
"os.path.split",
"tropos... | [((877, 989), 'troposphere.codebuild.Environment', 'Environment', ([], {'ComputeType': '"""BUILD_GENERAL1_SMALL"""', 'Image': '"""aws/codebuild/standard:3.0"""', 'Type': '"""LINUX_CONTAINER"""'}), "(ComputeType='BUILD_GENERAL1_SMALL', Image=\n 'aws/codebuild/standard:3.0', Type='LINUX_CONTAINER')\n", (888, 989), False, 'from troposphere.codebuild import Project, Environment, Artifacts, Source\n'), ((2175, 2185), 'troposphere.Template', 'Template', ([], {}), '()\n', (2183, 2185), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((8558, 8623), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Code pipeline"""', 'epilog': '"""\n"""'}), "(description='Code pipeline', epilog='\\n')\n", (8581, 8623), False, 'import argparse\n'), ((8937, 9039), 'boto3.setup_default_session', 'boto3.setup_default_session', ([], {'region_name': "config['aws_region']", 'profile_name': "config['aws_profile']"}), "(region_name=config['aws_region'], profile_name=\n config['aws_profile'])\n", (8964, 9039), False, 'import boto3\n'), ((9097, 9127), 'boto3.client', 'boto3.client', (['"""cloudformation"""'], {}), "('cloudformation')\n", (9109, 9127), False, 'import boto3\n'), ((9133, 9187), 'logging.info', 'logging.info', (['f"""Creating stack {config[\'stack_name\']}"""'], {}), '(f"Creating stack {config[\'stack_name\']}")\n', (9145, 9187), False, 'import logging\n'), ((9202, 9232), 'boto3.client', 'boto3.client', (['"""cloudformation"""'], {}), "('cloudformation')\n", (9214, 9232), False, 'import boto3\n'), ((2222, 2261), 'troposphere.Parameter', 'Parameter', (['"""GithubToken"""'], {'Type': '"""String"""'}), "('GithubToken', Type='String')\n", (2231, 2261), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((2323, 2421), 'troposphere.Parameter', 'Parameter', (['"""GitHubOwner"""'], {'Type': '"""String"""', 'Default': '"""aiengines"""', 'AllowedPattern': '"""[A-Za-z0-9-_]+"""'}), "('GitHubOwner', Type='String', Default='aiengines', AllowedPattern\n ='[A-Za-z0-9-_]+')\n", (2332, 2421), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((2497, 2612), 'troposphere.Parameter', 'Parameter', (['"""GitHubRepo"""'], {'Type': '"""String"""', 'Default': '"""codebuild_pipeline_skeleton"""', 'AllowedPattern': '"""[A-Za-z0-9-_]+"""'}), "('GitHubRepo', Type='String', Default=\n 'codebuild_pipeline_skeleton', AllowedPattern='[A-Za-z0-9-_]+')\n", (2506, 2612), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((2690, 2786), 'troposphere.Parameter', 'Parameter', (['"""GitHubBranch"""'], {'Type': '"""String"""', 'Default': '"""master"""', 'AllowedPattern': '"""[A-Za-z0-9-_]+"""'}), "('GitHubBranch', Type='String', Default='master', AllowedPattern=\n '[A-Za-z0-9-_]+')\n", (2699, 2786), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((2874, 2892), 'troposphere.s3.Bucket', 'Bucket', (['"""S3Bucket"""'], {}), "('S3Bucket')\n", (2880, 2892), False, 'from troposphere.s3 import Bucket\n'), ((8118, 8158), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', 'logging.INFO'], {}), "('LOGLEVEL', logging.INFO)\n", (8132, 8158), False, 'import os\n'), ((8463, 8489), 'os.path.split', 'os.path.split', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (8476, 8489), False, 'import os\n'), ((1954, 1984), 'troposphere.codebuild.Artifacts', 'Artifacts', ([], {'Type': '"""CODEPIPELINE"""'}), "(Type='CODEPIPELINE')\n", (1963, 1984), False, 'from troposphere.codebuild import Project, Environment, Artifacts, Source\n'), ((2038, 2065), 
'troposphere.codebuild.Source', 'Source', ([], {'Type': '"""CODEPIPELINE"""'}), "(Type='CODEPIPELINE')\n", (2044, 2065), False, 'from troposphere.codebuild import Project, Environment, Artifacts, Source\n'), ((2089, 2108), 'troposphere.Ref', 'Ref', (['codebuild_role'], {}), '(codebuild_role)\n', (2092, 2108), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((8089, 8108), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (8106, 8108), False, 'import logging\n'), ((8164, 8193), 'logging.getLogger', 'logging.getLogger', (['"""requests"""'], {}), "('requests')\n", (8181, 8193), False, 'import logging\n'), ((7087, 7100), 'troposphere.Ref', 'Ref', (['pipeline'], {}), '(pipeline)\n', (7090, 7100), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((6930, 7000), 'troposphere.codepipeline.WebhookFilterRule', 'WebhookFilterRule', ([], {'JsonPath': '"""$.ref"""', 'MatchEquals': '"""refs/heads/{Branch}"""'}), "(JsonPath='$.ref', MatchEquals='refs/heads/{Branch}')\n", (6947, 7000), False, 'from troposphere.codepipeline import Pipeline, Stages, Actions, ActionTypeId, OutputArtifacts, InputArtifacts, Webhook, WebhookAuthConfiguration, WebhookFilterRule, ArtifactStore, DisableInboundStageTransitions\n'), ((4105, 4134), 'troposphere.Ref', 'Ref', (['artifact_store_s3_bucket'], {}), '(artifact_store_s3_bucket)\n', (4108, 4134), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((6869, 6886), 'troposphere.Ref', 'Ref', (['github_token'], {}), '(github_token)\n', (6872, 6886), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((1325, 1374), 'awacs.aws.Principal', 'Principal', (['"""Service"""', "['codebuild.amazonaws.com']"], {}), "('Service', ['codebuild.amazonaws.com'])\n", (1334, 1374), False, 'from awacs.aws import Allow, Statement, Principal, PolicyDocument, Policy\n'), ((3237, 3291), 'awacs.aws.Principal', 'Principal', (['"""Service"""', "['cloudformation.amazonaws.com']"], {}), "('Service', ['cloudformation.amazonaws.com'])\n", (3246, 3291), False, 'from awacs.aws import Allow, Statement, Principal, PolicyDocument, Policy\n'), ((3706, 3758), 'awacs.aws.Principal', 'Principal', (['"""Service"""', "['codepipeline.amazonaws.com']"], {}), "('Service', ['codepipeline.amazonaws.com'])\n", (3715, 3758), False, 'from awacs.aws import Allow, Statement, Principal, PolicyDocument, Policy\n'), ((4726, 4813), 'troposphere.codepipeline.ActionTypeId', 'ActionTypeId', ([], {'Category': '"""Source"""', 'Owner': '"""ThirdParty"""', 'Provider': '"""GitHub"""', 'Version': '"""1"""'}), "(Category='Source', Owner='ThirdParty', Provider='GitHub',\n Version='1')\n", (4738, 4813), False, 'from troposphere.codepipeline import Pipeline, Stages, Actions, ActionTypeId, OutputArtifacts, InputArtifacts, Webhook, WebhookAuthConfiguration, WebhookFilterRule, ArtifactStore, DisableInboundStageTransitions\n'), ((5806, 5884), 'troposphere.codepipeline.ActionTypeId', 'ActionTypeId', ([], {'Category': '"""Build"""', 'Owner': '"""AWS"""', 'Provider': '"""CodeBuild"""', 'Version': '"""1"""'}), "(Category='Build', Owner='AWS', Provider='CodeBuild', Version='1')\n", (5818, 5884), False, 'from troposphere.codepipeline import Pipeline, Stages, Actions, ActionTypeId, OutputArtifacts, InputArtifacts, Webhook, WebhookAuthConfiguration, WebhookFilterRule, ArtifactStore, DisableInboundStageTransitions\n'), ((5030, 5070), 'troposphere.codepipeline.OutputArtifacts', 'OutputArtifacts', ([], {'Name': '"""GitHubSourceCode"""'}), "(Name='GitHubSourceCode')\n", (5045, 5070), 
False, 'from troposphere.codepipeline import Pipeline, Stages, Actions, ActionTypeId, OutputArtifacts, InputArtifacts, Webhook, WebhookAuthConfiguration, WebhookFilterRule, ArtifactStore, DisableInboundStageTransitions\n'), ((5241, 5258), 'troposphere.Ref', 'Ref', (['github_owner'], {}), '(github_owner)\n', (5244, 5258), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((5296, 5312), 'troposphere.Ref', 'Ref', (['github_repo'], {}), '(github_repo)\n', (5299, 5312), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((5352, 5370), 'troposphere.Ref', 'Ref', (['github_branch'], {}), '(github_branch)\n', (5355, 5370), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((5473, 5490), 'troposphere.Ref', 'Ref', (['github_token'], {}), '(github_token)\n', (5476, 5490), False, 'from troposphere import Parameter, Ref, Template, iam\n'), ((6103, 6142), 'troposphere.codepipeline.InputArtifacts', 'InputArtifacts', ([], {'Name': '"""GitHubSourceCode"""'}), "(Name='GitHubSourceCode')\n", (6117, 6142), False, 'from troposphere.codepipeline import Pipeline, Stages, Actions, ActionTypeId, OutputArtifacts, InputArtifacts, Webhook, WebhookAuthConfiguration, WebhookFilterRule, ArtifactStore, DisableInboundStageTransitions\n'), ((6306, 6344), 'troposphere.codepipeline.OutputArtifacts', 'OutputArtifacts', ([], {'Name': '"""BuildArtifacts"""'}), "(Name='BuildArtifacts')\n", (6321, 6344), False, 'from troposphere.codepipeline import Pipeline, Stages, Actions, ActionTypeId, OutputArtifacts, InputArtifacts, Webhook, WebhookAuthConfiguration, WebhookFilterRule, ArtifactStore, DisableInboundStageTransitions\n'), ((6521, 6543), 'troposphere.Ref', 'Ref', (['codebuild_project'], {}), '(codebuild_project)\n', (6524, 6543), False, 'from troposphere import Parameter, Ref, Template, iam\n')] |
import cost_function as cf
import pic
target_image = pic.pic2rgb("../data/img03.jpg", 50, 50)
cf.set_target_image(target_image)
s = "(H 0.73 (V 0.451 (H 0.963 (L color)(L color))(V 0.549 (L color)(L color)))(L color))"
matrix = cf.to_array(s, 50, 50, 1)
#print(matrix)
pic.rgb2pic(matrix, 'LAB', "./master_piece.png")
| [
"pic.rgb2pic",
"cost_function.set_target_image",
"cost_function.to_array",
"pic.pic2rgb"
] | [((53, 93), 'pic.pic2rgb', 'pic.pic2rgb', (['"""../data/img03.jpg"""', '(50)', '(50)'], {}), "('../data/img03.jpg', 50, 50)\n", (64, 93), False, 'import pic\n'), ((94, 127), 'cost_function.set_target_image', 'cf.set_target_image', (['target_image'], {}), '(target_image)\n', (113, 127), True, 'import cost_function as cf\n'), ((228, 253), 'cost_function.to_array', 'cf.to_array', (['s', '(50)', '(50)', '(1)'], {}), '(s, 50, 50, 1)\n', (239, 253), True, 'import cost_function as cf\n'), ((269, 317), 'pic.rgb2pic', 'pic.rgb2pic', (['matrix', '"""LAB"""', '"""./master_piece.png"""'], {}), "(matrix, 'LAB', './master_piece.png')\n", (280, 317), False, 'import pic\n')] |
import json
import pygments.formatters
import pygments.lexers
def pretty_view(mapping: dict, /) -> str:
"""
Args:
mapping:
Returns:
"""
dumped_mapping = json.dumps(mapping, ensure_ascii=False, indent=4)
pretty_mapping = pygments.highlight(
dumped_mapping,
pygments.lexers.JsonLexer(), # noqa
pygments.formatters.TerminalFormatter(bg="light"), # noqa
)
return pretty_mapping
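# Minimal usage sketch (assuming a terminal that renders ANSI escape codes):
#   print(pretty_view({"status": "ok", "count": 3}))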
| [
"json.dumps"
] | [((185, 234), 'json.dumps', 'json.dumps', (['mapping'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(mapping, ensure_ascii=False, indent=4)\n', (195, 234), False, 'import json\n')] |
from pathlib import Path
from configparser import ConfigParser
from utils.installer import Installer
from utils.chalk import print_header
from utils.utils import link_file
import utils.platform as platform
MOZILLA_DIR = Path.home().joinpath(".mozilla", "firefox")
SCRIPT_DIR = Path(__file__).parent
class Main(Installer):
def run(self):
if platform.is_mac:
return
print_header("Setting up Firefox profile")
profiles = ConfigParser()
profiles.read(MOZILLA_DIR.joinpath("profiles.ini"))
default_profile = ""
for k, v in profiles.items():
if v.get("Default", fallback=0) == "1":
default_profile = v
break
profile_dir = MOZILLA_DIR.joinpath(default_profile.get("Path"))
link_file(SCRIPT_DIR.joinpath("user.js"), profile_dir.joinpath("user.js"))
| [
"configparser.ConfigParser",
"pathlib.Path.home",
"utils.chalk.print_header",
"pathlib.Path"
] | [((279, 293), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (283, 293), False, 'from pathlib import Path\n'), ((222, 233), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (231, 233), False, 'from pathlib import Path\n'), ((401, 443), 'utils.chalk.print_header', 'print_header', (['"""Setting up Firefox profile"""'], {}), "('Setting up Firefox profile')\n", (413, 443), False, 'from utils.chalk import print_header\n'), ((464, 478), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (476, 478), False, 'from configparser import ConfigParser\n')] |
# NetEase Cloud Music (网易云音乐) batch downloader
# By Tsing
# Python3.4.4
import requests
import urllib.request  # a bare `import urllib` does not expose urllib.request, which urlretrieve needs
# Batch download songs from a chart
# r = requests.get('http://music.163.com/api/playlist/detail?id=2884035') # NetEase original songs chart
# r = requests.get('http://music.163.com/api/playlist/detail?id=19723756') # Cloud Music surging songs chart
# r = requests.get('http://music.163.com/api/playlist/detail?id=3778678') # Cloud Music hot songs chart
r = requests.get('http://music.163.com/api/playlist/detail?id=3779629') # Cloud Music new songs chart
# Batch download songs from a playlist
# r = requests.get('http://music.163.com/api/playlist/detail?id=123415635') # Cloud Music playlist "【华语】中国风的韵律,中国人的印记"
# r = requests.get('http://music.163.com/api/playlist/detail?id=122732380') # Cloud Music playlist "那不是爱,只是寂寞说的谎"
arr = r.json()['result']['tracks'] # the chart holds 100 songs in total
for i in range(10): # how many tracks to download, from 1 to 100
name = str(i+1) + ' ' + arr[i]['name'] + '.mp3'
link = arr[i]['mp3Url']
    urllib.request.urlretrieve(link, '网易云音乐\\' + name) # the target folder 网易云音乐 must be created in advance
print(name + ' 下载完成') | [
"requests.get",
"urllib.request.urlretrieve"
] | [((351, 418), 'requests.get', 'requests.get', (['"""http://music.163.com/api/playlist/detail?id=3779629"""'], {}), "('http://music.163.com/api/playlist/detail?id=3779629')\n", (363, 418), False, 'import requests\n'), ((828, 878), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['link', "('网易云音乐\\\\' + name)"], {}), "(link, '网易云音乐\\\\' + name)\n", (854, 878), False, 'import urllib\n')] |
""" De- and Encoding Layer, using a predefined Encoder """
import multiprocessing
from PiCN.Layers.PacketEncodingLayer.Encoder import BasicEncoder
from PiCN.Processes import LayerProcess
class BasicPacketEncodingLayer(LayerProcess):
""" De- and Encoding Layer, using a predefined Encoder """
def __init__(self, encoder: BasicEncoder=None, log_level=255):
LayerProcess.__init__(self, logger_name="PktEncLayer", log_level=log_level)
self._encoder: BasicEncoder = encoder
@property
def encoder(self):
return self._encoder
@encoder.setter
def encoder(self, encoder):
self._encoder = encoder
def data_from_higher(self, to_lower: multiprocessing.Queue, to_higher: multiprocessing.Queue, data):
face_id, packet = self.check_data(data)
        if face_id is None or packet is None:
return
self.logger.info("Packet from higher, Faceid: " + str(face_id) + ", Name: " + str(packet.name))
encoded_packet = self.encode(packet)
if encoded_packet is None:
self.logger.info("Dropping Packet since None")
return
to_lower.put([face_id, encoded_packet])
def data_from_lower(self, to_lower: multiprocessing.Queue, to_higher: multiprocessing.Queue, data):
face_id, packet = self.check_data(data)
        if face_id is None or packet is None:
return
decoded_packet = self.decode(packet)
if decoded_packet is None:
self.logger.info("Dropping Packet since None")
return
self.logger.info("Packet from lower, Faceid: " + str(face_id) + ", Name: " + str(decoded_packet.name))
to_higher.put([face_id, decoded_packet])
def encode(self, data):
self.logger.info("Encode packet")
return self._encoder.encode(data)
def decode(self, data):
self.logger.info("Decode packet")
return self._encoder.decode(data)
def check_data(self, data):
"""check if data from queue match the requirements"""
if len(data) != 2:
self.logger.warning("PacketEncoding Layer expects queue elements to have size 2")
return (None, None)
if type(data[0]) != int:
self.logger.warning("PacketEncoding Layer expects first element to be a faceid (int)")
return (None, None)
        # TODO: verify whether data[1] is a Packet object or raw binary data; how?
return data[0], data[1]
| [
"PiCN.Processes.LayerProcess.__init__"
] | [((375, 450), 'PiCN.Processes.LayerProcess.__init__', 'LayerProcess.__init__', (['self'], {'logger_name': '"""PktEncLayer"""', 'log_level': 'log_level'}), "(self, logger_name='PktEncLayer', log_level=log_level)\n", (396, 450), False, 'from PiCN.Processes import LayerProcess\n')] |
from tkinter import *
import random
import time
class Widget(object): # base class for everything that moves on screen
def __init__(self, window, size, color, pos, speed=[0, 0]):
self.window = window
self.size = size
self.color = color
self.pos = pos
self.speed = speed
    def acty(self): # move the instance by its speed
        self.window.move(self.id, self.speed[0], self.speed[1])
    def xturn(self): # reverse the horizontal direction
        self.speed[0] *= -1
    def yturn(self): # reverse the vertical direction
        self.speed[1] *= -1
    def current_speed(self): # current speed
        return self.speed
class Ball(Widget): # ball class, inherits Widget
def __init__(self, window, size, color, pos, speed):
super().__init__(window, size, color, pos, speed)
self.id = self.window.create_oval(self.pos[0], self.pos[1],
self.pos[0]+self.size,
self.pos[1]+self.size,
fill=self.color)
    def current_place(self): # current position
        return self.window.coords(self.id)
    def hit_check(self, obj): # check whether the ball hit the object
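        # the ball counts as a hit when its horizontal center lies between the
        # bar's left and right edges and their vertical extents overlap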
own_pos = self.current_place()
obj_pos = obj.current_place()
own_center = (own_pos[0] + own_pos[2])/2
if (own_center > obj_pos[0] and own_center < obj_pos[2]) \
and (own_pos[1] <= obj_pos[3] and own_pos[3] >= obj_pos[1]):
return 1
else:
return 0
class Bar(Widget): # rectangle class, inherits Widget
def __init__(self, window, size, color, pos):
super().__init__(window, size, color, pos)
self.point = 0
self.id = self.window.create_rectangle(self.pos[0], self.pos[1],
self.pos[0]+self.size[0],
self.pos[1]+self.size[1],
fill=self.color)
    def current_place(self): # current position
        return self.window.coords(self.id)
    def current_point(self): # ☆ current score
        return self.point
    def add_point(self, add=1): # ☆ add to the score
        self.point += add
class Player_Racket(Bar): # player racket class, inherits Bar
def __init__(self, window, size, color, pos, step=10):
super().__init__(window, size, color, pos)
self.step = step
self.window.bind_all('<Key>', self.control)
    def control(self, event): # key bindings
if event.keysym == "Right":
self.speed = [self.step, 0]
elif event.keysym == "Left":
self.speed = [-self.step, 0]
else:
return
self.acty()
class COM_Racket(Bar): # ☆ computer-controlled racket class, inherits Bar
def __init__(self, window, size, color, pos, step=10, count=10,
distance=100):
super().__init__(window, size, color, pos)
self.step = step
self.count_range = count
self.counter = 0
self.distance = distance
def control(self, obj):
self.counter += 1
if self.counter == self.count_range:
self.counter = 0
self.speed[0] = random.randrange(-self.step, self.step)
if (obj.current_place()[0] - self.current_place()[0] >=
self.distance and self.speed[0] < 0) \
or (self.current_place()[2] - obj.current_place()[2] >=
self.distance and self.speed[0] > 0):
self.xturn()
self.acty()
# window setup
tk = Tk()
canvas_size = [500, 400]
canvas = Canvas(tk, width=canvas_size[0], height=canvas_size[1])
tk.title("熱くなれよ!!!")  # "Get fired up!!!"
canvas.pack()
# ☆ on-screen display setup
canvas.create_text(50, 150, text='COM', fill='green', font=('メイリオ', 20))
canvas.create_text(50, 250, text='YOU', fill='red', font=('メイリオ', 20))
canvas.create_text(50, 200, text='TIME', fill='purple', font=('メイリオ', 20))
enemy_score = canvas.create_text(130, 150, fill='green', font=('メイリオ', 20))
my_score = canvas.create_text(130, 250, fill='red', font=('メイリオ', 20))
play_time = canvas.create_text(130, 200, fill='purple', font=('メイリオ', 20))
def show_score(player_score, score):
canvas.itemconfig(player_score, text=str(score))
def show_time(time_text, time_game):
canvas.itemconfig(time_text, text=str(time_game))
# ☆ match settings
finish_point = 3
# ball and racket settings
ball_radius = 50
ball_start = [random.randrange(50, 400), random.randrange(50, 100)]
ball_init_speed = [2.0, 2.0]
bar_size = [100, 10]
player_start = [200, 340]
# ☆ COM settings
com_start = [200, 50]
com_distance = 100
# create the ball and racket instances
ball = Ball(canvas, ball_radius, 'blue', ball_start, ball_init_speed)
player_racket = Player_Racket(canvas, bar_size, 'red', pos=player_start)
com_racket = COM_Racket(canvas, bar_size, 'green', pos=com_start,
distance=com_distance)
# ☆ clock setup
game_start = int(time.perf_counter())
game_time = game_start
while True:
    ball.acty() # move the ball
ball_pos = ball.current_place()
ball_speed = ball.current_speed()
    com_racket.control(ball) # ☆ move the COM racket
    # ☆ refresh the on-screen display
show_score(my_score, player_racket.current_point())
show_score(enemy_score, com_racket.current_point())
show_time(play_time, game_time-game_start)
now_time = int(time.perf_counter())
    if now_time - game_time >= 1: # ☆ update the time display once a second has passed
game_time = now_time
    if player_racket.current_point() >= finish_point: # ☆ the player wins
judge_text = 'YOU WIN'
judge_color = 'blue'
break
    if com_racket.current_point() >= finish_point: # ☆ the COM wins
judge_text = 'YOU LOSE'
judge_color = 'red'
break
if ball_pos[2] >= canvas_size[0] or ball_pos[0] <= 0:
ball.xturn()
    if ball_pos[3] >= canvas_size[1]: # ☆ the COM scores
com_racket.add_point()
ball.yturn()
    if ball_pos[1] <= 0: # ☆ the player scores
player_racket.add_point()
ball.yturn()
if (ball.hit_check(player_racket) == 1 and ball_speed[1] > 0) \
or (ball.hit_check(com_racket) == 1 and ball_speed[1] < 0):
        ball.yturn() # ☆ bounce when either racket is hit
tk.update()
time.sleep(0.01)
# ☆ announce the result
canvas.create_text(250, 200, text=judge_text,
fill=judge_color, font=('メイリオ', 30))
tk.update()
time.sleep(10)
| [
"time.perf_counter",
"time.sleep",
"random.randrange"
] | [((6172, 6186), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (6182, 6186), False, 'import time\n'), ((4294, 4319), 'random.randrange', 'random.randrange', (['(50)', '(400)'], {}), '(50, 400)\n', (4310, 4319), False, 'import random\n'), ((4321, 4346), 'random.randrange', 'random.randrange', (['(50)', '(100)'], {}), '(50, 100)\n', (4337, 4346), False, 'import random\n'), ((4778, 4797), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4795, 4797), False, 'import time\n'), ((6032, 6048), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (6042, 6048), False, 'import time\n'), ((5172, 5191), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5189, 5191), False, 'import time\n'), ((3084, 3123), 'random.randrange', 'random.randrange', (['(-self.step)', 'self.step'], {}), '(-self.step, self.step)\n', (3100, 3123), False, 'import random\n')] |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/PlayerClientPropertiesMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
from pyatv.protocols.mrp.protobuf import PlayerPath_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_PlayerPath__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n@pyatv/protocols/mrp/protobuf/PlayerClientPropertiesMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\x1a-pyatv/protocols/mrp/protobuf/PlayerPath.proto\"^\n\x1dPlayerClientPropertiesMessage\x12\x1f\n\nplayerPath\x18\x01 \x01(\x0b\x32\x0b.PlayerPath\x12\x1c\n\x14lastPlayingTimestamp\x18\x02 \x01(\x01:W\n\x1dplayerClientPropertiesMessage\x12\x10.ProtocolMessage\x18V \x01(\x0b\x32\x1e.PlayerClientPropertiesMessage')
PLAYERCLIENTPROPERTIESMESSAGE_FIELD_NUMBER = 86
playerClientPropertiesMessage = DESCRIPTOR.extensions_by_name['playerClientPropertiesMessage']
_PLAYERCLIENTPROPERTIESMESSAGE = DESCRIPTOR.message_types_by_name['PlayerClientPropertiesMessage']
PlayerClientPropertiesMessage = _reflection.GeneratedProtocolMessageType('PlayerClientPropertiesMessage', (_message.Message,), {
'DESCRIPTOR' : _PLAYERCLIENTPROPERTIESMESSAGE,
'__module__' : 'pyatv.protocols.mrp.protobuf.PlayerClientPropertiesMessage_pb2'
# @@protoc_insertion_point(class_scope:PlayerClientPropertiesMessage)
})
_sym_db.RegisterMessage(PlayerClientPropertiesMessage)
if _descriptor._USE_C_DESCRIPTORS == False:
pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(playerClientPropertiesMessage)
DESCRIPTOR._options = None
_PLAYERCLIENTPROPERTIESMESSAGE._serialized_start=167
_PLAYERCLIENTPROPERTIESMESSAGE._serialized_end=261
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.descriptor_pool.Default",
"google.protobuf.reflection.GeneratedProtocolMessageType",
"google.protobuf.symbol_database.Default",
"pyatv.protocols.mrp.protobuf.ProtocolMessage_pb2.ProtocolMessage.RegisterExtension"
] | [((527, 553), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (551, 553), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1586, 1821), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""PlayerClientPropertiesMessage"""', '(_message.Message,)', "{'DESCRIPTOR': _PLAYERCLIENTPROPERTIESMESSAGE, '__module__':\n 'pyatv.protocols.mrp.protobuf.PlayerClientPropertiesMessage_pb2'}"], {}), "('PlayerClientPropertiesMessage', (\n _message.Message,), {'DESCRIPTOR': _PLAYERCLIENTPROPERTIESMESSAGE,\n '__module__':\n 'pyatv.protocols.mrp.protobuf.PlayerClientPropertiesMessage_pb2'})\n", (1626, 1821), True, 'from google.protobuf import reflection as _reflection\n'), ((1993, 2128), 'pyatv.protocols.mrp.protobuf.ProtocolMessage_pb2.ProtocolMessage.RegisterExtension', 'pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension', (['playerClientPropertiesMessage'], {}), '(\n playerClientPropertiesMessage)\n', (2092, 2128), True, 'from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2\n'), ((821, 847), 'google.protobuf.descriptor_pool.Default', '_descriptor_pool.Default', ([], {}), '()\n', (845, 847), True, 'from google.protobuf import descriptor_pool as _descriptor_pool\n')] |
# Generated by Django 2.0.8 on 2019-06-19 19:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0001_squashed_0021'),
('app_challenges_sections_units', '0035_auto_20190619_1847'),
]
operations = [
migrations.RenameModel(
old_name='Slideshow',
new_name='Gallery',
),
migrations.RenameModel(
old_name='SlideshowImage',
new_name='GalleryImage',
),
]
| [
"django.db.migrations.RenameModel"
] | [((298, 362), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Slideshow"""', 'new_name': '"""Gallery"""'}), "(old_name='Slideshow', new_name='Gallery')\n", (320, 362), False, 'from django.db import migrations\n'), ((407, 481), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""SlideshowImage"""', 'new_name': '"""GalleryImage"""'}), "(old_name='SlideshowImage', new_name='GalleryImage')\n", (429, 481), False, 'from django.db import migrations\n')] |
from self_organising_systems.texture_ca.config import cfg
from self_organising_systems.shared.util import imread
import tensorflow as tf
import numpy as np
style_layers = ['block%d_conv1'%i for i in range(1, 6)]
content_layer = 'block4_conv2'
class StyleModel:
def __init__(self, input_texture_path):
vgg = tf.keras.applications.vgg16.VGG16(include_top=False, weights='imagenet')
vgg.trainable = False
layers = style_layers + [content_layer]
layers = {name:vgg.get_layer(name).output for name in layers}
self.model = tf.keras.Model([vgg.input], layers)
self.style_img = imread(input_texture_path, cfg.texture_ca.vgg_input_img_size)
self.target_style, _ = self.calc_style_content(self.style_img[None,...])
def run_model(self, img):
img = img[..., ::-1]*255.0 - np.float32([103.939, 116.779, 123.68])
layers = self.model(img)
style = [layers[name] for name in style_layers]
return style, layers[content_layer]
def calc_style_content(self, img):
style_layers, content = self.run_model(img)
style = [self.gram_style(a) for a in style_layers]
return style, content
@tf.function
def __call__(self, x):
gs, content = self.calc_style_content(x)
sl = tf.reduce_mean(self.style_loss(gs, self.target_style))
return sl
@tf.function
def style_loss(self, a, b):
return tf.add_n([tf.reduce_mean(tf.square(x-y), [-2, -1]) for x, y in zip(a, b)])
def gram_style(self, a):
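    # Gram matrix of the feature maps; tf.sqrt(a+1)-1 appears to softly
    # compress large activations before correlations are taken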
n, h, w, ch = tf.unstack(tf.shape(a))
a = tf.sqrt(a+1.0)-1.0
gram = tf.einsum('bhwc, bhwd -> bcd', a, a)
return gram / tf.cast(h*w, tf.float32)
class Inception:
def __init__(self, layer, ch):
with tf.io.gfile.GFile(cfg.texture_ca.inception_pb, 'rb') as f:
self.graph_def = tf.compat.v1.GraphDef.FromString(f.read())
self.layer = layer
self.ch = ch
avgpool0_idx = [n.name for n in self.graph_def.node].index('avgpool0')
del self.graph_def.node[avgpool0_idx:]
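    # truncate the graph at avgpool0 so only the layers up to the target
    # layer get imported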
# use pre_relu layers for Concat nodes
node = {n.name:n for n in self.graph_def.node}[layer]
self.outputs = [layer+':0']
if 'Concat' in node.op:
self.outputs = [inp+'_pre_relu:0' for inp in node.input[1:]]
@tf.function
def __call__(self, x):
overflow_loss = tf.reduce_mean(tf.square(tf.clip_by_value(x, 0.0, 1.0)-x))
imgs = x*255.0-117.0
outputs = tf.import_graph_def(self.graph_def, {'input':imgs}, self.outputs)
a = tf.concat(outputs, -1)
return -tf.reduce_mean(a[...,self.ch]) + overflow_loss*cfg.texture_ca.overflow_loss_coef
| [
"tensorflow.shape",
"tensorflow.io.gfile.GFile",
"tensorflow.einsum",
"tensorflow.concat",
"tensorflow.sqrt",
"tensorflow.clip_by_value",
"tensorflow.import_graph_def",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.Model",
"tensorflow.reduce_mean",
"self_organising_systems.share... | [((317, 389), 'tensorflow.keras.applications.vgg16.VGG16', 'tf.keras.applications.vgg16.VGG16', ([], {'include_top': '(False)', 'weights': '"""imagenet"""'}), "(include_top=False, weights='imagenet')\n", (350, 389), True, 'import tensorflow as tf\n'), ((543, 578), 'tensorflow.keras.Model', 'tf.keras.Model', (['[vgg.input]', 'layers'], {}), '([vgg.input], layers)\n', (557, 578), True, 'import tensorflow as tf\n'), ((600, 661), 'self_organising_systems.shared.util.imread', 'imread', (['input_texture_path', 'cfg.texture_ca.vgg_input_img_size'], {}), '(input_texture_path, cfg.texture_ca.vgg_input_img_size)\n', (606, 661), False, 'from self_organising_systems.shared.util import imread\n'), ((1532, 1568), 'tensorflow.einsum', 'tf.einsum', (['"""bhwc, bhwd -> bcd"""', 'a', 'a'], {}), "('bhwc, bhwd -> bcd', a, a)\n", (1541, 1568), True, 'import tensorflow as tf\n'), ((2344, 2410), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['self.graph_def', "{'input': imgs}", 'self.outputs'], {}), "(self.graph_def, {'input': imgs}, self.outputs)\n", (2363, 2410), True, 'import tensorflow as tf\n'), ((2418, 2440), 'tensorflow.concat', 'tf.concat', (['outputs', '(-1)'], {}), '(outputs, -1)\n', (2427, 2440), True, 'import tensorflow as tf\n'), ((801, 839), 'numpy.float32', 'np.float32', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (811, 839), True, 'import numpy as np\n'), ((1481, 1492), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (1489, 1492), True, 'import tensorflow as tf\n'), ((1502, 1518), 'tensorflow.sqrt', 'tf.sqrt', (['(a + 1.0)'], {}), '(a + 1.0)\n', (1509, 1518), True, 'import tensorflow as tf\n'), ((1587, 1613), 'tensorflow.cast', 'tf.cast', (['(h * w)', 'tf.float32'], {}), '(h * w, tf.float32)\n', (1594, 1613), True, 'import tensorflow as tf\n'), ((1672, 1724), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['cfg.texture_ca.inception_pb', '"""rb"""'], {}), "(cfg.texture_ca.inception_pb, 'rb')\n", (1689, 1724), True, 'import tensorflow as tf\n'), ((2453, 2484), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['a[..., self.ch]'], {}), '(a[..., self.ch])\n', (2467, 2484), True, 'import tensorflow as tf\n'), ((1374, 1390), 'tensorflow.square', 'tf.square', (['(x - y)'], {}), '(x - y)\n', (1383, 1390), True, 'import tensorflow as tf\n'), ((2271, 2300), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (2287, 2300), True, 'import tensorflow as tf\n')] |
import numpy as np
import os
import traceback
import yaml
from edflow.hooks.hook import Hook
from edflow.util import walk, retrieve, contains_key
from edflow.custom_logging import get_logger
class RuntimeInputHook(Hook):
"""Given a textfile reads that at each step and passes the results to
a callback function."""
def __init__(self, update_file, callback):
"""Args:
update_file (str): path/to/yaml-file containing the parameters of
interest.
callback (Callable): Each time something changes in the update_file
this function is called with the content of the file as
argument.
"""
self.logger = get_logger(self)
self.ufile = update_file
self.callback = callback
self.last_updates = None
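        # contents of the update file seen at the previous step; None until
        # the first successful read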
if not os.path.exists(self.ufile):
msg = (
"# Automatically created file. Changes made in here will "
"be recognized during runtime."
)
with open(self.ufile, "w+") as f:
f.write(msg)
def before_step(self, *args, **kwargs):
"""Checks if something changed and if yes runs the callback."""
try:
updates = yaml.full_load(open(self.ufile, "r"))
if self.last_updates is not None:
changes = {}
def is_changed(key, val, changes=changes):
if contains_key(key, updates):
other_val = retrieve(key, updates)
change = np.any(val != other_val)
else:
# This key is new -> Changes did happen!
change = True
changes[key] = change
self.logger.debug("Pre CHANGES: {}".format(changes))
walk(self.last_updates, is_changed, pass_key=True)
self.logger.debug("Post CHANGES: {}".format(changes))
if np.any(list(changes.values())):
self.callback(updates)
self.logger.debug("Runtime inputs received.")
self.logger.debug("{}".format(updates))
self.last_updates = updates
else:
if updates is not None:
self.callback(updates)
self.logger.info("Runtime inputs received.")
self.logger.debug("{}".format(updates))
self.last_updates = updates
except Exception as e:
self.logger.error("Something bad happend :(")
self.logger.error("{}".format(e))
self.logger.error(traceback.format_exc())
| [
"os.path.exists",
"traceback.format_exc",
"edflow.custom_logging.get_logger",
"edflow.util.walk",
"numpy.any",
"edflow.util.contains_key",
"edflow.util.retrieve"
] | [((708, 724), 'edflow.custom_logging.get_logger', 'get_logger', (['self'], {}), '(self)\n', (718, 724), False, 'from edflow.custom_logging import get_logger\n'), ((842, 868), 'os.path.exists', 'os.path.exists', (['self.ufile'], {}), '(self.ufile)\n', (856, 868), False, 'import os\n'), ((1856, 1906), 'edflow.util.walk', 'walk', (['self.last_updates', 'is_changed'], {'pass_key': '(True)'}), '(self.last_updates, is_changed, pass_key=True)\n', (1860, 1906), False, 'from edflow.util import walk, retrieve, contains_key\n'), ((1452, 1478), 'edflow.util.contains_key', 'contains_key', (['key', 'updates'], {}), '(key, updates)\n', (1464, 1478), False, 'from edflow.util import walk, retrieve, contains_key\n'), ((2689, 2711), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2709, 2711), False, 'import traceback\n'), ((1516, 1538), 'edflow.util.retrieve', 'retrieve', (['key', 'updates'], {}), '(key, updates)\n', (1524, 1538), False, 'from edflow.util import walk, retrieve, contains_key\n'), ((1573, 1597), 'numpy.any', 'np.any', (['(val != other_val)'], {}), '(val != other_val)\n', (1579, 1597), True, 'import numpy as np\n')] |
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from cajas.users.models.partner import Partner
from cajas.loans.models.loan import Loan, LoanType
class ValidatePartnerWithdraw(APIView):
def post(self, request):
data = request.data
validate = self.validate_withdraw(data)
if validate == 'loan':
return Response(
"El socio tiene préstamos activos.",
status=status.HTTP_202_ACCEPTED
)
elif validate == 'value':
return Response(
"El socio no tiene los fondos suficientes en su caja para realizar el retiro.",
status=status.HTTP_202_ACCEPTED
)
else:
return Response(
"Validación exitosa. El socio puede hacer el retiro.",
status=status.HTTP_200_OK
)
def validate_withdraw(self, data):
if self.validate_loans(data):
return 'loan'
elif not self.validate_value(data):
return 'value'
return True
def validate_loans(self, data):
partner = get_object_or_404(Partner, pk=data['partner'])
loans = Loan.objects.filter(lender=partner.user, loan_type=LoanType.SOCIO_DIRECTO)
if loans.exists():
for loan in loans:
if loan.balance > 0:
return True
return False
return False
def validate_value(self, data):
partner = get_object_or_404(Partner, pk=data['partner'])
box = partner.box
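        # rule implemented below: the withdrawal is allowed only while three
        # times the requested value still fits within the box balance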
if (int(data['value']) * 3) < box.balance:
return True
return False
| [
"rest_framework.response.Response",
"django.shortcuts.get_object_or_404",
"cajas.loans.models.loan.Loan.objects.filter"
] | [((1232, 1278), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Partner'], {'pk': "data['partner']"}), "(Partner, pk=data['partner'])\n", (1249, 1278), False, 'from django.shortcuts import get_object_or_404\n'), ((1295, 1369), 'cajas.loans.models.loan.Loan.objects.filter', 'Loan.objects.filter', ([], {'lender': 'partner.user', 'loan_type': 'LoanType.SOCIO_DIRECTO'}), '(lender=partner.user, loan_type=LoanType.SOCIO_DIRECTO)\n', (1314, 1369), False, 'from cajas.loans.models.loan import Loan, LoanType\n'), ((1598, 1644), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Partner'], {'pk': "data['partner']"}), "(Partner, pk=data['partner'])\n", (1615, 1644), False, 'from django.shortcuts import get_object_or_404\n'), ((466, 544), 'rest_framework.response.Response', 'Response', (['"""El socio tiene préstamos activos."""'], {'status': 'status.HTTP_202_ACCEPTED'}), "('El socio tiene préstamos activos.', status=status.HTTP_202_ACCEPTED)\n", (474, 544), False, 'from rest_framework.response import Response\n'), ((644, 775), 'rest_framework.response.Response', 'Response', (['"""El socio no tiene los fondos suficientes en su caja para realizar el retiro."""'], {'status': 'status.HTTP_202_ACCEPTED'}), "(\n 'El socio no tiene los fondos suficientes en su caja para realizar el retiro.'\n , status=status.HTTP_202_ACCEPTED)\n", (652, 775), False, 'from rest_framework.response import Response\n'), ((845, 940), 'rest_framework.response.Response', 'Response', (['"""Validación exitosa. El socio puede hacer el retiro."""'], {'status': 'status.HTTP_200_OK'}), "('Validación exitosa. El socio puede hacer el retiro.', status=\n status.HTTP_200_OK)\n", (853, 940), False, 'from rest_framework.response import Response\n')] |
"""
todo: check pandas
"""
from openpyxl import Workbook
from openpyxl.styles import Font
from pi88reader.pi88_importer import PI88Measurement, SegmentType
def main():
    # filename = '..\\resources\\quasi_static_12000uN.tdm'  # alternative sample file
    filename = '..\\resources\\AuSn_Creep\\1000uN 01 LC.tdm'
measurement = PI88Measurement(filename)
to_excel = PI88ToExcel(measurement)
to_excel.write("delme.xlsx")
class PI88ToExcel:
def __init__(self, pi88_measurement):
self.measurement = pi88_measurement
self.workbook = Workbook()
self.workbook.remove(self.workbook.active)
def write(self, filename):
self.add_sheet_quasi_static_data() # self.workbook.active)
self.add_sheet_segment_data()
self.workbook.save(filename=filename)
def add_sheet_quasi_static_data(self):
wb = self.workbook
#mws_title = self.measurement.filename.split('.')[2].split('\\')[2]
ws_title = self.measurement.filename.split('\\')[-1].split('.')[0]
ws = wb.create_sheet(title=ws_title)
data = self.measurement.get_quasi_static_curve()
self.write_data(ws, data)
def add_sheet_segment_data(self):
wb = self.workbook
ws_title = "segments"
ws = wb.create_sheet(title=ws_title)
ws.cell(row=1, column=1).value = "LOAD:"
data = self.measurement.get_segment_curve(SegmentType.LOAD)
self.write_data(ws, data, row=1, col=2)
ws.cell(row=1, column=5).value = "HOLD:"
data = self.measurement.get_segment_curve(SegmentType.HOLD)
self.write_data(ws, data, row=1, col=6)
ws.cell(row=1, column=9).value = "UNLOAD:"
data = self.measurement.get_segment_curve(SegmentType.UNLOAD)
self.write_data(ws, data, row=1, col=10)
@staticmethod
def write_row(ws, data, row, col):
font = Font(bold=True)
for i, value in enumerate(data):
ws.cell(row=row, column=col+i).value = value
ws.cell(row=row, column=col + i).font = font
@staticmethod
def write_cols(ws, data, row, col):
for i, value in enumerate(data[0]):
for j, column in enumerate(data):
ws.cell(row=row+i, column=col+j).value = column[i]
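    # write_data lays out `data` as an optional bold header row (data[0])
    # followed by one worksheet column per series in data[1:]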
def write_data(self, ws, data, row=1, col=1):
header = data[0]
if header:
self.write_row(ws, header, row, col)
row += 1
self.write_cols(ws, data[1:], row, col)
# for i, value in enumerate(data[1]):
# for j, column in enumerate(data[1:]):
# ws.cell(row=row+i, column=col+j).value = column[i]
if __name__ == "__main__":
main()
| [
"pi88reader.pi88_importer.PI88Measurement",
"openpyxl.styles.Font",
"openpyxl.Workbook"
] | [((307, 332), 'pi88reader.pi88_importer.PI88Measurement', 'PI88Measurement', (['filename'], {}), '(filename)\n', (322, 332), False, 'from pi88reader.pi88_importer import PI88Measurement, SegmentType\n'), ((538, 548), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (546, 548), False, 'from openpyxl import Workbook\n'), ((1859, 1874), 'openpyxl.styles.Font', 'Font', ([], {'bold': '(True)'}), '(bold=True)\n', (1863, 1874), False, 'from openpyxl.styles import Font\n')] |
import json
import sys
import matplotlib.pyplot as plt  # needed by computeDatasetStatistics, which plots with plt
import copy
import numpy as np
import tensorflow as tf
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import class_weight
from collections import Counter
import random
from tensorflow.keras.callbacks import Callback
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from nltk.tokenize import sent_tokenize
import os
# read entire json file
# if loading the original dataset ignore the reviews with score 0
# return a list of json entries
def readJson(file_path, original=False):
data = []
with open(file_path, encoding="utf8") as json_file:
for line in json_file:
entry = json.loads(line)
if original == True:
if entry['_source']['Review']['ReviewRating'] == 0:
continue
data.append(entry)
return data
# compute histogram of review scores
# input -> list of jsons
# output -> dict score -> #reviews
def computeScoreHistogram(data, normalize = False):
histo = {}
for entry in data:
score = entry['_source']['Review']['ReviewRating']
if score in histo:
histo[score] += 1
else:
histo[score] = 1
if normalize == True:
for key, value in histo.items():
histo[key] = 1.0 * value / len(data)
print(histo)
return histo
def computeTextStatistics(data, superior_threshold=None, inferior_threshold=None):
histo_char = {}
histo_word = {}
histo_category = {}
sup_threshold = 0
inf_threshold = 0
for entry in data:
text = entry['_source']['Review']['ReviewBody']
category = entry['_source']['Product']['ProductCategory']
chars = len(text)
words = len(text.split(" "))
if superior_threshold != None and words > superior_threshold:
sup_threshold += 1
if inferior_threshold != None and words < inferior_threshold:
inf_threshold += 1
if chars in histo_char:
histo_char[chars] += 1
else:
histo_char[chars] = 1
if words in histo_word:
histo_word[words] += 1
else:
histo_word[words] = 1
if category in histo_category:
histo_category[category] += 1
else:
histo_category[category] = 1
return histo_char, histo_word, histo_category, sup_threshold, inf_threshold
def computeDatasetStatistics(data, superior_threshold=None, inferior_threshold=None):
histo_scores = computeScoreHistogram(data)
histo_chars, histo_words, histo_category, sup_threshold, inf_threshold = computeTextStatistics(data, superior_threshold, inferior_threshold)
print("Reviews with number of words over", superior_threshold, "=", sup_threshold, "percentage =", 100.0*sup_threshold/len(data))
print("Reviews with number of words under", inferior_threshold, "=", inf_threshold, "percentage =", 100.0*inf_threshold/len(data))
print(histo_category)
plt.bar(histo_scores.keys(), histo_scores.values(), 1.0, color='g')
plt.title("Scores")
plt.show()
plt.bar(histo_chars.keys(), histo_chars.values(), 1.0, color='g')
plt.title("Chars")
plt.show()
plt.bar(histo_words.keys(), histo_words.values(), 1.0, color='g')
plt.title("Words")
plt.show()
# split the dataset in 5 vs ALL -> 1,2,3,4 -> label 0
# 5 -> label 1
# input -> dataset list of jsons
# output -> dataset list of jsons
def splitData5vAll(data):
new_data = copy.deepcopy(data)
for entry in new_data:
if entry['_source']['Review']['ReviewRating'] == 5:
entry['_source']['Review']['ReviewRating'] = 1
else:
entry['_source']['Review']['ReviewRating'] = 0
return new_data
# save the dataset
# input -> dataset list of jsons, filename to save
def saveData(data, filename):
with open(filename, 'w') as outfile:
for entry in data:
json.dump(entry, outfile)
outfile.write("\n")
# get features from data
# input -> data list of json
# sample_majority -> sample or not from majority class
# sample_count -> how many entries to sample from majority class
# set seed -> random seed value
# output -> list of dicts | one entry is a dict with features and labels
def getFeatures(data, use_review_text=True, sample_majority=False, sample_count=0, seed=None, majority_class=3):
if sample_majority == False:
train_list = []
for data_entry in data:
train_entry = {}
if use_review_text == True:
train_entry['features:review_text'] = data_entry['_source']['Review']['ReviewBody']
train_entry['label'] = data_entry['_source']['Review']['ReviewRating']
train_list.append(train_entry)
return train_list
elif sample_majority == True:
majority_list = []
for data_entry in data:
majority_entry = {}
if data_entry['_source']['Review']['ReviewRating'] == majority_class:
if use_review_text == True:
majority_entry['features:review_text'] = data_entry['_source']['Review']['ReviewBody']
majority_entry['label'] = data_entry['_source']['Review']['ReviewRating']
majority_list.append(majority_entry)
random.seed(seed)
sampled_majority_list = random.sample(majority_list, sample_count)
random.seed()
train_list = []
for data_entry in data:
train_entry = {}
if data_entry['_source']['Review']['ReviewRating'] != majority_class:
if use_review_text == True:
train_entry['features:review_text'] = data_entry['_source']['Review']['ReviewBody']
train_entry['label'] = data_entry['_source']['Review']['ReviewRating']
# train_list.append(train_entry)
sampled_majority_list.append(train_entry)
# train_list.extend(sampled_majority_list)
train_list = sampled_majority_list
return train_list
# get processed features and labels
# input -> features
# output -> list of processed features, list of labels, dict of class_weights
def processFeatures(data, bert_proc):
features = []
labels = []
iids = []
sids = []
i = 0
for entry in data:
review_text = entry["features:review_text"]
input_ids, segment_ids = bert_proc.process_text(review_text)
iids.append(input_ids)
sids.append(segment_ids)
labels.append(entry['label'])
features = [np.array(iids), np.array(sids)]
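    # BERT consumes two parallel inputs per example: token ids and segment ids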
class_weights = class_weight.compute_class_weight('balanced', np.unique(labels), labels)
class_weights = class_weights.astype(np.float32)
return features, labels, class_weights
# get processed features and labels from raw text
# input -> features
# output -> list of processed features, list of labels, dict of class_weights
def processFeaturesRawText(data, bert_proc):
features = []
iids = []
sids = []
i = 0
for entry in data:
review_text = entry
input_ids, segment_ids = bert_proc.process_text(review_text)
iids.append(input_ids)
sids.append(segment_ids)
features = [np.array(iids), np.array(sids)]
return features
# split data in train dev test split using stratified
# input -> data
# output -> train, dev, test data
def splitTrainDevTest(data):
train_data = []
dev_data = []
test_data = []
full_indices = np.array(range(len(data)))
full_classes = np.array(list(map(lambda x: x['_source']['Review']['ReviewRating'], data)))
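    # hold out 10% for test, then 11.1111% of the remainder for dev:
    # 0.9 * 0.111111 is roughly 0.1, giving an 80/10/10 split overall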
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1)
for tr, te in sss.split(full_indices, full_classes):
aux_train_indexes = tr
test_indexes = te
aux_train_data = []
for i in test_indexes:
test_data.append(data[i])
for i in aux_train_indexes:
aux_train_data.append(data[i])
indices = np.array(range(len(aux_train_data)))
classes = np.array(list(map(lambda x: x['_source']['Review']['ReviewRating'], aux_train_data)))
sss_ = StratifiedShuffleSplit(n_splits=1, test_size=0.111111)
for tr, de in sss_.split(indices, classes):
train_indexes = tr
dev_indexes = de
for i in dev_indexes:
dev_data.append(aux_train_data[i])
for i in train_indexes:
train_data.append(aux_train_data[i])
print(len(train_data), len(dev_data), len(test_data), len(train_data) + len(dev_data) + len(test_data), len(data))
print(len(list(set(train_indexes) & set(dev_indexes) & set(test_indexes))))
return train_data, dev_data, test_data
# split the dataset in 4 classes -> 1 -> label 0
# 2,3 -> label 1
# 4 -> label 2
# 5 -> label 3
# input -> dataset list of jsons
# output -> dataset list of jsons
def splitData4Classes(data):
new_data = copy.deepcopy(data)
for entry in new_data:
if entry['_source']['Review']['ReviewRating'] == 1:
entry['_source']['Review']['ReviewRating'] = 0
elif entry['_source']['Review']['ReviewRating'] == 2 or entry['_source']['Review']['ReviewRating'] == 3:
entry['_source']['Review']['ReviewRating'] = 1
elif entry['_source']['Review']['ReviewRating'] == 4:
entry['_source']['Review']['ReviewRating'] = 2
elif entry['_source']['Review']['ReviewRating'] == 5:
entry['_source']['Review']['ReviewRating'] = 3
return new_data
class FScoreCallback(Callback):
def __init__(self, dataset, steps, labels):
super().__init__()
self.steps = steps
self.dataset = dataset
self.labels_int = []
for x in labels:
self.labels_int.append(np.argmax(x))
def on_test_end(self, epoch, logs={}):
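        # run the model over the whole eval dataset and print a full
        # per-class precision/recall/F1 report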
y_pred = []
y_true = self.labels_int
predict_results = self.model.predict(self.dataset, steps=self.steps)
for prediction in predict_results:
y_pred.append(np.argmax(prediction))
print()
print(classification_report(y_true, y_pred, digits=4))
def compute_parameters(model_folder_path):
# define input
input_ids = tf.keras.layers.Input(shape=(64), dtype=tf.int32, name="input_ids")
segment_ids = tf.keras.layers.Input(shape=(64), dtype=tf.int32, name="segment_ids")
import BertModel
import tensorflow.keras as keras
import bert
# define model
bert_model = BertModel.BertModel(model_folder_path, 64)
bert_output = bert_model.bert_layer([input_ids, segment_ids])
cls_output = keras.layers.Lambda(lambda seq: seq[:, 0, :])(bert_output)
cls_drop = keras.layers.Dropout(0.1)(cls_output)
fc1 = keras.layers.Dense(units=100, activation="relu")(cls_drop)
prediction = keras.layers.Dense(units=10, activation="softmax")(fc1)
# build model
model = keras.Model(inputs=[input_ids, segment_ids], outputs=prediction)
model.build(input_shape=[(None, 64), (None, 64)])
# load pretrained
bert.load_bert_weights(bert_model.bert_layer, model_folder_path+"bert_model.ckpt")
model.compile(optimizer=keras.optimizers.Adam(lr=0.1), loss = 'categorical_crossentropy', metrics = [tf.keras.metrics.categorical_accuracy])
model.summary()
from tensorflow.python.keras.utils.layer_utils import count_params
trainable_count = count_params(model.trainable_weights)
non_trainable_count = count_params(model.non_trainable_weights)
print(trainable_count/1e6)
print(non_trainable_count)
# return model, bert_model
def build_reallife_corpus(model_folder_path):
new_model_folder_path = "/".join(model_folder_path.split("/")[:-2])
new_model_folder_path = os.path.join(new_model_folder_path, "reallife")
train_data = readJson(model_folder_path+"train.json")
train_data = clean_dict(train_data)
new_train_data = add_last_sentence_to_data(train_data)
new_train_data_over = perform_oversampling(new_train_data)
print(len(train_data), len(new_train_data), len(new_train_data_over))
saveData(new_train_data_over, os.path.join(new_model_folder_path, "train.json"))
dev_data = readJson(model_folder_path+"dev.json")
dev_data = clean_dict(dev_data)
new_dev_data = add_last_sentence_to_data(dev_data)
new_dev_data_over = perform_oversampling(new_dev_data)
print(len(dev_data), len(new_dev_data), len(new_dev_data_over))
saveData(new_dev_data_over, os.path.join(new_model_folder_path, "dev.json"))
test_data = readJson(model_folder_path+"test.json")
test_data = clean_dict(test_data)
new_test_data = add_last_sentence_to_data(test_data)
new_test_data_over = perform_oversampling(new_test_data)
print(len(test_data), len(new_test_data), len(new_test_data_over))
saveData(new_test_data_over, os.path.join(new_model_folder_path, "test.json"))
def add_last_sentence_to_data(data):
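    # for every review with more than one sentence, append an extra sample
    # that keeps only the last sentence (with the same rating)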
new_data = copy.deepcopy(data)
new_entries = []
count = 0
for entry in new_data:
review_text = entry['_source']['Review']['ReviewBody']
sentences = sent_tokenize(review_text)
if len(sentences) > 1:
# add new entry to dataset
new_entry = copy.deepcopy(entry)
new_entry['_source']['Review']['ReviewBody'] = sentences[-1]
new_entry['_score'] = 2
new_entries.append(new_entry)
if entry == new_entry:
print(entry)
print(new_entry)
sys.exit()
count += 1
# print(new_entries)
new_data.extend(new_entries)
return new_data
def perform_oversampling(data):
new_data = copy.deepcopy(data)
new_entries = []
counter = [0,0,0,0,0]
for entry in new_data:
label = entry['_source']['Review']['ReviewRating']
counter[label-1] += 1
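    # duplicate random entries until every class reaches the count of class 5
    # (counter[-1]); this assumes class 5 is the majority class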
while True:
random_entry = random.choice(data)
random_label = random_entry['_source']['Review']['ReviewRating']
if counter[random_label-1] == counter[-1]:
continue
else:
new_entries.append(random_entry)
counter[random_label-1] += 1
if counter[0] == counter[1] and counter[1] == counter[2] and counter[2] == counter[3] and counter[3] == counter[4]:
break
print(counter)
new_data.extend(new_entries)
return new_data
def clean_dict(data):
new_data = copy.deepcopy(data)
for entry in new_data:
del entry["_index"]
del entry["_type"]
del entry["_id"]
del entry["_score"]
del entry["_source"]["Review"]["ReviewTitle"]
del entry["_source"]["Review"]["ReviewDate"]
del entry["_source"]["Review"]["ReviewProductVerified"]
del entry["_source"]["Product"]
return new_data
if __name__ == "__main__":
# data = readJson("../Dataset/Reviews/4Classes/train.json")
# computeDatasetStatistics(data, 32, 32)
# print("--------------------------DEV--------------------------")
# data = readJson("../Dataset/Reviews/4Classes/dev.json")
# computeDatasetStatistics(data, 32, 32)
# print("--------------------------TEST--------------------------")
# data = readJson("../Dataset/Reviews/4Classes/test.json")
# computeDatasetStatistics(data, 32, 32)
# compute_parameters("../Models/raw/small/clean/trained_512/ro2/")
# sys.exit()
# # split data
# raw = readJson("../Dataset/Reviews/all_reviews.json", original=True)
# # computeDatasetStatistics(raw, 256, 256)
# train_data, dev_data, test_data = splitTrainDevTest(raw)
# saveData(train_data, "../Dataset/Reviews/emag_train.json")
# saveData(dev_data, "../Dataset/Reviews/emag_dev.json")
# saveData(test_data, "../Dataset/Reviews/emag_test.json")
# raw = readJson("../Dataset/Reviews/all_reviews.json", original=True)
train_data = readJson("../Dataset/Reviews/emag_train.json")
# computeDatasetStatistics(train_data, 256, 256)
dev_data = readJson("../Dataset/Reviews/emag_dev.json")
test_data = readJson("../Dataset/Reviews/emag_test.json")
computeScoreHistogram(train_data, normalize=True)
split_train = splitData4Classes(train_data)
computeScoreHistogram(split_train, normalize=True)
saveData(split_train, "../Dataset/Reviews/4Classes/train.json")
computeScoreHistogram(dev_data, normalize=True)
split_dev = splitData4Classes(dev_data)
computeScoreHistogram(split_dev, normalize=True)
saveData(split_dev, "../Dataset/Reviews/4Classes/dev.json")
computeScoreHistogram(test_data, normalize=True)
split_test = splitData4Classes(test_data)
computeScoreHistogram(split_test, normalize=True)
saveData(split_test, "../Dataset/Reviews/4Classes/test.json")
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"sklearn.metrics.classification_report",
"bert.load_bert_weights",
"tensorflow.python.keras.utils.layer_utils.count_params",
"numpy.array",
"tensorflow.keras.layers.Dense",
"nltk.tokenize.sent_tokenize",
"BertModel.BertModel",
"copy.deepcopy",
"sys... | [((3640, 3659), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (3653, 3659), False, 'import copy\n'), ((7818, 7867), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.1)'}), '(n_splits=1, test_size=0.1)\n', (7840, 7867), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((8309, 8363), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.111111)'}), '(n_splits=1, test_size=0.111111)\n', (8331, 8363), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((9163, 9182), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (9176, 9182), False, 'import copy\n'), ((10462, 10527), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(64)', 'dtype': 'tf.int32', 'name': '"""input_ids"""'}), "(shape=64, dtype=tf.int32, name='input_ids')\n", (10483, 10527), True, 'import tensorflow as tf\n'), ((10548, 10615), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(64)', 'dtype': 'tf.int32', 'name': '"""segment_ids"""'}), "(shape=64, dtype=tf.int32, name='segment_ids')\n", (10569, 10615), True, 'import tensorflow as tf\n'), ((10734, 10776), 'BertModel.BertModel', 'BertModel.BertModel', (['model_folder_path', '(64)'], {}), '(model_folder_path, 64)\n', (10753, 10776), False, 'import BertModel\n'), ((11145, 11209), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[input_ids, segment_ids]', 'outputs': 'prediction'}), '(inputs=[input_ids, segment_ids], outputs=prediction)\n', (11156, 11209), True, 'import tensorflow.keras as keras\n'), ((11290, 11378), 'bert.load_bert_weights', 'bert.load_bert_weights', (['bert_model.bert_layer', "(model_folder_path + 'bert_model.ckpt')"], {}), "(bert_model.bert_layer, model_folder_path +\n 'bert_model.ckpt')\n", (11312, 11378), False, 'import bert\n'), ((11633, 11670), 'tensorflow.python.keras.utils.layer_utils.count_params', 'count_params', (['model.trainable_weights'], {}), '(model.trainable_weights)\n', (11645, 11670), False, 'from tensorflow.python.keras.utils.layer_utils import count_params\n'), ((11697, 11738), 'tensorflow.python.keras.utils.layer_utils.count_params', 'count_params', (['model.non_trainable_weights'], {}), '(model.non_trainable_weights)\n', (11709, 11738), False, 'from tensorflow.python.keras.utils.layer_utils import count_params\n'), ((11983, 12030), 'os.path.join', 'os.path.join', (['new_model_folder_path', '"""reallife"""'], {}), "(new_model_folder_path, 'reallife')\n", (11995, 12030), False, 'import os\n'), ((13195, 13214), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (13208, 13214), False, 'import copy\n'), ((13927, 13946), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (13940, 13946), False, 'import copy\n'), ((14705, 14724), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (14718, 14724), False, 'import copy\n'), ((6743, 6757), 'numpy.array', 'np.array', (['iids'], {}), '(iids)\n', (6751, 6757), True, 'import numpy as np\n'), ((6759, 6773), 'numpy.array', 'np.array', (['sids'], {}), '(sids)\n', (6767, 6773), True, 'import numpy as np\n'), ((6841, 6858), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (6850, 6858), True, 'import numpy as np\n'), ((7416, 7430), 'numpy.array', 'np.array', (['iids'], {}), '(iids)\n', (7424, 7430), True, 'import numpy as np\n'), ((7432, 7446), 'numpy.array', 'np.array', (['sids'], {}), '(sids)\n', (7440, 
7446), True, 'import numpy as np\n'), ((10860, 10905), 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda seq: seq[:, 0, :])'], {}), '(lambda seq: seq[:, 0, :])\n', (10879, 10905), True, 'import tensorflow.keras as keras\n'), ((10934, 10959), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (10954, 10959), True, 'import tensorflow.keras as keras\n'), ((10982, 11030), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(100)', 'activation': '"""relu"""'}), "(units=100, activation='relu')\n", (11000, 11030), True, 'import tensorflow.keras as keras\n'), ((11058, 11108), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(10)', 'activation': '"""softmax"""'}), "(units=10, activation='softmax')\n", (11076, 11108), True, 'import tensorflow.keras as keras\n'), ((12364, 12413), 'os.path.join', 'os.path.join', (['new_model_folder_path', '"""train.json"""'], {}), "(new_model_folder_path, 'train.json')\n", (12376, 12413), False, 'import os\n'), ((12721, 12768), 'os.path.join', 'os.path.join', (['new_model_folder_path', '"""dev.json"""'], {}), "(new_model_folder_path, 'dev.json')\n", (12733, 12768), False, 'import os\n'), ((13087, 13135), 'os.path.join', 'os.path.join', (['new_model_folder_path', '"""test.json"""'], {}), "(new_model_folder_path, 'test.json')\n", (13099, 13135), False, 'import os\n'), ((13360, 13386), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['review_text'], {}), '(review_text)\n', (13373, 13386), False, 'from nltk.tokenize import sent_tokenize\n'), ((14155, 14174), 'random.choice', 'random.choice', (['data'], {}), '(data)\n', (14168, 14174), False, 'import random\n'), ((732, 748), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (742, 748), False, 'import json\n'), ((4086, 4111), 'json.dump', 'json.dump', (['entry', 'outfile'], {}), '(entry, outfile)\n', (4095, 4111), False, 'import json\n'), ((5472, 5489), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5483, 5489), False, 'import random\n'), ((5522, 5564), 'random.sample', 'random.sample', (['majority_list', 'sample_count'], {}), '(majority_list, sample_count)\n', (5535, 5564), False, 'import random\n'), ((5573, 5586), 'random.seed', 'random.seed', ([], {}), '()\n', (5584, 5586), False, 'import random\n'), ((10332, 10379), 'sklearn.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {'digits': '(4)'}), '(y_true, y_pred, digits=4)\n', (10353, 10379), False, 'from sklearn.metrics import classification_report\n'), ((11402, 11431), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.1)'}), '(lr=0.1)\n', (11423, 11431), True, 'import tensorflow.keras as keras\n'), ((13481, 13501), 'copy.deepcopy', 'copy.deepcopy', (['entry'], {}), '(entry)\n', (13494, 13501), False, 'import copy\n'), ((10021, 10033), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (10030, 10033), True, 'import numpy as np\n'), ((10278, 10299), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (10287, 10299), True, 'import numpy as np\n'), ((13766, 13776), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13774, 13776), False, 'import sys\n')] |
from constructor import ArrayConstructor
from measure import MeasureMemory
import re
import array
class Trie(object):
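    # A succinct trie over a bit array; the select0/rank1 navigation below
    # matches a LOUDS-style encoding (our reading, the source does not name
    # the scheme). labels[n] holds the character of node n.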
def __init__(self, words, unit_scale=8):
bit_array, labels = self.create_tree(words)
self.rank1 = self.get_rank(1)
self.unit_scale = unit_scale
self.split_list = BitVector(bit_array, self.unit_scale).split_array()
self.zero_pos = [0]
c = 1
for i, v in enumerate(bit_array):
if v == 0:
self.zero_pos.append(i)
c+=1
self.zero_pos = array.array('I', self.zero_pos)
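        # zero_pos[n] caches the position of the n-th 0 bit, so select0 is O(1)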
self.bit_array = array.array('B',bit_array)
self.labels = array.array('u',labels)
    # Build the trie
def create_tree(self, words):
words = [word.lower() for word in words]
words.sort()
constructor = ArrayConstructor()
for word in words:
constructor.add(word)
bit_array, labels = constructor.dump()
return bit_array, labels
def rank(self, position, target_bit):
n = 0
for bit in self.bit_array[:position+1]:
if(bit == target_bit):
n += 1
return n
def select0(self, n):
return self.zero_pos[n]
def sub_rank1(self, position):
unit_num = int(position / self.unit_scale)
n = self.split_list[unit_num-1]
n+=sum(self.bit_array[unit_num * self.unit_scale : position+1])
return n
def get_rank(self, target_bit):
return lambda position: self.rank(position, target_bit)
    # Node traversal
def trace_children(self, current_node, character, cnt):
        # position just after the n-th 0 bit from the start of the bit array
index = self.select0(current_node) + 1
while(self.bit_array[index] == 1):
# ビット列の先頭から位置 k までに、1 のビットがいくつあるかを返す
if cnt == 0:
node = self.rank1(index)
else:
node = self.sub_rank1(index)
if(self.labels[node] == character):
cnt=1
return node, cnt
index += 1
return None, cnt
    # Word lookup
def search(self, query):
query = query.lower()
cnt = 0
node = 1
for c in query:
node, cnt = self.trace_children(node, c, cnt)
if(node is None):
return None
return node
    # Get the indexes of the child nodes
def get_children(self, parent_node_seq):
return [i for j in parent_node_seq for i in range(self.select0(int(j)), self.select0(int(j+1)))[1:]]
    # Collect every node (word) at or below the given search nodes
def get_below_nodes(self, node_list):
below_nodes = []
below_nodes.extend(node_list)
        # Descend level by level while child nodes exist
        while self.get_children(node_list) != []:
            tmp_list = [self.sub_rank1(i) for i in self.get_children(node_list)]
            below_nodes.extend(tmp_list)
            node_list = tmp_list
return below_nodes
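
# Illustrative usage sketch (not part of the original module): build a small
# trie and run a lookup. Assumes ArrayConstructor.dump() returns a LOUDS-style
# bit array plus per-node labels, as the methods above expect.
def _demo_trie():
    trie = Trie(["apple", "app", "banana"])
    node = trie.search("app")  # node id of the match, or None
    if node is None:
        return []
    # Every node at or below the match (e.g. for prefix expansion)
    return trie.get_below_nodes([node])
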
# rank
class BitVector:
def __init__(self, bit_array, unit_scale):
self.bit_array = bit_array
self.splited_array = None
self.n = 0
self.split_list = []
self.unit_scale = unit_scale
self.split_size = int(len(self.bit_array) / self.unit_scale)
def rank(self, position, target_bit):
n = 0
for bit in self.splited_array[:position+1]:
if(bit == target_bit):
n += 1
return n
def get_rank(self, target_bit):
return lambda position: self.rank(position, target_bit)
def split_array(self):
for i in range(self.split_size):
if i == self.split_size-1:
self.splited_array = self.bit_array[i*self.unit_scale:]
rank1 = self.get_rank(1)
else:
self.splited_array = self.bit_array[i*self.unit_scale:(i+1)*self.unit_scale]
rank1 = self.get_rank(1)
            self.n += rank1(len(self.splited_array))
self.split_list.append(self.n)
self.split_list = array.array('I', self.split_list)
        return self.split_list
 | [
"constructor.ArrayConstructor",
"array.array"
] | [((565, 596), 'array.array', 'array.array', (['"""I"""', 'self.zero_pos'], {}), "('I', self.zero_pos)\n", (576, 596), False, 'import array\n'), ((623, 650), 'array.array', 'array.array', (['"""B"""', 'bit_array'], {}), "('B', bit_array)\n", (634, 650), False, 'import array\n'), ((672, 696), 'array.array', 'array.array', (['"""u"""', 'labels'], {}), "('u', labels)\n", (683, 696), False, 'import array\n'), ((845, 863), 'constructor.ArrayConstructor', 'ArrayConstructor', ([], {}), '()\n', (861, 863), False, 'from constructor import ArrayConstructor\n'), ((4135, 4168), 'array.array', 'array.array', (['"""I"""', 'self.split_list'], {}), "('I', self.split_list)\n", (4146, 4168), False, 'import array\n')] |
"""A library for converting weather codes to symbols."""
import os.path
from io import BytesIO
import cairosvg
import imageio
from .glyphs import WMO_GLYPH_LOOKUP, DEFAULT_GLYPHS
from .codes import DATAPOINT_TO_WMO_LOOKUP, DARKSKY_TO_WMO_LOOKUP
class GlyphSet():
"""A set of glyphs."""
def __init__(self, name=None, recolor=None):
"""Load the lookup tables and cache all svgs into memory."""
self.name = name or DEFAULT_GLYPHS
self.glyph_set = WMO_GLYPH_LOOKUP[self.name]
self.recolor = recolor
self.cache = {}
for wmo_code in self.glyph_set:
self._load_svg(wmo_code)
def _repr_html_(self):
"""Return an inline HTML object of the unique glyphs in the set."""
response = ""
for _, svg in self.cache.items():
response += "{}".format(
Glyph(svg, recolor=self.recolor).repr_html())
return response
def _load_svg(self, wmo_code):
"""Load the svg image for a given WMO code as a bytestring."""
try:
svg_path = os.path.join(
os.path.dirname(__file__),
"assets",
self.name,
self.glyph_set[wmo_code])
except KeyError:
svg_path = os.path.join(
os.path.dirname(__file__), "assets", "missing.svg")
if svg_path in self.cache:
return self.cache[svg_path]
else:
with open(svg_path, 'rb') as svg:
self.cache[svg_path] = svg.read()
return self.cache[svg_path]
@staticmethod
def datapoint_to_wmo(datapoint_code):
"""Convert a datapoint code to a WMO code."""
return DATAPOINT_TO_WMO_LOOKUP[str(datapoint_code)]
@staticmethod
def darksky_to_wmo(darksky_code):
"""Convert a darksky code to a WMO code."""
return DARKSKY_TO_WMO_LOOKUP[str(darksky_code)]
def get_glyph(self, wmo_code=None, datapoint_code=None,
darksky_code=None, recolor=None):
"""Return a Glyph for a given weather code."""
if wmo_code is not None:
return Glyph(self._load_svg(wmo_code),
recolor=recolor or self.recolor)
if datapoint_code is not None:
return Glyph(self._load_svg(self.datapoint_to_wmo(datapoint_code)),
recolor=recolor or self.recolor)
if darksky_code is not None:
return Glyph(self._load_svg(self.darksky_to_wmo(darksky_code)),
recolor=recolor or self.recolor)
raise Exception("You must specify a valid type code")
class Glyph():
"""An individual glyph with methods to convert between types."""
def __init__(self, svg, recolor=None):
"""Init method."""
self.svg = svg
if recolor:
decoded_svg = self.svg.decode('utf-8')
for old_color, new_color in recolor.items():
decoded_svg = decoded_svg.replace(old_color, new_color)
self.svg = decoded_svg.encode('utf-8')
def _repr_html_(self):
"""Return an inline HTML object of the raw SVG."""
html = "<div style='width:40px;display:inline-block;'>{}</div>"
return html.format(self.svg.decode("utf-8"))
def repr_html(self):
"""Public version of _repr_html_."""
return self._repr_html_()
def to_svg(self):
"""Return a SVG bytestring."""
return self.svg
def to_png(self, scale=1):
"""Convert to a PNG bytestring."""
return cairosvg.svg2png(bytestring=self.svg,
scale=scale)
def to_np_array(self, scale=1):
"""Convert to a numpy array of RGB values."""
return imageio.imread(BytesIO(self.to_png(scale=scale)))
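
# Illustrative usage sketch (not part of the original module); the recolor
# mapping and WMO code below are arbitrary examples:
def _example_glyph_png():
    glyphs = GlyphSet(recolor={'#000000': '#336699'})
    glyph = glyphs.get_glyph(wmo_code='0')  # unknown codes fall back to missing.svg
    return glyph.to_png(scale=2)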
| [
"cairosvg.svg2png"
] | [((3574, 3624), 'cairosvg.svg2png', 'cairosvg.svg2png', ([], {'bytestring': 'self.svg', 'scale': 'scale'}), '(bytestring=self.svg, scale=scale)\n', (3590, 3624), False, 'import cairosvg\n')] |
#! /usr/bin/env python3
# randomQuizGenerator.py - Creates quizzes with questions and answers in
# random order, along with the answer key
import random
# The quiz data. Keys are states and values are their capitals.
capitals = {'Alabama': 'Montgomery',
'Alaska': 'Juneau',
'Arizona': 'Phoenix',
'Arkansas': '<NAME>',
'California': 'Sacramento',
'Colorado': 'Denver',
'Connecticut': 'Hartford',
'Delaware': 'Dover',
'Florida': 'Tallahassee',
'Georgia': 'Atlanta',
'Hawaii': 'Honolulu',
'Idaho': 'Boise',
'Illinois': 'Springfield',
'Indiana': 'Indianapolis',
'Iowa': 'Des Moines',
'Kansas': 'Topeka',
'Kentucky': 'Frankfort',
'Louisiana': 'Baton Rouge',
'Maine': 'Augusta',
'Maryland': 'Annapolis',
'Massachusetts': 'Boston',
'Michigan': 'Lansing',
'Minnesota': 'Saint Paul',
'Mississippi': 'Jackson',
'Missouri': 'Jefferson City',
'Montana': 'Helena',
'Nebraska': 'Lincoln',
'Nevada': 'Carson City',
'New Hampshire': 'Concord',
'New Jersey': 'Trenton',
'New Mexico': 'Santa Fe',
'New York': 'Albany',
'North Carolina': 'Raleigh',
'North Dakota': 'Bismarck',
'Ohio': 'Columbus',
'Oklahoma': 'Oklahoma City',
'Oregon': 'Salem',
'Pennsylvania': 'Harrisburg',
'Rhode Island': 'Providence',
'South Carolina': 'Columbia',
'South Dakota': 'Pierre',
'Tennessee': 'Nashville',
'Texas': 'Austin',
'Utah': 'Salt Lake City',
'Vermont': 'Montpelier',
'Virginia': 'Richmond',
'Washington': 'Olympia',
'West Virginia': 'Charleston',
'Wisconsin': 'Madison',
'Wyoming': 'Cheyenne'
}
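
# Ensure the output folder exists before any files are opened (a small
# robustness addition; the original assumes quizzes/ is already present).
import os
os.makedirs('quizzes', exist_ok=True)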
# generates 35 quiz/answer key files (can be altered to any number)
for x in range(35):
quiz = open(f'quizzes/capitals_quiz{x + 1}.txt', 'w')
answer_key = open(f'quizzes/capitals_quiz_answers{x + 1}.txt', 'w')
quiz.write('Name:\n\nDate:\n\nPeriod\n\n')
quiz.write((' ' * 20) + f'State Capitals Quiz (Form {x + 1})\n\n')
states = list(capitals.keys())
random.shuffle(states)
# iterates over each state
for Q in range(50):
correct_answer = capitals[states[Q]]
wrong_answers = list(capitals.values())
wrong_answers.remove(capitals[states[Q]])
answer_options = random.sample(wrong_answers, 3)
answer_options += [correct_answer]
random.shuffle(answer_options)
quiz.write(f'{Q + 1}. What\'s the capital of {states[Q]}?\n')
# creates 4 possible choices
for i in range(4):
quiz.write(f'\t{"ABCD"[i]}.\t{answer_options[i]}\n')
quiz.write('\n')
answer_key.write(f'{Q + 1}.\t{"ABCD"[answer_options.index(correct_answer)]}\n')
quiz.close()
answer_key.close()
| [
"random.sample",
"random.shuffle"
] | [((2723, 2745), 'random.shuffle', 'random.shuffle', (['states'], {}), '(states)\n', (2737, 2745), False, 'import random\n'), ((2970, 3001), 'random.sample', 'random.sample', (['wrong_answers', '(3)'], {}), '(wrong_answers, 3)\n', (2983, 3001), False, 'import random\n'), ((3053, 3083), 'random.shuffle', 'random.shuffle', (['answer_options'], {}), '(answer_options)\n', (3067, 3083), False, 'import random\n')] |
# MIT License
# Copyright (c) 2019 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from jarvis.skills.skill import AssistantSkill
from jarvis.utils.mongoDB import db
from jarvis.utils import input
header = """
-----------------------------------------------------------------------------------------------
I would like to learn, tell me the right answer!
-----------------------------------------------------------------------------------------------
* Note: Create new skill! Write your question and the appropriate answer.
\n
"""
class RememberSkills(AssistantSkill):
@classmethod
def remember(cls, **kwargs):
cls.console(header)
continue_add = True
        new_skills = []
        while continue_add:
            cls.console(text='Question: ')
            tags = cls.user_input()
            cls.console(text='Suggested Response: ')
            response = cls.user_input()
            new_skill = {'name': 'learned_skill',
                         'enable': True,
                         'func': cls.tell_response.__name__,
                         'response': response,
                         'tags': tags,
                         }
            new_skills.append(new_skill)
            cls.response('Add more? ', refresh_console=False)
            continue_add = input.check_input_to_continue()
        # Persist every skill gathered in this session, not just the last one.
        db.insert_many_documents(collection='learned_skills', documents=new_skills)
@classmethod
def tell_response(cls, **kwargs):
cls.response(kwargs.get('skill').get('response'))
@classmethod
def clear_learned_skills(cls, **kwargs):
if db.is_collection_empty(collection='learned_skills'):
cls.response("I can't find learned skills in my database")
else:
cls.response('I found learned skills..')
cls.response('Are you sure to remove learned skills? ', refresh_console=False)
user_answer = input.check_input_to_continue()
if user_answer:
db.drop_collection(collection='learned_skills')
cls.response("Perfect I have deleted them all")
| [
"jarvis.utils.mongoDB.db.is_collection_empty",
"jarvis.utils.mongoDB.db.drop_collection",
"jarvis.utils.mongoDB.db.insert_many_documents",
"jarvis.utils.input.check_input_to_continue"
] | [((2559, 2610), 'jarvis.utils.mongoDB.db.is_collection_empty', 'db.is_collection_empty', ([], {'collection': '"""learned_skills"""'}), "(collection='learned_skills')\n", (2581, 2610), False, 'from jarvis.utils.mongoDB import db\n'), ((2252, 2283), 'jarvis.utils.input.check_input_to_continue', 'input.check_input_to_continue', ([], {}), '()\n', (2281, 2283), False, 'from jarvis.utils import input\n'), ((2296, 2370), 'jarvis.utils.mongoDB.db.insert_many_documents', 'db.insert_many_documents', ([], {'collection': '"""learned_skills"""', 'documents': 'new_skill'}), "(collection='learned_skills', documents=new_skill)\n", (2320, 2370), False, 'from jarvis.utils.mongoDB import db\n'), ((2867, 2898), 'jarvis.utils.input.check_input_to_continue', 'input.check_input_to_continue', ([], {}), '()\n', (2896, 2898), False, 'from jarvis.utils import input\n'), ((2943, 2990), 'jarvis.utils.mongoDB.db.drop_collection', 'db.drop_collection', ([], {'collection': '"""learned_skills"""'}), "(collection='learned_skills')\n", (2961, 2990), False, 'from jarvis.utils.mongoDB import db\n')] |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import uuid
from kubernetes.client import api_client
from kubernetes.client.api import batch_v1_api
from kubernetes.e2e_test import base
class TestClientBatch(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config = base.get_e2e_configuration()
def test_job_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = batch_v1_api.BatchV1Api(client)
name = 'test-job-' + str(uuid.uuid4())
job_manifest = {
'kind': 'Job',
'spec': {
'template':
{'spec':
{'containers': [
{'image': 'busybox',
'name': name,
'command': ["sh", "-c", "sleep 5"]
}],
'restartPolicy': 'Never'},
'metadata': {'name': name}}},
'apiVersion': 'batch/v1',
'metadata': {'name': name}}
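        # The busybox container only sleeps for 5 seconds, so the job
        # finishes quickly once scheduled.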
resp = api.create_namespaced_job(
body=job_manifest, namespace='default')
self.assertEqual(name, resp.metadata.name)
resp = api.read_namespaced_job(
name=name, namespace='default')
self.assertEqual(name, resp.metadata.name)
resp = api.delete_namespaced_job(
            name=name, body={}, namespace='default')
 | [
"kubernetes.client.api.batch_v1_api.BatchV1Api",
"kubernetes.client.api_client.ApiClient",
"kubernetes.e2e_test.base.get_e2e_configuration",
"uuid.uuid4"
] | [((833, 861), 'kubernetes.e2e_test.base.get_e2e_configuration', 'base.get_e2e_configuration', ([], {}), '()\n', (859, 861), False, 'from kubernetes.e2e_test import base\n'), ((910, 957), 'kubernetes.client.api_client.ApiClient', 'api_client.ApiClient', ([], {'configuration': 'self.config'}), '(configuration=self.config)\n', (930, 957), False, 'from kubernetes.client import api_client\n'), ((972, 1003), 'kubernetes.client.api.batch_v1_api.BatchV1Api', 'batch_v1_api.BatchV1Api', (['client'], {}), '(client)\n', (995, 1003), False, 'from kubernetes.client.api import batch_v1_api\n'), ((1038, 1050), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1048, 1050), False, 'import uuid\n')] |
"""
Model definition adapted from: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
import math
from typing import Optional, List, Union, Type
import torch.nn as nn
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=(3, 3), stride=stride,
padding=1, bias=False)
class _BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation: int = 1):
super(_BasicBlock, self).__init__()
if dilation > 1:
raise NotImplementedError("Dilation > 1 not implemented in BasicBlock")
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class _Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation: int = 1):
super(_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=(3, 3), stride=stride,
                               padding=(dilation, dilation), bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=(1, 1), bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block: Type[Union[_BasicBlock, _Bottleneck]], layers: List[int],
replace_stride_with_dilation: Optional[List[bool]] = None, **kwargs):
super(ResNet, self).__init__()
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
f"replace_stride_with_dilation should be None or a 3-tuple, got {replace_stride_with_dilation}")
self.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3),
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block: Type[Union[_BasicBlock, _Bottleneck]], planes: int, blocks: int, stride: int = 1,
dilate: bool = False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=(1, 1), stride=(stride, stride), bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(inplanes=self.inplanes, planes=planes, stride=stride, downsample=downsample,
dilation=previous_dilation))
self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            # Later blocks in a dilated stage must also use the stage dilation.
            layers.append(block(self.inplanes, planes, dilation=self.dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class ResNet18(ResNet):
def __init__(self, **kwargs):
super(ResNet18, self).__init__(_BasicBlock, [2, 2, 2, 2], **kwargs)
class ResNet34(ResNet):
def __init__(self, **kwargs):
super(ResNet34, self).__init__(_BasicBlock, [3, 4, 6, 3], **kwargs)
class ResNet50(ResNet):
def __init__(self, **kwargs):
super(ResNet50, self).__init__(_Bottleneck, [3, 4, 6, 3], **kwargs)
class ResNet101(ResNet):
def __init__(self, **kwargs):
super(ResNet101, self).__init__(_Bottleneck, [3, 4, 23, 3], **kwargs)
class ResNet152(ResNet):
def __init__(self, **kwargs):
super(ResNet152, self).__init__(_Bottleneck, [3, 8, 36, 3], **kwargs)
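
# Illustrative usage sketch (not part of the original module): a dilated
# ResNet-50 backbone with output stride 8, a common setup for dense prediction.
def _example_dilated_backbone():
    import torch
    model = ResNet50(replace_stride_with_dilation=[False, True, True])
    out = model(torch.randn(1, 3, 224, 224))
    return out.shape  # expected: torch.Size([1, 2048, 28, 28])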
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Sequential",
"math.sqrt",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d"
] | [((701, 796), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3, 3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding\n =1, bias=False)\n', (710, 796), True, 'import torch.nn as nn\n'), ((1179, 1201), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1193, 1201), True, 'import torch.nn as nn\n'), ((1222, 1243), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1229, 1243), True, 'import torch.nn as nn\n'), ((1308, 1330), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1322, 1330), True, 'import torch.nn as nn\n'), ((1951, 2010), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(1, 1)', 'bias': '(False)'}), '(inplanes, planes, kernel_size=(1, 1), bias=False)\n', (1960, 2010), True, 'import torch.nn as nn\n'), ((2030, 2052), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2044, 2052), True, 'import torch.nn as nn\n'), ((2074, 2185), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3, 3)', 'stride': 'stride', 'padding': '(1, 1)', 'bias': '(False)', 'dilation': 'dilation'}), '(planes, planes, kernel_size=(3, 3), stride=stride, padding=(1, 1),\n bias=False, dilation=dilation)\n', (2083, 2185), True, 'import torch.nn as nn\n'), ((2232, 2254), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2246, 2254), True, 'import torch.nn as nn\n'), ((2276, 2350), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', '(planes * self.expansion)'], {'kernel_size': '(1, 1)', 'bias': '(False)'}), '(planes, planes * self.expansion, kernel_size=(1, 1), bias=False)\n', (2285, 2350), True, 'import torch.nn as nn\n'), ((2370, 2409), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * self.expansion)'], {}), '(planes * self.expansion)\n', (2384, 2409), True, 'import torch.nn as nn\n'), ((2430, 2451), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2437, 2451), True, 'import torch.nn as nn\n'), ((3582, 3661), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7, 7)', 'stride': '(2, 2)', 'padding': '(3, 3)', 'bias': '(False)'}), '(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n', (3591, 3661), True, 'import torch.nn as nn\n'), ((3712, 3730), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (3726, 3730), True, 'import torch.nn as nn\n'), ((3751, 3772), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3758, 3772), True, 'import torch.nn as nn\n'), ((3796, 3859), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)'}), '(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n', (3808, 3859), True, 'import torch.nn as nn\n'), ((5564, 5586), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (5577, 5586), True, 'import torch.nn as nn\n'), ((5018, 5129), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.inplanes', '(planes * block.expansion)'], {'kernel_size': '(1, 1)', 'stride': '(stride, stride)', 'bias': '(False)'}), '(self.inplanes, planes * block.expansion, kernel_size=(1, 1),\n stride=(stride, stride), bias=False)\n', (5027, 5129), True, 'import torch.nn as nn\n'), ((5169, 5209), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * block.expansion)'], {}), '(planes * block.expansion)\n', (5183, 5209), True, 'import torch.nn as nn\n'), ((4446, 4464), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (4455, 4464), False, 'import math\n')]
import os
import pefile
import time
import re
import click
import subprocess
data_directory_list = ['DIRECTORY_ENTRY_DEBUG', 'DIRECTORY_ENTRY_EXPORT', 'DIRECTORY_ENTRY_LOAD_CONFIG',
'DIRECTORY_ENTRY_RESOURCE', 'DIRECTORY_ENTRY_BASERELOC', 'DIRECTORY_ENTRY_TLS']
normal_section_names = ['.text', '.rdata', '.data', '.pdata', '.rsrc', '.idata', '.bss', '.code', '.edata']
def entropy(name, path):
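    # Whole-file entropy comes from the external 'ent' utility, which must be
    # on PATH; the per-section entropies below come from pefile itself.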
    entropy_list = []
    file_entropy = subprocess.check_output("ent '{}' | head -n 1 | cut -d' ' -f 3".format(path + name),
                                            shell=True).decode('utf8')
    entropy_list.append(file_entropy[0:-1])  # drop the trailing newline
pe = pefile.PE(path + name)
text_flag = False
data_flag = False
for section in pe.sections:
try:
section_name = (section.Name).decode('utf-8')
section_name = section_name.replace('\x00','')
if section_name =='.text':
text_entropy = section.get_entropy()
text_flag = True
elif section_name =='.data':
data_entropy = section.get_entropy()
data_flag = True
except:
continue
entropy_list.append(text_entropy if text_flag else -1)
entropy_list.append(data_entropy if data_flag else -1)
return entropy_list
def section_name_checker(section_names):
"""
:param section_names:
an array of section names of a program
:return:
a 1*2d array that indicate number of nonsuspicious sections and number of suspicious sections,respectively
"""
number_of_suspicious_names = 0
number_of_nonsuspicious_names = 0
for name in section_names:
if name in normal_section_names:
number_of_nonsuspicious_names += 1
else:
number_of_suspicious_names += 1
    return number_of_suspicious_names, number_of_nonsuspicious_names
def empty_section_name_checker(section_names):
#---- normalize names --------
for i in range(len(section_names)):
        section_names[i] = re.sub(' +', ' ', section_names[i])
if '' in section_names or ' ' in section_names:
# print(file_name)
return 0
else:
return 1
def data_directory_checker(pe,data_directory_name):
try:
if getattr(pe,data_directory_name):
return 1
else:
return 0
except:
return 0
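
# Per-sample output row layout, matching the writes in feature_extractor below:
# data_directory_bits, file_info_count, checksum_flag,
# file_entropy, text_entropy, data_entropy,
# suspicious_sections, nonsuspicious_sections, empty_section_name_flag, sample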
@click.command()
@click.option('--path', required=True, help='path of samples')
@click.option('--outputfile', default='features.txt', help='output file name for storing extracted features')
def feature_extractor(path,outputfile):
start_time = time.time()
samples = os.listdir(path)
features_outputfile = open(outputfile,'w')
for sample in samples:
try:
pe = pefile.PE(path + sample)
# ----------------- Data Directories --------------------
temp = ''
for data_directory in data_directory_list:
temp += str(data_directory_checker(pe, data_directory))
features_outputfile.write('{},'.format(int(temp,2)))
print(int(temp,2))
# ---------------------- file_info -----------------------
count = 0
try:
for entry in pe.FileInfo:
if entry[0].Key == b'StringFileInfo':
entry = entry[0]
for st in entry.StringTable:
for entry in (st.entries.items()):
count += 1
if entry[1].Key == b'StringFileInfo':
entry = entry[1]
for st in entry.StringTable:
for entry in (st.entries.items()):
count += 1
features_outputfile.write('{},'.format(count))
except:
features_outputfile.write('{},'.format(count))
print(count)
# ---------------------- checksum ------------------------
try:
checksum = pe.OPTIONAL_HEADER.CheckSum
                features_outputfile.write('0,' if checksum == 0 else '1,')
            except:
                features_outputfile.write('0,')
# ------------------------- entropy ---------------------------
entropies = entropy(sample, path)
for entro in entropies:
features_outputfile.write('{},'.format(entro))
print(entropies)
# ----------------------- section names -----------------------
section_names = []
try:
sections = pe.sections
for section in sections:
name = (section.Name).decode('utf-8')
name = name.replace('\x00', '')
section_names.append(name)
except:
continue
section_name_features = section_name_checker(section_names)
features_outputfile.write('{},{},'.format(section_name_features[0], section_name_features[1]))
empty_section_names = empty_section_name_checker(section_names)
features_outputfile.write('{},{}\n'.format(empty_section_names, sample))
print(section_name_features)
print(empty_section_names)
except:
print('{} is not a pe file'.format(sample))
end_time = time.time()
print('feature extraction time: {}s'.format(end_time - start_time))
if __name__ == '__main__':
feature_extractor()
| [
"os.listdir",
"click.option",
"time.time",
"re.sub",
"click.command",
"pefile.PE"
] | [((2394, 2409), 'click.command', 'click.command', ([], {}), '()\n', (2407, 2409), False, 'import click\n'), ((2411, 2472), 'click.option', 'click.option', (['"""--path"""'], {'required': '(True)', 'help': '"""path of samples"""'}), "('--path', required=True, help='path of samples')\n", (2423, 2472), False, 'import click\n'), ((2474, 2587), 'click.option', 'click.option', (['"""--outputfile"""'], {'default': '"""features.txt"""', 'help': '"""output file name for storing extracted features"""'}), "('--outputfile', default='features.txt', help=\n 'output file name for storing extracted features')\n", (2486, 2587), False, 'import click\n'), ((659, 681), 'pefile.PE', 'pefile.PE', (['(path + name)'], {}), '(path + name)\n', (668, 681), False, 'import pefile\n'), ((2641, 2652), 'time.time', 'time.time', ([], {}), '()\n', (2650, 2652), False, 'import time\n'), ((2668, 2684), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2678, 2684), False, 'import os\n'), ((5515, 5526), 'time.time', 'time.time', ([], {}), '()\n', (5524, 5526), False, 'import time\n'), ((2041, 2076), 're.sub', 're.sub', (['""" +"""', '""" """', 'section_names[i]'], {}), "(' +', ' ', section_names[i])\n", (2047, 2076), False, 'import re\n'), ((2789, 2813), 'pefile.PE', 'pefile.PE', (['(path + sample)'], {}), '(path + sample)\n', (2798, 2813), False, 'import pefile\n')] |
import threading
import time
from dredis.db import NUMBER_OF_REDIS_DATABASES, DB_MANAGER, KEY_CODEC
DEFAULT_GC_INTERVAL = 500 # milliseconds
DEFAULT_GC_BATCH_SIZE = 10000 # number of storage keys to delete in a batch
class KeyGarbageCollector(threading.Thread):
def __init__(self, gc_interval=DEFAULT_GC_INTERVAL, batch_size=DEFAULT_GC_BATCH_SIZE):
threading.Thread.__init__(self, name="Key Garbage Collector")
self._gc_interval_in_secs = gc_interval / 1000.0 # convert to seconds
self._batch_size = batch_size
def run(self):
while True:
self.collect()
time.sleep(self._gc_interval_in_secs)
def collect(self):
for db_id in range(NUMBER_OF_REDIS_DATABASES):
with DB_MANAGER.thread_lock:
self._collect(DB_MANAGER.get_db(db_id))
def _collect(self, db):
deleted = 0
with db.write_batch() as batch:
for deleted_db_key, _ in db.iterator(prefix=KEY_CODEC.MIN_DELETED_VALUE):
_, _, deleted_key_value = KEY_CODEC.decode_key(deleted_db_key)
for db_key, _ in db.iterator(prefix=deleted_key_value):
deleted += 1
batch.delete(db_key)
if deleted == self._batch_size:
return
batch.delete(deleted_db_key)
| [
"threading.Thread.__init__",
"dredis.db.KEY_CODEC.decode_key",
"time.sleep",
"dredis.db.DB_MANAGER.get_db"
] | [((369, 430), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {'name': '"""Key Garbage Collector"""'}), "(self, name='Key Garbage Collector')\n", (394, 430), False, 'import threading\n'), ((627, 664), 'time.sleep', 'time.sleep', (['self._gc_interval_in_secs'], {}), '(self._gc_interval_in_secs)\n', (637, 664), False, 'import time\n'), ((1058, 1094), 'dredis.db.KEY_CODEC.decode_key', 'KEY_CODEC.decode_key', (['deleted_db_key'], {}), '(deleted_db_key)\n', (1078, 1094), False, 'from dredis.db import NUMBER_OF_REDIS_DATABASES, DB_MANAGER, KEY_CODEC\n'), ((815, 839), 'dredis.db.DB_MANAGER.get_db', 'DB_MANAGER.get_db', (['db_id'], {}), '(db_id)\n', (832, 839), False, 'from dredis.db import NUMBER_OF_REDIS_DATABASES, DB_MANAGER, KEY_CODEC\n')] |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
urlencode_postdata,
)
class PromptFileIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?promptfile\.com/l/(?P<id>[0-9A-Z\-]+)'
_TEST = {
'url': 'http://www.promptfile.com/l/86D1CE8462-576CAAE416',
'md5': '5a7e285a26e0d66d9a263fae91bc92ce',
'info_dict': {
'id': '86D1CE8462-576CAAE416',
'ext': 'mp4',
'title': 'oceans.mp4',
'thumbnail': r're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if re.search(r'<div.+id="not_found_msg".+>(?!We are).+</div>[^-]', webpage) is not None:
raise ExtractorError('Video %s does not exist' % video_id,
expected=True)
chash = self._search_regex(
r'val\("([^"]*)"\s*\+\s*\$\("#chash"\)', webpage, 'chash')
fields = self._hidden_inputs(webpage)
keys = list(fields.keys())
chash_key = keys[0] if len(keys) == 1 else next(
key for key in keys if key.startswith('cha'))
fields[chash_key] = chash + fields[chash_key]
webpage = self._download_webpage(
url, video_id, 'Downloading video page',
data=urlencode_postdata(fields),
headers={'Content-type': 'application/x-www-form-urlencoded'})
video_url = self._search_regex(
(r'<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1[^>]*>\s*Download File',
r'<a[^>]+href=(["\'])(?P<url>https?://(?:www\.)?promptfile\.com/file/(?:(?!\1).)+)\1'),
webpage, 'video url', group='url')
title = self._html_search_regex(
r'<span.+title="([^"]+)">', webpage, 'title')
thumbnail = self._html_search_regex(
r'<div id="player_overlay">.*button>.*?<img src="([^"]+)"',
webpage, 'thumbnail', fatal=False, flags=re.DOTALL)
formats = [{
'format_id': 'sd',
'url': video_url,
'ext': determine_ext(title),
}]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}
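
# Usage note: extractors like this are not run directly; the youtube-dl CLI
# dispatches to them by matching _VALID_URL, e.g.:
#   youtube-dl "http://www.promptfile.com/l/86D1CE8462-576CAAE416"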
| [
"re.search"
] | [((802, 873), 're.search', 're.search', (['"""<div.+id="not_found_msg".+>(?!We are).+</div>[^-]"""', 'webpage'], {}), '(\'<div.+id="not_found_msg".+>(?!We are).+</div>[^-]\', webpage)\n', (811, 873), False, 'import re\n')] |
#! /usr/bin/python3
##################################################################
#
# Raspberry Pi Antenna Driver (RPiAntDrv.py)
#
# Python GUI script to control H-Bridge via RPi.
# H-Bridge drives single DC motor tuned antenna.
#
# Name Call Date(s)
# Authors: <NAME> N7IFC Mar-May2020
#
##################################################################
from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button
from tkinter import Scale, IntVar, StringVar, Toplevel
from tkinter import RAISED, HORIZONTAL, LEFT, S, W, SW, NW
from pathlib import Path
import configparser
import RPi.GPIO as GPIO
class Window(Frame):
# Define settings upon initialization
def __init__(self, master=None):
# parameters to send through the Frame class.
Frame.__init__(self, master)
#reference to the master widget, which is the tk window
self.master = master
# Retrieve parent script directory for absolute addressing
self.base_path = Path(__file__).parent
self.ini_path = str(self.base_path)+'/RPiAntDrv.ini'
#print (self.ini_path)
# Raspberry Pi I/O pins get reassigned when ini file is read
self.pwm_freq = 4000 # PWM Freq in Hz
self.pwm_duty = 0 # PWM Duty in percent, default to 0%
self.stall_time = 250 # Motor stall time in mS
self.encoder_count = IntVar() # Antenna reed switch count
self.encoder_count.set(0)
self.motor_running = False # Motor running flag
self.motor_stalled = False # Motor stalled flag
self.stall_active = False # Stall detection active
self.stall_count = 0 # Encoder count during stall detection
self.full_speed = 100 # Full speed PWM duty cycle
self.slow_speed = 25 # Slow speed PWM duty cycle
self.antenna_raising = False # Motor direction flag
self.ant_config_sect = ("null") # Active ini file config section
self.ant_preset_sect = ("null") # Active ini file preset section
self.ant_preset_val = 0 # Preset encoder target value from ini presets
self.status_message = StringVar() # Status message text for text_2
# Run init_window, which doesn't yet exist
self.init_window()
#Creation of init_window
def init_window(self):
self.master.title('RPi Antenna Driver (v1.6)')
# Set up root window & size (width x height + x_offset + y_offset)
self.bg_color = 'azure'
self.master.geometry("350x275+150+100")
self.master.configure(bg= self.bg_color)
# Create menu entry and sub-options
menubar = Menu(self.master)
self.master.config(menu=menubar)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open", command=self.about)
filemenu.add_command(label="Save", command=self.about)
filemenu.add_command(label="Save as...", command=self.about)
filemenu.add_separator()
filemenu.add_command(label="Quit", command=self.close)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label="Default ini", command=self.confirm_newini)
editmenu.add_command(label="Sync Count", command=self.confirm_sync)
editmenu.add_command(label="Undefined 2", command=self.about)
menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="About", command=self.about)
menubar.add_cascade(label="Help", menu=helpmenu)
text_1 = Label(textvariable=self.encoder_count, font = ('Helvetica', 30),
bg = self.bg_color, fg='black', pady=5, height=1)
text_1.grid(row=0, column=0, rowspan=2, pady=1, sticky=S)
text_2 = Label(text='Status:', font = ('Helvetica', 14),
bg = self.bg_color, fg='black', height=1,
anchor=SW, width=22, justify=LEFT)
text_2.grid(row=0, column=1, columnspan=1, sticky=SW)
text_3 = Label(textvariable=self.status_message, font = ('Helvetica', 12),
bg='white', fg='black', height=1, anchor=NW, width=22,
borderwidth=1, relief="solid")
text_3.grid(row=1, column=1, sticky=NW)
text_4 = Label(text='Motor Speed (%):', font = ('Helvetica', 14),
bg = self.bg_color, fg='black', padx=1, height=1,
anchor=SW, width=22, justify=LEFT)
text_4.grid(row=2, column=1, columnspan=1, sticky=S)
text_5 = Label(text='Antenna Selection:', font = ('Helvetica', 14),
bg = self.bg_color, fg='black', padx=1, height=1,
anchor=SW, width=22, justify=LEFT)
text_5.grid(row=4, column=1, columnspan=1, sticky=S)
text_6 = Label(text='Preset Selection:', font = ('Helvetica', 14),
bg = self.bg_color, fg='black', padx=1, height=1,
anchor=W, width=22, justify=LEFT)
text_6.grid(row=6, column=1, columnspan=1, sticky=S)
self.raise_button = Button(text='Raise', relief=RAISED, bd=4, padx=1,
pady=1, height=2, width=6, font=('Helvetica', 14))
self.raise_button.grid(row=2, column=0, padx=20, pady=5, rowspan=2)
self.raise_button.bind("<ButtonPress>", self.raise_button_press)
self.raise_button.bind("<ButtonRelease>", self.RL_button_release)
self.lower_button = Button(text='Lower', relief=RAISED, bd=4, padx=1,
pady=1, height=2, width=6, font=('Helvetica', 14))
self.lower_button.grid(row=4, column=0, padx=20, pady=5, rowspan=2)
self.lower_button.bind("<ButtonPress>", self.lower_button_press)
self.lower_button.bind("<ButtonRelease>", self.RL_button_release)
self.preset_button = Button(text='Preset', relief=RAISED, bd=4, padx=1,
pady=1, height=2, width=6, font=('Helvetica', 14))
self.preset_button.grid(row=6, column=0, padx=5, pady=5, rowspan=2)
self.preset_button.bind("<ButtonPress>", self.preset_button_press)
self.duty_scale = Scale(from_=1, to=100, orient = HORIZONTAL,
resolution = 1, length=200,
command = self.update_pwm_duty)
self.duty_scale.grid(row=3,column=1, sticky=NW)
# Antenna preset combo box is populated with values from ini file
self.antenna_combobox = ttk.Combobox(width=19, font=('Helvetica', 14),
state='readonly')
self.antenna_combobox.grid(row=5, column=1, sticky=NW)
self.antenna_combobox.bind("<<ComboboxSelected>>", self.get_antenna_val)
# Antenna preset combo box is populated with values from ini file
self.preset_combobox = ttk.Combobox(width=19, font=('Helvetica', 14),
state='readonly')
self.preset_combobox.grid(row=7, column=1, sticky=NW)
self.preset_combobox.bind("<<ComboboxSelected>>", self.get_preset_val)
self.ini_test () # Check for ini file existence
self.ini_read() # Retrieve ini file settings
self.gpioconfig() # Set up GPIO for antenna control
return
def raise_button_press(self, _unused):
self.motor_stalled = 0
self.motor_up ()
def lower_button_press(self, _unused):
self.motor_stalled = 0
self.motor_down ()
def RL_button_release(self, _unused):
self.motor_stop ()
self.status_message.set ("Ready")
def preset_button_press(self, _unused):
self.motor_stalled = 0
self.motor_move()
def confirm_newini(self):
okay = messagebox.askokcancel('RPiAntDrv',
'Overwrite Configuration File?',
detail='This will overwrite the '
'RPiAntDrv.ini file with default '
'values.', icon='question')
if okay:
# Overwrite the ini file and refresh values
self.ini_new()
self.ini_read()
self.status_message.set ("RPiAntDrv.ini written")
else:
self.status_message.set ("Operation cancelled")
def confirm_sync(self):
okay = messagebox.askokcancel('RPiAntDrv',
'Proceed with Sync?',
                                       detail='This will synchronize the '
'antenna encoder count to the preset '
'value selected.', icon='question')
if okay:
            # Synchronize encoder count with current preset value
self.encoder_count.set(self.ant_preset_val)
self.status_message.set ("Encoder syncronized")
else:
self.status_message.set ("Encoder sync canceled")
def motor_up(self):
# We can change speed on the fly
self.pwm_set.ChangeDutyCycle(self.pwm_duty)
# If motor is not already running and in correct direction
if not(self.motor_running and self.antenna_raising):
# check reverse motor lead flag
GPIO.output(self.dir1_pin, GPIO.HIGH) # Run motor FWD
GPIO.output(self.dir2_pin, GPIO.LOW)
self.antenna_raising = 1
self.motor_running = 1
# Initialize stall counter and start stall timer
self.motor_stall()
def motor_down(self):
# We can change speed on the fly
self.pwm_set.ChangeDutyCycle(self.pwm_duty)
# If motor is not running and in correct direction
if not(self.motor_running and not self.antenna_raising):
GPIO.output(self.dir1_pin, GPIO.LOW) # Run motor
GPIO.output(self.dir2_pin, GPIO.HIGH)
self.motor_running = 1
self.antenna_raising = 0
# Initialize stall detection
self.motor_stall()
def motor_stop(self):
GPIO.output(self.dir1_pin, GPIO.LOW) # Stop motor
GPIO.output(self.dir2_pin, GPIO.LOW)
self.pwm_set.ChangeDutyCycle(0) # Kill PWM
self.motor_running = 0
#self.ini_update()
def motor_stall(self):
        # Scale the stall window inversely with motor speed
        # (slower speed -> longer window before declaring a stall)
        self.stall_period = int((100 / self.duty_scale.get()) * self.stall_time)
# If motor is still running, perform stall check
if (self.motor_running):
# If stall detection is not already active
if not(self.stall_active):
self.stall_count = self.encoder_count.get()
self.stall_active = 1
self.master.after(self.stall_period, self.motor_stall)
# Otherwise see if we stalled
elif (self.stall_count == self.encoder_count.get()):
self.motor_stalled = 1
self.motor_stop()
self.stall_active = 0
self.status_message.set ("! Antenna Stalled !")
# Else reset stall count and timer
else:
self.stall_count = self.encoder_count.get()
self.master.after(self.stall_period, self.motor_stall)
else:
self.stall_active = 0
def motor_move(self):
# If motor is stalled, exit
if (self.motor_stalled == 1):
return
# If encoder count = preset, stop and exit
if self.encoder_count.get() == (self.ant_preset_val):
self.motor_stop()
self.status_message.set ("We have arrived")
return
# If encoder count within 5 counts of preset, slow down
Lval= (self.ant_preset_val -5)
Hval= (self.ant_preset_val +6)
if self.encoder_count.get() in range(Lval, Hval):
self.status_message.set ("Slowing down")
self.duty_scale.set(self.slow_speed)
# Else run full speed
else:
self.status_message.set ("Full speed")
self.duty_scale.set(self.full_speed)
# If encoder count > preset drive antenna down
if self.encoder_count.get() > (self.ant_preset_val):
self.motor_down()
# Else drive antenna up
else:
self.motor_up()
# after 100mS, call this function again
self.master.after(100, self.motor_move)
def get_antenna_val(self, _unused):
# fetch new antenna configuration and presets
config = configparser.ConfigParser()
config.read (self.ini_path)
self.last_antenna = self.antenna_combobox.get()
self.ant_refresh(config)
self.pwm_set.ChangeFrequency(self.pwm_freq)
def get_preset_val(self, _unused):
# get the preset value stored in the ini file
config = configparser.ConfigParser()
config.read (self.ini_path)
self.ant_preset_val = (config.getint(self.ant_preset_sect,
self.preset_combobox.get()))
#print (self.ant_preset_val)
def update_pwm_duty(self, _unused):
self.pwm_duty = self.duty_scale.get()
#print (_unused)
def gpioconfig(self): # Configure GPIO pins
GPIO.setwarnings(False)
GPIO.cleanup() # In case user changes running configuration
GPIO.setmode(GPIO.BOARD) # Refer to IO as Board header pins
GPIO.setup(self.dir1_pin, GPIO.OUT) # Direction output 1 to H-bridge
GPIO.setup(self.dir2_pin, GPIO.OUT) # Direction output 2 to H-bridge
GPIO.output(self.dir1_pin, GPIO.LOW) # Turn direction output 1 off
GPIO.output(self.dir2_pin, GPIO.LOW) # Turn direction output 2 off
GPIO.setup(self.pwm_pin, GPIO.OUT) # PWM output to H-bridge
# Set up the simple encoder switch input and add de-bounce time in mS
# GPIO.RISING interrupts on both edges, GPIO.FALLING seems better behaved
GPIO.setup(self.encoder_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(self.encoder_pin, GPIO.FALLING,
bouncetime=40, callback=self.encoder_ISR)
# Note GPIO.PWM is software not hardware PWM
self.pwm_set = GPIO.PWM(self.pwm_pin, self.pwm_freq) # Set up PWM for use
#self.pwm_set.stop() # Stop pwm output
self.pwm_set.start(self.pwm_duty) # Start pwm output at 0%
GPIO.setwarnings(True)
def encoder_ISR(self, _channel):
# Do as little as possible in the ISR, get in and get out!
# Increment the encoder count and jump out
if self.antenna_raising == 1:
self.encoder_count.set (self.encoder_count.get()+1)
else:
self.encoder_count.set (self.encoder_count.get()-1)
def ini_new(self): # Set up an ini file if it does not exist
# Configuration file parser to read and write ini file
config = configparser.ConfigParser()
# User configurable program settings
config['Settings'] = {'pwm_pin':'19',
'dir1_pin':'13',
'dir2_pin':'15',
'encoder_pin':'11',
'antennas':'Antenna 1, Antenna 2',
'last_position':'0',
'last_antenna':'Antenna 1',
'last_preset':'20m 14.400 (037)'}
# Set up default antennas
config['Antenna 1_Config'] = {'pwm_freq':'4000',
'full_speed':'100',
'slow_speed':'25',
'stall_time':'250'}
config['Antenna 1_Preset'] = {'maximum (270)':'270',
'80m _3.500 (226)':'226',
'80m _3.580 (221)':'221',
'80m _3.800 (206)':'206',
'80m _3.900 (199)':'199',
'80m _4.000 (192)':'192',
'60m _5.300 (130)':'130',
'60m _5.400 (127)':'127',
'40m _7.035 (091)':'91',
'40m _7.175 (089)':'89',
'40m _7.300 (087)':'87',
'30m 10.000 (056)':'56',
'30m 10.100 (055)':'55',
'30m 10.200 (054)':'54',
'20m 14.000 (039)':'39',
'20m 14.200 (038)':'38',
'20m 14.400 (037)':'37',
'15m 21.275 (019)':'19',
'12m 24.930 (014)':'14',
'10m 28.000 (008)':'8',
'10m 29.700 (006)':'6',
'minimum (000)':'0'}
config['Antenna 2_Config'] = {'pwm_freq':'4000',
'full_speed':'95',
'slow_speed':'20',
'stall_time':'250'}
config['Antenna 2_Preset'] = {'maximum (270)':'270',
'80m _3.700 (200)':'200',
'60m _5.350 (129)':'129',
'40m _7.250 (090)':'90',
'30m 10.100 (055)':'55',
'20m 14.200 (038)':'38',
'minimum (000)':'0'}
# Save the default configuration file
with open(self.ini_path, 'w') as configfile:
config.write(configfile)
def ini_test(self):
# Test to see if configuration file exists
try:
with open(self.ini_path) as _file:
# pass condition
self.status_message.set ("Configuration file loaded")
except IOError as _e:
#Does not exist OR no read permissions
self.status_message.set ("Configuration file created")
self.ini_new ()
def ini_read(self):
# Read ini file and set up parameters
config = configparser.ConfigParser()
config.read (self.ini_path)
# Retrieve I/O pin assignments
self.pwm_pin = (config.getint ('Settings','pwm_pin',fallback=19))
self.dir1_pin = (config.getint ('Settings','dir1_pin',fallback=13))
self.dir2_pin = (config.getint ('Settings','dir2_pin',fallback=15))
self.encoder_pin = (config.getint ('Settings','encoder_pin',fallback=11))
# Restore the encoder count to preset value
self.encoder_count.set (config.getint('Settings','last_position',fallback=0))
self.ant_preset_val = self.encoder_count.get()
# Retrieve the last antenna used and restore saved state
# Grab CSV list of antennas to act as combobox values and keys
# The .strip method removes leading and trailing spaces from .split list
_antennas = (config.get('Settings','antennas',fallback="Antenna 1"))
self.antenna_combobox['values']=[item.strip() for item in _antennas.split(',')]
self.last_antenna = (config.get('Settings','last_antenna',fallback="Antenna 1"))
self.antenna_combobox.set(self.last_antenna)
self.preset_combobox.set(config.get('Settings','last_preset',fallback='None'))
# refresh antenna settings and presets
self.ant_refresh(config)
def ant_refresh (self,config):
# Using selected antenna refresh antenna settings and presets
self.ant_config_sect = (self.last_antenna + '_Config')
self.ant_preset_sect = (self.last_antenna + '_Preset')
self.pwm_freq = (config.getint (self.ant_config_sect,'pwm_freq',fallback=4000))
self.full_speed = (config.getint (self.ant_config_sect,'full_speed',fallback=100))
self.slow_speed = (config.getint (self.ant_config_sect,'slow_speed',fallback=25))
self.stall_time = (config.getint (self.ant_config_sect,'stall_time',fallback=250))
self.preset_combobox['values']=(config.options(self.ant_preset_sect))
def ini_update(self):
config = configparser.ConfigParser()
# Perform read-modify-write of ini file
        # Note: Anything written must be a string value
config.read (self.ini_path)
config.set ('Settings','last_position',str(self.encoder_count.get()))
config.set ('Settings','last_antenna',self.antenna_combobox.get())
config.set ('Settings','last_preset',self.preset_combobox.get())
# Save modified configuration file
with open(self.ini_path, 'w') as configfile:
config.write(configfile)
self.status_message.set ("ini file updated")
def close(self): # Cleanly close the GUI and cleanup the GPIO
self.ini_update() # Save current settings
GPIO.cleanup()
#print ("GPIO cleanup executed")
self.master.destroy()
#print ("master window destroyed")
def about(self):
popup = Toplevel()
popup.title("About RPiAntDrv")
popup.geometry("325x225+162+168")
popup.configure(bg= 'snow')
popup_text1 = Label(popup, text='RPiAntDrv.py v1.6',
font = ('Helvetica', 12), wraplength=300, justify=LEFT,
bg = 'snow', fg='black', padx=10, pady=10)
popup_text1.grid(row=0, column=0, columnspan=1)
popup_text2 = Label(popup, text='This Python script is used to control '
'a motor tuned antenna like a screwdriver antenna or '
'tuned loop. Feedback from the antenna is provided by '
'a simple dry contact or pulse output relative to the '
'output shaft turning.',
font = ('Helvetica', 12), wraplength=300, justify=LEFT,
bg = 'snow', fg='black', padx=10, pady=10)
popup_text2.grid(row=1, column=0, columnspan=1)
popup.mainloop()
def main():
# root window created. Here, that would be the only window, but
# you can later have windows within windows.
root = Tk()
app = Window(root) #creation of an instance
root.protocol("WM_DELETE_WINDOW", app.close) # cleanup GPIO when X closes window
root.mainloop() # Loops forever
if __name__ == '__main__':
main()
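
# Runtime note: this script expects a Raspberry Pi with RPi.GPIO available;
# the pin numbers above use the BOARD numbering scheme (see GPIO.setmode).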
| [
"configparser.ConfigParser",
"RPi.GPIO.output",
"tkinter.Button",
"RPi.GPIO.PWM",
"tkinter.Label",
"RPi.GPIO.setmode",
"RPi.GPIO.cleanup",
"pathlib.Path",
"tkinter.StringVar",
"tkinter.ttk.Combobox",
"tkinter.Menu",
"RPi.GPIO.setup",
"RPi.GPIO.setwarnings",
"tkinter.Toplevel",
"tkinter.I... | [((23276, 23280), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (23278, 23280), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((809, 837), 'tkinter.Frame.__init__', 'Frame.__init__', (['self', 'master'], {}), '(self, master)\n', (823, 837), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((1463, 1471), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (1469, 1471), False, 'from tkinter import Scale, IntVar, StringVar, Toplevel\n'), ((2286, 2297), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (2295, 2297), False, 'from tkinter import Scale, IntVar, StringVar, Toplevel\n'), ((2813, 2830), 'tkinter.Menu', 'Menu', (['self.master'], {}), '(self.master)\n', (2817, 2830), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((2899, 2923), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (2903, 2923), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((3300, 3324), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (3304, 3324), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((3635, 3659), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (3639, 3659), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((3807, 3922), 'tkinter.Label', 'Label', ([], {'textvariable': 'self.encoder_count', 'font': "('Helvetica', 30)", 'bg': 'self.bg_color', 'fg': '"""black"""', 'pady': '(5)', 'height': '(1)'}), "(textvariable=self.encoder_count, font=('Helvetica', 30), bg=self.\n bg_color, fg='black', pady=5, height=1)\n", (3812, 3922), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((4037, 4161), 'tkinter.Label', 'Label', ([], {'text': '"""Status:"""', 'font': "('Helvetica', 14)", 'bg': 'self.bg_color', 'fg': '"""black"""', 'height': '(1)', 'anchor': 'SW', 'width': '(22)', 'justify': 'LEFT'}), "(text='Status:', font=('Helvetica', 14), bg=self.bg_color, fg='black',\n height=1, anchor=SW, width=22, justify=LEFT)\n", (4042, 4161), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((4296, 4449), 'tkinter.Label', 'Label', ([], {'textvariable': 'self.status_message', 'font': "('Helvetica', 12)", 'bg': '"""white"""', 'fg': '"""black"""', 'height': '(1)', 'anchor': 'NW', 'width': '(22)', 'borderwidth': '(1)', 'relief': '"""solid"""'}), "(textvariable=self.status_message, font=('Helvetica', 12), bg='white',\n fg='black', height=1, anchor=NW, width=22, borderwidth=1, relief='solid')\n", (4301, 4449), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((4568, 4710), 'tkinter.Label', 'Label', ([], {'text': '"""Motor Speed (%):"""', 'font': "('Helvetica', 14)", 'bg': 'self.bg_color', 'fg': '"""black"""', 'padx': '(1)', 'height': '(1)', 'anchor': 'SW', 'width': '(22)', 'justify': 'LEFT'}), "(text='Motor Speed (%):', font=('Helvetica', 14), bg=self.bg_color, fg\n ='black', padx=1, height=1, anchor=SW, width=22, justify=LEFT)\n", (4573, 4710), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((4843, 4986), 'tkinter.Label', 'Label', ([], {'text': '"""Antenna Selection:"""', 'font': "('Helvetica', 14)", 'bg': 'self.bg_color', 'fg': '"""black"""', 'padx': '(1)', 'height': '(1)', 'anchor': 'SW', 'width': '(22)', 'justify': 'LEFT'}), "(text='Antenna Selection:', font=('Helvetica', 14), bg=self.bg_color,\n 
fg='black', padx=1, height=1, anchor=SW, width=22, justify=LEFT)\n", (4848, 4986), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((5120, 5261), 'tkinter.Label', 'Label', ([], {'text': '"""Preset Selection:"""', 'font': "('Helvetica', 14)", 'bg': 'self.bg_color', 'fg': '"""black"""', 'padx': '(1)', 'height': '(1)', 'anchor': 'W', 'width': '(22)', 'justify': 'LEFT'}), "(text='Preset Selection:', font=('Helvetica', 14), bg=self.bg_color,\n fg='black', padx=1, height=1, anchor=W, width=22, justify=LEFT)\n", (5125, 5261), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((5407, 5511), 'tkinter.Button', 'Button', ([], {'text': '"""Raise"""', 'relief': 'RAISED', 'bd': '(4)', 'padx': '(1)', 'pady': '(1)', 'height': '(2)', 'width': '(6)', 'font': "('Helvetica', 14)"}), "(text='Raise', relief=RAISED, bd=4, padx=1, pady=1, height=2, width=6,\n font=('Helvetica', 14))\n", (5413, 5511), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((5783, 5887), 'tkinter.Button', 'Button', ([], {'text': '"""Lower"""', 'relief': 'RAISED', 'bd': '(4)', 'padx': '(1)', 'pady': '(1)', 'height': '(2)', 'width': '(6)', 'font': "('Helvetica', 14)"}), "(text='Lower', relief=RAISED, bd=4, padx=1, pady=1, height=2, width=6,\n font=('Helvetica', 14))\n", (5789, 5887), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((6160, 6266), 'tkinter.Button', 'Button', ([], {'text': '"""Preset"""', 'relief': 'RAISED', 'bd': '(4)', 'padx': '(1)', 'pady': '(1)', 'height': '(2)', 'width': '(6)', 'font': "('Helvetica', 14)"}), "(text='Preset', relief=RAISED, bd=4, padx=1, pady=1, height=2, width=\n 6, font=('Helvetica', 14))\n", (6166, 6266), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((6463, 6565), 'tkinter.Scale', 'Scale', ([], {'from_': '(1)', 'to': '(100)', 'orient': 'HORIZONTAL', 'resolution': '(1)', 'length': '(200)', 'command': 'self.update_pwm_duty'}), '(from_=1, to=100, orient=HORIZONTAL, resolution=1, length=200, command\n =self.update_pwm_duty)\n', (6468, 6565), False, 'from tkinter import Scale, IntVar, StringVar, Toplevel\n'), ((6802, 6866), 'tkinter.ttk.Combobox', 'ttk.Combobox', ([], {'width': '(19)', 'font': "('Helvetica', 14)", 'state': '"""readonly"""'}), "(width=19, font=('Helvetica', 14), state='readonly')\n", (6814, 6866), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((7169, 7233), 'tkinter.ttk.Combobox', 'ttk.Combobox', ([], {'width': '(19)', 'font': "('Helvetica', 14)", 'state': '"""readonly"""'}), "(width=19, font=('Helvetica', 14), state='readonly')\n", (7181, 7233), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((8133, 8301), 'tkinter.messagebox.askokcancel', 'messagebox.askokcancel', (['"""RPiAntDrv"""', '"""Overwrite Configuration File?"""'], {'detail': '"""This will overwrite the RPiAntDrv.ini file with default values."""', 'icon': '"""question"""'}), "('RPiAntDrv', 'Overwrite Configuration File?', detail\n ='This will overwrite the RPiAntDrv.ini file with default values.',\n icon='question')\n", (8155, 8301), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((8775, 8946), 'tkinter.messagebox.askokcancel', 'messagebox.askokcancel', (['"""RPiAntDrv"""', '"""Proceed with Sync?"""'], {'detail': '"""This will sychronize the antenna encoder count to the preset value selected."""', 'icon': '"""question"""'}), "('RPiAntDrv', 'Proceed with Sync?', 
detail=\n 'This will sychronize the antenna encoder count to the preset value selected.'\n , icon='question')\n", (8797, 8946), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((10500, 10536), 'RPi.GPIO.output', 'GPIO.output', (['self.dir1_pin', 'GPIO.LOW'], {}), '(self.dir1_pin, GPIO.LOW)\n', (10511, 10536), True, 'import RPi.GPIO as GPIO\n'), ((10558, 10594), 'RPi.GPIO.output', 'GPIO.output', (['self.dir2_pin', 'GPIO.LOW'], {}), '(self.dir2_pin, GPIO.LOW)\n', (10569, 10594), True, 'import RPi.GPIO as GPIO\n'), ((13000, 13027), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (13025, 13027), False, 'import configparser\n'), ((13324, 13351), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (13349, 13351), False, 'import configparser\n'), ((13751, 13774), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (13767, 13774), True, 'import RPi.GPIO as GPIO\n'), ((13783, 13797), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (13795, 13797), True, 'import RPi.GPIO as GPIO\n'), ((13876, 13900), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (13888, 13900), True, 'import RPi.GPIO as GPIO\n'), ((13962, 13997), 'RPi.GPIO.setup', 'GPIO.setup', (['self.dir1_pin', 'GPIO.OUT'], {}), '(self.dir1_pin, GPIO.OUT)\n', (13972, 13997), True, 'import RPi.GPIO as GPIO\n'), ((14046, 14081), 'RPi.GPIO.setup', 'GPIO.setup', (['self.dir2_pin', 'GPIO.OUT'], {}), '(self.dir2_pin, GPIO.OUT)\n', (14056, 14081), True, 'import RPi.GPIO as GPIO\n'), ((14130, 14166), 'RPi.GPIO.output', 'GPIO.output', (['self.dir1_pin', 'GPIO.LOW'], {}), '(self.dir1_pin, GPIO.LOW)\n', (14141, 14166), True, 'import RPi.GPIO as GPIO\n'), ((14211, 14247), 'RPi.GPIO.output', 'GPIO.output', (['self.dir2_pin', 'GPIO.LOW'], {}), '(self.dir2_pin, GPIO.LOW)\n', (14222, 14247), True, 'import RPi.GPIO as GPIO\n'), ((14292, 14326), 'RPi.GPIO.setup', 'GPIO.setup', (['self.pwm_pin', 'GPIO.OUT'], {}), '(self.pwm_pin, GPIO.OUT)\n', (14302, 14326), True, 'import RPi.GPIO as GPIO\n'), ((14528, 14591), 'RPi.GPIO.setup', 'GPIO.setup', (['self.encoder_pin', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(self.encoder_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (14538, 14591), True, 'import RPi.GPIO as GPIO\n'), ((14600, 14699), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['self.encoder_pin', 'GPIO.FALLING'], {'bouncetime': '(40)', 'callback': 'self.encoder_ISR'}), '(self.encoder_pin, GPIO.FALLING, bouncetime=40,\n callback=self.encoder_ISR)\n', (14621, 14699), True, 'import RPi.GPIO as GPIO\n'), ((14802, 14839), 'RPi.GPIO.PWM', 'GPIO.PWM', (['self.pwm_pin', 'self.pwm_freq'], {}), '(self.pwm_pin, self.pwm_freq)\n', (14810, 14839), True, 'import RPi.GPIO as GPIO\n'), ((15023, 15045), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(True)'], {}), '(True)\n', (15039, 15045), True, 'import RPi.GPIO as GPIO\n'), ((15548, 15575), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (15573, 15575), False, 'import configparser\n'), ((19122, 19149), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (19147, 19149), False, 'import configparser\n'), ((21172, 21199), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (21197, 21199), False, 'import configparser\n'), ((21893, 21907), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (21905, 21907), True, 'import RPi.GPIO as GPIO\n'), ((22076, 22086), 'tkinter.Toplevel', 'Toplevel', ([], {}), 
'()\n', (22084, 22086), False, 'from tkinter import Scale, IntVar, StringVar, Toplevel\n'), ((22235, 22375), 'tkinter.Label', 'Label', (['popup'], {'text': '"""RPiAntDrv.py v1.6"""', 'font': "('Helvetica', 12)", 'wraplength': '(300)', 'justify': 'LEFT', 'bg': '"""snow"""', 'fg': '"""black"""', 'padx': '(10)', 'pady': '(10)'}), "(popup, text='RPiAntDrv.py v1.6', font=('Helvetica', 12), wraplength\n =300, justify=LEFT, bg='snow', fg='black', padx=10, pady=10)\n", (22240, 22375), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((22516, 22864), 'tkinter.Label', 'Label', (['popup'], {'text': '"""This Python script is used to control a motor tuned antenna like a screwdriver antenna or tuned loop. Feedback from the antenna is provided by a simple dry contact or pulse output relative to the output shaft turning."""', 'font': "('Helvetica', 12)", 'wraplength': '(300)', 'justify': 'LEFT', 'bg': '"""snow"""', 'fg': '"""black"""', 'padx': '(10)', 'pady': '(10)'}), "(popup, text=\n 'This Python script is used to control a motor tuned antenna like a screwdriver antenna or tuned loop. Feedback from the antenna is provided by a simple dry contact or pulse output relative to the output shaft turning.'\n , font=('Helvetica', 12), wraplength=300, justify=LEFT, bg='snow', fg=\n 'black', padx=10, pady=10)\n", (22521, 22864), False, 'from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button\n'), ((1061, 1075), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1065, 1075), False, 'from pathlib import Path\n'), ((9683, 9720), 'RPi.GPIO.output', 'GPIO.output', (['self.dir1_pin', 'GPIO.HIGH'], {}), '(self.dir1_pin, GPIO.HIGH)\n', (9694, 9720), True, 'import RPi.GPIO as GPIO\n'), ((9749, 9785), 'RPi.GPIO.output', 'GPIO.output', (['self.dir2_pin', 'GPIO.LOW'], {}), '(self.dir2_pin, GPIO.LOW)\n', (9760, 9785), True, 'import RPi.GPIO as GPIO\n'), ((10214, 10250), 'RPi.GPIO.output', 'GPIO.output', (['self.dir1_pin', 'GPIO.LOW'], {}), '(self.dir1_pin, GPIO.LOW)\n', (10225, 10250), True, 'import RPi.GPIO as GPIO\n'), ((10275, 10312), 'RPi.GPIO.output', 'GPIO.output', (['self.dir2_pin', 'GPIO.HIGH'], {}), '(self.dir2_pin, GPIO.HIGH)\n', (10286, 10312), True, 'import RPi.GPIO as GPIO\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'window_profile.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow
class Profile(QMainWindow):
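    """Profile window; form generated from 'window_profile.ui' (see header comment)."""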
def __init__(self):
super().__init__()
self.setObjectName("MainWindow")
self.resize(397, 374)
self.centralwidget = QtWidgets.QWidget(self)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 381, 361))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.lineEdit = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit.setText("")
self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout.addWidget(self.lineEdit, 0, 1, 1, 1)
self.label_6 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 6, 0, 1, 1)
self.lineEdit_6 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_6.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_6.setObjectName("lineEdit_6")
self.gridLayout.addWidget(self.lineEdit_6, 7, 1, 1, 1)
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.dateEdit = QtWidgets.QDateEdit(self.verticalLayoutWidget)
self.dateEdit.setAlignment(QtCore.Qt.AlignCenter)
self.dateEdit.setObjectName("dateEdit")
self.gridLayout.addWidget(self.dateEdit, 6, 1, 1, 1)
self.lineEdit_8 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        self.lineEdit_8.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_8.setObjectName("lineEdit_8")
        self.gridLayout.addWidget(self.lineEdit_8, 8, 1, 1, 1)
self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 7, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_7.setAlignment(QtCore.Qt.AlignCenter)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 8, 0, 1, 1)
self.lineEdit_4 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_4.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_4.setObjectName("lineEdit_4")
self.gridLayout.addWidget(self.lineEdit_4, 4, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.lineEdit_5 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_5.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_5.setObjectName("lineEdit_5")
self.gridLayout.addWidget(self.lineEdit_5, 5, 1, 1, 1)
self.lineEdit_3 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_3.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_3.setObjectName("lineEdit_3")
self.gridLayout.addWidget(self.lineEdit_3, 3, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 5, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 4, 0, 1, 1)
self.lineEdit_2 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_2.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_2.setObjectName("lineEdit_2")
self.gridLayout.addWidget(self.lineEdit_2, 2, 1, 1, 1)
self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(
self.label_9, 1, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.lineEdit_7 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_7.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_7.setObjectName("lineEdit_7")
self.gridLayout.addWidget(self.lineEdit_7, 1, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_2 = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.pushButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.setCentralWidget(self.centralwidget)
self.retranslateUi()
QtCore.QMetaObject.connectSlotsByName(self)
self.show()
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_6.setText(_translate("MainWindow", "Birthday:"))
self.label.setText(_translate("MainWindow", "Name:"))
self.label_8.setText(_translate("MainWindow", "Company:"))
self.label_7.setText(_translate("MainWindow", "Salary:"))
self.label_3.setText(_translate("MainWindow", "Password:"))
self.label_2.setText(_translate("MainWindow", "Username:"))
self.label_5.setText(_translate("MainWindow", "Status:"))
self.label_4.setText(_translate("MainWindow", "Repeat Password:"))
self.label_9.setText(_translate("MainWindow", "CPF/CNPJ"))
self.pushButton_2.setText(_translate("MainWindow", "Cancel"))
self.pushButton.setText(_translate("MainWindow", "Save"))
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QDateEdit",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QLineE... | [((466, 489), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self'], {}), '(self)\n', (483, 489), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((584, 621), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (601, 621), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((802, 850), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (823, 850), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((996, 1019), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (1017, 1019), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1096, 1142), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (1115, 1142), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1367, 1410), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (1383, 1410), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1600, 1646), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (1619, 1646), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1843, 1886), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (1859, 1886), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2066, 2112), 'PyQt5.QtWidgets.QDateEdit', 'QtWidgets.QDateEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (2085, 2112), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2306, 2352), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (2325, 2352), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2551, 2594), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (2567, 2594), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2781, 2824), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (2797, 2824), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3014, 3060), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (3033, 3060), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3259, 3302), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (3275, 3302), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3489, 3532), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (3505, 3532), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3722, 3768), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (3741, 3768), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3970, 4016), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (3989, 4016), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4215, 4258), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (4231, 4258), False, 'from 
PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4445, 4488), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (4461, 4488), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4678, 4724), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (4697, 4724), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4923, 4966), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (4939, 4966), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5136, 5182), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (5155, 5182), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5445, 5468), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (5466, 5468), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5561, 5609), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (5582, 5609), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5751, 5799), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (5772, 5799), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6058, 6101), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['self'], {}), '(self)\n', (6095, 6101), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((668, 698), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(381)', '(361)'], {}), '(10, 10, 381, 361)\n', (680, 698), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import glob
import os
import tempfile
from unittest import mock
from . import utils
from hotsos.core.config import setup_config, HotSOSConfig
from hotsos.core.searchtools import (
FileSearcher,
FilterDef,
SearchDef,
SearchResult,
SequenceSearchDef,
)
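# Fixture strings used as input by the search tests below.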
FILTER_TEST_1 = """blah blah ERROR blah
blah blah ERROR blah
blah blah INFO blah
"""
SEQ_TEST_1 = """a start point
leads to
an ending
"""
SEQ_TEST_2 = """a start point
another start point
leads to
an ending
"""
SEQ_TEST_3 = """a start point
another start point
leads to
an ending
a start point
"""
SEQ_TEST_4 = """a start point
another start point
value is 3
"""
SEQ_TEST_5 = """a start point
another start point
value is 3
another start point
value is 4
"""
SEQ_TEST_6 = """section 1
1_1
1_2
section 2
2_1
"""
SEQ_TEST_7 = """section 1
1_1
1_2
section 2
2_1
section 3
3_1
"""
MULTI_SEQ_TEST = """
sectionB 1
1_1
sectionA 1
1_1
sectionB 2
2_2
sectionA 2
2_1
"""
class TestSearchTools(utils.BaseTestCase):
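    """Unit tests for FileSearcher and its FilterDef/SearchDef/SequenceSearchDef inputs."""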
@mock.patch.object(os, "environ", {})
@mock.patch.object(os, "cpu_count")
def test_filesearcher_num_cpus_no_override(self, mock_cpu_count):
mock_cpu_count.return_value = 3
with mock.patch.object(FileSearcher, 'num_files_to_search', 4):
s = FileSearcher()
self.assertEqual(s.num_cpus, 3)
@mock.patch.object(os, "environ", {})
@mock.patch.object(os, "cpu_count")
def test_filesearcher_num_cpus_files_capped(self, mock_cpu_count):
mock_cpu_count.return_value = 3
with mock.patch.object(FileSearcher, 'num_files_to_search', 2):
s = FileSearcher()
self.assertEqual(s.num_cpus, 2)
@mock.patch.object(os, "cpu_count")
def test_filesearcher_num_cpus_w_override(self, mock_cpu_count):
setup_config(MAX_PARALLEL_TASKS=2)
mock_cpu_count.return_value = 3
with mock.patch.object(FileSearcher, 'num_files_to_search', 4):
s = FileSearcher()
self.assertEqual(s.num_cpus, 2)
def test_filesearcher_logs(self):
expected = {9891: '2022-02-09 22:50:18.131',
9892: '2022-02-09 22:50:19.703'}
logs_root = "var/log/neutron/"
filepath = os.path.join(HotSOSConfig.DATA_ROOT, logs_root,
'neutron-openvswitch-agent.log.2.gz')
globpath = os.path.join(HotSOSConfig.DATA_ROOT, logs_root,
'neutron-l3-agent.log')
globpath_file1 = os.path.join(HotSOSConfig.DATA_ROOT, logs_root,
'neutron-l3-agent.log')
globpath_file2 = os.path.join(HotSOSConfig.DATA_ROOT, logs_root,
'neutron-l3-agent.log.1.gz')
s = FileSearcher()
sd = SearchDef(r'^(\S+\s+[0-9:\.]+)\s+.+full sync.+', tag="T1")
s.add_search_term(sd, filepath)
sd = SearchDef(r'^(\S+\s+[0-9:\.]+)\s+.+ERROR.+', tag="T2")
s.add_search_term(sd, filepath)
sd = SearchDef((r'^(\S+\s+[0-9:\.]+)\s+.+ INFO .+ Router [0-9a-f\-]+'
'.+'), tag="T3")
s.add_search_term(sd, globpath)
sd = SearchDef(r'non-existant-pattern', tag="T4")
# search for something that doesn't exist to test that code path
s.add_search_term(sd, globpath)
results = s.search()
self.assertEqual(set(results.files), set([filepath, globpath]))
self.assertEqual(len(results.find_by_path(filepath)), 1220)
tag_results = results.find_by_tag("T1", path=filepath)
self.assertEqual(len(tag_results), 2)
for result in tag_results:
ln = result.linenumber
self.assertEqual(result.tag, "T1")
self.assertEqual(result.get(1), expected[ln])
tag_results = results.find_by_tag("T1")
self.assertEqual(len(tag_results), 2)
for result in tag_results:
ln = result.linenumber
self.assertEqual(result.tag, "T1")
self.assertEqual(result.get(1), expected[ln])
self.assertEqual(len(results.find_by_path(globpath_file1)), 1)
self.assertEqual(len(results.find_by_path(globpath_file2)), 0)
# these files have the same content so expect same result from both
expected = {5380: '2022-02-10 16:09:22.641'}
path_results = results.find_by_path(globpath_file1)
for result in path_results:
ln = result.linenumber
self.assertEqual(result.tag, "T3")
self.assertEqual(result.get(1), expected[ln])
path_results = results.find_by_path(globpath_file2)
for result in path_results:
ln = result.linenumber
self.assertEqual(result.tag, "T3")
self.assertEqual(result.get(1), expected[ln])
def test_filesearcher_network_info(self):
filepath = os.path.join(HotSOSConfig.DATA_ROOT, 'sos_commands',
'networking', 'ip_-d_address')
filepath2 = os.path.join(HotSOSConfig.DATA_ROOT, 'sos_commands',
'networking', 'ip_-s_-d_link')
ip = "10.0.0.128"
mac = "22:c2:7b:1c:12:1b"
s = FileSearcher()
sd = SearchDef(r".+({}).+".format(ip))
s.add_search_term(sd, filepath)
sd = SearchDef(r"^\s+link/ether\s+({})\s+.+".format(mac))
s.add_search_term(sd, filepath2)
results = s.search()
self.assertEqual(set(results.files), set([filepath, filepath2]))
self.assertEqual(len(results.find_by_path(filepath)), 1)
self.assertEqual(len(results.find_by_path(filepath2)), 2)
self.assertEqual(results.find_by_path(filepath)[0].linenumber, 38)
for result in results.find_by_path(filepath):
self.assertEqual(result.get(1), ip)
expected = {52: mac,
141: mac}
for result in results.find_by_path(filepath2):
ln = result.linenumber
self.assertEqual(result.tag, None)
self.assertEqual(result.get(1), expected[ln])
def test_filesearcher_error(self):
s = FileSearcher()
with mock.patch.object(SearchResult, '__init__') as mock_init:
def fake_init(*args, **kwargs):
raise EOFError("some error")
mock_init.side_effect = fake_init
path = os.path.join(HotSOSConfig.DATA_ROOT)
s.add_search_term(SearchDef("."), path)
s.search()
def test_filesearch_filesort(self):
ordered_contents = []
self.maxDiff = None
with tempfile.TemporaryDirectory() as dtmp:
os.mknod(os.path.join(dtmp, "my-test-agent.log"))
ordered_contents.append("my-test-agent.log")
os.mknod(os.path.join(dtmp, "my-test-agent.log.1"))
ordered_contents.append("my-test-agent.log.1")
# add in an erroneous file that does not follow logrotate format
os.mknod(os.path.join(dtmp, "my-test-agent.log.tar.gz"))
for i in range(2, 100):
fname = "my-test-agent.log.{}.gz".format(i)
os.mknod(os.path.join(dtmp, fname))
ordered_contents.append(fname)
self.assertEqual(FileSearcher().logrotate_file_sort(fname), i)
ordered_contents.append("my-test-agent.log.tar.gz")
contents = os.listdir(dtmp)
self.assertEqual(sorted(contents,
key=FileSearcher().logrotate_file_sort),
ordered_contents)
def test_filesearch_glob_filesort(self):
dir_contents = []
self.maxDiff = None
with tempfile.TemporaryDirectory() as dtmp:
dir_contents.append(os.path.join(dtmp, "my-test-agent.0.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.1.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.1.log.1"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.2.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.16.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.49.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.49.log.1"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.77.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.100.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.100.log.1"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.110.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.142.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.183.log"))
for e in dir_contents:
os.mknod(e)
for i in range(2, HotSOSConfig.MAX_LOGROTATE_DEPTH + 10):
fname = os.path.join(dtmp,
"my-test-agent.1.log.{}.gz".format(i))
os.mknod(fname)
if i <= HotSOSConfig.MAX_LOGROTATE_DEPTH:
dir_contents.append(fname)
for i in range(2, HotSOSConfig.MAX_LOGROTATE_DEPTH + 10):
fname = os.path.join(dtmp,
"my-test-agent.49.log.{}.gz".format(i))
os.mknod(fname)
if i <= HotSOSConfig.MAX_LOGROTATE_DEPTH:
dir_contents.append(fname)
for i in range(2, HotSOSConfig.MAX_LOGROTATE_DEPTH + 10):
fname = os.path.join(dtmp,
"my-test-agent.100.log.{}.gz".format(i))
os.mknod(fname)
if i <= HotSOSConfig.MAX_LOGROTATE_DEPTH:
dir_contents.append(fname)
exp = sorted(dir_contents)
path = os.path.join(dtmp, 'my-test-agent*.log*')
act = sorted(FileSearcher().filtered_paths(glob.glob(path)))
self.assertEqual(act, exp)
def test_sequence_searcher(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_1)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^a\S* (start\S*) point\S*"),
body=SearchDef(r"leads to"),
end=SearchDef(r"^an (ending)$"),
tag="seq-search-test1")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "start")
elif r.tag == sd.end_tag:
self.assertEqual(r.get(1), "ending")
os.remove(ftmp.name)
def test_sequence_searcher_overlapping(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_2)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^(a\S*) (start\S*) point\S*"),
body=SearchDef(r"leads to"),
end=SearchDef(r"^an (ending)$"),
tag="seq-search-test2")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "another")
elif r.tag == sd.end_tag:
self.assertEqual(r.get(1), "ending")
os.remove(ftmp.name)
def test_sequence_searcher_overlapping_incomplete(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_3)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^(a\S*) (start\S*) point\S*"),
body=SearchDef(r"leads to"),
end=SearchDef(r"^an (ending)$"),
tag="seq-search-test3")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "another")
elif r.tag == sd.end_tag:
self.assertEqual(r.get(1), "ending")
os.remove(ftmp.name)
def test_sequence_searcher_incomplete_eof_match(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_4)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^(a\S*) (start\S*) point\S*"),
body=SearchDef(r"value is (\S+)"),
end=SearchDef(r"^$"),
tag="seq-search-test4")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "another")
elif r.tag == sd.body_tag:
self.assertEqual(r.get(1), "3")
elif r.tag == sd.end_tag:
self.assertEqual(r.get(0), "")
os.remove(ftmp.name)
def test_sequence_searcher_multiple_sections(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_5)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^(a\S*) (start\S*) point\S*"),
body=SearchDef(r"value is (\S+)"),
end=SearchDef(r"^$"),
tag="seq-search-test5")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 2)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "another")
elif r.tag == sd.body_tag:
self.assertTrue(r.get(1) in ["3", "4"])
elif r.tag == sd.end_tag:
self.assertEqual(r.get(0), "")
os.remove(ftmp.name)
def test_sequence_searcher_eof(self):
"""
Test scenario:
* multiple sections that end with start of the next
* start def matches any start
* end def matches any start
* file ends before start of next
"""
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_6)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(r"^section (\d+)"),
body=SearchDef(r"\d_\d"),
tag="seq-search-test6")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 2)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
section = r.get(1)
self.assertTrue(r.get(1) in ["1", "2"])
elif r.tag == sd.body_tag:
if section == "1":
self.assertTrue(r.get(0) in ["1_1", "1_2"])
else:
self.assertTrue(r.get(0) in ["2_1"])
os.remove(ftmp.name)
def test_sequence_searcher_section_start_end_same(self):
"""
Test scenario:
* multiple sections that end with start of the next
* start def matches unique start
* end def matches any start
"""
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_7)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(r"^section (2)"),
body=SearchDef(r"\d_\d"),
end=SearchDef(
r"^section (\d+)"),
tag="seq-search-test7")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "2")
elif r.tag == sd.body_tag:
self.assertTrue(r.get(0) in ["2_1"])
os.remove(ftmp.name)
def test_sequence_searcher_multi_sequence(self):
"""
Test scenario:
        * search containing multiple sequence definitions
* data containing 2 results of each where one is incomplete
* test that single incomplete result gets removed
"""
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(MULTI_SEQ_TEST)
ftmp.close()
s = FileSearcher()
sdA = SequenceSearchDef(start=SearchDef(r"^sectionA (\d+)"),
body=SearchDef(r"\d_\d"),
end=SearchDef(
r"^section\S+ (\d+)"),
tag="seqA-search-test")
sdB = SequenceSearchDef(start=SearchDef(r"^sectionB (\d+)"),
body=SearchDef(r"\d_\d"),
end=SearchDef(
r"^section\S+ (\d+)"),
tag="seqB-search-test")
s.add_search_term(sdA, path=ftmp.name)
s.add_search_term(sdB, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sdA)
self.assertEqual(len(sections), 1)
sections = results.find_sequence_sections(sdB)
self.assertEqual(len(sections), 2)
os.remove(ftmp.name)
def test_search_filter(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(FILTER_TEST_1)
ftmp.close()
s = FileSearcher()
fd = FilterDef(r" (INFO)")
s.add_filter_term(fd, path=ftmp.name)
sd = SearchDef(r".+ INFO (.+)")
s.add_search_term(sd, path=ftmp.name)
results = s.search().find_by_path(ftmp.name)
self.assertEqual(len(results), 1)
for r in results:
self.assertEqual(r.get(1), "blah")
os.remove(ftmp.name)
def test_search_filter_invert_match(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(FILTER_TEST_1)
ftmp.close()
s = FileSearcher()
fd = FilterDef(r" (ERROR)", invert_match=True)
s.add_filter_term(fd, path=ftmp.name)
sd = SearchDef(r".+ INFO (.+)")
s.add_search_term(sd, path=ftmp.name)
results = s.search().find_by_path(ftmp.name)
self.assertEqual(len(results), 1)
for r in results:
self.assertEqual(r.get(1), "blah")
os.remove(ftmp.name)
| [
"hotsos.core.config.setup_config",
"tempfile.TemporaryDirectory",
"os.listdir",
"hotsos.core.searchtools.FilterDef",
"hotsos.core.searchtools.SearchDef",
"os.path.join",
"tempfile.NamedTemporaryFile",
"hotsos.core.searchtools.FileSearcher",
"unittest.mock.patch.object",
"os.mknod",
"glob.glob",
... | [((997, 1033), 'unittest.mock.patch.object', 'mock.patch.object', (['os', '"""environ"""', '{}'], {}), "(os, 'environ', {})\n", (1014, 1033), False, 'from unittest import mock\n'), ((1039, 1073), 'unittest.mock.patch.object', 'mock.patch.object', (['os', '"""cpu_count"""'], {}), "(os, 'cpu_count')\n", (1056, 1073), False, 'from unittest import mock\n'), ((1337, 1373), 'unittest.mock.patch.object', 'mock.patch.object', (['os', '"""environ"""', '{}'], {}), "(os, 'environ', {})\n", (1354, 1373), False, 'from unittest import mock\n'), ((1379, 1413), 'unittest.mock.patch.object', 'mock.patch.object', (['os', '"""cpu_count"""'], {}), "(os, 'cpu_count')\n", (1396, 1413), False, 'from unittest import mock\n'), ((1678, 1712), 'unittest.mock.patch.object', 'mock.patch.object', (['os', '"""cpu_count"""'], {}), "(os, 'cpu_count')\n", (1695, 1712), False, 'from unittest import mock\n'), ((1790, 1824), 'hotsos.core.config.setup_config', 'setup_config', ([], {'MAX_PARALLEL_TASKS': '(2)'}), '(MAX_PARALLEL_TASKS=2)\n', (1802, 1824), False, 'from hotsos.core.config import setup_config, HotSOSConfig\n'), ((2216, 2305), 'os.path.join', 'os.path.join', (['HotSOSConfig.DATA_ROOT', 'logs_root', '"""neutron-openvswitch-agent.log.2.gz"""'], {}), "(HotSOSConfig.DATA_ROOT, logs_root,\n 'neutron-openvswitch-agent.log.2.gz')\n", (2228, 2305), False, 'import os\n'), ((2353, 2424), 'os.path.join', 'os.path.join', (['HotSOSConfig.DATA_ROOT', 'logs_root', '"""neutron-l3-agent.log"""'], {}), "(HotSOSConfig.DATA_ROOT, logs_root, 'neutron-l3-agent.log')\n", (2365, 2424), False, 'import os\n'), ((2482, 2553), 'os.path.join', 'os.path.join', (['HotSOSConfig.DATA_ROOT', 'logs_root', '"""neutron-l3-agent.log"""'], {}), "(HotSOSConfig.DATA_ROOT, logs_root, 'neutron-l3-agent.log')\n", (2494, 2553), False, 'import os\n'), ((2617, 2693), 'os.path.join', 'os.path.join', (['HotSOSConfig.DATA_ROOT', 'logs_root', '"""neutron-l3-agent.log.1.gz"""'], {}), "(HotSOSConfig.DATA_ROOT, logs_root, 'neutron-l3-agent.log.1.gz')\n", (2629, 2693), False, 'import os\n'), ((2745, 2759), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (2757, 2759), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((2773, 2834), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^(\\\\S+\\\\s+[0-9:\\\\.]+)\\\\s+.+full sync.+"""'], {'tag': '"""T1"""'}), "('^(\\\\S+\\\\s+[0-9:\\\\.]+)\\\\s+.+full sync.+', tag='T1')\n", (2782, 2834), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((2885, 2942), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^(\\\\S+\\\\s+[0-9:\\\\.]+)\\\\s+.+ERROR.+"""'], {'tag': '"""T2"""'}), "('^(\\\\S+\\\\s+[0-9:\\\\.]+)\\\\s+.+ERROR.+', tag='T2')\n", (2894, 2942), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((2993, 3078), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^(\\\\S+\\\\s+[0-9:\\\\.]+)\\\\s+.+ INFO .+ Router [0-9a-f\\\\-]+.+"""'], {'tag': '"""T3"""'}), "('^(\\\\S+\\\\s+[0-9:\\\\.]+)\\\\s+.+ INFO .+ Router [0-9a-f\\\\-]+.+', tag='T3'\n )\n", (3002, 3078), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((3152, 3195), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""non-existant-pattern"""'], {'tag': '"""T4"""'}), "('non-existant-pattern', tag='T4')\n", (3161, 3195), False, 'from hotsos.core.searchtools 
import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((4848, 4935), 'os.path.join', 'os.path.join', (['HotSOSConfig.DATA_ROOT', '"""sos_commands"""', '"""networking"""', '"""ip_-d_address"""'], {}), "(HotSOSConfig.DATA_ROOT, 'sos_commands', 'networking',\n 'ip_-d_address')\n", (4860, 4935), False, 'import os\n'), ((4984, 5071), 'os.path.join', 'os.path.join', (['HotSOSConfig.DATA_ROOT', '"""sos_commands"""', '"""networking"""', '"""ip_-s_-d_link"""'], {}), "(HotSOSConfig.DATA_ROOT, 'sos_commands', 'networking',\n 'ip_-s_-d_link')\n", (4996, 5071), False, 'import os\n'), ((5173, 5187), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (5185, 5187), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((6102, 6116), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (6114, 6116), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((1197, 1254), 'unittest.mock.patch.object', 'mock.patch.object', (['FileSearcher', '"""num_files_to_search"""', '(4)'], {}), "(FileSearcher, 'num_files_to_search', 4)\n", (1214, 1254), False, 'from unittest import mock\n'), ((1272, 1286), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (1284, 1286), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((1538, 1595), 'unittest.mock.patch.object', 'mock.patch.object', (['FileSearcher', '"""num_files_to_search"""', '(2)'], {}), "(FileSearcher, 'num_files_to_search', 2)\n", (1555, 1595), False, 'from unittest import mock\n'), ((1613, 1627), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (1625, 1627), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((1878, 1935), 'unittest.mock.patch.object', 'mock.patch.object', (['FileSearcher', '"""num_files_to_search"""', '(4)'], {}), "(FileSearcher, 'num_files_to_search', 4)\n", (1895, 1935), False, 'from unittest import mock\n'), ((1953, 1967), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (1965, 1967), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((6130, 6173), 'unittest.mock.patch.object', 'mock.patch.object', (['SearchResult', '"""__init__"""'], {}), "(SearchResult, '__init__')\n", (6147, 6173), False, 'from unittest import mock\n'), ((6344, 6380), 'os.path.join', 'os.path.join', (['HotSOSConfig.DATA_ROOT'], {}), '(HotSOSConfig.DATA_ROOT)\n', (6356, 6380), False, 'import os\n'), ((6568, 6597), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6595, 6597), False, 'import tempfile\n'), ((7358, 7374), 'os.listdir', 'os.listdir', (['dtmp'], {}), '(dtmp)\n', (7368, 7374), False, 'import os\n'), ((7658, 7687), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7685, 7687), False, 'import tempfile\n'), ((9798, 9839), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent*.log*"""'], {}), "(dtmp, 'my-test-agent*.log*')\n", (9810, 9839), False, 'import os\n'), ((10004, 10055), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (10031, 10055), False, 'import tempfile\n'), ((10141, 10155), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), 
'()\n', (10153, 10155), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((10964, 10984), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (10973, 10984), False, 'import os\n'), ((11049, 11100), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (11076, 11100), False, 'import tempfile\n'), ((11186, 11200), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (11198, 11200), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((12011, 12031), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (12020, 12031), False, 'import os\n'), ((12107, 12158), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (12134, 12158), False, 'import tempfile\n'), ((12244, 12258), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (12256, 12258), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((13069, 13089), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (13078, 13089), False, 'import os\n'), ((13163, 13214), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (13190, 13214), False, 'import tempfile\n'), ((13300, 13314), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (13312, 13314), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((14217, 14237), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (14226, 14237), False, 'import os\n'), ((14308, 14359), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (14335, 14359), False, 'import tempfile\n'), ((14445, 14459), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (14457, 14459), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((15370, 15390), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (15379, 15390), False, 'import os\n'), ((15673, 15724), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (15700, 15724), False, 'import tempfile\n'), ((15810, 15824), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (15822, 15824), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((16698, 16718), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (16707, 16718), False, 'import os\n'), ((16981, 17032), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (17008, 17032), False, 'import tempfile\n'), ((17118, 17132), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (17130, 17132), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((17921, 17941), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (17930, 17941), False, 'import os\n'), ((18243, 18294), 
'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (18270, 18294), False, 'import tempfile\n'), ((18384, 18398), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (18396, 18398), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((19392, 19412), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (19401, 19412), False, 'import os\n'), ((19461, 19512), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (19488, 19512), False, 'import tempfile\n'), ((19601, 19615), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (19613, 19615), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((19633, 19653), 'hotsos.core.searchtools.FilterDef', 'FilterDef', (['""" (INFO)"""'], {}), "(' (INFO)')\n", (19642, 19653), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((19722, 19747), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['""".+ INFO (.+)"""'], {}), "('.+ INFO (.+)')\n", (19731, 19747), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((19996, 20016), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (20005, 20016), False, 'import os\n'), ((20078, 20129), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (20105, 20129), False, 'import tempfile\n'), ((20218, 20232), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (20230, 20232), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((20250, 20290), 'hotsos.core.searchtools.FilterDef', 'FilterDef', (['""" (ERROR)"""'], {'invert_match': '(True)'}), "(' (ERROR)', invert_match=True)\n", (20259, 20290), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((20359, 20384), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['""".+ INFO (.+)"""'], {}), "('.+ INFO (.+)')\n", (20368, 20384), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((20633, 20653), 'os.remove', 'os.remove', (['ftmp.name'], {}), '(ftmp.name)\n', (20642, 20653), False, 'import os\n'), ((6411, 6425), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""."""'], {}), "('.')\n", (6420, 6425), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((6628, 6667), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.log"""'], {}), "(dtmp, 'my-test-agent.log')\n", (6640, 6667), False, 'import os\n'), ((6747, 6788), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.log.1"""'], {}), "(dtmp, 'my-test-agent.log.1')\n", (6759, 6788), False, 'import os\n'), ((6947, 6993), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.log.tar.gz"""'], {}), "(dtmp, 'my-test-agent.log.tar.gz')\n", (6959, 6993), False, 'import os\n'), ((7729, 7770), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.0.log"""'], {}), "(dtmp, 'my-test-agent.0.log')\n", (7741, 7770), False, 'import os\n'), ((7804, 
7845), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.1.log"""'], {}), "(dtmp, 'my-test-agent.1.log')\n", (7816, 7845), False, 'import os\n'), ((7879, 7922), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.1.log.1"""'], {}), "(dtmp, 'my-test-agent.1.log.1')\n", (7891, 7922), False, 'import os\n'), ((7956, 7997), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.2.log"""'], {}), "(dtmp, 'my-test-agent.2.log')\n", (7968, 7997), False, 'import os\n'), ((8031, 8073), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.16.log"""'], {}), "(dtmp, 'my-test-agent.16.log')\n", (8043, 8073), False, 'import os\n'), ((8107, 8149), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.49.log"""'], {}), "(dtmp, 'my-test-agent.49.log')\n", (8119, 8149), False, 'import os\n'), ((8183, 8227), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.49.log.1"""'], {}), "(dtmp, 'my-test-agent.49.log.1')\n", (8195, 8227), False, 'import os\n'), ((8261, 8303), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.77.log"""'], {}), "(dtmp, 'my-test-agent.77.log')\n", (8273, 8303), False, 'import os\n'), ((8337, 8380), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.100.log"""'], {}), "(dtmp, 'my-test-agent.100.log')\n", (8349, 8380), False, 'import os\n'), ((8414, 8459), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.100.log.1"""'], {}), "(dtmp, 'my-test-agent.100.log.1')\n", (8426, 8459), False, 'import os\n'), ((8493, 8536), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.110.log"""'], {}), "(dtmp, 'my-test-agent.110.log')\n", (8505, 8536), False, 'import os\n'), ((8570, 8613), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.142.log"""'], {}), "(dtmp, 'my-test-agent.142.log')\n", (8582, 8613), False, 'import os\n'), ((8647, 8690), 'os.path.join', 'os.path.join', (['dtmp', '"""my-test-agent.183.log"""'], {}), "(dtmp, 'my-test-agent.183.log')\n", (8659, 8690), False, 'import os\n'), ((8743, 8754), 'os.mknod', 'os.mknod', (['e'], {}), '(e)\n', (8751, 8754), False, 'import os\n'), ((8961, 8976), 'os.mknod', 'os.mknod', (['fname'], {}), '(fname)\n', (8969, 8976), False, 'import os\n'), ((9289, 9304), 'os.mknod', 'os.mknod', (['fname'], {}), '(fname)\n', (9297, 9304), False, 'import os\n'), ((9618, 9633), 'os.mknod', 'os.mknod', (['fname'], {}), '(fname)\n', (9626, 9633), False, 'import os\n'), ((7116, 7141), 'os.path.join', 'os.path.join', (['dtmp', 'fname'], {}), '(dtmp, fname)\n', (7128, 7141), False, 'import os\n'), ((9895, 9910), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (9904, 9910), False, 'import glob\n'), ((10197, 10238), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^a\\\\S* (start\\\\S*) point\\\\S*"""'], {}), "('^a\\\\S* (start\\\\S*) point\\\\S*')\n", (10206, 10238), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((10328, 10349), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""leads to"""'], {}), "('leads to')\n", (10337, 10349), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((10391, 10417), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^an (ending)$"""'], {}), "('^an (ending)$')\n", (10400, 10417), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((11242, 11285), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^(a\\\\S*) (start\\\\S*) 
point\\\\S*"""'], {}), "('^(a\\\\S*) (start\\\\S*) point\\\\S*')\n", (11251, 11285), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((11373, 11394), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""leads to"""'], {}), "('leads to')\n", (11382, 11394), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((11436, 11462), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^an (ending)$"""'], {}), "('^an (ending)$')\n", (11445, 11462), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((12300, 12343), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^(a\\\\S*) (start\\\\S*) point\\\\S*"""'], {}), "('^(a\\\\S*) (start\\\\S*) point\\\\S*')\n", (12309, 12343), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((12431, 12452), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""leads to"""'], {}), "('leads to')\n", (12440, 12452), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((12494, 12520), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^an (ending)$"""'], {}), "('^an (ending)$')\n", (12503, 12520), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((13356, 13399), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^(a\\\\S*) (start\\\\S*) point\\\\S*"""'], {}), "('^(a\\\\S*) (start\\\\S*) point\\\\S*')\n", (13365, 13399), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((13487, 13515), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""value is (\\\\S+)"""'], {}), "('value is (\\\\S+)')\n", (13496, 13515), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((13556, 13571), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^$"""'], {}), "('^$')\n", (13565, 13571), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((14501, 14544), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^(a\\\\S*) (start\\\\S*) point\\\\S*"""'], {}), "('^(a\\\\S*) (start\\\\S*) point\\\\S*')\n", (14510, 14544), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((14632, 14660), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""value is (\\\\S+)"""'], {}), "('value is (\\\\S+)')\n", (14641, 14660), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((14701, 14716), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^$"""'], {}), "('^$')\n", (14710, 14716), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((15866, 15894), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^section (\\\\d+)"""'], {}), "('^section (\\\\d+)')\n", (15875, 15894), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((15936, 15956), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""\\\\d_\\\\d"""'], {}), "('\\\\d_\\\\d')\n", (15945, 15956), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, 
SearchDef, SearchResult, SequenceSearchDef\n'), ((17174, 17199), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^section (2)"""'], {}), "('^section (2)')\n", (17183, 17199), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((17242, 17262), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""\\\\d_\\\\d"""'], {}), "('\\\\d_\\\\d')\n", (17251, 17262), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((17302, 17330), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^section (\\\\d+)"""'], {}), "('^section (\\\\d+)')\n", (17311, 17330), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((18441, 18470), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^sectionA (\\\\d+)"""'], {}), "('^sectionA (\\\\d+)')\n", (18450, 18470), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((18513, 18533), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""\\\\d_\\\\d"""'], {}), "('\\\\d_\\\\d')\n", (18522, 18533), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((18574, 18606), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^section\\\\S+ (\\\\d+)"""'], {}), "('^section\\\\S+ (\\\\d+)')\n", (18583, 18606), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((18758, 18787), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^sectionB (\\\\d+)"""'], {}), "('^sectionB (\\\\d+)')\n", (18767, 18787), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((18830, 18850), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""\\\\d_\\\\d"""'], {}), "('\\\\d_\\\\d')\n", (18839, 18850), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((18891, 18923), 'hotsos.core.searchtools.SearchDef', 'SearchDef', (['"""^section\\\\S+ (\\\\d+)"""'], {}), "('^section\\\\S+ (\\\\d+)')\n", (18900, 18923), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((9865, 9879), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (9877, 9879), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((7223, 7237), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (7235, 7237), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n'), ((7461, 7475), 'hotsos.core.searchtools.FileSearcher', 'FileSearcher', ([], {}), '()\n', (7473, 7475), False, 'from hotsos.core.searchtools import FileSearcher, FilterDef, SearchDef, SearchResult, SequenceSearchDef\n')] |
from setuptools import setup, find_packages
VERSION = '0.5.0'
with open('README.md', 'r') as f:
LONG_DESCRIPTION = f.read()
with open('requirements.txt') as f:
DEPENDENCIES = f.read().split('\n')
setup(
name = 'bqtools',
version = VERSION,
description = 'Python Tools for BigQuery',
long_description = LONG_DESCRIPTION,
long_description_content_type = 'text/markdown',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/42DIGITAL/bqtools',
packages = find_packages(exclude=['tests']),
install_requires=DEPENDENCIES,
extras_require={'test': ['pytest']},
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Database',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
| [
"setuptools.find_packages"
] | [((519, 551), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (532, 551), False, 'from setuptools import setup, find_packages\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('gender', models.CharField(max_length=20, null=True, choices=[('male', 'Male'), ('female', 'Female')], blank=True)),
('city', models.CharField(max_length=250, null=True, blank=True)),
('dob', models.DateField(null=True, blank=True)),
('locale', models.CharField(max_length=10, null=True, blank=True)),
],
),
]
| [
"django.db.models.DateField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((303, 396), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'auto_created': '(True)', 'verbose_name': '"""ID"""', 'serialize': '(False)'}), "(primary_key=True, auto_created=True, verbose_name='ID',\n serialize=False)\n", (319, 396), False, 'from django.db import migrations, models\n'), ((422, 531), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'choices': "[('male', 'Male'), ('female', 'Female')]", 'blank': '(True)'}), "(max_length=20, null=True, choices=[('male', 'Male'), (\n 'female', 'Female')], blank=True)\n", (438, 531), False, 'from django.db import migrations, models\n'), ((554, 609), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(True)', 'blank': '(True)'}), '(max_length=250, null=True, blank=True)\n', (570, 609), False, 'from django.db import migrations, models\n'), ((636, 675), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (652, 675), False, 'from django.db import migrations, models\n'), ((705, 759), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (721, 759), False, 'from django.db import migrations, models\n')] |
from datetime import timedelta
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
from azure.mgmt.media import AzureMediaServices
from azure.storage.blob import BlobServiceClient
from azure.mgmt.media.models import (
Asset,
Transform,
TransformOutput,
StandardEncoderPreset,
AacAudio,
AacAudioProfile,
H264Video,
H264Complexity,
H264Layer,
Mp4Format,
Filters,
Rectangle,
VideoOverlay,
Job,
JobInputs,
JobInputAsset,
JobOutputAsset,
OnErrorType,
Priority
)
import os
#Timer for checking job progress
import time
#Get environment variables
load_dotenv()
# Get the default Azure credential from the environment variables AZURE_CLIENT_ID and AZURE_CLIENT_SECRET and AZURE_TENTANT_ID
default_credential = DefaultAzureCredential()
# Get the environment variables SUBSCRIPTIONID, RESOURCEGROUP and ACCOUNTNAME
subscription_id = os.getenv('SUBSCRIPTIONID')
resource_group = os.getenv('RESOURCEGROUP')
account_name = os.getenv('ACCOUNTNAME')
# The file you want to upload. For this example, the file is placed under Media folder.
# The file ignite.mp4 has been provided for you.
source_file_location = os.chdir("../../Media/")
source_file = "ignite.mp4"
# This is a random string that will be added to the naming of things so that you don't have to keep doing this during testing
uniqueness = "encodeOverlayPng"
# Use the following PNG image to overlay on top of the video
overlay_file = "AzureMediaService.png"
overlay_label = "overlayCloud"
# Set the attributes of the input Asset using the random number
in_asset_name = 'inputassetName' + uniqueness
in_alternate_id = 'inputALTid' + uniqueness
in_description = 'inputdescription' + uniqueness
# Create an Asset object
# The asset_id will be used for the container parameter for the storage SDK after the asset is created by the AMS client.
in_asset = Asset(alternate_id=in_alternate_id, description=in_description)
# Create the JobInput for the PNG Image Overlay
overlay_asset_name = 'overlayassetName' + uniqueness
overlay_asset_alternate_id = 'inputALTid' + uniqueness
overlay_asset_description = 'inputdescription' + uniqueness
# Create an Asset object for PNG Image overlay
overlay_in_asset = Asset(alternate_id=overlay_asset_alternate_id, description=overlay_asset_description)
# Set the attributes of the output Asset using the random number
out_asset_name = 'outputassetName' + uniqueness
out_alternate_id = 'outputALTid' + uniqueness
out_description = 'outputdescription' + uniqueness
# Create Ouput Asset object
out_asset = Asset(alternate_id=out_alternate_id, description=out_description)
# The AMS Client
print("Creating AMS Client")
client = AzureMediaServices(default_credential, subscription_id)
# Create an input Asset
print(f"Creating input asset {in_asset_name}")
input_asset = client.assets.create_or_update(resource_group, account_name, in_asset_name, in_asset)
# An AMS asset is a container with a specific id that has "asset-" prepended to the GUID.
# So, you need to create the asset id to identify it as the container
# where Storage is to upload the video (as a block blob)
in_container = 'asset-' + input_asset.asset_id
# Create an Overlay input Asset
print(f"Creating input asset {overlay_asset_name}")
overlay_asset = client.assets.create_or_update(resource_group, account_name, overlay_asset_name, overlay_in_asset)
# # An AMS asset is a container with a specific id that has "asset-" prepended to the GUID.
# # So, you need to create the asset id to identify it as the container
# # where Storage is to upload the video (as a block blob)
overlay_container = 'asset-' + overlay_asset.asset_id
# create an output Asset
print(f"Creating output asset {out_asset_name}")
output_asset = client.assets.create_or_update(resource_group, account_name, out_asset_name, out_asset)
### Use the Storage SDK to upload the video ###
print(f"Uploading the file {source_file}")
blob_service_client = BlobServiceClient.from_connection_string(os.getenv('STORAGEACCOUNTCONNECTION'))
blob_client = blob_service_client.get_blob_client(in_container, source_file)
working_dir = os.getcwd()
print(f"Current working directory: {working_dir}")
upload_file_path = os.path.join(working_dir, source_file)
# WARNING: Depending on where you are launching the sample from, the path here could be off, and not include the BasicEncoding folder.
# Adjust the path as needed depending on how you are launching this python sample file.
# Upload the video to storage as a block blob
with open(upload_file_path, "rb") as data:
blob_client.upload_blob(data)
### Use the Storage SDK to upload the Overlay file
print(f"Uploading the file {overlay_file}")
blob_service_client = BlobServiceClient.from_connection_string(os.getenv('STORAGEACCOUNTCONNECTION'))
blob_client = blob_service_client.get_blob_client(overlay_container, overlay_file)
working_dir = os.getcwd()
print(f"Current working directory: {working_dir}")
upload_file_path = os.path.join(working_dir, overlay_file)
# WARNING: Depending on where you are launching the sample from, the path here could be off, and not include the BasicEncoding folder.
# Adjust the path as needed depending on how you are launching this python sample file.
# Upload the video to storage as a block blob
with open(upload_file_path, "rb") as data:
blob_client.upload_blob(data)
transform_name = 'H264EncodingOverlayImagePng'
# Create a new BuiltIn Standard encoding Transform for H264 ContentAware Constrained
print(f"Creating Standard Encoding transform named: {transform_name}")
# For this snippet, we are using 'StandardEncoderPreset' with Overlay Image
transform_output = TransformOutput(
preset = StandardEncoderPreset(
codecs=[
AacAudio(
channels=2,
sampling_rate=48000,
bitrate=128000,
profile=AacAudioProfile.AAC_LC
),
H264Video(
key_frame_interval=timedelta(seconds=2),
complexity=H264Complexity.BALANCED,
layers=[
H264Layer(
bitrate=3600000,
width="1280",
height="720",
label="HD-3600kbps"
),
H264Layer(
bitrate=1600000,
width="960",
height="540",
label="SD-1600kbps"
)
]
)
],
# Specify the format for the output files - one for video + audio, and another for the thumbnails
formats=[
Mp4Format(filename_pattern="Video-{Basename}-{Label}-{Bitrate}{Extension}")
],
filters=Filters(
overlays=[
VideoOverlay(
input_label=overlay_label, # same label that is used in the JobInput to identify which file in the asset is the actual overlay image .png file.
position=Rectangle(left="10%", top="10%"), # left and top position of the overlay in absolute pixel or percentage relative to the source video resolution.
# You can also set the height and width of the rectangle to draw into, but there is known problem here.
# If you use % for the top and left (or any of these) you have to stick with % for all or you will get a job configuration Error
# Also, it can alter your aspect ratio when using percentages, so you have to know the source video size in relation to the source image to
# provide the proper image size. Recommendation is to just use the right size image for the source video here and avoid passing in height and width for now.
# height: (if above is percentage based, this has to be also! Otherwise pixels are allowed. No mixing. )
# width: (if above is percentage based, this has to be also! Otherwise pixels are allowed No mixing. )
opacity=0.75, # Sets the blending opacity value to make the image slightly transparent over the video
start=timedelta(seconds=0), # Start at beginning of the video
fade_in_duration=timedelta(seconds=2), # 2 second fade in
fade_out_duration=timedelta(seconds=2), # 2 second fade out
end=timedelta(seconds=5) # end the fade out at 5 seconds on the timeline... fade will begin 2 seconds before this end time
)
]
)
),
# What should we do with the job if there is an error?
on_error=OnErrorType.STOP_PROCESSING_JOB,
# What is the relative priority of this job to others? Normal, high or low?
relative_priority=Priority.NORMAL
)
print("Creating encoding transform...")
# Adding transform details
my_transform = Transform()
my_transform.description="A simple custom H264 encoding transform that overlays a PNG image on the video source"
my_transform.outputs = [transform_output]
print(f"Creating transform {transform_name}")
transform = client.transforms.create_or_update(
resource_group_name=resource_group,
account_name=account_name,
transform_name=transform_name,
parameters=my_transform)
print(f"{transform_name} created (or updated if it existed already). ")
job_name = 'MyEncodingH264OverlayImagePng'+ uniqueness
print(f"Creating Encoding264OverlayImagePng job {job_name}")
files = (source_file, overlay_file)
# Create Video Input Asset
job_video_input_asset = JobInputAsset(asset_name=in_asset_name)
job_input_overlay = JobInputAsset(
asset_name=overlay_asset_name,
label=overlay_label # Order does not matter here, it is the "label" used on the Filter and the jobInput Overlay that is important!
)
# Create a list of job inputs - we will add both the video and overlay image assets here as the inputs to the job.
job_inputs=[
job_video_input_asset,
job_input_overlay
]
# Create Job Output Asset
outputs = JobOutputAsset(asset_name=out_asset_name)
# Create Job object and then create Trasnform Job
the_job = Job(input=JobInputs(inputs=job_inputs), outputs=[outputs], correlation_data={ "propertyname": "string" })
job: Job = client.jobs.create(resource_group, account_name, transform_name, job_name, parameters=the_job)
# Check Job State
job_state = client.jobs.get(resource_group, account_name, transform_name, job_name)
# First check
print("First job check")
print(job_state.state)
# Check the state of the job every 10 seconds. Adjust time_in_seconds = <how often you want to check for job state>
def countdown(t):
while t:
mins, secs = divmod(t, 60)
timer = '{:02d}:{:02d}'.format(mins, secs)
print(timer, end="\r")
time.sleep(1)
t -= 1
job_current = client.jobs.get(resource_group, account_name, transform_name, job_name)
if(job_current.state == "Finished"):
print(job_current.state)
# TODO: Download the output file using blob storage SDK
return
if(job_current.state == "Error"):
print(job_current.state)
# TODO: Provide Error details from Job through API
return
else:
print(job_current.state)
countdown(int(time_in_seconds))
time_in_seconds = 10
countdown(int(time_in_seconds))
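# --- Illustrative addendum, not part of the original sample ---
# The TODO inside countdown() leaves the download step open. A minimal sketch
# using the azure-storage-blob client imported above; it assumes the job has
# finished and reuses the 'asset-' + asset_id container naming convention
# established earlier in this script.
out_container = 'asset-' + output_asset.asset_id
container_client = blob_service_client.get_container_client(out_container)
for blob in container_client.list_blobs():
    # Write each encoded output next to the uploaded source files
    with open(os.path.join(working_dir, blob.name), "wb") as f:
        f.write(container_client.download_blob(blob.name).readall())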
| [
"azure.mgmt.media.models.Asset",
"azure.mgmt.media.models.JobOutputAsset",
"azure.identity.DefaultAzureCredential",
"os.getenv",
"azure.mgmt.media.models.Rectangle",
"os.path.join",
"time.sleep",
"dotenv.load_dotenv",
"os.chdir",
"os.getcwd",
"azure.mgmt.media.models.JobInputs",
"azure.mgmt.me... | [((610, 623), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (621, 623), False, 'from dotenv import load_dotenv\n'), ((773, 797), 'azure.identity.DefaultAzureCredential', 'DefaultAzureCredential', ([], {}), '()\n', (795, 797), False, 'from azure.identity import DefaultAzureCredential\n'), ((895, 922), 'os.getenv', 'os.getenv', (['"""SUBSCRIPTIONID"""'], {}), "('SUBSCRIPTIONID')\n", (904, 922), False, 'import os\n'), ((940, 966), 'os.getenv', 'os.getenv', (['"""RESOURCEGROUP"""'], {}), "('RESOURCEGROUP')\n", (949, 966), False, 'import os\n'), ((982, 1006), 'os.getenv', 'os.getenv', (['"""ACCOUNTNAME"""'], {}), "('ACCOUNTNAME')\n", (991, 1006), False, 'import os\n'), ((1170, 1194), 'os.chdir', 'os.chdir', (['"""../../Media/"""'], {}), "('../../Media/')\n", (1178, 1194), False, 'import os\n'), ((1876, 1939), 'azure.mgmt.media.models.Asset', 'Asset', ([], {'alternate_id': 'in_alternate_id', 'description': 'in_description'}), '(alternate_id=in_alternate_id, description=in_description)\n', (1881, 1939), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((2224, 2314), 'azure.mgmt.media.models.Asset', 'Asset', ([], {'alternate_id': 'overlay_asset_alternate_id', 'description': 'overlay_asset_description'}), '(alternate_id=overlay_asset_alternate_id, description=\n overlay_asset_description)\n', (2229, 2314), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((2562, 2627), 'azure.mgmt.media.models.Asset', 'Asset', ([], {'alternate_id': 'out_alternate_id', 'description': 'out_description'}), '(alternate_id=out_alternate_id, description=out_description)\n', (2567, 2627), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((2684, 2739), 'azure.mgmt.media.AzureMediaServices', 'AzureMediaServices', (['default_credential', 'subscription_id'], {}), '(default_credential, subscription_id)\n', (2702, 2739), False, 'from azure.mgmt.media import AzureMediaServices\n'), ((4119, 4130), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4128, 4130), False, 'import os\n'), ((4201, 4239), 'os.path.join', 'os.path.join', (['working_dir', 'source_file'], {}), '(working_dir, source_file)\n', (4213, 4239), False, 'import os\n'), ((4886, 4897), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4895, 4897), False, 'import os\n'), ((4968, 5007), 'os.path.join', 'os.path.join', (['working_dir', 'overlay_file'], {}), '(working_dir, overlay_file)\n', (4980, 5007), False, 'import os\n'), ((8563, 8574), 'azure.mgmt.media.models.Transform', 'Transform', ([], {}), '()\n', (8572, 8574), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((9230, 9269), 
'azure.mgmt.media.models.JobInputAsset', 'JobInputAsset', ([], {'asset_name': 'in_asset_name'}), '(asset_name=in_asset_name)\n', (9243, 9269), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((9291, 9356), 'azure.mgmt.media.models.JobInputAsset', 'JobInputAsset', ([], {'asset_name': 'overlay_asset_name', 'label': 'overlay_label'}), '(asset_name=overlay_asset_name, label=overlay_label)\n', (9304, 9356), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((9696, 9737), 'azure.mgmt.media.models.JobOutputAsset', 'JobOutputAsset', ([], {'asset_name': 'out_asset_name'}), '(asset_name=out_asset_name)\n', (9710, 9737), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((3989, 4026), 'os.getenv', 'os.getenv', (['"""STORAGEACCOUNTCONNECTION"""'], {}), "('STORAGEACCOUNTCONNECTION')\n", (3998, 4026), False, 'import os\n'), ((4750, 4787), 'os.getenv', 'os.getenv', (['"""STORAGEACCOUNTCONNECTION"""'], {}), "('STORAGEACCOUNTCONNECTION')\n", (4759, 4787), False, 'import os\n'), ((9809, 9837), 'azure.mgmt.media.models.JobInputs', 'JobInputs', ([], {'inputs': 'job_inputs'}), '(inputs=job_inputs)\n', (9818, 9837), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((10453, 10466), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10463, 10466), False, 'import time\n'), ((5727, 5821), 'azure.mgmt.media.models.AacAudio', 'AacAudio', ([], {'channels': '(2)', 'sampling_rate': '(48000)', 'bitrate': '(128000)', 'profile': 'AacAudioProfile.AAC_LC'}), '(channels=2, sampling_rate=48000, bitrate=128000, profile=\n AacAudioProfile.AAC_LC)\n', (5735, 5821), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((6425, 6500), 'azure.mgmt.media.models.Mp4Format', 'Mp4Format', ([], {'filename_pattern': '"""Video-{Basename}-{Label}-{Bitrate}{Extension}"""'}), "(filename_pattern='Video-{Basename}-{Label}-{Bitrate}{Extension}')\n", (6434, 6500), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((5903, 5923), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(2)'}), '(seconds=2)\n', (5912, 5923), False, 'from datetime import timedelta\n'), ((5996, 6071), 'azure.mgmt.media.models.H264Layer', 'H264Layer', ([], {'bitrate': '(3600000)', 
'width': '"""1280"""', 'height': '"""720"""', 'label': '"""HD-3600kbps"""'}), "(bitrate=3600000, width='1280', height='720', label='HD-3600kbps')\n", (6005, 6071), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((6143, 6217), 'azure.mgmt.media.models.H264Layer', 'H264Layer', ([], {'bitrate': '(1600000)', 'width': '"""960"""', 'height': '"""540"""', 'label': '"""SD-1600kbps"""'}), "(bitrate=1600000, width='960', height='540', label='SD-1600kbps')\n", (6152, 6217), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((6750, 6782), 'azure.mgmt.media.models.Rectangle', 'Rectangle', ([], {'left': '"""10%"""', 'top': '"""10%"""'}), "(left='10%', top='10%')\n", (6759, 6782), False, 'from azure.mgmt.media.models import Asset, Transform, TransformOutput, StandardEncoderPreset, AacAudio, AacAudioProfile, H264Video, H264Complexity, H264Layer, Mp4Format, Filters, Rectangle, VideoOverlay, Job, JobInputs, JobInputAsset, JobOutputAsset, OnErrorType, Priority\n'), ((7874, 7894), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (7883, 7894), False, 'from datetime import timedelta\n'), ((7970, 7990), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(2)'}), '(seconds=2)\n', (7979, 7990), False, 'from datetime import timedelta\n'), ((8041, 8061), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(2)'}), '(seconds=2)\n', (8050, 8061), False, 'from datetime import timedelta\n'), ((8098, 8118), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (8107, 8118), False, 'from datetime import timedelta\n')] |
from sysu_dataset import SYSU
import numpy as np
import scipy
import itertools
import cv2
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from config import *
vox_size=54
all_tups = np.array(list(itertools.product(range(vox_size), repeat=2)))
rot_array = np.arange(vox_size*vox_size).reshape([vox_size,vox_size])
K = 5
T = 10
class SYSUdataset(Dataset):
def __init__(self, test=False, full_train=False):
# Underlying dataset and features
self.dataset = SYSU()
# What to return
self.images = DATA_IMAGES
self.images_3D = DATA_IMAGES_3D
self.op_flow = DATA_OP_FLOW
self.op_flow_2D = DATA_OP_FLOW_2D
self.single_feature = DATA_SINGLE_FEAT
self.augmentation = DATA_AUGMENTATION
# Train, validation, test split
self.train = full_train
if test:
self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[1]
else:
self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[0]
def __len__(self):
return len(self.vid_ids)
def image_transforms(self, numpy_imgs):
''' Transformations on a list of images
Returns
-------
images : Torch Tensor
Stacked tensor of all images with the transformations applied
'''
# Get random parameters to apply same transformation to all images in list
color_jitter = transforms.ColorJitter.get_params(.25,.25,.25,.25)
rotation_param = transforms.RandomRotation.get_params((-15,15))
crop_params = None
# Apply transformations
images = []
for numpy_img in numpy_imgs:
i = transforms.functional.to_pil_image(numpy_img)
i = transforms.functional.resize(i, (224,224))
if self.train:
i = color_jitter(i)
i = transforms.functional.rotate(i, rotation_param)
i = transforms.functional.to_tensor(i)
i = transforms.functional.normalize(i, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
images.append(i)
return torch.stack(images)
def op_flow_transforms(self, op_flow):
''' Transformations on a tensor of optical flow voxel grids
Parameters
----------
op_flow : ndarray
Returns
-------
op_flow : Torch Tensor
A torch tensor of an optical flow voxel grid with the
transformations (rotation, scale, translation) applied to it
'''
def translate(op_flow):
# op_flow[:,0::3,:,:,:] ---> x axis vectors
# op_flow = scipy.ndimage.interpolation.shift(op_flow, [0,0,x_move,y_move,z_move], cval=0, order=0) # Slower alternative
# Get amount to shift
max_shift = int(op_flow.shape[2] * 0.10)
x_move, y_move, z_move = np.random.randint(-max_shift, max_shift, 3)
# Translate values
if x_move > 0:
op_flow[:,:,x_move:,:,:] = op_flow[:,:,:-x_move,:,:]
op_flow[:,:,:x_move,:,:] = 0
elif x_move < 0:
op_flow[:,:,:x_move,:,:] = op_flow[:,:,-x_move:,:,:]
op_flow[:,:,x_move:,:,:] = 0
if y_move > 0:
op_flow[:,:,:,y_move:,:] = op_flow[:,:,:,:-y_move,:]
op_flow[:,:,:,:y_move,:] = 0
elif y_move < 0:
op_flow[:,:,:,:y_move,:] = op_flow[:,:,:,-y_move:,:]
op_flow[:,:,:,y_move:,:] = 0
if z_move > 0:
op_flow[:,:,:,:,z_move:] = op_flow[:,:,:,:,:-z_move]
op_flow[:,:,:,:,:z_move] = 0
elif z_move < 0:
op_flow[:,:,:,:,:z_move] = op_flow[:,:,:,:,-z_move:]
op_flow[:,:,:,:,z_move:] = 0
return op_flow
def rotate(op_flow):
''' Rotate an optical flow tensor a random amount about the y axis '''
# Get angle
angle = np.random.randint(-45, 45)
# Rotate positions
rot_mat = scipy.ndimage.interpolation.rotate(rot_array, angle, (0,1), reshape=False, order=0)
op_flow_new = np.zeros(op_flow.shape, dtype=np.float32)
tup = all_tups[rot_mat]
op_flow_new = op_flow[:,:,tup[:, :, 0],:,tup[:, :, 1]].transpose(2,3,0,4,1)
# Rotate flow vectors
cos = np.cos(np.radians(-angle))
sin = np.sin(np.radians(-angle))
x_copy = op_flow_new[:,0].copy()
z_copy = op_flow_new[:,2].copy()
op_flow_new[:,0] = x_copy * cos + z_copy * sin
op_flow_new[:,2] = x_copy * -sin + z_copy * cos
return op_flow_new
def scale(op_flow):
return op_flow
# import datetime as dt
if self.train:
op_flow = translate(op_flow)
op_flow = rotate(op_flow)
return torch.from_numpy(op_flow)
def get_3D_op_flow(self, vid_id):
# Load the data
feat_values = np.load("{}/{:05}.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
feat_nonzero = np.load("{}/{:05}.nonzeros.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
feat_shape = np.load("{}/{:05}.shape.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
# Rebuild the feature from the saved data
feature = np.zeros(feat_shape, np.float32)
feature[tuple(feat_nonzero)] = feat_values
return feature
def __getitem__(self, idx):
vid_id = self.vid_ids[idx]
to_return = []
# Images
if self.images:
images = np.load('{}/{:05}.npy'.format(CACHE_2D_IMAGES_SYSU, vid_id))
images = self.image_transforms(images)
to_return.append(images)
# Optical flow 3D
if self.op_flow:
op_flow = self.get_3D_op_flow(vid_id)
op_flow = self.op_flow_transforms(op_flow)
to_return.append(op_flow)
# Labels
to_return.append(self.dataset.get_label(vid_id))
return to_return
def get_train_loader():
dataset = SYSUdataset(full_train=True)
return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,
shuffle=True, num_workers=NUM_WORKERS,
pin_memory=True)
def get_test_loader():
dataset = SYSUdataset(test=True)
return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,
shuffle=False, num_workers=NUM_WORKERS,
pin_memory=True)
| [
"numpy.radians",
"torchvision.transforms.functional.to_tensor",
"torchvision.transforms.RandomRotation.get_params",
"torchvision.transforms.functional.to_pil_image",
"torch.stack",
"torch.from_numpy",
"numpy.zeros",
"numpy.random.randint",
"torchvision.transforms.functional.rotate",
"torchvision.t... | [((6173, 6298), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'DATA_BATCH_SIZE', 'shuffle': '(True)', 'num_workers': 'NUM_WORKERS', 'pin_memory': '(True)'}), '(dataset, batch_size=DATA_BATCH_SIZE, shuffle=\n True, num_workers=NUM_WORKERS, pin_memory=True)\n', (6200, 6298), False, 'import torch\n'), ((6446, 6572), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'DATA_BATCH_SIZE', 'shuffle': '(False)', 'num_workers': 'NUM_WORKERS', 'pin_memory': '(True)'}), '(dataset, batch_size=DATA_BATCH_SIZE, shuffle=\n False, num_workers=NUM_WORKERS, pin_memory=True)\n', (6473, 6572), False, 'import torch\n'), ((306, 336), 'numpy.arange', 'np.arange', (['(vox_size * vox_size)'], {}), '(vox_size * vox_size)\n', (315, 336), True, 'import numpy as np\n'), ((525, 531), 'sysu_dataset.SYSU', 'SYSU', ([], {}), '()\n', (529, 531), False, 'from sysu_dataset import SYSU\n'), ((1449, 1506), 'torchvision.transforms.ColorJitter.get_params', 'transforms.ColorJitter.get_params', (['(0.25)', '(0.25)', '(0.25)', '(0.25)'], {}), '(0.25, 0.25, 0.25, 0.25)\n', (1482, 1506), True, 'import torchvision.transforms as transforms\n'), ((1525, 1572), 'torchvision.transforms.RandomRotation.get_params', 'transforms.RandomRotation.get_params', (['(-15, 15)'], {}), '((-15, 15))\n', (1561, 1572), True, 'import torchvision.transforms as transforms\n'), ((2142, 2161), 'torch.stack', 'torch.stack', (['images'], {}), '(images)\n', (2153, 2161), False, 'import torch\n'), ((4946, 4971), 'torch.from_numpy', 'torch.from_numpy', (['op_flow'], {}), '(op_flow)\n', (4962, 4971), False, 'import torch\n'), ((5380, 5412), 'numpy.zeros', 'np.zeros', (['feat_shape', 'np.float32'], {}), '(feat_shape, np.float32)\n', (5388, 5412), True, 'import numpy as np\n'), ((1705, 1750), 'torchvision.transforms.functional.to_pil_image', 'transforms.functional.to_pil_image', (['numpy_img'], {}), '(numpy_img)\n', (1739, 1750), True, 'import torchvision.transforms as transforms\n'), ((1767, 1810), 'torchvision.transforms.functional.resize', 'transforms.functional.resize', (['i', '(224, 224)'], {}), '(i, (224, 224))\n', (1795, 1810), True, 'import torchvision.transforms as transforms\n'), ((1957, 1991), 'torchvision.transforms.functional.to_tensor', 'transforms.functional.to_tensor', (['i'], {}), '(i)\n', (1988, 1991), True, 'import torchvision.transforms as transforms\n'), ((2008, 2102), 'torchvision.transforms.functional.normalize', 'transforms.functional.normalize', (['i'], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(i, mean=[0.485, 0.456, 0.406], std=[0.229, \n 0.224, 0.225])\n', (2039, 2102), True, 'import torchvision.transforms as transforms\n'), ((2902, 2945), 'numpy.random.randint', 'np.random.randint', (['(-max_shift)', 'max_shift', '(3)'], {}), '(-max_shift, max_shift, 3)\n', (2919, 2945), True, 'import numpy as np\n'), ((4015, 4041), 'numpy.random.randint', 'np.random.randint', (['(-45)', '(45)'], {}), '(-45, 45)\n', (4032, 4041), True, 'import numpy as np\n'), ((4096, 4184), 'scipy.ndimage.interpolation.rotate', 'scipy.ndimage.interpolation.rotate', (['rot_array', 'angle', '(0, 1)'], {'reshape': '(False)', 'order': '(0)'}), '(rot_array, angle, (0, 1), reshape=False,\n order=0)\n', (4130, 4184), False, 'import scipy\n'), ((4206, 4247), 'numpy.zeros', 'np.zeros', (['op_flow.shape'], {'dtype': 'np.float32'}), '(op_flow.shape, dtype=np.float32)\n', (4214, 4247), True, 'import numpy as np\n'), ((1893, 1940), 
'torchvision.transforms.functional.rotate', 'transforms.functional.rotate', (['i', 'rotation_param'], {}), '(i, rotation_param)\n', (1921, 1940), True, 'import torchvision.transforms as transforms\n'), ((4432, 4450), 'numpy.radians', 'np.radians', (['(-angle)'], {}), '(-angle)\n', (4442, 4450), True, 'import numpy as np\n'), ((4477, 4495), 'numpy.radians', 'np.radians', (['(-angle)'], {}), '(-angle)\n', (4487, 4495), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as numpy
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv1D, MaxPooling1D, LeakyReLU, PReLU
from keras.utils import np_utils
from keras.callbacks import CSVLogger, ModelCheckpoint
import h5py
import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# Use CNN to capture local temporal dependency of data in risk prediction or other related tasks.
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
with h5py.File(''.join(['bitcoin2012_2017_50_30_prediction.h5']), 'r') as hf:
datas = hf['inputs'].value
labels = hf['outputs'].value
output_file_name='bitcoin2015to2017_close_CNN_2_relu'
step_size = datas.shape[1]
batch_size= 8
nb_features = datas.shape[2]
epochs = 100
#split training validation
training_size = int(0.8* datas.shape[0])
training_datas = datas[:training_size,:]
training_labels = labels[:training_size,:]
validation_datas = datas[training_size:,:]
validation_labels = labels[training_size:,:]
#build model
# 2 layers
model = Sequential()
model.add(Conv1D(activation='relu', input_shape=(step_size, nb_features), strides=3, filters=8, kernel_size=20))
#model.add(PReLU())
model.add(Dropout(0.5))
model.add(Conv1D( strides=4, filters=nb_features, kernel_size=16))
'''
# 3 Layers
model.add(Conv1D(activation='relu', input_shape=(step_size, nb_features), strides=3, filters=8, kernel_size=8))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D(activation='relu', strides=2, filters=8, kernel_size=8))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D( strides=2, filters=nb_features, kernel_size=8))
# 4 layers
model.add(Conv1D(activation='relu', input_shape=(step_size, nb_features), strides=2, filters=8, kernel_size=2))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D(activation='relu', strides=2, filters=8, kernel_size=2))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D(activation='relu', strides=2, filters=8, kernel_size=2))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D( strides=2, filters=nb_features, kernel_size=2))
'''
model.compile(loss='mse', optimizer='adam')
model.fit(training_datas, training_labels,verbose=1, batch_size=batch_size,validation_data=(validation_datas,validation_labels), epochs = epochs, callbacks=[CSVLogger(output_file_name+'.csv', append=True),ModelCheckpoint('weights/'+output_file_name+'-{epoch:02d}-{val_loss:.5f}.hdf5', monitor='val_loss', verbose=1,mode='min')])
# model.fit(datas,labels)
#model.save(output_file_name+'.h5')
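# Illustrative addendum, not part of the original script: a quick sanity check
# of the trained model on the held-out 20% split; `numpy` is imported at the
# top of this file.
predictions = model.predict(validation_datas, batch_size=batch_size)
print('validation MSE:', numpy.mean((predictions - validation_labels) ** 2))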
| [
"keras.callbacks.CSVLogger",
"keras.callbacks.ModelCheckpoint",
"tensorflow.Session",
"keras.models.Sequential",
"tensorflow.ConfigProto",
"keras.layers.Dropout",
"keras.layers.Conv1D"
] | [((631, 647), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (645, 647), True, 'import tensorflow as tf\n'), ((1285, 1297), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1295, 1297), False, 'from keras.models import Sequential\n'), ((699, 724), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (709, 724), True, 'import tensorflow as tf\n'), ((1310, 1415), 'keras.layers.Conv1D', 'Conv1D', ([], {'activation': '"""relu"""', 'input_shape': '(step_size, nb_features)', 'strides': '(3)', 'filters': '(8)', 'kernel_size': '(20)'}), "(activation='relu', input_shape=(step_size, nb_features), strides=3,\n filters=8, kernel_size=20)\n", (1316, 1415), False, 'from keras.layers import Conv1D, MaxPooling1D, LeakyReLU, PReLU\n'), ((1443, 1455), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1450, 1455), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1467, 1521), 'keras.layers.Conv1D', 'Conv1D', ([], {'strides': '(4)', 'filters': 'nb_features', 'kernel_size': '(16)'}), '(strides=4, filters=nb_features, kernel_size=16)\n', (1473, 1521), False, 'from keras.layers import Conv1D, MaxPooling1D, LeakyReLU, PReLU\n'), ((2577, 2626), 'keras.callbacks.CSVLogger', 'CSVLogger', (["(output_file_name + '.csv')"], {'append': '(True)'}), "(output_file_name + '.csv', append=True)\n", (2586, 2626), False, 'from keras.callbacks import CSVLogger, ModelCheckpoint\n'), ((2625, 2760), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["('weights/' + output_file_name + '-{epoch:02d}-{val_loss:.5f}.hdf5')"], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'mode': '"""min"""'}), "('weights/' + output_file_name +\n '-{epoch:02d}-{val_loss:.5f}.hdf5', monitor='val_loss', verbose=1, mode\n ='min')\n", (2640, 2760), False, 'from keras.callbacks import CSVLogger, ModelCheckpoint\n')] |
'''
NAME: <NAME>
TE-B
ROLL NO: 08
ASSIGNMENT NO: 11
PROBLEM STATEMENT:
Write a program using TCP sockets for wired network to implement
a. Peer to Peer Chat (server side)
'''
import socket
import sys
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost',23000))
sock.listen(1)
clisock, (ip,port) = sock.accept()
while True:
data = clisock.recv(16)
dt = data.decode()
if "stop."==dt:
break
else:
print("client: " + dt)
data = input("you: ")
clisock.send(str.encode(data))
if "stop."==data:
break
sock.close()
'''
res@res-HP-280-G2-MT-Legacy:~/Desktop/FINAL 1/assignment 14/tcp peer 2 peer$ sudo su
[sudo] password for res:
root@res-HP-280-G2-MT-Legacy:/home/res/Desktop/FINAL 1/assignment 14/tcp peer 2 peer# python pptcpserv.py
client: hi from client
you: hello!
client: hi
you: STOP.
root@res-HP-280-G2-MT-Legacy:/home/res/Desktop/FINAL 1/assignment 14/tcp peer 2 peer#
'''
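# Illustrative counterpart, not part of the original assignment file: a
# matching peer-to-peer client sketch. The server above calls recv() first, so
# the client speaks first, and "stop." from either side ends the chat. Kept
# commented out because it belongs in a separate file run against the server.
# import socket
# peer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# peer.connect(('localhost', 23000))
# while True:
#     msg = input("you: ")
#     peer.send(str.encode(msg))
#     if "stop." == msg:
#         break
#     reply = peer.recv(16).decode()
#     if "stop." == reply:
#         break
#     print("server: " + reply)
# peer.close()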
| [
"socket.socket"
] | [((210, 259), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (223, 259), False, 'import socket\n')] |
from django.shortcuts import render
from rest_framework import viewsets
from institutions.models import Institution, Career, Course
from institutions.serializers import InstitutionSerializer, CareerSerializer, CourseSerializer
from permissions.services import APIPermissionClassFactory
class InstitutionViewSet(viewsets.ModelViewSet):
queryset = Institution.objects.all()
serializer_class = InstitutionSerializer
permission_classes = (
APIPermissionClassFactory(
name='InstitutionPermission',
permission_configuration={
'base': {
'create': lambda user, req: user.is_authenticated,
'list': lambda user, req: user.is_authenticated,
},
'instance': {
'retrieve': lambda user, obj, req: user.is_authenticated,
'update': lambda user, obj, req: user.is_authenticated,
'partial_update': lambda user, obj, req: user.is_authenticated,
'destroy': lambda user, obj, req: user.is_authenticated,
}
}
),
)
class CareerViewSet(viewsets.ModelViewSet):
queryset = Career.objects.all()
serializer_class = CareerSerializer
permission_classes = (
APIPermissionClassFactory(
name='CareerPermission',
permission_configuration={
'base': {
'create': lambda user, req: user.is_authenticated,
'list': lambda user, req: user.is_authenticated,
},
'instance': {
'retrieve': lambda user, obj, req: user.is_authenticated,
'update': lambda user, obj, req: user.is_authenticated,
'partial_update': lambda user, obj, req: user.is_authenticated,
'destroy': lambda user, obj, req: user.is_authenticated,
}
}
),
)
class CourseViewSet(viewsets.ModelViewSet):
queryset = Course.objects.all()
serializer_class = CourseSerializer
permission_classes = (
APIPermissionClassFactory(
name='CoursePermission',
permission_configuration={
'base': {
'create': lambda user, req: user.is_authenticated,
'list': lambda user, req: user.is_authenticated,
},
'instance': {
'retrieve': lambda user, obj, req: user.is_authenticated,
'update': lambda user, obj, req: user.is_authenticated,
'partial_update': lambda user, obj, req: user.is_authenticated,
'destroy': lambda user, obj, req: user.is_authenticated,
}
}
),
)
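# Illustrative note, not part of the original file: the three
# permission_configuration dicts above are identical, so a hypothetical shared
# constant could be declared once and passed to each factory call:
# AUTHENTICATED_ONLY = {
#     'base': {
#         'create': lambda user, req: user.is_authenticated,
#         'list': lambda user, req: user.is_authenticated,
#     },
#     'instance': {
#         'retrieve': lambda user, obj, req: user.is_authenticated,
#         'update': lambda user, obj, req: user.is_authenticated,
#         'partial_update': lambda user, obj, req: user.is_authenticated,
#         'destroy': lambda user, obj, req: user.is_authenticated,
#     }
# }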
| [
"institutions.models.Institution.objects.all",
"permissions.services.APIPermissionClassFactory",
"institutions.models.Course.objects.all",
"institutions.models.Career.objects.all"
] | [((353, 378), 'institutions.models.Institution.objects.all', 'Institution.objects.all', ([], {}), '()\n', (376, 378), False, 'from institutions.models import Institution, Career, Course\n'), ((1207, 1227), 'institutions.models.Career.objects.all', 'Career.objects.all', ([], {}), '()\n', (1225, 1227), False, 'from institutions.models import Institution, Career, Course\n'), ((2046, 2066), 'institutions.models.Course.objects.all', 'Course.objects.all', ([], {}), '()\n', (2064, 2066), False, 'from institutions.models import Institution, Career, Course\n'), ((459, 925), 'permissions.services.APIPermissionClassFactory', 'APIPermissionClassFactory', ([], {'name': '"""InstitutionPermission"""', 'permission_configuration': "{'base': {'create': lambda user, req: user.is_authenticated, 'list': lambda\n user, req: user.is_authenticated}, 'instance': {'retrieve': lambda user,\n obj, req: user.is_authenticated, 'update': lambda user, obj, req: user.\n is_authenticated, 'partial_update': lambda user, obj, req: user.\n is_authenticated, 'destroy': lambda user, obj, req: user.is_authenticated}}"}), "(name='InstitutionPermission',\n permission_configuration={'base': {'create': lambda user, req: user.\n is_authenticated, 'list': lambda user, req: user.is_authenticated},\n 'instance': {'retrieve': lambda user, obj, req: user.is_authenticated,\n 'update': lambda user, obj, req: user.is_authenticated,\n 'partial_update': lambda user, obj, req: user.is_authenticated,\n 'destroy': lambda user, obj, req: user.is_authenticated}})\n", (484, 925), False, 'from permissions.services import APIPermissionClassFactory\n'), ((1303, 1767), 'permissions.services.APIPermissionClassFactory', 'APIPermissionClassFactory', ([], {'name': '"""CareerPermission"""', 'permission_configuration': "{'base': {'create': lambda user, req: user.is_authenticated, 'list': lambda\n user, req: user.is_authenticated}, 'instance': {'retrieve': lambda user,\n obj, req: user.is_authenticated, 'update': lambda user, obj, req: user.\n is_authenticated, 'partial_update': lambda user, obj, req: user.\n is_authenticated, 'destroy': lambda user, obj, req: user.is_authenticated}}"}), "(name='CareerPermission', permission_configuration\n ={'base': {'create': lambda user, req: user.is_authenticated, 'list': \n lambda user, req: user.is_authenticated}, 'instance': {'retrieve': lambda\n user, obj, req: user.is_authenticated, 'update': lambda user, obj, req:\n user.is_authenticated, 'partial_update': lambda user, obj, req: user.\n is_authenticated, 'destroy': lambda user, obj, req: user.is_authenticated}}\n )\n", (1328, 1767), False, 'from permissions.services import APIPermissionClassFactory\n'), ((2142, 2606), 'permissions.services.APIPermissionClassFactory', 'APIPermissionClassFactory', ([], {'name': '"""CoursePermission"""', 'permission_configuration': "{'base': {'create': lambda user, req: user.is_authenticated, 'list': lambda\n user, req: user.is_authenticated}, 'instance': {'retrieve': lambda user,\n obj, req: user.is_authenticated, 'update': lambda user, obj, req: user.\n is_authenticated, 'partial_update': lambda user, obj, req: user.\n is_authenticated, 'destroy': lambda user, obj, req: user.is_authenticated}}"}), "(name='CoursePermission', permission_configuration\n ={'base': {'create': lambda user, req: user.is_authenticated, 'list': \n lambda user, req: user.is_authenticated}, 'instance': {'retrieve': lambda\n user, obj, req: user.is_authenticated, 'update': lambda user, obj, req:\n user.is_authenticated, 'partial_update': lambda user, obj, 
req: user.\n is_authenticated, 'destroy': lambda user, obj, req: user.is_authenticated}}\n )\n", (2167, 2606), False, 'from permissions.services import APIPermissionClassFactory\n')] |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytype helpers."""
import collections
from typing import Any, Iterable, Mapping, NamedTuple, Optional, Union, get_type_hints
from reverb import pybind
import tensorflow.compat.v1 as tf
from reverb.cc import schema_pb2
Fifo = pybind.FifoSelector
Heap = pybind.HeapSelector
Lifo = pybind.LifoSelector
Prioritized = pybind.PrioritizedSelector
Uniform = pybind.UniformSelector
DistributionType = Union[Fifo, Heap, Lifo, Prioritized, Uniform]
# Note that this is effectively treated as `Any`; see b/109648354.
SpecNest = Union[
tf.TensorSpec, Iterable['SpecNest'], Mapping[str, 'SpecNest']] # pytype: disable=not-supported-yet
_table_info_proto_types = get_type_hints(schema_pb2.TableInfo) or {}
_table_info_type_dict = collections.OrderedDict(
(descr.name, _table_info_proto_types.get(descr.name, Any))
for descr in schema_pb2.TableInfo.DESCRIPTOR.fields)
_table_info_type_dict['signature'] = Optional[SpecNest]
"""A tuple describing Table information.
The main difference between this object and a `schema_pb2.TableInfo` message
is that the signature is a nested structure of `tf.TypeSpec` objects,
instead of a raw proto.
"""
TableInfo = NamedTuple('TableInfo', tuple(_table_info_type_dict.items()))
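# Illustrative note, not part of the original module: because TableInfo is a
# plain typing.NamedTuple, its field names mirror the proto descriptor and the
# type hints are not enforced at runtime, so an instance can be built
# generically, e.g. TableInfo(**{field: None for field in TableInfo._fields}).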
| [
"typing.get_type_hints"
] | [((1278, 1314), 'typing.get_type_hints', 'get_type_hints', (['schema_pb2.TableInfo'], {}), '(schema_pb2.TableInfo)\n', (1292, 1314), False, 'from typing import Any, Iterable, Mapping, NamedTuple, Optional, Union, get_type_hints\n')] |
import os
import shutil
import unittest
from oeqa.core.utils.path import remove_safe
from oeqa.sdk.case import OESDKTestCase
class GccCompileTest(OESDKTestCase):
td_vars = ['MACHINE']
@classmethod
def setUpClass(self):
files = {'test.c' : self.tc.files_dir, 'test.cpp' : self.tc.files_dir,
'testsdkmakefile' : self.tc.sdk_files_dir}
for f in files:
shutil.copyfile(os.path.join(files[f], f),
os.path.join(self.tc.sdk_dir, f))
def setUp(self):
machine = self.td.get("MACHINE")
if not (self.tc.hasTargetPackage("packagegroup-cross-canadian-%s" % machine) or
self.tc.hasTargetPackage("gcc")):
raise unittest.SkipTest("GccCompileTest class: SDK doesn't contain a cross-canadian toolchain")
def test_gcc_compile(self):
self._run('$CC %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))
def test_gpp_compile(self):
self._run('$CXX %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))
def test_gpp2_compile(self):
self._run('$CXX %s/test.cpp -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))
def test_make(self):
self._run('cd %s; make -f testsdkmakefile' % self.tc.sdk_dir)
@classmethod
def tearDownClass(self):
files = [os.path.join(self.tc.sdk_dir, f) \
for f in ['test.c', 'test.cpp', 'test.o', 'test',
'testsdkmakefile']]
for f in files:
remove_safe(f)
| [
"os.path.join",
"unittest.SkipTest",
"oeqa.core.utils.path.remove_safe"
] | [((725, 819), 'unittest.SkipTest', 'unittest.SkipTest', (['"""GccCompileTest class: SDK doesn\'t contain a cross-canadian toolchain"""'], {}), '(\n "GccCompileTest class: SDK doesn\'t contain a cross-canadian toolchain")\n', (742, 819), False, 'import unittest\n'), ((1340, 1372), 'os.path.join', 'os.path.join', (['self.tc.sdk_dir', 'f'], {}), '(self.tc.sdk_dir, f)\n', (1352, 1372), False, 'import os\n'), ((1517, 1531), 'oeqa.core.utils.path.remove_safe', 'remove_safe', (['f'], {}), '(f)\n', (1528, 1531), False, 'from oeqa.core.utils.path import remove_safe\n'), ((425, 450), 'os.path.join', 'os.path.join', (['files[f]', 'f'], {}), '(files[f], f)\n', (437, 450), False, 'import os\n'), ((472, 504), 'os.path.join', 'os.path.join', (['self.tc.sdk_dir', 'f'], {}), '(self.tc.sdk_dir, f)\n', (484, 504), False, 'import os\n')] |
from fastapi import APIRouter
from . import auth, index, list, task
router = APIRouter()
router.include_router(index.router)
router.include_router(auth.router, prefix='/auth', tags=['Authenticate'])
router.include_router(list.router, prefix='/lists', tags=['Lists'])
router.include_router(task.router, prefix='/lists', tags=['Tasks'])
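# Usage sketch, not part of the original module: the aggregated router above
# would typically be mounted on the application instance in the project's
# entry point, e.g.:
# from fastapi import FastAPI
# app = FastAPI()
# app.include_router(router)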
| [
"fastapi.APIRouter"
] | [((79, 90), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (88, 90), False, 'from fastapi import APIRouter\n')] |
# -*- coding: utf-8 -*-
'''
Created on Oct 23, 2015
@author: jrm
'''
from inkcut.device.plugin import DeviceProtocol
from inkcut.core.utils import async_sleep, log
class DebugProtocol(DeviceProtocol):
""" A protocol that just logs what is called """
def connection_made(self):
log.debug("protocol.connectionMade()")
def move(self, x, y, z, absolute=True):
log.debug("protocol.move({x},{y},{z})".format(x=x, y=y, z=z))
#: Wait some time before we get there
return async_sleep(0.1)
def set_pen(self, p):
log.debug("protocol.set_pen({p})".format(p=p))
def set_velocity(self, v):
log.debug("protocol.set_velocity({v})".format(v=v))
def set_force(self, f):
log.debug("protocol.set_force({f})".format(f=f))
def data_received(self, data):
log.debug("protocol.data_received({}".format(data))
def connection_lost(self):
log.debug("protocol.connection_lost()") | [
"inkcut.core.utils.async_sleep",
"inkcut.core.utils.log.debug"
] | [((296, 334), 'inkcut.core.utils.log.debug', 'log.debug', (['"""protocol.connectionMade()"""'], {}), "('protocol.connectionMade()')\n", (305, 334), False, 'from inkcut.core.utils import async_sleep, log\n'), ((515, 531), 'inkcut.core.utils.async_sleep', 'async_sleep', (['(0.1)'], {}), '(0.1)\n', (526, 531), False, 'from inkcut.core.utils import async_sleep, log\n'), ((952, 991), 'inkcut.core.utils.log.debug', 'log.debug', (['"""protocol.connection_lost()"""'], {}), "('protocol.connection_lost()')\n", (961, 991), False, 'from inkcut.core.utils import async_sleep, log\n')] |
# -*- encoding: utf8 -*-
# version 1.11
import tkinter.messagebox, os, sys  # sys is needed for sys.exit() in monitor()
from tkinter import *
from tkinter.ttk import *
from tkinter import Menu
import datetime
import threading
import pickle
import time
import tushare as ts
import pywinauto
import pywinauto.clipboard
import pywinauto.application
NUM_OF_STOCKS = 5 # user-defined number of stocks
is_start = False
is_monitor = True
set_stocks_info = []
actual_stocks_info = []
consignation_info = []
is_ordered = [1] * NUM_OF_STOCKS # 1: not ordered yet  0: order placed
is_dealt = [0] * NUM_OF_STOCKS # 0: not filled; negative integer: quantity sold, positive integer: quantity bought
stock_codes = [''] * NUM_OF_STOCKS
class OperationThs:
def __init__(self):
try:
self.__app = pywinauto.application.Application()
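            # Note (added): '网上股票交易系统5.0' is the real window title of the
            # trading client and is matched verbatim, so it stays untranslated.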
self.__app.connect(title='网上股票交易系统5.0')
top_hwnd = pywinauto.findwindows.find_window(title='网上股票交易系统5.0')
dialog_hwnd = pywinauto.findwindows.find_windows(top_level_only=False, class_name='#32770', parent=top_hwnd)[0]
wanted_hwnds = pywinauto.findwindows.find_windows(top_level_only=False, parent=dialog_hwnd)
print('wanted_hwnds length', len(wanted_hwnds))
if len(wanted_hwnds) not in (99,97,96,98,100,101):
                tkinter.messagebox.showerror('Error', 'Could not get the window handle of the 同花顺 two-way order screen; please switch the 同花顺 trading client to the "two-way order" view!')
exit()
self.__main_window = self.__app.window_(handle=top_hwnd)
self.__dialog_window = self.__app.window_(handle=dialog_hwnd)
except:
pass
def __buy(self, code, quantity):
"""买函数
:param code: 代码, 字符串
:param quantity: 数量, 字符串
"""
self.__dialog_window.Edit1.SetFocus()
time.sleep(0.2)
self.__dialog_window.Edit1.SetEditText(code)
time.sleep(0.2)
if quantity != '0':
self.__dialog_window.Edit3.SetEditText(quantity)
time.sleep(0.2)
self.__dialog_window.Button1.Click()
time.sleep(0.2)
def __sell(self, code, quantity):
"""
卖函数
:param code: 股票代码, 字符串
:param quantity: 数量, 字符串
"""
self.__dialog_window.Edit4.SetFocus()
time.sleep(0.2)
self.__dialog_window.Edit4.SetEditText(code)
time.sleep(0.2)
if quantity != '0':
self.__dialog_window.Edit6.SetEditText(quantity)
time.sleep(0.2)
self.__dialog_window.Button2.Click()
time.sleep(0.2)
def __closePopupWindow(self):
"""
关闭一个弹窗。
:return: 如果有弹出式对话框,返回True,否则返回False
"""
popup_hwnd = self.__main_window.PopupWindow()
if popup_hwnd:
popup_window = self.__app.window_(handle=popup_hwnd)
popup_window.SetFocus()
popup_window.Button.Click()
return True
return False
def __closePopupWindows(self):
"""
关闭多个弹出窗口
:return:
"""
while self.__closePopupWindow():
time.sleep(0.5)
def order(self, code, direction, quantity):
"""
下单函数
:param code: 股票代码, 字符串
:param direction: 买卖方向, 字符串
:param quantity: 买卖数量, 字符串
"""
if direction == 'B':
self.__buy(code, quantity)
if direction == 'S':
self.__sell(code, quantity)
self.__closePopupWindows()
def maxWindow(self):
"""
最大化窗口
"""
if self.__main_window.GetShowState() != 3:
self.__main_window.Maximize()
self.__main_window.SetFocus()
def minWindow(self):
"""
最小化窗体
"""
if self.__main_window.GetShowState() != 2:
self.__main_window.Minimize()
def refresh(self, t=0.5):
"""
点击刷新按钮
:param t:刷新后的等待时间
"""
self.__dialog_window.Button5.Click()
time.sleep(t)
def getMoney(self):
"""
获取可用资金
"""
return float(self.__dialog_window.Static19.WindowText())
@staticmethod
def __cleanClipboardData(data, cols=11):
"""
清洗剪贴板数据
:param data: 数据
:param cols: 列数
:return: 清洗后的数据,返回列表
"""
lst = data.strip().split()[:-1]
matrix = []
for i in range(0, len(lst) // cols):
matrix.append(lst[i * cols:(i + 1) * cols])
return matrix[1:]
def __copyToClipboard(self):
"""
拷贝持仓信息至剪贴板
:return:
"""
self.__dialog_window.CVirtualGridCtrl.RightClick(coords=(30, 30))
self.__main_window.TypeKeys('C')
def __getCleanedData(self):
"""
读取ListView中的信息
:return: 清洗后的数据
"""
self.__copyToClipboard()
data = pywinauto.clipboard.GetData()
return self.__cleanClipboardData(data)
def __selectWindow(self, choice):
"""
选择tab窗口信息
:param choice: 选择个标签页。持仓,撤单,委托,成交
:return:
"""
rect = self.__dialog_window.CCustomTabCtrl.ClientRect()
x = rect.width() // 8
y = rect.height() // 2
if choice == 'W':
x = x
elif choice == 'E':
x *= 3
elif choice == 'R':
x *= 5
elif choice == 'A':
x *= 7
self.__dialog_window.CCustomTabCtrl.ClickInput(coords=(x, y))
time.sleep(0.5)
def __getInfo(self, choice):
"""
获取股票信息
"""
self.__selectWindow(choice=choice)
return self.__getCleanedData()
def getPosition(self):
"""
获取持仓
:return:
"""
return self.__getInfo(choice='W')
@staticmethod
def getDeal(code, pre_position, cur_position):
"""
获取成交数量
:param code: 需检查的股票代码, 字符串
:param pre_position: 下单前的持仓
:param cur_position: 下单后的持仓
:return: 0-未成交, 正整数是买入的数量, 负整数是卖出的数量
"""
if pre_position == cur_position:
return 0
pre_len = len(pre_position)
cur_len = len(cur_position)
if pre_len == cur_len:
for row in range(cur_len):
if cur_position[row][0] == code:
return int(float(cur_position[row][1]) - float(pre_position[row][1]))
if cur_len > pre_len:
return int(float(cur_position[-1][1]))
def withdraw(self, code, direction):
"""
指定撤单
:param code: 股票代码
:param direction: 方向 B, S
:return:
"""
row_pos = []
info = self.__getInfo(choice='R')
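        # Note (added): '买入'/'卖出' below must stay in Chinese -- they are matched
        # against the direction text shown in the trading client's order list.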
if direction == 'B':
direction = '买入'
elif direction == 'S':
direction = '卖出'
if info:
for index, element in enumerate(info):
if element[0] == code:
if element[1] == direction:
row_pos.append(index)
if row_pos:
for row in row_pos:
self.__dialog_window.CVirtualGridCtrl.ClickInput(coords=(7, 28 + 16 * row))
self.__dialog_window.Button12.Click()
self.__closePopupWindows()
def withdrawBuy(self):
"""
撤买
:return:
"""
self.__selectWindow(choice='R')
self.__dialog_window.Button8.Click()
self.__closePopupWindows()
def withdrawSell(self):
"""
撤卖
:return:
"""
self.__selectWindow(choice='R')
self.__dialog_window.Button9.Click()
self.__closePopupWindows()
def withdrawAll(self):
"""
全撤
:return:
"""
self.__selectWindow(choice='R')
self.__dialog_window.Button7.Click()
self.__closePopupWindows()
def getStockData():
"""
获取股票实时数据
:return:股票实时数据
"""
global stock_codes
code_name_price = []
try:
df = ts.get_realtime_quotes(stock_codes)
df_len = len(df)
for stock_code in stock_codes:
is_found = False
for i in range(df_len):
actual_code = df['code'][i]
if stock_code == actual_code:
code_name_price.append((actual_code, df['name'][i], float(df['price'][i])))
is_found = True
break
if is_found is False:
code_name_price.append(('', '', 0))
except:
        code_name_price = [('', '', 0)] * NUM_OF_STOCKS # network unavailable, return empty values
return code_name_price
def monitor():
"""
实时监控函数
"""
global actual_stocks_info, consignation_info, is_ordered, is_dealt, set_stocks_info
count = 1
pre_position = []
try:
operation = OperationThs()
operation.maxWindow()
pre_position = operation.getPosition()
# print(pre_position)
while is_monitor:
if is_start:
actual_stocks_info = getStockData()
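                # Place an order only when: the row has not been ordered yet, a live
                # price exists, the user filled in relation/price/direction/quantity,
                # and the optional start time has passed.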
for row, (actual_code, actual_name, actual_price) in enumerate(actual_stocks_info):
if actual_code and is_start and is_ordered[row] == 1 and actual_price > 0 \
and set_stocks_info[row][1] and set_stocks_info[row][2] > 0 \
and set_stocks_info[row][3] and set_stocks_info[row][4] \
and datetime.datetime.now().time() > set_stocks_info[row][5]:
if (set_stocks_info[row][1] == '>' and actual_price > set_stocks_info[row][2]) or \
(set_stocks_info[row][1] == '<' and float(actual_price) < set_stocks_info[row][2]):
operation.maxWindow()
operation.order(actual_code, set_stocks_info[row][3], set_stocks_info[row][4])
dt = datetime.datetime.now()
is_ordered[row] = 0
operation.refresh()
cur_position = operation.getPosition()
is_dealt[row] = operation.getDeal(actual_code, pre_position, cur_position)
consignation_info.append(
(dt.strftime('%x'), dt.strftime('%X'), actual_code,
actual_name, set_stocks_info[row][3],
actual_price, set_stocks_info[row][4], '已委托', is_dealt[row]))
pre_position = cur_position
if count % 200 == 0:
operation.refresh()
time.sleep(3)
count += 1
except:
tkinter.messagebox.showerror('错误', '请先打开“同花顺双向委托界面”后在打开自动交易系统!')
sys.exit()
class StockGui:
global is_monitor
def __init__(self):
self.window = Tk()
self.window.title("自动化交易系统-同花顺")
        # window icon (top-left corner); a raw string keeps the backslash literal
        self.window.iconbitmap(r'e:\ico.ico')
self.window.resizable(0, 0)
frame1 = Frame(self.window)
frame1.pack(padx=10, pady=10)
Label(frame1, text="股票代码", width=8, justify=CENTER).grid(
row=1, column=1, padx=5, pady=5)
Label(frame1, text="股票名称", width=8, justify=CENTER).grid(
row=1, column=2, padx=5, pady=5)
Label(frame1, text="实时价格", width=8, justify=CENTER).grid(
row=1, column=3, padx=5, pady=5)
Label(frame1, text="关系", width=4, justify=CENTER).grid(
row=1, column=4, padx=5, pady=5)
Label(frame1, text="设定价格", width=8, justify=CENTER).grid(
row=1, column=5, padx=5, pady=5)
Label(frame1, text="方向", width=4, justify=CENTER).grid(
row=1, column=6, padx=5, pady=5)
Label(frame1, text="数量", width=8, justify=CENTER).grid(
row=1, column=7, padx=5, pady=5)
Label(frame1, text="时间可选", width=8, justify=CENTER).grid(
row=1, column=8, padx=5, pady=5)
Label(frame1, text="委托", width=6, justify=CENTER).grid(
row=1, column=9, padx=5, pady=5)
Label(frame1, text="成交", width=6, justify=CENTER).grid(
row=1, column=10, padx=5, pady=5)
self.rows = NUM_OF_STOCKS
self.cols = 10
self.variable = []
for row in range(self.rows):
self.variable.append([])
for col in range(self.cols):
self.variable[row].append(StringVar())
for row in range(self.rows):
Entry(frame1, textvariable=self.variable[row][0],
width=8).grid(row=row + 2, column=1, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][1], state=DISABLED,
width=8).grid(row=row + 2, column=2, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][2], state=DISABLED, justify=RIGHT,
width=8).grid(row=row + 2, column=3, padx=5, pady=5)
Combobox(frame1, values=('<', '>'), textvariable=self.variable[row][3],
width=2).grid(row=row + 2, column=4, padx=5, pady=5)
Spinbox(frame1, from_=0, to=999, textvariable=self.variable[row][4], justify=RIGHT,
increment=0.01, width=6).grid(row=row + 2, column=5, padx=5, pady=5)
Combobox(frame1, values=('B', 'S'), textvariable=self.variable[row][5],
width=2).grid(row=row + 2, column=6, padx=5, pady=5)
Spinbox(frame1, from_=0, to=10000, textvariable=self.variable[row][6], justify=RIGHT,
increment=100, width=6).grid(row=row + 2, column=7, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][7],
width=8).grid(row=row + 2, column=8, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][8], state=DISABLED, justify=CENTER,
width=6).grid(row=row + 2, column=9, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][9], state=DISABLED, justify=RIGHT,
width=6).grid(row=row + 2, column=10, padx=5, pady=5)
frame3 = Frame(self.window)
frame3.pack(padx=10, pady=10)
        # build the menu bar
self.menuBar = Menu(self.window)
self.window.config(menu=self.menuBar)
        # tearoff=0 removes the dashed tear-off line at the top of the menu (present by default)
self.fileMenu = Menu(self.menuBar,tearoff=0)
        # create a top-level menu named "帮助" (Help)
self.menuBar.add_cascade(label="帮助",menu=self.fileMenu)
        # add an "关于" (About) entry under the Help menu
self.fileMenu.add_command(label="关于",command =self.about)
        # add a separator line
self.fileMenu.add_separator()
        # add an "退出" (Exit) entry under the Help menu and bind its handler
self.fileMenu.add_command(label="退出",command=self.close)
        # a second menu bar entry could be added like this:
# self.helpMenu = Menu(self.menuBar,tearoff=0)
# self.menuBar.add_cascade(label="Help", menu=self.helpMenu)
# self.helpMenu.add_command(label="About")
self.start_bt = Button(frame3, text="开始", command=self.start)
self.start_bt.pack(side=LEFT)
self.set_bt = Button(frame3, text='重置买卖', command=self.setFlags)
self.set_bt.pack(side=LEFT)
Button(frame3, text="历史记录", command=self.displayHisRecords).pack(side=LEFT)
Button(frame3, text='保存', command=self.save).pack(side=LEFT)
self.load_bt = Button(frame3, text='载入', command=self.load)
self.load_bt.pack(side=LEFT)
self.window.protocol(name="WM_DELETE_WINDOW", func=self.close)
self.window.after(100, self.updateControls)
self.window.mainloop()
def displayHisRecords(self):
"""
显示历史信息
"""
global consignation_info
tp = Toplevel()
tp.title('历史记录')
        tp.iconbitmap(r'e:\ico.ico')
tp.resizable(0, 1)
scrollbar = Scrollbar(tp)
scrollbar.pack(side=RIGHT, fill=Y)
col_name = ['日期', '时间', '证券代码', '证券名称', '方向', '价格', '数量', '委托', '成交']
tree = Treeview(
tp, show='headings', columns=col_name, height=30, yscrollcommand=scrollbar.set)
tree.pack(expand=1, fill=Y)
scrollbar.config(command=tree.yview)
for name in col_name:
tree.heading(name, text=name)
tree.column(name, width=70, anchor=CENTER)
for msg in consignation_info:
tree.insert('', 0, values=msg)
def save(self):
"""
保存设置
"""
global set_stocks_info, consignation_info
self.getItems()
with open('stockInfo.dat', 'wb') as fp:
pickle.dump(set_stocks_info, fp)
pickle.dump(consignation_info, fp)
def load(self):
"""
载入设置
"""
global set_stocks_info, consignation_info
try:
with open('stockInfo.dat', 'rb') as fp:
set_stocks_info = pickle.load(fp)
consignation_info = pickle.load(fp)
for row in range(self.rows):
for col in range(self.cols):
if col == 0:
self.variable[row][col].set(set_stocks_info[row][0])
elif col == 3:
self.variable[row][col].set(set_stocks_info[row][1])
elif col == 4:
self.variable[row][col].set(set_stocks_info[row][2])
elif col == 5:
self.variable[row][col].set(set_stocks_info[row][3])
elif col == 6:
self.variable[row][col].set(set_stocks_info[row][4])
elif col == 7:
temp = set_stocks_info[row][5].strftime('%X')
if temp == '01:00:00':
self.variable[row][col].set('')
else:
self.variable[row][col].set(temp)
except Exception :
tkinter.messagebox.showerror('错误', "没有找到配置保存文件,请先进行股票买卖配置信息保存!")
def setFlags(self):
"""
重置买卖标志
"""
global is_start, is_ordered
if is_start is False:
is_ordered = [1] * NUM_OF_STOCKS
tkinter.messagebox.showinfo('重置成功', "重置成功!")
def updateControls(self):
"""
实时股票名称、价格、状态信息
"""
global actual_stocks_info, is_start
if is_start:
for row, (actual_code, actual_name, actual_price) in enumerate(actual_stocks_info):
if actual_code:
self.variable[row][1].set(actual_name)
self.variable[row][2].set(str(actual_price))
if is_ordered[row] == 1:
self.variable[row][8].set('监控中')
elif is_ordered[row] == 0:
self.variable[row][8].set('已委托')
self.variable[row][9].set(str(is_dealt[row]))
else:
self.variable[row][1].set('')
self.variable[row][2].set('')
self.variable[row][8].set('')
self.variable[row][9].set('')
self.window.after(3000, self.updateControls)
@staticmethod
def __pickCodeFromItems(items_info):
"""
提取股票代码
:param items_info: UI下各项输入信息
:return:股票代码列表
"""
stock_codes = []
for item in items_info:
stock_codes.append(item[0])
return stock_codes
def start(self):
"""
启动停止
"""
global is_start, stock_codes, set_stocks_info
if is_start is False:
is_start = True
else:
is_start = False
if is_start:
self.getItems()
stock_codes = self.__pickCodeFromItems(set_stocks_info)
self.start_bt['text'] = '停止'
self.set_bt['state'] = DISABLED
self.load_bt['state'] = DISABLED
tkinter.messagebox.showinfo('成功','启动成功!')
else:
self.start_bt['text'] = '开始'
self.set_bt['state'] = NORMAL
self.load_bt['state'] = NORMAL
def about(self):
tkinter.messagebox.showinfo("关于",'\r此系统仅适应于同花顺网上交易5.0,使用时请先登陆同花顺网上交易系统并切换到“同花顺双向委托界面”。\r 版本号:v 1.0.0 \r 作者:水域\r 发布日期:2017.01.11')
def close(self):
"""
关闭程序时,停止monitor线程
"""
global is_monitor
is_monitor = False
self.window.quit()
def getItems(self):
"""
获取UI上用户输入的各项数据,
"""
global set_stocks_info
set_stocks_info = []
        # read the buy/sell price, quantity and other inputs
for row in range(self.rows):
set_stocks_info.append([])
for col in range(self.cols):
temp = self.variable[row][col].get().strip()
if col == 0:
                    if len(temp) == 6 and temp.isdigit():  # a stock code must be 6 digits
set_stocks_info[row].append(temp)
else:
set_stocks_info[row].append('')
elif col == 3:
if temp in ('>', '<'):
set_stocks_info[row].append(temp)
else:
set_stocks_info[row].append('')
elif col == 4:
try:
price = float(temp)
if price > 0:
                            set_stocks_info[row].append(price)  # store the price as a number
else:
set_stocks_info[row].append(0)
except ValueError:
set_stocks_info[row].append(0)
elif col == 5:
if temp in ('B', 'S'):
set_stocks_info[row].append(temp)
else:
set_stocks_info[row].append('')
elif col == 6:
if temp.isdigit() and int(temp) >= 0:
set_stocks_info[row].append(str(int(temp) // 100 * 100))
else:
set_stocks_info[row].append('')
elif col == 7:
try:
set_stocks_info[row].append(datetime.datetime.strptime(temp, '%H:%M:%S').time())
except ValueError:
set_stocks_info[row].append(datetime.datetime.strptime('1:00:00', '%H:%M:%S').time())
if __name__ == '__main__':
# StockGui()
t1 = threading.Thread(target=StockGui)
t1.start()
t2 = threading.Thread(target=monitor)
t2.start()
| [
"pywinauto.clipboard.GetData",
"tkinter.Menu",
"pickle.dump",
"datetime.datetime.strptime",
"pywinauto.findwindows.find_windows",
"pickle.load",
"time.sleep",
"pywinauto.findwindows.find_window",
"datetime.datetime.now",
"tushare.get_realtime_quotes",
"threading.Thread",
"pywinauto.application... | [((22092, 22125), 'threading.Thread', 'threading.Thread', ([], {'target': 'StockGui'}), '(target=StockGui)\n', (22108, 22125), False, 'import threading\n'), ((22150, 22182), 'threading.Thread', 'threading.Thread', ([], {'target': 'monitor'}), '(target=monitor)\n', (22166, 22182), False, 'import threading\n'), ((1650, 1665), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1660, 1665), False, 'import time\n'), ((1727, 1742), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1737, 1742), False, 'import time\n'), ((1913, 1928), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1923, 1928), False, 'import time\n'), ((2121, 2136), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2131, 2136), False, 'import time\n'), ((2198, 2213), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2208, 2213), False, 'import time\n'), ((2384, 2399), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2394, 2399), False, 'import time\n'), ((3804, 3817), 'time.sleep', 'time.sleep', (['t'], {}), '(t)\n', (3814, 3817), False, 'import time\n'), ((4674, 4703), 'pywinauto.clipboard.GetData', 'pywinauto.clipboard.GetData', ([], {}), '()\n', (4701, 4703), False, 'import pywinauto\n'), ((5279, 5294), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (5289, 5294), False, 'import time\n'), ((7769, 7804), 'tushare.get_realtime_quotes', 'ts.get_realtime_quotes', (['stock_codes'], {}), '(stock_codes)\n', (7791, 7804), True, 'import tushare as ts\n'), ((13950, 13967), 'tkinter.Menu', 'Menu', (['self.window'], {}), '(self.window)\n', (13954, 13967), False, 'from tkinter import Menu\n'), ((14082, 14111), 'tkinter.Menu', 'Menu', (['self.menuBar'], {'tearoff': '(0)'}), '(self.menuBar, tearoff=0)\n', (14086, 14111), False, 'from tkinter import Menu\n'), ((657, 692), 'pywinauto.application.Application', 'pywinauto.application.Application', ([], {}), '()\n', (690, 692), False, 'import pywinauto\n'), ((768, 822), 'pywinauto.findwindows.find_window', 'pywinauto.findwindows.find_window', ([], {'title': '"""网上股票交易系统5.0"""'}), "(title='网上股票交易系统5.0')\n", (801, 822), False, 'import pywinauto\n'), ((974, 1050), 'pywinauto.findwindows.find_windows', 'pywinauto.findwindows.find_windows', ([], {'top_level_only': '(False)', 'parent': 'dialog_hwnd'}), '(top_level_only=False, parent=dialog_hwnd)\n', (1008, 1050), False, 'import pywinauto\n'), ((1844, 1859), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1854, 1859), False, 'import time\n'), ((2315, 2330), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2325, 2330), False, 'import time\n'), ((2928, 2943), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2938, 2943), False, 'import time\n'), ((10402, 10415), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (10412, 10415), False, 'import time\n'), ((16250, 16282), 'pickle.dump', 'pickle.dump', (['set_stocks_info', 'fp'], {}), '(set_stocks_info, fp)\n', (16261, 16282), False, 'import pickle\n'), ((16295, 16329), 'pickle.dump', 'pickle.dump', (['consignation_info', 'fp'], {}), '(consignation_info, fp)\n', (16306, 16329), False, 'import pickle\n'), ((849, 948), 'pywinauto.findwindows.find_windows', 'pywinauto.findwindows.find_windows', ([], {'top_level_only': '(False)', 'class_name': '"""#32770"""', 'parent': 'top_hwnd'}), "(top_level_only=False, class_name=\n '#32770', parent=top_hwnd)\n", (883, 948), False, 'import pywinauto\n'), ((16536, 16551), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (16547, 16551), False, 'import pickle\n'), 
((16588, 16603), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (16599, 16603), False, 'import pickle\n'), ((9671, 9694), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9692, 9694), False, 'import datetime\n'), ((9199, 9222), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9220, 9222), False, 'import datetime\n'), ((21836, 21880), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['temp', '"""%H:%M:%S"""'], {}), "(temp, '%H:%M:%S')\n", (21862, 21880), False, 'import datetime\n'), ((21980, 22029), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""1:00:00"""', '"""%H:%M:%S"""'], {}), "('1:00:00', '%H:%M:%S')\n", (22006, 22029), False, 'import datetime\n')] |
import pytest
import re
from proknow import Exceptions
def test_create(app, custom_metric_generator):
pk = app.pk
# Verify returned CustomMetricItem
params, custom_metric = custom_metric_generator()
assert custom_metric.name == params["name"]
assert custom_metric.context == params["context"]
assert custom_metric.type == params["type"]
# Assert item can be found in query
custom_metrics = pk.custom_metrics.query()
for custom_metric in custom_metrics:
if custom_metric.name == params["name"]:
custom_metric_match = custom_metric
break
else:
custom_metric_match = None
assert custom_metric_match is not None
assert custom_metric_match.name == params["name"]
assert custom_metric_match.context == params["context"]
assert custom_metric_match.type == params["type"]
def test_create_failure(app, custom_metric_generator):
pk = app.pk
params, custom_metric = custom_metric_generator()
# Assert error is raised for duplicate custom metric
with pytest.raises(Exceptions.HttpError) as err_wrapper:
pk.custom_metrics.create(**params)
assert err_wrapper.value.status_code == 409
assert err_wrapper.value.body == 'Custom metric already exists with name "' + params["name"] + '"'
def test_delete(app, custom_metric_generator):
pk = app.pk
params, custom_metric = custom_metric_generator(do_not_mark=True)
# Verify custom metric was deleted successfully
custom_metric.delete()
for custom_metric in pk.custom_metrics.query():
if custom_metric.name == params["name"]:
match = custom_metric
break
else:
match = None
assert match is None
def test_delete_failure(app, custom_metric_generator):
pk = app.pk
params, custom_metric = custom_metric_generator(do_not_mark=True)
custom_metric.delete()
# Assert error is raised when attempting to delete protected custom metric
with pytest.raises(Exceptions.HttpError) as err_wrapper:
custom_metric.delete()
assert err_wrapper.value.status_code == 404
assert err_wrapper.value.body == 'Custom metric "' + custom_metric.id + '" not found'
def test_find(app, custom_metric_generator):
pk = app.pk
params, custom_metric = custom_metric_generator(name="Find Me")
expr = re.compile(r"ind M")
# Find with no args
found = pk.custom_metrics.find()
assert found is None
# Find using predicate
found = pk.custom_metrics.find(lambda ws: expr.search(ws.data["name"]) is not None)
assert found is not None
assert found.name == params["name"]
assert found.context == params["context"]
assert found.type == params["type"]
# Find using props
found = pk.custom_metrics.find(id=custom_metric.id, name=params["name"])
assert found is not None
assert found.name == params["name"]
assert found.context == params["context"]
assert found.type == params["type"]
# Find using both
found = pk.custom_metrics.find(lambda ws: expr.search(ws.data["name"]) is not None, id=custom_metric.id, name=params["name"])
assert found is not None
assert found.name == params["name"]
assert found.context == params["context"]
assert found.type == params["type"]
# Find failure
found = pk.custom_metrics.find(lambda ws: expr.search(ws.data["id"]) is not None)
assert found is None
found = pk.custom_metrics.find(id=custom_metric.id, name=params["name"].lower())
assert found is None
def test_query(app, custom_metric_generator):
pk = app.pk
params1, custom_metric1 = custom_metric_generator()
params2, custom_metric2 = custom_metric_generator()
# Verify test 1
for custom_metric in pk.custom_metrics.query():
if custom_metric.name == params1["name"]:
match = custom_metric
break
else:
match = None
assert match is not None
assert match.name == params1["name"]
assert match.context == params1["context"]
assert match.type == params1["type"]
# Verify test 2
for custom_metric in pk.custom_metrics.query():
if custom_metric.name == params2["name"]:
match = custom_metric
break
else:
match = None
assert match is not None
assert match.name == params2["name"]
assert match.context == params2["context"]
assert match.type == params2["type"]
def test_resolve(app, custom_metric_generator):
pk = app.pk
params, custom_metric = custom_metric_generator()
# Test resolve by id
resolved = pk.custom_metrics.resolve(custom_metric.id)
assert resolved is not None
assert resolved.name == params["name"]
assert resolved.context == params["context"]
assert resolved.type == params["type"]
# Test resolve by name
resolved = pk.custom_metrics.resolve(params["name"])
assert resolved is not None
assert resolved.name == params["name"]
assert resolved.context == params["context"]
assert resolved.type == params["type"]
def test_resolve_failure(app):
pk = app.pk
# Test resolve by id
with pytest.raises(Exceptions.CustomMetricLookupError) as err_wrapper:
pk.custom_metrics.resolve("00000000000000000000000000000000")
assert err_wrapper.value.message == "Custom metric with id `00000000000000000000000000000000` not found."
# Test resolve by name
with pytest.raises(Exceptions.CustomMetricLookupError) as err_wrapper:
pk.custom_metrics.resolve("My Metric")
assert err_wrapper.value.message == "Custom metric with name `My Metric` not found."
def test_resolve_by_id(app, custom_metric_generator):
pk = app.pk
params, custom_metric = custom_metric_generator()
resolved = pk.custom_metrics.resolve_by_id(custom_metric.id)
assert resolved is not None
assert resolved.name == params["name"]
assert resolved.context == params["context"]
assert resolved.type == params["type"]
def test_resolve_by_id_failure(app):
pk = app.pk
with pytest.raises(Exceptions.CustomMetricLookupError) as err_wrapper:
pk.custom_metrics.resolve_by_id("00000000000000000000000000000000")
assert err_wrapper.value.message == "Custom metric with id `00000000000000000000000000000000` not found."
def test_resolve_by_name(app, custom_metric_generator):
pk = app.pk
params, custom_metric = custom_metric_generator(name="custom-lower1")
resolved = pk.custom_metrics.resolve_by_name(params["name"])
assert resolved is not None
assert resolved.name == params["name"]
assert resolved.context == params["context"]
assert resolved.type == params["type"]
resolved = pk.custom_metrics.resolve_by_name(params["name"].upper())
assert resolved is not None
assert resolved.name == params["name"]
assert resolved.context == params["context"]
assert resolved.type == params["type"]
def test_resolve_by_name_failure(app):
pk = app.pk
with pytest.raises(Exceptions.CustomMetricLookupError) as err_wrapper:
pk.custom_metrics.resolve("My Custom Metric")
assert err_wrapper.value.message == "Custom metric with name `My Custom Metric` not found."
def test_update(app, custom_metric_generator):
pk = app.pk
resource_prefix = app.resource_prefix
params, custom_metric = custom_metric_generator()
# Verify custom metric was updated successfully
updated_name = resource_prefix + "Updated Custom Metric Name"
custom_metric.name = updated_name
custom_metric.context = "image_set"
custom_metric.save()
custom_metrics = pk.custom_metrics.query()
for custom_metric in custom_metrics:
if custom_metric.name == updated_name:
custom_metric_match = custom_metric
break
else:
custom_metric_match = None
assert custom_metric_match is not None
assert custom_metric_match.name == updated_name
assert custom_metric_match.context == "image_set"
assert custom_metric_match.type == params["type"]
def test_update_failure(app, custom_metric_generator):
pk = app.pk
params1, _ = custom_metric_generator()
params2, custom_metric = custom_metric_generator()
    # Assert error is raised for duplicate custom metric name
with pytest.raises(Exceptions.HttpError) as err_wrapper:
custom_metric.name = params1["name"]
custom_metric.save()
assert err_wrapper.value.status_code == 409
assert err_wrapper.value.body == 'Custom metric already exists with name "' + params1["name"] + '"'
| [
"pytest.raises",
"re.compile"
] | [((2350, 2369), 're.compile', 're.compile', (['"""ind M"""'], {}), "('ind M')\n", (2360, 2369), False, 'import re\n'), ((1058, 1093), 'pytest.raises', 'pytest.raises', (['Exceptions.HttpError'], {}), '(Exceptions.HttpError)\n', (1071, 1093), False, 'import pytest\n'), ((1987, 2022), 'pytest.raises', 'pytest.raises', (['Exceptions.HttpError'], {}), '(Exceptions.HttpError)\n', (2000, 2022), False, 'import pytest\n'), ((5145, 5194), 'pytest.raises', 'pytest.raises', (['Exceptions.CustomMetricLookupError'], {}), '(Exceptions.CustomMetricLookupError)\n', (5158, 5194), False, 'import pytest\n'), ((5428, 5477), 'pytest.raises', 'pytest.raises', (['Exceptions.CustomMetricLookupError'], {}), '(Exceptions.CustomMetricLookupError)\n', (5441, 5477), False, 'import pytest\n'), ((6053, 6102), 'pytest.raises', 'pytest.raises', (['Exceptions.CustomMetricLookupError'], {}), '(Exceptions.CustomMetricLookupError)\n', (6066, 6102), False, 'import pytest\n'), ((6993, 7042), 'pytest.raises', 'pytest.raises', (['Exceptions.CustomMetricLookupError'], {}), '(Exceptions.CustomMetricLookupError)\n', (7006, 7042), False, 'import pytest\n'), ((8275, 8310), 'pytest.raises', 'pytest.raises', (['Exceptions.HttpError'], {}), '(Exceptions.HttpError)\n', (8288, 8310), False, 'import pytest\n')] |
import torch
from torch import dtype, nn
import torch.nn.functional as F
class PAM_Module(nn.Module):
def __init__(self, num, sizes,mode=None):
super(PAM_Module, self).__init__()
self.sizes = sizes
self.mode = mode
for i in range(num):
setattr(self, "query" + str(i),
nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
setattr(self, "value" + str(i),
nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
setattr(self, "key" + str(i),
nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
def forward(self, feat_sources, feat_targets):
"""calculate the attention weight and alpha"""
ret_feats, ret_alphas = [], []
for i, query in enumerate(feat_targets):
Bt, Ct, Ht, Wt = query.size()
pro_query = getattr(self, "query"+str(i)
)(query).view(Bt, -1, Ht*Wt).permute(0, 2, 1)
attentions, means = [], []
for j, key in enumerate(feat_sources):
pro_key = getattr(self, "key" + str(j))(key).view(Bt, -1, Ht * Wt)
energy = torch.bmm(pro_query, pro_key)
means.append(energy.mean().item())
attentions.append(torch.softmax(energy, dim=-1))
if self.mode.find('alpha')>=0:
ret_alphas.append(torch.softmax(torch.tensor(means), dim=0))
else:
ret_alphas.append(torch.tensor(means).mean())
            if self.mode in ['all', 'pam', 'cam', 'alpha_cam', 'alpha_all']:
attention = torch.stack(attentions, dim=0).sum(0)
value = getattr(self, "value" + str(i))(query).view(Bt, -1, Ht * Wt)
out = torch.bmm(value, attention.permute(0, 2, 1)).view(Bt, Ct, Ht, Wt)
ret_feats.append(out)
if self.mode.find('alpha') >= 0:
ret_alphas = torch.stack(ret_alphas, dim=0)
else:
ret_alphas = torch.softmax(torch.tensor(ret_alphas), dim=0)
return ret_feats, ret_alphas
class CAM_Module(nn.Module):
def __init__(self, num, sizes, mode=None):
super(CAM_Module, self).__init__()
self.sizes = sizes
self.mode = mode
for i in range(num):
setattr(self, "value" + str(i),
nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
def forward(self, feat_sources, feat_targets):
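        # Channel attention: unlike PAM above, queries stay B x C x HW and keys
        # are transposed to B x HW x C, so each energy matrix is B x C x C over
        # channels; no extra query/key convolutions are used here.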
ret_feats, ret_alphas = [], []
for i, query in enumerate(feat_targets):
Bt, Ct, Ht, Wt = query.size()
pro_query = query.view(Bt, Ct, -1)
attentions, means = [], []
for j, key in enumerate(feat_sources):
pro_key = key.view(Bt, Ct, -1).permute(0, 2, 1)
energy = torch.bmm(pro_query, pro_key)
means.append(energy.mean().item())
attentions.append(torch.softmax(energy, dim=-1))
if self.mode.find('alpha') >= 0:
ret_alphas.append(torch.softmax(torch.tensor(means), dim=0))
else:
ret_alphas.append(torch.tensor(means).mean())
            if self.mode in ['all', 'pam', 'cam', 'alpha_cam', 'alpha_all']:
attention = torch.stack(attentions, dim=0).sum(0)
value = getattr(self, "value"+str(i))(query).view(Bt, Ct, -1)
out = torch.bmm(attention, value).view(Bt, Ct, Ht, Wt)
ret_feats.append(out)
if self.mode.find('alpha') >= 0:
ret_alphas = torch.stack(ret_alphas, dim=0)
else:
ret_alphas = torch.softmax(torch.tensor(ret_alphas), dim=0)
return ret_feats, ret_alphas
class ConvReg(nn.Module):
def __init__(self, s_shape, t_shape, factor=1):
super(ConvReg, self).__init__()
s_N, s_C, s_H, s_W = s_shape
t_N, t_C, t_H, t_W = t_shape
if s_H == 2 * t_H:
self.conv = nn.Conv2d(
s_C, t_C // factor, kernel_size=3, stride=2, padding=1)
elif s_H * 2 == t_H:
self.conv = nn.ConvTranspose2d(
s_C, t_C // factor, kernel_size=4, stride=2, padding=1)
elif s_H >= t_H:
self.conv = nn.Conv2d(
s_C, t_C//factor, kernel_size=(1 + s_H - t_H, 1 + s_W - t_W))
else:
            raise NotImplementedError(
'student size {}, teacher size {}'.format(s_H, t_H))
def forward(self, x):
x = self.conv(x)
return x
class Fit(nn.Module):
def __init__(self, s_shape, t_shape, factor=1):
super(Fit, self).__init__()
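        # Same resampling scheme as ConvReg above, plus a 1x1-conv branch for
        # the case where student and teacher resolutions already match and a
        # general fallback instead of raising.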
_, s_C, s_H, s_W = s_shape
_, t_C, t_H, t_W = t_shape
if s_H == 2*t_H:
self.conv = nn.Conv2d(
s_C, t_C//factor, kernel_size=3, stride=2, padding=1)
elif s_H * 2 == t_H:
self.conv = nn.ConvTranspose2d(
s_C, t_C//factor, kernel_size=4, stride=2, padding=1)
elif s_H == t_H:
self.conv = nn.Conv2d(
s_C, t_C//factor, kernel_size=1, stride=1, padding=0)
else:
self.conv = nn.Conv2d(
s_C, t_C//factor, kernel_size=(1+s_H-t_H, 1 + s_W-t_W))
# if channels:
# self.conv = nn.Conv2d(s_C,channels,kernel_size=(1+s_H-t_H, 1+s_W-t_W))
# else:
# self.conv = nn.Conv2d(s_C,t_C//factor,kernel_size=(1+s_H-t_H, 1+s_W-t
def forward(self, x):
x = self.conv(x)
return x
# torch.Size([16, 128, 28, 28]) torch.Size([16, 256, 14, 14]) torch.Size([16, 512, 7, 7])
class Project(nn.Module):
def __init__(self, origin_sizes, new_size=torch.Size([-1, 16, 14, 14]), factor=1):
super(Project, self).__init__()
for i, size_o in enumerate(origin_sizes):
setattr(self, "target"+str(i),
Fit(size_o, new_size, factor=factor))
setattr(self, "source"+str(i),
Fit(size_o, new_size, factor=factor))
def forward(self, feat_sources, feat_targets):
new_feat_sources, new_feat_targets = [], []
for i, source in enumerate(feat_sources):
new_feat_sources.append(getattr(self, "source" + str(i))(source))
for i, target in enumerate(feat_targets):
new_feat_targets.append(getattr(self, "target" + str(i))(target))
return new_feat_sources, new_feat_targets
class DAAttention(nn.Module):
def __init__(self, origin_sizes, new_size=torch.Size([-1, 32, 7, 7]), factor=1, mode="all"):
super(DAAttention, self).__init__()
self.pro = Project(origin_sizes, new_size=new_size, factor=factor)
self.mode = mode
self.layer_num = len(origin_sizes)
if mode in ['all', 'alpha', 'pam', 'alpha_pam', 'alpha_all']:
self.pam = PAM_Module(self.layer_num, new_size, self.mode)
if mode in ['all', 'alpha', 'cam', 'alpha_cam', 'alpha_all']:
self.cam = CAM_Module(self.layer_num, new_size, self.mode)
self.C = new_size[1]
self.H = new_size[2]
self.W = new_size[3]
def forward(self, feat_sources, feat_targets):
new_feat_sources, new_feat_targets = self.pro(
feat_sources, feat_targets)
if self.mode in ['pam', 'all', 'alpha', 'alpha_pam', 'alpha_all']:
feat_pam, alpha_pam = self.pam(new_feat_sources, new_feat_targets)
if self.mode in ['cam', 'all', 'alpha', 'alpha_cam', 'alpha_all']:
feat_cam, alpha_cam = self.cam(new_feat_sources, new_feat_targets)
ret_alpha = None
ret_targets, ret_sources = [], []
for i in range(self.layer_num):
if self.mode in ['all', 'alpha_all']:
ret_targets.append(((feat_pam[i] + feat_cam[i]) * 0.5).view(-1, self.C * self.H * self.W))
ret_alpha = (alpha_cam+alpha_pam) * 0.5
elif self.mode == 'cam':
ret_targets.append(feat_cam[i].view(-1, self.C * self.H * self.W))
ret_alpha = alpha_cam
elif self.mode == 'pam':
ret_targets.append(feat_pam[i].view(-1, self.C * self.H * self.W))
ret_alpha = alpha_pam
elif self.mode in ['alpha', 'alpha_pam', 'alpha_cam']:
                if self.mode == 'alpha':
                    ret_alpha = (alpha_pam + alpha_cam) * 0.5
                elif self.mode == 'alpha_cam':
                    ret_alpha = alpha_cam
                elif self.mode == 'alpha_pam':
                    ret_alpha = alpha_pam
elif self.mode[:3] == 'noa':
ret_targets.append(new_feat_targets[i].view(-1, self.C * self.H * self.W))
ret_sources.append(new_feat_sources[i].view(-1, self.C * self.H * self.W))
return ret_sources, ret_alpha, ret_targets
if __name__ == '__main__':
# feat_source1 = torch.rand((16,512,28,28))
# feat_source2 = torch.rand((16,1024,14,14))
# feat_source3 = torch.rand((16,2048,7,7))
# feat_target1 = torch.rand((16, 512, 28, 28))
# feat_target2 = torch.rand((16, 1024, 14, 14))
# feat_target3 = torch.rand((16, 2048, 7, 7))
# att = DAAttention([feat_source1.size(),feat_source2.size(),feat_source3.size()])
# out,alpha = att([feat_source1,feat_source2,feat_source3],[feat_target1,feat_target2,feat_target3])
# print(out[0].size(),alpha.size())
# print(out[1].size(),alpha.size())
# print(out[2].size(),alpha.size())
# import sys
# sys.path.append('../..')
# sys.path.append('..')
# from models.fullnet import FLDGFullNet
# from models.backbone import resnet18
# backbone = resnet18()
# net = FLDGFullNet(backbone, 7)
# data = torch.rand((16, 3, 224, 224))
# a, b, c, d, e = net(data)
# print(c.size(), d.size(), e.size())
# torch.Size([16, 128, 28, 28]) torch.Size([16, 256, 14, 14]) torch.Size([16, 512, 7, 7])
import torch
a = torch.rand(3, 3)
print(a, a[0, 0].item())
| [
"torch.nn.ConvTranspose2d",
"torch.stack",
"torch.nn.Conv2d",
"torch.softmax",
"torch.tensor",
"torch.bmm",
"torch.Size",
"torch.rand"
] | [((10040, 10056), 'torch.rand', 'torch.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (10050, 10056), False, 'import torch\n'), ((5812, 5840), 'torch.Size', 'torch.Size', (['[-1, 16, 14, 14]'], {}), '([-1, 16, 14, 14])\n', (5822, 5840), False, 'import torch\n'), ((6633, 6659), 'torch.Size', 'torch.Size', (['[-1, 32, 7, 7]'], {}), '([-1, 32, 7, 7])\n', (6643, 6659), False, 'import torch\n'), ((2053, 2083), 'torch.stack', 'torch.stack', (['ret_alphas'], {'dim': '(0)'}), '(ret_alphas, dim=0)\n', (2064, 2083), False, 'import torch\n'), ((3713, 3743), 'torch.stack', 'torch.stack', (['ret_alphas'], {'dim': '(0)'}), '(ret_alphas, dim=0)\n', (3724, 3743), False, 'import torch\n'), ((4112, 4177), 'torch.nn.Conv2d', 'nn.Conv2d', (['s_C', '(t_C // factor)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(s_C, t_C // factor, kernel_size=3, stride=2, padding=1)\n', (4121, 4177), False, 'from torch import dtype, nn\n'), ((4895, 4960), 'torch.nn.Conv2d', 'nn.Conv2d', (['s_C', '(t_C // factor)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(s_C, t_C // factor, kernel_size=3, stride=2, padding=1)\n', (4904, 4960), False, 'from torch import dtype, nn\n'), ((338, 407), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'sizes[1]', 'out_channels': 'sizes[1]', 'kernel_size': '(1)'}), '(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1)\n', (347, 407), False, 'from torch import dtype, nn\n'), ((473, 542), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'sizes[1]', 'out_channels': 'sizes[1]', 'kernel_size': '(1)'}), '(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1)\n', (482, 542), False, 'from torch import dtype, nn\n'), ((606, 675), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'sizes[1]', 'out_channels': 'sizes[1]', 'kernel_size': '(1)'}), '(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1)\n', (615, 675), False, 'from torch import dtype, nn\n'), ((1243, 1272), 'torch.bmm', 'torch.bmm', (['pro_query', 'pro_key'], {}), '(pro_query, pro_key)\n', (1252, 1272), False, 'import torch\n'), ((2137, 2161), 'torch.tensor', 'torch.tensor', (['ret_alphas'], {}), '(ret_alphas)\n', (2149, 2161), False, 'import torch\n'), ((2473, 2542), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'sizes[1]', 'out_channels': 'sizes[1]', 'kernel_size': '(1)'}), '(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1)\n', (2482, 2542), False, 'from torch import dtype, nn\n'), ((2953, 2982), 'torch.bmm', 'torch.bmm', (['pro_query', 'pro_key'], {}), '(pro_query, pro_key)\n', (2962, 2982), False, 'import torch\n'), ((3797, 3821), 'torch.tensor', 'torch.tensor', (['ret_alphas'], {}), '(ret_alphas)\n', (3809, 3821), False, 'import torch\n'), ((4248, 4322), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['s_C', '(t_C // factor)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(s_C, t_C // factor, kernel_size=4, stride=2, padding=1)\n', (4266, 4322), False, 'from torch import dtype, nn\n'), ((5029, 5103), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['s_C', '(t_C // factor)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(s_C, t_C // factor, kernel_size=4, stride=2, padding=1)\n', (5047, 5103), False, 'from torch import dtype, nn\n'), ((1358, 1387), 'torch.softmax', 'torch.softmax', (['energy'], {'dim': '(-1)'}), '(energy, dim=-1)\n', (1371, 1387), False, 'import torch\n'), ((3068, 3097), 'torch.softmax', 'torch.softmax', (['energy'], {'dim': '(-1)'}), '(energy, dim=-1)\n', (3081, 3097), False, 'import torch\n'), ((4389, 
4462), 'torch.nn.Conv2d', 'nn.Conv2d', (['s_C', '(t_C // factor)'], {'kernel_size': '(1 + s_H - t_H, 1 + s_W - t_W)'}), '(s_C, t_C // factor, kernel_size=(1 + s_H - t_H, 1 + s_W - t_W))\n', (4398, 4462), False, 'from torch import dtype, nn\n'), ((5168, 5233), 'torch.nn.Conv2d', 'nn.Conv2d', (['s_C', '(t_C // factor)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(s_C, t_C // factor, kernel_size=1, stride=1, padding=0)\n', (5177, 5233), False, 'from torch import dtype, nn\n'), ((5287, 5360), 'torch.nn.Conv2d', 'nn.Conv2d', (['s_C', '(t_C // factor)'], {'kernel_size': '(1 + s_H - t_H, 1 + s_W - t_W)'}), '(s_C, t_C // factor, kernel_size=(1 + s_H - t_H, 1 + s_W - t_W))\n', (5296, 5360), False, 'from torch import dtype, nn\n'), ((1493, 1512), 'torch.tensor', 'torch.tensor', (['means'], {}), '(means)\n', (1505, 1512), False, 'import torch\n'), ((1720, 1750), 'torch.stack', 'torch.stack', (['attentions'], {'dim': '(0)'}), '(attentions, dim=0)\n', (1731, 1750), False, 'import torch\n'), ((3193, 3212), 'torch.tensor', 'torch.tensor', (['means'], {}), '(means)\n', (3205, 3212), False, 'import torch\n'), ((3421, 3451), 'torch.stack', 'torch.stack', (['attentions'], {'dim': '(0)'}), '(attentions, dim=0)\n', (3432, 3451), False, 'import torch\n'), ((3560, 3587), 'torch.bmm', 'torch.bmm', (['attention', 'value'], {}), '(attention, value)\n', (3569, 3587), False, 'import torch\n'), ((1574, 1593), 'torch.tensor', 'torch.tensor', (['means'], {}), '(means)\n', (1586, 1593), False, 'import torch\n'), ((3274, 3293), 'torch.tensor', 'torch.tensor', (['means'], {}), '(means)\n', (3286, 3293), False, 'import torch\n')] |
from docassemble.base.util import variables_snapshot_connection, user_info
__all__ = ['get_stats']
def get_stats(filename: str):
conn = variables_snapshot_connection()
cur = conn.cursor()
# use a parameterized query to prevent SQL injection
query = "select modtime, data from jsonstorage where filename=%(filename)s"
cur.execute(query, {'filename': filename})
records = list()
for record in cur.fetchall():
records.append(record)
conn.close()
return records
| [
"docassemble.base.util.variables_snapshot_connection"
] | [((143, 174), 'docassemble.base.util.variables_snapshot_connection', 'variables_snapshot_connection', ([], {}), '()\n', (172, 174), False, 'from docassemble.base.util import variables_snapshot_connection, user_info\n')] |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'prompt'.
Dans ce fichier ne se trouve que la commande.
Les sous-commandes peuvent être trouvées dans le package.
"""
from primaires.interpreteur.commande.commande import Commande
from .defaut import PrmDefaut
# Constants
AIDE = """
Cette commande permet de configurer vos différents prompts. Le prompt
est un message qui s'affiche généralement après l'entrée d'une commande ou une
action quelconque dans l'univers. Ce message donne des informations
générales sur votre personnage (par défaut, sa vitalité, mana et
endurance).
Il existe plusieurs prompts. Par exemple, celui que vous verrez à
votre première connexion est le prompt par défaut qui s'affiche dans
la plupart des circonstances. Il existe également un prompt de combat
qui est affiché quand votre personnage est en combat et peut donner
des informations supplémentaires.
Vous pouvez ici configurer votre prompt, c'est-à-dire changer ce
message. En utilisant une des sous-commandes ci-dessous, vous pouvez
soit consulter, masquer, modifier ou réinitialiser votre prompt.
Ce que vous entrez grâce à cette commande deviendra votre prompt. Vous
pouvez aussi utiliser des symboles (par exemple, vous pouvez entrer
%prompt% %prompt:défaut%|cmd| Vit(|pc|v) Man(|pc|m) End(|pc|e)|ff| pour
avoir un prompt sous la forme |ent|Vit(50) Man(50) End(50)|ff|.
Les symboles sont des combinaisons de lettres précédées du signe
pourcent (|pc|). Voici les symboles que vous pouvez utiliser pour tous
les prompts :
|pc|v Vitalité actuelle
|pc|m Mana actuelle
|pc|e Endurance actuelle
|pc|vx Vitalité maximum
|pc|mx Mana maximum
|pc|ex Endurance maximum
|pc|sl Saut de ligne (pour avoir un prompt sur deux lignes)
|pc|f Force
|pc|a Agilité
|pc|r Robustesse
|pc|i Intelligence
|pc|c Charisme
|pc|s Sensibilité
""".strip()
class CmdPrompt(Commande):
"""Commande 'prompt'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "prompt", "prompt")
self.schema = ""
self.aide_courte = "affiche ou configure votre prompt"
self.aide_longue = AIDE
def ajouter_parametres(self):
"""Ajout dynamique des paramètres."""
for prompt in importeur.perso.prompts.values():
self.ajouter_parametre(PrmDefaut(prompt))
| [
"primaires.interpreteur.commande.commande.Commande.__init__"
] | [((3656, 3699), 'primaires.interpreteur.commande.commande.Commande.__init__', 'Commande.__init__', (['self', '"""prompt"""', '"""prompt"""'], {}), "(self, 'prompt', 'prompt')\n", (3673, 3699), False, 'from primaires.interpreteur.commande.commande import Commande\n')] |
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import xray
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = atm.homedir() + 'datastore/merra/daily/'
year = 2014
subset = '_40E-120E_90S-90N'
def get_var(datadir, varnm, subset, year):
filenm = '%smerra_%s%s_%d.nc' % (datadir, varnm, subset, year)
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
return var
uq_int = get_var(datadir, 'UFLXQV', subset, year)
vq_int = get_var(datadir, 'VFLXQV', subset, year)
mfc = atm.moisture_flux_conv(uq_int, vq_int, already_int=True)
mfcbar = mfc.mean(dim='YDim').mean(dim='XDim')
# Test atm.gradient
a = atm.constants.radius_earth.values
latdim, londim = 1, 2
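# Horizontal moisture flux convergence on the sphere:
#   mfc = -(1 / (a * cos(lat))) * (d(uq)/dlon + d(vq * cos(lat))/dlat),
# with lat/lon in radians and a the Earth radius; precip_convert then maps
# kg/m2/s to mm/day.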
lat = atm.get_coord(uq_int, 'lat')
latrad = np.radians(lat)
latrad[abs(lat) > 89] = np.nan
coslat = xray.DataArray(np.cos(latrad), coords={'YDim' : lat})
lon = atm.get_coord(uq_int, 'lon')
lonrad = np.radians(lon)
mfc_x = atm.gradient(uq_int, lonrad, londim) / (a*coslat)
mfc_y = atm.gradient(vq_int * coslat, latrad, latdim) / (a*coslat)
mfc_test = mfc_x + mfc_y
mfc_test = - atm.precip_convert(mfc_test, 'kg/m2/s', 'mm/day')
mfc_test_bar = mfc_test.mean(dim='YDim').mean(dim='XDim')
diff = mfc_test - mfc
print(diff.max())
print(diff.min())
plt.plot(mfcbar)
plt.plot(mfc_test_bar)
print(mfc_test_bar - mfcbar)
# ----------------------------------------------------------------------
# Vertical gradient du/dp
lon1, lon2 = 40, 120
pmin, pmax = 100, 300
subset_dict = {'XDim' : (lon1, lon2), 'Height' : (pmin, pmax)}
urls = merra.merra_urls([year])
month, day = 7, 15
url = urls['%d%02d%02d' % (year, month, day)]
with xray.open_dataset(url) as ds:
u = atm.subset(ds['U'], subset_dict, copy=False)
u = u.mean(dim='TIME')
pres = u['Height']
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dp = np.gradient(pres)
# Calc 1
dims = u.shape
dudp = np.nan * u
for i in range(dims[1]):
for j in range(dims[2]):
dudp.values[:, i, j] = np.gradient(u[:, i, j], dp)
# Test atm.gradient
dudp_test = atm.gradient(u, pres, axis=0)
diff = dudp_test - dudp
print(diff.max())
print(diff.min())
| [
"numpy.radians",
"numpy.gradient",
"atmos.gradient",
"atmos.homedir",
"matplotlib.pyplot.plot",
"atmos.pres_convert",
"atmos.get_coord",
"xray.open_dataset",
"atmos.moisture_flux_conv",
"numpy.cos",
"merra.merra_urls",
"sys.path.append",
"atmos.precip_convert",
"atmos.subset"
] | [((11, 71), 'sys.path.append', 'sys.path.append', (['"""/home/jwalker/dynamics/python/atmos-tools"""'], {}), "('/home/jwalker/dynamics/python/atmos-tools')\n", (26, 71), False, 'import sys\n'), ((72, 131), 'sys.path.append', 'sys.path.append', (['"""/home/jwalker/dynamics/python/atmos-read"""'], {}), "('/home/jwalker/dynamics/python/atmos-read')\n", (87, 131), False, 'import sys\n'), ((770, 826), 'atmos.moisture_flux_conv', 'atm.moisture_flux_conv', (['uq_int', 'vq_int'], {'already_int': '(True)'}), '(uq_int, vq_int, already_int=True)\n', (792, 826), True, 'import atmos as atm\n'), ((961, 989), 'atmos.get_coord', 'atm.get_coord', (['uq_int', '"""lat"""'], {}), "(uq_int, 'lat')\n", (974, 989), True, 'import atmos as atm\n'), ((999, 1014), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (1009, 1014), True, 'import numpy as np\n'), ((1115, 1143), 'atmos.get_coord', 'atm.get_coord', (['uq_int', '"""lon"""'], {}), "(uq_int, 'lon')\n", (1128, 1143), True, 'import atmos as atm\n'), ((1153, 1168), 'numpy.radians', 'np.radians', (['lon'], {}), '(lon)\n', (1163, 1168), True, 'import numpy as np\n'), ((1502, 1518), 'matplotlib.pyplot.plot', 'plt.plot', (['mfcbar'], {}), '(mfcbar)\n', (1510, 1518), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1541), 'matplotlib.pyplot.plot', 'plt.plot', (['mfc_test_bar'], {}), '(mfc_test_bar)\n', (1527, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1810), 'merra.merra_urls', 'merra.merra_urls', (['[year]'], {}), '([year])\n', (1802, 1810), False, 'import merra\n'), ((2018, 2067), 'atmos.pres_convert', 'atm.pres_convert', (['pres', "pres.attrs['units']", '"""Pa"""'], {}), "(pres, pres.attrs['units'], 'Pa')\n", (2034, 2067), True, 'import atmos as atm\n'), ((2073, 2090), 'numpy.gradient', 'np.gradient', (['pres'], {}), '(pres)\n', (2084, 2090), True, 'import numpy as np\n'), ((2280, 2309), 'atmos.gradient', 'atm.gradient', (['u', 'pres'], {'axis': '(0)'}), '(u, pres, axis=0)\n', (2292, 2309), True, 'import atmos as atm\n'), ((381, 394), 'atmos.homedir', 'atm.homedir', ([], {}), '()\n', (392, 394), True, 'import atmos as atm\n'), ((1070, 1084), 'numpy.cos', 'np.cos', (['latrad'], {}), '(latrad)\n', (1076, 1084), True, 'import numpy as np\n'), ((1178, 1214), 'atmos.gradient', 'atm.gradient', (['uq_int', 'lonrad', 'londim'], {}), '(uq_int, lonrad, londim)\n', (1190, 1214), True, 'import atmos as atm\n'), ((1236, 1281), 'atmos.gradient', 'atm.gradient', (['(vq_int * coslat)', 'latrad', 'latdim'], {}), '(vq_int * coslat, latrad, latdim)\n', (1248, 1281), True, 'import atmos as atm\n'), ((1334, 1383), 'atmos.precip_convert', 'atm.precip_convert', (['mfc_test', '"""kg/m2/s"""', '"""mm/day"""'], {}), "(mfc_test, 'kg/m2/s', 'mm/day')\n", (1352, 1383), True, 'import atmos as atm\n'), ((1881, 1903), 'xray.open_dataset', 'xray.open_dataset', (['url'], {}), '(url)\n', (1898, 1903), False, 'import xray\n'), ((1919, 1963), 'atmos.subset', 'atm.subset', (["ds['U']", 'subset_dict'], {'copy': '(False)'}), "(ds['U'], subset_dict, copy=False)\n", (1929, 1963), True, 'import atmos as atm\n'), ((583, 608), 'xray.open_dataset', 'xray.open_dataset', (['filenm'], {}), '(filenm)\n', (600, 608), False, 'import xray\n'), ((2219, 2246), 'numpy.gradient', 'np.gradient', (['u[:, i, j]', 'dp'], {}), '(u[:, i, j], dp)\n', (2230, 2246), True, 'import numpy as np\n')] |
'''
Solution for day 6 of the 2021 Advent of Code calendar.
Run it with the command `python -m adventofcode run_solution -y 2021 6` from the project root.
'''
import time
from adventofcode.types import Solution
class LanternFish:
def __init__(self, timer_til_fork):
self.timer_til_fork = timer_til_fork
def __repr__(self):
return f"{self.timer_til_fork}"
def evolve(self) -> bool:
if self.timer_til_fork == 0:
self.timer_til_fork = 6
return True
self.timer_til_fork -= 1
return False
def part1(data):
fishes = [LanternFish(int(age)) for age in data.split(",")]
for day in range(0, 80):
start = time.time()
print(f"Generating: {day}")
evolve = (lantern_fish.evolve() for lantern_fish in fishes)
fishes += [LanternFish(8) for fork in evolve if fork]
end = time.time()
print(f"Took: {end - start}, generated {len(fishes)} fish")
return len(fishes)
def part2(data):
fish_age = [0, 0, 0, 0, 0, 0, 0, 0, 0]
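    # fish_age[i] counts the fish whose timer reads i (0..8). Each day the
    # list rotates left: the timer-0 fish spawn that many new timer-8 fish
    # (the append) and restart themselves at timer 6 (the += on index 6).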
for age in data.split(","):
fish_age[int(age)] += 1
for day in range(0, 256):
start = time.time()
print(f"Generating: {day}")
should_fork = fish_age.pop(0)
fish_age.append(should_fork)
fish_age[6] += should_fork
end = time.time()
print(f"{len(fish_age)}{fish_age}, {sum(fish_age)} fish, took {end - start}, ")
return sum(fish_age)
# Each day, a 0 becomes a 6 and adds a new 8 to the end of the list,
# while each other number decreases by 1 if it was present at the start of the day.
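# Worked example (the published AoC 2021 day 6 sample): starting fish 3,4,3,1,2
# give counts [0,1,1,2,1,0,0,0,0]; after one rotation the counts are
# [1,1,2,1,0,0,0,0,0] (state 2,3,2,0,1), and the totals reach 26 fish after
# 18 days and 5934 after 80 days.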
def run(data: str) -> Solution:
return part1(data), part2(data)
| [
"time.time"
] | [((694, 705), 'time.time', 'time.time', ([], {}), '()\n', (703, 705), False, 'import time\n'), ((886, 897), 'time.time', 'time.time', ([], {}), '()\n', (895, 897), False, 'import time\n'), ((1163, 1174), 'time.time', 'time.time', ([], {}), '()\n', (1172, 1174), False, 'import time\n'), ((1335, 1346), 'time.time', 'time.time', ([], {}), '()\n', (1344, 1346), False, 'import time\n')] |
import os
import json
import numpy as np
import matplotlib.pyplot as plt
def compute_iou(box_1, box_2):
'''
This function takes a pair of bounding boxes and returns intersection-over-
union (IoU) of two bounding boxes.
'''
intersection = 0
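    # Box layout assumed from the variable names below:
    # (top-left row, top-left col, bottom-right row, bottom-right col).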
tlr1, tlc1, brr1, brc1 = box_1[0], box_1[1], box_1[2], box_1[3]
tlr2, tlc2, brr2, brc2 = box_2[0], box_2[1], box_2[2], box_2[3]
dx = min(brr1, brr2) - max(tlr1, tlr2)
    dy = min(brc1, brc2) - max(tlc1, tlc2)
if (dx>=0) and (dy>=0):
intersection = dx * dy
area1 = (brc1 - tlc1) * (brr1 - tlr1)
area2 = (brc2 - tlc2) * (brr2 - tlr2)
union = area1 + area2 - intersection
iou = intersection / union
assert (iou >= 0) and (iou <= 1.0)
return iou
def compute_counts(preds, gts, iou_thr=0.5, conf_thr=0.5):
'''
This function takes a pair of dictionaries (with our JSON format; see ex.)
corresponding to predicted and ground truth bounding boxes for a collection
of images and returns the number of true positives, false positives, and
false negatives.
<preds> is a dictionary containing predicted bounding boxes and confidence
scores for a collection of images.
<gts> is a dictionary containing ground truth bounding boxes for a
collection of images.
'''
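    # Note: matching here is greedy; each ground-truth box scans predictions in
    # order and stops at the first confident one, so no one-to-one assignment
    # between predictions and ground truths is enforced.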
TP = 0
FP = 0
FN = 0
for pred_file, pred in preds.items():
gt = gts[pred_file]
for i in range(len(gt)):
not_found = True
for j in range(len(pred)):
iou = compute_iou(pred[j][:4], gt[i])
if iou >= iou_thr and pred[j][4] >= conf_thr:
TP += 1
not_found = False
break
elif pred[j][4] >= conf_thr:
FP += 1
not_found = False
break
if not_found:
FN += 1
return TP, FP, FN
# set a path for predictions and annotations:
preds_path = 'hw02_preds'
gts_path = 'hw02_annotations'
# load splits:
split_path = 'hw02_splits'
file_names_train = np.load(os.path.join(split_path,'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path,'file_names_test.npy'))
# Set this parameter to True when you're done with algorithm development:
done_tweaking = True
'''
Load training data.
'''
with open(os.path.join(preds_path,'preds_train.json'),'r') as f:
preds_train = json.load(f)
with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
gts_train = json.load(f)
if done_tweaking:
'''
Load test data.
'''
with open(os.path.join(preds_path,'preds_test.json'),'r') as f:
preds_test = json.load(f)
with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
gts_test = json.load(f)
# For a fixed IoU threshold, vary the confidence thresholds.
# The code below gives an example on the training set for one IoU threshold.
def compute_PR(iou, preds, gts):
lst = []
for fname in preds:
if preds[fname] != []:
for pred in preds[fname]:
lst.append(pred[4])
confidence_thrs = np.sort(np.array(lst,dtype=float)) # using (ascending) list of confidence scores as thresholds
tp = np.zeros(len(confidence_thrs))
fp = np.zeros(len(confidence_thrs))
fn = np.zeros(len(confidence_thrs))
for i, conf_thr in enumerate(confidence_thrs):
tp[i], fp[i], fn[i] = compute_counts(preds, gts, iou_thr=iou, conf_thr=conf_thr)
# Plot training set PR curves
recall = np.zeros(len(confidence_thrs))
precision = np.zeros(len(confidence_thrs))
for i, elem in enumerate(tp):
precision[i] = tp[i]/(tp[i] + fp[i])
recall[i] = tp[i]/(tp[i] + fn[i])
return recall, precision
recall, precision = compute_PR(0.5, preds_train, gts_train)
recall_l, precision_l = compute_PR(0.25, preds_train, gts_train)
recall_m, precision_m = compute_PR(0.75, preds_train, gts_train)
plt.plot(recall, precision, color='black', marker='o')
plt.plot(recall_l, precision_l, color='blue', marker='o')
plt.plot(recall_m, precision_m, color='green', marker='o')
plt.legend(["IOU 0.5", "IOU 0.25", "IOU 0.75"])
plt.title("PR Curves Training")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
if done_tweaking:
print('Code for plotting test set PR curves.')
recall, precision = compute_PR(0.5, preds_test, gts_test)
recall_l, precision_l = compute_PR(0.25, preds_test, gts_test)
recall_m, precision_m = compute_PR(0.75, preds_test, gts_test)
plt.figure()
plt.plot(recall, precision, color='black', marker='o')
plt.plot(recall_l, precision_l, color='blue', marker='o')
plt.plot(recall_m, precision_m, color='green', marker='o')
plt.legend(["IOU 0.5", "IOU 0.25", "IOU 0.75"])
plt.title("PR Curves Testing")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.array",
"matplotlib.pyplot.figure",
"json.load",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((4004, 4058), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision'], {'color': '"""black"""', 'marker': '"""o"""'}), "(recall, precision, color='black', marker='o')\n", (4012, 4058), True, 'import matplotlib.pyplot as plt\n'), ((4059, 4116), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_l', 'precision_l'], {'color': '"""blue"""', 'marker': '"""o"""'}), "(recall_l, precision_l, color='blue', marker='o')\n", (4067, 4116), True, 'import matplotlib.pyplot as plt\n'), ((4117, 4175), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_m', 'precision_m'], {'color': '"""green"""', 'marker': '"""o"""'}), "(recall_m, precision_m, color='green', marker='o')\n", (4125, 4175), True, 'import matplotlib.pyplot as plt\n'), ((4176, 4223), 'matplotlib.pyplot.legend', 'plt.legend', (["['IOU 0.5', 'IOU 0.25', 'IOU 0.75']"], {}), "(['IOU 0.5', 'IOU 0.25', 'IOU 0.75'])\n", (4186, 4223), True, 'import matplotlib.pyplot as plt\n'), ((4224, 4255), 'matplotlib.pyplot.title', 'plt.title', (['"""PR Curves Training"""'], {}), "('PR Curves Training')\n", (4233, 4255), True, 'import matplotlib.pyplot as plt\n'), ((4256, 4276), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (4266, 4276), True, 'import matplotlib.pyplot as plt\n'), ((4277, 4300), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (4287, 4300), True, 'import matplotlib.pyplot as plt\n'), ((4301, 4311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4309, 4311), True, 'import matplotlib.pyplot as plt\n'), ((2107, 2155), 'os.path.join', 'os.path.join', (['split_path', '"""file_names_train.npy"""'], {}), "(split_path, 'file_names_train.npy')\n", (2119, 2155), False, 'import os\n'), ((2182, 2229), 'os.path.join', 'os.path.join', (['split_path', '"""file_names_test.npy"""'], {}), "(split_path, 'file_names_test.npy')\n", (2194, 2229), False, 'import os\n'), ((2439, 2451), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2448, 2451), False, 'import json\n'), ((2543, 2555), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2552, 2555), False, 'import json\n'), ((4584, 4596), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4594, 4596), True, 'import matplotlib.pyplot as plt\n'), ((4601, 4655), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision'], {'color': '"""black"""', 'marker': '"""o"""'}), "(recall, precision, color='black', marker='o')\n", (4609, 4655), True, 'import matplotlib.pyplot as plt\n'), ((4660, 4717), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_l', 'precision_l'], {'color': '"""blue"""', 'marker': '"""o"""'}), "(recall_l, precision_l, color='blue', marker='o')\n", (4668, 4717), True, 'import matplotlib.pyplot as plt\n'), ((4722, 4780), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_m', 'precision_m'], {'color': '"""green"""', 'marker': '"""o"""'}), "(recall_m, precision_m, color='green', marker='o')\n", (4730, 4780), True, 'import matplotlib.pyplot as plt\n'), ((4785, 4832), 'matplotlib.pyplot.legend', 'plt.legend', (["['IOU 0.5', 'IOU 0.25', 'IOU 0.75']"], {}), "(['IOU 0.5', 'IOU 0.25', 'IOU 0.75'])\n", (4795, 4832), True, 'import matplotlib.pyplot as plt\n'), ((4837, 4867), 'matplotlib.pyplot.title', 'plt.title', (['"""PR Curves Testing"""'], {}), "('PR Curves Testing')\n", (4846, 4867), True, 'import matplotlib.pyplot as plt\n'), ((4872, 4892), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (4882, 4892), True, 'import matplotlib.pyplot as plt\n'), ((4897, 4920), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (4907, 4920), True, 'import matplotlib.pyplot as plt\n'), ((4925, 4935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4933, 4935), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2410), 'os.path.join', 'os.path.join', (['preds_path', '"""preds_train.json"""'], {}), "(preds_path, 'preds_train.json')\n", (2378, 2410), False, 'import os\n'), ((2467, 2515), 'os.path.join', 'os.path.join', (['gts_path', '"""annotations_train.json"""'], {}), "(gts_path, 'annotations_train.json')\n", (2479, 2515), False, 'import os\n'), ((2710, 2722), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2719, 2722), False, 'import json\n'), ((2824, 2836), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2833, 2836), False, 'import json\n'), ((3184, 3210), 'numpy.array', 'np.array', (['lst'], {'dtype': 'float'}), '(lst, dtype=float)\n', (3192, 3210), True, 'import numpy as np\n'), ((2635, 2678), 'os.path.join', 'os.path.join', (['preds_path', '"""preds_test.json"""'], {}), "(preds_path, 'preds_test.json')\n", (2647, 2678), False, 'import os\n'), ((2746, 2793), 'os.path.join', 'os.path.join', (['gts_path', '"""annotations_test.json"""'], {}), "(gts_path, 'annotations_test.json')\n", (2758, 2793), False, 'import os\n')] |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import wx
from atom.api import Typed
from enaml.widgets.widget import ProxyWidget
from .wx_layout_request import wxEvtLayoutRequested
from .wx_resource_helpers import get_cached_wxcolor, get_cached_wxfont
from .wx_toolkit_object import WxToolkitObject
class WxWidget(WxToolkitObject, ProxyWidget):
""" A Wx implementation of an Enaml ProxyWidget.
"""
#: A reference to the toolkit widget created by the proxy.
widget = Typed(wx.Window)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Creates the underlying wx.Window widget.
"""
self.widget = wx.Window(self.parent_widget())
def init_widget(self):
""" Initialize the underlying widget.
"""
super(WxWidget, self).init_widget()
d = self.declaration
if d.background:
self.set_background(d.background)
if d.foreground:
self.set_foreground(d.foreground)
if d.font:
self.set_font(d.font)
if -1 not in d.minimum_size:
self.set_minimum_size(d.minimum_size)
if -1 not in d.maximum_size:
self.set_maximum_size(d.maximum_size)
if d.tool_tip:
self.set_tool_tip(d.tool_tip)
if d.status_tip:
self.set_status_tip(d.status_tip)
self.set_enabled(d.enabled)
self.set_visible(d.visible)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def update_geometry(self):
""" Notify the layout system that this widget has changed.
This method should be called when the geometry of the widget has
changed and the layout system should update the layout. This will
post a wxEvtLayoutRequested event to the parent of this widget.
"""
widget = self.widget
if widget:
parent = widget.GetParent()
if parent:
event = wxEvtLayoutRequested(widget.GetId())
wx.PostEvent(parent, event)
#--------------------------------------------------------------------------
# ProxyWidget API
#--------------------------------------------------------------------------
def set_minimum_size(self, min_size):
""" Sets the minimum size on the underlying widget.
"""
self.widget.SetMinSize(wx.Size(*min_size))
def set_maximum_size(self, max_size):
""" Sets the maximum size on the underlying widget.
"""
self.widget.SetMaxSize(wx.Size(*max_size))
def set_enabled(self, enabled):
""" Set the enabled state on the underlying widget.
"""
self.widget.Enable(enabled)
def set_visible(self, visible):
""" Set the visibility state on the underlying widget.
"""
self.widget.Show(visible)
def set_background(self, background):
""" Set the background color on the underlying widget.
"""
if background is None:
wxcolor = wx.NullColour
else:
wxcolor = get_cached_wxcolor(background)
widget = self.widget
widget.SetBackgroundColour(wxcolor)
widget.Refresh()
def set_foreground(self, foreground):
""" Set the foreground color on the underlying widget.
"""
if foreground is None:
wxcolor = wx.NullColour
else:
wxcolor = get_cached_wxcolor(foreground)
widget = self.widget
widget.SetForegroundColour(wxcolor)
widget.Refresh()
def set_font(self, font):
""" Set the font on the underlying widget.
"""
wxfont = get_cached_wxfont(font)
widget = self.widget
widget.SetFont(wxfont)
widget.Refresh()
def set_show_focus_rect(self, show):
""" This is not supported on Wx.
"""
pass
def set_tool_tip(self, tool_tip):
""" Set the tool tip of for this widget.
"""
self.widget.SetToolTipString(tool_tip)
def set_status_tip(self, status_tip):
""" This is not supported on Wx.
"""
pass
def ensure_visible(self):
""" Ensure the widget is visible.
"""
self.widget.Show(True)
def ensure_hidden(self):
""" Ensure the widget is hidden.
"""
self.widget.Show(False)
| [
"atom.api.Typed",
"wx.PostEvent",
"wx.Size"
] | [((791, 807), 'atom.api.Typed', 'Typed', (['wx.Window'], {}), '(wx.Window)\n', (796, 807), False, 'from atom.api import Typed\n'), ((2934, 2952), 'wx.Size', 'wx.Size', (['*min_size'], {}), '(*min_size)\n', (2941, 2952), False, 'import wx\n'), ((3101, 3119), 'wx.Size', 'wx.Size', (['*max_size'], {}), '(*max_size)\n', (3108, 3119), False, 'import wx\n'), ((2577, 2604), 'wx.PostEvent', 'wx.PostEvent', (['parent', 'event'], {}), '(parent, event)\n', (2589, 2604), False, 'import wx\n')] |
import radical.saga as saga
c = saga.Context('ssh')
c.user_id = 'dinesh'
s = saga.Session()
s.add_context(c)
js = saga.job.Service("lsf+ssh://yellowstone.ucar.edu", session=s)
| [
"radical.saga.job.Service",
"radical.saga.Context",
"radical.saga.Session"
] | [((34, 53), 'radical.saga.Context', 'saga.Context', (['"""ssh"""'], {}), "('ssh')\n", (46, 53), True, 'import radical.saga as saga\n'), ((81, 95), 'radical.saga.Session', 'saga.Session', ([], {}), '()\n', (93, 95), True, 'import radical.saga as saga\n'), ((121, 182), 'radical.saga.job.Service', 'saga.job.Service', (['"""lsf+ssh://yellowstone.ucar.edu"""'], {'session': 's'}), "('lsf+ssh://yellowstone.ucar.edu', session=s)\n", (137, 182), True, 'import radical.saga as saga\n')] |
import RPi.GPIO as GPIO
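# Hardware assumption (illustrative): an LED with a series resistor is wired
# to BCM pin 18; adjust the pin number to match your own wiring.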
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
led_state = False
try:
while True:
GPIO.output(18, led_state)
if led_state:
print("The LED is on. Press 'enter' to switch it off")
else:
print("The LED is off. Press 'enter' to switch it on")
arg = input("Press 'q' then 'enter' to quit.")
if arg == "q":
exit()
elif led_state:
led_state = False
else:
led_state = True
finally:
GPIO.cleanup()
| [
"RPi.GPIO.setup",
"RPi.GPIO.cleanup",
"RPi.GPIO.output",
"RPi.GPIO.setmode"
] | [((25, 47), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (37, 47), True, 'import RPi.GPIO as GPIO\n'), ((48, 72), 'RPi.GPIO.setup', 'GPIO.setup', (['(18)', 'GPIO.OUT'], {}), '(18, GPIO.OUT)\n', (58, 72), True, 'import RPi.GPIO as GPIO\n'), ((528, 542), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (540, 542), True, 'import RPi.GPIO as GPIO\n'), ((122, 148), 'RPi.GPIO.output', 'GPIO.output', (['(18)', 'led_state'], {}), '(18, led_state)\n', (133, 148), True, 'import RPi.GPIO as GPIO\n')] |
# grasp.py
# This script implements the GRASP heuristic for the dynamic bin packing
# problem.
# Author: <NAME>
from __future__ import print_function
import numpy as np
import random
import solutions_dynamic as solmaker
import sys
from copy import deepcopy
from itertools import combinations
from math import ceil, sqrt
from operator import attrgetter
class BPP:
# This class groups the bin packing problem information and performs
# the GRASP operations.
def __init__(self, n, cookies, moop):
self.beta = 5 # Cardinality restriction
self.n = int(n) # Number of cookies to sort
self.cookies = cookies # dictionary of item objects
self.moop = moop # Multiobjective problem class
self.lb = 0 # initialize lower bound
self.calclowerbound()
def generate_newsol(self, index, p_ls1, p_ls2, *args):
# This module creates an instance of a NewSolution class and
# performs the generate_newsol procedure
newbie = NewSolution(self.beta, self.n, self.cookies, self.moop)
newsol = newbie.make_newsol(index, *args)
newsol = self.checkandfit(newsol)
p = index + 1 # ID number for first neighbor
rannum = random.random()
if rannum < p_ls1:
if newsol.getopenbins() > self.lb:
p, neighbors = self.ls1(p, 1, newsol)
else:
p, neighbors = self.bin_mutation(p, 1, newsol)
elif rannum < p_ls2:
p, neighbors = self.ls2(p, 1, newsol)
else:
p, neighbors = self.ls3(p, 1, newsol)
if neighbors:
winner = self.test_domination(newsol, neighbors[0])
return p, winner
return p, newsol
def checkandfit(self, solution):
# This function checks the feasibility of a solution and calculates fitness
# values.
solution = self.moop.calcfeasibility(solution)
checkformismatch(solution.getx(), solution.getvlrep())
fits = self.moop.calcfits(solution)
solution.updatefitvals(fits)
return solution
def test_domination(self, solution, neighbor):
# This function determines if neighbor dominates solution.
u = solution.getfits()
v = neighbor.getfits()
if dom2(v, u):
return neighbor
else:
return solution
def ls_time(self, solution, rcl_t):
# This function seeks to find a better time to fill bins
# Start by finding the dynamic residual matrix for the cooling rack
neighbor = deepcopy(solution)
tfill = neighbor.gettfill()
i_tlowtohigh = list(np.argsort(tfill[:neighbor.openbins], axis=0))
for i in i_tlowtohigh:
neighbor, rcl_t = self.find_new_tfilli(i, neighbor, rcl_t)
# Check if modified solution is nondominated
neighbor = self.checkandfit(neighbor)
winner = self.test_domination(solution, neighbor)
return winner
def find_new_tfilli(self, i, solution, rcl_t):
# This function determines a new time for box i to be filled and updates
# the RCLTime instance
vlrep = solution.getvlrep()
tfill = solution.gettfill()
told = tfill[i]
tmin = self.get_box_tmin(vlrep[i])
kwargs = {'mode': 'hload', 'nmove': len(vlrep[i]), 'told': told}
t, rcl_t = self.get_feasible_tfilli(rcl_t, tmin, **kwargs)
if t:
solution.edit_tfilli(i, t)
# Adapt Greedy Function
rcl_t.adapt_changetime(told, t, len(vlrep[i]))
return solution, rcl_t
def get_feasible_tfilli(self, rcl_t, tmin, **kwargs):
# This function locates a new value for tfill[i] that doesn't violate
# rack or fill limits
# Find new time for box i
t_new, p_t, rcl_t = self.find_new_time_value(rcl_t, tmin, **kwargs)
if not t_new:
return None, rcl_t
kappa = 0 # Counter to exit loop
# Check if possible to fill in period
while rcl_t.res_fill[p_t] < 1:
if kappa == 10:
return None, rcl_t
# If not possible, find new time value
t_new, p_t, rcl_t = self.find_new_time_value(rcl_t, tmin, **kwargs)
if not t_new:
return None, rcl_t
kappa += 1
# If returning t_new to open bin, reduce fill capacity by 1
rcl_t.res_fill[p_t] -= 1
return t_new, rcl_t
def get_box_tmin(self, vlrepi):
# Find minimum time for box i
boxi_contents = {k: v for k, v in self.cookies.items() if k in vlrepi}
maxbatch = max(boxi_contents.values(), key=attrgetter('batch')).batch
        tmin = maxbatch * self.moop.tbatch
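        # e.g. a box holding cookies from batches 1 and 2 cannot be filled
        # before t = 2 * 600 = 1200 s, when the later batch leaves the oven
        # (illustrative, assuming tbatch = 600 s)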
return tmin
def find_new_time_value(self, rcl_t, tmin, **kwargs):
# This module retrieves a new time value and also returns which period
# it belongs to
t_new = rcl_t.get_new_t(tmin, **kwargs)
if not t_new:
return None, None, rcl_t
t_p = self.find_t_in_fill_periods(t_new, rcl_t)
return t_new, t_p, rcl_t
def find_t_in_fill_periods(self, t, rcl_t):
# If the new time value is beyond the current fill periods, extend
while t > rcl_t.t_t[-1]:
rcl_t.extend_fill_periods()
# Find the period containing t_new
tlist = np.where(t >= np.array(rcl_t.t_t))[0]
return tlist[-1]
def ls1(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the first objective:
# minimizing the number of bins in use
k = 0
neighbors = []
searchfrom = solution
while k < numls:
coolneighbor, rcl_t = self.ls1_loading(searchfrom)
if coolneighbor:
k += 1
coolneighbor = self.ls_time(coolneighbor, rcl_t)
coolneighbor.updateid(p)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def ls2(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the second objective:
# minimizing the weighted average initial heat in a box
# p - current id number for new solution
# numls - number of neighbors to find during local search
# Returns updated p and list of neighbors
k = 0
neighbors = []
searchfrom = solution
while k < numls:
k, coolneighbor, rcl_t = self.ls2_loading(k, searchfrom)
if coolneighbor:
coolneighbor = self.ls_time(coolneighbor, rcl_t)
coolneighbor.updateid(p)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def ls3(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the third objective:
# minimizing the maximum time to move to store front.
k = 0
neighbors = []
searchfrom = solution
while k < numls:
k, coolneighbor, rcl_t = self.ls3_loading(k, searchfrom)
if coolneighbor:
coolneighbor = self.ls_time(coolneighbor, rcl_t)
coolneighbor.updateid(p)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def ls1_loading(self, searchfrom):
# This function attempts to empty the least filled bin and move its
# cookies into available boxes.
u = searchfrom.getfits()
vlrep = searchfrom.getvlrep()
r, rcl_t = self.getresiduals(vlrep, searchfrom.gettfill())
copy = deepcopy(searchfrom)
half = len(vlrep) // 2
for iloop in range(half):
# Find the emptiest bin's index number
lengths = [len(i) for i in copy.getvlrep()]
i = np.argmin(np.array(lengths))
copy, r, rcl_t = self.empty_bin(i, copy, r, rcl_t)
# If a nondominated solution wasn't found, return nothing
copy = self.checkandfit(copy)
v = copy.getfits()
if not dom2(u, v):
return copy, rcl_t
return None, rcl_t
def empty_bin(self, i, copy, r, rcl_t):
# This function moves items in box i to other boxes
for j in list(copy.getvlrep()[i]):
# Find rcl_bins
tfill = copy.gettfill()
rcl_bins = self.ls1_makercl(i, j, r, rcl_t, tfill)
if len(rcl_bins) == 0:
return copy, r, rcl_t
# Pick random bin
inew = random.choice(rcl_bins)
# Move cookie to new bin
copy.moveitem(i, j, inew)
r = self.update_spaceresiduals(r, i, inew)
r[i, 1], r[inew, 1] = rcl_t.adapt_movebins(tfill[i], tfill[inew])
return copy, r, rcl_t
def ls1_makercl(self, iold, j, r, rcl_t, tfill):
# This function returns the restricted candidate list for cookie
# j to move into based on the dot product strategy
# Set weights for the dot product array (1/boxcap, 1/coolrackcap)
weights = [1.0 / self.moop.boxcap, 1.0 / self.moop.coolrack]
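        # e.g. with boxcap = 8 and coolrack = 15 (illustrative values), a box
        # with 4 free slots and a worst-case rack load of 10 cookies scores
        # 4 / 8 + 10 / 15 = 1.17 in the dot product array built below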
# The cookie should not move into a box that is filled until after
# it is done baking
        tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
tmax = rcl_t.get_tmax(tmin, 1)
options_byt = [i for i in range(self.n) if tfill[i] > tmin]
if tfill[iold] != tmin:
options_byt.remove(iold)
# Form dot product array
dparray = np.zeros(self.n)
for i in options_byt:
if tfill[i] <= tmax:
# Make sure there is space available
if r[i, 0] > 1:
tk = rcl_t.find_t_in_timeline(tfill[i])
# Filling early will reduce onrack for all after time[tk]
onrack = np.subtract(self.moop.coolrack, rcl_t.space[tk:])
maxonrack_fromtk = max(onrack)
dparray[i] = weights[0] * r[i, 0] + weights[1] * maxonrack_fromtk
# Max fill
if len(np.nonzero(dparray)[0]) > self.beta:
options = list(np.argsort(-dparray)[:self.beta])
return options
else:
options = list(np.nonzero(dparray)[0])
return options
def ls2_loading(self, k, searchfrom):
# This function finds the restricted candidate list and tries to move
# cookies toward more favorable configurations to minimize the weighted avg
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
hotbins = np.argsort(searchfrom.getq0bins())
for s in range(searchfrom.openbins):
i = hotbins[-s - 1]
vlrep = copy.getvlrep()
# If there is only one item in the box, no point in moving
if len(vlrep[i]) < 2:
return k, None, rcl_t
rcl_j = self.ls2_makercl(i, vlrep)
k, newsol, rcl_t = self.search_rclj(k, i, copy, u, r, rcl_j, rcl_t)
if newsol:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def ls2_makercl(self, i, vlrep):
# This function returns the restricted candidate list for local search 2
# Restricted candidate list
binkeys = list(vlrep[i])
avglen = averageLen(vlrep)
nrcl_min = min(len(binkeys) - 1, self.beta)
nrcl = max(len(binkeys) - avglen, nrcl_min)
rcl_j = random.sample(binkeys, nrcl)
return rcl_j
def ls3_loading(self, k, searchfrom):
# This function finds the restricted candidate list for bin i and tries to
# move cookies to find a new nondominated solution. If unsuccessful, moves
# to a new bin
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
latebins = np.argsort(searchfrom.gettavail(), axis=0)
for s in range(searchfrom.openbins):
i = latebins[-s - 1]
vlrep = copy.getvlrep()
# If there is only one item in the box, no point in moving
if len(vlrep[i]) < 2:
return k, None, rcl_t
# Restricted candidate list
rcl_j = self.ls3_makercl(i, vlrep)
k, newsol, rcl_t = self.search_rclj(k, i, copy, u, r, rcl_j, rcl_t)
if newsol:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def ls3_makercl(self, i, vlrep):
# This function returns the restricted candidate list for local search 3
# Restricted candidate list
binkeys = list(vlrep[i])
n_rclj = int(0.5 * len(binkeys))
rcl_j = binkeys[-n_rclj - 1: -1]
return rcl_j
def search_rclj(self, k, i, solution, u, r, rcl_j, rcl_t):
# This function moves cookies into new boxes until either it finds a new
# nondominated solution or it runs out of candidates from this solution
for m in range(len(rcl_j)):
k += 1
j = random.choice(rcl_j)
rcl_j.remove(j)
r, rcl_t, solution = self.lsmove(i, j, r, rcl_t, solution)
# Check if modified solution is nondominated
solution = self.checkandfit(solution)
v = solution.getfits()
if not dom2(u, v):
return k, solution, rcl_t
return k, None, rcl_t
def lsmove(self, i, j, r, rcl_t, solution):
# This function determines where cookie j should move to
m = solution.getopenbins()
tfill = solution.gettfill()
# Gather bin options and pick new bin for the move
ilist = self.move_options(j, m, r, rcl_t, tfill)
inew = random.choice(ilist)
# Open a new bin or move cookie to a new bin
if inew == m:
tmin = self.get_box_tmin([j])
kwargs = {'mode': 'hload'}
t, rcl_t = self.get_feasible_tfilli(rcl_t, tmin, **kwargs)
if t:
solution.opennewbin(i, j, round(t, 1))
r[inew, 0] = self.moop.boxcap
r[inew, 1] = rcl_t.adapt_greedy_function_newbin(t)
else:
return r, rcl_t, solution
else:
solution.moveitem(i, j, inew)
r[i, 1], r[inew, 1] = rcl_t.adapt_movebins(tfill[i], tfill[inew])
r = self.update_spaceresiduals(r, i, inew)
return r, rcl_t, solution
def move_options(self, j, m, r, rcl_t, tfill):
# This function retrieves a candidate list for moving a cookie.
bcookiej = self.cookies.get(j).getbatch() # cookie batch number
        tmax = rcl_t.get_tmax(bcookiej * self.moop.tbatch, 1)
i_rlowtohigh = np.argsort(r[:m, 0], axis=0)
        # Search open bins from most to least residual space for a feasible fit.
for i in range(m):
# Find open bin with max. residual value, moving backward thru i_rlowtohigh
lsi = i_rlowtohigh[-1 - i]
if tfill[lsi] <= tmax:
pack = packable(r[lsi, :], bcookiej, tfill[lsi])
if pack:
return [m, lsi]
# If least loaded bin won't fit item, need to open new bin.
return [m]
def bin_mutation(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the first objective:
# minimizing the number of bins.
k = 0
neighbors = []
searchfrom = solution
while k < numls:
k, coolneighbor, rcl_t = self.select_mutation_operation(k, searchfrom)
if coolneighbor:
coolneighbor.updateid(p)
coolneighbor = self.ls_time(coolneighbor, rcl_t)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def select_mutation_operation(self, k, searchfrom):
# This function selects the mutation operator
vlrep = searchfrom.getvlrep()
avg_bin_size = averageLen(vlrep)
too_small_lengths = [i for i in vlrep if 2 * len(i) <= avg_bin_size]
if too_small_lengths:
k, coolneighbor, rcl_t = self.move_cookies(k, searchfrom)
else:
rannum = random.random()
if rannum < 0.50:
k, coolneighbor, rcl_t = self.part_swap(k, searchfrom)
else:
k, coolneighbor, rcl_t = self.cookie_swap(k, searchfrom)
return k, coolneighbor, rcl_t
def time_mutation_by_heat(self, solution, rcl_t):
# This function tries a new time value for the initial hottest bin to
# see if that helps
tfill = solution.gettfill()
q0_bybin = solution.getq0bins()[:solution.getopenbins()]
i_hot_list = np.argsort(q0_bybin)
i_hot = i_hot_list[-1]
told = tfill[i_hot]
kwargs = {'mode': 'hload', 'nmove': len(solution.vlrep[i_hot])}
t_new, rcl_t = self.get_feasible_tfilli(rcl_t, told - 5.0, **kwargs)
if t_new:
neighbor = deepcopy(solution)
neighbor.edit_tfilli(i_hot, t_new)
# Adapt Greedy Function
rcl_t.adapt_changetime(told, t_new, len(neighbor.vlrep[i_hot]))
# Check if modified solution is nondominated
neighbor = self.checkandfit(neighbor)
solution = self.test_domination(solution, neighbor)
return solution
def split_bin(self, solution, rcl_t):
# This function splits the highest capacity bin into two boxes.
vlrep = solution.getvlrep()
i = self.getmaxbin(vlrep)
# Get random place to split bin
jsplit = random.randrange(1, len(vlrep[i]))
newbin = list(vlrep[i][jsplit:])
# Open new bin with feasible time value
tmin = self.get_box_tmin(newbin)
kwargs = {'mode': 'hload', 'nmove': len(newbin)}
t_new, rcl_t = self.get_feasible_tfilli(rcl_t, tmin, **kwargs)
if t_new:
tfill = solution.gettfill()
solution.opennewbin(i, newbin[0], round(t_new, 1))
inew = solution.getopenbins() - 1
rcl_t.adapt_greedy_function_newbin(t_new, add=0)
rcl_t.adapt_movebins(tfill[i], t_new)
if len(newbin) > 1:
for j in newbin[1:]:
solution.moveitem(i, j, inew)
rcl_t.adapt_movebins(tfill[i], tfill[inew])
return solution, rcl_t
def cookie_swap(self, k, searchfrom):
# This function selects two random bins and tries to swap cookies between
# them. If unsuccessful, it splits the highest capacity bin.
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
for s in range(searchfrom.openbins):
mode = random.choice(['random', 'moveheat', 'movelate'])
i1, i2 = self.select_two_bins(copy, mode)
if not i2:
newsol, rcl_t = self.split_bin(copy, rcl_t)
else:
kwargs = {'i1': i1, 'i2': i2, 'mode': mode}
newsol, rcl_t = self.perform_cookie_swap(copy, rcl_t, **kwargs)
# Will return None if it's dominated by vector u
nondominated = self.check4nondomination(u, newsol)
k += 1
if nondominated:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def perform_cookie_swap(self, solution, rcl_t, i1, i2, mode):
# This function performs the part swap between box i1 and i2
tfill = solution.gettfill()
vlrep = solution.getvlrep()
# Get cookies to swap
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
bini2_options = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
if mode == 'moveheat':
j1 = bini1_options[-1]
j2 = bini2_options[0]
else:
j1 = random.choice(bini1_options)
j2 = random.choice(bini2_options)
solution.moveitem(i1, j1, i2)
solution.moveitem(i2, j2, i1)
return solution, rcl_t
def part_swap(self, k, searchfrom):
# This function selects two random bins and tries to swap cookies between
# them. If unsuccessful, it splits the highest capacity bin.
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
for s in range(searchfrom.openbins):
mode = random.choice(['random', 'moveheat', 'movelate'])
i1, i2 = self.select_two_bins(copy, mode)
if not i2:
newsol, rcl_t = self.split_bin(copy, rcl_t)
else:
kwargs = {'i1': i1, 'i2': i2, 'mode': mode}
newsol, rcl_t = self.perform_part_swap(copy, rcl_t, **kwargs)
# Will return None if it's dominated by vector u
nondominated = self.check4nondomination(u, newsol)
k += 1
if nondominated:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def perform_part_swap(self, solution, rcl_t, i1, i2, mode):
# This function performs the part swap between box i1 and i2
# Get swap points
if mode == 'moveheat':
movetobin2, movetobin1 = self.get_heat_swap_sets(solution, i1, i2)
else:
movetobin2, movetobin1 = self.get_random_swap_sets(solution, i1, i2)
if movetobin2:
kwargs = {'i1': i1, 'movetobin2': movetobin2,
'i2': i2, 'movetobin1': movetobin1}
solution, rcl_t = \
self.make_swap_happen(solution, rcl_t, **kwargs)
else:
solution, rcl_t = self.split_bin(solution, rcl_t)
return solution, rcl_t
def make_swap_happen(self, solution, rcl_t, i1, movetobin2, i2, movetobin1):
# This function swaps a portion of box i1 with box i2
        # TODO: adapt rcl_t all at once instead of cookie by cookie
tfill = solution.gettfill()
for j in movetobin2:
solution.moveitem(i1, j, i2)
rcl_t.adapt_movebins(tfill[i1], tfill[i2])
for j in movetobin1:
solution.moveitem(i2, j, i1)
rcl_t.adapt_movebins(tfill[i2], tfill[i1])
return solution, rcl_t
def get_heat_swap_sets(self, solution, i1, i2):
# This function returns sets of cookies meant to reduce overall heat
# between boxes
vlrep = solution.getvlrep()
tfill = solution.gettfill()
# Determine eligible cookies
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
bini2_options = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
# Pick random swap sets
min_box_fill = min(len(vlrep[i1]), len(vlrep[i2]))
max_swap = min(len(bini1_options), len(bini2_options), min_box_fill - 1)
swap_number = random.randint(1, max_swap)
movetobin2 = bini1_options[-swap_number:]
movetobin1 = bini2_options[:swap_number]
return movetobin2, movetobin1
def get_random_swap_sets(self, solution, i1, i2):
# This function returns a random set of cookies to swap between boxes.
vlrep = solution.getvlrep()
tfill = solution.gettfill()
# Determine eligible cookies
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
bini2_options = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
# Pick random swap sets
min_box_fill = min(len(vlrep[i1]), len(vlrep[i2]))
max_swap = min(len(bini1_options), len(bini2_options), min_box_fill - 1)
swap_number = random.randint(1, max_swap)
movetobin2 = random.sample(bini1_options, swap_number)
movetobin1 = random.sample(bini2_options, swap_number)
return movetobin2, movetobin1
def getpoints_4swap(self, binitems1, t1, binitems2, t2):
# This function returns two points to perform the swap on
# Retrieve boolean lists
bool1 = self.moop.packatt(binitems1, t2)
bool2 = self.moop.packatt(binitems2, t1)
p1 = self.get_swap_point(bool1)
p2 = self.get_swap_point(bool2)
# If no swap point, return false
if not p1 or not p2:
return None, None
# Check for capacity violations
newbin1 = binitems1[:p1] + binitems2[p2:]
if len(newbin1) > self.moop.boxcap:
p2 = self.get_new_swap_point(binitems1, p1, binitems2, bool2)
newbin2 = binitems2[:p2] + binitems1[p1:]
if len(newbin2) > self.moop.boxcap:
p1 = self.get_new_swap_point(binitems2, p2, binitems1, bool1)
# Return the lists of cookies to be swapped
movetobin2 = list(binitems1[p1:])
movetobin1 = list(binitems2[p2:])
return movetobin2, movetobin1
def get_swap_point(self, booli):
# This function finds a feasible point to swap with another box
# Find starting point for bin i
starti = self.findstartforswap(booli)
if starti == len(booli):
return False
else:
pi = random.randrange(starti, len(booli))
return pi
def get_new_swap_point(self, bin_into, p1, bin_outta, bool_outta):
# This function finds a swap point that won't violate bin_into's capacity
can_accept = self.moop.boxcap - len(bin_into[:p1])
p2 = self.get_swap_point(bool_outta)
        kappa = 0
        while len(bin_outta[p2:]) > can_accept:
            # If a feasible point isn't found within 10 tries, only swap one item
            if kappa == 10:
                return len(bin_outta) - 1
            p2 = self.get_swap_point(bool_outta)
            kappa += 1
return p2
def findstartforswap(self, boollist):
# This function returns the index after which all values are True
start = 1
for k in range(len(boollist) - 1, 0, -1):
            if not boollist[k]:
start = k + 1
return start
return start
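    # e.g. findstartforswap([True, False, True, True]) returns 2: only the
    # cookies at index 2 and later are safely swappable (illustrative)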
def move_cookies(self, k, searchfrom):
# This function selects two random bins and tries to move cookies between
# them. If unsuccessful, it splits the highest capacity bin.
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
for s in range(searchfrom.openbins):
mode = random.choice(['moveheat', 'movelate'])
i1, i2 = self.get_hot_empty_bins(copy, mode)
            if i2 is None or len(copy.vlrep[i2]) == self.moop.boxcap:
newsol, rcl_t = self.split_bin(copy, rcl_t)
else:
kwargs = {'i1': i1, 'i2': i2, 'mode': mode}
newsol, rcl_t = self.perform_cookie_move(copy, rcl_t, **kwargs)
# Will return None if it's dominated by vector u
nondominated = self.check4nondomination(u, newsol)
k += 1
if nondominated:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def perform_cookie_move(self, solution, rcl_t, i1, i2, mode):
# This function performs the move of one cookie from box i1 to i2
tfill = solution.gettfill()
vlrep = solution.getvlrep()
# Get cookies to swap
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
empty_space = self.moop.boxcap - len(vlrep[i2])
max_move = min(empty_space, empty_space // 2 + 1, len(bini1_options))
nmove = random.randint(1, max_move)
for k in range(nmove):
j1 = bini1_options[-1 - k]
solution.moveitem(i1, j1, i2)
return solution, rcl_t
def select_two_bins(self, solution, mode):
# This module selects two bins for swap using specified function
vlrep = solution.getvlrep()
tfill = solution.gettfill()
if mode == 'moveheat':
i1, i2 = self.get_hot_cold_bins(vlrep, tfill, solution.getq0bins())
elif mode == 'movelate':
i1, i2 = self.get_hot_cold_bins(vlrep, tfill, solution.gettavail())
else:
# Pick random bins
i1, i2 = self.get_two_random_bins(vlrep, tfill)
return i1, i2
def get_hot_cold_bins(self, vlrep, tfill, characteristic):
# This function returns the indices of the hottest bin and the coldest
# bin that are compatible
m = len(vlrep) # number of open bins
ilist_hot = np.argsort(characteristic[:m])
for kh in range(m):
i_hot = ilist_hot[-1 - kh]
for kc in range(m - kh):
i_cold = ilist_hot[kc]
if i_hot != i_cold:
compatible = self.good_match(vlrep, tfill, i_hot, i_cold)
if compatible:
return i_hot, i_cold
return None, None
def get_hot_empty_bins(self, solution, mode):
# This function returns the indices of the hottest bin compatible with
# the emptiest bin
m = solution.getopenbins()
vlrep = solution.getvlrep()
tfill = solution.gettfill()
i2 = self.getminbin(vlrep)
if mode == 'moveheat':
ilist_hot = np.argsort(solution.getq0bins()[:m])
else:
ilist_hot = np.argsort(solution.gettavail()[:m])
for k in range(m):
i_hot = ilist_hot[-1 - k]
compatible = self.good_match(vlrep, tfill, i_hot, i2,
ignore_length=True)
if compatible:
return i_hot, i2
return None, None
def get_two_random_bins(self, vlrep, tfill):
# This function returns two individual random bins that can swap cookies
bin_pairs = list(combinations(range(len(vlrep)), 2))
for bp in range(len(bin_pairs)):
i1, i2 = random.choice(bin_pairs)
can_swap = self.good_match(vlrep, tfill, i1, i2)
if can_swap:
return i1, i2
return None, None
def good_match(self, vlrep, tfill, i1, i2, ignore_length=False):
# This function returns True if i1 and i2 are a good match for swapping
# and False if they are a bad match
if i1 == i2:
return False
if not ignore_length:
if len(vlrep[i1]) <= 1 or len(vlrep[i2]) <= 1:
return False
list1 = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
if not list1:
return False
list2 = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
if not list2:
return False
# If made it past conditions, return True
return True
def getrandombin(self, vlrep):
# This function returns a random bin with more than one item in it
bins = range(len(vlrep))
bini = random.choice(bins)
while len(vlrep[bini]) <= 1:
bini = random.choice(bins)
return bini
def getrandsecondbin(self, i1, vlrep, tfill):
# This function returns a second random bin that is not
# bin i1 and that items in bin i1 can be moved to
i2 = random.choice(range(len(vlrep)))
kappa = 1
while not self.good_match(vlrep, tfill, i1, i2):
if kappa == len(vlrep):
return None
i2 = random.choice(range(len(vlrep)))
kappa += 1
return i2
def getmaxbin(self, vlrep):
# This function returns the index of the fullest bin.
bincapacity = np.zeros(len(vlrep))
for i in range(len(vlrep)):
bincapacity[i] = len(vlrep[i])
bini = np.argmax(bincapacity)
return bini
def getminbin(self, vlrep):
# This function returns the index of the emptiest bin.
bincapacity = np.zeros(len(vlrep))
for i in range(len(vlrep)):
bincapacity[i] = len(vlrep[i])
minbin = np.argmin(bincapacity)
return minbin
def getresiduals(self, vlrep, tfill):
# This function calculates the residual matrix associated with a given
# dynamic bin packing loading. The first column represents the open box
# capacities, and the second column represents the maximum number of
# cookies that can be added to the cooling rack right before tfill_i
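        # e.g. with boxcap = 8 (illustrative), a box holding 3 cookies gets
        # r[i, 0] = 5, and r[i, 1] is the rack space just before tfill[i]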
coolrack = self.moop.coolrack
        r = np.zeros((self.n, 2), dtype=int)
# Set box capacity residuals
for i in range(len(vlrep)):
r[i, 0] = self.moop.boxcap - len(vlrep[i])
r[i, 1] = coolrack
# Set cooling rack capacity residuals
n_b = self.n // self.moop.nbatches
rcl_t = RCLtime(coolrack, self.moop.fillcap, n_b,
self.moop.tbatch, self.moop.nbatches)
r[:len(vlrep), 1] = rcl_t.initialize_withtfill(len(vlrep), vlrep, tfill)
return r, rcl_t
def update_spaceresiduals(self, r, i, inew):
# This function updates the space residual r after a cookie moves
# from box i to box inew
# Update r: box capacity
r[i, 0] += 1
r[inew, 0] -= 1
return r
def check4nondomination(self, u, solution):
# Check if modified solution is nondominated
solution = self.checkandfit(solution)
v = solution.getfits()
if not dom2(u, v):
return True
else:
return False
def countonrack(self, t, solution):
# Cookies from boxes filled after t might be on rack
vlrep = solution.getvlrep()
tfill = solution.gettfill()
timecheckindices = np.where(tfill > t)
nrackitems = 0
for i in timecheckindices[0]:
for j in vlrep[i]:
onrack = self.moop.rackij(t, tfill[i], self.cookies.get(j))
nrackitems += onrack
return nrackitems
def calclowerbound(self):
# This function calculates theoretical lower bound for the number of
# bins. It assumes this is the total number of cookies divided by
# the box capacity.
minbins = ceil(float(self.n) / self.moop.boxcap)
self.lb = int(minbins)
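        # e.g. n = 24 cookies with boxcap = 8 gives a lower bound of 3 boxes
        # (illustrative values)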
def getub(self):
# Returns the upper bound (bin capacity)
return self.moop.boxcap
def getcookies(self):
# Returns the list of items to pack
return self.cookies
def getlb(self):
# Returns the theoretical lower bound
return self.lb
class NewSolution:
# This class performs the GRASP creation of a new solution.
def __init__(self, beta, n, cookies, moop):
self.beta = beta # Cardinality restriction
self.n = int(n) # Number of cookies to sort
self.cookies = cookies # dictionary of item objects
self.moop = moop # Multiobjective problem class
self.m = 0 # initialize open bins count
self.r = np.zeros((n, 2)) # Residual capacity matrix
        self.x = np.zeros((n, n), dtype=int)
        self.y = np.zeros(n, dtype=int)
        self.vlrep = []
        self.tfill = np.zeros(n, dtype=float)
# Initialize restricted candidate list
n_b = self.n // self.moop.nbatches
self.rcl_t = RCLtime(moop.coolrack, moop.fillcap, n_b,
moop.tbatch, moop.nbatches)
def make_newsol(self, index, *args):
# This function takes the solution from generate_newsol and creates
# a CookieSol instance.
# Possible args: a newgenes list containing a chromosome representation
# and a suggested tfill.
if args:
self.generate_newsol_from_chromosome(args[0], args[1])
else:
self.generate_newsol()
newsol = solmaker.CookieSol(index, self.x, self.y, self.vlrep, self.tfill)
return newsol
def generate_newsol(self):
# This function generates a new solution from scratch using GRASP
modes = ['ss', 'hload'] # Modes for retrieving new tfill time
self.initialize_greedy_tfill()
self.open_new_bin(0, 0)
# Set strategy for the loading
theta_i = random.random()
for j in range(1, self.n):
rcl_i = self.get_rcl_bins(theta_i, j)
i = random.choice(rcl_i)
if self.y[i] == 0:
self.tfill[i] = self.get_feasible_tfilli(j, modes)
self.open_new_bin(i, j)
else:
self.vlrep[i].append(j)
self.r[i, 0] -= 1
self.rcl_t.adapt_greedy_function_addtobin(self.tfill[i])
self.r[:self.m, 1] = \
self.rcl_t.retrieve_space_by_tfill(self.m, self.tfill)
self.constructx()
def generate_newsol_from_chromosome(self, chrom, tfill_suggested):
# This function generates a new solution based on a given chromosome
modes = ['ss', 'hload'] # Modes for retrieving new tfill time
self.initialize_greedy_tfill(*tfill_suggested)
chrom = self.initialize_first_bin(chrom)
# Set strategy for the loading
theta_i = random.random()
for j in chrom:
rcl_i = self.get_rcl_bins(theta_i, j)
i = random.choice(rcl_i)
if self.y[i] == 0:
self.tfill[i] = self.pick_tfilli(j, modes, tfill_suggested)
self.open_new_bin(i, j)
else:
self.vlrep[i].append(j)
self.r[i, 0] -= 1
self.rcl_t.adapt_greedy_function_addtobin(self.tfill[i])
self.r[:self.m, 1] = \
self.rcl_t.retrieve_space_by_tfill(self.m, self.tfill)
self.constructx()
def initialize_greedy_tfill(self, *args):
# This function initializes t_fill
# Calculate tfill_0 using inverse cdf and set residual capacity
if args:
# args = tfill_suggested
self.tfill[0] = self.rcl_t.pick_suggested_t(args, self.moop.tbatch)
else:
self.tfill[0] = self.rcl_t.get_new_t(self.moop.tbatch)
def initialize_first_bin(self, chrom):
# This function finds the first cookie in list chrom that can be packed
# at tfill[0] and opens the first bin with that cookie
for j in chrom:
if self.moop.cookiedonebaking(j, self.tfill[0]):
self.open_new_bin(0, j)
chrom.remove(j)
return chrom
        print('Error: NewSolution picked a time that cannot be filled.')
        return chrom
def pick_tfilli(self, j, modes, tfill_maybe):
        # This module tries to reuse one of the time values from tfill_maybe
tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
        # If the cooling rack would be overfull at tmin, settle for the
        # least-worst time
tk = self.find_t_in_trange(tmin)
if self.rcl_t.space[tk] <= 0:
t_new = self.rcl_t.find_least_worst_newt(tmin)
return t_new
t_possible = self.get_t_from_oldtfill(tmin, tfill_maybe)
if t_possible:
return t_possible
else:
# If nothing in tfill_maybe worked, return new value:
t_new = self.get_feasible_tfilli(j, modes)
return t_new
def get_t_from_oldtfill(self, tmin, tfill_maybe):
# This function returns a feasible time from tfill_maybe
# First establish tmax based on moving 1 cookie from the rack
tmax = self.rcl_t.get_tmax(tmin, 1)
t_options = np.unique(tfill_maybe)
for i in range(len(t_options)):
if t_options[i] < tmax:
# Avoid reusing a value from tfill_maybe
if t_options[i] not in self.tfill:
if self.rcl_t.time_feasible(t_options[i], tmin):
return t_options[i]
return None
def get_feasible_tfilli(self, j, modes):
# This function locates a new value for tfill[i] that doesn't violate
# rack or fill limits
theta_t = random.randint(0, 1)
tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
# Find fill time for box i
        t_new, p_t = self.find_new_time_value(tmin, modes[theta_t])
        if not t_new:
            return None
kappa = 0 # Counter to exit loop
# Check if possible to fill in period
while self.rcl_t.res_fill[p_t] < 1:
if kappa == 10:
return None
# If not possible, find new time value
            t_new, p_t = self.find_new_time_value(tmin, modes[theta_t])
            if not t_new:
                return None
kappa += 1
return t_new
def find_new_time_value(self, tmin, mode):
# This module retrieves a new time value and also returns which period
# it belongs to
        t_new = self.rcl_t.get_new_t(tmin, mode=mode)
        if not t_new:
            return None, None
        t_t = self.find_t_in_fill_periods(t_new)
return t_new, t_t
def find_t_in_fill_periods(self, t):
# If the new time value is beyond the current fill periods, extend
while t > self.rcl_t.t_t[-1]:
self.rcl_t.extend_fill_periods()
# Find the period containing t_new
tlist = np.where(t >= np.array(self.rcl_t.t_t))[0]
return tlist[-1]
def find_t_in_trange(self, t):
# If the new time value is beyond the current timeline, extend
while t > self.rcl_t.trange[-1]:
self.rcl_t.extend_timeline()
tklist = np.where(np.array(self.rcl_t.trange) <= t)[0]
return tklist[-1]
def get_rcl_bins(self, theta_i, j):
# This module selects the strategy based on theta_i and returns
# the corresponding restricted candidate list.
if theta_i < 0.33:
# Least loaded strategy
rcl_i = self.llmove(j)
elif theta_i < 0.66:
# Weighted max strategy
rcl_i = self.wmaxmove(j)
else:
# Combo-t strategy
rcl_i = self.combot_move(j)
# Return either a new bin or the list found above
if not rcl_i:
rcl_i = self.find_alternative_bin(j)
return rcl_i
else:
return rcl_i
def llmove(self, j):
# This module performs the sorting for module ll.
# The goal of this strategy is to balance the loading of the boxes.
rcl_i = []
i_rlowtohigh = np.argsort(self.r[:self.m, 0], axis=0)
# Add new bin as an option if others are starting to get full
if self.r[i_rlowtohigh[-1], 0] <= 0.5 * self.moop.boxcap:
rcl_i.append(self.m)
for k in range(self.m):
# Find open bin with max. residual value, moving backward thru i_rlowtohigh
lli = i_rlowtohigh[- 1 - k]
bcookiej = self.cookies.get(j).getbatch()
pack = packable(self.r[lli, :], bcookiej, self.tfill[lli])
if pack:
rcl_i.append(lli)
if len(rcl_i) == self.beta:
return rcl_i
return rcl_i
def wmaxmove(self, j):
# This module determines the restricted candidate list by the weighted
# max strategy. The goal is to keep the number of boxes to a minimum.
rcl_i = []
# Gather weights: space on rack / maximum space over time
maxval = np.max(self.r[:self.m, 1])
weights = np.zeros(self.m)
for k in range(self.m):
weights[k] = self.r[k, 1] / maxval
# Calculate weighted residuals
wresidual = np.multiply(self.r[:self.m, 0], weights)
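        # e.g. boxes with residuals [3, 5] and rack-space weights [1.0, 0.4]
        # (illustrative) give weighted residuals [3.0, 2.0], so the second
        # box is tried first by the argsort below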
i_rlowtohigh = np.argsort(wresidual, axis=0)
for k in range(self.m):
# Find open bin with min. weighted residual value
i = i_rlowtohigh[k]
bcookiej = self.cookies.get(j).getbatch()
pack = packable(self.r[i, :], bcookiej, self.tfill[i])
if pack:
rcl_i.append(i)
if len(rcl_i) == self.beta // 2:
return rcl_i
return rcl_i
def combot_move(self, j):
# This module determines the restricted candidate list by the combo-t
# strategy. The goal is to reduce the maximum time until the boxes
# can be moved to the store front.
n_b = self.n // self.moop.nbatches # Number of cookies per batch
jmax = j - (j % n_b) # Max. cookie no. for heat restriction
rcl_i = []
i_rlowtohigh = np.argsort(self.r[:self.m, 0], axis=0)
# Add new bin as an option after all bins meet a minimum level
if self.r[i_rlowtohigh[-1], 0] <= 0.7 * self.moop.boxcap:
rcl_i.append(self.m)
for k in range(self.m):
# Find open bin with max. residual value
lli = i_rlowtohigh[- 1 - k]
otherbatch = [jo for jo in self.vlrep[lli] if jo < jmax]
# Heat restriction
            if (self.r[lli, 0] <= 0.5 * self.moop.boxcap) and \
(len(otherbatch) == 0):
pass
else:
bcookiej = self.cookies.get(j).getbatch()
pack = packable(self.r[lli, :], bcookiej, self.tfill[lli])
if pack:
rcl_i.append(lli)
if len(rcl_i) == self.beta:
return rcl_i
return rcl_i
def open_new_bin(self, i, j):
# This module opens a new bin i with cookie j
self.m += 1
self.y[i] = 1
self.vlrep.insert(i, [j])
self.r[i, 0] = self.moop.boxcap - 1
# Adapt Greedy Function (time)
self.rcl_t.adapt_greedy_function_newbin(self.tfill[i])
t_t = self.find_t_in_fill_periods(self.tfill[i])
self.rcl_t.res_fill[t_t] -= 1
self.r[:self.m, 1] = self.rcl_t.retrieve_space_by_tfill(self.m, self.tfill)
def find_alternative_bin(self, j):
        # If the cooling rack is overfull at tmin, find the least-worst
        # alternative
tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
tk = self.find_t_in_trange(tmin)
if self.rcl_t.space[tk] <= 0:
# Find least-worst alternative
options = [i for i in range(self.m)
if tmin < self.tfill[i] and self.r[i, 0] > 0]
if options:
return options
else:
return [self.m]
else:
return [self.m]
def constructx(self):
# This function transforms the variable length representation into
# the x-matrix
for i in range(self.m):
for j in self.vlrep[i]:
self.x[i, j] = 1
checkformismatch(self.x, self.vlrep)
class RCLtime:
# This class maintains and updates the restricted candidate list for a
# unique t_fill
def __init__(self, coolrack, fillcap, n_b, tbatch, nbatches):
self.coolrack = coolrack # Cooling rack capacity
self.fillcap = fillcap # Fill period limit
self.n_b = n_b # Number of cookies in one batch
self.tbatch = tbatch # Time to cook one batch
self.nbatches = nbatches # Number of batches cooked
# Set the time range, extend one cycle past last pull
self.trange = [(b + 1) * self.tbatch for b in range(self.nbatches + 1)]
# Space on the cooling rack as a function of time
self.space = [self.coolrack - (b + 1) * self.n_b
for b in range(self.nbatches)]
self.space.append(self.space[-1])
# Include restrictions for period fill limits
n_period = 2 * (nbatches - 1) + 2
self.t_t = [self.tbatch * (1.0 + t / 2.0) for t in range(n_period)]
self.res_fill = [fillcap for _ in range(n_period)]
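        # Illustrative state, assuming RCLtime(coolrack=15, fillcap=2, n_b=6,
        # tbatch=600, nbatches=3):
        #   trange = [600, 1200, 1800, 2400]
        #   space = [9, 3, -3, -3] (the rack runs a deficit once all batches
        #   are out), t_t = [600, 900, 1200, 1500, 1800, 2100], res_fill = [2] * 6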
def initialize_withtfill(self, m, vlrep, tfill):
# This function adds the information from vlrep and tfill
# into the trange and space lists
# First fix the cooling rack related items
        r2 = np.zeros(m, dtype=int) # Collect residual values
i_lowtohigh = list(np.argsort(tfill[:m], axis=0))
for i in i_lowtohigh:
r2[i] = self.adapt_greedy_function_newbin(tfill[i],
add=len(vlrep[i]))
# Then fix the fill period related items
t_latest = np.amax(tfill)
while t_latest > self.t_t[-1]:
self.extend_fill_periods()
for t in range(len(self.t_t) - 1):
p_t = [i for i in range(m)
if self.t_t[t] <= tfill[i] < self.t_t[t + 1]]
self.res_fill[t] -= len(p_t)
return r2
def pick_suggested_t(self, t_maybe, tmin):
# This function returns a possible starting t-value, first by trying
# the suggested t values in t_maybe, and then by finding a feasible one
for i in range(len(t_maybe)):
if t_maybe[i] < self.trange[-1]:
if self.time_feasible(t_maybe[i], tmin):
return t_maybe[i]
t_new = self.get_new_t(tmin)
return t_new
def time_feasible(self, t, tmin):
# This function checks if time t is feasible to open a new bin
if t < tmin:
return False
while self.trange[-1] < t:
self.extend_timeline()
tk = self.find_t_in_timeline(t)
# To be feasible, the cooling rack cannot be overcrowded
if self.space[tk] > 0:
return self.time_period_feasible(t)
# If overcrowded, return False
return False
def time_period_feasible(self, t):
# This module determines if time value t is valid within period fill
# limit constraints.
if t < self.t_t[0]:
return False
ttlist = np.where(np.array(self.t_t) <= t)[0]
# The number of boxes filled during the period < limit
if self.res_fill[ttlist[-1]] > 0:
return True
else:
return False
def get_new_t(self, tmin, mode='ss', nmove=1, told=None):
        # This function draws a random time greater than tmin via inverse-
        # transform sampling from the distribution induced by space(trange)
t = 0
tmax = self.get_tmax(tmin, nmove)
dist = self.retrieve_pdensityfunction(mode)
c_min = dist.cdf(tmin)
c_max = dist.cdf(tmax)
if c_min == c_max:
return None
k = 0
while round(t) <= tmin or round(t) >= tmax:
rannum = random.uniform(c_min, c_max)
t = dist.ppf(rannum)
k += 1
if k == 10:
return None
return round(t)
def retrieve_pdensityfunction(self, mode):
# This function returns the needed pdf
if mode == 'hload':
dist = PiecewiseLinearPDF(self.trange, self.space)
else:
dist = PiecewisePDF(self.trange, self.space)
return dist
def find_least_worst_newt(self, tmin):
# This function returns the least worst time for a box to be opened
# based on tmin.
tklist = np.where(np.array(self.trange) >= tmin)[0]
max_space = self.space[tklist[0]]
tmax = self.get_tmax(tmin, max_space)
t_new = random.uniform(tmin + 1, tmax)
kappa = 0
while not self.time_period_feasible(t_new):
if kappa == 10:
return tmin + 1.0
t_new = random.uniform(tmin + 1, tmax)
kappa += 1
return round(t_new)
def get_tmax(self, tmin, nmove):
# This function determines if the get_new_t function needs to limit its
# search to a max. value. If not, it returns the last trange value.
tklist = np.where(np.array(self.trange) > tmin)[0]
for tk in tklist:
if self.space[tk] - nmove <= 0:
return self.trange[tk]
# If did not find t_max, and enough space at end of timeline, extend
if self.space[-1] >= nmove:
self.extend_timeline()
return self.trange[-1]
def adapt_greedy_function_newbin(self, t, add=1):
# This function updates the space and trange lists after a new bin is
        # opened; add is the space freed, i.e. the number of cookies removed
# If t is larger than the range, add it on to the end
if t > self.trange[-1]:
self.trange.append(t)
self.space.append(self.space[-1])
self.update_space(-1, add=add)
return self.space[-1]
        # If the new t equals the last t in trange, extend the timeline by one period
elif t == self.trange[-1]:
self.update_space(-1, add=add)
self.extend_timeline()
return self.space[-2]
else:
ilist = np.where(np.array(self.trange) >= t)[0]
if t == self.trange[ilist[0]]:
start = ilist[0]
else:
self.trange.insert(ilist[0], t)
self.space.insert(ilist[0], self.space[ilist[0] - 1] + add)
start = ilist[0] + 1
for tk in range(start, len(self.space)):
self.update_space(tk, add=add)
return self.space[ilist[0]]
def adapt_greedy_function_addtobin(self, t):
# This function updates the space and trange lists after a cookie is
# added to a box and removed from the cooling rack at time t
tklist = np.where(np.array(self.trange) >= t)[0]
for tk in tklist:
self.update_space(tk)
return self.space[tklist[0]]
def adapt_movebins(self, t1, t2):
# This function updates the space list after a cookie is moved from
# the box filled at t1 to the one filled at t2
tklist1 = np.where(np.array(self.trange) >= t1)[0]
tklist2 = np.where(np.array(self.trange) >= t2)[0]
tklist = np.setxor1d(tklist1, tklist2)
if t1 == t2:
return self.space[tklist1[0]], self.space[tklist1[0]]
elif t1 < t2:
for tk in tklist:
self.update_space(tk, add=-1)
else:
for tk in tklist:
self.update_space(tk)
return self.space[tklist1[0]], self.space[tklist2[0]]
def adapt_changetime(self, told, tnew, nmove):
# This function updates the trange and space lists to account for a bin
# being filled at tnew instead of told.
# nmove is the size of the box being changed
while tnew > self.trange[-1]:
self.extend_timeline()
tklist1 = np.where(np.array(self.trange) >= told)[0]
tklist2 = np.where(np.array(self.trange) >= tnew)[0]
tklist = np.setxor1d(tklist1, tklist2)
if told < tnew:
for tk in tklist:
self.update_space(tk, add=-nmove)
else:
for tk in tklist:
self.update_space(tk, add=nmove)
self.trange.insert(tklist2[0], tnew)
self.space.insert(tklist2[0], self.space[tklist2[0] - 1] + nmove)
return self.space
def update_space(self, tk, add=1):
# This function updates the space list at time tk, assuming one cookie
# was removed from the cooling rack
self.space[tk] += add
if self.space[tk] > self.coolrack:
self.space[tk] = self.coolrack
def retrieve_space_by_tfill(self, m, tfill):
# This function returns the space residuals matching tfill
        r2 = np.zeros(m, dtype=int) # Collect residual values
for i in range(m):
ilist = np.where(np.array(self.trange) == tfill[i])[0]
r2[i] = self.space[ilist[0]]
return r2
def find_t_in_timeline(self, t):
tklist = np.where(np.array(self.trange) > t)[0]
tk = tklist[0] - 1
return tk
def extend_timeline(self):
# This function extends trange by one batch time period.
new_tlast = self.trange[-1] + 0.5 * self.tbatch
self.trange.append(new_tlast)
self.space.append(self.space[-1])
def extend_fill_periods(self):
# This function extends t_t by one period
self.t_t.append(self.t_t[-1] + 0.5 * self.tbatch)
self.res_fill.append(self.fillcap)
class PiecewisePDF:
# This class defines a piecewise function along with its pdf and cdf
def __init__(self, trange, space):
self.tchunk = np.ediff1d(trange)
space_array = np.array(space)
for tk in range(len(space_array)):
if space_array[tk] < 0.0:
space_array[tk] = 0.0
area_chunks = np.multiply(self.tchunk, space_array[:-1])
area_total = np.sum(area_chunks)
self.tk = np.array(trange) # time range for distribution
self.pk = space_array / float(area_total) # probability at tk
self.ck = np.cumsum(np.multiply(self.pk[:-1], self.tchunk)) # cumulative probability
self.ck = np.insert(self.ck, 0, 0.0)
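        # Illustrative example: PiecewisePDF([600, 1200, 1800], [10, 5, 5])
        # yields ck = [0.0, 2/3, 1.0], so cdf(1200) is 2/3 and
        # ppf(cdf(1200)) recovers 1200.0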
def pdf(self, t):
# This function returns the probability at time t
if t < self.tk[0]:
return 0.0
listi = np.where(t < self.tk)
probt = self.pk[listi[0][0] - 1]
return probt
def cdf(self, t):
# This function returns the cumulative probability of quantile t
if t < self.tk[0]:
return 0.0
i = np.where(t == self.tk)[0]
        if i.size:
return self.ck[i[0]]
else:
ilist = np.where(t < self.tk)[0]
i1 = ilist[0] - 1
i2 = ilist[0]
slope = (self.ck[i2] - self.ck[i1]) / (self.tk[i2] - self.tk[i1])
p_c = slope * (t - self.tk[i1]) + self.ck[i1]
return p_c
def ppf(self, p):
# This function returns the time associated with percentile p
# This is the inverse cumulative distribution function.
i = np.where(p == self.ck)[0]
        if i.size:
return self.tk[i[0]]
else:
ilist = np.where(p < self.ck)[0]
# Linear function: t = (t_high - t_low)/(c_high - c_low)* (p - c_low) + t_low
i1 = ilist[0] - 1
i2 = ilist[0]
slope = (self.tk[i2] - self.tk[i1]) / (self.ck[i2] - self.ck[i1])
return slope * (p - self.ck[i1]) + self.tk[i1]
class PiecewiseLinearPDF:
# This class defines a piecewise function along with its pdf and cdf, with a
# linear increase in probability over each given time range
def __init__(self, trange, space):
self.tk = np.array(trange) # time range for distribution
self.space_array = np.array(space) # space available in each time range
for tk in range(len(self.space_array)):
if self.space_array[tk] < 0.0:
self.space_array[tk] = 0.0
self.tchunk = np.ediff1d(trange) # differences between time values
area_chunks = np.multiply(self.tchunk, self.space_array[:-1])
self.area_total = float(np.sum(area_chunks)) # total area under the space(t) curve
self.ck = np.cumsum(np.divide(area_chunks, self.area_total)) # cumulative probability
self.ck = np.insert(self.ck, 0, 0.0)
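        # Illustrative example: PiecewiseLinearPDF([600, 1200], [10, 10])
        # ramps probability linearly across the single chunk, so
        # cdf(900) = 0.25 (not 0.5) and ppf(0.25) returns approximately 900.0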
def pdf(self, t):
# This function returns the probability at time t
if t < self.tk[0]:
return 0.0
listi = np.where(t < self.tk)[0]
k = listi[0] - 1
# Linear function: probt = [(2 * space(tk) - 0) / (tk+1 - tk) * (t - tk)] / totalarea
slope = 2 * (self.space_array[k]/self.area_total)/self.tchunk[k]
probt = slope * (t - self.tk[k])
return probt
def cdf(self, t):
# This function returns the cumulative probability of quantile t
if t < self.tk[0]:
return 0.0
i = np.where(t == self.tk)[0]
        if i.size:
return self.ck[i[0]]
else:
ilist = np.where(t < self.tk)[0]
k = ilist[0] - 1 # index for lower boundary of chunk
slope = 2 * (self.space_array[k] / self.area_total) / self.tchunk[k]
p_c = slope * (t - self.tk[k]) ** 2 / 2 + self.ck[k]
return p_c
def ppf(self, p):
# This function returns the time associated with percentile p
# This is the inverse cumulative distribution function.
i = np.where(p == self.ck)[0]
        if i.size:
return self.tk[i[0]]
else:
ilist = np.where(p < self.ck)[0]
# Quad function: t = sqrt(2*(p-c_low)/slope) + t_low
k = ilist[0] - 1
slope = 2 * (self.space_array[k]/self.area_total)/self.tchunk[k]
x = sqrt(2 * (p - self.ck[k]) / slope)
return x + self.tk[k]
def dom2(u, v):
# Determines if fitness vector u dominates fitness vector v
# This function assumes a minimization problem.
# For u to dominate v, every fitness value must be either
# equal to or less than the value in v AND one fitness value
# must be less than the one in v
equaltest = np.allclose(u, v)
    if equaltest:
# If u == v then nondominated
return False
# less_equal returns boolean for each element u[i] <= v[i]
domtest = np.less_equal(u, v)
return np.all(domtest)
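# Illustrative check with hypothetical fitness vectors (bins, heat, time):
#   dom2((3, 1.2, 40.0), (4, 1.5, 40.0)) -> True: u is at least as good
#   everywhere and strictly better somewhere
#   dom2((3, 1.2, 40.0), (3, 1.2, 40.0)) -> False: equal vectors do not dominate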
def packable(ri, batch, tfilli):
# This module checks to see if cookie j can fit inside bin i at time tfilli
# Capacity constraints
r1 = ri[0] - 1
r2 = ri[1] - 1
# Time constraint: tbatch = 10 min = 600 s
t_cook = batch * 600
return r1 >= 0 and r2 >= 0 and t_cook < tfilli
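# e.g. packable(np.array([2, 5]), batch=1, tfilli=900.0) -> True: a slot
# remains in the box, the rack can give up a cookie, and the batch finishes
# baking at 600 s < 900 s (illustrative values)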
def checkformismatch(x, vlrep, out=sys.stdout):
# This function identifies if the given solution does not have an x-matrix
# and a variable length representation that match.
for i in range(len(vlrep)):
for j in vlrep[i]:
if x[i, j] != 1:
                out.write('Error: NewSolution is not coordinated on item {0}\n'.format(j))
def averageLen(lst):
# Calculates the average length of lists inside a list, returns integer value
lengths = [len(i) for i in lst]
return 0 if len(lengths) == 0 else (int(sum(lengths) / len(lengths)))
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
# This function determines if value a and value b are about equal
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
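# Example (illustrative): isclose(0.1 + 0.2, 0.3) -> True even though
# 0.1 + 0.2 != 0.3 exactly under IEEE-754 floating-point arithmetic.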
if __name__ == '__main__':
print('grasp.py needs to be combined with coolcookies.py') | [
"numpy.less_equal",
"math.sqrt",
"numpy.argsort",
"numpy.array",
"copy.deepcopy",
"numpy.divide",
"numpy.multiply",
"numpy.where",
"numpy.subtract",
"numpy.max",
"random.random",
"numpy.setxor1d",
"numpy.argmin",
"random.randint",
"operator.attrgetter",
"random.sample",
"numpy.allclo... | [((62326, 62343), 'numpy.allclose', 'np.allclose', (['u', 'v'], {}), '(u, v)\n', (62337, 62343), True, 'import numpy as np\n'), ((62506, 62525), 'numpy.less_equal', 'np.less_equal', (['u', 'v'], {}), '(u, v)\n', (62519, 62525), True, 'import numpy as np\n'), ((62537, 62552), 'numpy.all', 'np.all', (['domtest'], {}), '(domtest)\n', (62543, 62552), True, 'import numpy as np\n'), ((1283, 1298), 'random.random', 'random.random', ([], {}), '()\n', (1296, 1298), False, 'import random\n'), ((2629, 2647), 'copy.deepcopy', 'deepcopy', (['solution'], {}), '(solution)\n', (2637, 2647), False, 'from copy import deepcopy\n'), ((7982, 8002), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (7990, 8002), False, 'from copy import deepcopy\n'), ((9889, 9905), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (9897, 9905), True, 'import numpy as np\n'), ((10995, 11015), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (11003, 11015), False, 'from copy import deepcopy\n'), ((11954, 11982), 'random.sample', 'random.sample', (['binkeys', 'nrcl'], {}), '(binkeys, nrcl)\n', (11967, 11982), False, 'import random\n'), ((12367, 12387), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (12375, 12387), False, 'from copy import deepcopy\n'), ((14301, 14321), 'random.choice', 'random.choice', (['ilist'], {}), '(ilist)\n', (14314, 14321), False, 'import random\n'), ((15284, 15312), 'numpy.argsort', 'np.argsort', (['r[:m, 0]'], {'axis': '(0)'}), '(r[:m, 0], axis=0)\n', (15294, 15312), True, 'import numpy as np\n'), ((17369, 17389), 'numpy.argsort', 'np.argsort', (['q0_bybin'], {}), '(q0_bybin)\n', (17379, 17389), True, 'import numpy as np\n'), ((19364, 19384), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (19372, 19384), False, 'from copy import deepcopy\n'), ((21250, 21270), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (21258, 21270), False, 'from copy import deepcopy\n'), ((23970, 23997), 'random.randint', 'random.randint', (['(1)', 'max_swap'], {}), '(1, max_swap)\n', (23984, 23997), False, 'import random\n'), ((24846, 24873), 'random.randint', 'random.randint', (['(1)', 'max_swap'], {}), '(1, max_swap)\n', (24860, 24873), False, 'import random\n'), ((24895, 24936), 'random.sample', 'random.sample', (['bini1_options', 'swap_number'], {}), '(bini1_options, swap_number)\n', (24908, 24936), False, 'import random\n'), ((24958, 24999), 'random.sample', 'random.sample', (['bini2_options', 'swap_number'], {}), '(bini2_options, swap_number)\n', (24971, 24999), False, 'import random\n'), ((27515, 27535), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (27523, 27535), False, 'from copy import deepcopy\n'), ((28823, 28850), 'random.randint', 'random.randint', (['(1)', 'max_move'], {}), '(1, max_move)\n', (28837, 28850), False, 'import random\n'), ((29793, 29823), 'numpy.argsort', 'np.argsort', (['characteristic[:m]'], {}), '(characteristic[:m])\n', (29803, 29823), True, 'import numpy as np\n'), ((32275, 32294), 'random.choice', 'random.choice', (['bins'], {}), '(bins)\n', (32288, 32294), False, 'import random\n'), ((33072, 33094), 'numpy.argmax', 'np.argmax', (['bincapacity'], {}), '(bincapacity)\n', (33081, 33094), True, 'import numpy as np\n'), ((33350, 33372), 'numpy.argmin', 'np.argmin', (['bincapacity'], {}), '(bincapacity)\n', (33359, 33372), True, 'import numpy as np\n'), ((33801, 33836), 'numpy.zeros', 'np.zeros', (['(self.n, 2)'], {'dtype': 'np.int'}), '((self.n, 2), 
dtype=np.int)\n', (33809, 33836), True, 'import numpy as np\n'), ((35032, 35051), 'numpy.where', 'np.where', (['(tfill > t)'], {}), '(tfill > t)\n', (35040, 35051), True, 'import numpy as np\n'), ((36347, 36363), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (36355, 36363), True, 'import numpy as np\n'), ((36410, 36440), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'np.int'}), '((n, n), dtype=np.int)\n', (36418, 36440), True, 'import numpy as np\n'), ((36458, 36483), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int'}), '(n, dtype=np.int)\n', (36466, 36483), True, 'import numpy as np\n'), ((36529, 36556), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.float'}), '(n, dtype=np.float)\n', (36537, 36556), True, 'import numpy as np\n'), ((37180, 37245), 'solutions_dynamic.CookieSol', 'solmaker.CookieSol', (['index', 'self.x', 'self.y', 'self.vlrep', 'self.tfill'], {}), '(index, self.x, self.y, self.vlrep, self.tfill)\n', (37198, 37245), True, 'import solutions_dynamic as solmaker\n'), ((37576, 37591), 'random.random', 'random.random', ([], {}), '()\n', (37589, 37591), False, 'import random\n'), ((38538, 38553), 'random.random', 'random.random', ([], {}), '()\n', (38551, 38553), False, 'import random\n'), ((40891, 40913), 'numpy.unique', 'np.unique', (['tfill_maybe'], {}), '(tfill_maybe)\n', (40900, 40913), True, 'import numpy as np\n'), ((41403, 41423), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (41417, 41423), False, 'import random\n'), ((43709, 43747), 'numpy.argsort', 'np.argsort', (['self.r[:self.m, 0]'], {'axis': '(0)'}), '(self.r[:self.m, 0], axis=0)\n', (43719, 43747), True, 'import numpy as np\n'), ((44634, 44660), 'numpy.max', 'np.max', (['self.r[:self.m, 1]'], {}), '(self.r[:self.m, 1])\n', (44640, 44660), True, 'import numpy as np\n'), ((44679, 44695), 'numpy.zeros', 'np.zeros', (['self.m'], {}), '(self.m)\n', (44687, 44695), True, 'import numpy as np\n'), ((44834, 44874), 'numpy.multiply', 'np.multiply', (['self.r[:self.m, 0]', 'weights'], {}), '(self.r[:self.m, 0], weights)\n', (44845, 44874), True, 'import numpy as np\n'), ((44898, 44927), 'numpy.argsort', 'np.argsort', (['wresidual'], {'axis': '(0)'}), '(wresidual, axis=0)\n', (44908, 44927), True, 'import numpy as np\n'), ((45749, 45787), 'numpy.argsort', 'np.argsort', (['self.r[:self.m, 0]'], {'axis': '(0)'}), '(self.r[:self.m, 0], axis=0)\n', (45759, 45787), True, 'import numpy as np\n'), ((49306, 49331), 'numpy.zeros', 'np.zeros', (['m'], {'dtype': 'np.int'}), '(m, dtype=np.int)\n', (49314, 49331), True, 'import numpy as np\n'), ((49656, 49670), 'numpy.amax', 'np.amax', (['tfill'], {}), '(tfill)\n', (49663, 49670), True, 'import numpy as np\n'), ((52532, 52562), 'random.uniform', 'random.uniform', (['(tmin + 1)', 'tmax'], {}), '(tmin + 1, tmax)\n', (52546, 52562), False, 'import random\n'), ((55149, 55178), 'numpy.setxor1d', 'np.setxor1d', (['tklist1', 'tklist2'], {}), '(tklist1, tklist2)\n', (55160, 55178), True, 'import numpy as np\n'), ((55953, 55982), 'numpy.setxor1d', 'np.setxor1d', (['tklist1', 'tklist2'], {}), '(tklist1, tklist2)\n', (55964, 55982), True, 'import numpy as np\n'), ((56734, 56759), 'numpy.zeros', 'np.zeros', (['m'], {'dtype': 'np.int'}), '(m, dtype=np.int)\n', (56742, 56759), True, 'import numpy as np\n'), ((57655, 57673), 'numpy.ediff1d', 'np.ediff1d', (['trange'], {}), '(trange)\n', (57665, 57673), True, 'import numpy as np\n'), ((57696, 57711), 'numpy.array', 'np.array', (['space'], {}), '(space)\n', (57704, 57711), True, 'import numpy as np\n'), 
((57853, 57895), 'numpy.multiply', 'np.multiply', (['self.tchunk', 'space_array[:-1]'], {}), '(self.tchunk, space_array[:-1])\n', (57864, 57895), True, 'import numpy as np\n'), ((57917, 57936), 'numpy.sum', 'np.sum', (['area_chunks'], {}), '(area_chunks)\n', (57923, 57936), True, 'import numpy as np\n'), ((57955, 57971), 'numpy.array', 'np.array', (['trange'], {}), '(trange)\n', (57963, 57971), True, 'import numpy as np\n'), ((58237, 58263), 'numpy.insert', 'np.insert', (['self.ck', '(0)', '(0.0)'], {}), '(self.ck, 0, 0.0)\n', (58246, 58263), True, 'import numpy as np\n'), ((58411, 58432), 'numpy.where', 'np.where', (['(t < self.tk)'], {}), '(t < self.tk)\n', (58419, 58432), True, 'import numpy as np\n'), ((59824, 59840), 'numpy.array', 'np.array', (['trange'], {}), '(trange)\n', (59832, 59840), True, 'import numpy as np\n'), ((59911, 59926), 'numpy.array', 'np.array', (['space'], {}), '(space)\n', (59919, 59926), True, 'import numpy as np\n'), ((60125, 60143), 'numpy.ediff1d', 'np.ediff1d', (['trange'], {}), '(trange)\n', (60135, 60143), True, 'import numpy as np\n'), ((60207, 60254), 'numpy.multiply', 'np.multiply', (['self.tchunk', 'self.space_array[:-1]'], {}), '(self.tchunk, self.space_array[:-1])\n', (60218, 60254), True, 'import numpy as np\n'), ((60461, 60487), 'numpy.insert', 'np.insert', (['self.ck', '(0)', '(0.0)'], {}), '(self.ck, 0, 0.0)\n', (60470, 60487), True, 'import numpy as np\n'), ((2712, 2757), 'numpy.argsort', 'np.argsort', (['tfill[:neighbor.openbins]'], {'axis': '(0)'}), '(tfill[:neighbor.openbins], axis=0)\n', (2722, 2757), True, 'import numpy as np\n'), ((8916, 8939), 'random.choice', 'random.choice', (['rcl_bins'], {}), '(rcl_bins)\n', (8929, 8939), False, 'import random\n'), ((13620, 13640), 'random.choice', 'random.choice', (['rcl_j'], {}), '(rcl_j)\n', (13633, 13640), False, 'import random\n'), ((16840, 16855), 'random.random', 'random.random', ([], {}), '()\n', (16853, 16855), False, 'import random\n'), ((17639, 17657), 'copy.deepcopy', 'deepcopy', (['solution'], {}), '(solution)\n', (17647, 17657), False, 'from copy import deepcopy\n'), ((19449, 19498), 'random.choice', 'random.choice', (["['random', 'moveheat', 'movelate']"], {}), "(['random', 'moveheat', 'movelate'])\n", (19462, 19498), False, 'import random\n'), ((20745, 20773), 'random.choice', 'random.choice', (['bini1_options'], {}), '(bini1_options)\n', (20758, 20773), False, 'import random\n'), ((20791, 20819), 'random.choice', 'random.choice', (['bini2_options'], {}), '(bini2_options)\n', (20804, 20819), False, 'import random\n'), ((21335, 21384), 'random.choice', 'random.choice', (["['random', 'moveheat', 'movelate']"], {}), "(['random', 'moveheat', 'movelate'])\n", (21348, 21384), False, 'import random\n'), ((27600, 27639), 'random.choice', 'random.choice', (["['moveheat', 'movelate']"], {}), "(['moveheat', 'movelate'])\n", (27613, 27639), False, 'import random\n'), ((31185, 31209), 'random.choice', 'random.choice', (['bin_pairs'], {}), '(bin_pairs)\n', (31198, 31209), False, 'import random\n'), ((32351, 32370), 'random.choice', 'random.choice', (['bins'], {}), '(bins)\n', (32364, 32370), False, 'import random\n'), ((37693, 37713), 'random.choice', 'random.choice', (['rcl_i'], {}), '(rcl_i)\n', (37706, 37713), False, 'import random\n'), ((38644, 38664), 'random.choice', 'random.choice', (['rcl_i'], {}), '(rcl_i)\n', (38657, 38664), False, 'import random\n'), ((49390, 49419), 'numpy.argsort', 'np.argsort', (['tfill[:m]'], {'axis': '(0)'}), '(tfill[:m], axis=0)\n', (49400, 49419), True, 'import numpy as np\n'),
((51789, 51817), 'random.uniform', 'random.uniform', (['c_min', 'c_max'], {}), '(c_min, c_max)\n', (51803, 51817), False, 'import random\n'), ((52715, 52745), 'random.uniform', 'random.uniform', (['(tmin + 1)', 'tmax'], {}), '(tmin + 1, tmax)\n', (52729, 52745), False, 'import random\n'), ((58153, 58191), 'numpy.multiply', 'np.multiply', (['self.pk[:-1]', 'self.tchunk'], {}), '(self.pk[:-1], self.tchunk)\n', (58164, 58191), True, 'import numpy as np\n'), ((58653, 58675), 'numpy.where', 'np.where', (['(t == self.tk)'], {}), '(t == self.tk)\n', (58661, 58675), True, 'import numpy as np\n'), ((59174, 59196), 'numpy.where', 'np.where', (['(p == self.ck)'], {}), '(p == self.ck)\n', (59182, 59196), True, 'import numpy as np\n'), ((60287, 60306), 'numpy.sum', 'np.sum', (['area_chunks'], {}), '(area_chunks)\n', (60293, 60306), True, 'import numpy as np\n'), ((60376, 60415), 'numpy.divide', 'np.divide', (['area_chunks', 'self.area_total'], {}), '(area_chunks, self.area_total)\n', (60385, 60415), True, 'import numpy as np\n'), ((60635, 60656), 'numpy.where', 'np.where', (['(t < self.tk)'], {}), '(t < self.tk)\n', (60643, 60656), True, 'import numpy as np\n'), ((61072, 61094), 'numpy.where', 'np.where', (['(t == self.tk)'], {}), '(t == self.tk)\n', (61080, 61094), True, 'import numpy as np\n'), ((61619, 61641), 'numpy.where', 'np.where', (['(p == self.ck)'], {}), '(p == self.ck)\n', (61627, 61641), True, 'import numpy as np\n'), ((61943, 61977), 'math.sqrt', 'sqrt', (['(2 * (p - self.ck[k]) / slope)'], {}), '(2 * (p - self.ck[k]) / slope)\n', (61947, 61977), False, 'from math import ceil, sqrt\n'), ((8201, 8218), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (8209, 8218), True, 'import numpy as np\n'), ((58765, 58786), 'numpy.where', 'np.where', (['(t < self.tk)'], {}), '(t < self.tk)\n', (58773, 58786), True, 'import numpy as np\n'), ((59286, 59307), 'numpy.where', 'np.where', (['(p < self.ck)'], {}), '(p < self.ck)\n', (59294, 59307), True, 'import numpy as np\n'), ((61184, 61205), 'numpy.where', 'np.where', (['(t < self.tk)'], {}), '(t < self.tk)\n', (61192, 61205), True, 'import numpy as np\n'), ((61731, 61752), 'numpy.where', 'np.where', (['(p < self.ck)'], {}), '(p < self.ck)\n', (61739, 61752), True, 'import numpy as np\n'), ((4760, 4779), 'operator.attrgetter', 'attrgetter', (['"""batch"""'], {}), "('batch')\n", (4770, 4779), False, 'from operator import attrgetter\n'), ((5465, 5484), 'numpy.array', 'np.array', (['rcl_t.t_t'], {}), '(rcl_t.t_t)\n', (5473, 5484), True, 'import numpy as np\n'), ((10221, 10270), 'numpy.subtract', 'np.subtract', (['self.moop.coolrack', 'rcl_t.space[tk:]'], {}), '(self.moop.coolrack, rcl_t.space[tk:])\n', (10232, 10270), True, 'import numpy as np\n'), ((10442, 10461), 'numpy.nonzero', 'np.nonzero', (['dparray'], {}), '(dparray)\n', (10452, 10461), True, 'import numpy as np\n'), ((10506, 10526), 'numpy.argsort', 'np.argsort', (['(-dparray)'], {}), '(-dparray)\n', (10516, 10526), True, 'import numpy as np\n'), ((10608, 10627), 'numpy.nonzero', 'np.nonzero', (['dparray'], {}), '(dparray)\n', (10618, 10627), True, 'import numpy as np\n'), ((42529, 42553), 'numpy.array', 'np.array', (['self.rcl_t.t_t'], {}), '(self.rcl_t.t_t)\n', (42537, 42553), True, 'import numpy as np\n'), ((42798, 42825), 'numpy.array', 'np.array', (['self.rcl_t.trange'], {}), '(self.rcl_t.trange)\n', (42806, 42825), True, 'import numpy as np\n'), ((51091, 51109), 'numpy.array', 'np.array', (['self.t_t'], {}), '(self.t_t)\n', (51099, 51109), True, 'import numpy as np\n'),
((52394, 52415), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (52402, 52415), True, 'import numpy as np\n'), ((53017, 53038), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (53025, 53038), True, 'import numpy as np\n'), ((54716, 54737), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (54724, 54737), True, 'import numpy as np\n'), ((55041, 55062), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (55049, 55062), True, 'import numpy as np\n'), ((55100, 55121), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (55108, 55121), True, 'import numpy as np\n'), ((55841, 55862), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (55849, 55862), True, 'import numpy as np\n'), ((55902, 55923), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (55910, 55923), True, 'import numpy as np\n'), ((57004, 57025), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (57012, 57025), True, 'import numpy as np\n'), ((56843, 56864), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (56851, 56864), True, 'import numpy as np\n'), ((54068, 54089), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (54076, 54089), True, 'import numpy as np\n')] |
import argparse
import csv
from datetime import datetime
import logging
import sys
import time
from typing import Set, Tuple
from urllib.parse import urlparse, urlunparse
from botocore.config import Config
from botocore.exceptions import ClientError
from hca.util import SwaggerAPIException
from azul import config, require
from azul.dss import MiniDSS, shared_dss_credentials
from azul.logging import configure_script_logging
from azul.threads import DeferredTaskExecutor
from azul.types import MutableJSON
logger = logging.getLogger(__name__)
class CopyBundle(DeferredTaskExecutor):
def main(self):
if self.args.shared:
with shared_dss_credentials():
errors = self.run()
else:
errors = self.run()
        if errors:
            for e in errors:
                # S3 errors often refer to the key they occurred for, providing useful context here
                if isinstance(e, ClientError):
                    key = getattr(e, 'response', {}).get('Error', {}).get('Key', None)
                    if key is not None:
                        logger.error('Error in deferred task for key %s:\n%s', key, e)
                        continue
                logger.error('Error in deferred task:\n%s', e)
            raise RuntimeError(f'Some bundles or files could not be copied. '
                               f'The total number of failed tasks is {len(errors)}.')
@classmethod
def _parse_args(cls, argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--source', '-s', metavar='URL', type=urlparse,
default=config.dss_endpoint,
help='The URL of the DSS REST API from which to copy the bundles (default: %(default)s).')
parser.add_argument('--destination', '-d', metavar='URL', type=urlparse,
default=config.dss_endpoint,
help='The URL of the DSS REST API to which to copy the bundles (default: %(default)s).')
parser.add_argument('--personal', '-P', dest='shared', action='store_false', default=True,
help="Do not use the shared credentials of the Google service account that represents the "
"current deployment, but instead use personal credentials for authenticating to the "
"DSS. When specifying this option you will need to a) run `hca dss login` prior to "
"running this script or b) set GOOGLE_APPLICATION_CREDENTIALS to point to another "
"service account's credentials.")
version = parser.add_mutually_exclusive_group()
version.add_argument('--keep-version', '-K', dest='version', action='store_const', const='keep',
default='keep',
help="This is the default. Use the original version string for each copy of a file or "
"bundle. This mode is idempotent when used together with --keep-uuid or --map-uuid.")
version.add_argument('--set-version', '-S', metavar='VERSION', dest='version', type=cls._validate_version,
help=f'Set the version of bundle and file copies to the given value. This mode is '
f'idempotent but it will lead to conflicts if the input contains multiple versions '
f'of the same bundle or file. The version must be a string like '
f'{cls._new_version()}.')
version.add_argument('--map-version', '-M', metavar='VERSION', dest='version', type=float,
help='Set the version of bundle and file copies to the version of the orginal plus/minus '
'the specified duration in seconds. This mode is idempotent but has a low '
'probability of introducing collisions.')
version.add_argument('--new-version', '-N', dest='version', action='store_const', const='new',
help='Allocate a new version for copies of bundles and files. This is not idempotent '
                                  'because it creates new files and bundles every time the program is run.')
parser.add_argument('--fix-tags', '-f', action='store_true', default=False,
help="Add checksum tags to the blob objects in the source (!) DSS if necessary.")
input_ = parser.add_mutually_exclusive_group(required=True)
input_.add_argument('--bundle', '-b', metavar='UUID.VERSION', nargs='+', dest='bundles',
help='One or more fully qualified identifiers (FQID) of bundles to be copied')
input_.add_argument('--manifest', '-m', metavar='PATH')
parser.add_argument('--prefix', '-p', type=str, metavar='HEX', default='',
help='Only copy input bundles whose UUID begins with the given string. Applied to both '
'--bundles and --manifest but really only makes sense with the latter where it can '
                                 'be used to copy only a deterministic subset of the bundles in the manifest.')
parser.add_argument('--suffix', '-x', metavar='HEX', type=str, default='',
help='Only copy input bundles whose UUID ends in the given string. Applied to both '
'--bundles and --manifest but really only makes sense with the latter where it can '
                                 'be used to copy only a deterministic subset of the bundles in the manifest.')
args = parser.parse_args(argv)
return args
num_workers = 32
def __init__(self, argv) -> None:
super().__init__(num_workers=self.num_workers)
self.args = self._parse_args(argv)
self.source = MiniDSS(dss_endpoint=urlunparse(self.args.source),
config=Config(max_pool_connections=self.num_workers))
self.destination = self._new_dss_client()
def _new_dss_client(self):
return config.dss_client(dss_endpoint=urlunparse(self.args.destination),
adapter_args=dict(pool_maxsize=self.num_workers))
def _run(self):
if self.args.bundles:
bundle_fqids = {(uuid, version)
for uuid, _, version in (fqid.partition('.')
for fqid in self.args.bundles)}
else:
with open(self.args.manifest) as f:
manifest = csv.DictReader(f, delimiter='\t')
                columns = {'bundle_uuid', 'bundle_version', 'file_uuid'}
require(columns.issubset(manifest.fieldnames),
f'Expecting TSV with at least these columns: {columns}')
bundle_fqids = {(row['bundle_uuid'], row['bundle_version']) for row in manifest}
self._copy_bundles(bundle_fqids)
def _copy_bundles(self, bundle_fqids: Set[Tuple[str, str]]):
for bundle_fqid in bundle_fqids:
bundle_uuid, bundle_version = bundle_fqid
if bundle_uuid.endswith(self.args.suffix) and bundle_uuid.startswith(self.args.prefix):
self._defer(self._copy_files, bundle_uuid, bundle_version)
def _copy_files(self, bundle_uuid, bundle_version):
logger.info('Getting bundle %s, version %s', bundle_uuid, bundle_version)
manifest = self.source.get_bundle(uuid=bundle_uuid,
version=bundle_version,
replica='aws')
files = manifest['files']
logger.info('Copying %i file(s) from bundle %s, version %s',
len(files), bundle_uuid, bundle_version)
file: MutableJSON
futures = [self._defer(self._copy_file, bundle_uuid, bundle_version, file) for file in files]
self._defer(self._copy_bundle, bundle_uuid, bundle_version, manifest, run_after=futures)
def _copy_file(self, bundle_uuid, bundle_version, file, attempt=0):
attempt += 1
logger.info('Copying file %r from bundle %s, version %s', file, bundle_uuid, bundle_version)
        source_url = self.source.get_native_file_url(uuid=file['uuid'],
                                                     version=file['version'],
                                                     replica='aws')
        new_file = dict(uuid=file['uuid'],
                        version=self._copy_version(file['version']),
                        creator_uid=0,
                        source_url=source_url)
logger.info('Creating file %r', new_file)
try:
# noinspection PyProtectedMember
self.destination.put_file._request(new_file)
except SwaggerAPIException as e:
if e.code == 422 and e.reason == 'missing_checksum' and self.args.fix_tags and attempt < 10:
logger.warning('Target DSS complains that source blob for file %s, version %s lacks checksum tags, '
'retagging in %is.', file['uuid'], file['version'], attempt)
                self.source.retag_blob(uuid=file['uuid'],
                                       version=file['version'],
                                       replica='aws')
# Object tag updates are eventually consistent so the DSS might not see the tag update
# immediately. Keep trying until it does
self._defer(self._copy_file, bundle_uuid, bundle_version, file, attempt=attempt, delay=attempt)
else:
raise
else:
# Update the source manifest to refer to the new bundle
file['version'] = new_file['version']
def _copy_bundle(self, bundle_uuid, bundle_version, manifest, attempt=0):
attempt += 1
new_bundle_version = self._copy_version(bundle_version)
try:
logger.info('Creating bundle %s, version %s', bundle_uuid, new_bundle_version)
self.destination.put_bundle(uuid=bundle_uuid,
version=new_bundle_version,
replica='aws',
creator_uid=0,
files=manifest['files'])
except SwaggerAPIException as e:
if e.code == 400 and e.reason == 'file_missing' and attempt < 10:
logger.warning('Target DSS complains that a source file in bundle %s, version %s is missing, '
'retrying in %is.', bundle_uuid, bundle_version, attempt)
self._defer(self._copy_bundle, bundle_uuid, bundle_version, manifest, attempt=attempt, delay=attempt)
else:
raise
def _copy_version(self, version: str):
mode = self.args.version
if mode == 'keep':
return version
elif mode == 'new':
return self._new_version()
else:
if isinstance(mode, float):
version = datetime.strptime(version, self.version_format)
version = datetime.fromtimestamp(version.timestamp() + mode)
return version.strftime(self.version_format)
else:
return mode
version_format = '%Y-%m-%dT%H%M%S.%fZ'
@classmethod
def _new_version(cls):
return datetime.utcfromtimestamp(time.time()).strftime(cls.version_format)
@classmethod
def _validate_version(cls, version: str):
"""
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version('2018-10-18T150431.370880Z')
'2018-10-18T150431.370880Z'
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version('2018-10-18T150431.0Z')
Traceback (most recent call last):
...
ValueError: ('2018-10-18T150431.0Z', '2018-10-18T150431.000000Z')
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version(' 2018-10-18T150431.370880Z')
Traceback (most recent call last):
...
ValueError: time data ' 2018-10-18T150431.370880Z' does not match format '%Y-%m-%dT%H%M%S.%fZ'
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version('2018-10-18T150431.370880')
Traceback (most recent call last):
...
ValueError: time data '2018-10-18T150431.370880' does not match format '%Y-%m-%dT%H%M%S.%fZ'
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version('2018-10-187150431.370880Z')
Traceback (most recent call last):
...
ValueError: time data '2018-10-187150431.370880Z' does not match format '%Y-%m-%dT%H%M%S.%fZ'
"""
reparsed_version = datetime.strptime(version, cls.version_format).strftime(cls.version_format)
if version != reparsed_version:
raise ValueError(version, reparsed_version)
return version
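# Illustrative invocation (the script name and bundle FQID below are
# hypothetical), copying one bundle between deployments while keeping its
# original UUID and version:
#
#   python copy_bundle.py \
#       --source https://dss.example.org/v1 \
#       --destination https://dss.staging.example.org/v1 \
#       --bundle 01234567-89ab-cdef-0123-456789abcdef.2018-10-18T150431.370880Z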
if __name__ == '__main__':
configure_script_logging(logger)
CopyBundle(sys.argv[1:]).main()
| [
"logging.getLogger",
"csv.DictReader",
"argparse.ArgumentParser",
"botocore.config.Config",
"datetime.datetime.strptime",
"urllib.parse.urlunparse",
"azul.dss.shared_dss_credentials",
"azul.logging.configure_script_logging",
"time.time"
] | [((520, 547), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (537, 547), False, 'import logging\n'), ((13175, 13207), 'azul.logging.configure_script_logging', 'configure_script_logging', (['logger'], {}), '(logger)\n', (13199, 13207), False, 'from azul.logging import configure_script_logging\n'), ((1497, 1541), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (1520, 1541), False, 'import argparse\n'), ((657, 681), 'azul.dss.shared_dss_credentials', 'shared_dss_credentials', ([], {}), '()\n', (679, 681), False, 'from azul.dss import MiniDSS, shared_dss_credentials\n'), ((6013, 6041), 'urllib.parse.urlunparse', 'urlunparse', (['self.args.source'], {}), '(self.args.source)\n', (6023, 6041), False, 'from urllib.parse import urlparse, urlunparse\n'), ((6080, 6125), 'botocore.config.Config', 'Config', ([], {'max_pool_connections': 'self.num_workers'}), '(max_pool_connections=self.num_workers)\n', (6086, 6125), False, 'from botocore.config import Config\n'), ((6255, 6288), 'urllib.parse.urlunparse', 'urlunparse', (['self.args.destination'], {}), '(self.args.destination)\n', (6265, 6288), False, 'from urllib.parse import urlparse, urlunparse\n'), ((6715, 6748), 'csv.DictReader', 'csv.DictReader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (6729, 6748), False, 'import csv\n'), ((12947, 12993), 'datetime.datetime.strptime', 'datetime.strptime', (['version', 'cls.version_format'], {}), '(version, cls.version_format)\n', (12964, 12993), False, 'from datetime import datetime\n'), ((11218, 11265), 'datetime.datetime.strptime', 'datetime.strptime', (['version', 'self.version_format'], {}), '(version, self.version_format)\n', (11235, 11265), False, 'from datetime import datetime\n'), ((11580, 11591), 'time.time', 'time.time', ([], {}), '()\n', (11589, 11591), False, 'import time\n')] |
#! /usr/bin/env python3 -B
# build utility for easy development
# complete and unreliable hack used for making it easier to develop
import click, os
def build(target, dirname, buildtype, cmakeopts=''):
os.system('mkdir -p build/{dirname}; cd build/{dirname}; cmake ../../ -GNinja -DCMAKE_BUILD_TYPE={buildtype} -DYOCTO_EXPERIMENTAL=ON {cmakeopts}; cmake --build . {target}'.format(target=target, dirname=dirname, buildtype=buildtype, cmakeopts=cmakeopts))
os.system('ln -Ffs {dirname} build/latest'.format(dirname=dirname))
@click.group()
def run():
pass
@run.command()
@click.argument('target', required=False, default='')
def latest(target=''):
os.system('cd build/latest; cmake --build . {target}'.format(target=target))
@run.command()
@click.argument('target', required=False, default='')
def release(target=''):
build(target, 'release', 'Release')
@run.command()
@click.argument('target', required=False, default='')
def nogl(target=''):
build(target, 'nogl', 'Release', '-DYOCTO_OPENGL=OFF')
@run.command()
@click.argument('target', required=False, default='')
def debug(target=''):
build(target, 'debug', 'Debug')
@run.command()
@click.argument('target', required=False, default='')
def gcc(target=''):
build(target, 'gcc', 'Release', '-DCMAKE_C_COMPILER=gcc-7 -DCMAKE_CXX_COMPILER=g++-7')
@run.command()
def xcode():
os.system('mkdir -p build/xcode; cd build/xcode; cmake -G Xcode -DYOCTO_EXPERIMENTAL=ON ../../; open yocto-gl.xcodeproj')
@run.command()
def clean():
os.system('rm -rf bin; rm -rf build')
@run.command()
def format():
    for pattern in ['yocto/yocto_*.h', 'yocto/yocto_*.cpp', 'apps/y*.cpp']:
        os.system('clang-format -i -style=file ' + pattern)
@run.command()
def docs():
os.system('./tools/cpp2doc.py')
@run.command()
def doxygen():
os.system('doxygen ./tools/Doxyfile')
@run.command()
@click.argument('msg', required=True, default='')
def commit(msg=''):
os.system('./tools/build.py format')
os.system('./tools/build.py docs')
os.system('git commit -a -m ' + msg)
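# Illustrative usage (the target name below is hypothetical):
#   ./tools/build.py release             # Ninja release build into build/release
#   ./tools/build.py debug sometarget    # build a single target in debug mode
#   ./tools/build.py latest              # rebuild whatever build/latest points to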
if __name__ == '__main__':
run()
| [
"click.group",
"os.system",
"click.argument"
] | [((573, 586), 'click.group', 'click.group', ([], {}), '()\n', (584, 586), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((624, 676), 'click.argument', 'click.argument', (['"""target"""'], {'required': '(False)', 'default': '""""""'}), "('target', required=False, default='')\n", (638, 676), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((798, 850), 'click.argument', 'click.argument', (['"""target"""'], {'required': '(False)', 'default': '""""""'}), "('target', required=False, default='')\n", (812, 850), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((932, 984), 'click.argument', 'click.argument', (['"""target"""'], {'required': '(False)', 'default': '""""""'}), "('target', required=False, default='')\n", (946, 984), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((1082, 1134), 'click.argument', 'click.argument', (['"""target"""'], {'required': '(False)', 'default': '""""""'}), "('target', required=False, default='')\n", (1096, 1134), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((1210, 1262), 'click.argument', 'click.argument', (['"""target"""'], {'required': '(False)', 'default': '""""""'}), "('target', required=False, default='')\n", (1224, 1262), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((1918, 1966), 'click.argument', 'click.argument', (['"""msg"""'], {'required': '(True)', 'default': '""""""'}), "('msg', required=True, default='')\n", (1932, 1966), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((1407, 1538), 'os.system', 'os.system', (['"""mkdir -p build/xcode; cd build/xcode; cmake -G Xcode -DYOCTO_EXPERIMENTAL=ON ../../; open yocto-gl.xcodeproj"""'], {}), "(\n 'mkdir -p build/xcode; cd build/xcode; cmake -G Xcode -DYOCTO_EXPERIMENTAL=ON ../../; open yocto-gl.xcodeproj'\n )\n", (1416, 1538), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((1562, 1599), 'os.system', 'os.system', (['"""rm -rf bin; rm -rf build"""'], {}), "('rm -rf bin; rm -rf build')\n", (1571, 1599), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((1792, 1823), 'os.system', 'os.system', (['"""./tools/cpp2doc.py"""'], {}), "('./tools/cpp2doc.py')\n", (1801, 1823), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((1859, 1896), 'os.system', 'os.system', (['"""doxygen ./tools/Doxyfile"""'], {}), "('doxygen ./tools/Doxyfile')\n", (1868, 1896), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((1991, 2027), 'os.system', 'os.system', (['"""./tools/build.py format"""'], {}), "('./tools/build.py format')\n", (2000, 2027), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((2032, 2066), 'os.system', 'os.system', (['"""./tools/build.py docs"""'], {}), "('./tools/build.py docs')\n", (2041, 2066), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((2071, 2107), 'os.system', 'os.system', (["('git commit -a -m ' + msg)"], {}), "('git commit -a -m ' + msg)\n", (2080, 2107), False, 'import click, os, platform, markdown, glob, textwrap\n'), ((1711, 1759), 'os.system', 'os.system', (["('clang-format -i -style=file ' + glob)"], {}), "('clang-format -i -style=file ' + glob)\n", (1720, 1759), False, 'import click, os, platform, markdown, glob, textwrap\n')] |
import sqlite3 as lite
import csv
# Constants.
inputPath = "/Users/kdinkla/Desktop/Novartis/HCS/CellMorph/www.ebi.ac.uk/huber-srv/cellmorph/data/"
outputPath = "/Users/kdinkla/MPDA/git/wrangle/db/"
sqlDotReplacement = '_'
# Screening parameters.
plates = ["HT" + str(i).zfill(2) for i in range(1, 69)]
plateDirectories = [inputPath + d + "/" for d in plates]
columns = [c for c in 'ABCDEFGHIJKLMNOP']
rows = [str(r).zfill(3) for r in range(4, 25)]
imageSpots = range(1, 5)
assignedClasses = {
"AF": "Actin fiber",
"BC": "Big cells",
"C": "Condensed",
"D": "Debris",
"LA": "Lamellipodia",
"M": "Metaphase",
"MB": "Membrane blebbing",
"N": "Normal",
"P": "Protruded",
"Z": "Telophase"
}
# Derived.
dbPath = outputPath + "core.db"
# Connect to SQLite database.
def connect():
return lite.connect(dbPath)
# Format object feature field for SQL.
def formatField(field):
return field.replace(".", sqlDotReplacement)
# Convert plate index (starting at 1) to plate tag.
def plateTag(index):
return plates[index]
def columnTag(index):
return columns[index]
def rowTag(index):
return rows[index]
# Determine object feature fields.
def objectFeatures():
firstFilePath = inputPath + "HT01/HT01A004_ftrs.tab"
with open(firstFilePath, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
        header = next(reader)
return [formatField(f) for f in header if f != 'spot' and f != 'class']
# Directory of feature file of given plate, column, and row.
def featureDirectory(plate, column, row):
return inputPath + plate + "/" + plate + column + row + "_ftrs.tab"
# Resolves directory for given database column, row, and plate number. Image types: seg and rgb
def wellURL(column, row, plate, type):
plateTag = plates[plate]
wellTag = plateTag + columns[column] + rows[row]
return "http://www.ebi.ac.uk/huber-srv/cellmorph/view/" + plateTag + "/" + wellTag + "/" + wellTag + "_" + type + ".jpeg"
#return "dataset/images/" + plateTag + "/" + wellTag + "/" + wellTag + "_seg.jpeg" | [
"csv.reader",
"sqlite3.connect"
] | [((854, 874), 'sqlite3.connect', 'lite.connect', (['dbPath'], {}), '(dbPath)\n', (866, 874), True, 'import sqlite3 as lite\n'), ((1358, 1393), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""'}), "(csvfile, delimiter='\\t')\n", (1368, 1393), False, 'import csv\n')] |
from django.urls import resolve, reverse
from .base_classes import NetworkStationTest
from ..views import station_borehole_layer_add
class StationBoreholeLayerAddTests(NetworkStationTest):
def __init__(self, *args):
NetworkStationTest.__init__(
self,
*args,
url="station_borehole_layer_add",
arguments={"network_pk": "1", "station_pk": "1"}
)
def test_station_borehole_layer_add_view_status_code_authenticated(self):
self.login_and_refresh()
        self.assertEqual(self.response.status_code, 200)
def test_station_borehole_layer_add_view_status_code_anon(self):
self.logout_and_refresh()
        self.assertEqual(self.response.status_code, 302)
def test_station_borehole_layer_add_update_url_resolves_view(self):
view = resolve("/networks/1/station/1/add-borehole-layer/")
        self.assertEqual(view.func, station_borehole_layer_add)
| [
"django.urls.resolve"
] | [((834, 886), 'django.urls.resolve', 'resolve', (['"""/networks/1/station/1/add-borehole-layer/"""'], {}), "('/networks/1/station/1/add-borehole-layer/')\n", (841, 886), False, 'from django.urls import resolve, reverse\n')] |
# Generated by Django 3.2.3 on 2021-06-19 00:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
('frosh', '0003_alter_team_coin_amount'),
]
operations = [
migrations.AlterField(
model_name='team',
name='group',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='frosh_team', serialize=False, to='auth.group'),
),
]
| [
"django.db.models.OneToOneField"
] | [((427, 580), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'primary_key': '(True)', 'related_name': '"""frosh_team"""', 'serialize': '(False)', 'to': '"""auth.group"""'}), "(on_delete=django.db.models.deletion.CASCADE,\n primary_key=True, related_name='frosh_team', serialize=False, to=\n 'auth.group')\n", (447, 580), False, 'from django.db import migrations, models\n')] |
import os.path
import numpy as np
import Tools
# These patterns are used for tests and benchmarks.
# For the tests, saturation cases still need to be added.
def writeTests(config):
NBSAMPLES=128
inputsA=np.random.randn(NBSAMPLES)
inputsB=np.random.randn(NBSAMPLES)
inputsA = inputsA/max(inputsA)
inputsB = inputsB/max(inputsB)
config.writeInput(1, inputsA,"InputsA")
config.writeInput(1, inputsB,"InputsB")
PATTERNDIR = os.path.join("Patterns","DSP","Filtering","MISC","MISC")
PARAMDIR = os.path.join("Parameters","DSP","Filtering","MISC","MISC")
configf32=Tools.Config(PATTERNDIR,PARAMDIR,"f32")
configq31=Tools.Config(PATTERNDIR,PARAMDIR,"q31")
configq15=Tools.Config(PATTERNDIR,PARAMDIR,"q15")
configq7=Tools.Config(PATTERNDIR,PARAMDIR,"q7")
writeTests(configf32)
writeTests(configq31)
writeTests(configq15)
writeTests(configq7)
| [
"Tools.Config",
"numpy.random.randn"
] | [((624, 665), 'Tools.Config', 'Tools.Config', (['PATTERNDIR', 'PARAMDIR', '"""f32"""'], {}), "(PATTERNDIR, PARAMDIR, 'f32')\n", (636, 665), False, 'import Tools\n'), ((674, 715), 'Tools.Config', 'Tools.Config', (['PATTERNDIR', 'PARAMDIR', '"""q31"""'], {}), "(PATTERNDIR, PARAMDIR, 'q31')\n", (686, 715), False, 'import Tools\n'), ((724, 765), 'Tools.Config', 'Tools.Config', (['PATTERNDIR', 'PARAMDIR', '"""q15"""'], {}), "(PATTERNDIR, PARAMDIR, 'q15')\n", (736, 765), False, 'import Tools\n'), ((773, 813), 'Tools.Config', 'Tools.Config', (['PATTERNDIR', 'PARAMDIR', '"""q7"""'], {}), "(PATTERNDIR, PARAMDIR, 'q7')\n", (785, 813), False, 'import Tools\n'), ((235, 261), 'numpy.random.randn', 'np.random.randn', (['NBSAMPLES'], {}), '(NBSAMPLES)\n', (250, 261), True, 'import numpy as np\n'), ((274, 300), 'numpy.random.randn', 'np.random.randn', (['NBSAMPLES'], {}), '(NBSAMPLES)\n', (289, 300), True, 'import numpy as np\n')] |
# Generated by Django 3.2.1 on 2021-05-12 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('todo', '0003_auto_20210511_0127'),
]
operations = [
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(blank=True, max_length=25),
),
]
| [
"django.db.models.CharField"
] | [((345, 388), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(25)'}), '(blank=True, max_length=25)\n', (361, 388), False, 'from django.db import migrations, models\n')] |
# encoding: UTF-8
import sys
import ctypes
import platform
from vtEngine import MainEngine
from ctaAlgo.uiStrategyWindow import *
#----------------------------------------------------------------------
def main():
"""主程序入口"""
    # Set the bottom taskbar icon; comment this out on Windows versions below Win7
try:
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('vn.py demo')
except:
pass
    # Reload the sys module and set the default string encoding to utf8
reload(sys)
sys.setdefaultencoding('utf8')
    # # Set the Windows bottom taskbar icon
# if 'Windows' in platform.uname() :
# ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('vn.trader')
    # Initialize the Qt application object
app = QtGui.QApplication(sys.argv)
app.setWindowIcon(QtGui.QIcon('vnpy.ico'))
app.setFont(BASIC_FONT)
    # Set the Qt skin (stylesheet)
try:
f = file("VT_setting.json")
setting = json.load(f)
if setting['darkStyle']:
import qdarkstyle
app.setStyleSheet(qdarkstyle.load_stylesheet(pyside=False))
except:
pass
    # Initialize the main engine and the main window objects
mainEngine = MainEngine()
mainWindow = MainWindow(mainEngine, mainEngine.eventEngine)
mainWindow.showMaximized()
    # Start the Qt event loop in the main thread
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| [
"vtEngine.MainEngine",
"ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID",
"sys.setdefaultencoding",
"qdarkstyle.load_stylesheet"
] | [((430, 460), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (452, 460), False, 'import sys\n'), ((1044, 1056), 'vtEngine.MainEngine', 'MainEngine', ([], {}), '()\n', (1054, 1056), False, 'from vtEngine import MainEngine\n'), ((277, 352), 'ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID', 'ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID', (['"""vn.py demo"""'], {}), "('vn.py demo')\n", (338, 352), False, 'import ctypes\n'), ((936, 976), 'qdarkstyle.load_stylesheet', 'qdarkstyle.load_stylesheet', ([], {'pyside': '(False)'}), '(pyside=False)\n', (962, 976), False, 'import qdarkstyle\n')] |
import sys
from csvcols import get_column
categories = get_column(sys.argv[1], col=1)
descriptions = get_column(sys.argv[1], col=2)
for c, n in categories.most_common(len(categories)):
print("%6d %s" % (n, c))
for d, n in descriptions.most_common(len(descriptions)):
print("%6d %s" % (n, d))
| [
"csvcols.get_column"
] | [((57, 87), 'csvcols.get_column', 'get_column', (['sys.argv[1]'], {'col': '(1)'}), '(sys.argv[1], col=1)\n', (67, 87), False, 'from csvcols import get_column\n'), ((103, 133), 'csvcols.get_column', 'get_column', (['sys.argv[1]'], {'col': '(2)'}), '(sys.argv[1], col=2)\n', (113, 133), False, 'from csvcols import get_column\n')] |