index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
996,400 | 30105423a5646175f94f5a48e52fb7c2549d0c67 | from django.apps import AppConfig
class ForignrelationshipConfig(AppConfig):
    # Django app configuration. "Forign" (sic) must match the installed app
    # directory / INSTALLED_APPS entry, so the misspelling is intentional here.
    name = 'ForignRelationShip'
|
996,401 | bf0f0e1a7a9e8bfa71ceee0956621e3a3e615d1d | from glob import glob
import cv2
def get_imgs(path):
    """Load every image found under the sub-directories of *path*.

    Each immediate child of *path* is treated as one object class; every file
    inside it is read with OpenCV in grayscale mode.

    Returns:
        dict: object name -> list of grayscale images (numpy arrays; an entry
        is None when cv2 cannot decode the file).  Note: only the dictionary
        is returned — the old docstring's claim of a second "total count"
        return value was wrong.
    """
    import os
    imlist = {}
    for each in glob(path + "*"):
        # os.path.basename is portable; splitting on "/" broke on Windows paths.
        word = os.path.basename(each.rstrip("/\\"))
        imlist[word] = []
        for imagefile in glob(path + word + "/*"):
            im = cv2.imread(imagefile, 0)  # 0 == cv2.IMREAD_GRAYSCALE
            imlist[word].append(im)
    return imlist
|
996,402 | 40421d8fc79cfb13bbb8fcdaec1fa230ba3efd63 | # String
a = "Python Programming"
# # Slice Constructor
# sub = slice(0, 6, 2)
# # Using indexing sequence
# print(a[-5 : -2 : 2])
l = [10, 20 , 30 , 40, 50]
# sub = slice(-3, -1, 2)
# # Using indexing sequence
# print(l[-3])
t = (10, 20, 30, 40, 50)
# Slice Constructor
sub = slice(-4, -1)
# Using indexing sequence
# print(t[-5:])
print(t[::-1])
print(a[::-1])
print(l[::-1])
|
996,403 | 4186c7f45f59f354a34cf1462c0c61d6512c6a39 | from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APITestCase
from orders.models import Order, Customer, Device, DeviceType, Manufacturer
User = get_user_model()
class OrdersList(APITestCase):
    """Endpoint tests for 'orders-list': anonymous access is rejected,
    authenticated users can list and create orders."""

    def setUp(self):
        # Resolved per test run; the named route must exist in the urlconf.
        self.url = reverse('orders-list')

    @classmethod
    def setUpTestData(cls):
        # Class-level fixtures, created once: one user, one manufacturer/type,
        # two devices, two customers, and two NEW orders.
        user = User.objects.create_user('usr', 'usr@screwman.test', 'usr')
        manufacturer = Manufacturer.objects.create(title='Test', description='Test manufacturer')
        device_type = DeviceType.objects.create(title='cell phone')
        device1 = Device.objects.create(device_type=device_type, manufacturer=manufacturer, serial='sn-00001', model='d-00001', description='test device')
        device2 = Device.objects.create(device_type=device_type, manufacturer=manufacturer, serial='sn-00002', model='d-00002', description='test device')
        customer1 = Customer.objects.create(name='Customer 1', phone='+380000000')
        customer2 = Customer.objects.create(name='Customer 2', phone='+380000001')
        Order.objects.create(customer=customer1, device=device1, accept_by=user, state=Order.STATE_NEW, malfunction_description='description', updated_by=user)
        Order.objects.create(customer=customer2, device=device2, accept_by=user, state=Order.STATE_NEW, malfunction_description='description', updated_by=user)

    def test_unauthenticated_access(self):
        """GET and POST must both return 401 for anonymous clients."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        # NOTE(review): hard-coded pks 1/1 assume fixture insertion order — TODO confirm.
        response = self.client.post(self.url, {'customer': 1, 'device': 1, 'state': Order.STATE_NEW, 'malfunction_description': 'description'})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_authenticated_get(self):
        """Logged-in users may list orders."""
        self.client.login(username='usr', password='usr')
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_authenticated_create(self):
        """Logged-in users may create an order."""
        self.client.login(username='usr', password='usr')
        response = self.client.post(self.url, {'customer': 1, 'device': 1, 'state': Order.STATE_NEW, 'malfunction_description': 'description'})
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
|
996,404 | 9a4412f438398c959e78429d432c060692c47eac | #=======INPUT Parameter=====================================================================================
#---n1=ref index Core, n2=ref index cladding kiri, n3=ref index cladding kanan---------------------------
from scipy import*
n1=1.55e0
n2=1.545e0
n3=1.545e0
wle=1.55e-6
nmoda=4
rentang=4000
lim=(n1)-0.00001
hasil = open("beta.txt", 'w')
#=======mencari fx1(x)====================================================================================
def fx1(x):
    """Transverse-resonance (dispersion) function of an asymmetric slab waveguide.

    Roots of fx1 in x (the trial effective index) are the guided modes.
    Reads the module-level globals k0 (free-space wavenumber), t (half film
    thickness), kk (mode order) and the indices n1/n2/n3.

    Fix: the original line contained a duplicated assignment ("fx1=fx1=...");
    the repeated common subexpression is now computed once.
    """
    kappa = k0 * sqrt(n1 * n1 - x * x)   # transverse wavenumber in the core
    vf1 = k0 * sqrt(n1 * n1 - n2 * n2)   # normalized frequency, left cladding
    vf2 = k0 * sqrt(n1 * n1 - n3 * n3)   # normalized frequency, right cladding
    # 2*t*kappa minus the interface phase shifts minus mode order * pi
    return (2.0e0 * t * kappa
            - arctan(sqrt(vf1 * vf1 - kappa * kappa) / kappa)
            - arctan(sqrt(vf2 * vf2 - kappa * kappa) / kappa)
            - kk * pi)
#=======mencari dne=======================================================================================
def dne():
    """Find the effective-index root of fx1 in (n2, n1) by bisection.

    The iteration count npn is derived from the requested bracket width e.
    Depends on fx1() and the module-level globals n1/n2.  Returns the last
    midpoint (0 if the derived iteration count is <= 1, as before).
    """
    a = (n2) + 0.0000001   # lower bracket, nudged off n2 to avoid sqrt(0)
    b = (n1)
    c = 0
    e = 1.0e-10            # target bracket width
    npn = (log10((b - a) / e)) / (log10((2.0e0) + 0.5e0))
    npn = int(npn)
    fa = fx1(a)
    for i in range(1, npn, 1):
        c = (a + b) / 2.0e0
        fc = fx1(c)
        if (fa * fc) < 0.0:
            b = c
        else:
            a = c
            # Bug fix: keep fa in sync with the moving lower bracket; the
            # original kept the stale fx1(a0) value for every iteration.
            fa = fc
    return c
#=======Perhitungan LOOP Moda-moda--- MAIN PROGRAM========================================================
# For every mode number kk: sweep the film thickness df upward and write the
# (thickness, effective index) pairs of guided solutions to beta.txt.
for kk in range(0,nmoda,1):
    # print ("i=",i)
    hasil.write("\n\n")   # blank separator between modes in the output file
    df=0.5e-6             # starting thickness [m]
    for k in range (1,rentang,1):
        #print ("j=",j)
        df=df+0.01e-6     # thickness step [m]
        k0=2*pi/wle       # free-space wavenumber (global, read by fx1)
        t=df/2.0          # half thickness (global, read by fx1)
        dne_val=dne()     # effective index for this thickness/mode
        #print (dne_val)
        # Only record solutions strictly below the core-index limit.
        if dne_val<lim:
            # print(df,"\t",dne_val)
            hasil.write(str(df))
            hasil.write("\t")
            hasil.write(str(dne_val))
            hasil.write("\n")
|
996,405 | 987c1309ff4f33d0bce6d4210a4a38cc521d4bce | /home/gurpreet/anaconda3/lib/python3.6/re.py |
996,406 | 0ede0ad1c7dd99be3ec569a16e45bba4510495a5 | from flask.ext.script import Manager
from clapperboard import app
manager = Manager(app)
|
996,407 | a4597a73fac258c42d3a5a9a3c5e102d32182401 | print "import numpy"
import numpy as np
#from numpy.lib.recfunctions import stack_arrays
#from root_numpy import root2array, root2rec
#import glob
print "import keras stuff"
from keras.layers import Dense, Input, Activation
from keras.models import Model
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras import regularizers, losses
from keras.layers import Dropout, add, BatchNormalization
from matplotlib import pyplot as plt
from sklearn.metrics import roc_curve, auc, roc_auc_score
import glob, time, argparse
import ROOT
filepath = "hist-MiniNtuple.h5"
def options():
    """Parse and return this script's command-line options."""
    argp = argparse.ArgumentParser()
    argp.add_argument("--inputdir", default="")
    return argp.parse_args()
def getHyperParameters():
    """Return (hidden-layer width, L2 kernel regularizer) for the network."""
    width = 30
    l2_strength = 0.01
    return (width, regularizers.l2(l2_strength))
def makeNetwork(inputwidth, nodes, regularizer):
    """Build and compile a 3-hidden-layer binary classifier.

    inputwidth  -- number of input features
    nodes       -- width of each hidden Dense layer
    regularizer -- kernel regularizer applied to every Dense layer
    """
    # we define the input shape (i.e., how many input features) **without** the batch size
    x = Input(shape=(inputwidth, ))
    # all Keras Ops look like z = f(z) (like functional programming)
    h = Dense(nodes, kernel_regularizer=regularizer)(x)
    h = Activation('relu')(h)
    h = BatchNormalization()(h)
    h = Dense(nodes, kernel_regularizer=regularizer)(h)
    h = Activation('relu')(h)
    h = BatchNormalization()(h) ##modify turn on of the node's output
    h = Dense(nodes,kernel_regularizer=regularizer)(h)
    h = Activation('relu')(h)
    h = BatchNormalization()(h)
    # single sigmoid output: signal-vs-background score in [0, 1]
    # (the previous "house price" comment was copy-pasted from a regression example)
    y = Dense(1)(h)
    y = Activation('sigmoid')(y)
    # NOTE(review): input=/output= are the legacy Keras kwargs (inputs=/outputs=
    # in modern Keras) — left unchanged to match the pinned old Keras version.
    net = Model(input=x, output=y)
    net.compile(optimizer='adam', loss=losses.binary_crossentropy)
    return net
def main():
    """Load the prepared .npy matrices, evaluate the trained 2-feature network
    (model2.h5), draw ROC curves and a 2D response map.  The full-feature
    training/evaluation code is kept commented out below for reference."""
    start_time = time.time()
    ops = options()
    ##or just load the matricies
    print "load the npy file directly"
    X_train = np.load("X_sig_train.npy")
    X_test = np.load("X_sig_test.npy")
    y_train = np.load("y_sig_train.npy")
    y_test = np.load("y_sig_test.npy")
    Z_train = np.load("Z_sig_train.npy")
    Z_test = np.load("Z_sig_test.npy")
    ##get the list
    # Indices of background (label 0) and signal (label 2b) training rows.
    lst_0b = []
    lst_2b = []
    for k in range(y_train.shape[0]):
        if y_train[k] == 0:
            lst_0b.append(k)
        else:
            lst_2b.append(k)
    ##check the variables
    # Column names of X_* (order matters: the last two are j1_m, j0_m).
    inputs = ['j0_trk0_pt','j0_trk1_pt','j1_trk0_pt','j1_trk1_pt','j0_trkdr','j1_trkdr','j0_nTrk','j1_nTrk','detaHH','mHH', 'j1_m', 'j0_m']
    # ##seperate the two training
    # X_0b = X_train[lst_0b, :]
    # X_2b = X_train[lst_2b, :]
    # for i in range(X_train.shape[1]):
    # bins = np.linspace(-5, 5, 100)
    # plt.hist(X_0b[:, i], bins, alpha=0.5, label=inputs[i] + "_0b")
    # plt.hist(X_2b[:, i], bins, alpha=0.5, label=inputs[i] + "_2b")
    # plt.legend()
    # plt.savefig(inputs[i] + "_var" + ".png")
    # plt.clf()
    ##setup the constants
    nodes, regularizer = getHyperParameters()
    #regularizer=None
    ##setup the neutral net
    # ##setup the epoc
    # callbacks = [
    # # if we don't have a decrease of the loss for 10 epochs, terminate training.
    # EarlyStopping(verbose=True, patience=10, monitor='val_loss'),
    # # Always make sure that we're saving the model weights with the best val loss.
    # ModelCheckpoint('model.h5', monitor='val_loss', verbose=True, save_best_only=True)]
    # net = makeNetwork(X_train.shape[1], nodes, regularizer)
    # ##train
    # history = net.fit(X_train, y_train, validation_split=0.2, epochs=40, verbose=1, callbacks=callbacks, batch_size=128)
    # plt.plot(history.history['val_loss'], label='val_loss')
    # plt.plot(history.history['loss'], label='loss')
    # plt.legend()
    # plt.savefig("loss.png")
    # plt.clf()
    #plt.show()
    #raw_input()
    # nodes, regularizer = getHyperParameters()
    # net = makeNetwork(X_train.shape[1], nodes, regularizer)
    # net.load_weights("model.h5")
    # yhat_test = net.predict(X_test)
    # yhat_test_round = np.array([1 if x>0.5 else 0 for x in yhat_test])
    # correct_test = np.logical_not(np.logical_xor(y_test,yhat_test_round))
    # yhat_train = net.predict(X_train)
    # yhat_train_round = np.array([1 if x>0.5 else 0 for x in yhat_train])
    # correct_train = np.logical_not(np.logical_xor(y_train,yhat_train_round))
    # print "(train) Fraction Correct =",np.average(correct_train),"+/-",correct_train.size**-0.5
    # print " (test) Fraction Correct =",np.average(correct_test),"+/-",correct_test.size**-0.5
    # _, bins, _ = plt.hist(y_test, histtype='step', label=r'$y_{\mathsf{true}}$')
    # plt.hist(yhat_test, bins=bins, histtype='step', label=r'$\hat{y}$')
    # plt.hist(correct_test,bins=bins, histtype='step', label=r'NXOR')
    # plt.legend()
    # plt.savefig("output.png")
    # plt.clf()
    # net2 = makeNetwork(2, nodes, regularizer)
    # callbacks2 = [
    # # if we don't have a decrease of the loss for 10 epochs, terminate training.
    # EarlyStopping(verbose=True, patience=10, monitor='val_loss'),
    # # Always make sure that we're saving the model weights with the best val loss.
    # ModelCheckpoint('model2.h5', monitor='val_loss', verbose=True, save_best_only=True)]
    # ##train
    # history2 = net2.fit(X_train[:, -2:], y_train, validation_split=0.2, epochs=100, verbose=1, callbacks=callbacks2, batch_size=128)
    ##or, load the neutral net
    # 2-feature network evaluated on the last two columns (j1_m, j0_m).
    nodes, regularizer = getHyperParameters()
    net2 = makeNetwork(2, nodes, regularizer)
    net2.load_weights("model2.h5")
    yhat_test2 = net2.predict(X_test[:, -2:])
    ##make the roc curve
    #print y_test, yhat_test
    #fpr, tpr, thresholds = roc_curve(y_test, yhat_test)
    fpr2, tpr2, thresholds2 = roc_curve(y_test, yhat_test2)
    ##cut based
    # Baseline cut-based selection: |mHH - 1| < 0.5 (third-to-last column).
    temp_lst = []
    for k in X_test:
        #print k
        if (abs(k[-3] - 1) < 0.5):
            temp_lst.append(1)
            # if np.sqrt(((k[-2])/0.3) ** 2 + ((k[-1])/0.3) ** 2) < 1.6:
            # temp_lst.append(1)
            # else:
            # temp_lst.append(0)
        else:
            temp_lst.append(0)
    yhat_test_cut = np.array(temp_lst)
    fpr3, tpr3, thresholds3 = roc_curve(y_test, yhat_test_cut)
    #print fpr, tpr, thresholds
    #roc_auc = auc(fpr, tpr)
    roc_auc2 = auc(fpr2, tpr2)
    roc_auc3 = auc(fpr3, tpr3)
    #plt.plot(fpr, tpr, color='green', lw=2, label='Full curve (area = %0.2f)' % roc_auc)
    plt.plot(fpr2, tpr2, color='darkorange', lw=2, label='Slice curve (area = %0.2f)' % roc_auc2)
    #plt.plot(fpr3, tpr3, color='red', lw=2, label='Cut curve (area = %0.2f)' % roc_auc3)
    # NOTE(review): this draws a degenerate point, not the chance diagonal;
    # plt.plot([0, 1], [0, 1], ...) was probably intended — flagged, not changed.
    plt.plot([0, 0], [1, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate--BKG')
    plt.ylabel('True Positive Rate--Sig')
    plt.title('ROC curves for Signal vs BKG')
    plt.legend(loc="lower right")
    plt.savefig("roc.png")
    plt.clf()
    #plt.show()
    #raw_input()
    ##check the outputs
    # 2D scan of the network response over the (j0_m, j1_m) plane via ROOT.
    canv = ROOT.TCanvas("test", "test", 800, 800)
    grid_Xtest = []
    for i in np.arange(-5, 5, 0.1):
        for j in np.arange(-5, 5, 0.1):
            grid_Xtest.append([i, j])
    grid_Xtest = np.array(grid_Xtest)
    grid_ytest = net2.predict(grid_Xtest)
    hist_mass = ROOT.TH2F("j0m_j1m", ";j0 m;j1 m ", 50, -5, 5, 50, -5, 5)
    for i in range(grid_Xtest.shape[0]):
        hist_mass.Fill(grid_Xtest[i][0], grid_Xtest[i][1], grid_ytest[i])
    hist_mass.Draw("colz")
    canv.SaveAs("mHH.png")
    ###check the weights
    # yhat_0b = net.predict(X_0b)
    # yhat_2b = net.predict(X_2b)
    # fig, ax = plt.subplots()
    # bins = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
    # plt.hist(yhat_0b, bins=bins, histtype='step', label=r'$\hat{y}_{0}$', normed=True)
    # plt.hist(yhat_2b, bins=bins, histtype='step', label=r'$\hat{y}_{1}$', normed=True)
    # plt.legend()
    # ax.set_xlim([0,1])
    # ax.set_xlabel("NN Score")
    # ax.set_ylabel("Arb. Units")
    # plt.savefig("separation.png")
    # plt.clf()
    print("--- %s seconds ---" % (time.time() - start_time))
if __name__ == "__main__":
main()
|
996,408 | f5992ee510b8088d0662303e0b1cbe07298f2c91 | # encoding: utf8
from rest_framework import serializers
from rest_framework.serializers import ValidationError
from .models import Employee, Daily
def md5(string):
    """Return the hex MD5 digest of *string*.

    Accepts bytes or text; text is UTF-8 encoded first (hashlib requires
    bytes — the original raised TypeError for str input on Python 3).
    NOTE(review): MD5 is a weak password hash; kept only because stored
    Employee.emp_pass values use it.
    """
    import hashlib
    if isinstance(string, str):
        string = string.encode('utf-8')
    m = hashlib.md5()
    m.update(string)
    return m.hexdigest()
class LoginSerializer(serializers.HyperlinkedModelSerializer):
    """Validates login credentials (emp_id/emp_pass) against Employee rows."""

    # Read-only: expose the related part's id instead of a hyperlink.
    part = serializers.ReadOnlyField(source='part.part_id')
    """
    Over write to_internal_value().
    """
    def to_internal_value(self, data):
        # Custom validation instead of the default field-by-field handling:
        # requires emp_id and emp_pass, checks the MD5-hashed password, and
        # returns only the fields the view needs.
        emp_id = data.get('emp_id')
        emp_pass = data.get('emp_pass')
        name = ''
        if not emp_id:
            raise ValidationError({
                'emp_id': '请输入工号!'
            })
        if not emp_pass:
            raise ValidationError({
                'emp_pass': '请输入密码!'
            })
        try:
            employee = Employee.objects.get(emp_id=emp_id)
            name = employee.name
        # NOTE(review): bare except also hides DB errors; catching
        # Employee.DoesNotExist would be safer — flagged only.
        except:
            raise ValidationError({
                'emp_id': '工号不存在!'
            })
        if md5(emp_pass) != employee.emp_pass:
            raise ValidationError({
                'emp_pass': '密码错误!'
            })
        return {
            'emp_id': emp_id,
            'name': name,
        }

    class Meta:
        model = Employee
        fields = ('emp_id', 'name', 'part', 'sex')
        extra_kwargs = {'emp_pass': {'write_only': True}, }
class DailySerializer(serializers.HyperlinkedModelSerializer):
    """Serializes Daily report rows, exposing the owning part's id read-only."""

    part = serializers.ReadOnlyField(source='part.part_id')

    class Meta:
        model = Daily
        fields = ('daily_id', 'part', 'create_date', 'update_date', 'content', 'original_content', 'status')
|
996,409 | 2e649c7d588da3a35dbcad6ae0d53c8d6235fed7 | # -*- coding: utf-8 -*-
#__author__ = 'basearch'
import os
import sys
import xlrd
import pyutil.common.sys_utf8 as sys_utf8
import pyconf.db.manna as manna
import pyutil.db.mellow as mellow
eng_name_map = {
"enum_company_online_status" : "eng_status",
"enum_device_online_status" : "eng_status",
"enum_user_status" : "eng_status",
}
mysql = mellow.mellow(manna.config)
def parse():
    """Walk the manna_eng .xls exports and push English names into MySQL.

    Each file name (minus extension) is the target table; for every row the
    first column is the id and the last column the English value.
    """
    for root, dirs, files in os.walk("pyutil/tools/import_manna_eng/manna_eng/", topdown=False):
        for f_name in files:
            table_name = f_name[:f_name.find(".")]
            print table_name
            # Most tables store the value in "eng_name"; the mapped ones use "eng_status".
            field_name = eng_name_map.get(table_name, "eng_name")
            data = xlrd.open_workbook('pyutil/tools/import_manna_eng/manna_eng/%s.xls' % (table_name))
            table = data.sheets()[0]
            # NOTE(review): iterates from row 0 — assumes the sheets have no
            # header row; confirm against the exports.
            for i in range(0, table.nrows):
                print i
                record = table.row_values(i)
                id, eng_val = record[0], record[-1]
                print table_name, {field_name: eng_val}, {"id":id}
                mysql.Update(table_name, {field_name: eng_val}, {"id":int(id)})
                print record
if __name__ == '__main__':
parse()
|
996,410 | 6ed65d10eb348dcd9a3f7c18081da6ca0cf22da7 | # coding=utf-8
import yaml
def yaml_parser(file):
    """Parse *file* as YAML and print the result (or the parse error)."""
    with open(file, 'r') as stream:
        try:
            # safe_load avoids arbitrary Python object construction from
            # untrusted YAML; plain yaml.load without an explicit Loader is
            # unsafe and deprecated since PyYAML 5.1.
            print(yaml.safe_load(stream))
        except yaml.YAMLError as ye:
            print(ye)
if __name__ == '__main__':
yaml_parser('./test.yml')
|
996,411 | feb4d51a34966e168dc060d7d717f9e9e11e8e36 | #!/usr/bin/python
# Read numbers until the sentinel 999 is entered, rejecting negatives, then
# print the running sum (the sentinel itself is not added).
done = False
sum = 0  # NOTE(review): shadows the builtin sum()
print("Please enter a number (999 to Quit)")
while not done:
    # SECURITY: eval() executes arbitrary expressions typed by the user;
    # int(input()) would be the safe choice — flagged, not changed here.
    value = eval(input())
    if value < 0:
        print("Value Entered", value, "is a Negative Number")
        continue
    if value != 999:
        print("The Value goes on here: ")
        sum += value
    else:
        done = (value == 999);
print("Sum is =", sum)
|
996,412 | b6f326810dd2975896befbd1cf3b7ac63082d94f | import sys
import operator
import load_data
import random
import itertools
import structures
import numpy as np
import matplotlib.pyplot as plt
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn import metrics
def predict_sentences(model_file,sentence_file,entity_1,entity_1_file,entity_1_col,
                      entity_2,entity_2_file,entity_2_col,symmetric):
    """Classify candidate relation sentences with a previously trained model.

    Pass the literal string "NONE" as an entity id file to disable id
    filtering for that entity.  Returns (predict_instances, predicted_labels).
    """
    if entity_1_file.upper() != "NONE":
        entity_1_ids = load_data.load_id_list(entity_1_file,entity_1_col)
    else:
        entity_1_ids = None
    if entity_2_file.upper() != "NONE":
        entity_2_ids = load_data.load_id_list(entity_2_file,entity_2_col)
    else:
        entity_2_ids = None
    predict_candidate_sentences = load_data.load_xml(sentence_file, entity_1, entity_2)
    # The model artifact bundles the classifier plus the three feature vocabularies.
    model, dep_dictionary, dep_word_dictionary, between_word_dictionary = joblib.load(model_file)
    predict_instances = load_data.build_instances_predict(predict_candidate_sentences, dep_dictionary, dep_word_dictionary,
                                                          between_word_dictionary, entity_1_ids, entity_2_ids, symmetric)
    X = []
    instance_sentences = set()
    for p in predict_instances:
        X.append(p.features)
        instance_sentences.add(p.get_sentence())
    X_predict = np.array(X)
    predicted_labels = model.predict(X_predict)
    print('Number of Sentences')
    print(len(instance_sentences))
    print('Number of Instances')
    print(len(predict_instances))
    return predict_instances, predicted_labels
def distant_train(model_out,sentence_file,distant_file,distant_e1_col,distant_e2_col,entity_1,entity_1_file,entity_1_col,
                  entity_2,entity_2_file,entity_2_col,symmetric):
    """Train a logistic-regression relation classifier by distant supervision.

    Labels come from the distant knowledge base in *distant_file*; the fitted
    model and its feature vocabularies are dumped together to *model_out*.
    Pass "NONE" as an entity id file to disable id filtering.
    """
    if entity_1_file.upper() != "NONE":
        entity_1_ids = load_data.load_id_list(entity_1_file,entity_1_col)
    else:
        entity_1_ids = None
    if entity_2_file.upper() != "NONE":
        entity_2_ids = load_data.load_id_list(entity_2_file,entity_2_col)
    else:
        entity_2_ids = None
    distant_interactions = load_data.load_distant_kb(distant_file,distant_e1_col,distant_e2_col)
    training_sentences = load_data.load_xml(sentence_file,entity_1,entity_2)
    training_instances, dep_dictionary, dep_word_dictionary, between_word_dictionary = load_data.build_instances_training(
        training_sentences, distant_interactions, entity_1_ids, entity_2_ids, symmetric )
    X = []
    y = []
    instance_sentences = set()
    for t in training_instances:
        instance_sentences.add(t.get_sentence())
        X.append(t.features)
        y.append(t.label)
    X_train = np.array(X)
    y_train = np.ravel(y)
    model = LogisticRegression()
    model.fit(X_train, y_train)
    print('Number of Sentences')
    print(len(instance_sentences))
    print('Number of Instances')
    print(len(training_instances))
    print('Number of Positive Instances')
    print(y.count(1))
    # NOTE(review): prints the bound method object; get_params() was likely intended.
    print(model.get_params)
    # Persist classifier + vocabularies as one artifact (see predict_sentences).
    joblib.dump((model,dep_dictionary,dep_word_dictionary,between_word_dictionary),model_out)
    print("trained model")
'''
sorted_dep_dictionary = sorted(dep_dictionary.items(), key=operator.itemgetter(1))
dep_dictionary_keys = []
for s in sorted_dep_dictionary:
dep_dictionary_keys.append('Dep_path: ' + s[0])
sorted_word_dep_dictionary = sorted(dep_word_dictionary.items(), key=operator.itemgetter(1))
word_dep_keys = []
for s in sorted_word_dep_dictionary:
word_dep_keys.append('Word in Dependency Path: ' + s[0])
sorted_between_word_dictionary = sorted(between_word_dictionary.items(), key=operator.itemgetter(1))
between_word_keys = []
for s in sorted_between_word_dictionary:
between_word_keys.append('Word Between Entities: ' + s[0])
feature_values = dep_dictionary_keys + word_dep_keys + between_word_keys
print(feature_values)
print(len(feature_values))
print(model.coef_.size)
feature_dict = {}
for i in range(model.coef_.size):
feature_dict[feature_values[i]] = abs(model.coef_.item(i))
sorted_feature_dict = sorted(feature_dict.items(), key=operator.itemgetter(1))
for s in sorted_feature_dict:
print(s[0] + '\t' + str(s[1]))
'''
def main():
    """Command-line dispatcher: DISTANT_TRAIN / TEST / PREDICT.

    All configuration is positional in sys.argv; see each branch for the
    expected argument layout.
    """
    mode = sys.argv[1]
    if mode.upper() == "DISTANT_TRAIN":
        # argv: model_out sentence_file distant_file e1_col e2_col
        #       entity_1 entity_1_file entity_1_col entity_2 entity_2_file entity_2_col symmetric
        model_out = sys.argv[2]
        sentence_file = sys.argv[3]
        distant_file = sys.argv[4]
        distant_e1_col = int(sys.argv[5])
        distant_e2_col = int(sys.argv[6])
        entity_1 = sys.argv[7].upper()
        entity_1_file = sys.argv[8]
        entity_1_col = int(sys.argv[9])
        entity_2 = sys.argv[10].upper()
        entity_2_file = sys.argv[11]
        entity_2_col = int(sys.argv[12])
        symmetric = sys.argv[13].upper() in ['TRUE','Y','YES']
        distant_train(model_out,sentence_file,distant_file,distant_e1_col,distant_e2_col,entity_1,entity_1_file,entity_1_col,
                      entity_2,entity_2_file,entity_2_col,symmetric)
    elif mode.upper() == "TEST":
        # Arguments parsed for symmetry with the other modes, but unused.
        model_file = sys.argv[2]
        sentence_file = sys.argv[3]
        entity_1 = sys.argv[4].upper()
        entity_1_file = sys.argv[5]
        entity_1_col = int(sys.argv[6])
        entity_2 = sys.argv[7].upper()
        entity_2_file = sys.argv[8]
        entity_2_col = int(sys.argv[9])
        symmetric = sys.argv[10].upper() in ['TRUE','Y','YES']
        print('testing function not developed yet')
    elif mode.upper() == "PREDICT":
        model_file = sys.argv[2]
        sentence_file = sys.argv[3]
        entity_1 = sys.argv[4].upper()
        entity_1_file = sys.argv[5]
        entity_1_col = int(sys.argv[6])
        entity_2 = sys.argv[7].upper()
        entity_2_file = sys.argv[8]
        entity_2_col = int(sys.argv[9])
        symmetric = sys.argv[10].upper() in ['TRUE','Y','YES']
        predicted_instances, predicted_labels = predict_sentences(model_file,sentence_file,entity_1,entity_1_file,entity_1_col,
                                                                  entity_2,entity_2_file,entity_2_col,symmetric)
        '''
        #trying to assemble list of relations
        outfile = open('/Users/kiblawi/Workspace/Data/predicted_interactions.txt','w')
        outfile2 = open('/Users/kiblawi/Workspace/Data/predicted_interactions2.txt','w')
        for i in range(len(predicted_labels)):
            if predicted_labels[i] == 1:
                pi = predicted_instances[i]
                sp = []
                ep = []
                start_point = pi.get_sentence().get_token(pi.start)
                end_point = pi.get_sentence().get_token(pi.end)
                outfile2.write(start_point.get_normalized_ner() + '\t' + end_point.get_normalized_ner() + '\n')
                for e in pi.get_sentence().entities:
                    for l in pi.get_sentence().entities[e]:
                        if pi.start in l:
                            sp = l
                        elif pi.end in l:
                            ep = l
                outfile.write(' '.join(pi.get_sentence().get_token(a).get_word() for a in sp).encode('utf-8') + '\t' + ' '.join(
                    pi.get_sentence().get_token(b).get_word() for b in ep).encode('utf-8') + '\n')
        outfile.close()
        '''
        # Dump every prediction with its entity spans for manual accuracy review.
        # NOTE(review): output path is hard-coded to a developer machine.
        outfile = open('/Users/kiblawi/Workspace/Data/predicted_sentences.txt','w')
        for i in range(len(predicted_labels)):
            pi = predicted_instances[i]
            sp = []
            ep = []
            # Recover the token index lists for the start/end entity mentions.
            for e in pi.get_sentence().entities:
                for l in pi.get_sentence().entities[e]:
                    if pi.start in l:
                        sp = l
                    elif pi.end in l:
                        ep = l
            outfile.write('Instance: ' + str(i) + '\n')
            outfile.write('Label: ' + str(predicted_labels[i]) + '\n')
            outfile.write(
                ' '.join('Human_gene:' + pi.get_sentence().get_token(a).get_word() for a in sp).encode('utf-8') + '\t' + 'Viral_gene:' + ' '.join(
                    pi.get_sentence().get_token(b).get_word() for b in ep).encode('utf-8') + '\n')
            outfile.write('Human_gene_index: ' + str(pi.start) + '\t' + 'Viral_gene_index: ' + str(pi.end) + '\n')
            outfile.write(pi.get_sentence().get_sentence_string().encode('utf-8') + '\n')
            outfile.write('Accuracy: \n\n')
        outfile.close()
    else:
        print("usage error")
if __name__=="__main__":
main()
|
996,413 | db977a8f128c741f29d22cb8663a91a2efddac49 | #!/usr/bin/env python
import feedparser
from cgi import FieldStorage, escape
from time import ctime
# HTML fragment rendered per feed entry: a link whose hover shows/hides the
# matching summary <div> via jQuery; %(eid)s / %(link)s / %(title)s /
# %(summary)s are filled from each feedparser entry (see main()).
ENTRY_TEMPLATE = '''
<a href="%(link)s"
onmouseover="$('#%(eid)s').show();"
onmouseout="$('#%(eid)s').hide();"
target="_new"
>
%(title)s
</a> <br />
<div class="summary" id="%(eid)s">
%(summary)s
</div>
'''
def main():
    """CGI entry point: fetch the feed named by ?url=... and emit HTML entries."""
    print "Content-type: text/html\n"
    form = FieldStorage()
    url = form.getvalue("url", "")
    if not url:
        raise SystemExit("error: not url given")
    feed = feedparser.parse(url)
    for enum, entry in enumerate(feed.entries):
        # Unique element id so the hover JS can target this entry's summary div.
        entry.eid = "entry%d" % enum
        try:
            # Entries missing a template key raise KeyError and are skipped.
            html = ENTRY_TEMPLATE % entry
            print html
        except Exception, e:
            # FIXME: Log errors
            pass
    # Timestamp footer so cached pages are distinguishable.
    print "<br />%s" % ctime()
if __name__ == "__main__":
main()
|
996,414 | 5e4dd44923100278cae8e3580495706a1dba69e3 | name = raw_input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
dic = dict()
for x in handle:
if x .startswith("From") and len (x.split()) > 2:
list = x.split()
if not dic.has.key(1[5][:2])
dic[1[5][:2]]=1
else:
dic[1[5][:2]]+=1
key = sorted(dic)
for x in key:
print "%s %d" % (x,dic[x])
|
996,415 | 584d8cd860dc3f50227af8989ff11418374d53c9 | from nabl.nabladmin.models import *
from django.contrib import admin
# Register every NABL model with its dedicated ModelAdmin so they all appear
# in the Django admin site (names come from the wildcard import above).
admin.site.register(Members, MembersAdmin)
admin.site.register(Teams, TeamsAdmin)
admin.site.register(Leagues, LeaguesAdmin)
admin.site.register(Divisions, DivisionsAdmin)
admin.site.register(Transactions, TransactionsAdmin)
admin.site.register(Players, PlayersAdmin)
admin.site.register(Rotowire, RotowireAdmin)
admin.site.register(Rotowiremissing, RotowiremissingAdmin)
admin.site.register(Rosterassign, RosterassignAdmin)
admin.site.register(Rostermove, RostermoveAdmin)
admin.site.register(Teamresults, TeamresultsAdmin)
admin.site.register(Schedules, SchedulesAdmin)
admin.site.register(CardedPlayers, CardedPlayersAdmin)
admin.site.register(Draftpicks, DraftpicksAdmin)
|
996,416 | 3e26ad73eea8e54bef2ca074a48a439f2251b9f9 |
# First loop: step through the even numbers below 15, stopping after 6
# (prints 0 2 4 6 and leaves i == 6, exactly like the original for/break).
i = 0
while i < 15:
    print(i)
    if i == 6:
        break
    i += 2

# Second loop: count 0..4, leaving i == 5 afterwards (same prints and final
# value as the original while-True/break form).
i = 0
while i != 5:
    print(i)
    i += 1
996,417 | 633bc6122d515ebb82e96799fab0813a4fed953e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 9 13:58:55 2022
@author: jtm545
"""
import pandas as pd
from pysilsub.devices import StimulationDevice
from pysilsub.observers import _Observer
# Choose device (alternative calibrations kept for quick switching)
# sd = StimulationDevice.from_json("../data/STLAB_1_York.json")
# sd = StimulationDevice.from_json("../data/STLAB_2_York.json")
# sd = StimulationDevice.from_json("../data/STLAB_1_Oxford.json")
#sd = StimulationDevice.from_json("../data/STLAB_2_Oxford.json")
# sd = StimulationDevice.from_json("../data/BCGAR_8_bit_linear_config.json")
# sd = StimulationDevice.from_json("../data/VirtualSky.json")
# sd = StimulationDevice.from_json("../data/OneLight.json")
# sd = StimulationDevice.from_json('../data/LEDCube.json')
sd = StimulationDevice.from_package_data("STLAB_Oxford")
sd = StimulationDevice.from_package_data('OneLight')  # overrides the line above

# Plot the spds
spd_fig = sd.plot_calibration_spds()

# Plot the gamut
gamut_fig = sd.plot_gamut()

sd.do_gamma()
sd.do_gamma(fit="polynomial")
sd.plot_gamma(show_corrected=True)

# Primary colors for the 5-primary BCGAR device below.
rgb = [
    (0.5, 0.5, 0.5),
    (0.5, 0.5, 0.5),
    (1.0, 0.5, 0.5),
    (1.0, 0.5, 0.5),
    (0.5, 0.5, 0.5),
]
rgb2 = [(50, 50, 50), (50, 50, 50), (50, 50, 50), (50, 50, 50), (50, 50, 50)]
col = ["red", 2, "blue", 3, 5]

sd = StimulationDevice(
    calibration="../data/BCGAR_5_Primary_8_bit_linear.csv",
    calibration_wavelengths=[380, 781, 1],
    primary_resolutions=[255, 255, 255, 255, 255],
    primary_colors=rgb,
    # Bug fix: this module imports `_Observer`; the bare name `Observer`
    # raised NameError at runtime.
    observer=_Observer(),
)

file = StimulationDevice.load_calibration_file(
    "../data/BCGAR_5_Primary_8_bit_linear.csv"
)
print(sd)
sd.do_gamma(fit="polynomial")
|
996,418 | 68b12cb7a9ba6316e07e70d3487aaca5e4c1c612 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-04-25 18:14:59
# @Author : guangqiang_xu (981886190@qq.com)
# @Version : $Id$
import requests
from hashlib import sha1
# import http.cookiejar as cookielib
import time
import hmac
import json
import re
from lxml import etree
from retrying import retry
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('../')
from crawl_xiciip import get_ip
from config import *
from log import spider_log
spider_name = 'zhihu'
log_folder_name = '%s_logs' % spider_name
logger = spider_log(log_name=spider_name, log_folder_name=log_folder_name)
class ZhiHuLogin(object):
    """Logs in to zhihu.com via the legacy OAuth web flow and scrapes search
    results for each keyword in the imported `search_list` (Python 2)."""

    def __init__(self, username, password,
                 client_id='c3cef7c66a1843f8b3a9e6a1e3160e20',
                 key='d1b964811afb40118a12068ff74a12f4'):
        # Endpoints of the Zhihu web login flow; client_id/key are the public
        # web-client OAuth constants, not user secrets.
        self.login_url = 'https://www.zhihu.com/signup?next=%2F'
        self.captcha_url = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=cn'
        self.sign_in_url = 'https://www.zhihu.com/api/v3/oauth/sign_in'
        self.captcha_flag = 1
        self.sess = None
        self.key = key
        self.log = logger
        # Login form fields posted to sign_in_url; captcha and signature are
        # filled in later by sign_in().
        self.form_data = {}
        self.form_data['username'] = username
        self.form_data['password'] = password
        self.form_data['client_id'] = client_id
        self.form_data['grant_type'] = 'password'
        self.form_data['source'] = 'com.zhihu.web'
        self.form_data['captcha'] = None
        self.form_data['lang'] = 'en'
        self.form_data['ref_source'] = 'homepage'
        self.form_data['utm_source'] = None
        self.form_data['timestamp'] = str(int(time.time()))
        self.headers = self.get_headers()

    def get_headers(self):
        # Fresh copy of the default request headers for each session.
        return {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0','HOST':'www.zhihu.com',\
            'Referer':'https://www.zhihu.com/signin?next=%2F','Authorization':'oauth c3cef7c66a1843f8b3a9e6a1e3160e20'}

    def get_sess(self):
        """Re-open sessions until the captcha endpoint stops demanding one."""
        i = 1
        while self.captcha_flag:
            self.log.info('开始尝试第{}次'.format(i))
            i += 1
            self.headers = self.get_headers()
            self.sess = requests.Session()
            # self.sess.cookies = cookielib.LWPCookieJar(filename = 'cookies_res.txt')
            response = self.sess.get(self.login_url, headers=self.headers)
            try:
                # The xUDID device id is embedded in the login page payload.
                x_udid = re.findall(r'{"xUDID":"([^;&]*)"}',
                                    response.text)[0]
                if not x_udid:
                    continue
                self.headers['x-udid'] = x_udid
            except:
                pass
            cap_response = self.sess.get(self.captcha_url,
                                         headers=self.headers, verify=True)
            dic = json.loads(cap_response.text)
            # NOTE(review): missing '{}' placeholder — dic is never interpolated.
            self.log.info('请求参数: '.format(dic))
            if not dic['show_captcha']:
                self.captcha_flag = 0
        return True

    def get_captcha(self):
        """Download the login captcha, display it if PIL is available, and
        prompt the operator to type it in."""
        try:
            # fetch the captcha image
            self.headers = self.get_headers()
            self.sess = requests.Session()
            t = str(int(time.time() * 1000))
            captcha_url = "https://www.zhihu.com/captcha.gif?r={0}&type=login".format(t)
            t = self.sess.get(captcha_url, headers=self.headers)
            with open("zhihu_captcha.jpg", "wb") as f:
                f.write(t.content)
            try:
                from PIL import Image
                im = Image.open("zhihu_captcha.jpg")
                im.show()
                im.close()
            except:
                pass
        except Exception as error:
            self.log.error('获取验证码失败: {}'.format(error))
        captcha = raw_input("输入验证码:")
        return captcha

    # compute the HMAC-SHA1 signature over the login form fields
    def get_signature(self):
        """Return the hex HMAC-SHA1 over grant_type+client_id+source+timestamp,
        keyed with self.key (the order is required by the API)."""
        myhmac = hmac.new(self.key, digestmod=sha1)
        myhmac.update(bytes(self.form_data['grant_type']))
        myhmac.update(bytes(self.form_data['client_id']))
        myhmac.update(bytes(self.form_data['source']))
        myhmac.update(bytes(self.form_data['timestamp']))
        return myhmac.hexdigest()

    def get_data(self,data,keyword):
        """Extract the fields of interest from one page of search results and
        append them, one JSON object per line, to ./ZhiHu.json."""
        for d in data:
            item = {}
            # Entries without any usable title are skipped entirely.
            try:
                question_title = d['object']['question']['name']
            except:
                try:
                    question_title = d['highlight']['title']
                except:
                    continue
            try:
                question_id = d['object']['question']['id']
            except:
                question_id = ""
            try:
                question_type = d['object']['question']['type']
            except:
                question_type = ""
            try:
                content_summary = d['object']['excerpt']
            except:
                content_summary = ""
            try:
                up_content = d['object']['content']
            except:
                up_content = ""
            try:
                up_author_name = d['object']['author']['name']
            except:
                up_author_name = ""
            try:
                up_author_url = 'https://www.zhihu.com/people/' + d['object']['author']['url_token']
            except:
                up_author_url = ""
            try:
                up_author_headline = d['object']['author']['headline']
            except:
                up_author_headline = ""
            try:
                up_comment_count = d['object']['comment_count']
            except:
                up_comment_count = ""
            try:
                up_create_time = d['object']['created_time']
            except:
                up_create_time = ""
            try:
                up_voteup_count = d['object']['voteup_count']
            except:
                up_voteup_count = ""
            try:
                up_update_time = d['object']['updated_time']
            except:
                up_update_time = ""
            # post title
            item['question_title'] = question_title
            # post id
            item['question_id'] = question_id
            # post URL
            item['question_url'] = 'https://www.zhihu.com/question/' + question_id
            # post type
            item['question_type'] = question_type
            # upvote count of the top answer
            item['up_voteup_count'] = up_voteup_count
            # summary of the top-voted answer
            item['content_summary'] = content_summary
            # content of the top-voted answer
            item['up_content'] = up_content
            # nickname of the top-voted answerer
            item['up_author_name'] = up_author_name
            # profile page of the top-voted answerer
            item['up_author_url'] = up_author_url
            # bio/headline of the top-voted answerer
            item['up_author_headline'] = up_author_headline
            # comment count of the top-voted answer
            item['up_comment_count'] = up_comment_count
            # creation time of the top-voted answer
            item['up_create_time'] = up_create_time
            # last-update time of the top-voted answer
            item['up_update_time'] = up_update_time
            item['keyword'] = keyword
            with open('./ZhiHu.json', 'a') as f:
                f.write(json.dumps(item, ensure_ascii=False) + '\n')

    def sign_in(self):
        """Log in (captcha + signature), then page through the search API for
        every keyword in `search_list`, persisting results via get_data()."""
        item = {}
        signature = self.get_signature()
        self.form_data['signature'] = signature
        # get_captcha() also (re)creates self.sess used by the post below.
        self.form_data['captcha'] = self.get_captcha()
        #self.get_sess()
        # print self.form_data
        self.sess.post(self.sign_in_url, data=self.form_data,
                       headers=self.headers)
        # self.sess.cookies.save(ignore_expires=True,ignore_discard=True)
        # page URL
        # https://www.zhihu.com/api/v4/search_v3?t=general&q=%E5%9C%9F%E8%80%B3%E5%85%B6&correction=1&offset=35&limit=10&search_hash_id=f4404ae2ce377b03c1ec63796a153b35
        # example URL with "Turkey" as the keyword
        for keyword in search_list:
            limit = 10
            offset = 0
            query_key = keyword
            basic_url = "https://www.zhihu.com/api/v4/search_v3?t=general&q=%s&correction=1" \
                        "&search_hash_id=4507b273793a743841253e912a8edf5e&offset=%s&limit=%s"
            while 1:
                url = basic_url % (query_key, offset, limit)
                response = self.sess.get(url, headers=self.headers)
                # Strip the <em> highlight tags so the payload is clean JSON text.
                response_rm_em = response.content.decode("utf-8").replace("<em>", "").replace("<\/em>", "")
                data = json.loads(response_rm_em)['data']
                if len(data) == 0:
                    # NOTE(review): missing .format(keyword) — the placeholder is never filled.
                    self.log.info('关键词:{} 爬取结束!')
                    break
                self.get_data(data,keyword)
                self.log.info('request {}, page: {}'.format(url, offset/10 + 1))
                offset += limit
                time.sleep(1)   # throttle to avoid rate limiting
# Run the crawler only when executed as a script.  The original also built a
# second ZhiHuLogin unconditionally at module level, which duplicated the one
# below and triggered network side effects on import; that copy is removed.
if __name__=="__main__":
    login = ZhiHuLogin(zh_username, zh_password)
    login.sign_in()
class WordDictionary:
    """Trie-backed dictionary supporting '.'-wildcard lookups.

    Each instance is a trie node; the instance you construct directly acts
    as the root.  ``addWord`` inserts a word, ``search`` matches a pattern
    where '.' stands for any single letter.
    """

    def __init__(self):
        """Create an empty trie node."""
        self.kids = dict()   # child nodes keyed by letter
        self.val = None      # the letter stored at this node (None for root)
        self.isWord = False  # True if a complete word ends here

    def addWord(self, word):
        """Insert ``word`` into the trie.

        :type word: str
        :rtype: void
        """
        node = self
        for ch in word:
            child = node.kids.get(ch)
            if child is None:
                child = WordDictionary()
                child.val = ch
                node.kids[ch] = child
            node = child
        # An empty word inserts nothing (mirrors the original behaviour).
        if word:
            node.isWord = True

    def search(self, word):
        """Return True if ``word`` (with '.' wildcards) matches a stored word.

        Maintains a frontier of candidate nodes; '.' fans out to every child,
        a concrete letter follows a single edge.

        :type word: str
        :rtype: bool
        """
        if not word:
            return False
        frontier = [self]
        last = len(word) - 1
        for idx, ch in enumerate(word):
            if not frontier:
                break
            nxt = []
            if ch == '.':
                for node in frontier:
                    nxt.extend(node.kids.values())
            else:
                for node in frontier:
                    child = node.kids.get(ch)
                    if child is not None:
                        nxt.append(child)
                        # Exact final letter: succeed as soon as one path
                        # ends on a complete word.
                        if idx == last and child.isWord:
                            return True
            frontier = nxt
        # A trailing '.' defers the word-end check to the surviving frontier.
        if frontier and word[-1] == '.':
            return any(node.isWord for node in frontier)
        return False
# Your WordDictionary object will be instantiated and called as such:
# Smoke test: addWord returns None (so those prints just show call order);
# the search prints exercise exact and '.'-wildcard lookups.
obj = WordDictionary()
print(obj.addWord("at"))
print(obj.addWord("and"))
print(obj.addWord("an"))
print(obj.addWord("add"))
print(obj.search("a"))     # False: "a" is only a prefix, not a stored word
print(obj.search(".at"))   # False: nothing ending in "at" is stored yet
print(obj.addWord("bat"))
print(obj.search(".at"))   # True: now matches "bat"
print(obj.search("an."))   # True: matches "and"
print(obj.search("a.d."))  # False: no stored 4-letter word fits
print(obj.search("b."))    # False: "ba" is not a complete word
print(obj.search("a.d"))   # True: matches "and" / "add"
print(obj.search("."))     # False: no single-letter words stored
|
996,420 | 4c505a5340f5d5fd6e7089be972eb84d4effb948 | from random import randint
from time import sleep
#
# Made by Filiph Wallsten 2019-09-19
#
# Dice-guessing game: each round the player picks a number 1-6, the house
# rolls a die, and a match counts as a win.  At the end the per-face tallies
# and the win ratio are printed.
i = 0            # rounds played so far
k = 0            # face label used when printing the tallies
total_wins = 0
# How often each die face came up over the whole session.
dice_numbers = {
    1: 0,
    2: 0,
    3: 0,
    4: 0,
    5: 0,
    6: 0,
}
running = True
# NOTE(review): the "(20 is max)" limit is stated but never enforced.
rounds = int(input('How many games do you want to play? (20 is max) '))
while running:
    dice_result = randint(1, 6)
    dice_numbers[dice_result] += 1
    player_dice = int(input('Select number 1-6 '))
    print('Dice rolling...')
    sleep(1)
    print('You rolled: ', player_dice)
    print('House rolled: ', dice_result)
    if dice_result == player_dice:
        print('Its a match! You win!')
        total_wins+=1
    else:
        print('Aw, better luck next time!')
    i+=1
    if i == rounds:
        # Session over: report how often each face appeared, then the
        # fraction of rounds won ("average of correct guesses").
        for x in dice_numbers:
            k+=1
            print('Dice landed on ', k, ' :', dice_numbers[x], 'times')
        print('Average of correct guesses ', total_wins/rounds)
        break
|
996,421 | ef06fd154cbb230502b3789f6120c537d30fb773 | import setuptools
# Adjacent string literals are concatenated with no separator, so every
# fragment must end in a space.  BUG FIX: the original omitted the space
# after "the paper", producing "...described in the paperBajgar, O., ...".
long_description = "Functions to estimate the expected best-out-of-n (Boon) result from a set of validation and " \
                   "test results for a machine learning architecture. The measure is fully described in the paper " \
                   "Bajgar, O., Kadlec, R., and Kleindienst, J. A Boo(n) for Evaluating Architecture Performance. " \
                   "ICML 2018."

setuptools.setup(
    name="boon",
    version="0.1.0",
    author="Ondrej Bajgar",
    author_email="OBajgar@cz.ibm.com",
    description="Functions to estimate the expected best-out-of-n result from a set of validation and test results.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license='Apache-2.0',
    url="https://gitlab.com/obajgar/boon/",
    packages=setuptools.find_packages(),
    install_requires=['numpy'],
    classifiers=(
        "Programming Language :: Python",
        "Operating System :: OS Independent",
    ),
)
|
996,422 | 60e89c916d6a09eaca23c3d36cce0213c0833019 | from django.urls import path
# from apps.users.api.api import UserAPIView # Aqui se esta importando la clase que sirve como ruta para el JSON
from apps.users.api.api import user_api_view # Metodo con decorador para el JSON
from apps.users.api.api import user_detail_view # Metodo para realizar la actualizacion
"""
Este archivo servira para poner las rutas urls que estan enlazadas con el archivo urls.py del proyecto principal
"""
urlpatterns = [
    # Class-based alternative kept for reference:
    # path('usuario/', UserAPIView.as_view(), name='usuario_api'),
    path('usuario/', user_api_view, name='usuario_api'),  # decorator-based list/create view
    path('usuario/<int:pk>/', user_detail_view, name='usuario_detail_api_view'),  # detail view keyed by the user's pk
]
|
996,423 | 77ad1af86a304a68460ba6dbc8ce457e0eebc952 | """
Created on 25 Aug, 2021
@author : Sai Vikhyath
"""
"""
This code must be refactored
Refactored code in PythonRefactoredCode.py
"""
import time
def main():
    """Print three ASCII-art shapes (an octagon, a bowl over a box, a stop sign).

    Backslashes are escaped (``\\\\``) throughout.  The original relied on
    invalid escape sequences such as ``'\\ '`` and ``'\\_'``, which raise
    DeprecationWarning in modern Python (and are slated to become errors)
    while only coincidentally producing the same text.
    """
    print(' ______')
    print(' / \\')
    print('/ \\')
    print('\\ /')
    print(' \\_______/')
    print()
    print('\\ /')
    print(' \\______/')
    print("+--------+")
    print()
    print(" ______")
    print(" / \\")
    print("/ \\")
    print("| STOP |")
    print("\\ /")
    print(" \\_______/")
    print()
    print(" ______")
    print(" / \\")
    print("/ \\")
    print("+--------+")
# Crude benchmark: render the shapes 1000 times and report wall-clock time.
t1 = time.time()
for i in range(1000):
    main()
t2 = time.time()
# NOTE(review): "elasped" typo is in the user-facing output string itself.
print('Time elasped : ', t2 - t1)
996,424 | f604f04ca839e2575f48ccbe58bf5409bc1e6e99 | # -*- coding: utf-8 -*-
# @Author: xiweibo
# @Date: 2018-08-29 14:25:37
# @Last Modified by: Clarence
# @Last Modified time: 2018-08-29 23:53:10
"""
python2 实现xml解析 python3实现模块相同,与示例代码相同(除了print)
Python使用SAX解析xml
SAX是一种基于事件驱动的API
利用SAX解析XML文档牵涉到两部分:解析器和事件处理器
解析器负责读取XML文档,并向事件处理器发送事件,如元素开始跟元素结束事件
而事件处理器则负责对事件作出响应,对传递的XML数据进行处理
1.对大型文件进行处理
2.只需要文件的部分内容
3.想建立自己的对象模型的时候
在Python中使用sax方式处理xml要先引入xml.sax中的parse函数,还有xml.sax.handler中的ContentHandler
ContentHandler类方法介绍
characters(content)方法
调用时机:
从行开始,遇到标签之前,存在字符,content的值为这些字符串
从一个标签,遇到下一个标签之前,存在字符,content的值为这些字符串
从一个标签,遇到行结束符之前,存在字符,content的值为这些字符串
标签可以是开始标签,也可以是结束标签
startDocument()方法
文档启动的时候调用
endDocument()方法
解析器到达文档结尾时调用
startElement(name, attrs)方法
遇到XML开始标签时调用,name是标签的名字,attrs是标签的属性值字典
endElement(name)方法
遇到XML结束标签时调用
make_parser方法
xml.sax.make_parser([parser_list])
创建一个新的解析器对象并返回
参数: parser_list-可选参数,解析器列表
parser方法
xml.sax.parse(xmlfile, contenthandler[, errorhandler])
创建一个新的SAX解析器并解析xml文档
参数:
xmlfile-xml文件名
contenthandler-必须是一个ContentHandler的对象
errorhandler-如果指定该参数,errorhandler必须是一个SAX ErrorHandler对象
parseString方法
xml.sax.parseString(xmlstring, contenthandler[, errorhandler])
创建一个新的SAX解析器并解析xml字符串
参数:
xmlstring-xml字符串
contenthandler-必须是一个ContentHandler的对象
errorhandler-如果指定该参数,errorhandler必须是一个SAX ErrorHandler对象
"""
import xml.sax
class MovieHandler( xml.sax.ContentHandler ):
    """SAX content handler that prints each <movie> element's fields.

    Text is accumulated in ``characters()`` as it streams in and flushed to
    stdout in ``endElement()``.  Ported to Python 3 ``print()`` calls: the
    original used Python 2 print statements (the module docstring itself
    notes the code is py3-identical except for print).
    """

    def __init__(self):
        # Name of the tag currently being read, plus one slot per field.
        self.CurrentData = ""
        self.type = ""
        self.format = ""
        self.year = ""
        self.rating = ""
        self.starts = ""
        self.description = ""

    # Element-open event: `tag` is the element name, `attributes` its
    # attribute dictionary.  A movie's title arrives as an attribute,
    # so it is printed here rather than collected as text.
    def startElement(self, tag, attributes):
        self.CurrentData = tag
        if tag == "movie":
            print("******Movie******")
            title = attributes['title']
            print("Title:", title)

    # Element-close event: emit whatever text was collected for the tag.
    def endElement(self, tag):
        if self.CurrentData == "type":
            print("Type:", self.type)
        elif self.CurrentData == "format":
            print("Format:", self.format)
        elif self.CurrentData == "year":
            print("Year:", self.year)
        elif self.CurrentData == "rating":
            print("Rating:", self.rating)
        elif self.CurrentData == "starts":
            print("Starts:", self.starts)
        elif self.CurrentData == "description":
            print("Description:", self.description)
        self.CurrentData = ""

    # Character-data event: stash the text under the current tag's slot.
    def characters(self, content):
        if self.CurrentData == "type":
            self.type = content
        elif self.CurrentData == "format":
            self.format = content
        elif self.CurrentData == "year":
            self.year = content
        elif self.CurrentData == "rating":
            self.rating = content
        elif self.CurrentData == "starts":
            self.starts = content
        elif self.CurrentData == "description":
            self.description = content
if __name__ == "__main__":
    # Create an XMLReader (SAX parser).
    parser = xml.sax.make_parser()
    # turn off namespaces
    parser.setFeature(xml.sax.handler.feature_namespaces, 0)
    # Install our handler, then stream-parse the document from disk.
    Handler = MovieHandler()
    parser.setContentHandler(Handler)
    parser.parse("movies.xml")
996,425 | 99b878e579a053c917e75baebf9d0cc854d6b077 | from django import forms
from .models import Snap,Profile
from django.db import models
from django.contrib.auth.models import User
class PostForm(forms.ModelForm):
    """ModelForm for Snap posts; editor and pub_date are set by the view."""
    class Meta:
        model = Snap
        # These two fields are filled programmatically, not by the user.
        exclude = ['editor', 'pub_date']
        widgets = {
            # Render the tag relation as a checkbox list instead of a select.
            'tags': forms.CheckboxSelectMultiple(),
        }
class ProfileForm(forms.ModelForm):
    """ModelForm for editing a user's Profile (bio text and avatar image)."""
    class Meta:
        model = Profile
        fields = ['bio', 'profilepicture']
        # widgets = {
        #     'tags': forms.CheckboxSelectMultiple(),
        # }
# class VoteForm(forms.ModelForm):
# class Meta:
# model = Snap
# fields = ['design','usability','content']
|
996,426 | bb0f37b1ab12ad03f0366b8d9b3fe687690b8786 | '''
Created on Thu Jan 30 2020
@author: https://blog.floydhub.com/gentle-introduction-to-text-summarization-in-machine-learning/
'''
# importing libraries
from nltk.tokenize import sent_tokenize
from src.text_processing.preprocess_word import stem, lower, stop
from src.text_processing.preprocess_phrase import tokenize
def _create_dictionary_table(text_string) -> dict:
    """Build a word-frequency table for the text.

    Words are produced by the project tokenizer with stemming, lowercasing
    and stop-word removal applied; the result maps each token to its count.
    """
    frequency_table = dict()
    for token in tokenize(text_string, [], [stem, lower, stop]):
        frequency_table[token] = frequency_table.get(token, 0) + 1
    return frequency_table
def _calculate_sentence_scores(sentences, frequency_table) -> dict:
# algorithm for scoring a sentence by its words
sentence_weight = dict()
for sentence in sentences:
sentence_wordcount_without_stop_words = 0
for word_weight in frequency_table:
if word_weight in sentence.lower():
sentence_wordcount_without_stop_words += 1
if sentence[:7] in sentence_weight:
sentence_weight[sentence[:7]
] += frequency_table[word_weight]
else:
sentence_weight[sentence[:7]
] = frequency_table[word_weight]
sentence_weight[sentence[:7]] = sentence_weight[sentence[:7]
] / sentence_wordcount_without_stop_words
return sentence_weight
def _calculate_average_score(sentence_weight) -> int:
# calculating the average score for the sentences
sum_values = 0
for entry in sentence_weight:
sum_values += sentence_weight[entry]
# getting sentence average value from source text
average_score = (sum_values / len(sentence_weight))
return average_score
def _get_article_summary(sentences, sentence_weight, threshold,):
sentence_counter = 0
article_summary = ''
for sentence in sentences:
if sentence[:7] in sentence_weight and sentence_weight[sentence[:7]] >= (threshold):
article_summary += " " + sentence
sentence_counter += 1
if sentence_counter == 2:
break
return article_summary
def summarize(text):
    """Extractive summary: return up to two sentences scoring above 0.7x the
    mean sentence score.  Very basic word-frequency algorithm.

    Arguments:
        text {string} -- the document to summarize

    Returns:
        string -- concatenation of the selected sentences (each prefixed
        with a space)
    """
    # creating a dictionary for the word frequency table
    frequency_table = _create_dictionary_table(text)

    # tokenizing the sentences (nltk sent_tokenize)
    sentences = sent_tokenize(text)

    # algorithm for scoring a sentence by its words
    sentence_scores = _calculate_sentence_scores(sentences, frequency_table)

    # getting the threshold: mean score scaled down below
    threshold = _calculate_average_score(sentence_scores)

    # producing the summary
    article_summary = _get_article_summary(
        sentences, sentence_scores, 0.7 * threshold)

    return article_summary
|
996,427 | 9905baba0a25251b774bef786a8268764a837673 | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from time import sleep
# Demo: drag the first item of the jQuery UI "Sortable" widget 300px down.
# Launch Chrome via a local chromedriver binary (hard-coded Windows path).
driver=webdriver.Chrome (executable_path="C:\\Users\\Sanket\\Desktop\\sele_python\\webdriver\\chromedriver.exe")
driver.get("https://jqueryui.com/")
driver.maximize_window()
# Navigate to the Sortable demo page.
sortable_link=driver.find_element_by_xpath("//a[text()='Sortable']")
sortable_link.click()
sleep(3)
# The sortable widget lives inside the first iframe on the page.
driver.switch_to.frame(0)
actions=ActionChains(driver)
sortable_items=driver.find_element_by_xpath("(//ul[@id='sortable']/li)[1]")
#sortable_drag=driver.find_element_by_xpath("(//ul[@id='sortable']/li)[1]")
#sortable_drop=driver.find_element_by_xpath("(//ul[@id='sortable']/li)[7]")
# NOTE(review): ActionChains is created twice; the first instance is unused.
actions=ActionChains(driver)
#actions.drag_and_drop(sortable_drag,sortable_drop).perform()
# Drag the first list item straight down by 300 pixels to reorder it.
actions.drag_and_drop_by_offset(sortable_items,0,300).perform()
sleep(5)
driver.close()
driver.quit()
996,428 | be9f3d503ec86b21a5a6d5b535871338969410c1 | """
https://leetcode.com/problems/contains-duplicate
Related:
- lt_219_contains-duplicate-ii
- lt_220_contains-duplicate-iii
"""
"""
Given an array of integers, find if the array contains any duplicates. Your function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.
"""
from collections import defaultdict
class Solution:
    def containsDuplicate(self, nums):
        """Return True iff any value appears at least twice in nums.

        A set collapses duplicates, so a size difference reveals them.
        Time O(n), space O(n).

        :type nums: List[int]
        :rtype: bool
        """
        return len(set(nums)) < len(nums)

    def containsDuplicate_sort(self, nums):
        """Sort-based variant: O(n log n) time, O(1) extra space.

        Note: sorts ``nums`` in place, exactly like the original.

        :type nums: List[int]
        :rtype: bool
        """
        nums.sort()
        # After sorting, any duplicate pair must be adjacent.
        return any(left == right for left, right in zip(nums, nums[1:]))
if __name__ == '__main__':
    # Tiny self-check harness: each entry is an (input, expected) pair.
    test_cases = [
        ([1, 1], True),
        ([1, 1, 2], True),
        ([1], False),
        ([1, 2], False)
    ]
    for test_case in test_cases:
        print('case:', test_case)
        output = Solution().containsDuplicate(test_case[0])
        print('output:', output)
        # Fail loudly if the implementation regresses.
        assert output == test_case[1]
|
996,429 | 73a23c024b4e1057b383d09a40d4de9730547b26 | #
# Copyright (c) 2017 Sebastian Muniz
#
# This code is part of point source decompiler
#
import abc
from traceback import format_exc
from output_media.output_media_base import OutputMediaBase, \
OutputMediaBaseException
try:
import idaapi
except ImportError, err:
raise OutputMediaBaseException("TextOutputMedia only available under IDA")
class TextOutputMediaException(OutputMediaBaseException):
    """Generic exception for text output media."""
    # Raised when an IDA custom viewer cannot be created or shown.
    pass
class TextOutputMedia(OutputMediaBase, idaapi.simplecustviewer_t):
    """
    Translate the MIR into C/C++-readable code, display it appropriately in
    an IDA custom viewer, and perform callbacks if applicable.

    Abstract: subclasses must implement colorize() and on_close(), and are
    expected to provide the STATEMENTS / KEYWORDS / TYPES class attributes
    used for syntax highlighting (they are not declared here).
    """

    __metaclass__ = abc.ABCMeta

    def __init__(self):
        """Initialize instance."""
        OutputMediaBase.__init__(self)
        idaapi.simplecustviewer_t.__init__(self)

        self.title = ""

        # Highlighting word lists; presumably supplied by the concrete
        # subclass -- TODO confirm, they are not defined in this class.
        self.statements = self.STATEMENTS
        self.keywords = self.KEYWORDS
        self.types = self.TYPES

    def generate_output(self, title):
        """Generate readable output in a new IDA view window.

        Creates the viewer (or clears and refreshes an existing one), shows
        it and delegates the actual text emission to colorize().
        """
        try:
            # First create the window inside the application used to hold it.
            # Then proceed to show the newly created window and fill it with
            # the data we want to display.
            self.title = title

            if not self.create():
                # Viewer already exists: reuse it after clearing its content.
                self.ClearLines()
                self.refresh()

            self.show()
            self.colorize()  # Implemented by the derived class.
        except Exception as err:
            # `debug` is presumably provided by OutputMediaBase -- TODO confirm.
            if self.debug:
                print(format_exc())
            self.close()
            raise TextOutputMediaException(
                "Error creating viewer called \"%s\"" % title)

    def close(self):
        """Close the current window."""
        self.Close()

    def create(self):
        """Create the new window with the stored title.

        Return True on success, False when the viewer could not be created.
        """
        return bool(self.Create(self.title))

    def show(self):
        """Display the window inside the current application."""
        self.Show()

    @abc.abstractmethod
    def colorize(self):
        """Fill the recently created window with the (colorized) text."""
        return

    def add_lines(self, lines):
        """Add a single line or an iterable of lines to the current display."""
        if isinstance(lines, str):
            # BUG FIX: this previously called add_lines(lines) again,
            # recursing forever on any string argument.
            self.add_line(lines)
        else:
            for line in lines:
                self.add_line(line)

    def add_line(self, string=None):
        """Display the specified text at the current line (empty if None)."""
        if not string:
            string = ""
        # Invoke the simple viewer method.
        self.AddLine(string)

    @abc.abstractmethod
    def on_close(self):
        return

    def OnClose(self):
        """Handle close event."""
        self.on_close()

    def OnKeydown(self, vkey, shift):
        """Handle every key pressed in the newly created window."""
        if vkey == 27:
            # The ESC key was pressed: close the window and report handled.
            self.Close()
        else:
            # Any other key is delegated to the subclass hook.
            return self.on_key_down(vkey, shift)

        return True

    def OnCursorPosChanged(self):
        """
        Cursor position changed.

        @return: Nothing
        """
        # NOTE(review): `on_curor_position_changed` looks like a typo of
        # "cursor", but the name is kept -- subclasses may implement the
        # misspelled hook.
        self.on_curor_position_changed()

    def refresh(self):
        """Refresh the current output."""
        self.Refresh()

    #
    # Colorize specific output.
    #
    def as_comment(self, s):
        """Display the specified text as a comment."""
        return idaapi.COLSTR(s, idaapi.SCOLOR_RPTCMT)

    def as_identifier(self, string):
        """Display the text colored according to the word list it belongs to."""
        t = string.lower()

        if t in self.keywords:
            return idaapi.COLSTR(string, idaapi.SCOLOR_ASMDIR)
        elif t in self.statements:
            return idaapi.COLSTR(string, idaapi.SCOLOR_LOCNAME)
        elif t in self.types:
            return idaapi.COLSTR(string, idaapi.SCOLOR_IMPNAME)
        else:
            return string

    def as_string(self, string):
        """Display the specified text as a string."""
        return idaapi.COLSTR(string, idaapi.SCOLOR_CHAR)

    def as_number(self, string):
        """Display the specified text as a number."""
        return idaapi.COLSTR(string, idaapi.SCOLOR_NUMBER)

    def as_directive(self, string):
        """Display the specified text as a directive."""
        return idaapi.COLSTR(string, idaapi.SCOLOR_KEYWORD)

    def as_default(self, string):
        """Display the specified text as default text."""
        return idaapi.COLSTR(string, idaapi.SCOLOR_DEFAULT)

    def as_variable_name(self, string):
        """Display the specified text as a variable name."""
        return idaapi.COLSTR(string, idaapi.SCOLOR_REG)

    def as_function_name(self, string):
        """Display the specified text as a function name."""
        return idaapi.COLSTR(string, idaapi.SCOLOR_CNAME)

    def get_colour(self, address):
        """Return the item's colour."""
        return idaapi.get_item_color(address)

    def set_colour(self, address, colour):
        """Store an item colour."""
        idaapi.set_item_color(address, colour)
|
996,430 | 67d945eb5ec50584393a8926eba8276ed5d29a68 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from datetime import datetime
from datetime import timedelta
from trader.collector import GCollector
@pytest.fixture
def collector():
    # A fresh GCollector instance per test.
    return GCollector()


def test_get_error_historical_quote(collector):
    # An invalid/out-of-range request should yield None rather than raise.
    c = collector
    hist_data = c.get_histrical_data(
        3371, datetime.now() - timedelta(days=2), datetime.now(), 10)
    print(hist_data)
    assert hist_data is None
|
996,431 | 0195b59c69466274c33ac795864b3ddf7c141617 | import random
import sys
# Generate `size` random operations into ./in.txt, one per line as
# "<op> <data> <loc>".  op 1 = insert (random value at a plausible random
# position), op 2 = delete at a random position, op 3 = a zero-argument
# operation.  `items` approximates the container size so positions stay
# in a reasonable range.
#
# Changes vs. the original: the unreachable `operation == 4` branch
# (randint(1, 3) never yields 4), the unused `arr` list and the dead
# commented-out sorter were removed; the output file is now closed via
# a context manager instead of being leaked.
size = int(sys.argv[1])

with open("./in.txt", 'w') as f:
    items = 0
    for _ in range(size):
        operation = random.randint(1, 3)
        if operation == 1:  # insert
            data = random.randint(1, 20)
            loc = random.randint(0, items + 5)
            items += 1
        elif operation == 2:  # delete
            data = 0
            loc = random.randint(0, items + 5)
            if items >= 1:
                items -= 1
        else:  # operation == 3: no arguments needed
            data = 0
            loc = 0
        f.write(str(operation) + ' ' + str(data) + ' ' + str(loc) + '\n')
|
996,432 | 46b7dcd7d71af55be676097874345e77a32ca712 | ### LAB 9 GROUP WORK
### PROBLEM 2
class Robot(object):
    """Blueprint for a virtual fighting robot.

    Every constructed robot is tracked in the class-level ``robot_list``
    registry so contenders() can report on all of them.
    """

    # Registry of every robot ever constructed.
    robot_list = []

    @staticmethod
    def contenders():
        """Print how many robots exist and, when non-zero, describe each."""
        count = len(Robot.robot_list)
        if count == 0:
            print("There are 0 robots.")
        else:
            print("There are " + str(count) + " robots.")
            print("Here's a list of them:")
            for bot in Robot.robot_list:
                print(bot)

    def __init__(self, name, weapon, strength, status = "ONLINE"):
        self.name = name
        self.weapon = weapon
        self.strength = strength
        self.status = status
        # Note: the missing space after "created!" matches the original output.
        print("Robot created!" + self.name)
        Robot.robot_list.append(self)

    def __str__(self):
        banner = "-" * 20
        fields = [
            banner,
            "Fighting Robot",
            "Name: " + self.name,
            "Weapon: " + self.weapon,
            "Strength: " + str(self.strength),
            "Status: " + self.status,
        ]
        return "\n".join(fields) + "\n" + banner
# main
# Demo: show the (empty) roster, build two robots, show the roster again.
Robot.contenders()
# NOTE(review): the variable is named r2d2 but the robot is named "Optimus".
r2d2 = Robot("Optimus", "Fists", 2)
c3po = Robot("C3PO", "Conversation", 2)
##print(r2d2)
##print(c3po)
Robot.contenders()
996,433 | ed5bafa16aad4eba449ce2436fbb6b4ad490efa7 | from .HistoryBuffer import HistoryBuffer
from .OperationEngineException import OperationEngineException
from .factory import factory
from .Operation import Operation
class Queue(HistoryBuffer):
    """History buffer doubling as the operation engine's pending queue:
    operations wait here until their context vectors allow processing."""

    def __init__(self, log):
        HistoryBuffer.__init__(self)
        self.log = log

    def enqueue(self,op):
        # Key the operation by (siteId, seqId) and freeze it while queued.
        key = factory.createHistoryKey(op.siteId, op.seqId)
        self.ops[key] = op
        op.immutable = True
        self.size += 1

    def getProcessable(self, cv):
        """ Pop and returns the operations whose context vectors now allows
        processing """
        # Walk operations in Morris order; an op that is Morris-after any
        # other queued op must wait (skip it for now).
        ops = self.getMorrisSortedOperations()
        for op in ops:
            skip = False
            for other in ops:
                comp = other.compareByMorris(op)
                if comp == -1:
                    skip = True
                    break
            if skip:
                continue
            else:
                # Eligible when the op's context is not ahead of ours.
                comp = op.contextVector.morrisCompare(cv)
                if comp < 0:
                    return self.remove(op)
                if comp == 0 :
                    return self.remove(op)
        # Nothing is processable yet.
        return None
|
996,434 | f4c664eaa2a7846c63d2f15d3b879ca1e82adbda | # a ideia é cirar um cronometro, q ao apertar no butao a contagem se inicia, e no outro finaliza. e depois ir deixando mais complexo.
# descobrir o pq o visual nao esta lendo a biblioteca;
import pygame
import tkinter
pygame.init()
# Create the main window (width, height) and set its title.
janela = pygame.display.set_mode((800, 400))
pygame.display.set_caption('Cronometro')

# BUG FIX: the original called button(master=None, activebackground=blue)
# on every loop iteration -- both `button` and `blue` are undefined names
# (and tkinter widgets cannot be driven from inside a pygame loop), so the
# program crashed with a NameError immediately.  The call is removed.
janela_aberta = True
while janela_aberta:
    # Drain the event queue; closing the window ends the loop.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            janela_aberta = False
pygame.quit()
|
996,435 | 6a1ec8455545820bdd74d4205c7658b0787ada9b | from dlrobot.common.remote_call import TRemoteDlrobotCallList
from dlrobot.common.dl_robot_round import TDeclarationRounds
from source_doc_http.source_doc_client import TSourceDocClient
from common.logging_wrapper import setup_logging
import argparse
import plotly.express as px
import pandas as pd
import datetime
import os
import sys
import json
from collections import defaultdict
import time
#this script is used for monitoring dlrobot (downloading declaration files)
#see examples in crontab.txt, how to run it
def build_html(args, fig, output_file):
    """Render a plotly figure to a standalone HTML file under args.output_folder.

    plotly.js is referenced from a CDN rather than embedded, keeping the
    produced files small.
    """
    target_path = os.path.join(args.output_folder, output_file)
    fig.write_html(target_path, include_plotlyjs='cdn')
class TDlrobotStats:
    """Aggregates dlrobot remote-call records into plot-ready time series.

    Reads the remote-call log for the current declaration round and builds
    cumulative counters (declaration files, processed websites), per-call
    metadata, and per-worker success/failure tallies.
    """

    def __init__(self, args, min_date=None, min_total_minutes=0, logger=None):
        self.args = args
        self.min_date = min_date
        self.logger = logger
        rounds = TDeclarationRounds(args.round_file)
        self.remote_calls = TRemoteDlrobotCallList(file_name=args.central_stats_file, logger=self.logger,
                                                   min_start_time_stamp=rounds.start_time_stamp)
        self.logger.debug("read {} records from {}".format(len(list(self.remote_calls.get_all_calls())),
                                                           args.central_stats_file))
        self.cumulative_declaration_files_count = []
        self.cumulative_processed_websites_count = []
        self.end_times = []        # pandas Timestamps of call completion
        self.end_time_stamps = []  # the same instants, preformatted as strings
        self.websites = []
        self.total_minutes = []
        self.host_names = []
        # One entry per exported file (worker host repeated result_files_count
        # times), so its length differs from the number of calls.
        self.declaration_files_by_workers = []
        self.exported_files_counts = []
        self.failures = []
        self.failures_by_hostnames = defaultdict(int)
        self.successes_by_hostnames = defaultdict(int)
        # BUG FIX: min_total_minutes was accepted here but never forwarded to
        # build_stats, so the duration filter silently had no effect.
        self.build_stats(min_date, min_total_minutes)

    def build_stats(self, min_date=None, min_total_minutes=0):
        """Populate the series from calls that finished after min_date and
        lasted at least min_total_minutes."""
        min_time_stamp = min_date.timestamp() if min_date is not None else 0
        website_count = 0
        sum_count = 0
        all_calls_sorted_by_end_time = sorted(self.remote_calls.get_all_calls(), key=lambda x: x.file_line_index)
        self.logger.info("build_stats for {} records".format(len(all_calls_sorted_by_end_time)))
        for remote_call in all_calls_sorted_by_end_time:
            if remote_call.end_time is None or remote_call.end_time < min_time_stamp:
                continue
            if remote_call.get_total_minutes() < min_total_minutes:
                continue
            end_time = datetime.datetime.fromtimestamp(remote_call.end_time)
            self.end_times.append(pd.Timestamp(end_time))
            self.end_time_stamps.append(end_time.strftime("%Y-%m-%d %H:%M:%S"))
            self.websites.append(remote_call.get_website())
            self.host_names.append(remote_call.worker_host_name)
            # len (self.declaration_files_by_workers) != len(self.remote_calls)
            self.declaration_files_by_workers.extend([remote_call.worker_host_name] * remote_call.result_files_count)
            self.total_minutes.append(remote_call.get_total_minutes())
            self.exported_files_counts.append(remote_call.result_files_count)
            sum_count += remote_call.result_files_count
            self.cumulative_declaration_files_count.append(sum_count)
            website_count += 1
            self.cumulative_processed_websites_count.append(website_count)
            if not remote_call.task_was_successful():
                self.failures.append(remote_call.worker_host_name)
                self.failures_by_hostnames[remote_call.worker_host_name] += 1
            else:
                self.successes_by_hostnames[remote_call.worker_host_name] += 1
        self.logger.debug("build_stats: min_date={} web_sites_count={} sel".format(min_date, website_count))

    def write_declaration_crawling_stats(self, html_file):
        """Plot the cumulative declaration-file count over time into html_file."""
        df = pd.DataFrame({'Date': self.end_times,
                           "DeclarationFileCount": self.cumulative_declaration_files_count,
                           "website": self.websites})
        title = 'Declaration Count'
        if self.min_date is not None:
            title += " (recent)"
        else:
            title += " (history)"
        fig = px.line(df, x='Date', y='DeclarationFileCount',
                      hover_data=["website"],
                      title=title)
        build_html(self.args, fig, html_file)

    def write_website_progress(self, html_file):
        """Plot the cumulative processed-website count over time into html_file."""
        df = pd.DataFrame({
            'Date': self.end_times,
            "WebSiteCount": self.cumulative_processed_websites_count,
            "website": self.websites})
        title = 'Web Site Progress'
        if self.min_date is not None:
            title += " (recent)"
        else:
            title += " (history)"
        fig = px.line(df, x='Date', y='WebSiteCount', title=title, hover_data=["website"])
        build_html(self.args, fig, html_file)

    def get_project_error_rates(self):
        """Return {worker host: failure percentage} over the collected calls."""
        error_rates = dict()
        worker_hosts = set(self.host_names)
        self.logger.debug("build get_project_error_rates for {} worker hosts".format(len(worker_hosts)))
        for host_name in worker_hosts:
            f = self.failures_by_hostnames[host_name]
            s = self.successes_by_hostnames[host_name]
            self.logger.debug("host {} fail count={} success count={}".format(host_name, f, s))
            error_rates[host_name] = 100 * (f / (s + f))
        return error_rates
class TDlrobotAllStats:
@staticmethod
def parse_args(arg_list):
parser = argparse.ArgumentParser()
parser.add_argument("--central-stats-file", dest='central_stats_file', required=False,
help="for example /home/sokirko/declarator_hdd/declarator/dlrobot_central/processed_projects/dlrobot_remote_calls.dat")
parser.add_argument("--conversion-server-stats", dest='conversion_server_stats', required=False,
help="for example /home/sokirko/declarator_hdd/declarator/convert_stats.txt")
parser.add_argument("--central-server-cpu-and-mem", dest='central_server_cpu_and_mem', required=False,
help="for example /tmp/glances.dat")
parser.add_argument("--output-folder", dest='output_folder', required=False, default=".",
help="for example ~/smart_parser.disclosures_prod/tools/disclosures_site/disclosures/static")
parser.add_argument("--central-stats-history", dest='central_stats_history', required=False,
help="for example /tmp/dlrobot_central_stats_history.txt")
parser.add_argument("--log-file-name", dest='log_file_name', required=False, default="dl_monitoring.log")
parser.add_argument("--round-file", dest="round_file", default=TDeclarationRounds.default_dlrobot_round_path)
return parser.parse_args(arg_list)
def __init__(self, args):
self.args = args
self.logger = setup_logging(log_file_name=args.log_file_name)
def build_source_doc_stats(self):
history_file = "/tmp/source_doc.history"
if os.path.exists(history_file):
with open (history_file) as inp:
history = json.load(inp)
else:
history = list()
source_doc_client = TSourceDocClient(TSourceDocClient.parse_args([]), logger=self.logger)
stats = source_doc_client.get_stats()
now = int(time.time())
stats['ts'] = now
history.append(stats)
while len(history) > 0:
if now - history[0]['ts'] > 60*60*24: # 24 hours
history.pop(0)
else:
break
with open (history_file, "w") as out:
json.dump(history, out)
timestamps = list()
source_doc_count = list()
for l in history:
dttime = datetime.datetime.fromtimestamp(l['ts'])
timestamps.append(pd.Timestamp(dttime))
source_doc_count.append(l['source_doc_count'])
df = pd.DataFrame({'Time': timestamps, "source_doc_count": source_doc_count})
fig = px.line(df, x='Time', y='source_doc_count', title='Source Document Count')
build_html(self.args, fig, "source_doc_count.html")
def process_dlrobot_central_history_stats(self):
self.logger.info("process_dlrobot_central_history_stats")
times = list()
left_projects_count = list()
with open(self.args.central_stats_history, "r") as inp:
for line_str in inp:
h = json.loads(line_str)
if len(left_projects_count) > 0 and h['input_tasks'] > left_projects_count[-1]:
times.clear()
left_projects_count.clear()
dttime = datetime.datetime.fromtimestamp(h['last_service_action_time_stamp'])
times.append(pd.Timestamp(dttime))
left_projects_count.append(h['input_tasks'])
df = pd.DataFrame({'Time': times, "Input Tasks": left_projects_count})
fig = px.line(df, x='Time', y='Input Tasks', title='Left projects count')
build_html(self.args, fig, "left_projects_count.html")
def process_dlrobot_stats(self):
self.logger.info("process_dlrobot_stats")
stats = TDlrobotStats(self.args, logger=self.logger)
stats.write_declaration_crawling_stats('declaration_crawling_stats.html')
stats.write_website_progress('file_progress.html')
min_time = datetime.datetime.now() - datetime.timedelta(hours=12)
stats12hours = TDlrobotStats(self.args, min_time, logger=self.logger)
stats12hours.write_declaration_crawling_stats('declaration_crawling_stats_12h.html')
stats12hours.write_website_progress('file_progress_12h.html')
df = pd.DataFrame({'host_names': stats12hours.host_names})
fig = px.histogram(df, x="host_names", title="Projects By Workers (12 hours)")
build_html(self.args, fig, "worker_stats_12h.html")
df = pd.DataFrame({'declaration_files_by_workers': stats12hours.declaration_files_by_workers})
fig = px.histogram(df, x="declaration_files_by_workers", title="Declaration Files By Workers (12 hours)")
build_html(self.args, fig, "declaration_files_by_workers_12h.html")
df = pd.DataFrame({'failures': stats12hours.failures})
fig = px.histogram(df, x="failures", title="Worker Failures (12 hours)")
build_html(self.args, fig, "failures_12h.html")
host2error_rates = stats12hours.get_project_error_rates()
df = pd.DataFrame({'hostnames': list(host2error_rates.keys()),
'error_rate_in_percent': list(host2error_rates.values()),
})
fig = px.bar(df, x='hostnames', y='error_rate_in_percent', title="Dlrobot error rate in percent")
build_html(self.args, fig, "error_rates_12h.html")
self.build_source_doc_stats()
def process_convert_stats(self):
self.logger.info("process_convert_stats")
with open(self.args.conversion_server_stats, encoding="utf8") as inp:
timestamps = list()
ocr_pending_all_file_sizes = list()
line_no = 1
for l in inp:
try:
(timestamp, stats) = l.split("\t")
dttime = datetime.datetime.fromtimestamp(int(timestamp))
timestamps.append(pd.Timestamp(dttime))
ocr_pending_all_file_sizes.append( json.loads(stats)['ocr_pending_all_file_size'])
line_no += 1
except Exception as exp:
print("cannot parse line index {} file {}".format(line_no, self.args.conversion_server_stats))
raise
df = pd.DataFrame({'Time': timestamps, "ocr_pending_file_sizes": ocr_pending_all_file_sizes})
fig = px.line(df, x='Time', y='ocr_pending_file_sizes',
title='Ocr Conversion Server')
build_html(self.args, fig, "ocr_pending_file_sizes.html")
    def process_cpu_and_mem_stats(self):
        """Plot CPU and memory usage (%) of the dlrobot central server over time.

        Input: self.args.central_server_cpu_and_mem, a JSON list of samples,
        each carrying keys 'ts' (unix timestamp), 'cpu' and 'mem'.
        Output files: dlrobot_central_cpu.html and dlrobot_central_mem.html.
        """
        self.logger.info("process_cpu_and_mem_stats")
        # input file is built by ~/smart_parser/tools/workstation_monitoring.py
        with open(self.args.central_server_cpu_and_mem) as inp:
            data_points = json.load(inp)
        cpu_stats = list()
        mem_stats = list()
        timestamps = list()
        for x in data_points:
            # pop('ts') also strips the timestamp key from the sample dict
            dttime = datetime.datetime.fromtimestamp(x.pop('ts'))
            timestamps.append(pd.Timestamp(dttime))
            cpu_stats.append(x['cpu'])
            mem_stats.append(x['mem'])
        df = pd.DataFrame({'Time': timestamps, "cpu_stats": cpu_stats, "mem_stats": mem_stats})
        fig = px.line(df, x='Time', y='cpu_stats', title='Dlrobot central cpu(%)')
        build_html(self.args, fig, "dlrobot_central_cpu.html")
        fig = px.line(df, x='Time', y='mem_stats', title='Dlrobot central memory(%)')
        build_html(self.args, fig, "dlrobot_central_mem.html")
def build_stats(self):
if self.args.central_stats_history is not None:
self.process_dlrobot_central_history_stats()
if self.args.central_stats_file is not None:
self.process_dlrobot_stats()
if self.args.conversion_server_stats is not None:
self.process_convert_stats()
if self.args.central_server_cpu_and_mem is not None:
self.process_cpu_and_mem_stats()
if __name__ == "__main__":
    # CLI entry point: parse command-line args and build every requested stats page.
    args = TDlrobotAllStats.parse_args(sys.argv[1:])
    TDlrobotAllStats(args).build_stats()
|
996,436 | c7cdeb7933626b7a7a8cf0e41f0d0b070682f56b | '''
Laboratorium 6 (kalkulator po telnet)
Na podstawie kalkulatora RPN opracuj serwer dostępny dla jednego użytkownika zapewniający dostęp przez protokół telnet na wybranym porcie.
'''
import socket
HEADERSIZE = 10
HOST = '127.0.0.1'
PORT = 9990
ENTER = b'\r\n'
SPACE = b' '
BACKSPACE = b'\x08'
import operator
ops = {'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv
}
def byte_encode(stack):
    """Render the calculator stack as UTF-8 bytes.

    Each item is followed by a single space; the whole string is terminated
    with '\n\r' (the order the telnet client expects).
    """
    rendered = "".join(str(item) + " " for item in stack)
    return (rendered + "\n\r").encode("utf-8")
def is_not_special_char(character):
    """Return False for the space (b' ') and backspace (b'\\x08') bytes, True otherwise."""
    return character not in (b" ", b"\x08")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
def calculate(tokens, stack):
    """Evaluate RPN tokens against an existing stack and return the updated stack.

    tokens: iterable of strings, each either a number (digits/dots only) or
    one of the operator keys of the module-level `ops` dict (+ - * /).
    stack: list of floats, mutated in place.
    Raises ValueError on underflow or an unrecognized token.

    NOTE(review): error paths report to the module-global `clientsocket`
    (bound in the accept loop below), so this function is only usable while a
    client connection is open -- confirm before reusing it elsewhere.
    """
    for token in tokens:
        # tokens made only of digits/dots are treated as numbers
        # NOTE(review): an empty or malformed token like '1.2.3' passes this
        # check and makes float() raise -- callers filter empty input
        if set(token).issubset(set("0123456789.")):
            stack.append(float(token))
        elif token in ops:
            if len(stack) < 2:
                msg = 'Must have at least two parameters to perform operation'
                clientsocket.send(msg.encode('utf-8'))
                raise ValueError(msg)
            # a is the top of the stack, so the operation is op(b, a)
            a = stack.pop()
            b = stack.pop()
            op = ops[token]
            stack.append(op(b, a))
        else:
            msg = 'Incorrect input!'
            clientsocket.send(msg.encode('utf-8'))
            raise ValueError(msg)
    return stack
while True:
clientsocket, address = s.accept()
print(f"Connection from {address} has been established!")
msg = "-----------------------------------------\n\r"
msg += "Welcome to RPN calculator!\n\r"
msg += "q - to exit \n\r"
msg += "clear - to clear stack\n\r"
msg += "-----------------------------------------\n\r"
msg = bytes(msg, 'utf-8')
clientsocket.send(msg)
while True:
stack = []
numbers = []
expression = ''
while True:
data = clientsocket.recv(512)
while data != ENTER:
decoded = data.decode()
if len(decoded) > 0:
numbers.append(decoded)
temp = clientsocket.recv(512)
if temp == BACKSPACE:
if len(numbers) > 0:
numbers.pop()
data = b''
continue
if temp == SPACE:
continue
data = temp
for i in numbers:
expression += i
if expression == 'q':
exit()
elif expression == 'clear':
stack = []
expression = ''
numbers = []
continue
elif len(expression) == 0:
continue
stack = calculate(expression.split(), stack)
print(str(stack))
clientsocket.send(byte_encode(stack))
expression = ''
numbers = []
|
996,437 | 451c377dad2a27a8a3c3b7641760f951067c092a | n = int(input())
for i in range(n):
linha = input()
for j in range(len(linha)):
c = ""
if (linha[j] >= "A" and linha[j]<="Z"):
linha[j] =
|
996,438 | 8c10526cf7d0303cdc78ab21b366668a66fee1f8 | """
Created by Danny on 2018/12/11
"""
from app.models.base import db, Base
from app.libs.helper import get_current_date
__author__ = 'Danny'
class MemberCart(Base):
    """Shopping-cart line item: one (member, food) pair with a quantity."""
    __tablename__ = 'member_cart'
    id = db.Column(db.Integer, primary_key=True)
    # owning member; indexed because carts are always looked up per member
    member_id = db.Column(db.BigInteger, nullable=False, index=True, server_default=db.FetchedValue())
    food_id = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
    quantity = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
    updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
    created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())

    @staticmethod
    def set_items(member_id=0, food_id=0, number=0):
        """Create or update a cart row, setting its quantity to `number`.

        Returns False when any argument is missing/non-positive, True on success.
        NOTE(review): the quantity is overwritten, not incremented -- confirm
        callers pass the absolute quantity rather than a delta.
        """
        if member_id < 1 or food_id < 1 or number < 1:
            return False
        cart_info = MemberCart.query.filter_by(food_id=food_id, member_id=member_id).first()
        if cart_info:
            # existing row: reuse it so created_time is preserved
            model_cart = cart_info
        else:
            model_cart = MemberCart()
            model_cart.member_id = member_id
            model_cart.created_time = get_current_date()
        model_cart.food_id = food_id
        model_cart.quantity = number
        model_cart.updated_time = get_current_date()
        with db.auto_commit():
            db.session.add(model_cart)
        return True

    @staticmethod
    def delete_item(member_id=0, items=None):
        """Delete the given cart rows for a member.

        items: iterable of dicts, each carrying the food id under key 'id'.
        Returns False for a missing member id or empty items, True otherwise.
        """
        if member_id < 1 or not items:
            return False
        with db.auto_commit():
            for item in items:
                MemberCart.query.filter_by(food_id=item['id'],
                                           member_id=member_id).delete()
        return True
|
996,439 | 6fdaebb0c00244241d8026fff729ce6cf606e61e | from queue import Queue
from threading import Thread
q_result = Queue()
str_list=['222','444','333','666','888']
def str_to_int(arg, queue):
    """Parse `arg` as an int and put a single-entry {arg: value} dict on `queue`."""
    queue.put({arg: int(arg)})
def main():
    """Convert every string in str_list on its own thread, then print the results.

    Workers push {original_string: int_value} dicts onto the shared q_result
    queue; the final print drains exactly len(str_list) entries.
    """
    workers = [Thread(target=str_to_int, args=(text, q_result)) for text in str_list]
    for worker in workers:
        worker.start()
    # block until every conversion has finished
    for worker in workers:
        worker.join()
    print([q_result.get() for _ in range(len(str_list))])
if __name__ == '__main__':
main() |
996,440 | 2b77419539b4dbd3c410df1ada1be72e9baa0701 | class EstadoRepartidor:
INACTIVO = 'INACTIVO'
ACTIVO = 'ACTIVO'
OCUPADO = 'OCUPADO'
|
996,441 | 7b1630f4ac189657919cc7e8d3648dc56b672baa | # coding: utf-8
from django.http import HttpResponse
from django.template.loader import get_template
from django.utils.six.moves.urllib.parse import parse_qsl, urlparse, urlunparse
from django.utils.cache import patch_cache_control
from django.views.decorators.csrf import csrf_exempt
import urllib.request, time, base64, json
def do_general(request, body):
    """Render `body` inside the general.html wrapper template.

    Returns a text/html response marked uncacheable (max-age=0).
    """
    t = get_template('general.html')
    html = t.render({'body': body})
    response = HttpResponse(html, content_type='text/html')
    # force clients to revalidate on every request
    patch_cache_control(response, max_age=0)
    return response
def debug_page(request):
    """Serve the debug view wrapped in the general template."""
    return do_general(request, 'debug.html')


def video_page(request):
    """Serve the video view wrapped in the general template."""
    return do_general(request, 'video.html')


def main_page(request):
    """Serve the main view wrapped in the general template."""
    return do_general(request, 'main.html')
def Solve(cube):
    """Ask the local solver backend for a solution to `cube`.

    Returns the raw response body on HTTP 200, or None for an empty cube,
    a non-200 status, or any network failure.
    """
    try:
        if not cube:
            return None
        req = urllib.request.Request(
            "http://localhost:17071/solve?cube={}".format(cube), method='GET')
        resp = urllib.request.urlopen(req)
        return resp.read() if resp.status == 200 else None
    except Exception:
        return None
def solve_page(request):
    """HTTP endpoint: solve the cube passed as ?cube=... and return JSON.

    Falls back to a JSON failure message when the backend is unreachable or
    the parameters are bad.
    """
    cube = request.GET.get("cube", "")
    answer = Solve(cube)
    if not answer:
        answer = """{"state": "fail", "message": "could not reach backend or bad parameters"}"""
    return HttpResponse(answer, content_type="text/json")
def Log(data):
    """Forward a base64-encoded JSON log record to the local backend.

    The record is decoded, stamped with the current server time under
    'server_timestamp', re-encoded, and POSTed to http://localhost:17071/log.
    Returns the backend response body on HTTP 200, or None for empty input or
    any decode/network failure.

    Fixes over the previous revision (each made the function always return
    None): the request URL carried a stray .format(cube) on a name that does
    not exist here (NameError), the JSON string was passed to b64encode
    without being encoded to bytes (TypeError), and the response variable
    name was misspelled ('ressponse').
    """
    try:
        if not data:
            return None
        record = json.loads(base64.b64decode(data))
        record["server_timestamp"] = time.time()
        # b64encode requires bytes, so encode the JSON text first
        payload = base64.b64encode(json.dumps(record).encode('utf-8'))
        request = urllib.request.Request("http://localhost:17071/log", data=payload)
        response = urllib.request.urlopen(request)
        if response.status != 200:
            return None
        return response.read()
    except Exception:
        return None
@csrf_exempt
def log_page(request):
    """Accept a client log record (raw POST body) and relay it to the backend.

    Responds with a JSON failure message when the relay did not succeed.
    """
    data = request.body
    answer = Log(data)
    if not answer:
        answer = """{"state": "fail", "message": "could not reach backend or bad parameters"}"""
    return HttpResponse(answer, content_type="text/json")
|
996,442 | 20e633ef8b8f7fbb8854d0cadeb6fa4ec1357526 | import franka_interface
import rospy
import itertools
import pickle
import pyrealsense2 as rs
import numpy as np
import cv2
from cv2 import aruco
import os
from std_msgs.msg import String
import tf2_ros
import geometry_msgs.msg
from tf2_geometry_msgs import PoseStamped
import pb_robot
import numpy
from rotation_util import *
from panda_vision.msg import NamedPose
from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist
from pb_robot.panda import Panda
def move_to_blocks(arm, gripper, block_poses):
    """Pick up one hard-coded block with the Panda arm.

    Flow: find the block in `block_poses` (world-camera estimates), move the
    end effector above that world pose, re-estimate the pose with the wrist
    camera service, correct the approach, descend, grasp, and return to the
    neutral pose with the gripper opened.

    arm/gripper: franka_interface Arm/Gripper interfaces.
    block_poses: list of NamedPose messages from the world-camera service.
    """
    block_id = 6  # options: 0, 2, 6
    world_pose = None
    for block_pose in block_poses:
        if block_pose.block_id == str(block_id):
            world_pose = block_pose.pose
    if not world_pose:
        print('Service did not return desired block pose')
        return
    # headless pb_robot instance used only for FK/IK computations
    pb_robot.utils.connect(use_gui=False)
    robot = Panda()
    robot_pose = numpy.eye(4)
    robot.set_transform(robot_pose)
    # First move to where we think the block is in the global frame.
    p_w = [world_pose.pose.position.x,
           world_pose.pose.position.y,
           world_pose.pose.position.z]
    o_w = [world_pose.pose.orientation.x,
           world_pose.pose.orientation.y,
           world_pose.pose.orientation.z,
           world_pose.pose.orientation.w]
    R_w = quat_to_rot(o_w)
    X_w = Rt_to_pose_matrix(R_w, p_w)
    print('T:', X_w)
    # For now, keep orientation the same as it initially is.
    curr_q = arm.joint_angles()
    curr_tform = robot.arm.ComputeFK(arm.convertToList(curr_q))
    curr_tform[0:3, 3] = p_w
    # hover above the block; NOTE(review): offsets presumably gripper length +
    # block height + 10 cm approach clearance -- confirm against hardware
    curr_tform[2, 3] += 0.098 + 0.065 + 0.1  # 0.098
    approach_world = robot.arm.ComputeIK(curr_tform)
    x = input('Move?')
    if x == 'y':
        arm.move_to_joint_positions(arm.convertToDict(approach_world))
    # Then update the pose using the wrist frame.
    rospy.wait_for_service('get_block_poses_wrist')
    try:
        get_block_poses = rospy.ServiceProxy('get_block_poses_wrist', GetBlockPosesWrist)
        block_poses = get_block_poses()
    except rospy.ServiceException as e:
        print("Service call failed: %s"%e)
    wrist_pose = None
    for block_pose in block_poses.poses:
        if block_pose.block_id == str(block_id):
            wrist_pose = block_pose.pose
    if not wrist_pose:
        print('Wrist camera did not detect the desired pose.')
        return
    p_w = [wrist_pose.pose.position.x,
           wrist_pose.pose.position.y,
           wrist_pose.pose.position.z]
    o_w = [wrist_pose.pose.orientation.x,
           wrist_pose.pose.orientation.y,
           wrist_pose.pose.orientation.z,
           wrist_pose.pose.orientation.w]
    R_w = quat_to_rot(o_w)
    X_w = Rt_to_pose_matrix(R_w, p_w)
    print('T:', X_w)
    # For now, keep orientation the same as it initially is.
    curr_q = arm.joint_angles()
    curr_tform = robot.arm.ComputeFK(arm.convertToList(curr_q))
    curr_tform[0:3, 3] = p_w
    curr_tform[2, 3] += 0.098 + 0.065 + 0.1  # 0.098
    # seed the wrist-frame IK with the world-frame approach for continuity
    approach_wrist = robot.arm.ComputeIK(curr_tform, seed_q=approach_world)
    curr_tform[2, 3] -= 0.1
    grasp_wrist = robot.arm.ComputeIK(curr_tform, seed_q=approach_wrist)
    print('goal:', curr_tform)
    x = 'y'  # input('Correct to wrist pose?')
    if x == 'y':
        arm.move_to_joint_positions(arm.convertToDict(approach_wrist))
        # input('Move to grasp.')
        arm.move_to_joint_positions(arm.convertToDict(grasp_wrist))
        arm.hand.grasp(0.02, 10, epsilon_inner=0.1, epsilon_outer=0.1)
        # input('Return to neutral?')
        arm.move_to_joint_positions(arm.convertToDict(approach_wrist))
        arm.move_to_neutral()
        arm.hand.open()
if __name__ == '__main__':
rospy.init_node('aruco_pick')
arm = franka_interface.ArmInterface()
gripper = franka_interface.GripperInterface()
rospy.wait_for_service('get_block_poses_world')
try:
get_block_poses = rospy.ServiceProxy('get_block_poses_world', GetBlockPosesWorld)
block_poses = get_block_poses()
move_to_blocks(arm, gripper, block_poses.poses)
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
|
996,443 | 61d5b0bda1cf4ad71b2fce4566fa2dd75c04d03b | ba1107.pngMap = [
'11111111111111111111111111111111111111111111111111111100111111111100111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100111111111100111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000000000000000000000000011111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000000011111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000000001111000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000000011010000000111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000000000111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000000001011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000000001011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111000000000000000000000000000000000000000001011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111000000000000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111110000000000000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111100000000000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111101000000000000000000000000000000000000000000000111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111000000000000000000000000000000000010000000001011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110100000000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111000000000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000000000000000000000000111000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000011110100000000001111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000111111000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000011111111100000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110010000000000000011111111100000001111111111111111111111111111111111111111',
'11111111000010111111111111111111111111111111111111111100000000000000000000111111100000001111111111111111111111111100000000111111',
'11111111000000101111111111111111111111111111111111111000000000000000000000011111110000001111111111111111111111000000000000111111',
'11111100000000000000111111111111111111111111111111100000000000000000000000010111111100111111111111111111111110000000000000001111',
'11110000000100000000011111111111111111111111111111100000000000000000000000001111111111111111111111111111010000000000110000000111',
'11000000101100100000000000001111111111111111111100000000000000000000000000000000001111111111111111111100000000000111111100000000',
'00000000001111100001000000001111111111111111111010000000000000000000000000000000001111111111111111000000001000000000111000000000',
'00000000001111000000101000000000001100010000000000000000000000000000000000000000100000000100000010000000010000000001100000000000',
'00000000001100000000001100000000000000000000000000000000000000000000000000000000000000000000000000000100110000000000100000000000',
'00010000000000000000001111000000000000000000000000000000000000000000000000000000000000000000110000000011110000000001000000000000',
'00000000000111000000001100000000000000000000100000000000000000000000000000000000001010000000000000000011111000000001000000000010',
'01100000000011000000100100000000111000000010110000000000000000000000000000000000001100000000001000000000110000000011000000000100',
'11110000001111000000001000000000101000000000100000000000000000000000000000000000001100000000101000000000111000000011000000001100',
'00100000000011000000111100000000000000000000100000000000000000000000000000000000001100000000000000000000011100000011100000010110',
'00110000000011000000111100000000001000000010000000000000000000000000000000000000000100000000000000000000011100001011110000001100',
'00110000001111100000111100000000110100000001100000000000000000000000000000000000000100000000011100000000111110000011110000101100',
'00110000001111100000111100000010111000000001110000000000000000000000000000000000001101000000111100000000111100000011110000001000',
'11110000111111110001111100000000111100000011000000000000000000000000000000000000000100100000001111000011111100001111110000111110',
'11110100111111110011111111000010111100000011000000000000000000000000000000000000000001100000001110000011111111011111110100111111',
'11111000111111111111111111000011111110000011000000000000000000000000000000000000000011100000001111100111111111111111111001111111',
'11111111111111111111111111000011111100010010000000000000000000000000000000000000000101111000001111000011111111111111111111111111',
'11111111111111111111111111100111111110000100000000000000000000000000000000000000000000000001111111100111111111111111111111111111',
'11111111111111111111111111001111111101000000000000000000000000000000000000000000000000000110111111111111111111111111111111111111',
'11111111111111111111111111111111111101000000000000110000000000000000000000001000000000000000111111110111111111111111111111111111',
'11111111111111111111111111111111111000000000000000110000000000000000000000000100000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111000000000000000111110000000000000000000000001011100000000000000000000000111111111111111111111111',
'11111111111111111111111111111110000000000000001111100000000000000000000000000011110000000000000000000000111111111111111111111111',
'11111111111111111111000000000000000000000011111111110000000000000000000000000011111111000000000000000000001111111111111111111111',
'11111111111111111111000000000000000000000011111111000000000000000000000000000011111111000000000000000000001111111111111111111111',
'11111111111111111111010000000000000000011111111111000000000000000000000000000000111111111101000000000000001111111111111111111111',
'11111111111110010010000000000000000011111111111101000000000000000000000000000000111111111111000000000000000100011111111111111111',
'11111111111000000000000000000000011111111111111100000000000000000000000000000000111111111111111110000000000000000000111111111111',
'11111111111100000000000000000000111111111111111100000000000000000000000000000001011111111111111111100000000000000010111111111111',
'11111111111111101000000001111111111111111111111000000000000000000000000000000000001111111111111111111110010011111111111111111111',
'11111111111111111111100011111111111111111111111000000000000000000000000000000000001111111111111111111111110011111111111111111111',
'11111111111111111111111111111111111111111111110000000000000000000000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111000000000000000000000000000000000011111111111111111111111111111111111111111111111',
]
|
996,444 | 76456f04d141f1e18b50d948d872ffec3cb10683 | #coding:utf-8
import sys
import festival
print("talking")
festival.execCommand("(voice_el_diphone)")
string = unicode("Hola mundo, esta es una prueba del criticon, con una canción", "ascii")
festival.sayText(string)
|
996,445 | 91d64f1150ec00e308b809209ccb7ff465787131 | # -*- coding: utf-8 -*-
import scrapy
from scraper.items import AllNewsItem
from all_news.models import Category, News
class JaijaidinSpider(scrapy.Spider):
category = ''
name = 'jaijaidin'
allowed_domains = ['jaijaidinbd.com']
start_urls = [
'http://www.jaijaidinbd.com/todays-paper/sports',
'http://www.jaijaidinbd.com/todays-paper/homeland',
'http://www.jaijaidinbd.com/todays-paper/abroad',
'http://www.jaijaidinbd.com/todays-paper/trade-commerce',
'http://www.jaijaidinbd.com/todays-paper/entertainment',
'http://www.jaijaidinbd.com/feature/rong-berong',
'http://www.jaijaidinbd.com/feature/science-and-technology',
'http://www.jaijaidinbd.com/todays-paper/editorial',
]
user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"
try:
news_db_urls = News.objects.filter(source='jaijaidin').values_list('url', flat=True)
news_db_urls = list(news_db_urls)
news_db_urls = [x.rsplit('/', 1)[0] for x in news_db_urls]
except Exception as e:
news_db_urls = []
    def parse(self, response):
        """Collect article links from a category page and follow only unseen ones.

        URLs are compared with the trailing path segment stripped, against the
        list already stored in the database (self.news_db_urls).
        """
        crawled_urls = response.css('#cat_parent_content_list a::attr("href")').extract()
        # drop the last path segment so URLs match the stored canonical form
        news_urls = [x.rsplit('/', 1)[0] for x in crawled_urls]
        unique_urls = list(set(news_urls) - set(self.news_db_urls))
        for news_url in unique_urls:
            if 'all-news' not in news_url:
                yield response.follow(news_url, callback=self.parse_news)
            else:
                # "all-news" listing pages are not articles; skip them
                pass
    def parse_news(self, response):
        """Scrape one article page into an AllNewsItem.

        The item category is derived from the URL path; items whose body still
        contains the Bengali "details coming..." placeholder are dropped.
        """
        def listToString(s):
            # join the scraped text fragments with single spaces
            str1 = " "
            # return string
            return (str1.join(s))
        item = AllNewsItem()
        item['title'] = response.css('.headline_section h1::text').extract_first()
        description = response.css('#myText ::text').extract()
        # separate the fragments into paragraphs
        description = [x.strip() + '\n\n' for x in description]
        description = listToString(description)
        item['description'] = description
        image = response.css('.dtl_img_section img::attr(src)').extract_first()
        if image:
            # image paths are site-relative; make them absolute
            image = 'http://www.jaijaidinbd.com' + image
        item['image'] = image
        item['url'] = response.request.url + '/'
        item['source'] = 'jaijaidin'
        # map the URL section to a local category name; later matches win
        if 'sports' in response.request.url:
            self.category = 'sports'
        if 'homeland' in response.request.url:
            self.category = 'bangladesh'
        if 'politics' in response.request.url:
            self.category = 'politics'
        if 'rong-berong' in response.request.url:
            self.category = 'lifestyle'
        if 'abroad' in response.request.url:
            self.category = 'international'
        if 'trade-commerce' in response.request.url:
            self.category = 'economy'
        if 'entertainment' in response.request.url:
            self.category = 'entertainment'
        if 'science-and-technology' in response.request.url:
            self.category = 'technology'
        if 'editorial' in response.request.url:
            self.category = 'opinion'
        item['category'] = Category.objects.get(name=self.category)
        if description:
            # skip placeholder articles (Bengali "details coming...")
            if 'বিস্তারিত আসছে...' not in description:
                yield item
            else:
                pass
|
996,446 | d74f2b88ba0b2df5456d075426790feff9c7930c | import hashlib
import json
from app.main import db
from app.main.location.models import Location
class JsonModel(object):
    """Mixin adding plain-dict serialization for SQLAlchemy models."""

    def as_dict(self):
        """Map every mapped column name to the instance's current value."""
        result = {}
        for column in self.__table__.columns:
            result[column.name] = getattr(self, column.name)
        return result
class Person(db.Model, JsonModel):
    """Person Model for storing user related details.

    Self-referential: father/mother point back to other Person rows, with
    backrefs children_of_father / children_of_mother.
    """
    __tablename__ = 'person'
    # primary key plus a content-hash key (see build_key below)
    id = db.Column(db.String, primary_key=True, nullable=False)
    key = db.Column(db.String, primary_key=False, nullable=False)
    name = db.Column(db.String(50), unique=False, nullable=False)
    surname = db.Column(db.String(150), unique=False, nullable=False)
    birthday = db.Column(db.DateTime, unique=False, nullable=True)
    location_id = db.Column(db.String, db.ForeignKey(Location.id), nullable=False)
    # self-referential foreign keys to the parents
    father_id = db.Column(db.String, db.ForeignKey(__tablename__+'.id'), index=True)
    father = db.relationship('Person',
        remote_side="Person.id", primaryjoin=('person.c.id==person.c.father_id'),
        backref="backref('children_of_father')", uselist=False)
    # father = db.relation("Person", remote_side="Person.id")
    mother_id = db.Column(db.String, db.ForeignKey(__tablename__+'.id'), index=True)
    mother = db.relationship('Person',
        remote_side="Person.id", primaryjoin=('person.c.id==person.c.mother_id'),
        backref="backref('children_of_mother')", uselist=False)

    def __repr__(self):
        # NOTE(review): repr is just the bare name; consider including the id
        return self.name
def build_key(data):
    """Return a stable SHA-1 hex digest of `data` serialized as sorted-key JSON."""
    canonical = json.dumps(data, sort_keys=True).encode('utf-8')
    return hashlib.sha1(canonical).hexdigest()
|
996,447 | ef7d04c24040e6aaed950f80fd6358dc91e0bfca | #!/usr/bin/env python
# coding: utf-8
# In[23]:
help("reduce")
# In[ ]:
"""
1.1 Write a Python Program to implement your own myreduce() function which works exactly
like Python's built-in function reduce()
"""
# In[106]:
def My_reduce(func, b):
    """Fold the iterable `b` into a single value with the binary function `func`.

    Mirrors functools.reduce called without an initializer: the first element
    seeds the accumulator, func(accumulator, element) folds in the rest, and
    an empty iterable raises TypeError.

    Improvements over the indexing version: works on any iterable (generators
    included, not just sequences) and raises TypeError -- as reduce() does --
    instead of IndexError when the input is empty.
    """
    iterator = iter(b)
    try:
        accumulator = next(iterator)  # first element seeds the fold
    except StopIteration:
        raise TypeError("My_reduce() of empty iterable with no initial value") from None
    for element in iterator:
        accumulator = func(accumulator, element)
    return accumulator
# In[107]:
# use of My_reduce function :
lis=range(3)
My_reduce(lambda a,b:a+b,lis)
# In[108]:
# use of orignal reduce function :
import functools
lis=range(3)
print(functools.reduce(lambda a,b : a+b,lis))
# In[ ]:
"""
1.2 Write a Python program to implement your own myfilter() function which works exactly
like Python's built-in function filter()
"""
# In[95]:
# To Build my own Filter Function
def my_filter(func, lt):
    """Return a list of the elements of `lt` for which `func` is truthy.

    Mirrors the built-in filter (but returns a list, as before); like the
    built-in, passing func=None keeps the elements that are themselves truthy.
    """
    if func is None:
        func = bool  # built-in filter semantics for a None predicate
    return [element for element in lt if func(element)]
# In[94]:
# use of My_filter function :
lst=[1,2,3,4,5,6,7,8,9]
list(my_filter(lambda a: a+5,lst))
# In[98]:
# use of orignal filter function :
lst=[1,2,3,4,5,6,7,8,9]
list(filter(lambda a: a+5,lst))
# In[ ]:
# In[42]:
"""
2. Implement List comprehensions to produce the following lists.
Write List comprehensions to produce the following Lists
['x', 'xx', 'xxx', 'xxxx', 'y', 'yy', 'yyy', 'yyyy', 'z', 'zz', 'zzz', 'zzzz']
['x', 'y', 'z', 'xx', 'yy', 'zz', 'xxx', 'yyy', 'zzz', 'xxxx', 'yyyy', 'zzzz']
[[2], [3], [4], [3], [4], [5], [4], [5], [6]]
[[2, 3, 4, 5], [3, 4, 5, 6],[4, 5, 6, 7], [5, 6, 7, 8]]
[(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3)]
"""
# In[99]:
print("Answer :-")
[str(j)*i for j in "xyz" for i in range(1,5)]
# In[102]:
print("Answer :-")
[str(i)*j for j in range(1,5) for i in "xyz"]
# In[103]:
print("Answer :-")
[[j for j in range(i,i+4)] for i in range(2,6)]
# In[104]:
print("Answer :-")
[[i+j] for j in range(1,4) for i in range(1,4)]
# In[105]:
print("Answer :-")
[(i,j) for j in range(1,4) for i in range(1,4)]
# In[ ]:
|
996,448 | 9b2e4530df1993457c33b8df202580e1526846b2 | '''
Created on 9.4.2012
@author: xaralis
'''
from django import template
from django.conf import settings
from django.templatetags.static import static
from versioned_static.conf import ASSETS, USE_MINIFIED, USE_VERSIONING
register = template.Library()
@register.inclusion_tag('versioned_static/render_asset.html')
def asset(atype, aname):
    """
    Renders CSS/JS asset with its enclosing HTML tag (link/script). If asset
    is composed from multiple files, this will be preserved (unless minifyed).
    Respects settings if versioning should be incorporated or not.

    atype: 'css' or 'js' (anything else raises TemplateSyntaxError).
    aname: key into ASSETS[atype]; unknown names raise ValueError.
    Returns the context dict for the inclusion template.
    """
    if atype not in ('css', 'js'):
        raise template.TemplateSyntaxError('Type can only be one of css or js.')
    if aname not in ASSETS[atype]:
        raise ValueError('Invalid asset: %r' % aname)
    meta = ASSETS[atype][aname]
    return {
        'USE_MINIFIED': USE_MINIFIED,
        'type': atype,
        'asset': aname,
        'meta': meta,
    }
@register.simple_tag
def versioned(filename, version, force_version=False, full_path=True):
    """
    Returns filename enriched with version given as second argument.

    The version is inserted just before the final extension (a.css -> a.V.css);
    names without a dot yield None. With full_path the result is resolved
    through the staticfiles helper.
    """
    if '.' not in filename:
        return None
    if USE_VERSIONING or force_version:
        stem, _, extension = filename.rpartition('.')
        filename = u'%s.%s.%s' % (stem, version, extension)
    return static(filename) if full_path else filename
996,449 | e0d353c64381eee404eb4d86264ab55142b6f26c | from timeit import Timer
def test1():
    """Benchmark body: dict.copy()."""
    base = {"a": 1,
            "b": 2}
    duplicate = base.copy()


def test2():
    """Benchmark body: dict.get()."""
    base = {"a": 1,
            "b": 2}
    value = base.get('a')


def test3():
    """Benchmark body: item assignment."""
    base = {"a": 1,
            "b": 2}
    base['a'] = 3


def test4():
    """Benchmark body: item deletion."""
    base = {"a": 1,
            "b": 2}
    del base['a']
t1 = Timer('test1()','from __main__ import test1')
print('copy',t1.timeit(number=1000),'毫秒')
t2 = Timer('test2()','from __main__ import test2')
print('get',t2.timeit(number=1000),'毫秒')
t3 = Timer("test3()", "from __main__ import test3")
print("set",t3.timeit(number=1000), "毫秒")
t4 = Timer("test4()", "from __main__ import test4")
print("delete",t4.timeit(number=1000), "毫秒") |
996,450 | ffceb0accb6f828a84f30ccf8cf801b1f92832fd | #!/usr/bin/env python
import sys, os, time
from optparse import OptionParser
from contextlib import closing
from seqtools import solid, fastq
from seqtools.io import xopen
# Process the DGE data.
# This duplicates functionality in clean-dge-fastq, but uses lots less
# memory
#
# This funciontality requires that the "cutadapt" library is installed:
# http://code.google.com/p/cutadapt/
def trim_dpnii(read, *args, **kwargs):
    """Drop the leading C of a read whose sequence starts with the CGATC site.

    Returns the trimmed read, the untouched read when there is no site, or
    None when trimming raises ValueError.
    """
    if not read.sequence.startswith('CGATC'):
        return read
    try:
        return read.trim(1, 'left')
    except ValueError:
        return None
## Make sure you trim the dpnii adapter first
def filter_anchor(read, anchors, *args, **kwargs):
    """Keep `read` only when its sequence starts with one of the `anchors`.

    Returns the read unchanged on a match, None otherwise. Run trim_dpnii
    first so the restriction site sits at the very start of the sequence.
    """
    for anchor in anchors:
        if read.sequence.startswith(anchor):
            return read
    return None
def process(infile, outfile, funcs, trashfile=None, parser='fastq',
            minlength=0, *args, **kwargs):
    """Run each read of `infile` through the cleaning pipeline, write survivors.

    funcs: sequence of callables read -> read-or-None, applied in order; the
    output of one step feeds the next. (Previously every step received the
    ORIGINAL read, so trimming had no effect on later filters or on the
    written output -- that chaining bug is fixed here, which also fixes the
    NameError on `pread` when funcs is empty.)
    A read is discarded when a step returns None or the (possibly trimmed)
    read is shorter than minlength; discarded reads go to `trashfile`, when
    given, in their original form.

    Returns (total, kept, tossed) counts.
    Raises NotImplementedError for any parser other than 'fastq'.
    """
    if parser == 'fastq':
        parser = fastq.parse
    else:
        raise NotImplementedError("Only doing FASTQ for now")
    count, good, bad = 0, 0, 0
    for read in parser(infile):
        count += 1
        is_good = True
        pread = read  # chain: each step sees the previous step's output
        for func in funcs:
            pread = func(pread, *args, **kwargs)
            if pread is None or (minlength > 0 and len(pread) < minlength):
                if trashfile is not None:
                    trashfile.write("@%s\n%s\n+\n%s\n" % \
                        (read.id, read.sequence, read.quality))
                is_good = False
                bad += 1
                break
        if is_good:
            good += 1
            outfile.write("@%s\n%s\n+\n%s\n" % \
                (pread.id, pread.sequence, pread.quality))
    return (count, good, bad)
if __name__ == '__main__':
usage = """usage: %prog [options] [CMD] INPUT.fastq[.gz]
Runs the given CMD step in the cleaning of DGE data. If CMD is missing
then the entire pipeline is run. All files can be read and output in
gzip format -- the names of the files just have to end in *.gz
NOTE: You will have to remove the sequencing adapter either before
or after running the steps in this script. You can use the python
cutadapt library, or the fastx-toolkit (faster) for that.
The commands CMD to be run are as follows (in order):
dpnii : Trims the 5' C of the CGATC reads
filter-anchor : Only keeps reads with successful 5' restriction sites
By default the anchors are defined as GATC and CATG,
so we assume that dpnii was run before. If you want to
run this first, use --anchors=CATG,CGATC
"""
parser = OptionParser(usage=usage)
parser.add_option('-a', '--adapter', dest='adapter',
default="TCGTATGCCGTCTTCTGCTTG",
help="The sequence of the 3' adapter.")
parser.add_option('-r', '--anchors', dest='anchors',
default="GATC,CATG",
help="Comma separated list of expected anchor sites at 5' end")
parser.add_option('-o', '--outfile', dest='outfile', default=None,
help="Name of file to dump output, defaults to STDOUT")
parser.add_option('-t', '--trashfile', dest='trashfile', default=None,
help="Optional name of file to put 'bad' reads into")
parser.add_option('-m', '--minimum-length', dest='minlength', type=int,
default=0, help="Reads < this length are discarded.")
(options, args) = parser.parse_args()
steps = {'dpnii' : trim_dpnii, 'filter' : filter_anchor}
if len(args) < 1:
parser.error("Need at least one argument for input filename")
if args[0] in steps:
pfuncs = (steps[args[0]],)
infile = args[1]
else:
pfuncs = (steps['dpnii'], steps['filter'])
infile = args[0]
if not os.path.isfile(infile):
parser.error("Cannot read input file.")
if options.outfile is None:
outfile = sys.stdout
else:
outfile = xopen(options.outfile, 'w')
if options.trashfile is None:
trashfile = None
else:
trashfile = xopen(options.trashfile, 'w')
anchors = options.anchors.split(',')
elapsed = time.time()
(total, good, bad) = process(infile, outfile, pfuncs, trashfile,
anchors=anchors, minlength=options.minlength)
elapsed = time.time() - elapsed
if options.outfile is not None:
outfile.close()
if options.trashfile is not None:
trashfile.close()
sys.stderr.write('=== DGE Processing Done (%.2f seconds) ===\n' % elapsed)
sys.stderr.write(' Processed %d sequences.\n' % total)
sys.stderr.write(' Kept %d\n' % good)
sys.stderr.write(' Tossed %d\n' %bad)
|
996,451 | 0adc52856d0f86f30610d5dc31112752efe02e54 | #! /usr/bin/env python
#
## Begin copyright
##
## /home/jrf/Documents/books/Books20/Tools/python/aabooks/lib/isbn.py
##
## Part of the Books20 Project
##
## Copyright 2021 James R. Fowler
##
## All rights reserved. No part of this publication may be
## reproduced, stored in a retrival system, or transmitted
## in any form or by any means, electronic, mechanical,
## photocopying, recording, or otherwise, without prior written
## permission of the author.
##
##
## End copyright
'''Some useful utility functions for working with ISBN numbers
that are not supplied in the module isbnlib.
https://isbnsearch.com/search?s=0-667-02340-5
will return the book's information if this is a valid ISBN
'''
from math import fmod
import isbnlib as isbn
#
# for ISBN-10 the checksum is calculated by
# ISBN-10 is of the form a-bcd-efghi-j
# checksum is j = remainder of ([abcdefghi] x [123456789]) MOD 11
# Valid results are '0'-'9' and 'X'
#
isbn10_mults = [1, 2, 3, 4, 5, 6, 7, 8, 9]
def checksum_10(isbnlike):
    '''Compute the ISBN-10 check character for a candidate ISBN string.

    The input must contain 10 legal ISBN characters (dashes allowed); the
    existing check character is ignored.  Returns '0'-'9' or 'X'.
    '''
    digits = isbn.canonical(isbnlike)
    # weighted sum of the first nine digits with weights 1..9
    total = sum(weight * int(digit)
                for weight, digit in zip(isbn10_mults, digits[:9]))
    remainder = total % 11
    return 'X' if remainder == 10 else str(remainder)
#
# ISBN-13 is of the form abc-def-ghijkl-m (where abc will usually be 978 or 979)
# checksum is m = 10 - the remainder of ([abcdefghiklm] x [131313131313]) MOD 10
# Valid results are '0'-'9'
#
isbn13_mults = [1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3]
def checksum_13(isbnlike):
    '''Compute the ISBN-13 check digit for a candidate ISBN string.

    The input must contain 13 legal ISBN characters (dashes allowed); the
    existing check character is ignored.  Returns a single digit '0'-'9'.
    '''
    digits = isbn.canonical(isbnlike)
    total = sum(weight * int(digit)
                for weight, digit in zip(isbn13_mults, digits[:12]))
    # BUG FIX: the check digit is (10 - sum mod 10) mod 10.  The previous
    # code returned str(10 - sum mod 10), which yields the two-character
    # string '10' whenever the weighted sum is divisible by 10; the correct
    # result in that case is '0'.
    return str((10 - total % 10) % 10)
#
# Generate a checksum for either a 10 or 13 digit ISBN
#
def checksum(isbnlike):
    '''Compute the check character for a 10- or 13-digit ISBN string.

    The input must contain 10 or 13 legal ISBN characters (dashes allowed);
    the existing check character is ignored.  Returns the check character as
    a string, or None when the input is neither 10 nor 13 characters long.
    '''
    digits = isbn.canonical(isbnlike)
    # dispatch on the canonical length
    handlers = {10: checksum_10, 13: checksum_13}
    handler = handlers.get(len(digits))
    if handler is None:
        return None
    return handler(digits)
#
#
#
if __name__ == '__main__':
    import sys
    import unittest
    import argparse

    # When an ISBN is given on the command line, compute its checksum
    # rather than running the unit tests.
    parser = argparse.ArgumentParser(description='parse and validate ISBN values')
    parser.add_argument('isbn',
                        type=str,
                        help='''An ISBN value to test''',
                        default='',
                        nargs='?')
    args = parser.parse_args()

    if args.isbn:
        cksum = checksum(args.isbn)
        if cksum is None:
            print('This ISBN value', args.isbn, 'does not seem to be a proper value')
        else:
            print('The proper ISBN checksum is', cksum)
        sys.exit()

    # Known-good (prefix, check-character) pairs; the tests append a dummy
    # '0' check character because checksum_* ignore the last position.
    # still missing a checksum of 8
    isbn10_list = [
        ('0-8357-0331', '2'), ('0-08-024620', '6'), ('3-540-09830', '5'),
        ('0-387-09830', '5'), ('3-540-09831', '3'), ('0-387-09831', '3'),
        ('0-292-75507', '4'), ('0-8243-0917', '0'), ('0-521-22285', '0'),
        ('0-262-02137', '4'), ('0-471-04492', 'X'), ('0-7167-1006', '4'),
        ('0-7167-1062', '5'), ('3-12-983890', '2'), ('3-12-983840', '6'),
        ('0-442-30215', '0'), ('0-442-30216', '9'), ('0-89490-027', '7'),
        ('0-7188-2433', '4'), ('3-519-02346', '6'), ('90-277-1001', '5'),
        ('90-277-1044', '9'), ('90-277-0957', '2'), ('90-277-0997', '1'),
        ('0-85264-244', 'X'), ('0-201-05674', '7'), ('0-444-85115', '1'),
        ('0-444-85266', '2'), ('0-444-85267', '0'), ('0-19-851462', 'X'),
        ('0-387-90369', '0'), ('3-540-90369', '0'), ('0-19-857553', 'X'),
        ('0-471-04815', '1'), ('3-411-01570', '5'), ('3-528-17236', '3'),
        ('3-528-17214', '2'), ('3-211-81430', '2'), ('0-387-81430', '2'),
        ('3-211-81475', '2'), ('0-387-81475', '2'), ('0-86008-258', 'X'),
        ('2-01-003860', '6'), ('0-86961-109', '7'), ('0-444-41802', '4'),
        ('0-08-026341', '0'), ('0-08-026342', '9'),
    ]
    isbn13_list = [
        ('978-1-62040-593-', '2'), ('978-0-691-15271-', '4'),
        ('978-0-521-38200-', '1'), ('978-1-137-28008-', '4'),
        ('978-0-262-04318-', '2'), ('978-0-06-236359-', '6'),
        ('978-0-375-42429-', '8'), ('978-0-670-01695-', '2'),
        ('978-1-61614-739-', '6'), ('978-1-61636-023-', '5'),
        ('978-1-250-09896-', '2'), ('978-0-684-83252-', '4'),
        ('978-0-8229-4552-', '9'), ('978-1-108-47154-', '1'),
    ]

    # check valid and invalid checksum values
    class ISBNTestCase(unittest.TestCase):
        '''The test suite for isbn.py.'''

        def setUp(self):
            '''Set up for the tests.'''

        def tearDown(self):
            '''Tear down for the next test.'''

        def test_a_checksum_10(self):
            '''Test checksum_10() function.'''
            for isbntest, csum in isbn10_list:
                self.assertEqual(checksum_10(isbntest + '0'), csum)

        def test_b_checksum_13(self):
            '''Test checksum_13() function.'''
            # BUG FIX: this test previously iterated isbn10_list and called
            # checksum_10 again (copy/paste error), so checksum_13 and the
            # 13-digit data were never exercised.
            for isbntest, csum in isbn13_list:
                self.assertEqual(checksum_13(isbntest + '0'), csum)

    unittest.main()
|
996,452 | 2ec8713c105612ba4b628282997c54babb667578 | #Morgan Baughman
#12/6/17
#fileDemo.py - how to read a file
# Find and report the longest word in the dictionary file.
# BUG FIXES: the original measured len(word) (the accumulator, always '')
# instead of the current line, and both update assignments were reversed
# ('lenght = longest' and 'words = word'), so it always printed ''.
# The file is now also closed deterministically via a context manager, and
# each line is stripped so the trailing newline does not inflate its length.
longest = 0
word = ''
with open('engmix.txt') as dictionary:
    for line in dictionary:
        candidate = line.strip()
        if len(candidate) > longest:
            longest = len(candidate)
            word = candidate
print('The longest word is', word)
996,453 | 98537429586c2a9e294d03d734b71072f05be235 | from onegov.api.models import ApiEndpoint
from onegov.api.models import ApiEndpointCollection
from onegov.api.models import ApiEndpointItem
from onegov.api.models import ApiException, ApiInvalidParamException
from onegov.core.utils import Bunch
def test_api_exceptions():
    """Check message/status propagation through the API exception types."""
    exc = ApiException()
    assert exc.message == 'Internal Server Error'
    assert exc.status_code == 500

    # non-API exceptions are masked behind a generic 500
    exc = ApiException(exception=ValueError('foo'))
    assert exc.message == 'Internal Server Error'
    assert exc.status_code == 500

    exc = ApiException(exception=ApiInvalidParamException('foo'))
    assert exc.message == 'foo'
    assert exc.status_code == 400

    # an explicit status_code loses against the wrapped API exception's code
    exc = ApiException(exception=ApiInvalidParamException('foo'),
                       status_code=299)
    assert exc.message == 'foo'
    assert exc.status_code == 400

    exc = ApiException(
        exception=ApiInvalidParamException('foo', status_code=300))
    assert exc.message == 'foo'
    assert exc.status_code == 300

    exc = ApiInvalidParamException()
    assert exc.message == 'Invalid Parameter'
    assert exc.status_code == 400

    exc = ApiInvalidParamException('Invalid Param x', status_code=99)
    assert exc.message == 'Invalid Param x'
    assert exc.status_code == 99
def test_api_endpoint_collection(app, endpoint_class):
    """The collection exposes the registered endpoint classes by name."""
    registered = ApiEndpointCollection(app).endpoints
    assert registered == {'endpoint': endpoint_class}
def test_api_endpoint_item(app, endpoint_class):
    """An endpoint item resolves its endpoint class, payload and links."""
    endpoint_item = ApiEndpointItem(app, 'endpoint', 1)
    assert type(endpoint_item.api_endpoint) is endpoint_class
    assert endpoint_item.item.id == 1
    assert endpoint_item.data == {'a': 1, 'title': 'First item'}
    assert endpoint_item.links == {'b': '2'}
def test_api_endpoint(app, endpoint_class):
    """Exercise ApiEndpoint's builder methods (for_page / for_filter /
    for_item), its lookup helpers and its pagination links/batch."""
    # ... for_page
    new = ApiEndpoint(app).for_page(None)
    assert new.page is None
    assert new.extra_parameters == {}
    new = ApiEndpoint(app).for_page(1)
    assert new.page == 1
    assert new.extra_parameters == {}
    # string page numbers are coerced to int
    new = ApiEndpoint(app).for_page('1')
    assert new.page == 1
    assert new.extra_parameters == {}
    # existing filters survive a page change
    new = ApiEndpoint(app, {'a': 1}, 4).for_page(5)
    assert new.page == 5
    assert new.extra_parameters == {'a': 1}
    # applying a filter resets the page
    new = ApiEndpoint(app).for_page(1).for_filter(a=1)
    assert new.page is None
    assert new.extra_parameters == {'a': 1}
    # ... for_filter
    new = ApiEndpoint(app).for_filter()
    assert new.page is None
    assert new.extra_parameters == {}
    new = ApiEndpoint(app).for_filter(a=1)
    assert new.page is None
    assert new.extra_parameters == {'a': 1}
    # a new filter replaces (does not merge with) the previous parameters
    new = ApiEndpoint(app, {'a': 1}, 4).for_filter(b=2)
    assert new.page is None
    assert new.extra_parameters == {'b': 2}
    new = ApiEndpoint(app).for_filter(a=1).for_filter(b=2)
    assert new.page is None
    assert new.extra_parameters == {'b': 2}
    new = ApiEndpoint(app).for_filter(a=1).for_page(1)
    assert new.page == 1
    assert new.extra_parameters == {'a': 1}
    # ... for_item: ids are normalised to strings (UUID-like ids via .hex)
    assert ApiEndpoint(app).for_item(None) is None
    assert endpoint_class(app).for_item(Bunch(id=1)).id == '1'
    assert endpoint_class(app).for_item(Bunch(id='1')).id == '1'
    assert endpoint_class(app).for_item(Bunch(id=Bunch(hex='1'))).id == '1'
    assert endpoint_class(app).for_item(Bunch(id=1)).endpoint == 'endpoint'
    # ... get_filter
    assert ApiEndpoint(app).get_filter('a') is None
    assert ApiEndpoint(app, {'a': 1}).get_filter('a') == 1
    # ... by_id
    assert endpoint_class(app).by_id(1).id == 1
    assert endpoint_class(app).by_id(2).id == 2
    assert endpoint_class(app).by_id(3) is None
    # .... item_data
    assert endpoint_class(app).item_data(Bunch(title=1, a=2)) == {
        'title': 1,
        'a': 2
    }
    # .... item_links
    assert endpoint_class(app).item_links(Bunch(b=2)) == {'b': 2}
    # ... links: monkeypatch the backing collection's previous/next pages
    assert endpoint_class(app).links == {'next': None, 'prev': None}
    endpoint = endpoint_class(app)
    endpoint._collection.previous = Bunch(page=3)
    endpoint._collection.next = Bunch(page=5)
    assert endpoint.links['prev'].page == 3
    assert endpoint.links['next'].page == 5
    # ... batch maps endpoint items to the underlying collection items
    batch = endpoint_class(app).batch
    assert {endpoint.id: item.title for endpoint, item in batch.items()} == {
        '1': 'First item', '2': 'Second item'
    }
996,454 | 1fe1b0d754768e407a92a7acd1832aa2863b1c30 | #!/usr/bin/env python
# coding: utf-8
'''
This is a simple arithmetic expression interpreter very much inspired
by Peter Norvig's lis.py [1]. It implements the arithmetic expression
subset of the language described in Chapter 1 of Samuel Kamin's book
Programming Languages book [2].
[1] http://norvig.com/lispy.html
[2] Samuel Kamin, "Programming Languages, An Interpreter-Based Approach",
Addison-Wesley, Reading, MA, 1990. ISBN 0-201-06824-9.
BNF of this mini-language:
<expression> ::= <integer>
| `(` <value-op> <expression>* `)`
<value-op> ::= `+` | `-` | `*` | `/` | `=` | `<` | `>`
<integer> ::= sequence of digits, possibly preceded by minus sign
'''
import operator as op
import re
REGEX_INTEGER = re.compile(r'-?\d+$')
class InterpreterError(Exception):
    """generic interpreter error"""

    def __init__(self, value=None):
        self.value = value

    def __str__(self):
        # The class docstring doubles as the user-facing error message;
        # an optional offending value is appended in repr form.
        base = self.__class__.__doc__
        if self.value is None:
            return base
        return base + ': ' + repr(self.value)
# The docstring of each exception doubles as its user-facing message via
# InterpreterError.__str__, so the wording below is load-bearing.
class InputError(InterpreterError):
    """generic parsing error"""


class UnexpectedEndOfInput(InputError):
    """unexpected end of input"""


class UnexpectedRightParen(InputError):
    """unexpected )"""


class EvaluationError(InterpreterError):
    """generic evaluation error"""


class InvalidOperator(EvaluationError):
    """invalid operator"""


class NullExpression(EvaluationError):
    """null expression"""


class MissingArguments(EvaluationError):
    """missing arguments"""


class TooManyArguments(EvaluationError):
    """too many arguments"""
def tokenize(source_code):
    """Convert a string into a list of tokens."""
    # pad parentheses with spaces so split() isolates them as tokens
    padded = source_code.replace('(', ' ( ')
    padded = padded.replace(')', ' ) ')
    return padded.split()
def parse(source_code):
    """Convert a string into expressions represented as (nested) lists"""
    return read(tokenize(source_code))
def read(tokens):
    """Read tokens building recursively nested expressions"""
    if not tokens:
        raise UnexpectedEndOfInput()
    token = tokens.pop(0)
    # a stray ')' can never start an expression
    if token == ')':
        raise UnexpectedRightParen()
    if token != '(':
        return atom(token)
    # '(' opens a sub-expression: gather children until the matching ')'
    parsed = []
    if not tokens:
        raise UnexpectedEndOfInput()
    while tokens[0] != ')':
        parsed.append(read(tokens))
        if not tokens:
            raise UnexpectedEndOfInput()
    tokens.pop(0)  # discard ')'
    return parsed
def atom(token):
    """Return numbers as numbers, everything else as symbols"""
    return int(token) if REGEX_INTEGER.match(token) else token
# Value-ops of the language, each a binary callable.  Division is integer
# (floor) division and the comparison ops return 1/0 rather than booleans,
# matching the integers-only language of Kamin's chapter 1.
operators = {
    '+': op.add,
    '-': op.sub,
    '*': op.mul,
    '/': op.floordiv,
    '=': lambda a, b: 1 if a == b else 0,
    '<': lambda a, b: 1 if a < b else 0,
    '>': lambda a, b: 1 if a > b else 0,
}
def evaluate(expression):
    """Calculate the value of an expression"""
    # integers evaluate to themselves
    if isinstance(expression, int):
        return expression
    # bare symbols must name a value-op
    if isinstance(expression, str):
        try:
            return operators[expression]
        except KeyError:
            raise InvalidOperator(expression)
    # otherwise: a (possibly nested) list -- evaluate children first
    values = [evaluate(sub) for sub in expression]
    if not values:
        raise NullExpression()
    operator = values.pop(0)
    if not callable(operator):
        raise InvalidOperator(operator)
    if len(values) < 2:
        raise MissingArguments()
    if len(values) > 2:
        raise TooManyArguments()
    return operator(values[0], values[1])
def repl(prompt='> '):
    """A read-eval-print loop"""
    while True:
        try:
            value = evaluate(parse(input(prompt)))
        except (InterpreterError, ZeroDivisionError) as exc:
            # report the error and keep the loop alive
            print('! ' + str(exc))
        except KeyboardInterrupt:
            # Ctrl-C: emit a trailing newline and exit cleanly
            print()
            raise SystemExit
        else:
            print(value)


if __name__=='__main__':
    repl()
|
996,455 | 8f92f40e89419ebb9c3de5c1cdd492d06ffd08d4 | import databases
import sqlalchemy
from fastapi_users.db import OrmarBaseUserModel, OrmarUserDatabase
from User.schemas import UserDB
# SQLite connection shared by the ormar user model below.
DATABASE_URL = "sqlite:///sqlite2.db"
metadata = sqlalchemy.MetaData()
database = databases.Database(DATABASE_URL)
class UserModel(OrmarBaseUserModel):
    """Ormar table model backing fastapi-users accounts."""
    class Meta:
        # ormar model configuration: table name plus the shared
        # metadata/database objects defined at module level
        tablename = "users_21"
        metadata = metadata
        database = database
# Create the tables registered on `metadata` (runs at import time).
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.create_all(engine)
def get_user_db():
    """FastAPI dependency yielding the fastapi-users ormar database adapter."""
    yield OrmarUserDatabase(UserDB, UserModel)
996,456 | 9a1319df4aee90eb1676d95a514c796aaa0d05eb | import json
import urllib2
from collections import namedtuple
def _json_object_hook(d): return namedtuple('X', d.keys())(*d.values())
def json2obj(data):
    """Parse JSON text into nested named tuples instead of dicts."""
    return json.loads(data, object_hook=_json_object_hook)
def getJsonResponse(substr, page):
    # Fetch one page of movie search results from the hackerrank mock API
    # and return the parsed JSON as nested named tuples.
    # NOTE(review): Python 2 only (urllib2); the title is interpolated
    # without URL-encoding -- confirm callers pass URL-safe search terms.
    contents = urllib2.urlopen("https://jsonmock.hackerrank.com/api/movies/search/?Title=" + substr +"&page=" + str(page)).read()
    return json2obj(contents)
def getMovieTitles(substr):
    """Collect every movie title matching `substr` across all result pages,
    returned in sorted order."""
    first_page = getJsonResponse(substr, 1)
    titles = [movie.Title for movie in first_page.data]
    # page 1 reports the total page count; fetch any remaining pages
    for page_number in range(2, first_page.total_pages + 1):
        extra_page = getJsonResponse(substr, page_number)
        titles.extend(movie.Title for movie in extra_page.data)
    titles.sort()
    return titles
# Enter the desired search term below and run the script to print every
# matching title (Python 2: note the print statement syntax).
titles = getMovieTitles("spiderman")
for x in titles:
    print x
996,457 | f635d055c4febe42a130fc485493369a3a24a773 | /home/mohammed/anaconda3/lib/python3.7/rlcompleter.py |
996,458 | 2f4f16d577e41d4c9823496f5e8c6ef73224f26c | import logging, numpy, openravepy, time, math
from openravepy.databases import inversereachability
from openravepy import IkFilterOptions
#from openravepy.databases import inversereachability
class GraspPlanner(object):
    """Plan and execute a grasp: rank grasps from the OpenRAVE grasp
    database, sample a reachable base pose via the inverse-reachability
    model, then drive the base and arm there and close the fingers.

    NOTE(review): this source arrived with its indentation stripped; the
    block nesting below was reconstructed from the logic and should be
    verified against the original file.  Python 2 syntax throughout.
    """

    def __init__(self, robot, base_planner, arm_planner):
        self.robot = robot
        self.env = self.robot.GetEnv()
        self.manip = self.robot.GetActiveManipulator()
        self.base_planner = base_planner
        self.arm_planner = arm_planner
        #self.task_manipulation = openravepy.interfaces.TaskManipulation(self.robot)

    def GetBasePoseForObjectGrasp(self, obj):
        """Return (base_pose, grasp_config) for grasping `obj`.

        base_pose is a numpy array [x, y, yaw]; grasp_config is an arm IK
        solution.  Both may be None if no solution was found.
        """
        # Load grasp database
        self.gmodel = openravepy.databases.grasping.GraspingModel(self.robot, obj)
        if not self.gmodel.load():
            self.gmodel.autogenerate()
        base_pose = None
        grasp_config = None
        ###################################################################
        # TODO: Here you will fill in the function to compute
        #  a base pose and associated grasp config for the
        #  grasping the bottle
        ###################################################################
        #get the ordered valid grasp from homework1
        print "robot start transformation -----------------"
        print self.robot.GetTransform()
        self.graspindices = self.gmodel.graspindices
        self.grasps = self.gmodel.grasps
        self.order_grasps()
        # get the grasp transform (the 11th-ranked grasp, collision-free)
        Tgrasp = self.gmodel.getGlobalGraspTransform(self.grasps_ordered[10],collisionfree=True)
        # load inverserechability database
        irmodel = openravepy.databases.inversereachability.InverseReachabilityModel(robot=self.robot)
        starttime = time.time()
        print 'loading irmodel'
        if not irmodel.load():
            irmodel.autogenerate()
            loaded = irmodel.load()
            print "irmodel loaded? {}".format(loaded)
        densityfn,samplerfn,bounds = irmodel.computeBaseDistribution(Tgrasp)
        #find the valid pose and joint states
        # initialize sampling parameters
        goals = []
        numfailures = 0
        N = 3
        with self.robot.GetEnv():
            while len(goals) < N:
                poses,jointstate = samplerfn(N-len(goals))
                for pose in poses:
                    self.robot.SetTransform(pose)
                    self.robot.SetDOFValues(*jointstate)
                    # validate that base is not in collision
                    if not self.manip.CheckIndependentCollision(openravepy.CollisionReport()):
                        q = self.manip.FindIKSolution(Tgrasp,filteroptions=IkFilterOptions.CheckEnvCollisions)
                        if q is not None:
                            values = self.robot.GetDOFValues()
                            values[self.manip.GetArmIndices()] = q
                            goals.append((Tgrasp,pose,values))
                        elif self.manip.FindIKSolution(Tgrasp,0) is None:
                            # unreachable even ignoring collisions
                            numfailures += 1
        # To do still
        #base_pose = goals[0][1]
        #grasp_config = goals[0][2]
        # apply each sampled goal in turn; the last one applied determines
        # the pose/config returned below
        for i,goal in enumerate(goals):
            grasp_with_pose,pose,values =goal
            self.robot.SetTransform(pose)
            self.robot.SetJointValues(values)
        # convert the final 4x4 transform into a planar (x, y, yaw) pose
        trans_pose = self.robot.GetTransform()
        angle_pose = openravepy.axisAngleFromRotationMatrix(trans_pose)
        pose = [trans_pose[0,3],trans_pose[1,3],angle_pose[2]]
        base_pose = numpy.array(pose)
        grasp_config = q
        #import IPython
        #IPython.embed()
        print "grasping result"
        print base_pose
        print grasp_config
        return base_pose, grasp_config

    def PlanToGrasp(self, obj):
        """Plan base + arm trajectories to grasp `obj`, execute them and
        close the fingers.  Exits the process if no solution is found."""
        # Next select a pose for the base and an associated ik for the arm
        base_pose, grasp_config = self.GetBasePoseForObjectGrasp(obj)
        if base_pose is None or grasp_config is None:
            print 'Failed to find solution'
            exit()
        # Now plan to the base pose
        start_pose = numpy.array(self.base_planner.planning_env.herb.GetCurrentConfiguration())
        base_plan = self.base_planner.Plan(start_pose, base_pose)
        base_traj = self.base_planner.planning_env.herb.ConvertPlanToTrajectory(base_plan)
        print 'Executing base trajectory'
        self.base_planner.planning_env.herb.ExecuteTrajectory(base_traj)
        # Now plan the arm to the grasp configuration
        start_config = numpy.array(self.arm_planner.planning_env.herb.GetCurrentConfiguration())
        arm_plan = self.arm_planner.Plan(start_config, grasp_config)
        arm_traj = self.arm_planner.planning_env.herb.ConvertPlanToTrajectory(arm_plan)
        print 'Executing arm trajectory'
        print arm_traj
        self.arm_planner.planning_env.herb.ExecuteTrajectory(arm_traj)
        print "execute trajectory----------------"
        # Grasp the bottle
        task_manipulation = openravepy.interfaces.TaskManipulation(self.robot)
        print "task manipulation---------------------"
        task_manipulation.CloseFingers()
        raw_input('')
        print "close fingers"

    #Code copied from hw1(the following two functions)
    # order the grasps - call eval grasp on each, set the 'performance' index, and sort
    def order_grasps(self):
        self.grasps_ordered = self.grasps.copy() #you should change the order of self.grasps_ordered
        for grasp in self.grasps_ordered:
            grasp[self.graspindices.get('performance')] = self.eval_grasp(grasp)
        # sort! (descending performance)
        order = numpy.argsort(self.grasps_ordered[:,self.graspindices.get('performance')[0]])
        order = order[::-1]
        self.grasps_ordered = self.grasps_ordered[order]

    def eval_grasp(self, grasp):
        """Score a grasp from its contact wrench matrix; returns 0 on any
        planning failure or rank-deficient wrench matrix."""
        with self.robot:
            #contacts is a 2d array, where contacts[i,0-2] are the positions of contact i and contacts[i,3-5] is the direction
            try:
                contacts,finalconfig,mindist,volume = self.gmodel.testGrasp(grasp=grasp,translate=True,forceclosure=False)
                obj_position = self.gmodel.target.GetTransform()[0:3,3]
                num_contacts = len(contacts)
                # for each contact
                G = numpy.zeros([6, num_contacts]) #the wrench matrix
                for idx, c in enumerate(contacts):
                    pos = c[0:3] - obj_position
                    # print pos
                    dir = -c[3:] #this is already a unit vector
                    #TODO fill G
                    G[0:3,idx] = dir.T
                    G[3:6,idx] = numpy.cross(pos,dir).T
                #TODO use G to compute scrores as discussed in class
                U, s, V = numpy.linalg.svd(G, full_matrices=True)
                # print U.shape, s.shape, V.shape
                # Metric 1 minimum singular value
                if s.all() >= 0:
                    m1 = numpy.amin(s)
                else:
                    m1 = 0
                # Metric 2: volume of the ellipsoid
                if numpy.linalg.det(numpy.dot(G,G.T)) >= 0:
                    m2 = numpy.sqrt(numpy.linalg.det(numpy.dot(G,G.T)))
                else:
                    m2 = 0;
                #Metric 3: Isotropy
                sigma_min = numpy.amin(s)
                sigma_max = numpy.amax(s)
                if sigma_max > 0:
                    m3 = sigma_min / sigma_max
                else:
                    m3 = 0
                # print U.shape, s.shape, V.shape
                #Need to come up with weights for each of the metric for evaluation function
                # print 'm1: ' + repr(m1) + '\nm2: ' + repr(m2) + '\nm3: ' + repr(m3)
                # rationale, m1 and m3 are highly correlated so I bring them to about the same order of magnitude
                # m2, is very small and boosted to about the same order of magnitude as well
                if numpy.linalg.matrix_rank(G) == 6:
                    return 100*m1+50000*m2+1000*m3
                else:
                    return 0
            except openravepy.planning_error,e:
                #you get here if there is a failure in planning
                #example: if the hand is already intersecting the object at the initial position/orientation
                return 0.00 # TODO you may want to change this
|
996,459 | 3100b8d145292509c26dca3093853b3442ff815d | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 18 13:30:03 2018
@author: hsseo
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from batch import *
from Network_D import *
# pylint: disable=missing-docstring
import argparse
import os.path
import os
import sys
import time
import math
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import scipy.io as sio
import numpy as np
#tf.device('/cpu:0'):
# Restrict which GPUs TensorFlow may see (must be set before TF initialises).
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="2,3"
#os.environ["CUDA_lISIBLE_DEVICES"]="2,3"
image_size = 512  # input/label slices are 512x512 (see placeholder shapes)
# Basic model parameters as external flags.
FLAGS = None
# Training/test volumes loaded from .mat files; the dict keys mirror the
# file names.  NOTE(review): array shapes are assumed (N, 512, 512) to
# match the placeholders -- confirm against the .mat contents.
X = sio.loadmat('/raid/seo/CT/lung/TrainInput_l1.mat')
X1 = X['TrainInput_l1']
X_train_s = np.concatenate([X1],axis=0)
Y = sio.loadmat('/raid/seo/CT/lung/TrainOutput_l1.mat')
Y1 = Y['TrainOutput_l1']
Y_train_s = np.concatenate([Y1],axis=0)
X_t = sio.loadmat('/raid/seo/CT/lung/TestInput_l1.mat')
X_test = X_t['TestInput_l1']
Y_t = sio.loadmat('/raid/seo/CT/lung/TestOutput_l1.mat')
Y_test = Y_t['TestOutput_l1']
def placeholder_inputs(batch_size):
    """Create float32 placeholders for the input images and label images,
    both of shape (batch_size, 512, 512)."""
    slice_shape = (batch_size, 512, 512)
    images_placeholder = tf.placeholder(tf.float32, shape=slice_shape)
    labels_placeholder = tf.placeholder(tf.float32, shape=slice_shape)
    return images_placeholder, labels_placeholder
def run_training():
    """Build the graph, then run the training loop: every 5 steps it
    checkpoints, logs the loss, and evaluates on the held-out test set.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; verify the nesting of the two `(step + 1) % 5` blocks.
    """
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        phase_train = tf.placeholder(tf.bool, name='phase_train')
        keep_prob = tf.placeholder(tf.float32)
        # Build a Graph that computes predictions from the inference model.
        logits = deepnn(images_placeholder, image_size, FLAGS.batch_size, keep_prob, phase_train)
        # scalar loss-weighting terms, fed back in each step from the
        # values the loss op returned on the previous step
        alpha_p = tf.placeholder(tf.float32, shape=())
        alpha_n = tf.placeholder(tf.float32, shape=())
        beta_sq = tf.placeholder(tf.float32, shape=())
        # Add to the Graph the Ops for loss calculation.
        loss, updated_alpha_p, updated_alpha_n, updated_beta_sq = lossfn(logits, labels_placeholder,alpha_p, alpha_n, beta_sq)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = training(loss, FLAGS.learning_rate)
        # calculate prediction error
        #pred_err = prediction(logits, labels_placeholder, labels_mean, labels_std)
        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()
        # Add the variable initializer Op.
        init = tf.global_variables_initializer()
        #init = tf.initialize_all_variables
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()
        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        # And then after everything is built:
        # Run the Op to initialize the variables.
        sess.run(init)
        TRAIN_DATASIZE = X_train_s.shape[0]
        batchtrain = Batchdata(np.arange(0,TRAIN_DATASIZE))
        alpha_p_value = 1.0
        alpha_n_value = 1.0
        beta_sq_value = 1.0
        for step in xrange(FLAGS.max_step):
            start_time = time.time()
            idxs = batchtrain.next_batch(FLAGS.batch_size) #shuffled ordering
            batch_X = X_train_s[idxs,:,:]
            batch_Y = Y_train_s[idxs,:,:]
            feed_dict = {images_placeholder: batch_X, labels_placeholder: batch_Y, keep_prob: 0.6, phase_train: True, alpha_p: alpha_p_value, alpha_n: alpha_n_value, beta_sq: beta_sq_value}
            _, loss_value, alpha_p_value, alpha_n_value, beta_sq_value = sess.run([train_op, loss, updated_alpha_p, updated_alpha_n, updated_beta_sq], feed_dict=feed_dict)
            duration = time.time() - start_time
            if (step + 1) % 5 == 0:
                # checkpoint and report progress
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                print('%%%% save the model paramters ... ')
                print('Step %d: loss = %.7f (%.3f sec)' % (step, loss_value, duration))
            if (step + 1) % 5 == 0:
                # evaluate on the test set (dropout off, inference mode).
                # NOTE(review): the placeholders are sized FLAGS.batch_size;
                # feeding X_test assumes it has the same first dimension.
                feed_dict = {images_placeholder: X_test, labels_placeholder: Y_test, keep_prob: 1.0, phase_train: False, alpha_p: alpha_p_value, alpha_n: alpha_n_value, beta_sq: beta_sq_value}
                Test_results = sess.run([logits, updated_alpha_p, updated_alpha_n, updated_beta_sq], feed_dict=feed_dict)
                loss_value = sess.run(loss, feed_dict=feed_dict)
                print('alpha_p2_value %f' % (alpha_p_value))
                print('alpha_n_value %f' % (alpha_n_value))
                print('beta_sq_value %f' % (beta_sq_value))
                sio.savemat('Test_results.mat', {'pred': Test_results})
################################################################
#####################run_kernel#################################
################################################################
def main(_):
    """Entry point for tf.app.run: ensure the log dir exists, then train."""
    tf.gfile.MakeDirs(FLAGS.log_dir)
    run_training()
if __name__ == '__main__':
    # Parse known command-line flags into the module-level FLAGS; anything
    # unrecognised is forwarded unchanged to tf.app.run.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.00001,
        help='Initial learning rate.'
    )
    parser.add_argument(
        '--max_step',
        type=int,
        default=2000000000,
        help='Number of steps to run trainer.'
    )
    parser.add_argument(
        '--batch_size',
        type=int,
        default=10,
        help='Batch size. Must divide evenly into the dataset sizes.'
    )
    parser.add_argument(
        '--log_dir',
        type=str,
        default='logs',
        help='Directory to put the log data.'
    )
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
996,460 | 35c413fabce495d1e4e2450c6a3805895dadd4eb | from flask import render_template, request
from ui import webapp
@webapp.route("/login")
def login_register():
    """Render the combined login/registration page."""
    return render_template("login.html")
@webapp.route('/login', methods=['POST'])
def login_attempt():
    """Handle a login form submission.

    NOTE(review): authentication is not implemented yet -- the commented
    block sketches the intended flow.  As written the view returns None,
    which Flask rejects at runtime; implement the TODO (or return a
    placeholder response) before routing real traffic here.
    """
    username = request.form.get("username")
    password = request.form.get("password")
    # TODO
    # if users.authenticate(username, password):
    #     configure_user_session(username)
    #     return redirect(url_for("main"))
    # else:
    #     return render_template("home.html", title="Welcome to Easy Text Recognition",
    #                            error_msg="You have entered an incorrect password or username")
@webapp.route('/register', methods=['POST'])
def register_new_user():
    """Handle a registration form submission.

    NOTE(review): registration is not implemented yet -- the commented
    block sketches the intended flow.  As written the view returns None,
    which Flask rejects at runtime; implement the TODO before use.
    """
    username = request.form.get("username")
    password = request.form.get("password")
    # TODO
    # if users.get_user(username) is not None:
    #     print("Failed to register - username is already taken!")
    #     return render_template("home.html", title="Welcome to Easy Text Recognition",
    #                            error_msg="Selected username is already taken. Please choose a different username")
    #
    # if validator.registration(username, password) and users.create_new_user(username, password):
    #     configure_user_session(username)
    #     return redirect(url_for("main"))
    #     # , error_msg="Registration Successful!")
    #     # return render_template("registration_success.html", title="Registration Successful!")
    # else:
    #     return render_template("home.html", title="Welcome to Easy Text Recognition",
    #                            error_msg="Registration could not be completed at this time. Please try again later")
|
996,461 | 9b4eb876e4dbb27afc3fa40393fc227f77d1f5bb | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, division
import unicodedata
from nltk import word_tokenize
import sys, re
import pandas as pd
import numpy as np
import json
import argparse
import json
import torch
'''
Change of this file.
1. simplify the preprocessing process
2. generate one sentence everytime.
3. remove the degree of lemmatize
4.
5. Down sampling the popular response.
'''
#text = clean_str(text.strip()) if clean else text.strip()
def tokenize_url(instring):
    """Replace URL-like substrings with the placeholder token _url_."""
    scheme_url = re.compile(r'http[s]?:(//)?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', re.IGNORECASE)
    pathed_url = re.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|(((news|telnet|nttp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\\.)[-A-Za-z0-9\\.]+)(:[0-9]*)?/[-A-Za-z0-9_\\$\\.\\+\\!\\*\\(\\),;:@&=\\?/~\\#\\%]*[^]'\\.}>\\),\\\"]")
    bare_host = re.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|(((news|telnet|nttp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\\.)[-A-Za-z0-9\\.]+)(:[0-9]*)?")
    # most-specific pattern first: scheme-prefixed, then pathed, then bare host/IP
    for url_pattern in (scheme_url, pathed_url, bare_host):
        instring = re.sub(url_pattern, '_url_', instring)
    return instring
def tokenize_email(instring):
    """Replace email addresses with the placeholder token _email_."""
    email_pattern = re.compile(r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)")
    return email_pattern.sub('_email_', instring)
def tokenize_date(instring):
    """Replace date-like substrings (e.g. 12/25, 12/25/2020, 3/4/****)
    with the placeholder token _date_."""
    date_pattern = re.compile(r'[0-9]+\/[0-9]+(\/\*\*\*\*|\/[0-9]+)?', re.IGNORECASE)
    return date_pattern.sub('_date_', instring)
def tokenize_cost(instring):
    """Replace dollar amounts and per-month prices with the token _cost_."""
    # per-month prices first (the $ is optional there), then plain $ amounts
    monthly_price = re.compile(r'\$?[0-9]+(\.[0-9]+)?\/mo(nth)?', re.IGNORECASE)
    result = monthly_price.sub('_cost_', instring)
    dollar_amount = re.compile(r'\$[0-9]+(\.[0-9]+)?')
    return dollar_amount.sub('_cost_', result)
def tokenize_ispeed(instring):
    """Replace internet-speed figures (e.g. 100mbps, 2.5 mb/s) with _ispeed_."""
    speed_pattern = re.compile(r'[0-9]+(\.[0-9]+)? ?(mb(\/)?s|[km]bps)', re.IGNORECASE)
    return speed_pattern.sub('_ispeed_', instring)
def tokenize_phonenum(instring):
    """Replace phone-number-like substrings (digits or masked '*') with
    the placeholder token _phone_."""
    # 7/10-digit numbers with optional -, . or space separators
    digit_number = re.compile(r'([0-9*]{3}[-\.\s]??[0-9*]{3}[-\.\s]??[0-9*]{4}|[0-9*]{1,2}[-\.\s]??[0-9*]{3}[-\.\s]??[0-9*]{3}[-\.\s]??[0-9*]{4})')
    instring = digit_number.sub('_phone_', instring)
    # dash-separated numbers that may contain letters (e.g. vanity/masked)
    masked_number = re.compile(r'([a-zA-Z0-9*]{3}[-][a-zA-Z0-9*]{3}[-][a-zA-Z0-9*]{4}|[0-9*]{1,2}[-][a-zA-Z0-9*]{3}[-][a-zA-Z0-9*]{3}[-][a-zA-Z0-9*]{4})')
    instring = masked_number.sub('_phone_', instring)
    # parenthesised-area-code pattern: compiled but never applied
    reg2 = re.compile(r'(\([a-zA-Z0-9*]{3}\)\s*[a-zA-Z0-9*]{3}[-\.\s]??[a-zA-Z0-9*]{4}|[a-zA-Z0-9*]{3}[-\.\s]??[a-zA-Z0-9*]{4})')
    return instring
# Precompiled patterns used by preclean_text to substitute numeric
# placeholder tokens (_data_, _percentage_, _day_, _year_, _num_,
# _account_, _time_).
rgx_DataVolume = re.compile ('[0-9]*gb', re.IGNORECASE)
rgx_Percentage = re.compile ('[0-9]+\.[0-9]*%')
rgx_Day = re.compile ('[0-9]?[0-9](st|nd|rd|th)', re.IGNORECASE)
#rgx_Month = re.compile ('(january)|(jan)|(february)|(feb)|(march)|(mar)|(april)|(apr)|(may)|(june)|(jun)|(july)|(jul)|(august)|(aug)|(september)|(sep)|(october)|(oct)|(november)|(nov)|(december)|(dec)', re.IGNORECASE)
rgx_Year = re.compile ('(19[0-9]{2})|(20[0-9]{2})')
rgx_Num = re.compile(r'[0-9]+(\.[0-9]+)?')
rgx_Accountnum = re.compile(r'[0-9*]{13,18}')  # long digit runs = account numbers
rgx_Time = re.compile(r'([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]')
# -----------------------------------------------------
def preclean_text (_text):
    """Normalise raw conversation text: expand/split contractions, replace
    URLs, emails, dates, costs, speeds, phone/account numbers and other
    numerics with placeholder tokens, and pad punctuation with spaces.

    Returns the cleaned, whitespace-collapsed, stripped string.  The order
    of substitutions matters (e.g. URLs before bare numbers).
    """
    # nltk.word_tokenize doesn't seem to handle these correctly. ??
    # Tokenization/string cleaning
    _text = _text.replace ("I'm ", "I am ")
    _text = re.sub(r"\'s", " \'s", _text)
    _text = re.sub(r"\'ve", " \'ve", _text)
    _text = re.sub(r"n\'t", " n\'t", _text)
    # BUG FIX: the replacement used to be " 'e", which mangled
    # "you're" -> "you 'e"; it now keeps the full " 're" clitic,
    # consistent with 's / 've / n't / 'd / 'll above.
    _text = re.sub(r"\'re", " \'re", _text)
    _text = re.sub(r"\'d", " \'d", _text)
    _text = re.sub(r"\'ll", " \'ll", _text)
    _text = re.sub(r"\s{2,}", " ", _text)
    _text = re.sub(r"[-]{8,}", "--", _text)
    # placeholder substitutions, most specific first
    _text = tokenize_url(_text)
    _text = tokenize_email(_text)
    _text = tokenize_date(_text)
    _text = tokenize_cost(_text)
    _text = tokenize_ispeed(_text)
    _text = re.sub(rgx_Accountnum, "_account_", _text)
    _text = tokenize_phonenum(_text)
    _text = re.sub(rgx_DataVolume, "_data_", _text)
    _text = re.sub(rgx_Time, "_time_", _text)
    _text = re.sub(rgx_Percentage, "_percentage_", _text)
    _text = re.sub(rgx_Day, "_day_", _text)
    #_text = re.sub(rgx_Month, "_month_", _text)
    _text = re.sub(rgx_Year, "_year_", _text)
    _text = re.sub(rgx_Num, "_num_", _text)
    # pad punctuation so downstream tokenisers see separate tokens.
    # NOTE(review): the "\(", "\)" and "\?" replacements insert a literal
    # backslash into the text (replacement-template escaping); some
    # pipelines keep "\(" as a token deliberately -- confirm before
    # changing to plain " ( ".
    _text = re.sub(r",", " , ", _text)
    _text = re.sub(r"!", " ! ", _text)
    _text = re.sub(r"\(", " \( ", _text)
    _text = re.sub(r"\)", " \) ", _text)
    _text = re.sub(r"\?", " \? ", _text)
    _text = _text.replace ('.', ' . ')
    #_text = _text.replace (',', ' , ')
    _text = _text.replace (':', ' : ')
    _text = _text.replace (';', ' ; ')
    #_text = _text.replace ('?', ' ? ')
    #_text = _text.replace ('!', ' ! ')
    #_text = _text.replace ('(', ' ( ')
    #_text = _text.replace (')', ' ) ')
    _text = _text.replace ('"', ' " ')
    _text = _text.replace ('[', ' [ ')
    _text = _text.replace (']', ' ] ')
    _text = _text.replace ('{', ' { ')
    _text = _text.replace ('}', ' } ')
    _text = _text.replace ('-', ' - ')
    _text = _text.replace ('=', ' = ')
    _text = _text.replace ('+', ' + ')
    _text = _text.replace ('*', ' * ')
    _text = _text.replace ('~', ' ~ ')
    _text = _text.replace ('|', ' | ')
    _text = _text.replace ('#', ' # ')
    _text = _text.replace ('\\n', ' ')
    _text = _text.replace ('\\', ' ')
    _text = _text.replace ('…', ' ')
    _text = _text.replace ('“', ' ')
    _text = _text.replace ('”', ' ')
    # redundant with the regex comma-padding above, but harmless
    _text = _text.replace (',', ' , ')
    #_text = _text.replace ('_', ' _ ')
    _text = _text.replace ('#', ' # ')
    '''
    _text = _text.replace ('’', ' \' ')
    _text = _text.replace ('\'', ' \' ')
    '''
    _text = re.sub(r"\s{2,}", " ", _text)
    return _text.strip()
# -----------------------------------------------------
def load_conversations(csv_file='/D/data/autosuggest_data/cc/cc_20170204/'):
    """Read a dialogue CSV and return the conversation rows as a DataFrame.

    Auto-generated rows are dropped, only the key/speaker-flag/text columns
    are kept (with 'rowkey' renamed to 'conversationid'), and rows with a
    missing 'text' value are removed.

    :param csv_file: Path of the CSV to read.
    :return: DataFrame with columns ['conversationid', 'eventflagfromrep', 'text'].
    """
    frame = pd.read_csv(csv_file)
    print('Finish reading csv file: {}.'.format(csv_file))
    #df = df[["RowKey","eventflagfromrep","text"]]
    # Keep only human-authored rows (isautogenerated is falsy).
    keep_mask = [not flag for flag in frame['isautogenerated']]
    frame = frame[keep_mask]
    frame = frame[["rowkey", "eventflagfromrep", "text"]]
    frame.columns = ["conversationid", "eventflagfromrep", "text"]
    # Drop rows whose utterance text is missing.
    frame = frame[frame.text.notnull()]
    return frame
def conversation_save(data1, file, args, MAX_wps = 50, MAX_turn =50, saving_starts_turnn=6 ):
    """Build (context, agent-reply) training pairs from conversation rows and
    serialize them with torch.save.

    Rows are grouped by 'conversationid' (rows of one conversation are assumed
    contiguous and in turn order).  Customer and agent utterances are wrapped
    in speaker delimiter tokens; from turn `saving_starts_turnn` onward each
    agent utterance yields one training pair whose context is the (truncated)
    conversation so far.  Writes a full '.pt' file and a 200-pair '_debug.pt'
    subset to args.outdir, then prints simple length histograms.

    :param data1: DataFrame with columns 'conversationid', 'eventflagfromrep'
        (truthy for agent turns) and 'text'.
    :param file: Input file name; its prefix before '_' tags the output names.
    :param args: Parsed CLI args providing `outdir` and `version`.
    :param MAX_wps: Max words kept per utterance (the tail is kept).
    :param MAX_turn: Max utterances kept per saved context (the tail is kept).
    :param saving_starts_turnn: First turn index at which pairs are emitted.
    """
    conversation_begin_symbol = "__SOC__"
    customer_begin_symbol = "<cus__"
    customer_end_symbol = "__cus>"
    agent_begin_symbol = "<agent__"
    agent_end_symbol = "__agent>"
    indicator = file.split('_')[0]
    #fileout = args.dir+indicator+'_v'+args.version
    #f_tgt = open(args.outdir +'/'+ 'tgt-'+indicator+'_v'+args.version+'.txt', 'w')
    #f_src = open(args.outdir +'/'+ 'src-'+indicator+'_v'+args.version+'.txt', 'w')
    conv_n = 0            # number of distinct conversations seen
    pairn = 0             # number of (context, reply) pairs emitted
    context_stats = []    # flattened context lengths, for the histogram
    Autt_stats = []       # agent utterance lengths, for the histogram
    utter_n = 0
    old_id = -1 #data1.iloc[0]['conversationid']
    last_speaker = int(data1.iloc[0]['eventflagfromrep'])
    context = []
    conversation = []
    replies = []
    all_turn = []
    turns = []
    speaker = []
    all_speaker = []
    #with open('prob_dict.json', 'r') as f:
    #    prob_dict = json.load(f)
    #print(prob_dict)
    for i in range(len(data1)):
        #print(i)
        new_id = data1.iloc[i]['conversationid']
        this_speaker = int(data1.iloc[i]['eventflagfromrep'])
        text = data1.iloc[i]['text']
        # Clean and whitespace-tokenize; keep only the last MAX_wps words.
        utt = preclean_text(text.lower()).split(' ')
        if len(utt)> MAX_wps:
            utt = utt[-MAX_wps:]
        if new_id != old_id:
            # New conversation: reset the per-conversation accumulators.
            conv_n += 1
            utter_n = 0
            context=[]
            turns = []
            speaker = []
            #context = conversation_begin_symbol
            # NOTE(review): the begin symbol is appended as a bare string while
            # utterances are appended as lists, so the flattening comprehension
            # below iterates over its characters -- confirm this is intended.
            context.append(conversation_begin_symbol)
            turns.append(utter_n)
            speaker.append(this_speaker)
        # customer is speaking
        if this_speaker == False:
            c_utt = [customer_begin_symbol] + utt + [customer_end_symbol]
            #context = context + ' ' + c_utt
            context.append(c_utt)
            turns.append(utter_n)
            speaker.append(this_speaker)
        if this_speaker == True:
            a_utt = [agent_begin_symbol] + utt + [agent_end_symbol]
            if utter_n >= saving_starts_turnn:
                '''
                #'save the status'
                if a_utt in prob_dict:
                    #print(a_utt)
                    p = prob_dict[a_utt]
                    if p<1 and np.random.binomial(1, p, 1) == 0:
                        #print(a_utt)
                        continue
                '''
                pairn +=1
                Autt_stats.append(len(a_utt))
                context_arr = [w for sent in context for w in sent]
                context_stats.append(len(context_arr))
                if len(context) > MAX_turn:
                    context = context[-MAX_turn:]
                # NOTE(review): `context` (and `turns`/`speaker`) keep being
                # mutated after being stored here -- the agent reply is appended
                # just below -- so previously saved contexts can keep growing
                # until the next truncation or conversation reset.  Verify this
                # aliasing is intended (a copy may be wanted).
                conversation.append(context)
                all_turn.append(turns)
                all_speaker.append(speaker)
                replies.append(a_utt)
            context.append(a_utt)
            turns.append(utter_n)
            speaker.append(this_speaker)
        utter_n += 1
        old_id = new_id
        last_speaker = this_speaker
    # Serialize the full dataset plus a small 200-pair debug subset.
    filename=args.outdir +'/'+ 'conv-'+indicator+'_v'+args.version+'.pt'
    data = {'context':conversation, 'replies':replies, 'speaker':all_speaker, 'conv_turns':all_turn}
    torch.save(data, filename)
    filename2=args.outdir +'/'+ 'conv-'+indicator+'_v'+args.version+'_debug.pt'
    data_debug = {'context':conversation[:200], 'replies':replies[:200], 'speaker':all_speaker[:200], 'conv_turns':all_turn[:200]}
    torch.save(data_debug, filename2)
    print('In total {} conversations'.format(conv_n))
    print('Built {} seq-to-seq pairs'.format(pairn))
    print('==simple data stats: ==')
    print('Context: ')
    hist, bin_edges = np.histogram(context_stats, bins=5)
    print('length ranges of: ')
    print(bin_edges)
    print('Counts of context: ')
    print(hist)
    print('Replies: ')
    hist, bin_edges = np.histogram(Autt_stats, bins=5)
    print('length ranges of: ')
    print(bin_edges)
    print('Counts of replies: ')
    print(hist)
    print('File saved to: {}'.format(filename))
    #return pairs
def main ():
    """Command-line driver: convert each listed raw dialogue CSV into
    serialized seq2seq conversation data under the output directory."""
    parser = argparse.ArgumentParser(description='process from raw data for seq2seq training')
    parser.add_argument('-indir', default='/D/home/lili/mnt/DATA/convaws/dialogue_csv', type=str, help='location of the file, e.g awsnas')
    parser.add_argument('-outdir', default='/D/home/lili/mnt/DATA/convaws/convdata', type=str, help='location of the file, e.g awsnas')
    parser.add_argument('--files', default=[], nargs='+', type=str, help='name of files to process')
    parser.add_argument('--version', default='', type=str, help='version of the file')
    args = parser.parse_args()
    print(args.files)
    for name in args.files:
        # Load the raw conversations and emit the serialized training pairs.
        source_path = '{}/{}'.format(args.indir, name)
        frame = load_conversations(source_path)
        conversation_save(frame, name, args)
# Script entry point; main()'s return value (None on success) becomes the exit status.
if __name__ == '__main__':
    sys.exit (main ())
|
996,462 | fb981aea379bca73f7dcf5f23ff75c964f8e49c0 | #!/usr/bin/env python
# coding=utf-8
import pytesseract
from PIL import Image
# OCR demo: read ./image.jpg from the working directory and print the text
# that Tesseract extracts from it (requires the tesseract binary installed).
image=Image.open('./image.jpg')
vcode =pytesseract.image_to_string(image)
print (vcode)
|
996,463 | d0d65d2717f6ffeee17c6588f461b2fcf6798823 |
# Fixed-capacity stack backed by a preallocated list.
MAX = 10006        # maximum number of elements the stack can hold
_data = [0] * MAX  # storage slots
pos = 0            # index of the next free slot == current stack size
def push(val):
    """Store *val* on top of the stack; a full stack drops the value silently."""
    global pos
    if pos < MAX:
        _data[pos] = val
        pos += 1
def pop():
    """Remove and return the top element, or -1 when the stack is empty."""
    global pos
    if pos <= 0:
        return -1
    pos -= 1
    return _data[pos]
def size():
    # Current element count (pos always points at the next free slot).
    return pos
def top():
    """Return the top element without removing it, or -1 if the stack is empty."""
    return _data[pos - 1] if pos > 0 else -1
def empty():
    """Return 1 when the stack holds no elements, otherwise 0."""
    return 1 if pos == 0 else 0
# Command-driven exerciser: the first stdin line gives the number of commands;
# each subsequent line is one of push <v> / top / empty / pop / size.
n = int(input())
while n > 0:
    string = input()
    cmd = string.split()
    if cmd[0] == 'push':
        push(cmd[1])  # NOTE(review): the value is pushed as a string, not an int
    elif cmd[0] == "top":
        print(top())
    elif cmd[0] == "empty":
        print(empty())
    elif cmd[0] == "pop":
        print(pop())
    elif cmd[0] == "size":
        print(size())
    n -= 1
|
996,464 | e32d7d60bdc5328090431478a62b73211aed4d00 | import os
from PIL import Image, ImageDraw, ImageFont
WATERMARK_POSITION = (
"top left",
"top right",
"center",
"bottom left",
"bottom right",
)
class CustomImage:
    """The CustomImage class implements the image watermark operation.

    Attributes:
        **image** *(Image)*: The image object from PIL.
        **width** *(int)*: The width of the image.
        **height** *(int)*: The height of the image.
        **path** *(str)*: The path of the image.
        **margin** *(int)*: The margin between the image border and the watermark.
        **output_path** *(str)*: The path of the watermarked image.
    """

    def __init__(self, path, margin=25, folder="output"):
        """The constructor of the custom image object.

        :param path: The path of the image file.
        :param margin: The margin between the image border and the watermark.
        :param folder: The name of the output folder.
        :type path: str
        :type margin: int
        :type folder: str
        """
        self.image = Image.open(path)
        self.width, self.height = self.image.size
        self.path = path
        self.margin = margin
        # Output lands in a sibling folder next to the source image, same file name.
        self.output_path = os.path.join(os.path.dirname(self.path),
                                        folder,
                                        os.path.basename(self.path))

    def watermark_text(self, text, color, font_type, font_size, pos_name):
        """Write text on the image.

        :param text: The text to write on the image.
        :param color: The color of the text.
        :param font_type: The font type of the text.
        :param font_size: The font size of the text.
        :param pos_name: The position name of the text.
        :type text: str
        :type color: (int, int, int)
        :type font_type: str
        :type font_size: int
        :type pos_name: str
        :return: True if the path of the watermarked image exists else False.
        :rtype: bool
        """
        # Re-open from disk so the original self.image is never mutated.
        image = Image.open(self.path)
        drawing = ImageDraw.Draw(image)
        text = text
        font = ImageFont.truetype(font_type, font_size)
        # NOTE(review): ImageDraw.textsize was removed in Pillow 10; migrate
        # to textbbox/textlength if the Pillow dependency is upgraded.
        self.watermark_width, self.watermark_height = drawing.textsize(text, font)
        pos = self.watermark_position(pos_name)
        drawing.text(pos, text, fill=color, font=font)
        # Create the output directory on demand before saving.
        parent_dir = os.path.dirname(self.output_path)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
        image.save(self.output_path)
        return os.path.exists(self.output_path)

    def watermark_image(self, watermark_path, pos_name):
        """Add an image watermark on the image.

        Supports only PNG and JPG files.

        :param watermark_path: The path of the image watermark.
        :param pos_name: The position name of the image watermark.
        :type watermark_path: str
        :type pos_name: str
        :return: True if the path of the watermarked image exists else False.
        :rtype: bool
        """
        image = Image.open(self.path)
        watermark = Image.open(watermark_path)
        self.watermark_width, self.watermark_height = watermark.size
        pos = self.watermark_position(pos_name)
        parent_dir = os.path.dirname(self.output_path)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
        watermark_ext = os.path.splitext(watermark_path)[-1]
        if watermark_ext in (".png", ".PNG"):
            # PNG may carry transparency: composite on an RGBA canvas and force
            # the output extension to .png to preserve the alpha channel.
            transparent = Image.new('RGBA', (self.width, self.height), (0, 0, 0, 0))
            transparent.paste(image, (0, 0))
            transparent.paste(watermark, pos, mask=watermark)
            self.output_path = ".".join([os.path.splitext(self.output_path)[0], "png"])
            transparent.save(self.output_path)
        elif watermark_ext in (".jpg", ".JPG", ".jpeg", ".JPEG"):
            # JPEG has no alpha channel: paste the watermark opaquely in place.
            image.paste(watermark, pos)
            image.save(self.output_path)
        # NOTE(review): for any other extension nothing is saved, so the return
        # value reflects whether a previous output file happens to exist.
        return os.path.exists(self.output_path)

    def watermark_position(self, pos_name):
        # Translate a position name into the (x, y) pixel coordinates of the
        # watermark's top-left corner; falls through to None for unknown names.
        if pos_name == "top left":
            return self.margin, self.margin
        if pos_name == "top right":
            return self.width - self.margin - self.watermark_width, self.margin
        if pos_name == "center":
            return (round(self.width/2) - round(self.watermark_width/2),
                    round(self.height/2) - round(self.watermark_height/2))
        if pos_name == "bottom left":
            return self.margin, self.height - self.margin - self.watermark_height
        if pos_name == "bottom right":
            return self.width - self.margin - self.watermark_width, self.height - self.margin - self.watermark_height
# Manual smoke test with hard-coded local Windows paths; adjust before running elsewhere.
if __name__ == '__main__':
    img1 = CustomImage("F:/Workspaces/devenv/qt_for_python/source/_sample_images/bretagne-01.jpg")
    # img1.watermark_text(text="My watermark",
    #                     color=(0, 0, 0),
    #                     font_type="C:/Windows/Fonts/arial.ttf",
    #                     font_size=700,
    #                     pos_name="center")
    img1.watermark_image(watermark_path="F:/Workspaces/devenv/qt_for_python/source/_sample_images/python.png",
                         pos_name="center")
    img2 = CustomImage("F:/Workspaces/devenv/qt_for_python/source/_sample_images/bretagne-02.jpg")
    img2.watermark_image(watermark_path="F:/Workspaces/devenv/qt_for_python/source/_sample_images/python.jpg",
                         pos_name="top right")
|
996,465 | 6becc2073be8b0e8b40080f1d21d12a3159583a8 |
import os
import numpy as np
import pickle
import matplotlib.pyplot as plt
import matplotlib
def zero_pad(a, length):
    """Center sequence *a* inside a zero vector of the given *length*.

    The start position is length//2 - len(a)//2, clamped at 0 so a sequence
    as long as the target still starts at index zero.

    :param a: 1-D sequence to embed.
    :param length: Length of the returned zero-padded array.
    :return: numpy array of size *length* containing *a* roughly centered.
    """
    padded = np.zeros(length)
    start = max(len(padded) // 2 - len(a) // 2, 0)
    padded[start:start + len(a)] = a
    return padded
def cached(cachefile):
    """
    A function that creates a decorator which will use "cachefile" for caching the results of the decorated function "fn".
    """
    def decorator(fn):
        def wrapped(*args, **kwargs):
            # A pre-existing cache file short-circuits the call entirely.
            if os.path.exists(cachefile):
                with open(cachefile, 'rb') as handle:
                    print("using cached result from '%s'" % cachefile)
                    return pickle.load(handle)
            # No cache yet: run the function, then persist its result.
            result = fn(*args, **kwargs)
            with open(cachefile, 'wb') as handle:
                print("saving result to cache '%s'" % cachefile)
                pickle.dump(result, handle)
            return result
        return wrapped
    return decorator
def plot_confusion_matrix_raw(cm, title="", path=None, fileName='confusion_matrix.png'):
    """Render a row-normalized confusion matrix as an annotated heatmap.

    Each row of *cm* is divided by its sum so cells show the fraction of
    true-label samples assigned to each predicted label.

    :param cm: Square confusion matrix (rows = true labels, cols = predicted).
    :param title: Title drawn above the plot.
    :param path: Directory to save the figure into; if None the figure is
        only drawn, not saved.
    :param fileName: File name used when *path* is given.
    """
    colormap = "viridis"
    # Normalize each row to sum to 1 (per-true-label fractions).
    sc = 1/np.sum(cm, axis=1)
    cm_norm = sc[None].T * cm
    plt.figure()
    plt.title(title)
    plt.imshow(cm_norm, cmap=colormap)
    plt.xlabel("Predicted Label")
    plt.ylabel("True Label")
    plt.colorbar()
    plt.xticks(np.arange(len(cm_norm)))
    plt.yticks(np.arange(len(cm_norm)))
    # NOTE(review): matplotlib.cm.get_cmap was removed in matplotlib 3.9;
    # switch to matplotlib.colormaps[colormap] if matplotlib is upgraded.
    cmap = matplotlib.cm.get_cmap(colormap)
    for x in range(len(cm_norm)):
        for y in range(len(cm_norm)):
            # Round the cell value to 0/1 and invert so the annotation color
            # contrasts with the cell's background color.
            plt.text(x, y, "{:.2f}".format(cm_norm[y, x]),
                     horizontalalignment='center',
                     verticalalignment='center',
                     fontsize=10,
                     c=cmap(1-np.round(cm_norm[y, x])))
    # Bug fix: `path`/`fileName` were accepted but ignored, so the figure was
    # never written to disk.  Save when a destination directory is provided.
    if path is not None:
        plt.savefig(os.path.join(path, fileName))
|
996,466 | b35564543de5a9afcb9b650a03bb42d0e97a2cd1 | # -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
class ProtectedObjectsByEnv(object):
    """Implementation of the 'ProtectedObjectsByEnv' model.

    Number of Protected Objects by Type.

    Attributes:
        env_type (string): Environment Type.
        protected_count (int): Number of Protected Objects.
        protected_size_bytes (long|int): Size of Protected Objects.
        unprotected_count (int): Number of Unprotected Objects.
        unprotected_size_bytes (long|int): Size of Unprotected Objects.

    """

    # Maps model property names to the API's wire (JSON) property names.
    _names = {
        "env_type":'envType',
        "protected_count":'protectedCount',
        "protected_size_bytes":'protectedSizeBytes',
        "unprotected_count":'unprotectedCount',
        "unprotected_size_bytes":'unprotectedSizeBytes',
    }

    def __init__(self,
                 env_type=None,
                 protected_count=None,
                 protected_size_bytes=None,
                 unprotected_count=None,
                 unprotected_size_bytes=None,
            ):
        """Constructor for the ProtectedObjectsByEnv class"""
        self.env_type = env_type
        self.protected_count = protected_count
        self.protected_size_bytes = protected_size_bytes
        self.unprotected_count = unprotected_count
        self.unprotected_size_bytes = unprotected_size_bytes

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the object
            as obtained from the deserialization of the server's response. The
            keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.

        """
        if dictionary is None:
            return None
        # Pull each wire-format key straight from the response dictionary;
        # absent keys fall back to None via dict.get.
        return cls(
            dictionary.get('envType'),
            dictionary.get('protectedCount'),
            dictionary.get('protectedSizeBytes'),
            dictionary.get('unprotectedCount'),
            dictionary.get('unprotectedSizeBytes'),
        )
996,467 | 074441104bddc0bfd06538fc0ea004af9ddba05d | from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import math as math
import random as rand
import os
import csv
rcParams.update({'figure.autolayout': True})
# Button palette
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86']

Vth = 1       # firing threshold: a jump larger than (Vth-Vr)/2 between samples marks a reset
Vr = 0        # reset potential
N_CELLS = 5   # number of coupled neurons (columns 1..5 of the data file)

# Two display windows: the initial transient (t <= 20) and the late,
# near-steady-state behaviour (180 <= t <= 200).
windows = [(float('-inf'), 20.0), (180.0, 200.0)]
times = [[] for _ in windows]
volts = [[[] for _ in range(N_CELLS)] for _ in windows]

# Column 0 of antiphase.dat is time; columns 1..5 are the membrane voltages.
with open('antiphase.dat', newline='') as file:
    datareader = csv.reader(file, delimiter=' ')
    for row in datareader:
        t = float(row[0])
        for w, (lo, hi) in enumerate(windows):
            if lo <= t <= hi:
                times[w].append(t)
                for k in range(N_CELLS):
                    volts[w][k].append(float(row[k + 1]))

fig, ax = plt.subplots(1, 2, figsize=(16,3.5), sharey='row')

for w in range(len(windows)):
    for k in range(N_CELLS):
        ax[w].plot(times[w], volts[w][k], alpha=0.75, color=c[k], linestyle='-',
                   label='$V_{}$'.format(k + 1))
        # A spike occurs iff there was a reset (a large jump between samples):
        # draw a short vertical tick above the threshold at each spike time.
        trace = volts[w][k]
        spike_times = [times[w][i] for i in range(1, len(trace))
                       if abs(trace[i] - trace[i - 1]) > (Vth - Vr) / 2]
        for t in spike_times:
            ax[w].plot([t, t], [Vth, Vth + 0.5], alpha=0.75, color=c[k])

""" Figure Details """
ax[0].set_xlabel('Time ($10^{-2}$ seconds)', size=11)
ax[1].set_xlabel('Time ($10^{-2}$ seconds)', size=11)
ax[0].set_ylabel('Voltage $V_k, k \in \{1,..,5\}$', size=11)
fig.suptitle('Network of 5 electrically coupled neurons, $\\beta=0.1$ and $\gamma=0.1$', size=15)
ax[1].legend(loc='upper right') #bbox_to_anchor=(1, 1)
plt.tight_layout()
plt.savefig('5_neurons_anti.svg')
plt.show()
996,468 | c6c7b4e5ffc7955ad1ceb77f6c6e9be268af66b9 | import logging
import pytest
from kale.evaluate.uncertainty_metrics import evaluate_bounds, evaluate_jaccard
from kale.prepdata.tabular_transform import generate_struct_for_qbin
# from kale.utils.download import download_file_by_url
from kale.utils.seed import set_seed
# import os
LOGGER = logging.getLogger(__name__)
seed = 36
set_seed(seed)
ERRORS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UNCERTAINTIES = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
@pytest.fixture(scope="module")
def dummy_test_preds(landmark_uncertainty_tuples_path):
    """Quantile-binned predictions and error bounds for a single dummy model
    ('U-NET') over two targets, built from the shared uncertainty-tuples path
    fixture.  Returns the tuple (bins_all_targets, bounds_all_targets)."""
    bins_all_targets, bins_targets_sep, bounds_all_targets, bounds_targets_sep = generate_struct_for_qbin(
        ["U-NET"], [0, 1], landmark_uncertainty_tuples_path[2], "SA"
    )
    return bins_all_targets, bounds_all_targets
class TestEvaluateJaccard:
    """Shape and key checks for kale.evaluate.uncertainty_metrics.evaluate_jaccard:
    result dicts must be keyed 'model uncertainty-type' with one entry per bin,
    and per-target lists must have num_folds * num_targets elements."""

    # Using one uncertainty type, test numerous bins
    @pytest.mark.parametrize("num_bins", [2, 3, 4, 5])
    def test_one_uncertainty(self, dummy_test_preds, num_bins):
        jacc_dict = evaluate_jaccard(
            dummy_test_preds[0], [["S-MHA", "S-MHA Error", "S-MHA Uncertainty"]], num_bins, [0, 1], num_folds=8
        )
        all_jaccard_data = jacc_dict["Jaccard All"]
        all_jaccard_bins_targets_sep = jacc_dict["Jaccard targets seperated"]
        assert list(all_jaccard_data.keys()) == ["U-NET S-MHA"]
        assert len(all_jaccard_data["U-NET S-MHA"]) == num_bins
        assert list(all_jaccard_bins_targets_sep.keys()) == ["U-NET S-MHA"]
        assert len(all_jaccard_bins_targets_sep["U-NET S-MHA"]) == num_bins
        assert (
            len(all_jaccard_bins_targets_sep["U-NET S-MHA"][0]) == 8 * 2
        )  # because each landmark has 8 folds - they are seperate

    def test_one_fold(self, dummy_test_preds):
        # Single-fold run: per-bin lists shrink to one entry per target.
        jacc_dict = evaluate_jaccard(
            dummy_test_preds[0], [["S-MHA", "S-MHA Error", "S-MHA Uncertainty"]], 5, [0, 1], num_folds=1
        )
        all_jaccard_data = jacc_dict["Jaccard All"]
        all_jaccard_bins_targets_sep = jacc_dict["Jaccard targets seperated"]
        assert list(all_jaccard_data.keys()) == ["U-NET S-MHA"]
        assert len(all_jaccard_data["U-NET S-MHA"]) == 5
        assert list(all_jaccard_bins_targets_sep.keys()) == ["U-NET S-MHA"]
        assert len(all_jaccard_bins_targets_sep["U-NET S-MHA"]) == 5
        assert (
            len(all_jaccard_bins_targets_sep["U-NET S-MHA"][0]) == 2
        )  # because each landmark has 1 folds - they are sep

    def test_multiple_uncerts(self, dummy_test_preds):
        # Two uncertainty types: both must appear as separate result keys.
        jacc_dict = evaluate_jaccard(
            dummy_test_preds[0],
            [["S-MHA", "S-MHA Error", "S-MHA Uncertainty"], ["E-MHA", "E-MHA Error", "E-MHA Uncertainty"]],
            5,
            [0, 1],
            num_folds=1,
        )
        all_jaccard_data = jacc_dict["Jaccard All"]
        all_jaccard_bins_targets_sep = jacc_dict["Jaccard targets seperated"]
        assert list(all_jaccard_data.keys()) == ["U-NET S-MHA", "U-NET E-MHA"]
        assert len(all_jaccard_data["U-NET S-MHA"]) == len(all_jaccard_data["U-NET E-MHA"]) == 5
        assert list(all_jaccard_bins_targets_sep.keys()) == ["U-NET S-MHA", "U-NET E-MHA"]
        assert len(all_jaccard_bins_targets_sep["U-NET S-MHA"]) == len(all_jaccard_bins_targets_sep["U-NET E-MHA"]) == 5
        assert (
            len(all_jaccard_bins_targets_sep["U-NET S-MHA"][0])
            == len(all_jaccard_bins_targets_sep["U-NET E-MHA"][0])
            == 2
        )  # because each landmark has 8 folds - they are sep
class TestEvaluateBounds:
    """Shape and key checks for kale.evaluate.uncertainty_metrics.evaluate_bounds,
    mirroring TestEvaluateJaccard: one result key per 'model uncertainty-type',
    one entry per bin, and num_folds * num_targets elements per bin."""

    @pytest.mark.parametrize("num_bins", [2, 3, 4, 5])
    def test_one_uncertainty(self, dummy_test_preds, num_bins):
        bound_dict = evaluate_bounds(
            dummy_test_preds[1],
            dummy_test_preds[0],
            [["S-MHA", "S-MHA Error", "S-MHA Uncertainty"]],
            num_bins,
            [0, 1],
            num_folds=8,
        )
        all_bound_percents = bound_dict["Error Bounds All"]
        all_bound_percents_notargetsep = bound_dict["all_bound_percents_notargetsep"]
        assert list(all_bound_percents.keys()) == ["U-NET S-MHA"]
        assert len(all_bound_percents["U-NET S-MHA"]) == num_bins
        assert list(all_bound_percents_notargetsep.keys()) == ["U-NET S-MHA"]
        assert len(all_bound_percents_notargetsep["U-NET S-MHA"]) == num_bins
        assert (
            len(all_bound_percents_notargetsep["U-NET S-MHA"][0]) == 8 * 2
        )  # because each landmark has 8 folds - they are seperate

    def test_one_fold(self, dummy_test_preds):
        # Single-fold run: per-bin lists shrink to one entry per target.
        bound_dict = evaluate_bounds(
            dummy_test_preds[1],
            dummy_test_preds[0],
            [["S-MHA", "S-MHA Error", "S-MHA Uncertainty"]],
            5,
            [0, 1],
            num_folds=1,
        )
        all_bound_percents = bound_dict["Error Bounds All"]
        all_bound_percents_notargetsep = bound_dict["all_bound_percents_notargetsep"]
        assert list(all_bound_percents.keys()) == ["U-NET S-MHA"]
        assert len(all_bound_percents["U-NET S-MHA"]) == 5
        assert list(all_bound_percents_notargetsep.keys()) == ["U-NET S-MHA"]
        assert len(all_bound_percents_notargetsep["U-NET S-MHA"]) == 5
        assert (
            len(all_bound_percents_notargetsep["U-NET S-MHA"][0]) == 2
        )  # because each landmark has 1 folds - they are sep

    def test_multiple_uncerts(self, dummy_test_preds):
        # Two uncertainty types: both must appear as separate result keys.
        bound_dict = evaluate_bounds(
            dummy_test_preds[1],
            dummy_test_preds[0],
            [["S-MHA", "S-MHA Error", "S-MHA Uncertainty"], ["E-MHA", "E-MHA Error", "E-MHA Uncertainty"]],
            5,
            [0, 1],
            num_folds=8,
        )
        all_bound_percents = bound_dict["Error Bounds All"]
        all_bound_percents_notargetsep = bound_dict["all_bound_percents_notargetsep"]
        assert list(all_bound_percents.keys()) == ["U-NET S-MHA", "U-NET E-MHA"]
        assert len(all_bound_percents["U-NET S-MHA"]) == len(all_bound_percents["U-NET E-MHA"]) == 5
        assert list(all_bound_percents_notargetsep.keys()) == ["U-NET S-MHA", "U-NET E-MHA"]
        assert (
            len(all_bound_percents_notargetsep["U-NET S-MHA"])
            == len(all_bound_percents_notargetsep["U-NET E-MHA"])
            == 5
        )
        assert (
            len(all_bound_percents_notargetsep["U-NET S-MHA"][0])
            == len(all_bound_percents_notargetsep["U-NET E-MHA"][0])
            == 8 * 2
        )  # because each landmark has 8 folds - they are sep
996,469 | ca08a5357cd71b0a045c3abcce418950c921aa62 | # Functions for co-reference resolution
# Called by create_narrative_turtle.py
import uuid
import re
from typing import Union
import word2number as w2n
from dna.create_noun_turtle import create_noun_ttl
from dna.database import query_database
from dna.get_ontology_mapping import get_agent_or_loc_class, get_noun_mapping
from dna.nlp import get_head_word
from dna.queries import query_specific_noun
from dna.utilities_and_language_specific import dna_prefix, empty_string, family_members, female_titles, \
male_titles, names_to_geo_dict, ontologies_database, owl_thing2, personal_pronouns, space, underscore
def _account_for_cardinal_noun(elem_dict: dict, phrase: str, cardinal: str, alet_dict: dict, last_nouns: list,
                               last_events: list, turtle: list, ext_sources: bool) -> tuple:
    """
    Get the semantics/mapping for the object of a prepositional phrase associated with a cardinal
    number, such as "one of the band".

    :param elem_dict: The dictionary (holding the details for the noun/verb containing the
                      cardinal text)
    :param phrase: The full text of the noun phrase
    :param cardinal: The text of the cardinal
    :param alet_dict: A dictionary holding the agents, locations, events & times encountered in
                      the full narrative - For co-reference resolution; Keys = 'agents', 'locs',
                      'events', 'times' and Values vary by the key
    :param last_nouns: An array of tuples of noun texts, types, class mappings and IRIs,
                       found in the narrative
    :param last_events: An array of verb texts, mappings and IRIs from the current paragraph
    :param turtle: A list of Turtle statements which will be updated in this function if a new
                   noun is found
    :param ext_sources: A boolean indicating that data from GeoNames, Wikidata, etc. should be
                        added to the parse results if available
    :return: A tuple of the resulting noun phrase's text, spaCy type, mappings and IRI; Also,
             the Turtle is likely updated
    """
    # NOTE(review): the file imports `import word2number as w2n`, so
    # `w2n.word_to_num` raises AttributeError (the function lives in the
    # submodule word2number.w2n) and the broad except below always falls back
    # to 1 -- verify the import should be `from word2number import w2n`.
    # noinspection PyBroadException
    try:
        card_number = w2n.word_to_num(cardinal)
    except Exception:
        card_number = 1 # TODO: Improve default
    # Evaluate the object of any preposition related to the cardinal; Future: Need to handle > 1 prep?
    prep_text = elem_dict['preps'][0]['prep_details'][0]['detail_text']
    prep_type = elem_dict['preps'][0]['prep_details'][0]['detail_type']
    if card_number < 2: # Account for the cardinality
        # A cardinality of one downgrades a plural type (e.g. "one of the band" is singular).
        prep_type = prep_type.replace('PLURAL', 'SING')
    # Get the noun info for the prepositional object
    prep_ttl = []
    # check_nouns result = an array of tuples of the noun's texts, types, mappings and IRIs
    result = check_nouns({'objects': [{'object_text': prep_text, 'object_type': prep_type}]}, 'objects',
                         alet_dict, last_nouns, last_events, prep_ttl, ext_sources)[0]  # Should only need 1
    # Adjust the label to reflect the text with the cardinal, and add the stmts to the current elem_dict's Turtle
    for ttl_stmt in prep_ttl:
        if 'rdfs:label' in ttl_stmt:
            # Rewrite the label statement so it carries the full phrase (with the cardinal).
            turtle.append(f'{ttl_stmt.split(" rdfs:label")[0]} rdfs:label "{phrase}" .')
        else:
            turtle.append(ttl_stmt)
    return phrase, prep_type, result[2], result[3]  # Return the mapping and IRI
def _check_alet_dict(text: str, text_type: str, alet_dict: dict, last_nouns: list) -> (list, str):
    """
    Get the most likely co-reference for the text using the alet_dict details to
    resolve co-references/anaphora. Subject/object information (the noun, and its types
    and IRI) is returned.

    The acronym, alet, stands for agent-location-event-time.

    :param text: String holding the noun text
    :param text_type: String holding the noun type (such as 'FEMALESINGPERSON')
    :param alet_dict: A dictionary holding the agents, locations, events & times encountered in
                      the full narrative - For co-reference resolution; Keys = 'agents', 'locs',
                      'events', 'times' and Values vary by the key
    :param last_nouns: An array of tuples of noun texts, types, mappings and IRI, from the narrative
    :return: A tuple that consists of the matched noun's class mappings and IRI, or an empty
             array and string
    """
    agent_match = []  # Match of text and type
    agent_text_match = []  # Match of text only, not type
    loc_text_match = []
    event_text_match = []
    # Agents are only considered for person/org/place/group/common-noun types.
    if not text_type or 'PERSON' in text_type or text_type.endswith('ORG') or \
            text_type.endswith('GPE') or text_type.endswith('NORP') or text_type.endswith('NOUN'):
        agent_arrays = alet_dict['agents'] if 'agents' in alet_dict else []
        for agent_array in agent_arrays:
            alt_names = agent_array[0]
            agent_type = agent_array[1]
            # Pronouns are never matched by name; the text must be a known alternate name.
            if text not in personal_pronouns and text in alt_names:
                if text_type and (text_type in agent_type or agent_type in text_type):
                    agent_match.append((agent_type, agent_array[2]))  # index 2 holds the IRI
                    # A text+type match is definitive; stop scanning agents.
                    break
                else:
                    agent_text_match.append((agent_type, agent_array[2]))
    if not text_type or 'LOC' in text_type or 'GPE' in text_type or 'FAC' in text_type or 'NOUN' in text_type:
        loc_arrays = alet_dict['locs'] if 'locs' in alet_dict else []
        for loc_array in loc_arrays:
            alt_names = loc_array[0]
            loc_map = loc_array[1]
            if text in alt_names:
                loc_text_match.append((loc_map, loc_array[2]))  # index 2 holds the IRI
    if not text_type or 'EVENT' in text_type or 'NOUN' in text_type:
        event_arrays = alet_dict['events'] if 'events' in alet_dict else []
        for event_array in event_arrays:
            alt_names = event_array[0]
            if text in alt_names:
                # event_array[1] holds the class mappings and [2] holds the IRI
                event_text_match.append((event_array[1], event_array[2]))
    # Resolution priority (most recent match of each kind, via index -1):
    # exact agent (text+type) > agent text-only > location > event; when
    # nothing matched, an empty list and empty string are returned.
    return (_update_last_nouns(text, agent_match[-1][0], agent_match[-1][1], [get_agent_or_loc_class(text_type)],
                               last_nouns) if agent_match
            else (_update_last_nouns(text, agent_text_match[-1][0], agent_text_match[-1][1],
                                     [get_agent_or_loc_class(text_type)], last_nouns) if agent_text_match
                  else (_update_last_nouns(text, text_type, loc_text_match[-1][1], loc_text_match[-1][0], last_nouns)
                        if loc_text_match
                        else (_update_last_nouns(text, text_type, event_text_match[-1][1], event_text_match[-1][0],
                                                 last_nouns) if event_text_match else [], empty_string))))
def _check_criteria(text: str, last_nouns: list, looking_for_singular: Union[bool, None],
                    looking_for_female: Union[bool, None], looking_for_person: bool) -> list:
    """
    Checks the values of the nouns in the last_nouns array for matches of the specified
    gender/number criteria.
    :param text: A string with the noun text
    :param last_nouns: A list of noun texts, types, class mappings and IRIs, from the narrative
    :param looking_for_singular: Boolean indicating that a singular noun is needed; None = either
    :param looking_for_female: Boolean indicating that a female gender noun is needed; None = either
    :param looking_for_person: Boolean indicating that a 'matched' noun should be a person
    :return: Array of tuples of texts, types, class_mappings and IRIs of already processed nouns that
    match the criteria; Note that an array is returned to support matching the pronouns 'they'/'them'
    """
    poss_nouns = []
    alt_nouns = []  # Fallback nouns that do not exactly match the criteria but are 'last resort'
    # Iterate newest-first since the most recently mentioned noun is the most likely antecedent
    for noun_tuple in reversed(last_nouns):
        noun_text, noun_type, noun_mapping, noun_iri = noun_tuple
        if noun_text == 'new_line':  # new_line marks a paragraph boundary; Keep going if no match has been found
            if poss_nouns or alt_nouns:
                break
            else:
                continue
        # Pronoun text already lower case, but may be called with other text such as 'Her father'
        # Bug fix: the original used 'or' here, which only let a non-pronoun through when each
        # text contained the other (i.e., exact case-insensitive equality); 'and' permits the
        # intended partial/substring match in either direction
        if text not in personal_pronouns and (text.lower() not in noun_text.lower() and
                                              noun_text.lower() not in text.lower()):
            continue  # First match the text if not a pronoun; If no match, skip the rest of the criteria
    # Person-hood must match exactly (a person is never a fallback for a non-person or vice versa)
        if (looking_for_person and 'PERSON' not in noun_type) or \
                (not looking_for_person and 'PERSON' in noun_type):
            continue
        # Check number
        found_number = False
        if looking_for_singular is None or (looking_for_singular and 'SING' in noun_type) or \
                (not looking_for_singular and 'PLURAL' in noun_type):
            found_number = True
        # Check gender; note that 'FEMALE' must be tested first since it contains the substring 'MALE'
        found_gender = False
        if looking_for_female is None or (looking_for_female and 'FEMALE' in noun_type) or \
                (not looking_for_female and 'FEMALE' not in noun_type and 'MALE' in noun_type):
            found_gender = True
        # Check criteria
        if found_gender and found_number:
            poss_nouns.append(noun_tuple)
        elif found_gender or found_number:
            alt_nouns.append(noun_tuple)
    if poss_nouns:
        return [poss_nouns[0]] if looking_for_singular else poss_nouns
    elif alt_nouns:
        return [alt_nouns[0]] if looking_for_singular else alt_nouns
    return []
def _check_last_nouns(text: str, text_type: str, last_nouns: list) -> list:
    """
    Get the most likely co-reference for the noun text using the last_nouns details.
    Subject/object information (the noun, and its type, mapping and IRI) is returned.
    :param text: String holding the noun text
    :param text_type: String holding the noun type (such as 'FEMALESINGPERSON')
    :param last_nouns: An array of tuples of noun texts, types, mappings and IRI, from the narrative
    :return: An array of tuples of 'matched' noun mappings and IRIs (empty if no match is found)
    """
    # Translate the spaCy-style type string into the gender/number/person search criteria;
    # 'FEMALE' is tested before 'MALE' since the former contains the latter as a substring
    if 'FEMALE' in text_type:
        seeking_female = True
    elif 'MALE' in text_type:
        seeking_female = False
    else:
        seeking_female = None
    if 'PLURAL' in text_type:
        seeking_singular = False
    elif 'SING' in text_type:
        seeking_singular = True
    else:
        seeking_singular = None
    seeking_person = 'PERSON' in text_type
    matches = _check_criteria(text, last_nouns, seeking_singular, seeking_female, seeking_person)
    # Only the class mappings (index 2) and IRIs (index 3) are needed by callers;
    # the noun texts and entity types are used for pronoun handling elsewhere
    return [(match[2], match[3]) for match in matches]
def _check_personal_pronouns(pronoun: str, last_nouns: list) -> list:
    """
    Get the most likely co-reference(s) for the pronoun using the last_nouns details.
    Subject/object information (the noun, and its types, mappings and IRI) is returned.
    :param pronoun: String holding the pronoun text
    :param last_nouns: An array of tuples of noun texts, types, mappings and IRI, from the narrative
    :return: Array of tuples of text, spaCy type, class_mappings and IRIs of already processed nouns
    that match the criteria; an array is returned to support matching the pronouns 'they'/'them'
    """
    lowered = pronoun.lower()
    candidates = []
    # Note that first-person singular matches capitalized 'I' exactly, by design
    if pronoun == 'I' or lowered in ('me', 'myself', 'my'):
        candidates.append(('Narrator', 'SINGPERSON', ':Person', ':Narrator'))
    elif lowered in ('we', 'us', 'ourselves', 'our'):
        # Find singular or plural person nouns (any gender), plus the narrator themselves
        candidates.extend(_check_criteria(lowered, last_nouns, None, None, True))
        candidates.append(('Narrator', 'SINGPERSON', ':Person', ':Narrator'))
    elif lowered in ('they', 'them', 'themselves', 'their'):
        # Give preference to persons (any gender or number); fall back to non-persons
        person_matches = _check_criteria(lowered, last_nouns, None, None, True)
        candidates.extend(person_matches if person_matches
                          else _check_criteria(lowered, last_nouns, None, None, False))
    elif lowered in ('she', 'herself', 'her'):
        # Find singular, feminine, person nouns
        candidates.extend(_check_criteria(lowered, last_nouns, True, True, True))
    elif lowered in ('he', 'himself', 'him'):
        # Find singular, masculine, person nouns
        candidates.extend(_check_criteria(lowered, last_nouns, True, False, True))
    elif lowered in ('it', 'itself', 'its'):
        # Find singular, non-person nouns (no gender)
        candidates.extend(_check_criteria(lowered, last_nouns, True, None, False))
    # Deduplicate (duplicates may arise from duplicates in last_nouns) while preserving order
    deduped = []
    for candidate in candidates:
        if candidate not in deduped:
            deduped.append(candidate)
    return deduped
def _process_family_role(head_text: str, full_text: str, person_type: str, alet_dict: dict) -> tuple:
    """
    Return the noun information for individual(s) in a family role.
    :param head_text: String holding the full text's head word's text
    :param full_text: String holding the full text
    :param person_type: String holding the noun type from spaCy (such as 'FEMALESINGPERSON')
    :param alet_dict: A dictionary holding the agents, locations, events & times encountered in
    the full narrative; only the 'agents' entries are used here - each is an array whose index 0
    holds the agent's alternate names, index 1 the entity type and index 2 the IRI
    :return: A tuple holding the text, spaCy type, class_mappings and an IRI for family members
    in the specified role; an empty tuple if no (or an ambiguous) match is found
    """
    if 'agents' not in alet_dict:  # Nothing to check
        return tuple()
    matches = []
    if head_text in family_members:  # Looking for singular family role
        # Family-role agents carry the role name in their IRI (e.g. '..._mother')
        role_marker = f'_{head_text}'
        for agent_details in alet_dict['agents']:
            if role_marker in agent_details[2]:  # index 2 holds the IRI
                matches.append((full_text, person_type, [':Person'], agent_details[2]))
    # TODO: Handle family role plurals
    # Only return a match when it is unambiguous (exactly one agent in the role)
    return matches[0] if len(matches) == 1 else tuple()
def _remove_title_from_name(titles: tuple, text: str) -> str:
    """
    Check for a male/female title (such as 'Ms' or 'Mr') in the noun string, and if present, remove it.
    :param titles: Tuple of male or female titles
    :param text: String holding the noun text
    :return: The updated text with the title removed (if present) or the original text
    """
    for honorific in titles:
        # Prefer the period-terminated form (e.g. 'Mr.') so the dot is removed as well
        dotted = f'{honorific}.'
        if dotted in text:
            return text.replace(dotted, empty_string).replace('  ', space).strip()
        if honorific in text:
            return text.replace(honorific, empty_string).replace('  ', space).strip()
    return text
def _separate_possessives(text: str) -> (dict, str):
    """
    If a noun text contains a possessive reference, separate it out, since proper noun processing will
    override the details of the noun.
    :param text: String holding the noun text
    :return: A tuple with a dictionary holding the noun to which a possessive is a modifier (the noun
    is the key, and the possessive is the value; this is a dictionary since a clause may have more
    than 1 possessive), and a string with the possessive(s) removed from the noun text
    """
    possessive_dict = dict()  # Dictionary of nouns (keys) with their possessive modifiers (values)
    if '/poss/' not in text:
        return possessive_dict, text
    revised_words = []
    tokens = text.split()
    for index, token in enumerate(tokens):
        if '/poss/' in token:
            if index + 1 < len(tokens):
                # The token FOLLOWING the possessive is the modified noun; the possessive itself
                # (with the '/poss/' marker stripped) becomes the dictionary value
                possessive_dict[tokens[index + 1]] = token.replace('/poss/', empty_string)
            else:
                # Robustness fix: a trailing '/poss/' token previously raised IndexError;
                # with no following noun to modify, keep the cleaned token in the text
                revised_words.append(token.replace('/poss/', empty_string))
        else:
            revised_words.append(token)
    return possessive_dict, space.join(revised_words)
def _update_last_nouns(text: str, text_type: str, text_iri: str, class_maps: list, last_nouns: list) -> (list, str):
"""
Update the last_nouns array and return the class mappings and IRI.
:param text: String holding the noun text
:param text_type: String holding the noun type (such as 'FEMALESINGPERSON')
:param text_iri: String holding the noun IRI
:param class_maps: An array of strings holding the mapping(s) to the DNA ontology for the text
:param last_nouns: An array of tuples of noun texts, types, mappings and IRI, from the narrative
:return: A tuple that consists of the matched noun's class mappings and IRI, or an empty array and string
"""
last_nouns.append((text, text_type, class_maps, text_iri))
return class_maps, text_iri
def check_event(text: str, last_events: list) -> (list, str):
    """
    Get a possible verb/event mapping for the noun and check it against any events
    (from the current paragraph) that have a type = mapping.
    :param text: The text which is possibly mapped to an event
    :param last_events: The list/array of tuples defining event types and IRIs from the
    current paragraph
    :return: A tuple specifying the event class mappings and IRI if there is a type match,
    or an empty list and string otherwise
    """
    # Get the event class to which the noun may be mapped
    ontol_classes, noun_ttl = get_noun_mapping(text, empty_string, False)
    if not ontol_classes:
        return [], empty_string
    # Collect the IRIs of paragraph events whose type matches one of the noun's classes
    matching_iris = [event_iri for event_type, event_iri in last_events
                     if event_type in ontol_classes]
    if matching_iris:
        # The most recent matching event is the most likely referent
        return ontol_classes, matching_iris[-1]
    return [], empty_string
def check_nouns(elem_dictionary: dict, key: str, alet_dict: dict, last_nouns: list,
                last_events: list, turtle: list, ext_sources: bool) -> list:
    """
    Get the subject or object nouns (as indicated by the key input parameter) in the dictionary,
    using last_nouns, alet_dict and last_events details to attempt to resolve co-references/anaphora.
    Subject/object information (the nouns and their types and IRIs) is returned.
    The order of checking for a match is: ontology instances/family roles (for named entities),
    then last_nouns, then alet_dict, then last_events. If there are no matches, a new noun entity
    is created and its Turtle statements are appended.
    The acronym, alet, stands for agent-location-event-time.
    For example, consider the sentence/chunk "She was sickly." following "Mary was born on June 12,
    1972, in Znojmo, Czechia." If the function parameters are (chunk_dictionary, 'subjects',
    alet_dict, last_events), then the tuple, 'Mary', 'FEMALESINGPERSON' and ':Mary' will be returned
    since 'she' should be resolved to Mary.
    :param elem_dictionary: The dictionary (holding the details for the noun text and type from the spaCy parse)
    :param key: Either 'subjects' or 'objects'
    :param alet_dict: A dictionary holding the agents, locations, events & times encountered in
                      the full narrative - For co-reference resolution; Keys = 'agents', 'locs',
                      'events', 'times' and Values vary by the key
    :param last_nouns: An array of tuples of noun texts, types, class mappings and IRIs,
                       found in the narrative; updated in place
    :param last_events: An array of verb texts, mappings and IRIs from the current paragraph
    :param turtle: A list of Turtle statements which will be updated in this function if a new
                   noun is found
    :param ext_sources: A boolean indicating that data from GeoNames, Wikidata, etc. should be
                        added to the parse results if available
    :return: An array of tuples of the noun's texts, types, mappings and IRIs (also,
             the last_nouns and last_events arrays may be updated)
    """
    nouns = []
    for elem in elem_dictionary[key]:  # The subject or object nouns
        elem_key = key[0:-1]  # Create dictionary key = 'subject' or 'object'
        elem_type = elem[f'{elem_key}_type']
        elem_text = elem[f'{elem_key}_text']
        # Get rid of titles (such as Ms, Miss, Mr, ...)
        # 'FEMALE' is tested first since it contains the substring 'MALE'
        if 'FEMALE' in elem_type:
            elem_text = _remove_title_from_name(female_titles, elem_text)
        elif 'MALE' in elem_type:
            elem_text = _remove_title_from_name(male_titles, elem_text)
        head_lemma, head_text = get_head_word(elem_text)
        # poss_dict = Dictionary of nouns (keys) with their possessive modifiers (values)
        # Revised elem_text = noun text with possessives removed
        # NOTE(review): poss_dict is not referenced below - confirm whether possessive
        # handling is intentionally deferred to another routine
        poss_dict, elem_text = _separate_possessives(elem_text)
        new_tuple = tuple()
        possible_name = empty_string  # For a proper name, may contain shortened form = given + surname (any order)
        if elem_type == 'CARDINAL':  # For example, 'one' in 'he has one' or in 'one of the band'
            if 'preps' in elem:
                # A prepositional phrase (e.g. 'one OF the band') ties the cardinal to another noun
                new_tuple = _account_for_cardinal_noun(elem, elem_text, head_lemma,
                                                       alet_dict, last_nouns, last_events, turtle, ext_sources)
            else:
                # Bare cardinal - mint a new owl:Thing with a sanitized, UUID-suffixed IRI
                iri = re.sub(r'[^:a-zA-Z0-9_]', '_', f':{elem_text}_{str(uuid.uuid4())[:13]}').replace('__', '_')
                new_tuple = (elem_text, 'CARDINAL', [owl_thing2], iri)
                turtle.extend([f'{iri} a owl:Thing .',
                               f'{iri} rdfs:label "{elem_text}" .'])
        elif elem_text.lower() in personal_pronouns:
            # Array of tuples of matched text, type, mappings and IRIs
            new_tuples = _check_personal_pronouns(elem_text, last_nouns)
            nouns.extend(new_tuples)
            last_nouns.extend(new_tuples)
            continue  # More than 1 new tuple, so handled specifically in this code block; No need to 'drop through'
        # Not a pronoun; Check for a match in instances of the ontology
        elif ('PERSON' in elem_type or elem_type.endswith('GPE') or
              elem_type.endswith('ORG') or elem_type.endswith('NORP')):
            if space in head_lemma:
                # Get last two words in the name (for given+surname or surname+given name, Eastern or Western ordering)
                names = head_lemma.split(space)
                possible_name = f'{names[-2]} {names[-1]}'
            match_iri, match_type = check_specific_match(head_lemma, elem_type)
            if not match_iri and possible_name:
                match_iri, match_type = check_specific_match(possible_name, elem_type)
            if match_iri:
                new_tuple = (elem_text, elem_type, match_type, match_iri)
            else:
                # Check for family role and match to a name
                new_tuple = _process_family_role(head_text, elem_text, elem_type, alet_dict)
        # Resolution cascade: each step only runs if all previous steps failed
        if not new_tuple:
            # No match - Try to match text and type in last_nouns
            match_noun_tuples = _check_last_nouns(elem_text, elem_type, last_nouns)
            if match_noun_tuples:
                new_tuple = (elem_text, elem_type, match_noun_tuples[0][0], match_noun_tuples[0][1])
            elif possible_name:
                # Also check given + surname
                match_noun_tuples = _check_last_nouns(possible_name, elem_type, last_nouns)
                if match_noun_tuples:
                    new_tuple = (possible_name, elem_type, match_noun_tuples[0][0], match_noun_tuples[0][1])
        if not new_tuple:
            # No match - Try to match text and type in alet_dict
            match_maps, match_iri = _check_alet_dict(elem_text, elem_type, alet_dict, last_nouns)  # Updates last nouns
            if match_iri:
                new_tuple = (elem_text, elem_type, match_maps, match_iri)
            elif possible_name:
                # Also check given + surname
                match_maps, match_iri = _check_alet_dict(possible_name, elem_type, alet_dict, last_nouns)
                if match_iri:
                    new_tuple = (possible_name, elem_type, match_maps, match_iri)
        if not new_tuple:
            # No match - Check if the noun is aligned with an event that has already been described
            event_classes, event_iri = check_event(elem_text, last_events)
            if event_iri:
                new_tuple = (elem_text, elem_type, event_classes, event_iri)
        if not new_tuple:
            # No match - Create new entity
            # IRI = sanitized lowercase text plus a UUID fragment for uniqueness
            iri = re.sub(r'[^:a-zA-Z0-9_]', underscore, f':{elem_text.lower()}_{str(uuid.uuid4())[:13]}').\
                replace('__', '_')
            noun_mappings, noun_turtle = create_noun_ttl(iri, elem_text, elem_type, alet_dict, ext_sources)
            new_tuple = (elem_text, elem_type, noun_mappings, iri)
            turtle.extend(noun_turtle)
        nouns.append(new_tuple)
        last_nouns.append(new_tuple)
    return nouns
def check_specific_match(noun: str, noun_type: str) -> (str, str):
    """
    Checks if the concept/Agent/Location/... is already defined in the DNA ontologies.
    :param noun: String holding the text to be matched
    :param noun_type: String holding the noun type (PERSON/GPE/LOC/...) from spacy's NER
    :return: A tuple consisting of the matched IRI and its class mapping (if a match is found),
    or two empty strings
    """
    # Countries are resolved directly via the GeoNames lookup table
    if noun_type.endswith('GPE') and noun in names_to_geo_dict:
        return f'geo:{names_to_geo_dict[noun]}', ':Country'
    # PLURAL details (the '+:Collection' suffix) are ignored for the ontology query
    class_type = get_agent_or_loc_class(noun_type).replace('+:Collection', empty_string)
    query = query_specific_noun.replace('keyword', noun).replace('class_type', class_type)
    results = query_database('select', query, ontologies_database)
    if results:
        matched = results[0]
        return matched['iri']['value'].replace(dna_prefix, ':'), matched['type']['value']
    return empty_string, empty_string
|
996,470 | b78520ac7a60efc06f5b38a47f7449df544b109e | import os
Import('env lib')
# boost libraries may be named differently
BOOST_LIBS = ['boost_system','boost_date_time','boost_program_options','boost_filesystem']
if hasattr(os,'uname') and os.uname()[0] == 'Darwin':
BOOST_LIBS = [x + "-mt" for x in BOOST_LIBS]
# clone environment and add libraries for modules
menv = env.Clone()
menv.Append(CPPPATH=['#'],
LIBS=['jack','samplerate','hdf5','hdf5_hl','sndfile','zmq','pthread'] + BOOST_LIBS,
)
programs = {'jdelay' : ['jdelay.cc'],
'jdetect' : ['jdetect.cc'],
'jstim' : ['jstim.cc'],
'jrecord' : ['jrecord.cc'],
'jclicker' : ['jclicker.cc'],
'jmonitor' : ['monitor_client.c'],
'jfilter' : ['jfilter.cc'],
'jflip' : ['jflip.cc']
}
out = []
for progname,srcs in programs.items():
prog = menv.Program(progname,srcs+[lib])
menv.Alias(progname,prog)
out.append(prog)
env.Alias('install', env.Install(env['BINDIR'],prog))
env.Alias('modules',out)
|
996,471 | c02da3824e59a4e3b6590e0150e83cade351b2d8 | '''
desispec.image
==============
Lightweight wrapper class for preprocessed image data.
'''
import copy
import numpy as np
from desispec.maskbits import ccdmask
from desispec import util
class Image(object):
    """Lightweight wrapper for a preprocessed 2D CCD image: pixel values,
    inverse variance, pixel mask, readout noise, camera label and metadata.
    Supports 1D/2D slicing, which returns a new Image with consistent metadata.
    """

    def __init__(self, pix, ivar, mask=None, readnoise=0.0, camera='unknown',
                 meta=None):
        """
        Create Image object

        Args:
            pix : 2D numpy.ndarray of image pixels
            ivar : inverse variance of pix, same shape as pix (required)

        Optional:
            mask : 0 is good, non-0 is bad; default is (ivar==0)
            readnoise : CCD readout noise in electrons/pixel (float or array)
            camera : e.g. 'b0', 'r1', 'z9'
            meta : dict-like metadata key/values, e.g. from FITS header

        Raises:
            ValueError : if pix is not 2D, or ivar/mask shapes do not match pix
        """
        if pix.ndim != 2:
            raise ValueError('pix must be 2D, not {}D'.format(pix.ndim))
        if pix.shape != ivar.shape:
            raise ValueError('pix.shape{} != ivar.shape{}'.format(pix.shape, ivar.shape))
        if (mask is not None) and (pix.shape != mask.shape):
            raise ValueError('pix.shape{} != mask.shape{}'.format(pix.shape, mask.shape))

        self.pix = pix
        self.ivar = ivar
        self.meta = meta
        if mask is not None:
            self.mask = util.mask32(mask)
        else:
            #- No mask provided: start clean and flag zero-ivar (no-information) pixels
            self.mask = np.zeros(self.ivar.shape, dtype=np.uint32)
            self.mask[self.ivar == 0] |= ccdmask.BAD

        #- Optional parameters
        self.readnoise = readnoise
        self.camera = camera

    #- Allow image slicing
    def __getitem__(self, xyslice):
        """
        Return a new Image holding the sliced pix/ivar/mask (and readnoise,
        if it is an array). NAXIS1/NAXIS2 metadata, when present, are updated
        to the sliced shape.

        Args:
            xyslice : a slice object, or a tuple of (slice, slice) as [y, x]

        Raises:
            ValueError : if the slice is not 1D/2D or contains non-slice items
        """
        #- Slices must be a slice object, or a tuple of (slice, slice)
        if isinstance(xyslice, slice):
            pass  #- valid slice
        elif isinstance(xyslice, tuple):
            #- tuples of (slice, slice) are valid
            if len(xyslice) > 2:
                raise ValueError('Must slice in 1D or 2D, not {}D'.format(len(xyslice)))
            if not isinstance(xyslice[0], slice) or \
               not isinstance(xyslice[1], slice):
                raise ValueError('Invalid slice for Image objects')
        else:
            raise ValueError('Invalid slice for Image objects')

        pix = self.pix[xyslice]
        ivar = self.ivar[xyslice]
        mask = self.mask[xyslice]
        meta = copy.copy(self.meta)
        if np.isscalar(self.readnoise):
            readnoise = self.readnoise
        else:
            readnoise = self.readnoise[xyslice]

        #- NAXIS1 = x, NAXIS2 = y; python slices[y,x] = [NAXIS2, NAXIS1]
        if meta is not None and (('NAXIS1' in meta) or ('NAXIS2' in meta)):
            #- Bug fix: derive the new dimensions from the sliced array itself.
            #- The previous slice.stop - slice.start arithmetic raised TypeError
            #- for open-ended slices (e.g. image[:5]) where start or stop is None.
            meta['NAXIS2'], meta['NAXIS1'] = pix.shape
        return Image(pix, ivar, mask,
                     readnoise=readnoise, camera=self.camera, meta=meta)
|
996,472 | d8e412371adb810ddc618ecefac431303ea629a4 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 21:35:05 2020
@author: VICTOR
"""
import numpy as np
from numpy import *
import matplotlib.pyplot as plt
from scipy import stats
import pandas as pd
from pandas import DataFrame, Series
import seaborn as sns; sns.set()
from numpy import random
# Demonstration script: seaborn distribution, box, violin and heatmap plots.
df = random.rand(1000)

# Histogram + KDE of 1000 uniform random values
figure = sns.distplot(df, bins=10).get_figure()
figure.savefig('Data Visualization 5')
plt.show()

figure1 = sns.distplot(df, hist=True, bins=10, rug=True,
                       rug_kws={'color': 'blue', 'label': 'Rug Plot'},
                       hist_kws={'color': 'red', 'label': 'Hist Plot'},
                       kde_kws={'color': 'green', 'label': 'KDE Plot'}
                       ).get_figure()  # kde and hist are enabled by default in distplot; pass False to disable
# Bug fix: save the figure created just above (figure1); previously this line
# re-saved the first figure, so 'Data Visualization 5_1' never got the rug/kde plot
figure1.savefig('Data Visualization 5_1')
plt.show()

df2 = random.rand(1000)
sns.boxplot(df2).get_figure().savefig('BoxPlot1.png')
plt.show()
# Use orient='v' for vertical and orient='h' for horizontal
# NOTE(review): 'order' normally takes a list of category levels, not an int - confirm intent
sns.boxplot(df2, whis=np.inf, color='yellow', order=8).get_figure().savefig('BoxPlot2.png')
plt.show()
# Boxplot is usually used in stock market analysis or business analysis.
# The left-half of the box plot is the 25th percentile, the middle line is the 50th percentile,
# and the right-half of the plot is the 75th percentile.

'''Violin Plots (Combination of box plot and KDE plot)'''
df3 = random.rand(100)
sns.violinplot(df3).get_figure().savefig('Violin Plot 1')
plt.show()
'''Changing bandwith'''
sns.violinplot(df3, bw=0.2, color='red').get_figure().savefig('Violin Plot 2')
# A smaller bandwidth produces a more jagged (less smoothed) shape
plt.show()
sns.violinplot(df3, color='green', inner='stick').get_figure().savefig('Violin Plot 3')
# The thick line parts are the concentrated parts.
plt.show()

'''Heatmaps visualization'''
df4 = pd.read_csv("C:\\Users\\VICTOR\\Documents\\Programming\\Python Programming\\FlightData.csv")
print(df4)
df5 = ([df4['DISTANCE'], df4['ACTUAL_ELAPSED_TIME']])
sns.heatmap(df5).get_figure().savefig('Heat Map 1.png')
plt.show()
'''Using annotation function'''
# NOTE(review): fmt is the *string format* for cell annotations (e.g. 'd' for integers,
# '.2f' for floats) and only takes effect when annot=True is also passed - confirm
# whether annot=True was intended here
sns.heatmap(df5, fmt='d').get_figure().savefig('Heat Map 2.png')
plt.show()
'''You can also center the colormap with the center= argument, e.g. center=df5.loc[...]'''
996,473 | 063f977797be18455cb3cebeb060eeea8182ba74 | from __future__ import absolute_import
from celery import Celery
from converter.corelib import YoutubeConverter,DownloadAudioInfoDTO
from converter.coreconfig import CONFIG
from celery import shared_task
# TODO : notify the client whether the youtube conversion succeeded or failed
@shared_task
def convert_youtube_video(video_url):
    """Celery task: download/convert a YouTube video to audio and, on success,
    persist the download info keyed by the video id.

    :param video_url: URL of the YouTube video to convert
    """
    conv = YoutubeConverter(CONFIG['path'])
    result = conv.convert_youtube(video_url)
    # A 'video_id' key in the result indicates the conversion succeeded;
    # failures are currently silently ignored (see TODO above)
    if "video_id" in result:
        download_dto = DownloadAudioInfoDTO()
        download_dto.insert(result["video_id"],result)
996,474 | 8502ea2c4c640f1455ee1edd6be4acb3b5bb222c | #! /usr/bin/env python
import pymesh, argparse
import numpy as np
import fix_mesh as FM
from circ_helix import create_helix
def parse_args():
    """Build and parse the command-line arguments for the helix + sphere mesh generator.

    Returns the argparse Namespace; only --out is required, all geometry
    parameters have defaults.
    """
    parser = argparse.ArgumentParser(description='Create a sphere with circular-section helix')
    parser.add_argument('--out', help='output file name', type=str, required=True)
    # Geometry parameters, all parsed as floats.
    # NOTE(review): nL/nR are segment counts but are declared type=float,
    # matching the original interface - confirm downstream expects floats
    for flag, default in [('--pitch', 2.5),
                          ('--height', 10.0),
                          ('--nL', 64),
                          ('--nR', 6),
                          ('--radius', 1.0),
                          ('--smallRadiusStart', 0.7),
                          ('--smallRadiusEnd', 0.7),
                          ('--sphereRadius', 1.5)]:
        parser.add_argument(flag, type=float, default=default)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # Build the helical tube mesh from the command-line geometry parameters
    Hmesh, _ = create_helix(args.nL, args.height, args.radius, args.pitch,
                            args.nR, args.smallRadiusStart, args.smallRadiusEnd)
    R = args.sphereRadius
    H = args.height
    # Sphere centered at the helix's mid-height
    Smesh = pymesh.generate_icosphere(R, [0.0, 0.0, H/2], refinement_order=2)
    # Merge the two meshes into a single watertight surface
    mesh = pymesh.boolean(Hmesh, Smesh,
                          operation="union",
                          engine="igl")
    # Smooth the boolean result by Loop subdivision
    mesh = pymesh.subdivide(mesh, order=2, method="loop")
    #mesh = FM.fix_mesh(mesh, "low")
    # Remesh to a uniform target edge length of 0.1
    mesh = FM.fix_mesh_target_length(mesh, 0.1)
    pymesh.save_mesh(args.out, mesh)
996,475 | 311e6600189e4e01a9418cd830629d8aa2cc3634 | import sys
import time
import datetime
import exceptions
from mongosync.config import Config
from mongosync.logger import Logger
from mongosync.mongo_utils import get_optime, gen_namespace
from mongosync.optime_logger import OptimeLogger
try:
import gevent
except ImportError:
pass
log = Logger.get()
class Synchronizer(object):
    """ Common (abstract) MongoDB synchronizer: full sync of databases followed
    by continuous oplog replay, with optional optime checkpointing.
    Other synchronizer entities should implement methods:
        - __init__
        - __del__
        - _sync_database
        - _sync_collection
        - _sync_oplog
    NOTE(review): subclasses are also expected to provide self._src (and the
    source/destination clients) - this base class never assigns it; confirm.
    """
    def __init__(self, conf):
        """Validate the config and initialize sync state.

        :param conf: a mongosync Config instance
        :raises Exception: if conf is not a Config
        """
        if not isinstance(conf, Config):
            raise Exception('invalid config type')
        self._conf = conf
        # System databases/collections that are never synchronized
        self._ignore_dbs = ['admin', 'local']
        self._ignore_colls = ['system.indexes', 'system.profile', 'system.users']
        if conf.optime_logfilepath:
            self._optime_logger = OptimeLogger(conf.optime_logfilepath)
        else:
            self._optime_logger = None
        self._optime_log_interval = 10  # default 10s
        self._last_optime = None  # optime of the last oplog has been replayed
        self._last_optime_logtime = time.time()
        self._log_interval = 2  # default 2s
        self._last_logtime = time.time()  # use in oplog replay
    @property
    def from_to(self):
        # Human-readable "source => destination" string for log messages
        return "%s => %s" % (self._conf.src_hostportstr, self._conf.dst_hostportstr)
    @property
    def log_interval(self):
        return self._log_interval
    @log_interval.setter
    def log_interval(self, n_secs):
        # Clamp negative intervals to 0 (log on every progress check)
        if n_secs < 0:
            n_secs = 0
        self._log_interval = n_secs
    def run(self):
        """ Start to sync.
        """
        # never drop database automatically
        # you should clear the databases manually if necessary
        try:
            self._sync()
        # NOTE(review): 'exceptions' is the Python 2 built-in-exceptions module;
        # this file appears to target Python 2 - confirm before porting
        except exceptions.KeyboardInterrupt:
            log.info('keyboard interrupt')
    def _sync(self):
        """ Sync databases and oplog.

        With a configured start optime, skip the full sync and replay the
        oplog from that point; otherwise do a full database sync first.
        """
        if self._conf.start_optime:
            # TODO optimize
            log.info("locating oplog, it will take a while")
            oplog_start = self._conf.start_optime
            # Find the first oplog entry at/after the requested timestamp
            doc = self._src.client()['local']['oplog.rs'].find_one({'ts': {'$gte': oplog_start}})
            if not doc:
                log.error('no oplogs newer than the specified oplog')
                return
            oplog_start = doc['ts']
            log.info('start timestamp is %s actually' % oplog_start)
            self._last_optime = oplog_start
            self._sync_oplog(oplog_start)
        else:
            # Capture the current optime BEFORE the full sync so oplog replay
            # covers any writes that happen during the copy
            oplog_start = get_optime(self._src.client())
            if not oplog_start:
                log.error('get oplog_start failed, terminate')
                sys.exit(1)
            self._last_optime = oplog_start
            self._sync_databases()
            if self._optime_logger:
                self._optime_logger.write(oplog_start)
                log.info('first %s' % oplog_start)
            self._sync_oplog(oplog_start)
    def _sync_databases(self):
        """ Sync databases excluding 'admin' and 'local'.
        """
        host, port = self._src.client().address
        log.info('sync databases from %s:%d' % (host, port))
        for dbname in self._src.client().database_names():
            if dbname in self._ignore_dbs:
                log.info("skip database '%s'" % dbname)
                continue
            # Apply the user-configured database filter as well
            if not self._conf.data_filter.valid_db(dbname):
                log.info("skip database '%s'" % dbname)
                continue
            self._sync_database(dbname)
        log.info('all databases done')
    def _sync_database(self, dbname):
        """ Sync a database. Abstract - must be implemented by subclasses.
        """
        raise Exception('you should implement %s.%s' % (self.__class__.__name__, self._sync_database.__name__))
    def _sync_collections(self, dbname):
        """ Sync collections in the database excluding system collections.
        """
        collnames = self._src.client()[dbname].collection_names(include_system_collections=False)
        for collname in collnames:
            if collname in self._ignore_colls:
                log.info("skip collection '%s'" % gen_namespace(dbname, collname))
                continue
            if not self._conf.data_filter.valid_coll(dbname, collname):
                log.info("skip collection '%s'" % gen_namespace(dbname, collname))
                continue
            self._sync_collection(dbname, collname)
    def _sync_collection(self, dbname, collname):
        """ Sync a collection until success. Abstract - must be implemented by subclasses.
        """
        raise Exception('you should implement %s.%s' % (self.__class__.__name__, self._sync_collection.__name__))
    def _sync_oplog(self, oplog_start):
        """ Replay oplog. Abstract - must be implemented by subclasses.
        """
        raise Exception('you should implement %s.%s' % (self.__class__.__name__, self._sync_oplog.__name__))
    def _log_progress(self, tag=''):
        """ Print progress.

        Rate-limited to one log line per self._log_interval seconds; reports
        replication delay relative to the last replayed optime.
        """
        now = time.time()
        if now - self._last_logtime >= self._log_interval:
            delay = now - self._last_optime.time
            time_unit = 'second' if delay <= 1 else 'seconds'
            if tag:
                log.info('%s - sync to %s - %d %s delay - %s - %s' % (self.from_to,
                                                                      datetime.datetime.fromtimestamp(self._last_optime.time),
                                                                      delay,
                                                                      time_unit,
                                                                      self._last_optime,
                                                                      tag))
            else:
                log.info('%s - sync to %s - %d %s delay - %s' % (self.from_to,
                                                                 datetime.datetime.fromtimestamp(self._last_optime.time),
                                                                 delay,
                                                                 time_unit,
                                                                 self._last_optime))
            self._last_logtime = now
    def _log_optime(self, optime):
        """ Record optime.

        Rate-limited checkpoint: flushes the optime to the optime log file at
        most once per self._optime_log_interval seconds (no-op if no logger).
        """
        if not self._optime_logger:
            return
        now = time.time()
        if now - self._last_optime_logtime >= self._optime_log_interval:
            self._optime_logger.write(optime)
            self._last_optime_logtime = now
            log.info("flush optime into file '%s': %s" % (self._optime_logger.filepath, optime))
996,476 | b5099c8b63fd47b996313000a01ac833522fe0a6 | from django import forms
from .models import project, teams
from pagedown.widgets import PagedownWidget
class projectForm(forms.ModelForm):
    """Create/submit form for a project, with a Markdown (pagedown) editor
    for the description field."""
    project_description = forms.CharField(widget=PagedownWidget)
    class Meta:
        model = project
        fields = [
            'team_name',
            'project_title',
            'project_description',
            'source_code',
            'webpage',
            'image',
            'developing_or_developed',
            'key',
        ]
class teamsForm(forms.ModelForm):
    """Form for registering a team member with their profile links and photo."""
    class Meta:
        model = teams
        fields = [
            'Team_Name',
            'Name',
            'Facebook_Profile_Link',
            'Github_Profile_Link',
            'Linkedin_Profile_Link',
            'Image',
            'Key',
        ]
class edit(forms.ModelForm):
    """Minimal form that only asks for a project's key
    (presumably used to authorize editing - confirm against the view)."""
    class Meta:
        model = project
        fields = [
            'key',
        ]
class editForm(forms.ModelForm):
    """Edit form for an existing project; same as projectForm but without
    the 'key' field."""
    project_description = forms.CharField(widget=PagedownWidget)
    class Meta:
        model = project
        fields = [
            'team_name',
            'project_title',
            'project_description',
            'source_code',
            'webpage',
            'image',
            'developing_or_developed',
        ]
996,477 | 1fdef670b7f8ddd4f0cc48e88018855c1f4c3a48 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#iterative
class Solution:
    def preorderTraversal(self, root: TreeNode) -> List[int]:
        """Iterative preorder (root, left, right) traversal using an explicit stack.

        :param root: root of the binary tree (may be None)
        :return: list of node values in preorder
        """
        # Bug fix: removed the leftover debug print(stack) that ran on every iteration
        result = []
        stack = [root]
        while stack:
            node = stack.pop()
            if node is not None:
                result.append(node.val)
                # Push right before left so the left subtree is popped (visited) first
                if node.right is not None:
                    stack.append(node.right)
                if node.left is not None:
                    stack.append(node.left)
        return result
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#recursive
class Solution:
    def preorderTraversal(self, root: TreeNode) -> List[int]:
        """Recursive preorder (root, left, right) traversal.

        :param root: root of the binary tree (may be None)
        :return: list of node values in preorder; [] for an empty tree
        """
        def preorder(node, acc):
            # Append node value, then recurse left and right; acc is mutated in place
            if not node:
                return
            acc.append(node.val)
            preorder(node.left, acc)
            preorder(node.right, acc)
        res = []
        # Bug fix: previously 'res = preorder(root, res)' rebound res to the helper's
        # return value, which is None when root is None - an empty tree returned None
        # instead of []. The helper mutates res, so its return value must be ignored.
        preorder(root, res)
        return res
996,478 | 536ff6e0e97fc690bcaf3a9a411f32a72900f129 | """
@Project :data_visualization
@File :mpl_squares.py
@Description:绘制简单的折线图
@Author :Life
@Date :2021/4/18 14:43
"""
import matplotlib.pyplot as plt
input_values = [1, 2, 3, 4, 5]
squares = [1, 4, 9, 16, 25]
print(squares[-1])
# Plot y (squares) against x (input_values) with a thicker line.
plt.plot(input_values, squares, linewidth=3)
# Set the chart title and the axis labels.
plt.title("square of numbers", fontsize=24)
plt.xlabel("x", fontsize=14)
plt.ylabel("square", fontsize=14)
# Set the size of the tick labels on both axes.
plt.tick_params(axis="both", labelsize=14)
plt.show()
|
996,479 | 6eaf399ac2fe9461dea1aa94eb80556918a7ec34 | #!/usr/bin/python3.5
from shapedetector import ShapeDetector
import argparse
import imutils
import numpy as np
import cv2
import matplotlib.pyplot as plt
from time import sleep
def auto_canny(image, sigma=0.95):
    """Run Canny edge detection with thresholds derived from the image.

    The lower/upper thresholds are placed ``sigma`` * 100 percent below and
    above the median pixel intensity, clamped to the valid [0, 255] range,
    so no manual tuning per image is required.
    """
    median = np.median(image)
    lower = int(max(0, (1.0 - sigma) * median))
    upper = int(min(255, (1.0 + sigma) * median))
    return cv2.Canny(image, lower, upper)
if __name__ == "__main__":
    video = cv2.VideoCapture('Video_sample.mp4')
    while video.isOpened():
        ret, img = video.read()
        if not ret:
            # End of stream: read() returns (False, None); the original
            # crashed in cvtColor here instead of exiting the loop.
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        thresh = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)[1]
        edge = auto_canny(thresh)
        # OpenCV 3.x findContours returns (image, contours, hierarchy);
        # this unpacking would need adjusting for OpenCV 4.x.
        (_, cnts, _) = cv2.findContours(edge.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts:
            # Use image moments to find each contour's centroid; m00 is the
            # contour area, so skip degenerate contours to avoid div-by-zero.
            M = cv2.moments(c)
            if M['m00'] != 0:
                cx = int(M['m10'] / M['m00'])
                cy = int(M['m01'] / M['m00'])
                area = cv2.contourArea(c)
                if 32 < area < 40960:
                    print("(%3d, %3d): %3d" % (cx, cy, area))
                    cv2.circle(img, (cx, cy), 1, (0, 0, 255), 6)
                    location = '(' + str(cx) + ', ' + str(cy) + ')'
                    cv2.putText(img, location, (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
        cv2.drawContours(img, cnts, -1, (0, 255, 0), 2)
        cv2.imshow("thresh", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video.release()
cv2.destroyAllWindows() |
996,480 | 7d5b2788c4546ea8b198cbdd50d9eef279dcca25 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'Maillage et Éléments Finis'
copyright = '2020, Bertrand Thierry'
author = 'Bertrand Thierry'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinxcontrib.proof",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'fr'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
numfig = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_title = "Maillage et Éléments Finis"
# Display names for the sphinxcontrib-proof directives.
# NOTE(review): labels mix English ("Theorem") and French ("Remarque",
# "Exercice") although the document language is 'fr' -- confirm intended.
proof_theorem_types = {
    "algorithm": "Algorithm",
    "conjecture": "Conjecture",
    "corollary": "Corollary",
    "definition": "Definition",
    "example": "Example",
    "lemma": "Lemma",
    "observation": "Observation",
    "proof": "Proof",
    "property": "Property",
    "theorem": "Theorem",
    "remark":"Remarque",
    "proposition":"Proposition",
    "exercise":"Exercice",
}
# In LaTeX output, do not declare "proof" as a theorem environment.
proof_latex_notheorem = ["proof"]
#proof_html_nonumbers = ["exercise"]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
    'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.2/css/all.min.css',
    'css/proof.css',
    'css/custom.css',
]
html_js_files = [
    'js/proof.js',
    # 'js/custom.js',
    # 'js/basis-function/main.js',
    # 'js/jacobian/main.js',
    # 'js/loc2glob/main.js',
    # 'https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js',
]
# Additional stuff for the LaTeX preamble.
latex_engine = 'lualatex'
latex_elements = {}
latex_elements['preamble'] = '\\usepackage{amsmath}\n\\usepackage{amssymb}\n\\usepackage{amsthm}\n'
latex_elements['babel'] = '\\usepackage{babel}'
latex_additional_files = ['mystyle.sty', 'img/normal/normal.tex']
latex_elements['extrapackages'] = '\\usepackage{tikz}\n\\usetikzlibrary{arrows, calc, fit}\n\\usepackage{standalone}\n\\usepackage{mathrsfs}\n\\usepackage{mystyle}'
latex_toplevel_sectioning = "part"
# copy to mathjax
# MathJax macros mirroring the LaTeX preamble. Every backslash is written
# as '\\': the original mixed in single backslashes ('\Gamma_F', '\Lo',
# '{\mathbb{P}}', ...) which are invalid escape sequences -- a
# DeprecationWarning/SyntaxWarning today and a SyntaxError in future
# Python. Doubling them is value-identical since '\G', '\L', '\m', ...
# were kept literally anyway.
mathjax3_config = {
    "tex": {
        "macros": {
            'dsp': '{\\displaystyle}',
            'gD': '{g_D}',
            'gN': '{g_N}',
            'GammaN': '{\\Gamma_N}',
            'GammaD': '{\\Gamma_D}',
            'GammaF': '\\Gamma_F',
            'Lo': '{L^2(\\Omega)}',
            'Ho': '{H^1(\\Omega)}',
            'Hoz': '{H^1_{0}(\\Omega)}',
            'HoD': '{H^1_{\\GammaD}(\\Omega)}',
            'Hog': '{H^1_{\\gD}(\\Omega)}',
            'Hoo': '{H^2(\\Omega)}',
            'Vh': '{V_h}',
            'Vhz': '{V_{h,0}}',
            'VhD': '{V_{h,\\GammaD}}',
            # Entries of the form [body, n] define macros taking n arguments.
            'abs': ['{\\left|#1\\right|}', 1],
            'norm': ['{\\left\\|#1\\right\\|}', 1],
            'PS': ['{\\left(#1,#2\\right)}', 2],
            'PSL': ['{\\PS{#1}{#2}_{\\Lo}}', 2],
            'PSLd': ['{\\PS{#1}{#2}_{\\Lo^d}}', 2],
            'PSH': ['{\\PS{#1}{#2}_{\\Ho}}', 2],
            'PSV': ['{\\PS{#1}{#2}_{V}}', 2],
            'normL': ['{\\norm{#1}_{\\Lo}}', 1],
            'normLd': ['{\\norm{#1}_{(\\Lo)^d}}', 1],
            'normH': ['{\\norm{#1}_{\\Ho}}', 1],
            'normV': ['{\\norm{#1}_{V}}', 1],
            'ut': '{u_t}',
            'uh': '{u_h}',
            'vh': '{v_h}',
            'Ahh': '{A}',
            'Bh': '{B}',
            'Uh': '{U}',
            'Rb': '{\\mathbb{R}}',
            'Nb': '{\\mathbb{N}}',
            'nn': '{\\mathbf{n}}',
            'dn': '{\\partial_{\\nn}}',
            'ee': '{\\mathbf{e}}',
            'xx': '{\\mathbf{x}}',
            'yy': '{\\mathbf{y}}',
            'zz': '{\\mathbf{z}}',
            'diff': '{\\mathrm{d}}',
            'Cscr': '{\\mathscr{C}}',
            'Ccal': '{\\mathcal{C}}',
            'mphi': '{\\varphi}',
            'mphih': '{\\widehat{\\varphi}}',
            'psih': '{\\widehat{\\psi}}',
            'gh': '{\\widehat{g}}',
            'deltaij': '{\\delta_{ij}}',
            'tri': '{K}',
            'trih': '{\\widehat{K}}',
            'vertice': '{\\mathbf{s}}',
            'verticeK': ['{\\vertice^{#1}}', 1],
            'verticeh': '{\\widehat{\\vertice}}',
            'grandO': ['{O\\left(#1\\right)}', 1],
            'Nh': '{N_h}',
            'Ns': '{N_s}',
            'Nt': '{N_t}',
            'Pb': '{\\mathbb{P}}',
            'Sh': '{\\mathscr{S}_h}',
            'Th': '{\\mathscr{T}_h}',
            'Ah': '{\\mathscr{A}_h}',
            'card': '{\\textrm{card}}',
            'supp': '{\\textrm{supp}}',
            'diam': '{\\textrm{diam}}',
            'Image': '{\\textrm{Im}}',
            'locToGlob': '{\\texttt{L2G}}',
            'trihToTri': ['{T_{#1}}', 1],
            'JK': ['{J_{#1}}', 1],
            'BK': ['{B_{#1}}', 1],
            'Meh': '{\\widehat{M}^e}',
            'Deh': '{\\widehat{D}^e}',
            'Me': ['{M^e_{#1}}', 1],
            'De': ['{D^e_{#1}}', 1],
            'enstq': ['{\\left\\{#1 \\mathrel{}\\middle|\\mathrel{}#2\\right\\}}', 2]
        }
    }
}
|
996,481 | bb0c5155111e0c6ad0be0dcc95af68e9fde7e24b | from django.test import TestCase
from django.urls import reverse
from .models import State
# Two-letter codes used by the education app: the 50 states, DC, and 'BI'
# (presumably the Bureau of Indian Education -- confirm against source data).
STATES = ['AK','AL','AR','AZ','BI','CA','CO','CT','DC','DE','FL','GA','HI','IA','ID','IL','IN','KS','KY','LA','MA','MD','ME','MI','MN','MO','MS','MT','NC','ND','NE','NH','NJ','NM','NV','NY','OH','OK','OR','PA','RI','SC','SD','TN','TX','UT','VA','VT','WA','WI','WV','WY']
def create_null_states():
    """
    Populate the State table with one row per state code: zero subgroup
    cohorts, zero schools, and null subgroup rates (all_cohort is 1).
    """
    for s in STATES:
        State.objects.create(state=s,all_cohort=1,mam_cohort=0,mas_cohort=0,
        mbl_cohort=0,mhi_cohort=0,mtr_cohort=0,mwh_cohort=0,cwd_cohort=0,
        ecd_cohort=0,lep_cohort=0,num_schools=0,all_rate=0,mam_rate=None,
        mas_rate=None,mbl_rate=None,mhi_rate=None,mtr_rate=None,mwh_rate=None,
        cwd_rate=None,ecd_rate=None,lep_rate=None)
def create_states():
    """
    Populate the State table with one row per state code, using uniform
    non-null data: cohorts of 1000/100, 10 schools, and 0.9 rates.
    """
    for s in STATES:
        State.objects.create(state=s,all_cohort=1000,mam_cohort=100,mas_cohort=100,
        mbl_cohort=100,mhi_cohort=100,mtr_cohort=100,mwh_cohort=100,cwd_cohort=100,
        ecd_cohort=100,lep_cohort=100,num_schools=10,all_rate=0.9,mam_rate=0.9,
        mas_rate=0.9,mbl_rate=0.9,mhi_rate=0.9,mtr_rate=0.9,mwh_rate=0.9,
        cwd_rate=0.9,ecd_rate=0.9,lep_rate=0.9)
class EducationIndexViewTests(TestCase):
    """Tests for the education index page (graduation-rate map)."""
    def test_no_data(self):
        """
        If no data is in the database, the page still renders but there is no
        map of graduation rates.
        """
        response = self.client.get(reverse('education:index'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context.get("json_data"), None)
        self.assertContains(response, "High School Graduation")
        self.assertContains(response, "How Rates Were Calculated")
        self.assertContains(response, "Home")
        self.assertNotContains(response, '<svg id="graduation_rate_map"')
    def test_with_data(self):
        """
        If state data is in the database, make sure the content renders and a
        graph of the graduation rates is displayed.
        """
        create_states()
        response = self.client.get(reverse('education:index'))
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response.context.get("json_data"), None)
        self.assertContains(response, "High School Graduation")
        self.assertContains(response, "How Rates Were Calculated")
        self.assertContains(response, '<svg id="graduation_rate_map"')
class EducationStatesViewTest(TestCase):
    """Tests for the all-states listing page."""
    def test_no_data(self):
        """
        Make sure the page renders and gives an error message if no data is available.
        """
        response = self.client.get(reverse('education:states'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context.get("states").count(), 0)
        self.assertContains(response, "No Data Available")
        self.assertNotContains(response, "Number of Public High Schools")
    def test_with_data(self):
        """
        Make sure the page renders when the State table is populated
        (52 rows: 50 states plus DC and BI).
        """
        create_states()
        response = self.client.get(reverse('education:states'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context.get("states").count(), 52)
        self.assertContains(response, "Home")
        self.assertNotContains(response, "No Data Available")
        self.assertContains(response, "Number of Public High Schools")
        self.assertContains(response, "Mississippi")
        self.assertContains(response, "90.0%")
        self.assertContains(response, "1,000")
        self.assertContains(response, "10")
class EducationStateDetailsViewTest(TestCase):
    """Tests for the per-state detail pages (one URL per state code)."""
    def test_no_data(self):
        """
        Make sure each state page renders if there is no database data.
        """
        for s in STATES:
            response = self.client.get(reverse('education:state_detail',args=(s,)))
            self.assertEqual(response.status_code, 200)
            self.assertNotEqual(response.context.get("message"), None)
            self.assertContains(response, "Error: No data for state {}".format(s))
    def test_with_null_data(self):
        """
        Make sure each state page renders when rows exist but all subgroup
        rates are null (no demographic rows should be shown).
        """
        create_null_states()
        for s in STATES:
            response = self.client.get(reverse('education:state_detail',args=(s,)))
            self.assertEqual(response.status_code, 200)
            self.assertNotEqual(response.context.get("data"), None)
            self.assertNotEqual(response.context.get("json_data"), None)
            self.assertContains(response, "Students in 15-16 Cohort")
            self.assertNotContains(response, ">Native American</a></td>")
    def test_with_data(self):
        """
        Make sure each page renders if there is non-null data in the database.
        """
        create_states()
        for s in STATES:
            response = self.client.get(reverse('education:state_detail',args=(s,)))
            self.assertEqual(response.status_code, 200)
            self.assertNotEqual(response.context.get("data"), None)
            self.assertNotEqual(response.context.get("json_data"), None)
            self.assertContains(response, "Students in 15-16 Cohort")
            self.assertContains(response, ">Native American</a></td>")
class EducationDemographicsViewTest(TestCase):
    """Tests for the demographics overview page."""
    def test_no_data(self):
        """
        Make sure demographics page renders even if there is no data in the
        database.
        """
        response = self.client.get(reverse('education:demographics'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context.get("json_data"), None)
        self.assertEqual(response.context.get("all_cohort"), None)
        self.assertEqual(response.context.get("all_rate"), None)
        # Each demographic group contributes a <group>_cohort/_rate pair.
        for demo in State.GROUP_NAMES:
            self.assertEqual(response.context.get(demo+"_cohort"), None)
            self.assertEqual(response.context.get(demo+"_rate"), None)
        self.assertContains(response, "Home")
        self.assertContains(response, "No Data Available")
        self.assertNotContains(response, "Students in 15-16 Cohort")
    def test_with_data(self):
        """
        Make sure demographics page renders if there is data in the database.
        """
        create_states()
        response = self.client.get(reverse('education:demographics'))
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response.context.get("json_data"), None)
        self.assertNotEqual(response.context.get("all_cohort"), None)
        self.assertNotEqual(response.context.get("all_rate"), None)
        for demo in State.GROUP_NAMES:
            self.assertNotEqual(response.context.get(demo+"_cohort"), None)
            self.assertNotEqual(response.context.get(demo+"_rate"), None)
        self.assertContains(response, "Home")
        self.assertContains(response, "Students in 15-16 Cohort")
        self.assertNotContains(response, "No Data Available")
class EducationDemographicDetailsViewTest(TestCase):
    """Tests for the per-demographic-group detail pages."""
    def test_fake_group(self):
        """
        Make sure the page gives an error message if a group is specified that
        does not actually exist.
        """
        response = self.client.get(reverse('education:demographic_detail',args=("XYZ",)))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context.get("json_rate_data"), None)
        self.assertNotEqual(response.context.get("message"), None)
        self.assertContains(response, "Home")
        self.assertContains(response, "Error: No such group XYZ")
        self.assertNotContains(response, '<svg id="popsvg"')
    def test_no_data(self):
        """
        Make sure all demographic pages render even when there is no data in the
        database.
        """
        for demo in State.GROUP_NAMES:
            response = self.client.get(reverse('education:demographic_detail',args=(demo,)))
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.context.get("json_rate_data"), None)
            self.assertNotEqual(response.context.get("message"), None)
            self.assertContains(response, "Home")
            self.assertContains(response, "No Data Available")
            self.assertNotContains(response, '<svg id="popsvg"')
    def test_with_data(self):
        """
        Make sure all demographic pages render if there is data in the database.
        """
        create_states()
        for demo in State.GROUP_NAMES:
            response = self.client.get(reverse('education:demographic_detail',args=(demo,)))
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.context.get("group"), State.GROUP_NAMES[demo])
            self.assertNotEqual(response.context.get("json_rate_data"), None)
            self.assertNotEqual(response.context.get("json_population_data"), None)
            self.assertContains(response, "Home")
            self.assertNotContains(response, "No Data Available")
            self.assertContains(response, '<svg id="popsvg"')
|
996,482 | f613ae5862e264c80d6a86ffa0a06ff92edbc026 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-12 03:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter choices/help_text on Part.shop and Part.status.

    NOTE(review): status choices use integer values (1, 2, 3) on a
    CharField, and shop has an integer default on a CharField -- confirm
    these match the current model definition.
    """
    dependencies = [
        ('critical_list', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='part',
            name='shop',
            field=models.CharField(
                choices=[('MDT ENGINE', 'MDT ENGINE'), ('HDT ENGINE', 'HDT ENGINE'), ('TRANSMISSION', 'TRANSMISSION'),
                         ('CASTING AND FORGING', 'CASTING AND FORGING'), ('AXLE', 'AXLE')], default=1,
                help_text='Enter Text', max_length=30),
        ),
        migrations.AlterField(
            model_name='part',
            name='status',
            field=models.CharField(blank=True, choices=[(1, 'Normal'), (2, 'Warning'), (3, 'Critical')],
                                   help_text='Select the part Status', max_length=10),
        ),
    ]
|
996,483 | 9e9157c287c6543bdd00982b4e7a09231034ebd8 | import functools
from typing import (
Callable,
TypeVar,
)
from asks import Session
from p2p import trio_utils
from trinity.components.builtin.metrics.service.base import BaseMetricsService
T = TypeVar('T')


# Typing workaround: lets a memoizing decorator be stacked under @property
# until python >= 3.8 allows @functools.cached_property directly.
# https://github.com/python/mypy/issues/5858
def cache(func: Callable[..., T]) -> T:
    """Wrap ``func`` in an unbounded LRU cache, typed for decorator use."""
    memoized = functools.lru_cache()(func)
    return memoized  # type: ignore
class TrioMetricsService(BaseMetricsService):
    """Metrics-reporting service for the trio event loop.

    Uses the trio-compatible ``asks`` HTTP client; the session is built
    lazily from the reporter's URL/auth header and cached for the lifetime
    of the service.
    """
    @property  # type: ignore
    @cache
    def session(self) -> Session:
        # Cached via lru_cache keyed on `self`, so this runs once per
        # instance (and keeps the instance alive for the cache lifetime).
        url = self.reporter._get_post_url()
        auth_header = self.reporter._generate_auth_header()
        return Session(url, headers=auth_header)
    async def async_post(self, data: str) -> None:
        # use trio-compatible asks library for async http calls
        await self.session.post(data=data)
    async def continuously_report(self) -> None:
        # Post a metrics report every `self._reporting_frequency` seconds.
        async for _ in trio_utils.every(self._reporting_frequency):
            await self.report_now()
|
996,484 | f00c703b8c207d63395937f3c6b0b12e498bf6c7 | #!/usr/bin/env python
import findgtk
import gtk
class ClickCountGUI:
    """A GTK window with a single button whose label counts its own clicks."""
    # %d is filled with the running click count.
    CLICK_COUNT = "Click count: %d"
    def __init__(self):
        "Set up the window and the button within"
        self.window = gtk.Window()
        self.button = gtk.Button(self.CLICK_COUNT %0)
        # Stash the counter directly on the button widget.
        self.button.timesClicked = 0
        self.window.add(self.button)
        self.button.connect("clicked", self.buttonClicked)
        self.window.connect("destroy", self.destroy)
        #Show the GUI
        self.button.show()
        self.window.show()
    def buttonClicked(self, button):
        "This button was clicked; increment the message on its label"
        self.button.timesClicked += 1
        self.button.set_label(self.CLICK_COUNT %self.button.timesClicked)
    def destroy(self, window):
        "Hide the window and stop the GTK main loop"
        window.hide()
        gtk.main_quit()
if __name__ == "__main__":
    ClickCountGUI()
    gtk.main()
|
996,485 | 00237701cf15d706b5677935a2f6c7ae2c797d8d | def modify(k):
    """Append 39 to the list ``k`` in place, then print the updated list."""
    k.append(39)
    print("K =", k)
|
996,486 | 3536c8ca8aa4a27e086f804e0c9e0d8ab039d113 | # -*- coding: utf-8 -*-
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math
import torch
import torch.nn as nn
import numpy as np
#from skimage.measure.simple_metrics import compare_psnr
from skimage.measure import compare_psnr
#from skimage.metrics import peak_signal_noise_ratio
#一个数据管理的类
class AverageMeter(object):
    """Computes and stores the average and current value of a metric.

    Attributes maintained: ``val`` (last value), ``sum`` (weighted sum),
    ``count`` (number of observations) and ``avg`` (running mean).
    """
    def __init__(self, name, fmt=':f'):
        # name: label used by __str__; fmt: format spec for val/avg.
        self.name = name
        self.fmt = fmt
        # reset() is the single place the statistics are initialized; the
        # original re-assigned val/avg/sum here redundantly.
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
    def __str__(self):
        # e.g. "loss 0.123456 (0.234567)" with the default ':f' format.
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)
def weights_init_kaiming(m):
""" init layers """
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm') != -1:
# nn.init.uniform(m.weight.data, 1.0, 0.02)
m.weight.data.normal_(mean=0, std=math.sqrt(2. / 9. / 64.)).clamp_(-0.025, 0.025)
nn.init.constant(m.bias.data, 0.0)
def batch_PSNR(img, imclean, data_range):
    """Average PSNR between a batch of images and their clean references.

    ``img`` and ``imclean`` are NCHW torch tensors; ``data_range`` is the
    dynamic range of the pixel values (e.g. 1.0 or 255). The PSNR formula
    10*log10(data_range^2 / MSE) is computed directly in numpy, replacing
    ``skimage.measure.compare_psnr`` which was removed from scikit-image.
    """
    Img = img.data.cpu().numpy().astype(np.float32)
    Iclean = imclean.data.cpu().numpy().astype(np.float32)
    PSNR = 0
    for i in range(Img.shape[0]):
        mse = np.mean((Iclean[i, :, :, :] - Img[i, :, :, :]) ** 2)
        if mse == 0:
            # Identical images: infinite PSNR, matching compare_psnr.
            PSNR += float('inf')
        else:
            PSNR += 10 * np.log10((data_range ** 2) / mse)
    return (PSNR / Img.shape[0])
def data_augmentation(image, mode):
    """Apply one of 8 flip/rotation augmentations to a CHW image.

    The image is moved to HWC layout, transformed, and moved back to CHW.
    ``mode`` encodes a number of counter-clockwise quarter turns plus an
    optional up/down flip; unrecognized modes leave the image unchanged.
    """
    # mode -> (number of 90-degree CCW rotations, flip up/down afterwards)
    ops = {
        0: (0, False),
        1: (0, True),
        2: (1, False),
        3: (1, True),
        4: (2, False),
        5: (2, True),
        6: (3, False),
        7: (3, True),
    }
    out = np.transpose(image, (1, 2, 0))
    turns, do_flip = ops.get(mode, (0, False))
    if turns:
        out = np.rot90(out, k=turns)
    if do_flip:
        out = np.flipud(out)
    return np.transpose(out, (2, 0, 1))
def changePose(out, mode):
    """Apply one of 8 flip/rotation operations to a 1xHxW array.

    The leading singleton axis is removed, the 2-D map is rotated clockwise
    by some quarter turns and optionally flipped up/down, and the singleton
    axis is restored. Unrecognized modes leave the array unchanged.
    """
    # mode -> (number of clockwise quarter turns, flip up/down afterwards)
    ops = {
        0: (0, False),
        1: (0, True),
        2: (1, False),
        3: (1, True),
        4: (2, False),
        5: (2, True),
        6: (3, False),
        7: (3, True),
    }
    plane = np.squeeze(out, 0)
    turns, do_flip = ops.get(mode, (0, False))
    if turns:
        # axes=(1, 0) makes rot90 rotate clockwise instead of CCW.
        plane = np.rot90(plane, k=turns, axes=(1, 0))
    if do_flip:
        plane = np.flipud(plane)
    return np.expand_dims(plane, axis=0)
|
996,487 | 768d2fe0beaf356c7d334f7cbbb6354f954c7c19 | #Written for Python 3.4.2
data = [line.rstrip('\n') for line in open("input.txt")]
totalpaper = 0
totalribbon = 0
# Each input line is "LxWxH" box dimensions (looks like Advent of Code
# 2015 day 2: wrapping paper and ribbon totals).
for present in data:
    # Sorting ascending puts the two smallest dimensions first.
    dimensions = sorted([int(x) for x in present.split('x')])
    f1 = dimensions[0]*dimensions[1]
    f2 = dimensions[1]*dimensions[2]
    f3 = dimensions[2]*dimensions[0]
    # Slack paper equals the area of the smallest face.
    extra = min(f1,f2,f3)
    # Ribbon: perimeter of the smallest face, plus the volume for the bow.
    wraplenght = 2*(dimensions[0]+dimensions[1])
    bow = dimensions[0]*dimensions[1]*dimensions[2]
    totalpaper += 2*(f1+f2+f3)+extra
    totalribbon += wraplenght+bow
print(totalpaper)
print(totalribbon)
|
996,488 | aff505c770117474282db460dc67f75620b11136 | from redis import StrictRedis
from sqlalchemy.orm import Session, scoped_session, Query
from m2core.utils.decorators import classproperty
from m2core.utils.error import M2Error
class SessionMixin:
    """Mixin exposing shared DB / Redis sessions and helpers to models.

    The sessions are installed once at M2Core initialization via the
    ``set_*`` classmethods and then read through class properties.
    """
    __abstract__ = True
    @classmethod
    def set_db_session(cls, session) -> None:
        """
        Sets DB Session during M2Core initialization with this method
        """
        cls._db_session = session
    @classproperty
    def s(cls) -> scoped_session or Session:
        """
        Returns DB Session
        """
        if cls._db_session:
            return cls._db_session
        else:
            raise M2Error('No DB session defined')
    @classmethod
    def set_redis_session(cls, session) -> None:
        """
        Sets Redis Session during M2Core initialization with this method
        """
        cls._redis_session = session
    @classproperty
    def r(cls) -> StrictRedis:
        """
        Returns Redis Session
        """
        if cls._redis_session:
            return cls._redis_session
        else:
            raise M2Error('No Redis session defined')
    @classproperty
    def sh(cls):
        """
        Returns instance of Session Helper
        """
        # NOTE(review): cls.r is indexed like a dict ({'connector': ...,
        # 'scheme': ...}) despite the StrictRedis annotation on ``r`` --
        # confirm what set_redis_session actually receives.
        if not cls.r:
            raise M2Error('No Redis session defined')
        return cls._sh_cls(cls.r['connector'], cls.r['scheme'])
    @classmethod
    def set_sh(cls, sh_cls) -> None:
        """
        Sets the Session Helper class during M2Core initialization
        """
        cls._sh_cls = sh_cls
    @classproperty
    def q(cls) -> Query:
        """
        Returns prepared Query taken from DB Session
        """
        if not cls.s:
            raise M2Error('No DB session defined')
        return cls.s.query(cls)
|
996,489 | 3f03f657184fd651d438487d59f60979dc1d7ece | __author__ = 'jsuit'
import Corpus
import numpy as np
from numpy import matlib
import json
#set up Corpus
# Build the corpus and its bag-of-words matrix (documents x vocabulary).
corpus = Corpus.Corpus()
corpus.get_articles()
corpus.vectorize_articles()
bow = corpus.get_vect_articles()
n_docs = corpus.get_num_articles()
corpus.calc_num_terms()
n_terms = corpus.get_num_terms()
#pick number of topics
k=51
alpha = .01
beta = .001
# Document-topic and term-topic count matrices for collapsed Gibbs sampling.
DTMatrix = matlib.zeros((n_docs,k),dtype='float_')
TTMatrix =matlib.zeros((n_terms,k),dtype='float_')
DocVocab={}
w_tokens = False
if w_tokens:
    word_tokens = np.sum(bow.sum(axis=0))
else:
    word_tokens = n_terms
# Initialization: assign each (doc, word) occurrence a uniformly random topic.
for doc_num in xrange(bow.shape[0]):
    #get the indexes of word that occur in document
    words_i = np.nonzero(bow[doc_num])[0]
    for indx in words_i:
        #for each time the word occurs in document randomly sample
        p = 0
        if w_tokens:
            for j in range(bow[doc_num][indx]):
                z = np.random.multinomial(1, [1/float(k)]*k, size=1).argmax()
                DTMatrix[doc_num,z]+=1
                TTMatrix[indx,z]+=1
                if (doc_num,indx) not in DocVocab:
                    DocVocab[(doc_num,indx)] = [z]
                else:
                    DocVocab[(doc_num,indx)].append(z)
        else:
            z = np.random.multinomial(1, [1/float(k)]*k, size=1).argmax()
            DTMatrix[doc_num,z]+=1
            TTMatrix[indx,z]+=1
            if (doc_num,indx) not in DocVocab:
                DocVocab[(doc_num,indx)] = [z]
            else:
                DocVocab[(doc_num,indx)].append(z)
iters = 400
#DTMatrix.dump('DTMatrix.txt')
#TTMatrix.dump('TTMatrix.txt')
# Collapsed Gibbs sampling: resample every token's topic conditioned on
# all other current assignments.
for i in range(iters):
    print i
    for doc_num in xrange(bow.shape[0]):
        words_i = np.nonzero(bow[doc_num])[0]
        for indx in words_i:
            topics = DocVocab[(doc_num,indx)]
            for count, topic in enumerate(topics):
                #take the word,topic count and decrement
                TTMatrix[indx,topic]-=1
                #take the document and the topic and decrement
                DTMatrix[doc_num,topic] -=1
                #math happens here thanks to the dirchlet being a conjugate prior to the multinomial
                #pz is a vector representing each the probability of each topic k
                #print DTMatrix[doc_num,:], TTMatrix[indx,:]
                pz = np.divide(np.multiply(DTMatrix[doc_num,:] + alpha,TTMatrix[indx,:] + beta),DTMatrix.sum(axis=0)+beta*word_tokens)
                # NOTE(review): the multinomial is drawn with 21 trials; a
                # single Gibbs draw normally uses 1 trial -- confirm intent.
                sample_pz = np.random.multinomial(21, np.asarray(pz/pz.sum())[0],1)
                topic = sample_pz.argmax()
                #DocVocab[(doc_num,indx)] = topic
                topics[count] = topic
                TTMatrix[indx,topic]+=1
                DTMatrix[doc_num,topic]+=1
            DocVocab[(doc_num,indx)] = topics
# Reporting: for each document, record its single most likely topic and
# its top-3 topics by count.
TopicDict = {}
Topic_DictMax = {}
for doc_num in xrange(bow.shape[0]):
    x = (DTMatrix[doc_num,:] + alpha) / (DTMatrix[doc_num,:].sum() + alpha)
    #theta_d_z
    t = np.asarray(x/x.sum())[0]
    if np.argmax(t) not in Topic_DictMax:
        Topic_DictMax[np.argmax(t)] = [doc_num]
    else:
        Topic_DictMax[np.argmax(t)].append(doc_num)
    #print x
    #print DTMatrix[doc_num,:]
    #print DTMatrix[doc_num,:].sum()
    #print doc_num,DTMatrix[doc_num,:]
    #print doc_num, DTMatrix[doc_num,:].argmax()
    #amax = DTMatrix[doc_num,:].argmax()
    arr = np.asarray(DTMatrix[doc_num,:])
    #amax = np.argpartition(array, -3)[-3:]
    amax = np.argsort(arr[0])[-3:]
    for m in amax:
        if m not in TopicDict:
            TopicDict[m] = [doc_num]
        else:
            TopicDict[m].append(doc_num)
from pprint import pprint
#pprint(TopicDict)
pprint(Topic_DictMax)
"""
[17, 29, 38, 46, 48, 53, 101, 122],
1: [15, 22, 27, 37, 47, 49],
2: [1, 11, 32, 33, 69],
3: [77],
4: [51, 93, 95, 105, 110],
"""
|
996,490 | caa724a8c0b8659ea2e360d92af59d1ebab5e293 | '''Tests figures.settings
These are currently just simple tests that make sure the basics are working.
They could use elaboration to make sure that the individual settings within
each of the Figures entries to ``WEBPACK_LOADER`` and ``CELERYBEAT_SCHEDULE``
are correctly assigned
'''
import mock
import pytest
from figures import update_settings
from figures import settings as figures_settings
@pytest.mark.parametrize('env_tokens, expected ', [
    ({'LOG_PIPELINE_ERRORS_TO_DB': True}, True),
    ({'LOG_PIPELINE_ERRORS_TO_DB': False}, False),
])
def test_log_pipeline_errors_to_db_true(env_tokens, expected):
    """log_pipeline_errors_to_db() mirrors the LOG_PIPELINE_ERRORS_TO_DB token."""
    with mock.patch('figures.settings.env_tokens', env_tokens):
        assert figures_settings.log_pipeline_errors_to_db() == expected
class TestUpdateSettings(object):
    '''
    figures.settings.update_settings is a convenience method that wraps
    around:
    ::
        figures.settings.update_webpack_loader
        figures.settings.update_celerybeat_schedule
    '''
    def setup(self):
        # Fresh settings dicts for each test (pytest invokes setup per test).
        self.webpack_loader_settings = {}
        self.celerybeat_schedule_settings = {}
        self.celery_task_name = figures_settings.DAILY_METRICS_CELERY_TASK_LABEL
    def test_update_in_package_init(self):
        '''Make sure that the ``update_settings`` method in the package init
        module is the same as in ``figures.settings``
        '''
        assert update_settings == figures_settings.update_settings
    def validate_webpack_loader_settings(self):
        # Helper assertion: update_settings added the FIGURES_APP bundle.
        assert 'FIGURES_APP' in self.webpack_loader_settings
        for key in ['BUNDLE_DIR_NAME', 'STATS_FILE']:
            assert key in self.webpack_loader_settings['FIGURES_APP']
    def validate_celerybeat_schedule_settings(self):
        # NOTE(review): the membership check uses self.celery_task_name but
        # the lookup below hard-codes 'figures-populate-daily-metrics' --
        # presumably the same label; confirm.
        assert self.celery_task_name in self.celerybeat_schedule_settings
        for key in ['task', 'schedule']:
            assert key in self.celerybeat_schedule_settings['figures-populate-daily-metrics']
    @pytest.mark.parametrize('figures_env_tokens, run_celery,', [
        (None, True),
        ({}, True),
        ({'ENABLE_DAILY_METRICS_IMPORT': True}, True),
        ({'ENABLE_DAILY_METRICS_IMPORT': False}, False),
    ])
    def test_update_settings(self, figures_env_tokens, run_celery):
        '''The celerybeat entry is added unless daily metrics import is
        explicitly disabled via ENABLE_DAILY_METRICS_IMPORT.
        '''
        figures_settings.env_tokens = dict()
        update_settings(
            webpack_loader_settings=self.webpack_loader_settings,
            celerybeat_schedule_settings=self.celerybeat_schedule_settings,
            figures_env_tokens=figures_env_tokens,
        )
        self.validate_webpack_loader_settings()
        if run_celery:
            self.validate_celerybeat_schedule_settings()
        else:
            assert self.celery_task_name not in self.celerybeat_schedule_settings
        assert figures_settings.env_tokens == figures_env_tokens
|
996,491 | 0d99bd7861ddf980e568fd9f37a05900dea559d5 | STATIC_ROOT_DIR = "/home/banban"
class Application(object):
    """Minimal WSGI-style app serving files from STATIC_ROOT_DIR.

    The request path is read from ``env["PATH"]``; the custom server in
    this project appears to store the raw path under that key rather than
    the standard "PATH_INFO" -- confirm against the server side.
    """
    def __call__(self, env, start_response):
        path = env.get("PATH")
        file_name = STATIC_ROOT_DIR + path
        print(file_name)
        try:
            # Only file-system errors mean "not found"; the original's bare
            # except also swallowed programming errors.
            file = open(file_name, "rb")
        except OSError:
            status = "404 CANT FOUND"
            headers = []
            start_response(status, headers)
            return "error name"
        else:
            # The original mislabeled the success status as "404 OK".
            status = "200 OK"
            headers = [
                ("Content-Type", "text/plain")
            ]
            start_response(status, headers)
            # Context manager guarantees the handle is closed (the original
            # leaked it).
            with file:
                response_date = file.read().decode("utf-8")
            return response_date
app = Application()
|
996,492 | 3f0e2599577e098a0b913943fb14eaa4ac4d91d1 | import sys
def main():
    # Python 2 script. Reads n, then value pairs "a b" (values 1..100)
    # from stdin. A and B are value histograms; the two-pointer sweep
    # pairs the largest remaining a-values with the smallest remaining
    # b-values (respecting multiplicities) and prints the maximum a+b
    # seen over that matching.
    n = int(sys.stdin.readline().strip())
    A,B = [0 for i in xrange(101)], [0 for i in xrange(101)]
    for line in sys.stdin:
        a,b = [int(i) for i in line.split()]
        A[a] += 1
        B[b] += 1
    a,b = 100, 1
    ca, cb = 0, 0
    curr_ans = 0
    while True:
        # Skip empty histogram buckets from both ends.
        while a>0 and A[a] == 0:
            a -= 1
        while b<101 and B[b] == 0:
            b +=1
        if (a == 0 or b == 101):
            break
        # ca/cb hold how many items remain unpaired at the current values.
        if ca == 0:
            ca = A[a]
        if cb == 0:
            cb = B[b]
        if a+b > curr_ans:
            curr_ans = a+b
        if ca > cb:
            ca -= cb
            cb = 0
            b+=1
        elif cb > ca:
            cb -= ca
            ca = 0
            a-=1
        else:
            a -= 1
            b += 1
            ca,cb = 0,0
    print curr_ans
if __name__ == '__main__':
    main()
|
996,493 | c16c56d95e1d1bbf1feb89f5416ea8ff93ddc683 | def countdown(n):
    """Recursively print n, n-1, ..., 1, one per line; nothing for n <= 0."""
    if n > 0:
        print(n)
        countdown(n-1)
# Prompt for n ("Insira n" is Portuguese for "enter n") and count down.
n = int(input("Insira n "))
countdown(n)
|
996,494 | 8ea27274a45664cebe6bc18db2e0ac8024941c17 | #!/usr/bin/python3
"""
Datetime example
"""
import datetime
import pytz
if __name__=="__main__":
    fmt = "%Y %m %d %H:%M"
    current_time = datetime.datetime(2017, 1, 5, 9, 35, 44)
    # localize() attaches a zone to a naive datetime and is DST-aware,
    # unlike passing tzinfo= directly to the constructor with pytz.
    timezone = pytz.timezone('US/Pacific')
    localized_time = timezone.localize(current_time)
    # The same instant rendered in several other time zones.
    print(localized_time.astimezone(pytz.timezone('US/Central')).strftime(fmt))
    print(localized_time.astimezone(pytz.timezone('US/Eastern')).strftime(fmt))
    print(localized_time.astimezone(pytz.timezone('Asia/Calcutta')).strftime(fmt))
    print(localized_time.astimezone(pytz.timezone('Europe/Amsterdam')).strftime(fmt))
    print(localized_time.astimezone(pytz.timezone('Australia/Sydney')).strftime(fmt))
print(localized_time.astimezone(pytz.timezone('America/St_Johns')).strftime(fmt)) |
996,495 | 86834805bc6abed63d64497ac74202dfb982fab7 | # -*- coding:utf-8 -*-
# @Author: clark
# @Time: 2021/5/26 5:53 PM
# @File: demo_1.py
# @project demand:
import requests
# Browser-like headers for the ggzy.gov.cn deal-list search endpoint.
headers = {
    'Proxy-Connection': 'keep-alive',
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'X-Requested-With': 'XMLHttpRequest',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'Origin': 'http://deal.ggzy.gov.cn',
    'Referer': 'http://deal.ggzy.gov.cn/ds/deal/dealList.jsp',
    'Accept-Language': 'zh-CN,zh;q=0.9',
}
# Search-filter form payload; the commented-out TIME* fields show how to
# restrict the query to a date window.  PAGENUMBER selects the result page.
data = {
    # 'TIMEBEGIN_SHOW': '2021-03-24',
    # 'TIMEEND_SHOW': '2021-04-02',
    # 'TIMEBEGIN': '2021-03-24',
    # 'TIMEEND': '2021-04-02',
    'SOURCE_TYPE': '1',
    'DEAL_TIME': '02',
    'DEAL_CLASSIFY': '00',
    'DEAL_STAGE': '0000',
    'DEAL_PROVINCE': '0',
    'DEAL_CITY': '0',
    'DEAL_PLATFORM': '0',
    'BID_PLATFORM': '0',
    'DEAL_TRADE': '0',
    'isShowAll': '1',
    'PAGENUMBER': '8181',
    'FINDTXT': ''
}
# POST the search form.  NOTE(review): verify=False disables TLS certificate
# checking -- tolerable for a one-off scraping demo, but do not keep it for
# anything security-sensitive.
response = requests.post('http://deal.ggzy.gov.cn/ds/deal/dealList_find.jsp',
                         headers=headers,
                         data=data,
                         verify=False,
                         )
from pprint import pprint
pprint(response.json())
|
996,496 | 7d00546eca2ce0dc2eb5cd6cb56ba09d044bc49f | #-----------------------------------------------------------------------------#
#projection.py
#
#NPS Night Skies Program
#
#Last updated: 2021/02/22
#
#This script plots the fits images in fisheye and Hammer projections.
#
#Input:
# (1) reading in the mask to get the x,y center and the fisheye view radius
# (2) all the processed fisheye fit images
#
#Output:
# (1) *fisheye.png
# (2) *hammer.png
#
#History:
# Li-Wei Hung -- Created
#
#------------------------------------------------------------------------------#
import copy
import matplotlib as mpl
import numpy as n
import warnings
from astropy.io import fits
from glob import glob
from matplotlib import pyplot as plt
from skimage.transform import rotate
# Local Source
import colormaps
import process_input as p
import upper_hammer
#------------------------------------------------------------------------------#
def main():
    """
    This script plots fits images in fisheye and Hammer projection. See the
    script description for detail.

    Reads the fisheye mask (p.mask) for the image center and radius, then
    renders every calibrated '*sky*.fit' image under p.data_cal, saving a
    '*_fisheye.png' and a '*_hammer.png' beside each input file.
    """
    #--------------------------------------------------------------------------#
    # Generate Polar Coordinates #
    #--------------------------------------------------------------------------#
    #Mask - read in the fisheye mask center coordinates and radius
    mask = fits.open(p.mask,uint=False)[0].header
    # CENTERX/CENTERY/RADIUS come from the mask FITS header; presumably in
    # pixels -- confirm against the mask-generation step.
    xc, yc, r0 = int(mask['CENTERX']), int(mask['CENTERY']), int(mask['RADIUS'])
    X, Y = n.meshgrid(n.arange(-r0,r0),n.arange(-r0,r0))
    #Polar coordinates
    r = n.sqrt(X**2+Y**2) / r0
    # NOTE(review): the minus sign reverses the azimuth direction (image y
    # runs downward) -- verify this matches the intended sky orientation.
    theta = -n.arctan2(Y,X)
    #Fisheye takes r in degree
    r_deg = 90 * r
    theta_f = theta + n.pi/2
    #Hammer plot requires the values to be sorted
    r_str = n.pi/2 - r * n.pi/2
    inds = n.argsort(theta[:,0])
    theta_s = theta[inds,:]
    r_s = r_str[inds,:]
    #--------------------------------------------------------------------------#
    # Define Plot settings #
    #--------------------------------------------------------------------------#
    #General plot settings
    plt.close('all')
    plt.style.use('dark_background')
    plt.rcParams['image.cmap'] = 'NPS_mag'
    cmap = copy.copy(mpl.cm.get_cmap("NPS_mag"))
    cmap.set_bad(color='black')
    #Fisheye plot setting
    fig0 = plt.figure('fisheye')
    ax0 = fig0.add_subplot(111, projection='polar')
    ax0.set_rlim(0,90)
    ax0.set_yticklabels([])
    ax0.tick_params(colors='darkgray')
    ax0.set_theta_zero_location('N')
    #Hammer plot setting
    fig1 = plt.figure('hammer',figsize=(15,5.2))
    ax1 = fig1.add_subplot(111, projection="upper_hammer")
    fig1.tight_layout(rect=(0.03,-0.6,0.98,0.97))
    #Suppressing a MatPlotLib benign warning about pcolormesh shading
    warnings.filterwarnings("ignore",category=UserWarning)
    #--------------------------------------------------------------------------#
    # Plot the image in fisheye and Hammer projections #
    #--------------------------------------------------------------------------#
    for f in glob(p.data_cal+'*sky*.fit'):
        print('projecting ' + f[len(p.data_cal):])
        # Crop the square window centered on the fisheye circle.
        img = fits.open(f,uint=False)[0].data[yc-r0:yc+r0,xc-r0:xc+r0]
        # Rotate -90 deg and reorder rows to match the sorted theta grid;
        # presumably this aligns the image with the Hammer axes -- verify.
        img_hammer = rotate(img.astype('float32'),-90,cval=n.nan)[inds,:]
        #plot fisheye
        ax0.pcolormesh(theta_f,r_deg,img,shading='auto',vmin=14,vmax=24)
        ax0.grid(True, color='gray', linestyle='dotted', linewidth=.5)
        fig0.savefig(f[:-4]+'_fisheye.png', dpi=250)
        #plot hammer
        ax1.pcolormesh(theta_s,r_s,img_hammer,shading='auto',vmin=14,vmax=24)
        ax1.grid(True)
        fig1.savefig(f[:-4]+'_hammer.png')
if __name__ == '__main__':
main() |
996,497 | e59f29d53f9820350705693e96d1671f7514336d | import os
import codecs
import ntpath
import logging
import numpy
import logging
import cPickle
import theano.tensor as tensor
import theano
from blocks.extensions import SimpleExtension
from blocks.extensions.monitoring import DataStreamMonitoring
from blocks.monitoring.evaluators import DatasetEvaluator
from blocks.theano_expressions import l2_norm
from blocks.algorithms import Scale
from picklable_itertools.extras import equizip
logger = logging.getLogger('extensions.SaveLoadParams')
#region Extension
class EpochMonitor(SimpleExtension):
    """Interrupt the main loop once a fixed number of epochs has completed."""

    def __init__(self, max_epoch, **kwargs):
        super(EpochMonitor, self).__init__(after_epoch=True, **kwargs)
        self.cur_epoch = 0
        self.max_epoch = max_epoch

    def do(self, which_callback, *args):
        if which_callback != "after_epoch":
            return
        self.cur_epoch += 1
        if self.cur_epoch >= self.max_epoch:
            # Ask the main loop to stop at the end of this epoch.
            self.main_loop.status['epoch_interrupt_received'] = True
class MyDataStreamMonitoring(DataStreamMonitoring):
    """Monitors Theano variables and monitored-quantities on a data stream.
    By default monitoring is done before the first and after every epoch.
    Parameters
    ----------
    variables : list of :class:`~tensor.TensorVariable` and
        :class:`MonitoredQuantity`
        The variables to monitor. The variable names are used as record
        names in the logs.
    updates : list of tuples or :class:`~collections.OrderedDict` or None
        :class:`~tensor.TensorSharedVariable` updates to be performed
        during evaluation. This parameter is only for Theano variables.
        Be careful not to update any model parameters as this is not
        intended to alter your model in any meaningful way. A typical
        use case of this option arises when the theano function used
        for evaluation contains a call to :func:`~theano.scan` which
        might have returned shared variable updates.
    data_stream : instance of :class:`.DataStream`
        The data stream to monitor on. A data epoch is requested
        each time monitoring is done.
    coverage : float, optional
        Fraction of the stream that was evaluated; every reported value
        is scaled by it (default 1., i.e. the full stream).
    """
    PREFIX_SEPARATOR = '_'
    def __init__(self, variables, data_stream, updates=None, coverage=1., **kwargs):
        super(MyDataStreamMonitoring, self).__init__(variables, data_stream, updates, **kwargs)
        self.coverage = coverage
    def do(self, callback_name, *args):
        """Write the values of monitored variables to the log."""
        # Unlike the parent class this prints the values instead of adding
        # records to the main-loop log, scaling each by the coverage.
        value_dict = self._evaluator.evaluate(self.data_stream)
        print("Train test coverage:{0}".format(self.coverage))
        for key, value in value_dict.items():
            print("{0}:{1}".format(key, value * self.coverage))
class BasicSaveLoadParams(SimpleExtension):
    '''
    Save or load word, user and hashtag embeddings plus the parameters of
    the bricks (everything else in the model).

    Saving writes two pickles into one file: the model parameter dict,
    then the dataset vocabularies.  Loading restores them in that order
    and copies every embedding row whose key is still present in the
    current vocabularies.
    '''
    def __init__(self, load_from, save_to, model, dataset, **kwargs):
        super(BasicSaveLoadParams, self).__init__(**kwargs)
        self.load_from = load_from
        self.save_to = save_to
        self.model = model
        self.dataset = dataset

    def do_save(self):
        # Create the target directory on first save; an empty dirname
        # means "current directory" and needs no makedirs call.
        target_dir = os.path.dirname(self.save_to)
        if target_dir and not os.path.exists(target_dir):
            os.makedirs(target_dir)
        with open(self.save_to, 'wb+') as f:
            logger.info('Saving parameters to %s...'%self.save_to)
            # Save model and necessary dataset information
            cPickle.dump(self.model.get_parameter_values(), f)
            cPickle.dump(self.dataset.get_parameter_to_save(), f)

    def do_load(self):
        try:
            with open(self.load_from, 'rb') as f:
                logger.info('Loading parameters from %s...'%self.load_from)
                # Must unpickle in the same order do_save wrote: model
                # parameters first, then dataset vocabularies.
                last_model_params = cPickle.load(f)
                last_dataset_params = cPickle.load(f)
                self.do_initialize(last_model_params, last_dataset_params)
        except IOError:
            # A missing/unreadable checkpoint is not fatal: start fresh.
            print("Cannot load parameters!")

    def do_initialize(self, last_model_params, last_dataset_params):
        cur_dataset_params = self.dataset.get_parameter_to_save()
        cur_model_params = self.model.get_parameter_values()
        # Initialize LSTM (non-embedding) params
        self._initialize_other(last_model_params, last_dataset_params, cur_model_params, cur_dataset_params)
        # Initialize hashtag embedding
        self._initialize_hashtag(last_model_params, last_dataset_params, cur_model_params, cur_dataset_params)
        # Initialize user embedding
        self._initialize_user(last_model_params, last_dataset_params, cur_model_params, cur_dataset_params)
        # Initialize word embedding
        self._initialize_word(last_model_params, last_dataset_params, cur_model_params, cur_dataset_params)
        self.model.set_parameter_values(cur_model_params)

    @staticmethod
    def _transfer_embedding(last_embed, cur_embed, last2index, cur2index):
        # Shared row-copy logic for all three embedding tables: keep the
        # old vector for every key that survives in the new vocabulary.
        # (.items() instead of .iteritems() so this also runs on Python 3.)
        for key, index in last2index.items():
            if key in cur2index:
                cur_embed[cur2index[key]] = last_embed[index]

    def _initialize_hashtag(self, last_model_params, last_dataset_params,
                            cur_model_params, cur_dataset_params):
        self._transfer_embedding(last_model_params['/hashtag_embed.W'],
                                 cur_model_params['/hashtag_embed.W'],
                                 last_dataset_params['hashtag2index'],
                                 cur_dataset_params['hashtag2index'])

    def _initialize_user(self, last_model_params, last_dataset_params,
                         cur_model_params, cur_dataset_params):
        self._transfer_embedding(last_model_params['/user_embed.W'],
                                 cur_model_params['/user_embed.W'],
                                 last_dataset_params['user2index'],
                                 cur_dataset_params['user2index'])

    def _initialize_word(self, last_model_params, last_dataset_params,
                         cur_model_params, cur_dataset_params):
        self._transfer_embedding(last_model_params['/word_embed.W'],
                                 cur_model_params['/word_embed.W'],
                                 last_dataset_params['word2index'],
                                 cur_dataset_params['word2index'])

    def _initialize_other(self, last_model_params, last_dataset_params,
                          cur_model_params, cur_dataset_params):
        # Everything that is not an embedding table is restored verbatim.
        for key, value in last_model_params.items():
            if key != "/hashtag_embed.W" and key != "/user_embed.W" and key != '/word_embed.W':
                cur_model_params[key] = value

    def do(self, which_callback, *args):
        # Load once before training; save at every other trigger point.
        if which_callback == 'before_training':
            self.do_load()
        else:
            self.do_save()
class UHSaveLoadParams(BasicSaveLoadParams):
    '''
    Variant that restores only the user and hashtag embeddings: word
    embeddings and non-embedding brick parameters are intentionally
    left at their fresh initialization.
    '''
    def __init__(self, load_from, save_to, model, dataset, **kwargs):
        # Bug fix: **kwargs (extension trigger options) were accepted but
        # silently dropped; forward them like the sibling subclasses do.
        super(UHSaveLoadParams, self).__init__(load_from, save_to, model, dataset, **kwargs)

    def _initialize_word(self, last_model_params, last_dataset_params,
                         cur_model_params, cur_dataset_params):
        # Word embeddings are deliberately not restored in this variant.
        pass

    def _initialize_other(self, last_model_params, last_dataset_params,
                          cur_model_params, cur_dataset_params):
        # Non-embedding parameters are deliberately not restored either.
        pass
class ExtendSaveLoadParams(BasicSaveLoadParams):
    '''
    Save or load character, word, user and hashtag embeddings together
    with the parameters of the bricks.
    '''
    def __init__(self, load_from, save_to, model, dataset, **kwargs):
        super(ExtendSaveLoadParams, self).__init__(load_from, save_to, model, dataset, **kwargs)

    def _initialize_other(self, last_model_params, last_dataset_params,
                          cur_model_params, cur_dataset_params):
        # Carry over character-embedding rows by character identity.
        last_char_embed = last_model_params['/char_embed.W']
        cur_char_embed = cur_model_params['/char_embed.W']
        cur_char2index = cur_dataset_params['char2index']
        for char, index in last_dataset_params['char2index'].iteritems():
            if char in cur_char2index:
                cur_char_embed[cur_char2index[char]] = last_char_embed[index]
        # Restore every remaining non-embedding parameter verbatim.
        embedding_keys = ("/hashtag_embed.W", "/user_embed.W", '/word_embed.W', '/char_embed.W')
        for key, value in last_model_params.iteritems():
            if key not in embedding_keys:
                cur_model_params[key] = value
class ETHSaveLoadParams(ExtendSaveLoadParams):
    '''
    Save or load character, word and hashtag embeddings and parameters
    of bricks (user embeddings are skipped).
    '''
    def __init__(self, load_from, save_to, model, dataset, **kwargs):
        super(ETHSaveLoadParams, self).__init__(load_from, save_to, model, dataset, **kwargs)

    def do_initialize(self, last_model_params, last_dataset_params):
        """Restore everything except the user embeddings from a previous run."""
        params_now = self.model.get_parameter_values()
        vocab_now = self.dataset.get_parameter_to_save()
        # Non-embedding (e.g. LSTM) parameters first, then the embeddings
        # that are still meaningful for this variant.
        self._initialize_other(last_model_params, last_dataset_params, params_now, vocab_now)
        self._initialize_hashtag(last_model_params, last_dataset_params, params_now, vocab_now)
        self._initialize_word(last_model_params, last_dataset_params, params_now, vocab_now)
        self.model.set_parameter_values(params_now)
class EarlyStopMonitor(DataStreamMonitoring):
    """Validation monitor with early stopping on one tracked variable.

    Evaluates `variables` on `data_stream`, logs the values, and requests
    the main loop to stop once `monitor_variable` (higher is better) has
    failed to improve for more than `tolerate_time` consecutive
    evaluations.  On each new best value, `saver.do_save()` checkpoints
    the model if a saver was supplied.
    """
    PREFIX_SEPARATOR = '_'
    def __init__(self, variables, monitor_variable, data_stream, updates=None, saver=None, tolerate_time = 5, **kwargs):
        # NOTE(review): super(DataStreamMonitoring, ...) skips
        # DataStreamMonitoring.__init__ and calls its parent directly --
        # looks deliberate (this class builds its own evaluator), but
        # confirm it is not a typo for EarlyStopMonitor.
        super(DataStreamMonitoring, self).__init__(**kwargs)
        self._evaluator = DatasetEvaluator(variables, updates)
        self.data_stream = data_stream
        self.saver = saver
        self.best_result = -numpy.inf   # best monitored value seen so far
        self.last_result = -numpy.inf   # monitored value at the previous evaluation
        self.wait_time = 0              # consecutive evaluations without improvement
        self.tolerate_time = tolerate_time
        self.monitor_variable = monitor_variable
    def do(self, callback_name, *args):
        """Write the values of monitored variables to the log."""
        logger.info("Monitoring on auxiliary data started")
        value_dict = self._evaluator.evaluate(self.data_stream)
        self.add_records(self.main_loop.log, value_dict.items())
        self.check_stop(value_dict)
        logger.info("Monitoring on auxiliary data finished")
    def check_stop(self, value_dict):
        # Improvement is measured against the PREVIOUS result, not the
        # best one; an equal result counts as no improvement.
        result = value_dict[self.monitor_variable.name]
        if result > self.last_result:
            self.last_result = result
            self.wait_time = 0
            if result > self.best_result:
                self.best_result = result
                if self.saver is not None:
                    self.saver.do_save()
                else:
                    pass
            else:
                pass
        else:
            self.wait_time += 1
            self.last_result = result
            if self.wait_time > self.tolerate_time:
                # Patience exhausted: request a full stop of the main loop.
                self.main_loop.status['batch_interrupt_received'] = True
                self.main_loop.status['epoch_interrupt_received'] = True
class EvaluatorWithEarlyStop(EarlyStopMonitor):
    """Early-stopping monitor whose reported values are scaled by the
    fraction (`coverage`) of the data stream that was evaluated."""

    def __init__(self, coverage, **kwargs):
        super(EvaluatorWithEarlyStop, self).__init__(**kwargs)
        self.coverage = coverage

    def do(self, callback_name, *args):
        """Write the values of monitored variables to the log."""
        logger.info("Monitoring on auxiliary data started")
        value_dict = self._evaluator.evaluate(self.data_stream)
        # Scale every monitored value by the evaluated coverage, then
        # record the coverage itself alongside them.
        for name in list(value_dict.keys()):
            value_dict[name] *= self.coverage
        value_dict['coverage'] = self.coverage
        logging.info("coverage:{0}".format(self.coverage))
        for name, value in value_dict.items():
            logging.info("{0}:{1}".format(name, value))
        self.add_records(self.main_loop.log, value_dict.items())
        self.check_stop(value_dict)
        logger.info("Monitoring on auxiliary data finished")
#endregion
|
996,498 | 3dd394c59838e2e8dca2bc077b5a2cc6d366327c | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: demo.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='demo.proto',
package='',
syntax='proto2',
serialized_pb=_b('\n\ndemo.proto\"\x0f\n\x01\x42\x12\n\n\x02\x62\x32\x18\x02 \x01(\t\"\x1e\n\x01\x41\x12\r\n\x01\x62\x18\x01 \x01(\x0b\x32\x02.B\x12\n\n\x02id\x18\x02 \x01(\t')
)
_B = _descriptor.Descriptor(
name='B',
full_name='B',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='b2', full_name='B.b2', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=14,
serialized_end=29,
)
_A = _descriptor.Descriptor(
name='A',
full_name='A',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='b', full_name='A.b', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='A.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=61,
)
_A.fields_by_name['b'].message_type = _B
DESCRIPTOR.message_types_by_name['B'] = _B
DESCRIPTOR.message_types_by_name['A'] = _A
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
B = _reflection.GeneratedProtocolMessageType('B', (_message.Message,), dict(
DESCRIPTOR = _B,
__module__ = 'demo_pb2'
# @@protoc_insertion_point(class_scope:B)
))
_sym_db.RegisterMessage(B)
A = _reflection.GeneratedProtocolMessageType('A', (_message.Message,), dict(
DESCRIPTOR = _A,
__module__ = 'demo_pb2'
# @@protoc_insertion_point(class_scope:A)
))
_sym_db.RegisterMessage(A)
# @@protoc_insertion_point(module_scope)
|
996,499 | d5632c715716c891565a52dbdae58f41f494c855 | import io
import re
import requests
def get_props_split_by_60_chars(prop):
    """Split a paragraph into short chunks suitable for a TTS request.

    Quotes, colons and commas are normalized to '.', apostrophes and
    newlines are removed, and the text is cut at the first '.' within the
    leading 60 characters (or, failing that, at the first space after
    position 60).  Every returned chunk ends with '.', '?' or '!'.

    Bug fix: the original never advanced past a found '.', so after the
    first split the remainder started with '.', find() returned 0, and
    the loop appended '' forever (infinite loop).  The cut index is now
    advanced past the delimiter in both branches.
    """
    prop = prop.replace('\n', '')
    for quote in ('"', '“', '”'):
        prop = prop.replace(quote, '.')
    prop = prop.replace('\'', '')
    prop = prop.replace(':', '.')
    prop = prop.replace(',', '.')
    chunks = []  # renamed from `list`, which shadowed the builtin
    while len(prop) > 0:
        cut = prop.find('.', 0, 60)
        if cut != -1:
            cut += 1  # include the delimiter in the chunk (fixes the hang)
        else:
            cut = prop.find(' ', 60)
            if cut == -1:
                chunks.append(prop)
                break
            cut += 1
        chunks.append(prop[:cut])
        prop = prop[cut:].strip()
        if '.' not in prop:
            # No sentence delimiter left: flush the remainder and stop.
            if prop:
                chunks.append(prop)
            break
    # Guarantee a terminal punctuation mark on every chunk.
    final_res = [p if p[-1] in '?!.' else p + '.' for p in chunks]
    print(final_res)
    return final_res
# NOTE(review): `file` shadows the builtin of the same name and the handle
# is never closed -- consider a `with` block.
file = io.open('text.txt', 'r', encoding="utf-8")
for p_index, prop in enumerate(file):
    # Split each paragraph into short sentence chunks for the TTS service.
    props = get_props_split_by_60_chars(prop)
    for s_index, sentence in enumerate(props):
        payload = {'text': sentence}
        # Request synthesized speech from a locally running TTS server.
        doc = requests.get('http://localhost:5002/api/tts', params=payload)
        # NOTE(review): the backslash in the path makes this Windows-only.
        with open(f"audio\{p_index}-{s_index}.mp3", 'wb') as f:
            f.write(doc.content)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.