text stringlengths 38 1.54M |
|---|
import random
from collections import defaultdict
import numpy as np
import operator
from sklearn.metrics import confusion_matrix, classification_report
# --- Load the iris dataset: map each feature tuple to its class label ---
# (Python 2 script: print statements, list-returning dict.keys().)
fp=open('iris.data','r')
lables=[]       # class label of every row ('lables' spelling kept as-is)
datadict={}     # feature tuple -> class label
lists=fp.readlines()
print len(lists)
for line in lists:
    line=line.strip()
    data=line.split(",")
    # all columns but the last are numeric features
    features=tuple(map(float, line.split(',')[:-1]))
    lables.append(data[-1])
    datadict[features]=data[-1]
# Shuffle the unique feature tuples with a fixed seed for reproducibility.
# NOTE(review): random.shuffle on dict.keys() only works on Python 2, where
# keys() returns a list -- confirm this script targets Python 2.
features=datadict.keys()
random.seed(123)
random.shuffle(features)
def euclediandistance(x,c):
    """Return the Euclidean distance between points x and c."""
    diff = np.asarray(x) - np.asarray(c)
    return np.linalg.norm(diff)
def assign(centers):
    """Reassign every point to its nearest center.

    `centers` maps a center tuple to the list of its member points; the
    returned mapping has the same shape, keyed by the nearest center.
    """
    reassigned = defaultdict(list)
    for members in centers.values():
        for point in members:
            nearest = min(centers, key=lambda c: euclediandistance(point, c))
            reassigned[nearest].append(point)
    return reassigned
def mean(features):
    """Return the component-wise mean of a list of feature tuples, as a tuple."""
    return tuple(np.asarray(features).mean(axis=0))
def update(centers):
    """Re-key each cluster by the mean of its member points."""
    return {mean(members): members for members in centers.values()}
def kmeans(features,k,Evolutions=200):
    """Cluster `features` with Lloyd's algorithm, up to `Evolutions` rounds.

    Returns a dict mapping each final center (mean tuple) to its members.
    NOTE(review): despite the `k` parameter, the initial partition is
    hard-coded to three equal slices -- confirm before calling with k != 3.
    """
    centers={}
    length=len(features)
    # Integer division: `length / 3` yields a float on Python 3, which
    # breaks slicing below; `//` behaves identically on Python 2 ints.
    part=length//3
    features1=features[0:part]
    features2=features[part:2*part]
    features3=features[2*part:]
    # Seed each cluster with the first point of its slice as the center key.
    centers[features[0]]=features1
    centers[features[part]]=features2
    centers[features[2*part]]=features3
    for i in range(Evolutions):
        new_centers=assign(centers)
        new_centers=update(new_centers)
        if centers==new_centers:
            break  # converged: assignments no longer change
        else:
            centers=new_centers
    return centers
def counter(clus):
    """Return a plain dict of label -> occurrence count for `clus`."""
    counts = {}
    for label in clus:
        counts[label] = counts.get(label, 0) + 1
    return counts
# --- Run k-means and name each cluster after its majority true label ---
clusters=kmeans(features,3)
dictcl={}       # cluster index -> {label: count}
dictlabel={}    # majority label -> {label: count} of that cluster
i=0
for c in clusters:
    dictcl[i]=counter([datadict[x] for x in clusters[c]])
    # majority vote: the most frequent true label names the cluster
    key=max(dictcl[i].iteritems(), key=operator.itemgetter(1))[0]
    dictlabel[key]=counter([datadict[x] for x in clusters[c]])
    i=i+1
print dictlabel
#######################External Measures####################################
# Build per-point predicted labels, then a confusion matrix, accuracy and a
# classification report against the true labels.
outputLables=[]
for i in dictlabel:
    outputLables.append(i)
TrueValue=datadict
PredValue={}
i=0
# NOTE(review): this assumes dictlabel's iteration order matches the order
# clusters were visited above -- holds for dicts populated in the same
# order, but fragile; verify.
for c in clusters:
    for f in clusters[c]:
        PredValue[f]=outputLables[i]
    i=i+1
Tr=[]
Pr=[]
for ft in TrueValue:
    Tr.append(TrueValue[ft])
    Pr.append(PredValue[ft])
#for i in range(len(Tr)): print Tr[i],Pr[i]
sets=set(lables)
sets=list(sets)
ll=len(sets)
#print ll;
# Confusion matrix: rows = true label index, columns = predicted label index.
CM=[[0 for x in range(ll)] for x in range(ll)]
for i in dictlabel:
    for j in range(len(sets)):
        if sets[j] in dictlabel[i]:
            i1=sets.index(sets[j])
            i2=sets.index(i)
            CM[i1][i2]+=dictlabel[i][sets[j]]
        else:
            i1=sets.index(sets[j])
            i2=sets.index(i)
            CM[i1][i2]+=0
print CM
correctvalues=0;
wrongvalues=0;
for i in range(ll):
    for j in range(ll):
        if i==j:
            correctvalues+=CM[i][j]
        else:
            wrongvalues+=CM[i][j]
# Integer percentage of points on the confusion-matrix diagonal.
Accuracy=(correctvalues*100)//(correctvalues+wrongvalues)
print "Accuracy =", Accuracy,"%"
print(classification_report(Tr, Pr))
#print features
#########################INTERNAL MEASURE ####################################
# Per-cluster statistics: mean intra-cluster distance (S), farthest member
# from each centroid (maxdist), plus the centroid list itself.
S=[]
i=0
centroids=[]
maxdist=[]
for c in clusters:
    centroids.append(c)
    sumd=0
    d=0
    l=len(clusters[c])
    for f in clusters[c]:
        sumd+=euclediandistance(f,c)
        if d<euclediandistance(f,c):
            d=euclediandistance(f,c)
    ss=sumd/(1.0*l)
    maxdist.append(d)
    S.append(ss)
# Pairwise centroid distance matrix.
Mdist=[]
for i in centroids:
    tempj=[]
    for j in centroids:
        tempj.append(euclediandistance(i,j))
    Mdist.append(tempj)
# Dunn index: minimum inter-centroid separation over the largest cluster
# diameter (higher is better).
dunnindex=0
x=[]
for i in range(len(S)):
    for j in range(len(S)):
        if i<j:
            d=Mdist[i][j]/max(maxdist)
            x.append(d)
dunnindex=min(x)
# Davies-Bouldin index: average over clusters of the worst similarity ratio
# (S_i + S_j) / centroid distance (lower is better).
Rdist=[]
for i in range(len(S)):
    tempj=[]
    for j in range(len(S)):
        num=S[i]+S[j]
        denom=Mdist[i][j]
        # only the lower triangle (i > j) contributes; the diagonal and
        # upper triangle are zeroed
        if denom==0 or i <j:
            tempj.append(0)
        else:
            tempj.append(num/(1.0*denom))
    Rdist.append(tempj)
D=[]
for i in Rdist:
    D.append(max(i))
DBvalue=sum(D)/(1.0*len(D))
print DBvalue
print dunnindex
|
import random
from elevator import Elevator
class Scenario:
    """Base class for elevator simulation scenarios.

    Subclasses call finished() when their goal is met; the simulation loop
    polls should_continue() every tick.
    """
    # should_continue() result codes
    CONTINUE = 0
    COMPLETED = 1
    TIMED_OUT = 2
    STUCK = 3

    def __init__(self):
        self._done = False
        # Trivial property getter/setter pairs replaced by plain public
        # attributes -- external read/write access is unchanged.
        self.max_duration = 60 * 2          # seconds before timing out
        self.min_building_height = 2        # floors required to run

    def finished(self):
        """Mark the scenario's goal as reached."""
        self._done = True

    def should_continue(self, elapsed, building):
        """Return COMPLETED, TIMED_OUT or CONTINUE for the current tick."""
        if self._done:
            return self.COMPLETED
        elif elapsed > self.max_duration:
            return self.TIMED_OUT
        else:
            return self.CONTINUE
class OneGuyGoesUp(Scenario):
    """One rider travels from the ground floor to the top floor."""

    def __init__(self, building):
        super().__init__()
        top = len(building.floors) - 1
        # Allow the optimal trip time plus a small grace period.
        self.max_duration = Elevator.calculate_optimal_trip(0, top) + 2

    def update(self, elapsed, building):
        # The single rider appears only on the very first tick.
        if elapsed != 0:
            return
        top = len(building.floors) - 1
        rider = building.new_rider(0, top, elapsed)
        rider.started_waiting = elapsed
        rider.on('reached_destination', lambda _: self.finished())
class TenRandomRides(Scenario):
    """Ten riders appear at random times and make random trips."""

    RIDE_START_CHANCE_PER_SECOND = 1 / 5

    def __init__(self, building):
        super().__init__()
        self.building_height = len(building.floors)
        self.max_duration = 10 * Elevator.calculate_optimal_trip(0, self.building_height - 1)
        self.riders_finished = 0
        self.riders_started = 0

    def handle_ride_finished(self, _):
        """Count a completed ride; the scenario ends after the tenth."""
        self.riders_finished += 1
        if self.riders_finished == 10:
            self.finished()

    def update(self, elapsed, building):
        if self.riders_started == 10:
            return
        # First rider starts immediately; later riders start probabilistically.
        # (short-circuit keeps the random stream identical to the original)
        if not (not self.riders_started or random.random() < self.RIDE_START_CHANCE_PER_SECOND):
            return
        top = self.building_height - 1
        start = end = random.randint(0, top)
        while end == start:
            end = random.randint(0, top)
        rider = building.new_rider(start, end, elapsed)
        rider.started_waiting = elapsed
        rider.on('reached_destination', self.handle_ride_finished)
        self.riders_started += 1
|
def main():
    """Read four integers from stdin and print num1**num2 + num3**num4.

    Python 2 script: input is read with raw_input().
    """
    values = [int(raw_input()) for _ in range(4)]
    print(values[0] ** values[1] + values[2] ** values[3])

if __name__ == "__main__":
    main()
|
# URL routes for the 'frontend' app.
from django.urls import path
from . import views
app_name = 'frontend'
urlpatterns = [
    path('', views.index, name='index'),
    # Steps 2 and 3 receive the record id as a URL path parameter.
    path('etapa2/<int:id>', views.etapa2, name='etapa2'),
    path('etapa3/<int:id>', views.etapa3, name='etapa3'),
]
'''
Theo Cocco
Thursday March 25, 2021
Lab 16 Compute Automated Readability Index
'''
from math import ceil
# ARI score -> reader age range and US grade level (scores are clamped to 14).
ari_scale = {
    1: {'ages': '5-6', 'grade_level': 'Kindergarten'},
    2: {'ages': '6-7', 'grade_level': '1st Grade'},
    3: {'ages': '7-8', 'grade_level': '2nd Grade'},
    4: {'ages': '8-9', 'grade_level': '3rd Grade'},
    5: {'ages': '9-10', 'grade_level': '4th Grade'},
    6: {'ages': '10-11', 'grade_level': '5th Grade'},
    7: {'ages': '11-12', 'grade_level': '6th Grade'},
    8: {'ages': '12-13', 'grade_level': '7th Grade'},
    9: {'ages': '13-14', 'grade_level': '8th Grade'},
    10: {'ages': '14-15', 'grade_level': '9th Grade'},
    11: {'ages': '15-16', 'grade_level': '10th Grade'},
    12: {'ages': '16-17', 'grade_level': '11th Grade'},
    13: {'ages': '17-18', 'grade_level': '12th Grade'},
    14: {'ages': '18-22', 'grade_level': 'College'}
}
# Book to analyse; the title is the filename minus its extension.
book = 'The Great Gatsby.txt'
book_title = book.replace('.txt', '')
with open(book, 'r', encoding='utf-8') as f:
    text = f.read()
# Characters excluded from the character count (punctuation plus space).
punct = '!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~ '
def compute(x,y,z):
    """Compute the Automated Readability Index from counts.

    x: characters, y: words, z: sentences. The raw score is clamped to the
    1..14 range so it always indexes `ari_scale`.
    """
    score = ceil(4.72 * (x/y) + .5 * (y/z) - 21.43)
    if score >= 14:
        score = 14
    elif score < 1:
        # Guard: very simple text yields a score below 1, which would
        # raise KeyError in the ari_scale lookups below.
        score = 1
    return score
def get_c(x):
    """Count the characters of x, ignoring punctuation and spaces.

    Relies on the module-level `punct` string of excluded characters.
    """
    stripped = x.translate(str.maketrans('', '', punct))
    return len(stripped)
def get_w(x):
    """Count whitespace-separated words in x."""
    return len(x.split())
def get_s(x):
    """Count sentences in x by splitting on periods.

    NOTE(review): counts the fragment after the final period too, so text
    ending in '.' is counted one high -- kept to match original behaviour.
    """
    return len(x.split('.'))
# Report the ARI, grade level and age range for the book.
# NOTE(review): compute()/get_*() are re-evaluated three times here; the
# result is deterministic, so this only costs time, not correctness.
print(f'''The ARI of {book_title} is {compute(get_c(text), get_w(text), get_s(text))}
This corresponds to a {ari_scale[compute(get_c(text), get_w(text), get_s(text))]['grade_level']} Grade level of difficulty
that is suitable for an average person {ari_scale[compute(get_c(text), get_w(text), get_s(text))]['ages']} years old.''')
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 3 02:45:36 2019
@author: Raktim Mondol
"""
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
#sns.set()
sns.set(style="whitegrid")
# Line plot of per-classifier precision, one line per method.
data=pd.read_csv('./figure_data/data_precision.csv')
# estimator=None draws every observation instead of aggregating.
ax = sns.lineplot(x="Classifier", y="Precision", hue="Method", estimator=None, lw=1.5, palette="Set1", data=data)
fig = ax.get_figure()
fig.savefig("./saved_figures/line_plot_precision.png", dpi=300)
# Rotate the labels on x-axis
|
def calcula(v):
    """Return v! (factorial of v); values below 1 give 1."""
    result = 1
    factor = v
    while factor > 1:
        result *= factor
        factor -= 1
    return result
# Read n from stdin and print n! (Python 2: raw_input / print statement).
n = int(raw_input())
print calcula(n)
|
#!/usr/bin/python3
#
# fields.py
#
# Definitions for all the fields in ORM layer
#
from datetime import datetime
def checker_choice(choices, item_type):
    """Raise TypeError unless every element of `choices` is an `item_type`."""
    for choice in choices:
        # idiom: `not isinstance(...)` instead of comparing against True
        if not isinstance(choice, item_type):
            raise TypeError('one of the choices is of wrong type')
class Field:
    # Generic field wrapper: records the declared type of an ORM column.
    def __init__(self, type):
        # `type` shadows the builtin, but the parameter name is public API.
        self.type = type
class Integer:
    # Descriptor field holding an int value, with optional choices/default.
    def __init__(self, blank=-1, default=None, choices=None):
        # blank=-1 means "unspecified"; an explicit default implies blank=True.
        if choices != None:
            checker_choice(choices, int)
        if blank == False:
            raise AttributeError("blank is false")
        if default is None:
            default = 0
        else:
            blank = True
            if isinstance(default, int) == False:
                raise TypeError('default is of wrong type')
        if choices != None and default not in choices:
            raise TypeError('default cannot be found in choices')
        self.blank = blank
        self.default = default
        self.choices = choices
        pass
    def setname(self,name):
        # Values are stored on the owner instance under '_<field name>'.
        self.name = '_' + name
    def __set__(self, inst, value):
        # NOTE(review): this setter mutates self.default to the -99999
        # sentinel after the first explicit assignment. The descriptor is
        # shared by every instance of the owning class, so that state leaks
        # across instances -- confirm this one-shot default is intended.
        if value == "DEFAULT" and self.default == 0:
            value = 0
            self.blank = True
        else:
            if value != "DEFAULT":
                # first non-DEFAULT write is replaced by the declared default
                if self.default != 0 and self.default != -99999:
                    value = self.default
                    self.default = -99999
                else:
                    value = value
            elif value == "DEFAULT":
                value = self.default
        if self.choices:
            if value not in self.choices and value != 0:
                raise ValueError ("not in choices")
        # type(...) != int (rather than isinstance) also rejects bool values
        if type(value) != int:
            raise TypeError("trying ")
        setattr(inst, self.name, value)
    def __get__(self, inst,cls):
        return getattr (inst, self.name)
class Float:
    # Descriptor field holding a float value, with optional choices/default.
    def __init__(self, blank=-1, default=None, choices=None):
        # blank=-1 means "unspecified"; an explicit default implies blank=True.
        if blank == False:
            raise AttributeError("blank is false")
        if choices != None:
            checker_choice(choices, float)
        if default is None:
            default = 0.0
        else:
            blank = True
            if isinstance(default, float) == False:
                raise TypeError('default is of wrong type')
        if choices != None and default not in choices:
            raise TypeError('default cannot be found in choices')
        self.blank = blank
        self.default = default
        self.choices = choices
        pass
    def setname(self,name):
        # Values are stored on the owner instance under '_<field name>'.
        self.name = '_' + name
    def __get__(self, inst,cls):
        return getattr (inst, self.name)
    def __set__(self, inst, value):
        # NOTE(review): the first explicit assignment is replaced by the
        # declared default, after which self.default becomes the -99999.99
        # sentinel. The descriptor is shared by every instance of the owning
        # class, so that state leaks across instances -- confirm intended.
        if value == "DEFAULT" and self.default == 0.0:
            value = 0.0
            self.blank = True
        else:
            if value != "DEFAULT":
                if self.default != 0.0 and self.default != -99999.99:
                    value = self.default
                    self.default = -99999.99
                else:
                    value = value
            elif value == "DEFAULT":
                value = self.default
        if self.choices:
            if value not in self.choices and value != 0.0:
                raise ValueError ("not in choices")
        # unlike Integer, no type check: the value is coerced with float()
        setattr(inst, self.name, float(value))
class String:
    # Descriptor field holding a string value, with optional choices/default.
    def __init__(self, blank=-1, default=None, choices=None):
        # blank=-1 means "unspecified"; an explicit default implies blank=True.
        if blank == False:
            raise AttributeError("blank is false")
        if choices != None:
            checker_choice(choices, str)
        if default is None:
            # NOTE(review): `==` here is a no-op comparison -- presumably it
            # was meant to be `default = ''` (mirroring Integer's 0 and
            # Float's 0.0); as written the default stays None. Confirm the
            # intent before fixing, since __set__ branches on `is not None`.
            default == ''
        else:
            blank = True
            if isinstance(default, str) == False:
                raise TypeError('default is of wrong type')
        if choices != None and default not in choices:
            raise TypeError('default cannot be found in choices')
        self.blank = blank
        self.default = default
        self.choices = choices
        pass
    def setname(self,name):
        # Values are stored on the owner instance under '_<field name>'.
        self.name = '_' + name
    def __get__(self, inst,cls):
        # fast path: value already stored directly on the instance dict
        if self.name in inst.__dict__:
            return inst.__dict__[self.name]
        return getattr (inst, self.name)
    def __set__(self, inst, value):
        # NOTE(review): the file's indentation was lost; the default-
        # consuming block below is assumed to run on every set, matching
        # Integer/Float's one-shot default sentinel pattern -- confirm.
        if self.blank == -1 and value == "DEFAULT":
            raise AttributeError ("Blank not set")
        if value == "DEFAULT" and self.default is not None:
            value = ""
            self.blank = True
        if self.default is not None and self.default != "-999999.99":
            value = self.default
            self.default = "-999999.99"
        if self.choices:
            if value not in self.choices and value != "":
                raise ValueError ("not in choices")
        setattr(inst, self.name, value)
class Foreign:
    """Descriptor field referencing a row of another table."""

    def __init__(self, table, blank=-1):
        # blank=-1 means "unspecified"
        self.table = table
        self.blank = blank

    def setname(self, name):
        # Values live on the owner instance under '_<field name>'.
        self.name = '_' + name

    def __get__(self, inst, cls):
        return getattr(inst, self.name)

    def __set__(self, inst, value):
        # Raw integer ids are rejected; assign the referenced object itself.
        if type(value) == int:
            raise TypeError("Setting int to foreign")
        setattr(inst, self.name, value)
class DateTime:
    """Descriptor field holding a datetime value (validation mostly TODO)."""
    implemented = True
    def get_default(default):
        # NOTE(review): missing `self` -- usable as DateTime.get_default(x)
        # only; resolves a callable default (e.g. datetime.now) to a value.
        if callable(default):
            value = default()
        else:
            value = default
        return value
    def setname(self,name):
        # Values are stored on the owner instance under '_<field name>'.
        self.name = '_' + name
    def __init__(self, blank=False, default=None, choices=None):
        if choices != None:
            # BUG FIX: was `checker_choice(choices, dateTime)` -- the name
            # `dateTime` is undefined and raised NameError; choices must be
            # datetime instances (the module imports datetime at the top).
            checker_choice(choices, datetime)
        if default == None:
            #default = datetime.fromtimestamp(0)
            #default = datetime.now()
            default = 0
        else:
            blank = True
        # Resolve a callable default once; NOTE(review): the result is
        # discarded -- kept only to preserve the original call side effect.
        if callable(default):
            value = default()
        else:
            value = default
        #if isinstance(default, DateTime) == False:
        #    raise TypeError('default is of wrong type')
        if choices != None and default not in choices:
            raise TypeError('default cannot be found in choices')
        self.blank = blank
        self.default = default
        self.choices = choices
    def __get__(self, inst,cls):
        return getattr (inst, self.name)
    def __set__(self,inst,value):
        # No validation: any value is stored as-is.
        setattr(inst, self.name, value)
class Coordinate:
    """Descriptor field holding a (latitude, longitude) tuple.

    Latitude must lie in [-90, 90] and longitude in [-180, 180].
    """
    implemented = True
    def __init__(self, blank=False, default=None, choices=None):
        if choices != None:
            checker_choice(choices,tuple)
        if default == None:
            default = (0.0, 0.0)
        else:
            blank = True
        #if isinstance(default, tuple) == False:
        #    raise TypeError('default is of wrong type')
        if choices != None and default not in choices:
            raise TypeError('default cannot be found in choices')
        # NOTE(review): if default[0] is not numeric, validation is skipped
        # entirely (no raise) -- preserved as-is; confirm intended.
        if isinstance(default[0], int) or isinstance(default[0], float):
            if isinstance(default[1], int) or isinstance(default[1], float):
                pass
            else:
                raise TypeError('coordinate values of wrong type')
        # BUG FIX: the longitude lower bound tested default[0] instead of
        # default[1], so longitudes below -180 were accepted.
        if default[0]>90 or default[0]<-90 or default[1]>180 or default[1]<-180:
            raise ValueError('coordinates out of range')
        self.blank = blank
        self.default = default
        self.choices = choices
    def setname(self,name):
        # Values are stored on the owner instance under '_<field name>'.
        self.name = '_' + name
    def __get__(self, inst,cls):
        return getattr (inst, self.name)
    def __set__(self,inst,value):
        if isinstance(value,tuple) == False and value != "DEFAULT":
            raise TypeError('wrong type for coordinate')
        if value == "DEFAULT":
            value = (0.0,0.0)
        if isinstance(value[0], int) or isinstance(value[0], float):
            if isinstance(value[1], int) or isinstance(value[1], float):
                pass
            else:
                raise TypeError('coordinate values of wrong type')
        # BUG FIX: same latitude/longitude index mix-up as in __init__.
        if value[0]>90 or value[0]<-90 or value[1]>180 or value[1]<-180:
            raise ValueError('coordinates out of range')
        # NOTE(review): value was rewritten to (0.0, 0.0) above, so the
        # 'DEFAULT' comparisons below can no longer match the string --
        # preserved as-is from the original default-sentinel machinery.
        if self.default == (0.0,0.0) and value == 'DEFAULT':
            value = (0.0,0.0)
            self.blank = True
        else:
            if value != "DEFAULT":
                if self.default != 0.0 and self.default != (-99999.99,-999999.99):
                    value = self.default
                    self.default = (-99999.99,-999999.99)
                else:
                    value = value
            elif value == "DEFAULT":
                value = self.default
        if self.choices:
            if value not in self.choices and value != (0.0,0.0):
                raise ValueError ("not in choices")
        setattr(inst, self.name, value)
|
# Generated by Django 2.2.1 on 2020-09-16 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the optional `column_url` CharField to the columnlistsinfo model.
    dependencies = [
        ('backstage', '0020_auto_20200914_1844'),
    ]
    operations = [
        migrations.AddField(
            model_name='columnlistsinfo',
            name='column_url',
            field=models.CharField(blank=True, max_length=256, null=True, verbose_name='column_lists_nub'),
        ),
    ]
|
# IMPORT data processing and data visualisationn libraries
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import json
from operator import itemgetter
# NOTE(review): `global` at module level is a no-op -- these four statements
# do nothing; the names below are ordinary module globals.
global s,brnch,year
global brnch2,year2
global gpmembers
global gpname
gpmembers=[]  # group members (unused in this file -- confirm external use)
gpname=[]     # group names (unused in this file -- confirm external use)
# Create the main function to draw bar graph
def mainfunc(x,y,plot_type):
    # Draw SGPI/CGPI bar charts for one student (x) and optionally a second
    # student (y); plot_type selects "SGPI", "CGPI" or both.
    # Load the database as a dictionary from database1.txt file (created database)
    with open('dataBase1.txt', 'r') as f:
        s = f.read()
    database = json.loads(s)
    rollno = []
    rollno.append(x[0].upper())
    rollno2 = []
    if(len(y)==1):
        rollno2.append(y[0].upper())
    branch={ # branches as specified in the dataextractor file
        '1':"CIVIL",
        '2':"ELECTRICAL",
        '3':"MECHANICAL",
        '4':"ECE",
        '5':"CSE",
        '6':"ARCHI",
        '7':"Chemical",
        '8':"MATERIAL",
        "MI4":"ECE DUAl",
        "MI5":"CSE DUAL"
    }
    try:
        try: # Selecting year and branch on basis of rollno.
            # Roll numbers encode admission year in char 1 and the branch
            # code in chars 2..-2 -- presumably e.g. "17MI5xx"; confirm.
            year = 8-int(rollno[0][1])
            az = rollno[0][2:-2]
            for i in branch.keys():
                if az == i:
                    brnch = branch[i]
        except:
            # NOTE(review): bare except hides real errors; narrowing to
            # (IndexError, ValueError) would be safer.
            print("Please enter a valid rollno.")
        if(len(rollno2)==1):
            try:
                year2 = 8-int(rollno2[0][1])
                az2 = rollno2[0][2:-2]
                for i in branch.keys():
                    if az2 == i:
                        brnch2 = branch[i]
            except:
                print("Please enter a valid rollno.")
        # For SGPI data representation
        if("SGPI" in plot_type):
            semester = ['1st', '2nd', '3rd', '4th','5th', '6th', '7th', '8th'] # semesters list
            x = database[str(year)][brnch][rollno[0]]["sgpi"]
            y=[]
            for i in x:
                # entries look like "<sem>=<value>"; keep the numeric part
                m = ((i.split("=")[1]))
                y.append(float(m))
            #Creating dataframe of student having sgpi
            db = pd.DataFrame(y,columns=['SGPI'])
            db['Semester']=semester[:year*2]
            db["Name"] = list((database[str(year)][brnch][rollno[0]]["name"]) for i in range(year*2))
            if(len(rollno2)==1):
                q = database[str(year2)][brnch2][rollno2[0]]["sgpi"]
                y2=[]
                for i in q:
                    m2 = ((i.split("=")[1]))
                    y2.append(float(m2))
                db2 = pd.DataFrame(y2,columns=['SGPI'])
                db2['Semester']=semester[:year2*2]
                # NOTE(review): range(year*2) uses the FIRST student's year;
                # presumably this should be range(year2*2) -- confirm.
                db2["Name"] = list((database[str(year2)][brnch2][rollno2[0]]["name"]) for i in range(year*2))
                db = pd.concat([db,db2])
            # Plotting the bar graph
            sns.set(style="whitegrid")
            pl = sns.barplot(x="Semester", y="SGPI", data=db, hue='Name')#plotting barplot & setting parameters
            plt.ylim(1,12.5)
            plt.title("SGPI VS Semester")
            ax = plt.gca()
            totals=[]
            for i in ax.patches:
                totals.append(i.get_height())
            # set individual bar lables using above list
            total = sum(totals)
            # Setting the place of bar labels
            for i in ax.patches:
                # get_x pulls left or right; get_height pushes up or down
                z1=i.get_height()
                z1 = "%0.2f" % float(z1)
                ax.text(i.get_x()+(i.get_width()/2), i.get_height()/2, \
                    z1, fontsize=(20-(year*2)),
                    color='black', ha= 'center')
            plt.show() #Shows the plot
        # For CGPI data representation
        if("CGPI" in plot_type):
            semester = ['1st', '2nd', '3rd', '4th','5th', '6th', '7th', '8th']
            x = database[str(year)][brnch][rollno[0]]["cgpi"] #getting cgpi data of student
            y=[]
            for i in x:
                m = ((i.split("=")[1]))
                y.append(float(m))
            #Creating the dataframe of the student having CGPI
            db = pd.DataFrame(y,columns=['CGPI'])
            db['Semester']=semester[:year*2]
            db["Name"] = list((database[str(year)][brnch][rollno[0]]["name"]) for i in range(year*2))
            if(len(rollno2)==1):
                q = database[str(year2)][brnch2][rollno2[0]]["cgpi"]
                y2=[]
                for i in q:
                    m2 = ((i.split("=")[1]))
                    y2.append(float(m2))
                db2 = pd.DataFrame(y2,columns=['CGPI'])
                db2['Semester']=semester[:year2*2]
                db2["Name"] = list((database[str(year2)][brnch2][rollno2[0]]["name"]) for i in range(year*2))
                db = pd.concat([db,db2])
            #For class rank finding
            # make a sorted list of the class of student
            srt=[]
            for z in database[str(year)][brnch]:
                # last cgpi entry is the student's current CGPI
                z1 = database[str(year)][brnch][z]['cgpi'][-1]
                z1 = (float(z1.split("=")[1]))
                info = {
                    "name": database[str(year)][brnch][z]['name'],
                    "cgpi": z1
                }
                srt.append(info)
            srt = sorted(srt, key=itemgetter('cgpi', 'name'),reverse = True) #Creting sorted list of students
            #Plotting the bar graph for CGPI
            sns.set(style="whitegrid")
            pl = sns.barplot(x="Semester", y="CGPI", data=db, hue='Name') # Plotting & Setting parameters
            plt.ylim(1,12.5)
            plt.title("CGPI VS Semester")
            ax = plt.gca()
            totals=[]
            # Setting individual bar labels position
            for i in ax.patches:
                totals.append(i.get_height())
            # set individual bar lables using above list
            total = sum(totals)
            for i in ax.patches:
                # get_x pulls left or right; get_height pushes up or down
                z1=i.get_height()
                z1 = "%0.2f" % float(z1)
                ax.text(i.get_x()+(i.get_width()/2), i.get_height()/2, \
                    z1, fontsize=(20-(year*2)),
                    color='black', ha= 'center')
            if(len(rollno2)!=1):
                # Single-student plot: annotate the legend with class rank.
                z2 = database[str(year)][brnch][rollno[0]]['cgpi'][-1]
                z2 = (float(z2.split("=")[1]))
                dct = {"name" : database[str(year)][brnch][rollno[0]]["name"],
                    "cgpi" : z2
                }
                indx = srt.index(dct) # Finding class rank of student
                rank = "Class Rank : " + str(indx + 1)
                line, = ax.plot(0, label=rank)
                plt.legend()
            # NOTE(review): indentation was reconstructed; plt.show() is
            # assumed to run for both single- and two-student cases.
            plt.show()#plotting the bar chart with CGPI and rank
    except: # Exception handler
        # NOTE(review): bare except swallows every error, including typos.
        print("Please, enter a valid roll no.")
import tkinter as tk
from tkinter import messagebox
from PIL import Image, ImageTk
# Main window: titled, with a custom icon loaded via Pillow.
root = tk.Tk()
root.title('Another app')
img = ImageTk.PhotoImage(Image.open('files/logo.png'))
root.iconphoto(True, img)
def popup():
    """Show a yes/no/cancel dialog and report the choice in a new label."""
    response = messagebox.askyesnocancel('This is my Popup!', 'Hello World!')
    # askyesnocancel returns True (yes), False (no) or None (cancel).
    if response is None:
        text = 'You\'ve canceled task!'
    elif response:
        text = 'You clicked yes!'
    else:
        text = 'You clicked no!'
    tk.Label(root, text = text).pack()
# A single button that opens the dialog; mainloop blocks until window close.
tk.Button(root, text = 'popup', command = popup).pack()
root.mainloop()
from src.runner.runner import Runner
from src.adityaork.tree import predict_from_json
# Smoke-test the tree predictor on two training tasks (ids 14 and 15).
if __name__ == "__main__":
    p = Runner(14, file_list="train", verbose=True)
    s = predict_from_json(p.task_json)
    print(s)
    p = Runner(15, file_list="train", verbose=True)
    s = predict_from_json(p.task_json)
    print(s)
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy
# Min-max rescale the wall-area column X3 into the range [0, 1] and plot
# its distribution.
df = pd.read_excel("ENB2012_data.xlsx", "Sheet1")
expected = (0, 1)      # target range
initial = (245, 416)   # raw X3 range (min, max)
initialdiff = initial[1] - initial[0]
new = expected[1] - expected[0]
# BUG FIX: the original wrapped this in `for row in df:`, which iterates
# COLUMN NAMES, recomputed the identical rescaled Series once per column,
# and appended each copy to a list. One vectorised pass is sufficient.
store = (((df['X3'] - initial[0]) * new) / initialdiff) + expected[0]
print(df)
# `normed=True` was removed from matplotlib; density=True is the
# equivalent normalised histogram.
plt.hist(store, density=True)
plt.title("Normalization: Histogram for Energy Efficiency")
plt.ylabel("Frequency")
plt.xlabel("Wall Area")
plt.show()
from sklearn import tree
from loadData import ImportData
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Data Processing
# Column names for the UCI Adult census dataset; "goal" is the target.
headers = ["age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
           "occupation", "relationship", "race", "sex", "capital-gain", "capital-loss",
           "hours-per-week", "native-country", "goal"]
csv = 'http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
missing_value = ' ?'  # the dataset marks missing fields with ' ?'
adultData = ImportData(headers=headers, csv=csv, missing_value=missing_value, remove_goal=True)
# create split of test and train
X_train, X_test, y_train, y_test = train_test_split(adultData.X, adultData.y, test_size=0.25, random_state=42)
# Fit a decision tree and report held-out accuracy.
clf = tree.DecisionTreeClassifier()
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
print(accuracy_score(y_test, pred))
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 13:31:50 2018
This script should be performed in ArcGIS Pro python console.
To test algorithms, a python window can be opened from
C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3
To run a python script:
"c:\Program Files\ArcGIS\Pro\bin\Python\Scripts\propy.bat" script.py
@author: zhenlinz
"""
import arcpy
arcpy.env.overwriteOutput = True
# Workspace containing the aggregation-project shapefiles.
workspace = r'C:\Users\zhenlinz\Google Drive\1_Nutrient_Share\1_Projects_NUTRIENTS\Modeling\NOTES_ZZ\Projects\ModelAggregation\SuisunBay\AggProject'
arcpy.env.workspace = workspace
shapefilein = "AggProject/bathymetryPositivePlus"
Ncluster = 1000
# Output name encodes the cluster count and the clustered variable.
shapefileout = shapefilein + str(Ncluster) + 'NH4'
# Cluster on NH4 only, constrained so clusters stay spatially contiguous.
arcpy.stats.SpatiallyConstrainedMultivariateClustering(shapefilein+'.shp',
        shapefileout, ["NH4"], #["Depth","NO3","NH4"]
        number_of_clusters=Ncluster,
        spatial_constraints='CONTIGUITY_EDGES_ONLY')
#arcpy.stats.GenerateSpatialWeightsMatrix(shapefilein, "Index",
#        "example.swm", "CONTIGUITY_EDGES_ONLY ", "EUCLIDEAN")
#
#arcpy.ConvertSpatialWeightsMatrixtoTable_stats("example.swm","example.dbf")
#list : ordered collection of items
# we can store anything in a list: int, float, string, ...
a=[1,2,3,4,5]
print(a)
word=["raj", "Kumar",'Thakur']
print(word)
mixed=[1,2,3,'raju','chaha',4,6,None]
print(mixed)
#to access an element of the list by zero-based index
print(mixed[4])
print(mixed[3:5])  # slice: elements at index 3 and 4
mixed[2]=3.0 #change element 3 with 3.0
print(mixed) #[1, 2, 3.0, 'raju', 'chaha', 4, 6, None]
# slice assignment replaces everything from index 1 onward
mixed[1:]=['one','two']
print(mixed) #[1, 'one', 'two']
|
'''
Created on 2018. 1. 28.
@author: hillk
'''
import sys
from konlpy.tag import Twitter
# NOTE(review): konlpy renamed the Twitter tagger to Okt in newer releases;
# confirm the installed konlpy version still exposes Twitter.
twitter = Twitter()
print('한글 문장을 입력하세요.')  # prompt: "Enter a Korean sentence."
try:
    # Read lines until Ctrl-C, printing the morpheme split of each.
    while True:
        sys.stdout.write('>> ')
        sys.stdout.flush()
        text = sys.stdin.readline().strip()
        if text:
            answer = twitter.morphs(text)
            print(answer)
except KeyboardInterrupt:
    pass
|
from django.shortcuts import redirect
import datetime
#JABM (09-05/2020): Se agrega archivo de decoradores.py
from django.utils.decorators import method_decorator
def esta_logueado(vista):
    # JABM: decorator that checks the user is logged in -- "logged in"
    # means the username, password and token were all verified.
    def interna(request):
        if not request.session.get('logueado', False):
            # JABM (09-05-2020): not logged in -> redirect to the login page.
            return redirect('/login/')
        # JABM (09-05-2020): logged in -> allow access to the resource.
        return vista(request)
    return interna
#
def esperando_token_global(vista):
    # Decorator: only allow access while a global-admin token is pending.
    def interna(request):
        if not request.session.get('token_global', False):
            # MML (06-07-2020): no pending global token -> back to login.
            return redirect('login_global')
        return vista(request)
    return interna
def esperando_token(vista):
    # JABM: decorator that checks the user is awaiting a token -- i.e. has
    # already entered a valid username/password and is on the
    # "solicitando_token" page.
    def interna(request):
        if not request.session.get('token', False):
            # JABM (09-05-2020): credentials not entered yet -> go to login.
            return redirect('login')
        # JABM (09-05-2020): credentials already entered -> allow access.
        return vista(request)
    return interna
def no_esta_logueado(vista):
    # JABM: decorator for anonymous-only views -- "logged in" means the
    # username, password and token were all verified.
    def interna(request):
        if request.session.get('global', False):
            # JABM (09-05-2020): logged in as global admin -> global index.
            return redirect('global:index')
        if request.session.get('logueado', False):
            # JABM (09-05-2020): logged in -> redirect to the servers page.
            return redirect('/servidores/')
        # JABM (09-05-2020): not logged in -> allow access to the resource.
        return vista(request)
    return interna
def esta_logueado_global(vista):
    # MML: decorator that checks a global-admin session is active
    # (username, password and token all verified).
    def interna(request):
        if not request.session.get('global', False):
            # MML (05-07-2020): not logged in -> redirect to global login.
            return redirect('login_global')
        # MML (05-07-2020): logged in -> allow access to the resource.
        return vista(request)
    return interna
def class_view_decorator(function_decorator):
    """Adapt a function-view decorator so it can wrap a class-based view.

    The decorator is applied to the view class's dispatch() method via
    Django's method_decorator, and the class itself is returned.
    """
    def wrap(view_class):
        decorated = method_decorator(function_decorator)
        view_class.dispatch = decorated(view_class.dispatch)
        return view_class
    return wrap
#!/usr/bin/env python
'''
Created by Samvel Khalatyan, Jul 11, 2012
Copyright 2012, All rights reserved
'''
from __future__ import print_function, division
import math
import ROOT
from config import channel
from root import stats
from template import templates
class Templates(templates.Templates):
    ''' Produce S / B plot '''
    def __init__(self, options, args, config):
        templates.Templates.__init__(self, options, args, config)
        # channels treated as background when stacking
        self._background_channels = set(["ttbar",])
    def plot(self):
        ''' Process loaded histograms and draw these '''
        canvases = []
        for plot, channels in self.plots.items():
            signal = ROOT.THStack()
            background_ = ROOT.THStack()
            legend = ROOT.TLegend(0, 0, 0, 0)
            # channel draw order from config, any extras appended at the end
            order = self._channel_config["order"]
            order.extend(set(channels.keys()) - set(order))
            # split channels into stacks
            # NOTE(review): `background` is assigned but never used --
            # `background_` (the THStack) is what gets drawn; confirm.
            background = None
            for channel_ in order:
                if channel_ not in channels:
                    continue
                hist = channels[channel_]
                # normalize each histogram to unit area
                hist.Scale(1 / hist.Integral())
                title = "#Delta R(q_{1}, q_{2}, b)"
                # pick a y-axis title from the plot path, if recognised
                for key, value in {
                        "/pt": "P_{T}",
                        "/eta": "#eta",
                        "/mass": "M",
                        }.items():
                    if key in plot:
                        title = value
                hist.GetYaxis().SetTitle("dN / N / d" +
                                         title + "")
                if channel_ in self._background_channels:
                    # backgrounds drawn as unfilled lines in the fill colour
                    hist.SetLineColor(hist.GetFillColor())
                    hist.SetLineStyle(1)
                    hist.SetLineWidth(2)
                    hist.SetFillStyle(0)
                    background_.Add(hist)
                elif (channel_.startswith("zprime") or
                      channel_.startswith("kkgluon")):
                    signal.Add(hist)
                else:
                    continue  # skip channels that are neither bg nor signal
                legend.AddEntry(hist,
                                self._channel_config["channel"][channel_]
                                ["legend"], "l")
            canvas = self.draw_canvas(plot, signal=signal,
                                      background=background_,
                                      legend=legend, uncertainty=False)
            if canvas:
                canvases.append(canvas)
        return canvases
    def draw(self, background=None, uncertainty=None, data=None, signal=None):
        ''' Draw background and signal only '''
        if background:
            background.Draw("9 hist same nostack c")
        if signal:
            signal.Draw("9 hist same nostack c")
class DRTemplates(templates.Templates):
    ''' Produce S / B plot '''
    def __init__(self, options, args, config):
        templates.Templates.__init__(self, options, args, config)
        # channels treated as background when stacking
        self._background_channels = set(["ttbar",])
    def plot(self):
        '''
        Process loaded histograms and draw them

        The plot() method is responsible for processing loaded channels, e.g.
        put data into Stack, conbine signals, etc.
        '''
        canvases = []
        bg_channels = set(["mc", "qcd"])
        channel.expand(self._channel_config, bg_channels)
        for plot_, channels in self.plots.items():
            # Prepare stacks for data, background and signal
            signal = ROOT.THStack()
            background = ROOT.THStack()
            data = None
            legend = ROOT.TLegend(0, 0, 0, 0)  # coordinates will be adjusted
            # prepare channels order and append any missing channels to the
            # end in random order
            order = self._channel_config["order"]
            order.extend(set(channels.keys()) - set(order))
            # process channels in order
            backgrounds = []  # backgrounds should be added to THStack in
                              # reverse order to match legend order
            for channel_ in order:
                if channel_ not in channels:
                    continue
                hist = channels[channel_]
                hist.Scale(1 / hist.Integral())  # normalize to unit area
                hist.SetFillStyle(0)
                if channel_ in bg_channels:
                    backgrounds.append(hist)
                    label = "l"
                elif (channel_.startswith("zprime") or
                      channel_.startswith("kkgluon")):
                    # Signal order does not matter
                    signal.Add(hist)
                    label = "l"
                elif channel_ == "data":
                    data = hist
                    label = "l"
                legend.AddEntry(hist,
                                self._channel_config["channel"][channel_]
                                ["legend"],
                                label)
            # Add backgrounds to the Stack.
            # BUG FIX: this was `map(background.Add, reversed(backgrounds))`;
            # map() is lazy on Python 3 and never called Add. An explicit
            # loop behaves identically on Python 2 and correctly on 3.
            for bg_hist in reversed(backgrounds):
                background.Add(bg_hist)
            canvas = self.draw_canvas(plot_,
                                      signal=signal, background=background,
                                      data=data, legend=legend,
                                      uncertainty=False)
            if canvas:
                canvases.append(canvas)
        return canvases
    def draw(self, background=None, uncertainty=None, data=None, signal=None):
        ''' Let sub-classes redefine how each template should be drawn '''
        if background:
            background.Draw("9 hist same nostack")
        if data:
            data.Draw("9 hist same")
        if signal:
            signal.Draw("9 hist same nostack")
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
__author__ = 'bool'
from api import Api
# Fetch and parse a page via the project's Api helper.
api = Api()
soup = api.asdbi()  # NOTE(review): presumably returns parsed soup -- confirm in api.Api
# print(soup)
|
# from .base import * # noqa
# DEBUG = False
# ALLOWED_HOSTS = ['evaluations.sens-large.com']
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'senslarge',
# 'USER': 'senslarge',
# }
# }
# STATIC_ROOT = '/srv/app/senslarge/static/'
# MEDIA_ROOT = '/srv/app/senslarge/media/'
# INSTALLED_APPS += [ # noqa
# 'raven.contrib.django.raven_compat',
# ]
# RAVEN_CONFIG = {
# 'dsn': 'https://27e44805b758497f82409820824b24b4:'
# 'aba8b4ab78e749108b38038e6dff3f92@sentry.hashbang.fr/28',
# }
# MIDDLEWARE = [
# 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
# ] + MIDDLEWARE # noqa
# DEFAULT_FROM_EMAIL = 'noreply@evaluations.sens-large.com'
# WEASYPRINT_BASEURL = 'https://evaluations.sens-large.com'
# EMAIL_HOST_USER = 'postmaster@evaluations.sens-large.com'
# EMAIL_HOST_PASSWORD = 'dbb4376978b47cad7a8990b97c1a4489'
|
import bz2
import os
# Round-trip a unicode string through a bz2-compressed text file, then show
# what happens when seeking into the middle of a multi-byte character.
# NOTE(review): `input` shadows the builtin throughout -- harmless here.
data = 'Character with an åccent.'
with bz2.open('example.bz2', 'wt', encoding='utf-8') as output:
    output.write(data)
with bz2.open('example.bz2', 'rt', encoding='utf-8') as input:
    print('Full file: {}'.format(input.read()))
# Move to the beginning of the accented character.
with bz2.open('example.bz2', 'rt', encoding='utf-8') as input:
    input.seek(18)
    print('One character: {}'.format(input.read(1)))
# Move to the middle of the accented character.
# Reading from inside a UTF-8 sequence cannot decode.
with bz2.open('example.bz2', 'rt', encoding='utf-8') as input:
    input.seek(19)
    try:
        print(input.read(1))
    except UnicodeDecodeError:
        print('ERROR: failed to decode')
|
# -*- coding: utf-8 -*-
# Odoo addon manifest for the "Pos Utilities" module.
{
    'name': 'Pos Utilities',
    'version': '1.0',
    'description': 'Utilidades POS',
    'summary': 'Cambios POS Odoo Experts',
    'author': 'Edgardo Ortiz <edgardoficial.yo@gmail.com>',
    'website': 'https://github.com/eortizromero/',
    'license': 'LGPL-3',
    'category': 'Odoo Experts',
    # Modules that must be installed before this one.
    'depends': [
        'base',
        'point_of_sale',
        'credit',
        #'contract',
        # 'flexibite_com_advance',
    ],
    # Data files loaded on install/update, in order.
    'data': [
        'security/ir.model.access.csv',
        'views/res_partner_view.xml',
        'views/account_cash_view.xml',
        'views/account_journal_view.xml',
        'views/eor_pos_templates.xml',
        'views/pos_session_view.xml',
        'views/contract_scheme_contract_views.xml',
        'views/contract_view.xml',
        'views/pos_order_pay_report_view.xml',
        'report/pos_receipt.xml',
    ],
    # QWeb templates bundled for the POS frontend.
    'qweb': ['static/src/xml/*.xml',],
    # NOTE(review): `auto_install: True` combined with `application: True` is
    # unusual — the module installs automatically once all dependencies are
    # present; confirm this is intended.
    'auto_install': True,
    'application': True,
} |
import os
import sys
import random
import json
import msgpack
from locust import HttpUser,TaskSet,task
class GoodsListTask(TaskSet):
    """Locust TaskSet that exercises the admin goods-list endpoint."""

    @task(1)
    def goods_list_task(self):
        """POST a goods-list query and report success/failure to locust."""
        goods_data = {
            "bound_to_chart": "all",
            "cid": "",
            "goods_category_id": "",
            "keyword": "",
            "keyword_type": "title",
            "limit": 10,
            "skip": 0,
            "sort": "sold_quantity_desc",
            "status": 1
        }
        # BUG FIX: with catch_response=True the response must be used as a
        # context manager — success()/failure() calls made outside the
        # `with` block are not reported back to locust's statistics.
        with self.client.post("/api/admin/goods/v2/list", json=goods_data, verify=False,
                              name="goods_list", catch_response=True) as req:
            if req.status_code == 200:
                data = json.loads(req.text)
                err = data.get("error")
                if err:
                    req.failure(str(err))
                else:
                    req.success()
            else:
                req.failure('Failed!')
class GoodsList(HttpUser):
    """HTTP user that runs the goods-list task set."""
    weight = 10  # relative selection weight among user classes
    # NOTE(review): `task_set` is the legacy (pre-1.0) locust attribute;
    # newer versions expect `tasks = [GoodsListTask]` — confirm version.
    task_set = GoodsListTask
    min_wait = 500  # minimum wait between tasks, in ms
    max_wait = 2000  # maximum wait between tasks, in ms
if __name__ == '__main__':
    # Launch locust against the staging host.
    # NOTE(review): the -f argument hard-codes "pttest.py" — this only works
    # if the script is actually saved under that name; confirm.
    os.system("locust -f pttest.py --host=https://wangcai-test-gz.xiaoduoai.com")
from collections import namedtuple
import datetime
import csv
import statistics
class DataPoint(namedtuple('DataPoint', ['date', 'value'])):
    """Immutable (date, value) record compared by `value` alone.

    Overrides the namedtuple's lexicographic ordering so comparisons
    ignore the date component.
    """
    __slots__ = ()

    def __le__(self, other):
        return self.value <= other.value

    def __lt__(self, other):
        return self.value < other.value

    def __gt__(self, other):
        return self.value > other.value

    def __ge__(self, other):
        # Added for symmetry: the original defined <=, < and > but not >=,
        # so >= silently fell back to the tuple's date-first ordering.
        return self.value >= other.value
def read_prices(csvfile, _strptime=datetime.datetime.strptime):
    """Yield a DataPoint(date, adjusted close) for every row of a CSV file.

    The CSV must contain 'Date' (YYYY-MM-DD) and 'Adj Close' columns.
    """
    with open(csvfile) as handle:
        for row in csv.DictReader(handle):
            yield DataPoint(
                date=_strptime(row['Date'], '%Y-%m-%d').date(),
                value=float(row['Adj Close']),
            )
# BUG FIX: read_prices returns a generator, which does not support slicing —
# `prices[1:]` raised TypeError. Materialize it so we can both slice and zip.
prices = tuple(read_prices('D:\\Programs\\Python\\repositories\\python_codes\\exercises\\test.csv'))
# Day-over-day percentage gain for each consecutive pair of prices.
gains = tuple(DataPoint(day.date, 100*(day.value/prev_day.value - 1.))
              for day, prev_day in zip(prices[1:], prices))
print(gains)
|
# Alias table: every spelling seen in the data sources, mapped to the
# canonical team name used by the rest of the pipeline. Keys are looked up
# AFTER the trailing-"St." -> "State" expansion in map_team_names().
_TEAM_NAME_ALIASES = {
    'University of California': 'California',
    'Central Connecticut': 'Central Connecticut State',
    'Illinois Chicago': 'Illinois-Chicago',
    'North Carolina-Wilmington': 'UNC Wilmington',
    'North Carolina Wilmington': 'UNC Wilmington',
    'Pennsylvania': 'Penn',
    "St. John's (NY)": "St. John's",
    'Miami FL': 'Miami (FL)',
    'Miami Fl': 'Miami (FL)',
    'Southern California': 'USC',
    'UC-Santa Barbara': 'UC Santa Barbara',
    'California Santa Barbara': 'UC Santa Barbara',
    'Troy': 'Troy State',
    'Brigham Young': 'BYU',
    'Louisiana State': 'LSU',
    'North Carolina-Asheville': 'UNC Asheville',
    'North Carolina Asheville': 'UNC Asheville',
    'Alabama-Birmingham': 'UAB',
    'Alabama Birmingham': 'UAB',
    'Texas-El Paso': 'UTEP',
    'Texas El Paso': 'UTEP',
    'Texas-San Antonio': 'UTSA',
    'Texas San Antonio': 'UTSA',
    'Louisiana': 'Louisiana Lafayette',
    'Louisiana-Lafayette': 'Louisiana Lafayette',
    'Virginia Commonwealth': 'VCU',
    'Central Florida': 'UCF',
    "Saint Mary's (CA)": "Saint Mary's",
    'Albany (NY)': 'Albany',
    'Texas A&M Corpus Chris': 'Texas A&M-Corpus Christi',
    'Texas Am Corpus Christi': 'Texas A&M-Corpus Christi',
    'Miami (OH)': 'Miami OH',
    'Miami Oh': 'Miami OH',
    'Nevada-Las Vegas': 'UNLV',
    'Nevada Las Vegas': 'UNLV',
    'Maryland-Baltimore County': 'UMBC',
    'Maryland Baltimore County': 'UMBC',
    'Texas-Arlington': 'UT Arlington',
    'Texas Arlington': 'UT Arlington',
    'Cal St. Fullerton': 'Cal State Fullerton',
    'Cal St. Northridge': 'Cal State Northridge',
    'Arkansas-Pine Bluff': 'Arkansas Pine Bluff',
    'Little Rock': 'Arkansas Little Rock',
    'Long Island University': 'LIU Brooklyn',
    'Loyola (MD)': 'Loyola MD',
    'Loyola Md': 'Loyola MD',
    'Detroit Mercy': 'Detroit',
    'Southern Mississippi': 'Southern Miss',
    'Southern Methodist': 'SMU',
    'UC-Irvine': 'UC Irvine',
    'California Irvine': 'UC Irvine',
    'Cal St. Bakersfield': 'Cal State Bakersfield',
    'UC-Davis': 'UC Davis',
    'California Davis': 'UC Davis',
    'North Carolina-Greensboro': 'UNC Greensboro',
    'North Carolina Greensboro': 'UNC Greensboro',
    'Texas Christian': 'TCU',
    'Loyola (IL)': 'Loyola Chicago',
    'Loyola Il': 'Loyola Chicago',
    'Gardner-Webb': 'Gardner Webb',
    'Prairie View': 'Prairie View A&M',
    'St Johns Ny': "St. John's",
    'Ucla': 'UCLA',
    'Mcneese State': 'McNeese State',
    'Saint Josephs': "Saint Joseph's",
    "St. Joseph's": "Saint Joseph's",
    'Iupui': 'IUPUI',
    'Depaul': 'DePaul',
    'Florida Am': 'Florida A&M',
    'Alabama Am': 'Alabama A&M',
    'Saint Marys Ca': "Saint Mary's",
    'Texas Am': 'Texas A&M',
    'Albany Ny': 'Albany',
    'Mount St Marys': "Mount St. Mary's",
    'Stephen F Austin': 'Stephen F. Austin',
    'Saint Peters': "Saint Peter's",
    'St Bonaventure': "St. Bonaventure",
    'North Carolina At': "North Carolina A&T",
    'College Of Charleston': 'College of Charleston',
    'N.C. State': 'NC State',
    'NCSU': 'NC State',
    'North Carolina State': 'NC State',
    'North Carolina State University': 'NC State',
    'UNC': 'North Carolina',
    'UConn': 'Connecticut',
    'Pitt': 'Pittsburgh',
    'Bethune-Cookman': 'Bethune Cookman',
    'Bowling Green State': 'Bowling Green',
    'California Baptist': 'Cal Baptist',
    'Long Beach State': 'Cal State Long Beach',
    'Charleston': 'College of Charleston',
    'Florida International': 'FIU',
    'Grambling State': 'Grambling',
    'LIU': 'LIU Brooklyn',
    'Louisiana-Monroe': 'Louisiana Monroe',
    'Maryland-Eastern Shore': 'Maryland Eastern Shore',
    'Citadel': 'The Citadel',
    'Massachusetts-Lowell': 'UMass Lowell',
    'Massachusetts Lowell': 'UMass Lowell',
    'Purdue-Fort Wayne': 'Purdue Fort Wayne',
    'St. Francis (NY)': 'St. Francis NY',
    'Saint Francis (PA)': 'St. Francis PA',
    'Tennessee-Martin': 'Tennessee Martin',
    'UC-Riverside': 'UC Riverside',
    'Missouri-Kansas City': 'UMKC',
    'Nebraska Omaha': 'Omaha',
    'USC Upstate': 'South Carolina Upstate',
    'Texas-Rio Grande Valley': 'UT Rio Grande Valley',
    'Njit': 'NJIT',
    'Ipfw': 'Purdue Fort Wayne',
    'California Riverside': 'UC Riverside',
    'Southern Illinois Edwardsville': 'SIU Edwardsville',
    'St Francis Ny': 'St. Francis NY',
    'Saint Francis Pa': 'St. Francis PA',
    'Missouri Kansas City': 'UMKC',
    'Virginia Military Institute': 'VMI',
    'William Mary': 'William & Mary',
    'Texas Pan American': 'UT Rio Grande Valley',
}


def map_team_names(name: str):
    """Normalize a school/team name to its canonical form.

    A name whose final token is 'St.' first has every 'St.' expanded to
    'State' (matching the original behavior); the (possibly expanded) name
    is then resolved through the alias table, and returned unchanged when
    no alias exists.

    Replaces the original ~120-branch if/elif chain with a single O(1)
    dictionary lookup; since each branch was a plain equality test against
    `name`, at most one could ever match, so the lookup is equivalent.
    """
    if name.split(' ')[-1] == 'St.':
        name = name.replace('St.', 'State')
    return _TEAM_NAME_ALIASES.get(name, name)
|
from picshell.engine.util.Format import Format
from picshell.util.FileUtil import FileUtil
from picshell.ui.Context import Context
import wx.lib.newevent
import wx
MY_EVT = None
(UpdateEvent, MY_EVT) = wx.lib.newevent.NewEvent()
class CLED:
    """A single LED widget bound to a memory address/bit.

    `execute()` is called with the current byte at `address`; when the
    masked bit changes, a wx event is posted so the UI thread can swap
    the on/off bitmap via `UpdateUI()`.
    """
    # bit in 1,2,4,8,16,32,64,128

    def __init__(self, ui, on, off, address, bit, name="", color="red", varAdrMapping=None):
        self.varname = address
        self.address = Format.toNumber(str(address), varAdrMapping)
        self.bit = bit
        self.color = color
        self.oldValue = -1
        self.name = name
        # BUG FIX: the original assigned `self.off = on` and `self.on = off`,
        # crossing the constructor arguments. CreateUI() normally overwrites
        # both, but callers that skip CreateUI() got inverted bitmaps.
        self.on = on
        self.off = off
        self.ui = ui
        self.reset()
        self.type = "CLED"
        Context.frame.Bind(MY_EVT, Context.frame.OnUpdateUI)

    def reset(self):
        """Reset the internal counter."""
        self.cpt = 0

    def execute(self, value):
        """Mask `value` with this LED's bit and post an update event on change."""
        value = value & self.bit
        if value != self.oldValue:
            self.oldValue = value
            evtCallBack = UpdateEvent(value=value, obj=self)
            wx.PostEvent(Context.frame, evtCallBack)

    def UpdateUI(self, value):
        """Swap the displayed bitmap; must run on the UI thread."""
        if value > 0:
            self.ui.SetBitmap(self.on)
        else:
            self.ui.SetBitmap(self.off)

    def CreateUI(self, parent, psizer):
        """Build the LED panel (bitmap + label) and add it to `psizer`."""
        iconPath = Context.icon_path()
        self.off = wx.Image(FileUtil.opj(iconPath + '/gray.GIF'), wx.BITMAP_TYPE_GIF).ConvertToBitmap()
        # Map the configured colour to its icon file. Unknown colours keep
        # the constructor-supplied "on" bitmap, as in the original.
        icon_by_color = {
            "red": '/red.GIF',
            "blue": '/blue.GIF',
            "green": '/green.GIF',
            "orange": '/orange.GIF',
            "yellow": '/yellow.GIF',
        }
        icon = icon_by_color.get(self.color)
        if icon is not None:
            # BUG FIX: the original "yellow" branch assigned `self.img_on`
            # (an attribute used nowhere else) instead of `self.on`, so
            # yellow LEDs never lit up.
            self.on = wx.Image(FileUtil.opj(iconPath + icon), wx.BITMAP_TYPE_GIF).ConvertToBitmap()
        panel = wx.Panel(parent, -1)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        panel.SetSizer(sizer)
        label = wx.StaticText(panel, -1, " " + self.name)
        self.ui = wx.StaticBitmap(panel, -1, self.off, (10, 0), (self.off.GetWidth(), self.off.GetHeight()))
        sizer.Add(self.ui, 0, 0)
        sizer.Add(label, 0, 0)
        psizer.Add(panel, 0, 0)
|
from typing import Optional
import tree
from ray.rllib.core.models.base import (
Encoder,
ActorCriticEncoder,
StatefulActorCriticEncoder,
STATE_IN,
STATE_OUT,
ENCODER_OUT,
)
from ray.rllib.core.models.base import Model, tokenize
from ray.rllib.core.models.configs import (
ActorCriticEncoderConfig,
CNNEncoderConfig,
MLPEncoderConfig,
RecurrentEncoderConfig,
)
from ray.rllib.core.models.specs.specs_base import Spec
from ray.rllib.core.models.specs.specs_base import TensorSpec
from ray.rllib.core.models.specs.specs_dict import SpecDict
from ray.rllib.core.models.torch.base import TorchModel
from ray.rllib.core.models.torch.primitives import TorchMLP, TorchCNN
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
class TorchActorCriticEncoder(TorchModel, ActorCriticEncoder):
    """An actor-critic encoder for torch."""
    framework = "torch"
    def __init__(self, config: ActorCriticEncoderConfig) -> None:
        # Initialize both bases explicitly; cooperative super() is not used
        # because both bases take the same `config` argument.
        TorchModel.__init__(self, config)
        ActorCriticEncoder.__init__(self, config)
class TorchStatefulActorCriticEncoder(TorchModel, StatefulActorCriticEncoder):
    """A stateful actor-critic encoder for torch."""
    framework = "torch"
    def __init__(self, config: ActorCriticEncoderConfig) -> None:
        # Initialize both bases explicitly; the stateful base additionally
        # manages recurrent state (see StatefulActorCriticEncoder).
        TorchModel.__init__(self, config)
        StatefulActorCriticEncoder.__init__(self, config)
class TorchMLPEncoder(TorchModel, Encoder):
    """An MLP encoder for torch: maps flat 1D observations to a 1D latent."""
    def __init__(self, config: MLPEncoderConfig) -> None:
        TorchModel.__init__(self, config)
        Encoder.__init__(self, config)
        # Create the neural network.
        self.net = TorchMLP(
            input_dim=config.input_dims[0],
            hidden_layer_dims=config.hidden_layer_dims,
            hidden_layer_activation=config.hidden_layer_activation,
            hidden_layer_use_layernorm=config.hidden_layer_use_layernorm,
            hidden_layer_use_bias=config.hidden_layer_use_bias,
            output_dim=config.output_layer_dim,
            output_activation=config.output_layer_activation,
            output_use_bias=config.output_layer_use_bias,
        )
    @override(Model)
    def get_input_specs(self) -> Optional[Spec]:
        # Expects a flat (batch, input_dim) observation tensor.
        return SpecDict(
            {
                SampleBatch.OBS: TensorSpec(
                    "b, d", d=self.config.input_dims[0], framework="torch"
                ),
            }
        )
    @override(Model)
    def get_output_specs(self) -> Optional[Spec]:
        # Produces a flat (batch, output_dim) encoding.
        return SpecDict(
            {
                ENCODER_OUT: TensorSpec(
                    "b, d", d=self.config.output_dims[0], framework="torch"
                ),
            }
        )
    @override(Model)
    def _forward(self, inputs: dict, **kwargs) -> dict:
        return {ENCODER_OUT: self.net(inputs[SampleBatch.OBS])}
class TorchCNNEncoder(TorchModel, Encoder):
    """A CNN encoder for torch.

    Stacks a bare-bones CNN and, when `config.flatten_at_end` is set, a
    flatten layer that maps the conv output into 1D space.
    """
    def __init__(self, config: CNNEncoderConfig) -> None:
        TorchModel.__init__(self, config)
        Encoder.__init__(self, config)
        layers = []
        # The bare-bones CNN (no flatten, no succeeding dense).
        cnn = TorchCNN(
            input_dims=config.input_dims,
            cnn_filter_specifiers=config.cnn_filter_specifiers,
            cnn_activation=config.cnn_activation,
            cnn_use_layernorm=config.cnn_use_layernorm,
            cnn_use_bias=config.cnn_use_bias,
        )
        layers.append(cnn)
        # Add a flatten operation to move from 2/3D into 1D space.
        if config.flatten_at_end:
            layers.append(nn.Flatten())
        # Create the network from gathered layers.
        self.net = nn.Sequential(*layers)
    @override(Model)
    def get_input_specs(self) -> Optional[Spec]:
        # Channels-last image input: (batch, width, height, channels).
        return SpecDict(
            {
                SampleBatch.OBS: TensorSpec(
                    "b, w, h, c",
                    w=self.config.input_dims[0],
                    h=self.config.input_dims[1],
                    c=self.config.input_dims[2],
                    framework="torch",
                ),
            }
        )
    @override(Model)
    def get_output_specs(self) -> Optional[Spec]:
        return SpecDict(
            {
                ENCODER_OUT: (
                    TensorSpec("b, d", d=self.config.output_dims[0], framework="torch")
                    if self.config.flatten_at_end
                    # BUG FIX: the non-flattened spec declared dims
                    # "b, w, h, c" but passed the channel size as `d=`,
                    # which does not exist in that dims string; it must
                    # be `c=` (mirroring get_input_specs above).
                    else TensorSpec(
                        "b, w, h, c",
                        w=self.config.output_dims[0],
                        h=self.config.output_dims[1],
                        c=self.config.output_dims[2],
                        framework="torch",
                    )
                )
            }
        )
    @override(Model)
    def _forward(self, inputs: dict, **kwargs) -> dict:
        return {ENCODER_OUT: self.net(inputs[SampleBatch.OBS])}
class TorchGRUEncoder(TorchModel, Encoder):
    """A recurrent GRU encoder.
    This encoder has...
    - Zero or one tokenizers.
    - One or more GRU layers.
    - One linear output layer.
    """
    # NOTE(review): no explicit linear output layer is created below despite
    # the docstring — confirm whether hidden_dim must equal output_dims[0].
    def __init__(self, config: RecurrentEncoderConfig) -> None:
        TorchModel.__init__(self, config)
        # Maybe create a tokenizer
        if config.tokenizer_config is not None:
            self.tokenizer = config.tokenizer_config.build(framework="torch")
            gru_input_dims = config.tokenizer_config.output_dims
        else:
            self.tokenizer = None
            gru_input_dims = config.input_dims
        # We only support 1D spaces right now.
        assert len(gru_input_dims) == 1
        gru_input_dim = gru_input_dims[0]
        # Create the torch GRU layer. (Original comment said "LSTM" — typo.)
        self.gru = nn.GRU(
            gru_input_dim,
            config.hidden_dim,
            config.num_layers,
            batch_first=config.batch_major,
            bias=config.use_bias,
        )
    @override(Model)
    def get_input_specs(self) -> Optional[Spec]:
        return SpecDict(
            {
                # b, t for batch major; t, b for time major.
                SampleBatch.OBS: TensorSpec(
                    "b, t, d",
                    d=self.config.input_dims[0],
                    framework="torch",
                ),
                # Incoming state is batch-first: (batch, layers, hidden).
                STATE_IN: {
                    "h": TensorSpec(
                        "b, l, h",
                        h=self.config.hidden_dim,
                        l=self.config.num_layers,
                        framework="torch",
                    ),
                },
            }
        )
    @override(Model)
    def get_output_specs(self) -> Optional[Spec]:
        return SpecDict(
            {
                ENCODER_OUT: TensorSpec(
                    "b, t, d", d=self.config.output_dims[0], framework="torch"
                ),
                # Outgoing state is batch-first again (transposed in _forward).
                STATE_OUT: {
                    "h": TensorSpec(
                        "b, l, h",
                        h=self.config.hidden_dim,
                        l=self.config.num_layers,
                        framework="torch",
                    ),
                },
            }
        )
    @override(Model)
    def get_initial_state(self):
        # Per-sample initial hidden state; the batch dim is added by the caller.
        return {
            "h": torch.zeros(self.config.num_layers, self.config.hidden_dim),
        }
    @override(Model)
    def _forward(self, inputs: dict, **kwargs) -> dict:
        outputs = {}
        if self.tokenizer is not None:
            # Push observations through the tokenizer encoder if we built one.
            out = tokenize(self.tokenizer, inputs, framework="torch")
        else:
            # Otherwise, just use the raw observations.
            out = inputs[SampleBatch.OBS].float()
        # States are batch-first when coming in. Make them layers-first.
        states_in = tree.map_structure(lambda s: s.transpose(0, 1), inputs[STATE_IN])
        out, states_out = self.gru(out, states_in["h"])
        states_out = {"h": states_out}
        # Insert them into the output dict.
        outputs[ENCODER_OUT] = out
        outputs[STATE_OUT] = tree.map_structure(lambda s: s.transpose(0, 1), states_out)
        return outputs
class TorchLSTMEncoder(TorchModel, Encoder):
    """A recurrent LSTM encoder.
    This encoder has...
    - Zero or one tokenizers.
    - One or more LSTM layers.
    - One linear output layer.
    """
    # NOTE(review): no explicit linear output layer is created below despite
    # the docstring — confirm whether hidden_dim must equal output_dims[0].
    def __init__(self, config: RecurrentEncoderConfig) -> None:
        TorchModel.__init__(self, config)
        # Maybe create a tokenizer
        if config.tokenizer_config is not None:
            self.tokenizer = config.tokenizer_config.build(framework="torch")
            lstm_input_dims = config.tokenizer_config.output_dims
        else:
            self.tokenizer = None
            lstm_input_dims = config.input_dims
        # We only support 1D spaces right now.
        assert len(lstm_input_dims) == 1
        lstm_input_dim = lstm_input_dims[0]
        # Create the torch LSTM layer.
        self.lstm = nn.LSTM(
            lstm_input_dim,
            config.hidden_dim,
            config.num_layers,
            batch_first=config.batch_major,
            bias=config.use_bias,
        )
    @override(Model)
    def get_input_specs(self) -> Optional[Spec]:
        return SpecDict(
            {
                # b, t for batch major; t, b for time major.
                SampleBatch.OBS: TensorSpec(
                    "b, t, d", d=self.config.input_dims[0], framework="torch"
                ),
                # Incoming hidden ("h") and cell ("c") states, batch-first.
                STATE_IN: {
                    "h": TensorSpec(
                        "b, l, h",
                        h=self.config.hidden_dim,
                        l=self.config.num_layers,
                        framework="torch",
                    ),
                    "c": TensorSpec(
                        "b, l, h",
                        h=self.config.hidden_dim,
                        l=self.config.num_layers,
                        framework="torch",
                    ),
                },
            }
        )
    @override(Model)
    def get_output_specs(self) -> Optional[Spec]:
        return SpecDict(
            {
                ENCODER_OUT: TensorSpec(
                    "b, t, d", d=self.config.output_dims[0], framework="torch"
                ),
                # Outgoing states are batch-first again (transposed in _forward).
                STATE_OUT: {
                    "h": TensorSpec(
                        "b, l, h",
                        h=self.config.hidden_dim,
                        l=self.config.num_layers,
                        framework="torch",
                    ),
                    "c": TensorSpec(
                        "b, l, h",
                        h=self.config.hidden_dim,
                        l=self.config.num_layers,
                        framework="torch",
                    ),
                },
            }
        )
    @override(Model)
    def get_initial_state(self):
        # Per-sample initial hidden/cell state; batch dim added by the caller.
        return {
            "h": torch.zeros(self.config.num_layers, self.config.hidden_dim),
            "c": torch.zeros(self.config.num_layers, self.config.hidden_dim),
        }
    @override(Model)
    def _forward(self, inputs: dict, **kwargs) -> dict:
        outputs = {}
        if self.tokenizer is not None:
            # Push observations through the tokenizer encoder if we built one.
            out = tokenize(self.tokenizer, inputs, framework="torch")
        else:
            # Otherwise, just use the raw observations.
            out = inputs[SampleBatch.OBS].float()
        # States are batch-first when coming in. Make them layers-first.
        states_in = tree.map_structure(lambda s: s.transpose(0, 1), inputs[STATE_IN])
        out, states_out = self.lstm(out, (states_in["h"], states_in["c"]))
        states_out = {"h": states_out[0], "c": states_out[1]}
        # Insert them into the output dict.
        outputs[ENCODER_OUT] = out
        outputs[STATE_OUT] = tree.map_structure(lambda s: s.transpose(0, 1), states_out)
        return outputs
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from abc import abstractmethod
from typing import Any, Mapping
from warnings import warn
from deprecate import deprecated
import flash
from flash.core.data.io.output import Output
from flash.core.data.properties import Properties
class Deserializer(Properties):
    """Deserializer."""
    def deserialize(self, sample: Any) -> Any:  # TODO: Output must be a tensor???
        """Convert a raw (serialized) sample into model input; subclasses override."""
        raise NotImplementedError
    @property
    @abstractmethod
    def example_input(self) -> str:
        """Return an example raw input string (abstract)."""
        raise NotImplementedError
    def __call__(self, sample: Any) -> Any:
        # Calling the deserializer directly is shorthand for `deserialize`.
        return self.deserialize(sample)
class DeserializerMapping(Deserializer):
    """Maps dataset keys to their dedicated deserializers."""
    # TODO: This is essentially a duplicate of OutputMapping, should be abstracted away somewhere

    def __init__(self, deserializers: Mapping[str, Deserializer]):
        super().__init__()
        self._deserializers = deserializers

    def deserialize(self, sample: Any) -> Any:
        # Guard clause: only mapping-shaped samples can be dispatched per key.
        if not isinstance(sample, Mapping):
            raise ValueError("The model output must be a mapping when using a DeserializerMapping.")
        return {
            key: deserializer.deserialize(sample[key])
            for key, deserializer in self._deserializers.items()
        }

    def attach_data_pipeline_state(self, data_pipeline_state: "flash.core.data.data_pipeline.DataPipelineState"):
        # Propagate the shared pipeline state to every child deserializer.
        for deserializer in self._deserializers.values():
            deserializer.attach_data_pipeline_state(data_pipeline_state)
class Serializer(Output):
    """Deprecated.
    Use ``Output`` instead.
    """
    @deprecated(
        None,
        "0.6.0",
        "0.7.0",
        # NOTE: `template_mgs` (sic) is the keyword name actually defined by
        # the `deprecate` package — do not "fix" the spelling.
        template_mgs="`Serializer` was deprecated in v%(deprecated_in)s in favor of `Output`. "
        "It will be removed in v%(remove_in)s.",
        stream=functools.partial(warn, category=FutureWarning),
    )
    def __init__(self):
        super().__init__()
        # presumably mirrors Output's enabled flag so legacy code keeps
        # working — confirm against Output.
        self._is_enabled = True
    @staticmethod
    @deprecated(
        None,
        "0.6.0",
        "0.7.0",
        template_mgs="`Serializer` was deprecated in v%(deprecated_in)s in favor of `Output`. "
        "It will be removed in v%(remove_in)s.",
        stream=functools.partial(warn, category=FutureWarning),
    )
    def serialize(sample: Any) -> Any:
        """Deprecated.
        Use ``Output.transform`` instead.
        """
        return sample
    def transform(self, sample: Any) -> Any:
        # Route the new Output.transform API through the legacy serialize().
        return self.serialize(sample)
|
from integration_tests.utils import populate_mock_db
from src.queries.get_top_user_track_tags import _get_top_user_track_tags
from src.utils.db_session import get_db
def test_get_top_user_track_tags(app):
    """Tests that top tags for users can be queried"""
    with app.app_context():
        db = get_db()
        # Five mock tracks; presumably populate_mock_db assigns them to
        # user 1 by default — confirm against integration_tests.utils.
        test_entities = {
            "tracks": [
                {"tags": ""},
                {},
                {"tags": "pop,rock,electric"},
                {"tags": "pop,rock"},
                {"tags": "funk,pop"},
            ]
        }
        populate_mock_db(db, test_entities)
        with db.scoped_session() as session:
            # The tag index is a materialized view — refresh it so the
            # inserts above become visible to the query.
            session.execute("REFRESH MATERIALIZED VIEW tag_track_user")
            user_1_tags = _get_top_user_track_tags(session, {"user_id": 1})
            user_2_tags = _get_top_user_track_tags(session, {"user_id": 2})
            # Four distinct tags; "pop" (3x) and "rock" (2x) lead the ranking.
            assert len(user_1_tags) == 4
            assert user_1_tags[0] == "pop"
            assert user_1_tags[1] == "rock"
            assert "electric" in user_1_tags
            assert "funk" in user_1_tags
            # User 2 owns no tagged tracks.
            assert not user_2_tags
|
def counting_valleys(s: str) -> int:
    """Count the valleys walked through in a hike description.

    `s` is a sequence of steps: 'D' goes one unit down, anything else goes
    one unit up. A valley is a maximal stretch spent below sea level
    (altitude 0); it is counted when the hiker climbs back up to sea level.
    """
    valleys = 0
    altitude = 0
    below_sea = False
    for step in s:
        if step == 'D':
            # Stepping down from sea level enters a new valley.
            below_sea = below_sea or altitude == 0
            altitude -= 1
        else:
            altitude += 1
            if altitude == 0 and below_sea:
                # Climbed back to sea level: the valley is complete.
                below_sea = False
                valleys += 1
    return valleys


if __name__ == '__main__':
    print(counting_valleys('UDDDUDUU'))
|
import json
import time
import nearest_neighbor
import pickle
from rtree_util import calcMinDist
from rtree_node import node
import matplotlib.pyplot as plt
def range_search(node, dist, reslist, point, dimen):
    """Collect into `reslist` every rectangle within `dist` of `point`.

    Depth-first descent over the R-tree: a subtree is only visited when its
    bounding rectangle's minimum distance to `point` is within `dist`.
    """
    if node.isLeaf == 1:
        # Leaf: keep every data rectangle close enough to the query point.
        reslist.extend(
            rect for rect in node.bounding_rectangles
            if calcMinDist(point, rect, dimen) <= dist
        )
        return
    # Internal node: recurse only into children whose MBR may contain hits.
    for i, rect in enumerate(node.bounding_rectangles):
        if calcMinDist(point, rect, dimen) <= dist:
            range_search(node.children_[i], dist, reslist, point, dimen)
def range_queries(nr):
    """Run the range queries from RangeQueries.txt against the pickled
    R-tree and plot result-set size vs. query radius.

    `nr` is only referenced by the commented-out result-file naming.
    """
    queryfile = open("RangeQueries.txt","r")
    rtreefile = open("rtree.pkl","rb")
    # outfilename = str(nr)+"rectangles_rangeoutput.txt"
    # outfile = open(outfilename, "w")
    # resfile = open("RangeResults_RTree.txt","a")
    root = pickle.load(rtreefile)
    # Dimensionality inferred from the root's first bounding rectangle.
    dimen = len(root.bounding_rectangles[0])
    queries=json.load(queryfile)
    res=[]
    res2=[]
    totaltime = 0
    # Only the first query is used; it appears to be [point, dist1, dist2, ...]
    # — confirm the RangeQueries.txt layout.
    query=queries[0]
    point=query[0]
    plotx = []
    ploty = []
    for i in range(1,len(query)):
        dist = query[i]
        reslist = []
        start = time.time()
        range_search(root, dist, reslist, point, dimen)
        end = time.time()
        interval=end-start
        totaltime += interval
        plotx.append(dist)
        ploty.append(len(reslist))
        # res2.append([len(reslist), interval])
        # res.append(reslist)
    # avgtime = totaltime/len(queryfile)
    # json.dump(res2, outfile)
    # resfile.write(str(nr)+" rectangles: "+str(avgtime))
    plt.plot(plotx,ploty)
    plt.show()
|
class Solution:
    """Fibonacci number implementations (recursive and iterative)."""

    def recursive_fibonacci(self, num):
        """Return the num-th Fibonacci number recursively.

        Returns None for negative or non-integer input.
        """
        # Check the type BEFORE comparing: the original ran `num < 0` first,
        # which raises TypeError for non-numeric input such as strings.
        if not isinstance(num, int) or num < 0:
            return
        if num == 0 or num == 1:
            return num
        # BUG FIX: the original called the non-existent method
        # `self.regular_fibonacci(n - 1)` with an undefined variable `n`.
        return (self.recursive_fibonacci(num - 1) +
                self.recursive_fibonacci(num - 2))

    def iterative_fibonacci(self, num):
        """Return the num-th Fibonacci number iteratively.

        Returns None for negative or non-integer input.
        """
        if not isinstance(num, int) or num < 0:
            return
        if num == 0 or num == 1:
            # BUG FIX: the original fell through to the loop and returned 0
            # for num == 1 (fib(1) is 1).
            return num
        f_first, f_second = 0, 1
        for _ in range(num - 1):
            f_first, f_second = f_second, f_first + f_second
        return f_second
|
import os
import sys
import re
import gzip
import csv
import json
import logging
from contextlib import contextmanager
from collections import defaultdict, OrderedDict
import numpy as np
import pandas as pd
import h5py as h5
logger = logging.getLogger('yanocomp')
# Silence pandas' SettingWithCopyWarning for the slice assignments below.
pd.options.mode.chained_assignment = None
# Functions for parsing input text files e.g. eventalign tsv or GTF
@contextmanager
def path_or_stdin_reader(fn):
    """Yield a generator of decoded text lines from '-' (stdin), a gzipped
    file, or a plain text file; both the handle and the generator are
    closed on exit.
    """
    if fn == '-':
        handle, decode_method = sys.stdin, str
    elif os.path.splitext(fn)[1] == '.gz':
        # gzip.open defaults to binary mode, so each line must be decoded.
        handle, decode_method = gzip.open(fn), bytes.decode
    else:
        handle, decode_method = open(fn), str
    line_gen = (decode_method(raw_line) for raw_line in handle)
    try:
        yield line_gen
    finally:
        handle.close()
        line_gen.close()
def parse_eventalign_summary(summary_fn):
    """Build a read_index -> read_name mapping from a nanopolish
    eventalign summary file (tab-separated).
    """
    read_name_idx = {}
    with path_or_stdin_reader(summary_fn) as handle:
        sm_parser = csv.DictReader(handle, delimiter='\t')
        # (The original also built an unused `fieldnames` set here; removed.)
        for record in sm_parser:
            read_name_idx[record['read_index']] = record['read_name']
    return read_name_idx
def parse_eventalign(eventalign_fn, summary_fn=None):
    """Parse a nanopolish eventalign TSV, yielding one dict per event.

    When the TSV lacks a read_name column, read names are resolved through
    the summary file's read_index -> read_name mapping.

    Raises:
        KeyError: if read names cannot be resolved, or the signal-index
            columns are missing.
    """
    if summary_fn is not None:
        read_name_idx = parse_eventalign_summary(summary_fn)
    else:
        read_name_idx = None
    with path_or_stdin_reader(eventalign_fn) as handle:
        ea_parser = csv.DictReader(handle, delimiter='\t')
        fieldnames = set(ea_parser.fieldnames)
        if 'read_name' not in fieldnames and read_name_idx is None:
            raise KeyError(
                'nanopolish must be either run with --print-read-names option '
                'or else a eventalign summary file must be provided'
            )
        if not fieldnames.issuperset(['start_idx', 'end_idx']):
            raise KeyError(
                'cannot find "start_idx" or "end_idx" fields, '
                'nanopolish must be run with --signal-index option'
            )
        for record in ea_parser:
            parsed = {}
            parsed['t_id'] = record['contig']
            # +2 presumably re-centers the position on the middle of the
            # 5-mer — TODO confirm against the nanopolish output convention.
            parsed['pos'] = int(record['position']) + 2
            parsed['kmer'] = record['reference_kmer']
            try:
                parsed['r_id'] = record['read_name']
            except KeyError:
                # Fall back to the summary-derived read-index mapping.
                parsed['r_id'] = read_name_idx[record['read_index']]
            parsed['mean'] = float(record['event_level_mean'])
            parsed['std'] = float(record['event_stdv'])
            parsed['duration'] = float(record['event_length'])
            # Number of raw signal points covered by this event.
            parsed['points'] = (int(record['end_idx']) -
                                int(record['start_idx']))
            yield parsed
def ignore_comments(handle, comment_char='#'):
    """Yield only the lines of `handle` that do not start with `comment_char`."""
    yield from (line for line in handle if not line.startswith(comment_char))
def parse_gtf_attrs(attrs):
    """Parse a GTF attribute string (key "value"; pairs) into a dict."""
    pairs = re.findall(r'(\w+) \"(.+?)\"(?:;|$)', attrs)
    return {key: value for key, value in pairs}
def read_gtf(gtf_fn, use_ftype):
    """Yield (chrom, start, end, ftype, strand, attrs) tuples for GTF
    records whose feature type equals `use_ftype`.

    Starts are converted from GTF's 1-based inclusive coordinates to
    0-based half-open ones.
    """
    gtf_fields = ['chrom', 'source', 'ftype',
                  'start', 'end', 'score',
                  'strand', 'frame', 'attrs']
    with path_or_stdin_reader(gtf_fn) as handle:
        gtf_parser = csv.DictReader(
            ignore_comments(handle),
            delimiter='\t',
            fieldnames=gtf_fields,
        )
        for record in gtf_parser:
            if record['ftype'] != use_ftype:
                continue
            yield (record['chrom'],
                   int(record['start']) - 1,
                   int(record['end']),
                   record['ftype'],
                   record['strand'],
                   parse_gtf_attrs(record['attrs']))
def load_gtf_database(gtf_fn, mapped_to='transcript_id', parent_id='gene_id'):
    """Group the GTF's exon records into per-transcript interval records.

    Returns {transcript_id: {'gene_id', 'chrom', 'strand', 'invs',
    'start', 'invs_lns', 'ln', 'inv_cs'}} where 'inv_cs' holds the
    cumulative start offset of each exon within the spliced transcript.
    """
    gtf = {}
    for chrom, start, end, ftype, strand, attrs in read_gtf(gtf_fn, 'exon'):
        transcript_id = attrs[mapped_to]
        gene_id = attrs[parent_id]
        entry = gtf.setdefault(transcript_id, {
            'gene_id': gene_id,
            'chrom': chrom,
            'strand': strand,
            'invs': [],
        })
        entry['invs'].append((start, end))
    for transcript in gtf.values():
        transcript['invs'].sort()
        transcript['start'] = transcript['invs'][0][0]
        lengths = [end - start for start, end in transcript['invs']]
        transcript['invs_lns'] = lengths
        transcript['ln'] = sum(lengths)
        # Cumulative offset of each exon in transcript coordinates.
        offsets = []
        running = 0
        for exon_len in lengths:
            offsets.append(running)
            running += exon_len
        transcript['inv_cs'] = offsets
    return gtf
# functions for creating/loading data from diffmod's HDF5 layout.
def incrementing_index(index, identifier):
    """Return the integer index of `identifier` in `index`, assigning the
    next free integer (the current dict size) when it is unseen.
    """
    # len(index) is evaluated before any insertion, so a new identifier
    # receives the next sequential index.
    return index.setdefault(identifier, len(index))
# On-disk record layout for a single squiggle event in the HDF5 store.
EVENT_DTYPE = np.dtype([
    ('transcript_idx', np.uint8),
    ('read_idx', np.uint32),
    ('pos', np.uint32),
    ('mean', np.float16),
    ('std', np.float16),
    ('duration', np.float16)
])
# (position, 5-mer) pairs; the kmer is a fixed-length HDF5 string.
KMER_DTYPE = np.dtype([
    ('pos', np.uint32),
    ('kmer', h5.string_dtype(length=5))
])
# Variable-length transcript ids; read ids are fixed 36-char strings
# (the length of a UUID — presumably nanopore read names; confirm).
TRANSCRIPT_DTYPE = h5.string_dtype()
READ_DTYPE = h5.string_dtype(length=36)
def save_events_to_hdf5(collapsed, hdf5_fn):
    """Write collapsed eventalign records into the diffmod HDF5 layout.

    `collapsed` yields (gene_id, chrom, strand, records) tuples. Each gene
    group receives `events`, `kmers`, `transcript_ids` and `read_ids`
    datasets plus `chrom`/`strand` attributes. Transcript and read names
    are replaced by small integer indices in the event records and stored
    once per gene in the id datasets.
    """
    kmer_index = defaultdict(dict)
    transcript_index = defaultdict(OrderedDict)
    read_index = defaultdict(OrderedDict)
    gene_attrs = {}
    with h5.File(hdf5_fn, 'w') as o:
        for gene_id, chrom, strand, records in collapsed:
            data = []
            gene_attrs[gene_id] = (chrom, strand)
            for _, t_id, r_id, pos, kmer, mean, std, duration in records:
                # Intern the string ids as per-gene sequential integers.
                t_x = incrementing_index(transcript_index[gene_id], t_id)
                r_x = incrementing_index(read_index[gene_id], r_id)
                data.append((t_x, r_x, pos, mean, std, duration))
                kmer_index[gene_id][pos] = kmer
            data = np.asarray(data, dtype=EVENT_DTYPE)
            n_records = len(data)
            try:
                # if records for transcript are split across
                # multiple chunks we will need to extend the existing
                # dataset.
                output_dataset = o[f'{gene_id}/events']
                i = len(output_dataset)
                output_dataset.resize(i + n_records, axis=0)
            except KeyError:
                # First chunk for this gene: create a resizable dataset.
                output_dataset = o.create_dataset(
                    f'{gene_id}/events',
                    shape=(n_records,),
                    maxshape=(None,),
                    dtype=EVENT_DTYPE,
                    chunks=True,
                    compression='gzip',
                )
                i = 0
            # Write the new records at the end of the (possibly grown) dataset.
            output_dataset[i: i + n_records] = data
        # now add kmers, read ids and transcript_ids
        for gene_id, kmer_pos in kmer_index.items():
            kmers = np.array(list(kmer_pos.items()), dtype=KMER_DTYPE)
            o.create_dataset(
                f'{gene_id}/kmers',
                data=kmers,
                dtype=KMER_DTYPE,
                compression='gzip'
            )
            # OrderedDict keys preserve first-seen order, so positions in
            # the id arrays line up with the integer indices in `events`.
            transcript_ids = np.array(
                list(transcript_index[gene_id].keys()),
                dtype=TRANSCRIPT_DTYPE
            )
            o.create_dataset(
                f'{gene_id}/transcript_ids',
                data=transcript_ids,
                dtype=TRANSCRIPT_DTYPE,
                compression='gzip'
            )
            read_ids = np.array(
                list(read_index[gene_id].keys()),
                dtype=READ_DTYPE
            )
            o.create_dataset(
                f'{gene_id}/read_ids',
                data=read_ids,
                dtype=READ_DTYPE,
                compression='gzip'
            )
            chrom, strand = gene_attrs[gene_id]
            o[gene_id].attrs.create(
                'chrom', data=chrom, dtype=h5.string_dtype()
            )
            o[gene_id].attrs.create(
                'strand', data=strand, dtype=h5.string_dtype()
            )
@contextmanager
def hdf5_list(hdf5_fns):
    """Open every path in `hdf5_fns` read-only; close all files on exit."""
    handles = [h5.File(fn, 'r') for fn in hdf5_fns]
    try:
        yield handles
    finally:
        for handle in handles:
            handle.close()
def get_shared_keys(hdf5_handles):
    '''
    Identify the intersection of the keys in a list of hdf5 files
    '''
    # Start from the first file's keys and intersect with each of the rest,
    # dropping anything not present in every sample.
    shared = set(hdf5_handles[0].keys())
    for handle in hdf5_handles[1:]:
        shared &= set(handle.keys())
    return list(shared)
def load_gene_kmers(gene_id, datasets):
    '''
    Collect all recorded kmers for *gene_id* across the datasets.

    Positions (and their kmers) may vary between datasets; later datasets
    overwrite earlier ones for a shared position. Returns a pandas Series
    mapping position -> kmer.
    '''
    pos_kmer_dtype = np.dtype([('pos', np.uint32), ('kmer', 'U5')])
    kmers = {}
    for dataset in datasets:
        records = dataset[f'{gene_id}/kmers'][:].astype(pos_kmer_dtype)
        for pos, kmer in records:
            kmers[pos] = kmer
    return pd.Series(kmers)
def load_gene_attrs(gene_id, datasets):
    '''
    Return (chrom, strand) for *gene_id*.

    Only the first dataset is consulted, since this general info is assumed
    to be identical across all datasets.
    '''
    attrs = datasets[0][gene_id].attrs
    return attrs['chrom'], attrs['strand']
def load_gene_events(gene_id, datasets,
                     by_transcript_ids=False):
    '''
    Extract the event alignment table for a given gene from a
    list of HDF5 file objects.

    Parameters
    ----------
    gene_id : str
        Gene whose events should be loaded.
    datasets : list
        Open HDF5 file handles, one per replicate (numbered from 1).
    by_transcript_ids : bool
        When True, return a dict of transcript_id -> events table;
        otherwise return one concatenated table for the whole gene.

    Returns a DataFrame of per-read mean currents indexed by
    (read_idx, replicate), one column per position (or a dict of such
    frames keyed by transcript id).
    '''
    if not by_transcript_ids:
        gene_events = []
    else:
        gene_events = defaultdict(list)
    for rep, d in enumerate(datasets, 1):
        # read full dataset from disk
        e = pd.DataFrame(d[f'{gene_id}/events'][:])
        # a read should contribute at most one event per position
        e.drop_duplicates(['read_idx', 'pos'], keep='first', inplace=True)
        # convert f16 to f64
        e.loc[:, 'mean'] = e['mean'].astype(np.float64, copy=False)
        e.loc[:, 'transcript_idx'] = e['transcript_idx'].astype('category', copy=False)
        # durations are analysed on a log10 scale
        e.loc[:, 'duration'] = np.log10(e['duration'].astype(np.float64, copy=False))
        # skip stalls longer than a second as they might skew the data
        e = e.query('duration <= 0')
        r_ids = d[f'{gene_id}/read_ids'][:].astype('U32')
        # replace the integer read index with the actual read id string
        e.loc[:, 'read_idx'] = e['read_idx'].map(dict(enumerate(r_ids)))
        e.loc[:, 'replicate'] = rep
        e.set_index(['pos', 'read_idx', 'replicate'], inplace=True)
        if by_transcript_ids:
            t_ids = d[f'{gene_id}/transcript_ids'][:]
            # map categorical transcript indices back to transcript ids
            e.transcript_idx.cat.rename_categories(
                dict(enumerate(t_ids)),
                inplace=True
            )
            for transcript_id, group in e.groupby('transcript_idx'):
                # pivot: rows (read, replicate), columns are positions
                group = group['mean'].unstack(0)
                gene_events[transcript_id].append(group)
        else:
            e = e['mean'].unstack(0)
            gene_events.append(e)
    if by_transcript_ids:
        gene_events = {
            t_id: pd.concat(e, sort=False)
            for t_id, e in gene_events.items()
        }
    else:
        gene_events = pd.concat(gene_events, sort=False)
    return gene_events
# Default kmer model shipped with this package (in data/ next to this
# module): expected signal distributions for r9.4 RNA 5mers.
DEFAULT_PRIORS_MODEL_FN = os.path.join(
    os.path.split(os.path.abspath(__file__))[0],
    'data/r9.4_70bps.u_to_t_rna.5mer.template.model'
)
def load_model_priors(model_fn=None):
    '''
    Load the parameters for the expected kmer distributions.

    Reads the tab-separated model file (defaulting to the bundled model
    when *model_fn* is None) and returns a DataFrame with rows
    'level_mean' and 'level_stdv' and one column per kmer.
    '''
    fn = DEFAULT_PRIORS_MODEL_FN if model_fn is None else model_fn
    model = pd.read_csv(fn, sep='\t', comment='#', index_col='kmer')
    # keep only the signal-level parameters, kmers become columns
    return model[['level_mean', 'level_stdv']].transpose()
def save_gmmtest_results(res, output_bed_fn):
    '''
    write main results to bed file

    One record per tested position: a BED6-style prefix (chrom, a
    kmer-centred interval [pos-2, pos+3), "gene:kmer" name, a score capped
    at 100 from -log10(FDR), strand) followed by the test statistics and
    the fitted GMM parameters for the central kmer.
    '''
    res = res.sort_values(by=['chrom', 'pos'])
    logger.info(f'Writing output to {os.path.abspath(output_bed_fn)}')
    with open(output_bed_fn, 'w') as bed:
        for record in res.itertuples(index=False):
            (chrom, pos, gene_id, strand, kmer,
             kmers, centre,
             log_odds, lb, ub,
             pval, fdr, c_fm, t_fm,
             g_stat, hom_g_stat,
             unmod_mus, unmod_stds, mod_mus, mod_stds,
             ks) = record
            try:
                # pick the parameters of the kmer at the window centre
                unmod_mu, unmod_std = unmod_mus[centre], unmod_stds[centre]
                mod_mu, mod_std = mod_mus[centre], mod_stds[centre]
            except TypeError:
                # no gmm testing
                unmod_mu, unmod_std, mod_mu, mod_std = np.nan, np.nan, np.nan, np.nan
            with np.errstate(divide='ignore'):
                # fdr == 0 gives inf; cap the BED score at 100
                score = int(round(min(- np.log10(fdr), 100)))
            bed_record = (
                f'{chrom:s}\t{pos - 2:d}\t{pos + 3:d}\t'
                f'{gene_id}:{kmer}\t{score:d}\t{strand:s}\t'
                f'{log_odds:.2f}[{lb:.2f},{ub:.2f}]\t'
                f'{pval:.2g}\t{fdr:.2g}\t'
                f'{c_fm:.2f}\t{t_fm:.2f}\t'
                f'{g_stat:.2f}\t{hom_g_stat:.2f}\t'
                f'{unmod_mu:.2f}\t{unmod_std:.2f}\t'
                f'{mod_mu:.2f}\t{mod_std:.2f}\t'
                f'{ks:.2f}\n'
            )
            bed.write(bed_record)
def save_sm_preds(sm_preds, cntrl_hdf5_fns, treat_hdf5_fns, output_json_fn):
    '''
    Serialise single-molecule predictions to JSON.

    The output records the control/treatment input file names alongside the
    predictions. When *output_json_fn* ends in ".gz" the file is
    gzip-compressed.
    '''
    sm_preds_json = {
        'input_fns': {
            'cntrl': dict(enumerate(cntrl_hdf5_fns)),
            'treat': dict(enumerate(treat_hdf5_fns)),
        },
        'single_molecule_predictions': sm_preds
    }
    if os.path.splitext(output_json_fn)[1] == '.gz':
        handle = gzip.open(output_json_fn, 'wt', encoding="ascii")
    else:
        handle = open(output_json_fn, 'w')
    # bug fix: use a context manager so the handle is closed even if
    # json.dump raises (previously close() was skipped on error)
    with handle:
        json.dump(sm_preds_json, handle)
# (1) basic list operations
# (A) build a list and append an element
# idiom fix: `list([1, 2, ...])` wrapped a literal in a redundant list() call
A = [1, 2, 3, 4, 5, 6]
A.append(8)
print(A)
# (B) remove the first occurrence of 3
A.remove(3)
print(A)
# (C) largest element
C = max(A)
print(C)
# (D) smallest element
D = min(A)
print(D)
# (2) reverse a tuple
org_tuple = (1, 2, 3, 4, 5)
print(org_tuple)
new_tuple = tuple(reversed(org_tuple))
print(new_tuple)
# (3) convert a tuple to a list
mon = (123, 'xyz', 12.34)
newlist = list(mon)
print(newlist)
#!/usr/bin/python3
# v5:
# Messages are not signed. HTTP Basic Auth is used instead (rely on SSL).
#
# [deprecated] v4:
# Messages are signed by the private key of the sender device
# Messages are verified by remote host with the public key of the sender device
#
# Demo: If arguments are supplied, they are sent as individual messages to glazerlab-i7nuc.
# If not, a list of sender's IPs is sent.
#
# Stanley H.I. Lio
# hlio@hawaii.edu
# University of Hawaii
# All Rights Reserved. 2017
import requests, time, socket, json, subprocess
from os.path import expanduser, join, exists
nodeid = socket.gethostname()
def getIP():
    """Return this host's IP addresses, as reported by `hostname -I`."""
    # fix: the original passed an argv-style list together with shell=True,
    # which only worked because the list held a single combined string.
    # Running without a shell is simpler and avoids shell interpretation.
    proc = subprocess.run(['hostname', '-I'], stdout=subprocess.PIPE)
    return proc.stdout.decode().strip().split(' ')
url = 'https://grogdata.soest.hawaii.edu/api/5/raw'
def post5(m, endpoint, auth):
    """POST the message string *m* to *endpoint* using HTTP basic auth.

    The payload carries the message, a timestamp and this node's hostname.
    Returns the response body as text.
    """
    payload = {'m': m, 'ts': time.time(), 'src': nodeid}
    response = requests.post(endpoint, data=payload, auth=auth)
    return response.text
if '__main__' == __name__:
    import sys
    # credentials module (cred.py) lives in the user's home directory
    sys.path.append(expanduser('~'))
    from cred import cred
    M = []
    if len(sys.argv) == 1:
        print('No argument supplied. Sending own IPs.')
        # send the IP list as a single compact JSON message
        M.append(json.dumps(getIP(), separators=(',', ':')))
    else:
        # each command-line argument becomes its own message
        M = sys.argv[1:]
    for m in M:
        print(post5(m, url, ('uhcm', cred['uhcm'])))
|
#!/usr/bin/env python2
# bug fix: shebang path was misspelled "/urs/bin/env"; also use the
# function form of print, which behaves identically here on both
# Python 2 and Python 3.
from mako.template import Template
# render a trivial template to verify mako works
mytemplate = Template("Hello world!")
print(mytemplate.render())
|
# -*- coding: utf-8 -*-
from selenium.webdriver.support.wait import WebDriverWait
from page.business_page.trip_page import TripPage
import time
from allure import MASTER_HELPER as allure
# Operations on the in-trip (on-ride) pages
class TripPageHandle(object):
    """Driver-side page handle for the in-trip flow: accepting an order,
    picking up / waiting for / dropping off the passenger, confirming the
    bill and finishing the trip."""
    def __init__(self, driver):
        self.trip_page = TripPage(driver)
    # Tap the accept-order button
    def click_order_button(self):
        self.trip_page.get_order_button().click()
        return self
    # Return the title text of the named page
    def get_trip_page_title(self, page_name):
        if page_name == "行程详情":  # trip details
            title_text = self.trip_page.get_trip_info_title().text
        elif page_name == "确认账单":  # confirm bill
            title_text = self.trip_page.get_bill_page_title().text
        elif page_name == "行程结束":  # trip finished
            title_text = self.trip_page.get_finish_order_page_title().text
        elif page_name == "等乘客":  # waiting for passenger
            title_text = self.trip_page.get_wait_page_title().text
        else:
            title_text = self.trip_page.get_page_title().text
        return title_text
    # Tap the trip-details entry point
    def click_trip_into_btn(self):
        self.trip_page.get_trip_info().click()
        return self
    # Swipe the slider button
    # NOTE(review): this method only calls itself while the slider is
    # displayed -- no swipe gesture is ever performed, so it recurses until
    # the element disappears (or forever if it never does). Confirm the
    # intended swipe implementation.
    def slide_button(self):
        if self.trip_page.get_slider_btn().is_displayed():
            self.slide_button()
        return self
    # Tap the back button on the named page
    def click_back_btn(self, page_name):
        if page_name == "行程详情":  # trip details
            self.trip_page.get_trip_info_back_btn().click()
        elif page_name == "确认账单":  # confirm bill
            self.trip_page.get_bill_page_back_btn().click()
        return self
    # Tap the take-a-break button
    def click_rest_btn(self):
        self.trip_page.get_rest_btn().click()
        return self
    # Tap the continue-taking-orders button
    def click_continue_btn(self):
        self.trip_page.get_continue_btn().click()
    # Main flow of an express (快车) real-time order
    def express_trip_main(self):
        # ========= check whether the dispatch page has appeared
        # WebDriverWait(self.driver,60).until(self.get_order_type().is_displayed())
        time.sleep(5)
        # tap the accept-order button if it is present
        if self.trip_page.get_order_button().is_displayed():
            self.trip_page.get_order_button().click()
        # ========= pick-up-passenger page
        # WebDriverWait(self.driver,10).until(self.get_trip_page_title("接乘客") == "接乘客快车乘客")
        if self.get_trip_page_title("接乘客") == "接乘客快车乘客":
            # open trip details
            self.click_trip_into_btn()
            # go back from the trip-details page
            self.click_back_btn("行程详情")
            print(self.trip_page.get_passenger_card_phone().text)
            print(self.trip_page.get_passenger_card_address_from().text)
            print(self.trip_page.get_passenger_card_address_to().text)
            if self.trip_page.get_slider_btn().text == "到达约定地点":
                self.slide_button()
        # =========== wait-for-passenger page
        # WebDriverWait(self.driver, 10).until(self.get_trip_page_title("等乘客") == "等快车乘客")
        time.sleep(5)
        if self.get_trip_page_title("等乘客") == "等快车乘客":
            # open trip details
            self.click_trip_into_btn()
            # go back from the trip-details page
            self.click_back_btn("行程详情")
            if self.trip_page.get_slider_btn().text == "接到乘客":
                self.slide_button()
        # ============ drop-off-passenger page
        time.sleep(5)
        # WebDriverWait(self.driver, 10).until(self.get_trip_page_title("送乘客") == "送快车乘客")
        if self.get_trip_page_title("送乘客") == "送快车乘客":
            # open trip details
            self.click_trip_into_btn()
            # go back from the trip-details page
            self.click_back_btn("行程详情")
            print(self.trip_page.get_passenger_card_phone().text)
            print(self.trip_page.get_passenger_card_address_from().text)
            print(self.trip_page.get_passenger_card_address_to().text)
            if self.trip_page.get_slider_btn().text == "到达约定地点":
                self.slide_button()
        # ============ confirm-bill page
        time.sleep(5)
        # WebDriverWait(self.driver, 10).until(self.get_trip_page_title("确认账单") == "确认账单")
        if self.get_trip_page_title("确认账单") == "确认账单":
            if self.trip_page.get_slider_btn().text == "发起收款":
                self.slide_button()
        # ============ trip-finished page
        time.sleep(5)
        # WebDriverWait(self.driver, 10).until(self.get_trip_page_title("行程结束") == "行程结束")
        if self.get_trip_page_title("行程结束") == "行程结束":
            print(self.trip_page.get_pay_status().text)
            print(self.trip_page.get_cost_value().text)
            self.click_continue_btn()
|
# GroundDug Permissions Cog
import discord
from discord.ext import commands
import asyncio
import cogs.utils.checks as checks
import cogs.utils.misc as misc
import cogs.utils.embed as embed
import cogs.utils.db as db
from cogs.logs import sendLog
async def changePermission(bot, ctx, user, permChangeTo, permission=None):
    """Set one (or all) of *user*'s GroundDug permissions to *permChangeTo*.

    When *permission* is None, nothing is changed: an embed is sent listing
    which permissions could still be changed to *permChangeTo* and how.
    "ADMINISTRATOR" toggles every permission at once.
    """
    # Get the current user permissions
    userPermissions = await db.getUser(ctx.guild.id, user.id)
    userPermissions = userPermissions["permissions"]
    # bug fix: compare to None with `is`
    if permission is None:
        prefix = await misc.getPrefix(bot, ctx)
        msg = await embed.generate("Permissions", f"{user.name}'s permissions that can be changed")
        for permission in userPermissions:
            permissionName = permission.split("_")
            # Capitalise each word of multi-word permission names
            if len(permissionName) >= 2:
                permissionName = f"{permissionName[0].lower().capitalize()} {permissionName[1].lower().capitalize()}"
            else:
                permissionName = permission.lower().capitalize()
            # Only list permissions not already set to the target value
            # bug fix: use != instead of identity comparison on booleans
            if userPermissions[permission] != permChangeTo:
                # In future, change this so permission does not need to include the underscore
                msg = await embed.add_field(msg, permissionName, f"Change this permission by running `{prefix}perms <give/remove> @{user.name}#{user.discriminator} {permission.lower()}`")
        # If no permissions were listed, all permissions are set to permChangeTo
        if msg.fields == []:
            msg.title = "All permissions are already set to this"
            msg.description = f"{user.mention} already has all their permissions set to `{permChangeTo}`"
        await ctx.send(embed=msg)
    else:
        # Make the given permission upper case, if it wasn't already
        permission = permission.upper()
        if permission == "ADMINISTRATOR":
            # Change all permissions to permChangeTo
            for perm in userPermissions:
                userPermissions[perm] = permChangeTo
        elif permission in userPermissions:
            userPermissions[permission] = permChangeTo
        else:
            return await embed.error(ctx, f"Permission {permission} cannot be found")
        # bug fix: persist once -- previously db.update ran twice per change
        await db.update("users", {"guild": ctx.guild.id, "user": user.id}, {"permissions": userPermissions})
        # Send a message letting the user know the permission change was successful
        await ctx.send(embed=(await embed.generate("Permission changed successfully!", f"Permission `{permission}` to {user.mention} was successfully changed, it is now set to `{permChangeTo}`")))
class Perms(commands.Cog):
    """Cog exposing commands to view and edit per-user GroundDug permissions."""
    def __init__(self,bot):
        self.bot = bot
    @commands.group(name="perms",description="Custom permissions")
    @commands.guild_only()
    async def perms(self,ctx):
        # Send a help perms command if no subcommand invoked
        if ctx.invoked_subcommand is None:
            await ctx.invoke(self.bot.get_command("help"),"perms")
        else:
            # log the subcommand invocation
            await sendLog(self,ctx,"perms")
    @perms.command(name="give",description="<user> [permission] | Give a user a GroundDug (`GD`) permission")
    @commands.guild_only()
    @checks.hasGDPermission("ADMINISTRATOR")
    async def add(self,ctx,user:discord.Member,permission=None):
        # grant: delegate with permChangeTo=True
        await changePermission(self.bot,ctx,user,True,permission)
    @perms.command(name="remove",description="<user> [permission] | Remove a users GroundDug (`GD`) permission")
    @commands.guild_only()
    @checks.hasGDPermission("ADMINISTRATOR")
    async def remove(self,ctx,user:discord.Member,permission=None):
        # revoke: delegate with permChangeTo=False
        await changePermission(self.bot,ctx,user,False,permission)
    @perms.command(name="massadd",description="<role> <permission> | Give multiple users within a role a GroundDug (`GD`) permission")
    @commands.guild_only()
    @checks.hasGDPermission("ADMINISTRATOR")
    async def massAdd(self,ctx,role:discord.Role,permission):
        # grant the permission to every member of the role
        for user in role.members:
            await changePermission(self.bot,ctx,user,True,permission)
    @perms.command(name="massremove",description="<role> <permission> | Remove a GroundDug (`GD`) permission from users within a role")
    @commands.guild_only()
    @checks.hasGDPermission("ADMINISTRATOR")
    async def massRemove(self,ctx,role:discord.Role,permission):
        # revoke the permission from every member of the role
        for user in role.members:
            await changePermission(self.bot,ctx,user,False,permission)
    @perms.command(name="list",description="[user] | Shows the current GroundDug (`GD`) permissions currently assigned to a user")
    @commands.guild_only()
    async def permsList(self,ctx,user:discord.Member=None):
        # default to the command author when no user is given
        if user is None:
            user = ctx.author
        msg = await embed.generate("Permissions",f"{user.name}'s GroundDug permissions")
        # Get the current user permissions
        userPermissions = await db.getUser(ctx.guild.id,user.id)
        userPermissions = userPermissions["permissions"]
        # For every permission, capitalise the name and set it to permissionName
        for permission in userPermissions:
            permissionName = permission.split("_")
            if len(permissionName) >= 2:
                permissionName = f"{permissionName[0].lower().capitalize()} {permissionName[1].lower().capitalize()}"
            else:
                permissionName = permission.lower().capitalize()
            # If the permission is set to True, add a field to the embed
            if userPermissions[permission]:
                msg = await embed.add_field(msg,permissionName,"<:check:679095420202516480>",True)
        await ctx.send(embed=msg)
def setup(bot):
    """Entry point used by discord.py to register this cog on the bot."""
    cog = Perms(bot)
    bot.add_cog(cog)
|
#!/usr/bin/python
import sys, re, math,random,os
import argparse
from ast import literal_eval as le
def read_from_clusters_cache_file(clusters_file):
    """Return the full text content of the clusters cache file."""
    # fix: use a context manager so the file handle is closed even if
    # read() raises (original open/close also left a pointless `del f`)
    with open(clusters_file, 'r') as f:
        return f.read()
def read_clusters(CLUSTERS_FILENAME):
    """Populate the global `clusters` and `delta_clusters` from the cache file.

    Each line is a python literal "(centroid_id, [coords...])"; lines whose
    coordinate vector is not of length NUM are skipped.
    """
    cluster_data = read_from_clusters_cache_file(CLUSTERS_FILENAME)
    for line in cluster_data.strip().split("\n"):
        # fix: parse the literal once per line (it was evaluated twice)
        parsed = le(line)
        centroid_id = parsed[0]
        coords = parsed[1]
        if len(coords) != NUM:
            continue
        clusters.append((centroid_id, coords))
        # (zero partial-sum vector, zero count) accumulator per centroid
        delta_clusters[centroid_id] = ([0] * NUM, 0)
def get_distance_coords(vec1, vec2):
    """Return the Manhattan (L1) distance between two equal-length vectors.

    NOTE(review): the original summed sqrt(x**2) per component, which is
    exactly abs(x) -- i.e. the L1 distance, not Euclidean. The behaviour
    is preserved here (just computed without the pointless sqrt/pow);
    confirm whether Euclidean distance was actually intended.
    """
    return sum(abs(a - b) for a, b in zip(vec1, vec2))
def get_nearest_cluster(vector):
    """Return the id of the cluster whose centroid is closest to *vector*."""
    best_id = None
    best_dist = 1000000000
    for cluster_id, centroid in clusters:
        d = get_distance_coords(vector, centroid)
        if d < best_dist:
            best_id, best_dist = cluster_id, d
    return best_id
def vecplus(vec1, vec2):
    """Return the element-wise sum of two equal-length vectors."""
    # idiom: comprehension over zip instead of an index loop
    return [a + b for a, b in zip(vec1, vec2)]
def mapper():
    """Hadoop-streaming map step (Python 2).

    Reads "key_part1_part2" lines from stdin, literal-parses and
    concatenates the two list parts into one vector of length NUM, assigns
    it to the nearest cluster and accumulates per-cluster partial sums.
    Finally emits "cluster_id<TAB>[partial_sum];count" for every cluster.
    """
    while True:
        line = sys.stdin.readline()
        if not line:
            break
        ws = line.strip('\n').split('_')
        key = ws[0]
        # parts 1 and 2 are python-literal lists; '+' concatenates them
        value = le(ws[1])+le(ws[2])
        # skip malformed vectors
        if len(value)!=NUM:
            continue
        nearest_cluster_id = get_nearest_cluster(value)
        accsum, cont = delta_clusters[nearest_cluster_id]
        delta_clusters[nearest_cluster_id] = (vecplus(accsum,value), cont+1)
    # emit the accumulated partial sums for every known cluster
    for key in delta_clusters:
        accsum,cont = delta_clusters[key]
        print str(key) + "\t" + str(accsum)+";"+str(cont)
def avesum(vec, cnt):
    """Divide every component of *vec* by *cnt* (element-wise mean helper)."""
    # idiom: comprehension instead of an index loop; the `/` operator is
    # kept as-is to preserve this script's original division semantics
    return [component / cnt for component in vec]
def suggest_valid_coords_to_cluster():
    """Propose replacement coordinates for a cluster that attracted no points.

    With fewer than two valid clusters, a random point in the region is
    drawn; otherwise the new centre is the midpoint of two distinct,
    randomly chosen valid clusters.
    """
    valid_clusters_count = len(valid_clusters)
    if valid_clusters_count <= 1:
        # Taking random values for a new coordinate
        new_center = get_random_coords_in_region()
    else:
        # Taking two clusters and positioning this on their average
        cid1 = random.randint(0, valid_clusters_count-1)
        cid2 = random.randint(0, valid_clusters_count-1)
        # re-draw until the two picked clusters differ
        while cid1 == cid2:
            cid2 = random.randint(0, valid_clusters_count-1)
        cluster1 = valid_clusters[cid1]
        cluster2 = valid_clusters[cid2]
        new_center=avesum(vecplus(cluster1,cluster2),2)
    return new_center
def get_random_coords_in_region():
    """Draw a random point with NUM components, each uniform in [-1, 1]."""
    return [random.uniform(-1, 1) for _ in range(NUM)]
def emit_new_lat_long(cluster_id, accsum_total, count_total):
    """Print the updated centre "(cluster_id,[coords])" for one cluster.

    A cluster that attracted no points (count 0) is re-seeded via
    suggest_valid_coords_to_cluster(); otherwise the new centre is the mean
    of the accumulated sums. Every emitted centre is also recorded in the
    global `valid_clusters` list.
    """
    if count_total == 0: #the cluster did not attract any point: move it to a new coord
        new_center = suggest_valid_coords_to_cluster()
    else:
        new_center=avesum(accsum_total,count_total)
    valid_clusters.append(new_center)
    print "("+str(cluster_id) + "," + str(new_center)+")"
def reducer():
    """Hadoop-streaming reduce step: merge per-cluster partial sums.

    Expects mapper output sorted by cluster id on stdin, one
    "cluster_id<TAB>[accsum];count" record per line, and emits one updated
    centre per cluster via emit_new_lat_long().
    """
    oldKey = None
    accsum_total = list([0]*NUM)
    count_total = 0
    while True:
        line = sys.stdin.readline()
        if not line:
            break
        ws = line.strip().split('\t')
        # skip malformed lines
        if len(ws)!=2:
            continue
        cluster_id, totals = ws
        accsum,count =totals.split(";")
        accsum=le(accsum)
        # key change: flush the totals accumulated for the previous cluster
        if oldKey and oldKey != cluster_id:
            emit_new_lat_long(oldKey, accsum_total,count_total)
            accsum_total = list([0]*NUM)
            count_total = 0
        oldKey = cluster_id
        accsum_total = vecplus(accsum_total,accsum)
        count_total += float(count)
    # flush the final cluster
    if oldKey != None:
        emit_new_lat_long(oldKey, accsum_total, count_total)
def main(args):
    """Dispatch to the mapper or reducer depending on the --mr flag."""
    if args.mr is None:
        return
    op_type = args.mr
    if op_type == 'm':
        # the mapper needs the current centroids loaded first (--t file)
        read_clusters(args.t)
        mapper()
    if op_type == 'r':
        reducer()
if __name__ == '__main__':
    # dimensionality of every feature vector / centroid
    NUM=1152
    parser = argparse.ArgumentParser(description='kmeans_mr')
    parser.add_argument('--mr', type=str, help='MR')
    parser.add_argument('--t', type=str, help='cluster')
    args = parser.parse_args()
    # module-level state shared by the mapper/reducer helpers
    clusters = []
    delta_clusters = dict()
    valid_clusters = []
    main(args)
|
'''
Localization Utilities
@author: MiJyn
'''
import gettext
import os
import re
from relinux import config, configutils
class Localize():
    """Discovers installed gettext translations for this product and lets
    the application switch between them."""
    def __init__(self):
        self.languages = {}
        # match locale entries named "<productunix>_<lang>"
        # NOTE(review): the lazy, unanchored trailing group "_(.*?)" always
        # matches the empty string, so m.group(1) is '' for every entry and
        # all translations get registered under the key ''. Probably needs
        # "_(.*)$" instead -- confirm before relying on per-language lookup.
        patt = re.compile(config.productunix + "_(.*?)")
        for i in os.listdir(config.localedir):
            m = patt.match(i)
            if configutils.checkMatched(m):
                lang = m.group(1)
                self.languages[lang] = gettext.translation(
                    config.productunix, config.localedir, languages=[lang])
    def useLanguage(self, language):
        # install the chosen translation's _() into builtins
        # NOTE(review): .ugettext only exists on Python 2 translation
        # objects; on Python 3 this would need .gettext.
        self.languages[language].install()
        config._ = self.languages[language].ugettext
|
import re
import matplotlib.pyplot as plt
import sys

# Training logs to compare, one per backbone network.
file_path_list = [
    'C:/Users/think/Desktop/毕设/网络选择实验/ssd_512_mobilenet1.0_voc_train.log',
    'C:/Users/think/Desktop/毕设/网络选择实验/ssd_512_vgg16_atrous_voc_train.log',
    'C:/Users/think/Desktop/毕设/网络选择实验/ssd_512_resnet50_v1_voc_train.log',
]
# compile the mAP pattern once instead of rebuilding it per matching line
smoke_pattern = re.compile(r"smoke=(\d+\.\d+)")
# one accuracy series per log file (previously a hard-coded list of 5)
acc_list = [[] for _ in file_path_list]
for i, path in enumerate(file_path_list):
    # fixes: use the `path` loop variable (previously re-indexed
    # file_path_list[i]), iterate the file directly instead of a manual
    # readline() loop, and close the file via the context manager
    with open(path, 'r') as f:
        for line in f:
            match = smoke_pattern.search(line)
            # robustness: only lines that actually carry a value are kept
            # (a line containing 'smoke' without a number used to raise)
            if match:
                acc_list[i].append(float(match.group(1)))
# plot the first 25 epochs of each network's mAP curve
plt.plot(range(0, 25), acc_list[0][0:25], label='mobilenet1')
plt.plot(range(0, 25), acc_list[1][0:25], label='vgg16_atrous')
plt.plot(range(0, 25), acc_list[2][0:25], label='resnet50_v1')
plt.legend()
plt.xlabel('epoch(%d batches/epoch, batch size=%d)' % (132, 16))
plt.ylabel('mAP')
plt.show()
|
# coding=utf-8
import sqlite3
import pandas as pd
import requests
import logging
from Scrapy_WDZJ.tools.net import *
import datetime
logging.basicConfig(level=logging.DEBUG)
# Load previously collected proxies from the sqlite cache.
sql = "select ip,port,protocal from ValidProxy"
conn = sqlite3.connect('scrapy_wdzj2018-12-23.db')
ips = pd.read_sql(sql=sql, con=conn)
logging.debug(ips)
# Re-validate each proxy and keep only the ones that still work.
valid_ip_table=[]
for index, row in ips.iterrows():
    valid_ip=valid_proxyip(row)
    if valid_ip['valid']==1:
        # build the full proxy URL from its parts
        valid_ip['valid_ip_url']="{0}://{1}:{2}".format(valid_ip['protocal'], valid_ip['ip'], valid_ip['port'])
        logging.debug(valid_ip)
        valid_ip_table.append(valid_ip)
# Dump the surviving proxies to a date-stamped csv.
df=pd.DataFrame(valid_ip_table)
timestamp = str(datetime.datetime.now().strftime("%Y-%m-%d"))
df.to_csv("valid_ip_table{0}.csv".format(timestamp))
# Generated by Django 3.1.5 on 2021-02-20 15:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter users.last_name to a plain 100-char CharField."""
    dependencies = [
        ('users_dir', '0002_auto_20210220_1436'),
    ]
    operations = [
        migrations.AlterField(
            model_name='users',
            name='last_name',
            field=models.CharField(max_length=100),
        ),
    ]
|
# ch13_4.py
# Import the make_icecream and make_drink functions from module makefood.py
from makefood import make_icecream, make_drink
make_icecream('草莓醬')
make_icecream('草莓醬', '葡萄乾', '巧克力碎片')
make_drink('large', 'coke')
|
def findClosestValueHelper(root, target, closestVal):
    """Walk down the BST from *root* and return the stored value closest
    to *target*, seeded with the running best *closestVal*."""
    node = root
    while node is not None:
        if abs(node.value - target) < abs(closestVal - target):
            closestVal = node.value
        # descend towards the side that could hold a closer value
        node = node.right if node.value < target else node.left
    return closestVal
def findClosestValueInBst(tree, target):
    """Return the value stored in the BST *tree* that is closest to *target*."""
    # seed the search with infinity so any real node beats it
    return findClosestValueHelper(tree, target, float("inf"))
|
# Read T test cases; each gives N homework entries of (t, d).
# NOTE(review): the parsed values are never stored -- `homework` stays
# empty and (t, d) are discarded each iteration. This looks like an
# unfinished scheduling-problem skeleton.
for tc in range(1, int(input()) + 1):
    N = int(input())
    homework = []
    for i in range(N):
        t, d = map(int, input().split())
|
from django import forms
from django.forms import ModelForm
from .models import Reviews
class ReviewForm(forms.ModelForm):
    """Model-backed form exposing only the review text and rating."""
    class Meta:
        # only these two model fields are user-editable through this form
        model = Reviews
        fields = 'review', 'rating'
import random
import time
def f_time():
    """Sleep for a random 1-5 seconds, then print a jump-scare."""
    ran = random.randint(1,5)
    time.sleep(ran)
    print("BOO")
f_time()
def tirc_treat():
    """Echo a random 1-or-2 draw, then print "Trick" or "Treat" accordingly."""
    # NOTE(review): the function name looks like a typo for "trick_treat"
    ran_2 = random.randint(1,2)
    print(ran_2)
    if ran_2 == 1:
        print("Trick")
    if ran_2 == 2:
        print("Treat")
tirc_treat()
|
import matplotlib.pyplot as plt
import torch
import torchvision
import numpy as np
from utils.device_config import dev
device = dev
def to_img(x):
    """Clip tensor values into the displayable [0, 1] range."""
    return x.clamp(0, 1)
def visualise_output(images, model):
    """Run *images* through *model* and display the reconstructions as a grid.

    The model is expected to return a 3-tuple whose first element is the
    reconstructed image batch (e.g. a VAE); the other two are ignored.
    """
    with torch.no_grad():
        images = images.to(device)
        # keep only the reconstructions from the model's 3-tuple output
        images, _, _ = model(images)
        images = images.cpu()
        images = to_img(images)
        # grid of up to 99 images, 10 per row with padding 5
        np_imagegrid = torchvision.utils.make_grid(images[1:100], 10, 5).numpy()
        # matplotlib expects HWC ordering
        plt.imshow(np.transpose(np_imagegrid, (1, 2, 0)))
        plt.show()
def show_image(img):
    """Display a single CHW tensor image in grayscale."""
    # permute CHW -> HWC, the layout matplotlib expects
    plt.imshow(img.permute(1, 2, 0), cmap="gray")
    plt.show()
|
import RPi.GPIO as GPIO
import time

GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
try:
    # drive a ~1.5 ms high / 2.5 ms low square wave on pin 7 until the
    # user interrupts with Ctrl-C
    while True:
        GPIO.output(7, 1)
        time.sleep(0.0015)
        GPIO.output(7, 0)
        time.sleep(0.0025)
except KeyboardInterrupt:
    pass
finally:
    # bug fix: cleanup() was unreachable after the infinite loop; running
    # it in `finally` releases the GPIO channel on every exit path
    GPIO.cleanup()
|
from datetime import datetime
from odoo import models, fields, api
from odoo.exceptions import UserError
from urllib.parse import urljoin, urlencode
# Ordered workflow stages of an internal requisition: (value, label).
# Labels name the role responsible for acting at that stage.
REQUEST_STAGE = [
    ("draft", "Draft"),
    ("submit", "Department Manager"),
    ("approved", "Main manager"),
    ("warehouse", "Warehouse Officer"),
    ("approval", "Warehouse Manager"),
    ("transfer", "Transfer"),
    ("done", "Done"),
]
class HRDepartment(models.Model):
    """Extend hr.department with the department's own internal stock location."""
    _inherit = "hr.department"
    # internal stock location that serves as the destination for this
    # department's requisitions (restricted to the user's company)
    location_id = fields.Many2one(
        comodel_name="stock.location",
        string="Stock Location",
        domain=lambda self: [
            ("usage", "=", "internal"),
            ("company_id", "=", self.env.user.company_id.id),
        ],
    )
class IRRequest(models.Model):
    # Internal requisition workflow record: draft -> department manager ->
    # main manager -> warehouse officer -> warehouse manager -> transfer -> done
    _name = "ng.ir.request"
    _inherit = ["mail.thread"]
    _description = "Internal Requisition"
    _order = "state desc, write_date desc"
def _get_active_login(self):
"""return current logined in user."""
return self.env.user.id
def _current_login_employee(self):
"""Get the employee record related to the current login user."""
hr_employee = self.env["hr.employee"].search(
[("user_id", "=", self._get_active_login())], limit=1
)
return hr_employee.id
name = fields.Char(string="Number", default="/")
state = fields.Selection(
selection=REQUEST_STAGE, default="draft", track_visibility="onchange"
)
requester = fields.Many2one(
comodel_name="res.users",
string="Requester",
default=_get_active_login,
track_visibility="onchange",
)
end_user = fields.Many2one(
comodel_name="hr.employee",
string="End User",
default=_current_login_employee,
required=True,
)
request_date = fields.Datetime(
string="Request Date",
default=lambda self: datetime.now(),
help="The day in which request was initiated",
)
request_deadline = fields.Datetime(string="Request Deadline")
hod = fields.Many2one(
comodel_name="hr.employee", string="H.O.D", domain=[("parent_id", "=", False)]
)
department = fields.Many2one(comodel_name="hr.department", string="Department",)
dst_location_id = fields.Many2one(
comodel_name="stock.location",
string="Destination Location",
help="Departmental Stock Location",
track_visibility="onchange",
domain=lambda self: [
("usage", "=", "internal"),
("company_id", "=", self.env.user.company_id.id),
],
)
src_location_id = fields.Many2one(
comodel_name="stock.location",
string="Source Location",
help="Departmental Stock Location",
track_visibility="onchange",
domain=lambda self: [
("usage", "=", "internal"),
("company_id", "=", self.env.user.company_id.id),
],
)
request_ids = fields.One2many(
comodel_name="ng.ir.request.line",
inverse_name="request_id",
string="Request Line",
required=True,
)
approve_request_ids = fields.One2many(
comodel_name="ng.ir.request.approve",
inverse_name="request_id",
string="Approved Request Line",
required=True,
)
reason = fields.Text(string="Rejection Reason")
availaibility = fields.Boolean(
string="Availaibility", compute="_compute_availabilty"
)
warehouse_id = fields.Many2one(comodel_name="stock.warehouse", string="Warehouse")
company_id = fields.Many2one(
"res.company",
"Company",
default=lambda self: self.env["res.company"]._company_default_get(),
index=True,
required=True,
)
@api.depends("approve_request_ids")
def _compute_availabilty(self):
count_total = len(self.approve_request_ids)
count_avail = len(
[
appr_id.state
for appr_id in self.approve_request_ids
if appr_id.state == "available"
]
)
self.availaibility = count_total == count_avail
    @api.onchange("hod")
    def _onchange_hod(self):
        """Default the destination location from the department's configured
        stock location.

        NOTE(review): triggered by changes to `hod` but reads only
        `department` -- confirm the trigger field is the intended one.
        """
        if self.department:
            if not self.department.location_id:
                raise UserError("Departmental invertory location is not configured")
            self.dst_location_id = self.department.location_id
@api.model
def create(self, vals):
rec_id = super(IRRequest, self).create(vals)
return rec_id
    def submit(self):
        """Submit the requisition: assign its sequence number, email the
        H.O.D. a link to the request and move to the 'submit' stage."""
        if not self.request_ids:
            raise UserError("You can not submit an empty item list for requisition.")
        else:
            # fetch email template.
            seq = self.env["ir.sequence"].next_by_code("ng.ir.request")
            recipient = self.recipient("hod", self.hod)
            url = self.request_link()
            mail_template = self.env.ref(
                "ng_internal_requisition.ng_internal_requisition_submit"
            )
            mail_template.with_context({"recipient": recipient, "url": url}).send_mail(
                self.id, force_send=True
            )
            self.write({"state": "submit", "name": seq})
    def department_manager_approve(self):
        """Department manager decision.

        Without the 'approved' context flag, opens the rejection wizard so a
        reason can be collected; otherwise emails the department a link and
        advances the record to 'approved'.
        """
        if self:
            approved = self.env.context.get("approved")
            if not approved:
                # rejection path: open the wizard to collect the reason
                return {
                    "type": "ir.actions.act_window",
                    "res_model": "ir.request.wizard",
                    "views": [[False, "form"]],
                    "context": {"request_id": self.id},
                    "target": "new",
                }
            else:
                # move to next level and send mail
                url = self.request_link()
                recipient = self.recipient("department_manager", self.department)
                mail_template = self.env.ref(
                    "ng_internal_requisition.ng_internal_requisition_approval"
                )
                mail_template.with_context(
                    {"recipient": recipient, "url": url}
                ).send_mail(self.id, force_send=True)
                self.write({"state": "approved"})
    def main_manager_approve(self):
        """Main manager decision: open the rejection wizard when not
        approved, otherwise advance the request to the warehouse stage."""
        approved = self.env.context.get("approved")
        if not approved:
            # rejection path: open the wizard to collect the reason
            return {
                "type": "ir.actions.act_window",
                "res_model": "ir.request.wizard",
                "views": [[False, "form"]],
                "context": {"request_id": self.id},
                "target": "new",
            }
        else:
            # move to next level and send mail
            self.write({"state": "warehouse"})
    def warehouse_officer_confirm(self):
        """Warehouse officer confirmation: requires at least one approved
        item line, emails the department and advances to 'approval'."""
        if not self.approve_request_ids:
            raise UserError(
                "Please add the requested items to the requested items line."
            )
        else:
            url = self.request_link()
            recipient = self.recipient("department_manager", self.department)
            mail_template = self.env.ref(
                "ng_internal_requisition.ng_internal_requisition_warehouse_officer"
            )
            mail_template.with_context({"recipient": recipient, "url": url}).send_mail(
                self.id, force_send=True
            )
            self.write({"state": "approval"})
def warehouse_officer_confirm_qty(self):
"""Forward the available quantity to warehouse officer."""
clone = self.copy()
available_aggregate = sum(
[approve_request_id.qty for approve_request_id in self.approve_request_ids]
)
if available_aggregate <= 0:
raise UserError(
"The item line is empty or quantity available can not be forwarded."
)
return False
if self.approve_request_ids and self.request_ids:
for index, approve_request_id in enumerate(self.approve_request_ids):
approve_id = approve_request_id.copy()
availqty, reqqty = approve_id.qty, approve_id.quantity
if availqty <= reqqty:
approve_id.write({"request_id": clone.id, "quantity": availqty})
self.approve_request_ids[index].write(
{"quantity": reqqty - availqty}
)
else: # Detach from self record
approve_id.sudo().unlink()
for request_id in self.request_ids:
req = request_id.copy()
req.write({"request_id": clone.id})
clone.write({"state": "approval", "name": self.name})
if not clone.approve_request_ids:
clone.sudo().unlink()
raise UserError(
"There is enough quantity available for confirmation, make use of the confirm button."
)
return False
if sum([apr.quantity for apr in self.approve_request_ids]) == 0:
self.unlink()
return {
"type": "ir.actions.act_window",
"res_model": "ng.ir.request",
"views": [[False, "form"]],
"context": {"request_id": self.id},
"res_id": clone.id,
"target": "main",
}
else:
raise UserError("No line item(s) defined.")
    def confirmation(self):
        """Warehouse manager decision: open the rejection wizard when not
        approved, otherwise advance the request to the transfer stage."""
        approved = self.env.context.get("approved")
        if not approved:
            # rejection path: open the wizard to collect the reason
            return {
                "type": "ir.actions.act_window",
                "res_model": "ir.request.wizard",
                "views": [[False, "form"]],
                "context": {"request_id": self.id},
                "target": "new",
            }
        else:
            # move to next level and send mail
            self.write({"state": "transfer"})
def do_transfer(self):
if self:
src_location_id = self.src_location_id.id
dst_location_id = self.dst_location_id.id
domain = [
("active", "=", True),
("code", "=", "internal"),
("company_id", "=", self.env.user.company_id.id),
]
stock_picking = self.env["stock.picking"]
picking_type_id = self.env["stock.picking.type"].search(domain, limit=1)
print(
domain,
"**********************************************",
picking_type_id,
self.env.user.company_id,
)
payload = {
"location_id": src_location_id,
"location_dest_id": dst_location_id,
"picking_type_id": picking_type_id.id,
}
stock_picking_id = stock_picking.create(payload)
move_id = self.stock_move(self.approve_request_ids, stock_picking_id)
self.process(stock_picking_id)
def stock_move(self, request_ids, picking_id):
"""."""
stock_move = self.env["stock.move"]
for request_id in request_ids:
payload = {
"product_id": request_id.product_id.id,
"name": request_id.product_id.display_name,
"product_uom_qty": request_id.quantity,
"product_uom": request_id.uom.id,
"picking_id": picking_id.id,
"location_id": picking_id.location_id.id,
"location_dest_id": picking_id.location_dest_id.id,
}
stock_move.create(payload)
print(payload)
print(request_id.state)
request_id.write({"transferred": True})
self.write({"state": "done"})
    def process(self, picking_id):
        """Validate *picking_id*: confirm/assign, fill done quantities, complete.

        Pickings that would require a backorder are accumulated and handed to
        the backorder wizard; the rest are validated directly and a transfer
        notification is mailed to the department manager.
        """
        pick_to_do = self.env["stock.picking"]
        pick_to_backorder = self.env["stock.picking"]
        for picking in picking_id:
            # If still in draft => confirm and assign
            if picking.state == "draft":
                picking.action_confirm()
                if picking.state != "assigned":
                    picking.action_assign()
                    if picking.state != "assigned":
                        raise UserError(
                            (
                                "Could not reserve all requested products. Please use the 'Mark as Todo' button to handle the reservation manually."
                            )
                        )
            # Force every reserved quantity to be fully done.
            for move in picking.move_lines:
                for move_line in move.move_line_ids:
                    move_line.qty_done = move_line.product_uom_qty
            if picking._check_backorder():
                pick_to_backorder |= picking
                continue
            pick_to_do |= picking
        # Process every picking that do not require a backorder, then return a single backorder wizard for every other ones.
        if pick_to_do:
            pick_to_do.action_done()
        url = self.request_link()
        recipient = self.recipient("department_manager", self.department)
        mail_template = self.env.ref(
            "ng_internal_requisition.ng_internal_requisition_transfer"
        )
        mail_template.with_context({"recipient": recipient, "url": url}).send_mail(
            self.id, force_send=True
        )
        if pick_to_backorder:
            return pick_to_backorder.action_generate_backorder_wizard()
        return False
def recipient(self, recipient, model):
"""Return recipient email address."""
if recipient == "hod":
workmails = model.address_id, model.work_email
workmail = {workmail for workmail in workmails if workmail}
workmail = workmail.pop() if workmail else model.work_email
if not isinstance(workmail, str):
try:
return workmail.email
except:
pass
return workmail
elif recipient == "department_manager":
manager = model.manager_id
return manager.work_email or manager.address_id.email
    def request_link(self):
        """Build a deep-link URL to this request's form view (for emails)."""
        fragment = {}
        base_url = self.env["ir.config_parameter"].sudo().get_param("web.base.url")
        model_data = self.env["ir.model.data"]
        fragment.update(base_url=base_url)
        # Menu and action ids are resolved from the module's XML ids.
        fragment.update(
            menu_id=model_data.get_object_reference(
                "ng_internal_requisition", "ng_internal_requisition_menu_1"
            )[-1]
        )
        fragment.update(model="ng.ir.request")
        fragment.update(view_type="form")
        fragment.update(
            action=model_data.get_object_reference(
                "ng_internal_requisition", "ng_internal_requisition_action_window"
            )[-1]
        )
        fragment.update(id=self.id)
        query = {"db": self.env.cr.dbname}
        res = urljoin(base_url, "?%s#%s" % (urlencode(query), urlencode(fragment)))
        return res
|
#
# Copyright (c) 2022 TUM Department of Electrical and Computer Engineering.
#
# This file is part of MLonMCU.
# See https://github.com/tum-ei-eda/mlonmcu.git for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Command line subcommand for managing models."""
from mlonmcu.cli.common import add_common_options, add_context_options
from mlonmcu.context.context import MlonMcuContext
import mlonmcu.models
def add_models_options(parser):
    """Register the models-subcommand specific flags on *parser*."""
    group = parser.add_argument_group("models options")
    group.add_argument(
        "--detailed",
        action="store_true",
        default=False,
        help="Display more information (default: %(default)s)",
    )
def get_parser(subparsers):
    """Define and return a subparser for the models subcommand."""
    # (Fixed a stray quote that leaked into the docstring text.)
    parser = subparsers.add_parser("models", description="Manage ML on MCU models.")
    parser.set_defaults(func=handle)
    add_common_options(parser)
    add_context_options(parser)
    add_models_options(parser)
    return parser
def handle(args):
    """Handle the ``models`` subcommand: print a model summary for the context."""
    with MlonMcuContext(path=args.home, deps_lock="read") as context:
        mlonmcu.models.print_summary(context, detailed=args.detailed)
|
from stipy import *
# Time-unit factors; the scheduler below counts in nanoseconds, so e.g.
# 10*ms is ten milliseconds expressed in ns.
ns = 1.0
us = 1000.0
ms = 1000000.0
s = 1000000000.0
# Set description used by program
setvar('desc','''Turn TAs Off.''')
include("channels.py")
# Define different blocks of the experiment
def turnSFAOff(Start):
    """Schedule the switch events that discharge/turn off the quadrupole supply.

    NOTE(review): the internal schedule uses absolute times derived from
    tStart, not from the *Start* argument, and *Start* is returned
    unchanged -- confirm that is intended.
    """
    #Initialization Settings
    tStart =1*ms
    ## Settings ##
    tOn = tStart + 10*ms
#    setQuadrupoleCurrent(tOn + 0.1*ms, 8, False, False)
#    event(quadrupoleOnSwitch, tOn, 1)
#    event(sfaOutputEnableSwitch, tOn + 100*us, 1)
    commandTime = tOn + 500*ms
    # Close the charge switch, pulse digital line 4 around the command time,
    # then reopen the charge switch 50 ms later.
    event(quadrupoleChargeSwitch, commandTime, 1)
#    event(sfaOutputEnableSwitch, commandTime - 500*us, 0)
#    event(sfaOutputEnableSwitch, commandTime + 100*us, 0)
#    setQuadrupoleCurrent(commandTime + 500*us, 40, False, False, 0)
    event(ch(digitalOut, 4), commandTime - 100*ms, 0)
    event(ch(digitalOut, 4), commandTime - 100*us, 1)
    event(quadrupoleChargeSwitch, commandTime + 50*ms, 0)
    event(ch(digitalOut, 4), commandTime + 100*ms, 0)
    return Start
# Global definitions
t0 = 10*us
time = t0
# Run the switch-off sequence; turnSFAOff returns its argument unchanged.
time = turnSFAOff(time)
|
import os
import sys
def _initCoverage():
if 'COVERAGE_PROCESS_START' in os.environ:
try:
import coverage
coverage.process_startup()
except ImportError:
pass
def getDRCBaseDir():
    """Return the DRC base directory (raises KeyError when DRC_BASE is unset)."""
    base_dir = os.environ['DRC_BASE']
    return base_dir
def getDRCBaseIsSet():
    """Return True when the DRC_BASE environment variable is defined."""
    return os.environ.get('DRC_BASE') is not None
def updateSysPath(path):
    """Prepend *path* to sys.path when it is an existing directory not already listed.

    Returns True when the path was added, False otherwise.
    """
    should_add = bool(path) and os.path.isdir(path) and path not in sys.path
    if should_add:
        sys.path.insert(0, path)
    return should_add
# Enable subprocess coverage immediately on import when requested.
_initCoverage()
# this is for mac homebrew users
#updateSysPath('/usr/local/opt/vtk7/lib/python2.7/site-packages')
|
import configparser
from model import CharacterLevelCNN
import torch
from innvestigator import InnvestigateModel
import spacy
args = configparser.ConfigParser()
args.read('argsConfig.ini')
class ModelsDeploy(object):
    """Serve the AG-News and Yelp character-level CNNs plus their LRP explainers."""

    def __init__(self):
        # AG News: 4-class topic classifier restored from a CPU checkpoint.
        self.ag_news_model = CharacterLevelCNN(4, args)
        self.ag_news_model_checkpoint = torch.load('AgNewsModel1.pt', map_location=torch.device('cpu'))
        self.ag_news_model.load_state_dict(self.ag_news_model_checkpoint['state_dict'])
        self.ag_news_lrp = InnvestigateModel(self.ag_news_model, lrp_exponent=1, method="e-rule", beta=.5)
        # Yelp: 2-class sentiment classifier.
        self.yelp_model = CharacterLevelCNN(2, args)
        self.yelp_model_checkpoint = torch.load('YelpModel.pt', map_location=torch.device('cpu'))
        self.yelp_model.load_state_dict(self.yelp_model_checkpoint['state_dict'])
        self.yelp_lrp = InnvestigateModel(self.yelp_model, lrp_exponent=1, method="e-rule", beta=.5)
        # Character vocabulary and fixed input length used for one-hot encoding.
        self.alphabet = args.get('DataSet', 'alphabet')
        self.l0 = args.getint('DataSet', 'l0')
        self.nlp = spacy.load("en_core_web_sm-2.3.1")

    def oneHotEncode(self, sentence):
        """One-hot encode *sentence* into a (len(alphabet), l0) tensor.

        The sentence is truncated to l0 characters and written back-to-front
        (last character in column 0); characters outside the alphabet are
        left as all-zero columns.
        """
        # X = (batch, 70, sequence_length)
        X = torch.zeros(len(self.alphabet), self.l0)
        sequence = sentence[:self.l0]
        for index_char, char in enumerate(sequence[::-1]):
            if self.char2Index(char) != -1:
                X[self.char2Index(char)][index_char] = 1.0
        return X

    def char2Index(self, character):
        """Return the alphabet index of *character*, or -1 when absent."""
        return self.alphabet.find(character)

    def generate_word_rel_vals(self, text, heatmap):
        """Aggregate per-character relevance columns into (word, score) pairs.

        A word's score is the sum of heatmap[:, i] over the text positions i
        it occupies; spaces delimit words.  (Dead commented-out variants of
        the aggregation were removed.)
        NOTE(review): oneHotEncode fills columns back-to-front while this
        indexes forward -- confirm the heatmap orientation matches.
        """
        word_rel_vals = []
        word = ''
        val = 0
        for i in range(len(text)):
            if text[i] == ' ':
                word_rel_vals.append((word, val))
                word = ""
                val = 0
            else:
                word += text[i]
                val += torch.sum(heatmap[:, i]).item()
        word_rel_vals.append((word, val))
        return word_rel_vals

    def predict_probs(self, sentence, model='yelp'):
        """Classify *sentence*; return (predicted class index, per-class percentages)."""
        input_tensor = self.oneHotEncode(sentence)
        input_tensor = torch.unsqueeze(input_tensor, 0)
        if model == 'yelp':
            with torch.no_grad():
                predictions = self.yelp_model(input_tensor)
        else:
            with torch.no_grad():
                predictions = self.ag_news_model(input_tensor)
        pred = torch.max(predictions, 1)[1].cpu().numpy().tolist()[0]
        # The network outputs log-probabilities; exponentiate for percentages.
        probs = torch.exp(predictions) * 100
        probs = probs.cpu().numpy().tolist()[0]
        return pred, probs

    def explain(self, sentence, model='yelp'):
        """Classify *sentence* and attribute the decision to words via LRP.

        Returns (prediction, percentages, [(word, relevance), ...]) with
        empty words filtered out.
        """
        input_tensor = self.oneHotEncode(sentence)
        input_tensor = torch.unsqueeze(input_tensor, 0)
        if model == 'yelp':
            predictions, heatmap = self.yelp_lrp.innvestigate(in_tensor=input_tensor)
        else:
            predictions, heatmap = self.ag_news_lrp.innvestigate(in_tensor=input_tensor)
        pred = torch.max(predictions, 1)[1].cpu().numpy().tolist()[0]
        probs = torch.exp(predictions) * 100
        probs = probs.cpu().numpy().tolist()[0]
        word_rels_vals = self.generate_word_rel_vals(sentence, heatmap[0])
        word_rels_vals = list(filter(lambda x: x[0] != '', word_rels_vals))
        return pred, probs, word_rels_vals
def main():
    """Instantiate the deployment wrapper and run one sample explanation."""
    deploy = ModelsDeploy()
    prediction, probabilities, word_relevances = deploy.explain(
        "Like any Barnes & Noble, it has a nice comfy cafe, and a large selection of books. The staff is very friendly and helpful. They stock a decent selection, and the prices are pretty reasonable. Obviously it's hard for them to compete with Amazon. However since all the small shop bookstores are gone, it's nice to walk into one every once in a while.")

if __name__ == '__main__':
    main()
import os
import csv
from collections import OrderedDict
def getTokenId(token):
    """Build a unique token id of the form '<sentence>:<index>'."""
    return "%s:%s" % (token["sentence"], token["index"])
def getExampleId(tokens):
    """Build an id for a token group: '<sentence>:<i1,i2,...>' (indices sorted).

    All tokens must belong to the same sentence.
    """
    sentenceId = tokens[0]["sentence"]
    assert all(tok["sentence"] == sentenceId for tok in tokens)
    indices = sorted(tok["index"] for tok in tokens)
    return "%s:%s" % (sentenceId, ",".join(str(i) for i in indices))
def getGoldExample(beginIndex, sentence, includeGaps=False):
    """
    For each token in a sentence there can be only one expression,
    which can have one or more words. A new expression begins with
    one of the MWE tags 'O', 'B' or 'b'.
    Returns the token list of the expression starting at *beginIndex*,
    or None when that token carries no supersense.
    """
    # Only supersense-bearing tokens start a gold expression.
    if sentence[beginIndex]["supersense"] == None:
        return None
    tokens = [sentence[beginIndex]]
    mweType = tokens[0]["MWE"]
    assert mweType in ("O", "o", "B", "b"), tokens[0]
    # 'O'/'o' mark single-word expressions: complete immediately.
    if mweType in ("O", "o"):
        return tokens
    # Collect continuation tokens until the next expression begins.
    for i in range(beginIndex + 1, len(sentence)):
        mwe = sentence[i]["MWE"]
        if mwe in ("B", "O"):
            break
        elif mwe == "I":
            # 'I' continues an outer ('B') expression; inside a nested
            # expression it is only kept when gaps are requested.
            if mweType == "B":
                tokens.append(sentence[i])
            elif includeGaps: tokens.append(sentence[i])
        elif mwe == "i":
            if mweType == "b":
                tokens.append(sentence[i])
            elif includeGaps: tokens.append(sentence[i])
        elif mwe == "b":
            # A nested expression may only start inside an outer 'B' one.
            assert mweType == "B"
            if includeGaps: tokens.append(sentence[i])
        else:
            assert mwe == "o", sentence[i]
            if includeGaps: tokens.append(sentence[i])
    return tokens
def hasGaps(tokens):
    """Return True when the (strictly increasing) token indices are not consecutive."""
    prev = tokens[0]["index"]
    for tok in tokens[1:]:
        cur = tok["index"]
        assert cur > prev, tokens
        if cur - prev > 1:
            return True
        prev = cur
    return False
class Corpus():
    """Reader for the DiMSUM 1.5 corpus (tab-separated CoNLL-style files).

    Python 2 code: files are opened in binary mode and fields decoded
    from UTF-8 by hand; `print` statements are py2 syntax.
    """
    def __init__(self, dataPath):
        self.dataPath = dataPath
        self.corpusFiles = {"train":"dimsum-data-1.5/dimsum16.train", "test":"dimsum-data-1.5/dimsum16.test.blind"}
        self.columns = ["index", "word", "lemma", "POS", "MWE", "parent", "strength", "supersense", "sentence"]
        self.MWETags = set(["O", "o", "B", "b", "I", "i"])
    def readExamples(self, filePath):
        """Read one corpus file into a list of token dicts, normalizing types."""
        counts = {"supersense":0, "token":0}
        with open(filePath, "rb") as csvfile:
            reader = csv.DictReader(csvfile, fieldnames=self.columns, delimiter="\t", quoting=csv.QUOTE_NONE)
            tokens = [row for row in reader]
        for token in tokens:
            for column in self.columns:
                token[column] = token[column].decode('utf8')
            # Corpus indices are 1-based; normalize to 0-based ints.
            token["index"] = int(token["index"]) - 1
            assert token["index"] >= 0, token
            token["parent"] = int(token["parent"]) if token["parent"] != "" else None
            token["supersense"] = token["supersense"] if token["supersense"] != "" else None
            assert token["MWE"] in self.MWETags, token
            # Add to counts
            counts["token"] += 1
            if token["supersense"] != None:
                counts["supersense"] += 1
        print "Read corpus file", filePath, counts
        return tokens
    def printSentence(self, sentence):
        """Print one sentence, one tab-separated token per line."""
        for token in sentence:
            print "\t".join(str(token[x]) for x in self.columns)
    def readSentences(self, filePath):
        """Group a file's tokens by their sentence id, preserving order."""
        examples = self.readExamples(filePath)
        sentenceDict = OrderedDict()
        for example in examples:
            if example["sentence"] not in sentenceDict:
                sentenceDict[example["sentence"]] = []
            sentenceDict[example["sentence"]].append(example)
        return [sentenceDict[x] for x in sentenceDict]
    def readCorpus(self, setNames, filePaths = None):
        """Load the named sets into self.corpus.

        NOTE(review): filePaths["setName"] looks up the literal key
        'setName' rather than the loop variable -- likely a bug; confirm.
        """
        self.corpus = {}
        for setName in setNames:
            if filePaths:
                filePath = filePaths["setName"]
            else:
                filePath = os.path.join(self.dataPath, self.corpusFiles[setName])
            print "Reading set", setName, "from", filePath
            self.corpus[setName] = self.readSentences(filePath)
    def getSentences(self, setName):
        """Return the sentences of *setName*, or [] when the set was not loaded."""
        if setName in self.corpus:
            return self.corpus[setName]
        else:
            return []
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 27 14:51:01 2017
@author: marshals
"""
from opbase import opstage, opflowcontrol
import sys
#from Crypto.Cipher import AES
#from Crypto import Random
import argparse
#os.chdir('/proj/OP4/WORK/marshals/proj/project_bee/IDR/WORK/marshals/ORCA_TOP/V-0305_S0304_F0303')
#os.chdir(os.getenv('HOME'))
#---------------------------- parser args ---------------------------------------------------------
# Command-line interface for running a single stage or a full flow.
parser = argparse.ArgumentParser(description='''
Run the stage
''')
parser.add_argument("-src", dest='source_stage',
                    help='the name of source stage',
                    action="store")
parser.add_argument("-dst", dest='current_stage',
                    help='the name of submit stage',
                    action="store")
parser.add_argument("-scenario", dest='scenario', default='',
                    help='The name of corner(for stages based on corner specified like: PT). default: ""',
                    action="store")
parser.add_argument("-ow", dest='over_write',
                    help='over write the exists run script. default: not overwirte',
                    action="store_true")
parser.add_argument("-nx", dest='no_exit',
                    help='no exit after current stage finished. default: exit after stage finished',
                    action="store_true")
parser.add_argument("-debug", dest='repl_debug',
                    help='debug strings will be added in replaced scripts',
                    action="store_true")
parser.add_argument("-run", dest='run_task',
                    help='use this switch to enable run dst stage',
                    action="store_true")
parser.add_argument("-from", dest='from_stage',
                    help='the name of stage from',
                    action="store")
parser.add_argument("-to", dest='to_stage',
                    help='the name of stage to',
                    action="store")
parser.add_argument("-keyword_scenario", dest='keyword_scenario', default=[],
                    help='The name of corner(for stages based on corner specified like: PT). default: ""',
                    action="store")
parser.add_argument("-thr", dest='through_stage',
                    help='the name of the stage for through when change different tool',
                    action="append")
args = parser.parse_args()
#args.keyword_scenario = ' '.join(args.keyword_scenario)
#args.through_stage = ' '.
# Forward only the options the user actually set (Python 2: iteritems).
kwargs = {}
for k,v in args.__dict__.iteritems():
    if v != None and v != False :
        kwargs[k] = v
# Single-stage mode when -src/-dst is given; otherwise drive the whole flow.
if args.source_stage or args.current_stage:
    stage = opstage()
    check_arg = stage.opstage_check_args(**kwargs)
    if not check_arg:
        parser.print_help()
        sys.exit(1)
    status = stage.init_stage()
    # Submit and monitor only when -run was passed and init succeeded.
    if args.run_task and status == 1:
        stage.submit()
        stage.monitor()
else:
    flow = opflowcontrol()
    check_arg = flow.opflowcontrol_check_args(**kwargs)
    if not check_arg:
        parser.print_help()
        sys.exit(1)
    status=flow.init_flow()
|
from bs4 import BeautifulSoup
import urllib.request,sys,time
from urllib.request import Request, urlopen
import requests
import pandas as pd
import csv
def get_html(url):
    """Fetch *url* and return its parsed BeautifulSoup tree, or None on failure.

    A browser-like User-Agent is sent because some sites reject the default
    urllib agent.  Previously a failed fetch left ``page_soup`` unbound and
    the trailing return raised NameError; we now return None explicitly.
    """
    try:
        req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        webpage = urlopen(req).read()
        return BeautifulSoup(webpage, "html.parser")
    except Exception:
        print("Error with parsing:", url)
        return None
def get_pages(url):
    """Return the listing-page URLs '<url>page/<i>/' for i in 1..809."""
    return [url + 'page/' + str(page_no) + '/' for page_no in range(1, 810)]
def get_article_page_links(url):
    """Return the set of hrefs found inside <article> tags on *url*.

    Returns an empty list (not a set) when the page cannot be fetched or
    parsed; links without an href are skipped silently.
    """
    links = set()
    try:
        page_html = get_html(url)
        articles = page_html.findAll("article")
    except:
        print("Error with parsing:", url)
        return []
    for article in articles:
        try:
            for link in article.select("a"):
                links.add(link['href'])
        except:
            continue
    return links
def get_true_false_info(url):
    """Scrape one fact-check article into [false claim, explanation, source link, date, country].

    The class names and the slice offsets below are tied to the site's
    current markup.  Returns [] when either phrase cannot be extracted.
    NOTE(review): if the first fetch/parse fails, ``false``/``true``/``date``
    stay unbound and the final return can raise NameError despite the
    try/excepts -- confirm whether that path matters.
    """
    false_phrase = ''
    true_phrase = ''
    count = 1
    country = ''
    article_true_link = ''
    try:
        page_soup = get_html(url)
        false = page_soup.findAll("h1", {"class": "entry-title"})
        true = page_soup.findAll("p", {"class": "entry-content__text entry-content__text--explanation"})
        article_true_link = page_soup.find("a", {"class": "button entry-content__button entry-content__button--smaller"}).get('href')
        # Slice [68:78] carves the ISO date out of the topinfo tag's repr.
        date = str(page_soup.find("p", {"class": "entry-content__text entry-content__text--topinfo"}))[68:78]
        country = page_soup.find("p", {"class": "entry-content__text entry-content__text--topinfo"}).getText()[13:]
    except:
        print("Error with url:", url)
    try:
        # The claim text is the third child of the title element.
        for phrase in false[0]:
            if count == 3:
                false_phrase = phrase
            count += 1
    except:
        print('Error with false phrase')
        return []
    try:
        count = 1
        for phrase in true[0]:
            if count == 1:
                true_phrase = phrase
            count += 1
    except:
        print('Error with true phrase')
        return []
    return [false_phrase[1:], true_phrase[13:], article_true_link, date, country]
def get_statements(url):
    """Crawl every listing page under *url* and map article link -> parsed info."""
    article_links = set()
    for page_no, page in enumerate(get_pages(url), start=1):
        print("This is page", page_no)
        article_links.update(get_article_page_links(page))
    results = dict()
    for link_no, link in enumerate(article_links, start=1):
        print("This is link", link_no)
        results[link] = get_true_false_info(link)
    return results
def get_false_statements(true_false):
    """Collect the false-claim text (field 0) from every parsed article."""
    return [info[0] for info in true_false.values()]
def get_true_statements(true_false):
    """Collect the true-statement text (field 1) from every parsed article."""
    return [info[1] for info in true_false.values()]
def save_csv(save_dict, name):
    """Write the scraped statements to *name* as a semicolon-separated file.

    Each value of *save_dict* is [false, true, link, date, country]; rows
    are emitted as date; country; link; false; true.
    """
    with open(name, 'w') as f:
        # The header previously mixed ',' and ';' separators; use ';' throughout.
        f.write("Date; Country; Article Link; False Statement; True Statement;\n")
        for key in save_dict.keys():
            f.write("%s; %s; %s; %s; %s\n" % (save_dict[key][3], save_dict[key][4], save_dict[key][2], save_dict[key][0], save_dict[key][1]))
if __name__ == "__main__":
home_page_url = 'https://www.poynter.org/ifcn-covid-19-misinformation/'
statement_dict = get_statements(home_page_url)
save_csv(statement_dict, "poynter_data.csv") |
# CS362 HW 7
# Alex Young
# 3/3/2021
# Run this file using python3 leap_year.py
# This program finds if a year is a leap year
def run(x):
    """Classify *x* by the Gregorian leap-year rule.

    Returns "is a leap year" / "is not a leap year", or "invalid input"
    when *x* does not support the modulo operator (e.g. a string).
    """
    try:
        # Leap: divisible by 4, except century years not divisible by 400.
        if x % 4 == 0 and (x % 100 != 0 or x % 400 == 0):
            return "is a leap year"
        return "is not a leap year"
    except TypeError:
        return "invalid input"
import socket
import time
port = 10001
# Connect to the local test server (Python 2 script: str sends, print stmts).
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", port))
# Handshake: announce position 0 and read the server's reply.
s.send("0")
response = s.recv(1024)
print "CLIENT: got ", response, " from server"
# Stream five increasing positions, newline-terminated, 7 s apart.
blah = 0
for x in range(0,5):
    blah += 0.1
    print "CLIENT: sending position ", str(blah)
    s.send(str(blah))
    s.send('\n');
    time.sleep(7)
|
#!/usr/bin/env python
import os
from Tools.Abstract import Tool
class ApplyRecalibration():
    """Thin shell wrapper around GATK's ApplyRecalibration walker."""
    # http://www.broadinstitute.org/gatk/gatkdocs/org_broadinstitute_gatk_tools_walkers_variantrecalibration_ApplyRecalibration.html
    def apply_model(self, reference_file, input_vcf, tranchesfile, recalfile,
                    outputfile, mode="SNP", ts_filter_level=99.0, GATK_dir=""):
        """Run ApplyRecalibration on *input_vcf*, writing *outputfile*.

        NOTE(review): os.system with interpolated paths is shell-injection
        prone and the exit status is ignored -- consider subprocess.run.
        """
        # possible mode:
        # SNP
        # INDEL
        os.system("java -Xmx3g -jar %sGenomeAnalysisTK.jar -T ApplyRecalibration -R %s -input %s \
                  --ts_filter_level %f -tranchesFile %s -recalFile %s -mode %s -o %s"
                  % (GATK_dir, reference_file, input_vcf, ts_filter_level, tranchesfile, recalfile, mode, outputfile))
|
# Python 2 SQLAlchemy demo; `session`, `Servers`, `Users` come from earlier code.
todos = session.query(Servers).all()
for s in todos:
    print s.id,"-",s.nome,"-",s.user_id
adms = session.query(Users).all()
for a in adms:
    print a.id,"-",a.nome
# Attach server 5 to user 3 through the many-to-many relationship.
leandro = session.query(Users).filter(Users.id==3).first()
apache = session.query(Servers).filter(Servers.id==5).first()
leandro.servers.append(apache)
session.commit()
# Joined listing: which user administers which server.
adms = session.query(Users,Servers).join(Servers).all()
for a in adms:
    print a.Users.nome,"administra",a.Servers.nome
|
# coding: utf-8
# In[ ]:
# In[4]:
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.cluster import KMeans
from collections import Counter
# In[16]:
# Per-user feature vector: one cluster id from each of five behaviour files.
features = {}
bots = set()
with open(os.path.join('ParsedData', 'retweets_clusters.csv')) as f:
    for row in f:
        row = row.rsplit("\n")[0]
        row = row.rsplit(",")
        features[row[0]] = [row[1]]
        bots.add(row[0])
# NOTE(review): f.close() after a with-block is redundant (already closed).
f.close()
with open(os.path.join('ParsedData', 'temporal_clusters.csv')) as f:
    for row in f:
        row = row.rsplit("\n")[0]
        row = row.rsplit(",")
        # NOTE(review): KeyError if a user appears here but not in the
        # retweets file -- assumes all five files share the same user set.
        features[row[0]].append(row[1])
f.close()
with open(os.path.join('ParsedData', 'hashtags_clusters.csv')) as f:
    for row in f:
        row = row.rsplit("\n")[0]
        row = row.rsplit(",")
        features[row[0]].append(row[1])
f.close()
with open(os.path.join('ParsedData', 'media_clusters.csv')) as f:
    for row in f:
        row = row.rsplit("\n")[0]
        row = row.rsplit(",")
        features[row[0]].append(row[1])
f.close()
with open(os.path.join('ParsedData', 'mentions_clusters.csv')) as f:
    for row in f:
        row = row.rsplit("\n")[0]
        row = row.rsplit(",")
        features[row[0]].append(row[1])
f.close()
# In[20]:
# Pairwise similarity: number of matching cluster ids (0..5) per user pair.
similarity = []
data = features.values()
# NOTE(review): indexing .values() requires Python 2 (py3 returns a view).
for i in range(0,len(data)):
    for j in range(i+1,len(data)):
        s = 0
        for k in range(0,5):
            if data[i][k] == data[j][k]:
                s=s+1
        similarity.append(s)
# Condensed dissimilarity matrix (1 - similarity/5), one value per line.
f = open(os.path.join('ParsedData', 'dissimilarity_matrix.csv'),"w")
for i in similarity:
    f.write(str(1- (float(i)/5))+"\n")
f.close()
# Row labels (user ids) in the same order as features.keys().
f = open(os.path.join('ParsedData', 'user_diss.csv'),"w")
for i in features.keys():
    f.write(i+"\n")
f.close()
########################################################################################
# MAC HW TX path tool box
########################################################################################
import struct
import wifi
def tx_machwqueue(mpdu):
    """Return the MAC HW queue index: the descriptor's first byte."""
    queue_byte = mpdu[0]
    return ord(queue_byte)
def tx_channel(mpdu):
    """Return the channel configured at transmit time: the descriptor's second byte."""
    channel_byte = mpdu[1]
    return ord(channel_byte)
def tx_framelen(mpdu):
    """Return the little-endian 16-bit frame length stored at bytes 2..3."""
    (framelen,) = struct.unpack("<H", mpdu[2:4])
    return framelen
def tx_machwformat(packet):
    """Return the MAC HW-formatted payload: everything past the 32-byte descriptor."""
    descriptor_len = 32
    return packet[descriptor_len:]
def tx_policytable(mpdu):
    """Return the 28-byte policy table (descriptor bytes 4..31)."""
    pt_start, pt_end = 4, 32
    return mpdu[pt_start:pt_end]
# All accessors below read the same 28-byte policy table, laid out as seven
# little-endian 32-bit words; the repeated struct.unpack is hoisted into a
# shared private helper.
_PT_FORMAT = "<LLLLLLL"

def _pt_words(pt):
    """Unpack the 28-byte policy table *pt* into its seven LE 32-bit words."""
    return struct.unpack(_PT_FORMAT, pt)

def uPatternPT(pt):
    """Policy table word 0."""
    return _pt_words(pt)[0]

def nTxProtPT(pt):
    """Word 1 bits 17..16."""
    return (_pt_words(pt)[1] & 0x30000) >> 16

def nTxPT(pt):
    """Word 1 bits 15..14."""
    return (_pt_words(pt)[1] & 0xC000) >> 14

def shortGIPT(pt):
    """Word 1 bit 13 as a bool."""
    return (_pt_words(pt)[1] & 0x2000) != 0

def txPwrLevelPT(pt):
    """Word 1 bits 11..9."""
    return (_pt_words(pt)[1] & 0xE00) >> 9

def stbcPT(pt):
    """Word 1 bits 8..7."""
    return (_pt_words(pt)[1] & 0x180) >> 7

def ldpcPT(pt):
    """Word 1 bit 6 as a bool."""
    return (_pt_words(pt)[1] & 0x40) != 0

def numExtnSSPT(pt):
    """Word 1 bits 5..4."""
    return (_pt_words(pt)[1] & 0x30) >> 4

def BfFrmExPT(pt):
    """Word 1 bit 3 as a bool."""
    return (_pt_words(pt)[1] & 0x8) != 0

def csiPrecisionPT(pt):
    """Word 1 bit 2 as a bool."""
    return (_pt_words(pt)[1] & 0x4) != 0

def cfbTypePT(pt):
    """Word 1 bits 1..0."""
    return _pt_words(pt)[1] & 0x3

def cfbSizePT(pt):
    """Word 2 bits 23..16."""
    return (_pt_words(pt)[2] & 0xFF0000) >> 16

def smmIndexPT(pt):
    """Word 2 bits 15..8."""
    return (_pt_words(pt)[2] & 0xFF00) >> 8

def antennaSetPT(pt):
    """Word 2 bits 7..0.

    Bug fix: this previously unpacked an undefined name ``msdu`` instead of
    the *pt* argument, raising NameError on every call.
    """
    return _pt_words(pt)[2] & 0xFF

def keySRamIndexRA(pt):
    """Word 3 bits 21..12."""
    return (_pt_words(pt)[3] & 0x3FF000) >> 12

def keySRamIndex(pt):
    """Word 3 bits 11..0."""
    return _pt_words(pt)[3] & 0xFFF

def rtsThreshold(pt):
    """Word 4 bits 27..16."""
    return (_pt_words(pt)[4] & 0xFFF0000) >> 16

def shortRetryLimit(pt):
    """Word 4 bits 15..8."""
    return (_pt_words(pt)[4] & 0xFF00) >> 8

def longRetryLimit(pt):
    """Word 4 bits 7..0."""
    return _pt_words(pt)[4] & 0xFF

def mcsIndex1(pt):
    """Word 5 bits 23..16."""
    return (_pt_words(pt)[5] & 0xFF0000) >> 16

def mcsIndex2(pt):
    """Word 5 bits 15..8."""
    return (_pt_words(pt)[5] & 0xFF00) >> 8

def mcsIndex3(pt):
    """Word 5 bits 7..0."""
    return _pt_words(pt)[5] & 0xFF
def tx_machdr(mpdu):
    """Return the 48-byte MAC header following the 32-byte descriptor."""
    hdr_start = 32
    return mpdu[hdr_start:hdr_start + 48]
def framectl(mh):
    """Return the 16-bit frame-control field: the second LE halfword of the header."""
    _, frame_control = struct.unpack("<HH", mh[0:4])
    return frame_control
def initializationvector(mh):
    """Return the IV field: LE 32-bit word index 10 of the 48-byte header."""
    words = struct.unpack("<LLLLLLLLLLLL", mh)
    return words[10]
def initializationvector_unpack(iv):
    """
    This function unpacks all the subfields from the initialization vector field, they are
    returned as the following tuple:
    (extended IV : bool, )
    """
    extended_iv = bool(iv & 0x20)
    return (extended_iv, )
def addr1(mh):
    """Return the 6-byte address-1 field (header bytes 6..11)."""
    return mh[6:6 + 6]

def addr2(mh):
    """Return the 6-byte address-2 field (header bytes 12..17)."""
    return mh[12:12 + 6]

def addr3(mh):
    """Return the 6-byte address-3 field (header bytes 18..23)."""
    return mh[18:18 + 6]
def tx_body(mpdu):
    """Return the frame body: everything after descriptor (32 B) + MAC header (48 B)."""
    body_start = 32 + 48
    return mpdu[body_start:]
class tx_mpdu:
    """Class of an MPDU sent over the air in the MAC HW format"""
    def __init__(self, airqueue, packet):
        """Decode *packet* (descriptor + MAC header + body) sent on *airqueue*."""
        # store the air queue on which it was sent
        self.airqueue = airqueue
        # store the queue name
        self.airqueuename = wifi.AIR_NAME[self.airqueue]
        # store the MAC HW queue on which it was sent
        self.machwqueue = tx_machwqueue(packet)
        # store the MAC HW queue name
        self.machwqueuename = wifi.Q_NAME[self.machwqueue]
        # extract the channel
        self.channel = tx_channel(packet)
        # extract the MAC HW format of the TX frame
        self.machwformat = tx_machwformat(packet)
        # extract the frame length
        self.framelen = tx_framelen(packet)
        # extract the policy table
        self.pt = tx_policytable(packet)
        # extract the MAC header
        self.mh = tx_machdr(packet)
        # extract the frame body
        self.bd = tx_body(packet)
        # extract information from the policy table
        self.uPatternPT = uPatternPT(self.pt)
        # extract the frame control field
        self.framectl = framectl(self.mh)
        # unpack the frame control field
        (self.type, self.subtype, self.typesubtype, self.tods, self.fromds, self.morefrag,
         self.retry, self.ps, self.moredata, self.protected, self.order) \
            = wifi.fctl_unpack(self.framectl)
        # extract the IV field
        self.iv = initializationvector(self.mh)
        # extract the subfield from the IV field
        (self.extendediv, ) = initializationvector_unpack(self.iv)
        # extract the addresses
        self.addr1 = addr1(self.mh)
        self.addr2 = addr2(self.mh)
        self.addr3 = addr3(self.mh)
        # compute the theoretical length of the frame
        self.compute_theorylen()
        # build the info string
        self.build_info()
    def build_info(self):
        """Assemble a short human-readable summary of the frame into self.info."""
        # print the type of frame
        self.info = wifi.framename[self.typesubtype] + "("
        self.info += "ch=%d"%self.channel
        if self.tods:
            self.info += ",TDS"
        if self.fromds:
            self.info += ",FDS"
        if self.morefrag:
            self.info += ",MF"
        if self.retry:
            self.info += ",R"
        if self.ps:
            self.info += ",PM"
        if self.moredata:
            self.info += ",MD"
        if self.protected:
            self.info += ",PF"
        if self.order:
            self.info += ",O"
        self.info += ")"
    def compute_theorylen(self):
        """
        This function returns the actual length of the field that the MAC HW is adding to the
        payload taking into account the various bits set in the mac header.
        """
        # initialize the length (frame control + duration/id + addr1 always included)
        length = 10
        # addr2 included for control and mgt frames and for RTS, PSPOLL, CFEND, BA and BAR
        if (self.type in (wifi.fctl_type_data, wifi.fctl_type_mgt) or
            self.typesubtype in (wifi.fctl_rts, wifi.fctl_pspoll, wifi.fctl_cfend,
                                 wifi.fctl_ba, wifi.fctl_bar)):
            length += 6
        # addr3 included for control and mgt frames
        if self.type in (wifi.fctl_type_data, wifi.fctl_type_mgt):
            length += 6
        # sequence control only added in data frames
        if self.type in (wifi.fctl_type_data, ):
            length += 2
        # address 4 only included in very tods and fromds set
        if self.tods and self.fromds:
            length += 6
        # qos control only included for qos data frames
        if self.typesubtype in (wifi.fctl_qosdata, wifi.fctl_qosdatacfack,
                                wifi.fctl_qosdatacfpoll, wifi.fctl_qosdatacfackcfpoll):
            length += 2
        # cfc only included for control wrapper frames
        if self.typesubtype in (wifi.fctl_controlwrapper, ):
            length += 2
        # HTC only included for control wrapper frames and ordered frames
        if self.typesubtype in (wifi.fctl_controlwrapper,) or self.order:
            length += 4
        # IV only included for protected frames
        if self.protected:
            length += 4
        # extended IV only included in protected frames with extended IV
        if self.protected and self.extendediv:
            length += 4
        # the remaining elements (MIC, FCS) are included in the body
        length += len(self.bd)
        # record the computed theoretical length
        self.theorylen = length
|
from models import db
from passlib.hash import pbkdf2_sha256 as sha256
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy import String
from datetime import datetime
class HotKey(db.Model):
    """Flask-SQLAlchemy model for a named hot key; names are stored lowercased."""
    __tablename__ = 'hot_key'
    # Surrogate primary key.
    id: int = db.Column(db.Integer, primary_key=True)
    # Display name; normalized to lower case by save()/filter_by_name().
    name: str = db.Column(db.String(100), nullable=False)
    # Last-save time.  NOTE(review): datetime.now() is naive local time --
    # confirm UTC is not expected here.
    timestamp = db.Column(db.DateTime(), nullable=False)
    @staticmethod
    def get(id):
        """Return the HotKey with primary key *id*, or None."""
        return HotKey.query.filter_by(id=id).first()
    @staticmethod
    def list():
        """Return every HotKey row."""
        return HotKey.query.all()
    @staticmethod
    def list_by_ids(ids):
        """Return all HotKey rows whose id is in *ids*."""
        return HotKey.query.filter(HotKey.id.in_(ids)).all()
    @staticmethod
    def filter_by_name(name):
        """Return the first HotKey matching the lowercased *name*, or None."""
        name = name.lower()
        return HotKey.query.filter_by(name=name).first()
    def save(self):
        """Lower-case the name, stamp the time, and upsert via merge + commit."""
        self.name = self.name.lower()
        self.timestamp = datetime.now()
        db.session.merge(self)
        db.session.commit()
|
from time import clock
import random
def undirectedSparseRandomGraph(vertices):
    """Build a random undirected weighted graph with degree capped at 6.

    Returns (B, s, t): adjacency dict B maps vertex -> [degree, [(weight,
    neighbor), ...]]; s and t are distinct random endpoints.  Weights are
    uniform in 1..1000.

    NOTE(review): the validation passes below call this function
    recursively but DISCARD the rebuilt graph -- they then re-run
    addEdges on the old, already-faulty B.  Likely the intent was
    `B, s, t = undirectedSparseRandomGraph(vertices)`; confirm.
    """
    B = {}
    A = []
    maxEdge = 6
    # Initialize every vertex with degree 0 and an empty adjacency list.
    for i in range(vertices):
        A.append(i)
        B.update({i:[0,[]]})
    # For each vertex, connect to randomly chosen partners that still have
    # spare degree, up to maxEdge edges per vertex.
    for u in range(vertices):
        arrayList = []
        for v in range(vertices):
            if(B[v][0]<maxEdge and v!=u):
                arrayList.append(v)
        left = maxEdge - B[u][0]
        n = len(arrayList)
        k = 0
        # Partial Fisher-Yates: move a random candidate to the tail and link it.
        while(k<n and left>0):
            r = random.randrange(n-k)
            temp = arrayList[n-k-1]
            arrayList[n-k-1] = arrayList[r]
            arrayList[r] = temp
            w = arrayList[n-k-1]
            weight = random.randrange(1,1001)
            B[u][1].append((weight,w))
            B[w][1].append((weight,u))
            B[w][0] += 1
            B[u][0] += 1
            k += 1
            left -= 1
            #print u,w
    # Pick two distinct endpoints s and t.
    s = A[random.randrange(len(A))]
    A.pop(A.index(s))
    t = A[random.randrange(len(A))]
    #print "Starting point is: ",s
    #print "Ending point is: ",t
    #return B,s,t"""
    #print "Graph is : "
    """for v in B:
        print v, ":", B[v][0]
    print "---------------------"""
    """print "Graph is : "
    for v in B:
        print v, ":",
        for pair in B[v][1]:
            print pair,
        print ""
    print "---------------------"""
    # Validation: every vertex must reach degree 6 with no duplicate edges;
    # otherwise rebuild (see NOTE above about the discarded result).
    for v in B.keys():
        if(B[v][0]!=6):
            #print "Fault",v
            #B.clear()
            undirectedSparseRandomGraph(vertices)
            B = addEdges(B,s,t,vertices)
            return B,s,t
        for pair in B[v][1]:
            count = 0
            for x in B[v][1]:
                if x[1]==pair[1]:
                    count+=1
            if count>1:
                #print "Fault",v,pair[1]
                #B.clear()
                undirectedSparseRandomGraph(vertices)
                B = addEdges(B,s,t,vertices)
                return B,s,t
    B = addEdges(B,s,t,vertices)
    return B,s,t
#B,s,t = undirectedSparseRandomGraph(100)
#print len(B),s,t
def addEdges(B, s, t, vertices):
    """Chain extra edges from *s* through eligible vertices and finally to *t*.

    Each hop gets a random weight in 1..1000; degrees in B are updated on
    both endpoints and the mutated B is returned.
    """
    start = s
    for node in range(vertices):
        # A weight is drawn every iteration (even when no edge is added),
        # matching the original random-stream consumption.
        edge_weight = random.randrange(1, 1001)
        if node != start and node != t and notConnected(B, node, s):
            B[node][1].append((edge_weight, s))
            B[s][1].append((edge_weight, node))
            B[node][0] += 1
            B[s][0] += 1
            s = node
    # Close the chain with a final edge to t.
    edge_weight = random.randrange(1, 1001)
    B[s][1].append((edge_weight, t))
    B[t][1].append((edge_weight, s))
    B[s][0] += 1
    B[t][0] += 1
    return B
def notConnected(B, i, s):
    """Return True when vertex *s* is absent from vertex *i*'s adjacency list."""
    return all(pair[1] != s for pair in B[i][1])
#undirectedSparseRandomGraph(10)
|
from PyInstaller.utils.hooks import collect_data_files
# Bundle dash_html_components' package data files into the frozen app.
datas = collect_data_files('dash_html_components')
|
# Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: wenyi.tang@intel.com
# Update Date: 2019/4/3 下午5:03
import argparse
import multiprocessing as mp
import re
from pathlib import Path
import tqdm
from VSR.DataLoader.VirtualFile import RawFile
# Command-line interface: input/output roots plus an optional fixed frame size
# (0 means "parse WxH out of the file name").
parser = argparse.ArgumentParser(
    description="Convert a raw video to a folder of images.")
parser.add_argument("input_dir", help="root folder of raw videos.")
parser.add_argument("output_dir", help="root folder of images")
parser.add_argument("--width", type=int, default=0,
                    help="default 0. Auto detect from file name.")
parser.add_argument("--height", type=int, default=0,
                    help="default 0. Auto detect from file name.")
parser.add_argument("--overwrite", action='store_true',
                    help="overwrite existing files with same name.")
FLAGS = parser.parse_args()  # parsed at import time, read module-wide
def guess_file_size(file):
    """Guess (width, height) from a file name like 'video_1920x1080.yuv'.

    Splits the name on '_' and looks for a '<digits>x<digits>' token
    (case-insensitive 'x').  Returns (-1, -1) when no such token exists.
    """
    # Bug fix: the pattern was a plain string ("\d+[xX]\d+"); '\d' is an
    # invalid escape sequence in a non-raw literal (warning, error-bound in
    # future Python versions).  Use a raw string.
    rept = re.compile(r"\d+[xX]\d+")
    for part in file.name.split('_'):
        ans = rept.findall(part)
        if ans:
            width, height = ans[0].lower().split('x')
            return int(width), int(height)
    return -1, -1
def parse_format(fmt):
    """Normalize a raw-video format name; 'YUV'/'YUV420P' are aliases of 'YV12'."""
    normalized = fmt.upper()
    return 'YV12' if normalized in ('YUV', 'YUV420P') else normalized
def encode(file, save_dir):
    """Decode one raw video `file` and save every frame as PNG under `save_dir`.

    Frame size comes from --width/--height, falling back to the file name.
    Returns the video's stem (used for progress display).
    """
    w = FLAGS.width
    h = FLAGS.height
    if w == 0 or h == 0:
        # Fall back to parsing WxH out of the file name.
        w, h = guess_file_size(file)
        if w <= 0 or h <= 0:
            raise ValueError("No width/height can be retrieved!")
    fmt = file.suffix[1:]  # extension without the leading dot
    fmt = parse_format(fmt)
    save_dir /= file.stem  # one sub-folder per video
    save_dir.mkdir(exist_ok=FLAGS.overwrite, parents=True)
    fd = RawFile(file, fmt, [w, h])
    frames = fd.read_frame(fd.frames)  # read all frames of the clip
    for i, f in enumerate(frames):
        f.convert('RGB').save(f'{str(save_dir)}/{i:05d}.png')
    return file.stem
def main():
    """Collect raw videos, convert each one in a thread pool, show progress."""
    input_dir = Path(FLAGS.input_dir)
    if input_dir.is_dir():
        raw_videos = filter(lambda f: f.is_file(), input_dir.rglob('*'))
    else:
        assert input_dir.is_file()
        raw_videos = [input_dir]
    raw_videos = sorted(raw_videos)
    save_dir = Path(FLAGS.output_dir)
    save_dir.mkdir(exist_ok=True, parents=True)
    pool = mp.pool.ThreadPool()
    results = []
    for fp in raw_videos:
        results.append(pool.apply_async(encode, (fp, save_dir)))
    # Drain results in submission order; the postfix shows the finished clip.
    with tqdm.tqdm(results, ascii=True, unit='image') as r:
        for i in r:
            name = i.get()
            r.set_postfix({'name': name})
    pool.close()
    pool.join()
if __name__ == '__main__':
    main()
|
from flask_jwt_extended import (
jwt_refresh_token_required,
get_jwt_identity)
from utils.exception import GenericException
import src.resources.endpoint.anounce as anounce
import json
import requests
from flask import jsonify
from config import config
from utils.token import Token
host = config['host']
client = config['client']
@jwt_refresh_token_required
@Token.check_refresh
def sendPass(request, app_db, ume_db):
    """Check the submitted password against the stored one for the JWT user.

    Returns {'dialog': False} on a match, {'dialog': True} otherwise.
    Bug fix: the original fell off the end on a mismatch, returning None
    and making Flask raise a 500.
    """
    username = get_jwt_identity()
    user_info = ume_db.get_user_info(username)
    data = request.data
    parsed = json.loads(data)
    # NOTE(review): this compares the client-sent password with the stored
    # value in plaintext -- confirm passwords are intentionally unhashed here.
    if user_info[0]['password'] == parsed['PASS']:
        ret = {
            'dialog': False,
        }
        return jsonify(ret), 200
    return jsonify({'dialog': True}), 200
@jwt_refresh_token_required
@Token.check_refresh
def getUsers(request, app_db, ume_db):
    """Return the full user list as JSON.

    Raises GenericException (HTTP 403) when the database lookup fails.
    """
    try:
        users = ume_db.get_users()
    except Exception as exc:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate; chain the cause for debuggability.
        raise GenericException(
            "Kullanıcı bilgileri güncellenirken bir hata ile karşılaşıldı !", 403) from exc
    ret = {
        'users': users,
    }
    return jsonify(ret), 200
@jwt_refresh_token_required
@Token.check_refresh
def update(request, app_db, ume_db):
    """Update a user's record and role; mirror supplier changes into SAP.

    Returns the refreshed user list plus any SAP messages as JSON.
    Raises GenericException (HTTP 403) on any failure.
    """
    data = request.data
    parsed = json.loads(data)
    sapMsg = []
    # user_type code -> human readable description (Turkish), replacing the
    # original if/elif chain; unknown codes leave type_desc unset, as before.
    type_descriptions = {
        'SUP': 'Tedarikçi',
        'QTY': 'Kaliteci',
        'PUR': 'Satınalmacı',
        'PLA': 'Planlamacı',
    }
    try:
        user_type = parsed['DATA']['user_type']
        if user_type in type_descriptions:
            parsed['DATA']['type_desc'] = type_descriptions[user_type]
        ume_db.modify_user(parsed['DATA'])
        u_type = user_type + "_1"
        ume_db.modify_user_role(parsed['DATA']['username'], u_type)
        if user_type == 'SUP':
            # Suppliers also exist in SAP; mirror the change there.
            r = requests.get(host + 'add_user' + client,
                             params={'sup_id': parsed['DATA']['user_sys_id']}, verify=False)
            parsedSapData = json.loads(r.content)
            sapMsg = parsedSapData['OUT']
    except Exception as exc:
        # Narrowed from a bare `except:`; chain the cause.
        raise GenericException(
            "Kullanıcı bilgileri güncellenirken bir hata ile karşılaşıldı !", 403) from exc
    users = ume_db.get_users()
    ret = {
        'users': users,
        'sapMsg': sapMsg,
    }
    return jsonify(ret), 200
@jwt_refresh_token_required
@Token.check_refresh
def delete(request, app_db, ume_db):
    """Delete a user and its role; remove supplier accounts from SAP too.

    Returns the refreshed user list plus any SAP messages as JSON.
    Raises GenericException (HTTP 403) on any failure.
    """
    data = request.data
    parsed = json.loads(data)
    sapMsg = []
    try:
        ume_db.delete_user(parsed['DATA'])
        ume_db.delete_user_role(parsed['DATA'])
        if parsed['DATA']['user_type'] == 'SUP':
            # Suppliers also exist in SAP; remove the account there as well.
            r = requests.get(host + 'del_user' + client,
                             params={'sup_id': parsed['DATA']['user_sys_id']}, verify=False)
            parsedSapData = json.loads(r.content)
            sapMsg = parsedSapData['OUT']
    except Exception as exc:
        # Narrowed from a bare `except:`; chain the cause.
        raise GenericException(
            "Kullanıcı silinirken bir hata ile karşılaşıldı !", 403) from exc
    users = ume_db.get_users()
    ret = {
        'users': users,
        'sapMsg': sapMsg,
    }
    return jsonify(ret), 200
|
# NOTE: Python 2 script (print statements and raw_input).
from sys import argv
# Expects exactly one command-line argument: the user's name.
script, username = argv
prompt = '>'
print "Hi %r I'm the %r script." %(username, script)
print "I'd like to ask you a few questions."
print "Do you like me %s" % username
likes = raw_input(prompt)
print "Where do you live %s?" %username
lives = raw_input(prompt)
print "What kind of comeputer do you have?"
comeputer = raw_input(prompt)
# Echo the three answers back in one formatted message.
print """
Alright, so you said %s about liking me.
You live is %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, comeputer)
import os
import sys
pytest_plugins = ["sentry.utils.pytest"]
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
def pytest_configure(config):
    """Pytest hook: escalate warnings to errors and register custom markers."""
    import warnings
    # XXX(dcramer): Kombu throws a warning due to transaction.commit_manually
    # being used
    # Turn every warning into an error, except warnings raised from modules
    # whose name starts with kombu/raven/sentry (negative lookahead).
    warnings.filterwarnings("error", "", Warning, r"^(?!(|kombu|raven|sentry))")
    config.addinivalue_line("markers", "obsolete: mark test as obsolete and soon to be removed")
|
""" Write a static function sqrt to compute the square root of a nonnegative number c given in
the input using Newton's method:
- initialize t = c
- replace t with the average of c/t and t
- repeat until desired accuracy reached using condition Math.abs(t - c/t) > epsilon*t where epsilon = 1e-15;"""
try:
    c = int(input("enter the c value\n"))
    if c < 0:
        # Newton's iteration below oscillates forever for negative input
        # (epsilon1*t goes negative, so the loop condition never fails).
        print("c must be nonnegative")
    else:
        t = c
        epsilon1 = 1e-15
        # Bug fix: for c == 0 the original condition computed c/t with t == 0
        # and raised ZeroDivisionError; sqrt(0) is simply 0, so skip the loop.
        while c != 0 and abs(t - c/t) > epsilon1*t:
            t = (c/t + t)/2
            print("avg =", t)
except ValueError as a:
    print(a)
    print("enter integer value ")
|
# Stavros Avdella
# 3939968
# Robot Vision Spring 2019
# Programming Assignment 1
# Problem 3
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Read images
img = cv2.imread('image1.png')
img2 = cv2.imread('image2.png')
# Create the kernel by creating a an 2D-array filled with the values for Gaussian Smoothing,
# and multiplying by 3, 5, and 10.
# Base 3x3 Gaussian smoothing kernel (weights sum to 1).
_gaussian = np.array([[1/16, 1/8, 1/16],
                      [1/8,  1/4, 1/8],
                      [1/16, 1/8, 1/16]])
# Bug fix: `3 * [[...], [...], [...]]` repeated the *rows* of a Python list,
# producing a 9x3 array of unscaled values instead of the intended kernel
# "multiplied by 3, 5, and 10" (see the comment above).  Scale the ndarray.
kernel_gs3 = 3 * _gaussian
kernel_gs5 = 5 * _gaussian
kernel_gs10 = 10 * _gaussian
# Using the filter2D function to convolve the Gaussian Smoothing kernels with image 1 and 2 for each kernel
# (ddepth=-1 keeps the output depth equal to the source image's depth).
dst = cv2.filter2D(img,-1,kernel_gs3)
dst2 = cv2.filter2D(img,-1,kernel_gs5)
dst3 = cv2.filter2D(img,-1,kernel_gs10)
dst4 = cv2.filter2D(img2,-1,kernel_gs3)
dst5 = cv2.filter2D(img2,-1,kernel_gs5)
dst6 = cv2.filter2D(img2,-1,kernel_gs10)
# Plotting the images in a 4x2 grid (column 1 = image 1, column 2 = image 2)
# with no x and y tick marks.  The eight copy-pasted stanzas are replaced by
# one data-driven loop; subplot positions preserve the original order.
_panels = [
    (1, img, 'Original Image 1'),
    (3, dst, 'Gaussian 3 Image 1'),
    (5, dst2, 'Gaussian 5 Image 1'),
    (7, dst3, 'Gaussian 10 Image 1'),
    (2, img2, 'Original Image 2'),
    (4, dst4, 'Gaussian 3 Image 2'),
    (6, dst5, 'Gaussian 5 Image 2'),
    (8, dst6, 'Gaussian 10 Image 2'),
]
for _pos, _image, _title in _panels:
    plt.subplot(4, 2, _pos)
    plt.imshow(_image)
    plt.title(_title)
    plt.xticks([])
    plt.yticks([])
# Show the results
plt.show()
# Results:
# Increasing the scaling factor brightens the filtered image, bringing out the
# lighter colors more strongly. Comparing questions 1, 2, and 3, I think the
# median filter -- specifically a 7x7 median filter -- smooths an image best.
from django.shortcuts import render, redirect
from cashPayment.models import cashPayment
from .filters import CashFilter
def cashDashboard(request):
    """Render the cash-payment dashboard, filtered by the GET parameters."""
    payments = cashPayment.objects.all()
    cash_filter = CashFilter(request.GET, queryset=payments)
    context = {
        "object_list": cash_filter.qs,
        'myFilter': cash_filter,
    }
    return render(request, "CashDashboard.html", context)
def delete_cash(request, id):
    """Delete the payment with primary key `id`, then list what remains."""
    cashPayment.objects.filter(id=id).delete()
    context = {
        'items': cashPayment.objects.all(),
    }
    return render(request, "DeletePayment.html", context)
|
# -*- coding: utf-8 -*-
from pyspark import SparkContext
from pyspark.sql import SQLContext
import json
import pandas as pd
'''
当需要把Spark DataFrame转换成Pandas DataFrame时,可以调用toPandas();
当需要从Pandas DataFrame创建Spark DataFrame时,可以采用createDataFrame(pandas_df)。
但是,需要注意的是,在调用这些操作之前,
需要首先把Spark的参数spark.sql.execution.arrow.enabled设置为true,
因为这个参数在默认情况下是false
'''
# Required columns and the old -> new column-name mapping.
columns_json_str = '{"name":"影片名称","box_office":"票房"}'
columns_dict = json.loads(columns_json_str)
# Create the SparkContext.
sc = SparkContext('local')
# Create the SQLContext.
sqlContext = SQLContext(sc)
# Read a local or HDFS file [.load('hdfs://192.168.3.9:8020/input/movies.csv')].
df = sqlContext.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('../data/movies.csv')
# Print the column data types.
print(df.dtypes)
# Convert the Spark DataFrame to a pandas DataFrame, keeping only the wanted columns.
df = pd.DataFrame(df.toPandas(), columns=columns_dict.keys())
print(df)
data_values = df.values.tolist()
data_coulumns = list(df.columns)
# Convert the pandas DataFrame back to a Spark DataFrame (needs values + column names).
df = sqlContext.createDataFrame(data_values, data_coulumns)
# Rename the columns.
# df = df.withColumnRenamed('name', '影片名称')
for key in columns_dict.keys() :
    df = df.withColumnRenamed(key , columns_dict[key]);
print(df.collect())
print(df.printSchema())
# Write the renamed data out to a file.
filepath = 'new_movies.csv'
df.write.format("csv").options(header='true', inferschema='true').save('hdfs://192.168.3.9:8020/input/' + filepath)
|
# Generated by Django 3.2.7 on 2021-09-23 14:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: loosens `table.cpu` to an optional
    # (blank/null) 64-character CharField.
    dependencies = [
        ('asset', '0003_auto_20210918_1808'),
    ]
    operations = [
        migrations.AlterField(
            model_name='table',
            name='cpu',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='cpu'),
        ),
    ]
|
# 典型90問038
# https://atcoder.jp/contests/typical90/tasks/typical90_al
a, b = list(map(int, input().split()))
# 最大公約数
def gcd(x, y):
    """Greatest common divisor of x and y via Euclid's algorithm."""
    while y != 0:
        x, y = y, x % y
    return x
# lcm(a, b) = a*b / gcd(a, b); the problem caps answers at 10^18 and asks
# for 'Large' beyond that.
d = gcd(a,b)
ans = a*b//d if a*b//d<=10**18 else 'Large'
print(ans)
|
# @Author : jiaojie
# @CreateDate : 2020/5/17 13:42
# @Description :
# @UpdateAuthor :
# @LastUpdateTime :
# @UpdateDescription :
"""
1. 类属性怎么定义?实例属性怎么定义?
类属性是直接定义在类中的变量
实例属性:通过 对象.属性名 = 属性值 进行赋值
2. 实例方法中的self代表什么?
3. 类中__init__方法的作用
4. 根据下图定义一个类:至少在初始化方法中设置三个属性,另外至少定义两个方法,
1、创建两个实例对象
2、请问如何通过对象获取属性,通过对象调用方法?
3、扩展要求:不用提交作业,自己发挥想象,无限脑洞,定制游戏规则,让两个对象进行pk
定义一个游戏英雄类
特征(属性):血量(HP)、蓝量(MP)、攻击力(attack)、暴击率
行为(方法):技能1 技能2 技能3 移动
"""
|
import os
b_c_map = {}
def _f(n, s, t):
if not b_c_map:
for i, c in enumerate(s):
if b_c_map.get(c, None) is None:
b_c_map[c] = [i]
else:
b_c_map[c].append(i)
t_c_map = {}
for c in t:
if t_c_map.get(c, None) is None:
t_c_map[c] = 1
else:
t_c_map[c] += 1
m = 0
for key, value in t_c_map.items():
m = max(m, b_c_map[key][value - 1])
return m + 1
def f(n, s, t):
    """Public entry point; delegates to the cached implementation."""
    return _f(n, s, t)
if os.environ.get('DEBUG', False):
    # Self-checks: the expected answer is spelled out after each result.
    print(f"{f(9, 'arrayhead', 'arya')} = 5")
    print(f"{f(9, 'arrayhead', 'harry')} = 6")
    print(f"{f(9, 'arrayhead', 'ray')} = 5")
    print(f"{f(9, 'arrayhead', 'r')} = 2")
    print(f"{f(9, 'arrayhead', 'areahydra')} = 9")
else:
    # Judge mode: read |s|, s, the query count, then one query word per line.
    n = int(input())
    s = input()
    for _ in range(int(input())):
        print(f(n, s, input()))
|
# -*- coding: utf8 -*-
#
# Copyright 2012, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
'''
Created on 9 févr. 2012
@author: Florent Vennetier
'''
from PySide.QtCore import Qt
from KickstartModelBase import KickstartModelBase
class KickstartScriptsModel(KickstartModelBase):
    """
    Class to manage the scripts of the Kickstart file of a MIC project.
    """
    # Placeholder values for a freshly added, not-yet-committed script.
    NewScriptName = "NEW_UNSAVED_SCRIPT"
    NewScriptText = "# Enter script here\n"
    NewScriptInterpreter = "/bin/sh"
    NewScriptType = 0
    # Column indices of the table model.
    NameColumn = 0
    ScriptColumn = 1
    TypeColumn = 2
    InterpreterColumn = 3
    ErrorOnFailColumn = 4
    RunInChrootColumn = 5
    LogFileColumn = 6
    # Per-column dictionary keys and the user-visible header labels.
    ColumnKeys = ("name", "script", "type", "interp", "errorOnFail",
                  "inChroot", "logfile")
    ColumnHeaders = ("Name", "Script", "Type", "Interpreter", "Error on fail",
                     "Run in chroot", "Log file")

    def __init__(self, obsLightManager, projectName):
        """
        `obsLightManager`: a reference to the ObsLightManager instance
        `projectName`: the name of the MIC project to manage Kickstart commands
        """
        KickstartModelBase.__init__(self,
                                    obsLightManager,
                                    projectName,
                                    obsLightManager.getKickstartScriptDictionaries)
        # Scripts deleted in the UI, pending removal on commitChanges().
        self._scriptsToRemove = []

    # from QAbstractTableModel
    def headerData(self, section, orientation, role=Qt.DisplayRole):
        # Row headers are the row number; column headers come from ColumnHeaders.
        if role == Qt.DisplayRole:
            if orientation == Qt.Orientation.Vertical:
                return section
            else:
                return self.ColumnHeaders[section]
        return None

    # from QAbstractTableModel
    def data(self, index, role=Qt.DisplayRole):
        if not index.isValid():
            return None
        if role == Qt.DisplayRole:
            return self.displayRoleData(index)
        return None

    def displayRoleData(self, index):
        """
        Return the `Qt.DisplayRole` data for cell at `index`.
        """
        row = index.row()
        if row >= self.rowCount():
            return None
        retVal = self.dataDict(row).get(self.ColumnKeys[index.column()], None)
        return retVal

    # from QAbstractTableModel
    def setData(self, index, value, role=Qt.EditRole):
        if not index.isValid():
            return False
        # NOTE(review): edits are accepted only for Qt.DisplayRole although the
        # default role is Qt.EditRole -- confirm callers pass DisplayRole.
        if role == Qt.DisplayRole:
            self.dataDict(index.row())[self.ColumnKeys[index.column()]] = value
            self.modified = True
            return True
        return False

    def commitChanges(self):
        """
        Commit all changes to the scripts and write the Kickstart file.
        """
        # First drop the scripts queued for removal...
        while len(self._scriptsToRemove) > 0:
            scriptDict = self._scriptsToRemove[0]
            self.manager.removeKickstartScript(self.currentProject,
                                               scriptDict[self.ColumnKeys[self.NameColumn]])
            del self._scriptsToRemove[0]
        # ...then add or update the remaining ones.
        for scriptDict in self.dataDictList():
            if scriptDict[self.ColumnKeys[self.NameColumn]] == self.NewScriptName:
                # New scripts are exported with name=None so the manager
                # assigns the real name; the local dict keeps the placeholder.
                exportDict = dict(scriptDict)
                exportDict[self.ColumnKeys[self.NameColumn]] = None
            else:
                exportDict = scriptDict
            # pylint: disable-msg=W0142
            self.manager.addOrChangeKickstartScript(self.currentProject,
                                                    **exportDict)
        self.manager.saveKickstartFile(self.currentProject)
        self.modified = False

    def refresh(self):
        """
        Reload the script list from Kickstart file (only if all
        modifications have been commited).
        """
        if not self.modified:
            super(KickstartScriptsModel, self).refresh()

    def newScript(self):
        """
        Add a new script. Will not be added to the Kickstart file
        until `commitChanges()` is called.
        """
        ck = self.ColumnKeys
        scriptDict = {ck[self.NameColumn]: self.NewScriptName,
                      ck[self.ScriptColumn]: self.NewScriptText,
                      ck[self.TypeColumn]: self.NewScriptType,
                      ck[self.InterpreterColumn]: self.NewScriptInterpreter}
        self.dataDictList().append(scriptDict)
        self.modified = True

    def removeScript(self, row):
        """
        Remove the script at `row`. Will not be removed from the
        Kickstart file until `commitChanges()` is called.
        """
        scriptDict = self.dataDict(row)
        # Never-committed scripts don't need removal from the Kickstart file.
        if scriptDict[self.ColumnKeys[self.NameColumn]] != self.NewScriptName:
            self._scriptsToRemove.append(scriptDict)
        del self.dataDictList()[row]
        self.modified = True
|
import requests
import csv
from bs4 import BeautifulSoup
def get_product_info1(source):
    """Build a dict from a two-column spec table: first <td> -> key, second -> value."""
    return {
        row.findAll('td')[0].text: row.findAll('td')[1].text
        for row in source.find_all('tr')
    }
def get_product_info(source, url):
    """Extract the product's key-feature strings from `source`.

    Which tags hold the text differs per retailer, so dispatch on the url.
    Returns a list of stripped strings, or None for an unrecognised site
    (unchanged from the original behavior).

    Bug fix: dropped the needless `global all_li`, which leaked loop state
    into module scope for no reason.
    """
    info = []
    if 'amazon.in' in url:
        for li in source.find_all('li'):
            info.append(li.find('span').text.strip())
        return info
    elif 'flipkart.com' in url:
        for block in source:
            for li in block.findAll('li'):
                info.append(li.text.strip())
        return info
    elif 'snapdeal.com' in url:
        for li in source.findAll('span', attrs={"class": "h-content"}):
            info.append(li.text.strip())
        return info
def get_info_flipkart(soup, url):
    """Scrape name, price and key features from a Flipkart product page."""
    name = soup.find_all("span", attrs={"class": "_35KyD6"})[0].text.strip()
    price = soup.find_all("div", attrs={"class": "_1vC4OE _3qQ9m1"})[0].text.strip()
    details = get_product_info(soup.find_all("div", attrs={"class": "_3WHvuP"}), url)
    # Row layout shared by all scrapers: name, site, price, features, url.
    return [name, 'flipkart.com', price, details, url]
def get_info_amazon(soup, url):
    """Scrape name, price and key features from an Amazon.in product page."""
    name = soup.find('span', {'class': 'a-size-large'}).text.strip()
    price = soup.find('span', {'class': 'a-size-medium a-color-price'}).text.strip()
    details = get_product_info(
        soup.find('div', {'class': 'a-section a-spacing-medium a-spacing-top-small'}), url)
    # Row layout shared by all scrapers: name, site, price, features, url.
    return [name, 'amazon.in', price, details, url]
def get_info_snapdeal(soup, url):
    """Scrape name, price and key features from a Snapdeal product page."""
    name = soup.find('h1', {'class': 'pdp-e-i-head'}).text.strip()
    price = soup.find('span', {'class': 'payBlkBig'}).text.strip()
    details = get_product_info(soup.find('div', {'class': 'spec-body p-keyfeatures'}), url)
    # Row layout shared by all scrapers: name, site, price, features, url.
    return [name, 'snapdeal.com', price, details, url]
def for_multi_url():
    """Scrape every URL listed in input-e-Comm.txt, appending rows to output.csv.

    Improvements: files are opened via `with` (no leaked handles), and the
    CSV-writing code -- identical in every branch -- is hoisted out of the
    if/elif dispatch.
    """
    with open('input-e-Comm.txt', "r") as fp:
        urls = [line.strip("\n") for line in fp.readlines()]
    for url in urls:
        response = requests.get(url).text
        soup = BeautifulSoup(response, 'lxml')
        if 'flipkart.com' in url:
            data = get_info_flipkart(soup, url)
        elif 'amazon.in' in url:
            data = get_info_amazon(soup, url)
        elif 'snapdeal.com' in url:
            data = get_info_snapdeal(soup, url)
        else:
            continue  # unknown retailer: skip, as the original silently did
        with open('output.csv', 'a') as csv_file:
            csv.writer(csv_file).writerow(data)
def for_one_url(url):
    """Scrape a single product URL and append one row to output.csv.

    Same dispatch as for_multi_url; the duplicated CSV-writing code is
    hoisted out of the if/elif chain and the file is opened via `with`.
    """
    response = requests.get(url).text
    soup = BeautifulSoup(response, 'lxml')
    if 'amazon.in' in url:
        data = get_info_amazon(soup, url)
    elif 'flipkart.com' in url:
        data = get_info_flipkart(soup, url)
    elif 'snapdeal.com' in url:
        data = get_info_snapdeal(soup, url)
    else:
        return  # unknown retailer: nothing to write
    with open('output.csv', 'a') as csv_file:
        csv.writer(csv_file).writerow(data)
# Sample product URLs for manual testing of the single-url path.
url1 = 'https://www.flipkart.com/mi-max-2-black-64-gb/p/itmevkftufr4d5e2'
url2 = 'https://www.amazon.in/dp/B073GTVVW7'
url3 = 'https://www.snapdeal.com/product/intex-blue-aqua-star-i10/5188147452280046171'
#for_one_url(url3)
for_multi_url()
|
"""
Test some specifics of the App class.
"""
import logging
import asyncio
import asgineer
class LogCapturer(logging.Handler):
    """Context manager that records every message logged by the 'asgineer' logger."""

    def __init__(self):
        super().__init__()
        self.messages = []

    def emit(self, record):
        # Keep the raw (unformatted) message string; tests inspect it directly.
        self.messages.append(record.msg)

    def __enter__(self):
        logging.getLogger("asgineer").addHandler(self)
        return self

    def __exit__(self, *args, **kwargs):
        logging.getLogger("asgineer").removeHandler(self)
async def handler(request):
    """Minimal asgineer handler: every request gets an empty-body response."""
    return ""
def test_invalid_scope_types():
    # All scope valid scope types are tested in other tests. Only test invalid here.
    app = asgineer.to_asgi(handler)
    scope = {"type": "notaknownscope"}
    loop = asyncio.get_event_loop()
    with LogCapturer() as cap:
        loop.run_until_complete(app(scope, None, None))
    # The app must log (not raise) exactly one "unknown scope" message that
    # names the offending scope type.
    assert len(cap.messages) == 1
    assert "unknown" in cap.messages[0].lower() and "notaknownscope" in cap.messages[0]
def test_lifespan():
    # Drive the ASGI lifespan protocol by hand and check the app's replies.
    app = asgineer.to_asgi(handler)
    scope = {"type": "lifespan"}
    loop = asyncio.get_event_loop()
    lifespan_messages = [
        {"type": "lifespan.startup"},
        {"type": "lifespan.bullshit"},  # unknown message: must be logged, not fatal
        {"type": "lifespan.shutdown"},
    ]
    sent = []

    async def receive():
        return lifespan_messages.pop(0)

    async def send(m):
        sent.append(m["type"])

    with LogCapturer() as cap:
        loop.run_until_complete(app(scope, receive, send))
    # Startup and shutdown must be confirmed; three log lines expected:
    # starting up, the unknown message, shutting down.
    assert sent == ["lifespan.startup.complete", "lifespan.shutdown.complete"]
    assert len(cap.messages) == 3
    assert cap.messages[0].lower().count("starting up")
    assert "bullshit" in cap.messages[1] and "unknown" in cap.messages[1].lower()
    assert cap.messages[2].lower().count("shutting down")
if __name__ == "__main__":
    # Allow running this module directly, without pytest.
    test_invalid_scope_types()
    test_lifespan()
|
from scipy import spatial
import numpy as np
import networkx as nx
from gensim.models import Word2Vec
import pandas as pd
import preprocess
from nltk.tokenize import word_tokenize
from nltk.cluster.util import cosine_distance
import nltk
def sentence_embedding(sentences):
    """Embed tokenised sentences as fixed-length vectors.

    Trains a 1-dimensional Word2Vec on the fly, represents each sentence by
    its per-token scalar embeddings, then zero-pads every vector to the
    longest sentence length.  NOTE(review): `w2v[word]` is the pre-gensim-4
    API; newer gensim requires `w2v.wv[word]` -- confirm the pinned version.
    """
    #Vector representation
    w2v=Word2Vec(sentences,size=1,min_count=1,iter=1000)
    sentence_embeddings=[[w2v[word][0] for word in words] for words in sentences]
    max_len=max([len(tokens) for tokens in sentences])
    sentence_embeddings=[np.pad(embedding,(0,max_len-len(embedding)),'constant') for embedding in sentence_embeddings]
    return sentence_embeddings
def similarity(sentences, sentence_embeddings):
    """Return the pairwise cosine-similarity matrix of the embeddings,
    with each row normalised to sum to 1 (a row-stochastic matrix)."""
    size = len(sentences)
    sim_matrix = np.zeros((size, size))
    for row, vec_a in enumerate(sentence_embeddings):
        for col, vec_b in enumerate(sentence_embeddings):
            sim_matrix[row, col] = 1 - spatial.distance.cosine(vec_a, vec_b)
    # Normalise each row independently so rows sum to one.
    for row in range(len(sim_matrix)):
        sim_matrix[row] = sim_matrix[row] / sim_matrix[row].sum()
    return sim_matrix
def pageRank(sim_matrix):
    """Run PageRank over the similarity graph and return {node: score}.

    NOTE(review): the returned dict is ordered by score *ascending*, so the
    highest-ranked sentences come last -- confirm callers expect that.
    """
    nx_graph = nx.from_numpy_array(sim_matrix)
    scores = nx.pagerank(nx_graph)
    sorted_dict={k:v for k,v in sorted(scores.items(), key=lambda item:item[1])}
    return sorted_dict
import pymol,re
def sup( reference, whichobjs='.*' ):
    # Superpose (cmd.fit) every loaded PyMOL object whose name matches the
    # regex `whichobjs` onto `reference`.  (Python 2 / PyMOL script.)
    sel_template = '^'+whichobjs+'$'
    r = ''
    #selections = cmd.get_names('selections')
    #for s in selections:
    #    if re.match(sel_template,s) is not None:
    #        cmd.fit()
    models = cmd.get_names()
    for m in models:
        print m
        # NOTE(review): matches against the unanchored `whichobjs`, not the
        # anchored `sel_template` built above -- prefix matches also fit.
        if re.match(whichobjs,m) is not None:
            cmd.fit(m,reference)
cmd.extend('sup',sup)
|
def ex043():
    """Read weight and height, compute BMI (IMC) and print the category.

    Prompts and labels are Portuguese.  Bands: < 18.5 underweight;
    18.5-25 ideal; 25-30 overweight; 30-40 obesity; > 40 'ACIMA DO PESO'
    (checked before the other bands, so it takes precedence).
    """
    peso = float(input('Seu peso:\n'))
    altura = float(input('Sua altura:\n'))
    imc = (peso)/(altura*altura)
    print(f'IMC: {imc:.2f}')
    if imc < 18.5:
        situ = 'ABAIXO DO PESO'
    elif imc > 40:
        situ = 'ACIMA DO PESO'
    elif imc < 25:
        situ = 'PESO IDEAL'
    elif imc < 30:
        situ = 'SOBREPESO'
    else:
        situ = 'OBESIDADE'
    print(f'Situação: {situ}')
ex043()
|
import time
import pyautogui
from pynput.keyboard import Key, Controller, Listener
controller = Controller()
arr = []
time.sleep(2)
def on_press(key):
    # Record printable keys, most recent first; special keys have no .char.
    if hasattr(key, 'char'):
        arr.insert(0, key.char)
l = Listener(on_press=on_press)
l.start()
res1680x1050 = {
'start': [(270, 470), '0, 172, 134'], # start
'car not found': [(1150, 530), '255, 255, 255'], # car not found
'car found': [(751, 380), '152, 152, 152'], # car found
'car sold': [(343, 410), '115, 115, 115'], # car sold
}
res1920x1080 = {
'start': [(302, 473), '0, 181, 146'], # start
'car not found': [(1209, 566), '255, 255, 255'], # car not found
'car found': [(858, 374), '195, 195, 195'], # car found
'car sold': [(404, 406), '150, 150, 150'], # car sold
}
pixels = res1920x1080
# pixels = res1680x1050
while True:
    # Poll until the 'start' marker pixel shows its expected colour.
    while (True):
        pixelColor = pyautogui.screenshot().getpixel(pixels.get('start')[0])  # start
        if (f'{pixelColor[0]}, {pixelColor[1]}, {pixelColor[2]}' == pixels.get('start')[1]): break
        time.sleep(0.5)
    # Confirm twice to enter the car listing.
    controller.press(Key.enter)
    controller.release(Key.enter)
    time.sleep(0.5)
    controller.press(Key.enter)
    controller.release(Key.enter)
    time.sleep(0.5)
    # Poll the result screen: no car, or a car that may already be sold.
    while (True):
        pixelColor = pyautogui.screenshot().getpixel(pixels.get('car not found')[0])  # car not found
        if (f'{pixelColor[0]}, {pixelColor[1]}, {pixelColor[2]}' == pixels.get('car not found')[1]):
            controller.press(Key.esc)
            controller.release(Key.esc)
            break
        pixelColor = pyautogui.screenshot().getpixel(pixels.get('car found')[0])  # car found
        if (f'{pixelColor[0]}, {pixelColor[1]}, {pixelColor[2]}' == pixels.get('car found')[1]):
            pixelColor = pyautogui.screenshot().getpixel(pixels.get('car sold')[0])  # car sold
            if (f'{pixelColor[0]}, {pixelColor[1]}, {pixelColor[2]}' == pixels.get('car sold')[1]):
                # Already sold: dismiss the dialog first.
                controller.press(Key.esc)
                controller.release(Key.esc)
            # Purchase sequence: confirm, select, and accept.
            controller.press('y')
            controller.release('y')
            time.sleep(0.3)
            controller.press(Key.down)
            controller.release(Key.down)
            time.sleep(0.3)
            controller.press(Key.enter)
            controller.release(Key.enter)
            time.sleep(0.3)
            controller.press(Key.enter)
            controller.release(Key.enter)  # bought
            time.sleep(6)
            controller.press(Key.enter)
            controller.release(Key.enter)
            time.sleep(1)
            controller.press(Key.esc)
            controller.release(Key.esc)
            time.sleep(1)
            controller.press(Key.esc)
            controller.release(Key.esc)
            break
        time.sleep(0.5)
|
from django.db import models
from django.utils import timezone
class Subscriptor(models.Model):
    # One subscription, identified by the (client, account) pair.
    client = models.BigIntegerField()
    account = models.BigIntegerField()
    date_created = models.DateTimeField(
        default=timezone.now,
    )
    # Subscription lifetime; units are defined by the consumer (default 3).
    lifetime = models.PositiveSmallIntegerField(
        default=3,
    )
    is_active = models.BooleanField(
        default=True,
    )

    def __unicode__(self):
        # Python 2 style representation, e.g. "123:456".
        return ":".join([str(self.client), str(self.account)])

    class Meta:
        db_table = "subscriptor"
        index_together = [["client", "account"]]
        unique_together = ("client", "account")
from .signals import update_subscriptor
|
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from django.contrib import messages
from .forms import LoginForm, RegisterForm
# Create your views here.
def user_login(request):
    """Render the login form and authenticate POSTed credentials.

    NOTE(review): after a successful login the view falls through and
    re-renders the login template; `redirect` is imported at module level but
    never used -- a redirect to a landing page was probably intended here.
    """
    if request.method == 'POST':
        #do some authentication stuff login as stay on the page
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            cleaned_form_data = login_form.cleaned_data
            user = authenticate(username = cleaned_form_data['username'],
                                password = cleaned_form_data['password'])
            if user is not None:
                #proceed to login if active
                if user.is_active:
                    login(request,user)
                    #return HttpResponse('Successfully Logged In')
                    messages.success(request,'Login successfull')
                else:
                    return HttpResponse('Inactive User')
            else:
                return HttpResponse('Invalid Login')
    else:
        login_form = LoginForm()
    template = 'accounts/login.html'
    context = {'form':login_form,}
    return render(request,template,context)
def register(request):
    """Show the registration form; on a valid POST create the user with a
    properly hashed password and render the success page."""
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            new_user = form.save(commit=False)
            # Hash the password rather than storing the raw form value.
            new_user.set_password(form.cleaned_data['password'])
            new_user.save()
            return render(request, 'accounts/reg_success.html', {'new_user': new_user,})
    else:
        form = RegisterForm()
    template = 'accounts/register.html'
    context = {'form': form,}
    return render(request, template, context)
def test_context(request):
    """Context processor: inject a site-wide message into template contexts."""
    message = "The message being serdver globally"  # NOTE(review): typo "serdver" in the runtime string -- fix alongside any templates that match on it
    return {'msg':message,}
#!/usr/bin/env python
#
"""
Contains classes and functions that a SAML2.0 Service Provider (SP) may use
to do attribute aggregation.
"""
import logging
# from saml2 import client
from saml2 import BINDING_SOAP
logger = logging.getLogger(__name__)
DEFAULT_BINDING = BINDING_SOAP
class AttributeResolver:
    """Queries a set of IdPs for extra attributes about a subject (SAML2
    attribute aggregation for a Service Provider)."""

    def __init__(self, saml2client, metadata=None, config=None):
        self.saml2client = saml2client
        # Bug fix: `metadata` was assigned and then unconditionally
        # overwritten with the client's metadata, silently discarding the
        # caller's argument.  Honour an explicit metadata when given.
        if metadata is not None:
            self.metadata = metadata
        else:
            self.metadata = saml2client.config.metadata

    def extend(self, name_id, issuer, vo_members):
        """
        :param name_id: The identifier by which the subject is know
            among all the participents of the VO
        :param issuer: Who am I the poses the query
        :param vo_members: The entity IDs of the IdP who I'm going to ask
            for extra attributes
        :return: A dictionary with all the collected information about the
            subject
        """
        result = []
        for member in vo_members:
            for ass in self.metadata.attribute_consuming_service(member):
                for attr_serv in ass.attribute_service:
                    logger.info("Send attribute request to %s", attr_serv.location)
                    if attr_serv.binding != BINDING_SOAP:
                        continue
                    # attribute query assumes SOAP binding
                    session_info = self.saml2client.attribute_query(
                        name_id, attr_serv.location, issuer_id=issuer)
                    if session_info:
                        result.append(session_info)
        return result
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 10:24:16 2019
@author: FJA
"""
from sklearn import svm
from scipy.io import loadmat
import numpy as np
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
def STL10_read():
    """Load the STL-10 'train' and 'test' .mat files, merge them, and group
    the images by class.

    Returns (data, label): `data` is a list of 10 arrays (one per class,
    pixel values scaled to [0, 1]); `label` is a (13000, 1) array of class
    ids (1300 per class).  Label 10 is remapped to 0.
    """
    m1 = loadmat("train")
    X1 = np.array(m1['X'])
    Y1 = np.array(m1['y'])
    X1 = np.reshape(X1,[-1,96,96,3],order='F')  # column-major, as stored by MATLAB
    for i in range(np.shape(Y1)[0]):
        if Y1[i,0] == 10:
            Y1[i,0] = 0
    m2 = loadmat("test")
    X2 = np.array(m2['X'])
    Y2 = np.array(m2['y'])
    X2 = np.reshape(X2,[-1,96,96,3],order='F')
    for i in range(np.shape(Y2)[0]):
        if Y2[i,0] == 10:
            Y2[i,0] = 0
    x_train = np.vstack((X1,X2))
    y_train = np.vstack((Y1,Y2))
    x_train = x_train.astype('float32') / 255
    y_train = np.squeeze(y_train)
    data = []
    label= []
    for i in range(10):
        data.append(x_train[y_train==i])
    #        print(np.shape(data[i])[0])
    for i in range(10):
        label.append(i*np.ones(1300))
    #    data = np.array(data)
    #    data = np.reshape(data,[-1,32,32,3])
    label= np.array(label)
    label= np.reshape(label,[13000,-1])
    return data,label
def performance(label, label_pred, num):
    """Score a one-class prediction against ground truth.

    `label_pred[i] == -1` marks sample i as a detected outlier ("positive");
    class `num` is the inlier class.  Returns (TPR, TNR, F1) where
      TPR = TP / (TP + FN)  -- detected outliers over actual outliers
      TNR = TN / (TN + FP)  -- accepted inliers over actual inliers
    Bug fix: the original returned FP / (FP + TN) -- the false-positive
    rate -- under the name TNR.
    """
    FP, TP, TN, FN = 0, 0, 0, 0
    for i in range(np.size(label)):
        if label_pred[i] == -1:
            if label[i] == num:
                FP += 1  # inlier wrongly flagged as outlier
            else:
                TP += 1  # outlier correctly flagged
        else:
            if label[i] == num:
                TN += 1  # inlier accepted
            else:
                FN += 1  # outlier missed
    TPR = TP/(TP+FN)
    TNR = TN/(TN+FP)
    F1 = 2*TP/(2*TP+FP+FN)
    return TPR, TNR, F1
if __name__ == '__main__':
    # Build a contaminated dataset: all images of the inlier class plus a
    # small fraction (outlier_ratio) of images drawn from every other class.
    inlier_num = 9
    outlier_ratio = 0.05
    nomal = inlier_num
    data,label = STL10_read()
    anormal = list(range(10))
    anormal.remove(nomal)
    aa = data[nomal]
    # Outliers to sample from each of the 9 anomalous classes.
    # NOTE(review): np.int was removed in NumPy 1.24; the casts below need
    # plain int(...) on modern NumPy.
    o_num = (aa.shape[0]/(1-outlier_ratio)-aa.shape[0])/9
    #cut = np.shape(aa)[0]
    label = nomal*np.ones((np.shape(aa)[0],1))
    for i in anormal:
        _ = data[i]
        index = np.random.choice(np.shape(_)[0],np.int(o_num))
        aa = np.vstack((aa,_[index]))
        label = np.vstack((label,i*np.ones((np.int(o_num),1))))
    data = aa
    data = np.reshape(data,(-1,96*96*3))  # flatten images for the SVM
    # Grid search over nu/gamma kept for reference:
    # a=[]
    # f=[]
    # for mu in [0.01,0.1]:
    #     for gama in [2**(-10),2**(-9),2**(-8),2**(-7),2**(-6),2**(-5),2**(-4),2**(-3),2**(-2),2**(-1)]:
    #         clf = svm.OneClassSVM(nu=mu, kernel="rbf", gamma=gama)
    #         clf.fit(data)
    #         label_pred = clf.predict(data)
    #         TPR,TNR,F1 = performance(label,label_pred,nomal)
    #         f.append(F1)
    #         score = -clf.decision_function(data)
    #         fpr, tpr, thresholds = roc_curve(np.reshape(label,[np.shape(data)[0],1]), score, pos_label=inlier_num)
    #         a.append(1-auc(fpr, tpr))
    # index = np.argmax(a)
    # print(index)
    # print(a[index])
    # print(f[index])
    mu=0.1
    gama=2**(-9)
    clf = svm.OneClassSVM(nu=mu, kernel="rbf", gamma=gama)
    clf.fit(data)
    score = -clf.decision_function(data)
    # Report 1 - AUC(inliers as positives) = AUC of detecting outliers.
    fpr, tpr, thresholds = roc_curve(np.reshape(label,[np.shape(data)[0],1]), score, pos_label=inlier_num)
    print(1-auc(fpr, tpr))
|
"""Run some timing experiments on class Queue operations.
Authors: Francois Pitt, January 2013,
Danny Heap, September 2013, February 2014
"""
#from csc148queue import Queue
from linked_list import Queue
import time
def enqueue_dequeue(q: Queue, howmany: int):
    """Enqueue and dequeue 'howmany' items to/from Queue 'q'.
    """
    # Each iteration pairs one enqueue with one dequeue, so the queue returns
    # to its starting size every time -- we measure ops at a constant size.
    for i in range(howmany):
        q.enqueue(42)
        q.dequeue()
def time_queue(m: int, n: int):
    """Return the wall-clock seconds to enqueue/dequeue 'm' items on a
    Queue that has been pre-loaded with 'n' items.
    """
    preloaded = Queue()
    for _ in range(n):
        preloaded.enqueue(1)
    started = time.time()
    enqueue_dequeue(preloaded, m)
    return time.time() - started
if __name__ == '__main__':
    # Measure how the queue's cost scales with its pre-existing size.
    for n in (step * 10000 for step in range(1, 11)):
        elapsed = time_queue(20000, n)
        print('Inserting and removing 20000 items with', n, 'items already '
              'in queue:', elapsed)
|
import json
import re
# Load the raw tweet dump; non-ASCII bytes are dropped at read time.
with open('tweets.json', 'r', encoding='ascii', errors='ignore') as json_file:
    tweets_json = json.load(json_file)
tweets = tweets_json['tweets']
# If a tweet has no retweeted_status, its full_text is reported as "-1"
# so it can be filtered out below.
default = {"full_text": "-1"}
# Cleaned tweets that are safe to load into MongoDB.
tweets_req = dict()
emoji_pattern = re.compile("["
                           u"\U0001F600-\U0001F64F"  # most of the emoticons
                           u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                           u"\U0001F680-\U0001F6FF"  # transport & map symbols
                           u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                           "]+", flags=re.UNICODE)
# full_text of each actual tweet, stripped of emojis and non-ASCII symbols.
tweeted_texts = list(map(lambda x: emoji_pattern.sub(r'', x["full_text"]).encode(encoding='ascii', errors='ignore').decode('ascii'), tweets))
# full_text of each retweeted_status, same cleanup; "-1" marks "absent".
retweeted_texts = list(map(lambda x: emoji_pattern.sub(r'', x.get("retweeted_status", default)["full_text"]).encode(encoding='ascii', errors='ignore').decode('ascii'), tweets))
retweeted_texts = list(filter(lambda x: x != "-1", retweeted_texts))
all_tweet_texts = tweeted_texts + retweeted_texts
# BUG FIX: the original referenced the undefined name `all_tweets`
# here (NameError at runtime); the intended list is `all_tweet_texts`.
all_tweet_docs = [{'text': v} for v in all_tweet_texts]
tweets_req['loadable'] = all_tweet_docs
with open('tweets_clean.json', 'w') as file:
    json.dump(tweets_req, file)
# (Removed the original's trailing bare `all_tweet_docs` expression --
# a notebook-style echo that is a no-op in a script.)
|
import sqlite3 as sq
from Employee import employee
conn=sq.connect('employee.db')
c=conn.cursor()
'''
c.execute("""create table Employees (empid integer,empnm text,sal integer)""")
'''
def insert_emp(emp):
    """Persist one employee row inside a transaction on the module connection."""
    params = {'empid': emp.empid, 'empnm': emp.empnm, 'sal': emp.sal}
    with conn:
        c.execute('insert into Employees values(:empid,:empnm,:sal)', params)
def update_sal(emp, sal):
    """Set a new salary for the employee matched on both id and name."""
    params = {'empid': emp.empid, 'empnm': emp.empnm, 'sal': sal}
    with conn:
        c.execute("""update employees set sal=:sal where empid=:empid and empnm=:empnm""", params)
def remove_emp(emp):
    """Delete the employee row matched on both id and name."""
    params = {'empid': emp.empid, 'empnm': emp.empnm}
    with conn:
        c.execute("""Delete from employees where empid=:empid and empnm=:empnm""", params)
def search(id_1):
    """Look up an employee by id.

    Returns an `employee` instance, or None when no row matches.
    (The original indexed the fetched row unconditionally, so a miss
    raised TypeError on `None[0]` instead of signalling "not found".)
    """
    with conn:
        c.execute("""select * from employees where empid=:empid""",
                  {'empid': id_1})
        row = c.fetchone()
        if row is None:
            return None
        return employee(row[0], row[1], row[2])
# Sample records used for manual testing of the helpers above.
emp_1 = employee(9162809, 'vishal', 40000)
emp_2 = employee(25256, 'sam', 23455)
# NOTE: the triple-quoted strings below are no-op string literals used to
# "comment out" one-off setup/teardown calls; re-enable them as needed.
'''
insert_emp(emp_1)
insert_emp(emp_2)
'''
#update_sal(emp_1,30000)
'''
remove_emp(emp_2)
remove_emp(emp_1)
'''
# Look up a known employee and print the stored name.
emp = search(9162809)
print(emp.empnm)
'''
c.execute('select * from Employees')
print(c.fetchall())
conn.commit()
'''
|
# Switches
from re import S
import sys
import argparse
# pretty print
import pprint
pp = pprint.PrettyPrinter(indent=4)
# pwn
from pwn import *
from pwnlib.rop import gadgets
# ::::::::::::::::::::::::: CONFIG :::::::::::::::::::::::::
PATH = 'Chals'              # directory holding the challenge files
BINARY = '_binary_'         # placeholder: name of the target binary
HOST = '_domain_:_port_'    # placeholder: remote target as "host:port"
# libc used by the remote service vs. the one installed locally.
LIBC_REMOTE = f'{PATH}/libc/libc-2.23.so'
LIBC_LOCAL = '/usr/lib/i386-linux-gnu/libc-2.33.so'
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# ::::::::::::::::::::::: CHECK SEC ::::::::::::::::::::::::
'''
'''
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def pwn(args):
    """Drive the exploit: spawn (or connect to) the target, optionally
    attach gdb or dump ROP gadgets, then hand over an interactive shell.

    args: argparse.Namespace with boolean flags
          .remote / .local / .debug / .gadgets
    """
    binary = f'./{PATH}/{BINARY}'
    proc = process(binary)
    rop = ROP(binary)
    if (args.remote):
        # NOTE(review): the local process spawned above is replaced (and
        # left running) when targeting the remote host -- confirm intent.
        host = HOST.split(':')
        proc = remote(host[0], int(host[1]))
        libc = ELF(LIBC_REMOTE, checksec=False)
    elif (args.local):
        libc = ELF(LIBC_LOCAL, checksec=False)
    if (args.debug):
        gdb.attach(proc)
    elif (args.gadgets):
        # NOTE(review): this local shadows the `gadgets` module imported at
        # the top of the file, and is skipped whenever --debug is also set
        # (elif) -- verify both are intended.
        gadgets = rop.gadgets
        pp.pprint(gadgets)
    ## get da banner
    log.info('Receiving banner ...')
    proc.recvuntil('> ')
    # shellit
    proc.interactive()
def print_banner():
    """Print the ASCII-art "pwn" banner to stdout."""
    print('''
    __
    \ \ _ ____ ___ __
    \ \ | '_ \ \ /\ / / '_ \
    / / | |_) \ V V /| | | |
    /_/ | .__/ \_/\_/ |_| |_|
    |_|
    ''')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=print_banner())
parser.add_argument('-l', '--local',
help = 'run against local binary',
action = 'store_true',
)
parser.add_argument('-d', '--debug',
help = 'run with debugger attached',
action ='store_true'
)
parser.add_argument('-r', '--remote',
help = 'run against remote binary',
action ='store_true'
)
parser.add_argument('-g', '--gadgets',
help = 'dump binary gadgets',
action ='store_true'
)
args = parser.parse_args()
if (len(sys.argv) == 1): parser.print_help()
else: pwn(args)
|
from pc.types import Variable, Node, Data, Action, Param, InverseFunction
from pc.model import Model
from pc.utils import plot_values_b
def run_a():
    """Predictive-coding simulation, condition A (unit variance).

    Phase 1 runs the noisy model and records free energy each step.
    Phase 2 re-runs an identical but noise-free copy to derive a valence
    signal from the (clipped) drop in free energy every 20 iterations.

    Returns the history-carrying objects:
    (intero_mu, intero_data, prior, extero_mu, extero_data, action,
     free_energy, valence)
    """
    num_iterations = 10000
    prior_value = 0.0
    delta_time = 0.01
    intero_stim_value = 5.0
    extero_stim_value = 5.0
    noise = 0.03
    # Exteroceptive stimulus is on during iterations [1000, 9000); the
    # interoceptive perturbation fires once, at iteration 5000.
    extero_stim_range = range(1000, 9000)
    intero_stim_range = [5000]

    prior = Node(init_value=prior_value)
    intero_mu = Node(dt=delta_time, init_value=prior_value)
    extero_mu = Node(dt=delta_time)
    action = Action()
    # BUG FIX: the original also built a Data(init_value=prior_value)
    # here that was immediately rebound and never used; removed.
    intero_data = Data(noise=noise)
    extero_data = Data(noise=noise)
    extero_param = Param(is_fixed=True, init_value=1.0)
    extero_func = InverseFunction(param=extero_param)
    free_energy = Variable()

    model = Model()
    model.add_connection(prior, intero_mu)
    model.add_connection(intero_mu, intero_data, action=action)
    model.add_connection(extero_mu, extero_data)
    model.add_connection(extero_mu, prior, func=extero_func)

    for itr in range(num_iterations):
        if itr in intero_stim_range:
            intero_data.update(intero_data.value + intero_stim_value, skip_history=True)
        if itr in extero_stim_range:
            extero_data.update(extero_stim_value)
        else:
            extero_data.update(0.0)
        # Action closes the loop on the interoceptive signal.
        intero_data.update(intero_data.value + delta_time * action.value)
        model.update()
        free_energy.update(model.get_free_energy())

    # Valence: identical model, but noise-free.
    _prior = Node(init_value=prior_value)
    _intero_mu = Node(dt=delta_time, init_value=prior_value)
    _extero_mu = Node(dt=delta_time)
    _action = Action()
    # (Same dead Data(init_value=...) removed here as well.)
    _intero_data = Data(noise=0.0)
    _extero_data = Data(noise=0.0)
    extero_param = Param(is_fixed=True, init_value=1.0)
    extero_func = InverseFunction(param=extero_param)

    model = Model()
    model.add_connection(_prior, _intero_mu)
    model.add_connection(_intero_mu, _intero_data, action=_action)
    model.add_connection(_extero_mu, _extero_data)
    model.add_connection(_extero_mu, _prior, func=extero_func)

    prev_fe = 0
    valence_val = 0
    valence = Variable()
    for itr in range(num_iterations):
        if itr in intero_stim_range:
            _intero_data.update(_intero_data.value + intero_stim_value, skip_history=True)
        if itr in extero_stim_range:
            _extero_data.update(extero_stim_value)
        else:
            _extero_data.update(0.0)
        _intero_data.update(_intero_data.value + delta_time * _action.value)
        model.update()
        fe = model.get_free_energy()
        # Valence is the free-energy decrease over 20-step windows,
        # clipped to [-1, 1]; held constant between updates.
        if itr % 20 == 0:
            valence_val = max(-1, min(1, prev_fe - fe))
            prev_fe = fe
        valence.update(valence_val)

    return intero_mu, intero_data, prior, extero_mu, extero_data, action, free_energy, valence
def run_b():
    """Predictive-coding simulation, condition B (interoceptive
    connection variance = 100, i.e. down-weighted interoceptive
    prediction errors); otherwise identical to run_a.

    Returns the history-carrying objects:
    (intero_mu, intero_data, prior, extero_mu, extero_data, action,
     free_energy, valence)
    """
    num_iterations = 10000
    prior_value = 0.0
    delta_time = 0.01
    intero_stim_value = 5.0
    extero_stim_value = 5.0
    noise = 0.03
    # Exteroceptive stimulus is on during iterations [1000, 9000); the
    # interoceptive perturbation fires once, at iteration 5000.
    extero_stim_range = range(1000, 9000)
    intero_stim_range = [5000]

    prior = Node(init_value=prior_value)
    intero_mu = Node(dt=delta_time, init_value=prior_value)
    extero_mu = Node(dt=delta_time)
    action = Action()
    # BUG FIX: the original also built a Data(init_value=prior_value)
    # here that was immediately rebound and never used; removed.
    intero_data = Data(noise=noise)
    extero_data = Data(noise=noise)
    extero_param = Param(is_fixed=True, init_value=1.0)
    extero_func = InverseFunction(param=extero_param)
    free_energy = Variable()

    model = Model()
    model.add_connection(prior, intero_mu)
    model.add_connection(intero_mu, intero_data, action=action, variance=100)
    model.add_connection(extero_mu, extero_data)
    model.add_connection(extero_mu, prior, func=extero_func)

    for itr in range(num_iterations):
        if itr in intero_stim_range:
            intero_data.update(intero_data.value + intero_stim_value, skip_history=True)
        if itr in extero_stim_range:
            extero_data.update(extero_stim_value)
        else:
            extero_data.update(0.0)
        # Action closes the loop on the interoceptive signal.
        intero_data.update(intero_data.value + delta_time * action.value)
        model.update()
        free_energy.update(model.get_free_energy())

    # Valence: identical model, but noise-free.
    _prior = Node(init_value=prior_value)
    _intero_mu = Node(dt=delta_time, init_value=prior_value)
    _extero_mu = Node(dt=delta_time)
    _action = Action()
    # (Same dead Data(init_value=...) removed here as well.)
    _intero_data = Data(noise=0.0)
    _extero_data = Data(noise=0.0)
    extero_param = Param(is_fixed=True, init_value=1.0)
    extero_func = InverseFunction(param=extero_param)

    model = Model()
    model.add_connection(_prior, _intero_mu)
    model.add_connection(_intero_mu, _intero_data, action=_action, variance=100)
    model.add_connection(_extero_mu, _extero_data)
    model.add_connection(_extero_mu, _prior, func=extero_func)

    prev_fe = 0
    valence_val = 0
    valence = Variable()
    for itr in range(num_iterations):
        if itr in intero_stim_range:
            _intero_data.update(_intero_data.value + intero_stim_value, skip_history=True)
        if itr in extero_stim_range:
            _extero_data.update(extero_stim_value)
        else:
            _extero_data.update(0.0)
        _intero_data.update(_intero_data.value + delta_time * _action.value)
        model.update()
        fe = model.get_free_energy()
        # Valence is the free-energy decrease over 20-step windows,
        # clipped to [-1, 1]; held constant between updates.
        if itr % 20 == 0:
            valence_val = max(-1, min(1, prev_fe - fe))
            prev_fe = fe
        valence.update(valence_val)

    return intero_mu, intero_data, prior, extero_mu, extero_data, action, free_energy, valence
if __name__ == "__main__":
fig_path = "figures"
num_iterations = 10000
prior_value = 0.0
delta_time = 0.01
intero_stim_value = 5.0
extero_stim_value = 5.0
noise = 0.03
extero_stim_range = range(1000, 9000)
intero_stim_range = [5000]
lines = [1000.0, 5000.0, 9000.0]
intero_mu, intero_data, prior, extero_mu, extero_data, action, free_energy, valence = run_a()
intero_mu_b, intero_data_b, prior_b, extero_mu_b, extero_data_b, action_b, free_energy_b, valence_b = (
run_b()
)
lims = [(-6, 7), (-6, 7), (-6, 7), (-6, 7), (-6, 7), (-7, 1), (-1, 15), (-1, 1)]
plot_values_b(
[intero_data, extero_data, intero_mu, extero_mu, action, prior, free_energy, valence],
[intero_data_b, extero_data_b, intero_mu_b, extero_mu_b, action_b, prior_b, free_energy_b, valence_b],
["Intero Data", "Extero Data", "Mu Intero", "Mu Extero", "Action", "Mu Prior", "Free Energy", "Valence"],
lims,
lines=lines,
fig_path=f"{fig_path}/figure_4.png",
shape=(4, 2),
figsize=(12, 10),
)
|
from telegram import ReplyKeyboardMarkup

# Reply-keyboard layouts for the bot's conversation flow.  Each *_keyboard
# is a list of button rows; each *_markup wraps it as a one-time keyboard
# (hidden after the user taps a button).

# Introductory keyboard ("Who are you?").
meet_keyboard = [['Кто ты?']]
meet_markup = ReplyKeyboardMarkup(meet_keyboard, one_time_keyboard=True)
# Main menu: mission / preparation / where to find the sage / repeat intro / quit.
reply_keyboard = [['Какова моя миссия?', 'Как мне подготовиться к приключению?'],
                  ['Где и когда искать мудреца?', "Прости, кто ты еще раз?"],
                  ['Я все понял, прощай.']]
markup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
# Stage-navigation keyboards: each stage offers its next stage(s) plus a reset.
start_keyboard = [['Go to the Stage 1']]
start_markup = ReplyKeyboardMarkup(start_keyboard, one_time_keyboard=True)
reset_keyboard = [['Reset the state']]
reset_markup = ReplyKeyboardMarkup(reset_keyboard, one_time_keyboard=True)
stage_1_keyboard = [['Go to the Stage 2.1', 'Go to the Stage 2.2'], ['Reset the state']]
stage_1_markup = ReplyKeyboardMarkup(stage_1_keyboard, one_time_keyboard=True)
stage_2_1_keyboard = [['Go to the Stage 3.1', 'Go to the Stage 3.2'], ['Reset the state']]
stage_2_1_markup = ReplyKeyboardMarkup(stage_2_1_keyboard, one_time_keyboard=True)
stage_2_2_keyboard = [['Go to the Stage 3.3'], ['Reset the state']]
stage_2_2_markup = ReplyKeyboardMarkup(stage_2_2_keyboard, one_time_keyboard=True)
stage_3_1_keyboard = [['Go to the Stage 1'], ['Reset the state']]
stage_3_1_markup = ReplyKeyboardMarkup(stage_3_1_keyboard, one_time_keyboard=True)
stage_3_2_keyboard = [['Go to the Stage 4'], ['Reset the state']]
stage_3_2_markup = ReplyKeyboardMarkup(stage_3_2_keyboard, one_time_keyboard=True)
# dash for app
import dash
# dash_core_components provides the Graph component
import dash_core_components as dcc
# dash_html_components mirrors HTML tags as Python components
import dash_html_components as html

# External stylesheet recommended by the Dash tutorial.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

# Page layout: a heading, a short description, and one grouped bar chart.
app.layout = html.Div(children=[
    html.H1(children='Hello World'),
    html.Div(
        html.P(children='Simple visualization using Dash python framework')
    ),
    dcc.Graph(
        id='example-bar-graph',
        # Plotly figure spec: three bar traces sharing the same x values.
        figure={
            'data': [
                {'x': [1, 2, 3], 'y': [1, 1, 3], 'type': 'bar', 'name': 'Bar 1'},
                {'x': [1, 2, 3], 'y': [0.3, 0.2, 2], 'type': 'bar', 'name': 'Bar 2'},
                {'x': [1, 2, 3], 'y': [0, 4, 1], 'type': 'bar', 'name': 'Bar 3'},
            ],
            'layout': {
                'title': 'Dash Data Visualization'
            }
        }
    )
])

# Run the development server.
# NOTE(review): run_server/dash_core_components/dash_html_components are
# deprecated in Dash 2.x (use app.run and `from dash import dcc, html`) --
# keep as-is for the Dash version this was written against.
if __name__ == '__main__':
    app.run_server(debug=True)
import zipfile
from threading import Thread
def extractFile(zFile, password):
    """Try one password against the archive; report it on success.

    Wrong passwords make extractall raise, which is deliberately
    swallowed so the brute-force loop keeps going.
    NOTE(review): on Python 3, zipfile expects `pwd` as bytes -- callers
    should encode the candidate password; confirm against the wordlist
    handling in main().
    """
    try:
        zFile.extractall(pwd=password)
        # Parenthesised single-argument print is valid on both Python 2
        # (the original used a py2 print statement) and Python 3.
        print('[+] Found password ' + password + '\n')
    except Exception:
        # Expected failure path for a wrong password; intentionally ignored.
        pass
def main():
    """Brute-force the zip named in argv[1] using the wordlist in argv[2].

    One thread is started per candidate password; extractFile prints any
    password that unlocks the archive.
    """
    # BUG FIX: the original read the module globals `zname`/`dname`,
    # which were never defined (NameError); take them from the command
    # line instead.
    import sys
    zname, dname = sys.argv[1], sys.argv[2]
    zFile = zipfile.ZipFile(zname)
    # `with` closes the wordlist file (the original leaked the handle).
    with open(dname) as passFile:
        for line in passFile:
            password = line.strip('\n')
            t = Thread(target=extractFile, args=(zFile, password))
            t.start()


if __name__ == '__main__':
    main()
|
from django.shortcuts import render
from blogapp.models import Post
from django.views.generic import ListView, DetailView
# Create your views here.
def Home(request):
    """Render the landing page with a static title and welcome message."""
    context = {
        'title': 'Home Page',
        'message': 'Hello! Welcome to my demo learning website! In that case I am a learner.',
    }
    return render(request, 'blogapp/index.html', context=context)
def About(request):
    """Render the about page with static contact details."""
    details = {
        'name': 'MD. Soharab Hossain',
        'email': 'sohansoharab@ieee.org',
        'country': 'Bangladesh',
    }
    return render(request, 'blogapp/about.html', context=details)
class PostView(ListView):
    """Generic list view over all Post objects."""
    model = Post
    template_name = 'blogapp/post.html'
    # Name the template uses for the list of posts.
    context_object_name = 'all_post'
class PostDetailsView(DetailView):
    """Generic detail view for a single Post."""
    model = Post
    template_name = 'blogapp/post_detail.html'
    # Name the template uses for the selected post.
    context_object_name = 'main_post'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.