index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
6,500 | fea0619263b081f60ed0a4e178ef777a8d5dc988 | from django.contrib import admin
from .models import TutorialsReview,TutorialsReviewComment
admin.site.register(TutorialsReview)
admin.site.register(TutorialsReviewComment) |
6,501 | 0779e516e35c41acf0529961e11541dfd1320749 | # funkcja usuwająca zera z listy
def remove_zeros(given_list):
    """Return a new list containing every non-zero element, order preserved."""
    return [element for element in given_list if element != 0]
# funkcja sortująca listę
def sort_desc(given_list):
    """Return a new list with the elements of given_list sorted descending.

    The input list is not mutated. (The old hand-rolled selection-sort draft
    that lived here commented-out has been removed; sorted() already does
    the job in O(n log n), and key=None was the default anyway.)
    """
    return sorted(given_list, reverse=True)
# funkcja sprawdzająca czy iilość elementów jest mniejsza od danej wartości
# zwraca wartość logiczną danego wyrażenia
def length_check(n, given_list):
    """Return True when n exceeds the number of elements in given_list."""
    return len(given_list) < n
# funkcja odejmująca 1 od pierwszych n-elementów listy
def substract_one_for_n_elements(n, given_list):
    """Return a copy of given_list with 1 subtracted from its first n elements."""
    decremented = list(given_list)
    for idx in range(n):
        decremented[idx] -= 1
    return decremented
# wielki finał i kompletny algorytm Havel-Hakimi.
# This algorithm will return true if the answers are consistent
# (i.e. it's possible that everyone is telling the truth)
# and false if the answers are inconsistent (i.e. someone must be lying)
# Havel-Hakimi algorithm.
# This algorithm will return true if the answers are consistent
# (i.e. it's possible that everyone is telling the truth)
# and false if the answers are inconsistent (i.e. someone must be lying)
def hh(given_list):
    """Run Havel-Hakimi on a list of claimed degrees.

    Returns True when the sequence is graphical (consistent), False
    otherwise. The caller's list is not mutated (remove_zeros copies).
    Fixes vs. the original: the `break` statements directly after `return`
    were unreachable and are gone, the redundant outer `if/else` wrapper is
    folded into the loop, and the function now always returns a bool
    (the old version could fall off the loop returning None).
    """
    while given_list:
        # 1: drop zero entries; an empty remainder is consistent.
        given_list = remove_zeros(given_list)
        if not given_list:
            return True
        # 3: sort descending, 4: take the largest claimed degree.
        given_list = sort_desc(given_list)
        n = given_list.pop(0)
        # 5: not enough remaining people to account for n connections.
        if length_check(n, given_list):
            return False
        # 6, 7: each of the n largest remaining entries loses one connection.
        given_list = substract_one_for_n_elements(n, given_list)
    return True
# *****************************************
# testy
def test_remove_zeros():
    # remove_zeros drops every zero while keeping the remaining order.
    cases = [
        ([5, 3, 0, 2, 6, 2, 0, 7, 2, 5], [5, 3, 2, 6, 2, 7, 2, 5]),
        ([4, 0, 0, 1, 3], [4, 1, 3]),
        ([1, 2, 3], [1, 2, 3]),
        ([0, 0, 0], []),
        ([], []),
    ]
    for given, expected in cases:
        assert remove_zeros(given) == expected
def test_sort_desc():
    # sort_desc orders descending, including empty and singleton inputs.
    cases = [
        ([5, 1, 3, 4, 2], [5, 4, 3, 2, 1]),
        ([0, 0, 0, 4, 0], [4, 0, 0, 0, 0]),
        ([1], [1]),
        ([], []),
    ]
    for given, expected in cases:
        assert sort_desc(given) == expected
def test_length_check():
    # length_check(n, lst) is True exactly when n > len(lst).
    cases = [
        (7, [6, 5, 5, 3, 2, 2, 2], False),
        (5, [5, 5, 5, 5, 5], False),
        (5, [5, 5, 5, 5], True),
        (3, [1, 1], True),
        (1, [], True),
        (0, [], False),
    ]
    for n, lst, expected in cases:
        assert length_check(n, lst) is expected
def test_substract_one_for_n_elements():
    # Only the first n entries are decremented; the tail is untouched.
    cases = [
        (4, [5, 4, 3, 2, 1], [4, 3, 2, 1, 1]),
        (11, [14, 13, 13, 13, 12, 10, 8, 8, 7, 7, 6, 6, 4, 4, 2],
             [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, 4, 4, 2]),
        (1, [10, 10, 10], [9, 10, 10]),
        (3, [10, 10, 10], [9, 9, 9]),
        (1, [1], [0]),
    ]
    for n, given, expected in cases:
        assert substract_one_for_n_elements(n, given) == expected
def test_hh():
    # Verdicts for a mix of graphical (True) and impossible (False) sequences.
    cases = [
        ([5, 3, 0, 2, 6, 2, 0, 7, 2, 5], False),
        ([4, 2, 0, 1, 5, 0], False),
        ([3, 1, 2, 3, 1, 0], True),
        ([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17, 0, 3, 16], True),
        ([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6, 4, 7, 12], True),
        ([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7, 12, 3], False),
        ([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15, 5, 1], False),
        ([2, 2, 0], False),
        ([3, 2, 1], False),
        ([1, 1], True),
        ([1], False),
        ([], True),
    ]
    for degrees, expected in cases:
        assert hh(degrees) is expected
|
6,502 | 1f0349edd9220b663f7469b287f796e4a54df88d | '''Finding Perfect Numbers
3/2/17
@author: Annalane Miller (asm9) and Ivanna Rodriguez (imr6)'''
# Scan 2..9999 printing every perfect number (a number equal to the sum of
# its proper divisors); stop once more than four have been printed.
num_perfect = 0
for value in range(2, 10000):
    #set initial values
    high= value
    low = 1
    divisors = []
    #finding divisors
    # Collect divisor pairs (low, value // low); `high` shrinks to the
    # current co-divisor so the scan effectively stops near sqrt(value).
    while low < high:
        if value % low ==0:
            high = value// low
            divisors.append(low)
            if high != low:
                divisors.append(high)
        low += 1
    #find if number is perfect
    # The pair for low == 1 put `value` itself into divisors; drop it so
    # the sum covers proper divisors only.
    divisors.remove(value)
    total= sum(divisors)
    #print 4 perfect numbers in range
    if total==value:
        print(value)
        num_perfect +=1
        if num_perfect > 4:
            break
|
6,503 | b80deec4d3d3ab4568f37cc59e098f1d4af5504c | # Square-Root of Trinomials
import math
# Read integer coefficients of a quadratic ax^2 + bx + c and print its real
# roots via the discriminant. All prompts/output strings are in Greek.
print("Έχουμε ένα τριώνυμο ax²+bx+c. Δώστε μία θετική ή αρνητική τιμή σε κάθε σταθερά!")
a=int(input("a:"))
b=int(input("b:"))
c=int(input("c:"))
# Discriminant D = b^2 - 4ac decides how many real roots exist.
D= b**2-4*a*c
print("Η Διακρίνουσα ειναι: " + str(D))
if D>0:
    # Two distinct real roots.
    x1=(-b+math.sqrt(D))/(2*a)
    print("Η πρώτη ρίζα ειναι: " + str(x1))
    x2=(-b-math.sqrt(D))/(2*a)
    print("Η δεύτερη ρίζα ειναι: " + str(x2))
elif D==0:
    # One double root.
    x=(-(b/(2*a)))
    print("Η διπλή ρίζα ειναι: " + str(x))
elif D<0: # else:
    # No real roots.
    print("Δεν υπάρχει ρίζα")
|
6,504 | 7775d260f0db06fad374d9f900b03d8dbcc00762 | # -*- coding: utf-8 -*-
# @time : 2021/1/10 10:25
# @Author : Owen
# @File : mainpage.py
from selenium.webdriver.common.by import By
from homework.weixin.core.base import Base
from homework.weixin.core.contact import Contact
'''
企业微信首页
'''
class MainPage(Base):
    """Enterprise WeChat main page object (page-object pattern)."""
    # Navigate to the contacts page.
    def goto_contact(self):
        # Click the contacts menu entry, then hand off to the Contact page object.
        self.find(By.CSS_SELECTOR, '#menu_contacts').click()
        return Contact(self.driver) |
6,505 | 071e3cf6b4337e0079bbb2c7694fff2468142070 | import pygame
class BackGround:
    """Background sprite: holds a position and, once loaded, an image."""

    def __init__(self, x, y):
        # Top-left corner where the background will be drawn.
        self.x = x
        self.y = y

    def set_image(self, src):
        """Load the image file at `src` and anchor its rect at (x, y)."""
        self.image = pygame.image.load(src)
        self.rect = self.image.get_rect()
        self.rect.x = self.x
        self.rect.y = self.y

    def draw(self, screen):
        """Blit the loaded image onto `screen` (call set_image first)."""
        screen.blit(self.image, self.rect)
|
6,506 | 784b51c05dc7b5e70016634e2664c9ec25b8a65a | import numpy as np
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from preprocessing import *
from utils import *
def find_optimal_param(lda, x_train, y_train):
    """Pick the probability cut-off that maximises Se + Sp - 1 (Youden index).

    Sorts the training samples by their predicted positive-class probability,
    sweeps every possible threshold position and returns the probability at
    the position with the best sensitivity/specificity trade-off.
    """
    raw_probs = lda.predict_proba(x_train)[:, 1]
    # Sort (probability, label) pairs together; labels follow their scores.
    pairs = sorted(zip(raw_probs, y_train))
    labels = np.array([lab for _, lab in pairs])
    thresholds = np.array([prob for prob, _ in pairs])
    sens = []
    spec = []
    for cut in range(len(thresholds)):
        # Everything at/after `cut` is called positive, everything before negative.
        tp = np.count_nonzero(labels[cut:] == 1)
        fp = np.count_nonzero(labels[cut:] == 0)
        tn = np.count_nonzero(labels[:cut] == 0)
        fn = np.count_nonzero(labels[:cut] == 1)
        sens.append(tp / (tp + fn))
        spec.append(tn / (tn + fp))
    best = np.argmax(-(1 - np.array(spec) - np.array(sens)))
    return thresholds[best]
def predict(lda, x, y, m):
    """Classify x with threshold m and tally the confusion counts against y.

    Returns (tp, fp, fn, tn); all zeros when x is empty.
    """
    tp = fp = tn = fn = 0
    if len(x) != 0:
        scores = lda.predict_proba(x)[:, 1]
        for score, label in zip(scores, y):
            called_positive = score > m
            if called_positive and label == 1:
                tp += 1
            elif called_positive:
                fp += 1
            elif label == 1:
                fn += 1
            else:
                tn += 1
    return tp, fp, fn, tn
from methodutils import FdaUtils
class FDA_node(object):
    """Node of a binary tree of Fisher discriminant classifiers.

    Each node owns an FdaUtils model and a probability threshold ``m``;
    samples scoring <= m are routed to the left child, > m to the right.
    """
    def __init__(self):
        """Constructor"""
        self.method = FdaUtils()  # per-node discriminant model
        self.left = None
        self.right = None
        self.m = 0.5  # routing/decision threshold on P(class == 1)

    def grow(self):
        # Attach one fresh child on each side, turning this node into a split.
        self.right = FDA_node()
        self.left = FDA_node()

    def find_optimal_param(self, x, y):
        # Tune this node's threshold, then recurse into both children on the
        # data partitioned by the new threshold.
        self.m = self.method.find_optimal_param(x, y)
        if self.left is not None and self.right is not None:
            left, right = self.divide_data(x)
            self.left.find_optimal_param(x[left], y[left])
            self.right.find_optimal_param(x[right], y[right])

    def fit(self, x, y):
        self.method.fit(x, y)
        if self.left is not None and self.right is not None:
            left, right = self.divide_data(x)
            # If one side is already pure, prune: this node becomes a leaf.
            # NOTE(review): max()/min() raise on an empty partition — this
            # assumes both sides are non-empty; verify against calling data.
            if (max(y[left]) == 0 or min(y[right]) == 1):
                self.left = self.right = None
            else:
                # BUG FIX: fit the left child on the LEFT partition and the
                # right child on the RIGHT one — the original swapped them,
                # inconsistent with how divide_data() is used in
                # find_optimal_param() and predict().
                self.left.fit(x[left], y[left])
                self.right.fit(x[right], y[right])

    def divide_data(self, x):
        # Boolean masks splitting the samples around this node's threshold.
        probs = self.method.predict_proba(x)[:, 1]
        left = (probs <= self.m)
        right = (probs > self.m)
        return left, right

    def predict(self, x):
        # Leaf: predict directly. Internal node: route samples to the
        # children and merge their predictions back in input order.
        if self.left is None and self.right is None:
            pred = self.method.predict(x, self.m)
        elif self.left is not None and self.right is not None:
            left, right = self.divide_data(x)
            l_pred = self.left.predict(x[left])
            r_pred = self.right.predict(x[right])
            pred = np.ones(x.shape[0]) * 2  # 2 marks "unassigned" slots
            pred[left] = l_pred
            pred[right] = r_pred
        return pred
if __name__ == "__main__":
    # Cross-validated evaluation of the FDA tree per diagnosis class.
    np.seterr(all='raise')
    from sklearn.metrics import confusion_matrix
    from dataset import load_dataset, load_new_dataset_6002, diagnosis_to_binary, MOST_FREQ_DIAGS_NUMS_NEW
    from fisher_discriminant import FisherDiscriminantAnalisys
    num_components = 100
    # NOTE(review): `pkl` is not imported in this file's visible imports —
    # presumably it comes from `from preprocessing import *` or
    # `from utils import *`; confirm.
    infile = open('C:\\Users\\donte_000\\PycharmProjects\\Basic_Methods\\data\\data_old_and_new_without_noise.pkl', 'rb')
    (old, new) = pkl.load(infile)
    infile.close()
    Y = old["y"]
    outfile = open('C:\\Users\\donte_000\\PycharmProjects\\Basic_Methods\\data\\6002_old_Dif.pkl', 'rb')
    X = pkl.load(outfile)
    outfile.close()
    # Project features onto principal components before fitting.
    pca = PCA(n_components=X.shape[0])
    b = pca.fit_transform(X)
    for d in reversed(MOST_FREQ_DIAGS_NUMS_NEW):
        y_prediction =[]
        y_labels = []
        for train_index, test_index in cross_val(b.shape[0], 500):
            # One-level tree: root plus two children.
            tree = FDA_node()
            tree.grow()
            tree.fit(b[train_index, :num_components],Y[train_index,d])
            tree.find_optimal_param(b[train_index, :num_components], Y[train_index,d])
            y_prediction.append(tree.predict(b[test_index, :num_components]))
            y_labels.append(Y[test_index, d])
        y_prediction = np.array(y_prediction).flatten()
        y_labels = np.array(y_labels).flatten()
        tn, fp, fn, tp = confusion_matrix(y_labels, y_prediction).ravel()
        test_se = tp / (tp + fn)
        test_sp = tn / (tn + fp)
        # NOTE(review): test_sp is printed under the "Se" label and test_se
        # under "Sp" — the labels/values look swapped; verify intent.
        print("Val. Se = %s, Val. Sp = %s" % (round(test_sp, 4), round(test_se, 4)))
|
6,507 | b0468e58c4d0387a92ba96e8fb8a876ece256c78 | import mmap;
import random;
def shuffle():
    """Return the ten decimal digits as a single string in random order."""
    digits = list("0123456789")
    random.shuffle(digits)
    return "".join(digits)
# Overwrite the first ten bytes of hello.txt, via mmap, with a random
# permutation of the digits 0-9.
with open("hello.txt", "r+") as f:
    map = mmap.mmap(f.fileno(), 1000);
    l_i = 0;
    for l_digit in shuffle():
        # NOTE(review): Python 2 style — in Python 3 assigning a str to an
        # mmap slot raises TypeError (an int byte value is required); this
        # would need map[l_i] = ord(l_digit) or a bytes write. Verify the
        # intended interpreter version.
        map[l_i] = l_digit;
        l_i += 1; |
6,508 | 9cca73ebdf2b05fe29c14dc63ec1b1a7c917b085 | # Cutting a Rod | DP-13
# Difficulty Level : Medium
# Last Updated : 13 Nov, 2020
# Given a rod of length n inches and an array of prices that contains prices of all pieces of size smaller than n. Determine the maximum value obtainable by cutting up the rod and selling the pieces. For example, if length of the rod is 8 and the values of different pieces are given as following, then the maximum obtainable value is 22 (by cutting in two pieces of lengths 2 and 6)
# length | 1 2 3 4 5 6 7 8
# --------------------------------------------
# price | 1 5 8 9 10 17 17 20
# And if the prices are as following, then the maximum obtainable value is 24 (by cutting in eight pieces of length 1)
# length | 1 2 3 4 5 6 7 8
# --------------------------------------------
# price | 3 5 8 9 10 17 17 20
import numpy as np
def cut_rod(price, n):
    """Naive recursive solution to rod cutting: best revenue for length n.

    price[i] is the price of a piece of length i + 1. Exponential time —
    see cut_rod2 for the bottom-up DP version.
    """
    if n <= 0:
        return 0
    best = -1
    # Try every length for the first piece, recurse on the remainder.
    for first in range(n):
        best = max(best, price[first] + cut_rod(price, n - first - 1))
    return best
def cut_rod2(price, n):
    """Bottom-up dynamic-programming rod cutting: best revenue for length n.

    best[k] holds the optimum for a rod of length k; each length extends
    every shorter optimum by one more piece. O(n^2) time, O(n) space.
    """
    best = [0] * (n + 1)
    for length in range(1, n + 1):
        top = -1
        for first in range(length):
            top = max(top, price[first] + best[length - first - 1])
        best[length] = top
    return best[n]
# Driver code
# Sample price tables (price of a piece of length i is arr[i-1]).
arr = [1, 5, 8, 9, 10, 17, 17, 20]
arr1 = [3, 5, 8, 9, 10, 17, 17, 20]
arr2 = [5, 5, 8, 9, 10, 17, 17, 20]
size = len(arr)
# print("Maximum Obtainable Value is", cut_rod(arr1, size))
# print("Maximum Obtainable Value is", cut_rod2(arr1, size))
print("Maximum Obtainable Value is", cut_rod2([2, 5, 7, 3, 9], 5))
def rodCut(price, n):
    """Recursive rod cutting, equivalent to cut_rod (exponential time)."""
    if n <= 0:
        return 0
    # Revenue of every choice for the first piece plus the recursive rest;
    # the trailing -1 keeps the original's floor for the maximum.
    candidates = [price[i] + rodCut(price, n - 1 - i) for i in range(n)]
    return max(candidates + [-1])
# print("Maximum Obtainable Value is", rodCut(arr1, size))
|
6,509 | 9061db3bb3aa3178262af58e56126302b9effdff | import pymel.all as pm
from collections import Counter
# example
# v.Create( sel[0], pm.datatypes.Color.red, sel[1], 'leftEye', 0.2 )
# select mesh 1st then the control
def Create( obj, targetColor, control, attr, offset ) :
    """Wire a control attribute to a UV offset on a vertex-colored mesh region.

    Finds the verts on `obj` painted `targetColor`, picks the UV shell that
    contains most of them, builds a polyMoveUV over those UVs and drives its
    translateV from `control.attr` through an anim curve scaled by `offset`.
    Returns the polyMoveUV node, or None (with a warning) if nothing matched
    or `obj` is not a mesh.
    """
    shape = obj.getShape()
    name = obj.name()
    if( type(shape) == pm.Mesh ) :
        # Collect every vertex whose painted color matches the target.
        outVerts = []
        verts = shape.vtx[:]
        for i, vert in enumerate(verts) :
            if( vert.getColor() == targetColor ) :
                outVerts.append(vert)
        # this needs rewriting
        # what shells does this vert belong to?
        # out of the verts we have, which shell contains the most?
        uvShellsList = shape.getUvShellsIds()[0]
        uvList = []
        outUvShellList = []
        for vert in outVerts :
            uvs = vert.getUVIndices()
            for uv in uvs :
                uvList.append(uv)
                outUvShellList.append(uvShellsList[uv])
        outUvList = []
        # Keep only the UVs belonging to the shell most of the verts sit on.
        mostCommonShell = Counter(outUvShellList).most_common(1)[0][0]
        for i, uvshell in enumerate(outUvShellList) :
            if( uvshell == mostCommonShell ) :
                outUvList.append(shape.map[uvList[i]])
        # print outUvList
        # return
        if( len(outVerts) > 0 ) :
            # Create the UV mover and an anim curve mapping the control
            # attribute (0..20) to a V offset of -offset per unit.
            moveUV = pm.polyMoveUV( outUvList )[0]
            moveUV.rename('%s_%s_moveUV' % ( name, attr ))
            crv = pm.AnimCurveTU(name='%s_%s_animCurveTU' % ( name, attr ) )
            pm.setKeyframe(crv, t=0.0, v=0.0, itt='linear', ott='linear')
            pm.setKeyframe(crv, t=20.0, v=-offset * 20, itt='linear', ott='linear')
            control.attr(attr) >> crv.input
            crv.output >> moveUV.translateV
            return moveUV
        else :
            pm.warning( 'No verts found with color %s' % ( targetColor ) )
    else :
        pm.warning('The target must be a mesh')
# use this to connect the PolyMoveUV to the joint attribute you want FF (shader) to read
# example : ConnectToAttr( sel[0], sel[1], 'translateX' ) - select mesh 1st then joint
def ConnectToAttr( src, trgt, attr ) :
    """Connect each polyMoveUV in src's history to a child of trgt.attr, in order."""
    moveUVs = src.getShape().history(type='polyMoveUV')
    try :
        attr = pm.PyNode(trgt).attr(attr).getChildren()
    except :
        # NOTE(review): bare except silently falls back to treating the attr
        # as a single (non-compound) plug — consider narrowing the exception.
        attr = [ pm.PyNode(trgt).attr(attr) ]
    if( len(moveUVs) > len(attr) ) :
        pm.warning( 'There are more polyMoveUV nodes that attrs to connect to %s:%s' % ( len(moveUVs), len(attr) ) )
    else :
        for i, moveUV in enumerate(moveUVs) :
            moveUV.translateV >> attr[i]
|
6,510 | 89dfd9a32b008307eb4c456f2324804c29f3b68f | import numpy as np
class SampleMemory(object):
    """Fixed-capacity ring buffer of array items with uniform random sampling.

    Items live in a preallocated numpy array; once full, new appends
    overwrite the oldest entries. `tail_index` is the next write slot.
    """

    def __init__(self, item_shape, max_size):
        self.memory = np.zeros((max_size,) + item_shape)
        self.item_shape = item_shape
        self.num_stored = 0      # valid items held (<= max_size)
        self.max_size = max_size
        self.tail_index = 0      # next slot to write (wraps around)

    def sample(self, num_samples):
        """Return num_samples items drawn uniformly, with replacement."""
        indexes = self.sample_indexes(num_samples)
        return self.memory[indexes]

    def sample_indexes(self, num_samples):
        """Uniform random indexes into the occupied region."""
        return np.random.randint(
            0, self.num_stored, (num_samples,)
        )

    def append(self, item):
        """Store one item, overwriting the oldest entry when full."""
        self.memory[self.tail_index, :] = item
        self.tail_index = (self.tail_index + 1) % self.max_size
        self.num_stored = min(self.num_stored + 1, self.max_size)

    def append_batch(self, batch):
        """Store a batch, wrapping around the end of the buffer if needed.

        NOTE(review): assumes batch.shape[0] <= max_size — verify callers.
        """
        batch_size = batch.shape[0]
        batch_tail_index = self.tail_index + batch_size
        wrap_extra = batch_tail_index - self.max_size  # items spilling past the end
        chunk_tail_index = min(batch_tail_index, self.max_size)
        self.memory[self.tail_index:chunk_tail_index, :] = batch[:batch_size - wrap_extra]
        if wrap_extra > 0:
            self.memory[:wrap_extra, :] = batch[batch_size - wrap_extra:]
        self.tail_index = batch_tail_index % self.max_size
        self.num_stored = min(self.num_stored + batch_size, self.max_size)

    def last_n_frames(self, n):
        """Return the n most recently appended items, oldest first."""
        return self.memory[self.last_n_frames_indexes(n)]

    def last_n_frames_indexes(self, n):
        """Indexes of the n newest items, oldest first (as a list)."""
        tail = self.tail_index
        start = tail - n
        # The newest items sit just before tail; a negative start means the
        # request wraps back to the end of the occupied region.
        indexes = list(range(max(start, 0), tail))
        if start < 0:
            # BUG FIX: range objects cannot be concatenated with `+` in
            # Python 3 — build lists before joining the wrapped prefix.
            indexes = list(range(self.num_stored + start, self.num_stored)) + indexes
        return indexes
if __name__ == '__main__':
    # Smoke tests: single appends with wrap-around, sampling, batch appends.
    shape = (32, 32, 3)
    max_size = 100
    for num_items, sample_size in ((10, 5), (10, 10), (100, 32), (120, 10)):
        mem = SampleMemory(shape, max_size)
        assert mem.num_stored == 0
        assert mem.max_size == max_size
        assert mem.memory.shape == (max_size,) + shape
        for i in range(num_items):
            mem.append(np.random.random(shape))
        assert mem.tail_index == num_items % max_size
        assert mem.num_stored == min(num_items, max_size)
        indexes = mem.sample_indexes(sample_size)
        assert indexes.shape[0] == sample_size
        assert indexes.min() >= 0
        assert indexes.max() < num_items
        samples = mem.sample(sample_size)
        assert samples.shape == (sample_size,) + shape
    mem = SampleMemory(shape, max_size)
    batch_size = 10
    batch = np.random.random((batch_size,) + shape)
    mem.append_batch(batch)
    assert mem.num_stored == batch_size
    assert mem.tail_index == batch_size
    assert np.array_equal(mem.memory[:5], batch[:5])
    batch_size = 100
    batch = np.random.random((batch_size,) + shape)
    mem.append_batch(batch)
    assert mem.num_stored == max_size
    assert mem.tail_index == 10
    assert np.array_equal(mem.memory[:5], batch[-10:-5])
    # BUG FIX: the old comparison used Python 2's list-returning map();
    # in Python 3 map is an iterator, so compare against a real list.
    assert mem.last_n_frames_indexes(15) == [i % 100 for i in range(95, 110)]
|
6,511 | 30e4c4c5ef944b0cd2d36b2fe5f7eee39dff1d16 | alien_color = 'green'
# Exercise 5.3: simple conditional checks on alien colors.
if alien_color == 'green':
    print('you earned 5 points')

alien_color2 = 'yellow'
if alien_color2 == 'green':
    print('your earned 5 points')
if alien_color2 == 'yellow':
    print('Right answer')

# 5.4
alien_color = 'green'
if alien_color == 'green':
    print('you earned 5 points')
else:
    print('your earned 10 points')

# 5.5
alien_color = 'green'
if alien_color == 'green':
    print('you earned 5 points')
elif alien_color == 'yellow':
    # BUG FIX: the original had a bare `return` here, which is a
    # SyntaxError at module level; this branch should print like the rest.
    print('your earned 10 points')
else:
    print('your earned 15 points')
|
6,512 | 3970c7768e892ad217c193b1d967c1203b7e9a25 | import math
def is_prime(n):
    """Trial-division primality test with the 6k +/- 1 optimisation.

    Assumes n >= 1; returns True iff n is prime.
    """
    if n == 1:
        return False
    if n < 4:
        # 2 and 3 are prime
        return True
    if n % 2 == 0:
        return False
    if n < 9:
        # 5 and 7 are prime (4, 6 and 8 are already excluded)
        return True
    if n % 3 == 0:
        return False
    # Any remaining divisor must have the form 6k - 1 or 6k + 1.
    limit = math.sqrt(n)
    candidate = 5
    while candidate <= limit:
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def main():
    """Print the 10001st prime (Project Euler #7 style)."""
    limit = 10001
    count = 1      # 2 is the first prime, counted up front
    candidate = 1  # advance by 2 so only odd numbers are tested
    while count < limit:
        candidate += 2
        if is_prime(candidate):
            count += 1
    print(candidate)
if __name__ == '__main__':
main()
|
6,513 | 76526bdff7418997ac90f761936abccbb3468499 | """
Array.diff
Our goal in this kata is to implement a difference function,
which subtracts one list from another and returns the result.
It should remove all values from list a, which are present in list b keeping their order.
"""
from unittest import TestCase
def list_diff(a, b):
    """Return the elements of a, in order, that do not appear in b."""
    kept = []
    for value in a:
        if value not in b:
            kept.append(value)
    return kept
class TestListDiff(TestCase):
    """Unit tests for list_diff: order is kept, duplicates handled."""
    def test_one(self):
        assert list_diff([1, 2], [1]) == [2]
    def test_two(self):
        # every duplicate of a removed value is dropped
        assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]
    def test_three(self):
        # every duplicate of a kept value survives
        assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]
def list_diff_left_right(a, b):
    """Return (a without b's values, b without a's values), each in order."""
    only_in_a = []
    for value in a:
        if value not in b:
            only_in_a.append(value)
    only_in_b = [value for value in b if value not in a]
    return only_in_a, only_in_b
class TestDiffLR(TestCase):
    """Unit tests for the two-sided variant list_diff_left_right."""
    def test_one(self):
        assert list_diff_left_right([1, 2], [1]) == ([2], [])
    def test_two(self):
        assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])
    def test_three(self):
        # both sides keep their own duplicates
        assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2], [3, 3])
|
6,514 | 9fd985e9675514f6c8f3ac5b91962eb744e0e82c | import numpy
import matplotlib.pyplot as plt
# Generate a synthetic "minutes vs. spent money" dataset, split it 80/20
# into train/test, and show a scatter plot of each subset.
numpy.random.seed(2)  # fixed seed: reproducible dataset
# create datasets
x = numpy.random.normal(3, 1, 100)
y = numpy.random.normal(150, 40, 100) / x
# displaying original dataset
plt.scatter(x, y)
plt.title("Original dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
# train dataset will be 80% of the data
train_x = x[:80]
train_y = y[:80]
# test dataset will be remaining 20% of the data
test_x = x[80:]
test_y = y[80:]
# displaying train dataset
plt.scatter(train_x, train_y)
plt.title("Train dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
# displaying test dataset
plt.scatter(test_x, test_y)
plt.title("Test dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
|
6,515 | 607700faebc2018327d66939419cc24a563c3900 | # Return min number of hacks (swap of adjacent instructions)
# in p so that total damage <= d.
# If impossible, return -1
# Return min number of hacks (swap of adjacent instructions)
# in p so that total damage <= d.
# If impossible, return -1
def min_hacks(d, p):
    """Minimum adjacent C/S swaps so program p deals at most d damage, else -1."""
    # shots[k] = number of 'S' commands fired at charge level 2**k;
    # each 'C' opens a new (doubled) level.
    shots = [0]
    damage = 0
    for command in p:
        if command == "S":
            shots[-1] += 1
            damage += 2 ** (len(shots) - 1)
        else:
            shots.append(0)
    # One hack demotes a shot to the previous level, halving its damage.
    # Greedily demote from the highest non-empty level until damage fits.
    hacks = 0
    while damage > d:
        for level in range(len(shots) - 1, 0, -1):
            if shots[level] > 0:
                shots[level] -= 1
                shots[level - 1] += 1
                damage -= 2 ** (level - 1)  # damage = damage - 2**level + 2**(level-1)
                hacks += 1
                break
        else:
            # No shot left above level 0: damage cannot be reduced further.
            return -1
    return hacks
# Code Jam style driver: first stdin line is the case count; each case line
# is "<max damage> <program>" where the program mixes C (charge) and S (shoot).
num_cases = int(input())
for i in range(1, num_cases+1):
    current_case = input().split()
    d = int(current_case[0])
    p = current_case[1]
    solution = min_hacks(d, p)
    if solution < 0:
        solution_string = "IMPOSSIBLE"
    else:
        solution_string = str(solution)
    print("Case #{:d}: {:s}".format(i, solution_string))
|
6,516 | 1634ae0e329b4f277fa96a870fbd19626c0ece81 | from sympy import *
import sys
# Read a polynomial expression in x from stdin (using ^ for powers),
# differentiate it with sympy, and print the derivative using ^ again.
x = Symbol("x")
# EOF
try:
    in_str = input()
except Exception as e:
    print("WRONG FORMAT!") # Wrong Format!
    sys.exit(0)
in_str = in_str.replace("^", "**") #change '^'into'**' for recognition
# wrong expression
try:
    # SECURITY NOTE(review): eval() on raw user input executes arbitrary
    # code — sympy.sympify(in_str) would be the safe equivalent here.
    in_exp = eval(in_str) # turn str into expression
except Exception as e:
    print("WRONG FORMAT!") # Wrong Format!
    sys.exit(0)
res = diff(in_exp)
print(str(res).replace("**", "^"))
#res = diff(in_exp).subs(x,2)
#print(res)
|
6,517 | 052824082854c5f7721efb7faaf5a794e9be2789 | L5 = [0]*10
# Demo of basic list operations: indexing, slicing, append/remove, striding.
print(L5)
L5[2] = 20
print(L5)
print(L5[1:4])
L5.append(30)
print(L5)
L5.remove(30) #Elimina la primera ocurrencia del objeto (removes first occurrence)
print(L5)
L6 = [1,2,3,4,5,6]
# Every second element, starting from index 1.
print(L6[1::2])
print(L6[::2]) |
6,518 | e99d3ae82d8eea38d29d6c4f09fdb3858e36ca50 | import requests as r
from .security import Security, Securities
from .data import Data
url_base = 'https://www.alphavantage.co/query'
def _build_url(**kargs):
    """Build an Alpha Vantage query URL; keyword args override the defaults."""
    # NOTE(review): the API key is hard-coded — consider moving it to config.
    query = {
        'function': 'TIME_SERIES_DAILY',
        'symbol': 'SPY',
        'outputsize': 'full',
        'datatype': 'json',
        'apikey': 'JPIO2GNGBMFRLGMN'
    }
    query.update(kargs)
    pairs = [f'{key}={val}' for key, val in query.items()]
    return f'{url_base}?{"&".join(pairs)}'
def _request(**kargs):
    """GET the query URL built from kargs; returns the requests Response."""
    url = _build_url(**kargs)
    return r.get(url)
def _get_symbol(symbol, **kargs):
    """Download daily CSV data for `symbol` and wrap it in a Security.

    The API returns rows newest-first; they are reversed here so the data
    runs past -> present before parsing.
    """
    kargs['symbol'] = symbol
    kargs['datatype'] = 'csv'
    req = _request(**kargs)
    # Reverse dates to past to present
    text = req.text
    header, *text = text.split()
    # NOTE(review): .split() splits on any whitespace — assumes no CSV field
    # contains a space; verify against the API output format.
    text = '\n'.join(
        [l for l in text[::-1]]
    )
    csv_str = f'{header}\n{text}'
    data = Data.load_csv(csv_str)
    return Security(symbol, data)
def get(symbols, **kargs):
    """Fetch one or more symbols and collect them into a Securities container."""
    if not isinstance(symbols, list):
        # Allow a single symbol to be passed without wrapping it in a list.
        symbols = [symbols]
    result = Securities()
    for sym in symbols:
        kargs['symbol'] = sym
        result.add(id=sym, security=_get_symbol(**kargs))
    return result
|
6,519 | 4b14dee3625d5d0c703176ed2f0a28b2583fd84d | """
Creating flask server that response with a json
"""
from flask import Flask
from flask import jsonify
micro_service = Flask(__name__)
@micro_service.route('/') # http://mysite.com/
def home():
    """Respond to GET / with a JSON greeting."""
    return jsonify({'message': 'Hello, world!'})
if __name__ == '__main__':
    micro_service.run()  # Flask dev server; not for production use
|
6,520 | 430e971d2ae41bfd60e7416ecb2c26bb08e4df45 | import os
import os.path
import numpy as np
import pickle
import codecs
from konlpy.tag import Okt
from hyperparams import params
from gensim.models import FastText
#tokenizer
tokenizer = Okt()
def make_word_dictionary(word_dict_pkl_path=params['default_word_dict_pkl_path'], training_data_path = params['default_training_data_path']):
    """Build (or load a cached) word -> index dictionary from the training TSV.

    Tokenizes every review with Okt, keeps tokens seen at least
    params['min_vocab_count'] times, and reserves index 0 for PAD and the
    last index for UNK. The result is pickled to word_dict_pkl_path.
    """
    #word_dict => 'Word':'index'
    word_dict = dict()
    if os.path.isfile(word_dict_pkl_path):
        #if already existed, just load it
        with open(word_dict_pkl_path, 'rb') as f:
            word_dict = pickle.load(f)
        print('Existed word_dict loaded')
    else:
        print('No word_dict pkl file, start making word_dict...')
        with codecs.open(training_data_path, 'r', encoding='utf-8') as f:
            word_vocab = dict()
            # 'word':'frequency'
            # Skip the header line; each data line is tab-separated with the
            # review text in column 1.
            for line in f.read().split('\n')[1:]:
                review = line.split('\t')[1]
                #tokenizing
                tokens = tokenizer.morphs(review)
                for token in tokens:
                    if token in word_vocab.keys():
                        word_vocab[token] += 1
                    else:
                        word_vocab[token] = 1
        # Keep only sufficiently frequent tokens.
        word_vocab = [word for word in word_vocab.keys() if word_vocab[word] >= params['min_vocab_count']]
        # add pad & unk token
        word_vocab = [params['PAD']] + word_vocab + [params['UNK']]
        for idx, word in enumerate(word_vocab):
            word_dict[word] = idx
        print('Making word_dict ... Done and Saved')
        with open(word_dict_pkl_path, 'wb') as f:
            pickle.dump(word_dict, f)
    return word_dict
def make_word_embedding(word_dict, word_emb_pkl_path = params['default_word_emb_pkl_path'], fasttext_path = params['default_fasttext_path']):
    """Build (or load a cached) embedding matrix aligned with word_dict.

    Row i holds the FastText vector for the word with index i; PAD (index 0)
    stays all-zero and out-of-vocabulary words get uniform random vectors.
    """
    word_emb = np.zeros([len(word_dict), params['word_emb_dim']])
    if os.path.isfile(word_emb_pkl_path):
        with open(word_emb_pkl_path, 'rb') as f:
            word_emb = pickle.load(f)
        print('Existed trained word embedding loaded')
    else:
        #load fasttext model
        # NOTE(review): load_fasttext_format is deprecated in newer gensim
        # versions (replaced by gensim.models.fasttext.load_facebook_model);
        # verify the pinned gensim version.
        fasttext_model = FastText.load_fasttext_format(fasttext_path, encoding='utf8')
        print('No word_emb pkl file, start making word_emb ...')
        for word, idx in word_dict.items():
            if idx==0:
                # PAD = 0
                continue
            else:
                try:
                    word_emb[idx] = np.asarray(fasttext_model.wv[word])
                except KeyError:
                    # if there is no word vector for certain word, just assign random vector
                    word_emb[idx] = np.random.uniform(-0.25, 0.25, params['word_emb_dim'])
        with open(word_emb_pkl_path, 'wb') as f:
            pickle.dump(word_emb, f)
        print('Making word_emb ... Done and Saved')
    return word_emb
def zero_padding(token_sentence, word_dict):
    """Right-pad a token-id sentence with PAD ids up to max_seq_length.

    e.g. [1, 4, 3, 2, 1, 15] -> [1, 4, 3, 2, 1, 15, 0, 0, 0, 0]
    """
    pad_id = word_dict[params['PAD']]
    missing = params['max_seq_length'] - len(token_sentence)
    return token_sentence + [pad_id] * missing
def dataset_iterator(filename, word_dict, batch_size):
    """Yield (context, sequence_length, label) training batches from a TSV file.

    Each line after the header holds a review (col 1) and a 0/1 polarity
    (col 2); reviews are tokenized, truncated/padded to max_seq_length and
    labels are one-hot [neg, pos]. A final partial batch is yielded too.
    """
    #yield batch for training
    with open(filename, 'r', encoding='utf8') as f_dataset:
        context = []
        sequence_length = []
        label = []
        text = f_dataset.read().split('\n')
        for line in text[1:]:
            class_label = [0,0]
            review = line.split('\t')[1]
            polarity = int(line.split('\t')[2])
            class_label[polarity] = 1 #mark polarity
            label.append(class_label)
            tokens = tokenizer.morphs(review)
            #if the review is too long, cut it to adequate length
            if len(tokens) > params['max_seq_length']:
                tokens = tokens[:params['max_seq_length']]
            # Map tokens to ids, falling back to UNK for unknown words.
            sentence = [word_dict[word] if word in word_dict else word_dict[params['UNK']] for word in tokens]
            sequence_length.append(len(sentence))
            sentence = zero_padding(sentence, word_dict)
            context.append(sentence)
            if len(context) == batch_size:
                yield (context, sequence_length, label)
                context =[]
                sequence_length = []
                label = []
    # Flush whatever is left as a (possibly smaller) final batch.
    if len(context) > 0:
        yield (context, sequence_length, label) |
6,521 | 275f8b6ac31792a9e4bb823b61366f868e45ef4e | import datetime
from app.api.v2.models.db import Database
now = datetime.datetime.now()
db = Database()
cur = db.cur
class Meetup():
    """Data-access model for meetup records backed by the shared Database."""
    #meetup constructor
    def __init__(self, topic, location, tags, happening_on):
        self.topic = topic
        self.location = location
        self.tags = tags
        self.happening_on = happening_on
        self.created_on = now  # module-level timestamp captured at import time

    def check_if_meetup_exists(self, topic):
        """Return True when a meetup with this topic already exists (else None)."""
        query = "SELECT topic from meetups WHERE topic=%s;"
        cur.execute(query, (topic,))
        meetup = cur.fetchone()
        if meetup:
            return True

    def create_meetup(self):
        """Insert this meetup and return the new row, or False on duplicate topic."""
        if self.check_if_meetup_exists(self.topic):
            return False
        query = "INSERT INTO meetups (topic, location, tags, happening_on, created_on) values (%s, %s, %s, %s, %s) \
        RETURNING meetup_id, topic, location, tags, happening_on, created_on;"
        cur.execute(
            query,
            (self.topic,
             self.location,
             self.tags,
             self.happening_on,
             self.created_on))
        meetup = cur.fetchone()
        db.conn.commit()
        return meetup

    @staticmethod
    def delete_meetup(meetup_id):
        """Delete a single Meetup"""
        # BUG FIX: the id is now passed as a bound query parameter instead of
        # being interpolated with .format() (SQL-injection risk), and the
        # method is marked @staticmethod since it takes no `self`.
        query = "DELETE FROM meetups WHERE meetup_id=%s;"
        cur.execute(query, (meetup_id,))
        db.conn.commit()

    @staticmethod
    def get_all_meetups():
        '''Method to fetch all meetups'''
        query = "SELECT * from meetups;"
        cur.execute(query)
        meetups = cur.fetchall()
        return meetups

    @staticmethod
    def get_meetup_by_id(meetup_id):
        """ Fetch a specific meetup using meetup_id"""
        query = "SELECT * from meetups where meetup_id=%s;"
        cur.execute(query, (meetup_id,))
        meetup = cur.fetchone()
        return meetup
|
6,522 | 8804bfc5bed8b93e50279f0cbab561fe09d92a64 | from random import randint
import matplotlib.pyplot as plt
def generate_list(length: int) -> list:
    """Generate a list with given length with random integer values in the interval [0, length]

    Args:
        length (int): List length

    Returns:
        list: List generated with random values
    """
    # BUG FIX: randint is inclusive on both ends, so the upper bound must be
    # `length` (not `length + 1`) to honour the documented [0, length] interval.
    return [randint(0, length) for _ in range(length)]
def plot_table(timestamps: dict, threadList: list, mList: list) -> None:
    """Plot execution time against the number of processes used.

    (Docstring corrected: the previous one described a standard-deviation
    chart with parameters this function does not take.)

    Args:
        timestamps (dict): execution times; the values are plotted on the y axis
        threadList (list): process counts plotted on the x axis
        mList (list): value totals shown as the legend entries
    """
    plt.plot(threadList, timestamps.values(), 'o-')
    plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))
    plt.xlabel('Número de processos')
    plt.ylabel('Tempo de Execução (s)')
    plt.title('Tempo de Execução por Total de Processos e Valores')
    plt.show()
|
6,523 | cb2dd08a09d2e39bd83f82940c3d9a79a5a27918 | import logging
import subprocess
from pathlib import Path
from typing import Union
from git import Repo
def init_repo(metadata: str, path: str, deep_clone: bool) -> Repo:
    """Clone the repository at URL `metadata` into `path`, or open it if present.

    A shallow clone (depth=1) is made unless `deep_clone` is True.
    """
    clone_path = Path(path)
    if not clone_path.exists():
        logging.info('Cloning %s', metadata)
        repo = (Repo.clone_from(metadata, clone_path)
                if deep_clone else
                Repo.clone_from(metadata, clone_path, depth=1))
    else:
        # Already on disk: just open the existing working copy.
        repo = Repo(clone_path)
    return repo
def init_ssh(key: str, key_path: Path) -> None:
    """Write the private key to key_path/id_rsa and trust github.com's host key.

    No-op (with a warning) when `key` is empty; an existing id_rsa is kept.
    """
    if not key:
        logging.warning('Private Key required for SSH Git')
        return
    logging.info('Private Key found, writing to disk')
    key_path.mkdir(exist_ok=True)
    key_file = Path(key_path, 'id_rsa')
    if not key_file.exists():
        key_file.write_text(f'{key}\n', encoding='UTF-8')
        key_file.chmod(0o400)  # SSH requires the key to be owner-read-only
    # Record github.com's RSA host key so later git-over-SSH calls don't prompt.
    scan = subprocess.run([
        'ssh-keyscan', '-t', 'rsa', 'github.com'
    ], stdout=subprocess.PIPE, check=False)
    Path(key_path, 'known_hosts').write_text(scan.stdout.decode('utf-8'), encoding='UTF-8')
def repo_file_add_or_changed(repo: Repo, filename: Union[str, Path]) -> bool:
    """Return True when `filename` is untracked or has unstaged changes in `repo`."""
    if repo.working_dir:
        # Git paths are repo-relative with forward slashes.
        relative_file = Path(filename).relative_to(repo.working_dir).as_posix()
        if relative_file in repo.untracked_files:
            return True
        # diff(None) compares the index against the working tree (unstaged).
        if relative_file in [
            x.a_path for x in repo.index.diff(None)]:
            return True
    return False
|
6,524 | 7df55853d0f4f1bf56512c4427d7f91e9c1f2279 | """Initial migration
Revision ID: 1f2296edbc75
Revises: 7417382a3f1
Create Date: 2014-01-19 23:04:58.877817
"""
# revision identifiers, used by Alembic.
revision = '1f2296edbc75'
down_revision = '7417382a3f1'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy import func
def upgrade():
    """Create the initial schema.

    Tables are created parents-first so every foreign key target exists:
    plain reference tables (consultant, service, ballot_type, employer,
    tag, election), then donor/committee/ballot_measure, then the
    relation tables (donation, contract, stance, ballot_measure_tags).
    All primary keys are server-generated UUIDs (uuid_generate_v4), and
    created/updated default to now() server-side.
    """
    ### commands auto generated by Alembic - please adjust! ###
    # --- reference tables (no foreign keys) ---
    op.create_table(u'consultant',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('name', sa.Text(), nullable=False),
    sa.Column('address', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table(u'service',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('name', sa.Text(), nullable=False),
    sa.Column('description', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table(u'ballot_type',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('name', sa.Text(), nullable=False),
    # NOTE(review): Numeric(precision=2, scale=2) only allows values < 1.00
    # (two total digits, both fractional) — confirm this is the intent for
    # a "percent required" column.
    sa.Column('percent_required', sa.Numeric(precision=2, scale=2), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table(u'employer',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('name', sa.Text(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table(u'tag',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('name', sa.Text(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table(u'election',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('date', sa.Date(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # --- first-level entities (FKs into the reference tables) ---
    op.create_table(u'donor',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('first_name', sa.Text(), nullable=False),
    sa.Column('last_name', sa.Text(), nullable=False),
    sa.Column('address', sa.Text(), nullable=False),
    sa.Column('latitude', sa.Float(), nullable=False),
    sa.Column('longitude', sa.Float(), nullable=False),
    sa.Column('employer_id', postgresql.UUID(), nullable=True),
    sa.ForeignKeyConstraint(['employer_id'], [u'employer.id'], ),
    sa.PrimaryKeyConstraint('id'),
    # Name + geocoded location is treated as the natural key for dedup.
    sa.UniqueConstraint('first_name','last_name','latitude','longitude')
    )
    op.create_index('ix_donor_employer_id', 'donor', ['employer_id'], unique=False)
    op.create_table(u'committee',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('name', sa.Text(), nullable=False),
    sa.Column('filer_id', sa.Text(), nullable=True),
    sa.Column('sponsor', sa.Text(), nullable=True),
    sa.Column('election_id', postgresql.UUID(), nullable=True),
    sa.ForeignKeyConstraint(['election_id'], [u'election.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index('ix_committee_election_id', 'committee', ['election_id'], unique=False)
    op.create_table(u'ballot_measure',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('name', sa.Text(), nullable=True),
    sa.Column('prop_id', sa.Text(), nullable=False),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('num_yes', sa.Integer(), nullable=True),
    sa.Column('num_no', sa.Integer(), nullable=True),
    sa.Column('passed', sa.Boolean(), nullable=True),
    sa.Column('ballot_type_id', postgresql.UUID(), nullable=True),
    sa.Column('election_id', postgresql.UUID(), nullable=True),
    sa.ForeignKeyConstraint(['ballot_type_id'], [u'ballot_type.id'], ),
    sa.ForeignKeyConstraint(['election_id'], [u'election.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index('ix_ballot_measure_election_id', 'ballot_measure', ['election_id'], unique=False)
    op.create_index('ix_ballot_measure_ballot_type_id', 'ballot_measure', ['ballot_type_id'], unique=False)
    # --- relation tables (FKs into the entities above) ---
    op.create_table(u'donation',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('amount', sa.Float(), nullable=False),
    sa.Column('transaction_date', sa.Date(), nullable=False),
    sa.Column('donor_id', postgresql.UUID(), nullable=False),
    sa.Column('committee_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['committee_id'], [u'committee.id'], ),
    sa.ForeignKeyConstraint(['donor_id'], [u'donor.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index('ix_donation_committee_id', 'donation', ['committee_id'], unique=False)
    op.create_index('ix_donation_donor_id', 'donation', ['donor_id'], unique=False)
    op.create_table(u'contract',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('payment', sa.Float(), nullable=False),
    sa.Column('consultant_id', postgresql.UUID(), nullable=False),
    sa.Column('service_id', postgresql.UUID(), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('committee_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['committee_id'], [u'committee.id'], ),
    sa.ForeignKeyConstraint(['consultant_id'], [u'consultant.id'], ),
    sa.ForeignKeyConstraint(['service_id'], [u'service.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index('ix_contract_consultant_id', 'contract', ['consultant_id'], unique=False)
    op.create_index('ix_contract_service_id', 'contract', ['service_id'], unique=False)
    op.create_index('ix_contract_committee_id', 'contract', ['committee_id'], unique=False)
    op.create_table(u'stance',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('voted_yes', sa.Boolean(), nullable=False),
    sa.Column('committee_id', postgresql.UUID(), nullable=False),
    sa.Column('ballot_measure_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['ballot_measure_id'], [u'ballot_measure.id'], ),
    sa.ForeignKeyConstraint(['committee_id'], [u'committee.id'], ),
    sa.PrimaryKeyConstraint('id'),
    # One stance per (committee, measure) pair.
    sa.UniqueConstraint('committee_id','ballot_measure_id')
    )
    op.create_index('ix_stance_ballot_measure_id', 'stance', ['ballot_measure_id'], unique=False)
    op.create_index('ix_stance_committee_id', 'stance', ['committee_id'], unique=False)
    # Association table for the ballot_measure <-> tag many-to-many.
    op.create_table(u'ballot_measure_tags',
    sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
    sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
    sa.Column('ballot_measure_id', postgresql.UUID(), nullable=False),
    sa.Column('tag_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['ballot_measure_id'], [u'ballot_measure.id'], ),
    sa.ForeignKeyConstraint(['tag_id'], [u'tag.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('ballot_measure_id','tag_id')
    )
    op.create_index('ix_ballot_measure_tags_tag_id', 'ballot_measure_tags', ['tag_id'], unique=False)
    op.create_index('ix_ballot_measure_tags_ballot_measure_id', 'ballot_measure_tags', ['ballot_measure_id'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the initial schema.

    Exact mirror of upgrade() in reverse order: relation tables first,
    then the entities, then the plain reference tables, so no foreign
    key constraint is violated mid-way.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_ballot_measure_tags_ballot_measure_id', 'ballot_measure_tags')
    op.drop_index('ix_ballot_measure_tags_tag_id', 'ballot_measure_tags')
    op.drop_table(u'ballot_measure_tags')
    op.drop_index('ix_stance_committee_id', 'stance')
    op.drop_index('ix_stance_ballot_measure_id', 'stance')
    op.drop_table(u'stance')
    op.drop_index('ix_contract_committee_id', 'contract')
    op.drop_index('ix_contract_service_id', 'contract')
    op.drop_index('ix_contract_consultant_id', 'contract')
    op.drop_table(u'contract')
    op.drop_index('ix_donation_donor_id', 'donation')
    op.drop_index('ix_donation_committee_id', 'donation')
    op.drop_table(u'donation')
    op.drop_index('ix_ballot_measure_ballot_type_id', 'ballot_measure')
    op.drop_index('ix_ballot_measure_election_id', 'ballot_measure')
    op.drop_table(u'ballot_measure')
    op.drop_index('ix_committee_election_id', 'committee')
    op.drop_table(u'committee')
    op.drop_index('ix_donor_employer_id', 'donor')
    op.drop_table(u'donor')
    # Reference tables last — nothing points at them any more.
    op.drop_table(u'election')
    op.drop_table(u'tag')
    op.drop_table(u'employer')
    op.drop_table(u'ballot_type')
    op.drop_table(u'service')
    op.drop_table(u'consultant')
    ### end Alembic commands ###
|
6,525 | 7eb4efb64a5a5b2e8c2dfa965411ff4c7aad6e35 | from soln import Solution
import pytest
@pytest.mark.parametrize(
    "inp1, inp2, res",
    [
        ("112", 1, "11"),
        ("11000002000304", 4, "4"),
        ("9119801020", 6, "20"),
        ("111111", 3, "111"),
        ("1432219", 3, "1219"),
        ("10200", 1, "200"),
        ("10", 2, "0"),
        ("10", 1, "0"),
    ],
)
def test_soln(inp1, inp2, res):
    """Calling Solution()(number_string, k) must yield the expected string."""
    assert Solution()(inp1, inp2) == res
|
6,526 | 21c12aabfb21e84f3ea546842fb55c41d2129ff9 | import re
# Fixes: Python-2 `print` statement (a syntax error on Python 3), the
# builtin name `list` being shadowed, and non-raw regex strings.
headers = ["Protein XVZ [Human]", "Protein ABC [Mouse]", "go UDP[3] glucosamine N-acyltransferase [virus1]", "Protein CDY [Chicken [type1]]", "Protein BBC [type 2] [Bacteria] [cat] [mat]", "gi p19-gag protein [2] [Human T-lymphotropic virus 2]"]

# Non-greedy group anchored at the end: captures from the first '[' whose
# bracket run extends to the end of the string.
pattern = re.compile(r"\[(.*?)\]$")


def last_bracket(header):
    """Return the contents of the last bracketed group in *header*.

    Splitting the anchored capture on ``].*[`` (greedy) isolates the text
    of the final ``[...]``; nested brackets inside one group (e.g.
    ``[Chicken [type1]]``) are kept intact.

    Raises AttributeError when *header* has no trailing bracket group
    (same behavior as the original script).
    """
    match = pattern.search(header)
    return re.split(r"\].*\[", match.group(1))[-1]


for header in headers:
    print(last_bracket(header))
|
6,527 | 9609f23463aa4c7859a8db741c7f3badd78b8553 | #!/usr/bin/python
'''Defines classes for representing metadata found in Biographies'''
class Date:
    '''Object to represent dates. Dates can consist of regular day-month-year,
    but also descriptions (before, after, ca.). Object has attributes for the
    regular parts and one for the description; default is the empty string
    ('YY' for unknown year/month/day).'''

    def __init__(self, year='YY', month='YY', day='YY', description='', dateInterval=''):
        self.year = year
        self.month = month
        self.day = day
        self.description = description
        self.interval = dateInterval

    def returnDate(self):
        '''Return the date as "year-month-day", with the description
        appended in parentheses when one is set.'''
        # Bug fix: month and day were joined with '' instead of '-',
        # producing e.g. "1900-0102" rather than "1900-01-02".
        myDate = self.year + '-' + self.month + '-' + self.day
        if self.description:
            myDate += ' (' + self.description + ')'
        return myDate
class DateInterval:
    '''A date interval: a begin date and an end date, each of which may be
    underspecified (empty string by default).'''

    def __init__(self, beginDate='', endDate=''):
        # Both endpoints default to '' when unknown.
        self.beginDate = beginDate
        self.endDate = endDate
class Name:
    '''A person name with fields for last name, first name, initials,
    infixes and title.'''

    def __init__(self, lastname, firstname='', initials='', infix=''):
        self.lastname = lastname
        self.firstname = firstname
        self.initials = initials
        self.infix = infix
        self.title = ''

    def addTitle(self, title):
        self.title = title

    def defineName(self, name):
        self.lastname = name

    def addFirstname(self, firstname):
        self.firstname = firstname

    def addInitials(self, initials):
        self.initials = initials

    def addInfix(self, infix):
        self.infix = infix

    def returnName(self):
        '''Build the display name: prefer the full first name, falling back
        to initials. Unknown components contribute empty strings.'''
        given = self.firstname if self.firstname else self.initials
        # NOTE(review): spacing mirrors the original exactly — a space
        # always follows title and given name, and none separates infix
        # from lastname.
        return self.title + ' ' + given + ' ' + self.infix + self.lastname
class Event:
    '''An event: a label, an optional location and a Date.'''

    def __init__(self, label, location='', date=None):
        self.label = label
        self.location = location
        # Bug fix: the default used to be the Date *class* object itself
        # (``date = Date``), so ``event.date.year`` failed for events built
        # without an explicit date. Use a fresh Date instance per event
        # (a shared default instance would be mutably aliased).
        self.date = date if date is not None else Date()

    def setDate(self, date):
        self.date = date

    def setLocation(self, location):
        self.location = location
class State:
    '''A state: a label, description, location, and begin/end Dates.'''

    def __init__(self, label, description='', location='', beginDate=None, endDate=None):
        self.label = label
        self.location = location
        # Bug fix: the defaults used to be the Date *class* object
        # (``beginDate = Date``); use fresh Date instances so attribute
        # access like ``state.beginDate.year`` works out of the box.
        self.beginDate = beginDate if beginDate is not None else Date()
        self.endDate = endDate if endDate is not None else Date()
        self.description = description

    def setBeginDate(self, date):
        self.beginDate = date

    def setEndDate(self, date):
        self.endDate = date

    def setLocation(self, location):
        self.location = location

    def setDescription(self, description):
        self.description = description
class MetadataSingle:
    '''Metadata extracted from a single biography.

    Scalar facts (id, name, gender, text) are stored directly; repeatable
    facts (education, occupation, religion, residence, ...) accumulate in
    lists; birth/death are Event objects and the parents are Name objects.
    '''

    def __init__(self, idNr, name):
        self.id = idNr
        self.name = name
        self.birth = Event('birth')
        self.death = Event('death')
        self.father = Name('')
        self.mother = Name('')
        self.education = []
        self.occupation = []
        self.gender = ''
        self.religion = []
        self.residence = []
        self.otherEvents = []
        self.otherStates = []
        self.text = ''

    def defineBirthDay(self, date, location=''):
        '''Record the birth date, and the birth place when given.'''
        self.birth.date = date
        if location:
            self.birth.location = location

    def defineDeathDay(self, date, location=''):
        '''Record the death date, and the death place when given.'''
        self.death.date = date
        if location:
            self.death.location = location

    def defineFather(self, name):
        self.father = name

    def defineMother(self, name):
        self.mother = name

    def addEducation(self, educEvent):
        self.education.append(educEvent)

    def addOccupation(self, occEvent):
        self.occupation.append(occEvent)

    def defineGender(self, gender):
        self.gender = gender

    def addReligion(self, religion):
        self.religion.append(religion)

    # NOTE(review): the parameter is named 'religion' in the original but
    # holds a residence; kept for keyword-call compatibility.
    def addResidence(self, religion):
        self.residence.append(religion)

    def defineText(self, text):
        self.text = text
class MetadataComplete:
    '''All available metadata for one individual, aggregated across
    biographies. Every field except the id number is a list (one entry
    per contributing source).

    Note: despite their names, defineMother/defineGender also *append*
    (kept for call compatibility).
    '''

    def __init__(self, idNr):
        self.id = idNr
        self.name = []
        self.birth = []
        self.death = []
        self.father = []
        self.mother = []
        self.education = []
        self.occupation = []
        self.gender = []
        self.religion = []
        self.otherEvents = []
        self.otherStates = []
        self.text = []

    def addName(self, name):
        self.name.append(name)

    def addBirthDay(self, birthEvent):
        self.birth.append(birthEvent)

    def addDeathDay(self, deathEvent):
        self.death.append(deathEvent)

    def addFather(self, fatherName):
        # Bug fix: appended the undefined name ``name`` (NameError at
        # runtime); append the actual parameter.
        self.father.append(fatherName)

    def defineMother(self, motherName):
        self.mother.append(motherName)

    def addEducation(self, eduList):
        self.education.append(eduList)

    def addOccupation(self, occList):
        self.occupation.append(occList)

    def defineGender(self, gender):
        self.gender.append(gender)

    def addReligion(self, religionList):
        self.religion.append(religionList)

    # Bug fix: these two were written as ``def.addOtherEvents(...)`` /
    # ``def.addOtherStates(...)`` — syntax errors that made the whole
    # module unimportable.
    def addOtherEvents(self, otherElist):
        self.otherEvents.append(otherElist)

    def addOtherStates(self, otherSlist):
        self.otherStates.append(otherSlist)

    def defineText(self, text):
        self.text.append(text)
6,528 | f14b9373e9bf1ad7fe2216dfefc1571f5380fb27 | #!/usr/bin/python3
"""Minimum number of copy/paste operations to reach exactly n characters."""
def minOperations(n):
    """Return the fewest "Copy All" + "Paste" operations that turn one H
    character into exactly n H characters; 0 when n <= 1.

    The optimum is the sum of n's prime factors: producing a factor f
    costs one Copy All plus f - 1 Pastes (f operations total).
    """
    if n <= 1:
        return 0
    # Iterative trial division replaces the original recursion (one
    # recursive call per factor) and its executable bare-string "comments".
    ops = 0
    remaining = n
    factor = 2
    while factor * factor <= remaining:
        while remaining % factor == 0:
            ops += factor
            remaining //= factor
        factor += 1
    if remaining > 1:
        # Whatever is left is a single prime factor > sqrt(n).
        ops += remaining
    return ops
|
6,529 | 9c14f024b25c5014567405535dbe5a6c787cfe28 | from abc import ABC
from rest_framework import serializers
from shopping_cars.models import Order, ShoppingCart
class OrderSerializer(serializers.ModelSerializer):
    """Serialize every field of the Order model (no extra validation)."""

    class Meta:
        model = Order
        fields = '__all__'
class OrderProductSerializer(serializers.ModelSerializer):
    """Serialize every field of the ShoppingCart model, demonstrating two
    DRF validation styles: per-field ``validate_<field>`` hooks and the
    object-level ``validate``."""

    class Meta:
        model = ShoppingCart
        fields = '__all__'

    # ways to validate
    # #1: field-level validators (run individually per field)
    def validate_quantity(self, value):
        """Reject non-positive quantities."""
        if value <= 0:
            raise serializers.ValidationError(
                "Please, enter a positive quantity")
        return value

    def validate_total_price_product(self, value):
        """Reject non-positive total prices."""
        if value <= 0:
            raise serializers.ValidationError(
                "Please, enter a positive total price")
        return value

    # #2: object-level validation (runs after the field validators)
    def validate(self, data):
        # NOTE(review): with ``and`` this only fires when BOTH values are
        # non-positive, and the field validators above already reject each
        # one individually — ``or`` may have been intended. Confirm.
        if data['quantity'] <= 0 and data['total_price_product'] <= 0:
            raise serializers.ValidationError(
                "Please, enter a positive value")
        return data
|
6,530 | 02d4e1ddb0b4cf75c9902e13263c5a80417de01b | from tkinter import *
from tkinter import messagebox as mb
from tkinter.scrolledtext import ScrolledText
from tkinter import filedialog as fd
from child_window import ChildWindow
# from PIL import Image as PilImage
# from PIL import ImageTk, ImageOps
class Window:
def __init__(self, width, height, title="MyWindow", resizable=(False, False), icon=r"resources/feather.ico"):
self.root = Tk()
self.root.title(title)
# self.root.geometry(f"{width}x{height}+200+200")
self.root.geometry("+600+300")
# self.root.resizable(resizable[0], resizable[1])
if icon:
self.root.iconbitmap(icon)
self.text = ScrolledText(self.root)
def run(self):
self.draw_widgets()
self.root.mainloop()
def draw_widgets(self):
self.draw_menu()
self.text.pack()
def draw_menu(self):
menu_bar = Menu(self.root)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label="Открыть", command=self.open_file)
file_menu.add_command(label="Сохранить как", command=self.save_file)
file_menu.add_command(label="Отркыть папку", command=self.open_dir)
file_menu.add_separator()
file_menu.add_command(label="Выйти", command=self.exit)
info_menu = Menu(menu_bar, tearoff=0)
info_menu.add_command(label="О приложении", command=self.show_info)
menu_bar.add_cascade(label="Файл", menu=file_menu)
menu_bar.add_cascade(label="Справка", menu=info_menu)
self.root.configure(menu=menu_bar)
def open_file(self):
# wanted_files = (
# ("IMAGES", "*.jpeg;*.png;*.gif"),
# ("TEXT files", "*.txt;*.log"),
# ("PY files", "*.py"),
# ("ALL", "*.*")
# )
#
# file_name = fd.askopenfilename(initialdir="D:/", title="FIND A FILE", filetypes=wanted_files)
# self.text.insert(END, f"Надо открыть файл: {file_name}\nСодержимое:\n")
# if file_name:
# with open(file_name, "r") as f:
# self.text.insert(END, f.read())
# file = fd.askopenfile()
# self.text.insert(END, file.read())
# file.close()
file_names = fd.askopenfilenames()
self.text.insert(END, str(file_names))
def save_file(self):
name = fd.asksaveasfilename(filetypes=(("TEXT files", "*.txt"), ("Py files", "*.py")))
if name:
self.text.insert(END, f"Сохранить файл по пути {name}\n")
# with open(name, "w") as f:
# f.write("123")
# file = fd.asksaveasfile()
# file.write("123")
# file.close()
def open_dir(self):
path = fd.askdirectory(mustexist=True)
self.text.insert(END, f"Папка {path}\n")
def show_info(self):
mb.showinfo("Информация", "Лучшее графическое приложение на свете")
def exit(self):
choice = mb.askyesno("Quit", "Do you want to quit?")
if choice:
self.root.destroy()
    def create_child(self, width, height, title="Child", resizable=(False, False), icon=None):
        """Open a ChildWindow owned by this window's Tk root.

        ChildWindow is defined elsewhere in this file; presumably it shares
        the Window constructor's (width, height, title, resizable, icon)
        semantics — TODO confirm against its definition.
        """
        ChildWindow(self.root, width, height, title, resizable, icon)
if __name__ == "__main__":
    # Script entry point: build the 500x500 main window and start the Tk loop.
    window = Window(500, 500, "TKINTER")
    # window.create_child(200, 100)
    window.run()
|
6,531 | 76fbe055b53af9321cc0d57a210cfffe9188f800 | #
# PySNMP MIB module CISCO-LWAPP-CLIENT-ROAMING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-LWAPP-CLIENT-ROAMING-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:04:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
cLApDot11IfSlotId, cLApSysMacAddress = mibBuilder.importSymbols("CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId", "cLApSysMacAddress")
CLDot11RfParamMode, CLDot11Channel = mibBuilder.importSymbols("CISCO-LWAPP-TC-MIB", "CLDot11RfParamMode", "CLDot11Channel")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
Integer32, IpAddress, MibIdentifier, NotificationType, TimeTicks, Bits, ObjectIdentity, Counter64, ModuleIdentity, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "IpAddress", "MibIdentifier", "NotificationType", "TimeTicks", "Bits", "ObjectIdentity", "Counter64", "ModuleIdentity", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Unsigned32")
DisplayString, MacAddress, TextualConvention, TimeInterval = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "MacAddress", "TextualConvention", "TimeInterval")
ciscoLwappClRoamMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 523))
ciscoLwappClRoamMIB.setRevisions(('2010-01-29 00:00', '2006-04-11 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setRevisionsDescriptions(('Deprecated following attributes:- clcrDot11aMinRssi, clcrDot11aHysteresis, clcrDot11aAdaptiveScanThreshold, clcrDot11aTransitionTime, clcrDot11bMinRssi, clcrDot11bHysteresis, clcrDot11bAdaptiveScanThreshold, clcrDot11bTransitionTime. clcrMIBCompliance, ciscoLwappClRoamDot11aRfParamsGroup, ciscoLwappClRoamDot11bRfParamsGroup Added following attributes:- clcrDot11aMinRssiV2, clcrDot11aHysteresisV2, clcrDot11aAdaptiveScanThresholdV2, clcrDot11aTransitionTimeV2, clcrDot11bMinRssiV2, clcrDot11bHysteresisV2, clcrDot11bAdaptiveScanThresholdV2, clcrDot11bTransitionTimeV2. clcrMIBComplianceRev1, ciscoLwappClRoamDot11aRfParamsGroupSup1, ciscoLwappClRoamDot11bRfParamsGroupSup1', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setLastUpdated('201001290000Z')
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setContactInfo('Cisco Systems, Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS Email: cs-wnbu-snmp@cisco.com')
if mibBuilder.loadTexts: ciscoLwappClRoamMIB.setDescription("This MIB is intended to be implemented on all those devices operating as Central controllers, that terminate the Light Weight Access Point Protocol tunnel from Cisco Light-weight LWAPP Access Points. Information provided by this MIB is for CCX related features as specified in the CCX specifications. This MIB covers roaming RF parameters for CCX clients. The relationship between CC and the LWAPP APs can be depicted as follows: +......+ +......+ +......+ + + + + + + + CC + + CC + + CC + + + + + + + +......+ +......+ +......+ .. . . .. . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + AP + + AP + + AP + + AP + + + + + + + + + +......+ +......+ +......+ +......+ . . . . . . . . . . . . . . . . . . . +......+ +......+ +......+ +......+ + + + + + + + + + MN + + MN + + MN + + MN + + + + + + + + + +......+ +......+ +......+ +......+ The LWAPP tunnel exists between the controller and the APs. The MNs communicate with the APs through the protocol defined by the 802.11 standard. LWAPP APs, upon bootup, discover and join one of the controllers and the controller pushes the configuration, that includes the WLAN parameters, to the LWAPP APs. The APs then encapsulate all the 802.11 frames from wireless clients inside LWAPP frames and forward the LWAPP frames to the controller. GLOSSARY Access Point ( AP ) An entity that contains an 802.11 medium access control ( MAC ) and physical layer ( PHY ) interface and provides access to the distribution services via the wireless medium for associated clients. LWAPP APs encapsulate all the 802.11 frames in LWAPP frames and sends them to the controller to which it is logically connected. Basic Service Set ( BSS ) The IEEE 802.11 BSS of an AP comprises of the stations directly associating with the AP. Central Controller ( CC ) The central entity that terminates the LWAPP protocol tunnel from the LWAPP APs. 
Throughout this MIB, this entity is also referred to as 'controller'. Cisco Compatible eXtensions (CCX) Wireless LAN Access Points (APs) manufactured by Cisco Systems have features and capabilities beyond those in related standards (e.g., IEEE 802.11 suite of standards ,Wi-Fi recommendations by WECA, 802.1X security suite,etc). A number of features provide higher performance.For example, Cisco AP transmits a specific Information Element, which the clients adapt to for enhanced performance. Similarly, a number of features are implemented by means of proprietary Information Elements, which Cisco clients use in specific ways to carry out tasks above and beyond the standard. Other examples of feature categories are roaming and power saving. Client Roaming A client may decide to reassociate with another AP for reasons of its own choosing. The decision of whether or not to use the information contained in the AP list is up to the discretion of the implementor, as long as the roam time requirement is met. Light Weight Access Point Protocol ( LWAPP ) This is a generic protocol that defines the communication between the Access Points and the Central Controller. Mobile Node ( MN ) A roaming 802.11 wireless device in a wireless network associated with an access point. Mobile Node and client are used interchangeably. REFERENCE [1] Wireless LAN Medium Access Control ( MAC ) and Physical Layer ( PHY ) Specifications [2] Draft-obara-capwap-lwapp-00.txt, IETF Light Weight Access Point Protocol")
ciscoLwappClRoamMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 0))
ciscoLwappClRoamMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1))
ciscoLwappClRoamMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2))
clcrRoamDot11aRfParamConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1))
clcrRoamDot11bRfParamConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2))
clcrRoamReasonReport = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3))
clcrRoamDot11Stats = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4))
clcrDot11aMode = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 1), CLDot11RfParamMode().clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aMode.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aMode.setDescription('This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11a networks.')
clcrDot11aMinRssi = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -80)).clone(-85)).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aMinRssi.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11aMinRssi.setDescription("This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. This object is superceded by clcrDot11aMinRssiV2")
clcrDot11aHysteresis = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4)).clone(2)).setUnits('dB').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aHysteresis.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11aHysteresis.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superceded by clcrDot11aHysteresisV2')
clcrDot11aAdaptiveScanThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-77, -70)).clone(-72)).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aAdaptiveScanThreshold.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11aAdaptiveScanThreshold.setDescription('This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime. This object is superceded by clcrDot11aAdaptiveScanThresholdV2')
clcrDot11aTransitionTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 5), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(100, 10000)).clone(500)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aTransitionTime.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11aTransitionTime.setDescription('This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client?s associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in 100th of a second. This object is superceded by clcrDot11aTransitionTimeV2')
clcrDot11aMinRssiV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aMinRssiV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aMinRssiV2.setDescription("This object indicates the Minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.")
clcrDot11aHysteresisV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits('dB').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aHysteresisV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aHysteresisV2.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.')
clcrDot11aAdaptiveScanThresholdV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aAdaptiveScanThresholdV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aAdaptiveScanThresholdV2.setDescription('This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11aTransitionTime.')
clcrDot11aTransitionTimeV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 1, 9), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11aTransitionTimeV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11aTransitionTimeV2.setDescription('This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the clients associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in 100th of a second.')
clcrDot11bMode = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 1), CLDot11RfParamMode().clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bMode.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bMode.setDescription('This object represents how the controller chooses the values of the RF parameters needed to manage roaming in 802.11b/g networks.')
clcrDot11bMinRssi = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-90, -80)).clone(-85)).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bMinRssi.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11bMinRssi.setDescription("This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal. This object is superceded by clcrDot11bMinRssiV2")
clcrDot11bHysteresis = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4)).clone(2)).setUnits('dB').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bHysteresis.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11bHysteresis.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs. This object is superceded by clcrDot11bHysteresisV2')
clcrDot11bAdaptiveScanThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-77, -70)).clone(-72)).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bAdaptiveScanThreshold.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11bAdaptiveScanThreshold.setDescription('This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime. This object is superceded by clcrDot11bAdaptiveScanThresholdV2')
clcrDot11bTransitionTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 5), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(100, 10000)).clone(500)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bTransitionTime.setStatus('deprecated')
if mibBuilder.loadTexts: clcrDot11bTransitionTime.setDescription('This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client is associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in 100th of a second. This object is superceded by clcrDot11bTransitionTimeV2')
clcrDot11bMinRssiV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bMinRssiV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bMinRssiV2.setDescription("This object indicates the minimum Received Signal Strength Indication (RSSI) in dBm required to associate with the AP. It also defines the edge of coverage for the BSS. If the client's average received signal power dips below this threshold, clients must have roamed to another AP with a stronger signal.")
clcrDot11bHysteresisV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits('dB').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bHysteresisV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bHysteresisV2.setDescription('This object indicates how much stronger the signal strength (dB) of a neighbor AP must be, in order for the client to roam to it. The use of roaming hysteresis is intended to reduce the amount of clients roaming back and forth between BSSs if the client is physically located on or near the border between two BSSs.')
clcrDot11bAdaptiveScanThresholdV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-255, 255))).setUnits('dBm').setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bAdaptiveScanThresholdV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bAdaptiveScanThresholdV2.setDescription('This object configures the threshold for the strength of the signals received(RSSI) from an AP, as seen by an associated client, below which the client must be able to roam to a neighbor AP within the specified Transition Time configured through clcrDot11bTransitionTime.')
clcrDot11bTransitionTimeV2 = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 2, 9), TimeInterval().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clcrDot11bTransitionTimeV2.setStatus('current')
if mibBuilder.loadTexts: clcrDot11bTransitionTimeV2.setDescription('This object configures the maximum time duration permitted for the client to detect a suitable neighbor AP to roam to and to complete the roam, whenever the RSSI from the client is associated AP is below the adaptive scan threshold configured through clcrDot11aAdaptiveScanThreshold. The time is expressed in 100th of a second.')
clcrRoamReasonReportTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1), )
if mibBuilder.loadTexts: clcrRoamReasonReportTable.setStatus('current')
if mibBuilder.loadTexts: clcrRoamReasonReportTable.setDescription('This table provides the reasons for CCX clients roaming from one AP to another. When a CCX client associates to an AP, it will always send an IAPP information packet to the new AP listing the characteristics of the previous AP. An entry is added to this table when a roam reason report is sent by a CCX client when it roams to a new AP.')
clcrRoamReasonReportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamClientMacAddress"), (0, "CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamClientTimeStamp"))
if mibBuilder.loadTexts: clcrRoamReasonReportEntry.setStatus('current')
if mibBuilder.loadTexts: clcrRoamReasonReportEntry.setDescription('Each entry corresponds to the roam reason report sent by a CCX client to the new AP to which client associates.')
clcrRoamClientMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 1), MacAddress())
if mibBuilder.loadTexts: clcrRoamClientMacAddress.setStatus('current')
if mibBuilder.loadTexts: clcrRoamClientMacAddress.setDescription('This object indicates the mac address of the client which has roamed to a new AP.')
clcrRoamClientTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 2), TimeTicks())
if mibBuilder.loadTexts: clcrRoamClientTimeStamp.setStatus('current')
if mibBuilder.loadTexts: clcrRoamClientTimeStamp.setDescription("This object indicates the time instance at which this report was received by the new AP, to which client roamed to. This represents number of seconds elapsed since 00:00:00 on January 1, 1970, Coordinated Universal Time (UTC). So a value of '1131362704' means 'Mon Nov 7 16:55:04 2005'.")
clcrRoamNewApMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamNewApMacAddress.setStatus('current')
if mibBuilder.loadTexts: clcrRoamNewApMacAddress.setDescription('This object indicates the mac address of the current AP to which client has roamed to. This AP receives the roam reason report.')
clcrRoamPrevApMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 4), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamPrevApMacAddress.setStatus('current')
if mibBuilder.loadTexts: clcrRoamPrevApMacAddress.setDescription('This object indicates the mac address of the previous AP to which client was associated.')
clcrRoamPrevApChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 5), CLDot11Channel()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamPrevApChannel.setStatus('current')
if mibBuilder.loadTexts: clcrRoamPrevApChannel.setDescription('This object indicates the channel number at which the client was associated to the previous AP.')
clcrRoamPrevApSsid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamPrevApSsid.setStatus('current')
if mibBuilder.loadTexts: clcrRoamPrevApSsid.setDescription('This object indicates the SSID at which the client was associated to the previous AP.')
clcrRoamDisassocTimeInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 7), TimeInterval()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamDisassocTimeInterval.setStatus('current')
if mibBuilder.loadTexts: clcrRoamDisassocTimeInterval.setDescription('This object indicates the time elapsed since the client disassociated, in hundredth of a second.')
clcrRoamReason = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("clcrUnspecified", 0), ("clcrPoorLink", 1), ("clcrLoadBalancing", 2), ("clcrInsufficientCapacity", 3), ("clcrDirectedRoam", 4), ("clcrFirstAssociation", 5), ("clcrRoamingIn", 6), ("clcrRoamingOut", 7), ("clcrBetterAp", 8), ("clcrDisassociated", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrRoamReason.setStatus('current')
if mibBuilder.loadTexts: clcrRoamReason.setDescription("This object indicates the reason for a client to roam to a new AP. The semantics are as follows. clcrUnspecified - The reason is not known or can't be found. clcrPoorLink - Normal roam due to poor link (excessive retries, too much interference, RSSI too low, etc.) clcrLoadBalancing - Normal roam due to load balancing clcrInsufficientCapacity - Roaming occured due to the insufficient capacity on the previous AP (TSPEC rejected) clcrDirectedRoam - Roaming is directed by the 802.11 wireless Infrastructure clcrFirstAssociation - This is the first association to a particular WLAN clcrRoamingIn - Roaming in from cellular or other WAN clcrRoamingOut - Roaming out to cellular or other WAN clcrBetterAp - Normal roam due to better AP found clcrDisassociated - Deauthenticated or Disassociated from the previous AP.")
clcrDot11StatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1), )
if mibBuilder.loadTexts: clcrDot11StatsTable.setStatus('current')
if mibBuilder.loadTexts: clcrDot11StatsTable.setDescription('This table populates the statistics collected when the client roamed in the WLAN. There exists a row in this table for each conceptual row in cLApDot11IfTable that represents a dot11 interface of an AP.')
clcrDot11StatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-AP-MIB", "cLApSysMacAddress"), (0, "CISCO-LWAPP-AP-MIB", "cLApDot11IfSlotId"))
if mibBuilder.loadTexts: clcrDot11StatsEntry.setStatus('current')
if mibBuilder.loadTexts: clcrDot11StatsEntry.setDescription('Each entry represents a conceptual row in clcrDot11StatsTable and corresponds to the roam reason report sent by a CCX client to the new AP which the client associates to.')
clcrDot11NeighborRequestRx = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrDot11NeighborRequestRx.setStatus('current')
if mibBuilder.loadTexts: clcrDot11NeighborRequestRx.setDescription('This object indicates the count of the number of requests received from an E2E client for neighbor updates.')
clcrDot11NeighborReplySent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrDot11NeighborReplySent.setStatus('current')
if mibBuilder.loadTexts: clcrDot11NeighborReplySent.setDescription('This object indicates the count of the number of replies sent to the client in reply to the request for neighbor updates received from the client.')
clcrDot11RoamReasonReportRx = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrDot11RoamReasonReportRx.setStatus('current')
if mibBuilder.loadTexts: clcrDot11RoamReasonReportRx.setDescription('This object reports the count of the number of roam reason reports received from CCX clients.')
clcrDot11BcastUpdatesSent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 523, 1, 4, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: clcrDot11BcastUpdatesSent.setStatus('current')
if mibBuilder.loadTexts: clcrDot11BcastUpdatesSent.setDescription('This object indicates the count of the number of broadcast neighbor updates sent by an AP.')
ciscoLwappClRoamMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1))
ciscoLwappClRoamMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2))
clcrMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1, 1)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamDot11aRfParamsGroup"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamDot11bRfParamsGroup"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamroamReasonGroup"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamroamingStatsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clcrMIBCompliance = clcrMIBCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: clcrMIBCompliance.setDescription('The compliance statement for the SNMP entities that implement the ciscoLwappRoamMIB module.')
clcrMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 1, 2)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamDot11aRfParamsGroupSup1"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamDot11bRfParamsGroupSup1"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamroamReasonGroup"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "ciscoLwappClRoamroamingStatsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clcrMIBComplianceRev1 = clcrMIBComplianceRev1.setStatus('current')
if mibBuilder.loadTexts: clcrMIBComplianceRev1.setDescription('The compliance statement for the SNMP entities that implement the ciscoLwappRoamMIB module.')
ciscoLwappClRoamDot11aRfParamsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 1)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aMode"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aMinRssi"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aHysteresis"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aAdaptiveScanThreshold"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aTransitionTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamDot11aRfParamsGroup = ciscoLwappClRoamDot11aRfParamsGroup.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappClRoamDot11aRfParamsGroup.setDescription('This collection of objects represent the radio parameters for the 802.11a networks.')
ciscoLwappClRoamDot11bRfParamsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 2)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bMode"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bMinRssi"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bHysteresis"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bAdaptiveScanThreshold"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bTransitionTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamDot11bRfParamsGroup = ciscoLwappClRoamDot11bRfParamsGroup.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoLwappClRoamDot11bRfParamsGroup.setDescription('This collection of objects represent the radio parameters for the 802.11b/g bands.')
ciscoLwappClRoamroamReasonGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 3)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamNewApMacAddress"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamPrevApMacAddress"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamPrevApChannel"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamPrevApSsid"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamDisassocTimeInterval"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrRoamReason"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamroamReasonGroup = ciscoLwappClRoamroamReasonGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappClRoamroamReasonGroup.setDescription('This collection of objects provide the reasons for clients roaming between APs.')
ciscoLwappClRoamroamingStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 4)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11NeighborRequestRx"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11NeighborReplySent"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11RoamReasonReportRx"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11BcastUpdatesSent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamroamingStatsGroup = ciscoLwappClRoamroamingStatsGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappClRoamroamingStatsGroup.setDescription('This collection of objects provide the counters related to roaming.')
ciscoLwappClRoamDot11aRfParamsGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 5)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aMode"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aMinRssiV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aHysteresisV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aAdaptiveScanThresholdV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11aTransitionTimeV2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamDot11aRfParamsGroupSup1 = ciscoLwappClRoamDot11aRfParamsGroupSup1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappClRoamDot11aRfParamsGroupSup1.setDescription('This collection of objects represent the radio parameters for the 802.11a networks.')
ciscoLwappClRoamDot11bRfParamsGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 523, 2, 2, 6)).setObjects(("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bMode"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bMinRssiV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bHysteresisV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bAdaptiveScanThresholdV2"), ("CISCO-LWAPP-CLIENT-ROAMING-MIB", "clcrDot11bTransitionTimeV2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappClRoamDot11bRfParamsGroupSup1 = ciscoLwappClRoamDot11bRfParamsGroupSup1.setStatus('current')
if mibBuilder.loadTexts: ciscoLwappClRoamDot11bRfParamsGroupSup1.setDescription('This collection of objects represent the radio parameters for the 802.11b/g bands.')
mibBuilder.exportSymbols("CISCO-LWAPP-CLIENT-ROAMING-MIB", clcrDot11aMinRssi=clcrDot11aMinRssi, clcrRoamClientMacAddress=clcrRoamClientMacAddress, ciscoLwappClRoamroamingStatsGroup=ciscoLwappClRoamroamingStatsGroup, clcrDot11bTransitionTimeV2=clcrDot11bTransitionTimeV2, clcrRoamNewApMacAddress=clcrRoamNewApMacAddress, clcrMIBCompliance=clcrMIBCompliance, clcrRoamDot11aRfParamConfig=clcrRoamDot11aRfParamConfig, clcrDot11BcastUpdatesSent=clcrDot11BcastUpdatesSent, clcrRoamPrevApSsid=clcrRoamPrevApSsid, clcrMIBComplianceRev1=clcrMIBComplianceRev1, clcrDot11bHysteresisV2=clcrDot11bHysteresisV2, ciscoLwappClRoamMIBConform=ciscoLwappClRoamMIBConform, clcrDot11aTransitionTime=clcrDot11aTransitionTime, clcrDot11aHysteresis=clcrDot11aHysteresis, ciscoLwappClRoamDot11bRfParamsGroupSup1=ciscoLwappClRoamDot11bRfParamsGroupSup1, PYSNMP_MODULE_ID=ciscoLwappClRoamMIB, clcrDot11bHysteresis=clcrDot11bHysteresis, clcrDot11StatsEntry=clcrDot11StatsEntry, clcrRoamDisassocTimeInterval=clcrRoamDisassocTimeInterval, ciscoLwappClRoamDot11aRfParamsGroupSup1=ciscoLwappClRoamDot11aRfParamsGroupSup1, clcrDot11bAdaptiveScanThreshold=clcrDot11bAdaptiveScanThreshold, clcrDot11NeighborRequestRx=clcrDot11NeighborRequestRx, clcrRoamClientTimeStamp=clcrRoamClientTimeStamp, clcrRoamReason=clcrRoamReason, clcrDot11bMode=clcrDot11bMode, clcrDot11aAdaptiveScanThreshold=clcrDot11aAdaptiveScanThreshold, clcrDot11RoamReasonReportRx=clcrDot11RoamReasonReportRx, clcrDot11bAdaptiveScanThresholdV2=clcrDot11bAdaptiveScanThresholdV2, ciscoLwappClRoamDot11bRfParamsGroup=ciscoLwappClRoamDot11bRfParamsGroup, ciscoLwappClRoamMIBNotifs=ciscoLwappClRoamMIBNotifs, clcrRoamReasonReportTable=clcrRoamReasonReportTable, clcrDot11aMinRssiV2=clcrDot11aMinRssiV2, ciscoLwappClRoamMIBObjects=ciscoLwappClRoamMIBObjects, clcrDot11NeighborReplySent=clcrDot11NeighborReplySent, clcrDot11aAdaptiveScanThresholdV2=clcrDot11aAdaptiveScanThresholdV2, ciscoLwappClRoamroamReasonGroup=ciscoLwappClRoamroamReasonGroup, 
clcrDot11StatsTable=clcrDot11StatsTable, clcrRoamDot11Stats=clcrRoamDot11Stats, clcrRoamDot11bRfParamConfig=clcrRoamDot11bRfParamConfig, clcrDot11bMinRssi=clcrDot11bMinRssi, clcrRoamReasonReport=clcrRoamReasonReport, clcrRoamPrevApMacAddress=clcrRoamPrevApMacAddress, ciscoLwappClRoamDot11aRfParamsGroup=ciscoLwappClRoamDot11aRfParamsGroup, clcrRoamReasonReportEntry=clcrRoamReasonReportEntry, ciscoLwappClRoamMIBGroups=ciscoLwappClRoamMIBGroups, clcrDot11bMinRssiV2=clcrDot11bMinRssiV2, ciscoLwappClRoamMIBCompliances=ciscoLwappClRoamMIBCompliances, clcrDot11aMode=clcrDot11aMode, clcrDot11aTransitionTimeV2=clcrDot11aTransitionTimeV2, clcrRoamPrevApChannel=clcrRoamPrevApChannel, clcrDot11bTransitionTime=clcrDot11bTransitionTime, ciscoLwappClRoamMIB=ciscoLwappClRoamMIB, clcrDot11aHysteresisV2=clcrDot11aHysteresisV2)
|
6,532 | 2c6dc4d55f64d7c3c01b3f504a72904451cb4610 | """
2. Schreiben Sie die Anzahl von symmetrischen Paaren (xy) und (yx).
"""
def symetrisch(x, y):
    """
    Determine whether two two-digit numbers x and y are digit-mirrored
    (e.g. 12 and 21, or 47 and 74).

    :param x: a list element (two-digit integer)
    :param y: a list element (two-digit integer)
    :return: True if x and y are symmetric, False otherwise
    """
    # Return the comparison directly instead of if/return True/else/return False.
    return (x % 10 == y // 10) and (x // 10 == y % 10)
def anz_von_sym(lst):
    """
    Walk the list with two nested loops, testing each element against the
    elements that follow it, and count symmetric pairs.

    Bug fix: the inner loop previously started at ``i``, so a palindromic
    number such as 11 was counted as a pair with itself. It now starts at
    ``i + 1`` so only two distinct positions can form a pair.

    :param lst: list of two-digit integers
    :return: number of symmetric pairs in the list (also printed)
    """
    anz = 0
    for i in range(len(lst) - 1):
        for j in range(i + 1, len(lst)):
            if symetrisch(lst[i], lst[j]):
                anz += 1
    print("Anzahl symmetrischer Paaren:", anz)
    # Return the count as promised by the docstring (previously returned None).
    return anz
|
6,533 | 6afcb8f17f7436f0ae9fa3a8c2a195245a9801f1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
class ZoomPanHandler:
    """
    Matplotlib callback class to handle pan and zoom events.

    Zooming is bound to the scroll wheel (centered on the cursor) and
    panning to dragging with `mouse_button`.
    """

    def __init__(self, axes, scale_factor=2, mouse_button=2):
        """
        Default constructor for the ZoomPanHandler class.

        Parameters
        axes: matplotlib.axes.Axes
            The axes to attach this handler to.
        scale_factor: number
            The scale factor to apply when zooming.
        mouse_button: number or string
            The mouse button used to activate the pan action. Default value is
            2, meaning the middle mouse button.
        """
        self._axes = axes
        self._scale_factor = scale_factor
        self._mouse_button = mouse_button
        # Data coordinates of the pan press; None while not panning.
        self._press_coords = None
        self._curr_xlim = self.axes.get_xlim()
        self._curr_ylim = self.axes.get_ylim()
        # Mouse action callback IDs
        self._cb_mouse_wheel_id = None
        self._cb_mouse_button_id = None
        self._cb_mouse_release_id = None
        self._cb_mouse_motion_id = None
        self._connect_cb()

    def __del__(self):
        self._disconnect_cb()
        self._axes = None

    @property
    def axes(self):
        return self._axes

    @property
    def scale_factor(self):
        return self._scale_factor

    @property
    def mouse_button(self):
        return self._mouse_button

    def apply_transforms(self):
        """
        Applies the zoom and pan transforms to the axes. Useful after reseting
        the plot.
        """
        self.axes.set_xlim(self._curr_xlim)
        self.axes.set_ylim(self._curr_ylim)

    def set_base_transforms(self):
        """
        Queries the current axis limits and stores them.
        """
        self._curr_xlim = self.axes.get_xlim()
        self._curr_ylim = self.axes.get_ylim()

    # Private methods
    def _cb_mouse_wheel(self, event):
        """Zoom in/out keeping the point under the cursor fixed."""
        if not event.inaxes:
            return
        if event.button not in ('up', 'down'):
            # BUG FIX: the original initialized `xlim = ylim = []` (two names
            # aliasing one list) and applied them even for unhandled wheel
            # buttons, resetting the axes to empty limits.
            return
        curr_xlim = self.axes.get_xlim()
        curr_ylim = self.axes.get_ylim()
        xdata = event.xdata
        ydata = event.ydata
        # Distances from the cursor to the current view edges.
        xmin = xdata - curr_xlim[0]
        ymin = ydata - curr_ylim[0]
        xmax = curr_xlim[1] - xdata
        ymax = curr_ylim[1] - ydata
        if event.button == 'up':  # zoom-in: shrink the edge distances
            factor = 1.0 / self.scale_factor
        else:  # 'down' -> zoom-out: grow the edge distances
            factor = self.scale_factor
        xlim = [xdata - xmin * factor, xdata + xmax * factor]
        ylim = [ydata - ymin * factor, ydata + ymax * factor]
        self._curr_xlim = xlim
        self._curr_ylim = ylim
        self.axes.set_xlim(xlim)
        self.axes.set_ylim(ylim)
        self.axes.figure.canvas.draw()

    def _cb_mouse_button(self, event):
        """Start a pan gesture on the configured mouse button."""
        if not event.inaxes or event.button != self.mouse_button:
            return
        self._press_coords = (event.xdata, event.ydata)

    def _cb_mouse_release(self, event):
        """End the pan gesture."""
        self._press_coords = None
        self.axes.figure.canvas.draw()

    def _cb_mouse_motion(self, event):
        """Shift the view so the grabbed point follows the cursor."""
        if not event.inaxes or not self._press_coords:
            return
        # BUG FIX: Axes.get_xlim()/get_ylim() return tuples, so the original
        # `xlim -= dx` raised TypeError (tuple minus float). Convert to
        # ndarrays so the shift broadcasts element-wise.
        xlim = np.asarray(self.axes.get_xlim(), dtype=float)
        ylim = np.asarray(self.axes.get_ylim(), dtype=float)
        xlim -= event.xdata - self._press_coords[0]
        ylim -= event.ydata - self._press_coords[1]
        self._curr_xlim = xlim
        self._curr_ylim = ylim
        self.axes.set_xlim(xlim)
        self.axes.set_ylim(ylim)
        self.axes.figure.canvas.draw()

    def _connect_cb(self):
        """Registers the four mouse callbacks on the figure canvas."""
        fig = self.axes.figure
        self._cb_mouse_wheel_id = fig.canvas.mpl_connect(
            'scroll_event', self._cb_mouse_wheel)
        self._cb_mouse_button_id = fig.canvas.mpl_connect(
            'button_press_event', self._cb_mouse_button)
        self._cb_mouse_release_id = fig.canvas.mpl_connect(
            'button_release_event', self._cb_mouse_release)
        self._cb_mouse_motion_id = fig.canvas.mpl_connect(
            'motion_notify_event', self._cb_mouse_motion)

    def _disconnect_cb(self):
        """Unregisters every callback that is currently connected."""
        fig = self.axes.figure
        if self._cb_mouse_wheel_id:
            fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)
            self._cb_mouse_wheel_id = None
        if self._cb_mouse_button_id:
            fig.canvas.mpl_disconnect(self._cb_mouse_button_id)
            self._cb_mouse_button_id = None
        if self._cb_mouse_release_id:
            fig.canvas.mpl_disconnect(self._cb_mouse_release_id)
            self._cb_mouse_release_id = None
        if self._cb_mouse_motion_id:
            fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)
            self._cb_mouse_motion_id = None
def main():
    """Demo: a scatter plot wired to a ZoomPanHandler (scroll wheel to
    zoom, middle-button drag to pan)."""
    import matplotlib.pyplot as plt
    fig = plt.figure()
    axes = fig.add_subplot(111)
    axes.scatter(x=np.arange(0, 10, 0.5), y=np.arange(
        0, 20, 1), color='r', marker='o')
    # Keep a reference so the handler is not garbage collected while shown.
    hand = ZoomPanHandler(axes, scale_factor=1.5)
    plt.show()
if __name__ == '__main__':
    main()
|
6,534 | 3852ff2f3f4ac889256bd5f4e36a86d483857cef | from pyspark.sql import SparkSession, Row, functions, Column
from pyspark.sql.types import *
from pyspark.ml import Pipeline, Estimator
from pyspark.ml.feature import SQLTransformer, VectorAssembler
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder
from pyspark.ml.regression import (LinearRegression,
GBTRegressor,
RandomForestRegressor,
DecisionTreeRegressor)
import sys
from weather_tools_mv import *
# Input schema of the tmax CSV files: one maximum-temperature reading per
# station and date, plus the station's position and elevation.
schema = StructType([
    StructField('station', StringType(), False),
    StructField('date', DateType(), False),
    # StructField('dayofyear', IntegerType(), False),
    StructField('latitude', FloatType(), False),
    StructField('longitude', FloatType(), False),
    StructField('elevation', FloatType(), False),
    StructField('tmax', FloatType(), False),
])
def get_data(inputloc, tablename='data'):
    """Load the tmax CSV dataset from `inputloc` and register it as a
    temporary SQL view named `tablename`.

    NOTE(review): relies on a module-level `spark` session that is not
    created in this file -- presumably supplied by the
    `from weather_tools_mv import *` star import; confirm.
    """
    data = spark.read.csv(inputloc, schema=schema)
    data.createOrReplaceTempView(tablename)
    return data
input_loc = 'tmax-2'
data = get_data(input_loc)
# Part 2a: per one-year window, keep the most recent reading of every station.
# years = list(map(lambda x: str(x), range(2000, 2018)))
years = ['2000', '2001', '2002', '2003']
reduced_data = dict()
def resolved_max(df):
    """For each station in `df`, keep the row at that station's latest
    date and return it as a list of plain dicts with keys
    latitude/longitude/tmax/station."""
    latest_dates = (df.groupBy('station')
                      .agg({'date': 'max'})
                      .select(functions.col('station'),
                              functions.col('max(date)').alias('d_max')))
    newest_rows = df.join(latest_dates, 'station').where(
        functions.col('d_max') == functions.col('date'))
    trimmed = newest_rows.select(functions.col('latitude'),
                                 functions.col('longitude'),
                                 functions.col('tmax'),
                                 functions.col('station'))
    return [row.asDict() for row in trimmed.collect()]
# Slice the data into consecutive one-year windows and reduce each window
# to the latest reading per station.
for i in range(0, len(years) - 1):
    lower = years[i]
    upper = years[i+1]
    zone = data.filter(functions.col('date') < upper).filter(functions.col('date') >= lower)
    reduced_data[lower+"_"+upper] = resolved_max(zone)
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
# World map of the year-2000 max temperatures.
plt.figure(figsize=(16,12))
eq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,
          lat_0=0, lon_0=0)
# eq_map.drawcoastlines()
# eq_map.drawcountries()
eq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)
eq_map.drawmapboundary(fill_color='#3b3b3b')
eq_map.drawmeridians(np.arange(0, 360, 30))
eq_map.drawparallels(np.arange(-90, 90, 30))
lat = []
lon = []
val = []
for y in reduced_data['2000_2001']:
    lon.append(y['longitude'])
    lat.append(y['latitude'])
    val.append(y['tmax'])
x, y = eq_map(lon, lat)
cs = eq_map.scatter(x, y, c=val, marker="o", cmap=cm.bwr)
# add colorbar.
cbar = eq_map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label('Max Temperature (in Celcius)')
plt.title('Year 2000')
plt.savefig('2a_2000.png')
# Same map for 2001 (different colormap).
plt.figure(figsize=(16,12))
eq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,
          lat_0=0, lon_0=0)
# eq_map.drawcoastlines()
# eq_map.drawcountries()
eq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)
eq_map.drawmapboundary(fill_color='#3b3b3b')
eq_map.drawmeridians(np.arange(0, 360, 30))
eq_map.drawparallels(np.arange(-90, 90, 30))
lat = []
lon = []
val = []
for y in reduced_data['2001_2002']:
    lon.append(y['longitude'])
    lat.append(y['latitude'])
    val.append(y['tmax'])
x, y = eq_map(lon, lat)
cs = eq_map.scatter(x, y, c=val, marker="o", cmap=cm.coolwarm)
# add colorbar.
cbar = eq_map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label('Max Temperature (in Celcius)')
plt.title('Year 2001')
plt.savefig('2a_2001.png')
# Part 2b
def make_weather_trainers(trainRatio,
                          estimator_gridbuilders,
                          metricName=None):
    """Construct a list of TrainValidationSplit estimators for weather data.

    `estimator_gridbuilders` is a list of (Estimator, ParamGridBuilder)
    tuples and 0 < `trainRatio` <= 1 determines the fraction of rows used
    for training. The RegressionEvaluator uses a non-default `metricName`
    when one is given.
    """
    column_names = dict(featuresCol="features",
                        labelCol="tmax",
                        predictionCol="tmax_pred")
    assembler = VectorAssembler(
        inputCols=['latitude', 'longitude', 'elevation'],
        outputCol=column_names["featuresCol"])
    evaluator = (RegressionEvaluator()
                 .setLabelCol(column_names["labelCol"])
                 .setPredictionCol(column_names["predictionCol"]))
    if metricName:
        evaluator = evaluator.setMetricName(metricName)
    trainers = []
    for estimator, grid_builder in estimator_gridbuilders:
        estimator = estimator.setParams(**column_names)
        pipeline = Pipeline(stages=[assembler, estimator])
        trainers.append(TrainValidationSplit(estimator=pipeline,
                                             estimatorParamMaps=grid_builder.build(),
                                             evaluator=evaluator,
                                             trainRatio=trainRatio))
    return trainers
def get_best_weather_model(data):
    """Fit every candidate model family on a train split, score each on a
    test split, and return the best TrainValidationSplit model.

    NOTE(review): `estimator_gridbuilder` and `get_estimator_name` come
    from the `weather_tools_mv` star import; confirm their contracts there.
    """
    train, test = data.randomSplit([0.75, 0.25])
    train = train.cache()
    test = test.cache()
    # e.g., use print(LinearRegression().explainParams()) to see what can be tuned
    estimator_gridbuilders = [
        estimator_gridbuilder(
            LinearRegression(),
            dict(regParam=[0.3, 0.6],
                 elasticNetParam=[0, 0.5],
                 maxIter=[10, 20]
                 )),
        estimator_gridbuilder(
            GBTRegressor(),
            dict(lossType=["squared"],
                 maxDepth=[5, 10],
                 maxIter=[2, 5],
                 stepSize=[0.1]
                 )),
        estimator_gridbuilder(
            RandomForestRegressor(),
            dict(numTrees=[5, 10],
                 maxDepth=[5, 15],
                 featureSubsetStrategy=["auto"]
                 ))
    ]
    metricName = 'r2'
    tvs_list = make_weather_trainers(.2,  # fraction of data for training
                                     estimator_gridbuilders,
                                     metricName)
    ev = tvs_list[0].getEvaluator()
    # Flip the sign for metrics where smaller is better, so the max() below
    # always selects the best model regardless of metric direction.
    scorescale = 1 if ev.isLargerBetter() else -1
    model_name_scores = []
    for tvs in tvs_list:
        model = tvs.fit(train)
        test_pred = model.transform(test)
        score = ev.evaluate(test_pred) * scorescale
        model_name_scores.append((model, get_estimator_name(tvs.getEstimator()), score))
    best_model, best_name, best_score = max(model_name_scores, key=lambda triplet: triplet[2])
    print("\n\nBest model is %s with validation data %s score %f" % (best_name, ev.getMetricName(), best_score*scorescale))
    return best_model
# Hold out a quarter of the data for the error map in part 2b2.
fortrain, holdout = data.randomSplit([0.75, 0.25])
model = get_best_weather_model(fortrain)
print("\n\n\nBest parameters on test data:\n", get_best_tvs_model_params(model))
# Part 2b1: predict tmax on a 1-degree lat/lon grid using the elevation grid.
import elevation_grid as eg
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
import numpy as np
lat_range = range(-90, 90, 1)
lon_range = range(-180, 180, 1)
combo = []
for lat in lat_range:
    for lon in lon_range:
        elev = eg.get_elevation(lat, lon)
        combo.append((lat, lon, float(elev)))
# NOTE(review): relies on a module-level `spark` session, presumably
# provided by the weather_tools_mv star import; confirm.
dataset = spark.createDataFrame(combo,["latitude", "longitude", "elevation"])
pred = model.transform(dataset).collect()
collected_predictions = list(map(lambda row: row.asDict(), pred))
plt.figure(figsize=(16,12))
eq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,
          lat_0=0, lon_0=0)
# eq_map.drawcoastlines()
# eq_map.drawcountries()
eq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)
eq_map.drawmapboundary(fill_color='#3b3b3b')
eq_map.drawmeridians(np.arange(0, 360, 30))
eq_map.drawparallels(np.arange(-90, 90, 30))
lon = []
lat = []
val = []
for y in collected_predictions:
    lon.append(y['longitude'])
    lat.append(y['latitude'])
    val.append(y['tmax_pred'])
x, y = eq_map(lon, lat)
cs = eq_map.scatter(x, y, c=val, marker="o", cmap=cm.coolwarm)
cbar = eq_map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label('Max Temperature (in Celcius)')
plt.title('Predicted Heat Map')
plt.savefig('2b1_heat.png')
# Part 2b2: absolute prediction error on the held-out rows.
pred = model.transform(holdout).collect()
collected_predictions = list(map(lambda row: row.asDict(), pred))
plt.figure(figsize=(16,12))
eq_map = Basemap(projection='cyl', resolution = 'l', area_thresh = 1000.0,
          lat_0=0, lon_0=0)
# eq_map.drawcoastlines()
# eq_map.drawcountries()
eq_map.fillcontinents(color = '#202020', lake_color='#3b3b3b', zorder=0.5)
eq_map.drawmapboundary(fill_color='#3b3b3b')
eq_map.drawmeridians(np.arange(0, 360, 30))
eq_map.drawparallels(np.arange(-90, 90, 30))
lon = []
lat = []
val = []
for y in collected_predictions:
    lon.append(y['longitude'])
    lat.append(y['latitude'])
    val.append(abs(y['tmax_pred'] - y['tmax']))
x, y = eq_map(lon, lat)
cs = eq_map.scatter(x, y, c=val, marker="o", cmap=cm.Reds)
cbar = eq_map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label('Absolute Temperature Difference (in Celcius)')
plt.title('Regression Error Map')
plt.savefig('2b2_regression_error.png')
6,535 | d292de887c427e3a1b95d13cef17de1804f8f9ee | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)  # use Broadcom pin numbering
#led = 21
# BCM pin numbers of the three push buttons, with a label for each.
pins = [21, 25, 18]
names = ["First", "Second", "Third"]
# Buttons pull the input to ground when pressed, so enable the internal
# pull-ups: GPIO.input() reads False while the button is held down.
for x in range(len(pins)):
    GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)
#GPIO.setup(led, GPIO.OUT)
# Poll every button forever; the 0.2 s sleep doubles as a crude debounce.
while True:
    input_state = 0
    for i in range(len(pins)):
        input_state = GPIO.input(pins[i])
        if input_state == False:
            print('Button {0} Pressed'.format(names[i]))
            time.sleep(0.2)
            # if (i == 0):
            #     print("TURN ON LED")
            #     GPIO.output(led, 1)
            # if (i == 1):
            #     print("TURN OFF LED")
            #     GPIO.output(led, 0)
|
6,536 | 21ef8103a5880a07d8c681b2367c2beef727260f | import random
def take_second(element):
    """Sort-key helper: return the second field (the name) of a record."""
    _score, second_field = element[0], element[1]
    return second_field
import string
def get_random_name():
    """Return a random name made of 5 to 15 ASCII letters.

    Draws the length first, then one letter per position, so the RNG call
    sequence matches the original implementation exactly.
    """
    length = random.randint(5, 15)
    return "".join(random.choice(string.ascii_letters) for _ in range(length))
imenik = [(777, "zejneba"), (324, "fahro"), (23, "fatih"), (2334, "muamer"), (435, "kerim"),(4568,"zzzzzzz")]
print(sorted(imenik,key=take_second))

# Pad the phone book with 100k random entries, then keep it sorted by
# name so binary search over names is valid.
for i in range(100000):
    imenik.append((random.randint(1, 10000), get_random_name()))
imenik.sort(key=take_second)
print(imenik)

name = input('enter a name: ')

# Binary search by name. The original nudged min/max to `mid` itself and
# relied on a repeated-guess check to break out; the standard mid+1/mid-1
# bound updates terminate on their own.
min_index = 0
max_index = len(imenik) - 1
counter = 0
found_score = None
while min_index <= max_index:
    counter += 1
    mid_index = (min_index + max_index) // 2
    guess_score, guess_name = imenik[mid_index]
    if guess_name == name:
        found_score = guess_score
        print("your score is", guess_score)
        break
    elif name > guess_name:
        min_index = mid_index + 1
    else:
        max_index = mid_index - 1
if found_score is None:
    print("Not found")
print("Number of comparisons", counter)

print("after")
# Linear scan over the same data, for comparison-count contrast.
found = False
counter = 0
for score, entry_name in imenik:
    counter += 1
    if entry_name == name:
        # Bug fix: the original printed `guess_score` left over from the
        # binary search instead of this entry's own score.
        print("your score is", score)
        found = True
        break
if not found:
    print("Not found")
print("Number of comparisons after", counter)
|
6,537 | 93909ab98f1141940e64e079e09834ae5ad3995f | import requests
import time
import csv
import os
import pandas as pd
col_list1 = ["cardtype", "username_opensea", "address", "username_game"]
df1 = pd.read_csv("profiles.csv", usecols=col_list1)

# For every wallet address in profiles.csv, download its Gods Unchained
# card collection and dump it to "<user>.csv".
for j in range(0, len(df1)):
    print(j)
    # Per-card columns for this user. Renamed from the originals, several
    # of which shadowed the builtins `type`, `set` and `dict`.
    users = []
    protos = []
    purities = []
    print(df1['address'][j])
    url1 = "https://api.godsunchained.com/v0/card?user=" + df1['address'][j] + "&perPage=150000"
    print(url1)
    response = requests.request("GET", url1)
    data = response.json()
    number_cards = data['total']
    if number_cards != 0:
        for i in range(0, number_cards):
            record = data['records'][i]
            users.append(record['user'])
            protos.append(record['proto'])
            purities.append(record['purity'])
        # Per-proto detail lookup (name/effect/god/rarity/mana/type/set)
        # was disabled in the original; re-add one GET per card to
        # https://api.godsunchained.com/v0/proto/<proto> if those columns
        # are needed again.
        df = pd.DataFrame({
            'user': users,
            'proto_number': protos,
            'purity': purities,
        })
        path = 'C:\\Users\\...'  # TODO: point at a real output directory
        # Guarded by `number_cards != 0`, so users[0] always exists here.
        df.to_csv(os.path.join(path, str(users[0]) + ".csv"), index=False)
|
6,538 | ee57e6a1ccbec93f3def8966f5621ea459f3d228 | from distutils.core import setup
setup(
    name='json_config',
    version='0.0.01',  # NOTE(review): non-standard version string; PEP 440 form would be '0.0.1' -- confirm before publishing
    packages=['', 'test'],  # NOTE(review): '' packages the repository root and 'test' ships tests -- confirm intended
    url='',
    license='',
    author='craig.ferguson',
    author_email='',
    description='Simple Functional Config For Changing Environments'
)
|
6,539 | f60d02fb14364fb631d87fcf535b2cb5782e728f | from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import REQ, has_request_variables, webhook_view
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message
from zerver.models import UserProfile
FRESHPING_TOPIC_TEMPLATE_TEST = "Freshping"
FRESHPING_TOPIC_TEMPLATE = "{check_name}"
FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE = """
{request_url} has just become unreachable.
Error code: {http_status_code}.
""".strip()
FRESHPING_MESSAGE_TEMPLATE_UP = "{request_url} is back up and no longer unreachable."
@webhook_view("Freshping")
@has_request_variables
def api_freshping_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
body = get_body_for_http_request(payload)
subject = get_subject_for_http_request(payload)
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
def get_subject_for_http_request(payload: Dict[str, Any]) -> str:
    """Pick the Zulip topic: a fixed name for the "Webhook test" ping,
    otherwise the monitored check's own name."""
    event_data = payload["webhook_event_data"]
    if event_data["application_name"] == "Webhook test":
        return FRESHPING_TOPIC_TEMPLATE_TEST
    return FRESHPING_TOPIC_TEMPLATE.format(check_name=event_data["check_name"])
def get_body_for_http_request(payload: Dict[str, Any]) -> str:
    """Render the message body for a Freshping check-state event.

    NOTE(review): if `check_state_name` is neither "Reporting Error" nor
    "Available", `body` is never assigned and this raises
    UnboundLocalError -- confirm whether other states can occur and add
    explicit handling if so.
    """
    webhook_event_data = payload["webhook_event_data"]
    if webhook_event_data["check_state_name"] == "Reporting Error":
        body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**webhook_event_data)
    elif webhook_event_data["check_state_name"] == "Available":
        if webhook_event_data["application_name"] == "Webhook test":
            body = get_setup_webhook_message("Freshping")
        else:
            body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)
    return body
|
6,540 | 40d08bfa3286aa30b612ed83b5e9c7a29e9de809 | # -*- coding: utf-8 -*-
from euler.baseeuler import BaseEuler
from os import path, getcwd
def get_name_score(l, name):
    """Score `name` against the sorted list `l`: the 1-based position of
    the name multiplied by the sum of its letter values (A=1 .. Z=26).
    Assumes uppercase ASCII names."""
    idx = l.index(name) + 1
    # Generator expression: no need to materialize the letter values.
    val = sum(ord(c) - 64 for c in name)
    return idx * val
class Euler(BaseEuler):
    """Project Euler problem 22: total of all name scores in names.txt."""

    def solve(self):
        """Read, sort and score every name; return the grand total."""
        fp = path.join(getcwd(), 'euler/resources/names.txt')
        with open(fp, 'r') as f:
            # The file is one line of comma-separated, double-quoted names;
            # sorted() takes the iterable directly -- no list comp needed.
            names = sorted(f.read().replace('"', '').split(','))
        # Generator avoids building an intermediate list of scores.
        return sum(get_name_score(names, name) for name in names)

    @property
    def answer(self):
        return ('The total of all the name scores in the file is: %d'
                % self.solve())

    @property
    def problem(self):
        return '''
Project Euler Problem 22:

Using names.txt (right click and 'Save Link/Target As...'), a 46K text file
containing over five-thousand first names, begin by sorting it into
alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a
name score.

For example, when the list is sorted into alphabetical order, COLIN, which
is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,
COLIN would obtain a score of 938 * 53 = 49714.

What is the total of all the name scores in the file?
'''
|
6,541 | 0069a61127c5968d7014bdf7f8c4441f02e67df0 | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""`kedro_viz.launchers.jupyter` provides line_magic to launch the viz server
from a jupyter notebook.
"""
# pragma: no cover
import logging
import multiprocessing
import socket
from contextlib import closing
from functools import partial
from time import sleep, time
from typing import Any, Callable, Dict
import requests
from IPython.core.display import HTML, display
from kedro_viz.server import run_server
_VIZ_PROCESSES: Dict[str, int] = {}
logger = logging.getLogger(__name__)
class WaitForException(Exception):
    """Raised by `_wait_for` when `func` does not return the expected
    result within the allotted timeout."""
def _wait_for(
    func: Callable,
    expected_result: Any = True,
    timeout: int = 10,
    print_error: bool = True,
    sleep_for: int = 1,
    **kwargs,
) -> None:
    """
    Run specified function until it returns expected result until timeout.

    Args:
        func (Callable): Specified function
        expected_result (Any): result that is expected. Defaults to True.
        timeout (int): Time out in seconds. Defaults to 10.
        print_error (boolean): whether any exceptions raised should be logged.
            Defaults to True.
        sleep_for (int): Execute func every specified number of seconds.
            Defaults to 1.
        **kwargs: Arguments to be passed to func

    Raises:
        WaitForException: if func doesn't return expected result within the
            specified time
    """
    end = time() + timeout
    while time() <= end:
        try:
            retval = func(**kwargs)
        except Exception as err:  # pylint: disable=broad-except
            if print_error:
                logger.error(err)
        else:
            # Success path: bail out as soon as func returns what we expect.
            if retval == expected_result:
                return None
        sleep(sleep_for)
    raise WaitForException(
        f"func: {func}, didn't return {expected_result} within specified timeout: {timeout}"
    )
def _check_viz_up(port):  # pragma: no cover
    """Return True when the Kedro-Viz server on 127.0.0.1:`port` answers
    with HTTP 200, False when the connection is refused."""
    url = f"http://127.0.0.1:{port}/"
    try:
        return requests.get(url).status_code == 200
    except requests.ConnectionError:
        return False
def _allocate_port(start_at: int, end_at: int = 65535) -> int:
    """Pick a TCP port for Kedro-Viz in [start_at, end_at].

    Prefers a port already allocated to one of our own viz processes;
    otherwise probes each candidate and returns the first one nothing is
    listening on.
    """
    candidates = range(start_at, end_at + 1)
    already_ours = sorted(_VIZ_PROCESSES.keys() & set(candidates))
    if already_ours:  # reuse one of already allocated ports
        return already_ours[0]

    socket.setdefaulttimeout(2.0)  # seconds
    for port in candidates:
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            # connect_ex != 0 means nothing accepted the connection,
            # i.e. the port is free.
            if sock.connect_ex(("127.0.0.1", port)) != 0:
                return port

    raise ValueError(
        "Cannot allocate an open TCP port for Kedro-Viz in a range "
        "from {} to {}".format(start_at, end_at)
    )
# pylint: disable=unused-argument,missing-type-doc
def run_viz(port: int = None, line=None, local_ns=None) -> None:
    """
    Line magic function to start kedro viz. It calls a kedro viz in a process and displays it in
    the Jupyter notebook environment.

    Args:
        port: TCP port that viz will listen to. Defaults to 4141.
        line: line required by line magic interface.
        local_ns: Local namespace with local variables of the scope where the line magic is invoked.
            For more details, please visit:
            https://ipython.readthedocs.io/en/stable/config/custommagics.html
    """
    port = port or 4141  # Default argument doesn't work in Jupyter line magic.
    port = _allocate_port(start_at=port)
    # _allocate_port may hand back a port we already own; restart that server.
    if port in _VIZ_PROCESSES and _VIZ_PROCESSES[port].is_alive():
        _VIZ_PROCESSES[port].terminate()
    if local_ns is not None and "project_path" in local_ns:  # pragma: no cover
        target = partial(run_server, project_path=local_ns["project_path"])
    else:
        target = run_server
    viz_process = multiprocessing.Process(
        target=target, daemon=True, kwargs={"port": port}
    )
    viz_process.start()
    # NOTE(review): _VIZ_PROCESSES is annotated Dict[str, int] at module
    # level but actually maps int port -> Process -- confirm and fix the
    # annotation.
    _VIZ_PROCESSES[port] = viz_process
    # Block until the server responds, then embed it in the notebook.
    _wait_for(func=_check_viz_up, port=port)
    wrapper = """
            <html lang="en"><head></head><body style="width:100; height:100;">
            <iframe src="http://127.0.0.1:{}/" height=500 width="100%"></iframe>
            </body></html>""".format(
        port
    )
    display(HTML(wrapper))
|
6,542 | 4f81eb7218fa1341bd7f025a34ec0677d46151b0 | from setuptools import find_packages, setup
NAME = 'compoelem'
VERSION = "0.1.1"
setup(
    name=NAME,
    packages=['compoelem', 'compoelem.generate', 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect', 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],
    include_package_data=True,
    version=VERSION,
    description='Library for generating and comparing compositional elements from art historic images.',
    author='Tilman Marquart',
    license='MIT',
    python_requires='>=3.8',
    # NOTE(review): 'typing' has been stdlib since 3.5; with
    # python_requires>=3.8 the PyPI backport is unnecessary -- confirm it
    # can be dropped from install_requires.
    install_requires=['opencv-python','numpy','typing','shapely','pyyaml','torch','torchvision','yacs','scikit-image', 'pandas'],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    test_suite='tests',
)
6,543 | b090e92fe62d9261c116529ea7f480daf8b3e84e | #!/usr/bin/python3
def square_matrix_simple(matrix=[]):
    '''Return a new matrix where every element is the square of the
    corresponding element of `matrix` (the original docstring said
    "square root", which the code never computed).

    The input matrix is not modified; the default `[]` is never mutated.
    '''
    # Nested comprehension replaces the manual while-loop index bookkeeping.
    return [[value ** 2 for value in row] for row in matrix]
|
6,544 | 207bb7c79de069ad5d980d18cdfc5c4ab86c5197 | def slices(series, length):
if length <= 0:
raise ValueError("Length has to be at least 1")
elif length > len(series) or len(series) == 0:
raise ValueError("Length has to be larger than len of series")
elif length == len(series):
return [series]
else:
result = []
for i in range(0, len(series) - length + 1):
result.append(series[i:i+length])
return result
|
6,545 | b3f72bc12f85724ddcdaf1c151fd2a68b29432e8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MQTT handler for Event subscriptions.
"""
import json
import time
import tornado.gen
import tornado.ioloop
from hbmqtt.mqtt.constants import QOS_0
from tornado.queues import QueueFull
from wotpy.protocols.mqtt.handlers.base import BaseMQTTHandler
from wotpy.protocols.mqtt.handlers.subs import InteractionsSubscriber
from wotpy.utils.utils import to_json_obj
from wotpy.wot.enums import InteractionTypes
class EventMQTTHandler(BaseMQTTHandler):
    """MQTT handler for Event subscriptions.

    Subscribes to every ExposedThing Event on the server and republishes
    each emission on a per-event MQTT topic; the subscription set is
    refreshed periodically.
    """

    DEFAULT_CALLBACK_MS = 2000  # period of the subscription-refresh callback
    DEFAULT_JITTER = 0.2  # jitter fraction applied to that period

    def __init__(self, mqtt_server, qos=QOS_0, callback_ms=None):
        super(EventMQTTHandler, self).__init__(mqtt_server)
        callback_ms = self.DEFAULT_CALLBACK_MS if callback_ms is None else callback_ms
        self._qos = qos
        self._callback_ms = callback_ms
        self._subs = {}
        self._interaction_subscriber = InteractionsSubscriber(
            interaction_type=InteractionTypes.EVENT,
            server=self.mqtt_server,
            on_next_builder=self._build_on_next)

        @tornado.gen.coroutine
        def refresh_subs():
            self._interaction_subscriber.refresh()

        self._periodic_refresh_subs = tornado.ioloop.PeriodicCallback(
            refresh_subs, self._callback_ms, jitter=self.DEFAULT_JITTER)

    def build_event_topic(self, thing, event):
        """Returns the MQTT topic for Event emissions."""
        return "{}/event/{}/{}".format(
            self.servient_id,
            thing.url_name,
            event.url_name)

    @tornado.gen.coroutine
    def init(self):
        """Initializes the MQTT handler.
        Called when the MQTT runner starts."""
        self._interaction_subscriber.refresh()
        self._periodic_refresh_subs.start()
        yield None

    @tornado.gen.coroutine
    def teardown(self):
        """Destroys the MQTT handler.
        Called when the MQTT runner stops."""
        self._periodic_refresh_subs.stop()
        self._interaction_subscriber.dispose()
        yield None

    def _build_on_next(self, exp_thing, event):
        """Builds the on_next function to use when subscribing to the given Event."""
        topic = self.build_event_topic(exp_thing, event)

        def on_next(item):
            try:
                data = {
                    "name": item.name,
                    "data": to_json_obj(item.data),
                    "timestamp": int(time.time() * 1000)
                }
                # Best-effort delivery: drop the emission when the publish
                # queue is full rather than blocking the event stream.
                self.queue.put_nowait({
                    "topic": topic,
                    "data": json.dumps(data).encode(),
                    "qos": self._qos
                })
            except QueueFull:
                pass

        return on_next
|
6,546 | 829e23ce2388260467ed159aa7e1480d1a3d6045 | """I referred below sample.
https://ja.wikipedia.org/wiki/Adapter_%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3#:~:text=Adapter%20%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3%EF%BC%88%E3%82%A2%E3%83%80%E3%83%97%E3%82%BF%E3%83%BC%E3%83%BB%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3%EF%BC%89,%E5%A4%89%E6%9B%B4%E3%81%99%E3%82%8B%E3%81%93%E3%81%A8%E3%81%8C%E3%81%A7%E3%81%8D%E3%82%8B%E3%80%82
"""
from abc import ABC, abstractmethod
class ProductPrice(ABC):
    """Target: the dollar-price interface that client code programs against."""

    @abstractmethod
    def get_doll(self) -> float:
        """Return the price in US dollars."""
        pass
class Product:
    """Adaptee: a product whose cost is only available in Japanese yen."""

    def __init__(self, cost: int) -> None:
        self.__price_yen = cost

    def get_yen(self) -> int:
        """Return the cost in yen."""
        return self.__price_yen
class ProductAdapter(ProductPrice):
    """Adapter: exposes a yen-priced Product through the dollar interface."""

    DOLL_RATE: int = 110  # yen per US dollar

    def __init__(self, product: Product) -> None:
        self.__wrapped = product

    def get_doll(self) -> float:
        """Convert the wrapped product's yen price to dollars."""
        return self.__wrapped.get_yen() / self.DOLL_RATE
if __name__ == '__main__':
    # Demo: wrap a yen-priced product so it can be quoted in dollars.
    product = Product(cost=1000)
    print(f'product cost {product.get_yen()} yen')
    adapted_product = ProductAdapter(product)
    print(f'product cost {adapted_product.get_doll():.1f} doll')
|
6,547 | 6ee71cf61ae6a79ec0cd06f1ddc7dc614a76c7b9 | import os
# Flask application configuration module.
_basedir = os.path.abspath(os.path.dirname(__file__))  # directory of this file

DEBUG = True  # NOTE(review): should be False in production
# NOTE(review): secret keys committed to source control should be rotated
# and loaded from the environment instead.
SECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')
SQLALCHEMY_ECHO = True  # log every SQL statement
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 8
CSRF_ENABLED = True
CSRF_SESSION_KEY = '8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A'
# NOTE(review): '%s' placeholder is never interpolated here -- confirm the
# consumer fills it in (e.g. with the app root).
UPLOAD_FOLDER = '%s/images'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
|
6,548 | 253804644e366382a730775402768bc307944a19 | import unittest
'''
시험 문제 2) 장식자 구현하기
- 다수의 인자를 받아, 2개의 인자로 변환하여 함수를 호출토록 구현
- 첫번째 인자 : 홀수의 합
- 두번째 인자 : 짝수의 합
모든 테스트가 통과하면, 다음과 같이 출력됩니다.
쉘> python final_2.py
...
----------------------------------------------------------------------
Ran 3 tests in 0.000s
OK
'''
def divider(fn):
    """Decorator: collapse an arbitrary number of integer arguments into
    exactly two -- the sum of the odd values and the sum of the even
    values -- before calling the wrapped function."""
    def wrapper(*numbers):
        odd_total = 0
        even_total = 0
        for value in numbers:
            if value % 2:
                odd_total += value
            else:
                even_total += value
        return fn(odd_total, even_total)
    return wrapper
########################################
#
# Do not modify below this line (exam scaffolding).
#
########################################
@divider
def mysum(x, y):
    # Receives (odd_sum, even_sum) from the decorator.
    return x + y
@divider
def mymultiply(x, y):
    return x * y
@divider
def mypow(x, y):
    return x ** y
class TestFinalExam(unittest.TestCase):
    """Exercises the `divider` decorator via the three wrapped functions."""

    def k__test_mysum(self):
        # NOTE(review): the `k__` prefix hides this method from unittest
        # discovery -- presumably disabled on purpose; confirm.
        self.assertEqual(mysum(1, 2), 3)
        self.assertEqual(mysum(1, 2, 3), 6)
        self.assertEqual(mysum(1, 2, 3, 4), 10)
        self.assertEqual(mysum(1, 2, 3, 4, 5), 15)
        self.assertEqual(mysum(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), 55)
        self.assertEqual(mysum(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 100, 1001), 1156)

    def test_mymultiply(self):
        # self.assertEqual(mymultiply(1, 2), 2)  # 1 * 2
        # self.assertEqual(mymultiply(1, 2, 3), 8)  # (1+3) * 2
        # self.assertEqual(mymultiply(1, 2, 3, 4), 24)  # (1+3) * (2+4)
        # self.assertEqual(mymultiply(1, 2, 3, 4, 5), 54)  # (1+3+5) * (2+4)
        # self.assertEqual(mymultiply(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), 750)
        self.assertEqual(mymultiply(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 100, 1001), 133380)
        # (1 + 3 + 5 + 7 + 8 + 9) * (2 + 4 + 6 + 8 + 10 + 100 + 1001)

    def test_mypow(self):
        # self.assertEqual(mypow(1, 2), 1)  # 1 ** 2
        # self.assertEqual(mypow(1, 2, 3), 16)  # (1+3) ** 2
        # self.assertEqual(mypow(1, 2, 3, 4), 4096)  # (1+3) ** (2+4)
        # self.assertEqual(mypow(1, 2, 3, 4, 5), 531441)  # (1+3+5) ** (2+4)
        # self.assertEqual(mypow(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), 867361737988403547205962240695953369140625)
        pass
if __name__ == '__main__':
    unittest.main()
|
6,549 | 0d37b6f0ea8854f9d4d4cd2ff235fa39bab7cc12 | import sys
def digit_sum(x):
    """Return the sum of the decimal digits of *x* (sign ignored).

    Fix: the old loop never terminated for negative input, because
    Python's floor division drives negatives toward -1, not 0.
    """
    x = abs(x)
    total = 0
    while x != 0:
        total += x % 10
        x //= 10
    return total
for i in sys.stdin:
test_num = int( i )
if test_num == 0:
break
count = 11
while digit_sum(test_num) != digit_sum(count * test_num):
count = count + 1
print('{}'.format(count)) |
6,550 | bcc76e4dbcc191e7912085cbb92c5b0ebd2b047b | from datetime import datetime
from pymongo import MongoClient
from bson import ObjectId
from config import config
class Database(object):
    """Thin MongoDB wrapper that stamps created/updated timestamps."""

    def __init__(self):
        # Connection parameters come from the shared config module.
        self.client = MongoClient(config['db']['url'])
        self.db = self.client[config['db']['name']]

    def insert(self, element, collection_name):
        """Insert *element* (stamping created/updated); return its id as str."""
        element["created"] = datetime.now()
        element["updated"] = datetime.now()
        result = self.db[collection_name].insert_one(element)
        return str(result.inserted_id)

    def find(self, criteria, collection_name, projection=None, sort=None, limit=0, cursor=False):
        """Query a collection.

        Returns a raw cursor when *cursor* is True; otherwise a list of
        documents with their '_id' converted to str for JSON serialization.
        """
        if "_id" in criteria:
            criteria["_id"] = ObjectId(criteria["_id"])
        results = self.db[collection_name].find(filter=criteria, projection=projection, limit=limit, sort=sort)
        if cursor:
            return results
        documents = list(results)
        for document in documents:
            if "_id" in document:
                document["_id"] = str(document["_id"])
        return documents

    def find_by_id(self, id, collection_name):
        """Fetch one document by its id string.

        NOTE(review): returns True (``not None``) when nothing matches --
        this quirk is preserved because callers may truth-test the result.
        """
        document = self.db[collection_name].find_one({"_id": ObjectId(id)})
        if document is None:
            return not document
        if "_id" in document:
            document["_id"] = str(document["_id"])
        return document

    def update(self, id, element, collection_name):
        """$set the fields of *element* on the matching document.

        Returns a success message when exactly one document matched,
        otherwise None (implicitly).
        """
        element["updated"] = datetime.now()
        result = self.db[collection_name].update_one(
            {"_id": ObjectId(id)}, {"$set": element})
        if result.matched_count == 1:
            return "Record Successfully Updated"

    def delete(self, id, collection_name):
        """Delete one document by id; return True when something was removed."""
        result = self.db[collection_name].delete_one({"_id": ObjectId(id)})
        return bool(result.deleted_count)
|
6,551 | 326f1b5bee8f488382a76fcc5559f4ea13734f21 | from scrapy import cmdline
cmdline.execute("scrapy crawl rapo.com".split())
|
6,552 | e364ba45513167966fe50e31a01f552ccedec452 | from ethereum.common import mk_transaction_sha, mk_receipt_sha
from ethereum.exceptions import InsufficientBalance, BlockGasLimitReached, \
InsufficientStartGas, InvalidNonce, UnsignedTransaction
from ethereum.messages import apply_transaction
from ethereum.slogging import get_logger
from ethereum.utils import encode_hex
from sharding.receipt_consuming_tx_utils import apply_shard_transaction
from sharding.collation import Collation, CollationHeader
log = get_logger('sharding.shard_state_transition')
def mk_collation_from_prevstate(shard_chain, state, coinbase):
    """Make collation from previous state
    (refer to ethereum.common.mk_block_from_prevstate)
    """
    # state = state or shard_chain.state
    # Build a header-only collation anchored to *state* via prev_state_root.
    collation = Collation(CollationHeader())
    collation.header.shard_id = shard_chain.shard_id
    collation.header.prev_state_root = state.trie.root_hash
    collation.header.coinbase = coinbase
    collation.transactions = []
    return collation
def add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None):
    """Add transactions to a collation
    (refer to ethereum.common.add_transactions)

    Pops transactions from *txqueue* and applies them to *shard_state*;
    transactions that fail with a known validation error are dropped
    (not re-queued).
    """
    if not txqueue:
        return
    pre_txs = len(collation.transactions)
    log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(txqueue.txs), pre_txs))
    while 1:
        # Respect the remaining gas budget when picking the next tx.
        tx = txqueue.pop_transaction(
            max_gas=shard_state.gas_limit - shard_state.gas_used,
            min_gasprice=min_gasprice
        )
        if tx is None:
            break
        try:
            apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)
            collation.transactions.append(tx)
        except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas,
                InvalidNonce, UnsignedTransaction) as e:
            # Invalid transactions are logged and skipped.
            log.info(str(e))
            pass
    log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))
def update_collation_env_variables(state, collation):
    """Update collation variables into the state
    (refer to ethereum.common.update_block_env_variables)
    """
    # Only the coinbase is mirrored into the state for now.
    state.block_coinbase = collation.header.coinbase
def set_execution_results(state, collation):
    """Set state root, receipt root, etc
    (ethereum.pow.common.set_execution_results)

    Must run after all transactions have been applied to *state*.
    """
    collation.header.receipts_root = mk_receipt_sha(state.receipts)
    collation.header.tx_list_root = mk_transaction_sha(collation.transactions)
    # Notice: commit state before assigning
    state.commit()
    collation.header.post_state_root = state.trie.root_hash
    # TODO: Don't handle in basic sharding currently
    # block.header.gas_used = state.gas_used
    # block.header.bloom = state.bloom
    log.info('Collation pre-sealed, %d gas used' % state.gas_used)
def validate_transaction_tree(collation):
    """Validate that the header's tx_list_root matches the transaction list.

    Returns True on success; raises ValueError on mismatch.
    (refer to ethereum.common.validate_transaction_tree)
    """
    computed_root = mk_transaction_sha(collation.transactions)
    if collation.header.tx_list_root == computed_root:
        return True
    raise ValueError("Transaction root mismatch: header %s computed %s, %d transactions" %
                     (encode_hex(collation.header.tx_list_root),
                      encode_hex(computed_root),
                      len(collation.transactions)))
def verify_execution_results(state, collation):
    """Verify the results by Merkle Proof
    (refer to ethereum.common.verify_execution_results)

    Raises ValueError if the state root or receipts root in the header do
    not match the locally recomputed values; returns True otherwise.
    """
    # Commit pending writes so trie.root_hash reflects the applied txs.
    state.commit()
    validate_transaction_tree(collation)
    if collation.header.post_state_root != state.trie.root_hash:
        raise ValueError('State root mismatch: header %s computed %s' %
                         (encode_hex(collation.header.post_state_root), encode_hex(state.trie.root_hash)))
    if collation.header.receipts_root != mk_receipt_sha(state.receipts):
        raise ValueError('Receipt root mismatch: header %s computed %s, computed %d, %d receipts' %
                         (encode_hex(collation.header.receipts_root), encode_hex(mk_receipt_sha(state.receipts)),
                          state.gas_used, len(state.receipts)))
    return True
def finalize(state, coinbase):
    """Apply rewards and commit
    (refer to ethereum.pow.consensus.finalize)
    """
    # Credit the fixed collator reward to the coinbase account.
    delta = int(state.config['COLLATOR_REWARD'])
    state.delta_balance(coinbase, delta)
|
6,553 | deaaf7620b9eba32149f733cd543399bdc2813a1 |
import os
import requests
import json
from web import *
from libs_support import *
from rss_parser import *
from database import *
class Solr_helper:
    """ Helper for the automatic data-update system - pushes newly crawled
    documents into Solr (via post.jar or the HTTP update handler) at fixed
    intervals """
    def __init__(self, db_name = "btl-tktdtt", domain = "localhost", port = 8983, solr_home = "."):
        self.server_db_name = db_name
        self.server_port = port
        self.server_domain = domain
        # NOTE(review): server_db_name is assigned twice (same value) -- harmless.
        self.server_db_name = db_name
        #default
        self.set_solr_home(solr_home)
    # Solr installation settings
    def set_post_tool(self, path_tool):
        self.server_post_tool = path_tool
    def set_solr_home(self, path_home):
        # Normalise: strip a trailing slash, then derive the post.jar path.
        if(path_home.endswith("/")): path_home = path_home[:-1]
        self.server_solr_home = path_home
        self.server_post_tool = path_home +"/example/exampledocs/post.jar"
    # Push a JSON document file into Solr using the java post tool
    def update_use_tool(self, path_file_json_data, type_update="text/json"):
        # use java tool
        cmd_update_data = "java -Dtype={2} -Durl=http://{0}:{1}/solr/{3}/update -jar {5} {4}" \
            .format(self.server_domain, self.server_port, type_update, self.server_db_name, path_file_json_data,
                    self.server_post_tool)
        print (cmd_update_data)
        # os.system(cmd_update_data)
    # Push a JSON payload into Solr via the HTTP update handler
    def update(self, data_json):
        # post paterm: curl 'http://localhost:8983/solr/testBTL/update/json/docs' -H 'Content-type:application/json' -d '[{},{}]'
        # use Data with Index Handlers (DIH) Http post
        url = "http://{0}:{1}/solr/{2}/update/json/docs" \
            .format(self.server_domain, self.server_port, self.server_db_name)
        headers = dict()
        headers['Content-type'] = 'application/json'
        try:
            # NOTE(review): r.text is read after r.close(); requests normally
            # caches the body, but confirm nothing streams here.
            r = requests.post(url=url,data=data_json,headers=headers)
            r.close()
            return r.text # .encode('utf-8', 'inorge')
        except Exception, e:
            print('Exception' + str(e))
            return None
    def reload(self):
        # post paterm: curl "http://localhost:8983/solr/admin/cores?action=RELOAD&core=mycore"
        # use Data with Index Handlers (DIH) Http post
        url = "http://{0}:{1}/solr/admin/cores?action=RELOAD&core={2}" .format(self.server_domain, self.server_port,self.server_db_name)
        try:
            r = requests.post(url=url)
            r.close()
            return r.text # .encode('utf-8', 'inorge')
        except Exception, e:
            print('Exception' + str(e))
            return None
def crawl_data():
    # Crawl configured RSS feeds, serialize articles to JSON and push to Solr.
    max_count_web = 500
    rss_page_links = [
        #"http://vietbao.vn/vn/rss",
        #"http://vnexpress.net/rss",
        "http://dantri.com.vn/rss",
        #"http://vtv.vn/rss",
        "http://techtalk.vn/"
    ]
    web_mannual_page_links = [
        # "vtv.vn" ,
        "kenh14.vn"
    ]
    # Configure the crawl filters
    # Web_filter.set_last_time("2016-10-26, 22:20:08+07:00")  # only articles newer than this time
    # Web_filter.set_limit_time("2016-10-26, 22:20:08+07:00", "2016-10-26, 23:20:08+07:00")  # articles inside a time window
    Web_filter.set_max_count_web_each_domain(10000) # cap pages per domain (older comment said 1000; code uses 10000)
    Web_filter.set_max_count_web_each_sublabel(100) # cap pages per label within a domain
    # Sites that expose RSS feeds
    data = "["
    for link_rss in rss_page_links:
        parser = rss_parser(link_rss)
        webs = parser.get_list_web()
        for web_x in webs:
            data += (web_x.get_json()+",")
            # web_x.write_to_file('/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/vietnam-news/data-train')
    if data.__len__() > 1:
        data = data[:-1]+"]"
    # NOTE(review): the update below runs even when nothing was crawled, in
    # which case *data* is still the bare "[" string -- confirm intended.
    solr = Solr_helper(db_name="btl-tktdtt")
    solr.set_solr_home("/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/solr-6.2.1")
    print (solr.update(data))
    print (solr.reload())
def query():
    # Placeholder: querying is done directly against Solr; example URLs below.
    # http://localhost:8983/solr/btl-tktdtt/select?indent=on&q=*:*&wt=json
    # http://localhost:8983/solr/btl-tktdtt/select?q=*:*&sort=dist(0,%2010,%2010)%20desc
    # http://localhost:8983/solr/btl-tktdtt/select?q=title:Thiên thần+url:thien-than
    None
if __name__ == "__main__":
    # Ad-hoc manual entry point; most of this block is commented-out scratch
    # kept from earlier experiments.
    t = 1
    t = t + 1
    solr = Solr_helper( db_name = "btl-tktdtt")
    solr.set_solr_home("/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/solr-6.2.1")
    # # solr.update("/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/vietnam-news/data-train/techtalk/Cong\ nghe/31fa871c7d521106e28c45f567a63445c33e1186.json")
    #
    # data_test = []
    # data_test.append({
    #     "code": "55421c7d521106e28c45f567a63445c33e118744446",
    #     "title": "test dddd vcc c dsf" ,
    #     "url": "http://techtalk.vn/van-de-da-ngon-ngu-trong-angularjs.html",
    #     "labels": "techtalk/Cong nghe",
    #     "content": "tset content ",
    #     "image_url": "",
    #     "date": "2016-11-14, 12:00:02+00:00"
    # })
    # data_test.append({
    #     "code": "12345651717ebecaeb1c179522eff5dcc19c86ce8",
    #     "title": "test title ",
    #     "url": "http://techtalk.vn/tim-hieu-ve-middleware-trong-expressjs.html",
    #     "labels": "techtalk/Cong nghe",
    #     "content": "test ddddd content ",
    #     "image_url": "",
    #     "date": "2016-11-13, 01:00:14+00:00"
    # })
    crawl_data()
    # data_json = (json.dumps(data_test,indent=4, separators=(',', ': '), ensure_ascii=False))
    # solr.update(data_json)
    # print (solr.reload())
|
6,554 | 268c36f6fb99383ea02b7ee406189ffb467d246c | import re
import requests
def download_image(url: str) -> bool:
    """Download every <img> src referenced by the page at *url* into the
    current directory.

    Returns False when the page itself cannot be fetched, True otherwise.

    NOTE(review): relative image links (src="/img/a.png") are not resolved
    against *url* and will fail to download -- consider urllib.parse.urljoin.
    """
    img_tag_regex = r"""<img.*?src="(.*?)"[^\>]+>"""
    response = requests.get(url)
    if response.status_code != 200:
        return False
    for link in re.findall(img_tag_regex, response.text):
        resp = requests.get(link)
        if resp.status_code != 200:
            # Fix: skip broken image links instead of saving error pages.
            continue
        # Fix: the old code used the full URL path as the file name, which
        # raises FileNotFoundError for any nested path; save the basename.
        filename = link.rstrip("/").rsplit("/", 1)[-1] or "image"
        with open(filename, "wb") as file:
            file.write(resp.content)
    return True
|
6,555 | d35d26cc50da9a3267edd2da706a4b6e653d22ac | import subprocess
class Audio:
    """Owns the SoX voice-effect subprocess and the PulseAudio plumbing
    (a null sink named Lyrebird-Output plus a remapped virtual source)."""

    def __init__(self):
        # The running sox subprocess, or None while stopped.
        self.sox_process = None

    def kill_sox(self, timeout=1):
        """Stop the sox process: terminate gracefully, then kill if needed."""
        if self.sox_process is not None:
            self.sox_process.terminate()
            try:
                self.sox_process.wait(timeout=timeout)
            except subprocess.TimeoutExpired:
                # Graceful shutdown failed; force it.
                self.sox_process.kill()
                self.sox_process.wait(timeout=timeout)
            self.sox_process = None

    def run_sox(self, scale, preset, buffer=17):
        '''
        Builds a sox command from a preset object and launches the process.

        scale  -- pitch shift amount (multiplied by 100 for sox's "pitch")
        preset -- object exposing volume_boost and downsample_amount
        buffer -- sox --buffer size; lower values reduce latency.
                  Fix: this parameter used to be silently overwritten with a
                  hard-coded 17, so callers could never change it; the old
                  experimental value is now simply the default.
        '''
        multiplier = 100
        command_effects = []
        command_effects += ["pitch", str(scale * multiplier)]
        # Volume boosting
        if preset.volume_boost != None:
            command_effects += ["vol", str(preset.volume_boost) + "dB"]
        else:
            # Fix a bug where SoX uses the last given volume
            command_effects += ["vol", "0"]
        # Downsampling
        if preset.downsample_amount != None:
            command_effects += ["downsample", str(preset.downsample_amount)]
        else:
            # Append downsample of 1 to fix a bug where the downsample isn't being reverted
            # when we disable the effect with it on.
            command_effects += ["downsample", "1"]
        command = ["sox", "--buffer", str(buffer), "-q", "-t", "pulseaudio", "default", "-t", "pulseaudio", "Lyrebird-Output"] + command_effects
        self.sox_process = subprocess.Popen(command)

    def get_sink_name(self, tuple):
        """Return the value of a (key, value) attribute pair when the key
        names a sink or source; None otherwise.

        NOTE: the parameter shadows the builtin ``tuple``; the name is kept
        so keyword callers keep working.
        """
        if tuple[0] in ("sink_name", "source_name"):
            return tuple[1]
        return None

    def load_pa_modules(self):
        """Create the Lyrebird null sink and remapped virtual source.

        The embedded double quotes survive str.split(' ') across two argv
        entries and are re-joined by pactl's module-argument handling, so
        the quoted descriptions keep their space -- do not "fix" the quoting.
        """
        self.null_sink = subprocess.check_call(
            'pactl load-module module-null-sink sink_name=Lyrebird-Output node.description="Lyrebird Output"'.split(' ')
        )
        self.remap_sink = subprocess.check_call(
            'pactl load-module module-remap-source source_name=Lyrebird-Input master=Lyrebird-Output.monitor node.description="Lyrebird Virtual Input"'\
                .split(' ')
        )

    def get_pactl_modules(self):
        '''
        Parses `pactl list short` into tuples containing the module ID,
        the module type and the attributes of the module. It is designed
        only for named modules and as such junk data may be included in
        the returned list.

        Returns an array of tuples that take the form:
            (module_id (str), module_type (str), attributes (attribute tuples))
        The attribute tuples: (key (str), value (str))

        An example output might look like:
        [
            ( '30', 'module-null-sink', [('sink_name', 'Lyrebird-Output')] ),
            ( '31', 'module-remap-source', [('source_name', 'Lyrebird-Input'), ('master', 'Lyrebird-Output.monitor')] )
        ]
        '''
        pactl_list = subprocess.run(["pactl", "list", "short"], capture_output=True, encoding="utf8")
        data = []
        for line in pactl_list.stdout.split("\n"):
            info = line.split("\t")
            if len(info) <= 2:
                continue
            if info[2] and len(info[2]) > 0:
                # Each attribute is a space-separated key=value pair.
                key_values = list(map(lambda key_value: tuple(key_value.split("=")), info[2].split(" ")))
                data.append((info[0], info[1], key_values))
            else:
                data.append((info[0], info[1], []))
        return data

    def unload_pa_modules(self):
        '''
        Unloads all Lyrebird null sinks.
        '''
        modules = self.get_pactl_modules()
        lyrebird_module_ids = []
        for module in modules:
            if len(module) < 3:
                continue
            if len(module[2]) < 1:
                continue
            # Only unload modules whose first attribute names our sink/source.
            if module[1] == "module-null-sink":
                sink_name = self.get_sink_name(module[2][0])
                if sink_name == "Lyrebird-Output":
                    lyrebird_module_ids.append(module[0])
            elif module[1] == "module-remap-source":
                sink_name = self.get_sink_name(module[2][0])
                if sink_name == "Lyrebird-Input":
                    lyrebird_module_ids.append(module[0])
        for id in lyrebird_module_ids:
            subprocess.run(["pactl", "unload-module", str(id)])
|
6,556 | afe63f94c7107cf79e57f695df8543e0786a155f | def getGC(st):
n = 0
for char in st:
if char == 'C' or char == 'G':
n += 1
return n
while True:
try:
DNA = input()
ln = int(input())
maxLen = 0
subDNA = ''
for i in range(len(DNA) - ln + 1):
sub = DNA[i : i + ln]
if getGC(sub) > maxLen:
maxLen = getGC(sub)
subDNA = sub
print(subDNA)
except:
break |
6,557 | a8c59f97501b3f9db30c98e334dbfcffffe7accd | import simple_map
import pickle
import os
import argparse
import cv2
argparser = argparse.ArgumentParser()
argparser.add_argument("--src", type=str, required=True,
help="source directory")
argparser.add_argument("--dst", type=str, required=True,
help="destination directory")
argparser.add_argument("--ref", type=str, required=False, default="train_raw",
help="global reference directory (default: train_raw)")
args = argparser.parse_args()
def get_reference():
    """Return (northing, easting) of the first json (alphabetically) in the
    global reference directory; all maps are aligned to this origin."""
    # NOTE(review): local names 'json' and 'file' shadow builtins/stdlib.
    json = sorted([os.path.join(args.ref, file) for file in os.listdir(args.ref) if file.endswith(".json")])[0]
    smap = simple_map.SimpleMap(json)
    return smap.northing, smap.easting
def construct_maps(jsons):
    """Convert each route json into pickled per-image samples under args.dst."""
    cnt = 0
    # get first map as reference
    ref_globals = get_reference()
    for i in range(len(jsons)):
        smap = simple_map.SimpleMap(jsons[i], ref_globals)
        (x, y), (x_real, y_real), imgs = smap.get_route()
        # resize image (to 20% per axis); each entry is a tuple of camera images
        imgs = [tuple(map(lambda x: cv2.resize(x, None, fx=0.2, fy=0.2), img)) for img in imgs]
        # Keep every 10th frame; emit one sample per camera (3 per frame).
        for j in range(0, len(imgs), 10):
            for k in range(3):
                cnt += 1
                path = os.path.join(args.dst, str(cnt))
                output_file = open(path, 'wb')
                obj = {"x_steer": x[j], "y_steer": y[j],
                       "x_utm": x_real[j], "y_utm": y_real[j],
                       "img": imgs[j][k]}
                pickle.dump(obj, output_file)
                output_file.close()
        print("* Video %d done, %s" %( i, jsons[i]))
def main():
    """Collect all json route files from args.src and build the dataset."""
    jsons = sorted([os.path.join(args.src, file) for file in os.listdir(args.src) if file.endswith(".json")])
    construct_maps(jsons)
if __name__ == "__main__":
main()
|
6,558 | 94560d8f6528a222e771ca6aa60349d9682e8f4b | from pig_util import outputSchema
@outputSchema('word:chararray')
def reverse(word):
    """
    Return *word* with its characters in reverse order.
    """
    return ''.join(reversed(word))
@outputSchema('length:int')
def num_chars(word):
"""
Return the length of the provided word
"""
return len(word) |
6,559 | e6bd9391a5364e798dfb6d2e9b7b2b98c7b701ac | # coding:utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import Pool
# Columns: user id, timestamp, browse action, browse sub-action id.
"""
用户id,时间戳,浏览行为数据,浏览子行为编号
"""
names = ['userid','time','browser_behavior','browser_behavior_number']
# Train and test browse logs are concatenated and processed together.
browse_history_train = pd.read_csv("../../pcredit/train/browse_history_train.txt",header=None)
browse_history_test = pd.read_csv("../../pcredit/test/browse_history_test.txt",header=None)
browse_history = pd.concat([browse_history_train,browse_history_test])
browse_history.columns = names
# Helper column: every row counts as one browse event.
browse_history['browse_count'] = 1
#browse_history = browse_history.head(100)
users = list(browse_history.userid.unique())
# Aggregate by time: total events per (user, timestamp).
data = browse_history[['userid','time','browse_count']]
t = data.groupby(['userid','time']).agg(sum)
t.reset_index(inplace=True)
def time_m(u):
    # Per-user descriptive stats of events-per-timestamp (reads global t;
    # Python 2, runs in multiprocessing workers).
    d = {'userid':u}
    tu = t[t.userid==u]
    d['browse_max'] = tu['browse_count'].max()
    d['browse_min'] = tu['browse_count'].min()
    d['browse_mean'] = tu['browse_count'].mean()
    d['browse_median'] = tu['browse_count'].median()
    d['browse_var'] = tu['browse_count'].var()
    d['browse_std'] = tu['browse_count'].std()
    d['browse_count'] = tu['browse_count'].count()
    d['browse_max_min'] = d['browse_max'] - d['browse_min']
    print d
    return d
def multi_time():
    # Fan the per-user time aggregation out over 12 worker processes
    # and save the combined feature table.
    pool = Pool(12)
    rst = pool.map(time_m,users)
    pool.close()
    pool.join()
    Datas = pd.DataFrame(rst)
    #print Data.head()
    #Datas.fillna(-9999,inplace=True)
    print Datas.head()
    print Datas.shape
    Datas.to_csv('../data/train/browser_history_time.csv', index=None)
# 统计 browser 类别数据
def browser_behavior_u(u):
    # Per-user stats per browse-action category.
    # NOTE(review): this reads globals t and browser_behavior_tp, but
    # multi_data() only creates them as *locals* -- as written this raises
    # NameError (and workers would not see them anyway). Confirm intent.
    d = {"userid":u}
    ta = t.loc[t.userid == u, :]
    d['browser_data_max'] = ta['browse_count'].max()
    d['browser_data_min'] = ta['browse_count'].min()
    d['browser_data_mean'] = ta['browse_count'].mean()
    d['browser_data_median'] = ta['browse_count'].median()
    d['browser_data_var'] = ta['browse_count'].var()
    d['browser_data_std'] = ta['browse_count'].std()
    d['browser_data_count'] = ta['browse_count'].count()
    d['browser_data_max_min'] = d['browser_data_max'] - d['browser_data_min']
    #print ta
    for b in browser_behavior_tp:
        try:
            tb = ta.loc[ta.browser_behavior==b,'browse_count']
            d['browser_'+str(b)] = tb.iloc[0]
        except:
            d['browser_' + str(b)] = np.NAN
    print d
    return d
def multi_data():
    # Per-category browse stats, computed in parallel.
    # NOTE(review): data / t / browser_behavior_tp are locals here, so the
    # worker function browser_behavior_u cannot see them -- likely a bug
    # introduced when this was refactored into a function.
    data = browse_history[['userid', 'browser_behavior', 'browse_count']]
    t = data.groupby(['userid', 'browser_behavior']).agg(sum)
    t.reset_index(inplace=True)
    browser_behavior_tp = list(data.browser_behavior.unique())
    pool = Pool(12)
    rst = pool.map(browser_behavior_u,users)
    pool.close()
    pool.join()
    Data = pd.DataFrame(rst)
    #Datas = pd.merge(Datas,Data,on='userid')
    del Data,rst,t,data
def browser_behavior_number_u(u):
    # Per-user stats per browse *sub-action* id.
    # NOTE(review): reads global t, which at module level is the
    # (userid, time) grouping -- not the (userid, browser_behavior_number)
    # grouping built inside mult_browse_behavi(). Confirm which is intended.
    d = {"userid":u}
    ta = t.loc[t.userid == u, :]
    d['browser_behavior_max'] = ta['browse_count'].max()
    d['browser_behavior_min'] = ta['browse_count'].min()
    d['browser_behavior_mean'] = ta['browse_count'].mean()
    d['browser_behavior_median'] = ta['browse_count'].median()
    d['browser_behavior_var'] = ta['browse_count'].var()
    d['browser_behavior_std'] = ta['browse_count'].std()
    d['browser_behavior_count'] = ta['browse_count'].count()
    d['browser_behavior_max_min'] = d['browser_behavior_max'] - d['browser_behavior_min']
    for b in [1,4,5,6,7,8,10]:
        try:
            tb = ta.loc[t.browser_behavior_number==b,'browse_count']
            d['browser_behavior_number_'+str(b)] = tb.iloc[0]
        except:
            d['browser_behavior_number_' + str(b)] = np.NAN
    print d
    return d
def mult_browse_behavi():
    # Sub-action statistics, computed in parallel.
    # NOTE(review): as in multi_data(), the regrouped t is a local and is
    # invisible to browser_behavior_number_u in the worker processes.
    data = browse_history[['userid', 'browser_behavior_number', 'browse_count']]
    t = data.groupby(['userid', 'browser_behavior_number']).agg(sum)
    t.reset_index(inplace=True)
    pool = Pool(12)
    rst = pool.map(browser_behavior_number_u,users)
    pool.close()
    pool.join()
    Data = pd.DataFrame(rst)
    #Datas = pd.merge(Datas,Data,on='userid')
    del Data,rst,data
def merge_browser():
    # Join the time-based and stage-5 feature files on userid and save
    # the combined table (NaNs filled with the -9999 sentinel).
    d = pd.read_csv('../data/train/browser_history_time.csv')
    d1 = pd.read_csv('../data/train/browse_history_stage5.csv')
    d = pd.merge(d,d1,on='userid')
    d.fillna(-9999, inplace=True)
    print d.head(10)
    print d.shape
    d.to_csv('../data/train/browser_history_all.csv', index=None)
if __name__=='__main__':
    merge_browser()
|
6,560 | 146cae8f60b908f04bc09b10c4e30693daec89b4 | import imgui
# Minimal pyimgui smoke test: build and render one frame head-less.
print("begin")
imgui.create_context()
imgui.get_io().display_size = 100, 100
# The font atlas must be baked before the first new_frame().
imgui.get_io().fonts.get_tex_data_as_rgba32()
imgui.new_frame()
imgui.begin("Window", True)
imgui.text("HelloWorld")
imgui.end()
imgui.render()
imgui.end_frame()
print("end")
|
6,561 | 31a5bf0b275238e651dcb93ce80446a49a4edcf4 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 13:25:03 2020
@author: Dr. Michael Sigmond, Canadian Centre for Climate Modelling and Analysis
"""
import matplotlib.colors as col
import matplotlib.cm as cm
import numpy as np
def register_cccmacms(cmap='all'):
    """create my personal colormaps with discrete colors and register them.
    default is to register all of them. can also specify which one.
    (@@ input arg cmap not implemented yet 2/27/14)
    """
    #print 'registering cmaps'
    # define individual colors as RGB triples
    # from colorwheel.m
    # =============================================
    # kem_w20 (20) OR blue2red_w20
    # blueish at top, white in middle, reddish at bottom
    # NOTE(review): only 13 colors are listed despite the "_w20" name above;
    # also cm.register_cmap is deprecated in Matplotlib >= 3.7 -- confirm
    # whether matplotlib.colormaps.register should be used instead.
    cpool = np.array([ [153,255,255], \
                    [204,255,229], \
                    [240,255,240],\
                    [204,255,153],\
                    [178,255,102],\
                    [216,255,76],\
                    [255,255,51],\
                    [255,220,51],\
                    [255,187,51],\
                    [255,153,51],\
                    [255,0,0],\
                    [204,0,0],\
                    [153,0,0]], \
                    dtype=float)
    # Normalise 0-255 RGB triples to the 0-1 range Matplotlib expects.
    acccbar = (cpool/255.)
    thecmap = col.ListedColormap(acccbar,'acccbar')
    cm.register_cmap(cmap=thecmap)
    return
# Register at import time so the 'acccbar' name is always available.
register_cccmacms()
|
6,562 | b9b113bdc5d06b8a7235333d3b3315b98a450e51 | import random
# Draw random ints from [1, 10]; every draw is (re)stored as a key, and the
# loop stops right after the first value that was not already present
# (so any of 6-10 ends it immediately on the first hit).
s = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
t = True
while t:
    a = random.randint(1, 10)
    if a not in s:
        t = False
    s[a] = a
print(s)
|
6,563 | 10a7c1827abb8a87f5965453aa2d8f5e8b4914e5 | import matplotlib.pyplot as plt
def xyplot(xdata, ydata, title):
    """Plot ydata against xdata and save the chart as a Desktop PNG.

    The current figure is cleared afterwards so successive calls do not
    accumulate lines on the same axes.

    NOTE(review): the output directory is hard-coded to one user's Desktop.
    """
    fname = "/Users/nalmog/Desktop/swa_equipped_cumulative_" + title + ".png"
    plt.plot(xdata, ydata)
    plt.title(title)
    plt.xlabel("Percent of Fleet")
    # Fix: removed the dead 'some numbers' placeholder label that was
    # immediately overwritten, plus the surrounding commented-out scratch.
    plt.ylabel("Number of Passes")
    plt.savefig(fname)
    plt.clf()
#plt.
|
6,564 | 1190e802fde6c2c6f48bd2720688bd9231b622e0 | """
PROYECTO : Portal EDCA-HN
NOMBRE : ZipTools
Descripcion : Clase utilitaria para descomprimir archivos ZIP.
MM/DD/YYYY Colaboradores Descripcion
05/07/2019 Alla Duenas Creacion.
"""
import zipfile
from edca_mensajes import EdcaErrores as err, EdcaMensajes as msg
from edca_logs.EdcaLogger import EdcaLogger as log
class ZipTools:
    """Utility helpers to compress/extract ZIP archives with EDCA logging."""

    # Compress a downloaded file into a sibling .zip archive
    @staticmethod
    def comprimir(archivo, dir_comprimir):
        """Compress *archivo* into '<archivo-without-extension>.zip'.

        *dir_comprimir* is accepted for interface compatibility but unused.
        """
        import os
        # Fix: splitext handles names without a dot; the old
        # archivo[:archivo.find(".")] silently chopped the last character
        # when find() returned -1.
        __archivo_zip = os.path.splitext(archivo)[0] + ".zip"
        try:
            with zipfile.ZipFile(__archivo_zip, 'w', zipfile.ZIP_DEFLATED) as archivo_zip:
                archivo_zip.write(archivo)
        except PermissionError as e:
            # Fix: bind the raised exception -- the old code read
            # .filename/.strerror off the exception *class*, logging garbage.
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)
        except IOError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(
                                        err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)

    # Extract a downloaded archive
    @staticmethod
    def descomprimir(archivo, dir_extraer):
        """Extract *archivo* into *dir_extraer*, logging each entry name."""
        try:
            with zipfile.ZipFile(archivo, 'r') as zip_ref:
                for contenido in zip_ref.infolist():
                    log.registrar_log_info(__name__, err.EdcaErrores.INFO_ZIPTOOL_PRINT_DIR,
                                           "EXTRAER ARCHIVO",
                                           msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_PRINT_DIR) % contenido.filename)
                # Fix: extractall was previously called once per entry
                # (inside the loop), re-extracting the whole archive N times.
                zip_ref.extractall(dir_extraer)
            log.registrar_log_info(__name__, err.EdcaErrores.INFO_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                   msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_UNZIP))
        except PermissionError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)
        except IOError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(
                                        err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)

    @staticmethod
    def obtener_contenido_zip(archivo):
        """Return the name of the last entry in *archivo*.

        NOTE(review): preserves the original behaviour of caching the result
        in the module-global ``zp`` and returning only the final entry name.
        """
        global zp
        try:
            with zipfile.ZipFile(archivo, 'r') as zip_ref:
                for contenido in zip_ref.infolist():
                    zp = contenido.filename
            return zp
        except PermissionError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP)
                                    % e.filename % e.strerror)
        except IOError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(
                                        err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)
|
6,565 | 71503282e58f60e0936a5236edc094f1da937422 | from django.utils.text import slugify
from pyexpat import model
from django.db import models
# Create your models here.
from rest_framework_simplejwt.state import User
FREQUENCY = (
('daily', 'Diario'),
('weekly', 'Semanal'),
('monthly', 'Mensual')
)
class Tags(models.Model):
    """A taggable label; the slug is auto-generated from name on first save."""
    name = models.CharField(max_length=100)
    slug = models.CharField(max_length=150)
    created_at = models.DateTimeField(auto_now_add=True)
    # Fix: auto_now (touch on every save) -- the old auto_now_add froze the
    # value at creation, so updated_at never reflected updates.
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Generate the slug only once so existing URLs stay stable.
        if not self.slug:
            self.slug = slugify(self.name)
        super(Tags, self).save(*args, **kwargs)

    class Meta:
        ordering = ('-created_at',)
class Newsletter(models.Model):
    # A newsletter campaign with an author, tags, and a publishing frequency.
    name = models.CharField(max_length=200)
    description = models.CharField(max_length=10000)
    image = models.ImageField()
    # Target subscriber count for the campaign.
    target = models.IntegerField()
    frequency = models.CharField(max_length=10, choices=FREQUENCY, default='monthly')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    tag = models.ManyToManyField(Tags)
    @property
    def subscribed(self):
        # NOTE(review): hard-coded placeholder -- replace with a real
        # subscriber count once the subscription model exists.
        return 10
    def __str__(self):
        return self.name
    class Meta:
        ordering = ('-created_at',)
|
6,566 | b7038ad73bf0e284474f0d89d6c34967d39541c0 | from .auth import Auth
from .banDetection import BanDetectionThread
from .botLogging import BotLoggingThread
from .clientLauncher import ClientLauncher
from .log import LogThread, Log
from .mainThread import MainThread
from .nexonServer import NexonServer
from .tmLogging import TMLoggingThread
from .worldCheckboxStatus import WorldCheckBoxThread
from .setStartup import setStartupThread
|
6,567 | 6928ff58ddb97883a43dfd867ff9a89db72ae348 | from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import urllib
from flask import Flask
########################################################################################DataBase@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@2
#connection string
# Windows-auth ODBC connection to a local SQL Server instance.
params = urllib.parse.quote_plus('Driver={SQL Server};'
                                 'Server=YoussefSami;'
                                 'Database=CLS_DB2;'
                                 'Trusted_Connection=yes;')
#init flas app
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS']='Content-Type'
# NOTE(review): wide-open CORS plus DEBUG/TESTING enabled and a trivial
# secret key -- all of these must be locked down outside development.
app.config['Access-Control-Allow-Origin'] ='*'
app.config["DEBUG"]=True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] =True
app.config['TESTING']=True
app.config['SECRET_KEY']='thisissecretkey'
#init db
app.config['SQLALCHEMY_DATABASE_URI'] = "mssql+pyodbc:///?odbc_connect=%s" % params
db=SQLAlchemy(app)
#create modules for database
class Entity_list_user(db.Model):
    # Application users; UserType discriminates roles (student/staff/etc.).
    ID = db.Column(db.Integer, primary_key=True)
    NationalID = db.Column(db.String(250),nullable=False)
    FirstName = db.Column(db.String(250), nullable=False)
    LastName = db.Column(db.String(250), nullable=False)
    Email = db.Column(db.String(250), nullable=False)
    # NOTE(review): looks like the password is stored as-is -- confirm it is
    # hashed before rows are inserted.
    Password = db.Column(db.String(250), nullable=False)
    FacultyID = db.Column(db.String(250))
    Faculty = db.Column(db.String(250))
    Dept = db.Column(db.String(250))
    UserType=db.Column(db.String(250),nullable=False)
class Entity_list_Attendance(db.Model):
    """Attendance log: one check-in/check-out event per row."""
    ID = db.Column(db.Integer, primary_key=True)
    FacultyID = db.Column(db.String(250), nullable=False)
    Name = db.Column(db.String(250), nullable=False)
    Time = db.Column(db.String(250), nullable=False)
    InOut = db.Column(db.String(250), nullable=False)
    Date = db.Column(db.Date, nullable=False)
    # Fix: the ForeignKeyConstraint was created as a bare expression inside
    # the class body and immediately discarded; SQLAlchemy only picks table
    # constraints up from __table_args__.
    # NOTE(review): Flask-SQLAlchemy derives 'entity_list_user' as the
    # referenced table name, and SQL Server requires the target column to be
    # unique/indexed -- confirm Entity_list_user.FacultyID satisfies that.
    __table_args__ = (
        db.ForeignKeyConstraint(
            ['FacultyID'], ['entity_list_user.FacultyID'],
            name='fk_FacultyID'
        ),
    )
|
6,568 | dc5b9600828857cc5ea434a7b010cd8aa2589d22 | from math import log2
from egosplit.benchmarks.data_structures.cover_benchmark import *
from egosplit.benchmarks.evaluation.utility import create_line
from networkit.stopwatch import clockit
# Analyse the result cover of a benchmark run
@clockit
def analyze_cover(benchmarks, result_dir, calc_f1, append):
    """Write cover statistics for every benchmark into *result_dir*.

    Headers are (re)written unless *append* is True.
    """
    if not append:
        print_headers(result_dir)
    for benchmark in benchmarks:
        count_benchmark_cover(result_dir, calc_f1, benchmark)
# Print output file headers
def print_headers(result_dir):
    """Write fresh header lines, truncating any previous result files."""
    header_columns = [
        ('cover_num_comms.result', ('Number of Communities',)),
        ('cover_comm_sizes.result', ('Community Size', 'F1 Score')),
        ('cover_node_comms.result', ('Number of Communities per Node',)),
    ]
    for filename, extra_columns in header_columns:
        with open(result_dir + filename, 'w') as f:
            f.write(create_line(*CoverBenchmark.output_header(), *extra_columns))
# Count the number of communities and their sizes
def count_benchmark_cover(result_dir, calc_f1, benchmark):
    """Append one benchmark's cover statistics to the three result files.

    Community sizes and per-node community counts are logged as log2 values.
    """
    cover = benchmark.get_cover()
    ground_truth = benchmark.get_ground_truth()
    comm_map = get_communities(benchmark.get_graph(), cover)
    gt_map = get_communities(benchmark.get_graph(), ground_truth)
    comm_sizes = cover.subsetSizeMap()
    # Number of communities
    with open(result_dir + 'cover_num_comms.result', 'a') as f:
        f.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))
    # Community sizes and F1 scores
    with open(result_dir + 'cover_comm_sizes.result', 'a') as f:
        for u in cover.getSubsetIds():
            comm = comm_map[u]
            size = comm_sizes[u]
            # F1 is expensive; skip it (write 0) when not requested.
            f1 = f1_score(comm, gt_map) if calc_f1 else 0
            f.write(create_line(*benchmark.output_line(), log2(size), f1))
    # Number of Communities per Node
    with open(result_dir + 'cover_node_comms.result', 'a') as f:
        for u in benchmark.get_graph().nodes():
            num_comms = len(cover.subsetsOf(u))
            if num_comms > 0:
                f.write(create_line(*benchmark.output_line(), log2(num_comms)))
def get_communities(graph, cover):
    """Map each community id in *cover* to the set of its member nodes.

    Inverts the node->communities relation exposed by ``cover.subsetsOf``
    over all nodes of *graph*.
    """
    members = defaultdict(set)
    for node in graph.nodes():
        for community in cover.subsetsOf(node):
            members[community].add(node)
    return members
def f1_score(community, ground_truth):
    """Return the best F1 score of *community* against any ground-truth
    community (0.0 when it overlaps none of them)."""
    best = 0.0
    size = len(community)
    for true_comm in ground_truth.values():
        common = len(true_comm.intersection(community))
        if not common:
            continue
        precision = common / size
        recall = common / len(true_comm)
        score = 2 * precision * recall / (precision + recall)
        if score > best:
            best = score
    return best
|
6,569 | 94a84c7143763c6b7ccea1049cdec8b7011798cd | #!/usr/bin/python
#_*_ coding: utf-8 _*_
import MySQLdb as mdb
import sys
con = mdb.connect("localhost","testuser","testdB","testdb")
with con:
cur = con.cursor()
cur.execute("UPDATE Writers SET Name = %s WHERE Id = %s ",
("Guy de manupassant", "4"))
print "Number of rows updated: %d "% cur.rowcount
|
6,570 | b2c0ef4a0af12b267a54a7ae3fed9edeab2fb879 | import torch
import torch.nn as nn
from model.common import UpsampleBlock, conv_, SELayer
def wrapper(args):
    """Build an AFN model from a parsed-arguments object.

    Recognized ``args.act`` values: ``'relu'``, ``'leak_relu'`` or None;
    anything else raises NotImplementedError.
    """
    if args.act not in ('relu', 'leak_relu', None):
        raise NotImplementedError
    act = None
    if args.act == 'relu':
        act = nn.ReLU(True)
    elif args.act == 'leak_relu':
        act = nn.LeakyReLU(0.2, True)
    return AFN(in_c=args.n_colors, out_c=args.n_colors, scale=args.scale,
               n_feats=args.n_feats, act=act)
class AFB_0(nn.Module):
    """Basic residual block: *n_blocks* (conv, act) pairs plus a skip
    connection around the whole stack."""

    def __init__(self, channels, n_blocks=2, act=nn.ReLU(True)):
        super(AFB_0, self).__init__()
        layers = []
        for _ in range(n_blocks):
            layers.extend([conv_(channels, channels), act])
        self.op = nn.Sequential(*layers)

    def forward(self, x):
        # Residual connection around the conv stack.
        return x + self.op(x)
class AFB_L1(nn.Module):
    """Level-1 aggregation: *n_l0* sequential AFB_0 blocks whose
    intermediate outputs are concatenated, fused (SE + 1x1 conv) and
    added back to the block input."""

    def __init__(self, channels, n_l0=3, act=nn.ReLU(True)):
        super(AFB_L1, self).__init__()
        self.n = n_l0
        self.convs_ = nn.ModuleList(AFB_0(channels, 2, act)
                                    for _ in range(n_l0))
        # Local feature fusion over the concatenated outputs.
        self.LFF = nn.Sequential(
            SELayer(channels * n_l0, 16),
            nn.Conv2d(channels * n_l0, channels, 1, padding=0, stride=1),
        )

    def forward(self, x):
        identity = x
        outputs = []
        for block in self.convs_:
            x = block(x)
            outputs.append(x)
        return self.LFF(torch.cat(outputs, 1)) + identity
class AFB_L2(nn.Module):
    """Level-2 aggregation: *n_l1* sequential AFB_L1 blocks whose
    intermediate outputs are concatenated, fused (SE + 1x1 conv) and
    added back to the block input."""

    def __init__(self, channels, n_l1=4, act=nn.ReLU(True)):
        super(AFB_L2, self).__init__()
        self.n = n_l1
        self.convs_ = nn.ModuleList(AFB_L1(channels, 3, act)
                                    for _ in range(n_l1))
        # Local feature fusion over the concatenated outputs.
        self.LFF = nn.Sequential(
            SELayer(channels * n_l1, 16),
            nn.Conv2d(channels * n_l1, channels, 1, padding=0, stride=1),
        )

    def forward(self, x):
        identity = x
        outputs = []
        for block in self.convs_:
            x = block(x)
            outputs.append(x)
        return self.LFF(torch.cat(outputs, 1)) + identity
class AFB_L3(nn.Module):
    """Level-3 aggregation: *n_l2* sequential AFB_L2 blocks whose
    intermediate outputs are concatenated, fused (SE + 1x1 conv) and
    added back to the block input."""

    def __init__(self, channels, n_l2=4, act=nn.ReLU(True)):
        super(AFB_L3, self).__init__()
        self.n = n_l2
        self.convs_ = nn.ModuleList(AFB_L2(channels, 4, act)
                                    for _ in range(n_l2))
        # Local feature fusion over the concatenated outputs.
        self.LFF = nn.Sequential(
            SELayer(channels * n_l2, 16),
            nn.Conv2d(channels * n_l2, channels, 1, padding=0, stride=1),
        )

    def forward(self, x):
        identity = x
        outputs = []
        for block in self.convs_:
            x = block(x)
            outputs.append(x)
        return self.LFF(torch.cat(outputs, 1)) + identity
class AFN(nn.Module):
    """Aggregated feature network for super-resolution: a head conv,
    *n_l3* stacked AFB_L3 blocks with global feature fusion, and an
    upsampling tail."""

    def __init__(self, in_c=3, out_c=3, scale=4, n_feats=128, n_l3=3, act=nn.LeakyReLU(0.2, True)):
        super(AFN, self).__init__()
        # Shallow feature extraction.
        self.head = conv_(in_c, n_feats)
        self.n = n_l3
        self.AFBs = nn.ModuleList()
        for i in range(n_l3):
            self.AFBs.append(
                AFB_L3(channels=n_feats, n_l2=4, act=act)
            )
        # Global feature fusion over the concatenated block outputs.
        self.GFF = nn.Sequential(*[
            SELayer(n_feats * n_l3),
            conv_(n_feats * n_l3, n_feats, 1, padding=0, stride=1),
        ])
        # Upsample by `scale`, then project back to the output channels.
        self.tail = nn.Sequential(*[
            UpsampleBlock(scale, n_feats, kernel_size=3, stride=1, bias=True, act=act),
            conv_(n_feats, out_c)
        ])

    def forward(self, x):
        res = []
        x = self.head(x)
        for i in range(self.n):
            x = self.AFBs[i](x)
            res.append(x)
        res = self.GFF(torch.cat(res, 1))
        # NOTE(review): the global residual adds the GFF output to the LAST
        # block's output (`x`), not to the head features -- confirm intended.
        x = res + x
        x = self.tail(x)
        return x
if __name__ == "__main__":
    # Smoke test: build an x8 model, summarize it, and profile one
    # forward pass on a random batch.
    import numpy as np
    import torch
    import torchsummary

    model = AFN(in_c=3, out_c=3, scale=8, n_feats=128, n_l3=3,
                act=nn.LeakyReLU(0.2, True))
    print(torchsummary.summary(model, (3, 24, 24), device='cpu'))

    batch = np.random.uniform(0, 1, [2, 3, 24, 24]).astype(np.float32)
    x = torch.tensor(batch)
    with torch.autograd.profiler.profile(use_cuda=True) as prof:
        y = model(x)
    print(prof)
    print(y.shape)
|
6,571 | b984dc052201748a88fa51d25c3bd3c22404fa96 |
# import draw as p
# ако няма __init__.py
# from draw.point import Point
from draw import Rectangle
from draw import Point
from draw import ShapeUtils
if __name__ == '__main__':
    pn1 = Point(9, 8)
    pn2 = Point(6, 4)
    # Fix: the message printed pn1 twice instead of showing both endpoints.
    print(f'dist: {pn1} and {pn2} = {ShapeUtils.distance(pn1,pn2)}')

    rc1 = Rectangle(40, 20, 120, 300)
    rc2 = Rectangle(30, 21, 350, 400)
    # Fix: likewise, show both rectangles of the distance computation.
    print(f'dist: {rc1} and {rc2} = {ShapeUtils.distance(rc1,rc2)}')

    if ShapeUtils.compare(pn1, pn2) > 0:
        print(f'{pn1} > {pn2}')
6,572 | 6c9f9363a95ea7dc97ccb45d0922f0531c5cfec9 | import re
_camel_words = re.compile(r"([A-Z][a-z0-9_]+)")
def _camel_to_snake(s):
""" Convert CamelCase to snake_case.
"""
return "_".join(
[
i.lower() for i in _camel_words.split(s)[1::2]
]
)
|
# Parameterized INSERT for the cars table (marca, modelo, color, motor, precio).
SQL_INSERCION_COCHE = "INSERT INTO tabla_coches(marca, modelo, color, motor, precio) VALUES (%s,%s,%s,%s,%s);"
# Fetch every car row.
SQL_LISTADO_COCHES = "SELECT * FROM tabla_coches;"
|
6,574 | 2ee4b31f880441e87c437d7cc4601f260f34ae24 | from sys import getsizeof
# Using parentheses creates a generator expression: elements are produced
# lazily, so the object stays small regardless of the range size.
a = (b for b in range(10))
print(getsizeof(a))

c = [b for b in range(10)]
# The list comprehension materializes every element, so c uses more memory than a.
print(getsizeof(c))

# Iterating consumes the generator ...
for b in a:
    print(b)

print(sum(a))  # ... so the sequence has disappeared and this sum is 0
|
6,575 | 9376d697158faf91f066a88e87d317e79a4d9240 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from ...utils import minversion
__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11',
           'NUMPY_LT_1_12', 'NUMPY_LT_1_13', 'NUMPY_LT_1_14',
           'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']

# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)

# Each flag is True when the installed numpy is OLDER than the version in
# its name, so callers can gate workarounds on e.g. `if NUMPY_LT_1_13:`.
NUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')
NUMPY_LT_1_11 = not minversion('numpy', '1.11.0')
NUMPY_LT_1_12 = not minversion('numpy', '1.12')
NUMPY_LT_1_13 = not minversion('numpy', '1.13')
NUMPY_LT_1_14 = not minversion('numpy', '1.14')
NUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')
NUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')
|
6,576 | 539523f177e2c3c0e1fb0226d1fcd65463b68a0e | # -*- coding: utf-8 -*-
from __future__ import print_function
"""phy main CLI tool.
Usage:
phy --help
"""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import sys
import os.path as op
import argparse
from textwrap import dedent
import numpy as np
from six import exec_, string_types
#------------------------------------------------------------------------------
# Parser utilities
#------------------------------------------------------------------------------
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    """Help formatter that both shows argument defaults and keeps the
    description/epilog text verbatim (no re-wrapping)."""
    pass
class Parser(argparse.ArgumentParser):
    """ArgumentParser that prints the full help text after a parse error."""

    def error(self, message):
        """Report *message* on stderr, show the help, and exit with code 2."""
        sys.stderr.write('%s\n\n' % message)
        self.print_help()
        sys.exit(2)
_examples = dedent("""
examples:
phy -v display the version of phy
phy download hybrid_120sec.dat -o data/
download a sample raw data file in `data/`
phy describe my_file.kwik
display information about a Kwik dataset
phy spikesort my_params.prm
run the whole suite (spike detection and clustering)
phy detect my_params.prm
run spike detection on a parameters file
phy cluster-auto my_file.kwik
run klustakwik on a dataset (after spike detection)
phy cluster-manual my_file.kwik
run the manual clustering GUI
""")
#------------------------------------------------------------------------------
# Parser creator
#------------------------------------------------------------------------------
class ParserCreator(object):
    """Builds the `phy` command-line parser: the top-level parser plus one
    sub-parser per subcommand, all sharing the same debug/profiling
    options."""

    def __init__(self):
        # Create the main parser first, then register every subcommand.
        self.create_main()
        self.create_download()
        self.create_traces()
        self.create_describe()
        self.create_spikesort()
        self.create_detect()
        self.create_auto()
        self.create_manual()
        self.create_notebook()

    @property
    def parser(self):
        """The fully-configured top-level argparse parser."""
        return self._parser

    def _add_sub_parser(self, name, desc):
        """Register subcommand *name* and attach the shared options."""
        p = self._subparsers.add_parser(name, help=desc, description=desc)
        self._add_options(p)
        return p

    def _add_options(self, parser):
        """Add the debug/profiling options shared by all parsers."""
        parser.add_argument('--debug', '-d',
                            action='store_true',
                            help='activate debug logging mode')
        parser.add_argument('--hide-traceback',
                            action='store_true',
                            help='hide the traceback for cleaner error '
                                 'messages')
        parser.add_argument('--profiler', '-p',
                            action='store_true',
                            help='activate the profiler')
        parser.add_argument('--line-profiler', '-lp',
                            dest='line_profiler',
                            action='store_true',
                            help='activate the line-profiler -- you '
                                 'need to decorate the functions '
                                 'to profile with `@profile` '
                                 'in the code')
        parser.add_argument('--ipython', '-i', action='store_true',
                            help='launch the script in an interactive '
                                 'IPython console')
        parser.add_argument('--pdb', action='store_true',
                            help='activate the Python debugger')

    def create_main(self):
        """Create the top-level parser with version info and subcommands."""
        import phy

        desc = sys.modules['phy'].__doc__
        self._parser = Parser(description=desc,
                              epilog=_examples,
                              formatter_class=CustomFormatter,
                              )
        # `func` is filled in by each subcommand's set_defaults().
        self._parser.set_defaults(func=None)
        self._parser.add_argument('--version', '-v',
                                  action='version',
                                  version=phy.__version_git__,
                                  help='print the version of phy')
        self._add_options(self._parser)
        self._subparsers = self._parser.add_subparsers(dest='command',
                                                       title='subcommand',
                                                       )

    def create_download(self):
        desc = 'download a sample dataset'
        p = self._add_sub_parser('download', desc)
        p.add_argument('file', help='dataset filename')
        p.add_argument('--output-dir', '-o', help='output directory')
        p.add_argument('--base',
                       default='cortexlab',
                       choices=('cortexlab', 'github'),
                       help='data repository name: `cortexlab` or `github`',
                       )
        p.set_defaults(func=download)

    def create_describe(self):
        desc = 'describe a `.kwik` file'
        p = self._add_sub_parser('describe', desc)
        p.add_argument('file', help='path to a `.kwik` file')
        p.add_argument('--clustering', default='main',
                       help='name of the clustering to use')
        p.set_defaults(func=describe)

    def create_traces(self):
        desc = 'show the traces of a raw data file'
        p = self._add_sub_parser('traces', desc)
        p.add_argument('file', help='path to a `.kwd` or `.dat` file')
        p.add_argument('--interval',
                       help='detection interval in seconds (e.g. `0,10`)')
        p.add_argument('--n-channels', '-n',
                       help='number of channels in the recording '
                            '(only required when using a flat binary file)')
        p.add_argument('--dtype',
                       help='NumPy data type '
                            '(only required when using a flat binary file)',
                       default='int16',
                       )
        p.add_argument('--sample-rate', '-s',
                       help='sample rate in Hz '
                            '(only required when using a flat binary file)')
        p.set_defaults(func=traces)

    def create_spikesort(self):
        desc = 'launch the whole spike sorting pipeline on a `.prm` file'
        p = self._add_sub_parser('spikesort', desc)
        p.add_argument('file', help='path to a `.prm` file')
        p.add_argument('--kwik-path', help='filename of the `.kwik` file '
                       'to create (by default, `"experiment_name".kwik`)')
        p.add_argument('--overwrite', action='store_true', default=False,
                       help='overwrite the `.kwik` file ')
        p.add_argument('--interval',
                       help='detection interval in seconds (e.g. `0,10`)')
        p.set_defaults(func=spikesort)

    def create_detect(self):
        desc = 'launch the spike detection algorithm on a `.prm` file'
        p = self._add_sub_parser('detect', desc)
        p.add_argument('file', help='path to a `.prm` file')
        p.add_argument('--kwik-path', help='filename of the `.kwik` file '
                       'to create (by default, `"experiment_name".kwik`)')
        p.add_argument('--overwrite', action='store_true', default=False,
                       help='overwrite the `.kwik` file ')
        p.add_argument('--interval',
                       help='detection interval in seconds (e.g. `0,10`)')
        p.set_defaults(func=detect)

    def create_auto(self):
        desc = 'launch the automatic clustering algorithm on a `.kwik` file'
        p = self._add_sub_parser('cluster-auto', desc)
        p.add_argument('file', help='path to a `.kwik` file')
        p.add_argument('--clustering', default='main',
                       help='name of the clustering to use')
        p.set_defaults(func=cluster_auto)

    def create_manual(self):
        desc = 'launch the manual clustering GUI on a `.kwik` file'
        p = self._add_sub_parser('cluster-manual', desc)
        p.add_argument('file', help='path to a `.kwik` file')
        p.add_argument('--clustering', default='main',
                       help='name of the clustering to use')
        p.add_argument('--cluster-ids', '-c',
                       help='list of clusters to select initially')
        p.add_argument('--no-store', action='store_true', default=False,
                       help='do not create the store (faster loading time, '
                            'slower GUI)')
        p.set_defaults(func=cluster_manual)

    def create_notebook(self):
        # TODO
        pass

    def parse(self, args):
        """Parse *args*; return None when argparse exited successfully
        (e.g. after `--help` or `--version`), re-raise otherwise."""
        try:
            return self._parser.parse_args(args)
        except SystemExit as e:
            if e.code != 0:
                raise e
#------------------------------------------------------------------------------
# Subcommand functions
#------------------------------------------------------------------------------
def _get_kwik_path(args):
kwik_path = args.file
if not op.exists(kwik_path):
raise IOError("The file `{}` doesn't exist.".format(kwik_path))
return kwik_path
def _create_session(args, **kwargs):
    """Open a phy Session on the (existing) kwik file named in *args*.

    Extra keyword arguments are forwarded to the Session constructor.
    """
    from phy.session import Session
    return Session(_get_kwik_path(args), **kwargs)
def describe(args):
    """Load a Kwik model; return the describe command and its namespace."""
    from phy.io.kwik import KwikModel
    model = KwikModel(_get_kwik_path(args), clustering=args.clustering)
    return 'model.describe()', dict(model=model)
def download(args):
    """Download a sample dataset file into the requested directory."""
    from phy import download_sample_data
    download_sample_data(args.file,
                         output_dir=args.output_dir,
                         base=args.base)
def traces(args):
    """Display the raw traces of a `.kwd` or flat binary recording in a
    VisPy TraceView.

    Returns ``(None, None)``: there is no follow-up command to execute.
    Raises ValueError when a required option is missing or the file type
    is not supported.
    """
    from vispy.app import run
    from phy.plot.traces import TraceView
    from phy.io.h5 import open_h5
    from phy.io.traces import read_kwd, read_dat

    path = args.file
    if path.endswith('.kwd'):
        f = open_h5(args.file)
        traces = read_kwd(f)
    elif path.endswith(('.dat', '.bin')):
        if not args.n_channels:
            raise ValueError("Please specify `--n-channels`.")
        if not args.dtype:
            raise ValueError("Please specify `--dtype`.")
        if not args.sample_rate:
            raise ValueError("Please specify `--sample-rate`.")
        n_channels = int(args.n_channels)
        dtype = np.dtype(args.dtype)
        traces = read_dat(path, dtype=dtype, n_channels=n_channels)
    else:
        # Fix: an unrecognized extension previously fell through and
        # crashed later with a NameError on `traces`.
        raise ValueError("Unsupported file extension: `{}`.".format(path))

    if not args.interval:
        # Fix: `--interval` is optional in the parser but was split
        # unconditionally, crashing with an AttributeError on None.
        raise ValueError("Please specify `--interval` (e.g. `0,10`).")
    start, end = map(int, args.interval.split(','))
    # NOTE(review): `--sample-rate` is documented as flat-binary-only but
    # is required here for `.kwd` files too -- confirm intended.
    sample_rate = float(args.sample_rate)
    start = int(sample_rate * start)
    end = int(sample_rate * end)

    c = TraceView(keys='interactive')
    c.visual.traces = .01 * traces[start:end, ...]
    c.show()
    run()

    return None, None
def detect(args):
    """Run spike detection on a `.prm` file; return the command to exec."""
    from phy.io import create_kwik

    assert args.file.endswith('.prm')
    kwik_path = create_kwik(args.file,
                            overwrite=args.overwrite,
                            kwik_path=args.kwik_path)

    interval = args.interval
    if interval is not None:
        interval = [float(part) for part in interval.split(',')]

    # Create the session with the newly-created .kwik file.
    args.file = kwik_path
    session = _create_session(args, use_store=False)
    return ('session.detect(interval=interval)',
            dict(session=session, interval=interval))
def cluster_auto(args):
    """Run automatic clustering; return the command and its namespace."""
    from phy.utils._misc import _read_python
    from phy.session import Session

    assert args.file.endswith('.prm')
    params = _read_python(args.file)
    session = Session(params['experiment_name'] + '.kwik')
    ns = dict(session=session,
              clustering=args.clustering,
              )
    return 'session.cluster(clustering=clustering)', ns
def spikesort(args):
    """Run the full pipeline (detection + clustering) on a `.prm` file."""
    from phy.io import create_kwik

    assert args.file.endswith('.prm')
    kwik_path = create_kwik(args.file,
                            overwrite=args.overwrite,
                            kwik_path=args.kwik_path,
                            )
    # Create the session with the newly-created .kwik file.
    args.file = kwik_path
    session = _create_session(args, use_store=False)

    interval = args.interval
    if interval is not None:
        interval = [float(part) for part in interval.split(',')]

    ns = dict(session=session,
              interval=interval,
              n_s_clusters=100,  # TODO: better handling of KK parameters
              )
    return 'session.detect(interval=interval); session.cluster();', ns
def cluster_manual(args):
    """Launch the manual clustering GUI; return the show command."""
    session = _create_session(args,
                              clustering=args.clustering,
                              use_store=not args.no_store,
                              )
    cluster_ids = None
    if args.cluster_ids:
        cluster_ids = [int(c) for c in args.cluster_ids.split(',')]
    session.model.describe()

    from phy.gui import start_qt_app
    start_qt_app()

    gui = session.show_gui(cluster_ids=cluster_ids, show=False)
    print("\nPress `ctrl+h` to see the list of keyboard shortcuts.\n")
    return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)
#------------------------------------------------------------------------------
# Main functions
#------------------------------------------------------------------------------
def main(args=None):
    """phy CLI entry point.

    *args* may be None (use ``sys.argv``), a string (split on spaces), or
    a list of tokens.  Dispatches to the chosen subcommand function and
    then executes the command it returns, optionally under a profiler, an
    IPython shell, or a Qt/VisPy event loop.
    """
    p = ParserCreator()
    if args is None:
        args = sys.argv[1:]
    elif isinstance(args, string_types):
        # Convenience: allow passing the whole command line as one string.
        args = args.split(' ')
    args = p.parse(args)
    if args is None:
        # argparse already handled --help/--version and exited cleanly.
        return

    if args.profiler or args.line_profiler:
        from phy.utils.testing import _enable_profiler, _profile
        prof = _enable_profiler(args.line_profiler)
    else:
        prof = None

    import phy
    if args.debug:
        phy.debug()

    # Hide the traceback.
    if args.hide_traceback:
        def exception_handler(exception_type, exception, traceback):
            print("{}: {}".format(exception_type.__name__, exception))

        sys.excepthook = exception_handler

    # Activate IPython debugger.
    if args.pdb:
        from IPython.core import ultratb
        sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                             color_scheme='Linux',
                                             call_pdb=1,
                                             )

    func = args.func
    if func is None:
        # No subcommand was given: show the help and stop.
        p.parser.print_help()
        return

    # Subcommand functions return (command_string, namespace) to execute,
    # or nothing when they already did all the work.
    out = func(args)
    if not out:
        return
    cmd, ns = out
    if not cmd:
        return
    requires_qt = ns.pop('requires_qt', False)
    requires_vispy = ns.pop('requires_vispy', False)

    # Default variables in namespace.
    ns.update(phy=phy, path=args.file)
    if 'session' in ns:
        ns['model'] = ns['session'].model

    # Interactive mode with IPython.
    if args.ipython:
        print("\nStarting IPython...")
        from IPython import start_ipython
        args_ipy = ["-i", "-c='{}'".format(cmd)]
        if requires_qt or requires_vispy:
            # Activate Qt event loop integration with Qt.
            args_ipy += ["--gui=qt"]
        start_ipython(args_ipy, user_ns=ns)
    else:
        if not prof:
            exec_(cmd, {}, ns)
        else:
            _profile(prof, cmd, {}, ns)

        if requires_qt:
            # Launch the Qt app.
            from phy.gui import run_qt_app
            run_qt_app()
        elif requires_vispy:
            # Launch the VisPy Qt app.
            from vispy.app import use_app, run
            use_app('pyqt4')
            run()
#------------------------------------------------------------------------------
# Entry point
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Run the CLI when this module is executed directly.
    main()
|
def resolve_data(raw_data, derivatives_prefix):
    """Flatten nested dicts/lists into a single-level dict of derived keys.

    Dict keys are appended to the prefix joined with ``_``.  A list records
    a ``cnt`` entry; a multi-element list of identically-keyed dicts also
    gets per-key ``sum``/``avg`` aggregates for numeric values (nested
    containers are flattened recursively).  A scalar is stored under the
    prefix itself.
    """
    flat = {}
    if isinstance(raw_data, dict):
        for key, value in raw_data.items():
            if isinstance(value, (dict, list)):
                flat.update(resolve_data(value, derivatives_prefix + key + '_'))
            else:
                flat[derivatives_prefix + key] = value
    elif isinstance(raw_data, list):
        flat[derivatives_prefix + 'cnt'] = len(raw_data)
        if len(raw_data) > 1:
            if isinstance(raw_data[0], dict):
                # Homogeneous records (same keys): aggregate per field.
                if raw_data[0].keys() == raw_data[1].keys():
                    for key, sample in raw_data[0].items():
                        if isinstance(sample, (dict, list)):
                            for record in raw_data:
                                if record.get(key) is not None:
                                    flat.update(resolve_data(
                                        record[key],
                                        derivatives_prefix + key + '_'))
                        elif isinstance(sample, (float, int, bool)):
                            # Only truthy values contribute to the sum, but
                            # the average divides by the full list length.
                            total = sum(record.get(key) for record in raw_data
                                        if record.get(key))
                            flat[derivatives_prefix + key + '_' + 'sum'] = total
                            flat[derivatives_prefix + key + '_' + 'avg'] = \
                                float(total) / len(raw_data)
                else:
                    # Heterogeneous records: flatten each one in turn.
                    for element in raw_data:
                        if isinstance(element, (dict, list)):
                            flat.update(resolve_data(element,
                                                     derivatives_prefix))
        else:
            # Zero or one element: flatten any nested containers directly.
            for element in raw_data:
                if isinstance(element, (dict, list)):
                    flat.update(resolve_data(element, derivatives_prefix))
    else:
        flat[derivatives_prefix] = raw_data
    return flat
|
6,578 | 09850f0d3d295170545a6342337e97a0f190989a | import plotly.express as px
import pandas as pd
def fiig(plan):
    """Gantt-style timeline grouped by work center ("РЦ"); each trace's
    bar width is taken from the "Вес" (weight) column of its group."""
    frame = pd.DataFrame(plan)
    fig = px.timeline(frame, x_start="Начало", x_end="Завершение", y="РЦ", color='РЦ', facet_row_spacing=0.6,
                      facet_col_spacing=0.6, opacity=0.9, hover_data=['Проект', 'МК', 'Наменование', 'Номер', 'Минут'],
                      title='график проектов')
    for trace in fig.data:
        trace.width = frame[frame['РЦ'] == trace.name]['Вес']
    return fig
def fig_porc_projects(plan):
    """Timeline of all projects, one row per project, colored by work
    center ("РЦ")."""
    frame = pd.DataFrame(plan)
    return px.timeline(frame, x_start="Начало", x_end="Завершение", y="Проект", color='РЦ', facet_row_spacing=0.2,
                       facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')
def fig_podetalno_naproject_rc(plan, proj):
    """Detailed timeline for a single project *proj*, one row per part
    number, colored by work center."""
    rows = [row for row in plan if proj in row['Проект']]
    frame = pd.DataFrame(rows)
    return px.timeline(frame, x_start="Начало", x_end="Завершение", y="Номер", color='РЦ', facet_row_spacing=0.2,
                       facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')
def fig_podetalno_narc_projects(plan, rc):
    """Detailed timeline for one work center *rc*, colored by project;
    bar width encodes the "Пост" column (offset so bars stay visible)."""
    rows = [row for row in plan if rc in row['РЦ']]
    frame = pd.DataFrame(rows)
    fig = px.timeline(frame, x_start="Начало", x_end="Завершение", y="Номер", color='Проект', facet_row_spacing=0.2,
                      facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')
    for trace in fig.data:
        trace.width = frame[frame['Проект'] == trace.name]['Пост']/10 + 0.1
    return fig
6,579 | d261efa72e1ab77507a1fd84aa2e462c6969af56 | from django.shortcuts import render, Http404, HttpResponse, redirect
from django.contrib.auth import authenticate, login
from website.form import UserForm
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from website.models import UserProfile
from website.form import UserForm
import pandas as pd
from pandas import DataFrame
from sqlalchemy import create_engine
from django.contrib.auth.decorators import login_required
import sqlite3
import xlrd
import uuid
def df_to_sql_T_1(filefullpath, sheet, row_name):  # filefullpath: path; sheet: sheet index; row_name: row used as column names
    """Import an organization-info Excel sheet into the `org_info` table.

    The sheet is transposed so the *row_name* row supplies the column
    names.  Organizations whose full name (column ★机构全名) already exist
    in the database are UPDATEd in place; unknown names are appended with
    a freshly generated ``org_id`` ('O' + zero-padded counter).
    """
    # Read the Excel file, drop empty rows/columns, transpose, use the
    # requested row as header, and de-duplicate by full name.
    excel_df = pd.read_excel(filefullpath, sheetname=sheet)
    excel_df = excel_df.dropna(how="all")
    excel_df = excel_df.dropna(axis=1, how="all")
    excel_df = excel_df.T
    excel_df.columns = excel_df.loc[row_name]
    excel_df = excel_df.drop(row_name, axis=0, inplace=False)
    excel_df.index = range(len(excel_df))
    excel_df.drop_duplicates(subset=['★机构全名'], inplace=True)
    # Read the current org_info table from the database.
    con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
    sql = "SELECT * FROM org_info"  # NOTE: fails if the table does not exist yet
    sql_df = pd.read_sql(sql, con)
    fund_name_list = sql_df['org_full_name'].tolist()
    sql_number = len(fund_name_list)
    # Assign a sequential org_id to every existing row.
    org_id_number = 0
    for org_full_name in sql_df['org_full_name'].unique():
        org_id_number = org_id_number+1
        org_id = 'O'+'0'*(5-len(str(org_id_number)))+str(org_id_number)
        with con:
            cur = con.cursor()
            cur.execute("""UPDATE org_info SET org_id=? WHERE org_full_name=?""", (org_id, org_full_name))
    # Walk the Excel rows by organization full name.
    excel_name_list = excel_df['★机构全名'].tolist()
    for name in excel_name_list:
        if name in fund_name_list:
            # Known organization: look up its org_id in the database.
            con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
            sql = "SELECT * FROM org_info"
            sql_df = pd.read_sql(sql, con)
            name_dataframe = sql_df[sql_df["org_full_name"] == name]
            org_id = name_dataframe.loc[name_dataframe.last_valid_index(), 'org_id']
            # Turn the matching Excel row into a one-row DataFrame with
            # English column names and attach the id.
            commit_data = excel_df[excel_df["★机构全名"] == name]
            commit_data.columns = ["org_name", "org_full_name", "reg_code", "reg_time", "found_date", "reg_capital",
                                   "real_capital", "region", "profile", "address", "team", "fund_num",
                                   "is_qualification", "prize", "team_scale", "investment_idea", "master_strategy",
                                   "remark", "asset_mgt_scale", "linkman", "linkman_duty", "linkman_phone",
                                   "linkman_email"]
            commit_data["org_id"] = str(org_id)
            # Extract each field value from the single-row DataFrame.
            org_name = str(commit_data.loc[commit_data.org_full_name == name, 'org_name'].values[0])
            org_full_name = str(name)
            reg_code = str(commit_data.loc[commit_data.org_full_name == name, 'reg_code'].values[0])
            reg_time = str(commit_data.loc[commit_data.org_full_name == name, 'reg_time'].values[0])
            found_date = str(commit_data.loc[commit_data.org_full_name == name, 'found_date'].values[0])
            reg_capital = str(commit_data.loc[commit_data.org_full_name == name, 'reg_capital'].values[0])
            real_capital = str(commit_data.loc[commit_data.org_full_name == name, 'real_capital'].values[0])
            region = str(commit_data.loc[commit_data.org_full_name == name, 'region'].values[0])
            profile = str(commit_data.loc[commit_data.org_full_name == name, 'profile'].values[0])
            address = str(commit_data.loc[commit_data.org_full_name == name, 'address'].values[0])
            # Fix: `team` previously read the 'org_name' column.
            team = str(commit_data.loc[commit_data.org_full_name == name, 'team'].values[0])
            # Fix: `fund_num` previously read the 'team' column.
            fund_num = str(commit_data.loc[commit_data.org_full_name == name, 'fund_num'].values[0])
            is_qualification = str(commit_data.loc[commit_data.org_full_name == name, 'is_qualification'].values[0])
            prize = str(commit_data.loc[commit_data.org_full_name == name, 'prize'].values[0])
            # Fix: `.values[0]` was missing, which stringified a whole Series.
            team_scale = str(commit_data.loc[commit_data.org_full_name == name, 'team_scale'].values[0])
            investment_idea = str(commit_data.loc[commit_data.org_full_name == name, 'investment_idea'].values[0])
            master_strategy = str(commit_data.loc[commit_data.org_full_name == name, 'master_strategy'].values[0])
            remark = str(commit_data.loc[commit_data.org_full_name == name, 'remark'].values[0])
            asset_mgt_scale = str(commit_data.loc[commit_data.org_full_name == name, 'asset_mgt_scale'].values[0])
            linkman = str(commit_data.loc[commit_data.org_full_name == name, 'linkman'].values[0])
            linkman_duty = str(commit_data.loc[commit_data.org_full_name == name, 'linkman_duty'].values[0])
            linkman_phone = str(commit_data.loc[commit_data.org_full_name == name, 'linkman_phone'].values[0])
            linkman_email = str(commit_data.loc[commit_data.org_full_name == name, 'linkman_email'].values[0])
            # Update the existing row in place.
            with con:
                cur = con.cursor()
                sql = """UPDATE org_info SET org_name=?, org_full_name=?, reg_code=?, reg_time=?, found_date=?, \
reg_capital=?, real_capital=?, region=?,profile=?, address=?, team=?, fund_num=?, is_qualification=?, \
prize=?, team_scale=?, investment_idea=?, master_strategy=?, remark=?, asset_mgt_scale=?, linkman=?, \
linkman_duty=?, linkman_phone=?, linkman_email=? WHERE org_id=?"""
                l = (org_name, org_full_name, reg_code, reg_time, found_date, reg_capital, real_capital, region, profile,
                     address, team, fund_num, is_qualification, prize, team_scale, investment_idea, master_strategy, remark,
                     asset_mgt_scale, linkman, linkman_duty, linkman_phone, linkman_email, org_id)
                cur.execute(sql, l)
            print("if")
        else:
            # New organization: append it with the next sequential org_id.
            sql_number = sql_number + 1
            commit_data = excel_df[excel_df["★机构全名"] == name]
            commit_data.columns = ["org_name", "org_full_name", "reg_code", "reg_time", "found_date", "reg_capital",
                                   "real_capital", "region", "profile", "address", "team", "fund_num",
                                   "is_qualification", "prize", "team_scale", "investment_idea", "master_strategy",
                                   "remark", "asset_mgt_scale", "linkman", "linkman_duty", "linkman_phone",
                                   "linkman_email"]
            commit_data.loc[:, "org_id"] = 'O'+'0'*(5-len(str(sql_number)))+str(sql_number)
            commit_data.to_sql("org_info", con, if_exists="append", index=False)
            print("else")
def df_to_sql_T_2(filefullpath, sheet, row_name):#路径名,sheet为sheet数,row_name为指定行为columns
#读取存在文件夹中的excel
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how="all")
excel_df = excel_df.dropna(axis=1, how="all")
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★基金全称'], inplace=True)
#数据库的读取
con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
sql = "SELECT * FROM fund_info"#!!!注意sql中没有表格会出错
sql_df = pd.read_sql(sql, con)
fund_name_list = sql_df['fund_full_name'].tolist()#list
sql_number = len(fund_name_list)
#依次对数据库中的每一行添加一列id
fund_id_number = 0
for fund_full_name in sql_df['fund_full_name'].unique():
fund_id_number = fund_id_number+1
fund_id = 'F'+'0'*(6-len(str(fund_id_number)))+str(fund_id_number)
with con:
cur = con.cursor()
cur.execute("""UPDATE fund_info SET fund_id=? WHERE fund_full_name=?""", (fund_id, fund_full_name))
#对excel进行读取
#excel_data = pd.read_excel(filefullpath, sheetname=sheet)
excel_name_list = excel_df['★基金全称'].tolist()#list
for name in excel_name_list:
if name in fund_name_list:
#提取数据库中的org_full_name为name的id
con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
sql = "SELECT * FROM fund_info"
sql_df = pd.read_sql(sql, con)
name_dataframe =sql_df[sql_df["fund_full_name"] == name]
fund_id = name_dataframe.loc[name_dataframe.last_valid_index(), 'fund_id']
#把excel的一行变成dataframe,并且加上id,并上传到数据库
commit_data = excel_df[excel_df["★基金全称"] == name]
commit_data.columns = ["group", "fund_type_strategy", "reg_code", "foundation_date", "fund_name",
"fund_full_name", "fund_manager", "fund_manager_nominal", "fund_stockbroker",
"fund_custodian", "fund_member", "fund_type_issuance", "fund_type_structure",
"fund_structure", "issue_scale", "asset_scale", "is_main_fund", "fee_pay",
"open_date", "locked_time_limit", "duration", "fee_manage", "fee_pay_remark",
"fee_redeem", "fee_subscription", "fee_trust", "investment_range",
"min_purchase_amount", "min_append_amount", "stop_line", "alert_line",
"manager_participation_scale", "investment_idea", "structure_hierarchy", "remark"]
commit_data["fund_id"] = str(fund_id)
#把一行表格dataframe提取其中的值
group = str(commit_data.loc[commit_data.fund_full_name == name, 'group'].values[0])
fund_type_strategy = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_type_strategy'].values[0])
reg_code = str(commit_data.loc[commit_data.fund_full_name == name, 'reg_code'].values[0])
foundation_date = str(commit_data.loc[commit_data.fund_full_name == name, 'foundation_date'].values[0])
fund_name = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_name'].values[0])
fund_full_name = str(name)
fund_manager = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_manager'].values[0])
fund_manager_nominal = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_manager_nominal'].values[0])
fund_stockbroker = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_stockbroker'].values[0])
fund_custodian = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_custodian'].values[0])
fund_member = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_member'].values[0])
fund_type_issuance = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_type_issuance'].values[0])
fund_type_structure = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_type_structure'].values[0])
fund_structure = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_structure'].values[0])
issue_scale = str(commit_data.loc[commit_data.fund_full_name == name, 'issue_scale'].values[0])
asset_scale = str(commit_data.loc[commit_data.fund_full_name == name, 'asset_scale'].values[0])
is_main_fund = str(commit_data.loc[commit_data.fund_full_name == name, 'is_main_fund'].values[0])
fee_pay = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_pay'].values[0])
open_date = str(commit_data.loc[commit_data.fund_full_name == name, 'open_date'])
locked_time_limit = str(commit_data.loc[commit_data.fund_full_name == name, 'locked_time_limit'].values[0])
duration = str(commit_data.loc[commit_data.fund_full_name == name, 'duration'].values[0])
fee_manage = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_manage'].values[0])
fee_pay_remark = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_pay_remark'].values[0])
fee_redeem = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_redeem'].values[0])
fee_subscription = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_subscription'].values[0])
fee_trust = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_trust'].values[0])
investment_range = str(commit_data.loc[commit_data.fund_full_name == name, 'investment_range'].values[0])
min_purchase_amount = str(commit_data.loc[commit_data.fund_full_name == name, 'min_purchase_amount'].values[0])
min_append_amount = str(commit_data.loc[commit_data.fund_full_name == name, 'min_append_amount'].values[0])
stop_line = str(commit_data.loc[commit_data.fund_full_name == name, 'stop_line'].values[0])
alert_line = str(commit_data.loc[commit_data.fund_full_name == name, 'alert_line'].values[0])
manager_participation_scale = str(commit_data.loc[commit_data.fund_full_name == name, 'manager_participation_scale'].values[0])
investment_idea = str(commit_data.loc[commit_data.fund_full_name == name, 'investment_idea'].values[0])
structure_hierarchy = str(commit_data.loc[commit_data.fund_full_name == name, 'structure_hierarchy'].values[0])
remark = str(commit_data.loc[commit_data.fund_full_name == name, 'remark'].values[0])
with con:
cur = con.cursor()
sql = """UPDATE fund_info SET 'group'=?, fund_type_strategy=?, reg_code=?, foundation_date=?, fund_name=?,\
fund_full_name=?, fund_manager=?, fund_manager_nominal=?, fund_stockbroker=?, fund_custodian=?, fund_member=?,\
fund_type_issuance=?, fund_type_structure=?, fund_structure=?, issue_scale=?, asset_scale=?, is_main_fund=?, fee_pay=?,\
open_date=?, locked_time_limit=?, duration=?, fee_manage=?, fee_pay_remark=?, fee_redeem=?, fee_subscription=?, fee_trust=?,\
investment_range=?, min_purchase_amount=?, min_append_amount=?, stop_line=?, alert_line=?, manager_participation_scale=?, \
investment_idea=?, structure_hierarchy=?, remark=? WHERE fund_id=?"""
l = (group, fund_type_strategy, reg_code, foundation_date, fund_name, fund_full_name, fund_manager, \
fund_manager_nominal, fund_stockbroker, fund_custodian, fund_member, fund_type_issuance, \
fund_type_structure, fund_structure, issue_scale, asset_scale, is_main_fund, fee_pay, open_date, \
locked_time_limit, duration, fee_manage, fee_pay_remark, fee_redeem, fee_subscription, fee_trust, \
investment_range, min_purchase_amount, min_append_amount, stop_line, alert_line, manager_participation_scale, \
investment_idea, structure_hierarchy, remark, fund_id)
cur.execute(sql, l)
print("if")
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df["★基金全称"] == name]
commit_data.columns = ["group", "fund_type_strategy", "reg_code", "foundation_date", "fund_name", "fund_full_name", \
"fund_manager", "fund_manager_nominal", "fund_stockbroker", "fund_custodian", "fund_member", \
"fund_type_issuance", "fund_type_structure", "fund_structure", "issue_scale", "asset_scale", \
"is_main_fund", "fee_pay", "open_date", "locked_time_limit", "duration", "fee_manage", \
"fee_pay_remark", "fee_redeem", "fee_subscription", "fee_trust", "investment_range", \
"min_purchase_amount", "min_append_amount", "stop_line", "alert_line", "manager_participation_scale", \
"investment_idea", "structure_hierarchy", "remark"]
commit_data.loc[:, "fund_id"] = 'F'+'0'*(6-len(str(sql_number)))+str(sql_number)
commit_data.to_sql("fund_info", con, if_exists="append", index=False)
print("else")
def df_to_sql_T_3(filefullpath, sheet, row_name):# filefullpath: excel path; sheet: sheet index; row_name: row promoted to the header
    """Load the staff sheet of an uploaded workbook into the manager_info table.

    The sheet is transposed so the row labelled *row_name* becomes the column
    header. Names (column 1星姓名) already present in manager_info are UPDATEd in
    place; unseen names are appended with a generated id of the form M00001.
    NOTE(review): database path is hardcoded; `sheetname=` is the pre-0.21
    pandas keyword — confirm the pinned pandas version.
    """
    # --- read and normalise the excel sheet ---
    excel_df = pd.read_excel(filefullpath, sheetname=sheet)
    excel_df = excel_df.dropna(how="all")
    excel_df = excel_df.dropna(axis=1, how="all")
    excel_df = excel_df.T
    excel_df.columns = excel_df.loc[row_name]# promote the row labelled row_name to the column header
    excel_df = excel_df.drop(row_name, axis=0, inplace=False)# drop the header row from the data
    excel_df.index = range(len(excel_df))
    excel_df.drop_duplicates(subset=['★姓名'], inplace=True)
    # --- read current database contents ---
    con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
    sql = "SELECT * FROM manager_info"# NOTE: raises if the table does not exist yet
    sql_df = pd.read_sql(sql, con)
    user_list = sql_df['user_name'].tolist()# names already stored
    sql_number = len(user_list)
    # --- (re)assign a sequential, zero-padded user_id to every stored row ---
    user_id_number = 0
    for user_name in sql_df['user_name'].unique():
        user_id_number = user_id_number+1
        user_id = 'M'+'0'*(5-len(str(user_id_number)))+str(user_id_number)# e.g. M00007
        with con:
            cur = con.cursor()
            cur.execute("""UPDATE manager_info SET user_id=? WHERE user_name=?""", (user_id, user_name))
    # --- walk the excel rows: update existing names, append new ones ---
    #excel_data = pd.read_excel(filefullpath, sheetname=sheet)
    excel_name_list = excel_df['★姓名'].tolist()
    for name in excel_name_list:
        if name in user_list:
            # name already present: re-read the table to fetch its user_id
            con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
            sql = "SELECT * FROM manager_info"
            sql_df = pd.read_sql(sql, con)
            name_dataframe =sql_df[sql_df["user_name"] == name]
            user_id = name_dataframe.loc[name_dataframe.last_valid_index(), 'user_id']# id of the last matching row
            # turn the matching excel row into a dataframe, attach the id, push to the db
            commit_data = excel_df[excel_df["★姓名"] == name]
            commit_data.columns = ["user_name", "sex", "org_name", "introduction", "photo", "entry_date",
                                   "investment_years", "education", "duty", "qualification", "background", "is_fund_qualification",
                                   "is_core_member", "resume", "max_asset_mgt_scale", "prize", "remark"]
            commit_data["user_id"] = str(user_id)# not used by the UPDATE below
            # pull the scalar value for each column of the single matching row
            user_name = str(name)
            sex = str(commit_data.loc[commit_data.user_name == name, 'sex'].values[0])
            org_name = str(commit_data.loc[commit_data.user_name == name, 'org_name'].values[0])
            introduction = str(commit_data.loc[commit_data.user_name == name, 'introduction'].values[0])
            photo = str(commit_data.loc[commit_data.user_name == name, 'photo'].values[0])
            entry_date = str(commit_data.loc[commit_data.user_name == name, 'entry_date'].values[0])
            investment_years = str(commit_data.loc[commit_data.user_name == name, 'investment_years'].values[0])
            education = str(commit_data.loc[commit_data.user_name == name, 'education'].values[0])
            duty = str(commit_data.loc[commit_data.user_name == name, 'duty'].values[0])
            qualification = str(commit_data.loc[commit_data.user_name == name, 'qualification'].values[0])
            background = str(commit_data.loc[commit_data.user_name == name, 'background'].values[0])
            is_fund_qualification = str(commit_data.loc[commit_data.user_name == name, 'is_fund_qualification'].values[0])
            is_core_member = str(commit_data.loc[commit_data.user_name == name, 'is_core_member'].values[0])
            resume = str(commit_data.loc[commit_data.user_name == name, 'resume'].values[0])
            max_asset_mgt_scale = str(commit_data.loc[commit_data.user_name == name, 'max_asset_mgt_scale'].values[0])
            prize = str(commit_data.loc[commit_data.user_name == name, 'prize'].values[0])
            remark = str(commit_data.loc[commit_data.user_name == name, 'remark'].values[0])
            with con:
                cur = con.cursor()
                sql = """UPDATE manager_info SET user_name=?, sex=?, org_name=?, introduction=?, photo=?, \
entry_date=?, investment_years=?, education=?, duty=?, qualification=?, background=?, is_fund_qualification=?, \
is_core_member=?, resume=?, max_asset_mgt_scale=?, prize=?, remark=? WHERE user_id=?"""
                l = (user_name, sex, org_name, introduction, photo, entry_date, investment_years, education, \
                     duty, qualification, background, is_fund_qualification, is_core_member, resume, max_asset_mgt_scale, \
                     prize, remark, user_id)
                cur.execute(sql, l)
            print("if")
        else:
            # new name: rename the excel columns to the db schema and append
            sql_number = sql_number + 1
            commit_data = excel_df[excel_df["★姓名"] == name]
            commit_data.columns = ["user_name", "sex", "org_name", "introduction", "photo", "entry_date", \
                                   "investment_years", "education", "duty", "qualification", "background", \
                                   "is_fund_qualification", "is_core_member", "resume", "max_asset_mgt_scale", "prize", \
                                   "remark"]
            commit_data.loc[:, "user_id"] = 'M'+'0'*(5-len(str(sql_number)))+str(sql_number)# next free id
            commit_data.to_sql("manager_info", con, if_exists="append", index=False)
            print("else")
def df_to_sql_4(filefullpath, sheet, row_name):
    """Load the NAV (net asset value) sheet into the fund_nav_data table.

    Existing (fund_name, statistic_date) pairs are UPDATEd; everything else
    is appended. NOTE(review): database path is hardcoded.
    """
    # --- read and normalise the excel sheet; forward-fill the merged name column ---
    excel_df = pd.read_excel(filefullpath, sheetname=sheet)
    excel_df = excel_df.dropna(how="all")
    #excel_df = excel_df.dropna(axis=1, how="all")
    excel_df[row_name] = excel_df[row_name].ffill()
    excel_df.index = range(len(excel_df))
    print(excel_df)
    # --- read the current fund_nav_data contents ---
    con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
    sql = "SELECT * FROM fund_nav_data"
    sql_df = pd.read_sql(sql, con)
    name_list = sql_df['fund_name'].tolist()
    date_list = sql_df['statistic_date'].tolist()
    print("name_list")
    #print(type(name_list[0]))
    print(name_list)
    print("date_list")
    #print(type(date_list[0]))
    print(date_list)
    # --- copy fund_id from fund_info into fund_nav_data, matching on fund_name ---
    for fund_name in sql_df['fund_name'].unique():
        sql = "SELECT * FROM fund_info"
        fund_info_sql_df = pd.read_sql(sql, con)
        fund_id = fund_info_sql_df.loc[fund_info_sql_df.fund_name == fund_name, 'fund_id'].values[0]
        with con:
            cur = con.cursor()
            cur.execute("""UPDATE fund_nav_data SET fund_id=? WHERE fund_name=?""", (fund_id, fund_name))
    # --- walk the excel rows, one (fund, date) pair at a time ---
    excel_name_list = excel_df['基金简称'].tolist()
    excel_name_list = list(set(excel_name_list))# de-duplicate fund names
    print("excel_name_list")
    #print(type(excel_name_list[0]))
    print(excel_name_list)
    for name in excel_name_list:
        statistic_date_series = excel_df.loc[excel_df['基金简称'] == name, '净值日期']
        excel_date_list = statistic_date_series.tolist()
        excel_date_list = [str(i) for i in excel_date_list]
        print("excel_date_list")
        #print(type(excel_date_list[0]))
        print(excel_date_list)
        for date in excel_date_list:
            if name in date_list and date in date_list if False else (name in name_list and date in date_list):
                commit_data = excel_df[excel_df['基金简称'] == name]
                print(commit_data.columns)
                commit_data.columns = ["fund_name", "statistic_date", "nav", "added_nav", "total_share", "total_asset", "total_nav", "is_split", "is_open_date", "split_ratio", "after_tax_bonus"]
                commit_data["fund_id"] = str(fund_id)# NOTE(review): fund_id is left over from the loop above — likely stale; verify
                fund_name = name
                statistic_date = str(date)
                nav = str(commit_data.loc[commit_data.statistic_date == date, 'nav'].values[0])
                added_nav = str(commit_data.loc[commit_data.statistic_date == date, 'added_nav'].values[0])
                total_share = str(commit_data.loc[commit_data.statistic_date == date, 'total_share'].values[0])
                total_asset = str(commit_data.loc[commit_data.statistic_date == date, 'total_asset'].values[0])
                total_nav = str(commit_data.loc[commit_data.statistic_date == date, 'total_nav'].values[0])
                is_split = str(commit_data.loc[commit_data.statistic_date == date, 'is_split'].values[0])
                is_open_date = str(commit_data.loc[commit_data.statistic_date == date, 'is_open_date'].values[0])
                split_ratio = str(commit_data.loc[commit_data.statistic_date == date, 'split_ratio'].values[0])
                after_tax_bonus = str(commit_data.loc[commit_data.statistic_date == date, 'after_tax_bonus'].values[0])
                with con:
                    cur = con.cursor()
                    sql = """UPDATE fund_nav_data SET nav=?, added_nav=?, total_share=?, total_asset=?, total_nav=?, is_split=?, is_open_date=?, split_ratio=?, after_tax_bonus=? WHERE fund_name=? AND statistic_date=?"""
                    l = (nav, added_nav, total_share, total_asset, total_nav, is_split, is_open_date, split_ratio, after_tax_bonus, fund_name, statistic_date)
                    cur.execute(sql, l)
                print("if")
            else:
                # unseen pair: rename to the db schema and append the single row
                commit_data = excel_df[(excel_df["基金简称"] == name)&(excel_df["净值日期"] == date)]
                commit_data.columns = ["fund_name", "statistic_date", "nav", "added_nav", "total_share", "total_asset", "total_nav", "is_split", "is_open_date", "split_ratio", "after_tax_bonus"]
                commit_data.to_sql("fund_nav_data", con, if_exists="append", index=False)
                print("else")
def listing(request):
    """Handle the upload form: save the file, then import each workbook sheet.

    POST with a valid form saves the upload via UserProfile, then dispatches
    sheets 1-4 of the workbook to the matching df_to_sql_* importer.
    GET (or an anonymous/invalid POST) renders an empty form.
    NOTE(review): upload path is hardcoded and must match MEDIA settings.
    """
    context = {}
    if request.method == "POST":
        uf = UserForm(request.POST, request.FILES)
        if request.user.username and uf.is_valid():
            #username = uf.cleaned_data['username']
            user_upload_file = uf.cleaned_data['user_upload_file']
            # persist the upload record to the database
            profile = UserProfile()
            profile.username = request.user.username
            profile.user_upload_file = user_upload_file
            profile.save()
            file_name = request.FILES.get('user_upload_file').name
            path = "C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\upload\\upload\\"
            #C:\Users\K\Desktop\excel - upload - sqlite3\excel - upload - sqlite3\mins\upload\upload\华泰大赛参赛私募基金数据填报模板.xlsx
            filefullpath = path + file_name
            #print(filefullpath)
            if user_upload_file:
                b = xlrd.open_workbook(filefullpath)
                #count = len(b.sheets())  # not needed: the sheet count is fixed
                # sheets 1-4 have fixed layouts; each gets its own importer
                for sheet in range(1, 5):
                    if sheet == 1:
                        row_name = "公司资料简介"
                        df_to_sql_T_1(filefullpath, sheet, row_name)
                    if sheet == 2:
                        row_name = "基金简介"
                        df_to_sql_T_2(filefullpath, sheet, row_name)
                    if sheet == 3:
                        row_name = "人员简介"
                        df_to_sql_T_3(filefullpath, sheet, row_name)
                    if sheet == 4:
                        row_name = "基金简称"
                        df_to_sql_4(filefullpath, sheet, row_name)
                return HttpResponse('upload ok!')
        else:
            # anonymous user or invalid form: send to the login page
            return redirect(to='login')
    else:
        uf = UserForm()
    context['uf'] = uf
    return render(request, 'website/templates/listing.html', context)
def index_login(request):
    """Render the login form (GET) or authenticate the poster (POST)."""
    if request.method == "GET":
        # The form *class* is handed to the template (Django calls it to render).
        form = AuthenticationForm
    if request.method == "POST":
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            login(request, form.get_user())
            return redirect(to='list')
    return render(request, 'register_login.html', {'form': form})
def index_register(request):
    """Render the signup form (GET) or create a new account (POST)."""
    if request.method == 'GET':
        # The form *class* is handed to the template (Django calls it to render).
        form = UserCreationForm
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect(to='login')
    return render(request, 'register_login.html', {'form': form})
6,580 | f8e6f6e1be6c4ea306b7770c918b97808a0765b2 | import random
import time
import unittest
from old import dict_groupby
class TestDictGroupBy(unittest.TestCase):
    """Tests for the dict-based groupby implementation in old.dict_groupby.

    The generate_* helpers build deterministic fixtures from a seeded RNG;
    the exact sequence of random.* calls must not change or the fixtures do.
    """
    def setUp(self):
        # fixed seed so generated fixtures are reproducible across runs
        random.seed(0)
        self.sut = dict_groupby
    def generate_transaction(self):
        # one synthetic transaction: a category plus an amount
        return {
            'transaction_type': random.choice(['a', 'b', 'c']),
            'outstanding': random.randint(0, 100)
        }
    def generate_facility(self):
        # a facility holds 1-3 transactions; its outstanding is their total
        num_transactions = random.randint(1, 3)
        transactions = {}
        outstanding = 0
        for i in range(num_transactions):
            transactions[i] = self.generate_transaction()
            outstanding += transactions[i]['outstanding']
        return {
            'facility_type': random.choice(['a', 'b', 'c']),
            'outstanding': outstanding,
            'transactions': transactions
        }
    def generate_facilities(self, num):
        # dict of `num` facilities keyed by index
        out = {}
        for i in range(num):
            out[i] = self.generate_facility()
        return out
    def generate_record(self):
        # a flat record: three grouping columns and three value columns
        return {
            'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.choice(['a', 'b', 'c']),
            'gcol3': random.choice(['a', 'b', 'c']), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),
            'vcol3': random.randint(0, 2)
        }
    def test_hierarchical_groupby(self):
        """Smoke test: grouping by a nested column spec does not raise."""
        input_set = self.generate_facilities(4)
        group_columns = ['facility_type', {'transactions': 'transaction_type'}]
        print(input_set)
        self.sut.DictGroupBy(input_set, group_columns)
    def test_groupby_and_sum_speed(self):
        """Rough timing of groupby+sum over 100k flat records (printed, not asserted)."""
        data = {}
        for i in range(100000):
            data[i] = self.generate_record()
        print('Generated data.')
        group_columns = ['gcol1', 'gcol2', 'gcol3']
        t0 = time.time()
        gb = dict_groupby.GroupByObj(data, group_columns)
        t1 = time.time()
        out = gb.sum()
        tf = time.time()
        # print(out)
        print(t1 - t0, tf - t1, tf - t0)
        # df = pd.DataFrame(data).T
        # t0 = time.time()
        # df.groupby(group_columns).sum()
        # tf = time.time()
        # # print(out)
        # print(tf - t0)
6,581 | 0229783467b8bcd0361baf6be07e3261f34220c7 |
from numpy.testing import assert_almost_equal
from fastats.maths.norm_cdf import norm_cdf
def test_norm_cdf_basic_sanity():
    """The CDF of a standard normal evaluated at its mean is one half."""
    expected = 0.5
    assert_almost_equal(expected, norm_cdf(0.0, 0, 1))
def test_norm_cdf_dartmouth():
    """
    Example taken from:
    https://math.dartmouth.edu/archive/m20f12/public_html/matlabnormal
    stored in the literature directory as dartmouth_normcdf_norminv.pdf
    """
    result = norm_cdf(90, 100, 4)
    assert_almost_equal(0.0062, result, decimal=4)
if __name__ == '__main__':
    # Allow running this test module directly (outside a pytest invocation).
    import pytest
    pytest.main([__file__])
|
6,582 | f14a8d0d51f0baefe20b2699ffa82112dad9c38f | no_list = {"tor:", "getblocktemplate", " ping ", " pong "}
# Strip noisy lines (any line containing a no_list substring) from logs 1-4,
# appending the survivors to a matching "<n>-clean.log" file.
for i in range(1, 5):
    with open("Desktop/"+str(i)+".log", "r") as r:
        with open("Desktop/"+str(i)+"-clean.log", "a+") as w:
            for line in r:
                if not any(s in line for s in no_list):
                    w.write(line)
|
6,583 | aa6464c53176be9d89c6c06997001da2b3ee1e5c | from django import forms
from .models import Diagnosis, TODOItem
class DiagnosisForm(forms.ModelForm):
    """ModelForm for creating or editing a Diagnosis record."""
    class Meta:
        model = Diagnosis
        fields = ['name', 'Rostered_physician', 'condition', 'details', 'date_of_diagnosis', 'content']
class TODOItemForm(forms.ModelForm):
    """ModelForm for creating or editing a TODOItem record."""
    class Meta:
        model = TODOItem
        fields = ['job', 'due_date', 'medication_details', 'completed']
|
6,584 | 2fdbf418b5cec50ee6568897e0e749681efeef6b | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'find_result_window.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FindResultWindow(object):
    """Generated PyQt5 UI scaffold for the find-result window (do not hand-edit;
    regenerate from find_result_window.ui instead)."""
    def setupUi(self, FindResultWindow):
        # fixed-size window with a result list and a (disabled) edit button
        FindResultWindow.setObjectName("FindResultWindow")
        FindResultWindow.resize(801, 546)
        self.centralwidget = QtWidgets.QWidget(FindResultWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.btnEdit = QtWidgets.QPushButton(self.centralwidget)
        self.btnEdit.setEnabled(False)
        self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))
        self.btnEdit.setCheckable(False)
        self.btnEdit.setAutoDefault(False)
        self.btnEdit.setObjectName("btnEdit")
        self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)
        self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))
        self.listWidgetFindResult.setObjectName("listWidgetFindResult")
        FindResultWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(FindResultWindow)
        QtCore.QMetaObject.connectSlotsByName(FindResultWindow)
    def retranslateUi(self, FindResultWindow):
        # apply translated (Russian) captions
        _translate = QtCore.QCoreApplication.translate
        FindResultWindow.setWindowTitle(_translate("FindResultWindow", "Информация о приборах"))
        self.btnEdit.setText(_translate("FindResultWindow", "Изменить данные"))
|
6,585 | eb891341488e125ae8c043788d7264fff4018614 | #!/usr/bin/env python
from http.client import HTTPConnection
import pytest
from circuits.web import Controller
from circuits.web.client import Client, request
from .helpers import urlopen
class Root(Controller):
    """Test web controller: small endpoints exercising bodies and headers,
    including non-ASCII (``ä``) payloads."""
    def index(self):
        # plain greeting used by test_index
        return "Hello World!"
    def request_body(self):
        # echo the raw request body back to the client
        return self.request.body.read()
    def response_body(self):
        # non-ASCII body to exercise response encoding
        return "ä"
    def request_headers(self):
        # echo the value of the incoming "A" header
        return self.request.headers["A"]
    def response_headers(self):
        # set a non-ASCII response header and body
        self.response.headers["A"] = "ä"
        return "ä"
    def argument(self, arg):
        # echo a decoded form/query argument
        return arg
def test_index(webapp):
    """The root handler answers with the plain greeting."""
    response = urlopen(webapp.server.http.base)
    assert response.read() == b"Hello World!"
@pytest.mark.parametrize('body', [
    "ä".encode(),
    "ä".encode('iso8859-1'),
])
def test_request_body(webapp, body):
    """POSTed bytes are echoed back verbatim in either encoding."""
    conn = HTTPConnection(webapp.server.host, webapp.server.port)
    conn.connect()
    conn.request("POST", "/request_body", body)
    reply = conn.getresponse()
    assert reply.status == 200
    assert reply.reason == "OK"
    assert reply.read() == body
    conn.close()
def test_response_body(webapp):
    """A non-ASCII response body arrives UTF-8 encoded."""
    conn = HTTPConnection(webapp.server.host, webapp.server.port)
    conn.connect()
    conn.request("GET", "/response_body")
    reply = conn.getresponse()
    assert reply.status == 200
    assert reply.reason == "OK"
    assert reply.read() == "ä".encode()
    conn.close()
def test_request_headers(webapp):
    """A non-ASCII request header round-trips through the echo handler."""
    conn = HTTPConnection(webapp.server.host, webapp.server.port)
    conn.connect()
    conn.request("GET", "/request_headers", b"", {"A": "ä"})
    reply = conn.getresponse()
    assert reply.status == 200
    assert reply.reason == "OK"
    assert reply.read() == "ä".encode()
    conn.close()
def test_response_headers(webapp):
    """A non-ASCII response header and body survive the circuits Client."""
    client = Client()
    client.start()
    client.fire(
        request(
            "GET",
            "http://%s:%s/response_headers" % (
                webapp.server.host, webapp.server.port,
            ),
        ),
    )
    # busy-wait until the asynchronous client has a response
    while client.response is None:
        pass
    assert client.response.status == 200
    assert client.response.reason == 'OK'
    s = client.response.read()
    a = client.response.headers.get('A')
    assert a == "ä"
    assert s == "ä".encode()
def test_argument(webapp):
    """A percent-encoded UTF-8 form value is decoded before reaching the handler."""
    conn = HTTPConnection(webapp.server.host, webapp.server.port)
    conn.connect()
    payload = 'arg=%E2%86%92'  # "→" percent-encoded
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    conn.request("POST", "/argument", payload, headers)
    reply = conn.getresponse()
    assert reply.status == 200
    assert reply.reason == "OK"
    assert reply.read().decode('utf-8') == '\u2192'
    conn.close()
|
6,586 | c5b40b373953a2375eeca453a65c49bdbb8715f1 | '''import math
x = 5
print("sqrt of 5 is", math.sqrt(64))
str1 = "bollywood"
str2 = 'ody'
if str2 in str1:
print("String found")
else:
print("String not found")
print(10+20)'''
#try:
#block of code
#except Exception l:
#block of code
#else:
#this code executes if except block is executed
# Demo 1: try/except/else — `else` runs only when no exception was raised.
try:
    fh = open("testfile.txt", "w")
    fh.write("This is my test file for exception handling! !")
except IOError:
    print("Error: can\'t find file or read data")
else:
    print("written content in the file successfully")
fh = open("testfile.txt", "r+")
print(fh.read())
fh.close()
print(fh.closed)
# Demo 2: nested try/finally — the file is closed whether or not write fails.
try:
    fileptr = open("file.txt", "w")
    try:
        fileptr.write("Hi I am good")
    finally:
        fileptr.close()
        print("file.closed")
except:
    print("Error")
else:
    print("inside else block")
# Demo 3: raising an exception manually and catching it.
try:
    age = int(input("Enter the age?"))
    if age<18:
        raise ValueError
    else:
        print("the age is valid")
except ValueError:
    print("The age is not valid")
|
6,587 | bd0530b6f3f7b1a5d72a5b11803d5bb82f85105d | import numpy as np
import math
# Coefficient matrix of the 3x3 linear system to solve.
a = [
    [0.54, -0.04, 0.10],
    [-0.04, 0.50, 0.12],
    [0.10, 0.12, 0.71]
]
# Right-hand-side vector.
b = [0.33, -0.05, 0.28]
# Gaussian elimination
def gauss(left, right, prec=3):
    """Solve the linear system ``left @ x = right`` by Gauss-Jordan elimination.

    Parameters
    ----------
    left : square coefficient matrix (nested lists or array-like).
    right : right-hand-side vector.
    prec : number of decimals used when formatting the answer.

    Returns
    -------
    dict mapping 'x1', 'x2', ... to formatted solution strings, or an error
    message string when the system is inconsistent.
    """
    # Build the augmented matrix. Forcing float dtype fixes silent truncation
    # for integer input (the in-place row division below would otherwise be
    # performed in integer arithmetic).
    arr = np.concatenate((np.array(left, dtype=float),
                          np.array([right], dtype=float).T), axis=1)
    print('\nИсходная матрица:')
    print(arr)
    # Consistency check (Kronecker-Capelli): rank(A) must equal rank(A|b).
    if np.linalg.matrix_rank(left) != np.linalg.matrix_rank(arr):
        return 'Решений нет!'
    # Forward elimination to row-echelon form.
    for j in range(len(arr)):
        # Partial pivoting on |value|. The original compared signed values,
        # which skipped columns whose only nonzero entries were negative
        # (e.g. [0, -1]) and hurt numerical stability.
        lead = max(range(j, len(arr)), key=lambda i: abs(arr[i][j]))
        # A column that is all zeros below the diagonal cannot be eliminated.
        if arr[lead][j] == 0:
            continue
        # Swap the pivot row into position.
        arr[[j, lead]] = arr[[lead, j]]
        # Normalise the pivot row, then zero the entries below the pivot.
        arr[j] = arr[j] / arr[j][j]
        for i in range(j + 1, len(arr)):
            arr[i] = arr[i] - arr[j] * arr[i][j]
        print('\nШаг ', j)
        print(arr)
    # Backward pass: reduce the matrix to the identity.
    for j in reversed(range(len(arr))):
        for i in reversed(range(j)):
            arr[i] = arr[i] - arr[j] * arr[i][j]
    print('\nМатрица в единичном виде')
    print(arr)
    # Format the solution column as {'x1': ..., 'x2': ...}.
    answer = {('x' + str(i + 1))
              : format(arr[:, -1][i], f'.{prec}f') for i in range(len(arr))}
    return answer
def norm_1(matrix):
    """Infinity norm: the maximum absolute row sum of *matrix*."""
    rows = np.array(matrix)
    return max(np.sum(np.absolute(row)) for row in rows)
def norm_2(matrix):
    """1-norm: the maximum absolute column sum of *matrix*.

    Transposing lets the row-sum logic of norm_1 operate on columns.
    (Removed a redundant second ``np.array`` conversion — the transpose is
    already an ndarray.)
    """
    data = np.array(matrix).T
    return max([np.sum(np.absolute(data[i])) for i in range(len(data))])
def norm_3(matrix):
    """Euclidean (Frobenius) norm: sqrt of the sum of squared entries."""
    flat = np.array(matrix).flatten()
    return math.sqrt(np.sum(np.square(flat)))
def converges(matrix):
    """True when at least one of the three matrix norms is below 1."""
    return min(norm_1(matrix), norm_2(matrix), norm_3(matrix)) < 1
# Метод простой итерации
def iteration(left, right, eps=0.0001, prec=5):
    """Solve ``left @ x = right`` by simple (Jacobi) iteration.

    Returns a dict of formatted solution strings, or an error message when
    the iteration matrix does not satisfy the convergence criterion.
    """
    # Build the iteration matrix Alpha: -a_ij / a_ii off the diagonal, 0 on it.
    alpha = [[(-left[i][j] / left[i][i]) if (i != j)
              else 0 for j in range(len(left))] for i in range(len(left[0]))]
    # Build the vector Beta: b_i / a_ii.
    beta = np.array([right[i] / left[i][i] for i in range(len(left))])
    # A-priori error estimate for the first step.
    # NOTE(review): taking the *minimum* of the three norms gives the most
    # optimistic bound — confirm this matches the intended estimate.
    norm_alpha = min(norm_1(alpha), norm_2(alpha), norm_3(alpha))
    norm_beta = norm_1(beta)
    cur_eps = norm_alpha / (1 - norm_alpha) * norm_beta
    # Proceed only if some norm of Alpha is below 1 (sufficient condition).
    if converges(alpha):
        # Use Beta as the initial approximation.
        x = np.copy(beta)
        it = 0
        # Iterate until the estimated error drops below eps.
        while cur_eps > eps:
            # Remember the previous approximation.
            prev_x = np.copy(x)
            # Next approximation: x_{k+1} = Alpha x_k + Beta.
            x = np.dot(alpha, prev_x) + beta
            # Tighten the error estimate by one factor of ||Alpha||.
            cur_eps = cur_eps * norm_alpha
            it += 1
            print('Итерация', it, ': X =', x)
        # Format and return the result.
        answer = {('x' + str(i + 1))
                  : format(x[i], f'.{prec}f') for i in range(len(x))}
        return answer
    # Otherwise report divergence.
    else:
        return 'Решение не сходится!'
# Solve the sample system with both methods and print the solutions.
print('Метод Гаусса')
res = gauss(a, b, prec=5)
print('Решение:', res)
print('\nМетод простой итерации')
res = iteration(a, b, eps=0.01, prec=5)
print('Решение:', res)
|
6,588 | 68f8b301d86659f9d76de443b0afe93fd7f7e8c2 | # getting a sample of data to parse for the keys of the players
import requests
import xml.etree.ElementTree as ET
currentPlayerInfoUrl="http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=1&LeagueID=00&Season=2015-16"
r=requests.get(currentPlayerInfoUrl)
if r.status_code == requests.codes.ok:
with open('currentPlayerDump.json','w') as f:
for line in r.text:
f.write(line)
|
6,589 | 38f6700b283bdc68a0271cb3ec397ce72aa2de3c | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: filecmp
import os, stat
from itertools import ifilter, ifilterfalse, imap, izip
# Public API of this decompiled filecmp module.
__all__ = [
 'cmp', 'dircmp', 'cmpfiles']
# Cache of previous file comparisons, keyed by (f1, f2, sig1, sig2).
_cache = {}
# Chunk size (bytes) used when comparing file contents.
BUFSIZE = 8192
def cmp(f1, f2, shallow=1):
    """Compare two files; return True if they appear equal.

    With shallow=1 (default), identical os.stat signatures (type, size,
    mtime) are accepted without reading the files. Otherwise contents are
    compared, with results memoised in the module-level _cache.
    """
    s1 = _sig(os.stat(f1))
    s2 = _sig(os.stat(f2))
    # Only regular files can compare equal.
    if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
        return False
    if shallow and s1 == s2:
        return True
    # Different sizes can never have equal contents.
    if s1[1] != s2[1]:
        return False
    outcome = _cache.get((f1, f2, s1, s2))
    if outcome is None:
        outcome = _do_cmp(f1, f2)
        # Crude bound on cache growth: wipe it once it exceeds 100 entries.
        if len(_cache) > 100:
            _cache.clear()
        _cache[(f1, f2, s1, s2)] = outcome
    return outcome
def _sig(st):
return (
stat.S_IFMT(st.st_mode),
st.st_size,
st.st_mtime)
def _do_cmp(f1, f2):
    """Compare file contents chunk by chunk; True iff the bytes are identical."""
    bufsize = BUFSIZE
    with open(f1, 'rb') as (fp1):
        with open(f2, 'rb') as (fp2):
            while True:
                b1 = fp1.read(bufsize)
                b2 = fp2.read(bufsize)
                if b1 != b2:
                    return False
                # Both streams exhausted simultaneously: files are equal.
                if not b1:
                    return True
class dircmp:
    """Directory comparison object (Python 2 decompile of stdlib filecmp.dircmp).

    Attributes such as left_list, common, diff_files, subdirs are computed
    lazily: __getattr__ looks the name up in methodmap and runs the phase
    that produces it.
    """
    def __init__(self, a, b, ignore=None, hide=None):
        # a/b: the two directories; hide: names never listed; ignore: names
        # listed but never compared.
        self.left = a
        self.right = b
        if hide is None:
            self.hide = [
             os.curdir, os.pardir]
        else:
            self.hide = hide
        if ignore is None:
            self.ignore = [
             'RCS', 'CVS', 'tags']
        else:
            self.ignore = ignore
        return
    def phase0(self):
        # List both directories, minus hidden/ignored names.
        self.left_list = _filter(os.listdir(self.left), self.hide + self.ignore)
        self.right_list = _filter(os.listdir(self.right), self.hide + self.ignore)
        self.left_list.sort()
        self.right_list.sort()
    def phase1(self):
        # Split names into common / left-only / right-only, comparing
        # case-normalised names but reporting the original spellings.
        a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
        b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))
        self.common = map(a.__getitem__, ifilter(b.__contains__, a))
        self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a))
        self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b))
    def phase2(self):
        # Classify common names as directories, regular files or "funny"
        # (unstat-able or of mismatched/unsupported type).
        self.common_dirs = []
        self.common_files = []
        self.common_funny = []
        for x in self.common:
            a_path = os.path.join(self.left, x)
            b_path = os.path.join(self.right, x)
            ok = 1
            try:
                a_stat = os.stat(a_path)
            except os.error as why:
                ok = 0
            try:
                b_stat = os.stat(b_path)
            except os.error as why:
                ok = 0
            if ok:
                a_type = stat.S_IFMT(a_stat.st_mode)
                b_type = stat.S_IFMT(b_stat.st_mode)
                if a_type != b_type:
                    self.common_funny.append(x)
                elif stat.S_ISDIR(a_type):
                    self.common_dirs.append(x)
                elif stat.S_ISREG(a_type):
                    self.common_files.append(x)
                else:
                    self.common_funny.append(x)
            else:
                self.common_funny.append(x)
    def phase3(self):
        # Shallow-compare the common regular files.
        xx = cmpfiles(self.left, self.right, self.common_files)
        self.same_files, self.diff_files, self.funny_files = xx
    def phase4(self):
        # Build child dircmp objects for common subdirectories (lazy, not
        # recursive until their attributes are touched).
        self.subdirs = {}
        for x in self.common_dirs:
            a_x = os.path.join(self.left, x)
            b_x = os.path.join(self.right, x)
            self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
    def phase4_closure(self):
        # Recursively force subdirectory comparison for the whole tree.
        self.phase4()
        for sd in self.subdirs.itervalues():
            sd.phase4_closure()
    def report(self):
        # Print a one-level comparison summary (Python 2 print statements).
        print 'diff', self.left, self.right
        if self.left_only:
            self.left_only.sort()
            print 'Only in', self.left, ':', self.left_only
        if self.right_only:
            self.right_only.sort()
            print 'Only in', self.right, ':', self.right_only
        if self.same_files:
            self.same_files.sort()
            print 'Identical files :', self.same_files
        if self.diff_files:
            self.diff_files.sort()
            print 'Differing files :', self.diff_files
        if self.funny_files:
            self.funny_files.sort()
            print 'Trouble with common files :', self.funny_files
        if self.common_dirs:
            self.common_dirs.sort()
            print 'Common subdirectories :', self.common_dirs
        if self.common_funny:
            self.common_funny.sort()
            print 'Common funny cases :', self.common_funny
    def report_partial_closure(self):
        # Report this directory and its immediate subdirectories.
        self.report()
        for sd in self.subdirs.itervalues():
            print
            sd.report()
    def report_full_closure(self):
        # Report this directory and every subdirectory, recursively.
        self.report()
        for sd in self.subdirs.itervalues():
            print
            sd.report_full_closure()
    # Maps a lazily computed attribute to the phase method that fills it in.
    methodmap = dict(subdirs=phase4, same_files=phase3, diff_files=phase3, funny_files=phase3, common_dirs=phase2, common_files=phase2, common_funny=phase2, common=phase1, left_only=phase1, right_only=phase1, left_list=phase0, right_list=phase0)
    def __getattr__(self, attr):
        # Run the producing phase on first access, then return the attribute.
        if attr not in self.methodmap:
            raise AttributeError, attr
        self.methodmap[attr](self)
        return getattr(self, attr)
def cmpfiles(a, b, common, shallow=1):
    """Compare the files named in *common* across directories *a* and *b*.

    Returns a 3-tuple of name lists: (equal, different, funny/uncomparable);
    _cmp's return value (0/1/2) indexes directly into the tuple.
    """
    res = ([], [], [])
    for x in common:
        ax = os.path.join(a, x)
        bx = os.path.join(b, x)
        res[_cmp(ax, bx, shallow)].append(x)
    return res
def _cmp(a, b, sh, abs=abs, cmp=cmp):
    """Return 0 if files are equal, 1 if different, 2 on stat/IO error.

    abs/cmp are bound as defaults for speed; cmp here is the module-level
    file comparison above, not the Python 2 builtin.
    """
    try:
        return not abs(cmp(a, b, sh))
    except (os.error, IOError):
        return 2
def _filter(flist, skip):
    # Return the names in flist that are not in skip (Python 2 itertools).
    return list(ifilterfalse(skip.__contains__, flist))
def demo():
    """Command-line driver: compare two directories; -r reports recursively."""
    import sys, getopt
    options, args = getopt.getopt(sys.argv[1:], 'r')
    if len(args) != 2:
        raise getopt.GetoptError('need exactly two args', None)
    dd = dircmp(args[0], args[1])
    if ('-r', '') in options:
        dd.report_full_closure()
    else:
        dd.report()
    return
# Run the demo when executed as a script.
if __name__ == '__main__':
    demo()
6,590 | ac19ae96d8262cadd43314c29198fccbc008c1b5 | #!/usr/bin/env python
from __future__ import print_function, division, unicode_literals
import os
import sys
import json
import logging
import tempfile
import itertools
import traceback
import subprocess as sp
from os.path import basename
from datetime import datetime
from argparse import ArgumentParser, FileType
# Bash pipelines that build one BED file per annotation element.
# {input}/{output} placeholders are filled by preprocess(); doubled braces
# are literal braces for awk. 'intron' subtracts exons from genes;
# 'intergenic' complements the gene bed against the genome index (uses
# process substitution, hence /bin/bash in preprocess()).
PREPROC_CMDS = {
    'exon': "awk '$3 == \"exon\"' {input[0]} | sort -k1,1 -k4,4n | mergeBed -i stdin | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF+1)=\"exon\";print}}' > {output}",
    'gene': "awk '$3 == \"gene\"' {input[0]} | sort -k1,1 -k4,4n | mergeBed -i stdin | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF+1)=\"gene\";print}}' > {output}",
    'intron': "subtractBed -a {input[0]} -b {input[1]} | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF)=\"intron\";print}}' > {output}",
    'intergenic': "complementBed -i {input[0]} -g <(cut -f 1-2 {input[1]} | sort -k1,1) | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF+1)=\"intergenic\";print}}' > {output}"
}
def strfdelta(tdelta, fmt):
    """Format a timedelta with a format string using the fields
    {days}, {hours}, {minutes} and {seconds}."""
    parts = {"days": tdelta.days}
    parts["hours"], remainder = divmod(tdelta.seconds, 3600)
    parts["minutes"], parts["seconds"] = divmod(remainder, 60)
    return fmt.format(**parts)
def preprocess(element, inputs=None):
    '''element can be one of <gene> <exon> <intron> <intergenic>

    Runs the matching PREPROC_CMDS bash pipeline and returns the path of
    the temporary BED file it produced (the caller deletes it).

    NOTE(review): when `inputs` is None this reads the module-level
    `args.annotation` -- confirm `args` is initialised before calling.
    '''
    log = logging.getLogger('gencov')
    # mkstemp returns (fd, path); only the path is kept, so the open fd
    # is never closed explicitly.
    element_bed = tempfile.mkstemp(suffix='.bed')[1]
    if not inputs:
        inputs = [ args.annotation ]
    else:
        # `inputs` maps element name -> list of input files.
        inputs = inputs[element]
    command = PREPROC_CMDS[element].format(input=inputs, output=element_bed)
    log.debug(command)
    # /bin/bash is required: the intergenic pipeline uses process
    # substitution <(...), which plain sh does not support.
    proc = sp.Popen(command, shell=True, executable='/bin/bash', stderr=sp.PIPE)
    err_msg = proc.communicate()[1]
    if err_msg:
        # Any stderr output is treated as a failure.
        raise IOError(err_msg)
    log.info("%s preprocessed" % element.title())
    return element_bed
def gtf_processing(genome=None, prefix='gencov'):
    """Annotation preprocessing. Provide a bed file with the
    following elements:
    - projected exons
    - projected genes
    - introns
    - intergenic regions

    Returns the path of the concatenated '<prefix>.all.bed'; an existing
    non-empty file with that name is reused as a cache.
    """
    all_bed = prefix + ".all.bed"
    if not os.path.exists(all_bed) or os.stat(all_bed).st_size == 0:
        log.info("Preprocessing annotation...")
        features = ('exon', 'gene', 'intron', 'intergenic')
        # Exon/gene beds come straight from the annotation.
        merged_exons, merged_genes = map(preprocess, features[:2])
        # Intron/intergenic need the merged beds (and genome index) as
        # inputs, keyed by element name (see preprocess()).
        ins = {
            'intron': [merged_genes, merged_exons],
            'intergenic': [merged_genes, genome]
        }
        intron_bed, intergenic_bed = map(preprocess, features[2:], [ins, ins])
        log.info("Concatenate bed files for all elements...")
        with open(all_bed, 'w') as out_bed:
            cat_all(merged_exons, merged_genes, intron_bed, intergenic_bed, out_bed=out_bed)
        # The per-element temporary files are no longer needed.
        for f in (merged_exons, merged_genes, intron_bed, intergenic_bed):
            os.remove(f)
    return all_bed
def cat_all(*beds, **kwargs):
    """Concatenate the contents of the given bed files to `out_bed`
    (keyword argument, defaults to sys.stdout).

    BUG FIX: the original did `open(bed, 'r').read()` without ever closing
    the file handles; use a context manager so each file is closed.
    """
    out_bed = kwargs.get('out_bed', sys.stdout)
    for bed in beds:
        with open(bed, 'r') as fh:
            print(fh.read(), end='', file=out_bed)
def get_chromosomes(genome_file):
    """Return the chromosome names (first whitespace-separated column)
    listed in a genome index file, one per line."""
    with open(genome_file) as handle:
        return [row.split()[0] for row in handle]
def process_bam(bam, all_elements, chrs=None, all_reads=False):
    """Stream reads from `bam` intersected with the elements BED.

    Builds a samtools | bamToBed | intersectBed pipeline and returns the
    running Popen object; the caller consumes its stdout line by line.
    """
    if not os.path.exists(bam):
        raise IOError("Fail to open {0!r} for reading".format(bam))
    bai = "{0}.bai".format(bam)
    # Region queries (per-chromosome) need a BAM index; build it if missing.
    if chrs and not os.path.exists(bai):
        log.info("Indexing {0}...".format(bam))
        sp.call('samtools index {0}'.format(bam), shell=True)
    log.info('Processing {0}...'.format(bam))
    command = "samtools view -u"
    sam_filter = 4            # SAM flag 4: skip unmapped reads
    if not all_reads:
        sam_filter += 256     # flag 256: also skip secondary alignments
    command += " -F {0} {1}".format(str(sam_filter), bam)
    if chrs:
        command += " {0}".format(" ".join(chrs))
    # -tag NH exports the NH tag in the BED score column; -split keeps
    # BED12 blocks separate; -wao reports overlaps including zero-length.
    command = "{0} | bamToBed -i stdin -tag NH -bed12 | intersectBed -a stdin -b {1} -split -wao".format(command, all_elements)
    log.debug(command)
    return sp.Popen(command, shell=True, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=1)
def update_counts(element, tot_counts, cont_counts, split_counts, is_split):
    """Record one finished read in the per-element tallies.

    `element` lists the annotation elements overlapping the read;
    `is_split` selects the split vs continuous histogram. Every call also
    increments tot_counts['total'].
    """
    tot_counts['total'] = tot_counts.get('total', 0) + 1
    target = split_counts if is_split else cont_counts
    target['total'] = target.get('total', 0) + 1
    if len(element) <= 1:
        label = element[0]
    elif is_split and len(set(element)) == 1:
        # Split read whose pieces all hit the same element type.
        label = element[0]
    elif 'intergenic' in element:
        label = 'others'
    else:
        label = 'exonic_intronic'
    target[label] = target.get(label, 0) + 1
def count_features(bed, uniq=False):
    """Consume the intersectBed stream and tally reads per genomic element.

    `bed` is the Popen returned by process_bam(); its stdout yields one
    tab-separated line per (read, overlapping element) pair, grouped by
    read id. When `uniq` is set, only uniquely mapped reads (NH == 1,
    exported in the flag column) are counted.

    Returns (tot_counts, cont_counts, split_counts) dicts.
    """
    n_skipped = {}       # skip reason -> number of skipped lines
    prev_rid = None      # read id of the previous line, to detect read changes
    is_split = False     # current read has more than one BED12 block
    element = []         # elements intersecting the current read
    cont_counts = {}     # continuous read counts
    split_counts = {}    # split read counts
    tot_counts = {}      # total number of reads
    o = bed.stdout
    log.info("Compute genomic coverage...")
    while True:
        try:
            line = next(o)  # next(o) works on Python 2 and 3 (o.next() is py2-only)
            if not line:
                # BUG FIX: the 'empty' counter used to read the 'gene' key
                # (n_skipped.get('gene', 0)), so empty lines were miscounted.
                n_skipped['empty'] = n_skipped.get('empty', 0) + 1
                continue
            if 'gene' in line:
                n_skipped['gene'] = n_skipped.get('gene', 0) + 1
                continue
            rchr, rstart, rend, rid, rflag, rstrand, rtstart, rtend, rrgb, rbcount, rbsizes, rbstarts, achr, astart, aend, ael, covg = line.strip().split("\t")
            if uniq and int(rflag) != 1:
                n_skipped['non-uniq'] = n_skipped.get('non-uniq', 0) + 1
                continue
            if rid != prev_rid and prev_rid is not None:
                # All lines of the previous read seen: record it and reset.
                update_counts(element, tot_counts, cont_counts, split_counts, is_split)
                element = []
            element.append(ael)
            prev_rid = rid
            is_split = int(rbcount) > 1
        except StopIteration:
            # Flush the last read. ROBUSTNESS FIX: skip the flush when the
            # stream was empty (the original indexed into an empty list).
            if prev_rid is not None:
                update_counts(element, tot_counts, cont_counts, split_counts, is_split)
            break
    for k, v in n_skipped.items():  # .items() works on Python 2 and 3
        log.info("Skipped {1} {0} lines".format(k, v))
    return (tot_counts, cont_counts, split_counts)
def write_output(stats, out, output_format='tsv', json_indent=4):
    """Write the stats summary to `out` as TSV (default) or JSON.

    NOTE(review): reads/mutates the module-level `args` for the sample
    ID -- confirm args is initialised before calling.
    """
    if not args.ID:
        args.ID = basename(args.bam)
    if output_format == 'tsv':
        # BUG FIX: .iteritems() is Python2-only (AttributeError under
        # Python 3); .items() behaves the same on both.
        for k, v in stats.items():
            for k1, v1 in v.items():
                line_array = [args.ID, k, str(k1), str(v1)]
                out.write("\t".join(line_array) + "\n")
    elif output_format == 'json':
        out.write('Total reads: {0}\n'.format(json.dumps(stats['total'], indent=json_indent)))
        out.write('Continuous reads: {0}\n'.format(json.dumps(stats['continuous'], indent=json_indent)))
        out.write('Split reads: {0}\n'.format(json.dumps(stats['split'], indent=json_indent)))
def main(args):
    """Run the pipeline: preprocess the annotation, stream the BAM,
    count reads per genomic element and write the summary."""
    bn_bam = os.path.basename(args.bam).rsplit(".", 1)[0]
    bn_gtf = os.path.basename(args.annotation).rsplit(".", 1)[0]
    start = datetime.now()
    all_elements = gtf_processing(genome=args.genome, prefix=bn_bam + "." + bn_gtf)
    # chrs=None means "no region filter" downstream.
    chrs = None if args.all_chrs else get_chromosomes(args.genome)
    # --uniq implies primary alignments only.
    if args.uniq:
        args.all_reads = False
    bed = process_bam(args.bam, all_elements, chrs=chrs, all_reads=args.all_reads)
    read_type = "UNIQ" if args.uniq else "ALL" if args.all_reads else "PRIMARY"
    chroms = ", ".join(chrs) if chrs else "ALL"
    log.info("Chromosomes: {0}".format(str(chroms)))
    log.info("Mapped reads: {0}".format(str(read_type)))
    tot, cont, split = count_features(bed, uniq=args.uniq)
    stats_summary = {"total" : tot, "continuous" : cont, "split" : split}
    write_output(stats_summary, args.output, output_format=args.output_format)
    end = datetime.now() - start
    log.info('DONE ({0})'.format(strfdelta(end, "{hours}h{minutes}m{seconds}s")))
    # Keep the concatenated elements BED only when --keep was given.
    if not args.keep:
        os.remove(all_elements)
def parse_arguments(argv):
    """Parse command-line arguments.

    `argv` is expected to be sys.argv (program name first).

    BUG FIX: the original passed `argv` as ArgumentParser's first
    positional parameter, which is `prog` (the program name), and then
    called parse_args() with no arguments -- so the passed-in argv was
    never parsed and `prog` was a list. Parse argv[1:] explicitly; the
    production call parse_arguments(sys.argv) behaves the same.
    """
    parser = ArgumentParser(description = "Count the number of reads in genomic regions. NOTE: SAMtools and BEDtools must be installed")
    parser.add_argument("-a", "--annotation", type=str, help="gtf with all elements (genes, transcripts and exons)", required=True)
    parser.add_argument("-g", "--genome", type=str, help="genome chromosome sizes", required=True)
    parser.add_argument("-b", "--bam", type=str, help="bam file", required=True)
    parser.add_argument("-o", "--output", type=FileType('w'), default=sys.stdout, help="output file name")
    parser.add_argument("-I", "--ID", type=str, help="the ID of the experiment, from which the bam comes from")
    parser.add_argument("--keep", dest='keep', help="Do not delete the temporary files generated during the run", action='store_true', default=False)
    parser.add_argument("--uniq", dest='uniq', action='store_true', help="Only use uniquely mapped reads", default=False)
    parser.add_argument("--loglevel", dest='loglevel', help="Set the loglevel", default="info")
    parser.add_argument("--all-reads", dest='all_reads', action='store_true', help="Use all reads from the BAM file. Default: use primary alignments only ('samtools view -F 260')", default=False)
    parser.add_argument("--output-format", dest='output_format', help="Set the output format", default="tsv")
    parser.add_argument("--all-chromosomes", dest='all_chrs', action='store_true', help="Use all chromosomes from the BAM file header. Default: use only chromosomes in the genome index file.", default=False)
    return parser.parse_args(argv[1:])
def setup_logger():
    """Logging setup: configure and return the 'gencov' logger.

    Reads the module-level `args.loglevel`.
    """
    log = logging.getLogger("gencov")
    log.setLevel(logging.getLevelName(args.loglevel.upper()))
    ch = logging.StreamHandler()
    # BUG FIX: the original did `ch.setLevel = log.level`, which replaced
    # the handler's setLevel method with an int instead of calling it --
    # the handler's level was never actually set.
    ch.setLevel(log.level)
    fmt = logging.Formatter('%(asctime)s - %(message)s', '%Y-%m-%d %H:%M:%S')
    ch.setFormatter(fmt)
    log.addHandler(ch)
    return log
if __name__ == "__main__":
    # Given a bam file, compute the read coverage for different genomic
    # regions: exons, introns, exon-intron junctions, intergenic.
    # Only PRIMARY alignments are used unless --all-reads is given.
    log = None
    try:
        args = parse_arguments(sys.argv)
        log = setup_logger()
        main(args)
        exit(0)
    except Exception:
        # BUG FIX: `except Exception, err` is Python2-only syntax and
        # sys.exc_type/exc_value/exc_traceback were removed in Python 3;
        # traceback.format_exc() is the portable equivalent. Also guard
        # against `log` being unbound when argument parsing itself fails.
        if log is None:
            log = logging.getLogger("gencov")
        log.error("Error:")
        log.error(traceback.format_exc())
        exit(1)
|
6,591 | 501b8a9307a1fd65a5f36029f4df59bbe11d881a | from LAMARCK_ML.data_util import ProtoSerializable
class NEADone(Exception):
  """Signals that the NEA run has finished."""


class NoSelectionMethod(Exception):
  """Raised when no selection method is configured."""


class NoMetric(Exception):
  """Raised when no metric is configured."""


class NoReproductionMethod(Exception):
  """Raised when no reproduction method is configured."""


class NoReplaceMethod(Exception):
  """Raised when no replace method is configured."""
class ModelInterface(ProtoSerializable):
  """Abstract interface for a runnable model: lifecycle control
  (reset/run/stop) plus state (de)serialisation via streams.

  All methods are stubs that subclasses must override.
  """

  def reset(self):
    # Restore the model to its initial state.
    raise NotImplementedError()

  pass  # stray `pass` retained from the original source

  def run(self):
    # Execute the model.
    raise NotImplementedError()

  def stop(self):
    # Interrupt a running model.
    raise NotImplementedError()

  @property
  def abstract_timestamp(self):
    # Timestamp identifying the model's current state -- TODO confirm
    # the expected type/units with implementations.
    raise NotImplementedError()

  def state_stream(self):
    # Serialise internal state to a stream.
    raise NotImplementedError()

  def from_state_stream(self, stream):
    # Restore internal state from `stream`.
    raise NotImplementedError()

  pass
class ModellUtil(object):
  """Base mix-in for model utilities.

  Accepts (and ignores) arbitrary keyword arguments so subclasses can use
  cooperative multiple inheritance with keyword-based configuration.
  """

  def __init__(self, **kwargs):
    super().__init__()
|
6,592 | 37804c92b69d366cc1774335b6a2295dfd5b98f3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import codecs
import Levenshtein
import logging
import random
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
import time
from sklearn.model_selection import KFold
import numpy as np
import scipy.io as scio
from matplotlib import pyplot as plt
logging.basicConfig(level=logging.INFO)

# --- Ground truth: each line lists account ids of the same person ---
user_file = open('groundtruth.txt')
user_gp = user_file.readlines()
user_file.close()
same_line_dict = {}
for items in user_gp:
    users = items.strip().split()
    # Map every uid on the line to the full list of linked uids.
    same_line_dict.update({x: users for x in users})
# print(same_line_dict)

# --- Posts: one JSON object per line, {uid: list of posts} ---
info_file = codecs.open('new_posts.txt', 'r', 'utf-8')
info_data = info_file.readlines()
info_file.close()
info_dict = {}
for line in info_data:
    tmp_str = line.strip()
    # print(tmp_str)
    try:
        tmp_dict = json.loads(tmp_str)
        k = list(tmp_dict.keys())
        # print(k)
        v = tmp_dict[k[0]]
        info_dict.update({k[0]: v})
    except:
        # NOTE(review): bare except swallows every error, not just bad JSON.
        logging.warning('Invalid Data!')
        continue
valid_users = list(info_dict.keys())
user_num = len(valid_users)
print(user_num)

# --- Followings: "<uid> <?> <followed uids...>" per line ---
flw_file = open('new_followings.txt')
flw_data = flw_file.readlines()
flw_file.close()
flw_dict = {}
for lines in flw_data:
    items = lines.strip().split()
    # items[1] is skipped -- presumably a count field; TODO confirm.
    flw_dict[items[0]] = items[2:]
valid_flw = list(flw_dict.keys())
print(len(flw_dict))
def gen_label(uid1, uid2):
    """Return '1' when the two accounts are ground-truth linked in both
    directions, '-1' otherwise."""
    if uid2 in same_line_dict[uid1] and uid1 in same_line_dict[uid2]:
        return '1'
    return '-1'
# Fields extracted for each user: the first three come from the mblog
# (post) entry itself, the remainder from its embedded 'user' object
# (see get_info below).
info_keys = ['text', 'textLength', 'source', 'id', 'screen_name',
             'statuses_count', 'verified', 'verified_type',
             'description', 'gender', 'urank', 'followers_count',
             'follow_count', 'reposts_count', 'comments_count',
             'attitudes_count', 'isLongText']
def get_info(uid):
    """Extract profile/post fields for `uid` from info_dict.

    Returns (ok, fields): ok is False when the user has no posts (fields
    is then empty); otherwise fields maps every name in info_keys to the
    value found in the latest post, with type-appropriate defaults for
    missing keys.
    """
    if info_dict[uid] == []:
        return False, {}
    # Defaults, which also document the expected type of each field.
    tdict = {
        'text': '',
        'textLength': 0,
        'source': '',
        'id': '',
        'screen_name': '',
        'statuses_count': 0,
        'verified': False,
        'verified_type': -1,
        'description': '',
        'gender': '',
        'urank': 0,
        'followers_count': 0,
        'follow_count': 0,
        'reposts_count': 0,
        'comments_count': 0,
        'attitudes_count': 0,
        'isLongText': False
    }
    # print(info_dict[uid])
    # info_dict[uid][0] is taken as the most recent post -- presumably the
    # list is newest-first; TODO confirm against the crawler.
    latest_po = info_dict[uid][0]['mblog']
    user_info = latest_po['user']
    # print(latest_po)
    # print(user_info)
    # First three keys live on the post itself...
    for elem in info_keys[0:3]:
        if list(latest_po.keys()).__contains__(elem):
            tdict.update({elem: latest_po[elem]})
    # ...the remaining keys on the embedded user object.
    for elem in info_keys[3:]:
        if list(user_info.keys()).__contains__(elem):
            tdict.update({elem: user_info[elem]})
    return True, tdict
def gen_data(dict1, dict2):
    """Build the pairwise feature vector for two user-info dicts.

    The order of the 14 features matters: it must match the `features`
    name list used when printing feature importances.
    """
    if dict1['verified'] and dict2['verified']:
        verified = -1
    elif dict1['verified'] or dict2['verified']:
        verified = 1
    else:
        verified = 0
    feats = [verified]
    # Equality indicators for categorical fields.
    for key in ('verified_type', 'gender', 'isLongText'):
        feats.append(1 if dict1[key] == dict2[key] else 0)
    # Absolute differences.
    feats.append(abs(dict1['urank'] - dict2['urank']))
    feats.append(abs(dict1['statuses_count'] - dict2['statuses_count']))
    # Normalised differences (1 when both counts are zero).
    for key in ('followers_count', 'follow_count'):
        total = abs(dict1[key] + dict2[key])
        feats.append(abs(dict1[key] - dict2[key]) / total if total != 0 else 1)
    for key in ('reposts_count', 'comments_count', 'attitudes_count'):
        feats.append(abs(dict1[key] - dict2[key]))
    # String similarities.
    for key in ('screen_name', 'description', 'text'):
        feats.append(Levenshtein.jaro_winkler(dict1[key], dict2[key]))
    return feats
def gen_flw(uid1, uid2):
    """Following-relation features for a user pair.

    Returns (relation, jaccard): relation is 0 when neither user has
    following data, a follows-the-other boolean when only one has data,
    and 2 when both do; jaccard is the overlap of their following lists
    (0 unless both have data).
    """
    known1 = uid1 in valid_flw
    known2 = uid2 in valid_flw
    if not known1 and not known2:
        return 0, 0
    if known1 and not known2:
        return uid2 in flw_dict[uid1], 0
    if not known1 and known2:
        return uid1 in flw_dict[uid2], 0
    shared = [a for a in flw_dict[uid1] if a in flw_dict[uid2]]
    union_size = len(flw_dict[uid1]) + len(flw_dict[uid2]) - len(shared)
    return 2, len(shared) / union_size
logging.info('Prepare Data!')
train_num = 8000
data = []     # feature vectors
labels = []   # '1' same person, '-1' otherwise
uidpool = []  # already-sampled pairs (stored in both orders)
for i in range(0, train_num):
    order1 = random.randint(0, user_num - 1)
    order2 = random.randint(0, user_num - 1)
    uid1 = valid_users[order1]
    # First draw: uid2 from uid1's ground-truth group (positive sample).
    uid2 = same_line_dict[uid1][random.randint(0, len(same_line_dict[uid1]) - 1)]
    # uid2 = valid_users[order2]
    # if random.random() >= 0:
    #     # print('+-1')
    #     uid2 = same_line_dict[uid1][random.randint(0, len(same_line_dict[uid1]) - 1)]
    flag1, dict1 = get_info(uid1)
    flag2, dict2 = get_info(uid2)
    # NOTE(review): the retry below redraws uid2 uniformly at random
    # (mostly negatives), unlike the initial same-group draw -- confirm
    # this mixed sampling is intentional.
    while (uid1 == uid2 or uidpool.__contains__([uid1, uid2]) or not flag1 or not flag2):
        order1 = random.randint(0, user_num - 1)
        order2 = random.randint(0, user_num - 1)
        uid1 = valid_users[order1]
        uid2 = valid_users[order2]
        flag1, dict1 = get_info(uid1)
        flag2, dict2 = get_info(uid2)
    # Remember the pair in both orders to avoid duplicates.
    uidpool.append([uid1, uid2])
    uidpool.append([uid2, uid1])
    tmp_data = gen_data(dict1, dict2)
    flw1, flw2 = gen_flw(uid1, uid2)
    # data.append(gen_data(dict1, dict2))
    tmp_data.append(flw1)
    tmp_data.append(flw2)
    data.append(tmp_data)
    labels.append(gen_label(uid1, uid2))
    # print(uid1, uid2)
print(data)
print(labels)
print('total number:', train_num)
print('total positive samples:', labels.count('1'))
logging.info('Start Training!')
rf = RandomForestClassifier(n_estimators=40, n_jobs=4, verbose=0)
accur = []
begin_time=time.time()
# Ten random ~90/10 splits (the split is redrawn per sample, so fold
# sizes vary; this is not a proper k-fold).
for order in range(0, 10):
    ratio = 9 / 10
    train_data = []
    train_labels = []
    test_data = []
    test_labels = []
    for i in range(0, train_num):
        if random.random() > ratio:
            test_data.append(data[i])
            test_labels.append(labels[i])
        else:
            train_data.append(data[i])
            train_labels.append(labels[i])
    # print('train number:', len(train_labels))
    # print('train positive samples:', train_labels.count('1'))
    rf.fit(train_data, train_labels)
    logging.info('Train Done!')
    # print('Train accuracy:',
    #       rf.score(train_data, train_labels))
    # print('Test accuracy:',
    #       rf.score(test_data, test_labels))
    # NOTE(review): scoring on the FULL data set (train + test) inflates
    # the reported accuracy -- confirm intent.
    acc = rf.score(data, labels)
    # print('Total accuracy:', acc)
    accur.append(acc)
end_time=time.time()
print('Feature Weight:')
# print('Feature Weight:', rf.feature_importances_)
# Names for the 16 features (14 from gen_data + 2 from gen_flw).
features = ['verified', 'verified_type', 'gender', 'isLongText', 'urank', 'statuses_diff',
            'followers_diff', 'follows_diff', 'reposts_diff', 'comment_diff', 'attitudes_diff',
            'screen_name_similarity', 'description_similarity', 'text_similarity', 'co_follow', 'in_follows']
for i in range(0, 16):
    print(features[i], ':', rf.feature_importances_[i])
print('Total accuracy', rf.score(data, labels))
scores = cross_val_score(rf, data, labels, cv=10)
print(sum(scores) / 10)
print('time:',end_time-begin_time)
6,593 | cd0b55e163851344273ad020d434cc8662083d19 | import math
class Rank:
    """Computes a user's experience points (xp) and level from
    engagement statistics."""

    class Stats(object):
        '''Holds info used to calculate amount of xp a player gets'''
        post_likes = 0
        post_dislikes = 0
        comment_likes = 0
        comment_dislikes = 0
        usage = 0

    class Interval(object):
        '''A class representing an interval. It is always [a, b).'''

        def __init__(self, a, b):
            self.a = a
            self.b = b

        def contains(self, n):
            # BUG FIX: the original was `self.a >= n and n < b` -- the
            # lower-bound comparison was inverted and `b` was an unbound
            # name (NameError). [a, b) membership is:
            return self.a <= n < self.b

    # Each index in this array corresponds to the level for that xp interval.
    # NOTE(review): entries beyond index 2 repeat (100, 250) and look like
    # placeholders -- confirm the intended level thresholds.
    XP_INTERVALS = [
        Interval(0, 100),
        Interval(100, 250),
        Interval(250, 1000),
        Interval(100, 250),
        Interval(100, 250),
        Interval(100, 250),
        Interval(100, 250),
        Interval(100, 250),
        Interval(100, 250),
        Interval(100, 250),
        Interval(100, 250),
        Interval(100, 250),
        Interval(100, 250),
        Interval(100, 250),
    ]

    # xp contributed per unit of each stat.
    STAT_WORTH = {
        'post_likes': 1,
        'post_dislikes': -1,
        'comment_likes': 1,
        'comment_dislikes': -1,
        'usage': 1
    }

    # Tweaks how far apart each of the levels are. For example, the closer to
    # zero this is, the further apart the levels.
    LEVEL_RATE = 0.2

    def __init__(self):
        self._xp = 0       # accumulated experience points
        self._level = 0    # level derived from xp
        self._label = ''   # display label (not yet computed anywhere here)

    def consume_stats(self, stats):
        """Recompute xp and level from a Stats instance.

        BUG FIX: STAT_WORTH is a class attribute; class attributes are not
        visible as bare names inside methods (the original raised
        NameError) -- it must be qualified.
        """
        worth = Rank.STAT_WORTH
        total_arr = [
            worth['post_likes'] * stats.post_likes,
            worth['post_dislikes'] * stats.post_dislikes,
            worth['comment_likes'] * stats.comment_likes,
            worth['comment_dislikes'] * stats.comment_dislikes,
            worth['usage'] * stats.usage,
        ]
        self._xp = sum(total_arr)
        self._level = self._calculate_level()

    def _calculate_level(self):
        # BUG FIX: LEVEL_RATE must be qualified (same NameError as above).
        # NOTE(review): negative xp (many dislikes) makes sqrt raise
        # ValueError -- confirm whether xp should be clamped at 0.
        return math.sqrt(Rank.LEVEL_RATE * self._xp)

    def from_model(self):
        pass

    def from_proto(self):
        pass

    def to_model(self):
        pass

    def to_proto(self):
        pass
|
def descending_order(num):
    """Return `num` with its digits rearranged into descending order."""
    digits = sorted(str(num), reverse=True)
    return int("".join(digits))
import unittest
class TestIsBalanced(unittest.TestCase):
    """Unit tests for descending_order."""

    def test_is_balanced(self):
        # BUG FIX: assertEquals is a deprecated alias that was removed in
        # Python 3.12; assertEqual is the supported spelling.
        self.assertEqual(descending_order(0), 0)
        self.assertEqual(descending_order(15), 51)
        self.assertEqual(descending_order(123456789), 987654321)
        self.assertEqual(descending_order(1201), 2110)


if __name__ == '__main__':
    unittest.main()
|
6,595 | 1f3e20e7fe597a88cddacf6813250f1ede6c6ee0 | #!/usr/bin/python3
"""Prints the first State object from the database specified
"""
from sys import argv
import sqlalchemy
from sqlalchemy import create_engine, orm
from model_state import Base, State
if __name__ == "__main__":
    # Connect using user/password/db-name taken from the command line and
    # print the first State row (ordered by id), or 'Nothing'.
    db_url = 'mysql+mysqldb://{}:{}@localhost/{}'.format(*argv[1:4])
    engine = create_engine(db_url, pool_pre_ping=True)
    Base.metadata.create_all(engine)
    session = orm.sessionmaker(bind=engine)()
    state = session.query(State).order_by(State.id).first()
    if state is None:
        print('Nothing')
    else:
        print('{}: {}'.format(state.id, state.name))
    session.close()
|
6,596 | d13f06afeac938fc2cf4d3506b3f68c6de9de210 | import cv2
img = cv2.imread('imgs/1.png')
pixel = img[100, 100]          # BGR triple at row 100, col 100
img[100, 100] = [57, 63, 99]   # set the pixel value (B, G, R)
b = img[100, 100, 0]  # 57 -- blue channel value at (100, 100)
g = img[100, 100, 1]  # 63 -- green channel
r = img[100, 100, 2]  # 99 -- red channel (set just above)
r = img[100, 100, 2] = 99  # set the red channel (chained: r is also 99)
# item()/itemset(): scalar get/set of a single channel value
piexl = img.item(100, 100, 2)  # NOTE(review): 'piexl' is a typo of 'pixel'
img.itemset((100, 100, 2), 99)
|
class Solution(object):
    def lexicalOrder(self, n):
        """
        Return the integers 1..n sorted in lexicographic (string) order.

        :type n: int
        :rtype: List[int]
        """
        ordering = []
        self.backtrack(ordering, 1, n)
        return ordering

    def backtrack(self, acc, counter, n):
        """DFS over the implicit prefix tree rooted at `counter`."""
        if counter > n or len(acc) == n:
            return
        acc.append(counter)
        # First child in lexicographic order: append a trailing digit.
        self.backtrack(acc, counter * 10, n)
        # Then the next sibling, unless it would cross a digit boundary.
        if counter % 10 != 9:
            self.backtrack(acc, counter + 1, n)
|
6,598 | 37969899aa646f4cdd7a5513f17d26b334870f1b | import pymongo
import redis
import json
from time import time
# In-cluster service endpoints for the user-timeline stores.
user_timeline_mongodb = "mongodb://user-timeline-mongodb.sdc-socialnetwork-db.svc.cluster.local:27017/"
user_timeline_redis = "user-timeline-redis.sdc-socialnetwork-db.svc.cluster.local"


def handle(req):
    """handle a request to the function

    Appends (post_id -> timestamp) to the user's timeline, stored both in
    MongoDB (one document per user, posts as a JSON blob) and Redis
    (one hash per user). Returns the elapsed handling time in seconds
    as a string.

    Args:
        req (str): request body -- JSON with user_id, post_id, timestamp
    """
    start = time()
    event = json.loads(req)
    user_id = event["user_id"]
    post_id = event["post_id"]
    timestamp = event["timestamp"]
    # NOTE(review): a new MongoClient is created on every invocation and
    # never closed -- consider a module-level client. Also Cursor.count()
    # was removed in pymongo 4.x -- confirm the pinned pymongo version.
    myclient = pymongo.MongoClient(user_timeline_mongodb)
    mydb = myclient['user-timeline']
    mycol = mydb["user-timeline"]
    myquery = { "user_id": user_id }
    mydoc = mycol.find(myquery)
    if mydoc.count() == 0:
        # First post for this user: create the timeline document.
        posts_j = {}
        posts_j[str(post_id)] = timestamp
        mydict = {"user_id": user_id, "posts": json.dumps(posts_j)}
        mycol.insert_one(mydict)
    else:
        # Merge the new post into the stored JSON blob and write it back.
        posts_j = json.loads(mydoc.next()["posts"])
        posts_j[str(post_id)] = timestamp
        posts_update = {"$set": {"posts": json.dumps(posts_j)}}
        mycol.update_one(myquery, posts_update)
    # Mirror the entry into the Redis cache used for timeline reads.
    r = redis.Redis(host=user_timeline_redis, port=6379, decode_responses=True)
    r.hset(user_id, post_id, timestamp)
    #r.hset("end_time", event["req_id"], str(time()))
    return str(time() - start)
|
a,b,c,y=4.4,0.0,4.2,3.0
# NOTE(review): b is 0.0, so the expression below raises
# ZeroDivisionError at runtime -- confirm whether this snippet is an
# intentional error demonstration.
print(c+a*y*y/b)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.