index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
18,900 | 78ef4a87888384dc4e6d6f70cb2f1d2ec2573b7a | # ##################################################################################################
# Copyright (c) 2020 - Fundação CERTI
# All rights reserved.
# ##################################################################################################
import numpy
import rasterio as rio
import pytest
from qda_modelos import total_suspended_solids_turbidity as turbidity
class TestTSSTurbidityLiuEtAl2006:
    """Tests for the Liu et al. (2006) TSS/turbidity algorithm."""

    def test_expected_result_type(self, setup_bands):
        R20m_bands = setup_bands["20m"]
        B8A = R20m_bands["B8A"]
        B04 = R20m_bands["B04"]
        # BUG FIX: this suite tests liu_et_al_2006, but the original called
        # turbidity.doxaran_et_al_2003 (copy-paste from the Doxaran suite).
        liu_et_al_2006_result = turbidity.liu_et_al_2006(B8A, B04)
        assert isinstance(liu_et_al_2006_result, numpy.ndarray)

    def test_expected_result_shape(self, setup_bands):
        R20m_bands = setup_bands["20m"]
        B8A = R20m_bands["B8A"]
        B04 = R20m_bands["B04"]
        liu_et_al_2006_result = turbidity.liu_et_al_2006(B8A, B04)
        assert liu_et_al_2006_result.shape == B8A.shape

    def test_expected_error_for_wrong_number_of_bands(self, setup_bands):
        B8A = setup_bands["20m"]["B8A"]
        with pytest.raises(TypeError):
            # BUG FIX: exercise liu_et_al_2006, not doxaran_et_al_2003.
            turbidity.liu_et_al_2006(B8A)

    def test_expected_error_for_bands_of_different_shapes(self, setup_bands):
        B8A = setup_bands["20m"]["B8A"]
        B04 = setup_bands["10m"]["B04"]
        with pytest.raises(ValueError):
            # BUG FIX: exercise liu_et_al_2006, not doxaran_et_al_2003.
            turbidity.liu_et_al_2006(B8A, B04)
|
18,901 | 89efde666884e7c7f70d186350923568bbdd8598 | import paho.mqtt.client as mqtt
import numpy as np
import cv2
# Broker connection settings and the topic detected faces are published to.
MQTT_HOST="mosquitto"
MQTT_PORT=1883
MQTT_TOPIC="face_detection"

def on_connect(client, userdata, flags, rc):
    """Paho MQTT connect callback: log the result code and subscribe to the topic."""
    print("connected: " + str(rc))
    client.subscribe(MQTT_TOPIC)
loop_flag=0
mqttC = mqtt.Client()
mqttC.on_connect = on_connect
mqttC.connect(MQTT_HOST, MQTT_PORT, 60)
# BUG FIX: the MQTT network loop must run concurrently with the capture loop
# below; the original trailing mqttC.loop_forever() was unreachable after the
# infinite `while True`, so subscribe acks and queued publishes were never
# serviced.  loop_start() runs the network loop in a background thread.
mqttC.loop_start()

# Use face cascade with video capture = 1 for USB camera
face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(1)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # BUG FIX: cap.read() returns ret=False (frame=None) when the camera is
    # not ready; skip instead of crashing inside cvtColor.
    if not ret:
        continue
    # We don't use the color information, so might as well save space
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        print("Face identified")
        face = frame[y:y + h, x:x + w]
        # NOTE(review): encodes PNG although the variable says "Jpg" - confirm
        # which format the subscriber expects.
        rc,faceJpg = cv2.imencode('.png', face)
        message = faceJpg.tobytes()
        mqttC.publish(MQTT_TOPIC, payload = message, qos=0, retain=False)
|
18,902 | a82183155c68196740fe08e65d29f120557f46f2 | import re
# Find the lowercase letter surrounded by exactly three capitals on each side
# (with lowercase guards), one match per line, and print the captured letters.
pattern = re.compile(r'[a-z][A-Z][A-Z][A-Z]([a-z])[A-Z][A-Z][A-Z][a-z]')

# BUG FIX: the original used Python 2 `print x,` statements, which are a
# SyntaxError under Python 3; `end=' '` reproduces the soft-space behaviour.
with open('equality.txt', 'r') as f:
    for line in f:
        match = pattern.search(line)
        if match:
            print(match.groups()[0], end=' ')
print()
18,903 | 672e52c43a75924c0cf9d49148b4e495519cf76b | """
演示字符串加密解密操作
"""
# str1 = "say goodbye"
# dict1 = "".maketrans("abcdefg","1234567")
# # print(dict1)
# str2 = str1.translate(dict1)
# print(str2)
# str3 = "s1y 7oo42y5"
# dict2 = "".maketrans("1234567","abcdefg")
# str4 = str3.translate(dict2)
# print(str4)
# str1 = "say g77dbye" # s1y 77742y5
# dict1 = "".maketrans("abcdefg","1234567")
# # print(dict1)
# str2 = str1.translate(dict1)
# print(str2)
#
# str3 = "s1y 77742y5"
# dict2 = "".maketrans("1234567","abcdefg")
# str4 = str3.translate(dict2)
# print(str4)
# Character translation tables built with str.maketrans.
# dict1/dict3 map letters -> digits (encrypt); dict2/dict4 map digits back to
# letters shifted by one place, so an encode+decode round trip shifts each
# letter forward (a Caesar-like step).
dict1 = "".maketrans("abcdefg","1234567")
dict2 = "".maketrans("1234567","bcdefgh")
dict3 = "".maketrans("cdefghi","1234567")
dict4 = "".maketrans("1234567","defghij")
|
18,904 | 3a49fa45434963a4e7a741db4beda16b43f3651d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 23:34:55 2020
@author: y56
"""
from collections import *
#from itertools import *
from math import *
#from string import *
import random
import numpy as np
def rand_into_k_groups(n,k): # into k group
    """Randomly partition the integers 0..n-1 into k groups.

    Draws elements one at a time with random.choice and deals them out
    round-robin, so group sizes differ by at most one.
    """
    remaining = list(range(n))
    groups = [[] for _ in range(k)]
    slot = 0
    while remaining:
        pick = random.choice(remaining)
        groups[slot % k].append(pick)
        remaining.remove(pick)
        slot += 1
    return groups
def dis(center,i,m): # d dim
    """Squared Euclidean distance between `center` and the i-th point of m."""
    return sum((c - v) ** 2 for c, v in zip(center, m[i]))
def nearest_center_in(centers,i,m):
    """Return the index of the center closest (squared distance) to point m[i]."""
    best_idx = None
    best_d = inf
    for idx, center in enumerate(centers):
        d = dis(center, i, m)
        if d < best_d:
            best_idx, best_d = idx, d
    return best_idx
def ave(ind_li,m):
    """Component-wise mean of the points m[i] for i in ind_li."""
    dim = len(m[0])
    sums = [0] * dim
    for i in ind_li:
        for d in range(dim):
            sums[d] += m[i][d]
    return [s / len(ind_li) for s in sums]
def kmean(m,k): # m = n by d
    """Naive k-means over points m (n rows, d columns).

    Returns (pt_belong_to_center, centers) where pt_belong_to_center maps a
    center index to the list of point indices assigned to it.  Runs a fixed
    100 iterations with no convergence test.
    """
    n=len(m)
    centers=[ave(ind_li,m) for ind_li in rand_into_k_groups(n,k)]
    center_for_pt=[-1]*n
    pt_belong_to_center=defaultdict(list)
    for _ in range(100):
        pt_belong_to_center.clear()
        # Assign every point to its nearest current center.
        for i in range(n):
            center_ind_for_i = nearest_center_in(centers,i,m)
            center_for_pt[i]=center_ind_for_i
            pt_belong_to_center[center_ind_for_i].append(i)
        # BUG FIX: update each center under its own key.  The original used
        # enumerate() over dict values, pairing means with dict *insertion
        # order* instead of the actual center index, which could assign a
        # cluster's mean to the wrong center.
        for c_ind, pt_li in pt_belong_to_center.items():
            centers[c_ind]=ave(pt_li,m)
    return pt_belong_to_center,centers
# Build a random 2-D test set: 100 points with integer coords in [0, 100].
m=[]
for _ in range(100):
    tmp=[]
    for _ in range(2):
        tmp.append(random.randint(0,100))
    m.append(tmp)
# Cluster the points into 4 groups.
pt_belong_to_center,centers=kmean(m,4)
import matplotlib.pyplot as plt
for x in m:
plt.plot(x[0],x[1],'bo')
ss=['r','g','b','k']
for i,x in enumerate(pt_belong_to_center.values()):
s=ss[i]
for xx in x:
plt.plot(m[xx][0],m[xx][1],s+'o')
for c in centers:
plt.plot(c[0],c[1],'m*',markersize=15) |
18,905 | 25165775cfe6f4b1fa902095752c7c4988b0f44b | import sys
import urllib3
import json
from pyspark import SparkContext, SparkConf
from urllib.parse import urlencode
if __name__ == "__main__":
    # Require no extra CLI arguments (argv holds only the script path).
    if len(sys.argv) != 1:
        # NOTE(review): sys.stderr is passed as a *positional* argument to
        # print (so it is printed), not as file=sys.stderr; also "\R" is an
        # invalid escape in a non-raw string - confirm the intended message.
        print("Usage: full_path\ReadJSON.py", sys.stderr)
        #In my case "spark-submit --master local[8] C:\Users\joaquin.diaz.ramirez\PycharmProjects\Spark\jsonRead.py"
        exit(-1)
    # Local Spark context with 8 worker threads.
    conf = SparkConf().setAppName("WordCount").setMaster("local[8]")
    sc = SparkContext(conf=conf)
    print(str(sys.argv))
    # Fetch 100 random user records from the public randomuser API.
    url = "https://randomuser.me/api/?results=100"
    http = urllib3.PoolManager()
    r = http.request('GET', url)
    encoded_args = urlencode({'nombre': ['name']})  # built but never used
    data = json.loads(r.data.decode('utf-8'))
    # Print each user's first name.
    for users in data['results']:
        print(users['name']['first'])
sc.stop() |
18,906 | ab409abef1a5bd03af5595c0b81e0125b4274a27 | import sys
input = sys.stdin.buffer.readline
from collections import defaultdict
# Count, for each k in 0..9, how many 3x3 windows fully inside the H x W grid
# contain exactly k of the N marked cells (competitive-programming input).
H, W, N = map(int, input().split())
AB = [list(map(int, input().split())) for _ in range(N)]

counter = defaultdict(int)
for a, b in AB:
    # Each marked cell (a, b) lies inside the 3x3 windows centered at its nine
    # neighbouring positions; count only centers of valid interior windows.
    for aa in range(a - 1, a + 2):
        for bb in range(b - 1, b + 2):
            if 2 <= aa <= H - 1 and 2 <= bb <= W - 1:
                counter[(aa, bb)] += 1

answer = [0] * (10)
for (x, y), n in list(counter.items()):
    if n:
        answer[n] += 1
# Windows containing zero marks = all interior windows minus those with >= 1.
answer[0] = (H - 2) * (W - 2)
for i in range(1, 10):
    answer[0] -= answer[i]
for a in answer:
print(a) |
18,907 | 9a1095c5621d35d9e5b63568d06b5b61039f6485 | movies=["nanaku prematho","pirates of carribean","scam 1992","hary potter","joker","RRR"]
# Basic list-operation demo on the `movies` list defined above.
print(movies)          # full list
print(movies[0])       # first element
print(movies[2:4])     # slice: third and fourth elements
movies[1]="bard of blood"  # replace by index
print(len(movies))
del movies[4]          # delete by index
print(movies)
movies.remove("RRR")   # delete by value
print(movies)
movies.pop(1)          # remove (and return) by index
print(movies) |
18,908 | cf9f7dde2d92319b258529aaefdcb0199ebe49fe | #对经fft变换的复数数据转换为极坐标形式
import numpy as np
import getopt
import sys
def main(argv):
    """Convert FFT complex samples (one per line) to polar form.

    Reads a text file of complex numbers and writes one
    "amplitude@phase_degrees" line per sample.  Usage: -i <input> -o <output>.
    """
    try:
        # BUG FIX: parse the `argv` parameter instead of always sys.argv, so
        # the function is callable (and testable) with explicit arguments.
        opts, args = getopt.getopt(argv[1:], "-i:-o:-h", ["input=", "output=","help"])
    except getopt.GetoptError:
        print('将经过fft变换的音频数据,转换到极坐标')
        print('python ffttxtpolar.py -i fft_test1.txt -o fft_test1_polar.txt')
        sys.exit()
    input_path = None
    output_path = None
    # opts is a list of (flag, value) tuples.
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print("将经过fft变换的音频数据,转换到极坐标")
            print('输入格式为:')
            print('python ffttxtpolar.py -i fft_test1.txt -o fft_test1_polar1.txt')
            print('此时幅度和相位以"@"间隔')
            print('python ffttxtpolar.py -i fft_test1.txt -o fft_test1_polar.txt')
            print('此时幅度和相位以空格间隔')
            sys.exit()
        elif opt in ("-i", "--input"):
            input_path = arg
        elif opt in ("-o", "--output"):
            output_path = arg
    # BUG FIX: np.complex was deprecated in NumPy 1.20 and removed in 2.0;
    # the builtin complex is the documented replacement.
    fft_data = np.loadtxt(input_path, dtype=complex)
    # `with` guarantees the output file is closed even on errors.
    with open(output_path, 'w+') as file:
        for value in fft_data:
            data_real = np.real(value)    # real part
            data_imag = np.imag(value)    # imaginary part
            amplitude = np.sqrt(data_real ** 2 + data_imag ** 2)  # modulus
            # BUG FIX: arctan2 gives the quadrant-correct phase; arctan(y/x)
            # is wrong for negative real parts and divides by zero for purely
            # imaginary samples.
            angle = np.arctan2(data_imag, data_real) * (180 / np.pi)
            file.write(str(amplitude) + '@' + str(angle) + '\n')

if __name__ == "__main__":
    main(sys.argv)  # forward the CLI arguments

#python ffttxtpolar.py -i fft_test1.txt -o fft_test1_polar.txt
#python ffttxtpolar.py -i fft_test1.txt -o fft_test1_polar1.txt
|
18,909 | 5cc048cce667e6cfab07b0c44402e4bc6a5dc967 | from django.urls import path
from .views import *
# Routes for the reporter app: a function-based create view and a
# class-based home/index view.
urlpatterns = [
    path('create/reporter/', CreateReporter, name="create-reporter"),
    path('', HomeReporter.as_view(), name="home-reporter"),
]
18,910 | 30fc326d5bb9c1dee7642d637fdc56289462faba | import turtle as tt
import random
def onLeftClick(x, y):
    """Stamp a randomly sized, randomly coloured turtle at the click point."""
    tSize = random.randrange(1, 10)
    r = random.random()
    g = random.random()
    b = random.random()
    tt.pencolor((r, g, b))
    tt.shapesize(tSize)
    tt.goto(x, y)
    # NOTE(review): penup() is called *after* goto(), so a line is drawn on the
    # way to each click point - confirm whether that trail is intentional.
    tt.penup()
    tt.stamp()

# Screen setup: titled window, turtle cursor, thick pen, stamp on left click.
tt.title('TITLE')
tt.shape('turtle')
tt.pensize(10)
tt.onscreenclick(onLeftClick, 1)
tt.done() |
18,911 | 9a7e48a8837edbbdaeaead9936f33da6e2ffc659 | import numpy as np
from printGraph import getData
#import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import tensorflow as tf
def model(X, w):
    """Linear model y = w[1]*X + w[0], built from TF ops."""
    return tf.add(tf.multiply(X, w[1]) , w[0])
def applyRegressionMultipleParameters(x_train, y_train):
    """Fit y = w1*x + w0 by per-sample gradient descent and plot the fit.

    NOTE(review): uses the TensorFlow 1.x graph API (tf.placeholder,
    tf.Session) - this will not run under TF 2 without tf.compat.v1.
    """
    learning_rate = 0.01
    training_epochs = 100
    #x_train, y_train = getData()
    m = x_train.size
    X = tf.placeholder(tf.float32)
    Y = tf.placeholder(tf.float32)
    w = tf.Variable([0.0, 0.0], name="weights")  # [intercept, slope]
    y_model = model(X, w)
    cost = tf.square(Y - y_model)
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    # One gradient step per (x, y) sample, repeated for each epoch (SGD).
    for epoch in range(training_epochs):
        i = 0
        for (x, y) in zip(x_train, y_train):
            # if i == 50:
            #     print(epoch, x, y, sess.run(w))
            #     print(sess.run(cost, {X:x, Y:y}))
            #     i = i + 1
            sess.run(train_op, {X:x, Y:y})
    w_val = sess.run(w)
    print("w_val (multiple)s:" + str(w_val))
    sess.close()
    #print(set(zip(x_train, y_train)))
    # Design matrix [1, x] so a single matmul applies intercept and slope.
    temp = np.hstack((np.ones(m).reshape(m, 1), x_train.reshape(m, 1)))
    fig = plt.figure(3)
    fig.clear()
    plt.title("Linear Regression with multiple parameter(s): " + str(w_val) )
    plt.scatter(x_train, y_train, color="r", marker='x')
    y_learned = np.matmul(temp, w_val)
    #comparison = np.hstack((y_train.reshape(m, 1), y_learned.reshape(m, 1)))
    #print("Comparison: {}".format(comparison))
    plt.plot(x_train, y_learned, 'b')
    plt.draw()
|
18,912 | a8c0c33925a46eaf12503d5bc1b49d8df02e7c13 | """
module where all the project specific functions available
"""
import json
import requests
import params
def get_token():
    """
    To get latest token to make different api requests

    Parameters
    ----------
    None

    Returns
    -------
    (True, token) on success, (False, error message) on failure.
    """
    try:
        url = params.BASE_URL
        payload={}
        headers = {}
        response = requests.request("GET", url, headers=headers, data=payload)
        response = json.loads(response.text)
        # The token is the second-to-last path segment of the context URL.
        base_url = response.get(params.CONTEXT)
        token = base_url.split("/")[-2]
        return (True, token)
    except Exception as e:
        return (False, str(e))
def get_entity_url(token, entity_id):
    """Build the API url for fetching a single entity.

    Parameters
    ----------
    token : str
        token to get authenticated
    entity_id : str
        entity to which we need to get info

    Returns
    -------
    (True, url) on success, (False, error message) on failure.
    """
    try:
        return (True, params.GET_ENTITY_URL.format(token, entity_id))
    except Exception as e:
        return (False, str(e))
def get_filter_fname_url(token, firstname):
    """Build the API url that filters entries by first name.

    Parameters
    ----------
    token : str
        token to get authenticated
    firstname : str
        firstname to get all the entries with that specific firstname

    Returns
    -------
    (True, url) on success, (False, error message) on failure.
    """
    try:
        return (True, params.FILTER_BY_FNAME_URL.format(token, firstname))
    except Exception as e:
        return (False, str(e))
def get_filter_gender_url(token, gender):
    """Build the API url that filters entries by gender.

    Parameters
    ----------
    token : str
        token to get authenticated
    gender : str
        gender to get all the entries with that specific gender

    Returns
    -------
    (True, url) on success, (False, error message) on failure.
    """
    try:
        return (True, params.FILTER_BY_GENDER_URL.format(token, gender))
    except Exception as e:
        return (False, str(e))
def get_create_entity_url(token):
    """Build the API url for creating a new entity.

    Parameters
    ----------
    token : str
        token to get authenticated

    Returns
    -------
    (True, url) on success, (False, error message) on failure.
    """
    try:
        return (True, params.CREATE_ENTITY_URL.format(token))
    except Exception as e:
        return (False, str(e))
|
18,913 | 0f9d639cfa7416e63f06f29bcbbeda5c6976d292 | # $Header: //depot/cs/s/product_manage.wsgi#30 $
from db.Support import SupportSession
import db.Db as Db
import db.Product as Product
import db.Promo as Promo
from db.Exceptions import SupportSessionExpired
from werkzeug.utils import redirect
from werkzeug.wrappers import Response
from p.DTemplate import DTemplate
from p.DRequest import DRequest
import db.Statics as Statics
import simplejson as json
def application(environ, start_response):
    """Edit and manage products (WSGI entry point).

    Renders the product-management template for an authenticated support
    session; redirects to login when the session has expired and renders a
    generic error page on any other failure.
    """
    request = DRequest(environ)
    try:
        Db.start_transaction()
        # Raises SupportSessionExpired when the support key is stale.
        support = SupportSession(key = request.support_key())
        request.add_vars({
            'products': json.dumps(Statics.products.get_ids()),
            'orientations': json.dumps(Statics.orientations.get_ids()),
            'labs': json.dumps(Statics.labs.get_ids()),
            'lab_products': json.dumps(Statics.lab_products.get_ids()),
            'CSVPRICECOUNT': Product.CSVPRICECOUNT,
            'promo_categories': Promo.get_promo_categories()['promo_categories']
        })
        t = DTemplate(request, 'product_manage.html')
        resp = Response(t.render(request.get_vars()))
        Db.finish_transaction()
    except SupportSessionExpired:
        Db.cancel_transaction()
        # 307 preserves the request method across the redirect to login.
        resp = redirect('/s/login', 307)
        return resp(environ, start_response)
    except Exception as e:
        Db.cancel_transaction()
        import traceback
        traceback.print_exc()
        t = DTemplate(request, 'error.html')
        resp = Response(t.render({'message': "Internal Error"}))
    # Common tail for the success and generic-error paths.
    request.cookie_freshen(resp)
    resp.headers['content-type'] = 'text/html; charset=utf-8'
    resp.headers['content-length'] = len(resp.data)
    return resp(environ, start_response)
|
18,914 | b074204156b059753565b1172be470d713018e42 | ###
## uses this python omxplayer wrapper (https://github.com/willprice/python-omxplayer-wrapper)
###
from omxplayer import OMXPlayer
from time import sleep
# Cycle forever through the playlist, playing roughly one second of each track.
mainMenu = ["1_drums.mp3", "2_hello.mp3", "3_lineSearch.mp3", "4_red.mp3", "5_yellow.mp3"]
cnt = 0
while(True):
    # file_path_or_url = 'path/to/file.mp4'
    # This will start an `omxplayer` process, this might
    # fail the first time you run it, currently in the
    # process of fixing this though.
    player = OMXPlayer(mainMenu[cnt])
    # The player will initially be paused
    player.play()
    sleep(1)
    player.pause()
    # Kill the `omxplayer` process gracefully.
    player.quit()
    # Advance to the next track, wrapping back to the start of the playlist.
    cnt+=1
    if cnt ==len(mainMenu):
        cnt=0
|
18,915 | aace2f4b8c3e1725d805a0e5e8919529fca4de88 | from rest_framework import serializers
from jimjam.models import *
# Superuser Serializer
class SuperUserSerializer(serializers.ModelSerializer):
    """Serializes every field of the SuperUser model."""
    class Meta:
        model = SuperUser
        fields = '__all__'

class PostsSerializer(serializers.ModelSerializer):
    """Serializes every field of the Posts model."""
    class Meta:
        model = Posts
        fields = '__all__'
18,916 | 9909dcc8461c46c37635e3b5036c93e1e9f92b17 | __author__ = '123'
# coding=utf-8
import requests, unittest, time
from common.jsonparser import JMESPathExtractor
from common.logger import logger
import threading
class TestWinprobability_1(unittest.TestCase):
    """
    Test the prize-wheel winning probability against a local test server.
    """
    def tet_01(self):
        """
        <-------------------------------------------------------------------------------------->
        Loop several draws and verify the prizes won.
        """
        # NOTE(review): named "tet_01", not "test_01", so unittest never
        # collects this method - confirm deliberate disable vs typo.
        self.headers = {
            "Accept-Encoding": "gzip",
            "User-Agent": "android-6.0/Meizu(M5)",
        }
        self.param = {
            "isAuto": " ",
            "userMail": "38@qq.com",
            "platform": "Android",
            "timeStamp": "1536997844366",
            "token": "b69b76193d95b196d7f476aa49f443da",
            "userPassword": "1ebb51846675cb9802783d6dae3c8c79",
            "uuid": "00000000-7508-8fb8-d616-f1c80033c587",
            "version": "1.2.1",
        }
        self.user_balance_data = {
            "currencyId": 4,
            "languageType": 3,
            "timeStamp": int(time.time() * 1000),
            "token": "b69b76193d95b196d7f476aa49f443da",
            "userMail": "38@qq.com",
        }
        # Params for exchanging (buying) draw chances
        self.exchangeActivityTimes_param = {
            "activityId": "f90bb97a1a7f4cd099df7e57fd8c5883",
            "times": 4,
        }
        self.login_url = "http://192.168.1.123:10002/dididu/userLogin.do"
        self.r = requests.session()
        self.win_url = "http://192.168.1.123:10002/dididu/winActivityPrize.do"
        self.query_balance_value_url = "http://192.168.1.123:10002/dididu/userBalanceDetails.do"
        # Endpoint for exchanging draw chances
        self.exchangeActivityTimes_url = "http://192.168.1.123:10002/dididu/exchangeActivityTimes.do"
        logger.info("注释: {0}".format(TestWinprobability_1.tet_01.__doc__))
        logger.info("当前线程: {0}".format(threading.current_thread()))
        # -------------------------
        self.r = requests.session()
        self.r.post(url=self.login_url, headers=self.headers, data=self.param)
        # ------------------------
        # Query the balance before buying draw chances
        self.balacne_value_resp = self.r.post(url=self.query_balance_value_url, data=self.user_balance_data)
        self.TNB_balance_value = JMESPathExtractor().extract(query="OBJECT.balanceValue",
                                                            body=self.balacne_value_resp.text)
        logger.info("抽奖之前TNB的余额:------{0}".format(self.TNB_balance_value))
        # ------------------------------------
        # Buy draw chances -- 10
        self.exchangeActivityTimes_resp = self.r.post(url=self.exchangeActivityTimes_url,
                                                      data=self.exchangeActivityTimes_param)
        logger.info("user_id : {0}\------times_left : {1}".format(
            JMESPathExtractor().extract(query="OBJECT.data.user_id", body=self.exchangeActivityTimes_resp.text),
            JMESPathExtractor().extract(query="OBJECT.data.times_left", body=self.exchangeActivityTimes_resp.text)))
        self.r.close()
        time.sleep(0.2)
        for i in range(2):
            with self.subTest():
                logger.info("for循环内当前线程: {0}".format(threading.current_thread()))
                win_param = {
                    "activityId": "f90bb97a1a7f4cd099df7e57fd8c5883",
                }
                self.r = requests.session()
                self.r.post(url=self.login_url, headers=self.headers, data=self.param)
                time.sleep(0.2)
                # Draw a prize
                self.resp = self.r.post(url=self.win_url, data=win_param)
                self.prize_id = JMESPathExtractor().extract(query="OBJECT.data.prize_id", body=self.resp.text)
                self.prize_name = JMESPathExtractor().extract(query="OBJECT.data.prize_name", body=self.resp.text)
                print(self.prize_id, self.prize_name)
                logger.info("第{0}次中奖--中奖ID----{1}----中奖礼品----{2}".format(i + 1, self.prize_id, self.prize_name))
                print(self.resp.json())
                self.r.close()

    def test_02(self):
        """
        <-------------------------------------------------------------------------------------->
        Loop several draws and verify the prizes won.
        """
        self.headers = {
            "Accept-Encoding": "gzip",
            "User-Agent": "android-6.0/Meizu(M5)",
        }
        self.param = {
            "isAuto": " ",
            "userMail": "39@qq.com",
            "platform": "Android",
            "timeStamp": "1536997844366",
            "token": "b69b76193d95b196d7f476aa49f443da",
            "userPassword": "1ebb51846675cb9802783d6dae3c8c79",
            "uuid": "00000000-7508-8fb8-d616-f1c80033c587",
            "version": "1.2.1",
        }
        self.user_balance_data = {
            "currencyId": 4,
            "languageType": 3,
            "timeStamp": int(time.time() * 1000),
            "token": "b69b76193d95b196d7f476aa49f443da",
            "userMail": "39@qq.com",
        }
        # Params for exchanging (buying) draw chances
        self.exchangeActivityTimes_param = {
            "activityId": "f90bb97a1a7f4cd099df7e57fd8c5883",
            "times": 2,
        }
        self.login_url = "http://192.168.1.123:10002/dididu/userLogin.do"
        self.r = requests.session()
        self.win_url = "http://192.168.1.123:10002/dididu/winActivityPrize.do"
        self.query_balance_value_url = "http://192.168.1.123:10002/dididu/userBalanceDetails.do"
        # Endpoint for exchanging draw chances
        self.exchangeActivityTimes_url = "http://192.168.1.123:10002/dididu/exchangeActivityTimes.do"
        logger.info("注释: {0}".format(TestWinprobability_1.test_02.__doc__))
        logger.info("当前线程: {0}".format(threading.current_thread()))
        # -------------------------
        self.r = requests.session()
        self.r.post(url=self.login_url, headers=self.headers, data=self.param)
        # ------------------------
        # Query the balance before buying draw chances
        self.balacne_value_resp = self.r.post(url=self.query_balance_value_url, data=self.user_balance_data)
        self.TNB_balance_value = JMESPathExtractor().extract(query="OBJECT.balanceValue",
                                                            body=self.balacne_value_resp.text)
        logger.info("抽奖之前TNB的余额:------{0}".format(self.TNB_balance_value))
        # ------------------------------------
        # Buy draw chances -- 10
        self.exchangeActivityTimes_resp = self.r.post(url=self.exchangeActivityTimes_url,
                                                      data=self.exchangeActivityTimes_param)
        logger.info("user_id : {0}\------times_left : {1}".format(
            JMESPathExtractor().extract(query="OBJECT.data.user_id", body=self.exchangeActivityTimes_resp.text),
            JMESPathExtractor().extract(query="OBJECT.data.times_left", body=self.exchangeActivityTimes_resp.text)))
        self.r.close()
        time.sleep(0.2)
        for i in range(3):
            with self.subTest():
                logger.info("for循环内当前线程: {0}".format(threading.current_thread()))
                # NOTE(review): this activityId differs from the one purchased
                # above ("f90bb...") - confirm the draw targets the right activity.
                win_param = {
                    "activityId": "67c5b75fbd784ecf8f8f996138420077",
                }
                self.r = requests.session()
                self.r.post(url=self.login_url, headers=self.headers, data=self.param)
                time.sleep(0.2)
                # Draw a prize
                self.resp = self.r.post(url=self.win_url, data=win_param)
                self.prize_id = JMESPathExtractor().extract(query="OBJECT.data.prize_id", body=self.resp.text)
                self.prize_name = JMESPathExtractor().extract(query="OBJECT.data.prize_name", body=self.resp.text)
                print(self.prize_id, self.prize_name)
                logger.info("第{0}次中奖--中奖ID----{1}----中奖礼品----{2}".format(i + 1, self.prize_id, self.prize_name))
                print(self.resp.json())
                self.r.close()
if __name__ == '__main__':
unittest.main() |
18,917 | ab92a3a488413d37e2860fa50fd7b1aa6c783900 | def decodeString(s):
stack = []
StringResult = ''
for i in s:
if i == ']' and stack:
string = ''
while stack:
string = stack.pop() + string
if stack[-1] == '[':
stack.pop()
num = ''
while stack and stack[-1].isdigit():
num = stack.pop() + num
string = string * int(num)
if stack:
if stack[-1] != '[':
stack[-1] = stack[-1] + string
else:
stack.append(string)
else:
stack.append(string)
break
else:
stack.append(i)
result = ''
if stack:
result = StringResult.join(stack)
return result
if __name__ == "__main__":
print(decodeString("2[2[b]]"))
|
18,918 | 4b04f301ce5c4ac04f5252c7ca4cd91c6f83ba9f | def test_variants_count(product_version):
assert len(product_version.variants()) == 3
def test_variant_names(product_version):
    """Variants are returned in MON / OSD / Tools order with expected names."""
    variants = product_version.variants()
    assert variants[0].name == '7Server-RHEL-7-RHCEPH-3.1-MON'
    assert variants[1].name == '7Server-RHEL-7-RHCEPH-3.1-OSD'
    assert variants[2].name == '7Server-RHEL-7-RHCEPH-3.1-Tools'
def test_variant_descriptions(product_version):
    """Variant descriptions match the human-readable product names."""
    variants = product_version.variants()
    assert variants[0].description == 'Red Hat Ceph Storage 3.1 MON'
    assert variants[1].description == 'Red Hat Ceph Storage 3.1 OSD'
    assert variants[2].description == 'Red Hat Ceph Storage 3.1 Tools'
def test_cpe(product_version):
    """Every variant shares the same CPE identifier."""
    variants = product_version.variants()
    for variant in variants:
        assert variant.cpe == 'cpe:/a:redhat:ceph_storage:3::el7'
|
18,919 | 4f89fce08989399de31b216a1a3da591062dadc2 | # Generated by Django 2.2 on 2019-04-07 12:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: relax snippet.author to an optional
    one-to-one link to the configured user model."""

    dependencies = [
        ('main', '0002_auto_20190407_1238'),
    ]

    operations = [
        migrations.AlterField(
            model_name='snippet',
            name='author',
            field=models.OneToOneField(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
18,920 | b30879f69e16557f9f5773e18edc33d495fa88cf | # This file generate the vocabulary.
import os
import pandas as pd
import numpy as np
import operator, csv
# Build a word-frequency vocabulary over every file in ./data.
worddict = {}
cnt = 0
maxlen = 0   # token count of the longest sentence seen
maxSen = ""  # the longest sentence itself
# Count the frequencies of occurrence of all words.
for filename in os.listdir('data'):
    with open('data/' + filename, 'r') as file:
        cnt += 1
        if cnt % 100 == 0:
            print('{}/2318 files are scanned.'.format(cnt))
        # NOTE(review): [:-1] assumes every line ends with '\n'; the last line
        # of a file without a trailing newline would lose its final character.
        line = file.readline()[:-1]
        while line:
            words = line.split()
            if len(words) > maxlen:
                maxlen = len(words)
                maxSen = line
            for word in words:
                if word in worddict:
                    worddict[word] += 1
                else:
                    worddict[word] = 1
            line = file.readline()[:-1]
# Sort the words according to their frequency of occurrence.
sorteddict = sorted(worddict.items(), key = operator.itemgetter(1), reverse = True)
sorteddict = pd.DataFrame(sorteddict, columns = ['words', 'counts'])
# Save the vocabulary into a csv file.
sorteddict.loc[:10000, 'words'].to_frame().to_csv('Vocabulary.csv', index = False, quoting = csv.QUOTE_NONE, escapechar = ' ')
print('Longest sentence has {} tokens'.format(maxlen))
print(maxSen)
|
18,921 | 95300baa9856173bb47a7ca5c05cfe0e3b05880e | import glob
import os
import cv2
import numpy as np
import pandas as pd
import torch
from transformers import AutoTokenizer
class BaseDataset(torch.utils.data.Dataset):
    """CSV-driven dataset with k-fold splitting and optional pseudo labeling.

    Reads a CSV of inputs (paths or raw values), filters rows by fold for
    train/validation, optionally concatenates pseudo-labeled test rows, and
    yields {"x": input, "y": target} dicts (shape depends on the flags).
    """
    def __init__(
        self,
        csv_filename,
        input_column,
        target_column=None,
        input_dir="../data/input",
        extension="",
        target_unique_values=None,
        enable_load=True,
        images_dir=None,
        split="train",
        transform=None,
        fold_column="Fold",
        num_fold=5,
        idx_fold=0,
        label_smoothing=0,
        return_input_as_x=True,
        csv_input_dir=None,
        # for pseudo labeling
        predictions_dirname_for_pseudo_labeling=None,
        test_csv_filename=None,
        test_images_dir=None,
        label_confidence_threshold=None,
        **params,
    ):
        self.input_column = input_column
        self.target_column = target_column
        self.target_unique_values = target_unique_values
        self.input_dir = input_dir
        self.extension = extension
        self.split = split
        self.transform = transform
        self.num_fold = num_fold
        self.idx_fold = idx_fold
        self.label_smoothing = label_smoothing
        self.enable_load = enable_load
        self.return_input_as_x = return_input_as_x
        # load (csv_input_dir overrides input_dir for the CSV itself)
        if csv_input_dir is not None:
            df = pd.read_csv(os.path.join(csv_input_dir, csv_filename))
        else:
            df = pd.read_csv(os.path.join(input_dir, csv_filename))
        # TODO: make code clean
        if predictions_dirname_for_pseudo_labeling is not None:
            # load the test CSV and average the per-fold prediction files
            # into pseudo labels for its target column
            df_pl = pd.read_csv(os.path.join(input_dir, test_csv_filename))
            load_test_paths = sorted(
                glob.glob(f"{predictions_dirname_for_pseudo_labeling}/*.npy")
            )
            print(f"[predictions for pseudo labeling] {load_test_paths}")
            assert len(load_test_paths) == num_fold
            df_pl[target_column] = np.mean(
                [np.load(path) for path in load_test_paths], axis=0
            )
            if label_confidence_threshold is not None:
                # keep only confident rows (outside the [thr, 1-thr] band)
                mask = df_pl[target_column].between(
                    label_confidence_threshold, 1 - label_confidence_threshold
                )
                # df_pl = df_pl[mask].reset_index(drop=True)
                df_pl = df_pl[~mask].reset_index(drop=True)
            # concat: tag rows so test images can be resolved from a
            # different directory later
            df["__is_test__"], df_pl["__is_test__"] = False, True
            df = pd.concat([df, df_pl]).reset_index(drop=True)
        # fold filtering: validation keeps the held-out fold, train drops it
        if fold_column in df.columns:
            if self.split == "validation":
                df = df[df[fold_column] == self.idx_fold]
            elif self.split == "train":
                df = df[df[fold_column] != self.idx_fold]
        else:
            print(f"Thire is no {fold_column} fold column in DataFrame.")
        # image dir defaults to "test"/"train" based on the split
        if images_dir is None:
            if self.split == "test":
                images_dir = "test"
            else:
                images_dir = "train"
        self.images_dir = images_dir
        self.test_images_dir = test_images_dir  # for pseudo labeling
        # inputs: either full file paths (enable_load) or the raw column
        if enable_load:
            self.inputs = self._extract_path_to_input_from_input_column(df)
        else:
            self.inputs = df[self.input_column]
        # targets (None when the column is absent, e.g. test split)
        if self.target_column in df.columns:
            print(f"[Dataset Info] {split} target describe:")
            print(df[self.target_column].describe())
            self.targets = df[self.target_column].tolist()
        else:
            print(f"Thire is no {target_column} target column in DataFrame.")
            self.targets = None

    def __len__(self):
        return len(self.inputs)

    def _extract_path_to_input_from_input_column(self, df):
        """Map the input column to full file paths; pseudo-label rows tagged
        __is_test__ resolve against test_images_dir instead."""
        inputs = df[self.input_column].apply(
            lambda x: os.path.join(
                self.input_dir, self.images_dir, x + self.extension
            )
        )
        if self.test_images_dir is not None:
            is_test = df["__is_test__"]
            test_inputs = df[self.input_column].apply(
                lambda x: os.path.join(
                    self.input_dir, self.test_images_dir, x + self.extension
                )
            )
            inputs[is_test] = test_inputs[is_test]
        return inputs.tolist()

    def _preprocess_input(self, x):
        """Hook for subclasses; identity by default."""
        return x

    def _preprocess_target(self, y):
        """Wrap scalar targets as float32 arrays; pass ndarrays through."""
        if isinstance(y, np.ndarray):
            return y
        else:
            return np.array([y], dtype="float32")  # will be [batch_size, 1]

    def _load(self, path):
        # Subclasses define how a single input file is read.
        raise NotImplementedError

    def __getitem__(self, idx):
        """Return one sample: load -> transform -> preprocess -> package."""
        if self.enable_load:
            path = self.inputs[idx]
            x = self._load(path)
        else:
            x = self.inputs[idx]
        if self.transform is not None:
            x = self.transform(x)
        x = self._preprocess_input(x)
        if self.return_input_as_x:
            inputs = {"x": x}
        else:
            inputs = x
        if self.targets is not None:
            inputs["y"] = self._preprocess_target(self.targets[idx])
        return inputs
class BaseImageDataset(BaseDataset):
    """Dataset variant whose inputs are image files loaded with OpenCV."""

    def _load(self, path):
        # cv2 reads BGR; convert to the RGB channel order models expect.
        bgr = cv2.imread(path)
        return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
class BaseTextDataset(BaseDataset):
    """BaseDataset variant whose inputs are text strings tokenized on access
    with a Hugging Face tokenizer."""
    def __init__(
        self,
        csv_filename,
        input_column,
        target_column=None,
        input_dir="../data/input",
        extension="",
        target_unique_values=None,
        enable_load=True,
        images_dir=None,
        split="train",
        transform=None,
        fold_column="Fold",
        num_fold=5,
        idx_fold=0,
        label_smoothing=0,
        return_input_as_x=True,
        csv_input_dir=None,
        # for pseudo labeling
        predictions_dirname_for_pseudo_labeling=None,
        test_csv_filename=None,
        # for text data
        model_name=None,
        use_fast=False,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
        return_special_tokens_mask=False,
        return_attention_mask=True,
        return_token_type_ids=True,
        max_length=None,
        enable_bucket_sampler=False,
        **params,
    ):
        super().__init__(
            csv_filename=csv_filename,
            input_column=input_column,
            target_column=target_column,
            input_dir=input_dir,
            extension=extension,
            target_unique_values=target_unique_values,
            enable_load=enable_load,
            images_dir=images_dir,
            split=split,
            transform=transform,
            fold_column=fold_column,
            num_fold=num_fold,
            idx_fold=idx_fold,
            label_smoothing=label_smoothing,
            return_input_as_x=return_input_as_x,
            csv_input_dir=csv_input_dir,
            # for pseudo labeling
            predictions_dirname_for_pseudo_labeling=predictions_dirname_for_pseudo_labeling,
            test_csv_filename=test_csv_filename,
        )
        # Tokenizer resolved from the configured Hugging Face model name.
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name, use_fast=use_fast
        )
        self.padding = padding
        self.truncation = truncation
        self.return_tensors = return_tensors
        self.return_special_tokens_mask = return_special_tokens_mask
        self.return_attention_mask = return_attention_mask
        self.return_token_type_ids = return_token_type_ids
        self.max_length = max_length
        self.enable_bucket_sampler = enable_bucket_sampler
        if self.enable_bucket_sampler:
            # Whitespace token counts used by a length-bucketing sampler.
            self.lengths = [len(inp.split()) for inp in self.inputs]

    def _preprocess_input(self, x):
        """Tokenize text `x`; squeeze the batch dimension from every tensor."""
        x = self.tokenizer(
            x,
            padding=self.padding,
            truncation=self.truncation,
            return_tensors=self.return_tensors,
            return_attention_mask=self.return_attention_mask,
            return_token_type_ids=self.return_token_type_ids,
            return_special_tokens_mask=self.return_special_tokens_mask,
            max_length=self.max_length,
        )
        x = {k: v.squeeze() for k, v in x.items()}
        return x
class BaseClassificationDataset(BaseDataset):
    """Dataset emitting one-hot targets with optional label smoothing.

    Smoothing is applied only on the train split; the remaining mass is
    spread evenly over the non-target classes.
    """

    def _preprocess_target(self, y):
        smoothing = self.label_smoothing if self.split == "train" else 0
        n_labels = len(self.target_unique_values)
        # Off-classes each receive smoothing/(n-1); the true class gets the rest.
        labels = np.full(n_labels, smoothing / (n_labels - 1), dtype="float32")
        labels[self.target_unique_values.index(y)] = 1.0 - smoothing
        return labels
|
18,922 | 187e2fe1214fa1e2f7cb43657e81ad116222a7ea | # -*- coding: utf-8 -*-
# Purpose: Python module for processing and saving IMMA1 to netCDF 4
# same shortnames are used as the IMMA1 data, please refere to IMMA1 documentation for more details
# IMMA1 documentation is at https://rda.ucar.edu/datasets/ds548.0/#!docs
# History: developed by Zhankun Wang between Oct 2016 and May 2017 for the BEDI ICOADS project
# (c) NOAA National Centers for Environmental Information
# contact: zhankun.wang@noaa.gov
import uuid
import time
import netCDF4
import numpy as np
import os
import jdutil
# change the path to where the program and documents are saved. one level above
fpath_default = '/nodc/projects/tsg/zwang/ICOADS/codes'
# change to where the python codes saved
os.chdir(fpath_default)
time_fmt = "%Y-%m-%dT%H:%M:%SZ"
att_doc = 2
if att_doc == 1:
f = open('%sTables_ICOADS.csv' %fpath_default, 'r')
lines = f.readlines()
lines = [x.rstrip("\r\n") for x in lines]
f.close()
No = [x.split(',')[0] for x in lines]
length = [x.split(',')[1] for x in lines]
abbr = [x.split(',')[2].upper() for x in lines]
longname = [x.split(',')[3] for x in lines]
min_values = [x.split(',')[4] for x in lines]
max_values = [x.split(',')[5] for x in lines]
units = [x.split(',')[6] for x in lines]
comments = [x.split(',')[7:] for x in lines]
elif att_doc == 2:
f = open('%sicoads_dsv.csv' %fpath_default, 'r')
lines = f.readlines()
lines = [x.rstrip("\r\n") for x in lines]
lines = [x.rstrip("\xa0") for x in lines]
f.close()
ancillary = [x.split(',')[1] for x in lines]
names = [x.split(',')[2] for x in lines]
units = [x.split(',')[5] for x in lines]
min_values = [x.split(',')[6] for x in lines]
max_values = [x.split(',')[7] for x in lines]
longname = [x.split(',')[9] for x in lines]
flagvalues = [x.split(',')[10] for x in lines]
# flagvalues = [x.replace(' ',',') for x in flagvalues]
flagmeanings = [x.split(',')[17] for x in lines]
standardname = [x.split(',')[18] for x in lines]
scaledtype = [x.split(',')[16] for x in lines]
comments = [x.split(',')[19] for x in lines]
keywords_list = [x.split(',')[22] for x in lines]
abbr = [x.split('-')[0] for x in names]
abbr_e = [x.split('-')[1] if '-' in x else x for x in names]
flagvalues = [x if 'blank' not in x else '' for x in flagvalues]
else:
print('Error: No proper variable attributes document is found!')
parameters = {}
attachment = {}
atta_list = [0,1,5,6,7,8,9,95,96,97,98,99]
attachment['00'] = 'CORE'
parameters['00'] = ('YR','MO','DY','HR','LAT','LON','IM','ATTC','TI','LI','DS','VS','NID','II','ID','C1','DI','D','WI','W','VI','VV','WW','W1','SLP','A','PPP','IT','AT','WBTI','WBT','DPTI','DPT','SI','SST','N','NH','CL','HI','H','CM','CH','WD','WP','WH','SD','SP','SH')
attachment['01'] = 'ICOADS ATTACHMENT'
parameters['01'] = ('BSI','B10','B1','DCK','SID','PT','DUPS','DUPC','TC','PB','WX','SX','C2','SQZ','SQA','AQZ','AQA','UQZ','UQA','VQZ','VQA','PQZ','PQA','DQZ','DQA','ND','SF','AF','UF','VF','PF','RF','ZNC','WNC','BNC','XNC','YNC','PNC','ANC','GNC','DNC','SNC','CNC','ENC','FNC','TNC','QCE','LZ','QCZ')
attachment['05'] = 'IMMT-5/FM13 ATTACHMENT'
parameters['05'] = ('OS','OP','FM','IMMV','IX','W2','WMI','SD2','SP2','SH2','IS','ES','RS','IC1','IC2','IC3','IC4','IC5','IR','RRR','TR','NU','QCI','QI1','QI2','QI3','QI4','QI5','QI6','QI7','QI8','QI9','QI10','QI11','QI12','QI13','QI14','QI15','QI16','QI17','QI18','QI19','QI20','QI21','HDG','COG','SOG','SLL','SLHH','RWD','RWS','QI22','QI23','QI24','QI25','QI26','QI27','QI28','QI29','RH','RHI','AWSI','IMONO')
attachment['06'] = 'MODEL QUALITY CONTROL ATTACHMENT'
parameters['06'] = ('CCCC','BUID','FBSRC','BMP','BSWU','SWU','BSWV','SWV','BSAT','BSRH','SRH','BSST','MST','MSH','BY','BM','BD','BH','BFL')
attachment['07'] = 'SHIP METADATA ATTACHMENT'
parameters['07'] = ('MDS','C1M','OPM','KOV','COR','TOB','TOT','EOT','LOT','TOH','EOH','SIM','LOV','DOS','HOP','HOT','HOB','HOA','SMF','SME','SMV')
attachment['08'] = 'NEAR-SURFACE OCEANOGRAPHIC DATA ATTACHMENT'
parameters['08'] = ('OTV','OTZ','OSV','OSZ','OOV','OOZ','OPV','OPZ','OSIV','OSIZ','ONV','ONZ','OPHV','OPHZ','OCV','OCZ','OAV','OAZ','OPCV','OPCZ','ODV','ODZ','PUID')
attachment['09'] = 'EDITED CLOUD REPORT ATTACHMENT'
parameters['09'] = ('CCE','WWE','NE','NHE','HE','CLE','CME','CHE','AM','AH','UM','UH','SBI','SA','RI')
attachment['95'] = 'REANALYSES QC/FEEDBACK ATTACHMENT'
parameters['95'] = ('ICNR','FNR','DPRO','DPRP','UFR','MFGR','MFGSR','MAR','MASR','BCR','ARCR','CDR','ASIR')
attachment['96'] = 'ICOADS VALUE-ADDED DATABASE ATTACHMENT'
parameters['96'] = ('ICNI','FNI','JVAD','VAD','IVAU1','JVAU1','VAU1','IVAU2','JVAU2','VAU2','IVAU3','JVAU3','VAU3','VQC','ARCI','CDI','ASII')
attachment['97'] = 'ERROR ATTACHMENT'
parameters['97'] = ('ICNE','FNE','CEF','ERRD','ARCE','CDE','ASIE')
attachment['98'] = 'UNIQUE ID ATTACHMENT'
parameters['98'] = ('UID','RN1','RN2','RN3','RSA','IRF')
attachment['99'] = 'SUPPLEMENTAL DATA ATTACHMENT'
parameters['99'] = ('ATTE','SUPD')
def get_var_att(var):
    """Look up the attribute-metadata dict for an IMMA1 variable.

    Parameters
    ----------
    var : str
        IMMA1 abbreviation; must be present in the module-level ``abbr`` list
        (otherwise ``list.index`` raises ``ValueError``).

    Returns
    -------
    dict
        Attribute fields for the variable; the set of keys depends on which
        attribute document (module-level ``att_doc``) was loaded at import time.

    Raises
    ------
    ValueError
        If ``att_doc`` is neither 1 nor 2.  (Previously this branch only
        printed a message and then raised an opaque ``NameError`` on the
        unbound local ``att``.)
    """
    idx = abbr.index(var)
    if att_doc == 1:
        att = {'abbr':var,'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx]}
    elif att_doc == 2:
        att = {'abbr':var,'ancillary':ancillary[idx],'standardname':standardname[idx],'scaledtype':scaledtype[idx],'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx], 'flagvalues': flagvalues[idx], 'flagmeanings':flagmeanings[idx]}
    else:
        # Fail loudly instead of falling through to a NameError on `att`.
        raise ValueError('No attribute document found (att_doc=%r).' % (att_doc,))
    return att
def get_ancillary(anc_QC, check_list):
    """Return the space-separated subset of ancillary variable names.

    ``anc_QC`` is a ';'-separated list of entries such as ``"SST-1"``; only
    the part before the first '-' is kept (whitespace-stripped), and only
    names that appear in ``check_list`` survive, in original order.
    """
    kept = []
    for entry in anc_QC.split(';'):
        name = entry.split('-')[0].strip()
        if name in check_list:
            kept.append(name)
    return ' '.join(kept)
def getParameters(i):
    """Return the tuple of field names for attachment number ``i``.

    Keys of the module-level ``parameters`` dict are zero-padded
    two-digit strings ('00', '01', ...).
    """
    key = "%02d" % i
    return parameters[key]
def save(out_file,data, **kwargs):
    """Write one deck of IMMA1 observations (``data``) to a NetCDF4 file.

    ``out_file`` names the output file; the 'IMMA1' token in the name is
    replaced by 'ICOADS'.  ``data`` must expose a ``data`` dict keyed by the
    IMMA1 abbreviations plus derived fields ('Julian', 'Julian1', 'DATE').
    Optional keyword ``fpath`` selects the output directory (defaults to
    the module-level ``fpath_default``).
    """
    def duration(seconds):
        # Render a duration in seconds as e.g. "1W2D3H4M5S"; zero parts omitted.
        t= []
        for dm in (60, 60, 24, 7):
            seconds, m = divmod(seconds, dm)
            t.append(m)
        t.append(seconds)
        return ''.join('%d%s' % (num, unit)
                       for num, unit in zip(t[::-1], 'W DT H M S'.split())
                       if num)
    def get_keywords(data):
        # Collect the unique GCMD keyword strings of every variable in the deck.
        keywords = []
        for var in data.data.keys():
            if var in abbr:
                idx = abbr.index(var)
                if len(keywords_list[idx])>0:
                    keywords.append(keywords_list[idx])
                    # print var, keywords_list[idx]
        keywords = list(set(keywords))
        keywords = ['Earth Science > %s' %x for x in keywords]
        keywords = ', '.join(keywords)
        return keywords
    def Add_gattrs(ff):
        # Fill in the ACDD/CF global attributes of the open Dataset *ff*.
        lon_min = min(data['LON'])
        lon_max = max(data['LON'])
        lat_min = min(data['LAT'])
        lat_max = max(data['LAT'])
        start_time = min(data.data['Julian'])
        end_time = max(data.data['Julian'])
        dur_time = (end_time-start_time)*24.0*3600.0
        start_time = jdutil.jd_to_datetime(start_time)
        start_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(start_time.year,start_time.month,start_time.day,start_time.hour,start_time.minute,start_time.second)
        end_time = jdutil.jd_to_datetime(end_time)
        end_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(end_time.year,end_time.month,end_time.day,end_time.hour,end_time.minute,end_time.second)
        version = out_file.split('_')[1]
        #start_time_s = time.strftime(time_fmt,time.gmtime(float(start_time)))
        #end_time_s = time.strftime(time_fmt,time.gmtime(float(end_time)))
        ff.ncei_template_version = "NCEI_NetCDF_Point_Template_v2.0"
        ff.featureType = "point"
        ff.title = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) %s data collected from %s to %s." %(version, start_time_s, end_time_s)
        ff.summary = "This file contains ICOADS %s data in netCDF4 format collected from %s to %s. The International Comprehensive Ocean-Atmosphere Data Set (ICOADS) offers surface marine data spanning the past three centuries, and simple gridded monthly summary products for 2-degree latitude x 2-degree longitude boxes back to 1800 (and 1degreex1degree boxes since 1960)--these data and products are freely distributed worldwide. As it contains observations from many different observing systems encompassing the evolution of measurement technology over hundreds of years, ICOADS is probably the most complete and heterogeneous collection of surface marine data in existence." %(version, start_time_s, end_time_s)
        ff.keywords = get_keywords(data);
        ff.Conventions = "CF-1.6, ACDD-1.3"
        ff.id = out_file.split('.nc')[0].replace('IMMA1','ICOADS')
        ff.naming_authority = "gov.noaa.ncei"
        #ff.source = "http://rda.ucar.edu/data/ds548.0/imma1_r3.0.0/%s.tar" %out_file.split('-')[0]
        ff.source = "%s.gz" %out_file.split('.nc')[0]
        ff.processing_level = "Restructured from IMMA1 format to NetCDF4 format."
        ff.acknowledgement = "Conversion of ICOADS data from IMMA1 to netCDF format by NCEI is supported by the NOAA Big Earth Data Initiative (BEDI)."
        ff.license = "These data may be redistributed and used without restriction."
        ff.standard_name_vocabulary = "CF Standard Name Table v31"
        ff.date_created = time.strftime(time_fmt,time.gmtime())
        ff.creator_name = "NCEI"
        ff.creator_email = "ncei.info@noaa.gov"
        ff.creator_url = "https://www.ncei.noaa.gov/"
        ff.institution = "National Centers for Environmental Information (NCEI), NOAA"
        ff.project = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) Project"
        ff.publisher_name = "NCEI"
        ff.publisher_email = "ncei.info@noaa.gov"
        ff.publisher_url = "https://www.ncei.noaa.gov/"
        ff.geospatial_bounds = "POLYGON ((%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f))" %(lon_min,lat_min,lon_min,lat_max,lon_max,lat_max,lon_max,lat_min,lon_min,lat_min)
        ff.geospatial_bounds_crs = "EPSG:4326"
        ff.geospatial_lat_min = float("%.4f" %(lat_min))
        ff.geospatial_lat_max = float("%.4f" %(lat_max))
        ff.geospatial_lon_min = float("%.4f" %(lon_min))
        ff.geospatial_lon_max = float("%.4f" %(lon_max))
        ff.geospatial_lat_units = "degrees_north"
        ff.geospatial_lon_units = "degrees_east"
        ff.time_coverage_start = start_time_s
        ff.time_coverage_end = end_time_s
        ff.time_coverage_duration = 'P' + duration(dur_time)
        ff.time_coverage_resolution = "vary"
        ff.uuid = str(uuid.uuid4())
        ff.sea_name = "World-Wide Distribution"
        ff.creator_type = "group"
        ff.creator_institution = "NOAA National Centers for Environmental Information (NCEI)"
        ff.publisher_type = "institution"
        ff.publisher_institution = "NOAA National Centers for Environmental Information (NCEI)"
        ff.program = ""
        ff.contributor_name = "Zhankun Wang; ICOADS team"
        ff.contributor_role = "ICOADS Data Conversion to NetCDF; ICOADS IMMA1 Data Provider"
        ff.date_modified = time.strftime(time_fmt,time.gmtime())
        ff.date_issued = time.strftime(time_fmt,time.gmtime())
        ff.date_metadata_modified = time.strftime(time_fmt,time.gmtime())
        ff.product_version = "ICOADS %s netCDF4" %version
        ff.keywords_vocabulary = "Global Change Master Directory (GCMD) 2015. GCMD Keywords, Version 8.1."
        ff.cdm_data_type = 'Point'
        #ff.metadata_link = 'http://rda.ucar.edu/datasets/ds548.0/#!docs'
        ff.metadata_link = ''
        # The release/version fields must be uniform across the deck; otherwise
        # a warning is printed and the attribute is left unset.
        if len(set(data.data['IM'])) == 1:
            ff.IMMA_Version = str(data.data['IM'][0])
        else:
            print('%s: check IMMA version' %out_file)
        if len(set(data.data['RN1'])) == 1:
            ff.Release_Number_Primary = str(data.data['RN1'][0])
        else:
            print('%s: check Release_Number_Primary' %out_file)
        if len(set(data.data['RN2'])) == 1:
            ff.Release_Number_Secondary = str(data.data['RN2'][0])
        else:
            print('%s: check Release_Number_Secondary' %out_file)
        if len(set(data.data['RN3'])) == 1:
            ff.Release_Number_Tertiary = str(data.data['RN3'][0])
        else:
            print('%s: check Release_Number_Tertiary' %out_file)
        if len(set(data.data['RSA'])) == 1:
            ff.Release_status_indicator = str(data.data['RSA'][0])
        else:
            print('%s: check RSA' %out_file)
        #ff.comment = ""
        ff.references = 'http://rda.ucar.edu/datasets/ds548.0/docs/R3.0-citation.pdf'
        ff.history = time.strftime(time_fmt,time.gmtime()) + ": Converted from IMMA1 format to netCDF4 format by Z.W. "
    fpath = kwargs.get('fpath')
    if fpath is None:
        fpath = fpath_default
    #ftxt = open("%s%s.txt" %(fpath,out_file[0:-3]), 'w')
    #ftxt.write('Saving to %s ...\n' %out_file);
    ff = netCDF4.Dataset(fpath + out_file.replace('IMMA1','ICOADS'), 'w', format='NETCDF4')
    Add_gattrs(ff)
    ff.createDimension('obs',len(data.data['YR']))
    '''
    # save time in Julian Days
    timein = ff.createVariable('time','f8',('obs',),zlib=True,complevel=4)
    timein.long_name = "time"
    timein.standard_name = "time"
    timein.units = "days since -4713-1-1 12:0:0 "
    timein.calendar = "julian"
    timein.axis = "T"
    timein.comment = "Julian days since noon on January 1, 4713 BC. Missing values of date (DD in date) are replaced by 0 and missing values in HR are filled with 0.0 in this calculation. See actural values in date, HR for reference."
    timein[:] = data.data['Julian'][:]
    '''
    # save time in Julian Days since the beginning of ICOADS data: 1662-10-15 12:00:00
    timein = ff.createVariable('time','f8',('obs',),zlib=True,complevel=4)
    timein.long_name = "time"
    timein.standard_name = "time"
    timein.units = "days since 1662-10-15 12:00:00"
    timein.calendar = "julian"
    timein.axis = "T"
    timein.comment = "Julian days since the beginning of the ICOADS record, which is 1662-10-15 12:00:00. Missing values of date (DD in date) are replaced by 0 and missing values in HR are filled with 0.0 in this calculation. See actual values in date, HR for reference."
    timein[:] = data.data['Julian1'][:]
    # save date in YYYYMMDD
    ff.createDimension('DATE_len',len(data.data['DATE'][0]))
    date = ff.createVariable('date','S1',('obs','DATE_len',),zlib=True,complevel=4)
    date.long_name = "date in YYYYMMDD"
    #date.valid_min = '16000101'
    #date.valid_max = '20241231'
    date.format = 'YYYYMMDD'
    #date.axis = "T"
    date.comment = "YYYY: four digital year, MM: two digital month and DD: two digital date. Missing values of DD have been filled with 99."
    date[:] = [netCDF4.stringtochar(np.array(x)) for x in data.data['DATE']]
    #print data.data['YR']
    # CRS container variable describing the lat/lon grid mapping.
    crsout = ff.createVariable('crs','i')
    crsout.grid_mapping_name = "latitude_longitude"
    crsout.epsg_code = "EPSG:4326"
    crsout.semi_major_axis = 6378137.0
    crsout.inverse_flattening = 298.257223563
    #crsout.comment = ''
    # dim_list/dim_dir remember already-created string-length dimensions so
    # variables of equal length can share one dimension.
    dim_list = []
    dim_dir = []
    exclusives = ['YR','MO','DY','SUPD','IM','ATTC','ATTE','RN1','RN2','RN3','RSA']
    '''
    exclusives_2 = ['CDE','CDI','YR','MO','DY','SUPD','IM','ATTC','ATTE','RN1','RN2','RN3','RSA','ICNR','FNR','DPRO','DPRP','UFR','MFGR','MFGSR','MAR','MASR','BCR','ARCR','CDR','ASIR']
    for atta in atta_list:
        var_list = getParameters(atta)
        for var in var_list:
            if var in exclusives_2:
                pass
            else:
                print var
    att = get_var_att(var)
    if 'flagvalues' in att:
        if len(att['flagvalues']) > 0:
            print var, att['flagvalues'], att['flagmeanings']
            foo = att['flagvalues'].split(' ')
            foo_m = att['flagmeanings'].split(' ')
            for x,y in zip(foo,foo_m): print('%s: %s' %(x,y))
    '''
    # Create one netCDF variable per IMMA1 field present in the deck, choosing
    # the dtype from the first non-None value of each field.
    for atta in atta_list:
        var_list = getParameters(atta)
        for var in var_list:
            if var in data.data.keys():
                if var in exclusives:
                    pass
                else:
                    start = time.time()
                    #ftxt.write('%s start at %s. ' %(var,time.strftime(time_fmt,time.gmtime())));
                    # Indices of observations that actually carry this field.
                    index = [i for i, x in enumerate(data.data[var]) if x is not None]
                    # print var,data[var],index[0],data.data[var][index[0]]
                    if type(data.data[var][index[0]]) is int:
                        dataout = ff.createVariable(var,'i2',('obs',),fill_value = -99,zlib=True,complevel=4)
                        #dataout = ff.createVariable(var,'f4',('obs',),zlib=True,complevel=4)
                        dataout[index] = [data.data[var][idx] for idx in index]
                    elif type(data.data[var][index[0]]) is float:
                        if var == 'LAT':
                            dataout = ff.createVariable('lat','f4',('obs',),zlib=True,complevel=4)
                        elif var == 'LON':
                            dataout = ff.createVariable('lon','f4',('obs',),zlib=True,complevel=4)
                        else:
                            dataout = ff.createVariable(var,'f4',('obs',),fill_value = float(-9999),zlib=True,complevel=4)
                        dataout[index] = [data.data[var][idx] for idx in index]
                    elif type(data.data[var][index[0]]) is str:
                        #print var
                        if var == 'SUPD':
                            #ll = max([len(x) if x is not None else 0 for x in data.data[var] ])
                            #data.data[var] = [x.ljust(ll) if x is not None else None for x in data.data[var]]
                            pass
                        else:
                            ll = len(data.data[var][index[0]])
                            if ll not in dim_list:
                                ff.createDimension('%s_len' %var,ll)
                                dataout = ff.createVariable(var,'S1',('obs','%s_len' %var,),zlib=True,complevel=4)
                                dim_list.append(ll)
                                dim_dir.append(var)
                            else:
                                idx = dim_list.index(ll)
                                dataout = ff.createVariable(var,'S1',('obs','%s_len' %dim_dir[idx],),zlib=True,complevel=4)
                            dataout[index] = [netCDF4.stringtochar(np.array(data.data[var][idx])) for idx in index]
                    else:
                        # NOTE(review): Python-2-style print statement below — a syntax
                        # error under Python 3, although the rest of this file uses
                        # print(); confirm the intended interpreter.
                        print var, type(data.data[var][index[0]])
                    # Attach per-variable CF/ACDD attributes from the metadata tables.
                    att = get_var_att(var)
                    if 'standardname' in att:
                        if len(att['standardname']) >0: dataout.standard_name = att['standardname']
                    dataout.long_name = att['longname'] if len(att['longname']) > 0 else ""
                    if len(att['unit']) > 0: dataout.units = att['unit']
                    if len(att['min_v']) > 0:
                        if 'int' in att['scaledtype']:
                            dataout.valid_min = np.int16(att['min_v'])
                        elif 'double' in att['scaledtype']:
                            dataout.valid_min = float(att['min_v'])
                        else:
                            dataout.valid_min = float(att['min_v'])
                    if len(att['max_v']) > 0:
                        if 'int' in att['scaledtype']:
                            dataout.valid_max = np.int16(att['max_v'])
                        elif 'double' in att['scaledtype']:
                            dataout.valid_max = float(att['max_v'])
                        else:
                            dataout.valid_max = float(att['max_v'])
                    if var == 'LAT': dataout.axis = 'Y'
                    if var == 'LON': dataout.axis = 'X'
                    #if len(att['min_v']) > 0:
                    #    dataout.scale_factor = 1.
                    #    dataout.add_offset = 0.
                    if 'flagvalues' in att:
                        if len(att['flagvalues']) >0:
                            foo = att['flagvalues'].split(' ')
                            dataout.flag_values = [np.int16(x) for x in foo]
                        if len(att['flagmeanings']) >0: dataout.flag_meanings = att['flagmeanings']
                    if var != 'LAT' and var != 'LON':
                        dataout.coordinates = "time lat lon"
                        dataout.grid_mapping = "crs"
                        dataout.cell_methods = "time: point"
                    if len(att['comment']) > 0: dataout.comment = att['comment']
                    if len(get_ancillary(att['ancillary'],data.data.keys())) > 0: dataout.ancillary_variables = get_ancillary(att['ancillary'],data.data.keys())
                    end = time.time()
                    #print var, end-start
                    #ftxt.write('Time used = %s sec\n' %(end-start));
    #dataout.standard_name = "sea_surface_temperature"
    #dataout.long_name = "Sea surface temperature"
    #dataout.units = "degree_Celsius"
    #ftxt.write('Done with %s' %out_file)
    ff.close()
    #ftxt.close()
|
18,923 | 363d3da182b2664ba27b0600497fdb2435097cb8 | from tile import Tile
from item import Item
class Ether(Item):
    """Item subclass for an Ether pickup (image asset "5")."""

    def __init__(self):
        # NOTE(review): Item.__init__ is never invoked here — confirm the base
        # class needs no setup beyond load_image/name.
        self.load_image("5")
        self.name = "Ether"
|
18,924 | f5a83525c7fbe3c4630f598c7f81935e272a1b61 | import os
__all__ = ("get_data",)
DATA_DIR = os.path.normpath(
os.path.join(os.path.abspath(__file__), "../../data")
)
def get_data(day: int) -> str:
    """Return the contents of ``data/day_<day>.txt`` as a string."""
    input_path = os.path.join(DATA_DIR, f"day_{day}.txt")
    with open(input_path) as source:
        return source.read()
18,925 | d88760b126f909603897d81169a935183b0bd837 | from django.shortcuts import reverse
from onken.workspace.test import WorkspaceTestCase
from django.contrib.auth import get_user_model
class IndexTest(WorkspaceTestCase):
    """Smoke test for the workspace index view."""

    def test_index(self):
        """A logged-in user GETting ``workspace_index`` sees the placeholder text."""
        User = get_user_model()
        user = User(username='gburdell3', first_name='George')
        user.save()
        # force_login skips authentication backends; the view requires a session user.
        self.client.force_login(user)
        response = self.client.get(reverse('workspace_index'))
        self.assertContains(response, "This is the workspace app.", status_code=200)
|
18,926 | e65f5286b98358331edba43a286106056b885663 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import argparse
import time
import threading
from bypy import ByPy, const
import youtube_dl
from downlib.JsonLoader import readJsonSet
from downlib.readCSV import readCSVSet
from downlib.readTXT import readTXTSet
'''
python doanload3.py data/kinetics-400_test.csv download
youtube-dl -o 1.mp4 -f mp4 https://www.youtube.com/watch?v=--6bJUbfpnQ
'''
class TheParty(object):
    """Download YouTube videos by id and mirror them to Baidu cloud storage.

    Two worker threads cooperate through ``self.dlqueue``: one downloads ids
    from ``self.alldown`` into a local directory, the other uploads finished
    files and deletes them locally.  Bad ids are appended to ``bad_video.log``
    and uploaded ids to ``bdnet.txt``.

    NOTE(review): this class is Python-2-only (print statements,
    ``except E, e`` syntax).
    """
    def __init__(self,dataset,remote_path,localDlDir='tmp'):
        self.alldown = list(dataset)
        self.url_base='https://www.youtube.com/watch?v='
        self.ext = "mp4"
        self.LDlDir = localDlDir
        self.RDir = remote_path
        self.bad_video = 'bad_video.log'
        self.uploaded_video = 'bdnet.txt'
        self.outtmpl = os.path.join(self.LDlDir,'%(id)s.%(ext)s')
        self.ydl_opts = {
            # outtmpl formats the downloaded file name; avoids default names
            # that are too long to save. See http://www.yujzw.com/python/python-youtube-dl.html
            'format' : 'best',
            'quiet' : True,
            'outtmpl': self.outtmpl#u'tmp/%(id)s.%(ext)s'
        }
        self.processes = 1
        self.dlqueue = list()
        self.que_max = 50
        self._init()
        self.ydl = youtube_dl.YoutubeDL(self.ydl_opts)
        self.bp = ByPy(processes=self.processes)
        self.bp.info()
    def _init(self):
        # Create the download dir, or requeue files left over from a previous run.
        if not os.path.exists(self.LDlDir):
            os.mkdir(self.LDlDir)
        else:
            for item in os.listdir(self.LDlDir):
                file,_ = os.path.splitext(item)
                self.dlqueue.append(file)
        # http://stackoverflow.com/a/27320254/404271
        # https://github.com/houtianze/bypy/blob/75a810df2d60048d5406a42666359d51339dcfdd/bypy/bypy.py#L119
        self.processes = 1
        #OpenVZ failed install multiprocesses
    def _addID(self,youtube_id,filename):
        # Append one id per line to a bookkeeping file.
        fo = open(filename,"a")
        fo.write(youtube_id)
        fo.write("\n")
        fo.close()
    def _ydl(self,youtube_id):
        # Download a single video; on failure record the id as bad.
        download_url = '%s' % (self.url_base + youtube_id)
        try:
            print 'downloading ',youtube_id
            self.ydl.download([download_url])
        except youtube_dl.utils.DownloadError,err:
            print 'ydl error! Add to bad_video list.'
            for arg in err.args:
                print arg
            self._addID(youtube_id,self.bad_video)
    def download(self,youtube_id):
        # Returns True iff the expected local file exists after the attempt.
        self._ydl(youtube_id)
        fpath = os.path.join(self.LDlDir,''.join([youtube_id,'.',self.ext]) )
        return os.path.exists(fpath)
    def _cloud_exist(self,youtube_id):
        # Query whether the remote copy already exists (bypy meta: 0 = exists,
        # 31066 = not found).
        lfpath = os.path.join(self.LDlDir,''.join([youtube_id,'.',self.ext]))
        rfpath = os.path.join(self.RDir,''.join([youtube_id,'.',self.ext]) )
        # NOTE(review): `fpath` is undefined here (only lfpath/rfpath are bound)
        # — this assert raises NameError if ever executed; confirm intent.
        assert os.path.exists(fpath)
        try:
            ans = self.bp.meta(rfpath)
            if 0 == ans:
                return True
            elif 31066 == ans:
                return False
            else:
                print self.bp.response.json()
                raise Exception,'baiduyun failed.'
        except Exception,e:
            print 'baiduyun failed.'
            print self.bp.response.json()
            print e
    def _chkok(self,result):
        # Interpret a bypy result code; duplicate-file results count as success
        # only in multi-process mode.
        ans = True
        if self.bp.processes == 1:
            if result != const.ENoError and result != const.EHashMismatch:
                print "Failed, result: {}".format(result)
                print self.bp.response.json()
                ans = False
        else:
            if result != const.ENoError and result != const.IEFileAlreadyExists and result != const.EHashMismatch:
                print "Failed, result: {}".format(result)
                print self.bp.response.json()
                ans = False
        return ans
    def upload(self,youtube_id):
        # Upload the local file, recording the id on success; returns the
        # _chkok verdict (or None if an exception escaped bypy).
        fpath = os.path.join(self.LDlDir,''.join([youtube_id,'.',self.ext]) )
        rfpath = os.path.join(self.RDir,''.join([youtube_id,'.',self.ext]) )
        try:
            ans = self.bp.upload(localpath=fpath, remotepath=rfpath, ondup=u'overwrite')
            resp = self._chkok(ans)
            print 'ans:'+str(ans)+';'
            if resp:
                self._addID(youtube_id,self.uploaded_video)
            return resp
        except Exception,e:
            print 'upload failed.'
            print self.bp.response.json()
            print e
    # def syncup(self):
    #     assert self.processes > 1
    #     try:
    #         uplist = os.listdir(self.LDlDir) ##a,b = os.path.splitext()
    #         ans = self.bp.syncup(self.LDlDir,self.RDir)
    #         resp = self._chkok(ans)
    #         for item in uplist:
    #             file,_ = os.path.splitext(item)
    #             if not resp:
    #                 ans = self.upload(item)
    #                 assert self._chkok(ans)
    #             os.remove(fpath)
    #             self.dlqueue.remove(item)
    #             print str(item),' uploaded. deleted'
    #             self._addID(item,self.uploaded_video)
    #
    #     except Exception,e:
    #         print 'upload failed.'
    #         print self.bp.response.json()
    #         print e
    def worker_updel(self):
        # Upload-and-delete worker: drains dlqueue until it is empty.
        while len(self.dlqueue) > 0:
            item = self.dlqueue.pop()
            print '=processing=',len(self.dlqueue),'--',item
            fpath = os.path.join(self.LDlDir,''.join([item,'.',self.ext]) )
            try:
                assert os.path.exists(fpath)
            except Exception,e:
                print e
            ans = self.upload(item)
            if ans == True:
                os.remove(fpath)
                print str(item),' uploaded. deleted'
            else:
                #self.dlqueue.append(item)
                print 'upload fail, skip ',item
                continue
    def worker_download(self):
        # Download worker: keeps at most que_max finished files pending upload.
        while len(self.alldown) > 0:
            if len(self.dlqueue) < self.que_max: # if the queue is short enough, keep downloading; uploads run in the other worker
                # download
                item = self.alldown.pop()
                print '=== remain ',len(self.alldown),'== downloading... ',item
                if self.download(item):
                    self.dlqueue.append(item)
            else:
                time.sleep(1)
    def process(self):
        # Run the upload and download workers concurrently until both finish.
        t1 = threading.Thread(target = self.worker_updel)
        t2 = threading.Thread(target = self.worker_download)#,args = (8,))
        #t3 = threading.Thread(target = self.worker_updel)
        t1.start()
        t2.start()
        #t3.start()
        t1.join()
        t2.join()
        #t3.join()
def readSet(input_file, fmt):
    """Load the video-id dataset from *input_file* according to *fmt*.

    Supported formats are 'json', 'txt' and 'csv'; any other value of
    *fmt* yields an empty dict without touching the file.
    """
    if fmt == 'json':
        return readJsonSet(input_file)
    if fmt == 'txt':
        return readTXTSet(input_file)
    if fmt == 'csv':
        return readCSVSet(input_file)
    return {}
def setOp(dataset):
    """Return ``set(dataset)`` minus ids already known bad or uploaded.

    Each blacklist file that exists on disk contributes its ids to the
    exclusion set; missing files are silently skipped.
    """
    # bad set
    blacklist_files = ['bad_video.log', 'act.txt', 'k400.txt', 'bdnet.txt']
    excluded = set()
    for path in blacklist_files:
        if os.path.exists(path):
            excluded |= set(readTXTSet(path))
    # video list
    return set(dataset) - excluded
def main(args):
    """Build the working id set from the CLI args and run the pipeline.

    Unless ``--no_bad`` was given, ids already logged as bad or uploaded
    are filtered out before handing the set to TheParty.
    """
    # print args.input_file
    # print args.output_dir
    # print args.fmt
    dataset = readSet(args.input_file,args.fmt)
    if args.no_bad == False:
        dataset = setOp(dataset)
    aworker = TheParty(dataset,args.output_dir)
    aworker.process()
if __name__ == '__main__':
    # CLI: positional input file of YouTube ids (json/txt/csv) and output dir;
    # --no_bad skips the bad/uploaded-id filtering.
    description = 'Helper script for downloading and trimming kinetics videos.'
    p = argparse.ArgumentParser(description=description)
    p.add_argument('input_file',type=str, help=('input file name'))
    p.add_argument('fmt',type=str,default='json',choices=['json','txt','csv'],help=('Input file format') )
    p.add_argument('output_dir',type=str, help=('Output directory where videos will be saved.') )
    p.add_argument('--no_bad', '--force', default=False, action="store_true")
    #p.add_argument('-n', '--num-jobs', type=int, default=2)
    #main(**vars(p.parse_args() ) )
    main( p.parse_args() )
|
18,927 | 2084e5afd97b5fae9230c476fcba8b1e5138761f | a=int(input("Enter the no of tanks:- ")) # no of tanks
# `a` (the number of tanks) is read from stdin on the preceding line.
tank_level=[]
leak_rate=[]
out=[]
for i in range(a):
    tank_level.append(int(input("Enter tank level of no {} tank :- ".format(i))))
    leak_rate.append(int(input("Enter leak rate of no {} tank:- ".format(i))))
    # NOTE(review): a leak rate of 0 raises ZeroDivisionError here — confirm
    # inputs are validated upstream.
    out.append(tank_level[i]/leak_rate[i]) #calculating deciding factor (time to empty)
for i in range(a):
    b=0
    for j in range(a): #finding the least deciding factor
        if out[i]<=out[j] and j!=i:
            b=b+1
    if b==(a-1):
        least=out[i]
        break
final=[]
for i in range(a): #printing tank number which will empty first,
    if out[i]==least: #if two tanks are emptying together then print both tank nos in ascending order separated by comma
        final.append(i)
print("These tank will empty first/together")
print(*final, sep = ", ")
|
18,928 | 38dbbf7d1ba9cb64b3a1be4a50bff14e867e3b47 |
""" run_mot_challenge.py
Run example:
run_mot_challenge.py --USE_PARALLEL False --METRICS Hota --TRACKERS_TO_EVAL Lif_T
Command Line Arguments: Defaults, # Comments
Eval arguments:
'USE_PARALLEL': False,
'NUM_PARALLEL_CORES': 8,
'BREAK_ON_ERROR': True,
'PRINT_RESULTS': True,
'PRINT_ONLY_COMBINED': False,
'PRINT_CONFIG': True,
'TIME_PROGRESS': True,
'OUTPUT_SUMMARY': True,
'OUTPUT_DETAILED': True,
'PLOT_CURVES': True,
Dataset arguments:
'GT_FOLDER': os.path.join(code_path, 'data/gt/mot_challenge/'), # Location of GT data
'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/mot_challenge/'), # Trackers location
'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
'CLASSES_TO_EVAL': ['pedestrian'], # Valid: ['pedestrian']
'BENCHMARK': 'MOT17', # Valid: 'MOT17', 'MOT16', 'MOT20', '2D_MOT_2015'
'SPLIT_TO_EVAL': 'train', # Valid: 'train', 'test', 'all'
'INPUT_AS_ZIP': False, # Whether tracker input files are zipped
'PRINT_CONFIG': True, # Whether to print current config
'DO_PREPROC': True, # Whether to perform preprocessing (never done for 2D_MOT_2015)
'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
Metric arguments:
'METRICS': ['Hota','Clear', 'ID', 'Count']
"""
import sys
import os
import argparse
from multiprocessing import freeze_support
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import trackeval # noqa: E402
import logging
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s][%(name)s][%(levelname)-8s][%(process)d][%(module)s:%(funcName)s:%(lineno)d] - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
if __name__ == '__main__':
    cmd = 'python ' + ' '.join(sys.argv)
    logging.info(cmd)
    freeze_support()
    # Command line interface: one --FLAG per key of the merged default configs.
    my_config = {'UUID': 'default_uuid', 'SUFFIX': ''}
    default_eval_config = trackeval.Evaluator.get_default_eval_config()
    default_dataset_config = trackeval.datasets.MotChallenge2DBox.get_default_dataset_config()
    default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity']}
    config = {**default_eval_config, **default_dataset_config, **default_metrics_config, **my_config} # Merge default configs
    parser = argparse.ArgumentParser()
    for setting in config.keys():
        if type(config[setting]) == list or type(config[setting]) == type(None):
            parser.add_argument("--" + setting, nargs='+')
        else:
            parser.add_argument("--" + setting)
    args = parser.parse_args().__dict__
    # Coerce each provided CLI string back to the type of the default value.
    for setting in args.keys():
        if args[setting] is not None:
            if type(config[setting]) == type(True):
                if args[setting] == 'True':
                    x = True
                elif args[setting] == 'False':
                    x = False
                else:
                    raise Exception('Command line parameter ' + setting + 'must be True or False')
            elif type(config[setting]) == type(1):
                x = int(args[setting])
            elif type(args[setting]) == type(None):
                x = None
            else:
                x = args[setting]
            config[setting] = x
    eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()}
    dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()}
    metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()}
    import wandb
    # Retry wandb.init up to 10 times to work around transient UsageErrors.
    for i in range(10):
        try:
            wandb_run = wandb.init(project='object-motion', resume='allow', id=config['UUID'], dir='/scratch/cluster/jozhang/logs', save_code=True)
        except wandb.errors.error.UsageError as e:
            # see https://github.com/wandb/client/issues/1409#issuecomment-723371808
            if i == 9:
                logging.error(f'Could not init wandb in 10 attempts, exiting')
                raise e
            logging.warning(f'wandb.init failed {i}th attempt, retrying')
            import time
            time.sleep(10)
    # NOTE(review): there is no `break` after a successful wandb.init, so init
    # is attempted on every loop iteration — confirm this is intended.
    wandb.config.update({f'new-eval-{k}': v for k, v in config.items()}, allow_val_change=True)
    # Run code
    evaluator = trackeval.Evaluator(eval_config)
    dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)]
    metrics_list = []
    for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity]:
        if metric.get_name() in metrics_config['METRICS']:
            metrics_list.append(metric())
    if len(metrics_list) == 0:
        raise Exception('No metrics selected for evaluation')
    ret, msg = evaluator.evaluate(dataset_list, metrics_list)
    metrics = ret['MotChallenge2DBox'][dataset_config['TRACKERS_TO_EVAL'][0]]
    import numpy as np
    def get_worst_best(met, k, idmap):
        # Ids of the k lowest-scoring and k highest-scoring tracks for metric `met`.
        met_ids = np.argsort(met)
        return idmap[met_ids[:k]], idmap[met_ids[-k:]]
    # Dump easiest/hardest track ids per sequence to <output>/tids.csv.
    rows = []
    for vid_id in metrics:
        hota_vid = metrics[vid_id]['pedestrian']['HOTA']
        if vid_id == 'COMBINED_SEQ' or 'GT-RHOTA_mean' not in hota_vid:
            continue
        rhota_hard_ids, rhota_easy_ids = get_worst_best(hota_vid['GT-RHOTA_mean'], 5, hota_vid['raw_gt_ids'])
        gt_assa_hard_ids, gt_assa_easy_ids = get_worst_best(hota_vid['GT-AssA_mean'], 5, hota_vid['raw_gt_ids'])
        pr_assa_hard_ids, pr_assa_easy_ids = get_worst_best(hota_vid['PR-AssA_mean'], 10, hota_vid['raw_pr_ids'])
        rows.append([vid_id, 'easy', 'gt', *rhota_easy_ids, *gt_assa_easy_ids])
        rows.append([vid_id, 'hard', 'gt', *rhota_hard_ids, *gt_assa_hard_ids])
        rows.append([vid_id, 'easy', 'pr', *pr_assa_easy_ids])
        rows.append([vid_id, 'hard', 'pr', *pr_assa_hard_ids])
    import pandas as pd
    header = ['vid_id', 'difficulty', 'type', *[str(i) for i in range(10)]]
    fp = dataset_list[0].get_output_fol(dataset_list[0].get_eval_info()[0][0]) + 'tids.csv'
    pd.DataFrame(rows).to_csv(fp, header=header, index=False)
    # Per-video scalar summaries for wandb logging.
    per_vid_hota = ['AssA']
    per_vid_mota = ['IDSW']
    per_vid_log = {}
    for vid_id in metrics:
        metrics_vid = metrics[vid_id]['pedestrian']
        for m in per_vid_hota:
            per_vid_log[f'Vid{vid_id}-{m}_mean'] = metrics_vid['HOTA'][m].mean()
        for m in per_vid_mota:
            per_vid_log[f'Vid{vid_id}-{m}'] = metrics_vid['CLEAR'][m]
    ped_metrics = metrics['COMBINED_SEQ']['pedestrian']
    to_take_mean = ['DetRe', 'DetPr', 'DetA', 'AssRe', 'AssPr', 'AssA', 'HOTA']
    for m in to_take_mean:
        ped_metrics['HOTA'][f'{m}_mean'] = ped_metrics['HOTA'][m].mean()
    # Per-ground-truth-track breakdown, written to <output>/per_gt.csv.
    rows = []
    for vid_id in metrics:
        for cls in metrics[vid_id]:
            hota_vid = metrics[vid_id][cls]['HOTA']
            if vid_id == 'COMBINED_SEQ' or 'GT-RHOTA_mean' not in hota_vid:
                continue
            for i in range(len(hota_vid['raw_gt_ids'])):
                rows.append({
                    'vid_name': vid_id,
                    'cls': cls,
                    'gt_track_id': hota_vid['raw_gt_ids'][i],
                    'rhota': hota_vid['GT-RHOTA_mean'].tolist()[i],
                    'assa': hota_vid['GT-AssA_mean'].tolist()[i],
                    **{f'Vid_{m}_mean': hota_vid[m].mean() for m in to_take_mean}
                })
    fp = dataset_list[0].get_output_fol(dataset_list[0].get_eval_info()[0][0]) + 'per_gt.csv'
    pd.DataFrame(rows).to_csv(fp, index=False)
    # Flatten the combined metrics into prefixed scalars and log them to wandb.
    hota_keep = ['HOTA(0)', 'LocA(0)', 'HOTALocA(0)'] + [f'{m}_mean' for m in to_take_mean]
    hota = {f'HOTA-{k}': v for k, v in ped_metrics['HOTA'].items() if k in hota_keep}
    clear = {f'CLEAR-{k}': v for k, v in ped_metrics['CLEAR'].items()}
    iden = {f'IDENTITY-{k}': v for k, v in ped_metrics['Identity'].items()}
    to_log = {**clear, **hota, **iden, **per_vid_log}
    to_log = {f'{k}{config["SUFFIX"]}': v for k, v in to_log.items()}
    for k, v in to_log.items():
        wandb.run.summary[k] = v
    wandb.log(to_log)
    wandb_run.finish()
    for k, v in per_vid_log.items():
        logging.info(f' {k} - {v}')
    logging.info(f' MOTA - {clear["CLEAR-MOTA"]}, IDSW {clear["CLEAR-IDSW"]}, Frag {clear["CLEAR-Frag"]}')
    for k, v in hota.items():
        logging.info(f' {k} - {v}')
    logging.info('we are done!')
|
18,929 | 71c769e712d492344fce4490344bc25625eff743 | # re.sub函数
# re.sub replaces occurrences in a string that match a pattern.
# re.sub(pattern, repl, string, count=0, flags=0)
#   pattern - regular-expression pattern string
#   repl    - replacement string, or a function returning one
#   string  - original string to search and replace in
#   count   - maximum number of replacements; 0 (default) replaces all matches
import re
# 13577668899 , 湖南号码
str1 = '135 7766 8899 , 湖南号码'
result_01 = str1.replace(' ','')
print(result_01)
str1 = '135 7766 8899 , 湖南号码' # 一个空格
result_02 = re.sub('\d\s+\d','',str1) # 1376899 , 湖南号码
print(result_02)
# result_03 = re.sub('(\d+) (\d+) (\d+)',r'\1\2\3',str1) # \1\2\3 表示()的分组
# result_03 = re.sub('(\d+) (\d+) (\d+)',r'\1\3\2',str1) # 13588997766 , 湖南号码
result_03 = re.sub('(\d+) (\d+) (\d+)',r'133',str1) # 133 , 湖南号码
print(result_03)
str2 = '135 7766 8899 , 湖南号码' # 多个空格
result_04 = re.sub('(\d+)\s+(\d+) (\d+)',r'\1\2\3',str2) # r 原字符集,不会被转码
print(result_04)
result_05 = re.sub('\s,.*$','',str2) # 135 7766 8899
print(result_05)
|
18,930 | 868c25704343870bddbdb78a16a317ec5e85f1e4 | # Copyright 2020 Keren Ye, University of Pittsburgh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from modeling.layers import id_to_token
tf.compat.v1.enable_eager_execution()
class IdToTokenLayerTest(tf.test.TestCase):
  """Unit tests for the IdToTokenLayer integer-id -> token lookup layer."""

  def test_id_to_token(self):
    # Known ids (5, 11) map to their tokens; any other id falls back to 'OOV'.
    test_layer = id_to_token.IdToTokenLayer({5: 'hello', 11: 'world'}, 'OOV')
    output = test_layer(tf.convert_to_tensor([5, 1, 11, 2]))
    self.assertAllEqual(output, [b'hello', b'OOV', b'world', b'OOV'])
    # The layer also handles inputs of a different length.
    output = test_layer(tf.convert_to_tensor([5, 1, 11]))
    self.assertAllEqual(output, [b'hello', b'OOV', b'world'])

  def test_id_to_token_2d(self):
    # Lookup is applied element-wise on a 2-D id tensor; shape is preserved.
    test_layer = id_to_token.IdToTokenLayer({
        2: 'one',
        3: 'world',
        6: 'dream'
    }, 'UNK')
    output = test_layer(tf.convert_to_tensor([[4, 4, 3, 4], [2, 3, 2, 6]]))
    self.assertAllEqual(output, [[b'UNK', b'UNK', b'world', b'UNK'],
                                 [b'one', b'world', b'one', b'dream']])
if __name__ == '__main__':
  tf.test.main()  # run all TestCase methods in this module
|
18,931 | b84f275cab24a6f8479369f53be38b436c124d87 | #coding:utf-8
json = {"stat":1,"info":"正常,且为当前小时的key,可返回新闻列表数据","endkey":"9223370469183283782|1207|||","newkey":"1567675355619|1207","data":[{"batchid":"3382c05b46ed9718","bigpic":1,"cacheTime":0,"clkrate":"","ctrtime":1567676642658,"date":1567677469,"dfh_headpic":"//00.imgmini.eastday.com/dcminisite/portrait/42944c1c65f2ac294245aba298ec0629.jpg","dfh_nickname":"环球时报","dfh_uid":"200000000006440","hotnews":1,"imggif":[],"ispol":"0","isrecom":0,"issptopic":"0","isvideo":0,"lbimg":[{"imgheight":309,"imgwidth":550,"src":"//05imgmini.eastday.com/mobile/20190903/20190903084149_dd7f7a1290283cb64f06371043614cbf_1_mwpm_05501609.jpg"}],"miniimg":[{"imgheight":180,"imgwidth":320,"src":"//05imgmini.eastday.com/mobile/20190903/20190903084149_dd7f7a1290283cb64f06371043614cbf_1_mwpm_03201609.jpg"}],"miniimg_size":1,"recommendtype":"-1","recommendurl":"","rowkey":"9223370469383466438","source":"环球时报","subtype":"","suptop":"0","topic":"突发:黑衣暴徒不顾法庭禁制令 再次闯入香港机场闹事","tpch":"国内","type":"guonei","url":"https://mini.eastday.com/a/190903084149369.html?qid=null&needrec=index_jrdftt&subtype=toutiao&rcgid=5af90557223ed2c2&pgnum=1&idx=1&ishot=1&recommendtype=-1&suptop=0&domain=mini","urlfrom":"ifengapp","videoalltime":0},{"batchid":"3382c05b46ed9718","bigpic":1,"cacheTime":0,"clkrate":"","ctrtime":1567676642655,"date":1567676389,"dfh_headpic":"//00.imgmini.eastday.com/dcminisite/portrait/7ee64efff27b8678bf11f98a5dc98347.jpg","dfh_nickname":"新民网","dfh_uid":"200000000006557","hotnews":0,"imggif":[],"ispol":"0","isrecom":0,"issptopic":"0","isvideo":0,"lbimg":[{"imgheight":309,"imgwidth":550,"src":"//01imgmini.eastday.com/mobile/20190903/20190903144248_d7a25a43e1f6204eb556ca7b24cbd7f6_1_mwpm_05501609.jpg"}],"miniimg":[{"imgheight":180,"imgwidth":320,"src":"//01imgmini.eastday.com/mobile/20190903/20190903144248_d7a25a43e1f6204eb556ca7b24cbd7f6_1_mwpm_03201609.jpg"}],"miniimg_size":1,"recommendtype":"-1","recommendurl":"","rowkey":"9223370469367857543","source":"新民网","subtype":"","suptop":"0","topic":"香港暴徒砸地铁后被警察抓
捕时发抖求饶“我是学生”,媒体:既然害怕,就早点收手","tpch":"国际","type":"guoji","url":"https://mini.eastday.com/a/190903130158264.html?qid=null&needrec=index_jrdftt&subtype=toutiao&rcgid=5af90557223ed2c2&pgnum=1&idx=2&ishot=1&recommendtype=-1&suptop=0&domain=mini","urlfrom":"sohunews","videoalltime":0},{"batchid":"3382c05b46ed9718","bigpic":1,"cacheTime":0,"clkrate":"","ctrtime":1567674242417,"date":1567676089,"dfh_headpic":"//00imgmini.eastday.com/dcminisite/portrait/36e7c935adb3c0a155e9f64a37607497.jpg","dfh_nickname":"快乐小龙儿","dfh_uid":"200000000187882","hotnews":1,"imggif":[],"ispol":"0","isrecom":0,"issptopic":"0","isvideo":0,"lbimg":[{"imgheight":309,"imgwidth":550,"src":"//09imgmini.eastday.com/mobile/20190905/2019090501_e8e09e6fc4de4838b1246f069ce60832_7082_cover_mwpm_05501609.jpg"}],"miniimg":[{"imgheight":180,"imgwidth":320,"src":"//09imgmini.eastday.com/mobile/20190905/2019090501_e8e09e6fc4de4838b1246f069ce60832_7082_cover_mwpm_03201609.jpg"},{"imgheight":180,"imgwidth":320,"src":"//09imgmini.eastday.com/mobile/20190905/2019090501_a5d8ba9278a0435d8498e221fc4c3b18_4395_cover_mwpm_03201609.jpg"},{"imgheight":180,"imgwidth":320,"src":"//09imgmini.eastday.com/mobile/20190905/2019090501_584cd0e3dd5c4206a7d0756e10a89583_3649_cover_mwpm_03201609.jpg"}],"miniimg_size":3,"recommendtype":"-1","recommendurl":"","rowkey":"9223370469236812686","source":"快乐小龙儿","subtype":"","suptop":"0","topic":"海清已经无戏可拍?有导演曝出丑闻,自己的行为是要付出代价的","tpch":"娱乐","type":"yule","url":"https://mini.eastday.com/a/190905012603121.html?qid=null&needrec=index_jrdftt&subtype=toutiao&rcgid=5af90557223ed2c2&pgnum=1&idx=3&ishot=1&recommendtype=-1&suptop=0&domain=mini","urlfrom":"dongfanghao","videoalltime":0},{"batchid":"3382c05b46ed9718","bigpic":1,"cacheTime":0,"clkrate":"","ctrtime":1567668242508,"date":1567676329,"dfh_headpic":"//00.imgmini.eastday.com/dcminisite/portrait/1540450834206a2cb4c_media_head_pic.png","dfh_nickname":"光明网","dfh_uid":"200000000134100","hotnews":0,"imggif":[],"ispol":"0","isrecom":0,"issptopic":"0","is
video":0,"lbimg":[{"imgheight":309,"imgwidth":550,"src":"//04imgmini.eastday.com/mobile/20190904/20190904152101_48ca9006da313747c0399c5d605de858_2_mwpm_05501609.jpg"}],"miniimg":[{"imgheight":180,"imgwidth":320,"src":"//04imgmini.eastday.com/mobile/20190904/20190904152101_48ca9006da313747c0399c5d605de858_2_mwpm_03201609.jpg"},{"imgheight":180,"imgwidth":320,"src":"//04imgmini.eastday.com/mobile/20190904/20190904152101_48ca9006da313747c0399c5d605de858_3_mwpm_03201609.jpg"},{"imgheight":180,"imgwidth":320,"src":"//04imgmini.eastday.com/mobile/20190904/20190904152101_48ca9006da313747c0399c5d605de858_1_mwpm_03201609.jpg"}],"miniimg_size":3,"recommendtype":"-1","recommendurl":"","rowkey":"9223370469273114451","source":"光明网","subtype":"","suptop":"0","topic":"男子开宝马姿势羞耻 路人连忙报警 原因竟难以启齿","tpch":"社会","type":"shehui","url":"https://mini.eastday.com/a/190904152101356.html?qid=null&needrec=index_jrdftt&subtype=toutiao&rcgid=5af90557223ed2c2&pgnum=1&idx=4&ishot=1&recommendtype=-1&suptop=0&domain=mini","urlfrom":"dongfanghao","videoalltime":0},{"batchid":"3382c05b46ed9718","bigpic":1,"cacheTime":0,"clkrate":"","ctrtime":1567665842125,"date":1567676329,"dfh_headpic":"","dfh_nickname":"","dfh_uid":"","hotnews":0,"imggif":[],"ispol":"0","isrecom":0,"issptopic":"0","isvideo":0,"lbimg":[{"imgheight":239,"imgwidth":426,"src":"//03imgmini.eastday.com/mobile/20190902/20190902202859_314038a7ed990b837273094e8cda09b9_3_mwpm_05501609.jpg"}],"miniimg":[{"imgheight":180,"imgwidth":320,"src":"//03imgmini.eastday.com/mobile/20190902/20190902202859_314038a7ed990b837273094e8cda09b9_3_mwpm_03201609.jpg"},{"imgheight":180,"imgwidth":320,"src":"//03imgmini.eastday.com/mobile/20190902/20190902202859_314038a7ed990b837273094e8cda09b9_4_mwpm_03201609.jpg"},{"imgheight":180,"imgwidth":320,"src":"//03imgmini.eastday.com/mobile/20190902/20190902202859_314038a7ed990b837273094e8cda09b9_2_mwpm_03201609.jpg"},{"imgheight":180,"imgwidth":320,"src":"//03imgmini.eastday.com/mobile/20190902/20190902202859_314038a7ed
990b837273094e8cda09b9_1_mwpm_03201609.jpg"}],"miniimg_size":4,"recommendtype":"-1","recommendurl":"","rowkey":"9223370469427436170","source":"萌狗宝宝","subtype":"","suptop":"0","topic":"买大米时,这种大米千万别买,白送给你也不要,家里有的赶紧扔掉","tpch":"健康","type":"jiankang","url":"https://mini.eastday.com/a/190902202859637.html?qid=null&needrec=index_jrdftt&subtype=toutiao&rcgid=5af90557223ed2c2&pgnum=1&idx=5&ishot=1&recommendtype=-1&suptop=0&domain=mini","urlfrom":"sohunews","videoalltime":0},{"batchid":"3382c05b46ed9718","bigpic":1,"cacheTime":0,"clkrate":"","ctrtime":1567676042733,"date":1567677349,"dfh_headpic":"//00.imgmini.eastday.com/dcminisite/portrait/a7181f66b3ee0912d7da03b4ae961a2c.jpg","dfh_nickname":"飞行邦","dfh_uid":"200000000127367","hotnews":1,"imggif":[],"ispol":"0","isrecom":0,"issptopic":"0","isvideo":0,"lbimg":[{"imgheight":300,"imgwidth":533,"src":"//04imgmini.eastday.com/mobile/20190903/2019090322_3ccfd7506d4a4e018c25855cc14a78d8_7197_cover_mwpm_05501609.jpg"}],"miniimg":[{"imgheight":180,"imgwidth":320,"src":"//04imgmini.eastday.com/mobile/20190903/2019090322_3ccfd7506d4a4e018c25855cc14a78d8_7197_cover_mwpm_03201609.jpg"},{"imgheight":180,"imgwidth":320,"src":"//04imgmini.eastday.com/mobile/20190903/2019090322_0ff2ef8e142a468c95453bf8f41ed8a6_3134_cover_mwpm_03201609.jpg"},{"imgheight":180,"imgwidth":320,"src":"//04imgmini.eastday.com/mobile/20190903/2019090322_5e914e2acb6341b9b81c7962f01194af_8613_cover_mwpm_03201609.jpg"}],"miniimg_size":3,"recommendtype":"-1","recommendurl":"","rowkey":"9223370469333882500","source":"飞行邦","subtype":"","suptop":"0","topic":"2019第五届中国航空维修成本管理高峰论坛","tpch":"国内","type":"guonei","url":"https://mini.eastday.com/a/190903222813307.html?qid=null&needrec=index_jrdftt&subtype=toutiao&rcgid=5af90557223ed2c2&pgnum=1&idx=6&ishot=1&recommendtype=-1&suptop=0&domain=mini","urlfrom":"dongfanghao","videoalltime":0}]}
# Print the article URL of every news item under the response's "data" key.
# NOTE(review): the response dict is bound to the name `json`, which would
# shadow the stdlib `json` module if it were ever imported in this script.
for json_s in json['data']:
    url=json_s['url']
    print(json_s['url'])
18,932 | e8b1b11c222be83451992de51e331c9a01e4485d | import requests, re, pymongo, time, xlrd
from lxml import html
from multiprocessing import Pool
url = 'http://www.newseed.cn/project/61926'
header = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}
cookies = {
'Cookie': 'ARRAffinity=197ae5372184c64aeca47f780a2e053f3a50366e2bda392cd4bfa3b38e39a929; __uid=7708072285; __utmt=1; ASP.NET_SessionId=l3ey4acdym3sk2mjtjmxn5rd; pedaily.cn=uid=201531&username=18516630543&password=9724D8CA473B50D9B007DAE52181AFD7&email=&mobile=18516630543&oauth_token=&oauth_token_secret=&unionid=&hiname=%E6%96%B0%E8%8A%BD%E7%BD%91%E5%8F%8B721531&photo=&blogurl=&usertype=0&companyid=0&logintype=12&roletype=0&ismobilevalidated=True&isemailvalidated=False&isverified=False&isok=False; jiathis_rdc=%7B%22http%3A//www.newseed.cn/project/62156%22%3A%220%7C1513836386928%22%7D; __utma=117171865.1601227618.1513836341.1513836341.1513836341.1; __utmb=117171865.5.10.1513836341; __utmc=117171865; __utmz=117171865.1513836341.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); zg_did=%7B%22did%22%3A%20%2216077ad63fc158-017288883860d9-464a0129-e1000-16077ad63fd5b%22%7D; zg_2804ec8ba91741c0853e364274858816=%7B%22sid%22%3A%201513836340227%2C%22updated%22%3A%201513836391712%2C%22info%22%3A%201513836340233%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22%22%2C%22cuid%22%3A%20%22201531%22%7D; Hm_lvt_155833ecab8e70af6f2498f897bd8616=1513836341; Hm_lpvt_155833ecab8e70af6f2498f897bd8616=1513836392; Hm_lvt_25919c38fb62b67cfb40d17ce3348508=1513836341; Hm_lpvt_25919c38fb62b67cfb40d17ce3348508=1513836392'}
db = pymongo.MongoClient(host='localhost', port=27017)['Falonie']
collection_crawled = db['newseed_Pre-A_crawled_result']
collection=db['newseed_Pre-A_urls']
test_urls = ['http://www.newseed.cn/project/35361', 'http://www.newseed.cn/project/35154',
'http://www.newseed.cn/project/34386', 'http://www.newseed.cn/project/33610',
'http://www.newseed.cn/project/30502']
file = 'newseed_种子_urls.xlsx'
def read_excel(file, start_row=100):
    """Read project URLs from the first sheet of an Excel workbook.

    Args:
        file: path to the workbook; column 0 holds one URL per row.
        start_row: first (0-based) row to read. Defaults to 100, matching
            the previous hard-coded offset -- presumably to skip rows that
            were already crawled; TODO confirm this offset is still wanted.

    Returns:
        list[str]: the URLs found in column 0 from start_row to the end.
    """
    with xlrd.open_workbook(file) as workbook:
        sheet = workbook.sheets()[0]
        return [sheet.row_values(rownum)[0]
                for rownum in range(start_row, sheet.nrows)]
def read_mongodb():
    """Fetch every stored URL from the newseed_Pre-A_urls collection."""
    urls_collection = db['newseed_Pre-A_urls']
    return [doc['url'] for doc in urls_collection.find({})]
def uncrawled_urls():
    """Return the queued URLs that have not yet been crawled.

    Set difference between the URL queue (`collection`) and the archive of
    finished results (`collection_crawled`); order is unspecified.
    """
    queued = {doc['url'] for doc in collection.find({})}
    finished = {doc['url'] for doc in collection_crawled.find({})}
    return list(queued - finished)
def parse_url(url):
    """Scrape one newseed.cn project page and store the extracted record.

    Extracts the project's name, sector, platform, location, homepage,
    founding date, funding status, tags, description, contact, leadership
    and logo URL; inserts the record into the `collection` Mongo collection
    and returns it.  Returns None if the info box is not found on the page.
    """
    session = requests.session()
    r = session.get(url=url, headers=header, cookies=cookies).text
    selector = html.fromstring(r)
    # The info box appears at most once per page; the loop doubles as an
    # existence check (the function returns from inside the first iteration).
    for _ in selector.xpath('//div[@class="info-box"]/div[@class="info"]'):
        product = _.xpath('h1/text()')
        product = ''.join(str(i).strip() for i in product)
        field = _.xpath('ul[@class="subinfo"]/li[@class="l"]/p[1]/a/text()')
        field = ''.join(str(i).strip() for i in field)
        platform = _.xpath('ul[@class="subinfo"]/li[@class="l"]/p[2]/span[1]/text()')
        platform = ''.join(str(i).strip() for i in platform)
        location = _.xpath('ul[@class="subinfo"]/li[@class="l"]/p[2]/span[2]/text()')
        location = ''.join(str(i).strip() for i in location)
        homepage = _.xpath('ul[@class="subinfo"]/li[@class="l"]/p[3]/span[1]/descendant::text()')
        homepage = ''.join(str(i).strip() for i in homepage)
        establish_time = _.xpath('ul[@class="subinfo"]/li[@class="r box-fix-r"]/p[1]/text()')
        establish_time = ''.join(str(i).strip() for i in establish_time)
        status = _.xpath('ul[@class="subinfo"]/li[@class="r box-fix-r"]/p[2]/text()')
        status = ''.join(str(i).strip() for i in status)
        tags = selector.xpath('//div[@class="project-top"]/div[@class="txt"]/div[1]/a/text()')
        tags = ''.join(str(i).strip() for i in tags)
        description = selector.xpath('//div[@class="box-plate"]/div[@class="desc"]/text()')
        # Strip all whitespace/newlines from the joined description.
        description = re.sub(r'[\n\r ]', '', ''.join(str(i).strip() for i in description))
        contact = _.xpath('//div[@class="project-status"]/div[@class="people-list"]/h4[@class="title"]/a/text()')
        contact = ''.join(str(i).strip() for i in contact)
        leadership = selector.xpath('//div[@class="item-list people-list"]/ul/li/div[2]/descendant::text()')
        # Drop 0/1-character fragments (whitespace-only text nodes).  Note the
        # comprehension reuses `_` as its own variable; in Python 3 this does
        # not clobber the outer `_` (comprehensions have their own scope).
        leadership = list(filter(lambda x: len(x) > 1, [str(_).strip() for _ in leadership]))
        logo_url = selector.xpath('//div[@class="img"]/span[@class="img-middle"]/img/@src')
        logo_url = ''.join(str(i).strip() for i in logo_url)
        # print(product,field,platform,location,homepage,establish_time,status,tags,description)
        item = {'product': product, 'field': field, 'platform': platform, 'location': location, 'homepage': homepage,
                'establish_time': establish_time, 'status': status, 'tags': tags, 'description': description,
                'contact': contact, 'leadership': leadership, 'logo_url': logo_url, 'url': url}
        collection.insert(item)
        return item
def manage_read_excel():
    """Sequentially crawl every URL listed in the Excel file and time the run."""
    started = time.time()
    for idx, link in enumerate(read_excel(file), 1):
        print(idx, parse_url(link))
    print(time.time() - started)
def manage():
    """Crawl every not-yet-crawled URL in parallel and archive each result.

    Insert failures (e.g. duplicates) are reported but do not stop the run.
    """
    started = time.time()
    with Pool() as pool:
        crawled_items = pool.map(parse_url, uncrawled_urls())
        for idx, item in enumerate(crawled_items, 1):
            print(idx, item)
            try:
                collection_crawled.insert(item)
            except Exception as err:
                print(err)
    print(time.time() - started)
# Script entry point: crawl all pending URLs (alternate invocations kept
# commented for manual use).
if __name__ == '__main__':
    # print(read_mongodb().__len__())
    # print(parse_url('http://www.newseed.cn/project/30502'))
    manage()
    # print(uncrawled_urls().__len__())
    # manage_read_excel()
    # print(read_excel(file))
18,933 | 90fb6ee33111b8663d54ac62844ea388fa4fde71 | import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from numpy import sin, pi, shape
import sigA
import sounddevice as sd
#foldername = "/Users/macbookpro/PycharmProjects/PSUACS/ACS597_SigAnalysis/"
# Directory containing the input .wav files (machine-specific; alternate above).
foldername = "C:/Users/alshehrj/PycharmProjects/PSUACS/ACS597_SigAnalysis/"
def spectrogram(x_time, fs, sliceLength, sync=0, overlap=0,color="jet", dB=True, winType="uniform", scale=True):
    """Plot a spectrogram of x_time using sigA.spectroArray.

    x_time      : time-series samples
    fs          : sample rate [Hz]
    sliceLength : samples per FFT record
    sync, overlap, winType : forwarded to sigA.spectroArray
    dB    : plot 10*log10(Gxx) when True, linear Gxx otherwise
    scale : when True, restrict the displayed frequency axis
    """
    N = len(x_time)
    Nslices = int(N / sliceLength)
    T = Nslices * sliceLength / float(fs)  # time covered by whole slices only
    print "T: " + str(T)
    _, freqAvg, _, Gxx = sigA.spectroArray(x_time=x_time, fs=fs, sliceLength=sliceLength, sync=sync, overlap=overlap, winType=winType)
    GxxRef = 1.0  # V^2/Hz -- 0 dB reference level
    Gxx_dB = 10 * np.log10(Gxx / GxxRef)
    ext = [0, T, 0, fs / 2]  # image extent: time on x, frequency on y
    if dB:
        plt.imshow(Gxx_dB.T, aspect="auto", origin="lower", cmap=color, extent=ext)
    else:
        plt.imshow(Gxx.T, aspect="auto", origin="lower",cmap=color, extent=ext)
    if scale:
        # NOTE(review): lower y-limit uses ext[1] (total time T) rather than
        # ext[2] (0 Hz); this looks like a typo -- confirm intended limits.
        plt.ylim(ext[1] + 1, ext[3] * 0.8)
def main(args):
    """Entry point: run the sine and race-car demos (recording() disabled)."""
    sinTest()
    raceCar()
    #recording()
def sinTest():
    """Demo: 6 s signal whose 128 Hz tone is present only for t in [2, 4) s,
    then show its spectrogram."""
    fs = 2048.0
    T = 6.0
    N=int(fs*T)
    print N
    times = sigA.timeVec(N,fs)
    delT,delF,_= sigA.param(N,fs)
    f = 128  # tone frequency [Hz]
    x_time = np.zeros(N)
    t = sigA.timeVec(N,fs)
    # Build the burst: silence / sine / silence in 2-second segments.
    for i in range(N):
        if i*delT < 2.0:
            x_time[i] += 0
        elif i*delT < 4.0:
            x_time[i] += sin(2*pi*f*times[i])
        else:
            x_time[i] += 0
    plt.figure()
    plt.plot(t,x_time)
    plt.figure()
    sliceLength = 256 # Length of single record
    ov = 0 # Overlap
    spectrogram(x_time,fs,sliceLength,sync=0,dB=False,color="YlOrRd",overlap=ov,winType="uniform",scale=False)
    plt.xlabel("Time [s]")
    plt.ylabel("Frequency [Hz]")
    plt.title("Spectrogram, 128Hz Sine Wave")
    plt.show()
def raceCar():
    """Load a race-car .wav recording and plot its Doppler-shift spectrogram."""
    filename = "T4_C5_3L_dec4a.wav"
    path = foldername+filename
    fs , data = wavfile.read(path)
    Nwav = len(data)
    print data.dtype
    print data
    # Normalise integer PCM (int16 full scale 32768) to floats in [-1, 1).
    if data.dtype != np.float32:
        print "Converting from " + str(data.dtype) + " to float32"
        data = data.astype(np.float32)
        data = data /32768.0
    print fs
    print data
    print Nwav
    print Nwav/float(fs)
    print 10 * "-"
    #t = sigA.timeVec(Nwav, fs)
    print np.shape(data)
    ov = 0.75          # 75% record overlap
    sliceLength = 1024 # samples per FFT record
    plt.figure()
    spectrogram(data,fs,sliceLength,sync=0,dB=True,overlap=ov,winType="hann")
    plt.xlabel("Time [s]")
    plt.ylabel("Frequency [Hz]")
    plt.title("Racecar Doppler Shift")
    plt.show()
def recording():
    """Record 5 s from the default microphone, plot the averaged spectrum and
    spectrogram, and save the capture to test.wav."""
    fs = 44100
    T = 5
    N = fs*T
    print N
    # Blocking mono capture; sounddevice returns an (N, 1) array.
    recArray = sd.rec(frames=N,samplerate=fs,channels=1,blocking=True)
    x_time = np.reshape(recArray, (len(recArray),))
    t = sigA.timeVec(N,fs)
    plt.figure()
    plt.plot(t,x_time)
    ov = 0.75
    sliceLength = 2056  # NOTE(review): not a power of two -- was 2048 intended?
    GxxAvg = sigA.ssSpec(x_time=x_time,fs=fs)
    FreqAvg = sigA.freqVec(N,fs)
    plt.figure()
    plt.plot(FreqAvg[:len(GxxAvg)],GxxAvg)
    # Rescale to int16 full range before writing the WAV file.
    scaled = np.int16(x_time/np.max(np.abs(x_time)) * 32767)
    wavfile.write('test.wav', 44100, scaled)
    print np.shape(x_time)
    plt.figure()
    spectrogram(x_time=x_time, fs=fs, sliceLength=sliceLength, sync=0, dB=True, overlap=ov, winType="hann",scale=True)
    #spectroArray(x_time, fs, sliceLength, sync=0, overlap=ov, winType="hann")
    plt.title("Electric Guitar, Cmajor Chord + tremolo")
    plt.xlabel("Time [s]")
    plt.ylabel("Frequency [Hz]")
    plt.show()
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))  # propagate main's return value as the exit status
18,934 | 3cd34750a43504816b0b356f86c04a334fab18b3 | import sys
import os

# Fix: `ipaddress` was never defined, so this script crashed with a
# NameError before reaching os.system().  Take the TCP sink host from the
# first command-line argument, defaulting to all interfaces.
ipaddress = sys.argv[1] if len(sys.argv) > 1 else "0.0.0.0"
# Capture 1080p/30fps H.264 from the Pi camera and serve it over TCP port 5000.
# NOTE(review): the space after "host=" in the gst-launch pipeline may break
# property parsing -- confirm against a working deployment before removing.
os.system("raspivid -t 999999 -h 1080 -w 1920 -fps 30 -hf -b 2000000 -o - | gst-launch-1.0 -v fdsrc ! h264parse ! rtph264pay config-interval=1 pt=96 ! gdppay ! tcpserversink host= "+ipaddress+" port=5000")
18,935 | bb10eb775f1db7ea84b1bf40994de12d6df4d5f9 | import hashlib
import users_table
def create_user(username, password):
    """Store a new account under the SHA-256 hex digest of its password."""
    digest = hashlib.sha256(password.encode("utf-8")).hexdigest()
    users_table.add_user(username, digest)
def login(username, password):
    """Return a truthy value iff `username` exists and `password` matches.

    The user record is fetched once (the original called get_user twice,
    which was wasteful and race-prone if the record changed between calls).
    NOTE(review): unsalted SHA-256 is weak for password storage; consider
    hashlib.pbkdf2_hmac -- this would require rehashing stored credentials.
    """
    hashed = hashlib.sha256(password.encode("utf-8")).hexdigest()
    user = users_table.get_user(username)
    return user and hashed == user["hashed"]
|
18,936 | bf05a2b19e034ee2d5fb708b6748406b0473ffbb | import pickle
from os.path import join

import keras
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
import mxnet as mx
from mxnet import gluon, nd
from mxnet.gluon.model_zoo import vision
import numpy as np
import pandas as pd
import scipy as sp
import scipy.spatial.distance  # registers sp.spatial.distance (bare `import scipy` does not)
SIZE = (224, 224)  # network input resolution
inputShape = (224, 224)
MEAN_IMAGE = mx.nd.array([0.485, 0.456, 0.406])  # ImageNet per-channel mean
STD_IMAGE = mx.nd.array([0.229, 0.224, 0.225])   # ImageNet per-channel std
EMBEDDING_SIZE = 512  # feature-vector width of resnet18_v2's feature block
class Matcher:
    """Nearest-neighbour image matcher.

    Pre-computed image feature vectors and their SKU labels are loaded from
    pickle files; `match` ranks the stored SKUs by Euclidean distance to a
    query image and returns the 10 closest.
    """

    def transform(self, image):
        """ImageNet-style preprocessing: resize short side to 224, centre-crop
        to 224x224, scale to [0, 1], normalise per channel, HWC -> CHW."""
        resized = mx.image.resize_short(image, SIZE[0]).astype('float32')
        cropped, _ = mx.image.center_crop(resized, SIZE)
        cropped /= 255.
        normalized = mx.image.color_normalize(cropped,
                                              mean=MEAN_IMAGE,
                                              std=STD_IMAGE)
        transposed = nd.transpose(normalized, (2, 0, 1))
        return transposed

    def __init__(self, features_file, skus_file):
        """Set up the compute context and load the reference data.

        Args:
            features_file: pickle file of stored feature vectors (one pickle
                record per image).
            skus_file: pickle file of the matching SKU labels, same order.
        """
        self.ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()
        # NOTE(review): self.net is initialised but never applied in match();
        # the query "feature" is the raw preprocessed pixel tensor.  Confirm
        # the pickled features were produced the same way.
        self.net = vision.resnet18_v2(pretrained=True, ctx=self.ctx).features
        print('finishing initialization')
        self._load_files(features_file, skus_file)

    def _load_files(self, features_file, skus_file):
        """Populate the parallel feature / SKU lists from the pickle files."""
        self._feature_dict = self._read_pickle_records(features_file)
        self._sku_dict = self._read_pickle_records(skus_file)

    @staticmethod
    def _read_pickle_records(path):
        """Read every back-to-back pickle record from `path` until EOF."""
        records = []
        with open(path, "rb") as openfile:
            while True:
                try:
                    records.append(pickle.load(openfile))
                except EOFError:
                    break
        return records

    def match(self, file):
        """Return the SKUs of the 10 stored images closest to image `file`."""
        img = load_img(file)
        img = img_to_array(img)
        img = self.transform(nd.array(img))
        feature = img.expand_dims(axis=0).as_in_context(self.ctx)
        feature_np = np.array(feature)
        # Euclidean distance to every stored feature vector.  Requires
        # `import scipy.spatial.distance` at module level: `import scipy as sp`
        # alone leaves sp.spatial unresolvable at runtime.
        matches = [sp.spatial.distance.euclidean(d_value, feature_np)
                   for d_value in self._feature_dict]
        dataframe = pd.DataFrame({'sku': self._sku_dict, 'matches': matches})
        dataframe = dataframe.nsmallest(10, 'matches')
        return dataframe['sku'].values.tolist()
def main():
    """Demo run: build a Matcher from the pickled store and match one image."""
    image_matcher = Matcher(
        "/Users/pdrglv/Desktop/features_vgg.pickle",
        "/Users/pdrglv/Desktop/skus_vgg.pickle",
    )
    print(image_matcher.match('/Users/pdrglv/Desktop/90.jpg'))
if __name__ == "__main__":
main()
|
18,937 | d20f3cdb5bf2d9624115daaf55fdde5222de34df | from django.apps import AppConfig
class GroupConfig(AppConfig):
    """Django app configuration for the `group` app."""
    name = 'group'
    # Human-readable labels shown in the Django admin (Chinese: "user group").
    verbose_name = "用户组"
    verbose_name_plural = "用户组"
|
18,938 | 0425fe42641529b0879e9f1e61288cbab06cc712 | # Error-checking code is code that a programmer introduces to detect and handle errors that may occur while the program executes.
# Python has special constructs known as exception-handling constructs because they handle exceptional circumstances, another word for errors during execution.
# Consider the following program that has a user enter weight and height, and that outputs the corresponding body-mass index
# (BMI is one measure used to determine normal weight for a given height).
# Repeatedly prompt for weight/height and report the BMI until 'q' is entered.
user_input = ''
while user_input != 'q':
    pounds = int(input("Enter weight (in pounds): "))
    inches = int(input("Enter height (in inches): "))
    # US-unit BMI formula: (lb / in^2) * 703
    bmi = (float(pounds) / float(inches * inches)) * 703
    print('BMI:', bmi)
    print('(CDC: 18.6-24.9 normal)\n')
    # Source www.cdc.gov
    user_input = input("Enter any key ('q' to quit): ")
18,939 | 0bd4690a8220a35d703a3f84778780ba23464072 | import requests
i = 0  # page index; each page holds 100 rows (request offset = i * 100)
alldata = ''  # accumulated cleaned table text across all pages
temp = ''
bad = '''
<th aria-label="Rank" data-stat="ranker" class="ranker sort_default_asc show_partial_when_sorting right" data-tip="Rank" >Rk</th>
<th aria-label="If listed as single number, the year the season ended.★ - Indicates All-Star for league.Only on regular season tables." data-stat="season" class=" sort_default_asc center" data-tip="If listed as single number, the year the season ended.<br>★ - Indicates All-Star for league.<br>Only on regular season tables." >Season</th>
<th aria-label="Team" data-stat="team_id" class=" sort_default_asc left" data-tip="Team" >Tm</th>
<th aria-label="League" data-stat="lg_id" class=" sort_default_asc left" data-tip="League" >Lg</th>
<th aria-label="Games" data-stat="g" class=" right" data-tip="Games" >G</th>
<th aria-label="Wins" data-stat="wins" class=" right" data-tip="Wins" >W</th>
<th aria-label="Losses" data-stat="losses" class=" right" data-tip="Losses" >L</th>
<th aria-label="Win-Loss Percentage" data-stat="win_loss_pct" class=" right" data-tip="Win-Loss Percentage" >W/L%</th>
<th aria-label="Minutes Played" data-stat="mp" class=" right" data-tip="Minutes Played" >MP</th>
<th aria-label="Field Goals" data-stat="fg" class=" right" data-tip="Field Goals" >FG</th>
<th aria-label="Field Goal Attempts" data-stat="fga" class=" right" data-tip="Field Goal Attempts" >FGA</th>
<th aria-label="2-Point Field Goals" data-stat="fg2" class=" right" data-tip="2-Point Field Goals" >2P</th>
<th aria-label="2-point Field Goal Attempts" data-stat="fg2a" class=" right" data-tip="2-point Field Goal Attempts" >2PA</th>
<th aria-label="3-Point Field Goals" data-stat="fg3" class=" right" data-tip="3-Point Field Goals" >3P</th>
<th aria-label="3-Point Field Goal Attempts" data-stat="fg3a" class=" right" data-tip="3-Point Field Goal Attempts" >3PA</th>
<th aria-label="Free Throws" data-stat="ft" class=" right" data-tip="Free Throws" >FT</th>
<th aria-label="Free Throw Attempts" data-stat="fta" class=" right" data-tip="Free Throw Attempts" >FTA</th>
<th aria-label="Offensive Rebounds" data-stat="orb" class=" right" data-tip="Offensive Rebounds" >ORB</th>
<th aria-label="Defensive Rebounds" data-stat="drb" class=" right" data-tip="Defensive Rebounds" >DRB</th>
<th aria-label="Total Rebounds" data-stat="trb" class=" right" data-tip="Total Rebounds" >TRB</th>
<th aria-label="Assists" data-stat="ast" class=" right" data-tip="Assists" >AST</th>
<th aria-label="Steals" data-stat="stl" class=" right" data-tip="Steals" >STL</th>
<th aria-label="Blocks" data-stat="blk" class=" right" data-tip="Blocks" >BLK</th>
<th aria-label="Turnovers" data-stat="tov" class=" right" data-tip="Turnovers" >TOV</th>
<th aria-label="Personal Fouls" data-stat="pf" class=" right" data-tip="Personal Fouls" >PF</th>
<th aria-label="Points" data-stat="pts" class=" right" data-tip="Points" >PTS</th>
'''
bad2 = '<tr class="thead"> </tr>'
bad3 = '''
'''
bad4 = '''
</tbody></table>
'''
bad5 = '''</tr><tr >'''
bad6 = '''</tr>\n<tr >'''
bad7 = '''<tr ><th scope="row" class="right " data-stat="ranker" csk="4" >4</th><td class="left " data-stat="season" >'''
bad8 = ''' </div>'''
# Fetch 13 result pages (100 rows each) from the team-totals finder and
# strip the repeated table markup fragments (bad..bad8) from each response.
while i < 13:
    url = 'https://www.basketball-reference.com/play-index/tsl_finder.cgi?request=1&match=single&type=team_totals&year_min=1970&year_max=2019&lg_id=NBA&franch_id=&c1stat=&c1comp=&c1val=&c2stat=&c2comp=&c2val=&c3stat=&c3comp=&c3val=&c4stat=&c4comp=&c4val=&order_by=year_id&order_by_asc=&offset='+str(i*100)
    website = requests.get( url )
    temp = website.text[:]
    # Crop page boilerplate: drop the first 2216 lines and the last 500.
    temp = "\n".join(temp.split("\n")[2216:])
    temp = "\n".join(temp.split("\n")[:-500])
    temp = temp.replace(bad,'')
    temp = temp.replace(bad2,'')
    temp = temp.replace(bad3,'')
    temp = temp.replace(bad4,'')
    temp = temp.replace(bad5,bad6)
    alldata += temp
    i += 1
# Final cleanup pass over the concatenated pages, then dump to stdout.
alldata = alldata.replace(bad5,bad6)
alldata = alldata.replace(bad8,'')
print(alldata)
|
18,940 | 17fe43e1bf77de20c695bcb25b9b471d3b583ba7 | import logging
from unittest import TestCase
from unittest.mock import patch
from rail_uk import lambda_handler
from helpers import helpers
class TestLambdaHandler(TestCase):
    """Unit tests for rail_uk.lambda_handler request-type dispatching."""

    def setUp(self):
        # Enable debug logging and activate the mocked environment variables.
        logging.basicConfig(level='DEBUG')
        self.mock_env = helpers.get_test_env()
        self.mock_env.start()

    def tearDown(self):
        self.mock_env.stop()

    def test_lambda_handler_invalid_application_id(self):
        # Events from an unknown skill application ID must be rejected.
        test_event = helpers.generate_test_event('IntentRequest', 'INVALID_ADD_ID')
        with self.assertRaises(ValueError) as context:
            lambda_handler.lambda_handler(test_event, {})
        self.assertEqual('Invalid Application ID', str(context.exception))

    @patch('rail_uk.lambda_handler.on_launch')
    @patch('rail_uk.lambda_handler.logger')
    def test_lambda_handler_launch_request(self, mock_logger, mock_event):
        # A LaunchRequest is routed to on_launch and the session start logged.
        mock_response = 'Welcome to Rail UK!'
        mock_event.return_value = mock_response
        test_event = helpers.generate_test_event('LaunchRequest')
        response = lambda_handler.lambda_handler(test_event, {})
        mock_logger.info.assert_called_with('Session started: ' + test_event['session']['sessionId'])
        self.assertEqual(response, mock_response)

    @patch('rail_uk.lambda_handler.on_intent')
    def test_lambda_handler_intent_request(self, mock_intent):
        # An IntentRequest is forwarded to on_intent with request + session.
        mock_response = 'There are no trains to Train Town at this time.'
        mock_intent.return_value = mock_response
        test_event = helpers.generate_test_event('IntentRequest')
        response = lambda_handler.lambda_handler(test_event, {})
        mock_intent.assert_called_with(test_event['request'], test_event['session'])
        self.assertEqual(response, mock_response)

    @patch('rail_uk.lambda_handler.logger')
    def test_lambda_handler_session_ended_request(self, mock_logger):
        # A SessionEndedRequest only logs the session end.
        test_event = helpers.generate_test_event('SessionEndedRequest')
        lambda_handler.lambda_handler(test_event, {})
        mock_logger.info.assert_called_with('Session ended: {}'.format(test_event['session']['sessionId']))
|
18,941 | 8cfd14cb38b86fbe8da9ba71eb237dbd80b8893e | secret_word = "Infinite"
guessing_word = ""
limit_count = 3
guess_count = 0
out_of_guess = False
while guessing_word != secret_word and not out_of_guess:
if guess_count < limit_count:
if guess_count == 0:
print("Its a Kpop Group")
guessing_word = input("Enter word: ")
elif guess_count == 1:
print("Boy Group Debuted in 2010")
guessing_word = input("Enter word: ")
elif guess_count == 2:
print("Group of 7 people, Known for their knife like sharp dance moves")
guessing_word = input("Enter word: ")
guess_count += 1
else:
out_of_guess = True
if not out_of_guess:
print("You win")
else:
print("You loose")
|
18,942 | 947dd3590c8245a1c4e7085e559a1e27191f7a7b | import mysql_helper as db
import sys
from scrapy import exceptions
level = {'debug': 0, 'warning': 1, 'error': 2, 'fatal': 9}  # severity name -> numeric code stored in the DB
def log(message, lv='debug', spider='none', params='none'):
    """Write a log row to the SpiderLog table.

    Args:
        message: log text.
        lv: severity name; one of 'debug', 'warning', 'error', 'fatal'.
        spider: name of the spider the entry belongs to.
        params: parameter list recorded alongside the entry.

    Raises:
        scrapy.exceptions.CloseSpider: when lv == 'fatal', after the row has
            been committed, so the spider shuts down.
        KeyError: if lv is not a known level name.
    """
    session = db.s
    # Local renamed from `log` -- it previously shadowed this function.
    entry = db.SpiderLog(Spider=spider,
                         ParamList=params,
                         Level=level[lv],
                         Message=message)
    session.add(entry)
    session.commit()
    if lv == 'fatal':
        raise exceptions.CloseSpider('fatal error encountered, please check log in database')
18,943 | 3250531d09be18a77a4ca4e3b5593edd42326be9 | import pyperclip
from translation import get_translate
import sys
# Translate the current clipboard contents into the language given as the
# first CLI argument, writing the result back to the clipboard.
clipboard = pyperclip
target_language = sys.argv[1]
source_text = clipboard.paste()
try:
    translated = get_translate(text=source_text, to_lang=target_language, from_lang="AUTO")
    clipboard.copy(translated)
except Exception:
    print("Sorry, try again")
18,944 | c218e783ffa311ef9e2be0afe6d2d1c7617bc8b4 | # https://www.acmicpc.net/problem/17135
# 3 <= R, C <= 15
# 1 <= D <= 10
def game():
    """Simulate one full game for the current archer placement.

    Each round: every archer picks its nearest enemy (simultaneously -- all
    targets are chosen before any enemy is removed, so two archers aiming at
    the same cell kill it once), the targets are removed from `arr`, then the
    whole grid shifts one row toward the archers.  Returns the total kills.
    """
    count = 0
    for i in range(0,R):
        # Target selection phase: all archers choose before any removal.
        target = []
        for a in archer:
            target.append(find(a[0],a[1]))
        for t in target:
            if t[0] == -1 and t[1] == -1:
                continue  # this archer found no enemy within range D
            elif arr[t[0]][t[1]] == 1:
                arr[t[0]][t[1]] = 0
                count += 1
        # Shift every row down by one (toward the archers), bottom-up.
        exitFlag = False
        for r in range(1,R):
            for c in range(0,C):
                arr[R-r][c] = arr[R-r-1][c]
                if arr[R-r][c] == 1:
                    exitFlag = True
        # The new top row is empty.
        for c in range(0,C):
            arr[0][c] = 0
        # Stop early once no enemies remain on the board.
        if not exitFlag:
            return count
    return count
def select():
    """Try every placement of the 3 archers on row R (below the grid) and
    record the best achievable kill count in the global ANSWER."""
    global ANSWER
    global archer
    for left in range(0, C - 2):
        for middle in range(left + 1, C - 1):
            for right in range(middle + 1, C):
                archer = [[R, left], [R, middle], [R, right]]
                set_arr()
                reset_visit()
                ANSWER = max(ANSWER, game())
def find(archerR,archerC):
    """Breadth-first search for the nearest enemy within shooting range D.

    Expands one distance ring per ``while`` iteration, starting from the
    cell directly above the archer.  Returns [row, col] of the first enemy
    encountered, or [-1, -1] when none is within range.  The direction
    order (left, up, right) appears intended to prefer the leftmost enemy
    on distance ties -- TODO confirm against the problem statement.
    """
    reset_visit()
    count = 1                        # current search distance, 1..D
    queue = [[archerR-1, archerC]]
    while count <= D:
        queue_ = []
        for q in queue:
            r = q[0]
            c = q[1]
            if arr[r][c] == 1:
                return [r,c]
            for d in dir:
                rr = r + d[0]
                cc = c + d[1]
                # bounds check; no rr < R test needed: rr only decreases
                if C > cc and cc >= 0 and rr >= 0 and not visit[rr][cc]:
                    visit[rr][cc] = True
                    queue_.append([rr,cc])
        queue = queue_
        count += 1
    return [-1, -1]
def set_arr():
    """Reset the working grid ``arr`` back to the pristine map ``area``."""
    for row in range(R):
        arr[row][:] = area[row]
def reset_visit():
    """Mark every cell of the BFS bookkeeping grid ``visit`` as unvisited."""
    for row in visit:
        row[:] = [False] * C
dir = [[0,-1],[-1,0], [0,1]]
R, C, D = list(map(int, input().split()))
area = [list(map(int, input().split())) for _ in range(0,R)]
arr = [[0] * C for _ in range(0,R)]
visit = [[False] * C for i in range(0,R)]
archer = []
ANSWER = 0
select()
print(ANSWER) |
18,945 | 322558b17924534e521374471e3154a1df8cc981 | # -*- coding: utf-8 -*-
#
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=missing-docstring
import collections
import unittest
import numpy
import qiskit.ignis.verification.tomography.fitters as fitters
class TestFitters(unittest.TestCase):
    """Unit tests for the tomography fitters (cvx_fit and fitter_data)."""

    def assertMatricesAlmostEqual(self, lhs, rhs, places=None):
        """Assert element-wise almost-equality of two 2-D numpy arrays."""
        self.assertEqual(lhs.shape, rhs.shape,
                         "Marix shapes differ: {} vs {}".format(lhs, rhs))
        n, m = lhs.shape
        for x in range(n):
            for y in range(m):
                self.assertAlmostEqual(
                    lhs[x, y], rhs[x, y], places=places,
                    msg="Matrices {} and {} differ on ({}, {})".format(
                        lhs, rhs, x, y))

    # the basis matrix for 1-qubit measurement in the Pauli basis
    A = numpy.array([
        [0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j],
        [0.5 + 0.j, -0.5 + 0.j, -0.5 + 0.j, 0.5 + 0.j],
        [0.5 + 0.j, 0. - 0.5j, 0. + 0.5j, 0.5 + 0.j],
        [0.5 + 0.j, 0. + 0.5j, 0. - 0.5j, 0.5 + 0.j],
        [1. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],
        [0. + 0.j, 0. + 0.j, 0. + 0.j, 1. + 0.j]
    ])

    def test_trace_constraint(self):
        """cvx_fit should honour the requested trace of the fitted state."""
        p = numpy.array([1/2, 1/2, 1/2, 1/2, 1/2, 1/2])
        for trace_value in [1, 0.3, 2, 0, 42]:
            rho = fitters.cvx_fit(p, self.A, trace=trace_value)
            self.assertAlmostEqual(numpy.trace(rho), trace_value, places=3)

    def test_fitter_data(self):
        """fitter_data should reproduce the Pauli basis matrix and the
        shot-count-normalised probability vector."""
        data = collections.OrderedDict()
        data[('X',)] = {'0': 5000}
        data[('Y',)] = {'0': 2508, '1': 2492}
        data[('Z',)] = {'0': 2490, '1': 2510}
        p, A, _ = fitters.fitter_data(data)
        self.assertMatricesAlmostEqual(self.A, A)
        n = 5000
        # probabilities are counts divided by the per-basis total shots
        expected_p = [5000 / n, 0 / n, 2508 / n, 2492 / n, 2490 / n, 2510 / n]
        self.assertListEqual(expected_p, p)
if __name__ == '__main__':
unittest.main()
|
18,946 | 54118d30ccf6263bfd0970381b4c509882d216c7 | import os
class SimpleFastaReader:
    """Minimal streaming FASTA parser.

    Call ``next()`` repeatedly; after each successful call ``self.id`` holds
    the record's defline (without the leading '>') and ``self.seq`` the
    concatenated sequence lines.  ``next()`` returns True while a record was
    read and None at end of file.
    """

    def __init__(self, file_name=None):
        self.file_name = file_name
        self.h = open(self.file_name)
        self.seq = ''
        self.id = None
        # Defline read ahead while scanning the previous record's sequence.
        # Bug fix: the original seeked backwards with os.SEEK_CUR instead,
        # which raises io.UnsupportedOperation for text-mode files on
        # Python 3; buffering the line avoids seeking entirely.
        self._pending_defline = None

    def next(self):
        """Read the next record; return True on success, None at EOF."""
        def read_id():
            # Use the defline buffered by the previous read_seq(), if any.
            if self._pending_defline is not None:
                line, self._pending_defline = self._pending_defline, None
                return line.strip()[1:]
            return self.h.readline().strip()[1:]

        def read_seq():
            ret = ''
            while True:
                line = self.h.readline()
                while len(line) and not len(line.strip()):
                    # skip empty line(s)
                    line = self.h.readline()
                if not len(line):
                    # EOF
                    break
                if line.startswith('>'):
                    # found next defline: buffer it for the following call
                    self._pending_defline = line
                    break
                ret += line.strip()
            return ret

        self.id = read_id()
        self.seq = read_seq()
        if self.id:
            return True

    def close(self):
        self.h.close()
18,947 | 37ae27f0657103c7f678c1982099f5eff187da41 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset and upload it."""
import argparse
import os
import subprocess
import sys
import tempfile
import create
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
ASSET = os.path.basename(FILE_DIR)
def main():
    """Parse --lib_path and upload this asset via the `sk` tool.

    Linux-only: exits with status 1 on any other platform.  Raises if the
    `sk` binary has not been fetched yet.
    """
    if 'linux' not in sys.platform:
        print('This script only runs on Linux.', file=sys.stderr)
        sys.exit(1)
    parser = argparse.ArgumentParser()
    parser.add_argument('--lib_path', '-l', required=True)
    args = parser.parse_args()
    # Pass lib_path to the creation script via an environment variable, since
    # we're calling the script via `sk` and not directly.
    os.environ[create.ENV_VAR] = args.lib_path
    sk = os.path.realpath(os.path.join(
        FILE_DIR, os.pardir, os.pardir, os.pardir, os.pardir, 'bin', 'sk'))
    if os.name == 'nt':
        sk += '.exe'
    if not os.path.isfile(sk):
        # Bug fix: the original message left the '%s' placeholder unfilled.
        raise Exception(
            '`sk` not found at %s; maybe you need to run bin/fetch-sk?' % sk)
    # Upload the asset.
    subprocess.check_call([sk, 'asset', 'upload', ASSET], cwd=FILE_DIR)
if __name__ == '__main__':
main()
|
18,948 | 4e245ae8d3ae46f593bdd188036c7653d7057030 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the minimumBribes function below.
def minimumBribes(q):
    """Print the minimum number of bribes that produced queue ``q``, or
    "Too chaotic" if someone must have bribed more than twice.

    Works by rebuilding the original queue [1..n] and greedily undoing
    one- and two-position bribes as it scans left to right.
    """
    bribes = 0
    expected = list(range(1, len(q) + 1))
    for i, person in enumerate(q):
        if person == expected[i]:
            continue
        if i + 1 < len(q) and person == expected[i + 1]:
            # person bribed once: swap with the neighbour ahead
            bribes += 1
            expected[i], expected[i + 1] = person, expected[i]
        elif i + 2 < len(q) and person == expected[i + 2]:
            # person bribed twice: rotate the next three expected slots
            bribes += 2
            expected[i + 2] = expected[i + 1]
            expected[i + 1] = expected[i]
            expected[i] = person
        else:
            # more than two positions ahead of where they started
            print("Too chaotic")
            return
    print(bribes)
if __name__ == '__main__':
# minimumBribes([1, 2, 5, 3, 7, 8, 6, 4])
# minimumBribes([2, 1, 5, 3, 4])
# minimumBribes([2, 5, 1, 3, 4])
# 1 2 3 4 5 6 7 8
# 1 2 3 5 4 6 7 8
# 1 2 5 3 4 6 7 8
# 1 2 5 3 4 7 6 8
# 1 2 5 3 7 4 6 8
# 1 2 5 3 7 4 8 6
# 1 2 5 3 7 8 4 6
# 1 2 5 3 7 8 6 4
t = int(input())
for t_itr in range(t):
n = int(input())
q = list(map(int, input().rstrip().split()))
minimumBribes(q)
|
18,949 | 5bccf63a42c4a0ffca2a69348b250114107f7518 | from django.conf.urls import url
from django.conf import settings
from django.contrib.auth.views import LogoutView
from django.views.generic import TemplateView
from django.contrib.auth import views as auth_views
from account import views
from account.views import (
RegisterUserView,
LoginUserView,
DashboardView,
OrderMealView,
OrderMealListView,
LunchView,
UserProfileDetailView,
)
from django.contrib.auth.views import logout
urlpatterns = [
# /account/register
#url(r'^home/$', views.home, name='home'),
url(r'^register/$', view=RegisterUserView.as_view(), name='register'),
url(r'^login/$', view=LoginUserView.as_view(), name='login'),
#url(r'^logout/$', auth_views.logout),
url(r'^logout/$', logout, {'next_page': settings.LOGOUT_REDIRECT_URL}, name='logout'),
#url(r'^logout/$', view=LogoutView.as_view(), name='logout'),
url(r'^orders/$', view=OrderMealView.as_view(), name='orders'),
url(r'^dashboard/$', view=DashboardView.as_view(), name='dashboard'),
# url(r'^dashboard/myorders$', view=OrderMealListView.as_view(), name='myorders'),
url(r'^myprofile/(?P<pk>\d+)$', view=UserProfileDetailView.as_view(), name='myprofile'),
# url(r'user/<int:pk>/', views=UserProfileDetailView.as_view(), name='user_detail'),
url(r'^lunch/$', view=LunchView.as_view(), name='lunch'),
url(r'^password_reset/$', auth_views.password_reset, name='password_reset'),
url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
auth_views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
] |
18,950 | 50006cd38b22233b9facc1f2b19ac5c4b12210ab | # ile liczb w pliku liczby.txt ma minimum 15 jedynek
# How many numbers in liczby.txt contain at least 15 '1' digits?
with open("liczby.txt") as plik:
    dane = plik.readlines()

# Strip the trailing newline/whitespace from every line.
dane_czyste = []
for elem in dane:
    dane_czyste.append(elem.strip())

# Count the numbers with a minimum of 15 ones ("> 14" == ">= 15").
wynik = 0
for liczba in dane_czyste:
    if liczba.count("1") > 14:
        wynik += 1

print(f"W pliku jest {wynik} liczb z min. 15 jedynkami.")
18,951 | 1b61103a5357795b4a03a8e6830337645692c15f | import json, config
from flask import Flask, request, jsonify, render_template
from binance.client import Client
from binance.enums import *
from math import ceil, floor
from binance.exceptions import BinanceAPIException
app = Flask(__name__)

# SECURITY: exchange API credentials are hard-coded in source control.
# They should be loaded from environment variables or a secrets manager,
# and these keys should be rotated.
API_KEY = 'E2TnptYKp2MigaCSWuMPuHBtJqIwwJnMqghYouRAUNh08zVZLGwoucb4N0kuDFK2'
API_SECRET = 'JmNksYt81bikkoMY6R4sqVlSSjsK0AxIrS8dw0IxCmPzWE2BwZ9l3tm3vUA2Gry8'
client = Client(API_KEY, API_SECRET) #testnet=True
print("Start Bot")
# Start from a clean slate: cancel any orders left over from a previous run.
client.futures_cancel_all_open_orders(symbol='ETHUSDT')
# Module-wide mutable state shared by the webhook handlers below.
config.orders_status = []
config.neworder = []
config.type_tp = ''
config.current_tp = 0
def clear_current_tp():
config.current_tp = 0
def cancel_all_order(symbol):
#client.futures_cancel_all_open_orders(symbol=symbol)
client = Client(API_KEY, API_SECRET)
config.candle_count = 0
for x in config.all_orders:
try:
client.futures_cancel_order(symbol=symbol, orderId=x['orderId'])
except BinanceAPIException as e:
client = Client(API_KEY, API_SECRET)
continue
def check_position_status(symbol):
orders = client.futures_position_information(symbol=symbol)
print('current posotion quantity = ',orders[0]['positionAmt'])
if float(orders[0]['positionAmt']) != 0:
return True
else: return False
def check_main_order_type(symbol):
orders = client.futures_get_open_orders(symbol=symbol)
for x in orders:
if x['reduceOnly'] == False:
return str(x['side'])
return 0
def check_main_order_status(symbol):
orders = client.futures_get_open_orders(symbol=symbol)
#print('total order has open is', len(orders))
for x in orders:
if x['reduceOnly'] == False:
return True
return False
def save_orders_json(symbol):
orders = client.futures_get_open_orders(symbol=symbol)
orders.sort(key=lambda x: float(x['stopPrice']))
config.all_orders = orders
print('\n' , 'total order ' , len(config.all_orders))
for x in config.all_orders:
print('order ID ' , x['orderId'] , ' | ', ' side ' , x['side'] , ' price ' , x['stopPrice'] , ' | ' , ' reduceOnly ' , x['reduceOnly'] )
def save_orders_status_1to3_json():
index = [x['reduceOnly'] for x in config.all_orders].index(False)
if config.all_orders[index]['side'] == 'BUY':
config.order_status =[
{"price":config.all_orders[index+1]['stopPrice'],"orderId":config.all_orders[index+1]['orderId']},
{"price":config.all_orders[index+2]['stopPrice'],"orderId":config.all_orders[index+2]['orderId']}]
else:
config.order_status =[
{"price":config.all_orders[index-1]['stopPrice'],"orderId":config.all_orders[index-1]['orderId']},
{"price":config.all_orders[index-2]['stopPrice'],"orderId":config.all_orders[index-2]['orderId']}]
print('\njson status')
print(config.order_status)
def save_orders_status_other_json():
index = [x['reduceOnly'] for x in config.all_orders].index(False)
if config.all_orders[index]['side'] == 'BUY':
config.order_status ={"price":config.all_orders[index+1]['stopPrice'],"orderId":config.all_orders[index+1]['orderId']}
else:
config.order_status ={"price":config.all_orders[index-1]['stopPrice'],"orderId":config.all_orders[index-1]['orderId']}
print('\njson status')
print(config.order_status)
def check_hit_SL_TP(symbol):
client = Client(API_KEY, API_SECRET)
orders = client.futures_get_open_orders(symbol=symbol)
try:
index = [x['reduceOnly'] for x in orders].index(False)
check_candle(symbol)
except Exception as e:
print('can not find main orders')
client = Client(API_KEY, API_SECRET)
if check_position_status(symbol=symbol) == True:
print('but have position')
else:
cancel_all_order(symbol)
return True
try:
print('all_orders' , config.all_orders)
print('index' , [x['reduceOnly'] for x in config.all_orders].index(False))
index = [x['reduceOnly'] for x in config.all_orders].index(False)
if config.all_orders[index]['side'] == 'BUY':
check_sl_order = [x['orderId'] for x in orders].index(config.all_orders[index-1]['orderId'])
else:
check_sl_order = [x['orderId'] for x in orders].index(config.all_orders[index+1]['orderId'])
except Exception as e:
print('\n Has hit ST order!')
client = Client(API_KEY, API_SECRET)
cancel_all_order(symbol)
return True
return False
def check_close_order(symbol): #เมื่อมีการชนเขต SLO หรือไม่เข้าออเดอร์ภายใน 5 แท่ง
print('!!!check hit SL or all TP!!!')
return check_hit_SL_TP(symbol=symbol)
def check_hit_TP(symbol,index):
print(config.order_status)
orders = client.futures_get_open_orders(symbol=symbol)
if config.type_tp == '1to3':
try:
print('check TP order id ', config.order_status[index]['orderId'])
check_sl_order = [x['orderId'] for x in orders].index(config.order_status[index]['orderId'])
#print('index is ',check_sl_order)
except Exception as e:
return True
else:
try:
print('check TP order id ', config.order_status['orderId'])
check_sl_order = [x['orderId'] for x in orders].index(config.order_status['orderId'])
#print('index is ',check_sl_order)
except Exception as e:
return True
return False
def change_new_stoploss(symbol,index):
print('Have change new stoploss!!')
orders = client.futures_get_open_orders(symbol=symbol)
try:
main_index = [x['reduceOnly'] for x in config.all_orders].index(False)
if config.all_orders[main_index]['side'] == 'BUY':
client.futures_cancel_order(symbol=symbol, orderId=config.all_orders[main_index-1]['orderId'])
print('Closed old SL order ',config.all_orders[main_index-1]['orderId'])
else:
client.futures_cancel_order(symbol=symbol, orderId=config.all_orders[main_index+1]['orderId'])
print('Closed old SL order ',config.all_orders[main_index+1]['orderId'])
except Exception as e:
print("an exception occured - {}".format(e))
if config.type_tp == '1to3':
try:
print('Replace new SL order')
print('send order TP index ', index)
main_index = [x['reduceOnly'] for x in config.all_orders].index(False)
if config.all_orders[main_index]['side'] == 'BUY': #main_index['stopPrice'] config.order_status[index-1]['stopPrice']
if index == 0:
config.neworder = client.futures_create_order(symbol=symbol, side="SELL", closePosition="true",
type="STOP_MARKET",stopPrice=config.all_orders[main_index]['stopPrice'], timeInForce=TIME_IN_FORCE_GTC,)
else:
config.neworder = client.futures_create_order(symbol=symbol, side="SELL", closePosition="true",
type="STOP_MARKET",stopPrice=config.order_status[index-1]['stopPrice'], timeInForce=TIME_IN_FORCE_GTC,)
elif config.all_orders[main_index]['side'] == 'SELL':
if index == 0:
config.neworder = client.futures_create_order(symbol=symbol, side="BUY", closePosition="true",
type="STOP_MARKET",stopPrice=config.all_orders[main_index]['stopPrice'], timeInForce=TIME_IN_FORCE_GTC,)
else:
config.neworder = client.futures_create_order(symbol=symbol, side="BUY", closePosition="true",
type="STOP_MARKET",stopPrice=config.order_status[index-1]['stopPrice'], timeInForce=TIME_IN_FORCE_GTC,)
print('new stoploss price = ', config.order_status[index-1]['stopPrice'])
except Exception as e:
print("an exception occured - {}".format(e))
else:
try:
print('Replace new SL order')
main_index = [x['reduceOnly'] for x in config.all_orders].index(False)
if config.all_orders[main_index]['side'] == 'BUY': #main_index['stopPrice']
config.neworder = client.futures_create_order(symbol=symbol, side="SELL", closePosition="true",
type="STOP_MARKET",stopPrice=config.order_status['stopPrice'], timeInForce=TIME_IN_FORCE_GTC,)
elif config.all_orders[main_index]['side'] == 'SELL':
config.neworder = client.futures_create_order(symbol=symbol, side="BUY", closePosition="true",
type="STOP_MARKET",stopPrice=config.order_status['stopPrice'], timeInForce=TIME_IN_FORCE_GTC,)
print('new stoploss price = ', config.order_status['stopPrice'],)
except Exception as e:
print("an exception occured - {}".format(e))
try:
orders = client.futures_get_open_orders(symbol=symbol)
index = [x['reduceOnly'] for x in config.all_orders].index(False)
if config.all_orders[index]['side'] == 'BUY':
sl_index = index-1
else:
sl_index = index+1
print('sl_index ',sl_index)
print('new SL order',config.neworder)
new_orders_id = config.neworder['orderId']
new_orders_price = config.neworder['stopPrice']
config.all_orders[sl_index]['orderId'] = new_orders_id
config.all_orders[sl_index]['stopPrice'] = new_orders_price
config.all_orders.sort(key=lambda x: float(x['stopPrice']))
print('\n' , 'total order ' , len(config.all_orders))
for x in config.all_orders:
print('order ID ' , x['orderId'] , ' | ', ' side ' , x['side'] , ' price ' , x['stopPrice'] , ' | ' , ' reduceOnly ' , x['reduceOnly'] )
print('Finish change SL order id json')
except Exception as e:
print("an exception occured - {}".format(e))
def change_stoploss(symbol):
if config.type_tp == '1to3': #risk/reward 1/3
if config.current_tp == 1 and check_hit_TP(symbol,1) == True:
change_new_stoploss(symbol,1)
config.current_tp = 2
elif config.current_tp == 0 and check_hit_TP(symbol,0) == True: #เป้าแรก ทำกำไร25% ที่ 1/3
change_new_stoploss(symbol,0)
config.current_tp = 1
else:
print('dont have any change SL')
elif config.type_tp == '1to2': #risk/reward 1/2
if config.current_tp == 0 and check_hit_TP(symbol,0) == True: #เป้าแรก ทำกำไร25% ที่ 1/2
change_new_stoploss(symbol,0)
config.current_tp = 1
else:
print('dont have any change SL')
elif config.type_tp == '1to1': #risk/reward 1/1
if config.current_tp == 0 and check_hit_TP(symbol,0) == True: #เป้าแรก ทำกำไร25% ที่ 0.5/1
print('check_hit_TP pass')
change_new_stoploss(symbol,0)
config.current_tp = 1
else:
print('dont have any change SL')
else:
print('error')
print('change new TP | current TP is ',config.current_tp)
def calculate_balance(stoploss_percent, balance):
    """Scale the position size down as the stop-loss distance grows.

    Returns the (integer-truncated) fraction of ``balance`` to commit:
    the wider the stop, the smaller the position.
    """
    if stoploss_percent < 15:
        return balance
    if stoploss_percent < 20:
        return int(balance / 1.3)
    if stoploss_percent < 30:
        return int(balance / 1.5)
    return int(balance / 2)
def check_candle(symbol):
if check_position_status(symbol) == False:
if config.candle_count < 1200:
config.candle_count = config.candle_count + 1
print('total time pass main order not hit = ', config.candle_count , ' minute')
print('total time pass main order not hit = ', int(config.candle_count)/240 , 'hour')
elif config.candle_count >= 1200:
print('Close all orders 4h Candle more than 5 unit')
cancel_all_order(symbol)
def open_position(side, symbol, high, low, order_type=ORDER_TYPE_MARKET):
try:
precision = 3
precision2 = 2
tick_price = float(low)
low_price = float(floor(tick_price))
tick_price = float(high)
high_price = float(ceil(tick_price))
stoploss_percent = float(((float(high_price) - float(low_price))/float(low_price))*100)
stoploss_percent = float(round(stoploss_percent, precision2))
print("stoploss % is ", stoploss_percent)
if stoploss_percent >= 15:
config.type_tp = '1to1'
elif stoploss_percent >= 6:
config.type_tp = '1to2'
else:
config.type_tp = '1to3'
print('type take profit = ',config.type_tp)
pre_balance = client.futures_account_balance()
balance = int(float(pre_balance[1]['balance']))
print('your balance is', balance, 'USDT')
balance_quantity = calculate_balance(stoploss_percent,balance)
print('position size is ', balance_quantity, 'USDT')
amount = (balance_quantity/1.025) / float(high)
quantity = float(round(amount, precision))
quantity_tp = quantity/4
quantity_tp = float(round(quantity_tp, precision))
if config.type_tp == '1to3':
if side == "BUY": tp1 = (high_price*stoploss_percent/100)+high_price
else: tp1 = low_price - (low_price*stoploss_percent/100)
tp1 = float(round(tp1, precision2))
print('Take Profit 1 = ',tp1)
if side == "BUY": tp2 = (high_price*(stoploss_percent*2)/100)+high_price
else: tp2 = low_price - (low_price*(stoploss_percent*2)/100)
tp2 = float(round(tp2, precision2))
print('Take Profit 2 = ',tp2)
if side == "BUY": final_tp = (high_price*(stoploss_percent*3)/100)+high_price
else: final_tp = low_price - (low_price*(stoploss_percent*3)/100)
final_tp = float(round(final_tp, precision2))
print('Take Profit 3 = ',final_tp)
if config.type_tp == '1to2':
if side == "BUY": tp1 = (high_price*stoploss_percent/100)+high_price
else: tp1 = low_price - (low_price*stoploss_percent/100)
tp1 = float(round(tp1, precision2))
print('Take Profit 1 = ',tp1)
if side == "BUY": final_tp = (high_price*(stoploss_percent*2)/100)+high_price
else: final_tp = low_price - (low_price*(stoploss_percent*2)/100)
final_tp = float(round(final_tp, precision2))
print('Take Profit 2 = ',final_tp)
if config.type_tp == '1to1':
if side == "BUY": tp1 = (high_price*(stoploss_percent/2)/100)+high_price
else: tp1 = low_price - (low_price*(stoploss_percent/2)/100)
tp1 = float(round(tp1, precision2))
print('Take Profit 1 = ',tp1)
if side == "BUY": final_tp = (high_price*stoploss_percent/100)+high_price
else: final_tp = low_price - (low_price*stoploss_percent/100)
final_tp = float(round(final_tp, precision2))
print('Take Profit 2 = ',final_tp)
position_status = check_position_status(symbol)
if position_status == True:
print("position has ready!")
else:
print("position has not ready!")
print('your quantity', quantity)
print('Tick price is ', high_price)
if check_main_order_status(symbol) == True and check_position_status(symbol) == False:
mainOrder_side = check_main_order_type(symbol)
if mainOrder_side != side:
cancel_all_order(symbol)
print("New opposite signal so cancel all order")
if check_main_order_status(symbol) == False and check_position_status(symbol) == False:
cancel_all_order(symbol)
if side == "BUY":
order = client.futures_create_order(symbol=symbol, side=side,
type="STOP_MARKET",stopPrice=high_price, quantity=quantity, timeInForce=TIME_IN_FORCE_GTC,)
order = client.futures_create_order(symbol=symbol, side="SELL", reduceOnly="true",
type="TAKE_PROFIT_MARKET",stopPrice=tp1, quantity=quantity_tp, timeInForce=TIME_IN_FORCE_GTC,)
if config.type_tp == '1to3':
order = client.futures_create_order(symbol=symbol, side="SELL", reduceOnly="true",
type="TAKE_PROFIT_MARKET",stopPrice=tp2, quantity=quantity_tp, timeInForce=TIME_IN_FORCE_GTC,)
order = client.futures_create_order(symbol=symbol, side="SELL", closePosition="true",
type="TAKE_PROFIT_MARKET",stopPrice=final_tp, timeInForce=TIME_IN_FORCE_GTC,)
order = client.futures_create_order(symbol=symbol, side="SELL", closePosition="true",
type="STOP_MARKET",stopPrice=low_price, timeInForce=TIME_IN_FORCE_GTC,)
save_orders_json(symbol)
if config.type_tp == '1to3':
save_orders_status_1to3_json()
else:
save_orders_status_other_json()
clear_current_tp()
elif side == "SELL":
order = client.futures_create_order(symbol=symbol, side=side,
type="STOP_MARKET",stopPrice=low_price, quantity=quantity, timeInForce=TIME_IN_FORCE_GTC,)
order = client.futures_create_order(symbol=symbol, side="BUY", reduceOnly="true",
type="TAKE_PROFIT_MARKET",stopPrice=tp1, quantity=quantity_tp, timeInForce=TIME_IN_FORCE_GTC,)
if config.type_tp == '1to3':
order = client.futures_create_order(symbol=symbol, side="BUY", reduceOnly="true",
type="TAKE_PROFIT_MARKET",stopPrice=tp2, quantity=quantity_tp, timeInForce=TIME_IN_FORCE_GTC,)
order = client.futures_create_order(symbol=symbol, side="BUY", closePosition="true",
type="TAKE_PROFIT_MARKET",stopPrice=final_tp, quantity=quantity_tp, timeInForce=TIME_IN_FORCE_GTC,)
order = client.futures_create_order(symbol=symbol, side="BUY", closePosition="true",
type="STOP_MARKET",stopPrice=high_price, timeInForce=TIME_IN_FORCE_GTC,)
save_orders_json(symbol)
if config.type_tp == '1to3':
save_orders_status_1to3_json()
else:
save_orders_status_other_json()
clear_current_tp()
else:
print('--- Order/Position has ready can not open new order!!! ---')
return False
except Exception as e:
print("an exception occured - {}".format(e))
return False
return order
@app.route('/')
def welcome():
return render_template('index.html')
@app.route('/webhook', methods=['POST'])
def webhook():
#print(request.data)
print('')
data = json.loads(request.data)
if data['passphrase'] != config.WEBHOOK_PASSPHRASE:
return {
"code": "error",
"message": "Nice try, invalid passphrase"
}
high = data['bar']['high']
low = data['bar']['low']
symbol = data['ticker']
side = data['strategy']['order_action'].upper()
order_response = open_position(side, symbol, high, low)
if order_response:
return {
"code": "success",
"message": "order executed"
}
else:
print("order failed")
return {
"code": "error",
"message": "order failed"
}
@app.route('/check', methods=['POST'])
def check():
#print(request.data)
print('')
data = json.loads(request.data)
if data['passphrase'] != config.WEBHOOK_PASSPHRASE:
return {
"code": "error",
"message": "Nice try, invalid passphrase"
}
symbol = data['ticker']
#check_close_order(symbol)
if check_close_order(symbol) == False:
print('chack change stoloss')
change_stoploss(symbol)
return {
"code": "success",
"message": "check executed"
} |
18,952 | cb9d11a6294b4a001cf6821b8d101c54bca58105 | import sqlite3
# Create/refresh a tiny demo database.
# Bug fix: the original additionally ran a bare CREATE TABLE before the
# executescript, which fails with "table People already exists" on any run
# after the first -- the script below drops and recreates the table itself,
# so it is safe to re-run.
connection = sqlite3.connect("test_data.db")
c = connection.cursor()
c.executescript("""
    DROP TABLE IF EXISTS People;
    CREATE TABLE People(FirstName TEXT, LastName TEXT, Age INT);
    INSERT INTO People VALUES('Ron', 'Obvious', '42');
""")
connection.commit()
connection.close()
18,953 | 9603ec52b9445a2e7d77a613f3f67e9d5a947075 | """
#######################################
@ Author : The DemonWolf
#######################################
This is a python script that convert the languages. Simply put all the .srt files that need to convert,
into given "Input files" folder then run the program. Output files will be available
after end of the program in "Output files" folder.
The python script used Free Google Translate API for Python to translate the languages.
Translates totally free of charge.
"""
# Import necessary libraries
import glob
import os
import re
import translators as ts
# Run main method to execute the program
# Run main method to execute the program
if __name__ == '__main__':
    Input_file_path = "Input files/"
    Output_file_path = "Output files/"
    file_names = []

    # Grab all the .srt files one by one in the Input files folder and open them
    for file in glob.glob(os.path.join(Input_file_path, '*.srt')):
        with open(file, 'r') as openfile:
            # Read file line by line; the 'with' block closes the file.
            lines = openfile.readlines()
        # Bug fix: the original split the path on "\\", which only works
        # with Windows separators; os.path.basename is portable.
        outfile = open(os.path.join(Output_file_path, os.path.basename(file)),
                       'w', encoding="utf-8")
        for line in lines:
            # Translate only subtitle text lines: skip cue numbers,
            # "HH:MM:SS" timestamp lines and blank lines.
            if re.search(r'^[0-9]+$', line) is None and \
                    re.search(r'^[0-9]{2}:[0-9]{2}:[0-9]{2}', line) is None and \
                    re.search(r'^$', line) is None:
                Trans = ts.google(line.rstrip('\n'), if_use_cn_host=True, from_language='auto', to_language='si')
                print(line.rstrip('\n'), Trans)
                line = Trans
            # Translated lines come back without a newline; keep blank
            # separator lines intact in the output.
            if not line.strip():
                outfile.write("\n")
            outfile.write(line)
        outfile.close()
18,954 | 45cbd695bab97f18edb1ee7ec6cc8bbd4195ffc0 | from typing import Iterable, Iterator
from itertools import islice, takewhile, groupby
class Enumerable(object):
    """A lazy, chainable LINQ-style wrapper around an iterable.

    Most operations rebind the wrapped iterator in place and return ``self``
    so calls can be chained; the pipeline is only consumed on iteration.
    """

    def __init__(self, data_source):
        # Accept any iterable; normalise to an iterator for lazy consumption.
        assert isinstance(data_source, Iterable)
        if not isinstance(data_source, Iterator):
            self._data_source = iter(data_source)
        else:
            self._data_source = data_source

    def __iter__(self):
        return self

    def __next__(self):
        return next(self._data_source)

    def where(self, predicate):
        """Keep only elements for which ``predicate`` is true."""
        self._data_source = (element for element in self._data_source if predicate(element))
        return self

    def first_or_none(self):
        """Return the next element, or None when the sequence is exhausted."""
        return next(self, None)

    def take(self, num):
        """Limit the sequence to its first ``num`` elements."""
        self._data_source = islice(self._data_source, num)
        return self

    def take_while(self, predicate):
        """Take elements while ``predicate`` holds, then stop."""
        self._data_source = takewhile(predicate, self._data_source)
        return self

    def distinct(self, key=None):
        """Yield elements whose key (the element itself when ``key`` is None)
        has not been seen before, preserving first-occurrence order.

        Fix: the original duplicated the loop in two near-identical branches
        and tested membership via ``seen.__contains__``; both paths are
        unified here with identical behavior.
        """
        seen = set()
        for element in self._data_source:
            k = element if key is None else key(element)
            if k not in seen:
                seen.add(k)
                yield element

    def sort_by(self, key=None):
        """Materialise and sort the sequence (no longer lazy past this point)."""
        self._data_source = iter(sorted(self._data_source, key=key))
        return self

    def group_by(self, key=None):
        """Sort by ``key``, then yield (key, Enumerable-of-group) pairs."""
        self.sort_by(key)
        for k, g in groupby(self._data_source, key):
            yield (k, Enumerable(g))
res = Enumerable([1,2,3,3,3,3,2,2,2,1,1,1,1,4,4,4,3,3,2,3,4,4,1,1])
t = res.where(lambda i: i == 8).first_or_none()
pass
|
18,955 | 79d622b63cbbaa7e2ffee9fc881df507efba9546 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Module to plot data, create simple html page
@author: Daniel Boline <ddboline@gmail.com>
"""
import os
import matplotlib
matplotlib.use('Agg')
import pylab as pl
import numpy as np
#from pandas.tools.plotting import scatter_matrix
def create_html_page_of_plots(list_of_plots, prefix='html'):
    """
    Write paginated html index pages (200 images each) for the given pngs.

    The png files in the working directory are moved into ``prefix`` and
    ``index_0.html``, ``index_1.html``, ... are created there, each showing
    up to 200 of the plots named in ``list_of_plots``.
    """
    if not os.path.exists(prefix):
        os.makedirs(prefix)
    os.system('mv *.png %s' % prefix)
    idx = 0
    htmlfile = open('%s/index_0.html' % prefix, 'w')
    htmlfile.write('<!DOCTYPE html><html><body><div>\n')
    for plot in list_of_plots:
        if idx > 0 and idx % 200 == 0:
            # close the current page and start the next one
            htmlfile.write('</div></body></html>\n')
            htmlfile.close()
            htmlfile = open('%s/index_%d.html' % (prefix, (idx // 200)), 'w')
            htmlfile.write('<!DOCTYPE html><html><body><div>\n')
        htmlfile.write('<p><img src="%s"></p>\n' % plot)
        idx += 1
    # Bug fix: the original emitted '</div></html></html>', leaving <body>
    # unclosed and duplicating </html>.
    htmlfile.write('</div></body></html>\n')
    htmlfile.close()
### Specify histogram binning by hand
BOUNDS = {}
def plot_data(indf, prefix='html'):
    """
    create scatter matrix plot, histograms

    Draws one log-scale histogram per column of ``indf`` (presumably a
    pandas DataFrame -- TODO confirm with callers), saves each as
    "<col>_hist.png", and builds html index pages for them via
    create_html_page_of_plots.
    """
    list_of_plots = []
    # scatter_matrix(indf)
    # pl.savefig('scatter_matrix.png')
    # list_of_plots.append('scatter_matrix.png')
    for col in indf:
        pl.clf()
        # cond = indf[col].notnull()
        # v = indf[cond][col]
        v = indf[col]
        # nent = len(v)
        # hmin, hmax = v.min(), v.max()
        # xbins = np.linspace(hmin,hmax,nent)
        # hmin, hmax, nbin = BOUNDS[col]
        # xbins = np.linspace(hmin, hmax, nbin)
        # NOTE(review): 'normed' was removed in matplotlib 3.1 (renamed to
        # 'density') -- confirm the pinned matplotlib version supports it.
        v.hist(bins=20, histtype='step', normed=True, log=True)
        pl.title(col)
        pl.savefig('%s_hist.png' % col)
        list_of_plots.append('%s_hist.png' % col)
    create_html_page_of_plots(list_of_plots, prefix)
    return
|
18,956 | 5a4100c969525ab7a0bbd090cebefa6bd7cd6b44 | # # -*- coding: utf-8 -*-
# from __future__ import unicode_literals
# from django.contrib import admin
# from reversion.admin import VersionAdmin
# # from tabbed_admin import TabbedModelAdmin
# from . import models
# @admin.register(models.Manpower)
# class ManPowerAdmin(VersionAdmin):
# search_fields = ['nombre']
# list_filter = ('type_manpower', 'moneda')
# list_display = [
# 'nombre',
# 'unidad',
# 'type_manpower',
# 'moneda',
# 'precio'
# ]
# # tab_overview = (
# # (None, {
# # 'fields': ('nombre', 'unidad', 'type_manpower')
# # }),
# # )
# # tab_cost = (
# # ('Costo', {
# # 'fields': ('type_cost', 'moneda', 'precio')
# # }),
# # )
# # tabs = [
# # ('Overview', tab_overview),
# # ('Costos', tab_cost)
# # ]
# @admin.register(models.Material)
# class MaterialAdmin(VersionAdmin):
# search_fields = ['nombre']
# list_filter = ('type_material', 'moneda')
# list_display = [
# 'nombre',
# 'unidad',
# 'type_material',
# 'moneda',
# 'precio'
# ]
# @admin.register(models.Equipment)
# class EquipmentAdmin(VersionAdmin):
# search_fields = ['nombre']
# list_filter = ('type_equipment', 'moneda')
# list_display = [
# 'nombre',
# 'unidad',
# 'type_equipment',
# 'moneda',
# 'precio'
# ]
# @admin.register(models.TypeManpower)
# class TypeManpowerAdmin(admin.ModelAdmin):
# pass
# @admin.register(models.TypeMaterial)
# class TypeMaterialAdmin(admin.ModelAdmin):
# pass
# @admin.register(models.TypeEquipment)
# class TypeEquipmentAdmin(admin.ModelAdmin):
# pass |
18,957 | c833c41159d28a5998394a7305ad811193d30a78 | The fundamental `tuples`
When `range` comes in handy
Get more with `collections`!
Operations with `bytes` and `bytearray`
`queue`s and threads |
18,958 | 518f84974dd0dc6221a4700a700d2afd39361168 | # -*- coding: utf-8 -*-
"""
Functions to facilitate all sorts of data loading and saving from files to
memory and visa versa.
"""
import os
import pickle
import numpy as np
import pandas as pd
from google.cloud import bigquery
from .data import TimeSeries
table_names = ['buoy_MTE','buoy_SJB','sharp_ceilometer','sharp_aps',
'sharp_fast','sharp_opc','sharp_platform',
'sharp_pwd','sharp_rosr','sharp_slow','sharp_smps',
'sharp_sms','sharp_sonde']
# Functions for loading data from gcp bigquery
def get_sharp_data(client, start, end, date_part=None):
    """Fetch every SHARP/buoy table between ``start`` and ``end``.

    Wrapper around the per-instrument getters below; returns a dict keyed
    by table name.

    Args:
        client (bigquery.Client): configured client to access BigQuery.
        start (str): lower time bound, 'yy-mm-dd [HH-MM-SS.FFFFFF]'.
        end (str): upper time bound, same format.
        date_part: intended for the fast/slow/platform queries, which are
            currently commented out -- presently unused.

    Returns:
        dict: table name -> per-instrument query result.
    """
    print("Not loading fast/slow/platform data.")
    data_out = {}
    data_out['sharp_sonde'] = get_sondes(client, start, end)
    data_out['sharp_ceilometer'] = get_ceilometer(client, start, end)
    data_out['sharp_opc'] = get_opc(client, start, end)
    data_out['sharp_aps'] = get_aps(client, start, end)
    data_out['sharp_smps'] = get_smps(client, start, end)
    tnames = ['buoy_MTE', 'buoy_SJB', 'sharp_pwd', 'sharp_rosr',
              'sharp_sms']
    for table_id in tnames:
        data_out[table_id] = get_table(client, start, end, table_id)
    # for table_id in ['sharp_fast', 'sharp_slow', 'sharp_platform']:
    #     data_out[table_id] = get_table(client, start, end, table_id, date_part)
    return data_out
def get_sondes(client, start, end):
    """Query radiosonde launches from BigQuery between *start* and *end*.

    Args:
        client (bigquery.Client): configured BigQuery client.
        start (str): timestamp string, 'yy-mm-dd [HH-MM-SS.FFFFFF]'.
        end (str): timestamp string, same format as *start*.

    Returns:
        dict: keyed by launch time formatted 'mm-dd_HH'; each value is a
        dict holding a 'df' DataFrame of the profile measurements plus the
        launch metadata ('LaunchTime', 'LaunchLatitude', 'LaunchLongitude').
    """
    query = ("SELECT * FROM cfog.sharp_radiosonde "
             f"WHERE LaunchTime BETWEEN '{start}' AND '{end}' "
             "ORDER BY LaunchTime ASC")
    print(f"Executing bigquery query string: ")
    print(query + '\n')
    raw = {row['LaunchTime'].strftime('%m-%d_%H'): row
           for row in client.query(query=query)}
    print("Radiosondes obtained within the queried time bounds: ")
    print(list(raw))
    # Columns copied into the per-launch measurement DataFrame
    # (SoundingIdPk, RadioRxTimePk and PtuStatus are intentionally ignored).
    profile_cols = ['DataSrvTime', 'Pressure', 'Temperature', 'Humidity',
                    'WindDir', 'WindSpeed', 'WindNorth', 'WindEast',
                    'Height', 'WindInterpolated', 'Latitude', 'Longitude',
                    'North', 'East', 'Up', 'Altitude', 'Dropping']
    sonde_data_out = {}
    for key, row in raw.items():
        entry = {'df': pd.DataFrame({col: row[col] for col in profile_cols})}
        entry['LaunchTime'] = row['LaunchTime']
        entry['LaunchLatitude'] = row['LaunchLatitude']
        entry['LaunchLongitude'] = row['LaunchLongitude']
        sonde_data_out[key] = entry
    print(f"Query complete. Total number of data entries: {len(sonde_data_out)}.\n\n")
    del raw
    return sonde_data_out
def get_ceilometer(client, start, end):
    """Query ceilometer backscatter data from BigQuery.

    Args:
        client (bigquery.Client): configured BigQuery client.
        start (str): timestamp string, 'yy-mm-dd [HH-MM-SS.FFFFFF]'.
        end (str): timestamp string, same format as *start*.

    Returns:
        dict with keys:
            backscatter (np.ndarray): profile, heights x time, in
                sensitivity-normalized units unless rescaled upstream.
            heights (np.ndarray): gate heights from resolution/num_gates.
            df (pd.DataFrame): remaining columns indexed by timestamp.
            resolution, num_gates: instrument settings for the window.
    """
    query = ("SELECT * FROM cfog.sharp_ceilometer "
             f"WHERE timestamp BETWEEN '{start}' AND '{end}' "
             "ORDER BY timestamp ASC")
    print(f"Executing bigquery query string: ")
    print(query + '\n')
    job = client.query(query)
    job.result()
    frame = job.to_dataframe()
    # The gate geometry must be constant over the queried window, otherwise
    # a single heights axis cannot describe the whole backscatter array.
    if frame['resolution'].unique().size == 1 and frame['num_gates'].unique().size==1:
        print("Consistency check on resolution and num_gates passed.")
        resolution = frame['resolution'].unique()[0]
        num_gates = frame['num_gates'].unique()[0]
    else:
        raise ValueError("Resolutions and num_gates are not consistent")
    # The array-valued column comes back as lists; stack into (gates x time).
    backscatter = np.array(frame['backscatter_profile'].values.tolist()).T
    frame_df = frame.drop(columns=['backscatter_profile']).set_index('timestamp')
    heights = np.arange(10, 10 + resolution * num_gates, resolution)
    ceil_data_out = dict(backscatter=backscatter,
                         heights=heights,
                         df=frame_df,
                         resolution=resolution,
                         num_gates=num_gates)
    print(f"Query complete. Total number of data entries: {ceil_data_out['df'].shape[0]}.\n\n")
    return ceil_data_out
def get_opc(client, start, end):
    """Query optical particle counter (OPC) spectra from BigQuery.

    Args:
        client (bigquery.Client): configured BigQuery client.
        start (str): timestamp string, 'yy-mm-dd [HH-MM-SS.FFFFFF]'.
        end (str): timestamp string, same format as *start*.

    Returns:
        dict with keys:
            spectra (np.ndarray): particle spectra, bins x time.
            binsize (np.ndarray): fixed instrument bin sizes (units not
                recorded here — presumably micrometres; confirm with the
                instrument documentation).
            df (pd.DataFrame): remaining columns indexed by timestamp.
    """
    query = ("SELECT * FROM cfog.sharp_OPC "
             f"WHERE timestamp BETWEEN '{start}' AND '{end}' "
             "ORDER BY timestamp ASC")
    print(f"Executing bigquery query string: ")
    print(query + '\n')
    job = client.query(query)
    job.result()
    frame = job.to_dataframe()
    # The array-valued column comes back as lists; stack into (bins x time).
    spectra = np.array(frame['spectra'].values.tolist()).T
    frame_df = frame.drop(columns=['spectra']).set_index('timestamp')
    # Hard-coded bin sizes: these are not stored in the table.
    binsize = np.array([0.46010524556604593,0.6606824566769,0.91491746243386907,
                        1.195215726298366,1.4649081758117393,1.8300250375885727,
                        2.5350321387248442,3.4999845389695112,4.50000193575099,
                        5.7499993072082258,7.2499995196838025,8.9999960119985154,
                        11.000000156148959,13.000001845860073,15.000000374490131,
                        16.7500010443006])
    opc_data_out = dict(spectra=spectra,
                        binsize=binsize,
                        df=frame_df)
    print(f"Query complete. Total number of data entries: {opc_data_out['df'].shape[0]}.\n\n")
    return opc_data_out
def get_aps(client, start, end):
    """Query aerodynamic particle sizer (APS) data from BigQuery.

    Args:
        client (bigquery.Client): configured BigQuery client.
        start (str): timestamp string, 'yy-mm-dd [HH-MM-SS.FFFFFF]'.
        end (str): timestamp string, same format as *start*.

    Returns:
        dict with keys:
            values (np.ndarray): spectra, bins x time.
            lowBouDia / highBouDia (np.ndarray): bin diameter bounds.
            midDia (np.ndarray): bin median diameters.
            df (pd.DataFrame): remaining columns indexed by timestamp.
    """
    query = ("SELECT * FROM cfog.sharp_aps "
             f"WHERE timestamp BETWEEN '{start}' AND '{end}' "
             "ORDER BY timestamp ASC")
    print(f"Executing bigquery query string: ")
    print(query + '\n')
    job = client.query(query)
    job.result()
    frame = job.to_dataframe()
    # Array-valued columns come back as lists; stack each into (bins x time).
    array_cols = ('values', 'lowBouDia', 'highBouDia', 'midDia')
    stacked = {c: np.array(frame[c].values.tolist()).T for c in array_cols}
    frame_df = frame.drop(columns=list(array_cols)).set_index('timestamp')
    aps_data_out = dict(values=stacked['values'],
                        lowBouDia=stacked['lowBouDia'],
                        highBouDia=stacked['highBouDia'],
                        midDia=stacked['midDia'],
                        df=frame_df)
    print(f"Query complete. Total number of data entries: {aps_data_out['df'].shape[0]}.\n\n")
    return aps_data_out
def get_smps(client, start, end):
    """Query scanning mobility particle sizer (SMPS) data from BigQuery.

    Args:
        client (bigquery.Client): configured BigQuery client.
        start (str): timestamp string, 'yy-mm-dd [HH-MM-SS.FFFFFF]'.
        end (str): timestamp string, same format as *start*.

    Returns:
        dict with keys:
            values (np.ndarray): spectra, bins x time.
            lowBouDia / highBouDia (np.ndarray): bin diameter bounds.
            midDia (np.ndarray): bin median diameters.
            df (pd.DataFrame): remaining columns indexed by timestamp.
    """
    query = ("SELECT * FROM cfog.sharp_smps "
             f"WHERE timestamp BETWEEN '{start}' AND '{end}' "
             "ORDER BY timestamp ASC")
    print(f"Executing bigquery query string: ")
    print(query + '\n')
    job = client.query(query)
    job.result()
    frame = job.to_dataframe()
    # Array-valued columns come back as lists; stack each into (bins x time).
    array_cols = ('values', 'lowBouDia', 'highBouDia', 'midDia')
    stacked = {c: np.array(frame[c].values.tolist()).T for c in array_cols}
    frame_df = frame.drop(columns=list(array_cols)).set_index('timestamp')
    smps_data_out = dict(values=stacked['values'],
                         lowBouDia=stacked['lowBouDia'],
                         highBouDia=stacked['highBouDia'],
                         midDia=stacked['midDia'],
                         df=frame_df)
    print(f"Query complete. Total number of data entries: {smps_data_out['df'].shape[0]}.\n\n")
    return smps_data_out
def get_table(client, start, end, table_id, date_part=None):
    """Getting table data from bigquery client.

    Args:
        client (bigquery.Client) : Configured client to access bigquery
        start (str) : time str in the format of yy-mm:dd [HH-MM-SS.FFFFFF]
        end (str) : time str in the format of yy-mm:dd [HH-MM-SS.FFFFFF]
        table_id (str) : table name of any bigquery table without array column.
        date_part (str) : date_part param for SQL TIMESTAMP_TRUNC() function;
            when given, numeric columns are averaged within each truncated
            timestamp bucket.

    Returns:
        pd.DataFrame : with index being the timestamp of the data
    """
    if date_part is None:
        table_query_str = f"SELECT * FROM cfog.{table_id} " +\
                          f"WHERE timestamp BETWEEN '{start}' AND '{end}' " +\
                          "ORDER BY timestamp ASC"
    else:
        # Averaged query: discover the numeric columns from the table schema.
        table_ref = client.dataset('cfog').table(table_id)
        table = client.get_table(table_ref)
        # BigQuery reports numeric columns as INTEGER/FLOAT (legacy) or
        # INT64/FLOAT64 (standard SQL).  The previous filter used 'INT',
        # which is not a schema type, so integer columns were silently
        # dropped from the averaged result.
        numeric_types = ('INT', 'INTEGER', 'INT64', 'FLOAT', 'FLOAT64')
        schemas = [s for s in table.schema if s.field_type in numeric_types]
        field_names = [s.name for s in schemas]
        field_name_strs = ','.join([f"AVG({name}) as {name}" for name in field_names])
        trunc_exp = f"TIMESTAMP_TRUNC(timestamp, {date_part}) AS timestamp"
        table_query_str = f"SELECT {trunc_exp}, {field_name_strs} FROM cfog.{table_id} " +\
                          f"WHERE timestamp BETWEEN '{start}' AND '{end}' " +\
                          "GROUP BY timestamp ORDER BY timestamp"
    print(f"Executing bigquery query string: ")
    print(table_query_str + '\n')
    table_query_job = client.query(table_query_str)
    table_query_job.result()
    print("Query job complete. Start Loading Data. ")
    table_data = table_query_job.to_dataframe().set_index('timestamp')
    print(f"Query complete. Total number of data entries: {table_data.shape[0]}.\n\n")
    return table_data
|
18,959 | adb0a1ea4fad7af427643b2232c8b41bddf110ba | __author__ = 'Tauren'
from app.geocoder.geocoder import Geocoder
from flask.ext.restful import Resource, marshal, fields
from flask import got_request_exception
from app import app
geocoder = Geocoder()
class GeocoderApi(Resource):
    """REST endpoint that geocodes a free-form address string."""

    def get(self, address_string):
        # An empty match list serializes to an empty 'results' array, so no
        # special-casing is needed.
        matches = geocoder.geocode(address_string)
        return {'results': [match.to_dict() for match in matches]}, 200
def log_exception(sender, exception, **extra):
    """ Log an exception to our logging framework """
    # Uses the module-level Flask `app` logger; `sender` and `extra` are
    # part of the blinker signal signature and intentionally unused.
    app.logger.error('Error in Geocoding Service: %s', exception)
# Route every unhandled request exception in `app` through the logger above.
got_request_exception.connect(log_exception, app)
18,960 | f57f27a5655ddad63128b0b1b44bad9bb8382f29 | import pandas as pd
import numpy as np
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score , roc_curve , auc , log_loss
from xgboost import XGBClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from datetime import datetime
import time
import gc
gc.enable()
test = pd.read_csv('C:/Users/oussama/Documents/red hat/act_test.csv',header=0)
train = pd.read_csv('C:/Users/oussama/Documents/red hat/act_train.csv',header=0)
people = pd.read_csv('C:/Users/oussama/Documents/red hat/people.csv',header=0)
# Drop columns with more than 1000 missing values from both splits.
for x in train.columns:
    if train[x].isnull().sum().item()>1000:
        train.drop(x,inplace=True,axis=1)
        test.drop(x,inplace=True,axis=1)
# Cast boolean people columns to 0/1 integers.
for x in [ col for col in people.columns if people[col].dtype ==np.dtype(bool)]:
    people[x] = people[x]*1
# Label-encode the categorical char_1..char_9 columns.
for k in range(1,10,1):
    people['char_{}'.format(k)]= pd.factorize(people['char_{}'.format(k)])[0]
# Derive calendar features, then parse the date columns to datetimes.
train['day_of_week']=train.loc[:,'date'].apply(lambda x : datetime.strptime(str(x) ,'%Y-%m-%d').weekday())
train['month']=train.loc[:,'date'].apply(lambda x : datetime.strptime(str(x) ,'%Y-%m-%d').strftime('%B'))
train['year']=train.loc[:,'date'].apply(lambda x : datetime.strptime(str(x) ,'%Y-%m-%d').strftime('%Y'))
train.date = train.date.apply(lambda x : datetime.strptime(x , '%Y-%m-%d'))
people.date=people.date.apply(lambda x : datetime.strptime(x ,'%Y-%m-%d'))
# One-hot encode the categorical/calendar features, dropping the originals.
train = pd.concat([train , pd.get_dummies(train.activity_category)] , axis=1)
train = pd.concat([train , pd.get_dummies(train.month)] , axis=1)
train.drop('month',axis=1,inplace=True)
train = pd.concat([train , pd.get_dummies(train.year)] , axis=1)
train.drop('year',axis=1,inplace=True)
train = pd.concat([train , pd.get_dummies(train.day_of_week)] , axis=1)
train.drop('day_of_week',axis=1,inplace=True)
# Join activity rows with their person-level features.
train_data=pd.merge(train,people,on='people_id')
# NOTE(review): `train` is deleted here but referenced again in the
# "#drop and start the train" section below, which will raise NameError —
# that section probably means `train_data`.  Verify before running.
del train,test
# Flag people who performed the same activity 2/3/4 times on the same day.
group = pd.DataFrame(train_data.groupby(['people_id','date_x' ,'activity_category']).size())
group.columns=['count_activity']
people_2=[]
people_3=[]
people_4=[]
for pep , df in group.groupby(level = 0):
    if 2 in df.count_activity.values:
        people_2.append(pep)
    if 3 in df.count_activity.values:
        people_3.append(pep)
    if 4 in df.count_activity.values:
        people_4.append(pep)
del group
# NOTE(review): `set([x]).intersection(t)==set([x])` is just `x in t`;
# the simpler form would also be much cheaper per row.
t=set(people_2)
train_data['t_2_activities'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(people_3)
train_data['t_3_activities'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(people_4)
train_data['t_4_activities'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
# Bucket people by the maximum number of repeats of a single activity
# (overall, regardless of date).  The elif chain assigns each person to at
# most one bucket — the highest matching threshold.
group = pd.DataFrame(train_data.groupby(['people_id','activity_category']).size())
group.columns=['act_count']
same_activ_2 =[]
same_activ_4 =[]
same_activ_6 =[]
same_activ_8 =[]
same_activ_10 =[]
for pep,df in group.groupby(level=0):
    if any(df.act_count.values >9) :
        same_activ_10.append(pep)
    elif any(df.act_count.values >7) :
        same_activ_8.append(pep)
    elif any(df.act_count.values >5) :
        same_activ_6.append(pep)
    elif any(df.act_count.values >3) :
        same_activ_4.append(pep)
    elif any(df.act_count.values >1) :
        same_activ_2.append(pep)
    else :
        pass
del group
# Materialize each bucket as a boolean column (x in t, written verbosely).
t=set(same_activ_2)
train_data['same_activity_2'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(same_activ_4)
train_data['same_activity_4'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(same_activ_6)
train_data['same_activity_6'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(same_activ_8)
train_data['same_activity_8'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(same_activ_10)
train_data['same_activity_10'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
# Flag people by how many activities they logged on a single day.
activities_2=[]
activities_4=[]
activities_6=[]
activities_8=[]
activities_10=[]
tet = pd.DataFrame(train_data.groupby(['people_id','date_x'])['activity_category'].agg({'counts_the_activities':np.size}))
for pep , df in tet.groupby(level=0):
    # NOTE(review): `2 & 3 in df...` parses as `(2 & 3) in ...`, i.e. `2 in ...`
    # (bitwise AND, higher precedence than `in`).  Likewise 4&5 -> 4, 6&7 -> 6,
    # 8&9 -> 8.  If the intent was "2 or 3 in values" (etc.), these conditions
    # are wrong — confirm the intended semantics before relying on the flags.
    if 2 & 3 in df.counts_the_activities.values:
        activities_2.append(pep)
    if 4 & 5 in df.counts_the_activities.values:
        activities_4.append(pep)
    if 6 & 7 in df.counts_the_activities.values:
        activities_6.append(pep)
    if 8 & 9 in df.counts_the_activities.values:
        activities_8.append(pep)
    if any(df.counts_the_activities.values>9):
        activities_10.append(pep)
del tet
t=set(activities_2)
train_data['same_time_activ_2'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(activities_4)
train_data['same_time_activ_4'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(activities_6)
train_data['same_time_activ_6'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(activities_8)
train_data['same_time_activ_8'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
t=set(activities_10)
train_data['same_time_activ_10'] = train_data.people_id.apply(lambda x : set([x]).intersection(t)==set([x]))
# Number of selected activities per person.
train_data['occur']=train_data.people_id
train_data.occur=train_data.people_id.apply(dict(train_data.people_id.value_counts()).get)
# Mean of the time interval between a person's activity dates.
for pep , df in train_data.groupby('people_id')['date_x']:
    df=pd.DataFrame(df)
    # NOTE(review): DataFrame.sort is long-deprecated (use sort_values); more
    # importantly, `list(set(...))` below discards ordering, so the pairwise
    # differences l[i]-l[i+1] are taken over an arbitrarily ordered list and
    # the sort above has no effect on them.  Verify the intended computation.
    df.sort(columns='date_x',ascending=False,inplace=True)
    l=list(set(df.date_x.values))
    if len(l)>1:
        mean_time= (sum([l[i]-l[i+1] for i in range(0,len(l)-1,1)])/np.timedelta64(1,'D'))/(len(df.date_x.values)-1)
        people.loc[people.people_id==pep,'mean_time']=mean_time
    else:
        people.loc[people.people_id==pep,'mean_time']=0
train_data=pd.merge(train_data,people.loc[:,['people_id','mean_time']],on='people_id')
# Percentage of groups that are in the test and not in the train.
# NOTE(review): `test_train` and `groups` are never defined in this script,
# so this expression raises NameError; it also discards its result.  Looks
# like leftover exploratory code — remove or define the inputs.
test_train.loc[test_train.group_1.isin(groups)==False,'group_1'].shape[0]/test_train.shape[0]
# First and last activity category selected by each person (chronologically).
first_activitie= train_data.loc[:,['people_id','date_x','activity_category']].sort(columns=['people_id','date_x']).drop_duplicates(['people_id'] ,keep='first')
first_activitie.rename(columns = {'activity_category':'first activity'} , inplace = True)
first_activitie.drop('date_x',axis=1,inplace=True)
last_activity = train_data.loc[:,['people_id','date_x','activity_category']].sort(columns=['people_id','date_x']).drop_duplicates(['people_id'],keep='last')
last_activity.rename(columns = {'activity_category':'last_activity'} , inplace=True)
last_activity.drop('date_x',axis=1,inplace=True)
train_data = pd.merge(train_data,first_activitie,on='people_id')
train_data=pd.merge(train_data,last_activity,on='people_id')
del last_activity , first_activitie
gc.collect()
# Days between each person's earliest activity date (date_x) and earliest
# people-table date (date_y).
people_group =train_data.groupby('people_id')
frame_x=pd.DataFrame(people_group['date_x'].agg({'min_date_x':np.min}))
frame_y=pd.DataFrame(people_group['date_y'].agg({'min_date_y':np.min}))
frame_x.reset_index(level='people_id',inplace=True)
frame_y.reset_index(level='people_id',inplace=True)
frame=pd.merge(frame_x,frame_y,on='people_id')
frame['time_diff']=((frame.min_date_x -frame.min_date_y)/np.timedelta64(1,'D')).astype(int)
train_data=pd.merge(train_data,frame.loc[:,['people_id','time_diff']],on='people_id')
del people_group , frame
# Cast the boolean feature columns built above to 0/1 integers.
for x in [ col for col in train_data.columns if train_data[col].dtype ==np.dtype(bool)]:
    train_data[x] = train_data[x]*1
# Drop the remaining non-numeric columns and build the model matrices.
# NOTE(review): `train` was deleted right after the merge into `train_data`,
# so these three lines raise NameError as written — they almost certainly
# mean `train_data`.  Confirm and rename before running.
train.drop(['activity_category','date'] , axis=1,inplace=True)
y=train.loc[:,'outcome'].values
X=train.values
# The final model blends an XGB, an RF and a GBM with a logistic regression.
xg = XGBClassifier(n_estimators=1000, max_depth=2, learning_rate=0.01, nthread=-1, gamma=0.1)
rf = RandomForestClassifier(n_estimators=1000, max_depth=2)
gbm = GradientBoostingClassifier(n_estimators=1000, learning_rate=0.01, max_depth=2)
models = [xg, rf, gbm]
# Stage 1: out-of-fold predictions from each base model become the
# meta-features.  Fixes over the original: the fold tuple is actually
# unpacked (`(train_test)` never bound `train`/`test`); assignment uses a
# single fancy index (`data[test][j]` wrote into a temporary copy, a no-op);
# only the positive-class probability is kept (predict_proba returns one
# column per class); and column indices start at 0 so column 0 is used.
data = np.zeros((X.shape[0], len(models)))
for j, model in enumerate(models):
    split = StratifiedKFold(y, n_folds=5, shuffle=False)
    for train_idx, test_idx in split:
        X_train, X_test = X[train_idx], X[test_idx]
        y_train, y_test = y[train_idx], y[test_idx]
        model.fit(X_train, y_train)
        data[test_idx, j] = model.predict_proba(X_test)[:, 1]
# Stage 2: blend the out-of-fold predictions (not the raw features) with a
# logistic regression, tracking train/test log loss per fold.
lrg = LogisticRegression()
split_2 = StratifiedKFold(y, n_folds=10)
log_loss_train = []
log_loss_test = []
for k, (train_idx, test_idx) in enumerate(split_2):
    X_train, X_test = data[train_idx], data[test_idx]
    y_train, y_test = y[train_idx], y[test_idx]
    lrg.fit(X_train, y_train)
    # The original nested log_loss inside log_loss when appending the train
    # score; compute each score once and reuse it.
    train_ll = log_loss(y_train, lrg.predict_proba(X_train))
    test_ll = log_loss(y_test, lrg.predict_proba(X_test))
    print('for the {0} iteration the log loss on the train data = {1}\n'.format(k, train_ll))
    log_loss_train.append(train_ll)
    print('for the {0} iteration the log loss on the test data = {1}\n'.format(k, test_ll))
    log_loss_test.append(test_ll)
# For having an eye on overfitting.
print('the mean of the log loss values on the train data is : {0} with a std of : {1}\n'.format(np.mean(log_loss_train), np.std(log_loss_train)))
print('the mean of the log loss values on the test test data is : {0} with a std of : {1}\n'.format(np.mean(log_loss_test), np.std(log_loss_test)))
|
18,961 | 05602650650082ed630042e0a0e2110dccddea75 | import json
import argparse
import torch
import torch.nn as nn
import pandas as pd
from torch import optim
from torch.utils.data import DataLoader
from gensim.models import Word2Vec
from tqdm import tqdm
import config as cfg
from utils import padding
from word2vec_dict import Word2VecDict
from language_model import LanguageModel, init_hidden
from language_dataset import LanguageDataset
############################################################
# Usage of Gensim Word2Vec Object:
#
# target = '<START>'
# target_vec = model_load.wv.__getitem__(['war', 'victory'])
# print(target_vec.shape)
# print(model_load.wv.similar_by_vector(target_vec)[0][0])
# print(model_load.wv.vocab['war'].index)
# print(model_load.wv.index2word[543])
# print(model_load.wv.vector_size)
# print(len(model_load.wv.vocab))
############################################################
# Define config parser
def program_config(parser):
    """Register all command-line options on *parser*.

    Every option defaults to its value in the ``config`` module, so CLI
    flags simply override the module-level settings.  Returns the parser
    for chaining.
    """
    # ------ Add new params here ------>
    parser.add_argument('--max_seq_len', default=cfg.max_seq_len, type=int)
    parser.add_argument('--test_ratio', default=cfg.test_ratio, type=float)
    parser.add_argument('--hidden_dim', default=cfg.hidden_dim, type=int)
    parser.add_argument('--batch_size', default=cfg.batch_size, type=int)
    parser.add_argument('--num_epochs', default=cfg.num_epochs, type=int)
    parser.add_argument('--check_interval', default=cfg.check_interval, type=int)
    parser.add_argument('--lr', default=cfg.lr, type=float)
    parser.add_argument('--sch_factor', default=cfg.sch_factor, type=float)
    parser.add_argument('--sch_patience', default=cfg.sch_patience, type=int)
    # NOTE(review): argparse `type=bool` does not parse 'False' as False
    # (any non-empty string is truthy) — confirm how --sch_verbose is used.
    parser.add_argument('--sch_verbose', default=cfg.sch_verbose, type=bool)
    parser.add_argument('--device', default=cfg.device, type=str)
    parser.add_argument('--emb_model_dir', default=cfg.emb_model_dir, type=str)
    parser.add_argument('--lyrics_dir', default=cfg.lyrics_dir, type=str)
    parser.add_argument('--pretrained_lm_dir', default=cfg.pretrained_lm_dir, type=str)
    parser.add_argument('--save_lm_dir', default=cfg.save_lm_dir, type=str)
    parser.add_argument('--save_tr_l_dir', default=cfg.save_tr_l_dir, type=str)
    parser.add_argument('--save_tr_a_dir', default=cfg.save_tr_a_dir, type=str)
    parser.add_argument('--save_tst_l_dir', default=cfg.save_tst_l_dir, type=str)
    parser.add_argument('--save_tst_a_dir', default=cfg.save_tst_a_dir, type=str)
    parser.add_argument('--save_log_dir', default=cfg.save_log_dir, type=str)
    return parser
# Define training method
def train_dis_epoch(epoch, model, train_loader, criterion, optimizer):
    """Train the language model for one epoch.

    Args:
        epoch (int): current epoch number (used only for logging).
        model: the LanguageModel being trained.
        train_loader (DataLoader): yields (feature, target) minibatches.
        criterion: loss over (batch, vocab, seq) log-probs vs (batch, seq)
            targets (e.g. NLLLoss).
        optimizer: optimizer updating ``model``'s parameters.

    Returns:
        tuple[list, list]: running-average losses and accuracies, one entry
        per ``cfg.check_interval`` batches (plus a final epoch entry when
        the epoch length is not a multiple of the interval).
    """
    train_losses, train_accs = [], []
    total_loss, total_acc = 0, 0
    model.train()
    for i, (feature, target) in enumerate(train_loader):
        feature, target = feature.to(cfg.device), target.long().to(cfg.device)
        hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
        pred = model(feature, hidden)
        pred = pred.view(-1, pred.size(2), pred.size(1))
        # pred: batch_size * vocab_size * seq_len
        # target: batch_size * seq_len
        loss = criterion(pred, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        total_acc += 100 * torch.sum((pred.argmax(dim=1) == target)).item() / (target.size(0) * target.size(1))
        if (i + 1) % cfg.check_interval == 0:
            train_losses.append(total_loss / (i + 1))
            train_accs.append(total_acc / (i + 1))
            cfg.logger.debug(
                "[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
                % (epoch, cfg.num_epochs, i + 1, len(train_loader), train_losses[-1], train_accs[-1])
            )
    # Previously the last interval's numbers were logged a second time after
    # the loop, and an epoch shorter than one check interval raised
    # IndexError on the empty lists.  Instead, record the whole-epoch
    # averages once when the epoch tail was not already reported.
    num_batches = len(train_loader)
    if num_batches and (not train_losses or num_batches % cfg.check_interval != 0):
        train_losses.append(total_loss / num_batches)
        train_accs.append(total_acc / num_batches)
        cfg.logger.debug(
            "[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
            % (epoch, cfg.num_epochs, num_batches, num_batches, train_losses[-1], train_accs[-1])
        )
    return train_losses, train_accs
# Define testing method
def test(model, test_loader, criterion):
    """Evaluate the model on *test_loader*; return (mean loss, mean acc)."""
    loss_sum, acc_sum = 0, 0
    model.eval()
    with torch.no_grad():
        for feature, target in tqdm(test_loader, desc='Test'):
            feature = feature.to(cfg.device)
            target = target.long().to(cfg.device)
            hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
            out = model(feature, hidden)
            # Reshape to batch_size * vocab_size * seq_len for the criterion;
            # target stays batch_size * seq_len.
            out = out.view(-1, out.size(2), out.size(1))
            loss_sum += criterion(out, target).item()
            token_count = target.size(0) * target.size(1)
            acc_sum += 100 * torch.sum(out.argmax(dim=1) == target).item() / token_count
    batch_count = len(test_loader)
    return loss_sum / batch_count, acc_sum / batch_count
# Main
if __name__ == '__main__':
    # Hyper parameters and configs: CLI flags override the defaults in cfg.
    parser = argparse.ArgumentParser()
    parser = program_config(parser)
    opt = parser.parse_args()
    cfg.init_param(opt)
    # Get word2vec dict with embedding model.
    cfg.logger.info('Loading embedding model.')
    wv_dict = Word2VecDict(Word2Vec.load(cfg.emb_model_dir))
    # Load lyrics data, then delete any lyric whose length's greater than max_seq_len.
    cfg.logger.info('Loading lyrics data.')
    with open(cfg.lyrics_dir, 'r') as f:
        lyrics_dict = f.read()
    lyrics_dict = json.loads(lyrics_dict)
    data = []
    for key, val in tqdm(lyrics_dict.items()):  # val is one lyric (token sequence)
        cur_seq_len = len(val)
        if cur_seq_len <= cfg.max_seq_len:
            data.append(val)
    # Uncomment this part to train the partial dataset
    # data = data[:100]
    # Split data into training and testing sets (no shuffle: the split is
    # whatever order the JSON iterated in).
    num_train = int(len(data) * (1 - cfg.test_ratio))
    data_train = data[:num_train]
    data_test = data[num_train:]
    # Torch dataset and dataloader
    train_dataset = LanguageDataset(data_train, wv_dict, padding, cfg.max_seq_len)
    train_loader = DataLoader(dataset=train_dataset, batch_size=cfg.batch_size, shuffle=False)
    if cfg.test_ratio > 0:
        test_dataset = LanguageDataset(data_test, wv_dict, padding, cfg.max_seq_len)
        test_loader = DataLoader(dataset=test_dataset, batch_size=cfg.batch_size, shuffle=False)
    # +1 reserves an extra index for the padding token.
    vocab_size = len(wv_dict.emb_model.wv.vocab) + 1
    # Uncomment this part to check the validity of the dataloader
    # for minibatch in train_loader:
    #     features, targets = minibatch
    #     print(features.size(), targets.size())
    #     for i, (f, t) in enumerate(zip(features, targets)):  # minibatch (one lyric)
    #         for (wv_f, idx_t) in zip(f, t):  # word vector of feature, index of target
    #             print(wv_dict.index2word(wv_dict.vector2index(wv_f.numpy())), wv_dict.index2word(int(idx_t.item())))
    # Print basic info
    cfg.logger.debug('Number of lyrics (Valid / Total): {} / {}'.format(len(data), len(lyrics_dict)))
    cfg.logger.debug('Training / testing size: {} / {}'.format(len(data_train), len(data_test)))
    cfg.logger.debug('Testing set ratio: {}'.format(cfg.test_ratio))
    cfg.logger.debug('Total vocabulary size including paddings: {}'.format(vocab_size))
    cfg.logger.debug('Max sequence length: {}'.format(cfg.max_seq_len))
    cfg.logger.debug('Hidden dimension: {}'.format(cfg.hidden_dim))
    cfg.logger.debug('Batch size: {}'.format(cfg.batch_size))
    cfg.logger.debug('Total epochs: {}'.format(cfg.num_epochs))
    cfg.logger.debug('Intervals to check: {}'.format(cfg.check_interval))
    cfg.logger.debug('Learning rate: {}'.format(cfg.lr))
    cfg.logger.debug('Schedular factor: {}'.format(cfg.sch_factor))
    cfg.logger.debug('Schedular patience: {}'.format(cfg.sch_patience))
    cfg.logger.debug('Schedular verbose: {}'.format(cfg.sch_verbose))
    cfg.logger.debug('Device: {}'.format(cfg.device))
    cfg.logger.debug('Embedding model directory: {}'.format(cfg.emb_model_dir))
    cfg.logger.debug('Lyrics data directory: {}'.format(cfg.lyrics_dir))
    if cfg.pretrained_lm_dir:
        cfg.logger.debug('Pre-trained language model: {}'.format(cfg.pretrained_lm_dir))
    else:
        cfg.logger.debug('Pre-trained language model: initial training')
    # Training setup: NLLLoss pairs with a log-softmax output from the model.
    language_model = LanguageModel(wv_dict, cfg.hidden_dim).to(cfg.device)
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(language_model.parameters(), lr=cfg.lr)
    schedular = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=cfg.sch_factor, patience=cfg.sch_patience, verbose=cfg.sch_verbose)
    if cfg.pretrained_lm_dir:
        lm_loading_res = language_model.load_state_dict(torch.load(cfg.pretrained_lm_dir))
        cfg.logger.debug('Loading language model: {}'.format(lm_loading_res))
    train_losses, train_accs = [], []  # losses & accuracies to save
    if cfg.test_ratio > 0:
        test_losses, test_accs = [], []
    cfg.logger.info('Training.')
    for epoch in range(1, cfg.num_epochs + 1):
        train_losses_, train_accs_ = train_dis_epoch(epoch, language_model, train_loader, criterion, optimizer)
        train_losses += train_losses_
        train_accs += train_accs_
        if cfg.test_ratio > 0:
            test_loss_, test_acc_ = test(language_model, test_loader, criterion)
            test_losses.append(test_loss_)
            test_accs.append(test_acc_)
            cfg.logger.debug(
                "[Epoch %d/%d] ----------------> [Test Loss: %f] [Test Acc: %f]"
                % (epoch, cfg.num_epochs, test_losses[-1], test_accs[-1])
            )
        else:
            cfg.logger.debug("-" * 74)
        # Scheduler steps on the latest *training* loss (no val loss when
        # test_ratio == 0).
        schedular.step(train_losses[-1])
    # Save language model, losses and training accuracies.
    cfg.logger.info('Saving language model.')
    torch.save(language_model.state_dict(), cfg.save_lm_dir)
    cfg.logger.info('Saving training losses.')
    saving_train_losses = pd.DataFrame({'Training Loss': train_losses})
    saving_train_losses.to_csv(cfg.save_tr_l_dir, index=False)
    cfg.logger.info('Saving training accuracies.')
    saving_train_accs = pd.DataFrame({'Training Accuracy': train_accs})
    saving_train_accs.to_csv(cfg.save_tr_a_dir, index=False)
    if cfg.test_ratio > 0:
        cfg.logger.info('Saving testing losses.')
        saving_test_losses = pd.DataFrame({'Testing Loss': test_losses})
        saving_test_losses.to_csv(cfg.save_tst_l_dir, index=False)
        cfg.logger.info('Saving testing accuracies.')
        saving_test_accs = pd.DataFrame({'Testing Accuracy': test_accs})
        saving_test_accs.to_csv(cfg.save_tst_a_dir, index=False)
    cfg.logger.debug('Saved language model to: {}'.format(cfg.save_lm_dir))
    cfg.logger.debug('Saved training losses to: {}'.format(cfg.save_tr_l_dir))
    cfg.logger.debug('Saved training accuracies to: {}'.format(cfg.save_tr_a_dir))
    if cfg.test_ratio > 0:
        cfg.logger.debug('Saved testing losses to: {}'.format(cfg.save_tst_l_dir))
        cfg.logger.debug('Saved testing accuracies to: {}'.format(cfg.save_tst_a_dir))
    cfg.logger.debug('Saved dis training log to: {}'.format(cfg.save_log_dir))
    cfg.logger.info('Everything\'s done.')
18,962 | 2b944f46575c63870594d13dd2d2cb4ecd74b416 | # Question (Part 2)
# Ab ek check_numbers_list naam ka ek function likho jo inetgers ki list ko arguments ki tarah le aur fir check kare ki same index
# waale dono integers even hain ya nahi. Yeh check karne ke liye pichle Part 1 mein likhe check_numbers function ka use karo. Agar
# # aapne apne function ko [2, 6, 18, 10, 3, 75] aur [6, 19, 24, 12, 3, 87] Toh usko yeh output deni chaiye:
# def check_number_list(a,b):
# i=0
# while i<len(a):
# s=a[i]
# j=b[i]
# if s%2==0 and j%2==0:
# print("both are even")
# else:
# print("both are not even")
# i=i+1
# a=[2,6,18,10,3,75]
# b=[6,19,24,12,3,87]
# check_number_list(a,b) |
18,963 | 0aec8653f5badb8b0f76d8afeff9cfd051223993 | from rest_framework import serializers
from .models import UsersData
class UsersSerializer(serializers.ModelSerializer):
    """
    Returns users list.
    """
    class Meta:
        model = UsersData
        # 'status' was listed twice in the original fields list; the
        # duplicate added nothing to the serialized output.
        fields = ['id', 'full_name', 'emp_code', 'status', 'email', 'crd', 'upd']
class CreateUsersSerializer(serializers.ModelSerializer):
    """
    Creates a user, or updates the name/employee code of an existing user
    matched case-insensitively by email.
    """
    email = serializers.EmailField()
    emp_code = serializers.IntegerField()

    class Meta:
        model = UsersData
        fields = ['id', 'full_name', 'emp_code', 'email', 'status', 'crd', 'upd']

    def create(self, validated_data):
        # Normalise the email once so the existence check, the lookup and
        # the stored value all agree.  Previously the existence check used
        # the raw (case-sensitive, unstripped) email while the subsequent
        # get() used iexact + strip, so an email differing only in case or
        # whitespace could pass the filter and then raise DoesNotExist —
        # or vice versa, creating a near-duplicate row.
        email = validated_data['email'].strip().lower()
        full_name = validated_data['full_name'].strip()
        emp_code = validated_data['emp_code']
        if UsersData.objects.filter(email__iexact=email).exists():
            user_data = UsersData.objects.get(email__iexact=email)
            user_data.full_name = full_name
            user_data.emp_code = emp_code
            user_data.save()
        else:
            user_data = UsersData.objects.create(full_name=full_name, email=email, emp_code=emp_code)
        return user_data
18,964 | 164a6bdbebaf46260f6b7af2d092b1bdf6674b27 | from flask import Flask, request, jsonify, abort
from flask_cors import CORS, cross_origin
import pandas as pd
import pickle
from janome.tokenizer import Tokenizer
from datetime import datetime
import sys
sys.path.append("./models") # 前処理で使った自作モジュール「pipeline」を読み込むためPYTHONPATHに追加
app = Flask(__name__)
CORS(app, support_credentials=True)
# Load the preprocessing pipeline and the prediction model once at app
# startup so requests don't pay the unpickling cost.
# NOTE(review): the open() handles passed to pickle.load are never closed.
tfidf = pickle.load(open("models/tfidf.pkl", "rb"))
model = pickle.load(open("models/lgbm.pkl", "rb"))
dic = pickle.load(open("label2genre.pkl", "rb"))
@app.route('/api/predict', methods=["POST"])
@cross_origin(supports_credentials=True)
def predict():
    """Return the predicted genre for a title POSTed as JSON to /api/predict."""
    try:
        # NOTE(review): this assignment is immediately overwritten below —
        # looks vestigial.
        response = request.headers
        # Features sent to the API in JSON form.
        X = pd.DataFrame(request.json, index=[0])
        X = X["title"][0]
        # Preprocessing: word-segment (wakati) the title, then TF-IDF encode it.
        t = Tokenizer(wakati=True)
        X = " ".join([token for token in t.tokenize(X)])
        X = tfidf.transform([X])
        # Predict class probabilities and map the argmax back to a genre name.
        y_pred = model.predict(X)
        print(y_pred.argmax(1))
        pred = dic[int(y_pred.argmax(1)[0])]
        response = {"status": "OK", "predicted": pred}
        print(response)
        return jsonify(response), 200
    except Exception as e:
        print(e)  # for debugging
        abort(400)
@app.errorhandler(400)
def error_handler(error):
    """Response body returned whenever abort(400) is raised."""
    body = {"status": "Error", "message": "Invalid Parameters"}
    return jsonify(body), error.code


if __name__ == "__main__":
    app.run()  # start the development server
18,965 | 0ecde8a0540eae4ffc87e165c9eb545cff928e68 | from fibonacci import naive, dynamic
import pytest
def pytest_generate_tests(metafunc):
    """Parametrize any test that requests a 'fib' fixture with the reference sequence."""
    reference = [[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]]
    if 'fib' not in metafunc.fixturenames:
        return
    metafunc.parametrize('fib', reference)
def test_naive(fib):
    """The naive implementation must reproduce every reference value."""
    for index, expected in enumerate(fib):
        assert naive(index) == expected
def test_dynamic(fib):
    """The dynamic-programming implementation must reproduce every reference value."""
    for index, expected in enumerate(fib):
        assert dynamic(index) == expected
|
18,966 | 4b904482dccb5674a77d05427e1fe6e97e7e7cd9 | #! /usr/bin/env python
# -*-python-*-
# This script takes a set of files and a set of machines.
# It uploads the files to the given machines in round-robin fashion.
# The script can also be given an optional schema file.
# This file will be uploaded to all machines.
# The list of machines is provided in an ansible inventory file in the section
# called "backends", e.g.:
# [backends]
# machine1
# machine2
# etc.
# Only the "backends" section is used to derive the list of machines.
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from optparse import OptionParser
import getpass
import subprocess
def usage(parser):
    """Print the option help text and terminate with a non-zero exit status."""
    # BUG FIX: print_help() writes the help text itself and returns None, so
    # the old "print parser.print_help()" also printed a stray "None".
    parser.print_help()
    exit(1)
def parse_hosts(filename):
    """Return the Ansible hosts listed in the [backends] section of *filename*."""
    variable_manager = VariableManager()
    loader = DataLoader()
    inventory = Inventory(
        loader = loader,
        variable_manager = variable_manager,
        host_list = filename
    )
    # Only the "backends" group is consulted (see the file header).
    workers = inventory.get_hosts("backends")
    return workers
def execute_command(command):
    # Echo the command, then run it through the shell.
    # SECURITY NOTE(review): shell=True with interpolated host/file names is
    # injection-prone if any input is untrusted — confirm inputs are
    # operator-controlled only.
    print command
    subprocess.call(command, shell=True)
def create_remote_folder(host, folder, user):
    """Create *folder* on *host* over ssh (as *user* when one is given)."""
    prefix = "" if user is None else user + "@"
    command = "ssh " + prefix + host.name + " 'mkdir -p " + folder + "'"
    execute_command(command)
def copy_file_to_remote_host(source, host, folder, user):
    """scp *source* into *folder* on *host*, creating the folder first."""
    create_remote_folder(host, folder, user)
    prefix = "" if user is None else user + "@"
    command = "scp -C " + source + " " + prefix + host.name + ":" + folder + "/"
    execute_command(command)
def copy_schema(schema, folder, workers, user):
    # The schema file goes to every backend (data files are round-robined).
    print "Copying", schema, "to all hosts"
    for w in workers:
        copy_file_to_remote_host(schema, w, folder, user)
def copy_files(filelist, folder, workers, user):
    # Distribute the files across the workers in round-robin order.
    print "Copying", len(filelist), "files to all hosts"
    index = 0
    for f in filelist:
        host = workers[index]
        index = (index + 1) % len(workers)
        copy_file_to_remote_host(f, host, folder, user)
def main():
    """Parse the CLI options and upload the given files to the backend hosts."""
    parser = OptionParser(usage="%prog [options] fileList")
    parser.add_option("-i", help="List of machines to use", dest="hosts")
    parser.add_option("-u", help="Username", dest="user")
    parser.add_option("-d", help="destination folder where output is written",
                      dest="folder")
    parser.add_option("-s", help="optional JSON file describing the data schema", dest="schema")
    (options, args) = parser.parse_args()
    if options.hosts is None:
        usage(parser)
    workers = parse_hosts(options.hosts)
    # Fall back to the local account name when -u is not given.
    user = options.user
    if user is None:
        user = getpass.getuser()
    # BUG FIX: the computed fallback *user* was previously ignored —
    # options.user (possibly None) was passed straight through to the copies.
    if options.schema is not None:
        copy_schema(options.schema, options.folder, workers, user)
    copy_files(args, options.folder, workers, user)

if __name__ == "__main__":
    main()
|
18,967 | f0688a7540e23e5d9e436b69bd106379dc59535d | import pytesseract
from PIL import Image
# img = Image.open('phone_number.png')
# img = Image.open('eng_text.png')
# OCR a Russian-language image and dump the recognized text next to it.
img = Image.open('rus_text.jpg')
# pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
file_name = img.filename
file_name = file_name.split(".")[0]  # strip the extension for the output name
# custom_config = r'--oem 3 --psm 13'
custom_config = r'--oem 3 --psm 6'  # psm 6: assume a single uniform block of text
text = pytesseract.image_to_string(img, lang='rus', config=custom_config)
print(text)
# BUG FIX: write UTF-8 explicitly — the platform default encoding (e.g.
# cp1252 on Windows) cannot encode Cyrillic output and raises
# UnicodeEncodeError.
with open(f'{file_name}.txt', 'w', encoding='utf-8') as text_file:
    text_file.write(text)
18,968 | 6a978626996c98cea50a576bad22328e13ba50df | # Desafio 050 - Desenvolva um programa que leia seis números inteiros e mostre
# a soma apenas daqueles que forem pares. Se o valor digitado
# for impar desconsidere-o.
# Accumulate only the even inputs; odd values are simply ignored.
total = 0
for attempt in range(6):
    value = int(input('Digite o {} número: '.format(attempt + 1)))
    if value % 2 == 0:
        total += value
print('A soma dos números pares é: {}'.format(total))
|
18,969 | d5440d5aa563a226ef890515d49aec1c0447d85f | # pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
def test_can_construct_author(author):
    """Smoke-test the *author* fixture: field types, str/repr, and the HTML badge."""
    assert isinstance(author.name, str)
    assert isinstance(author.url, str)
    assert isinstance(author.github_url, str)
    assert isinstance(author.github_avatar_url, str)
    assert str(author) == author.name
    assert repr(author) == author.name
    # NOTE(review): the expected markup uses an 'application' attribute where
    # an <img> normally has 'src', and "21x" (not "21px") — confirm both
    # mirror _repr_html_'s actual output intentionally rather than typos.
    assert author._repr_html_(width="21x", height="22px") == (
        '<a href="https://github.com/holoviz/" title="Author: panel" target="_blank">'
        '<img application="https://avatars2.githubusercontent.com/u/51678735" alt="panel" '
        'style="border-radius: 50%;width: 21x;height: 22px;vertical-align: text-bottom;">'
        "</img></a>"
    )
|
18,970 | b28b642bd028ff58c35d512db24e3ca5faadb082 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from extractor.issue_tracker.github.issue2db_extract_main import GitHubIssue2DbMain
__author__ = 'valerio cosentino'
import glob
import logging
import logging.handlers
import os
import sys
import uuid
import mysql.connector
from extractor.cvs.git.git2db_extract_main import Git2DbMain
from extractor.cvs.git.git2db_update import Git2DbUpdate
from extractor.db.dbschema import DbSchema
from extractor.forum.eclipse.forum2db_extract_main import EclipseForum2DbMain
from extractor.forum.eclipse.forum2db_update import EclipseForum2DbUpdate
from extractor.forum.stackoverflow.stackoverflow2db_extract_main import StackOverflow2DbMain
from extractor.forum.stackoverflow.stackoverflow2db_update import StackOverflow2DbUpdate
from extractor.instant_messaging.slack.slack2db_extract_main import Slack2DbMain
from extractor.issue_tracker.bugzilla.issue2db_extract_main import BugzillaIssue2DbMain
from extractor.issue_tracker.bugzilla.issue2db_update import BugzillaIssue2DbUpdate
from extractor.issue_tracker.github.issue2db_update import GitHubIssue2DbUpdate
# Default folder and base filename for Gitana log files.
LOG_FOLDER_PATH = "logs"
LOG_NAME = "gitana"
class Gitana:
    """Facade over the Gitana extractors: sets up logging + the MySQL
    connection, then exposes one import_*/update_* method per data source
    (git, Bugzilla, GitHub, Eclipse forum, StackOverflow, Slack)."""

    def __init__(self, config, log_folder_path):
        self.config = config
        self.cnx = mysql.connector.connect(**self.config)
        self.get_logger(log_folder_path)

    def get_logger(self, log_folder_path):
        # File logging when a folder is given, otherwise log to stdout.
        if log_folder_path:
            self.get_file_logger(log_folder_path)
        else:
            self.get_console_logger()

    def get_console_logger(self):
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)

    def get_file_logger(self, log_folder_path):
        # A short uuid suffix keeps concurrent runs from sharing a log file.
        self.create_log_folder(log_folder_path)
        self.log_folder_path = log_folder_path
        self.log_path = self.log_folder_path + "/" + LOG_NAME + "-" + str(uuid.uuid4())[:5] + ".log"
        self.logger = logging.getLogger(self.log_path)
        fileHandler = logging.FileHandler(self.log_path, mode='w')
        formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s", "%Y-%m-%d %H:%M:%S")
        fileHandler.setFormatter(formatter)
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(fileHandler)

    def create_log_folder(self, name):
        if not os.path.exists(name):
            os.makedirs(name)

    def delete_previous_logs(self):
        # Best effort: ignore files that cannot be removed; AttributeError
        # means console logging is in use (no log_folder_path attribute).
        try:
            files = glob.glob(self.log_folder_path + "/*")
            for f in files:
                try:
                    os.remove(f)
                except:
                    continue
        except AttributeError:
            pass

    def init_db(self, db_name):
        self.logger.info("initializing db")
        db = DbSchema(self.cnx, self.logger)
        db.init_database(db_name)

    def create_project(self, db_name, project_name):
        self.logger.info("creating project")
        db = DbSchema(self.cnx, self.logger)
        db.create_project(db_name, project_name)

    def list_projects(self, db_name):
        db = DbSchema(self.cnx, self.logger)
        projects = db.list_projects(db_name)
        for p in projects:
            # Parenthesized form is valid and identical in Python 2 and 3.
            print(p)

    def import_git_data(self, db_name, project_name, repo_name, git_repo_path, before_date, import_type, references, processes):
        self.logger.info("importing git data")
        git2db = Git2DbMain(db_name, project_name,
                            repo_name, git_repo_path, before_date, import_type, references, processes,
                            self.config, self.logger)
        git2db.extract()

    def update_git_data(self, db_name, project_name, repo_name, git_repo_path, before_date, recover_import, import_new_references, processes):
        self.logger.info("updating git data")
        git2db = Git2DbUpdate(db_name, project_name,
                              repo_name, git_repo_path, before_date, recover_import, import_new_references, processes,
                              self.config, self.logger)
        git2db.update()

    def import_bugzilla_tracker_data(self, db_name, project_name, repo_name, issue_tracker_name, url, product,
                                     before_date, recover_import, processes):
        self.logger.info("importing bugzilla data")
        issue2db = BugzillaIssue2DbMain(db_name, project_name,
                                        repo_name, "bugzilla", issue_tracker_name, url, product, before_date,
                                        recover_import, processes,
                                        self.config, self.logger)
        issue2db.extract()

    def update_bugzilla_tracker_data(self, db_name, project_name, repo_name, issue_tracker_name, product, processes):
        self.logger.info("updating bugzilla data")
        issue2db = BugzillaIssue2DbUpdate(db_name, project_name,
                                          repo_name, issue_tracker_name, product, processes,
                                          self.config, self.logger)
        issue2db.update()

    def import_eclipse_forum_data(self, db_name, project_name, forum_name, eclipse_forum_url, before_date,
                                  recover_import, processes):
        self.logger.info("importing eclipse forum data")
        forum2db = EclipseForum2DbMain(db_name, project_name,
                                       "eclipse_forum", forum_name, eclipse_forum_url, before_date, recover_import,
                                       processes,
                                       self.config, self.logger)
        forum2db.extract()

    def update_eclipse_forum_data(self, db_name, project_name, forum_name, processes):
        # BUG FIX: log message said "importing" (copy-paste from the method
        # above) although this is the update path.
        self.logger.info("updating eclipse forum data")
        forum2db = EclipseForum2DbUpdate(db_name, project_name, forum_name, processes,
                                         self.config, self.logger)
        forum2db.update()

    def import_stackoverflow_data(self, db_name, project_name, forum_name, search_query, before_date, recover_import,
                                  tokens):
        self.logger.info("importing stackoverflow data")
        stackoverflow2db = StackOverflow2DbMain(db_name, project_name,
                                                "stackoverflow", forum_name, search_query, before_date, recover_import,
                                                tokens,
                                                self.config, self.logger)
        stackoverflow2db.extract()

    def update_stackoverflow_data(self, db_name, project_name, forum_name, tokens):
        self.logger.info("updating stackoverflow data")
        stackoverflow2db = StackOverflow2DbUpdate(db_name, project_name, forum_name, tokens,
                                                  self.config, self.logger)
        stackoverflow2db.update()

    def import_slack_data(self, db_name, project_name, instant_messaging_name, before_date, recover_import, tokens):
        self.logger.info("importing slack data")
        slack2db = Slack2DbMain(db_name, project_name,
                                "slack", instant_messaging_name, before_date, recover_import, tokens,
                                self.config, self.logger)
        slack2db.extract()

    def update_slack_data(self, db_name, project_name, instant_messaging_name, tokens):
        self.logger.info("updating slack data")
        # TODO

    def import_github_tracker_data(self, db_name, project_name, repo_name, issue_tracker_name, github_repo_full_name,
                                   access_tokens, processes):
        logging.info("importing github data")
        github_importer = GitHubIssue2DbMain(db_name, project_name, repo_name, issue_tracker_name,
                                             github_repo_full_name,
                                             access_tokens,
                                             processes, self.config)
        github_importer.extract()

    def update_github_tracker_data(self, db_name, project_name, repo_name, issue_tracker_name, github_repo_full_name,
                                   access_tokens,
                                   processes):
        logging.info("updating github data")
        github_updater = GitHubIssue2DbUpdate(db_name, project_name, repo_name, issue_tracker_name,
                                              github_repo_full_name,
                                              access_tokens,
                                              processes, self.config)
        github_updater.update()
|
18,971 | 65a8add2ae44dc673373696070d67ed46324b2bd | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python 2 demo: __getattr__ (called only on lookup MISS) versus
# __getattribute__ (called on EVERY attribute access), on new-style and
# old-style classes.
class Person(object):
    country = 'cn'
    def __getattr__(self, name):
        # Delegate misses back to the default machinery (so a real miss
        # still raises AttributeError).
        return object.__getattribute__(self, name)
    def show(self):
        print 'hello'
        return self.country
p = Person()
attr = getattr(p, 'show')
if callable(attr):
    print attr()
else:
    print attr
attr = getattr(p, 'country')
print attr
print '-' * 30
class A(object): # new style class
    def __init__(self):
        self.a = 1
    def __getattr__(self, name):
        # Only reached when normal lookup fails; 'x' is synthesized.
        print 'calling __getattr__ ...'
        if name == 'x':
            return 'x'
        else:
            raise AttributeError('No such attribute: %s' % name)
    def __getattribute__(self, name):
        # Intercepts every access, including __dict__ itself.
        print 'calling __getattribute__ ...'
        return object.__getattribute__(self, name)
a = A()
print a.__dict__
print a.a
print getattr(a, 'a')
print a.x
print a.__dict__
try:
    print getattr(a, 'y')
except AttributeError as e:
    print e.message
print '-' * 30
# Same exercise on an old-style class: __getattribute__ is NOT honored
# there, so only __getattr__ fires on misses.
class A(): # old style class
    def __init__(self):
        self.a = 1
    def __getattr__(self, name):
        print 'calling __getattr__ ...'
        if name == 'x':
            return 'x'
        else:
            raise AttributeError('No such attribute: %s' % name)
    def __getattribute__(self, name):
        print 'calling __getattribute__ ...'
        return object.__getattribute__(self, name)
a = A()
print a.__dict__
print a.a
print getattr(a, 'a')
print a.x
print a.__dict__
try:
    print getattr(a, 'y')
except AttributeError as e:
    print e.message
|
18,972 | b944e3c798b26ca8af2d2cd58e7aae1103066b38 | from django.db import models
from django.core.validators import MaxLengthValidator
class Category(models.Model):
    """A product category used to group Products."""
    name = models.CharField(max_length = 25)
    def __str__(self):
        return self.name
    def __unicode__(self):
        # Python 2 compatibility alias of __str__.
        return '%s' % self.name
    class Meta:
        verbose_name_plural = 'Categories'
class Product(models.Model):
    """A sellable item; image_tag renders a small preview for the admin."""
    sku = models.CharField(unique = True, max_length = 200)
    barcode = models.CharField(max_length = 200)
    name = models.CharField(max_length = 50)
    description = models.TextField(max_length = 500, blank=True, validators=[MaxLengthValidator(500)])
    base_price = models.DecimalField(max_digits = 5, decimal_places = 2, default = 0)
    product_image = models.ImageField(blank = True, null = True, upload_to = 'product_images/%Y/%m/%D/')
    number_of_stocks = models.IntegerField(default = 0)
    date_created = models.DateField(auto_now_add = True)
    category = models.ManyToManyField(Category, default = None)
    def __str__(self):
        return self.name
    # Displays the image (admin preview).
    def image_tag(self):
        # BUG FIX: the stored file's public address is .url; 'upload_to' is a
        # field option, not a usable per-instance attribute, so the rendered
        # <img> previously pointed at garbage.
        return u'<img src="%s" width="150" height="150" />' % (self.product_image.url)
    image_tag.short_description = 'Image'
class OrderItem(models.Model):
    """A single product line in a cart/order."""
    # NOTE(review): OneToOneField means a given Product can ever appear in at
    # most one OrderItem — a ForeignKey is the usual choice here; confirm intent.
    product = models.OneToOneField(Product, on_delete=models.SET_NULL, null = True)
    is_ordered = models.BooleanField(default=False)
    date_added = models.DateTimeField(auto_now=True)
    date_ordered = models.DateTimeField(null=True)
    def __str__(self):
        return self.product.name
class Order(models.Model):
    """A customer order aggregating OrderItems (OR holds the receipt number)."""
    OR = models.CharField(max_length = 15)
    # user fk
    is_ordered = models.BooleanField(default=False)
    items = models.ManyToManyField(OrderItem)
    order_date = models.DateTimeField(auto_now=True)
    raw_total_price = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
    def get_cart_items(self):
        # BUG FIX: the model has no 'products' relation — the many-to-many is
        # named 'items'; the old code raised AttributeError.
        return self.items.all()
    def get_cart_total(self):
        return sum([item.product.base_price for item in self.items.all()])
    def __str__(self):
        return self.OR
class UserAccount(models.Model):
    """A shop user account."""
    username = models.CharField(max_length = 25)
    email = models.EmailField(max_length = 50)
    # SECURITY NOTE(review): password is stored in plain text and capped at 15
    # chars — this should use Django's auth.User with hashed passwords.
    password = models.CharField(max_length = 15, null = False)
|
18,973 | af6944db51ab0b63aca21391cf594e9c518680b7 | import numpy as np
import torch as tc
def random(size, num_dis, min_jump=0.3):
    """Draw a random piecewise-constant signal of length *size*.

    *num_dis* discontinuity positions are sampled without replacement from
    [1, size); each jump magnitude lies in [min_jump, min_jump + 1) and its
    sign is flipped with probability 1/2.

    :returns: torch tensor containing the signal
    """
    # NB: the three np.random calls stay in this exact order so that a fixed
    # seed reproduces the same signal as before.
    positions = sorted(np.random.choice(range(1, size), num_dis, replace=False))
    magnitudes = min_jump + np.random.rand(num_dis)
    flip = np.random.choice([True, False], num_dis, replace=True)
    magnitudes[flip] = -magnitudes[flip]
    return _discriteSignal(size, positions, magnitudes)
def _discriteSignal(size, dis, jumps, initial_value=0):
    """Build a piecewise-constant signal with given discontinuities and jumps.

    :param size: length of the signal
    :param dis: sorted discontinuity positions (indices into the signal)
    :param jumps: jump applied to the level at each discontinuity
    :param initial_value: level of the first plateau
    :returns: torch tensor containing the piecewise-constant signal
    """
    assert len(dis) == len(jumps), "dis and jumps should have the same lengths"
    signal = tc.zeros(size)
    level = initial_value
    start = 0
    # Fill each plateau, shifting the level at every discontinuity.
    # ROBUSTNESS FIX: with no discontinuities the old code crashed on
    # dis[0]; now the whole signal is simply the initial level.
    for position, jump in zip(dis, jumps):
        signal[start:position] = level
        level += jump
        start = position
    # Final plateau (the entire signal when dis is empty).
    signal[start:] = level
    return signal
if __name__ == "__main__":
X = random(100,5)
print(X)
|
18,974 | 0596581f3dc8ac6f7051649eb2715766dc22ddb8 | '''
Created on 2016/06/19
visualization by python
@author: Hitoshi_Nakamura
'''
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
from scipy.stats import sem
# Time steps at which to inspect the particle distribution histograms.
whenOfPartDist1 = 1000
whenOfPartDist2 = 4000
whenOfPartDist3 = 7000
whenOfPartDist4 = 9000
# Load the filter results (true vs estimated state trajectories).
df = pd.read_csv( 'C:/Users/Hitoshi_Nakamura/Documents/Eclipse_workspace/Filtering/result.csv' )
# BUG FIX: this load was commented out although df_ensamble is used by every
# histogram and interval plot below, crashing the script with a NameError.
df_ensamble = pd.read_csv( 'C:/Users/Hitoshi_Nakamura/Documents/Eclipse_workspace/Filtering/ensambleresult.csv' )
# True vs estimated trajectories of theta and theta_dot.
plt.plot( df.iloc[:, 0], df.iloc[:, 1] , label = 'true_theta' )
plt.plot( df.iloc[:, 0], df.iloc[:, 2] , label = 'estimate_theta' )
plt.plot( df.iloc[:, 0], df.iloc[:, 3] , label = 'true_theta_dot' )
plt.plot( df.iloc[:, 0], df.iloc[:, 4] , label = 'estimate_theta_dot' )
plt.grid( True )
plt.xlabel( 'Value' )
plt.ylabel( 'Time Series' )
plt.title( 'Target & Estimate of PF' )
plt.xlim( [0, 10000] )
plt.ylim( [-0.5, 0.5] )
plt.legend( loc = 'best' )

def _particle_histogram(subplot_index, when):
    """Draw one particle-distribution histogram (degeneracy check) at time *when*."""
    plt.subplot(subplot_index)
    plt.hist( df_ensamble.iloc[int( when ), :], bins = 50, label = 'ensamble histogram' )
    plt.axvline( x = df.iloc[when, 1], color = "red", lw = 5, label = 'true value' )
    plt.axvline( x = df.iloc[when, 2], color = "green", lw = 5, label = 'estimated value' )
    title = 'Distribution of Particle at t=' + str( when )
    plt.xlim( [-0.4, 0.4] )
    plt.title( title )
    plt.xlabel( 'x' )
    plt.ylabel( 'freq' )
    plt.legend( loc = 'best' )

# Histograms of the particle cloud at four checkpoints (the four identical
# copy-pasted subplot sections are now one helper).
plt.figure()
_particle_histogram(221, whenOfPartDist1)
_particle_histogram(222, whenOfPartDist2)
_particle_histogram(223, whenOfPartDist3)
_particle_histogram(224, whenOfPartDist4)

# Ensemble mean with a 90% band (1.65 * standard error of the mean).
time = range( len( df_ensamble ) )
mean = df_ensamble.mean( 1 ).values
semVal = df_ensamble.apply( sem, axis = 1 ).mul( 1.65 ).values
plt.figure()
plt.fill_between( time, mean - semVal ,
                  mean + semVal, color = "#3F5D7D" )
plt.xlabel( 'Value' )
plt.ylabel( 'Time Series' )
plt.xlim( [8000, 10000] )
plt.ylim( [-0.5, 0.5] )
plt.plot( time, mean, color = "yellow", lw = 2 )
plt.title( "Mean Value and 90% Intervals of Ensamble", fontsize = 22 )
plt.grid( True )
plt.show()
|
18,975 | a32efe62f99b8d8ef27fcf944d96fafab7ba211a | import os
# Google Code Jam "standing ovation"-style solver: for each test case count
# how many scores can reach total p, spending at most s "surprising" scores.
# NOTE(review): `int(score)/3` relies on Python 2 floor division — under
# Python 3 this yields a float and the k comparisons change; use // there.
inp = open('B-large.in', "r")
out = open('outputBL.out', "w")
text = inp.read()
x = text.split("\n")
count = int(x[0])
over_scores = []
for i in range (0, int(count)):
    c = i+1
    line = x[c].split(" ");
    g= line[0]
    s = int(line[1])  # remaining "surprising" budget
    p = int(line[2])  # target per-judge score
    scores = line[3:]
    can =0
    # print g, s, p
    for score in scores:
        k = p - (int(score)/3)  # shortfall of the average judge score
        m = int(score)%3        # how unevenly the total can be split
        # print "SCORE/3",int(score)/3, "SCORE", score,"k", k,"m", m
        if k<=0:
            can+=1
            # print "CAN1"
        elif k==1:
            if m>0:
                can+=1
                # print "CAN2"
            elif m==0 and s>0 and int(score)/3!=0:
                #print s
                s=s-1
                can+=1
                # print "CAN4"
                # print "SUB"
        elif k==2 and m==2 and s>0:
            # print "CAN3 \n SUB"
            can+=1
            s = s-1
    out.write("Case #"+str(c)+": "+str(can)+"\n")
    #out.write()
out.close()
inp.close()
|
18,976 | cec41dccd3e9e81c7d7fe0253be8cc5f9256699d | from django.urls import path
from api.recipe.views.biscuit_recipe import RecipeListAPIView, RecipeDetailAPIView
from api.recipe.views.manufactured_product import (
ManufacturedProductRecipeListAPIView,
ManufacturedProductRecipeDetailAPIView
)
urlpatterns = [
    # Biscuit-recipe list/create and update/detail endpoints, plus the
    # manufactured-product counterparts.
    path('', RecipeListAPIView.as_view(), name='biscuit recipe create'),
    path('manufactured_product/', ManufacturedProductRecipeListAPIView.as_view(), name='create_recipe'),
    path('update_or_detail/', RecipeDetailAPIView.as_view(), name='biscuit recipe update and get detail'),
    path('manufactured_product/update_or_detail/', ManufacturedProductRecipeDetailAPIView.as_view(), name='update')
]
|
18,977 | 0744b2db46ee28c32ab4d3f0f4535d1f0a0b293a | from flask import *
app = Flask(__name__)

@app.route('/')
def hello():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/covid_map')
def covid():
    """Render the COVID map page."""
    return render_template('covid_map.html')
@app.route('/predict', methods=['POST','GET'])
def predict():
    """Show the 'prediction' for an uploaded X-ray scan.

    NOTE(review): no model is invoked — the reported condition is literally
    the uploaded file's base name; confirm this is placeholder logic.
    NOTE(review): a plain GET falls through and returns None (Flask error).
    """
    if request.method == 'POST':
        #f = request.files.get('file')
        f = request.files['file']
        fname=f.filename
        fname = fname.split(".")
        name=fname[0]
        return render_template('index.html', pred1="Condition of given X-ray Scan : {} ".format(name))
        #return render_template('index.html', pred1="Success")

if __name__ == "__main__":
    app.run(debug = True)
|
18,978 | cb801071e96e5977c0e75cc55629c93fe471c800 | # Function checks if given number is palindromic in both binary and decimal systems
def is_palindrome(number):
    """Return True iff *number* is palindromic in both decimal and binary.

    The binary comparison ignores the '0b' prefix produced by bin().
    """
    decimal_digits = str(number)
    binary_digits = bin(number)[2:]
    # Idiomatic rewrite: return the boolean expression directly instead of
    # the if / return True / else / return False ladder.
    return decimal_digits == decimal_digits[::-1] and binary_digits == binary_digits[::-1]
# is_palindrome(7)
|
18,979 | 39a362b091b1dca809a262b92f7605ddd996142a | import tkinter as tk
from tkinter import *
import datetime
from functools import partial
import requests
import pandas as pd
import numpy as np
import sys
import os
import tkinter.ttk
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
te=TransactionEncoder()
dff=pd.read_csv("./database.csv")  # full transaction history
ind=110       # y-coordinate of the current line-item row in the window
det_ind=200
arrec=[]      # per-customer item baskets consumed by apriori in recm()
mycolor = '#%02x%02x%02x' % (50, 50, 50)  # dark grey
added_count=0   # how many rows have been confirmed via Add
newent_count=0  # how many rows have been laid out via New item
# SECURITY NOTE(review): never commit real API credentials to source.
twilio_account_id="API Key"
tkinter_umlauts=['odiaeresis', 'adiaeresis', 'udiaeresis', 'Odiaeresis', 'Adiaeresis', 'Udiaeresis', 'ssharp']
class AutocompleteEntry(tk.Entry):
    """
    Subclass of tk.Entry that features autocompletion.
    To enable autocompletion use set_completion_list(list) to define
    a list of possible strings to hit.
    To cycle through hits use down and up arrow keys.
    """
    def set_completion_list(self, completion_list):
        """Install the candidate strings and wire up the key handler."""
        self._completion_list = sorted(completion_list, key=str.lower) # Work with a sorted list
        self._hits = []
        self._hit_index = 0
        self.position = 0
        self.bind('<KeyRelease>', self.handle_keyrelease)
    def autocomplete(self, delta=0):
        """autocomplete the Entry, delta may be 0/1/-1 to cycle through possible hits"""
        if delta: # need to delete selection otherwise we would fix the current position
            self.delete(self.position, tk.END)
        else: # set position to end so selection starts where textentry ended
            self.position = len(self.get())
        # collect hits
        _hits = []
        for element in self._completion_list:
            if element.lower().startswith(self.get().lower()): # Match case-insensitively
                _hits.append(element)
        # if we have a new hit list, keep this in mind
        if _hits != self._hits:
            self._hit_index = 0
            self._hits=_hits
        # only allow cycling if we are in a known hit list
        if _hits == self._hits and self._hits:
            self._hit_index = (self._hit_index + delta) % len(self._hits)
        # now finally perform the auto completion
        if self._hits:
            self.delete(0,tk.END)
            self.insert(0,self._hits[self._hit_index])
            self.select_range(self.position,tk.END)
            # NOTE(review): mirrors the completion into the module-level
            # 'entry1' widget — couples this widget to app globals; confirm
            # this side effect is intended.
            entry1.delete(0,tk.END)
            entry1.insert(0,self.get())
    def handle_keyrelease(self, event):
        """event handler for the keyrelease event on this widget"""
        if event.keysym == "BackSpace":
            self.delete(self.index(tk.INSERT), tk.END)
            self.position = self.index(tk.END)
        if event.keysym == "Left":
            if self.position < self.index(tk.END): # delete the selection
                self.delete(self.position, tk.END)
            else:
                self.position = self.position-1 # delete one character
                self.delete(self.position, tk.END)
        if event.keysym == "Right":
            self.position = self.index(tk.END) # go to end (no selection)
        if event.keysym == "Down":
            self.autocomplete(1) # cycle to next hit
        if event.keysym == "Up":
            self.autocomplete(-1) # cycle to previous hit
        # Printable keys (including named umlaut keysyms) trigger completion.
        if len(event.keysym) == 1 or event.keysym in tkinter_umlauts:
            self.autocomplete()
def _column_as_list(frame, idx):
    """Return column *idx* of *frame* as a plain list (via numpy, as before)."""
    return list(np.array(frame.iloc[:, idx]))

# Full transaction history, one parallel list per CSV column.
# (Nine identical copy-pasted extract/convert stanzas collapsed to a helper.)
overall_user = _column_as_list(dff, 0)
overall_phone = _column_as_list(dff, 1)
overall_date = _column_as_list(dff, 2)
overall_time = _column_as_list(dff, 3)
overall_name = _column_as_list(dff, 4)
overall_price = _column_as_list(dff, 5)
overall_quantity = _column_as_list(dff, 6)
overall_amount = _column_as_list(dff, 7)
overall_cno = _column_as_list(dff, 8)

# Next customer number = last recorded customer number + 1.
cno = dff["Customer No"][len(overall_cno) - 1] + 1

# Per-session (current bill) accumulators, appended to by store().
curr_user = []
curr_phone = []
curr_date = []
curr_time = []
curr_name = []
curr_price = []
curr_quantity = []
curr_amount = []
curr_cno = []
def print_bill():
    # Regenerate print.txt from the current bill and send it to the printer.
    # NOTE(review): the file is reopened for every line; os.startfile is
    # Windows-only.
    if os.path.isfile('print.txt'):
        os.remove('print.txt')
    with open('print.txt','a') as file:
        file.write('\t\tThank you for shopping\t\t\n')
        file.write('\t\t-----------------------\t\t\n')
        file.write(f'{curr_date[0]}\t\t\t{curr_time[0]}\n')
        file.write(f'Customer Name: {curr_user[0]}\n')
        file.write(f'Customer Phone: {curr_phone[0]}\n')
        file.write('Product\t\t\tQuantity\t\tPrice\t\t\tAmount\n')
    for i in range(len(curr_name)):
        with open('print.txt','a') as file:
            file.write(f'{curr_name[i]}\t\t\t{curr_quantity[i]}\t\t\t{curr_price[i]}\t\t\t{curr_amount[i]}\n')
    with open('print.txt','a') as file:
        file.write(f'Payable Amount:\tRs.{sum(curr_amount)}\n')
    os.startfile("print.txt", "print") # print bill using printer
# Main window: customer header fields plus the line-item column headers.
window1=tk.Tk()
window1.configure(background="Light blue")
window1.title("Supermarket Recommendation System")
window1.geometry('600x600')
now = datetime.datetime.now()
date=now.strftime("%Y-%m-%d")
time=now.strftime("%H:%M:%S")
timee=tk.Label(window1,text=time, bg="Light blue", fg=mycolor)
timee.place(x=200,y=15)
datee=tk.Label(window1,text=date,bg="Light blue", fg=mycolor)
datee.place(x=300,y=15)
e11=tk.Label(window1,text="Name : ",bg="Light blue", fg=mycolor)
e11.place(x=50,y=45)
e22=tk.Label(window1,text="Phone Number : ",bg="Light blue", fg=mycolor)
e22.place(x=270,y=45)
e1=tk.Entry(window1)
e1.place(x=100,y=45)
e2=tk.Entry(window1)
e2.place(x=380,y=45)
l1=tk.Label(window1,text="Item name",bg="Light blue", fg=mycolor)
l1.place(x=10, y=80)
l2=tk.Label(window1,text="Price",bg="Light blue", fg=mycolor)
l2.place(x=110, y=80)
l3=tk.Label(window1,text="Quantity",bg="Light blue", fg=mycolor)
l3.place(x=210, y=80)
# NOTE(review): l3 is reassigned — the Quantity label reference is dropped.
l3=tk.Label(window1,text="Amount",bg="Light blue", fg=mycolor)
l3.place(x=310, y=80)
def store() :
    # Record one line item: read the row widgets laid out by newent(),
    # compute the amount, display it, and append to the current-bill lists.
    global added_count
    added_count=added_count+1
    global e1,e2
    usern=e1.get()
    phno=e2.get()
    x=entry1.get()   # item name
    y=entry2.get()   # unit price
    z=entry3.get()   # quantity
    y=int(y)
    z=int(z)
    w=z*y            # line amount
    l4=tk.Label(window1,text=(str(w)+"Rs."),bg="Light blue", fg=mycolor)
    l4.place(x=310,y=ind)
    l5=tk.Label(window1,text="Added.",bg="Light blue", fg=mycolor)
    l5.place(x=410,y=ind)
    curr_user.append(usern)
    curr_phone.append(phno)
    curr_date.append(date)
    curr_time.append(time)
    curr_name.append(x)
    curr_price.append(y)
    curr_quantity.append(z)
    curr_amount.append(w)
    curr_cno.append(cno)
def newent() :
    # Start a new line-item row: flush the previous row via store() when the
    # user skipped the Add button, then lay out fresh entry widgets.
    global newent_count
    newent_count=newent_count+1
    if(newent_count!=added_count+1 and newent_count!=0):
        store()
    global ind
    ind=ind+20
    global entry1,entry2,entry3
    entry1=tk.Entry(window1)
    entry1.place(x=10,y=ind)
    # NOTE(review): AutocompleteEntry is constructed with another Entry as
    # its Tk master — confirm this renders as intended.
    entry = AutocompleteEntry(entry1)
    # Autocomplete candidates = distinct product names seen in the history.
    test_list=list(set(pd.read_csv("./database.csv")['Name']))
    if(np.nan in test_list):
        test_list.remove(np.nan)
    entry.set_completion_list(test_list)
    entry.pack()
    entry.focus_set()
    entry2=tk.Entry(window1)
    entry2.place(x=110,y=ind)
    entry3=tk.Entry(window1)
    entry3.place(x=210,y=ind)
# Action buttons for confirming the current row / starting a new row.
# NOTE(review): both buttons are bound to the same name 'button1'.
button1=tk.Button(window1,text="Add",command=store,fg="White", bg=mycolor)
button1.place(x=400,y=430)
button1=tk.Button(window1,text="New item",command=newent, fg="White", bg=mycolor)
button1.place(x=400,y=400)
'''Below function requires changes for different users'''
def send_text() :
    # Build the bill summary and WhatsApp it to the customer via Twilio.
    text="Thank you for shopping with us! Here's your bill: "
    for i in range(len(curr_name)):
        text+=str(curr_name[i])+" - Rs."+str(curr_amount[i])+"\n"
    total_amount=0
    for k in curr_amount :
        total_amount=total_amount+k
    text+="Total: "+str(total_amount)
    from twilio.rest import Client
    '''Create Twilio Account to get account_sid and auth_token'''
    # SECURITY NOTE(review): real credentials must come from configuration or
    # environment variables, never source code.
    account_sid = 'Account_sid'
    auth_token = 'Acc_Token'
    client = Client(account_sid, auth_token)
    '''from_ = 'whatsapp:+the number assigned by twilio','''
    message = client.messages.create(
        from_='whatsapp:+000000000',
        body=text,
        to='whatsapp:+91'+curr_phone[0]
    )
    print(message.sid)
def subm() :
    """Persist the current bill to database.csv and notify the customer.

    Appends the current bill's rows to the overall_* history lists,
    rewrites the CSV, shows the total in the window, and sends the
    WhatsApp bill via send_text().
    """
    global ind
    # Merge this bill into the overall purchase history.
    overall_user.extend(curr_user)
    overall_phone.extend(curr_phone)
    overall_date.extend(curr_date)
    overall_time.extend(curr_time)
    overall_name.extend(curr_name)
    overall_price.extend(curr_price)
    overall_quantity.extend(curr_quantity)
    overall_amount.extend(curr_amount)
    overall_cno.extend(curr_cno)
    df=pd.DataFrame({"UserName":overall_user,"Phone":overall_phone,"Date":overall_date,"Time":overall_time,"Name":overall_name,"Price":overall_price,"Quantity":overall_quantity,"Amount":overall_amount,"Customer No" : overall_cno })
    df.to_csv("./database.csv",index=False)
    # Bill total for this customer only (curr_amount, not overall).
    ans=0
    for k in curr_amount :
        ans=ans+k
    op=tk.Label(window1,text="Submission successful. Thank you for shopping! Click below button to print bill",bg="Light blue", fg=mycolor)
    op.place(x=50,y=ind+50)
    op1=tk.Label(window1,text=("Total amount : "+ str(ans) + "Rs."),bg="Light blue", fg=mycolor)
    op1.place(x=50,y=ind+80)
    button1=tk.Button(window1,text="Print Bill",command=print_bill, fg="White", bg=mycolor)
    button1.place(x=0,y=400)
    send_text()
button3=tk.Button(window1,text="Submit",command=subm, fg="White", bg=mycolor)
button3.place(x=400,y=460)
lg=[]
def recm() :
    """Mine past bills with Apriori and pop up frequent item pairings.

    Rebuilds per-customer item baskets from database.csv, one-hot
    encodes them, and shows itemsets with >=2 products in a new window.
    """
    df_new=pd.read_csv("./database.csv")
    # Rebuild one basket (list of item names) per customer number.
    for i in range(cno+1) :
        lg=[]
        for z in df_new.index :
            if df_new.iloc[z][8]==i :           # column 8 = customer no
                lg.append(df_new.iloc[z][4])    # column 4 = item name
        arrec.append(lg)
    booldata=te.fit(arrec).transform(arrec)
    dff_new=pd.DataFrame(booldata,columns=te.columns_)
    freq_items=apriori(dff_new,min_support=0.05,use_colnames=True)
    freq_items['Length']=freq_items['itemsets'].apply(lambda x: len(x))
    # Keep itemsets of at least two products with sufficient support.
    recc=freq_items[(freq_items['Length']>=2) & (freq_items['support']>=0.02)]
    op=(recc.iloc[:,1].to_string(index=False)).split('\n')
    window_rec=tk.Tk()
    window_rec.title("Recommendations")
    window_rec.configure(background=mycolor)
    window_rec.geometry('300x300')
    for zz in op :
        l1=tk.Label(window_rec,text=zz,fg="White", bg=mycolor)
        l1.pack()
button4=tk.Button(window1,text="Recommend",command=recm,fg="White", bg=mycolor)
button4.place(x=400,y=490)
f=0   # "found" flag shared with det2(); 1 once any match was displayed
def det() :
    """Open a lookup window showing a customer's purchase history.

    The inner det2() searches database.csv for rows matching the entered
    username and phone number and lists them in a table.
    """
    w11=tk.Tk()
    w11.title("Find Details")
    w11.configure(background=mycolor)
    w11.geometry('600x600')
    l12=tk.Label(w11,text="Username",fg="White", bg=mycolor)
    l12.place(x=100,y=50)
    e12=tk.Entry(w11)
    e12.place(x=160,y=50)
    l22=tk.Label(w11,text="Phone",fg="White", bg=mycolor)
    l22.place(x=100,y=80)
    e22=tk.Entry(w11)
    e22.place(x=160,y=80)
    def det2() :
        # Search database.csv for rows matching username+phone and render them.
        df_d=pd.read_csv("./database.csv")
        global det_ind
        zzz=e12.get()   # entered username
        yyy=e22.get()   # entered phone number
        laa1=tk.Label(w11,text="Date",fg="White", bg=mycolor)
        laa2=tk.Label(w11,text="Time",fg="White", bg=mycolor)
        laa3=tk.Label(w11,text="Product",fg="White", bg=mycolor)
        laa4=tk.Label(w11,text="Price",fg="White", bg=mycolor)
        laa5=tk.Label(w11,text="Quantity",fg="White", bg=mycolor)
        laa6=tk.Label(w11,text="Amount",fg="White", bg=mycolor)
        laa1.place(x=30,y=160)
        laa2.place(x=100,y=160)
        laa3.place(x=170,y=160)
        laa4.place(x=240,y=160)
        laa5.place(x=310,y=160)
        laa6.place(x=380,y=160)
        global f
        for j in df_d.index :
            # Columns 0/1 = username/phone; 2..7 = date,time,name,price,qty,amount.
            if (df_d.iloc[j][0]==zzz) & (df_d.iloc[j][1]==int(yyy)) :
                f=1
                la1=tk.Label(w11,text=df_d.iloc[j][2],fg="White", bg=mycolor)
                la2=tk.Label(w11,text=df_d.iloc[j][3],fg="White", bg=mycolor)
                la3=tk.Label(w11,text=df_d.iloc[j][4],fg="White", bg=mycolor)
                la4=tk.Label(w11,text=df_d.iloc[j][5],fg="White", bg=mycolor)
                la5=tk.Label(w11,text=df_d.iloc[j][6],fg="White", bg=mycolor)
                la6=tk.Label(w11,text=df_d.iloc[j][7],fg="White", bg=mycolor)
                la1.place(x=30,y=det_ind)
                la2.place(x=100,y=det_ind)
                la3.place(x=170,y=det_ind)
                la4.place(x=240,y=det_ind)
                la5.place(x=310,y=det_ind)
                la6.place(x=380,y=det_ind)
                det_ind=det_ind+30
        # NOTE(review): f is never reset to 0, so after one successful
        # search a later failed search will not show "Not Found!".
        if f==0 :
            la7=tk.Label(w11,text="Not Found!",bg="White", fg=mycolor)
            la7.place(x=170,y=400)
    button6=tk.Button(w11,text="Submit",command=det2,fg="White", bg=mycolor)
    button6.place(x=170,y=115)
button5=tk.Button(window1,text="Find Customer Details",command=det,fg="White", bg=mycolor)
button5.place(x=400,y=520)
window1.mainloop()
18,980 | b9d0db6db170e08ce2a8313cd78f2b4a76f87ded | import boto3
from datetime import datetime
import dateutil.tz
import json
import ast
BUILD_VERSION = '1.1.14'
AWS_REGION = 'us-east-1'
AWS_EMAIL_REGION = 'us-east-1'
SERVICE_ACCOUNT_NAME = 'IAM_USERNAME_TO_EXCLUDE_IF_ANY'
EMAIL_TO_ADMIN = 'receipient@example.com'
EMAIL_FROM = 'sender@example.com'
EMAIL_SEND_COMPLETION_REPORT = ast.literal_eval('False')
GROUP_LIST = "svc-accounts"
# Length of mask over the IAM Access Key
MASK_ACCESS_KEY_LENGTH = ast.literal_eval('16')
# First email warning
FIRST_WARNING_NUM_DAYS = 83
FIRST_WARNING_MESSAGE = 'key is due to expire in 1 week (7 days)'
# Last email warning
LAST_WARNING_NUM_DAYS = 89
LAST_WARNING_MESSAGE = 'key is due to expire in 1 day (tomorrow)'
# Max AGE days of key after which it is considered EXPIRED (deactivated)
KEY_MAX_AGE_IN_DAYS = 90
KEY_EXPIRED_MESSAGE = 'key is now EXPIRED! Changing key to INACTIVE state'
KEY_YOUNG_MESSAGE = 'key is still young'
# ==========================================================
# Character length of an IAM Access Key
ACCESS_KEY_LENGTH = 20
KEY_STATE_ACTIVE = "Active"
KEY_STATE_INACTIVE = "Inactive"
# ==========================================================
#check to see if the MASK_ACCESS_KEY_LENGTH has been misconfigured
if MASK_ACCESS_KEY_LENGTH > ACCESS_KEY_LENGTH:
MASK_ACCESS_KEY_LENGTH = 16
# ==========================================================
def tzutc():
    # Thin alias: return a UTC tzinfo object (delegates to dateutil.tz).
    return dateutil.tz.tzutc()
def key_age(key_created_date):
    """Return the age of an IAM access key in whole days.

    :param key_created_date: timezone-aware ``CreateDate`` returned by
        ``iam.list_access_keys``.
    :return: non-negative number of complete days since the key was created.
    """
    tz_info = key_created_date.tzinfo
    age = datetime.now(tz_info) - key_created_date
    print('key age %s' % age)
    # timedelta.days is exact.  The previous string-parsing approach
    # looked for the substring "days" in str(age), which returned 0 for
    # a key exactly 1 day old ("1 day, ..." is singular) and was fragile
    # against any change in timedelta's repr.  Clamp to 0 for the
    # (clock-skew) case of a creation date in the future.
    return max(age.days, 0)
def send_deactivate_email(email_to, username, age, access_key_id):
    """Email a key-deactivation notice via SES.

    :param email_to: recipient address.
    :param username: IAM user whose key was deactivated.
    :param age: key age in days at deactivation time.
    :param access_key_id: key id to report (callers pass the masked id).
    """
    client = boto3.client('ses', region_name=AWS_EMAIL_REGION)
    data = 'The Access Key [%s] belonging to User [%s] has been automatically ' \
           'deactivated due to it being %s days old' % (access_key_id, username, age)
    response = client.send_email(
        Source=EMAIL_FROM,
        Destination={
            'ToAddresses': [email_to]
        },
        Message={
            'Subject': {
                'Data': 'AWS IAM Access Key Rotation - Deactivation of Access Key: %s' % access_key_id
            },
            'Body': {
                'Text': {
                    'Data': data
                }
            }
        })
def send_completion_email(email_to, finished, deactivated_report):
    """Email the end-of-run report via SES.

    :param email_to: recipient address.
    :param finished: completion timestamp (string).
    :param deactivated_report: dict report; JSON-pretty-printed in the body.
    """
    client = boto3.client('ses', region_name=AWS_EMAIL_REGION)
    data = 'AWS IAM Access Key Rotation Lambda Function (cron job) finished successfully at %s \n \n ' \
           'Deactivation Report:\n%s' % (finished, json.dumps(deactivated_report, indent=4, sort_keys=True))
    response = client.send_email(
        Source=EMAIL_FROM,
        Destination={
            'ToAddresses': [email_to]
        },
        Message={
            'Subject': {
                'Data': 'AWS IAM Access Key Rotation - Lambda Function'
            },
            'Body': {
                'Text': {
                    'Data': data
                }
            }
        })
def mask_access_key(access_key):
    """Mask an access key, leaving only its trailing characters visible.

    The number of visible characters is ACCESS_KEY_LENGTH minus
    MASK_ACCESS_KEY_LENGTH; the masked prefix is padded with '*'.
    """
    visible_chars = ACCESS_KEY_LENGTH - MASK_ACCESS_KEY_LENGTH
    tail = access_key[-visible_chars:]
    return tail.rjust(len(access_key), "*")
def lambda_handler(event, context):
    """Scan all IAM users' access keys and deactivate expired ones.

    Keys older than KEY_MAX_AGE_IN_DAYS are set to Inactive.  Users in
    GROUP_LIST and the SERVICE_ACCOUNT_NAME user are skipped.  Returns a
    report dict {'reportdate': ..., 'users': [...]} describing every key
    examined; a username-free variant is optionally emailed via SES.
    """
    print('*****************************')
    print('RotateAccessKey (%s): starting...' % BUILD_VERSION)
    print('*****************************')
    # Connect to AWS APIs
    client = boto3.client('iam')
    users = {}
    # NOTE(review): list_users is not paginated here -- accounts with
    # more than 999 users would be truncated.
    data = client.list_users(MaxItems=999)
    print(data)
    userindex = 0
    for user in data['Users']:
        userid = user['UserId']
        username = user['UserName']
        users[userid] = username
    users_report1 = []
    users_report2 = []
    for user in users:   # iterates user ids (dict keys)
        userindex += 1
        user_keys = []
        print('---------------------')
        print('userindex %s' % userindex)
        print('user %s' % user)
        username = users[user]
        print('username %s' % username)
        # test is a user belongs to a specific list of groups. If they do, do not invalidate the access key
        print("Test if the user belongs to the exclusion group")
        user_groups = client.list_groups_for_user(UserName=username)
        skip = False
        for groupName in user_groups['Groups']:
            if groupName['GroupName'] == GROUP_LIST:
                print('Detected that user belongs to ', GROUP_LIST)
                skip = True
                continue
        if skip:
            # NOTE(review): message text appears inverted -- this branch
            # skips the user, so the key is NOT invalidated.
            print("Do invalidate Access Key")
            continue
        # check to see if the current user is a special service account
        if username == SERVICE_ACCOUNT_NAME:
            # NOTE(review): '%s' is never interpolated here (comma, not %)
            # -- print() receives two separate arguments.
            print('detected special service account %s, skipping account...', username)
            continue
        access_keys = client.list_access_keys(UserName=username)['AccessKeyMetadata']
        for access_key in access_keys:
            print(access_key)
            access_key_id = access_key['AccessKeyId']
            masked_access_key_id = mask_access_key(access_key_id)
            print('AccessKeyId %s' % masked_access_key_id)
            existing_key_status = access_key['Status']
            print(existing_key_status)
            key_created_date = access_key['CreateDate']
            print('key_created_date %s' % key_created_date)
            age = key_age(key_created_date)
            print('age %s' % age)
            # we only need to examine the currently Active and about to expire keys
            if existing_key_status == "Inactive":
                key_state = 'key is already in an INACTIVE state'
                key_info = {'accesskeyid': masked_access_key_id, 'age': age, 'state': key_state, 'changed': False}
                user_keys.append(key_info)
                continue
            key_state = ''
            key_state_changed = False
            # Exact-day warnings assume the function runs once per day;
            # a missed run skips that warning entirely.
            if age < FIRST_WARNING_NUM_DAYS:
                key_state = KEY_YOUNG_MESSAGE
            elif age == FIRST_WARNING_NUM_DAYS:
                key_state = FIRST_WARNING_MESSAGE
            elif age == LAST_WARNING_NUM_DAYS:
                key_state = LAST_WARNING_MESSAGE
            elif age >= KEY_MAX_AGE_IN_DAYS:
                key_state = KEY_EXPIRED_MESSAGE
                client.update_access_key(UserName=username, AccessKeyId=access_key_id, Status=KEY_STATE_INACTIVE)
                #send_deactivate_email(EMAIL_TO_ADMIN, username, age, masked_access_key_id)
                key_state_changed = True
            print('key_state %s' % key_state)
            key_info = {'accesskeyid': masked_access_key_id, 'age': age, 'state': key_state, 'changed': key_state_changed}
            user_keys.append(key_info)
        user_info_with_username = {'userid': userindex, 'username': username, 'keys': user_keys}
        user_info_without_username = {'userid': userindex, 'keys': user_keys}
        users_report1.append(user_info_with_username)
        users_report2.append(user_info_without_username)
    finished = str(datetime.now())
    deactivated_report1 = {'reportdate': finished, 'users': users_report1}
    print('deactivated_report1 %s ' % deactivated_report1)
    if EMAIL_SEND_COMPLETION_REPORT:
        # The emailed variant omits usernames.
        deactivated_report2 = {'reportdate': finished, 'users': users_report2}
        send_completion_email(EMAIL_TO_ADMIN, finished, deactivated_report2)
    print('*****************************')
    print('Completed (%s): %s' % (BUILD_VERSION, finished))
    print('*****************************')
    return deactivated_report1
#if __name__ == "__main__":
# event = 1
# context = 1
# lambda_handler(event, context)
|
18,981 | 1c1a8a0e2baf1643df97a3c3d36ce2bf95c3c461 | from sqlalchemy.orm.exc import NoResultFound
from users import models as user_models
def groupfinder(userid, request):
    """Authorization callback: return the group names for *userid*.

    Looks the user up in the relational session; an unknown id yields an
    empty principal list rather than an error.
    """
    session = request.rel_db_session
    try:
        matched = session.query(user_models.User).filter_by(id=userid).one()
    except NoResultFound:
        # No such user -> no group principals.
        return []
    return [matched.group.name]
18,982 | 2aaf766d0a011a893432dd2fa02addfdc0ab3075 | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DataSource:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'datasource_name': 'str',
'datasource_type': 'str',
'datasource_guid': 'str',
'datasource_qualified_name': 'str',
'obs_folder_count': 'int',
'obs_file_count': 'int',
'css_index_count': 'int',
'css_index_field_count': 'int',
'namespace_count': 'int',
'ges_vertex_count': 'int',
'ges_edge_count': 'int',
'database_count': 'int',
'stream_count': 'int',
'table_count': 'int',
'data_size': 'int',
'databases': 'list[Database]',
'folders': 'list[ObsFolder]',
'css_indices': 'list[CssIndex]',
'namespaces': 'list[Namespace]',
'dis_streams': 'list[DisStream]'
}
attribute_map = {
'datasource_name': 'datasource_name',
'datasource_type': 'datasource_type',
'datasource_guid': 'datasource_guid',
'datasource_qualified_name': 'datasource_qualified_name',
'obs_folder_count': 'obs_folder_count',
'obs_file_count': 'obs_file_count',
'css_index_count': 'css_index_count',
'css_index_field_count': 'css_index_field_count',
'namespace_count': 'namespace_count',
'ges_vertex_count': 'ges_vertex_count',
'ges_edge_count': 'ges_edge_count',
'database_count': 'database_count',
'stream_count': 'stream_count',
'table_count': 'table_count',
'data_size': 'data_size',
'databases': 'databases',
'folders': 'folders',
'css_indices': 'css_indices',
'namespaces': 'namespaces',
'dis_streams': 'dis_streams'
}
def __init__(self, datasource_name=None, datasource_type=None, datasource_guid=None, datasource_qualified_name=None, obs_folder_count=None, obs_file_count=None, css_index_count=None, css_index_field_count=None, namespace_count=None, ges_vertex_count=None, ges_edge_count=None, database_count=None, stream_count=None, table_count=None, data_size=None, databases=None, folders=None, css_indices=None, namespaces=None, dis_streams=None):
"""DataSource
The model defined in huaweicloud sdk
:param datasource_name: 数据连接名称
:type datasource_name: str
:param datasource_type: 数据连接类型
:type datasource_type: str
:param datasource_guid: 数据连接guid
:type datasource_guid: str
:param datasource_qualified_name: 数据连接唯一标识名称
:type datasource_qualified_name: str
:param obs_folder_count: obs目录数
:type obs_folder_count: int
:param obs_file_count: obs文件数
:type obs_file_count: int
:param css_index_count: css索引数
:type css_index_count: int
:param css_index_field_count: css 索引字段数目
:type css_index_field_count: int
:param namespace_count: 命名空间数
:type namespace_count: int
:param ges_vertex_count: ges点的总数
:type ges_vertex_count: int
:param ges_edge_count: ges边的总数
:type ges_edge_count: int
:param database_count: 数据库总数
:type database_count: int
:param stream_count: 通道总数
:type stream_count: int
:param table_count: 表总数
:type table_count: int
:param data_size: 数据大小
:type data_size: int
:param databases: 数据库统计信息
:type databases: list[:class:`huaweicloudsdkdataartsstudio.v1.Database`]
:param folders: 顶层目录统计信息
:type folders: list[:class:`huaweicloudsdkdataartsstudio.v1.ObsFolder`]
:param css_indices: css索引统计信息
:type css_indices: list[:class:`huaweicloudsdkdataartsstudio.v1.CssIndex`]
:param namespaces: 命名空间统计信息
:type namespaces: list[:class:`huaweicloudsdkdataartsstudio.v1.Namespace`]
:param dis_streams: 通道统计信息
:type dis_streams: list[:class:`huaweicloudsdkdataartsstudio.v1.DisStream`]
"""
self._datasource_name = None
self._datasource_type = None
self._datasource_guid = None
self._datasource_qualified_name = None
self._obs_folder_count = None
self._obs_file_count = None
self._css_index_count = None
self._css_index_field_count = None
self._namespace_count = None
self._ges_vertex_count = None
self._ges_edge_count = None
self._database_count = None
self._stream_count = None
self._table_count = None
self._data_size = None
self._databases = None
self._folders = None
self._css_indices = None
self._namespaces = None
self._dis_streams = None
self.discriminator = None
if datasource_name is not None:
self.datasource_name = datasource_name
if datasource_type is not None:
self.datasource_type = datasource_type
if datasource_guid is not None:
self.datasource_guid = datasource_guid
if datasource_qualified_name is not None:
self.datasource_qualified_name = datasource_qualified_name
if obs_folder_count is not None:
self.obs_folder_count = obs_folder_count
if obs_file_count is not None:
self.obs_file_count = obs_file_count
if css_index_count is not None:
self.css_index_count = css_index_count
if css_index_field_count is not None:
self.css_index_field_count = css_index_field_count
if namespace_count is not None:
self.namespace_count = namespace_count
if ges_vertex_count is not None:
self.ges_vertex_count = ges_vertex_count
if ges_edge_count is not None:
self.ges_edge_count = ges_edge_count
if database_count is not None:
self.database_count = database_count
if stream_count is not None:
self.stream_count = stream_count
if table_count is not None:
self.table_count = table_count
if data_size is not None:
self.data_size = data_size
if databases is not None:
self.databases = databases
if folders is not None:
self.folders = folders
if css_indices is not None:
self.css_indices = css_indices
if namespaces is not None:
self.namespaces = namespaces
if dis_streams is not None:
self.dis_streams = dis_streams
@property
def datasource_name(self):
"""Gets the datasource_name of this DataSource.
数据连接名称
:return: The datasource_name of this DataSource.
:rtype: str
"""
return self._datasource_name
@datasource_name.setter
def datasource_name(self, datasource_name):
"""Sets the datasource_name of this DataSource.
数据连接名称
:param datasource_name: The datasource_name of this DataSource.
:type datasource_name: str
"""
self._datasource_name = datasource_name
@property
def datasource_type(self):
"""Gets the datasource_type of this DataSource.
数据连接类型
:return: The datasource_type of this DataSource.
:rtype: str
"""
return self._datasource_type
@datasource_type.setter
def datasource_type(self, datasource_type):
"""Sets the datasource_type of this DataSource.
数据连接类型
:param datasource_type: The datasource_type of this DataSource.
:type datasource_type: str
"""
self._datasource_type = datasource_type
@property
def datasource_guid(self):
"""Gets the datasource_guid of this DataSource.
数据连接guid
:return: The datasource_guid of this DataSource.
:rtype: str
"""
return self._datasource_guid
@datasource_guid.setter
def datasource_guid(self, datasource_guid):
"""Sets the datasource_guid of this DataSource.
数据连接guid
:param datasource_guid: The datasource_guid of this DataSource.
:type datasource_guid: str
"""
self._datasource_guid = datasource_guid
@property
def datasource_qualified_name(self):
"""Gets the datasource_qualified_name of this DataSource.
数据连接唯一标识名称
:return: The datasource_qualified_name of this DataSource.
:rtype: str
"""
return self._datasource_qualified_name
@datasource_qualified_name.setter
def datasource_qualified_name(self, datasource_qualified_name):
"""Sets the datasource_qualified_name of this DataSource.
数据连接唯一标识名称
:param datasource_qualified_name: The datasource_qualified_name of this DataSource.
:type datasource_qualified_name: str
"""
self._datasource_qualified_name = datasource_qualified_name
@property
def obs_folder_count(self):
"""Gets the obs_folder_count of this DataSource.
obs目录数
:return: The obs_folder_count of this DataSource.
:rtype: int
"""
return self._obs_folder_count
@obs_folder_count.setter
def obs_folder_count(self, obs_folder_count):
"""Sets the obs_folder_count of this DataSource.
obs目录数
:param obs_folder_count: The obs_folder_count of this DataSource.
:type obs_folder_count: int
"""
self._obs_folder_count = obs_folder_count
@property
def obs_file_count(self):
"""Gets the obs_file_count of this DataSource.
obs文件数
:return: The obs_file_count of this DataSource.
:rtype: int
"""
return self._obs_file_count
@obs_file_count.setter
def obs_file_count(self, obs_file_count):
"""Sets the obs_file_count of this DataSource.
obs文件数
:param obs_file_count: The obs_file_count of this DataSource.
:type obs_file_count: int
"""
self._obs_file_count = obs_file_count
@property
def css_index_count(self):
"""Gets the css_index_count of this DataSource.
css索引数
:return: The css_index_count of this DataSource.
:rtype: int
"""
return self._css_index_count
@css_index_count.setter
def css_index_count(self, css_index_count):
"""Sets the css_index_count of this DataSource.
css索引数
:param css_index_count: The css_index_count of this DataSource.
:type css_index_count: int
"""
self._css_index_count = css_index_count
@property
def css_index_field_count(self):
"""Gets the css_index_field_count of this DataSource.
css 索引字段数目
:return: The css_index_field_count of this DataSource.
:rtype: int
"""
return self._css_index_field_count
@css_index_field_count.setter
def css_index_field_count(self, css_index_field_count):
"""Sets the css_index_field_count of this DataSource.
css 索引字段数目
:param css_index_field_count: The css_index_field_count of this DataSource.
:type css_index_field_count: int
"""
self._css_index_field_count = css_index_field_count
@property
def namespace_count(self):
"""Gets the namespace_count of this DataSource.
命名空间数
:return: The namespace_count of this DataSource.
:rtype: int
"""
return self._namespace_count
@namespace_count.setter
def namespace_count(self, namespace_count):
"""Sets the namespace_count of this DataSource.
命名空间数
:param namespace_count: The namespace_count of this DataSource.
:type namespace_count: int
"""
self._namespace_count = namespace_count
@property
def ges_vertex_count(self):
"""Gets the ges_vertex_count of this DataSource.
ges点的总数
:return: The ges_vertex_count of this DataSource.
:rtype: int
"""
return self._ges_vertex_count
@ges_vertex_count.setter
def ges_vertex_count(self, ges_vertex_count):
"""Sets the ges_vertex_count of this DataSource.
ges点的总数
:param ges_vertex_count: The ges_vertex_count of this DataSource.
:type ges_vertex_count: int
"""
self._ges_vertex_count = ges_vertex_count
@property
def ges_edge_count(self):
"""Gets the ges_edge_count of this DataSource.
ges边的总数
:return: The ges_edge_count of this DataSource.
:rtype: int
"""
return self._ges_edge_count
@ges_edge_count.setter
def ges_edge_count(self, ges_edge_count):
"""Sets the ges_edge_count of this DataSource.
ges边的总数
:param ges_edge_count: The ges_edge_count of this DataSource.
:type ges_edge_count: int
"""
self._ges_edge_count = ges_edge_count
@property
def database_count(self):
"""Gets the database_count of this DataSource.
数据库总数
:return: The database_count of this DataSource.
:rtype: int
"""
return self._database_count
@database_count.setter
def database_count(self, database_count):
"""Sets the database_count of this DataSource.
数据库总数
:param database_count: The database_count of this DataSource.
:type database_count: int
"""
self._database_count = database_count
@property
def stream_count(self):
"""Gets the stream_count of this DataSource.
通道总数
:return: The stream_count of this DataSource.
:rtype: int
"""
return self._stream_count
@stream_count.setter
def stream_count(self, stream_count):
"""Sets the stream_count of this DataSource.
通道总数
:param stream_count: The stream_count of this DataSource.
:type stream_count: int
"""
self._stream_count = stream_count
@property
def table_count(self):
"""Gets the table_count of this DataSource.
表总数
:return: The table_count of this DataSource.
:rtype: int
"""
return self._table_count
@table_count.setter
def table_count(self, table_count):
"""Sets the table_count of this DataSource.
表总数
:param table_count: The table_count of this DataSource.
:type table_count: int
"""
self._table_count = table_count
@property
def data_size(self):
"""Gets the data_size of this DataSource.
数据大小
:return: The data_size of this DataSource.
:rtype: int
"""
return self._data_size
@data_size.setter
def data_size(self, data_size):
"""Sets the data_size of this DataSource.
数据大小
:param data_size: The data_size of this DataSource.
:type data_size: int
"""
self._data_size = data_size
@property
def databases(self):
"""Gets the databases of this DataSource.
数据库统计信息
:return: The databases of this DataSource.
:rtype: list[:class:`huaweicloudsdkdataartsstudio.v1.Database`]
"""
return self._databases
@databases.setter
def databases(self, databases):
"""Sets the databases of this DataSource.
数据库统计信息
:param databases: The databases of this DataSource.
:type databases: list[:class:`huaweicloudsdkdataartsstudio.v1.Database`]
"""
self._databases = databases
@property
def folders(self):
"""Gets the folders of this DataSource.
顶层目录统计信息
:return: The folders of this DataSource.
:rtype: list[:class:`huaweicloudsdkdataartsstudio.v1.ObsFolder`]
"""
return self._folders
@folders.setter
def folders(self, folders):
"""Sets the folders of this DataSource.
顶层目录统计信息
:param folders: The folders of this DataSource.
:type folders: list[:class:`huaweicloudsdkdataartsstudio.v1.ObsFolder`]
"""
self._folders = folders
@property
def css_indices(self):
"""Gets the css_indices of this DataSource.
css索引统计信息
:return: The css_indices of this DataSource.
:rtype: list[:class:`huaweicloudsdkdataartsstudio.v1.CssIndex`]
"""
return self._css_indices
@css_indices.setter
def css_indices(self, css_indices):
"""Sets the css_indices of this DataSource.
css索引统计信息
:param css_indices: The css_indices of this DataSource.
:type css_indices: list[:class:`huaweicloudsdkdataartsstudio.v1.CssIndex`]
"""
self._css_indices = css_indices
@property
def namespaces(self):
"""Gets the namespaces of this DataSource.
命名空间统计信息
:return: The namespaces of this DataSource.
:rtype: list[:class:`huaweicloudsdkdataartsstudio.v1.Namespace`]
"""
return self._namespaces
@namespaces.setter
def namespaces(self, namespaces):
"""Sets the namespaces of this DataSource.
命名空间统计信息
:param namespaces: The namespaces of this DataSource.
:type namespaces: list[:class:`huaweicloudsdkdataartsstudio.v1.Namespace`]
"""
self._namespaces = namespaces
@property
def dis_streams(self):
"""Gets the dis_streams of this DataSource.
通道统计信息
:return: The dis_streams of this DataSource.
:rtype: list[:class:`huaweicloudsdkdataartsstudio.v1.DisStream`]
"""
return self._dis_streams
@dis_streams.setter
def dis_streams(self, dis_streams):
"""Sets the dis_streams of this DataSource.
通道统计信息
:param dis_streams: The dis_streams of this DataSource.
:type dis_streams: list[:class:`huaweicloudsdkdataartsstudio.v1.DisStream`]
"""
self._dis_streams = dis_streams
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 only: force a utf-8 default encoding so the non-ASCII
        # field values serialise without UnicodeDecodeError.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        # Delegate to the JSON string representation.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DataSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
18,983 | f1bcc3273b364f7a1493c47cdc20a403cb79006e | from appium import webdriver
from selenium import webdriver
def get_driver():
    """Start a Selenium Firefox driver and open the TPshop login page.

    NOTE(review): this module imports `webdriver` from both appium and
    selenium; the second import shadows the first, so this is a desktop
    Selenium driver -- confirm whether the appium import is needed at all.
    """
    driver = webdriver.Firefox()
    driver.get("http://www.tpshop.com/Home/user/login.html")
    return driver
|
18,984 | 48dbbf722cf10fc860789f04a420a28de3e06187 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# Based on cython/tests/run/enumerate_T316.pyx
from numba import *
@autojit
def go_py_enumerate():
    """
    >>> go_py_enumerate()
    0 1
    1 2
    2 3
    3 4
    """
    # Basic enumerate over a range object.
    for i,k in enumerate(range(1,5)):
        print(i, k)

@autojit
def py_enumerate_list_index_target():
    """
    >>> py_enumerate_list_index_target()
    [0] 1
    [1] 2
    [2] 3
    [3] 4
    """
    # The index target is a subscript expression (target[0]), not a name.
    target = [None]
    for target[0],k in enumerate(range(1,5)):
        print(target, k)

@autojit
def go_py_enumerate_start():
    """
    >>> go_py_enumerate_start()
    5 1
    6 2
    7 3
    8 4
    """
    # enumerate() with an explicit start argument.
    for i,k in enumerate(list(range(1,5)), 5):
        print(i, k)

@autojit
def go_c_enumerate():
    """
    >>> go_c_enumerate()
    0 1
    1 2
    2 3
    3 4
    """
    for i,k in enumerate(range(1,5)):
        print(i, k)

@autojit
def go_c_enumerate_step():
    """
    >>> go_c_enumerate_step()
    0 1
    1 3
    2 5
    """
    # range() with a step of 2.
    for i,k in enumerate(range(1,7,2)):
        print(i, k)
# @autojit # TODO:
def py_enumerate_dict(d):
    """
    >>> py_enumerate_dict({})
    :: 55 99
    >>> py_enumerate_dict(dict(a=1, b=2, c=3))
    0 True
    1 True
    2 True
    :: 2 True
    """
    # i and k keep their sentinel values when the dict is empty.
    i = 55
    k = 99
    keys = list(d.keys())
    for i,k in enumerate(d):
        k = keys[i] == k
        print(i, k)
    print("::", i, k)

@autojit
def py_enumerate_break(t):
    """
    >>> py_enumerate_break([1,2,3,4])
    0 1
    :: 0 1
    """
    # Loop variables retain their first-iteration values after break.
    i,k = 55,99
    for i,k in enumerate(t):
        print(i, k)
        break
    print("::", i, k)

@autojit
def py_enumerate_return(t):
    """
    >>> py_enumerate_return([])
    :: 55 99
    >>> py_enumerate_return([1,2,3,4])
    0 1
    """
    # Early return skips the trailing print for non-empty input.
    i,k = 55,99
    for i,k in enumerate(t):
        print(i, k)
        return
    print("::", i, k)

@autojit
def py_enumerate_continue(t):
    """
    >>> py_enumerate_continue([1,2,3,4])
    0 1
    1 2
    2 3
    3 4
    :: 3 4
    """
    # continue does not disturb the enumerate targets.
    i,k = 55,99
    for i,k in enumerate(t):
        print(i, k)
        continue
    print("::", i, k)

@autojit
def empty_c_enumerate():
    """
    >>> empty_c_enumerate()
    (55, 99)
    """
    # An empty iterable leaves the sentinels untouched.
    i,k = 55,99
    for i,k in enumerate(range(0)):
        print(i, k)
    return i, k
# Not supported (yet)
# @autojit
# def single_target_enumerate():
#     """
#     >>> single_target_enumerate()
#     0 1
#     1 2
#     2 3
#     3 4
#     """
#     for t in enumerate(range(1,5)):
#         print(t[0], t[1])

# @autojit # TODO:
def multi_enumerate():
    """
    >>> multi_enumerate()
    0 0 0 1
    1 1 1 2
    2 2 2 3
    3 3 3 4
    """
    # Nested enumerate with nested tuple-unpacking targets.
    for a,(b,(c,d)) in enumerate(enumerate(enumerate(range(1,5)))):
        print(a,b,c,d)

# @autojit # TODO:
def multi_enumerate_start():
    """
    >>> multi_enumerate_start()
    0 2 0 1
    1 3 1 2
    2 4 2 3
    3 5 3 4
    """
    # Same shape, but the middle enumerate starts at 2.
    for a,(b,(c,d)) in enumerate(enumerate(enumerate(range(1,5)), 2)):
        print(a,b,c,d)

# @autojit # TODO:
def multi_c_enumerate():
    """
    >>> multi_c_enumerate()
    0 0 0 1
    1 1 1 2
    2 2 2 3
    3 3 3 4
    """
    for a,(b,(c,d)) in enumerate(enumerate(enumerate(range(1,5)))):
        print(a,b,c,d)
@autojit
def convert_target_enumerate(L):
    """
    >>> convert_target_enumerate([2,3,5])
    0 2
    1 3
    2 5
    """
    # enumerate over a runtime-typed argument.
    for a, b in enumerate(L):
        print(a,b)

@autojit
def convert_target_enumerate_start(L, n):
    """
    >>> convert_target_enumerate_start([2,3,5], 3)
    3 2
    4 3
    5 5
    """
    # Start value supplied as a runtime argument.
    for a, b in enumerate(L, n):
        print(a,b)

if __name__ == '__main__':
    import numba
    numba.testing.testmod()
|
18,985 | b8c55549f527866c1af22060d7d3cfc7a018be89 | # Generated by Django 2.2.8 on 2020-02-02 11:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Convert GroupScoreBoard to multi-table inheritance from ScoreBoard.

    Auto-generated: drops the model's own `id` primary key and replaces
    it with a `scoreboard_ptr` OneToOne parent link.
    """

    dependencies = [
        ('scoreboard', '0004_auto_20200202_0921'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='groupscoreboard',
            options={'base_manager_name': 'objects'},
        ),
        migrations.RemoveField(
            model_name='groupscoreboard',
            name='id',
        ),
        migrations.AddField(
            model_name='groupscoreboard',
            name='scoreboard_ptr',
            # default=None + preserve_default=False: the default is only
            # used to back-fill existing rows during this migration.
            field=models.OneToOneField(auto_created=True, default=None, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='scoreboard.ScoreBoard'),
            preserve_default=False,
        ),
    ]
|
18,986 | 681bc8e7284d96bffb7c38880bdaf3c71fd26118 | # -*- coding: utf-8 -*-
# Compass directions / regions, in Turkish (north, east, ... Anatolia).
locations = ["kuzey", "doğu", "batı", "güney", "kuzeybatı", "kuzeydoğu", "güneybatı", "güneydoğu", "anadolu"]
# Turkish provinces (plate order), plus the towns Ayvalık and Cunda at the end.
cities = ['Adana', 'Adıyaman', 'Afyon', 'Ağrı', 'Amasya', 'Ankara', 'Antalya', 'Artvin',
'Aydın', 'Balıkesir', 'Bilecik', 'Bingöl', 'Bitlis', 'Bolu', 'Burdur', 'Bursa', 'Çanakkale',
'Çankırı', 'Çorum', 'Denizli', 'Diyarbakır', 'Edirne', 'Elazığ', 'Erzincan', 'Erzurum', 'Eskişehir',
'Gaziantep', 'Giresun', 'Gümüşhane', 'Hakkari', 'Hatay', 'Isparta', 'Mersin', 'İstanbul', 'İzmir',
'Kars', 'Kastamonu', 'Kayseri', 'Kırklareli', 'Kırşehir', 'Kocaeli', 'Konya', 'Kütahya', 'Malatya',
'Manisa', 'Kahramanmaraş', 'Mardin', 'Muğla', 'Muş', 'Nevşehir', 'Niğde', 'Ordu', 'Rize', 'Sakarya',
'Samsun', 'Siirt', 'Sinop', 'Sivas', 'Tekirdağ', 'Tokat', 'Trabzon', 'Tunceli', 'Şanlıurfa', 'Uşak',
'Van', 'Yozgat', 'Zonguldak', 'Aksaray', 'Bayburt', 'Karaman', 'Kırıkkale', 'Batman', 'Şırnak',
'Bartın', 'Ardahan', 'Iğdır', 'Yalova', 'Karabük', 'Kilis', 'Osmaniye', 'Düzce', "Ayvalık",
"Cunda"]
# Country (and territory) names in Turkish. Also contains non-country
# entries such as 'Avrupa Birliği' (EU) -- presumably deliberate
# distractors for the quiz; verify before "cleaning" this data.
countries = [ 'Türkiye', 'ABD Virgin Adaları', 'Afganistan', 'Aland Adaları', 'Almanya', 'Amerika Birleşik Devletleri',
'ABD',
'Amerika Birleşik Devletleri Küçük Dış Adaları', 'Amerikan Samoası', 'Andora', 'Angola', 'Anguilla',
'Antarktika', 'Antigua ve Barbuda', 'Arjantin', 'Arnavutluk', 'Aruba', 'Avrupa Birliği', 'Avustralya',
'Avusturya', 'Azerbaycan', 'Bahamalar', 'Bahreyn', 'Bangladeş', 'Barbados', 'Batı Sahara', 'Belize',
'Belçika', 'Benin', 'Bermuda', 'Beyaz Rusya', 'Bhutan', 'Bilinmeyen veya Geçersiz Bölge',
'Birleşik Arap Emirlikleri', 'Birleşik Krallık', 'Bolivya', 'Bosna Hersek', 'Botsvana', 'Bouvet Adası',
'Brezilya', 'Brunei', 'Bulgaristan', 'Burkina Faso', 'Burundi', 'Cape Verde', 'Cebelitarık', 'Cezayir',
'Christmas Adası', 'Cibuti', 'Cocos Adaları', 'Cook Adaları', 'Çad', 'Çek Cumhuriyeti', 'Çin',
'Danimarka', 'Dominik', 'Dominik Cumhuriyeti', 'Doğu Timor', 'Ekvator', 'Ekvator Ginesi', 'El Salvador',
'Endonezya', 'Eritre', 'Ermenistan', 'Estonya', 'Etiyopya', 'Falkland Adaları (Malvinalar)',
'Faroe Adaları', 'Fas', 'Fiji', 'Fildişi Sahilleri', 'Filipinler', 'Filistin Bölgesi', 'Finlandiya',
'Fransa', 'Fransız Guyanası', 'Fransız Güney Bölgeleri', 'Fransız Polinezyası', 'Gabon', 'Gambia',
'Gana', 'Gine', 'Gine-Bissau', 'Granada', 'Grönland', 'Guadeloupe', 'Guam', 'Guatemala', 'Guernsey',
'Guyana', 'Güney Afrika', 'Güney Georgia ve Güney Sandwich Adaları', 'Güney Kore',
'Güney Kıbrıs Rum Kesimi', 'Gürcistan', 'Haiti', 'Heard Adası ve McDonald Adaları', 'Hindistan',
'Hint Okyanusu İngiliz Bölgesi', 'Hollanda', 'Hollanda Antilleri', 'Honduras', 'Hong Kong SAR - Çin',
'Hırvatistan', 'Irak', 'İngiliz Virgin Adaları', 'İran', 'İrlanda', 'İspanya', 'İsrail', 'İsveç',
'İsviçre', 'İtalya', 'İzlanda', 'Jamaika', 'Japonya', 'Jersey', 'Kamboçya', 'Kamerun', 'Kanada',
'Karadağ', 'Katar', 'Kayman Adaları', 'Kazakistan', 'Kenya', 'Kiribati', 'Kolombiya', 'Komorlar',
'Kongo', 'Kongo Demokratik Cumhuriyeti', 'Kosta Rika', 'Kuveyt', 'Kuzey Kore', 'Kuzey Mariana Adaları',
'Küba', 'Kırgızistan', 'Laos', 'Lesotho', 'Letonya', 'Liberya', 'Libya', 'Liechtenstein', 'Litvanya',
'Lübnan', 'Lüksemburg', 'Macaristan', 'Madagaskar', 'Makao S.A.R. Çin', 'Makedonya', 'Malavi',
'Maldivler', 'Malezya', 'Mali', 'Malta', 'Man Adası', 'Marshall Adaları', 'Martinik', 'Mauritius',
'Mayotte', 'Meksika', 'Mikronezya Federal Eyaletleri', 'Moldovya Cumhuriyeti', 'Monako', 'Montserrat',
'Moritanya', 'Mozambik', 'Moğolistan', 'Myanmar', 'Mısır', 'Namibya', 'Nauru', 'Nepal', 'Nijer',
'Nijerya', 'Nikaragua', 'Niue', 'Norfolk Adası', 'Norveç', 'Orta Afrika Cumhuriyeti', 'Özbekistan',
'Pakistan', 'Palau', 'Panama', 'Papua Yeni Gine', 'Paraguay', 'Peru', 'Pitcairn', 'Polonya',
'Portekiz', 'Porto Riko', 'Reunion', 'Romanya', 'Ruanda', 'Rusya Federasyonu', 'Saint Helena',
'Saint Kitts ve Nevis', 'Saint Lucia', 'Saint Pierre ve Miquelon', 'Saint Vincent ve Grenadinler',
'Samoa', 'San Marino', 'Sao Tome ve Principe', 'Senegal', 'Seyşeller', 'Sierra Leone', 'Singapur',
'Slovakya', 'Slovenya', 'Solomon Adaları', 'Somali', 'Sri Lanka', 'Sudan', 'Surinam', 'Suriye',
'Suudi Arabistan', 'Svalbard ve Jan Mayen', 'Svaziland', 'Sırbistan', 'Sırbistan-Karadağ', 'Şili',
'Tacikistan', 'Tanzanya', 'Tayland', 'Tayvan', 'Togo', 'Tokelau', 'Tonga', 'Trinidad ve Tobago',
'Tunus', 'Turks ve Caicos Adaları', 'Tuvalu', 'Türkmenistan', 'Uganda', 'Ukrayna', 'Umman', 'Uruguay',
'Uzak Okyanusya', 'Ürdün', 'Vanuatu', 'Vatikan', 'Venezuela', 'Vietnam', 'Wallis ve Futuna', 'Yemen',
'Yeni Kaledonya', 'Yeni Zelanda', 'Yunanistan', 'Zambiya', 'Zimbabve']
# Continent names in Turkish, with some overlapping entries
# ("Amerika" as well as North/South America; Australia twice-ish).
continents = ["Asya", "Kuzey Amerika", "Güney Amerika", "Amerika", "Afrika", "Antartika", "Okyanusya", "Avrupa",
"Avustralya"]
# World capitals. Several names are truncated by the data source (e.g.
# "Bras" for Brasília, "Andorra la Vell"); the last four entries are
# non-capitals -- presumably quiz distractors, verify before editing.
capitals = ["Andorra la Vell" ,
"Kabul" ,
"St. John's" ,
"Tirana" ,
"Yerevan" ,
"Luanda" ,
"Buenos Aires" ,
"Vienna" ,
"Canberra" ,
"Baku" ,
"Bridgetown" ,
"Dhaka" ,
"Brussels" ,
"Ouagadougou" ,
"Sofia" ,
"Manama" ,
"Bujumbura" ,
"Porto-Novo" ,
"Bandar Seri Beg" ,
"Sucre" ,
"Bras" ,
"Nassau" ,
"Thimphu" ,
"Gaborone" ,
"Minsk" ,
"Belmopan" ,
"Ottawa" ,
"Kinshasa" ,
"Brazzaville" ,
"Yamoussoukro" ,
"Santiago" ,
"Yaound" ,
"Beijing" ,
"Bogot" ,
"San Jos" ,
"Havana" ,
"Praia" ,
"Nicosia" ,
"Prague" ,
"Berlin" ,
"Djibouti City" ,
"Copenhagen" ,
"Roseau" ,
"Santo Domingo" ,
"Quito" ,
"Tallinn" ,
"Cairo" ,
"Asmara" ,
"Addis Ababa" ,
"Helsinki" ,
"Suva" ,
"Paris" ,
"Libreville" ,
"Tbilisi" ,
"Accra" ,
"Banjul" ,
"Conakry" ,
"Athens" ,
"Guatemala City" ,
"Port-au-Prince" ,
"Bissau" ,
"Georgetown" ,
"Tegucigalpa" ,
"Budapest" ,
"Jakarta" ,
"Dublin" ,
"Jerusalem" ,
"New Delhi" ,
"Baghdad" ,
"Tehran" ,
"Reykjav" ,
"Rome" ,
"Kingston" ,
"Amman" ,
"Tokyo" ,
"Nairobi" ,
"Bishkek" ,
"Tarawa" ,
"Pyongyang" ,
"Seoul" ,
"Kuwait City" ,
"Beirut" ,
"Vaduz" ,
"Monrovia" ,
"Maseru" ,
"Vilnius" ,
"Luxembourg City" ,
"Riga" ,
"Tripoli" ,
"Antananarivo" ,
"Majuro" ,
"Skopje" ,
"Bamako" ,
"Naypyidaw" ,
"Ulaanbaatar" ,
"Nouakchott" ,
"Valletta" ,
"Port Louis" ,
"Malé" ,
"Lilongwe" ,
"Mexico City" ,
"Kuala Lumpur" ,
"Maputo" ,
"Windhoek" ,
"Niamey" ,
"Abuja" ,
"Managua" ,
"Amsterdam" ,
"Oslo" ,
"Kathmandu" ,
"Yaren" ,
"Wellington" ,
"Muscat" ,
"Panama City" ,
"Lima" ,
"Port Moresby" ,
"Manila" ,
"Islamabad" ,
"Warsaw" ,
"Lisbon" ,
"Ngerulmud" ,
"Asunci" ,
"Doha" ,
"Bucharest" ,
"Moscow" ,
"Kigali" ,
"Riyadh" ,
"Honiara" ,
"Victoria" ,
"Khartoum" ,
"Stockholm" ,
"Singapore" ,
"Ljubljana" ,
"Bratislava" ,
"Freetown" ,
"San Marino" ,
"Dakar" ,
"Mogadishu" ,
"Paramaribo" ,
"Damascus" ,
"Lom" ,
"Bangkok" ,
"Dushanbe" ,
"Ashgabat" ,
"Tunis" ,
"Nuku" ,
"Ankara" ,
"Port of Spain" ,
"Funafuti" ,
"Dodoma" ,
"Kiev" ,
"Kampala" ,
"Washington, D.C" ,
"Montevideo" ,
"Tashkent" ,
"Vatican City" ,
"Caracas" ,
"Hanoi" ,
"Port Vila" ,
"Sana'a" ,
"Lusaka" ,
"Harare" ,
"Algiers" ,
"Sarajevo" ,
"Phnom Penh" ,
"Bangui" ,
"N'Djamena" ,
"Moroni" ,
"Zagreb" ,
"Dili" ,
"San Salvador" ,
"Malabo" ,
"St. George's" ,
"Astana" ,
"Vientiane" ,
"Palikir" ,
"Chi" ,
"Monaco" ,
"Podgorica" ,
"Rabat" ,
"Basseterre" ,
"Castries" ,
"Kingstown" ,
"Apia" ,
"Belgrade" ,
"Pretoria" ,
"Madrid" ,
"Sri Jayewardene" ,
"Mbabane" ,
"Bern" ,
"Abu Dhabi" ,
"London" ,
"New York",
"Hollywood",
"Visconsin",
"Michigan"
]
|
# list.index returns the position of the FIRST matching element only.
test_list_1 = ['100', '200', '300', '200', '100']
first_index_of_200 = test_list_1.index('200')
print(first_index_of_200)
|
18,988 | e35788dfea52fa58e55ea8497344cebaa433955f | __author__ = 'MrHowe'
def person(name, age, **kw):
    """Print a person's info; any passed 'city' keyword is forced to Shanghai.

    Demonstrates that **kw collects extra keyword arguments into a dict
    that the function may freely mutate.
    """
    if 'city' in kw:
        kw['city'] = 'Shanghai'
    print('name:', name, 'age:', age, 'other:', kw)


person('Bob', 35, city='Beijing')


def fun(a, b, *, c, **f):
    """Print the positionals, the keyword-only `c`, and any extra keywords.

    Bug fix: the original `def fun(a,b,*,c,*f)` is a SyntaxError -- only
    one bare `*` is allowed per signature. Extra keyword arguments are
    collected with `**f` instead.
    """
    print(a, b, c, f)
|
18,989 | 502929c0cb1100a0ea2909ba8163d00fb8cc2b60 | import os, machine, display, easydraw, time, neopixel
def configureWakeupSource():
    """Arm deep-sleep wakeup on GPIO39 going low (PCA9555 interrupt line)."""
    machine.RTC().wake_on_ext0(pin = machine.Pin(39), level = 0) # pca9555 interrupt
    return True
def prepareForSleep():
    """Quiesce peripherals and arm the wakeup source before deep sleep."""
    try:
        os.umountsd()
    except:
        pass  # best effort: the SD card may not be mounted
    neopixel.send(bytes([0]*24)) # Turn off LEDs
    configureWakeupSource()
def prepareForWakeup():
    """Re-mount the SD card after waking from sleep."""
    time.sleep(0.05) # Give the SD card time to initialize itself
    os.mountsd()
def showLoadingScreen(app=""):
    """Best-effort splash screen naming the app that is being launched."""
    try:
        display.drawFill(0x000000)
        display.drawText( 0, 28, "LOADING APP...", 0xFFFFFF, "org18")
        display.drawText( 0, 52, app, 0xFFFFFF, "org18")
        display.flush()
    except:
        pass  # display may be unavailable; loading proceeds regardless
def showMessage(message="", icon=None):
    """Show `message` centred on the display, with an optional icon."""
    easydraw.messageCentered(message, False, icon)
def setLedPower(state):
    # Intentional no-op on this board revision; kept for API compatibility
    # with badge variants that gate LED power -- TODO confirm.
    pass
|
18,990 | 1b6917c7a7278041316b34a739835eca69058ba1 | from django.conf.urls import include, url
from django.conf import settings
from django.contrib.staticfiles import views
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views
app_name = 'survey'  # URL namespace used by reverse() / {% url %}

# Route table for the survey app; names match the view functions above.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'),
    url(r'^results/$', views.results, name='results'),
    url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
    url(r'^submit/$', views.submit, name='submit'),
    url(r'^cancel/$', views.cancel, name='cancel'),
]
18,991 | d1a3f36fccf6346e764748c1ca3967b5a4288171 | from django.urls import path,include,re_path
from .views import UserInfoView,ImageUploadView,ChangePwdView,SendEmailView,UpdateEmailView,MyCourseView,MyFavOrgView
from .views import MyFavTeacherView,MyFavCourseView,MyMessageView
app_name = 'users'  # URL namespace for the users app

urlpatterns = [
    # User profile page
    path('info/',UserInfoView.as_view(),name="user_info"),
    # Avatar image upload
    path('image/upload/',ImageUploadView.as_view(),name="image_upload"),
    # Change password
    path('update/pwd/',ChangePwdView.as_view(),name="update_pwd"),
    # Send email verification code
    path('sendemail_code/',SendEmailView.as_view(),name="sendemail_code"),
    # Change email address
    path('update_email/',UpdateEmailView.as_view(),name="update_email"),
    # The user's own courses
    path('mycourse/',MyCourseView.as_view(),name="mycourse"),
    # Favourited organisations
    path('myfav/org',MyFavOrgView.as_view(),name="myfav_org"),
    # Favourited teachers
    path('myfav/teacher',MyFavTeacherView.as_view(),name="myfav_teacher"),
    # Favourited courses
    path('myfav/course',MyFavCourseView.as_view(),name="myfav_course"),
    # Personal messages
    path('mymessage/',MyMessageView.as_view(),name="mymessage"),
]
|
18,992 | 8677113267c071ab009eb0b5ac9fcaf64f31d6bd | '''
4. Ingresar dos valores enteros y sumarlos.
'''
# Fecha: 06/06/2019
# Autor: Agustin Arce
# Programa: Suma de numeros enteros
# Declaracion de variables
numero1 = 0
numero2 = 0
resultado = 0
# Ingreso de datos
numero1 = int(input("Ingrese primer numero a sumar: "))
numero2 = int(input("Ingrese segundo numero a sumar: "))
# Proceso de suma
resultado = numero1 + numero2
# Muestra de resultado
print(resultado) |
18,993 | dc2b855ee7da17ba81a69fe47e4c9918924859d4 | from itertools import permutations
n = int(input())
cost= [list(map(int, input().split())) for _ in range(n)]
order = [i for i in range(n)]
order_sum=list(permutations(order[1:n]))
ans=100000000
for i in order_sum:
i=list(i)
i.append(order[0])
sum=0
flag=0
for j in range(1,n+1):
if j==n:
j=0
if cost[i[j-1]][i[j]]==0:
flag=1
break
sum+=cost[i[j-1]][i[j]]
if flag==1:
continue
ans=min(ans,sum)
print(ans) |
18,994 | 3beea3ef204cd89639726803a9e022efe22d2c78 | from django.shortcuts import render, HttpResponse
from phoneapp.models import phoneSpecs
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request, "base.html")
def phoneSearch(request):
    # NOTE(review): dead/incomplete view -- `.filter` is referenced but
    # never called, `result_set` is unused, and nothing is returned
    # (HTTP 500 if routed). Confirm intent or remove.
    result_set = phoneSpecs.objects.filter
def search(request):
    """Name search: renders results for ?q=..., or the form with errors.

    NOTE(review): request.GET['q'] raises MultiValueDictKeyError when the
    parameter is absent entirely; only the empty-string case reaches the
    'Enter a search term.' branch.
    """
    errors = []
    if request.GET['q']:
        q = request.GET['q']
        print('query', q)  # debug output; consider logging instead
        if len(q) > 20:
            errors.append('Please enter at most 20 characters.')
        else:
            # Case-insensitive substring match on the phone name.
            phones = phoneSpecs.objects.filter(name__icontains=q)
            return render(request, 'searchresults.html', {'phones': phones, 'query': q})
    else:
        errors.append('Enter a search term.')
    return render(request, 'searchform.html', {'errors': errors})
def filter(request):
    """Price-range filter driven by ?minValue=...&maxValue=...

    NOTE(review): shadows the builtin `filter`. Returns None (HTTP 500)
    when minValue is non-empty but maxValue is empty or the length guard
    trips, and GET lookups raise when either parameter is absent --
    confirm the intended flow before relying on this view.
    """
    errors = []
    if request.GET['minValue']:
        q = request.GET['minValue']
        print('query', q)  # debug output
        if len(q) > 20:
            errors.append('Please enter proper characters.')
        else:
            phones=pricefilterMin(q)
            if request.GET['maxValue']:
                a = request.GET['maxValue']
                print('query', a)
                phoness=pricefilterMax(a,phones)
                return render(request, 'filterresults.html', {'phones': phoness, 'min': q, 'max':a})
    else:
        errors.append('Enter a search term.')
        return render(request, 'filterresults.html', {'errors': errors})
def pricefilterMin(q):
    """Return phones priced at or above `q` (a numeric string).

    Bug fix: the original rebuilt the same queryset int(q) times in a
    loop -- the final pass effectively excluded price < q-1 (off by one)
    -- and crashed with UnboundLocalError whenever int(q) <= 0. A single
    exclude expresses the intended minimum-price filter.
    """
    return phoneSpecs.objects.exclude(price__lt=int(q))
def pricefilterMax(q, phones):
    """Narrow `phones` to those priced at or below `q` (a numeric string).

    Bug fix: the original ran the identical exclude int(q) times in a
    loop and crashed with UnboundLocalError whenever int(q) <= 0; one
    exclude is equivalent and always defined.
    """
    return phones.exclude(price__gt=int(q))
def filterform(request):
    """Render the empty price-filter form."""
    return render(request, 'filterform.html')
def searchform(request):
    """Render the empty name-search form."""
    return render(request, 'searchform.html')
def slidevalue(request):
    """Return a plain-text placeholder response.

    Bug fix: render() expects a template name as its second argument;
    the original passed an HttpResponse object, which fails at request
    time. Return the HttpResponse directly instead.
    """
    return HttpResponse("ghgg")
18,995 | 0eab9d2ee1e5724156979951b098501cae5d236a | import smtplib
from email.message import EmailMessage
import common
def send_verification(user):
    """Email a verification link for `user` (a dict with 'id' and 'email').

    Sends through Gmail SMTP on port 587 using credentials taken from
    common.config(). NOTE(review): the verification host is hard-coded to
    localhost:4200 and SMTP failures are unhandled -- confirm for prod.
    """
    link = 'http://localhost:4200/verify/' + str(user['id'])
    msg = EmailMessage()
    msg['Subject'] = 'Verification'
    msg['From'] = 'flx.grimm@gmail.com'
    msg['To'] = user['email']
    msg.set_content(
        'Thank you for signing up to our chat!\n'
        'To Proceed you need to verify your E-Mail by clicking on the following link:\n'
        '' + link)
    # Open the session, upgrade to TLS, authenticate, send, close.
    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    smtp.ehlo()
    smtp.starttls()
    cfg = common.config()
    smtp.login(cfg['email'], cfg['password'])
    smtp.send_message(msg)
    smtp.quit()
def valid_email(email):
    """Rough syntactic email check, same rule as before.

    The address must contain an '@', and the text between the first and
    second '@' (or to the end of the string) must contain a dot.
    """
    parts = email.split('@')
    if len(parts) < 2:
        return False
    return '.' in parts[1]
|
18,996 | 9a497a60442dcbaaa5cc8a161b077c47cbf16138 | from django.db import models
class Project(models.Model):
    """
    A site (object) at which temperature measurements are taken.
    """
    name = models.TextField()
    latitude = models.FloatField()
    longitude = models.FloatField()
    # Timestamps maintained automatically by the ORM.
    created_at = models.DateTimeField(
        auto_now_add=True
    )
    updated_at = models.DateTimeField(
        auto_now=True
    )
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = 'объект'
        verbose_name_plural = 'объекты'
class Measurement(models.Model):
    """
    A single temperature reading taken at a project site.
    """
    value = models.FloatField()
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    # Timestamps maintained automatically by the ORM.
    created_at = models.DateTimeField(
        auto_now_add=True
    )
    updated_at = models.DateTimeField(
        auto_now=True
    )
    def __str__(self):
        # Bug fix: __str__ must return a str; the original returned the
        # raw float, raising TypeError wherever Django stringifies it
        # (admin lists, templates, repr).
        return str(self.value)
    class Meta:
        verbose_name = 'значение температуры'
        verbose_name_plural = 'значения температур'
|
18,997 | 6264f908c9c02f47c21725536eaf1622facb406c |
import cv2
import tensorflow as tf
import keras as ks
import numpy as np
# Class labels, in the output order of the trained CNN.
emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')


def detect(gray_scale, frame, face_cascade, model):
    """Box every detected face in `frame` and label it with the CNN's emotion.

    gray_scale: single-channel copy of `frame` used for cascade detection.
    Returns `frame`, which is also mutated in place.
    """
    # detectMultiScale returns (x, y, w, h) rectangles.
    detected_faces = face_cascade.detectMultiScale(gray_scale)
    for (column, row, width, height) in detected_faces:
        # Draw the bounding box on the colour frame.
        cv2.rectangle(
            frame,
            (column, row),
            (column + width, row + height),
            (0, 255, 0),
            2
        )
        # Bug fix: numpy indexing is [rows, cols] = [y, x]. The original
        # sliced rows by `width` and columns by `height`, cropping the
        # wrong region whenever a detection is not square.
        face_region = gray_scale[row : row + height, column : column + width]
        face_region = cv2.resize(face_region, (48, 48))
        img_arr = tf.keras.preprocessing.image.img_to_array(face_region)
        img_arr = np.expand_dims(img_arr, axis=0)
        img_arr /= 255  # scale pixels to [0, 1]
        predictions = model.predict(img_arr)
        max_index = np.argmax(predictions[0])
        predicted_emotion = emotions[max_index]
        # Bug fix: cv2.putText takes an (x, y) origin; the original passed
        # (row, column), drawing the label at a transposed position.
        cv2.putText(frame, predicted_emotion, (int(column), int(row)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    return frame
def main():
    """Capture video, run face/emotion detection per frame, display results."""
    # Cascade of classifiers that detect faces based on Haar features.
    # NOTE(review): doubled `face_cascade = face_cascade =` is redundant.
    face_cascade = face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    # Trained CNN model loaded from disk.
    model = ks.models.load_model('CNN_lowest')
    # Capture from an IP-webcam stream (local camera variant commented).
    vidstream = cv2.VideoCapture('http://10.0.0.142:4747/video') #cv2.VideoCapture(0, cv2.CAP_DSHOW)
    while True:
        # Grab a frame, convert to gray scale, annotate faces in place.
        _, frame = vidstream.read()
        gray_scale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect() mutates `frame`; its return value is unused here.
        result = detect(gray_scale, frame, face_cascade, model)
        # Show the annotated frame.
        cv2.imshow('Detected', frame)
        # 'q' closes the window and exits the loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
    exit()
if __name__ == "__main__":
    main()
|
18,998 | 45af02969ee3654a24e9adf1dd6ed6ae9ec68197 | import turtle
# Tutorial script: teaches `for` loops by drawing shapes with turtle.
# in code, we use loops to repeat a task
# we are going to have some fun in this module by drawing objects
# we will use loops to draw some of our objects
# hello turtle
# turtle is a python library that lets you draw
# you can probably guess what some of the turtle commands do:
# right(x) --> rotate right x degrees
# left(x) --> rotate left x degrees
# color("x") --> change pen color to "x"
# forward(x) --> move forward x
# backward(x) --> move backward x
# how would we get turtle to draw a square?
turtle.clearscreen()
turtle.color("green")
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
# we are basically repeating those first 2 lines 4 times
# so we could just do this instead:
turtle.clearscreen()
turtle.color("blue")
for steps in range(4):
    turtle.forward(100)
    turtle.right(90)
# only the indented code is repeated
turtle.clearscreen()
turtle.color("red")
for steps in range(4):
    turtle.forward(100)
    turtle.right(90)
turtle.forward(200)
# you can have lots of fun when you put a loop inside another loop
turtle.clearscreen()
turtle.color("yellow")
for steps in range(4):
    turtle.forward(100)
    turtle.right(90)
    for more_steps in range(4):
        turtle.forward(50)
        turtle.right(90)
# just for fun
turtle.clearscreen()
turtle.color("purple")
for steps in range(5):
    turtle.forward(100)
    turtle.right(360 / 5)
    for more_steps in range(5):
        turtle.forward(50)
        turtle.right(360 / 5)
# you can also use a variable to decide the number of sides our object will have
turtle.clearscreen()
turtle.color("brown")
sides = 7
for steps in range(sides):
    turtle.forward(100)
    turtle.right(360 / sides)
    for more_steps in range(5):
        turtle.forward(50)
        turtle.right(360 / sides)
# you can look at the loop values within the loop
for steps in range(4):
    print(steps)
# it starts with 0, so it's gonna print 0, 1, 2, 3 (4 times)
# if you need to start counting from "1" you can specify numbers to count to and from
for steps in range(1, 4):
    print(steps)
# it executes up until for, so it's gonna print 1, 2, 3 (3 times)
# you can also tell the loop to skip values by specifying a step
for steps in range(1, 10, 2):
    print(steps)
# it executes from 1 up until 10, so it would print 1, 2, 3, 4, 5, 6, 7, 8, 9
# but since it increments 2 by 2, it would actually print 1, 3, 5, 7, 9
# one of the cool things about python is the way you can tell it exactly what values
# you want to use in the loop
for steps in [1, 2, 3, 4, 5]:
    print(steps)
# in this case, yes, it will execute for the last value
# so this would print 1, 2, 3, 4, 5
# and you don't have to use numbers!
turtle.clearscreen()
turtle.color("black")
for steps in ["red", "blue", "purple", "black"]:
    turtle.color(steps)
    turtle.forward(100)
    turtle.right(90)
# you can even mix up different data types, for example, numbers and strings, but it may raise some errors
for steps in ["red", "blue", "purple", "black", 8]:
    print(steps)
# Your challenge
# get turtle to draw an octagon
# hint: to calculate the angle, you take 360 degrees and divide it by the number of sides of the shape you are drawing.
# extra challenge: let the user specify how many sides the object will have and draw whatever they ask
# double bonus challenge: add a nested loop to draw a smaller version of the object inside
# regular challenge
turtle.clearscreen()
turtle.color("red")
sides = 8
for steps in range(sides):
    turtle.forward(100)
    turtle.right(360 / sides)
    for more_steps in range(sides):
        turtle.forward(50)
        turtle.right(360 / sides)
# extra challenge
turtle.clearscreen()
turtle.color("orange")
sides = int(input("How many sides do you want? "))
for steps in range(sides):
    turtle.forward(100)
    turtle.right(360 / sides)
    for more_steps in range(sides):
        turtle.forward(50)
        turtle.right(360 / sides)
# double bonus challenge
turtle.clearscreen()
turtle.color("blue")
sides = int(input("How many sides do you want? "))
for steps in range(sides):
    turtle.forward(100)
    turtle.right(360 / sides)
    for more_steps in range(sides):
        turtle.forward(50)
        turtle.right(360 / sides)
        for even_more_steps in range(sides):
            turtle.forward(25)
            turtle.right(360 / sides)
|
18,999 | d18aaa3a872e364d44243f72b46f00125fed4009 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import jinja2
import webapp2
import hashlib
import hmac
import logging
from google.appengine.ext import db
SECRET = 'imsosecret'  # NOTE(review): hard-coded signing secret -- move to config

# Jinja2 environment rooted at ./templates, with autoescaping enabled.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
def hash_str(s):
    """Return the keyed HMAC hex digest of `s` for cookie signing.

    NOTE(review): Python 2 style -- hmac.new defaults to MD5 and takes a
    str key. Weak for signing; switching to SHA-256 via `digestmod` would
    invalidate every existing cookie, so coordinate any change.
    """
    return hmac.new(SECRET, s).hexdigest()
def make_secure_val(s):
    """Return the cookie payload: the value plus its keyed hash, '|'-separated."""
    digest = hash_str(s)
    return "%s|%s" % (s, digest)
def check_secure_val(h):
    """Return the embedded value if `h` carries a valid signature, else None."""
    val = h.partition('|')[0]
    if make_secure_val(val) == h:
        return val
class Handler(webapp2.RequestHandler):
    """Base handler: jinja2 rendering helpers plus form/login validation."""

    # rendering functions
    def write(self, *a, **kw):
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        """Render `template` with `params` and return the resulting string."""
        t = jinja_env.get_template(template)
        return t.render(params)

    def render(self, template, **kw):
        """Render `template` straight into the response body."""
        self.write(self.render_str(template, **kw))

    # validation functions
    def validate_username(self, username):
        """Return True when no user with this name exists yet.

        Uses an indexed datastore query (as validate_login already did)
        instead of the original loop over every User entity -- same
        result, one lookup instead of a full-table scan.
        """
        return User.all().filter('username =', username).get() is None

    def validate_password(self, password):
        """Passwords must be at least 3 characters long."""
        return len(password) >= 3

    def validate_vrfypass(self, vrfypass, password):
        """The confirmation field must match the password exactly."""
        return vrfypass == password

    def validate_email(self, email):
        """Dummy check: the address contains exactly one '@'."""
        return email.count('@') == 1

    def validate_login(self, username, password):
        """Return True for a matching username/password pair.

        Returns False on a wrong password and None when the user does not
        exist (both falsy, matching the original's behaviour).
        NOTE(review): passwords are stored and compared in plain text.
        """
        user = User.all().filter('username =', username).get()
        if user:
            return user.password == password
# Gql model
class User(db.Model):
    """Datastore entity for a registered account.

    NOTE(review): `password` is stored in plain text -- should be hashed.
    """
    username = db.StringProperty(required = True)
    password = db.StringProperty(required = True)
    email = db.EmailProperty(required = False)
class Welcome(Handler):
    """Greets a logged-in user; bounces anonymous visitors to /signup."""

    def get(self):
        cookie = self.request.cookies.get('my_cookie_name')
        # Bug fix: webapp2's redirect() does not abort the handler. The
        # original fell through after redirecting and then hit an
        # UnboundLocalError on cookie_username -- return explicitly.
        if not cookie:
            return self.redirect('/signup')
        cookie_val = check_secure_val(cookie)
        if not cookie_val:
            return self.redirect('/signup')
        self.render("welcome.html", name = str(cookie_val))
class SignUp(Handler):
    """Registration form: validates input, stores the user, sets the cookie."""

    def get(self):
        self.render("signup.html")

    def post(self):
        user_username = self.request.get("username")
        user_password = self.request.get("password")
        user_vrfypass = self.request.get("verify")
        user_email = self.request.get("email")

        # Guard clauses replace the original nested if-pyramid; the error
        # precedence (username, password, verify) is unchanged.
        if not self.validate_username(user_username):
            return self.render('signup.html', username=user_username,
                               email=user_email, error_username='User already exists.')
        if not self.validate_password(user_password):
            return self.render('signup.html', username=user_username,
                               email=user_email, error_pass="Password error!")
        if not self.validate_vrfypass(user_vrfypass, user_password):
            return self.render('signup.html', username=user_username,
                               email=user_email, error_vrfy="Your password didn't match")

        # An invalid optional email is dropped rather than rejected.
        if user_email and not self.validate_email(user_email):
            user_email = None

        # NOTE(review): password stored in plain text -- should be hashed.
        u = User(username=user_username, password=user_password, email=user_email)
        u.put()

        secure_username = make_secure_val(str(user_username))
        # Bug fix: cookie attributes must be ';'-separated. The original
        # emitted '<value> Path=/' (space, no semicolon), which corrupts
        # the cookie value instead of setting a Path attribute.
        self.response.headers.add_header(
            'Set-Cookie', 'my_cookie_name=%s; Path=/' % secure_username)
        self.redirect("/welcome")
class Login(Handler):
    """Login form; on success sets the signed username cookie."""

    def get(self):
        self.render('login.html')

    def post(self):
        username = self.request.get('username')
        password = self.request.get('password')
        if not self.validate_login(username, password):
            return self.render('login.html', username=username, error='Invalid Login')
        secure_username = make_secure_val(str(username))
        # Bug fix: ';' between the cookie value and the Path attribute.
        # The original used a space, leaking ' Path=/' into the value.
        self.response.headers.add_header(
            'Set-Cookie', 'my_cookie_name=%s; Path=/' % secure_username)
        self.redirect('/welcome')
class Logout(Handler):
    """Clears the auth cookie and returns the visitor to the signup page."""
    def get(self):
        # delete cookie
        self.response.delete_cookie('my_cookie_name')
        # redirect
        self.redirect('/signup')
# URL routing table for the WSGI application.
app = webapp2.WSGIApplication([
    ('/signup', SignUp),
    ('/welcome', Welcome),
    ('/login', Login),
    ('/logout', Logout)
], debug=True)  # debug=True shows stack traces in the browser; disable in prod
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.