blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4c3fd20fe91df1a726de4f0de6047ffbf3b2668 | 9420a853d13ab74df55400c1b8896881bdaf22da | /pachong4.py | b03249d5c9b4d5d3a8510dbd6c60a80a67c8c586 | [] | no_license | cqcmdwym/learn_python | d7858f53f4f49a3b0a87e5111fbd41cd3b7d35d7 | d5cd4cb481ec5e98bfedc5264d4156ab50d40ab1 | refs/heads/master | 2020-04-14T04:56:32.991714 | 2019-01-01T06:03:14 | 2019-01-01T06:03:14 | 163,649,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | import requests
url = 'http://httpbin.org/get'
data = {'key':'value','abc':'xyz'}
# GET request: requests URL-encodes the dict into the query string, no extra handling needed
response = requests.get(url,data)
print(response.text)
url = 'http://httpbin.org/post'
response = requests.post(url,data)
# parse the response body as JSON
print(response.json()) | [
"chenqi@chenqideMacBook-Pro.local"
] | chenqi@chenqideMacBook-Pro.local |
699fa8f775535c777a6233018731e0119e9f7461 | 81f0c8c0771d72f17606de18f00c445012754142 | /hackerearth/call_process.py | 803b7b406bab66358dd9deadb0dabc6e7d1cf9ab | [] | no_license | amarish-kumar/Practice | be739bed2db45d2ebea33d43d52c76bdc50fe0b6 | e70566c0d81a89c1f7db7d1cb18a308ac9c5d4e1 | refs/heads/master | 2020-03-21T10:25:18.097619 | 2017-10-15T17:14:39 | 2017-10-15T17:14:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | ''' Question taken from hackerearth
https://www.hackerearth.com/practice/data-structures/arrays/1-d/practice-problems/algorithm/monk-and-power-of-time/
'''
timeToCompute = 0
def rotate_list(rotations):
    """Rotate the global callingOrder list left by `rotations` positions.

    Each single rotation moves the front process to the back and costs one
    time unit, so the global timeToCompute is incremented once per rotation.

    Fixes over the original: the outer and inner loop counters were never
    incremented (infinite loop), and the shift read callingOrder[j + 1],
    which indexes one past the end of the list.
    """
    global timeToCompute
    for _ in range(rotations):
        # one time unit per rotation
        timeToCompute += 1
        # remember the front element, shift everything one slot forward,
        # then place the old front at the back
        top = callingOrder[0]
        for j in range(1, len(callingOrder)):
            callingOrder[j - 1] = callingOrder[j]
        callingOrder[len(callingOrder) - 1] = top
# Driver: simulate calling processes until callingOrder matches idealOrder,
# counting the time units consumed (pops and rotations).
print("enter num of processes")
numOfProcesses = int(input())  # NOTE(review): read but never used below
#callingOrder = [int(x) for x in input().split()]
#idealOrder = [int(x) for x in input().split()]
callingOrder = [3,2,1]
idealOrder = [1,2,3]
'''match the head of the lists, and rotate callingOrder to match the
heads.
'''
while (len(idealOrder) == len(callingOrder)) and len(idealOrder) > 0 :
    if idealOrder[0] == callingOrder[0]:
        # heads match: process runs, costing one time unit
        idealOrder.pop(0)
        callingOrder.pop(0)
        timeToCompute += 1
    else:
        ''' calculate the number of rotation required and
        call method to rotate the list(callingOrder)
        '''
        rotate_list(callingOrder.index(idealOrder[0]))
print(timeToCompute)
| [
"pratikadarsh24@gmail.com"
] | pratikadarsh24@gmail.com |
224561cf0f9316564e09902f74fcf58e0baa2dbe | dd8b6c34babf6484067ba05742d6970e9cf90af0 | /modules/message.py | fd7603be2251af9f8e732e895d1a0c8acb193e32 | [] | no_license | kumikoda110/AOps | f4c87935fcca8583b4adb2daec19a8c279460991 | 6bb27aede5affd8c2f5e2a2f3160b87b80ff136b | refs/heads/master | 2021-01-17T18:11:03.112257 | 2016-10-18T01:31:42 | 2016-10-18T01:31:42 | 71,205,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,563 | py | #!/usr/bin/python
#-*- coding: utf-8 -*-
# coding:utf8
# set windows charset
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import conf
import requests
import db
class Message(object):
    """SMS helper built on the smsbao.com HTTP gateway.

    Sends messages, queries the account balance, reads the local
    message_log table and sends class-notification texts in bulk.
    NOTE: Python 2 code (``except Exception, e`` syntax).
    """
    def __init__(self, url=conf.message_info['url'],
                 user=conf.message_info['user'],
                 passwd=conf.message_info['passwd']):
        # gateway endpoint and account credentials (defaults come from conf)
        self.url = url
        self.user = user
        self.passwd = passwd
        # smsbao gateway status code -> human-readable (Chinese) error text
        self.smsbao_code = {
            30: '密码错误',
            40: '账号不存在',
            41: '余额不足',
            42: '帐号过期',
            43: 'IP地址限制',
            50: '内容含有敏感词',
            51: '手机号码不正确',
            -1: '参数不全'
        }
        # database connection and cursor
        self.conn, self.cur = db.db_object()

    # fetch the SMS sending log
    def log(self):
        """Return all rows of the message_log table wrapped in a result dict."""
        try:
            query = 'SELECT id,phone,content,status FROM message_log'
            self.cur.execute(query)
            data = self.cur.fetchall()
            result = {
                'code': 0,
                'message': '获取短信成功',
                'data': data
            }
            return result
        except Exception, e:
            result = {
                'code': 107,
                'message': '获取短信日志失败:' + str(e)
            }
            return result

    # SMS account usage / balance
    def usage(self):
        """Query remaining SMS balance; gateway replies e.g. '0 0,2702'."""
        # 0 0,2702
        try:
            url = 'http://www.smsbao.com/query'
            payload = {
                'u': self.user,
                'p': self.passwd
            }
            r = requests.get(url, params=payload)
            ret = r.text
            if ret.isdigit():
                # a bare number means an error status code
                code = int(ret)
                result = {
                    'code': 106,
                    'message': self.smsbao_code[code]
                }
            else:
                # success reply: "<code> <sent>,<remaining>"
                code, data = ret.split()
                data = data.split(',')
                data = [int(data[0]), int(data[1])]
                result = {
                    'code': 0,
                    'message': '获取短信余额成功',
                    'result': {
                        'data': data
                    }
                }
            return result
        except Exception, e:
            # NOTE(review): this branch builds `result` but never returns it,
            # so the caller receives None on failure — likely missing a return.
            result = {
                'code': 106,
                'message': str(e)
            }

    # send one SMS
    def send(self, phone, content):
        """Send `content` to `phone` via the gateway and log the outcome."""
        payload = {
            'u': self.user,
            'p': self.passwd,
            'm': phone,
            'c': content
        }
        try:
            r = requests.get(self.url, params=payload)
            sms_code = r.text
            if sms_code.isdigit():  # can the gateway reply be parsed as a number?
                code = int(sms_code)
                if not code:  # 0 means the message was accepted
                    result = {
                        'code': 0,
                        'message': '短信发送成功',
                        'data': {
                            'phone': phone,
                            'content': content
                        }
                    }
                else:  # map the gateway error code to its message
                    result = {
                        'code': 102,
                        'message': self.smsbao_code[code]
                    }
            # '-1' fails isdigit() because of the sign, so handle it separately
            elif sms_code == '-1':
                result = {
                    'code': 102,
                    'message': '参数不全'
                }
            else:
                result = {
                    'code': 102,
                    'message': '短信发送失败'
                }
            # write the outcome to the message_log table (status 1 = sent OK)
            if not result['code']:
                status = 1
            else:
                status = 0
            # NOTE(review): SQL built via str.format — vulnerable to injection
            # if phone/content come from untrusted input; use parameterized queries.
            query = '''
            INSERT INTO message_log (phone, content, status)
            VALUES
            ('{phone}', '{content}', {status})
            '''.format(phone=phone, content=content, status=status)
            try:
                self.cur.execute(query)
                self.conn.commit()
                result['log'] = True
            except Exception, e:
                # logging failed: flag it but still return the send result
                result['log'] = False
                result['phone'] = phone
                result['content'] = content
            return result
        except Exception, e:
            result = {
                'code': 101,
                'message': str(e)
            }
            return result

    # class-start notification
    def notify(self, class_type='python',
               class_num=3,
               ntime='晚上8点',
               class_content='到了你就知道'):
        """Send a templated class reminder to every active user of a class."""
        result = {
            'code': 0,
            'message': 'success',
            'data': {}
        }
        # NOTE(review): same string-formatted SQL caveat as in send().
        query = '''
        SELECT
            phone,
            username
        FROM
            user
        WHERE
            class_type = '{class_type}'
        AND class_num = {class_num}
        AND status = 1;
        '''.format(class_type=class_type,
                   class_num=class_num)
        try:
            if self.cur.execute(query):
                phones = self.cur.fetchall()  # (().())
                for phone, username in phones:
                    content = '【京峰课堂】尊敬的{username}:我们将于{ntime}进行{class_type}上课,上课内容为[{class_content}],请准时上课。'
                    t_content = content.format(username=username,
                                               ntime=ntime,
                                               class_type=class_type,
                                               class_content=class_content)
                    # send one SMS per phone number and record each send result
                    p_ret = self.send(phone=phone, content=t_content)
                    result['data'][phone] = p_ret
                return result
            else:
                result = {
                    'code': 103,
                    'message': '发送上课通知时查不到用户,请修改你的条件'
                }
                return result
        except Exception, e:
            result = {
                'code': 104,
                'message': str(e)
            }
            return result
# Manual smoke test: sends a real SMS through the live gateway when run directly.
if __name__ == '__main__':
    m = Message()
    print m.send(150102208148, '测试一下短信接口')
    # m.notify(class_num=1)
    # print result
    # print m.log()
| [
"yanjun914@aliyun.com"
] | yanjun914@aliyun.com |
2615f69827d1777bd77742afd5da86bf82b0f1d3 | 8f9d4f6552672f4be14926c4190dacfc8c21a7fa | /scripts/jasist/similarity_measures.py | 40bf90379b82c6df1e63ef3258056bf9cc6439ce | [
"MIT"
] | permissive | petershan1119/semantic-progressiveness | 276dbd909cfb73aed1513228f4cbde6d627dff11 | 824079b388d0eebc92b2197805b27ed320353f8f | refs/heads/master | 2022-11-26T00:23:13.421855 | 2020-08-02T23:24:21 | 2020-08-02T23:24:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | import numpy as np
def cosine_sim(v1: np.ndarray, v2: np.ndarray) -> float:
    """Cosine similarity: dot product divided by the product of L2 norms."""
    denom = np.linalg.norm(v1, 2) * np.linalg.norm(v2, 2)
    return np.dot(v1, v2) / denom

def cosine_dist(v1: np.ndarray, v2: np.ndarray) -> float:
    """Cosine distance, i.e. one minus the cosine similarity."""
    return 1 - cosine_sim(v1, v2)
def neighbors(word: str, embs: np.array, voc: tuple, k=3) -> list:
    """Return up to `k` nearest neighbors of `word` by cosine similarity.

    Each row of `embs` is a word vector; `voc` is a (word->row, row->word)
    pair. The word itself is skipped, and neighbors with non-positive
    similarity are dropped, so fewer than `k` names may come back.
    """
    w2i, i2w = voc
    row = embs[w2i[word], :]
    row_norm = np.linalg.norm(row, 2)
    all_norms = np.linalg.norm(embs, 2, axis=1)
    # cosine similarity of `word` against every row at once
    cosines = np.dot(row, embs.T) / (row_norm * all_norms)
    ranked = cosines.argsort()[::-1]
    # position 0 is the word itself; keep the next k with positive similarity
    return [i2w[idx] for idx in ranked[1:1 + k] if cosines[idx] > 0]
def get_neighbor_sims(word: str, neighbors_set: set, vec: np.ndarray, voc: tuple) -> np.array:
    """Cosine similarities between `word` and each entry of `neighbors_set`.

    Results follow the iteration order of `neighbors_set`; rows of `vec`
    are word vectors indexed through the word->row map in `voc`.
    """
    w2i, i2w = voc
    target = vec[w2i[word], :]
    rows = vec[[w2i[neighbor] for neighbor in neighbors_set], :]
    target_len = np.linalg.norm(target, 2)
    row_lens = np.linalg.norm(rows, 2, axis=1)
    return np.dot(target, rows.T) / (target_len * row_lens)
def hamilton_local_score (word:str, voc:tuple, old:np.array, new:np.array, k=50) -> float:
    """Local-neighborhood semantic change score between two embedding spaces.

    Builds the union of the word's k nearest neighbors in the old and new
    embeddings, computes the word's similarity vector to that shared set in
    each space, and returns the cosine distance between the two vectors.
    NOTE(review): `common_neighbors` is a set, so the order the two
    similarity vectors are laid out in depends on set iteration — it is the
    same for both calls within one run, which is what the distance needs.
    """
    near_neighbors_old = neighbors (word, old, voc, k=k)
    near_neighbors_new = neighbors (word, new, voc, k=k)
    common_neighbors = set (near_neighbors_old).union (near_neighbors_new)
    sim_old = get_neighbor_sims (word, common_neighbors, old, voc)
    sim_new = get_neighbor_sims (word, common_neighbors, new, voc)
    return cosine_dist (sim_old, sim_new)
def hamilton_global_score(word: str, voc: tuple, old: np.ndarray, new: np.ndarray) -> float:
    """Global semantic change score: cosine distance between the word's
    vector in the old embedding space and its vector in the new one."""
    w2i, _ = voc
    idx = w2i[word]
    return cosine_dist(old[idx, :], new[idx, :])
| [
"sandeepsoni@gatech.com"
] | sandeepsoni@gatech.com |
fb9730470fbf887944b12c86c85a3e7df785334a | b49b5f6cf78aa55aadf27b01a1bc2d8153d54767 | /samples/factory/py_version/include/__init__.py | 9bb531ed7bf4a6b67a2dc590a675b74ab83c0702 | [] | no_license | eligantRU/ood | 0d17aa585160b4c443867786b6c9da86f562e1b4 | 7d3871eeb58ae7cb4d88f48580bf2a5a47654aba | refs/heads/master | 2020-03-28T04:19:40.303035 | 2019-11-19T22:24:09 | 2019-11-19T22:24:09 | 147,707,815 | 0 | 0 | null | 2018-09-06T17:15:44 | 2018-09-06T17:15:44 | null | UTF-8 | Python | false | false | 81 | py | from typing import Tuple
from include.Color import Color
Vec2 = Tuple[int, int]
| [
"eligant.ru@gmail.com"
] | eligant.ru@gmail.com |
db14361524f6ae8972b50bb26204bdf8d4c92611 | d35907f489a88b27163be0b7e874eb482252a672 | /app.py | a4d8f3993cee34f94afe96f2c47088ff8f9a5435 | [] | no_license | tinvan94/sample-python-api | 13418627d0ab6037f547cebcae24c19667450fff | f511854b281a21a1e12086e4d14be821ad754cc5 | refs/heads/master | 2020-06-06T14:30:49.064746 | 2019-06-18T15:59:40 | 2019-06-18T15:59:40 | 192,765,015 | 0 | 0 | null | 2019-10-31T20:56:52 | 2019-06-19T16:06:38 | Python | UTF-8 | Python | false | false | 476 | py | from flask import Blueprint
from flask_restful import Api
from resources.customer import CustomerResource
from resources.user import UserRegistration, UserLogin, UserLogout
# Flask-RESTful API wired onto a blueprint so the app factory can register it.
api_bp = Blueprint('api', __name__)
api = Api(api_bp)

# Customer endpoints: both the collection and single-item routes map to the
# same resource class.
routes = [
    '/customer/<int:customer_id>',
    '/customer',
]
api.add_resource(CustomerResource, *routes)
# Auth endpoints
api.add_resource(UserRegistration, '/registration')
api.add_resource(UserLogin, '/login')
api.add_resource(UserLogout, '/logout')
| [
"tinnv@trobz.com"
] | tinnv@trobz.com |
9a7c0605403c732acc14c1c84feb77aa47721430 | b6d723d7ac20b5b01a55e574088d053c9952ef76 | /usecase1/scripts/uc1_scikit_linear_regression.py | b265c2de36eb278168882755b9f3ce0d7c6cc658 | [] | no_license | amoghntt/Aspire | d704c53bbfe89794fc34e6ea3bd605d51eb0a754 | 97953894c82ac565d451df9bd6eea35d23e83c6b | refs/heads/master | 2021-08-30T15:07:18.408757 | 2017-12-18T11:27:49 | 2017-12-18T11:27:49 | 114,597,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,197 | py | import json
import sys
import math
import numpy as numpy
import mysql.connector
from pandas import DataFrame
from sklearn.linear_model import LinearRegression
def predict_dc():
    """Predict defect density for the latest release via linear regression.

    Loads all rows of the usecase1 table, treats the last row as the sample
    to predict (its feature columns only), fits a LinearRegression on the
    remaining rows, and returns the predicted 'defect_count/KLOC' as a
    1-element array.
    """
    # NOTE(review): credentials and host are hard-coded — move to config/env.
    cnx = mysql.connector.connect(user='cresta', password='cresta', host='10.248.3.91', database='cresta_uat')
    cols = ['KLOC', 'test_case_count', 'application_complexity', 'domain_knowledge', 'technical_skills', 'requirements_query_count', 'code_review_comments', 'design_review_comments','defect_count/KLOC']
    strColumns = ','.join(cols)
    query = "select " + strColumns + " from usecase1 "
    print(1)
    try:
        cursor = cnx.cursor()
        cursor.execute(query)
        data = DataFrame(cursor.fetchall(), columns=cols)
        # last row = sample to predict (features only); the rest is training data
        input_data = data[cols[:-1]][-1:]
        data = data[:-1]
    finally:
        cnx.close()
    X = data[cols[:-1]]
    # rename the target column so it is a valid attribute name
    data = data.rename(columns={'defect_count/KLOC': 'defect_density'})
    y = data.defect_density
    lm = LinearRegression()
    lm.fit(X, y)
    result = lm.predict(input_data)
    return result
'''#elif(Predictionin==2):
cnx = mysql.connector.connect(user='cresta', password='cresta', host='10.248.3.91', database='cresta')
cols = ['KLOC', 'test_case_count', 'application_complexity', 'domain_knowledge', 'technical_skills', 'requirements_query_count', 'code_review_comments', 'design_review_comments', 'acceptance']
strColumns = ','.join(cols)
query = "select " + strColumns + " from UseCaseData "
print(2)
try:
cursor = cnx.cursor()
cursor.execute(query)
data = DataFrame(cursor.fetchall(), columns=cols)
input_data = data[cols[:-1]][-1:]
data = data[:-1]
finally:
cnx.close()
X = data[cols[:-1]]
y = data.acceptance
lm = LinearRegression()
lm.fit(X, y)
result = lm.predict(input_data)
elif(Predictionin==3):
cnx = mysql.connector.connect(user='cresta', password='cresta', host='10.248.3.91', database='cresta')
cols = ['KLOC', 'test_case_count', 'application_complexity', 'domain_knowledge', 'technical_skills', 'requirements_query_count', 'code_review_comments', 'design_review_comments', 'defect_count']
strColumns = ','.join(cols)
query = "select " + strColumns + " from usecase1C "
print (21)
try:
cursor = cnx.cursor()
cursor.execute(query)
data = DataFrame(cursor.fetchall(), columns=cols)
input_data = data[cols[:-1]][-1:]
data = data[:-1]
finally:
cnx.close()
X = data[cols[:-1]]
y = data.defect_count
lm = LinearRegression()
lm.fit(X, y)
result = lm.predict(input_data)
else:
cnx = mysql.connector.connect(user='cresta', password='cresta', host='10.248.3.91', database='cresta')
cols = ['KLOC', 'test_case_count', 'application_complexity', 'domain_knowledge', 'technical_skills', 'requirements_query_count', 'code_review_comments', 'design_review_comments', 'defect_count']
strColumns = ','.join(cols)
query = "select " + strColumns + " from usecase1D "
print(23)
try:
cursor = cnx.cursor()
cursor.execute(query)
data = DataFrame(cursor.fetchall(), columns=cols)
input_data = data[cols[:-1]][-1:]
data = data[:-1]
finally:
cnx.close()
X = data[cols[:-1]]
y = data.defect_count
lm = LinearRegression()
lm.fit(X, y)
result = lm.predict(input_data)
'''
def graph_data(Prediction_in):
    """Build chart series for the defect-count control chart.

    Returns (release ids, defect counts + predicted value, UCL series,
    LCL series). NOTE(review): only handles Prediction_in == 1; any other
    value falls through and returns None implicitly — confirm with callers.
    """
    Predictionin = Prediction_in
    if (Predictionin == 1):
        # NOTE(review): hard-coded credentials, as elsewhere in this module.
        cnx = mysql.connector.connect(user='cresta', password='cresta', host='10.248.3.91', database='cresta')
        cols = ['ID']
        strColumns = ','.join(cols)
        query = "select " + strColumns + " from usecase1"
        data = []
        data1 = []
        data2 = []
        data3 = []
        data_d = []
        data1_d = []
        data2_d = []
        data3_d = []
        ucl = []
        lcl = []
        try:
            cursor = cnx.cursor()
            cursor.execute(query)
            # data = DataFrame(cursor.fetchall(), columns=cols)
            data = cursor.fetchall()
            # strip the "(value,)" tuple formatting into plain strings
            for x in data:
                x = str(x)
                data1.append(x.replace(',', ''))
            for x in data1:
                x = str(x)
                data2.append(x.replace('(', ''))
            for x in data2:
                x = str(x)
                data3.append(x.replace(')', ''))
            # append the id of the release being predicted
            data3.append('20192')
            Relid = data3[:]
            # second query: historical defect counts for the control limits
            cols = ['defect_count']
            strColumns = ','.join(cols)
            query = "select " + strColumns + " from usecase1CTelephonica"
            cursor = cnx.cursor()
            cursor.execute(query)
            data_d = cursor.fetchall()
            ucl1, lcl1 = predict_ucllcl(data_d)
            for y in data_d:
                y = str(y)
                data1_d.append(y.replace(',', ''))
            for y in data1_d:
                y = str(y)
                data2_d.append(y.replace('(', ''))
            for y in data2_d:
                y = str(y)
                data3_d.append(y.replace(')', ''))
            graph_data = data3_d[:]
        finally:
            cnx.close()
        print(ucl1, lcl1)
        # NOTE(review): predict_dc() takes no arguments, so this call looks
        # inconsistent; its result is immediately overwritten on the next
        # line by str(Predictionin) — confirm which value was intended.
        predicted_result = predict_dc(Predictionin)
        predicted_result = str(Predictionin)
        pred = ""
        pred1 = ""
        pred = predicted_result.replace('[', '')
        pred1 = pred.replace(']', '')
        graph_data.append(pred1)
        # constant UCL/LCL series, one point per plotted value
        for x in graph_data:
            ucl.append(ucl1)
            lcl.append(lcl1)
        return Relid, graph_data, ucl, lcl
''' elif (Predictionin == 2):
cnx = mysql.connector.connect(user='cresta', password='cresta', host='10.248.3.91', database='cresta')
cols = ['ID']
strColumns = ','.join(cols)
query = "select " + strColumns + " from UseCaseData"
data = []
data1 = []
data2 = []
data3 = []
data_d = []
data1_d = []
data2_d = []
data3_d = []
ucl = []
lcl = []
try:
cursor = cnx.cursor()
cursor.execute(query)
# data = DataFrame(cursor.fetchall(), columns=cols)
data = cursor.fetchall()
for x in data:
x = str(x)
data1.append(x.replace(',', ''))
for x in data1:
x = str(x)
data2.append(x.replace('(', ''))
for x in data2:
x = str(x)
data3.append(x.replace(')', ''))
data3.append('20192')
Relid = data3[:]
cols = ['defect_count']
strColumns = ','.join(cols)
query = "select " + strColumns + " from usecase1CTelephonica"
cursor = cnx.cursor()
cursor.execute(query)
data_d = cursor.fetchall()
ucl1, lcl1 = predict_ucllcl(data_d)
for y in data_d:
y = str(y)
data1_d.append(y.replace(',', ''))
for y in data1_d:
y = str(y)
data2_d.append(y.replace('(', ''))
for y in data2_d:
y = str(y)
data3_d.append(y.replace(')', ''))
graph_data = data3_d[:]
finally:
cnx.close()
print(ucl1, lcl1)
predicted_result = predict_dc(Predictionin)
predicted_result = str(Predictionin)
pred = ""
pred1 = ""
pred = predicted_result.replace('[', '')
pred1 = pred.replace(']', '')
graph_data.append(pred1)
for x in graph_data:
ucl.append(ucl1)
lcl.append(lcl1)
elif (Predictionin == 3):
cnx = mysql.connector.connect(user='cresta', password='cresta', host='10.248.3.91', database='cresta')
cols = ['ID']
strColumns = ','.join(cols)
query = "select " + strColumns + " from usecase1C"
data = []
data1 = []
data2 = []
data3 = []
data_d = []
data1_d = []
data2_d = []
data3_d = []
ucl = []
lcl = []
try:
cursor = cnx.cursor()
cursor.execute(query)
# data = DataFrame(cursor.fetchall(), columns=cols)
data = cursor.fetchall()
for x in data:
x = str(x)
data1.append(x.replace(',', ''))
for x in data1:
x = str(x)
data2.append(x.replace('(', ''))
for x in data2:
x = str(x)
data3.append(x.replace(')', ''))
data3.append('20192')
Relid = data3[:]
cols = ['defect_count']
strColumns = ','.join(cols)
query = "select " + strColumns + " from usecase1CTelephonica"
cursor = cnx.cursor()
cursor.execute(query)
data_d = cursor.fetchall()
ucl1, lcl1 = predict_ucllcl(data_d)
for y in data_d:
y = str(y)
data1_d.append(y.replace(',', ''))
for y in data1_d:
y = str(y)
data2_d.append(y.replace('(', ''))
for y in data2_d:
y = str(y)
data3_d.append(y.replace(')', ''))
graph_data = data3_d[:]
finally:
cnx.close()
print(ucl1, lcl1)
predicted_result = predict_dc(Predictionin)
predicted_result = str(Predictionin)
pred = ""
pred1 = ""
pred = predicted_result.replace('[', '')
pred1 = pred.replace(']', '')
graph_data.append(pred1)
for x in graph_data:
ucl.append(ucl1)
lcl.append(lcl1)
else :
cnx = mysql.connector.connect(user='cresta', password='cresta', host='10.248.3.91', database='cresta')
cols = ['ID']
strColumns = ','.join(cols)
query = "select " + strColumns + " from usecase1D"
data = []
data1 = []
data2 = []
data3 = []
data_d = []
data1_d = []
data2_d = []
data3_d = []
ucl = []
lcl = []
try:
cursor = cnx.cursor()
cursor.execute(query)
# data = DataFrame(cursor.fetchall(), columns=cols)
data = cursor.fetchall()
for x in data:
x = str(x)
data1.append(x.replace(',', ''))
for x in data1:
x = str(x)
data2.append(x.replace('(', ''))
for x in data2:
x = str(x)
data3.append(x.replace(')', ''))
data3.append('20192')
Relid = data3[:]
cols = ['defect_count']
strColumns = ','.join(cols)
query = "select " + strColumns + " from usecase1CTelephonica"
cursor = cnx.cursor()
cursor.execute(query)
data_d = cursor.fetchall()
ucl1, lcl1 = predict_ucllcl(data_d)
for y in data_d:
y = str(y)
data1_d.append(y.replace(',', ''))
for y in data1_d:
y = str(y)
data2_d.append(y.replace('(', ''))
for y in data2_d:
y = str(y)
data3_d.append(y.replace(')', ''))
graph_data = data3_d[:]
finally:
cnx.close()
print(ucl1, lcl1)
predicted_result = predict_dc(Predictionin)
predicted_result = str(Predictionin)
pred = ""
pred1 = ""
pred = predicted_result.replace('[', '')
pred1 = pred.replace(']', '')
graph_data.append(pred1)
for x in graph_data:
ucl.append(ucl1)
lcl.append(lcl1)
'''
def predict_ucllcl(mydata):
    """Compute three-sigma control-chart limits for a series of counts.

    Returns (UCL, LCL) where UCL = floor(mean + 3*sigma) and LCL is
    mean - 3*sigma clamped at zero. Sigma is the population standard
    deviation of `mydata`. Prints the 3-sigma width and the raw UCL,
    matching the original diagnostics.
    """
    n = len(mydata)
    mean = numpy.mean(mydata, axis=0)
    deviations = [value - mean for value in mydata]
    squared = [d * d for d in deviations]
    total = 0
    for sq in squared:
        total = sq + total
    variance = total / n
    sigma = math.pow(variance, 0.5)
    three_sigma = 3 * sigma
    print(three_sigma)
    ucl = mean + three_sigma
    lcl = mean - three_sigma
    print(ucl)
    ucl = math.floor(ucl)
    # a negative lower control limit is meaningless for counts
    if lcl < 0:
        lcl = 0
    return ucl, lcl
| [
"109100@NTTDATA.COM"
] | 109100@NTTDATA.COM |
689331242b3ee1bd08dad666a2f70791a5042787 | 47c7e495c25f77c2129e3a88d01e797bbc18085a | /bot/flaskapp-application-web.py | b0abb6434b4e330922619b2296faa396af919d3b | [] | no_license | fireprophet/attemt_telegram_bot_flask | c1eb2c5bca35270cb9348f0e54bbc29c025e233f | ea26fca407adb6c6489b4ed96a58a930ce4fc9b8 | refs/heads/master | 2023-07-17T21:31:02.661117 | 2021-09-07T08:08:45 | 2021-09-07T08:08:45 | 399,856,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | from application-web import app
# Script entry point: start the Flask development server.
# NOTE(review): the module imported above is written "application-web" — a
# hyphen is not valid in a Python module name, so this file raises a
# SyntaxError as-is; the module likely needs to be imported via importlib or
# renamed (e.g. application_web).
if __name__ == "__main__":
    app.run()
| [
"fireprophet.io@gmail.com"
] | fireprophet.io@gmail.com |
076793b7912b8882ef8e8bb776e73d03c6da5d0b | 3aea63678e588629df5894cdb5df5b0a635b66bb | /Covid19.py | 6a3a5f52777e7b08f3ef6d58101c5e67947b6a65 | [] | no_license | braveseba/Clarusway_python_assignment | 4751959b8a905950b299f67ddae356e381a49e51 | 60cf263a4c27fe0f718194a2caefb23f109c93c8 | refs/heads/master | 2022-12-13T15:24:26.617519 | 2020-07-30T15:08:20 | 2020-07-30T15:08:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | age=(input("Are you a cigarette addict older than 75 years old? True/False: ").title())== "Yes"
# The prompts promise True/False answers, so compare against "True":
# input() returns a string and .title() normalises case ("true" -> "True").
# Fix: the original compared the titled answer to "Yes", so a truthful
# "True" reply was treated as not-at-risk.
# NOTE(review): the `age` line above this block has the same "Yes" defect.
chronic = input("Do you have a severe chronic disease? True/False: ").title() == "True"
immune = input("Is your immune system too weak? True/False: ").title() == "True"
if age or chronic or immune:
    print("You are in risky group")
else:
    print("You are not in risky group")
| [
"habibkc71@gmail.com"
] | habibkc71@gmail.com |
1fa3cbca32b5e8e3c20fc0ac646c98e01573d497 | d1dd5da8ef670280e22ce716780afe0d5b417320 | /da_v1/eval/total_text/Deteval2.py | a3490f9578ce25a7ea99566cfbc2d6a6b8284743 | [
"Apache-2.0"
] | permissive | xieenze/PSENet | 643d52f71b6870cd8681c02a46550b110187ece2 | 85a64d337e462352b8397c04566d1fd2eb141935 | refs/heads/master | 2020-05-27T19:23:42.245279 | 2019-07-03T05:16:21 | 2019-07-03T05:16:21 | 188,760,423 | 0 | 0 | null | 2019-05-27T02:50:17 | 2019-05-27T02:50:16 | null | UTF-8 | Python | false | false | 14,367 | py | from os import listdir
from scipy import io
import numpy as np
from skimage.draw import polygon
from polygon_wrapper import iou
from polygon_wrapper import iod
from polygon_wrapper import area_of_intersection
from polygon_wrapper import area
"""
Input format: y0,x0, ..... yn,xn. Each detection is separated by the end of line token ('\n')'
"""
input_dir = '/home/xieenze/R4D/PSENET/pytorch-text-unit-v5/submit_tt'
gt_dir = '/unsullied/sharefs/zangyuhang/isilon-home/DataSet/TOTAL_TEXT/Groundtruth/Polygon/Test'
fid_path = '/home/xieenze/R4D/PSENET/pytorch-text-unit-v5/res.txt'
allInputs = listdir(input_dir)
def input_reading_mod(input_dir, input):
    """Read one detection file and return its lines with trailing newlines removed."""
    path = '%s/%s' % (input_dir, input)
    with open(path, 'r') as fid:
        raw_lines = fid.readlines()
    return [line.strip('\n') for line in raw_lines]
def gt_reading_mod(gt_dir, gt_id):
    """Load the ground-truth polygons for one image from its .mat file.

    `gt_id` is the image file name; its extension is dropped and the
    polygons are read from poly_gt_<stem>.mat under `gt_dir`.
    """
    stem = gt_id.split('.')[0]
    mat = io.loadmat('%s/poly_gt_%s.mat' % (gt_dir, stem))
    return mat['polygt']
def detection_filtering(detections, groundtruths, threshold=0.5):
    """Blank out detections overlapping a '#' (don't-care) ground truth.

    A detection whose intersection-over-detection (iod) with any don't-care
    region exceeds `threshold` is removed; the filtered list is mutated in
    place and also returned.
    NOTE: Python 2 code — relies on map() returning a list so the result
    can be sliced with [0:-1] and strided with [0::2].
    """
    for gt_id, gt in enumerate(groundtruths):
        # gt[5] == '#' marks a don't-care region; gt[1]/gt[3] hold x/y coords
        if (gt[5] == '#') and (gt[1].shape[1] > 1):
            gt_x = map(int, np.squeeze(gt[1]))
            gt_y = map(int, np.squeeze(gt[3]))
            for det_id, detection in enumerate(detections):
                # detection text is "y0,x0,...,yn,xn," — drop trailing empty field
                detection = detection.split(',')
                detection = map(int, detection[0:-1])
                det_y = detection[0::2]
                det_x = detection[1::2]
                det_gt_iou = iod(det_x, det_y, gt_x, gt_y)
                if det_gt_iou > threshold:
                    detections[det_id] = []
    # compact the list by dropping the blanked-out entries
    detections[:] = [item for item in detections if item != []]
    return detections
def sigma_calculation(det_x, det_y, gt_x, gt_y):
    """
    sigma = inter_area / gt_area

    Recall-like overlap of a detection with a ground-truth polygon,
    rounded to two decimals.
    """
    return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) / area(gt_x, gt_y)), 2)

def tau_calculation(det_x, det_y, gt_x, gt_y):
    """
    tau = inter_area / det_area

    Precision-like overlap of a detection with a ground-truth polygon,
    rounded to two decimals.
    """
    return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) / area(det_x, det_y)), 2)
##############################Initialization###################################
global_tp = 0
global_fp = 0
global_fn = 0
# per-image sigma/tau overlap tables, appended in allInputs order
global_sigma = []
global_tau = []
# DetEval thresholds: tr = recall threshold, tp = precision threshold,
# fsc_k = penalty factor for split/merge matches, k = minimum fragment count
tr = 0.7
tp = 0.6
fsc_k = 0.8
k = 2
###############################################################################
# Build the sigma/tau tables for every image (rows = ground truths,
# columns = detections). NOTE: Python 2 — map() returning a sliceable list.
for input_id in allInputs:
    if (input_id != '.DS_Store'):
        print(input_id)
        detections = input_reading_mod(input_dir, input_id)
        groundtruths = gt_reading_mod(gt_dir, input_id)
        detections = detection_filtering(detections, groundtruths) # filters detections overlapping with DC area
        # drop don't-care ground truths entirely after filtering
        dc_id = np.where(groundtruths[:, 5] == '#')
        groundtruths = np.delete(groundtruths, (dc_id), (0))
        local_sigma_table = np.zeros((groundtruths.shape[0], len(detections)))
        local_tau_table = np.zeros((groundtruths.shape[0], len(detections)))
        for gt_id, gt in enumerate(groundtruths):
            if len(detections) > 0:
                for det_id, detection in enumerate(detections):
                    detection = detection.split(',')
                    detection = map(int, detection[:-1])
                    det_y = detection[0::2]
                    det_x = detection[1::2]
                    gt_x = map(int, np.squeeze(gt[1]))
                    gt_y = map(int, np.squeeze(gt[3]))
                    local_sigma_table[gt_id, det_id] = sigma_calculation(det_x, det_y, gt_x, gt_y)
                    local_tau_table[gt_id, det_id] = tau_calculation(det_x, det_y, gt_x, gt_y)
        global_sigma.append(local_sigma_table)
        global_tau.append(local_tau_table)

# running totals for the final recall/precision computation
global_accumulative_recall = 0
global_accumulative_precision = 0
total_num_gt = 0
total_num_det = 0
def one_to_one(local_sigma_table, local_tau_table, local_accumulative_recall,
               local_accumulative_precision, global_accumulative_recall, global_accumulative_precision,
               gt_flag, det_flag):
    """Credit one-to-one matches: a ground truth with exactly one detection
    above both thresholds scores 1.0 recall and 1.0 precision.

    Reads module globals num_gt, tr, tp. NOTE: Python 2 (xrange).
    """
    for gt_id in xrange(num_gt):
        qualified_sigma_candidates = np.where(local_sigma_table[gt_id, :] > tr)
        num_qualified_sigma_candidates = qualified_sigma_candidates[0].shape[0]
        qualified_tau_candidates = np.where(local_tau_table[gt_id, :] > tp)
        num_qualified_tau_candidates = qualified_tau_candidates[0].shape[0]
        if (num_qualified_sigma_candidates == 1) and (num_qualified_tau_candidates == 1):
            global_accumulative_recall = global_accumulative_recall + 1.0
            global_accumulative_precision = global_accumulative_precision + 1.0
            local_accumulative_recall = local_accumulative_recall + 1.0
            local_accumulative_precision = local_accumulative_precision + 1.0
            # mark both sides as consumed so later stages skip them
            gt_flag[0, gt_id] = 1
            matched_det_id = np.where(local_sigma_table[gt_id, :] > tr)
            det_flag[0, matched_det_id] = 1
    return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag
def one_to_many(local_sigma_table, local_tau_table, local_accumulative_recall,
                local_accumulative_precision, global_accumulative_recall, global_accumulative_precision,
                gt_flag, det_flag):
    """Credit one ground truth split across many detections.

    A still-unmatched ground truth overlapped by at least k detections is
    matched against all unconsumed detections above the tau threshold; a
    genuine split scores fsc_k recall and fsc_k per detection precision.
    Reads module globals num_gt, tr, tp, fsc_k, k. NOTE: Python 2 (xrange).
    """
    for gt_id in xrange(num_gt):
        # skip the following if the groundtruth was matched
        if gt_flag[0, gt_id] > 0:
            continue
        non_zero_in_sigma = np.where(local_sigma_table[gt_id, :] > 0)
        num_non_zero_in_sigma = non_zero_in_sigma[0].shape[0]
        if num_non_zero_in_sigma >= k:
            ####search for all detections that overlaps with this groundtruth
            qualified_tau_candidates = np.where((local_tau_table[gt_id, :] >= tp) & (det_flag[0, :] == 0))
            num_qualified_tau_candidates = qualified_tau_candidates[0].shape[0]
            if num_qualified_tau_candidates == 1:
                if ((local_tau_table[gt_id, qualified_tau_candidates] >= tp) and (local_sigma_table[gt_id, qualified_tau_candidates] >= tr)):
                    # became an one-to-one case
                    global_accumulative_recall = global_accumulative_recall + 1.0
                    global_accumulative_precision = global_accumulative_precision + 1.0
                    local_accumulative_recall = local_accumulative_recall + 1.0
                    local_accumulative_precision = local_accumulative_precision + 1.0
                    gt_flag[0, gt_id] = 1
                    det_flag[0, qualified_tau_candidates] = 1
            elif (np.sum(local_sigma_table[gt_id, qualified_tau_candidates]) >= tr):
                # the fragments jointly cover the ground truth: split match
                gt_flag[0, gt_id] = 1
                det_flag[0, qualified_tau_candidates] = 1
                global_accumulative_recall = global_accumulative_recall + fsc_k
                global_accumulative_precision = global_accumulative_precision + num_qualified_tau_candidates * fsc_k
                local_accumulative_recall = local_accumulative_recall + fsc_k
                local_accumulative_precision = local_accumulative_precision + num_qualified_tau_candidates * fsc_k
    return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag
def many_to_many(local_sigma_table, local_tau_table, local_accumulative_recall,
                 local_accumulative_precision, global_accumulative_recall, global_accumulative_precision,
                 gt_flag, det_flag):
    """Credit one detection covering many ground truths (merge case).

    Mirror image of one_to_many: a still-unmatched detection overlapping at
    least k ground truths is matched against all unconsumed ground truths
    above the sigma threshold; a genuine merge scores fsc_k precision and
    fsc_k per ground truth recall.
    Reads module globals num_det, tr, tp, fsc_k, k. NOTE: Python 2 (xrange).
    """
    for det_id in xrange(num_det):
        # skip the following if the detection was matched
        if det_flag[0, det_id] > 0:
            continue
        non_zero_in_tau = np.where(local_tau_table[:, det_id] > 0)
        num_non_zero_in_tau = non_zero_in_tau[0].shape[0]
        if num_non_zero_in_tau >= k:
            ####search for all detections that overlaps with this groundtruth
            qualified_sigma_candidates = np.where((local_sigma_table[:, det_id] >= tp) & (gt_flag[0, :] == 0))
            num_qualified_sigma_candidates = qualified_sigma_candidates[0].shape[0]
            if num_qualified_sigma_candidates == 1:
                if ((local_tau_table[qualified_sigma_candidates, det_id] >= tp) and (local_sigma_table[qualified_sigma_candidates, det_id] >= tr)):
                    # became an one-to-one case
                    global_accumulative_recall = global_accumulative_recall + 1.0
                    global_accumulative_precision = global_accumulative_precision + 1.0
                    local_accumulative_recall = local_accumulative_recall + 1.0
                    local_accumulative_precision = local_accumulative_precision + 1.0
                    gt_flag[0, qualified_sigma_candidates] = 1
                    det_flag[0, det_id] = 1
            elif (np.sum(local_tau_table[qualified_sigma_candidates, det_id]) >= tp):
                # the detection jointly covers the ground truths: merge match
                det_flag[0, det_id] = 1
                gt_flag[0, qualified_sigma_candidates] = 1
                global_accumulative_recall = global_accumulative_recall + num_qualified_sigma_candidates * fsc_k
                global_accumulative_precision = global_accumulative_precision + fsc_k
                local_accumulative_recall = local_accumulative_recall + num_qualified_sigma_candidates * fsc_k
                local_accumulative_precision = local_accumulative_precision + fsc_k
    return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag
# Per-sample evaluation loop: apply the three DetEval matching stages to every
# image, accumulate global precision/recall, and append per-sample results to
# the report file at fid_path.
for idx in xrange(len(global_sigma)):
    print(allInputs[idx])
    local_sigma_table = global_sigma[idx]
    local_tau_table = global_tau[idx]
    num_gt = local_sigma_table.shape[0]
    num_det = local_sigma_table.shape[1]
    total_num_gt = total_num_gt + num_gt
    total_num_det = total_num_det + num_det
    local_accumulative_recall = 0
    local_accumulative_precision = 0
    gt_flag = np.zeros((1, num_gt))
    det_flag = np.zeros((1, num_det))
    #######first check for one-to-one case##########
    local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \
    gt_flag, det_flag = one_to_one(local_sigma_table, local_tau_table,
                                   local_accumulative_recall, local_accumulative_precision,
                                   global_accumulative_recall, global_accumulative_precision,
                                   gt_flag, det_flag)
    #######then check for one-to-many case##########
    local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \
    gt_flag, det_flag = one_to_many(local_sigma_table, local_tau_table,
                                    local_accumulative_recall, local_accumulative_precision,
                                    global_accumulative_recall, global_accumulative_precision,
                                    gt_flag, det_flag)
    #######then check for many-to-many case##########
    local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \
    gt_flag, det_flag = many_to_many(local_sigma_table, local_tau_table,
                                     local_accumulative_recall, local_accumulative_precision,
                                     global_accumulative_recall, global_accumulative_precision,
                                     gt_flag, det_flag)
    # NOTE(review): an inlined, commented-out copy of the many_to_many stage used
    # to live here; it was superseded by the many_to_many() call above.
    # Append this sample's precision/recall to the report file.
    fid = open(fid_path, 'a+')
    try:
        local_precision = local_accumulative_precision / num_det
    except ZeroDivisionError:
        local_precision = 0
    try:
        local_recall = local_accumulative_recall / num_gt
    except ZeroDivisionError:
        local_recall = 0
    temp = ('%s______/Precision:_%s_______/Recall:_%s\n' % (allInputs[idx], str(local_precision), str(local_recall)))
    fid.write(temp)
    fid.close()
# Corpus-level metrics over all samples.
try:
    recall = global_accumulative_recall / total_num_gt
except ZeroDivisionError:
    recall = 0
try:
    precision = global_accumulative_precision / total_num_det
except ZeroDivisionError:
    precision = 0
try:
    f_score = 2*precision*recall/(precision+recall)
except ZeroDivisionError:
    f_score = 0
fid = open(fid_path, 'a')
temp = ('Precision:_%s_______/Recall:_%s\n' %(str(precision), str(recall)))
fid.write(temp)
fid.close()
print('pb')
| [
"Johnny_ez@163.com"
] | Johnny_ez@163.com |
5901cd761f795addb37355ab5dfb91b136524937 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/45/usersdata/118/15614/submittedfiles/lista1.py | e7b973d29fb37d041373635daf0586e519cab283 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
from __future__ import division
# Read n values and report the sum and count of the even and odd ones.
# (Python 2 script: input() evaluates the typed expression, yielding ints.)
n = input('Digite o número de termos:')
a = []
for i in range(0, n, 1):  # fixed: original range(0, n+1, 1) read n+1 values for n terms
    a.append(input('Digite o valor:'))  # fixed: missing closing parenthesis (SyntaxError)
somap = 0  # sum of even values
somai = 0  # sum of odd values
contp = 0  # count of even values
conti = 0  # count of odd values
for j in range(0, len(a), 1):
    if a[j] % 2 == 0:  # fixed: original tested a[i] with the stale index from the first loop
        contp = contp + 1
        somap = somap + a[j]  # fixed: original accumulated 1 instead of the value
    else:
        conti = conti + 1
        somai = somai + a[j]  # fixed: original accumulated 1 instead of the value
print(somai)
print(somap)
print(conti)
print(contp)
print(a)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
039696c97042596d6d70749d3c2109351fd97b6f | b62b3d52d911a07e4ae6790afae73b870e7de948 | /raw/registrees/excel_users.py | 4bd72c7ceae539c10d2059f88462ae94bb7a47d8 | [] | no_license | kimvanwyk/md410_2021_conv_website | ac77ac11d6abc6c48686ac16e77ee2971a31241b | 40faa10aaf1377463c7d6d42787d12af0e74d1a9 | refs/heads/master | 2023-04-18T18:54:10.418683 | 2021-05-04T11:57:22 | 2021-05-04T11:57:22 | 338,836,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | """ Functions to retrieve registree data for the 2020
virtual MD410 Virtual Convention hosted on GoToWebinar
"""
import json
import os.path
import attr
import dateparser
from openpyxl import load_workbook
if 1:
class ExcelSheet:
def __init__(self):
self.registrees = {}
self.get_registrees()
def get_registrees(self):
wb = load_workbook(
filename="/home/kimv/src/md410_2020_conv_website/raw/registrees/Service Knows No Boundaries Virtual Convention - Registration Report.xlsx"
)
skip = True
for (n, row) in enumerate(wb["Sheet0"].values):
if not skip and "Approved" in row[4].strip():
self.registrees[n] = {
"first_name": row[0],
"last_name": row[1],
"date": dateparser.parse(row[3]).isoformat(),
"club": row[5],
}
if row[0] == "First Name":
skip = False
if __name__ == "__main__":
sheet = ExcelSheet()
print(sheet.registrees)
| [
"kimv@sahomeloans.com"
] | kimv@sahomeloans.com |
cc0b4291cf557814775dee5633ec150f36a75948 | 5bc611ce8b5629d09562c98e9b2cc51bc5860c7c | /graph.py | 94838052ccdd373211f51c58a32731e46154ff0f | [] | no_license | thepolm3/Hello-Internet-Stats | 45b4197d0e62357b7c687cb45df5984634671bba | 23f283da0c0062901f77881bb058f92732b03c97 | refs/heads/master | 2022-07-06T01:08:45.399828 | 2022-06-21T22:27:11 | 2022-06-21T22:27:11 | 146,997,315 | 0 | 0 | null | 2022-06-21T22:27:12 | 2018-09-01T12:41:13 | Python | UTF-8 | Python | false | false | 3,672 | py | """Graphs the episodes"""
import pickle
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
from matplotlib.dates import date2num
import numpy as np
with open('episodes.pickle', 'rb') as f:
episodes = pickle.load(f)
#for our purposes we need a reddit thread
for episode in episodes:
if episode['reddit-thread'] is None:
print(f"{episode['title']} has no reddit thread")
episodes.pop(episodes.index(episode))
lengths = [episode['length'].seconds/3600 for episode in episodes]
numbers = [episode['number'] for episode in episodes]
titles = [episode['title'] for episode in episodes]
dates = [episode['date'] for episode in episodes]
no_of_comments = []
for episode in episodes:
no_of_comments.append(episode['reddit-thread'].num_comments)
days_since = [0]
for i in range(1, len(episodes)):
days_since.append((episodes[i]['date'] - episodes[i-1]['date']).days)
#available values: lengths, numbers, titles, dates, no_of_comments, days_since
x, y = days_since, no_of_comments
s, c = lengths*50, date2num(dates)
xlabel = "Days since last episode"
ylabel = "Number of reddit comments"
title = "Days since last episode vs Engagement in Hello Internet episodes"
############## bar chart
# top_values = [e['date'] for e in episodes[4::5]]
# top_labels = [str(e['number']) for e in episodes[4::5]]
# fig = plt.figure(figsize=(20,10), dpi=300)
# ax = host_subplot(111, axes_class=AA.Axes)
# ax2 = ax.twin()
# ax2.set_xticks(top_values)
# ax2.set_xticklabels(top_labels, rotation=45)
# ax2.grid(zorder=0)
# ax2.axis["right"].major_ticklabels.set_visible(False)
# ax2.axis["top"].major_ticklabels.set_visible(True)
# ax.bar(x, y, width=3, zorder=3)
# ax.xaxis_date()
# longest_episodes = sorted(episodes, key=lambda ep: ep['length'], reverse=True)[:5]
# for ep in longest_episodes:
# height = ep['length'].seconds/3600
# ax.text(ep['date'], height,
# str(ep['number']),
# ha = 'center', va='bottom')
#fig.savefig('graph.png')
fig = plt.figure(figsize=(10, 10), dpi=300)
plt.scatter(x, y, s=s, c=c, cmap='plasma', alpha=1)
#set up the colorbar
first_episode_tick = ((numbers[0] + 5)//5)*5
last_episode_tick = numbers[-1] - 5 #gives room for the date label
ep_nums = list(range(first_episode_tick, last_episode_tick, 5))
ep_dates = []
for episode in episodes:
if episode['number'] in ep_nums:
ep_dates.append(date2num(episode['date']))
dates = [date2num(episodes[0]['date'])] + ep_dates + [date2num(episodes[-1]['date'])]
tick_labels = [episodes[0]['date'].strftime("%d/%m/%Y")] + ep_nums + [episodes[-1]['date'].strftime("%d/%m/%Y")]
cbar = plt.colorbar()
cbar.set_ticks(dates)
cbar.ax.set_yticklabels(tick_labels)
cbar.ax.set_ylabel('Episode Release Date', rotation=270)
#line of best fit
#plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)), linestyle=':')
#label extreme points
label_points = sorted(zip(x, y, numbers, episodes), reverse=True)[:5]
label_points.extend(sorted(zip(x, y, numbers, episodes))[:3])
label_points.extend(sorted(zip(x, y, numbers, episodes), key=lambda x: x[1], reverse=True)[:3])
label_points.extend(sorted(zip(x, y, numbers, episodes), key=lambda x: x[1])[:3])
for x_, y_, label, ep in label_points:
print(f"\n{ep['number']}\n{ep['title']}\nduration: {ep['length']}\nlink: {ep['link']}")
if label == -1:
plt.annotate(ep['title'][:13], (x_, y_))
continue
plt.annotate(str(label), (x_, y_))
#work our the correlation
corr = np.corrcoef(x, y)[0, 1]
print(f'correlation: {corr}')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.savefig('graphs/graph.png')
| [
"thepolm3@gmail.com"
] | thepolm3@gmail.com |
0f6b4c0e8a7fc2507d68d242905734ba1e2e2592 | 6b033e3dddc280417bb97500f72e68d7378c69d6 | /IV. COLAB/Enemy-Spotted/2. Uniform Classification/crawling/crawling_version_2_deprecated.py | fa6711bbd2b14e88c54793181f0ffa2d0b600bb1 | [] | no_license | inyong37/Study | e5cb7c23f7b70fbd525066b6e53b92352a5f00bc | e36252a89b68a5b05289196c03e91291dc726bc1 | refs/heads/master | 2023-08-17T11:35:01.443213 | 2023-08-11T04:02:49 | 2023-08-11T04:02:49 | 128,149,085 | 11 | 0 | null | 2022-10-07T02:03:09 | 2018-04-05T02:17:17 | Jupyter Notebook | UTF-8 | Python | false | false | 903 | py | from urllib.request import urlopen
import argparse
import requests as req
from bs4 import BeautifulSoup
# reference: https://enjoysomething.tistory.com/42
# CLI: optional "-data" flag selects the image search query string.
parser = argparse.ArgumentParser()
parser.add_argument("-data", required=False, default='acu pattern')
args = parser.parse_args()
data = args.data  # consumed by main() below
def main():
    """Search Google for the configured query and save every result image as img_<n>.jpg."""
    search_url = "https://www.google.com/search?"
    query = {"q": data}
    response = req.get(search_url, query)
    if response.status_code != 200:
        return
    soup = BeautifulSoup(response.text, "html.parser")
    image_tags = soup.find_all("img")
    # skip the first <img> tag (original behavior preserved)
    for index, tag in enumerate(image_tags[1:]):
        content = urlopen(tag.attrs['src']).read()
        filename = "img_" + str(index + 1) + '.jpg'
        with open(filename, "wb") as f:
            f.write(content)
        print("Image Save Success")
if __name__ == "__main__":
main()
| [
"inyong1020@gmail.com"
] | inyong1020@gmail.com |
d1f909d566fa886cccb10ff084d7311a66db8b23 | d0cb6e6cf22696d852af807aeb529426035ebc91 | /subtledata/sd_collections_locations.py | 036abdee467494ea4c2b72666edcbc088033cdd4 | [
"MIT"
] | permissive | jakeharding/subtledata_python | 974417fbc7d93724ab8f6789c6d1be6b5312df62 | 7c24492dcc06b66aea3fd040c82152d2c3fdf719 | refs/heads/master | 2021-01-21T00:53:26.289601 | 2013-07-18T01:35:03 | 2013-07-18T01:35:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | __author__ = 'gsibble'
from base_types import SDFirstClassCollection
from sd_location import SDLocation
class SDLocationCollection(SDFirstClassCollection):
    """First-class collection of SubtleData locations, addressable by id or search."""

    def __init__(self, parent):
        """
        :param parent: owning API object; supplies the swagger client, API key and
            cache settings used by this collection (via SDFirstClassCollection)
        """
        super(SDLocationCollection, self).__init__(parent)

    @property
    def all(self):
        #Get all locations via swagger
        """
        All locations known to the API, wrapped as SDLocation objects.

        :return: list of SDLocation built from the swagger response
            (fetch=False: no extra per-item request)
        """
        self._swagger_locations = self._swagger_locations_api.getAllLocations(self._api_key, use_cache=self._use_cache)
        return [SDLocation(parent=self, location_id=location.location_id, fetch=False, initial_data=location) for location in self._swagger_locations]

    def get(self, location_id, use_cache=True, include_menu=False):
        """
        Fetch a single location by id.

        :param location_id: id of the location to fetch
        :param use_cache: honoured only when caching is enabled collection-wide
        :param include_menu: whether to also load the location's menu
        :return: an SDLocation instance
        """
        # collection-level cache switch overrides the per-call flag
        if not self._use_cache:
            use_cache = False
        return SDLocation(self, location_id, include_menu, use_cache)

    def filter(self, name=None, postal_code=None):
        """
        Filter locations by name and/or postal code.

        NOTE(review): not implemented — always returns an empty list.

        :param name: location name to match
        :param postal_code: postal code to match
        :return: empty list (stub)
        """
        return []

    def near(self, latitude, longitude, radius):
        """
        Find locations within ``radius`` of the given coordinate.

        NOTE(review): not implemented — always returns an empty list.

        :param latitude: latitude of the search centre
        :param longitude: longitude of the search centre
        :param radius: search radius
        :return: empty list (stub)
        """
        return []

    def create(self):
        # NOTE(review): creation endpoint not implemented.
        pass
| [
"gsibble@gmail.com"
] | gsibble@gmail.com |
af8ba639185f3e1cad576566a26e97b93daee28c | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/datahub/storekit/druid.py | 73bd116ab7896d4819f9a8cc6250460549d55a6b | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 44,065 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import random
import threading
import time
import uuid
from datetime import datetime, timedelta
import requests
from common.http import get, post
from common.log import logger
from datahub.common.const import (
APPEND_FIELDS,
BAD_FIELDS,
BIGINT,
CHECK_DIFF,
CHECK_RESULT,
CLUSTER_NAME,
CONNECTION_INFO,
COUNT,
DATASOURCE,
DRUID,
EXPIRES,
FAILED,
FIELD_NAME,
FIELD_TYPE,
FIELDS,
HOST,
ID,
INFO,
INTERVAL,
JSON_HEADERS,
LOCATION,
LONG,
MESSAGE,
MINTIME,
NAME,
PENDING,
PERIOD,
PHYSICAL_TABLE_NAME,
PORT,
REPORT_TIME,
RESULT_TABLE_ID,
RT_FIELDS,
RUNNING,
SAMPLE,
SEGMENTS,
SIZE,
STATUS,
STORAGE_CLUSTER,
STORAGE_CONFIG,
STORAGES,
STRING,
SUCCESS,
TABLE,
TABLE_RECORD_NUMS,
TABLE_SIZE_MB,
TASK,
TASK_TYPE,
TIMESTAMP,
TYPE,
UNKNOWN,
VARCHAR,
VERSION,
WAITING,
ZOOKEEPER_CONNECT,
)
from datahub.storekit import model_manager
from datahub.storekit.exceptions import (
DruidCreateTaskErrorException,
DruidDeleteDataException,
DruidHttpRequestException,
DruidQueryDataSourceException,
DruidQueryExpiresException,
DruidQueryHistoricalException,
DruidQueryTaskErrorException,
DruidQueryWorkersException,
DruidShutDownTaskException,
DruidUpdateExpiresException,
DruidZkConfException,
DruidZKPathException,
NotSupportTaskTypeException,
)
from datahub.storekit.settings import (
CLEAN_DELTA_DAY,
COORDINATOR,
DEFAULT_DRUID_EXPIRES,
DEFAULT_EXPIRES_RULE,
DEFAULT_MAX_IDLE_TIME,
DEFAULT_SEGMENT_GRANULARITY,
DEFAULT_TASK_MEMORY,
DEFAULT_TIMESTAMP_COLUMN,
DEFAULT_WINDOW_PERIOD,
DRUID_CLEAN_DEEPSTORAGE_TASK_CONFIG_TEMPLATE,
DRUID_COMPACT_SEGMENTS_TASK_CONFIG_TEMPLATE,
DRUID_MAINTAIN_TIMEOUT,
DRUID_VERSION_V1,
DRUID_VERSION_V2,
ENDPOINT_DATASOURCE_RULE,
ENDPOINT_GET_ALL_DATASOURCES,
ENDPOINT_GET_DATASOURCES,
ENDPOINT_GET_PENDING_TASKS,
ENDPOINT_GET_RUNNING_TASKS,
ENDPOINT_GET_RUNNING_WORKERS,
ENDPOINT_HISTORICAL_SIZES,
ENDPOINT_PUSH_EVENTS,
ENDPOINT_RUN_TASK,
ENDPOINT_SHUTDOWN_TASK,
EXCEPT_FIELDS,
EXECUTE_TIMEOUT,
HTTP_REQUEST_TIMEOUT,
INT_MAX_VALUE,
MAINTAIN_DELTA_DAY,
MERGE_BYTES_LIMIT,
MERGE_DAYS_DEFAULT,
OVERLORD,
TASK_CONFIG_TEMPLATE,
TASK_TYPE_PENDING,
TASK_TYPE_RUNNING,
TIME_ZONE_DIFF,
UTC_BEGIN_TIME,
UTC_FORMAT,
ZK_DRUID_PATH,
)
from datahub.storekit.util import translate_expires_day
from django.template import Context, Template
from kazoo.client import KazooClient
def initialize(rt_info):
    """
    Initialize the druid storage for a result table.

    :param rt_info: fields and config of the result table
    :return: result of the initialization, delegated to prepare()
    """
    return prepare(rt_info)
def info(rt_info):
    """
    Get the druid storage related information of a result table.

    :param rt_info: fields and config of the result table
    :return: druid storage config enriched with table schema, segments and sample data
    """
    druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
    zk_addr = conn_info[ZOOKEEPER_CONNECT]
    coordinator = _get_role_leader(zk_addr, COORDINATOR, druid[STORAGE_CLUSTER][VERSION])
    # fetch dimension/metric columns through the broker's SQL endpoint
    broker_host, broker_port = conn_info[HOST], conn_info[PORT]
    schema_url = f"http://{broker_host}:{broker_port}/druid/v2/sql/"
    schema_sql = (
        '{"query": "SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE '
        "TABLE_NAME = '%s'\"}" % physical_tn
    )
    ok, schema = post(schema_url, params=json.loads(schema_sql))
    table_schema = {}
    if ok and schema:
        for e in schema:
            # normalise names/types to lower case for later comparison
            table_schema[e["COLUMN_NAME"].lower()] = e["DATA_TYPE"].lower()
    logger.info(f"physical_tn: {physical_tn}, schema_url: {schema_url}, schema: {table_schema}")
    # fetch segments info: curl -XGET http://{router_ip:port}/druid/coordinator/v1/datasources/{datasource}
    segments_url = f"http://{coordinator}/druid/coordinator/v1/datasources/{physical_tn}"
    ok, segments = get(segments_url)
    logger.info(f"physical_tn: {physical_tn}, segments_url: {segments_url}, segments: {segments}")
    # fetch the 10 most recent rows as sample data
    sample_url = f"http://{broker_host}:{broker_port}/druid/v2/sql/"
    sample_sql = '{"query": "SELECT * FROM \\"%s\\" ORDER BY __time DESC LIMIT 10"}' % physical_tn
    ok, sample = post(sample_url, params=json.loads(sample_sql))
    logger.info(f"physical_tn: {physical_tn}, sample_url: {sample_url}, sample_sql: {sample_sql}, sample: {sample}")
    druid[INFO] = {TABLE: table_schema, SEGMENTS: segments, SAMPLE: sample}
    return druid
def get_task_status(overlord, task_id, druid_version):
    """
    Get the status of the given druid index task.

    :param druid_version: version of the druid cluster
    :param overlord: overlord role leader, in ip:port form
    :param task_id: id of the index task
    :return: one of RUNNING/PENDING/WAITING/SUCCESS/FAILED, or UNKNOWN when the query fails
    """
    status_url = f"http://{overlord}/druid/indexer/v1/task/{task_id}/status"
    # five possible states: RUNNING, PENDING, WAITING, SUCCESS, FAILED
    ok, status = get(status_url)
    if not ok:
        return UNKNOWN
    logger.info(f"task_id: {task_id}, status_url: {status_url}, status: {status}")
    runner_status = status[STATUS][STATUS]
    if druid_version == DRUID_VERSION_V1:
        return runner_status
    else:
        # newer clusters report terminal states in `status` and the live
        # state in `runnerStatusCode`
        return runner_status if runner_status in [SUCCESS, FAILED] else status[STATUS]["runnerStatusCode"]
def shutdown_index_task(overlord, task_id):
    """
    Force-shutdown the given index task. Data still held by the peon is lost,
    so use with caution.

    :param overlord: overlord role, in ip:port form
    :param task_id: id of the index task
    """
    # shutdown: curl -XPOST http://{router_ip:port}/druid/overlord/v1/task/{task_id}/shutdown
    shutdown_url = f"http://{overlord}/druid/indexer/v1/task/{task_id}/shutdown"
    # best effort: retry up to 3 times
    for i in range(3):
        try:
            resp = requests.post(shutdown_url, headers=JSON_HEADERS, timeout=HTTP_REQUEST_TIMEOUT)
            if resp.status_code == 200:
                break
            logger.error(
                f"{i} times, shutdown index task failed with task_id: {task_id}, shutdown_url: {shutdown_url}, "
                f"resp.text: {resp.text}"
            )
        except Exception:
            # fixed: the original referenced resp.text here, but resp is unbound
            # when requests.post itself raises (NameError masked the real error)
            logger.error(
                f"{i} times, shutdown index task failed with task_id: {task_id}, shutdown_url: {shutdown_url}",
                exc_info=True,
            )
def merge_segments(zk_addr, datasource, begin_date, end_date, druid_version, timeout, merge_days):
    """
    Merge (compact) the segments of a datasource, day by day, within a date range.

    :param merge_days: number of days merged in one pass
    :param zk_addr: zookeeper connection string
    :param datasource: datasource to compact
    :param begin_date: start date of the merge window
    :param end_date: end date of the merge window
    :param druid_version: version of the druid cluster
    :param timeout: merge task execution timeout, in minutes
    """
    coordinator = _get_role_leader(zk_addr, COORDINATOR, druid_version)
    # skip when the window's segments are already large or few enough
    if not should_merge(coordinator, datasource, begin_date, end_date, merge_days):
        return
    interval = f"{begin_date}/{end_date}"
    overlord = _get_role_leader(zk_addr, OVERLORD, druid_version)
    execute_task(DRUID_COMPACT_SEGMENTS_TASK_CONFIG_TEMPLATE, overlord, datasource, interval, druid_version, timeout)
def execute_task(task_template, overlord, datasource, interval, druid_version, timeout=60):
    """
    Submit a druid index task rendered from the template and wait until it finishes.

    :param task_template: task config template (django template syntax)
    :param overlord: overlord leader process, in ip:port form
    :param datasource: druid datasource name
    :param interval: time interval the task operates on
    :param druid_version: version of the druid cluster
    :param timeout: task execution timeout, in minutes
    """
    data = Template(task_template)
    context = Context({DATASOURCE: datasource, INTERVAL: interval})
    body = data.render(context)
    task_url = f"http://{overlord}/druid/indexer/v1/task"
    ok, task = post(task_url, params=json.loads(body))
    task_id = task["task"] if ok else ""
    logger.info(
        f"datasource: {datasource}, overlord: {overlord}, interval: {interval}, task config: {body}, task_id: {task_id}"
    )
    begin_time = datetime.now()
    time_delta = timedelta(minutes=timeout)
    # poll until the task reaches a terminal state or the timeout elapses
    while True:
        time.sleep(10)
        status = get_task_status(overlord, task_id, druid_version)
        if status == RUNNING:
            if datetime.now() - begin_time > time_delta:
                # ran too long: force shutdown to release the task slot
                shutdown_index_task(overlord, task_id)
                logger.warning(f"datasource: {datasource}, task_id {task_id} timeout, has been shutdown")
                return
        elif status in [PENDING, WAITING]:
            # no capacity to run it right now; give up instead of queueing
            shutdown_index_task(overlord, task_id)
            return
        else:
            # SUCCESS / FAILED / UNKNOWN: nothing more to do
            return
def clean_unused_segments(cluster_name, druid_version, timeout=60):
    """
    Clean unused deep-storage segments for every datasource of one cluster.

    :param cluster_name: cluster name
    :param druid_version: version of the druid cluster
    :param timeout: clean task execution timeout, in minutes
    :return: True when cleanup tasks were submitted, False when datasource lookup failed
    """
    coordinator = get_leader(cluster_name, COORDINATOR)
    ok, datasources_all = get(f"http://{coordinator}{ENDPOINT_GET_ALL_DATASOURCES}")
    if not ok or not datasources_all:
        return False
    ok, datasources_used = get(f"http://{coordinator}{ENDPOINT_GET_DATASOURCES}")
    if not ok:
        return False
    logger.info(f"datasources_all: {datasources_all}, datasources_used: {datasources_used}")
    for datasource in datasources_all:
        try:
            # default interval wipes everything; for datasources still in use,
            # only wipe segments older than the earliest used segment minus CLEAN_DELTA_DAY
            begin_date, end_date = "1000-01-01", "3000-01-01"
            if datasource in datasources_used:
                coordinator = get_leader(cluster_name, COORDINATOR)
                ok, resp = get(f"http://{coordinator}/druid/coordinator/v1/datasources/{datasource}/")
                if not ok:
                    continue
                end_date = (
                    datetime.strptime(resp[SEGMENTS][MINTIME], "%Y-%m-%dT%H:%M:%S.000Z") - timedelta(CLEAN_DELTA_DAY)
                ).strftime("%Y-%m-%d")
            interval = f"{begin_date}/{end_date}"
            overlord = get_leader(cluster_name, OVERLORD)
            logger.info(f"datasource: {datasource}, overlord: {overlord}, interval: {interval}")
            execute_task(
                DRUID_CLEAN_DEEPSTORAGE_TASK_CONFIG_TEMPLATE, overlord, datasource, interval, druid_version, timeout
            )
        except Exception:
            logger.warning(f"clean unused segments failed for datasource {datasource}", exc_info=True)
    return True
def should_merge(coordinator, datasource, begin_date, end_date, merge_days=MERGE_DAYS_DEFAULT):
    """
    Decide whether the segments of a datasource in a window need merging.

    Merging is skipped when:
    1) the day count is already at most merge_days, or there is at most one file
    2) the total size exceeds MERGE_BYTES_LIMIT

    :param merge_days: number of days in the merge window
    :param coordinator: coordinator role leader node
    :param datasource: druid datasource name
    :param begin_date: left bound of the merge interval
    :param end_date: right bound of the merge interval
    :return: True when a merge task should be submitted
    """
    segments_url = (
        f"http://{coordinator}/druid/coordinator/v1/datasources/{datasource}/intervals/"
        f"{begin_date}_{end_date}?simple"
    )
    ok, segments = get(segments_url)
    # segments are merged per day; after merging, at most one segment per day is expected
    if not ok or len(segments) <= merge_days:
        return False
    size = 0
    file_count = 0
    for value in segments.values():
        size += value[SIZE]
        file_count += value[COUNT]
    # NOTE(review): logged before the final decision, so "status: True" may not
    # match the actual return value below
    logger.info(
        f"datasource: {datasource}, segments_url: {segments_url}, segments: {segments}, size: {size}, "
        f"file_count: {file_count}, status: True"
    )
    if file_count <= 1 or size > MERGE_BYTES_LIMIT:
        return False
    return True
def alter(rt_info):
    """
    Alter the druid storage info of a result table; handled the same way as initialize.

    :param rt_info: fields and config of the result table
    :return: result of the change, delegated to prepare()
    """
    return prepare(rt_info)
def prepare(rt_info):
    """
    Prepare the druid storage bound to the result table (create tables / add columns).
    Currently a no-op that always reports success.

    :param rt_info: config of the result table
    :return: True
    """
    return True
def maintain_merge_segments(zk_addr, physical_tn, expires_day, delta_day, druid_version, timeout, merge_days):
    """
    Merge-segment step shared by maintain and maintain_all.

    :param zk_addr: zookeeper connection string
    :param physical_tn: physical table name (datasource)
    :param expires_day: data retention in days
    :param delta_day: number of most recent days to skip
    :param druid_version: version of the druid cluster
    :param timeout: execution timeout of the druid task
    :param merge_days: number of days merged in one pass
    """
    expires_date = (datetime.today() - timedelta(expires_day)).strftime("%Y-%m-%d")
    end_date = (datetime.today() - timedelta(delta_day)).strftime("%Y-%m-%d")
    begin_date = (datetime.today() - timedelta(delta_day + merge_days)).strftime("%Y-%m-%d")
    logger.info(
        f"physical_tn: {physical_tn}, expires_day: {expires_day}, begin_date: {begin_date}, end_date: {end_date}"
    )
    # only merge windows whose data has not expired yet
    if end_date >= expires_date:
        merge_segments(zk_addr, physical_tn, begin_date, end_date, druid_version, timeout, merge_days)
def set_retain_rule(coordinator, cluster_name, physical_tn, expires_day, druid_version):
    """
    Set the data retention rule of a druid datasource.

    :param coordinator: coordinator role leader, in hostname:port form
    :param cluster_name: cluster name
    :param physical_tn: physical table name (datasource)
    :param expires_day: data retention in days
    :param druid_version: version of the druid cluster
    :return: True when the rule was accepted, otherwise False
    """
    rules = build_retain_rule(druid_version, expires_day)
    url = f"http://{coordinator}/druid/coordinator/v1/rules/{physical_tn}"
    # timeout added so a hung coordinator cannot block maintain threads forever
    resp = requests.post(url, data=rules, headers=JSON_HEADERS, timeout=HTTP_REQUEST_TIMEOUT)
    if resp.status_code != 200:
        logger.warning(
            f"{cluster_name}: failed to set retention rule for datasource {physical_tn}. "
            f"status_code: {resp.status_code}, response: {resp.text}"
        )
        return False
    return True
def build_retain_rule(druid_version, expires_day):
    """
    Build the retention rules payload for the coordinator API.

    :param expires_day: data retention in days
    :param druid_version: version of the druid cluster
    :return: JSON string with a loadByPeriod rule followed by a dropForever rule
    """
    replicants = {"_default_tier": 2}
    # clusters on the legacy version also keep replicas on the hot tier
    if druid_version == DRUID_VERSION_V1:
        replicants["tier_hot"] = 2
    retention = [
        {
            PERIOD: f"P{expires_day}D",
            "includeFuture": True,
            "tieredReplicants": replicants,
            TYPE: "loadByPeriod",
        },
        {"type": "dropForever"},
    ]
    return json.dumps(retention)
def kill_waiting_tasks(cluster_name):
    """
    Kill all WAITING-state tasks on the druid cluster (best effort).

    :param cluster_name: cluster name
    """
    try:
        overlord = get_leader(cluster_name, OVERLORD)
        waiting_tasks_url = "http://" + overlord + "/druid/indexer/v1/waitingTasks"
        res = requests.get(waiting_tasks_url, verify=False, timeout=HTTP_REQUEST_TIMEOUT)
        # fixed: json.loads(..., encoding="utf-8") raises TypeError on python >= 3.9
        waiting_tasks = json.loads(res.text)
        for task_json in waiting_tasks:
            kill_task_url = "http://" + overlord + "/druid/indexer/v1/task/" + task_json[ID] + "/shutdown"
            # timeout added so an unresponsive overlord cannot hang maintenance
            requests.post(kill_task_url, headers=JSON_HEADERS, verify=False, timeout=HTTP_REQUEST_TIMEOUT)
    except Exception:
        logger.warning("failed to kill waiting tasks", exc_info=True)
def kill_pending_tasks(cluster_name):
    """
    Kill all PENDING-state tasks on the druid cluster (best effort).

    :param cluster_name: cluster name
    """
    try:
        overlord = get_leader(cluster_name, OVERLORD)
        pending_tasks_url = "http://" + overlord + "/druid/indexer/v1/pendingTasks"
        res = requests.get(pending_tasks_url, verify=False, timeout=HTTP_REQUEST_TIMEOUT)
        # fixed: json.loads(..., encoding="utf-8") raises TypeError on python >= 3.9
        pending_tasks = json.loads(res.text)
        for task_json in pending_tasks:
            kill_task_url = "http://" + overlord + "/druid/indexer/v1/task/" + task_json[ID] + "/shutdown"
            # timeout added so an unresponsive overlord cannot hang maintenance
            requests.post(kill_task_url, headers=JSON_HEADERS, verify=False, timeout=HTTP_REQUEST_TIMEOUT)
    except Exception:
        logger.warning("failed to kill pending tasks", exc_info=True)
def maintain(rt_info, delta_day=MAINTAIN_DELTA_DAY, timeout=EXECUTE_TIMEOUT, merge_days=MERGE_DAYS_DEFAULT):
    """
    Maintain the retention rule of one result table's druid datasource and merge its segments.

    :param merge_days: number of days merged in one pass
    :param rt_info: config of the result table
    :param delta_day: day offset used when merging segments
    :param timeout: execution timeout of the druid index task
    :return: True
    """
    druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
    cluster_name, version = druid[STORAGE_CLUSTER][CLUSTER_NAME], druid[STORAGE_CLUSTER][VERSION]
    coordinator = get_leader(cluster_name, COORDINATOR)
    expires_day = translate_expires_day(druid[EXPIRES])
    # apply the user-configured data retention rule
    set_retain_rule(coordinator, cluster_name, physical_tn, expires_day, version)
    # merge segments
    zk_addr = conn_info[ZOOKEEPER_CONNECT]
    maintain_merge_segments(zk_addr, physical_tn, expires_day, delta_day, version, timeout, merge_days)
    return True
def maintain_all(delta_day=MAINTAIN_DELTA_DAY):
    """
    Maintain every druid-backed result table: apply retention rules per table,
    then run per-cluster cleanup/compaction in parallel threads.

    :param delta_day: day offset (not referenced in this body; kept for interface compatibility)
    :return: True
    """
    start = time.time()
    # result-table level maintenance: mainly applying the retention time
    storage_rt_list = model_manager.get_storage_rt_objs_by_type(DRUID)
    for rt_storage in storage_rt_list:
        try:
            conn_info = json.loads(rt_storage.storage_cluster_config.connection_info)
            zk_addr = conn_info[ZOOKEEPER_CONNECT]
            coordinator = _get_role_leader(zk_addr, COORDINATOR, rt_storage.storage_cluster_config.version)
            expires_day = translate_expires_day(rt_storage.expires)
            physical_tn = rt_storage.physical_table_name
            cluster_name = rt_storage.storage_cluster_config.cluster_name
            # apply the data retention rule
            set_retain_rule(
                coordinator, cluster_name, physical_tn, expires_day, rt_storage.storage_cluster_config.version
            )
        except Exception:
            logger.warning(
                f"{rt_storage.storage_cluster_config.cluster_name}: failed to maintain the retention rule of "
                f"datasource {rt_storage.physical_table_name}",
                exc_info=True,
            )
    set_rule_finish = time.time()
    # cluster level maintenance: clean deep storage and compact segments
    cluster_list = model_manager.get_storage_cluster_configs_by_type(DRUID)
    check_threads = []
    for cluster in cluster_list:
        cluster_name = cluster[CLUSTER_NAME]
        thread = threading.Thread(target=maintain_druid_cluster, name=cluster_name, args=(cluster_name,))
        # daemon threads: they terminate with the main thread
        thread.setDaemon(True)
        check_threads.append(thread)
        thread.start()
    # join all threads so every cluster check completes;
    # a timeout prevents one broken cluster from blocking later maintenance
    for th in check_threads:
        th.join(timeout=DRUID_MAINTAIN_TIMEOUT)
    end = time.time()
    logger.info(
        f"druid maintain_all total time: {end - start}(s), set rule take {set_rule_finish - start}(s), "
        f"cluster maintain takes {end - set_rule_finish}(s)"
    )
    return True
def maintain_druid_cluster(cluster_name):
    """Serially maintain one cluster: clean unused deep-storage data and compact segments.

    :param cluster_name: name of the druid cluster
    """
    start = time.time()
    cluster = model_manager.get_storage_cluster_config(cluster_name, DRUID)
    version = cluster[VERSION]
    clean_unused_segments(cluster_name, version, EXECUTE_TIMEOUT)
    clean_finish = time.time()
    # Druid 0.11 (v1) has no compact endpoint, so only compact on v2 clusters.
    if version == DRUID_VERSION_V2:
        segments_compaction(cluster_name, MAINTAIN_DELTA_DAY, MERGE_DAYS_DEFAULT, EXECUTE_TIMEOUT)
    end = time.time()
    # Bug fix: the message was a plain string interpolating undefined names
    # (start/end/clean_finish were never set and the f-prefix was missing);
    # time the phases and log with a real f-string.
    logger.info(
        f"{cluster_name}: maintain_druid_cluster total time: {end - start}(s), clean_unused_segments task "
        f"{clean_finish - start}(s), compaction takes {end - clean_finish}(s)"
    )
def check_schema(rt_info):
    """Compare the RT fields against the druid datasource schema.

    :param rt_info: result table configuration
    :return: dict holding the RT schema, the druid schema, the diff and the verdict
    """
    report = {RT_FIELDS: {}, "druid_fields": {}, CHECK_RESULT: True, CHECK_DIFF: {}}
    # Collect the RT side of the schema, skipping reserved/internal fields.
    for field in rt_info[FIELDS]:
        name = field[FIELD_NAME]
        if name.lower() in EXCEPT_FIELDS:
            continue
        report[RT_FIELDS][name] = field[FIELD_TYPE]
    _, physical_tn, conn_info = _get_druid_storage_info(rt_info)
    broker_host, broker_port = conn_info[HOST], conn_info[PORT]
    druid_schema_url = f"http://{broker_host}:{broker_port}/druid/v2/sql/"
    druid_schema_sql = (
        '{"query": "SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS '
        "WHERE TABLE_NAME = '%s'\"}" % physical_tn
    )
    ok, druid_schema = post(druid_schema_url, params=json.loads(druid_schema_sql))
    if not ok or not druid_schema:
        # Broker unreachable or empty answer: return the RT-only report as-is.
        return report
    logger.info(f"physical_tn: {physical_tn}, druid_schema_url: {druid_schema_url}, druid_schema: {druid_schema}")
    for column in druid_schema:
        report["druid_fields"][column["COLUMN_NAME"].lower()] = column["DATA_TYPE"].lower()
    append_fields, bad_fields = check_rt_druid_fields(report[RT_FIELDS], report["druid_fields"])
    report[CHECK_DIFF] = {APPEND_FIELDS: append_fields, BAD_FIELDS: bad_fields}
    # Any type mismatch fails the overall check.
    report[CHECK_RESULT] = not bad_fields
    logger.info(f"diff result: {report}")
    return report
def check_rt_druid_fields(rt_table_columns, druid_columns):
    """Diff the RT fields against the physical druid table columns.

    :param rt_table_columns: RT fields mapped to their druid types
    :param druid_columns: physical table columns (lower-cased name -> type)
    :return: (append_fields, bad_fields) — fields to add, and fields whose type differs
    """
    append_fields, bad_fields = [], []
    for key, value in rt_table_columns.items():
        col_name, col_type = key.lower(), value.lower()
        # Bug fix: use a membership test; indexing raised KeyError for columns
        # absent in druid instead of classifying them as fields to append.
        if col_name in druid_columns:
            druid_col_type = druid_columns[col_name]
            # Types must match directly or via the accepted aliases
            # string/varchar and long/bigint.
            ok = (
                (col_type == druid_col_type)
                or (col_type == STRING and druid_col_type == VARCHAR)
                or (col_type == LONG and druid_col_type == BIGINT)
            )
            if not ok:
                bad_fields.append({col_name: f"difference between rt and druid({col_type} != {druid_col_type})"})
        else:
            append_fields.append({FIELD_NAME: col_name, FIELD_TYPE: col_type})
    return append_fields, bad_fields
def clusters():
    """Return the list of druid storage clusters."""
    return model_manager.get_storage_cluster_configs_by_type(DRUID)
def create_task(rt_info):
    """Submit an ingestion task for the result table and return its push endpoint.

    :param rt_info: result table configuration
    :return: the running task's push-events location (empty string if it never appears)
    :raises DruidCreateTaskErrorException: when the overlord rejects the task
    """
    druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
    overlord = _get_role_leader(
        conn_info.get(ZOOKEEPER_CONNECT), OVERLORD, druid[STORAGE_CLUSTER][VERSION]
    )
    task_config = _get_task_config(rt_info)
    url = f"http://{overlord}{ENDPOINT_RUN_TASK}"
    ok, resp = post(url=url, params=json.loads(task_config))
    if not ok or not resp[TASK]:
        logger.error(f"create task error, url: {url}, param: {task_config}, result: {resp}")
        raise DruidCreateTaskErrorException(message_kv={RESULT_TABLE_ID: rt_info[RESULT_TABLE_ID]})
    # Poll the overlord until the newly created task shows up among the running tasks.
    return _get_task_location(overlord, resp[TASK])
def _get_task_location(overlord, task_id, max_times=3):
    """Look up the push endpoint of a task, retrying while it is still starting.

    :param overlord: overlord node
    :param task_id: task id
    :param max_times: number of extra retries before giving up
    :return: the task's push-events url, or an empty string when not found in time
    """
    # Iterative form of the original retry: check, sleep 5s, decrement —
    # max_times + 1 checks in total, then give up with "".
    while max_times >= 0:
        for task in _get_tasks(overlord, TASK_TYPE_RUNNING):
            if task[ID] == task_id:
                return f"http://{task[LOCATION][HOST]}:{task[LOCATION][PORT]}{ENDPOINT_PUSH_EVENTS}"
        time.sleep(5)
        max_times = max_times - 1
    return ""
def shutdown_task(rt_info):
    """Stop the ingestion task(s) of the given result table.

    :param rt_info: result table configuration
    :return: True once every matching task is gone
    """
    druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
    overlord = _get_role_leader(
        conn_info[ZOOKEEPER_CONNECT], OVERLORD, druid[STORAGE_CLUSTER][VERSION]
    )
    return _shutdown_task_with_retry(overlord, physical_tn)
def _shutdown_task_with_retry(overlord, data_source, max_times=3):
    """
    Stop every running/pending task of a datasource, retrying until none remains.

    :param overlord: overlord node
    :param data_source: datasource name, substring-matched against task ids
    :param max_times: maximum number of retries
    :return: True once no task of the datasource is left
    :raises DruidShutDownTaskException: on retry exhaustion or a failed shutdown call
    """
    if max_times < 0:
        raise DruidShutDownTaskException(message_kv={MESSAGE: "shut down overtime"})
    running_tasks = _get_tasks(overlord, TASK_TYPE_RUNNING)
    pending_tasks = _get_tasks(overlord, TASK_TYPE_PENDING)
    tasks = running_tasks + pending_tasks
    # counter counts tasks that do NOT belong to the datasource; when every task is
    # unrelated (counter == len(tasks), which includes the empty list) we are done.
    counter = 0
    for task in tasks:
        # NOTE(review): find(...) > 0 treats a match at index 0 as "unrelated";
        # this assumes task ids always embed the datasource after a prefix — confirm.
        if task[ID].find(data_source) > 0:
            peon_url = f"http://{task[LOCATION][HOST]}:{task[LOCATION][PORT]}{ENDPOINT_SHUTDOWN_TASK}"
            resp = requests.post(peon_url)
            logger.info(f"shutdown task info, url: {peon_url}, result: {resp.content}")
            if resp.status_code != 200:
                logger.error(f"shutdown task exception, {resp}")
                raise DruidShutDownTaskException(message_kv={MESSAGE: resp})
            logger.info(f"shutdown task success, peon_url: {peon_url}, task_id: {task[ID]}")
        else:
            counter = counter + 1
    if counter == len(tasks):
        return True
    # Some tasks still matched: give them time to die, then re-check.
    time.sleep(5)
    max_times = max_times - 1
    return _shutdown_task_with_retry(overlord, data_source, max_times)
def _get_druid_storage_info(rt_info):
    """Extract the druid storage config of a result table.

    :param rt_info: result table configuration
    :return: (druid storage dict, physical table name, parsed connection info)
    """
    druid = rt_info[STORAGES][DRUID]
    physical_tn = druid[PHYSICAL_TABLE_NAME]
    conn_info = json.loads(druid[STORAGE_CLUSTER][CONNECTION_INFO])
    return druid, physical_tn, conn_info
def _get_role_leader(zk_addr, zk_node, druid_version):
    """Resolve the current leader of a druid role (overlord/coordinator) via zookeeper.

    :param zk_addr: zookeeper connection string
    :param zk_node: role node name under the druid zk path
    :param druid_version: druid version (node names are lower case on v1, upper otherwise)
    :return: leader "host:port" with the http:// scheme removed
    :raises DruidZkConfException: when the zk path is missing or empty
    :raises DruidZKPathException: when zk_node is not overlord/coordinator
    :raises DruidHttpRequestException: when the leader endpoint does not answer 200
    """
    path = f"{ZK_DRUID_PATH}/{zk_node.lower() if druid_version == DRUID_VERSION_V1 else zk_node.upper()}"
    zk = KazooClient(hosts=zk_addr, read_only=True)
    zk.start()
    try:
        result = zk.get_children(path)
    finally:
        # Always release the zookeeper connection, even when get_children raises.
        zk.stop()
    if not result or len(result) == 0:
        logger.error(f"not found any zk path {path}, or this path is empty")
        raise DruidZkConfException()
    # Any registered node of the role can tell us who the leader is.
    role = random.sample(result, 1)[0]
    if zk_node in ["overlord", "OVERLORD"]:
        leader_url = f"http://{role}/druid/indexer/v1/leader"
    elif zk_node in ["coordinator", "COORDINATOR"]:
        leader_url = f"http://{role}/druid/coordinator/v1/leader"
    else:
        logger.error(f"the zk path {path} is not for overlord or coordinator, please input a correct path")
        raise DruidZKPathException()
    resp = requests.get(leader_url, timeout=HTTP_REQUEST_TIMEOUT)
    if resp.status_code != 200:
        logger.error(f"failed to get leader from url: {leader_url}")
        raise DruidHttpRequestException()
    # Bug fix: str.strip("http://") strips the characters h/t/p/:/ from BOTH ends,
    # so it could also eat a leading "h" of the hostname; remove the scheme prefix
    # explicitly instead.
    leader = resp.text
    if leader.startswith("http://"):
        leader = leader[len("http://"):]
    return leader
def _get_task_config(rt_info):
    """Build the JSON task config for a realtime ingestion task.

    :param rt_info: result table configuration
    :return: rendered task config as a JSON string
    """
    druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
    # Hoisted: compute the dimension/metric specs once (the original called
    # _get_dimensions_and_metrics twice for the same rt_info).
    dims_and_metrics = _get_dimensions_and_metrics(rt_info)
    task_config_dict = {
        "availability_group": f"availability-group-{str(uuid.uuid4())[0:8]}",
        "required_capacity": DEFAULT_TASK_MEMORY,
        "data_source": physical_tn,
        "metrics_spec": dims_and_metrics["metrics_fields"],
        "segment_granularity": DEFAULT_SEGMENT_GRANULARITY,
        "timestamp_column": DEFAULT_TIMESTAMP_COLUMN,
        "dimensions_spec": dims_and_metrics["dimensions_fields"],
        "dimension_exclusions": [],
        "max_idle_time": DEFAULT_MAX_IDLE_TIME,
        "window_period": DEFAULT_WINDOW_PERIOD,
        "partition_num": random.randint(1, INT_MAX_VALUE),
        "context": {
            # JVM sizing: split the task memory budget ~6/11 heap, ~5/11 direct memory,
            # 1/11 for the processing buffer.
            "druid.indexer.fork.property.druid.processing.buffer.sizeBytes": DEFAULT_TASK_MEMORY * 1024 * 1024 / 11,
            "druid.indexer.runner.javaOpts": "-Xmx%dM -XX:MaxDirectMemorySize=%dM"
            % (DEFAULT_TASK_MEMORY * 6 / 11 + 1, DEFAULT_TASK_MEMORY * 5 / 11 + 1),
        },
    }
    # Render the template and normalize python dict quoting to JSON double quotes.
    task_config = TASK_CONFIG_TEMPLATE.format(**task_config_dict).replace("'", '"')
    return task_config
def _get_dimensions_and_metrics(rt_info):
    """Resolve the dimension and metric specs for a result table.

    Falls back to all RT fields as dimensions and to a reserved count metric when
    the storage config does not override them.

    :param rt_info: result table configuration
    :return: dict with "dimensions_fields" and "metrics_fields"
    """
    druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
    storage_config = json.loads(druid.get(STORAGE_CONFIG, "{}"))
    dimensions = storage_config.get("dimensions_fields", [])
    if not dimensions:
        dimensions = [{NAME: str(field[FIELD_NAME]), TYPE: str(field[FIELD_TYPE])} for field in rt_info[FIELDS]]
    metrics = storage_config.get("metrics_fields", [])
    if not metrics:
        metrics = [{TYPE: "count", NAME: "__druid_reserved_count", "fieldName": ""}]
    return {"dimensions_fields": dimensions, "metrics_fields": metrics}
def _get_tasks(overlord_conn_info, task_type):
    """Fetch the task list of the given type from the overlord.

    :param overlord_conn_info: overlord "host:port"
    :param task_type: TASK_TYPE_RUNNING or TASK_TYPE_PENDING
    :return: list of tasks of that type
    :raises NotSupportTaskTypeException: for any other task type
    :raises DruidQueryTaskErrorException: when the overlord query fails
    """
    if task_type not in [TASK_TYPE_RUNNING, TASK_TYPE_PENDING]:
        # Bug fix: message_kv was a set literal {TASK_TYPE, task_type};
        # it must be a dict, as in the other raise sites of this module.
        raise NotSupportTaskTypeException(message_kv={TASK_TYPE: task_type})
    if task_type == TASK_TYPE_RUNNING:
        result, resp = get(f"http://{overlord_conn_info}{ENDPOINT_GET_RUNNING_TASKS}")
    else:
        result, resp = get(f"http://{overlord_conn_info}{ENDPOINT_GET_PENDING_TASKS}")
    if not result:
        raise DruidQueryTaskErrorException()
    return resp
def get_roles(cluster_name):
    """List every druid role registered in zookeeper for a cluster.

    :param cluster_name: cluster name
    :return: dict of role name -> list of registered nodes
    :raises DruidZkConfException: when the druid zk path is missing or empty
    """
    cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
    conn_info = json.loads(cluster.connection_info)
    zk_addr = conn_info[ZOOKEEPER_CONNECT]
    zk = KazooClient(hosts=zk_addr, read_only=True)
    zk.start()
    try:
        result = zk.get_children(ZK_DRUID_PATH)
        if not result or len(result) == 0:
            logger.error("Failed to get overload node")
            raise DruidZkConfException()
        return {role: zk.get_children(f"{ZK_DRUID_PATH}/{role}") for role in result}
    finally:
        # Bug fix: the connection was leaked when a get_children call raised;
        # always stop the zookeeper client.
        zk.stop()
def get_datasources(cluster_name):
    """Return the datasource names of a druid cluster.

    :param cluster_name: cluster name
    :raises DruidQueryDataSourceException: when the coordinator query fails
    """
    cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
    conn_info = json.loads(cluster.connection_info)
    coordinator = _get_role_leader(conn_info[ZOOKEEPER_CONNECT], COORDINATOR, cluster.version)
    ok, resp = get(f"http://{coordinator}{ENDPOINT_GET_DATASOURCES}")
    if not ok:
        raise DruidQueryDataSourceException(message_kv={MESSAGE: resp})
    return resp
def get_workers(cluster_name):
    """Return the middle-manager workers of a druid cluster.

    :param cluster_name: cluster name
    :return: worker information from the overlord
    :raises DruidQueryWorkersException: when the overlord query fails
    """
    overlord = get_leader(cluster_name, OVERLORD)
    ok, resp = get(f"http://{overlord}{ENDPOINT_GET_RUNNING_WORKERS}")
    if not ok:
        raise DruidQueryWorkersException(message_kv={MESSAGE: resp})
    return resp
def get_historical(cluster_name):
    """Return the historical node capacity info of a druid cluster.

    :param cluster_name: cluster name
    :raises DruidQueryHistoricalException: when the coordinator query fails
    """
    coordinator = get_leader(cluster_name, COORDINATOR)
    ok, resp = get(f"http://{coordinator}{ENDPOINT_HISTORICAL_SIZES}")
    if not ok:
        raise DruidQueryHistoricalException(message_kv={MESSAGE: resp})
    return resp
def get_cluster_capacity(cluster_name):
    """
    Collect slot, storage and segment-count capacity metrics for a cluster.

    :param cluster_name: cluster name
    :return: capacity dict; any metric that could not be collected stays at zero
    """
    cluster_capacity = {
        "slot_capacity": 0,
        "slot_capacity_used": 0,
        "slot_usage": 0,
        "used_size": 0,
        "max_size": 0,
        "storage_usage": 0,
        "segments_count": 0,
        "timestamp": time.time(),
    }
    try:
        # Slot capacity, summed over all middle-manager workers.
        worker_info = get_workers(cluster_name)
        if worker_info:
            for worker in worker_info:
                cluster_capacity["slot_capacity"] = cluster_capacity["slot_capacity"] + worker["worker"]["capacity"]
                cluster_capacity["slot_capacity_used"] = (
                    cluster_capacity["slot_capacity_used"] + worker["currCapacityUsed"]
                )
        # Historical storage capacity (current vs max bytes).
        historical_info = get_historical(cluster_name)
        if historical_info:
            for historical in historical_info:
                if historical[TYPE] == "historical":
                    cluster_capacity["used_size"] = cluster_capacity["used_size"] + historical["currSize"]
                    cluster_capacity["max_size"] = cluster_capacity["max_size"] + historical["maxSize"]
        # Total segment count across all datasources, one coordinator call per datasource.
        coordinator = get_leader(cluster_name, COORDINATOR)
        datasource_list_url = f"http://{coordinator}/druid/coordinator/v1/datasources/"
        ok, datasource_list = get(datasource_list_url)
        segments_sum = 0
        for physical_tn in datasource_list:
            segments_url = f"http://{coordinator}/druid/coordinator/v1/datasources/{physical_tn}"
            ok, datasource_meta = get(segments_url)
            segments_sum += datasource_meta[SEGMENTS][COUNT]
        cluster_capacity["segments_count"] = segments_sum
        # Usage percentages, guarded against division by zero.
        cluster_capacity["slot_usage"] = (
            int(100 * cluster_capacity["slot_capacity_used"] / cluster_capacity["slot_capacity"])
            if cluster_capacity["slot_capacity"] > 0
            else 0
        )
        cluster_capacity["storage_usage"] = (
            int(100 * cluster_capacity["used_size"] / cluster_capacity["max_size"])
            if cluster_capacity["max_size"] > 0
            else 0
        )
        cluster_capacity[TIMESTAMP] = time.time()
    except Exception:
        # Best effort: return whatever was collected before the failure.
        logger.warning("failed to execute function druid.get_cluster_capacity", exc_info=True)
    return cluster_capacity
def get_table_capacity(conn_info):
    """Read per-datasource capacity (size in MB and row count) via druid SQL.

    :param conn_info: cluster connection info (broker host/port)
    :return: dict of datasource -> size/rows/report-time; empty on failure
    """
    url = f"http://{conn_info[HOST]}:{conn_info[PORT]}/druid/v2/sql/"
    sql = (
        '{"query": "SELECT datasource, sum(size * num_replicas)/1000000 as total_size, sum(num_rows) as total_nums '
        'FROM sys.segments WHERE is_available = 1 GROUP BY datasource"} '
    )
    rt_size = {}
    try:
        ok, capacity_rows = post(url, params=json.loads(sql))
        if ok and capacity_rows:
            for row in capacity_rows:
                rt_size[row[DATASOURCE]] = {
                    TABLE_SIZE_MB: row["total_size"],
                    TABLE_RECORD_NUMS: row["total_nums"],
                    REPORT_TIME: time.time(),
                }
    except Exception:
        logger.warning("failed to execute function druid.get_table_capacity", exc_info=True)
    return rt_size
def get_leader(cluster_name, role_type):
    """Resolve the leader node of a druid role for a cluster.

    :param cluster_name: cluster name
    :param role_type: OVERLORD or COORDINATOR
    :return: leader "host:port"
    """
    cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
    conn_info = json.loads(cluster.connection_info)
    return _get_role_leader(conn_info[ZOOKEEPER_CONNECT], role_type, cluster.version)
def get_tasks(cluster_name, task_type):
    """List the tasks of a given type (running/pending) on a cluster.

    :param cluster_name: cluster name
    :param task_type: TASK_TYPE_RUNNING or TASK_TYPE_PENDING
    :raises NotSupportTaskTypeException: for any other task type
    :raises DruidQueryTaskErrorException: when the overlord query fails
    """
    cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, DRUID)
    conn_info = json.loads(cluster.connection_info)
    overlord = _get_role_leader(conn_info[ZOOKEEPER_CONNECT], OVERLORD, cluster.version)
    if task_type == TASK_TYPE_RUNNING:
        endpoint = ENDPOINT_GET_RUNNING_TASKS
    elif task_type == TASK_TYPE_PENDING:
        endpoint = ENDPOINT_GET_PENDING_TASKS
    else:
        raise NotSupportTaskTypeException(message_kv={TASK_TYPE: task_type})
    ok, resp = get(f"http://{overlord}{endpoint}")
    if not ok:
        raise DruidQueryTaskErrorException()
    return resp
def update_expires(rt_info, expires):
    """Update the retention (expiry) rule of the result table's datasource.

    :param rt_info: result table configuration
    :param expires: expiry period; falls back to the stored/default value when falsy
    :return: True on success
    :raises DruidQueryExpiresException: when the current rule cannot be read
    :raises DruidUpdateExpiresException: when posting the new rule fails
    """
    druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
    if not expires:
        expires = druid.get(EXPIRES, DEFAULT_DRUID_EXPIRES)
    coordinator = _get_role_leader(conn_info[ZOOKEEPER_CONNECT], COORDINATOR, druid[STORAGE_CLUSTER][VERSION])
    rule_url = f"http://{coordinator}{ENDPOINT_DATASOURCE_RULE}/{physical_tn}"
    ok, rule = get(rule_url)
    if not ok:
        raise DruidQueryExpiresException(message_kv={MESSAGE: f"{physical_tn}获取数据过期时间异常"})
    if not rule or len(rule) == 0:
        # No rule configured yet: start from the default retention rule.
        rule = DEFAULT_EXPIRES_RULE
    # Rewrite the retention period of the first rule, e.g. "P30D".
    rule[0]["period"] = f"P{expires.upper()}"
    resp = requests.post(rule_url, json=rule)
    if resp.status_code != 200:
        raise DruidUpdateExpiresException(message_kv={MESSAGE: f"{physical_tn}更新数据过期时间异常"})
    return True
def delete(rt_info, expires):
    """Submit a kill task to delete expired segments of the result table.

    :param rt_info: result table configuration
    :param expires: expiry period; falls back to the stored value or "360d" when falsy
    :return: True once the kill task reports success
    :raises DruidDeleteDataException: when the task cannot be submitted
    """
    druid, physical_tn, conn_info = _get_druid_storage_info(rt_info)
    overlord = _get_role_leader(conn_info[ZOOKEEPER_CONNECT], OVERLORD, druid[STORAGE_CLUSTER][VERSION])
    if not expires:
        expires = druid.get(EXPIRES, "360d")
    kill_interval = _get_kill_interval(translate_expires_day(expires))
    task_id = f'kill_{rt_info[RESULT_TABLE_ID]}_{kill_interval.replace("/", "_")}_{str(uuid.uuid4())[0:8]}'
    data = {TYPE: "kill", ID: task_id, "dataSource": physical_tn, INTERVAL: kill_interval}
    url = f"http://{overlord}{ENDPOINT_RUN_TASK}"
    logger.info(f"start delete data, url:{url}, params: {json.dumps(data)}")
    ok, resp = post(url, data)
    if not ok:
        raise DruidDeleteDataException(message_kv={MESSAGE: resp})
    # Poll the task status until it succeeds (or the poll limit is reached).
    return _check_delete_result(overlord, rt_info[RESULT_TABLE_ID], task_id)
def _get_kill_interval(expires):
    """Build the kill-task interval from the epoch up to the expiry boundary.

    :param expires: retention period in days
    :return: interval string "<UTC_BEGIN_TIME>/<utc boundary>"
    """
    # Midnight (local) of the oldest day to keep, shifted into UTC.
    boundary_local = (datetime.today() + timedelta(-expires + 1)).strftime("%Y-%m-%dT00:00:00.000Z")
    boundary_utc = datetime.strptime(boundary_local, UTC_FORMAT) - timedelta(hours=TIME_ZONE_DIFF)
    return f"{UTC_BEGIN_TIME}/{boundary_utc.strftime(UTC_FORMAT)}"
def _check_delete_result(overlord, result_table_id, task_id, max_times=60):
    """Poll the kill task until it reports SUCCESS.

    :param overlord: overlord node
    :param result_table_id: result table id (for logging only)
    :param task_id: kill task id
    :param max_times: maximum number of polls, 5 seconds apart
    :return: True when the task succeeded
    :raises DruidDeleteDataException: on status-query failure or poll exhaustion
    """
    # Iterative form of the original recursion: max_times + 1 polls in total.
    while max_times >= 0:
        time.sleep(5)
        ok, status = get(f"http://{overlord}{ENDPOINT_RUN_TASK}/{task_id}/status")
        if not ok:
            raise DruidDeleteDataException(message_kv={MESSAGE: "检查任务运行状态异常"})
        if status.get(STATUS, {}).get(STATUS, "") == SUCCESS:
            return True
        max_times = max_times - 1
        logger.info(f"Enter the next poll, max_times: {max_times}, current result: {status}")
    logger.error(f"deleting expired data failed, rt: {result_table_id}, task_id: {task_id}")
    raise DruidDeleteDataException(message_kv={MESSAGE: "删除过期数据失败, 超过最大重试次数"})
def segments_compaction(cluster_name, delta_day, merge_days, timeout):
    """
    Compact the segments of every used datasource in a cluster.

    :param cluster_name: druid cluster name
    :param delta_day: number of most recent days to skip
    :param merge_days: number of days to merge per datasource
    :param timeout: timeout of the merge operation
    :return: True when the datasource loop ran (per-datasource failures are logged),
        False when the datasource list could not be fetched
    """
    cluster = model_manager.get_storage_cluster_config(cluster_name, DRUID)
    zk_addr = json.loads(cluster[CONNECTION_INFO])[ZOOKEEPER_CONNECT]
    version = cluster[VERSION]
    coordinator = _get_role_leader(zk_addr, COORDINATOR, version)
    ok, datasources_used = get(f"http://{coordinator}{ENDPOINT_GET_DATASOURCES}")
    if not ok:
        return False
    for datasource in datasources_used:
        try:
            # Re-resolve the coordinator each round in case the leader moved.
            coordinator = _get_role_leader(zk_addr, COORDINATOR, version)
            ok, resp = get(f"http://{coordinator}/druid/coordinator/v1/datasources/{datasource}/")
            if not ok:
                continue
            # Day of the oldest segment; ISO "%Y-%m-%d" strings compare correctly
            # as plain strings, so the window math below works lexicographically.
            last_day = datetime.strptime(resp[SEGMENTS][MINTIME], "%Y-%m-%dT%H:%M:%S.000Z").strftime("%Y-%m-%d")
            end_date = (datetime.today() - timedelta(delta_day)).strftime("%Y-%m-%d")
            begin_date = (datetime.today() - timedelta(delta_day + merge_days)).strftime("%Y-%m-%d")
            if end_date <= last_day:
                # All data is newer than the merge window: nothing to compact.
                continue
            # Never start the window before the oldest existing segment.
            begin_date = last_day if last_day > begin_date else begin_date
            merge_segments(zk_addr, datasource, begin_date, end_date, version, timeout, merge_days)
        except Exception:
            logger.warning(f"segments compaction failed for datasource {datasource}", exc_info=True)
    return True
| [
"terrencehan@tencent.com"
] | terrencehan@tencent.com |
70e094a669dbbd62c32f85af26ee429b6dc31670 | 75f551e4070d15ba49ace9d08a8e117edb5df74d | /python-implementations/test_2.py | 019eb40257d8b3d1a306da5f352d614604273402 | [] | no_license | aboyd52501/hermes | 4c853c0bf21062f52da28e326f85599992b5fcc9 | 9cd2d862123d4e609b64485d8b80f0fb31704306 | refs/heads/main | 2023-03-16T18:48:26.441838 | 2021-03-03T22:28:18 | 2021-03-03T22:28:18 | 343,240,663 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,944 | py | from threading import Thread, Event
from sys import argv
import socket
class Map(dict):
    """A bidirectional dict: setting m[k] = v also makes m[v] map back to k.

    Both directions are stored in the underlying dict, so the raw entry count is
    twice the number of pairs; __len__ reports the number of pairs.
    NOTE(review): a self-pair (key == value) breaks __delitem__ — confirm whether
    that case can occur for the datatype codes used here.
    """
    def __setitem__(self, key, value):
        # Unlink any pair the key or the value currently belongs to.
        if key in self:
            del self[key]
        if value in self:
            del self[value]
        dict.__setitem__(self, key, value)
        dict.__setitem__(self, value, key)
    def __delitem__(self, key):
        # Remove both directions of the pair.
        dict.__delitem__(self, self[key])
        dict.__delitem__(self, key)
    def __len__(self):
        # Bug fix: super().__len__(self) passed a spurious argument and raised
        # TypeError on every len() call; report the number of pairs instead.
        return dict.__len__(self) // 2
# Bidirectional registry of message datatypes: name -> code and code -> name.
DATATYPES = Map()
DATATYPES["Plaintext"] = 0
DATATYPES["Ciphertext"] = 1
DATATYPES["RSA Key Request"] = 2
DATATYPES["RSA Key Response"] = 3
# Wire format: 4-byte little-endian payload length, 1-byte datatype, then payload.
LENGTH_HEADER_SIZE = 4
DATATYPE_HEADER_SIZE = 1
# msg is a 2-tuple of (datatype, data)
def send_message(socket, msg):
    """Frame one message (length header, datatype header, payload) and send it."""
    datatype, payload = msg
    frame = (
        len(payload).to_bytes(LENGTH_HEADER_SIZE, 'little')
        + datatype.to_bytes(DATATYPE_HEADER_SIZE, 'little')
        + payload
    )
    socket.sendall(frame)
def recv_message(socket):
    """Receive one framed message and return (datatype, payload).

    Bug fix: socket.recv may return fewer bytes than requested, but the original
    advanced its counter by the requested length, silently truncating messages;
    it also looped forever when the peer closed the connection mid-read.
    """
    length_header = _recv_exactly(socket, LENGTH_HEADER_SIZE)
    datatype_header = _recv_exactly(socket, DATATYPE_HEADER_SIZE)
    datatype = int.from_bytes(datatype_header, 'little')
    data_length = int.from_bytes(length_header, 'little')
    data_in = _recv_exactly(socket, data_length)
    return (datatype, data_in)


def _recv_exactly(socket, count):
    """Read exactly count bytes, raising ConnectionError if the peer closes early."""
    buf = bytes()
    while len(buf) < count:
        chunk = socket.recv(min(4096, count - len(buf)))
        if not chunk:
            raise ConnectionError("socket closed while receiving")
        buf = buf + chunk
    return buf
class Server:
    """Threaded echo server: replies to each message with its payload reversed."""
    def __init__(self):
        # connections is declared but never populated; each handler runs in its own thread.
        self.connections = []
        # Flag checked by the accept loop and every handler loop.
        self.running = True
    def attach(self, address, port):
        """Bind, listen, and spawn one handler thread per accepted connection."""
        self.socket = socket.socket()
        self.socket.bind((address, port))
        self.socket.listen()
        # listen for incoming connections
        while self.running:
            # this line is blocking
            connection_socket, connection_address = self.socket.accept()
            # once a new connection comes in, delegate it to a handler thread
            new_connection_thread = Thread(
                target=self.handle_incoming_connection,
                args=(connection_socket, connection_address))
            # Non-daemon: handlers keep running even after the accept loop exits.
            new_connection_thread.daemon = False
            new_connection_thread.start()
    def handle_incoming_connection(self, connection, address):
        """Per-connection loop: log each message and echo its payload reversed.

        NOTE(review): assumes every payload is valid UTF-8; decode() raises on
        binary data and ends the connection via the broad except — confirm.
        """
        try:
            print(f"Connected {address}")
            while self.running:
                data_in_type, data_in = recv_message(connection)
                print(f"\nAddress: {address}\nType: {DATATYPES[data_in_type]}\nContent: {data_in.decode('utf-8')}")
                data_out = data_in[::-1]
                data_out_type = data_in_type
                send_message(connection, (data_out_type, data_out))
        except Exception as e:
            print(e)
        finally:
            print(f"Disconnected {address}")
    def close(self):
        """Stop the accept loop and close the listening socket."""
        self.running = False
        self.socket.close()
class Client:
    """Threaded socket client with an outbox/inbox mediated by two Events."""
    def __init__(self):
        # Messages received from the server, drained by get_output().
        self.inbox = []
        # Messages queued for sending, drained by the writer thread.
        self.outbox = []
        # Set when the outbox has content / the inbox has content.
        self.can_send = Event()
        self.can_recv = Event()
        self.attached = False
        self.running = True
    def attach(self, target_address, target_port):
        """Connect to the server and start the reader/writer daemon threads."""
        self.socket = socket.socket()
        self.socket.connect((target_address, target_port))
        self.attached = True
        self.input_thread = Thread(target=self.listen_to_server, args=())
        self.input_thread.daemon = True
        self.input_thread.start()
        self.output_thread = Thread(target=self.listen_to_input)
        self.output_thread.daemon = True
        self.output_thread.start()
    def listen_to_server(self):
        """Reader loop: append each incoming message to the inbox, signal can_recv."""
        try:
            while True:
                new_message = recv_message(self.socket)
                self.inbox.append(new_message)
                self.can_recv.set()
        except Exception as e:
            print(f"Exception {e} occurred.\nShutting down socket.")
            # NOTE(review): close() joins input_thread — calling it from this very
            # thread raises "cannot join current thread"; confirm intended shutdown path.
            self.close()
    def listen_to_input(self):
        """Writer loop: wait for can_send, flush the outbox, clear the flag."""
        try:
            while True:
                self.can_send.wait()
                for msg in self.outbox:
                    send_message(self.socket, msg)
                self.outbox = []
                self.can_send.clear()
        except Exception as e:
            print(f"Exception {e} occurred.\nShutting down socket.")
            self.close()
    # non-blocking
    def queue_message(self, msg):
        """Queue a (datatype, payload) tuple and wake the writer thread."""
        self.outbox.append(msg)
        self.can_send.set()
    def get_output(self):
        """Block until at least one message has arrived, then drain the inbox."""
        self.can_recv.wait()
        out = self.inbox
        # clear the inbox and reset the can_receive Event
        self.inbox = []
        self.can_recv.clear()
        return out
    def close(self):
        """Stop the loops, wake any waiters, join both threads, close the socket."""
        self.running = False
        self.can_send.set()
        self.can_recv.set()
        self.input_thread.join()
        print("Input thread joined")
        self.output_thread.join()
        print("Output thread joined")
        self.socket.close()
class ConsoleIO:
    """Blocking console REPL around Client.

    NOTE(review): the constructor runs the whole send/receive loop until
    interrupt or error — instantiation never returns while connected.
    """
    def __init__(self, address, port):
        self.client = Client()
        self.client.attach(address, port)
        try:
            while self.client.running:
                text_out = input(">>> ")
                data_out = text_out.encode('utf-8')
                # Every console line is sent as a Plaintext message.
                self.client.queue_message((DATATYPES["Plaintext"], data_out))
                # Block until the server's reply (or replies) arrive.
                msg_in = self.client.get_output()
                for msg in msg_in:
                    print(msg)
        except KeyboardInterrupt:
            print("\nExiting, goodbye!")
        except Exception as e:
            print(e)
        finally:
            self.client.close()
if __name__ == "__main__":
if argv[1] == "client":
c = ConsoleIO(argv[2], int(argv[3]))
elif argv[1] == "server":
s = Server()
s.attach('', int(argv[2])) | [
"aboyd52501@gmail.com"
] | aboyd52501@gmail.com |
7a2c9eb7044540d777bca9c0f68a4a888895eb00 | 06904f68018fbd42bba1909e12a79c2106af71f4 | /mirror_en.py | 733cf287ae4ed857491c9bb00206dfa953eb9428 | [] | no_license | rzbfreebird/MCDR-Mirror-Server | 2d079ac30c073805045f97302b2379937b8f95e2 | fbaebc8eeddaefe3675efff8abe98e7e69d83e30 | refs/heads/master | 2022-12-07T01:14:01.603244 | 2020-09-03T14:30:43 | 2020-09-03T14:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,443 | py | # -*- coding: utf-8 -*-
import shutil
import datetime
import os
import json as js
import platform
from os.path import abspath, dirname
from utils import rcon
current_path = abspath(dirname(__file__))
def read_config():
    """Load and return the plugin configuration from config/mirror.json."""
    with open("config/mirror.json") as json_file:
        return js.load(json_file)
# Plugin configuration, loaded once at import time.
conf=read_config()
mirror_folder=conf['path']
remote_enable=conf['remote']['enable']
address=conf['remote']['address']
port=conf['remote']['port']
secret=conf['remote']['secret']
start_command=conf['command']
world=conf["world"]
source=[]
target=[]
# Guard so "!!mirror start" cannot launch the mirror twice.
mirror_started=False
# Detect whether the mirror folder is itself an MCDR install (worlds under ./server).
# NOTE(review): no "/" before MCDReforged.py, unlike the joins below — this only
# works when conf['path'] ends with a separator; confirm.
MCDRJudge=os.path.exists("{}MCDReforged.py".format(mirror_folder))
# NOTE(review): range(len(world)) combined with world[i-1] starts at world[-1],
# i.e. both lists are rotated by one; source and target use the same rotation so
# the pairing stays consistent, but the order looks unintended — confirm.
for i in range(len(world)):
    source.append('./server/{}'.format(world[i-1]))
if(MCDRJudge):
    for i in range(len(world)):
        target.append('{}/server/{}'.format(mirror_folder,world[i-1]))
else:
    for i in range(len(world)):
        target.append('{}/{}'.format(mirror_folder,world[i-1]))
# Rcon client used to drive the mirror server remotely (only when enabled).
if(remote_enable):
    connection=rcon.Rcon(address,port,secret)
remote_info='''
§6[Mirror]§bRemote Information:
§5Rcon Address: §b{}
§5Rcon Port: §b{}
'''.format(address,port)
help_msg='''
§r======= §6Minecraft Mirror Plugin §r=======
Use §6!!mirror sync§r to sync the main server's world to the mirror one
Use §6!!mirror start§r to turn on the mirror server
§4BE CAUTIOUS: IF YOU DON'T ENABLE THE RCON FREATURE OF THE MIRROR SERVER, YOU CANNOT SHUTDOWN THE SERVER BY REMOTE COMMAND
§4YOU CAN ONLY SHUTDOWN IT IN THE MIRROR SERVER, TO DO THIS, YOU CAN CHECKOUT THE FOLLOWING MCDR PLUGINS
§4SimpleOP without MCDR-Admin permission required
§4StartStopHelper with MCDR-Admin permission required
-----Rcon Features-----
Use §6!!mirror info§r to checkout rcon information(MCDR-Admin Permission is Required)
Use §6!!mirror stop§r to stop mirror server
Use §6!!mirror status§r to checkout whether the mirror has been turned on or not
Use §6!!mirror rcon <command>§r to send command to mirror server(MCDR-Admin Permission is Required, use it WITHOUT SLASH)
'''
# Clickable tellraw payloads linking to the recommended companion plugins.
SimpleOP=' {"text":"§6Checkout SimpleOP","clickEvent":{"action":"open_url","value":"https://github.com/GamerNoTitle/SimpleOP"}}'
StartStopHelper=' {"text":"§6Checkout StartStopHelper","clickEvent":{"action":"open_url","value":"https://github.com/MCDReforged-Plugins/StartStopHelper"}}'
def helpmsg(server,info):
    """Reply with the plugin usage plus clickable companion-plugin links (players only)."""
    if not (info.is_player and info.content == '!!mirror'):
        return
    server.reply(info, help_msg, encoding=None)
    for payload in (SimpleOP, StartStopHelper):
        server.execute('tellraw '+ info.player + payload)
def sync(server,info):
    """Copy the main server's world folders into the mirror, timing the operation."""
    start_time=datetime.datetime.now()
    # Flush the world to disk before copying.
    server.execute('save-all')
    server.say('§6[Mirror]Syncing...')
    i=0
    try:
        # First attempt: plain copytree (fails if a target folder already exists).
        while True:
            if(i>len(world)-1): break
            shutil.copytree(source[i],target[i])
            i=i+1
    except:
        try:
            # Second attempt: remove each target first, then copy; resumes at the
            # current i, so folders already copied are not redone.
            while True:
                if(i>len(world)-1): break
                shutil.rmtree(target[i],True)
                shutil.copytree(source[i],target[i])
                i=i+1
        except Exception:
            # Last resort: also skip session.lock, which a running server keeps locked.
            while True:
                if(i>len(world)-1): break
                shutil.rmtree(target[i],True)
                ignore=shutil.ignore_patterns('session.lock')
                shutil.copytree(source[i],target[i],ignore=ignore)
                i=i+1
    end_time=datetime.datetime.now()
    server.say('§6[Mirror]Sync completed in {}'.format(end_time-start_time))
def start(server,info):
    """Launch the mirror server in the foreground; blocks until it exits."""
    global mirror_started
    server.say('§6[Mirror]Mirror server is launching, please wait...')
    # Run the configured start command inside the mirror folder.
    if platform.system()=='Windows':
        os.system('cd {} && powershell {}'.format(mirror_folder,start_command))
    else:
        os.system('cd {} && {}'.format(mirror_folder,start_command))
    # Return to the plugin directory once the mirror process has exited.
    os.system('cd {}'.format(current_path))
    mirror_started=False
    server.say('§6[Mirror]Mirror server has been shutdown!')
def command(server,info):
    """Forward a console command (without the leading slash) to the mirror via rcon."""
    if not conf['remote']['command']:
        server.reply(info,' §6[Mirror]§4Error: Rcon feature is disabled!', encoding=None)
        return
    if server.get_permission_level(info) <= 2:
        server.reply(info,'§6[Mirror]§4Error: Permission Denied!', encoding=None)
        return
    try:
        connection.connect()
        # Strip the "!!mirror rcon " prefix (14 characters).
        connection.send_command(info.content[14:])
        connection.disconnect()
        server.reply(info,'§6[Mirror]Command Sent!', encoding=None)
    except Exception as e:
        server.reply(info,'§6[Mirror]§4Error: {}'.format(e), encoding=None)
def stop(server,info):
    """Ask the mirror server to stop via rcon; report connection failures in chat."""
    try:
        connection.connect()
        connection.send_command('stop')
        connection.disconnect()
    except Exception as e:
        server.reply(info,'§6[Mirror]§4Connection Failed: {}'.format(e), encoding=None)
def information(server,info):
    """Show the rcon connection details to admins (permission level > 2)."""
    if server.get_permission_level(info) <= 2:
        server.reply(info,"§6[Mirror]§4Error: Permission Denied!", encoding=None)
        return
    server.reply(info,remote_info)
def status(server,info):
    """Probe the mirror's rcon port to report online/starting/offline state."""
    global mirror_started
    try:
        connection.connect()
        server.reply(info,'§6[Mirror]§lMirror Server is online!', encoding=None)
        connection.disconnect()
    except:
        # Rcon unreachable: either still booting (flag set) or not running at all.
        if not mirror_started:
            server.reply(info,'§4[Mirror]§lMirror Server is offline!', encoding=None)
        else:
            server.reply(info,'§6[Mirror]§lMirror Server is Starting...(or mirror has been started but rcon feature didn\'t work well', encoding=None)
def on_load(server, old_module):
    """MCDR entry point: register the !!mirror help entry."""
    server.add_help_message('!!mirror', '§6Get the usage of Mirror')
def on_info(server,info):
    """MCDR entry point: dispatch !!mirror subcommands from chat/console input."""
    global mirror_started
    content = info.content
    # Subcommands are mutually exclusive, so a single chain suffices.
    if info.is_player and content == '!!mirror':
        helpmsg(server,info)
    elif content == '!!mirror sync':
        sync(server,info)
    elif content == '!!mirror start':
        if mirror_started:
            server.reply(info,'§b[Mirror]Mirror server has already started, please don\'t run the command again!', encoding=None)
        else:
            mirror_started=True
            start(server,info)
    elif '!!mirror rcon' in content:
        command(server,info)
    elif content=='!!mirror info':
        information(server,info)
    elif content=='!!mirror stop':
        stop(server,info)
    elif content=='!!mirror status':
        status(server,info)
| [
"bili33@87ouo.top"
] | bili33@87ouo.top |
2a08695213000cecf794bebc195f346db2f55e7f | 01f0beab21eccc37aa2a94df947d6b0ae0400e7f | /base/views.py | 967f5d5fd2d181fdda321184fb51658457b86e04 | [] | no_license | mousavi-lg/Simply-site | 90281ab9344d7f450b5f857a84820983bd816376 | bcde7801d5cda2bde16e6b8ed2f21f44a762495b | refs/heads/main | 2023-03-15T15:48:42.559681 | 2021-02-27T15:43:38 | 2021-02-27T15:43:38 | 342,884,129 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse , HttpResponseRedirect
from .models import *
from .forms import *
from django.contrib.auth import logout, authenticate
# Create your views here.
def home(request):
    """Render the home page; a POST carrying a truthy "Logout" field logs the user out.

    :param request: Django HttpRequest
    :return: rendered base/home.html
    """
    # Explicit membership test replaces the bare try/except-pass, which also
    # silently swallowed any error raised by logout() itself.
    if request.method == "POST" and request.POST.get("Logout"):
        logout(request)
    return render(request, 'base/home.html')
def register(response):
if response.method == "POST":
form = RegisterForm(response.POST)
if form.is_valid():
form.save()
return redirect("/")
else:
form = RegisterForm()
return render(response, 'register/register.html', {"form":form})
def comment(request):
    """Render the comment page: list all tasks and accept TaskForm submissions.

    NOTE(review): there is no redirect after a successful POST, so a browser
    refresh re-submits the form — confirm whether POST/redirect/GET is wanted.
    """
    # Lazy queryset: evaluated at template render time, so a row saved below
    # will still appear in this response.
    tasks = Task.objects.all()
    form = TaskForm()
    if request.method == 'POST':
        if request.POST:
            form = TaskForm(request.POST)
        if form.is_valid():
            form.save()
    context= {
        'tasks': tasks,
        'form': form
    }
    return render(request, 'comments/comment.html', context)
"noreply@github.com"
] | noreply@github.com |
b6cd7d8565d0eb02480f3f7a35eb136564660245 | 990dec6eb7bb6c7cbbb9b8d94d3c2f359da2dad4 | /matplotlib_learn/plt6_ax_setting2.py | b2562dd3c85a1d861b07b1cee1c03016793c6ced | [] | no_license | PeakGe/python_learn | 116323a8cb1e1ef60a8036d47d6de685e8d0103f | 81a232ba160dd62a6fc1a67c610f1effcb778c0a | refs/heads/master | 2020-06-17T20:12:57.692010 | 2019-07-10T02:42:41 | 2019-07-10T02:42:41 | 196,039,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | # 6 - axis setting
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
Tutorial reference:
http://www.scipy-lectures.org/intro/matplotlib/matplotlib.html
"""
import matplotlib.pyplot as plt
import numpy as np
# Sample data: a straight line (y1) and a parabola (y2) over [-3, 3].
x = np.linspace(-3, 3, 50)
y1 = 2*x + 1
y2 = x**2
plt.figure()
plt.plot(x, y2)
# plot the second curve in this figure with certain parameters
plt.plot(x, y1, color='red', linewidth=1.0, linestyle='--')
# set x limits
plt.xlim((-1, 2))
plt.ylim((-2, 3))
# set new ticks
new_ticks = np.linspace(-1, 2, 5)
plt.xticks(new_ticks)
# set tick labels
plt.yticks([-2, -1.8, -1, 1.22, 3],
           ['$really\ bad$', '$bad$', '$normal$', '$good$', '$really\ good$'])
# to use '$ $' for math text and nice looking, e.g. '$\pi$'
# gca = 'get current axis'
ax = plt.gca()
# Hide the right and top spines so only two axis lines remain.
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
# ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
# Move the remaining spines so they cross at the data origin (0, 0).
ax.spines['bottom'].set_position(('data', 0))
# the 1st is in 'outward' | 'axes' | 'data'
# axes: percentage of y axis
# data: depend on y data
ax.yaxis.set_ticks_position('left')
# ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
ax.spines['left'].set_position(('data',0))
plt.show()
"peakge@163.com"
] | peakge@163.com |
53d70ee33495d952c65ffba481eb70809a2f23b5 | ad055a3e56cbfdecda112b164315dcb7af529481 | /openapi_client/models/__init__.py | e4e7a653f568596861565c3239f6367e4937fdc6 | [] | no_license | shahrukhss/aqua-sdk-python | 328ed4e72a2433a6f5e128118b148555eab6544b | 3a850971e0940628af8eb457ea9dd3114bd2982e | refs/heads/master | 2020-06-16T19:22:45.345926 | 2019-07-07T17:47:58 | 2019-07-07T17:47:58 | 195,677,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # coding: utf-8
# flake8: noqa
"""
Aqua Security Test Api Definition Document Authered By - Shaharuk Shaikh
This document is the api def document api's given to test by Aqua Security
The version of the OpenAPI document: 0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from openapi_client.models.task_detail import TaskDetail
from openapi_client.models.task_id import TaskID
| [
"shaharuk.pardes@gmail.com"
] | shaharuk.pardes@gmail.com |
c5868d381536bb3ca6432d8af25e6a6c8d8e7bf0 | 7a6c9a4e38e4c7271bddd3c51ff8fb1bfa714c87 | /4/I.py | 50e41a5b2f5a2c354e55bdcd0a08db81b8f1386b | [] | no_license | simonsayscodes/School | bf934e2a32b01e063d5f3fa49e4a4668b566518c | 377de06267ab6744992fd4f241c64cb047ba8c26 | refs/heads/master | 2023-02-23T04:49:39.232936 | 2021-01-24T22:53:03 | 2021-01-24T22:53:03 | 292,821,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | N = int (input("Tal:"))
# Print "Abracadabra" next to every number from 0 through N inclusive.
for number in range(N + 1):
    print(number, "Abracadabra")
| [
"70754158+simonsayscodes@users.noreply.github.com"
] | 70754158+simonsayscodes@users.noreply.github.com |
3422b1a5ccdb881ac91f119398d788f8d54eb5c3 | 3b5d86841a8f18e1ac4ce9b8e3b9227149946c2a | /attendanceapp/attd_app/urls.py | be7b4f72171e7ad88f14488763ea96738656d76e | [] | no_license | Harish5074/Attendance_App | 4d5fc7cd8bf4598c847ff7ff884bcd596de52651 | 5632ba1ab428f2fc58a592b06d0868055d0ac87b | refs/heads/master | 2023-01-06T03:11:54.834643 | 2020-10-23T09:42:03 | 2020-10-23T09:42:03 | 295,389,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | """attendanceapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url
from . import views
urlpatterns = [
    # Employee CRUD.
    url(r"^$", views.employee_HomePage, name="EmpHomepage"),
    url(r"^EmpDetails/(?P<id>[0-9]+)/", views.employee_Details, name="EmpDetails"),
    url(r"^EmpCreate/", views.employee_Create, name="EmpCreate"),
    url(r"^EmpUpdate/(?P<id>[0-9]+)/", views.employee_Update, name="EmpUpdate"),
    url(r"^EmpDelete/(?P<id>[0-9]+)/", views.employee_Delete, name="EmpDelete"),
    # Attendance CRUD.
    url(r"^All_Attendance/$", views.atd_HomePage, name="AtdHomepage"),
    url(r"^AtdDetails/(?P<id>[0-9]+)/", views.atd_Details, name="AtdDetails"),
    url(r"^AtdCreate/", views.atd_Create, name="AtdCreate"),
    url(r"^AtdUpdate/(?P<id>[0-9]+)/", views.atd_Update, name="AtdUpdate"),
    url(r"^AtdDelete/(?P<id>[0-9]+)/", views.atd_Delete, name="AtdDelete"),
    # Issue tracker CRUD.
    # NOTE(review): this route reuses the name "EmpHomepage" (also used by
    # the first pattern) -- reverse() will resolve to only one of them;
    # looks like a copy-paste slip, confirm intended name.
    url(r"^Issuetracker/$", views.isu_HomePage, name="EmpHomepage"),
    url(r"^IsuDetails/(?P<id>[0-9]+)/", views.isu_Details, name="IsuDetails"),
    url(r"^IsuCreate/", views.isu_Create, name="IsuCreate"),
    url(r"^IsuUpdate/(?P<id>[0-9]+)/", views.isu_Update, name="IsuUpdate"),
    url(r"^IsuDelete/(?P<id>[0-9]+)/", views.isu_Delete, name="IsuDelete"),
    # NOTE(review): pattern below lacks the leading '^' the others have,
    # so it matches anywhere in the path -- confirm this is intentional.
    url(r"Download_Attendance_Report/(?P<id>[0-9]+)/", views.userdetails, name="userdetails"),
]
| [
"harish5074@gmail.com"
] | harish5074@gmail.com |
c69d55d3f7500378e3a928dff4e8a0e47d70916b | 09db0d94ef90ff4df3b17cf8d9c2cca7f79b2c65 | /buffer.py | 317b3835a2a7a73b712441fc4f3f631cdf1c3eb1 | [] | no_license | tgbugs/desc | 5e17e7e35445908b14c7cbaed766764bb3cbab6b | b68a07af90f87f55c4b5be6ff433f310a0bc7e2c | refs/heads/master | 2020-04-09T12:20:02.650756 | 2019-05-08T07:34:29 | 2019-05-08T07:34:29 | 20,045,270 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | #!/usr/bin/env python3.4
""" Example for how to load vertex data from numpy directly
"""
import numpy as np
from panda3d.core import Geom, GeomVertexFormat, GeomVertexData
from .util.ipython import embed
# Number of vertices to generate.
size = 1000
# Random integer xyz positions in [0, 1000).
data = np.random.randint(0,1000,(size,3))
#color = np.random.randint(0,255,(size,4))
# One random RGBA colour row, repeated for every vertex.
color = np.repeat(np.random.randint(0,255,(1,4)), size, 0)
#full = np.hstack((data,color))
# Row-wise (x, y, z, r, g, b, a) tuples for the vertex table.
full = [tuple(d) for d in np.hstack((data,color))]
#full = [tuple(*d,*color) for d in data]
geom = GeomVertexData('points', GeomVertexFormat.getV3c4(), Geom.UHDynamic)
geom.setNumRows(len(full))
array = geom.modifyArray(0) # need a writeable version
handle = array.modifyHandle()
#options are then the following:
# Zero-copy path: expose the vertex array as a numpy array and assign
# the rows directly through the shared memory view.
view = memoryview(array)
arr = np.asarray(view)
arr[:] = full
embed()
#OR
#handle.copyDataFrom('some other handle to a GVDA')
#handle.copySubataFrom(to_start, to_size, buffer, from_start, from_size)
| [
"tgbugs@gmail.com"
] | tgbugs@gmail.com |
919495fad13d666a69898ac017fe34ab0231dff7 | 55ec90e5dc40ecb48bf98bd4330a0cba00b4c3b9 | /predicao-com-serie/Obtendo-resultado-sem-filtro.py | 6bfce6e9fb20d7d17f6b45e21a63583a80eab4bc | [] | no_license | Matheuspds/Prediction_Volume | 670dbad017c6e1b55761eaa09b44e40e0ac6152b | 24ce47dd2f91fb70ed9c93bcf8831ae3641dc69e | refs/heads/master | 2020-08-05T22:59:51.295156 | 2019-12-10T04:59:17 | 2019-12-10T04:59:17 | 212,745,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,495 | py |
# coding: utf-8
# In[1]:
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
import numpy as np
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
import random
# In[20]:
df_test = pd.read_csv("test2.csv")
df_train0 = pd.read_csv("train.csv")
df_train1 = pd.read_csv("train1.csv")
df_train2 = pd.read_csv("train2.csv")
df_train3 = pd.read_csv("train3.csv")
df_train_list = [df_train0, df_train1, df_train2, df_train3]
# In[21]:
def feature_transform_split(key, data):
    """Feature engineering for one traffic-volume training group.

    Expects columns: precipitation, rel_humidity, period_num, holiday,
    first_last_workday, day_of_week and -- when key == 1 -- tollgate_id
    and direction (inferred from the accesses below; confirm against
    the CSV schema).  Returns the DataFrame with weather flags
    binarised, categoricals one-hot encoded and unused columns dropped.

    NOTE(review): the caller groups by ["tollgate_id", "direction"] and
    passes the group key, i.e. a tuple, so ``key == 1`` never fires
    there -- confirm whether an int key was intended.
    """
    # data = remove_exception(data)
    # Missing weather readings default to 0 mm rain / 50% humidity.
    data["precipitation"] = data[["precipitation"]].fillna(value=0)
    data["rel_humidity"] = data[["rel_humidity"]].fillna(value=50)
    # Binarise: any rain at all / very humid (> 90%).
    data["precipitation"] = data["precipitation"].apply(lambda x: x > 0)
    data["rel_humidity"] = data["rel_humidity"].apply(lambda x: x > 90)
    data = data.drop("precipitation", axis=1)
    # data = data.drop("rel_humidity", axis= 1)
    # data["sum"] = data["0"] + data["1"] + data["2"] + data["3"] + data["4"] + data["5"]
    # One-hot encode the time-period and holiday categoricals.
    data = pd.concat([data, pd.get_dummies(data['period_num'])], axis=1)
    data = data.drop("period_num", axis=1)
    data = pd.concat([data, pd.get_dummies(data['holiday'])], axis=1)
    data = data.drop("holiday", axis=1)
    #
    # data = pd.concat([data, pd.get_dummies(data['first_last_workday'])], axis=1)
    data = data.drop("first_last_workday", axis=1)
    data = data.drop("day_of_week", axis=1)
    if (key == 1):
        # Keep tollgate/direction information as extra features.
        data = pd.concat([data, pd.get_dummies(data['tollgate_id'])], axis=1)
        # data["tollgate_id1"] = data['tollgate_id']
        data["direction1"] = data['direction']
    return data
# In[22]:
random.shuffle(df_train_list)
df_train = pd.concat(df_train_list)
#df_ts = pd.read_csv("ts_feature2_simple.csv")
df_date = pd.read_csv("date.csv")
df_train = df_train.merge(df_date, on="date", how="left")
#df_train = df_train.merge(df_ts, on=["tollgate_id", "hour", "miniute", "direction"], how="left")
df_test = df_test.merge(df_date, on="date", how="left")
#df_test = df_test.merge(df_ts, on=["tollgate_id", "hour", "miniute", "direction"], how="left")
df_train_grouped = df_train.groupby(["tollgate_id", "direction"])
df_test_grouped = df_test.groupby(["tollgate_id", "direction"])
df_train_grouped = df_train.groupby(["tollgate_id", "direction"])
df_test_grouped = df_test.groupby(["tollgate_id", "direction"])
result = []
oob = []
for key, train_data in df_train_grouped:
test_data = df_test_grouped.get_group(key)
len_train = len(train_data)
train_data = train_data.append(test_data)[train_data.columns.tolist()]
train_data = feature_transform_split(key, train_data)
regressor_cubic = RandomForestRegressor(n_estimators=500, max_features='sqrt', random_state=10, oob_score=True)
train_data = pd.DataFrame.reset_index(train_data)
train_data = train_data.drop("index", axis=1)
y = train_data.ix[:len_train - 1, :]["volume"]
x = train_data.ix[:len_train - 1, 8:]
x1 = train_data.ix[len_train:, 8:]
regressor_cubic.fit(x, y)
yhat = regressor_cubic.predict(x1)
test_data["volume"] = yhat
result.append(test_data[['tollgate_id', 'time_window', 'direction', 'volume']])
# In[23]:
df_result = pd.concat(result, axis=0)
df_result.to_csv("result/result_split_rf_TESTAR_AGORA"+".csv", index=False)
# In[9]:
#regressor = RandomForestRegressor(n_estimators=500, max_features='sqrt', random_state=10, oob_score=True)
# In[16]:
#regressor.fit(x, y)
# In[24]:
df_pred = pd.read_csv("result/result_split_rf_TESTAR_AGORA"+".csv")
df_real = pd.read_csv("resultado_real_teste.csv")
# In[25]:
df_pred.head()
# In[26]:
df_real.head()
# In[75]:
df_test_v = pd.read_csv("test2_no_filter.csv")
df_train_v = pd.read_csv("train_no_filter.csv")
# In[76]:
def feature_format():
    """Build feature/target arrays from the module-level df_train_v /
    df_test_v DataFrames (mutates both by adding a 'window_n' column).

    Returns (feature_train, feature_test, values_train, values_test)
    where the value arrays are the 'volume' columns and the feature
    frames are everything else.  Test windows never seen in training
    map to NaN in 'window_n'.
    """
    #pd_volume_train = pd_volume_train.set_index(['time'])
    #pd_volume_test = pd_volume_test.set_index(['time'])
    #volume_train = v_train.groupby(['time_window','tollgate_id','direction','date', 'hour']).size().reset_index().rename(columns = {0:'volume'})
    #volume_test = v_test.groupby(['time_window','tollgate_id','direction','date', 'hour']).size().reset_index().rename(columns = {0:'volume'})
    #print(volume_train)
    # Enumerate the distinct training time windows and map each window
    # string to its ordinal index.
    x = pd.Series(df_train_v['time_window'].unique())
    s = pd.Series(range(len(x)),index = x.values)
    df_train_v['window_n'] = df_train_v['time_window'].map(s)
    df_test_v['window_n'] = df_test_v['time_window'].map(s)
    # print vol_test.tail()
    #volume_train['weekday'] = v_train['weekday']
    #volume_test['weekday'] = v_test['weekday']
    feature_train = df_train_v.drop('volume', axis = 1)
    feature_test = df_test_v.drop('volume',axis = 1)
    values_train = df_train_v['volume'].values
    values_test = df_test_v['volume'].values
    return feature_train, feature_test, values_train, values_test
# In[78]:
feature_train, feature_test, values_train, values_test = feature_format()
# In[81]:
feature_test.count()
# In[82]:
regressor = RandomForestRegressor(n_estimators=500, max_features='sqrt', random_state=10, oob_score=True)
# In[91]:
regressor.fit(feature_train[['tollgate_id', 'direction', 'hour', 'miniute', 'am_pm']], values_train)
# In[92]:
y_pred = regressor.predict(feature_test[['tollgate_id', 'direction', 'hour', 'miniute', 'am_pm']])
# In[94]:
values_test
| [
"matheuspds2@gmail.com"
] | matheuspds2@gmail.com |
aeaef43d8a4b225e46b049173c23de3d40ed4fa1 | 49f8826ed11233ff57296d5b66eb329c1bd0ee29 | /selenium-wd/lib/BackPackSim/common.py | 8fe9188f721e1e6425da7e42d7051f7317a1878d | [] | no_license | SXiang/JUnitWebTest | c221ce11dfce127557004f7cef388e790449b39b | 5f03edfd9f0b95454d8ee332a29bd4a876e67b26 | refs/heads/master | 2021-08-31T10:43:55.069732 | 2017-12-18T21:44:03 | 2017-12-18T21:44:03 | 114,942,302 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import os
def envPathExists(ps):
    """Return True when *ps* matches one of the directories listed on
    the PATH environment variable (case-insensitive comparison).

    Fixes two issues in the original:
      * PATH was split on a hard-coded ';' (Windows-only); os.pathsep
        selects ';' on Windows and ':' on POSIX.
      * a missing PATH variable raised KeyError; it is now treated as
        an empty path list.
    """
    wanted = ps.lower()
    for entry in os.environ.get('PATH', '').split(os.pathsep):
        if entry.lower() == wanted:
            return True
    return False
| [
"spulikkal@picarro.com"
] | spulikkal@picarro.com |
061f3e186fa2184c6d2f1f49d0255e16c91807d1 | 49d1e9e20091e862165fba693bfa1824caab2f9b | /func.py | 3ef312c1cb2d5d9867e1750a49299a9e35b96783 | [] | no_license | PiKa1804/tables-website | 7e089c1366e5e2d0a29cdb3edb50eb3c34641588 | 331db7dd80a6e8cb5e93236ad92b3f74b8c3bb51 | refs/heads/master | 2020-05-26T21:28:57.765915 | 2019-05-24T09:32:49 | 2019-05-24T09:32:49 | 188,380,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | from elasticsearch import Elasticsearch
from elasticsearch import helpers
es = Elasticsearch()
def Download(name, name1): #get the data from elasticsearch indices and types
    """Fetch every document of doc-type *name1* from index *name*.

    Returns a pair ``(result_list, size)``: each entry of result_list is
    the list of a document's field values (in _source dict order) with
    the document _id appended as the last element; size is
    ``len(result_list) + 1`` -- presumably a 1-based count for the
    caller's display logic, TODO confirm (the sibling Change() returns
    the plain length).
    """
    result_list=[]
    size=[]
    # match_all query: retrieve the whole type.
    doc = {'query': {'match_all' : {}}}
    # First search only to learn the total hit count...
    res = es.search(index=name,doc_type=name1,body=doc)
    size=res['hits']['total']
    # ...then repeat the search requesting that many hits in one page.
    res = es.search(index=name,doc_type=name1,body=doc,size=size)
    ala=[]
    ind=[]
    for i in range(0,size):
        ala.append(res['hits']['hits'][i]['_source'])
        ind.append(res['hits']['hits'][i]['_id'])
    res2 = ala.copy()
    result_list=[]
    for i in range(0,size):
        # Field values only, with the document id appended at the end.
        result_list.append([v for k,v in res2[i].items()])
        result_list[i].append(ind[i])
    size=len(result_list)+1
    return result_list, size
def Change(name, name1, firm, ident, ind): #search and change specific document in the types
    """Update document *ind* in index *name* / doc-type *name1*, setting
    its 'nsk' field to *firm* and 'id' field to *ident*, then re-fetch
    the whole type.

    Returns ``(result_list2, size2)`` in the same row format as
    Download(), except size2 is the plain ``len(result_list2)`` --
    NOTE(review): Download() returns length + 1; confirm which the
    callers expect.
    """
    result_list2=[]
    size2=[]
    # Single partial-update action executed through the bulk helper.
    actions = [
        {
            '_op_type': 'update',
            '_index': name,
            '_type': name1,
            '_id': ind,
            'doc': {'nsk': firm,
                    'id':ident
            }
        }
    ]
    helpers.bulk(es, actions)
    # Re-read the full type so the caller sees the changed data.
    doc2 = {'query': {'match_all' : {}}}
    res2 = es.search(index=name,doc_type=name1,body=doc2)
    size2=res2['hits']['total']
    res2 = es.search(index=name,doc_type=name1,body=doc2,size=size2)
    ala2=[]
    ind2=[]
    for i in range(0,size2):
        ala2.append(res2['hits']['hits'][i]['_source'])
        ind2.append(res2['hits']['hits'][i]['_id'])
    res3 = ala2.copy()
    result_list2=[]
    for i in range(0,size2):
        result_list2.append([v for k,v in res3[i].items()])
        result_list2[i].append(ind2[i])
    size2=len(result_list2)#get the changed data
    return result_list2, size2
"46743066+PiKa1804@users.noreply.github.com"
] | 46743066+PiKa1804@users.noreply.github.com |
50b70699b80e3e66d8103401a1e5438187aa64fb | b5e8d81bb0f1459616d9662f9884b2c6e3581421 | /typeidea/config/views.py | 256f2abc006f2466860f2b4c85e5a950f006719d | [] | no_license | zhangbenxiang/typeidea | cd03ec0c976dc048102deb52b61f23e10ad1478d | a472a211b207ab6fb95e65207ab5705dee778ac3 | refs/heads/master | 2020-04-29T08:39:50.451518 | 2019-03-20T16:16:19 | 2019-03-20T16:16:19 | 175,962,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
from django.views.generic import ListView
from blog.views import CommonViewMixin
from .models import Link
class LinkListView(CommonViewMixin,ListView):
    """List page for the site's links.

    Shows only links whose status is Link.STATUS_NORMAL -- presumably
    the visible / non-deleted ones, confirm in config.models.Link.
    Shared sidebar context comes from CommonViewMixin.
    """
    queryset=Link.objects.filter(status=Link.STATUS_NORMAL)
    template_name='config/links.html'
    # Name under which the queryset is exposed to the template.
    context_object_name='link_list'
"910530496@qq.com"
] | 910530496@qq.com |
702bd9c61198ec10b638bc639c75d3d5761e243f | 3f7e0e3e60323e0acbec5182d94ef17b7353be11 | /myblog/settings.py | 646a5b461f91c8494655e4dab15c5f2ad6bfa607 | [] | no_license | viciousvizard/My-Blog-Django | d8ead80cd21f2013be0bb7aab736e515acf633bb | 02383b861e512fc1bb1558c140e5801a6533fc19 | refs/heads/master | 2020-07-09T20:12:40.949585 | 2019-08-23T21:30:53 | 2019-08-23T21:30:53 | 204,072,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,163 | py | """
Django settings for myblog project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; for any real
# deployment it should be rotated and loaded from the environment
# (e.g. os.environ['DJANGO_SECRET_KEY']) instead of hard-coded here.
SECRET_KEY = '$fc!&n&esfwkw5p%(zlqb(smt0=q9@vplb2sy@e^vjz0*&uk5$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served host names once DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT=os.path.join(BASE_DIR, "media")
MEDIA_URL='/media/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"utkarsh7839@gmail.com"
] | utkarsh7839@gmail.com |
9ee36689f1628a59d8a7f28c1af469ca7adedfe2 | b5e15fc6fe0132f18c72a1bf035b3edab618e35c | /microfinance/project_data/helpers.py | 4e75b923715a09285f8ea6047a5c9c702562fcbf | [] | no_license | Jubair70/BRAC-Customer-Service-Assisstant | ced72b4c81e0f4670c4be9efdb7d0d113f285b28 | fe35de8b96e2d8a44bf8ed811faa628ea27861d2 | refs/heads/master | 2021-06-27T06:38:35.239131 | 2020-01-13T05:17:48 | 2020-01-13T05:17:48 | 233,516,095 | 0 | 0 | null | 2021-06-10T22:28:56 | 2020-01-13T05:12:26 | JavaScript | UTF-8 | Python | false | false | 608 | py | import paho.mqtt.client as mqtt
from microfinance.settings import MQTT_SERVER_PATH, MQTT_SERVER_PORT
def send_push_msg(topic = "/CSA/1/11111", payload = None, qos = 1, retained = False):
    """Publish one MQTT push message to *topic* and disconnect.

    topic    -- MQTT topic to publish to (default: a sample CSA token)
    payload  -- message body, or None for an empty message
    qos      -- MQTT quality-of-service level
    retained -- whether the broker should retain the message

    NOTE: uses the Python 2 print statement; this module is Python 2 only.
    NOTE(review): publish() is asynchronous in paho-mqtt; disconnecting
    immediately afterwards may drop the message before the network loop
    sends it -- confirm delivery is reliable in practice.
    """
    # MQTT_SERVER_PATH = "192.168.22.114"
    # MQTT_SERVER_PORT = 1884
    # MQTT_SUBSCRIBE_TOKEN = "/CSA/1/11111"
    # MQTT_SERVER_RESPONSE = "response from view=> ayayayayya :)"
    # Fresh anonymous client with a clean session for every call.
    mqttc = mqtt.Client("",True)
    # Server address/port come from Django settings; 100 s keepalive.
    mqttc.connect(MQTT_SERVER_PATH, MQTT_SERVER_PORT,100)
    print "sending.. token: %s: response text: %s" % (topic, payload)
    mqttc.publish(topic, payload, qos , retained)
    mqttc.disconnect()
"jubair@mpower-social.com"
] | jubair@mpower-social.com |
757ed9d6d342db985ed43a6ad0ed1034ef06e4b4 | 7e2e839b3887e7f4cc2206876adf2fbceae3778d | /garbage/Cube/cube.py | 4497af418b312c86798211ca1c5a74e13c843789 | [] | no_license | CadenKun/rubiks_cube | bb911a4bd5c77f6ecd751d945d02d708bd3c4fe0 | 21ddb5733a9b3916f22ae0eadee07b8a6df47c3c | refs/heads/main | 2023-07-13T20:20:44.144880 | 2021-09-01T12:12:37 | 2021-09-01T12:12:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,004 | py | import numpy as np
from garbage.Cube import Side
class Cube:
    """Rubik's-style cube modelled as six linked Side objects.

    The cube is navigated through ``self.front``; each Side knows its
    four neighbours (top/right/bottom/left), so reorienting the cube is
    a matter of re-pointing ``self.front`` and rotating the affected
    faces.  ``dim`` is the (rows, cols) shape of one face.
    """
    def __init__(self, dim):
        """Create a solved cube whose faces have shape *dim*."""
        self.dim = dim
        self.front = Side(self.dim)
        self.__build_cube()
    def __str__(self):
        """Render the six faces, one per line: top, left, front, right,
        back, bottom."""
        res = ""
        res += str(self.front.top) + '\n'
        res += str(self.front.left) + '\n'
        res += str(self.front) + '\n'
        res += str(self.front.right) + '\n'
        res += str(self.front.right.right) + '\n'
        res += str(self.front.bottom) + '\n'
        return res
    def __build_cube(self):
        """Create the five remaining faces, wire every face's neighbour
        links, then paint the solved colour layout."""
        top = Side(self.dim)
        right = Side(self.dim)
        bottom = Side(self.dim)
        left = Side(self.dim)
        back = Side(self.dim)
        # Neighbour links for each face (back is reachable as
        # front.right.right, i.e. two steps to the right).
        top.top = back
        top.right = right
        top.bottom = self.front
        top.left = left
        right.top = top
        right.right = back
        right.bottom = bottom
        right.left = self.front
        bottom.top = self.front
        bottom.right = right
        bottom.bottom = back
        bottom.left = left
        left.top = top
        left.right = self.front
        left.bottom = bottom
        left.left = back
        back.top = top
        back.right = right
        back.bottom = bottom
        back.left = left
        self.front.top = top
        self.front.right = right
        self.front.bottom = bottom
        self.front.left = left
        # Solved layout: one solid colour per face.  Combined with the
        # load order in load_scramble this gives top=w, left=o, front=g,
        # right=r, back=b, bottom=y.
        sideSize = self.dim[0] * self.dim[1]
        scramble = ""
        colors = ['w', 'o', 'g', 'r', 'b', 'y']
        for color in colors:
            for i in range(sideSize):
                scramble += color
        self.load_scramble(scramble)
    def __move(self, direction):
        """Reorient the cube so the neighbour in *direction* ('t', 'r',
        'b' or 'l') becomes the new front, rotating side faces so their
        stored orientation stays consistent.  Vertical moves ('t'/'b')
        also flip the back face 180 degrees."""
        if direction == "t":
            self.front.left.turn('r')
            self.front.right.turn('l')
            self.front.right.right.side = np.rot90(self.front.right.right.side, 2)
            self.front = self.front.top
        elif direction == "r":
            self.front.top.turn('r')
            self.front.bottom.turn('l')
            self.front = self.front.right
        elif direction == "b":
            self.front.right.turn('r')
            self.front.left.turn('l')
            self.front.right.right.side = np.rot90(self.front.right.right.side, 2)
            self.front = self.front.bottom
        elif direction == "l":
            self.front.bottom.turn('r')
            self.front.top.turn('l')
            self.front = self.front.left
    def __turn(self, direction):
        """Rotate the front face 90 degrees ('r' = clockwise via
        np.rot90 k=-1, 'l' = counter-clockwise) and cycle the adjacent
        edge cells of the four neighbouring faces.

        NOTE(review): the edge cycle indexes column 0 / row 0 / last row
        with self.dim[0] only -- behaviour on non-square dims is
        unverified here.
        """
        directions = {'r': -1, 'l': 1}
        self.front.side = np.rot90(self.front.side, directions[direction])
        end = self.dim[0] - 1
        if direction == 'l':
            for i in range(self.dim[0]):
                # top row -> left col <- bottom row <- right col <- top row
                temp = self.front.top.side[end][i]
                self.front.top.side[end][i] = self.front.right.side[i][0]
                self.front.right.side[i][0] = self.front.bottom.side[0][end - i]
                self.front.bottom.side[0][(self.dim[0] - 1) - i] = self.front.left.side[end - i][end]
                self.front.left.side[end - i][end] = temp
        if direction == 'r':
            for i in range(self.dim[0]):
                # Same cycle in the opposite sense, carried through the
                # temp/temp2 pair.
                temp = self.front.right.side[i][0]
                self.front.right.side[i][0] = self.front.top.side[end][i]
                temp2 = self.front.bottom.side[0][end - i]
                self.front.bottom.side[0][end - i] = temp
                temp = temp2
                temp2 = self.front.left.side[end - i][end]
                self.front.left.side[end - i][end] = temp
                temp = temp2
                self.front.top.side[end][i] = temp
    def turn(self, side, direction):
        """
        run a single move on the cube
        :param side: face to turn -- 'U', 'R', 'D', 'L', 'B' or 'F'
            ('M' is currently a no-op)
        :param direction: 'r' for clockwise, 'l' for counter-clockwise
        :return: void

        Implemented by reorienting the cube so the requested face is at
        the front, rotating it with __turn, then restoring orientation.
        NOTE(review): the 'D' branch is identical to 'R'; it looks like
        it should use __move('b') / __move('t') instead -- confirm.
        """
        if side == 'U':
            self.__move('t')
            self.__turn(direction)
            self.__move('b')
        elif side == 'R':
            self.__move('r')
            self.__turn(direction)
            self.__move('l')
        elif side == 'D':
            self.__move('r')
            self.__turn(direction)
            self.__move('l')
        elif side == 'L':
            self.__move('l')
            self.__turn(direction)
            self.__move('r')
        elif side == 'B':
            self.__move('r')
            self.__move('r')
            self.__turn(direction)
            self.__move('l')
            self.__move('l')
        elif side == 'F':
            self.__turn(direction)
        elif side == 'M':
            pass
    def sequence(self, sequence):
        """
        run a sequence of moves on the cube
        :param sequence: whitespace-separated moves, e.g. "R U` F2" --
            a trailing ` means counter-clockwise, a trailing 2 performs
            the move twice, W (wide) moves are not implemented yet
        :return: void

        NOTE(review): for a W move ``moved`` stays False, so the final
        ``if not moved`` still performs a plain turn of that face --
        confirm that fallthrough is intended.
        """
        moved = False
        sequence = sequence.split()
        for move in sequence:
            move = list(move)
            direction = 'r'
            if "`" in move:
                direction = 'l'
            elif "2" in move:
                for i in range(2):
                    if "W" in move:
                        pass
                        # W moves are not fully done yet
                    else:
                        self.turn(move[0], direction)
                        moved = True
            elif "W" in move:
                pass
                # W moves are not fully done yet
            if not moved:
                self.turn(move[0], direction)
            else:
                moved = False
    def load_scramble(self, scramble):
        """
        loads the scramble onto the cube
        :param scramble: sequence of colour characters, one per sticker,
            face by face in the order top, left, front, right, back,
            bottom; characters beyond six faces' worth are ignored
        :return: void
        """
        sideSize = self.dim[0] * self.dim[1]
        scramble = list(scramble)[:sideSize * 6]
        front = self.front
        top = self.front.top
        right = self.front.right
        bottom = self.front.bottom
        left = self.front.left
        back = self.front.right.right
        sides = [top, left, front, right, back, bottom]
        i = 0
        for side in sides:
            side.load_colors(scramble[i:i + sideSize])
            i += sideSize
| [
"along.blabri@gmail.com"
] | along.blabri@gmail.com |
b2ea4351140bdf0bee22fa9de7d78f74a508824c | fff640a7b4da979a9c3626a217a6bc11e4ab3729 | /mnist_adversarial.py | af48ad50fcbb8caf12e7015f86f26ad3894688b1 | [] | no_license | glarbalytic/adversarial_example | d1c07b052a9ff6f04535e1afbeeb4121af7ee522 | 44c98ca9d5afeb71040fac3f885fc9c1d12a841c | refs/heads/master | 2020-05-07T18:55:34.307992 | 2019-03-16T21:46:56 | 2019-03-16T21:46:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,405 | py | from matplotlib import pyplot as plt
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets, transforms
from torch import utils
import tensorflow as tf
import numpy as np
from cleverhans.attacks import FastGradientMethod
from cleverhans.compat import flags
from cleverhans.model import CallableModelWrapper
from cleverhans.utils import AccuracyReport
from cleverhans.utils_pytorch import convert_pytorch_model_to_tf
def plot_predictions(images, predicted_labels, true_labels):
    """Show each image in turn with predicted (P) and ground-truth (G)
    labels in the plot title, one blocking window per image.

    The three sequences are iterated in lockstep; ``image[0]`` suggests
    (channels, H, W) arrays with the first channel drawn in grayscale --
    TODO confirm against the callers.
    """
    for image, predicted_label, true_label in zip(images, predicted_labels, true_labels):
        plt.imshow(image[0], 'gray')
        plt.title('P:{}, G:{}'.format(predicted_label, true_label))
        plt.show()
        plt.clf()
# basic CNN
class ConvNN(nn.Module):
    """Small CNN classifier for 28x28 single-channel images.

    Two conv+pool stages reduce the input to 16 feature maps of 7x7,
    followed by two fully connected layers and a log-softmax over the
    10 classes.
    """

    def __init__(self):
        super(ConvNN, self).__init__()
        # Layer registration order is kept stable so parameter
        # initialisation is reproducible under a fixed seed.
        self.conv1 = nn.Conv2d(1, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 16, 3, padding=1)
        self.fc1 = nn.Linear(16 * 7 * 7, 64)
        self.fc2 = nn.Linear(64, 10)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)    # 28x28 -> 14x14
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)  # 14x14 -> 7x7
        out = out.view(-1, 16 * 7 * 7)                  # flatten
        out = F.relu(self.fc1(out))
        return F.log_softmax(self.fc2(out), dim=-1)
# adverserialNN
class AdverserialNN(nn.Module):
    """Generator network mapping a 28x28 image to a perturbed 28x28 image.

    Two conv+pool stages (``cs`` channels each) compress the input to
    cs feature maps of 7x7; a single linear layer then projects back to
    784 values which are reshaped to image form.
    """

    def __init__(self, cs):
        super(AdverserialNN, self).__init__()
        self.cs = cs
        # Registration order kept stable for reproducible initialisation.
        self.conv1 = nn.Conv2d(1, cs, 3, padding=1)
        self.conv2 = nn.Conv2d(cs, cs, 3, padding=1)
        self.fc1 = nn.Linear(cs * 7 * 7, 784)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)    # 28x28 -> 14x14
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)  # 14x14 -> 7x7
        out = self.fc1(out.view(-1, self.cs * 7 * 7))   # flatten + project
        return out.view(-1, 1, 28, 28)
gpu = torch.cuda.is_available()
if gpu:
print('Running on GPU')
else:
print('Running on CPU')
# load using datasets loader from torchvision
train_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('data', train=True, download=True,
transform=transforms.ToTensor()),
batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('data', train=False, transform=transforms.ToTensor()),
batch_size=16)
model = ConvNN()
if gpu:
model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# train the CNN
for idx, (xs, ys) in enumerate(train_loader):
if gpu:
xs = xs.cuda()
ys = ys.cuda()
optimizer.zero_grad()
preds = model(xs)
loss = F.nll_loss(preds, ys)
loss.backward()
optimizer.step()
if idx >= 10000:
print(preds[0].argmax(), ys[0], loss.item())
# get test images
test_set = next(iter(test_loader))
test_images = test_set[0]
test_labels = test_set[1]
if gpu:
test_images = test_images.cuda()
test_labels = test_labels.cuda()
# predict with untouched test images
predicted_labels = model(test_images).argmax(dim=1).cpu().detach().numpy()
plot_predictions(test_images[0:10].cpu().detach().numpy(), predicted_labels[0:10], test_labels[0:10].cpu().detach().numpy())
native_adverserial_model = True
adverserial_epochs = 100000
info_range = 1477
if native_adverserial_model:
channel_size = 128
adverserial_model = AdverserialNN(channel_size)
adverserial_model.cuda()
optimizer = torch.optim.Adam(adverserial_model.parameters(), lr=0.0001)
# fix the CNN, train adverserialNN to make the test_images look as close to their original as possible, while still misclassifiying
min_clip = 2
max_clip = 0
for idx in range(adverserial_epochs):
current_batch = idx % len(test_images)
optimizer.zero_grad()
output_images = adverserial_model(test_images[current_batch:current_batch + 1])
preds = model(output_images)
min_torch = torch.FloatTensor([min_clip])
max_torch = torch.FloatTensor([max_clip])
if gpu:
min_torch = min_torch.cuda()
max_torch = max_torch.cuda()
classifier_loss = torch.min(min_torch, F.cross_entropy(preds, test_labels[current_batch:current_batch + 1]))
# if it is closer than 10, it is perfect
closeness = torch.max(max_torch, torch.norm(test_images[current_batch:current_batch + 1] - output_images))
adverserial_loss = -classifier_loss + closeness
adverserial_loss.backward()
optimizer.step()
if idx % info_range == 0:
plot_predictions(output_images[:1].cpu().detach().numpy(), preds[:1].argmax(dim=1).cpu().detach().numpy(),
test_labels[current_batch:current_batch + 1].cpu().detach().numpy())
print('CNN Loss:{}, Closeness:{} Adverserial Loss:{}'.format(classifier_loss.item(), closeness.item(), adverserial_loss.item()))
else:
# We use tf for evaluation on adversarial data
sess = tf.Session()
x_op = tf.placeholder(tf.float32, shape=(None, 1, 28, 28,))
# Convert pytorch model to a tf_model and wrap it in cleverhans
tf_model_fn = convert_pytorch_model_to_tf(model)
cleverhans_model = CallableModelWrapper(tf_model_fn, output_layer='logits')
# Create an FGSM attack
fgsm_op = FastGradientMethod(cleverhans_model, sess=sess)
fgsm_params = {'eps': 0.025,
'clip_min': 0.,
'clip_max': 1.}
adv_x_op = fgsm_op.generate(x_op, **fgsm_params)
adv_preds_op = tf_model_fn(adv_x_op)
# Run an evaluation of our model against fgsm
total = 0
correct = 0
for idx in range(adverserial_epochs):
current_batch = idx % len(test_images)
avd_images = sess.run(adv_x_op, feed_dict={x_op: test_images[current_batch:current_batch + 1].cpu()})
adv_preds = sess.run(adv_preds_op, feed_dict={x_op: test_images[current_batch:current_batch + 1].cpu()})
correct += (np.argmax(adv_preds, axis=1) == test_labels[current_batch:current_batch + 1].cpu().detach().numpy()).sum()
total += 1
if idx % info_range == 0:
plot_predictions(avd_images[0:8], adv_preds[:8].argmax(axis=1), test_labels[:8].cpu().detach().numpy())
acc = float(correct) / total
print('Adv accuracy: {:.3f}'.format(acc * 100))
acc = float(correct) / total
print('Adv accuracy: {:.3f}'.format(acc * 100))
| [
"egeozsoy97@gmail.com"
] | egeozsoy97@gmail.com |
65862506e7c2a0b1eba9b24168fb76d1f57c32fd | 87fb0ae5563512bf4cfe2754ea92e7f4173f753f | /Chap_05/Ex_129.py | 67451fbd6333873e212e51249f4b024c92250365 | [] | no_license | effedib/the-python-workbook-2 | 87291f5dd6d369360288761c87dc47df1b201aa7 | 69532770e6bbb50ea507e15f7d717028acc86a40 | refs/heads/main | 2023-08-21T13:43:59.922037 | 2021-10-12T20:36:41 | 2021-10-12T20:36:41 | 325,384,405 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # Tokenizing a String
# Tokenizing is the process of converting a string into a list of substrings, known as tokens.
def tokenbystring(string: str) -> list:
string = string.replace(' ', '')
tokens = []
dgt = ''
for s in string:
if s in ['*', '/', '^', '+', '-', '(', ')']:
if dgt != '':
tokens.append(dgt)
dgt = ''
tokens.append(s)
elif 0 <= int(s) <= 9:
dgt += s
if s == string[len(string)-1]:
tokens.append(dgt)
return tokens
def main():
# exp = input("Enter a mathematical expressione: ")
exp = '52 + 3 - 86 * (936 / 2)'
print('The tokens are: {}'.format(tokenbystring(exp)))
if __name__ == "__main__":
main()
| [
"cicciodb@hotmail.it"
] | cicciodb@hotmail.it |
ac82f95718232006db1ee07eb767a672285c182c | 5e8c0364a0eaa3b21a4ab1f098264c2de438446d | /study/str_multy.py | 10dcc9bdadc0bf209dd5e85aa8fe1d309226208a | [] | no_license | A-hungry-wolf/python | fcc2467b7ce00733df685b306a7aec6cf3aaef4b | beefe6c024accfc2d928a0170257f9d6b9afc88b | refs/heads/master | 2022-12-12T01:56:08.175830 | 2020-09-10T07:44:13 | 2020-09-10T07:44:13 | 292,148,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | sentence = input("Sentence: ")
screen_width = 80
text_width = len(sentence)
box_width = text_width +6
left_margin = (screen_width - box_width) // 2
print()
print('' * left_margin + '+' + '-'*(box_width-2)+ '+')
print('' * left_margin + '| ' + ' '*text_width + ' |')
print('' * left_margin + '| ' + sentence + ' |')
print('' * left_margin + '| ' + ' '*text_width + ' |')
print('' * left_margin + '+' + '-'*(box_width-2)+ '+')
print()
| [
"yutao.chen@aliyun.com"
] | yutao.chen@aliyun.com |
8e3659ceaa3bcdb5a687bc5c9fac627026b66876 | cfb69b167f38980a276919830707b6f10dde94a4 | /urls.py | 1995a74bddbe211c342534b77cb437d834a6235d | [] | no_license | rahul342/evernote_toy_app | 370ac9e7dd01f366548082a88cce8f7a78739624 | a231088cf430e7cca74f8711b68a20406f26b801 | refs/heads/master | 2021-01-02T08:34:17.625629 | 2012-10-26T23:18:23 | 2012-10-26T23:18:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from django.conf.urls.defaults import *
from django.views.generic.simple import redirect_to
handler500 = 'djangotoolbox.errorviews.server_error'
urlpatterns = patterns('',
url(r'', include('social_auth.urls')),
('^_ah/warmup$', 'djangoappengine.views.warmup'),
('^login/$', 'evernotoy.views.login'),
('^home/$', 'evernotoy.views.home'),
('^load_more/(\d+)/(\d+)/$', 'evernotoy.views.load_more'),
('^logout/$', 'evernotoy.views.logout'),
('^$', redirect_to, {'url': '/home/'}),
)
| [
"rahul@digitalgreen.org"
] | rahul@digitalgreen.org |
3b821c8916710daabeecc602986e7cb5128ee24c | e0aa52e2d6ff5e0200c7606c34990df59403843a | /InfiniteSkills - Learning Python Programming/Chapter 10/listcomp2.py | 52c5e2cd4e6f084d11d94efd885aaf9b82f2893e | [] | no_license | Marrary2/Python-Studies- | 9c5e51f56c192be8d1f3755cb46d59daefcfbcc4 | d4d43605c9be9939616cc3ded2262d1e08bf188d | refs/heads/master | 2020-07-01T11:45:48.686067 | 2016-12-12T18:03:36 | 2016-12-12T18:03:36 | 74,079,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | #file = open('grades.txt')
#grades = file.readlines()
#print(grades)
#for i in range(len(grades)):
# grades[i] = grades[i].rstrip()
#print(grades)
grades = [grade.rstrip() for grade in open('grades.txt')]
print(grades) | [
"moisesmarrary@gmail.com"
] | moisesmarrary@gmail.com |
c56bd17bde912c7ffd206eecf251a580b76d0c35 | 837ec24d4d90bb19844947b5ddf11a17d95469e6 | /main.py | b6aebee01c43c7a2a7a36ad5921156d4eb0803ab | [] | no_license | hocchudong/openstack-report | 99ca66cb762292cdae4b362325082f445853cbca | 95b75bd022242570ac0ac1c16b0e5e289522cbed | refs/heads/master | 2016-08-10T23:57:53.240635 | 2016-03-18T07:11:27 | 2016-03-18T07:11:27 | 49,656,226 | 1 | 10 | null | 2016-03-18T07:11:28 | 2016-01-14T15:34:05 | null | UTF-8 | Python | false | false | 9,656 | py |
from flask import Flask, session, render_template, url_for, redirect, request
from flask.ext.bootstrap import Bootstrap
from keystone_api import (get_token, get_tenant_id, get_tenant_list)
from mail import (send_mail,reports)
from neutron_api import (check_neutron_service, get_ports, get_network)
from nova_api import (get_server_list, get_compute_list, get_compute_statistics, check_nova_service, get_tenant_usage)
from cinder_api import (get_volumes_list)
import os
# default Variable
username = None
password = None
tenant_name = 'admin'
hostname = None
error = None
network_public_id = ''
ip_used = 0
app = Flask(__name__)
Bootstrap(app)
# load config from file
app.config.from_pyfile('config.py')
## import config value
keystone_port = app.config['KEYSTONE_PORT']
nova_port = app.config['NOVA_PORT']
neutron_port = app.config['NEUTRON_PORT']
cinder_port = app.config['CINDER_PORT']
## config email
mail_server = app.config['MAIL_SERVER']
mail_server_port = app.config['MAIL_SERVER_PORT']
# your mail
sender = app.config['SENDER']
password_sender = app.config['PASSWORD_SENDER']
#sender = os.environ.get('SENDER')
#password_sender = os.environ.get('PASSWORD_SENDER')
# config neutron
network_public_name = app.config['NETWORK_PUBLIC_NAME']
#network_public_name = os.environ.get('NETWORK_PUBLIC_NAME')
## login to UI use username, password and IP API
@app.route("/login", methods=['GET', 'POST'])
def login():
global username
global password
global hostname
global error
error = request.args.get('error')
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
hostname = request.form['hostname']
token = get_token(tenant_name, username, password, hostname, keystone_port)
print token
session['logged_in'] = True
session['token'] = token
return redirect(url_for("index"))
return render_template("login.html", error=error)
## logout
@app.route("/logout")
def logout():
session.pop("logged_in", None)
return redirect(url_for("login"))
## show all service status in openstack
@app.route("/services")
def services():
if not session.get('logged_in'):
return redirect(url_for('login'))
token = session.get('token')
id_tenant_admin = get_tenant_id(token, hostname, keystone_port, 'admin')
nova_service = check_nova_service(token=token, tenant_id=id_tenant_admin, username=username, password=password,
hostname=hostname, keystone_port=keystone_port)
neutron_agents = check_neutron_service(token=token, tenant_id=id_tenant_admin, username=username, password=password,
hostname=hostname, keystone_port=keystone_port)
print nova_service
return render_template("services.html", nova_service=nova_service, neutron_agents=neutron_agents)
###show all instance in openstack
@app.route("/instances")
def show_instance():
if not session.get('logged_in'):
return redirect(url_for('login'))
token = session.get('token')
if token != None:
id_tenant_admin = get_tenant_id(token, hostname, keystone_port, 'admin')
instances_list = get_server_list(id_tenant_admin, token, hostname, nova_port)
return render_template("instances.html", instances_list=instances_list,
network_public_name=network_public_name)
else:
error = 'Time Out'
return redirect(url_for('login', error=error))
## show resource usage all tenant
@app.route("/tenant")
def tenant_usage():
if not session.get('logged_in'):
return redirect(url_for('login'))
token = session.get('token')
if token != None:
tenant_list = get_tenant_list(token, hostname, keystone_port) # get tenant list
tenant_admin_id = get_tenant_id(token, hostname, keystone_port, 'admin') # get id tenant admin
tenant_usage = {}
all_tenant_usage = []
for tenant in range(len(tenant_list['tenants'])):
tenant_usage['name'] = tenant_list['tenants'][tenant]['name']
tenant_usage['id'] = tenant_list['tenants'][tenant]['id']
tenant_usage_detail = get_tenant_usage(tenant_admin_id, tenant_list['tenants'][tenant]['id'],
token,hostname, nova_port) # get instance in tenant
if 'server_usages' in tenant_usage_detail['tenant_usage']:
instances = len(tenant_usage_detail['tenant_usage']['server_usages'])
vcpus_used = 0
rams_used = 0
disks_used = 0
for instance in range(instances):
rams_used = rams_used + tenant_usage_detail['tenant_usage']['server_usages'][instance]['memory_mb']
vcpus_used = vcpus_used + tenant_usage_detail['tenant_usage']['server_usages'][instance]['vcpus']
disks_used = disks_used + tenant_usage_detail['tenant_usage']['server_usages'][instance]['local_gb']
tenant_usage['tenant_usage'] = {"instances": instances, "rams_used": rams_used,
"disks_used": disks_used, "vcpus_used": vcpus_used}
else:
instances = 0
vcpus_used = 0
rams_used = 0
disks_used = 0
tenant_usage['tenant_usage'] = {"instances": instances, "rams_used": rams_used,
"disks_used": disks_used, "vcpus_used": vcpus_used}
all_tenant_usage.append(tenant_usage.copy())
return render_template("tenant.html", all_tenant_usage=all_tenant_usage)
else:
error = 'Time Out'
return redirect(url_for('login', error=error))
return render_template("tenant.html")
## index show resource from total compute or each compute
@app.route("/", methods=['GET','POST'])
def index():
all = True
alert = None
if not session.get('logged_in'):
return redirect(url_for('login'))
token = session.get('token')
if token != None:
if request.method=='POST':
email = request.form.get('email')
node = request.args.get('node')
cpu_used = int(request.args.get('cpu_used'))
cpu_total = int(request.args.get('cpu_total'))
ram_used =int(request.args.get('ram_used'))
ram_total = int(request.args.get('ram_total'))
hdd_free = int(request.args.get('hdd_free'))
hdd_total = int(request.args.get('hdd_total'))
instances = int(request.args.get('instances'))
if node == "all":
volumes = int(request.args.get('volumes'))
else:
volumes = 0
alert = reports(node,cpu_used,cpu_total,ram_total,ram_used,
hdd_total,hdd_free,instances,volumes,email,mail_server,
mail_server_port,sender,password_sender)
id_tenant_admin = get_tenant_id(token, hostname, keystone_port, 'admin') # get ID of tenant Admin
ports = get_ports(token, hostname, neutron_port) # get all ports details
networks_list = get_network(token, hostname, neutron_port) # get all network list
for net in range(len(networks_list['networks'])):
if networks_list['networks'][net]['name'] == network_public_name:
global network_public_id
network_public_id = networks_list['networks'][net]['id']
if request.args.get('show') == 'all': # display all compute list
compute_list = []
list_node = get_compute_list(id_tenant_admin, token, hostname, nova_port)
for i in range(len(list_node['hypervisors'])):
ip_used =0
info = get_compute_list(id_tenant_admin, token, hostname, nova_port,
str(list_node['hypervisors'][i]['id']))
compute_name = list_node['hypervisors'][i]['hypervisor_hostname']
for ip in range(len(ports['ports'])):
if ports['ports'][ip]['network_id'] == network_public_id and ports['ports'][ip]['binding:host_id'] == compute_name: # if network in instance match with network public
ip_used = ip_used + 1
info['ip_used'] = ip_used
compute_list.append(info)
print compute_list
return render_template("index.html", compute_list=compute_list,
total=False,alert =alert)
else:
ip_used = 0
for ip in range(len(ports['ports'])):
if ports['ports'][ip][
'network_id'] == network_public_id: # if network in instance match with network public
ip_used = ip_used + 1
volumes = get_volumes_list(id_tenant_admin, token, hostname, cinder_port)
compute_list = get_compute_statistics(id_tenant_admin, token, hostname, nova_port)
return render_template("index.html", compute_list=compute_list,
ip_used=ip_used, volumes=volumes,total=True,alert = alert)
else:
error = 'Time Out'
return redirect(url_for('login', error=error))
return render_template('index.html')
## run app
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port,debug=True)
| [
"great_bn@yahoo.com"
] | great_bn@yahoo.com |
5c1886950887ff7a8a83d2c67ef0a6f8888de771 | 1fcc1f9fb9309ab8be1d68b3118b1784a515714a | /app.py | e3c0a0c598b507bd04f4df4dc22cae3c8fb85a2e | [] | no_license | miguelmg4/capstoneXIV | e4af82c4dd28b14508ab41e5335d28dd4b449cb3 | 43394e4292f852aed048b7b6eb829f22873d6468 | refs/heads/main | 2023-08-16T11:08:57.830164 | 2021-10-05T18:07:03 | 2021-10-05T18:07:03 | 413,119,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,095 | py | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
from datetime import date
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# DATA
df = pd.read_parquet('/tmp/social_network.parquet')
# LAYOUT
app.layout = html.Div([
html.H1('Dashboard Social Networks', style={
"text-align": "center", "margin-top": "24px", "margin-bottom": "48px"}),
html.Div([
html.Label('Datetime Range'),
dcc.DatePickerRange(
id='date-picker-range',
start_date=date(2021, 1, 1),
end_date=date(2021, 4, 30),
),
html.Label('Social Networks'),
dcc.Dropdown(
id="social-networks-dropdown",
options=[{"label": social_network, "value": social_network}
for social_network in df.social_network.unique()],
value=[social_network for social_network in df.social_network.unique()],
multi=True
),
html.Label('Devices'),
dcc.Checklist(
id='devices-checkbox',
options=[{"label": device, "value": device}
for device in df.device.unique()],
value=[device for device in df.device.unique()],
labelStyle={'display': 'inline-block'}
)
], style={"columnCount": 3, 'textAlign': "center", "margin-top": "24px", "margin-bottom": "48px"}),
html.Div([
html.Div([
html.Img(src="https://upload.wikimedia.org/wikipedia/commons/9/99/Sample_User_Icon.png",
style={"width": "50px"}),
html.H2(
id='total-visit',
)
]),
html.Div([
html.Img(src="https://upload.wikimedia.org/wikipedia/commons/thumb/c/cd/Facebook_logo_%28square%29.png/240px-Facebook_logo_%28square%29.png",
style={"width": "50px"}),
html.H2(
id='facebook-visit',
)
]),
html.Div([
html.Img(src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxAREBAQEA8PFQ8WFRYQERYVFg8VEBEQFRUWFxUWGBUYHSggGBolHRYVITEiJikrMC4uFx81ODMtNygtLi8BCgoKDg0OGhAQGi0lHyUtLi83Ky0tLS0rLjUtLS0tLS0tLS0tLS0tLS0tLS0tLS0tLy0tLS8tLSstLS0tKy0tNf/AABEIAOEA4QMBIgACEQEDEQH/xAAcAAEAAgIDAQAAAAAAAAAAAAAABgcCBQEDBAj/xABKEAABAwIACQYHDAkFAQAAAAABAAIDBBEFBhIhMUFRYXEHEyKBkaEjMlJyscHSFEJTVGJzgpKTosLRFhc0Q2Oyw+HwJDOjs/EV/8QAGgEAAgMBAQAAAAAAAAAAAAAAAAQDBQYBAv/EADYRAAECAwQHBwQCAgMAAAAAAAEAAgMEEQUhMVESQWFxobHRFCKBkcHh8BMyNFJCsiQzFWLx/9oADAMBAAIRAxEAPwC8UREIRF01M7I2ufI5rWNGU5ziA1oGkklVpjPygyPLoqO7I9BlI8I/zQfEG85+ClhQXRDRqkhwnPNAp7hbDtNSi88zGnSG6ZHDcwXJG+yhuEeU1ucU9OT8qVwH3G3uOsKuZHlxLnEucTdxJJc47STnJWKsYclDH3XngnGSrBjepPVY+YQk0TMjGyONtu1+Ue9auXGKuf41ZU9UkrR2NIC1iJlsJjcAPJTiGwYAeS9jsK1J01NQeMsx9a6nVcp0yyHi559a6EXdEDUvWiMlkZHeU7tKxJRF25doiIskVXaLFFkiEUWKyD3eUe0oiEaKzbVSDRJIODnD1rubhOoGioqBwklHrXmRcoMgjQGS2EeMFa03bWVXXLM4dhJC2NNjzhCM/wC+HjY9jCO0AO71HlivJhsOLR5LwYTTiB5Kw8HcpxFhU0wI1uiNj1Mf7SmOCMZqSrsIZm855DujJvs0+NxFwqLXP/o3FLvk4bvtuUL5VhwuX0aiqPFvHyeAtjqS6aHRcm87BucfHG52ffqVoYProp42ywvD2O0Ed4I0gjYc4VfGgOhY4ZpKJCdDN69iIihUSIiIQi6aidkbHSPcGsaC5zjmDWjOSV3KruUzGIySe44neDYQZyPfyaQzg3MTv81SwYRiO0QpIUMvdQLT4441vrH5DLtpmnoN0F5Hv379g1cVGkRXbGBg0W4K1a0NFBgiErd4t4sz1r+gMmIGz5HA5DTsA987d2kXCtHAuKFJTAFsfOSj95JZzr7WjQ3qHaoY00yFcbzl1+V2KKJHay7WqlwfgGrnsYaaZzTodk5LDwe6zT2rc0/J5XuGdkLNz5Bf7gcrjRJOnnnAAJUzbjgAqnj5M6v301KODpT+ALvbyYz66mHqa8q0UUZnIufBee1RM+CrEcl8nxqP6j/aWX6r5PjbPsz7asxFztcXPgOiO1Rc+A6Ks/1Xv+Nt+zPtp+q9/wAbb9mfbVmIjtUbPgOiO1Rc+A6Ks/1Xv+Nt+zPtp+q9/wAbb9mfbVmIjtUXPgOiO1xc+A6Ks/1Xv+Nt+zPtrg8mEnxtn2bvaVmojtUXPgOiO1xc+A6Kr3cmM+qphPFjx610ycmlV72amPF0o/AVayLva4ufBd7XFz4BVBLyd17dAgf5sh/E0LUVuLVdCLyUswGstHONHEsuAr2RemzsQY0XsTrxiByXzmAivLDOLdJVg87C3L+EZZso+kNPA3CrHGjFCaj6bSX01/HAsWX0B41bL6DuuAm4U0192BTcKZZENMDl89lG1t8WsYJaKXLbd0TiOdjv0XjaNjhqK1CJg0cKHBTOYHChwV/4MwhHURMmidlRuFwdY2gjUQbgjcvaqdxAxhNNOIpHeAlIa7ZHKbBr9wOYHdY6lcSp40L6bqalURoRhupqRERQqFavGPCYpaWac2u1vQB0Okd0WDhlEKhpHlxLnElxJc4nS5xNyTvJVj8rdf0
aemB0kzP+j0WdWd/1Qq3srWSYGw9LNWMqyjK5ot3ingB1bOGAkRNs6Z40tZqA+U6xA4E57WWmV1Yk4H9y0kbSLSvHOy7cpwFm/RFh1HavczG+my7E/KqSPE+myoxK3FDSRwxsjiYGxtGS1o0AevjrXqRFTqqRERCEREQhEREIRERCEREQhEREIRERCEWufhmka7JdV0wfoyTLEHX4XuqwxzxqkqZHxRPLaVpLQGm3O2zFzraWnUNFrE59EVTTJaoq4qzhWaS2rzTZ85L6HY8EAggg5wRoIXEsYcC1wBaQQQQCCDmIIOkKlMWcZJqOQEFzoSfCR36JGstHvXb9etXRTTtkYyRhBY5oe06i1wuD2FRRYRYlZmWdAIreDr+fNqqLHjFn3JKHxg+5pD0NJ5t+kxk6xa5B2A7LmMK+cPYMbVU8sDrdJvRPkyDOx3UQFRMsZaS1ws5pLXDWHA2I7U9Lxi9tDiFYycUxWX4hdZCunEXCxqaONzjeSPwMm0uaBZx4tLTxJVMKa8lddkVMsBPRkZlD5xhuLfRc/wCqETLdKHXJcnIWlDrrF/XhyVqoiKsVMqZ5RqjnMISjVG2OIcMnLPe8qMgLa4zSZdbVO/jyN6mvLR3BawBXLO6wDYOSu4TKMA2Dkthi5Q8/V08JF2ukblDaxvSePqtKvpVByawB1e0+RHI8bjmZ+NW+kJt1XgbEjOnvgbOaIiJVJIiIhCIiIQiLQ4cxopqW4e7Ll8hli4ecdDevPsBUFwrjzVy3EZbCzYyxfbe9w9AC9BpKagSUWLeBQZlWrJIGi7iANpIAXgfhykBsaumB2c7FfsuqVqJ5JDlSPkc7a5znHtcutetAZp9tkj+TvIdVdow9R6BWUt/nYfzXuhma8XY5rhtaQR2hUJZcwvcw5THuDtRaS13aM679Pauuskfxdw91f6KocGY61sNgZOdb5Mud1tzx0r8SVOMB44U1TZhPNTHMGvIs47Gv0HgbHcvJYQko8hGhCtKjZ89tqky8OGXOFNUFnjiKQt87INu9e5cELwkwaGq+eLLiykGNmLr6OY2aTA4kxOzkWPvCdTho3gX220NlYh4N4WsaWvaHtvB+f+rFXRiG5xwdTZWmzgPNEjw37oCqnAWBpqqURxg6st1uhG3yiduwa1d1DSshjjiYLMY0Mbts0WF9pUMw+4BVdqPaGiHrrX5vqvSqYx/ouar5rCzZMmUfTzO7XNeetXOqw5WIQJqZ+t0bo+pjrj+cqOWdR6Us51I1Mweqgi2uKlTzVdSP/iNYdwkPNnucVq1lDKWOa8aWkPHFpv6lYG8UV06HpAtzu819DouMobQip6rLKg8Lm9RUHbNIe17l5QF3VZvJIdr3HtcVgArTSuWiY3uhTHkrZ/rJTsgcO2SP8laarPkrb4aoP8MDtd/ZWYkYxq9VM9/uO4ckREUSURERCEVe4146G7oKR1hofMNO8R7B8rs1Fc4+Yxm7qSF2bRO4az8GD6ezaoLZAIVzIyIIESINw9SsTpJOk5ztJOkpZZ2WcUTnODWtc4nM0NBLidgAzlBiK5ouiyWUvwdiJUyAGUsibv6Un1Wm3et5Dye09unNOT8nm2jsLSu6RSj56XYaF1d1/K7yKrSy4srNk5PKb3s1QDvMRHYGhabCHJ/OwEwyRyjYfBv4C5I7wjSK6yflnGmlTfUKFWXGSvXVUr4nFsjHMcNLXAg8c+reuiy9B6eA1hSzFXHN0JbDUuc+HQH5y+Mekt3aRqvoVlwyte1rmuDmkBzSCCCDoIOsKh7KWYj4ymB4p5neAeeiToieTp3NJ07Dn2rhoVUT9nBwMSEL9Yz9+e/GzJ4WvaWPa1zDmc1wBaRsIOlaZ2KGDy7K9ysvsBkDfqh1u5b9FwEjBUbIj2fa4jcaLz0lJHE0MijYxg0NaA1t9tgvQiLi8Iq85W2dGkdsMje3mz+FWGoHyrt8DTn+I4drP7KSF94Tlnn/ACG+PIqslwR6FkhCsAb1pdFW7/8
AR3oor7t3okNBZ3syiT85J3n0rkBZALkBSuerhrVOOSseEqfMj9LlYyrzksHTqeDPS9WGl3GpVHPf73eHIIiIvKURaPGvC3uWnc5p8K7wce5x0u6hc8bDWt4qwx8r+dqiwHoRDIGzLzF59A+ivL3aITkjA+tGAOAvPTxNPCqjFttydZOklcZK7LLsiiLnNa0ElxDWgaS4mwHalzEWoou/A2CZamURxDe5x8Vrdp/LWrRwJgGGlbaNt5COnIbZbvyG4f3TF/BLaWFsYsXnpSO8p/HYNA/9W3TDW0vKzc9PGMSxv2c9p6eeKIiL0q9EREIWtwtgqGpZkysv5Lhmew7Wu1eg61V2MWAJKSSx6UbvEcBmcNh2OGxXGvDhXB8dTE+GQdFwzHNdrtThvC4RXBPyU86XdQ3tyy2j5fxVJ2WBC9uEKN8MskMg6TTknYdhG4ix615bLwHFasUIqFZ+IWGTPBzTzeSKzbnS6P3p3kWIPAHWpWqdxSr+Yq4nXs1x5p/mPIGfcDkn6KuJewarLWpLfRj1GDr+vG/cQiIi6q5FB+VYf6aD578DlOFCeVT9mg+e/pvXpn3BNyH5LN6q5FyQuE6HLV6K3XPlF5ctF4SX011ALkBcgLsASbnr0ApryXDpVPCP0vVgqA8mQ6dTwj9Miny401CoLQ/Id4f1CIiLqTXXJIGguOgAk8AqYnkL3ukd4znF7uLiSfSrcw261NU2081JbjzZVRlqTmn0ICvrHZ3Xu2geqxyVJcQaEPqDIR0Y23HnuuG92X2BRyyn3J5EBBK7WZcnqaxpH8xUME6TwCnbQfoS7iNd3ndyqPFS1ERWSyiIiIQiIiEIiIhCr/lKweLxVAGm8T+q7md2X2BQcq1MfYQ6hefJcxw45Qb6HFVYVA+5y1VlRC+WFdRI9fVYkK68D1HO08Ep0vjY53nFov33VKOVtYjuJoIL6stvUJHgd1l7YVDbTAYLXZGnmPZb9ERSLNooXyp/ssPzw/63qaKGcqH7LF89/TeujFO2d+SzequIWJC7CFwQmGuWv0V33XC5XC9VUFF6AFkAuQFmAql71AAplyaDp1Hms9L1PVBeTcdOo4R+l6nSmgmrAVnrR/Id4f1CIiKVJLw4bF6apH8KT+QqprK45ow5rmnQQWngRZU/JGWktd4zSWniDYqvnbi0q+sZ/ce3aD5rqsp9yeu/08jdYlv1FjB+EqB2UnxCqwyZ8R0SNuPPZc27C7sUEs+kQV3Jy0WF8u6mq/yVgIiK3WVRERCEREQhEREIUdx6ktQyjyiwD7RrvQCqrU+5Sa0ZMNODnuZnbgAWt7bv7FAyloh7y1NksLZYV1knkPRYFWviK21BDvyz/wAj1VSuTANPzdLAwizhGzK88i7u8le4WKitlwEFrczyB6rYoiKZZtFDOVD9li+e/pvUzUN5T/2WL54f9b0J2zvymb1WBWJWZWJUjStiiLsyUXuqKrYBq7AFkW5zxXLWqgiPSTVL+TsdOfgz0vU4UI5PvGqPNb6XKbp+UNYQO/mVnLS/Jd4f1CIiJlIoq1xwouaqnkDoyeEbxPjdeVc9YVlLQ414M5+C7R4SO727SLdJvWM/EBLTUMvh3Yi/54J6z5gQowrgblXGSuynmdG9sjDZzSHNO8epYpZUwctTsKtXBVeyoibKzXmcNbXDS0/5sXuVW4Dww+lkuM8ZzSM2jaNjgrGwfXxTsD4nAjX5TTsI1FXUvMCKKHH5estOyZl3VH2nA+h+X4r2IiJhIoiIhCLzVlUyKN8jzZjRlOP+aTqXNXVRxMMkjmtYNJPo3ncq2xnxgdUuyW3bA03a3W8+U71DUo4kQM3pyTk3TD/+us+g2rVYXrnVE0krsxccw8lozNb1DvuvEVmVik61Wta0NAaMAvfi9g/n6mKO125Qe/Zzbc7u0C3WFcaiGIOCeaiNQ8dOQWZt5rTf6Rz8A1S9NwhRtVmbVmPqRtEYNu8dfoPBERFKqxFDeU/9mh+e/pvUyUM
5UP2aH538Dlwp2zvymb/RVmViVmVwV7aVsQu/m0Xu5hF2qV+ou+ZlnuHyiO8rlrV6cIR2mlGyR47HFdYCzMV95C8swCk+IP8AuTD5DfSfzU2UHxEd4aQbYyexzfzU4VvIGsAePNZ20x/kHcOSIiJxV6IiIQoFjbgTm3meMeCcekB7x5/CT35tijRCt6SMOBa4AtIsQc4IOkEKC4xYtuivJCC6LSRnLmcdrd/btVTNypaS9mGsZfOG7C/s+fDwIUQ36jns389+MaXdSVckLsuJ7mHaNY2EaCNxXVZcWSLXawrcgEUKltDjsRYTxX3szfdObvW3jxsoyM8jm7i19/ugquyFgU6yciDXX5sokIlly776Ebj1qrHkxtoxolJ3BknrAWqr8eW2IgiN/KkIAH0WnP2hQwrgr2ZuIbsN3vVDLLl2mpBO89AF34RwjNO7KlkJI0DQ1vBozBeIrsK4KjDiTUqwa0NFAKBdZW8xUwCamTKcD7nYemfKOnIHr2DiFni9i3JUkOddkGt2t25l9PHQN+hWRSUrImNjjaGsaLAD/M/FNwYWlecOarZ+0BBBhwz3uXvyxyXaxgAAAAAzADQAs0ROrNIiIhCKEcp58DAP4jj2N/upuoHyou6NKNplPYGD1rhwT9mCs0zx5FV+VgVkVi71LrVr24hS73HuRSr/AOcuVyqzHaiophhlqmoH8V56i4keleSy3ONUOTVSbHZLh1tAPeCtTZZePdFcDmeauILtKG12YHJbzEt9qkjymOb/ACu/Cp6q0wFNzdTC46MoA8HdEnvVlq3st9YRGR5gKktZlIwdmORPsiIislVoiIhCIiIQo7hbFaKW74/ByaTYeDcd7dR3jvUSwhgGphveMlvlMu5nHNnHWArPRKRZKG+8XHYrCXtKNCFD3htx8+tRsVOLgq2KnB0EmeSGNx2loyu3SvA7FejP7m3B8o7spKmQeMCOI6qxbbEIjvNI8j0VaFcFWY3FWjH7m/F8vtL2U+CaePOyCIHbkgu+sc69Nkn6yOPsuutiCPtaT5D1KrSgwLUT25uJxaffHos+scx6rqW4HxNjZZ85EjtOSL80Dv1v67DcpaibhyzG43qvj2pGiXN7o2Y+fSiwYwAAAAAZgBoAWaImFWoiIhCIiIQirvlPlvJTs2Nc/wCsQPwKxFVGP1Xzla8DRG1sfYMo97yOpeXYKzshlZkHIHp6qNlcNjLiGjTfJHE5gsivdi5Bl1dO3bKwnzQ4F3cChpWpc7QBdlf5Xq6ebbsCLNF2iwV6iOPFL/tSgajG7+Zv41FrKxsOUfPQPYPGtlN89ucdujrVd2WdtOHoRtLU70uPp5rSWZF04Ojrbd6j5sWNlY+BaznoGPv0rWd5409unrVcrb4vYU5iSzr806wd8k6nAendwUdnzIgxe9gbuh+aqr1aEuY0Lu4i8eo+a6KfosGPBAIIIOcEZwQdBWa06zSIiIQiIiEIiIhCIiIQiIiEIiIhCIiIQiIiEIiIhC8mEqxsEUkzvFY0uO/YBvJsOtUtUzOe973G7nOLnec43PeVKceMPiZ3ueF14mG7nDQ+QbNrR3nPqBUSKhc6pWosqVMGGXOxdy1dT4DGqxKlfJtRZdU6UjoxsNtzn9Fv3ecUUKtXEXBnMUjS4WfKeddtAIswfVseLivbVJacb6cu4a3XeePCvmpKiIvayaKE414M5uTnWjwbzc/Jk19untU2XRUwNkY5jxdrhYhKzcuI8PR14jf8u90zKzBgRNLVrVZWXNl78LYLfA+xzsPiO1OHqO5eBZJ7HMcWuFCFp2Pa9oc01BW4wJh18PQcC6HZ75u0t/L0KX0VfFMLxvB2jQ4cRpCrhctcQQQSCNBGYjgU9LWlEgjRI0hy3G/y8qJKZkIcY6QuPPerSRQCmxhqWZsvKGx4v36e9e+PG948aFp4FzfTdW7LVl3C+o8K8qqtfZccYUPj1opgiircchrpz1Pv+FZjHCPXE/taphaEscHcD0URs+Z
H8eI6qToo1+l8PwUv3PzXP6YwfBzdjPzXvtkD9wjsEx+nJSRFGv0yp/g5uyP2k/TOn+Dm7I/aXe1wf2COwTH6cuqkqKM/ppT/AAc3ZH7S4/Tan+Dn7I/aXe0wv2R2CY/TkpOii/6b0/wc/ZH7SxOPEHwU3/H+aO0wv2R/x8z+nJSpFEDj1Fqhl7WLqfj6NVMTxkA/CV3tEPNdFnTJ/hxHVTRFApsfX+9p2ji4u9AC1tZjjWPzB8bB8htu91yOpcMwzUpWWTMnEAeNeVVYldXRQty5pGsbvOc7gNJO4KA4y43umDoafKbEcznHNI8bPkt7zuzhRqonc9xc9znO1lxLndpXUV4MYuwVrK2XDgnScdI8B4a958q3rArErIr2YJwXLUyiOMZ9LifFa3ynf5nQ1WbnBoLnGgGte3FHAhqqgZQ8Cyz5DqcNTPpW7AVbq12BsGR00TYoxmGdxNsp7jpcd/qAC2KZAoslPTfaYlR9ow6+PsiIi6kkREQheeppmSsLHtBadXrGwqHYXxfkhu6O74t3jtHygPSO5TlEpNScOYHeuOYx9xsKZlpp8A93DL5gVViKfV+A4JrktyXn3zMx6xoPYtFVYqSNzxva8bD0Xesd4VBFsuPDPdGkMx0x5q7hWjBfiaHb1w5KPLgrYTYHqGaYJOoZQ7W3Xie0t8YEdRHpSTmOZ9wI3gjmnWuDvtNdxryXWViVzmXBXWuGa90KwK4KyK4KnaULArErIrgqULq6yuCsisSpQurArgrIrEqVq6sSsCsyVibbVK1eqFcFdZXY1pcbNBJ2DOe5eyDAlU/xaaY7y17W/WNgpm34LjnBv3Gm+5a0rgqV0eI9S6xkdHGNefKeOpub7ykuDMUKWGznNMj9sli0HczR23TLITjqSUa05eHgdI7OuHEqE4BxYnqSHWyIdb3DM4fIHvvRvVk4JwXFTR83E2w0uJsXudtcdZWwRMsYGqhm5+JMXG5uQ9c0REXtJIiIhCIiIQiIiEIiIhCIiKVi4tRhDWo/VoiqplW8stZIvI9EVQ5W8NdDliURRtU4WBWJRFMF6WQXfEiJiGo34LZUepSPBupEVlBVNNKRs0BZIicVLrRERcXUREQhEREIRERCF//Z",
style={"width": "50px"}),
html.H2(
id='instagram-visit',
)
]),
html.Div([
html.Img(src="https://logos-marcas.com/wp-content/uploads/2020/04/Twitter-Logo.png",
style={"width": "100px"}),
html.H2(
id='twitter-visit',
)
]),
], style={"columnCount": 4, 'textAlign': "center"}),
html.H3('Total Visits by Month', style={"textAlign": "center"}),
dcc.Graph(
id='total-visit-line'
),
html.H3('Total Visits by Social Networks', style={"textAlign": "center"}),
dcc.Graph(
id='total-visit-social-networks-line'
),
html.Div([
html.H3('Total Visits by Country', style={"textAlign": "center"}),
dcc.Graph(
id='world-map'
),
html.H3('Total Visits by Device', style={"textAlign": "center"}),
dcc.Graph(
id='diveces-pie'
)
], style={"columnCount": 2})
])
@app.callback(
Output('total-visit', 'children'),
Output('facebook-visit', 'children'),
Output('instagram-visit', 'children'),
Output('twitter-visit', 'children'),
Output('total-visit-line', 'figure'),
Output('total-visit-social-networks-line', 'figure'),
Output('world-map', 'figure'),
Output('diveces-pie', 'figure'),
Input('date-picker-range', 'start_date'),
Input('date-picker-range', 'end_date'),
Input('social-networks-dropdown', 'value'),
Input('devices-checkbox', 'value'))
def update_figures(start_date_selected, end_date_selected, social_networks_selected, devices_selected):
total_visit = (
df
.loc[(df.social_network.isin(social_networks_selected)) &
(df.device.isin(devices_selected)) &
(df.datetime >= start_date_selected) &
(df.datetime <= end_date_selected)]
).shape[0]
facebook_visit = (
df
.loc[(df.social_network == 'facebook') &
(df.social_network.isin(social_networks_selected)) &
(df.device.isin(devices_selected)) &
(df.datetime >= start_date_selected) &
(df.datetime <= end_date_selected)]
).shape[0]
instagram_visit = (
df
.loc[(df.social_network == 'instagram') &
(df.social_network.isin(social_networks_selected)) &
(df.device.isin(devices_selected)) &
(df.datetime >= start_date_selected) &
(df.datetime <= end_date_selected)]
).shape[0]
twitter_visit = (
df
.loc[(df.social_network == 'twitter') &
(df.social_network.isin(social_networks_selected)) &
(df.device.isin(devices_selected)) &
(df.datetime >= start_date_selected) &
(df.datetime <= end_date_selected)]
).shape[0]
df_by_month = (
df
.loc[(df.social_network.isin(social_networks_selected)) &
(df.device.isin(devices_selected)) &
(df.datetime >= start_date_selected) &
(df.datetime <= end_date_selected)]
.groupby(['year', 'month'])
.count()
.name
.reset_index()
.assign(
year_month=lambda df: df.year+'-'+df.month
)
)
df_by_month_social_networks = (
df
.loc[(df.social_network.isin(social_networks_selected)) &
(df.device.isin(devices_selected)) &
(df.datetime >= start_date_selected) &
(df.datetime <= end_date_selected)]
.groupby(['year', 'month', 'social_network'])
.count()
.name
.reset_index()
.assign(
year_month=lambda df: df.year+'-'+df.month
)
)
df_country = (
df
.loc[(df.social_network.isin(social_networks_selected)) &
(df.device.isin(devices_selected)) &
(df.datetime >= start_date_selected) &
(df.datetime <= end_date_selected)]
.groupby(['country_code', 'country'])
.count()
.name
.reset_index()
)
df_devices = (
df
.loc[(df.social_network.isin(social_networks_selected)) &
(df.device.isin(devices_selected)) &
(df.datetime >= start_date_selected) &
(df.datetime <= end_date_selected)]
.groupby(['device'])
.count()
.name
.reset_index()
)
total_visit_fig = px.line(
df_by_month,
x="year_month",
y="name",
labels={
"name": "Total Visits", "year_month": "Month"
}
)
total_visit_social_network_fig = px.line(
df_by_month_social_networks,
x="year_month",
y="name",
color="social_network",
labels={
"name": "Total Visits", "year_month": "Month"
}
)
world_map_fig = px.choropleth(
df_country,
locations='country_code',
color="name",
hover_name="country",
color_continuous_scale='plasma',
labels={
'name': 'Total Visits'
}
)
devices_pie_fig = px.pie(
df_devices,
values='name',
names='device',
labels={
'name': 'Total Visits'
}
)
return total_visit, facebook_visit, instagram_visit, twitter_visit, total_visit_fig, total_visit_social_network_fig, world_map_fig, devices_pie_fig
if __name__ == '__main__':
app.run_server(host='0.0.0.0', port="80")
| [
"noreply@github.com"
] | noreply@github.com |
6e16831181959034015488712920032acebc6c61 | df2df2cb11f9f78b6e3493cb24f83dff43536e5e | /MyTestProjects/selenium_text/1s.py | 33596846b1178aa4876d6da8f31589ec6e5682e1 | [] | no_license | chengzizhen/Airmcl_Test | b4068bf9b0eec5ea8f160f080f1a783f90d8ec13 | aa6e44838843e4e812094d33d94f4a4c4c7d8312 | refs/heads/master | 2023-03-16T00:30:39.602224 | 2019-01-19T03:45:17 | 2019-01-19T03:45:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
def ok():
driver = webdriver.Firefox()
driver.get('http://www.51zxw.net')
driver.find_element_by_name('username').send_keys('z87254091')
driver.find_element_by_name('password').send_keys('z87254091')
driver.find_element_by_css_selector('[type="submit"]').click()
# username_loc = (By.NAME, 'username')
# password_loc = (By.NAME, 'password')
# submit_loc = (By.CSS_SELECTOR, '[type="submit"]')
#driver.maximize_window()
return driver
if __name__ == '__main__':
ok()
| [
""
] | |
37b44ac59997b25f1b9ca2ffef1404ae0c944360 | 086b24ee80b9ee943e709cfb38bdd9be216f416c | /utils.py | a14afa86b1bca6c9b79d07e8b3b00002f8162004 | [] | no_license | a84227321a/yyzz_ocr | 1fe49dbc1ada295cd313245dd8c351d870668850 | 5dea7f1dd105331e5be4ef3cf50f3f278286e348 | refs/heads/master | 2022-11-06T02:28:24.066208 | 2020-06-12T09:54:39 | 2020-06-12T09:54:39 | 271,237,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,649 | py | # -*- coding: utf-8 -*-
import glob
import os
import pickle
import cv2
from keras.callbacks import ModelCheckpoint, Callback
from keras.layers.core import *
from keras import backend as K
import re
import codecs
def create_result_subdir(result_dir):
# Select run ID and create subdir.
while True:
run_id = 0
for fname in glob.glob(os.path.join(result_dir, '*')):
try:
fbase = os.path.basename(fname)
ford = int(fbase)
run_id = max(run_id, ford + 1)
except ValueError:
pass
result_subdir = os.path.join(result_dir, '%03d' % (run_id))
try:
os.makedirs(result_subdir)
break
except OSError:
if os.path.isdir(result_subdir):
continue
raise
return result_subdir
def get_dict(label_pkl_path):
with open(label_pkl_path, 'rb') as f:
idx_char_dict, char_idx_dict = pickle.load(f)
return idx_char_dict,char_idx_dict
def pad_image(img, img_size, nb_channels):
# img_size : (width, height)
# loaded_img_shape : (height, width)
img_reshape = cv2.resize(img, (int(img_size[1] / img.shape[0] * img.shape[1]), img_size[1]))
if nb_channels == 1:
padding = np.zeros((img_size[1], img_size[0] - int(img_size[1] / img.shape[0] * img.shape[1])), dtype=np.int32)
else:
padding = np.zeros((img_size[1], img_size[0] - int(img_size[1] / img.shape[0] * img.shape[1]), nb_channels),
dtype=np.int32)
img = np.concatenate([img_reshape, padding], axis=1)
return img
def resize_image(img, img_size):
img = cv2.resize(img, img_size, interpolation=cv2.INTER_CUBIC)
img = np.asarray(img)
return img
def load_test_sample(img_root, label_root, char_idx_dict):
label_name_list = os.listdir(label_root)
sample_list = []
for label_name in label_name_list:
label_path = os.path.join(label_root, label_name)
img_path = os.path.join(img_root, re.sub('txt', 'jpg', label_name))
with codecs.open(label_path, 'rb', encoding='utf-8') as label_file:
text = label_file.readline()
flag = False
for char in text:
if char not in char_idx_dict:
flag = True
break
if flag:
continue
# img = cv2.imread(img_path)
# try:
# load_train_img(img_path, 32)
# except:
# print(img_path)
# continue
sample_list.append([img_path, text])
return sample_list | [
"865046239@qq.com"
] | 865046239@qq.com |
e3571be56d0195a8510b1367cf30522596fd6fc9 | d5e3f0a8dbac202866328bd56efb5ab93a11f869 | /ecommerce/ecommerce/settings.py | 25f040f8f4ed3d134544c47c9a561f8b52826fb6 | [] | no_license | koitaki/ecommerce | 5a906482e500e6b1f969520063766ac362d2ac3d | e9cd75326107cfac8e3b4a1e4658c00ca4938901 | refs/heads/master | 2021-01-22T13:58:05.515395 | 2015-03-12T16:24:48 | 2015-03-12T16:24:48 | 30,614,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,907 | py | """
Django settings for ecommerce project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project package directory, and the repository root one level above it.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment (or a secrets store) before any production deploy.
SECRET_KEY = '8r%(5g)qfaqx%d)pmy1llzn4w!hhf$8xgfb-xdpq2nwdqn_v_1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Empty is acceptable while DEBUG is True; production must list served hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'south',     # schema migrations (pre-Django-1.7 era)
    'products',  # local app
    'carts',     # local app
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ecommerce.urls'
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# SQLite development database stored alongside the project package.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'GMT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Context Processors
# https://docs.djangoproject.com/en/1.6/ref/templates/api/#django-core-context-processors-request
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.core.context_processors.tz",
    "django.contrib.messages.context_processors.messages",
    # "request" is an addition over Django 1.6's defaults; it exposes the
    # current HttpRequest to templates.
    "django.core.context_processors.request",
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(ROOT_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
    os.path.join(ROOT_DIR, 'static', 'static_files'),
)
# User-uploaded media files.
MEDIA_URL= '/media/'
MEDIA_ROOT = os.path.join(ROOT_DIR, 'static', 'media')
#MEDIA_ROOT = '/c/Users/Chris/Projects/ecommerce/static/media/'
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)
| [
"github@christopheradams.com.au"
] | github@christopheradams.com.au |
cd70b7da056f0c665b71aad7bd5232357e178005 | cc23cf70670f72155b9f86d734bda3b985d88c56 | /asura/core/utils/http.py | b87ec7ce78463eb18b83f79a9a4c278782a5122b | [] | no_license | EtheriousNatsu/asura-web | a3fecc9b14809c6b2846f6715d8c080e1829412b | 2885edcf91ad887505850ae5d0ef7f65dbebef34 | refs/heads/master | 2023-07-15T13:47:58.985740 | 2021-08-19T11:12:59 | 2021-08-19T11:12:59 | 397,573,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # encoding: utf-8
"""
@author: John
@contact: zhouqiang847@gmail.com
@file: http.py
@time: 2021/8/17
"""
import requests
class HttpClient:
    """Thin convenience wrapper around ``requests`` for issuing HTTP calls."""

    def request(self, url, method, **kwargs):
        """Send an HTTP request and return the response.

        Args:
            url (str): Target URL.
            method (str): HTTP verb, e.g. ``'GET'`` or ``'POST'``.
            **kwargs: Forwarded to ``requests.Session.request`` (``data``,
                ``json``, ``params``, ``headers``, ...).

        Returns:
            :obj:`requests.models.Response`
        """
        session = requests.Session()
        try:
            # Session is closed whether or not the request raises, matching
            # the context-manager semantics of requests.Session.
            return session.request(method, url, **kwargs)
        finally:
            session.close()
| [
"zhouqiang@zhouqiangdeMacBook-Pro-2.local"
] | zhouqiang@zhouqiangdeMacBook-Pro-2.local |
763bc812b8f217850cfe652bacd10219358909ff | 34108db83f45a027783385382244e4f53769f140 | /traditional/svm_model.py | a2f45d28bdadbdac4fa297ebd29e0f2e74d09434 | [] | no_license | azh18/query_size_estimate | 8cfb6f9f464f6606fff0d840151b91558fcb8927 | 09c11f37dc83fc9b19e9dc592399a869ea5fdb15 | refs/heads/master | 2022-12-09T10:39:59.242279 | 2019-05-28T13:56:24 | 2019-05-28T13:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,853 | py | from sklearn.svm import SVR
import math
import numpy as np
from traditional.util import unnormalize_labels, normalize_labels
def get_q_error(real_labels, pred_labels):
    """Print median/p90/p95/p99 q-error between predicted and real values.

    The q-error of a pair is the larger value divided by the smaller one,
    so a perfect prediction has q-error 1.0.  Results are printed, nothing
    is returned.
    """
    ratios = []
    for real, pred in zip(real_labels, pred_labels):
        if pred > real:
            ratios.append(math.fabs(float(pred) / real))
        else:
            ratios.append(math.fabs(float(real) / pred))
    ratios.sort()

    def percentile(q):
        # Same index convention as the original implementation.
        return ratios[int(len(ratios) * q) - 1]

    print("median=", percentile(0.5))
    print("p90=", percentile(0.9))
    print("p95=", percentile(0.95))
    print("p99=", percentile(0.99))
def build_SVR_dataset(joins_enc, predicates_enc, label, num_queries, column_min_max_vals):
    """Split encoded queries into 90/10 train/validation sets for the SVR model.

    Args:
        joins_enc: per-query encoded join features.
        predicates_enc: per-query encoded predicate features.
        label: raw cardinality labels (normalized here).
        num_queries: total number of queries to use.
        column_min_max_vals: passed through unchanged for later decoding.

    Returns:
        (train_data, labels_train, test_data, labels_test,
         column_min_max_vals, [label_min, label_max])
    """
    label_norm, min_val, max_val = normalize_labels(label)

    # 90/10 split, preserving the input order.
    num_train = int(num_queries * 0.9)
    num_test = num_queries - num_train

    predicates_train = predicates_enc[:num_train]
    joins_train = joins_enc[:num_train]
    labels_train = label_norm[:num_train]

    predicates_test = predicates_enc[num_train:num_train + num_test]
    joins_test = joins_enc[num_train:num_train + num_test]
    labels_test = label_norm[num_train:num_train + num_test]

    print("Number of training samples: {}".format(len(labels_train)))
    print("Number of validation samples: {}".format(len(labels_test)))

    # Concatenate predicate and join features into one flat vector per query.
    # (The previous max_num_joins/max_num_predicates computations were dead
    # code and have been removed.)
    train_data = [np.hstack([p, j]) for p, j in zip(predicates_train, joins_train)]
    test_data = [np.hstack([p, j]) for p, j in zip(predicates_test, joins_test)]

    label_min_max_val = [min_val, max_val]
    return train_data, labels_train, test_data, labels_test, column_min_max_vals, label_min_max_val
class SVRModel:
    """Support-vector-regression estimator with a simple (gamma, C) grid search.

    Datasets are attached via :meth:`bind_dataset`; labels are assumed to be
    normalized with the min/max pair given as ``origin_label_min_max``.
    """

    def __init__(self):
        # Default model; replaced for each grid point during train_grid().
        self.model = SVR(kernel="rbf", gamma='scale', C=0.1)
        self.train_data = None
        self.train_label = None
        self.test_data = None
        self.test_label = None
        self.origin_label_min_max = None

    def bind_dataset(self, train_data, train_label, test_data, test_label, origin_label_min_max):
        """Attach train/test splits and the label normalization bounds."""
        self.train_data = train_data
        self.train_label = train_label
        self.test_data = test_data
        self.test_label = test_label
        self.origin_label_min_max = origin_label_min_max

    def train_grid(self, gamma_list, c_list):
        """Fit an SVR for every (gamma, C) pair and print q-error reports."""
        for gamma in gamma_list:
            for c in c_list:
                self.model = SVR(kernel="rbf", gamma=gamma, C=c)
                print("gamma = ", gamma, "C = ", c)
                self.model.fit(self.train_data, self.train_label)
                print("On Training Set:")
                self._report(self.train_data, self.train_label)
                print("On Test Set:")
                self._report(self.test_data, self.test_label)
                print("-----")

    def _report(self, data, labels):
        # Un-normalize predictions and targets, then print q-error stats.
        low, high = self.origin_label_min_max
        predicted = unnormalize_labels(self.model.predict(data), low, high)
        actual = unnormalize_labels(labels, low, high)
        get_q_error(actual, predicted)
| [
"zbw0046@gmail.com"
] | zbw0046@gmail.com |
b7dd7a197154d308863a5d0f9d1d548a6a166d6e | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/plone.app.controlpanel-2.1.1-py2.7.egg/plone/app/controlpanel/skins.py | a649d961b9669e9e19a497770d9f1e3f809ad3e2 | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,314 | py | from zope.interface import Interface
from zope.component import adapts
from zope.formlib.form import FormFields
from zope.interface import implements
from zope.schema import Bool
from zope.schema import Choice
from Products.CMFCore.utils import getToolByName
from Products.CMFDefault.formlib.schema import SchemaAdapterBase
from Products.CMFPlone import PloneMessageFactory as _
from Products.CMFPlone.interfaces import IPloneSiteRoot
from form import ControlPanelForm
from widgets import DropdownChoiceWidget
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
# Human-readable labels mapped to the stored icon_visibility property values.
ICON_VISIBILITY_CHOICES = {
    _(u"Only for users who are logged in"): 'authenticated',
    _(u"Never show icons"): 'disabled',
    _(u"Always show icons"): 'enabled',
}

# Vocabulary for the control-panel form: value/token = stored id, title = label.
ICON_VISIBILITY_VOCABULARY = SimpleVocabulary(
    [SimpleTerm(v, v, k) for k, v in ICON_VISIBILITY_CHOICES.items()]
)
class ISkinsSchema(Interface):
    """Form schema for Plone's "Theme settings" control panel."""

    # Skin/theme applied to site visitors by default.
    theme = Choice(title=_(u'Default theme'),
                   description=_(u'''Select the default theme for the site.'''),
                   required=True,
                   missing_value=tuple(),
                   vocabulary="plone.app.vocabularies.Skins")

    # Whether external links get link-type specific icons.
    mark_special_links = Bool(title=_(u'Mark external links'),
                              description=_(u"If enabled all external links "
                                             "will be marked with link type "
                                             "specific icons."),
                              default=True)

    # Whether external links in the content area open in a new window.
    ext_links_open_new_window = Bool(title=_(u"External links open in new "
                                              "window"),
                                     description=_(u"If enabled all external "
                                                    "links in the content "
                                                    "region open in a new "
                                                    "window."),
                                     default=False)

    # One of the ICON_VISIBILITY_VOCABULARY values (authenticated/disabled/enabled).
    icon_visibility = Choice(title=_(u'Show content type icons'),
                             description=_(u"If disabled the content icons "
                                            "in folder listings and portlets "
                                            "won't be visible."),
                             vocabulary=ICON_VISIBILITY_VOCABULARY)

    # Whether simple forms (login, contact, delete) use popup overlays.
    use_popups = Bool(title=_(u'Use popup overlays for simple forms'),
                      description=_(u"If enabled popup overlays will be "
                                     "used for simple forms like login, "
                                     "contact and delete confirmation."),
                      default=True)
class SkinsControlPanelAdapter(SchemaAdapterBase):
    """Adapter mapping ISkinsSchema fields onto the Plone site's tools.

    Theme selection is stored on portal_skins; the link/icon/popup options
    live in portal_properties.site_properties and/or the JavaScript resource
    registry (portal_javascripts).
    """

    adapts(IPloneSiteRoot)
    implements(ISkinsSchema)

    def __init__(self, context):
        super(SkinsControlPanelAdapter, self).__init__(context)
        # Note: self.context is rebound from the site root to portal_skins.
        self.context = getToolByName(context, 'portal_skins')
        self.jstool = getToolByName(context, 'portal_javascripts')
        self.csstool = getToolByName(context, 'portal_css')
        self.ksstool = getToolByName(context, 'portal_kss')
        ptool = getToolByName(context, 'portal_properties')
        self.props = ptool.site_properties
        # Set to True by set_theme so the form can force a page refresh.
        self.themeChanged = False

    def get_theme(self):
        return self.context.getDefaultSkin()

    def set_theme(self, value):
        self.themeChanged = True
        self.context.default_skin = value

    theme = property(get_theme, set_theme)

    def _update_jsreg_mark_special(self):
        # mark_special_links.js is needed if either link option is active.
        self.jstool.getResource('mark_special_links.js').setEnabled(
            self.mark_special_links or self.ext_links_open_new_window
        )
        self.jstool.cookResources()

    def get_mark_special_links(self):
        # Stored as the string 'true'/'false' in site_properties.
        msl = getattr(self.props, 'mark_special_links', False)
        if msl == 'true':
            return True
        return False
        # return self.jstool.getResource('mark_special_links.js').getEnabled()

    def set_mark_special_links(self, value):
        if value:
            mark_special_links='true'
        else:
            mark_special_links='false'
        if self.props.hasProperty('mark_special_links'):
            self.props.manage_changeProperties(mark_special_links=mark_special_links)
        else:
            self.props.manage_addProperty('mark_special_links', mark_special_links, 'string')
        self._update_jsreg_mark_special()

    mark_special_links = property(get_mark_special_links,
                                  set_mark_special_links)

    def get_ext_links_open_new_window(self):
        # Also a 'true'/'false' string property.
        elonw = self.props.external_links_open_new_window
        if elonw == 'true':
            return True
        return False

    def set_ext_links_open_new_window(self, value):
        if value:
            self.props.manage_changeProperties(external_links_open_new_window='true')
        else:
            self.props.manage_changeProperties(external_links_open_new_window='false')
        self._update_jsreg_mark_special()

    ext_links_open_new_window = property(get_ext_links_open_new_window,
                                         set_ext_links_open_new_window)

    def get_icon_visibility(self):
        return self.props.icon_visibility

    def set_icon_visibility(self, value):
        self.props.manage_changeProperties(icon_visibility=value)

    icon_visibility = property(get_icon_visibility,set_icon_visibility)

    def get_use_popups(self):
        # Backed directly by the popupforms.js registry entry.
        return self.jstool.getResource('popupforms.js').getEnabled()

    def set_use_popups(self, value):
        self.jstool.getResource('popupforms.js').setEnabled(value)
        self.jstool.cookResources()

    use_popups = property(get_use_popups, set_use_popups)
class SkinsControlPanel(ControlPanelForm):
    """Formlib control-panel form for the ISkinsSchema settings."""

    form_fields = FormFields(ISkinsSchema)
    form_fields['theme'].custom_widget = DropdownChoiceWidget

    label = _("Theme settings")
    description = _("Settings that affect the site's look and feel.")
    form_name = _("Theme settings")

    def _on_save(self, data=None):
        # Force a refresh of the page so that a new theme choice fully takes
        # effect.
        if not self.errors and self.adapters['ISkinsSchema'].themeChanged:
            self.request.response.redirect(self.request.URL)
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
dd073724f67a10570c13e2cc5c18b7fcfbc40144 | 1f5d98c97ac9ff75b1d6b81f0a4a5110b05d4284 | /social_network/personal_settings.py | c2bb5dd5e177c74698e5d3621758e902a6d59781 | [] | no_license | DukhDmytro/social_network | 70cdd4aeb1448fdbacce6d32f627b421b8614a8c | a8d563b17ffc90dc467c67150fd4f0e7aa5f3992 | refs/heads/master | 2022-12-12T21:04:17.354395 | 2020-03-03T13:34:45 | 2020-03-03T13:34:45 | 241,352,402 | 0 | 0 | null | 2022-12-08T03:38:56 | 2020-02-18T12:12:34 | Python | UTF-8 | Python | false | false | 357 | py | SECRET_KEY_ = ')*k_v+g1&sj%o*%ocf#=m@s+!fmgnt$rcg$9puzlp7-!st$6f1'
DATABASES_ = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'social_network',
'USER': 'admin',
'PASSWORD': '1',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
HUNTER_API_KEY_ = '44a18a3fef94f60b3cf2f985f316b62a43f0a0eb' | [
"cowboybebop4991@gmail.com"
] | cowboybebop4991@gmail.com |
3528dc2697499f23c8d35c92ffec15cf241338f1 | e9fbb4718f6b68bf73ca6acd63fa068169e53e28 | /src/python/com/expleague/media_space/topics/fast_qt.py | 1e694f31d53c274c201705c27e3b221833bc16f5 | [] | no_license | mrMakaronka/topic_modeling | 940f752953acf5c8ef18f811933fbedbab3f40d1 | 15ccd3473cd85ec7472a2b00fc1ac21109fdba13 | refs/heads/master | 2020-05-03T05:10:39.935471 | 2020-02-15T00:54:31 | 2020-02-15T00:54:31 | 178,441,410 | 0 | 0 | null | 2019-10-19T15:44:23 | 2019-03-29T16:34:40 | Python | UTF-8 | Python | false | false | 1,731 | py | import faiss
import numpy as np
from numpy import ma
class FastQt:
    """Quality-Threshold-style clustering accelerated with a faiss range search.

    Repeatedly takes the point with the most neighbours within ``threshold``
    (distance semantics per faiss IndexFlatL2 — squared L2; confirm against
    the faiss version in use), emits that neighbourhood as a cluster, and
    stops when no remaining candidate has at least ``cluster_min_size``
    members.
    """

    def __init__(self, threshold, cluster_min_size):
        # Radius passed to index.range_search().
        self.threshold = threshold
        # Smallest neighbourhood still accepted as a cluster.
        self.cluster_min_size = cluster_min_size

    # noinspection PyArgumentList
    def fit(self, X, callback):
        """Cluster rows of X; returns an int label per row (-1 = unclustered).

        ``callback(cluster_member_ids, member_distances)`` is invoked once per
        emitted cluster.
        """
        # noinspection PyAttributeOutsideInit
        labels = np.full(len(X), -1)
        index = faiss.IndexFlatL2(X.shape[1])
        index.add(X)
        # lims[i]:lims[i+1] delimits point i's neighbours in indices/distances.
        lims, distances, indices = index.range_search(X, self.threshold)
        # NOTE(review): if lims is already an ndarray, [0] + lims broadcasts an
        # elementwise add of 0 (a copy) rather than prepending — harmless only
        # because faiss's lims already starts with 0; confirm intended.
        lims = np.array([0] + lims, dtype=np.int64)
        # counters[i] = number of (still available) neighbours of point i.
        counters = np.int64(np.diff(lims))
        left = np.repeat(np.arange(0, len(counters), dtype=np.int64), counters)
        # Pack each (source point, neighbour) pair into one int64:
        # high 32 bits = source id, low 32 bits = neighbour id.
        pairs = np.left_shift(left, 32) + np.array(indices, dtype=np.int64)
        cluster_index = 0
        # mask[i] == True while point i is still unassigned.
        # NOTE(review): np.bool is removed in modern numpy; use bool/np.bool_.
        mask = np.zeros(len(counters), dtype=np.bool)
        mask[:] = True
        while True:
            # Candidate with the largest remaining neighbourhood.
            best = np.argmax(counters)
            if counters[best] < self.cluster_min_size:
                break
            # Keep only the pairs whose source is `best`; their low 32 bits
            # are the member ids of the new cluster.
            cluster_mask = ma.masked_where(((pairs >> 32) != best), pairs, True)
            cluster = cluster_mask.compressed() & np.int64(0xFFFFFFFF)
            counters[cluster] = 0
            labels[cluster] = cluster_index
            # Report members together with their distances to `best`.
            callback(cluster, ma.array(distances, mask=cluster_mask.mask).compressed())
            mask[cluster] = False
            # Drop every pair whose neighbour has just been claimed.
            pairs = ma.masked_where(~mask[pairs & 0xFFFFFFFF], pairs, False)
            # For each still-unassigned source, count how many of its
            # neighbours were removed and decrement its counter accordingly.
            (indices, removes) = np.unique(ma.masked_where(mask[pairs >> 32], pairs, True).compressed() & 0xFFFFFFFF,
                                           return_counts=True)
            counters[indices] -= removes
            mask[cluster] = True
            cluster_index += 1
        return labels
"trofimov9artem@gmail.com"
] | trofimov9artem@gmail.com |
ba94bc9f3edf78ffcb975d69c1352d2ab5a93987 | 3fff4f34d6430643c5c59371ea0b7415b499f419 | /slim/data/prefetch_queue_test.py | 96e66fcf0873a86906d250cee5571013e532a735 | [
"Apache-2.0"
] | permissive | mive93/nasnet-tensorflow | c72101438b2fb8e9ec03c0b0bd1aa18031865676 | edb0c98424521253c3afe2436384d5dc9a9e6582 | refs/heads/master | 2020-09-23T02:39:41.132008 | 2019-12-02T15:33:51 | 2019-12-02T15:33:51 | 225,381,698 | 0 | 0 | Apache-2.0 | 2019-12-02T13:29:34 | 2019-12-02T13:29:33 | null | UTF-8 | Python | false | false | 8,712 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.prefetch_queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
sys.path.insert(0,'/home/repos/nasnet-mive')
from slim.data import prefetch_queue
# from tensorflow.contrib.slim.python.slim.data import prefetch_queue
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
class PrefetchQueueTest(test.TestCase):
  """Unit tests for slim.data.prefetch_queue.prefetch_queue.

  Each test builds a small input pipeline (counter + random image + random
  label), pushes it through a prefetch queue, and checks shapes/ordering and
  the OutOfRangeError raised once the counter limit is reached.
  """

  def testOneThread(self):
    # Single-threaded batching: batch order must match the counter exactly.
    with self.test_session() as sess:
      batch_size = 10
      image_size = 32
      num_batches = 5

      zero64 = constant_op.constant(0, dtype=dtypes.int64)

      examples = variables.Variable(zero64)
      counter = examples.count_up_to(num_batches * batch_size)
      image = random_ops.random_normal(
          [image_size, image_size, 3], dtype=dtypes.float32, name='images')
      label = random_ops.random_uniform(
          [1], 0, 10, dtype=dtypes.int32, name='labels')

      batches = input_lib.batch(
          [counter, image, label], batch_size=batch_size, num_threads=1)

      batches = prefetch_queue.prefetch_queue(batches).dequeue()

      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()

      for i in range(num_batches):
        results = sess.run(batches)
        self.assertAllEqual(results[0],
                            np.arange(i * batch_size, (i + 1) * batch_size))
        self.assertEquals(results[1].shape,
                          (batch_size, image_size, image_size, 3))
        self.assertEquals(results[2].shape, (batch_size, 1))

      # Reached the limit.
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)

      for thread in threads:
        thread.join()

  def testMultiThread(self):
    # With 4 batching threads the order is nondeterministic, so only the set
    # of counter values is checked.
    with self.test_session() as sess:
      batch_size = 10
      image_size = 32
      num_batches = 5

      zero64 = constant_op.constant(0, dtype=dtypes.int64)

      examples = variables.Variable(zero64)
      counter = examples.count_up_to(num_batches * batch_size)
      image = random_ops.random_normal(
          [image_size, image_size, 3], dtype=dtypes.float32, name='images')
      label = random_ops.random_uniform(
          [1], 0, 10, dtype=dtypes.int32, name='labels')

      batches = input_lib.batch(
          [counter, image, label], batch_size=batch_size, num_threads=4)

      batches = prefetch_queue.prefetch_queue(batches).dequeue()

      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()

      value_counter = []
      for _ in range(num_batches):
        results = sess.run(batches)
        value_counter.append(results[0])
        self.assertEqual(results[1].shape,
                         (batch_size, image_size, image_size, 3))
        self.assertEqual(results[2].shape, (batch_size, 1))

      self.assertAllEqual(
          np.sort(np.concatenate(value_counter)),
          np.arange(0, num_batches * batch_size))

      # Reached the limit.
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)

      for thread in threads:
        thread.join()

  def testMultipleDequeue(self):
    # Two dequeue ops on one prefetch queue must partition the data between
    # them without loss or duplication.
    with self.test_session() as sess:
      batch_size = 10
      image_size = 32
      num_batches = 4

      zero64 = constant_op.constant(0, dtype=dtypes.int64)

      examples = variables.Variable(zero64)
      counter = examples.count_up_to(num_batches * batch_size)
      image = random_ops.random_normal(
          [image_size, image_size, 3], dtype=dtypes.float32, name='images')
      label = random_ops.random_uniform(
          [1], 0, 10, dtype=dtypes.int32, name='labels')

      batches = input_lib.batch(
          [counter, image, label], batch_size=batch_size, num_threads=4)

      batcher = prefetch_queue.prefetch_queue(batches)
      batches_list = [batcher.dequeue() for _ in range(2)]

      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()

      value_counter = []
      for _ in range(int(num_batches / 2)):
        for batches in batches_list:
          results = sess.run(batches)
          value_counter.append(results[0])
          self.assertEquals(results[1].shape,
                            (batch_size, image_size, image_size, 3))
          self.assertEquals(results[2].shape, (batch_size, 1))

      self.assertAllEqual(
          np.sort(np.concatenate(value_counter)),
          np.arange(0, num_batches * batch_size))

      # Reached the limit.
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)

      for thread in threads:
        thread.join()

  def testDynamicPad_failure(self):
    # Without dynamic_pad, tensors with partially unknown shapes are rejected.
    with ops.Graph().as_default():
      variable_tensor = array_ops.placeholder(dtypes.int32, shape=[None, 3])
      with self.assertRaisesRegexp(ValueError, 'shapes must be fully defined'):
        prefetch_queue.prefetch_queue([variable_tensor])

  def testDynamicPad(self):
    # dynamic_pad=True pads variable-length tensors to the batch maximum.
    with self.test_session() as sess:
      # Create 3 tensors of variable but compatible shapes.
      var_shape = [None, 2]
      p1 = constant_op.constant([[1, 2], [3, 4]])
      p1.set_shape(var_shape)
      p2 = constant_op.constant([[5, 6], [7, 8], [9, 10]])
      p2.set_shape(var_shape)
      p3 = constant_op.constant([[11, 12]])
      p3.set_shape(var_shape)
      batch = [p1, p2, p3]
      batch_size = len(batch)

      zero64 = constant_op.constant(0, dtype=dtypes.int64)
      examples = variables.Variable(zero64)
      counter = examples.count_up_to(batch_size)

      # Create a PaddingFIFOQueue to enqueue these tensors.
      q = data_flow_ops.PaddingFIFOQueue(
          capacity=10, dtypes=[dtypes.int32], shapes=[var_shape])
      for tensor in [p1, p2, p3]:
        q.enqueue([tensor]).run()

      # Dequeue from the queue and batch them using batch().
      batches = input_lib.batch([q.dequeue(), counter], batch_size=batch_size,
                                num_threads=1, dynamic_pad=True)
      self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())

      # Finally, assemble them into prefetch_queue with dynamic_pad.
      batcher = prefetch_queue.prefetch_queue(batches, dynamic_pad=True)
      batches = batcher.dequeue()
      self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())

      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()

      values, _ = sess.run(batches)
      # We enqueued 3 tensors of [None, 2] shapes, so using dynamic_pad
      # they should be padded to the fixed size [3, 3, 2], where 3
      # is the maximum length of the batch.
      self.assertTrue(np.array_equal(
          np.array([[[1, 2], [3, 4], [0, 0]],
                    [[5, 6], [7, 8], [9, 10]],
                    [[11, 12], [0, 0], [0, 0]]]),
          values))

      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)

      for thread in threads:
        thread.join()

  def testDictConstruction(self):
    # A dict of tensors round-trips through the prefetch queue as a dict
    # with dtypes preserved.
    with ops.Graph().as_default():
      batches = {
          'first': constant_op.constant([1]),
          'second': constant_op.constant([2.0, 2.1])
      }
      prefetcher = prefetch_queue.prefetch_queue(batches)
      dequeued = prefetcher.dequeue()
      self.assertTrue(isinstance(dequeued, dict))
      self.assertEqual(2, len(dequeued))
      self.assertEqual(dtypes.int32, dequeued['first'].dtype)
      self.assertEqual(dtypes.float32, dequeued['second'].dtype)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| [
"micaelaverucchi@gmail.com"
] | micaelaverucchi@gmail.com |
378d655e48469b9bd2b553dcbabcb01ce35b7bf8 | d3fda89105d6e1f9e5d606515c17852a5560ac67 | /quora_dup_utils.py | 26f26677dc8fd0e11527db1bedc8ad258500d61d | [] | no_license | megoco27/vecn-words | 5d0ec342a39461ec5cb4d4ee8233c0bdf3613b06 | b75ed206939ab374892f4e10b997bbeed6cd7752 | refs/heads/master | 2021-09-13T03:41:17.339719 | 2018-04-24T15:14:39 | 2018-04-24T15:14:39 | 126,105,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,471 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 09:33:00 2018
@author: megoconnell
"""
""" Helper functions for exploring quora duplicate questions data set.
"""
import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support
# libraries for timing
from contextlib import contextmanager
from timeit import default_timer
import time
""" Functions to calculate ROC AUC score for model
"""
def calculate_AUC(model, doc_names_and_duplicate_class):
    """Return area under ROC curve for model.

    The cosine similarity between each pair's document vectors is used
    directly as the duplicate-detection score.  Delegates to
    get_model_distances_and_scores, which previously duplicated this
    function's loop verbatim.
    """
    doc_scores, doc_distances = get_model_distances_and_scores(
        model, doc_names_and_duplicate_class)
    return roc_auc_score(doc_scores, doc_distances)
def cosine_similarity(vec1, vec2):
    """Return the cosine of the angle between numpy vectors vec1 and vec2."""
    unit1 = vec1 / np.linalg.norm(vec1)
    unit2 = vec2 / np.linalg.norm(vec2)
    return np.dot(unit1, unit2)
""" helper function for recording time of computations
"""
@contextmanager
def elapsed_timer():
    """Yield a zero-argument callable reporting elapsed seconds.

    Inside the with-block the callable returns the running elapsed time;
    after a normal exit it returns the frozen total duration.
    """
    start = default_timer()
    end = None

    def elapsed():
        # While running, measure against the clock; once the block has
        # exited, use the frozen end timestamp.
        return (default_timer() if end is None else end) - start

    yield elapsed
    end = default_timer()
"""
functions to find best accuracy threshold given the cosine similarities
between document vectors; the function to call in the notebook is
report_accuracy_prec_recall_F1
The function get_model_distances_and_scores returns the true tag (1 or 0)
for each pair of documents along with the cosine similarity (float between
-1 and 1) for each pair of documents.
"""
def max_accuracy(y_target, y_pred, thresh_number=5000):
    """Find the decision threshold on y_pred maximizing accuracy vs y_target.

    Scans thresh_number equally spaced thresholds between min(y_pred) and
    max(y_pred).  Returns (max_accuracy, threshold, hard_predictions).
    """
    candidates = np.linspace(min(y_pred), max(y_pred), thresh_number)
    best_thresh, best_acc = 0, 0
    best_preds = y_pred
    for thresh in candidates:
        # Hard 0/1 predictions at this threshold.
        preds = np.array([0 if score < thresh else 1 for score in y_pred])
        acc = get_accuracy(y_target, preds)
        if acc > best_acc:
            best_thresh, best_acc, best_preds = thresh, acc, preds
    print("Best accuracy:", round(best_acc, 4))
    return (round(best_acc, 4), best_thresh, best_preds)
def get_accuracy(y_target, y_pred_vals):
    """Return the fraction of positions where the two vectors agree."""
    assert len(y_target) == len(y_pred_vals)
    matches = sum(1 for target, pred in zip(y_target, y_pred_vals) if target == pred)
    return float(matches) / float(len(y_target))
def report_accuracy_prec_recall_F1(y_target, y_pred):
    """Print best-threshold accuracy plus precision/recall/F1 at that threshold."""
    _, _, best_preds = max_accuracy(y_target, y_pred)
    precision, recall, F1, _ = precision_recall_fscore_support(
        y_target, best_preds, average='binary')
    print("Precision:", precision)
    print("Recall:", recall)
    print("F1-score:", round(F1, 4))
def get_model_distances_and_scores(model, doc_names_and_duplicate_class):
    """Return (y_target, y_pred) arrays for the given document pairs.

    y_target holds the duplicate tags (0/1); y_pred holds the cosine
    similarity (-1..1) between each pair's document vectors.
    """
    distances = []
    for pair in doc_names_and_duplicate_class:
        vec_a = model.docvecs[pair[0]]
        vec_b = model.docvecs[pair[1]]
        distances.append(cosine_similarity(vec_a, vec_b))
    scores = np.array([pair[2] for pair in doc_names_and_duplicate_class])
    return (scores, np.array(distances))
""" function that takes sentence (list of words) and word2vec model and
returns the average of the word2vec vectors of the words in the sentence
"""
def make_question_vectors(model, sentence):
    """Return a document vector: the mean of the sentence's word vectors.

    model is a pretrained gensim word2vec model; sentence is a list of
    words.  Out-of-vocabulary words and vectors containing NaN are
    skipped; if nothing usable remains, a zero vector of length
    model.vector_size is returned.
    """
    collected = []
    for token in sentence:
        try:
            vec = model[token]
        except KeyError:
            continue
        # Drop vectors containing NaN before averaging.
        if np.isnan(np.sum(vec)):
            continue
        collected.append(vec)
    if not collected:
        return np.zeros(model.vector_size)
    return np.array(collected).mean(axis=0)
| [
"noreply@github.com"
] | noreply@github.com |
3bfa3f4c4a8a9f814374e4c99f74e47289a534a5 | e2bd39106992b592de686e5bd79002edc05cc8bc | /917-仅仅反转字母/ReverseOnlyLetters.py | 97b3c141717bf5f5f882bee7025ae41b63f441cb | [] | no_license | Mumulhy/LeetCode | 9b8ad3af9f9a3b838bdd54727cf8f33401292d27 | 269419ba2a2840fcf100fa217c5275029ffa229e | refs/heads/master | 2022-10-28T23:06:54.081073 | 2022-10-23T07:48:49 | 2022-10-23T07:48:49 | 212,135,892 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | # -*- coding: utf-8 -*-
# LeetCode 917-仅仅反转字母
"""
Created on Wed Feb 23 12:52 2022
@author: _Mumu
Environment: py38
"""
class Solution:
    def reverseOnlyLetters(self, s: str) -> str:
        """Return s with its ASCII letters reversed in place; every other
        character keeps its original position (LeetCode 917)."""
        def is_letter(ch: str) -> bool:
            # Same ASCII ranges the two-pointer version checked via ord().
            return 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'

        # Collect the letters, then refill letter slots from the end.
        stack = [ch for ch in s if is_letter(ch)]
        out = []
        for ch in s:
            out.append(stack.pop() if is_letter(ch) else ch)
        return ''.join(out)
# Ad-hoc smoke test using the LeetCode example input.
if __name__ == '__main__':
    s = Solution()
    print(s.reverseOnlyLetters("Test1ng-Leet=code-Q!"))
| [
"noreply@github.com"
] | noreply@github.com |
df9b3ba209711e8bd0de73e352d8017f30ba0f9a | c8822d9aecc2b5a8250f429f4c15c994bcc21519 | /adventofcode/2018/05b.py | 4437e5a8cf72b6c2f5db728874d2405b35cb2d26 | [] | no_license | a-falcone/puzzles | 9aba855a345efda44dc91c7b3d63a830b2c06b31 | 928307a4c28b2a220357dda0ec4915937f11f788 | refs/heads/master | 2023-09-03T10:10:31.711447 | 2023-01-20T03:19:42 | 2023-01-20T03:19:42 | 48,375,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,434 | py | #!/usr/local/bin/python3
"""
--- Day 5: Alchemical Reduction ---
You've managed to sneak in to the prototype suit manufacturing lab. The Elves are making decent progress, but are still struggling with the suit's size reduction capabilities.
While the very latest in 1518 alchemical technology might have solved their problem eventually, you can do better. You scan the chemical composition of the suit's material and discover that it is formed by extremely long polymers (one of which is available as your puzzle input).
The polymer is formed by smaller units which, when triggered, react with each other such that two adjacent units of the same type and opposite polarity are destroyed. Units' types are represented by letters; units' polarity is represented by capitalization. For instance, r and R are units with the same type but opposite polarity, whereas r and s are entirely different types and do not react.
For example:
In aA, a and A react, leaving nothing behind.
In abBA, bB destroys itself, leaving aA. As above, this then destroys itself, leaving nothing.
In abAB, no two adjacent units are of the same type, and so nothing happens.
In aabAAB, even though aa and AA are of the same type, their polarities match, and so nothing happens.
Now, consider a larger example, dabAcCaCBAcCcaDA:
dabAcCaCBAcCcaDA The first 'cC' is removed.
dabAaCBAcCcaDA This creates 'Aa', which is removed.
dabCBAcCcaDA Either 'cC' or 'Cc' are removed (the result is the same).
dabCBAcaDA No further actions can be taken.
After all possible reactions, the resulting polymer contains 10 units.
How many units remain after fully reacting the polymer you scanned?
--- Part Two ---
Time to improve the polymer.
One of the unit types is causing problems; it's preventing the polymer from collapsing as much as it should. Your goal is to figure out which unit type is causing the most problems, remove all instances of it (regardless of polarity), fully react the remaining polymer, and measure its length.
For example, again using the polymer dabAcCaCBAcCcaDA from above:
Removing all A/a units produces dbcCCBcCcD. Fully reacting this polymer produces dbCBcD, which has length 6.
Removing all B/b units produces daAcCaCAcCcaDA. Fully reacting this polymer produces daCAcaDA, which has length 8.
Removing all C/c units produces dabAaBAaDA. Fully reacting this polymer produces daDA, which has length 4.
Removing all D/d units produces abAcCaCBAcCcaA. Fully reacting this polymer produces abCBAc, which has length 6.
In this example, removing all C/c units was best, producing the answer 4.
What is the length of the shortest polymer you can produce by removing all units of exactly one type and fully reacting the result?
"""
import string, re
DATA = open("05.data","r")
#DATA = ["dabAcCaCBAcCcaDA\n"]
for line in DATA:
line = line.strip()
minimum = len(line)
for char in string.ascii_lowercase:
reg = re.compile(char, re.IGNORECASE)
tempstring = reg.sub('', line)
i = 0
while( i < len(tempstring) - 1 ):
if tempstring[i].upper() == tempstring[i+1].upper() and tempstring[i] != tempstring[i+1]:
tempstring = tempstring[0:i] + tempstring[i+2:]
i -= 1
else:
i += 1
if i < 0:
i = 0
if len(tempstring) < minimum:
minimum = len(tempstring)
print(minimum)
| [
"falcone@gmail.com"
] | falcone@gmail.com |
8d5f12010d898b523479089cc1196e9871533824 | 49ed52e1793fe66d4022736a7dff4540a8eea75a | /actions/g_tg.py | e01e478e3156a630ff33ddde1c46f8dec60a8487 | [
"Apache-2.0"
] | permissive | alirezabayatmk/albert-persian-lab | a3d888295732cb6a79835fc8e0f9a10770668db9 | f43ec9efad236c4024bf2648a7af02e04e952b4c | refs/heads/master | 2022-11-23T11:14:01.990948 | 2020-07-30T07:38:05 | 2020-07-30T07:38:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,680 | py | import streamlit as st
import time
from utils import is_identical
from utils.loader import task_configuration, local_css, load_snippet
from utils.model import load_tokenizer, load_model, text_generation
def do_text_generation(task_title, task_config_filename, do_print_code=False):
    """Streamlit page for a masked-token text-generation demo.

    :param task_title: page title and the task name echoed in messages.
    :param task_config_filename: basename of the JSON config under assets/.
    :param do_print_code: when True, show the code snippet before predicting.

    Each config_map entry appears to be a tuple indexed as:
      [0] example text, [1] comma-separated labels, [2] markdown (unused,
      see the commented st.markdown), [3] model/tokenizer name,
      [4] text direction ('rtl' loads the RTL stylesheet).
    -- inferred from usage below; verify against task_configuration().
    """
    st.title(task_title)
    config_names, config_map = task_configuration('assets/%s.json' % task_config_filename)
    example = st.selectbox('Choose an example', config_names)
    # st.markdown(config_map[example][2], unsafe_allow_html=True)
    # Scale the text area with the example's word count, capped at 200px.
    height = min((len(config_map[example][0].split()) + 1) * 2, 200)
    if config_map[example][4] == 'rtl':
        local_css('assets/rtl.css')
    sequence = st.text_area('Text', config_map[example][0], key='sequence', height=height)
    labels = st.text_input('Mask (placeholder)', config_map[example][1], max_chars=1000)
    original_labels = config_map[example][1].split(', ')
    # Deduplicate and trim the user-entered labels; drop empty fragments.
    labels = list(set([x.strip() for x in labels.strip().split(',') if len(x.strip()) > 0]))
    if len(labels) == 0 or len(sequence) == 0:
        st.write('Enter some text and at least one label to see predictions.')
        return
    # The labels are fixed per task; reject any edit that changes the set.
    if not is_identical(labels, original_labels, 'list'):
        st.write('Your labels must be as same as the NLP task `%s`' % task_title)
        return
    if st.button('Analyze'):
        if do_print_code:
            load_snippet('snippets/text_generation_code.txt', 'python')
        s = st.info('Predicting ...')
        tokenizer = load_tokenizer(config_map[example][3])
        model = load_model(config_map[example][3], 'TFAlbertForMaskedLM', from_pt=True)
        masked_words, words = text_generation(model, tokenizer, sequence)
        # Rebuild the sentence as HTML: masked positions become colored
        # candidate spans wrapped in brackets, other words plain spans.
        new_sequence = []
        for index, word in enumerate(words):
            if index in masked_words:
                masks_sequence = []
                for mi in masked_words[index]:
                    masks_sequence.append(
                        '<span class="masked" style="background-color: %s;">%s</span>' %
                        (mi['color'], mi['token_str'])
                    )
                new_sequence.append(
                    '<span class="token"><span class="masks-start">[</span><span class="token-masks">%s</span><span class="masks-end">]</span></span>' %
                    (''.join(masks_sequence))
                )
            else:
                new_sequence.append(
                    '<span class="token">%s</span>' %
                    word
                )
        new_sequence = ' '.join(new_sequence)
        time.sleep(1)
        s.empty()  # remove the "Predicting ..." info box before rendering
        st.markdown(f'<p class="masked-box">{new_sequence}</p>', unsafe_allow_html=True)
| [
"m3hrdadfi@gmail.com"
] | m3hrdadfi@gmail.com |
e7bc5b408596623a5bf610c7bba934e4da24efab | 197420c1f28ccb98059888dff214c9fd7226e743 | /elements, blocks and directions/classes/class5_A_funcs.py | 3f2a7d6da1786dea286652d45ddc788ab0d67f48 | [] | no_license | Vovanuch/python-basics-1 | fc10b6f745defff31364b66c65a704a9cf05d076 | a29affec12e8b80a1d3beda3a50cde4867b1dee2 | refs/heads/master | 2023-07-06T17:10:46.341121 | 2021-08-06T05:38:19 | 2021-08-06T05:38:19 | 267,504,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | ''' class A '''
class A:
    """Teaching example: class attributes vs. instance attributes."""
    # Shared class attribute; visible through every instance until an
    # instance shadows it with its own 'val'.
    val = 1
    def foo(self):
        # Mutates the CLASS attribute: affects all instances that have
        # not shadowed 'val'.
        A.val += 2
    def bar(self):
        # Augmented assignment on self READS the class attribute but
        # ASSIGNS an instance attribute, shadowing A.val for this
        # instance only.
        self.val += 1
a = A()
b = A()
a.bar()  # a gets an instance attribute: a.val = 1 + 1 = 2
a.foo()  # class attribute becomes A.val = 3 (a is unaffected, it shadows)
c = A()
print(a.val)  # 2 -- instance attribute shadows the class attribute
print(b.val)  # 3 -- falls through to the (mutated) class attribute
print(c.val)  # 3 -- same shared class attribute
| [
"vetohin.vladimir@gmail.com"
] | vetohin.vladimir@gmail.com |
eb74fd563b3cb040a1fd1f494b888b3306520da1 | cc8b9618bc1a6fe069922c4d77f1cb09784575a3 | /databases/triage-patient-db.py | a554a85e63dc865b14fb5bdb45529f71b44402bd | [] | no_license | mell00/oneview | 53b0e881dc73920892532fab617e61ab5f3c3cf5 | b36fde58aee64ffe8aacee276d2117e700fbd14a | refs/heads/master | 2021-01-01T07:47:57.123781 | 2020-03-09T19:58:53 | 2020-03-09T19:58:53 | 239,179,383 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | import os
import pyodbc

# Connection string for the local SQL Server Express OneView database.
# '{}' is filled with the name of one installed ODBC driver at connect time.
_CONN_TEMPLATE = ('Driver={};Server=DESKTOP-TVBHO64\\SQLEXPRESS;'
                  'Database=OneView;Trusted_Connection=yes;')


def _connect():
    """Return a trusted pyodbc connection using the first installed driver.

    pyodbc.drivers() returns a LIST of driver names; the original code
    formatted the whole list into the connection string.
    """
    driver = pyodbc.drivers()[0]
    return pyodbc.connect(_CONN_TEMPLATE.format(driver))


def import_patient(record):
    """Insert one patient row into Patient_Info.

    :param record: 8-tuple (mr_num, firstname, lastname, birthdate, visit,
                   history, visit_date, reason) -- field order inferred from
                   the original variable names; verify against the schema.

    Uses '?' parameter markers so the values cannot inject SQL (the
    original built the statement with string formatting, which was both
    a syntax error and an injection risk).
    """
    conn = _connect()
    try:
        cursor = conn.cursor()
        cursor.execute(
            'INSERT INTO OneView.dbo.Patient_Info VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
            record)
        conn.commit()
    finally:
        conn.close()


def export_patient(record):
    """Delete the matching patient row from Patient_Info (parameterized)."""
    conn = _connect()
    try:
        cursor = conn.cursor()
        cursor.execute(
            'DELETE FROM OneView.dbo.Patient_Info'
            ' WHERE mr_num = ? AND firstname = ? AND lastname = ?'
            ' AND birthdate = ? AND visit = ? AND history = ?'
            ' AND visit_date = ? AND reason = ?',
            record)
        conn.commit()
    finally:
        conn.close()


def process(start, record):
    """Dispatch on *start*: 'import' inserts the record, 'export' deletes it.

    Replaces the original top-level 'if start = ...' / 'else if' block,
    which was invalid Python ('=' in a condition and 'else if').
    """
    if start == 'import':
        import_patient(record)
    elif start == 'export':
        export_patient(record)
© 2020 GitHub, Inc.
| [
"noreply@github.com"
] | noreply@github.com |
4f90f4f74e88b9a17bedae70a57b7ec991d00770 | 60cf1c6f0b357e9c4199636b5b219c11b54860a6 | /20190215_OBDp/check_error_log.py | 6248c45482990e741699d921a0f2764705b7fda1 | [] | no_license | aloejhb/EMana | ab33240e88204a74719e35270dc328b7fc66c909 | 3d70fadd3d0360c1d508e816abc77a531fb9e78e | refs/heads/master | 2020-05-22T15:25:31.614787 | 2019-06-20T10:19:43 | 2019-06-20T10:19:43 | 186,407,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | import os
# Concatenate every per-section error log of the overview stack into a
# single error_list.txt in the results directory, using a shell glob.
# import sys
# sys.path.insert(0, '/path/to/application/app/folder')
data_root_dir = '/run/user/1000/gvfs/smb-share:server=tungsten-nas.fmi.ch,share=landing_gmicro_sem'
result_dir = '/home/hubo/Projects/juvenile_EM/OBDp_overview/'
stack_name = '20190215_Bo_juvenile_overviewstackOBDp'
error_file_name = 'error_list.txt'
error_file = os.path.join(result_dir, error_file_name)
# Work from the mounted data share so the glob below matches relative paths.
os.chdir(data_root_dir)
cmd = f'cat {stack_name}*/meta/logs/error_* > {error_file}'
# print(cmd)
os.system(cmd)
| [
"b.hu@stud.uni-heidelberg.de"
] | b.hu@stud.uni-heidelberg.de |
a716caf5a278f27a52b4c137c8691c3da74f975d | f8d37aba59066156738dec56a9e97ee8f7230c4f | /venv/bin/django-admin | 8c9a9d7899d4895766f2b734bcc8f4e7635eae8d | [] | no_license | Piyush026/django-crud | 01ff6c0c89f58614bd2050b4af92af4046bb1424 | 0e0827be439991de9bd84da8d399518059e02b46 | refs/heads/master | 2020-12-13T21:09:53.310959 | 2020-01-17T10:52:38 | 2020-01-17T10:52:38 | 234,529,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | #!/home/lovkesh/PycharmProjects/django-crud/venv/bin/python
# -*- coding: utf-8 -*-
# Generated console entry point for the project's virtualenv: it hands
# control to Django's command-line management utility.
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.py" / ".exe") from
    # argv[0] so help and error output show the plain "django-admin" name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"piyush.jaiswal@ezeiatech.in"
] | piyush.jaiswal@ezeiatech.in | |
7c7e67d27b764ca813e58971be7ee5ec46ca05c5 | e49a07ad215172e9c82cb418b10371bf0ce1c0f7 | /第1章 python基础/Python基础01/19-打印1-100之间的偶数.py | 4549dced2d7a0a0a558734f64134b9b56b6a40e8 | [] | no_license | taogangshow/python_Code | 829c25a7e32ead388c8b3ffa763cb9cf587bfd7b | 4b3d6992ec407d6069f3187ca7e402a14d863fff | refs/heads/master | 2022-12-16T01:26:17.569230 | 2018-11-16T10:07:59 | 2018-11-16T10:07:59 | 157,832,985 | 0 | 1 | null | 2022-11-25T09:55:32 | 2018-11-16T08:00:13 | Python | UTF-8 | Python | false | false | 61 | py | i = 1
def print_evens(limit=100):
    """Print every even number between 1 and *limit* (inclusive), one per line.

    Stepping by 2 visits only the even numbers, so no parity test is
    needed (the original while-loop tested i % 2 on every value).
    """
    for n in range(2, limit + 1, 2):
        print(n)

print_evens()
| [
"cdtaogang@163.com"
] | cdtaogang@163.com |
class EmptyStackError(Exception):
    """Raised when pop() or peek() is called on an empty Stack.

    BUG FIX: this exception was raised below but never defined anywhere,
    so an empty pop()/peek() crashed with NameError instead.
    """


class Stack:
    """
    Stack: LIFO Data Structure.
    Operations:
        push(item)
        pop()
        peek()
        isEmpty()
        size()
    """
    def __init__(self):
        """
        Define an empty stack.
        Here we are using list to implement the Stack data structure.
        """
        self._list = []  # Holds the items in the stack.
        self._top = -1   # Index of the top of the stack (-1 == empty).

    def isEmpty(self):
        """
        Test if the stack has no items.
        :return: True if Stack is Empty. False otherwise.
        """
        return self._top == -1

    def push(self, item):
        """
        Pushes an item at the top of the stack updating the top of the stack.
        :param item: item to be added on to the stack
        """
        self._list.append(item)
        self._top += 1

    def pop(self):
        """
        Removes an item from the top of the stack modifying it.
        :return: item removed from the top of the stack.
        :raises EmptyStackError: if stack has no elements.
        """
        if self.isEmpty():
            raise EmptyStackError("Stack is Empty: Trying to pop from an empty stack")
        self._top -= 1
        return self._list.pop()

    def peek(self):
        """
        Just returns the item at the top of the stack without modifying the stack.
        :return: item at the top of the stack.
        :raises EmptyStackError: if stack has no elements.
        """
        if self.isEmpty():
            raise EmptyStackError("Stack is Empty: Trying to peek into an empty stack")
        return self._list[self._top]

    def size(self):
        """
        Returns the number of elements currently in the stack.
        :return: size of the stack.
        """
        return self._top + 1
class EmptyQueueError(Exception):
    """Raised when dequeue() is called on an empty Queue.

    BUG FIX: this exception was raised below but never defined anywhere,
    so dequeueing from an empty queue crashed with NameError instead.
    """


class Queue:
    """
    Queue: FIFO Data Structure.
    Operations:
        enqueue(item)
        dequeue()
        isEmpty()
        size()
    """
    def __init__(self):
        """
        Define an empty queue.
        Here we are using list to implement the Queue data structure.
        """
        self._data = []

    def isEmpty(self):
        """
        Test if the queue has no items.
        :return: True if Queue is Empty. False otherwise.
        """
        return self.size() == 0

    def enqueue(self, item):
        """
        Insert the item at the rear of the Queue.
        :param item: item to be added on to the Queue
        """
        self._data.append(item)

    def dequeue(self):
        """
        Removes an item from the front of the Queue.
        Note: list.pop(0) is O(n); fine for teaching-sized queues.
        :return: item removed from the front of the Queue.
        :raises EmptyQueueError: if Queue has no elements.
        """
        if self.isEmpty():
            raise EmptyQueueError("Trying to dequeue from an Empty Queue.")
        return self._data.pop(0)

    def size(self):
        """
        Returns the number of elements currently in the Queue.
        :return: size of the Queue.
        """
        return len(self._data)
class Vertex:
    """
    An example implementation of a Vertex or Node of a graph.
    """
    def __init__(self, key):
        """
        Creates a new Vertex identified by *key*.
        """
        self._neighbors = []  # list of (Vertex, weight) pairs for outgoing edges
        self._key = key

    def add_neighbor(self, neighbor_vertex, weight):
        """Record a directed edge to *neighbor_vertex* with the given weight."""
        self._neighbors.append((neighbor_vertex, weight))

    def get_connections(self):
        """Return the list of (Vertex, weight) pairs this vertex points to."""
        return self._neighbors

    def get_key(self):
        """Return the key identifying this vertex."""
        return self._key

    def get_weight(self, to_vertex):
        """Return the weight of the edge to the key *to_vertex*, or None."""
        for neighbor in self._neighbors:
            if to_vertex == neighbor[0].get_key():
                return neighbor[1]


class Graph:
    """
    An example implementation of Directed Graph ADT.

    NOTE: BFS() and topological_sort() index a 'visited' list directly by
    vertex key, so they assume keys are integers 0..n-1.
    """
    def __init__(self):
        """
        Creates a new, empty Graph.
        """
        self._vertices = {}

    def add_vertex(self, vertex):
        """
        Adds a new vertex into the graph.
        :param vertex: The key of the Vertex to be added into the Graph.
        :return: None.
        """
        self._vertices[vertex] = Vertex(vertex)

    def add_edge(self, from_vertex, to_vertex, weight):
        """
        Add a directed edge between two vertices, creating either endpoint
        if it does not exist yet.
        :param from_vertex: Starting vertex key of the edge.
        :param to_vertex: Key of the vertex where the edge ends.
        :param weight: weight of the edge.
        :return: None
        """
        if from_vertex not in self._vertices:
            self.add_vertex(from_vertex)
        if to_vertex not in self._vertices:
            self.add_vertex(to_vertex)
        self._vertices[from_vertex].add_neighbor(self._vertices[to_vertex], weight)

    def get_vertices(self):
        """
        Get all the vertex keys of the directed Graph, sorted.
        :return: List of vertex keys of the graph.
        """
        return sorted(self._vertices.keys())

    def get_edges(self):
        """
        Get all the edges of the directed graph.
        :return: List of (from_key, to_key, weight) tuples.
        """
        edges = []
        for vertex in self._vertices:
            neighbors = self._vertices[vertex].get_connections()
            for neighbor in neighbors:
                edges.append((vertex, neighbor[0].get_key(),
                              self._vertices[vertex].get_weight(neighbor[0].get_key())))
        return edges

    def get_vertex(self, vertex_key):
        """
        Return the Vertex stored under *vertex_key*, or None if absent.
        """
        # Direct dict lookup; the original scanned every key in O(n).
        return self._vertices.get(vertex_key)

    def BFS(self, start_vertex):
        """
        Breadth-first traversal from the key *start_vertex*.
        :return: list of visited keys in visit order.
        :raises Exception: if the start key is not in the graph.
        """
        start_key = start_vertex
        start_vertex = self.get_vertex(start_vertex)
        if start_vertex is None:
            # BUG FIX: the original formatted the None lookup result into
            # the message; report the key that was actually requested.
            raise Exception("Vertex {} is not found in graph".format(start_key))
        visited = [False] * len(self._vertices)
        traversed = []
        q = Queue()
        q.enqueue(start_vertex)
        while not q.isEmpty():
            v = q.dequeue()
            key = v.get_key()
            if not visited[key]:
                visited[key] = True
                traversed.append(key)
                for neighbor in v.get_connections():
                    if not visited[neighbor[0].get_key()]:
                        q.enqueue(neighbor[0])
        return traversed

    def dfs_topological_sort(self, start_vertex_key, visited, sorted):
        """
        DFS helper for topological_sort(): pushes each vertex key onto the
        *sorted* Stack only after all of its unvisited descendants have
        been processed.  (The parameter name 'sorted' shadows the builtin
        but is kept for interface compatibility.)
        """
        start_vertex = self.get_vertex(start_vertex_key)
        # Set that the vertex is visited.
        visited[start_vertex_key] = True
        for neighbor in start_vertex.get_connections():
            # For each unvisited neighbor of the vertex, recursively call the DFS.
            if not visited[neighbor[0].get_key()]:
                self.dfs_topological_sort(neighbor[0].get_key(), visited, sorted)
        # No more unvisited nodes reachable from here: emit this vertex.
        sorted.push(start_vertex_key)

    def topological_sort(self, start_vertex_key):
        """
        Return a topological ordering of the vertices reachable from
        *start_vertex_key*.
        """
        # BUG FIX: the original sized this list from the module-level
        # global 'g' instead of self, so the method crashed (or silently
        # misbehaved) on any other Graph instance.
        visited = [False] * len(self.get_vertices())
        # The stack that holds the topological sort.
        sorted = Stack()
        self.dfs_topological_sort(start_vertex_key, visited, sorted)
        # Pop into a list until the stack is empty to get the ordering.
        topo_sort = []
        while not sorted.isEmpty():
            topo_sort.append(sorted.pop())
        return topo_sort
| [
"noreply@github.com"
] | noreply@github.com |
8bc98a9ea1f436150e41ed6967f9de092a257ae4 | 7a6e6387f23c0c1f3d3ee3e71c09a0601039704b | /mysite/mysite/settings.py | 1c9f9ac6a9a98578912474ba43ed078ded494097 | [] | no_license | sivavavilla/python_django | 8140c3ee78899cbe61690afc6691bc2190081894 | bd460963df4fbf87c49090aef06f6ca966e9ba8d | refs/heads/master | 2020-03-27T19:11:02.124482 | 2018-09-02T04:24:04 | 2018-09-02T04:24:04 | 146,970,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and read
# it from the environment before any production deployment.
SECRET_KEY = 'loh#110mma(m^7=xxnmvu6r_1z!_z)kjqv_i!_6y#e085qqqaa'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Local development plus any PythonAnywhere subdomain.
ALLOWED_HOSTS = ['127.0.0.1','.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',  # project-local app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Default: file-backed SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static') | [
"shiva.vavilla@gmail.com"
] | shiva.vavilla@gmail.com |
d91d13ddff8861446f8953cb6599c35a87c5b6a4 | ac557bfc774de2ac4b6ad6955e905616ce6b5c8d | /easy_ocr.py | eee2cc369ef65a1fdf503c841de2bd4d16ef58ce | [] | no_license | Lee-jaehyun/api | 45b796c8a4e810f4fd2552c0d4858ffa3180856e | caaf7488af7ca735e6e91567d43a5b72fb1d56e9 | refs/heads/main | 2023-08-13T17:38:06.128614 | 2021-10-14T05:22:29 | 2021-10-14T05:22:29 | 416,998,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | import easyocr
import time
import datetime
import cv2
from deskew import determine_skew
from mser import mser_process
from angle4 import rotate
from deskew import determine_skew
from angle4 import rotate
# Korean OCR reader; gpu=False forces CPU inference.
reader = easyocr.Reader(['ko'], gpu=False)
image = cv2.imread("../../Desktop/skewed1.jpeg")
start = time.time()
#image = cv2.resize(image, dsize=(960, 1280), interpolation=cv2.INTER_AREA)
#w, h = image.shape[:2]
grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Estimate the skew angle of the text on the whole grayscale page.
angle = determine_skew(grayscale) #[int(h/3):, int(w/4):int(w*2/3)]
print(angle)
# Near-vertical estimates are treated as "already upright"; otherwise
# rotate by 90 + angle to make the text horizontal.
# NOTE(review): the -77 / 80 thresholds look empirical -- confirm on more samples.
if angle < -77 or angle > 80:
    rotated = grayscale
else:
    rotated = rotate(grayscale, 90 + angle, (0, 0, 0))
cv2.imshow('rotated', rotated)
cv2.waitKey(0)
#angle = determine_skew(grayscale)
#angle = determine_skew(grayscale) #[int(h/3):, int(w/4):int(w*2/3)]
#print(angle)
#if (angle < -78) or (angle > 80):
#    rotated = image
#else:
#    rotated = rotate(image, angle, (0, 0, 0))
#rotated = cv2.resize(rotated, dsize=(800, 960), interpolation=cv2.INTER_AREA)
#cv2.imshow("rotated", rotated)
#cv2.waitKey(0)
# OCR the deskewed image and print the recognized text of each detection
# (readtext() items are indexed: [1] appears to be the text -- verify).
result = reader.readtext(rotated)
end_ocr = time.time()
for i in result:
    print(i[1])
sec = (end_ocr - start)
print("TOTAL_process :", datetime.timedelta(seconds=sec)) | [
"noreply@github.com"
] | noreply@github.com |
f9579482f65166748aaafc04748e269ac0730e34 | 082a2149d5f03426455f4021df3759011a4e5597 | /CDN/telemetry_upload.py | 83428070a89dab2a96e980033ea8a900345fe61f | [
"MIT"
] | permissive | projectOpenRAP/OpenRAP | e1252cf3e04f690054cd2da9584b2a6932a9e30d | 858bcdc23d7cd1ad22388ef9779e6779384f963a | refs/heads/develop-v3 | 2022-12-12T10:50:10.685557 | 2019-06-22T10:51:18 | 2019-06-22T10:51:18 | 104,078,355 | 43 | 14 | MIT | 2022-12-08T11:20:25 | 2017-09-19T13:27:20 | JavaScript | UTF-8 | Python | false | false | 7,870 | py | #!/usr/bin/env python
""" Uploaded telemetry to cloud server in regular interval """
import sys, os, subprocess, shutil
import json
import jwt
import logging
import random
import requests
import string
#from secrets import token_urlsafe
import time, threading
#############
#Global configurations for build
regURL = 'https://qa.ekstep.in/api/api-manager/v1/consumer/cdn_device/credential/register'
tmURL = 'https://qa.ekstep.in/api/data/v3/telemetry'
app_jwt = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJvcGVucmFwLXYxIn0.BlKcxXLMXDe5wGSLIyN7DV6B808Fmi87-OJRHGS0NCE'
JWT_ALGORITHM = "HS256"
logfile="telemetry_upload.log"
device_key=""
device_secret=""
tm_jwt=""
tmDir = "/var/www/ekstep/telemetry"
tm_timer_interval=300 # 5 minutes
#################
class BreakoutException(Exception):
    """ Custom exception used purely for flow control: raised inside
    telemetry_upload_dir() to jump to its error-logging handler. """
    pass
#################
def logging_init():
    """Create the module-wide 'TELEMETRY' logger and open its log file.

    Sets two module globals:
      * log   -- logging.Logger at INFO level, writing to /tmp/<logfile>
                 (file handler) and echoing ERROR+ to the console.
      * logfd -- raw append-mode handle on the same file, kept so that
                 subprocess output in run_cmd() can be redirected into it.
    """
    global log
    global logfd
    log = logging.getLogger('TELEMETRY')
    log.setLevel(logging.INFO)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    build_logfilename = "/tmp/" + logfile
    #Needed to log output of subprocess.Popen
    logfd = open(build_logfilename, "a")
    # create file handler which logs even debug messages
    # (the logger itself is INFO, so INFO+ actually reaches the file)
    fh = logging.FileHandler(build_logfilename)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    log.addHandler(fh)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    # Keep a global reference of the logger
    log.info("START: Logfile: " + build_logfilename)
def run_cmd(cmd):
    """Run *cmd* through the shell, appending its stdout to the log file.

    :param cmd: shell command line to execute.
    :return: the command's exit code (0 on success).
    """
    global logfd
    # BUG FIX: the original 'print(logfd, "executing: ...")' printed the
    # file OBJECT to stdout; the intent was to write the trace line into
    # the log file, which needs the file= keyword.
    print("executing: %s" % cmd, file=logfd)
    p = subprocess.Popen(cmd, shell=True, stdout=logfd, stderr=subprocess.PIPE)
    (result, error) = p.communicate()
    if p.returncode != 0:
        print(error)
        #sys.exit(p.returncode)
    return (p.returncode)
def jwt_generate(key, secret):
    """Sign and return a JWT whose issuer ("iss") claim is *key*,
    using *secret* with the module's HS256 algorithm."""
    return jwt.encode(
        {"iss": key},
        secret,
        algorithm=JWT_ALGORITHM,
        headers={"alg": "HS256", "typ": "JWT"},
    )
def check_netconnectivity():
    """Return True when at least one of two well-known hosts answers ping.

    Tries Google's public DNS first and falls back to a second server so
    a single unreachable host does not report the network as down.
    """
    for host in ("8.8.8.8", "www.amazon.com"):
        if run_cmd("ping -c 2 -W 5 " + host) == 0:
            return True
    return False
def token_generate():
    """Register a fresh random device key with the cloud and derive tm_jwt.

    Side effects: overwrites the module globals device_key, device_secret
    and tm_jwt; terminates the process (sys.exit(1)) when registration
    does not return a 2xx status.
    """
    #generate a unique device_key
    global device_key, device_secret, tm_jwt
    #device_key = token_urlsafe(16)
    # 16 random alphanumeric characters identify this device instance.
    device_key = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(16))
    # Construct a POST request to get app key and secret from reqURL
    payload = {
        "id": "ekstep.cdn.pinut",
        "ver": "2.0",
        "request": { "key": device_key}
    }
    auth_text = "bearer " + app_jwt
    headers = {'Content-Type': 'application/json', 'Authorization': auth_text}
    r = requests.post(url=regURL, data=json.dumps(payload), headers=headers)
    if r.status_code // 100 != 2:
        # Anything outside the 2xx family is treated as fatal.
        log.error("Server error: Not received SECRET for device_key: " + device_key)
        sys.exit(1)
    # The server may canonicalize the key; trust its response values.
    device_key = r.json().get('result').get('key')
    device_secret = r.json().get('result').get('secret')
    #generate the telemetry jwt from app key and secret
    tm_jwt = jwt_generate(device_key, device_secret).decode()
    log.info("Device_key[%s] Device_secret[%s] JWT_token[%s]\n" %(device_key, device_secret, tm_jwt))
def telemetry_upload_file(filename, jwt, endpoint=tmURL):
    """POST one gzipped telemetry file to *endpoint* and return the tuple
    (http_status, es_status, es_err, es_errmsg) parsed from the reply.

    Note: the *jwt* parameter keeps its public name even though it shadows
    the module-level jwt import inside this function.
    """
    headers = {
        'Content-Type': 'application/json',
        'Content-Encoding': 'gzip',
        'Authorization': "bearer " + jwt,
    }
    # Stream the file body straight from disk; the with-block guarantees
    # the handle is closed even when the POST raises.
    with open(filename, 'rb') as fin:
        r = requests.post(url=endpoint, data=fin, headers=headers)
        print(r.text)
    # Parse the response json once and pull the three status fields.
    params = r.json().get('params')
    return (r.status_code,
            params.get('status'),
            params.get('err'),
            params.get('errmsg'))
# Generate log sparingly: the 5-minute timer would otherwise repeat the
# same "nothing to do" error every tick.  Only the first failure in a
# window is logged; the counter wraps every log_optimization_limit ticks.
log_optimization_limit = 25
log_current_value = 0
def telemetry_upload_dir():
    """Upload pending telemetry files (oldest first), then re-arm the timer.

    Flow control: the early-abort conditions (missing directory, no files,
    no network) set err_msg and raise BreakoutException, which lands in
    the except block below; the first aborted tick logs err_msg, later
    identical ticks are suppressed via log_optimization_limit.  Whatever
    happens, a new threading.Timer is scheduled at the end.
    """
    #
    # Check if telemetry file avalable to sync
    # If not, just return
    #
    global tmDir
    tm_dir = tmDir
    try:
        try:
            os.chdir(tm_dir)
        except:
            err_msg = "Directory read error: " + tm_dir
            raise BreakoutException
        tmfile_list = os.listdir(tm_dir)
        #tmfile_list = sorted(os.listdir(tm_dir),key=os.path.getctime);
        if not tmfile_list:
            err_msg = "No file in " + tm_dir + " to upload..."
            raise BreakoutException
        #log.info(' '.join(str(x) for x in tmfile_list))
        #
        # We have some files to upload; check net connectivity now
        # If not connected, just return
        #
        netstatus = check_netconnectivity()
        if not netstatus:
            err_msg = "Not connected to network..."
            raise BreakoutException
        else:
            log.info("Connected to network...")
        # Oldest first; relative names resolve because of the chdir above.
        tmfile_timesorted_list = sorted(tmfile_list, key=os.path.getmtime)
        #log.info(' '.join(str(x) for x in tmfile_list))
        #
        # Upload the first file with existing credential
        # If we get unauthorized/rate limited error
        # Handle that
        #
        # We have telemetry ratelimit(in cloud server) 10000/hour
        # and the timer expires in every 5 minutes
        ratelimit_count = 1000
        for filename in tmfile_timesorted_list:
            status, es_resp_status, es_resp_err, es_resp_errmsg = telemetry_upload_file(filename, tm_jwt, tmURL)
            if es_resp_status == "successful" or es_resp_err == "INVALID_DATA_ERROR":
                # Uploaded, or permanently rejected -- either way drop it.
                log.info("telemetry upload(%s) status: %s %s" %
                         (filename, es_resp_status, es_resp_errmsg))
                # delete this file
                os.remove(filename)
            elif status == 401:
                log.info("telemetry upload(%s) status: %d es_status: %s es_err: %s es_errmsg: %s" %
                         (filename, status, es_resp_status, es_resp_err, es_resp_errmsg))
                log.info("Unauthorized: Regenerating token...")
                token_generate()
                break
            elif status == 429:
                log.info("telemetry upload(%s) status: %d es_status: %s es_err: %s es_errmsg: %s" %
                         (filename, status, es_resp_status, es_resp_err, es_resp_errmsg))
                log.info("Ratelimit: API rate limit exceeded...")
                break
            else:
                # some other error; take a break for now
                break
            # Ensure we are not rate limited by server
            if ratelimit_count < 1:
                break
            else:
                ratelimit_count = ratelimit_count - 1
    except:
        # Don't flood with logs from timer
        # NOTE(review): this bare except also hides unexpected errors
        # (e.g. from requests), and if one fires before err_msg is bound,
        # the log.error line below raises NameError -- consider narrowing
        # this to 'except BreakoutException'.
        global log_optimization_limit, log_current_value
        log_current_value = log_current_value + 1
        if log_current_value == 1:
            log.error(err_msg)
        elif log_current_value >= log_optimization_limit:
            log_current_value = 0
    # The below line required for next timer fire
    global tm_timer_interval
    threading.Timer(tm_timer_interval, telemetry_upload_dir).start()
##########################################
# MAIN
##########################################
if __name__ == '__main__':
    # One-time setup: open the log, register device credentials, then
    # start the self-rescheduling upload loop (telemetry_upload_dir
    # re-arms a threading.Timer on every pass).
    logging_init()
    token_generate()
    telemetry_upload_dir()
| [
"pronoy@aikaan.io"
] | pronoy@aikaan.io |
4e27cea422c4ce1bae7e9ddb013707c1ff873417 | 12b3c5674d9123da7da7074981d8c5e8f5acfb0e | /helloword.py | 0b7e3b8bd73153a6807150febdeb3b4ffc873b76 | [] | no_license | lunalucas123/Preparation | ba208131bb9aa2bf8949703bfa2a8a514533f01e | 4ed22472117b82b708433d826dc159f145e3b6de | refs/heads/master | 2022-12-24T18:11:57.232942 | 2020-10-03T19:28:38 | 2020-10-03T19:28:38 | 300,959,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19 | py | print("Hello Babe") | [
"geovannymolina@Geovannys-Air.attlocal.net"
] | geovannymolina@Geovannys-Air.attlocal.net |
c071be9b07b67960b4b7cbc9873e9ecba9fc6b0a | 2117d925581acb2feec8a8efb492d11eb227b7d4 | /ddpg_cl/approach.py | 6a7ffa95c190712bf34c8b6f30140c3460802b80 | [
"MIT"
] | permissive | genipap/deep-q-learning | fc61dd81dd378cec57ee005df8f9aeb3e54b296e | 19c44696b680a5b189548797cee43e0d67b62f8e | refs/heads/master | 2021-08-24T02:56:42.851986 | 2017-12-07T19:10:46 | 2017-12-07T19:10:46 | 110,624,525 | 0 | 0 | null | 2017-11-14T01:35:12 | 2017-11-14T01:35:12 | null | UTF-8 | Python | false | false | 17,154 | py | #!/usr/bin/env python
import sys
if "../" not in sys.path: # Path to utilities and other custom modules
sys.path.append("../")
import logging
import numpy as np
import tensorflow as tf
import json
from approach_network.app_actor_net import AppActorNetwork
from approach_network.app_critic_net import AppCriticNetwork
from approach_network.app_replay import AppReplay
from utilities.toolfunc import ToolFunc
from keras import backend as keras
from inter_sim import InterSim
from reward_app import AppReward
import time
import matplotlib.pyplot as plt
from random import random
import log_color
__author__ = 'qzq'
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
# Step_size is unused in this portion of the file -- presumably an episode
# length used elsewhere; verify before removing.
Step_size = 500
# Module-level experience replay buffer shared by every ReinAcc instance
# (capacity 10000 transitions).
Buffer = AppReplay(10000)
class ReinAcc(object):
    """DDPG agent for the intersection-approach task: wires the actor and
    critic networks to the InterSim simulator and tracks per-episode
    outcomes.  Hyperparameters below are class-level constants."""
    tools = ToolFunc()
    Tau = 1. / 30      # presumably the simulation step interval -- TODO confirm
    gamma = 0.99       # discount factor for the critic's Bellman target
    buffer_size = 10000
    batch_size = 128
    tau = 0.0001  # Target Network HyperParameters
    LRA = 0.001  # Learning rate for Actor
    LRC = 0.001  # Learning rate for Critic
    explore_iter = 100000.  # steps over which exploration epsilon decays to 0
    # explore_iter = 1000.
    episode_count = 500
    max_steps = 2000   # per-episode step budget before aborting
    action_dim = 1  # Steering/Acceleration/Brake
    action_size = 1
    state_dim = 10
    # Tensorflow GPU optimization
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    tf_sess = tf.Session(config=config)
    keras.set_session(tf_sess)
    Speed_limit = 12   # m/s? units not shown here -- verify against InterSim
    def __init__(self, ini_pos, task_k):
        """Set up one training run: simulator, reward model and bookkeeping.

        :param ini_pos: initial ego position handed to InterSim.
        :param task_k: curriculum task index; determines the "task<k>/"
                       save directory used by update_weights().
        """
        self.epsilon = 1.   # exploration-noise scale, decayed in get_action()
        self.task = task_k
        self.sim = InterSim(ini_pos, False)
        self.reward = AppReward()
        self.total_reward = 0
        # Episode outcome flags, set by if_exit().
        self.if_pass = False
        self.if_done = False
        # Per-episode outcome histories.
        self.crash = []
        self.success = []
        self.not_finish = []
        self.overspeed = []
        self.not_move = []
        self.cannot_stop = []
        self.loss = []
        self.run_time = []
        self.if_train = []
        self.total_loss = 0.
        self.total_rewards = []
        # Outcome counters for the current reporting window.
        self.sub_crash = 0
        self.sub_success = 0
        self.sub_not_finish = 0
        self.sub_overspeed = 0
        self.sub_not_move = 0
        self.sub_cannot_stop = 0
        # Actor/critic networks -- presumably assigned by the training
        # driver before use; not created in this chunk.
        self.app_actor = None
        self.app_critic = None
        # self.buffer = AppReplay()
        # Current training minibatch, filled by update_batch().
        self.batch = None
        self.batch_state = None
        self.batch_action = None
        self.batch_reward = None
        self.batch_new_state = None
        self.batch_if_done = None
        self.batch_output = None
        self.start_time = time.time()
        self.end_time = time.time()
        self.total_time = time.time()
def load_weights(self):
# logging.info('...... Loading weight ......')
try:
self.app_actor.model.load_weights("task9/actormodel.h5")
self.app_critic.model.load_weights("task9/criticmodel.h5")
self.app_actor.target_model.load_weights("task9/actormodel.h5")
self.app_critic.target_model.load_weights("task9/criticmodel.h5")
# logging.info("Weight load successfully")
except:
logging.warn("Cannot find the weight !")
    def update_weights(self):
        """Persist actor/critic weights (.h5) and architectures (.json)
        under the task<self.task>/ directory.

        NOTE(review): model.to_json() already returns a JSON string, so
        json.dump() wraps it in an extra layer of quoting; a loader must
        json.load() first, then feed the inner string to model_from_json.
        """
        # logging.info('...... Updating weight ......')
        self.app_actor.model.save_weights("task" + str(self.task) + "/actormodel.h5", overwrite=True)
        with open("task" + str(self.task) + "/actormodel.json", "w") as outfile:
            json.dump(self.app_actor.model.to_json(), outfile)
        self.app_critic.model.save_weights("task" + str(self.task) + "/criticmodel.h5", overwrite=True)
        with open("task" + str(self.task) + "/criticmodel.json", "w") as outfile:
            json.dump(self.app_critic.model.to_json(), outfile)
    def update_batch(self, s, a, r, s1):
        """Store one transition in the replay buffer and draw a minibatch.

        Fills the self.batch_* arrays and computes self.batch_output, the
        critic's regression target: r for terminal transitions, otherwise
        r + gamma * Q'(s1, mu'(s1)) using both target networks.
        """
        # logging.info('...... Updating batch ......')
        Buffer.add(s, a, r, s1, self.if_done)
        self.batch = Buffer.get_batch(self.batch_size)
        # Each replay entry e is (state, action, reward, next_state, done).
        # The squeeze(axis=1) implies states are stored with a leading
        # singleton batch axis -- verify against AppReplay.
        self.batch_state = np.squeeze(np.asarray([e[0] for e in self.batch]), axis=1)
        self.batch_action = np.asarray([e[1] for e in self.batch])
        self.batch_reward = np.asarray([e[2] for e in self.batch])
        self.batch_new_state = np.squeeze(np.asarray([e[3] for e in self.batch]), axis=1)
        self.batch_if_done = np.asarray([e[4] for e in self.batch])
        # Seeded with the rewards; overwritten per-row just below.
        self.batch_output = np.asarray([e[2] for e in self.batch])
        target_q_values = self.app_critic.target_model.predict(
            [self.batch_new_state, self.app_actor.target_model.predict(self.batch_new_state)])
        for k, done in enumerate(self.batch_if_done):
            self.batch_output[k] = self.batch_reward[k] if done else self.batch_reward[k] + self.gamma * target_q_values[k]
    def update_loss(self):
        """Run one DDPG optimization step on the current minibatch.

        Trains the critic toward self.batch_output, updates the actor
        along the critic's action-gradient, then soft-updates both target
        networks.
        :return: critic loss reported by train_on_batch.
        """
        # logging.info('...... Updating loss ......')
        loss = self.app_critic.model.train_on_batch([self.batch_state, self.batch_action], self.batch_output)
        actor_predict = self.app_actor.model.predict(self.batch_state)
        actor_grad = self.app_critic.gradients(self.batch_state, actor_predict)
        self.app_actor.train(self.batch_state, actor_grad)
        self.app_actor.target_train()
        self.app_critic.target_train()
        return loss
    def get_action(self, state_t, train_indicator):
        """Predict an action from the actor network.

        When train_indicator is truthy, adds per-dimension exploration noise
        scaled by a linearly decaying epsilon; otherwise returns the raw
        actor output.
        """
        # logging.info('...... Getting action ......')
        action_ori = self.app_actor.model.predict(state_t)
        if train_indicator:
            # Linear epsilon decay over explore_iter steps.
            self.epsilon -= 1.0 / self.explore_iter * train_indicator
            noise = []
            for i in range(self.action_size):
                a = action_ori[0][i]
                # tools.ou presumably generates Ornstein-Uhlenbeck-style noise
                # around the predicted action -- confirm in the tools module.
                noise.append(train_indicator * max(self.epsilon, 0) * self.tools.ou(a, -0.5, 0.5, 0.3))
            action = action_ori + np.array(noise)
        else:
            action = action_ori
        return action
    def if_exit(self, step, state, collision, not_move, cannot_stop):
        """Check episode termination conditions and record the outcome.

        Per the (commented) log messages: state[0] is velocity and state[4]
        the distance to the stop line.  Exactly one matching sub_* counter is
        incremented; if_done is set on any termination and if_pass only on
        the success branch (stopped at the stop line).
        """
        if step >= self.max_steps:
            # logging.warn('Not finished with max steps! Start: ' + str(self.sim.Stop_Line - state[-1]) +
            #              ', Dis to SL: ' + str(state[4]) + ', Dis to FL: ' + str(state[3]) +
            #              ', Velocity: ' + str(state[0]) + ', V0: ' + str(self.sim.ini_speed))
            self.sub_not_finish += 1
            self.if_pass = False
            self.if_done = True
        elif state[0] >= self.sim.Speed_limit + 2.:
            # logging.warn('Exceed Speed Limit: ' + str(self.sim.Stop_Line - state[-1]) + ', Dis to SL: ' + str(state[4]) +
            #              ', Dis to FL: ' + str(state[3]) + ', Velocity: ' + str(state[0]) +
            #              ', V0: ' + str(self.sim.ini_speed))
            self.sub_overspeed += 1
            self.if_pass = False
            self.if_done = True
        elif not_move > 0:
            # logging.warn('Not move! Start: ' + str(self.sim.Stop_Line - state[-1]) + ', Dis to SL: ' + str(state[4]) +
            #              ', Dis to FL: ' + str(state[3]) + ', Velocity: ' + str(state[0]) +
            #              ', V0: ' + str(self.sim.ini_speed))
            self.sub_not_move += 1
            self.if_pass = False
            self.if_done = True
        elif collision > 0:
            # logging.warn('Crash to other vehicles or road boundary! Start: ' + str(self.sim.Stop_Line - state[-1]) +
            #              ', Dis to SL: ' + str(state[4]) + ', Dis to FL: ' + str(state[3]) +
            #              ', Velocity: ' + str(state[0]) + ', V0: ' + str(self.sim.ini_speed))
            self.sub_crash += 1
            self.if_pass = False
            self.if_done = True
        elif cannot_stop > 0:
            # logging.warn('Did not stop at stop line! Start: ' + str(self.sim.Stop_Line - state[-1]) +
            #              ', Dis to SL: ' + str(state[4]) + ', Dis to FL: ' + str(state[3]) +
            #              ', Velocity: ' + str(state[0]) + ', V0: ' + str(self.sim.ini_speed))
            self.sub_cannot_stop += 1
            self.if_pass = False
            self.if_done = True
        elif state[4] <= 0.5 and (state[0] <= 0.1):
            # logging.info('Congratulations! Reach stop line without crashing and has stopped. Start: ' +
            #              str(self.sim.Stop_Line - state[-1]) + ', Dis to SL: ' + str(state[4]) +
            #              ', Dis to FL: ' + str(state[3]) + ', Velocity: ' + str(state[0]) +
            #              ', V0: ' + str(self.sim.ini_speed))
            self.sub_success += 1
            self.if_pass = True
            self.if_done = True
    def launch_train(self, train_indicator=1):  # 1 means Train, 0 means simply Run
        """Run the training (or pure evaluation) loop over episode_count episodes.

        Builds fresh actor/critic networks, restores saved weights, then per
        episode steps the simulator until if_exit() flags termination.  With
        train_indicator == 1 each step also updates the replay batch and the
        networks, and weights are saved after every episode.  Every 100
        episodes the aggregated outcome counters are logged and dumped to
        task<id>/result.txt.
        """
        # logging.info('Launch Training Process')
        # np.random.seed(1337)
        state_t = self.sim.get_state()
        state_dim = state_t.shape[1]
        self.app_actor = AppActorNetwork(self.tf_sess, state_dim, self.action_size, self.batch_size, self.tau, self.LRA)
        self.app_critic = AppCriticNetwork(self.tf_sess, state_dim, self.action_size, self.batch_size, self.tau, self.LRC)
        self.load_weights()
        for e in range(self.episode_count):
            total_loss = 0.
            total_time = time.time()
            total_reward = 0.
            # logging.debug("Episode : " + str(e) + " Replay Buffer " + str(self.buffer.count()))
            step = 0
            state_t = self.sim.get_state()
            while True:
                action_t = self.get_action(state_t, train_indicator)
                reward_t, collision, not_move, cannot_stop = self.reward.get_reward(state_t[0], action_t[0][0])
                self.sim.update_vehicle(reward_t, action_t[0][0])
                state_t1 = self.sim.get_state()
                if train_indicator:
                    self.update_batch(state_t, action_t[0], reward_t, state_t1)
                loss = self.update_loss() if train_indicator else 0.
                total_reward += reward_t
                self.if_exit(step, state_t[0], collision, not_move, cannot_stop)
                step += 1
                total_loss += loss
                train_time = time.time() - self.start_time
                # logging.debug('Episode: ' + str(e) + ', Step: ' + str(step) + ', Dis to SL: ' + str(state_t[0][6]) +
                #               ', Dis to fv: ' + str(state_t[0][5]) + ', v: ' + str(state_t[0][0]) +
                #               ', a: ' + str(action_t) + ', r: ' + str(reward_t) + ', loss: ' + str(loss) +
                #               ', time: ' + str(train_time))
                # total_time += train_time
                if self.if_done:
                    break
                self.start_time = time.time()
                state_t = state_t1
            self.loss.append(total_loss)
            self.total_rewards.append(total_reward)
            plt.close('all')
            total_step = step + 1
            if train_indicator:
                self.update_weights()
            # mean_loss = total_loss / total_step
            # mean_time = total_time / total_step
            mean_time = time.time() - total_time
            # logging.debug(str(e) + "-th Episode: Steps: " + str(total_step) + ', Time: ' + str(mean_time) +
            #               ', Reward: ' + str(total_reward) + " Loss: " + str(loss) + ', Crash: ' +
            #               str(self.sub_crash) + ', Not Stop: ' + str(self.sub_cannot_stop) + ', Not Finished: ' +
            #               str(self.sub_not_finish) + ', Overspeed: ' + str(self.sub_overspeed) + ', Not Move: ' +
            #               str(self.sub_not_move) + ', Success: ' + str(self.sub_success))
            # self.sim = InterSim(True) if e % 50 == 0 else InterSim()
            # self.sim = InterSim(task_pos[self.task] + 30. * random(), False)
            # Reset the simulator with a random start position in [10, 150).
            self.sim = InterSim(140*random() + 10., False)
            self.total_reward = 0.
            self.if_pass = False
            self.if_done = False
            if (e + 1) % 100 == 0:
                # Flush the per-100-episode outcome counters into history lists.
                self.if_train.append(train_indicator)
                self.crash.append(self.sub_crash)
                self.success.append(self.sub_success)
                self.not_finish.append(self.sub_not_finish)
                self.overspeed.append(self.sub_overspeed)
                self.not_move.append(self.sub_not_move)
                self.cannot_stop.append(self.sub_cannot_stop)
                self.run_time.append((time.time() - self.total_time) / 60.)
                self.sub_crash = 0
                self.sub_cannot_stop = 0
                self.sub_success = 0
                self.sub_not_finish = 0
                self.sub_overspeed = 0
                self.sub_not_move = 0
                logging.info('Crash: ' + str(self.crash) + '\nNot Stop: ' + str(self.cannot_stop) +
                             '\nNot Finished: ' + str(self.not_finish) + '\nOverspeed: ' + str(self.overspeed) +
                             '\nNot Move: ' + str(self.not_move) + '\nSuccess: ' + str(self.success) +
                             '\nLoss: ' + str(self.loss))
                results = {'crash': self.crash, 'not_stop': self.cannot_stop, 'unfinished': self.not_finish,
                           'stop': self.not_move, 'overspeed': self.overspeed,
                           'succeess': self.success, 'reward': self.total_rewards, 'loss': self.loss}
                with open('task' + str(self.task) + '/result.txt', 'w+') as _file:
                    js_data = json.dumps(results)
                    _file.write(js_data)
            # train_indicator = 0 if train_indicator == 1 else 1
            # if (e + 1) % 1000 == 0:
            #     self.epsilon = 1.0
if __name__ == '__main__':
    # Interactive matplotlib mode so episode plots do not block the loop.
    plt.ion()
    # Agent for task 9, starting at a random position in [10, 150).
    tmp_agent = ReinAcc(140*random() + 10., 9)
    while True:
        tmp_agent.launch_train(1)
# alpha = 0.5
# task_pos = [10., 40., 70., 100, 130.]
# tictac = time.time()
# train_pro = []
# agents = []
# q = []
# q_exp = []
# for k, i in enumerate(task_pos):
# pos = i + 30. * random()
# tmp_agent = ReinAcc(pos, k)
# tmp_agent.launch_train(1)
# agents.append(tmp_agent)
# q.append(sum(tmp_agent.total_rewards[-Step_size:]) / Step_size / 1000.)
# q_exp.append(float(np.exp(q[-1])))
# logging.info('Time: {0:.2f}'.format((time.time() - tictac) / 3600.) + ', cond: ' + str(k) +
# ', Success: ' + str(tmp_agent.success))
#
# while True:
# q_p = np.array(q_exp) / (sum(q_exp))
# train_pro.append(q_exp)
# with open('train_pro.txt', 'w+') as json_file:
# jsoned_data = json.dumps(train_pro)
# json_file.write(jsoned_data)
#
# boltz_rand = random()
# if boltz_rand < q_p[0]:
# next_ind = 0
# elif q_p[0] <= boltz_rand < sum(q_p[0:2]):
# next_ind = 1
# elif sum(q_p[0:2]) <= boltz_rand < sum(q_p[0:3]):
# next_ind = 2
# elif sum(q_p[0:3]) <= boltz_rand < sum(q_p[0:4]):
# next_ind = 3
# else:
# next_ind = 4
# strFormat = len(q_p) * '{:2.3f} '
# logging.debug('[' + strFormat.format(*q_p) + '], ' + 'Next ind: ' + str(next_ind))
#
# tmp_agent = agents[next_ind]
# tmp_agent.app_actor.model.save_weights("weights/actormodel.h5", overwrite=True)
# with open("weights/actormodel.json", "w") as outfile:
# json.dump(tmp_agent.app_actor.model.to_json(), outfile)
# tmp_agent.app_critic.model.save_weights("weights/criticmodel.h5", overwrite=True)
# with open("weights/criticmodel.json", "w") as outfile:
# json.dump(tmp_agent.app_critic.model.to_json(), outfile)
#
# old_q = q
# q = []
# q_exp = []
# for k, i in enumerate(task_pos):
# # logging.debug(str(k) + ', ' + str(i))
# tmp_agent = agents[k]
# if k == next_ind:
# tmp_agent.launch_train(1)
# else:
# tmp_agent.launch_train(0)
# # q.append(float(np.exp(improve)))
# if sum(tmp_agent.success[-(Step_size / 50):]) / (Step_size / 5.) <= 8.0:
# # improve = (sum(tmp_agent.successes[-(Step_size / 100):]) -
# # sum(tmp_agent.successes[-2 * (Step_size / 100):-(Step_size / 100)])) / (Step_size / 50.)
# # q.append(float(np.exp(abs(improve))))
# qq = alpha * sum(tmp_agent.total_rewards[-Step_size:]) / Step_size / 1000. + \
# (1 - alpha) * old_q[k]
# q.append(qq)
# q_exp.append(float(np.exp(qq)))
# # q[next_ind] = float(np.exp(sum(tmp_agent.successes[-(Step_size / 100):]) / (Step_size / 10.)))
# else:
# qq = - alpha * 10. + (1 - alpha) * old_q[k]
# q_exp.append(float(np.exp(qq)))
# # q[next_ind] = float(np.exp(-10.))
# agents[k] = tmp_agent
# logging.info('Time: {0:.2f}'.format((time.time() - tictac) / 3600.) +
# ', cond: ' + str(k) + ', Success: ' + str(tmp_agent.success))
| [
"zhiqianq@andrew.cmu.edu"
] | zhiqianq@andrew.cmu.edu |
1f46f8881fa2869c4b9401b44520823e4ffea0f6 | a46da291be8183e7b466b0f2abd08e4f9156f805 | /unit2/project/activity_01.py | 40d0582ff3aa783ab9e41f11adc2f4730e5c5e9f | [] | no_license | AdriRiv/StructuredProgramming2A | 69802728ad3e6162e5caa6b4334bf84db87d6375 | dd44a3b2d85d55688af1493bcaa96701d2da1f80 | refs/heads/master | 2023-07-13T19:54:19.934959 | 2021-08-05T13:28:27 | 2021-08-05T13:28:27 | 370,607,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py |
import sys
def isitPrime(numb):
    """Print whether *numb* is prime and return the verdict as a bool.

    Bug fix: the original started its divisor count at 1, so for any
    numb < 2 the loop never ran and 0, 1 and negative numbers were all
    reported as prime.  A prime must be an integer >= 2 with no divisor
    in [2, numb).
    """
    message("This is a function")
    prime = numb >= 2 and all(numb % idx != 0 for idx in range(2, numb))
    if prime:
        print(f'The number {numb} is prime')
    else:
        print(f'The number {numb} is not prime')
    return prime
def message(text):
    """Print *text*; thin wrapper kept for the demo.

    Fix: the original parameter was named ``str``, shadowing the builtin;
    the only visible caller passes it positionally, so the rename is safe.
    """
    print(text)
def pow2nums(num1, num2):
    """Return the squares of both arguments as a (num1**2, num2**2) tuple."""
    return (pow(num1, 2), pow(num2, 2))
if __name__ == "__main__":
    # CLI driver: expects two integers as argv[1] and argv[2].
    print (isitPrime( int(sys.argv[1])))
    print (isitPrime( int(sys.argv[2])))
    # NOTE(review): this first call discards its result; only the second
    # (unpacked) call is actually used.
    pow2nums(int(sys.argv[1]), int( sys.argv[2]))
    (x, y)= pow2nums(int(sys.argv[1]), int( sys.argv[2]))
    print(f'x= {x}, and y= {y}')
| [
"dani2.martinez50@gmail.com"
] | dani2.martinez50@gmail.com |
12cafe149e4b1f948575999cdfe38d218724a2a0 | 62cefb74a4648f521ca0bedf2dc56ef00372c4e6 | /anti-XSS/lib/var/links.py | d7db2c2b5ca00930465a7570db20f1e2c1dcec06 | [
"MIT"
] | permissive | p4int3r/Tools | e78327780c2df153fd165e471dc1f81961ceb35e | d2c675c2be0a5fa5ec4bd6ba59c81fe1348d5c20 | refs/heads/master | 2021-09-04T14:03:11.158397 | 2018-01-19T09:03:28 | 2018-01-19T09:03:28 | 112,544,353 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #!/usr/bin/env python
'''
Copyright (c) 2016 anti-XSS developers
'''
class Links(object):
    '''
    Links class used as a global var.
    '''
    # NOTE(review): `content` is a *class* attribute, so the list is shared
    # by every Links instance; addText() mutates that shared list, while
    # setContent() rebinds `content` on one instance only.  Presumably
    # intentional given the "global var" comment above -- confirm.
    content = []
    def __init__(self):
        pass
    def addText(self, text):
        # Append one entry to the shared list.
        self.content.append(text)
    def setContent(self, content):
        # Replace the stored list (rebinds on the instance).
        self.content = content
    def getContent(self):
        # Return the currently stored list.
        return self.content
| [
"root@kali.org"
] | root@kali.org |
73a34062044e8bbacbf5e735782bef6c3a6cbc5a | 85df75bec1ea604c21db36b8892c90e0d7b7574f | /armstrong/core/arm_layout/utils.py | c7bb882623ba0881a93e8ae89a446d49251f0d1a | [
"Apache-2.0"
] | permissive | niran/armstrong.core.arm_layout | a569a64f84085b55509b26c004a9a41af3952047 | 229106581439c370ba51b1395e5e5e4db111a0bc | refs/heads/master | 2021-01-16T19:29:16.017160 | 2012-03-16T16:29:58 | 2012-03-16T16:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
def get_layout_template_name(model, name):
    """Build candidate template paths for *model*, most specific first.

    Walks the MRO of the instance's class so templates registered for
    ancestor models act as fallbacks; classes without a Django ``_meta``
    attribute are skipped.
    """
    return [
        "layout/%s/%s/%s.html" % (klass._meta.app_label,
                                  klass._meta.object_name.lower(), name)
        for klass in model.__class__.mro()
        if hasattr(klass, "_meta")
    ]
def render_model(object, name, dictionary=None, context_instance=None):
    # Render the layout template variant `name` for `object` (candidate list
    # from get_layout_template_name) and mark the resulting HTML as safe.
    # The model instance is exposed to the template as "object".
    # NOTE(review): the parameter `object` shadows the builtin of the same
    # name; renaming would break keyword callers, so it is left as-is.
    dictionary = dictionary or {}
    dictionary["object"] = object
    return mark_safe(render_to_string(get_layout_template_name(object, name),
        dictionary=dictionary, context_instance=context_instance))
| [
"development@domain51.com"
] | development@domain51.com |
2df5698d0bfcfc28ba8d4dcf61539ca01cc5614e | c2a5617d7aa2b51706b443927cf0b1f046f4aa04 | /warmup.py | 1290ed5e47a8f29bde07af35dadfe477ae093598 | [] | no_license | TsvetomirTsanov/testing- | 9ba0e67e2c2bfd7ba750e8d21a5c0c0828368359 | 5fb1d41259d9f96bf3bfdc7e4fea42f726e9a257 | refs/heads/master | 2021-05-30T04:31:09.353130 | 2015-03-25T13:57:25 | 2015-03-25T13:57:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,593 | py | def factorial (n):
result = 1
for x in range(1, n+1):
result *=x
return result
def fibonacci(n):
    """Return a list of the first n Fibonacci numbers (1, 1, 2, 3, ...).

    Bug fix: the original assigned ``next_fib`` but then read the
    misspelled ``next_rib``, raising NameError for any n > 1.
    """
    result = []
    a = 1
    b = 1
    while len(result) < n:
        result.append(a)
        a, b = b, a + b
    return result
def sum_of_digits(n):
    """Return the sum of the decimal digits of the non-negative integer n."""
    return sum(int(digit) for digit in str(n))
def to_digits(n):
    """Split a non-negative integer into its decimal digits, left to right."""
    return list(map(int, str(n)))
def factorial_digits(n):
    """Return the sum of the factorials of n's decimal digits."""
    return sum(map(factorial, to_digits(n)))
def palindrome(obj):
    """True if obj's string form reads the same forwards and backwards."""
    text = str(obj)
    return text == "".join(reversed(text))
def count_digits(n):
    """Return how many decimal digits the non-negative integer n has.

    Bug fix: the original computed the count but had no ``return``, so it
    always produced None (which in turn broke to_number()).
    """
    return len(str(n))
def to_number(digits):
    """Concatenate a list of non-negative integers into one number.

    e.g. [1, 2, 3] -> 123 and [12, 34] -> 1234; an empty list yields 0.

    Bug fix: the original multiplied by 10**count_digits(digit), but
    count_digits() returned None, so every non-empty input raised a
    TypeError.  This version is self-contained.
    """
    if not digits:
        return 0
    return int("".join(str(d) for d in digits))
def fibonacci_number(n):
    # Glue the first n Fibonacci numbers into a single integer,
    # e.g. n = 4 -> 1123 (from 1, 1, 2, 3).
    return to_number(fibonacci(n))
def count_vowels(string):
    """Count the vowels (a, e, i, o, u in either case) in *string*.

    Bug fix: the original compared characters against the junk string
    "sdadSDADdgher", which contains consonants and lacks 'o'/'u'
    entirely, so it did not count vowels at all.
    """
    vowels = "aeiouAEIOU"
    count = 0
    for ch in string:
        if ch in vowels:
            count += 1
    return count
def char_histogram(string):
    """Map each character of *string* to the number of times it occurs."""
    histogram = {}
    for ch in string:
        histogram[ch] = histogram.get(ch, 0) + 1
    return histogram
def p_score(n):
    # Palindrome score: number of reverse-and-add steps (counting the final
    # palindrome itself) until n becomes palindromic.
    # NOTE(review): this recursion never terminates for Lychrel candidates
    # such as 196 -- confirm callers only pass numbers known to converge.
    if(palindrome(n)):
        return 1
    s = n + int(str(n)[::-1])
    return 1 + p_score(s)
def is_even(n):
    """True when n is divisible by two."""
    return not n % 2
def odd(n):
    """True when n is odd.

    Bug fix: the original called the undefined name ``even`` (the sibling
    predicate is actually named ``is_even``), so every call raised
    NameError.  Computed directly to be self-contained.
    """
    return n % 2 != 0
def is_hack(n):
    """A "hack" number's binary form is a palindrome with an odd count of 1s.

    Bug fix: the original delegated the parity test to odd(), which crashed
    on the undefined name ``even``, so is_hack() always raised NameError.
    Both checks are inlined here.
    """
    binary_n = bin(n)[2:]
    is_palindrome = binary_n == binary_n[::-1]
    has_odd_ones = binary_n.count("1") % 2 == 1
    return is_palindrome and has_odd_ones
def next_hack(n):
    """Return the smallest hack number strictly greater than n."""
    candidate = n + 1
    while not is_hack(candidate):
        candidate += 1
    return candidate
def sum_of_divisors(n):
    """Sum every positive divisor of n, including n itself."""
    return sum(d for d in range(1, n + 1) if n % d == 0)
def is_prime(n):
    """True when n is a prime number.

    Bug fix: the original special-cased n == 1 so that it returned True
    for 1 (a prime needs exactly two divisors, so 1 is not prime), and it
    trial-divided by every a <= n.  This version rejects n < 2 and only
    tests divisors up to sqrt(n).
    """
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True
def contains_digit(number, digit):
    """True if *digit*'s decimal form appears inside *number*'s."""
    return str(number).find(str(digit)) >= 0
def contains_digits(number, digits):
    """True if every entry of *digits* occurs among *number*'s decimal digits."""
    available = [int(ch) for ch in str(number)]
    return all(d in available for d in digits)
def is_number_balanced(n):
    """True when the digit sums of n's two halves match; the middle digit
    of an odd-length number is ignored (a single digit is balanced).

    Bug fix: the original was unfinished -- it called len() on an int
    (TypeError), compared digit *values* instead of positions, and never
    returned anything.
    """
    digits = [int(ch) for ch in str(n)]
    half = len(digits) // 2
    return sum(digits[:half]) == sum(digits[len(digits) - half:])
def count_substrings(haystack, needle):
    """Count the (possibly overlapping) occurrences of *needle* in *haystack*.

    Bug fix: the original iterated ``for ch in len(haystack)`` -- looping
    over an int, a TypeError -- and its ``needle in haystack[ch:]`` test
    counted suffixes containing the needle rather than actual matches.
    """
    count = 0
    for start in range(len(haystack)):
        if haystack.startswith(needle, start):
            count += 1
    return count
"cvetmir464@gmail.com"
] | cvetmir464@gmail.com |
60c721e6c7d21277963b95af8fdc2aa107b72302 | 21df7cd93e156af8357596143792c22b44e14747 | /regression/SimpleLinearRegression.py | 963a2797127498672b735dbc7c59e572c6b024fa | [] | no_license | yanyongyong/machineLearn | 0cac90c1d0b4f7021e3f9ca658268f3c433b481f | d77a13f83679ba4b06bf24c6c6019dc2af55986f | refs/heads/master | 2021-09-03T08:25:33.933996 | 2018-01-07T14:15:52 | 2018-01-07T14:15:52 | 107,839,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | import numpy as np
#简单的线性回归
def fitSLR(x,y):
    """Fit a simple least-squares regression line y = b0 + b1*x.

    Returns the (intercept, slope) pair computed from the classic
    covariance-over-variance formulas.
    """
    x_bar = np.mean(x)
    y_bar = np.mean(y)
    numerator = 0
    denominator = 0
    for xi, yi in zip(x, y):
        numerator += (xi - x_bar) * (yi - y_bar)
        denominator += (xi - x_bar) ** 2
    slope = numerator / float(denominator)
    intercept = y_bar - slope * x_bar
    return intercept, slope
def predict(x,bo,b1):
    """Evaluate the fitted line at x: intercept bo plus slope b1 times x."""
    return b1 * x + bo
# Tiny demo: fit the line on five sample points, then predict y at x = 8.
x = [1,3,2,1,3]
y = [14,24,18,17,27]
b0,b1 = fitSLR(x,y)
x_test = 8
# NOTE(review): passes the literal 8 instead of x_test defined above.
y_test = predict(8,b0,b1)
print(y_test)
"123456"
] | 123456 |
a59f68bfb54a9e7eb89a1e7368ae60cb0a1cf949 | bddc3a96ba342ea235fd192f6ee6686ee0602dc9 | /src/basic-c6/human-class.py | 27b5269c8b60626dedc0849ffb4245e361cbfd93 | [] | no_license | n18010/programming-term2 | e927d12eb18427000ae71c604e449351ed9a548b | 89b16dc7baff918b97ba9fe07ccf3520bf88c227 | refs/heads/master | 2020-03-22T08:59:29.466308 | 2018-08-29T06:38:34 | 2018-08-29T06:38:34 | 139,806,129 | 0 | 0 | null | 2018-07-05T06:42:10 | 2018-07-05T06:42:10 | null | UTF-8 | Python | false | false | 588 | py | # クラスを設計したところ
class Human:
    '''Class representing a human (docstrings translated from Japanese).'''
    def search(self, place):
        '''Look around at *place* (not implemented in this demo).'''
        pass
    def take(self, food):
        '''Grab an item and remember it as self.food.'''
        self.food = food
    def open_mouth(self):
        '''Open the mouth (not implemented in this demo).'''
        pass
    def eat(self):
        '''Eat the remembered food; prints "<food> was eaten" in Japanese.'''
        print(self.food+"を食べました")
# Create an object from the Human class
hito = Human()
# Call the methods defined on Human
hito.take("Banana")
hito.eat()
| [
"n18010@std.it-college.ac.jp"
] | n18010@std.it-college.ac.jp |
ef0669b9fba46a00cfd0433f43ee520c247f88fb | 171b49fe5cb0b62db82daa735d1423b1d801f73d | /quiz_app/quizzes/urls.py | b3a2a0029da9e37e7591a924b16f7ca352d843ad | [] | no_license | H0sway/ssbm-quiz-app | add5999529209b8669615428c6d527b7187633a0 | 7bae166baadd5b713e5e594bfe42c188047d6134 | refs/heads/master | 2020-03-28T12:49:45.979772 | 2018-09-27T18:43:26 | 2018-09-27T18:43:26 | 148,338,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # Import Modules
from django.urls import path, re_path, include
from django.contrib import admin
from . import views
# API/Admin URLs
# Bug fix: '<str:name>' is *path-converter* syntax, which re_path() treats as
# a literal regular expression, so the single-quiz route could never match.
# The plain prefixes gain nothing from regexes either, so every route now
# uses path() (anchored, exact matching with converters).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/quizzes/', views.QuizList.as_view()),
    path('api/quizzes/<str:name>/', views.SingleQuiz.as_view()),
    path('api/questions/', views.QuestionList.as_view()),
    path('api/answers/', views.AnswerList.as_view()),
]
| [
"jkrussell756@gmail.com"
] | jkrussell756@gmail.com |
a2ee918ee914a6a2440aeba1db575f22ba3e78bf | 458b1133df5b38a017f3a690a624a54f0f43fda7 | /PaperExperiments/XHExp041/parameters.py | 62f97ccd29ad9f45eebb6360c8de059e6a0f209d | [
"MIT"
] | permissive | stefan-c-kremer/TE_World2 | 9c7eca30ee6200d371183c5ba32b3345a4cc04ee | 8e1fae218af8a1eabae776deecac62192c22e0ca | refs/heads/master | 2020-12-18T14:31:00.639003 | 2020-02-04T15:55:49 | 2020-02-04T15:55:49 | 235,413,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py |
# parameters.py
"""
Exp 41 - {'Initial_genes': '5000', 'Host_mutation_rate': '0.30', 'TE_progeny': '0.15, 0, 0.55, 1, 0.30, 2', 'TE_Insertion_Distribution': 'Flat()', 'Carrying_capacity': '30', 'TE_excision_rate': '0.1', 'Junk_BP': '1.4', 'Gene_Insertion_Distribution': 'Flat()', 'mutation_effect': '0.10', 'TE_death_rate': '0.0005'}
"""
# NOTE(review): wildcard import -- Flat, ProbabilityTable and (presumably)
# `random`, used in the lambdas below, all come from TEUtil; confirm.
from TEUtil import *;
# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
        "SPLAT": False,
        "SPLAT FITNESS": False,
        "INITIALIZATION": False,
        "GENERATION": True,
        "HOST EXTINCTION": True,
        "TE EXTINCTION": True,
        "TRIAL NO": True,
        "GENE INIT": False,
        "TE INIT": False,
        };
TE_Insertion_Distribution = Flat();
Gene_Insertion_Distribution = Flat();
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability
Gene_length = 1000; # use 1000?
TE_length = 1000; # use 1000?
TE_death_rate = 0.0005;
TE_excision_rate = 0.1; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.15, 0, 0.55, 1, 0.30, 2 );
Initial_genes = 5000;
Append_gene = True; # True: when the intialization routine tries to place
                    # a gene inside another gene, it instead appends it
                    # at the end of the original gene (use this with small
                    # amounts of Junk_BP).
                    # False: when the intialization routine tries to place
                    # a gene inside another gene, try to place it somewhere
                    # else again (don't use theis option with samll amounts
                    # of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 1.4 * MILLION;
Host_start_fitness = 1.0;
Host_mutation_rate = 0.30;
# Host mutation outcomes: 40% lethal, 30% mildly deleterious, 15% neutral,
# 15% mildly beneficial (effect size up to 0.10 either way).
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
                0.30, lambda fit: fit - random.random()*0.10,
                0.15, lambda fit: fit,
                0.15, lambda fit: fit + random.random()*0.10
                );
# what happens when a TA hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
                0.20, lambda fit: fit - random.random()*0.10,
                0.30, lambda fit: fit,
                0.20, lambda fit: fit + random.random()*0.10
                );
Carrying_capacity = 30;
Host_reproduction_rate = 1; # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual
Maximum_generations = 1500;
Terminate_no_TEs = True; # end simulation if there are no TEs left
# seed = 0;
seed = None; # if seed = None, the random number generator's initial state is
            # set "randomly"
save_frequency = 50; # Frequency with with which to save state of experiment
saved = None; # if saved = None then we start a new simulation from scratch
            # if saves = string, then we open that file and resume a simulation
| [
"stefan@kremer.ca"
] | stefan@kremer.ca |
05ee330ac8c279f0bae14d923b27e6c6e2202f01 | f9c8af20349f2fe4a73be4d038826caff87e0ab1 | /Problem Solving/Basic/gaming_array.py | e60b282eaa6118282d99197c61d03d8e0b47db8b | [] | no_license | danylo-boiko/HackerRank | 0ea14716328ac37377716df7c2fa997805d3f9bf | 1a5bb3462c59e9d8f4d675838a32c55e593f4b8a | refs/heads/main | 2023-08-14T12:36:18.179156 | 2021-10-05T23:13:29 | 2021-10-05T23:13:29 | 400,652,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | # https://www.hackerrank.com/challenges/an-interesting-game-1/problem
# !/bin/python3
def gamingArray(arr):
    """Decide the winner of the HackerRank "Gaming Array" round.

    Each move removes the prefix up to the current maximum, so every new
    running maximum costs exactly one turn; BOB moves first and therefore
    wins exactly when the number of prefix maxima is odd.
    """
    best = 0
    turns = 0
    for value in arr:
        if value > best:
            best = value
            turns += 1
    return "BOB" if turns % 2 else "ANDY"
if __name__ == '__main__':
    # HackerRank stdin driver: g games; each game supplies a (parsed but
    # otherwise unused) length line, then the array itself.
    g = int(input().strip())
    for g_itr in range(g):
        arr_count = int(input().strip())
        arr = list(map(int, input().rstrip().split()))
        result = gamingArray(arr)
        print(result)
| [
"danielboyko02@gmail.com"
] | danielboyko02@gmail.com |
2a57eb8d02ae5ea6db9e98289714f423bbba977b | 5823b28a4b4239d953b314f23cb3e3fb33f96c89 | /Uallio_learning/plugins/elimina_base.py | 2be1984ea6f73c2729bed08f0c4bcf18cdb8146a | [] | no_license | feniculiadmin/UallioEdu | 0c63553d8750b967ffbfba8034f69f504531d5bd | d4edb2ef94f2dec7a461a7fb00cacc66366144c2 | refs/heads/main | 2023-02-09T00:55:28.364174 | 2021-01-02T16:19:41 | 2021-01-02T16:19:41 | 305,513,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | #configurazione
id_data="learning_001"
import json
import string
class plugin_elimina(object):
    """Chat-bot plugin for the /elimina ("delete") command: removes a stored
    custom command from the JSON data file (comments translated to English;
    the user-facing strings stay in Italian)."""
    def __init__(self):
        # Listen for text messages starting with /elimina.
        self.input = 'text'
        self.toListen = '/elimina'
        print("Il comando /elimina è in ascolto")
    def action(self, param=None, msg=None):
        """Delete the command named by param[1:]; returns a status message."""
        print (param)
        if len(param)>1:
            com=0
            # Rebuild the command name from the space-split parameters.
            comando=""
            for i in range(1 , len(param)):
                comando= comando + param[i] + " "
            comando=comando.strip()
            # search the stored actions for the command and save the data
            trovato=0
            with open('./data/data_'+str(id_data)+'.json', encoding="utf8") as json_file:
                self.body=json.load(json_file)
            lunghezza=len(self.body['new_action'])
            for i in range(0 , lunghezza):
                if trovato==1:
                    break
                for a in self.body['new_action'][i]['command']:
                    #print (str(a) + str(type(a))+ str(comando)+ str(type(comando)))
                    if str(a)==str(comando):
                        trovato=1
                        print ("trovato")
                        # Remove the matching action, then stop scanning.
                        self.body["new_action"].pop(i)
                        break
            if trovato==0:
                return("non è presente il comando selezionato")
            # Persist the updated action list back to the JSON file.
            with open('./data/data_'+str(id_data)+'.json', mode='w', encoding='utf8') as json_f:
                json_f.write(json.dumps(self.body, indent=4))
            return("Eliminerò il comando "+ str(comando))
        else:
            return("Per eliminare un comando devi usare questo comando ignorante: elimina [parola di attivazione]")
"noreply@github.com"
] | noreply@github.com |
5303d7dd4b2b9e93a5c36f9aa130708b0f19825b | 5528b7e582bde0c1c461c6784379182f149ee0c5 | /ulohy/24.3.py | 3c471bd88f4c7ec2aa62822aa18fda32d3475265 | [] | no_license | MichalRybecky/Informatika | 64c649c3dab76dce497d38a58e99d2c3ee613924 | fccc0b51a1ba8c6a6255bb63f48a97bd7ac1041d | refs/heads/main | 2023-04-10T20:50:17.088822 | 2021-04-26T21:05:17 | 2021-04-26T21:05:17 | 325,068,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | with open('ziaci.txt', 'r') as file:
mena = [x.strip() for x in file.readlines()]
slicer = int(len(mena) / 2)
mena_1 = mena[:slicer] # krstne mena
mena_2 = mena[slicer:] # priezviska
print(f'Pocet mien v subore: {slicer}')
print(f'Najdlhsie krstne meno: {max(mena_1, key=len)}')
print(f'Najdlhsie priezvisko: {max(mena_2, key=len)}')
with open('vystup.txt', 'w') as file:
for krstne, priezvisko in zip(mena_1, mena_2):
medzery = (20 - len(krstne))
file.write(str(krstne) + medzery * ' ' + str(priezvisko) + '\n')
| [
"michalrybec@protonmail.com"
] | michalrybec@protonmail.com |
465b87dd2605a4e591b7693d9ff7ef6ed379c2e6 | f39c2c500873180d953ab9a7b22a4f6df95fb1c3 | /Amazon/Pyramid Transition Matrix.py | 24f0dd8cd85fc636e5f6ed3c3ff56adc101c0a4e | [] | no_license | Jason003/interview | 458516f671d7da0d3585f89b098c5370edcd9f04 | e153306b85c3687b23a332812a0885d25ecce904 | refs/heads/master | 2021-07-15T15:28:07.175276 | 2021-02-05T03:21:59 | 2021-02-05T03:21:59 | 224,898,150 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | '''
Input: bottom = "BCD", allowed = ["BCG", "CDE", "GEA", "FFF"]
Output: true
Explanation:
We can stack the pyramid like this:
A
/ \
G E
/ \ / \
B C D
We are allowed to place G on top of B and C because BCG is an allowed triple. Similarly, we can place E on top of C and D, then A on top of G and E.
'''
import collections
import itertools

from typing import List
class Solution:
    def pyramidTransition(self, bottom: str, allowed: List[str]) -> bool:
        """Return True if the pyramid can be reduced to a single apex block.

        Each allowed triple "XYZ" means block Z may sit on the adjacent
        pair XY.  The search memoizes whole rows, so a row already proven
        (un)buildable is never re-explored -- the original re-ran the DFS
        for repeated states and went exponential on adversarial inputs.
        (The file previously also used ``List`` without importing it.)
        """
        tops = collections.defaultdict(set)
        for triple in allowed:
            tops[triple[:2]].add(triple[2])
        memo = {}

        def buildable(row):
            # Can `row` be stacked, level by level, down to one block?
            if len(row) == 1:
                return True
            if not row:
                return False
            if row in memo:
                return memo[row]
            choices = [tops[row[i:i + 2]] for i in range(len(row) - 1)]
            result = any(buildable("".join(combo))
                         for combo in itertools.product(*choices))
            memo[row] = result
            return result

        return buildable(bottom)
| [
"jiefanli97@gmail.com"
] | jiefanli97@gmail.com |
ba4d170799f444f1d2db58e2b09ae9d09f2044e5 | 965af75565d1b36de2962cad22902f44dcb80b7e | /products/migrations/0002_product_description.py | a69d6cccf3a5f0f6907343eb081ee4c5301dbf7d | [] | no_license | Yojanpardo/cost_center | 675e6e6becdf6d59e76607dba5ced0487402ad97 | 346a52d917e070d244119e5ed08e93a99b701f51 | refs/heads/master | 2022-12-01T09:44:15.716651 | 2022-11-22T13:41:21 | 2022-11-22T13:41:21 | 167,300,526 | 0 | 0 | null | 2022-11-22T13:41:22 | 2019-01-24T04:02:54 | Python | UTF-8 | Python | false | false | 404 | py | # Generated by Django 2.1.5 on 2019-01-26 20:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds an optional ``description``
    text column (max 255 chars, nullable/blank) to ``products.Product``."""
    dependencies = [
        ('products', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='description',
            field=models.TextField(blank=True, max_length=255, null=True),
        ),
    ]
| [
"Yojan.Pardo@gmail.com"
] | Yojan.Pardo@gmail.com |
600b1577655a55e545db415b031f237b419098f0 | ef2eacb4866a2df579cf43312891ac2df11d38be | /plan/rota.py | 6369d64dfcc1d0463bdb06da477fcfb77d3aede2 | [] | no_license | vitorshaft/roboMovel | 862a67eb697297954561cfec7010e8770f9f8f36 | d53ea2389d0f552dfdf3f1d98aa6467d26dcecd9 | refs/heads/master | 2022-11-29T05:21:58.751007 | 2020-08-23T01:18:07 | 2020-08-23T01:18:07 | 258,174,509 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | import math
class plan:
    """Straight-line path planner for a mobile robot (comments translated
    from Portuguese)."""
    def __init__(self):
        pass
    def traj(self,xo,yo,xr,yr): # returns angle and distance for straight-line displacement
        """Return [alfa, h]: heading in degrees (0-360, from (xr, yr) toward
        (xo, yo)) and the Euclidean distance between the two points.

        NOTE(review): if both points coincide, h == 0 and the seno/cos
        divisions below raise ZeroDivisionError -- confirm callers never
        pass identical points.
        """
        self.dx = float(xo-xr)
        self.dy = float(yo-yr)
        # Slope dy/dx; the tiny epsilon sidesteps division by zero when
        # dx == 0 (vertical displacement).
        try:
            self.tg = self.dy/self.dx
        except:
            self.tg = self.dy/(self.dx+0.0000000000001)
        self.h = math.hypot(self.dx,self.dy)
        self.teta = math.atan(self.tg)
        self.seno = self.dy/self.h
        self.cos = self.dx/self.h
        # Resolve atan's quadrant ambiguity by the signs of sin/cos,
        # with the four axis directions handled explicitly.
        if (self.seno > 0 and self.cos > 0): #Q1
            self.alfa = math.degrees(math.atan(self.tg))
        elif (self.seno > 0 and self.cos < 0): #Q2
            self.alfa = math.degrees(math.acos(self.cos))
        elif (self.seno < 0 and self.cos <0): #Q3
            self.alfa = math.degrees(math.atan(self.tg))+180
        elif (self.seno < 0 and self.cos >0): #Q4
            self.alfa = math.degrees(math.atan(self.tg))+360
        elif (self.seno == 0 and self.cos > 0):
            self.alfa = 0.0
        elif (self.seno == 0 and self.cos < 0):
            self.alfa = 180.0
        elif (self.seno > 0 and self.cos == 0):
            self.alfa = 90.0
        elif (self.seno < 0 and self.cos == 0):
            self.alfa = 270.0
        else:
            # Unreachable fallback ("trigonometric condition not met").
            print("CONDICAO TRIGONOMETRICA NAO ATENDIDA")
        #print(self.alfa,self.h)
        return [self.alfa,self.h]
| [
"vtrshaft@gmail.com"
] | vtrshaft@gmail.com |
8b9b6a34ebd947aea8f503348abdfbd4aa9a9026 | 3e5611aac055a0e1c7e9ab30aa8192e70527d4fb | /friendParing.py | eda1f762e32caba5b4ccf12f844c42ab00b21fae | [] | no_license | GeorgeGithiri5/Dynamic_Programming | 2d6234ef84f47ba97793cbbdd0ad3a95ffc5be4d | 66543496d428a52c2097f1eabd60cad4f8cad417 | refs/heads/master | 2023-03-29T07:23:04.441195 | 2021-04-13T09:22:21 | 2021-04-13T09:22:21 | 298,251,373 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | def countfriendPairing(n):
dp = [0 for i in range(n+1)]
for i in range(n+1):
if(i<=2):
dp[i] = i
else:
dp[i] = dp[i-1] + (i - 1)*dp[i-2]
return dp[n]
n = 4
print(countfriendPairing(n)) | [
"georgegithiri002@gmail.com"
] | georgegithiri002@gmail.com |
ae60e9b1424a37519eecbabcfeb13e32fe0e0f59 | df1348a67a54fa530f620ba1145c34d914710fde | /examples/sandbox/sandbox_export.py | 0279085899b7e8a7bfb5c5464169c3afb8f28481 | [
"MIT"
] | permissive | SilverLabUCL/netpyne | bf00991cec1ca44c44476e0a0fff2a15bc28b08c | 72ce78d8c79c060d44513bafa7843756ee06cc45 | refs/heads/master | 2020-07-12T12:45:39.959342 | 2016-11-16T10:26:23 | 2016-11-16T10:26:23 | 73,908,592 | 0 | 0 | null | 2016-11-16T10:21:48 | 2016-11-16T10:21:48 | null | UTF-8 | Python | false | false | 293 | py | import sandbox # import parameters file
from netpyne import sim # import netpyne sim module
# One-shot export: build the network from the sandbox parameters module and
# write it out in NeuroML 2 format under the reference name 'sandbox'.
sim.createExportNeuroML2(netParams = sandbox.netParams,
                        simConfig = sandbox.simConfig,
                        reference = 'sandbox') # create and export network to NeuroML 2
"salvadordura@gmail.com"
] | salvadordura@gmail.com |
e51299f8abc1dd4c7949d0582e9e70aff73626cf | b80ccb2cd65f5f30666e45c1cc14d80ec135a4c1 | /app/api/auth_routes.py | 0bc6f81001df9b06eedf30ab381e62ee24d3d830 | [
"MIT"
] | permissive | nikhilmenon2/Learn2Cook | d1109a1820cbc3881ffbcf4ab7ee2a1357df3377 | 8276c8d42e1476f32916952b105fa434d4c9abcc | refs/heads/main | 2023-05-22T15:01:59.315691 | 2021-06-14T16:00:30 | 2021-06-14T16:00:30 | 335,084,032 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,634 | py | from flask import Blueprint, jsonify, session, request
from app.models import User, db
from app.forms import LoginForm
from app.forms import SignUpForm
from flask_login import current_user, login_user, logout_user, login_required
auth_routes = Blueprint('auth', __name__)
def validation_errors_to_error_messages(validation_errors):
    """
    Flatten a WTForms errors mapping (field -> list of messages) into a
    flat list of "field : message" strings.
    """
    return [
        f"{field} : {error}"
        for field, messages in validation_errors.items()
        for error in messages
    ]
@auth_routes.route('/')
def authenticate():
    """
    Return the current user's dict when the session is authenticated,
    otherwise an error payload with HTTP 401.
    """
    if current_user.is_authenticated:
        return jsonify(current_user.to_dict())
    return {'errors': ['Unauthorized']}, 401
@auth_routes.route('/login', methods=['POST'])
def login():
    """
    Log a user in: validate the posted LoginForm and, on success, start a
    Flask-Login session and return the user's dict; on failure return the
    form's validation errors with HTTP 401.
    """
    form = LoginForm()
    print(request.get_json())  # NOTE(review): debug print left in -- remove or log
    # Get the csrf_token from the request cookie and put it into the
    # form manually to validate_on_submit can be used
    form['csrf_token'].data = request.cookies['csrf_token']
    if form.validate_on_submit():
        # Add the user to the session, we are logged in!
        user = User.query.filter(User.email == form.data['email']).first()
        login_user(user)
        return user.to_dict()
    return {'errors': validation_errors_to_error_messages(form.errors)}, 401
@auth_routes.route('/logout')
def logout():
    """End the current user's session and return a confirmation message."""
    logout_user()
    return {'message': 'User logged out'}
@auth_routes.route('/signup', methods=['POST'])
def sign_up():
    """
    Create a new user account from the posted SignUpForm.

    On success the new user is logged in and their dict returned;
    otherwise an ``errors`` payload with form errors (plus a password
    mismatch message when applicable) is returned.
    """
    form = SignUpForm()
    form['csrf_token'].data = request.cookies['csrf_token']
    err = ''
    data = request.get_json()
    # NOTE(review): debug print of the raw signup payload (includes the
    # password) — consider removing before production.
    print(data)
    # Password confirmation is checked manually, outside the form.
    if data['password'] != data['confirm_password']:
        err = 'password and confirm password must match'
    if form.validate_on_submit():
        if err == '':
            user = User(
                username=form.data['username'],
                email=form.data['email'],
                firstName=form.data['firstName'],
                lastName=form.data['lastName'],
                password=form.data['password'],
                profileImg=form.data['profileImg']
            )
            db.session.add(user)
            db.session.commit()
            login_user(user)
            return user.to_dict()
    # Fall through on any failure: collect form errors and append the
    # password-mismatch message if set.
    errors = validation_errors_to_error_messages(form.errors)
    if err:
        errors.append(err)
    return {'errors': errors}
@auth_routes.route('/unauthorized')
def unauthorized():
    """Return the standard 401 error payload."""
    return {'errors': ['Unauthorized']}, 401
| [
"nikhilmenon@comcast.net"
] | nikhilmenon@comcast.net |
125c76db9f1f9f7db1a60cc1fac82e87519e6ac9 | c342df24a9e2a94c5b952b57d73e45ee35adea80 | /dqn_bullet_cartpole.py | f1a52ad5b053b903b878f9a354642da5683ba6ec | [
"MIT"
] | permissive | vyraun/cartpoleplusplus | 4b652d4ba0210e5abdb78931153d6076839cf6df | 87c0f1b896e6d6919c4dbfcd0bf4306f807b60ef | refs/heads/master | 2020-12-31T02:48:49.650551 | 2016-08-29T03:29:05 | 2016-08-29T03:29:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | #!/usr/bin/env python
# copy pasta from https://github.com/matthiasplappert/keras-rl/blob/master/examples/dqn_cartpole.py
# with some extra arg parsing
import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
import bullet_cartpole
import argparse
# Command-line options controlling environment physics, training length,
# and optional weight load/save paths.
parser = argparse.ArgumentParser()
parser.add_argument('--gui', action='store_true')
parser.add_argument('--initial-force', type=float, default=55.0,
                    help="magnitude of initial push, in random direction")
parser.add_argument('--action-force', type=float, default=50.0,
                    help="magnitude of action push")
parser.add_argument('--num-train', type=int, default=100)
parser.add_argument('--num-eval', type=int, default=0)
parser.add_argument('--load-file', type=str, default=None)
parser.add_argument('--save-file', type=str, default=None)
parser.add_argument('--delay', type=float, default=0.0)
opts = parser.parse_args()
print "OPTS", opts
ENV_NAME = 'BulletCartpole'
# Get the environment and extract the number of actions.
env = bullet_cartpole.BulletCartpole(gui=opts.gui, action_force=opts.action_force,
                                     initial_force=opts.initial_force, delay=opts.delay)
nb_actions = env.action_space.n
# Next, we build a very simple model: flatten the observation, one
# 32-unit tanh hidden layer, and a linear Q-value output per action.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(32))
model.add(Activation('tanh'))
#model.add(Dense(16))
#model.add(Activation('relu'))
#model.add(Dense(16))
#model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Replay memory and Boltzmann exploration policy for the DQN agent.
memory = SequentialMemory(limit=50000)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
               target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
if opts.load_file is not None:
    # NOTE(review): "from from" typo in this message.
    print "loading weights from from [%s]" % opts.load_file
    dqn.load_weights(opts.load_file)
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
dqn.fit(env, nb_steps=opts.num_train, visualize=True, verbose=2)
# After training is done, we save the final weights.
if opts.save_file is not None:
    print "saving weights to [%s]" % opts.save_file
    dqn.save_weights(opts.save_file, overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=opts.num_eval, visualize=True)
| [
"matthew.kelcey@gmail.com"
] | matthew.kelcey@gmail.com |
92e94347df736d42fe1f084b575f52e958825236 | c5ce89c843c5078f311cf0552a038733bbc6553d | /node_modules/node-sass/build/config.gypi | 899b7b3edd10524d02cbae95ed3a63445c8ec99a | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | Crazykaopu/myDemo | 58fce2813f8f7223a00d57ca1c10084eb8fa461e | f77ce23b5416648c2d303517757821280eefa258 | refs/heads/master | 2021-01-21T10:19:50.013695 | 2017-02-28T08:36:40 | 2017-02-28T08:36:40 | 83,408,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,466 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt58l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt58l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "58",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 51,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "51.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"want_separate_host_toolset_mkpeephole": 0,
"xcode_version": "7.0",
"nodedir": "/Users/qingyunh5/.node-gyp/7.3.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"libsass_ext": "",
"libsass_cflags": "",
"libsass_ldflags": "",
"libsass_library": "",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"browser": "",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"npat": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"heading": "npm",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/qingyunh5/.npmrc",
"init_module": "/Users/qingyunh5/.npm-init.js",
"user": "",
"node_version": "7.3.0",
"save": "",
"editor": "vi",
"tag": "latest",
"progress": "true",
"global": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "Infinity",
"searchsort": "name",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/qingyunh5/.npm",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/3.10.10 node/v7.3.0 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "0022",
"init_version": "1.0.0",
"scope": "",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/7k/250yt15x6fd1vdgwsg8h0v6h0000gp/T",
"onload_script": "",
"prefix": "/usr/local",
"link": ""
}
}
| [
"qingyunh5@yuanxiang.local"
] | qingyunh5@yuanxiang.local |
e0c05f71ba2c1ec6b84d1cee2e49a9f3fd585618 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_clutched.py | e70757cfd80eae64be874dd7819e132a2b0a95da | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.nouns._clutch import _CLUTCH
#class header
class _CLUTCHED(_CLUTCH, ):
    # Word entry for "clutched": delegates shared behaviour to the base
    # _CLUTCH noun class and records its own surface form and lemma.
    def __init__(self,):
        # Initialise base-class state first, then set this entry's fields.
        _CLUTCH.__init__(self)
        self.name = "CLUTCHED"  # surface form
        self.specie = 'nouns'  # part-of-speech bucket
        self.basic = "clutch"  # lemma / base form
        self.jsondata = {}  # extra data, empty by default
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
59d94563bfa6f5937003f4b1bdd3072c24cc7d4c | e9f111b913255e2a8963556a638017c6c4729f01 | /randomize_four_digits.py | 0384492097a4e58757931549c4dab66f38246c1c | [] | no_license | dojinkimm/daddy | d609c38333358a6119ad71b4c89f418ae8c071eb | 77e79324da3e7deb11d0a045d888e432a499d388 | refs/heads/master | 2023-01-31T08:21:26.544482 | 2020-12-15T12:25:26 | 2020-12-15T12:25:26 | 285,579,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | """
문장 리스트에서 4자리 혹은 3자리 숫자를 찾아서 랜덤 숫자로 변경해주는 GUI 프로그램
GUI Program that finds four digit or three digit number in a list of sentences,
and changes to random number
"""
import random
import re
import pandas as pd
import PySimpleGUI as sg
def arg_parse():
    """Show the input dialog and return the user's entries.

    Returns the PySimpleGUI values mapping: index 0 is the text block of
    sentences, 1 the digit length to replace, 2 the output file name.
    Exits the process when the window is closed or Cancel is pressed.
    """
    layout = [
        [sg.Text("문장을 입력하세요", size=(25, 1))],
        [sg.InputText()],
        [sg.Text("변경할 숫자의 길이를 입력해주세요")],
        [sg.InputText()],
        [sg.Text("저장할 파일의 이름을 입력하세요")],
        [sg.InputText()],
        [sg.Submit(), sg.Cancel()],
    ]
    window = sg.Window("문장 숫자 랜덤 생성기", layout)
    event, values = window.read()
    window.close()
    if event is None or event == "Cancel":
        exit()
    return values
args = arg_parse()
phrases = args[0].split("\n")
digit = args[1]
# Output file name; default to test.csv when the user left it blank.
file_name = args[2] + ".csv"
if args[2] == "":
    file_name = "test.csv"
generated_words = []
# Match a 4-digit run not followed by a unit suffix (kg, ml, cm, ...),
# or a 4-digit run at the end of the phrase.
digit_regexp = r"\d\d\d\d((?=[^kg|^Kg|^ml|^cm|^mm|^MM|^WT]))|\d\d\d\d$"
if digit != "" and int(digit) == 3:
    # Bug fix: the 3-digit option previously reassigned the identical
    # 4-digit pattern, so 3-digit numbers were never matched.
    digit_regexp = r"\d\d\d((?=[^kg|^Kg|^ml|^cm|^mm|^MM|^WT]))|\d\d\d$"
for p in phrases:
    if p == "":
        continue
    match = re.search(digit_regexp, p)
    if match is None:
        # No number of the requested length: keep the phrase unchanged.
        generated_words.append(p)
        continue
    # Bug fix: seed from the phrase *before* drawing, so the replacement
    # number is deterministic per phrase (previously random.seed(p) ran
    # after random.randint and had no effect on the drawn value).
    random.seed(p)
    rand = random.randint(1000, 9999)
    if digit != "" and int(digit) == 3:
        rand = random.randint(100, 999)
    new_p = re.sub(digit_regexp, str(rand), p)
    generated_words.append(new_p)
df = pd.DataFrame(generated_words)
df.to_csv(file_name, encoding="utf-8-sig")
| [
"dojinkim119@gmail.com"
] | dojinkim119@gmail.com |
a03230f460994f28b677a293aea19162a7708eb2 | 8ff12c53e31f134b9f39f59b9a6f7d4f9142cea7 | /lvlist/teacherPython/lianxi.py | bf2cc0047479923ed84cd01299189d22e12ed361 | [] | no_license | quhuohuo/python | 5b0a80dbec7d22a0b274e4a32d269e85d254718c | 5732c5974519da8e8919dab42b36ab0ab2c99b37 | refs/heads/master | 2021-06-13T11:41:12.356329 | 2017-04-07T08:58:05 | 2017-04-07T08:58:05 | 75,054,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | #!/usr/bin/python
def fun(char):
l = char.split(" ")
char = ''.join(l)
return char
while True:
s = raw_input()
if not len(s):
break
print "before:",s
s = fun(s)
print "after:",s
| [
"813559081@qq.com"
] | 813559081@qq.com |
c1b555142268a2128bd6e519a011a0f94dbd4c23 | 19fa99388ee8ede27c810095667b1df4f31673a2 | /app/api/posts.py | 95af8d6d9081323b3fc93b584a143b9c4aa8026c | [] | no_license | zgfhill/flas | d32941fcc6c1cbe95da85b8a032626168e88f855 | 347cf88480953f92d1aed99087c65538715eef79 | refs/heads/master | 2020-04-18T11:26:21.271140 | 2019-01-25T06:45:27 | 2019-01-25T06:45:27 | 164,623,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | @api.route('/posts/')
def get_posts():
    """Return one page of posts as JSON.

    Query string: ``page`` (default 1). The response carries the
    serialized posts plus prev/next page URLs (or None) and the total
    post count.
    """
    page = request.args.get('page', 1, type=int)
    pagination = Post.query.paginate(
        page, per_page=current_app.config['FLASK_POSTS_PER_PAGE'], error_out=False)
    posts = pagination.items
    # prev_url/next_url renamed from prev/next (which shadowed builtins).
    prev_url = None
    if pagination.has_prev:
        prev_url = url_for('api.get_posts', page=page-1)
    next_url = None
    if pagination.has_next:
        next_url = url_for('api.get_posts', page=page+1)
    # Bug fix: removed two unreachable statements that followed this
    # return (a second query of all posts and a second return).
    return jsonify({'posts': [post.to_json() for post in posts], 'prev_url': prev_url, 'next_url': next_url, 'count': pagination.total})
@api.route('/posts/<int:id>')
def get_post(id):
    """Return a single post by id, or 404 when it does not exist."""
    # Bug fix: the view must accept the ``id`` URL parameter (the route
    # declares <int:id>), and the Flask-SQLAlchemy helper is
    # ``get_or_404`` — ``get_404`` does not exist.
    post = Post.query.get_or_404(id)
    return jsonify(post.to_json())
@api.route('/posts/', methods=['POST'])
# Consistency fix: use the same ``permission_required`` decorator as
# edit_post below; ``permission.required`` is not a name defined here.
@permission_required(Permission.WRITE)
def new_post():
    """Create a post from the JSON body; the author is the current user."""
    post = Post.from_json(request.json)
    post.author = g.current_user
    db.session.add(post)
    db.session.commit()
    # 201 Created with a Location header pointing at the new resource.
    return jsonify(post.to_json()), 201, {'Location': url_for('api.get_post', id=post.id)}
@api.route('/posts/<int:id>', methods=['PUT'])
@permission_required(Permission.WRITE)
def edit_post(id):
    """Update a post's body; only the author or an admin may edit."""
    # Bug fix: 404 on a missing post instead of crashing on None
    # (``Post.query.get`` returns None for unknown ids).
    post = Post.query.get_or_404(id)
    # Bug fix: this ``if`` line was missing its trailing colon.
    if g.current_user != post.author and not g.current_user.can(Permission.ADMIN):
        return forbidden('Insufficient permissions')
    post.body = request.json.get('body', post.body)
    db.session.add(post)
    db.session.commit()
    return jsonify(post.to_json())
| [
"zgfhill@163.com"
] | zgfhill@163.com |
19774af108915387eb5f2ee44608d270c5137efc | 6d4d69c91eb14150778468f7cf73d8e2a9aa9124 | /t/variant/read_expresses_del.py | a0eba72d31d3b41050cde4c2746b8eee0690634a | [] | no_license | phonybone/Nof1 | 847acf7ce785319590f99271d20b7c126b59b699 | 22d877a96cd4481fdb7bf860c4d0721fcb34ddbe | refs/heads/master | 2021-01-23T13:56:53.606351 | 2013-09-20T23:50:30 | 2013-09-20T23:50:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | import unittest, sys, os
from warnings import warn
libdir=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','lib'))
sys.path.append(libdir)
from variant import *
class TestExpressesSNP(unittest.TestCase):
def setUp(self):
print
def test_expresses_del(self):
var=Variant('ABC', 23, 'center', 'hg45', 'chr1', 3827, 3836, '+', 'Missense_Mutation', 'DEL',
'GTATCCGTCA', 'GTATCCGTCA', '')
seq='AAAAACCGAGCCCGGGGGTT'*4 # note presence of 'GAG' at correct location
pos=3820 # has to encompass variant position of 3829
self.assertTrue(var.is_expressed_in_seq(seq, pos))
seq='AAAAACGGTATCCGTCAAGC'*4 # note presence of 'GAG' at incorrect location
self.assertFalse(var.is_expressed_in_seq(seq, pos))
#-----------------------------------------------------------------------
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestExpressesSNP)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"phonybone@gmail.com"
] | phonybone@gmail.com |
8ae40c3182f52ae8c227bb8aabbc5c1f0fcc7c36 | 3804ff9274fe62d92f7f1cb65a5e7ee572e30ada | /personal/migrations/0007_personaldetails_cv.py | 66d0dcf7ca507c0450ae07bef7cde41486c9c775 | [] | no_license | hebs87/PortfolioBackend | eb9450cbe87af478d9714482635d40e0dfb29798 | 27233353045c0f40cbd9de242a77ca0656f7327f | refs/heads/master | 2023-04-18T04:51:13.523443 | 2021-04-06T19:24:21 | 2021-04-06T19:24:21 | 353,143,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # Generated by Django 3.1.7 on 2021-04-06 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional ``cv`` file-upload field to the PersonalDetails model.

    dependencies = [
        ('personal', '0006_auto_20210402_0237'),
    ]

    operations = [
        migrations.AddField(
            model_name='personaldetails',
            name='cv',
            field=models.FileField(blank=True, null=True, upload_to='files'),
        ),
    ]
| [
"sunnyhebbar@hotmail.co.uk"
] | sunnyhebbar@hotmail.co.uk |
6e33258216c0107f3b441f5a78bae74fa6f4ca29 | 7d54074486b25c398fb2d35f217d40914c24e60c | /manage.py | 9ced450703c3c02c555f1d1c50e8749f3af60761 | [
"MIT"
] | permissive | acemaster/braindump | 2bf56fe6a9905f28bf92f0a9447c0247f4208742 | e8dccc723ce29ad38fc60cdb8153208c790e2947 | refs/heads/master | 2020-04-05T19:01:46.141987 | 2015-12-21T06:38:50 | 2015-12-21T06:38:50 | 48,020,660 | 0 | 1 | null | 2015-12-17T09:29:04 | 2015-12-15T05:01:23 | Python | UTF-8 | Python | false | false | 2,297 | py | #!/usr/bin/env python
import os
COV = None
if os.environ.get('FLASK_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
if os.path.exists('.env'):
print('Importing environment from .env...')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
from app import create_app, db
from app.models import User, Note, Tag, Notebook
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    """Expose the app, db handle, and models inside ``manage.py shell``."""
    context = {
        'app': app,
        'db': db,
        'User': User,
        'Note': Note,
        'Tag': Tag,
        'Notebook': Notebook,
    }
    return context
manager.add_command(
"shell",
Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test(coverage=False):
    """Run the unit tests."""
    import sys
    # When coverage is requested, re-exec this script with FLASK_COVERAGE
    # set so coverage collection starts before the app module is imported
    # (see the top-of-file COV setup guarded by that variable).
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    import unittest
    import xmlrunner
    tests = unittest.TestLoader().discover('tests')
    # Emit JUnit-style XML results into test-reports/ for CI consumption.
    results = xmlrunner.XMLTestRunner(output='test-reports').run(tests)
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'test-reports/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()
    # Propagate a non-zero exit code when any test failed.
    if len(results.failures) > 0:
        sys.exit(1)
@manager.command
def profile(length=25, profile_dir=None):
    """Start the application under the code profiler."""
    from werkzeug.contrib.profiler import ProfilerMiddleware
    # Show the `length` slowest calls per request; optionally write
    # per-request profile files into `profile_dir`.
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
                                      profile_dir=profile_dir)
    app.run()
@manager.command
def deploy():
    """Run deployment tasks."""
    from flask.ext.migrate import upgrade
    # migrate database to latest revision
    upgrade()
if __name__ == '__main__':
manager.run()
| [
"lev@circleci.com"
] | lev@circleci.com |
88c3f3374cc6e75c7552d12abc8d38e640ce8948 | ee5206c3cdc25ec6bf526622bf7d2f121c8a8b01 | /ProjectAPI/blog/post/migrations/0003_auto_20190128_1143.py | 1886292de1fcde42bd498d66a955e8a93f631743 | [] | no_license | SunitraD97/ProjectAPI | 6648773021d2ea0d38334ff57de827fe4cf04846 | c3d09ab8be1c2c9642648fabdee224a703820b1e | refs/heads/master | 2020-04-19T02:00:10.087778 | 2019-02-01T08:00:22 | 2019-02-01T08:00:22 | 167,510,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # Generated by Django 2.1.3 on 2019-01-28 04:43
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the Post model's ``name`` field to ``title``.

    dependencies = [
        ('post', '0002_auto_20190123_1131'),
    ]

    operations = [
        migrations.RenameField(
            model_name='post',
            old_name='name',
            new_name='title',
        ),
    ]
| [
"sunitra1525@gmail.com"
] | sunitra1525@gmail.com |
983a777eea0b5f999dc64520c81090b60c106a85 | cd6c6298fb407b7158e25aba2ab28e58517b1bd0 | /tests/test_plugins.py | 54f26a7f79028ccfeae9522314b326c15c0de4a7 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | mociepka/coveragepy | dc58ef4b6072af0e55edb5d920d8a58d4cbeef0c | bc31b68776bb76ac9a650caa3c7a04c84817093d | refs/heads/master | 2021-01-17T20:16:31.014696 | 2016-01-02T21:30:47 | 2016-01-02T21:30:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,439 | py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests for plugins."""
import os.path
import coverage
from coverage import env
from coverage.backward import StringIO
from coverage.control import Plugins
from coverage.misc import CoverageException
import coverage.plugin
from tests.coveragetest import CoverageTest
from tests.helpers import CheckUniqueFilenames
class FakeConfig(object):
    """A fake config for use in tests."""

    def __init__(self, plugin, options):
        # The one plugin name this config knows options for, the options
        # themselves, and a record of every module queried.
        self.plugin = plugin
        self.options = options
        self.asked_for = []

    def get_plugin_options(self, module):
        """Just return the options for `module` if this is the right module."""
        self.asked_for.append(module)
        return self.options if module == self.plugin else {}
class LoadPluginsTest(CoverageTest):
    """Test Plugins.load_plugins directly."""

    def test_implicit_boolean(self):
        # A Plugins object is falsy when empty, truthy when it loaded one.
        self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
    pass
def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        config = FakeConfig("plugin1", {})
        plugins = Plugins.load_plugins([], config)
        self.assertFalse(plugins)
        plugins = Plugins.load_plugins(["plugin1"], config)
        self.assertTrue(plugins)

    def test_importing_and_configuring(self):
        # The plugin gets its own options passed to coverage_init.
        self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
    def __init__(self, options):
        self.options = options
        self.this_is = "me"
def coverage_init(reg, options):
    reg.add_file_tracer(Plugin(options))
""")
        config = FakeConfig("plugin1", {'a': 'hello'})
        plugins = list(Plugins.load_plugins(["plugin1"], config))
        self.assertEqual(len(plugins), 1)
        self.assertEqual(plugins[0].this_is, "me")
        self.assertEqual(plugins[0].options, {'a': 'hello'})
        self.assertEqual(config.asked_for, ['plugin1'])

    def test_importing_and_configuring_more_than_one(self):
        # Each plugin is configured independently, in the order listed.
        self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
    def __init__(self, options):
        self.options = options
        self.this_is = "me"
def coverage_init(reg, options):
    reg.add_file_tracer(Plugin(options))
""")
        self.make_file("plugin2.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
    def __init__(self, options):
        self.options = options
def coverage_init(reg, options):
    reg.add_file_tracer(Plugin(options))
""")
        config = FakeConfig("plugin1", {'a': 'hello'})
        plugins = list(Plugins.load_plugins(["plugin1", "plugin2"], config))
        self.assertEqual(len(plugins), 2)
        self.assertEqual(plugins[0].this_is, "me")
        self.assertEqual(plugins[0].options, {'a': 'hello'})
        self.assertEqual(plugins[1].options, {})
        self.assertEqual(config.asked_for, ['plugin1', 'plugin2'])
        # The order matters...
        config = FakeConfig("plugin1", {'a': 'second'})
        plugins = list(Plugins.load_plugins(["plugin2", "plugin1"], config))
        self.assertEqual(len(plugins), 2)
        self.assertEqual(plugins[0].options, {})
        self.assertEqual(plugins[1].this_is, "me")
        self.assertEqual(plugins[1].options, {'a': 'second'})

    def test_cant_import(self):
        # A missing plugin module surfaces as a plain ImportError.
        with self.assertRaises(ImportError):
            _ = Plugins.load_plugins(["plugin_not_there"], None)

    def test_plugin_must_define_coverage_init(self):
        # Importable but incomplete plugins get a clear CoverageException.
        self.make_file("no_plugin.py", """\
from coverage import CoveragePlugin
Nothing = 0
""")
        msg_pat = "Plugin module 'no_plugin' didn't define a coverage_init function"
        with self.assertRaisesRegex(CoverageException, msg_pat):
            list(Plugins.load_plugins(["no_plugin"], None))
class PluginTest(CoverageTest):
    """Test plugins through the Coverage class."""

    def test_plugin_imported(self):
        # Prove that a plugin will be imported.
        self.make_file("my_plugin.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
    pass
def coverage_init(reg, options):
    reg.add_noop(Plugin())
with open("evidence.out", "w") as f:
    f.write("we are here!")
""")
        self.assert_doesnt_exist("evidence.out")
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ["my_plugin"])
        cov.start()
        cov.stop()                                      # pragma: nested
        # The side effect of importing the plugin proves it was loaded.
        with open("evidence.out") as f:
            self.assertEqual(f.read(), "we are here!")

    def test_missing_plugin_raises_import_error(self):
        # Prove that a missing plugin will raise an ImportError.
        with self.assertRaises(ImportError):
            cov = coverage.Coverage()
            cov.set_option("run:plugins", ["does_not_exist_woijwoicweo"])
            cov.start()
            cov.stop()

    def test_bad_plugin_isnt_hidden(self):
        # Prove that a plugin with an error in it will raise the error.
        self.make_file("plugin_over_zero.py", """\
1/0
""")
        with self.assertRaises(ZeroDivisionError):
            cov = coverage.Coverage()
            cov.set_option("run:plugins", ["plugin_over_zero"])
            cov.start()
            cov.stop()

    def test_plugin_sys_info(self):
        # A plugin's sys_info() entries appear in the --debug=sys output.
        self.make_file("plugin_sys_info.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
    def sys_info(self):
        return [("hello", "world")]
def coverage_init(reg, options):
    reg.add_noop(Plugin())
""")
        debug_out = StringIO()
        cov = coverage.Coverage(debug=["sys"])
        cov._debug_file = debug_out
        cov.set_option("run:plugins", ["plugin_sys_info"])
        cov.load()
        out_lines = debug_out.getvalue().splitlines()
        expected_end = [
            "-- sys: plugin_sys_info.Plugin -------------------------------",
            " hello: world",
            "-- end -------------------------------------------------------",
        ]
        self.assertEqual(expected_end, out_lines[-len(expected_end):])

    def test_plugin_with_no_sys_info(self):
        # A plugin without sys_info still gets a (empty) debug section.
        self.make_file("plugin_no_sys_info.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
    pass
def coverage_init(reg, options):
    reg.add_noop(Plugin())
""")
        debug_out = StringIO()
        cov = coverage.Coverage(debug=["sys"])
        cov._debug_file = debug_out
        cov.set_option("run:plugins", ["plugin_no_sys_info"])
        cov.load()
        out_lines = debug_out.getvalue().splitlines()
        expected_end = [
            "-- sys: plugin_no_sys_info.Plugin ----------------------------",
            "-- end -------------------------------------------------------",
        ]
        self.assertEqual(expected_end, out_lines[-len(expected_end):])

    def test_local_files_are_importable(self):
        # A plugin in the current directory can import sibling modules.
        self.make_file("importing_plugin.py", """\
from coverage import CoveragePlugin
import local_module
class MyPlugin(CoveragePlugin):
    pass
def coverage_init(reg, options):
    reg.add_noop(MyPlugin())
""")
        self.make_file("local_module.py", "CONST = 1")
        self.make_file(".coveragerc", """\
[run]
plugins = importing_plugin
""")
        self.make_file("main_file.py", "print('MAIN')")
        out = self.run_command("coverage run main_file.py")
        self.assertEqual(out, "MAIN\n")
        out = self.run_command("coverage html")
        self.assertEqual(out, "")
class PluginWarningOnPyTracer(CoverageTest):
    """Test that we get a controlled exception with plugins on PyTracer."""

    def test_exception_if_plugins_on_pytracer(self):
        if env.C_TRACER:
            self.skip("This test is only about PyTracer.")
        self.make_file("simple.py", """a = 1""")
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ["tests.plugin1"])
        # File-tracer plugins need the C tracer; PyTracer must warn.
        expected_warnings = [
            r"Plugin file tracers \(tests.plugin1.Plugin\) aren't supported with PyTracer",
        ]
        with self.assert_warnings(cov, expected_warnings):
            self.start_import_stop(cov, "simple")
class FileTracerTest(CoverageTest):
    """Tests of plugins that implement file_tracer."""

    def setUp(self):
        super(FileTracerTest, self).setUp()
        # File tracers require the C tracer; skip otherwise.
        if not env.C_TRACER:
            self.skip("Plugins are only supported with the C tracer.")
class GoodPluginTest(FileTracerTest):
"""Tests of plugin happy paths."""
def test_plugin1(self):
self.make_file("simple.py", """\
import try_xyz
a = 1
b = 2
""")
self.make_file("try_xyz.py", """\
c = 3
d = 4
""")
cov = coverage.Coverage()
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin1"])
# Import the Python file, executing it.
self.start_import_stop(cov, "simple")
_, statements, missing, _ = cov.analysis("simple.py")
self.assertEqual(statements, [1, 2, 3])
self.assertEqual(missing, [])
zzfile = os.path.abspath(os.path.join("/src", "try_ABC.zz"))
_, statements, _, _ = cov.analysis(zzfile)
self.assertEqual(statements, [105, 106, 107, 205, 206, 207])
def make_render_and_caller(self):
"""Make the render.py and caller.py files we need."""
# plugin2 emulates a dynamic tracing plugin: the caller's locals
# are examined to determine the source file and line number.
# The plugin is in tests/plugin2.py.
self.make_file("render.py", """\
def render(filename, linenum):
# This function emulates a template renderer. The plugin
# will examine the `filename` and `linenum` locals to
# determine the source file and line number.
fiddle_around = 1 # not used, just chaff.
return "[{0} @ {1}]".format(filename, linenum)
def helper(x):
# This function is here just to show that not all code in
# this file will be part of the dynamic tracing.
return x+1
""")
self.make_file("caller.py", """\
import sys
from render import helper, render
assert render("foo_7.html", 4) == "[foo_7.html @ 4]"
# Render foo_7.html again to try the CheckUniqueFilenames asserts.
render("foo_7.html", 4)
assert helper(42) == 43
assert render("bar_4.html", 2) == "[bar_4.html @ 2]"
assert helper(76) == 77
# quux_5.html will be omitted from the results.
assert render("quux_5.html", 3) == "[quux_5.html @ 3]"
# In Python 2, either kind of string should be OK.
if sys.version_info[0] == 2:
assert render(u"uni_3.html", 2) == "[uni_3.html @ 2]"
""")
# will try to read the actual source files, so make some
# source files.
def lines(n):
"""Make a string with n lines of text."""
return "".join("line %d\n" % i for i in range(n))
self.make_file("bar_4.html", lines(4))
self.make_file("foo_7.html", lines(7))
def test_plugin2(self):
self.make_render_and_caller()
cov = coverage.Coverage(omit=["*quux*"])
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
# The way plugin2 works, a file named foo_7.html will be claimed to
# have 7 lines in it. If render() was called with line number 4,
# then the plugin will claim that lines 4 and 5 were executed.
_, statements, missing, _ = cov.analysis("foo_7.html")
self.assertEqual(statements, [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(missing, [1, 2, 3, 6, 7])
self.assertIn("foo_7.html", cov.data.line_counts())
_, statements, missing, _ = cov.analysis("bar_4.html")
self.assertEqual(statements, [1, 2, 3, 4])
self.assertEqual(missing, [1, 4])
self.assertIn("bar_4.html", cov.data.line_counts())
self.assertNotIn("quux_5.html", cov.data.line_counts())
if env.PY2:
_, statements, missing, _ = cov.analysis("uni_3.html")
self.assertEqual(statements, [1, 2, 3])
self.assertEqual(missing, [1])
self.assertIn("uni_3.html", cov.data.line_counts())
def test_plugin2_with_branch(self):
    """Plugin-measured HTML files can be analyzed with branch coverage on."""
    self.make_render_and_caller()
    cov = coverage.Coverage(branch=True, omit=["*quux*"])
    CheckUniqueFilenames.hook(cov, '_should_trace')
    CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
    cov.set_option("run:plugins", ["tests.plugin2"])
    self.start_import_stop(cov, "caller")
    # The way plugin2 works, a file named foo_7.html will be claimed to
    # have 7 lines in it. If render() was called with line number 4,
    # then the plugin will claim that lines 4 and 5 were executed.
    analysis = cov._analyze("foo_7.html")
    self.assertEqual(analysis.statements, set([1, 2, 3, 4, 5, 6, 7]))
    # Plugins don't do branch coverage yet.
    self.assertEqual(analysis.has_arcs(), True)
    self.assertEqual(analysis.arc_possibilities(), [])
    self.assertEqual(analysis.missing, set([1, 2, 3, 6, 7]))

def test_plugin2_with_text_report(self):
    """The text report includes files measured by a file tracer plugin."""
    self.make_render_and_caller()
    cov = coverage.Coverage(branch=True, omit=["*quux*"])
    cov.set_option("run:plugins", ["tests.plugin2"])
    self.start_import_stop(cov, "caller")
    repout = StringIO()
    # Report only the plugin-handled files, excluding the unicode one.
    total = cov.report(file=repout, include=["*.html"], omit=["uni*.html"])
    report = repout.getvalue().splitlines()
    expected = [
        'Name Stmts Miss Branch BrPart Cover Missing',
        '--------------------------------------------------------',
        'bar_4.html 4 2 0 0 50% 1, 4',
        'foo_7.html 7 5 0 0 29% 1-3, 6-7',
        '--------------------------------------------------------',
        'TOTAL 11 7 0 0 36% ',
    ]
    self.assertEqual(report, expected)
    self.assertAlmostEqual(total, 36.36, places=2)

def test_plugin2_with_html_report(self):
    """The HTML report can be generated for plugin-measured files."""
    self.make_render_and_caller()
    cov = coverage.Coverage(branch=True, omit=["*quux*"])
    cov.set_option("run:plugins", ["tests.plugin2"])
    self.start_import_stop(cov, "caller")
    total = cov.html_report(include=["*.html"], omit=["uni*.html"])
    self.assertAlmostEqual(total, 36.36, places=2)
    # One page per measured file, plus the index.
    self.assert_exists("htmlcov/index.html")
    self.assert_exists("htmlcov/bar_4_html.html")
    self.assert_exists("htmlcov/foo_7_html.html")

def test_plugin2_with_xml_report(self):
    """The XML report records plugin-measured files with their line rates."""
    self.make_render_and_caller()
    cov = coverage.Coverage(branch=True, omit=["*quux*"])
    cov.set_option("run:plugins", ["tests.plugin2"])
    self.start_import_stop(cov, "caller")
    total = cov.xml_report(include=["*.html"], omit=["uni*.html"])
    self.assertAlmostEqual(total, 36.36, places=2)
    with open("coverage.xml") as fxml:
        xml = fxml.read()
    # 2/4 lines hit in bar_4.html, 2/7 in foo_7.html.
    for snip in [
        'filename="bar_4.html" line-rate="0.5" name="bar_4.html"',
        'filename="foo_7.html" line-rate="0.2857" name="foo_7.html"',
    ]:
        self.assertIn(snip, xml)

def test_defer_to_python(self):
    """A plugin can do its own tracing but delegate reporting to Python."""
    # A plugin that measures, but then wants built-in python reporting.
    self.make_file("fairly_odd_plugin.py", """\
        # A plugin that claims all the odd lines are executed, and none of
        # the even lines, and then punts reporting off to the built-in
        # Python reporting.
        import coverage.plugin
        class Plugin(coverage.CoveragePlugin):
            def file_tracer(self, filename):
                return OddTracer(filename)
            def file_reporter(self, filename):
                return "python"
        class OddTracer(coverage.plugin.FileTracer):
            def __init__(self, filename):
                self.filename = filename
            def source_filename(self):
                return self.filename
            def line_number_range(self, frame):
                lineno = frame.f_lineno
                if lineno % 2:
                    return (lineno, lineno)
                else:
                    return (-1, -1)
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.make_file("unsuspecting.py", """\
        a = 1
        b = 2
        c = 3
        d = 4
        e = 5
        f = 6
        """)
    cov = coverage.Coverage(include=["unsuspecting.py"])
    cov.set_option("run:plugins", ["fairly_odd_plugin"])
    self.start_import_stop(cov, "unsuspecting")
    repout = StringIO()
    total = cov.report(file=repout)
    report = repout.getvalue().splitlines()
    # Odd lines counted as executed, even lines as missing -> 50%.
    expected = [
        'Name Stmts Miss Cover Missing',
        '-----------------------------------------------',
        'unsuspecting.py 6 3 50% 2, 4, 6',
    ]
    self.assertEqual(report, expected)
    self.assertEqual(total, 50)
class BadPluginTest(FileTracerTest):
    """Test error handling around plugins."""

    def run_plugin(self, module_name):
        """Run a plugin with the given `module_name`.

        Uses a few fixed Python files: "simple.py" drives "other.py" and
        "another.py".

        Returns the Coverage object.
        """
        self.make_file("simple.py", """\
            import other, another
            a = other.f(2)
            b = other.f(3)
            c = another.g(4)
            d = another.g(5)
            """)
        # The names of these files are important: some plugins apply themselves
        # to "*other.py".
        self.make_file("other.py", """\
            def f(x):
                return x+1
            """)
        self.make_file("another.py", """\
            def g(x):
                return x-1
            """)
        cov = coverage.Coverage()
        cov.set_option("run:plugins", [module_name])
        self.start_import_stop(cov, "simple")
        return cov
def run_bad_plugin(self, module_name, plugin_name, our_error=True, excmsg=None):
    """Run a file, and see that the plugin failed.

    `module_name` and `plugin_name` are the module and name of the plugin
    to use.

    `our_error` is True if the error reported to the user will be an
    explicit error in our test code, marked with an '# Oh noes!' comment.

    `excmsg`, if provided, is text that should appear in the stderr.

    The plugin will be disabled, and we check that a warning is output
    explaining why.
    """
    self.run_plugin(module_name)
    stderr = self.stderr()
    print(stderr)  # for diagnosing test failures.
    if our_error:
        errors = stderr.count("# Oh noes!")
        # The exception we're causing should only appear once.
        self.assertEqual(errors, 1)
    # There should be a warning explaining what's happening, but only one.
    # The message can be in two forms:
    #   Disabling plugin '...' due to previous exception
    # or:
    #   Disabling plugin '...' due to an exception:
    msg = "Disabling plugin '%s.%s' due to " % (module_name, plugin_name)
    warnings = stderr.count(msg)
    self.assertEqual(warnings, 1)
    if excmsg:
        self.assertIn(excmsg, stderr)
def test_file_tracer_has_no_file_tracer_method(self):
    """A plugin object with no file_tracer() method is rejected."""
    self.make_file("bad_plugin.py", """\
        class Plugin(object):
            pass
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)

def test_file_tracer_has_inherited_sourcefilename_method(self):
    """A FileTracer that never overrides source_filename() is rejected."""
    self.make_file("bad_plugin.py", """\
        import coverage
        class Plugin(coverage.CoveragePlugin):
            def file_tracer(self, filename):
                # Just grab everything.
                return FileTracer()
        class FileTracer(coverage.FileTracer):
            pass
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin(
        "bad_plugin", "Plugin", our_error=False,
        excmsg="Class 'bad_plugin.FileTracer' needs to implement source_filename()",
    )

def test_plugin_has_inherited_filereporter_method(self):
    """A plugin without file_reporter() fails at reporting time."""
    self.make_file("bad_plugin.py", """\
        import coverage
        class Plugin(coverage.CoveragePlugin):
            def file_tracer(self, filename):
                # Just grab everything.
                return FileTracer()
        class FileTracer(coverage.FileTracer):
            def source_filename(self):
                return "foo.xxx"
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    cov = self.run_plugin("bad_plugin")
    expected_msg = "Plugin 'bad_plugin.Plugin' needs to implement file_reporter()"
    with self.assertRaisesRegex(NotImplementedError, expected_msg):
        cov.report()
def test_file_tracer_fails(self):
    """An exception inside file_tracer() disables the plugin."""
    self.make_file("bad_plugin.py", """\
        import coverage.plugin
        class Plugin(coverage.plugin.CoveragePlugin):
            def file_tracer(self, filename):
                17/0 # Oh noes!
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin")

def test_file_tracer_returns_wrong(self):
    """file_tracer() returning a non-FileTracer disables the plugin."""
    self.make_file("bad_plugin.py", """\
        import coverage.plugin
        class Plugin(coverage.plugin.CoveragePlugin):
            def file_tracer(self, filename):
                return 3.14159
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)

def test_has_dynamic_source_filename_fails(self):
    """An exception inside has_dynamic_source_filename() disables the plugin."""
    self.make_file("bad_plugin.py", """\
        import coverage.plugin
        class Plugin(coverage.plugin.CoveragePlugin):
            def file_tracer(self, filename):
                return BadFileTracer()
        class BadFileTracer(coverage.plugin.FileTracer):
            def has_dynamic_source_filename(self):
                23/0 # Oh noes!
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin")

def test_source_filename_fails(self):
    """An exception inside source_filename() disables the plugin."""
    self.make_file("bad_plugin.py", """\
        import coverage.plugin
        class Plugin(coverage.plugin.CoveragePlugin):
            def file_tracer(self, filename):
                return BadFileTracer()
        class BadFileTracer(coverage.plugin.FileTracer):
            def source_filename(self):
                42/0 # Oh noes!
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin")

def test_source_filename_returns_wrong(self):
    """source_filename() returning a non-string disables the plugin."""
    self.make_file("bad_plugin.py", """\
        import coverage.plugin
        class Plugin(coverage.plugin.CoveragePlugin):
            def file_tracer(self, filename):
                return BadFileTracer()
        class BadFileTracer(coverage.plugin.FileTracer):
            def source_filename(self):
                return 17.3
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)

def test_dynamic_source_filename_fails(self):
    """An exception inside dynamic_source_filename() disables the plugin."""
    self.make_file("bad_plugin.py", """\
        import coverage.plugin
        class Plugin(coverage.plugin.CoveragePlugin):
            def file_tracer(self, filename):
                if filename.endswith("other.py"):
                    return BadFileTracer()
        class BadFileTracer(coverage.plugin.FileTracer):
            def has_dynamic_source_filename(self):
                return True
            def dynamic_source_filename(self, filename, frame):
                101/0 # Oh noes!
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin")
def test_line_number_range_returns_non_tuple(self):
    """line_number_range() returning a non-tuple disables the plugin."""
    self.make_file("bad_plugin.py", """\
        import coverage.plugin
        class Plugin(coverage.plugin.CoveragePlugin):
            def file_tracer(self, filename):
                if filename.endswith("other.py"):
                    return BadFileTracer()
        class BadFileTracer(coverage.plugin.FileTracer):
            def source_filename(self):
                return "something.foo"
            def line_number_range(self, frame):
                return 42.23
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)

def test_line_number_range_returns_triple(self):
    """line_number_range() returning a 3-tuple disables the plugin."""
    self.make_file("bad_plugin.py", """\
        import coverage.plugin
        class Plugin(coverage.plugin.CoveragePlugin):
            def file_tracer(self, filename):
                if filename.endswith("other.py"):
                    return BadFileTracer()
        class BadFileTracer(coverage.plugin.FileTracer):
            def source_filename(self):
                return "something.foo"
            def line_number_range(self, frame):
                return (1, 2, 3)
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)

def test_line_number_range_returns_pair_of_strings(self):
    """line_number_range() returning string line numbers disables the plugin."""
    self.make_file("bad_plugin.py", """\
        import coverage.plugin
        class Plugin(coverage.plugin.CoveragePlugin):
            def file_tracer(self, filename):
                if filename.endswith("other.py"):
                    return BadFileTracer()
        class BadFileTracer(coverage.plugin.FileTracer):
            def source_filename(self):
                return "something.foo"
            def line_number_range(self, frame):
                return ("5", "7")
        def coverage_init(reg, options):
            reg.add_file_tracer(Plugin())
        """)
    self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
| [
"ned@nedbatchelder.com"
] | ned@nedbatchelder.com |
f80a75e0fead93eb8553124874b7dc2654931a65 | df821c05ff8bf3012f4ccce09422fc5f5897e2ae | /tests.py | 745f9f862a45ea2b7b6b9d3d721ea387575ccd17 | [] | no_license | chibole/microblog | aead8056de3852cfb602e79d57a97e828b657abd | c8616e409f844091454a9f1e91bc27cd69c098d3 | refs/heads/master | 2022-12-10T13:10:42.652730 | 2018-09-03T08:26:14 | 2018-09-03T08:26:14 | 117,012,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,036 | py | from datetime import datetime, timedelta
import unittest
from app import create_app, db
from app.models import User, Post
from config import Config
class TestConfig(Config):
    """Configuration used by the tests: testing mode, in-memory SQLite DB.

    BUG FIX: the base class was written as ``config`` (the module name,
    which is not even imported here) instead of the imported ``Config``
    class, causing a NameError as soon as this module was loaded.
    """
    TESTING = True
    # In-memory database so the tests never touch the real one.
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
class UserModelCase(unittest.TestCase):
    """Unit tests for the User model: passwords, avatars, following, feeds."""

    def setUp(self):
        # Build an app with the test config and push an app context so that
        # the database can be used outside of a request.
        self.app = create_app(TestConfig)
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()

    def tearDown(self):
        # Drop everything so each test starts from a clean database.
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_password_hashing(self):
        """A password verifies only against the value it was set to."""
        u = User(username='susan')
        u.set_password('cat')
        self.assertFalse(u.check_password('dog'))
        self.assertTrue(u.check_password('cat'))

    def test_avatar(self):
        """Avatar URLs are Gravatar links derived from the email hash."""
        u = User(username='john', email='john@example.com')
        self.assertEqual(u.avatar(128), ('https://www.gravatar.com/avatar/'
                                         'd4c74594d841139328695756648b6bd6'
                                         '?d=identicon&s=128'))

    def test_follow(self):
        """follow()/unfollow() maintain both sides of the relationship."""
        u1 = User(username='john', email='john@example.com')
        u2 = User(username='susan', email='susan@example.com')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        self.assertEqual(u1.followed.all(), [])
        self.assertEqual(u1.followers.all(), [])

        u1.follow(u2)
        db.session.commit()
        self.assertTrue(u1.is_following(u2))
        self.assertEqual(u1.followed.count(), 1)
        self.assertEqual(u1.followed.first().username, 'susan')
        self.assertEqual(u2.followers.count(), 1)
        self.assertEqual(u2.followers.first().username, 'john')

        u1.unfollow(u2)
        db.session.commit()
        self.assertFalse(u1.is_following(u2))
        self.assertEqual(u1.followed.count(), 0)
        self.assertEqual(u2.followers.count(), 0)

    def test_follow_posts(self):
        """followed_posts() returns own + followed posts, newest first."""
        # create four users
        u1 = User(username='john', email='john@example.com')
        u2 = User(username='susan', email='susan@example.com')
        u3 = User(username='mary', email='mary@example.com')
        u4 = User(username='david', email='david@example.com')
        db.session.add_all([u1, u2, u3, u4])

        # create four posts, with staggered timestamps so ordering is testable
        now = datetime.utcnow()
        p1 = Post(body="post from john", author=u1,
                  timestamp=now + timedelta(seconds=1))
        p2 = Post(body="post from susan", author=u2,
                  timestamp=now + timedelta(seconds=4))
        p3 = Post(body="post from mary", author=u3,
                  timestamp=now + timedelta(seconds=3))
        p4 = Post(body="post from david", author=u4,
                  timestamp=now + timedelta(seconds=2))
        db.session.add_all([p1, p2, p3, p4])
        db.session.commit()

        # setup the followers
        u1.follow(u2)  # john follows susan
        u1.follow(u4)  # john follows david
        u2.follow(u3)  # susan follows mary
        u3.follow(u4)  # mary follows david
        db.session.commit()

        # check the followed posts of each user
        f1 = u1.followed_posts().all()
        f2 = u2.followed_posts().all()
        f3 = u3.followed_posts().all()
        f4 = u4.followed_posts().all()
        self.assertEqual(f1, [p2, p4, p1])
        self.assertEqual(f2, [p2, p3])
        self.assertEqual(f3, [p3, p4])
        self.assertEqual(f4, [p4])
if __name__ == '__main__':
    # verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
| [
"jpchibole@gmail.com"
] | jpchibole@gmail.com |
75b1c181f64e2a96ecbe839591084accf82ff610 | cf2b5de53e5c66238fd1bebd7f05b76d2a926f40 | /challenges/codility/lessons/q016/distinct_test.py | 657793d2ce018ba9eac3db63476a71b79dc4dcb5 | [
"MIT"
] | permissive | Joeffison/coding_challenges | 1a7b944cbea4304d482ab2b25e7cd79aed62267d | 0f0c5c2c3dad3a5aabfb66d66c5b6a89bff374ea | refs/heads/master | 2021-01-24T12:41:51.570981 | 2018-10-28T17:56:04 | 2018-10-28T17:56:04 | 123,147,999 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | #!/usr/bin/env python3
import random
import unittest
import numpy as np
from challenges.codility.lessons.q016.distinct_v001 import *
# Bounds from the Codility problem statement: maximum array length and
# the allowed element value range.
MAX_N = 100000
MIN_ELEMENT = -1000000
MAX_ELEMENT = 1000000
class DistinctTestCase(unittest.TestCase):
def test_description_examples(self):
self.assertEqual(3, solution([2, 1, 1, 2, 3, 1]))
# Correctness
def test_extreme_empty(self):
# empty sequence
self.assertEqual(0, solution([]))
def test_extreme_single(self):
# sequence of one element
self.assertEqual(1, solution([2]))
self.assertEqual(1, solution([0]))
def test_extreme_two_elems(self):
# sequence of two distinct elements
self.assertEqual(2, solution([2, 1]))
def test_extreme_one_value(self):
# sequence of 10 equal elements
self.assertEqual(1, solution([10]*10))
def test_extreme_negative(self):
# sequence of negative elements, length=5
self.assertEqual(4, solution([-1, MIN_ELEMENT, MIN_ELEMENT, -2, -3]))
def test_extreme_big_values(self):
# sequence with big values, length=5
n = 5
self.assertEqual(n, solution([MAX_ELEMENT - i for i in range(n)]))
def test_medium1(self):
# chaotic sequence of values from [0..1K], length=100
self.__test_chaotic(100, 0, 1000)
def test_medium2(self):
# chaotic sequence of values from [0..1K], length=200
self.__test_chaotic(200, 0, 1000)
def test_medium3(self):
# chaotic sequence of values from [0..10], length=200
self.__test_chaotic(200, 0, 10)
# Performance
def test_large1(self):
# chaotic sequence of values from [0..100K], length = 10K
self.__test_chaotic(10000, 0, 100000)
def test_large_random1(self):
# chaotic sequence of values from [-1M..1M], length=100K
self.__test_chaotic(MAX_N, MIN_ELEMENT, MAX_ELEMENT)
def test_large_random2(self):
# another chaotic sequence of values from [-1M..1M], length=100K
self.__test_chaotic(MAX_N, MIN_ELEMENT, MAX_ELEMENT)
# Utils
@staticmethod
def __brute_solution(array):
if array:
array.sort()
count = 1
for i in range(1, len(array)):
if array[i] != array[i - 1]:
count += 1
return count
else:
return 0
def __test_sequence(self, n=100, shuffled=True):
l = list(range(n))
if shuffled:
random.shuffle(l)
with self.subTest(n=n):
self.assertEqual(n, solution(l))
def __test_chaotic(self, n, min_value, max_value):
array = list(np.random.random_integers(min_value, max_value, n))
with self.subTest(n=n):
self.assertEqual(self.__brute_solution(array), solution(array))
if __name__ == '__main__':
    # Run the whole suite when executed directly.
    unittest.main()
| [
"joeffison@gmail.com"
] | joeffison@gmail.com |
4b968d9144a0bdaac6149c0dd9b0fc065d9a732f | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /visitcountermacro/0.10/visitcounter/__init__.py | cc01602b681d5594a67a2baecf013cd1e5620f73 | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | from visitcounter import *
| [
"Blackhex@7322e99d-02ea-0310-aa39-e9a107903beb"
] | Blackhex@7322e99d-02ea-0310-aa39-e9a107903beb |
89dbae7928bc21a347120c229dc3439bce749e4e | 80d56c75aa74994b2a4ddd17b77fd374f891a487 | /leetcode/capacity-to-ship.py | 34f55e6a2fd397f3b75a521f26a4bb983578df22 | [] | no_license | multiojuice/solved | 97617eef92ae2f0539ca1fdcdc96b2ac8de6a31a | 39bc43980c0ff4f628e5bb6945f6697c52649d57 | refs/heads/master | 2020-04-01T12:33:06.169355 | 2019-12-05T23:49:22 | 2019-12-05T23:49:22 | 153,212,726 | 0 | 0 | null | 2018-10-26T06:45:03 | 2018-10-16T02:49:01 | Python | UTF-8 | Python | false | false | 949 | py | def shipWithinDays(weights, D) -> int:
min_weight = max(weights)
while True:
shipping_days = [0] * D
current_day = 0
finished = True
small_weight = 999999999
for elm in weights:
if (shipping_days[current_day] + elm <= min_weight):
shipping_days[current_day] += elm
elif current_day+1 < D:
if shipping_days[current_day] + elm < small_weight:
small_weight = shipping_days[current_day] + elm
current_day += 1
shipping_days[current_day] += elm
else:
if shipping_days[current_day] + elm < small_weight:
small_weight = shipping_days[current_day] + elm
min_weight = small_weight
finished = False
break
if finished:
return min_weight
print(shipWithinDays([1,2,3,4,5,6,7,8,9,10], 1)) | [
"multiojuice@gmail.com"
] | multiojuice@gmail.com |
4bbfd3063d60db8bdd0ba24404b6cba6e8214f32 | d916a3a68980aaed1d468f30eb0c11bfb04d8def | /2021_06_14_Linked_list_cycle.py | 2cfffe4d21e1cf1685d43336acfba01f596912c7 | [] | no_license | trinhgliedt/Algo_Practice | 32aff29ca6dc14f9c74308af1d7eaaf0167e1f72 | 480de9be082fdcbcafe68e2cd5fd819dc7815e64 | refs/heads/master | 2023-07-10T23:49:16.519671 | 2021-08-11T05:11:34 | 2021-08-11T05:11:34 | 307,757,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | # https://leetcode.com/problems/linked-list-cycle/
# Given head, the head of a linked list, determine if the linked list has a cycle in it.
# There is a cycle in a linked list if there is some node in the list that can be reached again by continuously following the next pointer. Internally, pos is used to denote the index of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.
# Return true if there is a cycle in the linked list. Otherwise, return false.
# Example 1:
# Input: head = [3,2,0,-4], pos = 1
# Output: true
# Explanation: There is a cycle in the linked list, where the tail connects to the 1st node (0-indexed).
# Example 2:
# Input: head = [1,2], pos = 0
# Output: true
# Explanation: There is a cycle in the linked list, where the tail connects to the 0th node.
# Example 3:
# Input: head = [1], pos = -1
# Output: false
# Explanation: There is no cycle in the linked list.
# Constraints:
# The number of the nodes in the list is in the range [0, 104].
# -105 <= Node.val <= 105
# pos is -1 or a valid index in the linked-list.
# Follow up: Can you solve it using O(1) (i.e. constant) memory?
# Definition for singly-linked list.
from typing import List
class ListNode:
    """A singly linked list node (LeetCode's standard definition)."""
    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # successor node; None at the tail
class Solution:
    """Cycle detection in a singly linked list (LeetCode 141)."""

    def hasCycle(self, head: ListNode) -> bool:
        """Return True iff the list starting at `head` contains a cycle.

        Floyd's tortoise-and-hare: one pointer advances two nodes per
        step, the other one; they can only meet again if the list loops
        back on itself. O(n) time, O(1) extra space.
        """
        slow = head
        fast = head
        while fast is not None and fast.next is not None and slow is not None:
            fast = fast.next.next
            slow = slow.next
            if slow is fast:
                return True
        return False
s = Solution()
node1 = ListNode(1)
node5 = ListNode(5)
node11 = ListNode(11)
node8 = ListNode(8)
node9 = ListNode(9)
# Chain 1 -> 5 -> 11 -> 8 -> 9, then loop 9 back to 5 to create a cycle.
node1.next = node5
node5.next = node11
node11.next = node8
node8.next = node9
node9.next = node5
answer = s.hasCycle(node1)
# Expected output: True (the list contains a cycle).
print(answer)
| [
"chuot2008@gmail.com"
] | chuot2008@gmail.com |
7641a1c1f9068abb40afb542114f32591bf63472 | f645ebae84e973cb42cffbe7f1d112ff2e3b0597 | /no/edgebox_final/edgebox_final/settings.py | 8cc80e236c92caef201e903858278cbcd6d1bf38 | [] | no_license | bopopescu/file_trans | 709ce437e7aa8ce15136aa6be2f5d696261c30bd | fadc3faf6473539ed083ccd380df92f43115f315 | refs/heads/master | 2022-11-19T18:54:17.868828 | 2020-03-11T04:30:41 | 2020-03-11T04:30:41 | 280,964,974 | 0 | 0 | null | 2020-07-19T22:57:41 | 2020-07-19T22:57:40 | null | UTF-8 | Python | false | false | 3,754 | py | """
Django settings for edgebox_final project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; in a real
# deployment it should be loaded from the environment instead.
SECRET_KEY = 'i(67r0=ud0l6ti(1sr&d0)m6fl6+_^bus41y&h92%i_ynp(-ov'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# NOTE(review): "*" accepts any Host header; restrict this to the real
# hostnames before exposing the service publicly.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"Agent",
"Device",
"Drive",
"SmartDevice",
'djcelery',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'edgebox_final.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'edgebox_final.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Cache backend: Redis database 10 on localhost, via django-redis.
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://127.0.0.1:6379/10",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }
}

# Django REST framework: pagination and API versioning settings.
REST_FRAMEWORK = {
    # Enable DRF's namespace-based API version control.
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 5
}
from .celery_config import * | [
"871488533@qq.com"
] | 871488533@qq.com |
abc88a164d1c63b1a6859acf108ce1707522ff80 | 8f2192867087a15ea3e9b01153eda4abb124a777 | /zad1.py | 824a18ce79e8569d94c96788e67dd6990d9e9015 | [] | no_license | MateuszGrabuszynski/aem1 | 886d7baecc0be50c51235b212020c7552b18334e | 64e49a90f6fa8be8a4ff3881f4a89c19577870c4 | refs/heads/master | 2023-02-08T18:13:59.531961 | 2023-01-28T16:31:02 | 2023-01-28T16:31:02 | 176,085,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,449 | py | import random as rd
import pandas as pd
import time
start_time = time.time()

# Problem size: partition POINTS points into GROUPS groups.
GROUPS = 10
POINTS = 201

#rd.seed(65)

# Read distances
# Each column i holds the distances from point i to every other point.
distances = pd.read_csv('distances.data', usecols=range(0, POINTS + 1))  # usecols=Omit the index column
distances_cpy = distances.copy()

# cols[i] = the distance column for point i, pre-sorted ascending.
cols = []
for i in range(POINTS):
    col = distances_cpy.sort_values(by=[str(i)])
    cols.append(col[str(i)])  # .iloc[1:])

# Points that are still available as endings
not_available_ending_points = []
points = list(range(0, POINTS))
groups = list(range(0, GROUPS))
# NOTE(review): dict.fromkeys(groups, list()) shares one list between all
# keys, but it is harmless here because every key is reassigned below.
dictionaries = dict.fromkeys(groups, list())
copy_point = points.copy()

# Seed each group with a distinct random starting point, stored as a
# degenerate (point, point) edge.
for group in dictionaries.keys():
    value = rd.choice(copy_point)
    dictionaries[group] = [(value, value)]
    # print(copy_point)
    copy_point.remove(value)
    # print(copy_point)
    not_available_ending_points.append(value)
# Grow the groups greedily: repeatedly find the globally shortest edge
# from any already-assigned point to any unassigned point, and attach
# that point to the corresponding group (a multi-source, Prim-like
# expansion).
while len(not_available_ending_points) < POINTS:
    current_shortest_edge = {
        'group': None,
        'from_point': None,
        'to_point': None,
        'distance': None
    }
    for group in dictionaries.keys():
        for _, point_in_group in dictionaries[group]:
            # BUG FIX: Series.iteritems() was removed in pandas 2.0;
            # Series.items() is the supported equivalent.
            for new_point, dist in cols[point_in_group].sort_values(ascending=True).items():
                if new_point not in not_available_ending_points:
                    if current_shortest_edge['distance'] is None or dist < current_shortest_edge['distance']:
                        current_shortest_edge['group'] = group
                        current_shortest_edge['from_point'] = point_in_group
                        current_shortest_edge['to_point'] = new_point
                        current_shortest_edge['distance'] = dist
                    # The series is sorted ascending, so the first still-
                    # available candidate is already the minimum for this
                    # source point - no need to scan the rest.
                    break
    if current_shortest_edge['to_point'] is not None:
        not_available_ending_points.append(current_shortest_edge['to_point'])
        # Drop the newly claimed point from every assigned point's
        # candidate column so it is never considered again.
        for group in dictionaries.keys():
            for _, point_in_group in dictionaries[group]:
                cols[point_in_group].drop(current_shortest_edge['to_point'], inplace=True)
        print("Group:", current_shortest_edge['group'],
              "From point:", current_shortest_edge['from_point'],
              "To point:", current_shortest_edge['to_point'],
              "Distance", current_shortest_edge['distance'])
        dictionaries[current_shortest_edge['group']].append(
            (current_shortest_edge['from_point'], current_shortest_edge['to_point']))

print("dicti", dictionaries)

elapsed_time = time.time() - start_time
print(elapsed_time)
# --- Visualisation: draw every group's edges in its own colour. ---
import matplotlib.pyplot as plt
import pandas as pd

# Point coordinates, one (X, Y) row per point.
points = pd.read_csv('objects.data', sep=" ", header=None, usecols=[0, 1])
points.columns = ['X', 'Y']

x = []
y = []
clrs = []
# One colour per group (GROUPS == 10 matches the palette size).
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray',
          'tab:olive', 'tab:cyan']

edges = dictionaries

for i in range(len(points)):
    x.append(points.iloc[i][0])
    y.append(points.iloc[i][1])
    clrs.append(colors[0])

# Draw each group's edges; clr_nr selects the group's colour.
clr_nr = 0
for grp in edges.values():
    for case in grp:
        pointa_x = x[case[0]]
        pointa_y = y[case[0]]
        pointb_x = x[case[1]]
        pointb_y = y[case[1]]
        plt.plot([pointa_x, pointb_x], [pointa_y, pointb_y], c=colors[clr_nr])#, marker='o')
    clr_nr += 1

# plt.scatter(
#     x,
#     y,
#     c=clrs
# )
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
| [
"jaroslaw.wieczorek@sealcode.org"
] | jaroslaw.wieczorek@sealcode.org |
c310f33e1c8dbb6251814466ec5e07be15b0a61f | 637fe43cb3b858be426e9b9ce10485430ae1f146 | /fsleyes/gl/gl14/glmask_funcs.py | 3d9bfb590cf1c45bff2b61b253fd436eaac571e6 | [
"BSD-3-Clause",
"CC-BY-3.0",
"Apache-2.0"
] | permissive | laurenpan02/fsleyes | 9dda45c1b1b77f0f042488ddf40fed46e5c77360 | eed8940d422994b6c1f1787381ebac2361b81408 | refs/heads/master | 2023-03-11T16:49:16.994945 | 2021-02-25T18:07:39 | 2021-02-25T18:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | py | #!/usr/bin/env python
#
# glmask_funcs.py - OpenGL 1.4 functions used by the GLMask class.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module provides functions which are used by the :class:`.GLMask`
class to render :class:`.Image` overlays in an OpenGL 1.4 compatible manner.
"""
import fsleyes.gl.shaders as shaders
from . import glvolume_funcs
def init(self):
    """Calls the :func:`compileShaders` and :func:`updateShaderState`
    functions, creating the shader program used to render the mask.
    """
    # compileShaders expects self.shader to exist (it destroys any
    # previous program), so initialise it first.
    self.shader = None

    compileShaders( self)
    updateShaderState(self)
def destroy(self):
    """Destroys the shader program and clears the reference to it. """
    self.shader.destroy()
    self.shader = None
def compileShaders(self):
    """Creates the :class:`.ARBPShader` program used to render the mask,
    loading the vertex/fragment shader sources, and destroying any
    previously compiled program first.
    """
    previous = self.shader
    if previous is not None:
        previous.destroy()

    self.shader = shaders.ARBPShader(
        shaders.getVertexShader('glvolume'),
        shaders.getFragmentShader('glmask'),
        shaders.getShaderDir(),
        {'imageTexture': 0})
def updateShaderState(self):
    """Updates all shader program variables from the current display
    settings.

    Returns ``True`` if the shader state was updated, ``False`` if the
    overlay was not ready. (CONSISTENCY FIX: the not-ready path used to
    fall off the end and implicitly return ``None``; ``False`` keeps the
    return type uniform, and both are falsy for existing callers.)
    """

    if not self.ready():
        return False

    opts   = self.opts
    shader = self.shader

    colour    = self.getColour()
    threshold = list(self.getThreshold())

    # The third element is an invert flag consumed by the fragment
    # program (+1 inverts the threshold test, -1 leaves it as-is); the
    # fourth element pads the parameter out - presumably to a 4-vector
    # as ARB fragment program parameters require. TODO confirm against
    # the glmask fragment shader source.
    if opts.invert: threshold += [ 1, 0]
    else:           threshold += [-1, 0]

    shader.load()
    shader.setFragParam('threshold', threshold)
    shader.setFragParam('colour',    colour)
    shader.unload()

    return True
def draw2D(self, zpos, axes, xform=None, bbox=None):
    """Draws a 2D slice at ``zpos`` by delegating to
    :func:`.gl14.glvolume_funcs.draw2D`, with this overlay's shader
    program and its vertex attributes made active for the draw.
    """
    shader = self.shader
    shader.load()
    shader.loadAtts()
    glvolume_funcs.draw2D(self, zpos, axes, xform, bbox)
    shader.unloadAtts()
    shader.unload()
def drawAll(self, axes, zposes, xforms):
    """Draws all of the specified slices by delegating to
    :func:`.gl14.glvolume_funcs.drawAll`, with this overlay's shader
    program and its vertex attributes made active for the draw.
    """
    shader = self.shader
    shader.load()
    shader.loadAtts()
    glvolume_funcs.drawAll(self, axes, zposes, xforms)
    shader.unloadAtts()
    shader.unload()
| [
"pauldmccarthy@gmail.com"
] | pauldmccarthy@gmail.com |
217bd2af0238293662a1d0bef1aaf8b835af57ff | a4830a0189c325c35c9021479a5958ec870a2e8b | /lib/pyutil/django/mixins.py | 1f6e5aee271e26288ffc4fda4263d7ba951ea772 | [] | no_license | solutionprovider9174/steward | 044c7d299a625108824c854839ac41f51d2ca3fd | fd681593a9d2d339aab0f6f3688412d71cd2ae32 | refs/heads/master | 2022-12-11T06:45:04.544838 | 2020-08-21T02:56:55 | 2020-08-21T02:56:55 | 289,162,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | # Django
from django.http import JsonResponse
from django.forms import BaseFormSet, formset_factory
from django.forms.models import model_to_dict
from django.views.generic.edit import FormMixin
from django.core.exceptions import ImproperlyConfigured
from django.views.generic.detail import SingleObjectTemplateResponseMixin
class JSONResponseMixin(object):
    """
    A mixin that can be used to render a JSON response.
    """
    def render_to_response(self, context, **response_kwargs):
        """
        Returns a JSON response, transforming 'context' to make the payload.

        Any `response_kwargs` are forwarded to :class:`JsonResponse`
        (e.g. ``status``).
        """
        return JsonResponse(
            self.get_data(context),
            **response_kwargs
        )

    def get_data(self, context):
        """
        Returns an object that will be serialized as JSON by json.dumps().
        """
        # Note: This is *EXTREMELY* naive; in reality, you'll need
        # to do much more complex handling to ensure that arbitrary
        # objects -- such as Django model instances or querysets
        # -- can be serialized as JSON.
        return context
class JSONModelMixin(object):
    """
    A mixin that can be used to render a Model as a JSON response.

    JSON is returned for AJAX requests or when ``?format=json`` is given;
    otherwise the normal template response is rendered.
    """
    def render_to_response(self, context):
        # NOTE(review): HttpRequest.is_ajax() was removed in Django 4.0;
        # confirm the project's Django version before upgrading.
        if self.request.is_ajax() or self.request.GET.get('format') == 'json':
            # Serialize the single object via model_to_dict.
            return JSONResponseMixin.render_to_response(self, model_to_dict(self.get_object()))
        else:
            return SingleObjectTemplateResponseMixin.render_to_response(self, context)
class ProcessFormMixin(FormMixin):
    """
    Handles POST requests, instantiating a form instance (and, optionally,
    a formset) with the passed POST variables and then checking them for
    validity.

    Subclasses must provide ``form_valid(form, formset)`` accepting the
    extra ``formset`` argument (``None`` when no ``formset_class`` is
    configured), since Django's base ``FormMixin.form_valid`` only takes
    the form.
    """
    formset_class = None   # form class used to build the optional formset
    formset_extra = 0      # number of extra blank forms in the formset

    def get_formset_class(self):
        """Return the form class used to build the formset, or None."""
        return self.formset_class

    def form_invalid(self, form, formset):
        """Re-render the page with both form and formset errors."""
        return self.render_to_response(self.get_context_data(form=form, formset=formset))

    def get_formset(self, formset_class=None, formset_extra=None):
        """Build and return the bound formset, or None if not configured."""
        if formset_class is None:
            formset_class = self.get_formset_class()
        if formset_extra is None:
            formset_extra = self.formset_extra
        if formset_class is None:
            return None
        formset = formset_factory(formset_class, extra=formset_extra)
        return formset(**self.get_form_kwargs())

    def get_context_data(self, **kwargs):
        if 'formset' not in kwargs:
            kwargs['formset'] = self.get_formset()
        return super(ProcessFormMixin, self).get_context_data(**kwargs)

    def post(self, request, *args, **kwargs):
        form = self.get_form()
        formset = self.get_formset()
        if formset:
            if form.is_valid() and formset.is_valid():
                return self.form_valid(form, formset)
            # BUG FIX: previously the invalid case fell through and the
            # view returned None, producing Django's "view didn't return
            # an HttpResponse" error instead of re-rendering the errors.
            return self.form_invalid(form, formset)
        if form.is_valid():
            return self.form_valid(form, None)
        # BUG FIX: same missing-return problem on the no-formset path.
        return self.form_invalid(form, None)
| [
"guangchengwang9174@yandex.com"
] | guangchengwang9174@yandex.com |
d9407c9366f1d45a1762fd66718e8925a40ced24 | fd1a1e72350a189e68a99287483a5aa725c2f37c | /assignment5/wiki_race_challenge.py | 62549a8ba0537ee2c26c256e54b414b1289dd4e3 | [] | no_license | j-hermansen/in4110 | 106909ccdaa5a4b5b395799151cc7037498a83b5 | f175c342e7df0a393236c8e9dadca5ba538a373a | refs/heads/master | 2023-04-14T01:49:43.987222 | 2021-04-22T06:08:14 | 2021-04-22T06:08:14 | 360,410,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,922 | py | import os
import time
from collections import deque
from requesting_urls import get_html
from filter_urls import find_urls
from filter_urls import find_articles
def shortest_path(start, end):
    """Find the shortest chain of Wikipedia article links from start to end.

    Performs a breadth-first search (BFS): pages are visited in FIFO
    order, so the first time ``end`` is seen, the accumulated path is
    guaranteed to be a shortest one.

    :param start: URL to start from.
    :param end: Target URL.
    :return: List of URLs from ``start`` to ``end`` inclusive, or None
        when every reachable article has been checked without success.
    """
    # EDGE CASE FIX: the original only succeeded for start == end if the
    # page happened to link to itself; return the trivial path directly.
    if start == end:
        return [start]

    path = {start: [start]}   # url -> shortest known path to that url
    Q = deque([start])        # FIFO frontier of pages left to visit

    while Q:
        page = Q.popleft()
        print(page)  # progress indicator for long searches
        # get_html() returns (base_url, html_content) — confirmed by the
        # original's indexing; keep positional access in case more fields exist.
        html_content = get_html(page)
        links = find_urls(html_content[1], base_url=html_content[0])
        articles = find_articles(links, language='en')

        for article in articles:
            if article == end:
                return path[page] + [article]  # BFS => shortest path found
            # `article not in path` also covers article == page,
            # since page is always already in path.
            if article not in path:
                path[article] = path[page] + [article]
                Q.append(article)

    return None  # exhausted all reachable articles
def result(start, end, path):
    """Package a path-search outcome as ``[start, end, path-or-None]``.

    :param start: URL the search started from.
    :param end: Target URL.
    :param path: List of URLs in the found path, or a falsy value
        (None, empty list) when no path was found.
    :return: ``[start, end, normalized_path]`` where the last element is
        None whenever ``path`` is falsy.
    """
    # FIX: the original shadowed this function's own name with a local
    # variable; normalize in one step instead.
    found = path if path else None
    return [start, end, found]
if __name__ == '__main__':
    start_time = time.time()
    start = 'https://en.wikipedia.org/wiki/Nobel_Prize'
    # end = 'https://en.wikipedia.org/wiki/Array_data_structure'
    end = 'https://en.wikipedia.org/wiki/Natural_science'

    path = shortest_path(start, end)
    # Renamed from `result` so the helper function is not shadowed.
    outcome = result(start, end, path)
    totaltime = time.time() - start_time

    # Write a small report; exist_ok avoids the racy exists()/makedirs() pair.
    os.makedirs('wiki_race_challenge', exist_ok=True)
    # `with` guarantees the file is closed even if a write fails.
    with open("wiki_race_challenge/shortest_path.txt", 'w', encoding='utf-8') as file:
        file.write('Wiki Race Challenge finished in:\n\t{} seconds\n'.format(totaltime))
        file.write('Start Url:\n\t{}\n'.format(outcome[0]))
        file.write('Target Url:\n\t{}\n'.format(outcome[1]))
        if outcome[2] is not None:
            # BUG FIX: len() was previously called unconditionally, so a
            # failed search raised TypeError on None before reporting.
            file.write('Number of steps in Shortest path:\n\t{}\n'.format(len(outcome[2])))
            file.write('Links (articles) visited:\n')
            for article in outcome[2]:
                file.write('\t{}\n'.format(article))
        else:
            file.write('Links (articles) visited:\n')
            file.write("Did not find a path.")
"jhermansen@live.no"
] | jhermansen@live.no |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.