index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
993,100 | a3ffd918557e8c9f56a41195d2feb35a7e5ae975 | frase = str(input('Digite uma frase: ')).strip()
# Report how many times the letter "A" occurs in `frase` and where.
fraseu = frase.upper()  # uppercase copy so counting/searching is case-insensitive
# NOTE(review): "aparececeu" in the output string is a typo for "apareceu".
print('A letra "A" aparececeu {} vezes'.format(fraseu.count('A')))
# str.find returns the index of the first occurrence (-1 if absent)
print('A primeira letra A apareceu na posição de índice {}'.format(fraseu.find('A')))
# str.rfind returns the index of the last occurrence (-1 if absent)
print('A posição da última letra A é {}'.format(fraseu.rfind('A')))
|
993,101 | d93969787e72c446f6037b7c0e65681989506531 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 28 15:41:31 2018
速度预测
利用前面的TIMESTEPS个数据预测接下来的PREDICT_STEPS个数据(仅测试)
@author: lankuohsing
"""
# In[]
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import pandas as pd
import matplotlib as mpl
from sklearn.preprocessing import MinMaxScaler
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
mpl.use('Agg')
from matplotlib import pyplot as plt
# In[]
import shutil
import os
# Directory where model checkpoints are stored.
MODEL_PATH="Models/model_velocity"
"""
if not os.path.exists(MODEL_PATH): ###判断文件是否存在,返回布尔值
os.makedirs(MODEL_PATH)
shutil.rmtree(MODEL_PATH)
"""
# In[]
# Read the raw velocity series from Excel (first sheet, no header row).
# NOTE(review): `sheetname=` and `.as_matrix()` are removed in modern pandas
# (use `sheet_name=` / `.values`); kept as-is for the pinned TF1-era stack.
data_DataFrame=pd.read_excel("EUDC_velocity_2.xlsx",sheetname=0,header=None)
data=data_DataFrame.as_matrix()
#data=data[::-1]
# In[]
data_length=data.shape[0]
train_length=int(data_length*0.7)  # 70/30 train/test split
test_length=data_length-train_length
# In[]
# Normalize the data into feature_range with MinMaxScaler.
#normalize_data=(data-np.mean(data))/np.std(data)
feature_range=(0,1)
scaler = MinMaxScaler(copy=True,feature_range=feature_range)#copy=True keeps the original data matrix intact
normalize_data=scaler.fit_transform(data.reshape((data.shape[0],1))).flatten()
# In[]
"""
Hyperparameters
"""
learn = tf.contrib.learn
HIDDEN_SIZE = 1 # number of hidden units in each LSTM cell
NUM_LAYERS = 1 # number of stacked LSTM layers
TIMESTEPS = 5 # RNN truncation length, i.e. the input sequence length
TRAINING_STEPS = 5000 # number of training steps
BATCH_SIZE = 100 # batch size
PREDICT_STEPS=5 # points predicted per window, i.e. the output sequence length
# In[]
# 根据输入序列,切割出输入数据和标签。利用前面的TIMESTEPS项预测后面的PREDICT_STEPS项
def generate_data(seq):
    """Slice `seq` into supervised windows.

    Each sample uses TIMESTEPS consecutive points as the input and the
    following PREDICT_STEPS points as the target. Returns float32 arrays
    of shape (n, 1, TIMESTEPS) and (n, 1, PREDICT_STEPS).
    """
    inputs, targets = [], []
    last_start = len(seq) - TIMESTEPS - (PREDICT_STEPS - 1)
    for start in range(last_start):
        inputs.append([seq[start:start + TIMESTEPS]])
        targets.append([seq[start + TIMESTEPS:start + TIMESTEPS + PREDICT_STEPS]])
    return np.array(inputs, dtype=np.float32), np.array(targets, dtype=np.float32)
def LstmCell():
    """Construct a single BasicLSTMCell with HIDDEN_SIZE units."""
    return rnn.BasicLSTMCell(num_units=HIDDEN_SIZE, forget_bias=1.0,
                             state_is_tuple=True)
# Define the LSTM model (model_fn for tf.contrib.learn.Estimator).
def lstm_model(X, y):
    """Build the graph: stacked LSTM -> linear head -> MSE loss.

    Returns (predictions, loss, train_op) as required by Estimator.
    """
    cell = rnn.MultiRNNCell([LstmCell() for _ in range(NUM_LAYERS)])
    print("X.shape:",X.shape)
    outputs, final_state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    print("outputs.shape:",outputs.shape)
    #print("final_state.shape:",final_state[0].dtype)
    # Keep only the last PREDICT_STEPS time steps and flatten to (N, HIDDEN_SIZE).
    output = tf.reshape(outputs[:,TIMESTEPS-PREDICT_STEPS:TIMESTEPS,:], [-1, HIDDEN_SIZE])
    # Linear regression through a fully connected layer with no activation.
    # No softmax at the end: this is regression, not classification.
    predictions = tf.contrib.layers.fully_connected(output, 1, None)
    # Reshape predictions and labels to a common flat shape.
    labels = tf.reshape(y, [-1])
    predictions = tf.reshape(predictions, [-1])
    print("predictions.shape:",predictions.shape)
    print("labels.shape:",labels.shape)
    loss = tf.losses.mean_squared_error(predictions, labels)
    train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(),
                                               optimizer="Adagrad",
                                               learning_rate=0.1)
    return predictions, loss, train_op
# In[]
# Training
# Wrap the lstm model defined above in a scikit-learn-compatible estimator.
regressor = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir=MODEL_PATH))
#regressor = learn.Estimator(model_fn=lstm_model, model_dir=MODEL_PATH)
# Generate the windowed train/test data.
train_X, train_y = generate_data(normalize_data[0:train_length])
test_X, test_y = generate_data(normalize_data[train_length:data_length])
# Reorder axes to (batch, time, feature) for dynamic_rnn.
train_X=np.transpose(train_X,[0,2,1])
train_y=np.transpose(train_y,[0,2,1])
test_X=np.transpose(test_X,[0,2,1])
test_y=np.transpose(test_y,[0,2,1])
# Fit the model
# In[]
#regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
# Compute predictions on the test set
# In[]
#predicted = [[pred] for pred in regressor.predict(test_X)]
regressor.score(test_X,test_y)
predicted_list = list(regressor.predict(test_X))
def final_data_for_plot(predicted_list, test_y):
    """Select non-overlapping prediction windows for plotting.

    Keeps every window whose start index is a multiple of PREDICT_STEPS**2
    and returns (predictions, labels) as (k, 1) column arrays.
    """
    labels_flat = test_y.reshape(test_y.shape[0] * test_y.shape[1], 1).tolist()
    picked_pred = []
    picked_true = []
    for start in range(0, len(predicted_list) - PREDICT_STEPS + 1):
        if start % (PREDICT_STEPS * PREDICT_STEPS) == 0:
            picked_pred.extend(predicted_list[start:start + PREDICT_STEPS])
            picked_true.extend(labels_flat[start:start + PREDICT_STEPS])
    final_predicted = np.array(picked_pred).reshape(len(picked_pred), 1)
    final_test_y = np.array(picked_true).reshape(len(picked_true), 1)
    return final_predicted, final_test_y
# In[]
final_predicted, final_test_y=final_data_for_plot(predicted_list,test_y)
# In[]
# Invert the MinMax scaling back to physical units.
final_predicted=(final_predicted-feature_range[0])/(feature_range[1]-feature_range[0])\
*(scaler.data_max_[0]-scaler.data_min_[0])+scaler.data_min_[0]
final_test_y=(final_test_y-feature_range[0])/(feature_range[1]-feature_range[0])\
*(scaler.data_max_[0]-scaler.data_min_[0])+scaler.data_min_[0]
# In[]
# Compute the error. NOTE(review): this is the ROOT mean squared error,
# although the printed label below says "Mean Square Error".
rmse = np.sqrt(((final_predicted - final_test_y) ** 2).mean(axis=0))
print("Mean Square Error is:%f" % rmse[0])
# In[]
# Plot test-set predictions against the real signal and save the figure.
figure1=plt.figure(1)
figure1.set_figheight(5)
figure1.set_figwidth(8)
plot_test, = plt.plot(final_test_y, label='real_sin')
plot_predicted, = plt.plot(final_predicted, label='predicted')
plt.legend([plot_predicted, plot_test],['predicted', 'real_sin'])
x_start=1000
x_end=1060
y_start=-1
y_end=-0.2
#plt.axis([x_start,x_end,y_start,y_end])
plt.savefig('figures/test_'+'TIMESTEPS='+str(TIMESTEPS)+'PREDICT_STEPS='+str(PREDICT_STEPS)+'.png')
plt.show()
# In[]
# Repeat the evaluation and plotting on the training set.
predicted_list = list(regressor.predict(train_X))
final_predicted, final_test_y=final_data_for_plot(predicted_list,train_y)
# In[]
final_predicted=(final_predicted-feature_range[0])/(feature_range[1]-feature_range[0])\
*(scaler.data_max_[0]-scaler.data_min_[0])+scaler.data_min_[0]
final_test_y=(final_test_y-feature_range[0])/(feature_range[1]-feature_range[0])\
*(scaler.data_max_[0]-scaler.data_min_[0])+scaler.data_min_[0]
# In[]
# Compute the (root) mean squared error on the training windows.
rmse = np.sqrt(((final_predicted - final_test_y) ** 2).mean(axis=0))
print("Mean Square Error is:%f" % rmse[0])
# In[]
figure1=plt.figure(1)
figure1.set_figheight(5)
figure1.set_figwidth(8)
plot_test, = plt.plot(final_test_y, label='real_sin')
plot_predicted, = plt.plot(final_predicted, label='predicted')
plt.legend([plot_predicted, plot_test],['predicted', 'real_sin'])
x_start=1000
x_end=1060
y_start=-1
y_end=-0.2
#plt.axis([x_start,x_end,y_start,y_end])
plt.savefig('figures/train_'+'TIMESTEPS='+str(TIMESTEPS)+'PREDICT_STEPS='+str(PREDICT_STEPS)+'.png')
plt.show()
|
993,102 | 7502ad98f1f2ce1279d4724f944b27df9ec6caa1 |
### filter
## filter()函数用于过滤序列
# filter()也接收一个函数和一个序列。filter()把传入的函数依次作用于每个元素,
# 然后根据返回值是True还是False决定保留还是丢弃该元素。
def is_odd(n):
    """Filter predicate: True when the integer n is odd."""
    return bool(n % 2)
# filter() keeps only elements for which the predicate is truthy.
list(filter(is_odd, [1, 2, 4, 5, 6, 9, 10, 15]))
# Result: [1, 5, 9, 15]
## Removing empty strings
def not_empty(s):
    """Truthy only for strings with non-whitespace content.

    Returns s unchanged when it is falsy (None or ''); otherwise returns
    s.strip(), so whitespace-only strings become '' and are filtered out.
    """
    if not s:
        return s
    return s.strip()
list(filter(not_empty, ['A', '', 'B', None, 'C', ' ']))
# 结果: ['A', 'B', 'C']
## filter()函数返回的是一个Iterator,也就是一个惰性序列,
## 所以要强迫filter()完成计算结果,需要用list()函数获得所有结果并返回list
##用filter求素数
def _odd_iter(): #用奇数列排除偶数,缩小范围
n = 1
while True:
n = n + 2
yield n
def _not_divisible(n): #代表 取出不能被之前的数整除的数
return lambda x: x % n > 0 #x之后会由it列的数代替
def primes():
    """Infinite prime generator built from a growing chain of filters.

    Yields 2 first, then repeatedly takes the head of the odd-number
    stream (always prime) and filters out its multiples downstream.
    """
    yield 2
    candidates = _odd_iter()
    while True:
        prime = next(candidates)
        yield prime
        candidates = filter(_not_divisible(prime), candidates)
# Print the primes below 1000:
for n in primes():
    if n < 1000:
        print(n)
    else:
        break
##练习
# 回数是指从左向右读和从右向左读都是一样的数,例如12321,909。请利用filter()筛选出回数:
def is_palindrome(n):
    """True when the decimal digits of n read the same in both directions."""
    digits = str(n)
    reversed_digits = digits[::-1]  # [::-1] walks the string back to front
    return digits == reversed_digits
# Test:
output = filter(is_palindrome, range(1, 1000))
print('1~1000:', list(output))
if list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191]:
    print('测试成功!')
else:
    print('测试失败!')
# filter() selects the elements of a sequence that satisfy a condition.
# Because filter() is lazy, the filtering only actually happens when the
# result is consumed, yielding the next matching element each time.
|
993,103 | c27dc2f129e924bad7dd6c7380f4fae238386f67 | """
Api tests for login and registration user
"""
import json
import allure
import requests
import pytest
from tests_api.config import URL_AUTH, AUTH_PAYLOADS, HEADER
@pytest.mark.usefixtures('delete_user_and_close_conn')
class TestAuth():
    """API tests for the EventExpress login and registration endpoints."""

    @allure.feature("Login admin api")
    @allure.story('Admin have an ability to login in EventExpress site')
    @allure.severity(allure.severity_level.CRITICAL)
    def test_login_admin(self):
        """
        Test for login as admin
        """
        response_decoded_json = requests.post(URL_AUTH['url_login'],
                                              data=json.dumps(AUTH_PAYLOADS['payload_admin']),
                                              headers=HEADER['header'])
        resp = response_decoded_json.json()
        assert "Admin" == resp["role"], "You don't login with correct role"
        assert 200 == response_decoded_json.status_code, "You have BAD REQUEST"

    @allure.feature("Login user api")
    @allure.story('User have an ability to login in EventExpress site')
    @allure.severity(allure.severity_level.CRITICAL)
    def test_login_user(self):
        """
        Test for login as autorize user
        """
        response_decoded_json = requests.post(URL_AUTH['url_login'],
                                              data=json.dumps(AUTH_PAYLOADS['payload_user']),
                                              headers=HEADER['header'])
        resp = response_decoded_json.json()
        assert "User" == resp["role"], "You don't login with correct role"
        assert 200 == response_decoded_json.status_code, "You have BAD REQUEST"

    @allure.feature("Login as unauthorized api")
    @allure.story('Unauthorized user does not have an ability to login in EventExpress site')
    @allure.severity(allure.severity_level.CRITICAL)
    def test_unauthorized_user(self):
        """
        Test for login as notautorize user
        """
        response_decoded_json = requests.post(URL_AUTH['url_login'],
                                              data=json.dumps(AUTH_PAYLOADS['payload_unauth']),
                                              headers=HEADER['header'])
        mes = response_decoded_json.json()
        assert 400 == response_decoded_json.status_code, "You have BAD REQUEST"
        assert "User not found" == mes, "There is unexpected ability to login as unknown user"

    @allure.feature("Register new user api")
    @allure.story('User that is previously registered does not have\
    an ability to register in EventExpress site')
    @allure.severity(allure.severity_level.CRITICAL)
    def test_register_already_exist(self):
        """
        Test for registration user that already exists
        """
        response_decoded_json = requests.post(URL_AUTH['url_register'],
                                              data=json.dumps(AUTH_PAYLOADS['payload_user']),
                                              headers=HEADER['header'])
        mes = response_decoded_json.json()
        assert "Email already exists in database" == mes, "There is no verification of existing email on register"
        assert 400 == response_decoded_json.status_code, "You have BAD REQUEST"

    @allure.feature("Register new user api")
    @allure.story('Every new user have an ability to register in EventExpress site')
    @allure.severity(allure.severity_level.CRITICAL)
    def test_register_new_user(self):
        """
        Test for registration new user
        """
        response_decoded_json = requests.post(URL_AUTH['url_register'],
                                              data=json.dumps(AUTH_PAYLOADS['payload_unauth']),
                                              headers=HEADER['header'])
        assert 200 == response_decoded_json.status_code

    @pytest.mark.skip("there is no correct way to verify by api or db that password changed")
    def test_change_password(self):
        """Change-password flow; skipped until the result can be verified."""
        response_decoded_json = requests.post(URL_AUTH['url_change_password'],
                                              data=json.dumps(AUTH_PAYLOADS['payload_change_password']),
                                              # Fix: was `self['header']`, which raises TypeError
                                              # (the test instance is not subscriptable); every
                                              # sibling test uses the shared HEADER config.
                                              headers=HEADER['header'])
        assert 200 == response_decoded_json.status_code
|
993,104 | 0a745111498707b1f87248c86d2d7d8fc664d89b | #contains all sorting algorithms
'''
FUNCTION bubble
@param nums - list of numbers to sort
@param sz - size of list
@param graph - module for graphing
@param plt - matplotlib plt
@return number of swaps
'''
def bubble(nums, sz, graph, plt, GRAPHICS):
    """Bubble sort `nums` in place; return the number of swaps performed.

    Each pass sinks the largest remaining element to the end of the
    shrinking unsorted prefix. When GRAPHICS is truthy the plot is
    refreshed after every swap.
    """
    swap_count = 0
    upper = len(nums) - 1
    while upper > 0:
        for idx in range(upper):
            if nums[idx] > nums[idx + 1]:
                nums[idx], nums[idx + 1] = nums[idx + 1], nums[idx]
                swap_count += 1
                if GRAPHICS:
                    graph.updateGraph(plt, nums, sz)
                    plt.pause(0.001)
        upper -= 1
    return swap_count
#TODO: Add more sorting algorithms
'''
FUNCTION partion
Quick sort helper function
'''
def partition(nums, begin, end):
    '''
    FUNCTION partition
    Lomuto-style quick sort helper: partitions nums[begin:end+1] around
    the pivot nums[begin] and returns [pivot_index, swap_count].
    '''
    pivot_pos = begin
    moves = 0
    for scan in range(begin + 1, end + 1):
        if nums[scan] <= nums[begin]:
            pivot_pos += 1
            nums[scan], nums[pivot_pos] = nums[pivot_pos], nums[scan]
            moves += 1
    # Place the pivot between the two partitions (counted as one swap).
    nums[pivot_pos], nums[begin] = nums[begin], nums[pivot_pos]
    moves += 1
    return [pivot_pos, moves]
'''
FUNCTION quick
quick sort function
@return number of swaps
'''
def quick(nums, sz, graph, plt, GRAPHICS):
    '''
    FUNCTION quick
    Quick sort `nums` in place.
    @param nums - list of numbers to sort
    @param sz - size of list
    @param graph - module for graphing
    @param plt - matplotlib plt
    @param GRAPHICS - when truthy, redraw the plot after each partition
    @return number of swaps
    Fix: the previous version seeded each recursive call with the running
    swap total and then added the call's return value (which already
    contained that total), so the count was compounded at every level of
    recursion. Each recursive call now returns only its own swaps.
    '''
    def _partition(begin, end):
        # Lomuto partition of nums[begin:end+1] around nums[begin].
        pvt = begin
        swaps = 0
        for i in range(begin + 1, end + 1):
            if nums[i] <= nums[begin]:
                pvt += 1
                nums[i], nums[pvt] = nums[pvt], nums[i]
                swaps += 1
        nums[pvt], nums[begin] = nums[begin], nums[pvt]
        swaps += 1
        return pvt, swaps

    def _quicksort(begin, end):
        # Returns only the swaps performed within this sub-range.
        if begin >= end:
            return 0
        pvt, swaps = _partition(begin, end)
        if GRAPHICS:
            graph.updateGraph(plt, nums, sz)
            plt.pause(0.001)
        swaps += _quicksort(begin, pvt - 1)
        swaps += _quicksort(pvt + 1, end)
        return swaps

    return _quicksort(0, sz - 1)
def insertion(nums, sz, graph, plt, GRAPHICS):
    """Insertion sort `nums` in place; return the number of element shifts."""
    shifts = 0
    for right in range(1, sz):
        key = nums[right]
        slot = right
        # Shift larger elements one position right until key's slot is found.
        while slot > 0 and nums[slot - 1] > key:
            nums[slot] = nums[slot - 1]
            slot -= 1
            shifts += 1
            if GRAPHICS:
                graph.updateGraph(plt, nums, sz)
                plt.pause(0.001)
        nums[slot] = key
    return shifts
def selection(nums, sz, graph, plt, GRAPHICS):
    """Selection sort `nums` in place; return the number of swaps performed."""
    total_swaps = 0
    for pos in range(sz):
        smallest = pos
        # Locate the minimum of the unsorted suffix.
        for cand in range(pos + 1, sz):
            if nums[cand] < nums[smallest]:
                smallest = cand
        if smallest != pos:
            nums[pos], nums[smallest] = nums[smallest], nums[pos]
            total_swaps += 1
            if GRAPHICS:
                graph.updateGraph(plt, nums, sz)
                plt.pause(0.001)
    return total_swaps
def shell(nums, sz, graph, plt, GRAPHICS):
    '''
    FUNCTION shell
    Shell sort `nums` in place using a halving gap sequence.
    @return number of element moves (gapped shifts plus final placements)
    Fix: the old gap list `int(sz / 2**k) for k in range(sz)` contained a
    gap == 0 entry for every k > log2(sz); each zero gap spun through the
    whole list doing no sorting while incrementing the swap counter sz
    times. Only strictly positive gaps are generated now.
    '''
    swaps = 0
    # Halving gap sequence: sz, sz//2, ..., 1 (never 0).
    gaps = []
    gap = sz
    while gap > 0:
        gaps.append(gap)
        gap //= 2
    for gap in gaps:
        for i in range(gap, sz):
            temp = nums[i]
            j = i
            while j >= gap and nums[j-gap] > temp:
                nums[j] = nums[j-gap]
                swaps += 1
                if GRAPHICS:
                    graph.updateGraph(plt, nums, sz)
                    plt.pause(0.001)
                j -= gap
            nums[j] = temp
            swaps += 1
    return swaps
def default_sort(nums, sz, graph, plt, GRAPHICS):
    """Sort `nums` in place using Python's built-in sorted(); returns None."""
    ordered = sorted(nums)
    for idx in range(len(ordered)):
        nums[idx] = ordered[idx]
        if GRAPHICS:
            graph.updateGraph(plt, nums, sz)
            plt.pause(0.001)
'''
FUNCTION merge
recursive merge sort function
@return number of swaps
'''
def merge(nums, sz, graph, plt, GRAPHICS):
    '''
    FUNCTION merge
    Recursive merge sort of `nums` in place (via temporary halves).
    @return count of times a right-half element was placed before
            remaining left-half elements during the merges
    '''
    moved = 0
    if sz <= 1:
        return moved
    mid = sz // 2
    left_half = nums[:mid]
    right_half = nums[mid:]
    moved += merge(left_half, len(left_half), graph, plt, GRAPHICS)
    moved += merge(right_half, len(right_half), graph, plt, GRAPHICS)
    li = ri = out = 0
    # Merge the two sorted halves back into nums.
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] < right_half[ri]:
            nums[out] = left_half[li]
            li += 1
        else:
            nums[out] = right_half[ri]
            ri += 1
            moved += 1
        out += 1
    # Drain whichever half still has elements.
    while li < len(left_half):
        nums[out] = left_half[li]
        li += 1
        out += 1
    while ri < len(right_half):
        nums[out] = right_half[ri]
        ri += 1
        out += 1
    if GRAPHICS:
        graph.updateGraph(plt, nums, sz)
        plt.pause(0.001)
    return moved
'''
FUNCTION inMerge
in-place merge sort function
@return number of swaps
'''
def inMerge(nums, sz, graph, plt, GRAPHICS):
    '''
    FUNCTION inMerge
    Bottom-up in-place merge sort using slice rotation.
    Runs of length `width` are merged pairwise: whenever the head of the
    right run is due, the slice between the two heads is rotated one step
    right to pull it into place.
    @return number of rotations performed
    '''
    width = 1
    rotations = 0
    while width <= sz:
        for left in range(0, sz, width * 2):
            right = min(sz, left + 2 * width)
            middle = left + width
            p, q = left, middle
            while p < middle and q < right:
                if nums[p] < nums[q]:
                    p += 1
                else:
                    # Rotate nums[p:q+1] one step right, moving nums[q] to p.
                    moved = nums[q]
                    nums[p + 1:q + 1] = nums[p:q]
                    nums[p] = moved
                    p += 1
                    middle += 1
                    q += 1
                    rotations += 1
        width *= 2
        if GRAPHICS:
            graph.updateGraph(plt, nums, sz)
            plt.pause(1)
    return rotations
|
993,105 | 0bedb127cf5ca5ab2a3f894e70540aeee99a9831 | from datetime import datetime
from django.db import models
from markdown import markdown
from smartypants import smartyPants
from taggit.managers import TaggableManager
class ArticleManager(models.Manager):
    """Manager exposing a shortcut queryset for published articles."""

    def published(self):
        """Return only articles whose status is PUBLISHED_STATUS."""
        published_status = Article.PUBLISHED_STATUS
        return self.filter(status=published_status)
class Article(models.Model):
    """
    An Article is a writing entry that is translated into markdown.
    """
    DRAFT_STATUS = 1
    PUBLISHED_STATUS = 2
    STATUS_CHOICES = (
        (DRAFT_STATUS, 'Draft'),
        (PUBLISHED_STATUS, 'Published'),
    )
    title = models.CharField(max_length=128)
    slug = models.CharField(max_length=128)
    status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, default=DRAFT_STATUS)
    timestamp_published = models.DateTimeField(null=True, blank=True)
    text_raw = models.TextField(null=True, blank=True)
    text_html = models.TextField(null=True, blank=True)
    tags = TaggableManager(blank=True)
    objects = ArticleManager()

    class Meta:
        ordering = ['-timestamp_published', ]
        get_latest_by = 'timestamp_published'

    def save(self, *args, **kwargs):
        """Stamp first publication time and render markdown before saving.

        Fix: text_raw is null=True/blank=True, so smartyPants(None) used to
        raise TypeError when saving an article with no body; an empty
        string is rendered instead.
        """
        if self.status == Article.PUBLISHED_STATUS and not self.timestamp_published:
            # NOTE(review): datetime.now() is naive; consider timezone.now().
            self.timestamp_published = datetime.now()
        self.text_html = markdown(smartyPants(self.text_raw or ''), output_format='HTML5')
        super(Article, self).save(*args, **kwargs)

    def __unicode__(self):
        # Python 2 style repr; truncated so admin listings stay short.
        return u'<Article: %s>' % self.title[:50]
|
993,106 | 7b53733e5b412e28776ec9028f6ba80673baf575 | from .vpp_papi import FuncWrapper, VPP, VppApiDynamicMethodHolder # noqa: F401
from .vpp_papi import VppEnum, VppEnumType # noqa: F401
from .vpp_papi import VPPIOError, VPPRuntimeError, VPPValueError # noqa: F401
from .vpp_papi import VPPApiClient # noqa: F401
from .vpp_papi import VPPApiJSONFiles # noqa: F401
from . macaddress import MACAddress, mac_pton, mac_ntop # noqa: F401
# sorted lexicographically
from .vpp_serializer import BaseTypes # noqa: F401
from .vpp_serializer import VPPEnumType, VPPType, VPPTypeAlias # noqa: F401
from .vpp_serializer import VPPMessage, VPPUnionType # noqa: F401
|
993,107 | 423efd696401d12ba3f19b4579a2be1c4000c7e3 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
# Launch Chrome with (currently empty) experimental preferences and open Google.
chrome_options = webdriver.ChromeOptions()
prefs = {
    # "profile.managed_default_content_settings.images": 2
}
chrome_options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome('./chromedriver', chrome_options = chrome_options)
driver.get("https://www.google.com/")
# Option 1: click via injected JavaScript
# js = "document.querySelector('.read-more').click()"
# driver.execute_script(js)
# Option 2: locate the element through the driver API
# driver.find_element_by_css_selector('.read-more').click()
# Typing text into the search box:
# Option 1: via JavaScript
js= "document.querySelector('input[name=\"q\"]').value='thinh-sama'"
driver.execute_script(js)
js2= "document.querySelectorAll('center input')[2].click()"
driver.execute_script(js2)
# NOTE(review): index [32] is brittle — it depends on Google's current DOM.
js3 = "document.querySelectorAll('div')[32].click()"
driver.execute_script(js3)
# Option 2: via the driver API
input_search = driver.find_element_by_css_selector('input[name="q"]')
input_search.send_keys('thinh-sama123')
js4 = "document.querySelectorAll('button')[0].click()"
driver.execute_script(js4) |
993,108 | 5d37674fbe67015aa694d9661c05ce944a369898 | import csv
import json
def to_text(fname1, fname2):
    """Append one map-arc record per CSV row (id, name, lat, lon) to fname2.

    Every arc starts at a fixed Columbus, OH origin; the CSV header row
    is skipped.
    """
    with open(fname1, newline='') as source:
        with open(fname2, 'a', newline='') as sink:
            rows = csv.reader(source, delimiter=',')
            next(rows, None)  # skip the header row
            for id, name, latitude, longitude in rows:
                try:
                    record = '{{from:{{name: \'Columbus\', coordinates: [-83.0007065, 39.9622601]}}, to: {{ name: \'{}\', coordinates: [{}, {}]}}'.format(name, longitude, latitude)
                    sink.write(r'{},'.format(record))
                except AttributeError:
                    continue
def to_geoJSON(infile):
    '''Convert a CSV of (city, state, latitude, longitude) rows into a
    GeoJSON FeatureCollection written to GeoObs.json.

    Fix: the previous version called Feature/Point/FeatureCollection,
    names that were never imported anywhere in this module, so it always
    raised NameError. Plain dicts following the GeoJSON structure
    (RFC 7946) serialized with the already-imported json module are used
    instead. Note: unlike the other converters here, no header row is
    skipped (preserved from the original).
    '''
    features = []
    with open(infile, newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for city, state, latitude, longitude in reader:
            latitude, longitude = map(float, (latitude, longitude))
            features.append({
                'type': 'Feature',
                # GeoJSON positions are [longitude, latitude].
                'geometry': {'type': 'Point', 'coordinates': [longitude, latitude]},
                'properties': {
                    'city': city,
                    'state': state
                }
            })
    collection = {'type': 'FeatureCollection', 'features': features}
    with open("GeoObs.json", "w") as f:
        f.write('%s' % json.dumps(collection))
def csvToJSON(infile, outfile):
    """Append one origin->destination arc record per CSV row to outfile.

    The origin is fixed (Columbus, OH); the destination and label come
    from each row. The header row is skipped.
    """
    with open(infile, newline='') as source, open(outfile, 'a') as sink:
        rows = csv.reader(source, delimiter=',')
        next(rows, None)  # skip the header row
        for lname, fname, city, county, state, country, lat, lon, agent in rows:
            try:
                origin = "\"from\": [-83.0007065, 39.9622601]"
                destination = "\"to\":[{}, {}], \"name\": \"{}, {}\"".format(lon, lat, lname, fname)
                sink.write(r'{{{},{}}},'.format(origin, destination))
            except AttributeError:
                continue
def csvToJSONChart(infile, outfile):
    """Append one {"x": freq, "y": "category"} chart point per CSV row.

    Rows with an empty frequency column are skipped, as is the header row.
    """
    with open(infile, newline='') as source:
        with open(outfile, 'a') as sink:
            rows = csv.reader(source, delimiter=',')
            next(rows, None)  # skip the header row
            for cat, freq in rows:
                if freq == '':
                    continue  # no frequency recorded for this category
                try:
                    point = "{{\"x\":{}, \"y\": \"{}\"}}".format(freq, cat)
                    sink.write(r'{},'.format(point))
                except AttributeError:
                    continue
def csvToIssue(textFile, infile, outfile):
    """Merge agent rows with publication dates and append JSON records.

    `textFile` supplies one date per line, matched to the CSV rows of
    `infile` by order; each merged record is appended to `outfile`.
    Note: the date index only advances on successful rows.
    """
    with open(textFile) as datesrc:
        with open(infile, newline='') as source:
            with open(outfile, 'a') as sink:
                rows = csv.reader(source, delimiter=',')
                next(rows, None)  # skip the header row
                date = [line for line in datesrc]  # keeps trailing newlines
                row_no = 0
                for city, state, lat, lon, country, fname, lname, agentType in rows:
                    try:
                        record = '{{"lastName":\"{}\", "firstName":\"{}\", "agentType":\"{}\", "city":\"{}\", "state":\"{}\", "country": \"{}\", "lat":{}, "lon":{}, "pubDate":[{}]}}'.format(
                            lname, fname, agentType, city, state, country, lat, lon, date[row_no])
                        sink.write(r'{},'.format(record))
                        row_no += 1
                    except AttributeError:
                        continue
def subToJSON(infile, outfile):
    """Append one subscriber record per CSV row (fname, lname, cost, date).

    The header row is skipped; records are written as a JSON fragment.
    """
    with open(infile, newline='') as source, open(outfile, 'a') as sink:
        rows = csv.reader(source, delimiter=',')
        next(rows, None)  # skip the header row
        for fname, lname, cost, pubDate in rows:
            try:
                record = '{{"lastName":\"{}\", "firstName":\"{}\", "amount":\"{}\", "pubDate":\"{}\"}}'.format(lname, fname, cost, pubDate)
                sink.write(r'{},'.format(record))
            except AttributeError:
                continue
def pubToJSON(infile, outfile):
    """Append one periodical record per CSV row (pub, loc, ethnic, lat, lon).

    The header row is skipped; records are written as a JSON fragment.
    """
    with open(infile, newline='') as source, open(outfile, 'a') as sink:
        rows = csv.reader(source, delimiter=',')
        next(rows, None)  # skip the header row
        for pub, loc, ethnic, lat, lon in rows:
            try:
                record = '{{"newspaper":\"{}\", "location":\"{}\", "ethnicPeriodical":\"{}\", "lat":{}, "lon":{}}}'.format(pub, loc, ethnic, lat, lon)
                sink.write(r'{},'.format(record))
            except AttributeError:
                continue
def subToChart(infile, outfile):
    """Append one {"x": freq, "y": "category"} point per CSV row.

    Like csvToJSONChart but without the empty-frequency filter; the
    header row is skipped.
    """
    with open(infile, newline='') as source:
        with open(outfile, 'a') as sink:
            rows = csv.reader(source, delimiter=',')
            next(rows, None)  # skip the header row
            for cat, freq in rows:
                try:
                    point = "{{\"x\":{}, \"y\": \"{}\"}}".format(freq, cat)
                    sink.write(r'{},'.format(point))
                except AttributeError:
                    continue
if __name__=="__main__":
    # Convert the hard-coded periodicals CSV into a JSON fragment.
    fname = 'qp_aa.csv'
    textname = 'qp_aa.json'
    pubToJSON(fname, textname)
|
993,109 | b7b94f9e5a616808366206da16d46b53f9ed2013 | #!/usr/bin/env python
#
#
# This is the main iot console services CGI
# it allows iot devices to register via RT_REGISTER
# it fetches iot device status via RT_STATUS
# it sends set commands to iot devices via RT_CONTROL
#
# the FCGI is complaint with flup version for python 2.x
# flup version: flup 1.0.3.dev-20110405
import sys
from cgi import escape
from flup.server.fcgi import WSGIServer
import json
import requests
import urlparse
import logging
import dbm # simplest database option, you can change this to sql or others
from aeki_config import aeki_config as ac # configuration only
AEKI_HOST = ac["AEKI_HOST"] # host name where this cgi is being executed
IOT_PROTOCOL = "http://"
# Path components of the services exposed by each iot device.
IOT_STATUS_SERVICE = "iotstatus"
IOT_CONTROL_SERVICE = "iotset"
# Route names served by appRouter below.
RT_STATUS = "status"
RT_CONTROL = "control"
RT_UPDATE = "update"
RT_REGISTER = "register"
# JSON error template (%s is the human-readable message).
ERR_STR_JS = "{'error':1, 'errormsg':'%s'}"
HDR_CT_JSON = ('Content-Type', 'application/json')
DB_FILENAME = "data.registrations" #chmod these files ReadWrite for cgi
LOG_FILENAME = "log.iot" #chmod these files ReadWrite for cgi
LOG_LEVEL = logging.DEBUG #logging.WARNING
REQ_TIMEOUT = 3 # timeout if not connected to iot device in X seconds
# In-memory cache of serviceName -> ip, loaded from the dbm store.
g_services = {}
def handleNotFound(environ, start_response):
    """WSGI generator for unknown routes: 404 plus a JSON error payload."""
    logging.warning("** Handling not found")
    start_response('404 Notfound', [HDR_CT_JSON])
    yield ERR_STR_JS % "Route Not Found"
def handleUpdate(environ, start_response):
    """Register or refresh an iot device's serviceName -> ip mapping.

    Expects `serviceName` and `ip` query parameters; responds with JSON
    indicating whether the stored ip actually changed.
    """
    params = urlparse.parse_qs(environ["QUERY_STRING"])
    if "serviceName" not in params.keys():
        start_response('500 Error', [HDR_CT_JSON])
        yield ERR_STR_JS % "Format err: serviceName not sent"
        return
    serviceName = params["serviceName"][0]
    ip = params["ip"][0]
    logging.debug("parsed ip and serv >"+serviceName+"<>"+ip+"<")
    updated = False
    if serviceName not in g_services.keys():
        # Brand-new service: persist it and refresh the cache.
        updateServiceInfo(serviceName, ip)
        logging.info("Updated existing service with>"+serviceName+"<>"+ip+"<")
        updated = True
    elif ip != g_services[serviceName]:
        # Known service whose ip moved.
        updateServiceInfo(serviceName, ip)
        updated = True
    payload = {"updated": updated, "serviceName": serviceName, "ip": g_services[serviceName]}
    start_response('200 OK', [HDR_CT_JSON])
    yield json.dumps(payload)
def handleStatus(environ, start_response):
    """Proxy a status request to the iot device registered for serviceName.

    Looks up the device ip in g_services and forwards a GET to its
    status endpoint, relaying the JSON body back to the caller.
    """
    serviceName = parseServiceName(environ)
    ip = g_services[serviceName]
    requrl = IOT_PROTOCOL + ip + "/" + IOT_STATUS_SERVICE
    logging.debug(requrl)
    try:
        r = requests.get(requrl, timeout=REQ_TIMEOUT)
        start_response('200 OK', [HDR_CT_JSON])
        yield json.dumps(r.json())
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any request/parse failure maps to 500.
        start_response('500 connection error', [HDR_CT_JSON])
        yield ERR_STR_JS % 'Problem connecting to iot device'
def handleControl(environ, start_response):
    """Forward a control command (?no=...) to the registered iot device."""
    d = urlparse.parse_qs(environ["QUERY_STRING"])
    serviceName = d["serviceName"][0]
    no = d["no"][0]
    ip = g_services[serviceName]
    try:
        r = requests.get(IOT_PROTOCOL + ip +"/"+ IOT_CONTROL_SERVICE +\
             "?no="+no, timeout=REQ_TIMEOUT)
        # Consistency: use the shared HDR_CT_JSON constant like the
        # other handlers instead of a duplicated literal header tuple.
        start_response('200 OK', [HDR_CT_JSON])
        yield json.dumps(r.json())
    except Exception:
        # Narrowed from a bare `except:`; ERR_STR_JS renders the exact
        # same error string the old hard-coded literal produced.
        start_response('500 connection error', [HDR_CT_JSON])
        yield ERR_STR_JS % 'Problem connecting to iot device'
def appRouter(environ, start_response):
    """Dispatch each WSGI request to a handler by its path component."""
    route = escape(environ.get("PATH_INFO","")).strip("/")
    logging.debug("Serving >"+str(route)+"<")
    handlers = {
        RT_STATUS: handleStatus,
        RT_UPDATE: handleUpdate,
        RT_CONTROL: handleControl,
    }
    handler = handlers.get(route, handleNotFound)
    return handler(environ, start_response)
"""
the following function is for test only
"""
def registerTest():
    """Seed the registration db with a canned test entry (test helper)."""
    store = dbm.open(DB_FILENAME, 'c')
    store["LR01"] = AEKI_HOST + "/aeki/cgi/test_iot_resp.fcgi"
    store.close()
    loadServiceInfo()
"""
remove this this is test only
"""
def openDatabase():
    """Open (or create) the registration database, aborting on failure.

    The process cannot serve anything without its registration store,
    so a failed open exits the script after logging.
    """
    try:
        db = dbm.open(DB_FILENAME, 'c')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed here.
        logging.error("Cannot open database file "+DB_FILENAME+"- will abort")
        sys.exit("Exiting script iot.fcgi - cannot open database file")
    return db
def updateServiceInfo(service, ip):
    """Persist service -> ip in the dbm store, then reload the cache."""
    store = openDatabase()
    store[service] = ip
    store.close()
    loadServiceInfo()
def loadServiceInfo():
    """Populate the in-memory g_services cache from the dbm store."""
    store = openDatabase()
    for key in store.keys():
        g_services[key] = store[key]
    store.close()
def parseServiceName(environ):
    """Extract the `serviceName` query parameter from a WSGI request."""
    query = urlparse.parse_qs(environ["QUERY_STRING"])
    return query["serviceName"][0]
logging.basicConfig(filename = LOG_FILENAME, level= LOG_LEVEL)
# Logged at ERROR level so it appears regardless of the configured LOG_LEVEL.
logging.error("* IoT Console Services: Logging started *")
###
registerTest() # !!!! remove this call outside of test deployments
###
# Warm the serviceName -> ip cache, then hand control to the FastCGI server.
loadServiceInfo()
logging.error("Starting iot console services")
WSGIServer(appRouter).run()
|
993,110 | e20d660cff53b7f9097309e7de02963fd0272f3f | print("Ведите сначала класс, потом фамилию через энтер")
class_1 = input()
surname = list(input())  # split the surname into a list of characters
name = ['v', 'a', 'd', 'i', 'm', 16]
print('Номер 1 - ', name[1:-1]) # Task 1: everything but the first and last element
print('Номер 2 - ', name + [class_1]) # Task 2: list plus the class string
print('Номер 3 - ', name + [class_1]+surname) # Task 3: plus the surname letters
993,111 | d66a5f7e4d25fd9fd0f597cb032d01912799335e | import numpy as np
import matplotlib.pyplot as plt
from blimpy import Waterfall
from scipy.signal import detrend
import BL21BurstData as BL21
def load(filename, info, tstart, tstop):
    '''
    Loads file data as a waterfall plot type array and can print data info
    Inputs:
    filename - name of file being loaded
    info - Boolean, True to print data info
    tstart - Starting time bin
    tstop - Ending time bin
    Returns:
    cleandata - Detrended (bandpass removed) array of data arrays
    '''
    fb = Waterfall(filename, t_start = tstart, t_stop = tstop)
    if info == True:
        fb.info()
    freqs, data = fb.grab_data(4000, 8000, 1)
    # The old code transposed `data` with a Python double loop (O(rows*cols)
    # element-by-element appends); np.transpose produces the identical
    # (channel, time) layout, which detrend then cleans along each channel.
    cleandata = detrend(np.transpose(data))
    return(cleandata)
def get_freqs(fch1, nchan, foff):
    '''
    Uses data file info to create an array of frequencies
    Inputs:
    fch1 - Frequency of channel 1
    nchan - Number of channels in data file
    foff - Off frequency range
    Returns:
    Array of frequencies for each frequency channel in data
    '''
    channel_index = np.arange(nchan)
    return fch1 + channel_index * foff
def dedisperse(data, dm, freqs, tsamp):
    '''
    Dedisperses data by input value
    Inputs:
    data - 2D data array
    dm - Desired dispersion measure to dedisperse to
    freqs - Array of frequencies of data channels
    tsamp - Length of time of each time bin
    Returns:
    dedispersed - Dedispersed 2D data
    '''
    shift_bins = []
    dedispersed = []
    # Delay of each channel relative to the first channel, in time bins.
    for ch in range(len(freqs)):
        delay_s = 4148808.0 * dm * (1/(freqs[0]**2) - (1/(freqs[ch]**2)))/1000
        shift_bins.append(int(np.round(delay_s / tsamp)))
        dedispersed.append(np.zeros(len(data[0]), dtype = np.float32))
    # Roll each channel by its delay so the burst aligns in time.
    for ch in range(len(data)):
        shift = shift_bins[ch]
        dedispersed[ch] = np.concatenate([data[ch][-shift:], data[ch][:-shift]])
    return dedispersed
def data_plot(data, name, tag, fax, vmax, ext):
    '''
    Makes waterfall plot of input data
    Inputs:
    data - Array of data arrays
    name - Save name
    tag - Save tag
    fax - Array of frequency axis values
    vmax - Maximum data plot value (max colorbar value as well)
    ext - Maximum extent of time bins on x-axis
    Returns:
    nothing
    '''
    # Conversion factors: time bins -> ms, raw counts -> mJy.
    time_conv = 25.6
    flux_conv = 64.5
    scaled = [row / flux_conv for row in data]
    plt.imshow(scaled, origin = 'upper', interpolation = 'nearest', aspect = 'auto', vmin = 0, vmax = vmax/flux_conv, extent = [0, ext/time_conv, fax[len(fax)-1], fax[0]])
    cbar = plt.colorbar()
    cbar.set_label('Flux Density (mJy)')
    plt.ylabel('Frequency (MHz)')
    plt.xlabel('Time (ms)')
    plt.title(name + ' Data of Burst ' + tag)
    plt.savefig(name + '_' + tag)
def fscrunch(data, freqs, nchan, factor):
    '''
    Scrunches data along frequency axis to average data down for more visible plotting and analysis
    Inputs:
    data - Array of data arrays
    freqs - Frequency axis array
    nchan - Original number of frequency channels
    factor - Number to divide nchan by to average over frequency
    Returns:
    retval - Averaged data, array of arrays
    newfreq - New frequency axis array
    '''
    newnchan = nchan // factor
    newfreq = np.zeros(newnchan)
    # Mean frequency of each group of `factor` adjacent channels.
    for ch in range(newnchan):
        newfreq[ch] = np.sum(freqs[ch * factor:(ch + 1) * factor]) / float(factor)
    scrunched = np.zeros((len(np.linspace(0, nchan, len(newfreq))), len(data[0])))
    # Sum the channels of each group, column by column.
    for ch in range(newnchan):
        for t in range(len(data[0])):
            scrunched[ch][t] = sum(data[src][t] for src in range(ch * factor, (ch + 1) * factor))
    return scrunched, newfreq
def bscrunch(data, nbins, factor):
    '''
    Scrunches data along time axis similarly to fscrunch()
    Inputs:
    data - Array of data arrays
    nbins - Number of time bins in each data array
    factor - Number to divide nbins by
    Returns:
    retval - Data averaged along time dimension
    '''
    out = np.zeros(shape = (len(data), len(np.arange(start = 0, stop = nbins, step = factor))))
    counts = np.zeros_like(out)
    # Accumulate every `factor`-th column, then divide by how many samples
    # actually landed in each output bin (handles a ragged final bin).
    for offset in range(factor):
        cols = data[:, offset:nbins:factor]
        width = np.shape(cols)[1]
        out[:, :width] += cols
        counts[:, :width] += np.ones_like(cols)
    return out / counts
def extract_bursts(namefile, plot):
    '''
    Uses input text file of burst file and time locations to pull out bursts, average in frequency and time if necessary,
    plot if desired, then return the data array
    Inputs:
    namefile - Text file containing columns of burst tags, data file, TOA in time bins from start of file, and DM
    Also includes peak frequency and width of burst if needed for fitting
    plot - Boolean, True to plot the cleaned bursts
    Returns:
    bursts - Array of all bursts in data set. Each element is an Array with all information of that burst.
    '''
    # NOTE(review): read_data is never closed — consider a with-block.
    read_data = open(namefile, 'r')
    bursts = []
    while True:
        line = read_data.readline()
        if not line:
            break
        splitline = line.split()
        if splitline[0][0] == '#':
            pass  # skip comment lines in the burst list
        else:
            # Columns: tag, filename, TOA (bins), DM, width, peak frequency.
            tag = str(splitline[0])
            filename = str(splitline[1])
            tsamp = int(splitline[2])
            DM = float(splitline[3])
            width = float(splitline[4])
            nupeak = float(splitline[5])
            bursts.append([tag, filename, tsamp, DM, width, nupeak])
    freqs = get_freqs(fch1 = 8161.132568359375, nchan = 10924, foff = -0.3662109375) #nchan = 14848? fch1 = 9313.78173828125? from file info
    for i in range(0, len(bursts)):
        # Pull a 400-bin window centered on the burst TOA and dedisperse it.
        data = load(filename = bursts[i][1], info = False, tstart = bursts[i][2]-200, tstop = bursts[i][2]+200)
        ext = 400
        dedisdata = dedisperse(data = data, dm = bursts[i][3], freqs = freqs, tsamp = 0.0003495253333333333)
        if len(bursts[i][0]) > 3: #Naming convention for low S/N bursts in first and second file
            fscrunchdat, fax = fscrunch(data = dedisdata[:10912], freqs = freqs[:10912], nchan = 10912, factor = 682)
            scrunchdat = bscrunch(data = fscrunchdat, nbins = ext, factor = 4)
            best_vmax = 170*8
        elif int(bursts[i][0][0:2]) > 12: #Naming convention for bursts after second file
            fscrunchdat, fax = fscrunch(data = dedisdata[:10912], freqs = freqs[:10912], nchan = 10912, factor = 682)
            scrunchdat = bscrunch(data = fscrunchdat, nbins = ext, factor = 4)
            best_vmax = 170*8
        else:
            scrunchdat, fax = fscrunch(data = dedisdata[:10880], freqs = freqs[:10880], nchan = 10880, factor = 170) #Original data has nchan = 10924
            best_vmax = 170*20
        # Append the frequency axis (index 6) and scrunched data (index 7).
        bursts[i].append(fax)
        bursts[i].append(scrunchdat)
        if plot == True:
            data_plot(data = scrunchdat, name = '121102-Filterbank', tag = bursts[i][0], fax = fax, vmax = best_vmax, ext = ext)
            plt.clf()
    return(bursts)
def get_fluence(bursts, plot_center):
    '''
    Takes Input array of burst data arrays and uses BL21BurstData.py file to find fluence, width, amplitude, and center of each burst
    Inputs:
    bursts - Array of arrays; each array element contains all information for that burst
    plot_center - Boolean, True to plot data with overlayed gaussian center
    Returns:
    tfdarr - list of [tag, [fluence], data] entries, one per successfully fit burst
             (the original docstring said "None", but the function returns tfdarr)
    '''
    tfdarr = []
    for i in range(len(bursts)):
        if len(bursts[i][0]) < 4:
            # Short tags: bright/original-set bursts -- not fit here.
            pass
        else:
            tag = bursts[i][0]
            tfdarr.append([tag])
            print(tag)
            tsamp = bursts[i][2]
            nupeakGHz = bursts[i][5]
            fax = bursts[i][6]   # frequency axis appended by extract_bursts
            data = bursts[i][7]  # scrunched dynamic spectrum appended by extract_bursts
            peak, burst, nupeakind, tbin = BL21.find_peak(data)
            # Fit windows around the peak time bin / peak frequency channel.
            pllim = [tbin-5, tbin+6, 0, 0]
            phlim = [tbin+5, 0, 0, 0]
            if tag == "11B2":
                # Wider frequency window for this specific burst.
                fllim = [nupeakind-4, 0, 0, 0]
                fhlim = [nupeakind+4, 0, 0, 0]
            else:
                fllim = [nupeakind-2, 0, 0, 0]
                fhlim = [nupeakind+2, 0, 0, 0]
            try:
                params = BL21.comp_param(data = data, mode = 'gaussian', n = 1, pllim = pllim, phlim = phlim, fllim = fllim, fhlim = fhlim, factor = 20.5, fax = fax, tag = tag)
                tfdarr[-1].append([params[3][0]])
                tfdarr[-1].append(data)
                if plot_center == True:
                    # NOTE(review): `scrunchdat` and `ext` are not defined in this
                    # scope (they are locals of extract_bursts), so this branch
                    # raises NameError when plot_center=True -- probably meant
                    # data and ext=400. Confirm before enabling plotting.
                    BL21.data_plot(data = scrunchdat, tag = bursts[i][0], fax = fax, center = params[1], RSN = False, vmax = 170*8, ext = ext)
                    plt.clf()
            except ValueError:
                # Fitter found no solution at all -- drop this burst's entry.
                print("No fit found for burst " + str(tag))
                tfdarr.pop(-1)
            except IndexError:
                # Fit returned fewer components than expected -- drop entry.
                print("Burst " + str(tag) + " not properly fit")
                tfdarr.pop(-1)
            '''
            if tag == "11A1":
            BL21.comp_plot(data = [params[3][0]], name = 'Fluence', fax = fax, units = 'Jy ms', tag = 'FB' + bursts[i][0], labels = ('F'), log = False, RSN = False)
            elif tag == "11B2":
            BL21.comp_plot(data = [params[3][0]], name = 'Fluence', fax = fax, units = 'Jy ms', tag = 'FB' + bursts[i][0], labels = ('F'), log = False, RSN = False)
            '''
    return(tfdarr)
def fluence_moment_scatt(moment, RSN, singleA):
    '''
    Plots total fluence vs. the requested statistical moment for the original
    21 bursts plus an example few from the extended (filterbank) data set.
    moment/RSN/singleA are passed straight through to BL21.fluence_moment_scatt.
    '''
    #First get info from original data set (4p files)
    single_comp_BL21_tfdmarr = BL21.burst_stats(multi = False, plot = False)[3] #single component burst info
    multi_comp_BL21_tfdmarr = BL21.burst_stats(multi = True, plot = False)[3] #multi component burst info
    # Fold the multi-component bursts into the single-component list, skipping
    # tags 11E/11K/11O. NOTE(review): the reason for excluding those three is
    # not visible here -- confirm (presumably unusable fits).
    for i in range(len(multi_comp_BL21_tfdmarr)):
        if multi_comp_BL21_tfdmarr[i][0] == '11E':
            pass
        elif multi_comp_BL21_tfdmarr[i][0] == '11K':
            pass
        elif multi_comp_BL21_tfdmarr[i][0] == '11O':
            pass
        else:
            single_comp_BL21_tfdmarr.append(multi_comp_BL21_tfdmarr[i])
    combined_tfdmarr = single_comp_BL21_tfdmarr
    #Now find the info for extended data set bursts (filterbank files)
    BurstInfo = extract_bursts(namefile = 'full_data.txt', plot = False)
    tfdarr = get_fluence(bursts = BurstInfo, plot_center = False)
    # Append statistical moments to each extended-set entry and merge.
    for i in range(len(tfdarr)):
        moms = BL21.moments(tfdarr[i][1])
        tfdarr[i].append(moms)
        combined_tfdmarr.append(tfdarr[i])
    BL21.fluence_moment_scatt(tfdmarr = combined_tfdmarr, moment = moment, RSN = RSN, singleA = singleA)
def main():
    """Load a short slice of each filterbank file, then clean and plot every
    burst listed in full_data.txt."""
    files = ["spliced_guppi_57991_49905_DIAG_FRB121102_0011.gpuspec.0001.8.fil", "spliced_guppi_57991_51723_DIAG_FRB121102_0012.gpuspec.0001.8.fil", "spliced_guppi_57991_53535_DIAG_FRB121102_0013.gpuspec.0001.8.fil", "spliced_guppi_57991_55354_DIAG_FRB121102_0014.gpuspec.0001.8.fil", "spliced_guppi_57991_57166_DIAG_FRB121102_0015.gpuspec.0001.8.fil", "spliced_guppi_57991_58976_DIAG_FRB121102_0016.gpuspec.0001.8.fil", "spliced_guppi_57991_60787_DIAG_FRB121102_0017.gpuspec.0001.8.fil", "spliced_guppi_57991_62598_DIAG_FRB121102_0018.gpuspec.0001.8.fil", "spliced_guppi_57991_64409_DIAG_FRB121102_0019.gpuspec.0001.8.fil", "spliced_guppi_57991_66219_DIAG_FRB121102_0020.gpuspec.0001.8.fil"]
    for i in range(len(files)):
        # NOTE(review): `dat` is loaded but never used -- looks like a per-file
        # sanity/warm-up load; confirm intent.
        dat = load(filename = files[i], info = False, tstart = 0, tstop = 1000)
    BurstInfo = extract_bursts(namefile = 'full_data.txt', plot = True)
    #print(get_fluence(bursts = BurstInfo, plot_center = False))
    #fluence_moment_scatt(moment = 'Skew', RSN = False, singleA = False)

# Runs on import as well as direct execution (no __main__ guard).
main()
|
993,112 | 05cda59fede60d3067a35897c3c3f65a82e6e963 | # """"
# List Comprehensions
# """
# ls = [i for i in range(100) if i % 3 == 0]
# print(ls)
#
# """"
# Dictionary Comprehensions
# """
# dict1 = {
# i: f"Item{i}" for i in range(1, 101)
# if i % 5 == 0
# }
# dict2 = {
# value: key
# for key, value in dict1.items()
# }
# print(dict1, "\n", dict2)
#
# """"
# Set Comprehensions
# """
# dresses = {dress for dress in ["dress1", "dress2", "dress1",
# "dress2", "dress1", "dress2"]}
# print(type(dresses))
# print(dresses)
#
# """"
# Generators Comprehensions
# """
# evens = (i for i in range(100) if i % 2 == 0)
# print(type(evens))
# # print(evens.__next__())
# # print(evens.__next__())
# # print(evens.__next__())
# print(tuple(evens))
# Question On comprehension:-
# Interactive comprehension demo: build a list / dict / set of size n,
# print it, and loop until the user quits.
n = int(input("Enter how many input you want to take :\n"))
while True:
    print("Which comprehension you want to select")
    print("1.List\n2.Dictionary\n3.Set")
    inp = input()
    if inp not in ['1', '2', '3']:
        print("😞 Please enter a valid option 😞")
        continue
    inp = int(inp)
    if inp == 1:
        ls = [i for i in range(n)]
        print(ls)
    elif inp == 2:  # fix: was `inp is 2` (identity test, not equality)
        dict1 = {
            i: f"Item{i}" for i in range(n)
        }
        # Invert the mapping (value -> key), as in the commented example above.
        dict1 = {
            value: key
            for key, value in dict1.items()
        }
        print(dict1)
    elif inp == 3:  # fix: was `inp is 3`
        set1 = {i for i in range(n)}
        print(set1)  # fix: the set was built but never displayed
    print("Press Q to quit OR C to continue")
    # Re-prompt until we get a recognised answer (case-insensitive).
    opt1 = ""
    while opt1 not in ("q", "c"):
        opt1 = input().lower()
    # fix: `opt1 == "q" or "Q"` was always truthy, so the inner loop always
    # broke on the first answer and 'Q' could never actually quit.
    if opt1 == "q":
        break
|
993,113 | 3a69df190bf0ccedcc2f17ad2d26587083d364d9 | #
# @lc app=leetcode.cn id=70 lang=python3
#
# [70] 爬楼梯
#
# @lc code=start
class Solution:
    def climbStairs(self, n: int) -> int:
        """Count distinct ways to climb n stairs taking 1 or 2 steps.

        Space-optimised dynamic programming: the answer is the (n+1)-th
        Fibonacci number, so only the last two values are kept.
        O(n) time, O(1) space.
        """
        # prev = ways to reach the current step, nxt = ways for the next one.
        prev, nxt = 1, 1
        for _ in range(n):
            prev, nxt = nxt, prev + nxt
        return prev
        # Alternative: full DP table (O(n) space)
        #   dp[0] = dp[1] = 1; dp[i] = dp[i-1] + dp[i-2]; return dp[n]
# @lc code=end
|
993,114 | 175345af9b4435a2bad90713e8478cac0d8c4c08 | /home/alex/myenv/zodiac/eggs/venusian-1.0a8-py2.7.egg/venusian/tests/fixtures/importerror/__init__.py |
993,115 | cf7f39ce28793085d196cdeedd2f35e69d985012 | '''
Sample Unit Test examples
'''
import unittest
class ExampleTests(unittest.TestCase):
    """Demonstration test: diff regenerated fizzbuzz output against a golden file."""

    def test_fizzbuzz_good(self):
        """Regenerate fizzbuzz(0..99) with trailing newlines and compare it
        line-by-line against fizzbuzz-output.txt (written beforehand by
        create_expectedfile), printing a verdict instead of asserting.

        NOTE(review): Python 2 code (xrange); the str(...) wrapper is
        redundant because fizzbuzz already returns a str.
        """
        output = []
        for n in xrange(100):
            output.append(str(fizzbuzz(n) + '\n'))
        with open("fizzbuzz-output.txt", "r") as expected:
            i = 0
            for line in expected:
                if line == output[i]:
                    print("Success!")
                    # i only advances on a match, so one mismatch desyncs
                    # every later comparison -- intentional? confirm.
                    i += 1
                else:
                    print("Nope. Try Again.")
def fizzbuzz(n):
    """Classic fizzbuzz: 'fizz' for multiples of 3, 'buzz' for multiples of 5,
    'fizzbuzz' for both, otherwise the number itself as a string."""
    words = ('fizz' if n % 3 == 0 else '') + ('buzz' if n % 5 == 0 else '')
    return words or str(n)
def create_expectedfile(n, output_file="fizzbuzz-output.txt"):
    """Write fizzbuzz(0..n-1), one per line, to *output_file* (the golden file).

    NOTE(review): Python 2 (xrange); the loop variable shadows parameter ``n``
    -- harmless since xrange(n) is evaluated first, but worth renaming.
    """
    with open(output_file, "w") as expected:
        for n in xrange(n):
            expected.write(fizzbuzz(n) + "\n")
if __name__ == '__main__':
    # Uncomment to (re)generate the golden file before running the test:
    #create_expectedfile(100)
    unittest.main()
import matplotlib.pyplot as plt
import numpy as np  # fix: np.load / np.argmax were used without importing numpy

# Visualise model predictions on the first 12 saved test images in a 3x4 grid.
# NOTE(review): relies on IMG_SIZE and a trained `model` defined earlier in the
# original notebook/tutorial -- confirm before running standalone.

# if you need to create the data:
#test_data = process_test_data()
# if you already have some saved:
test_data = np.load('test_data.npy')

fig = plt.figure()
for num, data in enumerate(test_data[:12]):
    # cat: [1,0]
    # dog: [0,1]
    img_num = data[1]
    img_data = data[0]
    y = fig.add_subplot(3, 4, num + 1)
    orig = img_data
    data = img_data.reshape(IMG_SIZE, IMG_SIZE, 1)
    model_out = model.predict([data])[0]
    # argmax index 1 corresponds to the one-hot 'dog' class above.
    if np.argmax(model_out) == 1: str_label = 'Dog'
    else: str_label = 'Cat'
    y.imshow(orig, cmap='gray')
    plt.title(str_label)
    y.axes.get_xaxis().set_visible(False)
    y.axes.get_yaxis().set_visible(False)
plt.show()
993,117 | 41a210487501c82d9321133e4d648b3402f447fb | from django.shortcuts import render
from django.template.context_processors import csrf
from django.http import HttpResponseRedirect
from django.core.exceptions import ObjectDoesNotExist
from .models import Review,Movie,User
from datetime import datetime
import numpy as np
import pandas as pd
import preprocess_kgptalkie as ps
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
# Create your views here.
# Module-level ML state shared across views (populated by readExcel);
# NOTE(review): mutable globals are not safe for concurrent requests.
tfidf=[]    # becomes the fitted TfidfVectorizer after readExcel()
clf =[]     # becomes the trained LinearSVC after readExcel()
review=""   # scratch slot carrying the last submitted review text between views
def get_clean(x):
    """Normalise raw review text for the TF-IDF model: lowercase, strip
    backslashes/underscores/commas, expand contractions, drop emails, URLs,
    HTML tags, accents and special characters, and collapse any character
    repeated 3+ times down to one."""
    text = str(x).lower()
    for old, new in (('\\', ''), ('_', ' '), (',', '')):
        text = text.replace(old, new)
    text = ps.cont_exp(text)
    text = ps.remove_emails(text)
    text = ps.remove_urls(text)
    text = ps.remove_html_tags(text)
    text = ps.remove_accented_chars(text)
    text = ps.remove_special_chars(text)
    return re.sub("(.)\\1{2,}", "\\1", text)
def readExcel(request):
    """Train the module-global sentiment model, then redirect to the home page.

    Reads labelled Amazon reviews (text + star rating) from a hard-coded .ods
    file, cleans each text, fits the global TfidfVectorizer on the whole
    corpus and a class-balanced LinearSVC on a 75/25 split.
    NOTE(review): training runs on every request and mutates module globals --
    acceptable for a demo, not for concurrent production use.
    """
    df = pd.read_excel('~/SDP_Project/MR_SYSTEM/userapp/templates/AmazonSDPDataset_original.ods', engine='odf', usecols= ['reviewText','overall'])
    df['reviewText'] = df['reviewText'].apply(lambda x: get_clean(x))
    global tfidf
    tfidf = TfidfVectorizer(analyzer='word')
    X = tfidf.fit_transform(df['reviewText'])
    Y = df['overall']
    X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.25, random_state=0)
    global clf
    clf = LinearSVC(C=0.1, class_weight='balanced')
    clf.fit(X_train,Y_train)
    return HttpResponseRedirect('/user/user_home/')
def user_home(request):
    """List movies for the logged-in user, optionally filtered by star rating.

    GET param ``star`` keeps movies whose average rating lies in
    [star-0.9, star], ordered by rating; otherwise all movies, newest first.
    Redirects to /login/ when there is no session user.
    """
    if "user" in request.session:
        cid = request.session["user"]
        star = request.GET.get('star','')
        user = User.objects.filter(ID=cid)
        if user.exists():
            user = User.objects.get(ID=cid)
            username = user.name
            movies = Movie.objects.all().order_by('-ID')
            if star != "":
                movies = Movie.objects.filter(rating__range=(float(star)-0.9,float(star))).order_by('-rating')
            if movies.exists():
                return render(request,'user_home.html',{'movielist':movies,'nomovie':False,'username':username})
            else:
                return render(request,'user_home.html',{'movielist':movies,'nomovie':True,'username':username})
    return HttpResponseRedirect('/login/')
def calculateRating(request):
    """Predict a star rating for a posted review and hand off to addReview.

    Cleans the text, vectorises it with the fitted global ``tfidf``,
    classifies with the global ``clf``, then redirects to /user/addReview
    with the predicted rating and movie id.  The raw text is stashed in the
    module-global ``review`` for addReview to pick up
    (NOTE(review): not safe across concurrent users).
    """
    if "user" in request.session:
        id=request.POST.get('movieid','')
        reviewText=request.POST.get('reviewText','')
        global review
        review = reviewText
        reviewText = get_clean(reviewText)
        global clf
        global tfidf
        reviewText = tfidf.transform([reviewText])
        rating = clf.predict(reviewText)
        return HttpResponseRedirect('/user/addReview?rating='+str(rating[0])+'&movieid='+str(id))
    return HttpResponseRedirect('/login/')
def addReview(request):
    """Persist the pending review and redirect back to the movie page.

    The review text comes from the module-global ``review`` (stashed by
    calculateRating); the precomputed rating and movie id arrive as GET
    params.  After saving, the movie's average rating is refreshed.
    Requires a logged-in session user; otherwise redirects to /login/.
    """
    if "user" in request.session:
        rating = request.GET.get('rating','')
        id = request.GET.get('movieid','')
        global review
        reviewText = review
        Rating = rating
        DateTime = datetime.now()
        uid = request.session["user"]
        user = User.objects.get(ID=uid)
        movie = Movie.objects.get(ID=id)
        new_review = Review(reviewText=reviewText,rating=Rating,dateTime=DateTime,mid=movie,uid=user)
        new_review.save()
        avg_rating(new_review.mid_id)  # keep the movie's average rating in sync
        # (removed dead code: a second Movie fetch, unsaved releasedDate /
        # duration reformatting and an unused reviews query -- showmovie
        # re-renders everything after the redirect anyway)
        return HttpResponseRedirect('/user/showmovie?movieid='+str(id))
    return HttpResponseRedirect('/login/')
def showmovie(request):
    """Movie detail page: movie info, add-review permission flag, reviews.

    GET params: ``movieid`` (required); ``star`` filters reviews to that
    rating; ``m`` is passed through to the template as 'mr'.  ``added`` is
    True when the user has already reviewed this movie (template hides the
    add-review form).  Redirects to /login/ without a session user.
    """
    if "user" in request.session:
        cid = request.session["user"]
        m =request.GET.get('m','')
        if m == "":
            m= False
        sortby= request.GET.get('star','')
        id = request.GET.get('movieid','')
        movie = Movie.objects.get(ID=id)
        user = User.objects.get(ID= cid)
        added = False
        myreviews= Review.objects.filter(mid_id= movie, uid_id=user) #don't allow user to add review if already added
        if myreviews.exists():
            added = True
        # In-memory only: render dates as strings for the template (not saved).
        movie.releasedDate = (movie.releasedDate).strftime("%Y-%m-%d")
        movie.duration = (movie.duration).strftime("%H:%M")
        if sortby != '':
            reviews= Review.objects.filter(mid=id, rating= sortby)
        else:
            reviews = Review.objects.filter(mid=id)
        # Newest reviews first.
        sortedReviews = sorted(
            reviews,
            key=lambda x: x.dateTime, reverse=True
        )
        return render(request,'showmovie.html',{'movie':movie,'reviews':sortedReviews,'currentuserid':cid, 'added':added,'mr':m})
    return HttpResponseRedirect('/login/')
def my_reviews(request):
    """List the logged-in user's reviews, newest first.

    Includes a CSRF token for the edit/delete forms and a friendly message
    when the user has no reviews.  Redirects to /login/ without a session.
    """
    c= {}
    c.update(csrf(request))
    if 'user' in request.session:
        userid = request.session["user"]
        getuser = User.objects.get(ID = userid)
        reviews = Review.objects.filter(uid_id= getuser).order_by('-dateTime')
        if reviews.exists():
            return render(request,'myreviews.html',{'reviews':reviews,'c':c})
        else:
            return render(request,'myreviews.html',{'reviews':reviews, 'msg':'You have not added any reviews yet..!!','c':c})
    return HttpResponseRedirect('/login/')
def update_review(request):
    """Edit an existing review (POST ``id`` + ``new-rw``).

    Replaces the text, timestamps the edit, re-predicts the star rating with
    the global tfidf+clf model, refreshes the movie's average rating, and
    returns to the user's review list.  Requires login.
    """
    if 'user' not in request.session:
        return HttpResponseRedirect('/login/')
    rid = request.POST.get('id','')
    if rid:
        review = Review.objects.get(ID= rid)
        review.reviewText = request.POST.get('new-rw','')
        review.dateTime = datetime.now()
        global clf
        global tfidf
        rw = tfidf.transform([get_clean(review.reviewText)])
        # NOTE(review): predict() returns an array, not a scalar (compare
        # calculateRating, which indexes [0]) -- confirm the rating field
        # stores this correctly.
        review.rating = clf.predict(rw)
        review.save()
        avg_rating(review.mid_id)
    return HttpResponseRedirect('/user/reviews/')
def delete_review(request):
    """Delete the review named by GET ``id``, refresh the parent movie's
    average rating, and return to the user's review list.  Requires login."""
    if 'user' not in request.session:
        return HttpResponseRedirect('/login/')
    review_id = request.GET.get('id','')
    if review_id:
        doomed = Review.objects.get(ID= review_id)
        parent_movie = doomed.mid_id  # remember before the row disappears
        doomed.delete()
        avg_rating(parent_movie)
    return HttpResponseRedirect('/user/reviews/')
def avg_rating(movie_id):
    "Recompute a movie's average rating from all its reviews and persist it."
    movie = Movie.objects.get(ID= movie_id)
    reviews = Review.objects.filter(mid_id= movie)
    mean = 0.0
    if reviews.exists():
        mean = sum(r.rating for r in reviews) / len(reviews)
    movie.rating = round(mean, 1)  # keep one decimal place (y.x format)
    movie.save()
    return
def profile(request):
    """Render the profile page; GET ``update`` toggles edit mode.

    ``id`` is passed through to the template (0 = plain view mode).
    Redirects to /login/ without a session user.
    """
    id= request.GET.get('update','')
    c = {}
    c.update(csrf(request))
    if 'user' in request.session:
        getuser = User.objects.get(ID= request.session["user"])
        if id=="":
            id=0
        return render(request,'profile.html',{'c':c, 'user':getuser, 'id':id})
    else:
        return HttpResponseRedirect('/login/')
def update_profile(request):
    """Save name/bio (and optional avatar image) for the session user, then
    re-render the profile page in view mode; show an alert on lookup failure.

    NOTE(review): unlike the other views, this one does not guard on
    'user' in request.session -- a logged-out request raises KeyError;
    confirm the URL is only reachable when logged in.
    """
    c= {}
    c.update(csrf(request))
    uid = request.session["user"]
    name = request.POST.get('name','')
    bio= request.POST.get('bio','')
    try:
        getuser= User.objects.get(ID= uid)
        getuser.name = name
        getuser.bio = bio
        # Only replace the avatar when a new file was actually uploaded.
        filepath=request.FILES.get('image',False)
        if filepath:
            getuser.image = request.FILES["image"]
        getuser.save()
        user= User.objects.get(ID= uid)
        return render(request,'profile.html',{'c':c,'user':user,'id':0})
    except ObjectDoesNotExist:
        alert= "Profile Not Updated.."
        return render(request,'profile.html',{'c':c,'msg':alert})
|
993,118 | 80dde7aa7cc1ae3bed8f01bb9e9517d4e75aa7ae | """Dataloader Wrapper"""
from __future__ import absolute_import
import six
from . import sampler as _sampler
class DataLoaderNgnBase(object):
    """Base loader turning a dataset into an iterable of (batchified) minibatches.

    Index selection is delegated to a sampler / batch-sampler pair (defaults
    are built from the sibling ``sampler`` module when not supplied) and an
    optional ``batchify_fn`` collates each list of samples.
    """

    # Marker attribute so other components can recognise this loader family.
    _attr_ngn_dataloader = True

    def __init__(self, dataset,
                 batch_size=None,
                 shuffle=False,
                 sampler=None,
                 last_batch='discard',
                 batch_sampler=None,
                 batchify_fn=None,
                 *args, **kwargs):
        super(DataLoaderNgnBase, self).__init__()
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.sampler = sampler
        self.last_batch = last_batch
        self.batch_sampler = batch_sampler
        self.batchify_fn = batchify_fn
        # Fill in whatever the caller did not provide.
        if not self.sampler:
            self.init_slice_sampler()
        if not self.batch_sampler:
            self.batch_sampler = self._make_batch_sampler(self.sampler)

    def init_slice_sampler(self):
        """Install the default index sampler (optionally shuffled)."""
        self.sampler = _sampler.SliceSampler(int(len(self.dataset)), shuffle=self.shuffle)

    def _make_batch_sampler(self, smplr):
        """Wrap *smplr* in a BatchSampler honouring batch_size / last_batch."""
        policy = self.last_batch if self.last_batch else 'keep'
        return _sampler.BatchSampler(smplr, self.batch_size, policy)

    def reset_batch_sampler(self, new_sampler, *args, **kwargs):
        """Swap in a new sampler (hook used by the distributed dataloader)."""
        self.sampler = new_sampler
        self.batch_sampler = self._make_batch_sampler(self.sampler)

    def __len__(self):
        return len(self.batch_sampler)

    def __iter__(self):
        for indices in self.batch_sampler:
            samples = self._get_a_batch(indices)
            yield self.batchify_fn(samples) if self.batchify_fn else samples

    def _get_a_batch(self, batch):
        """Gather the dataset items for one batch of indices."""
        return [self.dataset[idx] for idx in batch]
|
993,119 | e29baa0110e18dff5bbb36868d08fc3c43d1e90a | """
:class:`.DataBC` geocoder.
"""
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_SCHEME, DEFAULT_TIMEOUT
from geopy.exc import GeocoderQueryError
from geopy.location import Location
from geopy.util import logger
__all__ = ("DataBC", )
class DataBC(Geocoder):
    """
    Geocoder using the Physical Address Geocoder from DataBC. Documentation at:
    http://www.data.gov.bc.ca/dbc/geographic/locate/geocoding.page
    """

    def __init__(self, scheme=DEFAULT_SCHEME, timeout=DEFAULT_TIMEOUT, proxies=None, user_agent=None):
        """
        Create a DataBC-based geocoder.

        :param string scheme: Desired scheme.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception.

        :param dict proxies: If specified, routes this geocoder's requests
            through the specified proxy. E.g., {"https": "192.0.2.0"}. For
            more information, see documentation on
            :class:`urllib2.ProxyHandler`.

        :param string user_agent: User-Agent header sent with each request.
        """
        super(DataBC, self).__init__(
            scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent
        )
        self.api = '%s://apps.gov.bc.ca/pub/geocoder/addresses.geojson' % self.scheme

    def geocode(
            self,
            query,
            max_results=25,
            set_back=0,
            location_descriptor='any',
            exactly_one=True,
            timeout=None,
        ):
        """
        Geocode a location query.

        :param string query: The address or query you wish to geocode.

        :param int max_results: The maximum number of results to request.

        :param float set_back: The distance to move the accessPoint away
            from the curb (in meters) and towards the interior of the parcel.
            location_descriptor must be set to accessPoint for set_back to
            take effect.

        :param string location_descriptor: The type of point requested. It
            can be any, accessPoint, frontDoorPoint, parcelPoint,
            rooftopPoint and routingPoint.

        :param bool exactly_one: Return one result or a list of results, if
            available.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.
        """
        params = {'addressString': query}
        if set_back != 0:
            params['setBack'] = set_back
        if location_descriptor not in ['any',
                                       'accessPoint',
                                       'frontDoorPoint',
                                       'parcelPoint',
                                       'rooftopPoint',
                                       'routingPoint']:
            # Grammar fix: the message previously read "did not provided".
            raise GeocoderQueryError(
                "You did not provide a location_descriptor "
                "the webservice can consume. It should be any, accessPoint, "
                "frontDoorPoint, parcelPoint, rooftopPoint or routingPoint."
            )
        params['locationDescriptor'] = location_descriptor
        if exactly_one is True:
            max_results = 1
        params['maxResults'] = max_results

        url = "?".join((self.api, urlencode(params)))
        logger.debug("%s.geocode: %s", self.__class__.__name__, url)
        response = self._call_geocoder(url, timeout=timeout)

        # Success; convert from GeoJSON
        if not len(response['features']):
            return None
        geocoded = []
        for feature in response['features']:
            geocoded.append(self._parse_feature(feature))
        if exactly_one is True:
            return geocoded[0]
        return geocoded

    @staticmethod
    def _parse_feature(feature):
        """Convert one GeoJSON feature into a geopy Location.

        GeoJSON Point coordinates are (lon, lat); Location wants (lat, lon).
        """
        properties = feature['properties']
        coordinates = feature['geometry']['coordinates']
        return Location(
            properties['fullAddress'], (coordinates[1], coordinates[0]),
            properties
        )
|
993,120 | 02a3b5da8ea9f9633048c2660d403ed731069198 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 20:08:38 2017
@author: Padam Singh
"""
def vowel_check(char):
    """
    Return True if *char* (a string of exactly one character) is a lowercase
    vowel, False otherwise.  For invalid input (length != 1) a message is
    printed and None is returned.

    Fixes over the previous version:
    - ``'' in 'aeiou'`` is True, so the empty string was wrongly reported
      as a vowel; any length other than 1 is now rejected.
    - returns real booleans (as the docstring always promised) instead of
      the strings 'TRUE'/'FALSE'/'None'.
    - removed the bare ``except: pass`` that silently swallowed all errors,
      and fixed the "Plase" typo in the message.
    """
    if len(char) != 1:
        print("Please enter a string of length 1.")
        return None
    return char in 'aeiou'
def main():
    """Prompt for a single character and report the vowel_check verdict."""
    ch = input("Enter a character : ")
    result = vowel_check(ch)
    print("Vowel check for '{}' returns '{}'".format(ch, result))
if __name__ == '__main__':
    # Run the interactive prompt only when executed as a script.
    main()
993,121 | 4f3305158cc18cf5fbf4db21bbc4fe8c1b11e01c |
# coding: utf-8
# In[4]:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
import seaborn
import pandas as pd
import pickle
# get_ipython().magic(u'matplotlib inline')
# In[8]:
# Load precomputed experiment artefacts (pickled by the solver run).
# NOTE(review): pickle files opened in text mode 'r' -- Python 2 only;
# use 'rb' for portability.
numFlights = pickle.load(open('numFlights.pickle', 'r'))
numConflicts = pickle.load(open('numConflicts.pickle', 'r'))
totaldelays = pickle.load(open('totaldelays.pickle', 'r'))
dmaxmin = pickle.load(open('dmaxmin.pickle', 'r'))
# NOTE(review): tdc_min is loaded from dmaxmin.pickle, not a tdc_min pickle --
# looks like a copy-paste slip; confirm the intended file.
tdc_min = pickle.load(open('dmaxmin.pickle', 'r'))

# In[9]:

import seaborn  # already imported above; harmless duplicate (notebook export)
pdf = matplotlib.backends.backend_pdf.PdfPages('delay_only_cp_results.pdf');
fig = plt.figure(figsize=(5, 6));

partition = 44
maxDelay = 18
# NOTE(review): add_subplot(2, 1, 1) is called twice; the second call just
# returns the same axes -- likely leftover.
ax = fig.add_subplot(2, 1, 1)
ax = fig.add_subplot(2, 1, 1)
td = totaldelays[partition]
# Top panel: one curve per max-delay setting, total delay vs normalised step.
for maxDelay in sorted(td.keys()):
    ax.plot(maxDelay/np.array(td[maxDelay][0], dtype=float), td[maxDelay][1], 'o-', label='$d_\mathrm{max} = %i$' % maxDelay)
ax.axhline(y=tdc_min[partition], linestyle='--', color='gray')
ax.grid(axis='x')
ax.set_xlabel('$\Delta_\mathrm{d}$')
ax.set_ylabel('Total delay')
ax.grid(True)
ax.legend()

# Collect per-partition stats aligned on the same sorted key order.
dmm = [dmaxmin[p] for p in sorted(totaldelays.keys())]
Nf = [numFlights[p] for p in sorted(totaldelays.keys())]
Nc = [numConflicts[p] for p in sorted(totaldelays.keys())]

# Bottom panel: heatmap of minimal max-delay indexed by (flights, conflicts).
ax = fig.add_subplot(2, 1, 2)
data = {}
for nf, nc, d in zip(Nf, Nc, dmm):
    data[(nf, nc)] = d
series = pd.Series(list(data.values()), index=pd.MultiIndex.from_tuples(data.keys()))
df = series.unstack().fillna(0)
# Blank out the zero (missing) cells in the annotations.
annotation = df.applymap(lambda x: '' if x == 0.0 else "%i" % x).values
seaborn.heatmap(df, annot=annotation, fmt = '', ax=ax)
#ax.set_title('$d^0_{max}$')
ax.grid(axis='x')
ax.set_xlabel('$N_\mathrm{c}$')
ax.set_ylabel('$N_\mathrm{f}$');
ax.invert_yaxis()

plt.tight_layout()
pdf.savefig(figure=fig);
pdf.close();
plt.tight_layout()
# In[ ]:
|
# Dev helper: re-import the AboutFrMaya tool (Py2 `reload`) so code edits are
# picked up inside a live Maya session, then open its window with the
# update/remove buttons enabled.
import FrMaya.tools.AboutFrMaya as AboutFrMaya
reload(AboutFrMaya)
AboutFrMaya.show(update_btn = True, remove_btn = True)
993,123 | 588f5a991b723c133f9c335ef46cfb1ad45e66cc | # RESTful API Example
from flask import Flask, request, redirect,render_template
import base64
import random
import time
app = Flask(__name__)

# OAuth demo configuration (single hard-coded client).
redirect_uri = "http://localhost:5000/client/passport"
client_id = '123456'

# In-memory "database": username -> [password, token, token, ...].
# Fix: `users[client_id] = []` previously ran BEFORE `users` was defined,
# raising NameError on import (and the dict literal would then have
# clobbered it anyway).
users = {
    "zagjab": ["123456"]
}
users[client_id] = []  # register the demo client so gen_token() can append tokens

auth_code = {}          # issued authorization codes: code -> redirect_uri
oauth_redirect_uri = [] # redirect URIs registered by clients
def gen_token(uid):
    """Issue a bearer token "uid:random:expiry" (base64, 2h lifetime) and
    record it as the user's latest token.

    Fix: base64.b64encode requires bytes on Python 3 -- encode before and
    decode after so the stored token is a plain str on both runtimes.
    """
    raw = ":".join([str(uid), str(random.random()), str(time.time() + 7200)])
    token = base64.b64encode(raw.encode("utf-8")).decode("ascii")
    users[uid].append(token)
    return token
def gen_auth_code(uri):
    """Mint a one-off authorization code bound to *uri* and remember it."""
    code = random.randint(0, 10000)
    auth_code[code] = uri
    return code
def verify_token(token):
    """Validate a bearer token against the in-memory user store.

    Returns -1 when the token is not the user's most recent one,
    1 when it is current and unexpired, 0 when it has expired.

    NOTE(review): Python-2 era -- on Py3 b64decode returns bytes, and
    bytes.split(':') with a str separator raises TypeError; confirm runtime.
    """
    _token = base64.b64decode(token)
    # Token layout is "uid:random:expiry" (see gen_token).
    if not users.get(_token.split(':')[0])[-1] == token:
        return -1
    if float(_token.split(':')[-1]) >= time.time():
        return 1
    else:
        return 0
@app.route('/', methods=['POST', 'GET'])
def index():
    # NOTE(review): render_template expects a template *filename*; 'Hello'
    # has no extension and likely does not exist under templates/ --
    # probably meant ``return 'Hello'``. Confirm intent.
    return render_template('Hello')
@app.route('/login', methods=['POST', 'GET'])
def login():
    """HTTP Basic-style login: decode "uid:pw" from the Authorization header
    and hand out a fresh token when the password matches.

    NOTE(review): Python-2 era -- on Py3 b64decode returns bytes, so the
    str split/compare below would fail; confirm runtime.
    """
    uid, pw = base64.b64decode(request.headers['Authorization'].split(' ')[-1]).split(':')
    if users.get(uid)[0] == pw:
        return gen_token(uid)
    else:
        return 'error'
@app.route('/oauth', methods=['POST', 'GET'])
def oauth():
    """Toy OAuth2 authorization-code endpoint handling three phases by args:

    1. ?user=&pw=              resource-owner login; on success, redirect to
                               the first registered client redirect_uri with
                               a fresh auth code.
    2. ?code=&redirect_uri=    exchange a valid code (bound to that URI)
                               for an access token.
    3. ?redirect_uri=          client registration: remember the URI.
    Anything else falls through to 'please login'.
    """
    if request.args.get('user'):
        if users.get(request.args.get('user'))[0] ==request.args.get('pw') and oauth_redirect_uri:
            uri = oauth_redirect_uri[0] + '?code=%s'% gen_auth_code(oauth_redirect_uri[0])
            return redirect(uri)
    if request.args.get('code'):
        if auth_code.get(int(request.args.get('code'))) == request.args.get('redirect_uri'):
            return gen_token(request.args.get('client_id'))
    if request.args.get('redirect_uri'):
        oauth_redirect_uri.append(request.args.get('redirect_uri'))
    return 'please login'
@app.route('/client/login', methods=['POST','GET'])
def client_login():
    """Kick off the OAuth dance: send the user to the provider's /oauth
    endpoint with this client's id and registered redirect URI."""
    target = ("http://localhost:5000/oauth"
              "?response_type=code&client_id=%s&redirect_uri=%s"
              % (client_id, redirect_uri))
    return redirect(target)
@app.route('/client/passport', methods=['POST','GET'])
def client_passport():
    """OAuth callback: receive ?code=... and bounce back to /oauth to
    exchange it for an access token.

    Fix: the code was previously sent as ``response_type=<code>`` with the
    real ``code`` parameter never set, so oauth()'s token-exchange branch
    (which reads ``code`` and ``redirect_uri``) could never match.
    """
    code = request.args.get('code')
    uri = 'http://localhost:5000/oauth?code=%s&client_id=%s&redirect_uri=%s' % (code, client_id, redirect_uri)
    return redirect(uri)
@app.route('/test1', methods=['POST', 'GET'])
def test():
    """Protected resource: return data only for a valid, unexpired token."""
    token = request.args.get('token')
    return 'data' if verify_token(token) == 1 else 'error'
if __name__ == '__main__':
    # Flask development server (debug: auto-reload + interactive traceback).
    app.run(debug=True)
|
993,124 | b036865f0da93ee2a9bafba79a08fe165c30e6cc | import os
import sys
import unittest
from pygcam.windows import IsWindows
def printLink(path):
    """Report to stderr whether *path* is a symlink and, if so, its target."""
    islink = os.path.islink(os.path.normpath(path))
    sys.stderr.write("%s islink: %s\n" % (path, islink))
    if not islink:
        return
    target = os.readlink(path)
    sys.stderr.write("Link: %s" % target)
    # printLink(target)
class TestSymlinks(unittest.TestCase):
    """Windows-only sanity checks that pygcam monkey-patches
    os.path.islink / os.readlink with junction-aware versions.

    NOTE(review): test_islink depends on a hard-coded per-machine path
    (C:/Users/rjp/...) and will fail elsewhere -- consider a fixture.
    """
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_funcs(self):
        # Importing pygcam.windows should have replaced the stdlib functions
        # with the Windows-aware implementations.
        if IsWindows:
            from pygcam.windows import islinkWindows, readlinkWindows
            self.assertEqual(os.path.islink, islinkWindows)
            self.assertEqual(os.readlink, readlinkWindows)

    def test_islink(self):
        if IsWindows:
            p1 = 'C:/Users/rjp/GCAM/current'
            self.assertTrue(os.path.islink(p1), '%s should be seen as a link' % p1)
            p2 = p1 + '/Main_User_Workspace'
            self.assertFalse(os.path.islink(p2), '%s should not be seen as a link' % p2)
|
993,125 | eb899bd8ed9a4a6b78e557a2d51b1a9fcf031130 | #!/usr/bin/env python
# coding=utf-8
# Sum the item prices, then charge only the whole-yuan part (truncate the fen).
money_all = 123.6 + 23.8 + 47.2 + 53.7
money_pay = int(money_all)  # int() truncates toward zero -- not rounding
print("总金额:" + str(money_all))
print("实收金额:" + str(money_pay))
# Photo-upload validator: read the minimum accepted side length and the
# number of photos; for each (width, height) decide ACCEPTED (square and
# large enough), CROP IT (large enough but not square), or UPLOAD ANOTHER.
min_len = int(input())
count = int(input())
for _ in range(count):
    w, h = list(map(int, input().split()))
    if w < min_len or h < min_len:
        print("UPLOAD ANOTHER")
    elif w == h:
        print("ACCEPTED")
    else:
        print("CROP IT")
993,127 | 8604dde45e34917ced0fd1836f188a329517519a | import numpy as np
import os
import json
import random
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
from sklearn.externals import joblib
class TfidfFormatter:
    """Converts raw example dicts into TF-IDF feature matrices (jieba-segmented).

    conf keys used: "text_use_which" -- dict key of the raw text field;
    "label_use_which" -- dict key of the label field; "task" -- when
    "Classification", string labels are mapped to integer ids.
    """
    def __init__(self,conf):
        self.text_use_which=conf["text_use_which"]
        self.label_use_which=conf["label_use_which"]
        self.segmentor=jieba
        # token_pattern keeps single-character tokens (sklearn's default
        # drops them), which matters for Chinese after jieba segmentation.
        self.vectorizer=TfidfVectorizer(min_df=2, max_df=1.0, token_pattern='\\b\\w+\\b')
        self.task=conf["task"]
        if self.task=="Classification":
            self.label2id={}   # label string -> integer id
            self.id2label=[]   # integer id -> label string

    def format(self,data,train=False):
        """Vectorise *data* (list of dicts) into {"x", "meta_info"[, "y"]}.

        With train=True, fits the vectorizer and (for classification) builds
        the label maps.  "y" is omitted when any example lacks a label
        (inference mode); that is asserted impossible during training.

        NOTE(review): calling format(train=True) twice replaces id2label with
        only the second call's labels while label2id keeps accumulating --
        the two maps can desync; confirm single-training-run usage.
        """
        texts=[]
        labels=[]
        meta_infos=[]
        for d in data:
            text=' '.join(self.segmentor.cut(d[self.text_use_which]))
            texts.append(text)
            # Once one example lacks a label, labels becomes None and all
            # further label collection is skipped.
            if (labels is not None):
                if self.label_use_which in d:
                    label=d[self.label_use_which]
                    labels.append(label)
                else:
                    assert not train
                    labels=None
            if "meta_info" in d.keys():
                meta_infos.append(d["meta_info"])
            else:
                meta_infos.append({})
        if train:
            self.vectorizer.fit(texts)
            if self.task=="Classification":
                all_labels=list(set(labels))
                for l in all_labels:
                    if not l in self.label2id:
                        self.label2id[l]=len(self.label2id)
                self.id2label=all_labels
        if self.task=="Classification" and (labels is not None):
            labels=[self.label2id[l] for l in labels]
        texts=self.vectorizer.transform(texts)
        ret={
            "x":texts,
            "meta_info":meta_infos
        }
        if labels is not None:
            ret["y"]=labels
        return ret

    def pred2label(self,pred):
        """Map predicted integer ids back to label strings
        (identity for non-classification tasks)."""
        if self.task!="Classification":
            return pred
        ret=[]
        for ele in pred:
            ret.append(self.id2label[ele])
        return ret

    def dump(self,path):
        """Persist the fitted vectorizer (joblib) and, for classification,
        the label maps (JSON) under *path*."""
        if self.task=="Classification":
            json.dump(self.label2id,open(os.path.join(path,"label2id.json"),"w"))
            json.dump(self.id2label,open(os.path.join(path,"id2label.json"),"w"))
        joblib.dump(self.vectorizer,os.path.join(path,"tf-idf.m"))

    def load(self,path):
        """Inverse of dump(): restore label maps and the fitted vectorizer."""
        if self.task=="Classification":
            self.label2id=json.load(open(os.path.join(path,"label2id.json"),"r"))
            self.id2label=json.load(open(os.path.join(path,"id2label.json"),"r"))
        self.vectorizer=joblib.load(os.path.join(path,"tf-idf.m"))
|
993,128 | b00491900978cc7864a50a15a94bd034175f802b | # -*- coding: utf-8 -*-
from httoop.meta import HTTPSemantic
class URIType(HTTPSemantic):
    """Metaclass for URI classes that auto-registers scheme-specific subclasses.

    When a class created with this metaclass defines a truthy ``SCHEME``,
    it is registered under the lowercased scheme name in every base class
    exposing a ``SCHEMES`` dict (via setdefault, so the first registration
    for a scheme wins).
    """
    def __new__(mcs, name, bases, dict_):
        cls = super(URIType, mcs).__new__(mcs, name, tuple(bases), dict_)
        if dict_.get('SCHEME'):
            for base in bases:
                if getattr(base, 'SCHEMES', None) is not None:
                    base.SCHEMES.setdefault(dict_['SCHEME'].lower(), cls)
        return cls
|
993,129 | 08afddea4b8538089bcfd35d14150c827d399ed8 | # Copyright (C) 2013 Christopher "Kasoki" Kaster
#
# This file is part of "FancyProjects". <http://github.com/Kasoki/FancyProjects>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
class ProjectStructure:
    """A FancyProjects project template, described by a JSON metadata dict."""

    def __init__(self, path, json):
        self.path = path
        self.name = json["name"]
        self.description = json["description"]
        self.default_project_name = json["default_project_name"]
        self.settings = json["settings"]
        self.build_systems = json["build_systems"]
        # Filled in by check_for_protoproj().
        self.has_project_proto = False
        self.proto_file = ""

    def to_quickpanel_item(self):
        """[title, subtitle] pair in the shape Sublime's quick panel expects."""
        return [self.name, self.description]

    def __str__(self):
        return self.name

    def check_for_protoproj(self):
        """Scan self.path for the first *.project-proto file and remember it."""
        for entry in os.listdir(self.path):
            if entry.endswith(".project-proto"):
                self.has_project_proto = True
                self.proto_file = os.path.join(self.path, entry)
                break
993,130 | 5fda429c0cece5802011f7b9be3c31f24414df18 | # Author: Acer Zhang
# Datetime: 2020/10/12
# Copyright belongs to the author.
# Please indicate the source for reprinting.
from train import *
from reader import InferReader
# Inference configuration: sample-image directory, trained checkpoint
# (epoch 10), and prediction batch size.
DATA_PATH = "/Users/zhanghongji/PycharmProjects/CaptchaDataset/sample_img"
CHECKPOINT_PATH = "/Users/zhanghongji/PycharmProjects/CaptchaDataset/OCR_Module/output/10"
BATCH_SIZE = 32
def ctc_decode(text, blank=10):
    """
    Simple greedy CTC decoder: collapse consecutive duplicates, then drop
    blank symbols.

    :param text: sequence of per-frame label indices (greedy argmax path)
    :param blank: index of the CTC blank/separator symbol
    :return: list of decoded label indices

    Bug fix: the previous-symbol tracker must be updated for *every* frame,
    including blanks.  Before, it was only updated on emitted labels, so a
    repeated label separated by a blank (e.g. [1, blank, 1], which encodes
    "11") was wrongly collapsed to a single label.
    """
    result = []
    prev = -1
    for char in text:
        if char != blank and char != prev:
            result.append(char)
        prev = char
    return result
if __name__ == '__main__':
    # Build the network in inference mode and restore the trained weights.
    model = pp.Model(Net(is_infer=True), inputs=input_define)
    model.load(CHECKPOINT_PATH)
    model.prepare()
    # Reader yields preprocessed captcha images from DATA_PATH.
    infer_reader = InferReader(DATA_PATH)
    img_names = infer_reader.get_names()
    results = model.predict(infer_reader, batch_size=BATCH_SIZE)
    # results[0] is a list of batches; each batch holds one per-frame label
    # sequence per image, flattened here back to the file-name order.
    index = 0
    for text_batch in results[0]:
        for prob in text_batch:
            out = ctc_decode(prob, blank=10)
            print(f"文件名:{img_names[index]},推理结果为:{out}")
            index += 1
|
993,131 | 8eeab5738a954213262912cc81a7e014626b07bc | #!/usr/bin/python
import os
import pandas as pd
import argparse
# CLI: --data_dir points at the directory holding the per-aspect
# "<column>_result.csv" prediction files to be merged.
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default="experiments/HAN/result/", help="Directory containing the dataset")
def merge(basedir, output_file):
    """Merge the 20 per-aspect sentiment CSVs under *basedir* into one file.

    Each "<column>_result.csv" holds id,label rows whose label looks like
    "[3]"; labels are shifted by -2 into the -2..1 competition scale.  The
    merged table (with an empty "content" column first) is written to
    basedir/output_file with an "id" index label.
    """
    columns = [
        "location_traffic_convenience",
        "location_distance_from_business_district",
        "location_easy_to_find",
        "service_wait_time",
        "service_waiters_attitude",
        "service_parking_convenience",
        "service_serving_speed",
        "price_level",
        "price_cost_effective",
        "price_discount",
        "environment_decoration",
        "environment_noise",
        "environment_space",
        "environment_cleaness",
        "dish_portion",
        "dish_taste",
        "dish_look",
        "dish_recommendation",
        "others_overall_experience",
        "others_willing_to_consume_again",
    ]
    merged = [pd.Series(name="content")]
    for column in columns:
        per_aspect = os.path.join(basedir, column + "_result.csv")
        frame = pd.read_csv(per_aspect, encoding='utf-8', names=["id", column], header=None, skiprows=1)
        frame.set_index('id')
        merged.append(frame[column].map(lambda e: int(e.strip("[]")) - 2))
    result_all = pd.concat(merged, axis=1)
    result_all.to_csv(os.path.join(basedir, output_file), encoding='utf-8', index_label="id")
if __name__ == "__main__":
    # Merge the per-aspect predictions under --data_dir into han_result.csv.
    args = parser.parse_args()
    merge(args.data_dir, "han_result.csv")
993,132 | 6a8fd584d0760ea193d1db551eddce5e5888f705 | # Generated by Django 3.2.6 on 2021-09-10 11:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add integer fields experience (default 1),
    salary (default 0) and vacancy (default 1) to the job model."""
    dependencies = [
        ('job', '0006_job_published_at'),
    ]
    operations = [
        migrations.AddField(
            model_name='job',
            name='experience',
            field=models.IntegerField(default=1),
        ),
        migrations.AddField(
            model_name='job',
            name='salary',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='job',
            name='vacancy',
            field=models.BigIntegerField(default=1),
        ),
    ]
|
993,133 | b275678714d301a028aa868acf30bec68fc76782 | #!/usr/bin/env pyformex
# $Id$
##
## This file is part of pyFormex 0.8.5 Sun Nov 6 17:27:05 CET 2011
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: https://savannah.nongnu.org/projects/pyformex/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Double Layer Flat Space Truss Roof
level = 'advanced'
topics = ['FEA']
techniques = ['color']
"""
from plugins.properties import *
from plugins.fe_abq import *
import os
####
#Data
###################
# Geometry and loading parameters of the double-layer flat space truss.
dx = 1800 # Modular size [mm]
ht = 900 # Deck height [mm]
nx = 4 # number of bottom deck modules in x direction
ny = 5 # number of bottom deck modules in y direction
q = -0.005 #distributed load [N/mm^2]
#############
#Creating the model
###################
# Top deck bar grid (prop 3), bottom deck grid shifted half a module down
# and outward (prop 0), and the diagonals connecting them (prop 1).
top = (Formex("1").replic2(nx-1,ny,1,1) + Formex("2").replic2(nx,ny-1,1,1)).scale(dx)
top.setProp(3)
bottom = (Formex("1").replic2(nx,ny+1,1,1) + Formex("2").replic2(nx+1,ny,1,1)).scale(dx).translate([-dx/2,-dx/2,-ht])
bottom.setProp(0)
T0 = Formex(4*[[[0,0,0]]]) # 4 times the corner of the top deck
T4 = bottom.select([0,1,nx,nx+1]) # 4 nodes of corner module of bottom deck
dia = connect([T0,T4]).replic2(nx,ny,dx,dx)
dia.setProp(1)
F = (top+bottom+dia)
# Show upright
createView('myview1',(0.,-90.,0.))
clear();linewidth(1);draw(F,view='myview1')
############
#Creating FE-model
###################
M = F.toMesh()
###############
#Creating elemsets
###################
# Remember: elems are in the same order as elements in F
topbar = where(F.prop==3)[0]
bottombar = where(F.prop==0)[0]
diabar = where(F.prop==1)[0]
###############
#Creating nodesets
###################
# Classify nodes by the number of elements meeting at them (their valence):
# interior top nodes have 8, top edges 7, top corners 6, bottom edges 5,
# bottom corners 3. NOTE(review): these counts follow from this particular
# topology — re-derive if nx/ny or the bracing pattern changes.
nnod=M.ncoords()
nlist=arange(nnod)
count = zeros(nnod)
for n in M.elems.flat:
    count[n] += 1
field = nlist[count==8]
topedge = nlist[count==7]
topcorner = nlist[count==6]
bottomedge = nlist[count==5]
bottomcorner = nlist[count==3]
support = concatenate([bottomedge,bottomcorner])
edge = concatenate([topedge,topcorner])
########################
#Defining and assigning the properties
#############################
# Q = tributary point load per interior node; edge nodes carry half of it.
Q = 0.5*q*dx*dx
P = PropertyDB()
P.nodeProp(set=field,cload = [0,0,Q,0,0,0])
P.nodeProp(set=edge,cload = [0,0,Q/2,0,0,0])
P.nodeProp(set=support,bound = [1,1,1,0,0,0])
circ20 = ElemSection(section={'name':'circ20','sectiontype':'Circ','radius':10, 'cross_section':314.159}, material={'name':'S500', 'young_modulus':210000, 'shear_modulus':81000, 'poisson_ratio':0.3, 'yield_stress' : 500,'density':0.000007850})
# example of how to set the element type by set
P.elemProp(set=topbar,section=circ20,eltype='T3D2')
P.elemProp(set=bottombar,section=circ20,eltype='T3D2')
# alternatively, we can specify the elements by an index value
# in an array that we will pass in the Abqdata 'eprop' argument
P.elemProp(prop=1,section=circ20,eltype='T3D2')
# Since all elements have same characteristics, we could just have used:
# P.elemProp(section=circ20,elemtype='T3D2')
# But putting the elems in three sets allows for separate postprocessing
# Print node and element property databases
for p in P.nprop:
    print p
for p in P.eprop:
    print p
#############
#Writing the inputfile
###################
# Single static step, field output preselected, plus element stresses (S)
# and nodal displacements (U) as results.
step = Step()
out = Output(type='field',variable='preselect')
res = [ Result(kind='element',keys=['S']),
Result(kind='node',keys=['U'])
]
model = Model(M.coords,M.elems)
if not checkWorkdir():
    exit()
AbqData(model,P,[step],eprop=F.prop,out=[out],res=res).write('SpaceTruss')
# End
|
993,134 | 76c5e286eb7d076e8c5dd7dfd1a10c4d06691eee | '''
-*- coding: utf-8 -*-
@Author : zoeyzhu
@Time : 2021/8/8 9:34 下午
@Software: PyCharm
@File : 1137.py
@function:
泰波那契序列 Tn 定义如下:
T0 = 0, T1 = 1, T2 = 1, 且在 n >= 0 的条件下 Tn+3 = Tn + Tn+1 + Tn+2
给你整数 n,请返回第 n 个泰波那契数 Tn 的值。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/n-th-tribonacci-number
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
from typing import List
class Solution:
    """Compute tribonacci numbers with per-instance memoization.

    T(0) = 0, T(1) = T(2) = 1, T(n) = T(n-1) + T(n-2) + T(n-3).
    """
    def __init__(self):
        # cache mapping n -> T(n) for n >= 3
        self.a = dict()
    def tribonacci(self, n: int) -> int:
        """Return the n-th tribonacci number."""
        if n < 3:
            # base cases: T(0)=0, T(1)=T(2)=1
            return 1 if n else 0
        cached = self.a.get(n)
        if cached is not None:
            return cached
        value = sum(self.tribonacci(n - k) for k in (1, 2, 3))
        self.a[n] = value
        return value
# Smoke test: print the 25th tribonacci number.
s=Solution()
print(s.tribonacci(25))
993,135 | ff35a5699163e5c8d49bb8afd0741ea7c173048a | from PIL import Image
# filename = r'C:\Users\..\01.jpg'
filename = r'./Importing files/trash/01.jpg'
Image.open(filename)
|
993,136 | 5057f3e4128a0f8af284fd42060c3cde30604f79 | def get_dvalue(s):
if len(s) < 2:
return 0
s.sort()
dvalue = 0
for i in range(0, len(s)-1):
if s[i+1]-s[i] > dvalue:
dvalue = s[i+1]-s[i]
return dvalue
if __name__ == "__main__":
    # Input like "[1,2,3]": strip the surrounding brackets, split on commas.
    s = [int(n) for n in input()[1:-1].split(',')]
    print(get_dvalue(s))
|
993,137 | 7cf4507fe1477673481797c1a55301912c0500c1 | '''
void call(int n){
int i = 1;
CHECK_NUM:
int x = i;
if ( x % 3 == 0 ){
cout << " " << i;
goto END_CHECK_NUM;
}
INCLUDE3:
if ( x % 10 == 3 ){
cout << " " << i;
goto END_CHECK_NUM;
}
x /= 10;
if ( x ) goto INCLUDE3;
END_CHECK_NUM:
if ( ++i <= n ) goto CHECK_NUM;
cout << endl;
}
'''
# Print every i in 1..n that is a multiple of 3 or contains the digit 3,
# each preceded by a single space, then terminate the line. Mirrors the
# goto-based C routine quoted above: the original digit-by-digit suffix
# scan is equivalent to testing whether '3' appears in str(i).
n = int(input())
for i in range(1, n + 1):
    if i % 3 == 0 or '3' in str(i):
        print(f' {i}', end='')
print()
'''
3 6 9 12 13 15 18 21 23 24 27 30 31 32 33 34 35 36 37 38 39 42 43 45 48 51 53 54 57 60
3 6 9 12 13 15 18 21 23 24 27 30 31 32 33 34 35 36 37 38 39 42 43 45 48 51 53 54 57 60
''' |
993,138 | ef3c20230b3c20885c43ed60b977be761448147e | from numpy import*
# Read a Python list literal of grades, e.g. "[7, 8.5, 9]".
# NOTE(review): eval on raw input executes arbitrary code — acceptable for a
# classroom exercise, never for untrusted input.
v = array(eval(input("digite as notas: ")))
a =min(v)
b = sum(v)
t = size(v)
# mean of the grades after dropping the single lowest one
y = (b - a)/(t - 1)
print(round(y, 2))
993,139 | 1b9092ba0e6d320ebb6e5922dd3e07b464efde78 | from turtle import *
from random import randint
import string
# Draw 16 numbered lane markers, then race four turtles with random steps.
for step in range(16):
    speed(0.1)
    write(step, align="left")
    right(90)
    forward(10)
    pendown()
    forward(150)
    penup()
    backward(160)
    left(90)
    forward(20)

# BUG FIX: the original wrapped everything below in a bare `try:` with no
# matching `except`/`finally`, which is a SyntaxError — the script could not
# run at all. The `try` served no purpose and has been removed. The original
# also called ada.right(360) inside ama's setup (a typo, though right(360)
# is a no-op rotation anyway); each racer now gets its own setup.

def _make_racer(colour, lane_y):
    """Create one racing turtle at the left margin on lane height lane_y."""
    racer = Turtle("turtle")
    racer.color(colour)
    racer.right(360)
    racer.penup()
    racer.goto(-30, lane_y)
    racer.pendown()
    return racer

ada = _make_racer("red", -15)
ama = _make_racer("blue", -55)
aca = _make_racer("green", -95)
ava = _make_racer("yellow", -135)

for step in range(130):
    ama.forward(randint(1, 5))
    ada.forward(randint(1, 5))
    aca.forward(randint(1, 5))
    ava.forward(randint(1, 5))
993,140 | 11fead29a9fbb90cf234518be4a67048ecfa7427 | """
2. What is duck typing philosophy of python
"""
"""
Duck Typing is a type system used in dynamic languages. For example, Python, Perl, Ruby, PHP, Javascript, etc.
where the type or the class of an object is less important than the method it defines. Using Duck Typing, we do
not check types at all. Instead, we check for the presence of a given method or attribute.
The name Duck Typing comes from the phrase:
“If it looks like a duck and quacks like a duck, it’s a duck”
Example:
# Python program to demonstrate
# duck typing
class Specialstring:
def __len__(self):
return 21
# Driver's code
if __name__ == "__main__":
string = Specialstring()
print(len(string))
""" |
993,141 | a107b8439bc5e198dd434f1f2d197e8ff22015ca | #!/usr/bin/sudo python
from serial import Serial
import datetime
arduino = Serial("/dev/ttyS1", baudrate=115200, timeout=3.0)

success = 0
failure = 0
echoedPayload = ""
timeout = 1  # seconds to wait for each echo before aborting

print("Serial Test: The tinker send a payload containing a number from 0 to 10,000, which will be echoed back by the arduino."
"The echo number is then compared with the original payload.")

# Send each number 0..10000 terminated by '\n' and verify the echo matches.
for i in range(0, 10001):
    payload = (str(i) + "\n").encode()
    arduino.write(payload)
    t1 = datetime.datetime.now()
    while not arduino.in_waiting: #wait for the return payload
        t2 = datetime.datetime.now()
        # BUG FIX: the original compared the .second fields alone
        # (t2.second - t1.second), which goes negative across a minute
        # boundary (59 -> 0) and so never triggered there; use real
        # elapsed time instead.
        if (t2 - t1).total_seconds() > timeout:
            print("Timeout at payload: ", i)
            exit(1)
    echoedPayload = arduino.readline()
    if payload == echoedPayload:
        success += 1
    else:
        failure += 1
        print("Unmatched:", payload, "vs", echoedPayload)

print("Success:", success, "Failures:", failure)
993,142 | a73c4b055c00b389e2d8e70a30a2608051a02492 | from mmrcnn import model as modellib, visualize
import os
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]='-1'
import coco
import skimage.io
from datetime import datetime
import cv2
WEIGHTS_DIR = "./weights"
TEST_PIC_DIR = "./testpictures"
# COCO class labels, index-aligned with the model's class ids
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
config = coco.CocoConfig()
# BUG FIX: the original read `model_path = model_path = os.path.join(...)`
# (duplicated assignment target); collapsed to a single assignment.
model_path = os.path.join(WEIGHTS_DIR, "trained_coco_2018-Jun-14__17_39_03.h5")
model = modellib.MaskRCNN(mode="inference", config=config, model_dir=WEIGHTS_DIR)
# returns a compiled model
#model.load_weights(model_path, by_name=True)
# NOTE(review): load_weights is commented out, so despite the message below
# the network runs with whatever weights MaskRCNN initialises by default.
print("successfully loaded model")
image = skimage.io.imread(os.path.join(TEST_PIC_DIR, "street" + str(7) + ".jpg"))
height, width = image.shape[:2]
# Downscale preview so the longest side is 64 px. BUG FIX: the original used
# `r` for this ratio and then reassigned `r` to the detection result dict
# below — renamed to `scale` to remove the shadowing.
if height > width:
    scale = 64 / height
    small = cv2.resize(image, (int(width * scale), 64))
else:
    scale = 64 / width
    small = cv2.resize(image, (64, int(height * scale)))
# Run detection on the full-resolution image and time it.
start = datetime.now()
print("starting detection")
result = model.detect([image], verbose=1)
print("Time taken for detection: {}".format(datetime.now() - start))
r = result[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])
|
993,143 | 35cd775a8631cb16ab25afe73cbb71b4b58d6f71 | import csv
import pandas as pd
import numpy as np
from sklearn import svm
import numpy as np
from numpy.random import randn
from numpy.random import seed
import properties as prop
import quandl
delta=prop.delta
target_index=prop.target_index
dependency=prop.dependency
authtoken=prop.authtoken
start_date=prop.start_date
end_date=prop.end_date
column=prop.column
train_ratio=prop.train_ratio
def find_all_dates(df):
    """Return the ISO date string ('YYYY-MM-DD') of every row in df's index."""
    dates = []
    for idx in df.index.tolist():
        dates.append(str(idx.date()))
    return dates
def return_dictonary(df):
    """Map each ISO date string of df's index to its corresponding row."""
    rows = {}
    for date in find_all_dates(df):
        rows[date] = df.loc[date]
    return rows
def return_alldataInterpolated(dict, all_dates):
    """Build a Series aligned to all_dates, linearly interpolating missing dates.

    :param dict: mapping of date string -> value (anything float() accepts)
    :param all_dates: ordered list of date strings defining the output positions
    :return: pandas Series of floats with gaps filled by interpolate()
    """
    # np.nan instead of the np.NaN alias (removed in NumPy 2.0); dict.get
    # keeps a single lookup instead of `date in dict` followed by dict[date].
    values = []
    for date in all_dates:
        value = dict.get(date)
        values.append(np.nan if value is None else float(value))
    return pd.Series(values).interpolate()
def listNormaliser(list1, delta):
    """Relative change of each element versus the element delta places later."""
    result = []
    for i in range(len(list1) - delta):
        later = list1[i + delta]
        result.append((list1[i] - later) / later)
    return result
# Fetch the target index series from Quandl (only the configured column).
df_source =quandl.get(target_index, start_date=start_date, end_date=end_date,authtoken=authtoken)[[column]]
# leftover debug prints
print 'ABCD'
print df_source
print 'ABCD'
all_dates=find_all_dates(df_source)
# delta-day relative changes of the target, and their up(1)/down(0) labels
source_price=listNormaliser(df_source[column].tolist(),delta)
source_label= list(map(lambda x: 1 if x >0 else 0, source_price))
X1=[]
# One normalised, date-aligned feature series per dependency index.
for d in dependency:
    df =quandl.get(d, start_date=start_date, end_date=end_date, authtoken=authtoken)[[column]]
    X1.append(listNormaliser(return_alldataInterpolated(return_dictonary(df),all_dates),delta))
train_cases=int(len(all_dates)*train_ratio)
test_cases=len(all_dates) - train_cases
# transpose X1 so each row is one date's feature vector
X= np.array([list(a) for a in list(zip(*X1))][:train_cases])
y= source_label[:train_cases]
# print(X)
# print (y)
clf = svm.SVC( C = 20.0)
clf.fit(X, y)
predicted_indicator=clf.predict([list(a) for a in list(zip(*X1))][train_cases:])
# NOTE(review): this file uses Python 2 print statements, so the division
# below is integer division — the accuracy floors to 0 for any imperfect
# prediction; float(test_cases) is almost certainly intended. Confirm.
accuracy_percentage = sum(list(map(lambda x :1 if x[0]==x[1] else 0,zip(predicted_indicator,source_label[train_cases:]))))/test_cases*100
print ('Accuracy of the model is :' +str(accuracy_percentage))
|
993,144 | d8c99e58f18169075093d6a471bc3225649a879a | ii = [('ShawHDE.py', 1), ('WilkJMC3.py', 1), ('TennAP.py', 1), ('FitzRNS3.py', 2), ('WilkJMC2.py', 1), ('WestJIT2.py', 1), ('BackGNE.py', 3), ('WheeJPT.py', 1), ('FitzRNS.py', 3), ('MackCNH2.py', 3), ('JacoWHI2.py', 1), ('SomeMMH.py', 3)] |
993,145 | cca0064c44809b3113939c9c328af9147c472380 | #aplanar una lista anidada utilizando el bucle for teniendo en cuenta los diferentes tipos de datos que se encuentran en la lista a aplanar
# Flatten a nested list of ints, preserving left-to-right order.
# The original handled exactly three nesting levels with hard-coded loops;
# this recursive version generalises to any depth while producing the same
# output for the sample data (and, like the original, keeps only ints).
datos = [1,5,8,2,[1,5,6,7,],[1,[4,2,5,7,]]]
plana = []

def _aplanar(valores, destino):
    """Append every int found in valores (depth-first) to destino."""
    for v in valores:
        if isinstance(v, list):
            _aplanar(v, destino)
        elif isinstance(v, int):
            destino.append(v)

_aplanar(datos, plana)
print(plana)
print(datos)
993,146 | 8568b358fcb09693f4ca2270dd43931520107d0c | import os
import pandas as pd
import numpy as np
import statsmodels.api as sm
# Load the Boston housing data and fit an OLS regression of Target on
# CRIM, RM and LSTAT with an intercept, then inspect the residuals.
boston = pd.read_csv("./boston_house.csv")
print(boston.head(5))
features = boston[['CRIM', 'RM', 'LSTAT']]
target = boston[['Target']]
print(features.head(3))
# add_constant appends the intercept column that sm.OLS requires
multi_features = sm.add_constant(features, has_constant='add')
multi_model = sm.OLS(target, multi_features)
fitted_multi_model = multi_model.fit()
print(fitted_multi_model.summary())
# in-sample predictions on the training design matrix
multi_pred = fitted_multi_model.predict(multi_features)
print(multi_pred)
import matplotlib.pyplot as plt
# raw residuals plotted over observation number
fitted_multi_model.resid.plot()
plt.xlabel("residual_number")
plt.show()
# standardized (Pearson) residuals as a scatter plot
std_resid = fitted_multi_model.resid_pearson
plt.scatter(range(len(std_resid)), std_resid)
plt.show()
993,147 | bab61c0963275ddcffab62d35d0bad7424a75569 | # avg of time = 2.7401173988
from lxml import etree
import cv2
from scipy import ndimage
import pytesseract
import numpy as np
from PIL import Image
def making_data_ready(n):
    """Generate four rotated-overlay test images from '<n>.jpg'.

    An inverted crop of the source is rotated by 90/60/30/15 degrees and
    XOR-ed back onto a copy of the original, saved as '<n>1.jpg'..'<n>4.jpg'.

    :param n: base file name (without extension) of the source image
    """
    img1 = cv2.imread(n + '.jpg')
    # inverted crop that will be pasted back at the various rotations
    img2 = 255 - img1[650:650 + 200, 750:750 + 1800]
    # The original repeated this paste-and-save stanza four times verbatim
    # (one copy per angle); loop over the angles instead.
    for idx, angle in enumerate((90, 60, 30, 15), start=1):
        rotated = ndimage.rotate(img2, angle)
        temp = img1.copy()
        temp[0:rotated.shape[0], 0:rotated.shape[1]] = cv2.bitwise_xor(
            img1[0:rotated.shape[0], 0:rotated.shape[1]], rotated)
        cv2.imwrite(n + str(idx) + '.jpg', temp)
def hOCR_detecting_lines(m,s):
    """Run Tesseract (Persian+Arabic) on '<m><s>.jpg' and draw a blue
    rectangle around every hOCR text line it reports.

    :param m: document index used in the file name
    :param s: page/variant index used in the file name
    """
    img = cv2.imread(str(m) + str(s) + '.jpg')
    f = pytesseract.pytesseract.image_to_pdf_or_hocr(img, lang='fas+ara', extension='hocr')
    tree = etree.fromstring(f)
    words = tree.xpath("//*[@class='ocr_line']")
    for w in words:
        # hOCR stores the box in the title attribute: "bbox x1 y1 x2 y2; ..."
        title_splited = w.attrib['title'].split()
        x1, y1, x2, y2 = int(title_splited[1]), int(title_splited[2]), int(title_splited[3]), int(
            title_splited[4].split(';')[0])
        img_hocr = cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 3)
    # cv2.imwrite(str(m) + str(s) + 'rec.jpg', img_hocr)
    # print(str(m) + str(s))
def hough_detecting_lines(img_sent, m=0, s=0, source_img=0, slice=10):
    """Detect short segments with the probabilistic Hough transform and draw
    them in gray onto the image.

    :param img_sent: if falsy, load '<m><s>.jpg' from disk; else use source_img
    :param m: document index used when loading from disk
    :param s: page/variant index used when loading from disk
    :param source_img: image array used when img_sent is truthy
    :param slice: unused here; kept for interface compatibility
    """
    if not img_sent:
        img = cv2.imread(str(m) + str(s) + '.jpg')
    else:
        img = source_img
    try:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    except cv2.error:
        pass  # already single-channel
    edges = cv2.Canny(img, 50, 200, apertureSize=3)
    minLineLength = 15
    maxLineGap = 5
    # BUG FIX: the original passed minLineLength/maxLineGap positionally, but
    # HoughLinesP's 5th positional parameter is the optional output `lines`
    # array, so both tuning values were silently ignored. Pass by keyword.
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 1,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    print(lines)
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), (100, 100, 100), 2)
    # cv2.imwrite(str(m) + str(s) + '_hough_line.jpg', img)
def semiHistogram_detecting_lines(m,s):
    """Find skewed text regions in '<m><s>.jpg' via a coarse block histogram,
    annotate each region with its estimated angle, and save de-rotated crops
    for regions tilted more than 5 degrees plus an annotated overview image.

    :param m: document index used in the file name
    :param s: page/variant index used in the file name
    """
    img = cv2.imread(str(m) + str(s) + '.jpg')
    img_file = str(m) + str(s) + '.jpg'
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # 1 - we need dpi for slicing image
    imgPIL = Image.open(img_file)
    dpi = (300, 300) # default is (300 , 300)
    if 'dpi' in imgPIL.info.keys():
        dpi = imgPIL.info['dpi']
    del imgPIL
    # 2 - use erod nad then dilate in order to clear small noises
    gray_env = cv2.bitwise_not(gray)
    kernel_dilate = np.ones((5,5),np.uint8)
    gray_env_dilate = cv2.dilate(gray_env , kernel_dilate , iterations=2)
    # 3 - by semi-histogram way we want to find wasted areas
    # block size in pixels; each `slice`x`slice` tile becomes one cell below
    slice = int(dpi[0]/20)
    # cv2.imwrite('find_wasted_round_area_in_documents_1_inv.jpg', gray_env_dilate)
    # poly: one cell per tile holding the tile's mean intensity, then
    # binarised so "ink-bearing" tiles become 255
    poly = np.zeros((int(gray_env_dilate.shape[0] / slice), int(gray_env_dilate.shape[1] / slice), 1), np.uint8)
    poly.fill(0)
    pices = (int(gray_env_dilate.shape[0] / slice), int(gray_env_dilate.shape[1] / slice))
    for y in range(pices[0]):
        for x in range(pices[1]):
            poly[y, x] = np.mean(gray_env_dilate[(y * slice):((y + 1) * slice), (x * slice):((x + 1) * slice)])
    _, poly = cv2.threshold(poly, 10, 255, cv2.THRESH_BINARY)
    # cv2.imwrite('find_wasted_round_area_in_documents_2_poly_1.jpg', poly)
    # poly2: horizontal 5-cell smoothing — a cell and its 5x5 neighbourhood
    # are switched on when the horizontal window around it is mostly "ink"
    poly2 = np.zeros((int(gray_env_dilate.shape[0] / slice), int(gray_env_dilate.shape[1] / slice), 1), np.uint8)
    poly2.fill(0)
    for y in range(2, pices[0] - 2):
        for x in range(2, pices[1] - 2):
            if (np.mean(poly[y, x - 2:x + 3]) > 50):
                poly2[y-2:y+3 + 1, x-2:x +3] = 255
            else:
                poly2[y, x] = 0
    # cv2.imwrite('find_wasted_round_area_in_documents_4_poly2_{}_{}.jpg'.format(str(m),str(s)), poly2)
    del poly
    # poly3: blow the cell mask back up to full image resolution
    poly3 = np.zeros((int(gray_env_dilate.shape[0]), int(gray_env_dilate.shape[1]), 1), np.uint8)
    poly3.fill(0)
    for y in range(0, pices[0]):
        for x in range(0, pices[1]):
            poly3[(y * slice):((y + 1) * slice), (x * slice):((x + 1) * slice)] = poly2[y, x]
    # cv2.imwrite('find_wasted_round_area_in_documents_5_poly3.jpg', poly3)
    del poly2
    contours , _ = cv2.findContours(poly3,cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_SIMPLE)
    c = 1
    for cnt in contours[:]:
        # fit a rotated rectangle and name its four corners by position
        rect = cv2.minAreaRect(cnt)
        box = np.int0(cv2.boxPoints(rect))
        first_sorted = sorted(box, key=lambda l: l[0])
        lefts = first_sorted[0:2]
        rights = first_sorted[2:]
        tmp = sorted(lefts, key=lambda l: l[1])
        top_left = tmp[0]
        down_left = tmp[1]
        tmp = sorted(rights, key=lambda l: l[1])
        top_right = tmp[0]
        down_right = tmp[1]
        # compare the squared side lengths to decide which edge is the long
        # one, then derive the tilt angle from that edge's endpoints
        if (((top_left[1] - down_left[1])**2 + (top_left[0] - down_left[0])**2) <
            ((top_left[1] - top_right[1])**2 + (top_left[0] - top_right[0])**2)):
            # print ('horosontal',c)
            y1 = down_left[0]
            x1 = down_left[1]
            y2 = down_right[0]
            x2 = down_right[1]
            angle = (x2 - x1)/(y1 - y2)
            degree = (np.arctan(angle)/np.pi)*180
            # print(x1 , y1 , x2 , y2)
            # print('angle: ',(np.arctan(angle)/np.pi)*180)
        else:
            # print ('vertical')
            y1 = down_left[0]
            x1 = down_left[1]
            y2 = top_left[0]
            x2 = top_left[1]
            if y1 != y2 :
                angle = (x2 - x1) / (y1 - y2)
            else:
                angle = 90
            degree = (np.arctan(angle)/np.pi)*180
            # print(x1 , y1 , x2 , y2)
            # print('angle: ', (np.arctan(angle) / np.pi) * 180)
        #img = cv2.drawContours(img,[box],0,(1*c,2*c,3*c),5)
        cv2.putText(img , str(degree) , (down_right[0],down_right[1]) , cv2.FONT_HERSHEY_SIMPLEX ,1,0,2)
        # print (degree)
        # regions tilted more than 5 degrees get cropped, counter-rotated,
        # white-filled at the border and saved individually
        if degree > 5 :
            x , y , w , h = cv2.boundingRect(cnt)
            # print(c,' must be changed' , ' => ',w,h)
            new_img = img[y:y+h,x:x+w]
            rotated = ndimage.rotate(new_img, -1*degree)
            cv2.floodFill(rotated,None,(0,0),(255,255,255))
            cv2.imwrite('over_rotated_paragraph_{}_{}_{}.jpg'.format(str(m),str(s),str(c)) , rotated)
        c+=1
    cv2.imwrite(str(m) + str(s) + '_semi_histogram.jpg', img)
if __name__ == '__main__':
    # Run the semi-histogram line detector over every (document, page)
    # combination and report the mean wall-clock time per image.
    M = 5
    S = 2
    timings = []
    for doc in range(1, M + 1):
        for page in range(1, S + 1):
            start = cv2.getTickCount()
            semiHistogram_detecting_lines(doc, page)
            stop = cv2.getTickCount()
            print(doc, page)
            timings.append((stop - start) / cv2.getTickFrequency())
    print(np.mean(timings))
|
993,148 | 811f518f6ab8f4064d4cd8b2daa5a24f8eba3753 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from core.task import on_start, on_message, on_timeout, on_output_message, Task # , on_sequence
# The following tasks use the following bit-based vocabulary:
# stay_quiet 01
# space 00
# period 10
# say 11
# in task0, the learner must only produce the 0 bit until the end of the task
class Task0(Task):
    """Send '011000' and require the learner to stay quiet (emit only '0').

    NOTE(review): assuming @on_output_message matches the environment's own
    output stream and @on_message the learner's bits — core.task is not
    visible here; confirm against the framework.
    """
    def __init__(self):
        super(Task0, self).__init__(
            max_time=1000)
    @on_start()
    def give_instructions(self, event):
        # '01' (stay_quiet) followed by '1000' — see the bit vocabulary
        # documented at the top of this module
        self.set_message('011000')
    @on_output_message(r'1000')
    def reward_at_end(self, event):
        # the full instruction went out without the learner breaking silence
        self.set_reward(1)
    @on_message(r'1')
    def punish_not_quiet(self, event):
        # any '1' from the learner means it failed to stay quiet
        self.set_reward(-1)
# in task1, the learner must produce 1 right after the environment stops speaking
# (and 0 while env is talking)
class Task1(Task):
    """Send '1111000'; the learner must answer '1' right after the message
    ends and emit only '0' while the environment is still talking."""
    def __init__(self):
        super(Task1, self).__init__(
            max_time=1000)
    @on_start()
    def give_instructions(self, event):
        # flips to True once the environment finishes its message
        self.finished_talking=False
        self.set_message('1111000')
    @on_output_message(r'1000')
    def set_finished_talking_flag(self, event):
        # fires when the tail '1000' of the instruction has been emitted
        self.finished_talking=True
    @on_message(r'.')
    def check_right_response(self, event):
        # '1' after the message -> reward; '1' too early, or anything other
        # than '1' once the message has ended -> punish
        if event.is_message('1'):
            if (self.finished_talking):
                self.set_reward(1)
            else:
                self.set_reward(-1)
        elif (self.finished_talking):
            self.set_reward(-1)
# task11 is like task1, but now the learner must produce the two-bit sequence 11
class Task11(Task):
    """Send '11111000'; the learner must answer the two-bit sequence '11'
    right after the environment's message ends."""
    def __init__(self):
        super(Task11, self).__init__(
            max_time=1000)
    @on_start()
    def give_instructions(self, event):
        self.finished_talking=False
        # counts how many of the learner's answer '1' bits have been seen
        self.learner_turn_counter=0
        self.set_message('11111000')
    @on_output_message(r'1000')
    def set_finished_talking_flag(self, event):
        self.finished_talking=True
    @on_message(r'.')
    def check_right_response(self, event):
        # first post-message '1' is remembered, the second one is rewarded;
        # a '1' before the message ends, or any non-'1' afterwards, is punished
        if event.is_message('1'):
            if (self.finished_talking):
                if (self.learner_turn_counter==0):
                    self.learner_turn_counter += 1
                else:
                    self.set_reward(1)
            else:
                self.set_reward(-1)
        elif (self.finished_talking):
            self.set_reward(-1)
# task10 is like task11, but now the learner must produce the two-bit sequence 10
class Task10(Task):
    """Send '11101000'; the learner must answer the two-bit sequence '10'
    right after the environment's message ends."""
    def __init__(self):
        super(Task10, self).__init__(
            max_time=1000)
    @on_start()
    def give_instructions(self, event):
        self.finished_talking=False
        # set to 1 once the learner's leading '1' has been seen
        self.learner_turn_counter=0
        self.set_message('11101000')
    @on_output_message(r'1000')
    def set_finished_talking_flag(self, event):
        self.finished_talking=True
    @on_message(r'.')
    def check_right_response(self, event):
        # expected answer is '1' then '0': a '1' is only legal as the first
        # post-message bit; a non-'1' bit afterwards is rewarded only if the
        # leading '1' was already seen
        if event.is_message('1'):
            if (self.finished_talking and self.learner_turn_counter==0):
                self.learner_turn_counter += 1
            else:
                self.set_reward(-1)
        elif (self.finished_talking):
            if (self.learner_turn_counter>0):
                self.set_reward(1)
            else:
                self.set_reward(-1)
|
993,149 | d323f46bf2de5538b021a267f890ecafa9de0b11 | from socket import *
import os
import sys
import datetime, time
from _thread import start_new_thread
class Server:
    """Single-threaded DFS file server.

    Accepts TCP connections one at a time, authenticates each client against
    the credentials stored in dfs.conf, then serves LIST/PUT commands over
    the same socket.
    """
    def __init__(self, port, fileDir):
        self.confFile = "dfs.conf"      # "key:value" lines: username, password
        self.fileDir = fileDir          # directory whose contents are served
        self.host = "127.0.0.1"
        self.port = int(port)
        self.sSocket = None             # listening socket
        self.buffer = 4096              # recv buffer size in bytes
        self.isAuthenticated = False
        self.start()
    def start(self):
        """Create the storage directory and socket, then serve forever."""
        self.checkFileDir()
        self.createSocket()
        self.listenForConnections()
    def checkFileDir(self):
        """Create the served directory if it does not exist yet."""
        if not os.path.exists(self.fileDir):
            os.mkdir(self.fileDir)
            log("File Directory Created")
    def createSocket(self):
        """Bind a reusable TCP socket on self.port (all interfaces)."""
        try:
            self.sSocket = socket(AF_INET, SOCK_STREAM)
            self.sSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
            self.sSocket.bind(('', self.port))
        except Exception as e:
            # BUG FIX: the original had a second `except error` clause after
            # this one; socket.error is a subclass of Exception, so that
            # clause was unreachable and has been removed.
            log(str(e))
    def listenForConnections(self):
        """Accept and serve clients sequentially until interrupted."""
        try:
            self.sSocket.listen(5)
            log("Listening...")
            while True:
                try:
                    clientSocket, clientAddress = self.sSocket.accept()
                    self.connect(clientSocket)
                except Exception as e:
                    log(str(e))
        except KeyboardInterrupt:
            log("Interrupting Server.")
            time.sleep(.5)
        finally:
            log("Stopping Server...")
            sys.exit()
    def connect(self, clientSocket):
        """Read "<user> <password>" from the client and, on success,
        acknowledge and start serving its commands."""
        try:
            request = clientSocket.recv(self.buffer)
            userConfig = request.decode().split(" ")
            self.authenticate(userConfig[0], userConfig[1])
            if self.isAuthenticated:
                clientSocket.send("Authenticated".encode())
                self.listenForCommands(clientSocket)
        except Exception as e:
            log(str(e))
    def listenForCommands(self, cSocket):
        """Serve LIST / PUT requests until the client disconnects."""
        while True:
            request = cSocket.recv(self.buffer).decode()
            if not request:
                # BUG FIX: recv() returns b'' once the peer closes; the
                # original looped forever here at full CPU. Stop serving.
                log("Client disconnected")
                break
            if request == "LIST":
                print(os.listdir(self.fileDir))
                cSocket.sendall("\n".join(os.listdir(self.fileDir)).encode())
            elif request == "PUT":
                self.receiveFile()
    def receiveFile(self):
        """Placeholder for the PUT transfer (not implemented yet)."""
        log("Receiving File...")
    def authenticate(self, username, password):
        """Compare the supplied credentials with dfs.conf and set
        self.isAuthenticated accordingly; failures are logged."""
        try:
            with open(self.confFile, "r") as f:
                # each config line looks like "key:value"; keep the values
                x = [i.split(":")[1].rstrip() for i in f.readlines()]
            if x[0] == username and x[1] == password:
                log("Authentication Successful")
                self.isAuthenticated = True
            else:
                raise Exception("Authentication Failed")
            # BUG FIX: the original called self.sSocket.close() right here on
            # the success path, closing the LISTENING socket and breaking
            # every subsequent accept(); that call has been removed.
        except Exception as e:
            log(str(e))
def log(message):
    """Print message prefixed with a bracketed local timestamp."""
    print(timestamp() + " : " + message)
def timestamp():
    """Return the current local time formatted as '[YYYY-MM-DD HH:MM:SS]'."""
    now = datetime.datetime.fromtimestamp(time.time())
    return "[{}]".format(now.strftime('%Y-%m-%d %H:%M:%S'))
if __name__ == "__main__":
    # Expected arguments: <directory> <port>, e.g. "python3 server.py /DFS1 10001".
    if len(sys.argv) > 2:
        s = Server(sys.argv[2], sys.argv[1].replace("/", ""))
    else:
        print("Usage: python3 server.py /DFS1 10001")
|
993,150 | f1f57f9c2f417beaebd2d28a1eaaa808420a3cd3 | # Generated by Django 3.1.3 on 2020-12-09 11:36
import api.models.tag
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: rename profile.share_age to share_birthday, drop
    profile.age in favour of a nullable birthday DateField, and add an
    integer color to tag (default picked by api.models.tag.random_color)."""
    dependencies = [
        ('api', '0008_auto_20201206_2028'),
    ]
    operations = [
        migrations.RenameField(
            model_name='profile',
            old_name='share_age',
            new_name='share_birthday',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='age',
        ),
        migrations.AddField(
            model_name='profile',
            name='birthday',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='tag',
            name='color',
            field=models.IntegerField(default=api.models.tag.random_color),
        ),
    ]
|
993,151 | a3f33a164da01432173475fce82dcf43338d2599 | n = int(input())
a = list(map(int, input().split(" ")))
a = sorted(a)[::-1]
if n % 2:
ans = a[0] + 2 * sum(a[1:(n//2)]) + a[n//2]
else:
ans = a[0] + 2 * sum(a[1:(n//2)])
print(ans) |
993,152 | 95a154e27123b445c5f91696e47933fded8aa5c5 | # MIT License
#
# Copyright (c) 2016-2022 Mark Qvist / unsigned.io
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from RNS.Interfaces.Interface import Interface
from time import sleep
import sys
import threading
import time
import math
import RNS
class KISS():
    """KISS framing bytes and RNode-specific command codes for the serial /
    Bluetooth link to an RNode transceiver, plus payload escaping."""
    # --- standard KISS frame delimiters and escape bytes ---
    FEND = 0xC0
    FESC = 0xDB
    TFEND = 0xDC
    TFESC = 0xDD
    # --- command bytes understood by the RNode firmware ---
    CMD_UNKNOWN = 0xFE
    CMD_DATA = 0x00
    # radio configuration
    CMD_FREQUENCY = 0x01
    CMD_BANDWIDTH = 0x02
    CMD_TXPOWER = 0x03
    CMD_SF = 0x04
    CMD_CR = 0x05
    CMD_RADIO_STATE = 0x06
    CMD_RADIO_LOCK = 0x07
    CMD_DETECT = 0x08
    CMD_IMPLICIT = 0x09
    CMD_LEAVE = 0x0A
    CMD_READY = 0x0F
    # link statistics
    CMD_STAT_RX = 0x21
    CMD_STAT_TX = 0x22
    CMD_STAT_RSSI = 0x23
    CMD_STAT_SNR = 0x24
    CMD_BLINK = 0x30
    CMD_RANDOM = 0x40
    # framebuffer / EEPROM / device management
    CMD_FB_EXT = 0x41
    CMD_FB_READ = 0x42
    CMD_FB_WRITE = 0x43
    CMD_FB_READL = 0x44
    CMD_BT_CTRL = 0x46
    CMD_PLATFORM = 0x48
    CMD_MCU = 0x49
    CMD_FW_VERSION = 0x50
    CMD_ROM_READ = 0x51
    CMD_RESET = 0x55
    # detection handshake payloads
    DETECT_REQ = 0x73
    DETECT_RESP = 0x46
    # radio state values
    RADIO_STATE_OFF = 0x00
    RADIO_STATE_ON = 0x01
    RADIO_STATE_ASK = 0xFF
    # error reporting
    CMD_ERROR = 0x90
    ERROR_INITRADIO = 0x01
    ERROR_TXFAILED = 0x02
    ERROR_EEPROM_LOCKED = 0x03
    ERROR_INVALID_FIRMWARE = 0x10
    # platform identifiers
    PLATFORM_AVR = 0x90
    PLATFORM_ESP32 = 0x80
    @staticmethod
    def escape(data):
        """KISS-escape a bytes payload: FESC -> FESC TFESC, FEND -> FESC TFEND.
        FESC (0xdb) must be escaped first to avoid double-escaping."""
        data = data.replace(bytes([0xdb]), bytes([0xdb, 0xdd]))
        data = data.replace(bytes([0xc0]), bytes([0xdb, 0xdc]))
        return data
class AndroidBluetoothManager():
    # Manages discovery, connection and byte-level I/O with an RNode over
    # Bluetooth RFcomm (SPP), using the Android Bluetooth APIs via pyjnius.
    def __init__(self, owner, target_device_name = None, target_device_address = None):
        # owner: the interface instance this manager performs I/O for.
        # target_device_name / target_device_address: optional filters used by
        # get_potential_devices(); when both are None, any paired device whose
        # name starts with "RNode " is considered eligible.
        from jnius import autoclass
        self.owner = owner
        self.connected = False
        self.target_device_name = target_device_name
        self.target_device_address = target_device_address
        self.potential_remote_devices = []
        self.rfcomm_socket = None
        self.connected_device = None
        self.connection_failed = False
        # Java class handles resolved through pyjnius
        self.bt_adapter = autoclass('android.bluetooth.BluetoothAdapter')
        self.bt_device = autoclass('android.bluetooth.BluetoothDevice')
        self.bt_socket = autoclass('android.bluetooth.BluetoothSocket')
        # Well-known UUID for the Bluetooth Serial Port Profile (SPP)
        self.bt_rfcomm_service_record = autoclass('java.util.UUID').fromString("00001101-0000-1000-8000-00805F9B34FB")
        self.buffered_input_stream = autoclass('java.io.BufferedInputStream')
    def connect(self, device_address=None):
        # NOTE(review): self.remote_device is never assigned anywhere in this
        # class, so calling this method raises AttributeError. Connections are
        # actually established via connect_any_device() below — confirm whether
        # this method is dead code.
        self.rfcomm_socket = self.remote_device.createRfcommSocketToServiceRecord(self.bt_rfcomm_service_record)
    def bt_enabled(self):
        # True when the system Bluetooth adapter is currently enabled.
        return self.bt_adapter.getDefaultAdapter().isEnabled()
    def get_paired_devices(self):
        # Return the set of bonded (paired) devices, or an empty list when
        # Bluetooth is disabled.
        if self.bt_enabled():
            return self.bt_adapter.getDefaultAdapter().getBondedDevices()
        else:
            RNS.log("Could not query paired devices, Bluetooth is disabled", RNS.LOG_DEBUG)
            return []
    def get_potential_devices(self):
        # Filter paired devices down to connection candidates. An address
        # filter takes precedence; a name filter may further narrow it. With
        # no filters, any device named "RNode ..." qualifies.
        potential_devices = []
        for device in self.get_paired_devices():
            if self.target_device_address != None:
                # Compare MAC addresses ignoring case and ":" separators
                if str(device.getAddress()).replace(":", "").lower() == str(self.target_device_address).replace(":", "").lower():
                    if self.target_device_name == None:
                        potential_devices.append(device)
                    else:
                        if device.getName().lower() == self.target_device_name.lower():
                            potential_devices.append(device)
            elif self.target_device_name != None:
                if device.getName().lower() == self.target_device_name.lower():
                    potential_devices.append(device)
            else:
                if device.getName().lower().startswith("rnode "):
                    potential_devices.append(device)
        return potential_devices
    def connect_any_device(self):
        # Try candidates (most recently listed first, via pop()) until one
        # connects. On success, sets up buffered reader / writer streams.
        if (self.rfcomm_socket != None and not self.rfcomm_socket.isConnected()) or self.rfcomm_socket == None:
            self.connection_failed = False
            if len(self.potential_remote_devices) == 0:
                self.potential_remote_devices = self.get_potential_devices()
                if len(self.potential_remote_devices) == 0:
                    RNS.log("No suitable bluetooth devices available, can't connect", RNS.LOG_DEBUG)
                    return
            while not self.connected and len(self.potential_remote_devices) > 0:
                device = self.potential_remote_devices.pop()
                try:
                    self.rfcomm_socket = device.createRfcommSocketToServiceRecord(self.bt_rfcomm_service_record)
                    if self.rfcomm_socket == None:
                        raise IOError("Bluetooth stack returned no socket object")
                    else:
                        if not self.rfcomm_socket.isConnected():
                            try:
                                self.rfcomm_socket.connect()
                                # 1024-byte buffered reader over the raw input stream
                                self.rfcomm_reader = self.buffered_input_stream(self.rfcomm_socket.getInputStream(), 1024)
                                self.rfcomm_writer = self.rfcomm_socket.getOutputStream()
                                self.connected = True
                                self.connected_device = device
                                RNS.log("Bluetooth device "+str(self.connected_device.getName())+" "+str(self.connected_device.getAddress())+" connected.")
                            except Exception as e:
                                raise IOError("The Bluetooth RFcomm socket could not be connected: "+str(e))
                except Exception as e:
                    # Log and fall through to the next candidate device
                    RNS.log("Could not create and connect Bluetooth RFcomm socket for "+str(device.getName())+" "+str(device.getAddress()), RNS.LOG_ERROR)
                    RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
    def close(self):
        # Tear down reader, writer and socket, and reset connection state so a
        # later connect_any_device() starts from a clean candidate list.
        if self.connected:
            if self.rfcomm_reader != None:
                self.rfcomm_reader.close()
                self.rfcomm_reader = None
            if self.rfcomm_writer != None:
                self.rfcomm_writer.close()
                self.rfcomm_writer = None
            if self.rfcomm_socket != None:
                self.rfcomm_socket.close()
            self.connected = False
            self.connected_device = None
            self.potential_remote_devices = []
    def read(self, len = None):
        # Non-blocking read of whatever is currently available. Returns b""
        # when nothing is buffered. NOTE(review): the "len" parameter shadows
        # the builtin and is ignored — presumably kept for API symmetry.
        if self.connection_failed:
            raise IOError("Bluetooth connection failed")
        else:
            if self.connected and self.rfcomm_reader != None:
                available = self.rfcomm_reader.available()
                if available > 0:
                    if hasattr(self.rfcomm_reader, "readNBytes"):
                        return self.rfcomm_reader.readNBytes(available)
                    else:
                        # Compatibility mode for older android versions lacking readNBytes:
                        # read a single byte per call instead of the whole buffer.
                        rb = self.rfcomm_reader.read().to_bytes(1, "big")
                        return rb
                else:
                    return bytes([])
            else:
                raise IOError("No RFcomm socket available")
    def write(self, data):
        # Write and flush data; on any failure, mark the connection failed so
        # the next read() raises, and report 0 bytes written.
        try:
            self.rfcomm_writer.write(data)
            self.rfcomm_writer.flush()
            return len(data)
        except Exception as e:
            RNS.log("Bluetooth connection failed for "+str(self), RNS.LOG_ERROR)
            self.connection_failed = True
            return 0
class RNodeInterface(Interface):
    # Maximum bytes handled per chunk
    MAX_CHUNK = 32768
    # Frequency range supported by RNode hardware, in Hz
    FREQ_MIN = 137000000
    FREQ_MAX = 1020000000
    # Offset the firmware adds to reported RSSI values
    RSSI_OFFSET = 157
    # Maximum length of the encoded beacon callsign, in bytes
    CALLSIGN_MAX_LEN = 32
    # Minimum firmware version this driver requires
    REQUIRED_FW_VER_MAJ = 1
    REQUIRED_FW_VER_MIN = 52
    # Seconds between reconnection attempts
    RECONNECT_WAIT = 5
    # Seconds of port silence before the device is probed (and, at 3x,
    # declared unresponsive by readLoop)
    PORT_IO_TIMEOUT = 3
@classmethod
def bluetooth_control(device_serial = None, port = None, enable_bluetooth = False, disable_bluetooth = False, pairing_mode = False):
if (port != None or device_serial != None) and (enable_bluetooth or disable_bluetooth or pairing_mode):
serial = None
bluetooth_state = None
if pairing_mode:
bluetooth_state = 0x01
elif enable_bluetooth:
bluetooth_state = 0x01
elif disable_bluetooth:
bluetooth_state = 0x00
if port != None:
RNS.log("Opening serial port "+port+"...")
# Get device parameters
from usb4a import usb
device = usb.get_usb_device(port)
if device:
vid = device.getVendorId()
pid = device.getProductId()
# Driver overrides for speficic chips
from usbserial4a import serial4a as pyserial
proxy = pyserial.get_serial_port
if vid == 0x1A86 and pid == 0x55D4:
# Force CDC driver for Qinheng CH34x
RNS.log("Using CDC driver for "+RNS.hexrep(vid)+":"+RNS.hexrep(pid), RNS.LOG_DEBUG)
from usbserial4a.cdcacmserial4a import CdcAcmSerial
proxy = CdcAcmSerial
serial = proxy(
port,
baudrate = 115200,
bytesize = 8,
parity = "N",
stopbits = 1,
xonxoff = False,
rtscts = False,
timeout = None,
inter_byte_timeout = None,
# write_timeout = wtimeout,
dsrdtr = False,
)
if vid == 0x0403:
# Hardware parameters for FTDI devices @ 115200 baud
serial.DEFAULT_READ_BUFFER_SIZE = 16 * 1024
serial.USB_READ_TIMEOUT_MILLIS = 100
serial.timeout = 0.1
elif vid == 0x10C4:
# Hardware parameters for SiLabs CP210x @ 115200 baud
serial.DEFAULT_READ_BUFFER_SIZE = 64
serial.USB_READ_TIMEOUT_MILLIS = 12
serial.timeout = 0.012
elif vid == 0x1A86 and pid == 0x55D4:
# Hardware parameters for Qinheng CH34x @ 115200 baud
serial.DEFAULT_READ_BUFFER_SIZE = 64
serial.USB_READ_TIMEOUT_MILLIS = 12
serial.timeout = 0.1
else:
# Default values
serial.DEFAULT_READ_BUFFER_SIZE = 1 * 1024
serial.USB_READ_TIMEOUT_MILLIS = 100
serial.timeout = 0.1
elif device_serial != None:
serial = device_serial
if serial != None:
if serial.is_open:
kiss_command = bytes([KISS.FEND, KISS.CMD_BT_CTRL, bluetooth_state, KISS.FEND])
serial.write(kiss_command)
if pairing_mode:
kiss_command = bytes([KISS.FEND, KISS.CMD_BT_CTRL, 0x02, KISS.FEND])
serial.write(kiss_command)
if port != None:
serial.close()
    def __init__(
        self, owner, name, port, frequency = None, bandwidth = None, txpower = None,
        sf = None, cr = None, flow_control = False, id_interval = None,
        allow_bluetooth = False, target_device_name = None,
        target_device_address = None, id_callsign = None):
        """Create and bring up an RNode interface on Android.

        Verifies the Android USB serial / pyjnius environment, validates the
        radio configuration (frequency, TX power, bandwidth, SF, CR and
        optional ID beacon callsign), then opens the serial port or a
        Bluetooth connection and configures the device. On failure a
        background thread keeps retrying via reconnect_port().

        Raises SystemError off-Android, ValueError on invalid configuration,
        and logs (without raising) when the port cannot be opened initially.
        NOTE(review): the Interface base class __init__ is not called here —
        confirm the base requires no initialisation.
        """
        import importlib
        if RNS.vendor.platformutils.is_android():
            self.on_android = True
            if importlib.util.find_spec('usbserial4a') != None:
                if importlib.util.find_spec('jnius') == None:
                    # usbserial4a present but pyjnius missing: typical of Termux
                    RNS.log("Could not load jnius API wrapper for Android, RNode interface cannot be created.", RNS.LOG_CRITICAL)
                    RNS.log("This probably means you are trying to use an USB-based interface from within Termux or similar.", RNS.LOG_CRITICAL)
                    RNS.log("This is currently not possible, due to this environment limiting access to the native Android APIs.", RNS.LOG_CRITICAL)
                    RNS.panic()
                from usbserial4a import serial4a as serial
                self.parity = "N"
                self.bt_target_device_name = target_device_name
                self.bt_target_device_address = target_device_address
                if allow_bluetooth:
                    self.bt_manager = AndroidBluetoothManager(
                        owner = self,
                        target_device_name = self.bt_target_device_name,
                        target_device_address = self.bt_target_device_address
                    )
                else:
                    self.bt_manager = None
            else:
                RNS.log("Could not load USB serial module for Android, RNode interface cannot be created.", RNS.LOG_CRITICAL)
                RNS.log("You can install this module by issuing: pip install usbserial4a", RNS.LOG_CRITICAL)
                RNS.panic()
        else:
            raise SystemError("Android-specific interface was used on non-Android OS")
        # Byte counters and hardware MTU
        self.rxb = 0
        self.txb = 0
        self.HW_MTU = 508
        # Serial port parameters and runtime state
        self.pyserial = serial
        self.serial = None
        self.owner = owner
        self.name = name
        self.port = port
        self.speed = 115200
        self.databits = 8
        self.stopbits = 1
        self.timeout = 150
        self.online = False
        self.hw_errors = []
        self.allow_bluetooth = allow_bluetooth
        # Requested radio parameters (the r_* attributes below hold what the
        # device actually reports back)
        self.frequency = frequency
        self.bandwidth = bandwidth
        self.txpower = txpower
        self.sf = sf
        self.cr = cr
        self.state = KISS.RADIO_STATE_OFF
        self.bitrate = 0
        self.platform = None
        self.display = None
        self.mcu = None
        self.detected = False
        self.firmware_ok = False
        self.maj_version = 0
        self.min_version = 0
        self.last_id = 0
        self.first_tx = None
        self.reconnect_w = RNodeInterface.RECONNECT_WAIT
        # Values reported by the device, filled in by readLoop
        self.r_frequency = None
        self.r_bandwidth = None
        self.r_txpower = None
        self.r_sf = None
        self.r_cr = None
        self.r_state = None
        self.r_lock = None
        self.r_stat_rx = None
        self.r_stat_tx = None
        self.r_stat_rssi = None
        self.r_stat_snr = None
        self.r_random = None
        self.packet_queue = []
        self.flow_control = flow_control
        self.interface_ready = False
        self.announce_rate_target = None
        self.last_port_io = 0
        self.port_io_timeout = RNodeInterface.PORT_IO_TIMEOUT
        self.last_imagedata = None
        # Validate the requested radio configuration before touching hardware
        self.validcfg = True
        if (self.frequency < RNodeInterface.FREQ_MIN or self.frequency > RNodeInterface.FREQ_MAX):
            RNS.log("Invalid frequency configured for "+str(self), RNS.LOG_ERROR)
            self.validcfg = False
        if (self.txpower < 0 or self.txpower > 17):
            RNS.log("Invalid TX power configured for "+str(self), RNS.LOG_ERROR)
            self.validcfg = False
        if (self.bandwidth < 7800 or self.bandwidth > 500000):
            RNS.log("Invalid bandwidth configured for "+str(self), RNS.LOG_ERROR)
            self.validcfg = False
        if (self.sf < 7 or self.sf > 12):
            RNS.log("Invalid spreading factor configured for "+str(self), RNS.LOG_ERROR)
            self.validcfg = False
        if (self.cr < 5 or self.cr > 8):
            RNS.log("Invalid coding rate configured for "+str(self), RNS.LOG_ERROR)
            self.validcfg = False
        if id_interval != None and id_callsign != None:
            if (len(id_callsign.encode("utf-8")) <= RNodeInterface.CALLSIGN_MAX_LEN):
                self.should_id = True
                self.id_callsign = id_callsign.encode("utf-8")
                self.id_interval = id_interval
            else:
                RNS.log("The encoded ID callsign for "+str(self)+" exceeds the max length of "+str(RNodeInterface.CALLSIGN_MAX_LEN)+" bytes.", RNS.LOG_ERROR)
                self.validcfg = False
        else:
            self.id_interval = None
            self.id_callsign = None
        if (not self.validcfg):
            raise ValueError("The configuration for "+str(self)+" contains errors, interface is offline")
        try:
            self.open_port()
            if self.serial != None:
                if self.serial.is_open:
                    self.configure_device()
                else:
                    raise IOError("Could not open serial port")
            elif self.bt_manager != None:
                if self.bt_manager.connected:
                    self.configure_device()
                else:
                    raise IOError("Could not connect to any Bluetooth devices")
            else:
                raise IOError("Neither serial port nor Bluetooth devices available")
        except Exception as e:
            RNS.log("Could not open serial port for interface "+str(self), RNS.LOG_ERROR)
            RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
            if len(self.hw_errors) == 0:
                # Transient failure: retry in the background
                RNS.log("Reticulum will attempt to bring up this interface periodically", RNS.LOG_ERROR)
                thread = threading.Thread(target=self.reconnect_port)
                thread.daemon = True
                thread.start()
def read_mux(self, len=None):
if self.serial != None:
return self.serial.read()
elif self.bt_manager != None:
return self.bt_manager.read()
else:
raise IOError("No ports available for reading")
    def write_mux(self, data):
        """Write data to whichever transport is active, preferring USB serial.

        Successful writes update last_port_io, which feeds the unresponsive-
        port watchdog in readLoop. NOTE(review): on a partial Bluetooth write
        this falls through and implicitly returns None, which callers treat as
        a failed write via their length comparison — confirm intentional.
        """
        if self.serial != None:
            written = self.serial.write(data)
            self.last_port_io = time.time()
            return written
        elif self.bt_manager != None:
            written = self.bt_manager.write(data)
            if (written == len(data)):
                self.last_port_io = time.time()
                return written
        else:
            raise IOError("No ports available for writing")
    def open_port(self):
        """Open the configured USB serial port, or fall back to Bluetooth.

        When a port name is set, looks up the USB device, selects a driver
        (forcing CDC for Qinheng CH34x), opens it at the configured speed and
        applies per-chipset buffer/timeout tuning. Otherwise, when Bluetooth
        is allowed, (re)creates the AndroidBluetoothManager and attempts to
        connect to any eligible device.
        """
        if self.port != None:
            RNS.log("Opening serial port "+self.port+"...")
            # Get device parameters
            from usb4a import usb
            device = usb.get_usb_device(self.port)
            if device:
                vid = device.getVendorId()
                pid = device.getProductId()
                # Driver overrides for speficic chips
                proxy = self.pyserial.get_serial_port
                if vid == 0x1A86 and pid == 0x55D4:
                    # Force CDC driver for Qinheng CH34x
                    RNS.log(str(self)+" using CDC driver for "+RNS.hexrep(vid)+":"+RNS.hexrep(pid), RNS.LOG_DEBUG)
                    from usbserial4a.cdcacmserial4a import CdcAcmSerial
                    proxy = CdcAcmSerial
                self.serial = proxy(
                    self.port,
                    baudrate = self.speed,
                    bytesize = self.databits,
                    parity = self.parity,
                    stopbits = self.stopbits,
                    xonxoff = False,
                    rtscts = False,
                    timeout = None,
                    inter_byte_timeout = None,
                    # write_timeout = wtimeout,
                    dsrdtr = False,
                )
                # Per-chipset buffer and timeout tuning @ 115200 baud
                if vid == 0x0403:
                    # Hardware parameters for FTDI devices @ 115200 baud
                    self.serial.DEFAULT_READ_BUFFER_SIZE = 16 * 1024
                    self.serial.USB_READ_TIMEOUT_MILLIS = 100
                    self.serial.timeout = 0.1
                elif vid == 0x10C4:
                    # Hardware parameters for SiLabs CP210x @ 115200 baud
                    self.serial.DEFAULT_READ_BUFFER_SIZE = 64
                    self.serial.USB_READ_TIMEOUT_MILLIS = 12
                    self.serial.timeout = 0.012
                elif vid == 0x1A86 and pid == 0x55D4:
                    # Hardware parameters for Qinheng CH34x @ 115200 baud
                    self.serial.DEFAULT_READ_BUFFER_SIZE = 64
                    self.serial.USB_READ_TIMEOUT_MILLIS = 12
                    self.serial.timeout = 0.1
                else:
                    # Default values
                    self.serial.DEFAULT_READ_BUFFER_SIZE = 1 * 1024
                    self.serial.USB_READ_TIMEOUT_MILLIS = 100
                    self.serial.timeout = 0.1
                RNS.log(str(self)+" USB read buffer size set to "+RNS.prettysize(self.serial.DEFAULT_READ_BUFFER_SIZE), RNS.LOG_DEBUG)
                RNS.log(str(self)+" USB read timeout set to "+str(self.serial.USB_READ_TIMEOUT_MILLIS)+"ms", RNS.LOG_DEBUG)
                RNS.log(str(self)+" USB write timeout set to "+str(self.serial.USB_WRITE_TIMEOUT_MILLIS)+"ms", RNS.LOG_DEBUG)
        elif self.allow_bluetooth:
            if self.bt_manager == None:
                self.bt_manager = AndroidBluetoothManager(
                    owner = self,
                    target_device_name = self.bt_target_device_name,
                    target_device_address = self.bt_target_device_address
                )
            if self.bt_manager != None:
                self.bt_manager.connect_any_device()
    def configure_device(self):
        """Detect the connected RNode, then configure and power up its radio.

        Starts the readLoop thread, probes the device, verifies firmware,
        pushes the radio configuration and validates the parameters the
        device reports back. Raises IOError when detection, firmware or
        configuration validation fails.
        """
        # Give the device time to settle after the port was opened
        sleep(2.0)
        thread = threading.Thread(target=self.readLoop)
        thread.daemon = True
        thread.start()
        self.detect()
        # Wait for the detection response to arrive via readLoop
        sleep(0.5)
        if not self.detected:
            raise IOError("Could not detect device")
        else:
            if self.platform == KISS.PLATFORM_ESP32:
                # ESP32-based RNodes are assumed to have a display
                self.display = True
        if not self.firmware_ok:
            raise IOError("Invalid device firmware")
        if self.serial != None and self.port != None:
            RNS.log("Serial port "+self.port+" is now open")
        if self.bt_manager != None and self.bt_manager.connected:
            RNS.log("Bluetooth connection to RNode now open")
        RNS.log("Configuring RNode interface...", RNS.LOG_VERBOSE)
        self.initRadio()
        if (self.validateRadioState()):
            self.interface_ready = True
            RNS.log(str(self)+" is configured and powered up")
            sleep(0.3)
            self.online = True
        else:
            RNS.log("After configuring "+str(self)+", the reported radio parameters did not match your configuration.", RNS.LOG_ERROR)
            RNS.log("Make sure that your hardware actually supports the parameters specified in the configuration", RNS.LOG_ERROR)
            RNS.log("Aborting RNode startup", RNS.LOG_ERROR)
            if self.serial != None:
                self.serial.close()
            if self.bt_manager != None:
                self.bt_manager.close()
            raise IOError("RNode interface did not pass configuration validation")
def initRadio(self):
self.setFrequency()
time.sleep(0.15)
self.setBandwidth()
time.sleep(0.15)
self.setTXPower()
time.sleep(0.15)
self.setSpreadingFactor()
time.sleep(0.15)
self.setCodingRate()
time.sleep(0.15)
self.setRadioState(KISS.RADIO_STATE_ON)
time.sleep(0.15)
def detect(self):
kiss_command = bytes([KISS.FEND, KISS.CMD_DETECT, KISS.DETECT_REQ, KISS.FEND, KISS.CMD_FW_VERSION, 0x00, KISS.FEND, KISS.CMD_PLATFORM, 0x00, KISS.FEND, KISS.CMD_MCU, 0x00, KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while detecting hardware for "+str(self))
def leave(self):
kiss_command = bytes([KISS.FEND, KISS.CMD_LEAVE, 0xFF, KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while sending host left command to device")
def enable_bluetooth(self):
kiss_command = bytes([KISS.FEND, KISS.CMD_BT_CTRL, 0x01, KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while sending bluetooth enable command to device")
def disable_bluetooth(self):
kiss_command = bytes([KISS.FEND, KISS.CMD_BT_CTRL, 0x00, KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while sending bluetooth disable command to device")
def bluetooth_pair(self):
kiss_command = bytes([KISS.FEND, KISS.CMD_BT_CTRL, 0x02, KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while sending bluetooth pair command to device")
def enable_external_framebuffer(self):
if self.display != None:
kiss_command = bytes([KISS.FEND, KISS.CMD_FB_EXT, 0x01, KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while enabling external framebuffer on device")
def disable_external_framebuffer(self):
if self.display != None:
kiss_command = bytes([KISS.FEND, KISS.CMD_FB_EXT, 0x00, KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while disabling external framebuffer on device")
FB_PIXEL_WIDTH = 64
FB_BITS_PER_PIXEL = 1
FB_PIXELS_PER_BYTE = 8//FB_BITS_PER_PIXEL
FB_BYTES_PER_LINE = FB_PIXEL_WIDTH//FB_PIXELS_PER_BYTE
def display_image(self, imagedata):
self.last_imagedata = imagedata
if self.display != None:
lines = len(imagedata)//8
for line in range(lines):
line_start = line*RNodeInterface.FB_BYTES_PER_LINE
line_end = line_start+RNodeInterface.FB_BYTES_PER_LINE
line_data = bytes(imagedata[line_start:line_end])
self.write_framebuffer(line, line_data)
def write_framebuffer(self, line, line_data):
if self.display != None:
line_byte = line.to_bytes(1, byteorder="big", signed=False)
data = line_byte+line_data
escaped_data = KISS.escape(data)
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_FB_WRITE])+escaped_data+bytes([KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while writing framebuffer data device")
def hard_reset(self):
kiss_command = bytes([KISS.FEND, KISS.CMD_RESET, 0xf8, KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while restarting device")
sleep(4.0);
def setFrequency(self):
c1 = self.frequency >> 24
c2 = self.frequency >> 16 & 0xFF
c3 = self.frequency >> 8 & 0xFF
c4 = self.frequency & 0xFF
data = KISS.escape(bytes([c1])+bytes([c2])+bytes([c3])+bytes([c4]))
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_FREQUENCY])+data+bytes([KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring frequency for "+str(self))
def setBandwidth(self):
c1 = self.bandwidth >> 24
c2 = self.bandwidth >> 16 & 0xFF
c3 = self.bandwidth >> 8 & 0xFF
c4 = self.bandwidth & 0xFF
data = KISS.escape(bytes([c1])+bytes([c2])+bytes([c3])+bytes([c4]))
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_BANDWIDTH])+data+bytes([KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring bandwidth for "+str(self))
def setTXPower(self):
txp = bytes([self.txpower])
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_TXPOWER])+txp+bytes([KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring TX power for "+str(self))
def setSpreadingFactor(self):
sf = bytes([self.sf])
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_SF])+sf+bytes([KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring spreading factor for "+str(self))
def setCodingRate(self):
cr = bytes([self.cr])
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_CR])+cr+bytes([KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring coding rate for "+str(self))
def setRadioState(self, state):
self.state = state
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_RADIO_STATE])+bytes([state])+bytes([KISS.FEND])
written = self.write_mux(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring radio state for "+str(self))
def validate_firmware(self):
if (self.maj_version >= RNodeInterface.REQUIRED_FW_VER_MAJ):
if (self.min_version >= RNodeInterface.REQUIRED_FW_VER_MIN):
self.firmware_ok = True
if self.firmware_ok:
return
RNS.log("The firmware version of the connected RNode is "+str(self.maj_version)+"."+str(self.min_version), RNS.LOG_ERROR)
RNS.log("This version of Reticulum requires at least version "+str(RNodeInterface.REQUIRED_FW_VER_MAJ)+"."+str(RNodeInterface.REQUIRED_FW_VER_MIN), RNS.LOG_ERROR)
RNS.log("Please update your RNode firmware with rnodeconf from https://github.com/markqvist/rnodeconfigutil/")
error_description = "The firmware version of the connected RNode is "+str(self.maj_version)+"."+str(self.min_version)+". "
error_description += "This version of Reticulum requires at least version "+str(RNodeInterface.REQUIRED_FW_VER_MAJ)+"."+str(RNodeInterface.REQUIRED_FW_VER_MIN)+". "
error_description += "Please update your RNode firmware with rnodeconf from: https://github.com/markqvist/rnodeconfigutil/"
self.hw_errors.append({"error": KISS.ERROR_INVALID_FIRMWARE, "description": error_description})
def validateRadioState(self):
RNS.log("Wating for radio configuration validation for "+str(self)+"...", RNS.LOG_VERBOSE)
if not self.platform == KISS.PLATFORM_ESP32:
sleep(1.00);
else:
sleep(2.00);
self.validcfg = True
if (self.r_frequency != None and abs(self.frequency - int(self.r_frequency)) > 100):
RNS.log("Frequency mismatch", RNS.LOG_ERROR)
self.validcfg = False
if (self.bandwidth != self.r_bandwidth):
RNS.log("Bandwidth mismatch", RNS.LOG_ERROR)
self.validcfg = False
if (self.txpower != self.r_txpower):
RNS.log("TX power mismatch", RNS.LOG_ERROR)
self.validcfg = False
if (self.sf != self.r_sf):
RNS.log("Spreading factor mismatch", RNS.LOG_ERROR)
self.validcfg = False
if (self.state != self.r_state):
RNS.log("Radio state mismatch", RNS.LOG_ERROR)
self.validcfg = False
if (self.validcfg):
return True
else:
return False
def updateBitrate(self):
try:
self.bitrate = self.r_sf * ( (4.0/self.r_cr) / (math.pow(2,self.r_sf)/(self.r_bandwidth/1000)) ) * 1000
self.bitrate_kbps = round(self.bitrate/1000.0, 2)
RNS.log(str(self)+" On-air bitrate is now "+str(self.bitrate_kbps)+ " kbps", RNS.LOG_VERBOSE)
except:
self.bitrate = 0
def processIncoming(self, data):
self.rxb += len(data)
def af():
self.owner.inbound(data, self)
threading.Thread(target=af, daemon=True).start()
self.r_stat_rssi = None
self.r_stat_snr = None
    def processOutgoing(self,data):
        """KISS-frame outgoing data and write it to the device, or queue it
        when the interface is currently busy.

        With flow control enabled, the interface is marked not-ready until
        the device sends CMD_READY (handled in readLoop via process_queue).
        """
        datalen = len(data)
        if self.online:
            if self.interface_ready:
                if self.flow_control:
                    self.interface_ready = False
                if data == self.id_callsign:
                    # Beacon frames do not restart the ID timer
                    self.first_tx = None
                else:
                    if self.first_tx == None:
                        # Remember when the first data frame went out, so the
                        # ID beacon can be scheduled relative to it
                        self.first_tx = time.time()
                data = KISS.escape(data)
                frame = bytes([0xc0])+bytes([0x00])+data+bytes([0xc0])
                written = self.write_mux(frame)
                self.txb += datalen
                if written != len(frame):
                    # NOTE(review): the message reports len(data) (escaped
                    # payload), not len(frame) — slightly misleading on error
                    raise IOError("Serial interface only wrote "+str(written)+" bytes of "+str(len(data)))
            else:
                self.queue(data)
def queue(self, data):
self.packet_queue.append(data)
def process_queue(self):
if len(self.packet_queue) > 0:
data = self.packet_queue.pop(0)
self.interface_ready = True
self.processOutgoing(data)
elif len(self.packet_queue) == 0:
self.interface_ready = True
def readLoop(self):
try:
in_frame = False
escape = False
command = KISS.CMD_UNKNOWN
data_buffer = b""
command_buffer = b""
last_read_ms = int(time.time()*1000)
# TODO: Ensure hotplug support for serial drivers
# This should work now with the new time-based
# detect polling.
while (self.serial != None and self.serial.is_open) or (self.bt_manager != None and self.bt_manager.connected):
serial_bytes = self.read_mux()
got = len(serial_bytes)
if got > 0:
self.last_port_io = time.time()
for byte in serial_bytes:
last_read_ms = int(time.time()*1000)
if (in_frame and byte == KISS.FEND and command == KISS.CMD_DATA):
in_frame = False
self.processIncoming(data_buffer)
data_buffer = b""
command_buffer = b""
elif (byte == KISS.FEND):
in_frame = True
command = KISS.CMD_UNKNOWN
data_buffer = b""
command_buffer = b""
elif (in_frame and len(data_buffer) < self.HW_MTU):
if (len(data_buffer) == 0 and command == KISS.CMD_UNKNOWN):
command = byte
elif (command == KISS.CMD_DATA):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
data_buffer = data_buffer+bytes([byte])
elif (command == KISS.CMD_FREQUENCY):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
command_buffer = command_buffer+bytes([byte])
if (len(command_buffer) == 4):
self.r_frequency = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
RNS.log(str(self)+" Radio reporting frequency is "+str(self.r_frequency/1000000.0)+" MHz", RNS.LOG_DEBUG)
self.updateBitrate()
elif (command == KISS.CMD_BANDWIDTH):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
command_buffer = command_buffer+bytes([byte])
if (len(command_buffer) == 4):
self.r_bandwidth = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
RNS.log(str(self)+" Radio reporting bandwidth is "+str(self.r_bandwidth/1000.0)+" KHz", RNS.LOG_DEBUG)
self.updateBitrate()
elif (command == KISS.CMD_TXPOWER):
self.r_txpower = byte
RNS.log(str(self)+" Radio reporting TX power is "+str(self.r_txpower)+" dBm", RNS.LOG_DEBUG)
elif (command == KISS.CMD_SF):
self.r_sf = byte
RNS.log(str(self)+" Radio reporting spreading factor is "+str(self.r_sf), RNS.LOG_DEBUG)
self.updateBitrate()
elif (command == KISS.CMD_CR):
self.r_cr = byte
RNS.log(str(self)+" Radio reporting coding rate is "+str(self.r_cr), RNS.LOG_DEBUG)
self.updateBitrate()
elif (command == KISS.CMD_RADIO_STATE):
self.r_state = byte
if self.r_state:
RNS.log(str(self)+" Radio reporting state is online", RNS.LOG_DEBUG)
else:
RNS.log(str(self)+" Radio reporting state is offline", RNS.LOG_DEBUG)
elif (command == KISS.CMD_RADIO_LOCK):
self.r_lock = byte
elif (command == KISS.CMD_FW_VERSION):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
command_buffer = command_buffer+bytes([byte])
if (len(command_buffer) == 2):
self.maj_version = int(command_buffer[0])
self.min_version = int(command_buffer[1])
self.validate_firmware()
elif (command == KISS.CMD_STAT_RX):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
command_buffer = command_buffer+bytes([byte])
if (len(command_buffer) == 4):
self.r_stat_rx = ord(command_buffer[0]) << 24 | ord(command_buffer[1]) << 16 | ord(command_buffer[2]) << 8 | ord(command_buffer[3])
elif (command == KISS.CMD_STAT_TX):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
command_buffer = command_buffer+bytes([byte])
if (len(command_buffer) == 4):
self.r_stat_tx = ord(command_buffer[0]) << 24 | ord(command_buffer[1]) << 16 | ord(command_buffer[2]) << 8 | ord(command_buffer[3])
elif (command == KISS.CMD_STAT_RSSI):
self.r_stat_rssi = byte-RNodeInterface.RSSI_OFFSET
elif (command == KISS.CMD_STAT_SNR):
self.r_stat_snr = int.from_bytes(bytes([byte]), byteorder="big", signed=True) * 0.25
elif (command == KISS.CMD_RANDOM):
self.r_random = byte
elif (command == KISS.CMD_PLATFORM):
self.platform = byte
elif (command == KISS.CMD_MCU):
self.mcu = byte
elif (command == KISS.CMD_ERROR):
if (byte == KISS.ERROR_INITRADIO):
RNS.log(str(self)+" hardware initialisation error (code "+RNS.hexrep(byte)+")", RNS.LOG_ERROR)
raise IOError("Radio initialisation failure")
elif (byte == KISS.ERROR_TXFAILED):
RNS.log(str(self)+" hardware TX error (code "+RNS.hexrep(byte)+")", RNS.LOG_ERROR)
raise IOError("Hardware transmit failure")
else:
RNS.log(str(self)+" hardware error (code "+RNS.hexrep(byte)+")", RNS.LOG_ERROR)
raise IOError("Unknown hardware failure")
elif (command == KISS.CMD_RESET):
if (byte == 0xF8):
if self.platform == KISS.PLATFORM_ESP32:
if self.online:
RNS.log("Detected reset while device was online, reinitialising device...", RNS.LOG_ERROR)
raise IOError("ESP32 reset")
elif (command == KISS.CMD_READY):
self.process_queue()
elif (command == KISS.CMD_DETECT):
if byte == KISS.DETECT_RESP:
self.detected = True
else:
self.detected = False
if got == 0:
time_since_last = int(time.time()*1000) - last_read_ms
if len(data_buffer) > 0 and time_since_last > self.timeout:
RNS.log(str(self)+" serial read timeout", RNS.LOG_DEBUG)
data_buffer = b""
in_frame = False
command = KISS.CMD_UNKNOWN
escape = False
if self.id_interval != None and self.id_callsign != None:
if self.first_tx != None:
if time.time() > self.first_tx + self.id_interval:
RNS.log("Interface "+str(self)+" is transmitting beacon data: "+str(self.id_callsign.decode("utf-8")), RNS.LOG_DEBUG)
self.processOutgoing(self.id_callsign)
if (time.time() - self.last_port_io > self.port_io_timeout):
self.detect()
if (time.time() - self.last_port_io > self.port_io_timeout*3):
raise IOError("Connected port for "+str(self)+" became unresponsive")
if self.bt_manager != None:
sleep(0.08)
except Exception as e:
self.online = False
RNS.log("A serial port occurred, the contained exception was: "+str(e), RNS.LOG_ERROR)
RNS.log("The interface "+str(self)+" experienced an unrecoverable error and is now offline.", RNS.LOG_ERROR)
if RNS.Reticulum.panic_on_interface_error:
RNS.panic()
RNS.log("Reticulum will attempt to reconnect the interface periodically.", RNS.LOG_ERROR)
self.online = False
if self.serial != None:
self.serial.close()
if self.bt_manager != None:
self.bt_manager.close()
self.reconnect_port()
    def reconnect_port(self):
        """Periodically try to reopen the serial port or Bluetooth connection
        until the interface is back online.

        On success, reconfigures the device and restores the last framebuffer
        image, if any. Gives up permanently once a hardware error has been
        recorded in self.hw_errors.
        """
        while not self.online and len(self.hw_errors) == 0:
            try:
                time.sleep(self.reconnect_w)
                if self.serial != None and self.port != None:
                    RNS.log("Attempting to reconnect serial port "+str(self.port)+" for "+str(self)+"...", RNS.LOG_EXTREME)
                if self.bt_manager != None:
                    RNS.log("Attempting to reconnect Bluetooth device for "+str(self)+"...", RNS.LOG_EXTREME)
                self.open_port()
                if hasattr(self, "serial") and self.serial != None and self.serial.is_open:
                    self.configure_device()
                    if self.online:
                        if self.last_imagedata != None:
                            self.display_image(self.last_imagedata)
                            self.enable_external_framebuffer()
                elif hasattr(self, "bt_manager") and self.bt_manager != None and self.bt_manager.connected:
                    self.configure_device()
                    if self.online:
                        if self.last_imagedata != None:
                            self.display_image(self.last_imagedata)
                            self.enable_external_framebuffer()
            except Exception as e:
                # Keep retrying on any failure; log and loop again
                RNS.log("Error while reconnecting RNode, the contained exception was: "+str(e), RNS.LOG_ERROR)
        if self.online:
            RNS.log("Reconnected serial port for "+str(self))
    def detach(self):
        """Shut the device down cleanly: release the display, power the radio
        off, and tell the device the host is leaving."""
        self.disable_external_framebuffer()
        self.setRadioState(KISS.RADIO_STATE_OFF)
        self.leave()
def __str__(self):
return "RNodeInterface["+str(self.name)+"]"
|
993,153 | 16c854e2ddf8e252df552e8273a1f757253bf9bf | from sqlalchemy import Column, Integer, String, ForeignKey, DateTime, Float
from sqlalchemy.orm import relationship
import sqlalchemy.dialects.postgresql as postgresql
from sqlalchemy.sql import func
from .orm import Base
import uuid
class User(Base):
    """SQLAlchemy model for an application user."""
    __tablename__ = "users"
    # UUID primary key, generated application-side
    id = Column(postgresql.UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True)
    fullName = Column(String(64), index=True)
    email = Column(String(120), unique=True, index=True, nullable=False)
    # Brazilian CPF number, 11 digits without punctuation
    cpf = Column(String(11), unique=True, index=True, nullable=False)
    # NOTE(review): presumably a password hash — confirm hashing happens
    # before persistence.
    password = Column(String, nullable=False)
    # One-to-many: purchases made by this user
    purchase = relationship("Purchase", back_populates="owner")
class Purchase(Base):
    """SQLAlchemy model for a purchase with its cashback amounts."""
    __tablename__ = "purchase"
    # UUID primary key, generated application-side
    id = Column(postgresql.UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True)
    # Purchase code supplied by the client
    cod = Column(String, index=True)
    price = Column(Float, index=True)
    # Purchase timestamp; defaults to database time at insert
    data = Column(DateTime, default=func.now())
    # Cashback percentage applied, and the resulting value
    percentCashBack = Column(Integer, index=True)
    valueCashBack = Column(Float, index=True)
    status = Column(String(12), index=True)
    userId = Column(postgresql.UUID(as_uuid=True), ForeignKey("users.id"))
    owner = relationship("User", back_populates="purchase")
|
993,154 | e205f1a1921c93f4398e69fa09c65727b6cec58d | import os
from unittest import TestCase
import warnings
import torch
import torch.nn as nn
from mock import patch, Mock, ANY, MagicMock
import torchbearer
from torchbearer.callbacks import TensorBoard, TensorBoardImages, TensorBoardProjector, TensorBoardText
class TestTensorBoard(TestCase):
    """Unit tests for the torchbearer TensorBoard callback.

    tensorboardX/visdom writers and the filesystem helpers are patched
    out, so each test only verifies which calls the callback forwards to
    the mocked writer. Patch decorators inject mocks bottom-up, which is
    why the positional mock arguments appear in reverse decorator order.
    """

    @patch('tensorboardX.SummaryWriter')
    @patch('torchbearer.callbacks.tensor_board.os.path.isdir')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    def test_add_metric_single(self, _, __, writer):
        mock_fn = MagicMock()

        # Fake writer method: raises `ex` for metric types in `types`
        # (forcing add_metric to flatten), records everything else.
        def fn_test(ex, types):
            def fn_test_1(tag, metric, *args, **kwargs):
                if type(metric) in types:
                    raise ex
                else:
                    mock_fn(tag, metric)
            return fn_test_1

        tb = TensorBoard()
        state = {torchbearer.METRICS: {'test': 1, 'test2': [1, 2, 3], 'test3': [[1], [2], [3, 4]]}}
        # A scalar metric is written straight through under the given tag.
        tb.add_metric(fn_test(NotImplementedError, [list]), 'single', state[torchbearer.METRICS]['test'])
        self.assertTrue(mock_fn.call_args_list[0][0] == ('single', 1))

    @patch('tensorboardX.SummaryWriter')
    @patch('torchbearer.callbacks.tensor_board.os.path.isdir')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    def test_add_metric_list(self, _, __, writer):
        mock_fn = MagicMock()

        def fn_test(ex, types):
            def fn_test_1(tag, metric, *args, **kwargs):
                if type(metric) in types:
                    raise ex
                else:
                    mock_fn(tag, metric)
            return fn_test_1

        tb = TensorBoard()
        state = {torchbearer.METRICS: {'test': 1, 'test2': [1, 2, 3], 'test3': [[1], [2], [3, 4]]}}
        # A list metric is flattened: one write per element, element index
        # appended to the tag.
        tb.add_metric(fn_test(NotImplementedError, [list]), 'single', state[torchbearer.METRICS]['test2'])
        self.assertTrue(mock_fn.call_args_list[0][0] == ('single_0', 1))
        self.assertTrue(mock_fn.call_args_list[1][0] == ('single_1', 2))
        self.assertTrue(mock_fn.call_args_list[2][0] == ('single_2', 3))

    @patch('tensorboardX.SummaryWriter')
    @patch('torchbearer.callbacks.tensor_board.os.path.isdir')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    def test_add_metric_list_of_list(self, _, __, writer):
        mock_fn = MagicMock()

        def fn_test(ex, types):
            def fn_test_1(tag, metric, *args, **kwargs):
                if type(metric) in types:
                    raise ex
                else:
                    mock_fn(tag, metric)
            return fn_test_1

        tb = TensorBoard()
        state = {torchbearer.METRICS: {'test': 1, 'test2': [1, 2, 3], 'test3': [[1], 2, [3, 4]]}}
        # Nested lists are flattened recursively, one index suffix per level.
        tb.add_metric(fn_test(NotImplementedError, [list]), 'single', state[torchbearer.METRICS]['test3'])
        self.assertTrue(mock_fn.call_args_list[0][0] == ('single_0_0', 1))
        self.assertTrue(mock_fn.call_args_list[1][0] == ('single_1', 2))
        self.assertTrue(mock_fn.call_args_list[2][0] == ('single_2_0', 3))
        self.assertTrue(mock_fn.call_args_list[3][0] == ('single_2_1', 4))

    @patch('tensorboardX.SummaryWriter')
    @patch('torchbearer.callbacks.tensor_board.os.path.isdir')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    def test_add_metric_dict(self, _, __, writer):
        mock_fn = MagicMock()

        def fn_test(ex, types):
            def fn_test_1(tag, metric, *args, **kwargs):
                if type(metric) in types:
                    raise ex
                else:
                    mock_fn(tag, metric)
            return fn_test_1

        tb = TensorBoard()
        state = {torchbearer.METRICS: {'test': {'key1': 2, 'key2': 3}}}
        # Dict metrics are flattened with the key appended to the tag.
        tb.add_metric(fn_test(NotImplementedError, [list, dict]), 'single', state[torchbearer.METRICS]['test'])
        # Sort because dict iteration order is not relied upon here.
        call_args = list(mock_fn.call_args_list)
        call_args.sort()
        self.assertTrue(call_args[0][0] == ('single_key1', 2))
        self.assertTrue(call_args[1][0] == ('single_key2', 3))

    @patch('tensorboardX.SummaryWriter')
    @patch('torchbearer.callbacks.tensor_board.os.path.isdir')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    def test_add_metric_dict_and_list(self, _, __, writer):
        mock_fn = MagicMock()

        def fn_test(ex, types):
            def fn_test_1(tag, metric, *args, **kwargs):
                if type(metric) in types:
                    raise ex
                else:
                    mock_fn(tag, metric)
            return fn_test_1

        tb = TensorBoard()
        state = {torchbearer.METRICS: {'test': {'key1': 2, 'key2': [3, 4]}}}
        # Dicts containing lists flatten both levels: key suffix then index.
        tb.add_metric(fn_test(NotImplementedError, [list, dict]), 'single', state[torchbearer.METRICS]['test'])
        call_args = list(mock_fn.call_args_list)
        call_args.sort()
        self.assertTrue(call_args[0][0] == ('single_key1', 2))
        self.assertTrue(call_args[1][0] == ('single_key2_0', 3))
        self.assertTrue(call_args[2][0] == ('single_key2_1', 4))

    @patch('tensorboardX.SummaryWriter')
    @patch('torchbearer.callbacks.tensor_board.os.path.isdir')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    def test_add_metric_fail_iterable(self, _, __, writer):
        mock_fn = MagicMock()

        def fn_test(ex, types):
            def fn_test_1(tag, metric, *args, **kwargs):
                if type(metric) in types:
                    raise ex
                else:
                    mock_fn(tag, metric)
            return fn_test_1

        tb = TensorBoard()
        state = {torchbearer.METRICS: {'test': 0.1}}
        # A non-iterable metric that still raises NotImplementedError is
        # dropped with a single warning and nothing is written.
        with warnings.catch_warnings(record=True) as w:
            tb.add_metric(fn_test(NotImplementedError, [list, dict, float]), 'single', state[torchbearer.METRICS]['test'])
            self.assertTrue(len(w) == 1)
        call_args = list(mock_fn.call_args_list)
        call_args.sort()
        self.assertTrue(len(call_args) == 0)

    @patch('tensorboardX.SummaryWriter')
    @patch('torchbearer.callbacks.tensor_board.os.path.isdir')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    def test_add_metric_fail(self, _, __, writer):
        mock_fn = MagicMock()

        def fn_test(ex, types):
            def fn_test_1(tag, metric, *args, **kwargs):
                if type(metric) in types:
                    raise ex
                else:
                    mock_fn(tag, metric)
            return fn_test_1

        tb = TensorBoard()
        state = {torchbearer.METRICS: {'test': 0.1}}
        # Any other exception from the writer is also swallowed with one
        # warning rather than propagated.
        with warnings.catch_warnings(record=True) as w:
            tb.add_metric(fn_test(Exception, [float]), 'single', state[torchbearer.METRICS]['test'])
            self.assertTrue(len(w) == 1)
        call_args = list(mock_fn.call_args_list)
        call_args.sort()
        self.assertTrue(len(call_args) == 0)

    @patch('tensorboardX.SummaryWriter')
    @patch('visdom.Visdom')
    @patch('torchbearer.callbacks.tensor_board.os.path.isdir')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    def test_get_writer_oserror(self, mockdirs, isdir, _, __):
        from torchbearer.callbacks.tensor_board import get_writer
        import sys
        isdir.return_value = True
        mockdirs.side_effect = OSError
        # A generic OSError from makedirs must propagate to the caller.
        self.assertRaises(OSError, lambda: get_writer('test', 'nothing', visdom=True))
        # Python 3 passes exist_ok=True; the Python 2 path does not.
        if sys.version_info[0] >= 3:
            mockdirs.assert_called_once_with('test', exist_ok=True)
        else:
            mockdirs.assert_called_once_with('test')

    @patch('tensorboardX.SummaryWriter')
    @patch('visdom.Visdom')
    @patch('torchbearer.callbacks.tensor_board.os.path.isdir')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    def test_get_writer_oserror_eexist(self, mockdirs, isdir, _, __):
        from torchbearer.callbacks.tensor_board import get_writer
        import sys
        import errno

        # OSError with errno EEXIST ("directory already exists") must be
        # swallowed by get_writer, not raised.
        class MyError(OSError):
            def __init__(self):
                self.errno = errno.EEXIST

        isdir.return_value = True
        mockdirs.side_effect = MyError
        get_writer('test', 'nothing', visdom=True)
        if sys.version_info[0] >= 3:
            mockdirs.assert_called_once_with('test', exist_ok=True)
        else:
            mockdirs.assert_called_once_with('test')

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_log_dir(self, mock_board, _):
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoard(write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_end(state)
        # Log dir defaults to ./logs/<model class name>_<comment>.
        mock_board.assert_called_once_with(log_dir=os.path.join('./logs', 'Sequential_torchbearer'))

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_log_dir_visdom(self, mock_visdom, mock_writer, _):
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        mock_writer.__delete__ = Mock()
        tboard = TensorBoard(visdom=True, write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_end(state)
        # Visdom writer logs into a log.log file under the same log dir.
        self.assertEqual(mock_visdom.call_count, 1)
        self.assertTrue(mock_visdom.call_args[1]['log_to_filename'] ==
                        os.path.join('./logs', 'Sequential_torchbearer', 'log.log'))

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_batch_log_dir(self, mock_board, _):
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.EPOCH: 0}
        tboard = TensorBoard(write_batch_metrics=True, write_graph=False, write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        tboard.on_end_epoch(state)
        tboard.on_end(state)
        # Batch metrics get a per-epoch sub-writer in an 'epoch-N' folder.
        mock_board.assert_called_with(log_dir=os.path.join('./logs', 'Sequential_torchbearer', 'epoch-0'))

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_batch_log_dir_visdom(self, mock_visdom, mock_writer, _):
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 0, torchbearer.METRICS: {'test': 1}, torchbearer.BATCH: 0}
        tboard = TensorBoard(visdom=True, write_batch_metrics=True, write_graph=False, write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        tboard.on_end_epoch(state)
        tboard.on_end(state)
        # Visdom batch writer uses an 'epoch' folder (no epoch number).
        self.assertTrue(mock_visdom.call_args[1]['log_to_filename'] ==
                        os.path.join('./logs', 'Sequential_torchbearer', 'epoch', 'log.log'))

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    @patch('torch.rand')
    def test_write_graph(self, mock_rand, mock_board, _):
        mock_board.return_value = Mock()
        mock_board.return_value.add_graph = Mock()
        mock_rand.return_value = 1
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.X: torch.zeros(1, 1, 9, 9)}
        tboard = TensorBoard(write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_sample(state)
        tboard.on_end(state)
        # A dummy input of the sampled batch's size is used to trace the graph.
        mock_rand.assert_called_once_with(state[torchbearer.X].size(), requires_grad=False)
        self.assertEqual(mock_board.return_value.add_graph.call_count, 1)
        # The model passed to add_graph is a copy: equal repr, different object.
        self.assertEqual(str(state[torchbearer.MODEL]), str(mock_board.return_value.add_graph.call_args_list[0][0][0]))
        self.assertNotEqual(state[torchbearer.MODEL], mock_board.return_value.add_graph.call_args_list[0][0][0])

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_writer_closed_on_end(self, mock_board, _):
        mock_board.return_value = Mock()
        mock_board.return_value.close = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoard(write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_end({})
        self.assertEqual(mock_board.return_value.close.call_count, 1)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_writer_closed_on_end_visdom(self, mock_visdom, mock_writer, _):
        mock_writer.return_value = Mock()
        mock_writer.return_value.close = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoard(visdom=True, write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_end({})
        self.assertEqual(mock_writer.return_value.close.call_count, 1)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_batch_writer_closed_on_end_epoch(self, mock_board, _):
        mock_board.return_value = Mock()
        mock_board.return_value.close = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.EPOCH: 0}
        tboard = TensorBoard(write_batch_metrics=True, write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        tboard.on_end_epoch({})
        # The per-epoch batch writer is closed at the end of the epoch.
        self.assertEqual(mock_board.return_value.close.call_count, 1)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_batch_writer_closed_on_end_epoch_visdom(self, mock_visdom, mock_writer, _):
        mock_writer.return_value = Mock()
        mock_writer.return_value.close = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.EPOCH: 0}
        tboard = TensorBoard(visdom=True, write_batch_metrics=True, write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        tboard.on_end_epoch({})
        tboard.on_end(state)
        # Two closes: the epoch batch writer plus the main writer at on_end.
        self.assertTrue(mock_writer.return_value.close.call_count == 2)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_batch_metrics(self, mock_board, _):
        mock_board.return_value = Mock()
        mock_board.return_value.add_scalar = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 0, torchbearer.METRICS: {'test': 1}, torchbearer.BATCH: 0}
        tboard = TensorBoard(write_batch_metrics=True, write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        # Batch metrics are written on both training and validation steps
        # under the 'batch/' prefix.
        tboard.on_step_training(state)
        mock_board.return_value.add_scalar.assert_called_once_with('batch/test', 1, 0)
        mock_board.return_value.add_scalar.reset_mock()
        tboard.on_step_validation(state)
        mock_board.return_value.add_scalar.assert_called_once_with('batch/test', 1, 0)
        tboard.on_end_epoch(state)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_batch_metrics_visdom(self, mock_visdom, mock_writer, _):
        mock_writer.return_value = Mock()
        mock_writer.return_value.add_scalar = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 0, torchbearer.METRICS: {'test': 1}, torchbearer.BATCH: 0, torchbearer.TRAIN_STEPS: 0}
        tboard = TensorBoard(visdom=True, write_batch_metrics=True, write_epoch_metrics=False)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        # Visdom writer takes the prefix via main_tag instead of the tag path.
        tboard.on_step_training(state)
        mock_writer.return_value.add_scalar.assert_called_once_with('test', 1, 0, main_tag='batch')
        mock_writer.return_value.add_scalar.reset_mock()
        tboard.on_step_validation(state)
        mock_writer.return_value.add_scalar.assert_called_once_with('test', 1, 0, main_tag='batch')
        tboard.on_end_epoch(state)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_epoch_metrics(self, mock_board, _):
        mock_board.return_value = Mock()
        mock_board.return_value.add_scalar = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.EPOCH: 0,
                 torchbearer.METRICS: {'test': 1}}
        tboard = TensorBoard(write_batch_metrics=False, write_epoch_metrics=True)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        tboard.on_end_epoch(state)
        # Epoch metrics are written once per epoch under the 'epoch/' prefix.
        mock_board.return_value.add_scalar.assert_called_once_with('epoch/test', 1, 0)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_epoch_metrics_visdom(self, mock_visdom, mock_writer, _):
        mock_writer.return_value = Mock()
        mock_writer.return_value.add_scalar = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.EPOCH: 0,
                 torchbearer.METRICS: {'test': 1}}
        tboard = TensorBoard(visdom=True, write_batch_metrics=False, write_epoch_metrics=True)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        tboard.on_end_epoch(state)
        mock_writer.return_value.add_scalar.assert_called_once_with('test', 1, 0, main_tag='epoch')
        tboard.on_end(state)
class TestTensorBoardImages(TestCase):
    """Unit tests for the TensorBoardImages callback.

    torchvision's make_grid and the tensorboardX/visdom writers are
    mocked, so the tests verify only which tensors are cached across
    validation steps and what is forwarded to make_grid / add_image.
    """

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_log_dir(self, mock_board, _):
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(log_dir='./test', comment='torchbearer')
        tboard.on_start(state)
        tboard.on_end(state)
        # Log dir is <log_dir>/<model class name>_<comment>.
        mock_board.assert_called_once_with(log_dir=os.path.join('./test', 'Sequential_torchbearer'))

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_log_dir_visdom(self, mock_visdom, mock_writer, _):
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        mock_writer.__delete__ = Mock()
        tboard = TensorBoardImages(visdom=True, log_dir='./test', comment='torchbearer')
        tboard.on_start(state)
        tboard.on_end(state)
        self.assertEqual(mock_visdom.call_count, 1)
        self.assertTrue(mock_visdom.call_args[1]['log_to_filename'] ==
                        os.path.join('./test', 'Sequential_torchbearer', 'log.log'))

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_writer_closed_on_end(self, mock_board, _):
        mock_board.return_value = Mock()
        mock_board.return_value.close = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages()
        tboard.on_start(state)
        tboard.on_end({})
        self.assertEqual(mock_board.return_value.close.call_count, 1)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_writer_closed_on_end_visdom_visdom(self, mock_visdom, mock_writer, _):
        mock_writer.return_value = Mock()
        mock_writer.return_value.close = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        # NOTE(review): this instantiates TensorBoard, not TensorBoardImages,
        # inside the TensorBoardImages test class (and the method name doubles
        # '_visdom') — looks like a copy-paste slip; confirm intent.
        tboard = TensorBoard(visdom=True)
        tboard.on_start(state)
        tboard.on_end({})
        self.assertEqual(mock_writer.return_value.close.call_count, 1)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('torchvision.utils.make_grid')
    @patch('tensorboardX.SummaryWriter')
    def test_simple_case(self, mock_board, mock_grid, _):
        mock_board.return_value = Mock()
        mock_board.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(name='test', key='x', write_each_epoch=False, num_images=18, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        tboard.on_step_validation(state)
        # Grid options are forwarded (norm_range becomes make_grid's `range`).
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        mock_board.return_value.add_image.assert_called_once_with('test', 10, 1)
        # One batch of exactly num_images is written as-is.
        self.assertTrue(mock_grid.call_args[0][0].size() == state['x'].size())
        tboard.on_end({})

    @patch('torchvision.utils.make_grid')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_simple_case_visdom(self, mock_visdom, mock_writer, _, mock_grid):
        mock_writer.return_value = Mock()
        mock_writer.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(visdom=True, name='test', key='x', write_each_epoch=False, num_images=18, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        tboard.on_step_validation(state)
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        # Visdom path appends the step to the image name ('test' -> 'test1').
        mock_writer.return_value.add_image.assert_called_once_with('test1', 10, 1)
        self.assertTrue(mock_grid.call_args[0][0].size() == state['x'].size())
        tboard.on_end({})

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('torchvision.utils.make_grid')
    @patch('tensorboardX.SummaryWriter')
    def test_multi_batch(self, mock_board, mock_grid, _):
        mock_board.return_value = Mock()
        mock_board.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(name='test', key='x', write_each_epoch=False, num_images=36, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        # Two 18-image batches are accumulated before the single write.
        tboard.on_step_validation(state)
        tboard.on_step_validation(state)
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        mock_board.return_value.add_image.assert_called_once_with('test', 10, 1)
        self.assertTrue(mock_grid.call_args[0][0].size() == torch.ones(36, 3, 10, 10).size())
        tboard.on_end({})

    @patch('torchvision.utils.make_grid')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_multi_batch_visdom(self, mock_visdom, mock_writer, _, mock_grid):
        mock_writer.return_value = Mock()
        mock_writer.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(visdom=True, name='test', key='x', write_each_epoch=False, num_images=36, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        tboard.on_step_validation(state)
        tboard.on_step_validation(state)
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        mock_writer.return_value.add_image.assert_called_once_with('test1', 10, 1)
        self.assertTrue(mock_grid.call_args[0][0].size() == torch.ones(36, 3, 10, 10).size())
        tboard.on_end({})

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('torchvision.utils.make_grid')
    @patch('tensorboardX.SummaryWriter')
    def test_multi_epoch(self, mock_board, mock_grid, _):
        mock_board.return_value = Mock()
        mock_board.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(name='test', key='x', write_each_epoch=True, num_images=36, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        # With write_each_epoch=True, accumulation continues across the
        # epoch boundary until num_images is reached; still only one write.
        tboard.on_step_validation(state)
        tboard.on_end_epoch(state)
        tboard.on_step_validation(state)
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        mock_board.return_value.add_image.assert_called_once_with('test', 10, 1)
        self.assertTrue(mock_grid.call_args[0][0].size() == torch.ones(36, 3, 10, 10).size())
        tboard.on_end({})

    @patch('torchvision.utils.make_grid')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_multi_epoch_visdom(self, mock_visdom, mock_writer, _, mock_grid):
        mock_writer.return_value = Mock()
        mock_writer.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(visdom=True, name='test', key='x', write_each_epoch=True, num_images=36, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        tboard.on_step_validation(state)
        tboard.on_end_epoch(state)
        tboard.on_step_validation(state)
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        mock_writer.return_value.add_image.assert_called_once_with('test1', 10, 1)
        self.assertTrue(mock_grid.call_args[0][0].size() == torch.ones(36, 3, 10, 10).size())
        tboard.on_end({})

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('torchvision.utils.make_grid')
    @patch('tensorboardX.SummaryWriter')
    def test_single_channel(self, mock_board, mock_grid, _):
        mock_board.return_value = Mock()
        mock_board.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(name='test', key='x', write_each_epoch=True, num_images=18, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        tboard.on_step_validation(state)
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        mock_board.return_value.add_image.assert_called_once_with('test', 10, 1)
        # 3D (N, H, W) inputs gain an explicit channel dim -> (N, 1, H, W).
        self.assertTrue(mock_grid.call_args[0][0].size() == torch.ones(18, 1, 10, 10).size())
        tboard.on_end({})

    @patch('torchvision.utils.make_grid')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_single_channel_visdom(self, mock_visdom, mock_writer, _, mock_grid):
        mock_writer.return_value = Mock()
        mock_writer.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(visdom=True, name='test', key='x', write_each_epoch=True, num_images=18, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        tboard.on_step_validation(state)
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        mock_writer.return_value.add_image.assert_called_once_with('test1', 10, 1)
        self.assertTrue(mock_grid.call_args[0][0].size() == torch.ones(18, 1, 10, 10).size())
        tboard.on_end({})

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('torchvision.utils.make_grid')
    @patch('tensorboardX.SummaryWriter')
    def test_odd_batches(self, mock_board, mock_grid, _):
        mock_board.return_value = Mock()
        mock_board.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(name='test', key='x', write_each_epoch=True, num_images=40, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        # 3 x 18 = 54 images accumulated; the cache is trimmed to exactly
        # num_images (40) for the single write.
        tboard.on_step_validation(state)
        tboard.on_step_validation(state)
        tboard.on_step_validation(state)
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        mock_board.return_value.add_image.assert_called_once_with('test', 10, 1)
        self.assertTrue(mock_grid.call_args[0][0].size() == torch.ones(40, 3, 10, 10).size())
        tboard.on_end({})

    @patch('torchvision.utils.make_grid')
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_odd_batches_visdom(self, mock_visdom, mock_writer, _, mock_grid):
        mock_writer.return_value = Mock()
        mock_writer.return_value.add_image = Mock()
        mock_grid.return_value = 10
        state = {'x': torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 1,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        tboard = TensorBoardImages(visdom=True, name='test', key='x', write_each_epoch=True, num_images=40, nrow=9, padding=3,
                                   normalize=True, norm_range='tmp', scale_each=True, pad_value=1)
        tboard.on_start(state)
        tboard.on_step_validation(state)
        tboard.on_step_validation(state)
        tboard.on_step_validation(state)
        mock_grid.assert_called_once_with(ANY, nrow=9, padding=3, normalize=True, range='tmp', scale_each=True,
                                          pad_value=1)
        mock_writer.return_value.add_image.assert_called_once_with('test1', 10, 1)
        self.assertTrue(mock_grid.call_args[0][0].size() == torch.ones(40, 3, 10, 10).size())
        tboard.on_end({})
class TestTensorBoardProjector(TestCase):
@patch('torchbearer.callbacks.tensor_board.os.makedirs')
@patch('tensorboardX.SummaryWriter')
def test_log_dir(self, mock_board, _):
state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
tboard = TensorBoardProjector(log_dir='./test', comment='torchbearer')
tboard.on_start(state)
tboard.on_end(state)
mock_board.assert_called_once_with(log_dir=os.path.join('./test', 'Sequential_torchbearer'))
@patch('torchbearer.callbacks.tensor_board.os.makedirs')
@patch('tensorboardX.SummaryWriter')
def test_writer_closed_on_end(self, mock_board, _):
mock_board.return_value = Mock()
mock_board.return_value.close = Mock()
state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
tboard = TensorBoardProjector()
tboard.on_start(state)
tboard.on_end({})
self.assertEqual(mock_board.return_value.close.call_count, 1)
@patch('torchbearer.callbacks.tensor_board.os.makedirs')
@patch('tensorboardX.SummaryWriter')
def test_simple_case(self, mock_board, _):
mock_board.return_value = Mock()
mock_board.return_value.add_embedding = Mock()
state = {torchbearer.X: torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 0,
torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.Y_TRUE: torch.ones(18),
torchbearer.BATCH: 0}
tboard = TensorBoardProjector(num_images=18, avg_data_channels=False, write_data=False,
features_key=torchbearer.Y_TRUE)
tboard.on_start(state)
tboard.on_step_validation(state)
mock_board.return_value.add_embedding.assert_called_once_with(ANY, metadata=ANY, label_img=ANY, tag='features',
global_step=0)
self.assertTrue(
mock_board.return_value.add_embedding.call_args[0][0].size() == state[torchbearer.Y_TRUE].unsqueeze(
1).size())
self.assertTrue(
mock_board.return_value.add_embedding.call_args[1]['metadata'].size() == state[torchbearer.Y_TRUE].size())
self.assertTrue(
mock_board.return_value.add_embedding.call_args[1]['label_img'].size() == state[torchbearer.X].size())
tboard.on_end(state)
@patch('torchbearer.callbacks.tensor_board.os.makedirs')
@patch('tensorboardX.SummaryWriter')
def test_multi_epoch(self, mock_board, _):
mock_board.return_value = Mock()
mock_board.return_value.add_embedding = Mock()
state = {torchbearer.X: torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 0,
torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.Y_TRUE: torch.ones(18),
torchbearer.BATCH: 0}
tboard = TensorBoardProjector(num_images=18, avg_data_channels=False, write_data=False,
features_key=torchbearer.Y_TRUE)
tboard.on_start(state)
tboard.on_step_validation(state)
mock_board.return_value.add_embedding.assert_called_once_with(ANY, metadata=ANY, label_img=ANY, tag='features',
global_step=0)
self.assertTrue(
mock_board.return_value.add_embedding.call_args[0][0].size() == state[torchbearer.Y_TRUE].unsqueeze(
1).size())
self.assertTrue(
mock_board.return_value.add_embedding.call_args[1]['metadata'].size() == state[torchbearer.Y_TRUE].size())
self.assertTrue(
mock_board.return_value.add_embedding.call_args[1]['label_img'].size() == state[torchbearer.X].size())
tboard.on_end_epoch({})
mock_board.return_value.add_embedding.reset_mock()
tboard.on_step_validation(state)
mock_board.return_value.add_embedding.assert_called_once_with(ANY, metadata=ANY, label_img=ANY, tag='features',
global_step=0)
self.assertTrue(
mock_board.return_value.add_embedding.call_args[0][0].size() == state[torchbearer.Y_TRUE].unsqueeze(
1).size())
self.assertTrue(
mock_board.return_value.add_embedding.call_args[1]['metadata'].size() == state[torchbearer.Y_TRUE].size())
self.assertTrue(
mock_board.return_value.add_embedding.call_args[1]['label_img'].size() == state[torchbearer.X].size())
tboard.on_end({})
@patch('torchbearer.callbacks.tensor_board.os.makedirs')
@patch('tensorboardX.SummaryWriter')
def test_multi_batch(self, mock_board, _):
mock_board.return_value = Mock()
mock_board.return_value.add_embedding = Mock()
state = {torchbearer.X: torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 0,
torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.Y_TRUE: torch.ones(18),
torchbearer.BATCH: 0}
tboard = TensorBoardProjector(num_images=45, avg_data_channels=False, write_data=False,
features_key=torchbearer.Y_TRUE)
tboard.on_start(state)
for i in range(3):
state[torchbearer.BATCH] = i
tboard.on_step_validation(state)
mock_board.return_value.add_embedding.assert_called_once_with(ANY, metadata=ANY, label_img=ANY, tag='features',
global_step=0)
self.assertTrue(mock_board.return_value.add_embedding.call_args[0][0].size() == torch.Size([45, 1]))
self.assertTrue(mock_board.return_value.add_embedding.call_args[1]['metadata'].size() == torch.Size([45]))
self.assertTrue(
mock_board.return_value.add_embedding.call_args[1]['label_img'].size() == torch.Size([45, 3, 10, 10]))
tboard.on_end({})
@patch('torchbearer.callbacks.tensor_board.os.makedirs')
@patch('tensorboardX.SummaryWriter')
def test_multi_batch_data(self, mock_board, _):
mock_board.return_value = Mock()
mock_board.return_value.add_embedding = Mock()
state = {torchbearer.X: torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 0,
torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.Y_TRUE: torch.ones(18),
torchbearer.BATCH: 0}
tboard = TensorBoardProjector(num_images=45, avg_data_channels=False, write_data=True, write_features=False)
tboard.on_start(state)
for i in range(3):
state[torchbearer.BATCH] = i
tboard.on_step_validation(state)
mock_board.return_value.add_embedding.assert_called_once_with(ANY, metadata=ANY, label_img=ANY, tag='data',
global_step=-1)
self.assertTrue(mock_board.return_value.add_embedding.call_args[0][0].size() == torch.Size([45, 300]))
self.assertTrue(mock_board.return_value.add_embedding.call_args[1]['metadata'].size() == torch.Size([45]))
self.assertTrue(
mock_board.return_value.add_embedding.call_args[1]['label_img'].size() == torch.Size([45, 3, 10, 10]))
tboard.on_end({})
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_channel_average(self, mock_board, _):
        """With avg_data_channels=True the colour channels are averaged, so
        each 3x10x10 image flattens to 10*10 = 100 features."""
        mock_board.return_value = Mock()
        mock_board.return_value.add_embedding = Mock()
        state = {torchbearer.X: torch.ones(18, 3, 10, 10), torchbearer.EPOCH: 0,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.Y_TRUE: torch.ones(18),
                 torchbearer.BATCH: 0}
        tboard = TensorBoardProjector(num_images=18, avg_data_channels=True, write_data=True, write_features=False)
        tboard.on_start(state)
        tboard.on_step_validation(state)
        mock_board.return_value.add_embedding.assert_called_once_with(ANY, metadata=ANY, label_img=ANY, tag='data',
                                                                      global_step=-1)
        # Channel-averaged embedding: (18, 100); metadata/label images keep input sizes.
        self.assertTrue(mock_board.return_value.add_embedding.call_args[0][0].size() == torch.Size([18, 100]))
        self.assertTrue(
            mock_board.return_value.add_embedding.call_args[1]['metadata'].size() == state[torchbearer.Y_TRUE].size())
        self.assertTrue(
            mock_board.return_value.add_embedding.call_args[1]['label_img'].size() == state[torchbearer.X].size())
        tboard.on_end({})
    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_no_channels(self, mock_board, _):
        """Inputs without a channel dimension (N, H, W) should gain a singleton
        channel in the label images written to the projector."""
        mock_board.return_value = Mock()
        mock_board.return_value.add_embedding = Mock()
        state = {torchbearer.X: torch.ones(18, 10, 10), torchbearer.EPOCH: 0,
                 torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.Y_TRUE: torch.ones(18),
                 torchbearer.BATCH: 0}
        tboard = TensorBoardProjector(num_images=18, avg_data_channels=False, write_data=True, write_features=False)
        tboard.on_start(state)
        tboard.on_step_validation(state)
        mock_board.return_value.add_embedding.assert_called_once_with(ANY, metadata=ANY, label_img=ANY, tag='data',
                                                                      global_step=-1)
        self.assertTrue(mock_board.return_value.add_embedding.call_args[0][0].size() == torch.Size([18, 100]))
        self.assertTrue(
            mock_board.return_value.add_embedding.call_args[1]['metadata'].size() == state[torchbearer.Y_TRUE].size())
        # label_img is reshaped to (N, 1, H, W).
        self.assertTrue(
            mock_board.return_value.add_embedding.call_args[1]['label_img'].size() == torch.Size([18, 1, 10, 10]))
        tboard.on_end({})
class TestTensorbardText(TestCase):
    """Tests for the TensorBoardText callback: HTML metric tables written to
    tensorboardX (or visdom) on epoch/batch boundaries."""

    def test_table_formatter_one_metric(self):
        # One metric becomes a header row plus a single value row.
        tf = TensorBoardText.table_formatter
        metrics = str({'test_metric_1': 1})
        table = tf(metrics).replace(" ", "")
        correct_table = '<table><th>Metric</th><th>Value</th><tr><td>test_metric_1</td><td>1</td></tr></table>'
        self.assertEqual(table, correct_table)

    def test_table_formatter_two_metrics(self):
        # Row order depends on dict iteration, so only containment is asserted.
        tf = TensorBoardText.table_formatter
        metrics = str({'test_metric_1': 1, 'test_metric_2': 2})
        table = tf(metrics).replace(" ", "")
        self.assertIn('<tr><td>test_metric_1</td><td>1</td></tr>', table)
        self.assertIn('<tr><td>test_metric_2</td><td>2</td></tr>', table)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_epoch_writer(self, mock_writer, _):
        # Epoch metrics are written once under tag 'epoch' with the epoch index as step.
        tboard = TensorBoardText(log_trial_summary=False)
        metrics = {'test_metric_1': 1, 'test_metric_2': 1}
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 1, torchbearer.METRICS: metrics}
        metric_string = TensorBoardText.table_formatter(str(metrics))
        tboard.on_start(state)
        tboard.on_start_training(state)
        tboard.on_start_epoch(state)
        tboard.on_end_epoch(state)
        mock_writer.return_value.add_text.assert_called_once_with('epoch', metric_string, 1)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_epoch_writer_visdom(self, mock_visdom, mock_writer, _):
        # The visdom writer prefixes the table with an epoch heading.
        tboard = TensorBoardText(visdom=True, log_trial_summary=False)
        metrics = {'test_metric_1': 1, 'test_metric_2': 1}
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 1, torchbearer.METRICS: metrics}
        metric_string = TensorBoardText.table_formatter(str(metrics))
        tboard.on_start(state)
        tboard.on_start_training(state)
        tboard.on_start_epoch(state)
        tboard.on_end_epoch(state)
        mock_writer.return_value.add_text.assert_called_once_with('epoch', '<h4>Epoch 1</h4>'+metric_string, 1)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_batch_writer(self, mock_writer, _):
        # Batch metrics use tag 'batch' with the batch index as step.
        tboard = TensorBoardText(write_epoch_metrics=False, write_batch_metrics=True, log_trial_summary=False)
        metrics = {'test_metric_1': 1, 'test_metric_2': 1}
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 1, torchbearer.BATCH: 100, torchbearer.METRICS: metrics}
        metric_string = TensorBoardText.table_formatter(str(metrics))
        tboard.on_start(state)
        tboard.on_start_training(state)
        tboard.on_start_epoch(state)
        tboard.on_step_training(state)
        mock_writer.return_value.add_text.assert_called_once_with('batch', metric_string, 100)
        tboard.on_end_epoch(state)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_batch_writer_visdom(self, mock_visdom, mock_writer, _):
        # Visdom batch output gets an "Epoch X - Batch Y" heading and its own step counter.
        tboard = TensorBoardText(visdom=True, write_epoch_metrics=False, write_batch_metrics=True, log_trial_summary=False)
        metrics = {'test_metric_1': 1, 'test_metric_2': 1}
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 1, torchbearer.BATCH: 100, torchbearer.METRICS: metrics}
        metric_string = TensorBoardText.table_formatter(str(metrics))
        metric_string = '<h3>Epoch {} - Batch {}</h3>'.format(state[torchbearer.EPOCH], state[torchbearer.BATCH])+metric_string
        tboard.on_start(state)
        tboard.on_start_training(state)
        tboard.on_start_epoch(state)
        tboard.on_step_training(state)
        mock_writer.return_value.add_text.assert_called_once_with('batch', metric_string, 1)
        tboard.on_end_epoch(state)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_batch_metrics(self, mock_board, _):
        # Only one write should happen per training step.
        mock_board.return_value = Mock()
        mock_board.return_value.add_text = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 0, torchbearer.METRICS: {'test': 1}, torchbearer.BATCH: 0}
        tboard = TensorBoardText(write_batch_metrics=True, write_epoch_metrics=False, log_trial_summary=False)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        tboard.on_step_training(state)
        mock_board.return_value.add_text.assert_called_once_with('batch', TensorBoardText.table_formatter(str(state[torchbearer.METRICS])), 0)
        mock_board.return_value.add_text.reset_mock()
        tboard.on_end_epoch(state)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.torchvis.VisdomWriter')
    @patch('visdom.Visdom')
    def test_batch_metrics_visdom(self, mock_visdom, mock_writer, _):
        # Validation steps must not write after the mock is reset.
        mock_writer.return_value = Mock()
        mock_writer.return_value.add_text = Mock()
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 0, torchbearer.METRICS: {'test': 1}, torchbearer.BATCH: 0, torchbearer.TRAIN_STEPS: 0}
        tboard = TensorBoardText(visdom=True, write_batch_metrics=True, write_epoch_metrics=False, log_trial_summary=False)
        tboard.on_start(state)
        tboard.on_start_epoch(state)
        tboard.on_step_training(state)
        mock_writer.return_value.add_text.assert_called_once_with('batch', '<h3>Epoch {} - Batch {}</h3>'.format(state[torchbearer.EPOCH], state[torchbearer.BATCH])+TensorBoardText.table_formatter(str(state[torchbearer.METRICS])), 1)
        mock_writer.return_value.add_text.reset_mock()
        tboard.on_step_validation(state)
        tboard.on_end(state)

    @patch('torchbearer.callbacks.tensor_board.os.makedirs')
    @patch('tensorboardX.SummaryWriter')
    def test_log_summary(self, mock_board, _):
        # With log_trial_summary=True, on_start writes str(trial) under tag 'trial'.
        mock_board.return_value = Mock()
        mock_board.return_value.add_text = Mock()
        mock_self = 'test'
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 0, torchbearer.METRICS: {'test': 1}, torchbearer.BATCH: 0, torchbearer.SELF: mock_self}
        tboard = TensorBoardText(write_batch_metrics=False, write_epoch_metrics=False, log_trial_summary=True)
        tboard.on_start(state)
        self.assertEqual(mock_board.return_value.add_text.call_args[0][0], 'trial')
        self.assertEqual(mock_board.return_value.add_text.call_args[0][1], str(mock_self))
|
993,155 | c8c3d1ff0833808c3e006099e1c49a2902adea9a | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.urls import path
# No routes are registered for this app yet; append path() entries as views are added.
urlpatterns = []
|
993,156 | 35b8ddf13193f62e08ecce08fda73cb9aebf06df |
# Question 28
# Question:
# Define a function that can receive two integer numbers in string form and compute
# their sum and then print it in console.
def sum_of_two_number_string(str1, str2):
    """Parse two integers given in string form and return their sum."""
    return sum(int(value) for value in (str1, str2))


print(sum_of_two_number_string("324", "34243"))
def concatenate_two_strings(s1, s2):
    """Return s1 followed by s2, formatted through an f-string."""
    return f'{s1}{s2}'


# Same behaviour expressed as a lambda bound to a name.
con_two = lambda s1, s2: f'{s1}{s2}'

print(concatenate_two_strings('ala', "ola"))
print(con_two('tak', 'zupa'))
# Question:
# Define a function that can accept two strings as input and print the string with maximum length in console.
# If two strings have the same length, then the function should print all strings line by line.
# longer_string= lambda s1,s2: s1 if len(s1)>
def longer_str(s1, s2):
    """Return the longer of two strings; when lengths match, return both as a tuple."""
    if len(s1) == len(s2):
        return s1, s2
    return s1 if len(s1) > len(s2) else s2


print(longer_str('aaa', 'bbb'))
|
993,157 | a358271c86077ce8cd6bfae3fb1b3b85d481ea3e | ans = 0
# Sum each of five prices rounded up to the next multiple of 10, then refund
# the single largest round-up amount at the end. `ans` is initialised to 0 on
# the line above this block (fused with a separator row in this dump).
loss = 0  # largest single round-up (10 - x % 10) seen so far
for i in range(5):
    x = int(input())
    if x%10 != 0:
        loss = max(loss, 10-x%10)
    ans += (x+9)//10*10  # ceiling of x to a multiple of 10
print(ans - loss)
|
993,158 | 80fe5b6ae687ead6ebd2f5c2b71742f4fc72684c | """
https://community.topcoder.com/stat?c=problem_statement&pm=2235&rd=5070
https://www.topcoder.com/community/data-science/data-science-tutorials/greedy-is-good/
"""
def GoldMine(mines, miners):
    """Greedily distribute `miners` workers across `mines`.

    Each mine is a string of seven 3-digit percentages: the probability that
    the mine holds 0..6 units of ore. A miner matched to ore earns 50 (60 if
    ore is left over beyond the crew size), while an idle miner costs 20.
    Returns a list with the number of miners assigned to each mine.
    """
    # Parse "ppp, ppp, ..." percentage strings into probabilities.
    prob_tables = [[int(tok) / 100 for tok in row.split(', ')] for row in mines]

    # expected[m][k] = expected payout of mine m staffed with k miners.
    expected = []
    for probs in prob_tables:
        row = []
        for staffed in range(miners + 1):
            payout = 0
            for ore, p in enumerate(probs):
                if staffed < ore:
                    payout += 60 * staffed * p
                elif staffed == ore:
                    payout += 50 * staffed * p
                else:
                    payout += (50 * ore - 20 * (staffed - ore)) * p
            row.append(payout)
        expected.append(row)

    # Assign miners one at a time to the mine with the best marginal gain;
    # ties go to the lowest-indexed mine (strict '>' comparison).
    allocation = [0] * len(mines)
    for _ in range(miners):
        best_gain = float('-inf')
        best_mine = None
        for idx, row in enumerate(expected):
            gain = row[allocation[idx] + 1] - row[allocation[idx]]
            if gain > best_gain:
                best_gain = gain
                best_mine = idx
        allocation[best_mine] += 1
    return allocation
# --- Test 1: two distinct mines, four miners ---
test_mines = ["000, 030, 030, 040, 000, 000, 000",
              "020, 020, 020, 010, 010, 010, 010"]
test_miners = 4
result = GoldMine(test_mines, test_miners)
solution = [2, 2]
print('Test case result: ', result == solution)

# --- Test 2: fifty identical mines, 56 miners ---
test_mines = ["026, 012, 005, 013, 038, 002, 004"] * 50
test_miners = 56
result = GoldMine(test_mines, test_miners)
solution = [2] * 6 + [1] * 44
print('Test case result: ', result == solution)

# --- Test 3: mines whose probability mass sweeps from 0 ores up to 6 ---
test_mines = [
    "100, 000, 000, 000, 000, 000, 000", "090, 010, 000, 000, 000, 000, 000",
    "080, 020, 000, 000, 000, 000, 000", "075, 025, 000, 000, 000, 000, 000",
    "050, 050, 000, 000, 000, 000, 000", "025, 075, 000, 000, 000, 000, 000",
    "020, 080, 000, 000, 000, 000, 000", "010, 090, 000, 000, 000, 000, 000",
    "000, 100, 000, 000, 000, 000, 000", "000, 090, 010, 000, 000, 000, 000",
    "000, 080, 020, 000, 000, 000, 000", "000, 075, 025, 000, 000, 000, 000",
    "000, 050, 050, 000, 000, 000, 000", "000, 025, 075, 000, 000, 000, 000",
    "000, 020, 080, 000, 000, 000, 000", "000, 010, 090, 000, 000, 000, 000",
    "000, 000, 100, 000, 000, 000, 000", "000, 000, 090, 010, 000, 000, 000",
    "000, 000, 080, 020, 000, 000, 000", "000, 000, 075, 025, 000, 000, 000",
    "000, 000, 050, 050, 000, 000, 000", "000, 000, 025, 075, 000, 000, 000",
    "000, 000, 020, 080, 000, 000, 000", "000, 000, 010, 090, 000, 000, 000",
    "000, 000, 000, 100, 000, 000, 000", "000, 000, 000, 100, 000, 000, 000",
    "000, 000, 000, 090, 010, 000, 000", "000, 000, 000, 080, 020, 000, 000",
    "000, 000, 000, 075, 025, 000, 000", "000, 000, 000, 050, 050, 000, 000",
    "000, 000, 000, 025, 075, 000, 000", "000, 000, 000, 020, 080, 000, 000",
    "000, 000, 000, 010, 090, 000, 000", "000, 000, 000, 000, 100, 000, 000",
    "000, 000, 000, 000, 090, 010, 000", "000, 000, 000, 000, 080, 020, 000",
    "000, 000, 000, 000, 075, 025, 000", "000, 000, 000, 000, 050, 050, 000",
    "000, 000, 000, 000, 025, 075, 000", "000, 000, 000, 000, 020, 080, 000",
    "000, 000, 000, 000, 010, 090, 000", "000, 000, 000, 000, 000, 100, 000",
    "000, 000, 000, 000, 000, 090, 010", "000, 000, 000, 000, 000, 080, 020",
    "000, 000, 000, 000, 000, 075, 025", "000, 000, 000, 000, 000, 050, 050",
    "000, 000, 000, 000, 000, 025, 075", "000, 000, 000, 000, 000, 020, 080",
    "000, 000, 000, 000, 000, 010, 090", "000, 000, 000, 000, 000, 000, 100"]
test_miners = 150
result = GoldMine(test_mines, test_miners)
solution = [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
            3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
            4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6]
print('Test case result: ', result == solution)
|
993,159 | 37014af3faa29f539078f795415c0ad13c786521 | import sys
from PyQt5 import QtWidgets, QtCore, QtGui
from gui.RegisterPopup import Ui_Register
from gui.controllers.message_popup import MessagePopup
from gui.helperfunctions.helpers import combine_into_class
from server import Database
class RegisterPopup(QtWidgets.QDialog, Ui_Register):
    """Dialog for registering a new student and enrolling them in classes.

    Input fields are constrained with regex validators (8-digit ID, 14-digit
    barcode, alphabetic names, a simple e-mail pattern). Classes chosen from
    the combo box are collected in ``class_set`` and shown in ``classTable``.
    """

    def __init__(self, *args, obj=None, **kwargs):
        super(RegisterPopup, self).__init__(*args, **kwargs)
        self.setupUi(self)
        # Student ID: exactly 8 digits.
        rx = QtCore.QRegExp("[0-9]{8}")
        validator = QtGui.QRegExpValidator(rx)
        self.IDInput.setValidator(validator)
        # Barcode: exactly 14 digits.
        rx = QtCore.QRegExp("[0-9]{14}")
        validator = QtGui.QRegExpValidator(rx)
        self.barcodeInput.setValidator(validator)
        # Names: letters, spaces and hyphens, up to 25 characters.
        rx = QtCore.QRegExp("[-a-zA-Z ]{25}")
        validator = QtGui.QRegExpValidator(rx)
        self.lastnameInput.setValidator(validator)
        self.firstnameInput.setValidator(validator)
        # Lightweight e-mail shape check (not full RFC validation).
        rx = QtCore.QRegExp("^[a-z0-9+_.-]+@[a-z0-9.-]+$")
        validator = QtGui.QRegExpValidator(rx)
        self.emailInput.setValidator(validator)
        # Class names: alphanumerics, spaces and hyphens.
        rx = QtCore.QRegExp("[-a-zA-Z0-9 ]+")
        validator = QtGui.QRegExpValidator(rx)
        self.classListComboBox.setValidator(validator)
        self.classes = []        # class records, in combo-box order
        self.class_set = set()   # (subject, catalog, section) tuples shown in the table
        self.popup = None
        self.init_ui()

    def init_ui(self):
        """Populate the class combo box from the database and wire up signals."""
        class_list = Database.get_all_class_names()
        formatted_class_list = []
        for a_class in class_list:
            formatted_class_list.append(combine_into_class(a_class["subject"], a_class["catalog"], a_class["section"]))
            self.classes.append(a_class)
        self.classListComboBox.addItems(formatted_class_list)
        completer = QtWidgets.QCompleter(formatted_class_list)
        completer.setFilterMode(QtCore.Qt.MatchContains)
        completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.classListComboBox.setCompleter(completer)
        self.classListComboBox.setCurrentIndex(-1)
        self.classListComboBox.currentIndexChanged.connect(self.add_class_to_table)
        self.classTable.itemDoubleClicked.connect(self.table_item_double_clicked)

    def table_item_double_clicked(self):
        """Remove the double-clicked class from the selection set and the table."""
        selected_class = self.classTable.selectedItems()
        tup = (selected_class[0].text(), selected_class[1].text(), selected_class[2].text())
        self.class_set.remove(tup)
        self.classTable.removeRow(self.classTable.currentRow())

    def add_class_to_table(self):
        """Add the class currently chosen in the combo box to the table (deduplicated)."""
        if self.classListComboBox.currentIndex() < 0 or self.classListComboBox.currentIndex() >= len(self.classes):
            return
        curr_class = self.classes[self.classListComboBox.currentIndex()]
        new_entry = (curr_class["subject"], curr_class["catalog"], curr_class["section"])
        if new_entry not in self.class_set:
            self.class_set.add(new_entry)
        # Rebuild the table from the set so duplicates never appear.
        self.classTable.clearContents()
        self.classTable.setRowCount(len(self.class_set))
        count = 0
        for a_class in self.class_set:
            self.classTable.setItem(count, 0, QtWidgets.QTableWidgetItem(a_class[0]))
            self.classTable.setItem(count, 1, QtWidgets.QTableWidgetItem(a_class[1]))
            self.classTable.setItem(count, 2, QtWidgets.QTableWidgetItem(a_class[2]))
            count += 1
        # need to create custom sort
        self.classTable.sortItems(0, QtCore.Qt.AscendingOrder)

    def take_id(self, student_num):
        """Route a scanned/typed number to the ID field (8 digits) or barcode field."""
        if len(student_num) == 8:
            self.IDInput.setText(student_num)
            self.barcodeInput.setFocus()
        else:
            self.barcodeInput.setText(student_num)
            self.IDInput.setFocus()

    def accept(self) -> None:
        """Validate the form, add the student and register them for each chosen class."""
        self.popup = MessagePopup()
        # Bug fix: the original condition was
        # `IDInput == "" or (IDInput == "" and barcodeInput == "")`, whose second
        # clause was redundant and which rejected barcode-only submissions even
        # though take_id() supports them and the error message offers either.
        # NOTE(review): downstream register_student still receives the ID field;
        # confirm the database layer accepts an empty ID with a barcode.
        if self.IDInput.text() == "" and self.barcodeInput.text() == "":
            message = "Enter valid ID (8 digits) or barcode (14 digits)"
            self.popup.show_message(message)
            self.IDInput.setFocus()
            return
        if self.firstnameInput.text() == "" or self.lastnameInput.text() == "":
            message = "Enter first name and last name"
            self.popup.show_message(message)
            self.firstnameInput.setFocus()
            return
        if self.classTable.rowCount() == 0:
            message = "Select a class"
            self.popup.show_message(message)
            self.classListComboBox.setFocus()
            return
        success = Database.add_student(self.IDInput.text(), self.firstnameInput.text(), self.lastnameInput.text(),
                                       self.barcodeInput.text(), self.emailInput.text())
        message = ""
        if success:
            count = 0
            while count < self.classTable.rowCount():
                self.classTable.selectRow(count)
                selected_class = self.classTable.selectedItems()
                result = Database.register_student(self.IDInput.text(), self.firstnameInput.text(),
                                                   self.lastnameInput.text(), selected_class[0].text(),
                                                   selected_class[1].text(), selected_class[2].text())
                a_class = combine_into_class(selected_class[0].text(), selected_class[1].text(),
                                             selected_class[2].text())
                if type(result) is tuple and result == (True, True):
                    message += "Student was successfully registered for " + a_class + "\n"
                elif type(result) is tuple and result == (True, False):
                    message += "Student is already registered for " + a_class + "\n"
                elif type(result) is not tuple and result == False:
                    message += "The class " + a_class + " does not exist\n"
                # Bug fix: advance unconditionally. The original incremented only in
                # the (True, True) branch, so an "already registered" or "class does
                # not exist" result looped forever on the same row.
                count += 1
        else:
            message = "Student could not be added to system because student is already in the system"
        print(message)
        self.popup.show_message(message)
        self.close()
if __name__ == '__main__':
    # Manual test entry point: show the registration dialog standalone.
    app = QtWidgets.QApplication(sys.argv)
    window = RegisterPopup()
    window.show()
    app.exec()
|
993,160 | 4221a91c4cd8500535e4ec33dec099161df507e8 | class VoucherService(object):
"""
:class:`fortnox.VoucherService` is used by :class:`fortnox.Client` to make
actions related to Voucher resource.
Normally you won't instantiate this class directly.
"""
"""
Allowed attributes for Voucher to send to Fortnox backend servers.
"""
OPTS_KEYS_TO_PERSIST = ['Description', 'VoucherSeries', 'TransactionDate', 'VoucherRows']
"""
VoucherRows has the following structures:
"VoucherRows": [
{
"Description": "Företagskonto / checkkonto / affärskonto",
"Debit": "1500",
"Account": "1930",
"Credit": "0"
},
{
"Description": "Kassa",
"Debit": "0",
"Account": "1910",
"Credit": "1500"
},
.................
]
"""
SERVICE = "Voucher"
def __init__(self, http_client):
"""
:param :class:`fortnox.HttpClient` http_client: Pre configured high-level http client.
"""
self.__http_client = http_client
@property
def http_client(self):
return self.__http_client
def list(self, **params):
"""
Retrieve all Voucher
Returns all Voucher available to the Company, according to the parameters provided
:calls: ``get /vouchers``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attriubte-style access, which represent collection of Voucher.
:rtype: list
"""
_, _, vouchers = self.http_client.get("/vouchers", params=params)
return vouchers
def retrieve_sublist(self, voucher_series):
"""
Retrieve a sublist Voucher from a series
Returns a single Voucher according to the unique Voucher ID provided
If the specified Voucher does not exist, this query returns an error
:calls: ``get /vouchers/sublist/{voucher_series}``
:param int id: Unique identifier of a Voucher.
:return: Dictionary that support attriubte-style access and represent Voucher resource.
:rtype: dict
"""
_, _, vouchers = self.http_client.get(
"/vouchers/sublist/{voucher_series}".format(voucher_series=voucher_series))
return vouchers
def retrieve(self, voucher_series, id):
"""
Retrieve a single Voucher
Returns a single Voucher according to the unique Voucher ID provided
If the specified Voucher does not exist, this query returns an error
:calls: ``get /vouchers/sublist/{voucher_series}/{id}``
:param int id: Unique identifier of a Voucher.
:return: Dictionary that support attriubte-style access and represent Voucher resource.
:rtype: dict
"""
_, _, voucher = self.http_client.get(
"/vouchers/sublist/{voucher_series}/{id}".format(voucher_series=voucher_series, id=id))
return voucher
def create(self, *args, **kwargs):
"""
Create a Voucher
Creates a new Voucher
**Notice** the Voucher's name **must** be unique within the scope of the resource_type
:calls: ``post /vouchers``
:param tuple *args: (optional) Single object representing Voucher resource.
:param dict **kwargs: (optional) voucher attributes.
:return: Dictionary that support attriubte-style access and represents newely created Voucher resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for Voucher are missing')
initial_attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in initial_attributes.items())
attributes.update({'service': self.SERVICE})
_, _, voucher = self.http_client.post("/vouchers", body=attributes)
return voucher
|
993,161 | 4a026c6176a5ae5f87dfa11d389a130e2f6a9d8c | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 23:21:08 2019
@author: thebjm
"""
# import the necessary packages
from flask import *
from flask_mysqldb import *
from wtforms import *
import os
from werkzeug import secure_filename
from wtforms.fields.html5 import *
from wtforms.validators import InputRequired
from functools import wraps
import datetime
#from flask_admin.contrib.sqla import ModelView
from flask_admin import Admin, expose
app = Flask('Smart Door')  # NOTE(review): dead — immediately replaced by the next line
app = Flask(__name__)
app.config['DEBUG'] = True  # NOTE(review): debug mode must be off in production
app.secret_key= 'secret123'  # NOTE(review): hard-coded secret; load from env/config instead
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
#Config MySQL
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'thebjm'
app.config['MYSQL_PASSWORD'] = 'password'  # NOTE(review): hard-coded DB credentials
app.config['MYSQL_DB'] = 'homeSecurity'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'  # rows come back as dicts, not tuples
#Init MySQL
mysql = MySQL(app)
class LoginForm(Form):
    """WTForms login form: username and password, both required."""
    username = StringField('User Name', validators = [InputRequired()])
    password = PasswordField('Password', validators = [InputRequired()])
# Check if user logged in
def is_logged_in(f):
    """View decorator: run the view only when 'logged_in' is set in the session;
    otherwise flash a warning and redirect to the home/login page."""
    @wraps(f)
    def wrap(*args, **kwargs):
        if 'logged_in' in session:
            return f(*args, **kwargs)
        else:
            flash('Unauthorized, Please login', 'danger')
            return redirect(url_for('home'))
    return wrap
@app.route("/")
def index():
    """Root URL simply forwards to the home/login page."""
    return redirect(url_for('home'))
@app.route("/home", methods = ['GET', 'POST'])
def home():
    """Login page: on POST, look up the user and compare passwords; on success
    set session flags and go to the dashboard."""
    if 'logged_in' in session:
        return redirect(url_for('dashboard'))
    else:
        form = LoginForm(request.form)
        if request.method == 'POST':
            username = form.username.data
            password = form.password.data
            #create Cursor
            cur = mysql.connection.cursor()
            #get user data
            result= cur.execute("SELECT * FROM logins WHERE username = %s", [username])
            if result >0:
                data = cur.fetchone()
                get_name = data ['first_name'] + ' ' +data['last_name']
                get_password = data['password']
                app.logger.info(get_name)
                # NOTE(review): plaintext password comparison — store a hash and
                # use werkzeug.security.check_password_hash instead.
                if password == get_password:
                    session['logged_in'] = True
                    session['username'] = get_name
                    flash('Hello ' +get_name , 'success')
                    return redirect(url_for('dashboard'))
                    app.logger.info('Password Match')  # NOTE(review): unreachable (after return)
                else:
                    flash('Invalid Login', 'danger')
                    app.logger.info('Not')
                #close connection
                cur.close()
            else:
                flash('User not found', 'danger')
                app.logger.info('No user')
                app.logger.info(username)
                return redirect(url_for('home'))
        return render_template('home.html', form = form)
# Logout
@app.route('/logout')
@is_logged_in
def logout():
    """Clear the entire session and return to the login page."""
    session.clear()
    flash('You are now logged out', 'success')
    return redirect(url_for('home'))
#register
class RegisterForm(Form):
    """Account sign-up form with a confirm-password field.

    NOTE(review): the password entered here is persisted as plain text by the
    register() view; hash it before storing.
    """
    first_name = StringField('First Name', validators = [InputRequired()])
    last_name = StringField('Last Name', validators = [InputRequired()])
    email = EmailField('Email', validators = [InputRequired()])
    phone = StringField('Phone Number', validators = [InputRequired()])
    username = StringField('User Name', validators = [InputRequired()])
    password = PasswordField('Password', [validators.DataRequired(), validators.EqualTo('confirm', message = 'passwords not match')])
    confirm = PasswordField('Confirm Password', [validators.DataRequired()])
@app.route("/register", methods = ['GET', 'POST'])
def register():
    """Create a new login record from the registration form."""
    form = RegisterForm(request.form)
    if request.method == 'POST' and form.validate():
        f_name = form.first_name.data
        l_name = form.last_name.data
        email = form.email.data
        phone = form.phone.data
        username = form.username.data
        password = form.password.data
        #create cursor
        curr = mysql.connection.cursor()
        # NOTE(review): password is stored in plain text; hash it
        # (e.g. werkzeug.security.generate_password_hash) before the INSERT.
        curr.execute("INSERT INTO logins (first_name, last_name, email_id, phone, username, password) VALUES (%s, %s, %s, %s, %s, %s)", (f_name, l_name, email, phone, username, password))
        #cursor commit
        mysql.connection.commit()
        curr.close()
        flash('You are now registered ' , 'success')
        return redirect(url_for('home'))
    return render_template('register.html', form = form)
@app.route('/dashboard')
@is_logged_in
def dashboard():
    """Main page after login; the upload view below shares this URL for POSTs."""
    return render_template('dashboard.html')
#user upload
# Ensure the image upload directory exists next to this module.
target = os.path.join(APP_ROOT, 'static/images/')
print(target)
if not os.path.isdir(target):
    os.mkdir(target)
app.config['target'] = target
@app.route('/dashboard', methods=['GET' , 'POST'] )
def upload():
    """Save a visitor's photo under static/images/<name>/ and insert the
    visitor's details into the database.

    NOTE(review): `name` comes straight from the form and is joined into a
    filesystem path — sanitise it (path traversal risk). The view also reads
    session['username'] but is not wrapped in @is_logged_in; confirm intent.
    """
    if request.method == 'POST':
        name = request.form['name']
        print(name)
        user = os.path.join(target, name)
        if not os.path.isdir(user):
            os.mkdir(user)
        file = request.files['file']
        # Prefix the stored filename with a timestamp to avoid collisions.
        time = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M")
        filename = secure_filename(file.filename or '')
        filename = time + "_" + filename
        print(filename)
        destination = "/".join([user, filename])
        print(destination)
        file.save(destination)
        #fetchdata
        name = request.form['name']
        phone = request.form['phone']
        typeofvisitor = request.form['typeofvisitor']
        address = request.form['address']
        u_admin = session['username']
        print(u_admin)
        #create cursor
        curr1 = mysql.connection.cursor()
        curr1.execute("INSERT INTO visitors_details (name, address, typeofvisitor, phone, photo, admin) VALUES (%s, %s, %s, %s, %s, %s )", (name, address, typeofvisitor, phone, filename, u_admin))
        #cursor commit
        mysql.connection.commit()
        curr1.close()
        flash('New Entry Done ' , 'success')
        return redirect(url_for('dashboard'))
    # Bug fix: the original returned render_template('dashboard.html', form=form)
    # on GET, but `form` was never defined in this view, raising NameError.
    return render_template('dashboard.html')
#check old data
@app.route('/database')
@is_logged_in
def database():
    """Render all visitor records, or a 'no data' message when the table is empty."""
    curr_data = mysql.connection.cursor()
    try:
        result_data = curr_data.execute('SELECT * from visitors_details')
        alldata = curr_data.fetchall()
    finally:
        # Bug fix: the original placed curr_data.close() after the returns,
        # making it unreachable and leaking the cursor on every request.
        curr_data.close()
    if result_data > 0:
        return render_template('database.html', alldata = alldata)
    msg = 'No DATA found'
    return render_template('database.html', msg = msg)
# Delete Member
@app.route('/delete_member/<string:id>', methods=['POST'])
@is_logged_in
def delete_member(id):
    """Delete one visitor record by primary key (parameterised query)."""
    # Create cursor
    cur = mysql.connection.cursor()
    # Execute
    cur.execute("DELETE FROM visitors_details WHERE id = %s", [id])
    # Commit to DB
    mysql.connection.commit()
    #Close connection
    cur.close()
    flash('Visitor Delete', 'success')
    return redirect(url_for('dashboard'))
if __name__ == '__main__':
    # NOTE(review): port 80 requires elevated privileges, and DEBUG is enabled
    # above — do not expose this configuration publicly.
    app.run(host = '0.0.0.0', port = 80)
|
993,162 | 48fb4c2c89003099b29c4bd56cdd3097b50f38aa | import math
from model.resnetfpn import ResnetFPN
import torch
from torch import nn
class TextDet(nn.Module):
    # reference: https://github.com/SakuraRiven/EAST/blob/cec7ae98f9c21a475b935f74f4c3969f3a989bd4/model.py#L136
    """EAST-style text-detection head: a per-pixel text/non-text score map and
    a 5-channel geometry map (4 side distances + rotation angle)."""
    def __init__(self):
        super().__init__()
        # Shared 3x3 conv over 256-channel features (assumes FPN output has 256
        # channels — TODO confirm against ResnetFPN).
        self.conv1 = nn.Conv2d(256, 256, 3, 1, 1)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu1 = nn.ReLU()
        # fg / bg
        self.conv2 = nn.Conv2d(256, 1, 1)
        self.sigmoid1 = nn.Sigmoid()
        # bounding box
        self.conv3 = nn.Conv2d(256, 4, 1)
        self.sigmoid2 = nn.Sigmoid()
        # TODO: find the range of bounding box co-ordinates
        # self.scope = ___
        # orientation
        self.conv4 = nn.Conv2d(256, 1, 1)
        self.sigmoid3 = nn.Sigmoid()
    def forward(self, x):
        x = self.relu1(self.bn1(self.conv1(x)))
        # score in [0, 1]: probability the pixel belongs to text
        score = self.sigmoid1(self.conv2(x))
        # TODO: Convert from 0-1 to 0-"w or h"
        # loc of top, bot, left, right sides of the bounding bo
        loc = self.sigmoid2(self.conv3(x))
        # angle mapped from (0, 1) to (-pi/2, pi/2)
        angle = (self.sigmoid3(self.conv4(x)) - 0.5) * math.pi
        geo = torch.cat((loc, angle), axis=1)
        return score, geo
class RoiRotate(nn.Module):
    """Placeholder for the FOTS RoIRotate stage.

    NOTE(review): unfinished — forward computes a scale factor but never uses
    or returns it (the method returns None).
    """
    def __init__(self):
        super().__init__()
    def forward(self, t, b, l, r, ht=8):
        # Scale mapping the region height (t + b) onto the fixed output height ht.
        s = ht / (t + b)
class FOTS(nn.Module):
    """FOTS detector: shared ResNet-FPN backbone feeding the text-detection head."""
    def __init__(self, backbone='resnet50', pretrained=False):
        super().__init__()
        self.fpn = ResnetFPN(arch=backbone, pretrained=pretrained)
        self.fpn.create_architecture() # this is stupidity; remove this later
        self.text_det = TextDet()
    def forward(self, x):
        # fpn(x)[0]: presumably the shared feature map used by all heads —
        # TODO confirm against ResnetFPN's return structure.
        shared_features = self.fpn(x)[0]
        text_det = self.text_det(shared_features)
        return text_det
|
993,163 | 3ef2d9b2c59ff885eff01f5829dd6e433660f053 | import tensorflow as tf
def dqn(state_input, name, training=None):
    """Build a DQN Q-network (TF1 layers API) under variable scope `name`.

    Returns (q_values_op, {var_name_stripped_of_scope: variable}); the dict is
    used for copying weights between online/target networks.

    NOTE(review): conv_3's 4-D output is fed straight into tf.layers.dense,
    which applies the dense layer only along the last axis — standard DQN
    implementations flatten first; confirm this is intended.
    `training` is accepted but unused.
    """
    with tf.variable_scope(name) as scope:
        conv_1 = tf.layers.conv2d(state_input, 32, 8, strides=4, padding='same', activation=tf.nn.relu, name='conv_1')
        conv_2 = tf.layers.conv2d(conv_1, 64, 4, strides=2, padding='same', activation=tf.nn.relu, name='conv_2')
        conv_3 = tf.layers.conv2d(conv_2, 64, 3, strides=1, padding='same', activation=tf.nn.relu, name='conv_3')
        hidden1 = tf.layers.dense(conv_3, 512, activation=tf.nn.relu, name='hidden1')
        op_output = tf.layers.dense(hidden1, 30, activation=None, name='hidden2') # 30 is the number of actions
        trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
        # Strip the scope prefix so names line up between online and target nets.
        trainable_vars_by_name = {var.name[len(name):]: var for var in trainable_vars}
        return op_output, trainable_vars_by_name
993,164 | dcae550b7c9b00aa80aac944ba68b0c4e22f51dc | # -*- coding:utf-8 -*-
# @Time: 2021/7/29 6:55 下午
# @Author: Elvin
'''
1.类变量(属性):类变量在整个实例化的对象中是公用的。类变量定义在类中,且在方法之外。类变量通常不
作为实例变量使用。类变量也称作属性。
2.数据成员:类变量或实例变量用于处理类及其实例对象的相关数据。
3.方法重写:如果从父类继承的方法不能满足子类的需求,就可以对其进行改写,这个过程称为方法的覆盖。
4.实例变量:定义在方法中的变量只作用于当前实例的类。
5.多态:对不同类的对象使用同样的操作。
6.封装:对外部隐藏工作细节。
7.继承:即一个派生类继承基类的字段和方法。继承允许把一个派生类的对象作为一个基类对象对待,以普通类为基础建立专门的类对象。
'''
# Methods must be bound to a specific object when called; plain functions need no binding.
class MyClass(object):
    # Class attribute, shared by all instances.
    i = 123
    def f(self):
        # Instance method returning a fixed greeting.
        return 'hello world'
use_class = MyClass()
print(f'调用类的属性:{use_class.i}')
print(f'调用类的方法:{use_class.f()}')
# A class body may define __init__ several times, but only the last definition survives.
# Demo: reading a "private" (double-underscore) attribute through an accessor method.
class Student(object):
    def __init__(self, name, score):
        self.name = name
        # Name-mangled to _Student__score; not directly reachable from outside.
        self.__score = score
    def info(self):
        print(f'学生:{self.name}; 分数:{self.__score}')
    def get_score(self):
        # Public accessor for the private score.
        return self.__score
stu = Student('xiaowang',70)
print(f'修改分数:{stu.get_score()}')
stu.info()
print(f'修改后分数:{stu.get_score()}')
# Demo: setting a private attribute through a setter method.
class Student(object):
    def __init__(self, name, score):
        self.__name = name
        self.__score = score
    def info(self):
        print(f'学生:{self.__name}; 分数:{self.__score}')
    def set_score(self, score):
        # Mutator for the private score.
        self.__score = score
stu = Student('xiaomming', 97)
# NOTE(review): this redefined Student has no get_score(), so the next line
# raises AttributeError when run (it existed only on the previous Student).
print(f'修改前分数:{stu.get_score()}')
stu.info()
stu.set_score(20)
print(f'修改后分数:{stu.get_score()}')
# Demo: private methods (self.__private_method).
class PrivateMethod(object):
    def __init__(self):
        pass
    def __foo(self):
        # Private method; name-mangled to _PrivateMethod__foo.
        print('这是私有方法')
    def foo(self):
        # Public method that calls the private one internally.
        print('公共方法')
        print('公共方法中调用私有方法')
        self.__foo()
        print('方法调用结束')
pri_pub = PrivateMethod()
print('开始调用公共方法:')
# NOTE(review): the instance is not callable (no __call__) — this raises
# TypeError; presumably pri_pub.foo() was intended.
pri_pub()
print('开始调用私有方法')
# Raises AttributeError: name mangling hides __foo outside the class
# (demonstrating that private methods cannot be called externally).
pri_pub.__foo()
'''继承'''
# Demo: inheritance with private attributes/methods.
class Person(object):
    def __init__(self,age):
        self.__age = age
    def age(self): # wrapper so subclasses can read the private attribute
        return self.__age
    def __set_age(self,age): # private method; subclasses cannot call it directly either
        self.__age = age # but Person itself may use its own private members
    def setAge(self,age): # public wrapper; subclass instances can call this normally
        self.__set_age(age)
class Student(Person):
    def dis(self):
        # Uses the inherited public accessor, not the private field.
        print(self.age())
if __name__ == "__main__":
    stu = Student(12)
    stu.dis()
    stu.setAge(22)
    stu.dis()
'''类方法'''
# Demo: module-level function vs. instance method.
class Student(object):
    def __init__(self, name, score):
        self.name = name
        self.score = score
stu = Student('名字',10)
def info(): # plain module-level function (reads the global `stu`)
    print(f'学生:{stu.name}; 分数:{stu.score}')
# NOTE(review): info() takes no parameters — calling info(stu) raises TypeError.
info(stu)
class Student0(object):
    def __init__(self, name, score):
        self.name = name
        self.score = score
    def info(self): # instance method
        pass
        print(f'学生:{self.name};分数: {self.score}')
stu = Student('小明', 12)
# NOTE(review): stu is a Student (which has no info method), not a Student0 —
# this raises AttributeError; Student0('小明', 12) was presumably intended.
stu.info()
# Demo: an instance attribute set in __init__ shadows the class attribute of
# the same name for that instance.
class Dog:
    name = "老王"
    def __init__(self):
        self.name = "老张"
    def test_01(self):
        print("类内部访问name属性id=",id(self.name))
        print("类内部访问name属性=",self.name) # in-class access to the name attribute
if __name__ == '__main__':
    cl = Dog()
    cl.test_01() # call test_01 to access the instance attribute from inside the class
    print("类外部访问name属性id=",id(cl.name))
    print("类外部访问name属性=",cl.name)
# Sample output (ids vary per run; inside and outside see the same object —
# the transcript below wrote the name as 老网, presumably a typo for 老张):
# 类内部访问name属性id= 4388310512
# 类内部访问name属性= 老张
# 类外部访问name属性id= 4388310512
# 类外部访问name属性= 老张
'''多重继承'''
# Demo: multiple inheritance — Parrot inherits from both Bird and Fly.
class Animal(object):
    pass
class Bird(Animal):
    pass
class Fly(object):
    pass
class Parrot(Bird,Fly):
    pass
class A(object):
    def m(self):
        print("m of A ")
#
# class B(A):
#     pass
#
#
# class C(A):
#     def m(self):
#         print("m of C ")
#
#
# class D(B, C):  # attribute/method lookup follows the MRO (left to right)
#     pass
# a = A()
# Demo: a subclass instance can call methods inherited from its base class.
class F:
    def f1(self):
        print('F.f1')
    def b2(self):
        # NOTE(review): method is named b2 but prints 'F.f2' — likely a typo
        # in one of the two; the string is kept as-is.
        print('F.f2')
class C(F):
    def c1(self):
        print('C.c1')
    def c2(self):
        print('C.c2')
obj = C()
obj.c1() # `self` inside c1 is the caller, i.e. obj
obj.b2() # inherited from F; `self` is still obj
# Demo: explicitly calling a parent-class method from an override.
class F:
    def f1(self):
        print('F.f1')
    def c2(self):
        print('F.f2')
class C(F):
    def c1(self):
        print('C.c1')
    def c2(self):
        print('C.c2')
        #super(C, self).c2()  # one way to run the parent's method
        F.f1(self) # alternative: call through the class, passing self explicitly
obj = C()
obj.c1() # `self` inside c1 is the caller, i.e. obj
# NOTE(review): neither this F nor C defines b2 — the next line raises
# AttributeError (copied over from the previous demo; obj.c2() was likely meant).
obj.b2()
# Demo: MRO — method lookup follows the base-class order of the subclass.
class BaseOne():
    pass
class EveryOne(BaseOne):
    def sever_one(self):
        self.pro_one() # resolved through the MRO, left-to-right in the subclass bases
    def pro_one(self):
        print('this pro')
class EveryTwo:
    def pro_one(self):
        print('this pro two')
class EveryThree(EveryTwo,EveryOne):
    pass
obj = EveryThree()
# EveryTwo comes first in the bases, so EveryTwo.pro_one wins: prints 'this pro two'.
obj.sever_one()
# Demo: an overriding __init__ replaces the parent's; the parent's must be
# invoked explicitly if its initialization is also wanted.
class BaseOne():
    def __init__(self):
        print('this is base one')
class EveryOne(BaseOne):
    def __init__(self): # when both define __init__, only the subclass's runs
        print('this is zero')
        BaseOne.__init__(self) # explicit call so the parent initializer also runs
    def sever_one(self):
        self.pro_one()
    def pro_one(self):
        print('this pro')
class EveryTwo:
    def pro_one(self):
        print('this pro two')
class EveryThree(EveryTwo,EveryOne):
    pass
obj = EveryThree()
class Feel:
    # Class attribute ("static field"): reachable via the class or an instance.
    age = 10
    def __init__(self, name):
        # Instance attribute: reachable only through an instance.
        self.name = name
    # Plain instance method.
    def show(self):
        print(self.name)
    @staticmethod # static method: no self; callable directly on the class
    def stat():
        print('123')
    @property # property getter: accessed like a field
    def per(self):
        print('232')
    @per.setter # property setter
    def per(self,svr):
        print(svr)
obj = Feel('wangwang')
obj.name
obj.show()
Feel.age
Feel.show(obj) # calling through the class requires passing the instance
Feel.stat()
obj.per # property access: method runs, but reads like an attribute
# Demo: building a property explicitly with property(fget=...).
class Foo:
    def foo(self):
        return 333
    por = property(fget=foo)
    # Equivalent to the decorator form:
    # @property
    # def por(self):
    #     return 333
obj = Foo()
res = obj.por
print(res)
'''成员修饰符:公有成员、私有成员 '''
# Demo: public vs. private members.
class Foo:
    def __init__(self, name, age):
        self.name = name
        self.__age = age
    def show(self):
        # NOTE(review): Foo.__age mangles to the CLASS attribute Foo._Foo__age,
        # which does not exist — self.__age was presumably intended.
        return Foo.__age
# NOTE(review): Foo() omits the required name/age arguments — TypeError at run time.
foo = Foo()
print(foo.__age) # private attribute is not reachable from outside (AttributeError)
res = foo.show()
print(res)
class Foo:
    def __f2(self): # private method
        return 123
    def f3(self):
        r = self.__f2() # calling the private method from inside the class
        return r
obj = Foo()
# NOTE(review): there is no __f1 at all (and name mangling would hide it
# anyway) — this raises AttributeError; obj.f3() was presumably intended.
ret = obj.__f1()
print(ret)
class Feel:
    def __init__(self):
        # Private attribute: invisible to subclasses.
        self.__gen = 123
        # Public attribute: inherited normally.
        self.ge = 44
class FeelGood(Feel):
    def __init__(self, name):
        self.name = 123
        super(FeelGood, self).__init__()
    def show(self):
        print(self.ge) # subclasses can only reach the parent's public fields
# NOTE(review): FeelGood() omits the required `name` argument — TypeError at run time.
s = FeelGood()
# Demo: __call__ makes instances callable.
class Foo:
    def __init__(self):
        print('初始方法')
    def __call__(self, *args, **kwargs):
        print('call')
obj = Foo()
obj() # calling the instance invokes __call__
Foo()() # same as obj(): construct, then call the instance
# Type checks should use the isinstance() function:
# isinstance(a, A)
'''多态:python原生就是多态,有多种类型,其他语言明确指定某种类型'''
# Polymorphism is built into Python (duck typing); other languages require
# explicit type declarations.
'''__len__方法'''
# Demo: __len__ lets len() work on instances.
class Two:
    def __init__(self, N):
        self.N = N
        # First N even numbers starting at 0: 0, 2, ..., 2*(N-1).
        self.even_list = [2 * x for x in range(N)]
    def __len__(self):
        return self.N
two = Two(10)
print(len(two))
'''__str__'''
# Demo: __str__ controls str()/print() output for instances.
class Foo:
    def __init__(self, m, a):
        self.name = m
        self.age = a
    def __str__(self):
        return '%s-%s' %(self.name, self.age)
obj = Foo('xiaohyang', 13)
print(obj) # same as print(str(obj))
'''__dict__ : 输入为字典'''
# Demo: __dict__ exposes the instance attributes as a dict.
class Foo:
    def __init__(self, name, age):
        self.name = name
        self.age = age
        self.eve = 12
obj = Foo('xiaoming', 12)
d = obj.__dict__
print(d)
'''__getitem__ : 切片或者索引'''
# Demo: __getitem__/__setitem__/__delitem__ back the obj[...] syntax.
class Foo:
    def __init__(self, name, age):
        self.name = name
        self.age = age
        self.eve = 12
    def __getitem__(self, item): # this one is expected to return a value; the others are not
        '''
        If item is a basic type (int, str): index access.
        If item is a slice object: slice access.
        :param item:
        :return:
        '''
        if type(item) == slice:
            print('调用者希望内部做切片处理')
        else:
            print('调用者希望内部做索引处理')
        #return item + 1
        # NOTE(review): no return statement — obj[...] always yields None.
        print(item, type(item))
    def __setitem__(self, key, value):
        print(key,value)
    def __delitem__(self, key):
        print(key)
obj = Foo('xiaoming', 12)
d = obj[8] # automatically invokes __getitem__ with item=8
d = obj[1:4:2] # invokes __getitem__ with a slice object
print(d)
#obj[10] = 111
# del obj[222]
'''__iter__'''
# Demo: __iter__ makes instances iterable.
class Foo:
    def __init__(self, name,age):
        self.name = name
        self.age = age
    def __iter__(self):
        return iter(list([11,22,32,23]))
obj = Foo('xiaomiu', 19)
# 1. A class with __iter__ yields iterable objects: for-loop -> obj.__iter__()
#    -> iterator -> next().
# 2. The for-loop calls the class's __iter__ and takes its return value.
# 3. It then loops over that returned iterator.
|
def get_hypotenuse(side1, side2):
    """Return the hypotenuse of a right triangle with legs side1 and side2."""
    return (side1 * side1 + side2 * side2) ** 0.5
def get_area(side1, side2):
    """Return the area of a right triangle with legs side1 and side2."""
    return side1 * side2 / 2
def get_perimeter(side1, side2):
    """Return 2*side1 + 2*side2 (the perimeter of a side1-by-side2 rectangle).

    NOTE(review): the sibling helpers treat (side1, side2) as the legs of a
    right triangle, whose perimeter would also include the hypotenuse —
    confirm which shape is intended.
    """
    return 2 * (side1 + side2)
def write_to_file(side1, side2, file_destination):
    """Write hypotenuse, area and perimeter for (side1, side2) as one
    comma-separated line to the given open file object."""
    hypotenuse = get_hypotenuse(side1, side2)
    area = get_area(side1, side2)
    perimeter = get_perimeter(side1, side2)
    print(hypotenuse, area, perimeter, sep=', ', file=file_destination)
993,166 | a5f9f129056431df482892dca2d175d89fb68487 | #!/usr/bin/env python3
import sys
import json
import logging
import random
import time
from ratelimiter import RateLimiter
from jsonschema import validate
import singer
import singer.messages
import singer.metrics as metrics
from singer import utils
from singer import (UNIX_MILLISECONDS_INTEGER_DATETIME_PARSING,
Transformer, _transform_datetime)
from singer.catalog import Catalog, CatalogEntry
import httplib2
from googleapiclient import discovery
from googleapiclient.http import set_user_agent
from googleapiclient.errors import HttpError
from oauth2client import client, GOOGLE_TOKEN_URI, GOOGLE_REVOKE_URI
from oauth2client import tools
from oauth2client.file import Storage
import tap_sheets.conversion as conversion
LOGGER = singer.get_logger()
# Config keys that must be present for the tap to run at all.
REQUIRED_CONFIG_KEYS = [
    "client_id",
    "client_secret",
    "refresh_token"
]
# Global throttle: at most 100 Google API calls per 100 seconds.
rate_limiter = RateLimiter(max_calls=100, period=100)
def get_service(config, name, version):
    """Build an authorized Google API client for the given service name/version.

    Uses the OAuth2 refresh-token flow: the access token starts as None and is
    refreshed on demand against GOOGLE_TOKEN_URI using client_id/client_secret.
    """
    credentials = client.OAuth2Credentials(
        None,
        config.get('client_id'),
        config.get('client_secret'),
        config.get('refresh_token'),
        None,
        GOOGLE_TOKEN_URI,
        None,
        revoke_uri=GOOGLE_REVOKE_URI)
    http = credentials.authorize(httplib2.Http())
    # Optional custom User-Agent for API-side accounting.
    user_agent = config.get('user_agent')
    if user_agent:
        http = set_user_agent(http, user_agent)
    # cache_discovery=False disables the discovery-document file cache.
    return discovery.build(name, version, http=http, cache_discovery=False)
def do_discover(driveService, sheetsService, config):
    """Run discovery and dump the resulting catalog as JSON to stdout."""
    LOGGER.info("Starting discover")
    catalog = discover_catalog(driveService, sheetsService, config)
    json.dump(catalog, sys.stdout, indent=2)
    LOGGER.info('Finished Discover')
def discover_catalog(driveService, sheetsService, config):
    """Build the singer catalog for every spreadsheet visible to the account.

    Pages through the Drive file listing; each page contributes one
    CatalogEntry per tab of each spreadsheet.

    Returns:
        The catalog as a dict (Catalog.to_dict()).
    """
    tempSchema = sheetsList(None, driveService, sheetsService, config)
    nextPageToken = tempSchema.pop("nextPageToken")
    buildSchema = tempSchema["schema_data"]
    while nextPageToken is not None:
        # Bug fix: the original called sheetsList(nextPageToken) without the
        # service/config arguments (TypeError for accounts with >1000 sheets)
        # and append()ed the page's entry list, nesting it instead of
        # concatenating it.
        tempSchema = sheetsList(nextPageToken, driveService, sheetsService, config)
        nextPageToken = tempSchema.pop("nextPageToken")
        buildSchema.extend(tempSchema["schema_data"])
    # The debug print of the schema was removed: do_discover writes the catalog
    # JSON to stdout, and stray prints corrupt that stream.
    return Catalog(buildSchema).to_dict()
def sheetsList(pageToken, driveService, sheetsService, config):
    """Fetch one page (up to 1000 files) of spreadsheets from Drive and return
    their catalog entries.

    Returns:
        dict with "schema_data" (list of CatalogEntry) and "nextPageToken"
        (None on the last page).
    """
    nextPageToken = None
    # Only Google Sheets files, newest-modified first.
    result = driveService.files().list(orderBy="modifiedTime desc", q='mimeType=\'application/vnd.google-apps.spreadsheet\'', includeTeamDriveItems=None, pageSize=1000, pageToken=pageToken, corpora=None, supportsTeamDrives=None, spaces=None, teamDriveId=None, corpus=None).execute()
    nextPageToken = result.get('nextPageToken')
    files = result.get('files', [])
    tabList = []
    schema_data = []
    for row in files:
        # One CatalogEntry per tab of this spreadsheet.
        tabList = tabsInfo(sheetsService, row)
        schema_data = schema_data + tabList
    result = {"schema_data" : schema_data, "nextPageToken" : nextPageToken}
    return(result)
def tabsInfo(sheetsService, row):
    """Return one CatalogEntry per tab of the spreadsheet described by `row`.

    `row` is a Drive file resource (needs 'id' and 'name').  The tap_stream_id
    packs sheet id, sheet name, tab index, tab name and the combined stream
    name, '?'-separated, so do_sync can split them back out.
    """
    result = []
    with rate_limiter:
        tabs = makeRequestWithExponentialBackoff(sheetsService, row)
    for tab_id, tab in enumerate(tabs["sheets"]):
        sheet_id = row['id']
        # Lower-case, space-free names keep stream identifiers stable.
        sheet_name = row['name'].lower().replace(" ", "")
        tab_id = str(tab_id)
        tab_name = tab["properties"]["title"].lower().replace(" ", "")
        entry = CatalogEntry(
            tap_stream_id = sheet_id + "?" + sheet_name + "?" + tab_id + "?" + tab_name + "?" + sheet_name + "_" + tab_name,
            stream = tab["properties"]["title"].lower().replace(" ", ""),
            database = row['name'].lower().replace(" ", "") + '&' + row['id'],
            table = tab["properties"]["title"].lower().replace(" ", "") + '&' + str(tab_id),
        )
        result.append(entry)
    return(result)
def makeRequestWithExponentialBackoff(sheetsService, row):
    """Wrapper to request Google Sheets data with exponential backoff.

    Retries up to 5 times on rate-limit/backend errors, sleeping
    2**n + jitter seconds between attempts.

    Returns:
        The spreadsheet resource on success, or None when every attempt
        failed or a non-retryable error occurred.  NOTE(review): callers
        index into the result, so a None return crashes downstream —
        consider raising instead.
    """
    for n in range(0, 5):
        try:
            sheet = sheetsService.spreadsheets().get(
                spreadsheetId=row['id']).execute()
            return sheet
        except HttpError as error:
            # Retry only on transient/quota errors.
            if error.resp.reason in ['Too Many Requests', 'userRateLimitExceeded', 'quotaExceeded',
                                     'internalServerError', 'backendError']:
                time.sleep((2 ** n) + random.random())
            else:
                LOGGER.info(error.resp.reason)
                break
    print("There has been an error, the request never succeeded.")
def do_sync(sheetsService, config, catalog):
    """Sync every stream in the catalog: emit one SCHEMA message per tab,
    then one RECORD per spreadsheet row.

    Stream ids were packed by tabsInfo as
    "sheet_id?sheet_name?tab_id?tab_name?stream".
    """
    for stream in catalog["streams"]:
        new_properties = stream["tap_stream_id"].split("?")
        # Renamed from `json`, which shadowed the imported json module.
        rows = get_data(sheetsService, new_properties[0])
        data_schema = conversion.generate_schema(rows)
        table_name = new_properties[1] + "_" + new_properties[3]
        # (An unused local that pre-assembled the schema message was removed.)
        singer.write_schema(
            table_name,
            data_schema,
            ['CID', 'Date']
        )
        for record in rows:
            to_write = conversion.convert_row(record, data_schema)
            singer.write_record(table_name, to_write)
def get_data(sheetsService, spreadsheetId):
    """Fetch all cell values of a spreadsheet and return them as a list of
    dicts keyed by the first (header) row.

    Rows shorter than the header simply omit the trailing keys.  Returns an
    empty list when the sheet has no values at all.
    """
    rangeName = 'A1:ZZZ'
    result = sheetsService.spreadsheets().values().get(
        spreadsheetId=spreadsheetId, range=rangeName, dateTimeRenderOption='FORMATTED_STRING', majorDimension='ROWS').execute()
    values = result.get('values', [])
    records = []
    # Bug fix: the original read values[0] before the emptiness check, so an
    # empty sheet raised IndexError instead of printing 'No data found.'.
    if not values:
        print('No data found.')
    else:
        header_row = values[0]
        for counter, row in enumerate(values):
            if counter != 0:
                record = {}
                for column_id, value in enumerate(row):
                    record[header_row[column_id]] = value
                records.append(record)
    return(records)
def main():
    """Entry point: build Drive + Sheets clients, then run discovery or sync
    depending on the CLI flags parsed by singer-python.

    NOTE(review): when neither --discover nor --properties is supplied this
    silently does nothing.
    """
    parsed_args = singer.utils.parse_args(REQUIRED_CONFIG_KEYS)
    config = parsed_args.config
    driveService = get_service(config, 'drive', 'v3')
    sheetsService = get_service(config, 'sheets', 'v4')
    if parsed_args.discover:
        do_discover(driveService, sheetsService, config)
    elif parsed_args.properties:
        do_sync(sheetsService, config, parsed_args.properties)
|
993,167 | 090fc3e86ede15c4221b3e8778ba7afc42c1485f | """
Your chance to explore Loops and Turtles!
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Ethan Swallow.
"""
########################################################################
# done: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
########################################################################
import rosegraphics as rg
########################################################################
# TODO: 2.
#
# You should have RUN the PREVIOUS module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT your work by using VCS ~ Commit and Push.
########################################################################
# Turtle 1: a row of five small red circles, pen lifted while moving between them.
circle_turtle = rg.SimpleTurtle()
circle_turtle.pen = rg.Pen("red", 4)
for _ in range(5):
    circle_turtle.draw_circle(10)
    circle_turtle.pen_up()
    circle_turtle.forward(20)
    circle_turtle.pen_down()

# Turtle 2: two squares, each offset backward from the previous one.
square_turtle = rg.SimpleTurtle()
for _ in range(2):
    square_turtle.draw_square(20)
    square_turtle.pen_up()
    square_turtle.backward(10)
    square_turtle.pen_down()
993,168 | 700e8c5da83319e48b7c45522b1d13b262b7ae54 | import sys
import os
from trainpredict import TrainPredictData
import h5py
# NOTE: this file is Python 2 (raw_input, print statements).
def get_existing_file(msg, skip=False):
    """Shows msg and asks for input until the input is an existing file.

    :param msg: prompt shown to the user
    :param skip: if True, an empty answer returns None instead of re-asking
    :return: the entered path, or None when skipped
    """
    inp = None
    while inp is None:
        inp = raw_input(msg)
        if skip and len(inp) == 0:
            return None
        if not os.path.isfile(inp):
            print "Not a file:", inp
            # Clearing inp keeps the prompt loop going.
            inp = None
    return inp
def get_nonexisting_file(msg, skip=False):
    """Shows msg and asks for input until the input is not an existing file.

    :param msg: prompt shown to the user
    :param skip: if True, an empty answer returns None instead of re-asking
    :return: the entered path, or None when skipped
    """
    inp = None
    while inp is None:
        inp = raw_input(msg)
        if skip and len(inp) == 0:
            return None
        if os.path.isfile(inp):
            print "Is a file:", inp
            # Clearing inp keeps the prompt loop going.
            inp = None
    return inp
def extract_h5_key(file_name, msg):
    """
    Reads the given file using h5py.  If it contains only a single key, that
    key is returned.  Otherwise the given msg is shown until a valid key is
    entered by the user.

    :param file_name: path of an existing HDF5 file
    :param msg: prompt shown to the user
    :return: h5 key
    """
    with h5py.File(file_name, "r") as f:
        keys = f.keys()
        if len(keys) == 1:
            # Unambiguous: use the only dataset key.
            return keys[0]
        else:
            keys = [str(k) for k in keys]
            inp = None
            while inp is None:
                print "Choose one of the keys:", keys
                inp = raw_input(msg)
                if inp in keys:
                    return inp
                else:
                    inp = None
def main():
    """Ask the user for input to create a .tpd file.

    Interactively collects train/test raw, ground-truth, prediction and
    feature dataset paths (each optional via empty input) plus their HDF5
    keys, and stores them in a new TrainPredictData file.
    """
    tpd_file_name = get_nonexisting_file("Enter name of new tpd file: ")
    tpd = TrainPredictData(tpd_file_name)
    print "You can now enter the file paths of the the newly created tpd file."
    print "If you want to skip a data set, just press enter without typing anything."
    # Training datasets (each optional).
    train_raw_path = get_existing_file("Enter training raw path: ", skip=True)
    if train_raw_path is not None:
        train_raw_key = extract_h5_key(train_raw_path, "Enter training raw h5 key: ")
        tpd.set_train_raw(train_raw_path, train_raw_key)
    train_gt_path = get_existing_file("Enter training gt path: ", skip=True)
    if train_gt_path is not None:
        train_gt_key = extract_h5_key(train_gt_path, "Enter training gt h5 key: ")
        tpd.set_train_gt(train_gt_path, train_gt_key)
    train_pred_path = get_existing_file("Enter training pred path: ", skip=True)
    if train_pred_path is not None:
        train_pred_key = extract_h5_key(train_pred_path, "Enter training pred h5 key: ")
        tpd.set_train_pred(train_pred_path, train_pred_key)
    # Any number of training feature files; empty input ends the loop.
    train_feat_path = get_existing_file("Enter training feature path: ", skip=True)
    while train_feat_path is not None:
        train_feat_key = extract_h5_key(train_feat_path, "Enter training feature path: ")
        tpd.add_train_feature(train_feat_path, train_feat_key)
        train_feat_path = get_existing_file("Enter training feature path: ", skip=True)
    # Test datasets (same structure as training).
    test_raw_path = get_existing_file("Enter test raw path: ", skip=True)
    if test_raw_path is not None:
        test_raw_key = extract_h5_key(test_raw_path, "Enter test raw h5 key: ")
        tpd.set_test_raw(test_raw_path, test_raw_key)
    test_gt_path = get_existing_file("Enter test gt path: ", skip=True)
    if test_gt_path is not None:
        test_gt_key = extract_h5_key(test_gt_path, "Enter test gt h5 key: ")
        tpd.set_test_gt(test_gt_path, test_gt_key)
    test_pred_path = get_existing_file("Enter test pred path: ", skip=True)
    if test_pred_path is not None:
        test_pred_key = extract_h5_key(test_pred_path, "Enter test pred h5 key: ")
        tpd.set_test_pred(test_pred_path, test_pred_key)
    test_feat_path = get_existing_file("Enter test feature path: ", skip=True)
    while test_feat_path is not None:
        test_feat_key = extract_h5_key(test_feat_path, "Enter test feature path: ")
        tpd.add_test_feature(test_feat_path, test_feat_key)
        test_feat_path = get_existing_file("Enter test feature path: ", skip=True)
    return 0
if __name__ == "__main__":
    status = main()
    sys.exit(status)
|
993,169 | 3dfb5444ef29577c7fc56652e8764985b9ab5697 | '''
# ---------------------------------- prg-----------------------------------------------
# Prime_Number.py
# date : 26/08/2019
# Find given number is prime or not
'''
# Method to check whether a number is prime.
def prime(n):
    """Return True if n is a prime number, else False."""
    # Numbers below 2 are not prime.
    if n < 2:
        return False
    # 2 is the smallest prime.
    if n == 2:
        return True
    # Trial division by every i with i*i <= n.
    # Bug fix: the original loop condition was `i*i < n`, which skips the
    # divisor check for perfect squares and small composites — prime(4) and
    # prime(9) wrongly returned True.
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
993,170 | d0b86ece98e6e802d41b22a977e8961671b528dd | # Simple example of reading the MCP3008 analog input channels and printing
# them all out.
# Author: Tony DiCola
# License: Public Domain
import time
import datetime
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
# Software SPI configuration (BCM GPIO pin numbers for bit-banged SPI):
CLK = 18
MISO = 23
MOSI = 24
CS = 25
mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
# Hardware SPI configuration (alternative, unused):
# SPI_PORT = 0
# SPI_DEVICE = 0 # mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
def Now():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return format(datetime.datetime.now(), "%Y-%m-%d %H:%M:%S")
#datafile = open("output.csv", "a")
# Main program loop: sample ADC channel 0 once per second and print a
# timestamped CSV line (file logging is commented out).
while True:
    solar_value = mcp.read_adc(0)
    # Read the ADC channel 0 values
    # datafile.write(Now() + ',' +str(solar_value) + '\n')
    print(Now() + ',' +str(solar_value))
    # Pause for second.
    time.sleep(1.0)
#datafile.close()
993,171 | dd1ad6a370feca5e9aa8f0f4d9781a245b93d774 | from splinter import Browser
from bs4 import BeautifulSoup
def init_browser():
    """Return a visible (non-headless) Chrome splinter Browser.

    @NOTE: Replace the path with your actual path to the chromedriver.
    """
    executable_path = {"executable_path": "chromedriver.exe"}
    return Browser("chrome", **executable_path, headless=False)
def scrape():
    """Scrape weather.com's tornado page for the headline, its timestamp and
    the featured article URL.

    :return: dict with keys "headline", "Date_Uploaded" and "featured_url".

    NOTE(review): the CSS class names (styles__headline__1WDSw etc.) are
    build-generated and will break when the site redeploys; also the browser
    is never quit, so each call leaks a Chrome instance.
    """
    browser = init_browser()
    listings = {}
    url = "https://weather.com/storms/tornado"
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, "html.parser")
    listings["headline"] = soup.find("span", class_="styles__headline__1WDSw").get_text()
    listings["Date_Uploaded"] = soup.find("span", class_="styles__wxTitleWrapTimestamp__12-cd").get_text()
    # Relative link to the featured article, resolved against the site root.
    extension = soup.find("div", class_="styles__wxMediaContent__37wjl").find("div", class_="styles__mobileHeadlineContainer__2LkPF").a['href']
    link = "https://weather.com"
    featured_url = link + extension
    print(featured_url)
    listings["featured_url"]=featured_url
    return listings
# extension = soup1.find("article").find("figure", class_="lede").a["href"]
# link = "https://www.jpl.nasa.gov"
# featured_image_url = link + extension
# print(featured_image_url)
# listings_news["featured_image_url"]=featured_image_url |
993,172 | bf7bb1ce4f98089f9e9ea875d418e9744374bc0b | class Player:
def __init__(self):
self.player_status = None
self.player_moves = []
# Module-level game state for the interactive tic-tac-toe loop below.
p1 = Player()
p2 = Player()
game_turns = 0
# Nine cells, row-major; "-" = free, "X" = player 1, "O" = player 2.
game_board = ["-", "-", "-", "-", "-", "-", "-", "-", "-"]
game_status = None
places_taken = []
def game_board_printing(game_board):
    """Print the 3x3 board, one row per line, cells separated by ' , '."""
    for start in range(0, 9, 3):
        print(" , ".join(game_board[start:start + 3]))
def update_game_board(player1_moves, player2_moves):
    """Rebuild the global game_board from both players' move lists.

    Player 2 ("O") takes precedence if an index somehow appears in both
    lists, matching the original check order.

    Returns:
        The new board list (also stored in the global game_board); the
        original returned None, so this is a backward-compatible addition.
    """
    global game_board
    game_board = [
        "O" if i in player2_moves else "X" if i in player1_moves else "-"
        for i in range(9)
    ]
    return game_board
def check_game_status(player_symbol, board=None):
    """Return True if player_symbol has three in a row, else None.

    Args:
        player_symbol: "X" or "O".
        board: optional 9-cell board list; defaults to the global game_board
            so existing single-argument callers behave exactly as before.

    Keeps the original contract of returning None (not False) when there is
    no winning line.
    """
    if board is None:
        board = game_board
    # Fewer than three marks cannot win; skip the line scan.
    if board.count(player_symbol) < 3:
        return None
    # All 8 winning index triples: 3 rows, 3 columns, 2 diagonals
    # (replaces the original chain of eight slice checks).
    win_lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),
        (0, 3, 6), (1, 4, 7), (2, 5, 8),
        (0, 4, 8), (2, 4, 6),
    )
    for line in win_lines:
        if all(board[i] == player_symbol for i in line):
            return True
    return None
def game_outcome_check():
    """Inspect the global game state after a move.

    Returns "Game finished" when a player has already won; otherwise, once
    all 9 turns are used, records "Draw" in game_status (returning None, as
    before, so the outer turn loop ends via its own condition).

    Bug fix: the original tested game_turns == 9 first, so a win on the
    ninth move was overwritten with "Draw".
    """
    global game_status, game_turns
    if game_status is not None:
        return "Game finished"
    if game_turns == 9:
        game_status = "Draw"
# Interactive driver: alternate player 1 ("X") and player 2 ("O") until a
# player wins or the board fills (9 turns).
game_is_on = input("Do you want to start the game? (yes or no) ").lower()
while game_is_on == "yes":
    while game_turns < 9:
        wait_player1_move = True
        while wait_player1_move:
            game_board_printing(game_board)
            # NOTE(review): int() raises ValueError on non-numeric input.
            player1_move = int(input("player 1 move, choose the number between 1,9? ")) - 1
            # NOTE(review): `> 9` admits index 9 (user input 10); should arguably be > 8.
            if player1_move > 9 or player1_move < 0:
                print("Please choose a number between 1 and 9 ")
            elif player1_move in places_taken:
                print("Place already chosen, choose another number ")
            else:
                p1.player_moves.append(player1_move)
                places_taken.append(player1_move)
                update_game_board(p1.player_moves, p2.player_moves)
                game_turns += 1
                p1.player_status = check_game_status("X")
                if p1.player_status:
                    game_status = "Player 1 win"
                wait_player1_move = False
        end = game_outcome_check()
        if end == "Game finished":
            break
        wait_player2_move = True
        while wait_player2_move:
            game_board_printing(game_board)
            player2_move = int(input("player 2 move, choose the number between 1,9? ")) - 1
            if player2_move > 9 or player2_move < 0:
                print("Please choose a number between 1 and 9 ")
            elif player2_move in places_taken:
                print("Place already chosen, choose another number ")
            else:
                p2.player_moves.append(player2_move)
                places_taken.append(player2_move)
                update_game_board(p1.player_moves, p2.player_moves)
                game_turns += 1
                p2.player_status = check_game_status("O")
                if p2.player_status:
                    game_status = "Player 2 win"
                wait_player2_move = False
        end = game_outcome_check()
        if end == "Game finished":
            break
    # Final board and result; the game runs exactly once per program start.
    game_board_printing(game_board)
    print(f"The game ended up with: {game_status}")
    game_is_on = "off"
|
993,173 | 3dcff9a70bf8c4491b70a4ab83da8b911fc495cb | import networkx
from networkx.convert_matrix import to_numpy_matrix
import numpy as np
from numpy.linalg import eig
import matplotlib.pyplot as plt
from scipy.sparse import csc_matrix
from matplotlib.pyplot import cm
# --------------------------- Modularity evolution --------------------------- #
def plot_Q(graph,NCommunityClassifier,eps=1e-3,maxQ=False):
    """
    Build a classifier stopping at each level N, compute the corresponding modularity.

    Increases the community count until the modularity gain between two
    consecutive fits drops to eps or below, then plots modularity vs. the
    number of communities.

    :param graph: the graph to cluster
    :param NCommunityClassifier: classifier class instantiated as (graph, Nmax=i)
    :param eps: minimum modularity improvement required to keep iterating
    :param maxQ: if True, return the last (largest) modularity value
    """
    q1=0
    q2=q1+2*eps  # guarantees the loop body runs at least once
    Q_results=[0]
    i=1
    while q2-q1>eps:
        clfN=NCommunityClassifier(graph,Nmax=i)
        clfN.fit()
        q1=q2
        q2=clfN.Q  # modularity of the i-community fit (assumes fit() sets .Q — TODO confirm)
        Q_results.append(q2)
        i+=1
    plt.plot(np.arange(1,i+1),Q_results)
    plt.xlabel("Number of communities")
    plt.ylabel("Modularity")
    plt.show()
    if maxQ:
        return q2
# ----------------------------- Plot communities ----------------------------- #
# import cm
def plot_communities(G,clf):
    """Draw graph G with nodes colored by the community assignment in
    clf.category (a mapping node -> community label vector).

    Nodes with identical label vectors share one integer community id and
    one rainbow color; edges are drawn uniformly underneath.
    """
    # Assign a dense integer id to each distinct label vector.
    dict_aux = {}
    dict_labels = {}
    i = -1
    for key,val in clf.category.items():
        if dict_aux.get(tuple(val)) is None:
            i += 1
        a = dict_aux.setdefault(tuple(val),i)
        dict_labels.setdefault(key,a)
    print(dict_aux)
    # Plot parameters: force-directed layout and one color per community.
    pos = networkx.kamada_kawai_layout(G)
    rainbow = cm.rainbow(np.linspace(0,1,len(dict_aux)))
    plt.figure()
    for k in range(len(dict_aux)):
        nodes = [i for i in dict_labels.keys() if dict_labels[i] == k]
        networkx.draw_networkx_nodes(G,pos,
                            nodelist = nodes,
                            node_color =rainbow[k].reshape(1,4),
                            node_size=200,
                            node_shape = 'o',
                            label = str(k),
                            alpha=0.8)
    networkx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)
    plt.legend()
    plt.show()
# ----------------------------- Plot with eigen ------------------------------ #
def a_b(list, q):
    """Affinely rescale the values so max maps to 1 and min maps to q,
    clamping each result at 1.

    NOTE: the parameter name shadows the builtin `list`; kept unchanged for
    interface compatibility.
    """
    span = max(list) - min(list)
    slope = (1 - q) / span
    intercept = (span - (1 - q) * max(list)) / span
    return [min(slope * value + intercept, 1) for value in list]
def lighten_color(color, amount=0.5):
    """
    Lightens the given color by multiplying (1-luminosity) by the given amount.
    Input can be matplotlib color string, hex string, or RGB tuple.

    Examples:
    >> lighten_color('g', 0.3)
    >> lighten_color('#F034A3', 0.6)
    >> lighten_color((.3,.55,.1), 0.5)
    """
    import matplotlib.colors as mc
    import colorsys
    # Bug fix: the original used a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.  Only a failed named-color lookup should
    # fall through to treating `color` as an RGB/hex value.
    try:
        c = mc.cnames[color]
    except (KeyError, TypeError):
        c = color
    # Lighten by moving lightness toward 1 in HLS space.
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def plot_communities_eigen(G,clf): # For now only with two communities
    """Draw graph G with nodes colored by community and shaded by the
    magnitude of clf.leading_eigenvector (larger magnitude = darker).

    Same community-id construction as plot_communities; per-node brightness
    comes from a_b-rescaled eigenvector magnitudes fed to lighten_color.
    """
    # Assign a dense integer id to each distinct label vector.
    dict_aux = {}
    dict_labels = {}
    i = -1
    for key,val in clf.category.items():
        if dict_aux.get(tuple(val)) is None:
            i += 1
        a = dict_aux.setdefault(tuple(val),i)
        dict_labels.setdefault(key,a)
    print(dict_aux)
    # Plot parameters.
    pos = networkx.kamada_kawai_layout(G)
    rainbow = cm.rainbow(np.linspace(0,1,len(dict_aux)))
    gradient = np.abs(clf.leading_eigenvector)
    plt.figure()
    aux = 1
    for k in range(len(dict_aux)):
        nodes = [i for i in dict_labels.keys() if dict_labels[i] == k]
        # Eigenvector magnitudes of this community's nodes, rescaled to (1/9, 1].
        grad = [np.abs(gradient[i]) for i in dict_labels.keys() if dict_labels[i] == k]
        #grad = (grad+max(grad)-2*min(grad))/(2*(max(grad)-min(grad)))
        grad = a_b(grad,1/9)
        aux = rainbow[k].reshape(4,1).repeat(len(grad),axis=1)
        print(lighten_color(rainbow[k],0.3))
        # NOTE(review): `col` is computed but unused (superseded by lighten_color below).
        col = (grad*aux).T
        networkx.draw_networkx_nodes(G,pos,
                            nodelist = nodes,
                            node_color = [lighten_color(rainbow[k],p) for p in grad],
                            node_size=200,
                            node_shape = 'o',
                            label = str(k),
                            alpha=1)
        aux = len(nodes)*2
        print(aux, rainbow.shape)
        aux += 1
    networkx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)
    plt.legend()
    plt.show()
# plot_communities_eigen(G,clf)
|
993,174 | dc3ff6f66da18feb97ec00d500f4bcc6ed2a8b42 | class Demo:
def __init__(self):
print("parent constructor")
def func1(self):
print("func1")
class Demo1(Demo):
    """Overrides the constructor; func1 is inherited from Demo."""

    def __init__(self):
        # Deliberately does NOT call super().__init__(), so only the child
        # message is printed on construction.
        print("child constructor")

    def func2(self):
        """Print this method's name."""
        print("func2")
class Demo2(Demo1):
    # Inherits Demo1's constructor plus func1/func2; adds func3.
    def func3(self):
        print("func3")
# Construction runs Demo1.__init__ (prints "child constructor"); each call
# below prints its method name, demonstrating the three-level inheritance.
d2=Demo2()
d2.func1()
d2.func2()
d2.func3()
993,175 | 11a4b9766a8845d4a2d47f46e5c40ec9b897756f | # Generated by Django 3.1.5 on 2021-02-11 09:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the orders app.
    initial = True
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('session', models.CharField(max_length=200)),
                ('is_ordered', models.BooleanField(default=False)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=20)),
                ('email', models.CharField(blank=True, max_length=200, null=True)),
                ('address', models.TextField()),
                ('city', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('session', models.CharField(max_length=200)),
                ('tracking_id', models.CharField(blank=True, editable=False, max_length=40, null=True, unique=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('date', models.DateField(auto_now=True)),
                # NOTE(review): CharField with integer choice keys and an
                # integer default -- verify against the model definition.
                ('status', models.CharField(choices=[(1, 'recieved'), (2, 'confirmed'), (3, 'shipped'), (4, 'delivered')], default=1, max_length=20)),
                ('notes', models.TextField(blank=True)),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='orders.cart')),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='orders.customer')),
            ],
        ),
        migrations.CreateModel(
            name='Cart_item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('qty', models.IntegerField(default=1)),
                ('time', models.DateTimeField(auto_now_add=True)),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.product')),
                ('shopping_cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.cart')),
            ],
        ),
        # Added after Cart_item exists because Cart references it.
        migrations.AddField(
            model_name='cart',
            name='item',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='orders.cart_item'),
        ),
    ]
|
993,176 | cfba2efab8a2130941199efade9667cf18b4e794 | #!/usr/bin/env python
# coding: utf-8
# # Primeiros passos
#
# O interpretador Python é um cara legal que gosta de conversar, mas ele é um pouco repetitivo..
#
# Os notebooks Jupyter se comunicam com o interpretador, mandando suas mensagens e mostrando as resposta que ele dá.
#
# Clique no botão de Play para executar a célula abaixo (ou selecione a célula e aperte Shift+Enter).
# In[1]:
"Oi Python!"
# ## Melhorando a conversa com o interpretador
#
# >Se o interpretador apenas repete o que eu falo, pra que ele serve? 🤔
# >*, perguntou um aluno apressado.*
#
# O interpretador é mais sagaz do que parece. Teste as células abaixo:
# In[2]:
"Oi Python!".upper()
# In[3]:
"Oi Python!".lower()
# In[4]:
"Oi Python!" + " Tudo bom?"
# In[5]:
"Oi Python!".split()
# ### Exercícios de fixação
# EF1 - Peça pra o interpretador dizer **"Bom dia"** com letras minúsculas.
# In[6]:
"Bom dia".lower()
# EF2 - Peça pra o interpretador dizer **"Boa tarde"** com letras maiúsculas.
# In[8]:
"Boa tarde".upper()
# EF3 - Peça pra o interpretador dizer **"Bom dia ou Boa tarde?"**, sendo o **"Bom dia"** com letras maiúsculas e o **"boa tarde"** com letras minúsculas.
# In[11]:
"Bom dia ".upper() + "ou " + "Boa tarde?".lower()
# ### Exercícios complementares
# EC1 - O que você acha que a opção `split()` significa para o interpretador? Dica -- pesquise no Google Translate.
# In[ ]:
Divida, separe, reparta.
# *Escreva sua resposta aqui*
# A gente vai estudar o `split` com mais calma depois, mas por enquanto vamos ver um pouco sobre números.
# ## Trabalhando com números
#
# O interpretador Python também consegue lidar com números e operadores aritméticos, que podem ser usados para construir **expressões aritméticas**.
#
# As regras básicas sobre expressões aritméticas em Python são:
#
# * Em geral, a precedência dos operadores em Python segue a precedência que conhecemos da matemática.
# * Assim como na matemática, é possível usar parênteses para mudar a ordem de avaliação de uma expressão.
# * Caso reste apenas operações de mesma precedência, a expressão passa a ser avaliada da esquerda para a direita.
#
# Alguns dos operadores aritméticos disponíveis em Python estão listados abaixo.
# | Símbolo | Operação |
# |:----:|---|
# | + | Adição |
# | - | Subtração |
# | / | Divisão |
# | // | Divisão inteira |
# | % | Resto |
# | * | Multiplicação |
# | ** | Exponenciação |
# Teste as células abaixo:
# In[12]:
1+2+5
# In[13]:
2-1
# In[14]:
1*2
# In[15]:
3/4
# In[16]:
2 ** 3
# No entanto, você não deve misturar textos e números:
# In[17]:
"Este é um texto" + 3
# ### Exercícios de fixação
# EF4 - Calcule o produto dos números 11 e 12.
# In[18]:
11 * 12
# EF5 - Calcule o quadrado do número 16.
# In[19]:
16 ** 2
# EF6 - Calcule a raiz quadrada de 1024.
# In[24]:
1024 ** (1/2)
# ### Exercícios complementares
#
# Os operadores // e % trabalham com divisão inteira. Por exemplo, dividir 15 por 10 considerando apenas número inteiros é igual a 1. O resto da divisão é igual a 15 - (10*1), ou seja, 5.
# In[25]:
15//10
# In[26]:
15%10
# EC2 - Calcule o resto da divisão de 227 por 20.
# In[27]:
227%20
# # Valores, nomes e variáveis
# Em Python, tanto textos como números são chamados de *valores*.
#
# Podemos nos referir a valores usando *nomes*.
#
# >Em outras linguagens, usa-se o termo **variável** em vez de nome. Vamos adotar este termo aqui por ele ser mais universal.
#
# Teste as células abaixo:
# In[28]:
x = 2 # qualquer coisa após o # é um comentário
y = 5
x + y
# Múltiplas variáveis podem estar associadas ao mesmo valor.
# In[29]:
x = y = 1
y
# In[30]:
texto = 'Este é um texto.' # textos podem ser escritos entre aspas simples
texto
# In[31]:
outro_texto = "Este é outro texto." # textos podem ser escritos entre aspas duplas
outro_texto
# ### Exercícios de fixação
#
# Para verificar que seu código está correto, lembre-se de acrescentar uma linha contendo apenas o nome da variável para visualizar o valor associado a ela.
# EF7 - Associe uma variável `numero` ao número `10`.
# In[32]:
numero = 10
numero
# EF8 - Associe uma variável `nome` ao texto `Python`.
# In[33]:
nome = 'Python'
nome
# EF9 - Associe uma variável `resto` ao resultado do operação de resto entre `234` e `10`.
# In[34]:
# EF9: remainder of 234 divided by 10.
# BUG FIX: the exercise asks for 234 % 10, but the original typed 230.
resto = 234 % 10
resto
# EF10 - Associe uma variável `k` ao valor `8`. Associe uma variável `quadrado_k` ao quadrado do valor associado à variável `k`.
# In[40]:
k = 8
# BUG FIX: EF10 asks for the SQUARE of k; the original computed k ** (1/2),
# i.e. a square root.
quadrado_k = k ** 2
quadrado_k
# EF11 - Associe uma váriavel `z` ao valor `256`. Associe uma variável `divisao_zk` ao resultado da divisão entre os valores associados às variáveis `z` e `k`.
# In[42]:
z = 256
divisao_zk = z // k
divisao_zk
# ## Dados informados pelo usuário
# O procedimento `input()` solicita ao usuário dados que podem ser associados a variáveis. É possível personalizar a mensagem de solicitação, como mostrado abaixo.
# In[43]:
texto_usuario = input("Diga um valor: ")
texto_usuario
# Por padrão, qualquer dado passada pelo usuário será tratado como texto. Para tratá-lo como um valor numérico, você deve usar os procedimentos `int()` ou `float()`, dependendo de serem números inteiros ou reais.
# In[44]:
inteiro = int(input("Diga um valor inteiro: "))
inteiro + 1
# In[46]:
real = float(input("Diga um valor real: "))
real + 1
# ### Exercícios de fixação
# EF12 - Solicite ao usuário *seu nome* e o associe a uma variavél chamada `nome`.
# In[47]:
nome = input('Digite seu nome: ')
nome
# EF13 - Solicite ao usuário *sua idade* e a associe a uma variável chamada `idade`.
# In[48]:
idade = int(input('Digite sua idade: '))
idade
# In[50]:
altura = float(input('Digite sua altura: '))
altura
# EF14 - Solicite ao usuário *sua altura* e a associe a uma variável chamada `altura`.
# In[ ]:
# ## Informando dados ao usuário
# Assim como é possível receber dados do usuário, também é possível informar dados ao usuário.
#
# Para isto, usamos o procedimento `print()`.
# In[51]:
print(texto)
# É possível informar os valores associados a múltiplas variáveis com uma única chamada ao procedimento `print()`.
# In[52]:
print(texto, x)
# Também é possível informar textos, valores e o resultado de expressões:
# In[53]:
print("Testando", 3, x + y)
# ### Exercícios de fixação
# EF15 - Informe ao usuário **seu nome**.
# In[54]:
print(nome)
# EF16 - Informe ao usuário **sua idade**.
# In[55]:
print(idade)
# EF17 - Informe ao usuário **seu índice de massa corporal (IMC)**. Para isso, solicite ao usuário seu peso.
# In[56]:
peso = float(input('Digite seu peso: '))
IMC = peso/(altura ** 2)
print('Seu índice de massa corporal (IMC) é ', IMC)
# ## Exercícios do URI
# O URI é um juiz online utilizado em treinamentos para competições de programação.
#
# Nesta disciplina, utilizaremos exercícios inspirados na seção **Iniciante**, adaptados para o nosso contexto.
#
# Para ver a descrição do exercício em sua versão original do URI, clique no seu número.
# [1008](https://www.urionlinejudge.com.br/judge/pt/problems/view/1008) - Um sistema do setor de recursos humanos de uma empresa deve calcular o salário a ser pago para cada funcionário da empresa em função de quantas horas o funcionário trabalhou no mês e de quanto ele recebe por hora trabalhada.
#
# Escreva um código Python que leia o nome de um funcionário, seu número de horas trabalhadas, o valor que recebe por hora e calcula seu salário. Em seguida, mostre o nome e o salário do funcionário.
# |.| Entrada | Saída |
# |-|----|---|
# | *Exemplo 1* | João 100 5.50 | João 550.00 |
# | *Exemplo 2* | Maria 200 20.50 | Maria 4100.00 |
# | *Exemplo 3* | Facebookson 145 15.55 | Facebookson 2254.75 |
# In[60]:
nome = input('Digite o nome do funcionário: ')
hora_trab = float(input('Digite o número de horas trabalhadas: '))
sal_hora = float(input('Digite o valor que recebe por hora: '))
salario = hora_trab * sal_hora
print(nome, salario)
# [1009](https://www.urionlinejudge.com.br/judge/pt/problems/view/1009) - No caso de empresas do setor de comércio, a remuneração mensal de cada vendedor é composta por um salário fixo mais uma bonificação proporcional às vendas efetuadas pelo vendedor naquele mês.
#
# Escreva um código Python que leia o nome de um vendedor, o seu salário fixo e o total de vendas efetuadas por ele no mês (em dinheiro). Sabendo que este vendedor ganha 15% de comissão sobre suas vendas efetuadas, informe o total que ele deverá receber no final do mês.
# |.| Entrada | Saída |
# |-|----|---|
# | *Exemplo 1* | João 500 1230.30 | João 684.54 |
# | *Exemplo 2* | Pedro 700 0.00 | Pedro 700.00 |
# | *Exemplo 3* | Mangojata 1700 1230.50 | Mangojata 1884.58 |
# In[ ]:
# URI 1009: fixed salary plus a 15% commission on total sales.
nome = input('Digite o nome do vendedor: ')
sal_fixo = float(input('Digite o salario do vendedor: '))
total_vendas = float(input('Digite o tota de vendas efetuadas no mes (em dinheiro): '))
# BUG FIX: the original wrote `0,15` (a tuple -> TypeError) and also
# multiplied the commission by sal_fixo; the commission is 15% of
# total_vendas only (e.g. 500 + 1230.30 * 0.15 = 684.545).
salario = sal_fixo + total_vendas * 0.15
print(nome, salario)
# [1010](https://www.urionlinejudge.com.br/judge/pt/problems/view/1010) - Outro tipo de sistema utilizado no setor de comércio é o sistema de frente de loja, que calcula o total de uma venda baseado nos itens adquiridos, suas quantidades e seus valores unitários.
#
# Escreva um código Python que leia as informações de dois produtos adquiridos em uma compra e informe o valor a ser pago. Para cada produto, leia seu código, sua quantidade e seu valor unitário.
# |.| Entrada | Saída |
# |-|----|---|
# | *Exemplo 1* | 12 1 5.30 <br> 16 2 5.10 | VALOR A PAGAR: 15.50 |
# | *Exemplo 2* | 13 2 15.30 <br> 161 4 5.20 | VALOR A PAGAR: 51.40 |
# | *Exemplo 3* | 1 1 15.10 <br> 2 1 15.10 | VALOR A PAGAR: 30.20 |
# In[ ]:
cod1 = int(input('Digite o código do produto 1: '))
qtd1 = int(input('Digite a quantidade do produto 1: '))
vl_unitario1 = float(input('Digite o valor unitário do produto 1: '))
cod2 = int(input('Digite o código do produto 2: '))
qtd2 = int(input('Digite a quantidade do produto 2: '))
vl_unitario2 = float(input('Digite o valor unitário do produto 2: '))
valor_pagar = (vl_unitario1 * qtd1) + (vl_unitario2 * qtd2)
print('VALOR A PAGAR: ', valor_pagar)
# [1018](https://www.urionlinejudge.com.br/judge/pt/problems/view/1018) - Sistemas de frente de loja também devem auxiliar vendedores a dar trocos. Por simplicidade, vamos considerar primeiro apenas trocos inteiros, que podem ser dados usando apenas cédulas.
#
# Escreva um código Python que leia um valor de troco e informe quantas cédulas de cada valor devem ser entregues pelo vendedor ao cliente.
#
# **Obs.:** Considere que ainda existem notas de R$ 1,00.
# |.| Entrada | Saída |
# |-|----|---|
# | *Exemplo 1* | 576 | 5 nota(s) de 100,00 <br /> 1 nota(s) de 50,00 <br /> 1 nota(s) de 20,00 <br /> 0 nota(s) de 10,00 <br /> 1 nota(s) de 5,00 <br /> 0 nota(s) de 2,00 <br /> 1 nota(s) de 1,00 |
# | *Exemplo 2* | 11257 | 112 nota(s) de 100,00 <br /> 1 nota(s) de 50,00 <br /> 0 nota(s) de 20,00 <br /> 0 nota(s) de 10,00 <br /> 1 nota(s) de 5,00 <br /> 1 nota(s) de 2,00 <br /> 0 nota(s) de 1,00 |
# | *Exemplo 3* | 503 | 5 nota(s) de 100,00 <br /> 0 nota(s) de 50,00 <br /> 0 nota(s) de 20,00 <br /> 0 nota(s) de 10,00 <br /> 0 nota(s) de 5,00 <br /> 1 nota(s) de 2,00 <br /> 1 nota(s) de 1,00 |
# In[ ]:
# URI 1018: break a whole-number amount of change into bank notes
# (100, 50, 20, 10, 5, 2 and 1), greedily from largest to smallest.
troco = float(input("Digite o valor do troco: "))
nota100 = 0
nota50 = 0
nota20 = 0
nota10 = 0
nota5 = 0
nota2 = 0
nota1 = 0
while troco >= 100:
    nota100 += 1
    troco = troco - 100
while troco >= 50:
    nota50 += 1
    troco = troco - 50
while troco >= 20:
    nota20 += 1
    troco = troco - 20
while troco >= 10:
    nota10 += 1
    troco = troco - 10
while troco >= 5:
    nota5 += 1
    troco = troco - 5
while troco >= 2:
    nota2 += 1
    troco = troco - 2
while troco >= 1:
    nota1 += 1
    troco = troco - 1
print(nota100, " nota(s) de 100,00")
print(nota50, " nota(s) de 50,00")
print(nota20, " nota(s) de 20,00")
print(nota10, " nota(s) de 10,00")
print(nota5, " nota(s) de 5,00")
print(nota2, " nota(s) de 2,00")
# BUG FIX: the 1-real note count was computed but never printed, so the
# output was missing the last line required by the exercise.
print(nota1, " nota(s) de 1,00")
# [1021](https://www.urionlinejudge.com.br/judge/pt/problems/view/1021) - Agora vamos voltar ao mundo real, onde trocos podem precisar utilizar cédulas e moedas.
#
# Escreva um código Python que leia um valor de troco e informe quantas cédulas e moedas de cada valor devem ser entregues pelo vendedor ao cliente.
#
# **Obs.:** Considere que ainda existem moedas de R$ 0,01.
# |.| Entrada | Saída |
# |-|----|---|
# | *Exemplo 1* | 576.73 | NOTAS: <br /> 5 nota(s) de 100,00 <br /> 1 nota(s) de 50,00 <br /> 1 nota(s) de 20,00 <br /> 0 nota(s) de 10,00 <br /> 1 nota(s) de 5,00 <br /> 0 nota(s) de 2,00 <br /> MOEDAS: <br /> 1 moeda(s) de 1,00 <br /> 1 moeda(s) de 0,50 <br /> 0 moeda(s) de 0,25 <br /> 2 moeda(s) de 0,10 <br /> 0 moeda(s) de 0,05 <br /> 3 moeda(s) de 0,01 |
# | *Exemplo 2* | 4.00 | NOTAS: <br /> 0 nota(s) de 100,00 <br /> 0 nota(s) de 50,00 <br /> 0 nota(s) de 20,00 <br /> 0 nota(s) de 10,00 <br /> 0 nota(s) de 5,00 <br /> 2 nota(s) de 2,00 <br /> MOEDAS: <br /> 0 moeda(s) de 1,00 <br /> 0 moeda(s) de 0,50 <br /> 0 moeda(s) de 0,25 <br /> 0 moeda(s) de 0,10 <br /> 0 moeda(s) de 0,05 <br /> 0 moeda(s) de 0,01 |
# | *Exemplo 3* | 91.01 | NOTAS: <br /> 0 nota(s) de 100,00 <br /> 1 nota(s) de 50,00 <br /> 2 nota(s) de 20,00 <br /> 0 nota(s) de 10,00 <br /> 0 nota(s) de 5,00 <br /> 0 nota(s) de 2,00 <br /> MOEDAS: <br /> 1 moeda(s) de 1,00 <br /> 0 moeda(s) de 0,50 <br /> 0 moeda(s) de 0,25 <br /> 0 moeda(s) de 0,10 <br /> 0 moeda(s) de 0,05 <br /> 1 moeda(s) de 0,01 |
# In[ ]:
troco = float(input("Digite o valor do troco: "))
nota100 = 0
nota50 = 0
nota20 = 0
nota10 = 0
nota5 = 0
nota2 = 0
moeda1real = 0
cent50 = 0
cent25 = 0
cent10 = 0
cent5 = 0
cent1 = 0
while (troco >= 100):
nota100 += 1;
troco = troco - 100
while (troco >= 50):
nota50 += 1;
troco = troco - 50
while (troco >= 20):
nota20 += 1;
troco = troco - 20
while (troco >= 10):
nota10 += 1;
troco = troco - 10
while (troco >= 5):
nota5 += 1;
troco = troco - 5
while (troco >= 2):
nota2 += 1;
troco = troco - 2
while (troco >= 1):
moeda1real += 1;
troco = troco - 1
while (troco >= 0.50):
cent50 += 1
troco = troco - 0.50
while (troco >= 0.25):
cent25 += 1
troco = troco - 0.25
while (troco >= 0.10):
cent10 += 1
troco = troco - 0.10
while (troco >= 0.05):
cent5 += 1
troco = troco - 0.05
while (troco >= 0.01):
cent1 += 1
troco = troco - 0.01
print('NOTAS: ')
print(nota100, " nota(s) de 100,00")
print(nota50, " nota(s) de 50,00")
print(nota20, " nota(s) de 20,00")
print(nota10, " nota(s) de 10,00")
print(nota5, " nota(s) de 5,00")
print(nota2, " nota(s) de 2,00")
print("MOEDAS: ")
print(moeda1real, " moedas(s) de 1,00")
print(cent50, " moedas(s) de 0,50")
print(cent25, " moedas(s) de 0,25")
print(cent10, " moedas(s) de 0,10")
print(cent5, " moedas(s) de 0,05")
print(cent1, " moedas(s) de 0,01")
# [1019](https://www.urionlinejudge.com.br/judge/pt/problems/view/1019) - Sistemas de frente de loja também precisam registrar a data e o horário das vendas.
#
# Computadores normalmente armazenam datas utilizando uma única unidade de tempo, convertendo para o formato de apresentação desejado quando necessário. Por simplicidade, considere neste exercício que o dado informado representa apenas o horário da venda.
#
# Escreva um código Python que leia um valor em segundos e o converta para o formato *horas:minutos:segundos*.
#
# **Dica 1 --** a opção sep do procedimento print() permite configurar o caracter de separação entre as diferentes partes de uma impressão, como no exemplo abaixo.
# In[ ]:
print(10,33,51,sep=":")
# **Dica 2 --** é possível utilizar o procedimento print para impressão formatada. Pesquise o funcionamento da máscara de formatação abaixo:
# In[ ]:
print("%02d:%02d:%02d" % (9,33,51))
# |.| Entrada | Saída |
# |-|----|---|
# | *Exemplo 1* | 556 | 00:09:16 |
# | *Exemplo 2* | 1 | 00:00:01 |
# | *Exemplo 3* | 86153 | 23:55:53 |
# URI 1019: convert a duration in seconds to HH:MM:SS.
tempo_total_segundos = int(input("Escreva o tempo total em segundos: "))
# Use integer divmod instead of true division: the original produced float
# hours/minutes (relying on %d truncation) and ended with a confusing
# chained assignment that clobbered tempo_total_segundos.
tempo_hora, restante = divmod(tempo_total_segundos, 3600)
tempo_minutos, tempo_segundos = divmod(restante, 60)
print("%02d:%02d:%02d" % (tempo_hora, tempo_minutos, tempo_segundos))
|
993,177 | 393db6d24cdae2473f95c9234200ea264d4f3cc0 | #--encoding:utf-8--
from bag import Bag
from graph_visualized import MSTVisualized
class Edge(object):
    """A weighted undirected edge between vertices _v and _w.

    Comparison operators order edges by weight, so lists of edges can be
    sorted directly (used by the MST code below).
    NOTE(review): __eq__ is defined without __hash__, which makes instances
    unhashable -- preserved from the original.
    """
    _v = None        # one endpoint
    _w = None        # the other endpoint
    _weight = None   # edge weight
    _black = None    # mark flag used by the MST/visualisation bookkeeping
    def __init__(self, v, w, weight):
        super(Edge, self).__init__()
        self._v = v
        self._w = w
        self._weight = weight
        self._black = False
    def markBlack(self):
        """Mark this edge (e.g. as part of the MST)."""
        self._black = True
    def IsBlack(self):
        """Return True if markBlack() has been called."""
        return self._black
    def weight(self):
        """Return the edge weight."""
        return self._weight
    # one endpoint of the edge
    def either(self):
        return self._v
    def other(self, v):
        """Return the endpoint opposite v; raise if v is not an endpoint."""
        if self._v == v:
            return self._w
        elif self._w == v:
            return self._v
        else:
            # BUG FIX: the original did `raise Exception(print(...))`, which
            # prints the message and raises Exception(None), losing it.
            raise Exception("不存在顶点....")
    def __lt__(self, other):
        return self.weight() < other.weight()
    def __le__(self, other):
        return self.weight() <= other.weight()
    def __gt__(self, other):
        return self.weight() > other.weight()
    def __ge__(self, other):
        return self.weight() >= other.weight()
    def __eq__(self, other):
        return self.weight() == other.weight()
    def __str__(self):
        return "[vertex] %d-%d\t\t[weight] %.2f" % (self._v, self._w, self._weight)
class EdgeWeightGraph(object):
    """Undirected edge-weighted graph stored as per-vertex adjacency Bags.

    Built either empty (``vCount`` vertices) or parsed from ``intext``:
    first line is the vertex count, then one "v-w-weight" line per edge.
    """
    # number of vertices
    num_vertexCnt = 0
    # number of edges
    num_edgeCnt = 0
    # adjacency lists: arr_adj[v] is a Bag of Edge objects incident to v
    arr_adj = None
    def __init__(self, intext=None, vCount=None):
        super(EdgeWeightGraph, self).__init__()
        if intext is None:
            self.num_vertexCnt = vCount
            self.arr_adj = [Bag() for _ in range(self.num_vertexCnt)]
            self.num_edgeCnt = 0
        else:
            lines = intext.split('\n')
            self.num_vertexCnt = int(lines[0])
            self.num_edgeCnt = 0
            self.arr_adj = [Bag() for _ in range(self.num_vertexCnt)]
            for x in range(1, len(lines)):
                vs = lines[x].split('-')
                edge = Edge(int(vs[0]), int(vs[1]), float(vs[2]))
                self.AddEdge(edge)
    def AddEdge(self, edge):
        """Insert an undirected edge into both endpoints' adjacency Bags."""
        v = edge.either()
        w = edge.other(v)
        self.arr_adj[v].Add(edge)
        self.arr_adj[w].Add(edge)
        # BUG FIX: the edge counter was never updated, so E() always returned 0.
        self.num_edgeCnt += 1
    def V(self):
        """Number of vertices."""
        return self.num_vertexCnt
    def E(self):
        """Number of edges."""
        return self.num_edgeCnt
    def adj(self, v):
        """Bag of edges incident to vertex v."""
        return self.arr_adj[v]
    def Printf(self, fileName):
        """Render the graph to a file via the MSTVisualized helper."""
        MSTVisualized(False).printf(self, fileName)
#Prim算法 一开始添加一个顶点, 然后添加一条它邻接的最小边,把这条边的两个顶点都加到树里面,然后继续寻找最小的邻接边直到找到V-1条边
class PrimMST(object):
    # the graph being processed
    graph = None
    # vertices already absorbed into the MST (marked[v] is True once visited)
    marked = None
    # edges chosen for the MST
    edgeQueue = None
    # crossing edges, kept sorted ascending by weight (a list used as a PQ)
    pq = None
    """docstring for PrimMST"""
    def __init__(self, graph):
        """Build the MST of `graph` with lazy Prim and render it via Printf.

        Side effect: writes the generated MST graph to the file "mst_gen".
        """
        super(PrimMST, self).__init__()
        self.graph = graph
        self.marked = [False for i in range(graph.V())]
        self.edgeQueue = []
        self.pq = []
        self.visit(0)  # seed the tree with vertex 0
        while len(self.pq) > 0:
            # pq is sorted ascending, so index 0 holds the lightest crossing edge
            minEdge = self.pq[0]
            self.pq.pop(0)
            vv = minEdge.either()
            vw = minEdge.other(vv)
            # lazy deletion: keep the edge only if at least one endpoint is new
            if not self.marked[vv] or not self.marked[vw]:
                self.edgeQueue.append(minEdge)
            if not self.marked[vv]:
                self.visit(vv)
            if not self.marked[vw]:
                self.visit(vw)
        genGraph = EdgeWeightGraph(vCount=graph.V())
        for edge in self.edgeQueue:
            genGraph.AddEdge(edge)
        genGraph.Printf("mst_gen")
    def visit(self, v):
        """Mark v as in-tree and push its edges to unmarked vertices onto pq."""
        self.marked[v] = True
        for edgeWrap in self.graph.adj(v):
            # items from the Bag expose the Edge via .value
            edge = edgeWrap.value
            if not self.marked[edge.other(v)]:
                self.pq.append(edge)
        self.pq.sort(key=lambda x:x.weight())  # keep the "priority queue" ordered
        # self.pq.reverse()
#MST (Minimum Spanning Tree)
#----------------------------Test--------------------------------------
str_graph = "8\n\
4-5-0.35\n\
4-7-0.37\n\
5-7-0.28\n\
0-7-0.16\n\
1-5-0.32\n\
0-4-0.38\n\
2-3-0.17\n\
1-7-0.19\n\
0-2-0.26\n\
1-2-0.36\n\
1-3-0.29\n\
2-7-0.34\n\
6-2-0.40\n\
3-6-0.52\n\
6-0-0.58\n\
6-4-0.93"
graph = EdgeWeightGraph(intext = str_graph)
graph.Printf("mst_origin")
PrimMST(graph) |
993,178 | c21728dbc4bf168f17d0c2e0444a828e6420fa27 | #!/usr/bin/env python
## http://projecteuler.net/index.php?section=problems&id=25 ##
## What is the first term in the Fibonacci sequence to contain 1000 digits? ##
import math
def fib(n, fibs):
    # Memoized Fibonacci (Python 2): fibs maps index -> value and is
    # updated in place, so repeated calls reuse earlier results.
    print "called fib(", n, ")"
    fn1 = 0
    fn2 = 0
    if (n - 1) in fibs:
        fn1 = fibs[n - 1]
    else:
        fn1 = fib(n - 1, fibs)
    if (n - 2) in fibs:
        fn2 = fibs[n - 2]
    else:
        fn2 = fib(n - 2, fibs)
    # cache and return F(n)
    fibs[n] = fn1 + fn2
    return fibs[n]
fibs = { 1: 1, 2: 1 }
res = 1
n = 2
while math.floor(math.log10(res)) + 1 < 1000:
n = n + 1
res = fib(n, fibs)
print n, " is the first term with 1000+ digits"
|
993,179 | dc36e1be00ee3c29ec4dcae5e35ea4b3e748376a | '''
PATTERN MatchedING WITH REGULAR EXPRESSIONS
'''
import re
## (1) Basic structure of RegEx from re module
dateUnderScore = re.compile(r'\d\d_\d\d_\d\d\d\d') ## \ is for exit, so put r outside
mo = dateUnderScore.search('I name a file as today_file_12_05_2019')
print('Matched pattern: '+ mo.group())
## (1.1) Grouping by parentheses
dateUnderScore = re.compile(r'(\d\d)_(\d\d)_(\d\d\d\d)') ## \ is for exit, so put r outside
mo = dateUnderScore.search('I name a file as today_file_12_05_2019')
print('Matched pattern (Group 1 - Date): '+ mo.group(1))
print('Matched pattern (Group 2 - Month): '+ mo.group(2))
print('Matched pattern (Group 3 - Year): '+ mo.group(3))
print('Matched pattern (Group 0 - All): '+ mo.group(0))
print('Matched pattern (All): '+ mo.group())
date, month, year = mo.groups() ## using .groups() method
print(date)
print(month)
print(year)
## (1.2) When you mean () as a character (rather than special meaning)
## add backslash \( <str> \), as well as | ? * + and other special characters
dateUnderScore = re.compile(r'(\d\d)_(\(\d\d\))_(\d\d\d\d)') ## \ is for exit, so put r outside
mo = dateUnderScore.search('I name a file as today_file_12_(05)_2019')
print('Matched pattern: '+ mo.group())
## (1.3) Multiple groups with Pipe
heroPattern = re.compile(r'Batman|Iron Man|Spider Man') ## pipe
mo1 = heroPattern.search('My friend and me go to see Pikachu, Batman, Iron Man. It\'s fun')
mo2 = heroPattern.search('My friend and me go to see Pikachu, Iron Man, Batman. It\'s fun')
mo3 = heroPattern.findall('My friend and me go to see Pikachu, Iron Man, Batman. It\'s fun')
print('Matched Hero pattern: '+ mo1.group()) ## return the first occurence
print('Matched Hero pattern: '+ mo2.group())
print('Matched Hero pattern: '+ str(mo3)) ## return all
## use with () with same prefix
facebookProduct = re.compile(r'Face(book|time)')
mo = facebookProduct.search('Youtube, Google, Facebook, Medium, Netflix, Facetime')
print(mo.group())
## (1.4) Optional Matching with ?
# Optional group demo: (wo)? matches "businessman" or "businesswoman".
businessPersonPattern = re.compile(r'business(wo)?man')
mo1 = businessPersonPattern.search('doctor, businessman, lawyer, businesswoman')
mo2 = businessPersonPattern.search('doctor, businesswoman, lawyer, businessman')
# BUG FIX: this result was assigned to mo2 (clobbering the match above and
# making print(mo2.group()) fail, since nothing matches here); the trailing
# `mo3 == None` shows it was meant for mo3.
mo3 = businessPersonPattern.search('doctor, businesswowoman, lawyer')
mo3 == None
print(mo1.group())
print(mo2.group())
## (1.5) Matching 0+ with *
businessPersonPattern = re.compile(r'business(wo)*man')
mo0 = businessPersonPattern.search('doctor, businessman'); mo0 == None
mo1 = businessPersonPattern.search('doctor, businessman, lawyer, businesswoman')
mo2 = businessPersonPattern.search('doctor, businesswoman, lawyer, businessman')
mo3 = businessPersonPattern.search('doctor, businesswowowoman, lawyer, businessman')
print(mo1.group())
print(mo2.group())
print(mo3.group())
## (1.6) Matching 1+ with +
businessPersonPattern = re.compile(r'business(wo)+man')
mo1 = businessPersonPattern.search('doctor, businessman, lawyer')
mo2 = businessPersonPattern.search('doctor, businesswoman, lawyer, businessman')
mo3 = businessPersonPattern.search('doctor, businesswowowoman, lawyer, businessman')
print(mo1 == None)
print(mo2.group())
print(mo3.group())
## (1.7) Repetition with {}
hahaPattern = re.compile(r'(ha){2}')
laughBoundPattern = re.compile(r'(ha){3,5}')
laughUnboundPattern = re.compile(r'(ha){,5}')
mo1 = hahaPattern.search('And, I: ha'); print(mo1 == None)
mo2 = hahaPattern.search('And, I: haha'); print(mo2 == None)
mo3 = laughBoundPattern.search('And, I: haha'); print(mo3 == None)
mo4 = laughBoundPattern.search('And, I: hahaha'); print(mo4 == None)
mo5 = laughUnboundPattern.search('And, I: haha'); print(mo5 == None)
## (2) Greedy and Nongreedy Matching
hahaPattern = re.compile(r'(ha){1,2}?') ## non-greedy add ? (return the shortest version)
mo4 = hahaPattern.search('And, I: hahaha'); print(mo4.group())
## (3) .findall()
phoneNumRegex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d') # has no groups
phoneNumRegex_group = re.compile(r'(\d\d\d)-(\d\d\d)-(\d\d\d\d)') # has groups
phoneNumRegex.findall('Cell: 415-555-9999 Work: 212-555-0000')
phoneNumRegex_group.findall('Cell: 415-555-9999 Work: 212-555-0000') # with group (return tuples)
## (4) Character Classes
xmasRegex = re.compile(r'\d+\s\w+') ## \d+: 1+ digits; \s: space; \w+: 1+ letters/words
# BUG FIX: the original broke the string with '\\' followed by a newline,
# which leaves an unterminated string literal (SyntaxError); keep the text
# on one line and bind the result so it can be inspected.
xmas_matches = xmasRegex.findall('12 drummers, 11 pipers, 10 lords, 9 ladies, 8 maids, 7 swans, 6 geese, 5 rings, 4 birds, 3 hens, 2 doves, 1 partridge')
## (4b) Your own character classes
vowelRegex = re.compile(r'[aeiouAEIOU]')
vowelRegex.findall('Robocop eats baby food. BABY FOOD.')
consonantRegex = re.compile(r'[^aeiouAEIOU]') ## ^ after the bracket, search anything but ones in []
consonantRegex.findall('Robocop eats baby food. BABY FOOD.')
## (5) Caret and $
wholeStringIsNum = re.compile(r'^\d+$')
print(wholeStringIsNum.search('1234567890').group())
print(wholeStringIsNum.search('12345xyz67890') == None)
print(wholeStringIsNum.search('123 4567890') == None)
## (6) Wildcard
atRegex = re.compile(r'.at') ## . match any character except for a newline
atRegex.findall('The cat in the hat sat on the flat mat.')
nameRegex = re.compile(r'First Name: (.*) Last Name: (.*)') ## .* match everything
mo = nameRegex.search('First Name: Al Last Name: Sweigart')
print(mo.group(1))
print(mo.group(2))
greedyRegex = re.compile(r'<.*>') ## .* is greedy, it takes as long as possible
mo = greedyRegex.search('<To serve man> for dinner.>')
print(mo.group())
nongreedyRegex = re.compile(r'<.*?>') ## add ? for non-grredy
mo = nongreedyRegex.search('<To serve man> for dinner.>')
print(mo.group())
noNewlineRegex = re.compile('.*') ## '.' matches anything except a newline
# BUG FIX: both calls below used '\\' + newline inside a string literal
# (unterminated string -> SyntaxError) and the second statement was left
# unfinished; write the sample with explicit '\n' escapes instead.
sample = 'Serve the public trust.\nProtect the innocent.\nUphold the law.'
first_line = noNewlineRegex.search(sample).group() ## everything up to the first \n
newlineRegex = re.compile('.*', re.DOTALL) ## re.DOTALL makes '.' match newlines too
whole_text = newlineRegex.search(sample).group()
## (7) Case Insensitive
robocop = re.compile(r'robocop', re.I) ## IGNORECASE
robocop.search('Robocop is part man, part machine, all cop.').group()
robocop.search('ROBOCOP protects the innocent.').group()
## (8) substitue
namesRegex = re.compile(r'Agent \w+') ## Start with Agent then followed by Words (till space)
namesRegex.sub('CENSORED', 'Agent Alice gave the secret documents to Agent Bob.')
## (9) Complex Regexes
agentNamesRegex = re.compile(r'Agent (\w{2})\w*') ## n letters in group 1
## \1, \2, \3 to indicate the group in ( )
agentNamesRegex.sub(r'\1*****', 'Agent Alice told Agent Carol that Agent Eve knew Agent Bob was a double agent.')
phoneRegex = re.compile(r'''(
(\d{3}|\(\d{3}\))? # area code
(\s|-|\.)? # separator
\d{3} # first 3 digits
(\s|-|\.) # separator
\d{4} # last 4 digits
(\s*(ext|x|ext.)\s*\d{2,5})? # extension
)''',
re.VERBOSE) ## for complicated regex pattern, add re.VERBOSE to ignore new lines and comments inside the pattern
## (10) re.IGNORECASE, re.DOTALL, re.VERBOSE
someRegexValue = re.compile('foo', re.IGNORECASE | re.DOTALL | re.VERBOSE)
|
993,180 | a300191dbaedbd757a3f6dd27d0d080008ece33f | # Generated by Django 3.1.7 on 2021-03-14 07:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: allow blank Note titles with an empty-string default.
    dependencies = [
        ('api', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='note',
            name='title',
            field=models.CharField(blank=True, default='', max_length=240),
        ),
    ]
|
993,181 | d90b2351ce1f67b699a18797ebb0b9e2618d533b | import shutil
import tempfile
from django.contrib.auth import get_user_model
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from django import forms
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from ..models import Follow, Post, Group, Comment
User = get_user_model()
@override_settings(MEDIA_ROOT=tempfile.mkdtemp(dir=settings.BASE_DIR))
class PostPagesTests(TestCase):
    @classmethod
    def setUpClass(cls):
        """Create shared fixtures: three users, two groups, three posts with
        image uploads, one comment, and three follow relations."""
        super().setUpClass()
        cls.test_author = User.objects.create(username='TestAuthor')
        cls.user = User.objects.create_user(username='TestUser')
        cls.user2 = User.objects.create_user(username='TestUser2')
        cls.group1 = Group.objects.create(
            title='Тестовое имя группы 1',
            slug='test_group',
            description='Тестовое описание группы',
        )
        cls.group2 = Group.objects.create(
            title='Тестовое имя группы 2',
            slug='test_group2',
            description='Тестовое описание группы 2',
        )
        # Tiny valid GIF payload so the ImageField accepts the upload.
        small_gif = (
            b'\x47\x49\x46\x38\x39\x61\x02\x00'
            b'\x01\x00\x80\x00\x00\x00\x00\x00'
            b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
            b'\x00\x00\x00\x2C\x00\x00\x00\x00'
            b'\x02\x00\x01\x00\x00\x02\x02\x0C'
            b'\x0A\x00\x3B'
        )
        uploaded = SimpleUploadedFile(
            name='small.gif',
            content=small_gif,
            content_type='image/gif'
        )
        cls.post1 = Post.objects.create(
            text='Тестовый текст поста 1 (1 группа)',
            author=cls.test_author,
            group=cls.group1,
            image=uploaded
        )
        # Had to add ordering by -id :/
        cls.post2 = Post.objects.create(
            text='Тестовый текст поста 2 (2 группа)',
            author=cls.test_author,
            group=cls.group2,
            image=uploaded
        )
        cls.post3 = Post.objects.create(
            text='Тестовый текст поста 3 (2 группа)',
            author=cls.user,
            group=cls.group2,
            image=uploaded
        )
        cls.comment1 = Comment.objects.create(
            post=cls.post1,
            author=cls.user,
            text='Тестовый текст комментария'
        )
        # Follow graph: user -> test_author, user -> user2, user2 -> user.
        cls.follow_relation1 = Follow.objects.create(
            user=cls.user,
            author=cls.test_author
        )
        cls.follow_relation2 = Follow.objects.create(
            user=cls.user,
            author=cls.user2
        )
        cls.follow_relation3 = Follow.objects.create(
            user=cls.user2,
            author=cls.user
        )
    def setUp(self):
        """Per-test clients: anonymous, logged-in reader, logged-in author."""
        self.guest_client = Client()
        self.authorized_client = Client()
        self.authorized_client.force_login(self.user)
        self.authorized_author_client = Client()
        self.authorized_author_client.force_login(self.test_author)
@classmethod
def tearDownClass(self) -> None:
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
super().tearDownClass()
def tearDown(self):
cache.clear()
def compare_posts(self, response, post1: Post, post2: Post):
'''Сравниваем содержимое двух постов'''
self.assertEqual(post1.text, post2.text)
self.assertEqual(post1.author, post2.author)
self.assertEqual(post1.group, post2.group)
# <img не работает для test_edit_post_page_context, small.gif не
# работает для остальных. Там есть другие варианты, но все они
# являются совсем уже костылями.
# self.assertContains(response, 'small.gif')
self.assertEqual(post1.image.name, post2.image.name)
def test_pages_templates(self):
'''URL адрес использует соответсвтующий шаблон'''
templates_pages_names_unauth = {
'index.html': reverse('index'),
'group.html': reverse('group_posts',
kwargs={'slug': self.group1.slug}),
'profile.html': reverse(
'profile',
kwargs={'username': self.test_author.username}
),
'post.html': reverse(
'post',
kwargs={'username': self.test_author.username,
'post_id': self.post1.id}
)
}
for template, reverse_name in templates_pages_names_unauth.items():
with self.subTest(reverse_name=reverse_name):
response = self.guest_client.get(reverse_name)
self.assertTemplateUsed(response, template, template)
response = self.authorized_client.get(reverse('new_post'))
self.assertTemplateUsed(response, 'new_post.html')
response = self.authorized_client.get(reverse('follow_index'))
self.assertTemplateUsed(response, 'follow.html')
def test_home_page_context(self):
'''Шаблон home сформирован с правильным контекстом.'''
response = self.guest_client.get(reverse('index'))
latest_object = response.context.get('page').object_list[0]
self.compare_posts(response, latest_object, self.post3)
def test_home_page_context_length(self):
'''Все посты из бд попали на главную'''
response = self.guest_client.get(reverse('index'))
self.assertEqual(len(response.context.get('page').object_list), 3)
def test_group_page_context(self):
'''Шаблон group_posts сформирован с правильным контекстом'''
response = self.guest_client.get(
reverse('group_posts', kwargs={'slug': self.group1.slug})
)
latest_object = response.context.get('page').object_list[0]
self.compare_posts(response, latest_object, self.post1)
def test_empty_group_page_objects(self):
'''Посты, которые не пренадлежат группе,
не выводятся на ее странице'''
group3 = Group.objects.create(
title='Тестовое имя группы 3',
slug='test_group3',
description='Тестовое описание группы 3',
)
response = self.guest_client.get(
reverse('group_posts', kwargs={'slug': group3.slug})
)
self.assertEqual(len(response.context.get('page').object_list), 0)
def test_new_post_page_context(self):
'''Шаблон new_post сформирован с правильным контекстом'''
response = self.authorized_client.get(reverse('new_post'))
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField
}
is_edit = response.context.get('is_edit')
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context['form'].fields[value]
self.assertIsInstance(form_field, expected)
self.assertFalse(is_edit)
def test_profile_page_context(self):
'''Шаблон profile сформирован с правильным контекстом'''
response = self.guest_client.get(
reverse('profile', kwargs={'username':
self.test_author.username})
)
latest_object = response.context.get('page').object_list[0]
author = response.context.get('author')
username = response.context.get('username')
page = response.context.get('page')
self.compare_posts(response, latest_object, self.post2)
self.assertEqual(author, self.test_author)
self.assertEqual(username, self.test_author.username)
self.assertEqual(page.number, 1)
def test_post_page_context(self):
'''Шаблон post сформирован с правильным контекстом'''
response = self.guest_client.get(
reverse('post', kwargs={
'username': self.test_author.username,
'post_id': self.post1.id
})
)
username = response.context.get('username')
author = response.context.get('author')
comment = response.context.get('comments')[0]
requested_post = response.context.get('requested_post')
self.assertEqual(username, self.test_author.username)
self.assertEqual(author, self.test_author)
self.assertEqual(comment.text, self.comment1.text)
self.assertEqual(comment.post, self.comment1.post)
self.assertEqual(comment.author, self.comment1.author)
self.compare_posts(response, requested_post, self.post1)
def test_post_page_no_comments(self):
'''На странице с постом без комментариев нет комментариев'''
response = self.guest_client.get(
reverse('post', kwargs={
'username': self.test_author.username,
'post_id': self.post2.id
})
)
self.assertEqual(len(response.context.get('comments')), 0)
def test_edit_post_page_context(self):
'''Шаблон post_edit сформирован с правильным контекстом'''
response = self.authorized_author_client.get(
reverse('post_edit', kwargs={
'username': self.test_author.username,
'post_id': self.post1.id
})
)
is_edit = response.context.get('is_edit')
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField
}
post = response.context.get('post')
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context['form'].fields[value]
self.assertIsInstance(form_field, expected)
self.assertTrue(is_edit)
self.compare_posts(response, post, self.post1)
def test_follow_index_context(self):
'''Шаблон follow сформирован с правильным контекстом.'''
response = self.authorized_client.get(reverse('follow_index'))
latest_object = response.context.get('page').object_list[0]
self.compare_posts(response, latest_object, self.post2)
def test_follow_index_no_other_posts(self):
'''В follow только посты от авторов, на которых подписан
пользователь'''
response = self.authorized_client.get(reverse('follow_index'))
self.assertEqual(len(response.context.get('page').object_list), 2)
def test_follow_index_empty(self):
'''В follow пост пользователя не появляется для тех кто не подписан'''
response = self.authorized_author_client.get(reverse('follow_index'))
self.assertEqual(len(response.context.get('page').object_list), 0)
def test_unfollow_author(self):
'''Проверка отписки'''
self.authorized_client.get(reverse('profile_unfollow', kwargs={
'username': self.test_author.username})
)
self.assertFalse(Follow.objects.filter(
user=self.follow_relation1.user,
author=self.follow_relation1.author
).exists())
self.assertEqual(Follow.objects.count(), 2)
def test_follow_author(self):
'''Проверка подписки'''
self.authorized_client.get(reverse('profile_follow', kwargs={
'username': self.test_author.username})
)
self.assertTrue(Follow.objects.filter(
user=self.follow_relation1.user,
author=self.follow_relation1.author
).exists())
self.assertEqual(Follow.objects.count(), 3)
def test_cache(self):
'''Проверка кэширования'''
self.guest_client.get(reverse('index'))
self.guest_client.get(reverse('index'))
key = make_template_fragment_key('post_list_index')
self.assertIsNotNone(cache.get(key))
class PaginatorViewsTest(TestCase):
    '''Paginator tests: page sizes on the index, group and profile pages.'''
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.test_author = User.objects.create(username='TestAuthor')
        cls.group1 = Group.objects.create(
            title='Тестовое имя группы 1',
            slug='test_group',
            description='Тестовое описание группы',
        )
        # 26 posts total: 13 without a group and 13 in group1.
        for i in range(13):
            Post.objects.create(
                text=f'Тестовый текст {i}',
                author=cls.test_author
            )
            Post.objects.create(
                text=f'Тестовый текст {i}',
                author=cls.test_author,
                group=cls.group1
            )
    def setUp(self):
        self.user = User.objects.create_user(username='TestUser')
        self.authorized_client = Client()
        self.authorized_client.force_login(self.user)
    def tearDown(self):
        cache.clear()
    def test_first_index_page_contains_ten_records(self):
        '''The first page of the index template contains 10 records'''
        response = self.client.get(reverse('index'))
        self.assertEqual(len(response.context.get('page').object_list), 10)
    def test_third_index_page_contains_six_records(self):
        '''The third page of the index template contains 6 records'''
        response = self.client.get(reverse('index') + '?page=3')
        self.assertEqual(len(response.context.get('page').object_list), 6)
    def test_first_group_page_contains_ten_records(self):
        '''The first page of the group template contains 10 records'''
        response = self.authorized_client.get(
            reverse('group_posts', kwargs={'slug': self.group1.slug})
        )
        self.assertEqual(len(response.context.get('page').object_list), 10)
    def test_second_group_page_contains_three_records(self):
        '''The second page of the group template contains 3 records'''
        # BUGFIX: renamed from *_contains_ten_records — it asserts 3 records.
        response = self.authorized_client.get(
            reverse('group_posts', kwargs={
                'slug': self.group1.slug
            }) + '?page=2'
        )
        self.assertEqual(len(response.context.get('page').object_list), 3)
    def test_first_profile_page_contains_ten_records(self):
        '''The first page of the user profile template contains
        10 records'''
        response = self.authorized_client.get(
            reverse('profile', kwargs={'username': self.test_author.username})
        )
        self.assertEqual(len(response.context.get('page').object_list), 10)
    def test_third_profile_page_contains_six_records(self):
        '''The third page of the user profile template contains
        6 records'''
        # BUGFIX: this method previously reused the exact name of the test
        # above, silently shadowing it so only one of the two ever ran.
        response = self.authorized_client.get(
            reverse('profile', kwargs={'username': self.test_author.username})
            + '?page=3'
        )
        self.assertEqual(len(response.context.get('page').object_list), 6)
|
993,182 | e39f96cdf55a59e94d37edb69248f676e8bf0f61 | __author__ = 'kpeterson'
import traceback, sys
def repl(prompt='lisp> '):
"""A prompt-read-eval-print loop."""
while True:
try:
val = eval(parse(raw_input(prompt)))
if val is not None:
print to_string(val)
except KeyboardInterrupt:
print "\nExiting...\n"
sys.exit()
except:
handle_error()
def handle_error():
    """Simple error handling for both the repl and load"""
    # Report the traceback but keep the interpreter session alive.
    print "An error occurred. Trace:\n"
    traceback.print_exc()
# A Lisp symbol is represented as a plain Python string.
Symbol = str
def parse(s):
    """Parse a string of Lisp source into a nested-list expression tree."""
    return read_from(tokenize(s))
def tokenize(s):
    """Split Lisp source into tokens; parens become standalone tokens."""
    padded = s.replace('(', ' ( ').replace(')', ' ) ')
    return padded.split()
def read_from(tokens):
    """Consume tokens (mutating the list in place) and build one expression."""
    if not tokens:
        raise SyntaxError('unexpected EOF while reading')
    token = tokens.pop(0)
    if token == '(':
        expr = []
        while tokens[0] != ')':
            expr.append(read_from(tokens))
        tokens.pop(0)  # drop the closing ')'
        return expr
    if token == ')':
        raise SyntaxError('unexpected )')
    return atom(token)
def atom(token):
    """Turn a token into an int, else a float, falling back to a Symbol."""
    for convert in (int, float):
        try:
            return convert(token)
        except ValueError:
            pass
    return Symbol(token)
# Short alias used throughout the interpreter.
isa = isinstance
def to_string(exp):
    """Render a parsed expression back into Lisp-style source text."""
    if isa(exp, list):
        return '(' + ' '.join(to_string(item) for item in exp) + ')'
    return str(exp)
class Env(dict):
    """An environment: a {var: value} dict with a link to an outer Env."""
    def __init__(self, params=(), args=(), outer=None):
        """Bind params to args pairwise and remember the enclosing scope."""
        self.update(zip(params, args))
        self.outer = outer
    def find(self, var):
        """Return the innermost Env in which *var* is bound."""
        if var in self:
            return self
        return self.outer.find(var)
def add_globals(env):
    """Populate *env* with arithmetic/comparison primitives and booleans."""
    import operator
    primitives = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.div,
        '>': operator.gt,
        '<': operator.lt,
        '>=': operator.ge,
        '<=': operator.le,
        '=': operator.eq,
        'True': True,
        'False': False,
    }
    env.update(primitives)
    return env
# The single global environment shared by repl() and load().
global_env = add_globals(Env())
def eval(x, env=global_env):
    """Evaluate expression *x* in environment *env*.

    Dispatches on the head of a list for special forms; any other list
    is treated as a procedure call.
    """
    if isa(x, Symbol):             # variable reference
        return env.find(x)[x]
    elif not isa(x, list):         # constant literal
        return x
    elif x[0] == 'quote' or x[0] == 'q':  # (quote exp)
        (_, exp) = x
        return exp
    elif x[0] == 'atom?':          # (atom? exp)
        (_, exp) = x
        # BUGFIX: previously returned the unevaluated expression itself;
        # atom? must evaluate its argument and test that it is not a list.
        return not isa(eval(exp, env), list)
    elif x[0] == 'eq?':            # (eq? exp1 exp2)
        (_, exp1, exp2) = x
        v1, v2 = eval(exp1, env), eval(exp2, env)
        return (not isa(v1, list)) and (v1 == v2)
    elif x[0] == 'car':            # (car exp)
        (_, exp) = x
        return eval(exp, env)[0]
    elif x[0] == 'cdr':            # (cdr exp)
        (_, exp) = x
        return eval(exp, env)[1:]
    elif x[0] == 'cons':           # (cons exp1 exp2)
        (_, exp1, exp2) = x
        return [eval(exp1, env)] + eval(exp2, env)
    elif x[0] == 'cond':           # (cond (p1 e1) (p2 e2) ...)
        for (p, e) in x[1:]:
            if eval(p, env):
                return eval(e, env)
    elif x[0] == 'null?':          # (null? exp)
        (_, exp) = x
        return eval(exp, env) == []
    elif x[0] == 'if':             # (if test conseq alt)
        (_, test, conseq, alt) = x
        # BUGFIX: the chosen branch was evaluated but its value was
        # discarded, so every (if ...) form returned None.
        if eval(test, env):
            return eval(conseq, env)
        else:
            return eval(alt, env)
    elif x[0] == 'set!':           # (set! var exp)
        (_, var, exp) = x
        env.find(var)[var] = eval(exp, env)
    elif x[0] == 'define':         # (define var exp)
        (_, var, exp) = x
        env[var] = eval(exp, env)
    elif x[0] == 'lambda':         # (lambda (var...) exp)
        (_, vars, exp) = x
        return lambda *args: eval(exp, Env(vars, args, env))
    elif x[0] == 'begin':          # (begin exp...): value of the last exp
        for exp in x[1:]:
            val = eval(exp, env)
        return val
    else:                          # (proc exp...): procedure call
        exps = [eval(exp, env) for exp in x]
        proc = exps.pop(0)
        return proc(*exps)
def load(filename):
print "Loading and executing"
f = open(filename, "r")
program = f.readlines()
f.close()
rps = running_paren_sums(program)
full_line = ""
for (paren_sum, program_line) in zip(rps, program):
program_line = program_line.strip()
full_line += program_line + " "
if paren_sum == 0 and full_line.strip() != "":
try:
val = eval(parse(full_line))
if val is not None:
print to_string(val)
except:
handle_error()
print "\nThe line in which the error occurred:\n"
break
full_line = ""
repl()
def running_paren_sums(program):
    """Map each source line to the cumulative count of still-open parens
    up to and including that line (0 means everything is balanced)."""
    totals = []
    open_so_far = 0
    for line in program:
        open_so_far += line.count("(") - line.count(")")
        totals.append(open_so_far)
    return totals
if __name__ == "__main__":
    # Run a source file when one is given on the command line,
    # otherwise start an interactive session.
    if len(sys.argv) > 1:
        load(sys.argv[1])
    else:
        repl()
993,183 | 20ebf78543a8bbf3964244fd207bf57eeb603c56 | from Paasmer import *
import time
#Callback functions for subscribed feeds
def feed1_CB(name):
    """Callback fired when data arrives on feed1; logs the payload."""
    print("This is in feed1", name, sep="\n")
def feed2_CB(name):
    """Callback fired when data arrives on feed2; logs the payload."""
    print("This is in feed2", name, sep="\n")
def feed3_CB(name):
    """Callback fired when data arrives on feed3; logs the payload."""
    print("This is in feed3", name, sep="\n")
###connecting to the Paasmer Edge docker device
test = Paasmer()
test.host = "localhost" #IP address of the Paasmer Edge docker device.
test.connect()
#subscribing to the feeds with callback functions
test.subscribe("feed1",feed1_CB)
test.subscribe("feed2",feed2_CB)
test.subscribe("feed3",feed3_CB)
#loop start
test.loop_start()
# NOTE: this loop never terminates; it publishes one sample on each of
# feeds 4-8 roughly every 10 seconds.
while True:
    #publishing the feed details to Paasmer Edge docker device
    '''
    you can use the following analytics
    1.filter
    2.aggregate
    3.feedMonitoring
    4.average
    for filter, provide the analytics condition like "function(x) x < 5.0"
    for aggrgate, provide the number of values you want to do aggregate
    for average, provide the number of values you want to do average
    '''
    #publishing the feed details with filter analytics
    test.publish("feed4",feedValue = 5,analytics = "filter",analyticsCondition="function(x) x > 3.0")
    time.sleep(2)
    #publishing the feed details without any analytics
    test.publish("feed5",feedValue = 9,feedType = "sensor")
    time.sleep(2)
    #publishing the feed details with aggregate analytics
    test.publish("feed6",feedValue = 22,analytics = "aggregate",analyticsCondition = "10")
    time.sleep(2)
    #publishing the feed details with feedMonitoring
    test.publish("feed7",feedValue = 22,analytics = "feedMonitoring")
    time.sleep(2)
    #publishing the feed details with average analytics
    test.publish("feed8",feedValue = 28,analytics = "average",analyticsCondition = "10")
    time.sleep(2)
|
993,184 | 0868b627d13af092760be815ba19a47f965b5da9 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import os
import subprocess
import sys
from setuptools import find_packages, setup
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
PACKAGE_JSON = os.path.join(BASE_DIR, "superset-frontend", "package.json")
# The canonical version number lives in the frontend package.json.
with open(PACKAGE_JSON, "r") as package_file:
    version_string = json.load(package_file)["version"]
# The PyPI long description comes straight from the README.
with io.open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()
def get_git_sha() -> str:
    """Return the sha of the current git HEAD, or '' when unavailable
    (e.g. building outside a git checkout)."""
    try:
        return subprocess.check_output(["git", "rev-parse", "HEAD"]).decode().strip()
    except Exception:
        return ""
GIT_SHA = get_git_sha()
version_info = {"GIT_SHA": GIT_SHA, "version": version_string}
# Echo the build identifiers to the console during packaging.
print("-==-" * 15)
print("VERSION: " + version_string)
print("GIT SHA: " + GIT_SHA)
print("-==-" * 15)
# Persist the version metadata so the running app can serve it.
VERSION_INFO_FILE = os.path.join(BASE_DIR, "superset", "static", "version_info.json")
with open(VERSION_INFO_FILE, "w") as version_file:
    json.dump(version_info, version_file)
setup(
    name="apache-superset",
    description="A modern, enterprise-ready business intelligence web application",
    long_description=long_description,
    long_description_content_type="text/markdown",
    version=version_string,
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    entry_points={
        "console_scripts": ["superset=superset.cli.main:superset"],
    },
    # Core runtime dependencies; entries marked PINNED! are deliberately
    # locked to a single version (see the linked issues).
    install_requires=[
        "backoff>=1.8.0",
        "bleach>=3.0.2, <4.0.0",
        "cachelib>=0.4.1,<0.5",
        "celery>=5.2.2, <6.0.0",
        "click>=8.0.3",
        "colorama",
        "croniter>=0.3.28",
        "cron-descriptor",
        "cryptography>=3.3.2",
        "deprecation>=2.1.0, <2.2.0",
        "flask>=2.0.0, <3.0.0",
        "flask-appbuilder>=4.1.3, <5.0.0",
        "flask-caching>=1.10.0",
        "flask-compress",
        "flask-talisman",
        "flask-migrate",
        "flask-wtf",
        "func_timeout",
        "geopy",
        "graphlib-backport",
        "gunicorn>=20.1.0",
        "hashids>=1.3.1, <2",
        "holidays==0.10.3",  # PINNED! https://github.com/dr-prodigy/python-holidays/issues/406
        "humanize",
        "isodate",
        "markdown>=3.0",
        "msgpack>=1.0.0, <1.1",
        "numpy==1.22.1",
        "pandas>=1.3.0, <1.4",
        "parsedatetime",
        "pgsanity",
        "polyline",
        "pyparsing>=3.0.6, <4",
        "python-dateutil",
        "python-dotenv",
        "python-geohash",
        "pyarrow>=5.0.0, <6.0",
        "pyyaml>=5.4",
        "PyJWT>=2.4.0, <3.0",
        "redis",
        "selenium>=3.141.0",
        "simplejson>=3.15.0",
        "slackclient==2.5.0",  # PINNED! slack changes file upload api in the future versions
        "sqlalchemy>=1.4, <2",
        "sqlalchemy-utils>=0.37.8, <0.38",
        "sqlparse==0.3.0",  # PINNED! see https://github.com/andialbrecht/sqlparse/issues/562
        "tabulate==0.8.9",
        # needed to support Literal (3.8) and TypeGuard (3.10)
        "typing-extensions>=3.10, <4",
        "wtforms-json",
    ],
    # Optional database drivers and feature add-ons, installable as
    # `pip install apache-superset[extra]`.
    extras_require={
        "athena": ["pyathena>=1.10.8, <1.11"],
        "aurora-data-api": ["preset-sqlalchemy-aurora-data-api>=0.2.8,<0.3"],
        "bigquery": [
            "pandas_gbq>=0.10.0",
            "pybigquery>=0.4.10",
            "google-cloud-bigquery>=2.4.0",
        ],
        "clickhouse": ["clickhouse-sqlalchemy>=0.1.4, <0.2"],
        "cockroachdb": ["cockroachdb>=0.3.5, <0.4"],
        "cors": ["flask-cors>=2.0.0"],
        "crate": ["crate[sqlalchemy]>=0.26.0, <0.27"],
        "databricks": [
            "databricks-sql-connector>=2.0.2, <3",
            "sqlalchemy-databricks>=0.2.0",
        ],
        "db2": ["ibm-db-sa>=0.3.5, <0.4"],
        "dremio": ["sqlalchemy-dremio>=1.1.5, <1.3"],
        "drill": ["sqlalchemy-drill==0.1.dev"],
        "druid": ["pydruid>=0.6.1,<0.7"],
        "solr": ["sqlalchemy-solr >= 0.2.0"],
        "elasticsearch": ["elasticsearch-dbapi>=0.2.0, <0.3.0"],
        "exasol": ["sqlalchemy-exasol >= 2.4.0, <3.0"],
        "excel": ["xlrd>=1.2.0, <1.3"],
        "firebird": ["sqlalchemy-firebird>=0.7.0, <0.8"],
        "firebolt": ["firebolt-sqlalchemy>=0.0.1"],
        "gsheets": ["shillelagh[gsheetsapi]>=1.0.14, <2"],
        "hana": ["hdbcli==2.4.162", "sqlalchemy_hana==0.4.0"],
        "hive": ["pyhive[hive]>=0.6.5", "tableschema", "thrift>=0.11.0, <1.0.0"],
        "impala": ["impyla>0.16.2, <0.17"],
        "kusto": ["sqlalchemy-kusto>=1.0.1, <2"],
        "kylin": ["kylinpy>=2.8.1, <2.9"],
        "mssql": ["pymssql>=2.1.4, <2.2"],
        "mysql": ["mysqlclient>=2.1.0, <3"],
        "oracle": ["cx-Oracle>8.0.0, <8.1"],
        "pinot": ["pinotdb>=0.3.3, <0.4"],
        "postgres": ["psycopg2-binary==2.9.1"],
        "presto": ["pyhive[presto]>=0.6.5"],
        "trino": ["trino>=0.313.0"],
        "prophet": ["prophet>=1.0.1, <1.1", "pystan<3.0"],
        "redshift": ["sqlalchemy-redshift>=0.8.1, < 0.9"],
        "rockset": ["rockset>=0.8.10, <0.9"],
        "shillelagh": [
            "shillelagh[datasetteapi,gsheetsapi,socrata,weatherapi]>=1.0.3, <2"
        ],
        "snowflake": [
            "snowflake-sqlalchemy==1.2.4"
        ],  # PINNED! 1.2.5 introduced breaking changes requiring sqlalchemy>=1.4.0
        "spark": ["pyhive[hive]>=0.6.5", "tableschema", "thrift>=0.11.0, <1.0.0"],
        "teradata": ["teradatasql>=16.20.0.23"],
        "thumbnails": ["Pillow>=9.1.1, <10.0.0"],
        "vertica": ["sqlalchemy-vertica-python>=0.5.9, < 0.6"],
        "netezza": ["nzalchemy>=11.0.2"],
    },
    python_requires="~=3.8",
    author="Apache Software Foundation",
    author_email="dev@superset.apache.org",
    url="https://superset.apache.org/",
    download_url="https://www.apache.org/dist/superset/" + version_string,
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
)
|
993,185 | 5781f1c80b4e6fd0bbdeb017d828b09ee26c904f | from datetime import datetime, timedelta
from django.http import Http404, JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib import messages
import json
from django.core.serializers.json import DjangoJSONEncoder
from .models import Message
from .forms import MessageForm
@login_required
def messages_main(request):
    """Render the messaging landing page with per-folder message counts."""
    counts = {
        'inbox_count': Message.objects.inbox_for(request.user).count(),
        'sent_count': Message.objects.outbox_for(request.user).count(),
        'trash_count': Message.objects.trash_for(request.user).count(),
    }
    return render(request, 'messaging/messages_view.html', context=counts)
# @login_required
# def message_check(request):
# threshold = timezone.now() - timedelta(minutes=5)
# new_messages = Message.objects.filter(recipient=request.user, sent_at__gt=threshold)
# # return JsonResponse({'new_messages': bool(new_messages)})
# return JsonResponse({'new_messages': True})
@login_required
def message_check(request):
    """Return the user's unseen messages as JSON and mark them as seen.

    The payload lists (id, message_body) pairs, newest first.
    """
    # BUGFIX: was `temp = temp = Message.objects...` (duplicated assignment).
    unseen = Message.objects.not_seen_for(request.user)
    json_val_list = unseen.values_list('id', 'message_body').order_by('-sent_at')
    new_messages = json.dumps(list(json_val_list), cls=DjangoJSONEncoder)
    # Mark everything we just reported as seen.
    for message in unseen:
        message.recipient_seen = True
        message.save()
    return JsonResponse({
        'new_messages': new_messages,
        'new_messages_available': bool(json_val_list),
        'new_messages_count': len(json_val_list),
    })
@login_required
def inbox(request):
    """Render the user's received (non-deleted) messages."""
    return render(
        request,
        'display_chat.html',
        context={'message_list': Message.objects.inbox_for(request.user)},
    )
@login_required
def sent(request):
    """Render the messages the user has sent."""
    return render(
        request,
        'display_chat.html',
        context={'message_list': Message.objects.outbox_for(request.user)},
    )
@login_required
def trash(request):
    """Render the user's soft-deleted messages."""
    return render(
        request,
        'display_chat.html',
        context={'message_list': Message.objects.trash_for(request.user)},
    )
@login_required
def compose(request):
    """Create a message from POSTed form data and report the outcome as JSON."""
    if request.method != "POST":
        messages.error(request, "Something went wrong with this message!")
        return JsonResponse({'message_body': 'Error!'})
    # BUGFIX/cleanup: the old code assigned request.POST values onto plain
    # attributes of the form object (form.message_body / form.recipient),
    # which Django forms ignore entirely — validation always read from
    # request.POST. The dead assignments and the redundant `or None`
    # (request.POST is never falsy inside a POST branch) are removed.
    form = MessageForm(request.POST)
    if form.is_valid():
        message = form.save(commit=False)
        message.sender = request.user
        message.save()
        messages.success(request, "You messaged successfully!")
        return JsonResponse({'you_sent': message.message_body,
                             'messageId': message.id})
    messages.error(request, "You entered something wrong here!")
    return JsonResponse({'message_body': 'Error!'})
def detail(request, message_id):
    """Render a single message; only its sender or recipient may view it."""
    # NOTE(review): unlike the other views this one has no @login_required —
    # confirm whether anonymous access is intended.
    user = request.user
    message = get_object_or_404(Message, id=message_id)
    if (message.sender != user) and (message.recipient != user):
        raise Http404
    # Removed the unused `now = timezone.now()` local.
    context = {'message': message}
    return render(request, "message.html", context=context)
def delete_message(request, message_id):
    """Soft-delete a message for the current user.

    The first deletion by a side stamps its *_deleted_at; a second one sets
    the permanent flag. Once for_delete() says both sides are done, the row
    is removed for good. A user who is neither sender nor recipient gets 404.
    """
    # NOTE(review): no @login_required here — confirm whether that is intended.
    timestamp = timezone.now()
    msg = get_object_or_404(Message, id=message_id)
    user_involved = False
    if msg.sender == request.user:
        user_involved = True
        if msg.sender_deleted_at is None:
            msg.sender_deleted_at = timestamp
        else:
            msg.sender_deleted_perm = True
    if msg.recipient == request.user:
        user_involved = True
        if msg.recipient_deleted_at is None:
            msg.recipient_deleted_at = timestamp
        else:
            msg.recipient_deleted_perm = True
    if msg.for_delete():
        msg.delete()
        return redirect('messages:trash')
    if user_involved:
        msg.save()
        messages.success(request, "Message deleted successfully.")
        return redirect('messages:inbox')
    raise Http404("You cannot delete this message!")
|
993,186 | 48dfde97c63fe5dfc9850d255fca3c6a10641cd4 | # -*- Python -*-
# ----------------------------------------------------------------------
# Generate geometry
# ----------------------------------------------------------------------
playback 'geometry.jou'
# ----------------------------------------------------------------------
# Set discretization size
# ----------------------------------------------------------------------
volume all size {3.0*km}
# ----------------------------------------------------------------------
# Generate the mesh
# ----------------------------------------------------------------------
volume all scheme tetmesh
mesh volume all
# ----------------------------------------------------------------------
# Mark entities for boundary conditions, etc.
# ----------------------------------------------------------------------
playback 'bc.jou'
# ----------------------------------------------------------------------
# Export exodus file
# ----------------------------------------------------------------------
export mesh "mesh_tet.exo" dimension 3 overwrite
|
993,187 | e5b174d0c56fe2575b5f977530623383c23bb9fe | # -*- coding: cp936 -*-
import re
c ="""fisixxrenxxdlkfsaxxqingxxwdlkjjixx
xiaoxxlwdhlkjxxzhuoxxdwlkn9kjxxmaxxddwdok"""
b = re.findall('xx(.*?)xx',c,re.S)
print b
# Without re.S the output is: ['ren', 'qing', 'lwdhlkj', 'dwlkn9kj']
# With re.S (dot matches newline) it is: ['ren', 'qing', '\nxiao', 'zhuo', 'ma']
# Compare with re.sub: replacement
s = "123abc123"
b = re.sub('123(.*?)123','123%d123'%789,s) # substitute 789 for the (.*?) group
print b
# Match runs of plain digits with (\d+)
a = "15156dddjj656"
b = re.findall('(\d+)',a)
print b
|
993,188 | 5fb0490508d42d6c1ab512ea0120f3852b008889 | #!/usr/bin/env python
from core.agents.Parameters import Query
from core.agents.web.Yahoo.Controller import Controller
from strategy.basic import basic
import matplotlib.pyplot as plt
import matplotlib.dates as matplotlibdates
def get_data_from_stock(stock_name):
    """Fetch historical quotes for *stock_name* via the Yahoo web controller."""
    query = Query()
    query.add(Controller.PARAM_NAME_STOCK_NAME, stock_name)
    yahoo = Controller()
    yahoo.init()
    return yahoo.run(query)
def print_graph(stock_name, stock_prices, dates, capital, buys, sells):
    """Save a two-panel figure (price history and capital evolution).

    `buys`/`sells` are currently unused; they are kept for interface
    compatibility with existing callers.
    """
    plt.figure()
    dates = matplotlibdates.datestr2num(dates)
    # Top panel: price history.
    plt.subplot(2, 1, 1)
    plt.title("Stock:{stock}".format(stock=stock_name))
    # BUGFIX: the axis labels were swapped (x carries the dates, y the
    # prices) and were set before subplot() created the axes, which
    # discarded them; they are now applied to the live subplot.
    plt.xlabel(r"Date", fontsize=24, color='blue')
    plt.ylabel(r"Stock Price", fontsize=24, color=(1, 0, 0))
    plt.plot_date(dates, stock_prices, fmt="", tz=None, xdate=True)
    # Bottom panel: capital over time.
    plt.subplot(2, 1, 2)
    plt.title("Capital evolution:{stock}".format(stock=stock_name))
    plt.plot_date(dates, capital, fmt="", tz=None, xdate=True)
    plt.savefig("img/basic/{stock}".format(stock=stock_name))
def InsertToFile(message):
    """Append *message* to the result.csv report in the working directory."""
    with open("result.csv", mode="a") as report:
        report.write(message)
def calculate(stock_name):
    """Run the basic strategy over *stock_name*'s history, save its plot
    and append the performance metrics to result.csv."""
    data = get_data_from_stock(stock_name)
    capital_inicial = 10000
    capital = 0
    stocks = 0
    stock_prices = []
    dates = []
    profits = []
    b = basic(capital_inicial)
    for d in data:
        stocks, capital = b.run(stocks, d, data)
        stock_prices.append(d.close)
        profits.append(capital)
        dates.append(d.date)
    # Return on investment relative to the initial capital.
    # NOTE(review): under Python 2 this is integer division — confirm the
    # intended interpreter before relying on fractional rdto values.
    rdto = (capital / capital_inicial) - 1
    print_graph(stock_name, stock_prices, dates, profits, None, None)
    InsertToFile("Stock:{stock};capital:{capital_inicial};capitalfinal:{capital};rendimiento:{rdto}".format(stock=stock_name, capital_inicial=capital_inicial, capital=capital, rdto=rdto))
# Run the strategy over a fixed basket of tickers.
calculate("TEF")
calculate("MSFT")
calculate("MO")
calculate("TSLA")
calculate("IBM")
calculate("SAN")
calculate("YHOO")
calculate("TFX")
calculate("DIS")
calculate("WFC")
calculate("TWTR")
calculate("BIDU")
calculate("BAC")
calculate("AXP")
calculate("FB")
993,189 | 49f7b49ad6f7f37992a3d4bb410497f6d9582786 | def build_profile(first, breed, **user_info):
user_info['first name'] = first.title()
user_info['breed'] = breed.title()
return user_info
# Demo call: mixes positional args with arbitrary keyword info.
user_info = build_profile('jock', 'jack russel', size='medium',
                          health='dead')
# print(f"first name: {user_info['first name']}")
# print(f"breeed {user_info['breed']}")
# # print(f"size: {user_info}")
# # print(f"health: {user_info}")
print(user_info)
993,190 | a19052cc6a36261db60ae1b6d22418edb1276d50 | import librosa
import os
import pandas as pd
import sys
sys.path.insert(0, '/home/huanyuan/code/demo/common')
from common.utils.python.metrics_tools import *
def cal_fpr_tpr(src_csv, pst_csv, positive_label, bool_write_audio):
    """Score keyword-detection output against ground-truth segments.

    src_csv: labelled segments (label, start_time, end_time in ms).
    pst_csv: detector output segments in the same format.
    positive_label: the keyword counted as the positive class.
    bool_write_audio: when True, FN/FP audio snippets are cut from the
        .wav file sharing src_csv's basename and written next to pst_csv.

    Prints accuracy/TPR/FPR, the confusion matrix, and lists of FN, FP,
    double-matched and unmatched detections.
    """
    # load csv
    src_pd = pd.read_csv(src_csv)
    pst_pd = pd.read_csv(pst_csv)
    src_list = []
    for _, row in src_pd.iterrows():
        src_list.append({'label':row['label'], 'start_time':int(row['start_time']), 'end_time':int(row['end_time'])})
        # src_list.append({'label':row['lable'], 'start_time':int(row['start_time']), 'end_time':int(row['end_time'])})
        assert row['start_time'] < row['end_time']
    pst_list = []
    for _, row in pst_pd.iterrows():
        # 'matched' tracks whether a detection has been paired with a label.
        pst_list.append({'label':row['label'], 'start_time':int(row['start_time']), 'end_time':int(row['end_time']), 'matched':0})
        assert row['start_time'] < row['end_time']
    # init
    y_true = []
    y_pred = []
    fn_list = []
    fp_list = []
    double_matched_list = []
    sample_rate = 16000
    # match y_true/y_pred: a label counts as detected when any positive
    # detection overlaps it in time.
    for idx in range(len(src_list)):
        row_idx = src_list[idx]
        # y_true_idx
        y_true_idx = 1 if row_idx['label'] == positive_label else 0
        y_true.append(y_true_idx)
        # y_pred_idx
        y_pred_idx = 0
        for idy in range(len(pst_list)):
            row_idy = pst_list[idy]
            if (row_idx['start_time'] > row_idy['start_time'] and row_idx['start_time'] < row_idy['end_time'] and row_idy['label'] == positive_label) \
                or (row_idx['end_time'] > row_idy['start_time'] and row_idx['end_time'] < row_idy['end_time'] and row_idy['label'] == positive_label) \
                or (row_idx['start_time'] < row_idy['start_time'] and row_idx['end_time'] > row_idy['end_time'] and row_idy['label'] == positive_label):
                if y_pred_idx == 0 and row_idy['matched'] == 0:
                    row_idy['matched'] = 1
                    y_pred_idx = 1
                else:
                    # A second match was found: two detection results cross
                    # the same label.
                    row_idy['matched'] = 1
                    y_pred_idx = 1
                    double_matched_list.append({'label':row_idy['label'], 'start_time':int(row_idy['start_time']), 'end_time':int(row_idy['end_time'])})
                    print("[Warning:] Please check result, two results are found, indicating that the two test results cross the label")
        y_pred.append(y_pred_idx)
        # find fn list
        if y_true_idx == 1 and y_pred_idx == 0:
            fn_list.append({'label':row_idx['label'], 'start_time':int(row_idx['start_time']), 'end_time':int(row_idx['end_time'])})
        # find fp list
        if y_true_idx == 0 and y_pred_idx == 1:
            fp_list.append({'label':row_idx['label'], 'start_time':int(row_idx['start_time']), 'end_time':int(row_idx['end_time'])})
    assert len(y_true) == len(y_pred)
    tn, fp, fn, tp = get_confusion_matrix(y_true, y_pred)
    accuracy = get_accuracy(tn, fp, fn, tp)
    tpr = get_tpr(tn, fp, fn, tp)
    fpr = get_fpr(tn, fp, fn, tp)
    print("[Ground Truth] Accuracy:{:.2f}%({}/{}), Tpr:{:.2f}%({}/{}), Fpr:{:.2f}%({}/{})".format(accuracy*100, tp+tn, (tp+fp+tn+fn), tpr*100, tp, tp+fn, fpr*100, fp, fp+tn))
    print("[Confusion Matrix] \n[{}, {} \n {}, {}]".format(tp, fn, fp, tn))
    # print("[Ground Truth Total] number:{}, tp:{}, fp:{}, tn:{}, fn:{}".format((tp+fp+tn+fn), tp, fp, tn, fn))
    if bool_write_audio:
        # load data: the audio file is assumed to sit beside src_csv with
        # the same basename and a .wav extension.
        audio_data = librosa.core.load(src_csv.split('.')[0] + '.wav', sr=sample_rate)[0]
        output_dir = os.path.join(os.path.dirname(pst_csv), 'audio_result')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    print()
    for fn_case in fn_list:
        print("[FN] {}".format(fn_case))
        if bool_write_audio:
            output_subdir = os.path.join(output_dir, 'fn')
            if not os.path.exists(output_subdir):
                os.makedirs(output_subdir)
            output_path = os.path.join(output_subdir, 'label_{}_starttime_{}.wav'.format(fn_case['label'], fn_case['start_time']))
            start_time = int(sample_rate * fn_case['start_time'] / 1000)
            end_time = int(sample_rate * fn_case['end_time'] / 1000)
            output_wav = audio_data[start_time: end_time]
            librosa.output.write_wav(output_path, output_wav, sr=sample_rate)
    print()
    for fp_case in fp_list:
        print("[FP] {}".format(fp_case))
        if bool_write_audio:
            output_subdir = os.path.join(output_dir, 'fp')
            if not os.path.exists(output_subdir):
                os.makedirs(output_subdir)
            output_path = os.path.join(output_subdir, 'label_{}_starttime_{}.wav'.format(fp_case['label'], fp_case['start_time']))
            start_time = int(sample_rate * fp_case['start_time'] / 1000)
            end_time = int(sample_rate * fp_case['end_time'] / 1000)
            output_wav = audio_data[start_time: end_time]
            librosa.output.write_wav(output_path, output_wav, sr=sample_rate)
    # find unmatched detection result
    unmatched_list = []
    for idy in range(len(pst_list)):
        row_idy = pst_list[idy]
        if row_idy['matched'] != 1:
            print("[Warning:] Please check result, no labels are found")
            unmatched_list.append({'label':row_idy['label'], 'start_time':int(row_idy['start_time']), 'end_time':int(row_idy['end_time'])})
    print()
    print("[Detection Total] number:{}, matched number:{}, unmatched number:{}, double matched number:{}".format(
        len(pst_list), len(pst_list) - len(unmatched_list), len(unmatched_list), len(double_matched_list)))
    for double_matched_case in double_matched_list:
        print("[Double Matched] {}".format(double_matched_case))
    for unmatched_case in unmatched_list:
        print("[Unmatched] {}".format(unmatched_case))
if __name__ == "__main__":
    # Example evaluation: compare hand-labelled segments against detector
    # output for the "xiaoyu" keyword and dump FN/FP audio snippets.
    bool_write_audio = True
    cal_fpr_tpr("/home/huanyuan/model/test_straming_wav/xiaoyu_10292020_testing_3600_001.csv",
                "/home/huanyuan/model/model_10_30_25_21/model/kws_xiaoyu2_0_res15_10292020/test_straming_wav/xiaoyu_10292020_testing_3600_001/found_words.csv",
                "xiaoyu",
                bool_write_audio)
993,191 | d4efba6b82ed1e07d8ee0c74e587f016f9fe31e0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.virt.xenapi import agent
class AgentEnabledCase(test.TestCase):
    """Unit tests for agent.should_use_agent().

    The per-image property ``image_xenapi_use_agent`` (carried in the
    instance's system_metadata) overrides the ``xenapi_use_agent_default``
    flag; a value that cannot be parsed falls back to the flag.
    """

    def test_agent_is_present(self):
        # Property "true" wins even though the default flag is False.
        self.flags(xenapi_use_agent_default=False)
        instance = {"system_metadata":
                    [{"key": "image_xenapi_use_agent", "value": "true"}]}
        self.assertTrue(agent.should_use_agent(instance))

    def test_agent_is_disabled(self):
        # Property "false" wins even though the default flag is True.
        self.flags(xenapi_use_agent_default=True)
        instance = {"system_metadata":
                    [{"key": "image_xenapi_use_agent", "value": "false"}]}
        self.assertFalse(agent.should_use_agent(instance))

    def test_agent_uses_default_when_prop_invalid(self):
        # Fixed typo in the test name ("deafault" -> "default"); unittest
        # discovery is prefix-based, so the rename is safe.
        self.flags(xenapi_use_agent_default=True)
        instance = {"system_metadata":
                    [{"key": "image_xenapi_use_agent", "value": "bob"}],
                    "uuid": "uuid"}
        self.assertTrue(agent.should_use_agent(instance))

    def test_agent_default_not_present(self):
        # No property at all: the (False) default flag is used.
        self.flags(xenapi_use_agent_default=False)
        instance = {"system_metadata": []}
        self.assertFalse(agent.should_use_agent(instance))

    def test_agent_default_present(self):
        # No property at all: the (True) default flag is used.
        self.flags(xenapi_use_agent_default=True)
        instance = {"system_metadata": []}
        self.assertTrue(agent.should_use_agent(instance))
|
993,192 | 78241da7bdffaed93855ddfb2c7384bec30f845a |
import os, sys, glob, pickle
import optparse
import numpy as np
from scipy.interpolate import interpolate as interp
import scipy.stats
from astropy.table import Table, Column
import matplotlib
#matplotlib.rc('text', usetex=True)
matplotlib.use('Agg')
#matplotlib.rcParams.update({'font.size': 20})
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
plt.rcParams['xtick.labelsize']=30
plt.rcParams['ytick.labelsize']=30
import corner
import pymultinest
from gwemlightcurves.sampler import *
from gwemlightcurves.KNModels import KNTable
from gwemlightcurves.sampler import run
from gwemlightcurves import __version__
from gwemlightcurves import lightcurve_utils, Global
def easyint(x, y, xref):
    """Linearly interpolate y(x) onto the grid *xref* with constant clamping.

    Points of *xref* below min(x) take the y-value at the smallest x and
    points above max(x) take the y-value at the largest x.  *x* does not
    need to be pre-sorted.

    Parameters
    ----------
    x, y : array_like
        Sample locations and values (same length).
    xref : array_like
        Target grid.

    Returns
    -------
    ndarray
        Interpolated/clamped values, same length as *xref*.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    xref = np.asarray(xref)
    order = np.argsort(x)          # sort once; reused for interp and edges
    xs, ys = x[order], y[order]
    ir = (xref >= xs[0]) & (xref <= xs[-1])
    yint = interp.interp1d(xs, ys)(xref[ir])
    yout = np.zeros(len(xref))
    yout[ir] = yint
    # Bug fix: the original clamped with y[0]/y[-1], which is wrong when x
    # is not already sorted; clamp with the values at min(x)/max(x) instead.
    yout[xref < xs[0]] = ys[0]
    yout[xref > xs[-1]] = ys[-1]
    return yout
def spec2mag(lam, Llam, band):
    """Synthesize a broadband magnitude from a luminosity spectrum.

    Parameters
    ----------
    lam : ndarray
        Spectrum wavelengths, in Angstrom.
    Llam : ndarray
        Luminosity density at those wavelengths (erg/s/A assumed --
        TODO confirm units against the caller).
    band : ndarray, shape (N, 2)
        Filter curve: column 0 wavelength [Angstrom], column 1 throughput.

    Returns
    -------
    float
        Magnitude at a reference distance of 10 pc.
    """
    S = 0.1089 / band[:, 0]**2
    S1 = S * band[:, 1]
    ZP = np.trapz(S1, x=band[:, 0])      # zero-point integral of the band
    D_cm = 10 * 3.0857e16 * 100          # 10 pc in cm
    # Bug fix: np.array(zip(...)) yields an empty/object array on Python 3;
    # build the two-column array explicitly (works on Python 2 and 3).
    # (The original also computed an unused frequency array; removed.)
    spec = np.column_stack((lam, Llam / (4 * np.pi * D_cm**2)))
    spec1 = easyint(spec[:, 0], spec[:, 1], band[:, 0])
    conv = spec1 * band[:, 1]
    flux = np.trapz(conv, x=band[:, 0])
    mag = -2.5 * np.log10(flux / ZP)
    return mag
# Output directory for the combined comparison plots.
plotDir = '../plots/gws/Ka2017_combine'
if not os.path.isdir(plotDir):
    os.makedirs(plotDir)

# Flat error budgets drawn as +/- bands: 1 mag for light curves and a
# factor of 2 in flux for spectra.
errorbudgetmag = 1.00
errorbudget = 2.00

# Load the products of three earlier fitting runs.
# NOTE(review): the pickles are opened in text mode ('r'), which only works
# on Python 2 -- confirm before porting to Python 3 (use 'rb').
# Run 1: one-component spectral fit.
plotDir1 = '../plots/gws_spec/Ka2017_FixZPT0/5000_25000/GW170817/2.00/'
pcklFile = os.path.join(plotDir1,"data.pkl")
f = open(pcklFile, 'r')
(data_out, data1, t_best1, lambdas_best1, spec_best1, t0_best1, zp_best1, n_params1, labels1, truths1) = pickle.load(f)
f.close()
# Run 2: two-component spectral fit (data_out is overwritten; both runs
# share the same observed spectra).
plotDir2 = '../plots/gws_spec/Ka2017x2_FixZPT0/5000_25000/GW170817/2.00/'
pcklFile = os.path.join(plotDir2,"data.pkl")
f = open(pcklFile, 'r')
(data_out, data2, t_best2, lambdas_best2, spec_best2, t0_best2, zp_best2, n_params2, labels2, truths2) = pickle.load(f)
f.close()
# Run 3: photometric (light-curve) fit, used for the data points below.
plotDir3 = '../plots/gws/Ka2017_FixZPT0/u_g_r_i_z_y_J_H_K/0_14/ejecta/GW170817/1.00/'
pcklFile = os.path.join(plotDir3,"data.pkl")
f = open(pcklFile, 'r')
(data_out3, data3, tmag3, lbol3, mag3, t0_best3, zp_best3, n_params3, labels3, best3, truths3) = pickle.load(f)
f.close()
# Evaluate the 1- and 2-component best-fit model spectra on each observed
# epoch's wavelength grid via 2-D interpolation in log10(flux); the fitted
# zero-point offset is applied as a multiplicative flux factor.
spec_best_dic1 = {}
for key in data_out:
    f = interp.interp2d(t_best1+t0_best1, lambdas_best1, np.log10(spec_best1), kind='cubic')
    flux1 = (10**(f(float(key),data_out[key]["lambda"]))).T
    zp_factor = 10**(zp_best1/-2.5)
    flux1 = flux1*zp_factor
    spec_best_dic1[key] = {}
    spec_best_dic1[key]["lambda"] = data_out[key]["lambda"]
    spec_best_dic1[key]["data"] = np.squeeze(flux1)
# Same as above, for the two-component fit.
spec_best_dic2 = {}
for key in data_out:
    f = interp.interp2d(t_best2+t0_best2, lambdas_best2, np.log10(spec_best2), kind='cubic')
    flux1 = (10**(f(float(key),data_out[key]["lambda"]))).T
    zp_factor = 10**(zp_best2/-2.5)
    flux1 = flux1*zp_factor
    spec_best_dic2[key] = {}
    spec_best_dic2[key]["lambda"] = data_out[key]["lambda"]
    spec_best_dic2[key]["data"] = np.squeeze(flux1)
# Synthesize broadband magnitudes from the model spectra in nine filters
# (u..K).  filters.dat: column 0 wavelength (converted to Angstrom by *10),
# then one throughput column per band.
# NOTE(review): xrange and zip-as-array are Python-2 idioms, consistent with
# the text-mode pickle loads above.
filts = np.genfromtxt('../input/filters.dat')
filtnames = ["u","g","r","i","z","y","J","H","K"]
mag1, mag2 = {}, {}
for ii in xrange(9):
    mag1[ii], mag2[ii] = [], []
tmag = []
for key in data_out:
    tmag.append(float(key))
    for ii in xrange(9):
        band = np.array(zip(filts[:,0]*10,filts[:,ii+1]))
        mag1[ii].append(spec2mag(spec_best_dic1[key]["lambda"],spec_best_dic1[key]["data"],band))
        mag2[ii].append(spec2mag(spec_best_dic2[key]["lambda"],spec_best_dic2[key]["data"],band))
tmag = np.array(tmag)
for ii in xrange(9):
    mag1[ii], mag2[ii] = np.array(mag1[ii]), np.array(mag2[ii])

# Shared styling for the panel figures below.
title_fontsize = 30
label_fontsize = 30
filts = ["u","g","r","i","z","y","J","H","K"]
colors=cm.jet(np.linspace(0,1,len(filts)))
magidxs = [0,1,2,3,4,5,6,7,8]
tini, tmax, dt = 0.0, 10.0, 0.1
tt = np.arange(tini,tmax,dt)
color2 = 'coral'
color1 = 'cornflowerblue'
# ---------------------------------------------------------------------------
# Figure 1: one panel per band; photometric data points (from the light-curve
# fit) overlaid with the synthesized 1- and 2-component light curves and
# their +/- errorbudgetmag bands.
# ---------------------------------------------------------------------------
plotName = "%s/models_spec_panels.pdf"%(plotDir)
#plt.figure(figsize=(20,18))
plt.figure(figsize=(20,28))
cnt = 0
for filt, color, magidx in zip(filts,colors,magidxs):
    cnt = cnt+1
    # Subplot spec assembled as a string like "911" and eval'd; the first
    # panel owns the shared x/y axes.
    vals = "%d%d%d"%(len(filts),1,cnt)
    if cnt == 1:
        ax1 = plt.subplot(eval(vals))
    else:
        ax2 = plt.subplot(eval(vals),sharex=ax1,sharey=ax1)
    if not filt in data_out3: continue
    samples = data_out3[filt]
    # Columns: time [days], magnitude, magnitude error.
    t, y, sigma_y = samples[:,0], samples[:,1], samples[:,2]
    idx = np.where(~np.isnan(y))[0]
    t, y, sigma_y = t[idx], y[idx], sigma_y[idx]
    if len(t) == 0: continue
    # Finite errors -> detections (circles); infinite errors -> limits
    # (triangles).
    idx = np.where(np.isfinite(sigma_y))[0]
    plt.errorbar(t[idx],y[idx],sigma_y[idx],fmt='o',c=color, markersize=16)
    idx = np.where(~np.isfinite(sigma_y))[0]
    plt.errorbar(t[idx],y[idx],sigma_y[idx],fmt='v',c=color, markersize=16)
    magave1 = mag1[magidx]
    magave2 = mag2[magidx]
    ii = np.where(~np.isnan(magave1))[0]
    f = interp.interp1d(tmag[ii], magave1[ii], fill_value='extrapolate')
    # Truncate the model earlier in the bluest bands.
    if filt == 'u':
        tt1 = tt[tt<=3.0]
    elif filt == 'g':
        tt1 = tt[tt<=6.5]
    else:
        tt1 = tt[tt<=21.0]
    maginterp1 = f(tt1)
    plt.plot(tt1,maginterp1,'--',c=color1,linewidth=2,label='1 Component')
    plt.plot(tt1,maginterp1-errorbudgetmag,'-',c=color1,linewidth=2)
    plt.plot(tt1,maginterp1+errorbudgetmag,'-',c=color1,linewidth=2)
    plt.fill_between(tt1,maginterp1-errorbudgetmag,maginterp1+errorbudgetmag,facecolor=color1,alpha=0.2)
    ii = np.where(~np.isnan(magave2))[0]
    f = interp.interp1d(tmag[ii], magave2[ii], fill_value='extrapolate')
    maginterp2 = f(tt1)
    plt.plot(tt1,maginterp2,'--',c=color2,linewidth=2,label='2 Component')
    plt.plot(tt1,maginterp2-errorbudgetmag,'-',c=color2,linewidth=2)
    plt.plot(tt1,maginterp2+errorbudgetmag,'-',c=color2,linewidth=2)
    plt.fill_between(tt1,maginterp2-errorbudgetmag,maginterp2+errorbudgetmag,facecolor=color2,alpha=0.2)
    plt.ylabel('%s'%filt,fontsize=48,rotation=0,labelpad=40)
    plt.xlim([0.0, 10.0])
    plt.ylim([-17.0,-11.0])
    plt.gca().invert_yaxis()
    plt.grid()
    # Legend only on the first panel; x tick labels hidden on all but the
    # last panel.
    if cnt == 1:
        ax1.set_yticks([-18,-16,-14,-12,-10])
        plt.setp(ax1.get_xticklabels(), visible=False)
        l = plt.legend(loc="upper right",prop={'size':36},numpoints=1,shadow=True, fancybox=True)
    elif not cnt == len(filts):
        plt.setp(ax2.get_xticklabels(), visible=False)
    plt.xticks(fontsize=32)
    plt.yticks(fontsize=32)
ax1.set_zorder(1)
plt.xlabel('Time [days]',fontsize=48)
plt.savefig(plotName, bbox_inches='tight')
plt.close()
# ---------------------------------------------------------------------------
# Figure 2: one panel per epoch, spectra in log10(flux); observed spectrum
# (black dashed) against the 1- and 2-component models with a factor-of-
# errorbudget band.  Grey bands mask the telluric gaps near 1.4/1.9 micron.
# ---------------------------------------------------------------------------
keys = sorted(data_out.keys())
colors=cm.rainbow(np.linspace(0,1,len(keys)))
plotName = "%s/spec_panels_fit.pdf"%(plotDir)
plotNamePNG = "%s/spec_panels_fit.png"%(plotDir)
fig = plt.figure(figsize=(22,28))
cnt = 0
for key, color in zip(keys,colors):
    cnt = cnt+1
    vals = "%d%d%d"%(len(keys),1,cnt)
    if cnt == 1:
        #ax1 = plt.subplot(eval(vals))
        ax1 = plt.subplot(len(keys),1,cnt)
    else:
        #ax2 = plt.subplot(eval(vals),sharex=ax1,sharey=ax1)
        ax2 = plt.subplot(len(keys),1,cnt,sharex=ax1,sharey=ax1)
    # Observed spectrum at this epoch.
    plt.plot(data_out[key]["lambda"],np.log10(data_out[key]["data"]),'--',c='k',linewidth=4,zorder=99)
    lambdas = spec_best_dic1[key]["lambda"]
    specmed = spec_best_dic1[key]["data"]
    specmin = spec_best_dic1[key]["data"]/errorbudget
    specmax = spec_best_dic1[key]["data"]*errorbudget
    plt.plot(lambdas,np.log10(specmed),'--',c=color1,linewidth=2,label="1 Component")
    plt.plot(lambdas,np.log10(specmin),'-',c=color1,linewidth=2)
    plt.plot(lambdas,np.log10(specmax),'-',c=color1,linewidth=2)
    plt.fill_between(lambdas,np.log10(specmin),np.log10(specmax),facecolor=color1,edgecolor=color1,alpha=0.2,linewidth=3)
    lambdas = spec_best_dic2[key]["lambda"]
    specmed = spec_best_dic2[key]["data"]
    specmin = spec_best_dic2[key]["data"]/errorbudget
    specmax = spec_best_dic2[key]["data"]*errorbudget
    plt.plot(lambdas,np.log10(specmed),'--',c=color2,linewidth=2,label="2 Component")
    plt.plot(lambdas,np.log10(specmin),'-',c=color2,linewidth=2)
    plt.plot(lambdas,np.log10(specmax),'-',c=color2,linewidth=2)
    plt.fill_between(lambdas,np.log10(specmin),np.log10(specmax),facecolor=color2,edgecolor=color2,alpha=0.2,linewidth=3)
    # Grey out the telluric absorption windows.
    plt.fill_between([13500.0,14500.0],[-100.0,-100.0],[100.0,100.0],facecolor='0.5',edgecolor='0.5',alpha=0.2,linewidth=3)
    plt.fill_between([18000.0,19500.0],[-100.0,-100.0],[100.0,100.0],facecolor='0.5',edgecolor='0.5',alpha=0.2,linewidth=3)
    plt.ylabel('%.1f'%float(key),fontsize=48,rotation=0,labelpad=40)
    plt.xlim([5000, 25000])
    plt.ylim([35.5,37.9])
    plt.grid()
    if (not cnt == len(keys)) and (not cnt == 1):
        plt.setp(ax2.get_xticklabels(), visible=False)
    elif cnt == 1:
        plt.setp(ax1.get_xticklabels(), visible=False)
        l = plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
                       mode="expand", borderaxespad=0, ncol=2, prop={'size':48})
    else:
        plt.xticks(fontsize=36)
ax1.set_zorder(1)
ax2.set_xlabel(r'$\lambda [\AA]$',fontsize=48,labelpad=30)
plt.savefig(plotNamePNG, bbox_inches='tight')
plt.close()
# Produce the PDF from the PNG via ImageMagick.
convert_command = "convert %s %s"%(plotNamePNG,plotName)
os.system(convert_command)
# ---------------------------------------------------------------------------
# Figure 3: same spectral comparison, restricted to early epochs (<= 5 days)
# and plotted in linear flux.
# ---------------------------------------------------------------------------
keys_tmp = sorted(data_out.keys())
keys_float = np.array(keys_tmp,dtype=np.float64)
idx = np.where(keys_float <= 5.0)[0]
keys = [keys_tmp[ii] for ii in idx]
colors=cm.rainbow(np.linspace(0,1,len(keys)))
plotName = "%s/spec_panels_fit_early.pdf"%(plotDir)
plotNamePNG = "%s/spec_panels_fit_early.png"%(plotDir)
fig = plt.figure(figsize=(22,28))
cnt = 0
for key, color in zip(keys,colors):
    cnt = cnt+1
    vals = "%d%d%d"%(len(keys),1,cnt)
    if cnt == 1:
        #ax1 = plt.subplot(eval(vals))
        ax1 = plt.subplot(len(keys),1,cnt)
    else:
        #ax2 = plt.subplot(eval(vals),sharex=ax1,sharey=ax1)
        ax2 = plt.subplot(len(keys),1,cnt,sharex=ax1,sharey=ax1)
    plt.plot(data_out[key]["lambda"],data_out[key]["data"],'--',c='k',linewidth=4,zorder=99)
    lambdas = spec_best_dic1[key]["lambda"]
    specmed = spec_best_dic1[key]["data"]
    specmin = spec_best_dic1[key]["data"]/errorbudget
    specmax = spec_best_dic1[key]["data"]*errorbudget
    plt.plot(lambdas,specmed,'--',c=color1,linewidth=2,label="1 Component")
    plt.plot(lambdas,specmin,'-',c=color1,linewidth=2)
    plt.plot(lambdas,specmax,'-',c=color1,linewidth=2)
    plt.fill_between(lambdas,specmin,specmax,facecolor=color1,edgecolor=color1,alpha=0.2,linewidth=3)
    lambdas = spec_best_dic2[key]["lambda"]
    specmed = spec_best_dic2[key]["data"]
    specmin = spec_best_dic2[key]["data"]/errorbudget
    specmax = spec_best_dic2[key]["data"]*errorbudget
    plt.plot(lambdas,specmed,'--',c=color2,linewidth=2,label="2 Component")
    plt.plot(lambdas,specmin,'-',c=color2,linewidth=2)
    plt.plot(lambdas,specmax,'-',c=color2,linewidth=2)
    plt.fill_between(lambdas,specmin,specmax,facecolor=color2,edgecolor=color2,alpha=0.2,linewidth=3)
    # Telluric windows (bounds are in linear flux here).
    plt.fill_between([13500.0,14500.0],[10**-100.0,10**-100.0],[10**100.0,10**100.0],facecolor='0.5',edgecolor='0.5',alpha=0.2,linewidth=3)
    plt.fill_between([18000.0,19500.0],[10**-100.0,10**-100.0],[10**100.0,10**100.0],facecolor='0.5',edgecolor='0.5',alpha=0.2,linewidth=3)
    plt.ylabel('%.1f'%float(key),fontsize=48,rotation=0,labelpad=40)
    plt.xlim([5000, 25000])
    plt.ylim([10**35.5,10**37.9])
    plt.grid()
    if (not cnt == len(keys)) and (not cnt == 1):
        plt.setp(ax2.get_xticklabels(), visible=False)
    elif cnt == 1:
        plt.setp(ax1.get_xticklabels(), visible=False)
        l = plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
                       mode="expand", borderaxespad=0, ncol=2, prop={'size':48})
    else:
        plt.xticks(fontsize=36)
ax1.set_zorder(1)
ax2.set_xlabel(r'$\lambda [\AA]$',fontsize=48,labelpad=30)
plt.savefig(plotNamePNG, bbox_inches='tight')
plt.close()
convert_command = "convert %s %s"%(plotNamePNG,plotName)
os.system(convert_command)
# ---------------------------------------------------------------------------
# Figure 4: same spectral comparison for late epochs (>= 5 days), linear
# flux, with a tighter y-range than the early-epoch figure.
# ---------------------------------------------------------------------------
keys_tmp = sorted(data_out.keys())
keys_float = np.array(keys_tmp,dtype=np.float64)
idx = np.where(keys_float >= 5.0)[0]
keys = [keys_tmp[ii] for ii in idx]
colors=cm.rainbow(np.linspace(0,1,len(keys)))
plotName = "%s/spec_panels_fit_late.pdf"%(plotDir)
plotNamePNG = "%s/spec_panels_fit_late.png"%(plotDir)
fig = plt.figure(figsize=(22,28))
cnt = 0
for key, color in zip(keys,colors):
    cnt = cnt+1
    vals = "%d%d%d"%(len(keys),1,cnt)
    if cnt == 1:
        #ax1 = plt.subplot(eval(vals))
        ax1 = plt.subplot(len(keys),1,cnt)
    else:
        #ax2 = plt.subplot(eval(vals),sharex=ax1,sharey=ax1)
        ax2 = plt.subplot(len(keys),1,cnt,sharex=ax1,sharey=ax1)
    plt.plot(data_out[key]["lambda"],data_out[key]["data"],'--',c='k',linewidth=4,zorder=99)
    lambdas = spec_best_dic1[key]["lambda"]
    specmed = spec_best_dic1[key]["data"]
    specmin = spec_best_dic1[key]["data"]/errorbudget
    specmax = spec_best_dic1[key]["data"]*errorbudget
    plt.plot(lambdas,specmed,'--',c=color1,linewidth=2,label="1 Component")
    plt.plot(lambdas,specmin,'-',c=color1,linewidth=2)
    plt.plot(lambdas,specmax,'-',c=color1,linewidth=2)
    plt.fill_between(lambdas,specmin,specmax,facecolor=color1,edgecolor=color1,alpha=0.2,linewidth=3)
    lambdas = spec_best_dic2[key]["lambda"]
    specmed = spec_best_dic2[key]["data"]
    specmin = spec_best_dic2[key]["data"]/errorbudget
    specmax = spec_best_dic2[key]["data"]*errorbudget
    plt.plot(lambdas,specmed,'--',c=color2,linewidth=2,label="2 Component")
    plt.plot(lambdas,specmin,'-',c=color2,linewidth=2)
    plt.plot(lambdas,specmax,'-',c=color2,linewidth=2)
    plt.fill_between(lambdas,specmin,specmax,facecolor=color2,edgecolor=color2,alpha=0.2,linewidth=3)
    plt.fill_between([13500.0,14500.0],[10**-100.0,10**-100.0],[10**100.0,10**100.0],facecolor='0.5',edgecolor='0.5',alpha=0.2,linewidth=3)
    plt.fill_between([18000.0,19500.0],[10**-100.0,10**-100.0],[10**100.0,10**100.0],facecolor='0.5',edgecolor='0.5',alpha=0.2,linewidth=3)
    plt.ylabel('%.1f'%float(key),fontsize=48,rotation=0,labelpad=40)
    plt.xlim([5000, 25000])
    plt.ylim([10**35.5,10**36.9])
    plt.grid()
    if (not cnt == len(keys)) and (not cnt == 1):
        plt.setp(ax2.get_xticklabels(), visible=False)
    elif cnt == 1:
        plt.setp(ax1.get_xticklabels(), visible=False)
        l = plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
                       mode="expand", borderaxespad=0, ncol=2, prop={'size':48})
    else:
        plt.xticks(fontsize=36)
ax1.set_zorder(1)
ax2.set_xlabel(r'$\lambda [\AA]$',fontsize=48,labelpad=30)
plt.savefig(plotNamePNG, bbox_inches='tight')
plt.close()
convert_command = "convert %s %s"%(plotNamePNG,plotName)
os.system(convert_command)
|
993,193 | b3fe981518928ac866fa5c3c71fbb5bd9a09a78d | import logging
from flask_restplus import Api
# Module-level logger and the shared Flask-RESTPlus Api instance.
log = logging.getLogger(__name__)

api = Api(
    description="Description",
    title="API",
    doc="/documentation/",   # Swagger UI served at this path
    validate=True,           # validate request payloads against models
)
|
993,194 | 944a976bbdcabb063f97a06fdfecee4fd9bc6e93 | from .entities.personas import personas
class modeloPersona():
    """Data-access helpers for the ``personas`` table.

    Reads go through the SQLAlchemy model ``personas``; the insert uses
    the raw DB-API connection held by ``db``.
    """

    @classmethod
    def listar_personas(self, db):
        """Return every row of ``personas``; re-raise any ORM error."""
        try:
            listapersonas = personas.query.all()
            return listapersonas
        except Exception as ex:
            raise Exception(ex)

    @classmethod
    def listar_persona(self, db, user):
        """Return the first persona whose ``usr_danae`` equals *user* (or None)."""
        try:
            print('en listar personas')
            dir = personas.query.filter_by(usr_danae=user).first()
            print(dir)
            return dir
        except Exception as ex:
            raise Exception(ex)

    @classmethod
    def registrar_persona(self, db, persona):
        """Insert *persona* into ``personas``; return True on success.

        Bug fixes versus the original:
        * the format string only interpolated two of the five values
          ("VALUES (uuid(), '{0}', {1})"), so the generated SQL never
          matched the 5-column list and the insert could not succeed;
        * values were spliced into the SQL string (injection risk) -- they
          are now passed as bound parameters.
        NOTE(review): the original also injected uuid() as an extra value
        with no matching column; if the table needs a generated id, add the
        id column and uuid() to the statement explicitly.
        """
        try:
            cursor = db.connection.cursor()
            sql = ("INSERT INTO personas (correo, nombre_completo, usr_danae, colectivo, activo) "
                   "VALUES (%s, %s, %s, %s, %s)")
            cursor.execute(sql, (persona.correo, persona.nombre_completo,
                                 persona.usr_danae, persona.colectivo,
                                 persona.activo))
            db.connection.commit()
            return True
        except Exception as ex:
            raise Exception(ex)
993,195 | b3c9b758ea0b683aa4762a1e8e30bfffb2074a00 |
"""
Find mirror tree of a binary tree
"""
class Node:
    """A binary-tree node holding *data* and two child links."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def mirror(root):
    """Mirror the tree rooted at *root* in place.

    Every node's left and right subtrees are swapped; an empty tree is a
    no-op.
    """
    if root is None:
        return
    # Mirror both subtrees first, then swap them at this node.
    mirror(root.left)
    mirror(root.right)
    root.left, root.right = root.right, root.left
def in_order(root):
    """Print each node's data using an inorder (left, root, right) walk."""
    if root is None:
        return
    in_order(root.left)
    print(root.data)
    in_order(root.right)
# Demo: build a small tree, print its inorder traversal, mirror it, then
# print the traversal again (which comes out reversed).
root1 = Node(1)
root1.left = Node(2)
root1.right = Node(3)
root1.left.left = Node(4)
root1.left.right = Node(5)
""" Print inorder traversal of
the input tree """
print("Inorder traversal of the",
      "constructed tree is")
in_order(root1)
""" Convert tree to its mirror """
mirror(root1)
""" Print inorder traversal of
the mirror tree """
print("\nInorder traversal of",
      "the mirror treeis ")
in_order(root1)
|
993,196 | c7f1ae174534a8a7cc30a4345940d56cfca8d077 | # Generated by Django 3.1.2 on 2020-12-23 10:05
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the USN, course, salary and sem
    # fields from the Account model.  Keep in sync with models.py; do not
    # hand-edit the operations.

    dependencies = [
        ('account', '0005_auto_20201223_1532'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='account',
            name='USN',
        ),
        migrations.RemoveField(
            model_name='account',
            name='course',
        ),
        migrations.RemoveField(
            model_name='account',
            name='salary',
        ),
        migrations.RemoveField(
            model_name='account',
            name='sem',
        ),
    ]
|
993,197 | a1732134e36c0bcd3fb9101fd97005c26cb00752 | import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
if tf.__version__ < '1.4.0':
raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# Frozen inference graph exported by the TF Object Detection API (TF1).
PATH_TO_CKPT = "/home/seten/TFM/exported_graphs/model_player_pole_1/frozen_inference_graph.pb"
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = "/home/seten/TFM/exported_graphs/model_player_pole_1/detector_map.pbtxt"
PATH_TO_TEST_IMAGES_DIR = '/media/seten/Datos/diego/TFM/dataset_tfm/court_poles'
NUM_CLASSES = 2

# Deserialize the frozen GraphDef into its own tf.Graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# Loading label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                            use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    """Return a PIL image as an (height, width, 3) uint8 numpy array."""
    width, height = image.size
    pixels = np.array(image.getdata(), dtype=np.uint8)
    return pixels.reshape((height, width, 3))
# detection
# Collect every .jpeg in the test directory that does not yet have a
# matching PascalVOC .xml next to it (i.e. not yet annotated).
TEST_IMAGE_PATHS = []
for im_file in os.listdir(PATH_TO_TEST_IMAGES_DIR):
    # print(im_file)
    if im_file.endswith(".jpeg") and not os.path.isfile(
            os.path.join(PATH_TO_TEST_IMAGES_DIR, im_file.replace(".jpeg", ".xml"))):
        TEST_IMAGE_PATHS.append(os.path.join(PATH_TO_TEST_IMAGES_DIR, im_file))
# TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 8) ]
print(len(TEST_IMAGE_PATHS))
# Size, in inches, of the output images.
def run_inference_for_single_image(image, graph):
    """Run one image through the frozen detection graph.

    Parameters
    ----------
    image : numpy array
        HxWx3 uint8 image (batch dimension is added internally).
    graph : tf.Graph
        Graph containing the imported frozen model.

    Returns
    -------
    dict
        'num_detections' (int), 'detection_boxes', 'detection_scores',
        'detection_classes' and, when the model exports them,
        'detection_masks' -- all squeezed to the single input image.
    """
    with graph.as_default():
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                # Binarize the reframed masks at 0.5.
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Run inference
            output_dict = sess.run(tensor_dict,
                                   feed_dict={image_tensor: np.expand_dims(image, 0)})
            # all outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict[
                'detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
def generate_pascal_xml(boxes, classes, scores, category_index, input_image_path, output_xml, min_score_thresh=.5):
    """Write the detections above *min_score_thresh* as a PascalVOC XML file.

    Parameters
    ----------
    boxes : array of normalized [ymin, xmin, ymax, xmax] per detection.
    classes : array of class ids (keys of *category_index*).
    scores : array of confidences in [0, 1].
    category_index : dict mapping class id -> {"name": ...}.
    input_image_path : path of the image the detections belong to (used to
        get pixel dimensions and recorded in the XML).
    output_xml : destination path for the annotation file.
    min_score_thresh : detections below this score are skipped.
    """
    from pascal_voc_writer import Writer
    # Bug fix: the original opened the module-level global ``image_path``
    # instead of the ``input_image_path`` parameter, so the function only
    # worked by accident when called from the main loop below.
    image = Image.open(input_image_path)
    width, height = image.size
    writer = Writer(input_image_path, width, height)
    for object_index in range(len(scores)):
        # filter bad detections
        if scores[object_index] < min_score_thresh:
            continue
        class_name = str(category_index[classes[object_index]]["name"])
        box = boxes[object_index]
        # Normalized coords -> pixels; min/max guards against swapped corners.
        ymin = int(min(box[0], box[2]) * height)
        xmin = int(min(box[1], box[3]) * width)
        ymax = int(max(box[0], box[2]) * height)
        xmax = int(max(box[1], box[3]) * width)
        writer.addObject(class_name, xmin, ymin, xmax, ymax)
    print("We are going to save pascal xml in {}".format(output_xml))
    writer.save(output_xml)
# Main loop: run detection on each unlabeled image, save a visualization
# with boxes drawn, and emit a PascalVOC XML with the raw detections
# (usable as pre-annotations for manual review).
subset_test = TEST_IMAGE_PATHS[:]
print("We are going to run the inference for {} images".format(len(subset_test)))
for image_path in subset_test:
    image = Image.open(image_path)
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    # NOTE(review): image_np_expanded is unused -- the expansion is done
    # again inside run_inference_for_single_image.
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np, detection_graph)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=3)
    # plt.figure(figsize=IMAGE_SIZE)
    # plt.imshow(image_np)
    # Outputs go into an "ex_out" subdirectory next to the inputs.
    dirname = os.path.dirname(image_path)
    image_name = os.path.basename(image_path)
    out_path = os.path.join(dirname, "ex_out", image_name)
    print("Saving result image in {}".format(out_path))
    Image.fromarray(image_np).save(out_path)
    output_xml_path = os.path.join(dirname, "ex_out", image_name.split(".")[0]+".xml")
    generate_pascal_xml(output_dict['detection_boxes'],
                        output_dict['detection_classes'],
                        output_dict['detection_scores'],
                        category_index,
                        image_path,
                        output_xml_path)
|
993,198 | aa42760de681f549967010332b60f13dd5ee2073 | from common.logger import Logging
from page_obj.base import Page
from readConfig import ReadConfig
# Module-level logger and configuration reader shared by the page object.
logger = Logging('safeEdit').getlog()
config = ReadConfig()
class safeEdit(Page):
    """Page object for the account-security settings page.

    Wraps the element locators and the user flows for changing the bound
    e-mail address, phone number, and password.
    """

    def __init__(self, driver):
        Page.__init__(self, driver)
        # Locator table: display name -> [strategy, locator].  The keys are
        # looked up by the Page helper methods (click/send_keys/...); they
        # are runtime strings -- do not rename or translate them.
        self.config_element = {
            "账号输入框": ["id", "login_user_email"],
            "密码输入框": ['id', "login_password"],
            "登录按钮": ['css', "body > div.company-login.login01 > div > div.login-panel-body > form > div.form-group.no-margin-bot > div > button"],
            "登录错误提示": ['id', 'show_error'],
            "忘记密码": ['id', 'forget_password'],
            "验证码登录": ['css', 'body > div.company-login.login01 > div > div.login-panel-body > form > div.tips > div > a.pull-right.code-btn'],
            "账号密码登录": ['id', 'accout-login'],
            "中英文切换": ['css', 'body > div.language > ul > li.language-active > a'],
            "英文": ['linktext', 'English'],
            "中文": ['linktext', '中文'],
            "个人资料入口": ['css', '#hover-drop > a > img'],
            "个人资料": ['css', '#hover-drop > ul > li:nth-child(2) > a'],
            "添加新地址": ['css', '#tab3 > div > div > div > button'],
            "账号安全": ['css', '#myTabs > li.active'],
            "绑定邮箱": ['id', 'click_change_email'],
            "邮件_密码输入框": ['id', 'email_oldpassword'],
            "修改邮箱": ['id', 'new_email'],
            "发送邮件": ['id', 'send_email'],
            "邮箱_取消": ['css', '#cancel_email_send > div > input'],
            "邮箱密码_错误提示": ['id', 'email_oldpassword-error'],
            "邮箱_错误": ['id', 'new_email-error'],
            "修改手机": ['id', 'click_change_mobile'],
            "修改手机_old": ['id', 'phone_oldpassword'],
            "修改手机_手机号码": ['id', 'mobile'],
            "修改手机_弹窗": ['css', '#layui-layer10 > div.layui-layer-btn > a'],
            "手机_获取验证码": ['id', 'get_verification_code'],
            "修改手机提交": ['id', 'submit_change_mobile'],
            "手机_取消": ['css', '#now_change_phone > div:nth-child(4) > div > input.btn.btn-primary.cancel-change'],
            "重置密码": ['id', 'click_change_password'],
            "原密码输入框": ['id', 'psw-old'],
            "原密码错误提示": ['id', 'psw-old-error'],
            "新密码": ['id', 'change_psw1'],
            "新密码错误提示": ['id', 'change_psw1-error'],
            "确认密码输入框": ['id', 'change_psw2'],
            "确认密码错误提示": ['id', 'change_psw2-error'],
            "修改密码提交": ['id', 'submit_change_password'],
            "修改密码取消": ['css', '#now_change_password > div:nth-child(4) > div > input.btn.btn-primary.cancel-change'],
            "邮件发送成功": ['id', 'email_send_sucess'],
            "修改密码x": ['css', '#layui-layer1 > span > a'],
            "退出": ['css', '#hover-drop > ul > li:nth-child(7) > a'],
            "修改密码提示": ['css', '#layui-layer1 > div.layui-layer-content'],
        }

    def lognin(self):
        # Login helper (name keeps the original "lognin" spelling since
        # callers use it): sign in with the configured credentials, open
        # the profile, then navigate straight to the security page.
        url = config.getConfig('url')
        username = config.getConfig('username')
        password = config.getConfig('password')
        self.send_keys('账号输入框', username)
        self.send_keys("密码输入框", password)
        self.click("登录按钮")
        self.move('个人资料入口')
        self.click('个人资料')
        self.open(url+'dashboard/security/')

    def edit_email_cancel(self):
        # Open the change-email dialog and cancel it; returns True when the
        # "bind email" entry is visible again.
        self.click('绑定邮箱')
        self.click('邮箱_取消')
        return self.is_displayed('绑定邮箱')

    def edit_email_commit(self):
        # Fill in password + new e-mail, send the confirmation mail, and
        # return the success-message text.
        password = config.getConfig('password')
        email = config.getConfig('email')
        self.click('绑定邮箱')
        self.send_keys('邮件_密码输入框', password)
        self.send_keys('修改邮箱', email)
        self.click('发送邮件')
        self.wait_time(2)
        return self.text('邮件发送成功')

    def change_phone_cancel(self):
        # Open the change-phone dialog and cancel it; returns True when the
        # "change phone" entry is visible again.
        self.click('修改手机')
        self.click('手机_取消')
        return self.is_displayed('修改手机')

    def change_phone_commit(self):
        # Fill in password + new phone number and request a verification
        # code.
        # NOTE(review): the "get verification code" button is clicked twice
        # (once outside, once inside the try) -- looks accidental; confirm.
        # NOTE(review): the method returns False on failure but None on
        # success -- callers relying on a truthy success value will break.
        pwd = config.getConfig('password')
        phone = config.getConfig('phone')
        self.click('修改手机')
        self.send_keys('修改手机_old', pwd)
        self.send_keys('修改手机_手机号码', phone)
        self.click('手机_获取验证码')
        try:
            self.click('手机_获取验证码')
            #self.alert(1)
            logger.info('确定成功')
        except Exception as e:
            return False
|
993,199 | d2773b7dcb0374251d66a60a3175ecf0c88f873e | #python is a formally an interpreter language
#python will start by entering python in the cmdline
#python syntax primarlily uses white space for compiling
#GPA calculator
# Read letter grades from stdin until a blank line, then print the GPA.
print('Welcome to the GPA calculator')
print('Please enter all your grades, one per line')
print('Enter a blank line to delegate them at the end')
# Letter grade -> grade points (no +/- modifiers supported; input is
# case-sensitive, so lowercase letters are rejected as unknown).
points = {'A':4.0, 'B':3.0, 'C':2.0,'D':1.0,'F':0.0}
num_courses = 0
total_points = 0
done = False
while not done:
    grade = input()
    if(grade == ''):
        done = True
    elif grade not in points:
        print('unknown grade has been entered')
    else:
        num_courses+=1
        total_points+=points[grade]
# Guard against division by zero when no valid grades were entered.
if(num_courses>0):
    print('Your GPA is {0:.3}'.format(total_points/num_courses))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.