| seq_id (string, 4–11 chars) | text (string, 113–2.92M chars) | repo_name (string, 4–125 chars, nullable) | sub_path (string, 3–214 chars) | file_name (string, 3–160 chars) | file_ext (18 classes) | file_size_in_byte (int64, 113–2.92M) | program_lang (1 class) | lang (93 classes) | doc_type (1 class) | stars (int64, 0–179k, nullable) | dataset (3 classes) | pt (78 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
8320514544 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
from pandas import DataFrame, read_csv, concat
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM, Bidirectional, GRU,ConvLSTM2D, Flatten
from matplotlib import pyplot as plt
from numpy import concatenate, reshape, array
from sklearn.metrics import mean_squared_error, mean_absolute_error
from math import sqrt
from sklearn.preprocessing import MinMaxScaler
from sys import argv
import csv
import datetime
import time
# In[3]:
# Series to Supervised Learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
# print("I: ",i)
cols.append(df.shift(i))
# print("Column: ",cols)
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# print("Names: ",names)
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
# print("COls: ",cols)
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# print("Names: ",names)
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
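# In[4]:
# Minimal sanity check for series_to_supervised (illustrative; the sample
# values are assumptions, not part of the original pipeline):
# demo = series_to_supervised(array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]), n_in=1, n_out=1)
# print(demo.columns.tolist())  # ['var1(t-1)', 'var2(t-1)', 'var1(t)', 'var2(t)']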
# In[9]:
from tqdm import tqdm
fileNames=['BCH','BTC', 'ETC', 'ETH','EOS','LINK', 'LTC','DASH', 'MKR','OMG','XLM','XTZ','ZRX']
for fileName in fileNames:
for at in tqdm(range(50)):
dataset = read_csv('final_datasets/'+fileName+'.csv', parse_dates=['time'])
        startIndex = 3 # feature columns start at column index 3
nrows = dataset.shape[0]
values = dataset.iloc[:,startIndex:].values #Getting values - Total Sentiment and BTC Values
        valuesCrypto = dataset.iloc[:,-1:].values #Getting values - last column, the cryptocurrency price
# For predicting with just Cryptocurrency values, we have just 1 input variable.
# Incorporating sentiment values will make input variables=2
# Comment the below line if there are multiple features / input variable.
# values = values.reshape(-1,1) #Only do this if you have 1 input variable
        num = dataset.loc[dataset['time'] == '2020-12-01'].index[0]
        num2 = dataset.iloc[[-1]].index[0]
        percent = num/num2 # fraction of rows before 2020-12-01, used as the train/test split point
        scaler = MinMaxScaler(feature_range = (0,1))
        scaled = scaler.fit_transform(values) # fit_transform both fits the scaler and scales the data
# Input and Output Sequence Length
input_sequence = 1
output_sequence = 1
# Call Series to Supervised Function
reframed = series_to_supervised(scaled, input_sequence, output_sequence)
# Drop current sentiment/any other feature that might be added in the future(at time t)
dropColumns = []
for i in range(values.shape[1]-1):
dropColumns.append('var{}(t)'.format(i+1))
reframed=reframed.drop(columns=dropColumns)
        # Drop current sentiment
#reframed=reframed.drop(columns=['var2(t-1)'])
# Ignore the headers
reframedValues = reframed.values
#Splitting data into train and test sets
        n_train_days = int(percent*nrows) # rows before 2020-12-01 are train, the rest test
train = reframedValues[:n_train_days, :]
test = reframedValues[n_train_days:nrows, :]
# valuesCrypto = reframed.iloc[:,-1:].values #Getting values - C Values
#Assigning inputs and output datasets
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
#Reshaping input to be 3 dimensions (samples, timesteps, features)
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
#Building LSTM Neural Network model
model = Sequential()
model.add(Bidirectional(GRU(50, activation='relu', return_sequences=True, input_shape=(train_X.shape[1], train_X.shape[2]))))
model.add(LSTM(50,activation ='tanh'))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse',metrics=['acc'])
# Uncomment below line to get summary of the model
# print(model.summary(line_length=None, positions=None, print_fn=None))
#Fitting model
history = model.fit(train_X, train_y, epochs = 100, batch_size=64, validation_data=(test_X, test_y), verbose=0, shuffle=False) #Best so far: 100 neurons, epochs = 400, batch_size = 53
#saving model
model_json = model.to_json()
with open('models/'+fileName+'/'+fileName+"_"+str(at)+"_model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights('models/'+fileName+'/'+fileName+"_"+str(at)+"_model.h5")
print("Saved " + fileName+"_"+str(at)+"_model.h5 to disk")
        # Prediction
model_prediction = model.predict(test_X)
# Inverse Scale
        scalerCrypto = MinMaxScaler(feature_range = (0,1))
        scaledCrypto = scalerCrypto.fit_transform(valuesCrypto) # fit a separate scaler on the price column so inverse_transform maps predictions back to prices
model_prediction_unscale = scalerCrypto.inverse_transform(model_prediction)
predictedValues = reshape(model_prediction_unscale, model_prediction_unscale.shape[0])
actualValues = valuesCrypto[n_train_days+input_sequence:] #test_y+input_sequence:
actualValues = reshape(actualValues, actualValues.shape[0])
#Plotting training loss vs validation loss
# plt.plot(history.history['loss'], label='train')
# plt.plot(history.history['val_loss'], label='validation')
# plt.legend()
# plt.show()
#Visualising Results (Actual vs Predicted)
# plt.plot(actualValues, color = 'red', label = 'Actual '+ fileName + ' Value')
# plt.plot(predictedValues, color = 'blue', label = 'Predicted '+ fileName + ' Value') #[1:38]
# plt.title(fileName+' Trend Prediction')
# plt.xlabel('Time Interval (1 interval = 3 hours)')
# plt.ylabel('Price')
# plt.legend()
# Uncomment below line to save the figure
# plt.savefig('Trend_Graphs/'+'Trend Graph for '+fileName+'.png', dpi=700)
# plt.show()
actual= DataFrame(actualValues, columns= ['Actual Value'])
predicted=DataFrame(predictedValues, columns= ['Predicted Value'])
# Write to csv
writeFileName = "--Results.csv"
timestamp = DataFrame(dataset['time'][n_train_days:], columns= ['time'])
timestamp.reset_index(drop=True, inplace=True)
results=concat([timestamp,actual,predicted], axis=1)
# print("Head: ",results.head())
# print("Tail: ",results.tail())
results.dropna(inplace=True)
results.to_csv('Prediction Tables/'+fileName+'/'+fileName+'_'+str(at)+writeFileName, index= False)
# In[ ]:
| TandonAnanya/Crypto-Trend-Prediction | Multiple Models/LSTM-multiple-datasets.py | LSTM-multiple-datasets.py | py | 7,187 | python | en | code | 1 | github-code | 13 |
34745597648 | import numpy as np
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
class FCM:
def __init__(self, data, number_of_clusters=2, m=2, error = 0.01, random_state = 42, max_ind=150):
self.number_of_clusters = number_of_clusters
self.data = data.to_numpy().astype(np.float32)
#Fuzziness : m
self.m = m
self.J = 1.0
self.performance_index = 0.0
self.entropy = 0.0
self.max_ind = max_ind
self.fcp = []
self.dif = None
self.u = None
self.centers = None
self.error = error
self.random_state = random_state
# Generate The Initial Centers
def center_distribution(self):
dim = self.data.shape[1]
data_generated = []
np.random.seed(self.random_state)
for i in range(dim):
data_generated.append(abs(np.random.normal(0, 0.7, self.number_of_clusters)))
centers = np.vstack(data_generated).T
return centers
# 1st Step: Initialize the needed attributes
def initialize(self, data, number_of_clusters):
self.dif = 1.0
self.u = np.zeros((len(data), number_of_clusters))
rand = np.random.RandomState(self.random_state)
# self.centers = data[rand.randint(0, len(data), number_of_clusters)]
self.centers = self.center_distribution()
    # 2nd Step: Update the membership of the data points
def update_membership(self):
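        # u[i, j] = 1 / sum_k (||x_i - c_j|| / ||x_i - c_k||)**(2/(m-1))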
old_u = np.copy(self.u)
for i in range(len(self.data)):
for j in range(self.number_of_clusters):
temp = 0
d_ij = np.linalg.norm(self.data[i]-self.centers[j])
for k in range(self.number_of_clusters):
power = 2/(self.m - 1)
d_ik = np.linalg.norm(self.data[i]-self.centers[k])
if d_ik != 0:
temp += (d_ij/d_ik)**power
if temp !=0:
self.u[i, j] = 1 / temp
else:
self.u[i, j] = 1
self.dif = np.linalg.norm(self.u - old_u)
    # 3rd Step: Update the cluster centers
def update_centers(self):
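        # c_j = sum_i u[i, j]**m * x_i / sum_i u[i, j]**m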
for j in range(self.number_of_clusters):
temp1 = 0
sum_of_membership = 0
for i in range(len(self.data)):
temp_u = self.u[i, j] ** self.m
temp1 += temp_u*self.data[i]
sum_of_membership += temp_u
self.centers[j] = temp1 / sum_of_membership
# Calculate the sum of the squares of the error within the cluster
def calculate_cost(self):
J = 0.0
for i in range(len(self.data)):
for j in range(self.number_of_clusters):
J += (self.u[i, j] ** self.m) * \
(np.linalg.norm(self.data[i]-self.centers[j]) ** 2)
return J
# Calculate performance Index
def calculate_performance_Index(self):
avg = self.mean_data()
performance_index = 0.0
for i in range(len(self.data)):
for k in range(self.number_of_clusters):
small_value_of_optimal_C = np.linalg.norm(self.data[i] - self.centers[k]) ** 2
big_value_of_optimal_C = np.linalg.norm(self.centers[k] - avg) ** 2
performance_index += (self.u[i, k] ** self.m) * (
small_value_of_optimal_C - big_value_of_optimal_C)
return performance_index
# Calculate Entropy
def calculate_Entropy(self):
entropy = 0.0
for i in range(len(self.data)):
for k in range(self.number_of_clusters):
entropy -= self.u[i, k] * np.log2(self.u[i, k])
return entropy
# Combine the last steps for clustering
def fit(self):
self.initialize(self.data, self.number_of_clusters)
i = self.max_ind
while self.dif >= self.error and i > 0:
self.update_membership()
self.update_centers()
i -= 1
self.J = self.calculate_cost()
self.performance_index = self.calculate_performance_Index()
self.entropy = self.calculate_Entropy()
# Defuzzification of the data's based on max membership principle
def max_membership_defuzzification(self, data, u):
label = []
for i in range(len(data)):
label.append(np.argmax(u[i], axis=0))
return label
# Calculate average of the data
def mean_data(self):
avg = []
for i in range(self.data.shape[1]):
avg.append(np.mean(self.data[:, i]))
return np.array(avg)
# Generate data for representing the regions of the clustering centers
def generate_data(self):
max_x = np.max(self.data[:, 0])
max_y = np.max(self.data[:, 1])
x = np.random.uniform(0, max_x, 5000)
y = np.random.uniform(0, max_y, 5000)
return x, y
# Calculate membership of the Generated data
def calculate_membership(self, data):
u = np.zeros((len(data),self.number_of_clusters))
for i in range(len(data)):
for j in range(self.number_of_clusters):
temp = 0
d_ij = np.linalg.norm(data[i]-self.centers[j])
for k in range(self.number_of_clusters):
power = 2/(self.m - 1)
d_ik = np.linalg.norm(data[i]-self.centers[k])
if d_ik != 0:
temp += (d_ij/d_ik)**power
if temp !=0:
u[i, j] = 1 / temp
else:
u[i, j] = 1
return u
# plot the Data-set Before and After clustering
def plot(self):
color_map = ['b', 'm', 'c', 'r', 'g', 'orange', 'y', 'k', 'Brown', 'ForestGreen']
x = self.data[:, 0]
y = self.data[:, 1]
labels = self.max_membership_defuzzification(data=self.data, u=self.u)
label_color = [color_map[l] for l in labels]
center_color = [color_map[l] for l in range(len(self.centers))]
fig = plt.figure(figsize=(6, 7))
plt.subplot(2, 1, 1)
plt.scatter(x, y)
plt.title('Dataset-Before Clustering')
plt.subplot(2, 1, 2)
plt.scatter(x, y, marker='.', c=label_color)
plt.scatter(self.centers[:, 0], self.centers[:, 1], c=center_color,s=700, linewidths=1,alpha=0.3)
plt.title('Dataset-After Clustering')
plt.show()
# plot the clustering regions
def plot_clustering_regions(self):
color_map = ['b', 'm', 'c', 'r', 'g', 'orange', 'y', 'k', 'Brown', 'ForestGreen']
x, y = self.generate_data()
data = np.vstack((x, y)).T
u = self.calculate_membership(data=data)
labels = self.max_membership_defuzzification(data=data, u=u)
label_color = [color_map[l] for l in labels]
center_color = [color_map[l] for l in range(len(self.centers))]
plt.scatter(x, y, marker='.', c=label_color)
plt.scatter(self.centers[:, 0], self.centers[:, 1], c=center_color,s=700, linewidths=1,alpha=0.7, edgecolors='k')
plt.title("Clustering-Regions")
plt.show()
# calculate the performance_index and entropy and cost for different number of clusters and then plot the results
def plot_fcp(self):
fcp = []
entropy = []
J = []
x = []
for i in range(2, 11):
self.number_of_clusters = i
self.fit()
fcp.append(self.performance_index/i)
entropy.append(self.calculate_Entropy()/np.sqrt(i))
J.append(self.calculate_cost()/np.sqrt(i))
x.append(i)
# Create 2x2 sub plots
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(9, 7))
ax = plt.subplot(gs[0, 0]) # row 0, col 0
plt.plot(x, fcp)
plt.xlabel("Number of centers")
plt.ylabel("Performance-Index")
ax = plt.subplot(gs[0, 1]) # row 0, col 1
plt.plot(x, entropy)
plt.xlabel("Number of centers")
plt.ylabel("Entropy")
ax = plt.subplot(gs[1, :]) # row 1, span all columns
plt.plot(x, J)
plt.xlabel("Number of centers")
plt.ylabel("Cost")
plt.show()
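if __name__ == '__main__':
    # Minimal demo on synthetic 2-D blobs (the data and parameters here are
    # illustrative assumptions, not part of the original module).
    import pandas as pd
    rng = np.random.RandomState(0)
    pts = np.vstack([rng.normal(loc, 0.05, size=(50, 2)) for loc in ((0.3, 0.3), (0.8, 0.7))])
    fcm = FCM(pd.DataFrame(pts), number_of_clusters=2)
    fcm.fit()
    print('cost J =', fcm.J)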
| arashHarirpoosh/UniversityProjects | ComputationalIntelligence/3.FCM/Clustering/FCM_C_Means.py | FCM_C_Means.py | py | 8,264 | python | en | code | 0 | github-code | 13 |
38251350802 | ##################################### base_views #######################################################
from django.shortcuts import render, get_object_or_404
from ..models import Question
from django.core.paginator import Paginator
from django.db.models import Q
def index(request) :
    # order_by('-create_date') means sort by creation date in descending order (a leading '-' sorts descending; without it, ascending)
    page = request.GET.get('page','1') # default to page 1 when no page parameter is given
    kw = request.GET.get('kw', '') # search keyword
question_list = Question.objects.order_by('-create_date')
if kw:
question_list = question_list.filter(
            Q(subject__icontains=kw) | # search by subject
            Q(content__icontains=kw) | # search by content
            Q(answer__content__icontains=kw) | # search by answer content
            Q(author__username__icontains=kw) | # search by question author
            Q(answer__author__username__icontains=kw) # search by answer author
).distinct()
    paginator = Paginator(question_list, 10) # show 10 questions per page
page_obj = paginator.get_page(page)
context = {'question_list':page_obj, 'page': page, 'kw': kw}
    return render(request, 'pybo/question_list.html', context) # render applies the context to a template and returns HTML
def detail(request, question_id):
    question = get_object_or_404(Question, pk=question_id) # return 404 (page not found) instead of a 500 server error when the id does not exist
context = {'question': question}
return render(request, 'pybo/question_detail.html', context)
##################################### question views #######################################################
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from ..forms import QuestionForm
from ..models import Question
from django.contrib.auth.decorators import login_required
from django.contrib import messages
@login_required(login_url='common:login')
def question_create(request) :
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
question = form.save(commit=False)
            question.author = request.user # store the logged-in user in the author attribute
question.create_date = timezone.now()
question.save()
return redirect('pybo:index')
else:
form = QuestionForm()
context ={'form' : form}
return render (request, 'pybo/question_form.html', context)
@login_required(login_url='common:login')
def question_modify(request, question_id):
question = get_object_or_404(Question, pk=question_id)
if request.user != question.author:
        messages.error(request, 'You do not have permission to edit')
return redirect('pybo:detail', question_id=question.id)
if request.method == "POST":
        form = QuestionForm(request.POST, instance=question) # build the QuestionForm from instance, then overwrite it with the values in request.POST
if form.is_valid():
question = form.save(commit=False)
            question.modify_date = timezone.now() # save the modification time
question.save()
return redirect('pybo:detail', question_id=question.id)
else:
        form = QuestionForm(instance=question) # passing instance pre-fills the subject and content on the edit screen
context = {'form': form}
return render(request, 'pybo/question_form.html', context)
@login_required(login_url='common:login')
def question_delete(request, question_id):
question = get_object_or_404(Question, pk=question_id)
if request.user != question.author:
        messages.error(request, 'You do not have permission to delete')
return redirect('pybo:detail', question_id=question.id)
question.delete()
return redirect('pybo:index')
@login_required(login_url='common:login')
def question_vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
if request.user == question.author:
        messages.error(request, 'You cannot recommend your own post')
else:
question.voter.add(request.user)
return redirect('pybo:detail', question_id=question.id)
##################################### answer views #######################################################
from django.shortcuts import render, get_object_or_404, redirect, resolve_url
from django.utils import timezone
from django.http import HttpResponseNotAllowed
from ..forms import AnswerForm
from ..models import Question, Answer
from django.contrib.auth.decorators import login_required
from django.contrib import messages
@login_required(login_url='common:login')
def answer_create(request, question_id):
question = get_object_or_404(Question, pk=question_id)
if request.method == "POST":
        form = AnswerForm(request.POST) # use the form submitted via POST
if form.is_valid():
answer = form.save(commit=False)
            answer.author = request.user # store the logged-in user in the author attribute
answer.create_date = timezone.now()
answer.question = question
answer.save()
return redirect('{}#answer_{}'.format(
resolve_url('pybo:detail', question_id=question.id), answer.id))
else:
return HttpResponseNotAllowed('Only POST is possible.')
context = {'question': question, 'form': form}
return render(request, 'pybo/question_detail.html', context)
@login_required(login_url='common:login')
def answer_modify(request, answer_id):
answer = get_object_or_404(Answer, pk=answer_id)
if request.user != answer.author:
        messages.error(request, 'You do not have permission to edit')
return redirect('pybo:detail', question_id=answer.question.id)
if request.method == "POST":
form = AnswerForm(request.POST, instance=answer)
if form.is_valid():
answer = form.save(commit=False)
answer.modify_date = timezone.now()
answer.save()
return redirect('{}#answer_{}'.format(
resolve_url('pybo:detail', question_id=answer.question.id), answer.id))
else:
form = AnswerForm(instance=answer)
context = {'answer': answer, 'form': form}
return render(request, 'pybo/answer_form.html', context)
@login_required(login_url='common:login')
def answer_delete(request, answer_id):
answer = get_object_or_404(Answer, pk=answer_id)
if request.user != answer.author:
        messages.error(request, 'You do not have permission to delete')
else:
answer.delete()
return redirect('pybo:detail', question_id=answer.question.id)
@login_required(login_url='common:login')
def answer_vote(request, answer_id):
answer = get_object_or_404(Answer, pk=answer_id)
if request.user == answer.author:
        messages.error(request, 'You cannot recommend your own post')
else:
answer.voter.add(request.user)
return redirect('{}#answer_{}'.format(
resolve_url('pybo:detail', question_id=answer.question.id), answer.id))
##################################### signup views #######################################################
from django.contrib.auth import authenticate, login
from django.shortcuts import render, redirect
from common.forms import UserForm
def signup(request):
if request.method == "POST":
form = UserForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password) # authenticate the user
            login(request, user) # log the user in
return redirect('index')
else:
form = UserForm()
return render(request, 'common/signup.html', {'form': form})
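# For reference, a minimal URLconf sketch wiring the views above (module and
# route names are assumptions based on the 'pybo:' and 'common:' reverses used
# in this file):
# urlpatterns = [
#     path('', base_views.index, name='index'),
#     path('<int:question_id>/', base_views.detail, name='detail'),
#     path('question/create/', question_views.question_create, name='question_create'),
#     path('answer/create/<int:question_id>/', answer_views.answer_create, name='answer_create'),
# ]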
| johnpark144/Practical_Study | Python_django/(FBV)파이보게시판 핵심/장고/base_views.py | base_views.py | py | 8,223 | python | en | code | 3 | github-code | 13 |
# Restore IP Addresses; a bit fiddly, so done in Python
class Solution(object):
def valid(self, s):
if len(s) == 1:
return 0 <= int(s)
elif len(s) == 2:
return s[0] != '0' and 0 <= int(s)
elif len(s) == 3:
return s[0] != '0' and 0 <= int(s) <= 255
else:
return False
def rec(self, s, pieces):
if pieces == 1:
if self.valid(s):
return [s]
else:
return []
res = []
for k in range(1, 4):
nk = s[:k]
if self.valid(nk):
rem = s[k:]
for tmp in self.rec(rem, pieces - 1):
res.append(nk + "." + tmp)
return res
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
return self.rec(s, 4)
if __name__ == "__main__":
s = Solution()
inp = "010010"
res = s.restoreIpAddresses(inp)
print(res)
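    # Expected output: ['0.10.0.10', '0.100.1.0']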
| butflame/LeetcodePractice | bytedance/string/RestoreIPAddresses.py | RestoreIPAddresses.py | py | 1,001 | python | en | code | 0 | github-code | 13 |
36554202903 | import os
import json
#import helper functions for lmnft
import launchmynft
def getConfig():
    # use a context manager so the file handle is closed after reading
    with open("config.json", 'r') as configFile:
        return list(json.load(configFile).values())
#gets config
config = getConfig()
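# Illustrative config.json layout (the key name is an assumption; only the
# first value, the mint link, is used below):
# {"link": "https://www.launchmynft.io/collections/<wallet>/<collection>"}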
#if windows True, else False (mac, linux)
isWindows = True if os.name == 'nt' else False
#if mint on launchmynft.io
if "launchmynft.io" in config[0]:
print("Found launchmynft.io link")
launchmynft.mint(config, isWindows)
#if platform not supported
else:
print("Could not recognize link")
| hankok16/LounchMyNFT-minting-bot | main.py | main.py | py | 536 | python | en | code | null | github-code | 13 |
17062005794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ZhimaCreditEpSceneTradeConsultModel(object):
def __init__(self):
self._apply_amount = None
self._biz_ext_param = None
self._customer_rating_no = None
self._out_order_no = None
self._product_code = None
@property
def apply_amount(self):
return self._apply_amount
@apply_amount.setter
def apply_amount(self, value):
self._apply_amount = value
@property
def biz_ext_param(self):
return self._biz_ext_param
@biz_ext_param.setter
def biz_ext_param(self, value):
self._biz_ext_param = value
@property
def customer_rating_no(self):
return self._customer_rating_no
@customer_rating_no.setter
def customer_rating_no(self, value):
self._customer_rating_no = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
def to_alipay_dict(self):
params = dict()
if self.apply_amount:
if hasattr(self.apply_amount, 'to_alipay_dict'):
params['apply_amount'] = self.apply_amount.to_alipay_dict()
else:
params['apply_amount'] = self.apply_amount
if self.biz_ext_param:
if hasattr(self.biz_ext_param, 'to_alipay_dict'):
params['biz_ext_param'] = self.biz_ext_param.to_alipay_dict()
else:
params['biz_ext_param'] = self.biz_ext_param
if self.customer_rating_no:
if hasattr(self.customer_rating_no, 'to_alipay_dict'):
params['customer_rating_no'] = self.customer_rating_no.to_alipay_dict()
else:
params['customer_rating_no'] = self.customer_rating_no
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ZhimaCreditEpSceneTradeConsultModel()
if 'apply_amount' in d:
o.apply_amount = d['apply_amount']
if 'biz_ext_param' in d:
o.biz_ext_param = d['biz_ext_param']
if 'customer_rating_no' in d:
o.customer_rating_no = d['customer_rating_no']
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'product_code' in d:
o.product_code = d['product_code']
return o
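# Illustrative round-trip (field values are placeholders, not real Alipay codes):
# model = ZhimaCreditEpSceneTradeConsultModel()
# model.out_order_no = 'OUT-0001'
# model.product_code = 'EXAMPLE_PRODUCT_CODE'
# restored = ZhimaCreditEpSceneTradeConsultModel.from_alipay_dict(model.to_alipay_dict())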
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ZhimaCreditEpSceneTradeConsultModel.py | ZhimaCreditEpSceneTradeConsultModel.py | py | 3,178 | python | en | code | 241 | github-code | 13 |
70148194257 | import json
import logging
import requests
from flask import abort
from flask import request, Response, jsonify
from marshmallow import ValidationError
from util.sensitive_words_blocking.words_blocking import DFA
from db.user import User
from db import db
def get_context(data_required=True):
"""
    Fetch the user's open_id and the HTTP request body.
    :return: open_id, data (json)
"""
if data_required:
data = request.get_data().decode('utf-8')
# 屏蔽词检查
dfa = DFA()
data = dfa.filter_all(data)
try:
data = json.loads(data)
except Exception as e:
logging.error(e)
logging.error(data)
abort(make_response(status=-1, msg="sensitive_words_blocking failed.", return_data={}), 451)
else:
data = None
openid = None
# 启用公网访问后防止用户伪造 openid
if 'x-wx-source' in request.headers:
openid = request.headers['x-wx-openid']
return openid, data
def get_context_user(data_required=True):
"""
    Fetch the user object and the HTTP request body.
    :return: user, data (json)
"""
# scoped_session = db.create_scoped_session()
openid, data = get_context(data_required)
user = User.query.filter(User.openid == openid).first()
# user = User.query.filter(User.openid == openid).first()
# scoped_session.remove()
if not user:
abort(make_response(status=-1, msg="Unauthorized or not registered.", return_data={},http_status_code=403))
return user, data
def make_response(status: int, msg: str, return_data: dict, http_status_code=200) -> Response:
"""
    Build a standard API response.
:return: Response
"""
ret = jsonify({"code": status, "msg": msg, "data": return_data})
ret.status_code = http_status_code
return ret
def check_data(schema, data):
"""
    Validate the JSON input format.
    :param schema: validation schema
    :param data: data to validate
    :return: the loaded data; aborts with an error response if validation fails
"""
try:
return schema().load(data)
except ValidationError as e:
abort(make_response(status=-1, msg=str(e.messages), return_data={}, http_status_code=400))
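# Illustrative usage with a marshmallow schema (the schema and field names
# below are assumptions):
# class LoginSchema(Schema):
#     code = fields.Str(required=True)
# payload = check_data(LoginSchema, data)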
| NJU-uFFFD/DDLChecker | backend/src/routes/utils.py | utils.py | py | 2,192 | python | en | code | 5 | github-code | 13 |
74262079056 | import pprint
import re
def getLines(path):
f = open(path)
lines = f.read().splitlines()
f.close()
return lines
def getRuleDict(input):
rules = [x.split('bags contain') for x in input]
ruleDict = {}
for rule in rules:
ruleDict[rule[0].strip()] = [x.strip().split(' ')[1] + " " + x.strip().split(' ')[2] for x in rule[1].split(',')]
return ruleDict
def getRuleDictWithCnt(input):
rules = [x.split('bags contain') for x in input]
ruleDict = {}
for rule in rules:
ruleDict[rule[0].strip()] = [x.strip().split(' ')[0] + " " + x.strip().split(' ')[1] + " " + x.strip().split(' ')[2] for x in rule[1].split(',')]
return ruleDict
def cntMasters(ruleDict, bagType, masterSet=None):
if not masterSet:
masterSet = set()
cnt = 0
for rule in ruleDict.keys():
if rule not in masterSet and bagType in ruleDict[rule]:
masterSet.add(rule)
cnt += 1 + cntMasters(ruleDict, rule, masterSet)
return cnt
def cntTotalSlaves(ruleDict, bagType):
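    # total(bag) = sum over contained types of n_i * (1 + total(type_i))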
cnt = 0
for slave in ruleDict[bagType]:
if (slave.split(' ')[1] + " " + slave.split(' ')[2]) != "other bags.":
cnt += int(slave.split(' ')[0]) + int(slave.split(' ')[0]) * cntTotalSlaves(ruleDict, slave.split(' ')[1] + " " + slave.split(' ')[2])
return cnt
def task1(input):
return cntMasters(getRuleDict(input), "shiny gold")
def task2(input):
return cntTotalSlaves(getRuleDictWithCnt(input), "shiny gold")
testInput1 = ["light red bags contain 1 bright white bag, 2 muted yellow bags.",
"dark orange bags contain 3 bright white bags, 4 muted yellow bags.",
"bright white bags contain 1 shiny gold bag.",
"muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.",
"shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.",
"dark olive bags contain 3 faded blue bags, 4 dotted black bags.",
"vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.",
"faded blue bags contain no other bags.",
"dotted black bags contain no other bags."]
testInput2 = ["shiny gold bags contain 2 dark red bags.",
"dark red bags contain 2 dark orange bags.",
"dark orange bags contain 2 dark yellow bags.",
"dark yellow bags contain 2 dark green bags.",
"dark green bags contain 2 dark blue bags.",
"dark blue bags contain 2 dark violet bags.",
"dark violet bags contain no other bags."]
input = getLines("07input.txt")
print()
print("Task 1")
print("---------")
print("Test: " + str(task1(testInput1)))
print("Answer: " + str(task1(input)))
print()
print()
print("Task 2")
print("---------")
print("Test1: " + str(task2(testInput1)))
print("Test2: " + str(task2(testInput2)))
print("Answer: " + str(task2(input)))
print() | jakobfje/advent-of-code | python/2020/07.py | 07.py | py | 2,796 | python | en | code | 0 | github-code | 13 |
22518103114 | import requests
import pandas as pd
import sqlite3
from products import Products
from user import User
def create_product_objects():
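    # NOTE: verify=False disables TLS certificate verification for this request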
valid_response = requests.get('https://fakestoreapi.com/products', verify = False)
data2 = valid_response.json()
df = pd.DataFrame(data2)
with sqlite3.connect("StoreProducts.db") as connection:
cursor = connection.cursor()
cursor.execute("DROP TABLE IF EXISTS Products;")
df = df.applymap(str)
df.to_sql('Products', connection)
temp = cursor.execute("SELECT * FROM PRODUCTS")
for i in temp.fetchall():
Products(i)
def create_bill(user):
item_bought = user.item_bought
total_indent = 120
data = ""
data += ("Welcome To Friends Store".center(total_indent,' '))
data += "\n"
data += (("-"*total_indent))
data += "\n"
data += ("Customer's Name: {0}".format(user.name).ljust(total_indent))
data += "\n"
data += "\n"
data += (("-"*total_indent))
data += "\n"
data += ("\n\n")
data += "\n"
data += ("Purchase Details:".ljust(total_indent))
data += "\n"
data += (("-"*total_indent))
data += "\n"
data += ("\n\n\n")
data += "\n"
data += (("S.N.".ljust(5," ") + " | Product ID".ljust(15, " ") + " | Product Name".ljust(50) + " | Price".ljust(10) + " | Qty".ljust(10) + " | Sub Total".ljust(10)))
data += "\n"
tot = 0
count = 0
for _id in item_bought:
count+=1
data += (("".ljust(5," ") + " | ".ljust(15, " ") + " | ".ljust(50) + " | ".ljust(10) + " | ".ljust(10) + " | "))
obj = Products.get_product_object(_id)
qty = item_bought.get(_id)
tot += obj.get_total_price(qty)
data += "\n"
data += ((str(count).ljust(5," ") + (" | " + str(obj.get_id())).ljust(15, " ") + (" | "+obj.title[:45]).ljust(50) + (" | " + str(obj.price)).ljust(10) + (" | " + str(qty)).ljust(10) + (" | " + str(obj.get_total_price(qty)))))
data += "\n"
data += "\n"
data += (("-"*total_indent))
data += "\n\n"
data += ("Total\t|{:.2f}\t".format(tot).rjust(100," "))
data += "\n"
tax = tot * 0.13
data += ("Tax\t|{:.2f}\t".format(tax).rjust(97," "))
data += "\n"
disc = tot * 0.05
data += ("Discount\t|{:.2f}\t".format(disc).rjust(102," "))
data += "\n"
data += ("Net Total\t|{:.2f}\t".format(tot+tax-disc).rjust(104," "))
data += "\n"
data += "\n"
data += (("-"*total_indent))
data += "\n"
data += "\n"
data += ("Thank you for Shopping. Visit Us again !!!".center(total_indent))
data += "\n"
data += "\n"
data += (("-"*total_indent))
return data
if __name__=="__main__":
name = input("Enter your name: ")
user = User(name)
checkout = "N"
create_product_objects()
while(checkout in ["NO", "N"]):
Products.display_all_product()
ids = input("Select the product id: ")
print("\n")
print("=="*50)
try:
Products.get_product_object(ids).display_detail()
except AttributeError:
print("ERROR! ERROR!! ERROR!!!".center(50, " "),"\n\nNot a valid Produc Id selected. Please select a valid option.")
print("=="*50)
print("\n")
continue
print("=="*50)
print("\n")
cart = input("Do you want to add in the cart?\n").upper()
if (cart in ["YES", "Y"]):
qty = int(input("How many items do you want to add?\n"))
while(not(qty>0)):
qty = int(input("Quantity must be Greater than 0\nEnter Again:\n"))
user.set_item_bought(ids, qty)
checkout = input("Do you want to checkout? \n").upper()
with open("bill.txt", "w+") as file:
file.write(create_bill(user)) | helloaseem/billing_system | main_project.py | main_project.py | py | 3,849 | python | en | code | 0 | github-code | 13 |
22467469333 | """
Create your own exception class that handles division by zero.
Test it on data entered by the user.
If the user enters zero as the divisor, the program must
handle this situation gracefully and not terminate with an error.
"""
def input_float(message: str) -> float:
""" Диалог ввода чисел
:param message: сообщение
:return: число
"""
while True:
try:
value = float(input(message))
except ValueError:
            print('The value must be numeric')
continue
return value
class MyZeroDivisionError(ZeroDivisionError):
""" Ошибка деления на 0 """
class Digit:
""" Класс числа """
def __init__(self, value):
""" Инициализация
:param value: значение
"""
if not isinstance(value, (int, float)):
raise TypeError('Value must be a digit')
self.__value = value
@property
def value(self):
""" Свойство показывает значение
:return: значение
"""
return self.__value
def __str__(self):
""" Строковое представление
:return: строковое представление
"""
return str(self.__value)
def __truediv__(self, other):
""" Деление
:param other: объект типа Digit
:return: объект типа Digit
"""
if not isinstance(other, Digit):
raise TypeError('Value must be a digit')
if other.value == 0:
raise MyZeroDivisionError('division by zero')
return Digit(self.value / other.value)
if __name__ == '__main__':
while True:
        d1 = Digit(input_float('Enter the first number: '))
        d2 = Digit(input_float('Enter the second number: '))
        print(f'Dividing {d1} by {d2}: ')
try:
d3 = d1 / d2
except MyZeroDivisionError:
            print('Error: division by zero.')
            print('Please try again.')
continue
        print('Result = ', d3)
        if input('Stop input? (q): ') == 'q':
break
| slavaprotogor/python_base | homeworks/lesson8/task2.py | task2.py | py | 2,609 | python | ru | code | 0 | github-code | 13 |
18899092679 | import os , requests
import discord
from discord.ext import commands
from dotenv import load_dotenv
import Cache
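# Expects a .env file providing the environment variables read below:
# TOKEN=<discord bot token>
# RIOT_API_KEY=<riot developer api key>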
local_cache = {} # stores name : cache object with users cached values
NUM_OF_REQUEST = 0
players = [
'roooge',
'molgera12',
'newtronimus',
'kayj0'
]
# v5 api uses continental names whilst v4 uses the old server system
# this dictionary will help convert region names from the v4 system to v5
regions = {
'euw1':'europe',
'na1':'north-america'
}
intents = discord.Intents.all()
bot = commands.Bot(command_prefix='!',intents=intents)
def run_bot():
@bot.event
async def on_ready():
print('bot is running')
load_dotenv()
bot.run(os.getenv('TOKEN'))
async def loading(ctx):
await ctx.send('loading...')
async def finished_loading(ctx):
async for m in ctx.channel.history(limit=200):
if m.author == bot.user and m.content == 'loading...':
last_message = m
await last_message.delete()
return
@bot.command()
async def cache(ctx):
offload()
@bot.command()
async def load(ctx):
load()
@bot.command()
async def leaderboard(ctx, filter='winrate'):
NUM_OF_GAMES = 4
await loading(ctx)
# error testing
if filter not in ['winrate', 'kda']:
await ctx.send(f'{filter} is not a valid filter!')
return
player_list = []
if filter == 'winrate':
player_list = await filterWinrate(NUM_OF_GAMES)
elif filter =='kda':
        player_list = await filterKDA(NUM_OF_GAMES)
i = 1
medals = {
1 : ':first_place:',
2 : ':second_place:',
3 : ':third_place:',
}
await finished_loading(ctx)
    await ctx.send(f'Leaderboards for {filter} over {NUM_OF_GAMES} games:\n ')
for player in player_list:
        out = f'{i}) {player[0]} | {filter}: {player[1]:.2f} {str(medals[i]) if i in medals else ""}'
await ctx.send(out)
i += 1
request_logs()
@bot.command()
async def winrate(ctx,summoner_name, num_of_match = 20, region='euw1'):
""" input summoner name and returns winrate of n matches
Args:
ctx (message): discord context variable
summoner_name (str): username of the summoner
num_of_match (int) : (defaults to 20) how far to go back
        region (str, optional): region of the summoner. Defaults to 'euw1'.
"""
await loading(ctx)
winrate = await getWinrate(summoner_name, num_of_match, region)
if winrate < 0:
await ctx.send(f'Error too many requests sent')
return
await finished_loading(ctx)
await ctx.send(f"{summoner_name}'s winrate is {winrate:.2f}% over {num_of_match} games")
request_logs()
async def filterKDA(NUM_OF_GAMES, region='euw1'):
    # Returns (name, average KDA) pairs sorted by KDA, mirroring filterWinrate.
    kda = []
    for player in players:
        kda.append((player, await getKDA(player, NUM_OF_GAMES, region)))
    return sorted(kda, key=lambda x: x[1], reverse=True)

async def getKDA(name, num_of_match, region='euw1'):
    # Average KDA over the player's last num_of_match games.
    PUUID = await getPUUID(name, region)
    if PUUID == -1: return -1
    matchHistory = await getMatchIDHistory(region, PUUID, start_index=0, number_of_matches=num_of_match)
    if matchHistory == -1: return -1
    kdas = [(await getGameData(name, region, match_id, PUUID))['kda'] for match_id in matchHistory]
    return sum(kdas) / len(kdas) if kdas else 0.0
async def filterWinrate(NUM_OF_GAMES) -> list[str]:
"""Returns sorted list of names by winrates
Returns:
list[str]: list of sorted names by winrate
"""
winrate = []
for player in players:
winrate.append((player, await getWinrate(player,NUM_OF_GAMES)))
return sorted(winrate, key=lambda x: x[1], reverse=True)
async def getWinrate(summoner_name, num_of_match, region ='euw1'):
"""gets the winrate of a given player in a given region
Args:
        summoner_name (str): username of the summoner
        num_of_match (int): number of recent matches to consider
        region (str, optional): region of the summoner. Defaults to 'euw1'.
"""
PUUID = await getPUUID(summoner_name,region)
if PUUID == -1: return -1
matchIdHistory = await getMatchIDHistory(region,PUUID,start_index=0,number_of_matches=num_of_match)
if matchIdHistory == -1: return matchIdHistory
total_games, won_games = 0, 0
for matchID in matchIdHistory:
total_games += 1
if await wonGame(summoner_name,region, matchID, PUUID):
won_games += 1
return (won_games / total_games) * 100
async def getPUUID(summoner_name,region) -> str:
"""gets PUUID given a league username and region
Args:
summoner_name (str): username of the player
region (str): which servers used i.e euw1, na1
Returns:
        str: the player's PUUID, or -1 on request failure
"""
# CHECK IF DATA IS IN CACHE
if summoner_name in local_cache:
print(f'cache hit for {summoner_name}')
return local_cache[summoner_name].puuid
request_url = f'https://{region}.api.riotgames.com/lol/summoner/v4/summoners/by-name/{summoner_name}?api_key={os.getenv("RIOT_API_KEY")}'
response = requests.get(request_url)
if response.status_code != 200:
return -1
puuid = response.json()["puuid"]
# cache element
cache_player(summoner_name, puuid)
increaseReq()
return puuid
async def getMatchIDHistory(region,PUUID,start_index,number_of_matches) -> list[str]:
"""Returns a players last n matchID's
Args:
region (str): which servers i.e euw1, na1
PUUID (str): riot account id
start_index (int): which game number to start
number_of_matches (int): number of matches since start_index
Returns:
list[str]: list of match ids
"""
request_url = f'https://{regions[region]}.api.riotgames.com/lol/match/v5/matches/by-puuid/{PUUID}/ids?start={start_index}&count={number_of_matches}&api_key={os.getenv("RIOT_API_KEY")}'
response = requests.get(request_url)
if str(response.status_code)[0] == '4':
return -1
increaseReq()
return response.json()
async def getGameData(summoner_name, region, match_id, PUUID) -> dict:
"""Returns the match object containing info on the user
Args:
summoner_name (str) : name of the user
region (str): which riot server i.e euw1, na1
match_id (str): id of match
PUUID (str): riot account id
Returns:
dict: kda -> str, won -> bool
"""
# check cache
if summoner_name in local_cache:
cache_hit = local_cache[summoner_name].getFromCache(match_id)
if cache_hit:
print(f'cache hit for match: {match_id} for user: {summoner_name}')
return Cache.cacheToJson(cache_hit)
# request
request_url = f'https://{regions[region]}.api.riotgames.com/lol/match/v5/matches/{match_id}?api_key={os.getenv("RIOT_API_KEY")}'
response = requests.get(request_url)
if str(response.status_code)[0] == '4':
return -1
response = response.json()['info']['participants']
for user in response:
if user['puuid'] == PUUID:
response = user
break
match = {
'kda' : float(response['challenges']['kda']),
'won' : bool(response['win']),
}
cache_game(match_id,summoner_name,match['kda'],match['won'])
increaseReq()
return match
async def wonGame(summoner_name, region, match_id, PUUID) -> bool:
match = await getGameData(summoner_name,region,match_id,PUUID)
return match['won']
# CACHNING FUNCTIONS
def cache_game(match_id,name, kda, won) -> None:
if name not in local_cache:
local_cache[name] = Cache.Cache()
print(f'cache created for {name}')
local_cache[name].addGameToCache(match_id,kda,won)
def cache_player(name, puuid):
local_cache[name] = Cache.Cache(puuid)
def offload() -> None:
json = '{'
for player in players:
json += f'"{player}" : {Cache.cacheToJson(local_cache[player])},'
json = json[:-1] + '}'
with open('../request_cache.json', 'w') as f:
f.write(json)
f.close()
def load() -> None:
    # read the whole file at once; the previous loop mixed iteration with
    # readline() and silently skipped every other line
    with open('../request_cache.json', 'r') as f:
        data = f.read()
    Cache.jsonToCache(data)
# DEBUGGING
def request_logs():
global NUM_OF_REQUEST
print(f'num of requests: {NUM_OF_REQUEST}')
NUM_OF_REQUEST = 0
def increaseReq():
global NUM_OF_REQUEST
NUM_OF_REQUEST += 1 | firozt/DiscordBot | Lstater/src/bot.py | bot.py | py | 8,188 | python | en | code | 0 | github-code | 13 |
3065271170 | """TnT (Train and Test) functions"""
from experiment_params import Parameters
from typing import Callable, Dict, Union
import torch
from torch import nn
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from networks import SmallNetwork, BigNetwork
from tqdm import tqdm
Log = Dict[str, Union[int, float]]
def print_log(_log: Log, step: int):
print(f" ##### STEP:{step} ##### ")
for k, v in _log.items():
print(f"{k}: {round(v, 3)}")
# Type definitions
ptModel = Union[SmallNetwork, BigNetwork]
ptLoss = Callable
ptOptimizer = optim.Optimizer
def train(
params: Parameters,
model: ptModel,
train_loader: DataLoader,
criterion: ptLoss,
optimizer: ptOptimizer,
logger: Callable,
) -> None:
# Set model to training mode
model.train()
accumulation_steps: int = 0
for idx, (data, target) in enumerate(train_loader):
# Init log
log: Log = {}
# Transfer data to device
data = data.to(params.device)
target = target.to(params.device)
# Forward and backward pass
prob_pred = model(data)
loss = criterion(prob_pred, target)
loss.backward()
# Counting samples and steps
model.n_samples += len(data)
model.n_steps += 1
accumulation_steps += 1
# Stepping if number of accumulation steps is reached or trainloader is empty
if ((idx+1) % params.n_accumulation_steps == 0) or ((idx + 1) == len(train_loader)):
optimizer.step()
optimizer.zero_grad()
# Log stats
log["train_loss"] = loss.item()
log["samples"] = model.n_samples
epsilon, best_alpha = optimizer.privacy_engine.get_privacy_spent( # type: ignore
params.delta)
grad_norm = optimizer.privacy_engine.max_grad_norm # type: ignore
noise_multiplier = optimizer.privacy_engine.noise_multiplier # type: ignore
log["privacy/epsilon"] = epsilon
log["privacy/delta"] = params.delta
log["privacy/best_alpha"] = best_alpha
log["privacy/noise_multiplier"] = noise_multiplier
log["privacy/grad_norm"] = grad_norm
logger(log, model.n_steps)
accumulation_steps = 0
# if not, accumulate
else:
optimizer.virtual_step() # type: ignore
def test(
params: Parameters,
model: ptModel,
test_loader: DataLoader,
criterion: ptLoss,
logger: Callable,
) -> None:
# Set model to evaluation mode and init eval variables
model.eval()
test_loss: float = 0
n_correct: int = 0
n_total: int = 0
# Init log
log: Log = {}
    # Disable gradients temporarily to save memory
with torch.no_grad():
for data, target in test_loader:
# Transfer data to device
data = data.to(params.device)
target = target.to(params.device)
# Test prediction and sum of batch loss
prob_pred = model(data)
test_loss += criterion(prob_pred, target).item()
# Get predicted class and count number of correct predictions
pred = prob_pred.argmax(dim=1)
n_correct += pred.eq(target.view_as(pred)).sum().item()
n_total += len(data)
# Calculate test accuracy and avg. loss
test_acc = 100. * (n_correct / n_total)
test_loss /= n_total
# Log test accuracy and loss
log["test_accuracy"] = test_acc
log["test_loss"] = test_loss
# Send logs from testing to logger
logger(log, model.n_steps)
model.train()
def train_and_test(
params: Parameters,
model: ptModel,
train_loader: DataLoader,
test_loader: DataLoader,
criterion: ptLoss,
optimizer: ptOptimizer
) -> None:
logger: Callable = print_log
for _ in tqdm(range(1, params.epochs+1), "Epoch"):
train(params, model, train_loader, criterion, optimizer, logger)
test(params, model, test_loader, criterion, logger)
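# Illustrative wiring (constructor arguments and data loaders are assumptions;
# the names come from this module's imports):
# params = Parameters()
# model = SmallNetwork().to(params.device)
# criterion = nn.CrossEntropyLoss()
# optimizer = optim.SGD(model.parameters(), lr=0.1)  # expected to have an Opacus privacy_engine attached
# train_and_test(params, model, train_loader, test_loader, criterion, optimizer)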
| OsvaldFrisk/dp-not-all-noise-is-equal | src/tnt.py | tnt.py | py | 4,118 | python | en | code | 0 | github-code | 13 |
38546199012 | import os
import os.path as osp
import logging
from tqdm import tqdm
import pandas as pd
import numpy as np
import xml.etree.ElementTree as ET
from utils import Center
log = logging.getLogger(__name__)
def load_xml(xml_path, frame_names=None, frame_dir=None):
if frame_names is None:
        assert 0, 'frame_names is None'
tree = ET.parse(xml_path)
root = tree.getroot()
xyvs = {}
for child in root:
if child.tag!='track':
continue
for child2 in child:
if child2.tag!='points':
continue
fid = int( child2.attrib['frame'] )
is_outside = True if child2.attrib['outside']=='1' else False
visi = True if child2.attrib['occluded']=='0' else False
pts = child2.attrib['points'].split(',')
x, y = float(pts[0]), float(pts[1])
used_in_game = None
for child3 in child2:
if child3.attrib['name']=='used_in_game':
if child3.text=='0':
used_in_game = False
elif child3.text=='1':
used_in_game = True
else:
assert 0, 'unknown used_in_game value: {}'.format(child3.text)
if used_in_game is None:
assert 0, 'used_in_game not found'
if (not is_outside) and used_in_game:
frame_path = osp.join(frame_dir, '{:05d}.png'.format(fid))
if fid in xyvs.keys():
assert 0, 'more than one balls are annotated as used_in_games=1 in fid: {}'.format(fid)
xyvs[fid] = {'frame_path': frame_path,
'center': Center(is_visible=visi, x=x, y=y),
}
xyvs2 = {}
for frame_name in frame_names:
ind = int( osp.splitext(frame_name)[0] )
if ind in xyvs.keys():
xyvs2[ind] = xyvs[ind]
else:
frame_path = osp.join(frame_dir, frame_name)
xyvs2[ind] = {'frame_path': frame_path,
'center': Center(is_visible=False, x=-1., y=-1),
}
return xyvs2
def get_clips(cfg, train_or_test='test', gt=True):
root_dir = cfg['dataset']['root_dir']
frame_dirname = cfg['dataset']['frame_dirname']
anno_dirname = cfg['dataset']['anno_dirname']
videos = cfg['dataset'][train_or_test]['videos']
clip_dict = {}
for video in videos:
frame_dir = osp.join(root_dir, frame_dirname, video)
xml_path = osp.join(root_dir, anno_dirname, '{}.xml'.format(video))
frame_names = os.listdir(frame_dir)
frame_names.sort()
ball_xyvs = load_xml(xml_path, frame_dir=frame_dir, frame_names=frame_names)
clip_dict[(0, video)] = {'clip_dir_or_path': frame_dir, 'clip_gt_dict': ball_xyvs, 'frame_names': frame_names}
return clip_dict
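# Expected cfg layout (keys are those read below; the values shown are illustrative):
# cfg = {'dataset': {'root_dir': ..., 'frame_dirname': ..., 'video_dirname': ...,
#                    'anno_dirname': ..., 'train': {'videos': [...]}, 'test': {'videos': [...]}},
#        'model': {'frames_in': 3, 'frames_out': 3},
#        'detector': {'step': 1},
#        'dataloader': {'train': True, 'test': True, 'train_clip': False, 'test_clip': False}}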
class Soccer(object):
def __init__(self, cfg):
self._root_dir = cfg['dataset']['root_dir']
self._frame_dirname = cfg['dataset']['frame_dirname']
self._video_dirname = cfg['dataset']['video_dirname']
self._anno_dirname = cfg['dataset']['anno_dirname']
self._train_videos = cfg['dataset']['train']['videos']
self._test_videos = cfg['dataset']['test']['videos']
self._frames_in = cfg['model']['frames_in']
self._frames_out = cfg['model']['frames_out']
self._step = cfg['detector']['step']
self._load_train = cfg['dataloader']['train']
self._load_test = cfg['dataloader']['test']
self._load_train_clip = cfg['dataloader']['train_clip']
self._load_test_clip = cfg['dataloader']['test_clip']
self._train_all = []
self._train_clips = {}
self._train_clip_gts = {}
self._train_clip_disps = {}
if self._load_train or self._load_train_clip:
train_outputs = self._gen_seq_list(self._train_videos)
self._train_all = train_outputs['seq_list']
self._train_num_frames = train_outputs['num_frames']
self._train_num_frames_with_gt = train_outputs['num_frames_with_gt']
self._train_num_matches = train_outputs['num_matches']
self._train_num_rallies = train_outputs['num_rallies']
self._train_disp_mean = train_outputs['disp_mean']
self._train_disp_std = train_outputs['disp_std']
if self._load_train_clip:
self._train_clips = train_outputs['clip_seq_list_dict']
self._train_clip_gts = train_outputs['clip_seq_gt_dict_dict']
self._train_clip_disps = train_outputs['clip_seq_disps']
self._test_all = []
self._test_clips = {}
self._test_clip_gts = {}
self._test_clip_disps = {}
if self._load_test or self._load_test_clip:
test_outputs = self._gen_seq_list(self._test_videos)
self._test_all = test_outputs['seq_list']
self._test_num_frames = test_outputs['num_frames']
self._test_num_frames_with_gt = test_outputs['num_frames_with_gt']
self._test_num_matches = test_outputs['num_matches']
self._test_num_rallies = test_outputs['num_rallies']
self._test_disp_mean = test_outputs['disp_mean']
self._test_disp_std = test_outputs['disp_std']
if self._load_test_clip:
self._test_clips = test_outputs['clip_seq_list_dict']
self._test_clip_gts = test_outputs['clip_seq_gt_dict_dict']
self._test_clip_disps = test_outputs['clip_seq_disps']
log.info('=> Soccer loaded' )
log.info("Dataset statistics:")
log.info("-----------------------------------------------------------------------------------")
log.info("subset | # batch | # frame | # frame w/ gt | # clip | # game | disp.[pixel]")
log.info("-----------------------------------------------------------------------------------")
if self._load_train:
log.info("train | {:7d} | {:7d} | {:13d} | {:6d} | {:6d} | {:2.1f}+/-{:2.1f} ".format(len(self._train_all), self._train_num_frames, self._train_num_frames_with_gt, self._train_num_rallies, self._train_num_matches, self._train_disp_mean, self._train_disp_std ) )
if self._load_train_clip:
num_items_all = 0
num_frames_all = 0
num_frames_with_gt_all = 0
num_clips_all = 0
disps_all = []
for key, clip in self._train_clips.items():
num_items = len(clip)
num_frames = 0
for tmp in clip:
num_frames += len( tmp['frames'] )
num_frames_with_gt = num_frames
clip_name = '{}_{}'.format(key[0], key[1])
disps = np.array( self._train_clip_disps[key] )
log.info("{} | {:7d} | {:7d} | {:13d} | | | {:2.1f}+/-{:2.1f}".format(clip_name, num_items, num_frames, num_frames_with_gt, np.mean(disps), np.std(disps) ))
num_items_all += num_items
num_frames_all += num_frames
num_frames_with_gt_all += num_frames_with_gt
disps_all.extend(disps)
num_clips_all += 1
log.info("all | {:7d} | {:7d} | {:13d} | {:6d} | | {:2.1f}+/-{:2.1f}".format(num_items_all, num_frames_all, num_frames_with_gt_all, num_clips_all, np.mean(disps_all), np.std(disps_all) ))
if self._load_test:
log.info("test | {:7d} | {:7d} | {:13d} | {:6d} | {:6d} | {:2.1f}+/-{:2.1f} ".format(len(self._test_all), self._test_num_frames, self._test_num_frames_with_gt, self._test_num_rallies, self._test_num_matches, self._test_disp_mean, self._test_disp_std) )
if self._load_test_clip:
num_items_all = 0
num_frames_all = 0
num_frames_with_gt_all = 0
num_clips_all = 0
disps_all = []
for key, test_clip in self._test_clips.items():
num_items = len(test_clip)
num_frames = 0
for tmp in test_clip:
num_frames += len( tmp['frames'] )
num_frames_with_gt = num_frames
clip_name = '{}_{}'.format(key[0], key[1])
disps = np.array( self._test_clip_disps[key] )
log.info("{} | {:7d} | {:7d} | {:13d} | | | {:2.1f}+/-{:2.1f}".format(clip_name, num_items, num_frames, num_frames_with_gt, np.mean(disps), np.std(disps) ))
num_items_all += num_items
num_frames_all += num_frames
num_frames_with_gt_all += num_frames_with_gt
disps_all.extend(disps)
num_clips_all += 1
log.info("all | {:7d} | {:7d} | {:13d} | {:6d} | | {:2.1f}+/-{:2.1f}".format(num_items_all, num_frames_all, num_frames_with_gt_all, num_clips_all, np.mean(disps_all), np.std(disps_all) ))
log.info("-----------------------------------------------------------------------------------")
def _gen_seq_list(self, video_names):
seq_list = []
clip_seq_list_dict = {}
clip_seq_gt_dict_dict = {}
clip_seq_disps = {}
num_frames = 0
num_matches = 1
num_rallies = 0
num_frames_with_gt = 0
disps = []
for video_name in video_names:
num_rallies += 1
clip_seq_list = []
clip_seq_gt_dict = {}
frame_dir = osp.join(self._root_dir, self._frame_dirname, video_name)
anno_path = osp.join(self._root_dir, self._anno_dirname, '{}.xml'.format(video_name))
frame_names = os.listdir(frame_dir)
frame_names.sort()
ball_xyvs = load_xml(anno_path, frame_dir=frame_dir, frame_names=frame_names)
fids = list(ball_xyvs.keys())
num_frames += len(frame_names)
num_frames_with_gt += len(ball_xyvs)
for i in range(len(ball_xyvs)-self._frames_in+1):
inds = fids[i:i+self._frames_in]
names = [frame_names[j] for j in inds]
paths = [ osp.join(frame_dir, name) for name in names]
annos = [ ball_xyvs[j] for j in range(i,i+self._frames_in)]
seq_list.append( {'frames': paths, 'annos': annos, 'match': 0, 'clip': video_name})
if i%self._step==0:
clip_seq_list.append( {'frames': paths, 'annos': annos, 'match': 0, 'clip': video_name})
clip_seq_list_dict[(0, video_name)] = clip_seq_list
            # compute displacement between consecutive frames
clip_disps = []
for i in range(len(ball_xyvs)-1):
xy1, visi1 = ball_xyvs[i]['center'].xy, ball_xyvs[i]['center'].is_visible
xy2, visi2 = ball_xyvs[i+1]['center'].xy, ball_xyvs[i+1]['center'].is_visible
if visi1 and visi2:
disp = np.linalg.norm(np.array(xy1)-np.array(xy2))
disps.append(disp)
clip_disps.append(disp)
for i in range(len(ball_xyvs)):
path = ball_xyvs[i]['frame_path']
clip_seq_gt_dict[path] = ball_xyvs[i]['center']
clip_seq_gt_dict_dict[(0, video_name)] = clip_seq_gt_dict
clip_seq_disps[(0, video_name)] = clip_disps
return { 'seq_list': seq_list,
'clip_seq_list_dict': clip_seq_list_dict,
'clip_seq_gt_dict_dict': clip_seq_gt_dict_dict,
'clip_seq_disps': clip_seq_disps,
'num_frames': num_frames,
'num_frames_with_gt': num_frames_with_gt,
'num_matches': num_matches,
'num_rallies': num_rallies,
'disp_mean': np.mean(np.array(disps)),
'disp_std': np.std(np.array(disps))}
@property
def train(self):
return self._train_all
@property
def test(self):
return self._test_all
@property
def train_clips(self):
return self._train_clips
@property
def train_clip_gts(self):
return self._train_clip_gts
@property
def test_clips(self):
return self._test_clips
@property
def test_clip_gts(self):
return self._test_clip_gts
| nttcom/WASB-SBDT | src/datasets/soccer.py | soccer.py | py | 12,974 | python | en | code | 0 | github-code | 13 |
30938486379 | def calculator(altitudeinput):
from math import exp
import numpy as np
def isa(pressure,temperature,walk,a):
R = 287 # [J/kgK]
g0 = 9.80665 # [m/s2]
temperatureend = temperature + a*walk
if a ==0:
pressureend = pressure * exp(g0 * walk / (-R * temperatureend))
else:
pressureend = pressure * (temperatureend / temperature)**(-g0 / (a * R))
densityend = pressureend / (R * temperatureend)
return (temperatureend,pressureend,densityend)
def atmosjump(p1,t1,alt):
altitudes = np.array([11000, 20000, 32000, 47000, 51000, 71000])
allA = np.array([-0.0065,0,0.001,0.0028,0,-0.0028,-0.002])
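        # per-layer ISA lapse rates [K/m]; this table is valid up to 71 km altitude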
idx = len(altitudes[altitudes<alt]) + 1
for i in range(idx):
p0 = p1
t0 = t1
h1 = min(alt,altitudes[i])
if i>0:
h1 = h1-altitudes[i-1]
a = allA[i]
t1, p1, rho1 = isa(p0, t0, h1, a)
return (t1,p1,rho1)
t1,p1,dens= atmosjump(101325.0 ,288.15, altitudeinput)
return(t1,p1,dens)
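if __name__ == '__main__':
    # Quick check: ISA at the 11 km tropopause should give ~216.65 K and ~22632 Pa.
    t, p, rho = calculator(11000)
    print(t, p, rho)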
| iamlucasvieira/ISA-Altitude | functionISA.py | functionISA.py | py | 1,140 | python | en | code | 0 | github-code | 13 |
29752239316 | from tkinter import *
import cv2
from PIL import Image
root = Tk()
root.title("Ventana")
root.config(bg="skyblue")
left_frame = Frame(root, width=200, height=400)
left_frame.grid(row = 0, column = 0, padx = 10, pady = 5)
right_frame = Frame(root, width=650, height=400, bg='grey')
right_frame.grid(row = 0, column = 1, padx = 10, pady =5)
tool_bar = Frame(left_frame, width=180, height=185, bg="purple")
tool_bar.grid(row = 2, column = 0, padx = 5 , pady = 5)
Label(left_frame, text = "Example Test").grid(row=1,column = 0, padx = 5, pady = 5)
image = PhotoImage(file="sunset.gif")
original_image = image.subsample(3,3)
Label(left_frame, image=original_image).grid(row=0, column=0, padx=5,pady=5)
Label(right_frame, image=image).grid(row=1, column=0, padx=5, pady=5)
Label(tool_bar, text="Tools", relief=RAISED).grid(row=0, column=0, padx=5, pady=3, ipadx=10)
Label(tool_bar, text="Filters", relief=RAISED).grid(row=0, column=1, padx=5, pady=3, ipadx=10)
Label(tool_bar, text="Select").grid(row=1, column=0, padx=5, pady=3)
Label(tool_bar, text="Crop").grid(row=2, column=0, padx=5, pady=3)
Label(tool_bar, text="Rotate & Flip").grid(row=3, column=0, padx=5, pady=3)
Label(tool_bar, text="Resize").grid(row=4, column=0, padx=5, pady=3)
Label(tool_bar, text="Exposure").grid(row=5, column=0, padx=5, pady=3)
root.mainloop() | FranH20/GUI-Python | readhuli.py | readhuli.py | py | 1,331 | python | en | code | 0 | github-code | 13 |
3928742530 | import os
import sys
import inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import crawler as crawler
def find_gen(baseSettings, tests):
for i, test in enumerate(tests):
settings = baseSettings
for key,val in test[0].items():
settings[key] = val
myCrawler = crawler.createCrawler(settings)
myCrawler.memo = []
gen = myCrawler.generator()
cnt = 0
results = []
try:
while True:
name = next(gen)
results.append(name)
assert name in test[2], "Unexpected file ({}) appeared in found files. During Test: {}".format(name, i)
cnt += 1
except StopIteration:
assert cnt == test[1], "Found {} instead of {} {} files".format(cnt, test[1], test[0])
def find_list(baseSettings, tests):
for i,test in enumerate(tests):
settings = baseSettings
for key,val in test[0].items():
settings[key] = val
myCrawler = crawler.createCrawler(settings)
myCrawler.memo = []
try:
results = myCrawler.getList()
assert len(results) == test[1], "Found {} instead of {} files".format(len(results), test[1])
if len(test[2]) > 0:
for name in results:
assert name in test[2], "Unexpected file ({}) in Test {} appeared in found files. Expected {}".format(name, i, test[2])
except ValueError as VE:
assert settings["onlyOnce"] == False, "Unexpected exeption raises"
singleReturnCnt = 0
def callback_singleReturn(baseSettings, tests):
global singleReturnCnt
settings = baseSettings
settings["onlyOnce"] = False
for test in tests:
for key,val in test[0].items():
settings[key] = val
singleReturnCnt = 0
def callback (file):
global singleReturnCnt
if len(test[2]) > 0:
assert file in test[2], "Couldn't find file ({}) in {}".format(file, test[2])
singleReturnCnt +=1
myCrawler = crawler.createCrawler(settings, callback)
myCrawler.process()
assert singleReturnCnt == test[1], "Found {} instead of {} files".format(singleReturnCnt, test[1])
def callback_listReturn(baseSettings, tests):
settings = baseSettings
settings["singleReturn"] = False
for test in tests:
for key,val in test[0].items():
settings[key] = val
settings["onlyOnce"] = True
def callback (files):
if len(test[2]) > 0:
for file in files:
assert file in test[2], "Couldn't find file ({}) in {}".format(file, test[2])
assert len(files) == test[1], "Found {} instead of {} files".format(len(files), test[1])
myCrawler = crawler.createCrawler(settings, callback)
myCrawler.memo = []
myCrawler.process()
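# Shape of the `tests` argument used throughout (inferred from the asserts above):
# each entry is (settings-override dict, expected file count, list of expected names), e.g.
#     tests = [({"maxDepth": 1}, 2, ["a.txt", "b.txt"])]  # illustrative key and values only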
| b2aff6009/crawler | tests/testutils.py | testutils.py | py | 3,048 | python | en | code | 0 | github-code | 13 |
21161016845 | """
This program performs "tokenization", i.e. breaks a text up into words. Tokenization is a necessary starting point for the vast majority of language technology tasks.
- dev.txt must be located in the same folder.
- The program is written for Python 3
To run the program: obliga_steinrr.py
"""
# encoding: utf-8
class readFile:
""" Read filename and returns with content"""
def __init__(self, filename):
self.filename = filename
def read(self):
text = open(self.filename)
self.content = text.read()
text.close()
return (self.content)
###################################
########## main ###################
def main():
# initial parameters:
filename = "dev.txt"
occur = "er"
count = 0
liste = []
# Task 1:
################################################################################
# Part a:
################################
# Read text
problem = readFile(filename) # read file in same folder.
text = problem.read()
# Part b:
#################################
counted_occur = text.count(occur)
print ("------------------------")
print ("Task 1b: Count instances ")
print ("Combination \"%s\" occured %d in text: \"%s\"." %(occur, counted_occur, filename))
text = text.split() # split up text
for word in text:
if word.endswith(occur):
count += 1
        # NOTE: Task C does not explicitly say to ignore characters like . , : - etc.
        # However, I have made the extra effort to do so.
        if (word[-1] == ".") or (word[-1] == ",") or (word[-1] == ":") or (word[-1] == "-"): # trailing punctuation is not part of the word
if len(word)>2:
liste.append(word[-3:-1])
else:
liste.append(word)
else:
if len(word)>2:
liste.append(word[-2:])
else:
liste.append(word)
print ("While %d words ends with letter combination: \"%s\" in the same text" %(count, occur))
# Part c:
####################################
print ("------------------------")
print ("task 1c:")
# remove "-" from list
for word in liste:
if word[-1]=="-":
liste.remove(word)
for word in liste:
if word[-1] == "-":
print (word)
print ("first 3 words in list: %s" %liste[:3])
# convert list to string:
stringText = ' '.join(map(str, liste))
print ("Converted list to string.")
print ("First nine letters in string: %s" %stringText[:9])
# Task 2:
#######################################################################################
# Part a and b:
####################################
print ("------------------------")
print ("Task 2a and 2b:")
infile = open(filename)
lines = []
for line in infile:
if (line != "\n"): # split line and ignore lines with space (\n), we do not count empty lines
lines.append(line)
infile.close()
print ("Read file: \"%s\" as a list" %filename)
#print (lines)
counted_words, counted_lines = countLine(lines)
print ("Counted %d lines and %d words from file \"%s\"" %(counted_lines, counted_words, filename))
def countLine(lines):
""" Count lines and words from list"""
count_words = 0
count_lines = 0
for line in lines:
words = line.split()
for word in words:
#if (word != "\n"): # make sure a word is not equal "\n"
count_words += 1
count_lines += 1
return (count_words, count_lines)
if __name__ == "__main__":
main()
""" Run log:
python oblig1a_steinrr.py ------------------------
Task 1b: Count instances
Combination "er" occured 5093 in text: "dev.txt".
While 2625 words ends with letter combination: "er" in the same text
------------------------
task 1c:
first 3 words in list: ['er', 'nn', 'en']
Converted list to string.
First nine letters in string: er nn en
------------------------
Task 2a and 2b:
Read file: "dev.txt" as a list
Counted 972 lines and 32243 words from file "dev.txt"
"""
| rayruu/inf1820 | 1a/oblig1a_steinrr.py | oblig1a_steinrr.py | py | 4,195 | python | en | code | 0 | github-code | 13 |
def homework_9(bag_size, items): # remember to rename the file to your own student ID (e.g. 1104813.py)
# depth first search / breadth first search + backtracking
len_items=len(items)
    weight=[] # item weights
    price=[] # item values
for i in items:
weight.append(i[0])
price.append(i[1])
matrix=[[0 for i in range(bag_size+1)]for j in range(len_items)]
    # initialize a (num items) x (bag_size + 1) matrix of zeros
for i in range (len_items):
for j in range(bag_size+1):
if j < weight[i]:
matrix[i][j] = matrix[i - 1][j]
else:
matrix[i][j] = max(matrix[i - 1][j], matrix[i - 1][j - weight[i]] + price[i])
    # fill the table row by row to find the optimal value
for i in range(len_items):
re = []
for j in range(bag_size+1):
            re.append(matrix[i][j]) # collect every computed result
return re[-1]
if __name__ == '__main__':
bag_size = 3
items = [[1,25],[4,120],[4,30],[1,130],[2,20]]
print(homework_9(bag_size, items))
# 155
| daniel880423/Member_System | file/hw9/1100419/hw9_s1100419_0.py | hw9_s1100419_0.py | py | 1,041 | python | en | code | 0 | github-code | 13 |
25823007530 | import os
from argparse import ArgumentParser
from phonerouting import PhoneOperatorList
def csv_to_dict(filename, delimiter=',', skip_header=1):
"""Read CSV file and convert it into a dictionary based on
the first two columns. The first column is used as keys of
type str, the second as the values, which are cast to float.
Parameters
----------
filename : str
Path to the CSV file
delimiter : str
Delimiter between the columns
skip_header : int
Number of lines to skip
"""
out_dict = {}
f = open(filename)
raw = f.read()
f.close()
raw = raw.split('\n')
    for line in raw[skip_header:]:
        if not line.strip():
            continue  # skip blank lines, e.g. a trailing newline at end of file
        columns = line.split(delimiter)
        out_dict[columns[0]] = float(columns[1])
    return out_dict
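# Example (a sketch, assuming a price list file "operator_a.csv" with this content):
#
#     prefix,price
#     46,0.17
#     468,0.15
#
# csv_to_dict("operator_a.csv") -> {'46': 0.17, '468': 0.15}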
def main():
script_description = 'Get the cheapest price to call a number along with'
script_description += ' the name of the operator. Price lists are loaded'
script_description += ' from CSV files. By default, all files in the'
script_description += ' directory operators will be loaded. Alternatively'
script_description += ' specific operators can be selected using the'
script_description += ' arguments below.'
parser = ArgumentParser(description=script_description)
parser.add_argument('number', type=str,
help='Phone number without leading "+" or zeros')
parser.add_argument('--operators', type=str, default=None, nargs='+',
help='CSV files containing the price lists')
parser.add_argument('--operator-path', type=str, default='operators',
help='Path to CSV files containing the price lists')
args = parser.parse_args()
if args.operators is None:
args.operators = [os.path.join(args.operator_path, filename)
for filename in os.listdir(args.operator_path)
if filename.endswith('.csv')]
if len(args.operators) == 0:
raise ValueError("operator-path contains no CSV files")
# Use filename without extension as operator name
names = [fn.split('/')[-1][:-4] for fn in args.operators]
price_lists = [csv_to_dict(fn) for fn in args.operators]
# Initialize PhoneOperatorList with first operator file
operator_list = PhoneOperatorList(names, price_lists)
cheapest = operator_list.get_price(args.number)
if cheapest[0] is not None:
print('The cheapest price for this number is with ' +
f'{cheapest[0]}: ${cheapest[1]:.2f}/minute.')
else:
print("This number cannot be called with any of the listed operators.")
if __name__ == '__main__':
main()
| ufeindt/alatest-challenge | get_price.py | get_price.py | py | 2,696 | python | en | code | 0 | github-code | 13 |
70486631058 | import multiprocessing
# import copy_reg
import os
import types
from allennlp.predictors.predictor import Predictor
_model_url = "https://storage.googleapis.com/allennlp-public-models/coref-spanbert-large-2020.02.27.tar.gz"
# def _reduce_method(m):
# if m.im_self is None:
# return getattr, (m.im_class, m.im_func.func_name)
# else:
# return getattr, (m.im_self, m.im_func.func_name)
# copy_reg.pickle(types.MethodType, _reduce_method)
class MultiProcCRClass():
__instance = None
@staticmethod
def getInstance(verbose = False):
""" Static access method. """
if MultiProcCRClass.__instance == None:
MultiProcCRClass(verbose)
return MultiProcCRClass.__instance
def __init__(self, verbose):
""" Virtually private constructor. """
if MultiProcCRClass.__instance != None:
raise Exception("This class is a singleton!")
else:
MultiProcCRClass.__instance = self
self._verbose = verbose
if self._verbose == True :
print("Initializing predictor for CResolution")
self._predictor = Predictor.from_path(_model_url)
def splitArticle(self, article):
if self._verbose == True:
print("Splitting article.")
self.list_paras = article.split('\n')
return self.list_paras
def resolve_ForkIt(self, article):
paras = self.splitArticle(article)
if self._verbose == True:
print("Fork and then resolve")
pool = multiprocessing.Pool()
result = pool.map(self.resolve, paras)
return result
def resolve_ForkIt_custom(self, paras):
if type(paras) is not list:
raise Exception("Not a list of strings!")
if self._verbose == True:
print("Fork and then resolve")
pool = multiprocessing.Pool()
result = pool.map(self.resolve, paras)
return result
def resolve(self, text):
if self._verbose == True:
print("Resolving with PID : ", os.getpid())
self._prediction = self._predictor.predict(document=text)
self._resolved = self._predictor.coref_resolved(text)
return self._resolved | arg-hya/CRModels | MultiProcCRClass.py | MultiProcCRClass.py | py | 2,236 | python | en | code | 0 | github-code | 13 |
24534387320 | """
Given an integer array nums and an integer k, return the k most frequent elements.
You may return the answer in any order.
Test/edge cases:
- single element, k = 1
- one unique num, multiple elements of same type, k = 1
- multiple unique elements, k = max
- multiple unique, k = 1
- multiple unique, k != 1 or max (somewhere in between)
Logic:
- Create a hashmap that counts the occurrences of each element in the array (a histogram)
"""
from ds_templates import test_series as ts
def top_k_freq_sort(nums: list[int], k: int) -> list[int]:
histo = {}
for num in nums:
histo[num] = histo.get(num, 0) + 1
lst = [(freq, num) for (num, freq) in histo.items()]
lst.sort(reverse=True)
    lst = [num for (freq, num) in lst]  # keep only the values, as the docstring promises elements rather than (freq, num) pairs
    return lst[:k]
nums = [1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6]
# print(top_k_freq_sort(nums, 4))
def top_k_freq_bucket(nums: list[int], k:int) -> list[int]:
histo, res = {}, []
bucket = [[] for _ in range(len(nums) + 1)]
for num in nums:
histo[num] = histo.get(num, 0) + 1
for key in histo:
bucket[histo[key]].append(key)
print(histo)
for i in range(len(nums), 0, -1):
for n in bucket[i]:
res.append(n)
if len(res) == k:
return res
nums_bucket = [1]
print(top_k_freq_bucket(nums_bucket, 1))
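# For reference: a minimal standard-library sketch of the same task (not part
# of the original solutions). Counter.most_common does the counting and the
# ordering in one step.
from collections import Counter

def top_k_freq_counter(nums: list[int], k: int) -> list[int]:
    return [num for num, _ in Counter(nums).most_common(k)]

print(top_k_freq_counter(nums, 4))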
| Hintzy/leetcode | Medium/347_top_k_frerquent_elements/top_k_frequent.py | top_k_frequent.py | py | 1,392 | python | en | code | 0 | github-code | 13 |
29762283959 | # -*- coding: utf-8 -*-
"""
Created on Thu May 19 12:26:26 2022
@author: aceso
"""
#%% Modules
import pandas as pd
import os
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import datetime
import pickle
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping
from text_classification_lib import EDA, ModelConfig, Performance
from tensorflow.keras.utils import plot_model
# Constant
URL = "https://raw.githubusercontent.com/susanli2016/PyCon-Canada-2019-NLP-Tutorial/master/bbc-text.csv"
TOKEN_PATH = os.path.join(os.getcwd(), "Saved", "token.json")
LOG_PATH = os.path.join(os.getcwd(), 'Log')
log_dir = os.path.join(LOG_PATH, datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
MODEL_PATH = os.path.join(os.getcwd(), "Saved", "model.h5")
ONEHOT_SAVEPATH = os.path.join(os.getcwd(), "Saved", "onehot.pkl")
#%% Exploratory Data Analysis
# Data Loading
df = pd.read_csv(URL)
category = df["category"] # There're 5 categories
text = df["text"]
# Data Cleaning
eda = EDA()
split_text = eda.split(text)
# Data Vectorization
token_text = eda.category_token(data=split_text, token_save_path=TOKEN_PATH, num_words=2000)
print(token_text[2])
# Data Sequence Padding
[np.shape(i) for i in token_text] # to check the maxlen of word in each text
# maxlen is 300
pad_text = eda.text_pad_sequence(token_text)
# Data Preprocessing (Target One Hot Encoding)
one = OneHotEncoder(sparse=False)
nb_categories = len(category.unique())
encoded_category = one.fit_transform(np.expand_dims(category, axis=-1))
pickle.dump(one, open(ONEHOT_SAVEPATH, "wb"))
# Split the data into training and testing
X_train, X_test, y_train, y_test = train_test_split(pad_text, encoded_category,
test_size=0.2, random_state=123)
# The model only accept 3D array as input
X_train = np.expand_dims(X_train, axis=-1)
X_test = np.expand_dims(X_test, axis=-1)
# Inverse the category
print(y_train[0]) #[0,0,0,1,0]
print(one.inverse_transform(np.expand_dims(y_train[0], axis=0))) # This one is sport
#%% Model Configuration
nb_categories = len(category.unique())
nn = ModelConfig()
model = nn.lstm_layer(nb_words=2000, nb_categories=nb_categories, nodes=64)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics="acc")
# Plot model architecture
plot_model(model)
tensorboard = TensorBoard(log_dir, histogram_freq=1)
estop = EarlyStopping(monitor="val_loss", patience=5)
model.fit(X_train, y_train, epochs=20, validation_data=(X_test, y_test),
callbacks=[tensorboard, estop])
#%% Model Evaluation and Analysis
predicted = np.empty([len(X_test), 5]) # 5 onehot columns
for i, test in enumerate(X_test):
predicted[i,:] = model.predict(np.expand_dims(test, axis=0))
y_pred = np.argmax(predicted, axis=1)
y_true = np.argmax(y_test, axis=1)
score = Performance()
result = score.evaluate(y_true, y_pred)
#%% Model Saving
model.save(MODEL_PATH)
| AceSongip/Article_Categorization_Using_NLP | text_classification_training.py | text_classification_training.py | py | 2,984 | python | en | code | 0 | github-code | 13 |
23765601380 | from securityheaders.checkers import Finding, FindingType, FindingSeverity
from .checker import ExpectCTChecker
class ExpectCTHTTPReportURIChecker(ExpectCTChecker):
def check(self, headers, opt_options=dict()):
        findings = []
        expectct = self.getexpectct(headers)
        if not expectct:
            return findings
        if expectct.reporturi() and expectct.reporturi().startswith('http://'):
            findings.append(Finding(expectct.headerkey, FindingType.SRC_HTTP, expectct.headerkey + ' communicates its reports via an insecure channel.', FindingSeverity.LOW, expectct.reporturi()))
return findings
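# Example of a header this checker flags (the report endpoint uses plain HTTP;
# the host and path are illustrative):
#
#     Expect-CT: max-age=86400, enforce, report-uri="http://example.com/ct-report"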
| koenbuyens/securityheaders | securityheaders/checkers/expectct/httpreporturi.py | httpreporturi.py | py | 680 | python | en | code | 206 | github-code | 13 |
40791256980 |
import os
import torch
import numpy as np
import re
from torchvision.io import read_image
from pathlib import Path
from tqdm import tqdm
from PIL import Image
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchvision
from torch.utils.data import Dataset
classes = ['airplane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def tensor2pil(image: torch.Tensor):
    '''Convert a tensor image in [-1, 1] (or a list/batch of them) to PIL.'''
if isinstance(image, list) or image.ndim == 4:
return [tensor2pil(im) for im in image]
assert image.ndim == 3
output_image = Image.fromarray(((image + 1.0) * 127.5).clamp(
0.0, 255.0).to(torch.uint8).permute(1, 2, 0).detach().cpu().numpy())
return output_image
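# Quick sanity check (a sketch): an all-zeros tensor in [-1, 1] maps to a
# mid-gray image, since (0 + 1) * 127.5 = 127.5 -> 127 after the uint8 cast.
#
#     img = tensor2pil(torch.zeros(3, 8, 8))  # PIL.Image of size (8, 8)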
@torch.no_grad()
def compute_clip_score(dataset: DataLoader, clip_model="ViT-B/32", device="cuda", how_many=5000):
print("Computing CLIP score")
import clip as openai_clip
if clip_model == "ViT-B/32":
clip, clip_preprocessor = openai_clip.load("ViT-B/32", device=device)
clip = clip.eval()
elif clip_model == "ViT-G/14":
import open_clip
clip, _, clip_preprocessor = open_clip.create_model_and_transforms("ViT-g-14", pretrained="laion2b_s12b_b42k")
clip = clip.to(device)
clip = clip.eval()
clip = clip.float()
else:
raise NotImplementedError
cos_sims = []
count = 0
for imgs, txts in tqdm(dataset):
imgs_pil = [clip_preprocessor(tensor2pil(img)) for img in imgs]
imgs = torch.stack(imgs_pil, dim=0).to(device)
texts = list()
for item in txts:
texts.append(classes[item])
tokens = openai_clip.tokenize(texts).to(device)
# Prepending text prompts with "A photo depicts "
# https://arxiv.org/abs/2104.08718
prepend_text = "A photo depicts "
prepend_text_token = openai_clip.tokenize(prepend_text)[:, 1:4].to(device)
prepend_text_tokens = prepend_text_token.expand(tokens.shape[0], -1)
start_tokens = tokens[:, :1]
new_text_tokens = torch.cat(
[start_tokens, prepend_text_tokens, tokens[:, 1:]], dim=1)[:, :77]
last_cols = new_text_tokens[:, 77 - 1:77]
last_cols[last_cols > 0] = 49407 # eot token
new_text_tokens = torch.cat([new_text_tokens[:, :76], last_cols], dim=1)
img_embs = clip.encode_image(imgs)
text_embs = clip.encode_text(new_text_tokens)
similarities = F.cosine_similarity(img_embs, text_embs, dim=1)
cos_sims.append(similarities)
count += similarities.shape[0]
if count >= how_many:
break
clip_score = torch.cat(cos_sims, dim=0)[:how_many].mean()
clip_score = clip_score.detach().cpu().numpy()
return clip_score
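# Note: this returns the raw mean cosine similarity over the first `how_many`
# image/text pairs. The published CLIPScore (Hessel et al., 2021) additionally
# rescales as w * max(cos, 0) with w = 2.5; apply that outside if you need the
# published metric.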
class CustomImageDataset(Dataset):
def __init__(self, img_dir, transform=None):
self.img_dir = img_dir
self.transform = transform
self.labels = list()
self.images = list()
for filename in os.listdir(self.img_dir):
f = os.path.join(self.img_dir, filename)
if os.path.isfile(f):
label = re.findall(r'\d+', filename)
self.labels.append(torch.tensor(int(label[0]), dtype=torch.int8))
image = read_image(f)
if self.transform:
self.images.append(self.transform(image))
else:
self.images.append(image)
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
return self.images[idx], self.labels[idx]
if __name__ == "__main__":
transform_real = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
testset_real = torchvision.datasets.CIFAR10(root='../stablediffusion/data', train=False, download=True, transform=transform_real)
testloader_real = torch.utils.data.DataLoader(testset_real, batch_size=128, shuffle=False, num_workers=2)
img_dir = "../stablediffusion/fake_images/"
transform_fake = transforms.Compose([
# transforms.ToTensor(),
transforms.ConvertImageDtype(torch.float32),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
# transforms.Resize(32)
])
testset_fake = CustomImageDataset(img_dir = img_dir,transform = transform_fake)
testloader_fake = torch.utils.data.DataLoader(testset_fake, batch_size=128, shuffle=True, num_workers=2)
clip_score_real = compute_clip_score(testloader_real, how_many = 5000)
clip_score_fake = compute_clip_score(testloader_fake, how_many = 1000)
print("clip score real:", clip_score_real)
print("clip score fake:", clip_score_fake) | zheyizhu/Generative-models | evaluation/clip_score.py | clip_score.py | py | 4,809 | python | en | code | 0 | github-code | 13 |
37205035144 | # -*- coding: utf-8 -*-
import os
import unittest # pytest in future
config_filename = os.path.join(os.path.dirname(__file__),
"../config.yaml")
secrets_filename = os.path.join(os.path.dirname(__file__),
"../secrets/secrets")
os.environ["ARTIFACT_TRACKER_TYPE"] = "test"
os.environ["ARTIFACT_TRACKER_CONFIG"] = config_filename
os.environ["ARTIFACT_TRACKER_SECRETS"] = secrets_filename
from artifact_tracker import tracker_app # noqa E402
class ArtifactTrackerTests(unittest.TestCase):
def setUp(self):
self.tracker_app = tracker_app
self.tracker_app.create_test_app()
self.app = self.tracker_app.app
self.tracker_app.log.debug("initing test tracker_app")
self.app.config["TESTING"] = True
self.app.config["WTF_CSRF_ENABLED"] = True
self.app.testing = True
self.client = self.app.test_client()
self.app_context = self.app.app_context
self.app.config = self.client.application.config
self._ctx = self.app.test_request_context()
self._ctx.push()
from artifact_tracker import create_db
from sqlalchemy_utils import database_exists, create_database
with self.app_context():
if not database_exists(self.app.config["SQLALCHEMY_DATABASE_URI"]):
create_database(self.app.config["SQLALCHEMY_DATABASE_URI"])
create_db()
def tearDown(self):
from sqlalchemy_utils import database_exists, drop_database
with self.app_context():
self.tracker_app.log.debug("tearing down...")
self.tracker_app.db.session.close()
self.tracker_app.db.engine.dispose()
if database_exists(self.app.config["SQLALCHEMY_DATABASE_URI"]):
drop_database(self.app.config["SQLALCHEMY_DATABASE_URI"])
self._ctx.pop()
| oduwsdl/scholarly-orphans-trackers | tests/__init__.py | __init__.py | py | 1,900 | python | en | code | 0 | github-code | 13 |
34651988228 | import re
import os
import sys
import csv
import shutil
import logging
from subprocess import Popen, PIPE
from dataclasses import dataclass
from bs4 import BeautifulSoup
import requests
__version__ = "0.3.7"
CFG_DIR = os.path.expanduser("~/.venvipy")
DB_FILE = os.path.expanduser("~/.venvipy/py-installs")
ACTIVE_DIR = os.path.expanduser("~/.venvipy/selected-dir")
ACTIVE_VENV = os.path.expanduser("~/.venvipy/active-venv")
PYPI_URL = "https://pypi.org/search/"
logger = logging.getLogger(__name__)
#]===========================================================================[#
#] FIND PYTHON 3 INSTALLATIONS [#============================================[#
#]===========================================================================[#
@dataclass
class PythonInfo:
"""Info about Python installs."""
py_version: str
py_path: str
def to_version(value):
"""Convert a value to a readable version string.
"""
return f"Python {value}"
def to_path(bin_path, version):
"""Return the absolute path to a python binary.
"""
return os.path.join(bin_path, f"python{version}")
def is_writable(target_dir):
"""Test whether a directory is writable.
"""
if os.path.exists(target_dir):
test_file = os.path.join(target_dir, "test_file")
try:
logger.debug("Testing whether filesystem is writable...")
with open(test_file, "w+", encoding="utf-8") as f:
f.write("test")
os.remove(test_file)
logger.debug("Filesystem is writable")
return True
except OSError as e:
logger.debug(f"Filesystem is read-only\n{e}")
return False
else:
logger.debug(f"No such file or directory: {target_dir}")
return False
def ensure_confdir():
"""Create `~/.venvipy` config directory.
"""
if not os.path.exists(CFG_DIR):
os.mkdir(CFG_DIR)
def ensure_dbfile():
"""Create the database in `~/.venvipy/py-installs`.
"""
if not os.path.exists(DB_FILE):
get_python_installs()
def ensure_active_dir():
"""Create the file that holds the selected path to venvs.
"""
ensure_confdir()
if not os.path.exists(ACTIVE_DIR):
with open(ACTIVE_DIR, "w+", encoding="utf-8") as f:
f.write("")
def ensure_active_venv():
"""Create the file that holds the selected path to venvs.
"""
ensure_confdir()
if not os.path.exists(ACTIVE_VENV):
with open(ACTIVE_VENV, "w+", encoding="utf-8") as f:
f.write("")
def get_python_version(py_path):
"""Return Python version.
"""
with Popen([py_path, "-V"], stdout=PIPE, text="utf-8") as res:
out, _ = res.communicate()
python_version = out.strip()
return python_version
def get_python_installs(relaunching=False):
"""
Write the found Python versions to `py-installs`. Create
a new database if `relaunching=True`.
"""
versions = [
"3.11", "3.10", "3.9", "3.8", "3.7", "3.6", "3.5", "3.4", "3.3"
]
py_info_list = []
ensure_confdir()
if not os.path.exists(DB_FILE) or relaunching:
with open(DB_FILE, "w", newline="", encoding="utf-8") as cf:
fields = ["PYTHON_VERSION", "PYTHON_PATH"]
writer = csv.DictWriter(
cf,
delimiter=",",
quoting=csv.QUOTE_ALL,
fieldnames=fields
)
writer.writeheader()
for i, version in enumerate(versions):
python_path = shutil.which(f"python{version}")
if python_path is not None:
python_version = get_python_version(python_path)
py_info = PythonInfo(python_version, python_path)
py_info_list.append(py_info)
writer.writerow({
"PYTHON_VERSION": py_info.py_version,
"PYTHON_PATH": py_info.py_path
})
cf.close()
# add the system's Python manually if running in a virtual env
if "VIRTUAL_ENV" in os.environ:
system_python = os.path.realpath(sys.executable)
add_python(system_python)
return py_info_list[::-1]
return False
def add_python(py_path):
"""
Write (append) a Python version and its path to `py-installs`.
"""
ensure_dbfile()
with open(DB_FILE, "a", newline="", encoding="utf-8") as cf:
fields = ["PYTHON_VERSION", "PYTHON_PATH"]
writer = csv.DictWriter(
cf,
delimiter=",",
quoting=csv.QUOTE_ALL,
fieldnames=fields
)
writer.writerow({
"PYTHON_VERSION": get_python_version(py_path),
"PYTHON_PATH": py_path
})
cf.close()
# remove the interpreter if running in a virtual env
if "VIRTUAL_ENV" in os.environ:
remove_env()
def remove_env():
"""
Remove our interpreter if we're running in a virtual
environment.
"""
with open(DB_FILE, "r", encoding="utf-8") as f:
lines = f.readlines()
with open(DB_FILE, "w", encoding="utf-8") as f:
for line in lines:
if sys.executable not in line.strip("\n"):
f.write(line)
#]===========================================================================[#
#] GET VENVS [#==============================================================[#
#]===========================================================================[#
@dataclass
class VenvInfo:
"""_"""
venv_name: str
venv_version: str
site_packages: str
is_installed: str
venv_comment: str
def get_venvs(path):
"""
Get the available virtual environments
from the specified folder.
"""
# return an emtpty list if directory doesn't exist
if not os.path.isdir(path):
return []
venv_info_list = []
for i, venv in enumerate(os.listdir(path)):
# build path to venv directory
valid_venv = os.path.join(path, venv)
# only look for dirs
if not os.path.isdir(valid_venv):
continue
# build path to pyvenv.cfg file
cfg_file = os.path.join(valid_venv, "pyvenv.cfg")
if not os.path.isfile(cfg_file):
continue
# build path to venvipy.cfg file
venvipy_cfg_file = os.path.join(valid_venv, "venvipy.cfg")
venv_name = os.path.basename(valid_venv)
venv_version = get_config(cfg_file, "version")
site_packages = get_config(cfg_file, "site_packages")
is_installed = get_config(cfg_file, "installed")
venv_comment = get_comment(venvipy_cfg_file)
venv_info = VenvInfo(
venv_name,
venv_version,
site_packages,
is_installed,
venv_comment
)
venv_info_list.append(venv_info)
return venv_info_list[::-1]
def get_config(cfg_file, cfg):
"""
Return the values as string from a `pyvenv.cfg` file.
    Values for `cfg` can be: `version`, `py_path`,
    `site_packages` or `installed`.
"""
with open(cfg_file, "r", encoding="utf-8") as f:
lines = f.readlines()
if lines[2][13] == ".":
version = lines[2][10:13].strip() # python 3.x
else:
version = lines[2][10:14].strip() # python 3.10+
version_str = to_version(lines[2][10:].strip())
binary_path = to_path(lines[0][7:].strip(), version)
site_packages = lines[1][31:].strip()
if cfg == "version":
return version_str
if cfg == "py_path":
return binary_path
if cfg == "site_packages":
if site_packages == "true":
return "global"
if site_packages == "false":
return "isolated"
return "N/A"
if cfg == "installed":
ensure_dbfile()
with open(DB_FILE, newline="", encoding="utf-8") as cf:
reader = csv.DictReader(cf, delimiter=",")
for info in reader:
if binary_path == info["PYTHON_PATH"]:
return "yes"
return "no"
return "N/A"
def get_active_dir_str():
"""Return path to selected directory.
"""
ensure_active_dir()
with open(ACTIVE_DIR, "r", encoding="utf-8") as f:
selected_dir = f.read()
return selected_dir
return ""
def get_selected_dir():
"""
Get the selected directory path from `selected-dir`
file. Return `get_venvs()`.
"""
selected_dir = get_active_dir_str()
return get_venvs(selected_dir)
def get_comment(cfg_file):
"""Get the comment string from `venvipy_cfg` file.
"""
if os.path.exists(cfg_file):
with open(cfg_file, "r", encoding="utf-8") as f:
venv_comment = f.read()
return venv_comment
return ""
#]===========================================================================[#
#] GET INFOS FROM PYTHON PACKAGE INDEX [#====================================[#
#]===========================================================================[#
@dataclass
class PackageInfo:
"""_"""
pkg_name: str
pkg_version: str
pkg_info_2: str
pkg_summary: str
def get_package_infos(pkg):
"""
Scrape package infos from [PyPI](https://pypi.org).
"""
snippets = []
package_info_list = []
for page in range(1, 3):
params = {"q": pkg, "page": page}
with requests.Session() as session:
res = session.get(PYPI_URL, params=params)
soup = BeautifulSoup(res.text, "html.parser")
snippets += soup.select('a[class*="snippet"]')
if not hasattr(session, "start_url"):
session.start_url = res.url.rsplit("&page", maxsplit=1).pop(0)
for snippet in snippets:
pkg_name = re.sub(
r"\s+",
" ",
snippet.select_one('span[class*="package-snippet__name"]').text.strip()
)
pkg_version = re.sub(
r"\s+",
" ",
snippet.select_one('span[class*="package-snippet__version"]').text.strip()
)
pkg_info_2 = re.sub(
r"\s+",
" ",
snippet.select_one('span[class*="package-snippet__created"]').text.strip()
)
pkg_summary = re.sub(
r"\s+",
" ",
snippet.select_one('p[class*="package-snippet__description"]').text.strip()
)
pkg_info = PackageInfo(
pkg_name,
pkg_version,
pkg_info_2,
pkg_summary
)
package_info_list.append(pkg_info)
return package_info_list[::-1]
def get_installed_packages(venv_location, venv_name):
"""Get infos about installed packages.
"""
# build path to venv
venv_path = os.path.join(venv_location, venv_name)
# path to 'lib' folder
lib_dir = os.path.join(venv_path, "lib")
# list content
lib_dir_content = os.listdir(lib_dir)
# get 'python' folder
python_dir = lib_dir_content[0]
# build path to 'site-packages' folder
site_packages_dir = os.path.join(lib_dir, python_dir, "site-packages")
# get list of installed packages
package_info_list = []
site_packages = os.listdir(site_packages_dir)
for _, pkg in enumerate(site_packages):
if ".dist-info" in pkg:
meta_file = os.path.join(
site_packages_dir,
pkg,
"METADATA"
)
with open(meta_file, "r", encoding="utf-8") as f:
meta_data = f.readlines()
# search for each str
for i, line in enumerate(meta_data):
if "Name: " in line:
pkg_name = line[5:].strip()
if "Version: " in line:
pkg_version = line[8:].strip()
if "Author: " in line:
pkg_info_2 = line[7:].strip()
if "Summary: " in line:
pkg_summary = line[8:].strip()
pkg_info = PackageInfo(
pkg_name,
pkg_version,
pkg_info_2,
pkg_summary
)
package_info_list.append(pkg_info)
return package_info_list[::-1]
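# Minimal usage sketch (paths and the venv name are illustrative):
#
#     venvs = get_venvs(os.path.expanduser("~/venvs"))       # -> [VenvInfo, ...]
#     pkgs = get_installed_packages(os.path.expanduser("~/venvs"), "myenv")
#     hits = get_package_infos("requests")                   # scrapes pypi.org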
| sinusphi/venvipy | venvipy/get_data.py | get_data.py | py | 12,348 | python | en | code | 37 | github-code | 13 |
12229060250 | # -*- coding:utf-8 -*-
from django.conf.urls import url
from django.contrib import admin
from .views import (
BigmeterRTListAPIView,
getmapstationlist,
getmapsecondwaterlist,
showinfoStatics,
getinstanceflow,
getinstanceflow_data,
getWatermeterflow,
getWatermeterflow_data,
getWatermeterdaily,
getWatermeterdaily_data,
getWatermeterMonth,
getWatermeterMonth_data,
)
app_name='monitor-api'
urlpatterns = [
# url(r'^user/oranizationtree/$', OrganizationListAPIView.as_view(), name='organlist'),
# url(r'^create/$', PostCreateAPIView.as_view(), name='create'),
# url(r'^(?P<slug>[\w-]+)/$', PostDetailAPIView.as_view(), name='detail'),
# url(r'^(?P<slug>[\w-]+)/edit/$', PostUpdateAPIView.as_view(), name='update'),
# url(r'^(?P<slug>[\w-]+)/delete/$', PostDeleteAPIView.as_view(), name='delete'),
url(r'^station/list/$', BigmeterRTListAPIView.as_view(), name='stationlist'),
url(r'^getmapstationlist/$',getmapstationlist,name='getmapstationlist'),
url(r'^getmapsecondwaterlist/$',getmapsecondwaterlist,name='getmapsecondwaterlist'),
#
url(r'^realtimedata/getinstanceflow/$',getinstanceflow,name='getinstanceflow'),
url(r'^realtimedata/getinstanceflow_data/$',getinstanceflow_data,name='getinstanceflow_data'),
url(r'^realtimedata/showinfoStatics/$',showinfoStatics,name='showinfoStatics'),
url(r'^realtimedata/getWatermeterflow/$',getWatermeterflow,name='getWatermeterflow'),
url(r'^realtimedata/getWatermeterflow_data/$',getWatermeterflow_data,name='getWatermeterflow_data'),
url(r'^realtimedata/getWatermeterdaily/$',getWatermeterdaily,name='getWatermeterdaily'),
url(r'^realtimedata/getWatermeterdaily_data/$',getWatermeterdaily_data,name='getWatermeterdaily_data'),
url(r'^realtimedata/getWatermeterMonth/$',getWatermeterMonth,name='getWatermeterMonth'),
url(r'^realtimedata/getWatermeterMonth_data/$',getWatermeterMonth_data,name='getWatermeterMonth_data'),
# url(r'^dma/getDmaSelect/$', getDmaSelect, name='dmaselect'),
# url(r'^dma/list/$', DMAListAPIView.as_view(), name='dmalist'),
# url(r'^district/dmabaseinfo/$', dmabaseinfo, name='dmabaseinfo'),
# url(r'^community/list/$', CommunityListAPIView.as_view(), name='communitylist'),
# url(r'^secondwater/list/$', SecondWaterListAPIView.as_view(), name='secondwaterlist'),
]
| apengok/bsc2000 | monitor/api/urls.py | urls.py | py | 2,379 | python | en | code | 1 | github-code | 13 |
15214298592 | # -*- coding: utf-8 -*-
# greburs by InteGreat
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.osv import expression
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
production_ids = fields.One2many('mrp.production', 'sale_line_id', string='Produccion')
purchase_request_line_ids = fields.One2many('purchase.request.line', 'sale_line_id', string='Compra')
order_line_replenishment_id = fields.Many2one('sale.order.line.replenishment', copy=False)
@api.model_create_multi
def create(self, vals_list):
lines = super().create(vals_list)
for line in lines:
if line.qty_delivered_method == 'stock_move' and not line.order_line_replenishment_id:
route = self.env['stock.location.route'].search([('sale_selectable', '=', True)], limit=1)
self.env['sale.order.line.replenishment'].create({'order_line_id': line.id, 'route_id': route.id})
return lines
def _prepare_procurement_group_vals(self):
res = super()._prepare_procurement_group_vals()
res['sale_order_ids'] = [(4, self.order_id.id)]
return res
class SaleOrder(models.Model):
_inherit = "sale.order"
order_replenishment_ids = fields.One2many('sale.order.line.replenishment', 'order_id')
to_be_replenished = fields.Char(compute='_compute_order_replenishment_status')
purchase_request_line_count = fields.Integer('Solicitudes de compra', compute='_compute_purchase_request_count')
@api.depends('order_replenishment_ids.qty_open_demand', 'delivery_status')
def _compute_order_replenishment_status(self):
for order in self:
order.to_be_replenished = 'no'
if not order._origin.id:
# new order not saved: yes
order.to_be_replenished = 'new'
for line in order.order_replenishment_ids:
if (line.qty_open_demand > 0 or line.qty_to_order > 0) and order.delivery_status != 'done':
order.to_be_replenished = 'yes'
def action_run_order_replenishment(self):
for order in self:
for line in order.order_replenishment_ids:
line.action_replenish_line()
order.action_confirm()
# OVERRIDE: replenishment production ids added to mto logic
@api.depends('procurement_group_id.stock_move_ids.created_production_id.procurement_group_id.mrp_production_ids',
'order_line.production_ids')
def _compute_mrp_production_count(self):
super(SaleOrder, self)._compute_mrp_production_count()
for sale in self:
sale.mrp_production_count += len(sale.order_line.production_ids)
# override + replenishment production ids added to mto logic
def action_view_mrp_production(self):
self.ensure_one()
mrp_production_ids = \
self.procurement_group_id.stock_move_ids.created_production_id.procurement_group_id.mrp_production_ids.ids \
+ self.order_line.production_ids.ids
action = {
'res_model': 'mrp.production',
'type': 'ir.actions.act_window',
}
if len(mrp_production_ids) == 1:
action.update({
'view_mode': 'form',
'res_id': mrp_production_ids[0],
})
else:
action.update({
'name': _("Manufacturing Orders Generated by %s", self.name),
'domain': [('id', 'in', mrp_production_ids)],
'view_mode': 'tree,form',
})
return action
@api.depends('order_line.purchase_request_line_ids')
def _compute_purchase_request_count(self):
for sale in self:
sale.purchase_request_line_count = len(sale.order_line.purchase_request_line_ids)
def action_view_purchase_request(self):
self.ensure_one()
purchase_request_ids = self.order_line.purchase_request_line_ids.ids
action = {
'name': _("Solicitudes de compra generadas por %s", self.name),
'res_model': 'purchase.request.line',
'type': 'ir.actions.act_window',
'domain': [('id', 'in', purchase_request_ids)],
'view_mode': 'tree,form',
}
return action
    # overridden completely so that we do not loop twice
def name_get(self):
res = []
if self._context.get('sale_show_partner_name'):
partner = True
else:
partner = False
for order in self:
name = order.name
if order.client_order_ref:
name = '%s (%s)' % (name, order.client_order_ref)
if partner:
name = '%s - %s' % (name, order.partner_id.name)
res.append((order.id, name))
return res
# override completely
    # searching by partner name is not supported, even when 'sale_show_partner_name' is in the context
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
if operator == 'ilike' and not (name or '').strip():
domain = []
elif operator in ('ilike', 'like', '=', '=like', '=ilike'):
domain = expression.AND([
args or [],
['|', ('name', operator, name), ('client_order_ref', operator, name)]
])
return self._search(domain, limit=limit, access_rights_uid=name_get_uid)
class SaleOrderLineReplenishment(models.Model):
_name = 'sale.order.line.replenishment'
_description = 'Order Line Replenishment Extension'
_inherits = {'sale.order.line': 'order_line_id'}
order_line_id = fields.Many2one('sale.order.line', auto_join=True, index=True, required=True, ondelete="cascade")
qty_free_product = fields.Float(compute='_compute_data', digits='Product Unit of Measure', compute_sudo=True)
qty_open_demand = fields.Float(string='Open', digits='Product Unit of Measure', compute='_compute_data', store=True)
qty_planned = fields.Float(string='Planned', digits='Product Unit of Measure', compute='_compute_data', compute_sudo=True)
location_dest_id = fields.Many2one('stock.location', string='Location', compute='onchange_procurement_action', compute_sudo=True)
procurement_action = fields.Selection([('manufacture', 'Producir'), ('buy', 'Comprar')], string='Acción',
default='manufacture', required=True)
qty_to_order = fields.Float('Otra cantidad', digits='Product Unit of Measure', default=0.0)
replenishment_route_id = fields.Many2one('stock.location.route', string='Route',
compute='onchange_procurement_action', store=True)
@api.depends('procurement_action', 'order_id.warehouse_id')
def onchange_procurement_action(self):
for line in self:
rule = self.env['stock.rule'].search([
('action', '=', line.procurement_action), ('warehouse_id', '=', line.order_id.warehouse_id.id)
], limit=1)
if rule:
line.replenishment_route_id = rule.route_id
line.location_dest_id = rule.location_id
else:
line.replenishment_route_id = False
line.location_dest_id = False
@api.depends('qty_to_deliver', 'production_ids.product_qty', 'purchase_request_line_ids.product_qty')
def _compute_data(self):
for line in self:
# qtys
line.qty_open_demand = 0
line.qty_planned = 0
line.qty_free_product = 0
if line.qty_to_deliver > 0.0:
# qtys
qty = 0.0
line.qty_free_product = \
line.product_id.with_context(location=line.order_id.warehouse_id.lot_stock_id.id).free_qty
for production in line.production_ids:
if production.state != 'cancel':
qty += production.product_qty
for request in line.purchase_request_line_ids:
if not request.cancelled:
qty += request.product_qty
line.qty_planned = qty
line.qty_open_demand = line.qty_to_deliver - line.qty_free_product - line.qty_reserved_delivery - qty
def action_replenish_line(self):
procurements = []
for line in self:
if line.qty_to_order > 0:
product_qty = line.qty_to_order
elif line.qty_open_demand > 0:
product_qty = line.qty_open_demand
else:
continue
line_uom = line.product_uom
quant_uom = line.product_id.uom_id
product_qty, procurement_uom = line_uom._adjust_uom_quantities(product_qty, quant_uom)
group_id = line._prepare_replenishment_procurement_group()
date_planned = line.order_id.commitment_date
values = {
'group_id': group_id,
'sale_line_id': line.order_line_id.id,
'date_planned': date_planned,
'route_ids': line.replenishment_route_id,
'product_description_variants': line.order_line_id._get_sale_order_line_multiline_description_variants(),
'company_id': line.order_id.company_id,
}
procurements.append(self.env['procurement.group'].Procurement(
line.product_id, product_qty, procurement_uom, line.location_dest_id,
line.name, line.order_id.name, line.order_id.company_id, values))
line.qty_to_order = 0
if procurements:
self.env['procurement.group'].run(procurements)
def _prepare_replenishment_procurement_group(self):
action = self.replenishment_route_id.rule_ids[0].action
if action == 'manufacture':
name = 'P-%s-%s-' % (self.order_id.name, str(self.sequence))
counted = len(self.production_ids)
else:
name = 'C-%s-%s-' % (self.order_id.name, str(self.sequence))
counted = len(self.purchase_request_line_ids)
return self.env['procurement.group'].create({
'name': name + str(counted + 1),
'move_type': self.order_id.picking_policy,
'sale_order_ids': [(4, self.order_id.id)],
})
| sgrebur/e3a | integreat_sale_mrp_mtso/models/sale.py | sale.py | py | 10,290 | python | en | code | 0 | github-code | 13 |
7505915210 | # Time limit exceeded (top-down approach)
import sys
input = lambda: sys.stdin.readline().rstrip()
sys.setrecursionlimit(10**6)
m = 1000000007
d = [0] * 1000001
def dp(x):
if x == 0:
return 1
if x == 1:
return 2
if x == 2:
return 7
if d[x]:
return d[x]
d[x] = 3 * dp(x - 2) + 2 * dp(x - 1)
for i in range(3, x + 1):
d[x] += (2 * dp(x - i)) % m
d[x] %= m
return d[x]
n = int(input())
print(dp(n))
# Time limit exceeded (bottom-up approach)
import sys
input = lambda: sys.stdin.readline().rstrip()
m = 1000000007
def dp(x):
d = [0] * 1000001
d[:3] = [1, 2, 7]
for i in range(3, x + 1):
d[i] = 3 * d[i - 2] + 2 * d[i - 1]
for j in range(i - 3, -1, -1):
d[i] += (2 * d[j]) % m
d[i] %= m
return d[x]
n = int(input())
print(dp(n))
# Save memory by using a 2D (N x 2) table
import sys
input = lambda: sys.stdin.readline().rstrip()
m = 1000000007
def dp(x):
d = [[0, 0] for _ in range(1000001)]
d[0][0], d[1][0], d[2][0] = 1, 2, 7
for i in range(3, x + 1):
d[i][1] = (d[i - 3][0] + d[i - 1][1]) % m
d[i][0] = (2 * d[i - 1][0] + 3 * d[i - 2][0] + 2 * d[i][1]) % m
return d[x][0]
n = int(input())
print(dp(n)) | ryong9rrr/coding-test | 동적계획법/백준14852-타일채우기3.py | 백준14852-타일채우기3.py | py | 1,245 | python | en | code | 0 | github-code | 13 |
15721403756 | from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/settings/swap$",
views.SwapSettings.as_view(),
name="settings",
),
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/settings/swap/new/$",
views.SwapGroupCreate.as_view(),
name="settings.new",
),
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/settings/swap/(?P<pk>[0-9]+)/$",
views.SwapGroupEdit.as_view(),
name="settings.detail",
),
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/settings/swap/(?P<pk>[0-9]+)/delete/",
views.SwapGroupDelete.as_view(),
name="settings.delete",
),
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/swap$",
views.SwapStats.as_view(),
name="stats",
),
]
from pretix.multidomain import event_url
event_patterns = [
event_url(
r"^order/(?P<order>[^/]+)/(?P<secret>[A-Za-z0-9]+)/swap/$",
views.SwapOverview.as_view(),
name="swap.list",
),
event_url(
r"^order/(?P<order>[^/]+)/(?P<secret>[A-Za-z0-9]+)/swap/new$",
views.SwapCreate.as_view(),
name="swap.new",
),
event_url(
r"^order/(?P<order>[^/]+)/(?P<secret>[A-Za-z0-9]+)/swap/(?P<pk>[0-9]+)/cancel$",
views.SwapCancel.as_view(),
name="swap.cancel",
),
]
| rixx/pretix-swap | pretix_swap/urls.py | urls.py | py | 1,468 | python | en | code | 0 | github-code | 13 |
44793479222 | import csv
from textblob import TextBlob
file1 = open('/Users/Lucien/Documents/LevelEdu/sentiment_analysis/R_Scripts/pos_neg_labeled.csv', 'rb')
reader = csv.reader(file1)
new_csv = []
for row in reader:
text = row[2].decode('utf-8')
text = TextBlob(text)
row.append(text.sentiment.polarity)
new_csv.append(row)
file1.close()
file2 = open('/Users/Lucien/Documents/LevelEdu/sentiment_analysis/R_Scripts/pos_neg_labeled.csv', 'wb')
writer = csv.writer(file2)
writer.writerows(new_csv)
file2.close()
| lgendrot/midtown-sentiment-analysis | Python_Scripts/validation.py | validation.py | py | 515 | python | en | code | 0 | github-code | 13 |
13155845944 | # -*- coding: utf-8 -*-
r"""
Module for plotting cluster properties.
For inspiration, see http://www.astroexplorer.org/
"""
import sys
import numpy as np
import matplotlib.pyplot as pl
import matplotlib.colors as mcolors
from matplotlib.patches import Circle
import pandas as pd
import lightkurve as lk
# from transitleastsquares import final_T0_fit
from astropy.coordinates import Angle, SkyCoord, Distance
from astropy.visualization import ZScaleInterval
from astropy.time import Time
from astroquery.mast import Catalogs
from astropy.wcs import WCS
from astropy.io import fits
import astropy.units as u
from scipy.ndimage import zoom
from astroquery.skyview import SkyView
from astroplan.plots import plot_finder_image
from astropy.timeseries import LombScargle
from mpl_toolkits.mplot3d import Axes3D
from skimage import measure
import flammkuchen as fk
# Import from package
from chronos.target import Target
from chronos.cluster import ClusterCatalog, Cluster
from chronos.constants import Kepler_pix_scale, TESS_pix_scale
from chronos.utils import (
get_toi,
get_tois,
PadWithZeros,
get_mamajek_table,
parse_aperture_mask,
is_point_inside_mask,
is_gaiaid_in_cluster,
get_fluxes_within_mask,
get_rotation_period,
)
pl.style.use("default")
__all__ = [
"plot_tls",
"plot_odd_even",
"plot_hrd_spectral_types",
"plot_rotation_period",
"plot_possible_NEBs",
"plot_interactive",
"plot_aperture_outline",
"plot_aperture_outline2",
"plot_gaia_sources_on_survey",
"plot_gaia_sources_on_tpf",
"plot_cluster_kinematics",
"get_dss_data",
"plot_archival_images",
"plot_dss_image",
"plot_likelihood_grid",
"plot_out_of_transit",
"plot_fold_lc",
"df_to_gui",
]
# http://gsss.stsci.edu/SkySurveys/Surveys.htm
dss_description = {
"dss1": "POSS1 Red in the north; POSS2/UKSTU Blue in the south",
"poss2ukstu_red": "POSS2/UKSTU Red",
"poss2ukstu_ir": "POSS2/UKSTU Infrared",
"poss2ukstu_blue": "POSS2/UKSTU Blue",
"poss1_blue": "POSS1 Blue",
"poss1_red": "POSS1 Red",
"all": "best among all plates",
"quickv": "Quick-V Survey",
"phase2_gsc2": "HST Phase 2 Target Positioning (GSC 2)",
"phase2_gsc1": "HST Phase 2 Target Positioning (GSC 1)",
}
class MidPointLogNorm(mcolors.LogNorm):
"""
Log normalization with midpoint offset
from
https://stackoverflow.com/questions/48625475/python-shifted-logarithmic-colorbar-white-color-offset-to-center
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
mcolors.LogNorm.__init__(self, vmin=vmin, vmax=vmax, clip=clip)
self.midpoint = midpoint
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = (
[np.log(self.vmin), np.log(self.midpoint), np.log(self.vmax)],
[0, 0.5, 1],
)
return np.ma.masked_array(np.interp(np.log(value), x, y))
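# Minimal usage sketch (assuming a strictly positive array `img`):
#
#     norm = MidPointLogNorm(vmin=img.min(), vmax=img.max(), midpoint=1.0)
#     pl.imshow(img, norm=norm, cmap="coolwarm")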
def plot_likelihood_grid(
mass_grid,
m2s,
m3s,
cmap="default",
use_norm_cbar=False,
label="",
reference=0,
aspect_ratio=1,
):
"""
Parameters
----------
mass_grid : 3-d array
mass grid of likelihood values
"""
fig, ax = pl.subplots(1, 1, figsize=(8, 8))
xmin, xmax = m2s[0], m2s[-1]
ymin, ymax = m3s[0], m3s[-1]
if use_norm_cbar:
norm = MidPointLogNorm(
vmin=mass_grid.min(), vmax=mass_grid.max(), midpoint=reference
)
else:
norm = None
# plot matrix
cbar = ax.imshow(
mass_grid,
origin="lower",
interpolation="none",
extent=[xmin, xmax, ymin, ymax],
cmap=cmap,
norm=norm,
)
pl.colorbar(
cbar, ax=ax, label=label, orientation="vertical" # shrink=0.9,
)
# add labels
ax.set_aspect(aspect_ratio)
pl.setp(
ax,
xlim=(xmin, xmax),
ylim=(ymin, ymax),
xlabel="secondary star mass (Msun)",
ylabel="tertiary star mass (Msun)",
)
return fig
def plot_mass_radius_diagram():
"""
https://github.com/oscaribv/fancy-massradius-plot/blob/master/mass_radius_plot.ipynb
"""
errmsg = "To be added later"
raise NotImplementedError(errmsg)
def plot_cluster_map(
target_coord=None,
catalog_name="Bouma2019",
cluster_name=None,
offset=10,
ax=None,
):
""" """
tra = target_coord.ra.deg
tdec = target_coord.dec.deg
if ax is None:
        fig, ax = pl.subplots(1, 1, figsize=(5, 5))
if cluster_name is None:
cc = ClusterCatalog(catalog_name)
cat = cc.query_catalog()
coords = SkyCoord(
ra=cat["ra"],
dec=cat["dec"],
distance=cat["distance"],
unit=("deg", "deg", "pc"),
)
        ax.plot(coords.ra.deg, coords.dec.deg, "ro")
else:
c = Cluster(catalog_name=catalog_name, cluster_name=cluster_name)
mem = c.query_cluster_members()
rsig = mem["ra"].std()
dsig = mem["dec"].std()
r = np.sqrt(rsig**2 + dsig**2)
circle = pl.Circle((tra, tdec), r, color="r")
ax.plot(mem["ra"], mem["dec"], "r.", alpha=0.1)
ax.add_artist(circle)
ax.plot(tra, tdec, "bx")
    ax.set_ylim(tdec - offset, tdec + offset)
    ax.set_xlim(tra - offset, tra + offset)
return fig
def plot_orientation_on_tpf(tpf, ax=None):
"""
Plot the orientation arrows on tpf
Returns
-------
tpf read from lightkurve
"""
if ax is None:
fig, ax = pl.subplots(1, 1, figsize=(5, 5))
mean_tpf = np.nanmean(tpf.flux, axis=0)
    zmin, zmax = ZScaleInterval(contrast=0.5).get_limits(mean_tpf)
ax.matshow(mean_tpf, vmin=zmin, vmax=zmax, origin="lower")
_ = plot_orientation(tpf, ax=ax)
return ax
def plot_orientation(tpf, ax):
"""overlay orientation arrows on tpf plot"""
nx, ny = tpf.flux.shape[1:]
x0, y0 = tpf.column + int(0.9 * nx), tpf.row + int(0.2 * ny)
# East
tmp = tpf.get_coordinates()
ra00, dec00 = tmp[0][0][0][0], tmp[1][0][0][0]
ra10, dec10 = tmp[0][0][0][-1], tmp[1][0][0][-1]
theta = np.arctan((dec10 - dec00) / (ra10 - ra00))
if (ra10 - ra00) < 0.0:
theta += np.pi
# theta = -22.*np.pi/180.
x1, y1 = 1.0 * np.cos(theta), 1.0 * np.sin(theta)
ax.arrow(x0, y0, x1, y1, head_width=0.2, color="white")
ax.text(x0 + 1.5 * x1, y0 + 1.5 * y1, "E", color="white")
# North
theta = theta + 90.0 * np.pi / 180.0
x1, y1 = 1.0 * np.cos(theta), 1.0 * np.sin(theta)
ax.arrow(x0, y0, x1, y1, head_width=0.2, color="white")
ax.text(x0 + 1.5 * x1, y0 + 1.5 * y1, "N", color="white")
return ax
def plot_gaia_sources_on_tpf(
tpf,
target_gaiaid,
gaia_sources=None,
sap_mask="pipeline",
depth=None,
kmax=1,
dmag_limit=8,
fov_rad=None,
cmap="viridis",
figsize=None,
ax=None,
invert_xaxis=False,
invert_yaxis=False,
pix_scale=TESS_pix_scale,
verbose=True,
**mask_kwargs,
):
"""
    plot gaia sources brighter than dmag_limit; only sources bright enough
    to reproduce the transit depth are annotated with star ids;
    star ids are assigned in order of increasing separation
dmag_limit : float
maximum delta mag to consider; computed based on depth if None
TODO: correct for proper motion difference between
survey image and gaia DR2 positions
"""
if verbose:
print("Plotting nearby gaia sources on tpf.")
assert target_gaiaid is not None
img = np.nanmedian(tpf.flux, axis=0)
# make aperture mask
mask = parse_aperture_mask(tpf, sap_mask=sap_mask, **mask_kwargs)
ax = plot_aperture_outline(
img, mask=mask, imgwcs=tpf.wcs, figsize=figsize, cmap=cmap, ax=ax
)
if fov_rad is None:
nx, ny = tpf.shape[1:]
diag = np.sqrt(nx**2 + ny**2)
fov_rad = (0.4 * diag * pix_scale).to(u.arcmin).round(0)
if gaia_sources is None:
print(
"Querying Gaia sometimes hangs. Provide `gaia_sources` if you can."
)
target_coord = SkyCoord(
ra=tpf.header["RA_OBJ"], dec=tpf.header["DEC_OBJ"], unit="deg"
)
gaia_sources = Catalogs.query_region(
target_coord, radius=fov_rad, catalog="Gaia", version=2
).to_pandas()
assert len(gaia_sources) > 1, "gaia_sources contains single entry"
# find sources within mask
# target is assumed to be the first row
idx = gaia_sources["source_id"].astype(int).isin([target_gaiaid])
target_gmag = gaia_sources.loc[idx, "phot_g_mean_mag"].values[0]
# sources_inside_aperture = []
if depth is not None:
# compute delta mag limit given transit depth
dmag_limit = (
np.log10(kmax / depth - 1) if dmag_limit is None else dmag_limit
)
# get min_gmag inside mask
ra, dec = gaia_sources[["ra", "dec"]].values.T
pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)
contour_points = measure.find_contours(mask, level=0.1)[0]
isinside = [
is_point_inside_mask(contour_points, pix) for pix in pix_coords
]
# sources_inside_aperture.append(isinside)
min_gmag = gaia_sources.loc[isinside, "phot_g_mean_mag"].min()
if (target_gmag - min_gmag) != 0:
print(
f"target Gmag={target_gmag:.2f} is not the brightest within aperture (Gmag={min_gmag:.2f})"
)
else:
min_gmag = gaia_sources.phot_g_mean_mag.min() # brightest
dmag_limit = (
gaia_sources.phot_g_mean_mag.max()
if dmag_limit is None
else dmag_limit
)
base_ms = 128.0 # base marker size
starid = 1
# if very crowded, plot only top N
gmags = gaia_sources.phot_g_mean_mag
dmags = gmags - target_gmag
rank = np.argsort(dmags.values)
for index, row in gaia_sources.iterrows():
# FIXME: why some indexes are missing?
ra, dec, gmag, id = row[["ra", "dec", "phot_g_mean_mag", "source_id"]]
dmag = gmag - target_gmag
pix = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)[0]
contour_points = measure.find_contours(mask, level=0.1)[0]
color, alpha = "red", 1.0
# change marker color and transparency depending on the location and dmag
if is_point_inside_mask(contour_points, pix):
if int(id) == int(target_gaiaid):
# plot x on target
ax.plot(
pix[1],
pix[0],
marker="x",
ms=base_ms / 16,
c="k",
zorder=3,
)
if depth is not None:
# compute flux ratio with respect to brightest star
gamma = 1 + 10 ** (0.4 * (min_gmag - gmag))
if depth > kmax / gamma:
# orange if flux is insignificant
color = "C1"
else:
# outside aperture
color, alpha = "C1", 0.5
ax.scatter(
pix[1],
pix[0],
s=base_ms / 2**dmag, # fainter -> smaller
c=color,
alpha=alpha,
zorder=2,
edgecolor=None,
)
# choose which star to annotate
if len(gmags) < 20:
# sparse: annotate all
ax.text(pix[1], pix[0], str(starid), color="white", zorder=100)
elif len(gmags) > 50:
# crowded: annotate only 15 smallest dmag ones
if rank[starid - 1] < 15:
ax.text(pix[1], pix[0], str(starid), color="white", zorder=100)
elif (color == "red") & (dmag < dmag_limit):
# plot if within aperture and significant source of dilution
ax.text(pix[1], pix[0], str(starid), color="white", zorder=100)
elif color == "red":
# neither sparse nor crowded
# annotate if inside aperture
ax.text(pix[1], pix[0], str(starid), color="white", zorder=100)
starid += 1
# Make legend with 4 sizes representative of delta mags
dmags = dmags[dmags < dmag_limit]
_, dmags = pd.cut(dmags, 3, retbins=True)
for dmag in dmags:
size = base_ms / 2**dmag
# -1, -1 is outside the fov
# dmag = 0 if float(dmag)==0 else 0
ax.scatter(
-1,
-1,
s=size,
c="red",
alpha=0.6,
edgecolor=None,
zorder=10,
clip_on=True,
label=r"$\Delta m= $" + f"{dmag:.1f}",
)
ax.legend(fancybox=True, framealpha=0.5)
# set img limits
xdeg = (nx * pix_scale).to(u.arcmin)
ydeg = (ny * pix_scale).to(u.arcmin)
# orient such that north is up; east is left
if invert_yaxis:
# ax.invert_yaxis() # increasing upward
raise NotImplementedError()
if invert_xaxis:
        # ax.invert_xaxis()  # decreasing rightward
raise NotImplementedError()
if hasattr(ax, "coords"):
ax.coords[0].set_major_formatter("dd:mm")
ax.coords[1].set_major_formatter("dd:mm")
pl.setp(
ax, xlim=(0, nx), ylim=(0, ny), xlabel=f"({xdeg:.2f} x {ydeg:.2f})"
)
return ax
def plot_gaia_sources_on_survey(
tpf,
target_gaiaid,
gaia_sources=None,
fov_rad=None,
depth=0.0,
kmax=1.0,
sap_mask="pipeline",
survey="DSS2 Red",
ax=None,
    color_aper="C0",  # "C0" is matplotlib's default blue; use "C6" for pink
figsize=None,
invert_xaxis=False,
invert_yaxis=False,
pix_scale=TESS_pix_scale,
verbose=True,
**mask_kwargs,
):
    """Plot (superimpose) Gaia sources on an archival image.
Parameters
----------
target_coord : astropy.coordinates
target coordinate
gaia_sources : pd.DataFrame
gaia sources table
fov_rad : astropy.unit
FOV radius
survey : str
image survey; see from astroquery.skyview import SkyView;
SkyView.list_surveys()
verbose : bool
print texts
ax : axis
subplot axis
    color_aper : str
        aperture outline color (default="C0")
kwargs : dict
keyword arguments for aper_radius, percentile
Returns
-------
ax : axis
subplot axis
TODO: correct for proper motion difference between
survey image and gaia DR2 positions
"""
if verbose:
print("Plotting nearby gaia sources on survey image.")
assert target_gaiaid is not None
ny, nx = tpf.flux.shape[1:]
if fov_rad is None:
diag = np.sqrt(nx**2 + ny**2)
fov_rad = (0.4 * diag * pix_scale).to(u.arcmin).round(0)
target_coord = SkyCoord(ra=tpf.ra * u.deg, dec=tpf.dec * u.deg)
if gaia_sources is None:
print(
"Querying Gaia sometimes hangs. Provide `gaia_sources` if you can."
)
gaia_sources = Catalogs.query_region(
target_coord, radius=fov_rad, catalog="Gaia", version=2
).to_pandas()
assert len(gaia_sources) > 1, "gaia_sources contains single entry"
# make aperture mask
mask = parse_aperture_mask(tpf, sap_mask=sap_mask, **mask_kwargs)
maskhdr = tpf.hdu[2].header
# make aperture mask outline
contour = np.zeros((ny, nx))
contour[np.where(mask)] = 1
contour = np.lib.pad(contour, 1, PadWithZeros)
highres = zoom(contour, 100, order=0, mode="nearest")
extent = np.array([-1, nx, -1, ny])
if verbose:
print(
f"Querying {survey} ({fov_rad:.2f} x {fov_rad:.2f}) archival image"
)
# -----------create figure---------------#
if ax is None:
# get img hdu for subplot projection
try:
hdu = SkyView.get_images(
position=target_coord.icrs.to_string(),
coordinates="icrs",
survey=survey,
radius=fov_rad,
grid=False,
)[0][0]
except Exception:
errmsg = "survey image not available"
raise FileNotFoundError(errmsg)
fig = pl.figure(figsize=figsize)
# define scaling in projection
ax = fig.add_subplot(111, projection=WCS(hdu.header))
# plot survey img
if str(target_coord.distance) == "nan":
target_coord = SkyCoord(ra=target_coord.ra, dec=target_coord.dec)
nax, hdu = plot_finder_image(
target_coord, ax=ax, fov_radius=fov_rad, survey=survey, reticle=False
)
imgwcs = WCS(hdu.header)
mx, my = hdu.data.shape
# plot mask
_ = ax.contour(
highres,
levels=[0.5],
extent=extent,
origin="lower",
linewidths=[3],
colors=color_aper,
transform=ax.get_transform(WCS(maskhdr)),
)
idx = gaia_sources["source_id"].astype(int).isin([target_gaiaid])
target_gmag = gaia_sources.loc[idx, "phot_g_mean_mag"].values[0]
for index, row in gaia_sources.iterrows():
marker, s = "o", 100
r, d, mag, id = row[["ra", "dec", "phot_g_mean_mag", "source_id"]]
pix = imgwcs.all_world2pix(np.c_[r, d], 1)[0]
if int(id) != int(target_gaiaid):
gamma = 1 + 10 ** (0.4 * (mag - target_gmag))
if depth > kmax / gamma:
# too deep to have originated from secondary star
edgecolor = "C1"
alpha = 1 # 0.5
else:
# possible NEBs
edgecolor = "C3"
alpha = 1
else:
s = 200
edgecolor = "C2"
marker = "s"
alpha = 1
nax.scatter(
pix[0],
pix[1],
marker=marker,
s=s,
edgecolor=edgecolor,
alpha=alpha,
facecolor="none",
)
# orient such that north is up; left is east
if invert_yaxis:
# ax.invert_yaxis()
raise NotImplementedError()
if invert_xaxis:
# ax.invert_xaxis()
raise NotImplementedError()
if hasattr(ax, "coords"):
ax.coords[0].set_major_formatter("dd:mm")
ax.coords[1].set_major_formatter("dd:mm")
# set img limits
pl.setp(
nax,
xlim=(0, mx),
ylim=(0, my),
)
nax.set_title(
f"{survey} ({fov_rad.value:.2f}' x {fov_rad.value:.2f}')", y=0.95
)
return ax
def get_dss_data(
ra,
dec,
survey="poss2ukstu_red",
plot=False,
height=1,
width=1,
epoch="J2000",
):
"""
Digitized Sky Survey (DSS)
http://archive.stsci.edu/cgi-bin/dss_form
Parameters
----------
survey : str
(default=poss2ukstu_red) see `dss_description`
height, width : float
image cutout height and width [arcmin]
epoch : str
default=J2000
Returns
-------
hdu
"""
survey_list = list(dss_description.keys())
if survey not in survey_list:
raise ValueError(f"{survey} not in:\n{survey_list}")
base_url = "http://archive.stsci.edu/cgi-bin/dss_search?v="
url = f"{base_url}{survey}&r={ra}&d={dec}&e={epoch}&h={height}&w={width}&f=fits&c=none&s=on&fov=NONE&v3"
try:
hdulist = fits.open(url)
# hdulist.info()
hdu = hdulist[0]
# data = hdu.data
# header = hdu.header
if plot:
_ = plot_dss_image(hdu)
return hdu
    except OSError as e:
        # the cutout server commonly errors when no plate covers the field;
        # other exceptions now propagate with their original traceback
        print(f"Error: {e}\nsurvey={survey} image is likely unavailable.")
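# Usage sketch for get_dss_data (coordinates below are made-up placeholders):
#   hdu = get_dss_data(ra=86.82, dec=-51.07, survey="poss2ukstu_red", plot=True)
#   hdu.header["DATE-OBS"]  # epoch of the archival plate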
def plot_dss_image(
hdu, cmap="gray", contrast=0.5, coord_format="dd:mm:ss", ax=None
):
"""
Plot output of get_dss_data:
hdu = get_dss_data(ra, dec)
"""
data, header = hdu.data, hdu.header
interval = ZScaleInterval(contrast=contrast)
zmin, zmax = interval.get_limits(data)
if ax is None:
fig = pl.figure(constrained_layout=True)
ax = fig.add_subplot(projection=WCS(header))
ax.imshow(data, vmin=zmin, vmax=zmax, cmap=cmap)
ax.set_xlabel("RA")
ax.set_ylabel("DEC", y=0.9)
title = f"{header['SURVEY']} ({header['FILTER']})\n"
title += f"{header['DATE-OBS'][:10]}"
ax.set_title(title)
# set RA from hourangle to degree
if hasattr(ax, "coords"):
ax.coords[1].set_major_formatter(coord_format)
ax.coords[0].set_major_formatter(coord_format)
return ax
def plot_archival_images(
ra,
dec,
survey1="dss1",
survey2="ps1", # "poss2ukstu_red",
filter="i",
fp1=None,
fp2=None,
height=1,
width=1,
cmap="gray",
reticle=True,
grid=True,
color="red",
contrast=0.5,
fontsize=14,
coord_format="dd:mm:ss",
return_baseline=False,
):
"""
Plot two archival images
See e.g.
https://s3.amazonaws.com/aasie/images/1538-3881/159/3/100/ajab5f15f2_hr.jpg
Uses reproject to have identical fov:
https://reproject.readthedocs.io/en/stable/
Parameters
----------
ra, dec : float
target coordinates in degrees
survey1, survey2 : str
survey from which the images will come from
fp1, fp2 : path
filepaths if the images were downloaded locally
    height, width : float
        field of view in arcmin (default=1')
filter : str
(g,r,i,z,y) filter if survey = PS1
cmap : str
colormap (default='gray')
reticle : bool
plot circle to mark the original position of target in survey1
color : str
default='red'
contrast : float
ZScale contrast
Notes:
------
Account for space motion:
https://docs.astropy.org/en/stable/coordinates/apply_space_motion.html
The position offset can be computed as:
```
import numpy as np
pm = np.hypot(pmra, pmdec) #mas/yr
offset = pm*baseline_year/1e3
```
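    For example, (pmra, pmdec) = (100, -50) mas/yr gives pm ~ 112 mas/yr;
    over a ~60-yr baseline that is an offset of ~6.7 arcsec, easily visible
    between the two epochs. (Illustrative numbers, not from any catalog.)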
"""
pl.rcParams["font.size"] = fontsize
pl.rcParams["xtick.labelsize"] = fontsize
if (survey1 == "ps1") or (survey2 == "ps1"):
try:
import panstarrs3 as p3
fov = np.hypot(width, height) * u.arcmin
ps = p3.Panstarrs(
ra=ra,
dec=dec,
fov=fov.to(u.arcsec),
format="fits",
color=False,
)
img, hdr = ps.get_fits(filter=filter, verbose=False)
except Exception:
raise ModuleNotFoundError(
"pip install git+https://github.com/jpdeleon/panstarrs3.git"
)
# poss1
if fp1 is not None and fp2 is not None:
hdu1 = fits.open(fp1)[0]
hdu2 = fits.open(fp2)[0]
else:
if survey1 == "ps1":
hdu1 = fits.open(ps.get_url()[0])[0]
hdu1.header["DATE-OBS"] = Time(
hdu1.header["MJD-OBS"], format="mjd"
).strftime("%Y-%m-%d")
hdu1.header["FILTER"] = hdu1.header["FPA.FILTER"].split(".")[0]
hdu1.header["SURVEY"] = "Panstarrs1"
else:
hdu1 = get_dss_data(
ra, dec, height=height, width=width, survey=survey1
)
if survey2 == "ps1":
hdu2 = fits.open(ps.get_url()[0])[0]
hdu2.header["DATE-OBS"] = Time(
hdu2.header["MJD-OBS"], format="mjd"
).strftime("%Y-%m-%d")
hdu2.header["FILTER"] = hdu2.header["FPA.FILTER"].split(".")[0]
hdu2.header["SURVEY"] = "Panstarrs1"
else:
hdu2 = get_dss_data(
ra, dec, height=height, width=width, survey=survey2
)
try:
from reproject import reproject_interp
except Exception:
cmd = "pip install reproject"
raise ModuleNotFoundError(cmd)
projected_img, footprint = reproject_interp(hdu2, hdu1.header)
fig = pl.figure(figsize=(10, 5), constrained_layout=False)
interval = ZScaleInterval(contrast=contrast)
# data1 = hdu1.data
header1 = hdu1.header
    ax1 = fig.add_subplot(121, projection=WCS(header1))  # int spec; the string "121" form is deprecated
_ = plot_dss_image(
hdu1, cmap=cmap, contrast=contrast, coord_format=coord_format, ax=ax1
)
if reticle:
c = Circle(
(ra, dec),
0.001,
edgecolor=color,
facecolor="none",
lw=2,
transform=ax1.get_transform("fk5"),
)
ax1.add_patch(c)
filt1 = (
hdu1.header["FILTER"]
if hdu1.header["FILTER"] is not None
else survey1.split("_")[1]
)
# zmin, zmax = interval.get_limits(data1)
# ax1.imshow(projected_img, origin="lower", vmin=zmin, vmax=zmax, cmap="gray")
title = f"{header1['SURVEY']} ({filt1})\n"
title += f"{header1['DATE-OBS'][:10]}"
ax1.set_title(title)
# set RA from hourangle to degree
if hasattr(ax1, "coords"):
ax1.coords[0].set_major_formatter(coord_format)
ax1.coords[1].set_major_formatter(coord_format)
# recent
data2, header2 = hdu2.data, hdu2.header
    ax2 = fig.add_subplot(122, projection=WCS(header1))
# _ = plot_dss_image(hdu2, ax=ax2)
zmin, zmax = interval.get_limits(data2)
ax2.imshow(projected_img, origin="lower", vmin=zmin, vmax=zmax, cmap=cmap)
if reticle:
c = Circle(
(ra, dec),
0.001,
edgecolor=color,
facecolor="none",
lw=2,
transform=ax2.get_transform("fk5"),
)
ax2.add_patch(c)
# ax2.scatter(ra, dec, 'r+')
filt2 = (
hdu2.header["FILTER"]
if hdu2.header["FILTER"] is not None
else survey2.split("_")[1]
)
ax2.coords["dec"].set_axislabel_position("r")
ax2.coords["dec"].set_ticklabel_position("r")
ax2.coords["dec"].set_axislabel("DEC")
ax2.set_xlabel("RA")
title = f"{header2['SURVEY']} ({filt2})\n"
title += f"{header2['DATE-OBS'][:10]}"
ax2.set_title(title)
# set RA from hourangle to degree
if hasattr(ax2, "coords"):
ax2.coords[0].set_major_formatter(coord_format)
ax2.coords[1].set_major_formatter(coord_format)
if grid:
[ax.grid(True) for ax in fig.axes]
fig.tight_layout(rect=[0, 0.03, 0.5, 0.9])
fig.suptitle(".", y=0.995)
fig.tight_layout()
if return_baseline:
baseline = int(header2["DATE-OBS"][:4]) - int(header1["DATE-OBS"][:4])
return fig, baseline
else:
return fig
def plot_aperture_outline(
img,
mask,
ax=None,
imgwcs=None,
cmap="viridis",
color_aper="C6",
figsize=None,
):
"""
see https://github.com/rodluger/everest/blob/56f61a36625c0d9a39cc52e96e38d257ee69dcd5/everest/standalone.py
"""
interval = ZScaleInterval(contrast=0.5)
ny, nx = mask.shape
contour = np.zeros((ny, nx))
contour[np.where(mask)] = 1
contour = np.lib.pad(contour, 1, PadWithZeros)
highres = zoom(contour, 100, order=0, mode="nearest")
extent = np.array([-1, nx, -1, ny])
if ax is None:
fig, ax = pl.subplots(
subplot_kw={"projection": imgwcs}, figsize=figsize
)
ax.set_xlabel("RA")
ax.set_ylabel("Dec")
_ = ax.contour(
highres,
levels=[0.5],
linewidths=[3],
extent=extent,
origin="lower",
colors=color_aper,
)
zmin, zmax = interval.get_limits(img)
ax.matshow(
img, origin="lower", cmap=cmap, vmin=zmin, vmax=zmax, extent=extent
)
# verts = cs.allsegs[0][0]
return ax
def plot_aperture_outline2(
img,
mask,
ax=None,
imgwcs=None,
cmap="viridis",
color_aper="C6",
figsize=None,
):
"""
see https://github.com/afeinstein20/eleanor/blob/master/eleanor/visualize.py#L78
"""
interval = ZScaleInterval(contrast=0.5)
f = lambda x, y: mask[int(y), int(x)]
g = np.vectorize(f)
if ax is None:
fig, ax = pl.subplots(
subplot_kw={"projection": imgwcs}, figsize=figsize
)
ax.set_xlabel("RA")
ax.set_ylabel("Dec")
x = np.linspace(0, mask.shape[1], mask.shape[1] * 100)
y = np.linspace(0, mask.shape[0], mask.shape[0] * 100)
extent = [0 - 0.5, x[:-1].max() - 0.5, 0 - 0.5, y[:-1].max() - 0.5]
X, Y = np.meshgrid(x[:-1], y[:-1])
Z = g(X[:-1], Y[:-1])
# plot contour
_ = ax.contour(
Z[::-1],
levels=[0.5],
colors=color_aper,
linewidths=[3],
extent=extent,
origin="lower",
)
zmin, zmax = interval.get_limits(img)
# plot image
ax.matshow(
img, origin="lower", cmap=cmap, vmin=zmin, vmax=zmax, extent=extent
)
return ax
def plot_possible_NEBs(gaia_sources, depth, gaiaid=None, kmax=1.0, ax=None):
    """Plot eclipse depth vs. delta Gmag to flag nearby stars that could
    reproduce the observed depth as eclipsing binaries (NEBs)."""
assert len(gaia_sources) > 1, "gaia_sources contains single entry"
if ax is None:
fig, ax = pl.subplots(1, 1, figsize=(5, 5))
if gaiaid is None:
# nearest match (first entry row=0) is assumed as the target
gaiaid = gaia_sources.iloc[0]["source_id"]
idx = gaia_sources.source_id.isin([gaiaid])
target_gmag = gaia_sources.loc[idx, "phot_g_mean_mag"].values[0]
good, bad, dmags = [], [], []
for index, row in gaia_sources.iterrows():
id, mag = row[["source_id", "phot_g_mean_mag"]]
if int(id) != gaiaid:
dmag = mag - target_gmag
gamma = 1 + 10 ** (0.4 * dmag)
ax.plot(dmag, kmax / gamma, "b.")
dmags.append(dmag)
if depth > kmax / gamma:
# observed depth is too deep to have originated from the secondary star
good.append(id)
else:
# uncertain signal source
bad.append(id)
ax.axhline(depth, 0, 1, c="k", ls="--")
dmags = np.linspace(min(dmags), max(dmags), 100)
gammas = 1 + 10 ** (0.4 * dmags)
nbad = len(bad)
ax.plot(dmags, kmax / gammas, "r-", label=f"potential NEBs={nbad}")
ax.set_yscale("log")
ax.set_xlabel(r"$\Delta$Gmag")
ax.set_ylabel("Eclipse depth")
ax.legend()
return ax
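# Worked example of the gamma cutoff above (illustrative numbers): a neighbor
# fainter by dmag = 5 has gamma = 1 + 10**(0.4 * 5) = 101, so even a total
# eclipse of that neighbor dilutes to at most kmax/gamma ~ 1% in the blend;
# any observed depth above that cannot have originated from it.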
def plot_rotation_period(
time,
flux,
err=None,
mask=None,
method="lombscargle",
min_per=0.5,
max_per=30,
npoints=20,
xlims=None,
ylims=None,
figsize=(10, 5),
title=None,
):
"""
method : str
lombscargle or acf (autocorrelation function)
"""
fig, ax = pl.subplots(1, 2, figsize=figsize, constrained_layout=True)
if mask is not None:
time, flux = time[~mask], flux[~mask]
err = None if err is None else err[~mask]
if method == "lombscargle":
ls = LombScargle(time, flux, dy=err)
frequencies, powers = ls.autopower(
minimum_frequency=1.0 / max_per, maximum_frequency=1.0 / min_per
)
best_freq = frequencies[np.argmax(powers)]
peak_period = 1.0 / best_freq
periods = 1.0 / frequencies
elif method == "acf":
raise NotImplementedError("Method not yet available")
else:
raise ValueError("Use method='lombscargle'")
# fit a gaussian to lombscargle power
prot, prot_err = get_rotation_period(
time,
flux,
min_per=min_per,
max_per=max_per,
npoints=npoints,
plot=False,
)
# left: periodogram
n = 0
ax[n].plot(periods, powers, "k-")
ax[n].axvline(
peak_period, 0, 1, ls="--", c="r", label=f"peak={peak_period:.2f}"
)
ax[n].axvline(
prot, 0, 1, ls="-", c="r", label=f"fit={prot:.2f}+/-{prot_err:.2f}"
)
ax[n].legend(title="Best period [d]")
ax[n].set_xscale("log")
ax[n].set_xlabel("Period [days]")
ax[n].set_ylabel("Lomb-Scargle Power")
# right: phase-folded lc and sinusoidal model
n = 1
offset = 0.5
t_fit = np.linspace(0, 1, 100) - offset
y_fit = ls.model(t_fit * peak_period - peak_period / 2, best_freq)
ax[n].plot(
t_fit * peak_period, y_fit, "r-", lw=3, label="sine model", zorder=3
)
# fold data
phase = ((time / peak_period) % 1) - offset
a = ax[n].scatter(
phase * peak_period, flux, c=time, cmap=pl.get_cmap("Blues")
)
pl.colorbar(a, ax=ax[n], label="Time [BTJD]")
ax[n].legend()
if xlims is None:
ax[n].set_xlim(-peak_period / 2, peak_period / 2)
else:
ax[n].set_xlim(*xlims)
if ylims is not None:
ax[n].set_ylim(*ylims)
ax[n].set_ylabel("Normalized Flux")
ax[n].set_xlabel("Phase [days]")
fig.suptitle(title)
return fig
def plot_tls(results, period=None, plabel=None, figsize=None):
"""
    Parameters
    ----------
    results : dict
        results after running tls.power()
* kwargs : dict
plotting kwargs e.g. {'figsize': (8,8), 'constrained_layout': True}
Returns
-------
fig : figure object
"""
fig, ax = pl.subplots(2, 1, figsize=figsize)
n = 0
label = f"TLS={results.period:.3}"
ax[n].axvline(results.period, alpha=0.4, lw=3, label=label)
ax[n].set_xlim(np.min(results.periods), np.max(results.periods))
for i in range(2, 10):
ax[n].axvline(i * results.period, alpha=0.4, lw=1, linestyle="dashed")
ax[n].axvline(results.period / i, alpha=0.4, lw=1, linestyle="dashed")
ax[n].set_ylabel(r"SDE")
ax[n].set_xlabel("Period (days)")
ax[n].plot(results.periods, results.power, color="black", lw=0.5)
ax[n].set_xlim(0, max(results.periods))
if period is not None:
ax[n].axvline(period, 0, 1, ls="--", c="r", label=plabel)
ax[n].legend(title="best period (d)")
n = 1
ax[n].plot(
results.model_folded_phase - 0.5, results.model_folded_model, color="b"
)
ax[n].scatter(
results.folded_phase - 0.5,
results.folded_y,
color="k",
s=10,
alpha=0.5,
zorder=2,
)
xlim = 3 * results.duration / results.period
ax[n].set_xlim(-xlim, xlim)
ax[n].set_xlabel("Phase")
ax[n].set_ylabel("Relative flux")
fig.tight_layout()
return fig
def plot_odd_even(
flat, period, epoch, duration=None, yline=None, figsize=(8, 4)
):
    """Plot odd- and even-numbered transits side by side to check for depth differences."""
fig, axs = pl.subplots(
1, 2, figsize=figsize, sharey=True, constrained_layout=True
)
fold = flat.fold(period=period, t0=epoch)
ax = axs[0]
fold[fold.even_mask].scatter(label="even", ax=ax)
if yline is not None:
ax.axhline(yline, 0, 1, lw=2, ls="--", c="k")
ax = axs[1]
fold[fold.odd_mask].scatter(label="odd", ax=ax)
if yline is not None:
ax.axhline(yline, 0, 1, lw=2, ls="--", c="k")
if duration is not None:
xlim = 3 * duration / period
axs[0].set_xlim(-xlim, xlim)
axs[1].set_xlim(-xlim, xlim)
ax.set_ylabel("")
fig.subplots_adjust(wspace=0)
return fig
def plot_hrd_spectral_types(
x=None,
y=None,
c=None,
cmap="viridis",
invert_xaxis=True,
invert_yaxis=False,
**plot_kwargs,
):
    """Plot the Mamajek table either as a custom (x, y) scatter or, by default,
    as an HR diagram with one track per spectral class."""
df = get_mamajek_table()
fig, ax = pl.subplots(1, 1, **plot_kwargs)
if (x is not None) & (y is not None):
_ = df.plot.scatter(x=x, y=y, c=c, ax=ax, cmap=cmap)
# _ = df.plot.scatter(x='V-Ks', y='M_Ks', c='R_Rsun', cmap='viridis')
# ax.axhline(6.7, 0, 1, ls='--', c='k')
# ax.annotate(s='fully convective', xy=(7, 8), fontsize=12)
if invert_xaxis:
ax.invert_xaxis()
if invert_yaxis:
ax.invert_yaxis()
ax.set_ylabel(y)
ax.set_xlabel(x)
else:
classes = []
for idx, g in df.assign(SpT2=df["#SpT"].apply(lambda x: x[0])).groupby(
by="SpT2"
):
classes.append(idx)
x = g["logT"].astype(float)
y = g["logL"].astype(float)
ax.plot(x, y, label=idx)
ax.set_ylabel(r"$\log_{10}$ (L/L$_{\odot}$)")
ax.set_xlabel(r"$\log_{10}$ (T$_{\rm{eff}}$/K)")
ax.legend()
ax.invert_xaxis()
return fig
def plot_cluster_kinematics(
ticid=None,
toiid=None,
cluster_name=None,
frac_err=0.5,
rv=None,
savefig=False,
):
    """Plot the target's kinematics (galactocentric XYZ-UVW, proper motions,
    CMD, HRD) against its host cluster."""
assert (ticid is not None) | (toiid is not None)
t = Target(toiid=toiid, ticid=ticid)
if cluster_name is None:
cluster, idxs = t.get_cluster_membership(
catalog_name="CantatGaudin2020",
return_idxs=True,
frac_err=frac_err,
sigma=5,
)
cluster_name = cluster.Cluster
c = Cluster(cluster_name=cluster_name)
df_target = t.query_gaia_dr2_catalog(return_nearest_xmatch=True)
if rv is not None:
df_target.radial_velocity = rv
else:
if np.isnan(df_target.radial_velocity):
rv = np.nanmean(list(t.query_vizier_param("RV").values()))
if not np.isnan(rv):
df_target.radial_velocity = rv
try:
fig1 = c.plot_xyz_uvw(
target_gaiaid=t.gaiaid, df_target=df_target, match_id=False
)
fig1.suptitle(f"{t.target_name} in {c.cluster_name}")
if savefig:
fp1 = f"{t.target_name}_galactocentric.png"
fig1.savefig(fp1, bbox_inches="tight")
except Exception as e:
print("Error: ", e)
# ==============
try:
log10age = c.get_cluster_age()
fig2 = c.plot_rdp_pmrv(
target_gaiaid=t.gaiaid, df_target=df_target, match_id=False
)
fig2.suptitle(f"{t.target_name} in {c.cluster_name}")
if savefig:
fp2 = f"{t.target_name}_kinematics.png"
fig2.savefig(fp2, bbox_inches="tight")
except Exception as e:
print("Error: ", e)
# ==============
try:
# TODO: AG50 doesn't yield G consistent with cmd
# if str(df_target.a_g_val) == "nan":
# vq = t.query_vizier_param("AG50")
# if "I/349/starhorse" in vq:
# df_target.a_g_val = vq["I/349/starhorse"]
# print("Using AG from starhorse.")
log10age = c.get_cluster_age()
ax = c.plot_cmd(
target_gaiaid=t.gaiaid,
df_target=df_target,
match_id=False,
log_age=log10age,
)
ax.set_title(f"{t.target_name} in {c.cluster_name}")
if savefig:
fp3 = f"{t.target_name}_cmd.png"
ax.figure.savefig(fp3, bbox_inches="tight")
except Exception as e:
print("Error: ", e)
try:
ax = c.plot_hrd(
target_gaiaid=t.gaiaid,
df_target=df_target,
match_id=False,
log_age=log10age,
)
ax.set_title(f"{t.target_name} in {c.cluster_name}")
if savefig:
fp4 = f"{t.target_name}_hrd.png"
ax.figure.savefig(fp4, bbox_inches="tight")
except Exception as e:
print("Error: ", e)
def plot_depth_dmag(gaia_catalog, gaiaid, depth, kmax=1.0, ax=None):
"""
gaia_catalog : pandas.DataFrame
gaia catalog
gaiaid : int
target gaia DR2 id
depth : float
observed transit depth
kmax : float
maximum depth
"""
    good, bad, dmags = [], [], []
    idx = gaia_catalog.source_id.isin([gaiaid])
    # use .loc with the boolean mask (iloc rejects it) and extract the scalar
    target_gmag = gaia_catalog.loc[idx, "phot_g_mean_mag"].values[0]
    if ax is None:
        fig, ax = pl.subplots(1, 1)
for _, row in gaia_catalog.iterrows():
id, mag = row[["source_id", "phot_g_mean_mag"]]
if int(id) != gaiaid:
dmag = mag - target_gmag
gamma = 1 + 10 ** (0.4 * dmag)
            # draw on ax (pl.plot previously targeted whatever figure was current)
            ax.plot(dmag, kmax / gamma, "b.")
dmags.append(dmag)
if depth > kmax / gamma:
# observed depth is too deep to have originated from the secondary star
good.append(id)
else:
# uncertain signal source
bad.append(id)
ax.axhline(depth, 0, 1, c="k", ls="--", label="TESS depth")
dmags = np.linspace(min(dmags), max(dmags), 100)
gammas = 1 + 10 ** (0.4 * dmags)
ax.plot(dmags, kmax / gammas, "r-")
ax.set_yscale("log")
return ax
def plot_out_of_transit(flat, per, t0, depth):
    """Fold at t0 + P/2 so the transit sits at the phase edges, then zoom in
    on the out-of-transit flux at three successive scales."""
fig, axs = pl.subplots(3, 1, figsize=(10, 10), gridspec_kw={"hspace": 0.1})
dy = 5 if depth < 0.01 else 1.5
ylim = (1 - dy * depth, 1 + 1.1 * depth)
_ = plot_fold_lc(
flat, period=per, epoch=t0 + per / 2, duration=None, ax=axs[0]
)
axs[0].axhline(1 - depth, 0, 1, c="C1", ls="--")
pl.setp(axs[0], xlim=(-0.5, 0.5), ylim=ylim)
_ = plot_fold_lc(
flat, period=per, epoch=t0 + per / 2, duration=None, ax=axs[1]
)
axs[1].axhline(1 - depth, 0, 1, c="C1", ls="--")
axs[1].legend("")
pl.setp(axs[1], xlim=(-0.3, 0.3), title="", ylim=ylim)
_ = plot_fold_lc(
flat, period=per, epoch=t0 + per / 2, duration=None, ax=axs[2]
)
axs[2].axhline(1 - depth, 0, 1, c="C1", ls="--")
axs[2].legend("")
pl.setp(axs[2], xlim=(-0.1, 0.1), title="", ylim=ylim)
return fig
def plot_fold_lc(
flat, period=None, epoch=None, duration=None, binsize=10, ax=None
):
"""
    Plot the phase-folded lightcurve; period and epoch must be supplied (e.g. from the TOI ephemeris).
"""
if ax is None:
fig, ax = pl.subplots(figsize=(12, 8))
errmsg = "Provide period and epoch."
assert (period is not None) & (epoch is not None), errmsg
fold = flat.fold(period=period, t0=epoch)
fold.scatter(ax=ax, c="k", alpha=0.5, label="raw")
fold.bin(binsize).scatter(ax=ax, s=20, c="C1", label=f"bin {binsize}")
if duration is not None:
xlim = 3 * duration / 24 / period
ax.set_xlim(-xlim, xlim)
if hasattr(flat, "target_name"):
target_name = flat.target_name
else:
target_name = f"TIC {flat.targetid}"
ax.set_title(f"{target_name} (sector {flat.sector})")
return ax
def plot_interactive(
catalog_name="CantatGaudin2020",
min_parallax=1.5,
thin=10,
width=800,
height=400,
):
"""show altair plots of TOI and clusters
Parameters
----------
    min_parallax : float
        parallax cut in mas (default=1.5 mas, i.e. within ~667 pc)
    thin : integer
        thinning factor to use only every nth cluster member
"""
try:
import altair as alt
    except ModuleNotFoundError:
        # alt is used unconditionally below, so fail fast instead of
        # printing and crashing later with a NameError
        raise ModuleNotFoundError("pip install altair")
if sys.argv[-1].endswith("json"):
print("import altair; altair.renderers.enable('notebook')")
cc = ClusterCatalog(verbose=False)
df0 = cc.query_catalog(catalog_name=catalog_name, return_members=False)
df2 = cc.query_catalog(catalog_name=catalog_name, return_members=True)
# add members count from df2 in df0
# counts = df2.groupby('Cluster').size()
# counts.name = 'Nstars'
# counts = counts.reset_index()
# df0 = pd.merge(df0, counts, how='outer')
idx = df0.parallax >= min_parallax
df0 = df0.loc[idx]
df0["distance"] = Distance(parallax=df0["parallax"].values * u.mas).pc
# plot catalog
chart0 = (
alt.Chart(df0)
.mark_point(color="red")
.encode(
x=alt.X(
"ra:Q",
axis=alt.Axis(title="RA"),
scale=alt.Scale(domain=[0, 360]),
),
y=alt.Y(
"dec:Q",
axis=alt.Axis(title="Dec"),
scale=alt.Scale(domain=[-90, 90]),
),
tooltip=[
"Cluster:N",
"distance:Q",
"parallax:Q",
"pmra:Q",
"pmdec:Q",
"Nstars:Q",
],
)
.properties(width=width, height=height)
.interactive()
)
# get TOI list
toi = get_tois(verbose=False, clobber=False)
toi["TIC_ID"] = toi["TIC ID"]
toi["RA"] = Angle(toi["RA"].values, unit="hourangle").deg
toi["Dec"] = Angle(toi["Dec"].values, unit="deg").deg
# plot TOI
chart1 = (
alt.Chart(toi, title="TOI")
.transform_calculate(
# FIXME: url below doesn't work in pop-up chart
url="https://exofop.ipac.caltech.edu/tess/target.php?id="
+ alt.datum.TIC_ID
)
.mark_point(color="black")
.encode(
x=alt.X(
"RA:Q",
axis=alt.Axis(title="RA"),
scale=alt.Scale(domain=[0, 360]),
),
y=alt.Y(
"Dec:Q",
axis=alt.Axis(title="Dec"),
scale=alt.Scale(domain=[-90, 90]),
),
tooltip=[
"TOI:Q",
"TIC ID:Q",
"url:N",
"Stellar Distance (pc):Q",
"PM RA (mas/yr):Q",
"PM Dec (mas/yr):Q",
],
)
.properties(width=width, height=height)
.interactive()
)
# plot cluster members
idx = df2.parallax >= min_parallax
df2 = df2.loc[idx]
# skip other members
df2 = df2.iloc[::thin, :]
chart2 = (
alt.Chart(df2)
.mark_circle()
.encode(
x="ra:Q",
y="dec:Q",
color="Cluster:N",
tooltip=[
"source_id:O",
"parallax:Q",
"pmra:Q",
"pmdec:Q",
"phot_g_mean_mag:Q",
],
)
.properties(width=width, height=height)
.interactive()
)
return chart2 + chart1 + chart0
def df_to_gui(df, xaxis=None, yaxis=None):
"""
turn df columns into interactive 2D plots
"""
try:
import panel as pn
import hvplot.pandas
except Exception:
cmd = "pip install hvplot panel"
raise ModuleNotFoundError(cmd)
x = pn.widgets.Select(name="x", value=xaxis, options=df.columns.tolist())
y = pn.widgets.Select(name="y", value=yaxis, options=df.columns.tolist())
kind = pn.widgets.Select(
name="kind", value="scatter", options=["bivariate", "scatter"]
)
plot = df.hvplot(x=x, y=y, kind=kind, colorbar=False, width=600)
return pn.Row(pn.WidgetBox(x, y, kind), plot)
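# Usage sketch for df_to_gui (column names are placeholders; run in a notebook
# with panel initialized, e.g. via pn.extension()):
#   df_to_gui(df, xaxis="ra", yaxis="dec")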
| jpdeleon/chronos | chronos/plot.py | plot.py | py | 45,997 | python | en | code | 5 | github-code | 13 |
7772687970 | import os
import kaa
import kaa.metadata
import core
backends = {}
def init(base):
"""
Initialize the kaa.webmetadata databases
"""
if backends:
return
import thetvdb as backend
backends['thetvdb'] = backend.TVDB(os.path.expanduser(base + '/thetvdb'))
def parse(filename, metadata=None):
"""
Parse the given filename and return information from the db. If
    metadata is None it will be created using kaa.metadata. Any
    dictionary-like object is allowed.
"""
if not metadata:
metadata = kaa.metadata.parse(filename)
if not metadata.get('series', None):
return None
for db in backends.values():
result = db.get_entry_from_metadata(metadata)
if result and isinstance(result, core.Episode):
return result
def search(filename, metadata=None, backend='thetvdb'):
"""
    Search for the given filename on the web. If metadata is None it will
    be created using kaa.metadata. Any dictionary-like object is
    allowed.
"""
    if backend not in backends:
return None
if not metadata:
metadata = kaa.metadata.parse(filename)
if metadata.get('series', None):
return backends[backend].search(metadata.get('series'), filename, metadata)
return None
@kaa.coroutine()
def add_series_by_search_result(result, alias=None):
"""
Adds a new series given a SearchResult to the database.
"""
module = backends.get(result.id.split(':')[0], None)
if not module:
raise ValueError('Search result is not valid')
yield (yield module.add_series_by_search_result(result, alias))
def series(name):
for db in backends.values():
series = db.get_entry_from_metadata(dict(series=name))
if series:
return series
| freevo/kaa-webmetadata | src/tv/__init__.py | __init__.py | py | 1,797 | python | en | code | 2 | github-code | 13 |
# Palindrome? (BOJ 10942)
import sys
input = sys.stdin.readline
n = int(input())
numbers = list(map(int,input().split()))
isPel = [[0] * n for _ in range(n)]
# N x N DP table
for i in range(n):
    isPel[i][i] = True # a single element is always a palindrome
if i < n-1: isPel[i][i+1] = (numbers[i] == numbers[i+1])
for diff in range(2,n):
for i in range(n - diff):
isPel[i][i + diff] = isPel[i+1][i + diff-1] & (numbers[i] == numbers[i + diff])
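# The fill above implements: span [i, i+diff] is a palindrome iff its endpoints
# match and the inner span [i+1, i+diff-1] already is one; e.g. for
# numbers = [1, 2, 1, 2, 1], isPel[0][4] = isPel[1][3] & (1 == 1) = True.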
# process the queries
for _ in range(int(input())):
s,e = map(int,input().split())
print(int(isPel[s-1][e-1]))
| hodomaroo/BOJ-Solve | 백준/Gold/10942. 팰린드롬?/팰린드롬?.py | 팰린드롬?.py | py | 569 | python | en | code | 2 | github-code | 13 |
43370956216 | from sklearn.model_selection import train_test_split
import glob
import pickle
'''
This function creates the train/validation split used to build the model,
plus a held-out test set used to evaluate it.
'''
def create_train_validation_test_sets(input_dir):
txtfiles=[]
''' Reading all files from the directory'''
class_label=[]
for filename in glob.glob(input_dir+"*.txt"):
txtfiles.append(filename)
        '''Label each file by its name: "ham" files are non-spam (1),
           everything else is spam (0).
'''
        if 'ham' in filename:
class_label.append(1)
else:
class_label.append(0)
    '''splitting the data into train and test sets'''
X_train, X_test, y_train, y_test = train_test_split(txtfiles, class_label, test_size = 0.1, random_state = 42)
''' saving the training and validation sets'''
with open("train_validation_sets.txt", 'wb') as f:
pickle.dump(X_train, f)
''' Saving the test set'''
with open("test_set.txt", 'wb') as f:
pickle.dump(X_test, f)
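# Note: only the file paths are pickled; the ham/spam labels are dropped here,
# so whatever loads these pickles must re-derive labels from the filenames.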
if __name__ == "__main__":
create_train_validation_test_sets("raw_input/") | Nayyaroddeen/spam_classification | preprocess.py | preprocess.py | py | 1,142 | python | en | code | 0 | github-code | 13 |
13529831262 | import pytz
import datetime
import cv2
import json
from openpyxl import Workbook
from django.http import JsonResponse, StreamingHttpResponse
from django.shortcuts import render, HttpResponse, redirect
from django.views.generic.detail import DetailView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core import serializers
from django.views.decorators import gzip
from .models import Point, CarRegister, WhiteList
from .forms import WhiteListForm
@login_required(login_url='/account/login')
def index(request):
return render(request, 'carRegister/index.html')
@login_required(login_url='/account/login')
def save_point(request):
if request.method == "POST":
x = request.POST.get("x")
y = request.POST.get("y")
camera = request.POST.get("camera")
x_relative = request.POST.get('x_relative')
point = Point.objects.create(x=x, y=y, camera=camera, x_relative=x_relative,
date=datetime.datetime.now(pytz.timezone("Asia/Yekaterinburg")))
point.save()
return HttpResponse('OK')
@login_required(login_url='/account/login')
def last_point(request):
camera = request.GET.get('camera', None)
if camera is not None:
point = Point.objects.filter(camera=int(camera)).order_by('-id').first()
if point is not None:
response_data = {'x': point.x, 'y': point.y}
return JsonResponse(response_data)
return JsonResponse({'error': 'Invalid request'})
@login_required(login_url='/account/login')
def actions_history(request):
objects = {}
if request.method == 'POST':
number = request.POST.get('number')
order = request.POST.get('order')
date_from = request.POST.get('date_from')
date_to = request.POST.get('date_to')
type_of_action = request.POST.get('type_of_action')
if number:
number = WhiteList.objects.filter(car_number=number.upper().strip())
cars = CarRegister.objects.filter(employee__in=number).filter(type_of_action=type_of_action)
else:
cars = CarRegister.objects.filter(type_of_action=type_of_action)
        if order == "desc":
            cars = cars.order_by('-date')
        else:
            cars = cars.order_by('date')
        # chain the filters so ordering and both date bounds compose; the old
        # code reassigned from the unfiltered queryset each time, discarding
        # every earlier clause
        if date_from:
            cars = cars.filter(date__gte=date_from)
        if date_to:
            cars = cars.filter(date__lte=date_to)
        objects["objects"] = cars
else:
objects["objects"] = CarRegister.objects.all()
return render(request, 'carRegister/actions-history.html', objects)
class MyModelDetailView(LoginRequiredMixin, DetailView):
login_url = '/account/login'
model = CarRegister
template_name = 'carRegister/actions-detail.html'
context_object_name = 'item'
@login_required(login_url='/account/login')
@gzip.gzip_page
def video_feed(request):
cap = cv2.VideoCapture(0)
def video_stream():
while True:
ret, frame = cap.read()
if not ret:
break
_, buffer = cv2.imencode('.jpg', cv2.flip(cv2.resize(frame, (800, 600)), 1))
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
return StreamingHttpResponse(video_stream(), content_type='multipart/x-mixed-replace; boundary=frame')
@login_required(login_url='/account/login')
@gzip.gzip_page
def video_feed2(request):
cap = cv2.VideoCapture(0)
def video_stream():
while True:
ret, frame = cap.read()
if not ret:
break
_, buffer = cv2.imencode('.jpg', cv2.flip(cv2.resize(frame, (800, 600)), 1))
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
return StreamingHttpResponse(video_stream(), content_type='multipart/x-mixed-replace; boundary=frame')
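# Both views above emit an MJPEG stream: each JPEG frame is sent as its own
# part of a multipart/x-mixed-replace response, so the browser keeps replacing
# the displayed image with the newest frame.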
@login_required(login_url='/account/login')
def export_records(request):
number = request.GET.get('number')
order = request.GET.get('order')
date_from = request.GET.get('date_from')
date_to = request.GET.get('date_to')
type_of_action = request.GET.get('type_of_action')
if number:
number = WhiteList.objects.filter(car_number=number.upper().strip())
cars = CarRegister.objects.filter(employee__in=number).filter(type_of_action=type_of_action)
else:
cars = CarRegister.objects.filter(type_of_action=type_of_action)
if order == "desc":
objects = cars.order_by('-date')
else:
objects = cars.order_by('date')
if date_from:
objects = objects.filter(date__gte=date_from)
if date_to:
objects = objects.filter(date__lte=date_to)
queryset = objects
wb = Workbook()
ws = wb.active
ws.append(['№', 'Номер автомобиля', 'Марка', 'Модель', "Год выпуска", "Дата", "Тип действия"])
for i, obj in enumerate(queryset):
row = [i + 1, obj.employee.car_number, obj.employee.car_mark, obj.employee.car_model, obj.employee.car_year,
obj.date.strftime('%Y-%m-%d %H:%M:%S'), obj.type_of_action]
ws.append(row)
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename="exported_data.xlsx"'
wb.save(response)
return response
@login_required(login_url='/account/login')
def add_white(request):
error = ''
if request.method == "POST":
form = WhiteListForm(request.POST)
if form.is_valid():
form.save()
return redirect('/white_list')
else:
error = 'Форма была неверной'
form = WhiteListForm()
data = {'form': form,
'error': error}
return render(request, 'carRegister/add_white.html', data)
def add_white_1c(request):
number = request.GET['number']
name = request.GET['name']
if WhiteList.objects.filter(car_number=number).exists():
return HttpResponse("Сотрудник с таким номером присутсвует в базе")
elif number and name:
white = WhiteList(car_number=number, name=name)
white.save()
return HttpResponse("Данные записаны успешно")
return HttpResponse("Неверные входные данные")
def get_white_list_1c(request):
white_list = serializers.serialize('json', WhiteList.objects.all())
return HttpResponse(white_list, content_type='application/json')
def white_list(request):
white_list_objs = WhiteList.objects.all()
return render(request, 'carRegister/white_list.html', {'objects': white_list_objs})
def delete_1c(request):
number = request.GET['number']
obj = WhiteList.objects.filter(car_number=number)
obj.delete()
return HttpResponse("OK")
def get_actions_history_1c(request):
actions = CarRegister.objects.all()
if 'name' in request.GET:
number = WhiteList.objects.filter(name=request.GET['name'])
actions = actions.filter(employee__in=number)
if "date_from" in request.GET:
actions = actions.filter(date__gte=request.GET['date_from'])
if "date_to" in request.GET:
actions = actions.filter(date__lte=request.GET['date_to'])
result = []
for act in actions:
tmp = {'name': act.employee.name, 'number': act.employee.car_number, 'date': str(act.date),
'type_of_action': act.type_of_action}
result.append(tmp)
res = json.dumps(result)
return HttpResponse(res, content_type='application/json')
@login_required(login_url='/account/login')
def white_list_delete(request, pk):
obj = WhiteList.objects.filter(id=pk)
obj.delete()
return redirect('/white_list')
| HENNESSYxie/NPR_web | NPR_web/carRegister/views.py | views.py | py | 7,922 | python | en | code | 0 | github-code | 13 |
21937531072 | from typing import Dict
from telegram import Update, MessageEntity
from telegram.ext import CallbackContext, Handler
from Constants import logger
from conversations.commands import MainCommands
from conversations.handlers import ADD_TASK_CONVERSATION_HANDLER, CHECK_TASK_CONVERSATION_HANDLER, \
LIST_TASKS_CONVERSATION_HANDLER, DELETE_TASK_CONVERSATION_HANDLER
from conversations.handlers.common import ROOT_CANCEL_HANDLER, HELP_HANDLER, INVALID_COMMAND_HANDLER
from conversations.handlers.start import START_CONVERSATION_HANDLER
from entities.ChatData import ChatData
switcher_v2: Dict[str, Handler] = {
MainCommands.START.value: START_CONVERSATION_HANDLER,
MainCommands.ADD_TASK.value: ADD_TASK_CONVERSATION_HANDLER,
MainCommands.CHECK_TASK.value: CHECK_TASK_CONVERSATION_HANDLER,
MainCommands.LIST_TASKS.value: LIST_TASKS_CONVERSATION_HANDLER,
MainCommands.DELETE_TASK.value: DELETE_TASK_CONVERSATION_HANDLER,
MainCommands.INVALID_COMMAND.value : INVALID_COMMAND_HANDLER,
MainCommands.HELP.value: HELP_HANDLER,
MainCommands.CANCEL.value: ROOT_CANCEL_HANDLER,
}
def root_router_v2(update: Update, context: CallbackContext):
chat_id = update.effective_chat.id
logger.info("[root_router] Entered conv again {}".format(chat_id))
command = update.message.text.split('@')[0].lstrip('/')
logger.info('[root_router] command: {}'.format(command))
handler = find_handler(command, chat_id, context.chat_data)
check = handler.check_update(update)
logger.info('[root_router] check: {}'.format(check))
if check is None or check is False:
handler = INVALID_COMMAND_HANDLER
check = handler.check_update(update)
logger.error('[root_router] check is false or none')
handler.handle_update(update, context.dispatcher, check, context)
def find_handler(command: str, chat_id: int, chat_data: ChatData):
if chat_id in chat_data:
chat_data: ChatData = chat_data[chat_id]
ongoing_conv: MainCommands = chat_data.ongoing_conversation
if ongoing_conv is not None:
logger.info('[find_handler] ongoing_conv: {}'.format(ongoing_conv.value))
return find_in_command_switcher(ongoing_conv.value)
logger.info('[find_handler] switcher key: {}'.format(command))
return find_in_command_switcher(command)
def find_in_command_switcher(command):
if command in switcher_v2:
return switcher_v2[command]
return switcher_v2[MainCommands.INVALID_COMMAND.value]
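# Wiring sketch (hypothetical; assumes a python-telegram-bot v13-style setup):
#   from telegram.ext import MessageHandler, Filters
#   dispatcher.add_handler(MessageHandler(Filters.command, root_router_v2))
# root_router_v2 then dispatches to the per-command ConversationHandlers above.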
| dattatreya303/round_robin_tasker | conversations/callbacks/root_handler_callbacks.py | root_handler_callbacks.py | py | 2,499 | python | en | code | 0 | github-code | 13 |
36305313443 | import os
import requests
import time
import re
import random
import argparse
import logging
from config import IMPORTANT_COINS, WURL, MROOM, MTOKEN, MSERVER
def send_matrix_msg(msg):
if "**" not in msg:
data = {
"msgtype": "m.text",
"body": msg,
}
else:
formatted_msg = msg
for i in range(formatted_msg.count("**")):
if i % 2 == 0:
rep = "<strong>"
else:
rep = "</strong>"
formatted_msg = formatted_msg.replace("**", rep, 1)
formatted_msg = formatted_msg.replace("\n", "<br>")
data = {
"msgtype": "m.text",
"body": msg,
"format": "org.matrix.custom.html",
"formatted_body": formatted_msg,
}
url = "https://%(MSERVER)s/_matrix/client/r0/rooms/%(MROOM)s/send/m.room.message?access_token=%(MTOKEN)s"
url = url % {"MSERVER": MSERVER, "MROOM": MROOM, "MTOKEN": MTOKEN}
r = requests.post(url, json=data)
r.raise_for_status()
def fetch_text(url):
try:
r = requests.get(url)
r.raise_for_status()
except:
return None
return r.text
def fetch_links(url):
data = fetch_text(url)
if not data:
return None
links = re.findall("<a href=\"(\/currencies.+?\/)", data)
return links
def get_sign_change(text):
match = re.findall(
"priceValue.*?<span class=\"icon-Caret-(\w+)\"></span>(\d+\.\d+)",
text)
if not match:
return None, None
sign, change = match[0]
return sign, change
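# Example of the markup get_sign_change targets (hypothetical HTML fragment):
#   ...class="priceValue"...<span class="icon-Caret-up"></span>5.21%...
# yields sign="up", change="5.21", i.e. a +5.21% 24h move.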
def gather_change(fh=False, logger=logging):
if fh:
links = []
for i in range(1, 6):
links += set(fetch_links(f"https://coinmarketcap.com/?page={i}"))
else:
wlinks = fetch_links(WURL)
top100links = fetch_links("https://coinmarketcap.com/")
links = set(wlinks + top100links)
if not links:
logger.error("Couldn't fetch links")
return
msgs = []
for link in links:
time.sleep(0.3 + random.random() * 0.2)
url = "https://coinmarketcap.com%s" % link
page = fetch_text(url)
if not page:
logger.error("Failed to fetch text from url %s" % url)
continue
coin_name = link.split("/")[2]
sign, change = get_sign_change(page)
    if not sign:  # change is None whenever sign is
logger.error("Failed to extract change and sign for %s" %
coin_name)
continue
change = float(change)
if sign.lower() == "down":
change = -change
if fh:
threshold = 20
else:
if link in top100links:
threshold = 5
else:
threshold = 10
logger.debug(f"{coin_name} {change}")
if abs(change) > threshold:
# If coin in important highlight
if coin_name in IMPORTANT_COINS:
coin_name = "**%s**" % coin_name
if change < -20:
msg = f"{coin_name} 24h change -> {change}% 📉 BEARISH"
elif change < 0:
msg = f"{coin_name} 24h change -> {change}% 📉"
elif change > 20:
msg = f"{coin_name} 24h change -> {change}% 📈 BULLISH"
else:
msg = f"{coin_name} 24h change -> {change}% 📈"
logger.info(msg)
msgs.append(msg)
if msgs:
try:
send_matrix_msg("\n".join(msgs))
except:
logger.error("Failed to send matrix message")
else:
logger.info("Matrix message sent")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-v',
"--verbose",
action="store_true",
default=False,
help="verbose mode")
parser.add_argument('-fh',
action="store_true",
default=False,
help="top 500 cryptos")
args = parser.parse_args()
logger = logging.getLogger("bot_watcher")
loglevel = logging.DEBUG if args.verbose else logging.INFO
logger.setLevel(loglevel)
logformat = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(logformat)
streamhandler = logging.StreamHandler()
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
DIR = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(DIR, "bot_watcher.log")
filehandler = logging.FileHandler(filepath)
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
gather_change(args.fh, logger)
| MQ37/crypto-price-matrix-bot | main.py | main.py | py | 4,783 | python | en | code | 0 | github-code | 13 |
33582931227 | import discord
from discord import app_commands, Object
import re
import os
import random as rand
import asyncio
from typing import List
import logging
intents = discord.Intents.default()
intents.guilds = True
client = discord.Client(intents=intents)
tree = app_commands.CommandTree(client)
@client.event
async def on_ready():
print(discord.__version__)
print("ready")
await tree.sync()
print("go!")
@client.event
async def on_guild_join(guild):
print("new-server-join:" + guild.name + "," + str(guild.id))
@client.event
async def on_app_command_completion(interaction: discord.Interaction,command):
if interaction.user.bot:
return
    print(command.name + " was executed in " + interaction.guild.name + "(" + str(interaction.guild.id) + ") by " + interaction.user.name + "(" + str(interaction.user.id) + ")")
@tree.command(description="1〜100のランダムな整数を1つ出します")
async def random(interaction: discord.Interaction):
if interaction.user.bot:
return
await interaction.response.send_message(rand.randrange(100)+1)
@tree.command(description="コイントスをします")
async def randcoin(interaction: discord.Interaction):
if interaction.user.bot:
return
await interaction.response.send_message("表" if rand.randrange(2) else "裏")
@tree.command(description="任意の数ダイスを振ります。?D?の形でオプションを入力してください")
async def randdice(interaction: discord.Interaction,roll:str = "1D6"):
if interaction.user.bot:
return
    dice = re.fullmatch(r"\d+[dD]\d+", roll)  # raw string avoids invalid-escape warnings
if(dice == None):
await interaction.response.send_message("?D?の形式で入力してください")
return
else:
        digits = re.findall(r"\d+", roll)
result = 0
if 0 < int(digits[0]) <= 100 and 0 < int(digits[1]) <= 1000:
for i in range(int(digits[0])):
result += rand.randrange(int(digits[1])) + 1
else:
await interaction.response.send_message("ダイスの数は100以下、出目は1000以下の自然数に設定してください。")
return
await interaction.response.send_message("`" + roll + "`: **" + str(result) + "**")
class Parter(discord.ui.View):
@discord.ui.button(label='参加', style=discord.ButtonStyle.green)
async def callbacksubmit(self, interaction: discord.Interaction, button: discord.ui.Button):
text = interaction.message.content
if "<@" + str(interaction.user.id) + ">" not in interaction.message.content:
text += "<@" + str(interaction.user.id) + ">"
else:
text = text.replace("<@" + str(interaction.user.id) + ">","")
await interaction.response.edit_message(content=text,view=self)
@discord.ui.button(label='GO!', style=discord.ButtonStyle.red)
async def callbackstart(self, interaction: discord.Interaction, button: discord.ui.Button):
users = re.findall(r'<@\d+>',interaction.message.content)
if len(users) == 0:
await interaction.channel.send(content="対象者がいません",delete_after=2)
else:
rand.shuffle(users)
text = ""
for i,user in enumerate(users):
text += str(i+1) + " :" + user + "\n"
self.children[0].disabled = True
self.children[1].disabled = True
await interaction.response.edit_message(view=self)
await interaction.channel.send(content=text)
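# Design note: the participant list lives in the message text itself -- the
# green button toggles the presser's <@id> mention in the content, and GO!
# re-parses those mentions, shuffles them, and posts the resulting order.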
@tree.command(description="ランダムに順番を決めます")
async def randorder(interaction: discord.Interaction):
if interaction.user.bot:
return
await interaction.response.send_message(content="対象者:",view=Parter())
@tree.command(description="ランダムBOTのヘルプを表示します")
async def help(interaction: discord.Interaction):
if interaction.user.bot:
return
await interaction.response.send_message(content="""`/random`:1~100のランダムな数字を一つ出力します
`/randcoin`:表か裏のどちらかをランダムで出力します
`/randdice`:ダイスロールをします サイコロの数や目は?D?の形式で指定することが出来ます 指定しない場合1D6となります
`/randorder`:ランダムに順番を決めます 実行するとボタンが表示され,ボタンを押した人が抽選の対象となります""")
client.run(os.environ["TOKEN"],log_level=logging.ERROR) | taisei12232/order-bot | discordbot.py | discordbot.py | py | 4,504 | python | en | code | 0 | github-code | 13 |
6929927110 | # -*- coding: utf-8 -*-
import os
import sys
import time
import math
import numpy as np
import random
from threading import Thread
from math import exp
from math import log
import torch
import torch.distributed as dist
from torch.autograd import Variable
from cjltest.utils_model import MySGD, test_model
def fixed_update(rank, size, args, time_length, model, momentum_buffers, correction, norm_gradients, param_storage, epochs, loss_record, status, group, cpu, gpu, ):
for epoch in range(epochs):
status.append(False)
time.sleep(time_length-5)
status[-1] = True
time.sleep(5)
# if rank == 1:
# print("Rank 1 (Thread-1) parameters (Before updates): ")
# print(model.parameters())
# print(momentum_buffers)
# calculate the loss and iterations
loss = sum(loss_record).item() / len(loss_record)
        print("Rank: {}\t\tEpoch: {}\t\tLocal Updates: {}\t\tLoss: {}".format(rank, epoch, len(loss_record), loss))
# Synchronization
# send epoch train loss to PS
loss_cpu = torch.tensor(loss, device=cpu)
dist.gather(tensor=loss_cpu, dst=0, group=group)
# send K to PS
tau = float(len(loss_record))
k_cpu = torch.tensor(tau, device=cpu)
dist.gather(tensor=k_cpu, dst=0, group=group)
# Compute a_i
a = (tau - args.beta*(1-args.beta**tau) / (1-args.beta)) / (1 - args.beta)
a *= 1/(size-1)
a_cpu = torch.tensor(a, device=cpu)
# send a_i to server
dist.gather(tensor=a_cpu, dst=0, group=group)
# # send normalized gradients to server
for idx, param in enumerate(model.parameters()):
norm_gradients[idx] = param.data - param_storage[idx].data
norm_gradients[idx] /= args.lr*a*(size-1)
norm_g_cpu = torch.tensor(data=norm_gradients[idx].data, device=cpu)
dist.gather(tensor=norm_g_cpu, dst=0, group=group)
# receive the parameters
for idx, param in enumerate(model.parameters()):
recv = torch.zeros_like(param.data, device=cpu)
dist.scatter(tensor=recv, src=0, group=group)
param.data = torch.tensor(recv, device=gpu)
param_storage[idx].data = torch.zeros_like(param.data, device=gpu) + param.data
del(recv)
# # receive the normalized gradients d_i
for idx, param in enumerate(model.parameters()):
recv = torch.zeros_like(param.data, device=cpu)
dist.scatter(tensor=recv, src=0, group=group)
recv_d = torch.tensor(recv.data, device=gpu)
correction[idx].data = recv_d - norm_gradients[idx]
del(recv, recv_d)
# Set the momentums to zeros, after each synchronization
momentum_buffers[idx] = torch.zeros_like(param.data, device=gpu)
# print("Rank {} threshold: {}".format(rank, threshold))
print("Rank: {}\t\tEpoch: {}\t\tReceive the new gradient!".format(rank, epoch))
# if rank == 1:
# print("Rank 1 (Thread-1) parameters (end updates): ")
# print(model.parameters())
# print(momentum_buffers)
loss_record.clear()
if epoch % args.lr_decay == 0:
args.lr /= 10
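# The coefficient `a` sent during synchronization above matches the
# FedNova-style normalizer for local momentum SGD:
#   a = (tau - beta*(1 - beta**tau)/(1 - beta)) / (1 - beta),
# the l1-norm of the cumulative step weights over tau local updates.
# Illustrative values: beta = 0.9, tau = 10 gives a ~ 41.4, versus a = tau = 10
# for plain SGD (beta = 0) -- momentum inflates the effective local progress.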
# noinspection PyTypeChecker
# Notice: transferring requires cpu, calculation requires gpu
def run(rank, size, model, args, train_data, test_data, weight):
cpu = torch.device('cpu')
gpu = torch.device('cuda:{}'.format(rank%args.num_gpu))
model = model.cuda(gpu)
    workers = [v + 1 for v in range(size - 1)]
    # note: list.append returns None, so the original `_group` was always None;
    # dist.new_group(None) builds a group over *all* ranks (server rank 0
    # included), which the gather/scatter calls against rank 0 below rely on
    _group = None
    group = dist.new_group(_group)
param_storage = [torch.zeros_like(param.data, device=gpu) for param in model.parameters()]
# print('Rank {}: Waiting for receiving the model! '.format(rank))
# Receive initial model from server
for idx, p in enumerate(model.parameters()):
tmp_p = torch.zeros_like(p, device=cpu)
dist.scatter(tensor=tmp_p, src=0, group=group)
p.data = torch.tensor(tmp_p, device=gpu)
param_storage[idx].data += p.data
print('Rank {} successfully received the model. '.format(rank))
## gradients = [torch.zeros_like(param.data, device=gpu) for param in model.parameters()]
norm_gradients = [torch.zeros_like(param.data, device=gpu) for param in model.parameters()]
if args.local_iteration == 'linear':
local_iteration = (100+(50*rank))
elif args.local_iteration == 'SL':
local_iteration = (100+(10*rank))
elif args.local_iteration == 'LL':
local_iteration = (100+(100*rank))
elif args.local_iteration == 'exp':
local_iteration = (2**(rank-1))
else:
print('No matched local iteration!')
sys.exit(-1)
loss_record, status = [], []
## sync = Thread(target=fixed_update, args=(rank, size,args, args.time_length, model, momentum_buffers, correction, norm_gradients, param_storage, args.epochs, loss_record, status, group, cpu, gpu, ), daemon=True)
## sync.start()
optimizer = MySGD(model.parameters(), lr=args.lr)
if args.model in ['MnistCNN', 'AlexNet']:
criterion = torch.nn.NLLLoss()
else:
criterion = torch.nn.CrossEntropyLoss()
print('Rank {} begins!'.format(rank))
model.train()
batch_iter = iter(train_data)
for t in range(args.epochs):
for it in range(local_iteration):
try:
data, target = next(batch_iter)
except:
batch_iter = iter(train_data)
data, target = next(batch_iter)
data, target = Variable(data).cuda(gpu), Variable(target).cuda(gpu)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
delta_ws = optimizer.get_delta_w()
loss_record.append(loss.data)
print('Rank: {}\t\tEpoch: {}\t\tIteration: {}\t\tLoss: {}'.format(rank, t, len(loss_record)-1, loss_record[-1]))
for idx, param in enumerate(model.parameters()):
## gradients[idx] = delta_ws[idx] / args.lr
# if args.method == 'RemSGD':
# param.data = param.data - args.lr * gradients[idx] * (args.gamma ** math.log2(len(loss_record))) + args.beta * momentum_buffers[idx]
# momentum_buffers[idx] = - args.lr * gradients[idx] * (args.gamma ** math.log2(len(loss_record))) + args.beta * momentum_buffers[idx]
## if args.method == 'FedAvg':
## inter_grad = delta_ws[idx]/args.lr
#delta_ws[idx].cuda(gpu)
## momentum_buffers[idx] = args.beta * momentum_buffers[idx] + inter_grad
param.data = param.data - delta_ws[idx]
## else:
## print('No matched method! Need FedAvg.')
## sys.exit(-1)
# Synchronization
# calculate the loss and iterations
loss = sum(loss_record).item() / len(loss_record)
print("Rank: {}\t\tEpoch: {}\t\tLocal Updates: {}\t\tLoss: {}".format(rank, t, len(loss_record), loss))
# send epoch train loss to PS
loss_cpu = torch.tensor(loss*weight, device=cpu)
dist.gather(tensor=loss_cpu, dst=0, group=group)
# send weighted tau to PS
tau = float(len(loss_record))
k = tau * weight
# print(tau)
k_cpu = torch.tensor(k, device=cpu)
dist.gather(tensor=k_cpu, dst=0, group=group)
## # Compute a_i
## if args.local_solver == 'FedAvg':
## a = (tau - args.beta*(1-math.pow(args.beta, tau)) / (1-args.beta)) / (1 - args.beta)
## a_cpu = torch.tensor(tau*weight, device=cpu)
## elif args.local_solver == 'FedProx':
##
## # send a_i*p_i to server
## dist.gather(tensor=a_cpu, dst=0, group=group)
# # send normalized gradients*p_i to server
for idx, param in enumerate(model.parameters()):
norm_gradients[idx] = param.data - param_storage[idx].data
#print(norm_gradients[idx].data)
norm_gradients[idx] = norm_gradients[idx] / tau
norm_g_cpu = torch.tensor(data=norm_gradients[idx].data*weight, device=cpu)
dist.gather(tensor=norm_g_cpu, dst=0, group=group)
# receive the parameters
for idx, param in enumerate(model.parameters()):
recv = torch.zeros_like(param.data, device=cpu)
dist.scatter(tensor=recv, src=0, group=group)
param.data = torch.tensor(recv.data, device=gpu)
param_storage[idx].data = torch.zeros_like(param.data, device=gpu) + param.data
#print(param_storage[5].data)
#del(recv)
## # # receive the normalized gradients p_i*d_i
## for idx, param in enumerate(model.parameters()):
## recv = torch.zeros_like(param.data, device=cpu)
## dist.scatter(tensor=recv, src=0, group=group)
## recv_d = torch.tensor(recv.data, device=gpu)
## correction[idx].data = recv_d - norm_gradients[idx]
#del(recv, recv_d)
# Set the momentums to zeros, after each synchronization
# momentum_buffers[idx] = torch.zeros_like(param.data, device=gpu)
# print("Rank {} threshold: {}".format(rank, threshold))
print("Rank: {}\t\tEpoch: {}\t\tReceive the new gradient!".format(rank, t))
loss_record.clear()
    ## time.sleep(1/(2**(rank))) #### still needs revision: enlarge the speed difference between workers ####
def init_processes(rank, size, model, args, train_data, test_data, weight, backend='mpi'):
dist.init_process_group(backend, rank=rank, world_size=size)
run(rank, size, model, args, train_data, test_data, weight)
| wanglikuan/FedNova | learner.py | learner.py | py | 9,901 | python | en | code | 0 | github-code | 13 |
16617943199 | # https://leetcode.com/problems/permutations/
import itertools
from typing import List
# Example 1:
#
nums = [1,2,3]
# Output: [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]
# Example 2:
#
# Input: nums = [0,1]
# Output: [[0,1],[1,0]]
# Example 3:
#
# Input: nums = [1]
# Output: [[1]]
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
results = []
prev_elements = []
def dfs(elements):
if len(elements) == 0:
results.append(prev_elements[:])
for e in elements:
next_elements = elements[:]
next_elements.remove(e)
prev_elements.append(e)
dfs(next_elements)
prev_elements.pop()
dfs(nums)
return results
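    # note: results.append(prev_elements[:]) stores a *copy*; appending
    # prev_elements itself would leave every result aliasing one list that
    # the backtracking pop() later empties.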
def combine(self, n: int, k: int) -> List[List[int]]:
result = []
def dfs(elements, start, k):
if k == 0:
result.append(elements[:])
return
for i in range(start, n+1):
elements.append(i)
dfs(elements, i + 1, k - 1)
elements.pop()
dfs([], 1, k)
return result
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
result = []
# def dfs(sum:int, data:List):
# # print(data)
# if sum > target:
# return
# if sum == target:
# temp = sorted(data)
# if temp not in result:
# result.append(temp)
# return
#
# for i in candidates:
# data.append(i)
# dfs(sum + i, data)
# data.remove(i)
#
# dfs(0,[])
# return result
def dfs(csum, index, path):
if csum < 0:
return
            if csum == 0:
result.append(path)
return
            # start from index so only elements at or after it are reused (avoids duplicate combinations)
for i in range(index, len(candidates)):
dfs(csum - candidates[i], i, path + [candidates[i]])
dfs(target, 0, [])
return result
def subsets(self, nums: List[int]) -> List[List[int]]:
result = []
# print(nums.index(2))
# def dfs(elements, temp:int):
# result.append(elements[:])
#
# if len(elements) == len(nums):
# return
#
# for i in range(temp, len(nums)):
# elements.append(i)
# dfs(elements, temp + 1)
# elements.remove(i)
#
# dfs([], 0)
# return result
def dfs(index, path):
result.append(path)
for i in range(index, len(nums)):
dfs(i + 1, path + [nums[i]])
        dfs(0, [])
return result
# print(Solution().permute(nums))
# Example 1:
#
# n = 4
# k = 2
# Output:
# [
# [2,4],
# [3,4],
# [2,3],
# [1,2],
# [1,3],
# [1,4],
# ]
# Example 2:
#
# Input: n = 1, k = 1
# Output: [[1]]
# print(Solution().combine(n,k))
# Example 1:
#
# candidates = [2,3,6,7]
# target = 7
# Output: [[2,2,3],[7]]
# Explanation:
# 2 and 3 are candidates, and 2 + 2 + 3 = 7. Note that 2 can be used multiple times.
# 7 is a candidate, and 7 = 7.
# These are the only two combinations.
# Example 2:
#
# Input: candidates = [2,3,5], target = 8
# Output: [[2,2,2,2],[2,3,3],[3,5]]
# Example 3:
#
# Input: candidates = [2], target = 1
# Output: []
# Example 4:
#
# Input: candidates = [1], target = 1
# Output: [[1]]
# Example 5:
#
# Input: candidates = [1], target = 2
# Output: [[1,1]]
# print(Solution().combinationSum(candidates,target))
# Example 1:
#
nums = [1,2,3]
# Output: [[],[1],[2],[1,2],[3],[1,3],[2,3],[1,2,3]]
# Example 2:
#
# Input: nums = [0]
# Output: [[],[0]]
print(Solution().subsets(nums))
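
# Cross-check the backtracking permute() against itertools.permutations,
# which the module-level import above appears intended for:
assert sorted(Solution().permute(nums)) == sorted(
    [list(p) for p in itertools.permutations(nums)])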
# print(type(nums.index(2))) | jihuncha/python_study_duplicated | Algorithm_95/pycharm_folder/210412_graph/210420_practice.py | 210420_practice.py | py | 3,937 | python | en | code | 2 | github-code | 13 |
23382792943 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class MovieItem(Item):
MainPageUrl = Field()
Title = Field()
Rating = Field()
Year = Field()
ID = Field()
Director = Field()
Synopsis = Field()
Genres = Field()
| dorseg/places-in-movies | crawler/crawler/items.py | items.py | py | 384 | python | en | code | 0 | github-code | 13 |
23631413312 | from db.run_sql import run_sql
from models.supplier import Supplier
#Save new Supplier
def save(supplier):
sql = "INSERT INTO suppliers (supplier_name, supplier_number, supplier_manager, supplier_address, supplier_phone) VALUES (%s, %s, %s, %s, %s) RETURNING *"
values = [supplier.supplier_name, supplier.supplier_number, supplier.supplier_manager, supplier.supplier_address, supplier.supplier_phone]
results = run_sql(sql, values)
id = results[0]['id']
supplier.id = id
return supplier
#Select all Suppliers
def select_all():
suppliers = []
sql = "SELECT * FROM suppliers ORDER BY supplier_name ASC"
results = run_sql(sql)
for result in results:
supplier = Supplier(result['supplier_name'], result['supplier_number'], result['supplier_manager'], result['supplier_address'], result['supplier_phone'], result['id'])
suppliers.append(supplier)
return suppliers
#Select Supplier by ID
def select(id):
    supplier = None
    sql = "SELECT * FROM suppliers WHERE id = %s"
    values = [id]
    results = run_sql(sql, values)
    if results:  # indexing [0] unconditionally raised IndexError for unknown ids
        result = results[0]
        supplier = Supplier(result['supplier_name'], result['supplier_number'], result['supplier_manager'], result['supplier_address'], result['supplier_phone'], result['id'])
    return supplier
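
# run_sql lives in db/run_sql.py; a rough sketch of the interface assumed
# above — dict-style rows, optional parameter list — using psycopg2 purely
# as an illustration (connection details are placeholders):
#
#   def run_sql(sql, values=None):
#       connection = psycopg2.connect("dbname=... user=...")
#       cursor = connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
#       cursor.execute(sql, values)
#       results = cursor.fetchall() if cursor.description else []
#       connection.commit()
#       connection.close()
#       return results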
#Update existing Supplier
def update(supplier):
sql = "UPDATE suppliers SET (supplier_name, supplier_number, supplier_manager, supplier_address, supplier_phone) = (%s, %s, %s, %s, %s) WHERE id = %s"
values = [supplier.supplier_name, supplier.supplier_number, supplier.supplier_manager, supplier.supplier_address, supplier.supplier_phone, supplier.id]
run_sql(sql, values)
#Delete all Suppliers
def delete_all():
sql = "DELETE FROM suppliers"
run_sql(sql)
#Delete Supplier by ID
def delete(id):
sql = "DELETE FROM suppliers WHERE id = %s"
values = [id]
run_sql(sql, values) | JackSlater99/ConstructionCostTracker-SoloProject | repositories/supplier_repository.py | supplier_repository.py | py | 1,926 | python | en | code | 1 | github-code | 13 |
27036466525 | import pickle
from flask import Flask, request, app, jsonify, url_for, render_template
from flask_cors import cross_origin
import pandas as pd
import numpy as np
from app_log import log
from mongodb import MongoDBManagement
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings("ignore")
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
#Running via Api
@app.route('/predict_api', methods=['POST'])
def predict_api():
if request.method == 'POST':
try:
data = request.json["data"]
new_data = [list(data.values())]
output = model.predict(new_data)[0]
if output == 1:
text = 'The Forest is in Danger'
else:
text = 'Forest is Safe'
return jsonify(text)
except Exception as e:
log.error('error in input from Postman', e)
return jsonify('Check the input again!')
else:
return 'Method not POST'
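
# Example request for the JSON endpoint above (the feature names are
# illustrative; the model expects features in the training-time order):
#   import requests
#   payload = {"data": {"Temperature": 29, "RH": 57, "Ws": 18, "FFMC": 65.7}}
#   requests.post("http://127.0.0.1:8000/predict_api", json=payload).json()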
#Running via html
@app.route('/', methods=['POST', 'GET'])
@cross_origin()
def index():
try:
log.info("Home page loaded successfully")
return render_template('index.html')
except Exception as e:
log.exception("Something went wrong on initiation process")
@app.route('/single_classification', methods=['POST', "GET"])
def single_classification():
try:
log.info("single classification initialization successfull")
return render_template('single_classification.html')
except Exception as e:
log.exception("Something went wrong on single_classification process", e)
@app.route('/predict_classification', methods=['POST', 'GET'])
@cross_origin()
def predict_classification():
    if request.method == 'POST':
        try:
            data = [float(x) for x in request.form.values()]
            final_features = [np.array(data)]
            output = model.predict(final_features)[0]
            if output == 0:
                return render_template('not_fire.html')
            return render_template('result_fire.html')
        except Exception as e:
            log.error('Input error, check input', e)
            return render_template('single_classification.html')  # fall back to the form instead of returning None
    log.error('Post method expected')
    return render_template('single_classification.html')
@app.route('/batch_classification', methods=['POST', "GET"])
def batch_classification():
try:
log.info("batch_classification initialization successfull")
mongoClient = MongoDBManagement(password='assignment')
if mongoClient.isDatabasePresent(db_name='batch_data'):
if mongoClient.isCollectionPresent(db_name='batch_data', collection_name='classification_batch'):
response = mongoClient.getRecords(db_name='batch_data', collection_name='classification_batch')
print(response)
if response is not None:
batch = []
for i in response:
batch.append(i)
print(i)
batch_reg = pd.DataFrame(batch)
test_data = batch_reg.drop(columns='_id')
test_data.to_html("class_batch.html")
scaler = StandardScaler()
scaled_test_data = scaler.fit_transform(test_data)
scaled_test_data=pd.DataFrame(scaled_test_data)
data = model.predict(scaled_test_data.values)
result = pd.DataFrame(data)
result.to_csv("class_batch.csv")
result.to_html("class_batch_html.html")
log.info("Batch Pridiction successfull",)
return render_template('batch_classification.html', data=result)
return render_template('single_classification.html')
except Exception as e:
log.exception(" Something went wrong on batch_classification process")
if __name__ == "__main__":
app.run(host='127.0.0.1', port=8000, debug=True) | Arkintea/Project-Algerian_Fire_Prediction | app.py | app.py | py | 4,095 | python | en | code | 1 | github-code | 13 |
38757036022 | # coding=utf-8
# author= YQZHU
from django.conf.urls import url, include
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^ranking/', include([
url(r'^list_rules', views.list_rules, name='list_rules'),
url(r'^add_rule', views.add_rule, name='add_rule'),
url(r'^edit_rule/(?P<id>[0-9]+)$', views.edit_rule, name='edit_rule'),
url(r'^my_yuangong', views.my_yuangong, name='my_yuangong'),
url(r'^del_yuangong/(?P<id>[0-9]+)$', views.del_yuangong, name='del_yuangong'),
url(r'^add_yuangong', views.add_yuangong, name='add_yuangong'),
url(r'^list_weeks', views.list_weeks, name='list_weeks'),
url(r'^add_week', views.add_week, name='add_week'),
url(r'^edit_week/(?P<id>[0-9]+)$', views.edit_week, name='edit_week'),
url(r'^view_week/(?P<id>[0-9]+)$', views.view_week, name='view_week'),
url(r'^add_kaohe_record/(?P<id>[0-9]+)$', views.add_kaohe_record, name='add_kaohe_record'),
url(r'^del_kaohe_record/(?P<id>[0-9]+)$', views.del_kaohe_record, name='del_kaohe_record'),
]), name='ranking'),
]
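
# Hypothetical reverse() lookups for the named routes above (the '/ranking/'
# prefix assumes this urlconf is mounted at the project root):
#   from django.urls import reverse
#   reverse('edit_rule', kwargs={'id': 3})   # -> '/ranking/edit_rule/3'
#   reverse('my_yuangong')                   # -> '/ranking/my_yuangong'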
| lianhuness/hongda_v2 | finance/finance_urls.py | finance_urls.py | py | 1,118 | python | en | code | 0 | github-code | 13 |
5867102645 | from flask import Flask, jsonify, render_template, request
import json
from datetime import timedelta
from service import Service
import model
app = Flask(__name__)
app.jinja_env.variable_start_string = '[['  # use [[ ]] so Jinja2 doesn't clash with Vue's {{ }} delimiters
app.jinja_env.variable_end_string = ']]'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)  # stop the browser caching static files so edits show up immediately
service = Service()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/query', methods=['GET', 'POST'])
def query():
query_traj = json.loads(request.form.get("query_traj"))
query_type = request.form.get("query_type")
time_range = json.loads(request.form.get("time_range"))
k = int(request.form.get("k"))
# time_range = [1478063519, 1478064044]
# time_range = None
# k = 3
traj_list, sim_list, compute_time, compute_count = service.knn_query(query_traj, query_type, k, time_range)
for i in range(len(traj_list)):
traj_list[i] = traj_list[i].to_json()
traj_list[i]['sim'] = sim_list[i]
traj_list[i].pop('embedding')
result = {"traj_list": traj_list, "compute_time": compute_time, "compute_count": compute_count}
return jsonify({"code": 200, "success": True, "result": result, "msg": "查询成功"})
if __name__ == '__main__':
app.run(debug=True)
| MaxLEAF3824/Trajectory | web/app.py | app.py | py | 1,358 | python | en | code | 0 | github-code | 13 |
73537228176 | """A two-player tic-tac-toe game played in the terminal."""
def table(lis):
"""doc"""
print("+-+-+-+")
for i in lis:
print('|', end="")
for j in i:
print("%c|" % j, end="")
print()
print("+-+-+-+")
def checkwinner(lis):
"""doc"""
for i in range(3):
if lis[i][0] == lis[i][1] == lis[i][2]:
return True
elif lis[0][i] == lis[1][i] == lis[2][i]:
return True
if lis[0][0] == lis[1][1] == lis[2][2]:
return True
elif lis[0][2] == lis[1][1] == lis[2][0]:
return True
return False
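
# Quick sanity check for checkwinner (board states are illustrative):
#   checkwinner([['X', 'X', 'X'], ['4', 'O', '6'], ['O', '8', '9']])  # True
#   checkwinner([['1', '2', '3'], ['4', '5', '6'], ['7', '8', '9']])  # False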
def run():
"""doc"""
print("Welcome to OX!")
print(" ")
lis = [['1', '2', '3'], ['4', '5', '6'], ['7', '8', '9']]
table(lis)
print(" ")
play = 'X'
ind = 1
checl = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
while ind < 10:
print("It's %c's turn!" % play)
txt = input("Please enter cell number (1-9) --> ")
        while txt not in checl:
txt = input("Please enter cell number (1-9) --> ")
for i in range(3):
for j in range(3):
if lis[i][j] == txt:
lis[i][j] = play
del checl[checl.index(txt)]
break
table(lis)
print(" ")
if checkwinner(lis):
print("The winner is %c!!" % play)
return 1
if play == 'X':
play = 'O'
else:
play = 'X'
ind += 1
print("Draw!!")
run()
# It's O's turn!
# Please enter cell number (1-9) --> 3
# +-+-+-+
# |1|2|O|
# +-+-+-+
# |4|X|6|
# +-+-+-+
# |7|8|9|
# +-+-+-+
| film8844/KMITL-Computer-Programming-Year-1 | week10/[Week 10] Tic-Tac-Toe.py | [Week 10] Tic-Tac-Toe.py | py | 1,619 | python | en | code | 0 | github-code | 13 |
14945258055 | from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from good.models import Good, Order
from contract.models import Montage
from django.views.generic import ListView, DetailView, FormView, TemplateView, CreateView
from good.forms import SearchOrderForm
from django.db.models import Q
from good.forms import CustomerForm, OrderForm, GoodForm, MontageForm
from datetime import datetime, timedelta
from .models import *
class OrderList(ListView):
model = Good
template_name = 'good/good.html'
paginate_by = 10
queryset = Good.objects.all().order_by("-completed")
def get_context_data(self, **kwargs):
context = super(OrderList, self).get_context_data(**kwargs)
context['form'] = SearchOrderForm()
return context
def get_queryset(self):
queryset = super().get_queryset()
form = SearchOrderForm(self.request.GET)
if form.is_valid():
information = form.cleaned_data['information']
if information:
queryset = queryset.filter(
Q(order__id__icontains=information)|
Q(order__customer__last_name__icontains=information)|
Q(order__customer__first_name__icontains=information)|
Q(order__customer__patronymic__icontains=information)
)
return queryset
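
# SearchOrderForm comes from good.forms; a minimal sketch of the single
# field get_queryset above relies on (hypothetical shape, only the field
# name 'information' is confirmed by the code):
#
#   class SearchOrderForm(forms.Form):
#       information = forms.CharField(required=False)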
class OrderCreateView(TemplateView):
template_name = 'good/good_form.html'
success_url = '/order/'
def get_context_data(self, **kwargs):
context = super(OrderCreateView, self).get_context_data(**kwargs)
context['form_customer'] = CustomerForm(self.request.POST)
context['form_order'] = OrderForm(self.request.POST)
context['form_good'] = GoodForm(self.request.POST)
return context
    def post(self, request, *args, **kwargs):
        context = self.get_context_data(**kwargs)
        if (context['form_customer'].is_valid()
                and context['form_order'].is_valid()
                and context['form_good'].is_valid()):
            context['form_customer'].save()
            context['form_order'].save()
            context['form_good'].save()
            return HttpResponseRedirect(self.success_url)  # success_url was defined but never used
        return self.render_to_response(context)
class DescriptionList(ListView):
    model = Good
    template_name = 'good/description_product.html'
def information(request, pk):
    order = Order.objects.get(id=pk)
    goods = Good.objects.filter(order=order.id)
    context = {
        'order': order,
        'goods': goods
    }
return render(request, 'good/description_product.html', context)
class GraphList(ListView):
model = Good
hour = 8
template_name = 'good/graph_list.html'
    good_list = []
def get_context_data(self, **kwargs):
context = super(GraphList, self).get_context_data(**kwargs)
        today = datetime.today().replace(hour=8, minute=0)
        day_list = [{'date': today + timedelta(days=x), 'goods': []} for x in range(4)]
        for day in day_list:
            start = day['date']
            end = start + timedelta(hours=1)
            for x in range(10):
                day['goods'].append(
                    {'date': start.strftime('%H:%M'),
                     'data': Good.objects.filter(created__range=[start, end])
                     })
                start += timedelta(hours=1)
                end += timedelta(hours=1)
context['hour'] = range(8)
context['day_list'] = day_list
return context
class Graph_Montage(ListView):
    model = Montage
    template_name = 'good/graph_list.html'
queryset = Montage.objects.all().order_by("date")
class CardCreate(TemplateView):
template_name = 'good/create_card.html'
success_url = '/graphs/'
def get_context_data(self, **kwargs):
context = super(CardCreate, self).get_context_data(**kwargs)
context['form_montage'] = MontageForm(self.request.POST)
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
if context['form_montage'].is_valid():
instance_montage = context['form_montage'].save()
return HttpResponseRedirect("/good/graphs")
return self.render_to_response(context)
| duutka/windows_django | good/views.py | views.py | py | 4,342 | python | en | code | 0 | github-code | 13 |
10668462215 | import os
import click
from flask import Flask
from todoism.settings import config
from todoism.blueprints.todo import todo_bp
from todoism.blueprints.auth import auth_bp
from todoism.blueprints.home import home_bp
from todoism.extensions import db, login_manager
def create_app(config_name = None):
if config_name is None:
config_name = os.getenv('FLASK_CONFIG', 'development')
app = Flask('todoism')
app.config.from_object(config[config_name])
register_blueprints(app)
register_extensions(app)
register_commands(app)
return app
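
# Typical usage with the Flask CLI (the commands below are assumed, for
# illustration only):
#   export FLASK_APP="todoism:create_app('development')"
#   flask initdb --drop
#   flask run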
def register_blueprints(app):
app.register_blueprint(todo_bp)
app.register_blueprint(auth_bp)
app.register_blueprint(home_bp)
def register_extensions(app):
db.init_app(app)
login_manager.init_app(app)
def register_commands(app):
@app.cli.command()
    @click.option('--drop', is_flag=True, help='Create after drop.')
    def initdb(drop):
        if drop:
            db.drop_all()  # the original `pass` silently ignored the flag
        db.create_all()
click.echo('Initialized database.') | parkerhsu/Flask_Practice | BlueTodoism/todoism/__init__.py | __init__.py | py | 1,040 | python | en | code | 0 | github-code | 13 |
9537712457 | import os
import shutil
import signal
import time
import random
fr0m = 'monitor_dir'
to = 'monitor_dir_1'
def handle_signal(signal, frame) -> None:
print('Handler start')
for file in os.listdir(to):
os.remove(f'{to}/{file}')
print('Handler stop. Files have been deleted')
exit()
def run():
for file in os.listdir(fr0m):
# print(file)
        time.sleep(random.randint(1, 3))
print('Copy ' + file)
shutil.copy(f'{fr0m}/{file}', f'{to}/{file}')
if __name__ == '__main__':
signal.signal(signal.SIGINT, handle_signal)
run() | SimpleIN1/process_fires2 | ftp_tracker/copy_file.py | copy_file.py | py | 606 | python | en | code | 0 | github-code | 13 |
42700425295 | from random import randint
import pygame as pg
from .particles import create_particles, draw_particles
RED = (255, 0, 0)
def get_pos_center() -> tuple[int, int]:
width, height = pg.display.get_surface().get_size()
return int(width / 2), int(height / 2)
def gen_pos_random() -> tuple[int, int]:
width, height = pg.display.get_surface().get_size()
pox_x = randint(-width, width * 2)
pox_y = randint(-height, height * 2)
return pox_x, pox_y
def direction(speed: float, posSelf: int, posOther: int) -> float | int:
if posSelf > posOther:
return -speed
elif posSelf in range(posOther - 5, posOther + 5):
return 0
else:
return speed
class Enemy(pg.sprite.Sprite):
def __init__(self, *groups):
super().__init__(*groups)
self.life = 5
self.speed = 3
self.list_particles = []
self.image = pg.image.load('static/image/nave1.png')
self.radius = self.image.get_width()
self.rect = self.image.get_rect()
self.rect.topleft = gen_pos_random()
def _movement(self):
c_w, c_h = get_pos_center()
pos_X = direction(self.speed, self.rect.x, c_w)
pos_Y = direction(self.speed, self.rect.y, c_h)
if self.alive():
self.rect.move_ip(pos_X, pos_Y)
def _animate(self):
angle = -90
self.image = pg.transform.rotate(self.image, angle)
def _draw_particles(self):
if len(self.list_particles) <= 33:
create_particles(self.list_particles, self.rect)
draw_particles(self.list_particles)
def _collide_radius(self, playerRadius):
if self.rect.colliderect(playerRadius):
self.speed = 1
def _collide_and_die(self, playerLife, playerRect):
if self.rect.colliderect(playerRect):
playerLife.life -= 1
self.kill()
def _check_life_and_die(self):
if self.life <= 0:
self.kill()
def update(self, *args, **kwargs):
self._check_life_and_die()
self._animate()
self._movement()
self._draw_particles()
self._collide_radius(kwargs['playerRadius'])
self._collide_and_die(kwargs['playerLife'], kwargs['playerRect'])
def events(self, event, **kwargs):
...
def __str__(self) -> str:
return 'Enemy -> life: {}, speed: {}, center: {}'.format(
self.life, self.speed, self.rect.center
)
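
# The particle helpers are imported from .particles; a rough sketch of the
# interface Enemy assumes (hypothetical stand-ins, not the real module):
def _create_particles_sketch(particles: list, rect: pg.Rect) -> None:
    # spawn one particle record near the sprite
    particles.append({'pos': rect.center, 'radius': randint(1, 3)})

def _draw_particles_sketch(particles: list) -> None:
    surface = pg.display.get_surface()
    for p in particles:
        pg.draw.circle(surface, RED, p['pos'], p['radius'])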
| Fernando-Medeiros/Pleiades | src/enemy/entity.py | entity.py | py | 2,464 | python | en | code | 0 | github-code | 13 |
34605548073 | #!/usr/bin/env python
import math

import rospy
from std_msgs.msg import Float64
from robot.robot import ExoRobot
r = ExoRobot()
def callback(data):
    rospy.loginfo(rospy.get_caller_id() + ' I heard %f', data.data)
    goal_angle = math.radians(data.data)  # the message carries degrees; the robot expects radians
    r.step(goal_angle)
def listener():
rospy.init_node('listener', anonymous=True)
rospy.Subscriber('chatter', Float64, callback)
rospy.spin()
if __name__ == '__main__':
listener()
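
# Manual test from another shell (topic name matches the subscriber above):
#   rostopic pub -1 chatter std_msgs/Float64 -- 90.0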
| QinjieLin-NU/exoedu-robot | main.py | main.py | py | 461 | python | en | code | 0 | github-code | 13 |
38460966365 | import random
class Chromosome:
    def __init__(self, g, f):
        self.genes = g
        self.score = f
def _generate_parent(target, geneSet, fitnessFn):
    genes = []
    while len(genes) < len(target):
        sampleSize = min(len(target) - len(genes), len(geneSet))
        genes.extend(random.sample(geneSet, sampleSize))
    g = ''.join(genes)
    f = fitnessFn(target, g)
    return Chromosome(g, f)

def _mutate(parent, geneSet, target, fitnessFn):
    child = list(parent.genes)
    index = random.randrange(0, len(parent.genes))
    alt1, alt2 = random.sample(geneSet, 2)
    child[index] = alt2 if child[index] == alt1 else alt1
    g = ''.join(child)
    f = fitnessFn(target, g)
    return Chromosome(g, f)
def get_best(target, geneSet, fitnessFn, optimalScore, display, startTime):
    random.seed()
    parent = _generate_parent(target, geneSet, fitnessFn)
    if parent.score >= optimalScore:
        display(parent, startTime)
        return parent  # the original bare `return` dropped the result
    while True:
        child = _mutate(parent, geneSet, target, fitnessFn)
        if parent.score >= child.score:
            continue
        display(child, startTime)
        if child.score >= optimalScore:
            return child
        parent = child
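
# Hypothetical usage: evolve a target phrase with a character-match fitness
# (the display callback signature matches the calls inside get_best above).
if __name__ == '__main__':
    import string
    import time

    def _fitness(target, guess):
        return sum(1 for expected, actual in zip(target, guess) if expected == actual)

    def _display(candidate, start_time):
        print(candidate.genes, candidate.score, round(time.time() - start_time, 2))

    phrase = 'Hello World'
    gene_set = string.ascii_letters + ' '
    get_best(phrase, gene_set, _fitness, len(phrase), _display, time.time())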
| bkgsur/GeneticAlgorithms | genetic.py | genetic.py | py | 1,380 | python | en | code | 0 | github-code | 13 |
29276682281 | import pandas as pd
import numpy as np
from sklearn.preprocessing import scale
from sklearn import preprocessing
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD, RMSprop, Adadelta, Adam
import matplotlib.pyplot as plt
from keras import regularizers
import sklearn
from sklearn import svm
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
nb_epochs = 100
batch_size = 64
input_size = 992
num_classes = 2
def encoder():
model = Sequential()
    # Keras 2 renamed `bias` to `use_bias`; the old keyword raises a TypeError
    model.add(Dense(512, input_dim=input_size, activation='relu', use_bias=True))
    model.add(Dense(256, activation='relu', use_bias=True))
    #model.add(Dense(128, activation='relu', use_bias=True))
return model
def decoder(e):
    #e.add(Dense(256, input_dim=128, activation='relu', use_bias=True))
    e.add(Dense(512, input_dim=256, activation='relu', use_bias=True))
    e.add(Dense(input_size, activation='relu', use_bias=True))
e.compile(optimizer='adam', loss='mse')
return e
def train_val(rss, locations):
train_x, val_x, train_y, val_y = train_test_split(rss, locations, test_size=0.2)
return train_x, val_x, train_y, val_y
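
# Minimal smoke test for the pieces above on synthetic data (the shapes are
# the only assumption; the real RSS fingerprints have input_size features):
def _autoencoder_smoke_test(n_samples=32):
    x = np.random.rand(n_samples, input_size)
    autoenc = decoder(encoder())  # stack the halves; decoder() compiles with MSE
    autoenc.fit(x, x, epochs=1, batch_size=8, verbose=0)
    return autoenc.predict(x).shape  # expected: (n_samples, input_size)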
def regression(rss, locations):
train_X, val_X, train_Y, val_Y = train_val(rss, locations)
e = encoder()
d = decoder(e)
    d.fit(train_X, train_X, epochs=nb_epochs, batch_size=batch_size)  # Keras 2 uses `epochs`, not `nb_epoch`
num_to_remove = 2
regularzation_penalty = 0.02
    initilization_method = 'he_normal'  # alternatives: 'random_uniform', 'random_normal', 'TruncatedNormal', 'glorot_uniform', 'glorot_normal', 'he_normal', 'he_uniform'
#Optimizer
adam = Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
for i in range(num_to_remove):
d.pop()
d.add(Dense(256, input_dim=256, activation='relu', kernel_initializer=initilization_method, kernel_regularizer=regularizers.l2(regularzation_penalty)))
d.add(Dropout(0.5))
d.add(Dense(256, activation='relu', kernel_initializer=initilization_method, kernel_regularizer=regularizers.l2(regularzation_penalty)))
d.add(Dropout(0.5))
d.add(Dense(num_classes, activation='linear', kernel_initializer=initilization_method, kernel_regularizer=regularizers.l2(regularzation_penalty)))
#Model compile
d.compile(loss='mean_squared_error',
optimizer='adam')
earlyStopping=keras.callbacks.EarlyStopping(monitor='val_loss', patience=60, verbose=0, mode='auto')
Model_best= keras.callbacks.ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)
    d.fit(train_X, train_Y, validation_data=(val_X, val_Y), epochs=nb_epochs, callbacks=[earlyStopping, Model_best], batch_size=batch_size)
return d | JonOnEarth/indoor-position | auto_regression.py | auto_regression.py | py | 2,783 | python | en | code | 3 | github-code | 13 |
8965495757 | # all fund code
import requests
from lxml import etree
from sql import Sql
sql_client = Sql()  # avoid rebinding the Sql class name to its own instance
db_conn = sql_client.conn_db('fund')
url = 'http://fund.eastmoney.com/allfund.html'
r = requests.get(url)
r.encoding = 'gb2312'
html = r.text
html = etree.HTML(html)
# The XPath is absolute, so it already matches every fund link on the page;
# the original loops over num_boxes[0] / lies[0] were redundant wrappers.
funds = html.xpath('//div[@id="code_content"]//div[@class="num_box"]/ul/li/div/a[1]/text()')
for fund in funds:
    print(fund)
    code = fund.split(')')[0][1:]  # entries look like "(code)name"
    name = fund.split(')')[1]
    sql = 'insert into fund(code, name) values ("{}", "{}")'.format(code, name)
    sql_client.exec_sql(db_conn, sql)
| ryjfgjl/Fund | Spider/allfund.py | allfund.py | py | 842 | python | en | code | 0 | github-code | 13 |
42958759444 | #!/usr/bin/python3
# intents_blueprint.py
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'util'))
from flask import Blueprint, request, jsonify
from db_assets import aggregate_assets, get_total_capital
from StockPrices import getActives as getHotAssets, getDescription, assetExists
from RiskManagement import shouldBuy, shouldSell, analyzePortfolio
dialogflow_blueprint = Blueprint('dialogflow_blueprint', __name__, template_folder=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'templates'), static_folder=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'static'))
# Dialogflow intent webhook
@dialogflow_blueprint.route('/dialogflow/intent', methods=['POST'])
def post_dialogflow_webhook():
try:
if not request.get_json(force=True).get('queryResult').get('allRequiredParamsPresent'):
raise TypeError
uid = request.get_json(force=True).get('session').split('/')[-1]
action = request.get_json(force=True).get('queryResult').get('action')
parameters = request.get_json(force=True).get('queryResult').get('parameters')
except TypeError:
return jsonify({'success': False, 'fulfillmentText': 'Something went wrong with your request.'}), 400
    if action not in intents:
        return jsonify({'success': False, 'fulfillmentText': 'Unsupported action.'}), 400
    chatbot_response = intents[action](uid=uid, **parameters)
    return jsonify({'success': True, 'fulfillmentText': chatbot_response})
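
# Example request body this webhook expects (Dialogflow v2 shape; the values
# are illustrative):
#   POST /dialogflow/intent
#   {"session": "projects/p/agent/sessions/<uid>",
#    "queryResult": {"action": "SELL_ASSET",
#                    "allRequiredParamsPresent": true,
#                    "parameters": {"asset": "AAPL"}}}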
# Dialogflow intent descriptions
@dialogflow_blueprint.route('/dialogflow/intents', methods=['GET'])
def get_all_intents():
return jsonify({
'BUY_ASSET': 'Should I buy this asset?',
'SELL_ASSET': 'Should I sell this asset?',
'ANALYZE_PORTFOLIO': 'Can you analyze my current portfolio?'
}), 200
# Intent functions
def buy_asset(uid=None, asset=None, quantity=None, risk_management_price=None, **kwargs):
total_capital = get_total_capital(uid)
if not assetExists(asset):
return 'Sorry, but {asset} is not a valid ticker.'.format(asset=asset)
# Should I buy this asset? - Inputs: asset, quantity, risk price (stop loss)
return shouldBuy(asset, quantity, risk_management_price, total_capital)
def sell_asset(asset=None, **kwargs):
if not assetExists(asset):
return 'Sorry, but {asset} is not a valid ticker.'.format(asset=asset)
# Should I sell this asset? - Inputs: asset
return shouldSell(asset)
def analyze_portfolio(uid=None, **kwargs):
total_capital = get_total_capital(uid)
portfolio = aggregate_assets(uid, ignore_dates=False)
# Analyze my portfolio - Inputs: portfolio
return analyzePortfolio(portfolio, total_capital)
def hot_assets(**kwargs):
good_assets = getHotAssets()[0]
return 'Based on expert opinions, {stocks} have been doing very well. Some of these assets might be worth a closer look.'.format(stocks=', '.join(good_assets))
def what_is_asset(asset=None, **kwargs):
if not assetExists(asset):
return 'Sorry, but {asset} is not a valid ticker.'.format(asset=asset)
return '{description} {asset} is in the {industry} Industry of the {sector} Sector. {asset} is currently valued at ${price}.'.format(asset=asset, **getDescription(asset))
# Dialogflow intents
intents = {
'BUY_ASSET': buy_asset,
'SELL_ASSET': sell_asset,
'ANALYZE_PORTFOLIO': analyze_portfolio,
'HOT_ASSETS': hot_assets,
'WHAT_IS_ASSET': what_is_asset
}
| therealsharath/fizz | backend/src/flask/dialogflow_blueprint.py | dialogflow_blueprint.py | py | 3,443 | python | en | code | 1 | github-code | 13 |
15391978284 | import tkinter as tk
root = tk.Tk()
def line(event):
    # draw a line from the canvas origin to the clicked point
    canvas.create_line(0, 0, event.x, event.y)
canvas = tk.Canvas(root, width=400, height=400)
canvas.pack()
root.bind("<Button-1>", line)
root.mainloop() | chunin1103/BGclipping | drawing.py | drawing.py | py | 210 | python | en | code | 0 | github-code | 13 |
10844202415 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Assignment in BMP course - Program Association Table parser
Author: Jakub Lukac
E-mail: xlukac09@stud.fit.vutbr.cz
Created: 16-10-2019
Testing: python3.6
"""
import sys
from psi import PSI
class PAT(PSI):
__PAT_TABLE = 0x00
__TABLE_EXTENSION_ID = 0x03fd
def __init__(self, data):
# parse program-specific information frame
super().__init__(data)
if self.table_id != PAT.__PAT_TABLE:
print("PAT Error:", "Table ID is not PAT ID(0x00).", file=sys.stderr)
if not self.section_syntax_indicator:
print("PAT Error:", "Section syntax indicator bit not set to 1.", file=sys.stderr)
if self.private_bit:
print("PAT Error:", "Private bit not set to 0.", file=sys.stderr)
self.__parse_pat_table(self.table_data)
def __parse_pat_table(self, data):
position_indicator = 0
# parse Program Association Table
self.program_mapping = []
while position_indicator < len(data):
# 16 bits program number
service_id = int.from_bytes(data[position_indicator:position_indicator + 2], byteorder="big")
position_indicator += 2
# 3 bits reserved bits
reserved = (data[position_indicator] & 0xe0) >> 5
if reserved != 0x07:
print("PAT Error: ", "Reserved bits not set to 0x07.", reserved, file=sys.stderr)
# 13 bits pid
pid = int.from_bytes(data[position_indicator:position_indicator + 2], byteorder="big") & 0x1fff
position_indicator += 2
self.program_mapping.append((service_id, pid))
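        # after parsing, program_mapping holds (service_id, pid) pairs, e.g.
        # [(0x0000, 0x0010), (0x0001, 0x0100)]; program number 0 conventionally
        # points at the network information table (NIT) PID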
def __str__(self):
pat_str = super().__str__()
pat_str += "TS ID: {self.id:#06x}\nProgram mapping (service_id, pid): [".format(self=self) + ", ".join(
["(" + ", ".join([format(n, "#06x") for n in mapping]) + ")" for mapping in self.program_mapping]) + "]\n"
return pat_str
| cubolu/School-Projects | Python/BMS/dvb-t/pat.py | pat.py | py | 2,015 | python | en | code | 0 | github-code | 13 |
74176775379 | """ Panacea - throughput.py
1) Measures differential atmospheric refraction
.. moduleauthor:: Greg Zeimann <gregz@astro.as.utexas.edu>
"""
import numpy as np
import os.path as op
from utils import biweight_bin
from fiber_utils import bspline_x0
from astropy.io import fits
from dar import Dar
from telluricabs import TelluricAbs
try:
from pyhetdex.het.telescope import HetpupilModel
hetpupil_installed = True
except ImportError:
print('Cannot find HETpupilModel. Please check pyhetdex installation.')
print('For now, using default 50m**2 for mirror illumination')
hetpupil_installed = False
class Throughput:
''' Throughput for standard stars with LRS2 at the HET '''
def __init__(self, base_filename, side,
standard_folder='/Users/gregz/cure/virus_early/virus_config/'
'standards'):
'''
Aimed at calculating the relative throughput for a given standard star
which can be applied to science frames.
Parameters:
base_filename : str
The full path of the file, "multi_*_*_*", from the initial
panacea2.py reduction. For example:
"/work/03946/hetdex/maverick/reductions/20180206/lrs2/lrs20000035/"
"exp01/lrs2/multi_503_056_7001"
The "_{amp}.fits" is left off.
side : str
Either 'L' or 'R'. The input is case insensitive. This will
choose either the uv vs. orange or red vs. farred depending
on the base_filename which selects between LRS2-B and LRS2-R.
'''
self.base_filename = base_filename
self.side = side
self.standard_folder = standard_folder
if self.side.lower() == 'l':
self.amps = ['LL', 'LU']
self.wave_lims = [3640., 4610.]
elif self.side.lower() == 'r':
self.amps = ['RL', 'RU']
self.wave_lims = [4650., 7000.]
self.read_in_files()
def get_dar_model(self):
self.dar = Dar(self.ifux, self.ifuy, self.spec, self.wave)
self.dar.measure_dar()
self.dar.psfextract()
self.dar.rect_wave, self.dar.flux = self.restrict_wavelengths(
self.dar.rect_wave,
self.dar.flux)
def restrict_wavelengths(self, wave, spec):
sel = np.where((wave > self.wave_lims[0]) *
(wave < self.wave_lims[1]))[0]
return wave[sel], spec[sel]
def get_telluric_abs(self):
self.telabs = TelluricAbs(self.dar.rect_wave, self.clam, self.RH,
self.T, self.P, self.ZD)
self.telabs.fit_telluric_abs()
def read_in_files(self):
'''
Read in the multi* fits files for each amp and build the x, y positions
in the ifu as well as the spectrum (corrected for fiber to fiber)
and wavelength.
'''
x, y, spec, wave = ([], [], [], [])
for amp in self.amps:
fn = self.base_filename + ('_%s.fits' % amp)
F = fits.open(fn)
x.append(F['ifupos'].data[:, 0])
y.append(F['ifupos'].data[:, 1])
spec.append(F['spectrum'].data / F['fiber_to_fiber'].data)
wave.append(F['wavelength'].data)
self.object = F[0].header['OBJECT'].split('_')[0]
self.exptime = F[0].header['EXPTIME']
self.RH = F[0].header['HUMIDITY']
self.T = F[0].header['AMBTEMP']
self.P = F[0].header['BAROMPRE']
self.ZD = F[0].header['ZD']
self.ifux = np.hstack(x)
self.ifuy = np.hstack(y)
self.spec = np.vstack(spec)
self.wave = np.vstack(wave)
def get_standard_spectrum_from_file(self):
''' Read standard spectrum for self.object and convert to f_lam '''
filename = op.join(self.standard_folder,
'm' + self.object.lower() + '.dat.txt')
wave, standardmag = np.loadtxt(filename, usecols=(0, 1), unpack=True)
fnu = 10**(0.4 * (-48.6 - standardmag))
self.standard_flam = fnu * 2.99792e18 / wave**2
self.standard_wave = wave
def get_mirror_illumination(self, fn=None):
''' Use Hetpupil from Cure to calculate mirror illumination (cm^2) '''
if hetpupil_installed:
if fn is None:
fn = self.base_filename + ('_%s.fits' % self.amps[0])
mirror_illum = HetpupilModel([fn], normalize=False)
self.area = mirror_illum.fill_factor[0] * 55. * 1e4
else:
self.area = 50. * 1e4
def convert_units(self):
''' convert cnts/A to cnts/A/s/cm^2 '''
if not hasattr(self, 'area'):
self.get_mirror_illumination()
self.clam = self.dar.flux / self.exptime / self.area
def compare_spectrum_to_standard(self):
''' Bin measured clam spectrum and calculate response, R '''
if not hasattr(self, 'area'):
self.get_mirror_illumination()
if not hasattr(self, 'clam'):
self.convert_units()
xl = np.searchsorted(self.standard_wave, self.dar.rect_wave.min(),
side='left')
xh = np.searchsorted(self.standard_wave, self.dar.rect_wave.max(),
side='right')
self.binned_clam = np.array(biweight_bin(self.standard_wave[xl:xh],
self.dar.rect_wave,
self.clam))
self.R = self.standard_flam[xl:xh] / self.binned_clam
self.R_wave = self.standard_wave[xl:xh]
sel = np.where(np.isfinite(self.R) * (self.binned_clam >
(.4*np.nanmedian(self.binned_clam))))[0]
self.R_wave = self.R_wave[sel]
self.R = self.R[sel]
B, c = bspline_x0(self.R_wave, nknots=25)
        sol = np.linalg.lstsq(c, self.R, rcond=None)[0]  # rcond=None opts into the new default and silences the FutureWarning
self.smooth_R = np.dot(c, sol)
| grzeimann/Panacea | throughput.py | throughput.py | py | 5,997 | python | en | code | 8 | github-code | 13 |
5477184460 | import json
import requests
import pandas as pd
import boto3
from datetime import datetime
from flatten_json import flatten
from io import BytesIO, StringIO
from airflow.contrib.hooks.aws_hook import AwsHook
def get_aws_config(conn_id):
aws_hook = AwsHook(conn_id)
credentials = aws_hook.get_credentials()
return credentials
def dataframe_to_s3(s3_client, input_dataframe, bucket_name, file_info, format):
    if format == 'parquet':
        out_buffer = BytesIO()
        input_dataframe.to_parquet(out_buffer, index=False)
    elif format == 'csv':
        out_buffer = StringIO()
        input_dataframe.to_csv(out_buffer, index=False)
else:
print("Undefined or No format defined")
filename = file_info[0]
filepath = file_info[1]
s3_client.put_object(Bucket=bucket_name, Key=filepath, Body=out_buffer.getvalue())
print(f'{filename} successfully loaded to s3')
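
# Illustrative call (the bucket and key are assumptions):
#   df = pd.DataFrame({"station_id": [1], "num_bikes_available": [5]})
#   s3 = boto3.client("s3")
#   dataframe_to_s3(s3, df, "my-bucket", ("stations.csv", "raw/stations.csv"), "csv")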
def ingest_stations(**kwargs):
endpoint = kwargs['endpoint']
endpoint_api = f'https://api-core.bixi.com/gbfs/en/{endpoint}.json'
    try:
        station_status_response = requests.get(endpoint_api)
    except requests.RequestException:
        print("issue with python calling api endpoint")
        raise  # without re-raising, the lines below would hit an undefined variable
station_status = station_status_response.json()
last_updated = station_status['last_updated']
test_item = station_status['data']['stations'][0]
flattened_station_status = [flatten(d) for d in station_status['data']['stations']]
df = pd.DataFrame(flattened_station_status)
df['last_updated'] = last_updated
s3_client = boto3.client('s3', aws_access_key_id=get_aws_config('aws_credentials')[0], aws_secret_access_key=get_aws_config('aws_credentials')[1])
start_date = datetime.now()
year = start_date.strftime("%Y")
month = start_date.strftime("%m")
day = start_date.strftime("%d")
hour = start_date.strftime("%H")
minute = int(start_date.strftime("%M"))
    # floor the minute to its 5-minute bucket, zero-padded to two digits
    min_bucket = str((minute // 5) * 5).zfill(2)
filename = f'{endpoint}_{last_updated}'
filepath = f'station/{endpoint}/{year}/{month}/{day}/{hour}/{min_bucket}/{filename}'
bucket_name = 'bixi.qc.staged'
file_info = (filename, filepath)
dataframe_to_s3(s3_client, df, bucket_name, file_info, 'csv')
if __name__ == '__main__':
kwargs = {'endpoint':'station_status'}
| gurjarprateek/bixi-data-repository | airflow/dags/scripts/task_incremental_stations.py | task_incremental_stations.py | py | 2,237 | python | en | code | 0 | github-code | 13 |
13222391255 | from typing import Optional
from sqlmodel import Field, SQLModel, Enum, Column
from src.core.helpers.type_choices import UserStatusType
class UserBase(SQLModel):
name: str
email: str
username: str = Field(unique=True)
phone_number: Optional[str] = None
is_superuser: bool = Field(default=False)
is_staff: bool = Field(default=False)
type: UserStatusType = Field(sa_column=Column(Enum(UserStatusType)))
class Config:
schema_extra = {
"example": {
"id": 1,
"name": "Mark Doe",
"email": "mark@gmail.com",
"phone_number": "01630811624",
}
}
class User(UserBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
password: str
class UserCreate(UserBase):
password: str
class UserRead(UserBase):
id: int
class UserUpdate(UserBase):
name: Optional[str] = None
email: Optional[str] = None
phone_number: Optional[str] = None
class Config:
schema_extra = {
"example": {
"id": 1,
"name": "Mark Doe",
"email": "mark@gmail.com",
"phone_number": "01630811624",
}
}
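
if __name__ == "__main__":
    # Hypothetical smoke test: create the users table in a local sqlite file
    # (the URL is an assumption; the app's real engine lives elsewhere).
    from sqlmodel import create_engine

    engine = create_engine("sqlite:///example.db")
    SQLModel.metadata.create_all(engine)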
| MahmudulHassan5809/fastapi-starter | src/accounts/models.py | models.py | py | 1,276 | python | en | code | 3 | github-code | 13 |
25918575239 | #!/usr/bin/env python3
# Import modules
import os
import tkinter
import tkinter.filedialog
import tkinter.messagebox
from strip_ansi import strip_ansi
from functools import reduce
def main():
    # Show the file selection dialog
root = tkinter.Tk()
root.withdraw()
fTyp = [("", "*")]
iDir = os.path.abspath(os.path.dirname(__file__))
filename = tkinter.filedialog.askopenfilename(
filetypes=fTyp, initialdir=iDir)
target = []
plain = []
    with open(filename, "r", encoding="utf8") as fobj:
        for i, raw_line in enumerate(fobj, 1):
            line = strip_ansi(raw_line)  # renamed from `str`, which shadowed the built-in
            if '"patientList"' not in line and '"queryList"' not in line:
                plain.append(line)
            if 'ERROR' in line and 'The Network Adapter could not establish the connection' not in line:
                target.append(i)
target = reduce(lambda acc, n: acc + [n] if not any(e < n + 1000 and e > n - 1000 for e in acc) else acc,
target,
[])
    dirname = filename.split('/').pop()
    os.makedirs(f'/out/{dirname}', exist_ok=True)  # create the directory the results are actually written into
    for i, t in enumerate(target):
        with open(f'/out/{dirname}/result{i:02}.log', "w", encoding="utf8") as wf:
            # t is a 1-based line number in the raw file, while plain is a
            # filtered 0-based list, so the +/-1000 window is approximate
            start = t - 1000 if t - 1000 > 0 else 0
            end = t + 1000 if t + 1000 <= len(plain) else len(plain)
for s in plain[start:end]:
wf.write(s)
if __name__ == "__main__":
main()
| Ischca/log-brewer | src/main.py | main.py | py | 1,466 | python | en | code | 0 | github-code | 13 |
36973574006 | from django.shortcuts import render, get_object_or_404, redirect
from blog.models import Post
from .models import Comment
from .forms import CommentForm
def post_comment(request, post_pk):
post = get_object_or_404(Post, pk=post_pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
            # commit=False builds a Comment instance from the form data without saving it to the database yet
comment = form.save(commit=False)
comment.post = post
comment.save()
            # redirect to the post's detail page: when redirect() receives a model
            # instance, it calls the instance's get_absolute_url() and redirects
            # to the URL that method returns
return redirect(post)
else:
comment_list = post.comment_set.all()
context = {'post': post,
'form': form,
'comment_list': comment_list
}
return render(request, 'blog/detail.html', context=context)
return redirect(post)
| a4322296/django | comments/views.py | views.py | py | 1,186 | python | zh | code | 1 | github-code | 13 |
39129547136 | from .constants import (
LRC_ATTRIBUTE,
LRC_LINE,
LRC_TIMESTAMP,
LRC_WORD,
MS_DIGITS,
TRANSLATION_DIVIDER,
)
from .file import LrcFile
from .line import LrcLine
from .parser import LrcParser
from .text import LrcText, LrcTextSegment
from .time import LrcTime
from .utils import *
__all__ = [
"LRC_TIMESTAMP",
"LRC_ATTRIBUTE",
"LRC_LINE",
"LRC_WORD",
"MS_DIGITS",
"TRANSLATION_DIVIDER",
"LrcLine",
"LrcTime",
"LrcTextSegment",
"LrcText",
"LrcParser",
"LrcFile",
]
| 283375/lrcparser_python | lrcparser/__init__.py | __init__.py | py | 536 | python | en | code | 0 | github-code | 13 |
19299886538 | class Account:
def __init__(self,filepath):
self.filepath = filepath
with open(filepath,'r') as file:
self.balance = int(file.read())
def withdraw(self, amount,fees=0):
self.balance = self.balance - (int(amount) + int(fees))
self.commit()
def deposit(self, amount):
self.balance = self.balance + int(amount)
self.commit()
def commit(self):
with open(self.filepath,'w') as file:
file.write(str(self.balance))
class Checking(Account):
"""This class generates checking account objects"""
type="checking"
def __init__(self,filepath,fees):
Account.__init__(self,filepath)
self.fees = fees
def transfer(self,amount):
self.withdraw(amount,self.fees)
awni_checking = Checking('awni.txt',1)
awni_checking.transfer(500)
print(awni_checking.balance)
print(awni_checking.type)
#####
medo_checking = Checking('medo.txt',1)
medo_checking.transfer(500)
print(medo_checking.balance)
print(medo_checking.type)
print(awni_checking.__doc__) | mohamedawnallah/Object-Oriented-Programming | Bank Account Exercise/acc.py | acc.py | py | 1,121 | python | en | code | 0 | github-code | 13 |
22223093594 | '''
Using the string and os libraries, write a program that strips the '.' and '/'
characters from the order date, pads the month and day to two digits, and
outputs the date as yyyyMMdd (e.g. 20211207).
Save the processed results to '\\home\\数据处理结果\\kdDate.csv'.
'''
import csv
import datetime

import xlrd

wj_path = 'C:\\Users\\14404\\Desktop\\数据分析\\原始数据-某图书机构在各电商平台销售数据 1130.xls'
# workbook
file = xlrd.open_workbook(wj_path)
# first worksheet
gzb = file.sheet_by_index(0)
# number of rows
hs = gzb.nrows
print(hs)
# number of columns
ls = gzb.ncols
print(ls)
# iterate over all data rows (row 0 holds the header)
for i in range(1, hs):
    lv = []
    dyg = gzb.cell(i, 3).value
    # print(dyg)
    # check whether xlrd converted the cell to a float, i.e. an Excel date
    if type(dyg) is float:
        # unpack the Excel serial date
        dv = xlrd.xldate_as_tuple(dyg, file.datemode)
        # convert to a Python date
        shi = datetime.date(dv[0], dv[1], dv[2])
    else:  # the cell is a dot-separated string such as '2021.12.7'
        tp = tuple(str(dyg).split('.'))
        shi = datetime.date(int(tp[0]), int(tp[1]), int(tp[2]))
    # str(date) is 'yyyy-mm-dd' with zero padding, so stripping '-' yields yyyyMMdd
    jg = str(shi).replace('-', '')
    # convert the Excel row to a list
    # columns kept: [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]
    lv.append(gzb.cell(i, 0).value)
    lv.append(int(gzb.cell(i, 1).value))
    lv.append(gzb.cell(i, 2).value)
    lv.append(jg)
    for j in range(4, 7):
        value = gzb.cell(i, j).value
        lv.append(value)
    lv.append(int(gzb.cell(i, 7).value))
    lv.append(int(gzb.cell(i, 8).value))
    lv.append(gzb.cell(i, 9).value)
    lv.append(int(gzb.cell(i, 10).value))
    print(lv)
    # append the row to the csv (newline='' keeps csv from adding blank rows on Windows)
    path = 'D:\\home\\数据处理结果\\kdDate.csv'
    with open(path, 'a', encoding='utf-8', newline='') as xie:
        csv.writer(xie).writerow(lv)
| qifiqi/codebase | python_codebase/数据分析/去除标点符号/去除符号.py | 去除符号.py | py | 1,846 | python | zh | code | 3 | github-code | 13 |
31843321571 | from keras.models import load_model
from keras.models import Model
from keras.layers import Conv2D
from keras.layers import Flatten
from keras.layers import concatenate
from keras.layers import Activation
from keras.layers import Reshape
import keras.backend as K
filepath = '../trained_models/300x300/weights.17-1.00.hdf5'
model = load_model(filepath)
input_layer = model.input
layer_1 = model.get_layer('concatenate_3').output
layer_2 = model.get_layer('concatenate_12').output
layer_3 = model.get_layer('concatenate_18').output
num_priors = [4, 6, 6]
num_classes = 21
boxes_1_class = Conv2D(num_priors[0] * num_classes, (3, 3))(layer_1)
boxes_1_flat_class = Flatten()(boxes_1_class)
boxes_2_class = Conv2D(num_priors[1] * num_classes, (3, 3))(layer_2)
boxes_2_flat_class = Flatten()(boxes_2_class)
boxes_3_class = Conv2D(num_priors[2] * num_classes, (3, 3))(layer_3)
boxes_3_flat_class = Flatten()(boxes_3_class)
boxes_1_loc = Conv2D(num_priors[0] * 4, (3, 3))(layer_1)
boxes_1_flat_loc = Flatten()(boxes_1_loc)
boxes_2_loc = Conv2D(num_priors[1] * 4, (3, 3))(layer_2)
boxes_2_flat_loc = Flatten()(boxes_2_loc)
boxes_3_loc = Conv2D(num_priors[2] * 4, (3, 3))(layer_3)
boxes_3_flat_loc = Flatten()(boxes_3_loc)
mbox_conf = concatenate([boxes_1_flat_class,
boxes_2_flat_class,
boxes_3_flat_class],
axis=1, name='concat_ssd_1')
mbox_loc = concatenate([boxes_1_flat_loc,
boxes_2_flat_loc,
boxes_3_flat_loc],
axis=1, name='concat_ssd_2')
num_boxes = K.int_shape(mbox_loc)[-1] // 4
mbox_loc = Reshape((num_boxes, 4))(mbox_loc)
mbox_conf = Reshape((num_boxes, num_classes))(mbox_conf)
mbox_conf = Activation('softmax', name='hola')(mbox_conf)
predictions = concatenate([mbox_loc, mbox_conf],
axis=2, name='predictions')
model2 = Model(inputs=input_layer, outputs=predictions)
| oarriaga/SSD-keras | src/utils/tests/modify_model.py | modify_model.py | py | 1,956 | python | en | code | 84 | github-code | 13 |
189994737 | from fastapi import FastAPI
from server.routes.sequence import router as SequenceRouter
app = FastAPI()
app.include_router(SequenceRouter, tags=["Sequence"], prefix="/sequence")
@app.get("/", tags=["Root"])
async def read_root():
return {"message": "Welcome :)"}
| megharosejayan/fastapi-sql | app/server/app.py | app.py | py | 276 | python | en | code | 0 | github-code | 13 |
35216414894 | from django.urls import path, include
from rakes import views
urlpatterns = [
path('', views.RakesHomePageView.as_view(), name='Rakes_home'),
path('RakeEntry', views.AddRake, name='Rakes_entry'),
path('ModuleAutocomplete', views.autocomplete1, name='autocomplete1'),
path('AddModule', views.AddModule, name='AddModule'),
path('ShowRakes', views.RakeListView.as_view(), name='Rake_list'),
path('ShowRakeDetail/<int:pk>/', views.RakeDetailView.as_view(), name='Rake_detail'),
path('ShowModule', views.ModuleListView.as_view(), name='Module_list'),
path('ShowModuleDetail/<int:pk>/',views.ModuleDetailView.as_view(), name='Module_detail'),
path('ModuleList/<int:pk>/edit/',
views.ModuleEditView.as_view(), name='Module_edit'),
path('RakeList/<int:pk>/edit/',
views.RakeEditView.as_view(), name='Rake_edit'),
path('moduleName', views.moduleName, name='moduleName'),
path('ModuleQuickLink2', views.ModuleDetailLink, name='ModuleDetailLink2'),
path('ModuleQuickLink3', views.ModuleDetailLink3, name='ModuleDetailLink3'),
path('RakeDetailLink2', views.RakeDetailLink2, name='RakeDetailLink2'),
path('RakeDetailLink3', views.RakeDetailLink3, name='RakeDetailLink3'),
path('WagonDetailLink', views.WagonDetailLink, name='WagonDetailLink'),
path('wagonnumberlink', views.wagonnumberlink, name='wagonnumberlink'),
]
| vinaykumar1908/082021i | rakes/urls.py | urls.py | py | 1,392 | python | en | code | 0 | github-code | 13 |
40200148393 | import pyfiglet
import sys
import socket
from datetime import datetime
# Defining a name
ascii_banner = pyfiglet.figlet_format("PORT SCANNER")
print(ascii_banner)
# Defining a target
if len(sys.argv) == 2:
    # translate hostname to IPv4
    ip = socket.gethostbyname(sys.argv[1])
else:
    print("Invalid number of arguments. Usage: python3 port_scanner.py <host>")
    sys.exit(1)  # without exiting, `ip` below would be undefined
# Add Banner
print("-" * 70)
print("Target Scanning : " + ip)
print("Scanning started at:" + str(datetime.now()))
print("-" * 70)
try:
# scaning ports between 1 to 65,535
for port in range(1,65535):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.setdefaulttimeout(1)
# returns an error indicator
result = s.connect_ex((ip,port))
if result ==0:
print("Hidden services found. \n Port {} is open. \n ".format(port))
s.close()
except KeyboardInterrupt:
print("\n Quiting Program !!!!")
sys.exit()
except socket.gaierror:
print("\n Hostname Could Not Be Resolved !!!!")
sys.exit()
except socket.error:
print("\n Server not responding !!!!")
sys.exit()
| dummy-co-der/Port-Scanner | port_scanner.py | port_scanner.py | py | 1,023 | python | en | code | 0 | github-code | 13 |
70108455378 | from telebot import types
def hotel_result_mark_up(text=None, prev=False, next=True, row_width=None, hotel_id=None):
search_res_mark_up = types.InlineKeyboardMarkup(row_width=row_width)
btn = types.InlineKeyboardButton(text=text, callback_data=text)
next_btn = types.InlineKeyboardButton(text='>', callback_data='next_hotel')
prev_btn = types.InlineKeyboardButton(text='<', callback_data='prev_hotel')
    photos = types.InlineKeyboardButton(text='Photos', callback_data='get_photos')
    reserve = types.InlineKeyboardButton(text='Book', url=f'https://www.hotels.com/ho{hotel_id}')
    exit_btn = types.InlineKeyboardButton(text='Finish browsing', callback_data='exit')  # renamed from `exit`, which shadowed the built-in
    if prev and not next:
        search_res_mark_up.add(prev_btn, btn, photos, reserve)
        search_res_mark_up.add(exit_btn)
    elif next and not prev:
        search_res_mark_up.add(btn, next_btn, photos, reserve)
        search_res_mark_up.add(exit_btn)
    elif prev and next:
        search_res_mark_up.add(prev_btn, btn, next_btn, photos, reserve)
        search_res_mark_up.add(exit_btn)
return search_res_mark_up
def hotel_photos_mark_up(text=None, prev=False, next=True, row_width=None):
photo_res_mark_up = types.InlineKeyboardMarkup(row_width=row_width)
btn = types.InlineKeyboardButton(text=text, callback_data=text)
next_btn = types.InlineKeyboardButton(text='>', callback_data='next_photo')
prev_btn = types.InlineKeyboardButton(text='<', callback_data='prev_photo')
    back = types.InlineKeyboardButton(text='Back', callback_data='back')
if prev and not next:
photo_res_mark_up.add(prev_btn, btn, back)
elif next and not prev:
photo_res_mark_up.add(btn, next_btn, back)
elif prev and next:
photo_res_mark_up.add(prev_btn, btn, next_btn, back)
return photo_res_mark_up
def exit_mark_up():
final_mark_up = types.InlineKeyboardMarkup()
    back = types.InlineKeyboardButton(text='Yes👍', callback_data='another_search')
    exit_bot = types.InlineKeyboardButton(text='No👎', callback_data='see_you_soon_mate')
final_mark_up.add(back, exit_bot)
return final_mark_up
| lexsorokin/HotelsForYou_bot | keyboards/custom_functions_kewboards/hotel_search_result_markup.py | hotel_search_result_markup.py | py | 2,191 | python | en | code | 0 | github-code | 13 |
6178260414 | import os
from numpy import array
from numpy.random import shuffle
from sentence_transformers import losses
from abc import ABC, abstractmethod
from torch import load, tensor, sum, clamp, long, save
from torch.nn.functional import normalize
from torch.optim import Adam
from torch.utils.data import IterableDataset, DataLoader
from transformers import set_seed, AutoTokenizer, AutoModel
from embed4sd.extractors import FineTuningDataExtractor
RANDOM_SEED = 2355764148 # set the random seed to an appropriate value
set_seed(RANDOM_SEED)
class CustomIterableDataset(IterableDataset):
def __init__(self, x_train, ids_train, y_train, start_indexes, end_indexes, iterations,
start, end):
super(CustomIterableDataset).__init__()
self.x_train = x_train
self.ids_train = ids_train
self.y_train = y_train
self.start_indexes = start_indexes
self.end_indexes = end_indexes
self.iterations = iterations
self.start = start
self.end = end
def __iter__(self):
set_seed(RANDOM_SEED)
class_indexes = array(range(17))
for iteration in range(self.iterations):
if iteration >= self.end:
print(f'Skipped iteration {iteration}.')
continue
shuffle(class_indexes) # shuffle class indexes
classes = class_indexes[:13] # first 13 classes will have 4 examples, the rest 3
idx = []
for c in range(17):
goal_indexes = list(range(self.start_indexes[c], self.end_indexes[c]))
shuffle(goal_indexes)
if c in classes:
count = 4
else:
count = 3
idx = idx + goal_indexes[:count]
if iteration < self.start:
print(f'Skipped iteration {iteration}.')
continue
print(f'Processing iteration {iteration}.')
x_ = [x for i, x in enumerate(self.x_train) if i in idx]
y_ = [y for i, y in enumerate(self.y_train) if i in idx]
ids_ = [id_ for i, id_ in enumerate(self.ids_train) if i in idx]
yield x_, ids_, y_
class RepresentationLearner(ABC):
"""
Abstract class extended by all representation learners.
"""
BATCH_SIZE = 64
LEARNING_RATE = 2e-5
def __init__(self, input_files: list, flags: list, margin: float):
self.margin = margin
self.training_data_extractor = FineTuningDataExtractor(input_files=input_files, flags=flags)
@abstractmethod
def load_data(self, k: int):
raise NotImplementedError()
def train_network(self, k, base_model_dir, output_dir, iterations, start_iteration, end_iteration):
[x_train, ids_train, y_train, start_indexes, end_indexes] = self.load_data(k)
train_ds = CustomIterableDataset(x_train=x_train,
ids_train=ids_train,
y_train=y_train,
start_indexes=start_indexes,
end_indexes=end_indexes,
iterations=iterations,
start=start_iteration,
end=end_iteration)
train_loader = DataLoader(train_ds)
tokenizer = AutoTokenizer.from_pretrained(base_model_dir)
model = AutoModel.from_pretrained(base_model_dir)
optimizer = Adam(params=model.parameters(), lr=self.LEARNING_RATE)
loss_fn = losses.BatchHardTripletLoss(
model=model,
margin=self.margin,
distance_metric=losses.BatchHardTripletLossDistanceFunction.cosine_distance)
path = os.path.join(output_dir, f'{str(start_iteration)}_model.pt')
if os.path.exists(path):
checkpoint = load(path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
else:
checkpoint = None
if checkpoint:
print(f'Restored from {path}')
else:
print('Initializing from scratch.')
for i, (x, ids, y) in enumerate(train_loader):
print(f'Iteration: {i}')
model.train()
labels = tensor(y, dtype=long)
# implementation based on the examples from https://github.com/UKPLab/sentence-transformers
text = tokenizer([x_[0] for x_ in x], return_tensors="pt", max_length=128, truncation=True,
padding="max_length")
output = model(**text)
input_mask_expanded = text['attention_mask'].unsqueeze(-1).expand(output[0].size()).float()
embeddings = sum(output[0] * input_mask_expanded, 1) / clamp(
input_mask_expanded.sum(1), min=1e-9)
embeddings = normalize(embeddings, p=2, dim=1)
loss = loss_fn.batch_hard_triplet_loss(labels=labels, embeddings=embeddings)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i > 0) and ((i + 1) % 5) == 0:
model.eval()
save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, os.path.join(output_dir, f'{str(start_iteration + i + 1)}_model.pt'))
print(f'Saved checkpoint for step {str(start_iteration + i + 1)}: {output_dir}')
class GoalRepresentationLearner(RepresentationLearner):
def __init__(self, input_files: list, flags: list, margin: float = 0.4):
super(GoalRepresentationLearner, self).__init__(input_files, flags, margin)
def load_data(self, k):
goal_dict = dict()
start_indexes = []
end_indexes = []
data = self.training_data_extractor.run(goal_count=k, target_count=0)
data = data.fillna('').sort_values(by=['goal', 'target'], ascending=True)
current_start = 0
for label in range(1, 18):
goal_dict[label] = label - 1
start_indexes += [current_start]
end_indexes += [current_start + data[data['goal'] == label].shape[0]]
current_start += data[data['goal'] == label].shape[0]
x_train = data['modified_text_excerpt'].values.tolist()
y_train = [goal_dict[label] for label in data['goal'].values]
ids_train = data['id'].values.tolist()
return [x_train, ids_train, y_train, start_indexes, end_indexes]
class TargetRepresentationLearner(RepresentationLearner):
def __init__(self, input_files: list, flags: list, margin: float = 0.2):
super(TargetRepresentationLearner, self).__init__(input_files, flags, margin)
def load_data(self, k: int):
target_dict = dict()
start_indexes = []
end_indexes = []
data = self.training_data_extractor.run(goal_count=0, target_count=k)
data = data.fillna('').sort_values(by=['goal', 'target'], ascending=True)
current_start = 0
for label in range(1, 18):
start_indexes += [current_start]
current_start += data[data['goal'] == label].shape[0]
end_indexes += [current_start]
counter = 0
for label in data['target'].values:
if label not in target_dict.keys():
target_dict[label] = counter
counter += 1
x_train = data['modified_text_excerpt'].values.tolist()
y_train = [target_dict[label] for label in data['target'].values]
ids_train = data['id'].values.tolist()
return [x_train, ids_train, y_train, start_indexes, end_indexes]
if __name__ == '__main__':
# examples
learner = GoalRepresentationLearner(
input_files=[
r'path\to\xml\file', # file containing the revision of the general Wikipedia article
r'path\to\xml\file'], # file containing the revisions of the SDG-specific Wikipedia articles
flags=[True, # flag indicating that the first file contains a revision of the general Wikipedia article
False]) # flag indicating that the second file contains revisions of the SDG-specific Wikipedia articles
learner.train_network(k=14, # number of examples to be sampled by SDG in the fine-tuning set
base_model_dir=r'path\to\model\files', # directory containing the pre-trained model files
output_dir=r'path\to\last\checkpoint\file', # directory containing the last checkpoint
iterations=20, # total number of fine-tuning iterations
start_iteration=10, # start iteration, can be larger than 0 if the fine-tuning is resumed
end_iteration=20) # end iteration
learner = TargetRepresentationLearner(
input_files=[
r'path\to\xml\file', # file containing the revision of the general Wikipedia article
r'path\to\xml\file'], # file containing the revisions of the SDG-specific Wikipedia articles
flags=[True, # flag indicating that the first file contains a revision of the general Wikipedia article
False]) # flag indicating that the second file contains revisions of the SDG-specific Wikipedia articles
learner.train_network(k=17, # number of examples to be sampled by target in the fine-tuning set
base_model_dir=r'path\to\model\files', # directory containing the pre-trained model files
output_dir=r'path\to\last\checkpoint\file', # directory containing the last checkpoint
iterations=20, # total number of fine-tuning iterations
start_iteration=0, # start iteration, can be larger than 0 if the fine-tuning is resumed
end_iteration=10) # end iteration
| gjorgjevik/embed4sd | embed4sd/learners.py | learners.py | py | 10,050 | python | en | code | 0 | github-code | 13 |
42151242865 | from CVariable import CReadVariable,CWriteVariable
from Vector import *
#/==========================================================================
#*!
# @brief t_AssetVersionOne
#/
class t_AssetVersionOne:
#/==========================================================================
#*!
# @brief Member
#/
m_mId = 0 #U32
m_path = "" #std::string
m_iVersion = 0 #S32
m_uFlag = 0 #U32
m_width = 0 #U16
m_height = 0 #U16
m_uMd5a = 0 #U64
m_uMd5b = 0 #U64
#/==========================================================================
#*!
# @brief Constructor
#/
def __init__(self):
self.clear()
#/==========================================================================
#*!
# @brief Accessor
#/
def clear(self):
self.m_mId = 0
self.m_path = ""
self.m_iVersion = 0
self.m_uFlag = 0
self.m_width = 0
self.m_height = 0
self.m_uMd5a = 0
self.m_uMd5b = 0
def read(self,cVariable):
self.m_mId = cVariable.getU32()
self.m_path = cVariable.getString(255)
self.m_iVersion = cVariable.getS32()
self.m_uFlag = cVariable.getU32()
self.m_width = cVariable.getU16()
self.m_height = cVariable.getU16()
self.m_uMd5a = cVariable.getU64()
self.m_uMd5b = cVariable.getU64()
return True
def write(self,cVariable):
cVariable.putU32(self.m_mId)
cVariable.putString(self.m_path,255)
cVariable.putS32(self.m_iVersion)
cVariable.putU32(self.m_uFlag)
cVariable.putU16(self.m_width)
cVariable.putU16(self.m_height)
cVariable.putU64(self.m_uMd5a)
cVariable.putU64(self.m_uMd5b)
return True
| 3Dsamples/MakeHuman-unity | Assets/MakeHuman/Icons/KsSoft/Editor/Multilingual/tools/protocol/t_AssetVersionOne.py | t_AssetVersionOne.py | py | 1,543 | python | en | code | 2 | github-code | 13 |
# Exterior wall inspection (Kakao problem)
# N: length of dist
# Time complexity: O(N!)
import itertools
def solution(n: int, weak: list, dist: list) -> int:
dist_len = len(dist)
weak = weak + [w + n for w in weak]
len_weak = len(weak)
for number_of_permutation in range(1, dist_len + 1):
        # choose which friends will inspect the wall
for friends in itertools.permutations(dist, number_of_permutation):
            # choose the starting position for the inspection
for start_count in range(len(weak) // 2):
inspected_wall = set()
                # go through the chosen friends in order
for f in friends:
f_start = weak[start_count]
                    # inspect from the friend's start position as far as they can reach
while start_count < len_weak and f_start <= weak[start_count] <= f_start + f:
inspected_wall.add(weak[start_count] % n)
start_count += 1
                    # if start_count >= len_weak we have passed the last position, so stop
if start_count >= len_weak:
break
                # if the inspected walls cover every weak point, return the friend count
if len(inspected_wall) == len_weak // 2:
return number_of_permutation
    # every combination of inspections failed
return -1
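# Quick check against the sample cases of the Kakao problem (values assumed
# from the published problem statement; hand-traced against this function):
if __name__ == "__main__":
    print(solution(12, [1, 5, 6, 10], [1, 2, 3, 4]))   # -> 2
    print(solution(12, [1, 3, 4, 9, 10], [3, 5, 7]))   # -> 1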
| galug/2023-algorithm-study | level_3/outside_wall_inspection.py | outside_wall_inspection.py | py | 1,396 | python | ko | code | null | github-code | 13 |
23026709382 | # -*- coding: utf-8 -*-
# @Author: IBNBlank
# @Date: 2019-01-20 19:32:03
# @Last Modified by: IBNBlank
# @Last Modified time: 2019-01-20 23:02:20
import cv2 as cv
gray_path = "..\\example\\image\\lena256.bmp"
gray = cv.imread(gray_path, cv.IMREAD_UNCHANGED)
color_path = "..\\example\\image\\lenacolor.png"
color = cv.imread(color_path, cv.IMREAD_UNCHANGED)
### gray image
gray_pixel = gray[100, 100]
print(gray_pixel)
### color image
# blue
blue_pixel = color[100, 100, 0]
print(blue_pixel)
# green
green_pixel = color[100, 100, 1]
print(green_pixel)
# red
red_pixel = color[100, 100, 2]
print(red_pixel)
# all
one_pixel = color[100, 100]
print(one_pixel) | IBNBlank/toy_code | OpenCV-Repository-master/02.图像处理基础/my_code/01.read_pixels.py | 01.read_pixels.py | py | 665 | python | en | code | 0 | github-code | 13 |
41229412144 | class Solution:
def my_sol(self, dividend: int, divisor: int) -> int:
        # brute force: repeated subtraction; exceeds the time limit on large inputs
if dividend == 0:
return 0
isPositive = True
if dividend < 0:
isPositive = not isPositive
dividend = abs(dividend)
if divisor < 0:
isPositive = not isPositive
divisor = abs(divisor)
count = 0
while dividend >= divisor:
dividend -= divisor
count += 1
return count if isPositive else -count
def sol1(self, dividend, divisor):
positive = (dividend < 0) is (divisor < 0)
dividend, divisor = abs(dividend), abs(divisor)
res = 0
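        # Repeated doubling: subtract divisor, 2*divisor, 4*divisor, ... so the
        # quotient accumulates in O(log^2 n) steps instead of one-by-one subtraction.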
while dividend >= divisor:
temp, i = divisor, 1
while dividend >= temp:
print(temp)
dividend -= temp
res += i
i <<= 1
temp <<= 1
print(f"res: {i}")
print(f"dividend: {dividend}")
print("_________________")
if not positive:
res = -res
return min(max(-2147483648, res), 2147483647) | devpotatopotato/devpotatopotato-LeetCode-Solutions | Solutions/29.py | 29.py | py | 1,186 | python | en | code | 0 | github-code | 13 |
40173140703 | import random
def create(width, height):
    sideA = random.randint(0, width)
    sideB = random.randint(0, height)
field = [sideA, sideB]
return field
def paint(pen, field):
sideA = field[0]
sideB = field[1]
pen.up()
pen.goto(sideA / 2 * (-1), sideB / 2 * (-1))
pen.down()
for _ in range(4):
if _ % 2 != 0:
pen.forward(sideB)
else:
pen.forward(sideA)
pen.left(90)
pen.up()
pen.goto(0, 0) | incente/LearningPython | Projects/Field helper/create_field.py | create_field.py | py | 491 | python | en | code | 0 | github-code | 13 |
35647056999 | from apscheduler.schedulers.asyncio import AsyncIOScheduler
from requests_cache import CachedSession
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from tzlocal import get_localzone
from app.configs import (
DATABASE_URI,
IPGEO_CACHE,
OPENWEATHER_CACHE,
SCHEDULER_JOBS_STORE,
)
db = create_engine(DATABASE_URI)
Session = sessionmaker(db)
ipgeo_request = CachedSession("ipgeolocation_cache", IPGEO_CACHE, expire_after=86400)
openweather_request = CachedSession(
"openweathermap_cache", OPENWEATHER_CACHE, expire_after=300
)
scheduler = AsyncIOScheduler(
timezone=get_localzone(),
jobstores={"default": SCHEDULER_JOBS_STORE},
replace_jobs=True,
)
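# A hedged usage sketch of the scheduler above (the job below is an
# illustrative assumption, not part of the original module):
#
#   @scheduler.scheduled_job("interval", minutes=30)
#   async def refresh_weather():
#       ...
#
#   scheduler.start()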
| avillia/tg-weather-bot | app/configs/extensions.py | extensions.py | py | 711 | python | en | code | 1 | github-code | 13 |
24126515880 | # setting up the main window or using qcheckbox widgets
import sys
from PyQt6.QtWidgets import QApplication, QWidget, QCheckBox, QLabel
from PyQt6.QtCore import Qt
class MainWindow(QWidget):
def __init__(self) -> None:
super().__init__()
self.initializeUI()
def initializeUI(self):
self.setGeometry(100, 100, 250, 150)
self.setWindowTitle('qcheckbox')
self.setUpMainWindow()
self.show()
    def setUpMainWindow(self):
        # minimal sketch of the missing method, wiring up the widgets
        # already imported at the top of this file
        self.label = QLabel('off', self)
        self.label.move(110, 70)
        self.check_box = QCheckBox('toggle', self)
        self.check_box.move(100, 40)
        self.check_box.toggled.connect(self.update_label)
    def update_label(self, checked):
        self.label.setText('on' if checked else 'off')
if __name__ == '__main__':
    app = QApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec())
| jonasht/beginning_pyQt_book | 3-addingMoreFunctionalityWithWidgets/7.py | 7.py | py | 560 | python | en | code | 0 | github-code | 13 |
1596988578 | """
@author: Matheus José Oliveira dos Santos
Last Edit: 26/05/2023
"""
import pandas as pd
import urllib.parse
import psycopg2
# ex:
# import os
# variable_value = os.getenv('VARIABLE_NAME')
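# A hedged sketch of applying that pattern to this connection (the variable
# names below are illustrative assumptions, not part of the original code):
#
#   import os
#   db = psycopg2.connect(
#       host=os.getenv('DB_HOST', 'localhost'),
#       port=int(os.getenv('DB_PORT', '5432')),
#       database=os.getenv('DB_NAME'),
#       user=os.getenv('DB_USER'),
#       password=os.getenv('DB_PASSWORD'),
#   )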
class DB_interface:
def __init__(self,db_name) -> None:
print('connecting in: '+db_name)
self.db = psycopg2.connect(
host="localhost",
port=5432,
database=db_name,
user="Matheus",
password="password",
)
self.cursor = self.db.cursor()
return
def __enter__(self):
pass
def __exit__(self,exc_type, exc_value, exc_traceback):
self.close_db()
def add_row(self,table_name:str, *args) -> None:
if type(args[0]) == list:
aux_values = ','.join(["'"+str(i)+"'" for i in args[0]])
else:
aux_values = ','.join(["'"+str(i)+"'" for i in args])
command = "INSERT INTO {0} VALUES ({1});".format(table_name,aux_values)
#print(command)
self.cursor.execute(command)
def replace_table(self,table_name:str,df:pd.DataFrame, should_print = False) -> None:
self.delete_all_rows_of_table(table_name)
self.append_table_fast(table_name,df)
#for linha in range(0, df.shape[0]):
# aux = df.loc[linha, :].values.tolist()
# if should_print == True:
# print(aux)
# self.add_row(table_name,aux)
def replace_table_slow(self,table_name:str,df:pd.DataFrame, should_print = False) -> None:
self.delete_all_rows_of_table(table_name)
for linha in range(0, df.shape[0]):
aux = df.loc[linha, :].values.tolist()
if should_print == True:
print(aux)
self.add_row(table_name,aux)
def append_table(self,table_name:str,df:pd.DataFrame, should_print = False) -> None:
for linha in range(0, len(df)):
aux = df.loc[linha, :].values.tolist()
try:
self.add_row(table_name,aux)
except Exception as e:
print(e)
if should_print == True:
print(aux)
def append_table_fast(self,table_name:str,df:pd.DataFrame) -> None:
divs = [df[i:i + 1000] for i in range(0, len(df), 1000)]
i=0
for div in divs:
sql_query = f"INSERT INTO {table_name} ({', '.join(div.columns)}) VALUES "
sql_query += ', '.join(['(' + ', '.join([f"'{str(val)}'" if pd.notna(val) else 'NULL' for val in row]) + ')' for row in div.values])
i=i+1
print(i)
self.cursor.execute(sql_query)
def delete_all_rows_of_table(self,table_name:str) -> None:
command = "DELETE FROM {0}".format(table_name)
self.cursor.execute(command)
def get_table(self,table_name:str, company_name = None) -> pd.DataFrame:
if company_name == None:
command = "SELECT * FROM {0}".format(table_name)
else:
command = "SELECT * FROM {0} where BBGTicker = '{1}'".format(table_name,company_name)
df_return = pd.read_sql_query(command, self.db)
return df_return
def read_by_command(self, command:str) -> pd.DataFrame:
return pd.read_sql_query(command, self.db)
def execute_command(self, command:str) -> None:
self.cursor.execute(command)
def close_db(self):
self.db.close()
print('DB Closed')
def save_db(self):
self.db.commit()
print('DB Saved') | maj-oliveira/quant-finance-strategy | src/db_interface.py | db_interface.py | py | 3,528 | python | en | code | 0 | github-code | 13 |
25247344836 | from flask_restful import Resource
from flask import request
from bson import ObjectId
from dao.gameInstance import get_game_instance , make_move
from dao.user import get_user_by_id
from dao.move import make_move_entry
from validators.move import validate_move_obj , validate_move, get_winner ,check_status
from utils.constants import final_status
from dao.session import get_session
from views.gameInstance import single
class Move(Resource):
def get(self):
params = request.args.to_dict()
game_obj_id = ObjectId(params["gi_id"])
return single(get_game_instance(game_obj_id))
def post(self):
payload = request.json
#if not validate_move_obj(payload):
#return {"response" : "Bad request"}, 401
game_obj_id = ObjectId(payload['gi_id'])
curr_player = ObjectId(payload['curr_player'])
next_player = ObjectId(payload['next_player'])
proposed_state = payload['cstate']
user_token = payload['token']
if not (get_user_by_id(next_player) or get_user_by_id(curr_player)) :
return {"response" : "User not found"}, 404
user_session = get_session(user_token)
if not user_session :
return {"response" : "You Have To LogIn Again"}, 401
if not (user_session["user"] == curr_player):
return {"response" : "Bad request"}, 401
game_instance = get_game_instance(game_obj_id)
if not (curr_player == game_instance["next_player"]):
return {"response" : "Not Your Turn"}, 401
if not game_instance :
return {"response" : "GameInstance not found"}, 404
if not (next_player == game_instance['user1'] or next_player == game_instance['user2']) :
return {"response" : "Bad request"}, 401 # ******
if not validate_move(proposed_state,next_player,game_instance) :
return {"response" : "Invalid Move"}, 401
make_move_entry(game_obj_id,next_player,proposed_state)
winner = get_winner(proposed_state,game_instance['user1'],game_instance['user2'])
if not winner:
make_move(game_obj_id,proposed_state,next_player,check_status(proposed_state))
else:
make_move(game_obj_id,proposed_state,next_player,final_status,winner)
return single(get_game_instance(game_obj_id))
| mukeshbhakuni/messenger | tictactoe/gameservice/business_logic/serviceapis/move.py | move.py | py | 2,397 | python | en | code | 0 | github-code | 13 |
4500857112 | import tkinter as tk
# MODULE FOR ADDING ELEMENTS TO THE INTERFACE
from tkinter import ttk
from tkinter import OptionMenu
from tkinter import StringVar
from tkinter import Text
from tkinter import messagebox
from interfaz_grafica2 import mostrar_mensaje
def insertar_producto():
producto = input_producto.get()
texto = area_texto.get("1.0",'end')
radio_b=seleccion_radio.get()
tipo=texto_desplegable.get()
print(producto)
print(texto.strip())
print(tipo)
print(radio_b)
cadena = "Nombre del producto: " + producto + "\n" + "Descripción: " + texto.strip() + "\n" + "Tipo de producto: " + tipo + \
"\n" + "Departamento: " + radio_b
mostrar_mensaje(1,cadena)
    # TODO: write the data to a file
def borrar_datos():
    # clear the product field
input_producto.delete(0, 'end')
    # clear the text area
area_texto.delete(1.0, 'end')
    # clear the drop-down list
    # --> TODO
    # clear the radio buttons
    # --> TODO
##############################################################################################################################
# CREATE THE WINDOW
ventana = tk.Tk()
# ADD TITLE AND DIMENSIONS
ventana.title("AÑADIR PRODUCTO")
#ventana.config(width=450, height=350)
ventana.geometry("450x450")
##############################################################################################################################
# Label and text field - product name
etiqueta_nombre_p = ttk.Label(text="Nombre del producto: ")
etiqueta_nombre_p.place(x=20, y=20)
input_producto = ttk.Entry()
input_producto.place(x=145, y=20, width=180)
##############################################################################################################################
# DROP-DOWN LIST
etiqueta_tipo_p = ttk.Label(text="Tipo de producto: ")
etiqueta_tipo_p.place(x=20, y=60)
tipos = ["Almacenable","Consumible","Servicio"]
# Text that will appear in the drop-down list
texto_desplegable = StringVar()
# SET THE INITIAL TEXT
texto_desplegable.set("Selecciona tipo")
# CREATE AND PLACE THE DROP-DOWN LIST
menu_tipos = OptionMenu(ventana, texto_desplegable, *tipos)
# PLACE THE LIST IN THE MENU WITH .PLACE
menu_tipos.place(x=145, y=60, width=200)
##############################################################################################################################
# TEXT AREA
etiqueta_descripción_p = ttk.Label(text="Descripción del producto: ")
etiqueta_descripción_p.place(x=20, y=100)
area_texto = Text(ventana, height = 5, width = 52, bg="light yellow")
area_texto.place(x=20, y=120)
###############################################################
# RADIO BUTTON
etiqueta_radio_p = ttk.Label(text="Área al que pertenece el producto: ")
etiqueta_radio_p.place(x=20, y=220)
seleccion_radio = tk.StringVar()
r1 = ttk.Radiobutton(ventana, text='Compras', value='Compras', variable=seleccion_radio)
r2 = ttk.Radiobutton(ventana, text='Ventas', value='Ventas', variable=seleccion_radio)
r3 = ttk.Radiobutton(ventana, text='Ambas', value='Compras/Ventas', variable=seleccion_radio)
r1.place(x=20, y=240)
r2.place(x=20, y=260)
r3.place(x=20, y=280)
###############################################################
boton_insertar_p = ttk.Button(text="Añadir producto", command=insertar_producto)
boton_insertar_p.place(x=300, y=300)
###############################################################
boton_insertar_p = ttk.Button(text="Borrar datos", command=borrar_datos)
boton_insertar_p.place(x=300, y=340)
###############################################################
# CARGA LA VENTANA
ventana.mainloop() | zengotita/SGE-Ejemplos | Tkinter/productos.py | productos.py | py | 3,760 | python | es | code | 0 | github-code | 13 |
71068117138 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 14:49:33 2018
@author: pwfa-facet2
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
#import pyzdde.arraytrace as at
import pyzdde.zdde as pyz
import random as rand
def beamline_matrix(d, c_x, c_y, rot_angle):
c_x, c_y, rot_angle = np.deg2rad(c_x), np.deg2rad(c_y), np.deg2rad(rot_angle)
drift = np.matrix([[d,0], [0,d]])
scaling = np.matrix([[2*np.cos(c_x), 0], [0,2*np.cos(c_y)]])
rot = np.matrix([[np.cos(rot_angle), -np.sin(rot_angle)], [np.sin(rot_angle), np.cos(rot_angle)]])
return(drift*scaling*rot)
file = r"C:\Users\pwfa-facet2\Desktop\slacecodes\raytracing\ml.zmx"
def config_simulation(file, chief_angle1_x,chief_angle1_y, chief_angle1_z):
link = pyz.createLink()
link.zLoadFile(file)
link.zSetWave(1,.800, 1)
setfile = link.zGetFile().lower().replace('.zmx', '.CFG')
S_512 = 5
grid_size = 20
GAUSS_WAIST, WAIST_X, WAIST_Y, DECENTER_X, DECENTER_Y = 0, 1, 2, 3, 4
beam_waist, x_off, y_off = 5, 0, 0
cfgfile = link.zSetPOPSettings('irr', setfile, startSurf=2, endSurf=2, field=1,
wave=1, beamType=GAUSS_WAIST, paramN=( (WAIST_X, WAIST_Y, DECENTER_X, DECENTER_Y), (beam_waist, beam_waist, x_off, y_off) ),
sampx=S_512, sampy=S_512, widex=grid_size, widey=grid_size, tPow=1, auto=0, ignPol=1)
link.zModifyPOPSettings(cfgfile, endSurf=26)
link.zModifyPOPSettings(cfgfile, paramN=( (1, 2, 3, 4), (5, 5,
0, 0) ))
link.zModifyPOPSettings(cfgfile, widex=grid_size)
link.zModifyPOPSettings(cfgfile, widey=grid_size)
link.zModifyPOPSettings(cfgfile, ignPol=1)
#1 to ignore pol;0 to use
link.zSaveFile(file)
link.zSetSurfaceParameter(3,3, chief_angle1_x)
link.zSetSurfaceParameter(3,4, chief_angle1_y)
link.zSetSurfaceParameter(3,5, chief_angle1_z)
link.zSetSurfaceParameter(9,3, chief_angle1_x)
link.zSetSurfaceParameter(9,4, chief_angle1_y)
link.zSetSurfaceParameter(9,5 , chief_angle1_z)
#fix lens decentering too
link.zSetSurfaceParameter(16,1, 0)#decenter x,y : 1,2
link.zSetSurfaceParameter(21,1, 0)
link.zSetSurfaceParameter(16,2, 0)#decenter x,y : 1,2
link.zSetSurfaceParameter(21,2, 0)
link.zSetSurfaceParameter(17,1, 0)#decenter x,y : 1,2
link.zSetSurfaceParameter(20,1, 0)
link.zSetSurfaceParameter(17,2, 0)#decenter x,y : 1,2
link.zSetSurfaceParameter(20,2, 0)
#link.zSetSurfaceParameter(3,5, chief_angle1_z)
link.zSaveFile(file)
#var
link.zSetSurfaceParameter(4, 3, 0) #3 = x-tilt, 4=y-tilt
link.zSetSurfaceParameter(4, 4, 0)
link.zSetSurfaceParameter(4, 5, 0)
link.zSetSurfaceParameter(8, 3, 0) #3 = x-tilt, 4=y-tilt
link.zSetSurfaceParameter(8, 4, 0)
link.zSetSurfaceParameter(8, 5, 0)
#####
#fix
link.zSetSurfaceParameter(5, 3, 0) #3 = x-tilt, 4=y-tilt
link.zSetSurfaceParameter(5, 4, 0)
link.zSetSurfaceParameter(5, 5, 0)
link.zSetSurfaceParameter(7, 3, 0) #3 = x-tilt, 4=y-tilt
link.zSetSurfaceParameter(7, 4, 0)
link.zSetSurfaceParameter(7, 5, 0)
link.zSaveFile(file)
n_ccd1_offsetx = link.zOperandValue('POPD', 26, 1, 0, 11)
n_ccd1_offsety = link.zOperandValue('POPD', 26, 1, 0, 12)
print(n_ccd1_offsetx, n_ccd1_offsety)
img_str = str(r'C:\Users\pwfa-facet2\Desktop\slacecodes\raytracing\img-norm.csv')
print(img_str)
link.zGetTextFile(textFileName=img_str, analysisType='Pop')
pyz.closeLink()
print('config set for testing!')
def algo_var(file, low_angle, high_angle):
link = pyz.createLink()
link.zLoadFile(file)
alpha1_x = np.random.uniform(low_angle, high_angle)
alpha1_y = np.random.uniform(low_angle, high_angle)
#insert variations
link.zSetSurfaceParameter(4, 3, alpha1_x) #3 = x-tilt, 4=y-tilt
link.zSetSurfaceParameter(4, 4, alpha1_y)
link.zSetSurfaceParameter(4, 5, 0)
link.zSetSurfaceParameter(8, 3, -alpha1_x) #3 = x-tilt, 4=y-tilt
link.zSetSurfaceParameter(8, 4, -alpha1_y)
link.zSetSurfaceParameter(8, 5, 0)
link.zSaveFile(file)
# print("random input variations:",alpha1_x, alpha1_y, alpha2_x, alpha2_y)
#print('config set for fixing!')
img_str = str(r'C:\Users\pwfa-facet2\Desktop\slacecodes\raytracing\varinput-norm.csv')
print(img_str)
link.zGetTextFile(textFileName=img_str, analysisType='Pop')
pyz.closeLink()
return(alpha1_x, alpha1_y)
config_simulation(file,45,0,0)
algo_var(file, 0,0.5)
def lens_mirror_beamline(d1,d2,f, cx, cy, rot_ang):
cx , cy, rot_ang = np.deg2rad(cx), np.deg2rad(cy), np.deg2rad(rot_ang)
d_1 = np.matrix([ [d1, 0], [0,d1] ])
d_2 = np.matrix([ [d2, 0], [0,d2] ])
lens = np.matrix([ [-1/f, 0], [0,-1/f] ])
axis = np.matrix([ [2*np.cos(cx), 0], [0, 2*np.cos(cy)] ])
rot = np.matrix([ [np.cos(rot_ang), -np.sin(rot_ang)], [np.sin(rot_ang), np.cos(rot_ang)] ])
result_lens = np.linalg.multi_dot([d_2, lens, d_1, axis, rot])
#print("lens contribution:",result_lens)
## standard beamline
mirror_scale = np.matrix([ [d1+d2, 0], [0, d1+d2] ])
result_mirror = np.linalg.multi_dot([mirror_scale, axis, rot])
#print("mirror contribution:",result_mirror)
result = result_lens + result_mirror
#print("total beamline:",result)
return result
def lens_no_errors_matrix(f, d1, d2, chiefx, chiefy, theta_rot, varx1, vary1):
chiefx, chiefy, theta_rot = np.deg2rad(chiefx), np.deg2rad(chiefy), np.deg2rad(theta_rot)
varx1, vary1 = np.deg2rad(varx1),np.deg2rad(vary1)
drift1 = np.matrix([[1,0,d1,0,0],
[0,1,0,d1,0],
[0,0,1,0,0],
[0,0,0,1,0],
[0,0,0,0,1]])
drift2 = np.matrix([[1,0,d2,0,0],
[0,1,0,d2,0],
[0,0,1,0,0],
[0,0,0,1,0],
[0,0,0,0,1]])
scaling = np.matrix([[1,0,0,0,0],
[0,1,0,0,0],
[0,0,np.cos(chiefx), 0,0],
[0,0,0,np.cos(chiefy),0],
[0,0,0,0,1]])
rot1 = np.matrix([[np.cos(theta_rot), -np.sin(theta_rot), 0,0,0],
[np.sin(theta_rot), np.cos(theta_rot),0,0,0],
[0,0,np.cos(theta_rot), -np.sin(theta_rot),0],
[0,0,np.sin(theta_rot), np.cos(theta_rot), 0],
[0,0,0,0,1]])
lens = np.matrix([[1,0,0,0,0],
[0,1,0,0,0],
[-1/f, 0,1,0,0],
[0,-1/f,0,1,0],
[0,0,0,0,1]])
t_mirror = np.matrix([[1,0,0,0,0],
[0,1,0,0,0],
[0,0,1,0,np.tan(2*varx1)],
[0,0,0,1,np.tan(2*vary1)],
[0,0,0,0,1]])
return(drift2*lens*drift1*scaling*rot1*t_mirror)
def algo_fix(file):
link = pyz.createLink()
link.zLoadFile(file)
status = 'not done'
#model for variation extraction
fmethod = beamline_matrix(400,45,0,90)#lens_mirror_beamline(200,200,200,45,0,90)
#execture first adjusment
it = 1
print("current iteration:", it)
#obtain current beam position at 1f point
offset_x = link.zOperandValue('POPD', 26, 1, 0, 11)
offset_y = link.zOperandValue('POPD', 26, 1, 0, 12)
curr_off_vec = np.matrix([ [offset_x], [offset_y] ])
print('current beam position:')
print(np.transpose(curr_off_vec))
#extract intiial variations
finv = np.linalg.inv(fmethod)
curr_var_vec = np.rad2deg(np.matmul(finv, curr_off_vec))
print("current variation vector:")
print(np.transpose(curr_var_vec))
corr_x = curr_var_vec.item(0)
corr_y = curr_var_vec.item(1)
#enact corrections
link.zSetSurfaceParameter(5, 3, -corr_x) #3 = x-tilt, 4=y-tilt
link.zSetSurfaceParameter(5, 4, -corr_y)
        link.zSetSurfaceParameter(7, 3, corr_x)
        link.zSetSurfaceParameter(7, 4, corr_y)
link.zSaveFile(file)
    i = 1
    while status != 'done':
#check corrections
offset_after_x = link.zOperandValue('POPD', 26, 1, 0, 11)
offset_after_y = link.zOperandValue('POPD', 26, 1, 0, 12)
after_vec = np.matrix([ [offset_after_x], [offset_after_y] ])
print("after correction beam position:")
print(np.transpose(after_vec))
diff_x = offset_after_x - 0; #not always going to be the origin
diff_y = offset_after_y -0; #not always going to be the origin
if (diff_x < 0.0001) and (diff_y <0.0001):
status = 'done'
else:
#make further corrections
after_var_vec = np.rad2deg(np.matmul(finv, after_vec))
print("after variation vector:")
print(np.transpose(after_var_vec))
            a_corr_x = after_var_vec.item(0)
            a_corr_y = after_var_vec.item(1)
#enact corrections
new_corr_x = corr_x + a_corr_x
new_corr_y = corr_y + a_corr_y
link.zSetSurfaceParameter(5, 3, -new_corr_x) #3 = x-tilt, 4=y-tilt
link.zSetSurfaceParameter(5, 4, -new_corr_y)
            link.zSetSurfaceParameter(7, 3, new_corr_x)
            link.zSetSurfaceParameter(7, 4, new_corr_y)
link.zSaveFile(file)
print('current it whileloop:',i)
i=i+1
#check current variations
algo_fix(file)
| eseguraca6/slacecodes | raytracing/lensmirrornodecenter.py | lensmirrornodecenter.py | py | 9,953 | python | en | code | 2 | github-code | 13 |
73570781776 | def add_twos(target):
count = 0
pile = 0
while pile < target and pile + 2 <= target:
pile = pile + 2
count = count + 1
return pile
def solve_case():
n = int(input())
weights = sorted([int(c) for c in input().split()])
two_amount = sum(list(filter(lambda x: x == 2, weights)))
one_amount = sum(list(filter(lambda x: x == 1, weights)))
total = sum(weights)
# If you can't divide into a whole number, it's obviously impossible
if total % 2 != 0:
print("NO")
return
# otherwise...
target = total / 2
pile = 0
# Add as many twos as you can
if two_amount > target:
pile = add_twos(target)
else:
pile = two_amount
if one_amount >= target-pile:
print("YES")
else:
print("NO")
t = int(input())
for i in range(0, t):
solve_case() | JDSeiler/programming-problems | codeforces/round-693/b-candies.py | b-candies.py | py | 876 | python | en | code | 0 | github-code | 13 |
16987145781 | import unittest
from RefactoringKata.VideoRental.VideoRental import Customer, Rental, Movie
class Test_VideoRental(unittest.TestCase):
def test_should_when(self):
customer = Customer("John")
movie = Movie("Fantasia", Movie.Children)
rental = Rental(movie, 1)
customer.add_rental(rental)
actual = customer.statement()
self.assertEqual(actual, "Rental Record for John\n\tFantasia\t1.5\nAmount owed is 1.5\nYou earned 1 frequent renter points")
if __name__ == "__main__":
unittest.main() | AAFINSYS/CleanerCodeInPython | RefactoringKata/VideoRental/test_videoRental.py | test_videoRental.py | py | 546 | python | en | code | 0 | github-code | 13 |
2864484188 | import httplib
import os
import mock
import stubout
import webtest
from google.apputils import app
from google.apputils import resources
from google.apputils import basetest
from simian import settings
from simian.mac import models
from simian.mac.admin import main as gae_main
from simian.mac.admin import xsrf
from simian.mac.common import auth
from tests.simian.mac.common import test
PLIST_FILE = 'simian/mac/common/testdata/testpackage.plist'
def GetTestData(rel_path):
path = os.path.dirname(os.path.realpath(__file__))
while os.path.basename(path) != 'tests':
path = os.path.dirname(path)
with open(os.path.join(path, rel_path)) as f:
return f.read()
@mock.patch.object(auth, 'IsGroupMember', return_value=True)
@mock.patch.object(xsrf, 'XsrfTokenValidate', return_value=True)
class UploadIconModuleTest(test.AppengineTest):
def setUp(self):
super(UploadIconModuleTest, self).setUp()
self.testapp = webtest.TestApp(gae_main.app)
self.plist = GetTestData(PLIST_FILE)
def testGCSBucketNotSet(self, *_):
resp = self.testapp.post(
'/admin/upload_icon/filename', status=httplib.NOT_FOUND)
self.assertIn('GCS bucket is not set', resp.body)
def testSuccess(self, *_):
settings.ICONS_GCS_BUCKET = 'test'
filename = 'testpackage.dmg'
munki_name = 'testpackage'
models.PackageInfo(
key_name=filename, filename=filename,
name=munki_name, _plist=self.plist).put()
    content = 'ICON_CONTENT'
resp = self.testapp.post(
'/admin/upload_icon/%s' % filename,
upload_files=[('icon', '1.png', content)], status=httplib.FOUND)
self.assertTrue(
resp.headers['Location'].endswith('/admin/package/%s' % filename))
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
| googlearchive/simian | src/tests/simian/mac/admin/upload_icon_test.py | upload_icon_test.py | py | 1,811 | python | en | code | 334 | github-code | 13 |
2850641455 | from ...abstasks.AbsTaskRetrieval import AbsTaskRetrieval
from ...abstasks.BeIRPLTask import BeIRPLTask
class FiQAPLRetrieval(AbsTaskRetrieval, BeIRPLTask):
@property
def description(self):
return {
"name": "FiQA-PL",
"beir_name": "fiqa-pl",
"description": "Financial Opinion Mining and Question Answering",
"reference": "https://sites.google.com/view/fiqa/",
"benchmark": "BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language",
"type": "Retrieval",
"category": "s2p",
"eval_splits": ["test"],
"eval_langs": ["pl"],
"main_score": "ndcg_at_10",
}
| embeddings-benchmark/mteb | mteb/tasks/Retrieval/FiQAPLRetrieval.py | FiQAPLRetrieval.py | py | 714 | python | en | code | 755 | github-code | 13 |
9069119938 | import requests
import os
from os import path
import preprocessor as pre
#import TF_IDF as tf_idf
list_path=[]
def getFile(p):
for element in os.listdir(p):
if('.' not in element):
getFile(p+'/'+element)#
else:
list_path.append(p+"/"+element)
return list_path
# getFile('topic')
# print(list_path[0].split('/')[1])
def create_dir_tree(dir,topic):
if(not path.exists(dir)):
os.mkdir(dir)
if(not path.exists(dir+'/'+topic)):
os.mkdir(dir+'/'+topic)
def fetch(p):
    list_url_file = getFile(p)  # collect the files that contain URLs
list_path = []
for urls_file in list_url_file:
        f = open(urls_file,'r',encoding='utf-8')  # open the file to read its URLs
        urls = f.read().split('\n')  # f.read() returns a string, so split it into a list
        sub_path = urls_file.split('/')  # path components, e.g. 0: root, 1: topic, 2: file name
create_dir_tree('result',sub_path[1])
for i in range(len(urls)):
            try:
resp = requests.get(urls[i])
if resp.ok:
result_path = 'result/'+sub_path[1]+'/'+sub_path[2].split('.')[0]+'_'+str(i)+'.txt'
resf = open(result_path,'w',encoding='utf-8')
resf.write(resp.text)
except:
print('cant not fetch to ',urls[i])
print("*"*100)
def preprocessor():  # preprocess the data fetched above
for file in getFile('result'):
pre.preprocessor(file)
def run(p):
fetch(p)
preprocessor()
| nvtuehcmus/datamining | crawl_from_files.py | crawl_from_files.py | py | 1,649 | python | en | code | 0 | github-code | 13 |
48031287064 | # Random Modules
import random
for i in range(3):
random.random()
print(random.random())
# ==========================
for i in range(3):
print(random.randint(10, 20))
# ==========================
members = ['John', 'Merry', 'Bob', 'Mars']
leader = random.choice(members)
print (leader)
# ==========================
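# shuffle and sample (illustrative additions using the same random module)
random.shuffle(members)
print(members)
print(random.sample(members, 2))
# ==========================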
class Dice:
def roll(self):
first = random.randint(1, 6)
second = random.randint(1, 6)
return first, second
dice = Dice()
print(dice.roll()) | artemkiryu/Trunk_Repo | generatingRandomValues.py | generatingRandomValues.py | py | 534 | python | en | code | 1 | github-code | 13 |
32397892072 | """
Defines the ZAL class
"""
from __future__ import annotations
from typing import AnyStr, Optional, Mapping, Iterator, Tuple
import dataclasses
from lxml import etree
from . import xml_utils
__all__ = ['Gegevensdienst', 'Zorgaanbieder', 'ZAL']
@dataclasses.dataclass(frozen=True)
class Gegevensdienst:
# pylint: disable=too-few-public-methods
"""
    A data service (gegevensdienst) from the ZAL as described at
https://afsprakenstelsel.medmij.nl/"""
id: str
authorization_endpoint_uri: str
token_endpoint_uri: str
def __repr__(self) -> str:
return f"<Gegevensdienst {self.id!r}>"
@dataclasses.dataclass(frozen=True)
class Zorgaanbieder:
# pylint: disable=too-few-public-methods
"""
    A care provider (zorgaanbieder) from the ZAL as described at
https://afsprakenstelsel.medmij.nl/"""
naam: str
gegevensdiensten: Mapping[str, Gegevensdienst]
def __repr__(self) -> str:
return f"<Zorgaanbieder {self.naam!r}>"
class ZAL(Mapping[str, Zorgaanbieder]):
"""
    A care-provider list (zorgaanbiederslijst) as described at
https://afsprakenstelsel.medmij.nl/
>>> import medmij.tests.testdata
>>> zal = ZAL(medmij.tests.testdata.ZAL_EXAMPLE_XML)
>>> za = zal["umcharderwijk@medmij"]
>>> za
<Zorgaanbieder 'umcharderwijk@medmij'>
>>> za.gegevensdiensten["4"]
<Gegevensdienst '4'>
"""
NS = "xmlns://afsprakenstelsel.medmij.nl/zorgaanbiederslijst/release2/"
_parser: Optional[etree.XMLParser] = None
_zorgaanbieders: Mapping[str, Zorgaanbieder]
@classmethod
def _get_xsd_parser(cls) -> etree.XMLParser:
if cls._parser is None:
cls._parser = xml_utils.xsd_parser_from_resource("zal.xsd")
return cls._parser
def __init__(self, xmldata: AnyStr) -> None:
parser = self._get_xsd_parser()
xml = etree.fromstring(xmldata, parser=parser)
self._zorgaanbieders = self._parse(xml)
@staticmethod
def _parse(xml: etree.Element) -> Mapping[str, Zorgaanbieder]:
nss = {'z': ZAL.NS}
def gegevensdienst(node: etree.Element) -> Tuple[str, Gegevensdienst]:
token_endpoint_uri = node.xpath(
'.//z:TokenEndpointuri', namespaces=nss)[0].text
authorization_endpoint_uri = node.xpath(
'.//z:AuthorizationEndpointuri', namespaces=nss)[0].text
id_ = node.find('z:GegevensdienstId', namespaces=nss).text
return id_, Gegevensdienst(
id=id_,
token_endpoint_uri=token_endpoint_uri,
authorization_endpoint_uri=authorization_endpoint_uri,
)
def zorgaanbieder(node: etree.Element) -> Tuple[str, Zorgaanbieder]:
naam = node.find('z:Zorgaanbiedernaam', namespaces=nss).text
ggs = node.xpath('.//z:Gegevensdienst', namespaces=nss)
gegevensdiensten = dict(gegevensdienst(node) for node in ggs)
return naam, Zorgaanbieder(naam=naam,
gegevensdiensten=gegevensdiensten)
        xpath = xml.xpath('//z:Zorgaanbieder', namespaces=nss)
return dict(zorgaanbieder(node) for node in xpath)
def __getitem__(self, key: str) -> Zorgaanbieder:
return self._zorgaanbieders[key]
def __iter__(self) -> Iterator:
return self._zorgaanbieders.__iter__()
def __len__(self) -> int:
return self._zorgaanbieders.__len__()
| Zorgdoc/medmij-python | medmij/zal.py | zal.py | py | 3,426 | python | nl | code | 0 | github-code | 13 |
20999522690 | import glob, os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import math
from shutil import copyfile
import datetime
import pickle
import csv
## IMAGE DISPLAY
def showImages(images, cols=None, rows=None, cmap=None):
if len(images) == 1:
showImage(images[0],cmap=cmap)
return
if rows is None and cols is None:
rows = cols = int(math.ceil(math.sqrt(len(images))))
if rows is None:
rows = int(math.ceil(len(images) / cols))
if cols is None:
cols = int(math.ceil(len(images) / rows))
if type(images[0]) == type(""):
images = list(map(lambda image_path:cv2.imread(image_path),images))
i = 0
f, sub_plts = plt.subplots(rows, cols)
for r in range(rows):
for c in range(cols):
sub_plts[r, c].axis('off')
if i<len(images):
sub_plts[r,c].imshow(images[i],cmap=cmap)
i += 1
plt.show()
plt.close('all')
def showImage(image, cmap=None):
if type(image) == type(""):
image = cv2.imread(image)
plt.imshow(image, cmap=cmap)
plt.show()
plt.close('all')
def drawGrid(img,rows=10,cols=10):
img = img.copy()
h,w,d = img.shape
dh = h / rows
dw = w / cols
for r in range(rows):
for c in range(cols):
cv2.line(img, (0, int(dh*r)), (w,int(dh*r)), (255, 0, 0), 5) # horizontal
cv2.line(img, ( int(dw*c), 0), ( int(dw*c), h), (0, 255, 0), 5) # vertical
return img
## IMAGE MODIFICATION
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
def cropImage(image,margins): # css style: top, right, bottom, left
h,w,d = image.shape
return image[margins[1]:w-margins[3], margins[0]:h-margins[2]]
def color_space(image, cspace=None):
if cspace == 'HSV':
return cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif cspace == 'LUV':
return cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif cspace == 'HLS':
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif cspace == 'YUV':
return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif cspace == 'YCrCb':
return cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else:
return image.copy()
def normalizeImage(img):
img = img.copy()
if np.max(img) <= 1: # convert bitmask into image
img *= 255
if len(img.shape) == 2 or img.shape[2] == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
## IMAGE LOAD
def saveImage(path, img, cspace = None):
if cspace is not None:
img = cv2.cvtColor(img, cspace)
cv2.imwrite(path,img)
pass
def loadImage(path, cspace = cv2.COLOR_BGR2RGB):
img = cv2.imread(path)
img = cv2.cvtColor(img, cspace)
return img
def loadImages(path, cspace = cv2.COLOR_BGR2RGB):
img_paths = glob.glob(path)
imgs = []
for img_path in img_paths:
img = loadImage(img_path,cspace=cspace)
imgs.append(img)
return imgs
## FS OPERATIONS
def replaceExtension(path, ext):
parts = path.split('.')
parts[-1] = ext
return ".".join(parts)
def filenameAppend(path, suffix):
parts = path.split(".")
ext = parts[-1]
base = ".".join(parts[:-1])+suffix+'.'+ext
return base
def filename(path):
parts = path.split('/')
if len(parts) > 0:
return parts[-1]
else:
return path
def copy(src,dst):
if not os.path.isfile(src):
return None
parts = dst.split('/')
os.makedirs("/".join(parts[:-1]), exist_ok=True)
return copyfile(src, dst)
def loadData(path):
if not os.path.exists(path):
return None
value=None
ext = path.split('.')[-1]
if ext == 'jpg':
value = cv2.imread(path)
elif ext == 'p':
with open(path, 'rb') as pfile:
value = pickle.load(pfile)
return value
def saveData(path,data):
print('saveData path', path)
print('saveData type', type(data))
ext = path.split('.')[-1]
if ext == 'jpg':
cv2.imwrite(path, data)
elif ext == 'p':
with open(path, 'wb') as pfile:
pickle.dump(data, pfile)
return True
def loadCSV(path, delimiter=',', quotes='"'):
if not os.path.exists(path):
return None
lines = []
with open(path, 'r') as csvfile:
csvreader = csv.reader(csvfile,delimiter=delimiter, quotechar=quotes)
lines = list(csvreader)
return lines
def makedirs(path):
os.makedirs(path, exist_ok=True)
## other
def standardDatetime():
return datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
def lastFile(pathFilter):
list_of_files = glob.glob(pathFilter)
latest_file = max(list_of_files, key=os.path.getctime)
return latest_file
## play sounds
'''
def play(path):
sound = pygame.mixer.Sound(path)
sound.play()
''' | cesare-montresor/deep-document-parser | utils.py | utils.py | py | 5,192 | python | en | code | 0 | github-code | 13 |
18025985935 | """
Problem: given the weight of each edge, find the shortest path and the
shortest-path length from the start vertex s to every other vertex.
Note: negative weights are not considered; otherwise the negative-cost
cycle problem would arise.
    s: the start vertex
    v: the vertex the algorithm is currently processing
    u: a vertex adjacent to v
    d: the distance from s to v
    d(u): the distance from s to u
    e(v,u): the weight of the edge from vertex v to vertex u
Analysis:
Dijkstra's algorithm runs in stages, like the unweighted shortest-path
algorithm (process vertices at distance 0 first, then distance 1, and so
on): each stage starts from the smallest distance found so far. In each
stage the algorithm selects a vertex v with the smallest d(v) among all
unknown vertices and declares the shortest path from s to v known. The
rest of the stage updates d(u) and prev (the previous vertex) for each
neighbour u of v, or possibly leaves them unchanged.
Each stage processes a neighbour u as follows:
1. Unweighted case: if d(u) is infinite, set d(u) = d(v) + 1.
2. Weighted case: if d(u) is infinite, set d(u) = d(v) + e(v,u).
3. If d(u) is finite, check whether going through v makes the path to u
   shorter. If d(v) + e(v,u) < d(u), update d(u) to d(v) + e(v,u) and set
   u's prev to vertex v; otherwise leave u unchanged.
"""
global edges
global vlist
global vset
class Vertex:  # vertex class
    def __init__(self, vid, outlist):
        self.vid = vid  # vertex id
        self.outlist = outlist  # ids of the vertices this vertex points to, i.e. an adjacency list (indices only, no vertex objects)
        self.known = False  # whether this vertex has been visited
        self.dist = float('inf')  # distance from s to this vertex, infinity by default
        self.prev = 0  # id of the previous vertex, 0 by default
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.vid == other.vid
else:
return False
def __hash__(self):
        return hash(self.vid)  # hash by vid, so equal vertex objects share the same hash
def addEdge(front, back, value):  # store the weight of an edge
edges[(front, back)] = value
def reset():
vset = set([v1, v2, v3, v4, v5, v6, v7])
for i in range(1, len(vlist)):
vlist[i].dist = float('inf')
vlist[i].known = False
return vset, vlist
def get_unknown_min():  # stands in for the dequeue operation of a priority queue
min = 0
index = 0
    flag = 0  # flag marking whether the first unknown vertex has been found
for i in range(1, len(vlist)):
        if vlist[i].known is True:  # skip all known vertices
continue
else:
            if flag == 0:  # take the first unknown vertex's distance to compare against the others
min = vlist[i].dist
index = i
else:
if vlist[i].dist < min:
min = vlist[i].dist
index = i
flag += 1
    # the unknown vertex with the smallest distance has now been found
    vset.remove(vlist[index])  # equivalent to a dequeue operation
return vlist[index]
def dijkstra(start):
vlist[start].dist = 0
while len(vset) != 0:
v = get_unknown_min()
v.known = True
for u in v.outlist:
if vlist[u].known is True:
continue
else:
if vlist[u].dist == float('inf'):
vlist[u].dist = v.dist + edges[(v.vid, u)]
vlist[u].prev = v.vid
if vlist[u].dist > (v.dist + edges[(v.vid, u)]):
vlist[u].dist = v.dist + edges[(v.vid, u)]
vlist[u].prev = v.vid
else:
pass
def printpath(start, end):
path = []
path = getpath(start, end, path)
length = 1
spath = ''
for s in path:
if length >= len(path):
last = s
break
spath = spath + 'v' + str(s) + '-->'
length += 1
spath = spath + 'v' + str(last)
    print('Shortest path: %s' % spath)
    print('Length of this shortest path:', vlist[end].dist)
def getpath(start, index, path):
if index == start:
path.insert(0, start)
return path
if vlist[index].dist == float('inf'):
        print('There is no path from the start vertex to this vertex')
return
path.insert(0, index)
path = getpath(start, vlist[index].prev, path)
return path
if __name__ == '__main__':
edges = dict()
addEdge(1, 2, 2)
addEdge(1, 4, 1)
addEdge(3, 1, 4)
addEdge(4, 3, 2)
addEdge(2, 4, 3)
addEdge(2, 5, 10)
addEdge(4, 5, 2)
addEdge(3, 6, 5)
addEdge(4, 6, 8)
addEdge(4, 7, 4)
addEdge(7, 6, 1)
addEdge(5, 7, 6)
    # create the vertex objects
v1 = Vertex(1, [2, 4])
v2 = Vertex(2, [4, 5])
v3 = Vertex(3, [1, 6])
v4 = Vertex(4, [3, 5, 6, 7])
v5 = Vertex(5, [7])
v6 = Vertex(6, [])
v7 = Vertex(7, [6])
vlist = [False, v1, v2, v3, v4, v5, v6, v7]
vset = set([v1, v2, v3, v4, v5, v6, v7])
dijkstra(1)
printpath(1, 3)
printpath(1, 6)
printpath(1, 5)
vset, vlist = reset()
dijkstra(2)
printpath(2, 6)
printpath(2, 7)
vset, vlist = reset()
dijkstra(4)
printpath(4, 6)
printpath(4, 7)
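    # For reference, a minimal heap-based sketch of the same relaxation rule,
    # independent of the Vertex class above (O((V + E) log V) with heapq):
    #
    #   import heapq
    #   def dijkstra_heap(adj, source):
    #       # adj maps each vertex to a list of (neighbour, weight) pairs
    #       dist = {u: float('inf') for u in adj}
    #       dist[source] = 0
    #       pq = [(0, source)]
    #       while pq:
    #           d, u = heapq.heappop(pq)
    #           if d > dist[u]:
    #               continue  # stale queue entry
    #           for v, w in adj[u]:
    #               if d + w < dist[v]:
    #                   dist[v] = d + w
    #                   heapq.heappush(pq, (d + w, v))
    #       return dist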
| 7Bcoding/Python-data-structure-algorithm | 5-图论算法/Dijkstra-迪杰斯特拉算法.py | Dijkstra-迪杰斯特拉算法.py | py | 5,475 | python | zh | code | 1 | github-code | 13 |
17040795144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.RcsmartCommonAppInfo import RcsmartCommonAppInfo
from alipay.aop.api.domain.ApprovalQuery import ApprovalQuery
class AlipayFincoreComplianceRcservcenterRcsmartQueryModel(object):
def __init__(self):
self._app_info = None
self._approval_query = None
@property
def app_info(self):
return self._app_info
@app_info.setter
def app_info(self, value):
if isinstance(value, RcsmartCommonAppInfo):
self._app_info = value
else:
self._app_info = RcsmartCommonAppInfo.from_alipay_dict(value)
@property
def approval_query(self):
return self._approval_query
@approval_query.setter
def approval_query(self, value):
if isinstance(value, ApprovalQuery):
self._approval_query = value
else:
self._approval_query = ApprovalQuery.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.app_info:
if hasattr(self.app_info, 'to_alipay_dict'):
params['app_info'] = self.app_info.to_alipay_dict()
else:
params['app_info'] = self.app_info
if self.approval_query:
if hasattr(self.approval_query, 'to_alipay_dict'):
params['approval_query'] = self.approval_query.to_alipay_dict()
else:
params['approval_query'] = self.approval_query
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFincoreComplianceRcservcenterRcsmartQueryModel()
if 'app_info' in d:
o.app_info = d['app_info']
if 'approval_query' in d:
o.approval_query = d['approval_query']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayFincoreComplianceRcservcenterRcsmartQueryModel.py | AlipayFincoreComplianceRcservcenterRcsmartQueryModel.py | py | 1,908 | python | en | code | 241 | github-code | 13 |
13614130380 | # Load libraries
import os
import matplotlib.image as mpimg
import numpy as np
import cv2
import torch
from torch.utils.data import Dataset, DataLoader
from skimage import transform
# Create facial keypoint dataset class
class FacialKeypointsDataset(Dataset):
def __init__(self, key_points, root_dir, transform=None):
self.key_pts_frame = key_points
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.key_pts_frame)
def __getitem__(self, idx):
image_name = os.path.join(self.root_dir, self.key_pts_frame.iloc[idx, 0])
image = mpimg.imread(image_name)
if image.shape[2] == 4:
image = image[:, :, 0:3]
key_pts = self.key_pts_frame.iloc[idx, 1:].to_numpy()
key_pts = key_pts.astype('float').reshape(-1, 2)
sample = {'image': image, 'keypoints': key_pts}
if self.transform:
sample = self.transform(sample)
return sample
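# A hedged usage sketch tying this dataset to the transform classes defined
# below (the CSV path and image folder are illustrative assumptions):
#
#   from torchvision import transforms
#   import pandas as pd
#   key_pts = pd.read_csv('data/training_frames_keypoints.csv')
#   data_transform = transforms.Compose(
#       [Rescale(250), RandomCrop(224), Normalize(), ToTensor()])
#   dataset = FacialKeypointsDataset(key_pts, 'data/training/', data_transform)
#   loader = DataLoader(dataset, batch_size=10, shuffle=True)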
# Create transformations
class Normalize(object):
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
image_copy = np.copy(image)
key_pts_copy = np.copy(key_pts)
image_copy = cv2.cvtColor(image_copy, cv2.COLOR_RGB2GRAY)
image_copy = image_copy/255.0
key_pts_copy = (key_pts_copy - 100)/50.0
return {'image': image_copy, 'keypoints': key_pts_copy}
class Rescale(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
h, w = image.shape[:2]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size*h/w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
image = transform.resize(image, (new_h, new_w))
key_pts = key_pts*[new_w/w, new_h/h]
return {'image': image, 'keypoints': key_pts}
class RescaleImage(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
h, w = sample.shape[:2]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size*h/w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
image = transform.resize(sample, (new_h, new_w))
return image
class RandomCrop(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h,
left: left + new_w]
key_pts = key_pts - [left, top]
return {'image': image, 'keypoints': key_pts}
class RandomCropImage(object): # Have to crop because the image is rescaled to the same scale, not exactly to the input
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
h, w = sample.shape[:2]
new_h, new_w = self.output_size
if h > new_h:
top = np.random.randint(0, h - new_h)
elif h == new_h:
top = 0
else:
top = np.random.randint(h - new_h, 0)
if w > new_w:
left = np.random.randint(0, w - new_w)
elif w == new_w:
left = 0
else:
left = np.random.randint(w - new_w, 0)
image = sample[top: top + new_h, left: left + new_w]
return image
class ToTensor(object):
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
if len(image.shape) == 2:
image = image.reshape(image.shape[0], image.shape[1], 1)
image = image.transpose(2, 0, 1)
return {'image': torch.from_numpy(image),
'keypoints': torch.from_numpy(key_pts)} | cverdence/face_detection | src/transforms.py | transforms.py | py | 4,935 | python | en | code | 1 | github-code | 13 |
42586654525 | # -*- coding: utf-8 -*-
import cv2 as cv
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
capture = cv.VideoCapture(0)
index = 0
while True:
    if GPIO.input(13) == 1:
        print(1)
        #capture = cv.VideoCapture(0)
        ret, frame = capture.read()
        #gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        cv.imshow("video", frame)
        cv.imwrite(str(index) + ".jpg", frame)
        index += 1
        cv.waitKey(500)
    else:
        print(0)
        cv.destroyAllWindows()
        time.sleep(0.5)
| inseasonzzz/camerause | tian.py | tian.py | py | 623 | python | en | code | 0 | github-code | 13 |
74564552978 | #!/usr/bin/env python
"""
_New_
Oracle implementation of Masks.New
"""
from WMCore.WMBS.MySQL.Masks.New import New as NewMasksMySQL
class New(NewMasksMySQL):
sql = NewMasksMySQL.sql
def getDictBinds(self, jobList, inclusivemask):
binds = []
maskV = 'T' if inclusivemask else 'F'
for job in jobList:
binds.append({'jobid': job['id'], 'inclusivemask': maskV,
'firstevent': job['mask']['FirstEvent'],
'lastevent': job['mask']['LastEvent'],
'firstrun': job['mask']['FirstRun'],
'lastrun': job['mask']['LastRun'],
'firstlumi': job['mask']['FirstLumi'],
'lastlumi': job['mask']['LastLumi']})
return binds
def execute(self, jobList, inclusivemask=True, conn=None, transaction=False):
binds = self.getDictBinds(jobList, inclusivemask)
self.dbi.processData(self.sql, binds, conn=conn, transaction=transaction)
return
| dmwm/WMCore | src/python/WMCore/WMBS/Oracle/Masks/New.py | New.py | py | 1,051 | python | en | code | 44 | github-code | 13 |
29498185056 | import asyncio
import logging
import os
import re
import requests
import subprocess
import sys
import threading
from hashlib import sha512
from operator import itemgetter
from handlers.base import BaseHandler
from handlers.mixins import NonemptyMessageMixin, RateLimitMixin
from handlers.registry import register_handler
import settings
from mcstatus import MinecraftServer
logger = logging.getLogger('ninjabot.handler')
"""
Source: https://github.com/ivanseidel/Is-Now-Illegal
"""
@register_handler('illegal')
class IllegalGIF(NonemptyMessageMixin, RateLimitMixin, BaseHandler):
async def wait_generation(self, file_location, thread=None):
while True:
if thread is None or not thread.is_alive():
if os.path.isfile(file_location):
await self.send_file(file_location)
return
else:
raise OSError
await asyncio.sleep(0.4)
async def respond(self):
gif_message = self.content_str
while True:
match = re.search('<(:[A-Z]+:)[0-9]+>', gif_message)
try:
gif_message = gif_message.replace(match.group(0), match.group(1))
except Exception:
break
if len(gif_message) >= 15:
await self.send_message('Only sentences less than 15 characters are allowed.')
return
logger.info('Generating illegal GIF with message "%s"', gif_message)
filename = sha512(gif_message.encode('utf-8')).hexdigest() + '.gif'
file_location = os.path.join(settings.ILLEGAL_CACHE, filename)
if os.path.isfile(file_location):
logger.info('Illegal GIF with message "%s" already exists. Sending and returning.', gif_message)
await self.bot.loop.create_task(self.wait_generation(file_location))
return
msg = await self.send_message(self.discord_user.mention + ', please wait while the GIF is generated.')
try:
t = threading.Thread(
target=subprocess.run,
args=([
sys.executable, os.path.join(settings.ILLEGAL_DIR, 'rotoscope', 'generate.py'),
gif_message, os.path.join(settings.ILLEGAL_DIR, 'rotoscope', 'GIF', 'Trump'),
file_location,
],),
)
t.start()
await self.bot.loop.create_task(self.wait_generation(file_location, t))
except Exception:
logger.error('Failed to generate illegal GIF with message "%s".', gif_message)
await self.send_message('Generation of the GIF failed for an unknown reason...')
await self.delete_message(msg)
"""
Source: https://github.com/Dinnerbone/mcstatus
"""
@register_handler('mcstatus')
class MCStatus(NonemptyMessageMixin, RateLimitMixin, BaseHandler):
argument_name = 'address'
async def respond(self):
address = self.content[0].strip()
server = MinecraftServer.lookup(address)
logger.info('Querying minecraft server with IP %s.', address)
try:
status = server.status().raw
if isinstance(status['description'], dict):
name = status['description']['text']
else:
name = status['description']
online_players = status['players']['online']
max_players = status['players']['max']
em = self.create_embed('Minecraft Server Status', 'Server query to {}'.format(address), colour=0xFF630A)
if name:
em.add_field(name='Name', value=name)
em.add_field(name='Version', value=status['version']['name'])
em.add_field(name='Ping', value=str(server.ping()))
players = ['No one is online right now.']
if online_players > 0:
players = list(map(itemgetter('name'), status['players']['sample']))
em.add_field(name='Online Players ({}/{})'.format(online_players, max_players), value='\n'.join(players))
await self.send_message(embed=em)
except Exception as e:
logger.warn('Failed to query %s. %s', address, e)
await self.send_message('Could not query the server. Please check that the address is correct.')
@register_handler('inspire')
class Inspire(RateLimitMixin, BaseHandler):
limit_seconds = 1
async def respond(self):
await self.send_message(requests.get('http://inspirobot.me/api?generate=true').text)
| Ninjaclasher/Ninjabot | handlers/third_party.py | third_party.py | py | 4,531 | python | en | code | 1 | github-code | 13 |
12216925557 | import cv2
import face_recognition
import numpy as np
import known_faces as faces
# https://github.com/ageitgey/face_recognition/blob/master/examples/facerec_from_webcam_faster.py
# 1. Need to load all images and resize photos and make a new set in new folder.
# 2. Fixed red error during running and found.
# 3. Refactor code to collect all name and location in the same place like teacher's code.
# Use this code to be code base because it's faster than teacher's code.
# Next steps:
# 1. จะให้มัน logs เข้า Google sheets อย่างไร
# 2. จะเช็คยังไงว่า อันนี้คือ check-in อันนีี้คือ check-out
# 3. สร้าง function มาสำหรับถ่าย VDO แล้วก้อ capture หน้าออกมาเยอะๆเลย
# 4. แต่หลังจากได้หน้าออกมาเยอะๆแล้ว จะต้องสร้าง function มา rename ชื่อให้มันและตามด้วยตัวเลขเรียงกันไปเยอะๆด้วย
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
# ========================================================
known_face_names = []
known_face_encodings = []
for face in faces.known_faces:
try:
print(face)
known_face_names.append(face[0])
face_image = face_recognition.load_image_file(face[1])
# print('--- face_image ----')
# print(face_image)
# print('--------face_recognition.face_encodings----------')
# print(face_recognition.face_encodings(face_image))
face_encoding = face_recognition.face_encodings(face_image)[0]
known_face_encodings.append(face_encoding)
except IndexError as err:
print('--- Exception ---')
print(err)
pass
# =========================================================
video_capture = cv2.VideoCapture(0)
while True:
ret, frame = video_capture.read()
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
if process_this_frame:
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
# print('------ best match ---------')
# print(face_distances)
# print(best_match_index)
            if min(face_distances) < 0.45:  # a low distance means a strong match
name = known_face_names[best_match_index]
face_names.append(name)
else:
face_names.append('Unknown')
# if matches[best_match_index]:
# name = known_face_names[best_match_index]
# face_names.append(name)
process_this_frame = not process_this_frame
for (top, right, bottom, left), name in zip(face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
| atthana/computer_vision_codium | base_code.py | base_code.py | py | 3,871 | python | en | code | 0 | github-code | 13 |
71996118739 | def calculate_average(scores):
total_subjects = len(scores)
total_score = sum(scores.values())
average_score = total_score / total_subjects
return average_score
scores = {}
num_subjects = int(input("Enter the number of subjects: "))
for i in range(num_subjects):
subject = input(f"Enter the name of subject {i+1}: ")
score = float(input(f"Enter the score for subject {i+1}: "))
scores[subject] = score
average_score = calculate_average(scores)
if average_score >= 90:
grade = "A"
elif average_score >= 80:
grade = "B"
elif average_score >= 70:
grade = "C"
elif average_score >= 60:
grade = "D"
else:
grade = "F"
grade_points = {
"A": 4.0,
"B": 3.0,
"C": 2.0,
"D": 1.0,
"F": 0.0
}
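# Worked example: scores {'Math': 92, 'Physics': 78} average to 85.00,
# which maps to letter grade 'B' and 3.0 grade points.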
print(f"\nAverage score: {average_score:.2f}")
print(f"Letter grade: {grade}")
print(f"Grade points: {grade_points[grade]}")
| uakk101/Material | PythonPractice/Tasks/Task_03.py | Task_03.py | py | 884 | python | en | code | 0 | github-code | 13 |
72626466578 | #!/usr/bin/env python3
import sys
def read_fasta(filepath):
"""
Generator to read multiline fasta found at the filepath
as required.
Yields a tuple containing the (accession, sequence)
Arguments:
filepath -- string containing path to Fasta formatted file
Return:
(accession, sequence) -- tuple containing two strings, first with the
accession and second with sequence
"""
with open(filepath) as filehandle:
accession = None
sequence = ""
for line in filehandle:
# removes newline character from the end of the line
line = line.strip()
if line.startswith(">"):
# will be True if accession==None
if accession:
"""
yield is similar to return but works for generators
the next iteration the function will return after
the yield command until the generator is exhausted
i.e. all the file has been read in this case
https://wiki.python.org/moin/Generators
"""
yield (accession, sequence)
accession = line
sequence = ""
else:
sequence += line
if accession:
yield (accession, sequence)
def find_longest_shared_motif(sequences):
"""
Find the longest shared motif i.e. longest common substring
    from a list of sequences, while avoiding as much unnecessary
    work as possible.
A suffix tree is the theoretically optimal way to do this but is
probably excessive for a problem like this.
Arguments:
sequences -- list of sequences formatted as strings
Return:
shared_motif -- longest shared motif/substring between sequences
"""
# we only want to look at the shortest first because
# no shared motif can be longer than the shortest sequence
sequences.sort(key=lambda s: len(s))
shortest_seq = sequences.pop(0)
longest_motif = ""
for start_ix in range(len(shortest_seq)):
# step backwards using negative 3rd argument in range
for end_ix in range(len(shortest_seq), start_ix, -1):
# only bother checking if we haven't already found something longer
current_candidate = shortest_seq[start_ix: end_ix]
if len(current_candidate) > len(longest_motif):
share_motif = []
for seq in sequences:
if current_candidate in seq:
share_motif.append(True)
# if any sequence doesn't have the motif then it isn't
# common so we can move straight onto the next candidate
# using break
else:
share_motif.append(False)
break
# if everything shares the new longer candidate update the
# longest found motif
if all(share_motif):
longest_motif = current_candidate
return longest_motif
if __name__ == '__main__':
sequences = [seq[1] for seq in read_fasta(sys.argv[1])]
longest_motif = find_longest_shared_motif(sequences)
print(longest_motif)
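    # Worked example (hand-checked): for ["GATTACA", "TAGACCA", "ATACA"] the
    # shortest sequence is "ATACA"; the first longest substring shared by all
    # three is "TA", so find_longest_shared_motif returns "TA".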
| Znigneering/BioinformaticTurtorial | Comments/find_shared_motif.py | find_shared_motif.py | py | 3,350 | python | en | code | 0 | github-code | 13 |